1 /* 2 * Copyright (c) 2001 Wind River Systems 3 * Copyright (c) 1997, 1998, 1999, 2001 4 * Bill Paul <wpaul@windriver.com>. All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 3. All advertising materials mentioning features or use of this software 15 * must display the following acknowledgement: 16 * This product includes software developed by Bill Paul. 17 * 4. Neither the name of the author nor the names of any co-contributors 18 * may be used to endorse or promote products derived from this software 19 * without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 24 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 31 * THE POSSIBILITY OF SUCH DAMAGE. 
32 * 33 * $FreeBSD: src/sys/dev/bge/if_bge.c,v 1.3.2.39 2005/07/03 03:41:18 silby Exp $ 34 */ 35 36 #include "opt_bnx.h" 37 #include "opt_ifpoll.h" 38 39 #include <sys/param.h> 40 #include <sys/bus.h> 41 #include <sys/endian.h> 42 #include <sys/kernel.h> 43 #include <sys/interrupt.h> 44 #include <sys/mbuf.h> 45 #include <sys/malloc.h> 46 #include <sys/queue.h> 47 #include <sys/rman.h> 48 #include <sys/serialize.h> 49 #include <sys/socket.h> 50 #include <sys/sockio.h> 51 #include <sys/sysctl.h> 52 53 #include <netinet/ip.h> 54 #include <netinet/tcp.h> 55 56 #include <net/bpf.h> 57 #include <net/ethernet.h> 58 #include <net/if.h> 59 #include <net/if_arp.h> 60 #include <net/if_dl.h> 61 #include <net/if_media.h> 62 #include <net/if_poll.h> 63 #include <net/if_types.h> 64 #include <net/ifq_var.h> 65 #include <net/toeplitz.h> 66 #include <net/toeplitz2.h> 67 #include <net/vlan/if_vlan_var.h> 68 #include <net/vlan/if_vlan_ether.h> 69 70 #include <dev/netif/mii_layer/mii.h> 71 #include <dev/netif/mii_layer/miivar.h> 72 #include <dev/netif/mii_layer/brgphyreg.h> 73 74 #include "pcidevs.h" 75 #include <bus/pci/pcireg.h> 76 #include <bus/pci/pcivar.h> 77 78 #include <dev/netif/bge/if_bgereg.h> 79 #include <dev/netif/bnx/if_bnxvar.h> 80 81 /* "device miibus" required. See GENERIC if you get errors here. */ 82 #include "miibus_if.h" 83 84 #define BNX_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) 85 86 #define BNX_RESET_SHUTDOWN 0 87 #define BNX_RESET_START 1 88 #define BNX_RESET_SUSPEND 2 89 90 #define BNX_INTR_CKINTVL ((10 * hz) / 1000) /* 10ms */ 91 92 #ifdef BNX_RSS_DEBUG 93 #define BNX_RSS_DPRINTF(sc, lvl, fmt, ...) \ 94 do { \ 95 if (sc->bnx_rss_debug >= lvl) \ 96 if_printf(&sc->arpcom.ac_if, fmt, __VA_ARGS__); \ 97 } while (0) 98 #else /* !BNX_RSS_DEBUG */ 99 #define BNX_RSS_DPRINTF(sc, lvl, fmt, ...) 
((void)0) 100 #endif /* BNX_RSS_DEBUG */ 101 102 static const struct bnx_type { 103 uint16_t bnx_vid; 104 uint16_t bnx_did; 105 char *bnx_name; 106 } bnx_devs[] = { 107 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5717, 108 "Broadcom BCM5717 Gigabit Ethernet" }, 109 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5717C, 110 "Broadcom BCM5717C Gigabit Ethernet" }, 111 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5718, 112 "Broadcom BCM5718 Gigabit Ethernet" }, 113 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5719, 114 "Broadcom BCM5719 Gigabit Ethernet" }, 115 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5720_ALT, 116 "Broadcom BCM5720 Gigabit Ethernet" }, 117 118 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5725, 119 "Broadcom BCM5725 Gigabit Ethernet" }, 120 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5727, 121 "Broadcom BCM5727 Gigabit Ethernet" }, 122 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5762, 123 "Broadcom BCM5762 Gigabit Ethernet" }, 124 125 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57761, 126 "Broadcom BCM57761 Gigabit Ethernet" }, 127 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57762, 128 "Broadcom BCM57762 Gigabit Ethernet" }, 129 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57765, 130 "Broadcom BCM57765 Gigabit Ethernet" }, 131 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57766, 132 "Broadcom BCM57766 Gigabit Ethernet" }, 133 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57781, 134 "Broadcom BCM57781 Gigabit Ethernet" }, 135 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57782, 136 "Broadcom BCM57782 Gigabit Ethernet" }, 137 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57785, 138 "Broadcom BCM57785 Gigabit Ethernet" }, 139 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57786, 140 "Broadcom BCM57786 Gigabit Ethernet" }, 141 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57791, 142 "Broadcom BCM57791 Fast Ethernet" }, 143 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57795, 144 "Broadcom BCM57795 Fast Ethernet" }, 145 
146 { 0, 0, NULL } 147 }; 148 149 static const int bnx_tx_mailbox[BNX_TX_RING_MAX] = { 150 BGE_MBX_TX_HOST_PROD0_LO, 151 BGE_MBX_TX_HOST_PROD0_HI, 152 BGE_MBX_TX_HOST_PROD1_LO, 153 BGE_MBX_TX_HOST_PROD1_HI 154 }; 155 156 #define BNX_IS_JUMBO_CAPABLE(sc) ((sc)->bnx_flags & BNX_FLAG_JUMBO) 157 #define BNX_IS_5717_PLUS(sc) ((sc)->bnx_flags & BNX_FLAG_5717_PLUS) 158 #define BNX_IS_57765_PLUS(sc) ((sc)->bnx_flags & BNX_FLAG_57765_PLUS) 159 #define BNX_IS_57765_FAMILY(sc) \ 160 ((sc)->bnx_flags & BNX_FLAG_57765_FAMILY) 161 162 typedef int (*bnx_eaddr_fcn_t)(struct bnx_softc *, uint8_t[]); 163 164 static int bnx_probe(device_t); 165 static int bnx_attach(device_t); 166 static int bnx_detach(device_t); 167 static void bnx_shutdown(device_t); 168 static int bnx_suspend(device_t); 169 static int bnx_resume(device_t); 170 static int bnx_miibus_readreg(device_t, int, int); 171 static int bnx_miibus_writereg(device_t, int, int, int); 172 static void bnx_miibus_statchg(device_t); 173 174 static int bnx_handle_status(struct bnx_softc *); 175 #ifdef IFPOLL_ENABLE 176 static void bnx_npoll(struct ifnet *, struct ifpoll_info *); 177 static void bnx_npoll_rx(struct ifnet *, void *, int); 178 static void bnx_npoll_tx(struct ifnet *, void *, int); 179 static void bnx_npoll_tx_notag(struct ifnet *, void *, int); 180 static void bnx_npoll_status(struct ifnet *); 181 static void bnx_npoll_status_notag(struct ifnet *); 182 #endif 183 static void bnx_intr_legacy(void *); 184 static void bnx_msi(void *); 185 static void bnx_intr(struct bnx_softc *); 186 static void bnx_msix_status(void *); 187 static void bnx_msix_tx_status(void *); 188 static void bnx_msix_rx(void *); 189 static void bnx_msix_rxtx(void *); 190 static void bnx_enable_intr(struct bnx_softc *); 191 static void bnx_disable_intr(struct bnx_softc *); 192 static void bnx_txeof(struct bnx_tx_ring *, uint16_t); 193 static void bnx_rxeof(struct bnx_rx_ret_ring *, uint16_t, int); 194 static int bnx_alloc_intr(struct bnx_softc *); 195 
static int bnx_setup_intr(struct bnx_softc *); 196 static void bnx_free_intr(struct bnx_softc *); 197 static void bnx_teardown_intr(struct bnx_softc *, int); 198 static int bnx_alloc_msix(struct bnx_softc *); 199 static void bnx_free_msix(struct bnx_softc *, boolean_t); 200 static void bnx_check_intr_rxtx(void *); 201 static void bnx_check_intr_rx(void *); 202 static void bnx_check_intr_tx(void *); 203 static void bnx_rx_std_refill_ithread(void *); 204 static void bnx_rx_std_refill(void *, void *); 205 static void bnx_rx_std_refill_sched_ipi(void *); 206 static void bnx_rx_std_refill_stop(void *); 207 static void bnx_rx_std_refill_sched(struct bnx_rx_ret_ring *, 208 struct bnx_rx_std_ring *); 209 210 static void bnx_start(struct ifnet *, struct ifaltq_subque *); 211 static int bnx_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *); 212 static void bnx_init(void *); 213 static void bnx_stop(struct bnx_softc *); 214 static void bnx_watchdog(struct ifaltq_subque *); 215 static int bnx_ifmedia_upd(struct ifnet *); 216 static void bnx_ifmedia_sts(struct ifnet *, struct ifmediareq *); 217 static void bnx_tick(void *); 218 static void bnx_serialize(struct ifnet *, enum ifnet_serialize); 219 static void bnx_deserialize(struct ifnet *, enum ifnet_serialize); 220 static int bnx_tryserialize(struct ifnet *, enum ifnet_serialize); 221 #ifdef INVARIANTS 222 static void bnx_serialize_assert(struct ifnet *, enum ifnet_serialize, 223 boolean_t); 224 #endif 225 static void bnx_serialize_skipmain(struct bnx_softc *); 226 static void bnx_deserialize_skipmain(struct bnx_softc *sc); 227 228 static int bnx_alloc_jumbo_mem(struct bnx_softc *); 229 static void bnx_free_jumbo_mem(struct bnx_softc *); 230 static struct bnx_jslot 231 *bnx_jalloc(struct bnx_softc *); 232 static void bnx_jfree(void *); 233 static void bnx_jref(void *); 234 static int bnx_newbuf_std(struct bnx_rx_ret_ring *, int, int); 235 static int bnx_newbuf_jumbo(struct bnx_softc *, int, int); 236 static void 
bnx_setup_rxdesc_std(struct bnx_rx_std_ring *, int); 237 static void bnx_setup_rxdesc_jumbo(struct bnx_softc *, int); 238 static int bnx_init_rx_ring_std(struct bnx_rx_std_ring *); 239 static void bnx_free_rx_ring_std(struct bnx_rx_std_ring *); 240 static int bnx_init_rx_ring_jumbo(struct bnx_softc *); 241 static void bnx_free_rx_ring_jumbo(struct bnx_softc *); 242 static void bnx_free_tx_ring(struct bnx_tx_ring *); 243 static int bnx_init_tx_ring(struct bnx_tx_ring *); 244 static int bnx_create_tx_ring(struct bnx_tx_ring *); 245 static void bnx_destroy_tx_ring(struct bnx_tx_ring *); 246 static int bnx_create_rx_ret_ring(struct bnx_rx_ret_ring *); 247 static void bnx_destroy_rx_ret_ring(struct bnx_rx_ret_ring *); 248 static int bnx_dma_alloc(device_t); 249 static void bnx_dma_free(struct bnx_softc *); 250 static int bnx_dma_block_alloc(struct bnx_softc *, bus_size_t, 251 bus_dma_tag_t *, bus_dmamap_t *, void **, bus_addr_t *); 252 static void bnx_dma_block_free(bus_dma_tag_t, bus_dmamap_t, void *); 253 static struct mbuf * 254 bnx_defrag_shortdma(struct mbuf *); 255 static int bnx_encap(struct bnx_tx_ring *, struct mbuf **, 256 uint32_t *, int *); 257 static int bnx_setup_tso(struct bnx_tx_ring *, struct mbuf **, 258 uint16_t *, uint16_t *); 259 static void bnx_setup_serialize(struct bnx_softc *); 260 static void bnx_set_tick_cpuid(struct bnx_softc *, boolean_t); 261 static void bnx_setup_ring_cnt(struct bnx_softc *); 262 263 static struct pktinfo *bnx_rss_info(struct pktinfo *, 264 const struct bge_rx_bd *); 265 static void bnx_init_rss(struct bnx_softc *); 266 static void bnx_reset(struct bnx_softc *); 267 static int bnx_chipinit(struct bnx_softc *); 268 static int bnx_blockinit(struct bnx_softc *); 269 static void bnx_stop_block(struct bnx_softc *, bus_size_t, uint32_t); 270 static void bnx_enable_msi(struct bnx_softc *, boolean_t); 271 static void bnx_setmulti(struct bnx_softc *); 272 static void bnx_setpromisc(struct bnx_softc *); 273 static void 
bnx_stats_update_regs(struct bnx_softc *); 274 static uint32_t bnx_dma_swap_options(struct bnx_softc *); 275 276 static uint32_t bnx_readmem_ind(struct bnx_softc *, uint32_t); 277 static void bnx_writemem_ind(struct bnx_softc *, uint32_t, uint32_t); 278 #ifdef notdef 279 static uint32_t bnx_readreg_ind(struct bnx_softc *, uint32_t); 280 #endif 281 static void bnx_writemem_direct(struct bnx_softc *, uint32_t, uint32_t); 282 static void bnx_writembx(struct bnx_softc *, int, int); 283 static int bnx_read_nvram(struct bnx_softc *, caddr_t, int, int); 284 static uint8_t bnx_eeprom_getbyte(struct bnx_softc *, uint32_t, uint8_t *); 285 static int bnx_read_eeprom(struct bnx_softc *, caddr_t, uint32_t, size_t); 286 287 static void bnx_tbi_link_upd(struct bnx_softc *, uint32_t); 288 static void bnx_copper_link_upd(struct bnx_softc *, uint32_t); 289 static void bnx_autopoll_link_upd(struct bnx_softc *, uint32_t); 290 static void bnx_link_poll(struct bnx_softc *); 291 292 static int bnx_get_eaddr_mem(struct bnx_softc *, uint8_t[]); 293 static int bnx_get_eaddr_nvram(struct bnx_softc *, uint8_t[]); 294 static int bnx_get_eaddr_eeprom(struct bnx_softc *, uint8_t[]); 295 static int bnx_get_eaddr(struct bnx_softc *, uint8_t[]); 296 297 static void bnx_coal_change(struct bnx_softc *); 298 static int bnx_sysctl_force_defrag(SYSCTL_HANDLER_ARGS); 299 static int bnx_sysctl_tx_wreg(SYSCTL_HANDLER_ARGS); 300 static int bnx_sysctl_rx_coal_ticks(SYSCTL_HANDLER_ARGS); 301 static int bnx_sysctl_tx_coal_ticks(SYSCTL_HANDLER_ARGS); 302 static int bnx_sysctl_rx_coal_bds(SYSCTL_HANDLER_ARGS); 303 static int bnx_sysctl_rx_coal_bds_poll(SYSCTL_HANDLER_ARGS); 304 static int bnx_sysctl_tx_coal_bds(SYSCTL_HANDLER_ARGS); 305 static int bnx_sysctl_tx_coal_bds_poll(SYSCTL_HANDLER_ARGS); 306 static int bnx_sysctl_rx_coal_bds_int(SYSCTL_HANDLER_ARGS); 307 static int bnx_sysctl_tx_coal_bds_int(SYSCTL_HANDLER_ARGS); 308 static int bnx_sysctl_coal_chg(SYSCTL_HANDLER_ARGS, uint32_t *, 309 int, int, 
uint32_t); 310 #ifdef IFPOLL_ENABLE 311 static int bnx_sysctl_npoll_offset(SYSCTL_HANDLER_ARGS); 312 static int bnx_sysctl_npoll_rxoff(SYSCTL_HANDLER_ARGS); 313 static int bnx_sysctl_npoll_txoff(SYSCTL_HANDLER_ARGS); 314 #endif 315 static int bnx_sysctl_std_refill(SYSCTL_HANDLER_ARGS); 316 317 static void bnx_sig_post_reset(struct bnx_softc *, int); 318 static void bnx_sig_pre_reset(struct bnx_softc *, int); 319 static void bnx_ape_lock_init(struct bnx_softc *); 320 static void bnx_ape_read_fw_ver(struct bnx_softc *); 321 static int bnx_ape_lock(struct bnx_softc *, int); 322 static void bnx_ape_unlock(struct bnx_softc *, int); 323 static void bnx_ape_send_event(struct bnx_softc *, uint32_t); 324 static void bnx_ape_driver_state_change(struct bnx_softc *, int); 325 326 static int bnx_msi_enable = 1; 327 static int bnx_msix_enable = 1; 328 329 static int bnx_rx_rings = 0; /* auto */ 330 static int bnx_tx_rings = 0; /* auto */ 331 332 TUNABLE_INT("hw.bnx.msi.enable", &bnx_msi_enable); 333 TUNABLE_INT("hw.bnx.msix.enable", &bnx_msix_enable); 334 TUNABLE_INT("hw.bnx.rx_rings", &bnx_rx_rings); 335 TUNABLE_INT("hw.bnx.tx_rings", &bnx_tx_rings); 336 337 static device_method_t bnx_methods[] = { 338 /* Device interface */ 339 DEVMETHOD(device_probe, bnx_probe), 340 DEVMETHOD(device_attach, bnx_attach), 341 DEVMETHOD(device_detach, bnx_detach), 342 DEVMETHOD(device_shutdown, bnx_shutdown), 343 DEVMETHOD(device_suspend, bnx_suspend), 344 DEVMETHOD(device_resume, bnx_resume), 345 346 /* bus interface */ 347 DEVMETHOD(bus_print_child, bus_generic_print_child), 348 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 349 350 /* MII interface */ 351 DEVMETHOD(miibus_readreg, bnx_miibus_readreg), 352 DEVMETHOD(miibus_writereg, bnx_miibus_writereg), 353 DEVMETHOD(miibus_statchg, bnx_miibus_statchg), 354 355 DEVMETHOD_END 356 }; 357 358 static DEFINE_CLASS_0(bnx, bnx_driver, bnx_methods, sizeof(struct bnx_softc)); 359 static devclass_t bnx_devclass; 360 361 
DECLARE_DUMMY_MODULE(if_bnx); 362 DRIVER_MODULE(if_bnx, pci, bnx_driver, bnx_devclass, NULL, NULL); 363 DRIVER_MODULE(miibus, bnx, miibus_driver, miibus_devclass, NULL, NULL); 364 365 static uint32_t 366 bnx_readmem_ind(struct bnx_softc *sc, uint32_t off) 367 { 368 device_t dev = sc->bnx_dev; 369 uint32_t val; 370 371 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4); 372 val = pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4); 373 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4); 374 return (val); 375 } 376 377 static void 378 bnx_writemem_ind(struct bnx_softc *sc, uint32_t off, uint32_t val) 379 { 380 device_t dev = sc->bnx_dev; 381 382 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4); 383 pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4); 384 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4); 385 } 386 387 static void 388 bnx_writemem_direct(struct bnx_softc *sc, uint32_t off, uint32_t val) 389 { 390 CSR_WRITE_4(sc, off, val); 391 } 392 393 static void 394 bnx_writembx(struct bnx_softc *sc, int off, int val) 395 { 396 CSR_WRITE_4(sc, off, val); 397 } 398 399 /* 400 * Read a sequence of bytes from NVRAM. 401 */ 402 static int 403 bnx_read_nvram(struct bnx_softc *sc, caddr_t dest, int off, int cnt) 404 { 405 return (1); 406 } 407 408 /* 409 * Read a byte of data stored in the EEPROM at address 'addr.' The 410 * BCM570x supports both the traditional bitbang interface and an 411 * auto access interface for reading the EEPROM. We use the auto 412 * access method. 413 */ 414 static uint8_t 415 bnx_eeprom_getbyte(struct bnx_softc *sc, uint32_t addr, uint8_t *dest) 416 { 417 int i; 418 uint32_t byte = 0; 419 420 /* 421 * Enable use of auto EEPROM access so we can avoid 422 * having to use the bitbang method. 423 */ 424 BNX_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM); 425 426 /* Reset the EEPROM, load the clock period. 
*/ 427 CSR_WRITE_4(sc, BGE_EE_ADDR, 428 BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL)); 429 DELAY(20); 430 431 /* Issue the read EEPROM command. */ 432 CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr); 433 434 /* Wait for completion */ 435 for(i = 0; i < BNX_TIMEOUT * 10; i++) { 436 DELAY(10); 437 if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE) 438 break; 439 } 440 441 if (i == BNX_TIMEOUT) { 442 if_printf(&sc->arpcom.ac_if, "eeprom read timed out\n"); 443 return(1); 444 } 445 446 /* Get result. */ 447 byte = CSR_READ_4(sc, BGE_EE_DATA); 448 449 *dest = (byte >> ((addr % 4) * 8)) & 0xFF; 450 451 return(0); 452 } 453 454 /* 455 * Read a sequence of bytes from the EEPROM. 456 */ 457 static int 458 bnx_read_eeprom(struct bnx_softc *sc, caddr_t dest, uint32_t off, size_t len) 459 { 460 size_t i; 461 int err; 462 uint8_t byte; 463 464 for (byte = 0, err = 0, i = 0; i < len; i++) { 465 err = bnx_eeprom_getbyte(sc, off + i, &byte); 466 if (err) 467 break; 468 *(dest + i) = byte; 469 } 470 471 return(err ? 1 : 0); 472 } 473 474 static int 475 bnx_miibus_readreg(device_t dev, int phy, int reg) 476 { 477 struct bnx_softc *sc = device_get_softc(dev); 478 uint32_t val; 479 int i; 480 481 KASSERT(phy == sc->bnx_phyno, 482 ("invalid phyno %d, should be %d", phy, sc->bnx_phyno)); 483 484 if (bnx_ape_lock(sc, sc->bnx_phy_ape_lock) != 0) 485 return 0; 486 487 /* Clear the autopoll bit if set, otherwise may trigger PCI errors. */ 488 if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) { 489 CSR_WRITE_4(sc, BGE_MI_MODE, 490 sc->bnx_mi_mode & ~BGE_MIMODE_AUTOPOLL); 491 DELAY(80); 492 } 493 494 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ | BGE_MICOMM_BUSY | 495 BGE_MIPHY(phy) | BGE_MIREG(reg)); 496 497 /* Poll for the PHY register access to complete. 
*/ 498 for (i = 0; i < BNX_TIMEOUT; i++) { 499 DELAY(10); 500 val = CSR_READ_4(sc, BGE_MI_COMM); 501 if ((val & BGE_MICOMM_BUSY) == 0) { 502 DELAY(5); 503 val = CSR_READ_4(sc, BGE_MI_COMM); 504 break; 505 } 506 } 507 if (i == BNX_TIMEOUT) { 508 if_printf(&sc->arpcom.ac_if, "PHY read timed out " 509 "(phy %d, reg %d, val 0x%08x)\n", phy, reg, val); 510 val = 0; 511 } 512 513 /* Restore the autopoll bit if necessary. */ 514 if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) { 515 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bnx_mi_mode); 516 DELAY(80); 517 } 518 519 bnx_ape_unlock(sc, sc->bnx_phy_ape_lock); 520 521 if (val & BGE_MICOMM_READFAIL) 522 return 0; 523 524 return (val & 0xFFFF); 525 } 526 527 static int 528 bnx_miibus_writereg(device_t dev, int phy, int reg, int val) 529 { 530 struct bnx_softc *sc = device_get_softc(dev); 531 int i; 532 533 KASSERT(phy == sc->bnx_phyno, 534 ("invalid phyno %d, should be %d", phy, sc->bnx_phyno)); 535 536 if (bnx_ape_lock(sc, sc->bnx_phy_ape_lock) != 0) 537 return 0; 538 539 /* Clear the autopoll bit if set, otherwise may trigger PCI errors. */ 540 if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) { 541 CSR_WRITE_4(sc, BGE_MI_MODE, 542 sc->bnx_mi_mode & ~BGE_MIMODE_AUTOPOLL); 543 DELAY(80); 544 } 545 546 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE | BGE_MICOMM_BUSY | 547 BGE_MIPHY(phy) | BGE_MIREG(reg) | val); 548 549 for (i = 0; i < BNX_TIMEOUT; i++) { 550 DELAY(10); 551 if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) { 552 DELAY(5); 553 CSR_READ_4(sc, BGE_MI_COMM); /* dummy read */ 554 break; 555 } 556 } 557 if (i == BNX_TIMEOUT) { 558 if_printf(&sc->arpcom.ac_if, "PHY write timed out " 559 "(phy %d, reg %d, val %d)\n", phy, reg, val); 560 } 561 562 /* Restore the autopoll bit if necessary. 
*/ 563 if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) { 564 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bnx_mi_mode); 565 DELAY(80); 566 } 567 568 bnx_ape_unlock(sc, sc->bnx_phy_ape_lock); 569 570 return 0; 571 } 572 573 static void 574 bnx_miibus_statchg(device_t dev) 575 { 576 struct bnx_softc *sc; 577 struct mii_data *mii; 578 uint32_t mac_mode; 579 580 sc = device_get_softc(dev); 581 if ((sc->arpcom.ac_if.if_flags & IFF_RUNNING) == 0) 582 return; 583 584 mii = device_get_softc(sc->bnx_miibus); 585 586 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == 587 (IFM_ACTIVE | IFM_AVALID)) { 588 switch (IFM_SUBTYPE(mii->mii_media_active)) { 589 case IFM_10_T: 590 case IFM_100_TX: 591 sc->bnx_link = 1; 592 break; 593 case IFM_1000_T: 594 case IFM_1000_SX: 595 case IFM_2500_SX: 596 sc->bnx_link = 1; 597 break; 598 default: 599 sc->bnx_link = 0; 600 break; 601 } 602 } else { 603 sc->bnx_link = 0; 604 } 605 if (sc->bnx_link == 0) 606 return; 607 608 /* 609 * APE firmware touches these registers to keep the MAC 610 * connected to the outside world. Try to keep the 611 * accesses atomic. 612 */ 613 614 mac_mode = CSR_READ_4(sc, BGE_MAC_MODE) & 615 ~(BGE_MACMODE_PORTMODE | BGE_MACMODE_HALF_DUPLEX); 616 617 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T || 618 IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) 619 mac_mode |= BGE_PORTMODE_GMII; 620 else 621 mac_mode |= BGE_PORTMODE_MII; 622 623 if ((mii->mii_media_active & IFM_GMASK) != IFM_FDX) 624 mac_mode |= BGE_MACMODE_HALF_DUPLEX; 625 626 CSR_WRITE_4(sc, BGE_MAC_MODE, mac_mode); 627 DELAY(40); 628 } 629 630 /* 631 * Memory management for jumbo frames. 632 */ 633 static int 634 bnx_alloc_jumbo_mem(struct bnx_softc *sc) 635 { 636 struct ifnet *ifp = &sc->arpcom.ac_if; 637 struct bnx_jslot *entry; 638 uint8_t *ptr; 639 bus_addr_t paddr; 640 int i, error; 641 642 /* 643 * Create tag for jumbo mbufs. 644 * This is really a bit of a kludge. 
We allocate a special 645 * jumbo buffer pool which (thanks to the way our DMA 646 * memory allocation works) will consist of contiguous 647 * pages. This means that even though a jumbo buffer might 648 * be larger than a page size, we don't really need to 649 * map it into more than one DMA segment. However, the 650 * default mbuf tag will result in multi-segment mappings, 651 * so we have to create a special jumbo mbuf tag that 652 * lets us get away with mapping the jumbo buffers as 653 * a single segment. I think eventually the driver should 654 * be changed so that it uses ordinary mbufs and cluster 655 * buffers, i.e. jumbo frames can span multiple DMA 656 * descriptors. But that's a project for another day. 657 */ 658 659 /* 660 * Create DMA stuffs for jumbo RX ring. 661 */ 662 error = bnx_dma_block_alloc(sc, BGE_JUMBO_RX_RING_SZ, 663 &sc->bnx_cdata.bnx_rx_jumbo_ring_tag, 664 &sc->bnx_cdata.bnx_rx_jumbo_ring_map, 665 (void *)&sc->bnx_ldata.bnx_rx_jumbo_ring, 666 &sc->bnx_ldata.bnx_rx_jumbo_ring_paddr); 667 if (error) { 668 if_printf(ifp, "could not create jumbo RX ring\n"); 669 return error; 670 } 671 672 /* 673 * Create DMA stuffs for jumbo buffer block. 674 */ 675 error = bnx_dma_block_alloc(sc, BNX_JMEM, 676 &sc->bnx_cdata.bnx_jumbo_tag, 677 &sc->bnx_cdata.bnx_jumbo_map, 678 (void **)&sc->bnx_ldata.bnx_jumbo_buf, 679 &paddr); 680 if (error) { 681 if_printf(ifp, "could not create jumbo buffer\n"); 682 return error; 683 } 684 685 SLIST_INIT(&sc->bnx_jfree_listhead); 686 687 /* 688 * Now divide it up into 9K pieces and save the addresses 689 * in an array. Note that we play an evil trick here by using 690 * the first few bytes in the buffer to hold the the address 691 * of the softc structure for this interface. This is because 692 * bnx_jfree() needs it, but it is called by the mbuf management 693 * code which will not pass it to us explicitly. 
694 */ 695 for (i = 0, ptr = sc->bnx_ldata.bnx_jumbo_buf; i < BNX_JSLOTS; i++) { 696 entry = &sc->bnx_cdata.bnx_jslots[i]; 697 entry->bnx_sc = sc; 698 entry->bnx_buf = ptr; 699 entry->bnx_paddr = paddr; 700 entry->bnx_inuse = 0; 701 entry->bnx_slot = i; 702 SLIST_INSERT_HEAD(&sc->bnx_jfree_listhead, entry, jslot_link); 703 704 ptr += BNX_JLEN; 705 paddr += BNX_JLEN; 706 } 707 return 0; 708 } 709 710 static void 711 bnx_free_jumbo_mem(struct bnx_softc *sc) 712 { 713 /* Destroy jumbo RX ring. */ 714 bnx_dma_block_free(sc->bnx_cdata.bnx_rx_jumbo_ring_tag, 715 sc->bnx_cdata.bnx_rx_jumbo_ring_map, 716 sc->bnx_ldata.bnx_rx_jumbo_ring); 717 718 /* Destroy jumbo buffer block. */ 719 bnx_dma_block_free(sc->bnx_cdata.bnx_jumbo_tag, 720 sc->bnx_cdata.bnx_jumbo_map, 721 sc->bnx_ldata.bnx_jumbo_buf); 722 } 723 724 /* 725 * Allocate a jumbo buffer. 726 */ 727 static struct bnx_jslot * 728 bnx_jalloc(struct bnx_softc *sc) 729 { 730 struct bnx_jslot *entry; 731 732 lwkt_serialize_enter(&sc->bnx_jslot_serializer); 733 entry = SLIST_FIRST(&sc->bnx_jfree_listhead); 734 if (entry) { 735 SLIST_REMOVE_HEAD(&sc->bnx_jfree_listhead, jslot_link); 736 entry->bnx_inuse = 1; 737 } else { 738 if_printf(&sc->arpcom.ac_if, "no free jumbo buffers\n"); 739 } 740 lwkt_serialize_exit(&sc->bnx_jslot_serializer); 741 return(entry); 742 } 743 744 /* 745 * Adjust usage count on a jumbo buffer. 746 */ 747 static void 748 bnx_jref(void *arg) 749 { 750 struct bnx_jslot *entry = (struct bnx_jslot *)arg; 751 struct bnx_softc *sc = entry->bnx_sc; 752 753 if (sc == NULL) 754 panic("bnx_jref: can't find softc pointer!"); 755 756 if (&sc->bnx_cdata.bnx_jslots[entry->bnx_slot] != entry) { 757 panic("bnx_jref: asked to reference buffer " 758 "that we don't manage!"); 759 } else if (entry->bnx_inuse == 0) { 760 panic("bnx_jref: buffer already free!"); 761 } else { 762 atomic_add_int(&entry->bnx_inuse, 1); 763 } 764 } 765 766 /* 767 * Release a jumbo buffer. 
 */
static void
bnx_jfree(void *arg)
{
	struct bnx_jslot *entry = (struct bnx_jslot *)arg;
	struct bnx_softc *sc = entry->bnx_sc;

	if (sc == NULL)
		panic("bnx_jfree: can't find softc pointer!");

	/* Sanity: the slot index must map back to this very entry. */
	if (&sc->bnx_cdata.bnx_jslots[entry->bnx_slot] != entry) {
		panic("bnx_jfree: asked to free buffer that we don't manage!");
	} else if (entry->bnx_inuse == 0) {
		panic("bnx_jfree: buffer already free!");
	} else {
		/*
		 * Possible MP race to 0, use the serializer.  The atomic insn
		 * is still needed for races against bnx_jref().
		 */
		lwkt_serialize_enter(&sc->bnx_jslot_serializer);
		atomic_subtract_int(&entry->bnx_inuse, 1);
		if (entry->bnx_inuse == 0) {
			SLIST_INSERT_HEAD(&sc->bnx_jfree_listhead,
			    entry, jslot_link);
		}
		lwkt_serialize_exit(&sc->bnx_jslot_serializer);
	}
}


/*
 * Initialize a standard receive ring descriptor.
 *
 * Allocates a fresh cluster mbuf for slot 'i' of the standard RX ring
 * and loads it into a DMA map.  The new mapping is staged in the
 * bnx_rx_buf; bnx_setup_rxdesc_std() later publishes it to the hardware
 * descriptor.  Returns 0 or ENOBUFS.
 *
 * NOTE(review): bnx_rx_refilled is set to 1 even on the error paths —
 * presumably so the refill machinery re-arms the slot with its previous
 * buffer; confirm against the refill ithread before changing.
 */
static int
bnx_newbuf_std(struct bnx_rx_ret_ring *ret, int i, int init)
{
	struct mbuf *m_new = NULL;
	bus_dma_segment_t seg;
	bus_dmamap_t map;
	int error, nsegs;
	struct bnx_rx_buf *rb;

	rb = &ret->bnx_std->bnx_rx_std_buf[i];
	KASSERT(!rb->bnx_rx_refilled, ("RX buf %dth has been refilled", i));

	/* May sleep only during initial ring population. */
	m_new = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m_new == NULL) {
		error = ENOBUFS;
		goto back;
	}
	m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
	/* Align the IP header on a boundary the stack expects. */
	m_adj(m_new, ETHER_ALIGN);

	/* Load into the spare map first; swap maps only on success. */
	error = bus_dmamap_load_mbuf_segment(ret->bnx_rx_mtag,
	    ret->bnx_rx_tmpmap, m_new, &seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m_new);
		goto back;
	}

	if (!init) {
		/* Tear down the old buffer's mapping before replacing it. */
		bus_dmamap_sync(ret->bnx_rx_mtag, rb->bnx_rx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(ret->bnx_rx_mtag, rb->bnx_rx_dmamap);
	}

	/* Swap the temporary map with the slot's map. */
	map = ret->bnx_rx_tmpmap;
	ret->bnx_rx_tmpmap = rb->bnx_rx_dmamap;

	rb->bnx_rx_dmamap = map;
	rb->bnx_rx_mbuf = m_new;
	rb->bnx_rx_paddr = seg.ds_addr;
	rb->bnx_rx_len = m_new->m_len;
back:
	/* Publish the fields above before raising the refilled flag. */
	cpu_sfence();
	rb->bnx_rx_refilled = 1;
	return error;
}

/*
 * Publish a refilled standard-ring buffer to the hardware descriptor
 * for slot 'i' and clear the refilled flag.
 */
static void
bnx_setup_rxdesc_std(struct bnx_rx_std_ring *std, int i)
{
	struct bnx_rx_buf *rb;
	struct bge_rx_bd *r;
	bus_addr_t paddr;
	int len;

	rb = &std->bnx_rx_std_buf[i];
	KASSERT(rb->bnx_rx_refilled, ("RX buf %dth is not refilled", i));

	paddr = rb->bnx_rx_paddr;
	len = rb->bnx_rx_len;

	/* Pair with the cpu_sfence() in bnx_newbuf_std(). */
	cpu_mfence();

	rb->bnx_rx_refilled = 0;

	r = &std->bnx_rx_std_ring[i];
	r->bge_addr.bge_addr_lo = BGE_ADDR_LO(paddr);
	r->bge_addr.bge_addr_hi = BGE_ADDR_HI(paddr);
	r->bge_len = len;
	r->bge_idx = i;
	r->bge_flags = BGE_RXBDFLAG_END;
}

/*
 * Initialize a jumbo receive ring descriptor. This allocates
 * a jumbo buffer from the pool managed internally by the driver.
 */
static int
bnx_newbuf_jumbo(struct bnx_softc *sc, int i, int init)
{
	struct mbuf *m_new = NULL;
	struct bnx_jslot *buf;
	bus_addr_t paddr;

	/* Allocate the mbuf. */
	MGETHDR(m_new, init ? MB_WAIT : MB_DONTWAIT, MT_DATA);
	if (m_new == NULL)
		return ENOBUFS;

	/* Allocate the jumbo buffer */
	buf = bnx_jalloc(sc);
	if (buf == NULL) {
		m_freem(m_new);
		return ENOBUFS;
	}

	/* Attach the buffer to the mbuf. */
	m_new->m_ext.ext_arg = buf;
	m_new->m_ext.ext_buf = buf->bnx_buf;
	m_new->m_ext.ext_free = bnx_jfree;
	m_new->m_ext.ext_ref = bnx_jref;
	m_new->m_ext.ext_size = BNX_JUMBO_FRAMELEN;

	m_new->m_flags |= M_EXT;

	m_new->m_data = m_new->m_ext.ext_buf;
	m_new->m_len = m_new->m_pkthdr.len = m_new->m_ext.ext_size;

	/* Keep the physical address in step with the m_adj() below. */
	paddr = buf->bnx_paddr;
	m_adj(m_new, ETHER_ALIGN);
	paddr += ETHER_ALIGN;

	/* Save necessary information */
	sc->bnx_cdata.bnx_rx_jumbo_chain[i].bnx_rx_mbuf = m_new;
	sc->bnx_cdata.bnx_rx_jumbo_chain[i].bnx_rx_paddr = paddr;

	/* Set up the descriptor. */
	bnx_setup_rxdesc_jumbo(sc, i);
	return 0;
}

/*
 * Write the hardware descriptor for jumbo RX slot 'i' from the saved
 * chain entry.
 */
static void
bnx_setup_rxdesc_jumbo(struct bnx_softc *sc, int i)
{
	struct bge_rx_bd *r;
	struct bnx_rx_buf *rc;

	r = &sc->bnx_ldata.bnx_rx_jumbo_ring[i];
	rc = &sc->bnx_cdata.bnx_rx_jumbo_chain[i];

	r->bge_addr.bge_addr_lo = BGE_ADDR_LO(rc->bnx_rx_paddr);
	r->bge_addr.bge_addr_hi = BGE_ADDR_HI(rc->bnx_rx_paddr);
	r->bge_len = rc->bnx_rx_mbuf->m_len;
	r->bge_idx = i;
	r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING;
}

/*
 * Populate the whole standard RX ring, reset the refill bookkeeping and
 * hand the producer index to the chip.
 */
static int
bnx_init_rx_ring_std(struct bnx_rx_std_ring *std)
{
	int i, error;

	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
		/* Use the first RX return ring's tmp RX mbuf DMA map */
		error = bnx_newbuf_std(&std->bnx_sc->bnx_rx_ret_ring[0], i, 1);
		if (error)
			return error;
		bnx_setup_rxdesc_std(std, i);
	}

	std->bnx_rx_std_used = 0;
	std->bnx_rx_std_refill = 0;
	std->bnx_rx_std_running = 0;
	/* Make the resets visible before enabling the refill handler. */
	cpu_sfence();
	lwkt_serialize_handler_enable(&std->bnx_rx_std_serialize);

	std->bnx_rx_std = BGE_STD_RX_RING_CNT - 1;
	bnx_writembx(std->bnx_sc, BGE_MBX_RX_STD_PROD_LO, std->bnx_rx_std);

	return(0);
}

/*
 * Tear down the standard RX ring: disable the refill handler, unload
 * and free every buffered mbuf and clear the descriptors.
 */
static void
bnx_free_rx_ring_std(struct bnx_rx_std_ring *std)
{
	int i;

	lwkt_serialize_handler_disable(&std->bnx_rx_std_serialize);

	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
		struct bnx_rx_buf *rb = &std->bnx_rx_std_buf[i];

		rb->bnx_rx_refilled = 0;
		if (rb->bnx_rx_mbuf != NULL) {
			bus_dmamap_unload(std->bnx_rx_mtag, rb->bnx_rx_dmamap);
			m_freem(rb->bnx_rx_mbuf);
			rb->bnx_rx_mbuf = NULL;
		}
		bzero(&std->bnx_rx_std_ring[i], sizeof(struct bge_rx_bd));
	}
}

/*
 * Populate the jumbo RX ring, program its RCB and hand the producer
 * index to the chip.
 */
static int
bnx_init_rx_ring_jumbo(struct bnx_softc *sc)
{
	struct bge_rcb *rcb;
	int i, error;

	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		error = bnx_newbuf_jumbo(sc, i, 1);
		if (error)
			return error;
	}

	sc->bnx_jumbo = BGE_JUMBO_RX_RING_CNT - 1;

	rcb = &sc->bnx_ldata.bnx_info.bnx_jumbo_rx_rcb;
	rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 0);
	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);

	bnx_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bnx_jumbo);

	return(0);
}

/*
 * Free all mbufs attached to the jumbo RX ring and clear its
 * descriptors.  The jumbo slots themselves return to the free list via
 * bnx_jfree() when the mbufs are freed.
 */
static void
bnx_free_rx_ring_jumbo(struct bnx_softc *sc)
{
	int i;

	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		struct bnx_rx_buf *rc = &sc->bnx_cdata.bnx_rx_jumbo_chain[i];

		if (rc->bnx_rx_mbuf != NULL) {
			m_freem(rc->bnx_rx_mbuf);
			rc->bnx_rx_mbuf = NULL;
		}
		bzero(&sc->bnx_ldata.bnx_rx_jumbo_ring[i],
		    sizeof(struct bge_rx_bd));
	}
}

/*
 * Release every pending TX mbuf/DMA mapping and clear the descriptors.
 */
static void
bnx_free_tx_ring(struct bnx_tx_ring *txr)
{
	int i;

	for (i = 0; i < BGE_TX_RING_CNT; i++) {
		struct bnx_tx_buf *buf = &txr->bnx_tx_buf[i];

		if (buf->bnx_tx_mbuf != NULL) {
			bus_dmamap_unload(txr->bnx_tx_mtag,
			    buf->bnx_tx_dmamap);
			m_freem(buf->bnx_tx_mbuf);
			buf->bnx_tx_mbuf = NULL;
		}
		bzero(&txr->bnx_tx_ring[i], sizeof(struct bge_tx_bd));
	}
	txr->bnx_tx_saved_considx = BNX_TXCONS_UNSET;
}

/*
 * Reset the TX ring software state and push the initial (zero)
 * producer index to the chip's send mailbox.
 */
static int
bnx_init_tx_ring(struct bnx_tx_ring *txr)
{
	txr->bnx_tx_cnt = 0;
	txr->bnx_tx_saved_considx = 0;
	txr->bnx_tx_prodidx = 0;

	/* Initialize transmit producer index for host-memory send ring. */
	bnx_writembx(txr->bnx_sc, txr->bnx_tx_mbx, txr->bnx_tx_prodidx);

	return(0);
}

/*
 * Program the hardware multicast filter (four 32-bit hash registers)
 * from the interface's multicast address list.  ALLMULTI/PROMISC
 * simply opens the filter completely.
 */
static void
bnx_setmulti(struct bnx_softc *sc)
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	uint32_t hashes[4] = { 0, 0, 0, 0 };
	int h, i;

	ifp = &sc->arpcom.ac_if;

	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		for (i = 0; i < 4; i++)
			CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
		return;
	}

	/* First, zot all the existing filters. */
	for (i = 0; i < 4; i++)
		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);

	/* Now program new ones. */
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		/* Low 7 bits of the CRC select bit h in hashes[h>>5]. */
		h = ether_crc32_le(
		    LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
		    ETHER_ADDR_LEN) & 0x7f;
		hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
	}

	for (i = 0; i < 4; i++)
		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
}

/*
 * Do endian, PCI and DMA initialization: set the byte-swap/general
 * mode registers, clear the statistics and status blocks in NIC
 * memory, apply chip-specific PCI-E workarounds and program the PCI
 * DMA read/write control register.
 */
static int
bnx_chipinit(struct bnx_softc *sc)
{
	uint32_t dma_rw_ctl, mode_ctl;
	int i;

	/* Set endian type before we access any non-PCI registers. */
	pci_write_config(sc->bnx_dev, BGE_PCI_MISC_CTL,
	    BGE_INIT | BGE_PCIMISCCTL_TAGGED_STATUS, 4);

	/*
	 * Clear the MAC statistics block in the NIC's
	 * internal memory.
	 */
	for (i = BGE_STATS_BLOCK;
	    i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
		BNX_MEMWIN_WRITE(sc, i, 0);

	/* Likewise for the status block. */
	for (i = BGE_STATUS_BLOCK;
	    i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
		BNX_MEMWIN_WRITE(sc, i, 0);

	if (BNX_IS_57765_FAMILY(sc)) {
		uint32_t val;

		if (sc->bnx_chipid == BGE_CHIPID_BCM57765_A0) {
			mode_ctl = CSR_READ_4(sc, BGE_MODE_CTL);
			val = mode_ctl & ~BGE_MODECTL_PCIE_PORTS;

			/* Access the lower 1K of PL PCI-E block registers. */
			CSR_WRITE_4(sc, BGE_MODE_CTL,
			    val | BGE_MODECTL_PCIE_PL_SEL);

			val = CSR_READ_4(sc, BGE_PCIE_PL_LO_PHYCTL5);
			val |= BGE_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ;
			CSR_WRITE_4(sc, BGE_PCIE_PL_LO_PHYCTL5, val);

			/* Restore the original register window. */
			CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl);
		}
		if (sc->bnx_chiprev != BGE_CHIPREV_57765_AX) {
			/* Fix transmit hangs */
			val = CSR_READ_4(sc, BGE_CPMU_PADRNG_CTL);
			val |= BGE_CPMU_PADRNG_CTL_RDIV2;
			CSR_WRITE_4(sc, BGE_CPMU_PADRNG_CTL, val);

			mode_ctl = CSR_READ_4(sc, BGE_MODE_CTL);
			val = mode_ctl & ~BGE_MODECTL_PCIE_PORTS;

			/* Access the lower 1K of DL PCI-E block registers. */
			CSR_WRITE_4(sc, BGE_MODE_CTL,
			    val | BGE_MODECTL_PCIE_DL_SEL);

			val = CSR_READ_4(sc, BGE_PCIE_DL_LO_FTSMAX);
			val &= ~BGE_PCIE_DL_LO_FTSMAX_MASK;
			val |= BGE_PCIE_DL_LO_FTSMAX_VAL;
			CSR_WRITE_4(sc, BGE_PCIE_DL_LO_FTSMAX, val);

			/* Restore the original register window. */
			CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl);
		}

		val = CSR_READ_4(sc, BGE_CPMU_LSPD_10MB_CLK);
		val &= ~BGE_CPMU_LSPD_10MB_MACCLK_MASK;
		val |= BGE_CPMU_LSPD_10MB_MACCLK_6_25;
		CSR_WRITE_4(sc, BGE_CPMU_LSPD_10MB_CLK, val);
	}

	/*
	 * Set up the PCI DMA control register.
	 */
	dma_rw_ctl = pci_read_config(sc->bnx_dev, BGE_PCI_DMA_RW_CTL, 4);
	/*
	 * Disable 32bytes cache alignment for DMA write to host memory
	 *
	 * NOTE:
	 * 64bytes cache alignment for DMA write to host memory is still
	 * enabled.
	 */
	dma_rw_ctl |= BGE_PCIDMARWCTL_DIS_CACHE_ALIGNMENT;
	if (sc->bnx_chipid == BGE_CHIPID_BCM57765_A0)
		dma_rw_ctl &= ~BGE_PCIDMARWCTL_CRDRDR_RDMA_MRRS_MSK;
	/*
	 * Enable HW workaround for controllers that misinterpret
	 * a status tag update and leave interrupts permanently
	 * disabled.
	 */
	if (sc->bnx_asicrev != BGE_ASICREV_BCM5717 &&
	    sc->bnx_asicrev != BGE_ASICREV_BCM5762 &&
	    !BNX_IS_57765_FAMILY(sc))
		dma_rw_ctl |= BGE_PCIDMARWCTL_TAGGED_STATUS_WA;
	if (bootverbose) {
		if_printf(&sc->arpcom.ac_if, "DMA read/write %#x\n",
		    dma_rw_ctl);
	}
	pci_write_config(sc->bnx_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);

	/*
	 * Set up general mode register.
	 */
	mode_ctl = bnx_dma_swap_options(sc);
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5720 ||
	    sc->bnx_asicrev == BGE_ASICREV_BCM5762) {
		/* Retain Host-2-BMC settings written by APE firmware. */
		mode_ctl |= CSR_READ_4(sc, BGE_MODE_CTL) &
		    (BGE_MODECTL_BYTESWAP_B2HRX_DATA |
		    BGE_MODECTL_WORDSWAP_B2HRX_DATA |
		    BGE_MODECTL_B2HRX_ENABLE | BGE_MODECTL_HTX2B_ENABLE);
	}
	mode_ctl |= BGE_MODECTL_MAC_ATTN_INTR |
	    BGE_MODECTL_HOST_SEND_BDS | BGE_MODECTL_TX_NO_PHDR_CSUM;
	CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl);

	/*
	 * Disable memory write invalidate.  Apparently it is not supported
	 * properly by these devices.  Also ensure that INTx isn't disabled,
	 * as these chips need it even when using MSI.
	 */
	PCI_CLRBIT(sc->bnx_dev, BGE_PCI_CMD,
	    (PCIM_CMD_MWRICEN | PCIM_CMD_INTxDIS), 4);

	/* Set the timer prescaler (always 66Mhz) */
	CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);

	return(0);
}

/*
 * Bring up the chip's internal blocks after reset: buffer manager,
 * producer/return/send ring RCBs, host coalescing, the MAC and all
 * DMA/state machines, and link state change attentions.
 * Returns 0 on success or ENXIO if a block fails to start.
 */
static int
bnx_blockinit(struct bnx_softc *sc)
{
	struct bnx_intr_data *intr;
	struct bge_rcb *rcb;
	bus_size_t vrcb;
	bge_hostaddr taddr;
	uint32_t val;
	int i, limit;

	/*
	 * Initialize the memory window pointer register so that
	 * we can access the first 32K of internal NIC RAM. This will
	 * allow us to set up the TX send ring RCBs and the RX return
	 * ring RCBs, plus other things which live in NIC memory.
	 */
	CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);

	/* Configure mbuf pool watermarks */
	if (BNX_IS_57765_PLUS(sc)) {
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
		if (sc->arpcom.ac_if.if_mtu > ETHERMTU) {
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x7e);
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xea);
		} else {
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x2a);
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xa0);
		}
	} else {
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
	}

	/* Configure DMA resource watermarks */
	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);

	/* Enable buffer manager */
	val = BGE_BMANMODE_ENABLE | BGE_BMANMODE_LOMBUF_ATTN;
	/*
	 * Change the arbitration algorithm of TXMBUF read request to
	 * round-robin instead of priority based for BCM5719.  When
	 * TXFIFO is almost empty, RDMA will hold its request until
	 * TXFIFO is not almost empty.
1262 */ 1263 if (sc->bnx_asicrev == BGE_ASICREV_BCM5719) 1264 val |= BGE_BMANMODE_NO_TX_UNDERRUN; 1265 if (sc->bnx_asicrev == BGE_ASICREV_BCM5717 || 1266 sc->bnx_chipid == BGE_CHIPID_BCM5719_A0 || 1267 sc->bnx_chipid == BGE_CHIPID_BCM5720_A0) 1268 val |= BGE_BMANMODE_LOMBUF_ATTN; 1269 CSR_WRITE_4(sc, BGE_BMAN_MODE, val); 1270 1271 /* Poll for buffer manager start indication */ 1272 for (i = 0; i < BNX_TIMEOUT; i++) { 1273 if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE) 1274 break; 1275 DELAY(10); 1276 } 1277 1278 if (i == BNX_TIMEOUT) { 1279 if_printf(&sc->arpcom.ac_if, 1280 "buffer manager failed to start\n"); 1281 return(ENXIO); 1282 } 1283 1284 /* Enable flow-through queues */ 1285 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF); 1286 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0); 1287 1288 /* Wait until queue initialization is complete */ 1289 for (i = 0; i < BNX_TIMEOUT; i++) { 1290 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0) 1291 break; 1292 DELAY(10); 1293 } 1294 1295 if (i == BNX_TIMEOUT) { 1296 if_printf(&sc->arpcom.ac_if, 1297 "flow-through queue init failed\n"); 1298 return(ENXIO); 1299 } 1300 1301 /* 1302 * Summary of rings supported by the controller: 1303 * 1304 * Standard Receive Producer Ring 1305 * - This ring is used to feed receive buffers for "standard" 1306 * sized frames (typically 1536 bytes) to the controller. 1307 * 1308 * Jumbo Receive Producer Ring 1309 * - This ring is used to feed receive buffers for jumbo sized 1310 * frames (i.e. anything bigger than the "standard" frames) 1311 * to the controller. 1312 * 1313 * Mini Receive Producer Ring 1314 * - This ring is used to feed receive buffers for "mini" 1315 * sized frames to the controller. 1316 * - This feature required external memory for the controller 1317 * but was never used in a production system. Should always 1318 * be disabled. 
1319 * 1320 * Receive Return Ring 1321 * - After the controller has placed an incoming frame into a 1322 * receive buffer that buffer is moved into a receive return 1323 * ring. The driver is then responsible to passing the 1324 * buffer up to the stack. BCM5718/BCM57785 families support 1325 * multiple receive return rings. 1326 * 1327 * Send Ring 1328 * - This ring is used for outgoing frames. BCM5719/BCM5720 1329 * support multiple send rings. 1330 */ 1331 1332 /* Initialize the standard receive producer ring control block. */ 1333 rcb = &sc->bnx_ldata.bnx_info.bnx_std_rx_rcb; 1334 rcb->bge_hostaddr.bge_addr_lo = 1335 BGE_ADDR_LO(sc->bnx_rx_std_ring.bnx_rx_std_ring_paddr); 1336 rcb->bge_hostaddr.bge_addr_hi = 1337 BGE_ADDR_HI(sc->bnx_rx_std_ring.bnx_rx_std_ring_paddr); 1338 if (BNX_IS_57765_PLUS(sc)) { 1339 /* 1340 * Bits 31-16: Programmable ring size (2048, 1024, 512, .., 32) 1341 * Bits 15-2 : Maximum RX frame size 1342 * Bit 1 : 1 = Ring Disabled, 0 = Ring ENabled 1343 * Bit 0 : Reserved 1344 */ 1345 rcb->bge_maxlen_flags = 1346 BGE_RCB_MAXLEN_FLAGS(512, BNX_MAX_FRAMELEN << 2); 1347 } else { 1348 /* 1349 * Bits 31-16: Programmable ring size (512, 256, 128, 64, 32) 1350 * Bits 15-2 : Reserved (should be 0) 1351 * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled 1352 * Bit 0 : Reserved 1353 */ 1354 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0); 1355 } 1356 if (BNX_IS_5717_PLUS(sc)) 1357 rcb->bge_nicaddr = BGE_STD_RX_RINGS_5717; 1358 else 1359 rcb->bge_nicaddr = BGE_STD_RX_RINGS; 1360 /* Write the standard receive producer ring control block. */ 1361 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi); 1362 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo); 1363 CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags); 1364 if (!BNX_IS_5717_PLUS(sc)) 1365 CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr); 1366 /* Reset the standard receive producer ring producer index. 
*/ 1367 bnx_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0); 1368 1369 /* 1370 * Initialize the jumbo RX producer ring control 1371 * block. We set the 'ring disabled' bit in the 1372 * flags field until we're actually ready to start 1373 * using this ring (i.e. once we set the MTU 1374 * high enough to require it). 1375 */ 1376 if (BNX_IS_JUMBO_CAPABLE(sc)) { 1377 rcb = &sc->bnx_ldata.bnx_info.bnx_jumbo_rx_rcb; 1378 /* Get the jumbo receive producer ring RCB parameters. */ 1379 rcb->bge_hostaddr.bge_addr_lo = 1380 BGE_ADDR_LO(sc->bnx_ldata.bnx_rx_jumbo_ring_paddr); 1381 rcb->bge_hostaddr.bge_addr_hi = 1382 BGE_ADDR_HI(sc->bnx_ldata.bnx_rx_jumbo_ring_paddr); 1383 rcb->bge_maxlen_flags = 1384 BGE_RCB_MAXLEN_FLAGS(BNX_MAX_FRAMELEN, 1385 BGE_RCB_FLAG_RING_DISABLED); 1386 if (BNX_IS_5717_PLUS(sc)) 1387 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS_5717; 1388 else 1389 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS; 1390 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI, 1391 rcb->bge_hostaddr.bge_addr_hi); 1392 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO, 1393 rcb->bge_hostaddr.bge_addr_lo); 1394 /* Program the jumbo receive producer ring RCB parameters. */ 1395 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, 1396 rcb->bge_maxlen_flags); 1397 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr); 1398 /* Reset the jumbo receive producer ring producer index. */ 1399 bnx_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0); 1400 } 1401 1402 /* 1403 * The BD ring replenish thresholds control how often the 1404 * hardware fetches new BD's from the producer rings in host 1405 * memory. Setting the value too low on a busy system can 1406 * starve the hardware and recue the throughpout. 1407 * 1408 * Set the BD ring replentish thresholds. The recommended 1409 * values are 1/8th the number of descriptors allocated to 1410 * each ring. 
1411 */ 1412 val = 8; 1413 CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, val); 1414 if (BNX_IS_JUMBO_CAPABLE(sc)) { 1415 CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, 1416 BGE_JUMBO_RX_RING_CNT/8); 1417 } 1418 if (BNX_IS_57765_PLUS(sc)) { 1419 CSR_WRITE_4(sc, BGE_STD_REPLENISH_LWM, 32); 1420 CSR_WRITE_4(sc, BGE_JMB_REPLENISH_LWM, 16); 1421 } 1422 1423 /* 1424 * Disable all send rings by setting the 'ring disabled' bit 1425 * in the flags field of all the TX send ring control blocks, 1426 * located in NIC memory. 1427 */ 1428 if (BNX_IS_5717_PLUS(sc)) 1429 limit = 4; 1430 else if (BNX_IS_57765_FAMILY(sc) || 1431 sc->bnx_asicrev == BGE_ASICREV_BCM5762) 1432 limit = 2; 1433 else 1434 limit = 1; 1435 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB; 1436 for (i = 0; i < limit; i++) { 1437 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags, 1438 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED)); 1439 vrcb += sizeof(struct bge_rcb); 1440 } 1441 1442 /* 1443 * Configure send ring RCBs 1444 */ 1445 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB; 1446 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) { 1447 struct bnx_tx_ring *txr = &sc->bnx_tx_ring[i]; 1448 1449 BGE_HOSTADDR(taddr, txr->bnx_tx_ring_paddr); 1450 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 1451 taddr.bge_addr_hi); 1452 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 1453 taddr.bge_addr_lo); 1454 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags, 1455 BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0)); 1456 vrcb += sizeof(struct bge_rcb); 1457 } 1458 1459 /* 1460 * Disable all receive return rings by setting the 1461 * 'ring disabled' bit in the flags field of all the receive 1462 * return ring control blocks, located in NIC memory. 1463 */ 1464 if (BNX_IS_5717_PLUS(sc)) { 1465 /* Should be 17, use 16 until we get an SRAM map. */ 1466 limit = 16; 1467 } else if (BNX_IS_57765_FAMILY(sc) || 1468 sc->bnx_asicrev == BGE_ASICREV_BCM5762) { 1469 limit = 4; 1470 } else { 1471 limit = 1; 1472 } 1473 /* Disable all receive return rings. 
*/ 1474 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB; 1475 for (i = 0; i < limit; i++) { 1476 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0); 1477 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0); 1478 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags, 1479 BGE_RCB_FLAG_RING_DISABLED); 1480 bnx_writembx(sc, BGE_MBX_RX_CONS0_LO + 1481 (i * (sizeof(uint64_t))), 0); 1482 vrcb += sizeof(struct bge_rcb); 1483 } 1484 1485 /* 1486 * Set up receive return rings. 1487 */ 1488 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB; 1489 for (i = 0; i < sc->bnx_rx_retcnt; ++i) { 1490 struct bnx_rx_ret_ring *ret = &sc->bnx_rx_ret_ring[i]; 1491 1492 BGE_HOSTADDR(taddr, ret->bnx_rx_ret_ring_paddr); 1493 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 1494 taddr.bge_addr_hi); 1495 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 1496 taddr.bge_addr_lo); 1497 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags, 1498 BGE_RCB_MAXLEN_FLAGS(BNX_RETURN_RING_CNT, 0)); 1499 vrcb += sizeof(struct bge_rcb); 1500 } 1501 1502 /* Set random backoff seed for TX */ 1503 CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF, 1504 (sc->arpcom.ac_enaddr[0] + sc->arpcom.ac_enaddr[1] + 1505 sc->arpcom.ac_enaddr[2] + sc->arpcom.ac_enaddr[3] + 1506 sc->arpcom.ac_enaddr[4] + sc->arpcom.ac_enaddr[5]) & 1507 BGE_TX_BACKOFF_SEED_MASK); 1508 1509 /* Set inter-packet gap */ 1510 val = 0x2620; 1511 if (sc->bnx_asicrev == BGE_ASICREV_BCM5720 || 1512 sc->bnx_asicrev == BGE_ASICREV_BCM5762) { 1513 val |= CSR_READ_4(sc, BGE_TX_LENGTHS) & 1514 (BGE_TXLEN_JMB_FRM_LEN_MSK | BGE_TXLEN_CNT_DN_VAL_MSK); 1515 } 1516 CSR_WRITE_4(sc, BGE_TX_LENGTHS, val); 1517 1518 /* 1519 * Specify which ring to use for packets that don't match 1520 * any RX rules. 1521 */ 1522 CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08); 1523 1524 /* 1525 * Configure number of RX lists. One interrupt distribution 1526 * list, sixteen active lists, one bad frames class. 1527 */ 1528 CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181); 1529 1530 /* Inialize RX list placement stats mask. 
*/ 1531 CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF); 1532 CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1); 1533 1534 /* Disable host coalescing until we get it set up */ 1535 CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000); 1536 1537 /* Poll to make sure it's shut down. */ 1538 for (i = 0; i < BNX_TIMEOUT; i++) { 1539 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE)) 1540 break; 1541 DELAY(10); 1542 } 1543 1544 if (i == BNX_TIMEOUT) { 1545 if_printf(&sc->arpcom.ac_if, 1546 "host coalescing engine failed to idle\n"); 1547 return(ENXIO); 1548 } 1549 1550 /* Set up host coalescing defaults */ 1551 sc->bnx_coal_chg = BNX_RX_COAL_TICKS_CHG | 1552 BNX_TX_COAL_TICKS_CHG | 1553 BNX_RX_COAL_BDS_CHG | 1554 BNX_TX_COAL_BDS_CHG | 1555 BNX_RX_COAL_BDS_INT_CHG | 1556 BNX_TX_COAL_BDS_INT_CHG; 1557 bnx_coal_change(sc); 1558 1559 /* 1560 * Set up addresses of status blocks 1561 */ 1562 intr = &sc->bnx_intr_data[0]; 1563 bzero(intr->bnx_status_block, BGE_STATUS_BLK_SZ); 1564 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI, 1565 BGE_ADDR_HI(intr->bnx_status_block_paddr)); 1566 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO, 1567 BGE_ADDR_LO(intr->bnx_status_block_paddr)); 1568 for (i = 1; i < sc->bnx_intr_cnt; ++i) { 1569 intr = &sc->bnx_intr_data[i]; 1570 bzero(intr->bnx_status_block, BGE_STATUS_BLK_SZ); 1571 CSR_WRITE_4(sc, BGE_VEC1_STATUSBLK_ADDR_HI + ((i - 1) * 8), 1572 BGE_ADDR_HI(intr->bnx_status_block_paddr)); 1573 CSR_WRITE_4(sc, BGE_VEC1_STATUSBLK_ADDR_LO + ((i - 1) * 8), 1574 BGE_ADDR_LO(intr->bnx_status_block_paddr)); 1575 } 1576 1577 /* Set up status block partail update size. */ 1578 val = BGE_STATBLKSZ_32BYTE; 1579 #if 0 1580 /* 1581 * Does not seem to have visible effect in both 1582 * bulk data (1472B UDP datagram) and tiny data 1583 * (18B UDP datagram) TX tests. 
1584 */ 1585 val |= BGE_HCCMODE_CLRTICK_TX; 1586 #endif 1587 /* Turn on host coalescing state machine */ 1588 CSR_WRITE_4(sc, BGE_HCC_MODE, val | BGE_HCCMODE_ENABLE); 1589 1590 /* Turn on RX BD completion state machine and enable attentions */ 1591 CSR_WRITE_4(sc, BGE_RBDC_MODE, 1592 BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN); 1593 1594 /* Turn on RX list placement state machine */ 1595 CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE); 1596 1597 val = BGE_MACMODE_TXDMA_ENB | BGE_MACMODE_RXDMA_ENB | 1598 BGE_MACMODE_RX_STATS_CLEAR | BGE_MACMODE_TX_STATS_CLEAR | 1599 BGE_MACMODE_RX_STATS_ENB | BGE_MACMODE_TX_STATS_ENB | 1600 BGE_MACMODE_FRMHDR_DMA_ENB; 1601 1602 if (sc->bnx_flags & BNX_FLAG_TBI) 1603 val |= BGE_PORTMODE_TBI; 1604 else if (sc->bnx_flags & BNX_FLAG_MII_SERDES) 1605 val |= BGE_PORTMODE_GMII; 1606 else 1607 val |= BGE_PORTMODE_MII; 1608 1609 /* Allow APE to send/receive frames. */ 1610 if (sc->bnx_mfw_flags & BNX_MFW_ON_APE) 1611 val |= BGE_MACMODE_APE_RX_EN | BGE_MACMODE_APE_TX_EN; 1612 1613 /* Turn on DMA, clear stats */ 1614 CSR_WRITE_4(sc, BGE_MAC_MODE, val); 1615 DELAY(40); 1616 1617 /* Set misc. local control, enable interrupts on attentions */ 1618 BNX_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN); 1619 1620 #ifdef notdef 1621 /* Assert GPIO pins for PHY reset */ 1622 BNX_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0| 1623 BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2); 1624 BNX_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0| 1625 BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2); 1626 #endif 1627 1628 if (sc->bnx_intr_type == PCI_INTR_TYPE_MSIX) 1629 bnx_enable_msi(sc, TRUE); 1630 1631 /* Turn on write DMA state machine */ 1632 val = BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS; 1633 /* Enable host coalescing bug fix. */ 1634 val |= BGE_WDMAMODE_STATUS_TAG_FIX; 1635 if (sc->bnx_asicrev == BGE_ASICREV_BCM5785) { 1636 /* Request larger DMA burst size to get better performance. 
*/ 1637 val |= BGE_WDMAMODE_BURST_ALL_DATA; 1638 } 1639 CSR_WRITE_4(sc, BGE_WDMA_MODE, val); 1640 DELAY(40); 1641 1642 if (BNX_IS_57765_PLUS(sc)) { 1643 uint32_t dmactl, dmactl_reg; 1644 1645 if (sc->bnx_asicrev == BGE_ASICREV_BCM5762) 1646 dmactl_reg = BGE_RDMA_RSRVCTRL2; 1647 else 1648 dmactl_reg = BGE_RDMA_RSRVCTRL; 1649 1650 dmactl = CSR_READ_4(sc, dmactl_reg); 1651 /* 1652 * Adjust tx margin to prevent TX data corruption and 1653 * fix internal FIFO overflow. 1654 */ 1655 if (sc->bnx_asicrev == BGE_ASICREV_BCM5719 || 1656 sc->bnx_asicrev == BGE_ASICREV_BCM5720 || 1657 sc->bnx_asicrev == BGE_ASICREV_BCM5762) { 1658 dmactl &= ~(BGE_RDMA_RSRVCTRL_FIFO_LWM_MASK | 1659 BGE_RDMA_RSRVCTRL_FIFO_HWM_MASK | 1660 BGE_RDMA_RSRVCTRL_TXMRGN_MASK); 1661 dmactl |= BGE_RDMA_RSRVCTRL_FIFO_LWM_1_5K | 1662 BGE_RDMA_RSRVCTRL_FIFO_HWM_1_5K | 1663 BGE_RDMA_RSRVCTRL_TXMRGN_320B; 1664 } 1665 /* 1666 * Enable fix for read DMA FIFO overruns. 1667 * The fix is to limit the number of RX BDs 1668 * the hardware would fetch at a fime. 1669 */ 1670 CSR_WRITE_4(sc, dmactl_reg, 1671 dmactl | BGE_RDMA_RSRVCTRL_FIFO_OFLW_FIX); 1672 } 1673 1674 if (sc->bnx_asicrev == BGE_ASICREV_BCM5719) { 1675 CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL, 1676 CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) | 1677 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_4K | 1678 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K); 1679 } else if (sc->bnx_asicrev == BGE_ASICREV_BCM5720 || 1680 sc->bnx_asicrev == BGE_ASICREV_BCM5762) { 1681 uint32_t ctrl_reg; 1682 1683 if (sc->bnx_asicrev == BGE_ASICREV_BCM5762) 1684 ctrl_reg = BGE_RDMA_LSO_CRPTEN_CTRL2; 1685 else 1686 ctrl_reg = BGE_RDMA_LSO_CRPTEN_CTRL; 1687 1688 /* 1689 * Allow 4KB burst length reads for non-LSO frames. 1690 * Enable 512B burst length reads for buffer descriptors. 
1691 */ 1692 CSR_WRITE_4(sc, ctrl_reg, 1693 CSR_READ_4(sc, ctrl_reg) | 1694 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_512 | 1695 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K); 1696 } 1697 1698 /* Turn on read DMA state machine */ 1699 val = BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS; 1700 if (sc->bnx_asicrev == BGE_ASICREV_BCM5717) 1701 val |= BGE_RDMAMODE_MULT_DMA_RD_DIS; 1702 if (sc->bnx_asicrev == BGE_ASICREV_BCM5784 || 1703 sc->bnx_asicrev == BGE_ASICREV_BCM5785 || 1704 sc->bnx_asicrev == BGE_ASICREV_BCM57780) { 1705 val |= BGE_RDMAMODE_BD_SBD_CRPT_ATTN | 1706 BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN | 1707 BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN; 1708 } 1709 if (sc->bnx_asicrev == BGE_ASICREV_BCM5720 || 1710 sc->bnx_asicrev == BGE_ASICREV_BCM5762) { 1711 val |= CSR_READ_4(sc, BGE_RDMA_MODE) & 1712 BGE_RDMAMODE_H2BNC_VLAN_DET; 1713 /* 1714 * Allow multiple outstanding read requests from 1715 * non-LSO read DMA engine. 1716 */ 1717 val &= ~BGE_RDMAMODE_MULT_DMA_RD_DIS; 1718 } 1719 if (sc->bnx_asicrev == BGE_ASICREV_BCM57766) 1720 val |= BGE_RDMAMODE_JMB_2K_MMRR; 1721 if (sc->bnx_flags & BNX_FLAG_TSO) 1722 val |= BGE_RDMAMODE_TSO4_ENABLE; 1723 val |= BGE_RDMAMODE_FIFO_LONG_BURST; 1724 CSR_WRITE_4(sc, BGE_RDMA_MODE, val); 1725 DELAY(40); 1726 1727 /* Turn on RX data completion state machine */ 1728 CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE); 1729 1730 /* Turn on RX BD initiator state machine */ 1731 CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE); 1732 1733 /* Turn on RX data and RX BD initiator state machine */ 1734 CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE); 1735 1736 /* Turn on send BD completion state machine */ 1737 CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE); 1738 1739 /* Turn on send data completion state machine */ 1740 val = BGE_SDCMODE_ENABLE; 1741 if (sc->bnx_asicrev == BGE_ASICREV_BCM5761) 1742 val |= BGE_SDCMODE_CDELAY; 1743 CSR_WRITE_4(sc, BGE_SDC_MODE, val); 1744 1745 /* Turn on send data initiator state machine */ 1746 if (sc->bnx_flags & 
BNX_FLAG_TSO) { 1747 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE | 1748 BGE_SDIMODE_HW_LSO_PRE_DMA); 1749 } else { 1750 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE); 1751 } 1752 1753 /* Turn on send BD initiator state machine */ 1754 val = BGE_SBDIMODE_ENABLE; 1755 if (sc->bnx_tx_ringcnt > 1) 1756 val |= BGE_SBDIMODE_MULTI_TXR; 1757 CSR_WRITE_4(sc, BGE_SBDI_MODE, val); 1758 1759 /* Turn on send BD selector state machine */ 1760 CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE); 1761 1762 CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF); 1763 CSR_WRITE_4(sc, BGE_SDI_STATS_CTL, 1764 BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER); 1765 1766 /* ack/clear link change events */ 1767 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED| 1768 BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE| 1769 BGE_MACSTAT_LINK_CHANGED); 1770 CSR_WRITE_4(sc, BGE_MI_STS, 0); 1771 1772 /* 1773 * Enable attention when the link has changed state for 1774 * devices that use auto polling. 1775 */ 1776 if (sc->bnx_flags & BNX_FLAG_TBI) { 1777 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK); 1778 } else { 1779 if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) { 1780 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bnx_mi_mode); 1781 DELAY(80); 1782 } 1783 } 1784 1785 /* 1786 * Clear any pending link state attention. 1787 * Otherwise some link state change events may be lost until attention 1788 * is cleared by bnx_intr() -> bnx_softc.bnx_link_upd() sequence. 1789 * It's not necessary on newer BCM chips - perhaps enabling link 1790 * state change attentions implies clearing pending attention. 1791 */ 1792 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED| 1793 BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE| 1794 BGE_MACSTAT_LINK_CHANGED); 1795 1796 /* Enable link state change attentions. */ 1797 BNX_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED); 1798 1799 return(0); 1800 } 1801 1802 /* 1803 * Probe for a Broadcom chip. 
 Check the PCI vendor and device IDs
 * against our list and return its name if we find a match. Note
 * that since the Broadcom controller contains VPD support, we
 * can get the device name string from the controller itself instead
 * of the compiled-in string. This is a little slow, but it guarantees
 * we'll always announce the right product name.
 */
static int
bnx_probe(device_t dev)
{
	const struct bnx_type *t;
	uint16_t product, vendor;

	/* This driver only handles PCI-E parts. */
	if (!pci_is_pcie(dev))
		return ENXIO;

	product = pci_get_device(dev);
	vendor = pci_get_vendor(dev);

	for (t = bnx_devs; t->bnx_name != NULL; t++) {
		if (vendor == t->bnx_vid && product == t->bnx_did)
			break;
	}
	if (t->bnx_name == NULL)
		return ENXIO;

	device_set_desc(dev, t->bnx_name);
	return 0;
}

/*
 * Device attach: map the register BAR(s), identify the exact chip and
 * set per-chip feature/workaround flags, reset and initialize the chip
 * and read the station address before the interface is brought up.
 */
static int
bnx_attach(device_t dev)
{
	struct ifnet *ifp;
	struct bnx_softc *sc;
	struct bnx_rx_std_ring *std;
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *tree;
	uint32_t hwcfg = 0;
	int error = 0, rid, capmask, i, std_cpuid, std_cpuid_def;
	uint8_t ether_addr[ETHER_ADDR_LEN];
	uint16_t product;
	uintptr_t mii_priv = 0;
#if defined(BNX_TSO_DEBUG) || defined(BNX_RSS_DEBUG) || defined(BNX_TSS_DEBUG)
	char desc[32];
#endif
#ifdef IFPOLL_ENABLE
	int offset, offset_def;
#endif

	sc = device_get_softc(dev);
	sc->bnx_dev = dev;
	callout_init_mp(&sc->bnx_tick_timer);
	lwkt_serialize_init(&sc->bnx_jslot_serializer);
	lwkt_serialize_init(&sc->bnx_main_serialize);

	/* Always setup interrupt mailboxes */
	for (i = 0; i < BNX_INTR_MAX; ++i) {
		callout_init_mp(&sc->bnx_intr_data[i].bnx_intr_timer);
		sc->bnx_intr_data[i].bnx_sc = sc;
		sc->bnx_intr_data[i].bnx_intr_mbx = BGE_MBX_IRQ0_LO + (i * 8);
		sc->bnx_intr_data[i].bnx_intr_rid = -1;
		sc->bnx_intr_data[i].bnx_intr_cpuid = -1;
	}

	sc->bnx_func_addr = pci_get_function(dev);
	product = pci_get_device(dev);

#ifndef BURN_BRIDGES
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		uint32_t irq, mem;

		/* Save config state that is lost across the D-state change. */
		irq = pci_read_config(dev, PCIR_INTLINE, 4);
		mem = pci_read_config(dev, BGE_PCI_BAR0, 4);

		device_printf(dev, "chip is in D%d power mode "
		    "-- setting to D0\n", pci_get_powerstate(dev));

		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		pci_write_config(dev, PCIR_INTLINE, irq, 4);
		pci_write_config(dev, BGE_PCI_BAR0, mem, 4);
	}
#endif	/* !BURN_BRIDGES */

	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

	rid = BGE_PCI_BAR0;
	sc->bnx_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);

	if (sc->bnx_res == NULL) {
		device_printf(dev, "couldn't map memory\n");
		return ENXIO;
	}

	sc->bnx_btag = rman_get_bustag(sc->bnx_res);
	sc->bnx_bhandle = rman_get_bushandle(sc->bnx_res);

	/* Save various chip information */
	sc->bnx_chipid =
	    pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >>
	    BGE_PCIMISCCTL_ASICREV_SHIFT;
	if (BGE_ASICREV(sc->bnx_chipid) == BGE_ASICREV_USE_PRODID_REG) {
		/* All chips having dedicated ASICREV register have CPMU */
		sc->bnx_flags |= BNX_FLAG_CPMU;

		/* The ASICREV register location depends on the product. */
		switch (product) {
		case PCI_PRODUCT_BROADCOM_BCM5717:
		case PCI_PRODUCT_BROADCOM_BCM5717C:
		case PCI_PRODUCT_BROADCOM_BCM5718:
		case PCI_PRODUCT_BROADCOM_BCM5719:
		case PCI_PRODUCT_BROADCOM_BCM5720_ALT:
		case PCI_PRODUCT_BROADCOM_BCM5725:
		case PCI_PRODUCT_BROADCOM_BCM5727:
		case PCI_PRODUCT_BROADCOM_BCM5762:
			sc->bnx_chipid = pci_read_config(dev,
			    BGE_PCI_GEN2_PRODID_ASICREV, 4);
			break;

		case PCI_PRODUCT_BROADCOM_BCM57761:
		case PCI_PRODUCT_BROADCOM_BCM57762:
		case PCI_PRODUCT_BROADCOM_BCM57765:
		case PCI_PRODUCT_BROADCOM_BCM57766:
		case PCI_PRODUCT_BROADCOM_BCM57781:
		case PCI_PRODUCT_BROADCOM_BCM57782:
		case PCI_PRODUCT_BROADCOM_BCM57785:
		case PCI_PRODUCT_BROADCOM_BCM57786:
		case PCI_PRODUCT_BROADCOM_BCM57791:
		case PCI_PRODUCT_BROADCOM_BCM57795:
			sc->bnx_chipid = pci_read_config(dev,
			    BGE_PCI_GEN15_PRODID_ASICREV, 4);
			break;

		default:
			sc->bnx_chipid = pci_read_config(dev,
			    BGE_PCI_PRODID_ASICREV, 4);
			break;
		}
	}
	if (sc->bnx_chipid == BGE_CHIPID_BCM5717_C0)
		sc->bnx_chipid = BGE_CHIPID_BCM5720_A0;

	sc->bnx_asicrev = BGE_ASICREV(sc->bnx_chipid);
	sc->bnx_chiprev = BGE_CHIPREV(sc->bnx_chipid);

	/* Classify the chip into its family for later feature tests. */
	switch (sc->bnx_asicrev) {
	case BGE_ASICREV_BCM5717:
	case BGE_ASICREV_BCM5719:
	case BGE_ASICREV_BCM5720:
		sc->bnx_flags |= BNX_FLAG_5717_PLUS | BNX_FLAG_57765_PLUS;
		break;

	case BGE_ASICREV_BCM5762:
		sc->bnx_flags |= BNX_FLAG_57765_PLUS;
		break;

	case BGE_ASICREV_BCM57765:
	case BGE_ASICREV_BCM57766:
		sc->bnx_flags |= BNX_FLAG_57765_FAMILY | BNX_FLAG_57765_PLUS;
		break;
	}

	if (sc->bnx_asicrev == BGE_ASICREV_BCM5717 ||
	    sc->bnx_asicrev == BGE_ASICREV_BCM5719 ||
	    sc->bnx_asicrev == BGE_ASICREV_BCM5720 ||
	    sc->bnx_asicrev == BGE_ASICREV_BCM5762)
		sc->bnx_flags |= BNX_FLAG_APE;

	/* TSO is assumed supported; 5719 A0 is the known exception. */
	sc->bnx_flags |= BNX_FLAG_TSO;
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5719 &&
	    sc->bnx_chipid == BGE_CHIPID_BCM5719_A0)
		sc->bnx_flags &= ~BNX_FLAG_TSO;

	if (sc->bnx_asicrev == BGE_ASICREV_BCM5717 ||
	    BNX_IS_57765_FAMILY(sc)) {
		/*
		 * All BCM57785 and BCM5718 family chips have a bug that
		 * under certain situation interrupt will not be enabled
		 * even if status tag is written to interrupt mailbox.
		 *
		 * While BCM5719 and BCM5720 have a hardware workaround
		 * which could fix the above bug.
		 * See the comment near BGE_PCIDMARWCTL_TAGGED_STATUS_WA in
		 * bnx_chipinit().
		 *
		 * For the rest of the chips in these two families, we will
		 * have to poll the status block at high rate (10ms currently)
		 * to check whether the interrupt is hosed or not.
		 * See bnx_check_intr_*() for details.
		 */
		sc->bnx_flags |= BNX_FLAG_STATUSTAG_BUG;
	}

	sc->bnx_pciecap = pci_get_pciecap_ptr(sc->bnx_dev);
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5719 ||
	    sc->bnx_asicrev == BGE_ASICREV_BCM5720)
		pcie_set_max_readrq(dev, PCIEM_DEVCTL_MAX_READRQ_2048);
	else
		pcie_set_max_readrq(dev, PCIEM_DEVCTL_MAX_READRQ_4096);
	device_printf(dev, "CHIP ID 0x%08x; "
	    "ASIC REV 0x%02x; CHIP REV 0x%02x\n",
	    sc->bnx_chipid, sc->bnx_asicrev, sc->bnx_chiprev);

	/*
	 * Set various PHY quirk flags.
	 */

	capmask = MII_CAPMASK_DEFAULT;
	if (product == PCI_PRODUCT_BROADCOM_BCM57791 ||
	    product == PCI_PRODUCT_BROADCOM_BCM57795) {
		/* 10/100 only */
		capmask &= ~BMSR_EXTSTAT;
	}

	mii_priv |= BRGPHY_FLAG_WIRESPEED;
	if (sc->bnx_chipid == BGE_CHIPID_BCM5762_A0)
		mii_priv |= BRGPHY_FLAG_5762_A0;

	/*
	 * Chips with APE need BAR2 access for APE registers/memory.
	 */
	if (sc->bnx_flags & BNX_FLAG_APE) {
		uint32_t pcistate;

		rid = PCIR_BAR(2);
		sc->bnx_res2 = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
		    RF_ACTIVE);
		if (sc->bnx_res2 == NULL) {
			device_printf(dev, "couldn't map BAR2 memory\n");
			error = ENXIO;
			goto fail;
		}

		/* Enable APE register/memory access by host driver. */
		pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);
		pcistate |= BGE_PCISTATE_ALLOW_APE_CTLSPC_WR |
		    BGE_PCISTATE_ALLOW_APE_SHMEM_WR |
		    BGE_PCISTATE_ALLOW_APE_PSPACE_WR;
		pci_write_config(dev, BGE_PCI_PCISTATE, pcistate, 4);

		bnx_ape_lock_init(sc);
		bnx_ape_read_fw_ver(sc);
	}

	/* Initialize if_name earlier, so if_printf could be used */
	ifp = &sc->arpcom.ac_if;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	/*
	 * Try to reset the chip.
	 */
	bnx_sig_pre_reset(sc, BNX_RESET_SHUTDOWN);
	bnx_reset(sc);
	bnx_sig_post_reset(sc, BNX_RESET_SHUTDOWN);

	if (bnx_chipinit(sc)) {
		device_printf(dev, "chip initialization failed\n");
		error = ENXIO;
		goto fail;
	}

	/*
	 * Get station address
	 */
	error = bnx_get_eaddr(sc, ether_addr);
	if (error) {
		device_printf(dev, "failed to read station address\n");
		goto fail;
	}

	/* Setup RX/TX and interrupt count */
	bnx_setup_ring_cnt(sc);

	if ((sc->bnx_rx_retcnt == 1 && sc->bnx_tx_ringcnt == 1) ||
	    (sc->bnx_rx_retcnt > 1 && sc->bnx_tx_ringcnt > 1)) {
		/*
		 * The RX ring and the corresponding TX ring processing
		 * should be on the same CPU, since they share the same
		 * status block.
		 */
		sc->bnx_flags |= BNX_FLAG_RXTX_BUNDLE;
		if (bootverbose)
			device_printf(dev, "RX/TX bundle\n");
		if (sc->bnx_tx_ringcnt > 1) {
			/*
			 * Multiple TX rings do not share status block
			 * with link status, so link status will have
			 * to save its own status_tag.
2095 */ 2096 sc->bnx_flags |= BNX_FLAG_STATUS_HASTAG; 2097 if (bootverbose) 2098 device_printf(dev, "status needs tag\n"); 2099 } 2100 } else { 2101 KKASSERT(sc->bnx_rx_retcnt > 1 && sc->bnx_tx_ringcnt == 1); 2102 if (bootverbose) 2103 device_printf(dev, "RX/TX not bundled\n"); 2104 } 2105 2106 error = bnx_dma_alloc(dev); 2107 if (error) 2108 goto fail; 2109 2110 #ifdef IFPOLL_ENABLE 2111 if (sc->bnx_flags & BNX_FLAG_RXTX_BUNDLE) { 2112 /* 2113 * NPOLLING RX/TX CPU offset 2114 */ 2115 if (sc->bnx_rx_retcnt == ncpus2) { 2116 offset = 0; 2117 } else { 2118 offset_def = 2119 (sc->bnx_rx_retcnt * device_get_unit(dev)) % ncpus2; 2120 offset = device_getenv_int(dev, "npoll.offset", 2121 offset_def); 2122 if (offset >= ncpus2 || 2123 offset % sc->bnx_rx_retcnt != 0) { 2124 device_printf(dev, "invalid npoll.offset %d, " 2125 "use %d\n", offset, offset_def); 2126 offset = offset_def; 2127 } 2128 } 2129 sc->bnx_npoll_rxoff = offset; 2130 sc->bnx_npoll_txoff = offset; 2131 } else { 2132 /* 2133 * NPOLLING RX CPU offset 2134 */ 2135 if (sc->bnx_rx_retcnt == ncpus2) { 2136 offset = 0; 2137 } else { 2138 offset_def = 2139 (sc->bnx_rx_retcnt * device_get_unit(dev)) % ncpus2; 2140 offset = device_getenv_int(dev, "npoll.rxoff", 2141 offset_def); 2142 if (offset >= ncpus2 || 2143 offset % sc->bnx_rx_retcnt != 0) { 2144 device_printf(dev, "invalid npoll.rxoff %d, " 2145 "use %d\n", offset, offset_def); 2146 offset = offset_def; 2147 } 2148 } 2149 sc->bnx_npoll_rxoff = offset; 2150 2151 /* 2152 * NPOLLING TX CPU offset 2153 */ 2154 offset_def = device_get_unit(dev) % ncpus2; 2155 offset = device_getenv_int(dev, "npoll.txoff", offset_def); 2156 if (offset >= ncpus2) { 2157 device_printf(dev, "invalid npoll.txoff %d, use %d\n", 2158 offset, offset_def); 2159 offset = offset_def; 2160 } 2161 sc->bnx_npoll_txoff = offset; 2162 } 2163 #endif /* IFPOLL_ENABLE */ 2164 2165 /* 2166 * Allocate interrupt 2167 */ 2168 error = bnx_alloc_intr(sc); 2169 if (error) 2170 goto fail; 2171 2172 /* Setup 
serializers */ 2173 bnx_setup_serialize(sc); 2174 2175 /* Set default tuneable values. */ 2176 sc->bnx_rx_coal_ticks = BNX_RX_COAL_TICKS_DEF; 2177 sc->bnx_tx_coal_ticks = BNX_TX_COAL_TICKS_DEF; 2178 sc->bnx_rx_coal_bds = BNX_RX_COAL_BDS_DEF; 2179 sc->bnx_rx_coal_bds_poll = sc->bnx_rx_ret_ring[0].bnx_rx_cntmax; 2180 sc->bnx_tx_coal_bds = BNX_TX_COAL_BDS_DEF; 2181 sc->bnx_tx_coal_bds_poll = BNX_TX_COAL_BDS_POLL_DEF; 2182 sc->bnx_rx_coal_bds_int = BNX_RX_COAL_BDS_INT_DEF; 2183 sc->bnx_tx_coal_bds_int = BNX_TX_COAL_BDS_INT_DEF; 2184 2185 /* Set up ifnet structure */ 2186 ifp->if_softc = sc; 2187 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 2188 ifp->if_ioctl = bnx_ioctl; 2189 ifp->if_start = bnx_start; 2190 #ifdef IFPOLL_ENABLE 2191 ifp->if_npoll = bnx_npoll; 2192 #endif 2193 ifp->if_init = bnx_init; 2194 ifp->if_serialize = bnx_serialize; 2195 ifp->if_deserialize = bnx_deserialize; 2196 ifp->if_tryserialize = bnx_tryserialize; 2197 #ifdef INVARIANTS 2198 ifp->if_serialize_assert = bnx_serialize_assert; 2199 #endif 2200 ifp->if_mtu = ETHERMTU; 2201 ifp->if_capabilities = IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU; 2202 2203 ifp->if_capabilities |= IFCAP_HWCSUM; 2204 ifp->if_hwassist = BNX_CSUM_FEATURES; 2205 if (sc->bnx_flags & BNX_FLAG_TSO) { 2206 ifp->if_capabilities |= IFCAP_TSO; 2207 ifp->if_hwassist |= CSUM_TSO; 2208 } 2209 if (BNX_RSS_ENABLED(sc)) 2210 ifp->if_capabilities |= IFCAP_RSS; 2211 ifp->if_capenable = ifp->if_capabilities; 2212 2213 ifq_set_maxlen(&ifp->if_snd, BGE_TX_RING_CNT - 1); 2214 ifq_set_ready(&ifp->if_snd); 2215 ifq_set_subq_cnt(&ifp->if_snd, sc->bnx_tx_ringcnt); 2216 2217 if (sc->bnx_tx_ringcnt > 1) { 2218 ifp->if_mapsubq = ifq_mapsubq_mask; 2219 ifq_set_subq_mask(&ifp->if_snd, sc->bnx_tx_ringcnt - 1); 2220 } 2221 2222 /* 2223 * Figure out what sort of media we have by checking the 2224 * hardware config word in the first 32k of NIC internal memory, 2225 * or fall back to examining the EEPROM if necessary. 
2226 * Note: on some BCM5700 cards, this value appears to be unset. 2227 * If that's the case, we have to rely on identifying the NIC 2228 * by its PCI subsystem ID, as we do below for the SysKonnect 2229 * SK-9D41. 2230 */ 2231 if (bnx_readmem_ind(sc, BGE_SRAM_DATA_SIG) == BGE_SRAM_DATA_SIG_MAGIC) { 2232 hwcfg = bnx_readmem_ind(sc, BGE_SRAM_DATA_CFG); 2233 } else { 2234 if (bnx_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET, 2235 sizeof(hwcfg))) { 2236 device_printf(dev, "failed to read EEPROM\n"); 2237 error = ENXIO; 2238 goto fail; 2239 } 2240 hwcfg = ntohl(hwcfg); 2241 } 2242 2243 /* The SysKonnect SK-9D41 is a 1000baseSX card. */ 2244 if (pci_get_subvendor(dev) == PCI_PRODUCT_SCHNEIDERKOCH_SK_9D41 || 2245 (hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) 2246 sc->bnx_flags |= BNX_FLAG_TBI; 2247 2248 /* Setup MI MODE */ 2249 if (sc->bnx_flags & BNX_FLAG_CPMU) 2250 sc->bnx_mi_mode = BGE_MIMODE_500KHZ_CONST; 2251 else 2252 sc->bnx_mi_mode = BGE_MIMODE_BASE; 2253 2254 /* Setup link status update stuffs */ 2255 if (sc->bnx_flags & BNX_FLAG_TBI) { 2256 sc->bnx_link_upd = bnx_tbi_link_upd; 2257 sc->bnx_link_chg = BGE_MACSTAT_LINK_CHANGED; 2258 } else if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) { 2259 sc->bnx_link_upd = bnx_autopoll_link_upd; 2260 sc->bnx_link_chg = BGE_MACSTAT_LINK_CHANGED; 2261 } else { 2262 sc->bnx_link_upd = bnx_copper_link_upd; 2263 sc->bnx_link_chg = BGE_MACSTAT_LINK_CHANGED; 2264 } 2265 2266 /* Set default PHY address */ 2267 sc->bnx_phyno = 1; 2268 2269 /* 2270 * PHY address mapping for various devices. 
2271 * 2272 * | F0 Cu | F0 Sr | F1 Cu | F1 Sr | 2273 * ---------+-------+-------+-------+-------+ 2274 * BCM57XX | 1 | X | X | X | 2275 * BCM5717 | 1 | 8 | 2 | 9 | 2276 * BCM5719 | 1 | 8 | 2 | 9 | 2277 * BCM5720 | 1 | 8 | 2 | 9 | 2278 * 2279 * | F2 Cu | F2 Sr | F3 Cu | F3 Sr | 2280 * ---------+-------+-------+-------+-------+ 2281 * BCM57XX | X | X | X | X | 2282 * BCM5717 | X | X | X | X | 2283 * BCM5719 | 3 | 10 | 4 | 11 | 2284 * BCM5720 | X | X | X | X | 2285 * 2286 * Other addresses may respond but they are not 2287 * IEEE compliant PHYs and should be ignored. 2288 */ 2289 if (BNX_IS_5717_PLUS(sc)) { 2290 if (sc->bnx_chipid == BGE_CHIPID_BCM5717_A0) { 2291 if (CSR_READ_4(sc, BGE_SGDIG_STS) & 2292 BGE_SGDIGSTS_IS_SERDES) 2293 sc->bnx_phyno = sc->bnx_func_addr + 8; 2294 else 2295 sc->bnx_phyno = sc->bnx_func_addr + 1; 2296 } else { 2297 if (CSR_READ_4(sc, BGE_CPMU_PHY_STRAP) & 2298 BGE_CPMU_PHY_STRAP_IS_SERDES) 2299 sc->bnx_phyno = sc->bnx_func_addr + 8; 2300 else 2301 sc->bnx_phyno = sc->bnx_func_addr + 1; 2302 } 2303 } 2304 2305 if (sc->bnx_flags & BNX_FLAG_TBI) { 2306 ifmedia_init(&sc->bnx_ifmedia, IFM_IMASK, 2307 bnx_ifmedia_upd, bnx_ifmedia_sts); 2308 ifmedia_add(&sc->bnx_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL); 2309 ifmedia_add(&sc->bnx_ifmedia, 2310 IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL); 2311 ifmedia_add(&sc->bnx_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL); 2312 ifmedia_set(&sc->bnx_ifmedia, IFM_ETHER|IFM_AUTO); 2313 sc->bnx_ifmedia.ifm_media = sc->bnx_ifmedia.ifm_cur->ifm_media; 2314 } else { 2315 struct mii_probe_args mii_args; 2316 2317 mii_probe_args_init(&mii_args, bnx_ifmedia_upd, bnx_ifmedia_sts); 2318 mii_args.mii_probemask = 1 << sc->bnx_phyno; 2319 mii_args.mii_capmask = capmask; 2320 mii_args.mii_privtag = MII_PRIVTAG_BRGPHY; 2321 mii_args.mii_priv = mii_priv; 2322 2323 error = mii_probe(dev, &sc->bnx_miibus, &mii_args); 2324 if (error) { 2325 device_printf(dev, "MII without any PHY!\n"); 2326 goto fail; 2327 } 2328 } 2329 2330 ctx = 
device_get_sysctl_ctx(sc->bnx_dev); 2331 tree = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->bnx_dev)); 2332 2333 SYSCTL_ADD_INT(ctx, tree, OID_AUTO, 2334 "rx_rings", CTLFLAG_RD, &sc->bnx_rx_retcnt, 0, "# of RX rings"); 2335 SYSCTL_ADD_INT(ctx, tree, OID_AUTO, 2336 "tx_rings", CTLFLAG_RD, &sc->bnx_tx_ringcnt, 0, "# of TX rings"); 2337 2338 SYSCTL_ADD_PROC(ctx, tree, OID_AUTO, "rx_coal_ticks", 2339 CTLTYPE_INT | CTLFLAG_RW, 2340 sc, 0, bnx_sysctl_rx_coal_ticks, "I", 2341 "Receive coalescing ticks (usec)."); 2342 SYSCTL_ADD_PROC(ctx, tree, OID_AUTO, "tx_coal_ticks", 2343 CTLTYPE_INT | CTLFLAG_RW, 2344 sc, 0, bnx_sysctl_tx_coal_ticks, "I", 2345 "Transmit coalescing ticks (usec)."); 2346 SYSCTL_ADD_PROC(ctx, tree, OID_AUTO, "rx_coal_bds", 2347 CTLTYPE_INT | CTLFLAG_RW, 2348 sc, 0, bnx_sysctl_rx_coal_bds, "I", 2349 "Receive max coalesced BD count."); 2350 SYSCTL_ADD_PROC(ctx, tree, OID_AUTO, "rx_coal_bds_poll", 2351 CTLTYPE_INT | CTLFLAG_RW, 2352 sc, 0, bnx_sysctl_rx_coal_bds_poll, "I", 2353 "Receive max coalesced BD count in polling."); 2354 SYSCTL_ADD_PROC(ctx, tree, OID_AUTO, "tx_coal_bds", 2355 CTLTYPE_INT | CTLFLAG_RW, 2356 sc, 0, bnx_sysctl_tx_coal_bds, "I", 2357 "Transmit max coalesced BD count."); 2358 SYSCTL_ADD_PROC(ctx, tree, OID_AUTO, "tx_coal_bds_poll", 2359 CTLTYPE_INT | CTLFLAG_RW, 2360 sc, 0, bnx_sysctl_tx_coal_bds_poll, "I", 2361 "Transmit max coalesced BD count in polling."); 2362 /* 2363 * A common design characteristic for many Broadcom 2364 * client controllers is that they only support a 2365 * single outstanding DMA read operation on the PCIe 2366 * bus. This means that it will take twice as long to 2367 * fetch a TX frame that is split into header and 2368 * payload buffers as it does to fetch a single, 2369 * contiguous TX frame (2 reads vs. 1 read). For these 2370 * controllers, coalescing buffers to reduce the number 2371 * of memory reads is effective way to get maximum 2372 * performance(about 940Mbps). 
Without collapsing TX 2373 * buffers the maximum TCP bulk transfer performance 2374 * is about 850Mbps. However forcing coalescing mbufs 2375 * consumes a lot of CPU cycles, so leave it off by 2376 * default. 2377 */ 2378 SYSCTL_ADD_PROC(ctx, tree, OID_AUTO, 2379 "force_defrag", CTLTYPE_INT | CTLFLAG_RW, 2380 sc, 0, bnx_sysctl_force_defrag, "I", 2381 "Force defragment on TX path"); 2382 2383 SYSCTL_ADD_PROC(ctx, tree, OID_AUTO, 2384 "tx_wreg", CTLTYPE_INT | CTLFLAG_RW, 2385 sc, 0, bnx_sysctl_tx_wreg, "I", 2386 "# of segments before writing to hardware register"); 2387 2388 SYSCTL_ADD_PROC(ctx, tree, OID_AUTO, 2389 "std_refill", CTLTYPE_INT | CTLFLAG_RW, 2390 sc, 0, bnx_sysctl_std_refill, "I", 2391 "# of packets received before scheduling standard refilling"); 2392 2393 SYSCTL_ADD_PROC(ctx, tree, OID_AUTO, 2394 "rx_coal_bds_int", CTLTYPE_INT | CTLFLAG_RW, 2395 sc, 0, bnx_sysctl_rx_coal_bds_int, "I", 2396 "Receive max coalesced BD count during interrupt."); 2397 SYSCTL_ADD_PROC(ctx, tree, OID_AUTO, 2398 "tx_coal_bds_int", CTLTYPE_INT | CTLFLAG_RW, 2399 sc, 0, bnx_sysctl_tx_coal_bds_int, "I", 2400 "Transmit max coalesced BD count during interrupt."); 2401 2402 #ifdef IFPOLL_ENABLE 2403 if (sc->bnx_flags & BNX_FLAG_RXTX_BUNDLE) { 2404 SYSCTL_ADD_PROC(ctx, tree, OID_AUTO, 2405 "npoll_offset", CTLTYPE_INT | CTLFLAG_RW, 2406 sc, 0, bnx_sysctl_npoll_offset, "I", 2407 "NPOLLING cpu offset"); 2408 } else { 2409 SYSCTL_ADD_PROC(ctx, tree, OID_AUTO, 2410 "npoll_rxoff", CTLTYPE_INT | CTLFLAG_RW, 2411 sc, 0, bnx_sysctl_npoll_rxoff, "I", 2412 "NPOLLING RX cpu offset"); 2413 SYSCTL_ADD_PROC(ctx, tree, OID_AUTO, 2414 "npoll_txoff", CTLTYPE_INT | CTLFLAG_RW, 2415 sc, 0, bnx_sysctl_npoll_txoff, "I", 2416 "NPOLLING TX cpu offset"); 2417 } 2418 #endif 2419 2420 #ifdef BNX_RSS_DEBUG 2421 SYSCTL_ADD_INT(ctx, tree, OID_AUTO, 2422 "std_refill_mask", CTLFLAG_RD, 2423 &sc->bnx_rx_std_ring.bnx_rx_std_refill, 0, ""); 2424 SYSCTL_ADD_INT(ctx, tree, OID_AUTO, 2425 "std_used", CTLFLAG_RD, 2426 
&sc->bnx_rx_std_ring.bnx_rx_std_used, 0, ""); 2427 SYSCTL_ADD_INT(ctx, tree, OID_AUTO, 2428 "rss_debug", CTLFLAG_RW, &sc->bnx_rss_debug, 0, ""); 2429 for (i = 0; i < sc->bnx_rx_retcnt; ++i) { 2430 ksnprintf(desc, sizeof(desc), "rx_pkt%d", i); 2431 SYSCTL_ADD_ULONG(ctx, tree, OID_AUTO, 2432 desc, CTLFLAG_RW, &sc->bnx_rx_ret_ring[i].bnx_rx_pkt, ""); 2433 2434 ksnprintf(desc, sizeof(desc), "rx_force_sched%d", i); 2435 SYSCTL_ADD_ULONG(ctx, tree, OID_AUTO, 2436 desc, CTLFLAG_RW, 2437 &sc->bnx_rx_ret_ring[i].bnx_rx_force_sched, ""); 2438 } 2439 #endif 2440 #ifdef BNX_TSS_DEBUG 2441 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) { 2442 ksnprintf(desc, sizeof(desc), "tx_pkt%d", i); 2443 SYSCTL_ADD_ULONG(ctx, tree, OID_AUTO, 2444 desc, CTLFLAG_RW, &sc->bnx_tx_ring[i].bnx_tx_pkt, ""); 2445 } 2446 #endif 2447 2448 SYSCTL_ADD_ULONG(ctx, tree, OID_AUTO, 2449 "norxbds", CTLFLAG_RW, &sc->bnx_norxbds, ""); 2450 2451 SYSCTL_ADD_ULONG(ctx, tree, OID_AUTO, 2452 "errors", CTLFLAG_RW, &sc->bnx_errors, ""); 2453 2454 #ifdef BNX_TSO_DEBUG 2455 for (i = 0; i < BNX_TSO_NSTATS; ++i) { 2456 ksnprintf(desc, sizeof(desc), "tso%d", i + 1); 2457 SYSCTL_ADD_ULONG(ctx, tree, OID_AUTO, 2458 desc, CTLFLAG_RW, &sc->bnx_tsosegs[i], ""); 2459 } 2460 #endif 2461 2462 /* 2463 * Call MI attach routine. 
 */
	ether_ifattach(ifp, ether_addr, NULL);

	/*
	 * Setup TX rings and subqueues: bind each TX ring to its ALTQ
	 * subqueue, attach the per-ring hardware serializer and TX
	 * watchdog, and remember the subqueue pointer in the ring.
	 */
	for (i = 0; i < sc->bnx_tx_ringcnt; ++i) {
		struct ifaltq_subque *ifsq = ifq_get_subq(&ifp->if_snd, i);
		struct bnx_tx_ring *txr = &sc->bnx_tx_ring[i];

		ifsq_set_cpuid(ifsq, txr->bnx_tx_cpuid);
		ifsq_set_hw_serialize(ifsq, &txr->bnx_tx_serialize);
		ifsq_set_priv(ifsq, txr);
		txr->bnx_ifsq = ifsq;

		ifsq_watchdog_init(&txr->bnx_tx_watchdog, ifsq, bnx_watchdog);

		if (bootverbose) {
			device_printf(dev, "txr %d -> cpu%d\n", i,
			    txr->bnx_tx_cpuid);
		}
	}

	/* Hook up the interrupt handler(s); undo ifattach on failure. */
	error = bnx_setup_intr(sc);
	if (error) {
		ether_ifdetach(ifp);
		goto fail;
	}
	bnx_set_tick_cpuid(sc, FALSE);

	/*
	 * Create RX standard ring refilling thread
	 *
	 * Target CPU defaults to (unit % ncpus) and may be overridden
	 * by the "std.cpuid" tunable; out-of-range values fall back to
	 * the default.
	 */
	std_cpuid_def = device_get_unit(dev) % ncpus;
	std_cpuid = device_getenv_int(dev, "std.cpuid", std_cpuid_def);
	if (std_cpuid < 0 || std_cpuid >= ncpus) {
		device_printf(dev, "invalid std.cpuid %d, use %d\n",
		    std_cpuid, std_cpuid_def);
		std_cpuid = std_cpuid_def;
	}

	std = &sc->bnx_rx_std_ring;
	/* TDF_NOSTART: created dormant; scheduled when refill is needed. */
	lwkt_create(bnx_rx_std_refill_ithread, std, NULL,
	    &std->bnx_rx_std_ithread, TDF_NOSTART | TDF_INTTHREAD, std_cpuid,
	    "%s std", device_get_nameunit(dev));
	lwkt_setpri(&std->bnx_rx_std_ithread, TDPRI_INT_MED);
	std->bnx_rx_std_ithread.td_preemptable = lwkt_preempt;
	sc->bnx_flags |= BNX_FLAG_STD_THREAD;

	return(0);
fail:
	/* bnx_detach() tolerates a partially constructed softc. */
	bnx_detach(dev);
	return(error);
}

/*
 * Device detach: stop the chip, terminate the RX standard ring refill
 * thread, and release every resource acquired by bnx_attach().  This
 * is also bnx_attach()'s error-unwind path, so each release below is
 * guarded against resources that were never allocated.
 */
static int
bnx_detach(device_t dev)
{
	struct bnx_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = &sc->arpcom.ac_if;

		/* Quiesce the chip before unhooking the interrupt. */
		ifnet_serialize_all(ifp);
		bnx_stop(sc);
		bnx_teardown_intr(sc, sc->bnx_intr_cnt);
		ifnet_deserialize_all(ifp);

		ether_ifdetach(ifp);
	}

	if (sc->bnx_flags & BNX_FLAG_STD_THREAD) {
		struct bnx_rx_std_ring *std = &sc->bnx_rx_std_ring;

		/* Interlock before requesting the stop, so the wakeup
		 * from the exiting thread cannot be lost. */
		tsleep_interlock(std, 0);

		/* The stop request must run on the refill thread's CPU;
		 * use an IPI if that is not the current CPU. */
		if (std->bnx_rx_std_ithread.td_gd == mycpu) {
			bnx_rx_std_refill_stop(std);
		} else {
			lwkt_send_ipiq(std->bnx_rx_std_ithread.td_gd,
			    bnx_rx_std_refill_stop, std);
		}

		/* Wait for the refill thread to acknowledge and exit. */
		tsleep(std, PINTERLOCKED, "bnx_detach", 0);
		if (bootverbose)
			device_printf(dev, "RX std ithread exited\n");

		/* Make sure the stop IPI has completed everywhere. */
		lwkt_synchronize_ipiqs("bnx_detach_ipiq");
	}

	if (sc->bnx_flags & BNX_FLAG_TBI)
		ifmedia_removeall(&sc->bnx_ifmedia);
	if (sc->bnx_miibus)
		device_delete_child(dev, sc->bnx_miibus);
	bus_generic_detach(dev);

	bnx_free_intr(sc);

	if (sc->bnx_msix_mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->bnx_msix_mem_rid,
		    sc->bnx_msix_mem_res);
	}
	if (sc->bnx_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    BGE_PCI_BAR0, sc->bnx_res);
	}
	if (sc->bnx_res2 != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    PCIR_BAR(2), sc->bnx_res2);
	}

	bnx_dma_free(sc);

	if (sc->bnx_serialize != NULL)
		kfree(sc->bnx_serialize, M_DEVBUF);

	return 0;
}

/*
 * Issue a global reset of the controller, restore the PCI
 * configuration state the reset clobbers, and wait for the bootcode
 * firmware to signal that it has reinitialized.
 */
static void
bnx_reset(struct bnx_softc *sc)
{
	device_t dev = sc->bnx_dev;
	uint32_t cachesize, command, reset, mac_mode, mac_mode_mask;
	void (*write_op)(struct bnx_softc *, uint32_t, uint32_t);
	int i, val = 0;
	uint16_t devctl;

	/*
	 * Preserve the MAC mode bits (plus the APE RX/TX enables when
	 * management firmware runs on the APE) across the reset.
	 */
	mac_mode_mask = BGE_MACMODE_HALF_DUPLEX | BGE_MACMODE_PORTMODE;
	if (sc->bnx_mfw_flags & BNX_MFW_ON_APE)
		mac_mode_mask |= BGE_MACMODE_APE_RX_EN | BGE_MACMODE_APE_TX_EN;
	mac_mode = CSR_READ_4(sc, BGE_MAC_MODE) & mac_mode_mask;

	write_op = bnx_writemem_direct;

	/* Request the NVRAM software arbitration lock and poll for the
	 * grant (up to 8000 * 20us = 160ms). */
	CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
	for (i = 0; i < 8000; i++) {
		if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1)
			break;
		DELAY(20);
	}
	if (i == 8000)
		/* Non-fatal: proceed with reset without the NVRAM lock. */
		if_printf(&sc->arpcom.ac_if, "NVRAM lock timedout!\n");

	/* Take APE lock when performing reset. */
	bnx_ape_lock(sc, BGE_APE_LOCK_GRC);

	/* Save some important PCI state; the core reset wipes it. */
	cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
	command = pci_read_config(dev, BGE_PCI_CMD, 4);

	pci_write_config(dev, BGE_PCI_MISC_CTL,
	    BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
	    BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW|
	    BGE_PCIMISCCTL_TAGGED_STATUS, 4);

	/* Disable fastboot on controllers that support it. */
	if (bootverbose)
		if_printf(&sc->arpcom.ac_if, "Disabling fastboot\n");
	CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0x0);

	/*
	 * Write the magic number to SRAM at offset 0xB50.
	 * When firmware finishes its initialization it will
	 * write ~BGE_SRAM_FW_MB_MAGIC to the same location.
	 */
	bnx_writemem_ind(sc, BGE_SRAM_FW_MB, BGE_SRAM_FW_MB_MAGIC);

	reset = BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1);

	/* XXX: Broadcom Linux driver. */
	/* Force PCI-E 1.0a mode */
	if (!BNX_IS_57765_PLUS(sc) &&
	    CSR_READ_4(sc, BGE_PCIE_PHY_TSTCTL) ==
	    (BGE_PCIE_PHY_TSTCTL_PSCRAM |
	     BGE_PCIE_PHY_TSTCTL_PCIE10)) {
		CSR_WRITE_4(sc, BGE_PCIE_PHY_TSTCTL,
		    BGE_PCIE_PHY_TSTCTL_PSCRAM);
	}
	if (sc->bnx_chipid != BGE_CHIPID_BCM5750_A0) {
		/* Prevent PCIE link training during global reset */
		CSR_WRITE_4(sc, BGE_MISC_CFG, (1<<29));
		reset |= (1<<29);
	}

	/*
	 * Set GPHY Power Down Override to leave GPHY
	 * powered up in D0 uninitialized.
	 */
	if ((sc->bnx_flags & BNX_FLAG_CPMU) == 0)
		reset |= BGE_MISCCFG_GPHY_PD_OVERRIDE;

	/* Issue global reset */
	write_op(sc, BGE_MISC_CFG, reset);

	DELAY(100 * 1000);

	/* XXX: Broadcom Linux driver. */
	if (sc->bnx_chipid == BGE_CHIPID_BCM5750_A0) {
		uint32_t v;

		DELAY(500000); /* wait for link training to complete */
		v = pci_read_config(dev, 0xc4, 4);
		pci_write_config(dev, 0xc4, v | (1<<15), 4);
	}

	devctl = pci_read_config(dev, sc->bnx_pciecap + PCIER_DEVCTRL, 2);

	/* Disable no snoop and disable relaxed ordering. */
	devctl &= ~(PCIEM_DEVCTL_RELAX_ORDER | PCIEM_DEVCTL_NOSNOOP);

	/* Old PCI-E chips only support 128 bytes Max PayLoad Size. */
	if ((sc->bnx_flags & BNX_FLAG_CPMU) == 0) {
		devctl &= ~PCIEM_DEVCTL_MAX_PAYLOAD_MASK;
		devctl |= PCIEM_DEVCTL_MAX_PAYLOAD_128;
	}

	pci_write_config(dev, sc->bnx_pciecap + PCIER_DEVCTRL,
	    devctl, 2);

	/* Clear error status (write-1-to-clear device status bits). */
	pci_write_config(dev, sc->bnx_pciecap + PCIER_DEVSTS,
	    PCIEM_DEVSTS_CORR_ERR |
	    PCIEM_DEVSTS_NFATAL_ERR |
	    PCIEM_DEVSTS_FATAL_ERR |
	    PCIEM_DEVSTS_UNSUPP_REQ, 2);

	/* Reset some of the PCI state that got zapped by reset */
	pci_write_config(dev, BGE_PCI_MISC_CTL,
	    BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
	    BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW|
	    BGE_PCIMISCCTL_TAGGED_STATUS, 4);
	val = BGE_PCISTATE_ROM_ENABLE | BGE_PCISTATE_ROM_RETRY_ENABLE;
	if (sc->bnx_mfw_flags & BNX_MFW_ON_APE) {
		/* Re-enable host access to APE registers/shmem. */
		val |= BGE_PCISTATE_ALLOW_APE_CTLSPC_WR |
		    BGE_PCISTATE_ALLOW_APE_SHMEM_WR |
		    BGE_PCISTATE_ALLOW_APE_PSPACE_WR;
	}
	pci_write_config(dev, BGE_PCI_PCISTATE, val, 4);
	pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
	pci_write_config(dev, BGE_PCI_CMD, command, 4);

	/* Enable memory arbiter */
	CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);

	/* Fix up byte swapping */
	CSR_WRITE_4(sc, BGE_MODE_CTL, bnx_dma_swap_options(sc));

	/* Restore the MAC mode bits saved before the reset. */
	val = CSR_READ_4(sc, BGE_MAC_MODE);
	val = (val & ~mac_mode_mask) | mac_mode;
	CSR_WRITE_4(sc, BGE_MAC_MODE, val);
	DELAY(40);
	bnx_ape_unlock(sc, BGE_APE_LOCK_GRC);

	/*
	 * Poll until we see the 1's complement of the magic number.
	 * This indicates that the firmware initialization is complete.
	 */
	for (i = 0; i < BNX_FIRMWARE_TIMEOUT; i++) {
		val = bnx_readmem_ind(sc, BGE_SRAM_FW_MB);
		if (val == ~BGE_SRAM_FW_MB_MAGIC)
			break;
		DELAY(10);
	}
	if (i == BNX_FIRMWARE_TIMEOUT) {
		/* Non-fatal: report and continue. */
		if_printf(&sc->arpcom.ac_if, "firmware handshake "
		    "timed out, found 0x%08x\n", val);
	}

	/* BCM57765 A0 needs additional time before accessing. */
	if (sc->bnx_chipid == BGE_CHIPID_BCM57765_A0)
		DELAY(10 * 1000);

	/*
	 * The 5704 in TBI mode apparently needs some special
	 * adjustment to insure the SERDES drive level is set
	 * to 1.2V.
	 */
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5704 &&
	    (sc->bnx_flags & BNX_FLAG_TBI)) {
		uint32_t serdescfg;

		serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG);
		serdescfg = (serdescfg & ~0xFFF) | 0x880;
		CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg);
	}

	/* Re-program MI mode with autopolling disabled. */
	CSR_WRITE_4(sc, BGE_MI_MODE,
	    sc->bnx_mi_mode & ~BGE_MIMODE_AUTOPOLL);
	DELAY(80);

	/* XXX: Broadcom Linux driver. */
	if (!BNX_IS_57765_PLUS(sc)) {
		uint32_t v;

		/* Enable Data FIFO protection. */
		v = CSR_READ_4(sc, BGE_PCIE_TLDLPL_PORT);
		CSR_WRITE_4(sc, BGE_PCIE_TLDLPL_PORT, v | (1 << 25));
	}

	DELAY(10000);

	if (sc->bnx_asicrev == BGE_ASICREV_BCM5720) {
		BNX_CLRBIT(sc, BGE_CPMU_CLCK_ORIDE,
		    CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
	}
}

/*
 * Frame reception handling. This is called if there's a frame
 * on the receive return list.
 *
 * Note: we have to be able to handle two possibilities here:
 * 1) the frame is from the jumbo receive ring
 * 2) the frame is from the standard receive ring
 */

static void
bnx_rxeof(struct bnx_rx_ret_ring *ret, uint16_t rx_prod, int count)
{
	struct bnx_softc *sc = ret->bnx_sc;
	struct bnx_rx_std_ring *std = ret->bnx_std;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	/* std_used batches consumed std-ring buffers locally; it is
	 * flushed into std->bnx_rx_std_used atomically in chunks. */
	int std_used = 0, cpuid = mycpuid;

	/* Consume at most 'count' descriptors up to the producer index. */
	while (ret->bnx_rx_saved_considx != rx_prod && count != 0) {
		struct pktinfo pi0, *pi = NULL;
		struct bge_rx_bd *cur_rx;
		struct bnx_rx_buf *rb;
		uint32_t rxidx;
		struct mbuf *m = NULL;
		uint16_t vlan_tag = 0;
		int have_tag = 0;

		--count;

		cur_rx = &ret->bnx_rx_ret_ring[ret->bnx_rx_saved_considx];

		rxidx = cur_rx->bge_idx;
		KKASSERT(rxidx < BGE_STD_RX_RING_CNT);

		BNX_INC(ret->bnx_rx_saved_considx, BNX_RETURN_RING_CNT);
#ifdef BNX_RSS_DEBUG
		ret->bnx_rx_pkt++;
#endif

		if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
			have_tag = 1;
			vlan_tag = cur_rx->bge_vlan_tag;
		}

		/*
		 * Once enough packets were processed, publish the
		 * locally batched usage count and kick the refill
		 * thread so the standard ring does not run dry.
		 */
		if (ret->bnx_rx_cnt >= ret->bnx_rx_cntmax) {
			atomic_add_int(&std->bnx_rx_std_used, std_used);
			std_used = 0;

			bnx_rx_std_refill_sched(ret, std);
		}
		ret->bnx_rx_cnt++;
		++std_used;

		rb = &std->bnx_rx_std_buf[rxidx];
		m = rb->bnx_rx_mbuf;
		if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
			IFNET_STAT_INC(ifp, ierrors, 1);
			/* Store fence: buffer state must be visible
			 * before the refill thread sees 'refilled'. */
			cpu_sfence();
			rb->bnx_rx_refilled = 1;
			continue;
		}
		/* Replace the mbuf; on failure recycle the old one
		 * and count an input error. */
		if (bnx_newbuf_std(ret, rxidx, 0)) {
			IFNET_STAT_INC(ifp, ierrors, 1);
			continue;
		}

		IFNET_STAT_INC(ifp, ipackets, 1);
		m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
		m->m_pkthdr.rcvif = ifp;

		if ((ifp->if_capenable & IFCAP_RXCSUM) &&
		    (cur_rx->bge_flags & BGE_RXBDFLAG_IPV6) == 0) {
			if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
				if ((cur_rx->bge_error_flag &
				    BGE_RXERRFLAG_IP_CSUM_NOK) == 0)
					m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
			}
			if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) {
				m->m_pkthdr.csum_data =
				    cur_rx->bge_tcp_udp_csum;
				m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
				    CSUM_PSEUDO_HDR;
			}
		}
		if (ifp->if_capenable & IFCAP_RSS) {
			pi = bnx_rss_info(&pi0, cur_rx);
			if (pi != NULL &&
			    (cur_rx->bge_flags & BGE_RXBDFLAG_RSS_HASH)) {
				m->m_flags |= M_HASH;
				m->m_pkthdr.hash =
				    toeplitz_hash(cur_rx->bge_hash);
			}
		}

		/*
		 * If we received a packet with a vlan tag, pass it
		 * to vlan_input() instead of ether_input().
		 */
		if (have_tag) {
			m->m_flags |= M_VLANTAG;
			m->m_pkthdr.ether_vlantag = vlan_tag;
		}
		ifp->if_input(ifp, m, pi, cpuid);
	}
	/* Acknowledge the consumed descriptors to the chip. */
	bnx_writembx(sc, ret->bnx_rx_mbx, ret->bnx_rx_saved_considx);

	if (std_used > 0) {
		int cur_std_used;

		/* Publish the remainder; force a refill if the std
		 * ring is at least half consumed. */
		cur_std_used = atomic_fetchadd_int(&std->bnx_rx_std_used,
		    std_used);
		if (cur_std_used + std_used >= (BGE_STD_RX_RING_CNT / 2)) {
#ifdef BNX_RSS_DEBUG
			ret->bnx_rx_force_sched++;
#endif
			bnx_rx_std_refill_sched(ret, std);
		}
	}
}

/*
 * Reclaim transmitted frames up to the given TX consumer index.
 */
static void
bnx_txeof(struct bnx_tx_ring *txr, uint16_t tx_cons)
{
	struct ifnet *ifp = &txr->bnx_sc->arpcom.ac_if;

	/*
	 * Go through our tx ring and free mbufs for those
	 * frames that have been sent.
2899 */ 2900 while (txr->bnx_tx_saved_considx != tx_cons) { 2901 struct bnx_tx_buf *buf; 2902 uint32_t idx = 0; 2903 2904 idx = txr->bnx_tx_saved_considx; 2905 buf = &txr->bnx_tx_buf[idx]; 2906 if (buf->bnx_tx_mbuf != NULL) { 2907 IFNET_STAT_INC(ifp, opackets, 1); 2908 #ifdef BNX_TSS_DEBUG 2909 txr->bnx_tx_pkt++; 2910 #endif 2911 bus_dmamap_unload(txr->bnx_tx_mtag, 2912 buf->bnx_tx_dmamap); 2913 m_freem(buf->bnx_tx_mbuf); 2914 buf->bnx_tx_mbuf = NULL; 2915 } 2916 txr->bnx_tx_cnt--; 2917 BNX_INC(txr->bnx_tx_saved_considx, BGE_TX_RING_CNT); 2918 } 2919 2920 if ((BGE_TX_RING_CNT - txr->bnx_tx_cnt) >= 2921 (BNX_NSEG_RSVD + BNX_NSEG_SPARE)) 2922 ifsq_clr_oactive(txr->bnx_ifsq); 2923 2924 if (txr->bnx_tx_cnt == 0) 2925 txr->bnx_tx_watchdog.wd_timer = 0; 2926 2927 if (!ifsq_is_empty(txr->bnx_ifsq)) 2928 ifsq_devstart(txr->bnx_ifsq); 2929 } 2930 2931 static int 2932 bnx_handle_status(struct bnx_softc *sc) 2933 { 2934 uint32_t status; 2935 int handle = 0; 2936 2937 status = *sc->bnx_hw_status; 2938 2939 if (status & BGE_STATFLAG_ERROR) { 2940 uint32_t val; 2941 int reset = 0; 2942 2943 sc->bnx_errors++; 2944 2945 val = CSR_READ_4(sc, BGE_FLOW_ATTN); 2946 if (val & ~BGE_FLOWATTN_MB_LOWAT) { 2947 if_printf(&sc->arpcom.ac_if, 2948 "flow attn 0x%08x\n", val); 2949 reset = 1; 2950 } 2951 2952 val = CSR_READ_4(sc, BGE_MSI_STATUS); 2953 if (val & ~BGE_MSISTAT_MSI_PCI_REQ) { 2954 if_printf(&sc->arpcom.ac_if, 2955 "msi status 0x%08x\n", val); 2956 reset = 1; 2957 } 2958 2959 val = CSR_READ_4(sc, BGE_RDMA_STATUS); 2960 if (val) { 2961 if_printf(&sc->arpcom.ac_if, 2962 "rmda status 0x%08x\n", val); 2963 reset = 1; 2964 } 2965 2966 val = CSR_READ_4(sc, BGE_WDMA_STATUS); 2967 if (val) { 2968 if_printf(&sc->arpcom.ac_if, 2969 "wdma status 0x%08x\n", val); 2970 reset = 1; 2971 } 2972 2973 if (reset) { 2974 bnx_serialize_skipmain(sc); 2975 bnx_init(sc); 2976 bnx_deserialize_skipmain(sc); 2977 } 2978 handle = 1; 2979 } 2980 2981 if ((status & BGE_STATFLAG_LINKSTATE_CHANGED) || 
sc->bnx_link_evt) { 2982 if (bootverbose) { 2983 if_printf(&sc->arpcom.ac_if, "link change, " 2984 "link_evt %d\n", sc->bnx_link_evt); 2985 } 2986 bnx_link_poll(sc); 2987 handle = 1; 2988 } 2989 2990 return handle; 2991 } 2992 2993 #ifdef IFPOLL_ENABLE 2994 2995 static void 2996 bnx_npoll_rx(struct ifnet *ifp __unused, void *xret, int cycle) 2997 { 2998 struct bnx_rx_ret_ring *ret = xret; 2999 uint16_t rx_prod; 3000 3001 ASSERT_SERIALIZED(&ret->bnx_rx_ret_serialize); 3002 3003 ret->bnx_saved_status_tag = *ret->bnx_hw_status_tag; 3004 cpu_lfence(); 3005 3006 rx_prod = *ret->bnx_rx_considx; 3007 if (ret->bnx_rx_saved_considx != rx_prod) 3008 bnx_rxeof(ret, rx_prod, cycle); 3009 } 3010 3011 static void 3012 bnx_npoll_tx_notag(struct ifnet *ifp __unused, void *xtxr, int cycle __unused) 3013 { 3014 struct bnx_tx_ring *txr = xtxr; 3015 uint16_t tx_cons; 3016 3017 ASSERT_SERIALIZED(&txr->bnx_tx_serialize); 3018 3019 tx_cons = *txr->bnx_tx_considx; 3020 if (txr->bnx_tx_saved_considx != tx_cons) 3021 bnx_txeof(txr, tx_cons); 3022 } 3023 3024 static void 3025 bnx_npoll_tx(struct ifnet *ifp, void *xtxr, int cycle) 3026 { 3027 struct bnx_tx_ring *txr = xtxr; 3028 3029 ASSERT_SERIALIZED(&txr->bnx_tx_serialize); 3030 3031 txr->bnx_saved_status_tag = *txr->bnx_hw_status_tag; 3032 cpu_lfence(); 3033 bnx_npoll_tx_notag(ifp, txr, cycle); 3034 } 3035 3036 static void 3037 bnx_npoll_status_notag(struct ifnet *ifp) 3038 { 3039 struct bnx_softc *sc = ifp->if_softc; 3040 3041 ASSERT_SERIALIZED(&sc->bnx_main_serialize); 3042 3043 if (bnx_handle_status(sc)) { 3044 /* 3045 * Status changes are handled; force the chip to 3046 * update the status block to reflect whether there 3047 * are more status changes or not, else staled status 3048 * changes are always seen. 
3049 */ 3050 BNX_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW); 3051 } 3052 } 3053 3054 static void 3055 bnx_npoll_status(struct ifnet *ifp) 3056 { 3057 struct bnx_softc *sc = ifp->if_softc; 3058 3059 ASSERT_SERIALIZED(&sc->bnx_main_serialize); 3060 3061 sc->bnx_saved_status_tag = *sc->bnx_hw_status_tag; 3062 cpu_lfence(); 3063 bnx_npoll_status_notag(ifp); 3064 } 3065 3066 static void 3067 bnx_npoll(struct ifnet *ifp, struct ifpoll_info *info) 3068 { 3069 struct bnx_softc *sc = ifp->if_softc; 3070 int i; 3071 3072 ASSERT_IFNET_SERIALIZED_ALL(ifp); 3073 3074 if (info != NULL) { 3075 if (sc->bnx_flags & BNX_FLAG_STATUS_HASTAG) 3076 info->ifpi_status.status_func = bnx_npoll_status; 3077 else 3078 info->ifpi_status.status_func = bnx_npoll_status_notag; 3079 info->ifpi_status.serializer = &sc->bnx_main_serialize; 3080 3081 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) { 3082 struct bnx_tx_ring *txr = &sc->bnx_tx_ring[i]; 3083 int idx = i + sc->bnx_npoll_txoff; 3084 3085 KKASSERT(idx < ncpus2); 3086 if (sc->bnx_flags & BNX_FLAG_RXTX_BUNDLE) { 3087 info->ifpi_tx[idx].poll_func = 3088 bnx_npoll_tx_notag; 3089 } else { 3090 info->ifpi_tx[idx].poll_func = bnx_npoll_tx; 3091 } 3092 info->ifpi_tx[idx].arg = txr; 3093 info->ifpi_tx[idx].serializer = &txr->bnx_tx_serialize; 3094 ifsq_set_cpuid(txr->bnx_ifsq, idx); 3095 } 3096 3097 for (i = 0; i < sc->bnx_rx_retcnt; ++i) { 3098 struct bnx_rx_ret_ring *ret = &sc->bnx_rx_ret_ring[i]; 3099 int idx = i + sc->bnx_npoll_rxoff; 3100 3101 KKASSERT(idx < ncpus2); 3102 info->ifpi_rx[idx].poll_func = bnx_npoll_rx; 3103 info->ifpi_rx[idx].arg = ret; 3104 info->ifpi_rx[idx].serializer = 3105 &ret->bnx_rx_ret_serialize; 3106 } 3107 3108 if (ifp->if_flags & IFF_RUNNING) { 3109 bnx_disable_intr(sc); 3110 bnx_set_tick_cpuid(sc, TRUE); 3111 3112 sc->bnx_coal_chg = BNX_TX_COAL_BDS_CHG | 3113 BNX_RX_COAL_BDS_CHG; 3114 bnx_coal_change(sc); 3115 } 3116 } else { 3117 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) { 3118 
ifsq_set_cpuid(sc->bnx_tx_ring[i].bnx_ifsq, 3119 sc->bnx_tx_ring[i].bnx_tx_cpuid); 3120 } 3121 if (ifp->if_flags & IFF_RUNNING) { 3122 sc->bnx_coal_chg = BNX_TX_COAL_BDS_CHG | 3123 BNX_RX_COAL_BDS_CHG; 3124 bnx_coal_change(sc); 3125 3126 bnx_enable_intr(sc); 3127 bnx_set_tick_cpuid(sc, FALSE); 3128 } 3129 } 3130 } 3131 3132 #endif /* IFPOLL_ENABLE */ 3133 3134 static void 3135 bnx_intr_legacy(void *xsc) 3136 { 3137 struct bnx_softc *sc = xsc; 3138 struct bnx_rx_ret_ring *ret = &sc->bnx_rx_ret_ring[0]; 3139 3140 if (ret->bnx_saved_status_tag == *ret->bnx_hw_status_tag) { 3141 uint32_t val; 3142 3143 val = pci_read_config(sc->bnx_dev, BGE_PCI_PCISTATE, 4); 3144 if (val & BGE_PCISTAT_INTR_NOTACT) 3145 return; 3146 } 3147 3148 /* 3149 * NOTE: 3150 * Interrupt will have to be disabled if tagged status 3151 * is used, else interrupt will always be asserted on 3152 * certain chips (at least on BCM5750 AX/BX). 3153 */ 3154 bnx_writembx(sc, BGE_MBX_IRQ0_LO, 1); 3155 3156 bnx_intr(sc); 3157 } 3158 3159 static void 3160 bnx_msi(void *xsc) 3161 { 3162 bnx_intr(xsc); 3163 } 3164 3165 static void 3166 bnx_intr(struct bnx_softc *sc) 3167 { 3168 struct ifnet *ifp = &sc->arpcom.ac_if; 3169 struct bnx_rx_ret_ring *ret = &sc->bnx_rx_ret_ring[0]; 3170 3171 ASSERT_SERIALIZED(&sc->bnx_main_serialize); 3172 3173 ret->bnx_saved_status_tag = *ret->bnx_hw_status_tag; 3174 /* 3175 * Use a load fence to ensure that status_tag is saved 3176 * before rx_prod, tx_cons and status. 
3177 */ 3178 cpu_lfence(); 3179 3180 bnx_handle_status(sc); 3181 3182 if (ifp->if_flags & IFF_RUNNING) { 3183 struct bnx_tx_ring *txr = &sc->bnx_tx_ring[0]; 3184 uint16_t rx_prod, tx_cons; 3185 3186 lwkt_serialize_enter(&ret->bnx_rx_ret_serialize); 3187 rx_prod = *ret->bnx_rx_considx; 3188 if (ret->bnx_rx_saved_considx != rx_prod) 3189 bnx_rxeof(ret, rx_prod, -1); 3190 lwkt_serialize_exit(&ret->bnx_rx_ret_serialize); 3191 3192 lwkt_serialize_enter(&txr->bnx_tx_serialize); 3193 tx_cons = *txr->bnx_tx_considx; 3194 if (txr->bnx_tx_saved_considx != tx_cons) 3195 bnx_txeof(txr, tx_cons); 3196 lwkt_serialize_exit(&txr->bnx_tx_serialize); 3197 } 3198 3199 bnx_writembx(sc, BGE_MBX_IRQ0_LO, ret->bnx_saved_status_tag << 24); 3200 } 3201 3202 static void 3203 bnx_msix_tx_status(void *xtxr) 3204 { 3205 struct bnx_tx_ring *txr = xtxr; 3206 struct bnx_softc *sc = txr->bnx_sc; 3207 struct ifnet *ifp = &sc->arpcom.ac_if; 3208 3209 ASSERT_SERIALIZED(&sc->bnx_main_serialize); 3210 3211 txr->bnx_saved_status_tag = *txr->bnx_hw_status_tag; 3212 /* 3213 * Use a load fence to ensure that status_tag is saved 3214 * before tx_cons and status. 3215 */ 3216 cpu_lfence(); 3217 3218 bnx_handle_status(sc); 3219 3220 if (ifp->if_flags & IFF_RUNNING) { 3221 uint16_t tx_cons; 3222 3223 lwkt_serialize_enter(&txr->bnx_tx_serialize); 3224 tx_cons = *txr->bnx_tx_considx; 3225 if (txr->bnx_tx_saved_considx != tx_cons) 3226 bnx_txeof(txr, tx_cons); 3227 lwkt_serialize_exit(&txr->bnx_tx_serialize); 3228 } 3229 3230 bnx_writembx(sc, BGE_MBX_IRQ0_LO, txr->bnx_saved_status_tag << 24); 3231 } 3232 3233 static void 3234 bnx_msix_rx(void *xret) 3235 { 3236 struct bnx_rx_ret_ring *ret = xret; 3237 uint16_t rx_prod; 3238 3239 ASSERT_SERIALIZED(&ret->bnx_rx_ret_serialize); 3240 3241 ret->bnx_saved_status_tag = *ret->bnx_hw_status_tag; 3242 /* 3243 * Use a load fence to ensure that status_tag is saved 3244 * before rx_prod. 
3245 */ 3246 cpu_lfence(); 3247 3248 rx_prod = *ret->bnx_rx_considx; 3249 if (ret->bnx_rx_saved_considx != rx_prod) 3250 bnx_rxeof(ret, rx_prod, -1); 3251 3252 bnx_writembx(ret->bnx_sc, ret->bnx_msix_mbx, 3253 ret->bnx_saved_status_tag << 24); 3254 } 3255 3256 static void 3257 bnx_msix_rxtx(void *xret) 3258 { 3259 struct bnx_rx_ret_ring *ret = xret; 3260 struct bnx_tx_ring *txr = ret->bnx_txr; 3261 uint16_t rx_prod, tx_cons; 3262 3263 ASSERT_SERIALIZED(&ret->bnx_rx_ret_serialize); 3264 3265 ret->bnx_saved_status_tag = *ret->bnx_hw_status_tag; 3266 /* 3267 * Use a load fence to ensure that status_tag is saved 3268 * before rx_prod and tx_cons. 3269 */ 3270 cpu_lfence(); 3271 3272 rx_prod = *ret->bnx_rx_considx; 3273 if (ret->bnx_rx_saved_considx != rx_prod) 3274 bnx_rxeof(ret, rx_prod, -1); 3275 3276 lwkt_serialize_enter(&txr->bnx_tx_serialize); 3277 tx_cons = *txr->bnx_tx_considx; 3278 if (txr->bnx_tx_saved_considx != tx_cons) 3279 bnx_txeof(txr, tx_cons); 3280 lwkt_serialize_exit(&txr->bnx_tx_serialize); 3281 3282 bnx_writembx(ret->bnx_sc, ret->bnx_msix_mbx, 3283 ret->bnx_saved_status_tag << 24); 3284 } 3285 3286 static void 3287 bnx_msix_status(void *xsc) 3288 { 3289 struct bnx_softc *sc = xsc; 3290 3291 ASSERT_SERIALIZED(&sc->bnx_main_serialize); 3292 3293 sc->bnx_saved_status_tag = *sc->bnx_hw_status_tag; 3294 /* 3295 * Use a load fence to ensure that status_tag is saved 3296 * before status. 3297 */ 3298 cpu_lfence(); 3299 3300 bnx_handle_status(sc); 3301 3302 bnx_writembx(sc, BGE_MBX_IRQ0_LO, sc->bnx_saved_status_tag << 24); 3303 } 3304 3305 static void 3306 bnx_tick(void *xsc) 3307 { 3308 struct bnx_softc *sc = xsc; 3309 3310 lwkt_serialize_enter(&sc->bnx_main_serialize); 3311 3312 bnx_stats_update_regs(sc); 3313 3314 if (sc->bnx_flags & BNX_FLAG_TBI) { 3315 /* 3316 * Since in TBI mode auto-polling can't be used we should poll 3317 * link status manually. Here we register pending link event 3318 * and trigger interrupt. 
3319 */ 3320 sc->bnx_link_evt++; 3321 BNX_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW); 3322 } else if (!sc->bnx_link) { 3323 mii_tick(device_get_softc(sc->bnx_miibus)); 3324 } 3325 3326 callout_reset_bycpu(&sc->bnx_tick_timer, hz, bnx_tick, sc, 3327 sc->bnx_tick_cpuid); 3328 3329 lwkt_serialize_exit(&sc->bnx_main_serialize); 3330 } 3331 3332 static void 3333 bnx_stats_update_regs(struct bnx_softc *sc) 3334 { 3335 struct ifnet *ifp = &sc->arpcom.ac_if; 3336 struct bge_mac_stats_regs stats; 3337 uint32_t *s, val; 3338 int i; 3339 3340 s = (uint32_t *)&stats; 3341 for (i = 0; i < sizeof(struct bge_mac_stats_regs); i += 4) { 3342 *s = CSR_READ_4(sc, BGE_RX_STATS + i); 3343 s++; 3344 } 3345 3346 IFNET_STAT_SET(ifp, collisions, 3347 (stats.dot3StatsSingleCollisionFrames + 3348 stats.dot3StatsMultipleCollisionFrames + 3349 stats.dot3StatsExcessiveCollisions + 3350 stats.dot3StatsLateCollisions)); 3351 3352 val = CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS); 3353 sc->bnx_norxbds += val; 3354 } 3355 3356 /* 3357 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data 3358 * pointers to descriptors. 
 */
/*
 * On success *txidx is advanced past the frame's descriptors and
 * *segs_used is incremented by the number of DMA segments consumed;
 * on failure the mbuf chain is freed and *m_head0 is set to NULL.
 */
static int
bnx_encap(struct bnx_tx_ring *txr, struct mbuf **m_head0, uint32_t *txidx,
    int *segs_used)
{
	struct bge_tx_bd *d = NULL;
	uint16_t csum_flags = 0, vlan_tag = 0, mss = 0;
	bus_dma_segment_t segs[BNX_NSEG_NEW];
	bus_dmamap_t map;
	int error, maxsegs, nsegs, idx, i;
	struct mbuf *m_head = *m_head0, *m_new;

	/* Translate checksum/TSO offload requests into descriptor flags. */
	if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
#ifdef BNX_TSO_DEBUG
		int tso_nsegs;
#endif

		error = bnx_setup_tso(txr, m_head0, &mss, &csum_flags);
		if (error)
			return error;
		m_head = *m_head0;

#ifdef BNX_TSO_DEBUG
		tso_nsegs = (m_head->m_pkthdr.len /
		    m_head->m_pkthdr.tso_segsz) - 1;
		if (tso_nsegs > (BNX_TSO_NSTATS - 1))
			tso_nsegs = BNX_TSO_NSTATS - 1;
		else if (tso_nsegs < 0)
			tso_nsegs = 0;
		txr->bnx_sc->bnx_tsosegs[tso_nsegs]++;
#endif
	} else if (m_head->m_pkthdr.csum_flags & BNX_CSUM_FEATURES) {
		if (m_head->m_pkthdr.csum_flags & CSUM_IP)
			csum_flags |= BGE_TXBDFLAG_IP_CSUM;
		if (m_head->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
			csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
		if (m_head->m_flags & M_LASTFRAG)
			csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
		else if (m_head->m_flags & M_FRAG)
			csum_flags |= BGE_TXBDFLAG_IP_FRAG;
	}
	if (m_head->m_flags & M_VLANTAG) {
		csum_flags |= BGE_TXBDFLAG_VLAN_TAG;
		vlan_tag = m_head->m_pkthdr.ether_vlantag;
	}

	idx = *txidx;
	map = txr->bnx_tx_buf[idx].bnx_tx_dmamap;

	maxsegs = (BGE_TX_RING_CNT - txr->bnx_tx_cnt) - BNX_NSEG_RSVD;
	KASSERT(maxsegs >= BNX_NSEG_SPARE,
	    ("not enough segments %d", maxsegs));

	if (maxsegs > BNX_NSEG_NEW)
		maxsegs = BNX_NSEG_NEW;

	/*
	 * Pad outbound frame to BGE_MIN_FRAMELEN for an unusual reason.
	 * The bge hardware will pad out Tx runts to BGE_MIN_FRAMELEN,
	 * but when such padded frames employ the bge IP/TCP checksum
	 * offload, the hardware checksum assist gives incorrect results
	 * (possibly from incorporating its own padding into the UDP/TCP
	 * checksum; who knows). If we pad such runts with zeros, the
	 * onboard checksum comes out correct.
	 */
	if ((csum_flags & BGE_TXBDFLAG_TCP_UDP_CSUM) &&
	    m_head->m_pkthdr.len < BNX_MIN_FRAMELEN) {
		error = m_devpad(m_head, BNX_MIN_FRAMELEN);
		if (error)
			goto back;
	}

	/* Work around chips that cannot handle short DMA segments. */
	if ((txr->bnx_tx_flags & BNX_TX_FLAG_SHORTDMA) &&
	    m_head->m_next != NULL) {
		m_new = bnx_defrag_shortdma(m_head);
		if (m_new == NULL) {
			error = ENOBUFS;
			goto back;
		}
		*m_head0 = m_head = m_new;
	}
	if ((m_head->m_pkthdr.csum_flags & CSUM_TSO) == 0 &&
	    (txr->bnx_tx_flags & BNX_TX_FLAG_FORCE_DEFRAG) &&
	    m_head->m_next != NULL) {
		/*
		 * Forcefully defragment mbuf chain to overcome hardware
		 * limitation which only support a single outstanding
		 * DMA read operation. If it fails, keep moving on using
		 * the original mbuf chain.
		 */
		m_new = m_defrag(m_head, MB_DONTWAIT);
		if (m_new != NULL)
			*m_head0 = m_head = m_new;
	}

	error = bus_dmamap_load_mbuf_defrag(txr->bnx_tx_mtag, map,
	    m_head0, segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
	if (error)
		goto back;
	*segs_used += nsegs;

	m_head = *m_head0;
	bus_dmamap_sync(txr->bnx_tx_mtag, map, BUS_DMASYNC_PREWRITE);

	/* Fill one descriptor per DMA segment, wrapping around the ring. */
	for (i = 0; ; i++) {
		d = &txr->bnx_tx_ring[idx];

		d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr);
		d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr);
		d->bge_len = segs[i].ds_len;
		d->bge_flags = csum_flags;
		d->bge_vlan_tag = vlan_tag;
		d->bge_mss = mss;

		if (i == nsegs - 1)
			break;
		BNX_INC(idx, BGE_TX_RING_CNT);
	}
	/* Mark the last segment as end of packet... */
	d->bge_flags |= BGE_TXBDFLAG_END;

	/*
	 * Insure that the map for this transmission is placed at
	 * the array index of the last descriptor in this chain.
	 */
	txr->bnx_tx_buf[*txidx].bnx_tx_dmamap = txr->bnx_tx_buf[idx].bnx_tx_dmamap;
	txr->bnx_tx_buf[idx].bnx_tx_dmamap = map;
	txr->bnx_tx_buf[idx].bnx_tx_mbuf = m_head;
	txr->bnx_tx_cnt += nsegs;

	BNX_INC(idx, BGE_TX_RING_CNT);
	*txidx = idx;
back:
	if (error) {
		m_freem(*m_head0);
		*m_head0 = NULL;
	}
	return error;
}

/*
 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit descriptors.
 */
static void
bnx_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
{
	struct bnx_tx_ring *txr = ifsq_get_priv(ifsq);
	struct mbuf *m_head = NULL;
	uint32_t prodidx;
	int nsegs = 0;

	KKASSERT(txr->bnx_ifsq == ifsq);
	ASSERT_SERIALIZED(&txr->bnx_tx_serialize);

	if ((ifp->if_flags & IFF_RUNNING) == 0 || ifsq_is_oactive(ifsq))
		return;

	prodidx = txr->bnx_tx_prodidx;

	while (txr->bnx_tx_buf[prodidx].bnx_tx_mbuf == NULL) {
		/*
		 * Sanity check: avoid coming within BGE_NSEG_RSVD
		 * descriptors of the end of the ring. Also make
		 * sure there are BGE_NSEG_SPARE descriptors for
		 * jumbo buffers' or TSO segments' defragmentation.
		 */
		if ((BGE_TX_RING_CNT - txr->bnx_tx_cnt) <
		    (BNX_NSEG_RSVD + BNX_NSEG_SPARE)) {
			ifsq_set_oactive(ifsq);
			break;
		}

		m_head = ifsq_dequeue(ifsq);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (bnx_encap(txr, &m_head, &prodidx, &nsegs)) {
			ifsq_set_oactive(ifsq);
			IFNET_STAT_INC(ifp, oerrors, 1);
			break;
		}

		/* Kick the chip once enough descriptors are batched up. */
		if (nsegs >= txr->bnx_tx_wreg) {
			/* Transmit */
			bnx_writembx(txr->bnx_sc, txr->bnx_tx_mbx, prodidx);
			nsegs = 0;
		}

		ETHER_BPF_MTAP(ifp, m_head);

		/*
		 * Set a timeout in case the chip goes out to lunch.
		 */
		txr->bnx_tx_watchdog.wd_timer = 5;
	}

	if (nsegs > 0) {
		/* Transmit */
		bnx_writembx(txr->bnx_sc, txr->bnx_tx_mbx, prodidx);
	}
	txr->bnx_tx_prodidx = prodidx;
}

/*
 * (Re)initialize the hardware: stop, reset, program MAC/RX/TX state and
 * bring the interface to RUNNING.  Called with all serializers held.
 */
static void
bnx_init(void *xsc)
{
	struct bnx_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint16_t *m;
	uint32_t mode;
	int i;
	boolean_t polling;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	/* Cancel pending I/O and flush buffers. */
	bnx_stop(sc);

	bnx_sig_pre_reset(sc, BNX_RESET_START);
	bnx_reset(sc);
	bnx_sig_post_reset(sc, BNX_RESET_START);

	bnx_chipinit(sc);

	/*
	 * Init the various state machines, ring
	 * control blocks and firmware.
	 */
	if (bnx_blockinit(sc)) {
		if_printf(ifp, "initialization failure\n");
		bnx_stop(sc);
		return;
	}

	/* Specify MTU. */
	CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
	    ETHER_HDR_LEN + ETHER_CRC_LEN + EVL_ENCAPLEN);

	/* Load our MAC address. */
	m = (uint16_t *)&sc->arpcom.ac_enaddr[0];
	CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
	CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));

	/* Enable or disable promiscuous mode as needed. */
	bnx_setpromisc(sc);

	/* Program multicast filter. */
	bnx_setmulti(sc);

	/* Init RX ring. */
	if (bnx_init_rx_ring_std(&sc->bnx_rx_std_ring)) {
		if_printf(ifp, "RX ring initialization failed\n");
		bnx_stop(sc);
		return;
	}

	/* Init jumbo RX ring. */
	if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN)) {
		if (bnx_init_rx_ring_jumbo(sc)) {
			if_printf(ifp, "Jumbo RX ring initialization failed\n");
			bnx_stop(sc);
			return;
		}
	}

	/* Init our RX return ring index */
	for (i = 0; i < sc->bnx_rx_retcnt; ++i) {
		struct bnx_rx_ret_ring *ret = &sc->bnx_rx_ret_ring[i];

		ret->bnx_rx_saved_considx = 0;
		ret->bnx_rx_cnt = 0;
	}

	/* Init TX ring. */
	for (i = 0; i < sc->bnx_tx_ringcnt; ++i)
		bnx_init_tx_ring(&sc->bnx_tx_ring[i]);

	/* Enable TX MAC state machine lockup fix. */
	mode = CSR_READ_4(sc, BGE_TX_MODE);
	mode |= BGE_TXMODE_MBUF_LOCKUP_FIX;
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5720 ||
	    sc->bnx_asicrev == BGE_ASICREV_BCM5762) {
		/* Preserve the chip's jumbo-frame/count-down mode bits. */
		mode &= ~(BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE);
		mode |= CSR_READ_4(sc, BGE_TX_MODE) &
		    (BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE);
	}
	/* Turn on transmitter */
	CSR_WRITE_4(sc, BGE_TX_MODE, mode | BGE_TXMODE_ENABLE);
	DELAY(100);

	/* Initialize RSS */
	mode = BGE_RXMODE_ENABLE | BGE_RXMODE_IPV6_ENABLE;
	if (BNX_RSS_ENABLED(sc)) {
		bnx_init_rss(sc);
		mode |= BGE_RXMODE_RSS_ENABLE |
		    BGE_RXMODE_RSS_HASH_MASK_BITS |
		    BGE_RXMODE_RSS_IPV4_HASH |
		    BGE_RXMODE_RSS_TCP_IPV4_HASH;
	}
	/* Turn on receiver */
	BNX_SETBIT(sc, BGE_RX_MODE, mode);
	DELAY(10);

	/*
	 * Set the number of good frames to receive after RX MBUF
	 * Low Watermark has been reached. After the RX MAC receives
	 * this number of frames, it will drop subsequent incoming
	 * frames until the MBUF High Watermark is reached.
	 */
	if (BNX_IS_57765_FAMILY(sc))
		CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 1);
	else
		CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 2);

	if (sc->bnx_intr_type == PCI_INTR_TYPE_MSI ||
	    sc->bnx_intr_type == PCI_INTR_TYPE_MSIX) {
		if (bootverbose) {
			if_printf(ifp, "MSI_MODE: %#x\n",
			    CSR_READ_4(sc, BGE_MSI_MODE));
		}
	}

	/* Tell firmware we're alive. */
	BNX_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);

	/* Enable host interrupts if polling(4) is not enabled. */
	PCI_SETBIT(sc->bnx_dev, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA, 4);

	polling = FALSE;
#ifdef IFPOLL_ENABLE
	if (ifp->if_flags & IFF_NPOLLING)
		polling = TRUE;
#endif
	if (polling)
		bnx_disable_intr(sc);
	else
		bnx_enable_intr(sc);
	bnx_set_tick_cpuid(sc, polling);

	ifp->if_flags |= IFF_RUNNING;
	for (i = 0; i < sc->bnx_tx_ringcnt; ++i) {
		struct bnx_tx_ring *txr = &sc->bnx_tx_ring[i];

		ifsq_clr_oactive(txr->bnx_ifsq);
		ifsq_watchdog_start(&txr->bnx_tx_watchdog);
	}

	bnx_ifmedia_upd(ifp);

	callout_reset_bycpu(&sc->bnx_tick_timer, hz, bnx_tick, sc,
	    sc->bnx_tick_cpuid);
}

/*
 * Set media options.
 */
static int
bnx_ifmedia_upd(struct ifnet *ifp)
{
	struct bnx_softc *sc = ifp->if_softc;

	/* If this is a 1000baseX NIC, enable the TBI port. */
	if (sc->bnx_flags & BNX_FLAG_TBI) {
		struct ifmedia *ifm = &sc->bnx_ifmedia;

		if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
			return(EINVAL);

		switch(IFM_SUBTYPE(ifm->ifm_media)) {
		case IFM_AUTO:
			break;

		case IFM_1000_SX:
			if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
				BNX_CLRBIT(sc, BGE_MAC_MODE,
				    BGE_MACMODE_HALF_DUPLEX);
			} else {
				BNX_SETBIT(sc, BGE_MAC_MODE,
				    BGE_MACMODE_HALF_DUPLEX);
			}
			DELAY(40);
			break;
		default:
			return(EINVAL);
		}
	} else {
		struct mii_data *mii = device_get_softc(sc->bnx_miibus);

		sc->bnx_link_evt++;
		sc->bnx_link = 0;
		if (mii->mii_instance) {
			struct mii_softc *miisc;

			LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
				mii_phy_reset(miisc);
		}
		mii_mediachg(mii);

		/*
		 * Force an interrupt so that we will call bnx_link_upd
		 * if needed and clear any pending link state attention.
		 * Without this we are not getting any further interrupts
		 * for link state changes and thus will not UP the link and
		 * not be able to send in bnx_start. The only way to get
		 * things working was to receive a packet and get an RX
		 * intr.
		 *
		 * bnx_tick should help for fiber cards and we might not
		 * need to do this here if BNX_FLAG_TBI is set but as
		 * we poll for fiber anyway it should not harm.
		 */
		BNX_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
	}
	return(0);
}

/*
 * Report current media status.
 */
static void
bnx_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct bnx_softc *sc = ifp->if_softc;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	if (sc->bnx_flags & BNX_FLAG_TBI) {
		/* Fiber (TBI): read link state straight from the MAC. */
		ifmr->ifm_status = IFM_AVALID;
		ifmr->ifm_active = IFM_ETHER;
		if (CSR_READ_4(sc, BGE_MAC_STS) &
		    BGE_MACSTAT_TBI_PCS_SYNCHED) {
			ifmr->ifm_status |= IFM_ACTIVE;
		} else {
			ifmr->ifm_active |= IFM_NONE;
			return;
		}

		ifmr->ifm_active |= IFM_1000_SX;
		if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
			ifmr->ifm_active |= IFM_HDX;
		else
			ifmr->ifm_active |= IFM_FDX;
	} else {
		/* Copper: defer to the PHY via the MII layer. */
		struct mii_data *mii = device_get_softc(sc->bnx_miibus);

		mii_pollstat(mii);
		ifmr->ifm_active = mii->mii_media_active;
		ifmr->ifm_status = mii->mii_media_status;
	}
}

/*
 * Handle interface ioctls: MTU, flags, multicast, media and
 * capability changes.  Called with all serializers held.
 */
static int
bnx_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
{
	struct bnx_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int mask, error = 0;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	switch (command) {
	case SIOCSIFMTU:
		if ((!BNX_IS_JUMBO_CAPABLE(sc) && ifr->ifr_mtu > ETHERMTU) ||
		    (BNX_IS_JUMBO_CAPABLE(sc) &&
		     ifr->ifr_mtu > BNX_JUMBO_MTU)) {
			error = EINVAL;
		} else if (ifp->if_mtu != ifr->ifr_mtu) {
			ifp->if_mtu = ifr->ifr_mtu;
			if (ifp->if_flags & IFF_RUNNING)
				bnx_init(sc);
		}
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING) {
				mask = ifp->if_flags ^ sc->bnx_if_flags;

				/*
				 * If only the state of the PROMISC flag
				 * changed, then just use the 'set promisc
				 * mode' command instead of reinitializing
				 * the entire NIC. Doing a full re-init
				 * means reloading the firmware and waiting
				 * for it to start up, which may take a
				 * second or two.  Similarly for ALLMULTI.
				 */
				if (mask & IFF_PROMISC)
					bnx_setpromisc(sc);
				if (mask & IFF_ALLMULTI)
					bnx_setmulti(sc);
			} else {
				bnx_init(sc);
			}
		} else if (ifp->if_flags & IFF_RUNNING) {
			bnx_stop(sc);
		}
		sc->bnx_if_flags = ifp->if_flags;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING)
			bnx_setmulti(sc);
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		if (sc->bnx_flags & BNX_FLAG_TBI) {
			error = ifmedia_ioctl(ifp, ifr,
			    &sc->bnx_ifmedia, command);
		} else {
			struct mii_data *mii;

			mii = device_get_softc(sc->bnx_miibus);
			error = ifmedia_ioctl(ifp, ifr,
			    &mii->mii_media, command);
		}
		break;
	case SIOCSIFCAP:
		/* Keep if_hwassist in sync with the enabled capabilities. */
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_HWCSUM) {
			ifp->if_capenable ^= (mask & IFCAP_HWCSUM);
			if (ifp->if_capenable & IFCAP_TXCSUM)
				ifp->if_hwassist |= BNX_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~BNX_CSUM_FEATURES;
		}
		if (mask & IFCAP_TSO) {
			ifp->if_capenable ^= (mask & IFCAP_TSO);
			if (ifp->if_capenable & IFCAP_TSO)
				ifp->if_hwassist |= CSUM_TSO;
			else
				ifp->if_hwassist &= ~CSUM_TSO;
		}
		if (mask & IFCAP_RSS)
			ifp->if_capenable ^= IFCAP_RSS;
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}
	return error;
}

/*
 * TX watchdog expired: the chip stopped reclaiming descriptors, so
 * reset the hardware and reschedule all pending transmissions.
 */
static void
bnx_watchdog(struct ifaltq_subque *ifsq)
{
	struct ifnet *ifp = ifsq_get_ifp(ifsq);
	struct bnx_softc *sc = ifp->if_softc;
	int i;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	if_printf(ifp, "watchdog timeout -- resetting\n");

	bnx_init(sc);

	IFNET_STAT_INC(ifp, oerrors, 1);

	for (i = 0; i < sc->bnx_tx_ringcnt; ++i)
		ifsq_devstart_sched(sc->bnx_tx_ring[i].bnx_ifsq);
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX
 * lists.
 */
static void
bnx_stop(struct bnx_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int i;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	callout_stop(&sc->bnx_tick_timer);

	/* Disable host interrupts. */
	bnx_disable_intr(sc);

	/*
	 * Tell firmware we're shutting down.
	 */
	bnx_sig_pre_reset(sc, BNX_RESET_SHUTDOWN);

	/*
	 * Disable all of the receiver blocks
	 */
	bnx_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
	bnx_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
	bnx_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
	bnx_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
	bnx_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
	bnx_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);

	/*
	 * Disable all of the transmit blocks
	 */
	bnx_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
	bnx_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
	bnx_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
	bnx_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
	bnx_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
	bnx_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);

	/*
	 * Shut down all of the memory managers and related
	 * state machines.
	 */
	bnx_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
	bnx_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);

	bnx_reset(sc);
	bnx_sig_post_reset(sc, BNX_RESET_SHUTDOWN);

	/*
	 * Tell firmware the driver is no longer running.
	 */
	BNX_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);

	/* Free the RX lists. */
	bnx_free_rx_ring_std(&sc->bnx_rx_std_ring);

	/* Free jumbo RX list. */
	if (BNX_IS_JUMBO_CAPABLE(sc))
		bnx_free_rx_ring_jumbo(sc);

	/* Free TX buffers. */
	for (i = 0; i < sc->bnx_tx_ringcnt; ++i) {
		struct bnx_tx_ring *txr = &sc->bnx_tx_ring[i];

		txr->bnx_saved_status_tag = 0;
		bnx_free_tx_ring(txr);
	}

	/* Clear saved status tag */
	for (i = 0; i < sc->bnx_rx_retcnt; ++i)
		sc->bnx_rx_ret_ring[i].bnx_saved_status_tag = 0;

	sc->bnx_link = 0;
	sc->bnx_coal_chg = 0;

	ifp->if_flags &= ~IFF_RUNNING;
	for (i = 0; i < sc->bnx_tx_ringcnt; ++i) {
		struct bnx_tx_ring *txr = &sc->bnx_tx_ring[i];

		ifsq_clr_oactive(txr->bnx_ifsq);
		ifsq_watchdog_stop(&txr->bnx_tx_watchdog);
	}
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static void
bnx_shutdown(device_t dev)
{
	struct bnx_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ifnet_serialize_all(ifp);
	bnx_stop(sc);
	ifnet_deserialize_all(ifp);
}

/*
 * Device suspend method: quiesce the hardware.
 */
static int
bnx_suspend(device_t dev)
{
	struct bnx_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ifnet_serialize_all(ifp);
	bnx_stop(sc);
	ifnet_deserialize_all(ifp);

	return 0;
}

/*
 * Device resume method: re-init if the interface was up and restart
 * any queued transmissions.
 */
static int
bnx_resume(device_t dev)
{
	struct bnx_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ifnet_serialize_all(ifp);

	if (ifp->if_flags & IFF_UP) {
		int i;

		bnx_init(sc);
		for (i = 0; i < sc->bnx_tx_ringcnt; ++i)
			ifsq_devstart_sched(sc->bnx_tx_ring[i].bnx_ifsq);
	}

	ifnet_deserialize_all(ifp);

	return 0;
}

/*
 * Program the RX MAC promiscuous bit from the interface flags.
 */
static void
bnx_setpromisc(struct bnx_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;

	if (ifp->if_flags & IFF_PROMISC)
		BNX_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
	else
		BNX_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
}

/*
 * Tear down all DMA resources created by bnx_dma_alloc(), in reverse
 * order of creation.  Safe to call on a partially constructed softc.
 */
static void
bnx_dma_free(struct bnx_softc *sc)
{
	struct bnx_rx_std_ring *std = &sc->bnx_rx_std_ring;
	int i;

	/* Destroy RX return rings */
	if (sc->bnx_rx_ret_ring != NULL) {
		for (i = 0; i < sc->bnx_rx_retcnt; ++i)
			bnx_destroy_rx_ret_ring(&sc->bnx_rx_ret_ring[i]);
		kfree(sc->bnx_rx_ret_ring, M_DEVBUF);
	}

	/* Destroy RX mbuf DMA stuffs. */
	if (std->bnx_rx_mtag != NULL) {
		for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
			/* All mbufs must already have been freed. */
			KKASSERT(std->bnx_rx_std_buf[i].bnx_rx_mbuf == NULL);
			bus_dmamap_destroy(std->bnx_rx_mtag,
			    std->bnx_rx_std_buf[i].bnx_rx_dmamap);
		}
		bus_dma_tag_destroy(std->bnx_rx_mtag);
	}

	/* Destroy standard RX ring */
	bnx_dma_block_free(std->bnx_rx_std_ring_tag,
	    std->bnx_rx_std_ring_map, std->bnx_rx_std_ring);

	/* Destroy TX rings */
	if (sc->bnx_tx_ring != NULL) {
		for (i = 0; i < sc->bnx_tx_ringcnt; ++i)
			bnx_destroy_tx_ring(&sc->bnx_tx_ring[i]);
		kfree(sc->bnx_tx_ring, M_DEVBUF);
	}

	if (BNX_IS_JUMBO_CAPABLE(sc))
		bnx_free_jumbo_mem(sc);

	/* Destroy status blocks */
	for (i = 0; i < sc->bnx_intr_cnt; ++i) {
		struct bnx_intr_data *intr = &sc->bnx_intr_data[i];

		bnx_dma_block_free(intr->bnx_status_tag,
		    intr->bnx_status_map, intr->bnx_status_block);
	}

	/* Destroy the parent tag */
	if (sc->bnx_cdata.bnx_parent_tag != NULL)
		bus_dma_tag_destroy(sc->bnx_cdata.bnx_parent_tag);
}

/*
 * Allocate all DMA resources: parent tag, status blocks, RX mbuf
 * maps, standard RX ring, RX return rings and TX rings.
 */
static int
bnx_dma_alloc(device_t dev)
{
	struct bnx_softc *sc = device_get_softc(dev);
	struct bnx_rx_std_ring *std = &sc->bnx_rx_std_ring;
	int i, error, mbx;

	/*
	 * Allocate the parent bus DMA tag appropriate for PCI.
	 *
	 * All of the NetExtreme/NetLink controllers have 4GB boundary
	 * DMA bug.
	 * Whenever an address crosses a multiple of the 4GB boundary
	 * (including 4GB, 8Gb, 12Gb, etc.) and makes the transition
	 * from 0xX_FFFF_FFFF to 0x(X+1)_0000_0000 an internal DMA
	 * state machine will lockup and cause the device to hang.
	 */
	error = bus_dma_tag_create(NULL, 1, BGE_DMA_BOUNDARY_4G,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT,
	    0, &sc->bnx_cdata.bnx_parent_tag);
	if (error) {
		device_printf(dev, "could not create parent DMA tag\n");
		return error;
	}

	/*
	 * Create DMA stuffs for status blocks.
	 */
	for (i = 0; i < sc->bnx_intr_cnt; ++i) {
		struct bnx_intr_data *intr = &sc->bnx_intr_data[i];

		error = bnx_dma_block_alloc(sc,
		    __VM_CACHELINE_ALIGN(BGE_STATUS_BLK_SZ),
		    &intr->bnx_status_tag, &intr->bnx_status_map,
		    (void *)&intr->bnx_status_block,
		    &intr->bnx_status_block_paddr);
		if (error) {
			device_printf(dev,
			    "could not create %dth status block\n", i);
			return error;
		}
	}
	/* Interrupt 0's status block carries the canonical status word. */
	sc->bnx_hw_status = &sc->bnx_intr_data[0].bnx_status_block->bge_status;
	if (sc->bnx_flags & BNX_FLAG_STATUS_HASTAG) {
		sc->bnx_hw_status_tag =
		    &sc->bnx_intr_data[0].bnx_status_block->bge_status_tag;
	}

	/*
	 * Create DMA tag and maps for RX mbufs.
	 */
	std->bnx_sc = sc;
	lwkt_serialize_init(&std->bnx_rx_std_serialize);
	error = bus_dma_tag_create(sc->bnx_cdata.bnx_parent_tag, 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
	    NULL, NULL, MCLBYTES, 1, MCLBYTES,
	    BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK, &std->bnx_rx_mtag);
	if (error) {
		device_printf(dev, "could not create RX mbuf DMA tag\n");
		return error;
	}

	for (i = 0; i < BGE_STD_RX_RING_CNT; ++i) {
		error = bus_dmamap_create(std->bnx_rx_mtag, BUS_DMA_WAITOK,
		    &std->bnx_rx_std_buf[i].bnx_rx_dmamap);
		if (error) {
			int j;

			/* Unwind the maps created so far. */
			for (j = 0; j < i; ++j) {
				bus_dmamap_destroy(std->bnx_rx_mtag,
				    std->bnx_rx_std_buf[j].bnx_rx_dmamap);
			}
			bus_dma_tag_destroy(std->bnx_rx_mtag);
			std->bnx_rx_mtag = NULL;

			device_printf(dev,
			    "could not create %dth RX mbuf DMA map\n", i);
			return error;
		}
	}

	/*
	 * Create DMA stuffs for standard RX ring.
	 */
	error = bnx_dma_block_alloc(sc, BGE_STD_RX_RING_SZ,
	    &std->bnx_rx_std_ring_tag,
	    &std->bnx_rx_std_ring_map,
	    (void *)&std->bnx_rx_std_ring,
	    &std->bnx_rx_std_ring_paddr);
	if (error) {
		device_printf(dev, "could not create std RX ring\n");
		return error;
	}

	/*
	 * Create RX return rings
	 */
	mbx = BGE_MBX_RX_CONS0_LO;
	sc->bnx_rx_ret_ring = kmalloc_cachealign(
	    sizeof(struct bnx_rx_ret_ring) * sc->bnx_rx_retcnt, M_DEVBUF,
	    M_WAITOK | M_ZERO);
	for (i = 0; i < sc->bnx_rx_retcnt; ++i) {
		struct bnx_rx_ret_ring *ret = &sc->bnx_rx_ret_ring[i];
		struct bnx_intr_data *intr;

		ret->bnx_sc = sc;
		ret->bnx_std = std;
		ret->bnx_rx_mbx = mbx;
		ret->bnx_rx_cntmax = (BGE_STD_RX_RING_CNT / 4) /
		    sc->bnx_rx_retcnt;
		ret->bnx_rx_mask = 1 << i;

		/* With RSS each return ring gets its own interrupt slot. */
		if (!BNX_RSS_ENABLED(sc)) {
			intr = &sc->bnx_intr_data[0];
		} else {
			KKASSERT(i + 1 < sc->bnx_intr_cnt);
			intr = &sc->bnx_intr_data[i + 1];
		}

		/*
		 * Each return ring reads its consumer index from a
		 * different field of the status block.
		 */
		if (i == 0) {
			ret->bnx_rx_considx =
			    &intr->bnx_status_block->bge_idx[0].bge_rx_prod_idx;
		} else if (i == 1) {
			ret->bnx_rx_considx =
			    &intr->bnx_status_block->bge_rx_jumbo_cons_idx;
		} else if (i == 2) {
			ret->bnx_rx_considx =
			    &intr->bnx_status_block->bge_rsvd1;
		} else if (i == 3) {
			ret->bnx_rx_considx =
			    &intr->bnx_status_block->bge_rx_mini_cons_idx;
		} else {
			panic("unknown RX return ring %d\n", i);
		}
		ret->bnx_hw_status_tag =
		    &intr->bnx_status_block->bge_status_tag;

		error = bnx_create_rx_ret_ring(ret);
		if (error) {
			device_printf(dev,
			    "could not create %dth RX ret ring\n", i);
			return error;
		}
		mbx += 8;
	}

	/*
	 * Create TX rings
	 */
	sc->bnx_tx_ring = kmalloc_cachealign(
	    sizeof(struct bnx_tx_ring) * sc->bnx_tx_ringcnt, M_DEVBUF,
	    M_WAITOK | M_ZERO);
	for (i = 0; i < sc->bnx_tx_ringcnt; ++i) {
		struct bnx_tx_ring *txr = &sc->bnx_tx_ring[i];
		struct bnx_intr_data *intr;

		txr->bnx_sc = sc;
		txr->bnx_tx_mbx = bnx_tx_mailbox[i];

		if (sc->bnx_tx_ringcnt == 1) {
			intr = &sc->bnx_intr_data[0];
		} else {
			KKASSERT(i + 1 < sc->bnx_intr_cnt);
			intr = &sc->bnx_intr_data[i + 1];
		}

		if ((sc->bnx_flags & BNX_FLAG_RXTX_BUNDLE) == 0) {
			txr->bnx_hw_status_tag =
			    &intr->bnx_status_block->bge_status_tag;
		}
		txr->bnx_tx_considx =
		    &intr->bnx_status_block->bge_idx[0].bge_tx_cons_idx;

		error = bnx_create_tx_ring(txr);
		if (error) {
			device_printf(dev,
			    "could not create %dth TX ring\n", i);
			return error;
		}
	}

	/*
	 * Create jumbo buffer pool.
4311 */ 4312 if (BNX_IS_JUMBO_CAPABLE(sc)) { 4313 error = bnx_alloc_jumbo_mem(sc); 4314 if (error) { 4315 device_printf(dev, 4316 "could not create jumbo buffer pool\n"); 4317 return error; 4318 } 4319 } 4320 4321 return 0; 4322 } 4323 4324 static int 4325 bnx_dma_block_alloc(struct bnx_softc *sc, bus_size_t size, bus_dma_tag_t *tag, 4326 bus_dmamap_t *map, void **addr, bus_addr_t *paddr) 4327 { 4328 bus_dmamem_t dmem; 4329 int error; 4330 4331 error = bus_dmamem_coherent(sc->bnx_cdata.bnx_parent_tag, PAGE_SIZE, 0, 4332 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 4333 size, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem); 4334 if (error) 4335 return error; 4336 4337 *tag = dmem.dmem_tag; 4338 *map = dmem.dmem_map; 4339 *addr = dmem.dmem_addr; 4340 *paddr = dmem.dmem_busaddr; 4341 4342 return 0; 4343 } 4344 4345 static void 4346 bnx_dma_block_free(bus_dma_tag_t tag, bus_dmamap_t map, void *addr) 4347 { 4348 if (tag != NULL) { 4349 bus_dmamap_unload(tag, map); 4350 bus_dmamem_free(tag, addr, map); 4351 bus_dma_tag_destroy(tag); 4352 } 4353 } 4354 4355 static void 4356 bnx_tbi_link_upd(struct bnx_softc *sc, uint32_t status) 4357 { 4358 struct ifnet *ifp = &sc->arpcom.ac_if; 4359 4360 #define PCS_ENCODE_ERR (BGE_MACSTAT_PORT_DECODE_ERROR|BGE_MACSTAT_MI_COMPLETE) 4361 4362 /* 4363 * Sometimes PCS encoding errors are detected in 4364 * TBI mode (on fiber NICs), and for some reason 4365 * the chip will signal them as link changes. 4366 * If we get a link change event, but the 'PCS 4367 * encoding error' bit in the MAC status register 4368 * is set, don't bother doing a link check. 4369 * This avoids spurious "gigabit link up" messages 4370 * that sometimes appear on fiber NICs during 4371 * periods of heavy traffic. 
4372 */ 4373 if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) { 4374 if (!sc->bnx_link) { 4375 sc->bnx_link++; 4376 if (sc->bnx_asicrev == BGE_ASICREV_BCM5704) { 4377 BNX_CLRBIT(sc, BGE_MAC_MODE, 4378 BGE_MACMODE_TBI_SEND_CFGS); 4379 DELAY(40); 4380 } 4381 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF); 4382 4383 if (bootverbose) 4384 if_printf(ifp, "link UP\n"); 4385 4386 ifp->if_link_state = LINK_STATE_UP; 4387 if_link_state_change(ifp); 4388 } 4389 } else if ((status & PCS_ENCODE_ERR) != PCS_ENCODE_ERR) { 4390 if (sc->bnx_link) { 4391 sc->bnx_link = 0; 4392 4393 if (bootverbose) 4394 if_printf(ifp, "link DOWN\n"); 4395 4396 ifp->if_link_state = LINK_STATE_DOWN; 4397 if_link_state_change(ifp); 4398 } 4399 } 4400 4401 #undef PCS_ENCODE_ERR 4402 4403 /* Clear the attention. */ 4404 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED | 4405 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE | 4406 BGE_MACSTAT_LINK_CHANGED); 4407 } 4408 4409 static void 4410 bnx_copper_link_upd(struct bnx_softc *sc, uint32_t status __unused) 4411 { 4412 struct ifnet *ifp = &sc->arpcom.ac_if; 4413 struct mii_data *mii = device_get_softc(sc->bnx_miibus); 4414 4415 mii_pollstat(mii); 4416 bnx_miibus_statchg(sc->bnx_dev); 4417 4418 if (bootverbose) { 4419 if (sc->bnx_link) 4420 if_printf(ifp, "link UP\n"); 4421 else 4422 if_printf(ifp, "link DOWN\n"); 4423 } 4424 4425 /* Clear the attention. 
*/ 4426 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED | 4427 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE | 4428 BGE_MACSTAT_LINK_CHANGED); 4429 } 4430 4431 static void 4432 bnx_autopoll_link_upd(struct bnx_softc *sc, uint32_t status __unused) 4433 { 4434 struct ifnet *ifp = &sc->arpcom.ac_if; 4435 struct mii_data *mii = device_get_softc(sc->bnx_miibus); 4436 4437 mii_pollstat(mii); 4438 4439 if (!sc->bnx_link && 4440 (mii->mii_media_status & IFM_ACTIVE) && 4441 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { 4442 sc->bnx_link++; 4443 if (bootverbose) 4444 if_printf(ifp, "link UP\n"); 4445 } else if (sc->bnx_link && 4446 (!(mii->mii_media_status & IFM_ACTIVE) || 4447 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) { 4448 sc->bnx_link = 0; 4449 if (bootverbose) 4450 if_printf(ifp, "link DOWN\n"); 4451 } 4452 4453 /* Clear the attention. */ 4454 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED | 4455 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE | 4456 BGE_MACSTAT_LINK_CHANGED); 4457 } 4458 4459 static int 4460 bnx_sysctl_rx_coal_ticks(SYSCTL_HANDLER_ARGS) 4461 { 4462 struct bnx_softc *sc = arg1; 4463 4464 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req, 4465 &sc->bnx_rx_coal_ticks, 4466 BNX_RX_COAL_TICKS_MIN, BNX_RX_COAL_TICKS_MAX, 4467 BNX_RX_COAL_TICKS_CHG); 4468 } 4469 4470 static int 4471 bnx_sysctl_tx_coal_ticks(SYSCTL_HANDLER_ARGS) 4472 { 4473 struct bnx_softc *sc = arg1; 4474 4475 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req, 4476 &sc->bnx_tx_coal_ticks, 4477 BNX_TX_COAL_TICKS_MIN, BNX_TX_COAL_TICKS_MAX, 4478 BNX_TX_COAL_TICKS_CHG); 4479 } 4480 4481 static int 4482 bnx_sysctl_rx_coal_bds(SYSCTL_HANDLER_ARGS) 4483 { 4484 struct bnx_softc *sc = arg1; 4485 4486 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req, 4487 &sc->bnx_rx_coal_bds, 4488 BNX_RX_COAL_BDS_MIN, BNX_RX_COAL_BDS_MAX, 4489 BNX_RX_COAL_BDS_CHG); 4490 } 4491 4492 static int 4493 bnx_sysctl_rx_coal_bds_poll(SYSCTL_HANDLER_ARGS) 4494 { 4495 struct bnx_softc *sc = arg1; 
4496 4497 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req, 4498 &sc->bnx_rx_coal_bds_poll, 4499 BNX_RX_COAL_BDS_MIN, BNX_RX_COAL_BDS_MAX, 4500 BNX_RX_COAL_BDS_CHG); 4501 } 4502 4503 static int 4504 bnx_sysctl_tx_coal_bds(SYSCTL_HANDLER_ARGS) 4505 { 4506 struct bnx_softc *sc = arg1; 4507 4508 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req, 4509 &sc->bnx_tx_coal_bds, 4510 BNX_TX_COAL_BDS_MIN, BNX_TX_COAL_BDS_MAX, 4511 BNX_TX_COAL_BDS_CHG); 4512 } 4513 4514 static int 4515 bnx_sysctl_tx_coal_bds_poll(SYSCTL_HANDLER_ARGS) 4516 { 4517 struct bnx_softc *sc = arg1; 4518 4519 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req, 4520 &sc->bnx_tx_coal_bds_poll, 4521 BNX_TX_COAL_BDS_MIN, BNX_TX_COAL_BDS_MAX, 4522 BNX_TX_COAL_BDS_CHG); 4523 } 4524 4525 static int 4526 bnx_sysctl_rx_coal_bds_int(SYSCTL_HANDLER_ARGS) 4527 { 4528 struct bnx_softc *sc = arg1; 4529 4530 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req, 4531 &sc->bnx_rx_coal_bds_int, 4532 BNX_RX_COAL_BDS_MIN, BNX_RX_COAL_BDS_MAX, 4533 BNX_RX_COAL_BDS_INT_CHG); 4534 } 4535 4536 static int 4537 bnx_sysctl_tx_coal_bds_int(SYSCTL_HANDLER_ARGS) 4538 { 4539 struct bnx_softc *sc = arg1; 4540 4541 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req, 4542 &sc->bnx_tx_coal_bds_int, 4543 BNX_TX_COAL_BDS_MIN, BNX_TX_COAL_BDS_MAX, 4544 BNX_TX_COAL_BDS_INT_CHG); 4545 } 4546 4547 static int 4548 bnx_sysctl_coal_chg(SYSCTL_HANDLER_ARGS, uint32_t *coal, 4549 int coal_min, int coal_max, uint32_t coal_chg_mask) 4550 { 4551 struct bnx_softc *sc = arg1; 4552 struct ifnet *ifp = &sc->arpcom.ac_if; 4553 int error = 0, v; 4554 4555 ifnet_serialize_all(ifp); 4556 4557 v = *coal; 4558 error = sysctl_handle_int(oidp, &v, 0, req); 4559 if (!error && req->newptr != NULL) { 4560 if (v < coal_min || v > coal_max) { 4561 error = EINVAL; 4562 } else { 4563 *coal = v; 4564 sc->bnx_coal_chg |= coal_chg_mask; 4565 4566 /* Commit changes */ 4567 bnx_coal_change(sc); 4568 } 4569 } 4570 4571 ifnet_deserialize_all(ifp); 4572 return error; 4573 } 4574 4575 
/*
 * Push pending coalescing parameter changes (flags accumulated in
 * sc->bnx_coal_chg by the sysctl handlers) into the host coalescing
 * engine, then clear the change mask.  With a single RX/TX ring the
 * base HCC register is programmed directly; with multiple rings the
 * base register is zeroed and the per-vector registers are used.
 * Unused vector slots are always written with 0.
 */
static void
bnx_coal_change(struct bnx_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int i;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	if (sc->bnx_coal_chg & BNX_RX_COAL_TICKS_CHG) {
		if (sc->bnx_rx_retcnt == 1) {
			CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS,
			    sc->bnx_rx_coal_ticks);
			i = 0;
		} else {
			CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, 0);
			for (i = 0; i < sc->bnx_rx_retcnt; ++i) {
				CSR_WRITE_4(sc, BGE_VEC1_RX_COAL_TICKS +
				    (i * BGE_VEC_COALSET_SIZE),
				    sc->bnx_rx_coal_ticks);
			}
		}
		/* Zero the remaining, unused vector slots. */
		for (; i < BNX_INTR_MAX - 1; ++i) {
			CSR_WRITE_4(sc, BGE_VEC1_RX_COAL_TICKS +
			    (i * BGE_VEC_COALSET_SIZE), 0);
		}
		if (bootverbose) {
			if_printf(ifp, "rx_coal_ticks -> %u\n",
			    sc->bnx_rx_coal_ticks);
		}
	}

	if (sc->bnx_coal_chg & BNX_TX_COAL_TICKS_CHG) {
		if (sc->bnx_tx_ringcnt == 1) {
			CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS,
			    sc->bnx_tx_coal_ticks);
			i = 0;
		} else {
			CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, 0);
			for (i = 0; i < sc->bnx_tx_ringcnt; ++i) {
				CSR_WRITE_4(sc, BGE_VEC1_TX_COAL_TICKS +
				    (i * BGE_VEC_COALSET_SIZE),
				    sc->bnx_tx_coal_ticks);
			}
		}
		for (; i < BNX_INTR_MAX - 1; ++i) {
			CSR_WRITE_4(sc, BGE_VEC1_TX_COAL_TICKS +
			    (i * BGE_VEC_COALSET_SIZE), 0);
		}
		if (bootverbose) {
			if_printf(ifp, "tx_coal_ticks -> %u\n",
			    sc->bnx_tx_coal_ticks);
		}
	}

	if (sc->bnx_coal_chg & BNX_RX_COAL_BDS_CHG) {
		uint32_t rx_coal_bds;

		/* Polling mode uses its own, separately tuned value. */
		if (ifp->if_flags & IFF_NPOLLING)
			rx_coal_bds = sc->bnx_rx_coal_bds_poll;
		else
			rx_coal_bds = sc->bnx_rx_coal_bds;

		if (sc->bnx_rx_retcnt == 1) {
			CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, rx_coal_bds);
			i = 0;
		} else {
			CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, 0);
			for (i = 0; i < sc->bnx_rx_retcnt; ++i) {
				CSR_WRITE_4(sc, BGE_VEC1_RX_MAX_COAL_BDS +
				    (i * BGE_VEC_COALSET_SIZE),
				    rx_coal_bds);
			}
		}
		for (; i < BNX_INTR_MAX - 1; ++i) {
			CSR_WRITE_4(sc, BGE_VEC1_RX_MAX_COAL_BDS +
			    (i * BGE_VEC_COALSET_SIZE), 0);
		}
		if (bootverbose) {
			if_printf(ifp, "%srx_coal_bds -> %u\n",
			    (ifp->if_flags & IFF_NPOLLING) ? "polling " : "",
			    rx_coal_bds);
		}
	}

	if (sc->bnx_coal_chg & BNX_TX_COAL_BDS_CHG) {
		uint32_t tx_coal_bds;

		if (ifp->if_flags & IFF_NPOLLING)
			tx_coal_bds = sc->bnx_tx_coal_bds_poll;
		else
			tx_coal_bds = sc->bnx_tx_coal_bds;

		if (sc->bnx_tx_ringcnt == 1) {
			CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, tx_coal_bds);
			i = 0;
		} else {
			CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, 0);
			for (i = 0; i < sc->bnx_tx_ringcnt; ++i) {
				CSR_WRITE_4(sc, BGE_VEC1_TX_MAX_COAL_BDS +
				    (i * BGE_VEC_COALSET_SIZE), tx_coal_bds);
			}
		}
		for (; i < BNX_INTR_MAX - 1; ++i) {
			CSR_WRITE_4(sc, BGE_VEC1_TX_MAX_COAL_BDS +
			    (i * BGE_VEC_COALSET_SIZE), 0);
		}
		if (bootverbose) {
			if_printf(ifp, "%stx_coal_bds -> %u\n",
			    (ifp->if_flags & IFF_NPOLLING) ?
			    "polling " : "",
			    tx_coal_bds);
		}
	}

	if (sc->bnx_coal_chg & BNX_RX_COAL_BDS_INT_CHG) {
		if (sc->bnx_rx_retcnt == 1) {
			CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT,
			    sc->bnx_rx_coal_bds_int);
			i = 0;
		} else {
			CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
			for (i = 0; i < sc->bnx_rx_retcnt; ++i) {
				CSR_WRITE_4(sc, BGE_VEC1_RX_MAX_COAL_BDS_INT +
				    (i * BGE_VEC_COALSET_SIZE),
				    sc->bnx_rx_coal_bds_int);
			}
		}
		for (; i < BNX_INTR_MAX - 1; ++i) {
			CSR_WRITE_4(sc, BGE_VEC1_RX_MAX_COAL_BDS_INT +
			    (i * BGE_VEC_COALSET_SIZE), 0);
		}
		if (bootverbose) {
			if_printf(ifp, "rx_coal_bds_int -> %u\n",
			    sc->bnx_rx_coal_bds_int);
		}
	}

	if (sc->bnx_coal_chg & BNX_TX_COAL_BDS_INT_CHG) {
		if (sc->bnx_tx_ringcnt == 1) {
			CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT,
			    sc->bnx_tx_coal_bds_int);
			i = 0;
		} else {
			CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);
			for (i = 0; i < sc->bnx_tx_ringcnt; ++i) {
				CSR_WRITE_4(sc, BGE_VEC1_TX_MAX_COAL_BDS_INT +
				    (i * BGE_VEC_COALSET_SIZE),
				    sc->bnx_tx_coal_bds_int);
			}
		}
		for (; i < BNX_INTR_MAX - 1; ++i) {
			CSR_WRITE_4(sc, BGE_VEC1_TX_MAX_COAL_BDS_INT +
			    (i * BGE_VEC_COALSET_SIZE), 0);
		}
		if (bootverbose) {
			if_printf(ifp, "tx_coal_bds_int -> %u\n",
			    sc->bnx_tx_coal_bds_int);
		}
	}

	sc->bnx_coal_chg = 0;
}

/*
 * Lost-interrupt watchdog for a vector handling both RX and TX
 * (status tag bug workaround).  If the hardware consumer indices
 * moved but the driver's saved indices did not advance since the
 * last check, the interrupt was probably lost; after one grace
 * period the interrupt handler is invoked manually.  Re-arms itself
 * via callout every BNX_INTR_CKINTVL.
 */
static void
bnx_check_intr_rxtx(void *xintr)
{
	struct bnx_intr_data *intr = xintr;
	struct bnx_rx_ret_ring *ret;
	struct bnx_tx_ring *txr;
	struct ifnet *ifp;

	lwkt_serialize_enter(intr->bnx_intr_serialize);

	KKASSERT(mycpuid == intr->bnx_intr_cpuid);

	ifp = &intr->bnx_sc->arpcom.ac_if;
	/* Stop checking once the interface is down or being polled. */
	if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) != IFF_RUNNING) {
		lwkt_serialize_exit(intr->bnx_intr_serialize);
		return;
	}

	txr = intr->bnx_txr;
	ret = intr->bnx_ret;

	if (*ret->bnx_rx_considx != ret->bnx_rx_saved_considx ||
	    *txr->bnx_tx_considx != txr->bnx_tx_saved_considx) {
		if (intr->bnx_rx_check_considx == ret->bnx_rx_saved_considx &&
		    intr->bnx_tx_check_considx == txr->bnx_tx_saved_considx) {
			if (!intr->bnx_intr_maylose) {
				/* First strike: wait one more interval. */
				intr->bnx_intr_maylose = TRUE;
				goto done;
			}
			if (bootverbose)
				if_printf(ifp, "lost interrupt\n");
			intr->bnx_intr_func(intr->bnx_intr_arg);
		}
	}
	intr->bnx_intr_maylose = FALSE;
	intr->bnx_rx_check_considx = ret->bnx_rx_saved_considx;
	intr->bnx_tx_check_considx = txr->bnx_tx_saved_considx;

done:
	callout_reset(&intr->bnx_intr_timer, BNX_INTR_CKINTVL,
	    intr->bnx_intr_check, intr);
	lwkt_serialize_exit(intr->bnx_intr_serialize);
}

/*
 * Lost-interrupt watchdog for a TX-only vector; same logic as
 * bnx_check_intr_rxtx() but tracking only the TX consumer index.
 */
static void
bnx_check_intr_tx(void *xintr)
{
	struct bnx_intr_data *intr = xintr;
	struct bnx_tx_ring *txr;
	struct ifnet *ifp;

	lwkt_serialize_enter(intr->bnx_intr_serialize);

	KKASSERT(mycpuid == intr->bnx_intr_cpuid);

	ifp = &intr->bnx_sc->arpcom.ac_if;
	if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) != IFF_RUNNING) {
		lwkt_serialize_exit(intr->bnx_intr_serialize);
		return;
	}

	txr = intr->bnx_txr;

	if (*txr->bnx_tx_considx != txr->bnx_tx_saved_considx) {
		if (intr->bnx_tx_check_considx == txr->bnx_tx_saved_considx) {
			if (!intr->bnx_intr_maylose) {
				intr->bnx_intr_maylose = TRUE;
				goto done;
			}
			if (bootverbose)
				if_printf(ifp, "lost interrupt\n");
			intr->bnx_intr_func(intr->bnx_intr_arg);
		}
	}
	intr->bnx_intr_maylose = FALSE;
	intr->bnx_tx_check_considx = txr->bnx_tx_saved_considx;

done:
	callout_reset(&intr->bnx_intr_timer, BNX_INTR_CKINTVL,
	    intr->bnx_intr_check, intr);
	lwkt_serialize_exit(intr->bnx_intr_serialize);
}

/*
 * Lost-interrupt watchdog for an RX-only vector; same logic as
 * bnx_check_intr_rxtx() but tracking only the RX consumer index.
 */
static void
bnx_check_intr_rx(void *xintr)
{
	struct bnx_intr_data *intr = xintr;
	struct bnx_rx_ret_ring *ret;
	struct ifnet *ifp;

	lwkt_serialize_enter(intr->bnx_intr_serialize);

	KKASSERT(mycpuid == intr->bnx_intr_cpuid);

	ifp = &intr->bnx_sc->arpcom.ac_if;
	if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) != IFF_RUNNING) {
		lwkt_serialize_exit(intr->bnx_intr_serialize);
		return;
	}

	ret = intr->bnx_ret;

	if (*ret->bnx_rx_considx != ret->bnx_rx_saved_considx) {
		if (intr->bnx_rx_check_considx == ret->bnx_rx_saved_considx) {
			if (!intr->bnx_intr_maylose) {
				intr->bnx_intr_maylose = TRUE;
				goto done;
			}
			if (bootverbose)
				if_printf(ifp, "lost interrupt\n");
			intr->bnx_intr_func(intr->bnx_intr_arg);
		}
	}
	intr->bnx_intr_maylose = FALSE;
	intr->bnx_rx_check_considx = ret->bnx_rx_saved_considx;

done:
	callout_reset(&intr->bnx_intr_timer, BNX_INTR_CKINTVL,
	    intr->bnx_intr_check, intr);
	lwkt_serialize_exit(intr->bnx_intr_serialize);
}

/*
 * Enable interrupt delivery: re-enable the serialized handlers,
 * acknowledge with the saved status tags, unmask the PCI interrupt
 * and force one interrupt so nothing pending is missed.  On chips
 * with the status tag bug, also start the lost-interrupt watchdog
 * callouts.
 */
static void
bnx_enable_intr(struct bnx_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int i;

	for (i = 0; i < sc->bnx_intr_cnt; ++i) {
		lwkt_serialize_handler_enable(
		    sc->bnx_intr_data[i].bnx_intr_serialize);
	}

	/*
	 * Enable interrupt.
	 */
	for (i = 0; i < sc->bnx_intr_cnt; ++i) {
		struct bnx_intr_data *intr = &sc->bnx_intr_data[i];

		bnx_writembx(sc, intr->bnx_intr_mbx,
		    (*intr->bnx_saved_status_tag) << 24);
		/* XXX Linux driver */
		bnx_writembx(sc, intr->bnx_intr_mbx,
		    (*intr->bnx_saved_status_tag) << 24);
	}

	/*
	 * Unmask the interrupt when we stop polling.
	 */
	PCI_CLRBIT(sc->bnx_dev, BGE_PCI_MISC_CTL,
	    BGE_PCIMISCCTL_MASK_PCI_INTR, 4);

	/*
	 * Trigger another interrupt, since above writing
	 * to interrupt mailbox0 may acknowledge pending
	 * interrupt.
	 */
	BNX_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);

	if (sc->bnx_flags & BNX_FLAG_STATUSTAG_BUG) {
		if (bootverbose)
			if_printf(ifp, "status tag bug workaround\n");

		for (i = 0; i < sc->bnx_intr_cnt; ++i) {
			struct bnx_intr_data *intr = &sc->bnx_intr_data[i];

			/* Vectors without a check routine need no watchdog. */
			if (intr->bnx_intr_check == NULL)
				continue;
			intr->bnx_intr_maylose = FALSE;
			intr->bnx_rx_check_considx = 0;
			intr->bnx_tx_check_considx = 0;
			callout_reset_bycpu(&intr->bnx_intr_timer,
			    BNX_INTR_CKINTVL, intr->bnx_intr_check, intr,
			    intr->bnx_intr_cpuid);
		}
	}
}

/*
 * Disable interrupt delivery: stop the watchdog callouts, mask the
 * PCI interrupt, acknowledge any asserted interrupt on every mailbox
 * and finally disable the serialized handlers.
 */
static void
bnx_disable_intr(struct bnx_softc *sc)
{
	int i;

	for (i = 0; i < sc->bnx_intr_cnt; ++i) {
		struct bnx_intr_data *intr = &sc->bnx_intr_data[i];

		callout_stop(&intr->bnx_intr_timer);
		intr->bnx_intr_maylose = FALSE;
		intr->bnx_rx_check_considx = 0;
		intr->bnx_tx_check_considx = 0;
	}

	/*
	 * Mask the interrupt when we start polling.
	 */
	PCI_SETBIT(sc->bnx_dev, BGE_PCI_MISC_CTL,
	    BGE_PCIMISCCTL_MASK_PCI_INTR, 4);

	/*
	 * Acknowledge possible asserted interrupt.
	 */
	for (i = 0; i < BNX_INTR_MAX; ++i)
		bnx_writembx(sc, sc->bnx_intr_data[i].bnx_intr_mbx, 1);

	for (i = 0; i < sc->bnx_intr_cnt; ++i) {
		lwkt_serialize_handler_disable(
		    sc->bnx_intr_data[i].bnx_intr_serialize);
	}
}

/*
 * Try to read the station address from chip shared memory at 0x0c14.
 * The 0x484b magic in the upper 16 bits marks a valid address.
 * Returns 0 on success, 1 if no valid address was found.
 */
static int
bnx_get_eaddr_mem(struct bnx_softc *sc, uint8_t ether_addr[])
{
	uint32_t mac_addr;
	int ret = 1;

	mac_addr = bnx_readmem_ind(sc, 0x0c14);
	if ((mac_addr >> 16) == 0x484b) {
		ether_addr[0] = (uint8_t)(mac_addr >> 8);
		ether_addr[1] = (uint8_t)mac_addr;
		mac_addr = bnx_readmem_ind(sc, 0x0c18);
		ether_addr[2] = (uint8_t)(mac_addr >> 24);
		ether_addr[3] = (uint8_t)(mac_addr >> 16);
		ether_addr[4] = (uint8_t)(mac_addr >> 8);
		ether_addr[5] = (uint8_t)mac_addr;
		ret = 0;
	}
	return ret;
}

/*
 * Try to read the station address from NVRAM.  5717+ multi-function
 * devices keep per-function addresses at different offsets.
 * Returns 0 on success, non-zero on read failure.
 */
static int
bnx_get_eaddr_nvram(struct bnx_softc *sc, uint8_t ether_addr[])
{
	int mac_offset = BGE_EE_MAC_OFFSET;

	if (BNX_IS_5717_PLUS(sc)) {
		int f;

		f = pci_get_function(sc->bnx_dev);
		if (f & 1)
			mac_offset = BGE_EE_MAC_OFFSET_5717;
		if (f > 1)
			mac_offset += BGE_EE_MAC_OFFSET_5717_OFF;
	}

	return bnx_read_nvram(sc, ether_addr, mac_offset + 2, ETHER_ADDR_LEN);
}

/*
 * Try to read the station address from EEPROM; fails immediately on
 * chips flagged as having no EEPROM.  Returns 0 on success.
 */
static int
bnx_get_eaddr_eeprom(struct bnx_softc *sc, uint8_t ether_addr[])
{
	if (sc->bnx_flags & BNX_FLAG_NO_EEPROM)
		return 1;

	return bnx_read_eeprom(sc, ether_addr, BGE_EE_MAC_OFFSET + 2,
	    ETHER_ADDR_LEN);
}

/*
 * Obtain the station address by trying each source in priority
 * order: shared memory, NVRAM, EEPROM.  Returns 0 on success or
 * ENXIO if every source failed.
 */
static int
bnx_get_eaddr(struct bnx_softc *sc, uint8_t eaddr[])
{
	static const bnx_eaddr_fcn_t bnx_eaddr_funcs[] = {
		/* NOTE: Order is critical */
		bnx_get_eaddr_mem,
		bnx_get_eaddr_nvram,
		bnx_get_eaddr_eeprom,
		NULL
	};
	const bnx_eaddr_fcn_t *func;

	for (func = bnx_eaddr_funcs; *func != NULL; ++func) {
		if ((*func)(sc, eaddr) == 0)
			break;
	}
	return (*func == NULL ? ENXIO : 0);
}

/*
 * NOTE: 'm' is not freed upon failure
 */
struct mbuf *
bnx_defrag_shortdma(struct mbuf *m)
{
	struct mbuf *n;
	int found;

	/*
	 * If device receive two back-to-back send BDs with less than
	 * or equal to 8 total bytes then the device may hang.  The two
	 * back-to-back send BDs must in the same frame for this failure
	 * to occur.  Scan mbuf chains and see whether two back-to-back
	 * send BDs are there.  If this is the case, allocate new mbuf
	 * and copy the frame to workaround the silicon bug.
	 */
	for (n = m, found = 0; n != NULL; n = n->m_next) {
		if (n->m_len < 8) {
			found++;
			if (found > 1)
				break;
			continue;
		}
		found = 0;
	}

	if (found > 1)
		n = m_defrag(m, MB_DONTWAIT);
	else
		n = m;
	return n;
}

/*
 * Clear 'bit' in register 'reg' and busy-wait (up to BNX_TIMEOUT
 * iterations, 100us apart) for the hardware to report the block as
 * stopped.  Silently gives up on timeout.
 */
static void
bnx_stop_block(struct bnx_softc *sc, bus_size_t reg, uint32_t bit)
{
	int i;

	BNX_CLRBIT(sc, reg, bit);
	for (i = 0; i < BNX_TIMEOUT; i++) {
		if ((CSR_READ_4(sc, reg) & bit) == 0)
			return;
		DELAY(100);
	}
}

/*
 * Read the MAC status register and dispatch to the configured link
 * update routine if a link change is indicated or explicitly forced
 * via bnx_link_evt.
 */
static void
bnx_link_poll(struct bnx_softc *sc)
{
	uint32_t status;

	status = CSR_READ_4(sc, BGE_MAC_STS);
	if ((status & sc->bnx_link_chg) || sc->bnx_link_evt) {
		sc->bnx_link_evt = 0;
		sc->bnx_link_upd(sc, status);
	}
}

/*
 * Enable MSI (or MSI-X multi-vector mode when is_msix is TRUE) in
 * the chip's MSI mode register, with one-shot mode left enabled.
 */
static void
bnx_enable_msi(struct bnx_softc *sc, boolean_t is_msix)
{
	uint32_t msi_mode;

	msi_mode = CSR_READ_4(sc, BGE_MSI_MODE);
	msi_mode |= BGE_MSIMODE_ENABLE;
	/*
	 * NOTE:
	 * 5718-PG105-R says that "one shot" mode does not work
	 * if MSI is used, however, it obviously works.
	 */
	msi_mode &= ~BGE_MSIMODE_ONESHOT_DISABLE;
	if (is_msix)
		msi_mode |= BGE_MSIMODE_MSIX_MULTIMODE;
	else
		msi_mode &= ~BGE_MSIMODE_MSIX_MULTIMODE;
	CSR_WRITE_4(sc, BGE_MSI_MODE, msi_mode);
}

/*
 * Compute the DMA byte/word swap bits for the mode control register,
 * adding non-frame byte swapping on big-endian hosts.
 */
static uint32_t
bnx_dma_swap_options(struct bnx_softc *sc)
{
	uint32_t dma_options;

	dma_options = BGE_MODECTL_WORDSWAP_NONFRAME |
	    BGE_MODECTL_BYTESWAP_DATA | BGE_MODECTL_WORDSWAP_DATA;
#if BYTE_ORDER == BIG_ENDIAN
	dma_options |= BGE_MODECTL_BYTESWAP_NONFRAME;
#endif
	return dma_options;
}

/*
 * Prepare an outgoing TSO frame: pull up the headers if needed,
 * patch ip_len/th_sum the way the hardware expects, and encode the
 * header length into the mss and send-BD flag words returned through
 * mss0/flags0.  On failure *mp is set to NULL and ENOBUFS returned;
 * otherwise *mp may point at a new (pulled-up) mbuf chain.
 */
static int
bnx_setup_tso(struct bnx_tx_ring *txr, struct mbuf **mp,
    uint16_t *mss0, uint16_t *flags0)
{
	struct mbuf *m;
	struct ip *ip;
	struct tcphdr *th;
	int thoff, iphlen, hoff, hlen;
	uint16_t flags, mss;

	m = *mp;
	KASSERT(M_WRITABLE(m), ("TSO mbuf not writable"));

	hoff = m->m_pkthdr.csum_lhlen;
	iphlen = m->m_pkthdr.csum_iphlen;
	thoff = m->m_pkthdr.csum_thlen;

	KASSERT(hoff > 0, ("invalid ether header len"));
	KASSERT(iphlen > 0, ("invalid ip header len"));
	KASSERT(thoff > 0, ("invalid tcp header len"));

	if (__predict_false(m->m_len < hoff + iphlen + thoff)) {
		m = m_pullup(m, hoff + iphlen + thoff);
		if (m == NULL) {
			*mp = NULL;
			return ENOBUFS;
		}
		*mp = m;
	}
	ip = mtodoff(m, struct ip *, hoff);
	th = mtodoff(m, struct tcphdr *, hoff + iphlen);

	mss = m->m_pkthdr.tso_segsz;
	flags = BGE_TXBDFLAG_CPU_PRE_DMA | BGE_TXBDFLAG_CPU_POST_DMA;

	/* The chip rewrites per-segment lengths/checksums from these. */
	ip->ip_len = htons(mss + iphlen + thoff);
	th->th_sum = 0;

	/* Fold the 32-bit-word header length into mss/flags fields. */
	hlen = (iphlen + thoff) >> 2;
	mss |= ((hlen & 0x3) << 14);
	flags |= ((hlen & 0xf8) << 7) | ((hlen & 0x4) << 2);

	*mss0 = mss;
	*flags0 = flags;

	return 0;
}

/*
 * Allocate the DMA tag, per-buffer maps and descriptor memory for
 * one TX ring and initialize its serializer and default flags.
 * Returns 0 or a bus_dma error; partial allocations are unwound
 * except the ring block, which bnx_destroy_tx_ring() handles.
 */
static int
bnx_create_tx_ring(struct bnx_tx_ring *txr)
{
	bus_size_t txmaxsz, txmaxsegsz;
	int i, error;

	lwkt_serialize_init(&txr->bnx_tx_serialize);

	/*
	 * Create DMA tag and maps for TX mbufs.
	 */
	if (txr->bnx_sc->bnx_flags & BNX_FLAG_TSO)
		txmaxsz = IP_MAXPACKET + sizeof(struct ether_vlan_header);
	else
		txmaxsz = BNX_JUMBO_FRAMELEN;
	if (txr->bnx_sc->bnx_asicrev == BGE_ASICREV_BCM57766)
		txmaxsegsz = MCLBYTES;
	else
		txmaxsegsz = PAGE_SIZE;
	error = bus_dma_tag_create(txr->bnx_sc->bnx_cdata.bnx_parent_tag,
	    1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    txmaxsz, BNX_NSEG_NEW, txmaxsegsz,
	    BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
	    &txr->bnx_tx_mtag);
	if (error) {
		device_printf(txr->bnx_sc->bnx_dev,
		    "could not create TX mbuf DMA tag\n");
		return error;
	}

	for (i = 0; i < BGE_TX_RING_CNT; i++) {
		error = bus_dmamap_create(txr->bnx_tx_mtag,
		    BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
		    &txr->bnx_tx_buf[i].bnx_tx_dmamap);
		if (error) {
			int j;

			/* Unwind the maps created so far, then the tag. */
			for (j = 0; j < i; ++j) {
				bus_dmamap_destroy(txr->bnx_tx_mtag,
				    txr->bnx_tx_buf[j].bnx_tx_dmamap);
			}
			bus_dma_tag_destroy(txr->bnx_tx_mtag);
			txr->bnx_tx_mtag = NULL;

			device_printf(txr->bnx_sc->bnx_dev,
			    "could not create TX mbuf DMA map\n");
			return error;
		}
	}

	/*
	 * Create DMA stuffs for TX ring.
	 */
	error = bnx_dma_block_alloc(txr->bnx_sc, BGE_TX_RING_SZ,
	    &txr->bnx_tx_ring_tag,
	    &txr->bnx_tx_ring_map,
	    (void *)&txr->bnx_tx_ring,
	    &txr->bnx_tx_ring_paddr);
	if (error) {
		device_printf(txr->bnx_sc->bnx_dev,
		    "could not create TX ring\n");
		return error;
	}

	txr->bnx_tx_flags |= BNX_TX_FLAG_SHORTDMA;
	txr->bnx_tx_wreg = BNX_TX_WREG_NSEGS;

	return 0;
}

/*
 * Free one TX ring's mbuf DMA maps/tag and its descriptor memory.
 * All TX mbufs must already have been freed.
 */
static void
bnx_destroy_tx_ring(struct bnx_tx_ring *txr)
{
	/* Destroy TX mbuf DMA stuffs. */
	if (txr->bnx_tx_mtag != NULL) {
		int i;

		for (i = 0; i < BGE_TX_RING_CNT; i++) {
			KKASSERT(txr->bnx_tx_buf[i].bnx_tx_mbuf == NULL);
			bus_dmamap_destroy(txr->bnx_tx_mtag,
			    txr->bnx_tx_buf[i].bnx_tx_dmamap);
		}
		bus_dma_tag_destroy(txr->bnx_tx_mtag);
	}

	/* Destroy TX ring */
	bnx_dma_block_free(txr->bnx_tx_ring_tag,
	    txr->bnx_tx_ring_map, txr->bnx_tx_ring);
}

/*
 * sysctl: get/set the force-defrag flag on all TX rings (under the
 * all-ring serializer).  Ring 0's current flag is reported as the
 * current value.
 */
static int
bnx_sysctl_force_defrag(SYSCTL_HANDLER_ARGS)
{
	struct bnx_softc *sc = (void *)arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct bnx_tx_ring *txr = &sc->bnx_tx_ring[0];
	int error, defrag, i;

	if (txr->bnx_tx_flags & BNX_TX_FLAG_FORCE_DEFRAG)
		defrag = 1;
	else
		defrag = 0;

	error = sysctl_handle_int(oidp, &defrag, 0, req);
	if (error || req->newptr == NULL)
		return error;

	ifnet_serialize_all(ifp);
	for (i = 0; i < sc->bnx_tx_ringcnt; ++i) {
		txr = &sc->bnx_tx_ring[i];
		if (defrag)
			txr->bnx_tx_flags |= BNX_TX_FLAG_FORCE_DEFRAG;
		else
			txr->bnx_tx_flags &= ~BNX_TX_FLAG_FORCE_DEFRAG;
	}
	ifnet_deserialize_all(ifp);

	return 0;
}

/*
 * sysctl: get/set the TX mailbox write threshold (segments queued
 * before kicking the hardware) on all TX rings.
 */
static int
bnx_sysctl_tx_wreg(SYSCTL_HANDLER_ARGS)
{
	struct bnx_softc *sc = (void *)arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct bnx_tx_ring *txr = &sc->bnx_tx_ring[0];
	int error, tx_wreg, i;

	tx_wreg = txr->bnx_tx_wreg;
	error = sysctl_handle_int(oidp, &tx_wreg, 0, req);
	if (error || req->newptr == NULL)
		return error;

	ifnet_serialize_all(ifp);
	for (i = 0; i < sc->bnx_tx_ringcnt; ++i)
		sc->bnx_tx_ring[i].bnx_tx_wreg = tx_wreg;
	ifnet_deserialize_all(ifp);

	return 0;
}

/*
 * Allocate the descriptor memory and temporary mbuf DMA map for one
 * RX return ring; the mbuf DMA tag is shared with the standard ring.
 * Returns 0 or a bus_dma error.
 */
static int
bnx_create_rx_ret_ring(struct bnx_rx_ret_ring *ret)
{
	int error;

	lwkt_serialize_init(&ret->bnx_rx_ret_serialize);

	/*
	 * Create DMA stuffs for RX return ring.
	 */
	error = bnx_dma_block_alloc(ret->bnx_sc,
	    BGE_RX_RTN_RING_SZ(BNX_RETURN_RING_CNT),
	    &ret->bnx_rx_ret_ring_tag,
	    &ret->bnx_rx_ret_ring_map,
	    (void *)&ret->bnx_rx_ret_ring,
	    &ret->bnx_rx_ret_ring_paddr);
	if (error) {
		device_printf(ret->bnx_sc->bnx_dev,
		    "could not create RX ret ring\n");
		return error;
	}

	/* Shadow standard ring's RX mbuf DMA tag */
	ret->bnx_rx_mtag = ret->bnx_std->bnx_rx_mtag;

	/*
	 * Create tmp DMA map for RX mbufs.
	 */
	error = bus_dmamap_create(ret->bnx_rx_mtag, BUS_DMA_WAITOK,
	    &ret->bnx_rx_tmpmap);
	if (error) {
		device_printf(ret->bnx_sc->bnx_dev,
		    "could not create tmp RX mbuf DMA map\n");
		/* NULL the shadow tag so destroy won't touch the map. */
		ret->bnx_rx_mtag = NULL;
		return error;
	}
	return 0;
}

/*
 * Free one RX return ring's temporary mbuf map and descriptor
 * memory.  The shadowed mbuf tag itself belongs to the standard
 * ring and is not destroyed here.
 */
static void
bnx_destroy_rx_ret_ring(struct bnx_rx_ret_ring *ret)
{
	/* Destroy tmp RX mbuf DMA map */
	if (ret->bnx_rx_mtag != NULL)
		bus_dmamap_destroy(ret->bnx_rx_mtag, ret->bnx_rx_tmpmap);

	/* Destroy RX return ring */
	bnx_dma_block_free(ret->bnx_rx_ret_ring_tag,
	    ret->bnx_rx_ret_ring_map, ret->bnx_rx_ret_ring);
}

/*
 * Allocate the interrupt resource(s).  Multi-vector configurations
 * are delegated to bnx_alloc_msix(); the single-vector path sets up
 * intr data 0 for bundled RX/TX handling and allocates one legacy
 * INTx or MSI interrupt.  Returns 0 or ENXIO/errno.
 */
static int
bnx_alloc_intr(struct bnx_softc *sc)
{
	struct bnx_intr_data *intr;
	u_int intr_flags;
	int error;

	if (sc->bnx_intr_cnt > 1) {
		error = bnx_alloc_msix(sc);
		if (error)
			return error;
		KKASSERT(sc->bnx_intr_type == PCI_INTR_TYPE_MSIX);
		return 0;
	}

	KKASSERT(sc->bnx_intr_cnt == 1);

	intr = &sc->bnx_intr_data[0];
	intr->bnx_ret = &sc->bnx_rx_ret_ring[0];
	intr->bnx_txr = &sc->bnx_tx_ring[0];
	intr->bnx_intr_serialize = &sc->bnx_main_serialize;
	intr->bnx_intr_check = bnx_check_intr_rxtx;
	intr->bnx_saved_status_tag = &intr->bnx_ret->bnx_saved_status_tag;

	sc->bnx_intr_type = pci_alloc_1intr(sc->bnx_dev, bnx_msi_enable,
	    &intr->bnx_intr_rid, &intr_flags);

	intr->bnx_intr_res = bus_alloc_resource_any(sc->bnx_dev, SYS_RES_IRQ,
	    &intr->bnx_intr_rid, intr_flags);
	if (intr->bnx_intr_res == NULL) {
		device_printf(sc->bnx_dev, "could not alloc interrupt\n");
		return ENXIO;
	}

	if (sc->bnx_intr_type == PCI_INTR_TYPE_MSI) {
		bnx_enable_msi(sc, FALSE);
		intr->bnx_intr_func = bnx_msi;
		if (bootverbose)
			device_printf(sc->bnx_dev, "oneshot MSI\n");
	} else {
		intr->bnx_intr_func = bnx_intr_legacy;
	}
	intr->bnx_intr_arg = sc;
	intr->bnx_intr_cpuid = rman_get_cpuid(intr->bnx_intr_res);

	/* TX completion runs on the interrupt's CPU. */
	intr->bnx_txr->bnx_tx_cpuid = intr->bnx_intr_cpuid;

	return 0;
}

/*
 * Hook up every allocated interrupt to its handler; on failure the
 * already-installed handlers are torn down before returning.
 */
static int
bnx_setup_intr(struct bnx_softc *sc)
{
	int error, i;

	for (i = 0; i < sc->bnx_intr_cnt; ++i) {
		struct bnx_intr_data *intr = &sc->bnx_intr_data[i];

		error = bus_setup_intr_descr(sc->bnx_dev, intr->bnx_intr_res,
		    INTR_MPSAFE, intr->bnx_intr_func, intr->bnx_intr_arg,
		    &intr->bnx_intr_hand, intr->bnx_intr_serialize,
		    intr->bnx_intr_desc);
		if (error) {
			device_printf(sc->bnx_dev,
			    "could not set up %dth intr\n", i);
			bnx_teardown_intr(sc, i);
			return error;
		}
	}
	return 0;
}

/*
 * Tear down the first 'cnt' installed interrupt handlers (partial
 * unwind helper for bnx_setup_intr()).
 */
static void
bnx_teardown_intr(struct bnx_softc *sc, int cnt)
{
	int i;

	for (i = 0; i < cnt; ++i) {
		struct bnx_intr_data *intr = &sc->bnx_intr_data[i];

		bus_teardown_intr(sc->bnx_dev, intr->bnx_intr_res,
		    intr->bnx_intr_hand);
	}
}

/*
 * Release the interrupt resource(s) allocated by bnx_alloc_intr():
 * the single legacy/MSI resource (plus MSI release), or all MSI-X
 * resources via bnx_free_msix().
 */
static void
bnx_free_intr(struct bnx_softc *sc)
{
	if (sc->bnx_intr_type != PCI_INTR_TYPE_MSIX) {
		struct bnx_intr_data *intr;

		KKASSERT(sc->bnx_intr_cnt <= 1);
		intr = &sc->bnx_intr_data[0];

		if (intr->bnx_intr_res != NULL) {
			bus_release_resource(sc->bnx_dev, SYS_RES_IRQ,
			    intr->bnx_intr_rid, intr->bnx_intr_res);
		}
		if (sc->bnx_intr_type == PCI_INTR_TYPE_MSI)
			pci_release_msi(sc->bnx_dev);
	} else {
		bnx_free_msix(sc, TRUE);
	}
}

/*
 * Build the ordered array of serializers used by the ifnet
 * serialize/deserialize methods: main, RX std, all RX return rings,
 * then all TX rings.  The order must match across enter/exit.
 */
static void
bnx_setup_serialize(struct bnx_softc *sc)
{
	int i, j;

	/*
	 * Allocate serializer array
	 */

	/* Main + RX STD + TX + RX RET */
	sc->bnx_serialize_cnt = 1 + 1 + sc->bnx_tx_ringcnt + sc->bnx_rx_retcnt;

	sc->bnx_serialize =
	    kmalloc(sc->bnx_serialize_cnt * sizeof(struct lwkt_serialize *),
	    M_DEVBUF, M_WAITOK | M_ZERO);

	/*
	 * Setup serializers
	 *
	 * NOTE: Order is critical
	 */

	i = 0;

	KKASSERT(i < sc->bnx_serialize_cnt);
	sc->bnx_serialize[i++] = &sc->bnx_main_serialize;

	KKASSERT(i < sc->bnx_serialize_cnt);
	sc->bnx_serialize[i++] = &sc->bnx_rx_std_ring.bnx_rx_std_serialize;

	for (j = 0; j < sc->bnx_rx_retcnt; ++j) {
		KKASSERT(i < sc->bnx_serialize_cnt);
		sc->bnx_serialize[i++] =
		    &sc->bnx_rx_ret_ring[j].bnx_rx_ret_serialize;
	}

	for (j = 0; j < sc->bnx_tx_ringcnt; ++j) {
		KKASSERT(i < sc->bnx_serialize_cnt);
		sc->bnx_serialize[i++] =
		    &sc->bnx_tx_ring[j].bnx_tx_serialize;
	}

	KKASSERT(i == sc->bnx_serialize_cnt);
}

/* ifnet serialize method: enter the requested serializer set. */
static void
bnx_serialize(struct ifnet *ifp, enum ifnet_serialize slz)
{
	struct bnx_softc *sc = ifp->if_softc;

	ifnet_serialize_array_enter(sc->bnx_serialize,
	    sc->bnx_serialize_cnt, slz);
}

/* ifnet deserialize method: exit the requested serializer set. */
static void
bnx_deserialize(struct ifnet *ifp, enum ifnet_serialize slz)
{
	struct bnx_softc *sc = ifp->if_softc;

	ifnet_serialize_array_exit(sc->bnx_serialize,
	    sc->bnx_serialize_cnt, slz);
}

/* ifnet tryserialize method: non-blocking serializer acquisition. */
static int
bnx_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz)
{
	struct bnx_softc *sc = ifp->if_softc;

	return ifnet_serialize_array_try(sc->bnx_serialize,
	    sc->bnx_serialize_cnt, slz);
}

#ifdef INVARIANTS

static void
bnx_serialize_assert(struct ifnet *ifp, enum ifnet_serialize slz, 5526 boolean_t serialized) 5527 { 5528 struct bnx_softc *sc = ifp->if_softc; 5529 5530 ifnet_serialize_array_assert(sc->bnx_serialize, sc->bnx_serialize_cnt, 5531 slz, serialized); 5532 } 5533 5534 #endif /* INVARIANTS */ 5535 5536 #ifdef IFPOLL_ENABLE 5537 5538 static int 5539 bnx_sysctl_npoll_offset(SYSCTL_HANDLER_ARGS) 5540 { 5541 struct bnx_softc *sc = (void *)arg1; 5542 struct ifnet *ifp = &sc->arpcom.ac_if; 5543 int error, off; 5544 5545 off = sc->bnx_npoll_rxoff; 5546 error = sysctl_handle_int(oidp, &off, 0, req); 5547 if (error || req->newptr == NULL) 5548 return error; 5549 if (off < 0) 5550 return EINVAL; 5551 5552 ifnet_serialize_all(ifp); 5553 if (off >= ncpus2 || off % sc->bnx_rx_retcnt != 0) { 5554 error = EINVAL; 5555 } else { 5556 error = 0; 5557 sc->bnx_npoll_txoff = off; 5558 sc->bnx_npoll_rxoff = off; 5559 } 5560 ifnet_deserialize_all(ifp); 5561 5562 return error; 5563 } 5564 5565 static int 5566 bnx_sysctl_npoll_rxoff(SYSCTL_HANDLER_ARGS) 5567 { 5568 struct bnx_softc *sc = (void *)arg1; 5569 struct ifnet *ifp = &sc->arpcom.ac_if; 5570 int error, off; 5571 5572 off = sc->bnx_npoll_rxoff; 5573 error = sysctl_handle_int(oidp, &off, 0, req); 5574 if (error || req->newptr == NULL) 5575 return error; 5576 if (off < 0) 5577 return EINVAL; 5578 5579 ifnet_serialize_all(ifp); 5580 if (off >= ncpus2 || off % sc->bnx_rx_retcnt != 0) { 5581 error = EINVAL; 5582 } else { 5583 error = 0; 5584 sc->bnx_npoll_rxoff = off; 5585 } 5586 ifnet_deserialize_all(ifp); 5587 5588 return error; 5589 } 5590 5591 static int 5592 bnx_sysctl_npoll_txoff(SYSCTL_HANDLER_ARGS) 5593 { 5594 struct bnx_softc *sc = (void *)arg1; 5595 struct ifnet *ifp = &sc->arpcom.ac_if; 5596 int error, off; 5597 5598 off = sc->bnx_npoll_txoff; 5599 error = sysctl_handle_int(oidp, &off, 0, req); 5600 if (error || req->newptr == NULL) 5601 return error; 5602 if (off < 0) 5603 return EINVAL; 5604 5605 ifnet_serialize_all(ifp); 5606 if 
(off >= ncpus2) { 5607 error = EINVAL; 5608 } else { 5609 error = 0; 5610 sc->bnx_npoll_txoff = off; 5611 } 5612 ifnet_deserialize_all(ifp); 5613 5614 return error; 5615 } 5616 5617 #endif /* IFPOLL_ENABLE */ 5618 5619 static void 5620 bnx_set_tick_cpuid(struct bnx_softc *sc, boolean_t polling) 5621 { 5622 if (polling) 5623 sc->bnx_tick_cpuid = 0; /* XXX */ 5624 else 5625 sc->bnx_tick_cpuid = sc->bnx_intr_data[0].bnx_intr_cpuid; 5626 } 5627 5628 static void 5629 bnx_rx_std_refill_ithread(void *xstd) 5630 { 5631 struct bnx_rx_std_ring *std = xstd; 5632 struct globaldata *gd = mycpu; 5633 5634 crit_enter_gd(gd); 5635 5636 while (!std->bnx_rx_std_stop) { 5637 if (std->bnx_rx_std_refill) { 5638 lwkt_serialize_handler_call( 5639 &std->bnx_rx_std_serialize, 5640 bnx_rx_std_refill, std, NULL); 5641 } 5642 5643 crit_exit_gd(gd); 5644 crit_enter_gd(gd); 5645 5646 atomic_poll_release_int(&std->bnx_rx_std_running); 5647 cpu_mfence(); 5648 5649 if (!std->bnx_rx_std_refill && !std->bnx_rx_std_stop) { 5650 lwkt_deschedule_self(gd->gd_curthread); 5651 lwkt_switch(); 5652 } 5653 } 5654 5655 crit_exit_gd(gd); 5656 5657 wakeup(std); 5658 5659 lwkt_exit(); 5660 } 5661 5662 static void 5663 bnx_rx_std_refill(void *xstd, void *frame __unused) 5664 { 5665 struct bnx_rx_std_ring *std = xstd; 5666 int cnt, refill_mask; 5667 5668 again: 5669 cnt = 0; 5670 5671 cpu_lfence(); 5672 refill_mask = std->bnx_rx_std_refill; 5673 atomic_clear_int(&std->bnx_rx_std_refill, refill_mask); 5674 5675 while (refill_mask) { 5676 uint16_t check_idx = std->bnx_rx_std; 5677 int ret_idx; 5678 5679 ret_idx = bsfl(refill_mask); 5680 for (;;) { 5681 struct bnx_rx_buf *rb; 5682 int refilled; 5683 5684 BNX_INC(check_idx, BGE_STD_RX_RING_CNT); 5685 rb = &std->bnx_rx_std_buf[check_idx]; 5686 refilled = rb->bnx_rx_refilled; 5687 cpu_lfence(); 5688 if (refilled) { 5689 bnx_setup_rxdesc_std(std, check_idx); 5690 std->bnx_rx_std = check_idx; 5691 ++cnt; 5692 if (cnt >= 8) { 5693 atomic_subtract_int( 5694 
&std->bnx_rx_std_used, cnt); 5695 bnx_writembx(std->bnx_sc, 5696 BGE_MBX_RX_STD_PROD_LO, 5697 std->bnx_rx_std); 5698 cnt = 0; 5699 } 5700 } else { 5701 break; 5702 } 5703 } 5704 refill_mask &= ~(1 << ret_idx); 5705 } 5706 5707 if (cnt) { 5708 atomic_subtract_int(&std->bnx_rx_std_used, cnt); 5709 bnx_writembx(std->bnx_sc, BGE_MBX_RX_STD_PROD_LO, 5710 std->bnx_rx_std); 5711 } 5712 5713 if (std->bnx_rx_std_refill) 5714 goto again; 5715 5716 atomic_poll_release_int(&std->bnx_rx_std_running); 5717 cpu_mfence(); 5718 5719 if (std->bnx_rx_std_refill) 5720 goto again; 5721 } 5722 5723 static int 5724 bnx_sysctl_std_refill(SYSCTL_HANDLER_ARGS) 5725 { 5726 struct bnx_softc *sc = (void *)arg1; 5727 struct ifnet *ifp = &sc->arpcom.ac_if; 5728 struct bnx_rx_ret_ring *ret = &sc->bnx_rx_ret_ring[0]; 5729 int error, cntmax, i; 5730 5731 cntmax = ret->bnx_rx_cntmax; 5732 error = sysctl_handle_int(oidp, &cntmax, 0, req); 5733 if (error || req->newptr == NULL) 5734 return error; 5735 5736 ifnet_serialize_all(ifp); 5737 5738 if ((cntmax * sc->bnx_rx_retcnt) >= BGE_STD_RX_RING_CNT / 2) { 5739 error = EINVAL; 5740 goto back; 5741 } 5742 5743 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) 5744 sc->bnx_rx_ret_ring[i].bnx_rx_cntmax = cntmax; 5745 error = 0; 5746 5747 back: 5748 ifnet_deserialize_all(ifp); 5749 5750 return error; 5751 } 5752 5753 static void 5754 bnx_init_rss(struct bnx_softc *sc) 5755 { 5756 uint8_t key[BGE_RSS_KEYREG_CNT * BGE_RSS_KEYREG_SIZE]; 5757 int i, j, r; 5758 5759 KKASSERT(BNX_RSS_ENABLED(sc)); 5760 5761 /* 5762 * Configure RSS redirect table in following fashion: 5763 * (hash & ring_cnt_mask) == rdr_table[(hash & rdr_table_mask)] 5764 */ 5765 r = 0; 5766 for (j = 0; j < BGE_RSS_INDIR_TBL_CNT; ++j) { 5767 uint32_t tbl = 0; 5768 5769 for (i = 0; i < BGE_RSS_INDIR_TBLENT_CNT; ++i) { 5770 uint32_t q; 5771 5772 q = r % sc->bnx_rx_retcnt; 5773 tbl |= q << (BGE_RSS_INDIR_TBLENT_SHIFT * 5774 (BGE_RSS_INDIR_TBLENT_CNT - i - 1)); 5775 ++r; 5776 } 5777 5778 BNX_RSS_DPRINTF(sc, 1, 
"tbl%d %08x\n", j, tbl); 5779 CSR_WRITE_4(sc, BGE_RSS_INDIR_TBL(j), tbl); 5780 } 5781 5782 toeplitz_get_key(key, sizeof(key)); 5783 for (i = 0; i < BGE_RSS_KEYREG_CNT; ++i) { 5784 uint32_t keyreg; 5785 5786 keyreg = BGE_RSS_KEYREG_VAL(key, i); 5787 5788 BNX_RSS_DPRINTF(sc, 1, "key%d %08x\n", i, keyreg); 5789 CSR_WRITE_4(sc, BGE_RSS_KEYREG(i), keyreg); 5790 } 5791 } 5792 5793 static void 5794 bnx_setup_ring_cnt(struct bnx_softc *sc) 5795 { 5796 int msix_enable, i, msix_cnt, msix_cnt2, ring_max; 5797 5798 sc->bnx_tx_ringcnt = 1; 5799 sc->bnx_rx_retcnt = 1; 5800 sc->bnx_intr_cnt = 1; 5801 5802 msix_enable = device_getenv_int(sc->bnx_dev, "msix.enable", 5803 bnx_msix_enable); 5804 if (!msix_enable) 5805 return; 5806 5807 if (ncpus2 == 1) 5808 return; 5809 5810 msix_cnt = pci_msix_count(sc->bnx_dev); 5811 if (msix_cnt <= 1) 5812 return; 5813 5814 i = 0; 5815 while ((1 << (i + 1)) <= msix_cnt) 5816 ++i; 5817 msix_cnt2 = 1 << i; 5818 5819 /* 5820 * One MSI-X vector is dedicated to status or single TX queue, 5821 * so make sure that there are enough MSI-X vectors. 5822 */ 5823 if (msix_cnt == msix_cnt2) { 5824 /* 5825 * XXX 5826 * This probably will not happen; 57785/5718 families 5827 * come with at least 5 MSI-X vectors. 5828 */ 5829 msix_cnt2 >>= 1; 5830 if (msix_cnt2 <= 1) { 5831 device_printf(sc->bnx_dev, 5832 "MSI-X count %d could not be used\n", msix_cnt); 5833 return; 5834 } 5835 device_printf(sc->bnx_dev, "MSI-X count %d is power of 2\n", 5836 msix_cnt); 5837 } 5838 5839 /* 5840 * Setup RX ring count 5841 */ 5842 ring_max = BNX_RX_RING_MAX; 5843 if (ring_max > msix_cnt2) 5844 ring_max = msix_cnt2; 5845 sc->bnx_rx_retcnt = device_getenv_int(sc->bnx_dev, "rx_rings", 5846 bnx_rx_rings); 5847 sc->bnx_rx_retcnt = if_ring_count2(sc->bnx_rx_retcnt, ring_max); 5848 5849 if (sc->bnx_rx_retcnt == 1) 5850 return; 5851 5852 /* 5853 * We need one extra MSI-X vector for link status or 5854 * TX ring (if only one TX ring is enabled). 
5855 */ 5856 sc->bnx_intr_cnt = sc->bnx_rx_retcnt + 1; 5857 5858 /* 5859 * Setup TX ring count 5860 * 5861 * Currently only BCM5719 and BCM5720 support multiple TX rings 5862 * and the TX ring count must be less than the RX ring count. 5863 */ 5864 if (sc->bnx_asicrev == BGE_ASICREV_BCM5719 || 5865 sc->bnx_asicrev == BGE_ASICREV_BCM5720) { 5866 ring_max = BNX_TX_RING_MAX; 5867 if (ring_max > msix_cnt2) 5868 ring_max = msix_cnt2; 5869 if (ring_max > sc->bnx_rx_retcnt) 5870 ring_max = sc->bnx_rx_retcnt; 5871 sc->bnx_tx_ringcnt = device_getenv_int(sc->bnx_dev, "tx_rings", 5872 bnx_tx_rings); 5873 sc->bnx_tx_ringcnt = if_ring_count2(sc->bnx_tx_ringcnt, 5874 ring_max); 5875 } 5876 } 5877 5878 static int 5879 bnx_alloc_msix(struct bnx_softc *sc) 5880 { 5881 struct bnx_intr_data *intr; 5882 boolean_t setup = FALSE; 5883 int error, i, offset, offset_def; 5884 5885 KKASSERT(sc->bnx_intr_cnt > 1); 5886 KKASSERT(sc->bnx_intr_cnt == sc->bnx_rx_retcnt + 1); 5887 5888 if (sc->bnx_flags & BNX_FLAG_RXTX_BUNDLE) { 5889 /* 5890 * Link status 5891 */ 5892 intr = &sc->bnx_intr_data[0]; 5893 5894 intr->bnx_intr_serialize = &sc->bnx_main_serialize; 5895 intr->bnx_saved_status_tag = &sc->bnx_saved_status_tag; 5896 5897 intr->bnx_intr_func = bnx_msix_status; 5898 intr->bnx_intr_arg = sc; 5899 intr->bnx_intr_cpuid = 0; /* XXX */ 5900 5901 ksnprintf(intr->bnx_intr_desc0, sizeof(intr->bnx_intr_desc0), 5902 "%s sts", device_get_nameunit(sc->bnx_dev)); 5903 intr->bnx_intr_desc = intr->bnx_intr_desc0; 5904 5905 /* 5906 * RX/TX rings 5907 */ 5908 if (sc->bnx_rx_retcnt == ncpus2) { 5909 offset = 0; 5910 } else { 5911 offset_def = (sc->bnx_rx_retcnt * 5912 device_get_unit(sc->bnx_dev)) % ncpus2; 5913 5914 offset = device_getenv_int(sc->bnx_dev, 5915 "msix.offset", offset_def); 5916 if (offset >= ncpus2 || 5917 offset % sc->bnx_rx_retcnt != 0) { 5918 device_printf(sc->bnx_dev, 5919 "invalid msix.offset %d, use %d\n", 5920 offset, offset_def); 5921 offset = offset_def; 5922 } 5923 } 5924 5925 for (i 
= 1; i < sc->bnx_intr_cnt; ++i) { 5926 int idx = i - 1; 5927 5928 intr = &sc->bnx_intr_data[i]; 5929 5930 KKASSERT(idx < sc->bnx_rx_retcnt); 5931 intr->bnx_ret = &sc->bnx_rx_ret_ring[idx]; 5932 if (idx < sc->bnx_tx_ringcnt) { 5933 intr->bnx_txr = &sc->bnx_tx_ring[idx]; 5934 intr->bnx_ret->bnx_txr = intr->bnx_txr; 5935 } 5936 5937 intr->bnx_intr_serialize = 5938 &intr->bnx_ret->bnx_rx_ret_serialize; 5939 intr->bnx_saved_status_tag = 5940 &intr->bnx_ret->bnx_saved_status_tag; 5941 5942 intr->bnx_intr_arg = intr->bnx_ret; 5943 KKASSERT(idx + offset < ncpus2); 5944 intr->bnx_intr_cpuid = idx + offset; 5945 5946 if (intr->bnx_txr == NULL) { 5947 intr->bnx_intr_check = bnx_check_intr_rx; 5948 intr->bnx_intr_func = bnx_msix_rx; 5949 ksnprintf(intr->bnx_intr_desc0, 5950 sizeof(intr->bnx_intr_desc0), "%s rx%d", 5951 device_get_nameunit(sc->bnx_dev), idx); 5952 } else { 5953 intr->bnx_intr_check = bnx_check_intr_rxtx; 5954 intr->bnx_intr_func = bnx_msix_rxtx; 5955 ksnprintf(intr->bnx_intr_desc0, 5956 sizeof(intr->bnx_intr_desc0), "%s rxtx%d", 5957 device_get_nameunit(sc->bnx_dev), idx); 5958 5959 intr->bnx_txr->bnx_tx_cpuid = 5960 intr->bnx_intr_cpuid; 5961 } 5962 intr->bnx_intr_desc = intr->bnx_intr_desc0; 5963 5964 intr->bnx_ret->bnx_msix_mbx = intr->bnx_intr_mbx; 5965 } 5966 } else { 5967 /* 5968 * TX ring and link status 5969 */ 5970 offset_def = device_get_unit(sc->bnx_dev) % ncpus2; 5971 offset = device_getenv_int(sc->bnx_dev, "msix.txoff", 5972 offset_def); 5973 if (offset >= ncpus2) { 5974 device_printf(sc->bnx_dev, 5975 "invalid msix.txoff %d, use %d\n", 5976 offset, offset_def); 5977 offset = offset_def; 5978 } 5979 5980 intr = &sc->bnx_intr_data[0]; 5981 5982 intr->bnx_txr = &sc->bnx_tx_ring[0]; 5983 intr->bnx_intr_serialize = &sc->bnx_main_serialize; 5984 intr->bnx_intr_check = bnx_check_intr_tx; 5985 intr->bnx_saved_status_tag = 5986 &intr->bnx_txr->bnx_saved_status_tag; 5987 5988 intr->bnx_intr_func = bnx_msix_tx_status; 5989 intr->bnx_intr_arg = intr->bnx_txr; 
5990 intr->bnx_intr_cpuid = offset; 5991 5992 ksnprintf(intr->bnx_intr_desc0, sizeof(intr->bnx_intr_desc0), 5993 "%s ststx", device_get_nameunit(sc->bnx_dev)); 5994 intr->bnx_intr_desc = intr->bnx_intr_desc0; 5995 5996 intr->bnx_txr->bnx_tx_cpuid = intr->bnx_intr_cpuid; 5997 5998 /* 5999 * RX rings 6000 */ 6001 if (sc->bnx_rx_retcnt == ncpus2) { 6002 offset = 0; 6003 } else { 6004 offset_def = (sc->bnx_rx_retcnt * 6005 device_get_unit(sc->bnx_dev)) % ncpus2; 6006 6007 offset = device_getenv_int(sc->bnx_dev, 6008 "msix.rxoff", offset_def); 6009 if (offset >= ncpus2 || 6010 offset % sc->bnx_rx_retcnt != 0) { 6011 device_printf(sc->bnx_dev, 6012 "invalid msix.rxoff %d, use %d\n", 6013 offset, offset_def); 6014 offset = offset_def; 6015 } 6016 } 6017 6018 for (i = 1; i < sc->bnx_intr_cnt; ++i) { 6019 int idx = i - 1; 6020 6021 intr = &sc->bnx_intr_data[i]; 6022 6023 KKASSERT(idx < sc->bnx_rx_retcnt); 6024 intr->bnx_ret = &sc->bnx_rx_ret_ring[idx]; 6025 intr->bnx_intr_serialize = 6026 &intr->bnx_ret->bnx_rx_ret_serialize; 6027 intr->bnx_intr_check = bnx_check_intr_rx; 6028 intr->bnx_saved_status_tag = 6029 &intr->bnx_ret->bnx_saved_status_tag; 6030 6031 intr->bnx_intr_func = bnx_msix_rx; 6032 intr->bnx_intr_arg = intr->bnx_ret; 6033 KKASSERT(idx + offset < ncpus2); 6034 intr->bnx_intr_cpuid = idx + offset; 6035 6036 ksnprintf(intr->bnx_intr_desc0, 6037 sizeof(intr->bnx_intr_desc0), "%s rx%d", 6038 device_get_nameunit(sc->bnx_dev), idx); 6039 intr->bnx_intr_desc = intr->bnx_intr_desc0; 6040 6041 intr->bnx_ret->bnx_msix_mbx = intr->bnx_intr_mbx; 6042 } 6043 } 6044 6045 if (BNX_IS_5717_PLUS(sc)) { 6046 sc->bnx_msix_mem_rid = PCIR_BAR(4); 6047 } else { 6048 if (sc->bnx_res2 == NULL) 6049 sc->bnx_msix_mem_rid = PCIR_BAR(2); 6050 } 6051 if (sc->bnx_msix_mem_rid != 0) { 6052 sc->bnx_msix_mem_res = bus_alloc_resource_any(sc->bnx_dev, 6053 SYS_RES_MEMORY, &sc->bnx_msix_mem_rid, RF_ACTIVE); 6054 if (sc->bnx_msix_mem_res == NULL) { 6055 device_printf(sc->bnx_dev, 6056 "could not 
alloc MSI-X table\n"); 6057 return ENXIO; 6058 } 6059 } 6060 6061 bnx_enable_msi(sc, TRUE); 6062 6063 error = pci_setup_msix(sc->bnx_dev); 6064 if (error) { 6065 device_printf(sc->bnx_dev, "could not setup MSI-X\n"); 6066 goto back; 6067 } 6068 setup = TRUE; 6069 6070 for (i = 0; i < sc->bnx_intr_cnt; ++i) { 6071 intr = &sc->bnx_intr_data[i]; 6072 6073 error = pci_alloc_msix_vector(sc->bnx_dev, i, 6074 &intr->bnx_intr_rid, intr->bnx_intr_cpuid); 6075 if (error) { 6076 device_printf(sc->bnx_dev, 6077 "could not alloc MSI-X %d on cpu%d\n", 6078 i, intr->bnx_intr_cpuid); 6079 goto back; 6080 } 6081 6082 intr->bnx_intr_res = bus_alloc_resource_any(sc->bnx_dev, 6083 SYS_RES_IRQ, &intr->bnx_intr_rid, RF_ACTIVE); 6084 if (intr->bnx_intr_res == NULL) { 6085 device_printf(sc->bnx_dev, 6086 "could not alloc MSI-X %d resource\n", i); 6087 error = ENXIO; 6088 goto back; 6089 } 6090 } 6091 6092 pci_enable_msix(sc->bnx_dev); 6093 sc->bnx_intr_type = PCI_INTR_TYPE_MSIX; 6094 back: 6095 if (error) 6096 bnx_free_msix(sc, setup); 6097 return error; 6098 } 6099 6100 static void 6101 bnx_free_msix(struct bnx_softc *sc, boolean_t setup) 6102 { 6103 int i; 6104 6105 KKASSERT(sc->bnx_intr_cnt > 1); 6106 6107 for (i = 0; i < sc->bnx_intr_cnt; ++i) { 6108 struct bnx_intr_data *intr = &sc->bnx_intr_data[i]; 6109 6110 if (intr->bnx_intr_res != NULL) { 6111 bus_release_resource(sc->bnx_dev, SYS_RES_IRQ, 6112 intr->bnx_intr_rid, intr->bnx_intr_res); 6113 } 6114 if (intr->bnx_intr_rid >= 0) { 6115 pci_release_msix_vector(sc->bnx_dev, 6116 intr->bnx_intr_rid); 6117 } 6118 } 6119 if (setup) 6120 pci_teardown_msix(sc->bnx_dev); 6121 } 6122 6123 static void 6124 bnx_rx_std_refill_sched_ipi(void *xret) 6125 { 6126 struct bnx_rx_ret_ring *ret = xret; 6127 struct bnx_rx_std_ring *std = ret->bnx_std; 6128 struct globaldata *gd = mycpu; 6129 6130 crit_enter_gd(gd); 6131 6132 atomic_set_int(&std->bnx_rx_std_refill, ret->bnx_rx_mask); 6133 cpu_sfence(); 6134 6135 KKASSERT(std->bnx_rx_std_ithread.td_gd == 
gd); 6136 lwkt_schedule(&std->bnx_rx_std_ithread); 6137 6138 crit_exit_gd(gd); 6139 } 6140 6141 static void 6142 bnx_rx_std_refill_stop(void *xstd) 6143 { 6144 struct bnx_rx_std_ring *std = xstd; 6145 struct globaldata *gd = mycpu; 6146 6147 crit_enter_gd(gd); 6148 6149 std->bnx_rx_std_stop = 1; 6150 cpu_sfence(); 6151 6152 KKASSERT(std->bnx_rx_std_ithread.td_gd == gd); 6153 lwkt_schedule(&std->bnx_rx_std_ithread); 6154 6155 crit_exit_gd(gd); 6156 } 6157 6158 static void 6159 bnx_serialize_skipmain(struct bnx_softc *sc) 6160 { 6161 lwkt_serialize_array_enter(sc->bnx_serialize, 6162 sc->bnx_serialize_cnt, 1); 6163 } 6164 6165 static void 6166 bnx_deserialize_skipmain(struct bnx_softc *sc) 6167 { 6168 lwkt_serialize_array_exit(sc->bnx_serialize, 6169 sc->bnx_serialize_cnt, 1); 6170 } 6171 6172 static void 6173 bnx_rx_std_refill_sched(struct bnx_rx_ret_ring *ret, 6174 struct bnx_rx_std_ring *std) 6175 { 6176 struct globaldata *gd = mycpu; 6177 6178 ret->bnx_rx_cnt = 0; 6179 cpu_sfence(); 6180 6181 crit_enter_gd(gd); 6182 6183 atomic_set_int(&std->bnx_rx_std_refill, ret->bnx_rx_mask); 6184 cpu_sfence(); 6185 if (atomic_poll_acquire_int(&std->bnx_rx_std_running)) { 6186 if (std->bnx_rx_std_ithread.td_gd == gd) { 6187 lwkt_schedule(&std->bnx_rx_std_ithread); 6188 } else { 6189 lwkt_send_ipiq( 6190 std->bnx_rx_std_ithread.td_gd, 6191 bnx_rx_std_refill_sched_ipi, ret); 6192 } 6193 } 6194 6195 crit_exit_gd(gd); 6196 } 6197 6198 static struct pktinfo * 6199 bnx_rss_info(struct pktinfo *pi, const struct bge_rx_bd *cur_rx) 6200 { 6201 /* Don't pick up IPv6 packet */ 6202 if (cur_rx->bge_flags & BGE_RXBDFLAG_IPV6) 6203 return NULL; 6204 6205 /* Don't pick up IP packet w/o IP checksum */ 6206 if ((cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) == 0 || 6207 (cur_rx->bge_error_flag & BGE_RXERRFLAG_IP_CSUM_NOK)) 6208 return NULL; 6209 6210 /* Don't pick up IP packet w/o TCP/UDP checksum */ 6211 if ((cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) == 0) 6212 return NULL; 6213 6214 /* May 
be IP fragment */ 6215 if (cur_rx->bge_tcp_udp_csum != 0xffff) 6216 return NULL; 6217 6218 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_IS_TCP) 6219 pi->pi_l3proto = IPPROTO_TCP; 6220 else 6221 pi->pi_l3proto = IPPROTO_UDP; 6222 pi->pi_netisr = NETISR_IP; 6223 pi->pi_flags = 0; 6224 6225 return pi; 6226 } 6227 6228 static void 6229 bnx_sig_pre_reset(struct bnx_softc *sc, int type) 6230 { 6231 if (type == BNX_RESET_START || type == BNX_RESET_SUSPEND) 6232 bnx_ape_driver_state_change(sc, type); 6233 } 6234 6235 static void 6236 bnx_sig_post_reset(struct bnx_softc *sc, int type) 6237 { 6238 if (type == BNX_RESET_SHUTDOWN) 6239 bnx_ape_driver_state_change(sc, type); 6240 } 6241 6242 /* 6243 * Clear all stale locks and select the lock for this driver instance. 6244 */ 6245 static void 6246 bnx_ape_lock_init(struct bnx_softc *sc) 6247 { 6248 uint32_t bit, regbase; 6249 int i; 6250 6251 regbase = BGE_APE_PER_LOCK_GRANT; 6252 6253 /* Clear any stale locks. */ 6254 for (i = BGE_APE_LOCK_PHY0; i <= BGE_APE_LOCK_GPIO; i++) { 6255 switch (i) { 6256 case BGE_APE_LOCK_PHY0: 6257 case BGE_APE_LOCK_PHY1: 6258 case BGE_APE_LOCK_PHY2: 6259 case BGE_APE_LOCK_PHY3: 6260 bit = BGE_APE_LOCK_GRANT_DRIVER0; 6261 break; 6262 6263 default: 6264 if (sc->bnx_func_addr == 0) 6265 bit = BGE_APE_LOCK_GRANT_DRIVER0; 6266 else 6267 bit = 1 << sc->bnx_func_addr; 6268 break; 6269 } 6270 APE_WRITE_4(sc, regbase + 4 * i, bit); 6271 } 6272 6273 /* Select the PHY lock based on the device's function number. 
*/ 6274 switch (sc->bnx_func_addr) { 6275 case 0: 6276 sc->bnx_phy_ape_lock = BGE_APE_LOCK_PHY0; 6277 break; 6278 6279 case 1: 6280 sc->bnx_phy_ape_lock = BGE_APE_LOCK_PHY1; 6281 break; 6282 6283 case 2: 6284 sc->bnx_phy_ape_lock = BGE_APE_LOCK_PHY2; 6285 break; 6286 6287 case 3: 6288 sc->bnx_phy_ape_lock = BGE_APE_LOCK_PHY3; 6289 break; 6290 6291 default: 6292 device_printf(sc->bnx_dev, 6293 "PHY lock not supported on this function\n"); 6294 break; 6295 } 6296 } 6297 6298 /* 6299 * Check for APE firmware, set flags, and print version info. 6300 */ 6301 static void 6302 bnx_ape_read_fw_ver(struct bnx_softc *sc) 6303 { 6304 const char *fwtype; 6305 uint32_t apedata, features; 6306 6307 /* Check for a valid APE signature in shared memory. */ 6308 apedata = APE_READ_4(sc, BGE_APE_SEG_SIG); 6309 if (apedata != BGE_APE_SEG_SIG_MAGIC) { 6310 device_printf(sc->bnx_dev, "no APE signature\n"); 6311 sc->bnx_mfw_flags &= ~BNX_MFW_ON_APE; 6312 return; 6313 } 6314 6315 /* Check if APE firmware is running. */ 6316 apedata = APE_READ_4(sc, BGE_APE_FW_STATUS); 6317 if ((apedata & BGE_APE_FW_STATUS_READY) == 0) { 6318 device_printf(sc->bnx_dev, "APE signature found " 6319 "but FW status not ready! 0x%08x\n", apedata); 6320 return; 6321 } 6322 6323 sc->bnx_mfw_flags |= BNX_MFW_ON_APE; 6324 6325 /* Fetch the APE firwmare type and version. */ 6326 apedata = APE_READ_4(sc, BGE_APE_FW_VERSION); 6327 features = APE_READ_4(sc, BGE_APE_FW_FEATURES); 6328 if (features & BGE_APE_FW_FEATURE_NCSI) { 6329 sc->bnx_mfw_flags |= BNX_MFW_TYPE_NCSI; 6330 fwtype = "NCSI"; 6331 } else if (features & BGE_APE_FW_FEATURE_DASH) { 6332 sc->bnx_mfw_flags |= BNX_MFW_TYPE_DASH; 6333 fwtype = "DASH"; 6334 } else { 6335 fwtype = "UNKN"; 6336 } 6337 6338 /* Print the APE firmware version. 
*/ 6339 device_printf(sc->bnx_dev, "APE FW version: %s v%d.%d.%d.%d\n", 6340 fwtype, 6341 (apedata & BGE_APE_FW_VERSION_MAJMSK) >> BGE_APE_FW_VERSION_MAJSFT, 6342 (apedata & BGE_APE_FW_VERSION_MINMSK) >> BGE_APE_FW_VERSION_MINSFT, 6343 (apedata & BGE_APE_FW_VERSION_REVMSK) >> BGE_APE_FW_VERSION_REVSFT, 6344 (apedata & BGE_APE_FW_VERSION_BLDMSK)); 6345 } 6346 6347 static int 6348 bnx_ape_lock(struct bnx_softc *sc, int locknum) 6349 { 6350 uint32_t bit, gnt, req, status; 6351 int i, off; 6352 6353 if ((sc->bnx_mfw_flags & BNX_MFW_ON_APE) == 0) 6354 return 0; 6355 6356 /* Lock request/grant registers have different bases. */ 6357 req = BGE_APE_PER_LOCK_REQ; 6358 gnt = BGE_APE_PER_LOCK_GRANT; 6359 6360 off = 4 * locknum; 6361 6362 switch (locknum) { 6363 case BGE_APE_LOCK_GPIO: 6364 /* Lock required when using GPIO. */ 6365 if (sc->bnx_func_addr == 0) 6366 bit = BGE_APE_LOCK_REQ_DRIVER0; 6367 else 6368 bit = 1 << sc->bnx_func_addr; 6369 break; 6370 6371 case BGE_APE_LOCK_GRC: 6372 /* Lock required to reset the device. */ 6373 if (sc->bnx_func_addr == 0) 6374 bit = BGE_APE_LOCK_REQ_DRIVER0; 6375 else 6376 bit = 1 << sc->bnx_func_addr; 6377 break; 6378 6379 case BGE_APE_LOCK_MEM: 6380 /* Lock required when accessing certain APE memory. */ 6381 if (sc->bnx_func_addr == 0) 6382 bit = BGE_APE_LOCK_REQ_DRIVER0; 6383 else 6384 bit = 1 << sc->bnx_func_addr; 6385 break; 6386 6387 case BGE_APE_LOCK_PHY0: 6388 case BGE_APE_LOCK_PHY1: 6389 case BGE_APE_LOCK_PHY2: 6390 case BGE_APE_LOCK_PHY3: 6391 /* Lock required when accessing PHYs. */ 6392 bit = BGE_APE_LOCK_REQ_DRIVER0; 6393 break; 6394 6395 default: 6396 return EINVAL; 6397 } 6398 6399 /* Request a lock. */ 6400 APE_WRITE_4(sc, req + off, bit); 6401 6402 /* Wait up to 1 second to acquire lock. */ 6403 for (i = 0; i < 20000; i++) { 6404 status = APE_READ_4(sc, gnt + off); 6405 if (status == bit) 6406 break; 6407 DELAY(50); 6408 } 6409 6410 /* Handle any errors. 
*/ 6411 if (status != bit) { 6412 if_printf(&sc->arpcom.ac_if, "APE lock %d request failed! " 6413 "request = 0x%04x[0x%04x], status = 0x%04x[0x%04x]\n", 6414 locknum, req + off, bit & 0xFFFF, gnt + off, 6415 status & 0xFFFF); 6416 /* Revoke the lock request. */ 6417 APE_WRITE_4(sc, gnt + off, bit); 6418 return EBUSY; 6419 } 6420 6421 return 0; 6422 } 6423 6424 static void 6425 bnx_ape_unlock(struct bnx_softc *sc, int locknum) 6426 { 6427 uint32_t bit, gnt; 6428 int off; 6429 6430 if ((sc->bnx_mfw_flags & BNX_MFW_ON_APE) == 0) 6431 return; 6432 6433 gnt = BGE_APE_PER_LOCK_GRANT; 6434 6435 off = 4 * locknum; 6436 6437 switch (locknum) { 6438 case BGE_APE_LOCK_GPIO: 6439 if (sc->bnx_func_addr == 0) 6440 bit = BGE_APE_LOCK_GRANT_DRIVER0; 6441 else 6442 bit = 1 << sc->bnx_func_addr; 6443 break; 6444 6445 case BGE_APE_LOCK_GRC: 6446 if (sc->bnx_func_addr == 0) 6447 bit = BGE_APE_LOCK_GRANT_DRIVER0; 6448 else 6449 bit = 1 << sc->bnx_func_addr; 6450 break; 6451 6452 case BGE_APE_LOCK_MEM: 6453 if (sc->bnx_func_addr == 0) 6454 bit = BGE_APE_LOCK_GRANT_DRIVER0; 6455 else 6456 bit = 1 << sc->bnx_func_addr; 6457 break; 6458 6459 case BGE_APE_LOCK_PHY0: 6460 case BGE_APE_LOCK_PHY1: 6461 case BGE_APE_LOCK_PHY2: 6462 case BGE_APE_LOCK_PHY3: 6463 bit = BGE_APE_LOCK_GRANT_DRIVER0; 6464 break; 6465 6466 default: 6467 return; 6468 } 6469 6470 APE_WRITE_4(sc, gnt + off, bit); 6471 } 6472 6473 /* 6474 * Send an event to the APE firmware. 6475 */ 6476 static void 6477 bnx_ape_send_event(struct bnx_softc *sc, uint32_t event) 6478 { 6479 uint32_t apedata; 6480 int i; 6481 6482 /* NCSI does not support APE events. */ 6483 if ((sc->bnx_mfw_flags & BNX_MFW_ON_APE) == 0) 6484 return; 6485 6486 /* Wait up to 1ms for APE to service previous event. 
*/ 6487 for (i = 10; i > 0; i--) { 6488 if (bnx_ape_lock(sc, BGE_APE_LOCK_MEM) != 0) 6489 break; 6490 apedata = APE_READ_4(sc, BGE_APE_EVENT_STATUS); 6491 if ((apedata & BGE_APE_EVENT_STATUS_EVENT_PENDING) == 0) { 6492 APE_WRITE_4(sc, BGE_APE_EVENT_STATUS, event | 6493 BGE_APE_EVENT_STATUS_EVENT_PENDING); 6494 bnx_ape_unlock(sc, BGE_APE_LOCK_MEM); 6495 APE_WRITE_4(sc, BGE_APE_EVENT, BGE_APE_EVENT_1); 6496 break; 6497 } 6498 bnx_ape_unlock(sc, BGE_APE_LOCK_MEM); 6499 DELAY(100); 6500 } 6501 if (i == 0) { 6502 if_printf(&sc->arpcom.ac_if, 6503 "APE event 0x%08x send timed out\n", event); 6504 } 6505 } 6506 6507 static void 6508 bnx_ape_driver_state_change(struct bnx_softc *sc, int kind) 6509 { 6510 uint32_t apedata, event; 6511 6512 if ((sc->bnx_mfw_flags & BNX_MFW_ON_APE) == 0) 6513 return; 6514 6515 switch (kind) { 6516 case BNX_RESET_START: 6517 /* If this is the first load, clear the load counter. */ 6518 apedata = APE_READ_4(sc, BGE_APE_HOST_SEG_SIG); 6519 if (apedata != BGE_APE_HOST_SEG_SIG_MAGIC) { 6520 APE_WRITE_4(sc, BGE_APE_HOST_INIT_COUNT, 0); 6521 } else { 6522 apedata = APE_READ_4(sc, BGE_APE_HOST_INIT_COUNT); 6523 APE_WRITE_4(sc, BGE_APE_HOST_INIT_COUNT, ++apedata); 6524 } 6525 APE_WRITE_4(sc, BGE_APE_HOST_SEG_SIG, 6526 BGE_APE_HOST_SEG_SIG_MAGIC); 6527 APE_WRITE_4(sc, BGE_APE_HOST_SEG_LEN, 6528 BGE_APE_HOST_SEG_LEN_MAGIC); 6529 6530 /* Add some version info if bnx(4) supports it. 
*/ 6531 APE_WRITE_4(sc, BGE_APE_HOST_DRIVER_ID, 6532 BGE_APE_HOST_DRIVER_ID_MAGIC(1, 0)); 6533 APE_WRITE_4(sc, BGE_APE_HOST_BEHAVIOR, 6534 BGE_APE_HOST_BEHAV_NO_PHYLOCK); 6535 APE_WRITE_4(sc, BGE_APE_HOST_HEARTBEAT_INT_MS, 6536 BGE_APE_HOST_HEARTBEAT_INT_DISABLE); 6537 APE_WRITE_4(sc, BGE_APE_HOST_DRVR_STATE, 6538 BGE_APE_HOST_DRVR_STATE_START); 6539 event = BGE_APE_EVENT_STATUS_STATE_START; 6540 break; 6541 6542 case BNX_RESET_SHUTDOWN: 6543 APE_WRITE_4(sc, BGE_APE_HOST_DRVR_STATE, 6544 BGE_APE_HOST_DRVR_STATE_UNLOAD); 6545 event = BGE_APE_EVENT_STATUS_STATE_UNLOAD; 6546 break; 6547 6548 case BNX_RESET_SUSPEND: 6549 event = BGE_APE_EVENT_STATUS_STATE_SUSPEND; 6550 break; 6551 6552 default: 6553 return; 6554 } 6555 6556 bnx_ape_send_event(sc, event | BGE_APE_EVENT_STATUS_DRIVER_EVNT | 6557 BGE_APE_EVENT_STATUS_STATE_CHNGE); 6558 } 6559