1 /* 2 * Copyright (c) 2001 Wind River Systems 3 * Copyright (c) 1997, 1998, 1999, 2001 4 * Bill Paul <wpaul@windriver.com>. All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 3. All advertising materials mentioning features or use of this software 15 * must display the following acknowledgement: 16 * This product includes software developed by Bill Paul. 17 * 4. Neither the name of the author nor the names of any co-contributors 18 * may be used to endorse or promote products derived from this software 19 * without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 24 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 31 * THE POSSIBILITY OF SUCH DAMAGE. 
32 * 33 * $FreeBSD: src/sys/dev/bge/if_bge.c,v 1.3.2.39 2005/07/03 03:41:18 silby Exp $ 34 */ 35 36 #include "opt_bnx.h" 37 #include "opt_ifpoll.h" 38 39 #include <sys/param.h> 40 #include <sys/bus.h> 41 #include <sys/endian.h> 42 #include <sys/kernel.h> 43 #include <sys/interrupt.h> 44 #include <sys/mbuf.h> 45 #include <sys/malloc.h> 46 #include <sys/queue.h> 47 #include <sys/rman.h> 48 #include <sys/serialize.h> 49 #include <sys/socket.h> 50 #include <sys/sockio.h> 51 #include <sys/sysctl.h> 52 53 #include <netinet/ip.h> 54 #include <netinet/tcp.h> 55 56 #include <net/bpf.h> 57 #include <net/ethernet.h> 58 #include <net/if.h> 59 #include <net/if_arp.h> 60 #include <net/if_dl.h> 61 #include <net/if_media.h> 62 #include <net/if_poll.h> 63 #include <net/if_types.h> 64 #include <net/ifq_var.h> 65 #include <net/toeplitz.h> 66 #include <net/toeplitz2.h> 67 #include <net/vlan/if_vlan_var.h> 68 #include <net/vlan/if_vlan_ether.h> 69 70 #include <dev/netif/mii_layer/mii.h> 71 #include <dev/netif/mii_layer/miivar.h> 72 #include <dev/netif/mii_layer/brgphyreg.h> 73 74 #include "pcidevs.h" 75 #include <bus/pci/pcireg.h> 76 #include <bus/pci/pcivar.h> 77 78 #include <dev/netif/bge/if_bgereg.h> 79 #include <dev/netif/bnx/if_bnxvar.h> 80 81 /* "device miibus" required. See GENERIC if you get errors here. */ 82 #include "miibus_if.h" 83 84 #define BNX_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) 85 86 #define BNX_RESET_SHUTDOWN 0 87 #define BNX_RESET_START 1 88 #define BNX_RESET_SUSPEND 2 89 90 #define BNX_INTR_CKINTVL ((10 * hz) / 1000) /* 10ms */ 91 92 #ifdef BNX_RSS_DEBUG 93 #define BNX_RSS_DPRINTF(sc, lvl, fmt, ...) \ 94 do { \ 95 if (sc->bnx_rss_debug >= lvl) \ 96 if_printf(&sc->arpcom.ac_if, fmt, __VA_ARGS__); \ 97 } while (0) 98 #else /* !BNX_RSS_DEBUG */ 99 #define BNX_RSS_DPRINTF(sc, lvl, fmt, ...) 
((void)0) 100 #endif /* BNX_RSS_DEBUG */ 101 102 static const struct bnx_type { 103 uint16_t bnx_vid; 104 uint16_t bnx_did; 105 char *bnx_name; 106 } bnx_devs[] = { 107 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5717, 108 "Broadcom BCM5717 Gigabit Ethernet" }, 109 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5717C, 110 "Broadcom BCM5717C Gigabit Ethernet" }, 111 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5718, 112 "Broadcom BCM5718 Gigabit Ethernet" }, 113 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5719, 114 "Broadcom BCM5719 Gigabit Ethernet" }, 115 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5720_ALT, 116 "Broadcom BCM5720 Gigabit Ethernet" }, 117 118 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5725, 119 "Broadcom BCM5725 Gigabit Ethernet" }, 120 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5727, 121 "Broadcom BCM5727 Gigabit Ethernet" }, 122 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5762, 123 "Broadcom BCM5762 Gigabit Ethernet" }, 124 125 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57761, 126 "Broadcom BCM57761 Gigabit Ethernet" }, 127 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57762, 128 "Broadcom BCM57762 Gigabit Ethernet" }, 129 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57765, 130 "Broadcom BCM57765 Gigabit Ethernet" }, 131 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57766, 132 "Broadcom BCM57766 Gigabit Ethernet" }, 133 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57781, 134 "Broadcom BCM57781 Gigabit Ethernet" }, 135 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57782, 136 "Broadcom BCM57782 Gigabit Ethernet" }, 137 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57785, 138 "Broadcom BCM57785 Gigabit Ethernet" }, 139 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57786, 140 "Broadcom BCM57786 Gigabit Ethernet" }, 141 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57791, 142 "Broadcom BCM57791 Fast Ethernet" }, 143 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57795, 144 "Broadcom BCM57795 Fast Ethernet" }, 145 
146 { 0, 0, NULL } 147 }; 148 149 static const int bnx_tx_mailbox[BNX_TX_RING_MAX] = { 150 BGE_MBX_TX_HOST_PROD0_LO, 151 BGE_MBX_TX_HOST_PROD0_HI, 152 BGE_MBX_TX_HOST_PROD1_LO, 153 BGE_MBX_TX_HOST_PROD1_HI 154 }; 155 156 #define BNX_IS_JUMBO_CAPABLE(sc) ((sc)->bnx_flags & BNX_FLAG_JUMBO) 157 #define BNX_IS_5717_PLUS(sc) ((sc)->bnx_flags & BNX_FLAG_5717_PLUS) 158 #define BNX_IS_57765_PLUS(sc) ((sc)->bnx_flags & BNX_FLAG_57765_PLUS) 159 #define BNX_IS_57765_FAMILY(sc) \ 160 ((sc)->bnx_flags & BNX_FLAG_57765_FAMILY) 161 162 typedef int (*bnx_eaddr_fcn_t)(struct bnx_softc *, uint8_t[]); 163 164 static int bnx_probe(device_t); 165 static int bnx_attach(device_t); 166 static int bnx_detach(device_t); 167 static void bnx_shutdown(device_t); 168 static int bnx_suspend(device_t); 169 static int bnx_resume(device_t); 170 static int bnx_miibus_readreg(device_t, int, int); 171 static int bnx_miibus_writereg(device_t, int, int, int); 172 static void bnx_miibus_statchg(device_t); 173 174 static int bnx_handle_status(struct bnx_softc *); 175 #ifdef IFPOLL_ENABLE 176 static void bnx_npoll(struct ifnet *, struct ifpoll_info *); 177 static void bnx_npoll_rx(struct ifnet *, void *, int); 178 static void bnx_npoll_tx(struct ifnet *, void *, int); 179 static void bnx_npoll_tx_notag(struct ifnet *, void *, int); 180 static void bnx_npoll_status(struct ifnet *); 181 static void bnx_npoll_status_notag(struct ifnet *); 182 #endif 183 static void bnx_intr_legacy(void *); 184 static void bnx_msi(void *); 185 static void bnx_intr(struct bnx_softc *); 186 static void bnx_msix_status(void *); 187 static void bnx_msix_tx_status(void *); 188 static void bnx_msix_rx(void *); 189 static void bnx_msix_rxtx(void *); 190 static void bnx_enable_intr(struct bnx_softc *); 191 static void bnx_disable_intr(struct bnx_softc *); 192 static void bnx_txeof(struct bnx_tx_ring *, uint16_t); 193 static void bnx_rxeof(struct bnx_rx_ret_ring *, uint16_t, int); 194 static int bnx_alloc_intr(struct bnx_softc *); 195 
static int bnx_setup_intr(struct bnx_softc *); 196 static void bnx_free_intr(struct bnx_softc *); 197 static void bnx_teardown_intr(struct bnx_softc *, int); 198 static int bnx_alloc_msix(struct bnx_softc *); 199 static void bnx_free_msix(struct bnx_softc *, boolean_t); 200 static void bnx_check_intr_rxtx(void *); 201 static void bnx_check_intr_rx(void *); 202 static void bnx_check_intr_tx(void *); 203 static void bnx_rx_std_refill_ithread(void *); 204 static void bnx_rx_std_refill(void *, void *); 205 static void bnx_rx_std_refill_sched_ipi(void *); 206 static void bnx_rx_std_refill_stop(void *); 207 static void bnx_rx_std_refill_sched(struct bnx_rx_ret_ring *, 208 struct bnx_rx_std_ring *); 209 210 static void bnx_start(struct ifnet *, struct ifaltq_subque *); 211 static int bnx_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *); 212 static void bnx_init(void *); 213 static void bnx_stop(struct bnx_softc *); 214 static void bnx_watchdog(struct ifaltq_subque *); 215 static int bnx_ifmedia_upd(struct ifnet *); 216 static void bnx_ifmedia_sts(struct ifnet *, struct ifmediareq *); 217 static void bnx_tick(void *); 218 static void bnx_serialize(struct ifnet *, enum ifnet_serialize); 219 static void bnx_deserialize(struct ifnet *, enum ifnet_serialize); 220 static int bnx_tryserialize(struct ifnet *, enum ifnet_serialize); 221 #ifdef INVARIANTS 222 static void bnx_serialize_assert(struct ifnet *, enum ifnet_serialize, 223 boolean_t); 224 #endif 225 static void bnx_serialize_skipmain(struct bnx_softc *); 226 static void bnx_deserialize_skipmain(struct bnx_softc *sc); 227 228 static int bnx_alloc_jumbo_mem(struct bnx_softc *); 229 static void bnx_free_jumbo_mem(struct bnx_softc *); 230 static struct bnx_jslot 231 *bnx_jalloc(struct bnx_softc *); 232 static void bnx_jfree(void *); 233 static void bnx_jref(void *); 234 static int bnx_newbuf_std(struct bnx_rx_ret_ring *, int, int); 235 static int bnx_newbuf_jumbo(struct bnx_softc *, int, int); 236 static void 
bnx_setup_rxdesc_std(struct bnx_rx_std_ring *, int); 237 static void bnx_setup_rxdesc_jumbo(struct bnx_softc *, int); 238 static int bnx_init_rx_ring_std(struct bnx_rx_std_ring *); 239 static void bnx_free_rx_ring_std(struct bnx_rx_std_ring *); 240 static int bnx_init_rx_ring_jumbo(struct bnx_softc *); 241 static void bnx_free_rx_ring_jumbo(struct bnx_softc *); 242 static void bnx_free_tx_ring(struct bnx_tx_ring *); 243 static int bnx_init_tx_ring(struct bnx_tx_ring *); 244 static int bnx_create_tx_ring(struct bnx_tx_ring *); 245 static void bnx_destroy_tx_ring(struct bnx_tx_ring *); 246 static int bnx_create_rx_ret_ring(struct bnx_rx_ret_ring *); 247 static void bnx_destroy_rx_ret_ring(struct bnx_rx_ret_ring *); 248 static int bnx_dma_alloc(device_t); 249 static void bnx_dma_free(struct bnx_softc *); 250 static int bnx_dma_block_alloc(struct bnx_softc *, bus_size_t, 251 bus_dma_tag_t *, bus_dmamap_t *, void **, bus_addr_t *); 252 static void bnx_dma_block_free(bus_dma_tag_t, bus_dmamap_t, void *); 253 static struct mbuf * 254 bnx_defrag_shortdma(struct mbuf *); 255 static int bnx_encap(struct bnx_tx_ring *, struct mbuf **, 256 uint32_t *, int *); 257 static int bnx_setup_tso(struct bnx_tx_ring *, struct mbuf **, 258 uint16_t *, uint16_t *); 259 static void bnx_setup_serialize(struct bnx_softc *); 260 static void bnx_set_tick_cpuid(struct bnx_softc *, boolean_t); 261 static void bnx_setup_ring_cnt(struct bnx_softc *); 262 263 static struct pktinfo *bnx_rss_info(struct pktinfo *, 264 const struct bge_rx_bd *); 265 static void bnx_init_rss(struct bnx_softc *); 266 static void bnx_reset(struct bnx_softc *); 267 static int bnx_chipinit(struct bnx_softc *); 268 static int bnx_blockinit(struct bnx_softc *); 269 static void bnx_stop_block(struct bnx_softc *, bus_size_t, uint32_t); 270 static void bnx_enable_msi(struct bnx_softc *, boolean_t); 271 static void bnx_setmulti(struct bnx_softc *); 272 static void bnx_setpromisc(struct bnx_softc *); 273 static void 
bnx_stats_update_regs(struct bnx_softc *); 274 static uint32_t bnx_dma_swap_options(struct bnx_softc *); 275 276 static uint32_t bnx_readmem_ind(struct bnx_softc *, uint32_t); 277 static void bnx_writemem_ind(struct bnx_softc *, uint32_t, uint32_t); 278 #ifdef notdef 279 static uint32_t bnx_readreg_ind(struct bnx_softc *, uint32_t); 280 #endif 281 static void bnx_writemem_direct(struct bnx_softc *, uint32_t, uint32_t); 282 static void bnx_writembx(struct bnx_softc *, int, int); 283 static int bnx_read_nvram(struct bnx_softc *, caddr_t, int, int); 284 static uint8_t bnx_eeprom_getbyte(struct bnx_softc *, uint32_t, uint8_t *); 285 static int bnx_read_eeprom(struct bnx_softc *, caddr_t, uint32_t, size_t); 286 287 static void bnx_tbi_link_upd(struct bnx_softc *, uint32_t); 288 static void bnx_copper_link_upd(struct bnx_softc *, uint32_t); 289 static void bnx_autopoll_link_upd(struct bnx_softc *, uint32_t); 290 static void bnx_link_poll(struct bnx_softc *); 291 292 static int bnx_get_eaddr_mem(struct bnx_softc *, uint8_t[]); 293 static int bnx_get_eaddr_nvram(struct bnx_softc *, uint8_t[]); 294 static int bnx_get_eaddr_eeprom(struct bnx_softc *, uint8_t[]); 295 static int bnx_get_eaddr(struct bnx_softc *, uint8_t[]); 296 297 static void bnx_coal_change(struct bnx_softc *); 298 static int bnx_sysctl_force_defrag(SYSCTL_HANDLER_ARGS); 299 static int bnx_sysctl_tx_wreg(SYSCTL_HANDLER_ARGS); 300 static int bnx_sysctl_rx_coal_ticks(SYSCTL_HANDLER_ARGS); 301 static int bnx_sysctl_tx_coal_ticks(SYSCTL_HANDLER_ARGS); 302 static int bnx_sysctl_rx_coal_bds(SYSCTL_HANDLER_ARGS); 303 static int bnx_sysctl_rx_coal_bds_poll(SYSCTL_HANDLER_ARGS); 304 static int bnx_sysctl_tx_coal_bds(SYSCTL_HANDLER_ARGS); 305 static int bnx_sysctl_tx_coal_bds_poll(SYSCTL_HANDLER_ARGS); 306 static int bnx_sysctl_rx_coal_bds_int(SYSCTL_HANDLER_ARGS); 307 static int bnx_sysctl_tx_coal_bds_int(SYSCTL_HANDLER_ARGS); 308 static int bnx_sysctl_coal_chg(SYSCTL_HANDLER_ARGS, uint32_t *, 309 int, int, 
uint32_t); 310 #ifdef IFPOLL_ENABLE 311 static int bnx_sysctl_npoll_offset(SYSCTL_HANDLER_ARGS); 312 static int bnx_sysctl_npoll_rxoff(SYSCTL_HANDLER_ARGS); 313 static int bnx_sysctl_npoll_txoff(SYSCTL_HANDLER_ARGS); 314 #endif 315 static int bnx_sysctl_std_refill(SYSCTL_HANDLER_ARGS); 316 317 static void bnx_sig_post_reset(struct bnx_softc *, int); 318 static void bnx_sig_pre_reset(struct bnx_softc *, int); 319 static void bnx_ape_lock_init(struct bnx_softc *); 320 static void bnx_ape_read_fw_ver(struct bnx_softc *); 321 static int bnx_ape_lock(struct bnx_softc *, int); 322 static void bnx_ape_unlock(struct bnx_softc *, int); 323 static void bnx_ape_send_event(struct bnx_softc *, uint32_t); 324 static void bnx_ape_driver_state_change(struct bnx_softc *, int); 325 326 static int bnx_msi_enable = 1; 327 static int bnx_msix_enable = 1; 328 329 static int bnx_rx_rings = 0; /* auto */ 330 static int bnx_tx_rings = 0; /* auto */ 331 332 TUNABLE_INT("hw.bnx.msi.enable", &bnx_msi_enable); 333 TUNABLE_INT("hw.bnx.msix.enable", &bnx_msix_enable); 334 TUNABLE_INT("hw.bnx.rx_rings", &bnx_rx_rings); 335 TUNABLE_INT("hw.bnx.tx_rings", &bnx_tx_rings); 336 337 static device_method_t bnx_methods[] = { 338 /* Device interface */ 339 DEVMETHOD(device_probe, bnx_probe), 340 DEVMETHOD(device_attach, bnx_attach), 341 DEVMETHOD(device_detach, bnx_detach), 342 DEVMETHOD(device_shutdown, bnx_shutdown), 343 DEVMETHOD(device_suspend, bnx_suspend), 344 DEVMETHOD(device_resume, bnx_resume), 345 346 /* bus interface */ 347 DEVMETHOD(bus_print_child, bus_generic_print_child), 348 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 349 350 /* MII interface */ 351 DEVMETHOD(miibus_readreg, bnx_miibus_readreg), 352 DEVMETHOD(miibus_writereg, bnx_miibus_writereg), 353 DEVMETHOD(miibus_statchg, bnx_miibus_statchg), 354 355 DEVMETHOD_END 356 }; 357 358 static DEFINE_CLASS_0(bnx, bnx_driver, bnx_methods, sizeof(struct bnx_softc)); 359 static devclass_t bnx_devclass; 360 361 
DECLARE_DUMMY_MODULE(if_bnx); 362 MODULE_DEPEND(if_bnx, miibus, 1, 1, 1); 363 DRIVER_MODULE(if_bnx, pci, bnx_driver, bnx_devclass, NULL, NULL); 364 DRIVER_MODULE(miibus, bnx, miibus_driver, miibus_devclass, NULL, NULL); 365 366 static uint32_t 367 bnx_readmem_ind(struct bnx_softc *sc, uint32_t off) 368 { 369 device_t dev = sc->bnx_dev; 370 uint32_t val; 371 372 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4); 373 val = pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4); 374 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4); 375 return (val); 376 } 377 378 static void 379 bnx_writemem_ind(struct bnx_softc *sc, uint32_t off, uint32_t val) 380 { 381 device_t dev = sc->bnx_dev; 382 383 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4); 384 pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4); 385 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4); 386 } 387 388 static void 389 bnx_writemem_direct(struct bnx_softc *sc, uint32_t off, uint32_t val) 390 { 391 CSR_WRITE_4(sc, off, val); 392 } 393 394 static void 395 bnx_writembx(struct bnx_softc *sc, int off, int val) 396 { 397 CSR_WRITE_4(sc, off, val); 398 } 399 400 /* 401 * Read a sequence of bytes from NVRAM. 402 */ 403 static int 404 bnx_read_nvram(struct bnx_softc *sc, caddr_t dest, int off, int cnt) 405 { 406 return (1); 407 } 408 409 /* 410 * Read a byte of data stored in the EEPROM at address 'addr.' The 411 * BCM570x supports both the traditional bitbang interface and an 412 * auto access interface for reading the EEPROM. We use the auto 413 * access method. 414 */ 415 static uint8_t 416 bnx_eeprom_getbyte(struct bnx_softc *sc, uint32_t addr, uint8_t *dest) 417 { 418 int i; 419 uint32_t byte = 0; 420 421 /* 422 * Enable use of auto EEPROM access so we can avoid 423 * having to use the bitbang method. 424 */ 425 BNX_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM); 426 427 /* Reset the EEPROM, load the clock period. 
*/ 428 CSR_WRITE_4(sc, BGE_EE_ADDR, 429 BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL)); 430 DELAY(20); 431 432 /* Issue the read EEPROM command. */ 433 CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr); 434 435 /* Wait for completion */ 436 for(i = 0; i < BNX_TIMEOUT * 10; i++) { 437 DELAY(10); 438 if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE) 439 break; 440 } 441 442 if (i == BNX_TIMEOUT) { 443 if_printf(&sc->arpcom.ac_if, "eeprom read timed out\n"); 444 return(1); 445 } 446 447 /* Get result. */ 448 byte = CSR_READ_4(sc, BGE_EE_DATA); 449 450 *dest = (byte >> ((addr % 4) * 8)) & 0xFF; 451 452 return(0); 453 } 454 455 /* 456 * Read a sequence of bytes from the EEPROM. 457 */ 458 static int 459 bnx_read_eeprom(struct bnx_softc *sc, caddr_t dest, uint32_t off, size_t len) 460 { 461 size_t i; 462 int err; 463 uint8_t byte; 464 465 for (byte = 0, err = 0, i = 0; i < len; i++) { 466 err = bnx_eeprom_getbyte(sc, off + i, &byte); 467 if (err) 468 break; 469 *(dest + i) = byte; 470 } 471 472 return(err ? 1 : 0); 473 } 474 475 static int 476 bnx_miibus_readreg(device_t dev, int phy, int reg) 477 { 478 struct bnx_softc *sc = device_get_softc(dev); 479 uint32_t val; 480 int i; 481 482 KASSERT(phy == sc->bnx_phyno, 483 ("invalid phyno %d, should be %d", phy, sc->bnx_phyno)); 484 485 if (bnx_ape_lock(sc, sc->bnx_phy_ape_lock) != 0) 486 return 0; 487 488 /* Clear the autopoll bit if set, otherwise may trigger PCI errors. */ 489 if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) { 490 CSR_WRITE_4(sc, BGE_MI_MODE, 491 sc->bnx_mi_mode & ~BGE_MIMODE_AUTOPOLL); 492 DELAY(80); 493 } 494 495 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ | BGE_MICOMM_BUSY | 496 BGE_MIPHY(phy) | BGE_MIREG(reg)); 497 498 /* Poll for the PHY register access to complete. 
*/ 499 for (i = 0; i < BNX_TIMEOUT; i++) { 500 DELAY(10); 501 val = CSR_READ_4(sc, BGE_MI_COMM); 502 if ((val & BGE_MICOMM_BUSY) == 0) { 503 DELAY(5); 504 val = CSR_READ_4(sc, BGE_MI_COMM); 505 break; 506 } 507 } 508 if (i == BNX_TIMEOUT) { 509 if_printf(&sc->arpcom.ac_if, "PHY read timed out " 510 "(phy %d, reg %d, val 0x%08x)\n", phy, reg, val); 511 val = 0; 512 } 513 514 /* Restore the autopoll bit if necessary. */ 515 if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) { 516 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bnx_mi_mode); 517 DELAY(80); 518 } 519 520 bnx_ape_unlock(sc, sc->bnx_phy_ape_lock); 521 522 if (val & BGE_MICOMM_READFAIL) 523 return 0; 524 525 return (val & 0xFFFF); 526 } 527 528 static int 529 bnx_miibus_writereg(device_t dev, int phy, int reg, int val) 530 { 531 struct bnx_softc *sc = device_get_softc(dev); 532 int i; 533 534 KASSERT(phy == sc->bnx_phyno, 535 ("invalid phyno %d, should be %d", phy, sc->bnx_phyno)); 536 537 if (bnx_ape_lock(sc, sc->bnx_phy_ape_lock) != 0) 538 return 0; 539 540 /* Clear the autopoll bit if set, otherwise may trigger PCI errors. */ 541 if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) { 542 CSR_WRITE_4(sc, BGE_MI_MODE, 543 sc->bnx_mi_mode & ~BGE_MIMODE_AUTOPOLL); 544 DELAY(80); 545 } 546 547 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE | BGE_MICOMM_BUSY | 548 BGE_MIPHY(phy) | BGE_MIREG(reg) | val); 549 550 for (i = 0; i < BNX_TIMEOUT; i++) { 551 DELAY(10); 552 if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) { 553 DELAY(5); 554 CSR_READ_4(sc, BGE_MI_COMM); /* dummy read */ 555 break; 556 } 557 } 558 if (i == BNX_TIMEOUT) { 559 if_printf(&sc->arpcom.ac_if, "PHY write timed out " 560 "(phy %d, reg %d, val %d)\n", phy, reg, val); 561 } 562 563 /* Restore the autopoll bit if necessary. 
*/ 564 if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) { 565 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bnx_mi_mode); 566 DELAY(80); 567 } 568 569 bnx_ape_unlock(sc, sc->bnx_phy_ape_lock); 570 571 return 0; 572 } 573 574 static void 575 bnx_miibus_statchg(device_t dev) 576 { 577 struct bnx_softc *sc; 578 struct mii_data *mii; 579 uint32_t mac_mode; 580 581 sc = device_get_softc(dev); 582 if ((sc->arpcom.ac_if.if_flags & IFF_RUNNING) == 0) 583 return; 584 585 mii = device_get_softc(sc->bnx_miibus); 586 587 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == 588 (IFM_ACTIVE | IFM_AVALID)) { 589 switch (IFM_SUBTYPE(mii->mii_media_active)) { 590 case IFM_10_T: 591 case IFM_100_TX: 592 sc->bnx_link = 1; 593 break; 594 case IFM_1000_T: 595 case IFM_1000_SX: 596 case IFM_2500_SX: 597 sc->bnx_link = 1; 598 break; 599 default: 600 sc->bnx_link = 0; 601 break; 602 } 603 } else { 604 sc->bnx_link = 0; 605 } 606 if (sc->bnx_link == 0) 607 return; 608 609 /* 610 * APE firmware touches these registers to keep the MAC 611 * connected to the outside world. Try to keep the 612 * accesses atomic. 613 */ 614 615 mac_mode = CSR_READ_4(sc, BGE_MAC_MODE) & 616 ~(BGE_MACMODE_PORTMODE | BGE_MACMODE_HALF_DUPLEX); 617 618 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T || 619 IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) 620 mac_mode |= BGE_PORTMODE_GMII; 621 else 622 mac_mode |= BGE_PORTMODE_MII; 623 624 if ((mii->mii_media_active & IFM_GMASK) != IFM_FDX) 625 mac_mode |= BGE_MACMODE_HALF_DUPLEX; 626 627 CSR_WRITE_4(sc, BGE_MAC_MODE, mac_mode); 628 DELAY(40); 629 } 630 631 /* 632 * Memory management for jumbo frames. 633 */ 634 static int 635 bnx_alloc_jumbo_mem(struct bnx_softc *sc) 636 { 637 struct ifnet *ifp = &sc->arpcom.ac_if; 638 struct bnx_jslot *entry; 639 uint8_t *ptr; 640 bus_addr_t paddr; 641 int i, error; 642 643 /* 644 * Create tag for jumbo mbufs. 645 * This is really a bit of a kludge. 
We allocate a special 646 * jumbo buffer pool which (thanks to the way our DMA 647 * memory allocation works) will consist of contiguous 648 * pages. This means that even though a jumbo buffer might 649 * be larger than a page size, we don't really need to 650 * map it into more than one DMA segment. However, the 651 * default mbuf tag will result in multi-segment mappings, 652 * so we have to create a special jumbo mbuf tag that 653 * lets us get away with mapping the jumbo buffers as 654 * a single segment. I think eventually the driver should 655 * be changed so that it uses ordinary mbufs and cluster 656 * buffers, i.e. jumbo frames can span multiple DMA 657 * descriptors. But that's a project for another day. 658 */ 659 660 /* 661 * Create DMA stuffs for jumbo RX ring. 662 */ 663 error = bnx_dma_block_alloc(sc, BGE_JUMBO_RX_RING_SZ, 664 &sc->bnx_cdata.bnx_rx_jumbo_ring_tag, 665 &sc->bnx_cdata.bnx_rx_jumbo_ring_map, 666 (void *)&sc->bnx_ldata.bnx_rx_jumbo_ring, 667 &sc->bnx_ldata.bnx_rx_jumbo_ring_paddr); 668 if (error) { 669 if_printf(ifp, "could not create jumbo RX ring\n"); 670 return error; 671 } 672 673 /* 674 * Create DMA stuffs for jumbo buffer block. 675 */ 676 error = bnx_dma_block_alloc(sc, BNX_JMEM, 677 &sc->bnx_cdata.bnx_jumbo_tag, 678 &sc->bnx_cdata.bnx_jumbo_map, 679 (void **)&sc->bnx_ldata.bnx_jumbo_buf, 680 &paddr); 681 if (error) { 682 if_printf(ifp, "could not create jumbo buffer\n"); 683 return error; 684 } 685 686 SLIST_INIT(&sc->bnx_jfree_listhead); 687 688 /* 689 * Now divide it up into 9K pieces and save the addresses 690 * in an array. Note that we play an evil trick here by using 691 * the first few bytes in the buffer to hold the the address 692 * of the softc structure for this interface. This is because 693 * bnx_jfree() needs it, but it is called by the mbuf management 694 * code which will not pass it to us explicitly. 
695 */ 696 for (i = 0, ptr = sc->bnx_ldata.bnx_jumbo_buf; i < BNX_JSLOTS; i++) { 697 entry = &sc->bnx_cdata.bnx_jslots[i]; 698 entry->bnx_sc = sc; 699 entry->bnx_buf = ptr; 700 entry->bnx_paddr = paddr; 701 entry->bnx_inuse = 0; 702 entry->bnx_slot = i; 703 SLIST_INSERT_HEAD(&sc->bnx_jfree_listhead, entry, jslot_link); 704 705 ptr += BNX_JLEN; 706 paddr += BNX_JLEN; 707 } 708 return 0; 709 } 710 711 static void 712 bnx_free_jumbo_mem(struct bnx_softc *sc) 713 { 714 /* Destroy jumbo RX ring. */ 715 bnx_dma_block_free(sc->bnx_cdata.bnx_rx_jumbo_ring_tag, 716 sc->bnx_cdata.bnx_rx_jumbo_ring_map, 717 sc->bnx_ldata.bnx_rx_jumbo_ring); 718 719 /* Destroy jumbo buffer block. */ 720 bnx_dma_block_free(sc->bnx_cdata.bnx_jumbo_tag, 721 sc->bnx_cdata.bnx_jumbo_map, 722 sc->bnx_ldata.bnx_jumbo_buf); 723 } 724 725 /* 726 * Allocate a jumbo buffer. 727 */ 728 static struct bnx_jslot * 729 bnx_jalloc(struct bnx_softc *sc) 730 { 731 struct bnx_jslot *entry; 732 733 lwkt_serialize_enter(&sc->bnx_jslot_serializer); 734 entry = SLIST_FIRST(&sc->bnx_jfree_listhead); 735 if (entry) { 736 SLIST_REMOVE_HEAD(&sc->bnx_jfree_listhead, jslot_link); 737 entry->bnx_inuse = 1; 738 } else { 739 if_printf(&sc->arpcom.ac_if, "no free jumbo buffers\n"); 740 } 741 lwkt_serialize_exit(&sc->bnx_jslot_serializer); 742 return(entry); 743 } 744 745 /* 746 * Adjust usage count on a jumbo buffer. 747 */ 748 static void 749 bnx_jref(void *arg) 750 { 751 struct bnx_jslot *entry = (struct bnx_jslot *)arg; 752 struct bnx_softc *sc = entry->bnx_sc; 753 754 if (sc == NULL) 755 panic("bnx_jref: can't find softc pointer!"); 756 757 if (&sc->bnx_cdata.bnx_jslots[entry->bnx_slot] != entry) { 758 panic("bnx_jref: asked to reference buffer " 759 "that we don't manage!"); 760 } else if (entry->bnx_inuse == 0) { 761 panic("bnx_jref: buffer already free!"); 762 } else { 763 atomic_add_int(&entry->bnx_inuse, 1); 764 } 765 } 766 767 /* 768 * Release a jumbo buffer. 
769 */ 770 static void 771 bnx_jfree(void *arg) 772 { 773 struct bnx_jslot *entry = (struct bnx_jslot *)arg; 774 struct bnx_softc *sc = entry->bnx_sc; 775 776 if (sc == NULL) 777 panic("bnx_jfree: can't find softc pointer!"); 778 779 if (&sc->bnx_cdata.bnx_jslots[entry->bnx_slot] != entry) { 780 panic("bnx_jfree: asked to free buffer that we don't manage!"); 781 } else if (entry->bnx_inuse == 0) { 782 panic("bnx_jfree: buffer already free!"); 783 } else { 784 /* 785 * Possible MP race to 0, use the serializer. The atomic insn 786 * is still needed for races against bnx_jref(). 787 */ 788 lwkt_serialize_enter(&sc->bnx_jslot_serializer); 789 atomic_subtract_int(&entry->bnx_inuse, 1); 790 if (entry->bnx_inuse == 0) { 791 SLIST_INSERT_HEAD(&sc->bnx_jfree_listhead, 792 entry, jslot_link); 793 } 794 lwkt_serialize_exit(&sc->bnx_jslot_serializer); 795 } 796 } 797 798 799 /* 800 * Intialize a standard receive ring descriptor. 801 */ 802 static int 803 bnx_newbuf_std(struct bnx_rx_ret_ring *ret, int i, int init) 804 { 805 struct mbuf *m_new = NULL; 806 bus_dma_segment_t seg; 807 bus_dmamap_t map; 808 int error, nsegs; 809 struct bnx_rx_buf *rb; 810 811 rb = &ret->bnx_std->bnx_rx_std_buf[i]; 812 KASSERT(!rb->bnx_rx_refilled, ("RX buf %dth has been refilled", i)); 813 814 m_new = m_getcl(init ? 
M_WAITOK : M_NOWAIT, MT_DATA, M_PKTHDR); 815 if (m_new == NULL) { 816 error = ENOBUFS; 817 goto back; 818 } 819 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; 820 m_adj(m_new, ETHER_ALIGN); 821 822 error = bus_dmamap_load_mbuf_segment(ret->bnx_rx_mtag, 823 ret->bnx_rx_tmpmap, m_new, &seg, 1, &nsegs, BUS_DMA_NOWAIT); 824 if (error) { 825 m_freem(m_new); 826 goto back; 827 } 828 829 if (!init) { 830 bus_dmamap_sync(ret->bnx_rx_mtag, rb->bnx_rx_dmamap, 831 BUS_DMASYNC_POSTREAD); 832 bus_dmamap_unload(ret->bnx_rx_mtag, rb->bnx_rx_dmamap); 833 } 834 835 map = ret->bnx_rx_tmpmap; 836 ret->bnx_rx_tmpmap = rb->bnx_rx_dmamap; 837 838 rb->bnx_rx_dmamap = map; 839 rb->bnx_rx_mbuf = m_new; 840 rb->bnx_rx_paddr = seg.ds_addr; 841 rb->bnx_rx_len = m_new->m_len; 842 back: 843 cpu_sfence(); 844 rb->bnx_rx_refilled = 1; 845 return error; 846 } 847 848 static void 849 bnx_setup_rxdesc_std(struct bnx_rx_std_ring *std, int i) 850 { 851 struct bnx_rx_buf *rb; 852 struct bge_rx_bd *r; 853 bus_addr_t paddr; 854 int len; 855 856 rb = &std->bnx_rx_std_buf[i]; 857 KASSERT(rb->bnx_rx_refilled, ("RX buf %dth is not refilled", i)); 858 859 paddr = rb->bnx_rx_paddr; 860 len = rb->bnx_rx_len; 861 862 cpu_mfence(); 863 864 rb->bnx_rx_refilled = 0; 865 866 r = &std->bnx_rx_std_ring[i]; 867 r->bge_addr.bge_addr_lo = BGE_ADDR_LO(paddr); 868 r->bge_addr.bge_addr_hi = BGE_ADDR_HI(paddr); 869 r->bge_len = len; 870 r->bge_idx = i; 871 r->bge_flags = BGE_RXBDFLAG_END; 872 } 873 874 /* 875 * Initialize a jumbo receive ring descriptor. This allocates 876 * a jumbo buffer from the pool managed internally by the driver. 877 */ 878 static int 879 bnx_newbuf_jumbo(struct bnx_softc *sc, int i, int init) 880 { 881 struct mbuf *m_new = NULL; 882 struct bnx_jslot *buf; 883 bus_addr_t paddr; 884 885 /* Allocate the mbuf. */ 886 MGETHDR(m_new, init ? 
M_WAITOK : M_NOWAIT, MT_DATA); 887 if (m_new == NULL) 888 return ENOBUFS; 889 890 /* Allocate the jumbo buffer */ 891 buf = bnx_jalloc(sc); 892 if (buf == NULL) { 893 m_freem(m_new); 894 return ENOBUFS; 895 } 896 897 /* Attach the buffer to the mbuf. */ 898 m_new->m_ext.ext_arg = buf; 899 m_new->m_ext.ext_buf = buf->bnx_buf; 900 m_new->m_ext.ext_free = bnx_jfree; 901 m_new->m_ext.ext_ref = bnx_jref; 902 m_new->m_ext.ext_size = BNX_JUMBO_FRAMELEN; 903 904 m_new->m_flags |= M_EXT; 905 906 m_new->m_data = m_new->m_ext.ext_buf; 907 m_new->m_len = m_new->m_pkthdr.len = m_new->m_ext.ext_size; 908 909 paddr = buf->bnx_paddr; 910 m_adj(m_new, ETHER_ALIGN); 911 paddr += ETHER_ALIGN; 912 913 /* Save necessary information */ 914 sc->bnx_cdata.bnx_rx_jumbo_chain[i].bnx_rx_mbuf = m_new; 915 sc->bnx_cdata.bnx_rx_jumbo_chain[i].bnx_rx_paddr = paddr; 916 917 /* Set up the descriptor. */ 918 bnx_setup_rxdesc_jumbo(sc, i); 919 return 0; 920 } 921 922 static void 923 bnx_setup_rxdesc_jumbo(struct bnx_softc *sc, int i) 924 { 925 struct bge_rx_bd *r; 926 struct bnx_rx_buf *rc; 927 928 r = &sc->bnx_ldata.bnx_rx_jumbo_ring[i]; 929 rc = &sc->bnx_cdata.bnx_rx_jumbo_chain[i]; 930 931 r->bge_addr.bge_addr_lo = BGE_ADDR_LO(rc->bnx_rx_paddr); 932 r->bge_addr.bge_addr_hi = BGE_ADDR_HI(rc->bnx_rx_paddr); 933 r->bge_len = rc->bnx_rx_mbuf->m_len; 934 r->bge_idx = i; 935 r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING; 936 } 937 938 static int 939 bnx_init_rx_ring_std(struct bnx_rx_std_ring *std) 940 { 941 int i, error; 942 943 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { 944 /* Use the first RX return ring's tmp RX mbuf DMA map */ 945 error = bnx_newbuf_std(&std->bnx_sc->bnx_rx_ret_ring[0], i, 1); 946 if (error) 947 return error; 948 bnx_setup_rxdesc_std(std, i); 949 } 950 951 std->bnx_rx_std_used = 0; 952 std->bnx_rx_std_refill = 0; 953 std->bnx_rx_std_running = 0; 954 cpu_sfence(); 955 lwkt_serialize_handler_enable(&std->bnx_rx_std_serialize); 956 957 std->bnx_rx_std = BGE_STD_RX_RING_CNT 
- 1; 958 bnx_writembx(std->bnx_sc, BGE_MBX_RX_STD_PROD_LO, std->bnx_rx_std); 959 960 return(0); 961 } 962 963 static void 964 bnx_free_rx_ring_std(struct bnx_rx_std_ring *std) 965 { 966 int i; 967 968 lwkt_serialize_handler_disable(&std->bnx_rx_std_serialize); 969 970 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { 971 struct bnx_rx_buf *rb = &std->bnx_rx_std_buf[i]; 972 973 rb->bnx_rx_refilled = 0; 974 if (rb->bnx_rx_mbuf != NULL) { 975 bus_dmamap_unload(std->bnx_rx_mtag, rb->bnx_rx_dmamap); 976 m_freem(rb->bnx_rx_mbuf); 977 rb->bnx_rx_mbuf = NULL; 978 } 979 bzero(&std->bnx_rx_std_ring[i], sizeof(struct bge_rx_bd)); 980 } 981 } 982 983 static int 984 bnx_init_rx_ring_jumbo(struct bnx_softc *sc) 985 { 986 struct bge_rcb *rcb; 987 int i, error; 988 989 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { 990 error = bnx_newbuf_jumbo(sc, i, 1); 991 if (error) 992 return error; 993 } 994 995 sc->bnx_jumbo = BGE_JUMBO_RX_RING_CNT - 1; 996 997 rcb = &sc->bnx_ldata.bnx_info.bnx_jumbo_rx_rcb; 998 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 0); 999 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags); 1000 1001 bnx_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bnx_jumbo); 1002 1003 return(0); 1004 } 1005 1006 static void 1007 bnx_free_rx_ring_jumbo(struct bnx_softc *sc) 1008 { 1009 int i; 1010 1011 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { 1012 struct bnx_rx_buf *rc = &sc->bnx_cdata.bnx_rx_jumbo_chain[i]; 1013 1014 if (rc->bnx_rx_mbuf != NULL) { 1015 m_freem(rc->bnx_rx_mbuf); 1016 rc->bnx_rx_mbuf = NULL; 1017 } 1018 bzero(&sc->bnx_ldata.bnx_rx_jumbo_ring[i], 1019 sizeof(struct bge_rx_bd)); 1020 } 1021 } 1022 1023 static void 1024 bnx_free_tx_ring(struct bnx_tx_ring *txr) 1025 { 1026 int i; 1027 1028 for (i = 0; i < BGE_TX_RING_CNT; i++) { 1029 struct bnx_tx_buf *buf = &txr->bnx_tx_buf[i]; 1030 1031 if (buf->bnx_tx_mbuf != NULL) { 1032 bus_dmamap_unload(txr->bnx_tx_mtag, 1033 buf->bnx_tx_dmamap); 1034 m_freem(buf->bnx_tx_mbuf); 1035 buf->bnx_tx_mbuf = NULL; 1036 
		}
		bzero(&txr->bnx_tx_ring[i], sizeof(struct bge_tx_bd));
	}
	/* Mark the saved consumer index invalid until the next init. */
	txr->bnx_tx_saved_considx = BNX_TXCONS_UNSET;
}

/*
 * Reset a TX ring's software state and tell the chip the host send
 * ring is empty.  Always succeeds; returns 0 for symmetry with the
 * other ring-init routines.
 */
static int
bnx_init_tx_ring(struct bnx_tx_ring *txr)
{
	txr->bnx_tx_cnt = 0;
	txr->bnx_tx_saved_considx = 0;
	txr->bnx_tx_prodidx = 0;

	/* Initialize transmit producer index for host-memory send ring. */
	bnx_writembx(txr->bnx_sc, txr->bnx_tx_mbx, txr->bnx_tx_prodidx);

	return(0);
}

/*
 * Program the 128-bit multicast hash filter (four 32-bit MAR
 * registers).  ALLMULTI/PROMISC opens the filter completely;
 * otherwise each multicast address sets one bit selected by the
 * low 7 bits of the little-endian CRC of the address.
 */
static void
bnx_setmulti(struct bnx_softc *sc)
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	uint32_t hashes[4] = { 0, 0, 0, 0 };
	int h, i;

	ifp = &sc->arpcom.ac_if;

	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		/* Accept all multicast frames. */
		for (i = 0; i < 4; i++)
			CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
		return;
	}

	/* First, zot all the existing filters. */
	for (i = 0; i < 4; i++)
		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);

	/* Now program new ones. */
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = ether_crc32_le(
		    LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
		    ETHER_ADDR_LEN) & 0x7f;
		/* Bits 6:5 select the register, bits 4:0 the bit in it. */
		hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
	}

	for (i = 0; i < 4; i++)
		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
}

/*
 * Do endian, PCI and DMA initialization. Also check the on-board ROM
 * self-test results.
 */
static int
bnx_chipinit(struct bnx_softc *sc)
{
	uint32_t dma_rw_ctl, mode_ctl;
	int i;

	/* Set endian type before we access any non-PCI registers. */
	pci_write_config(sc->bnx_dev, BGE_PCI_MISC_CTL,
	    BGE_INIT | BGE_PCIMISCCTL_TAGGED_STATUS, 4);

	/*
	 * Clear the MAC statistics block in the NIC's
	 * internal memory.
	 */
	for (i = BGE_STATS_BLOCK;
	    i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
		BNX_MEMWIN_WRITE(sc, i, 0);

	/* Likewise clear the status block area in NIC memory. */
	for (i = BGE_STATUS_BLOCK;
	    i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
		BNX_MEMWIN_WRITE(sc, i, 0);

	if (BNX_IS_57765_FAMILY(sc)) {
		uint32_t val;

		if (sc->bnx_chipid == BGE_CHIPID_BCM57765_A0) {
			mode_ctl = CSR_READ_4(sc, BGE_MODE_CTL);
			val = mode_ctl & ~BGE_MODECTL_PCIE_PORTS;

			/* Access the lower 1K of PL PCI-E block registers. */
			CSR_WRITE_4(sc, BGE_MODE_CTL,
			    val | BGE_MODECTL_PCIE_PL_SEL);

			val = CSR_READ_4(sc, BGE_PCIE_PL_LO_PHYCTL5);
			val |= BGE_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ;
			CSR_WRITE_4(sc, BGE_PCIE_PL_LO_PHYCTL5, val);

			/* Restore the original register window. */
			CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl);
		}
		if (sc->bnx_chiprev != BGE_CHIPREV_57765_AX) {
			/* Fix transmit hangs */
			val = CSR_READ_4(sc, BGE_CPMU_PADRNG_CTL);
			val |= BGE_CPMU_PADRNG_CTL_RDIV2;
			CSR_WRITE_4(sc, BGE_CPMU_PADRNG_CTL, val);

			mode_ctl = CSR_READ_4(sc, BGE_MODE_CTL);
			val = mode_ctl & ~BGE_MODECTL_PCIE_PORTS;

			/* Access the lower 1K of DL PCI-E block registers. */
			CSR_WRITE_4(sc, BGE_MODE_CTL,
			    val | BGE_MODECTL_PCIE_DL_SEL);

			val = CSR_READ_4(sc, BGE_PCIE_DL_LO_FTSMAX);
			val &= ~BGE_PCIE_DL_LO_FTSMAX_MASK;
			val |= BGE_PCIE_DL_LO_FTSMAX_VAL;
			CSR_WRITE_4(sc, BGE_PCIE_DL_LO_FTSMAX, val);

			/* Restore the original register window. */
			CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl);
		}

		/* Select the 6.25MHz MAC clock for 10Mbit operation. */
		val = CSR_READ_4(sc, BGE_CPMU_LSPD_10MB_CLK);
		val &= ~BGE_CPMU_LSPD_10MB_MACCLK_MASK;
		val |= BGE_CPMU_LSPD_10MB_MACCLK_6_25;
		CSR_WRITE_4(sc, BGE_CPMU_LSPD_10MB_CLK, val);
	}

	/*
	 * Set up the PCI DMA control register.
	 */
	dma_rw_ctl = pci_read_config(sc->bnx_dev, BGE_PCI_DMA_RW_CTL, 4);
	/*
	 * Disable 32bytes cache alignment for DMA write to host memory
	 *
	 * NOTE:
	 * 64bytes cache alignment for DMA write to host memory is still
	 * enabled.
	 */
	dma_rw_ctl |= BGE_PCIDMARWCTL_DIS_CACHE_ALIGNMENT;
	/* BCM57765 A0 stepping only: clear the read DMA MRRS bits. */
	if (sc->bnx_chipid == BGE_CHIPID_BCM57765_A0)
		dma_rw_ctl &= ~BGE_PCIDMARWCTL_CRDRDR_RDMA_MRRS_MSK;
	/*
	 * Enable HW workaround for controllers that misinterpret
	 * a status tag update and leave interrupts permanently
	 * disabled.
	 */
	if (sc->bnx_asicrev != BGE_ASICREV_BCM5717 &&
	    sc->bnx_asicrev != BGE_ASICREV_BCM5762 &&
	    !BNX_IS_57765_FAMILY(sc))
		dma_rw_ctl |= BGE_PCIDMARWCTL_TAGGED_STATUS_WA;
	if (bootverbose) {
		if_printf(&sc->arpcom.ac_if, "DMA read/write %#x\n",
		    dma_rw_ctl);
	}
	pci_write_config(sc->bnx_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);

	/*
	 * Set up general mode register.
	 */
	mode_ctl = bnx_dma_swap_options(sc);
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5720 ||
	    sc->bnx_asicrev == BGE_ASICREV_BCM5762) {
		/* Retain Host-2-BMC settings written by APE firmware. */
		mode_ctl |= CSR_READ_4(sc, BGE_MODE_CTL) &
		    (BGE_MODECTL_BYTESWAP_B2HRX_DATA |
		     BGE_MODECTL_WORDSWAP_B2HRX_DATA |
		     BGE_MODECTL_B2HRX_ENABLE | BGE_MODECTL_HTX2B_ENABLE);
	}
	mode_ctl |= BGE_MODECTL_MAC_ATTN_INTR |
	    BGE_MODECTL_HOST_SEND_BDS | BGE_MODECTL_TX_NO_PHDR_CSUM;
	CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl);

	/*
	 * Disable memory write invalidate. Apparently it is not supported
	 * properly by these devices. Also ensure that INTx isn't disabled,
	 * as these chips need it even when using MSI.
	 */
	PCI_CLRBIT(sc->bnx_dev, BGE_PCI_CMD,
	    (PCIM_CMD_MWRICEN | PCIM_CMD_INTxDIS), 4);

	/* Set the timer prescaler (always 66Mhz) */
	CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);

	return(0);
}

/*
 * Program the controller's rings, buffer-manager watermarks, host
 * coalescing parameters and all of its DMA/send/receive state
 * machines.  Returns 0, or ENXIO if a hardware unit fails to come
 * ready within BNX_TIMEOUT polls.
 */
static int
bnx_blockinit(struct bnx_softc *sc)
{
	struct bnx_intr_data *intr;
	struct bge_rcb *rcb;
	bus_size_t vrcb;
	bge_hostaddr taddr;
	uint32_t val;
	int i, limit;

	/*
	 * Initialize the memory window pointer register so that
	 * we can access the first 32K of internal NIC RAM. This will
	 * allow us to set up the TX send ring RCBs and the RX return
	 * ring RCBs, plus other things which live in NIC memory.
	 */
	CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);

	/* Configure mbuf pool watermarks */
	if (BNX_IS_57765_PLUS(sc)) {
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
		if (sc->arpcom.ac_if.if_mtu > ETHERMTU) {
			/* Jumbo MTU in effect: use the larger set. */
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x7e);
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xea);
		} else {
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x2a);
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xa0);
		}
	} else {
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
	}

	/* Configure DMA resource watermarks */
	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);

	/* Enable buffer manager */
	val = BGE_BMANMODE_ENABLE | BGE_BMANMODE_LOMBUF_ATTN;
	/*
	 * Change the arbitration algorithm of TXMBUF read request to
	 * round-robin instead of priority based for BCM5719. When
	 * TXFIFO is almost empty, RDMA will hold its request until
	 * TXFIFO is not almost empty.
	 */
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5719)
		val |= BGE_BMANMODE_NO_TX_UNDERRUN;
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5717 ||
	    sc->bnx_chipid == BGE_CHIPID_BCM5719_A0 ||
	    sc->bnx_chipid == BGE_CHIPID_BCM5720_A0)
		val |= BGE_BMANMODE_LOMBUF_ATTN;
	CSR_WRITE_4(sc, BGE_BMAN_MODE, val);

	/* Poll for buffer manager start indication */
	for (i = 0; i < BNX_TIMEOUT; i++) {
		if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
			break;
		DELAY(10);	/* 10us per poll */
	}

	if (i == BNX_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if,
		    "buffer manager failed to start\n");
		return(ENXIO);
	}

	/* Enable flow-through queues */
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);

	/* Wait until queue initialization is complete */
	for (i = 0; i < BNX_TIMEOUT; i++) {
		if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
			break;
		DELAY(10);
	}

	if (i == BNX_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if,
		    "flow-through queue init failed\n");
		return(ENXIO);
	}

	/*
	 * Summary of rings supported by the controller:
	 *
	 * Standard Receive Producer Ring
	 * - This ring is used to feed receive buffers for "standard"
	 *   sized frames (typically 1536 bytes) to the controller.
	 *
	 * Jumbo Receive Producer Ring
	 * - This ring is used to feed receive buffers for jumbo sized
	 *   frames (i.e. anything bigger than the "standard" frames)
	 *   to the controller.
	 *
	 * Mini Receive Producer Ring
	 * - This ring is used to feed receive buffers for "mini"
	 *   sized frames to the controller.
	 * - This feature required external memory for the controller
	 *   but was never used in a production system. Should always
	 *   be disabled.
	 *
	 * Receive Return Ring
	 * - After the controller has placed an incoming frame into a
	 *   receive buffer that buffer is moved into a receive return
	 *   ring. The driver is then responsible for passing the
	 *   buffer up to the stack. BCM5718/BCM57785 families support
	 *   multiple receive return rings.
	 *
	 * Send Ring
	 * - This ring is used for outgoing frames. BCM5719/BCM5720
	 *   support multiple send rings.
	 */

	/* Initialize the standard receive producer ring control block. */
	rcb = &sc->bnx_ldata.bnx_info.bnx_std_rx_rcb;
	rcb->bge_hostaddr.bge_addr_lo =
	    BGE_ADDR_LO(sc->bnx_rx_std_ring.bnx_rx_std_ring_paddr);
	rcb->bge_hostaddr.bge_addr_hi =
	    BGE_ADDR_HI(sc->bnx_rx_std_ring.bnx_rx_std_ring_paddr);
	if (BNX_IS_57765_PLUS(sc)) {
		/*
		 * Bits 31-16: Programmable ring size (2048, 1024, 512, .., 32)
		 * Bits 15-2 : Maximum RX frame size
		 * Bit 1     : 1 = Ring Disabled, 0 = Ring Enabled
		 * Bit 0     : Reserved
		 */
		rcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(512, BNX_MAX_FRAMELEN << 2);
	} else {
		/*
		 * Bits 31-16: Programmable ring size (512, 256, 128, 64, 32)
		 * Bits 15-2 : Reserved (should be 0)
		 * Bit 1     : 1 = Ring Disabled, 0 = Ring Enabled
		 * Bit 0     : Reserved
		 */
		rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
	}
	if (BNX_IS_5717_PLUS(sc))
		rcb->bge_nicaddr = BGE_STD_RX_RINGS_5717;
	else
		rcb->bge_nicaddr = BGE_STD_RX_RINGS;
	/* Write the standard receive producer ring control block. */
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
	if (!BNX_IS_5717_PLUS(sc))
		CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
	/* Reset the standard receive producer ring producer index.
	 */
	bnx_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0);

	/*
	 * Initialize the jumbo RX producer ring control
	 * block. We set the 'ring disabled' bit in the
	 * flags field until we're actually ready to start
	 * using this ring (i.e. once we set the MTU
	 * high enough to require it).
	 */
	if (BNX_IS_JUMBO_CAPABLE(sc)) {
		rcb = &sc->bnx_ldata.bnx_info.bnx_jumbo_rx_rcb;
		/* Get the jumbo receive producer ring RCB parameters. */
		rcb->bge_hostaddr.bge_addr_lo =
		    BGE_ADDR_LO(sc->bnx_ldata.bnx_rx_jumbo_ring_paddr);
		rcb->bge_hostaddr.bge_addr_hi =
		    BGE_ADDR_HI(sc->bnx_ldata.bnx_rx_jumbo_ring_paddr);
		rcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(BNX_MAX_FRAMELEN,
		    BGE_RCB_FLAG_RING_DISABLED);
		if (BNX_IS_5717_PLUS(sc))
			rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS_5717;
		else
			rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
		    rcb->bge_hostaddr.bge_addr_hi);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
		    rcb->bge_hostaddr.bge_addr_lo);
		/* Program the jumbo receive producer ring RCB parameters. */
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
		    rcb->bge_maxlen_flags);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
		/* Reset the jumbo receive producer ring producer index. */
		bnx_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
	}

	/*
	 * The BD ring replenish thresholds control how often the
	 * hardware fetches new BD's from the producer rings in host
	 * memory. Setting the value too low on a busy system can
	 * starve the hardware and reduce the throughput.
	 *
	 * Set the BD ring replenish thresholds. The recommended
	 * values are 1/8th the number of descriptors allocated to
	 * each ring.
	 */
	val = 8;
	CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, val);
	if (BNX_IS_JUMBO_CAPABLE(sc)) {
		CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH,
		    BGE_JUMBO_RX_RING_CNT/8);
	}
	if (BNX_IS_57765_PLUS(sc)) {
		CSR_WRITE_4(sc, BGE_STD_REPLENISH_LWM, 32);
		CSR_WRITE_4(sc, BGE_JMB_REPLENISH_LWM, 16);
	}

	/*
	 * Disable all send rings by setting the 'ring disabled' bit
	 * in the flags field of all the TX send ring control blocks,
	 * located in NIC memory.
	 */
	if (BNX_IS_5717_PLUS(sc))
		limit = 4;	/* 5717+ exposes four send ring RCBs */
	else if (BNX_IS_57765_FAMILY(sc) ||
	    sc->bnx_asicrev == BGE_ASICREV_BCM5762)
		limit = 2;
	else
		limit = 1;
	vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
	for (i = 0; i < limit; i++) {
		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
		vrcb += sizeof(struct bge_rcb);
	}

	/*
	 * Configure send ring RCBs
	 */
	vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
	for (i = 0; i < sc->bnx_tx_ringcnt; ++i) {
		struct bnx_tx_ring *txr = &sc->bnx_tx_ring[i];

		BGE_HOSTADDR(taddr, txr->bnx_tx_ring_paddr);
		RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi,
		    taddr.bge_addr_hi);
		RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo,
		    taddr.bge_addr_lo);
		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
		    BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
		vrcb += sizeof(struct bge_rcb);
	}

	/*
	 * Disable all receive return rings by setting the
	 * 'ring disabled' bit in the flags field of all the receive
	 * return ring control blocks, located in NIC memory.
	 */
	if (BNX_IS_5717_PLUS(sc)) {
		/* Should be 17, use 16 until we get an SRAM map. */
		limit = 16;
	} else if (BNX_IS_57765_FAMILY(sc) ||
	    sc->bnx_asicrev == BGE_ASICREV_BCM5762) {
		limit = 4;
	} else {
		limit = 1;
	}
	/* Disable all receive return rings.
	 */
	vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
	for (i = 0; i < limit; i++) {
		RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0);
		RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0);
		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
		    BGE_RCB_FLAG_RING_DISABLED);
		/* Also reset this ring's consumer index mailbox. */
		bnx_writembx(sc, BGE_MBX_RX_CONS0_LO +
		    (i * (sizeof(uint64_t))), 0);
		vrcb += sizeof(struct bge_rcb);
	}

	/*
	 * Set up receive return rings.
	 */
	vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
	for (i = 0; i < sc->bnx_rx_retcnt; ++i) {
		struct bnx_rx_ret_ring *ret = &sc->bnx_rx_ret_ring[i];

		BGE_HOSTADDR(taddr, ret->bnx_rx_ret_ring_paddr);
		RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi,
		    taddr.bge_addr_hi);
		RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo,
		    taddr.bge_addr_lo);
		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
		    BGE_RCB_MAXLEN_FLAGS(BNX_RETURN_RING_CNT, 0));
		vrcb += sizeof(struct bge_rcb);
	}

	/* Set random backoff seed for TX, derived from the MAC address. */
	CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
	    (sc->arpcom.ac_enaddr[0] + sc->arpcom.ac_enaddr[1] +
	     sc->arpcom.ac_enaddr[2] + sc->arpcom.ac_enaddr[3] +
	     sc->arpcom.ac_enaddr[4] + sc->arpcom.ac_enaddr[5]) &
	    BGE_TX_BACKOFF_SEED_MASK);

	/* Set inter-packet gap */
	val = 0x2620;
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5720 ||
	    sc->bnx_asicrev == BGE_ASICREV_BCM5762) {
		/* Preserve the jumbo frame length and count-down fields. */
		val |= CSR_READ_4(sc, BGE_TX_LENGTHS) &
		    (BGE_TXLEN_JMB_FRM_LEN_MSK | BGE_TXLEN_CNT_DN_VAL_MSK);
	}
	CSR_WRITE_4(sc, BGE_TX_LENGTHS, val);

	/*
	 * Specify which ring to use for packets that don't match
	 * any RX rules.
	 */
	CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);

	/*
	 * Configure number of RX lists. One interrupt distribution
	 * list, sixteen active lists, one bad frames class.
	 */
	CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);

	/* Initialize RX list placement stats mask.
	 */
	CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
	CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);

	/* Disable host coalescing until we get it set up */
	CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);

	/* Poll to make sure it's shut down. */
	for (i = 0; i < BNX_TIMEOUT; i++) {
		if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
			break;
		DELAY(10);
	}

	if (i == BNX_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if,
		    "host coalescing engine failed to idle\n");
		return(ENXIO);
	}

	/* Set up host coalescing defaults */
	sc->bnx_coal_chg = BNX_RX_COAL_TICKS_CHG |
	    BNX_TX_COAL_TICKS_CHG |
	    BNX_RX_COAL_BDS_CHG |
	    BNX_TX_COAL_BDS_CHG |
	    BNX_RX_COAL_BDS_INT_CHG |
	    BNX_TX_COAL_BDS_INT_CHG;
	bnx_coal_change(sc);

	/*
	 * Set up addresses of status blocks; vector 0 uses the main
	 * HCC registers, vectors 1..N use the per-vector registers
	 * at a stride of 8 bytes.
	 */
	intr = &sc->bnx_intr_data[0];
	bzero(intr->bnx_status_block, BGE_STATUS_BLK_SZ);
	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
	    BGE_ADDR_HI(intr->bnx_status_block_paddr));
	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
	    BGE_ADDR_LO(intr->bnx_status_block_paddr));
	for (i = 1; i < sc->bnx_intr_cnt; ++i) {
		intr = &sc->bnx_intr_data[i];
		bzero(intr->bnx_status_block, BGE_STATUS_BLK_SZ);
		CSR_WRITE_4(sc, BGE_VEC1_STATUSBLK_ADDR_HI + ((i - 1) * 8),
		    BGE_ADDR_HI(intr->bnx_status_block_paddr));
		CSR_WRITE_4(sc, BGE_VEC1_STATUSBLK_ADDR_LO + ((i - 1) * 8),
		    BGE_ADDR_LO(intr->bnx_status_block_paddr));
	}

	/* Set up status block partial update size. */
	val = BGE_STATBLKSZ_32BYTE;
#if 0
	/*
	 * Does not seem to have visible effect in both
	 * bulk data (1472B UDP datagram) and tiny data
	 * (18B UDP datagram) TX tests.
	 */
	val |= BGE_HCCMODE_CLRTICK_TX;
#endif
	/* Turn on host coalescing state machine */
	CSR_WRITE_4(sc, BGE_HCC_MODE, val | BGE_HCCMODE_ENABLE);

	/* Turn on RX BD completion state machine and enable attentions */
	CSR_WRITE_4(sc, BGE_RBDC_MODE,
	    BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);

	/* Turn on RX list placement state machine */
	CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);

	/* Assemble the MAC mode: DMA, statistics and port mode bits. */
	val = BGE_MACMODE_TXDMA_ENB | BGE_MACMODE_RXDMA_ENB |
	    BGE_MACMODE_RX_STATS_CLEAR | BGE_MACMODE_TX_STATS_CLEAR |
	    BGE_MACMODE_RX_STATS_ENB | BGE_MACMODE_TX_STATS_ENB |
	    BGE_MACMODE_FRMHDR_DMA_ENB;

	if (sc->bnx_flags & BNX_FLAG_TBI)
		val |= BGE_PORTMODE_TBI;
	else if (sc->bnx_flags & BNX_FLAG_MII_SERDES)
		val |= BGE_PORTMODE_GMII;
	else
		val |= BGE_PORTMODE_MII;

	/* Allow APE to send/receive frames. */
	if (sc->bnx_mfw_flags & BNX_MFW_ON_APE)
		val |= BGE_MACMODE_APE_RX_EN | BGE_MACMODE_APE_TX_EN;

	/* Turn on DMA, clear stats */
	CSR_WRITE_4(sc, BGE_MAC_MODE, val);
	DELAY(40);	/* settle time after the MAC mode change */

	/* Set misc. local control, enable interrupts on attentions */
	BNX_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);

#ifdef notdef
	/* Assert GPIO pins for PHY reset */
	BNX_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
	    BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
	BNX_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
	    BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
#endif

	if (sc->bnx_intr_type == PCI_INTR_TYPE_MSIX)
		bnx_enable_msi(sc, TRUE);

	/* Turn on write DMA state machine */
	val = BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS;
	/* Enable host coalescing bug fix. */
	val |= BGE_WDMAMODE_STATUS_TAG_FIX;
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5785) {
		/* Request larger DMA burst size to get better performance.
		 */
		val |= BGE_WDMAMODE_BURST_ALL_DATA;
	}
	CSR_WRITE_4(sc, BGE_WDMA_MODE, val);
	DELAY(40);

	if (BNX_IS_57765_PLUS(sc)) {
		uint32_t dmactl, dmactl_reg;

		/* BCM5762 keeps this control in a second register. */
		if (sc->bnx_asicrev == BGE_ASICREV_BCM5762)
			dmactl_reg = BGE_RDMA_RSRVCTRL2;
		else
			dmactl_reg = BGE_RDMA_RSRVCTRL;

		dmactl = CSR_READ_4(sc, dmactl_reg);
		/*
		 * Adjust tx margin to prevent TX data corruption and
		 * fix internal FIFO overflow.
		 */
		if (sc->bnx_asicrev == BGE_ASICREV_BCM5719 ||
		    sc->bnx_asicrev == BGE_ASICREV_BCM5720 ||
		    sc->bnx_asicrev == BGE_ASICREV_BCM5762) {
			dmactl &= ~(BGE_RDMA_RSRVCTRL_FIFO_LWM_MASK |
			    BGE_RDMA_RSRVCTRL_FIFO_HWM_MASK |
			    BGE_RDMA_RSRVCTRL_TXMRGN_MASK);
			dmactl |= BGE_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
			    BGE_RDMA_RSRVCTRL_FIFO_HWM_1_5K |
			    BGE_RDMA_RSRVCTRL_TXMRGN_320B;
		}
		/*
		 * Enable fix for read DMA FIFO overruns.
		 * The fix is to limit the number of RX BDs
		 * the hardware would fetch at a time.
		 */
		CSR_WRITE_4(sc, dmactl_reg,
		    dmactl | BGE_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
	}

	if (sc->bnx_asicrev == BGE_ASICREV_BCM5719) {
		CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL,
		    CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) |
		    BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_4K |
		    BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K);
	} else if (sc->bnx_asicrev == BGE_ASICREV_BCM5720 ||
	    sc->bnx_asicrev == BGE_ASICREV_BCM5762) {
		uint32_t ctrl_reg;

		if (sc->bnx_asicrev == BGE_ASICREV_BCM5762)
			ctrl_reg = BGE_RDMA_LSO_CRPTEN_CTRL2;
		else
			ctrl_reg = BGE_RDMA_LSO_CRPTEN_CTRL;

		/*
		 * Allow 4KB burst length reads for non-LSO frames.
		 * Enable 512B burst length reads for buffer descriptors.
		 */
		CSR_WRITE_4(sc, ctrl_reg,
		    CSR_READ_4(sc, ctrl_reg) |
		    BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_512 |
		    BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K);
	}

	/*
	 * Turn on read DMA state machine; per-ASIC workaround bits
	 * are accumulated in 'val' before the single mode write below.
	 */
	val = BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS;
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5717)
		val |= BGE_RDMAMODE_MULT_DMA_RD_DIS;
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5784 ||
	    sc->bnx_asicrev == BGE_ASICREV_BCM5785 ||
	    sc->bnx_asicrev == BGE_ASICREV_BCM57780) {
		val |= BGE_RDMAMODE_BD_SBD_CRPT_ATTN |
		    BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN |
		    BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN;
	}
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5720 ||
	    sc->bnx_asicrev == BGE_ASICREV_BCM5762) {
		val |= CSR_READ_4(sc, BGE_RDMA_MODE) &
		    BGE_RDMAMODE_H2BNC_VLAN_DET;
		/*
		 * Allow multiple outstanding read requests from
		 * non-LSO read DMA engine.
		 */
		val &= ~BGE_RDMAMODE_MULT_DMA_RD_DIS;
	}
	if (sc->bnx_asicrev == BGE_ASICREV_BCM57766)
		val |= BGE_RDMAMODE_JMB_2K_MMRR;
	if (sc->bnx_flags & BNX_FLAG_TSO)
		val |= BGE_RDMAMODE_TSO4_ENABLE;
	val |= BGE_RDMAMODE_FIFO_LONG_BURST;
	CSR_WRITE_4(sc, BGE_RDMA_MODE, val);
	DELAY(40);

	/* Turn on RX data completion state machine */
	CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);

	/* Turn on RX BD initiator state machine */
	CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);

	/* Turn on RX data and RX BD initiator state machine */
	CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);

	/* Turn on send BD completion state machine */
	CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);

	/* Turn on send data completion state machine */
	val = BGE_SDCMODE_ENABLE;
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5761)
		val |= BGE_SDCMODE_CDELAY;
	CSR_WRITE_4(sc, BGE_SDC_MODE, val);

	/* Turn on send data initiator state machine */
	if (sc->bnx_flags &
	    BNX_FLAG_TSO) {
		/* TSO capable: enable hardware pre-DMA LSO handling. */
		CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE |
		    BGE_SDIMODE_HW_LSO_PRE_DMA);
	} else {
		CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
	}

	/* Turn on send BD initiator state machine */
	val = BGE_SBDIMODE_ENABLE;
	if (sc->bnx_tx_ringcnt > 1)
		val |= BGE_SBDIMODE_MULTI_TXR;
	CSR_WRITE_4(sc, BGE_SBDI_MODE, val);

	/* Turn on send BD selector state machine */
	CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);

	/* Enable send data initiator statistics. */
	CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
	CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
	    BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);

	/* ack/clear link change events */
	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
	    BGE_MACSTAT_LINK_CHANGED);
	CSR_WRITE_4(sc, BGE_MI_STS, 0);

	/*
	 * Enable attention when the link has changed state for
	 * devices that use auto polling.
	 */
	if (sc->bnx_flags & BNX_FLAG_TBI) {
		CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
	} else {
		if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) {
			CSR_WRITE_4(sc, BGE_MI_MODE, sc->bnx_mi_mode);
			DELAY(80);
		}
	}

	/*
	 * Clear any pending link state attention.
	 * Otherwise some link state change events may be lost until attention
	 * is cleared by bnx_intr() -> bnx_softc.bnx_link_upd() sequence.
	 * It's not necessary on newer BCM chips - perhaps enabling link
	 * state change attentions implies clearing pending attention.
	 */
	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
	    BGE_MACSTAT_LINK_CHANGED);

	/* Enable link state change attentions. */
	BNX_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);

	return(0);
}

/*
 * Probe for a Broadcom chip.
 * Check the PCI vendor and device IDs
 * against our list and return its name if we find a match. Note
 * that since the Broadcom controller contains VPD support, we
 * can get the device name string from the controller itself instead
 * of the compiled-in string. This is a little slow, but it guarantees
 * we'll always announce the right product name.
 */
static int
bnx_probe(device_t dev)
{
	const struct bnx_type *t;
	uint16_t product, vendor;

	/* This driver only handles PCI Express parts. */
	if (!pci_is_pcie(dev))
		return ENXIO;

	product = pci_get_device(dev);
	vendor = pci_get_vendor(dev);

	for (t = bnx_devs; t->bnx_name != NULL; t++) {
		if (vendor == t->bnx_vid && product == t->bnx_did)
			break;
	}
	if (t->bnx_name == NULL)
		return ENXIO;

	device_set_desc(dev, t->bnx_name);
	return 0;
}

/*
 * Device attach: map the register BAR, identify the exact chip
 * revision and initialize per-interrupt software state before
 * bringing the controller up.
 */
static int
bnx_attach(device_t dev)
{
	struct ifnet *ifp;
	struct bnx_softc *sc;
	struct bnx_rx_std_ring *std;
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *tree;
	uint32_t hwcfg = 0;
	int error = 0, rid, capmask, i, std_cpuid, std_cpuid_def;
	uint8_t ether_addr[ETHER_ADDR_LEN];
	uint16_t product;
	uintptr_t mii_priv = 0;
#if defined(BNX_TSO_DEBUG) || defined(BNX_RSS_DEBUG) || defined(BNX_TSS_DEBUG)
	char desc[32];
#endif
#ifdef IFPOLL_ENABLE
	int offset, offset_def;
#endif

	sc = device_get_softc(dev);
	sc->bnx_dev = dev;
	callout_init_mp(&sc->bnx_tick_timer);
	lwkt_serialize_init(&sc->bnx_jslot_serializer);
	lwkt_serialize_init(&sc->bnx_main_serialize);

	/* Always setup interrupt mailboxes */
	for (i = 0; i < BNX_INTR_MAX; ++i) {
		callout_init_mp(&sc->bnx_intr_data[i].bnx_intr_timer);
		sc->bnx_intr_data[i].bnx_sc = sc;
		sc->bnx_intr_data[i].bnx_intr_mbx = BGE_MBX_IRQ0_LO + (i * 8);
		sc->bnx_intr_data[i].bnx_intr_rid = -1;
		sc->bnx_intr_data[i].bnx_intr_cpuid
= -1; 1867 } 1868 1869 sc->bnx_func_addr = pci_get_function(dev); 1870 product = pci_get_device(dev); 1871 1872 #ifndef BURN_BRIDGES 1873 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) { 1874 uint32_t irq, mem; 1875 1876 irq = pci_read_config(dev, PCIR_INTLINE, 4); 1877 mem = pci_read_config(dev, BGE_PCI_BAR0, 4); 1878 1879 device_printf(dev, "chip is in D%d power mode " 1880 "-- setting to D0\n", pci_get_powerstate(dev)); 1881 1882 pci_set_powerstate(dev, PCI_POWERSTATE_D0); 1883 1884 pci_write_config(dev, PCIR_INTLINE, irq, 4); 1885 pci_write_config(dev, BGE_PCI_BAR0, mem, 4); 1886 } 1887 #endif /* !BURN_BRIDGE */ 1888 1889 /* 1890 * Map control/status registers. 1891 */ 1892 pci_enable_busmaster(dev); 1893 1894 rid = BGE_PCI_BAR0; 1895 sc->bnx_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 1896 RF_ACTIVE); 1897 1898 if (sc->bnx_res == NULL) { 1899 device_printf(dev, "couldn't map memory\n"); 1900 return ENXIO; 1901 } 1902 1903 sc->bnx_btag = rman_get_bustag(sc->bnx_res); 1904 sc->bnx_bhandle = rman_get_bushandle(sc->bnx_res); 1905 1906 /* Save various chip information */ 1907 sc->bnx_chipid = 1908 pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >> 1909 BGE_PCIMISCCTL_ASICREV_SHIFT; 1910 if (BGE_ASICREV(sc->bnx_chipid) == BGE_ASICREV_USE_PRODID_REG) { 1911 /* All chips having dedicated ASICREV register have CPMU */ 1912 sc->bnx_flags |= BNX_FLAG_CPMU; 1913 1914 switch (product) { 1915 case PCI_PRODUCT_BROADCOM_BCM5717: 1916 case PCI_PRODUCT_BROADCOM_BCM5717C: 1917 case PCI_PRODUCT_BROADCOM_BCM5718: 1918 case PCI_PRODUCT_BROADCOM_BCM5719: 1919 case PCI_PRODUCT_BROADCOM_BCM5720_ALT: 1920 case PCI_PRODUCT_BROADCOM_BCM5725: 1921 case PCI_PRODUCT_BROADCOM_BCM5727: 1922 case PCI_PRODUCT_BROADCOM_BCM5762: 1923 sc->bnx_chipid = pci_read_config(dev, 1924 BGE_PCI_GEN2_PRODID_ASICREV, 4); 1925 break; 1926 1927 case PCI_PRODUCT_BROADCOM_BCM57761: 1928 case PCI_PRODUCT_BROADCOM_BCM57762: 1929 case PCI_PRODUCT_BROADCOM_BCM57765: 1930 case PCI_PRODUCT_BROADCOM_BCM57766: 
1931 case PCI_PRODUCT_BROADCOM_BCM57781: 1932 case PCI_PRODUCT_BROADCOM_BCM57782: 1933 case PCI_PRODUCT_BROADCOM_BCM57785: 1934 case PCI_PRODUCT_BROADCOM_BCM57786: 1935 case PCI_PRODUCT_BROADCOM_BCM57791: 1936 case PCI_PRODUCT_BROADCOM_BCM57795: 1937 sc->bnx_chipid = pci_read_config(dev, 1938 BGE_PCI_GEN15_PRODID_ASICREV, 4); 1939 break; 1940 1941 default: 1942 sc->bnx_chipid = pci_read_config(dev, 1943 BGE_PCI_PRODID_ASICREV, 4); 1944 break; 1945 } 1946 } 1947 if (sc->bnx_chipid == BGE_CHIPID_BCM5717_C0) 1948 sc->bnx_chipid = BGE_CHIPID_BCM5720_A0; 1949 1950 sc->bnx_asicrev = BGE_ASICREV(sc->bnx_chipid); 1951 sc->bnx_chiprev = BGE_CHIPREV(sc->bnx_chipid); 1952 1953 switch (sc->bnx_asicrev) { 1954 case BGE_ASICREV_BCM5717: 1955 case BGE_ASICREV_BCM5719: 1956 case BGE_ASICREV_BCM5720: 1957 sc->bnx_flags |= BNX_FLAG_5717_PLUS | BNX_FLAG_57765_PLUS; 1958 break; 1959 1960 case BGE_ASICREV_BCM5762: 1961 sc->bnx_flags |= BNX_FLAG_57765_PLUS; 1962 break; 1963 1964 case BGE_ASICREV_BCM57765: 1965 case BGE_ASICREV_BCM57766: 1966 sc->bnx_flags |= BNX_FLAG_57765_FAMILY | BNX_FLAG_57765_PLUS; 1967 break; 1968 } 1969 1970 if (sc->bnx_asicrev == BGE_ASICREV_BCM5717 || 1971 sc->bnx_asicrev == BGE_ASICREV_BCM5719 || 1972 sc->bnx_asicrev == BGE_ASICREV_BCM5720 || 1973 sc->bnx_asicrev == BGE_ASICREV_BCM5762) 1974 sc->bnx_flags |= BNX_FLAG_APE; 1975 1976 sc->bnx_flags |= BNX_FLAG_TSO; 1977 if (sc->bnx_asicrev == BGE_ASICREV_BCM5719 && 1978 sc->bnx_chipid == BGE_CHIPID_BCM5719_A0) 1979 sc->bnx_flags &= ~BNX_FLAG_TSO; 1980 1981 if (sc->bnx_asicrev == BGE_ASICREV_BCM5717 || 1982 BNX_IS_57765_FAMILY(sc)) { 1983 /* 1984 * All BCM57785 and BCM5718 families chips have a bug that 1985 * under certain situation interrupt will not be enabled 1986 * even if status tag is written to interrupt mailbox. 1987 * 1988 * While BCM5719 and BCM5720 have a hardware workaround 1989 * which could fix the above bug. 1990 * See the comment near BGE_PCIDMARWCTL_TAGGED_STATUS_WA in 1991 * bnx_chipinit(). 
1992 * 1993 * For the rest of the chips in these two families, we will 1994 * have to poll the status block at high rate (10ms currently) 1995 * to check whether the interrupt is hosed or not. 1996 * See bnx_check_intr_*() for details. 1997 */ 1998 sc->bnx_flags |= BNX_FLAG_STATUSTAG_BUG; 1999 } 2000 2001 sc->bnx_pciecap = pci_get_pciecap_ptr(sc->bnx_dev); 2002 if (sc->bnx_asicrev == BGE_ASICREV_BCM5719 || 2003 sc->bnx_asicrev == BGE_ASICREV_BCM5720) 2004 pcie_set_max_readrq(dev, PCIEM_DEVCTL_MAX_READRQ_2048); 2005 else 2006 pcie_set_max_readrq(dev, PCIEM_DEVCTL_MAX_READRQ_4096); 2007 device_printf(dev, "CHIP ID 0x%08x; " 2008 "ASIC REV 0x%02x; CHIP REV 0x%02x\n", 2009 sc->bnx_chipid, sc->bnx_asicrev, sc->bnx_chiprev); 2010 2011 /* 2012 * Set various PHY quirk flags. 2013 */ 2014 2015 capmask = MII_CAPMASK_DEFAULT; 2016 if (product == PCI_PRODUCT_BROADCOM_BCM57791 || 2017 product == PCI_PRODUCT_BROADCOM_BCM57795) { 2018 /* 10/100 only */ 2019 capmask &= ~BMSR_EXTSTAT; 2020 } 2021 2022 mii_priv |= BRGPHY_FLAG_WIRESPEED; 2023 if (sc->bnx_chipid == BGE_CHIPID_BCM5762_A0) 2024 mii_priv |= BRGPHY_FLAG_5762_A0; 2025 2026 /* 2027 * Chips with APE need BAR2 access for APE registers/memory. 2028 */ 2029 if (sc->bnx_flags & BNX_FLAG_APE) { 2030 uint32_t pcistate; 2031 2032 rid = PCIR_BAR(2); 2033 sc->bnx_res2 = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 2034 RF_ACTIVE); 2035 if (sc->bnx_res2 == NULL) { 2036 device_printf(dev, "couldn't map BAR2 memory\n"); 2037 error = ENXIO; 2038 goto fail; 2039 } 2040 2041 /* Enable APE register/memory access by host driver. 
*/ 2042 pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4); 2043 pcistate |= BGE_PCISTATE_ALLOW_APE_CTLSPC_WR | 2044 BGE_PCISTATE_ALLOW_APE_SHMEM_WR | 2045 BGE_PCISTATE_ALLOW_APE_PSPACE_WR; 2046 pci_write_config(dev, BGE_PCI_PCISTATE, pcistate, 4); 2047 2048 bnx_ape_lock_init(sc); 2049 bnx_ape_read_fw_ver(sc); 2050 } 2051 2052 /* Initialize if_name earlier, so if_printf could be used */ 2053 ifp = &sc->arpcom.ac_if; 2054 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 2055 2056 /* 2057 * Try to reset the chip. 2058 */ 2059 bnx_sig_pre_reset(sc, BNX_RESET_SHUTDOWN); 2060 bnx_reset(sc); 2061 bnx_sig_post_reset(sc, BNX_RESET_SHUTDOWN); 2062 2063 if (bnx_chipinit(sc)) { 2064 device_printf(dev, "chip initialization failed\n"); 2065 error = ENXIO; 2066 goto fail; 2067 } 2068 2069 /* 2070 * Get station address 2071 */ 2072 error = bnx_get_eaddr(sc, ether_addr); 2073 if (error) { 2074 device_printf(dev, "failed to read station address\n"); 2075 goto fail; 2076 } 2077 2078 /* Setup RX/TX and interrupt count */ 2079 bnx_setup_ring_cnt(sc); 2080 2081 if ((sc->bnx_rx_retcnt == 1 && sc->bnx_tx_ringcnt == 1) || 2082 (sc->bnx_rx_retcnt > 1 && sc->bnx_tx_ringcnt > 1)) { 2083 /* 2084 * The RX ring and the corresponding TX ring processing 2085 * should be on the same CPU, since they share the same 2086 * status block. 2087 */ 2088 sc->bnx_flags |= BNX_FLAG_RXTX_BUNDLE; 2089 if (bootverbose) 2090 device_printf(dev, "RX/TX bundle\n"); 2091 if (sc->bnx_tx_ringcnt > 1) { 2092 /* 2093 * Multiple TX rings do not share status block 2094 * with link status, so link status will have 2095 * to save its own status_tag. 
2096 */ 2097 sc->bnx_flags |= BNX_FLAG_STATUS_HASTAG; 2098 if (bootverbose) 2099 device_printf(dev, "status needs tag\n"); 2100 } 2101 } else { 2102 KKASSERT(sc->bnx_rx_retcnt > 1 && sc->bnx_tx_ringcnt == 1); 2103 if (bootverbose) 2104 device_printf(dev, "RX/TX not bundled\n"); 2105 } 2106 2107 error = bnx_dma_alloc(dev); 2108 if (error) 2109 goto fail; 2110 2111 #ifdef IFPOLL_ENABLE 2112 if (sc->bnx_flags & BNX_FLAG_RXTX_BUNDLE) { 2113 /* 2114 * NPOLLING RX/TX CPU offset 2115 */ 2116 if (sc->bnx_rx_retcnt == ncpus2) { 2117 offset = 0; 2118 } else { 2119 offset_def = 2120 (sc->bnx_rx_retcnt * device_get_unit(dev)) % ncpus2; 2121 offset = device_getenv_int(dev, "npoll.offset", 2122 offset_def); 2123 if (offset >= ncpus2 || 2124 offset % sc->bnx_rx_retcnt != 0) { 2125 device_printf(dev, "invalid npoll.offset %d, " 2126 "use %d\n", offset, offset_def); 2127 offset = offset_def; 2128 } 2129 } 2130 sc->bnx_npoll_rxoff = offset; 2131 sc->bnx_npoll_txoff = offset; 2132 } else { 2133 /* 2134 * NPOLLING RX CPU offset 2135 */ 2136 if (sc->bnx_rx_retcnt == ncpus2) { 2137 offset = 0; 2138 } else { 2139 offset_def = 2140 (sc->bnx_rx_retcnt * device_get_unit(dev)) % ncpus2; 2141 offset = device_getenv_int(dev, "npoll.rxoff", 2142 offset_def); 2143 if (offset >= ncpus2 || 2144 offset % sc->bnx_rx_retcnt != 0) { 2145 device_printf(dev, "invalid npoll.rxoff %d, " 2146 "use %d\n", offset, offset_def); 2147 offset = offset_def; 2148 } 2149 } 2150 sc->bnx_npoll_rxoff = offset; 2151 2152 /* 2153 * NPOLLING TX CPU offset 2154 */ 2155 offset_def = device_get_unit(dev) % ncpus2; 2156 offset = device_getenv_int(dev, "npoll.txoff", offset_def); 2157 if (offset >= ncpus2) { 2158 device_printf(dev, "invalid npoll.txoff %d, use %d\n", 2159 offset, offset_def); 2160 offset = offset_def; 2161 } 2162 sc->bnx_npoll_txoff = offset; 2163 } 2164 #endif /* IFPOLL_ENABLE */ 2165 2166 /* 2167 * Allocate interrupt 2168 */ 2169 error = bnx_alloc_intr(sc); 2170 if (error) 2171 goto fail; 2172 2173 /* Setup 
serializers */ 2174 bnx_setup_serialize(sc); 2175 2176 /* Set default tuneable values. */ 2177 sc->bnx_rx_coal_ticks = BNX_RX_COAL_TICKS_DEF; 2178 sc->bnx_tx_coal_ticks = BNX_TX_COAL_TICKS_DEF; 2179 sc->bnx_rx_coal_bds = BNX_RX_COAL_BDS_DEF; 2180 sc->bnx_rx_coal_bds_poll = sc->bnx_rx_ret_ring[0].bnx_rx_cntmax; 2181 sc->bnx_tx_coal_bds = BNX_TX_COAL_BDS_DEF; 2182 sc->bnx_tx_coal_bds_poll = BNX_TX_COAL_BDS_POLL_DEF; 2183 sc->bnx_rx_coal_bds_int = BNX_RX_COAL_BDS_INT_DEF; 2184 sc->bnx_tx_coal_bds_int = BNX_TX_COAL_BDS_INT_DEF; 2185 2186 /* Set up ifnet structure */ 2187 ifp->if_softc = sc; 2188 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 2189 ifp->if_ioctl = bnx_ioctl; 2190 ifp->if_start = bnx_start; 2191 #ifdef IFPOLL_ENABLE 2192 ifp->if_npoll = bnx_npoll; 2193 #endif 2194 ifp->if_init = bnx_init; 2195 ifp->if_serialize = bnx_serialize; 2196 ifp->if_deserialize = bnx_deserialize; 2197 ifp->if_tryserialize = bnx_tryserialize; 2198 #ifdef INVARIANTS 2199 ifp->if_serialize_assert = bnx_serialize_assert; 2200 #endif 2201 ifp->if_mtu = ETHERMTU; 2202 ifp->if_capabilities = IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU; 2203 2204 ifp->if_capabilities |= IFCAP_HWCSUM; 2205 ifp->if_hwassist = BNX_CSUM_FEATURES; 2206 if (sc->bnx_flags & BNX_FLAG_TSO) { 2207 ifp->if_capabilities |= IFCAP_TSO; 2208 ifp->if_hwassist |= CSUM_TSO; 2209 } 2210 if (BNX_RSS_ENABLED(sc)) 2211 ifp->if_capabilities |= IFCAP_RSS; 2212 ifp->if_capenable = ifp->if_capabilities; 2213 2214 ifq_set_maxlen(&ifp->if_snd, BGE_TX_RING_CNT - 1); 2215 ifq_set_ready(&ifp->if_snd); 2216 ifq_set_subq_cnt(&ifp->if_snd, sc->bnx_tx_ringcnt); 2217 2218 if (sc->bnx_tx_ringcnt > 1) { 2219 ifp->if_mapsubq = ifq_mapsubq_mask; 2220 ifq_set_subq_mask(&ifp->if_snd, sc->bnx_tx_ringcnt - 1); 2221 } 2222 2223 /* 2224 * Figure out what sort of media we have by checking the 2225 * hardware config word in the first 32k of NIC internal memory, 2226 * or fall back to examining the EEPROM if necessary. 
2227 * Note: on some BCM5700 cards, this value appears to be unset. 2228 * If that's the case, we have to rely on identifying the NIC 2229 * by its PCI subsystem ID, as we do below for the SysKonnect 2230 * SK-9D41. 2231 */ 2232 if (bnx_readmem_ind(sc, BGE_SRAM_DATA_SIG) == BGE_SRAM_DATA_SIG_MAGIC) { 2233 hwcfg = bnx_readmem_ind(sc, BGE_SRAM_DATA_CFG); 2234 } else { 2235 if (bnx_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET, 2236 sizeof(hwcfg))) { 2237 device_printf(dev, "failed to read EEPROM\n"); 2238 error = ENXIO; 2239 goto fail; 2240 } 2241 hwcfg = ntohl(hwcfg); 2242 } 2243 2244 /* The SysKonnect SK-9D41 is a 1000baseSX card. */ 2245 if (pci_get_subvendor(dev) == PCI_PRODUCT_SCHNEIDERKOCH_SK_9D41 || 2246 (hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) 2247 sc->bnx_flags |= BNX_FLAG_TBI; 2248 2249 /* Setup MI MODE */ 2250 if (sc->bnx_flags & BNX_FLAG_CPMU) 2251 sc->bnx_mi_mode = BGE_MIMODE_500KHZ_CONST; 2252 else 2253 sc->bnx_mi_mode = BGE_MIMODE_BASE; 2254 2255 /* Setup link status update stuffs */ 2256 if (sc->bnx_flags & BNX_FLAG_TBI) { 2257 sc->bnx_link_upd = bnx_tbi_link_upd; 2258 sc->bnx_link_chg = BGE_MACSTAT_LINK_CHANGED; 2259 } else if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) { 2260 sc->bnx_link_upd = bnx_autopoll_link_upd; 2261 sc->bnx_link_chg = BGE_MACSTAT_LINK_CHANGED; 2262 } else { 2263 sc->bnx_link_upd = bnx_copper_link_upd; 2264 sc->bnx_link_chg = BGE_MACSTAT_LINK_CHANGED; 2265 } 2266 2267 /* Set default PHY address */ 2268 sc->bnx_phyno = 1; 2269 2270 /* 2271 * PHY address mapping for various devices. 
2272 * 2273 * | F0 Cu | F0 Sr | F1 Cu | F1 Sr | 2274 * ---------+-------+-------+-------+-------+ 2275 * BCM57XX | 1 | X | X | X | 2276 * BCM5717 | 1 | 8 | 2 | 9 | 2277 * BCM5719 | 1 | 8 | 2 | 9 | 2278 * BCM5720 | 1 | 8 | 2 | 9 | 2279 * 2280 * | F2 Cu | F2 Sr | F3 Cu | F3 Sr | 2281 * ---------+-------+-------+-------+-------+ 2282 * BCM57XX | X | X | X | X | 2283 * BCM5717 | X | X | X | X | 2284 * BCM5719 | 3 | 10 | 4 | 11 | 2285 * BCM5720 | X | X | X | X | 2286 * 2287 * Other addresses may respond but they are not 2288 * IEEE compliant PHYs and should be ignored. 2289 */ 2290 if (BNX_IS_5717_PLUS(sc)) { 2291 if (sc->bnx_chipid == BGE_CHIPID_BCM5717_A0) { 2292 if (CSR_READ_4(sc, BGE_SGDIG_STS) & 2293 BGE_SGDIGSTS_IS_SERDES) 2294 sc->bnx_phyno = sc->bnx_func_addr + 8; 2295 else 2296 sc->bnx_phyno = sc->bnx_func_addr + 1; 2297 } else { 2298 if (CSR_READ_4(sc, BGE_CPMU_PHY_STRAP) & 2299 BGE_CPMU_PHY_STRAP_IS_SERDES) 2300 sc->bnx_phyno = sc->bnx_func_addr + 8; 2301 else 2302 sc->bnx_phyno = sc->bnx_func_addr + 1; 2303 } 2304 } 2305 2306 if (sc->bnx_flags & BNX_FLAG_TBI) { 2307 ifmedia_init(&sc->bnx_ifmedia, IFM_IMASK, 2308 bnx_ifmedia_upd, bnx_ifmedia_sts); 2309 ifmedia_add(&sc->bnx_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL); 2310 ifmedia_add(&sc->bnx_ifmedia, 2311 IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL); 2312 ifmedia_add(&sc->bnx_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL); 2313 ifmedia_set(&sc->bnx_ifmedia, IFM_ETHER|IFM_AUTO); 2314 sc->bnx_ifmedia.ifm_media = sc->bnx_ifmedia.ifm_cur->ifm_media; 2315 } else { 2316 struct mii_probe_args mii_args; 2317 2318 mii_probe_args_init(&mii_args, bnx_ifmedia_upd, bnx_ifmedia_sts); 2319 mii_args.mii_probemask = 1 << sc->bnx_phyno; 2320 mii_args.mii_capmask = capmask; 2321 mii_args.mii_privtag = MII_PRIVTAG_BRGPHY; 2322 mii_args.mii_priv = mii_priv; 2323 2324 error = mii_probe(dev, &sc->bnx_miibus, &mii_args); 2325 if (error) { 2326 device_printf(dev, "MII without any PHY!\n"); 2327 goto fail; 2328 } 2329 } 2330 2331 ctx = 
device_get_sysctl_ctx(sc->bnx_dev); 2332 tree = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->bnx_dev)); 2333 2334 SYSCTL_ADD_INT(ctx, tree, OID_AUTO, 2335 "rx_rings", CTLFLAG_RD, &sc->bnx_rx_retcnt, 0, "# of RX rings"); 2336 SYSCTL_ADD_INT(ctx, tree, OID_AUTO, 2337 "tx_rings", CTLFLAG_RD, &sc->bnx_tx_ringcnt, 0, "# of TX rings"); 2338 2339 SYSCTL_ADD_PROC(ctx, tree, OID_AUTO, "rx_coal_ticks", 2340 CTLTYPE_INT | CTLFLAG_RW, 2341 sc, 0, bnx_sysctl_rx_coal_ticks, "I", 2342 "Receive coalescing ticks (usec)."); 2343 SYSCTL_ADD_PROC(ctx, tree, OID_AUTO, "tx_coal_ticks", 2344 CTLTYPE_INT | CTLFLAG_RW, 2345 sc, 0, bnx_sysctl_tx_coal_ticks, "I", 2346 "Transmit coalescing ticks (usec)."); 2347 SYSCTL_ADD_PROC(ctx, tree, OID_AUTO, "rx_coal_bds", 2348 CTLTYPE_INT | CTLFLAG_RW, 2349 sc, 0, bnx_sysctl_rx_coal_bds, "I", 2350 "Receive max coalesced BD count."); 2351 SYSCTL_ADD_PROC(ctx, tree, OID_AUTO, "rx_coal_bds_poll", 2352 CTLTYPE_INT | CTLFLAG_RW, 2353 sc, 0, bnx_sysctl_rx_coal_bds_poll, "I", 2354 "Receive max coalesced BD count in polling."); 2355 SYSCTL_ADD_PROC(ctx, tree, OID_AUTO, "tx_coal_bds", 2356 CTLTYPE_INT | CTLFLAG_RW, 2357 sc, 0, bnx_sysctl_tx_coal_bds, "I", 2358 "Transmit max coalesced BD count."); 2359 SYSCTL_ADD_PROC(ctx, tree, OID_AUTO, "tx_coal_bds_poll", 2360 CTLTYPE_INT | CTLFLAG_RW, 2361 sc, 0, bnx_sysctl_tx_coal_bds_poll, "I", 2362 "Transmit max coalesced BD count in polling."); 2363 /* 2364 * A common design characteristic for many Broadcom 2365 * client controllers is that they only support a 2366 * single outstanding DMA read operation on the PCIe 2367 * bus. This means that it will take twice as long to 2368 * fetch a TX frame that is split into header and 2369 * payload buffers as it does to fetch a single, 2370 * contiguous TX frame (2 reads vs. 1 read). For these 2371 * controllers, coalescing buffers to reduce the number 2372 * of memory reads is effective way to get maximum 2373 * performance(about 940Mbps). 
Without collapsing TX 2374 * buffers the maximum TCP bulk transfer performance 2375 * is about 850Mbps. However forcing coalescing mbufs 2376 * consumes a lot of CPU cycles, so leave it off by 2377 * default. 2378 */ 2379 SYSCTL_ADD_PROC(ctx, tree, OID_AUTO, 2380 "force_defrag", CTLTYPE_INT | CTLFLAG_RW, 2381 sc, 0, bnx_sysctl_force_defrag, "I", 2382 "Force defragment on TX path"); 2383 2384 SYSCTL_ADD_PROC(ctx, tree, OID_AUTO, 2385 "tx_wreg", CTLTYPE_INT | CTLFLAG_RW, 2386 sc, 0, bnx_sysctl_tx_wreg, "I", 2387 "# of segments before writing to hardware register"); 2388 2389 SYSCTL_ADD_PROC(ctx, tree, OID_AUTO, 2390 "std_refill", CTLTYPE_INT | CTLFLAG_RW, 2391 sc, 0, bnx_sysctl_std_refill, "I", 2392 "# of packets received before scheduling standard refilling"); 2393 2394 SYSCTL_ADD_PROC(ctx, tree, OID_AUTO, 2395 "rx_coal_bds_int", CTLTYPE_INT | CTLFLAG_RW, 2396 sc, 0, bnx_sysctl_rx_coal_bds_int, "I", 2397 "Receive max coalesced BD count during interrupt."); 2398 SYSCTL_ADD_PROC(ctx, tree, OID_AUTO, 2399 "tx_coal_bds_int", CTLTYPE_INT | CTLFLAG_RW, 2400 sc, 0, bnx_sysctl_tx_coal_bds_int, "I", 2401 "Transmit max coalesced BD count during interrupt."); 2402 2403 #ifdef IFPOLL_ENABLE 2404 if (sc->bnx_flags & BNX_FLAG_RXTX_BUNDLE) { 2405 SYSCTL_ADD_PROC(ctx, tree, OID_AUTO, 2406 "npoll_offset", CTLTYPE_INT | CTLFLAG_RW, 2407 sc, 0, bnx_sysctl_npoll_offset, "I", 2408 "NPOLLING cpu offset"); 2409 } else { 2410 SYSCTL_ADD_PROC(ctx, tree, OID_AUTO, 2411 "npoll_rxoff", CTLTYPE_INT | CTLFLAG_RW, 2412 sc, 0, bnx_sysctl_npoll_rxoff, "I", 2413 "NPOLLING RX cpu offset"); 2414 SYSCTL_ADD_PROC(ctx, tree, OID_AUTO, 2415 "npoll_txoff", CTLTYPE_INT | CTLFLAG_RW, 2416 sc, 0, bnx_sysctl_npoll_txoff, "I", 2417 "NPOLLING TX cpu offset"); 2418 } 2419 #endif 2420 2421 #ifdef BNX_RSS_DEBUG 2422 SYSCTL_ADD_INT(ctx, tree, OID_AUTO, 2423 "std_refill_mask", CTLFLAG_RD, 2424 &sc->bnx_rx_std_ring.bnx_rx_std_refill, 0, ""); 2425 SYSCTL_ADD_INT(ctx, tree, OID_AUTO, 2426 "std_used", CTLFLAG_RD, 2427 
&sc->bnx_rx_std_ring.bnx_rx_std_used, 0, ""); 2428 SYSCTL_ADD_INT(ctx, tree, OID_AUTO, 2429 "rss_debug", CTLFLAG_RW, &sc->bnx_rss_debug, 0, ""); 2430 for (i = 0; i < sc->bnx_rx_retcnt; ++i) { 2431 ksnprintf(desc, sizeof(desc), "rx_pkt%d", i); 2432 SYSCTL_ADD_ULONG(ctx, tree, OID_AUTO, 2433 desc, CTLFLAG_RW, &sc->bnx_rx_ret_ring[i].bnx_rx_pkt, ""); 2434 2435 ksnprintf(desc, sizeof(desc), "rx_force_sched%d", i); 2436 SYSCTL_ADD_ULONG(ctx, tree, OID_AUTO, 2437 desc, CTLFLAG_RW, 2438 &sc->bnx_rx_ret_ring[i].bnx_rx_force_sched, ""); 2439 } 2440 #endif 2441 #ifdef BNX_TSS_DEBUG 2442 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) { 2443 ksnprintf(desc, sizeof(desc), "tx_pkt%d", i); 2444 SYSCTL_ADD_ULONG(ctx, tree, OID_AUTO, 2445 desc, CTLFLAG_RW, &sc->bnx_tx_ring[i].bnx_tx_pkt, ""); 2446 } 2447 #endif 2448 2449 SYSCTL_ADD_ULONG(ctx, tree, OID_AUTO, 2450 "norxbds", CTLFLAG_RW, &sc->bnx_norxbds, ""); 2451 2452 SYSCTL_ADD_ULONG(ctx, tree, OID_AUTO, 2453 "errors", CTLFLAG_RW, &sc->bnx_errors, ""); 2454 2455 #ifdef BNX_TSO_DEBUG 2456 for (i = 0; i < BNX_TSO_NSTATS; ++i) { 2457 ksnprintf(desc, sizeof(desc), "tso%d", i + 1); 2458 SYSCTL_ADD_ULONG(ctx, tree, OID_AUTO, 2459 desc, CTLFLAG_RW, &sc->bnx_tsosegs[i], ""); 2460 } 2461 #endif 2462 2463 /* 2464 * Call MI attach routine. 
/*
 * Device detach: release everything bnx_attach() set up.
 *
 * Also serves as the error-unwind path of bnx_attach() (its "fail:"
 * label calls bnx_detach()), so every release below must tolerate a
 * partially initialized softc (NULL checks on resources).
 */
static int
bnx_detach(device_t dev)
{
	struct bnx_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = &sc->arpcom.ac_if;

		/* Quiesce the chip and interrupts before unhooking ifnet. */
		ifnet_serialize_all(ifp);
		bnx_stop(sc);
		bnx_teardown_intr(sc, sc->bnx_intr_cnt);
		ifnet_deserialize_all(ifp);

		ether_ifdetach(ifp);
	}

	if (sc->bnx_flags & BNX_FLAG_STD_THREAD) {
		struct bnx_rx_std_ring *std = &sc->bnx_rx_std_ring;

		/*
		 * Stop the RX standard ring refill ithread.  The stop
		 * request must execute on the ithread's home cpu: run it
		 * directly if we are already there, otherwise ship it
		 * over via IPI.  Interlocked sleep until the thread
		 * signals that it has exited.
		 */
		tsleep_interlock(std, 0);

		if (std->bnx_rx_std_ithread.td_gd == mycpu) {
			bnx_rx_std_refill_stop(std);
		} else {
			lwkt_send_ipiq(std->bnx_rx_std_ithread.td_gd,
			    bnx_rx_std_refill_stop, std);
		}

		tsleep(std, PINTERLOCKED, "bnx_detach", 0);
		if (bootverbose)
			device_printf(dev, "RX std ithread exited\n");

		/* Make sure the IPI sent above has been processed. */
		lwkt_synchronize_ipiqs("bnx_detach_ipiq");
	}

	if (sc->bnx_flags & BNX_FLAG_TBI)
		ifmedia_removeall(&sc->bnx_ifmedia);
	if (sc->bnx_miibus)
		device_delete_child(dev, sc->bnx_miibus);
	bus_generic_detach(dev);

	bnx_free_intr(sc);

	/* Release MSI-X table memory, BAR0, and (APE chips) BAR2. */
	if (sc->bnx_msix_mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->bnx_msix_mem_rid,
		    sc->bnx_msix_mem_res);
	}
	if (sc->bnx_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    BGE_PCI_BAR0, sc->bnx_res);
	}
	if (sc->bnx_res2 != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    PCIR_BAR(2), sc->bnx_res2);
	}

	bnx_dma_free(sc);

	if (sc->bnx_serialize != NULL)
		kfree(sc->bnx_serialize, M_DEVBUF);

	return 0;
}
/*
 * Issue a global reset of the controller and wait for the bootcode
 * firmware handshake to complete.
 *
 * PCI configuration state that the reset clobbers (misc control,
 * cache line size, command word, APE access enables) is saved before
 * and restored after the reset.  The GRC APE lock is held across the
 * reset on chips with an APE.
 */
static void
bnx_reset(struct bnx_softc *sc)
{
	device_t dev = sc->bnx_dev;
	uint32_t cachesize, command, reset, mac_mode, mac_mode_mask;
	void (*write_op)(struct bnx_softc *, uint32_t, uint32_t);
	int i, val = 0;
	uint16_t devctl;

	/*
	 * Preserve the current MAC mode bits (plus the APE RX/TX
	 * enables when management firmware runs on the APE) so they
	 * can be restored after the reset.
	 */
	mac_mode_mask = BGE_MACMODE_HALF_DUPLEX | BGE_MACMODE_PORTMODE;
	if (sc->bnx_mfw_flags & BNX_MFW_ON_APE)
		mac_mode_mask |= BGE_MACMODE_APE_RX_EN | BGE_MACMODE_APE_TX_EN;
	mac_mode = CSR_READ_4(sc, BGE_MAC_MODE) & mac_mode_mask;

	write_op = bnx_writemem_direct;

	/* Acquire the NVRAM software arbitration lock (~160ms timeout). */
	CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
	for (i = 0; i < 8000; i++) {
		if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1)
			break;
		DELAY(20);
	}
	if (i == 8000)
		if_printf(&sc->arpcom.ac_if, "NVRAM lock timedout!\n");

	/* Take APE lock when performing reset. */
	bnx_ape_lock(sc, BGE_APE_LOCK_GRC);

	/* Save some important PCI state. */
	cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
	command = pci_read_config(dev, BGE_PCI_CMD, 4);

	pci_write_config(dev, BGE_PCI_MISC_CTL,
	    BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
	    BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW|
	    BGE_PCIMISCCTL_TAGGED_STATUS, 4);

	/* Disable fastboot on controllers that support it. */
	if (bootverbose)
		if_printf(&sc->arpcom.ac_if, "Disabling fastboot\n");
	CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0x0);

	/*
	 * Write the magic number to SRAM at offset 0xB50.
	 * When firmware finishes its initialization it will
	 * write ~BGE_SRAM_FW_MB_MAGIC to the same location.
	 */
	bnx_writemem_ind(sc, BGE_SRAM_FW_MB, BGE_SRAM_FW_MB_MAGIC);

	/* Core-clock reset; (65<<1) matches the Broadcom reference value. */
	reset = BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1);

	/* XXX: Broadcom Linux driver. */
	/* Force PCI-E 1.0a mode */
	if (!BNX_IS_57765_PLUS(sc) &&
	    CSR_READ_4(sc, BGE_PCIE_PHY_TSTCTL) ==
	    (BGE_PCIE_PHY_TSTCTL_PSCRAM |
	     BGE_PCIE_PHY_TSTCTL_PCIE10)) {
		CSR_WRITE_4(sc, BGE_PCIE_PHY_TSTCTL,
		    BGE_PCIE_PHY_TSTCTL_PSCRAM);
	}
	if (sc->bnx_chipid != BGE_CHIPID_BCM5750_A0) {
		/* Prevent PCIE link training during global reset */
		CSR_WRITE_4(sc, BGE_MISC_CFG, (1<<29));
		reset |= (1<<29);
	}

	/*
	 * Set GPHY Power Down Override to leave GPHY
	 * powered up in D0 uninitialized.
	 */
	if ((sc->bnx_flags & BNX_FLAG_CPMU) == 0)
		reset |= BGE_MISCCFG_GPHY_PD_OVERRIDE;

	/* Issue global reset */
	write_op(sc, BGE_MISC_CFG, reset);

	DELAY(100 * 1000);

	/* XXX: Broadcom Linux driver. */
	if (sc->bnx_chipid == BGE_CHIPID_BCM5750_A0) {
		uint32_t v;

		DELAY(500000); /* wait for link training to complete */
		v = pci_read_config(dev, 0xc4, 4);
		pci_write_config(dev, 0xc4, v | (1<<15), 4);
	}

	devctl = pci_read_config(dev, sc->bnx_pciecap + PCIER_DEVCTRL, 2);

	/* Disable no snoop and disable relaxed ordering. */
	devctl &= ~(PCIEM_DEVCTL_RELAX_ORDER | PCIEM_DEVCTL_NOSNOOP);

	/* Old PCI-E chips only support 128 bytes Max PayLoad Size. */
	if ((sc->bnx_flags & BNX_FLAG_CPMU) == 0) {
		devctl &= ~PCIEM_DEVCTL_MAX_PAYLOAD_MASK;
		devctl |= PCIEM_DEVCTL_MAX_PAYLOAD_128;
	}

	pci_write_config(dev, sc->bnx_pciecap + PCIER_DEVCTRL,
	    devctl, 2);

	/* Clear error status. */
	pci_write_config(dev, sc->bnx_pciecap + PCIER_DEVSTS,
	    PCIEM_DEVSTS_CORR_ERR |
	    PCIEM_DEVSTS_NFATAL_ERR |
	    PCIEM_DEVSTS_FATAL_ERR |
	    PCIEM_DEVSTS_UNSUPP_REQ, 2);

	/* Reset some of the PCI state that got zapped by reset */
	pci_write_config(dev, BGE_PCI_MISC_CTL,
	    BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
	    BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW|
	    BGE_PCIMISCCTL_TAGGED_STATUS, 4);
	val = BGE_PCISTATE_ROM_ENABLE | BGE_PCISTATE_ROM_RETRY_ENABLE;
	if (sc->bnx_mfw_flags & BNX_MFW_ON_APE) {
		/* Re-enable APE register/memory access by host driver. */
		val |= BGE_PCISTATE_ALLOW_APE_CTLSPC_WR |
		    BGE_PCISTATE_ALLOW_APE_SHMEM_WR |
		    BGE_PCISTATE_ALLOW_APE_PSPACE_WR;
	}
	pci_write_config(dev, BGE_PCI_PCISTATE, val, 4);
	pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
	pci_write_config(dev, BGE_PCI_CMD, command, 4);

	/* Enable memory arbiter */
	CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);

	/* Fix up byte swapping */
	CSR_WRITE_4(sc, BGE_MODE_CTL, bnx_dma_swap_options(sc));

	/* Restore the MAC mode bits saved at entry. */
	val = CSR_READ_4(sc, BGE_MAC_MODE);
	val = (val & ~mac_mode_mask) | mac_mode;
	CSR_WRITE_4(sc, BGE_MAC_MODE, val);
	DELAY(40);

	bnx_ape_unlock(sc, BGE_APE_LOCK_GRC);

	/*
	 * Poll until we see the 1's complement of the magic number.
	 * This indicates that the firmware initialization is complete.
	 */
	for (i = 0; i < BNX_FIRMWARE_TIMEOUT; i++) {
		val = bnx_readmem_ind(sc, BGE_SRAM_FW_MB);
		if (val == ~BGE_SRAM_FW_MB_MAGIC)
			break;
		DELAY(10);
	}
	if (i == BNX_FIRMWARE_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if, "firmware handshake "
		    "timed out, found 0x%08x\n", val);
	}

	/* BCM57765 A0 needs additional time before accessing. */
	if (sc->bnx_chipid == BGE_CHIPID_BCM57765_A0)
		DELAY(10 * 1000);

	/*
	 * The 5704 in TBI mode apparently needs some special
	 * adjustment to ensure the SERDES drive level is set
	 * to 1.2V.
	 */
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5704 &&
	    (sc->bnx_flags & BNX_FLAG_TBI)) {
		uint32_t serdescfg;

		serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG);
		serdescfg = (serdescfg & ~0xFFF) | 0x880;
		CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg);
	}

	/* Keep MI auto-polling disabled while the PHY settles. */
	CSR_WRITE_4(sc, BGE_MI_MODE,
	    sc->bnx_mi_mode & ~BGE_MIMODE_AUTOPOLL);
	DELAY(80);

	/* XXX: Broadcom Linux driver. */
	if (!BNX_IS_57765_PLUS(sc)) {
		uint32_t v;

		/* Enable Data FIFO protection. */
		v = CSR_READ_4(sc, BGE_PCIE_TLDLPL_PORT);
		CSR_WRITE_4(sc, BGE_PCIE_TLDLPL_PORT, v | (1 << 25));
	}

	DELAY(10000);

	if (sc->bnx_asicrev == BGE_ASICREV_BCM5720) {
		BNX_CLRBIT(sc, BGE_CPMU_CLCK_ORIDE,
		    CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
	}
}
/*
 * Frame reception handling. This is called if there's a frame
 * on the receive return list.
 *
 * Note: we have to be able to handle two possibilities here:
 * 1) the frame is from the jumbo receive ring
 * 2) the frame is from the standard receive ring
 *
 * Consumes at most 'count' descriptors up to producer index 'rx_prod'.
 * Standard-ring buffer consumption is published to the shared
 * std->bnx_rx_std_used counter in batches, and the refill ithread is
 * scheduled when enough buffers have been consumed.
 */
static void
bnx_rxeof(struct bnx_rx_ret_ring *ret, uint16_t rx_prod, int count)
{
	struct bnx_softc *sc = ret->bnx_sc;
	struct bnx_rx_std_ring *std = ret->bnx_std;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int std_used = 0, cpuid = mycpuid;

	while (ret->bnx_rx_saved_considx != rx_prod && count != 0) {
		struct pktinfo pi0, *pi = NULL;
		struct bge_rx_bd *cur_rx;
		struct bnx_rx_buf *rb;
		uint32_t rxidx;
		struct mbuf *m = NULL;
		uint16_t vlan_tag = 0;
		int have_tag = 0;

		--count;

		cur_rx = &ret->bnx_rx_ret_ring[ret->bnx_rx_saved_considx];

		/* Index of the std-ring buffer this frame landed in. */
		rxidx = cur_rx->bge_idx;
		KKASSERT(rxidx < BGE_STD_RX_RING_CNT);

		BNX_INC(ret->bnx_rx_saved_considx, BNX_RETURN_RING_CNT);
#ifdef BNX_RSS_DEBUG
		ret->bnx_rx_pkt++;
#endif

		if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
			have_tag = 1;
			vlan_tag = cur_rx->bge_vlan_tag;
		}

		/*
		 * Batch the consumed-buffer accounting: once cntmax
		 * buffers have piled up, publish the local count to the
		 * shared counter and kick the refill ithread.
		 */
		if (ret->bnx_rx_cnt >= ret->bnx_rx_cntmax) {
			atomic_add_int(&std->bnx_rx_std_used, std_used);
			std_used = 0;

			bnx_rx_std_refill_sched(ret, std);
		}
		ret->bnx_rx_cnt++;
		++std_used;

		rb = &std->bnx_rx_std_buf[rxidx];
		m = rb->bnx_rx_mbuf;
		if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
			IFNET_STAT_INC(ifp, ierrors, 1);
			/*
			 * Reuse the old mbuf in place; the store fence
			 * orders prior stores before the refilled flag
			 * becomes visible to the refill ithread.
			 */
			cpu_sfence();
			rb->bnx_rx_refilled = 1;
			continue;
		}
		if (bnx_newbuf_std(ret, rxidx, 0)) {
			/* Allocation failed; frame is dropped. */
			IFNET_STAT_INC(ifp, ierrors, 1);
			continue;
		}

		IFNET_STAT_INC(ifp, ipackets, 1);
		/* Chip includes the CRC in bge_len; strip it. */
		m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
		m->m_pkthdr.rcvif = ifp;

		if ((ifp->if_capenable & IFCAP_RXCSUM) &&
		    (cur_rx->bge_flags & BGE_RXBDFLAG_IPV6) == 0) {
			if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
				if ((cur_rx->bge_error_flag &
				    BGE_RXERRFLAG_IP_CSUM_NOK) == 0)
					m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
			}
			if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) {
				m->m_pkthdr.csum_data =
				    cur_rx->bge_tcp_udp_csum;
				m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
				    CSUM_PSEUDO_HDR;
			}
		}
		if (ifp->if_capenable & IFCAP_RSS) {
			pi = bnx_rss_info(&pi0, cur_rx);
			if (pi != NULL &&
			    (cur_rx->bge_flags & BGE_RXBDFLAG_RSS_HASH)) {
				m->m_flags |= M_HASH;
				m->m_pkthdr.hash =
				    toeplitz_hash(cur_rx->bge_hash);
			}
		}

		/*
		 * If we received a packet with a vlan tag, pass it
		 * to vlan_input() instead of ether_input().
		 */
		if (have_tag) {
			m->m_flags |= M_VLANTAG;
			m->m_pkthdr.ether_vlantag = vlan_tag;
		}
		ifp->if_input(ifp, m, pi, cpuid);
	}
	/* Tell the chip how far we have consumed the return ring. */
	bnx_writembx(sc, ret->bnx_rx_mbx, ret->bnx_rx_saved_considx);

	if (std_used > 0) {
		int cur_std_used;

		/*
		 * Publish any remaining consumed-buffer count and force
		 * a refill if the std ring is at least half consumed.
		 */
		cur_std_used = atomic_fetchadd_int(&std->bnx_rx_std_used,
		    std_used);
		if (cur_std_used + std_used >= (BGE_STD_RX_RING_CNT / 2)) {
#ifdef BNX_RSS_DEBUG
			ret->bnx_rx_force_sched++;
#endif
			bnx_rx_std_refill_sched(ret, std);
		}
	}
}
2900 */ 2901 while (txr->bnx_tx_saved_considx != tx_cons) { 2902 struct bnx_tx_buf *buf; 2903 uint32_t idx = 0; 2904 2905 idx = txr->bnx_tx_saved_considx; 2906 buf = &txr->bnx_tx_buf[idx]; 2907 if (buf->bnx_tx_mbuf != NULL) { 2908 IFNET_STAT_INC(ifp, opackets, 1); 2909 #ifdef BNX_TSS_DEBUG 2910 txr->bnx_tx_pkt++; 2911 #endif 2912 bus_dmamap_unload(txr->bnx_tx_mtag, 2913 buf->bnx_tx_dmamap); 2914 m_freem(buf->bnx_tx_mbuf); 2915 buf->bnx_tx_mbuf = NULL; 2916 } 2917 txr->bnx_tx_cnt--; 2918 BNX_INC(txr->bnx_tx_saved_considx, BGE_TX_RING_CNT); 2919 } 2920 2921 if ((BGE_TX_RING_CNT - txr->bnx_tx_cnt) >= 2922 (BNX_NSEG_RSVD + BNX_NSEG_SPARE)) 2923 ifsq_clr_oactive(txr->bnx_ifsq); 2924 2925 if (txr->bnx_tx_cnt == 0) 2926 txr->bnx_tx_watchdog.wd_timer = 0; 2927 2928 if (!ifsq_is_empty(txr->bnx_ifsq)) 2929 ifsq_devstart(txr->bnx_ifsq); 2930 } 2931 2932 static int 2933 bnx_handle_status(struct bnx_softc *sc) 2934 { 2935 uint32_t status; 2936 int handle = 0; 2937 2938 status = *sc->bnx_hw_status; 2939 2940 if (status & BGE_STATFLAG_ERROR) { 2941 uint32_t val; 2942 int reset = 0; 2943 2944 sc->bnx_errors++; 2945 2946 val = CSR_READ_4(sc, BGE_FLOW_ATTN); 2947 if (val & ~BGE_FLOWATTN_MB_LOWAT) { 2948 if_printf(&sc->arpcom.ac_if, 2949 "flow attn 0x%08x\n", val); 2950 reset = 1; 2951 } 2952 2953 val = CSR_READ_4(sc, BGE_MSI_STATUS); 2954 if (val & ~BGE_MSISTAT_MSI_PCI_REQ) { 2955 if_printf(&sc->arpcom.ac_if, 2956 "msi status 0x%08x\n", val); 2957 reset = 1; 2958 } 2959 2960 val = CSR_READ_4(sc, BGE_RDMA_STATUS); 2961 if (val) { 2962 if_printf(&sc->arpcom.ac_if, 2963 "rmda status 0x%08x\n", val); 2964 reset = 1; 2965 } 2966 2967 val = CSR_READ_4(sc, BGE_WDMA_STATUS); 2968 if (val) { 2969 if_printf(&sc->arpcom.ac_if, 2970 "wdma status 0x%08x\n", val); 2971 reset = 1; 2972 } 2973 2974 if (reset) { 2975 bnx_serialize_skipmain(sc); 2976 bnx_init(sc); 2977 bnx_deserialize_skipmain(sc); 2978 } 2979 handle = 1; 2980 } 2981 2982 if ((status & BGE_STATFLAG_LINKSTATE_CHANGED) || 
/*
 * Polling (ifpoll) RX handler for one return ring: latch the status
 * tag, then process up to 'cycle' received frames.
 */
static void
bnx_npoll_rx(struct ifnet *ifp __unused, void *xret, int cycle)
{
	struct bnx_rx_ret_ring *ret = xret;
	uint16_t rx_prod;

	ASSERT_SERIALIZED(&ret->bnx_rx_ret_serialize);

	/*
	 * Save the status tag before reading the producer index; the
	 * load fence keeps these DMA-updated reads in program order.
	 */
	ret->bnx_saved_status_tag = *ret->bnx_hw_status_tag;
	cpu_lfence();

	rx_prod = *ret->bnx_rx_considx;
	if (ret->bnx_rx_saved_considx != rx_prod)
		bnx_rxeof(ret, rx_prod, cycle);
}

/*
 * Polling TX handler for rings that do not maintain their own status
 * tag (bundled RX/TX configuration): just reclaim completed frames.
 */
static void
bnx_npoll_tx_notag(struct ifnet *ifp __unused, void *xtxr, int cycle __unused)
{
	struct bnx_tx_ring *txr = xtxr;
	uint16_t tx_cons;

	ASSERT_SERIALIZED(&txr->bnx_tx_serialize);

	tx_cons = *txr->bnx_tx_considx;
	if (txr->bnx_tx_saved_considx != tx_cons)
		bnx_txeof(txr, tx_cons);
}

/*
 * Polling TX handler for rings with their own status tag: latch the
 * tag first (ordered by a load fence), then reclaim.
 */
static void
bnx_npoll_tx(struct ifnet *ifp, void *xtxr, int cycle)
{
	struct bnx_tx_ring *txr = xtxr;

	ASSERT_SERIALIZED(&txr->bnx_tx_serialize);

	txr->bnx_saved_status_tag = *txr->bnx_hw_status_tag;
	cpu_lfence();
	bnx_npoll_tx_notag(ifp, txr, cycle);
}

/*
 * Polling status handler (no status tag): process error/link events.
 */
static void
bnx_npoll_status_notag(struct ifnet *ifp)
{
	struct bnx_softc *sc = ifp->if_softc;

	ASSERT_SERIALIZED(&sc->bnx_main_serialize);

	if (bnx_handle_status(sc)) {
		/*
		 * Status changes are handled; force the chip to
		 * update the status block to reflect whether there
		 * are more status changes or not, else stale status
		 * changes are always seen.
		 */
		BNX_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
	}
}
3050 */ 3051 BNX_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW); 3052 } 3053 } 3054 3055 static void 3056 bnx_npoll_status(struct ifnet *ifp) 3057 { 3058 struct bnx_softc *sc = ifp->if_softc; 3059 3060 ASSERT_SERIALIZED(&sc->bnx_main_serialize); 3061 3062 sc->bnx_saved_status_tag = *sc->bnx_hw_status_tag; 3063 cpu_lfence(); 3064 bnx_npoll_status_notag(ifp); 3065 } 3066 3067 static void 3068 bnx_npoll(struct ifnet *ifp, struct ifpoll_info *info) 3069 { 3070 struct bnx_softc *sc = ifp->if_softc; 3071 int i; 3072 3073 ASSERT_IFNET_SERIALIZED_ALL(ifp); 3074 3075 if (info != NULL) { 3076 if (sc->bnx_flags & BNX_FLAG_STATUS_HASTAG) 3077 info->ifpi_status.status_func = bnx_npoll_status; 3078 else 3079 info->ifpi_status.status_func = bnx_npoll_status_notag; 3080 info->ifpi_status.serializer = &sc->bnx_main_serialize; 3081 3082 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) { 3083 struct bnx_tx_ring *txr = &sc->bnx_tx_ring[i]; 3084 int idx = i + sc->bnx_npoll_txoff; 3085 3086 KKASSERT(idx < ncpus2); 3087 if (sc->bnx_flags & BNX_FLAG_RXTX_BUNDLE) { 3088 info->ifpi_tx[idx].poll_func = 3089 bnx_npoll_tx_notag; 3090 } else { 3091 info->ifpi_tx[idx].poll_func = bnx_npoll_tx; 3092 } 3093 info->ifpi_tx[idx].arg = txr; 3094 info->ifpi_tx[idx].serializer = &txr->bnx_tx_serialize; 3095 ifsq_set_cpuid(txr->bnx_ifsq, idx); 3096 } 3097 3098 for (i = 0; i < sc->bnx_rx_retcnt; ++i) { 3099 struct bnx_rx_ret_ring *ret = &sc->bnx_rx_ret_ring[i]; 3100 int idx = i + sc->bnx_npoll_rxoff; 3101 3102 KKASSERT(idx < ncpus2); 3103 info->ifpi_rx[idx].poll_func = bnx_npoll_rx; 3104 info->ifpi_rx[idx].arg = ret; 3105 info->ifpi_rx[idx].serializer = 3106 &ret->bnx_rx_ret_serialize; 3107 } 3108 3109 if (ifp->if_flags & IFF_RUNNING) { 3110 bnx_disable_intr(sc); 3111 bnx_set_tick_cpuid(sc, TRUE); 3112 3113 sc->bnx_coal_chg = BNX_TX_COAL_BDS_CHG | 3114 BNX_RX_COAL_BDS_CHG; 3115 bnx_coal_change(sc); 3116 } 3117 } else { 3118 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) { 3119 
ifsq_set_cpuid(sc->bnx_tx_ring[i].bnx_ifsq, 3120 sc->bnx_tx_ring[i].bnx_tx_cpuid); 3121 } 3122 if (ifp->if_flags & IFF_RUNNING) { 3123 sc->bnx_coal_chg = BNX_TX_COAL_BDS_CHG | 3124 BNX_RX_COAL_BDS_CHG; 3125 bnx_coal_change(sc); 3126 3127 bnx_enable_intr(sc); 3128 bnx_set_tick_cpuid(sc, FALSE); 3129 } 3130 } 3131 } 3132 3133 #endif /* IFPOLL_ENABLE */ 3134 3135 static void 3136 bnx_intr_legacy(void *xsc) 3137 { 3138 struct bnx_softc *sc = xsc; 3139 struct bnx_rx_ret_ring *ret = &sc->bnx_rx_ret_ring[0]; 3140 3141 if (ret->bnx_saved_status_tag == *ret->bnx_hw_status_tag) { 3142 uint32_t val; 3143 3144 val = pci_read_config(sc->bnx_dev, BGE_PCI_PCISTATE, 4); 3145 if (val & BGE_PCISTAT_INTR_NOTACT) 3146 return; 3147 } 3148 3149 /* 3150 * NOTE: 3151 * Interrupt will have to be disabled if tagged status 3152 * is used, else interrupt will always be asserted on 3153 * certain chips (at least on BCM5750 AX/BX). 3154 */ 3155 bnx_writembx(sc, BGE_MBX_IRQ0_LO, 1); 3156 3157 bnx_intr(sc); 3158 } 3159 3160 static void 3161 bnx_msi(void *xsc) 3162 { 3163 bnx_intr(xsc); 3164 } 3165 3166 static void 3167 bnx_intr(struct bnx_softc *sc) 3168 { 3169 struct ifnet *ifp = &sc->arpcom.ac_if; 3170 struct bnx_rx_ret_ring *ret = &sc->bnx_rx_ret_ring[0]; 3171 3172 ASSERT_SERIALIZED(&sc->bnx_main_serialize); 3173 3174 ret->bnx_saved_status_tag = *ret->bnx_hw_status_tag; 3175 /* 3176 * Use a load fence to ensure that status_tag is saved 3177 * before rx_prod, tx_cons and status. 
3178 */ 3179 cpu_lfence(); 3180 3181 bnx_handle_status(sc); 3182 3183 if (ifp->if_flags & IFF_RUNNING) { 3184 struct bnx_tx_ring *txr = &sc->bnx_tx_ring[0]; 3185 uint16_t rx_prod, tx_cons; 3186 3187 lwkt_serialize_enter(&ret->bnx_rx_ret_serialize); 3188 rx_prod = *ret->bnx_rx_considx; 3189 if (ret->bnx_rx_saved_considx != rx_prod) 3190 bnx_rxeof(ret, rx_prod, -1); 3191 lwkt_serialize_exit(&ret->bnx_rx_ret_serialize); 3192 3193 lwkt_serialize_enter(&txr->bnx_tx_serialize); 3194 tx_cons = *txr->bnx_tx_considx; 3195 if (txr->bnx_tx_saved_considx != tx_cons) 3196 bnx_txeof(txr, tx_cons); 3197 lwkt_serialize_exit(&txr->bnx_tx_serialize); 3198 } 3199 3200 bnx_writembx(sc, BGE_MBX_IRQ0_LO, ret->bnx_saved_status_tag << 24); 3201 } 3202 3203 static void 3204 bnx_msix_tx_status(void *xtxr) 3205 { 3206 struct bnx_tx_ring *txr = xtxr; 3207 struct bnx_softc *sc = txr->bnx_sc; 3208 struct ifnet *ifp = &sc->arpcom.ac_if; 3209 3210 ASSERT_SERIALIZED(&sc->bnx_main_serialize); 3211 3212 txr->bnx_saved_status_tag = *txr->bnx_hw_status_tag; 3213 /* 3214 * Use a load fence to ensure that status_tag is saved 3215 * before tx_cons and status. 3216 */ 3217 cpu_lfence(); 3218 3219 bnx_handle_status(sc); 3220 3221 if (ifp->if_flags & IFF_RUNNING) { 3222 uint16_t tx_cons; 3223 3224 lwkt_serialize_enter(&txr->bnx_tx_serialize); 3225 tx_cons = *txr->bnx_tx_considx; 3226 if (txr->bnx_tx_saved_considx != tx_cons) 3227 bnx_txeof(txr, tx_cons); 3228 lwkt_serialize_exit(&txr->bnx_tx_serialize); 3229 } 3230 3231 bnx_writembx(sc, BGE_MBX_IRQ0_LO, txr->bnx_saved_status_tag << 24); 3232 } 3233 3234 static void 3235 bnx_msix_rx(void *xret) 3236 { 3237 struct bnx_rx_ret_ring *ret = xret; 3238 uint16_t rx_prod; 3239 3240 ASSERT_SERIALIZED(&ret->bnx_rx_ret_serialize); 3241 3242 ret->bnx_saved_status_tag = *ret->bnx_hw_status_tag; 3243 /* 3244 * Use a load fence to ensure that status_tag is saved 3245 * before rx_prod. 
3246 */ 3247 cpu_lfence(); 3248 3249 rx_prod = *ret->bnx_rx_considx; 3250 if (ret->bnx_rx_saved_considx != rx_prod) 3251 bnx_rxeof(ret, rx_prod, -1); 3252 3253 bnx_writembx(ret->bnx_sc, ret->bnx_msix_mbx, 3254 ret->bnx_saved_status_tag << 24); 3255 } 3256 3257 static void 3258 bnx_msix_rxtx(void *xret) 3259 { 3260 struct bnx_rx_ret_ring *ret = xret; 3261 struct bnx_tx_ring *txr = ret->bnx_txr; 3262 uint16_t rx_prod, tx_cons; 3263 3264 ASSERT_SERIALIZED(&ret->bnx_rx_ret_serialize); 3265 3266 ret->bnx_saved_status_tag = *ret->bnx_hw_status_tag; 3267 /* 3268 * Use a load fence to ensure that status_tag is saved 3269 * before rx_prod and tx_cons. 3270 */ 3271 cpu_lfence(); 3272 3273 rx_prod = *ret->bnx_rx_considx; 3274 if (ret->bnx_rx_saved_considx != rx_prod) 3275 bnx_rxeof(ret, rx_prod, -1); 3276 3277 lwkt_serialize_enter(&txr->bnx_tx_serialize); 3278 tx_cons = *txr->bnx_tx_considx; 3279 if (txr->bnx_tx_saved_considx != tx_cons) 3280 bnx_txeof(txr, tx_cons); 3281 lwkt_serialize_exit(&txr->bnx_tx_serialize); 3282 3283 bnx_writembx(ret->bnx_sc, ret->bnx_msix_mbx, 3284 ret->bnx_saved_status_tag << 24); 3285 } 3286 3287 static void 3288 bnx_msix_status(void *xsc) 3289 { 3290 struct bnx_softc *sc = xsc; 3291 3292 ASSERT_SERIALIZED(&sc->bnx_main_serialize); 3293 3294 sc->bnx_saved_status_tag = *sc->bnx_hw_status_tag; 3295 /* 3296 * Use a load fence to ensure that status_tag is saved 3297 * before status. 3298 */ 3299 cpu_lfence(); 3300 3301 bnx_handle_status(sc); 3302 3303 bnx_writembx(sc, BGE_MBX_IRQ0_LO, sc->bnx_saved_status_tag << 24); 3304 } 3305 3306 static void 3307 bnx_tick(void *xsc) 3308 { 3309 struct bnx_softc *sc = xsc; 3310 3311 lwkt_serialize_enter(&sc->bnx_main_serialize); 3312 3313 bnx_stats_update_regs(sc); 3314 3315 if (sc->bnx_flags & BNX_FLAG_TBI) { 3316 /* 3317 * Since in TBI mode auto-polling can't be used we should poll 3318 * link status manually. Here we register pending link event 3319 * and trigger interrupt. 
3320 */ 3321 sc->bnx_link_evt++; 3322 BNX_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW); 3323 } else if (!sc->bnx_link) { 3324 mii_tick(device_get_softc(sc->bnx_miibus)); 3325 } 3326 3327 callout_reset_bycpu(&sc->bnx_tick_timer, hz, bnx_tick, sc, 3328 sc->bnx_tick_cpuid); 3329 3330 lwkt_serialize_exit(&sc->bnx_main_serialize); 3331 } 3332 3333 static void 3334 bnx_stats_update_regs(struct bnx_softc *sc) 3335 { 3336 struct ifnet *ifp = &sc->arpcom.ac_if; 3337 struct bge_mac_stats_regs stats; 3338 uint32_t *s, val; 3339 int i; 3340 3341 s = (uint32_t *)&stats; 3342 for (i = 0; i < sizeof(struct bge_mac_stats_regs); i += 4) { 3343 *s = CSR_READ_4(sc, BGE_RX_STATS + i); 3344 s++; 3345 } 3346 3347 IFNET_STAT_SET(ifp, collisions, 3348 (stats.dot3StatsSingleCollisionFrames + 3349 stats.dot3StatsMultipleCollisionFrames + 3350 stats.dot3StatsExcessiveCollisions + 3351 stats.dot3StatsLateCollisions)); 3352 3353 val = CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS); 3354 sc->bnx_norxbds += val; 3355 } 3356 3357 /* 3358 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data 3359 * pointers to descriptors. 
 */
/*
 * Returns 0 on success.  On failure the mbuf chain is freed and
 * *m_head0 is set to NULL.  On success *txidx is advanced past the
 * descriptors consumed and *segs_used is incremented by the number
 * of DMA segments loaded.
 */
static int
bnx_encap(struct bnx_tx_ring *txr, struct mbuf **m_head0, uint32_t *txidx,
    int *segs_used)
{
	struct bge_tx_bd *d = NULL;
	uint16_t csum_flags = 0, vlan_tag = 0, mss = 0;
	bus_dma_segment_t segs[BNX_NSEG_NEW];
	bus_dmamap_t map;
	int error, maxsegs, nsegs, idx, i;
	struct mbuf *m_head = *m_head0, *m_new;

	/*
	 * Work out checksum/TSO descriptor flags first; TSO setup may
	 * replace the mbuf chain (it updates *m_head0).
	 */
	if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
#ifdef BNX_TSO_DEBUG
		int tso_nsegs;
#endif

		error = bnx_setup_tso(txr, m_head0, &mss, &csum_flags);
		if (error)
			return error;
		m_head = *m_head0;

#ifdef BNX_TSO_DEBUG
		tso_nsegs = (m_head->m_pkthdr.len /
		    m_head->m_pkthdr.tso_segsz) - 1;
		if (tso_nsegs > (BNX_TSO_NSTATS - 1))
			tso_nsegs = BNX_TSO_NSTATS - 1;
		else if (tso_nsegs < 0)
			tso_nsegs = 0;
		txr->bnx_sc->bnx_tsosegs[tso_nsegs]++;
#endif
	} else if (m_head->m_pkthdr.csum_flags & BNX_CSUM_FEATURES) {
		if (m_head->m_pkthdr.csum_flags & CSUM_IP)
			csum_flags |= BGE_TXBDFLAG_IP_CSUM;
		if (m_head->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
			csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
		if (m_head->m_flags & M_LASTFRAG)
			csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
		else if (m_head->m_flags & M_FRAG)
			csum_flags |= BGE_TXBDFLAG_IP_FRAG;
	}
	if (m_head->m_flags & M_VLANTAG) {
		csum_flags |= BGE_TXBDFLAG_VLAN_TAG;
		vlan_tag = m_head->m_pkthdr.ether_vlantag;
	}

	idx = *txidx;
	map = txr->bnx_tx_buf[idx].bnx_tx_dmamap;

	/* Caller guarantees BNX_NSEG_RSVD + BNX_NSEG_SPARE free slots. */
	maxsegs = (BGE_TX_RING_CNT - txr->bnx_tx_cnt) - BNX_NSEG_RSVD;
	KASSERT(maxsegs >= BNX_NSEG_SPARE,
	    ("not enough segments %d", maxsegs));

	if (maxsegs > BNX_NSEG_NEW)
		maxsegs = BNX_NSEG_NEW;

	/*
	 * Pad outbound frame to BGE_MIN_FRAMELEN for an unusual reason.
	 * The bge hardware will pad out Tx runts to BGE_MIN_FRAMELEN,
	 * but when such padded frames employ the bge IP/TCP checksum
	 * offload, the hardware checksum assist gives incorrect results
	 * (possibly from incorporating its own padding into the UDP/TCP
	 * checksum; who knows).  If we pad such runts with zeros, the
	 * onboard checksum comes out correct.
	 */
	if ((csum_flags & BGE_TXBDFLAG_TCP_UDP_CSUM) &&
	    m_head->m_pkthdr.len < BNX_MIN_FRAMELEN) {
		error = m_devpad(m_head, BNX_MIN_FRAMELEN);
		if (error)
			goto back;
	}

	if ((txr->bnx_tx_flags & BNX_TX_FLAG_SHORTDMA) &&
	    m_head->m_next != NULL) {
		m_new = bnx_defrag_shortdma(m_head);
		if (m_new == NULL) {
			error = ENOBUFS;
			goto back;
		}
		*m_head0 = m_head = m_new;
	}
	if ((m_head->m_pkthdr.csum_flags & CSUM_TSO) == 0 &&
	    (txr->bnx_tx_flags & BNX_TX_FLAG_FORCE_DEFRAG) &&
	    m_head->m_next != NULL) {
		/*
		 * Forcefully defragment mbuf chain to overcome hardware
		 * limitation which only support a single outstanding
		 * DMA read operation.  If it fails, keep moving on using
		 * the original mbuf chain.
		 */
		m_new = m_defrag(m_head, M_NOWAIT);
		if (m_new != NULL)
			*m_head0 = m_head = m_new;
	}

	error = bus_dmamap_load_mbuf_defrag(txr->bnx_tx_mtag, map,
	    m_head0, segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
	if (error)
		goto back;
	*segs_used += nsegs;

	m_head = *m_head0;
	bus_dmamap_sync(txr->bnx_tx_mtag, map, BUS_DMASYNC_PREWRITE);

	/* Fill in one TX descriptor per DMA segment. */
	for (i = 0; ; i++) {
		d = &txr->bnx_tx_ring[idx];

		d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr);
		d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr);
		d->bge_len = segs[i].ds_len;
		d->bge_flags = csum_flags;
		d->bge_vlan_tag = vlan_tag;
		d->bge_mss = mss;

		if (i == nsegs - 1)
			break;
		BNX_INC(idx, BGE_TX_RING_CNT);
	}
	/* Mark the last segment as end of packet... */
	d->bge_flags |= BGE_TXBDFLAG_END;

	/*
	 * Insure that the map for this transmission is placed at
	 * the array index of the last descriptor in this chain.
	 */
	txr->bnx_tx_buf[*txidx].bnx_tx_dmamap = txr->bnx_tx_buf[idx].bnx_tx_dmamap;
	txr->bnx_tx_buf[idx].bnx_tx_dmamap = map;
	txr->bnx_tx_buf[idx].bnx_tx_mbuf = m_head;
	txr->bnx_tx_cnt += nsegs;

	BNX_INC(idx, BGE_TX_RING_CNT);
	*txidx = idx;
back:
	if (error) {
		/* The mbuf chain is consumed on error. */
		m_freem(*m_head0);
		*m_head0 = NULL;
	}
	return error;
}

/*
 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit descriptors.
 */
static void
bnx_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
{
	struct bnx_tx_ring *txr = ifsq_get_priv(ifsq);
	struct mbuf *m_head = NULL;
	uint32_t prodidx;
	int nsegs = 0;	/* segments queued since last mailbox write */

	KKASSERT(txr->bnx_ifsq == ifsq);
	ASSERT_SERIALIZED(&txr->bnx_tx_serialize);

	if ((ifp->if_flags & IFF_RUNNING) == 0 || ifsq_is_oactive(ifsq))
		return;

	prodidx = txr->bnx_tx_prodidx;

	/* Stop when the producer slot still holds an unreclaimed mbuf. */
	while (txr->bnx_tx_buf[prodidx].bnx_tx_mbuf == NULL) {
		/*
		 * Sanity check: avoid coming within BGE_NSEG_RSVD
		 * descriptors of the end of the ring.  Also make
		 * sure there are BGE_NSEG_SPARE descriptors for
		 * jumbo buffers' or TSO segments' defragmentation.
		 */
		if ((BGE_TX_RING_CNT - txr->bnx_tx_cnt) <
		    (BNX_NSEG_RSVD + BNX_NSEG_SPARE)) {
			ifsq_set_oactive(ifsq);
			break;
		}

		m_head = ifsq_dequeue(ifsq);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (bnx_encap(txr, &m_head, &prodidx, &nsegs)) {
			ifsq_set_oactive(ifsq);
			IFNET_STAT_INC(ifp, oerrors, 1);
			break;
		}

		/* Batch mailbox writes: kick the chip every bnx_tx_wreg segs. */
		if (nsegs >= txr->bnx_tx_wreg) {
			/* Transmit */
			bnx_writembx(txr->bnx_sc, txr->bnx_tx_mbx, prodidx);
			nsegs = 0;
		}

		ETHER_BPF_MTAP(ifp, m_head);

		/*
		 * Set a timeout in case the chip goes out to lunch.
		 */
		txr->bnx_tx_watchdog.wd_timer = 5;
	}

	if (nsegs > 0) {
		/* Transmit any residue not yet pushed to the chip. */
		bnx_writembx(txr->bnx_sc, txr->bnx_tx_mbx, prodidx);
	}
	txr->bnx_tx_prodidx = prodidx;
}

/*
 * (Re)initialize the chip: full stop, reset, block init, ring setup,
 * then bring the MAC up.  Called with the ifnet fully serialized.
 */
static void
bnx_init(void *xsc)
{
	struct bnx_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint16_t *m;
	uint32_t mode;
	int i;
	boolean_t polling;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	/* Cancel pending I/O and flush buffers. */
	bnx_stop(sc);

	bnx_sig_pre_reset(sc, BNX_RESET_START);
	bnx_reset(sc);
	bnx_sig_post_reset(sc, BNX_RESET_START);

	bnx_chipinit(sc);

	/*
	 * Init the various state machines, ring
	 * control blocks and firmware.
	 */
	if (bnx_blockinit(sc)) {
		if_printf(ifp, "initialization failure\n");
		bnx_stop(sc);
		return;
	}

	/* Specify MTU. */
	CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
	    ETHER_HDR_LEN + ETHER_CRC_LEN + EVL_ENCAPLEN);

	/* Load our MAC address. */
	m = (uint16_t *)&sc->arpcom.ac_enaddr[0];
	CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
	CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));

	/* Enable or disable promiscuous mode as needed. */
	bnx_setpromisc(sc);

	/* Program multicast filter. */
	bnx_setmulti(sc);

	/* Init RX ring. */
	if (bnx_init_rx_ring_std(&sc->bnx_rx_std_ring)) {
		if_printf(ifp, "RX ring initialization failed\n");
		bnx_stop(sc);
		return;
	}

	/* Init jumbo RX ring. */
	if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN)) {
		if (bnx_init_rx_ring_jumbo(sc)) {
			if_printf(ifp, "Jumbo RX ring initialization failed\n");
			bnx_stop(sc);
			return;
		}
	}

	/* Init our RX return ring index */
	for (i = 0; i < sc->bnx_rx_retcnt; ++i) {
		struct bnx_rx_ret_ring *ret = &sc->bnx_rx_ret_ring[i];

		ret->bnx_rx_saved_considx = 0;
		ret->bnx_rx_cnt = 0;
	}

	/* Init TX ring. */
	for (i = 0; i < sc->bnx_tx_ringcnt; ++i)
		bnx_init_tx_ring(&sc->bnx_tx_ring[i]);

	/* Enable TX MAC state machine lockup fix. */
	mode = CSR_READ_4(sc, BGE_TX_MODE);
	mode |= BGE_TXMODE_MBUF_LOCKUP_FIX;
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5720 ||
	    sc->bnx_asicrev == BGE_ASICREV_BCM5762) {
		/* Preserve chip-set bits that must not be clobbered. */
		mode &= ~(BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE);
		mode |= CSR_READ_4(sc, BGE_TX_MODE) &
		    (BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE);
	}
	/* Turn on transmitter */
	CSR_WRITE_4(sc, BGE_TX_MODE, mode | BGE_TXMODE_ENABLE);
	DELAY(100);

	/* Initialize RSS */
	mode = BGE_RXMODE_ENABLE | BGE_RXMODE_IPV6_ENABLE;
	if (BNX_RSS_ENABLED(sc)) {
		bnx_init_rss(sc);
		mode |= BGE_RXMODE_RSS_ENABLE |
		    BGE_RXMODE_RSS_HASH_MASK_BITS |
		    BGE_RXMODE_RSS_IPV4_HASH |
		    BGE_RXMODE_RSS_TCP_IPV4_HASH;
	}
	/* Turn on receiver */
	BNX_SETBIT(sc, BGE_RX_MODE, mode);
	DELAY(10);

	/*
	 * Set the number of good frames to receive after RX MBUF
	 * Low Watermark has been reached.  After the RX MAC receives
	 * this number of frames, it will drop subsequent incoming
	 * frames until the MBUF High Watermark is reached.
	 */
	if (BNX_IS_57765_FAMILY(sc))
		CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 1);
	else
		CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 2);

	if (sc->bnx_intr_type == PCI_INTR_TYPE_MSI ||
	    sc->bnx_intr_type == PCI_INTR_TYPE_MSIX) {
		if (bootverbose) {
			if_printf(ifp, "MSI_MODE: %#x\n",
			    CSR_READ_4(sc, BGE_MSI_MODE));
		}
	}

	/* Tell firmware we're alive. */
	BNX_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);

	/* Enable host interrupts if polling(4) is not enabled. */
	PCI_SETBIT(sc->bnx_dev, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA, 4);

	polling = FALSE;
#ifdef IFPOLL_ENABLE
	if (ifp->if_flags & IFF_NPOLLING)
		polling = TRUE;
#endif
	if (polling)
		bnx_disable_intr(sc);
	else
		bnx_enable_intr(sc);
	bnx_set_tick_cpuid(sc, polling);

	ifp->if_flags |= IFF_RUNNING;
	for (i = 0; i < sc->bnx_tx_ringcnt; ++i) {
		struct bnx_tx_ring *txr = &sc->bnx_tx_ring[i];

		ifsq_clr_oactive(txr->bnx_ifsq);
		ifsq_watchdog_start(&txr->bnx_tx_watchdog);
	}

	bnx_ifmedia_upd(ifp);

	callout_reset_bycpu(&sc->bnx_tick_timer, hz, bnx_tick, sc,
	    sc->bnx_tick_cpuid);
}

/*
 * Set media options.
 */
static int
bnx_ifmedia_upd(struct ifnet *ifp)
{
	struct bnx_softc *sc = ifp->if_softc;

	/* If this is a 1000baseX NIC, enable the TBI port. */
	if (sc->bnx_flags & BNX_FLAG_TBI) {
		struct ifmedia *ifm = &sc->bnx_ifmedia;

		if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
			return(EINVAL);

		switch(IFM_SUBTYPE(ifm->ifm_media)) {
		case IFM_AUTO:
			break;

		case IFM_1000_SX:
			if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
				BNX_CLRBIT(sc, BGE_MAC_MODE,
				    BGE_MACMODE_HALF_DUPLEX);
			} else {
				BNX_SETBIT(sc, BGE_MAC_MODE,
				    BGE_MACMODE_HALF_DUPLEX);
			}
			DELAY(40);
			break;
		default:
			return(EINVAL);
		}
	} else {
		struct mii_data *mii = device_get_softc(sc->bnx_miibus);

		sc->bnx_link_evt++;
		sc->bnx_link = 0;
		if (mii->mii_instance) {
			struct mii_softc *miisc;

			LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
				mii_phy_reset(miisc);
		}
		mii_mediachg(mii);

		/*
		 * Force an interrupt so that we will call bnx_link_upd
		 * if needed and clear any pending link state attention.
		 * Without this we are not getting any further interrupts
		 * for link state changes and thus will not UP the link and
		 * not be able to send in bnx_start. The only way to get
		 * things working was to receive a packet and get an RX
		 * intr.
		 *
		 * bnx_tick should help for fiber cards and we might not
		 * need to do this here if BNX_FLAG_TBI is set but as
		 * we poll for fiber anyway it should not harm.
		 */
		BNX_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
	}
	return(0);
}

/*
 * Report current media status.
 */
static void
bnx_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct bnx_softc *sc = ifp->if_softc;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	if (sc->bnx_flags & BNX_FLAG_TBI) {
		/* TBI (fiber): read link state straight from the MAC. */
		ifmr->ifm_status = IFM_AVALID;
		ifmr->ifm_active = IFM_ETHER;
		if (CSR_READ_4(sc, BGE_MAC_STS) &
		    BGE_MACSTAT_TBI_PCS_SYNCHED) {
			ifmr->ifm_status |= IFM_ACTIVE;
		} else {
			ifmr->ifm_active |= IFM_NONE;
			return;
		}

		ifmr->ifm_active |= IFM_1000_SX;
		if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
			ifmr->ifm_active |= IFM_HDX;
		else
			ifmr->ifm_active |= IFM_FDX;
	} else {
		/* Copper: let the PHY layer report the status. */
		struct mii_data *mii = device_get_softc(sc->bnx_miibus);

		mii_pollstat(mii);
		ifmr->ifm_active = mii->mii_media_active;
		ifmr->ifm_status = mii->mii_media_status;
	}
}

/*
 * Handle interface ioctls.  Called with the ifnet fully serialized.
 */
static int
bnx_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
{
	struct bnx_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int mask, error = 0;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	switch (command) {
	case SIOCSIFMTU:
		if ((!BNX_IS_JUMBO_CAPABLE(sc) && ifr->ifr_mtu > ETHERMTU) ||
		    (BNX_IS_JUMBO_CAPABLE(sc) &&
		     ifr->ifr_mtu > BNX_JUMBO_MTU)) {
			error = EINVAL;
		} else if (ifp->if_mtu != ifr->ifr_mtu) {
			ifp->if_mtu = ifr->ifr_mtu;
			if (ifp->if_flags & IFF_RUNNING)
				bnx_init(sc);
		}
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING) {
				mask = ifp->if_flags ^ sc->bnx_if_flags;

				/*
				 * If only the state of the PROMISC flag
				 * changed, then just use the 'set promisc
				 * mode' command instead of reinitializing
				 * the entire NIC. Doing a full re-init
				 * means reloading the firmware and waiting
				 * for it to start up, which may take a
				 * second or two.  Similarly for ALLMULTI.
				 */
				if (mask & IFF_PROMISC)
					bnx_setpromisc(sc);
				if (mask & IFF_ALLMULTI)
					bnx_setmulti(sc);
			} else {
				bnx_init(sc);
			}
		} else if (ifp->if_flags & IFF_RUNNING) {
			bnx_stop(sc);
		}
		sc->bnx_if_flags = ifp->if_flags;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING)
			bnx_setmulti(sc);
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		if (sc->bnx_flags & BNX_FLAG_TBI) {
			error = ifmedia_ioctl(ifp, ifr,
			    &sc->bnx_ifmedia, command);
		} else {
			struct mii_data *mii;

			mii = device_get_softc(sc->bnx_miibus);
			error = ifmedia_ioctl(ifp, ifr,
			    &mii->mii_media, command);
		}
		break;
	case SIOCSIFCAP:
		/* Toggle offload capabilities and keep if_hwassist in sync. */
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_HWCSUM) {
			ifp->if_capenable ^= (mask & IFCAP_HWCSUM);
			if (ifp->if_capenable & IFCAP_TXCSUM)
				ifp->if_hwassist |= BNX_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~BNX_CSUM_FEATURES;
		}
		if (mask & IFCAP_TSO) {
			ifp->if_capenable ^= (mask & IFCAP_TSO);
			if (ifp->if_capenable & IFCAP_TSO)
				ifp->if_hwassist |= CSUM_TSO;
			else
				ifp->if_hwassist &= ~CSUM_TSO;
		}
		if (mask & IFCAP_RSS)
			ifp->if_capenable ^= IFCAP_RSS;
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}
	return error;
}

/*
 * TX watchdog timeout: reset/reinit the chip and reschedule the
 * transmit queues.
 */
static void
bnx_watchdog(struct ifaltq_subque *ifsq)
{
	struct ifnet *ifp = ifsq_get_ifp(ifsq);
	struct bnx_softc *sc = ifp->if_softc;
	int i;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	if_printf(ifp, "watchdog timeout -- resetting\n");

	bnx_init(sc);

	IFNET_STAT_INC(ifp, oerrors, 1);

	for (i = 0; i < sc->bnx_tx_ringcnt; ++i)
		ifsq_devstart_sched(sc->bnx_tx_ring[i].bnx_ifsq);
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX
lists. 3931 */ 3932 static void 3933 bnx_stop(struct bnx_softc *sc) 3934 { 3935 struct ifnet *ifp = &sc->arpcom.ac_if; 3936 int i; 3937 3938 ASSERT_IFNET_SERIALIZED_ALL(ifp); 3939 3940 callout_stop(&sc->bnx_tick_timer); 3941 3942 /* Disable host interrupts. */ 3943 bnx_disable_intr(sc); 3944 3945 /* 3946 * Tell firmware we're shutting down. 3947 */ 3948 bnx_sig_pre_reset(sc, BNX_RESET_SHUTDOWN); 3949 3950 /* 3951 * Disable all of the receiver blocks 3952 */ 3953 bnx_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE); 3954 bnx_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE); 3955 bnx_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE); 3956 bnx_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE); 3957 bnx_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE); 3958 bnx_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE); 3959 3960 /* 3961 * Disable all of the transmit blocks 3962 */ 3963 bnx_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE); 3964 bnx_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE); 3965 bnx_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE); 3966 bnx_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE); 3967 bnx_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE); 3968 bnx_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE); 3969 3970 /* 3971 * Shut down all of the memory managers and related 3972 * state machines. 3973 */ 3974 bnx_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE); 3975 bnx_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE); 3976 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF); 3977 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0); 3978 3979 bnx_reset(sc); 3980 bnx_sig_post_reset(sc, BNX_RESET_SHUTDOWN); 3981 3982 /* 3983 * Tell firmware we're shutting down. 3984 */ 3985 BNX_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 3986 3987 /* Free the RX lists. */ 3988 bnx_free_rx_ring_std(&sc->bnx_rx_std_ring); 3989 3990 /* Free jumbo RX list. */ 3991 if (BNX_IS_JUMBO_CAPABLE(sc)) 3992 bnx_free_rx_ring_jumbo(sc); 3993 3994 /* Free TX buffers. 
*/ 3995 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) { 3996 struct bnx_tx_ring *txr = &sc->bnx_tx_ring[i]; 3997 3998 txr->bnx_saved_status_tag = 0; 3999 bnx_free_tx_ring(txr); 4000 } 4001 4002 /* Clear saved status tag */ 4003 for (i = 0; i < sc->bnx_rx_retcnt; ++i) 4004 sc->bnx_rx_ret_ring[i].bnx_saved_status_tag = 0; 4005 4006 sc->bnx_link = 0; 4007 sc->bnx_coal_chg = 0; 4008 4009 ifp->if_flags &= ~IFF_RUNNING; 4010 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) { 4011 struct bnx_tx_ring *txr = &sc->bnx_tx_ring[i]; 4012 4013 ifsq_clr_oactive(txr->bnx_ifsq); 4014 ifsq_watchdog_stop(&txr->bnx_tx_watchdog); 4015 } 4016 } 4017 4018 /* 4019 * Stop all chip I/O so that the kernel's probe routines don't 4020 * get confused by errant DMAs when rebooting. 4021 */ 4022 static void 4023 bnx_shutdown(device_t dev) 4024 { 4025 struct bnx_softc *sc = device_get_softc(dev); 4026 struct ifnet *ifp = &sc->arpcom.ac_if; 4027 4028 ifnet_serialize_all(ifp); 4029 bnx_stop(sc); 4030 ifnet_deserialize_all(ifp); 4031 } 4032 4033 static int 4034 bnx_suspend(device_t dev) 4035 { 4036 struct bnx_softc *sc = device_get_softc(dev); 4037 struct ifnet *ifp = &sc->arpcom.ac_if; 4038 4039 ifnet_serialize_all(ifp); 4040 bnx_stop(sc); 4041 ifnet_deserialize_all(ifp); 4042 4043 return 0; 4044 } 4045 4046 static int 4047 bnx_resume(device_t dev) 4048 { 4049 struct bnx_softc *sc = device_get_softc(dev); 4050 struct ifnet *ifp = &sc->arpcom.ac_if; 4051 4052 ifnet_serialize_all(ifp); 4053 4054 if (ifp->if_flags & IFF_UP) { 4055 int i; 4056 4057 bnx_init(sc); 4058 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) 4059 ifsq_devstart_sched(sc->bnx_tx_ring[i].bnx_ifsq); 4060 } 4061 4062 ifnet_deserialize_all(ifp); 4063 4064 return 0; 4065 } 4066 4067 static void 4068 bnx_setpromisc(struct bnx_softc *sc) 4069 { 4070 struct ifnet *ifp = &sc->arpcom.ac_if; 4071 4072 if (ifp->if_flags & IFF_PROMISC) 4073 BNX_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); 4074 else 4075 BNX_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); 4076 } 
4077 4078 static void 4079 bnx_dma_free(struct bnx_softc *sc) 4080 { 4081 struct bnx_rx_std_ring *std = &sc->bnx_rx_std_ring; 4082 int i; 4083 4084 /* Destroy RX return rings */ 4085 if (sc->bnx_rx_ret_ring != NULL) { 4086 for (i = 0; i < sc->bnx_rx_retcnt; ++i) 4087 bnx_destroy_rx_ret_ring(&sc->bnx_rx_ret_ring[i]); 4088 kfree(sc->bnx_rx_ret_ring, M_DEVBUF); 4089 } 4090 4091 /* Destroy RX mbuf DMA stuffs. */ 4092 if (std->bnx_rx_mtag != NULL) { 4093 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { 4094 KKASSERT(std->bnx_rx_std_buf[i].bnx_rx_mbuf == NULL); 4095 bus_dmamap_destroy(std->bnx_rx_mtag, 4096 std->bnx_rx_std_buf[i].bnx_rx_dmamap); 4097 } 4098 bus_dma_tag_destroy(std->bnx_rx_mtag); 4099 } 4100 4101 /* Destroy standard RX ring */ 4102 bnx_dma_block_free(std->bnx_rx_std_ring_tag, 4103 std->bnx_rx_std_ring_map, std->bnx_rx_std_ring); 4104 4105 /* Destroy TX rings */ 4106 if (sc->bnx_tx_ring != NULL) { 4107 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) 4108 bnx_destroy_tx_ring(&sc->bnx_tx_ring[i]); 4109 kfree(sc->bnx_tx_ring, M_DEVBUF); 4110 } 4111 4112 if (BNX_IS_JUMBO_CAPABLE(sc)) 4113 bnx_free_jumbo_mem(sc); 4114 4115 /* Destroy status blocks */ 4116 for (i = 0; i < sc->bnx_intr_cnt; ++i) { 4117 struct bnx_intr_data *intr = &sc->bnx_intr_data[i]; 4118 4119 bnx_dma_block_free(intr->bnx_status_tag, 4120 intr->bnx_status_map, intr->bnx_status_block); 4121 } 4122 4123 /* Destroy the parent tag */ 4124 if (sc->bnx_cdata.bnx_parent_tag != NULL) 4125 bus_dma_tag_destroy(sc->bnx_cdata.bnx_parent_tag); 4126 } 4127 4128 static int 4129 bnx_dma_alloc(device_t dev) 4130 { 4131 struct bnx_softc *sc = device_get_softc(dev); 4132 struct bnx_rx_std_ring *std = &sc->bnx_rx_std_ring; 4133 int i, error, mbx; 4134 4135 /* 4136 * Allocate the parent bus DMA tag appropriate for PCI. 4137 * 4138 * All of the NetExtreme/NetLink controllers have 4GB boundary 4139 * DMA bug. 4140 * Whenever an address crosses a multiple of the 4GB boundary 4141 * (including 4GB, 8Gb, 12Gb, etc.) 
and makes the transition 4142 * from 0xX_FFFF_FFFF to 0x(X+1)_0000_0000 an internal DMA 4143 * state machine will lockup and cause the device to hang. 4144 */ 4145 error = bus_dma_tag_create(NULL, 1, BGE_DMA_BOUNDARY_4G, 4146 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, 4147 BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 4148 0, &sc->bnx_cdata.bnx_parent_tag); 4149 if (error) { 4150 device_printf(dev, "could not create parent DMA tag\n"); 4151 return error; 4152 } 4153 4154 /* 4155 * Create DMA stuffs for status blocks. 4156 */ 4157 for (i = 0; i < sc->bnx_intr_cnt; ++i) { 4158 struct bnx_intr_data *intr = &sc->bnx_intr_data[i]; 4159 4160 error = bnx_dma_block_alloc(sc, 4161 __VM_CACHELINE_ALIGN(BGE_STATUS_BLK_SZ), 4162 &intr->bnx_status_tag, &intr->bnx_status_map, 4163 (void *)&intr->bnx_status_block, 4164 &intr->bnx_status_block_paddr); 4165 if (error) { 4166 device_printf(dev, 4167 "could not create %dth status block\n", i); 4168 return error; 4169 } 4170 } 4171 sc->bnx_hw_status = &sc->bnx_intr_data[0].bnx_status_block->bge_status; 4172 if (sc->bnx_flags & BNX_FLAG_STATUS_HASTAG) { 4173 sc->bnx_hw_status_tag = 4174 &sc->bnx_intr_data[0].bnx_status_block->bge_status_tag; 4175 } 4176 4177 /* 4178 * Create DMA tag and maps for RX mbufs. 
4179 */ 4180 std->bnx_sc = sc; 4181 lwkt_serialize_init(&std->bnx_rx_std_serialize); 4182 error = bus_dma_tag_create(sc->bnx_cdata.bnx_parent_tag, 1, 0, 4183 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 4184 NULL, NULL, MCLBYTES, 1, MCLBYTES, 4185 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK, &std->bnx_rx_mtag); 4186 if (error) { 4187 device_printf(dev, "could not create RX mbuf DMA tag\n"); 4188 return error; 4189 } 4190 4191 for (i = 0; i < BGE_STD_RX_RING_CNT; ++i) { 4192 error = bus_dmamap_create(std->bnx_rx_mtag, BUS_DMA_WAITOK, 4193 &std->bnx_rx_std_buf[i].bnx_rx_dmamap); 4194 if (error) { 4195 int j; 4196 4197 for (j = 0; j < i; ++j) { 4198 bus_dmamap_destroy(std->bnx_rx_mtag, 4199 std->bnx_rx_std_buf[j].bnx_rx_dmamap); 4200 } 4201 bus_dma_tag_destroy(std->bnx_rx_mtag); 4202 std->bnx_rx_mtag = NULL; 4203 4204 device_printf(dev, 4205 "could not create %dth RX mbuf DMA map\n", i); 4206 return error; 4207 } 4208 } 4209 4210 /* 4211 * Create DMA stuffs for standard RX ring. 4212 */ 4213 error = bnx_dma_block_alloc(sc, BGE_STD_RX_RING_SZ, 4214 &std->bnx_rx_std_ring_tag, 4215 &std->bnx_rx_std_ring_map, 4216 (void *)&std->bnx_rx_std_ring, 4217 &std->bnx_rx_std_ring_paddr); 4218 if (error) { 4219 device_printf(dev, "could not create std RX ring\n"); 4220 return error; 4221 } 4222 4223 /* 4224 * Create RX return rings 4225 */ 4226 mbx = BGE_MBX_RX_CONS0_LO; 4227 sc->bnx_rx_ret_ring = kmalloc_cachealign( 4228 sizeof(struct bnx_rx_ret_ring) * sc->bnx_rx_retcnt, M_DEVBUF, 4229 M_WAITOK | M_ZERO); 4230 for (i = 0; i < sc->bnx_rx_retcnt; ++i) { 4231 struct bnx_rx_ret_ring *ret = &sc->bnx_rx_ret_ring[i]; 4232 struct bnx_intr_data *intr; 4233 4234 ret->bnx_sc = sc; 4235 ret->bnx_std = std; 4236 ret->bnx_rx_mbx = mbx; 4237 ret->bnx_rx_cntmax = (BGE_STD_RX_RING_CNT / 4) / 4238 sc->bnx_rx_retcnt; 4239 ret->bnx_rx_mask = 1 << i; 4240 4241 if (!BNX_RSS_ENABLED(sc)) { 4242 intr = &sc->bnx_intr_data[0]; 4243 } else { 4244 KKASSERT(i + 1 < sc->bnx_intr_cnt); 4245 intr = &sc->bnx_intr_data[i + 1]; 
4246 } 4247 4248 if (i == 0) { 4249 ret->bnx_rx_considx = 4250 &intr->bnx_status_block->bge_idx[0].bge_rx_prod_idx; 4251 } else if (i == 1) { 4252 ret->bnx_rx_considx = 4253 &intr->bnx_status_block->bge_rx_jumbo_cons_idx; 4254 } else if (i == 2) { 4255 ret->bnx_rx_considx = 4256 &intr->bnx_status_block->bge_rsvd1; 4257 } else if (i == 3) { 4258 ret->bnx_rx_considx = 4259 &intr->bnx_status_block->bge_rx_mini_cons_idx; 4260 } else { 4261 panic("unknown RX return ring %d\n", i); 4262 } 4263 ret->bnx_hw_status_tag = 4264 &intr->bnx_status_block->bge_status_tag; 4265 4266 error = bnx_create_rx_ret_ring(ret); 4267 if (error) { 4268 device_printf(dev, 4269 "could not create %dth RX ret ring\n", i); 4270 return error; 4271 } 4272 mbx += 8; 4273 } 4274 4275 /* 4276 * Create TX rings 4277 */ 4278 sc->bnx_tx_ring = kmalloc_cachealign( 4279 sizeof(struct bnx_tx_ring) * sc->bnx_tx_ringcnt, M_DEVBUF, 4280 M_WAITOK | M_ZERO); 4281 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) { 4282 struct bnx_tx_ring *txr = &sc->bnx_tx_ring[i]; 4283 struct bnx_intr_data *intr; 4284 4285 txr->bnx_sc = sc; 4286 txr->bnx_tx_mbx = bnx_tx_mailbox[i]; 4287 4288 if (sc->bnx_tx_ringcnt == 1) { 4289 intr = &sc->bnx_intr_data[0]; 4290 } else { 4291 KKASSERT(i + 1 < sc->bnx_intr_cnt); 4292 intr = &sc->bnx_intr_data[i + 1]; 4293 } 4294 4295 if ((sc->bnx_flags & BNX_FLAG_RXTX_BUNDLE) == 0) { 4296 txr->bnx_hw_status_tag = 4297 &intr->bnx_status_block->bge_status_tag; 4298 } 4299 txr->bnx_tx_considx = 4300 &intr->bnx_status_block->bge_idx[0].bge_tx_cons_idx; 4301 4302 error = bnx_create_tx_ring(txr); 4303 if (error) { 4304 device_printf(dev, 4305 "could not create %dth TX ring\n", i); 4306 return error; 4307 } 4308 } 4309 4310 /* 4311 * Create jumbo buffer pool. 
4312 */ 4313 if (BNX_IS_JUMBO_CAPABLE(sc)) { 4314 error = bnx_alloc_jumbo_mem(sc); 4315 if (error) { 4316 device_printf(dev, 4317 "could not create jumbo buffer pool\n"); 4318 return error; 4319 } 4320 } 4321 4322 return 0; 4323 } 4324 4325 static int 4326 bnx_dma_block_alloc(struct bnx_softc *sc, bus_size_t size, bus_dma_tag_t *tag, 4327 bus_dmamap_t *map, void **addr, bus_addr_t *paddr) 4328 { 4329 bus_dmamem_t dmem; 4330 int error; 4331 4332 error = bus_dmamem_coherent(sc->bnx_cdata.bnx_parent_tag, PAGE_SIZE, 0, 4333 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 4334 size, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem); 4335 if (error) 4336 return error; 4337 4338 *tag = dmem.dmem_tag; 4339 *map = dmem.dmem_map; 4340 *addr = dmem.dmem_addr; 4341 *paddr = dmem.dmem_busaddr; 4342 4343 return 0; 4344 } 4345 4346 static void 4347 bnx_dma_block_free(bus_dma_tag_t tag, bus_dmamap_t map, void *addr) 4348 { 4349 if (tag != NULL) { 4350 bus_dmamap_unload(tag, map); 4351 bus_dmamem_free(tag, addr, map); 4352 bus_dma_tag_destroy(tag); 4353 } 4354 } 4355 4356 static void 4357 bnx_tbi_link_upd(struct bnx_softc *sc, uint32_t status) 4358 { 4359 struct ifnet *ifp = &sc->arpcom.ac_if; 4360 4361 #define PCS_ENCODE_ERR (BGE_MACSTAT_PORT_DECODE_ERROR|BGE_MACSTAT_MI_COMPLETE) 4362 4363 /* 4364 * Sometimes PCS encoding errors are detected in 4365 * TBI mode (on fiber NICs), and for some reason 4366 * the chip will signal them as link changes. 4367 * If we get a link change event, but the 'PCS 4368 * encoding error' bit in the MAC status register 4369 * is set, don't bother doing a link check. 4370 * This avoids spurious "gigabit link up" messages 4371 * that sometimes appear on fiber NICs during 4372 * periods of heavy traffic. 
4373 */ 4374 if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) { 4375 if (!sc->bnx_link) { 4376 sc->bnx_link++; 4377 if (sc->bnx_asicrev == BGE_ASICREV_BCM5704) { 4378 BNX_CLRBIT(sc, BGE_MAC_MODE, 4379 BGE_MACMODE_TBI_SEND_CFGS); 4380 DELAY(40); 4381 } 4382 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF); 4383 4384 if (bootverbose) 4385 if_printf(ifp, "link UP\n"); 4386 4387 ifp->if_link_state = LINK_STATE_UP; 4388 if_link_state_change(ifp); 4389 } 4390 } else if ((status & PCS_ENCODE_ERR) != PCS_ENCODE_ERR) { 4391 if (sc->bnx_link) { 4392 sc->bnx_link = 0; 4393 4394 if (bootverbose) 4395 if_printf(ifp, "link DOWN\n"); 4396 4397 ifp->if_link_state = LINK_STATE_DOWN; 4398 if_link_state_change(ifp); 4399 } 4400 } 4401 4402 #undef PCS_ENCODE_ERR 4403 4404 /* Clear the attention. */ 4405 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED | 4406 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE | 4407 BGE_MACSTAT_LINK_CHANGED); 4408 } 4409 4410 static void 4411 bnx_copper_link_upd(struct bnx_softc *sc, uint32_t status __unused) 4412 { 4413 struct ifnet *ifp = &sc->arpcom.ac_if; 4414 struct mii_data *mii = device_get_softc(sc->bnx_miibus); 4415 4416 mii_pollstat(mii); 4417 bnx_miibus_statchg(sc->bnx_dev); 4418 4419 if (bootverbose) { 4420 if (sc->bnx_link) 4421 if_printf(ifp, "link UP\n"); 4422 else 4423 if_printf(ifp, "link DOWN\n"); 4424 } 4425 4426 /* Clear the attention. 
*/ 4427 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED | 4428 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE | 4429 BGE_MACSTAT_LINK_CHANGED); 4430 } 4431 4432 static void 4433 bnx_autopoll_link_upd(struct bnx_softc *sc, uint32_t status __unused) 4434 { 4435 struct ifnet *ifp = &sc->arpcom.ac_if; 4436 struct mii_data *mii = device_get_softc(sc->bnx_miibus); 4437 4438 mii_pollstat(mii); 4439 4440 if (!sc->bnx_link && 4441 (mii->mii_media_status & IFM_ACTIVE) && 4442 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { 4443 sc->bnx_link++; 4444 if (bootverbose) 4445 if_printf(ifp, "link UP\n"); 4446 } else if (sc->bnx_link && 4447 (!(mii->mii_media_status & IFM_ACTIVE) || 4448 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) { 4449 sc->bnx_link = 0; 4450 if (bootverbose) 4451 if_printf(ifp, "link DOWN\n"); 4452 } 4453 4454 /* Clear the attention. */ 4455 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED | 4456 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE | 4457 BGE_MACSTAT_LINK_CHANGED); 4458 } 4459 4460 static int 4461 bnx_sysctl_rx_coal_ticks(SYSCTL_HANDLER_ARGS) 4462 { 4463 struct bnx_softc *sc = arg1; 4464 4465 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req, 4466 &sc->bnx_rx_coal_ticks, 4467 BNX_RX_COAL_TICKS_MIN, BNX_RX_COAL_TICKS_MAX, 4468 BNX_RX_COAL_TICKS_CHG); 4469 } 4470 4471 static int 4472 bnx_sysctl_tx_coal_ticks(SYSCTL_HANDLER_ARGS) 4473 { 4474 struct bnx_softc *sc = arg1; 4475 4476 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req, 4477 &sc->bnx_tx_coal_ticks, 4478 BNX_TX_COAL_TICKS_MIN, BNX_TX_COAL_TICKS_MAX, 4479 BNX_TX_COAL_TICKS_CHG); 4480 } 4481 4482 static int 4483 bnx_sysctl_rx_coal_bds(SYSCTL_HANDLER_ARGS) 4484 { 4485 struct bnx_softc *sc = arg1; 4486 4487 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req, 4488 &sc->bnx_rx_coal_bds, 4489 BNX_RX_COAL_BDS_MIN, BNX_RX_COAL_BDS_MAX, 4490 BNX_RX_COAL_BDS_CHG); 4491 } 4492 4493 static int 4494 bnx_sysctl_rx_coal_bds_poll(SYSCTL_HANDLER_ARGS) 4495 { 4496 struct bnx_softc *sc = arg1; 
4497 4498 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req, 4499 &sc->bnx_rx_coal_bds_poll, 4500 BNX_RX_COAL_BDS_MIN, BNX_RX_COAL_BDS_MAX, 4501 BNX_RX_COAL_BDS_CHG); 4502 } 4503 4504 static int 4505 bnx_sysctl_tx_coal_bds(SYSCTL_HANDLER_ARGS) 4506 { 4507 struct bnx_softc *sc = arg1; 4508 4509 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req, 4510 &sc->bnx_tx_coal_bds, 4511 BNX_TX_COAL_BDS_MIN, BNX_TX_COAL_BDS_MAX, 4512 BNX_TX_COAL_BDS_CHG); 4513 } 4514 4515 static int 4516 bnx_sysctl_tx_coal_bds_poll(SYSCTL_HANDLER_ARGS) 4517 { 4518 struct bnx_softc *sc = arg1; 4519 4520 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req, 4521 &sc->bnx_tx_coal_bds_poll, 4522 BNX_TX_COAL_BDS_MIN, BNX_TX_COAL_BDS_MAX, 4523 BNX_TX_COAL_BDS_CHG); 4524 } 4525 4526 static int 4527 bnx_sysctl_rx_coal_bds_int(SYSCTL_HANDLER_ARGS) 4528 { 4529 struct bnx_softc *sc = arg1; 4530 4531 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req, 4532 &sc->bnx_rx_coal_bds_int, 4533 BNX_RX_COAL_BDS_MIN, BNX_RX_COAL_BDS_MAX, 4534 BNX_RX_COAL_BDS_INT_CHG); 4535 } 4536 4537 static int 4538 bnx_sysctl_tx_coal_bds_int(SYSCTL_HANDLER_ARGS) 4539 { 4540 struct bnx_softc *sc = arg1; 4541 4542 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req, 4543 &sc->bnx_tx_coal_bds_int, 4544 BNX_TX_COAL_BDS_MIN, BNX_TX_COAL_BDS_MAX, 4545 BNX_TX_COAL_BDS_INT_CHG); 4546 } 4547 4548 static int 4549 bnx_sysctl_coal_chg(SYSCTL_HANDLER_ARGS, uint32_t *coal, 4550 int coal_min, int coal_max, uint32_t coal_chg_mask) 4551 { 4552 struct bnx_softc *sc = arg1; 4553 struct ifnet *ifp = &sc->arpcom.ac_if; 4554 int error = 0, v; 4555 4556 ifnet_serialize_all(ifp); 4557 4558 v = *coal; 4559 error = sysctl_handle_int(oidp, &v, 0, req); 4560 if (!error && req->newptr != NULL) { 4561 if (v < coal_min || v > coal_max) { 4562 error = EINVAL; 4563 } else { 4564 *coal = v; 4565 sc->bnx_coal_chg |= coal_chg_mask; 4566 4567 /* Commit changes */ 4568 bnx_coal_change(sc); 4569 } 4570 } 4571 4572 ifnet_deserialize_all(ifp); 4573 return error; 4574 } 4575 4576 
static void 4577 bnx_coal_change(struct bnx_softc *sc) 4578 { 4579 struct ifnet *ifp = &sc->arpcom.ac_if; 4580 int i; 4581 4582 ASSERT_IFNET_SERIALIZED_ALL(ifp); 4583 4584 if (sc->bnx_coal_chg & BNX_RX_COAL_TICKS_CHG) { 4585 if (sc->bnx_rx_retcnt == 1) { 4586 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, 4587 sc->bnx_rx_coal_ticks); 4588 i = 0; 4589 } else { 4590 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, 0); 4591 for (i = 0; i < sc->bnx_rx_retcnt; ++i) { 4592 CSR_WRITE_4(sc, BGE_VEC1_RX_COAL_TICKS + 4593 (i * BGE_VEC_COALSET_SIZE), 4594 sc->bnx_rx_coal_ticks); 4595 } 4596 } 4597 for (; i < BNX_INTR_MAX - 1; ++i) { 4598 CSR_WRITE_4(sc, BGE_VEC1_RX_COAL_TICKS + 4599 (i * BGE_VEC_COALSET_SIZE), 0); 4600 } 4601 if (bootverbose) { 4602 if_printf(ifp, "rx_coal_ticks -> %u\n", 4603 sc->bnx_rx_coal_ticks); 4604 } 4605 } 4606 4607 if (sc->bnx_coal_chg & BNX_TX_COAL_TICKS_CHG) { 4608 if (sc->bnx_tx_ringcnt == 1) { 4609 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, 4610 sc->bnx_tx_coal_ticks); 4611 i = 0; 4612 } else { 4613 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, 0); 4614 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) { 4615 CSR_WRITE_4(sc, BGE_VEC1_TX_COAL_TICKS + 4616 (i * BGE_VEC_COALSET_SIZE), 4617 sc->bnx_tx_coal_ticks); 4618 } 4619 } 4620 for (; i < BNX_INTR_MAX - 1; ++i) { 4621 CSR_WRITE_4(sc, BGE_VEC1_TX_COAL_TICKS + 4622 (i * BGE_VEC_COALSET_SIZE), 0); 4623 } 4624 if (bootverbose) { 4625 if_printf(ifp, "tx_coal_ticks -> %u\n", 4626 sc->bnx_tx_coal_ticks); 4627 } 4628 } 4629 4630 if (sc->bnx_coal_chg & BNX_RX_COAL_BDS_CHG) { 4631 uint32_t rx_coal_bds; 4632 4633 if (ifp->if_flags & IFF_NPOLLING) 4634 rx_coal_bds = sc->bnx_rx_coal_bds_poll; 4635 else 4636 rx_coal_bds = sc->bnx_rx_coal_bds; 4637 4638 if (sc->bnx_rx_retcnt == 1) { 4639 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, rx_coal_bds); 4640 i = 0; 4641 } else { 4642 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, 0); 4643 for (i = 0; i < sc->bnx_rx_retcnt; ++i) { 4644 CSR_WRITE_4(sc, BGE_VEC1_RX_MAX_COAL_BDS + 4645 (i * BGE_VEC_COALSET_SIZE), 
rx_coal_bds); 4646 } 4647 } 4648 for (; i < BNX_INTR_MAX - 1; ++i) { 4649 CSR_WRITE_4(sc, BGE_VEC1_RX_MAX_COAL_BDS + 4650 (i * BGE_VEC_COALSET_SIZE), 0); 4651 } 4652 if (bootverbose) { 4653 if_printf(ifp, "%srx_coal_bds -> %u\n", 4654 (ifp->if_flags & IFF_NPOLLING) ? "polling " : "", 4655 rx_coal_bds); 4656 } 4657 } 4658 4659 if (sc->bnx_coal_chg & BNX_TX_COAL_BDS_CHG) { 4660 uint32_t tx_coal_bds; 4661 4662 if (ifp->if_flags & IFF_NPOLLING) 4663 tx_coal_bds = sc->bnx_tx_coal_bds_poll; 4664 else 4665 tx_coal_bds = sc->bnx_tx_coal_bds; 4666 4667 if (sc->bnx_tx_ringcnt == 1) { 4668 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, tx_coal_bds); 4669 i = 0; 4670 } else { 4671 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, 0); 4672 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) { 4673 CSR_WRITE_4(sc, BGE_VEC1_TX_MAX_COAL_BDS + 4674 (i * BGE_VEC_COALSET_SIZE), tx_coal_bds); 4675 } 4676 } 4677 for (; i < BNX_INTR_MAX - 1; ++i) { 4678 CSR_WRITE_4(sc, BGE_VEC1_TX_MAX_COAL_BDS + 4679 (i * BGE_VEC_COALSET_SIZE), 0); 4680 } 4681 if (bootverbose) { 4682 if_printf(ifp, "%stx_coal_bds -> %u\n", 4683 (ifp->if_flags & IFF_NPOLLING) ? 
"polling " : "", 4684 tx_coal_bds); 4685 } 4686 } 4687 4688 if (sc->bnx_coal_chg & BNX_RX_COAL_BDS_INT_CHG) { 4689 if (sc->bnx_rx_retcnt == 1) { 4690 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 4691 sc->bnx_rx_coal_bds_int); 4692 i = 0; 4693 } else { 4694 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0); 4695 for (i = 0; i < sc->bnx_rx_retcnt; ++i) { 4696 CSR_WRITE_4(sc, BGE_VEC1_RX_MAX_COAL_BDS_INT + 4697 (i * BGE_VEC_COALSET_SIZE), 4698 sc->bnx_rx_coal_bds_int); 4699 } 4700 } 4701 for (; i < BNX_INTR_MAX - 1; ++i) { 4702 CSR_WRITE_4(sc, BGE_VEC1_RX_MAX_COAL_BDS_INT + 4703 (i * BGE_VEC_COALSET_SIZE), 0); 4704 } 4705 if (bootverbose) { 4706 if_printf(ifp, "rx_coal_bds_int -> %u\n", 4707 sc->bnx_rx_coal_bds_int); 4708 } 4709 } 4710 4711 if (sc->bnx_coal_chg & BNX_TX_COAL_BDS_INT_CHG) { 4712 if (sc->bnx_tx_ringcnt == 1) { 4713 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 4714 sc->bnx_tx_coal_bds_int); 4715 i = 0; 4716 } else { 4717 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0); 4718 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) { 4719 CSR_WRITE_4(sc, BGE_VEC1_TX_MAX_COAL_BDS_INT + 4720 (i * BGE_VEC_COALSET_SIZE), 4721 sc->bnx_tx_coal_bds_int); 4722 } 4723 } 4724 for (; i < BNX_INTR_MAX - 1; ++i) { 4725 CSR_WRITE_4(sc, BGE_VEC1_TX_MAX_COAL_BDS_INT + 4726 (i * BGE_VEC_COALSET_SIZE), 0); 4727 } 4728 if (bootverbose) { 4729 if_printf(ifp, "tx_coal_bds_int -> %u\n", 4730 sc->bnx_tx_coal_bds_int); 4731 } 4732 } 4733 4734 sc->bnx_coal_chg = 0; 4735 } 4736 4737 static void 4738 bnx_check_intr_rxtx(void *xintr) 4739 { 4740 struct bnx_intr_data *intr = xintr; 4741 struct bnx_rx_ret_ring *ret; 4742 struct bnx_tx_ring *txr; 4743 struct ifnet *ifp; 4744 4745 lwkt_serialize_enter(intr->bnx_intr_serialize); 4746 4747 KKASSERT(mycpuid == intr->bnx_intr_cpuid); 4748 4749 ifp = &intr->bnx_sc->arpcom.ac_if; 4750 if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) != IFF_RUNNING) { 4751 lwkt_serialize_exit(intr->bnx_intr_serialize); 4752 return; 4753 } 4754 4755 txr = intr->bnx_txr; 4756 
ret = intr->bnx_ret; 4757 4758 if (*ret->bnx_rx_considx != ret->bnx_rx_saved_considx || 4759 *txr->bnx_tx_considx != txr->bnx_tx_saved_considx) { 4760 if (intr->bnx_rx_check_considx == ret->bnx_rx_saved_considx && 4761 intr->bnx_tx_check_considx == txr->bnx_tx_saved_considx) { 4762 if (!intr->bnx_intr_maylose) { 4763 intr->bnx_intr_maylose = TRUE; 4764 goto done; 4765 } 4766 if (bootverbose) 4767 if_printf(ifp, "lost interrupt\n"); 4768 intr->bnx_intr_func(intr->bnx_intr_arg); 4769 } 4770 } 4771 intr->bnx_intr_maylose = FALSE; 4772 intr->bnx_rx_check_considx = ret->bnx_rx_saved_considx; 4773 intr->bnx_tx_check_considx = txr->bnx_tx_saved_considx; 4774 4775 done: 4776 callout_reset(&intr->bnx_intr_timer, BNX_INTR_CKINTVL, 4777 intr->bnx_intr_check, intr); 4778 lwkt_serialize_exit(intr->bnx_intr_serialize); 4779 } 4780 4781 static void 4782 bnx_check_intr_tx(void *xintr) 4783 { 4784 struct bnx_intr_data *intr = xintr; 4785 struct bnx_tx_ring *txr; 4786 struct ifnet *ifp; 4787 4788 lwkt_serialize_enter(intr->bnx_intr_serialize); 4789 4790 KKASSERT(mycpuid == intr->bnx_intr_cpuid); 4791 4792 ifp = &intr->bnx_sc->arpcom.ac_if; 4793 if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) != IFF_RUNNING) { 4794 lwkt_serialize_exit(intr->bnx_intr_serialize); 4795 return; 4796 } 4797 4798 txr = intr->bnx_txr; 4799 4800 if (*txr->bnx_tx_considx != txr->bnx_tx_saved_considx) { 4801 if (intr->bnx_tx_check_considx == txr->bnx_tx_saved_considx) { 4802 if (!intr->bnx_intr_maylose) { 4803 intr->bnx_intr_maylose = TRUE; 4804 goto done; 4805 } 4806 if (bootverbose) 4807 if_printf(ifp, "lost interrupt\n"); 4808 intr->bnx_intr_func(intr->bnx_intr_arg); 4809 } 4810 } 4811 intr->bnx_intr_maylose = FALSE; 4812 intr->bnx_tx_check_considx = txr->bnx_tx_saved_considx; 4813 4814 done: 4815 callout_reset(&intr->bnx_intr_timer, BNX_INTR_CKINTVL, 4816 intr->bnx_intr_check, intr); 4817 lwkt_serialize_exit(intr->bnx_intr_serialize); 4818 } 4819 4820 static void 4821 bnx_check_intr_rx(void *xintr) 4822 
{ 4823 struct bnx_intr_data *intr = xintr; 4824 struct bnx_rx_ret_ring *ret; 4825 struct ifnet *ifp; 4826 4827 lwkt_serialize_enter(intr->bnx_intr_serialize); 4828 4829 KKASSERT(mycpuid == intr->bnx_intr_cpuid); 4830 4831 ifp = &intr->bnx_sc->arpcom.ac_if; 4832 if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) != IFF_RUNNING) { 4833 lwkt_serialize_exit(intr->bnx_intr_serialize); 4834 return; 4835 } 4836 4837 ret = intr->bnx_ret; 4838 4839 if (*ret->bnx_rx_considx != ret->bnx_rx_saved_considx) { 4840 if (intr->bnx_rx_check_considx == ret->bnx_rx_saved_considx) { 4841 if (!intr->bnx_intr_maylose) { 4842 intr->bnx_intr_maylose = TRUE; 4843 goto done; 4844 } 4845 if (bootverbose) 4846 if_printf(ifp, "lost interrupt\n"); 4847 intr->bnx_intr_func(intr->bnx_intr_arg); 4848 } 4849 } 4850 intr->bnx_intr_maylose = FALSE; 4851 intr->bnx_rx_check_considx = ret->bnx_rx_saved_considx; 4852 4853 done: 4854 callout_reset(&intr->bnx_intr_timer, BNX_INTR_CKINTVL, 4855 intr->bnx_intr_check, intr); 4856 lwkt_serialize_exit(intr->bnx_intr_serialize); 4857 } 4858 4859 static void 4860 bnx_enable_intr(struct bnx_softc *sc) 4861 { 4862 struct ifnet *ifp = &sc->arpcom.ac_if; 4863 int i; 4864 4865 for (i = 0; i < sc->bnx_intr_cnt; ++i) { 4866 lwkt_serialize_handler_enable( 4867 sc->bnx_intr_data[i].bnx_intr_serialize); 4868 } 4869 4870 /* 4871 * Enable interrupt. 4872 */ 4873 for (i = 0; i < sc->bnx_intr_cnt; ++i) { 4874 struct bnx_intr_data *intr = &sc->bnx_intr_data[i]; 4875 4876 bnx_writembx(sc, intr->bnx_intr_mbx, 4877 (*intr->bnx_saved_status_tag) << 24); 4878 /* XXX Linux driver */ 4879 bnx_writembx(sc, intr->bnx_intr_mbx, 4880 (*intr->bnx_saved_status_tag) << 24); 4881 } 4882 4883 /* 4884 * Unmask the interrupt when we stop polling. 4885 */ 4886 PCI_CLRBIT(sc->bnx_dev, BGE_PCI_MISC_CTL, 4887 BGE_PCIMISCCTL_MASK_PCI_INTR, 4); 4888 4889 /* 4890 * Trigger another interrupt, since above writing 4891 * to interrupt mailbox0 may acknowledge pending 4892 * interrupt. 
4893 */ 4894 BNX_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET); 4895 4896 if (sc->bnx_flags & BNX_FLAG_STATUSTAG_BUG) { 4897 if (bootverbose) 4898 if_printf(ifp, "status tag bug workaround\n"); 4899 4900 for (i = 0; i < sc->bnx_intr_cnt; ++i) { 4901 struct bnx_intr_data *intr = &sc->bnx_intr_data[i]; 4902 4903 if (intr->bnx_intr_check == NULL) 4904 continue; 4905 intr->bnx_intr_maylose = FALSE; 4906 intr->bnx_rx_check_considx = 0; 4907 intr->bnx_tx_check_considx = 0; 4908 callout_reset_bycpu(&intr->bnx_intr_timer, 4909 BNX_INTR_CKINTVL, intr->bnx_intr_check, intr, 4910 intr->bnx_intr_cpuid); 4911 } 4912 } 4913 } 4914 4915 static void 4916 bnx_disable_intr(struct bnx_softc *sc) 4917 { 4918 int i; 4919 4920 for (i = 0; i < sc->bnx_intr_cnt; ++i) { 4921 struct bnx_intr_data *intr = &sc->bnx_intr_data[i]; 4922 4923 callout_stop(&intr->bnx_intr_timer); 4924 intr->bnx_intr_maylose = FALSE; 4925 intr->bnx_rx_check_considx = 0; 4926 intr->bnx_tx_check_considx = 0; 4927 } 4928 4929 /* 4930 * Mask the interrupt when we start polling. 4931 */ 4932 PCI_SETBIT(sc->bnx_dev, BGE_PCI_MISC_CTL, 4933 BGE_PCIMISCCTL_MASK_PCI_INTR, 4); 4934 4935 /* 4936 * Acknowledge possible asserted interrupt. 
4937 */ 4938 for (i = 0; i < BNX_INTR_MAX; ++i) 4939 bnx_writembx(sc, sc->bnx_intr_data[i].bnx_intr_mbx, 1); 4940 4941 for (i = 0; i < sc->bnx_intr_cnt; ++i) { 4942 lwkt_serialize_handler_disable( 4943 sc->bnx_intr_data[i].bnx_intr_serialize); 4944 } 4945 } 4946 4947 static int 4948 bnx_get_eaddr_mem(struct bnx_softc *sc, uint8_t ether_addr[]) 4949 { 4950 uint32_t mac_addr; 4951 int ret = 1; 4952 4953 mac_addr = bnx_readmem_ind(sc, 0x0c14); 4954 if ((mac_addr >> 16) == 0x484b) { 4955 ether_addr[0] = (uint8_t)(mac_addr >> 8); 4956 ether_addr[1] = (uint8_t)mac_addr; 4957 mac_addr = bnx_readmem_ind(sc, 0x0c18); 4958 ether_addr[2] = (uint8_t)(mac_addr >> 24); 4959 ether_addr[3] = (uint8_t)(mac_addr >> 16); 4960 ether_addr[4] = (uint8_t)(mac_addr >> 8); 4961 ether_addr[5] = (uint8_t)mac_addr; 4962 ret = 0; 4963 } 4964 return ret; 4965 } 4966 4967 static int 4968 bnx_get_eaddr_nvram(struct bnx_softc *sc, uint8_t ether_addr[]) 4969 { 4970 int mac_offset = BGE_EE_MAC_OFFSET; 4971 4972 if (BNX_IS_5717_PLUS(sc)) { 4973 int f; 4974 4975 f = pci_get_function(sc->bnx_dev); 4976 if (f & 1) 4977 mac_offset = BGE_EE_MAC_OFFSET_5717; 4978 if (f > 1) 4979 mac_offset += BGE_EE_MAC_OFFSET_5717_OFF; 4980 } 4981 4982 return bnx_read_nvram(sc, ether_addr, mac_offset + 2, ETHER_ADDR_LEN); 4983 } 4984 4985 static int 4986 bnx_get_eaddr_eeprom(struct bnx_softc *sc, uint8_t ether_addr[]) 4987 { 4988 if (sc->bnx_flags & BNX_FLAG_NO_EEPROM) 4989 return 1; 4990 4991 return bnx_read_eeprom(sc, ether_addr, BGE_EE_MAC_OFFSET + 2, 4992 ETHER_ADDR_LEN); 4993 } 4994 4995 static int 4996 bnx_get_eaddr(struct bnx_softc *sc, uint8_t eaddr[]) 4997 { 4998 static const bnx_eaddr_fcn_t bnx_eaddr_funcs[] = { 4999 /* NOTE: Order is critical */ 5000 bnx_get_eaddr_mem, 5001 bnx_get_eaddr_nvram, 5002 bnx_get_eaddr_eeprom, 5003 NULL 5004 }; 5005 const bnx_eaddr_fcn_t *func; 5006 5007 for (func = bnx_eaddr_funcs; *func != NULL; ++func) { 5008 if ((*func)(sc, eaddr) == 0) 5009 break; 5010 } 5011 return (*func == 
NULL ? ENXIO : 0); 5012 } 5013 5014 /* 5015 * NOTE: 'm' is not freed upon failure 5016 */ 5017 struct mbuf * 5018 bnx_defrag_shortdma(struct mbuf *m) 5019 { 5020 struct mbuf *n; 5021 int found; 5022 5023 /* 5024 * If device receive two back-to-back send BDs with less than 5025 * or equal to 8 total bytes then the device may hang. The two 5026 * back-to-back send BDs must in the same frame for this failure 5027 * to occur. Scan mbuf chains and see whether two back-to-back 5028 * send BDs are there. If this is the case, allocate new mbuf 5029 * and copy the frame to workaround the silicon bug. 5030 */ 5031 for (n = m, found = 0; n != NULL; n = n->m_next) { 5032 if (n->m_len < 8) { 5033 found++; 5034 if (found > 1) 5035 break; 5036 continue; 5037 } 5038 found = 0; 5039 } 5040 5041 if (found > 1) 5042 n = m_defrag(m, M_NOWAIT); 5043 else 5044 n = m; 5045 return n; 5046 } 5047 5048 static void 5049 bnx_stop_block(struct bnx_softc *sc, bus_size_t reg, uint32_t bit) 5050 { 5051 int i; 5052 5053 BNX_CLRBIT(sc, reg, bit); 5054 for (i = 0; i < BNX_TIMEOUT; i++) { 5055 if ((CSR_READ_4(sc, reg) & bit) == 0) 5056 return; 5057 DELAY(100); 5058 } 5059 } 5060 5061 static void 5062 bnx_link_poll(struct bnx_softc *sc) 5063 { 5064 uint32_t status; 5065 5066 status = CSR_READ_4(sc, BGE_MAC_STS); 5067 if ((status & sc->bnx_link_chg) || sc->bnx_link_evt) { 5068 sc->bnx_link_evt = 0; 5069 sc->bnx_link_upd(sc, status); 5070 } 5071 } 5072 5073 static void 5074 bnx_enable_msi(struct bnx_softc *sc, boolean_t is_msix) 5075 { 5076 uint32_t msi_mode; 5077 5078 msi_mode = CSR_READ_4(sc, BGE_MSI_MODE); 5079 msi_mode |= BGE_MSIMODE_ENABLE; 5080 /* 5081 * NOTE: 5082 * 5718-PG105-R says that "one shot" mode does not work 5083 * if MSI is used, however, it obviously works. 
5084 */ 5085 msi_mode &= ~BGE_MSIMODE_ONESHOT_DISABLE; 5086 if (is_msix) 5087 msi_mode |= BGE_MSIMODE_MSIX_MULTIMODE; 5088 else 5089 msi_mode &= ~BGE_MSIMODE_MSIX_MULTIMODE; 5090 CSR_WRITE_4(sc, BGE_MSI_MODE, msi_mode); 5091 } 5092 5093 static uint32_t 5094 bnx_dma_swap_options(struct bnx_softc *sc) 5095 { 5096 uint32_t dma_options; 5097 5098 dma_options = BGE_MODECTL_WORDSWAP_NONFRAME | 5099 BGE_MODECTL_BYTESWAP_DATA | BGE_MODECTL_WORDSWAP_DATA; 5100 #if BYTE_ORDER == BIG_ENDIAN 5101 dma_options |= BGE_MODECTL_BYTESWAP_NONFRAME; 5102 #endif 5103 return dma_options; 5104 } 5105 5106 static int 5107 bnx_setup_tso(struct bnx_tx_ring *txr, struct mbuf **mp, 5108 uint16_t *mss0, uint16_t *flags0) 5109 { 5110 struct mbuf *m; 5111 struct ip *ip; 5112 struct tcphdr *th; 5113 int thoff, iphlen, hoff, hlen; 5114 uint16_t flags, mss; 5115 5116 m = *mp; 5117 KASSERT(M_WRITABLE(m), ("TSO mbuf not writable")); 5118 5119 hoff = m->m_pkthdr.csum_lhlen; 5120 iphlen = m->m_pkthdr.csum_iphlen; 5121 thoff = m->m_pkthdr.csum_thlen; 5122 5123 KASSERT(hoff > 0, ("invalid ether header len")); 5124 KASSERT(iphlen > 0, ("invalid ip header len")); 5125 KASSERT(thoff > 0, ("invalid tcp header len")); 5126 5127 if (__predict_false(m->m_len < hoff + iphlen + thoff)) { 5128 m = m_pullup(m, hoff + iphlen + thoff); 5129 if (m == NULL) { 5130 *mp = NULL; 5131 return ENOBUFS; 5132 } 5133 *mp = m; 5134 } 5135 ip = mtodoff(m, struct ip *, hoff); 5136 th = mtodoff(m, struct tcphdr *, hoff + iphlen); 5137 5138 mss = m->m_pkthdr.tso_segsz; 5139 flags = BGE_TXBDFLAG_CPU_PRE_DMA | BGE_TXBDFLAG_CPU_POST_DMA; 5140 5141 ip->ip_len = htons(mss + iphlen + thoff); 5142 th->th_sum = 0; 5143 5144 hlen = (iphlen + thoff) >> 2; 5145 mss |= ((hlen & 0x3) << 14); 5146 flags |= ((hlen & 0xf8) << 7) | ((hlen & 0x4) << 2); 5147 5148 *mss0 = mss; 5149 *flags0 = flags; 5150 5151 return 0; 5152 } 5153 5154 static int 5155 bnx_create_tx_ring(struct bnx_tx_ring *txr) 5156 { 5157 bus_size_t txmaxsz, txmaxsegsz; 5158 int i, 
error; 5159 5160 lwkt_serialize_init(&txr->bnx_tx_serialize); 5161 5162 /* 5163 * Create DMA tag and maps for TX mbufs. 5164 */ 5165 if (txr->bnx_sc->bnx_flags & BNX_FLAG_TSO) 5166 txmaxsz = IP_MAXPACKET + sizeof(struct ether_vlan_header); 5167 else 5168 txmaxsz = BNX_JUMBO_FRAMELEN; 5169 if (txr->bnx_sc->bnx_asicrev == BGE_ASICREV_BCM57766) 5170 txmaxsegsz = MCLBYTES; 5171 else 5172 txmaxsegsz = PAGE_SIZE; 5173 error = bus_dma_tag_create(txr->bnx_sc->bnx_cdata.bnx_parent_tag, 5174 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, 5175 txmaxsz, BNX_NSEG_NEW, txmaxsegsz, 5176 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE, 5177 &txr->bnx_tx_mtag); 5178 if (error) { 5179 device_printf(txr->bnx_sc->bnx_dev, 5180 "could not create TX mbuf DMA tag\n"); 5181 return error; 5182 } 5183 5184 for (i = 0; i < BGE_TX_RING_CNT; i++) { 5185 error = bus_dmamap_create(txr->bnx_tx_mtag, 5186 BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE, 5187 &txr->bnx_tx_buf[i].bnx_tx_dmamap); 5188 if (error) { 5189 int j; 5190 5191 for (j = 0; j < i; ++j) { 5192 bus_dmamap_destroy(txr->bnx_tx_mtag, 5193 txr->bnx_tx_buf[j].bnx_tx_dmamap); 5194 } 5195 bus_dma_tag_destroy(txr->bnx_tx_mtag); 5196 txr->bnx_tx_mtag = NULL; 5197 5198 device_printf(txr->bnx_sc->bnx_dev, 5199 "could not create TX mbuf DMA map\n"); 5200 return error; 5201 } 5202 } 5203 5204 /* 5205 * Create DMA stuffs for TX ring. 5206 */ 5207 error = bnx_dma_block_alloc(txr->bnx_sc, BGE_TX_RING_SZ, 5208 &txr->bnx_tx_ring_tag, 5209 &txr->bnx_tx_ring_map, 5210 (void *)&txr->bnx_tx_ring, 5211 &txr->bnx_tx_ring_paddr); 5212 if (error) { 5213 device_printf(txr->bnx_sc->bnx_dev, 5214 "could not create TX ring\n"); 5215 return error; 5216 } 5217 5218 txr->bnx_tx_flags |= BNX_TX_FLAG_SHORTDMA; 5219 txr->bnx_tx_wreg = BNX_TX_WREG_NSEGS; 5220 5221 return 0; 5222 } 5223 5224 static void 5225 bnx_destroy_tx_ring(struct bnx_tx_ring *txr) 5226 { 5227 /* Destroy TX mbuf DMA stuffs. 
*/ 5228 if (txr->bnx_tx_mtag != NULL) { 5229 int i; 5230 5231 for (i = 0; i < BGE_TX_RING_CNT; i++) { 5232 KKASSERT(txr->bnx_tx_buf[i].bnx_tx_mbuf == NULL); 5233 bus_dmamap_destroy(txr->bnx_tx_mtag, 5234 txr->bnx_tx_buf[i].bnx_tx_dmamap); 5235 } 5236 bus_dma_tag_destroy(txr->bnx_tx_mtag); 5237 } 5238 5239 /* Destroy TX ring */ 5240 bnx_dma_block_free(txr->bnx_tx_ring_tag, 5241 txr->bnx_tx_ring_map, txr->bnx_tx_ring); 5242 } 5243 5244 static int 5245 bnx_sysctl_force_defrag(SYSCTL_HANDLER_ARGS) 5246 { 5247 struct bnx_softc *sc = (void *)arg1; 5248 struct ifnet *ifp = &sc->arpcom.ac_if; 5249 struct bnx_tx_ring *txr = &sc->bnx_tx_ring[0]; 5250 int error, defrag, i; 5251 5252 if (txr->bnx_tx_flags & BNX_TX_FLAG_FORCE_DEFRAG) 5253 defrag = 1; 5254 else 5255 defrag = 0; 5256 5257 error = sysctl_handle_int(oidp, &defrag, 0, req); 5258 if (error || req->newptr == NULL) 5259 return error; 5260 5261 ifnet_serialize_all(ifp); 5262 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) { 5263 txr = &sc->bnx_tx_ring[i]; 5264 if (defrag) 5265 txr->bnx_tx_flags |= BNX_TX_FLAG_FORCE_DEFRAG; 5266 else 5267 txr->bnx_tx_flags &= ~BNX_TX_FLAG_FORCE_DEFRAG; 5268 } 5269 ifnet_deserialize_all(ifp); 5270 5271 return 0; 5272 } 5273 5274 static int 5275 bnx_sysctl_tx_wreg(SYSCTL_HANDLER_ARGS) 5276 { 5277 struct bnx_softc *sc = (void *)arg1; 5278 struct ifnet *ifp = &sc->arpcom.ac_if; 5279 struct bnx_tx_ring *txr = &sc->bnx_tx_ring[0]; 5280 int error, tx_wreg, i; 5281 5282 tx_wreg = txr->bnx_tx_wreg; 5283 error = sysctl_handle_int(oidp, &tx_wreg, 0, req); 5284 if (error || req->newptr == NULL) 5285 return error; 5286 5287 ifnet_serialize_all(ifp); 5288 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) 5289 sc->bnx_tx_ring[i].bnx_tx_wreg = tx_wreg; 5290 ifnet_deserialize_all(ifp); 5291 5292 return 0; 5293 } 5294 5295 static int 5296 bnx_create_rx_ret_ring(struct bnx_rx_ret_ring *ret) 5297 { 5298 int error; 5299 5300 lwkt_serialize_init(&ret->bnx_rx_ret_serialize); 5301 5302 /* 5303 * Create DMA stuffs for RX return 
ring. 5304 */ 5305 error = bnx_dma_block_alloc(ret->bnx_sc, 5306 BGE_RX_RTN_RING_SZ(BNX_RETURN_RING_CNT), 5307 &ret->bnx_rx_ret_ring_tag, 5308 &ret->bnx_rx_ret_ring_map, 5309 (void *)&ret->bnx_rx_ret_ring, 5310 &ret->bnx_rx_ret_ring_paddr); 5311 if (error) { 5312 device_printf(ret->bnx_sc->bnx_dev, 5313 "could not create RX ret ring\n"); 5314 return error; 5315 } 5316 5317 /* Shadow standard ring's RX mbuf DMA tag */ 5318 ret->bnx_rx_mtag = ret->bnx_std->bnx_rx_mtag; 5319 5320 /* 5321 * Create tmp DMA map for RX mbufs. 5322 */ 5323 error = bus_dmamap_create(ret->bnx_rx_mtag, BUS_DMA_WAITOK, 5324 &ret->bnx_rx_tmpmap); 5325 if (error) { 5326 device_printf(ret->bnx_sc->bnx_dev, 5327 "could not create tmp RX mbuf DMA map\n"); 5328 ret->bnx_rx_mtag = NULL; 5329 return error; 5330 } 5331 return 0; 5332 } 5333 5334 static void 5335 bnx_destroy_rx_ret_ring(struct bnx_rx_ret_ring *ret) 5336 { 5337 /* Destroy tmp RX mbuf DMA map */ 5338 if (ret->bnx_rx_mtag != NULL) 5339 bus_dmamap_destroy(ret->bnx_rx_mtag, ret->bnx_rx_tmpmap); 5340 5341 /* Destroy RX return ring */ 5342 bnx_dma_block_free(ret->bnx_rx_ret_ring_tag, 5343 ret->bnx_rx_ret_ring_map, ret->bnx_rx_ret_ring); 5344 } 5345 5346 static int 5347 bnx_alloc_intr(struct bnx_softc *sc) 5348 { 5349 struct bnx_intr_data *intr; 5350 u_int intr_flags; 5351 int error; 5352 5353 if (sc->bnx_intr_cnt > 1) { 5354 error = bnx_alloc_msix(sc); 5355 if (error) 5356 return error; 5357 KKASSERT(sc->bnx_intr_type == PCI_INTR_TYPE_MSIX); 5358 return 0; 5359 } 5360 5361 KKASSERT(sc->bnx_intr_cnt == 1); 5362 5363 intr = &sc->bnx_intr_data[0]; 5364 intr->bnx_ret = &sc->bnx_rx_ret_ring[0]; 5365 intr->bnx_txr = &sc->bnx_tx_ring[0]; 5366 intr->bnx_intr_serialize = &sc->bnx_main_serialize; 5367 intr->bnx_intr_check = bnx_check_intr_rxtx; 5368 intr->bnx_saved_status_tag = &intr->bnx_ret->bnx_saved_status_tag; 5369 5370 sc->bnx_intr_type = pci_alloc_1intr(sc->bnx_dev, bnx_msi_enable, 5371 &intr->bnx_intr_rid, &intr_flags); 5372 5373 
	intr->bnx_intr_res = bus_alloc_resource_any(sc->bnx_dev, SYS_RES_IRQ,
	    &intr->bnx_intr_rid, intr_flags);
	if (intr->bnx_intr_res == NULL) {
		device_printf(sc->bnx_dev, "could not alloc interrupt\n");
		return ENXIO;
	}

	if (sc->bnx_intr_type == PCI_INTR_TYPE_MSI) {
		/* FALSE: program MSI (not MSI-X) oneshot mode */
		bnx_enable_msi(sc, FALSE);
		intr->bnx_intr_func = bnx_msi;
		if (bootverbose)
			device_printf(sc->bnx_dev, "oneshot MSI\n");
	} else {
		intr->bnx_intr_func = bnx_intr_legacy;
	}
	intr->bnx_intr_arg = sc;
	intr->bnx_intr_cpuid = rman_get_cpuid(intr->bnx_intr_res);

	/* TX completion runs on the interrupt's cpu */
	intr->bnx_txr->bnx_tx_cpuid = intr->bnx_intr_cpuid;

	return 0;
}

/*
 * Hook up all allocated interrupt vectors.  On failure the vectors
 * installed so far are torn down before returning the error.
 */
static int
bnx_setup_intr(struct bnx_softc *sc)
{
	int error, i;

	for (i = 0; i < sc->bnx_intr_cnt; ++i) {
		struct bnx_intr_data *intr = &sc->bnx_intr_data[i];

		error = bus_setup_intr_descr(sc->bnx_dev, intr->bnx_intr_res,
		    INTR_MPSAFE, intr->bnx_intr_func, intr->bnx_intr_arg,
		    &intr->bnx_intr_hand, intr->bnx_intr_serialize,
		    intr->bnx_intr_desc);
		if (error) {
			device_printf(sc->bnx_dev,
			    "could not set up %dth intr\n", i);
			/* Undo the first i vectors (0 .. i-1) */
			bnx_teardown_intr(sc, i);
			return error;
		}
	}
	return 0;
}

/*
 * Tear down the first 'cnt' installed interrupt handlers.
 */
static void
bnx_teardown_intr(struct bnx_softc *sc, int cnt)
{
	int i;

	for (i = 0; i < cnt; ++i) {
		struct bnx_intr_data *intr = &sc->bnx_intr_data[i];

		bus_teardown_intr(sc->bnx_dev, intr->bnx_intr_res,
		    intr->bnx_intr_hand);
	}
}

/*
 * Release interrupt resources allocated by bnx_alloc_intr().
 */
static void
bnx_free_intr(struct bnx_softc *sc)
{
	if (sc->bnx_intr_type != PCI_INTR_TYPE_MSIX) {
		struct bnx_intr_data *intr;

		KKASSERT(sc->bnx_intr_cnt <= 1);
		intr = &sc->bnx_intr_data[0];

		if (intr->bnx_intr_res != NULL) {
			bus_release_resource(sc->bnx_dev, SYS_RES_IRQ,
			    intr->bnx_intr_rid, intr->bnx_intr_res);
		}
		if (sc->bnx_intr_type == PCI_INTR_TYPE_MSI)
			pci_release_msi(sc->bnx_dev);
	} else {
		/* TRUE: also undo pci_setup_msix() */
		bnx_free_msix(sc, TRUE);
	}
}

/*
 * Build the flat array of serializers handed to the
 * ifnet_serialize_array_*() helpers.
 */
static void
bnx_setup_serialize(struct bnx_softc *sc)
{
	int i, j;

	/*
	 * Allocate serializer array
	 */

	/* Main + RX STD + TX + RX RET */
	sc->bnx_serialize_cnt = 1 + 1 + sc->bnx_tx_ringcnt + sc->bnx_rx_retcnt;

	sc->bnx_serialize =
	    kmalloc(sc->bnx_serialize_cnt * sizeof(struct lwkt_serialize *),
	        M_DEVBUF, M_WAITOK | M_ZERO);

	/*
	 * Setup serializers
	 *
	 * NOTE: Order is critical
	 */

	i = 0;

	/* Slot 0: main serializer (skipped by bnx_serialize_skipmain()) */
	KKASSERT(i < sc->bnx_serialize_cnt);
	sc->bnx_serialize[i++] = &sc->bnx_main_serialize;

	KKASSERT(i < sc->bnx_serialize_cnt);
	sc->bnx_serialize[i++] = &sc->bnx_rx_std_ring.bnx_rx_std_serialize;

	for (j = 0; j < sc->bnx_rx_retcnt; ++j) {
		KKASSERT(i < sc->bnx_serialize_cnt);
		sc->bnx_serialize[i++] =
		    &sc->bnx_rx_ret_ring[j].bnx_rx_ret_serialize;
	}

	for (j = 0; j < sc->bnx_tx_ringcnt; ++j) {
		KKASSERT(i < sc->bnx_serialize_cnt);
		sc->bnx_serialize[i++] =
		    &sc->bnx_tx_ring[j].bnx_tx_serialize;
	}

	KKASSERT(i == sc->bnx_serialize_cnt);
}

/* ifnet if_serialize method: enter the requested serializer set. */
static void
bnx_serialize(struct ifnet *ifp, enum ifnet_serialize slz)
{
	struct bnx_softc *sc = ifp->if_softc;

	ifnet_serialize_array_enter(sc->bnx_serialize,
	    sc->bnx_serialize_cnt, slz);
}

/* ifnet if_deserialize method. */
static void
bnx_deserialize(struct ifnet *ifp, enum ifnet_serialize slz)
{
	struct bnx_softc *sc = ifp->if_softc;

	ifnet_serialize_array_exit(sc->bnx_serialize,
	    sc->bnx_serialize_cnt, slz);
}

/* ifnet if_tryserialize method: non-blocking variant. */
static int
bnx_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz)
{
	struct bnx_softc *sc = ifp->if_softc;

	return ifnet_serialize_array_try(sc->bnx_serialize,
	    sc->bnx_serialize_cnt, slz);
}

#ifdef INVARIANTS

static void
bnx_serialize_assert(struct ifnet *ifp, enum ifnet_serialize slz,
    boolean_t serialized)
{
	struct bnx_softc *sc = ifp->if_softc;

	ifnet_serialize_array_assert(sc->bnx_serialize, sc->bnx_serialize_cnt,
	    slz, serialized);
}

#endif	/* INVARIANTS */

#ifdef IFPOLL_ENABLE

/*
 * Sysctl handler: set both the RX and TX polling cpu offsets at once.
 * The offset must satisfy the stricter RX constraint (multiple of the
 * RX return ring count and below ncpus2).
 */
static int
bnx_sysctl_npoll_offset(SYSCTL_HANDLER_ARGS)
{
	struct bnx_softc *sc = (void *)arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error, off;

	off = sc->bnx_npoll_rxoff;
	error = sysctl_handle_int(oidp, &off, 0, req);
	if (error || req->newptr == NULL)
		return error;
	if (off < 0)
		return EINVAL;

	ifnet_serialize_all(ifp);
	if (off >= ncpus2 || off % sc->bnx_rx_retcnt != 0) {
		error = EINVAL;
	} else {
		error = 0;
		sc->bnx_npoll_txoff = off;
		sc->bnx_npoll_rxoff = off;
	}
	ifnet_deserialize_all(ifp);

	return error;
}

/*
 * Sysctl handler: set the RX polling cpu offset only.
 */
static int
bnx_sysctl_npoll_rxoff(SYSCTL_HANDLER_ARGS)
{
	struct bnx_softc *sc = (void *)arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error, off;

	off = sc->bnx_npoll_rxoff;
	error = sysctl_handle_int(oidp, &off, 0, req);
	if (error || req->newptr == NULL)
		return error;
	if (off < 0)
		return EINVAL;

	ifnet_serialize_all(ifp);
	if (off >= ncpus2 || off % sc->bnx_rx_retcnt != 0) {
		error = EINVAL;
	} else {
		error = 0;
		sc->bnx_npoll_rxoff = off;
	}
	ifnet_deserialize_all(ifp);

	return error;
}

/*
 * Sysctl handler: set the TX polling cpu offset only; the sole
 * constraint is that it is a valid cpu index.
 */
static int
bnx_sysctl_npoll_txoff(SYSCTL_HANDLER_ARGS)
{
	struct bnx_softc *sc = (void *)arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error, off;

	off = sc->bnx_npoll_txoff;
	error = sysctl_handle_int(oidp, &off, 0, req);
	if (error || req->newptr == NULL)
		return error;
	if (off < 0)
		return EINVAL;

	ifnet_serialize_all(ifp);
	if
 (off >= ncpus2) {
		error = EINVAL;
	} else {
		error = 0;
		sc->bnx_npoll_txoff = off;
	}
	ifnet_deserialize_all(ifp);

	return error;
}

#endif	/* IFPOLL_ENABLE */

/*
 * Choose the cpu on which the periodic tick callout runs.
 */
static void
bnx_set_tick_cpuid(struct bnx_softc *sc, boolean_t polling)
{
	if (polling)
		sc->bnx_tick_cpuid = 0; /* XXX */
	else
		sc->bnx_tick_cpuid = sc->bnx_intr_data[0].bnx_intr_cpuid;
}

/*
 * Dedicated kernel thread that refills the standard RX ring.  It is
 * (re)scheduled by bnx_rx_std_refill_sched() and parks itself when no
 * refill work is pending; bnx_rx_std_refill_stop() asks it to exit.
 *
 * NOTE: statement order and the fences below implement a lockless
 * handshake with the scheduling side; do not reorder.
 */
static void
bnx_rx_std_refill_ithread(void *xstd)
{
	struct bnx_rx_std_ring *std = xstd;
	struct globaldata *gd = mycpu;

	crit_enter_gd(gd);

	while (!std->bnx_rx_std_stop) {
		if (std->bnx_rx_std_refill) {
			lwkt_serialize_handler_call(
			    &std->bnx_rx_std_serialize,
			    bnx_rx_std_refill, std, NULL);
		}

		/* Briefly open the critical section between passes. */
		crit_exit_gd(gd);
		crit_enter_gd(gd);

		atomic_poll_release_int(&std->bnx_rx_std_running);
		cpu_mfence();

		if (!std->bnx_rx_std_refill && !std->bnx_rx_std_stop) {
			/* No work pending; sleep until rescheduled. */
			lwkt_deschedule_self(gd->gd_curthread);
			lwkt_switch();
		}
	}

	crit_exit_gd(gd);

	/* Wake up the thread waiting for this ithread to terminate. */
	wakeup(std);

	lwkt_exit();
}

/*
 * Refill the standard RX ring with buffers that the RX return ring
 * handlers have marked as refilled, and advance the producer index.
 * Runs via lwkt_serialize_handler_call() under bnx_rx_std_serialize.
 */
static void
bnx_rx_std_refill(void *xstd, void *frame __unused)
{
	struct bnx_rx_std_ring *std = xstd;
	int cnt, refill_mask;

again:
	cnt = 0;

	/* Fetch and clear the pending refill request mask. */
	cpu_lfence();
	refill_mask = std->bnx_rx_std_refill;
	atomic_clear_int(&std->bnx_rx_std_refill, refill_mask);

	while (refill_mask) {
		uint16_t check_idx = std->bnx_rx_std;
		int ret_idx;

		ret_idx = bsfl(refill_mask);
		for (;;) {
			struct bnx_rx_buf *rb;
			int refilled;

			BNX_INC(check_idx, BGE_STD_RX_RING_CNT);
			rb = &std->bnx_rx_std_buf[check_idx];
			refilled = rb->bnx_rx_refilled;
			cpu_lfence();
			if (refilled) {
				bnx_setup_rxdesc_std(std, check_idx);
				std->bnx_rx_std = check_idx;
				++cnt;
				/* Flush to hardware in batches of 8. */
				if (cnt >= 8) {
					atomic_subtract_int(
&std->bnx_rx_std_used, cnt); 5696 bnx_writembx(std->bnx_sc, 5697 BGE_MBX_RX_STD_PROD_LO, 5698 std->bnx_rx_std); 5699 cnt = 0; 5700 } 5701 } else { 5702 break; 5703 } 5704 } 5705 refill_mask &= ~(1 << ret_idx); 5706 } 5707 5708 if (cnt) { 5709 atomic_subtract_int(&std->bnx_rx_std_used, cnt); 5710 bnx_writembx(std->bnx_sc, BGE_MBX_RX_STD_PROD_LO, 5711 std->bnx_rx_std); 5712 } 5713 5714 if (std->bnx_rx_std_refill) 5715 goto again; 5716 5717 atomic_poll_release_int(&std->bnx_rx_std_running); 5718 cpu_mfence(); 5719 5720 if (std->bnx_rx_std_refill) 5721 goto again; 5722 } 5723 5724 static int 5725 bnx_sysctl_std_refill(SYSCTL_HANDLER_ARGS) 5726 { 5727 struct bnx_softc *sc = (void *)arg1; 5728 struct ifnet *ifp = &sc->arpcom.ac_if; 5729 struct bnx_rx_ret_ring *ret = &sc->bnx_rx_ret_ring[0]; 5730 int error, cntmax, i; 5731 5732 cntmax = ret->bnx_rx_cntmax; 5733 error = sysctl_handle_int(oidp, &cntmax, 0, req); 5734 if (error || req->newptr == NULL) 5735 return error; 5736 5737 ifnet_serialize_all(ifp); 5738 5739 if ((cntmax * sc->bnx_rx_retcnt) >= BGE_STD_RX_RING_CNT / 2) { 5740 error = EINVAL; 5741 goto back; 5742 } 5743 5744 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) 5745 sc->bnx_rx_ret_ring[i].bnx_rx_cntmax = cntmax; 5746 error = 0; 5747 5748 back: 5749 ifnet_deserialize_all(ifp); 5750 5751 return error; 5752 } 5753 5754 static void 5755 bnx_init_rss(struct bnx_softc *sc) 5756 { 5757 uint8_t key[BGE_RSS_KEYREG_CNT * BGE_RSS_KEYREG_SIZE]; 5758 int i, j, r; 5759 5760 KKASSERT(BNX_RSS_ENABLED(sc)); 5761 5762 /* 5763 * Configure RSS redirect table in following fashion: 5764 * (hash & ring_cnt_mask) == rdr_table[(hash & rdr_table_mask)] 5765 */ 5766 r = 0; 5767 for (j = 0; j < BGE_RSS_INDIR_TBL_CNT; ++j) { 5768 uint32_t tbl = 0; 5769 5770 for (i = 0; i < BGE_RSS_INDIR_TBLENT_CNT; ++i) { 5771 uint32_t q; 5772 5773 q = r % sc->bnx_rx_retcnt; 5774 tbl |= q << (BGE_RSS_INDIR_TBLENT_SHIFT * 5775 (BGE_RSS_INDIR_TBLENT_CNT - i - 1)); 5776 ++r; 5777 } 5778 5779 BNX_RSS_DPRINTF(sc, 1, 
"tbl%d %08x\n", j, tbl); 5780 CSR_WRITE_4(sc, BGE_RSS_INDIR_TBL(j), tbl); 5781 } 5782 5783 toeplitz_get_key(key, sizeof(key)); 5784 for (i = 0; i < BGE_RSS_KEYREG_CNT; ++i) { 5785 uint32_t keyreg; 5786 5787 keyreg = BGE_RSS_KEYREG_VAL(key, i); 5788 5789 BNX_RSS_DPRINTF(sc, 1, "key%d %08x\n", i, keyreg); 5790 CSR_WRITE_4(sc, BGE_RSS_KEYREG(i), keyreg); 5791 } 5792 } 5793 5794 static void 5795 bnx_setup_ring_cnt(struct bnx_softc *sc) 5796 { 5797 int msix_enable, i, msix_cnt, msix_cnt2, ring_max; 5798 5799 sc->bnx_tx_ringcnt = 1; 5800 sc->bnx_rx_retcnt = 1; 5801 sc->bnx_intr_cnt = 1; 5802 5803 msix_enable = device_getenv_int(sc->bnx_dev, "msix.enable", 5804 bnx_msix_enable); 5805 if (!msix_enable) 5806 return; 5807 5808 if (ncpus2 == 1) 5809 return; 5810 5811 msix_cnt = pci_msix_count(sc->bnx_dev); 5812 if (msix_cnt <= 1) 5813 return; 5814 5815 i = 0; 5816 while ((1 << (i + 1)) <= msix_cnt) 5817 ++i; 5818 msix_cnt2 = 1 << i; 5819 5820 /* 5821 * One MSI-X vector is dedicated to status or single TX queue, 5822 * so make sure that there are enough MSI-X vectors. 5823 */ 5824 if (msix_cnt == msix_cnt2) { 5825 /* 5826 * XXX 5827 * This probably will not happen; 57785/5718 families 5828 * come with at least 5 MSI-X vectors. 5829 */ 5830 msix_cnt2 >>= 1; 5831 if (msix_cnt2 <= 1) { 5832 device_printf(sc->bnx_dev, 5833 "MSI-X count %d could not be used\n", msix_cnt); 5834 return; 5835 } 5836 device_printf(sc->bnx_dev, "MSI-X count %d is power of 2\n", 5837 msix_cnt); 5838 } 5839 5840 /* 5841 * Setup RX ring count 5842 */ 5843 ring_max = BNX_RX_RING_MAX; 5844 if (ring_max > msix_cnt2) 5845 ring_max = msix_cnt2; 5846 sc->bnx_rx_retcnt = device_getenv_int(sc->bnx_dev, "rx_rings", 5847 bnx_rx_rings); 5848 sc->bnx_rx_retcnt = if_ring_count2(sc->bnx_rx_retcnt, ring_max); 5849 5850 if (sc->bnx_rx_retcnt == 1) 5851 return; 5852 5853 /* 5854 * We need one extra MSI-X vector for link status or 5855 * TX ring (if only one TX ring is enabled). 
	 */
	sc->bnx_intr_cnt = sc->bnx_rx_retcnt + 1;

	/*
	 * Setup TX ring count
	 *
	 * Currently only BCM5719 and BCM5720 support multiple TX rings
	 * and the TX ring count must be less than the RX ring count.
	 */
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5719 ||
	    sc->bnx_asicrev == BGE_ASICREV_BCM5720) {
		ring_max = BNX_TX_RING_MAX;
		if (ring_max > msix_cnt2)
			ring_max = msix_cnt2;
		if (ring_max > sc->bnx_rx_retcnt)
			ring_max = sc->bnx_rx_retcnt;
		sc->bnx_tx_ringcnt = device_getenv_int(sc->bnx_dev, "tx_rings",
		    bnx_tx_rings);
		sc->bnx_tx_ringcnt = if_ring_count2(sc->bnx_tx_ringcnt,
		    ring_max);
	}
}

/*
 * Allocate MSI-X vectors and assign per-vector interrupt data.
 * Vector 0 handles link status (optionally bundled with TX); the
 * remaining vectors each handle an RX return ring (optionally bundled
 * with a TX ring when BNX_FLAG_RXTX_BUNDLE is set).
 */
static int
bnx_alloc_msix(struct bnx_softc *sc)
{
	struct bnx_intr_data *intr;
	boolean_t setup = FALSE;
	int error, i, offset, offset_def;

	KKASSERT(sc->bnx_intr_cnt > 1);
	KKASSERT(sc->bnx_intr_cnt == sc->bnx_rx_retcnt + 1);

	if (sc->bnx_flags & BNX_FLAG_RXTX_BUNDLE) {
		/*
		 * Link status
		 */
		intr = &sc->bnx_intr_data[0];

		intr->bnx_intr_serialize = &sc->bnx_main_serialize;
		intr->bnx_saved_status_tag = &sc->bnx_saved_status_tag;

		intr->bnx_intr_func = bnx_msix_status;
		intr->bnx_intr_arg = sc;
		intr->bnx_intr_cpuid = 0; /* XXX */

		ksnprintf(intr->bnx_intr_desc0, sizeof(intr->bnx_intr_desc0),
		    "%s sts", device_get_nameunit(sc->bnx_dev));
		intr->bnx_intr_desc = intr->bnx_intr_desc0;

		/*
		 * RX/TX rings
		 */
		if (sc->bnx_rx_retcnt == ncpus2) {
			offset = 0;
		} else {
			/* Spread rings of multiple devices across cpus. */
			offset_def = (sc->bnx_rx_retcnt *
			    device_get_unit(sc->bnx_dev)) % ncpus2;

			offset = device_getenv_int(sc->bnx_dev,
			    "msix.offset", offset_def);
			if (offset >= ncpus2 ||
			    offset % sc->bnx_rx_retcnt != 0) {
				device_printf(sc->bnx_dev,
				    "invalid msix.offset %d, use %d\n",
				    offset, offset_def);
				offset = offset_def;
			}
		}

		for (i
 = 1; i < sc->bnx_intr_cnt; ++i) {
			int idx = i - 1;	/* RX return ring index */

			intr = &sc->bnx_intr_data[i];

			KKASSERT(idx < sc->bnx_rx_retcnt);
			intr->bnx_ret = &sc->bnx_rx_ret_ring[idx];
			if (idx < sc->bnx_tx_ringcnt) {
				/* Bundle a TX ring with this RX ring. */
				intr->bnx_txr = &sc->bnx_tx_ring[idx];
				intr->bnx_ret->bnx_txr = intr->bnx_txr;
			}

			intr->bnx_intr_serialize =
			    &intr->bnx_ret->bnx_rx_ret_serialize;
			intr->bnx_saved_status_tag =
			    &intr->bnx_ret->bnx_saved_status_tag;

			intr->bnx_intr_arg = intr->bnx_ret;
			KKASSERT(idx + offset < ncpus2);
			intr->bnx_intr_cpuid = idx + offset;

			if (intr->bnx_txr == NULL) {
				intr->bnx_intr_check = bnx_check_intr_rx;
				intr->bnx_intr_func = bnx_msix_rx;
				ksnprintf(intr->bnx_intr_desc0,
				    sizeof(intr->bnx_intr_desc0), "%s rx%d",
				    device_get_nameunit(sc->bnx_dev), idx);
			} else {
				intr->bnx_intr_check = bnx_check_intr_rxtx;
				intr->bnx_intr_func = bnx_msix_rxtx;
				ksnprintf(intr->bnx_intr_desc0,
				    sizeof(intr->bnx_intr_desc0), "%s rxtx%d",
				    device_get_nameunit(sc->bnx_dev), idx);

				intr->bnx_txr->bnx_tx_cpuid =
				    intr->bnx_intr_cpuid;
			}
			intr->bnx_intr_desc = intr->bnx_intr_desc0;

			intr->bnx_ret->bnx_msix_mbx = intr->bnx_intr_mbx;
		}
	} else {
		/*
		 * TX ring and link status
		 */
		offset_def = device_get_unit(sc->bnx_dev) % ncpus2;
		offset = device_getenv_int(sc->bnx_dev, "msix.txoff",
		    offset_def);
		if (offset >= ncpus2) {
			device_printf(sc->bnx_dev,
			    "invalid msix.txoff %d, use %d\n",
			    offset, offset_def);
			offset = offset_def;
		}

		intr = &sc->bnx_intr_data[0];

		/* Vector 0: link status bundled with the single TX ring. */
		intr->bnx_txr = &sc->bnx_tx_ring[0];
		intr->bnx_intr_serialize = &sc->bnx_main_serialize;
		intr->bnx_intr_check = bnx_check_intr_tx;
		intr->bnx_saved_status_tag =
		    &intr->bnx_txr->bnx_saved_status_tag;

		intr->bnx_intr_func = bnx_msix_tx_status;
		intr->bnx_intr_arg = intr->bnx_txr;
		intr->bnx_intr_cpuid = offset;

		ksnprintf(intr->bnx_intr_desc0, sizeof(intr->bnx_intr_desc0),
		    "%s ststx", device_get_nameunit(sc->bnx_dev));
		intr->bnx_intr_desc = intr->bnx_intr_desc0;

		intr->bnx_txr->bnx_tx_cpuid = intr->bnx_intr_cpuid;

		/*
		 * RX rings
		 */
		if (sc->bnx_rx_retcnt == ncpus2) {
			offset = 0;
		} else {
			/* Spread rings of multiple devices across cpus. */
			offset_def = (sc->bnx_rx_retcnt *
			    device_get_unit(sc->bnx_dev)) % ncpus2;

			offset = device_getenv_int(sc->bnx_dev,
			    "msix.rxoff", offset_def);
			if (offset >= ncpus2 ||
			    offset % sc->bnx_rx_retcnt != 0) {
				device_printf(sc->bnx_dev,
				    "invalid msix.rxoff %d, use %d\n",
				    offset, offset_def);
				offset = offset_def;
			}
		}

		for (i = 1; i < sc->bnx_intr_cnt; ++i) {
			int idx = i - 1;	/* RX return ring index */

			intr = &sc->bnx_intr_data[i];

			KKASSERT(idx < sc->bnx_rx_retcnt);
			intr->bnx_ret = &sc->bnx_rx_ret_ring[idx];
			intr->bnx_intr_serialize =
			    &intr->bnx_ret->bnx_rx_ret_serialize;
			intr->bnx_intr_check = bnx_check_intr_rx;
			intr->bnx_saved_status_tag =
			    &intr->bnx_ret->bnx_saved_status_tag;

			intr->bnx_intr_func = bnx_msix_rx;
			intr->bnx_intr_arg = intr->bnx_ret;
			KKASSERT(idx + offset < ncpus2);
			intr->bnx_intr_cpuid = idx + offset;

			ksnprintf(intr->bnx_intr_desc0,
			    sizeof(intr->bnx_intr_desc0), "%s rx%d",
			    device_get_nameunit(sc->bnx_dev), idx);
			intr->bnx_intr_desc = intr->bnx_intr_desc0;

			intr->bnx_ret->bnx_msix_mbx = intr->bnx_intr_mbx;
		}
	}

	/* Map the MSI-X table BAR; 5717+ uses BAR4, older parts BAR2. */
	if (BNX_IS_5717_PLUS(sc)) {
		sc->bnx_msix_mem_rid = PCIR_BAR(4);
	} else {
		if (sc->bnx_res2 == NULL)
			sc->bnx_msix_mem_rid = PCIR_BAR(2);
	}
	if (sc->bnx_msix_mem_rid != 0) {
		sc->bnx_msix_mem_res = bus_alloc_resource_any(sc->bnx_dev,
		    SYS_RES_MEMORY, &sc->bnx_msix_mem_rid, RF_ACTIVE);
		if (sc->bnx_msix_mem_res == NULL) {
			device_printf(sc->bnx_dev,
			    "could not "
			    "alloc MSI-X table\n");
			return ENXIO;
		}
	}

	/* TRUE: program MSI-X oneshot mode */
	bnx_enable_msi(sc, TRUE);

	error = pci_setup_msix(sc->bnx_dev);
	if (error) {
		device_printf(sc->bnx_dev, "could not setup MSI-X\n");
		goto back;
	}
	setup = TRUE;

	for (i = 0; i < sc->bnx_intr_cnt; ++i) {
		intr = &sc->bnx_intr_data[i];

		error = pci_alloc_msix_vector(sc->bnx_dev, i,
		    &intr->bnx_intr_rid, intr->bnx_intr_cpuid);
		if (error) {
			device_printf(sc->bnx_dev,
			    "could not alloc MSI-X %d on cpu%d\n",
			    i, intr->bnx_intr_cpuid);
			goto back;
		}

		intr->bnx_intr_res = bus_alloc_resource_any(sc->bnx_dev,
		    SYS_RES_IRQ, &intr->bnx_intr_rid, RF_ACTIVE);
		if (intr->bnx_intr_res == NULL) {
			device_printf(sc->bnx_dev,
			    "could not alloc MSI-X %d resource\n", i);
			error = ENXIO;
			goto back;
		}
	}

	pci_enable_msix(sc->bnx_dev);
	sc->bnx_intr_type = PCI_INTR_TYPE_MSIX;
back:
	if (error)
		bnx_free_msix(sc, setup);
	return error;
}

/*
 * Release MSI-X vectors/resources; 'setup' indicates whether
 * pci_setup_msix() succeeded and must be undone.
 */
static void
bnx_free_msix(struct bnx_softc *sc, boolean_t setup)
{
	int i;

	KKASSERT(sc->bnx_intr_cnt > 1);

	for (i = 0; i < sc->bnx_intr_cnt; ++i) {
		struct bnx_intr_data *intr = &sc->bnx_intr_data[i];

		if (intr->bnx_intr_res != NULL) {
			bus_release_resource(sc->bnx_dev, SYS_RES_IRQ,
			    intr->bnx_intr_rid, intr->bnx_intr_res);
		}
		if (intr->bnx_intr_rid >= 0) {
			pci_release_msix_vector(sc->bnx_dev,
			    intr->bnx_intr_rid);
		}
	}
	if (setup)
		pci_teardown_msix(sc->bnx_dev);
}

/*
 * IPI target: mark this RX return ring's refill request on the
 * standard ring and wake the refill ithread on its own cpu.
 */
static void
bnx_rx_std_refill_sched_ipi(void *xret)
{
	struct bnx_rx_ret_ring *ret = xret;
	struct bnx_rx_std_ring *std = ret->bnx_std;
	struct globaldata *gd = mycpu;

	crit_enter_gd(gd);

	atomic_set_int(&std->bnx_rx_std_refill, ret->bnx_rx_mask);
	cpu_sfence();

	KKASSERT(std->bnx_rx_std_ithread.td_gd ==
	    gd);
	lwkt_schedule(&std->bnx_rx_std_ithread);

	crit_exit_gd(gd);
}

/*
 * Ask the RX refill ithread to terminate; must run on the ithread's
 * cpu (asserted below).  The ithread wakeup()s 'std' on exit.
 */
static void
bnx_rx_std_refill_stop(void *xstd)
{
	struct bnx_rx_std_ring *std = xstd;
	struct globaldata *gd = mycpu;

	crit_enter_gd(gd);

	std->bnx_rx_std_stop = 1;
	cpu_sfence();

	KKASSERT(std->bnx_rx_std_ithread.td_gd == gd);
	lwkt_schedule(&std->bnx_rx_std_ithread);

	crit_exit_gd(gd);
}

/* Enter all serializers except slot 0 (the main serializer). */
static void
bnx_serialize_skipmain(struct bnx_softc *sc)
{
	lwkt_serialize_array_enter(sc->bnx_serialize,
	    sc->bnx_serialize_cnt, 1);
}

/* Exit all serializers except slot 0 (the main serializer). */
static void
bnx_deserialize_skipmain(struct bnx_softc *sc)
{
	lwkt_serialize_array_exit(sc->bnx_serialize,
	    sc->bnx_serialize_cnt, 1);
}

/*
 * Request a standard RX ring refill on behalf of an RX return ring.
 * Wakes the refill ithread directly when it lives on this cpu, else
 * sends an IPI; atomic_poll_acquire_int() makes sure only one
 * scheduling attempt is in flight at a time.
 */
static void
bnx_rx_std_refill_sched(struct bnx_rx_ret_ring *ret,
    struct bnx_rx_std_ring *std)
{
	struct globaldata *gd = mycpu;

	ret->bnx_rx_cnt = 0;
	cpu_sfence();

	crit_enter_gd(gd);

	atomic_set_int(&std->bnx_rx_std_refill, ret->bnx_rx_mask);
	cpu_sfence();
	if (atomic_poll_acquire_int(&std->bnx_rx_std_running)) {
		if (std->bnx_rx_std_ithread.td_gd == gd) {
			lwkt_schedule(&std->bnx_rx_std_ithread);
		} else {
			lwkt_send_ipiq(
			    std->bnx_rx_std_ithread.td_gd,
			    bnx_rx_std_refill_sched_ipi, ret);
		}
	}

	crit_exit_gd(gd);
}

/*
 * Fill in packet info for RSS hash redistribution from an RX BD.
 * Returns 'pi' on success or NULL when the packet is not suitable
 * (IPv6, missing/bad checksums, or a possible IP fragment).
 */
static struct pktinfo *
bnx_rss_info(struct pktinfo *pi, const struct bge_rx_bd *cur_rx)
{
	/* Don't pick up IPv6 packet */
	if (cur_rx->bge_flags & BGE_RXBDFLAG_IPV6)
		return NULL;

	/* Don't pick up IP packet w/o IP checksum */
	if ((cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) == 0 ||
	    (cur_rx->bge_error_flag & BGE_RXERRFLAG_IP_CSUM_NOK))
		return NULL;

	/* Don't pick up IP packet w/o TCP/UDP checksum */
	if ((cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) == 0)
		return NULL;

	/* May
 be IP fragment */
	if (cur_rx->bge_tcp_udp_csum != 0xffff)
		return NULL;

	if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_IS_TCP)
		pi->pi_l3proto = IPPROTO_TCP;
	else
		pi->pi_l3proto = IPPROTO_UDP;
	pi->pi_netisr = NETISR_IP;
	pi->pi_flags = 0;

	return pi;
}

/*
 * Notify APE firmware before a chip reset for start/suspend resets.
 */
static void
bnx_sig_pre_reset(struct bnx_softc *sc, int type)
{
	if (type == BNX_RESET_START || type == BNX_RESET_SUSPEND)
		bnx_ape_driver_state_change(sc, type);
}

/*
 * Notify APE firmware after a chip reset for shutdown resets.
 */
static void
bnx_sig_post_reset(struct bnx_softc *sc, int type)
{
	if (type == BNX_RESET_SHUTDOWN)
		bnx_ape_driver_state_change(sc, type);
}

/*
 * Clear all stale locks and select the lock for this driver instance.
 */
static void
bnx_ape_lock_init(struct bnx_softc *sc)
{
	uint32_t bit, regbase;
	int i;

	regbase = BGE_APE_PER_LOCK_GRANT;

	/* Clear any stale locks. */
	for (i = BGE_APE_LOCK_PHY0; i <= BGE_APE_LOCK_GPIO; i++) {
		switch (i) {
		case BGE_APE_LOCK_PHY0:
		case BGE_APE_LOCK_PHY1:
		case BGE_APE_LOCK_PHY2:
		case BGE_APE_LOCK_PHY3:
			/* PHY locks always use the DRIVER0 grant bit. */
			bit = BGE_APE_LOCK_GRANT_DRIVER0;
			break;

		default:
			if (sc->bnx_func_addr == 0)
				bit = BGE_APE_LOCK_GRANT_DRIVER0;
			else
				bit = 1 << sc->bnx_func_addr;
			break;
		}
		APE_WRITE_4(sc, regbase + 4 * i, bit);
	}

	/* Select the PHY lock based on the device's function number.
 */
	switch (sc->bnx_func_addr) {
	case 0:
		sc->bnx_phy_ape_lock = BGE_APE_LOCK_PHY0;
		break;

	case 1:
		sc->bnx_phy_ape_lock = BGE_APE_LOCK_PHY1;
		break;

	case 2:
		sc->bnx_phy_ape_lock = BGE_APE_LOCK_PHY2;
		break;

	case 3:
		sc->bnx_phy_ape_lock = BGE_APE_LOCK_PHY3;
		break;

	default:
		device_printf(sc->bnx_dev,
		    "PHY lock not supported on this function\n");
		break;
	}
}

/*
 * Check for APE firmware, set flags, and print version info.
 */
static void
bnx_ape_read_fw_ver(struct bnx_softc *sc)
{
	const char *fwtype;
	uint32_t apedata, features;

	/* Check for a valid APE signature in shared memory. */
	apedata = APE_READ_4(sc, BGE_APE_SEG_SIG);
	if (apedata != BGE_APE_SEG_SIG_MAGIC) {
		device_printf(sc->bnx_dev, "no APE signature\n");
		sc->bnx_mfw_flags &= ~BNX_MFW_ON_APE;
		return;
	}

	/* Check if APE firmware is running. */
	apedata = APE_READ_4(sc, BGE_APE_FW_STATUS);
	if ((apedata & BGE_APE_FW_STATUS_READY) == 0) {
		device_printf(sc->bnx_dev, "APE signature found "
		    "but FW status not ready! 0x%08x\n", apedata);
		return;
	}

	sc->bnx_mfw_flags |= BNX_MFW_ON_APE;

	/* Fetch the APE firmware type and version. */
	apedata = APE_READ_4(sc, BGE_APE_FW_VERSION);
	features = APE_READ_4(sc, BGE_APE_FW_FEATURES);
	if (features & BGE_APE_FW_FEATURE_NCSI) {
		sc->bnx_mfw_flags |= BNX_MFW_TYPE_NCSI;
		fwtype = "NCSI";
	} else if (features & BGE_APE_FW_FEATURE_DASH) {
		sc->bnx_mfw_flags |= BNX_MFW_TYPE_DASH;
		fwtype = "DASH";
	} else {
		fwtype = "UNKN";
	}

	/* Print the APE firmware version.
 */
	device_printf(sc->bnx_dev, "APE FW version: %s v%d.%d.%d.%d\n",
	    fwtype,
	    (apedata & BGE_APE_FW_VERSION_MAJMSK) >> BGE_APE_FW_VERSION_MAJSFT,
	    (apedata & BGE_APE_FW_VERSION_MINMSK) >> BGE_APE_FW_VERSION_MINSFT,
	    (apedata & BGE_APE_FW_VERSION_REVMSK) >> BGE_APE_FW_VERSION_REVSFT,
	    (apedata & BGE_APE_FW_VERSION_BLDMSK));
}

/*
 * Acquire an APE lock shared with the management firmware.
 * Returns 0 on success (or when no APE is present), EINVAL for an
 * unknown lock, or EBUSY when the lock cannot be obtained within
 * about one second.
 */
static int
bnx_ape_lock(struct bnx_softc *sc, int locknum)
{
	uint32_t bit, gnt, req, status;
	int i, off;

	if ((sc->bnx_mfw_flags & BNX_MFW_ON_APE) == 0)
		return 0;

	/* Lock request/grant registers have different bases. */
	req = BGE_APE_PER_LOCK_REQ;
	gnt = BGE_APE_PER_LOCK_GRANT;

	off = 4 * locknum;

	switch (locknum) {
	case BGE_APE_LOCK_GPIO:
		/* Lock required when using GPIO. */
		if (sc->bnx_func_addr == 0)
			bit = BGE_APE_LOCK_REQ_DRIVER0;
		else
			bit = 1 << sc->bnx_func_addr;
		break;

	case BGE_APE_LOCK_GRC:
		/* Lock required to reset the device. */
		if (sc->bnx_func_addr == 0)
			bit = BGE_APE_LOCK_REQ_DRIVER0;
		else
			bit = 1 << sc->bnx_func_addr;
		break;

	case BGE_APE_LOCK_MEM:
		/* Lock required when accessing certain APE memory. */
		if (sc->bnx_func_addr == 0)
			bit = BGE_APE_LOCK_REQ_DRIVER0;
		else
			bit = 1 << sc->bnx_func_addr;
		break;

	case BGE_APE_LOCK_PHY0:
	case BGE_APE_LOCK_PHY1:
	case BGE_APE_LOCK_PHY2:
	case BGE_APE_LOCK_PHY3:
		/* Lock required when accessing PHYs. */
		bit = BGE_APE_LOCK_REQ_DRIVER0;
		break;

	default:
		return EINVAL;
	}

	/* Request a lock. */
	APE_WRITE_4(sc, req + off, bit);

	/* Wait up to 1 second to acquire lock. */
	for (i = 0; i < 20000; i++) {
		status = APE_READ_4(sc, gnt + off);
		if (status == bit)
			break;
		DELAY(50);
	}

	/* Handle any errors.
 */
	if (status != bit) {
		if_printf(&sc->arpcom.ac_if, "APE lock %d request failed! "
		    "request = 0x%04x[0x%04x], status = 0x%04x[0x%04x]\n",
		    locknum, req + off, bit & 0xFFFF, gnt + off,
		    status & 0xFFFF);
		/* Revoke the lock request. */
		APE_WRITE_4(sc, gnt + off, bit);
		return EBUSY;
	}

	return 0;
}

/*
 * Release an APE lock taken with bnx_ape_lock(); writing our grant
 * bit back to the grant register drops the lock.
 */
static void
bnx_ape_unlock(struct bnx_softc *sc, int locknum)
{
	uint32_t bit, gnt;
	int off;

	if ((sc->bnx_mfw_flags & BNX_MFW_ON_APE) == 0)
		return;

	gnt = BGE_APE_PER_LOCK_GRANT;

	off = 4 * locknum;

	switch (locknum) {
	case BGE_APE_LOCK_GPIO:
		if (sc->bnx_func_addr == 0)
			bit = BGE_APE_LOCK_GRANT_DRIVER0;
		else
			bit = 1 << sc->bnx_func_addr;
		break;

	case BGE_APE_LOCK_GRC:
		if (sc->bnx_func_addr == 0)
			bit = BGE_APE_LOCK_GRANT_DRIVER0;
		else
			bit = 1 << sc->bnx_func_addr;
		break;

	case BGE_APE_LOCK_MEM:
		if (sc->bnx_func_addr == 0)
			bit = BGE_APE_LOCK_GRANT_DRIVER0;
		else
			bit = 1 << sc->bnx_func_addr;
		break;

	case BGE_APE_LOCK_PHY0:
	case BGE_APE_LOCK_PHY1:
	case BGE_APE_LOCK_PHY2:
	case BGE_APE_LOCK_PHY3:
		bit = BGE_APE_LOCK_GRANT_DRIVER0;
		break;

	default:
		return;
	}

	APE_WRITE_4(sc, gnt + off, bit);
}

/*
 * Send an event to the APE firmware.
 */
static void
bnx_ape_send_event(struct bnx_softc *sc, uint32_t event)
{
	uint32_t apedata;
	int i;

	/* NCSI does not support APE events. */
	if ((sc->bnx_mfw_flags & BNX_MFW_ON_APE) == 0)
		return;

	/* Wait up to 1ms for APE to service previous event.
 */
	for (i = 10; i > 0; i--) {
		if (bnx_ape_lock(sc, BGE_APE_LOCK_MEM) != 0)
			break;
		apedata = APE_READ_4(sc, BGE_APE_EVENT_STATUS);
		if ((apedata & BGE_APE_EVENT_STATUS_EVENT_PENDING) == 0) {
			/* Previous event consumed; post ours and ring it. */
			APE_WRITE_4(sc, BGE_APE_EVENT_STATUS, event |
			    BGE_APE_EVENT_STATUS_EVENT_PENDING);
			bnx_ape_unlock(sc, BGE_APE_LOCK_MEM);
			APE_WRITE_4(sc, BGE_APE_EVENT, BGE_APE_EVENT_1);
			break;
		}
		bnx_ape_unlock(sc, BGE_APE_LOCK_MEM);
		DELAY(100);
	}
	if (i == 0) {
		if_printf(&sc->arpcom.ac_if,
		    "APE event 0x%08x send timed out\n", event);
	}
}

/*
 * Inform the APE firmware of a driver state change (start, suspend
 * or unload), updating the shared host segment as needed.
 */
static void
bnx_ape_driver_state_change(struct bnx_softc *sc, int kind)
{
	uint32_t apedata, event;

	if ((sc->bnx_mfw_flags & BNX_MFW_ON_APE) == 0)
		return;

	switch (kind) {
	case BNX_RESET_START:
		/* If this is the first load, clear the load counter. */
		apedata = APE_READ_4(sc, BGE_APE_HOST_SEG_SIG);
		if (apedata != BGE_APE_HOST_SEG_SIG_MAGIC) {
			APE_WRITE_4(sc, BGE_APE_HOST_INIT_COUNT, 0);
		} else {
			apedata = APE_READ_4(sc, BGE_APE_HOST_INIT_COUNT);
			APE_WRITE_4(sc, BGE_APE_HOST_INIT_COUNT, ++apedata);
		}
		APE_WRITE_4(sc, BGE_APE_HOST_SEG_SIG,
		    BGE_APE_HOST_SEG_SIG_MAGIC);
		APE_WRITE_4(sc, BGE_APE_HOST_SEG_LEN,
		    BGE_APE_HOST_SEG_LEN_MAGIC);

		/* Add some version info if bnx(4) supports it.
 */
		APE_WRITE_4(sc, BGE_APE_HOST_DRIVER_ID,
		    BGE_APE_HOST_DRIVER_ID_MAGIC(1, 0));
		APE_WRITE_4(sc, BGE_APE_HOST_BEHAVIOR,
		    BGE_APE_HOST_BEHAV_NO_PHYLOCK);
		APE_WRITE_4(sc, BGE_APE_HOST_HEARTBEAT_INT_MS,
		    BGE_APE_HOST_HEARTBEAT_INT_DISABLE);
		APE_WRITE_4(sc, BGE_APE_HOST_DRVR_STATE,
		    BGE_APE_HOST_DRVR_STATE_START);
		event = BGE_APE_EVENT_STATUS_STATE_START;
		break;

	case BNX_RESET_SHUTDOWN:
		APE_WRITE_4(sc, BGE_APE_HOST_DRVR_STATE,
		    BGE_APE_HOST_DRVR_STATE_UNLOAD);
		event = BGE_APE_EVENT_STATUS_STATE_UNLOAD;
		break;

	case BNX_RESET_SUSPEND:
		event = BGE_APE_EVENT_STATUS_STATE_SUSPEND;
		break;

	default:
		return;
	}

	bnx_ape_send_event(sc, event | BGE_APE_EVENT_STATUS_DRIVER_EVNT |
	    BGE_APE_EVENT_STATUS_STATE_CHNGE);
}