/*-
 * Copyright (c) 2006-2007 Broadcom Corporation
 *	David Christensen <davidch@broadcom.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Broadcom Corporation nor the name of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written consent.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/bce/if_bce.c,v 1.31 2007/05/16 23:34:11 davidch Exp $
 */

/*
 * The following controllers are supported by this driver:
 *   BCM5706C A2, A3
 *   BCM5706S A2, A3
 *   BCM5708C B1, B2
 *   BCM5708S B1, B2
 *   BCM5709C A1, B2, C0
 *   BCM5716  C0
 *
 * The following controllers are not supported by this driver:
 *   BCM5706C A0, A1
 *   BCM5706S A0, A1
 *   BCM5708C A0, B0
 *   BCM5708S A0, B0
 *   BCM5709C A0, B0, B1
 *   BCM5709S A0, A1, B0, B1, B2, C0
 *
 * Note about MSI-X on 5709/5716:
 * - 9 MSI-X vectors are supported.
 * - The association between MSI-X vectors, RX/TX rings and status
 *   blocks is fixed:
 *   o  The first RX ring and the first TX ring use the first
 *      status block.
 *   o  The first MSI-X vector is associated with the first
 *      status block.
 *   o  The second RX ring and the second TX ring use the second
 *      status block.
 *   o  The second MSI-X vector is associated with the second
 *      status block.
 *   ... and so forth.
 * - Status blocks must reside in physically contiguous memory
 *   and each status block consumes 128 bytes.  In addition,
 *   this driver aligns the status block memory on a 128 byte
 *   boundary.  (see bce_dma_alloc() and HC_CONFIG)
 * - Each status block has its own coalesce parameters, which also
 *   serve as the related MSI-X vector's interrupt moderation
 *   parameters.
 *   (see bce_coal_change())
 */

#include "opt_bce.h"
#include "opt_ifpoll.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/interrupt.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <netinet/ip.h>
#include <netinet/tcp.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_poll.h>
#include <net/if_types.h>
#include <net/ifq_var.h>
#include <net/toeplitz.h>
#include <net/toeplitz2.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>

#include <dev/netif/mii_layer/mii.h>
#include <dev/netif/mii_layer/miivar.h>
#include <dev/netif/mii_layer/brgphyreg.h>

#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>

#include "miibus_if.h"

#include <dev/netif/bce/if_bcereg.h>
#include <dev/netif/bce/if_bcefw.h>

#define BCE_MSI_CKINTVL		((10 * hz) / 1000)	/* 10ms */

#ifdef BCE_RSS_DEBUG
#define BCE_RSS_DPRINTF(sc, lvl, fmt, ...) \
do { \
	if (sc->rss_debug >= lvl) \
		if_printf(&sc->arpcom.ac_if, fmt, __VA_ARGS__); \
} while (0)
#else	/* !BCE_RSS_DEBUG */
#define BCE_RSS_DPRINTF(sc, lvl, fmt, ...)	((void)0)
#endif	/* BCE_RSS_DEBUG */

/****************************************************************************/
/* PCI Device ID Table                                                      */
/*                                                                          */
/* Used by bce_probe() to identify the devices supported by this driver.   */
/****************************************************************************/
#define BCE_DEVDESC_MAX		64

static struct bce_type bce_devs[] = {
	/* BCM5706C Controllers and OEM boards. */
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x3101,
		"HP NC370T Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x3106,
		"HP NC370i Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x3070,
		"HP NC380T PCIe DP Multifunc Gig Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x1709,
		"HP NC371i Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706, PCI_ANY_ID, PCI_ANY_ID,
		"Broadcom NetXtreme II BCM5706 1000Base-T" },

	/* BCM5706S controllers and OEM boards. */
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, HP_VENDORID, 0x3102,
		"HP NC370F Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, PCI_ANY_ID, PCI_ANY_ID,
		"Broadcom NetXtreme II BCM5706 1000Base-SX" },

	/* BCM5708C controllers and OEM boards. */
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708, HP_VENDORID, 0x7037,
		"HP NC373T PCIe Multifunction Gig Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708, HP_VENDORID, 0x7038,
		"HP NC373i Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708, HP_VENDORID, 0x7045,
		"HP NC374m PCIe Multifunction Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708, PCI_ANY_ID, PCI_ANY_ID,
		"Broadcom NetXtreme II BCM5708 1000Base-T" },

	/* BCM5708S controllers and OEM boards. */
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, HP_VENDORID, 0x1706,
		"HP NC373m Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, HP_VENDORID, 0x703b,
		"HP NC373i Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, HP_VENDORID, 0x703d,
		"HP NC373F PCIe Multifunc Giga Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, PCI_ANY_ID, PCI_ANY_ID,
		"Broadcom NetXtreme II BCM5708S 1000Base-SX" },

	/* BCM5709C controllers and OEM boards. */
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709, HP_VENDORID, 0x7055,
		"HP NC382i DP Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709, HP_VENDORID, 0x7059,
		"HP NC382T PCIe DP Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709, PCI_ANY_ID, PCI_ANY_ID,
		"Broadcom NetXtreme II BCM5709 1000Base-T" },

	/* BCM5709S controllers and OEM boards. */
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709S, HP_VENDORID, 0x171d,
		"HP NC382m DP 1GbE Multifunction BL-c Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709S, HP_VENDORID, 0x7056,
		"HP NC382i DP Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709S, PCI_ANY_ID, PCI_ANY_ID,
		"Broadcom NetXtreme II BCM5709 1000Base-SX" },

	/* BCM5716 controllers and OEM boards. */
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5716, PCI_ANY_ID, PCI_ANY_ID,
		"Broadcom NetXtreme II BCM5716 1000Base-T" },

	{ 0, 0, 0, 0, NULL }
};

/****************************************************************************/
/* Supported Flash NVRAM device data.                                       */
/****************************************************************************/
static const struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS		(BCE_NV_BUFFERED | BCE_NV_TRANSLATE)
#define NONBUFFERED_FLAGS	(BCE_NV_WREN)

	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-buffered)"},
non-bufferred)"}, 241 /* Entry 0110: ST M45PE20 (non-buffered flash)*/ 242 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406, 243 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE, 244 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4, 245 "Entry 0110: ST M45PE20 (256kB non-bufferred)"}, 246 /* Saifun SA25F005 (non-buffered flash) */ 247 /* strap, cfg1, & write1 need updates */ 248 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406, 249 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, 250 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE, 251 "Non-buffered flash (64kB)"}, 252 /* Fast EEPROM */ 253 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400, 254 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE, 255 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE, 256 "EEPROM - fast"}, 257 /* Expansion entry 1001 */ 258 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406, 259 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, 260 SAIFUN_FLASH_BYTE_ADDR_MASK, 0, 261 "Entry 1001"}, 262 /* Expansion entry 1010 */ 263 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406, 264 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, 265 SAIFUN_FLASH_BYTE_ADDR_MASK, 0, 266 "Entry 1010"}, 267 /* ATMEL AT45DB011B (buffered flash) */ 268 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400, 269 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE, 270 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE, 271 "Buffered flash (128kB)"}, 272 /* Expansion entry 1100 */ 273 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406, 274 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, 275 SAIFUN_FLASH_BYTE_ADDR_MASK, 0, 276 "Entry 1100"}, 277 /* Expansion entry 1101 */ 278 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406, 279 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, 280 SAIFUN_FLASH_BYTE_ADDR_MASK, 0, 281 "Entry 1101"}, 282 /* Ateml Expansion entry 1110 */ 283 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400, 284 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE, 285 BUFFERED_FLASH_BYTE_ADDR_MASK, 0, 286 "Entry 1110 (Atmel)"}, 287 /* ATMEL AT45DB021B (buffered flash) */ 288 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400, 289 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE, 290 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2, 291 "Buffered flash (256kB)"}, 292 }; 293 294 /* 295 * The BCM5709 controllers transparently handle the 296 * differences between Atmel 264 byte pages and all 297 * flash devices which use 256 byte pages, so no 298 * logical-to-physical mapping is required in the 299 * driver. 300 */ 301 static struct flash_spec flash_5709 = { 302 .flags = BCE_NV_BUFFERED, 303 .page_bits = BCM5709_FLASH_PAGE_BITS, 304 .page_size = BCM5709_FLASH_PAGE_SIZE, 305 .addr_mask = BCM5709_FLASH_BYTE_ADDR_MASK, 306 .total_size = BUFFERED_FLASH_TOTAL_SIZE * 2, 307 .name = "5709/5716 buffered flash (256kB)", 308 }; 309 310 /****************************************************************************/ 311 /* DragonFly device entry points. 
/****************************************************************************/
/* DragonFly device entry points.                                           */
/****************************************************************************/
static int	bce_probe(device_t);
static int	bce_attach(device_t);
static int	bce_detach(device_t);
static void	bce_shutdown(device_t);
static int	bce_miibus_read_reg(device_t, int, int);
static int	bce_miibus_write_reg(device_t, int, int, int);
static void	bce_miibus_statchg(device_t);

/****************************************************************************/
/* BCE Register/Memory Access Routines                                      */
/****************************************************************************/
static uint32_t	bce_reg_rd_ind(struct bce_softc *, uint32_t);
static void	bce_reg_wr_ind(struct bce_softc *, uint32_t, uint32_t);
static void	bce_shmem_wr(struct bce_softc *, uint32_t, uint32_t);
static uint32_t	bce_shmem_rd(struct bce_softc *, uint32_t);
static void	bce_ctx_wr(struct bce_softc *, uint32_t, uint32_t, uint32_t);

/****************************************************************************/
/* BCE NVRAM Access Routines                                                */
/****************************************************************************/
static int	bce_acquire_nvram_lock(struct bce_softc *);
static int	bce_release_nvram_lock(struct bce_softc *);
static void	bce_enable_nvram_access(struct bce_softc *);
static void	bce_disable_nvram_access(struct bce_softc *);
static int	bce_nvram_read_dword(struct bce_softc *, uint32_t, uint8_t *,
		    uint32_t);
static int	bce_init_nvram(struct bce_softc *);
static int	bce_nvram_read(struct bce_softc *, uint32_t, uint8_t *, int);
static int	bce_nvram_test(struct bce_softc *);

/****************************************************************************/
/* BCE DMA Allocate/Free Routines                                           */
/****************************************************************************/
static int	bce_dma_alloc(struct bce_softc *);
static void	bce_dma_free(struct bce_softc *);
static void	bce_dma_map_addr(void *, bus_dma_segment_t *, int, int);

/****************************************************************************/
/* BCE Firmware Synchronization and Load                                    */
/****************************************************************************/
static int	bce_fw_sync(struct bce_softc *, uint32_t);
static void	bce_load_rv2p_fw(struct bce_softc *, uint32_t *,
		    uint32_t, uint32_t);
static void	bce_load_cpu_fw(struct bce_softc *, struct cpu_reg *,
		    struct fw_info *);
static void	bce_start_cpu(struct bce_softc *, struct cpu_reg *);
static void	bce_halt_cpu(struct bce_softc *, struct cpu_reg *);
static void	bce_start_rxp_cpu(struct bce_softc *);
static void	bce_init_rxp_cpu(struct bce_softc *);
static void	bce_init_txp_cpu(struct bce_softc *);
static void	bce_init_tpat_cpu(struct bce_softc *);
static void	bce_init_cp_cpu(struct bce_softc *);
static void	bce_init_com_cpu(struct bce_softc *);
static void	bce_init_cpus(struct bce_softc *);
static void	bce_setup_msix_table(struct bce_softc *);
static void	bce_init_rss(struct bce_softc *);

static void	bce_stop(struct bce_softc *);
static int	bce_reset(struct bce_softc *, uint32_t);
static int	bce_chipinit(struct bce_softc *);
static int	bce_blockinit(struct bce_softc *);
static void	bce_probe_pci_caps(struct bce_softc *);
static void	bce_print_adapter_info(struct bce_softc *);
static void	bce_get_media(struct bce_softc *);
static void	bce_mgmt_init(struct bce_softc *);
static int	bce_init_ctx(struct bce_softc *);
static void	bce_get_mac_addr(struct bce_softc *);
static void	bce_set_mac_addr(struct bce_softc *);
static void	bce_set_rx_mode(struct bce_softc *);
static void	bce_coal_change(struct bce_softc *);
static void	bce_npoll_coal_change(struct bce_softc *);
static void	bce_setup_serialize(struct bce_softc *);
static void	bce_serialize_skipmain(struct bce_softc *);
static void	bce_deserialize_skipmain(struct bce_softc *);
static void	bce_set_timer_cpuid(struct bce_softc *, boolean_t);
static int	bce_alloc_intr(struct bce_softc *);
static void	bce_free_intr(struct bce_softc *);
static void	bce_try_alloc_msix(struct bce_softc *);
static void	bce_free_msix(struct bce_softc *, boolean_t);
static void	bce_setup_ring_cnt(struct bce_softc *);
static int	bce_setup_intr(struct bce_softc *);
static void	bce_teardown_intr(struct bce_softc *);
static int	bce_setup_msix(struct bce_softc *);
static void	bce_teardown_msix(struct bce_softc *, int);

static int	bce_create_tx_ring(struct bce_tx_ring *);
static void	bce_destroy_tx_ring(struct bce_tx_ring *);
static void	bce_init_tx_context(struct bce_tx_ring *);
static int	bce_init_tx_chain(struct bce_tx_ring *);
static void	bce_free_tx_chain(struct bce_tx_ring *);
static void	bce_xmit(struct bce_tx_ring *);
static int	bce_encap(struct bce_tx_ring *, struct mbuf **, int *);
static int	bce_tso_setup(struct bce_tx_ring *, struct mbuf **,
		    uint16_t *, uint16_t *);

static int	bce_create_rx_ring(struct bce_rx_ring *);
static void	bce_destroy_rx_ring(struct bce_rx_ring *);
static void	bce_init_rx_context(struct bce_rx_ring *);
static int	bce_init_rx_chain(struct bce_rx_ring *);
static void	bce_free_rx_chain(struct bce_rx_ring *);
static int	bce_newbuf_std(struct bce_rx_ring *, uint16_t *, uint16_t,
		    uint32_t *, int);
static void	bce_setup_rxdesc_std(struct bce_rx_ring *, uint16_t,
		    uint32_t *);
static struct pktinfo *bce_rss_pktinfo(struct pktinfo *, uint32_t,
		    const struct l2_fhdr *);

static void	bce_start(struct ifnet *, struct ifaltq_subque *);
static int	bce_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	bce_watchdog(struct ifaltq_subque *);
static int	bce_ifmedia_upd(struct ifnet *);
static void	bce_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static void	bce_init(void *);
#ifdef IFPOLL_ENABLE
static void	bce_npoll(struct ifnet *, struct ifpoll_info *);
static void	bce_npoll_rx(struct ifnet *, void *, int);
static void	bce_npoll_tx(struct ifnet *, void *, int);
static void	bce_npoll_status(struct ifnet *);
static void	bce_npoll_rx_pack(struct ifnet *, void *, int);
#endif
static void	bce_serialize(struct ifnet *, enum ifnet_serialize);
static void	bce_deserialize(struct ifnet *, enum ifnet_serialize);
static int	bce_tryserialize(struct ifnet *, enum ifnet_serialize);
#ifdef INVARIANTS
static void	bce_serialize_assert(struct ifnet *, enum ifnet_serialize,
		    boolean_t);
#endif

static void	bce_intr(struct bce_softc *);
static void	bce_intr_legacy(void *);
static void	bce_intr_msi(void *);
static void	bce_intr_msi_oneshot(void *);
static void	bce_intr_msix_rxtx(void *);
static void	bce_intr_msix_rx(void *);
static void	bce_tx_intr(struct bce_tx_ring *, uint16_t);
static void	bce_rx_intr(struct bce_rx_ring *, int, uint16_t);
static void	bce_phy_intr(struct bce_softc *);
static void	bce_disable_intr(struct bce_softc *);
static void	bce_enable_intr(struct bce_softc *);
static void	bce_reenable_intr(struct bce_rx_ring *);
static void	bce_check_msi(void *);

static void	bce_stats_update(struct bce_softc *);
static void	bce_tick(void *);
static void	bce_tick_serialized(struct bce_softc *);
static void	bce_pulse(void *);

static void	bce_add_sysctls(struct bce_softc *);
static int	bce_sysctl_tx_bds_int(SYSCTL_HANDLER_ARGS);
static int	bce_sysctl_tx_bds(SYSCTL_HANDLER_ARGS);
static int	bce_sysctl_tx_ticks_int(SYSCTL_HANDLER_ARGS);
static int	bce_sysctl_tx_ticks(SYSCTL_HANDLER_ARGS);
static int	bce_sysctl_rx_bds_int(SYSCTL_HANDLER_ARGS);
static int	bce_sysctl_rx_bds(SYSCTL_HANDLER_ARGS);
static int	bce_sysctl_rx_ticks_int(SYSCTL_HANDLER_ARGS);
static int	bce_sysctl_rx_ticks(SYSCTL_HANDLER_ARGS);
#ifdef IFPOLL_ENABLE
static int	bce_sysctl_npoll_offset(SYSCTL_HANDLER_ARGS);
#endif
static int	bce_sysctl_coal_change(SYSCTL_HANDLER_ARGS,
		    uint32_t *, uint32_t);

/*
 * NOTE:
 * Don't set bce_tx_ticks_int/bce_tx_ticks to 1023.  Linux's bnx2
 * uses 1023 as the TX ticks limit.  However, using 1023 will
 * cause the 5708(B2) to generate extra interrupts (~2000/s) even
 * when there is _no_ network activity on the NIC.
 */
static uint32_t	bce_tx_bds_int = 255;		/* bcm: 20 */
static uint32_t	bce_tx_bds = 255;		/* bcm: 20 */
static uint32_t	bce_tx_ticks_int = 1022;	/* bcm: 80 */
static uint32_t	bce_tx_ticks = 1022;		/* bcm: 80 */
static uint32_t	bce_rx_bds_int = 128;		/* bcm: 6 */
static uint32_t	bce_rx_bds = 0;			/* bcm: 6 */
static uint32_t	bce_rx_ticks_int = 150;		/* bcm: 18 */
static uint32_t	bce_rx_ticks = 150;		/* bcm: 18 */

static int	bce_tx_wreg = 8;

static int	bce_msi_enable = 1;
static int	bce_msix_enable = 1;

static int	bce_rx_pages = RX_PAGES_DEFAULT;
static int	bce_tx_pages = TX_PAGES_DEFAULT;

static int	bce_rx_rings = 0;	/* auto */
static int	bce_tx_rings = 0;	/* auto */

TUNABLE_INT("hw.bce.tx_bds_int", &bce_tx_bds_int);
TUNABLE_INT("hw.bce.tx_bds", &bce_tx_bds);
TUNABLE_INT("hw.bce.tx_ticks_int", &bce_tx_ticks_int);
TUNABLE_INT("hw.bce.tx_ticks", &bce_tx_ticks);
TUNABLE_INT("hw.bce.rx_bds_int", &bce_rx_bds_int);
TUNABLE_INT("hw.bce.rx_bds", &bce_rx_bds);
TUNABLE_INT("hw.bce.rx_ticks_int", &bce_rx_ticks_int);
TUNABLE_INT("hw.bce.rx_ticks", &bce_rx_ticks);
TUNABLE_INT("hw.bce.msi.enable", &bce_msi_enable);
TUNABLE_INT("hw.bce.msix.enable", &bce_msix_enable);
TUNABLE_INT("hw.bce.rx_pages", &bce_rx_pages);
TUNABLE_INT("hw.bce.tx_pages", &bce_tx_pages);
TUNABLE_INT("hw.bce.tx_wreg", &bce_tx_wreg);
TUNABLE_INT("hw.bce.tx_rings", &bce_tx_rings);
TUNABLE_INT("hw.bce.rx_rings", &bce_rx_rings);
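
/*
 * The tunables above can be set from /boot/loader.conf before the
 * driver loads.  Example (illustrative values, not recommendations):
 *
 *	hw.bce.msix.enable="1"
 *	hw.bce.rx_rings="0"		# 0 lets the driver pick the count
 *	hw.bce.tx_wreg="8"
 *	hw.bce.rx_ticks_int="150"	# RX coalescing ticks, intr path
 */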
/****************************************************************************/
/* DragonFly device dispatch table.                                         */
/****************************************************************************/
static device_method_t bce_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		bce_probe),
	DEVMETHOD(device_attach,	bce_attach),
	DEVMETHOD(device_detach,	bce_detach),
	DEVMETHOD(device_shutdown,	bce_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	bce_miibus_read_reg),
	DEVMETHOD(miibus_writereg,	bce_miibus_write_reg),
	DEVMETHOD(miibus_statchg,	bce_miibus_statchg),

	DEVMETHOD_END
};

static driver_t bce_driver = {
	"bce",
	bce_methods,
	sizeof(struct bce_softc)
};

static devclass_t bce_devclass;

DECLARE_DUMMY_MODULE(if_bce);
MODULE_DEPEND(bce, miibus, 1, 1, 1);
DRIVER_MODULE(if_bce, pci, bce_driver, bce_devclass, NULL, NULL);
DRIVER_MODULE(miibus, bce, miibus_driver, miibus_devclass, NULL, NULL);

/****************************************************************************/
/* Device probe function.                                                   */
/*                                                                          */
/* Compares the device to the driver's list of supported devices and       */
/* reports back to the OS whether this is the right driver for the device. */
/*                                                                          */
/* Returns:                                                                 */
/*   BUS_PROBE_DEFAULT on success, positive value on failure.              */
/****************************************************************************/
static int
bce_probe(device_t dev)
{
	struct bce_type *t;
	uint16_t vid, did, svid, sdid;

	/* Get the data for the device to be probed. */
	vid  = pci_get_vendor(dev);
	did  = pci_get_device(dev);
	svid = pci_get_subvendor(dev);
	sdid = pci_get_subdevice(dev);

	/* Look through the list of known devices for a match. */
	for (t = bce_devs; t->bce_name != NULL; ++t) {
		if (vid == t->bce_vid && did == t->bce_did &&
		    (svid == t->bce_svid || t->bce_svid == PCI_ANY_ID) &&
		    (sdid == t->bce_sdid || t->bce_sdid == PCI_ANY_ID)) {
			uint32_t revid = pci_read_config(dev, PCIR_REVID, 4);
			char *descbuf;

			descbuf = kmalloc(BCE_DEVDESC_MAX, M_TEMP, M_WAITOK);

			/*
			 * Print out the device identity; e.g. revision id
			 * 0x20 is reported as "(C0)".
			 */
			ksnprintf(descbuf, BCE_DEVDESC_MAX, "%s (%c%d)",
			    t->bce_name,
			    ((revid & 0xf0) >> 4) + 'A', revid & 0xf);

			device_set_desc_copy(dev, descbuf);
			kfree(descbuf, M_TEMP);
			return 0;
		}
	}
	return ENXIO;
}

/****************************************************************************/
/* Adapter information print function.                                      */
/*                                                                          */
/* Prints the adapter's ASIC, bus and firmware information to the console. */
/*                                                                          */
/* Returns:                                                                 */
/*   None.                                                                  */
/****************************************************************************/
static void
bce_print_adapter_info(struct bce_softc *sc)
{
	device_printf(sc->bce_dev, "ASIC (0x%08X); ", sc->bce_chipid);

	kprintf("Rev (%c%d); ", ((BCE_CHIP_ID(sc) & 0xf000) >> 12) + 'A',
	    ((BCE_CHIP_ID(sc) & 0x0ff0) >> 4));

	/* Bus info. */
	if (sc->bce_flags & BCE_PCIE_FLAG) {
		kprintf("Bus (PCIe x%d, ", sc->link_width);
		switch (sc->link_speed) {
		case 1:
			kprintf("2.5Gbps); ");
			break;
		case 2:
			kprintf("5Gbps); ");
			break;
		default:
			kprintf("Unknown link speed); ");
			break;
		}
	} else {
		kprintf("Bus (PCI%s, %s, %dMHz); ",
		    ((sc->bce_flags & BCE_PCIX_FLAG) ? "-X" : ""),
"-X" : ""), 631 ((sc->bce_flags & BCE_PCI_32BIT_FLAG) ? "32-bit" : "64-bit"), 632 sc->bus_speed_mhz); 633 } 634 635 /* Firmware version and device features. */ 636 kprintf("B/C (%s)", sc->bce_bc_ver); 637 638 if ((sc->bce_flags & BCE_MFW_ENABLE_FLAG) || 639 (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG)) { 640 kprintf("; Flags("); 641 if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) 642 kprintf("MFW[%s]", sc->bce_mfw_ver); 643 if (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG) 644 kprintf(" 2.5G"); 645 kprintf(")"); 646 } 647 kprintf("\n"); 648 } 649 650 /****************************************************************************/ 651 /* PCI Capabilities Probe Function. */ 652 /* */ 653 /* Walks the PCI capabiites list for the device to find what features are */ 654 /* supported. */ 655 /* */ 656 /* Returns: */ 657 /* None. */ 658 /****************************************************************************/ 659 static void 660 bce_probe_pci_caps(struct bce_softc *sc) 661 { 662 device_t dev = sc->bce_dev; 663 uint8_t ptr; 664 665 if (pci_is_pcix(dev)) 666 sc->bce_cap_flags |= BCE_PCIX_CAPABLE_FLAG; 667 668 ptr = pci_get_pciecap_ptr(dev); 669 if (ptr) { 670 uint16_t link_status = pci_read_config(dev, ptr + 0x12, 2); 671 672 sc->link_speed = link_status & 0xf; 673 sc->link_width = (link_status >> 4) & 0x3f; 674 sc->bce_cap_flags |= BCE_PCIE_CAPABLE_FLAG; 675 sc->bce_flags |= BCE_PCIE_FLAG; 676 } 677 } 678 679 /****************************************************************************/ 680 /* Device attach function. */ 681 /* */ 682 /* Allocates device resources, performs secondary chip identification, */ 683 /* resets and initializes the hardware, and initializes driver instance */ 684 /* variables. */ 685 /* */ 686 /* Returns: */ 687 /* 0 on success, positive value on failure. */ 688 /****************************************************************************/ 689 static int 690 bce_attach(device_t dev) 691 { 692 struct bce_softc *sc = device_get_softc(dev); 693 struct ifnet *ifp = &sc->arpcom.ac_if; 694 uint32_t val; 695 int rid, rc = 0; 696 int i, j; 697 struct mii_probe_args mii_args; 698 uintptr_t mii_priv = 0; 699 #ifdef IFPOLL_ENABLE 700 int offset, offset_def; 701 #endif 702 703 sc->bce_dev = dev; 704 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 705 706 lwkt_serialize_init(&sc->main_serialize); 707 for (i = 0; i < BCE_MSIX_MAX; ++i) { 708 struct bce_msix_data *msix = &sc->bce_msix[i]; 709 710 msix->msix_cpuid = -1; 711 msix->msix_rid = -1; 712 } 713 714 pci_enable_busmaster(dev); 715 716 bce_probe_pci_caps(sc); 717 718 /* Allocate PCI memory resources. */ 719 rid = PCIR_BAR(0); 720 sc->bce_res_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 721 RF_ACTIVE | PCI_RF_DENSE); 722 if (sc->bce_res_mem == NULL) { 723 device_printf(dev, "PCI memory allocation failed\n"); 724 return ENXIO; 725 } 726 sc->bce_btag = rman_get_bustag(sc->bce_res_mem); 727 sc->bce_bhandle = rman_get_bushandle(sc->bce_res_mem); 728 729 /* 730 * Configure byte swap and enable indirect register access. 731 * Rely on CPU to do target byte swapping on big endian systems. 732 * Access to registers outside of PCI configurtion space are not 733 * valid until this is done. 734 */ 735 pci_write_config(dev, BCE_PCICFG_MISC_CONFIG, 736 BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA | 737 BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP, 4); 738 739 /* Save ASIC revsion info. */ 740 sc->bce_chipid = REG_RD(sc, BCE_MISC_ID); 741 742 /* Weed out any non-production controller revisions. 
	switch (BCE_CHIP_ID(sc)) {
	case BCE_CHIP_ID_5706_A0:
	case BCE_CHIP_ID_5706_A1:
	case BCE_CHIP_ID_5708_A0:
	case BCE_CHIP_ID_5708_B0:
	case BCE_CHIP_ID_5709_A0:
	case BCE_CHIP_ID_5709_B0:
	case BCE_CHIP_ID_5709_B1:
#ifdef foo
	/* 5709C B2 seems to work fine */
	case BCE_CHIP_ID_5709_B2:
#endif
		device_printf(dev, "Unsupported chip id 0x%08x!\n",
		    BCE_CHIP_ID(sc));
		rc = ENODEV;
		goto fail;
	}

	mii_priv |= BRGPHY_FLAG_WIRESPEED;
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) {
		if (BCE_CHIP_REV(sc) == BCE_CHIP_REV_Ax ||
		    BCE_CHIP_REV(sc) == BCE_CHIP_REV_Bx)
			mii_priv |= BRGPHY_FLAG_NO_EARLYDAC;
	} else {
		mii_priv |= BRGPHY_FLAG_BER_BUG;
	}

	/*
	 * Find the base address for shared memory access.
	 * Newer versions of bootcode use a signature and offset
	 * while older versions use a fixed address.
	 */
	val = REG_RD_IND(sc, BCE_SHM_HDR_SIGNATURE);
	if ((val & BCE_SHM_HDR_SIGNATURE_SIG_MASK) ==
	    BCE_SHM_HDR_SIGNATURE_SIG) {
		/* Multi-port devices use different offsets in shared memory. */
		sc->bce_shmem_base = REG_RD_IND(sc,
		    BCE_SHM_HDR_ADDR_0 + (pci_get_function(sc->bce_dev) << 2));
	} else {
		sc->bce_shmem_base = HOST_VIEW_SHMEM_BASE;
	}

	/*
	 * Fetch the bootcode revision.  Each byte of the packed version
	 * word is formatted as a decimal number without leading zeros,
	 * e.g. 0x04020100 yields "4.2.1".
	 */
	val = bce_shmem_rd(sc, BCE_DEV_INFO_BC_REV);
	for (i = 0, j = 0; i < 3; i++) {
		uint8_t num;
		int k, skip0;

		num = (uint8_t)(val >> (24 - (i * 8)));
		for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
			if (num >= k || !skip0 || k == 1) {
				sc->bce_bc_ver[j++] = (num / k) + '0';
				skip0 = 0;
			}
		}
		if (i != 2)
			sc->bce_bc_ver[j++] = '.';
	}

	/* Check if any management firmware is running. */
	val = bce_shmem_rd(sc, BCE_PORT_FEATURE);
	if (val & BCE_PORT_FEATURE_ASF_ENABLED) {
		sc->bce_flags |= BCE_MFW_ENABLE_FLAG;

		/* Allow time for firmware to enter the running state. */
		for (i = 0; i < 30; i++) {
			val = bce_shmem_rd(sc, BCE_BC_STATE_CONDITION);
			if (val & BCE_CONDITION_MFW_RUN_MASK)
				break;
			DELAY(10000);
		}
	}

	/* Check the current bootcode state. */
	val = bce_shmem_rd(sc, BCE_BC_STATE_CONDITION) &
	    BCE_CONDITION_MFW_RUN_MASK;
	if (val != BCE_CONDITION_MFW_RUN_UNKNOWN &&
	    val != BCE_CONDITION_MFW_RUN_NONE) {
		uint32_t addr = bce_shmem_rd(sc, BCE_MFW_VER_PTR);

		for (i = 0, j = 0; j < 3; j++) {
			val = bce_reg_rd_ind(sc, addr + j * 4);
			val = bswap32(val);
			memcpy(&sc->bce_mfw_ver[i], &val, 4);
			i += 4;
		}
	}

	/* Get PCI bus information (speed and type). */
	val = REG_RD(sc, BCE_PCICFG_MISC_STATUS);
	if (val & BCE_PCICFG_MISC_STATUS_PCIX_DET) {
		uint32_t clkreg;

		sc->bce_flags |= BCE_PCIX_FLAG;

		clkreg = REG_RD(sc, BCE_PCICFG_PCI_CLOCK_CONTROL_BITS) &
		    BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			sc->bus_speed_mhz = 133;
			break;

		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			sc->bus_speed_mhz = 100;
			break;

		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			sc->bus_speed_mhz = 66;
			break;

		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			sc->bus_speed_mhz = 50;
			break;

		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			sc->bus_speed_mhz = 33;
			break;
		}
	} else {
		if (val & BCE_PCICFG_MISC_STATUS_M66EN)
			sc->bus_speed_mhz = 66;
		else
			sc->bus_speed_mhz = 33;
	}

	if (val & BCE_PCICFG_MISC_STATUS_32BIT_DET)
		sc->bce_flags |= BCE_PCI_32BIT_FLAG;

	/* Reset the controller. */
	rc = bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
	if (rc != 0)
		goto fail;

	/* Initialize the controller. */
	rc = bce_chipinit(sc);
	if (rc != 0) {
		device_printf(dev, "Controller initialization failed!\n");
		goto fail;
	}

	/* Perform NVRAM test. */
	rc = bce_nvram_test(sc);
	if (rc != 0) {
		device_printf(dev, "NVRAM test failed!\n");
		goto fail;
	}

	/* Fetch the permanent Ethernet MAC address. */
	bce_get_mac_addr(sc);

	/*
	 * Trip points control how many BDs should be ready before
	 * generating an interrupt, while ticks control how long a BD
	 * can sit in the chain before generating an interrupt.  Set
	 * the default values for the RX and TX rings.
	 */

#ifdef BCE_DEBUG
	/* Force more frequent interrupts. */
	sc->bce_tx_quick_cons_trip_int = 1;
	sc->bce_tx_quick_cons_trip = 1;
	sc->bce_tx_ticks_int = 0;
	sc->bce_tx_ticks = 0;

	sc->bce_rx_quick_cons_trip_int = 1;
	sc->bce_rx_quick_cons_trip = 1;
	sc->bce_rx_ticks_int = 0;
	sc->bce_rx_ticks = 0;
#else
	sc->bce_tx_quick_cons_trip_int = bce_tx_bds_int;
	sc->bce_tx_quick_cons_trip = bce_tx_bds;
	sc->bce_tx_ticks_int = bce_tx_ticks_int;
	sc->bce_tx_ticks = bce_tx_ticks;

	sc->bce_rx_quick_cons_trip_int = bce_rx_bds_int;
	sc->bce_rx_quick_cons_trip = bce_rx_bds;
	sc->bce_rx_ticks_int = bce_rx_ticks_int;
	sc->bce_rx_ticks = bce_rx_ticks;
#endif

	/* Update statistics once every second. */
	sc->bce_stats_ticks = 1000000 & 0xffff00;

	/* Find the media type for the adapter. */
	bce_get_media(sc);

	/* Find out RX/TX ring count */
	bce_setup_ring_cnt(sc);

	/* Allocate DMA memory resources. */
	rc = bce_dma_alloc(sc);
	if (rc != 0) {
		device_printf(dev, "DMA resource allocation failed!\n");
		goto fail;
	}

#ifdef IFPOLL_ENABLE
	/*
	 * NPOLLING RX/TX CPU offset
	 */
	if (sc->rx_ring_cnt2 == ncpus2) {
		offset = 0;
	} else {
		offset_def = (sc->rx_ring_cnt2 * device_get_unit(dev)) % ncpus2;
		offset = device_getenv_int(dev, "npoll.offset", offset_def);
		if (offset >= ncpus2 ||
		    offset % sc->rx_ring_cnt2 != 0) {
			device_printf(dev, "invalid npoll.offset %d, use %d\n",
			    offset, offset_def);
			offset = offset_def;
		}
	}
	sc->npoll_ofs = offset;
#endif

	/* Allocate PCI IRQ resources. */
	rc = bce_alloc_intr(sc);
	if (rc != 0)
		goto fail;

	/* Setup serializer */
	bce_setup_serialize(sc);

	/* Initialize the ifnet interface. */
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = bce_ioctl;
	ifp->if_start = bce_start;
	ifp->if_init = bce_init;
	ifp->if_serialize = bce_serialize;
	ifp->if_deserialize = bce_deserialize;
	ifp->if_tryserialize = bce_tryserialize;
#ifdef INVARIANTS
	ifp->if_serialize_assert = bce_serialize_assert;
#endif
#ifdef IFPOLL_ENABLE
	ifp->if_npoll = bce_npoll;
#endif

	ifp->if_mtu = ETHERMTU;
	ifp->if_hwassist = BCE_CSUM_FEATURES | CSUM_TSO;
	ifp->if_capabilities = BCE_IF_CAPABILITIES;
	if (sc->rx_ring_cnt > 1)
		ifp->if_capabilities |= IFCAP_RSS;
	ifp->if_capenable = ifp->if_capabilities;

	if (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG)
		ifp->if_baudrate = IF_Gbps(2.5);
	else
		ifp->if_baudrate = IF_Gbps(1);

	ifq_set_maxlen(&ifp->if_snd, USABLE_TX_BD(&sc->tx_rings[0]));
	ifq_set_ready(&ifp->if_snd);
	ifq_set_subq_cnt(&ifp->if_snd, sc->tx_ring_cnt);

	if (sc->tx_ring_cnt > 1) {
		ifp->if_mapsubq = ifq_mapsubq_mask;
		ifq_set_subq_mask(&ifp->if_snd, sc->tx_ring_cnt - 1);
	}

	/*
	 * Look for our PHY.
	 */
	mii_probe_args_init(&mii_args, bce_ifmedia_upd, bce_ifmedia_sts);
	mii_args.mii_probemask = 1 << sc->bce_phy_addr;
	mii_args.mii_privtag = MII_PRIVTAG_BRGPHY;
	mii_args.mii_priv = mii_priv;

	rc = mii_probe(dev, &sc->bce_miibus, &mii_args);
	if (rc != 0) {
		device_printf(dev, "PHY probe failed!\n");
		goto fail;
	}

	/* Attach to the Ethernet interface list. */
	ether_ifattach(ifp, sc->eaddr, NULL);

	/* Setup TX rings and subqueues */
	for (i = 0; i < sc->tx_ring_cnt; ++i) {
		struct ifaltq_subque *ifsq = ifq_get_subq(&ifp->if_snd, i);
		struct bce_tx_ring *txr = &sc->tx_rings[i];

		ifsq_set_cpuid(ifsq, sc->bce_msix[i].msix_cpuid);
		ifsq_set_priv(ifsq, txr);
		ifsq_set_hw_serialize(ifsq, &txr->tx_serialize);
		txr->ifsq = ifsq;

		ifsq_watchdog_init(&txr->tx_watchdog, ifsq, bce_watchdog);
	}

	callout_init_mp(&sc->bce_tick_callout);
	callout_init_mp(&sc->bce_pulse_callout);
	callout_init_mp(&sc->bce_ckmsi_callout);

	rc = bce_setup_intr(sc);
	if (rc != 0) {
		device_printf(dev, "Failed to setup IRQ!\n");
		ether_ifdetach(ifp);
		goto fail;
	}

	/* Set timer CPUID */
	bce_set_timer_cpuid(sc, FALSE);

	/* Add the supported sysctls to the kernel. */
	bce_add_sysctls(sc);

	/*
	 * The chip reset earlier notified the bootcode that
	 * a driver is present.  We now need to start our pulse
	 * routine so that the bootcode is reminded that we're
	 * still running.
	 */
We now need to start our pulse 1059 * routine so that the bootcode is reminded that we're 1060 * still running. 1061 */ 1062 bce_pulse(sc); 1063 1064 /* Get the firmware running so IPMI still works */ 1065 bce_mgmt_init(sc); 1066 1067 if (bootverbose) 1068 bce_print_adapter_info(sc); 1069 1070 return 0; 1071 fail: 1072 bce_detach(dev); 1073 return(rc); 1074 } 1075 1076 /****************************************************************************/ 1077 /* Device detach function. */ 1078 /* */ 1079 /* Stops the controller, resets the controller, and releases resources. */ 1080 /* */ 1081 /* Returns: */ 1082 /* 0 on success, positive value on failure. */ 1083 /****************************************************************************/ 1084 static int 1085 bce_detach(device_t dev) 1086 { 1087 struct bce_softc *sc = device_get_softc(dev); 1088 1089 if (device_is_attached(dev)) { 1090 struct ifnet *ifp = &sc->arpcom.ac_if; 1091 uint32_t msg; 1092 1093 ifnet_serialize_all(ifp); 1094 1095 /* Stop and reset the controller. */ 1096 callout_stop(&sc->bce_pulse_callout); 1097 bce_stop(sc); 1098 if (sc->bce_flags & BCE_NO_WOL_FLAG) 1099 msg = BCE_DRV_MSG_CODE_UNLOAD_LNK_DN; 1100 else 1101 msg = BCE_DRV_MSG_CODE_UNLOAD; 1102 bce_reset(sc, msg); 1103 1104 bce_teardown_intr(sc); 1105 1106 ifnet_deserialize_all(ifp); 1107 1108 ether_ifdetach(ifp); 1109 } 1110 1111 /* If we have a child device on the MII bus remove it too. */ 1112 if (sc->bce_miibus) 1113 device_delete_child(dev, sc->bce_miibus); 1114 bus_generic_detach(dev); 1115 1116 bce_free_intr(sc); 1117 1118 if (sc->bce_res_mem != NULL) { 1119 bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0), 1120 sc->bce_res_mem); 1121 } 1122 1123 bce_dma_free(sc); 1124 1125 if (sc->bce_sysctl_tree != NULL) 1126 sysctl_ctx_free(&sc->bce_sysctl_ctx); 1127 1128 if (sc->serializes != NULL) 1129 kfree(sc->serializes, M_DEVBUF); 1130 1131 return 0; 1132 } 1133 1134 /****************************************************************************/ 1135 /* Device shutdown function. */ 1136 /* */ 1137 /* Stops and resets the controller. */ 1138 /* */ 1139 /* Returns: */ 1140 /* Nothing */ 1141 /****************************************************************************/ 1142 static void 1143 bce_shutdown(device_t dev) 1144 { 1145 struct bce_softc *sc = device_get_softc(dev); 1146 struct ifnet *ifp = &sc->arpcom.ac_if; 1147 uint32_t msg; 1148 1149 ifnet_serialize_all(ifp); 1150 1151 bce_stop(sc); 1152 if (sc->bce_flags & BCE_NO_WOL_FLAG) 1153 msg = BCE_DRV_MSG_CODE_UNLOAD_LNK_DN; 1154 else 1155 msg = BCE_DRV_MSG_CODE_UNLOAD; 1156 bce_reset(sc, msg); 1157 1158 ifnet_deserialize_all(ifp); 1159 } 1160 1161 /****************************************************************************/ 1162 /* Indirect register read. */ 1163 /* */ 1164 /* Reads NetXtreme II registers using an index/data register pair in PCI */ 1165 /* configuration space. Using this mechanism avoids issues with posted */ 1166 /* reads but is much slower than memory-mapped I/O. */ 1167 /* */ 1168 /* Returns: */ 1169 /* The value of the register. */ 1170 /****************************************************************************/ 1171 static uint32_t 1172 bce_reg_rd_ind(struct bce_softc *sc, uint32_t offset) 1173 { 1174 device_t dev = sc->bce_dev; 1175 1176 pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4); 1177 return pci_read_config(dev, BCE_PCICFG_REG_WINDOW, 4); 1178 } 1179 1180 /****************************************************************************/ 1181 /* Indirect register write. 
/****************************************************************************/
/* Indirect register write.                                                 */
/*                                                                          */
/* Writes NetXtreme II registers using an index/data register pair in PCI  */
/* configuration space.  Using this mechanism avoids issues with posted    */
/* writes but is much slower than memory-mapped I/O.                       */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_reg_wr_ind(struct bce_softc *sc, uint32_t offset, uint32_t val)
{
	device_t dev = sc->bce_dev;

	pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4);
	pci_write_config(dev, BCE_PCICFG_REG_WINDOW, val, 4);
}

/****************************************************************************/
/* Shared memory write.                                                     */
/*                                                                          */
/* Writes NetXtreme II shared memory region.                                */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_shmem_wr(struct bce_softc *sc, uint32_t offset, uint32_t val)
{
	bce_reg_wr_ind(sc, sc->bce_shmem_base + offset, val);
}

/****************************************************************************/
/* Shared memory read.                                                      */
/*                                                                          */
/* Reads NetXtreme II shared memory region.                                 */
/*                                                                          */
/* Returns:                                                                 */
/*   The 32 bit value read.                                                 */
/****************************************************************************/
static uint32_t
bce_shmem_rd(struct bce_softc *sc, uint32_t offset)
{
	return bce_reg_rd_ind(sc, sc->bce_shmem_base + offset);
}

/****************************************************************************/
/* Context memory write.                                                    */
/*                                                                          */
/* The NetXtreme II controller uses context memory to track connection     */
/* information for L2 and higher network protocols.                         */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_ctx_wr(struct bce_softc *sc, uint32_t cid_addr, uint32_t ctx_offset,
    uint32_t ctx_val)
{
	uint32_t idx, offset = ctx_offset + cid_addr;
	uint32_t val, retry_cnt = 5;

	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
		REG_WR(sc, BCE_CTX_CTX_DATA, ctx_val);
		REG_WR(sc, BCE_CTX_CTX_CTRL,
		    (offset | BCE_CTX_CTX_CTRL_WRITE_REQ));

		for (idx = 0; idx < retry_cnt; idx++) {
			val = REG_RD(sc, BCE_CTX_CTX_CTRL);
			if ((val & BCE_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			DELAY(5);
		}

		if (val & BCE_CTX_CTX_CTRL_WRITE_REQ) {
			device_printf(sc->bce_dev,
			    "Unable to write CTX memory: "
			    "cid_addr = 0x%08X, offset = 0x%08X!\n",
			    cid_addr, ctx_offset);
		}
	} else {
		REG_WR(sc, BCE_CTX_DATA_ADR, offset);
		REG_WR(sc, BCE_CTX_DATA, ctx_val);
	}
}

/****************************************************************************/
/* PHY register read.                                                       */
/*                                                                          */
/* Implements register reads on the MII bus.                                */
/*                                                                          */
/* Returns:                                                                 */
/*   The value of the register.                                             */
/****************************************************************************/
static int
bce_miibus_read_reg(device_t dev, int phy, int reg)
{
	struct bce_softc *sc = device_get_softc(dev);
	uint32_t val;
	int i;

	/* Make sure we are accessing the correct PHY address. */
	KASSERT(phy == sc->bce_phy_addr,
	    ("invalid phyno %d, should be %d\n", phy, sc->bce_phy_addr));

	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val = REG_RD(sc, BCE_EMAC_MDIO_MODE);
		val &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BCE_EMAC_MDIO_MODE, val);
		REG_RD(sc, BCE_EMAC_MDIO_MODE);

		DELAY(40);
	}

	val = BCE_MIPHY(phy) | BCE_MIREG(reg) |
	    BCE_EMAC_MDIO_COMM_COMMAND_READ | BCE_EMAC_MDIO_COMM_DISEXT |
	    BCE_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(sc, BCE_EMAC_MDIO_COMM, val);

	for (i = 0; i < BCE_PHY_TIMEOUT; i++) {
		DELAY(10);

		val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
		if (!(val & BCE_EMAC_MDIO_COMM_START_BUSY)) {
			DELAY(5);

			val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
			val &= BCE_EMAC_MDIO_COMM_DATA;
			break;
		}
	}

	if (val & BCE_EMAC_MDIO_COMM_START_BUSY) {
		if_printf(&sc->arpcom.ac_if,
		    "Error: PHY read timeout! phy = %d, reg = 0x%04X\n",
		    phy, reg);
		val = 0x0;
	} else {
		val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
	}

	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val = REG_RD(sc, BCE_EMAC_MDIO_MODE);
		val |= BCE_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BCE_EMAC_MDIO_MODE, val);
		REG_RD(sc, BCE_EMAC_MDIO_MODE);

		DELAY(40);
	}
	return (val & 0xffff);
}

/****************************************************************************/
/* PHY register write.                                                      */
/*                                                                          */
/* Implements register writes on the MII bus.                               */
/*                                                                          */
/* Returns:                                                                 */
/*   0; a write timeout is logged but not returned to the caller.          */
/****************************************************************************/
static int
bce_miibus_write_reg(device_t dev, int phy, int reg, int val)
{
	struct bce_softc *sc = device_get_softc(dev);
	uint32_t val1;
	int i;

	/* Make sure we are accessing the correct PHY address. */
	KASSERT(phy == sc->bce_phy_addr,
	    ("invalid phyno %d, should be %d\n", phy, sc->bce_phy_addr));

	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE);
		val1 &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BCE_EMAC_MDIO_MODE, val1);
		REG_RD(sc, BCE_EMAC_MDIO_MODE);

		DELAY(40);
	}

	val1 = BCE_MIPHY(phy) | BCE_MIREG(reg) | val |
	    BCE_EMAC_MDIO_COMM_COMMAND_WRITE |
	    BCE_EMAC_MDIO_COMM_START_BUSY | BCE_EMAC_MDIO_COMM_DISEXT;
	REG_WR(sc, BCE_EMAC_MDIO_COMM, val1);

	for (i = 0; i < BCE_PHY_TIMEOUT; i++) {
		DELAY(10);

		val1 = REG_RD(sc, BCE_EMAC_MDIO_COMM);
		if (!(val1 & BCE_EMAC_MDIO_COMM_START_BUSY)) {
			DELAY(5);
			break;
		}
	}

	if (val1 & BCE_EMAC_MDIO_COMM_START_BUSY)
		if_printf(&sc->arpcom.ac_if, "PHY write timeout!\n");

	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE);
		val1 |= BCE_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BCE_EMAC_MDIO_MODE, val1);
		REG_RD(sc, BCE_EMAC_MDIO_MODE);

		DELAY(40);
	}
	return 0;
}
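
/*
 * Both MDIO routines above bracket the transaction the same way when
 * the PHY is normally auto-polled; in outline (illustrative summary,
 * not a separate helper in this driver):
 *
 *	clear BCE_EMAC_MDIO_MODE_AUTO_POLL, DELAY(40)
 *	write the command word to BCE_EMAC_MDIO_COMM
 *	poll until BCE_EMAC_MDIO_COMM_START_BUSY clears (BCE_PHY_TIMEOUT)
 *	set BCE_EMAC_MDIO_MODE_AUTO_POLL again, DELAY(40)
 */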
/****************************************************************************/
/* MII bus status change.                                                   */
/*                                                                          */
/* Called by the MII bus driver when the PHY establishes link to set the   */
/* MAC interface registers.                                                 */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_miibus_statchg(device_t dev)
{
	struct bce_softc *sc = device_get_softc(dev);
	struct mii_data *mii = device_get_softc(sc->bce_miibus);

	BCE_CLRBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT);

	/*
	 * Set MII or GMII interface based on the speed negotiated
	 * by the PHY.
	 */
	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
	    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) {
		BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT_GMII);
	} else {
		BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT_MII);
	}

	/*
	 * Set half or full duplex based on the duplex mode negotiated
	 * by the PHY.
	 */
	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
		BCE_CLRBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_HALF_DUPLEX);
	} else {
		BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_HALF_DUPLEX);
	}
}

/****************************************************************************/
/* Acquire NVRAM lock.                                                      */
/*                                                                          */
/* Before the NVRAM can be accessed the caller must acquire an NVRAM lock. */
/* Lock 0 is reserved, lock 1 is used by firmware and lock 2 is for use    */
/* by the driver.                                                           */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/
static int
bce_acquire_nvram_lock(struct bce_softc *sc)
{
	uint32_t val;
	int j;

	/* Request access to the flash interface. */
	REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_SET2);
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		val = REG_RD(sc, BCE_NVM_SW_ARB);
		if (val & BCE_NVM_SW_ARB_ARB_ARB2)
			break;

		DELAY(5);
	}

	if (j >= NVRAM_TIMEOUT_COUNT)
		return EBUSY;
	return 0;
}

/****************************************************************************/
/* Release NVRAM lock.                                                      */
/*                                                                          */
/* When the caller is finished accessing NVRAM the lock must be released.  */
/* Lock 0 is reserved, lock 1 is used by firmware and lock 2 is for use    */
/* by the driver.                                                           */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/
static int
bce_release_nvram_lock(struct bce_softc *sc)
{
	int j;
	uint32_t val;

	/*
	 * Relinquish nvram interface.
	 */
	REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_CLR2);

	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		val = REG_RD(sc, BCE_NVM_SW_ARB);
		if (!(val & BCE_NVM_SW_ARB_ARB_ARB2))
			break;

		DELAY(5);
	}

	if (j >= NVRAM_TIMEOUT_COUNT)
		return EBUSY;
	return 0;
}
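
/*
 * Sketch of the full NVRAM access sequence built from the routines in
 * this section; bce_nvram_read() below is the real implementation:
 *
 *	if (bce_acquire_nvram_lock(sc) == 0) {
 *		bce_enable_nvram_access(sc);
 *		rc = bce_nvram_read_dword(sc, offset, buf,
 *		    BCE_NVM_COMMAND_FIRST | BCE_NVM_COMMAND_LAST);
 *		bce_disable_nvram_access(sc);
 *		bce_release_nvram_lock(sc);
 *	}
 */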
/****************************************************************************/
/* Enable NVRAM access.                                                     */
/*                                                                          */
/* Before accessing NVRAM for read or write operations the caller must     */
/* enable NVRAM access.                                                     */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_enable_nvram_access(struct bce_softc *sc)
{
	uint32_t val;

	val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE);
	/* Enable both bits, even on read. */
	REG_WR(sc, BCE_NVM_ACCESS_ENABLE,
	    val | BCE_NVM_ACCESS_ENABLE_EN | BCE_NVM_ACCESS_ENABLE_WR_EN);
}

/****************************************************************************/
/* Disable NVRAM access.                                                    */
/*                                                                          */
/* When the caller is finished accessing NVRAM access must be disabled.    */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_disable_nvram_access(struct bce_softc *sc)
{
	uint32_t val;

	val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE);

	/* Disable both bits, even after read. */
	REG_WR(sc, BCE_NVM_ACCESS_ENABLE,
	    val & ~(BCE_NVM_ACCESS_ENABLE_EN | BCE_NVM_ACCESS_ENABLE_WR_EN));
}

/****************************************************************************/
/* Read a dword (32 bits) from NVRAM.                                       */
/*                                                                          */
/* Read a 32 bit word from NVRAM.  The caller is assumed to have already   */
/* obtained the NVRAM lock and enabled the controller for NVRAM access.    */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success and the 32 bit value read, positive value on failure.    */
/****************************************************************************/
static int
bce_nvram_read_dword(struct bce_softc *sc, uint32_t offset, uint8_t *ret_val,
    uint32_t cmd_flags)
{
	uint32_t cmd;
	int i, rc = 0;

	/* Build the command word. */
	cmd = BCE_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate the offset for buffered flash. */
	if (sc->bce_flash_info->flags & BCE_NV_TRANSLATE) {
		offset = ((offset / sc->bce_flash_info->page_size) <<
		    sc->bce_flash_info->page_bits) +
		    (offset % sc->bce_flash_info->page_size);
	}

	/*
	 * Clear the DONE bit separately, set the address to read,
	 * and issue the read.
	 */
	REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
	REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
	REG_WR(sc, BCE_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (i = 0; i < NVRAM_TIMEOUT_COUNT; i++) {
		uint32_t val;

		DELAY(5);

		val = REG_RD(sc, BCE_NVM_COMMAND);
		if (val & BCE_NVM_COMMAND_DONE) {
			val = REG_RD(sc, BCE_NVM_READ);

			val = be32toh(val);
			memcpy(ret_val, &val, 4);
			break;
		}
	}

	/* Check for errors. */
	if (i >= NVRAM_TIMEOUT_COUNT) {
		if_printf(&sc->arpcom.ac_if,
		    "Timeout error reading NVRAM at offset 0x%08X!\n",
		    offset);
		rc = EBUSY;
	}
	return rc;
}
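
/*
 * Worked example of the BCE_NV_TRANSLATE mapping above, assuming the
 * Atmel buffered-flash geometry from if_bcereg.h (264-byte pages,
 * 9 page bits): logical offset 600 falls in page 2 at byte 72, so the
 * physical address sent to the controller is (2 << 9) + 72 = 0x448.
 * For devices whose page size is already a power of two the mapping
 * degenerates to the identity.
 */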
*/
1623     val = REG_RD(sc, BCE_NVM_CFG1);
1624
1625     entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
1626
1627     /*
1628      * Flash reconfiguration is required to support additional
1629      * NVRAM devices not directly supported in hardware.
1630      * Check if the flash interface was reconfigured
1631      * by the bootcode.
1632      */
1633
1634     if (val & 0x40000000) {
1635         /* Flash interface reconfigured by bootcode. */
1636         for (j = 0, flash = flash_table; j < entry_count;
1637              j++, flash++) {
1638             if ((val & FLASH_BACKUP_STRAP_MASK) ==
1639                 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
1640                 sc->bce_flash_info = flash;
1641                 break;
1642             }
1643         }
1644     } else {
1645         /* Flash interface not yet reconfigured. */
1646         uint32_t mask;
1647
1648         if (val & (1 << 23))
1649             mask = FLASH_BACKUP_STRAP_MASK;
1650         else
1651             mask = FLASH_STRAP_MASK;
1652
1653         /* Look for the matching NVRAM device configuration data. */
1654         for (j = 0, flash = flash_table; j < entry_count;
1655              j++, flash++) {
1656             /* Check if the device matches any of the known devices. */
1657             if ((val & mask) == (flash->strapping & mask)) {
1658                 /* Found a device match. */
1659                 sc->bce_flash_info = flash;
1660
1661                 /* Request access to the flash interface. */
1662                 rc = bce_acquire_nvram_lock(sc);
1663                 if (rc != 0)
1664                     return rc;
1665
1666                 /* Reconfigure the flash interface. */
1667                 bce_enable_nvram_access(sc);
1668                 REG_WR(sc, BCE_NVM_CFG1, flash->config1);
1669                 REG_WR(sc, BCE_NVM_CFG2, flash->config2);
1670                 REG_WR(sc, BCE_NVM_CFG3, flash->config3);
1671                 REG_WR(sc, BCE_NVM_WRITE1, flash->write1);
1672                 bce_disable_nvram_access(sc);
1673                 bce_release_nvram_lock(sc);
1674                 break;
1675             }
1676         }
1677     }
1678
1679     /* Check if a matching device was found. */
1680     if (j == entry_count) {
1681         sc->bce_flash_info = NULL;
1682         if_printf(&sc->arpcom.ac_if, "Unknown Flash NVRAM found!\n");
1683         return ENODEV;
1684     }
1685
1686 bce_init_nvram_get_flash_size:
1687     /* Fetch the flash size from the shared memory interface. */
1688     val = bce_shmem_rd(sc, BCE_SHARED_HW_CFG_CONFIG2) &
1689         BCE_SHARED_HW_CFG2_NVM_SIZE_MASK;
1690     if (val)
1691         sc->bce_flash_size = val;
1692     else
1693         sc->bce_flash_size = sc->bce_flash_info->total_size;
1694
1695     return rc;
1696 }
1697
1698 /****************************************************************************/
1699 /* Read an arbitrary range of data from NVRAM. */
1700 /* */
1701 /* Prepares the NVRAM interface for access and reads the requested data */
1702 /* into the supplied buffer. */
1703 /* */
1704 /* Returns: */
1705 /* 0 on success and the data read, positive value on failure. */
1706 /****************************************************************************/
1707 static int
1708 bce_nvram_read(struct bce_softc *sc, uint32_t offset, uint8_t *ret_buf,
1709     int buf_size)
1710 {
1711     uint32_t cmd_flags, offset32, len32, extra;
1712     int rc = 0;
1713
1714     if (buf_size == 0)
1715         return 0;
1716
1717     /* Request access to the flash interface. */
1718     rc = bce_acquire_nvram_lock(sc);
1719     if (rc != 0)
1720         return rc;
1721
1722     /* Enable access to flash interface */
1723     bce_enable_nvram_access(sc);
1724
1725     len32 = buf_size;
1726     offset32 = offset;
1727     extra = 0;
1728
1729     cmd_flags = 0;
1730
1731     /* On error, bail out through bce_nvram_read_locked_exit below. */
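    /*
     * An unaligned request is split into an aligned head, whole
     * dwords and a padded tail. Illustrative example: offset 0x102,
     * length 5 reads the dword at 0x100 (keeping bytes 2-3) and the
     * dword at 0x104 (keeping bytes 0-2).
     */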
1732     if (offset32 & 3) {
1733         uint8_t buf[4];
1734         uint32_t pre_len;
1735
1736         offset32 &= ~3;
1737         pre_len = 4 - (offset & 3);
1738
1739         if (pre_len >= len32) {
1740             pre_len = len32;
1741             cmd_flags = BCE_NVM_COMMAND_FIRST | BCE_NVM_COMMAND_LAST;
1742         } else {
1743             cmd_flags = BCE_NVM_COMMAND_FIRST;
1744         }
1745
1746         rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
1747         if (rc)
1748             goto bce_nvram_read_locked_exit;
1749
1750         memcpy(ret_buf, buf + (offset & 3), pre_len);
1751
1752         offset32 += 4;
1753         ret_buf += pre_len;
1754         len32 -= pre_len;
1755     }
1756
1757     if (len32 & 3) {
1758         extra = 4 - (len32 & 3);
1759         len32 = (len32 + 4) & ~3;
1760     }
1761
1762     if (len32 == 4) {
1763         uint8_t buf[4];
1764
1765         if (cmd_flags)
1766             cmd_flags = BCE_NVM_COMMAND_LAST;
1767         else
1768             cmd_flags = BCE_NVM_COMMAND_FIRST |
1769                 BCE_NVM_COMMAND_LAST;
1770
1771         rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
         if (rc)
             goto bce_nvram_read_locked_exit;
1772
1773         memcpy(ret_buf, buf, 4 - extra);
1774     } else if (len32 > 0) {
1775         uint8_t buf[4];
1776
1777         /* Read the first word. */
1778         if (cmd_flags)
1779             cmd_flags = 0;
1780         else
1781             cmd_flags = BCE_NVM_COMMAND_FIRST;
1782
1783         rc = bce_nvram_read_dword(sc, offset32, ret_buf, cmd_flags);
1784
1785         /* Advance to the next dword. */
1786         offset32 += 4;
1787         ret_buf += 4;
1788         len32 -= 4;
1789
1790         while (len32 > 4 && rc == 0) {
1791             rc = bce_nvram_read_dword(sc, offset32, ret_buf, 0);
1792
1793             /* Advance to the next dword. */
1794             offset32 += 4;
1795             ret_buf += 4;
1796             len32 -= 4;
1797         }
1798
1799         if (rc)
1800             goto bce_nvram_read_locked_exit;
1801
1802         cmd_flags = BCE_NVM_COMMAND_LAST;
1803         rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
         if (rc)
             goto bce_nvram_read_locked_exit;
1804
1805         memcpy(ret_buf, buf, 4 - extra);
1806     }
1807
1808 bce_nvram_read_locked_exit:
1809     /* Disable access to flash interface and release the lock. */
1810     bce_disable_nvram_access(sc);
1811     bce_release_nvram_lock(sc);
1812
1813     return rc;
1814 }
1815
1816 /****************************************************************************/
1817 /* Verifies that NVRAM is accessible and contains valid data. */
1818 /* */
1819 /* Reads the configuration data from NVRAM and verifies that the CRC is */
1820 /* correct. */
1821 /* */
1822 /* Returns: */
1823 /* 0 on success, positive value on failure. */
1824 /****************************************************************************/
1825 static int
1826 bce_nvram_test(struct bce_softc *sc)
1827 {
1828     uint32_t buf[BCE_NVRAM_SIZE / 4];
1829     uint32_t magic, csum;
1830     uint8_t *data = (uint8_t *)buf;
1831     int rc = 0;
1832
1833     /*
1834      * Check that the device NVRAM is valid by reading
1835      * the magic value at offset 0.
1836      */
1837     rc = bce_nvram_read(sc, 0, data, 4);
1838     if (rc != 0)
1839         return rc;
1840
1841     magic = be32toh(buf[0]);
1842     if (magic != BCE_NVRAM_MAGIC) {
1843         if_printf(&sc->arpcom.ac_if,
1844             "Invalid NVRAM magic value! Expected: 0x%08X, "
1845             "Found: 0x%08X\n", BCE_NVRAM_MAGIC, magic);
1846         return ENODEV;
1847     }
1848
1849     /*
1850      * Verify that the device NVRAM includes valid
1851      * configuration data.
1852      */
1853     rc = bce_nvram_read(sc, 0x100, data, BCE_NVRAM_SIZE);
1854     if (rc != 0)
1855         return rc;
1856
1857     csum = ether_crc32_le(data, 0x100);
1858     if (csum != BCE_CRC32_RESIDUAL) {
1859         if_printf(&sc->arpcom.ac_if,
1860             "Invalid Manufacturing Information NVRAM CRC! "
" 1861 "Expected: 0x%08X, Found: 0x%08X\n", 1862 BCE_CRC32_RESIDUAL, csum); 1863 return ENODEV; 1864 } 1865 1866 csum = ether_crc32_le(data + 0x100, 0x100); 1867 if (csum != BCE_CRC32_RESIDUAL) { 1868 if_printf(&sc->arpcom.ac_if, 1869 "Invalid Feature Configuration Information " 1870 "NVRAM CRC! Expected: 0x%08X, Found: 08%08X\n", 1871 BCE_CRC32_RESIDUAL, csum); 1872 rc = ENODEV; 1873 } 1874 return rc; 1875 } 1876 1877 /****************************************************************************/ 1878 /* Identifies the current media type of the controller and sets the PHY */ 1879 /* address. */ 1880 /* */ 1881 /* Returns: */ 1882 /* Nothing. */ 1883 /****************************************************************************/ 1884 static void 1885 bce_get_media(struct bce_softc *sc) 1886 { 1887 uint32_t val; 1888 1889 sc->bce_phy_addr = 1; 1890 1891 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 || 1892 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) { 1893 uint32_t val = REG_RD(sc, BCE_MISC_DUAL_MEDIA_CTRL); 1894 uint32_t bond_id = val & BCE_MISC_DUAL_MEDIA_CTRL_BOND_ID; 1895 uint32_t strap; 1896 1897 /* 1898 * The BCM5709S is software configurable 1899 * for Copper or SerDes operation. 1900 */ 1901 if (bond_id == BCE_MISC_DUAL_MEDIA_CTRL_BOND_ID_C) { 1902 return; 1903 } else if (bond_id == BCE_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) { 1904 sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG; 1905 return; 1906 } 1907 1908 if (val & BCE_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE) { 1909 strap = (val & BCE_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21; 1910 } else { 1911 strap = 1912 (val & BCE_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8; 1913 } 1914 1915 if (pci_get_function(sc->bce_dev) == 0) { 1916 switch (strap) { 1917 case 0x4: 1918 case 0x5: 1919 case 0x6: 1920 sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG; 1921 break; 1922 } 1923 } else { 1924 switch (strap) { 1925 case 0x1: 1926 case 0x2: 1927 case 0x4: 1928 sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG; 1929 break; 1930 } 1931 } 1932 } else if (BCE_CHIP_BOND_ID(sc) & BCE_CHIP_BOND_ID_SERDES_BIT) { 1933 sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG; 1934 } 1935 1936 if (sc->bce_phy_flags & BCE_PHY_SERDES_FLAG) { 1937 sc->bce_flags |= BCE_NO_WOL_FLAG; 1938 if (BCE_CHIP_NUM(sc) != BCE_CHIP_NUM_5706) { 1939 sc->bce_phy_addr = 2; 1940 val = bce_shmem_rd(sc, BCE_SHARED_HW_CFG_CONFIG); 1941 if (val & BCE_SHARED_HW_CFG_PHY_2_5G) 1942 sc->bce_phy_flags |= BCE_PHY_2_5G_CAPABLE_FLAG; 1943 } 1944 } else if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) || 1945 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708)) { 1946 sc->bce_phy_flags |= BCE_PHY_CRC_FIX_FLAG; 1947 } 1948 } 1949 1950 static void 1951 bce_destroy_tx_ring(struct bce_tx_ring *txr) 1952 { 1953 int i; 1954 1955 /* Destroy the TX buffer descriptor DMA stuffs. */ 1956 if (txr->tx_bd_chain_tag != NULL) { 1957 for (i = 0; i < txr->tx_pages; i++) { 1958 if (txr->tx_bd_chain[i] != NULL) { 1959 bus_dmamap_unload(txr->tx_bd_chain_tag, 1960 txr->tx_bd_chain_map[i]); 1961 bus_dmamem_free(txr->tx_bd_chain_tag, 1962 txr->tx_bd_chain[i], 1963 txr->tx_bd_chain_map[i]); 1964 } 1965 } 1966 bus_dma_tag_destroy(txr->tx_bd_chain_tag); 1967 } 1968 1969 /* Destroy the TX mbuf DMA stuffs. 
1970     if (txr->tx_mbuf_tag != NULL) {
1971         for (i = 0; i < TOTAL_TX_BD(txr); i++) {
1972             /* Must have been unloaded in bce_stop() */
1973             KKASSERT(txr->tx_bufs[i].tx_mbuf_ptr == NULL);
1974             bus_dmamap_destroy(txr->tx_mbuf_tag,
1975                 txr->tx_bufs[i].tx_mbuf_map);
1976         }
1977         bus_dma_tag_destroy(txr->tx_mbuf_tag);
1978     }
1979
1980     if (txr->tx_bd_chain_map != NULL)
1981         kfree(txr->tx_bd_chain_map, M_DEVBUF);
1982     if (txr->tx_bd_chain != NULL)
1983         kfree(txr->tx_bd_chain, M_DEVBUF);
1984     if (txr->tx_bd_chain_paddr != NULL)
1985         kfree(txr->tx_bd_chain_paddr, M_DEVBUF);
1986
1987     if (txr->tx_bufs != NULL)
1988         kfree(txr->tx_bufs, M_DEVBUF);
1989 }
1990
1991 static void
1992 bce_destroy_rx_ring(struct bce_rx_ring *rxr)
1993 {
1994     int i;
1995
1996     /* Destroy the RX buffer descriptor DMA stuffs. */
1997     if (rxr->rx_bd_chain_tag != NULL) {
1998         for (i = 0; i < rxr->rx_pages; i++) {
1999             if (rxr->rx_bd_chain[i] != NULL) {
2000                 bus_dmamap_unload(rxr->rx_bd_chain_tag,
2001                     rxr->rx_bd_chain_map[i]);
2002                 bus_dmamem_free(rxr->rx_bd_chain_tag,
2003                     rxr->rx_bd_chain[i],
2004                     rxr->rx_bd_chain_map[i]);
2005             }
2006         }
2007         bus_dma_tag_destroy(rxr->rx_bd_chain_tag);
2008     }
2009
2010     /* Destroy the RX mbuf DMA stuffs. */
2011     if (rxr->rx_mbuf_tag != NULL) {
2012         for (i = 0; i < TOTAL_RX_BD(rxr); i++) {
2013             /* Must have been unloaded in bce_stop() */
2014             KKASSERT(rxr->rx_bufs[i].rx_mbuf_ptr == NULL);
2015             bus_dmamap_destroy(rxr->rx_mbuf_tag,
2016                 rxr->rx_bufs[i].rx_mbuf_map);
2017         }
2018         bus_dmamap_destroy(rxr->rx_mbuf_tag, rxr->rx_mbuf_tmpmap);
2019         bus_dma_tag_destroy(rxr->rx_mbuf_tag);
2020     }
2021
2022     if (rxr->rx_bd_chain_map != NULL)
2023         kfree(rxr->rx_bd_chain_map, M_DEVBUF);
2024     if (rxr->rx_bd_chain != NULL)
2025         kfree(rxr->rx_bd_chain, M_DEVBUF);
2026     if (rxr->rx_bd_chain_paddr != NULL)
2027         kfree(rxr->rx_bd_chain_paddr, M_DEVBUF);
2028
2029     if (rxr->rx_bufs != NULL)
2030         kfree(rxr->rx_bufs, M_DEVBUF);
2031 }
2032
2033 /****************************************************************************/
2034 /* Free any DMA memory owned by the driver. */
2035 /* */
2036 /* Scans through each data structure that requires DMA memory and frees */
2037 /* the memory if allocated. */
2038 /* */
2039 /* Returns: */
2040 /* Nothing. */
2041 /****************************************************************************/
2042 static void
2043 bce_dma_free(struct bce_softc *sc)
2044 {
2045     int i;
2046
2047     /* Destroy the status block. */
2048     if (sc->status_tag != NULL) {
2049         if (sc->status_block != NULL) {
2050             bus_dmamap_unload(sc->status_tag, sc->status_map);
2051             bus_dmamem_free(sc->status_tag, sc->status_block,
2052                 sc->status_map);
2053         }
2054         bus_dma_tag_destroy(sc->status_tag);
2055     }
2056
2057     /* Destroy the statistics block. */
2058     if (sc->stats_tag != NULL) {
2059         if (sc->stats_block != NULL) {
2060             bus_dmamap_unload(sc->stats_tag, sc->stats_map);
2061             bus_dmamem_free(sc->stats_tag, sc->stats_block,
2062                 sc->stats_map);
2063         }
2064         bus_dma_tag_destroy(sc->stats_tag);
2065     }
2066
2067     /* Destroy the CTX DMA stuffs. */
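    /*
     * ctx_pages is non-zero only on the 5709/5716, which cache
     * context memory in host memory; see bce_dma_alloc().
     */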
2068     if (sc->ctx_tag != NULL) {
2069         for (i = 0; i < sc->ctx_pages; i++) {
2070             if (sc->ctx_block[i] != NULL) {
2071                 bus_dmamap_unload(sc->ctx_tag, sc->ctx_map[i]);
2072                 bus_dmamem_free(sc->ctx_tag, sc->ctx_block[i],
2073                     sc->ctx_map[i]);
2074             }
2075         }
2076         bus_dma_tag_destroy(sc->ctx_tag);
2077     }
2078
2079     /* Free TX rings */
2080     if (sc->tx_rings != NULL) {
2081         for (i = 0; i < sc->tx_ring_cnt; ++i)
2082             bce_destroy_tx_ring(&sc->tx_rings[i]);
2083         kfree(sc->tx_rings, M_DEVBUF);
2084     }
2085
2086     /* Free RX rings */
2087     if (sc->rx_rings != NULL) {
2088         for (i = 0; i < sc->rx_ring_cnt; ++i)
2089             bce_destroy_rx_ring(&sc->rx_rings[i]);
2090         kfree(sc->rx_rings, M_DEVBUF);
2091     }
2092
2093     /* Destroy the parent tag */
2094     if (sc->parent_tag != NULL)
2095         bus_dma_tag_destroy(sc->parent_tag);
2096 }
2097
2098 /****************************************************************************/
2099 /* Get DMA memory from the OS. */
2100 /* */
2101 /* Validates that the OS has provided DMA buffers in response to a */
2102 /* bus_dmamap_load() call and saves the physical address of those buffers. */
2103 /* When the callback is used, bus_dmamap_load() itself returns 0 and any */
2104 /* failure is reported through the callback's error argument; on error the */
2105 /* callback leaves the caller's bus address untouched. */
2106 /* */
2107 /* Returns: */
2108 /* Nothing. */
2109 /****************************************************************************/
2110 static void
2111 bce_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2112 {
2113     bus_addr_t *busaddr = arg;
2114
2115     /* If an error occurred, leave the caller's bus address untouched. */
2116     if (error)
2117         return;
2118
2119     KASSERT(nseg == 1, ("only one segment is allowed"));
2120     *busaddr = segs->ds_addr;
2121 }
2122
2123 static int
2124 bce_create_tx_ring(struct bce_tx_ring *txr)
2125 {
2126     int pages, rc, i;
2127
2128     lwkt_serialize_init(&txr->tx_serialize);
2129     txr->tx_wreg = bce_tx_wreg;
2130
2131     pages = device_getenv_int(txr->sc->bce_dev, "tx_pages", bce_tx_pages);
2132     if (pages <= 0 || pages > TX_PAGES_MAX || !powerof2(pages)) {
2133         device_printf(txr->sc->bce_dev, "invalid # of TX pages\n");
2134         pages = TX_PAGES_DEFAULT;
2135     }
2136     txr->tx_pages = pages;
2137
2138     txr->tx_bd_chain_map = kmalloc(sizeof(bus_dmamap_t) * txr->tx_pages,
2139         M_DEVBUF, M_WAITOK | M_ZERO);
2140     txr->tx_bd_chain = kmalloc(sizeof(struct tx_bd *) * txr->tx_pages,
2141         M_DEVBUF, M_WAITOK | M_ZERO);
2142     txr->tx_bd_chain_paddr = kmalloc(sizeof(bus_addr_t) * txr->tx_pages,
2143         M_DEVBUF, M_WAITOK | M_ZERO);
2144
2145     txr->tx_bufs = kmalloc_cachealign(
2146         sizeof(struct bce_tx_buf) * TOTAL_TX_BD(txr),
2147         M_DEVBUF, M_WAITOK | M_ZERO);
2148
2149     /*
2150      * Create a DMA tag for the TX buffer descriptor chain,
2151      * allocate and clear the memory, and fetch the
2152      * physical address of the block.
2153      */
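    /*
     * The tag below permits exactly one segment of BCE_TX_CHAIN_PAGE_SZ
     * bytes, aligned to BCM_PAGE_SIZE, so each page of the chain is
     * physically contiguous.
     */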
2154     rc = bus_dma_tag_create(txr->sc->parent_tag, BCM_PAGE_SIZE, 0,
2155         BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
2156         BCE_TX_CHAIN_PAGE_SZ, 1, BCE_TX_CHAIN_PAGE_SZ,
2157         0, &txr->tx_bd_chain_tag);
2158     if (rc != 0) {
2159         device_printf(txr->sc->bce_dev, "Could not allocate "
2160             "TX descriptor chain DMA tag!\n");
2161         return rc;
2162     }
2163
2164     for (i = 0; i < txr->tx_pages; i++) {
2165         bus_addr_t busaddr;
2166
2167         rc = bus_dmamem_alloc(txr->tx_bd_chain_tag,
2168             (void **)&txr->tx_bd_chain[i],
2169             BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
2170             &txr->tx_bd_chain_map[i]);
2171         if (rc != 0) {
2172             device_printf(txr->sc->bce_dev,
2173                 "Could not allocate %dth TX descriptor "
2174                 "chain DMA memory!\n", i);
2175             return rc;
2176         }
2177
2178         rc = bus_dmamap_load(txr->tx_bd_chain_tag,
2179             txr->tx_bd_chain_map[i],
2180             txr->tx_bd_chain[i],
2181             BCE_TX_CHAIN_PAGE_SZ,
2182             bce_dma_map_addr, &busaddr,
2183             BUS_DMA_WAITOK);
2184         if (rc != 0) {
2185             if (rc == EINPROGRESS) {
2186                 panic("%s coherent memory loading "
2187                     "is still in progress!",
2188                     txr->sc->arpcom.ac_if.if_xname);
2189             }
2190             device_printf(txr->sc->bce_dev, "Could not map %dth "
2191                 "TX descriptor chain DMA memory!\n", i);
2192             bus_dmamem_free(txr->tx_bd_chain_tag,
2193                 txr->tx_bd_chain[i],
2194                 txr->tx_bd_chain_map[i]);
2195             txr->tx_bd_chain[i] = NULL;
2196             return rc;
2197         }
2198
2199         txr->tx_bd_chain_paddr[i] = busaddr;
2200     }
2201
2202     /* Create a DMA tag for TX mbufs. */
2203     rc = bus_dma_tag_create(txr->sc->parent_tag, 1, 0,
2204         BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
2205         IP_MAXPACKET + sizeof(struct ether_vlan_header),
2206         BCE_MAX_SEGMENTS, PAGE_SIZE,
2207         BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
2208         &txr->tx_mbuf_tag);
2209     if (rc != 0) {
2210         device_printf(txr->sc->bce_dev,
2211             "Could not allocate TX mbuf DMA tag!\n");
2212         return rc;
2213     }
2214
2215     /* Create DMA maps for the TX mbuf clusters. */
2216     for (i = 0; i < TOTAL_TX_BD(txr); i++) {
2217         rc = bus_dmamap_create(txr->tx_mbuf_tag,
2218             BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
2219             &txr->tx_bufs[i].tx_mbuf_map);
2220         if (rc != 0) {
2221             int j;
2222
2223             for (j = 0; j < i; ++j) {
2224                 bus_dmamap_destroy(txr->tx_mbuf_tag,
2225                     txr->tx_bufs[j].tx_mbuf_map);
2226             }
2227             bus_dma_tag_destroy(txr->tx_mbuf_tag);
2228             txr->tx_mbuf_tag = NULL;
2229
2230             device_printf(txr->sc->bce_dev, "Unable to create "
2231                 "%dth TX mbuf DMA map!\n", i);
2232             return rc;
2233         }
2234     }
2235     return 0;
2236 }
2237
2238 static int
2239 bce_create_rx_ring(struct bce_rx_ring *rxr)
2240 {
2241     int pages, rc, i;
2242
2243     lwkt_serialize_init(&rxr->rx_serialize);
2244
2245     pages = device_getenv_int(rxr->sc->bce_dev, "rx_pages", bce_rx_pages);
2246     if (pages <= 0 || pages > RX_PAGES_MAX || !powerof2(pages)) {
2247         device_printf(rxr->sc->bce_dev, "invalid # of RX pages\n");
2248         pages = RX_PAGES_DEFAULT;
2249     }
2250     rxr->rx_pages = pages;
2251
2252     rxr->rx_bd_chain_map = kmalloc(sizeof(bus_dmamap_t) * rxr->rx_pages,
2253         M_DEVBUF, M_WAITOK | M_ZERO);
2254     rxr->rx_bd_chain = kmalloc(sizeof(struct rx_bd *) * rxr->rx_pages,
2255         M_DEVBUF, M_WAITOK | M_ZERO);
2256     rxr->rx_bd_chain_paddr = kmalloc(sizeof(bus_addr_t) * rxr->rx_pages,
2257         M_DEVBUF, M_WAITOK | M_ZERO);
2258
2259     rxr->rx_bufs = kmalloc_cachealign(
2260         sizeof(struct bce_rx_buf) * TOTAL_RX_BD(rxr),
2261         M_DEVBUF, M_WAITOK | M_ZERO);
2262
2263     /*
2264      * Create a DMA tag for the RX buffer descriptor chain,
2265      * allocate and clear the memory, and fetch the physical
2266      * address of the blocks.
2267 */ 2268 rc = bus_dma_tag_create(rxr->sc->parent_tag, BCM_PAGE_SIZE, 0, 2269 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, 2270 BCE_RX_CHAIN_PAGE_SZ, 1, BCE_RX_CHAIN_PAGE_SZ, 2271 0, &rxr->rx_bd_chain_tag); 2272 if (rc != 0) { 2273 device_printf(rxr->sc->bce_dev, "Could not allocate " 2274 "RX descriptor chain DMA tag!\n"); 2275 return rc; 2276 } 2277 2278 for (i = 0; i < rxr->rx_pages; i++) { 2279 bus_addr_t busaddr; 2280 2281 rc = bus_dmamem_alloc(rxr->rx_bd_chain_tag, 2282 (void **)&rxr->rx_bd_chain[i], 2283 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, 2284 &rxr->rx_bd_chain_map[i]); 2285 if (rc != 0) { 2286 device_printf(rxr->sc->bce_dev, 2287 "Could not allocate %dth RX descriptor " 2288 "chain DMA memory!\n", i); 2289 return rc; 2290 } 2291 2292 rc = bus_dmamap_load(rxr->rx_bd_chain_tag, 2293 rxr->rx_bd_chain_map[i], 2294 rxr->rx_bd_chain[i], 2295 BCE_RX_CHAIN_PAGE_SZ, 2296 bce_dma_map_addr, &busaddr, 2297 BUS_DMA_WAITOK); 2298 if (rc != 0) { 2299 if (rc == EINPROGRESS) { 2300 panic("%s coherent memory loading " 2301 "is still in progress!", 2302 rxr->sc->arpcom.ac_if.if_xname); 2303 } 2304 device_printf(rxr->sc->bce_dev, 2305 "Could not map %dth RX descriptor " 2306 "chain DMA memory!\n", i); 2307 bus_dmamem_free(rxr->rx_bd_chain_tag, 2308 rxr->rx_bd_chain[i], 2309 rxr->rx_bd_chain_map[i]); 2310 rxr->rx_bd_chain[i] = NULL; 2311 return rc; 2312 } 2313 2314 rxr->rx_bd_chain_paddr[i] = busaddr; 2315 } 2316 2317 /* Create a DMA tag for RX mbufs. */ 2318 rc = bus_dma_tag_create(rxr->sc->parent_tag, BCE_DMA_RX_ALIGN, 0, 2319 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, 2320 MCLBYTES, 1, MCLBYTES, 2321 BUS_DMA_ALLOCNOW | BUS_DMA_ALIGNED | BUS_DMA_WAITOK, 2322 &rxr->rx_mbuf_tag); 2323 if (rc != 0) { 2324 device_printf(rxr->sc->bce_dev, 2325 "Could not allocate RX mbuf DMA tag!\n"); 2326 return rc; 2327 } 2328 2329 /* Create tmp DMA map for RX mbuf clusters. */ 2330 rc = bus_dmamap_create(rxr->rx_mbuf_tag, BUS_DMA_WAITOK, 2331 &rxr->rx_mbuf_tmpmap); 2332 if (rc != 0) { 2333 bus_dma_tag_destroy(rxr->rx_mbuf_tag); 2334 rxr->rx_mbuf_tag = NULL; 2335 2336 device_printf(rxr->sc->bce_dev, 2337 "Could not create RX mbuf tmp DMA map!\n"); 2338 return rc; 2339 } 2340 2341 /* Create DMA maps for the RX mbuf clusters. */ 2342 for (i = 0; i < TOTAL_RX_BD(rxr); i++) { 2343 rc = bus_dmamap_create(rxr->rx_mbuf_tag, BUS_DMA_WAITOK, 2344 &rxr->rx_bufs[i].rx_mbuf_map); 2345 if (rc != 0) { 2346 int j; 2347 2348 for (j = 0; j < i; ++j) { 2349 bus_dmamap_destroy(rxr->rx_mbuf_tag, 2350 rxr->rx_bufs[j].rx_mbuf_map); 2351 } 2352 bus_dma_tag_destroy(rxr->rx_mbuf_tag); 2353 rxr->rx_mbuf_tag = NULL; 2354 2355 device_printf(rxr->sc->bce_dev, "Unable to create " 2356 "%dth RX mbuf DMA map!\n", i); 2357 return rc; 2358 } 2359 } 2360 return 0; 2361 } 2362 2363 /****************************************************************************/ 2364 /* Allocate any DMA memory needed by the driver. */ 2365 /* */ 2366 /* Allocates DMA memory needed for the various global structures needed by */ 2367 /* hardware. 
*/
2368 /*                                                                          */
2369 /* Memory alignment requirements:                                           */
2370 /* -----------------+----------+----------+----------+----------+           */
2371 /* Data Structure   |   5706   |   5708   |   5709   |   5716   |           */
2372 /* -----------------+----------+----------+----------+----------+           */
2373 /* Status Block     | 8 bytes  | 8 bytes  | 16 bytes | 16 bytes |           */
2374 /* Statistics Block | 8 bytes  | 8 bytes  | 16 bytes | 16 bytes |           */
2375 /* RX Buffers       | 16 bytes | 16 bytes | 16 bytes | 16 bytes |           */
2376 /* PG Buffers       |   none   |   none   |   none   |   none   |           */
2377 /* TX Buffers       |   none   |   none   |   none   |   none   |           */
2378 /* Chain Pages(1)   |   4KiB   |   4KiB   |   4KiB   |   4KiB   |           */
2379 /* Context Pages(1) |   N/A    |   N/A    |   4KiB   |   4KiB   |           */
2380 /* -----------------+----------+----------+----------+----------+           */
2381 /*                                                                          */
2382 /* (1) Must align with CPU page size (BCM_PAGE_SIZE).                       */
2383 /*                                                                          */
2384 /* Returns:                                                                 */
2385 /* 0 for success, positive value for failure.                               */
2386 /****************************************************************************/
2387 static int
2388 bce_dma_alloc(struct bce_softc *sc)
2389 {
2390     struct ifnet *ifp = &sc->arpcom.ac_if;
2391     int i, rc = 0;
2392     bus_addr_t busaddr, max_busaddr;
2393     bus_size_t status_align, stats_align, status_size;
2394
2395     /*
2396      * The embedded PCIe to PCI-X bridge (EPB)
2397      * in the 5708 cannot address memory above
2398      * 40 bits (E7_5708CB1_23043 & E6_5708SB1_23043).
2399      */
2400     if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708)
2401         max_busaddr = BCE_BUS_SPACE_MAXADDR;
2402     else
2403         max_busaddr = BUS_SPACE_MAXADDR;
2404
2405     /*
2406      * The BCM5709 and BCM5716 use host memory as a cache for context memory.
2407      */
2408     if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
2409         BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
2410         sc->ctx_pages = BCE_CTX_BLK_SZ / BCM_PAGE_SIZE;
2411         if (sc->ctx_pages == 0)
2412             sc->ctx_pages = 1;
2413         if (sc->ctx_pages > BCE_CTX_PAGES) {
2414             device_printf(sc->bce_dev, "excessive ctx pages %d\n",
2415                 sc->ctx_pages);
2416             return ENOMEM;
2417         }
2418         status_align = 16;
2419         stats_align = 16;
2420     } else {
2421         status_align = 8;
2422         stats_align = 8;
2423     }
2424
2425     /*
2426      * Each MSI-X vector needs a status block; each status block
2427      * consumes 128 bytes and is 128-byte aligned.
2428      */
2429     if (sc->rx_ring_cnt > 1) {
2430         status_size = BCE_MSIX_MAX * BCE_STATUS_BLK_MSIX_ALIGN;
2431         status_align = BCE_STATUS_BLK_MSIX_ALIGN;
2432     } else {
2433         status_size = BCE_STATUS_BLK_SZ;
2434     }
2435
2436     /*
2437      * Allocate the parent bus DMA tag appropriate for PCI.
2438      */
2439     rc = bus_dma_tag_create(NULL, 1, BCE_DMA_BOUNDARY,
2440         max_busaddr, BUS_SPACE_MAXADDR,
2441         NULL, NULL,
2442         BUS_SPACE_MAXSIZE_32BIT, 0,
2443         BUS_SPACE_MAXSIZE_32BIT,
2444         0, &sc->parent_tag);
2445     if (rc != 0) {
2446         if_printf(ifp, "Could not allocate parent DMA tag!\n");
2447         return rc;
2448     }
2449
2450     /*
2451      * Allocate status block.
2452      */
2453     sc->status_block = bus_dmamem_coherent_any(sc->parent_tag,
2454         status_align, status_size,
2455         BUS_DMA_WAITOK | BUS_DMA_ZERO,
2456         &sc->status_tag, &sc->status_map,
2457         &sc->status_block_paddr);
2458     if (sc->status_block == NULL) {
2459         if_printf(ifp, "Could not allocate status block!\n");
2460         return ENOMEM;
2461     }
2462
2463     /*
2464      * Allocate statistics block.
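     * (BCE_STATS_BLK_SZ bytes; stats_align above makes this 16-byte
     * aligned on the 5709/5716 and 8-byte aligned on other chips.)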
2465 */ 2466 sc->stats_block = bus_dmamem_coherent_any(sc->parent_tag, 2467 stats_align, BCE_STATS_BLK_SZ, 2468 BUS_DMA_WAITOK | BUS_DMA_ZERO, 2469 &sc->stats_tag, &sc->stats_map, 2470 &sc->stats_block_paddr); 2471 if (sc->stats_block == NULL) { 2472 if_printf(ifp, "Could not allocate statistics block!\n"); 2473 return ENOMEM; 2474 } 2475 2476 /* 2477 * Allocate context block, if needed 2478 */ 2479 if (sc->ctx_pages != 0) { 2480 rc = bus_dma_tag_create(sc->parent_tag, BCM_PAGE_SIZE, 0, 2481 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 2482 NULL, NULL, 2483 BCM_PAGE_SIZE, 1, BCM_PAGE_SIZE, 2484 0, &sc->ctx_tag); 2485 if (rc != 0) { 2486 if_printf(ifp, "Could not allocate " 2487 "context block DMA tag!\n"); 2488 return rc; 2489 } 2490 2491 for (i = 0; i < sc->ctx_pages; i++) { 2492 rc = bus_dmamem_alloc(sc->ctx_tag, 2493 (void **)&sc->ctx_block[i], 2494 BUS_DMA_WAITOK | BUS_DMA_ZERO | 2495 BUS_DMA_COHERENT, 2496 &sc->ctx_map[i]); 2497 if (rc != 0) { 2498 if_printf(ifp, "Could not allocate %dth context " 2499 "DMA memory!\n", i); 2500 return rc; 2501 } 2502 2503 rc = bus_dmamap_load(sc->ctx_tag, sc->ctx_map[i], 2504 sc->ctx_block[i], BCM_PAGE_SIZE, 2505 bce_dma_map_addr, &busaddr, 2506 BUS_DMA_WAITOK); 2507 if (rc != 0) { 2508 if (rc == EINPROGRESS) { 2509 panic("%s coherent memory loading " 2510 "is still in progress!", ifp->if_xname); 2511 } 2512 if_printf(ifp, "Could not map %dth context " 2513 "DMA memory!\n", i); 2514 bus_dmamem_free(sc->ctx_tag, sc->ctx_block[i], 2515 sc->ctx_map[i]); 2516 sc->ctx_block[i] = NULL; 2517 return rc; 2518 } 2519 sc->ctx_paddr[i] = busaddr; 2520 } 2521 } 2522 2523 sc->tx_rings = kmalloc_cachealign( 2524 sizeof(struct bce_tx_ring) * sc->tx_ring_cnt, M_DEVBUF, 2525 M_WAITOK | M_ZERO); 2526 for (i = 0; i < sc->tx_ring_cnt; ++i) { 2527 sc->tx_rings[i].sc = sc; 2528 if (i == 0) { 2529 sc->tx_rings[i].tx_cid = TX_CID; 2530 sc->tx_rings[i].tx_hw_cons = 2531 &sc->status_block->status_tx_quick_consumer_index0; 2532 } else { 2533 struct status_block_msix *sblk = 2534 (struct status_block_msix *) 2535 (((uint8_t *)(sc->status_block)) + 2536 (i * BCE_STATUS_BLK_MSIX_ALIGN)); 2537 2538 sc->tx_rings[i].tx_cid = TX_TSS_CID + i - 1; 2539 sc->tx_rings[i].tx_hw_cons = 2540 &sblk->status_tx_quick_consumer_index; 2541 } 2542 2543 rc = bce_create_tx_ring(&sc->tx_rings[i]); 2544 if (rc != 0) { 2545 device_printf(sc->bce_dev, 2546 "can't create %dth tx ring\n", i); 2547 return rc; 2548 } 2549 } 2550 2551 sc->rx_rings = kmalloc_cachealign( 2552 sizeof(struct bce_rx_ring) * sc->rx_ring_cnt, M_DEVBUF, 2553 M_WAITOK | M_ZERO); 2554 for (i = 0; i < sc->rx_ring_cnt; ++i) { 2555 sc->rx_rings[i].sc = sc; 2556 sc->rx_rings[i].idx = i; 2557 if (i == 0) { 2558 sc->rx_rings[i].rx_cid = RX_CID; 2559 sc->rx_rings[i].rx_hw_cons = 2560 &sc->status_block->status_rx_quick_consumer_index0; 2561 sc->rx_rings[i].hw_status_idx = 2562 &sc->status_block->status_idx; 2563 } else { 2564 struct status_block_msix *sblk = 2565 (struct status_block_msix *) 2566 (((uint8_t *)(sc->status_block)) + 2567 (i * BCE_STATUS_BLK_MSIX_ALIGN)); 2568 2569 sc->rx_rings[i].rx_cid = RX_RSS_CID + i - 1; 2570 sc->rx_rings[i].rx_hw_cons = 2571 &sblk->status_rx_quick_consumer_index; 2572 sc->rx_rings[i].hw_status_idx = &sblk->status_idx; 2573 } 2574 2575 rc = bce_create_rx_ring(&sc->rx_rings[i]); 2576 if (rc != 0) { 2577 device_printf(sc->bce_dev, 2578 "can't create %dth rx ring\n", i); 2579 return rc; 2580 } 2581 } 2582 2583 return 0; 2584 } 2585 2586 /****************************************************************************/ 2587 
/* Firmware synchronization. */ 2588 /* */ 2589 /* Before performing certain events such as a chip reset, synchronize with */ 2590 /* the firmware first. */ 2591 /* */ 2592 /* Returns: */ 2593 /* 0 for success, positive value for failure. */ 2594 /****************************************************************************/ 2595 static int 2596 bce_fw_sync(struct bce_softc *sc, uint32_t msg_data) 2597 { 2598 int i, rc = 0; 2599 uint32_t val; 2600 2601 /* Don't waste any time if we've timed out before. */ 2602 if (sc->bce_fw_timed_out) 2603 return EBUSY; 2604 2605 /* Increment the message sequence number. */ 2606 sc->bce_fw_wr_seq++; 2607 msg_data |= sc->bce_fw_wr_seq; 2608 2609 /* Send the message to the bootcode driver mailbox. */ 2610 bce_shmem_wr(sc, BCE_DRV_MB, msg_data); 2611 2612 /* Wait for the bootcode to acknowledge the message. */ 2613 for (i = 0; i < FW_ACK_TIME_OUT_MS; i++) { 2614 /* Check for a response in the bootcode firmware mailbox. */ 2615 val = bce_shmem_rd(sc, BCE_FW_MB); 2616 if ((val & BCE_FW_MSG_ACK) == (msg_data & BCE_DRV_MSG_SEQ)) 2617 break; 2618 DELAY(1000); 2619 } 2620 2621 /* If we've timed out, tell the bootcode that we've stopped waiting. */ 2622 if ((val & BCE_FW_MSG_ACK) != (msg_data & BCE_DRV_MSG_SEQ) && 2623 (msg_data & BCE_DRV_MSG_DATA) != BCE_DRV_MSG_DATA_WAIT0) { 2624 if_printf(&sc->arpcom.ac_if, 2625 "Firmware synchronization timeout! " 2626 "msg_data = 0x%08X\n", msg_data); 2627 2628 msg_data &= ~BCE_DRV_MSG_CODE; 2629 msg_data |= BCE_DRV_MSG_CODE_FW_TIMEOUT; 2630 2631 bce_shmem_wr(sc, BCE_DRV_MB, msg_data); 2632 2633 sc->bce_fw_timed_out = 1; 2634 rc = EBUSY; 2635 } 2636 return rc; 2637 } 2638 2639 /****************************************************************************/ 2640 /* Load Receive Virtual 2 Physical (RV2P) processor firmware. */ 2641 /* */ 2642 /* Returns: */ 2643 /* Nothing. */ 2644 /****************************************************************************/ 2645 static void 2646 bce_load_rv2p_fw(struct bce_softc *sc, uint32_t *rv2p_code, 2647 uint32_t rv2p_code_len, uint32_t rv2p_proc) 2648 { 2649 int i; 2650 uint32_t val; 2651 2652 for (i = 0; i < rv2p_code_len; i += 8) { 2653 REG_WR(sc, BCE_RV2P_INSTR_HIGH, *rv2p_code); 2654 rv2p_code++; 2655 REG_WR(sc, BCE_RV2P_INSTR_LOW, *rv2p_code); 2656 rv2p_code++; 2657 2658 if (rv2p_proc == RV2P_PROC1) { 2659 val = (i / 8) | BCE_RV2P_PROC1_ADDR_CMD_RDWR; 2660 REG_WR(sc, BCE_RV2P_PROC1_ADDR_CMD, val); 2661 } else { 2662 val = (i / 8) | BCE_RV2P_PROC2_ADDR_CMD_RDWR; 2663 REG_WR(sc, BCE_RV2P_PROC2_ADDR_CMD, val); 2664 } 2665 } 2666 2667 /* Reset the processor, un-stall is done later. */ 2668 if (rv2p_proc == RV2P_PROC1) 2669 REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC1_RESET); 2670 else 2671 REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC2_RESET); 2672 } 2673 2674 /****************************************************************************/ 2675 /* Load RISC processor firmware. */ 2676 /* */ 2677 /* Loads firmware from the file if_bcefw.h into the scratchpad memory */ 2678 /* associated with a particular processor. */ 2679 /* */ 2680 /* Returns: */ 2681 /* Nothing. */ 2682 /****************************************************************************/ 2683 static void 2684 bce_load_cpu_fw(struct bce_softc *sc, struct cpu_reg *cpu_reg, 2685 struct fw_info *fw) 2686 { 2687 uint32_t offset; 2688 int j; 2689 2690 bce_halt_cpu(sc, cpu_reg); 2691 2692 /* Load the Text area. 
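     *
     * Section addresses are given in the CPU's MIPS view; each is
     * mapped into the scratchpad as spad_base + (addr - mips_view_base),
     * so a view address of 0x8000400, for instance, lands at scratchpad
     * offset 0x400.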
*/ 2693 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base); 2694 if (fw->text) { 2695 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) 2696 REG_WR_IND(sc, offset, fw->text[j]); 2697 } 2698 2699 /* Load the Data area. */ 2700 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base); 2701 if (fw->data) { 2702 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) 2703 REG_WR_IND(sc, offset, fw->data[j]); 2704 } 2705 2706 /* Load the SBSS area. */ 2707 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base); 2708 if (fw->sbss) { 2709 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) 2710 REG_WR_IND(sc, offset, fw->sbss[j]); 2711 } 2712 2713 /* Load the BSS area. */ 2714 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base); 2715 if (fw->bss) { 2716 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) 2717 REG_WR_IND(sc, offset, fw->bss[j]); 2718 } 2719 2720 /* Load the Read-Only area. */ 2721 offset = cpu_reg->spad_base + 2722 (fw->rodata_addr - cpu_reg->mips_view_base); 2723 if (fw->rodata) { 2724 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) 2725 REG_WR_IND(sc, offset, fw->rodata[j]); 2726 } 2727 2728 /* Clear the pre-fetch instruction and set the FW start address. */ 2729 REG_WR_IND(sc, cpu_reg->inst, 0); 2730 REG_WR_IND(sc, cpu_reg->pc, fw->start_addr); 2731 } 2732 2733 /****************************************************************************/ 2734 /* Starts the RISC processor. */ 2735 /* */ 2736 /* Assumes the CPU starting address has already been set. */ 2737 /* */ 2738 /* Returns: */ 2739 /* Nothing. */ 2740 /****************************************************************************/ 2741 static void 2742 bce_start_cpu(struct bce_softc *sc, struct cpu_reg *cpu_reg) 2743 { 2744 uint32_t val; 2745 2746 /* Start the CPU. */ 2747 val = REG_RD_IND(sc, cpu_reg->mode); 2748 val &= ~cpu_reg->mode_value_halt; 2749 REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear); 2750 REG_WR_IND(sc, cpu_reg->mode, val); 2751 } 2752 2753 /****************************************************************************/ 2754 /* Halts the RISC processor. */ 2755 /* */ 2756 /* Returns: */ 2757 /* Nothing. */ 2758 /****************************************************************************/ 2759 static void 2760 bce_halt_cpu(struct bce_softc *sc, struct cpu_reg *cpu_reg) 2761 { 2762 uint32_t val; 2763 2764 /* Halt the CPU. */ 2765 val = REG_RD_IND(sc, cpu_reg->mode); 2766 val |= cpu_reg->mode_value_halt; 2767 REG_WR_IND(sc, cpu_reg->mode, val); 2768 REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear); 2769 } 2770 2771 /****************************************************************************/ 2772 /* Start the RX CPU. */ 2773 /* */ 2774 /* Returns: */ 2775 /* Nothing. 
*/ 2776 /****************************************************************************/ 2777 static void 2778 bce_start_rxp_cpu(struct bce_softc *sc) 2779 { 2780 struct cpu_reg cpu_reg; 2781 2782 cpu_reg.mode = BCE_RXP_CPU_MODE; 2783 cpu_reg.mode_value_halt = BCE_RXP_CPU_MODE_SOFT_HALT; 2784 cpu_reg.mode_value_sstep = BCE_RXP_CPU_MODE_STEP_ENA; 2785 cpu_reg.state = BCE_RXP_CPU_STATE; 2786 cpu_reg.state_value_clear = 0xffffff; 2787 cpu_reg.gpr0 = BCE_RXP_CPU_REG_FILE; 2788 cpu_reg.evmask = BCE_RXP_CPU_EVENT_MASK; 2789 cpu_reg.pc = BCE_RXP_CPU_PROGRAM_COUNTER; 2790 cpu_reg.inst = BCE_RXP_CPU_INSTRUCTION; 2791 cpu_reg.bp = BCE_RXP_CPU_HW_BREAKPOINT; 2792 cpu_reg.spad_base = BCE_RXP_SCRATCH; 2793 cpu_reg.mips_view_base = 0x8000000; 2794 2795 bce_start_cpu(sc, &cpu_reg); 2796 } 2797 2798 /****************************************************************************/ 2799 /* Initialize the RX CPU. */ 2800 /* */ 2801 /* Returns: */ 2802 /* Nothing. */ 2803 /****************************************************************************/ 2804 static void 2805 bce_init_rxp_cpu(struct bce_softc *sc) 2806 { 2807 struct cpu_reg cpu_reg; 2808 struct fw_info fw; 2809 2810 cpu_reg.mode = BCE_RXP_CPU_MODE; 2811 cpu_reg.mode_value_halt = BCE_RXP_CPU_MODE_SOFT_HALT; 2812 cpu_reg.mode_value_sstep = BCE_RXP_CPU_MODE_STEP_ENA; 2813 cpu_reg.state = BCE_RXP_CPU_STATE; 2814 cpu_reg.state_value_clear = 0xffffff; 2815 cpu_reg.gpr0 = BCE_RXP_CPU_REG_FILE; 2816 cpu_reg.evmask = BCE_RXP_CPU_EVENT_MASK; 2817 cpu_reg.pc = BCE_RXP_CPU_PROGRAM_COUNTER; 2818 cpu_reg.inst = BCE_RXP_CPU_INSTRUCTION; 2819 cpu_reg.bp = BCE_RXP_CPU_HW_BREAKPOINT; 2820 cpu_reg.spad_base = BCE_RXP_SCRATCH; 2821 cpu_reg.mips_view_base = 0x8000000; 2822 2823 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 || 2824 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) { 2825 fw.ver_major = bce_RXP_b09FwReleaseMajor; 2826 fw.ver_minor = bce_RXP_b09FwReleaseMinor; 2827 fw.ver_fix = bce_RXP_b09FwReleaseFix; 2828 fw.start_addr = bce_RXP_b09FwStartAddr; 2829 2830 fw.text_addr = bce_RXP_b09FwTextAddr; 2831 fw.text_len = bce_RXP_b09FwTextLen; 2832 fw.text_index = 0; 2833 fw.text = bce_RXP_b09FwText; 2834 2835 fw.data_addr = bce_RXP_b09FwDataAddr; 2836 fw.data_len = bce_RXP_b09FwDataLen; 2837 fw.data_index = 0; 2838 fw.data = bce_RXP_b09FwData; 2839 2840 fw.sbss_addr = bce_RXP_b09FwSbssAddr; 2841 fw.sbss_len = bce_RXP_b09FwSbssLen; 2842 fw.sbss_index = 0; 2843 fw.sbss = bce_RXP_b09FwSbss; 2844 2845 fw.bss_addr = bce_RXP_b09FwBssAddr; 2846 fw.bss_len = bce_RXP_b09FwBssLen; 2847 fw.bss_index = 0; 2848 fw.bss = bce_RXP_b09FwBss; 2849 2850 fw.rodata_addr = bce_RXP_b09FwRodataAddr; 2851 fw.rodata_len = bce_RXP_b09FwRodataLen; 2852 fw.rodata_index = 0; 2853 fw.rodata = bce_RXP_b09FwRodata; 2854 } else { 2855 fw.ver_major = bce_RXP_b06FwReleaseMajor; 2856 fw.ver_minor = bce_RXP_b06FwReleaseMinor; 2857 fw.ver_fix = bce_RXP_b06FwReleaseFix; 2858 fw.start_addr = bce_RXP_b06FwStartAddr; 2859 2860 fw.text_addr = bce_RXP_b06FwTextAddr; 2861 fw.text_len = bce_RXP_b06FwTextLen; 2862 fw.text_index = 0; 2863 fw.text = bce_RXP_b06FwText; 2864 2865 fw.data_addr = bce_RXP_b06FwDataAddr; 2866 fw.data_len = bce_RXP_b06FwDataLen; 2867 fw.data_index = 0; 2868 fw.data = bce_RXP_b06FwData; 2869 2870 fw.sbss_addr = bce_RXP_b06FwSbssAddr; 2871 fw.sbss_len = bce_RXP_b06FwSbssLen; 2872 fw.sbss_index = 0; 2873 fw.sbss = bce_RXP_b06FwSbss; 2874 2875 fw.bss_addr = bce_RXP_b06FwBssAddr; 2876 fw.bss_len = bce_RXP_b06FwBssLen; 2877 fw.bss_index = 0; 2878 fw.bss = bce_RXP_b06FwBss; 2879 2880 fw.rodata_addr = 
bce_RXP_b06FwRodataAddr; 2881 fw.rodata_len = bce_RXP_b06FwRodataLen; 2882 fw.rodata_index = 0; 2883 fw.rodata = bce_RXP_b06FwRodata; 2884 } 2885 2886 bce_load_cpu_fw(sc, &cpu_reg, &fw); 2887 /* Delay RXP start until initialization is complete. */ 2888 } 2889 2890 /****************************************************************************/ 2891 /* Initialize the TX CPU. */ 2892 /* */ 2893 /* Returns: */ 2894 /* Nothing. */ 2895 /****************************************************************************/ 2896 static void 2897 bce_init_txp_cpu(struct bce_softc *sc) 2898 { 2899 struct cpu_reg cpu_reg; 2900 struct fw_info fw; 2901 2902 cpu_reg.mode = BCE_TXP_CPU_MODE; 2903 cpu_reg.mode_value_halt = BCE_TXP_CPU_MODE_SOFT_HALT; 2904 cpu_reg.mode_value_sstep = BCE_TXP_CPU_MODE_STEP_ENA; 2905 cpu_reg.state = BCE_TXP_CPU_STATE; 2906 cpu_reg.state_value_clear = 0xffffff; 2907 cpu_reg.gpr0 = BCE_TXP_CPU_REG_FILE; 2908 cpu_reg.evmask = BCE_TXP_CPU_EVENT_MASK; 2909 cpu_reg.pc = BCE_TXP_CPU_PROGRAM_COUNTER; 2910 cpu_reg.inst = BCE_TXP_CPU_INSTRUCTION; 2911 cpu_reg.bp = BCE_TXP_CPU_HW_BREAKPOINT; 2912 cpu_reg.spad_base = BCE_TXP_SCRATCH; 2913 cpu_reg.mips_view_base = 0x8000000; 2914 2915 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 || 2916 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) { 2917 fw.ver_major = bce_TXP_b09FwReleaseMajor; 2918 fw.ver_minor = bce_TXP_b09FwReleaseMinor; 2919 fw.ver_fix = bce_TXP_b09FwReleaseFix; 2920 fw.start_addr = bce_TXP_b09FwStartAddr; 2921 2922 fw.text_addr = bce_TXP_b09FwTextAddr; 2923 fw.text_len = bce_TXP_b09FwTextLen; 2924 fw.text_index = 0; 2925 fw.text = bce_TXP_b09FwText; 2926 2927 fw.data_addr = bce_TXP_b09FwDataAddr; 2928 fw.data_len = bce_TXP_b09FwDataLen; 2929 fw.data_index = 0; 2930 fw.data = bce_TXP_b09FwData; 2931 2932 fw.sbss_addr = bce_TXP_b09FwSbssAddr; 2933 fw.sbss_len = bce_TXP_b09FwSbssLen; 2934 fw.sbss_index = 0; 2935 fw.sbss = bce_TXP_b09FwSbss; 2936 2937 fw.bss_addr = bce_TXP_b09FwBssAddr; 2938 fw.bss_len = bce_TXP_b09FwBssLen; 2939 fw.bss_index = 0; 2940 fw.bss = bce_TXP_b09FwBss; 2941 2942 fw.rodata_addr = bce_TXP_b09FwRodataAddr; 2943 fw.rodata_len = bce_TXP_b09FwRodataLen; 2944 fw.rodata_index = 0; 2945 fw.rodata = bce_TXP_b09FwRodata; 2946 } else { 2947 fw.ver_major = bce_TXP_b06FwReleaseMajor; 2948 fw.ver_minor = bce_TXP_b06FwReleaseMinor; 2949 fw.ver_fix = bce_TXP_b06FwReleaseFix; 2950 fw.start_addr = bce_TXP_b06FwStartAddr; 2951 2952 fw.text_addr = bce_TXP_b06FwTextAddr; 2953 fw.text_len = bce_TXP_b06FwTextLen; 2954 fw.text_index = 0; 2955 fw.text = bce_TXP_b06FwText; 2956 2957 fw.data_addr = bce_TXP_b06FwDataAddr; 2958 fw.data_len = bce_TXP_b06FwDataLen; 2959 fw.data_index = 0; 2960 fw.data = bce_TXP_b06FwData; 2961 2962 fw.sbss_addr = bce_TXP_b06FwSbssAddr; 2963 fw.sbss_len = bce_TXP_b06FwSbssLen; 2964 fw.sbss_index = 0; 2965 fw.sbss = bce_TXP_b06FwSbss; 2966 2967 fw.bss_addr = bce_TXP_b06FwBssAddr; 2968 fw.bss_len = bce_TXP_b06FwBssLen; 2969 fw.bss_index = 0; 2970 fw.bss = bce_TXP_b06FwBss; 2971 2972 fw.rodata_addr = bce_TXP_b06FwRodataAddr; 2973 fw.rodata_len = bce_TXP_b06FwRodataLen; 2974 fw.rodata_index = 0; 2975 fw.rodata = bce_TXP_b06FwRodata; 2976 } 2977 2978 bce_load_cpu_fw(sc, &cpu_reg, &fw); 2979 bce_start_cpu(sc, &cpu_reg); 2980 } 2981 2982 /****************************************************************************/ 2983 /* Initialize the TPAT CPU. */ 2984 /* */ 2985 /* Returns: */ 2986 /* Nothing. 
*/ 2987 /****************************************************************************/ 2988 static void 2989 bce_init_tpat_cpu(struct bce_softc *sc) 2990 { 2991 struct cpu_reg cpu_reg; 2992 struct fw_info fw; 2993 2994 cpu_reg.mode = BCE_TPAT_CPU_MODE; 2995 cpu_reg.mode_value_halt = BCE_TPAT_CPU_MODE_SOFT_HALT; 2996 cpu_reg.mode_value_sstep = BCE_TPAT_CPU_MODE_STEP_ENA; 2997 cpu_reg.state = BCE_TPAT_CPU_STATE; 2998 cpu_reg.state_value_clear = 0xffffff; 2999 cpu_reg.gpr0 = BCE_TPAT_CPU_REG_FILE; 3000 cpu_reg.evmask = BCE_TPAT_CPU_EVENT_MASK; 3001 cpu_reg.pc = BCE_TPAT_CPU_PROGRAM_COUNTER; 3002 cpu_reg.inst = BCE_TPAT_CPU_INSTRUCTION; 3003 cpu_reg.bp = BCE_TPAT_CPU_HW_BREAKPOINT; 3004 cpu_reg.spad_base = BCE_TPAT_SCRATCH; 3005 cpu_reg.mips_view_base = 0x8000000; 3006 3007 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 || 3008 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) { 3009 fw.ver_major = bce_TPAT_b09FwReleaseMajor; 3010 fw.ver_minor = bce_TPAT_b09FwReleaseMinor; 3011 fw.ver_fix = bce_TPAT_b09FwReleaseFix; 3012 fw.start_addr = bce_TPAT_b09FwStartAddr; 3013 3014 fw.text_addr = bce_TPAT_b09FwTextAddr; 3015 fw.text_len = bce_TPAT_b09FwTextLen; 3016 fw.text_index = 0; 3017 fw.text = bce_TPAT_b09FwText; 3018 3019 fw.data_addr = bce_TPAT_b09FwDataAddr; 3020 fw.data_len = bce_TPAT_b09FwDataLen; 3021 fw.data_index = 0; 3022 fw.data = bce_TPAT_b09FwData; 3023 3024 fw.sbss_addr = bce_TPAT_b09FwSbssAddr; 3025 fw.sbss_len = bce_TPAT_b09FwSbssLen; 3026 fw.sbss_index = 0; 3027 fw.sbss = bce_TPAT_b09FwSbss; 3028 3029 fw.bss_addr = bce_TPAT_b09FwBssAddr; 3030 fw.bss_len = bce_TPAT_b09FwBssLen; 3031 fw.bss_index = 0; 3032 fw.bss = bce_TPAT_b09FwBss; 3033 3034 fw.rodata_addr = bce_TPAT_b09FwRodataAddr; 3035 fw.rodata_len = bce_TPAT_b09FwRodataLen; 3036 fw.rodata_index = 0; 3037 fw.rodata = bce_TPAT_b09FwRodata; 3038 } else { 3039 fw.ver_major = bce_TPAT_b06FwReleaseMajor; 3040 fw.ver_minor = bce_TPAT_b06FwReleaseMinor; 3041 fw.ver_fix = bce_TPAT_b06FwReleaseFix; 3042 fw.start_addr = bce_TPAT_b06FwStartAddr; 3043 3044 fw.text_addr = bce_TPAT_b06FwTextAddr; 3045 fw.text_len = bce_TPAT_b06FwTextLen; 3046 fw.text_index = 0; 3047 fw.text = bce_TPAT_b06FwText; 3048 3049 fw.data_addr = bce_TPAT_b06FwDataAddr; 3050 fw.data_len = bce_TPAT_b06FwDataLen; 3051 fw.data_index = 0; 3052 fw.data = bce_TPAT_b06FwData; 3053 3054 fw.sbss_addr = bce_TPAT_b06FwSbssAddr; 3055 fw.sbss_len = bce_TPAT_b06FwSbssLen; 3056 fw.sbss_index = 0; 3057 fw.sbss = bce_TPAT_b06FwSbss; 3058 3059 fw.bss_addr = bce_TPAT_b06FwBssAddr; 3060 fw.bss_len = bce_TPAT_b06FwBssLen; 3061 fw.bss_index = 0; 3062 fw.bss = bce_TPAT_b06FwBss; 3063 3064 fw.rodata_addr = bce_TPAT_b06FwRodataAddr; 3065 fw.rodata_len = bce_TPAT_b06FwRodataLen; 3066 fw.rodata_index = 0; 3067 fw.rodata = bce_TPAT_b06FwRodata; 3068 } 3069 3070 bce_load_cpu_fw(sc, &cpu_reg, &fw); 3071 bce_start_cpu(sc, &cpu_reg); 3072 } 3073 3074 /****************************************************************************/ 3075 /* Initialize the CP CPU. */ 3076 /* */ 3077 /* Returns: */ 3078 /* Nothing. 
*/ 3079 /****************************************************************************/ 3080 static void 3081 bce_init_cp_cpu(struct bce_softc *sc) 3082 { 3083 struct cpu_reg cpu_reg; 3084 struct fw_info fw; 3085 3086 cpu_reg.mode = BCE_CP_CPU_MODE; 3087 cpu_reg.mode_value_halt = BCE_CP_CPU_MODE_SOFT_HALT; 3088 cpu_reg.mode_value_sstep = BCE_CP_CPU_MODE_STEP_ENA; 3089 cpu_reg.state = BCE_CP_CPU_STATE; 3090 cpu_reg.state_value_clear = 0xffffff; 3091 cpu_reg.gpr0 = BCE_CP_CPU_REG_FILE; 3092 cpu_reg.evmask = BCE_CP_CPU_EVENT_MASK; 3093 cpu_reg.pc = BCE_CP_CPU_PROGRAM_COUNTER; 3094 cpu_reg.inst = BCE_CP_CPU_INSTRUCTION; 3095 cpu_reg.bp = BCE_CP_CPU_HW_BREAKPOINT; 3096 cpu_reg.spad_base = BCE_CP_SCRATCH; 3097 cpu_reg.mips_view_base = 0x8000000; 3098 3099 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 || 3100 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) { 3101 fw.ver_major = bce_CP_b09FwReleaseMajor; 3102 fw.ver_minor = bce_CP_b09FwReleaseMinor; 3103 fw.ver_fix = bce_CP_b09FwReleaseFix; 3104 fw.start_addr = bce_CP_b09FwStartAddr; 3105 3106 fw.text_addr = bce_CP_b09FwTextAddr; 3107 fw.text_len = bce_CP_b09FwTextLen; 3108 fw.text_index = 0; 3109 fw.text = bce_CP_b09FwText; 3110 3111 fw.data_addr = bce_CP_b09FwDataAddr; 3112 fw.data_len = bce_CP_b09FwDataLen; 3113 fw.data_index = 0; 3114 fw.data = bce_CP_b09FwData; 3115 3116 fw.sbss_addr = bce_CP_b09FwSbssAddr; 3117 fw.sbss_len = bce_CP_b09FwSbssLen; 3118 fw.sbss_index = 0; 3119 fw.sbss = bce_CP_b09FwSbss; 3120 3121 fw.bss_addr = bce_CP_b09FwBssAddr; 3122 fw.bss_len = bce_CP_b09FwBssLen; 3123 fw.bss_index = 0; 3124 fw.bss = bce_CP_b09FwBss; 3125 3126 fw.rodata_addr = bce_CP_b09FwRodataAddr; 3127 fw.rodata_len = bce_CP_b09FwRodataLen; 3128 fw.rodata_index = 0; 3129 fw.rodata = bce_CP_b09FwRodata; 3130 } else { 3131 fw.ver_major = bce_CP_b06FwReleaseMajor; 3132 fw.ver_minor = bce_CP_b06FwReleaseMinor; 3133 fw.ver_fix = bce_CP_b06FwReleaseFix; 3134 fw.start_addr = bce_CP_b06FwStartAddr; 3135 3136 fw.text_addr = bce_CP_b06FwTextAddr; 3137 fw.text_len = bce_CP_b06FwTextLen; 3138 fw.text_index = 0; 3139 fw.text = bce_CP_b06FwText; 3140 3141 fw.data_addr = bce_CP_b06FwDataAddr; 3142 fw.data_len = bce_CP_b06FwDataLen; 3143 fw.data_index = 0; 3144 fw.data = bce_CP_b06FwData; 3145 3146 fw.sbss_addr = bce_CP_b06FwSbssAddr; 3147 fw.sbss_len = bce_CP_b06FwSbssLen; 3148 fw.sbss_index = 0; 3149 fw.sbss = bce_CP_b06FwSbss; 3150 3151 fw.bss_addr = bce_CP_b06FwBssAddr; 3152 fw.bss_len = bce_CP_b06FwBssLen; 3153 fw.bss_index = 0; 3154 fw.bss = bce_CP_b06FwBss; 3155 3156 fw.rodata_addr = bce_CP_b06FwRodataAddr; 3157 fw.rodata_len = bce_CP_b06FwRodataLen; 3158 fw.rodata_index = 0; 3159 fw.rodata = bce_CP_b06FwRodata; 3160 } 3161 3162 bce_load_cpu_fw(sc, &cpu_reg, &fw); 3163 bce_start_cpu(sc, &cpu_reg); 3164 } 3165 3166 /****************************************************************************/ 3167 /* Initialize the COM CPU. */ 3168 /* */ 3169 /* Returns: */ 3170 /* Nothing. 
*/ 3171 /****************************************************************************/ 3172 static void 3173 bce_init_com_cpu(struct bce_softc *sc) 3174 { 3175 struct cpu_reg cpu_reg; 3176 struct fw_info fw; 3177 3178 cpu_reg.mode = BCE_COM_CPU_MODE; 3179 cpu_reg.mode_value_halt = BCE_COM_CPU_MODE_SOFT_HALT; 3180 cpu_reg.mode_value_sstep = BCE_COM_CPU_MODE_STEP_ENA; 3181 cpu_reg.state = BCE_COM_CPU_STATE; 3182 cpu_reg.state_value_clear = 0xffffff; 3183 cpu_reg.gpr0 = BCE_COM_CPU_REG_FILE; 3184 cpu_reg.evmask = BCE_COM_CPU_EVENT_MASK; 3185 cpu_reg.pc = BCE_COM_CPU_PROGRAM_COUNTER; 3186 cpu_reg.inst = BCE_COM_CPU_INSTRUCTION; 3187 cpu_reg.bp = BCE_COM_CPU_HW_BREAKPOINT; 3188 cpu_reg.spad_base = BCE_COM_SCRATCH; 3189 cpu_reg.mips_view_base = 0x8000000; 3190 3191 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 || 3192 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) { 3193 fw.ver_major = bce_COM_b09FwReleaseMajor; 3194 fw.ver_minor = bce_COM_b09FwReleaseMinor; 3195 fw.ver_fix = bce_COM_b09FwReleaseFix; 3196 fw.start_addr = bce_COM_b09FwStartAddr; 3197 3198 fw.text_addr = bce_COM_b09FwTextAddr; 3199 fw.text_len = bce_COM_b09FwTextLen; 3200 fw.text_index = 0; 3201 fw.text = bce_COM_b09FwText; 3202 3203 fw.data_addr = bce_COM_b09FwDataAddr; 3204 fw.data_len = bce_COM_b09FwDataLen; 3205 fw.data_index = 0; 3206 fw.data = bce_COM_b09FwData; 3207 3208 fw.sbss_addr = bce_COM_b09FwSbssAddr; 3209 fw.sbss_len = bce_COM_b09FwSbssLen; 3210 fw.sbss_index = 0; 3211 fw.sbss = bce_COM_b09FwSbss; 3212 3213 fw.bss_addr = bce_COM_b09FwBssAddr; 3214 fw.bss_len = bce_COM_b09FwBssLen; 3215 fw.bss_index = 0; 3216 fw.bss = bce_COM_b09FwBss; 3217 3218 fw.rodata_addr = bce_COM_b09FwRodataAddr; 3219 fw.rodata_len = bce_COM_b09FwRodataLen; 3220 fw.rodata_index = 0; 3221 fw.rodata = bce_COM_b09FwRodata; 3222 } else { 3223 fw.ver_major = bce_COM_b06FwReleaseMajor; 3224 fw.ver_minor = bce_COM_b06FwReleaseMinor; 3225 fw.ver_fix = bce_COM_b06FwReleaseFix; 3226 fw.start_addr = bce_COM_b06FwStartAddr; 3227 3228 fw.text_addr = bce_COM_b06FwTextAddr; 3229 fw.text_len = bce_COM_b06FwTextLen; 3230 fw.text_index = 0; 3231 fw.text = bce_COM_b06FwText; 3232 3233 fw.data_addr = bce_COM_b06FwDataAddr; 3234 fw.data_len = bce_COM_b06FwDataLen; 3235 fw.data_index = 0; 3236 fw.data = bce_COM_b06FwData; 3237 3238 fw.sbss_addr = bce_COM_b06FwSbssAddr; 3239 fw.sbss_len = bce_COM_b06FwSbssLen; 3240 fw.sbss_index = 0; 3241 fw.sbss = bce_COM_b06FwSbss; 3242 3243 fw.bss_addr = bce_COM_b06FwBssAddr; 3244 fw.bss_len = bce_COM_b06FwBssLen; 3245 fw.bss_index = 0; 3246 fw.bss = bce_COM_b06FwBss; 3247 3248 fw.rodata_addr = bce_COM_b06FwRodataAddr; 3249 fw.rodata_len = bce_COM_b06FwRodataLen; 3250 fw.rodata_index = 0; 3251 fw.rodata = bce_COM_b06FwRodata; 3252 } 3253 3254 bce_load_cpu_fw(sc, &cpu_reg, &fw); 3255 bce_start_cpu(sc, &cpu_reg); 3256 } 3257 3258 /****************************************************************************/ 3259 /* Initialize the RV2P, RX, TX, TPAT, COM, and CP CPUs. */ 3260 /* */ 3261 /* Loads the firmware for each CPU and starts the CPU. */ 3262 /* */ 3263 /* Returns: */ 3264 /* Nothing. 
*/
3265 /****************************************************************************/
3266 static void
3267 bce_init_cpus(struct bce_softc *sc)
3268 {
3269     if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
3270         BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
3271         if (BCE_CHIP_REV(sc) == BCE_CHIP_REV_Ax) {
3272             bce_load_rv2p_fw(sc, bce_xi90_rv2p_proc1,
3273                 sizeof(bce_xi90_rv2p_proc1), RV2P_PROC1);
3274             bce_load_rv2p_fw(sc, bce_xi90_rv2p_proc2,
3275                 sizeof(bce_xi90_rv2p_proc2), RV2P_PROC2);
3276         } else {
3277             bce_load_rv2p_fw(sc, bce_xi_rv2p_proc1,
3278                 sizeof(bce_xi_rv2p_proc1), RV2P_PROC1);
3279             bce_load_rv2p_fw(sc, bce_xi_rv2p_proc2,
3280                 sizeof(bce_xi_rv2p_proc2), RV2P_PROC2);
3281         }
3282     } else {
3283         bce_load_rv2p_fw(sc, bce_rv2p_proc1,
3284             sizeof(bce_rv2p_proc1), RV2P_PROC1);
3285         bce_load_rv2p_fw(sc, bce_rv2p_proc2,
3286             sizeof(bce_rv2p_proc2), RV2P_PROC2);
3287     }
3288
3289     bce_init_rxp_cpu(sc);
3290     bce_init_txp_cpu(sc);
3291     bce_init_tpat_cpu(sc);
3292     bce_init_com_cpu(sc);
3293     bce_init_cp_cpu(sc);
3294 }
3295
3296 /****************************************************************************/
3297 /* Initialize context memory. */
3298 /* */
3299 /* Clears the memory associated with each Context ID (CID). */
3300 /* */
3301 /* Returns: */
3302 /* 0 for success, positive value for failure. */
3303 /****************************************************************************/
3304 static int
3305 bce_init_ctx(struct bce_softc *sc)
3306 {
3307     if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
3308         BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
3309         /* DRC: Replace this constant value with a #define. */
3310         int i, retry_cnt = 10;
3311         uint32_t val;
3312
3313         /*
3314          * BCM5709 context memory may be cached
3315          * in host memory so prepare the host memory
3316          * for access.
3317          */
3318         val = BCE_CTX_COMMAND_ENABLED | BCE_CTX_COMMAND_MEM_INIT |
3319             (1 << 12);
3320         val |= (BCM_PAGE_BITS - 8) << 16;
3321         REG_WR(sc, BCE_CTX_COMMAND, val);
3322
3323         /* Wait for mem init command to complete. */
3324         for (i = 0; i < retry_cnt; i++) {
3325             val = REG_RD(sc, BCE_CTX_COMMAND);
3326             if (!(val & BCE_CTX_COMMAND_MEM_INIT))
3327                 break;
3328             DELAY(2);
3329         }
3330         if (i == retry_cnt) {
3331             device_printf(sc->bce_dev,
3332                 "Context memory initialization failed!\n");
3333             return ETIMEDOUT;
3334         }
3335
3336         for (i = 0; i < sc->ctx_pages; i++) {
3337             int j;
3338
3339             /*
3340              * Set the physical address of the context
3341              * memory cache.
3342              */
3343             REG_WR(sc, BCE_CTX_HOST_PAGE_TBL_DATA0,
3344                 BCE_ADDR_LO(sc->ctx_paddr[i] & 0xfffffff0) |
3345                 BCE_CTX_HOST_PAGE_TBL_DATA0_VALID);
3346             REG_WR(sc, BCE_CTX_HOST_PAGE_TBL_DATA1,
3347                 BCE_ADDR_HI(sc->ctx_paddr[i]));
3348             REG_WR(sc, BCE_CTX_HOST_PAGE_TBL_CTRL,
3349                 i | BCE_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
3350
3351             /*
3352              * Verify that the context memory write was successful.
3353              */
3354             for (j = 0; j < retry_cnt; j++) {
3355                 val = REG_RD(sc, BCE_CTX_HOST_PAGE_TBL_CTRL);
3356                 if ((val &
3357                     BCE_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) == 0)
3358                     break;
3359                 DELAY(5);
3360             }
3361             if (j == retry_cnt) {
3362                 device_printf(sc->bce_dev,
3363                     "Failed to initialize context page!\n");
3364                 return ETIMEDOUT;
3365             }
3366         }
3367     } else {
3368         uint32_t vcid_addr, offset;
3369
3370         /*
3371          * For the 5706/5708, context memory is local to
3372          * the controller, so initialize the controller
3373          * context memory.
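         * The loop below zeroes all context memory below
         * GET_CID_ADDR(96), one PHY_CTX_SIZE window at a time, through
         * the paged BCE_CTX_VIRT_ADDR/BCE_CTX_PAGE_TBL interface.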
3374 */ 3375 3376 vcid_addr = GET_CID_ADDR(96); 3377 while (vcid_addr) { 3378 vcid_addr -= PHY_CTX_SIZE; 3379 3380 REG_WR(sc, BCE_CTX_VIRT_ADDR, 0); 3381 REG_WR(sc, BCE_CTX_PAGE_TBL, vcid_addr); 3382 3383 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) 3384 CTX_WR(sc, 0x00, offset, 0); 3385 3386 REG_WR(sc, BCE_CTX_VIRT_ADDR, vcid_addr); 3387 REG_WR(sc, BCE_CTX_PAGE_TBL, vcid_addr); 3388 } 3389 } 3390 return 0; 3391 } 3392 3393 /****************************************************************************/ 3394 /* Fetch the permanent MAC address of the controller. */ 3395 /* */ 3396 /* Returns: */ 3397 /* Nothing. */ 3398 /****************************************************************************/ 3399 static void 3400 bce_get_mac_addr(struct bce_softc *sc) 3401 { 3402 uint32_t mac_lo = 0, mac_hi = 0; 3403 3404 /* 3405 * The NetXtreme II bootcode populates various NIC 3406 * power-on and runtime configuration items in a 3407 * shared memory area. The factory configured MAC 3408 * address is available from both NVRAM and the 3409 * shared memory area so we'll read the value from 3410 * shared memory for speed. 3411 */ 3412 3413 mac_hi = bce_shmem_rd(sc, BCE_PORT_HW_CFG_MAC_UPPER); 3414 mac_lo = bce_shmem_rd(sc, BCE_PORT_HW_CFG_MAC_LOWER); 3415 3416 if (mac_lo == 0 && mac_hi == 0) { 3417 if_printf(&sc->arpcom.ac_if, "Invalid Ethernet address!\n"); 3418 } else { 3419 sc->eaddr[0] = (u_char)(mac_hi >> 8); 3420 sc->eaddr[1] = (u_char)(mac_hi >> 0); 3421 sc->eaddr[2] = (u_char)(mac_lo >> 24); 3422 sc->eaddr[3] = (u_char)(mac_lo >> 16); 3423 sc->eaddr[4] = (u_char)(mac_lo >> 8); 3424 sc->eaddr[5] = (u_char)(mac_lo >> 0); 3425 } 3426 } 3427 3428 /****************************************************************************/ 3429 /* Program the MAC address. */ 3430 /* */ 3431 /* Returns: */ 3432 /* Nothing. */ 3433 /****************************************************************************/ 3434 static void 3435 bce_set_mac_addr(struct bce_softc *sc) 3436 { 3437 const uint8_t *mac_addr = sc->eaddr; 3438 uint32_t val; 3439 3440 val = (mac_addr[0] << 8) | mac_addr[1]; 3441 REG_WR(sc, BCE_EMAC_MAC_MATCH0, val); 3442 3443 val = (mac_addr[2] << 24) | 3444 (mac_addr[3] << 16) | 3445 (mac_addr[4] << 8) | 3446 mac_addr[5]; 3447 REG_WR(sc, BCE_EMAC_MAC_MATCH1, val); 3448 } 3449 3450 /****************************************************************************/ 3451 /* Stop the controller. */ 3452 /* */ 3453 /* Returns: */ 3454 /* Nothing. */ 3455 /****************************************************************************/ 3456 static void 3457 bce_stop(struct bce_softc *sc) 3458 { 3459 struct ifnet *ifp = &sc->arpcom.ac_if; 3460 int i; 3461 3462 ASSERT_IFNET_SERIALIZED_ALL(ifp); 3463 3464 callout_stop(&sc->bce_tick_callout); 3465 3466 /* Disable the transmit/receive blocks. */ 3467 REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS, BCE_MISC_ENABLE_CLR_DEFAULT); 3468 REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS); 3469 DELAY(20); 3470 3471 bce_disable_intr(sc); 3472 3473 ifp->if_flags &= ~IFF_RUNNING; 3474 for (i = 0; i < sc->tx_ring_cnt; ++i) { 3475 ifsq_clr_oactive(sc->tx_rings[i].ifsq); 3476 ifsq_watchdog_stop(&sc->tx_rings[i].tx_watchdog); 3477 } 3478 3479 /* Free the RX lists. */ 3480 for (i = 0; i < sc->rx_ring_cnt; ++i) 3481 bce_free_rx_chain(&sc->rx_rings[i]); 3482 3483 /* Free TX buffers. 
/****************************************************************************/
/* Stop the controller.                                                     */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_stop(struct bce_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int i;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	callout_stop(&sc->bce_tick_callout);

	/* Disable the transmit/receive blocks. */
	REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS, BCE_MISC_ENABLE_CLR_DEFAULT);
	REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS);
	DELAY(20);

	bce_disable_intr(sc);

	ifp->if_flags &= ~IFF_RUNNING;
	for (i = 0; i < sc->tx_ring_cnt; ++i) {
		ifsq_clr_oactive(sc->tx_rings[i].ifsq);
		ifsq_watchdog_stop(&sc->tx_rings[i].tx_watchdog);
	}

	/* Free the RX lists. */
	for (i = 0; i < sc->rx_ring_cnt; ++i)
		bce_free_rx_chain(&sc->rx_rings[i]);

	/* Free TX buffers. */
	for (i = 0; i < sc->tx_ring_cnt; ++i)
		bce_free_tx_chain(&sc->tx_rings[i]);

	sc->bce_link = 0;
	sc->bce_coalchg_mask = 0;
}

static int
bce_reset(struct bce_softc *sc, uint32_t reset_code)
{
	uint32_t val;
	int i, rc = 0;

	/* Wait for pending PCI transactions to complete. */
	REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS,
	    BCE_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	    BCE_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	    BCE_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	    BCE_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	val = REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS);
	DELAY(5);

	/* Disable DMA */
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
		val = REG_RD(sc, BCE_MISC_NEW_CORE_CTL);
		val &= ~BCE_MISC_NEW_CORE_CTL_DMA_ENABLE;
		REG_WR(sc, BCE_MISC_NEW_CORE_CTL, val);
	}

	/* Assume bootcode is running. */
	sc->bce_fw_timed_out = 0;
	sc->bce_drv_cardiac_arrest = 0;

	/* Give the firmware a chance to prepare for the reset. */
	rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT0 | reset_code);
	if (rc) {
		if_printf(&sc->arpcom.ac_if,
		    "Firmware is not ready for reset\n");
		return rc;
	}

	/* Set a firmware reminder that this is a soft reset. */
	bce_shmem_wr(sc, BCE_DRV_RESET_SIGNATURE,
	    BCE_DRV_RESET_SIGNATURE_MAGIC);

	/* Dummy read to force the chip to complete all current transactions. */
	val = REG_RD(sc, BCE_MISC_ID);

	/* Chip reset. */
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
		REG_WR(sc, BCE_MISC_COMMAND, BCE_MISC_COMMAND_SW_RESET);
		REG_RD(sc, BCE_MISC_COMMAND);
		DELAY(5);

		val = BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		    BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		pci_write_config(sc->bce_dev, BCE_PCICFG_MISC_CONFIG, val, 4);
	} else {
		val = BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		    BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		    BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
		REG_WR(sc, BCE_PCICFG_MISC_CONFIG, val);

		/* Allow up to 100us for the reset to complete. */
		for (i = 0; i < 10; i++) {
			val = REG_RD(sc, BCE_PCICFG_MISC_CONFIG);
			if ((val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			    BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
				break;
			DELAY(10);
		}

		/* Check that reset completed successfully. */
		if (val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		    BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
			if_printf(&sc->arpcom.ac_if, "Reset failed!\n");
			return EBUSY;
		}
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(sc, BCE_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		if_printf(&sc->arpcom.ac_if, "Byte swap is incorrect!\n");
		return ENODEV;
	}

	/* Just completed a reset, assume that firmware is running again. */
	sc->bce_fw_timed_out = 0;
	sc->bce_drv_cardiac_arrest = 0;

	/* Wait for the firmware to finish its initialization. */
	rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT1 | reset_code);
	if (rc) {
		if_printf(&sc->arpcom.ac_if,
		    "Firmware did not complete initialization!\n");
	}

	if (sc->bce_irq_type == PCI_INTR_TYPE_MSIX) {
		bce_setup_msix_table(sc);
		/* Prevent MSI-X table reads and writes from timing out. */
		REG_WR(sc, BCE_MISC_ECO_HW_CTL,
		    BCE_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
	}
	return rc;
}
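/*
 * The reset path above is one leg of a three-step handshake with the
 * bootcode; the full sequence, in sketch form (bce_fw_sync() blocks
 * until the firmware acknowledges the message or times out):
 *
 *	bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT0 | reset_code);
 *	... chip reset ...
 *	bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT1 | reset_code);
 *	... block initialization ...
 *	bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT2 | BCE_DRV_MSG_CODE_RESET);
 *
 * The WAIT2 step is issued from bce_blockinit() below, just before
 * the MAC blocks are enabled.
 */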
static int
bce_chipinit(struct bce_softc *sc)
{
	uint32_t val;
	int rc = 0;

	/* Make sure the interrupt is not active. */
	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, BCE_PCICFG_INT_ACK_CMD_MASK_INT);
	REG_RD(sc, BCE_PCICFG_INT_ACK_CMD);

	/*
	 * Initialize DMA byte/word swapping, configure the number of DMA
	 * channels and PCI clock compensation delay.
	 */
	val = BCE_DMA_CONFIG_DATA_BYTE_SWAP |
	    BCE_DMA_CONFIG_DATA_WORD_SWAP |
#if BYTE_ORDER == BIG_ENDIAN
	    BCE_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	    BCE_DMA_CONFIG_CNTL_WORD_SWAP |
	    DMA_READ_CHANS << 12 |
	    DMA_WRITE_CHANS << 16;

	val |= (0x2 << 20) | BCE_DMA_CONFIG_CNTL_PCI_COMP_DLY;

	if ((sc->bce_flags & BCE_PCIX_FLAG) && sc->bus_speed_mhz == 133)
		val |= BCE_DMA_CONFIG_PCI_FAST_CLK_CMP;

	/*
	 * This setting resolves a problem observed on certain Intel PCI
	 * chipsets that cannot handle multiple outstanding DMA operations.
	 * See errata E9_5706A1_65.
	 */
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706 &&
	    BCE_CHIP_ID(sc) != BCE_CHIP_ID_5706_A0 &&
	    !(sc->bce_flags & BCE_PCIX_FLAG))
		val |= BCE_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(sc, BCE_DMA_CONFIG, val);

	/* Enable the RX_V2P and Context state machines before access. */
	REG_WR(sc, BCE_MISC_ENABLE_SET_BITS,
	    BCE_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	    BCE_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	    BCE_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts. */
	rc = bce_init_ctx(sc);
	if (rc != 0)
		return rc;

	/* Initialize the on-board CPUs. */
	bce_init_cpus(sc);

	/* Enable management frames (NC-SI) to flow to the MCP. */
	if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) {
		val = REG_RD(sc, BCE_RPM_MGMT_PKT_CTRL) |
		    BCE_RPM_MGMT_PKT_CTRL_MGMT_EN;
		REG_WR(sc, BCE_RPM_MGMT_PKT_CTRL, val);
	}

	/* Prepare NVRAM for access. */
	rc = bce_init_nvram(sc);
	if (rc != 0)
		return rc;

	/* Set the kernel bypass block size. */
	val = REG_RD(sc, BCE_MQ_CONFIG);
	val &= ~BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;

	/* Enable bins used on the 5709/5716. */
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
		val |= BCE_MQ_CONFIG_BIN_MQ_MODE;
		if (BCE_CHIP_ID(sc) == BCE_CHIP_ID_5709_A1)
			val |= BCE_MQ_CONFIG_HALT_DIS;
	}

	REG_WR(sc, BCE_MQ_CONFIG, val);

	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(sc, BCE_MQ_KNL_BYP_WIND_START, val);
	REG_WR(sc, BCE_MQ_KNL_WIND_END, val);

	/* Set the page size and clear the RV2P processor stall bits. */
	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(sc, BCE_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(sc, BCE_TBDR_CONFIG);
	val &= ~BCE_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(sc, BCE_TBDR_CONFIG, val);

	/* Set the perfect match control register to default. */
	REG_WR_IND(sc, BCE_RXP_PM_CTRL, 0);

	return 0;
}
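/*
 * The (BCM_PAGE_BITS - 8) expressions above encode the host page size
 * for the context, RV2P and TBDR blocks.  The hardware field appears
 * to count powers of two above 256 bytes, so with the usual 4KiB
 * pages:
 *
 *	BCM_PAGE_BITS = 12  =>  12 - 8 = 4  (256B << 4 == 4KiB)
 *
 * The field position differs per block (bits 16+ for BCE_CTX_COMMAND,
 * bits 24+ for BCE_RV2P_CONFIG/BCE_TBDR_CONFIG), hence the varying
 * shifts.
 */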
/****************************************************************************/
/* Initialize the controller in preparation to send/receive traffic.       */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
static int
bce_blockinit(struct bce_softc *sc)
{
	uint32_t reg, val;
	int i;

	/* Load the hardware default MAC address. */
	bce_set_mac_addr(sc);

	/* Set the Ethernet backoff seed value. */
	val = sc->eaddr[0] + (sc->eaddr[1] << 8) + (sc->eaddr[2] << 16) +
	    sc->eaddr[3] + (sc->eaddr[4] << 8) + (sc->eaddr[5] << 16);
	REG_WR(sc, BCE_EMAC_BACKOFF_SEED, val);

	sc->rx_mode = BCE_EMAC_RX_MODE_SORT_MODE;

	/* Set up link change interrupt generation. */
	REG_WR(sc, BCE_EMAC_ATTENTION_ENA, BCE_EMAC_ATTENTION_ENA_LINK);

	/* Program the physical address of the status block. */
	REG_WR(sc, BCE_HC_STATUS_ADDR_L, BCE_ADDR_LO(sc->status_block_paddr));
	REG_WR(sc, BCE_HC_STATUS_ADDR_H, BCE_ADDR_HI(sc->status_block_paddr));

	/* Program the physical address of the statistics block. */
	REG_WR(sc, BCE_HC_STATISTICS_ADDR_L,
	    BCE_ADDR_LO(sc->stats_block_paddr));
	REG_WR(sc, BCE_HC_STATISTICS_ADDR_H,
	    BCE_ADDR_HI(sc->stats_block_paddr));

	/* Program various host coalescing parameters. */
	REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
	    (sc->bce_tx_quick_cons_trip_int << 16) |
	    sc->bce_tx_quick_cons_trip);
	REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
	    (sc->bce_rx_quick_cons_trip_int << 16) |
	    sc->bce_rx_quick_cons_trip);
	REG_WR(sc, BCE_HC_COMP_PROD_TRIP,
	    (sc->bce_comp_prod_trip_int << 16) | sc->bce_comp_prod_trip);
	REG_WR(sc, BCE_HC_TX_TICKS,
	    (sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks);
	REG_WR(sc, BCE_HC_RX_TICKS,
	    (sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks);
	REG_WR(sc, BCE_HC_COM_TICKS,
	    (sc->bce_com_ticks_int << 16) | sc->bce_com_ticks);
	REG_WR(sc, BCE_HC_CMD_TICKS,
	    (sc->bce_cmd_ticks_int << 16) | sc->bce_cmd_ticks);
	REG_WR(sc, BCE_HC_STATS_TICKS, (sc->bce_stats_ticks & 0xffff00));
	REG_WR(sc, BCE_HC_STAT_COLLECT_TICKS, 0xbb8);	/* 3ms */

	if (sc->bce_irq_type == PCI_INTR_TYPE_MSIX)
		REG_WR(sc, BCE_HC_MSIX_BIT_VECTOR, BCE_HC_MSIX_BIT_VECTOR_VAL);

	val = BCE_HC_CONFIG_TX_TMR_MODE | BCE_HC_CONFIG_COLLECT_STATS;
	if ((sc->bce_flags & BCE_ONESHOT_MSI_FLAG) ||
	    sc->bce_irq_type == PCI_INTR_TYPE_MSIX) {
		if (bootverbose) {
			if (sc->bce_irq_type == PCI_INTR_TYPE_MSIX) {
				if_printf(&sc->arpcom.ac_if,
				    "using MSI-X\n");
			} else {
				if_printf(&sc->arpcom.ac_if,
				    "using oneshot MSI\n");
			}
		}
		val |= BCE_HC_CONFIG_ONE_SHOT | BCE_HC_CONFIG_USE_INT_PARAM;
		if (sc->bce_irq_type == PCI_INTR_TYPE_MSIX)
			val |= BCE_HC_CONFIG_SB_ADDR_INC_128B;
	}
	REG_WR(sc, BCE_HC_CONFIG, val);

	for (i = 1; i < sc->rx_ring_cnt; ++i) {
		uint32_t base;

		base = ((i - 1) * BCE_HC_SB_CONFIG_SIZE) + BCE_HC_SB_CONFIG_1;
		KKASSERT(base <= BCE_HC_SB_CONFIG_8);

		REG_WR(sc, base,
		    BCE_HC_SB_CONFIG_1_TX_TMR_MODE |
		    /* BCE_HC_SB_CONFIG_1_RX_TMR_MODE | */
		    BCE_HC_SB_CONFIG_1_ONE_SHOT);

		REG_WR(sc, base + BCE_HC_TX_QUICK_CONS_TRIP_OFF,
		    (sc->bce_tx_quick_cons_trip_int << 16) |
		    sc->bce_tx_quick_cons_trip);
		REG_WR(sc, base + BCE_HC_RX_QUICK_CONS_TRIP_OFF,
		    (sc->bce_rx_quick_cons_trip_int << 16) |
		    sc->bce_rx_quick_cons_trip);
		REG_WR(sc, base + BCE_HC_TX_TICKS_OFF,
		    (sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks);
		REG_WR(sc, base + BCE_HC_RX_TICKS_OFF,
		    (sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks);
	}

	/* Clear the internal statistics counters. */
	REG_WR(sc, BCE_HC_COMMAND, BCE_HC_COMMAND_CLR_STAT_NOW);

	/* Verify that bootcode is running. */
	reg = bce_shmem_rd(sc, BCE_DEV_INFO_SIGNATURE);

	if ((reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
	    BCE_DEV_INFO_SIGNATURE_MAGIC) {
		if_printf(&sc->arpcom.ac_if,
		    "Bootcode not running! Found: 0x%08X, "
		    "Expected: 0x%08X\n",
		    reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK,
		    BCE_DEV_INFO_SIGNATURE_MAGIC);
		return ENODEV;
	}

	/* Enable DMA */
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
		val = REG_RD(sc, BCE_MISC_NEW_CORE_CTL);
		val |= BCE_MISC_NEW_CORE_CTL_DMA_ENABLE;
		REG_WR(sc, BCE_MISC_NEW_CORE_CTL, val);
	}

	/* Allow bootcode to apply any additional fixes before enabling MAC. */
	bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT2 | BCE_DRV_MSG_CODE_RESET);

	/* Enable link state change interrupt generation. */
	REG_WR(sc, BCE_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);

	/* Enable the RXP. */
	bce_start_rxp_cpu(sc);

	/* Disable management frames (NC-SI) from flowing to the MCP. */
	if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) {
		val = REG_RD(sc, BCE_RPM_MGMT_PKT_CTRL) &
		    ~BCE_RPM_MGMT_PKT_CTRL_MGMT_EN;
		REG_WR(sc, BCE_RPM_MGMT_PKT_CTRL, val);
	}

	/* Enable all remaining blocks in the MAC. */
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
		REG_WR(sc, BCE_MISC_ENABLE_SET_BITS,
		    BCE_MISC_ENABLE_DEFAULT_XI);
	} else {
		REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, BCE_MISC_ENABLE_DEFAULT);
	}
	REG_RD(sc, BCE_MISC_ENABLE_SET_BITS);
	DELAY(20);

	/* Save the current host coalescing block settings. */
	sc->hc_command = REG_RD(sc, BCE_HC_COMMAND);

	return 0;
}
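/*
 * Each host coalescing register programmed above packs two
 * thresholds: the value used while an interrupt is being serviced in
 * the high 16 bits and the normal value in the low 16 bits.  For
 * example, with bce_tx_quick_cons_trip_int == 20 and
 * bce_tx_quick_cons_trip == 10:
 *
 *	REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP, (20 << 16) | 10);
 *
 * writes 0x0014000a: coalesce after 10 TX completions normally, and
 * after 20 during interrupt servicing.
 */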
/****************************************************************************/
/* Encapsulate an mbuf cluster into the rx_bd chain.                        */
/*                                                                          */
/* The NetXtreme II can support Jumbo frames by using multiple rx_bd's.    */
/* This routine will map an mbuf cluster into 1 or more rx_bd's as         */
/* necessary.                                                               */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
static int
bce_newbuf_std(struct bce_rx_ring *rxr, uint16_t *prod, uint16_t chain_prod,
    uint32_t *prod_bseq, int init)
{
	struct bce_rx_buf *rx_buf;
	bus_dmamap_t map;
	bus_dma_segment_t seg;
	struct mbuf *m_new;
	int error, nseg;

	/* This is a new mbuf allocation. */
	m_new = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m_new == NULL)
		return ENOBUFS;

	m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;

	/* Map the mbuf cluster into device memory. */
	error = bus_dmamap_load_mbuf_segment(rxr->rx_mbuf_tag,
	    rxr->rx_mbuf_tmpmap, m_new, &seg, 1, &nseg, BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m_new);
		if (init) {
			if_printf(&rxr->sc->arpcom.ac_if,
			    "Error mapping mbuf into RX chain!\n");
		}
		return error;
	}

	rx_buf = &rxr->rx_bufs[chain_prod];
	if (rx_buf->rx_mbuf_ptr != NULL)
		bus_dmamap_unload(rxr->rx_mbuf_tag, rx_buf->rx_mbuf_map);

	map = rx_buf->rx_mbuf_map;
	rx_buf->rx_mbuf_map = rxr->rx_mbuf_tmpmap;
	rxr->rx_mbuf_tmpmap = map;

	/* Save the mbuf and update our counter. */
	rx_buf->rx_mbuf_ptr = m_new;
	rx_buf->rx_mbuf_paddr = seg.ds_addr;
	rxr->free_rx_bd--;

	bce_setup_rxdesc_std(rxr, chain_prod, prod_bseq);

	return 0;
}
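/*
 * Design note on the map swap in bce_newbuf_std() above: the new mbuf
 * is loaded into the ring's spare DMA map (rx_mbuf_tmpmap) first so
 * that a failed load leaves the map tracking the currently posted
 * mbuf untouched.  Only after a successful load are the two maps
 * exchanged:
 *
 *	map = rx_buf->rx_mbuf_map;
 *	rx_buf->rx_mbuf_map = rxr->rx_mbuf_tmpmap;
 *	rxr->rx_mbuf_tmpmap = map;
 *
 * making the old map the new spare.
 */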
static void
bce_setup_rxdesc_std(struct bce_rx_ring *rxr, uint16_t chain_prod,
    uint32_t *prod_bseq)
{
	const struct bce_rx_buf *rx_buf;
	struct rx_bd *rxbd;
	bus_addr_t paddr;
	int len;

	rx_buf = &rxr->rx_bufs[chain_prod];
	paddr = rx_buf->rx_mbuf_paddr;
	len = rx_buf->rx_mbuf_ptr->m_len;

	/* Setup the rx_bd for the first segment. */
	rxbd = &rxr->rx_bd_chain[RX_PAGE(chain_prod)][RX_IDX(chain_prod)];

	rxbd->rx_bd_haddr_lo = htole32(BCE_ADDR_LO(paddr));
	rxbd->rx_bd_haddr_hi = htole32(BCE_ADDR_HI(paddr));
	rxbd->rx_bd_len = htole32(len);
	rxbd->rx_bd_flags = htole32(RX_BD_FLAGS_START);
	*prod_bseq += len;

	rxbd->rx_bd_flags |= htole32(RX_BD_FLAGS_END);
}

/****************************************************************************/
/* Initialize the TX context memory.                                        */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing                                                                */
/****************************************************************************/
static void
bce_init_tx_context(struct bce_tx_ring *txr)
{
	uint32_t val;

	/* Initialize the context ID for an L2 TX chain. */
	if (BCE_CHIP_NUM(txr->sc) == BCE_CHIP_NUM_5709 ||
	    BCE_CHIP_NUM(txr->sc) == BCE_CHIP_NUM_5716) {
		/* Set the CID type to support an L2 connection. */
		val = BCE_L2CTX_TX_TYPE_TYPE_L2 | BCE_L2CTX_TX_TYPE_SIZE_L2;
		CTX_WR(txr->sc, GET_CID_ADDR(txr->tx_cid),
		    BCE_L2CTX_TX_TYPE_XI, val);
		val = BCE_L2CTX_TX_CMD_TYPE_TYPE_L2 | (8 << 16);
		CTX_WR(txr->sc, GET_CID_ADDR(txr->tx_cid),
		    BCE_L2CTX_TX_CMD_TYPE_XI, val);

		/* Point the hardware to the first page in the chain. */
		val = BCE_ADDR_HI(txr->tx_bd_chain_paddr[0]);
		CTX_WR(txr->sc, GET_CID_ADDR(txr->tx_cid),
		    BCE_L2CTX_TX_TBDR_BHADDR_HI_XI, val);
		val = BCE_ADDR_LO(txr->tx_bd_chain_paddr[0]);
		CTX_WR(txr->sc, GET_CID_ADDR(txr->tx_cid),
		    BCE_L2CTX_TX_TBDR_BHADDR_LO_XI, val);
	} else {
		/* Set the CID type to support an L2 connection. */
		val = BCE_L2CTX_TX_TYPE_TYPE_L2 | BCE_L2CTX_TX_TYPE_SIZE_L2;
		CTX_WR(txr->sc, GET_CID_ADDR(txr->tx_cid),
		    BCE_L2CTX_TX_TYPE, val);
		val = BCE_L2CTX_TX_CMD_TYPE_TYPE_L2 | (8 << 16);
		CTX_WR(txr->sc, GET_CID_ADDR(txr->tx_cid),
		    BCE_L2CTX_TX_CMD_TYPE, val);

		/* Point the hardware to the first page in the chain. */
		val = BCE_ADDR_HI(txr->tx_bd_chain_paddr[0]);
		CTX_WR(txr->sc, GET_CID_ADDR(txr->tx_cid),
		    BCE_L2CTX_TX_TBDR_BHADDR_HI, val);
		val = BCE_ADDR_LO(txr->tx_bd_chain_paddr[0]);
		CTX_WR(txr->sc, GET_CID_ADDR(txr->tx_cid),
		    BCE_L2CTX_TX_TBDR_BHADDR_LO, val);
	}
}
/****************************************************************************/
/* Allocate memory and initialize the TX data structures.                   */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
static int
bce_init_tx_chain(struct bce_tx_ring *txr)
{
	struct tx_bd *txbd;
	int i, rc = 0;

	/* Set the initial TX producer/consumer indices. */
	txr->tx_prod = 0;
	txr->tx_cons = 0;
	txr->tx_prod_bseq = 0;
	txr->used_tx_bd = 0;
	txr->max_tx_bd = USABLE_TX_BD(txr);

	/*
	 * The NetXtreme II supports a linked-list structure called
	 * a Buffer Descriptor Chain (or BD chain).  A BD chain
	 * consists of a series of 1 or more chain pages, each of which
	 * consists of a fixed number of BD entries.
	 * The last BD entry on each page is a pointer to the next page
	 * in the chain, and the last pointer in the BD chain
	 * points back to the beginning of the chain.
	 */

	/* Set the TX next pointer chain entries. */
	for (i = 0; i < txr->tx_pages; i++) {
		int j;

		txbd = &txr->tx_bd_chain[i][USABLE_TX_BD_PER_PAGE];

		/* Check if we've reached the last page. */
		if (i == (txr->tx_pages - 1))
			j = 0;
		else
			j = i + 1;

		txbd->tx_bd_haddr_hi =
		    htole32(BCE_ADDR_HI(txr->tx_bd_chain_paddr[j]));
		txbd->tx_bd_haddr_lo =
		    htole32(BCE_ADDR_LO(txr->tx_bd_chain_paddr[j]));
	}
	bce_init_tx_context(txr);

	return(rc);
}
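/*
 * BD chain layout described in bce_init_tx_chain() above (the RX
 * chain built by bce_init_rx_chain() below is linked the same way);
 * with tx_pages == 2 the next-pointer entries form a ring:
 *
 *	page 0: [bd 0][bd 1] ... [bd n-1][next -> page 1]
 *	page 1: [bd 0][bd 1] ... [bd n-1][next -> page 0]
 *
 * where n == USABLE_TX_BD_PER_PAGE.  The next-pointer slot holds the
 * 64 bit physical address of the following page, which is why only
 * USABLE_TX_BD() of the TOTAL_TX_BD() descriptors can carry frames.
 */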
/****************************************************************************/
/* Free memory and clear the TX data structures.                            */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_free_tx_chain(struct bce_tx_ring *txr)
{
	int i;

	/* Unmap, unload, and free any mbufs still in the TX mbuf chain. */
	for (i = 0; i < TOTAL_TX_BD(txr); i++) {
		struct bce_tx_buf *tx_buf = &txr->tx_bufs[i];

		if (tx_buf->tx_mbuf_ptr != NULL) {
			bus_dmamap_unload(txr->tx_mbuf_tag,
			    tx_buf->tx_mbuf_map);
			m_freem(tx_buf->tx_mbuf_ptr);
			tx_buf->tx_mbuf_ptr = NULL;
		}
	}

	/* Clear each TX chain page. */
	for (i = 0; i < txr->tx_pages; i++)
		bzero(txr->tx_bd_chain[i], BCE_TX_CHAIN_PAGE_SZ);
	txr->used_tx_bd = 0;
}

/****************************************************************************/
/* Initialize the RX context memory.                                        */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing                                                                */
/****************************************************************************/
static void
bce_init_rx_context(struct bce_rx_ring *rxr)
{
	uint32_t val;

	/* Initialize the context ID for an L2 RX chain. */
	val = BCE_L2CTX_RX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE |
	    BCE_L2CTX_RX_CTX_TYPE_SIZE_L2 | (0x02 << 8);

	/*
	 * Set the level for generating pause frames
	 * when the number of available rx_bd's gets
	 * too low (the low watermark) and the level
	 * when pause frames can be stopped (the high
	 * watermark).
	 */
	if (BCE_CHIP_NUM(rxr->sc) == BCE_CHIP_NUM_5709 ||
	    BCE_CHIP_NUM(rxr->sc) == BCE_CHIP_NUM_5716) {
		uint32_t lo_water, hi_water;

		lo_water = BCE_L2CTX_RX_LO_WATER_MARK_DEFAULT;
		hi_water = USABLE_RX_BD(rxr) / 4;

		lo_water /= BCE_L2CTX_RX_LO_WATER_MARK_SCALE;
		hi_water /= BCE_L2CTX_RX_HI_WATER_MARK_SCALE;

		if (hi_water > 0xf)
			hi_water = 0xf;
		else if (hi_water == 0)
			lo_water = 0;
		val |= lo_water |
		    (hi_water << BCE_L2CTX_RX_HI_WATER_MARK_SHIFT);
	}

	CTX_WR(rxr->sc, GET_CID_ADDR(rxr->rx_cid),
	    BCE_L2CTX_RX_CTX_TYPE, val);

	/* Setup the MQ BIN mapping for l2_ctx_host_bseq. */
	if (BCE_CHIP_NUM(rxr->sc) == BCE_CHIP_NUM_5709 ||
	    BCE_CHIP_NUM(rxr->sc) == BCE_CHIP_NUM_5716) {
		val = REG_RD(rxr->sc, BCE_MQ_MAP_L2_5);
		REG_WR(rxr->sc, BCE_MQ_MAP_L2_5, val | BCE_MQ_MAP_L2_5_ARM);
	}

	/* Point the hardware to the first page in the chain. */
	val = BCE_ADDR_HI(rxr->rx_bd_chain_paddr[0]);
	CTX_WR(rxr->sc, GET_CID_ADDR(rxr->rx_cid),
	    BCE_L2CTX_RX_NX_BDHADDR_HI, val);
	val = BCE_ADDR_LO(rxr->rx_bd_chain_paddr[0]);
	CTX_WR(rxr->sc, GET_CID_ADDR(rxr->rx_cid),
	    BCE_L2CTX_RX_NX_BDHADDR_LO, val);
}

/****************************************************************************/
/* Allocate memory and initialize the RX data structures.                   */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
static int
bce_init_rx_chain(struct bce_rx_ring *rxr)
{
	struct rx_bd *rxbd;
	int i, rc = 0;
	uint16_t prod, chain_prod;
	uint32_t prod_bseq;

	/* Initialize the RX producer and consumer indices. */
	rxr->rx_prod = 0;
	rxr->rx_cons = 0;
	rxr->rx_prod_bseq = 0;
	rxr->free_rx_bd = USABLE_RX_BD(rxr);
	rxr->max_rx_bd = USABLE_RX_BD(rxr);

	/* Clear the cached status index. */
	rxr->last_status_idx = 0;

	/* Initialize the RX next pointer chain entries. */
	for (i = 0; i < rxr->rx_pages; i++) {
		int j;

		rxbd = &rxr->rx_bd_chain[i][USABLE_RX_BD_PER_PAGE];

		/* Check if we've reached the last page. */
		if (i == (rxr->rx_pages - 1))
			j = 0;
		else
			j = i + 1;

		/* Setup the chain page pointers. */
		rxbd->rx_bd_haddr_hi =
		    htole32(BCE_ADDR_HI(rxr->rx_bd_chain_paddr[j]));
		rxbd->rx_bd_haddr_lo =
		    htole32(BCE_ADDR_LO(rxr->rx_bd_chain_paddr[j]));
	}

	/* Allocate mbuf clusters for the rx_bd chain. */
	prod = prod_bseq = 0;
	while (prod < TOTAL_RX_BD(rxr)) {
		chain_prod = RX_CHAIN_IDX(rxr, prod);
		if (bce_newbuf_std(rxr, &prod, chain_prod, &prod_bseq, 1)) {
			if_printf(&rxr->sc->arpcom.ac_if,
			    "Error filling RX chain: rx_bd[0x%04X]!\n",
			    chain_prod);
			rc = ENOBUFS;
			break;
		}
		prod = NEXT_RX_BD(prod);
	}

	/* Save the RX chain producer index. */
	rxr->rx_prod = prod;
	rxr->rx_prod_bseq = prod_bseq;

	/* Tell the chip about the waiting rx_bd's. */
	REG_WR16(rxr->sc, MB_GET_CID_ADDR(rxr->rx_cid) + BCE_L2MQ_RX_HOST_BDIDX,
	    rxr->rx_prod);
	REG_WR(rxr->sc, MB_GET_CID_ADDR(rxr->rx_cid) + BCE_L2MQ_RX_HOST_BSEQ,
	    rxr->rx_prod_bseq);

	bce_init_rx_context(rxr);

	return(rc);
}
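/*
 * Index bookkeeping used by the RX fill loop above:
 *
 *	prod		free-running producer index (0, 1, 2, ...)
 *	chain_prod	RX_CHAIN_IDX(rxr, prod): prod folded onto the
 *			chain with the per-page next-pointer slots
 *			skipped, i.e. the rx_bd actually filled
 *	prod_bseq	running byte count of all posted buffers; the
 *			hardware compares this against its own count
 *			to learn how much new buffer space arrived
 *
 * Both rx_prod and rx_prod_bseq are handed to the chip through the
 * BCE_L2MQ_RX_HOST_BDIDX/BCE_L2MQ_RX_HOST_BSEQ mailboxes.
 */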
/****************************************************************************/
/* Free memory and clear the RX data structures.                            */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_free_rx_chain(struct bce_rx_ring *rxr)
{
	int i;

	/* Free any mbufs still in the RX mbuf chain. */
	for (i = 0; i < TOTAL_RX_BD(rxr); i++) {
		struct bce_rx_buf *rx_buf = &rxr->rx_bufs[i];

		if (rx_buf->rx_mbuf_ptr != NULL) {
			bus_dmamap_unload(rxr->rx_mbuf_tag,
			    rx_buf->rx_mbuf_map);
			m_freem(rx_buf->rx_mbuf_ptr);
			rx_buf->rx_mbuf_ptr = NULL;
		}
	}

	/* Clear each RX chain page. */
	for (i = 0; i < rxr->rx_pages; i++)
		bzero(rxr->rx_bd_chain[i], BCE_RX_CHAIN_PAGE_SZ);
}

/****************************************************************************/
/* Set media options.                                                       */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
static int
bce_ifmedia_upd(struct ifnet *ifp)
{
	struct bce_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->bce_miibus);
	int error = 0;

	/*
	 * 'mii' will be NULL when this function is called on the
	 * following code path: bce_attach() -> bce_mgmt_init()
	 */
	if (mii != NULL) {
		/* Make sure the MII bus has been enumerated. */
		sc->bce_link = 0;
		if (mii->mii_instance) {
			struct mii_softc *miisc;

			LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
				mii_phy_reset(miisc);
		}
		error = mii_mediachg(mii);
	}
	return error;
}

/****************************************************************************/
/* Reports current media status.                                            */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct bce_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->bce_miibus);

	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}
/****************************************************************************/
/* Handles PHY generated interrupt events.                                  */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_phy_intr(struct bce_softc *sc)
{
	uint32_t new_link_state, old_link_state;
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ASSERT_SERIALIZED(&sc->main_serialize);

	new_link_state = sc->status_block->status_attn_bits &
	    STATUS_ATTN_BITS_LINK_STATE;
	old_link_state = sc->status_block->status_attn_bits_ack &
	    STATUS_ATTN_BITS_LINK_STATE;

	/* Handle any changes if the link state has changed. */
	if (new_link_state != old_link_state) {	/* XXX redundant? */
		/* Update the status_attn_bits_ack field in the status block. */
		if (new_link_state) {
			REG_WR(sc, BCE_PCICFG_STATUS_BIT_SET_CMD,
			    STATUS_ATTN_BITS_LINK_STATE);
			if (bootverbose)
				if_printf(ifp, "Link is now UP.\n");
		} else {
			REG_WR(sc, BCE_PCICFG_STATUS_BIT_CLEAR_CMD,
			    STATUS_ATTN_BITS_LINK_STATE);
			if (bootverbose)
				if_printf(ifp, "Link is now DOWN.\n");
		}

		/*
		 * Assume link is down and allow tick routine to
		 * update the state based on the actual media state.
		 */
		sc->bce_link = 0;
		callout_stop(&sc->bce_tick_callout);
		bce_tick_serialized(sc);
	}

	/* Acknowledge the link change interrupt. */
	REG_WR(sc, BCE_EMAC_STATUS, BCE_EMAC_STATUS_LINK_CHANGE);
}

/****************************************************************************/
/* Reads the receive consumer value from the status block (skipping over   */
/* chain page pointer if necessary).                                        */
/*                                                                          */
/* Returns:                                                                 */
/*   hw_cons                                                                */
/****************************************************************************/
static __inline uint16_t
bce_get_hw_rx_cons(struct bce_rx_ring *rxr)
{
	uint16_t hw_cons = *rxr->rx_hw_cons;

	if ((hw_cons & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
		hw_cons++;
	return hw_cons;
}
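/*
 * Worked example of the page-pointer skip in bce_get_hw_rx_cons()
 * above, assuming 256 rx_bd entries per page so that
 * USABLE_RX_BD_PER_PAGE == 255: slot 255 of each page is the
 * next-page pointer, and a hardware consumer value with all of those
 * low bits set refers to that slot, so
 *
 *	(255 & 255) == 255  =>  hw_cons++  =>  256
 *
 * advances the index past the pointer to the first real descriptor
 * of the next page.  bce_get_hw_tx_cons() applies the same
 * correction on the TX side.
 */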
/****************************************************************************/
/* Handles received frame interrupt events.                                 */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_rx_intr(struct bce_rx_ring *rxr, int count, uint16_t hw_cons)
{
	struct ifnet *ifp = &rxr->sc->arpcom.ac_if;
	uint16_t sw_cons, sw_chain_cons, sw_prod, sw_chain_prod;
	uint32_t sw_prod_bseq;

	ASSERT_SERIALIZED(&rxr->rx_serialize);

	/* Get working copies of the driver's view of the RX indices. */
	sw_cons = rxr->rx_cons;
	sw_prod = rxr->rx_prod;
	sw_prod_bseq = rxr->rx_prod_bseq;

	/* Scan through the receive chain as long as there is work to do. */
	while (sw_cons != hw_cons) {
		struct pktinfo pi0, *pi = NULL;
		struct bce_rx_buf *rx_buf;
		struct mbuf *m = NULL;
		struct l2_fhdr *l2fhdr = NULL;
		unsigned int len;
		uint32_t status = 0;

#ifdef IFPOLL_ENABLE
		if (count >= 0 && count-- == 0)
			break;
#endif

		/*
		 * Convert the producer/consumer indices
		 * to an actual rx_bd index.
		 */
		sw_chain_cons = RX_CHAIN_IDX(rxr, sw_cons);
		sw_chain_prod = RX_CHAIN_IDX(rxr, sw_prod);
		rx_buf = &rxr->rx_bufs[sw_chain_cons];

		rxr->free_rx_bd++;

		/* The mbuf is stored with the last rx_bd entry of a packet. */
		if (rx_buf->rx_mbuf_ptr != NULL) {
			if (sw_chain_cons != sw_chain_prod) {
				if_printf(ifp, "RX cons(%d) != prod(%d), "
				    "drop!\n", sw_chain_cons, sw_chain_prod);
				IFNET_STAT_INC(ifp, ierrors, 1);

				bce_setup_rxdesc_std(rxr, sw_chain_cons,
				    &sw_prod_bseq);
				m = NULL;
				goto bce_rx_int_next_rx;
			}

			/* Unmap the mbuf from DMA space. */
			bus_dmamap_sync(rxr->rx_mbuf_tag, rx_buf->rx_mbuf_map,
			    BUS_DMASYNC_POSTREAD);

			/* Save the mbuf from the driver's chain. */
			m = rx_buf->rx_mbuf_ptr;

			/*
			 * Frames received on the NetXtreme II are prepended
			 * with an l2_fhdr structure which provides status
			 * information about the received frame (including
			 * VLAN tags and checksum info).  The frames are also
			 * automatically adjusted to align the IP header
			 * (i.e. two null bytes are inserted before the
			 * Ethernet header).  As a result the data DMA'd by
			 * the controller into the mbuf is as follows:
			 *
			 *	+---------+-----+---------------------+-----+
			 *	| l2_fhdr | pad | packet data         | FCS |
			 *	+---------+-----+---------------------+-----+
			 *
			 * The l2_fhdr needs to be checked and skipped and the
			 * FCS needs to be stripped before sending the packet
			 * up the stack.
			 */
			l2fhdr = mtod(m, struct l2_fhdr *);

			len = l2fhdr->l2_fhdr_pkt_len;
			status = l2fhdr->l2_fhdr_status;

			len -= ETHER_CRC_LEN;

			/* Check the received frame for errors. */
			if (status & (L2_FHDR_ERRORS_BAD_CRC |
			    L2_FHDR_ERRORS_PHY_DECODE |
			    L2_FHDR_ERRORS_ALIGNMENT |
			    L2_FHDR_ERRORS_TOO_SHORT |
			    L2_FHDR_ERRORS_GIANT_FRAME)) {
				IFNET_STAT_INC(ifp, ierrors, 1);

				/* Reuse the mbuf for a new frame. */
				bce_setup_rxdesc_std(rxr, sw_chain_prod,
				    &sw_prod_bseq);
				m = NULL;
				goto bce_rx_int_next_rx;
			}

			/*
			 * Get a new mbuf for the rx_bd.  If no new
			 * mbufs are available then reuse the current mbuf,
			 * log an ierror on the interface, and generate
			 * an error in the system log.
			 */
			if (bce_newbuf_std(rxr, &sw_prod, sw_chain_prod,
			    &sw_prod_bseq, 0)) {
				IFNET_STAT_INC(ifp, ierrors, 1);

				/* Try to reuse the existing mbuf. */
				bce_setup_rxdesc_std(rxr, sw_chain_prod,
				    &sw_prod_bseq);
				m = NULL;
				goto bce_rx_int_next_rx;
			}

			/*
			 * Skip over the l2_fhdr when passing
			 * the data up the stack.
			 */
			m_adj(m, sizeof(struct l2_fhdr) + ETHER_ALIGN);

			m->m_pkthdr.len = m->m_len = len;
			m->m_pkthdr.rcvif = ifp;

			/* Validate the checksum if offload enabled. */
			if (ifp->if_capenable & IFCAP_RXCSUM) {
				/* Check for an IP datagram. */
				if (status & L2_FHDR_STATUS_IP_DATAGRAM) {
					m->m_pkthdr.csum_flags |=
					    CSUM_IP_CHECKED;

					/* Check if the IP checksum is valid. */
					if ((l2fhdr->l2_fhdr_ip_xsum ^
					    0xffff) == 0) {
						m->m_pkthdr.csum_flags |=
						    CSUM_IP_VALID;
					}
				}
				/* Check for a valid TCP/UDP frame. */
				if (status & (L2_FHDR_STATUS_TCP_SEGMENT |
				    L2_FHDR_STATUS_UDP_DATAGRAM)) {
					/* Check for a good TCP/UDP checksum. */
					if ((status &
					    (L2_FHDR_ERRORS_TCP_XSUM |
					    L2_FHDR_ERRORS_UDP_XSUM)) == 0) {
						m->m_pkthdr.csum_data =
						    l2fhdr->l2_fhdr_tcp_udp_xsum;
						m->m_pkthdr.csum_flags |=
						    CSUM_DATA_VALID |
						    CSUM_PSEUDO_HDR;
					}
				}
			}
			if (ifp->if_capenable & IFCAP_RSS) {
				pi = bce_rss_pktinfo(&pi0, status, l2fhdr);
				if (pi != NULL &&
				    (status & L2_FHDR_STATUS_RSS_HASH)) {
					m->m_flags |= M_HASH;
					m->m_pkthdr.hash =
					    toeplitz_hash(l2fhdr->l2_fhdr_hash);
				}
			}

			IFNET_STAT_INC(ifp, ipackets, 1);
bce_rx_int_next_rx:
			sw_prod = NEXT_RX_BD(sw_prod);
		}

		sw_cons = NEXT_RX_BD(sw_cons);

		/* If we have a packet, pass it up the stack */
		if (m) {
			if (status & L2_FHDR_STATUS_L2_VLAN_TAG) {
				m->m_flags |= M_VLANTAG;
				m->m_pkthdr.ether_vlantag =
				    l2fhdr->l2_fhdr_vlan_tag;
			}
			ether_input_pkt(ifp, m, pi);
#ifdef BCE_RSS_DEBUG
			rxr->rx_pkts++;
#endif
		}
	}

	rxr->rx_cons = sw_cons;
	rxr->rx_prod = sw_prod;
	rxr->rx_prod_bseq = sw_prod_bseq;

	REG_WR16(rxr->sc, MB_GET_CID_ADDR(rxr->rx_cid) + BCE_L2MQ_RX_HOST_BDIDX,
	    rxr->rx_prod);
	REG_WR(rxr->sc, MB_GET_CID_ADDR(rxr->rx_cid) + BCE_L2MQ_RX_HOST_BSEQ,
	    rxr->rx_prod_bseq);
}

/****************************************************************************/
/* Reads the transmit consumer value from the status block (skipping over  */
/* chain page pointer if necessary).                                        */
/*                                                                          */
/* Returns:                                                                 */
/*   hw_cons                                                                */
/****************************************************************************/
static __inline uint16_t
bce_get_hw_tx_cons(struct bce_tx_ring *txr)
{
	uint16_t hw_cons = *txr->tx_hw_cons;

	if ((hw_cons & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
		hw_cons++;
	return hw_cons;
}

/****************************************************************************/
/* Handles transmit completion interrupt events.                            */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_tx_intr(struct bce_tx_ring *txr, uint16_t hw_tx_cons)
{
	struct ifnet *ifp = &txr->sc->arpcom.ac_if;
	uint16_t sw_tx_cons, sw_tx_chain_cons;

	ASSERT_SERIALIZED(&txr->tx_serialize);

	/* Get the driver's view of the TX consumer index. */
	sw_tx_cons = txr->tx_cons;

	/* Cycle through any completed TX chain page entries. */
	while (sw_tx_cons != hw_tx_cons) {
		struct bce_tx_buf *tx_buf;

		sw_tx_chain_cons = TX_CHAIN_IDX(txr, sw_tx_cons);
		tx_buf = &txr->tx_bufs[sw_tx_chain_cons];

		/*
		 * Free the associated mbuf.  Remember
		 * that only the last tx_bd of a packet
		 * has an mbuf pointer and DMA map.
		 */
		if (tx_buf->tx_mbuf_ptr != NULL) {
			/* Unmap the mbuf. */
			bus_dmamap_unload(txr->tx_mbuf_tag,
			    tx_buf->tx_mbuf_map);

			/* Free the mbuf. */
			m_freem(tx_buf->tx_mbuf_ptr);
			tx_buf->tx_mbuf_ptr = NULL;

			IFNET_STAT_INC(ifp, opackets, 1);
#ifdef BCE_TSS_DEBUG
			txr->tx_pkts++;
#endif
		}

		txr->used_tx_bd--;
		sw_tx_cons = NEXT_TX_BD(sw_tx_cons);
	}

	if (txr->used_tx_bd == 0) {
		/* Clear the TX timeout timer. */
		txr->tx_watchdog.wd_timer = 0;
	}

	/* Clear the TX hardware queue full flag. */
	if (txr->max_tx_bd - txr->used_tx_bd >= BCE_TX_SPARE_SPACE)
		ifsq_clr_oactive(txr->ifsq);
	txr->tx_cons = sw_tx_cons;
}
/****************************************************************************/
/* Disables interrupt generation.                                           */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_disable_intr(struct bce_softc *sc)
{
	int i;

	for (i = 0; i < sc->rx_ring_cnt; ++i) {
		REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
		    (sc->rx_rings[i].idx << 24) |
		    BCE_PCICFG_INT_ACK_CMD_MASK_INT);
	}
	REG_RD(sc, BCE_PCICFG_INT_ACK_CMD);

	callout_stop(&sc->bce_ckmsi_callout);
	sc->bce_msi_maylose = FALSE;
	sc->bce_check_rx_cons = 0;
	sc->bce_check_tx_cons = 0;
	sc->bce_check_status_idx = 0xffff;

	for (i = 0; i < sc->rx_ring_cnt; ++i)
		lwkt_serialize_handler_disable(sc->bce_msix[i].msix_serialize);
}

/****************************************************************************/
/* Enables interrupt generation.                                            */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_enable_intr(struct bce_softc *sc)
{
	int i;

	for (i = 0; i < sc->rx_ring_cnt; ++i)
		lwkt_serialize_handler_enable(sc->bce_msix[i].msix_serialize);

	for (i = 0; i < sc->rx_ring_cnt; ++i) {
		struct bce_rx_ring *rxr = &sc->rx_rings[i];

		REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, (rxr->idx << 24) |
		    BCE_PCICFG_INT_ACK_CMD_INDEX_VALID |
		    BCE_PCICFG_INT_ACK_CMD_MASK_INT |
		    rxr->last_status_idx);
		REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, (rxr->idx << 24) |
		    BCE_PCICFG_INT_ACK_CMD_INDEX_VALID |
		    rxr->last_status_idx);
	}
	REG_WR(sc, BCE_HC_COMMAND, sc->hc_command | BCE_HC_COMMAND_COAL_NOW);

	if (sc->bce_flags & BCE_CHECK_MSI_FLAG) {
		sc->bce_msi_maylose = FALSE;
		sc->bce_check_rx_cons = 0;
		sc->bce_check_tx_cons = 0;
		sc->bce_check_status_idx = 0xffff;

		if (bootverbose)
			if_printf(&sc->arpcom.ac_if, "check msi\n");

		callout_reset_bycpu(&sc->bce_ckmsi_callout, BCE_MSI_CKINTVL,
		    bce_check_msi, sc, sc->bce_msix[0].msix_cpuid);
	}
}

/****************************************************************************/
/* Reenables interrupt generation during interrupt handling.                */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_reenable_intr(struct bce_rx_ring *rxr)
{
	REG_WR(rxr->sc, BCE_PCICFG_INT_ACK_CMD, (rxr->idx << 24) |
	    BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | rxr->last_status_idx);
}
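/*
 * Sketch of the BCE_PCICFG_INT_ACK_CMD encoding assumed by the
 * routines above (bit names per if_bcereg.h; the field layout here is
 * descriptive, not authoritative):
 *
 *	(rxr->idx << 24)	status block/vector being addressed
 *	..._INDEX_VALID		the status index in the low bits is valid
 *	..._MASK_INT		leave the interrupt masked after the ack
 *	rxr->last_status_idx	last status block index seen by the driver
 *
 * bce_enable_intr() writes the command twice per ring: once with
 * MASK_INT set to acknowledge while still masked, then again without
 * it to actually unmask the vector.
 */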
/****************************************************************************/
/* Handles controller initialization.                                       */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_init(void *xsc)
{
	struct bce_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t ether_mtu;
	int error, i;
	boolean_t polling;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	/* Check if the driver is still running and bail out if it is. */
	if (ifp->if_flags & IFF_RUNNING)
		return;

	bce_stop(sc);

	error = bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
	if (error) {
		if_printf(ifp, "Controller reset failed!\n");
		goto back;
	}

	error = bce_chipinit(sc);
	if (error) {
		if_printf(ifp, "Controller initialization failed!\n");
		goto back;
	}

	error = bce_blockinit(sc);
	if (error) {
		if_printf(ifp, "Block initialization failed!\n");
		goto back;
	}

	/* Load our MAC address. */
	bcopy(IF_LLADDR(ifp), sc->eaddr, ETHER_ADDR_LEN);
	bce_set_mac_addr(sc);

	/* Calculate and program the Ethernet MTU size. */
	ether_mtu = ETHER_HDR_LEN + EVL_ENCAPLEN + ifp->if_mtu + ETHER_CRC_LEN;

	/*
	 * Program the mtu, enabling jumbo frame
	 * support if necessary.  Also set the mbuf
	 * allocation count for RX frames.
	 */
	if (ether_mtu > ETHER_MAX_LEN + EVL_ENCAPLEN) {
#ifdef notyet
		REG_WR(sc, BCE_EMAC_RX_MTU_SIZE,
		    min(ether_mtu, BCE_MAX_JUMBO_ETHER_MTU) |
		    BCE_EMAC_RX_MTU_SIZE_JUMBO_ENA);
#else
		panic("jumbo buffer is not supported yet");
#endif
	} else {
		REG_WR(sc, BCE_EMAC_RX_MTU_SIZE, ether_mtu);
	}

	/* Program appropriate promiscuous/multicast filtering. */
	bce_set_rx_mode(sc);

	/*
	 * Init RX buffer descriptor chain.
	 */
	REG_WR(sc, BCE_RLUP_RSS_CONFIG, 0);
	bce_reg_wr_ind(sc, BCE_RXP_SCRATCH_RSS_TBL_SZ, 0);

	for (i = 0; i < sc->rx_ring_cnt; ++i)
		bce_init_rx_chain(&sc->rx_rings[i]);	/* XXX return value */

	if (sc->rx_ring_cnt > 1)
		bce_init_rss(sc);

	/*
	 * Init TX buffer descriptor chain.
	 */
	REG_WR(sc, BCE_TSCH_TSS_CFG, 0);

	for (i = 0; i < sc->tx_ring_cnt; ++i)
		bce_init_tx_chain(&sc->tx_rings[i]);

	if (sc->tx_ring_cnt > 1) {
		REG_WR(sc, BCE_TSCH_TSS_CFG,
		    ((sc->tx_ring_cnt - 1) << 24) | (TX_TSS_CID << 7));
	}

	polling = FALSE;
#ifdef IFPOLL_ENABLE
	if (ifp->if_flags & IFF_NPOLLING)
		polling = TRUE;
#endif

	if (polling) {
		/* Disable interrupts if we are polling. */
		bce_disable_intr(sc);

		/* Change coalesce parameters */
		bce_npoll_coal_change(sc);
	} else {
		/* Enable host interrupts. */
		bce_enable_intr(sc);
	}
	bce_set_timer_cpuid(sc, polling);

	bce_ifmedia_upd(ifp);

	ifp->if_flags |= IFF_RUNNING;
	for (i = 0; i < sc->tx_ring_cnt; ++i) {
		ifsq_clr_oactive(sc->tx_rings[i].ifsq);
		ifsq_watchdog_start(&sc->tx_rings[i].tx_watchdog);
	}

	callout_reset_bycpu(&sc->bce_tick_callout, hz, bce_tick, sc,
	    sc->bce_timer_cpuid);
back:
	if (error)
		bce_stop(sc);
}

/****************************************************************************/
/* Initialize the controller just enough so that any management firmware   */
/* running on the device will continue to operate correctly.               */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_mgmt_init(struct bce_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;

	/* Bail out if management firmware is not running. */
	if (!(sc->bce_flags & BCE_MFW_ENABLE_FLAG))
		return;

	/* Enable all critical blocks in the MAC. */
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
		REG_WR(sc, BCE_MISC_ENABLE_SET_BITS,
		    BCE_MISC_ENABLE_DEFAULT_XI);
	} else {
		REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, BCE_MISC_ENABLE_DEFAULT);
	}
	REG_RD(sc, BCE_MISC_ENABLE_SET_BITS);
	DELAY(20);

	bce_ifmedia_upd(ifp);
}
/****************************************************************************/
/* Encapsulates an mbuf cluster into the tx_bd chain structure and makes   */
/* the memory visible to the controller.                                    */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
static int
bce_encap(struct bce_tx_ring *txr, struct mbuf **m_head, int *nsegs_used)
{
	bus_dma_segment_t segs[BCE_MAX_SEGMENTS];
	bus_dmamap_t map, tmp_map;
	struct mbuf *m0 = *m_head;
	struct tx_bd *txbd = NULL;
	uint16_t vlan_tag = 0, flags = 0, mss = 0;
	uint16_t chain_prod, chain_prod_start, prod;
	uint32_t prod_bseq;
	int i, error, maxsegs, nsegs;

	/* Transfer any checksum offload flags to the bd. */
	if (m0->m_pkthdr.csum_flags & CSUM_TSO) {
		error = bce_tso_setup(txr, m_head, &flags, &mss);
		if (error)
			return ENOBUFS;
		m0 = *m_head;
	} else if (m0->m_pkthdr.csum_flags & BCE_CSUM_FEATURES) {
		if (m0->m_pkthdr.csum_flags & CSUM_IP)
			flags |= TX_BD_FLAGS_IP_CKSUM;
		if (m0->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
			flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}

	/* Transfer any VLAN tags to the bd. */
	if (m0->m_flags & M_VLANTAG) {
		flags |= TX_BD_FLAGS_VLAN_TAG;
		vlan_tag = m0->m_pkthdr.ether_vlantag;
	}

	prod = txr->tx_prod;
	chain_prod_start = chain_prod = TX_CHAIN_IDX(txr, prod);

	/* Map the mbuf into DMAable memory. */
	map = txr->tx_bufs[chain_prod_start].tx_mbuf_map;

	maxsegs = txr->max_tx_bd - txr->used_tx_bd;
	KASSERT(maxsegs >= BCE_TX_SPARE_SPACE,
	    ("not enough segments %d", maxsegs));
	if (maxsegs > BCE_MAX_SEGMENTS)
		maxsegs = BCE_MAX_SEGMENTS;

	/* Map the mbuf into our DMA address space. */
	error = bus_dmamap_load_mbuf_defrag(txr->tx_mbuf_tag, map, m_head,
	    segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
	if (error)
		goto back;
	bus_dmamap_sync(txr->tx_mbuf_tag, map, BUS_DMASYNC_PREWRITE);

	*nsegs_used += nsegs;

	/* Reset m0 */
	m0 = *m_head;

	/* prod points to an empty tx_bd at this point. */
	prod_bseq = txr->tx_prod_bseq;

	/*
	 * Cycle through each mbuf segment that makes up
	 * the outgoing frame, gathering the mapping info
	 * for that segment and creating a tx_bd for
	 * the mbuf.
	 */
	for (i = 0; i < nsegs; i++) {
		chain_prod = TX_CHAIN_IDX(txr, prod);
		txbd =
		    &txr->tx_bd_chain[TX_PAGE(chain_prod)][TX_IDX(chain_prod)];

		txbd->tx_bd_haddr_lo = htole32(BCE_ADDR_LO(segs[i].ds_addr));
		txbd->tx_bd_haddr_hi = htole32(BCE_ADDR_HI(segs[i].ds_addr));
		txbd->tx_bd_mss_nbytes = htole32(mss << 16) |
		    htole16(segs[i].ds_len);
		txbd->tx_bd_vlan_tag = htole16(vlan_tag);
		txbd->tx_bd_flags = htole16(flags);

		prod_bseq += segs[i].ds_len;
		if (i == 0)
			txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_START);
		prod = NEXT_TX_BD(prod);
	}

	/* Set the END flag on the last TX buffer descriptor. */
	txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_END);

	/*
	 * Ensure that the mbuf pointer for this transmission
	 * is placed at the array index of the last
	 * descriptor in this chain.  This is done
	 * because a single map is used for all
	 * segments of the mbuf and we don't want to
	 * unload the map before all of the segments
	 * have been freed.
	 */
	txr->tx_bufs[chain_prod].tx_mbuf_ptr = m0;

	tmp_map = txr->tx_bufs[chain_prod].tx_mbuf_map;
	txr->tx_bufs[chain_prod].tx_mbuf_map = map;
	txr->tx_bufs[chain_prod_start].tx_mbuf_map = tmp_map;

	txr->used_tx_bd += nsegs;

	/* prod points to the next free tx_bd at this point. */
	txr->tx_prod = prod;
	txr->tx_prod_bseq = prod_bseq;
back:
	if (error) {
		m_freem(*m_head);
		*m_head = NULL;
	}
	return error;
}
static void
bce_xmit(struct bce_tx_ring *txr)
{
	/* Start the transmit. */
	REG_WR16(txr->sc, MB_GET_CID_ADDR(txr->tx_cid) + BCE_L2CTX_TX_HOST_BIDX,
	    txr->tx_prod);
	REG_WR(txr->sc, MB_GET_CID_ADDR(txr->tx_cid) + BCE_L2CTX_TX_HOST_BSEQ,
	    txr->tx_prod_bseq);
}

/****************************************************************************/
/* Main transmit routine when called from another routine with a lock.     */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
{
	struct bce_softc *sc = ifp->if_softc;
	struct bce_tx_ring *txr = ifsq_get_priv(ifsq);
	int count = 0;

	KKASSERT(txr->ifsq == ifsq);
	ASSERT_SERIALIZED(&txr->tx_serialize);

	/* If there's no link or the transmit queue is empty then just exit. */
	if (!sc->bce_link) {
		ifsq_purge(ifsq);
		return;
	}

	if ((ifp->if_flags & IFF_RUNNING) == 0 || ifsq_is_oactive(ifsq))
		return;

	for (;;) {
		struct mbuf *m_head;

		/*
		 * We keep BCE_TX_SPARE_SPACE entries, so bce_encap() is
		 * unlikely to fail.
		 */
		if (txr->max_tx_bd - txr->used_tx_bd < BCE_TX_SPARE_SPACE) {
			ifsq_set_oactive(ifsq);
			break;
		}

		/* Check for any frames to send. */
		m_head = ifsq_dequeue(ifsq);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring.  If we
		 * don't have room, place the mbuf back at the
		 * head of the queue and set the OACTIVE flag
		 * to wait for the NIC to drain the chain.
		 */
		if (bce_encap(txr, &m_head, &count)) {
			IFNET_STAT_INC(ifp, oerrors, 1);
			if (txr->used_tx_bd == 0) {
				continue;
			} else {
				ifsq_set_oactive(ifsq);
				break;
			}
		}

		if (count >= txr->tx_wreg) {
			bce_xmit(txr);
			count = 0;
		}

		/* Send a copy of the frame to any BPF listeners. */
		ETHER_BPF_MTAP(ifp, m_head);

		/* Set the tx timeout. */
		txr->tx_watchdog.wd_timer = BCE_TX_TIMEOUT;
	}
	if (count > 0)
		bce_xmit(txr);
}
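/*
 * Doorbell batching in bce_start() above: the producer indices are
 * pushed to the chip via bce_xmit() only after at least tx_wreg
 * tx_bd's have been queued (plus once more for any remainder after
 * the loop), trading a little latency for far fewer register writes
 * on a busy queue.  E.g. with txr->tx_wreg == 8, a burst of 20
 * single-segment frames costs 3 doorbell writes instead of 20.
 */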
/****************************************************************************/
/* Handles any IOCTL calls from the operating system.                       */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
static int
bce_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
{
	struct bce_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct mii_data *mii;
	int mask, error = 0;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	switch(command) {
	case SIOCSIFMTU:
		/* Check that the MTU setting is supported. */
		if (ifr->ifr_mtu < BCE_MIN_MTU ||
#ifdef notyet
		    ifr->ifr_mtu > BCE_MAX_JUMBO_MTU
#else
		    ifr->ifr_mtu > ETHERMTU
#endif
		    ) {
			error = EINVAL;
			break;
		}

		ifp->if_mtu = ifr->ifr_mtu;
		ifp->if_flags &= ~IFF_RUNNING;	/* Force reinitialize */
		bce_init(sc);
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING) {
				mask = ifp->if_flags ^ sc->bce_if_flags;

				if (mask & (IFF_PROMISC | IFF_ALLMULTI))
					bce_set_rx_mode(sc);
			} else {
				bce_init(sc);
			}
		} else if (ifp->if_flags & IFF_RUNNING) {
			bce_stop(sc);

			/*
			 * If MFW is running, restart the controller
			 * just enough to keep the management firmware
			 * alive.
			 */
			if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) {
				bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
				bce_chipinit(sc);
				bce_mgmt_init(sc);
			}
		}
		sc->bce_if_flags = ifp->if_flags;
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING)
			bce_set_rx_mode(sc);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->bce_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;

	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_HWCSUM) {
			ifp->if_capenable ^= (mask & IFCAP_HWCSUM);
			if (ifp->if_capenable & IFCAP_TXCSUM)
				ifp->if_hwassist |= BCE_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~BCE_CSUM_FEATURES;
		}
		if (mask & IFCAP_TSO) {
			ifp->if_capenable ^= IFCAP_TSO;
			if (ifp->if_capenable & IFCAP_TSO)
				ifp->if_hwassist |= CSUM_TSO;
			else
				ifp->if_hwassist &= ~CSUM_TSO;
		}
		if (mask & IFCAP_RSS)
			ifp->if_capenable ^= IFCAP_RSS;
		break;

	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}
	return error;
}
/****************************************************************************/
/* Transmit timeout handler.                                                */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_watchdog(struct ifaltq_subque *ifsq)
{
	struct ifnet *ifp = ifsq_get_ifp(ifsq);
	struct bce_softc *sc = ifp->if_softc;
	int i;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	/*
	 * If we are in this routine because of pause frames, then
	 * don't reset the hardware.
	 */
	if (REG_RD(sc, BCE_EMAC_TX_STATUS) & BCE_EMAC_TX_STATUS_XOFFED)
		return;

	if_printf(ifp, "Watchdog timeout occurred, resetting!\n");

	ifp->if_flags &= ~IFF_RUNNING;	/* Force reinitialize */
	bce_init(sc);

	IFNET_STAT_INC(ifp, oerrors, 1);

	for (i = 0; i < sc->tx_ring_cnt; ++i)
		ifsq_devstart_sched(sc->tx_rings[i].ifsq);
}

#ifdef IFPOLL_ENABLE

static void
bce_npoll_status(struct ifnet *ifp)
{
	struct bce_softc *sc = ifp->if_softc;
	struct status_block *sblk = sc->status_block;
	uint32_t status_attn_bits;

	ASSERT_SERIALIZED(&sc->main_serialize);

	status_attn_bits = sblk->status_attn_bits;

	/* Was it a link change interrupt? */
	if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
	    (sblk->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE)) {
		bce_phy_intr(sc);

		/*
		 * Clear any transient status updates during link state change.
		 */
		REG_WR(sc, BCE_HC_COMMAND,
		    sc->hc_command | BCE_HC_COMMAND_COAL_NOW_WO_INT);
		REG_RD(sc, BCE_HC_COMMAND);
	}

	/*
	 * If any other attention is asserted then the chip is toast.
	 */
	if ((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) !=
	    (sblk->status_attn_bits_ack & ~STATUS_ATTN_BITS_LINK_STATE)) {
		if_printf(ifp, "Fatal attention detected: 0x%08X\n",
		    sblk->status_attn_bits);
		bce_serialize_skipmain(sc);
		bce_init(sc);
		bce_deserialize_skipmain(sc);
	}
}

static void
bce_npoll_rx(struct ifnet *ifp, void *arg, int count)
{
	struct bce_rx_ring *rxr = arg;
	uint16_t hw_rx_cons;

	ASSERT_SERIALIZED(&rxr->rx_serialize);

	/*
	 * Save the status block index value for use when enabling
	 * the interrupt.
	 */
	rxr->last_status_idx = *rxr->hw_status_idx;

	/* Make sure status index is extracted before RX/TX cons */
	cpu_lfence();

	hw_rx_cons = bce_get_hw_rx_cons(rxr);

	/* Check for any completed RX frames. */
	if (hw_rx_cons != rxr->rx_cons)
		bce_rx_intr(rxr, count, hw_rx_cons);
}

static void
bce_npoll_rx_pack(struct ifnet *ifp, void *arg, int count)
{
	struct bce_rx_ring *rxr = arg;

	KASSERT(rxr->idx == 0, ("not the first RX ring, but %d", rxr->idx));
	bce_npoll_rx(ifp, rxr, count);

	KASSERT(rxr->sc->rx_ring_cnt != rxr->sc->rx_ring_cnt2,
	    ("RX ring count %d, count2 %d", rxr->sc->rx_ring_cnt,
	     rxr->sc->rx_ring_cnt2));

	/* Last ring carries packets whose masked hash is 0 */
	rxr = &rxr->sc->rx_rings[rxr->sc->rx_ring_cnt - 1];

	lwkt_serialize_enter(&rxr->rx_serialize);
	bce_npoll_rx(ifp, rxr, count);
	lwkt_serialize_exit(&rxr->rx_serialize);
}
static void
bce_npoll_tx(struct ifnet *ifp, void *arg, int count __unused)
{
	struct bce_tx_ring *txr = arg;
	uint16_t hw_tx_cons;

	ASSERT_SERIALIZED(&txr->tx_serialize);

	hw_tx_cons = bce_get_hw_tx_cons(txr);

	/* Check for any completed TX frames. */
	if (hw_tx_cons != txr->tx_cons) {
		bce_tx_intr(txr, hw_tx_cons);
		if (!ifsq_is_empty(txr->ifsq))
			ifsq_devstart(txr->ifsq);
	}
}

static void
bce_npoll(struct ifnet *ifp, struct ifpoll_info *info)
{
	struct bce_softc *sc = ifp->if_softc;
	int i;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	if (info != NULL) {
		info->ifpi_status.status_func = bce_npoll_status;
		info->ifpi_status.serializer = &sc->main_serialize;

		for (i = 0; i < sc->tx_ring_cnt; ++i) {
			struct bce_tx_ring *txr = &sc->tx_rings[i];
			int idx = i + sc->npoll_ofs;

			KKASSERT(idx < ncpus2);
			info->ifpi_tx[idx].poll_func = bce_npoll_tx;
			info->ifpi_tx[idx].arg = txr;
			info->ifpi_tx[idx].serializer = &txr->tx_serialize;
			ifsq_set_cpuid(txr->ifsq, idx);
		}

		for (i = 0; i < sc->rx_ring_cnt2; ++i) {
			struct bce_rx_ring *rxr = &sc->rx_rings[i];
			int idx = i + sc->npoll_ofs;

			KKASSERT(idx < ncpus2);
			if (i == 0 && sc->rx_ring_cnt2 != sc->rx_ring_cnt) {
				/*
				 * If RSS is enabled, the packets whose
				 * masked hash are 0 are queued to the
				 * last RX ring; piggyback the last RX
				 * ring's processing in the first RX
				 * polling handler. (see also: comment
				 * in bce_setup_ring_cnt())
				 */
				if (bootverbose) {
					if_printf(ifp, "npoll pack last "
					    "RX ring on cpu%d\n", idx);
				}
				info->ifpi_rx[idx].poll_func =
				    bce_npoll_rx_pack;
			} else {
				info->ifpi_rx[idx].poll_func = bce_npoll_rx;
			}
			info->ifpi_rx[idx].arg = rxr;
			info->ifpi_rx[idx].serializer = &rxr->rx_serialize;
		}

		if (ifp->if_flags & IFF_RUNNING) {
			bce_set_timer_cpuid(sc, TRUE);
			bce_disable_intr(sc);
			bce_npoll_coal_change(sc);
		}
	} else {
		for (i = 0; i < sc->tx_ring_cnt; ++i) {
			ifsq_set_cpuid(sc->tx_rings[i].ifsq,
			    sc->bce_msix[i].msix_cpuid);
		}

		if (ifp->if_flags & IFF_RUNNING) {
			bce_set_timer_cpuid(sc, FALSE);
			bce_enable_intr(sc);

			sc->bce_coalchg_mask |= BCE_COALMASK_TX_BDS_INT |
			    BCE_COALMASK_RX_BDS_INT;
			bce_coal_change(sc);
		}
	}
}

#endif	/* IFPOLL_ENABLE */
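/*
 * Ring-to-CPU assignment used by bce_npoll() above: ring i is polled
 * on CPU (i + sc->npoll_ofs), so with npoll_ofs == 2 and 4 RX rings:
 *
 *	RX ring 0 -> cpu2, ring 1 -> cpu3,
 *	RX ring 2 -> cpu4, ring 3 -> cpu5
 *
 * npoll_ofs simply rotates the rings across the available CPUs (the
 * KKASSERT(idx < ncpus2) checks bound the result), and the TX rings
 * are offset the same way so that a ring pair shares a CPU.
 */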
*/ 5392 hw_rx_cons = bce_get_hw_rx_cons(rxr); 5393 hw_tx_cons = bce_get_hw_tx_cons(txr); 5394 5395 status_attn_bits = sblk->status_attn_bits; 5396 5397 /* Was it a link change interrupt? */ 5398 if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) != 5399 (sblk->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE)) { 5400 bce_phy_intr(sc); 5401 5402 /* 5403 * Clear any transient status updates during link state 5404 * change. 5405 */ 5406 REG_WR(sc, BCE_HC_COMMAND, 5407 sc->hc_command | BCE_HC_COMMAND_COAL_NOW_WO_INT); 5408 REG_RD(sc, BCE_HC_COMMAND); 5409 } 5410 5411 /* 5412 * If any other attention is asserted then 5413 * the chip is toast. 5414 */ 5415 if ((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) != 5416 (sblk->status_attn_bits_ack & ~STATUS_ATTN_BITS_LINK_STATE)) { 5417 if_printf(ifp, "Fatal attention detected: 0x%08X\n", 5418 sblk->status_attn_bits); 5419 bce_serialize_skipmain(sc); 5420 bce_init(sc); 5421 bce_deserialize_skipmain(sc); 5422 return; 5423 } 5424 5425 /* Check for any completed RX frames. */ 5426 lwkt_serialize_enter(&rxr->rx_serialize); 5427 if (hw_rx_cons != rxr->rx_cons) 5428 bce_rx_intr(rxr, -1, hw_rx_cons); 5429 lwkt_serialize_exit(&rxr->rx_serialize); 5430 5431 /* Check for any completed TX frames. */ 5432 lwkt_serialize_enter(&txr->tx_serialize); 5433 if (hw_tx_cons != txr->tx_cons) { 5434 bce_tx_intr(txr, hw_tx_cons); 5435 if (!ifsq_is_empty(txr->ifsq)) 5436 ifsq_devstart(txr->ifsq); 5437 } 5438 lwkt_serialize_exit(&txr->tx_serialize); 5439 } 5440 5441 static void 5442 bce_intr_legacy(void *xsc) 5443 { 5444 struct bce_softc *sc = xsc; 5445 struct bce_rx_ring *rxr = &sc->rx_rings[0]; 5446 struct status_block *sblk; 5447 5448 sblk = sc->status_block; 5449 5450 /* 5451 * If the hardware status block index matches the last value 5452 * read by the driver and we haven't asserted our interrupt 5453 * then there's nothing to do. 5454 */ 5455 if (sblk->status_idx == rxr->last_status_idx && 5456 (REG_RD(sc, BCE_PCICFG_MISC_STATUS) & 5457 BCE_PCICFG_MISC_STATUS_INTA_VALUE)) 5458 return; 5459 5460 /* Ack the interrupt and stop others from occurring. */ 5461 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, 5462 BCE_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM | 5463 BCE_PCICFG_INT_ACK_CMD_MASK_INT); 5464 5465 /* 5466 * Read back to deassert IRQ immediately to avoid too 5467 * many spurious interrupts. 5468 */ 5469 REG_RD(sc, BCE_PCICFG_INT_ACK_CMD); 5470 5471 bce_intr(sc); 5472 5473 /* Re-enable interrupts. */ 5474 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, 5475 BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | 5476 BCE_PCICFG_INT_ACK_CMD_MASK_INT | rxr->last_status_idx); 5477 bce_reenable_intr(rxr); 5478 } 5479 5480 static void 5481 bce_intr_msi(void *xsc) 5482 { 5483 struct bce_softc *sc = xsc; 5484 5485 /* Ack the interrupt and stop others from occurring.
*/ 5486 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, 5487 BCE_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM | 5488 BCE_PCICFG_INT_ACK_CMD_MASK_INT); 5489 5490 bce_intr(sc); 5491 5492 /* Re-enable interrupts */ 5493 bce_reenable_intr(&sc->rx_rings[0]); 5494 } 5495 5496 static void 5497 bce_intr_msi_oneshot(void *xsc) 5498 { 5499 struct bce_softc *sc = xsc; 5500 5501 bce_intr(sc); 5502 5503 /* Re-enable interrupts */ 5504 bce_reenable_intr(&sc->rx_rings[0]); 5505 } 5506 5507 static void 5508 bce_intr_msix_rxtx(void *xrxr) 5509 { 5510 struct bce_rx_ring *rxr = xrxr; 5511 struct bce_tx_ring *txr; 5512 uint16_t hw_rx_cons, hw_tx_cons; 5513 5514 ASSERT_SERIALIZED(&rxr->rx_serialize); 5515 5516 KKASSERT(rxr->idx < rxr->sc->tx_ring_cnt); 5517 txr = &rxr->sc->tx_rings[rxr->idx]; 5518 5519 /* 5520 * Save the status block index value for use during 5521 * the next interrupt. 5522 */ 5523 rxr->last_status_idx = *rxr->hw_status_idx; 5524 5525 /* Make sure status index is extracted before RX/TX cons */ 5526 cpu_lfence(); 5527 5528 /* Check if the hardware has finished any work. */ 5529 hw_rx_cons = bce_get_hw_rx_cons(rxr); 5530 if (hw_rx_cons != rxr->rx_cons) 5531 bce_rx_intr(rxr, -1, hw_rx_cons); 5532 5533 /* Check for any completed TX frames. */ 5534 hw_tx_cons = bce_get_hw_tx_cons(txr); 5535 lwkt_serialize_enter(&txr->tx_serialize); 5536 if (hw_tx_cons != txr->tx_cons) { 5537 bce_tx_intr(txr, hw_tx_cons); 5538 if (!ifsq_is_empty(txr->ifsq)) 5539 ifsq_devstart(txr->ifsq); 5540 } 5541 lwkt_serialize_exit(&txr->tx_serialize); 5542 5543 /* Re-enable interrupts */ 5544 bce_reenable_intr(rxr); 5545 } 5546 5547 static void 5548 bce_intr_msix_rx(void *xrxr) 5549 { 5550 struct bce_rx_ring *rxr = xrxr; 5551 uint16_t hw_rx_cons; 5552 5553 ASSERT_SERIALIZED(&rxr->rx_serialize); 5554 5555 /* 5556 * Save the status block index value for use during 5557 * the next interrupt. 5558 */ 5559 rxr->last_status_idx = *rxr->hw_status_idx; 5560 5561 /* Make sure status index is extracted before RX cons */ 5562 cpu_lfence(); 5563 5564 /* Check if the hardware has finished any work. */ 5565 hw_rx_cons = bce_get_hw_rx_cons(rxr); 5566 if (hw_rx_cons != rxr->rx_cons) 5567 bce_rx_intr(rxr, -1, hw_rx_cons); 5568 5569 /* Re-enable interrupts */ 5570 bce_reenable_intr(rxr); 5571 } 5572 5573 /****************************************************************************/ 5574 /* Programs the various packet receive modes (broadcast and multicast). */ 5575 /* */ 5576 /* Returns: */ 5577 /* Nothing. */ 5578 /****************************************************************************/ 5579 static void 5580 bce_set_rx_mode(struct bce_softc *sc) 5581 { 5582 struct ifnet *ifp = &sc->arpcom.ac_if; 5583 struct ifmultiaddr *ifma; 5584 uint32_t hashes[NUM_MC_HASH_REGISTERS] = { 0, 0, 0, 0, 0, 0, 0, 0 }; 5585 uint32_t rx_mode, sort_mode; 5586 int h, i; 5587 5588 ASSERT_IFNET_SERIALIZED_ALL(ifp); 5589 5590 /* Initialize receive mode default settings. */ 5591 rx_mode = sc->rx_mode & 5592 ~(BCE_EMAC_RX_MODE_PROMISCUOUS | 5593 BCE_EMAC_RX_MODE_KEEP_VLAN_TAG); 5594 sort_mode = 1 | BCE_RPM_SORT_USER0_BC_EN; 5595 5596 /* 5597 * ASF/IPMI/UMP firmware requires that VLAN tag stripping 5598 * be enabled. 5599 */ 5600 if (!(BCE_IF_CAPABILITIES & IFCAP_VLAN_HWTAGGING) && 5601 !(sc->bce_flags & BCE_MFW_ENABLE_FLAG)) 5602 rx_mode |= BCE_EMAC_RX_MODE_KEEP_VLAN_TAG; 5603 5604 /* 5605 * Check for promiscuous, all multicast, or selected 5606 * multicast address filtering. 5607 */ 5608 if (ifp->if_flags & IFF_PROMISC) { 5609 /* Enable promiscuous mode.
*/ 5610 rx_mode |= BCE_EMAC_RX_MODE_PROMISCUOUS; 5611 sort_mode |= BCE_RPM_SORT_USER0_PROM_EN; 5612 } else if (ifp->if_flags & IFF_ALLMULTI) { 5613 /* Enable all multicast addresses. */ 5614 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) { 5615 REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4), 5616 0xffffffff); 5617 } 5618 sort_mode |= BCE_RPM_SORT_USER0_MC_EN; 5619 } else { 5620 /* Accept one or more multicast(s). */ 5621 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 5622 if (ifma->ifma_addr->sa_family != AF_LINK) 5623 continue; 5624 h = ether_crc32_le( 5625 LLADDR((struct sockaddr_dl *)ifma->ifma_addr), 5626 ETHER_ADDR_LEN) & 0xFF; 5627 hashes[(h & 0xE0) >> 5] |= 1 << (h & 0x1F); 5628 } 5629 5630 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) { 5631 REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4), 5632 hashes[i]); 5633 } 5634 sort_mode |= BCE_RPM_SORT_USER0_MC_HSH_EN; 5635 } 5636 5637 /* Only make changes if the receive mode has actually changed. */ 5638 if (rx_mode != sc->rx_mode) { 5639 sc->rx_mode = rx_mode; 5640 REG_WR(sc, BCE_EMAC_RX_MODE, rx_mode); 5641 } 5642 5643 /* Disable and clear the existing sort before enabling a new sort. */ 5644 REG_WR(sc, BCE_RPM_SORT_USER0, 0x0); 5645 REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode); 5646 REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode | BCE_RPM_SORT_USER0_ENA); 5647 } 5648 5649 /****************************************************************************/ 5650 /* Called periodically to update statistics from the controller's */ 5651 /* statistics block. */ 5652 /* */ 5653 /* Returns: */ 5654 /* Nothing. */ 5655 /****************************************************************************/ 5656 static void 5657 bce_stats_update(struct bce_softc *sc) 5658 { 5659 struct ifnet *ifp = &sc->arpcom.ac_if; 5660 struct statistics_block *stats = sc->stats_block; 5661 5662 ASSERT_SERIALIZED(&sc->main_serialize); 5663 5664 /* 5665 * Certain controllers don't report carrier sense errors correctly. 5666 * See errata E11_5708CA0_1165. 5667 */ 5668 if (!(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) && 5669 !(BCE_CHIP_ID(sc) == BCE_CHIP_ID_5708_A0)) { 5670 IFNET_STAT_INC(ifp, oerrors, 5671 (u_long)stats->stat_Dot3StatsCarrierSenseErrors); 5672 } 5673 5674 /* 5675 * Update the sysctl statistics from the hardware statistics.
5676 */ 5677 sc->stat_IfHCInOctets = 5678 ((uint64_t)stats->stat_IfHCInOctets_hi << 32) + 5679 (uint64_t)stats->stat_IfHCInOctets_lo; 5680 5681 sc->stat_IfHCInBadOctets = 5682 ((uint64_t)stats->stat_IfHCInBadOctets_hi << 32) + 5683 (uint64_t)stats->stat_IfHCInBadOctets_lo; 5684 5685 sc->stat_IfHCOutOctets = 5686 ((uint64_t)stats->stat_IfHCOutOctets_hi << 32) + 5687 (uint64_t)stats->stat_IfHCOutOctets_lo; 5688 5689 sc->stat_IfHCOutBadOctets = 5690 ((uint64_t)stats->stat_IfHCOutBadOctets_hi << 32) + 5691 (uint64_t)stats->stat_IfHCOutBadOctets_lo; 5692 5693 sc->stat_IfHCInUcastPkts = 5694 ((uint64_t)stats->stat_IfHCInUcastPkts_hi << 32) + 5695 (uint64_t)stats->stat_IfHCInUcastPkts_lo; 5696 5697 sc->stat_IfHCInMulticastPkts = 5698 ((uint64_t)stats->stat_IfHCInMulticastPkts_hi << 32) + 5699 (uint64_t)stats->stat_IfHCInMulticastPkts_lo; 5700 5701 sc->stat_IfHCInBroadcastPkts = 5702 ((uint64_t)stats->stat_IfHCInBroadcastPkts_hi << 32) + 5703 (uint64_t)stats->stat_IfHCInBroadcastPkts_lo; 5704 5705 sc->stat_IfHCOutUcastPkts = 5706 ((uint64_t)stats->stat_IfHCOutUcastPkts_hi << 32) + 5707 (uint64_t)stats->stat_IfHCOutUcastPkts_lo; 5708 5709 sc->stat_IfHCOutMulticastPkts = 5710 ((uint64_t)stats->stat_IfHCOutMulticastPkts_hi << 32) + 5711 (uint64_t)stats->stat_IfHCOutMulticastPkts_lo; 5712 5713 sc->stat_IfHCOutBroadcastPkts = 5714 ((uint64_t)stats->stat_IfHCOutBroadcastPkts_hi << 32) + 5715 (uint64_t)stats->stat_IfHCOutBroadcastPkts_lo; 5716 5717 sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors = 5718 stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors; 5719 5720 sc->stat_Dot3StatsCarrierSenseErrors = 5721 stats->stat_Dot3StatsCarrierSenseErrors; 5722 5723 sc->stat_Dot3StatsFCSErrors = 5724 stats->stat_Dot3StatsFCSErrors; 5725 5726 sc->stat_Dot3StatsAlignmentErrors = 5727 stats->stat_Dot3StatsAlignmentErrors; 5728 5729 sc->stat_Dot3StatsSingleCollisionFrames = 5730 stats->stat_Dot3StatsSingleCollisionFrames; 5731 5732 sc->stat_Dot3StatsMultipleCollisionFrames = 5733 stats->stat_Dot3StatsMultipleCollisionFrames; 5734 5735 sc->stat_Dot3StatsDeferredTransmissions = 5736 stats->stat_Dot3StatsDeferredTransmissions; 5737 5738 sc->stat_Dot3StatsExcessiveCollisions = 5739 stats->stat_Dot3StatsExcessiveCollisions; 5740 5741 sc->stat_Dot3StatsLateCollisions = 5742 stats->stat_Dot3StatsLateCollisions; 5743 5744 sc->stat_EtherStatsCollisions = 5745 stats->stat_EtherStatsCollisions; 5746 5747 sc->stat_EtherStatsFragments = 5748 stats->stat_EtherStatsFragments; 5749 5750 sc->stat_EtherStatsJabbers = 5751 stats->stat_EtherStatsJabbers; 5752 5753 sc->stat_EtherStatsUndersizePkts = 5754 stats->stat_EtherStatsUndersizePkts; 5755 5756 sc->stat_EtherStatsOverrsizePkts = 5757 stats->stat_EtherStatsOverrsizePkts; 5758 5759 sc->stat_EtherStatsPktsRx64Octets = 5760 stats->stat_EtherStatsPktsRx64Octets; 5761 5762 sc->stat_EtherStatsPktsRx65Octetsto127Octets = 5763 stats->stat_EtherStatsPktsRx65Octetsto127Octets; 5764 5765 sc->stat_EtherStatsPktsRx128Octetsto255Octets = 5766 stats->stat_EtherStatsPktsRx128Octetsto255Octets; 5767 5768 sc->stat_EtherStatsPktsRx256Octetsto511Octets = 5769 stats->stat_EtherStatsPktsRx256Octetsto511Octets; 5770 5771 sc->stat_EtherStatsPktsRx512Octetsto1023Octets = 5772 stats->stat_EtherStatsPktsRx512Octetsto1023Octets; 5773 5774 sc->stat_EtherStatsPktsRx1024Octetsto1522Octets = 5775 stats->stat_EtherStatsPktsRx1024Octetsto1522Octets; 5776 5777 sc->stat_EtherStatsPktsRx1523Octetsto9022Octets = 5778 stats->stat_EtherStatsPktsRx1523Octetsto9022Octets; 5779 5780 
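/*
 * The 64-bit counters above are assembled from the hi/lo 32-bit word
 * pairs exported by the hardware statistics block.  A minimal sketch
 * of that pattern, using a hypothetical helper that is not part of
 * this driver:
 *
 *	static __inline uint64_t
 *	bce_stat64(uint32_t hi, uint32_t lo)
 *	{
 *		return (((uint64_t)hi << 32) | (uint64_t)lo);
 *	}
 *
 *	sc->stat_IfHCInOctets =
 *	    bce_stat64(stats->stat_IfHCInOctets_hi,
 *		stats->stat_IfHCInOctets_lo);
 *
 * The counters that follow are plain 32-bit values and are copied
 * verbatim from the statistics block.
 */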
sc->stat_EtherStatsPktsTx64Octets = 5781 stats->stat_EtherStatsPktsTx64Octets; 5782 5783 sc->stat_EtherStatsPktsTx65Octetsto127Octets = 5784 stats->stat_EtherStatsPktsTx65Octetsto127Octets; 5785 5786 sc->stat_EtherStatsPktsTx128Octetsto255Octets = 5787 stats->stat_EtherStatsPktsTx128Octetsto255Octets; 5788 5789 sc->stat_EtherStatsPktsTx256Octetsto511Octets = 5790 stats->stat_EtherStatsPktsTx256Octetsto511Octets; 5791 5792 sc->stat_EtherStatsPktsTx512Octetsto1023Octets = 5793 stats->stat_EtherStatsPktsTx512Octetsto1023Octets; 5794 5795 sc->stat_EtherStatsPktsTx1024Octetsto1522Octets = 5796 stats->stat_EtherStatsPktsTx1024Octetsto1522Octets; 5797 5798 sc->stat_EtherStatsPktsTx1523Octetsto9022Octets = 5799 stats->stat_EtherStatsPktsTx1523Octetsto9022Octets; 5800 5801 sc->stat_XonPauseFramesReceived = 5802 stats->stat_XonPauseFramesReceived; 5803 5804 sc->stat_XoffPauseFramesReceived = 5805 stats->stat_XoffPauseFramesReceived; 5806 5807 sc->stat_OutXonSent = 5808 stats->stat_OutXonSent; 5809 5810 sc->stat_OutXoffSent = 5811 stats->stat_OutXoffSent; 5812 5813 sc->stat_FlowControlDone = 5814 stats->stat_FlowControlDone; 5815 5816 sc->stat_MacControlFramesReceived = 5817 stats->stat_MacControlFramesReceived; 5818 5819 sc->stat_XoffStateEntered = 5820 stats->stat_XoffStateEntered; 5821 5822 sc->stat_IfInFramesL2FilterDiscards = 5823 stats->stat_IfInFramesL2FilterDiscards; 5824 5825 sc->stat_IfInRuleCheckerDiscards = 5826 stats->stat_IfInRuleCheckerDiscards; 5827 5828 sc->stat_IfInFTQDiscards = 5829 stats->stat_IfInFTQDiscards; 5830 5831 sc->stat_IfInMBUFDiscards = 5832 stats->stat_IfInMBUFDiscards; 5833 5834 sc->stat_IfInRuleCheckerP4Hit = 5835 stats->stat_IfInRuleCheckerP4Hit; 5836 5837 sc->stat_CatchupInRuleCheckerDiscards = 5838 stats->stat_CatchupInRuleCheckerDiscards; 5839 5840 sc->stat_CatchupInFTQDiscards = 5841 stats->stat_CatchupInFTQDiscards; 5842 5843 sc->stat_CatchupInMBUFDiscards = 5844 stats->stat_CatchupInMBUFDiscards; 5845 5846 sc->stat_CatchupInRuleCheckerP4Hit = 5847 stats->stat_CatchupInRuleCheckerP4Hit; 5848 5849 sc->com_no_buffers = REG_RD_IND(sc, 0x120084); 5850 5851 /* 5852 * Update the interface statistics from the 5853 * hardware statistics. 5854 */ 5855 IFNET_STAT_SET(ifp, collisions, (u_long)sc->stat_EtherStatsCollisions); 5856 5857 IFNET_STAT_SET(ifp, ierrors, (u_long)sc->stat_EtherStatsUndersizePkts + 5858 (u_long)sc->stat_EtherStatsOverrsizePkts + 5859 (u_long)sc->stat_IfInMBUFDiscards + 5860 (u_long)sc->stat_Dot3StatsAlignmentErrors + 5861 (u_long)sc->stat_Dot3StatsFCSErrors + 5862 (u_long)sc->stat_IfInRuleCheckerDiscards + 5863 (u_long)sc->stat_IfInFTQDiscards + 5864 (u_long)sc->com_no_buffers); 5865 5866 IFNET_STAT_SET(ifp, oerrors, 5867 (u_long)sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors + 5868 (u_long)sc->stat_Dot3StatsExcessiveCollisions + 5869 (u_long)sc->stat_Dot3StatsLateCollisions); 5870 } 5871 5872 /****************************************************************************/ 5873 /* Periodic function to notify the bootcode that the driver is still */ 5874 /* present. */ 5875 /* */ 5876 /* Returns: */ 5877 /* Nothing. */ 5878 /****************************************************************************/ 5879 static void 5880 bce_pulse(void *xsc) 5881 { 5882 struct bce_softc *sc = xsc; 5883 struct ifnet *ifp = &sc->arpcom.ac_if; 5884 uint32_t msg; 5885 5886 lwkt_serialize_enter(&sc->main_serialize); 5887 5888 /* Tell the firmware that the driver is still running. 
*/ 5889 msg = (uint32_t)++sc->bce_fw_drv_pulse_wr_seq; 5890 bce_shmem_wr(sc, BCE_DRV_PULSE_MB, msg); 5891 5892 /* Update the bootcode condition. */ 5893 sc->bc_state = bce_shmem_rd(sc, BCE_BC_STATE_CONDITION); 5894 5895 /* Report whether the bootcode still knows the driver is running. */ 5896 if (!sc->bce_drv_cardiac_arrest) { 5897 if (!(sc->bc_state & BCE_CONDITION_DRV_PRESENT)) { 5898 sc->bce_drv_cardiac_arrest = 1; 5899 if_printf(ifp, "Bootcode lost the driver pulse! " 5900 "(bc_state = 0x%08X)\n", sc->bc_state); 5901 } 5902 } else { 5903 /* 5904 * Not supported by all bootcode versions. 5905 * (v5.0.11+ and v5.2.1+) Older bootcode 5906 * will require the driver to reset the 5907 * controller to clear this condition. 5908 */ 5909 if (sc->bc_state & BCE_CONDITION_DRV_PRESENT) { 5910 sc->bce_drv_cardiac_arrest = 0; 5911 if_printf(ifp, "Bootcode found the driver pulse! " 5912 "(bc_state = 0x%08X)\n", sc->bc_state); 5913 } 5914 } 5915 5916 /* Schedule the next pulse. */ 5917 callout_reset_bycpu(&sc->bce_pulse_callout, hz, bce_pulse, sc, 5918 sc->bce_timer_cpuid); 5919 5920 lwkt_serialize_exit(&sc->main_serialize); 5921 } 5922 5923 /****************************************************************************/ 5924 /* Periodic function to check whether MSI is lost */ 5925 /* */ 5926 /* Returns: */ 5927 /* Nothing. */ 5928 /****************************************************************************/ 5929 static void 5930 bce_check_msi(void *xsc) 5931 { 5932 struct bce_softc *sc = xsc; 5933 struct ifnet *ifp = &sc->arpcom.ac_if; 5934 struct status_block *sblk = sc->status_block; 5935 struct bce_tx_ring *txr = &sc->tx_rings[0]; 5936 struct bce_rx_ring *rxr = &sc->rx_rings[0]; 5937 5938 lwkt_serialize_enter(&sc->main_serialize); 5939 5940 KKASSERT(mycpuid == sc->bce_msix[0].msix_cpuid); 5941 5942 if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) != IFF_RUNNING) { 5943 lwkt_serialize_exit(&sc->main_serialize); 5944 return; 5945 } 5946 5947 if (bce_get_hw_rx_cons(rxr) != rxr->rx_cons || 5948 bce_get_hw_tx_cons(txr) != txr->tx_cons || 5949 (sblk->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) != 5950 (sblk->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE)) { 5951 if (sc->bce_check_rx_cons == rxr->rx_cons && 5952 sc->bce_check_tx_cons == txr->tx_cons && 5953 sc->bce_check_status_idx == rxr->last_status_idx) { 5954 uint32_t msi_ctrl; 5955 5956 if (!sc->bce_msi_maylose) { 5957 sc->bce_msi_maylose = TRUE; 5958 goto done; 5959 } 5960 5961 msi_ctrl = REG_RD(sc, BCE_PCICFG_MSI_CONTROL); 5962 if (msi_ctrl & BCE_PCICFG_MSI_CONTROL_ENABLE) { 5963 if (bootverbose) 5964 if_printf(ifp, "lost MSI\n"); 5965 5966 REG_WR(sc, BCE_PCICFG_MSI_CONTROL, 5967 msi_ctrl & ~BCE_PCICFG_MSI_CONTROL_ENABLE); 5968 REG_WR(sc, BCE_PCICFG_MSI_CONTROL, msi_ctrl); 5969 5970 bce_intr_msi(sc); 5971 } else if (bootverbose) { 5972 if_printf(ifp, "MSI may be lost\n"); 5973 } 5974 } 5975 } 5976 sc->bce_msi_maylose = FALSE; 5977 sc->bce_check_rx_cons = rxr->rx_cons; 5978 sc->bce_check_tx_cons = txr->tx_cons; 5979 sc->bce_check_status_idx = rxr->last_status_idx; 5980 5981 done: 5982 callout_reset(&sc->bce_ckmsi_callout, BCE_MSI_CKINTVL, 5983 bce_check_msi, sc); 5984 lwkt_serialize_exit(&sc->main_serialize); 5985 } 5986 5987 /****************************************************************************/ 5988 /* Periodic function to perform maintenance tasks. */ 5989 /* */ 5990 /* Returns: */ 5991 /* Nothing. 
*/ 5992 /****************************************************************************/ 5993 static void 5994 bce_tick_serialized(struct bce_softc *sc) 5995 { 5996 struct mii_data *mii; 5997 5998 ASSERT_SERIALIZED(&sc->main_serialize); 5999 6000 /* Update the statistics from the hardware statistics block. */ 6001 bce_stats_update(sc); 6002 6003 /* Schedule the next tick. */ 6004 callout_reset_bycpu(&sc->bce_tick_callout, hz, bce_tick, sc, 6005 sc->bce_timer_cpuid); 6006 6007 /* If the link is already up then we're done. */ 6008 if (sc->bce_link) 6009 return; 6010 6011 mii = device_get_softc(sc->bce_miibus); 6012 mii_tick(mii); 6013 6014 /* Check if the link has come up. */ 6015 if ((mii->mii_media_status & IFM_ACTIVE) && 6016 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { 6017 int i; 6018 6019 sc->bce_link++; 6020 /* Now that link is up, handle any outstanding TX traffic. */ 6021 for (i = 0; i < sc->tx_ring_cnt; ++i) 6022 ifsq_devstart_sched(sc->tx_rings[i].ifsq); 6023 } 6024 } 6025 6026 static void 6027 bce_tick(void *xsc) 6028 { 6029 struct bce_softc *sc = xsc; 6030 6031 lwkt_serialize_enter(&sc->main_serialize); 6032 bce_tick_serialized(sc); 6033 lwkt_serialize_exit(&sc->main_serialize); 6034 } 6035 6036 /****************************************************************************/ 6037 /* Adds any sysctl parameters for tuning or debugging purposes. */ 6038 /* */ 6039 /* Returns: */ 6040 /* Nothing. */ 6041 /****************************************************************************/ 6042 static void 6043 bce_add_sysctls(struct bce_softc *sc) 6044 { 6045 struct sysctl_ctx_list *ctx; 6046 struct sysctl_oid_list *children; 6047 #if defined(BCE_TSS_DEBUG) || defined(BCE_RSS_DEBUG) 6048 char node[32]; 6049 int i; 6050 #endif 6051 6052 sysctl_ctx_init(&sc->bce_sysctl_ctx); 6053 sc->bce_sysctl_tree = SYSCTL_ADD_NODE(&sc->bce_sysctl_ctx, 6054 SYSCTL_STATIC_CHILDREN(_hw), 6055 OID_AUTO, 6056 device_get_nameunit(sc->bce_dev), 6057 CTLFLAG_RD, 0, ""); 6058 if (sc->bce_sysctl_tree == NULL) { 6059 device_printf(sc->bce_dev, "can't add sysctl node\n"); 6060 return; 6061 } 6062 6063 ctx = &sc->bce_sysctl_ctx; 6064 children = SYSCTL_CHILDREN(sc->bce_sysctl_tree); 6065 6066 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_bds_int", 6067 CTLTYPE_INT | CTLFLAG_RW, 6068 sc, 0, bce_sysctl_tx_bds_int, "I", 6069 "Send max coalesced BD count during interrupt"); 6070 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_bds", 6071 CTLTYPE_INT | CTLFLAG_RW, 6072 sc, 0, bce_sysctl_tx_bds, "I", 6073 "Send max coalesced BD count"); 6074 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_ticks_int", 6075 CTLTYPE_INT | CTLFLAG_RW, 6076 sc, 0, bce_sysctl_tx_ticks_int, "I", 6077 "Send coalescing ticks during interrupt"); 6078 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_ticks", 6079 CTLTYPE_INT | CTLFLAG_RW, 6080 sc, 0, bce_sysctl_tx_ticks, "I", 6081 "Send coalescing ticks"); 6082 6083 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_bds_int", 6084 CTLTYPE_INT | CTLFLAG_RW, 6085 sc, 0, bce_sysctl_rx_bds_int, "I", 6086 "Receive max coalesced BD count during interrupt"); 6087 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_bds", 6088 CTLTYPE_INT | CTLFLAG_RW, 6089 sc, 0, bce_sysctl_rx_bds, "I", 6090 "Receive max coalesced BD count"); 6091 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_ticks_int", 6092 CTLTYPE_INT | CTLFLAG_RW, 6093 sc, 0, bce_sysctl_rx_ticks_int, "I", 6094 "Receive coalescing ticks during interrupt"); 6095 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_ticks", 6096 CTLTYPE_INT | CTLFLAG_RW, 6097
sc, 0, bce_sysctl_rx_ticks, "I", 6098 "Receive coalescing ticks"); 6099 6100 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_rings", 6101 CTLFLAG_RD, &sc->rx_ring_cnt, 0, "# of RX rings"); 6102 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_pages", 6103 CTLFLAG_RD, &sc->rx_rings[0].rx_pages, 0, "# of RX pages"); 6104 6105 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_rings", 6106 CTLFLAG_RD, &sc->tx_ring_cnt, 0, "# of TX rings"); 6107 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_pages", 6108 CTLFLAG_RD, &sc->tx_rings[0].tx_pages, 0, "# of TX pages"); 6109 6110 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_wreg", 6111 CTLFLAG_RW, &sc->tx_rings[0].tx_wreg, 0, 6112 "# segments before write to hardware registers"); 6113 6114 #ifdef IFPOLL_ENABLE 6115 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "npoll_offset", 6116 CTLTYPE_INT|CTLFLAG_RW, sc, 0, bce_sysctl_npoll_offset, 6117 "I", "NPOLLING cpu offset"); 6118 #endif 6119 6120 #ifdef BCE_RSS_DEBUG 6121 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rss_debug", 6122 CTLFLAG_RW, &sc->rss_debug, 0, "RSS debug level"); 6123 for (i = 0; i < sc->rx_ring_cnt; ++i) { 6124 ksnprintf(node, sizeof(node), "rx%d_pkt", i); 6125 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, node, 6126 CTLFLAG_RW, &sc->rx_rings[i].rx_pkts, 6127 "RXed packets"); 6128 } 6129 #endif 6130 6131 #ifdef BCE_TSS_DEBUG 6132 for (i = 0; i < sc->tx_ring_cnt; ++i) { 6133 ksnprintf(node, sizeof(node), "tx%d_pkt", i); 6134 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, node, 6135 CTLFLAG_RW, &sc->tx_rings[i].tx_pkts, 6136 "TXed packets"); 6137 } 6138 #endif 6139 6140 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 6141 "stat_IfHCInOctets", 6142 CTLFLAG_RD, &sc->stat_IfHCInOctets, 6143 "Bytes received"); 6144 6145 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 6146 "stat_IfHCInBadOctets", 6147 CTLFLAG_RD, &sc->stat_IfHCInBadOctets, 6148 "Bad bytes received"); 6149 6150 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 6151 "stat_IfHCOutOctets", 6152 CTLFLAG_RD, &sc->stat_IfHCOutOctets, 6153 "Bytes sent"); 6154 6155 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 6156 "stat_IfHCOutBadOctets", 6157 CTLFLAG_RD, &sc->stat_IfHCOutBadOctets, 6158 "Bad bytes sent"); 6159 6160 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 6161 "stat_IfHCInUcastPkts", 6162 CTLFLAG_RD, &sc->stat_IfHCInUcastPkts, 6163 "Unicast packets received"); 6164 6165 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 6166 "stat_IfHCInMulticastPkts", 6167 CTLFLAG_RD, &sc->stat_IfHCInMulticastPkts, 6168 "Multicast packets received"); 6169 6170 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 6171 "stat_IfHCInBroadcastPkts", 6172 CTLFLAG_RD, &sc->stat_IfHCInBroadcastPkts, 6173 "Broadcast packets received"); 6174 6175 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 6176 "stat_IfHCOutUcastPkts", 6177 CTLFLAG_RD, &sc->stat_IfHCOutUcastPkts, 6178 "Unicast packets sent"); 6179 6180 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 6181 "stat_IfHCOutMulticastPkts", 6182 CTLFLAG_RD, &sc->stat_IfHCOutMulticastPkts, 6183 "Multicast packets sent"); 6184 6185 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 6186 "stat_IfHCOutBroadcastPkts", 6187 CTLFLAG_RD, &sc->stat_IfHCOutBroadcastPkts, 6188 "Broadcast packets sent"); 6189 6190 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6191 "stat_emac_tx_stat_dot3statsinternalmactransmiterrors", 6192 CTLFLAG_RD, &sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors, 6193 0, "Internal MAC transmit errors"); 6194 6195 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6196 "stat_Dot3StatsCarrierSenseErrors", 6197 CTLFLAG_RD, &sc->stat_Dot3StatsCarrierSenseErrors, 6198 0, "Carrier sense errors"); 6199 6200 
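/*
 * The statistics nodes registered below simply expose the shadow
 * copies maintained by bce_stats_update().  They appear under the
 * per-device tree created above, e.g. (illustrative paths, assuming
 * the device attached as bce0):
 *
 *	sysctl hw.bce0.stat_IfHCInOctets
 *	sysctl hw.bce0.stat_Dot3StatsFCSErrors
 */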
SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6201 "stat_Dot3StatsFCSErrors", 6202 CTLFLAG_RD, &sc->stat_Dot3StatsFCSErrors, 6203 0, "Frame check sequence errors"); 6204 6205 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6206 "stat_Dot3StatsAlignmentErrors", 6207 CTLFLAG_RD, &sc->stat_Dot3StatsAlignmentErrors, 6208 0, "Alignment errors"); 6209 6210 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6211 "stat_Dot3StatsSingleCollisionFrames", 6212 CTLFLAG_RD, &sc->stat_Dot3StatsSingleCollisionFrames, 6213 0, "Single Collision Frames"); 6214 6215 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6216 "stat_Dot3StatsMultipleCollisionFrames", 6217 CTLFLAG_RD, &sc->stat_Dot3StatsMultipleCollisionFrames, 6218 0, "Multiple Collision Frames"); 6219 6220 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6221 "stat_Dot3StatsDeferredTransmissions", 6222 CTLFLAG_RD, &sc->stat_Dot3StatsDeferredTransmissions, 6223 0, "Deferred Transmissions"); 6224 6225 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6226 "stat_Dot3StatsExcessiveCollisions", 6227 CTLFLAG_RD, &sc->stat_Dot3StatsExcessiveCollisions, 6228 0, "Excessive Collisions"); 6229 6230 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6231 "stat_Dot3StatsLateCollisions", 6232 CTLFLAG_RD, &sc->stat_Dot3StatsLateCollisions, 6233 0, "Late Collisions"); 6234 6235 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6236 "stat_EtherStatsCollisions", 6237 CTLFLAG_RD, &sc->stat_EtherStatsCollisions, 6238 0, "Collisions"); 6239 6240 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6241 "stat_EtherStatsFragments", 6242 CTLFLAG_RD, &sc->stat_EtherStatsFragments, 6243 0, "Fragments"); 6244 6245 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6246 "stat_EtherStatsJabbers", 6247 CTLFLAG_RD, &sc->stat_EtherStatsJabbers, 6248 0, "Jabbers"); 6249 6250 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6251 "stat_EtherStatsUndersizePkts", 6252 CTLFLAG_RD, &sc->stat_EtherStatsUndersizePkts, 6253 0, "Undersize packets"); 6254 6255 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6256 "stat_EtherStatsOverrsizePkts", 6257 CTLFLAG_RD, &sc->stat_EtherStatsOverrsizePkts, 6258 0, "Oversize packets"); 6259 6260 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6261 "stat_EtherStatsPktsRx64Octets", 6262 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx64Octets, 6263 0, "64 byte packets received"); 6264 6265 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6266 "stat_EtherStatsPktsRx65Octetsto127Octets", 6267 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx65Octetsto127Octets, 6268 0, "65 to 127 byte packets received"); 6269 6270 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6271 "stat_EtherStatsPktsRx128Octetsto255Octets", 6272 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx128Octetsto255Octets, 6273 0, "128 to 255 byte packets received"); 6274 6275 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6276 "stat_EtherStatsPktsRx256Octetsto511Octets", 6277 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx256Octetsto511Octets, 6278 0, "256 to 511 byte packets received"); 6279 6280 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6281 "stat_EtherStatsPktsRx512Octetsto1023Octets", 6282 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx512Octetsto1023Octets, 6283 0, "512 to 1023 byte packets received"); 6284 6285 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6286 "stat_EtherStatsPktsRx1024Octetsto1522Octets", 6287 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1024Octetsto1522Octets, 6288 0, "1024 to 1522 byte packets received"); 6289 6290 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6291 "stat_EtherStatsPktsRx1523Octetsto9022Octets", 6292 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1523Octetsto9022Octets, 6293 0, "1523 to 9022 byte packets received");
6294 6295 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6296 "stat_EtherStatsPktsTx64Octets", 6297 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx64Octets, 6298 0, "64 byte packets sent"); 6299 6300 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6301 "stat_EtherStatsPktsTx65Octetsto127Octets", 6302 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx65Octetsto127Octets, 6303 0, "65 to 127 byte packets sent"); 6304 6305 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6306 "stat_EtherStatsPktsTx128Octetsto255Octets", 6307 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx128Octetsto255Octets, 6308 0, "128 to 255 byte packets sent"); 6309 6310 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6311 "stat_EtherStatsPktsTx256Octetsto511Octets", 6312 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx256Octetsto511Octets, 6313 0, "256 to 511 byte packets sent"); 6314 6315 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6316 "stat_EtherStatsPktsTx512Octetsto1023Octets", 6317 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx512Octetsto1023Octets, 6318 0, "512 to 1023 byte packets sent"); 6319 6320 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6321 "stat_EtherStatsPktsTx1024Octetsto1522Octets", 6322 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1024Octetsto1522Octets, 6323 0, "1024 to 1522 byte packets sent"); 6324 6325 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6326 "stat_EtherStatsPktsTx1523Octetsto9022Octets", 6327 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1523Octetsto9022Octets, 6328 0, "1523 to 9022 byte packets sent"); 6329 6330 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6331 "stat_XonPauseFramesReceived", 6332 CTLFLAG_RD, &sc->stat_XonPauseFramesReceived, 6333 0, "XON pause frames received"); 6334 6335 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6336 "stat_XoffPauseFramesReceived", 6337 CTLFLAG_RD, &sc->stat_XoffPauseFramesReceived, 6338 0, "XOFF pause frames received"); 6339 6340 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6341 "stat_OutXonSent", 6342 CTLFLAG_RD, &sc->stat_OutXonSent, 6343 0, "XON pause frames sent"); 6344 6345 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6346 "stat_OutXoffSent", 6347 CTLFLAG_RD, &sc->stat_OutXoffSent, 6348 0, "XOFF pause frames sent"); 6349 6350 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6351 "stat_FlowControlDone", 6352 CTLFLAG_RD, &sc->stat_FlowControlDone, 6353 0, "Flow control done"); 6354 6355 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6356 "stat_MacControlFramesReceived", 6357 CTLFLAG_RD, &sc->stat_MacControlFramesReceived, 6358 0, "MAC control frames received"); 6359 6360 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6361 "stat_XoffStateEntered", 6362 CTLFLAG_RD, &sc->stat_XoffStateEntered, 6363 0, "XOFF state entered"); 6364 6365 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6366 "stat_IfInFramesL2FilterDiscards", 6367 CTLFLAG_RD, &sc->stat_IfInFramesL2FilterDiscards, 6368 0, "Received L2 packets discarded"); 6369 6370 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6371 "stat_IfInRuleCheckerDiscards", 6372 CTLFLAG_RD, &sc->stat_IfInRuleCheckerDiscards, 6373 0, "Received packets discarded by rule"); 6374 6375 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6376 "stat_IfInFTQDiscards", 6377 CTLFLAG_RD, &sc->stat_IfInFTQDiscards, 6378 0, "Received packet FTQ discards"); 6379 6380 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6381 "stat_IfInMBUFDiscards", 6382 CTLFLAG_RD, &sc->stat_IfInMBUFDiscards, 6383 0, "Received packets discarded due to lack of controller buffer memory"); 6384 6385 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6386 "stat_IfInRuleCheckerP4Hit", 6387 CTLFLAG_RD, &sc->stat_IfInRuleCheckerP4Hit, 6388
0, "Received packets rule checker hits"); 6389 6390 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6391 "stat_CatchupInRuleCheckerDiscards", 6392 CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerDiscards, 6393 0, "Received packets discarded in Catchup path"); 6394 6395 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6396 "stat_CatchupInFTQDiscards", 6397 CTLFLAG_RD, &sc->stat_CatchupInFTQDiscards, 6398 0, "Received packets discarded in FTQ in Catchup path"); 6399 6400 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6401 "stat_CatchupInMBUFDiscards", 6402 CTLFLAG_RD, &sc->stat_CatchupInMBUFDiscards, 6403 0, "Received packets discarded in controller buffer memory in Catchup path"); 6404 6405 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6406 "stat_CatchupInRuleCheckerP4Hit", 6407 CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerP4Hit, 6408 0, "Received packets rule checker hits in Catchup path"); 6409 6410 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6411 "com_no_buffers", 6412 CTLFLAG_RD, &sc->com_no_buffers, 6413 0, "Valid packets received but no RX buffers available"); 6414 } 6415 6416 static int 6417 bce_sysctl_tx_bds_int(SYSCTL_HANDLER_ARGS) 6418 { 6419 struct bce_softc *sc = arg1; 6420 6421 return bce_sysctl_coal_change(oidp, arg1, arg2, req, 6422 &sc->bce_tx_quick_cons_trip_int, 6423 BCE_COALMASK_TX_BDS_INT); 6424 } 6425 6426 static int 6427 bce_sysctl_tx_bds(SYSCTL_HANDLER_ARGS) 6428 { 6429 struct bce_softc *sc = arg1; 6430 6431 return bce_sysctl_coal_change(oidp, arg1, arg2, req, 6432 &sc->bce_tx_quick_cons_trip, 6433 BCE_COALMASK_TX_BDS); 6434 } 6435 6436 static int 6437 bce_sysctl_tx_ticks_int(SYSCTL_HANDLER_ARGS) 6438 { 6439 struct bce_softc *sc = arg1; 6440 6441 return bce_sysctl_coal_change(oidp, arg1, arg2, req, 6442 &sc->bce_tx_ticks_int, 6443 BCE_COALMASK_TX_TICKS_INT); 6444 } 6445 6446 static int 6447 bce_sysctl_tx_ticks(SYSCTL_HANDLER_ARGS) 6448 { 6449 struct bce_softc *sc = arg1; 6450 6451 return bce_sysctl_coal_change(oidp, arg1, arg2, req, 6452 &sc->bce_tx_ticks, 6453 BCE_COALMASK_TX_TICKS); 6454 } 6455 6456 static int 6457 bce_sysctl_rx_bds_int(SYSCTL_HANDLER_ARGS) 6458 { 6459 struct bce_softc *sc = arg1; 6460 6461 return bce_sysctl_coal_change(oidp, arg1, arg2, req, 6462 &sc->bce_rx_quick_cons_trip_int, 6463 BCE_COALMASK_RX_BDS_INT); 6464 } 6465 6466 static int 6467 bce_sysctl_rx_bds(SYSCTL_HANDLER_ARGS) 6468 { 6469 struct bce_softc *sc = arg1; 6470 6471 return bce_sysctl_coal_change(oidp, arg1, arg2, req, 6472 &sc->bce_rx_quick_cons_trip, 6473 BCE_COALMASK_RX_BDS); 6474 } 6475 6476 static int 6477 bce_sysctl_rx_ticks_int(SYSCTL_HANDLER_ARGS) 6478 { 6479 struct bce_softc *sc = arg1; 6480 6481 return bce_sysctl_coal_change(oidp, arg1, arg2, req, 6482 &sc->bce_rx_ticks_int, 6483 BCE_COALMASK_RX_TICKS_INT); 6484 } 6485 6486 static int 6487 bce_sysctl_rx_ticks(SYSCTL_HANDLER_ARGS) 6488 { 6489 struct bce_softc *sc = arg1; 6490 6491 return bce_sysctl_coal_change(oidp, arg1, arg2, req, 6492 &sc->bce_rx_ticks, 6493 BCE_COALMASK_RX_TICKS); 6494 } 6495 6496 static int 6497 bce_sysctl_coal_change(SYSCTL_HANDLER_ARGS, uint32_t *coal, 6498 uint32_t coalchg_mask) 6499 { 6500 struct bce_softc *sc = arg1; 6501 struct ifnet *ifp = &sc->arpcom.ac_if; 6502 int error = 0, v; 6503 6504 ifnet_serialize_all(ifp); 6505 6506 v = *coal; 6507 error = sysctl_handle_int(oidp, &v, 0, req); 6508 if (!error && req->newptr != NULL) { 6509 if (v < 0) { 6510 error = EINVAL; 6511 } else { 6512 *coal = v; 6513 sc->bce_coalchg_mask |= coalchg_mask; 6514 6515 /* Commit changes */ 6516 bce_coal_change(sc); 6517 } 6518 } 6519 6520 
ifnet_deserialize_all(ifp); 6521 return error; 6522 } 6523 6524 static void 6525 bce_coal_change(struct bce_softc *sc) 6526 { 6527 struct ifnet *ifp = &sc->arpcom.ac_if; 6528 int i; 6529 6530 ASSERT_SERIALIZED(&sc->main_serialize); 6531 6532 if ((ifp->if_flags & IFF_RUNNING) == 0) { 6533 sc->bce_coalchg_mask = 0; 6534 return; 6535 } 6536 6537 if (sc->bce_coalchg_mask & 6538 (BCE_COALMASK_TX_BDS | BCE_COALMASK_TX_BDS_INT)) { 6539 REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP, 6540 (sc->bce_tx_quick_cons_trip_int << 16) | 6541 sc->bce_tx_quick_cons_trip); 6542 for (i = 1; i < sc->rx_ring_cnt; ++i) { 6543 uint32_t base; 6544 6545 base = ((i - 1) * BCE_HC_SB_CONFIG_SIZE) + 6546 BCE_HC_SB_CONFIG_1; 6547 REG_WR(sc, base + BCE_HC_TX_QUICK_CONS_TRIP_OFF, 6548 (sc->bce_tx_quick_cons_trip_int << 16) | 6549 sc->bce_tx_quick_cons_trip); 6550 } 6551 if (bootverbose) { 6552 if_printf(ifp, "tx_bds %u, tx_bds_int %u\n", 6553 sc->bce_tx_quick_cons_trip, 6554 sc->bce_tx_quick_cons_trip_int); 6555 } 6556 } 6557 6558 if (sc->bce_coalchg_mask & 6559 (BCE_COALMASK_TX_TICKS | BCE_COALMASK_TX_TICKS_INT)) { 6560 REG_WR(sc, BCE_HC_TX_TICKS, 6561 (sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks); 6562 for (i = 1; i < sc->rx_ring_cnt; ++i) { 6563 uint32_t base; 6564 6565 base = ((i - 1) * BCE_HC_SB_CONFIG_SIZE) + 6566 BCE_HC_SB_CONFIG_1; 6567 REG_WR(sc, base + BCE_HC_TX_TICKS_OFF, 6568 (sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks); 6569 } 6570 if (bootverbose) { 6571 if_printf(ifp, "tx_ticks %u, tx_ticks_int %u\n", 6572 sc->bce_tx_ticks, sc->bce_tx_ticks_int); 6573 } 6574 } 6575 6576 if (sc->bce_coalchg_mask & 6577 (BCE_COALMASK_RX_BDS | BCE_COALMASK_RX_BDS_INT)) { 6578 REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP, 6579 (sc->bce_rx_quick_cons_trip_int << 16) | 6580 sc->bce_rx_quick_cons_trip); 6581 for (i = 1; i < sc->rx_ring_cnt; ++i) { 6582 uint32_t base; 6583 6584 base = ((i - 1) * BCE_HC_SB_CONFIG_SIZE) + 6585 BCE_HC_SB_CONFIG_1; 6586 REG_WR(sc, base + BCE_HC_RX_QUICK_CONS_TRIP_OFF, 6587 (sc->bce_rx_quick_cons_trip_int << 16) | 6588 sc->bce_rx_quick_cons_trip); 6589 } 6590 if (bootverbose) { 6591 if_printf(ifp, "rx_bds %u, rx_bds_int %u\n", 6592 sc->bce_rx_quick_cons_trip, 6593 sc->bce_rx_quick_cons_trip_int); 6594 } 6595 } 6596 6597 if (sc->bce_coalchg_mask & 6598 (BCE_COALMASK_RX_TICKS | BCE_COALMASK_RX_TICKS_INT)) { 6599 REG_WR(sc, BCE_HC_RX_TICKS, 6600 (sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks); 6601 for (i = 1; i < sc->rx_ring_cnt; ++i) { 6602 uint32_t base; 6603 6604 base = ((i - 1) * BCE_HC_SB_CONFIG_SIZE) + 6605 BCE_HC_SB_CONFIG_1; 6606 REG_WR(sc, base + BCE_HC_RX_TICKS_OFF, 6607 (sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks); 6608 } 6609 if (bootverbose) { 6610 if_printf(ifp, "rx_ticks %u, rx_ticks_int %u\n", 6611 sc->bce_rx_ticks, sc->bce_rx_ticks_int); 6612 } 6613 } 6614 6615 sc->bce_coalchg_mask = 0; 6616 } 6617 6618 static int 6619 bce_tso_setup(struct bce_tx_ring *txr, struct mbuf **mp, 6620 uint16_t *flags0, uint16_t *mss0) 6621 { 6622 struct mbuf *m; 6623 uint16_t flags; 6624 int thoff, iphlen, hoff; 6625 6626 m = *mp; 6627 KASSERT(M_WRITABLE(m), ("TSO mbuf not writable")); 6628 6629 hoff = m->m_pkthdr.csum_lhlen; 6630 iphlen = m->m_pkthdr.csum_iphlen; 6631 thoff = m->m_pkthdr.csum_thlen; 6632 6633 KASSERT(hoff >= sizeof(struct ether_header), 6634 ("invalid ether header len %d", hoff)); 6635 KASSERT(iphlen >= sizeof(struct ip), 6636 ("invalid ip header len %d", iphlen)); 6637 KASSERT(thoff >= sizeof(struct tcphdr), 6638 ("invalid tcp header len %d", thoff)); 6639 6640 if (__predict_false(m->m_len < hoff 
+ iphlen + thoff)) { 6641 m = m_pullup(m, hoff + iphlen + thoff); 6642 if (m == NULL) { 6643 *mp = NULL; 6644 return ENOBUFS; 6645 } 6646 *mp = m; 6647 } 6648 6649 /* Set the LSO flag in the TX BD */ 6650 flags = TX_BD_FLAGS_SW_LSO; 6651 6652 /* Set the length of IP + TCP options (in 32 bit words) */ 6653 flags |= (((iphlen + thoff - 6654 sizeof(struct ip) - sizeof(struct tcphdr)) >> 2) << 8); 6655 6656 *mss0 = htole16(m->m_pkthdr.tso_segsz); 6657 *flags0 = flags; 6658 6659 return 0; 6660 } 6661 6662 static void 6663 bce_setup_serialize(struct bce_softc *sc) 6664 { 6665 int i, j; 6666 6667 /* 6668 * Allocate serializer array 6669 */ 6670 6671 /* Main + TX + RX */ 6672 sc->serialize_cnt = 1 + sc->tx_ring_cnt + sc->rx_ring_cnt; 6673 6674 sc->serializes = 6675 kmalloc(sc->serialize_cnt * sizeof(struct lwkt_serialize *), 6676 M_DEVBUF, M_WAITOK | M_ZERO); 6677 6678 /* 6679 * Setup serializers 6680 * 6681 * NOTE: Order is critical 6682 */ 6683 6684 i = 0; 6685 6686 KKASSERT(i < sc->serialize_cnt); 6687 sc->serializes[i++] = &sc->main_serialize; 6688 6689 for (j = 0; j < sc->rx_ring_cnt; ++j) { 6690 KKASSERT(i < sc->serialize_cnt); 6691 sc->serializes[i++] = &sc->rx_rings[j].rx_serialize; 6692 } 6693 6694 for (j = 0; j < sc->tx_ring_cnt; ++j) { 6695 KKASSERT(i < sc->serialize_cnt); 6696 sc->serializes[i++] = &sc->tx_rings[j].tx_serialize; 6697 } 6698 6699 KKASSERT(i == sc->serialize_cnt); 6700 } 6701 6702 static void 6703 bce_serialize(struct ifnet *ifp, enum ifnet_serialize slz) 6704 { 6705 struct bce_softc *sc = ifp->if_softc; 6706 6707 ifnet_serialize_array_enter(sc->serializes, sc->serialize_cnt, slz); 6708 } 6709 6710 static void 6711 bce_deserialize(struct ifnet *ifp, enum ifnet_serialize slz) 6712 { 6713 struct bce_softc *sc = ifp->if_softc; 6714 6715 ifnet_serialize_array_exit(sc->serializes, sc->serialize_cnt, slz); 6716 } 6717 6718 static int 6719 bce_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz) 6720 { 6721 struct bce_softc *sc = ifp->if_softc; 6722 6723 return ifnet_serialize_array_try(sc->serializes, sc->serialize_cnt, 6724 slz); 6725 } 6726 6727 #ifdef INVARIANTS 6728 6729 static void 6730 bce_serialize_assert(struct ifnet *ifp, enum ifnet_serialize slz, 6731 boolean_t serialized) 6732 { 6733 struct bce_softc *sc = ifp->if_softc; 6734 6735 ifnet_serialize_array_assert(sc->serializes, sc->serialize_cnt, 6736 slz, serialized); 6737 } 6738 6739 #endif /* INVARIANTS */ 6740 6741 static void 6742 bce_serialize_skipmain(struct bce_softc *sc) 6743 { 6744 lwkt_serialize_array_enter(sc->serializes, sc->serialize_cnt, 1); 6745 } 6746 6747 static void 6748 bce_deserialize_skipmain(struct bce_softc *sc) 6749 { 6750 lwkt_serialize_array_exit(sc->serializes, sc->serialize_cnt, 1); 6751 } 6752 6753 #ifdef IFPOLL_ENABLE 6754 6755 static int 6756 bce_sysctl_npoll_offset(SYSCTL_HANDLER_ARGS) 6757 { 6758 struct bce_softc *sc = (void *)arg1; 6759 struct ifnet *ifp = &sc->arpcom.ac_if; 6760 int error, off; 6761 6762 off = sc->npoll_ofs; 6763 error = sysctl_handle_int(oidp, &off, 0, req); 6764 if (error || req->newptr == NULL) 6765 return error; 6766 if (off < 0) 6767 return EINVAL; 6768 6769 ifnet_serialize_all(ifp); 6770 if (off >= ncpus2 || off % sc->rx_ring_cnt2 != 0) { 6771 error = EINVAL; 6772 } else { 6773 error = 0; 6774 sc->npoll_ofs = off; 6775 } 6776 ifnet_deserialize_all(ifp); 6777 6778 return error; 6779 } 6780 6781 #endif /* IFPOLL_ENABLE */ 6782 6783 static void 6784 bce_set_timer_cpuid(struct bce_softc *sc, boolean_t polling) 6785 { 6786 if (polling) 6787 sc->bce_timer_cpuid 
= 0; /* XXX */ 6788 else 6789 sc->bce_timer_cpuid = sc->bce_msix[0].msix_cpuid; 6790 } 6791 6792 static int 6793 bce_alloc_intr(struct bce_softc *sc) 6794 { 6795 u_int irq_flags; 6796 6797 bce_try_alloc_msix(sc); 6798 if (sc->bce_irq_type == PCI_INTR_TYPE_MSIX) 6799 return 0; 6800 6801 sc->bce_irq_type = pci_alloc_1intr(sc->bce_dev, bce_msi_enable, 6802 &sc->bce_irq_rid, &irq_flags); 6803 6804 sc->bce_res_irq = bus_alloc_resource_any(sc->bce_dev, SYS_RES_IRQ, 6805 &sc->bce_irq_rid, irq_flags); 6806 if (sc->bce_res_irq == NULL) { 6807 device_printf(sc->bce_dev, "PCI map interrupt failed\n"); 6808 return ENXIO; 6809 } 6810 sc->bce_msix[0].msix_cpuid = rman_get_cpuid(sc->bce_res_irq); 6811 sc->bce_msix[0].msix_serialize = &sc->main_serialize; 6812 6813 return 0; 6814 } 6815 6816 static void 6817 bce_try_alloc_msix(struct bce_softc *sc) 6818 { 6819 struct bce_msix_data *msix; 6820 int offset, i, error; 6821 boolean_t setup = FALSE; 6822 6823 if (sc->rx_ring_cnt == 1) 6824 return; 6825 6826 if (sc->rx_ring_cnt2 == ncpus2) { 6827 offset = 0; 6828 } else { 6829 int offset_def = 6830 (sc->rx_ring_cnt2 * device_get_unit(sc->bce_dev)) % ncpus2; 6831 6832 offset = device_getenv_int(sc->bce_dev, 6833 "msix.offset", offset_def); 6834 if (offset >= ncpus2 || offset % sc->rx_ring_cnt2 != 0) { 6835 device_printf(sc->bce_dev, 6836 "invalid msix.offset %d, use %d\n", 6837 offset, offset_def); 6838 offset = offset_def; 6839 } 6840 } 6841 6842 msix = &sc->bce_msix[0]; 6843 msix->msix_serialize = &sc->main_serialize; 6844 msix->msix_func = bce_intr_msi_oneshot; 6845 msix->msix_arg = sc; 6846 KKASSERT(offset < ncpus2); 6847 msix->msix_cpuid = offset; 6848 ksnprintf(msix->msix_desc, sizeof(msix->msix_desc), "%s combo", 6849 device_get_nameunit(sc->bce_dev)); 6850 6851 for (i = 1; i < sc->rx_ring_cnt; ++i) { 6852 struct bce_rx_ring *rxr = &sc->rx_rings[i]; 6853 6854 msix = &sc->bce_msix[i]; 6855 6856 msix->msix_serialize = &rxr->rx_serialize; 6857 msix->msix_arg = rxr; 6858 msix->msix_cpuid = offset + (i % sc->rx_ring_cnt2); 6859 KKASSERT(msix->msix_cpuid < ncpus2); 6860 6861 if (i < sc->tx_ring_cnt) { 6862 msix->msix_func = bce_intr_msix_rxtx; 6863 ksnprintf(msix->msix_desc, sizeof(msix->msix_desc), 6864 "%s rxtx%d", device_get_nameunit(sc->bce_dev), i); 6865 } else { 6866 msix->msix_func = bce_intr_msix_rx; 6867 ksnprintf(msix->msix_desc, sizeof(msix->msix_desc), 6868 "%s rx%d", device_get_nameunit(sc->bce_dev), i); 6869 } 6870 } 6871 6872 /* 6873 * Setup MSI-X table 6874 */ 6875 bce_setup_msix_table(sc); 6876 REG_WR(sc, BCE_PCI_MSIX_CONTROL, BCE_MSIX_MAX - 1); 6877 REG_WR(sc, BCE_PCI_MSIX_TBL_OFF_BIR, BCE_PCI_GRC_WINDOW2_BASE); 6878 REG_WR(sc, BCE_PCI_MSIX_PBA_OFF_BIT, BCE_PCI_GRC_WINDOW3_BASE); 6879 /* Flush */ 6880 REG_RD(sc, BCE_PCI_MSIX_CONTROL); 6881 6882 error = pci_setup_msix(sc->bce_dev); 6883 if (error) { 6884 device_printf(sc->bce_dev, "Setup MSI-X failed\n"); 6885 goto back; 6886 } 6887 setup = TRUE; 6888 6889 for (i = 0; i < sc->rx_ring_cnt; ++i) { 6890 msix = &sc->bce_msix[i]; 6891 6892 error = pci_alloc_msix_vector(sc->bce_dev, i, &msix->msix_rid, 6893 msix->msix_cpuid); 6894 if (error) { 6895 device_printf(sc->bce_dev, 6896 "Unable to allocate MSI-X %d on cpu%d\n", 6897 i, msix->msix_cpuid); 6898 goto back; 6899 } 6900 6901 msix->msix_res = bus_alloc_resource_any(sc->bce_dev, 6902 SYS_RES_IRQ, &msix->msix_rid, RF_ACTIVE); 6903 if (msix->msix_res == NULL) { 6904 device_printf(sc->bce_dev, 6905 "Unable to allocate MSI-X %d resource\n", i); 6906 error = ENOMEM; 6907 goto back; 6908 } 6909 } 6910 
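	/*
	 * At this point every vector has been allocated and bound to
	 * its target CPU.  A worked example (hypothetical configuration:
	 * rx_ring_cnt2 = 4, tx_ring_cnt = 4, msix.offset = 0, device bce0):
	 *
	 *	vector0 -> cpu0 "bce0 combo" (status block + RX ring 0/TX ring 0)
	 *	vector1 -> cpu1 "bce0 rxtx1"
	 *	vector2 -> cpu2 "bce0 rxtx2"
	 *	vector3 -> cpu3 "bce0 rxtx3"
	 *	vector4 -> cpu0 "bce0 rx4" (extra ring for masked hash 0)
	 */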
6911 pci_enable_msix(sc->bce_dev); 6912 sc->bce_irq_type = PCI_INTR_TYPE_MSIX; 6913 back: 6914 if (error) 6915 bce_free_msix(sc, setup); 6916 } 6917 6918 static void 6919 bce_setup_ring_cnt(struct bce_softc *sc) 6920 { 6921 int msix_enable, ring_max, msix_cnt2, msix_cnt, i; 6922 6923 sc->rx_ring_cnt = 1; 6924 sc->rx_ring_cnt2 = 1; 6925 sc->tx_ring_cnt = 1; 6926 6927 if (BCE_CHIP_NUM(sc) != BCE_CHIP_NUM_5709 && 6928 BCE_CHIP_NUM(sc) != BCE_CHIP_NUM_5716) 6929 return; 6930 6931 msix_enable = device_getenv_int(sc->bce_dev, "msix.enable", 6932 bce_msix_enable); 6933 if (!msix_enable) 6934 return; 6935 6936 if (ncpus2 == 1) 6937 return; 6938 6939 msix_cnt = pci_msix_count(sc->bce_dev); 6940 if (msix_cnt <= 1) 6941 return; 6942 6943 i = 0; 6944 while ((1 << (i + 1)) <= msix_cnt) 6945 ++i; 6946 msix_cnt2 = 1 << i; 6947 6948 /* 6949 * One extra RX ring will be needed (see below), so make sure 6950 * that there are enough MSI-X vectors. 6951 */ 6952 if (msix_cnt == msix_cnt2) { 6953 /* 6954 * XXX 6955 * This probably will not happen; 5709/5716 6956 * come with 9 MSI-X vectors. 6957 */ 6958 msix_cnt2 >>= 1; 6959 if (msix_cnt2 <= 1) { 6960 device_printf(sc->bce_dev, 6961 "MSI-X count %d could not be used\n", msix_cnt); 6962 return; 6963 } 6964 device_printf(sc->bce_dev, "MSI-X count %d is power of 2\n", 6965 msix_cnt); 6966 } 6967 6968 /* 6969 * Setup RX ring count 6970 */ 6971 ring_max = BCE_RX_RING_MAX; 6972 if (ring_max > msix_cnt2) 6973 ring_max = msix_cnt2; 6974 sc->rx_ring_cnt2 = device_getenv_int(sc->bce_dev, "rx_rings", 6975 bce_rx_rings); 6976 sc->rx_ring_cnt2 = if_ring_count2(sc->rx_ring_cnt2, ring_max); 6977 6978 /* 6979 * Don't use MSI-X if the effective RX ring count is 1; 6980 * in that case the TX ring count will be 1 as well. That 6981 * RX ring and TX ring must be bundled into one MSI-X 6982 * vector, so the hot path would be 6983 * exactly the same as using MSI. Besides, the first RX ring 6984 * must be fully populated even though it only accepts packets whose 6985 * RSS hash can't be calculated, e.g. ARP packets; a waste of 6986 * resources at the least. 6987 */ 6988 if (sc->rx_ring_cnt2 == 1) 6989 return; 6990 6991 /* 6992 * One extra RX ring is allocated, since the first RX ring 6993 * could not be used for RSS hashed packets whose masked 6994 * hash is 0. The first RX ring is only used for packets 6995 * whose RSS hash could not be calculated, e.g. ARP packets. 6996 * This extra RX ring will be used for packets whose masked 6997 * hash is 0. The effective RX ring count involved in RSS 6998 * is still sc->rx_ring_cnt2. 6999 */ 7000 KKASSERT(sc->rx_ring_cnt2 + 1 <= msix_cnt); 7001 sc->rx_ring_cnt = sc->rx_ring_cnt2 + 1; 7002 7003 /* 7004 * Setup TX ring count 7005 * 7006 * NOTE: 7007 * TX ring count must not exceed the effective RSS RX ring 7008 * count, since we use the RX ring software data struct to save 7009 * the status index and various other MSI-X related state.
7010 */ 7011 ring_max = BCE_TX_RING_MAX; 7012 if (ring_max > msix_cnt2) 7013 ring_max = msix_cnt2; 7014 if (ring_max > sc->rx_ring_cnt2) 7015 ring_max = sc->rx_ring_cnt2; 7016 sc->tx_ring_cnt = device_getenv_int(sc->bce_dev, "tx_rings", 7017 bce_tx_rings); 7018 sc->tx_ring_cnt = if_ring_count2(sc->tx_ring_cnt, ring_max); 7019 } 7020 7021 static void 7022 bce_free_msix(struct bce_softc *sc, boolean_t setup) 7023 { 7024 int i; 7025 7026 KKASSERT(sc->rx_ring_cnt > 1); 7027 7028 for (i = 0; i < sc->rx_ring_cnt; ++i) { 7029 struct bce_msix_data *msix = &sc->bce_msix[i]; 7030 7031 if (msix->msix_res != NULL) { 7032 bus_release_resource(sc->bce_dev, SYS_RES_IRQ, 7033 msix->msix_rid, msix->msix_res); 7034 } 7035 if (msix->msix_rid >= 0) 7036 pci_release_msix_vector(sc->bce_dev, msix->msix_rid); 7037 } 7038 if (setup) 7039 pci_teardown_msix(sc->bce_dev); 7040 } 7041 7042 static void 7043 bce_free_intr(struct bce_softc *sc) 7044 { 7045 if (sc->bce_irq_type != PCI_INTR_TYPE_MSIX) { 7046 if (sc->bce_res_irq != NULL) { 7047 bus_release_resource(sc->bce_dev, SYS_RES_IRQ, 7048 sc->bce_irq_rid, sc->bce_res_irq); 7049 } 7050 if (sc->bce_irq_type == PCI_INTR_TYPE_MSI) 7051 pci_release_msi(sc->bce_dev); 7052 } else { 7053 bce_free_msix(sc, TRUE); 7054 } 7055 } 7056 7057 static void 7058 bce_setup_msix_table(struct bce_softc *sc) 7059 { 7060 REG_WR(sc, BCE_PCI_GRC_WINDOW_ADDR, BCE_PCI_GRC_WINDOW_ADDR_SEP_WIN); 7061 REG_WR(sc, BCE_PCI_GRC_WINDOW2_ADDR, BCE_MSIX_TABLE_ADDR); 7062 REG_WR(sc, BCE_PCI_GRC_WINDOW3_ADDR, BCE_MSIX_PBA_ADDR); 7063 } 7064 7065 static int 7066 bce_setup_intr(struct bce_softc *sc) 7067 { 7068 void (*irq_handle)(void *); 7069 int error; 7070 7071 if (sc->bce_irq_type == PCI_INTR_TYPE_MSIX) 7072 return bce_setup_msix(sc); 7073 7074 if (sc->bce_irq_type == PCI_INTR_TYPE_LEGACY) { 7075 irq_handle = bce_intr_legacy; 7076 } else if (sc->bce_irq_type == PCI_INTR_TYPE_MSI) { 7077 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 || 7078 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) { 7079 irq_handle = bce_intr_msi_oneshot; 7080 sc->bce_flags |= BCE_ONESHOT_MSI_FLAG; 7081 } else { 7082 irq_handle = bce_intr_msi; 7083 sc->bce_flags |= BCE_CHECK_MSI_FLAG; 7084 } 7085 } else { 7086 panic("%s: unsupported intr type %d", 7087 device_get_nameunit(sc->bce_dev), sc->bce_irq_type); 7088 } 7089 7090 error = bus_setup_intr(sc->bce_dev, sc->bce_res_irq, INTR_MPSAFE, 7091 irq_handle, sc, &sc->bce_intrhand, &sc->main_serialize); 7092 if (error != 0) { 7093 device_printf(sc->bce_dev, "Failed to setup IRQ!\n"); 7094 return error; 7095 } 7096 7097 return 0; 7098 } 7099 7100 static void 7101 bce_teardown_intr(struct bce_softc *sc) 7102 { 7103 if (sc->bce_irq_type != PCI_INTR_TYPE_MSIX) 7104 bus_teardown_intr(sc->bce_dev, sc->bce_res_irq, sc->bce_intrhand); 7105 else 7106 bce_teardown_msix(sc, sc->rx_ring_cnt); 7107 } 7108 7109 static int 7110 bce_setup_msix(struct bce_softc *sc) 7111 { 7112 int i; 7113 7114 for (i = 0; i < sc->rx_ring_cnt; ++i) { 7115 struct bce_msix_data *msix = &sc->bce_msix[i]; 7116 int error; 7117 7118 error = bus_setup_intr_descr(sc->bce_dev, msix->msix_res, 7119 INTR_MPSAFE, msix->msix_func, msix->msix_arg, 7120 &msix->msix_handle, msix->msix_serialize, msix->msix_desc); 7121 if (error) { 7122 device_printf(sc->bce_dev, "could not set up %s " 7123 "interrupt handler.\n", msix->msix_desc); 7124 bce_teardown_msix(sc, i); 7125 return error; 7126 } 7127 } 7128 return 0; 7129 } 7130 7131 static void 7132 bce_teardown_msix(struct bce_softc *sc, int msix_cnt) 7133 { 7134 int i; 7135 7136 for (i = 0; i < msix_cnt; 
++i) { 7137 struct bce_msix_data *msix = &sc->bce_msix[i]; 7138 7139 bus_teardown_intr(sc->bce_dev, msix->msix_res, 7140 msix->msix_handle); 7141 } 7142 } 7143 7144 static void 7145 bce_init_rss(struct bce_softc *sc) 7146 { 7147 uint8_t key[BCE_RLUP_RSS_KEY_CNT * BCE_RLUP_RSS_KEY_SIZE]; 7148 uint32_t tbl = 0; 7149 int i; 7150 7151 KKASSERT(sc->rx_ring_cnt > 2); 7152 7153 /* 7154 * Configure RSS keys 7155 */ 7156 toeplitz_get_key(key, sizeof(key)); 7157 for (i = 0; i < BCE_RLUP_RSS_KEY_CNT; ++i) { 7158 uint32_t rss_key; 7159 7160 rss_key = BCE_RLUP_RSS_KEYVAL(key, i); 7161 BCE_RSS_DPRINTF(sc, 1, "rss_key%d 0x%08x\n", i, rss_key); 7162 7163 REG_WR(sc, BCE_RLUP_RSS_KEY(i), rss_key); 7164 } 7165 7166 /* 7167 * Configure the redirect table 7168 * 7169 * NOTE: 7170 * - The "queue ID" in the redirect table is the software RX ring's 7171 * index _minus_ one. 7172 * - The last RX ring, whose "queue ID" is (sc->rx_ring_cnt - 2), 7173 * will be used for packets whose masked hash is 0. 7174 * (see also: comment in bce_setup_ring_cnt()) 7175 * 7176 * The redirect table is configured in the following fashion, except 7177 * for the masked hash 0, which is noted above: 7178 * (hash & ring_cnt_mask) == rdr_table[(hash & rdr_table_mask)] 7179 */ 7180 for (i = 0; i < BCE_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) { 7181 int shift = (i % 8) << 2, qid; 7182 7183 qid = i % sc->rx_ring_cnt2; 7184 if (qid > 0) 7185 --qid; 7186 else 7187 qid = sc->rx_ring_cnt - 2; 7188 KKASSERT(qid < (sc->rx_ring_cnt - 1)); 7189 7190 tbl |= qid << shift; 7191 if (i % 8 == 7) { 7192 BCE_RSS_DPRINTF(sc, 1, "tbl 0x%08x\n", tbl); 7193 REG_WR(sc, BCE_RLUP_RSS_DATA, tbl); 7194 REG_WR(sc, BCE_RLUP_RSS_COMMAND, (i >> 3) | 7195 BCE_RLUP_RSS_COMMAND_RSS_WRITE_MASK | 7196 BCE_RLUP_RSS_COMMAND_WRITE | 7197 BCE_RLUP_RSS_COMMAND_HASH_MASK); 7198 tbl = 0; 7199 } 7200 } 7201 REG_WR(sc, BCE_RLUP_RSS_CONFIG, 7202 BCE_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI); 7203 } 7204 7205 static void 7206 bce_npoll_coal_change(struct bce_softc *sc) 7207 { 7208 uint32_t old_rx_cons, old_tx_cons; 7209 7210 old_rx_cons = sc->bce_rx_quick_cons_trip_int; 7211 old_tx_cons = sc->bce_tx_quick_cons_trip_int; 7212 sc->bce_rx_quick_cons_trip_int = 1; 7213 sc->bce_tx_quick_cons_trip_int = 1; 7214 7215 sc->bce_coalchg_mask |= BCE_COALMASK_TX_BDS_INT | 7216 BCE_COALMASK_RX_BDS_INT; 7217 bce_coal_change(sc); 7218 7219 sc->bce_rx_quick_cons_trip_int = old_rx_cons; 7220 sc->bce_tx_quick_cons_trip_int = old_tx_cons; 7221 } 7222 7223 static struct pktinfo * 7224 bce_rss_pktinfo(struct pktinfo *pi, uint32_t status, 7225 const struct l2_fhdr *l2fhdr) 7226 { 7227 /* Check for an IP datagram. */ 7228 if ((status & L2_FHDR_STATUS_IP_DATAGRAM) == 0) 7229 return NULL; 7230 7231 /* Check if the IP checksum is valid. */ 7232 if (l2fhdr->l2_fhdr_ip_xsum != 0xffff) 7233 return NULL; 7234 7235 /* Check for a valid TCP/UDP frame. */ 7236 if (status & L2_FHDR_STATUS_TCP_SEGMENT) { 7237 if (status & L2_FHDR_ERRORS_TCP_XSUM) 7238 return NULL; 7239 if (l2fhdr->l2_fhdr_tcp_udp_xsum != 0xffff) 7240 return NULL; 7241 pi->pi_l3proto = IPPROTO_TCP; 7242 } else if (status & L2_FHDR_STATUS_UDP_DATAGRAM) { 7243 if (status & L2_FHDR_ERRORS_UDP_XSUM) 7244 return NULL; 7245 if (l2fhdr->l2_fhdr_tcp_udp_xsum != 0xffff) 7246 return NULL; 7247 pi->pi_l3proto = IPPROTO_UDP; 7248 } else { 7249 return NULL; 7250 } 7251 pi->pi_netisr = NETISR_IP; 7252 pi->pi_flags = 0; 7253 7254 return pi; 7255 } 7256
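/*
 * Worked example of the redirect table programmed by bce_init_rss()
 * above, assuming rx_ring_cnt2 = 4 and thus rx_ring_cnt = 5:
 *
 *	masked hash 1 -> queue ID 0 (software RX ring 1)
 *	masked hash 2 -> queue ID 1 (software RX ring 2)
 *	masked hash 3 -> queue ID 2 (software RX ring 3)
 *	masked hash 0 -> queue ID 3 (software RX ring 4, i.e. the
 *			 extra last ring, rx_ring_cnt - 2)
 *
 * Software RX ring 0 never appears in the table; it only sees
 * packets for which no RSS hash could be computed, e.g. ARP.
 */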