/*-
 * Copyright (c) 2006-2007 Broadcom Corporation
 *	David Christensen <davidch@broadcom.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Broadcom Corporation nor the name of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written consent.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
29 * 30 * $FreeBSD: src/sys/dev/bce/if_bce.c,v 1.31 2007/05/16 23:34:11 davidch Exp $ 31 */ 32 33 /* 34 * The following controllers are supported by this driver: 35 * BCM5706C A2, A3 36 * BCM5706S A2, A3 37 * BCM5708C B1, B2 38 * BCM5708S B1, B2 39 * BCM5709C A1, B2, C0 40 * BCM5716 C0 41 * 42 * The following controllers are not supported by this driver: 43 * BCM5706C A0, A1 44 * BCM5706S A0, A1 45 * BCM5708C A0, B0 46 * BCM5708S A0, B0 47 * BCM5709C A0, B0, B1 48 * BCM5709S A0, A1, B0, B1, B2, C0 49 * 50 * 51 * Note about MSI-X on 5709/5716: 52 * - 9 MSI-X vectors are supported. 53 * - MSI-X vectors, RX/TX rings and status blocks' association 54 * are fixed: 55 * o The first RX ring and the first TX ring use the first 56 * status block. 57 * o The first MSI-X vector is associated with the first 58 * status block. 59 * o The second RX ring and the second TX ring use the second 60 * status block. 61 * o The second MSI-X vector is associated with the second 62 * status block. 63 * ... 64 * and so on so forth. 65 * - Status blocks must reside in physically contiguous memory 66 * and each status block consumes 128bytes. In addition to 67 * this, the memory for the status blocks is aligned on 128bytes 68 * in this driver. (see bce_dma_alloc() and HC_CONFIG) 69 * - Each status block has its own coalesce parameters, which also 70 * serve as the related MSI-X vector's interrupt moderation 71 * parameters. 
(see bce_coal_change()) 72 */ 73 74 #include "opt_bce.h" 75 #include "opt_ifpoll.h" 76 77 #include <sys/param.h> 78 #include <sys/bus.h> 79 #include <sys/endian.h> 80 #include <sys/kernel.h> 81 #include <sys/interrupt.h> 82 #include <sys/mbuf.h> 83 #include <sys/malloc.h> 84 #include <sys/queue.h> 85 #include <sys/rman.h> 86 #include <sys/serialize.h> 87 #include <sys/socket.h> 88 #include <sys/sockio.h> 89 #include <sys/sysctl.h> 90 91 #include <netinet/ip.h> 92 #include <netinet/tcp.h> 93 94 #include <net/bpf.h> 95 #include <net/ethernet.h> 96 #include <net/if.h> 97 #include <net/if_arp.h> 98 #include <net/if_dl.h> 99 #include <net/if_media.h> 100 #include <net/if_poll.h> 101 #include <net/if_types.h> 102 #include <net/ifq_var.h> 103 #include <net/toeplitz.h> 104 #include <net/toeplitz2.h> 105 #include <net/vlan/if_vlan_var.h> 106 #include <net/vlan/if_vlan_ether.h> 107 108 #include <dev/netif/mii_layer/mii.h> 109 #include <dev/netif/mii_layer/miivar.h> 110 #include <dev/netif/mii_layer/brgphyreg.h> 111 112 #include <bus/pci/pcireg.h> 113 #include <bus/pci/pcivar.h> 114 115 #include "miibus_if.h" 116 117 #include <dev/netif/bce/if_bcereg.h> 118 #include <dev/netif/bce/if_bcefw.h> 119 120 #define BCE_MSI_CKINTVL ((10 * hz) / 1000) /* 10ms */ 121 122 #ifdef BCE_RSS_DEBUG 123 #define BCE_RSS_DPRINTF(sc, lvl, fmt, ...) \ 124 do { \ 125 if (sc->rss_debug >= lvl) \ 126 if_printf(&sc->arpcom.ac_if, fmt, __VA_ARGS__); \ 127 } while (0) 128 #else /* !BCE_RSS_DEBUG */ 129 #define BCE_RSS_DPRINTF(sc, lvl, fmt, ...) ((void)0) 130 #endif /* BCE_RSS_DEBUG */ 131 132 /****************************************************************************/ 133 /* PCI Device ID Table */ 134 /* */ 135 /* Used by bce_probe() to identify the devices supported by this driver. */ 136 /****************************************************************************/ 137 #define BCE_DEVDESC_MAX 64 138 139 static struct bce_type bce_devs[] = { 140 /* BCM5706C Controllers and OEM boards. 
*/ 141 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x3101, 142 "HP NC370T Multifunction Gigabit Server Adapter" }, 143 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x3106, 144 "HP NC370i Multifunction Gigabit Server Adapter" }, 145 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x3070, 146 "HP NC380T PCIe DP Multifunc Gig Server Adapter" }, 147 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x1709, 148 "HP NC371i Multifunction Gigabit Server Adapter" }, 149 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706, PCI_ANY_ID, PCI_ANY_ID, 150 "Broadcom NetXtreme II BCM5706 1000Base-T" }, 151 152 /* BCM5706S controllers and OEM boards. */ 153 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, HP_VENDORID, 0x3102, 154 "HP NC370F Multifunction Gigabit Server Adapter" }, 155 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, PCI_ANY_ID, PCI_ANY_ID, 156 "Broadcom NetXtreme II BCM5706 1000Base-SX" }, 157 158 /* BCM5708C controllers and OEM boards. */ 159 { BRCM_VENDORID, BRCM_DEVICEID_BCM5708, HP_VENDORID, 0x7037, 160 "HP NC373T PCIe Multifunction Gig Server Adapter" }, 161 { BRCM_VENDORID, BRCM_DEVICEID_BCM5708, HP_VENDORID, 0x7038, 162 "HP NC373i Multifunction Gigabit Server Adapter" }, 163 { BRCM_VENDORID, BRCM_DEVICEID_BCM5708, HP_VENDORID, 0x7045, 164 "HP NC374m PCIe Multifunction Adapter" }, 165 { BRCM_VENDORID, BRCM_DEVICEID_BCM5708, PCI_ANY_ID, PCI_ANY_ID, 166 "Broadcom NetXtreme II BCM5708 1000Base-T" }, 167 168 /* BCM5708S controllers and OEM boards. 
*/ 169 { BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, HP_VENDORID, 0x1706, 170 "HP NC373m Multifunction Gigabit Server Adapter" }, 171 { BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, HP_VENDORID, 0x703b, 172 "HP NC373i Multifunction Gigabit Server Adapter" }, 173 { BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, HP_VENDORID, 0x703d, 174 "HP NC373F PCIe Multifunc Giga Server Adapter" }, 175 { BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, PCI_ANY_ID, PCI_ANY_ID, 176 "Broadcom NetXtreme II BCM5708S 1000Base-T" }, 177 178 /* BCM5709C controllers and OEM boards. */ 179 { BRCM_VENDORID, BRCM_DEVICEID_BCM5709, HP_VENDORID, 0x7055, 180 "HP NC382i DP Multifunction Gigabit Server Adapter" }, 181 { BRCM_VENDORID, BRCM_DEVICEID_BCM5709, HP_VENDORID, 0x7059, 182 "HP NC382T PCIe DP Multifunction Gigabit Server Adapter" }, 183 { BRCM_VENDORID, BRCM_DEVICEID_BCM5709, PCI_ANY_ID, PCI_ANY_ID, 184 "Broadcom NetXtreme II BCM5709 1000Base-T" }, 185 186 /* BCM5709S controllers and OEM boards. */ 187 { BRCM_VENDORID, BRCM_DEVICEID_BCM5709S, HP_VENDORID, 0x171d, 188 "HP NC382m DP 1GbE Multifunction BL-c Adapter" }, 189 { BRCM_VENDORID, BRCM_DEVICEID_BCM5709S, HP_VENDORID, 0x7056, 190 "HP NC382i DP Multifunction Gigabit Server Adapter" }, 191 { BRCM_VENDORID, BRCM_DEVICEID_BCM5709S, PCI_ANY_ID, PCI_ANY_ID, 192 "Broadcom NetXtreme II BCM5709 1000Base-SX" }, 193 194 /* BCM5716 controllers and OEM boards. */ 195 { BRCM_VENDORID, BRCM_DEVICEID_BCM5716, PCI_ANY_ID, PCI_ANY_ID, 196 "Broadcom NetXtreme II BCM5716 1000Base-T" }, 197 198 { 0, 0, 0, 0, NULL } 199 }; 200 201 /****************************************************************************/ 202 /* Supported Flash NVRAM device data. 
*/ 203 /****************************************************************************/ 204 static const struct flash_spec flash_table[] = 205 { 206 #define BUFFERED_FLAGS (BCE_NV_BUFFERED | BCE_NV_TRANSLATE) 207 #define NONBUFFERED_FLAGS (BCE_NV_WREN) 208 209 /* Slow EEPROM */ 210 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400, 211 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE, 212 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE, 213 "EEPROM - slow"}, 214 /* Expansion entry 0001 */ 215 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406, 216 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, 217 SAIFUN_FLASH_BYTE_ADDR_MASK, 0, 218 "Entry 0001"}, 219 /* Saifun SA25F010 (non-buffered flash) */ 220 /* strap, cfg1, & write1 need updates */ 221 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406, 222 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, 223 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2, 224 "Non-buffered flash (128kB)"}, 225 /* Saifun SA25F020 (non-buffered flash) */ 226 /* strap, cfg1, & write1 need updates */ 227 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406, 228 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, 229 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4, 230 "Non-buffered flash (256kB)"}, 231 /* Expansion entry 0100 */ 232 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406, 233 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, 234 SAIFUN_FLASH_BYTE_ADDR_MASK, 0, 235 "Entry 0100"}, 236 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */ 237 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406, 238 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE, 239 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2, 240 "Entry 0101: ST M45PE10 (128kB non-bufferred)"}, 241 /* Entry 0110: ST M45PE20 (non-buffered flash)*/ 242 {0x15000001, 0x57808201, 0x000500db, 
0x03840253, 0xaf020406, 243 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE, 244 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4, 245 "Entry 0110: ST M45PE20 (256kB non-bufferred)"}, 246 /* Saifun SA25F005 (non-buffered flash) */ 247 /* strap, cfg1, & write1 need updates */ 248 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406, 249 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, 250 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE, 251 "Non-buffered flash (64kB)"}, 252 /* Fast EEPROM */ 253 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400, 254 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE, 255 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE, 256 "EEPROM - fast"}, 257 /* Expansion entry 1001 */ 258 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406, 259 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, 260 SAIFUN_FLASH_BYTE_ADDR_MASK, 0, 261 "Entry 1001"}, 262 /* Expansion entry 1010 */ 263 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406, 264 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, 265 SAIFUN_FLASH_BYTE_ADDR_MASK, 0, 266 "Entry 1010"}, 267 /* ATMEL AT45DB011B (buffered flash) */ 268 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400, 269 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE, 270 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE, 271 "Buffered flash (128kB)"}, 272 /* Expansion entry 1100 */ 273 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406, 274 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, 275 SAIFUN_FLASH_BYTE_ADDR_MASK, 0, 276 "Entry 1100"}, 277 /* Expansion entry 1101 */ 278 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406, 279 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, 280 SAIFUN_FLASH_BYTE_ADDR_MASK, 0, 281 "Entry 1101"}, 282 /* Ateml Expansion entry 1110 */ 283 {0x37000001, 0x76808273, 
0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};

/*
 * Flash spec for the NVRAM controller integrated in BCM5709/5716.
 *
 * The BCM5709 controllers transparently handle the differences
 * between Atmel 264 byte pages and all flash devices which use
 * 256 byte pages, so no logical-to-physical mapping is required
 * in the driver.
 */
static struct flash_spec flash_5709 = {
	.flags		= BCE_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE * 2,
	.name		= "5709/5716 buffered flash (256kB)",
};

/****************************************************************************/
/* DragonFly device entry points.
*/ 312 /****************************************************************************/ 313 static int bce_probe(device_t); 314 static int bce_attach(device_t); 315 static int bce_detach(device_t); 316 static void bce_shutdown(device_t); 317 static int bce_miibus_read_reg(device_t, int, int); 318 static int bce_miibus_write_reg(device_t, int, int, int); 319 static void bce_miibus_statchg(device_t); 320 321 /****************************************************************************/ 322 /* BCE Register/Memory Access Routines */ 323 /****************************************************************************/ 324 static uint32_t bce_reg_rd_ind(struct bce_softc *, uint32_t); 325 static void bce_reg_wr_ind(struct bce_softc *, uint32_t, uint32_t); 326 static void bce_shmem_wr(struct bce_softc *, uint32_t, uint32_t); 327 static uint32_t bce_shmem_rd(struct bce_softc *, u32); 328 static void bce_ctx_wr(struct bce_softc *, uint32_t, uint32_t, uint32_t); 329 330 /****************************************************************************/ 331 /* BCE NVRAM Access Routines */ 332 /****************************************************************************/ 333 static int bce_acquire_nvram_lock(struct bce_softc *); 334 static int bce_release_nvram_lock(struct bce_softc *); 335 static void bce_enable_nvram_access(struct bce_softc *); 336 static void bce_disable_nvram_access(struct bce_softc *); 337 static int bce_nvram_read_dword(struct bce_softc *, uint32_t, uint8_t *, 338 uint32_t); 339 static int bce_init_nvram(struct bce_softc *); 340 static int bce_nvram_read(struct bce_softc *, uint32_t, uint8_t *, int); 341 static int bce_nvram_test(struct bce_softc *); 342 343 /****************************************************************************/ 344 /* BCE DMA Allocate/Free Routines */ 345 /****************************************************************************/ 346 static int bce_dma_alloc(struct bce_softc *); 347 static void bce_dma_free(struct bce_softc *); 348 
static void bce_dma_map_addr(void *, bus_dma_segment_t *, int, int); 349 350 /****************************************************************************/ 351 /* BCE Firmware Synchronization and Load */ 352 /****************************************************************************/ 353 static int bce_fw_sync(struct bce_softc *, uint32_t); 354 static void bce_load_rv2p_fw(struct bce_softc *, uint32_t *, 355 uint32_t, uint32_t); 356 static void bce_load_cpu_fw(struct bce_softc *, struct cpu_reg *, 357 struct fw_info *); 358 static void bce_start_cpu(struct bce_softc *, struct cpu_reg *); 359 static void bce_halt_cpu(struct bce_softc *, struct cpu_reg *); 360 static void bce_start_rxp_cpu(struct bce_softc *); 361 static void bce_init_rxp_cpu(struct bce_softc *); 362 static void bce_init_txp_cpu(struct bce_softc *); 363 static void bce_init_tpat_cpu(struct bce_softc *); 364 static void bce_init_cp_cpu(struct bce_softc *); 365 static void bce_init_com_cpu(struct bce_softc *); 366 static void bce_init_cpus(struct bce_softc *); 367 static void bce_setup_msix_table(struct bce_softc *); 368 static void bce_init_rss(struct bce_softc *); 369 370 static void bce_stop(struct bce_softc *); 371 static int bce_reset(struct bce_softc *, uint32_t); 372 static int bce_chipinit(struct bce_softc *); 373 static int bce_blockinit(struct bce_softc *); 374 static void bce_probe_pci_caps(struct bce_softc *); 375 static void bce_print_adapter_info(struct bce_softc *); 376 static void bce_get_media(struct bce_softc *); 377 static void bce_mgmt_init(struct bce_softc *); 378 static int bce_init_ctx(struct bce_softc *); 379 static void bce_get_mac_addr(struct bce_softc *); 380 static void bce_set_mac_addr(struct bce_softc *); 381 static void bce_set_rx_mode(struct bce_softc *); 382 static void bce_coal_change(struct bce_softc *); 383 static void bce_npoll_coal_change(struct bce_softc *); 384 static void bce_setup_serialize(struct bce_softc *); 385 static void bce_serialize_skipmain(struct 
bce_softc *); 386 static void bce_deserialize_skipmain(struct bce_softc *); 387 static void bce_set_timer_cpuid(struct bce_softc *, boolean_t); 388 static int bce_alloc_intr(struct bce_softc *); 389 static void bce_free_intr(struct bce_softc *); 390 static void bce_try_alloc_msix(struct bce_softc *); 391 static void bce_free_msix(struct bce_softc *, boolean_t); 392 static void bce_setup_ring_cnt(struct bce_softc *); 393 static int bce_setup_intr(struct bce_softc *); 394 static void bce_teardown_intr(struct bce_softc *); 395 static int bce_setup_msix(struct bce_softc *); 396 static void bce_teardown_msix(struct bce_softc *, int); 397 398 static int bce_create_tx_ring(struct bce_tx_ring *); 399 static void bce_destroy_tx_ring(struct bce_tx_ring *); 400 static void bce_init_tx_context(struct bce_tx_ring *); 401 static int bce_init_tx_chain(struct bce_tx_ring *); 402 static void bce_free_tx_chain(struct bce_tx_ring *); 403 static void bce_xmit(struct bce_tx_ring *); 404 static int bce_encap(struct bce_tx_ring *, struct mbuf **, int *); 405 static int bce_tso_setup(struct bce_tx_ring *, struct mbuf **, 406 uint16_t *, uint16_t *); 407 408 static int bce_create_rx_ring(struct bce_rx_ring *); 409 static void bce_destroy_rx_ring(struct bce_rx_ring *); 410 static void bce_init_rx_context(struct bce_rx_ring *); 411 static int bce_init_rx_chain(struct bce_rx_ring *); 412 static void bce_free_rx_chain(struct bce_rx_ring *); 413 static int bce_newbuf_std(struct bce_rx_ring *, uint16_t *, uint16_t, 414 uint32_t *, int); 415 static void bce_setup_rxdesc_std(struct bce_rx_ring *, uint16_t, 416 uint32_t *); 417 static struct pktinfo *bce_rss_pktinfo(struct pktinfo *, uint32_t, 418 const struct l2_fhdr *); 419 420 static void bce_start(struct ifnet *, struct ifaltq_subque *); 421 static int bce_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *); 422 static void bce_watchdog(struct ifaltq_subque *); 423 static int bce_ifmedia_upd(struct ifnet *); 424 static void 
bce_ifmedia_sts(struct ifnet *, struct ifmediareq *); 425 static void bce_init(void *); 426 #ifdef IFPOLL_ENABLE 427 static void bce_npoll(struct ifnet *, struct ifpoll_info *); 428 static void bce_npoll_rx(struct ifnet *, void *, int); 429 static void bce_npoll_tx(struct ifnet *, void *, int); 430 static void bce_npoll_status(struct ifnet *); 431 static void bce_npoll_rx_pack(struct ifnet *, void *, int); 432 #endif 433 static void bce_serialize(struct ifnet *, enum ifnet_serialize); 434 static void bce_deserialize(struct ifnet *, enum ifnet_serialize); 435 static int bce_tryserialize(struct ifnet *, enum ifnet_serialize); 436 #ifdef INVARIANTS 437 static void bce_serialize_assert(struct ifnet *, enum ifnet_serialize, 438 boolean_t); 439 #endif 440 441 static void bce_intr(struct bce_softc *); 442 static void bce_intr_legacy(void *); 443 static void bce_intr_msi(void *); 444 static void bce_intr_msi_oneshot(void *); 445 static void bce_intr_msix_rxtx(void *); 446 static void bce_intr_msix_rx(void *); 447 static void bce_tx_intr(struct bce_tx_ring *, uint16_t); 448 static void bce_rx_intr(struct bce_rx_ring *, int, uint16_t); 449 static void bce_phy_intr(struct bce_softc *); 450 static void bce_disable_intr(struct bce_softc *); 451 static void bce_enable_intr(struct bce_softc *); 452 static void bce_reenable_intr(struct bce_rx_ring *); 453 static void bce_check_msi(void *); 454 455 static void bce_stats_update(struct bce_softc *); 456 static void bce_tick(void *); 457 static void bce_tick_serialized(struct bce_softc *); 458 static void bce_pulse(void *); 459 460 static void bce_add_sysctls(struct bce_softc *); 461 static int bce_sysctl_tx_bds_int(SYSCTL_HANDLER_ARGS); 462 static int bce_sysctl_tx_bds(SYSCTL_HANDLER_ARGS); 463 static int bce_sysctl_tx_ticks_int(SYSCTL_HANDLER_ARGS); 464 static int bce_sysctl_tx_ticks(SYSCTL_HANDLER_ARGS); 465 static int bce_sysctl_rx_bds_int(SYSCTL_HANDLER_ARGS); 466 static int bce_sysctl_rx_bds(SYSCTL_HANDLER_ARGS); 467 static int 
bce_sysctl_rx_ticks_int(SYSCTL_HANDLER_ARGS);
static int	bce_sysctl_rx_ticks(SYSCTL_HANDLER_ARGS);
#ifdef IFPOLL_ENABLE
static int	bce_sysctl_npoll_offset(SYSCTL_HANDLER_ARGS);
#endif
static int	bce_sysctl_coal_change(SYSCTL_HANDLER_ARGS,
		    uint32_t *, uint32_t);

/*
 * NOTE:
 * Don't set bce_tx_ticks_int/bce_tx_ticks to 1023.  Linux's bnx2
 * takes 1023 as the TX ticks limit.  However, using 1023 will
 * cause 5708(B2) to generate extra interrupts (~2000/s) even when
 * there is _no_ network activity on the NIC.
 */

/*
 * Default interrupt coalescing parameters, overridable by the
 * hw.bce.* tunables registered below.  The "bcm:" annotations
 * record the values used by Broadcom's Linux bnx2 driver.
 */
static uint32_t	bce_tx_bds_int = 255;		/* bcm: 20 */
static uint32_t	bce_tx_bds = 255;		/* bcm: 20 */
static uint32_t	bce_tx_ticks_int = 1022;	/* bcm: 80 */
static uint32_t	bce_tx_ticks = 1022;		/* bcm: 80 */
static uint32_t	bce_rx_bds_int = 128;		/* bcm: 6 */
static uint32_t	bce_rx_bds = 0;			/* bcm: 6 */
static uint32_t	bce_rx_ticks_int = 150;		/* bcm: 18 */
static uint32_t	bce_rx_ticks = 150;		/* bcm: 18 */

/* Presumably # of TX BDs batched before a doorbell write; verify in bce_encap/bce_xmit. */
static int	bce_tx_wreg = 8;

/* Non-zero permits MSI/MSI-X allocation (MSI-X used for RSS on 5709/5716). */
static int	bce_msi_enable = 1;
static int	bce_msix_enable = 1;

/* RX/TX descriptor ring sizes, in pages. */
static int	bce_rx_pages = RX_PAGES_DEFAULT;
static int	bce_tx_pages = TX_PAGES_DEFAULT;

/* RX/TX ring counts; 0 lets the driver choose (see bce_setup_ring_cnt()). */
static int	bce_rx_rings = 0;	/* auto */
static int	bce_tx_rings = 0;	/* auto */

TUNABLE_INT("hw.bce.tx_bds_int", &bce_tx_bds_int);
TUNABLE_INT("hw.bce.tx_bds", &bce_tx_bds);
TUNABLE_INT("hw.bce.tx_ticks_int", &bce_tx_ticks_int);
TUNABLE_INT("hw.bce.tx_ticks", &bce_tx_ticks);
TUNABLE_INT("hw.bce.rx_bds_int", &bce_rx_bds_int);
TUNABLE_INT("hw.bce.rx_bds", &bce_rx_bds);
TUNABLE_INT("hw.bce.rx_ticks_int", &bce_rx_ticks_int);
TUNABLE_INT("hw.bce.rx_ticks", &bce_rx_ticks);
TUNABLE_INT("hw.bce.msi.enable", &bce_msi_enable);
TUNABLE_INT("hw.bce.msix.enable", &bce_msix_enable);
TUNABLE_INT("hw.bce.rx_pages", &bce_rx_pages);
TUNABLE_INT("hw.bce.tx_pages", &bce_tx_pages);
TUNABLE_INT("hw.bce.tx_wreg", &bce_tx_wreg);
TUNABLE_INT("hw.bce.tx_rings", &bce_tx_rings);
TUNABLE_INT("hw.bce.rx_rings", &bce_rx_rings);

/****************************************************************************/
/* DragonFly device dispatch table.                                         */
/****************************************************************************/
static device_method_t bce_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		bce_probe),
	DEVMETHOD(device_attach,	bce_attach),
	DEVMETHOD(device_detach,	bce_detach),
	DEVMETHOD(device_shutdown,	bce_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	bce_miibus_read_reg),
	DEVMETHOD(miibus_writereg,	bce_miibus_write_reg),
	DEVMETHOD(miibus_statchg,	bce_miibus_statchg),

	DEVMETHOD_END
};

static driver_t bce_driver = {
	"bce",
	bce_methods,
	sizeof(struct bce_softc)
};

static devclass_t bce_devclass;

DECLARE_DUMMY_MODULE(if_bce);
MODULE_DEPEND(bce, miibus, 1, 1, 1);
DRIVER_MODULE(if_bce, pci, bce_driver, bce_devclass, NULL, NULL);
/* bce also acts as a bus for its PHY, attached through miibus. */
DRIVER_MODULE(miibus, bce, miibus_driver, miibus_devclass, NULL, NULL);

/****************************************************************************/
/* Device probe function.                                                   */
/*                                                                          */
/* Compares the device to the driver's list of supported devices            */
/* (bce_devs) and reports back to the OS whether this is the right          */
/* driver for the device.                                                   */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, ENXIO if the device is not supported.                    */
/****************************************************************************/
static int
bce_probe(device_t dev)
{
	struct bce_type *t;
	uint16_t vid, did, svid, sdid;

	/* Get the data for the device to be probed. */
	vid  = pci_get_vendor(dev);
	did  = pci_get_device(dev);
	svid = pci_get_subvendor(dev);
	sdid = pci_get_subdevice(dev);

	/*
	 * Look through the list of known devices for a match; a table
	 * entry with PCI_ANY_ID in the subsystem fields matches any
	 * subvendor/subdevice.
	 */
	for (t = bce_devs; t->bce_name != NULL; ++t) {
		if (vid == t->bce_vid && did == t->bce_did &&
		    (svid == t->bce_svid || t->bce_svid == PCI_ANY_ID) &&
		    (sdid == t->bce_sdid || t->bce_sdid == PCI_ANY_ID)) {
			uint32_t revid = pci_read_config(dev, PCIR_REVID, 4);
			char *descbuf;

			descbuf = kmalloc(BCE_DEVDESC_MAX, M_TEMP, M_WAITOK);

			/*
			 * Print out the device identity; the revision's
			 * high nibble encodes the letter (A, B, ...) and
			 * the low nibble the stepping number.
			 */
			ksnprintf(descbuf, BCE_DEVDESC_MAX, "%s (%c%d)",
				  t->bce_name,
				  ((revid & 0xf0) >> 4) + 'A', revid & 0xf);

			/* device_set_desc_copy() copies; descbuf can be freed. */
			device_set_desc_copy(dev, descbuf);
			kfree(descbuf, M_TEMP);
			return 0;
		}
	}
	return ENXIO;
}

/****************************************************************************/
/* Adapter information print function.                                      */
/*                                                                          */
/* Prints a one-line summary of the adapter: ASIC id/revision, bus type     */
/* and speed, bootcode version and optional feature flags (management       */
/* firmware version, 2.5G capability).                                      */
/*                                                                          */
/* Returns:                                                                 */
/*   None.                                                                  */
/****************************************************************************/
static void
bce_print_adapter_info(struct bce_softc *sc)
{
	device_printf(sc->bce_dev, "ASIC (0x%08X); ", sc->bce_chipid);

	kprintf("Rev (%c%d); ", ((BCE_CHIP_ID(sc) & 0xf000) >> 12) + 'A',
		((BCE_CHIP_ID(sc) & 0x0ff0) >> 4));

	/* Bus info. */
	if (sc->bce_flags & BCE_PCIE_FLAG) {
		kprintf("Bus (PCIe x%d, ", sc->link_width);
		/* link_speed comes from the PCIe Link Status register. */
		switch (sc->link_speed) {
		case 1:
			kprintf("2.5Gbps); ");
			break;
		case 2:
			kprintf("5Gbps); ");
			break;
		default:
			kprintf("Unknown link speed); ");
			break;
		}
	} else {
		kprintf("Bus (PCI%s, %s, %dMHz); ",
			((sc->bce_flags & BCE_PCIX_FLAG) ? "-X" : ""),
			((sc->bce_flags & BCE_PCI_32BIT_FLAG) ?
			 "32-bit" : "64-bit"),
			sc->bus_speed_mhz);
	}

	/* Firmware version and device features. */
	kprintf("B/C (%s)", sc->bce_bc_ver);

	if ((sc->bce_flags & BCE_MFW_ENABLE_FLAG) ||
	    (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG)) {
		kprintf("; Flags(");
		if (sc->bce_flags & BCE_MFW_ENABLE_FLAG)
			kprintf("MFW[%s]", sc->bce_mfw_ver);
		if (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG)
			kprintf(" 2.5G");
		kprintf(")");
	}
	kprintf("\n");
}

/****************************************************************************/
/* PCI Capabilities Probe Function.                                         */
/*                                                                          */
/* Walks the PCI capabilities list for the device to find what features     */
/* are supported (PCI-X / PCIe), recording the negotiated PCIe link speed   */
/* and width for later reporting.                                           */
/*                                                                          */
/* Returns:                                                                 */
/*   None.                                                                  */
/****************************************************************************/
static void
bce_probe_pci_caps(struct bce_softc *sc)
{
	device_t dev = sc->bce_dev;
	uint8_t ptr;

	if (pci_is_pcix(dev))
		sc->bce_cap_flags |= BCE_PCIX_CAPABLE_FLAG;

	ptr = pci_get_pciecap_ptr(dev);
	if (ptr) {
		/* 0x12 is the Link Status register in the PCIe capability. */
		uint16_t link_status = pci_read_config(dev, ptr + 0x12, 2);

		/* Link Status: speed in bits 3:0, width in bits 9:4. */
		sc->link_speed = link_status & 0xf;
		sc->link_width = (link_status >> 4) & 0x3f;
		sc->bce_cap_flags |= BCE_PCIE_CAPABLE_FLAG;
		sc->bce_flags |= BCE_PCIE_FLAG;
	}
}

/****************************************************************************/
/* Device attach function.                                                  */
/*                                                                          */
/* Allocates device resources, performs secondary chip identification,     */
/* resets and initializes the hardware, and initializes driver instance    */
/* variables.                                                               */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.
*/ 688 /****************************************************************************/ 689 static int 690 bce_attach(device_t dev) 691 { 692 struct bce_softc *sc = device_get_softc(dev); 693 struct ifnet *ifp = &sc->arpcom.ac_if; 694 uint32_t val; 695 int rid, rc = 0; 696 int i, j; 697 struct mii_probe_args mii_args; 698 uintptr_t mii_priv = 0; 699 #ifdef IFPOLL_ENABLE 700 int offset, offset_def; 701 #endif 702 703 sc->bce_dev = dev; 704 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 705 706 lwkt_serialize_init(&sc->main_serialize); 707 for (i = 0; i < BCE_MSIX_MAX; ++i) { 708 struct bce_msix_data *msix = &sc->bce_msix[i]; 709 710 msix->msix_cpuid = -1; 711 msix->msix_rid = -1; 712 } 713 714 pci_enable_busmaster(dev); 715 716 bce_probe_pci_caps(sc); 717 718 /* Allocate PCI memory resources. */ 719 rid = PCIR_BAR(0); 720 sc->bce_res_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 721 RF_ACTIVE | PCI_RF_DENSE); 722 if (sc->bce_res_mem == NULL) { 723 device_printf(dev, "PCI memory allocation failed\n"); 724 return ENXIO; 725 } 726 sc->bce_btag = rman_get_bustag(sc->bce_res_mem); 727 sc->bce_bhandle = rman_get_bushandle(sc->bce_res_mem); 728 729 /* 730 * Configure byte swap and enable indirect register access. 731 * Rely on CPU to do target byte swapping on big endian systems. 732 * Access to registers outside of PCI configurtion space are not 733 * valid until this is done. 734 */ 735 pci_write_config(dev, BCE_PCICFG_MISC_CONFIG, 736 BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA | 737 BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP, 4); 738 739 /* Save ASIC revsion info. */ 740 sc->bce_chipid = REG_RD(sc, BCE_MISC_ID); 741 742 /* Weed out any non-production controller revisions. 
*/ 743 switch (BCE_CHIP_ID(sc)) { 744 case BCE_CHIP_ID_5706_A0: 745 case BCE_CHIP_ID_5706_A1: 746 case BCE_CHIP_ID_5708_A0: 747 case BCE_CHIP_ID_5708_B0: 748 case BCE_CHIP_ID_5709_A0: 749 case BCE_CHIP_ID_5709_B0: 750 case BCE_CHIP_ID_5709_B1: 751 #ifdef foo 752 /* 5709C B2 seems to work fine */ 753 case BCE_CHIP_ID_5709_B2: 754 #endif 755 device_printf(dev, "Unsupported chip id 0x%08x!\n", 756 BCE_CHIP_ID(sc)); 757 rc = ENODEV; 758 goto fail; 759 } 760 761 mii_priv |= BRGPHY_FLAG_WIRESPEED; 762 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) { 763 if (BCE_CHIP_REV(sc) == BCE_CHIP_REV_Ax || 764 BCE_CHIP_REV(sc) == BCE_CHIP_REV_Bx) 765 mii_priv |= BRGPHY_FLAG_NO_EARLYDAC; 766 } else { 767 mii_priv |= BRGPHY_FLAG_BER_BUG; 768 } 769 770 /* 771 * Find the base address for shared memory access. 772 * Newer versions of bootcode use a signature and offset 773 * while older versions use a fixed address. 774 */ 775 val = REG_RD_IND(sc, BCE_SHM_HDR_SIGNATURE); 776 if ((val & BCE_SHM_HDR_SIGNATURE_SIG_MASK) == 777 BCE_SHM_HDR_SIGNATURE_SIG) { 778 /* Multi-port devices use different offsets in shared memory. */ 779 sc->bce_shmem_base = REG_RD_IND(sc, 780 BCE_SHM_HDR_ADDR_0 + (pci_get_function(sc->bce_dev) << 2)); 781 } else { 782 sc->bce_shmem_base = HOST_VIEW_SHMEM_BASE; 783 } 784 785 /* Fetch the bootcode revision. */ 786 val = bce_shmem_rd(sc, BCE_DEV_INFO_BC_REV); 787 for (i = 0, j = 0; i < 3; i++) { 788 uint8_t num; 789 int k, skip0; 790 791 num = (uint8_t)(val >> (24 - (i * 8))); 792 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) { 793 if (num >= k || !skip0 || k == 1) { 794 sc->bce_bc_ver[j++] = (num / k) + '0'; 795 skip0 = 0; 796 } 797 } 798 if (i != 2) 799 sc->bce_bc_ver[j++] = '.'; 800 } 801 802 /* Check if any management firwmare is running. */ 803 val = bce_shmem_rd(sc, BCE_PORT_FEATURE); 804 if (val & BCE_PORT_FEATURE_ASF_ENABLED) { 805 sc->bce_flags |= BCE_MFW_ENABLE_FLAG; 806 807 /* Allow time for firmware to enter the running state. 
*/ 808 for (i = 0; i < 30; i++) { 809 val = bce_shmem_rd(sc, BCE_BC_STATE_CONDITION); 810 if (val & BCE_CONDITION_MFW_RUN_MASK) 811 break; 812 DELAY(10000); 813 } 814 } 815 816 /* Check the current bootcode state. */ 817 val = bce_shmem_rd(sc, BCE_BC_STATE_CONDITION) & 818 BCE_CONDITION_MFW_RUN_MASK; 819 if (val != BCE_CONDITION_MFW_RUN_UNKNOWN && 820 val != BCE_CONDITION_MFW_RUN_NONE) { 821 uint32_t addr = bce_shmem_rd(sc, BCE_MFW_VER_PTR); 822 823 for (i = 0, j = 0; j < 3; j++) { 824 val = bce_reg_rd_ind(sc, addr + j * 4); 825 val = bswap32(val); 826 memcpy(&sc->bce_mfw_ver[i], &val, 4); 827 i += 4; 828 } 829 } 830 831 /* Get PCI bus information (speed and type). */ 832 val = REG_RD(sc, BCE_PCICFG_MISC_STATUS); 833 if (val & BCE_PCICFG_MISC_STATUS_PCIX_DET) { 834 uint32_t clkreg; 835 836 sc->bce_flags |= BCE_PCIX_FLAG; 837 838 clkreg = REG_RD(sc, BCE_PCICFG_PCI_CLOCK_CONTROL_BITS) & 839 BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET; 840 switch (clkreg) { 841 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ: 842 sc->bus_speed_mhz = 133; 843 break; 844 845 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ: 846 sc->bus_speed_mhz = 100; 847 break; 848 849 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ: 850 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ: 851 sc->bus_speed_mhz = 66; 852 break; 853 854 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ: 855 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ: 856 sc->bus_speed_mhz = 50; 857 break; 858 859 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW: 860 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ: 861 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ: 862 sc->bus_speed_mhz = 33; 863 break; 864 } 865 } else { 866 if (val & BCE_PCICFG_MISC_STATUS_M66EN) 867 sc->bus_speed_mhz = 66; 868 else 869 sc->bus_speed_mhz = 33; 870 } 871 872 if (val & BCE_PCICFG_MISC_STATUS_32BIT_DET) 873 sc->bce_flags |= 
BCE_PCI_32BIT_FLAG; 874 875 /* Reset the controller. */ 876 rc = bce_reset(sc, BCE_DRV_MSG_CODE_RESET); 877 if (rc != 0) 878 goto fail; 879 880 /* Initialize the controller. */ 881 rc = bce_chipinit(sc); 882 if (rc != 0) { 883 device_printf(dev, "Controller initialization failed!\n"); 884 goto fail; 885 } 886 887 /* Perform NVRAM test. */ 888 rc = bce_nvram_test(sc); 889 if (rc != 0) { 890 device_printf(dev, "NVRAM test failed!\n"); 891 goto fail; 892 } 893 894 /* Fetch the permanent Ethernet MAC address. */ 895 bce_get_mac_addr(sc); 896 897 /* 898 * Trip points control how many BDs 899 * should be ready before generating an 900 * interrupt while ticks control how long 901 * a BD can sit in the chain before 902 * generating an interrupt. Set the default 903 * values for the RX and TX rings. 904 */ 905 906 #ifdef BCE_DRBUG 907 /* Force more frequent interrupts. */ 908 sc->bce_tx_quick_cons_trip_int = 1; 909 sc->bce_tx_quick_cons_trip = 1; 910 sc->bce_tx_ticks_int = 0; 911 sc->bce_tx_ticks = 0; 912 913 sc->bce_rx_quick_cons_trip_int = 1; 914 sc->bce_rx_quick_cons_trip = 1; 915 sc->bce_rx_ticks_int = 0; 916 sc->bce_rx_ticks = 0; 917 #else 918 sc->bce_tx_quick_cons_trip_int = bce_tx_bds_int; 919 sc->bce_tx_quick_cons_trip = bce_tx_bds; 920 sc->bce_tx_ticks_int = bce_tx_ticks_int; 921 sc->bce_tx_ticks = bce_tx_ticks; 922 923 sc->bce_rx_quick_cons_trip_int = bce_rx_bds_int; 924 sc->bce_rx_quick_cons_trip = bce_rx_bds; 925 sc->bce_rx_ticks_int = bce_rx_ticks_int; 926 sc->bce_rx_ticks = bce_rx_ticks; 927 #endif 928 929 /* Update statistics once every second. */ 930 sc->bce_stats_ticks = 1000000 & 0xffff00; 931 932 /* Find the media type for the adapter. */ 933 bce_get_media(sc); 934 935 /* Find out RX/TX ring count */ 936 bce_setup_ring_cnt(sc); 937 938 /* Allocate DMA memory resources. 
*/ 939 rc = bce_dma_alloc(sc); 940 if (rc != 0) { 941 device_printf(dev, "DMA resource allocation failed!\n"); 942 goto fail; 943 } 944 945 #ifdef IFPOLL_ENABLE 946 /* 947 * NPOLLING RX/TX CPU offset 948 */ 949 if (sc->rx_ring_cnt2 == ncpus2) { 950 offset = 0; 951 } else { 952 offset_def = (sc->rx_ring_cnt2 * device_get_unit(dev)) % ncpus2; 953 offset = device_getenv_int(dev, "npoll.offset", offset_def); 954 if (offset >= ncpus2 || 955 offset % sc->rx_ring_cnt2 != 0) { 956 device_printf(dev, "invalid npoll.offset %d, use %d\n", 957 offset, offset_def); 958 offset = offset_def; 959 } 960 } 961 sc->npoll_ofs = offset; 962 #endif 963 964 /* Allocate PCI IRQ resources. */ 965 rc = bce_alloc_intr(sc); 966 if (rc != 0) 967 goto fail; 968 969 /* Setup serializer */ 970 bce_setup_serialize(sc); 971 972 /* Initialize the ifnet interface. */ 973 ifp->if_softc = sc; 974 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 975 ifp->if_ioctl = bce_ioctl; 976 ifp->if_start = bce_start; 977 ifp->if_init = bce_init; 978 ifp->if_serialize = bce_serialize; 979 ifp->if_deserialize = bce_deserialize; 980 ifp->if_tryserialize = bce_tryserialize; 981 #ifdef INVARIANTS 982 ifp->if_serialize_assert = bce_serialize_assert; 983 #endif 984 #ifdef IFPOLL_ENABLE 985 ifp->if_npoll = bce_npoll; 986 #endif 987 988 ifp->if_mtu = ETHERMTU; 989 ifp->if_hwassist = BCE_CSUM_FEATURES | CSUM_TSO; 990 ifp->if_capabilities = BCE_IF_CAPABILITIES; 991 if (sc->rx_ring_cnt > 1) 992 ifp->if_capabilities |= IFCAP_RSS; 993 ifp->if_capenable = ifp->if_capabilities; 994 995 if (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG) 996 ifp->if_baudrate = IF_Gbps(2.5); 997 else 998 ifp->if_baudrate = IF_Gbps(1); 999 1000 ifq_set_maxlen(&ifp->if_snd, USABLE_TX_BD(&sc->tx_rings[0])); 1001 ifq_set_ready(&ifp->if_snd); 1002 ifq_set_subq_cnt(&ifp->if_snd, sc->tx_ring_cnt); 1003 1004 if (sc->tx_ring_cnt > 1) { 1005 ifp->if_mapsubq = ifq_mapsubq_mask; 1006 ifq_set_subq_mask(&ifp->if_snd, sc->tx_ring_cnt - 1); 1007 } 1008 
1009 /* 1010 * Look for our PHY. 1011 */ 1012 mii_probe_args_init(&mii_args, bce_ifmedia_upd, bce_ifmedia_sts); 1013 mii_args.mii_probemask = 1 << sc->bce_phy_addr; 1014 mii_args.mii_privtag = MII_PRIVTAG_BRGPHY; 1015 mii_args.mii_priv = mii_priv; 1016 1017 rc = mii_probe(dev, &sc->bce_miibus, &mii_args); 1018 if (rc != 0) { 1019 device_printf(dev, "PHY probe failed!\n"); 1020 goto fail; 1021 } 1022 1023 /* Attach to the Ethernet interface list. */ 1024 ether_ifattach(ifp, sc->eaddr, NULL); 1025 1026 /* Setup TX rings and subqueues */ 1027 for (i = 0; i < sc->tx_ring_cnt; ++i) { 1028 struct ifaltq_subque *ifsq = ifq_get_subq(&ifp->if_snd, i); 1029 struct bce_tx_ring *txr = &sc->tx_rings[i]; 1030 1031 ifsq_set_cpuid(ifsq, sc->bce_msix[i].msix_cpuid); 1032 ifsq_set_priv(ifsq, txr); 1033 ifsq_set_hw_serialize(ifsq, &txr->tx_serialize); 1034 txr->ifsq = ifsq; 1035 1036 ifsq_watchdog_init(&txr->tx_watchdog, ifsq, bce_watchdog); 1037 } 1038 1039 callout_init_mp(&sc->bce_tick_callout); 1040 callout_init_mp(&sc->bce_pulse_callout); 1041 callout_init_mp(&sc->bce_ckmsi_callout); 1042 1043 rc = bce_setup_intr(sc); 1044 if (rc != 0) { 1045 device_printf(dev, "Failed to setup IRQ!\n"); 1046 ether_ifdetach(ifp); 1047 goto fail; 1048 } 1049 1050 /* Set timer CPUID */ 1051 bce_set_timer_cpuid(sc, FALSE); 1052 1053 /* Add the supported sysctls to the kernel. */ 1054 bce_add_sysctls(sc); 1055 1056 /* 1057 * The chip reset earlier notified the bootcode that 1058 * a driver is present. We now need to start our pulse 1059 * routine so that the bootcode is reminded that we're 1060 * still running. 1061 */ 1062 bce_pulse(sc); 1063 1064 /* Get the firmware running so IPMI still works */ 1065 bce_mgmt_init(sc); 1066 1067 if (bootverbose) 1068 bce_print_adapter_info(sc); 1069 1070 return 0; 1071 fail: 1072 bce_detach(dev); 1073 return(rc); 1074 } 1075 1076 /****************************************************************************/ 1077 /* Device detach function. 
*/
/*                                                                          */
/* Stops the controller, resets the controller, and releases resources.    */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/
static int
bce_detach(device_t dev)
{
	struct bce_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = &sc->arpcom.ac_if;
		uint32_t msg;

		/* Serialize against interrupt handlers and timers before
		 * touching the hardware. */
		ifnet_serialize_all(ifp);

		/* Stop and reset the controller. */
		callout_stop(&sc->bce_pulse_callout);
		bce_stop(sc);
		if (sc->bce_flags & BCE_NO_WOL_FLAG)
			msg = BCE_DRV_MSG_CODE_UNLOAD_LNK_DN;
		else
			msg = BCE_DRV_MSG_CODE_UNLOAD;
		/* Notify the bootcode that the driver is unloading. */
		bce_reset(sc, msg);

		bce_teardown_intr(sc);

		ifnet_deserialize_all(ifp);

		ether_ifdetach(ifp);
	}

	/* If we have a child device on the MII bus remove it too. */
	if (sc->bce_miibus)
		device_delete_child(dev, sc->bce_miibus);
	bus_generic_detach(dev);

	bce_free_intr(sc);

	if (sc->bce_res_mem != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0),
		    sc->bce_res_mem);
	}

	bce_dma_free(sc);

	if (sc->serializes != NULL)
		kfree(sc->serializes, M_DEVBUF);

	return 0;
}

/****************************************************************************/
/* Device shutdown function.                                                */
/*                                                                          */
/* Stops and resets the controller.
*/ 1135 /* */ 1136 /* Returns: */ 1137 /* Nothing */ 1138 /****************************************************************************/ 1139 static void 1140 bce_shutdown(device_t dev) 1141 { 1142 struct bce_softc *sc = device_get_softc(dev); 1143 struct ifnet *ifp = &sc->arpcom.ac_if; 1144 uint32_t msg; 1145 1146 ifnet_serialize_all(ifp); 1147 1148 bce_stop(sc); 1149 if (sc->bce_flags & BCE_NO_WOL_FLAG) 1150 msg = BCE_DRV_MSG_CODE_UNLOAD_LNK_DN; 1151 else 1152 msg = BCE_DRV_MSG_CODE_UNLOAD; 1153 bce_reset(sc, msg); 1154 1155 ifnet_deserialize_all(ifp); 1156 } 1157 1158 /****************************************************************************/ 1159 /* Indirect register read. */ 1160 /* */ 1161 /* Reads NetXtreme II registers using an index/data register pair in PCI */ 1162 /* configuration space. Using this mechanism avoids issues with posted */ 1163 /* reads but is much slower than memory-mapped I/O. */ 1164 /* */ 1165 /* Returns: */ 1166 /* The value of the register. */ 1167 /****************************************************************************/ 1168 static uint32_t 1169 bce_reg_rd_ind(struct bce_softc *sc, uint32_t offset) 1170 { 1171 device_t dev = sc->bce_dev; 1172 1173 pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4); 1174 return pci_read_config(dev, BCE_PCICFG_REG_WINDOW, 4); 1175 } 1176 1177 /****************************************************************************/ 1178 /* Indirect register write. */ 1179 /* */ 1180 /* Writes NetXtreme II registers using an index/data register pair in PCI */ 1181 /* configuration space. Using this mechanism avoids issues with posted */ 1182 /* writes but is muchh slower than memory-mapped I/O. */ 1183 /* */ 1184 /* Returns: */ 1185 /* Nothing. 
*/ 1186 /****************************************************************************/ 1187 static void 1188 bce_reg_wr_ind(struct bce_softc *sc, uint32_t offset, uint32_t val) 1189 { 1190 device_t dev = sc->bce_dev; 1191 1192 pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4); 1193 pci_write_config(dev, BCE_PCICFG_REG_WINDOW, val, 4); 1194 } 1195 1196 /****************************************************************************/ 1197 /* Shared memory write. */ 1198 /* */ 1199 /* Writes NetXtreme II shared memory region. */ 1200 /* */ 1201 /* Returns: */ 1202 /* Nothing. */ 1203 /****************************************************************************/ 1204 static void 1205 bce_shmem_wr(struct bce_softc *sc, uint32_t offset, uint32_t val) 1206 { 1207 bce_reg_wr_ind(sc, sc->bce_shmem_base + offset, val); 1208 } 1209 1210 /****************************************************************************/ 1211 /* Shared memory read. */ 1212 /* */ 1213 /* Reads NetXtreme II shared memory region. */ 1214 /* */ 1215 /* Returns: */ 1216 /* The 32 bit value read. */ 1217 /****************************************************************************/ 1218 static u32 1219 bce_shmem_rd(struct bce_softc *sc, uint32_t offset) 1220 { 1221 return bce_reg_rd_ind(sc, sc->bce_shmem_base + offset); 1222 } 1223 1224 /****************************************************************************/ 1225 /* Context memory write. */ 1226 /* */ 1227 /* The NetXtreme II controller uses context memory to track connection */ 1228 /* information for L2 and higher network protocols. */ 1229 /* */ 1230 /* Returns: */ 1231 /* Nothing. 
*/
/****************************************************************************/
static void
bce_ctx_wr(struct bce_softc *sc, uint32_t cid_addr, uint32_t ctx_offset,
    uint32_t ctx_val)
{
	uint32_t idx, offset = ctx_offset + cid_addr;
	uint32_t val, retry_cnt = 5;

	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
		/* 5709/5716: write data first, then issue the write request
		 * and poll for completion. */
		REG_WR(sc, BCE_CTX_CTX_DATA, ctx_val);
		REG_WR(sc, BCE_CTX_CTX_CTRL, (offset | BCE_CTX_CTX_CTRL_WRITE_REQ));

		for (idx = 0; idx < retry_cnt; idx++) {
			val = REG_RD(sc, BCE_CTX_CTX_CTRL);
			if ((val & BCE_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			DELAY(5);
		}

		if (val & BCE_CTX_CTX_CTRL_WRITE_REQ) {
			device_printf(sc->bce_dev,
			    "Unable to write CTX memory: "
			    "cid_addr = 0x%08X, offset = 0x%08X!\n",
			    cid_addr, ctx_offset);
		}
	} else {
		/* Older chips: simple address/data register pair. */
		REG_WR(sc, BCE_CTX_DATA_ADR, offset);
		REG_WR(sc, BCE_CTX_DATA, ctx_val);
	}
}

/****************************************************************************/
/* PHY register read.                                                       */
/*                                                                          */
/* Implements register reads on the MII bus.                                */
/*                                                                          */
/* Returns:                                                                 */
/*   The value of the register.                                             */
/****************************************************************************/
static int
bce_miibus_read_reg(device_t dev, int phy, int reg)
{
	struct bce_softc *sc = device_get_softc(dev);
	uint32_t val;
	int i;

	/* Make sure we are accessing the correct PHY address. */
	KASSERT(phy == sc->bce_phy_addr,
	    ("invalid phyno %d, should be %d\n", phy, sc->bce_phy_addr));

	/* Hardware auto-polling must be paused while we use the MDIO bus. */
	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val = REG_RD(sc, BCE_EMAC_MDIO_MODE);
		val &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BCE_EMAC_MDIO_MODE, val);
		REG_RD(sc, BCE_EMAC_MDIO_MODE);

		DELAY(40);
	}

	/* Issue the read command and poll until START_BUSY clears. */
	val = BCE_MIPHY(phy) | BCE_MIREG(reg) |
	    BCE_EMAC_MDIO_COMM_COMMAND_READ | BCE_EMAC_MDIO_COMM_DISEXT |
	    BCE_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(sc, BCE_EMAC_MDIO_COMM, val);

	for (i = 0; i < BCE_PHY_TIMEOUT; i++) {
		DELAY(10);

		val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
		if (!(val & BCE_EMAC_MDIO_COMM_START_BUSY)) {
			DELAY(5);

			val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
			val &= BCE_EMAC_MDIO_COMM_DATA;
			break;
		}
	}

	if (val & BCE_EMAC_MDIO_COMM_START_BUSY) {
		if_printf(&sc->arpcom.ac_if,
		    "Error: PHY read timeout! phy = %d, reg = 0x%04X\n",
		    phy, reg);
		val = 0x0;
	} else {
		val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
	}

	/* Restore auto-polling if it was enabled. */
	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val = REG_RD(sc, BCE_EMAC_MDIO_MODE);
		val |= BCE_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BCE_EMAC_MDIO_MODE, val);
		REG_RD(sc, BCE_EMAC_MDIO_MODE);

		DELAY(40);
	}
	return (val & 0xffff);
}

/****************************************************************************/
/* PHY register write.                                                      */
/*                                                                          */
/* Implements register writes on the MII bus.                               */
/*                                                                          */
/* Returns:                                                                 */
/*   The value of the register.                                             */
/****************************************************************************/
static int
bce_miibus_write_reg(device_t dev, int phy, int reg, int val)
{
	struct bce_softc *sc = device_get_softc(dev);
	uint32_t val1;
	int i;

	/* Make sure we are accessing the correct PHY address. */
	KASSERT(phy == sc->bce_phy_addr,
	    ("invalid phyno %d, should be %d\n", phy, sc->bce_phy_addr));

	/* Hardware auto-polling must be paused while we use the MDIO bus. */
	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE);
		val1 &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BCE_EMAC_MDIO_MODE, val1);
		REG_RD(sc, BCE_EMAC_MDIO_MODE);

		DELAY(40);
	}

	/* Issue the write command and poll until START_BUSY clears. */
	val1 = BCE_MIPHY(phy) | BCE_MIREG(reg) | val |
	    BCE_EMAC_MDIO_COMM_COMMAND_WRITE |
	    BCE_EMAC_MDIO_COMM_START_BUSY | BCE_EMAC_MDIO_COMM_DISEXT;
	REG_WR(sc, BCE_EMAC_MDIO_COMM, val1);

	for (i = 0; i < BCE_PHY_TIMEOUT; i++) {
		DELAY(10);

		val1 = REG_RD(sc, BCE_EMAC_MDIO_COMM);
		if (!(val1 & BCE_EMAC_MDIO_COMM_START_BUSY)) {
			DELAY(5);
			break;
		}
	}

	if (val1 & BCE_EMAC_MDIO_COMM_START_BUSY)
		if_printf(&sc->arpcom.ac_if, "PHY write timeout!\n");

	/* Restore auto-polling if it was enabled. */
	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE);
		val1 |= BCE_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BCE_EMAC_MDIO_MODE, val1);
		REG_RD(sc, BCE_EMAC_MDIO_MODE);

		DELAY(40);
	}
	return 0;
}

/****************************************************************************/
/* MII bus status change.                                                   */
/*                                                                          */
/* Called by the MII bus driver when the PHY establishes link to set the    */
/* MAC interface registers.                                                 */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_miibus_statchg(device_t dev)
{
	struct bce_softc *sc = device_get_softc(dev);
	struct mii_data *mii = device_get_softc(sc->bce_miibus);

	BCE_CLRBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT);

	/*
	 * Set MII or GMII interface based on the speed negotiated
	 * by the PHY.
	 */
	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
	    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) {
		BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT_GMII);
	} else {
		BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT_MII);
	}

	/*
	 * Set half or full duplex based on the duplex mode negotiated
	 * by the PHY.
	 */
	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
		BCE_CLRBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_HALF_DUPLEX);
	} else {
		BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_HALF_DUPLEX);
	}
}

/****************************************************************************/
/* Acquire NVRAM lock.                                                      */
/*                                                                          */
/* Before the NVRAM can be accessed the caller must acquire an NVRAM lock.  */
/* Lock 2 is used by the driver; lock 1 is used by the firmware.            */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/
static int
bce_acquire_nvram_lock(struct bce_softc *sc)
{
	uint32_t val;
	int j;

	/* Request access to the flash interface. */
	REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_SET2);
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		val = REG_RD(sc, BCE_NVM_SW_ARB);
		if (val & BCE_NVM_SW_ARB_ARB_ARB2)
			break;

		DELAY(5);
	}

	if (j >= NVRAM_TIMEOUT_COUNT) {
		return EBUSY;
	}
	return 0;
}

/****************************************************************************/
/* Release NVRAM lock.                                                      */
/*                                                                          */
/* When the caller is finished accessing NVRAM the lock must be released.   */
/* Lock 2 is used by the driver; lock 1 is used by the firmware.            */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.
*/
/****************************************************************************/
static int
bce_release_nvram_lock(struct bce_softc *sc)
{
	int j;
	uint32_t val;

	/*
	 * Relinquish nvram interface.
	 */
	REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_CLR2);

	/* Wait for the arbiter to drop our grant bit. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		val = REG_RD(sc, BCE_NVM_SW_ARB);
		if (!(val & BCE_NVM_SW_ARB_ARB_ARB2))
			break;

		DELAY(5);
	}

	if (j >= NVRAM_TIMEOUT_COUNT) {
		return EBUSY;
	}
	return 0;
}

/****************************************************************************/
/* Enable NVRAM access.                                                     */
/*                                                                          */
/* Before accessing NVRAM for read or write operations the caller must     */
/* enable NVRAM access.                                                     */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_enable_nvram_access(struct bce_softc *sc)
{
	uint32_t val;

	val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE);
	/* Enable both bits, even on read. */
	REG_WR(sc, BCE_NVM_ACCESS_ENABLE,
	    val | BCE_NVM_ACCESS_ENABLE_EN | BCE_NVM_ACCESS_ENABLE_WR_EN);
}

/****************************************************************************/
/* Disable NVRAM access.                                                    */
/*                                                                          */
/* When the caller is finished accessing NVRAM access must be disabled.     */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_disable_nvram_access(struct bce_softc *sc)
{
	uint32_t val;

	val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE);

	/* Disable both bits, even after read. */
	REG_WR(sc, BCE_NVM_ACCESS_ENABLE,
	    val & ~(BCE_NVM_ACCESS_ENABLE_EN | BCE_NVM_ACCESS_ENABLE_WR_EN));
}

/****************************************************************************/
/* Read a dword (32 bits) from NVRAM.                                       */
/*                                                                          */
/* Read a 32 bit word from NVRAM.  The caller is assumed to have already    */
/* obtained the NVRAM lock and enabled the controller for NVRAM access.     */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success and the 32 bit value read, positive value on failure.     */
/****************************************************************************/
static int
bce_nvram_read_dword(struct bce_softc *sc, uint32_t offset, uint8_t *ret_val,
    uint32_t cmd_flags)
{
	uint32_t cmd;
	int i, rc = 0;

	/* Build the command word. */
	cmd = BCE_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate the offset for buffered flash. */
	if (sc->bce_flash_info->flags & BCE_NV_TRANSLATE) {
		offset = ((offset / sc->bce_flash_info->page_size) <<
		    sc->bce_flash_info->page_bits) +
		    (offset % sc->bce_flash_info->page_size);
	}

	/*
	 * Clear the DONE bit separately, set the address to read,
	 * and issue the read.
	 */
	REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
	REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
	REG_WR(sc, BCE_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (i = 0; i < NVRAM_TIMEOUT_COUNT; i++) {
		uint32_t val;

		DELAY(5);

		val = REG_RD(sc, BCE_NVM_COMMAND);
		if (val & BCE_NVM_COMMAND_DONE) {
			val = REG_RD(sc, BCE_NVM_READ);

			/* NVRAM data is big endian on the wire. */
			val = be32toh(val);
			memcpy(ret_val, &val, 4);
			break;
		}
	}

	/* Check for errors. */
	if (i >= NVRAM_TIMEOUT_COUNT) {
		if_printf(&sc->arpcom.ac_if,
		    "Timeout error reading NVRAM at offset 0x%08X!\n",
		    offset);
		rc = EBUSY;
	}
	return rc;
}

/****************************************************************************/
/* Initialize NVRAM access.                                                 */
/*                                                                          */
/* Identify the NVRAM device in use and prepare the NVRAM interface to      */
/* access that device.                                                      */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/
static int
bce_init_nvram(struct bce_softc *sc)
{
	uint32_t val;
	int j, entry_count, rc = 0;
	const struct flash_spec *flash;

	/* 5709/5716 always use the same (fixed) flash configuration. */
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
		sc->bce_flash_info = &flash_5709;
		goto bce_init_nvram_get_flash_size;
	}

	/* Determine the selected interface. */
	val = REG_RD(sc, BCE_NVM_CFG1);

	entry_count = sizeof(flash_table) / sizeof(struct flash_spec);

	/*
	 * Flash reconfiguration is required to support additional
	 * NVRAM devices not directly supported in hardware.
	 * Check if the flash interface was reconfigured
	 * by the bootcode.
	 */

	if (val & 0x40000000) {
		/* Flash interface reconfigured by bootcode. */
		for (j = 0, flash = flash_table; j < entry_count;
		    j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				sc->bce_flash_info = flash;
				break;
			}
		}
	} else {
		/* Flash interface not yet reconfigured. */
		uint32_t mask;

		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		/* Look for the matching NVRAM device configuration data. */
		for (j = 0, flash = flash_table; j < entry_count;
		    j++, flash++) {
			/* Check if the device matches any of the known devices. */
			if ((val & mask) == (flash->strapping & mask)) {
				/* Found a device match. */
				sc->bce_flash_info = flash;

				/* Request access to the flash interface. */
				rc = bce_acquire_nvram_lock(sc);
				if (rc != 0)
					return rc;

				/* Reconfigure the flash interface. */
				bce_enable_nvram_access(sc);
				REG_WR(sc, BCE_NVM_CFG1, flash->config1);
				REG_WR(sc, BCE_NVM_CFG2, flash->config2);
				REG_WR(sc, BCE_NVM_CFG3, flash->config3);
				REG_WR(sc, BCE_NVM_WRITE1, flash->write1);
				bce_disable_nvram_access(sc);
				bce_release_nvram_lock(sc);
				break;
			}
		}
	}

	/* Check if a matching device was found. */
	if (j == entry_count) {
		sc->bce_flash_info = NULL;
		if_printf(&sc->arpcom.ac_if, "Unknown Flash NVRAM found!\n");
		return ENODEV;
	}

bce_init_nvram_get_flash_size:
	/* Fetch the NVRAM size from shared memory, falling back to the
	 * size from the flash configuration table. */
	val = bce_shmem_rd(sc, BCE_SHARED_HW_CFG_CONFIG2) &
	    BCE_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		sc->bce_flash_size = val;
	else
		sc->bce_flash_size = sc->bce_flash_info->total_size;

	return rc;
}

/****************************************************************************/
/* Read an arbitrary range of data from NVRAM.                              */
/*                                                                          */
/* Prepares the NVRAM interface for access and reads the requested data     */
/* into the supplied buffer.                                                */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success and the data read, positive value on failure.
*/ 1703 /****************************************************************************/ 1704 static int 1705 bce_nvram_read(struct bce_softc *sc, uint32_t offset, uint8_t *ret_buf, 1706 int buf_size) 1707 { 1708 uint32_t cmd_flags, offset32, len32, extra; 1709 int rc = 0; 1710 1711 if (buf_size == 0) 1712 return 0; 1713 1714 /* Request access to the flash interface. */ 1715 rc = bce_acquire_nvram_lock(sc); 1716 if (rc != 0) 1717 return rc; 1718 1719 /* Enable access to flash interface */ 1720 bce_enable_nvram_access(sc); 1721 1722 len32 = buf_size; 1723 offset32 = offset; 1724 extra = 0; 1725 1726 cmd_flags = 0; 1727 1728 /* XXX should we release nvram lock if read_dword() fails? */ 1729 if (offset32 & 3) { 1730 uint8_t buf[4]; 1731 uint32_t pre_len; 1732 1733 offset32 &= ~3; 1734 pre_len = 4 - (offset & 3); 1735 1736 if (pre_len >= len32) { 1737 pre_len = len32; 1738 cmd_flags = BCE_NVM_COMMAND_FIRST | BCE_NVM_COMMAND_LAST; 1739 } else { 1740 cmd_flags = BCE_NVM_COMMAND_FIRST; 1741 } 1742 1743 rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags); 1744 if (rc) 1745 return rc; 1746 1747 memcpy(ret_buf, buf + (offset & 3), pre_len); 1748 1749 offset32 += 4; 1750 ret_buf += pre_len; 1751 len32 -= pre_len; 1752 } 1753 1754 if (len32 & 3) { 1755 extra = 4 - (len32 & 3); 1756 len32 = (len32 + 4) & ~3; 1757 } 1758 1759 if (len32 == 4) { 1760 uint8_t buf[4]; 1761 1762 if (cmd_flags) 1763 cmd_flags = BCE_NVM_COMMAND_LAST; 1764 else 1765 cmd_flags = BCE_NVM_COMMAND_FIRST | 1766 BCE_NVM_COMMAND_LAST; 1767 1768 rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags); 1769 1770 memcpy(ret_buf, buf, 4 - extra); 1771 } else if (len32 > 0) { 1772 uint8_t buf[4]; 1773 1774 /* Read the first word. */ 1775 if (cmd_flags) 1776 cmd_flags = 0; 1777 else 1778 cmd_flags = BCE_NVM_COMMAND_FIRST; 1779 1780 rc = bce_nvram_read_dword(sc, offset32, ret_buf, cmd_flags); 1781 1782 /* Advance to the next dword. 
*/ 1783 offset32 += 4; 1784 ret_buf += 4; 1785 len32 -= 4; 1786 1787 while (len32 > 4 && rc == 0) { 1788 rc = bce_nvram_read_dword(sc, offset32, ret_buf, 0); 1789 1790 /* Advance to the next dword. */ 1791 offset32 += 4; 1792 ret_buf += 4; 1793 len32 -= 4; 1794 } 1795 1796 if (rc) 1797 goto bce_nvram_read_locked_exit; 1798 1799 cmd_flags = BCE_NVM_COMMAND_LAST; 1800 rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags); 1801 1802 memcpy(ret_buf, buf, 4 - extra); 1803 } 1804 1805 bce_nvram_read_locked_exit: 1806 /* Disable access to flash interface and release the lock. */ 1807 bce_disable_nvram_access(sc); 1808 bce_release_nvram_lock(sc); 1809 1810 return rc; 1811 } 1812 1813 /****************************************************************************/ 1814 /* Verifies that NVRAM is accessible and contains valid data. */ 1815 /* */ 1816 /* Reads the configuration data from NVRAM and verifies that the CRC is */ 1817 /* correct. */ 1818 /* */ 1819 /* Returns: */ 1820 /* 0 on success, positive value on failure. */ 1821 /****************************************************************************/ 1822 static int 1823 bce_nvram_test(struct bce_softc *sc) 1824 { 1825 uint32_t buf[BCE_NVRAM_SIZE / 4]; 1826 uint32_t magic, csum; 1827 uint8_t *data = (uint8_t *)buf; 1828 int rc = 0; 1829 1830 /* 1831 * Check that the device NVRAM is valid by reading 1832 * the magic value at offset 0. 1833 */ 1834 rc = bce_nvram_read(sc, 0, data, 4); 1835 if (rc != 0) 1836 return rc; 1837 1838 magic = be32toh(buf[0]); 1839 if (magic != BCE_NVRAM_MAGIC) { 1840 if_printf(&sc->arpcom.ac_if, 1841 "Invalid NVRAM magic value! Expected: 0x%08X, " 1842 "Found: 0x%08X\n", BCE_NVRAM_MAGIC, magic); 1843 return ENODEV; 1844 } 1845 1846 /* 1847 * Verify that the device NVRAM includes valid 1848 * configuration data. 
1849 */ 1850 rc = bce_nvram_read(sc, 0x100, data, BCE_NVRAM_SIZE); 1851 if (rc != 0) 1852 return rc; 1853 1854 csum = ether_crc32_le(data, 0x100); 1855 if (csum != BCE_CRC32_RESIDUAL) { 1856 if_printf(&sc->arpcom.ac_if, 1857 "Invalid Manufacturing Information NVRAM CRC! " 1858 "Expected: 0x%08X, Found: 0x%08X\n", 1859 BCE_CRC32_RESIDUAL, csum); 1860 return ENODEV; 1861 } 1862 1863 csum = ether_crc32_le(data + 0x100, 0x100); 1864 if (csum != BCE_CRC32_RESIDUAL) { 1865 if_printf(&sc->arpcom.ac_if, 1866 "Invalid Feature Configuration Information " 1867 "NVRAM CRC! Expected: 0x%08X, Found: 08%08X\n", 1868 BCE_CRC32_RESIDUAL, csum); 1869 rc = ENODEV; 1870 } 1871 return rc; 1872 } 1873 1874 /****************************************************************************/ 1875 /* Identifies the current media type of the controller and sets the PHY */ 1876 /* address. */ 1877 /* */ 1878 /* Returns: */ 1879 /* Nothing. */ 1880 /****************************************************************************/ 1881 static void 1882 bce_get_media(struct bce_softc *sc) 1883 { 1884 uint32_t val; 1885 1886 sc->bce_phy_addr = 1; 1887 1888 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 || 1889 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) { 1890 uint32_t val = REG_RD(sc, BCE_MISC_DUAL_MEDIA_CTRL); 1891 uint32_t bond_id = val & BCE_MISC_DUAL_MEDIA_CTRL_BOND_ID; 1892 uint32_t strap; 1893 1894 /* 1895 * The BCM5709S is software configurable 1896 * for Copper or SerDes operation. 
1897 */ 1898 if (bond_id == BCE_MISC_DUAL_MEDIA_CTRL_BOND_ID_C) { 1899 return; 1900 } else if (bond_id == BCE_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) { 1901 sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG; 1902 return; 1903 } 1904 1905 if (val & BCE_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE) { 1906 strap = (val & BCE_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21; 1907 } else { 1908 strap = 1909 (val & BCE_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8; 1910 } 1911 1912 if (pci_get_function(sc->bce_dev) == 0) { 1913 switch (strap) { 1914 case 0x4: 1915 case 0x5: 1916 case 0x6: 1917 sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG; 1918 break; 1919 } 1920 } else { 1921 switch (strap) { 1922 case 0x1: 1923 case 0x2: 1924 case 0x4: 1925 sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG; 1926 break; 1927 } 1928 } 1929 } else if (BCE_CHIP_BOND_ID(sc) & BCE_CHIP_BOND_ID_SERDES_BIT) { 1930 sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG; 1931 } 1932 1933 if (sc->bce_phy_flags & BCE_PHY_SERDES_FLAG) { 1934 sc->bce_flags |= BCE_NO_WOL_FLAG; 1935 if (BCE_CHIP_NUM(sc) != BCE_CHIP_NUM_5706) { 1936 sc->bce_phy_addr = 2; 1937 val = bce_shmem_rd(sc, BCE_SHARED_HW_CFG_CONFIG); 1938 if (val & BCE_SHARED_HW_CFG_PHY_2_5G) 1939 sc->bce_phy_flags |= BCE_PHY_2_5G_CAPABLE_FLAG; 1940 } 1941 } else if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) || 1942 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708)) { 1943 sc->bce_phy_flags |= BCE_PHY_CRC_FIX_FLAG; 1944 } 1945 } 1946 1947 static void 1948 bce_destroy_tx_ring(struct bce_tx_ring *txr) 1949 { 1950 int i; 1951 1952 /* Destroy the TX buffer descriptor DMA stuffs. */ 1953 if (txr->tx_bd_chain_tag != NULL) { 1954 for (i = 0; i < txr->tx_pages; i++) { 1955 if (txr->tx_bd_chain[i] != NULL) { 1956 bus_dmamap_unload(txr->tx_bd_chain_tag, 1957 txr->tx_bd_chain_map[i]); 1958 bus_dmamem_free(txr->tx_bd_chain_tag, 1959 txr->tx_bd_chain[i], 1960 txr->tx_bd_chain_map[i]); 1961 } 1962 } 1963 bus_dma_tag_destroy(txr->tx_bd_chain_tag); 1964 } 1965 1966 /* Destroy the TX mbuf DMA stuffs. 
	 */
	if (txr->tx_mbuf_tag != NULL) {
		for (i = 0; i < TOTAL_TX_BD(txr); i++) {
			/* Must have been unloaded in bce_stop() */
			KKASSERT(txr->tx_bufs[i].tx_mbuf_ptr == NULL);
			bus_dmamap_destroy(txr->tx_mbuf_tag,
			    txr->tx_bufs[i].tx_mbuf_map);
		}
		bus_dma_tag_destroy(txr->tx_mbuf_tag);
	}

	/* Free the bookkeeping arrays last. */
	if (txr->tx_bd_chain_map != NULL)
		kfree(txr->tx_bd_chain_map, M_DEVBUF);
	if (txr->tx_bd_chain != NULL)
		kfree(txr->tx_bd_chain, M_DEVBUF);
	if (txr->tx_bd_chain_paddr != NULL)
		kfree(txr->tx_bd_chain_paddr, M_DEVBUF);

	if (txr->tx_bufs != NULL)
		kfree(txr->tx_bufs, M_DEVBUF);
}

/*
 * Release all DMA resources owned by an RX ring: descriptor chain pages,
 * per-mbuf DMA maps (plus the temporary map) and the bookkeeping arrays.
 */
static void
bce_destroy_rx_ring(struct bce_rx_ring *rxr)
{
	int i;

	/* Destroy the RX buffer descriptor DMA stuffs. */
	if (rxr->rx_bd_chain_tag != NULL) {
		for (i = 0; i < rxr->rx_pages; i++) {
			if (rxr->rx_bd_chain[i] != NULL) {
				bus_dmamap_unload(rxr->rx_bd_chain_tag,
				    rxr->rx_bd_chain_map[i]);
				bus_dmamem_free(rxr->rx_bd_chain_tag,
				    rxr->rx_bd_chain[i],
				    rxr->rx_bd_chain_map[i]);
			}
		}
		bus_dma_tag_destroy(rxr->rx_bd_chain_tag);
	}

	/* Destroy the RX mbuf DMA stuffs.
	 */
	if (rxr->rx_mbuf_tag != NULL) {
		for (i = 0; i < TOTAL_RX_BD(rxr); i++) {
			/* Must have been unloaded in bce_stop() */
			KKASSERT(rxr->rx_bufs[i].rx_mbuf_ptr == NULL);
			bus_dmamap_destroy(rxr->rx_mbuf_tag,
			    rxr->rx_bufs[i].rx_mbuf_map);
		}
		bus_dmamap_destroy(rxr->rx_mbuf_tag, rxr->rx_mbuf_tmpmap);
		bus_dma_tag_destroy(rxr->rx_mbuf_tag);
	}

	/* Free the bookkeeping arrays last. */
	if (rxr->rx_bd_chain_map != NULL)
		kfree(rxr->rx_bd_chain_map, M_DEVBUF);
	if (rxr->rx_bd_chain != NULL)
		kfree(rxr->rx_bd_chain, M_DEVBUF);
	if (rxr->rx_bd_chain_paddr != NULL)
		kfree(rxr->rx_bd_chain_paddr, M_DEVBUF);

	if (rxr->rx_bufs != NULL)
		kfree(rxr->rx_bufs, M_DEVBUF);
}

/****************************************************************************/
/* Free any DMA memory owned by the driver.                                 */
/*                                                                          */
/* Scans through each data structure that requires DMA memory and frees     */
/* the memory if allocated.                                                 */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_dma_free(struct bce_softc *sc)
{
	int i;

	/* Destroy the status block. */
	if (sc->status_tag != NULL) {
		if (sc->status_block != NULL) {
			bus_dmamap_unload(sc->status_tag, sc->status_map);
			bus_dmamem_free(sc->status_tag, sc->status_block,
			    sc->status_map);
		}
		bus_dma_tag_destroy(sc->status_tag);
	}

	/* Destroy the statistics block. */
	if (sc->stats_tag != NULL) {
		if (sc->stats_block != NULL) {
			bus_dmamap_unload(sc->stats_tag, sc->stats_map);
			bus_dmamem_free(sc->stats_tag, sc->stats_block,
			    sc->stats_map);
		}
		bus_dma_tag_destroy(sc->stats_tag);
	}

	/* Destroy the CTX DMA stuffs.
	 */
	if (sc->ctx_tag != NULL) {
		for (i = 0; i < sc->ctx_pages; i++) {
			if (sc->ctx_block[i] != NULL) {
				bus_dmamap_unload(sc->ctx_tag, sc->ctx_map[i]);
				bus_dmamem_free(sc->ctx_tag, sc->ctx_block[i],
				    sc->ctx_map[i]);
			}
		}
		bus_dma_tag_destroy(sc->ctx_tag);
	}

	/* Free TX rings */
	if (sc->tx_rings != NULL) {
		for (i = 0; i < sc->tx_ring_cnt; ++i)
			bce_destroy_tx_ring(&sc->tx_rings[i]);
		kfree(sc->tx_rings, M_DEVBUF);
	}

	/* Free RX rings */
	if (sc->rx_rings != NULL) {
		for (i = 0; i < sc->rx_ring_cnt; ++i)
			bce_destroy_rx_ring(&sc->rx_rings[i]);
		kfree(sc->rx_rings, M_DEVBUF);
	}

	/* Destroy the parent tag last, after all child tags are gone. */
	if (sc->parent_tag != NULL)
		bus_dma_tag_destroy(sc->parent_tag);
}

/****************************************************************************/
/* Get DMA memory from the OS.                                              */
/*                                                                          */
/* Validates that the OS has provided DMA buffers in response to a          */
/* bus_dmamap_load() call and saves the physical address of those buffers.  */
/* When the callback is used the OS will return 0 for the mapping function  */
/* (bus_dmamap_load()) so we use the value of map_arg->maxsegs to pass any  */
/* failures back to the caller.                                             */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	/* 'arg' is the caller-provided slot for the segment address. */
	bus_addr_t *busaddr = arg;

	/* Check for an error and signal the caller that an error occurred.
	 */
	if (error)
		return;

	KASSERT(nseg == 1, ("only one segment is allowed"));
	*busaddr = segs->ds_addr;
}

/*
 * Allocate and initialize the DMA resources of a TX ring: the
 * descriptor chain pages, the per-mbuf DMA maps and the bookkeeping
 * arrays.  Returns 0 on success, a positive errno on failure.
 */
static int
bce_create_tx_ring(struct bce_tx_ring *txr)
{
	int pages, rc, i;

	lwkt_serialize_init(&txr->tx_serialize);
	txr->tx_wreg = bce_tx_wreg;

	/* Tunable page count; fall back to the default if out of range. */
	pages = device_getenv_int(txr->sc->bce_dev, "tx_pages", bce_tx_pages);
	if (pages <= 0 || pages > TX_PAGES_MAX || !powerof2(pages)) {
		device_printf(txr->sc->bce_dev, "invalid # of TX pages\n");
		pages = TX_PAGES_DEFAULT;
	}
	txr->tx_pages = pages;

	txr->tx_bd_chain_map = kmalloc(sizeof(bus_dmamap_t) * txr->tx_pages,
	    M_DEVBUF, M_WAITOK | M_ZERO);
	txr->tx_bd_chain = kmalloc(sizeof(struct tx_bd *) * txr->tx_pages,
	    M_DEVBUF, M_WAITOK | M_ZERO);
	txr->tx_bd_chain_paddr = kmalloc(sizeof(bus_addr_t) * txr->tx_pages,
	    M_DEVBUF, M_WAITOK | M_ZERO);

	txr->tx_bufs = kmalloc_cachealign(
	    sizeof(struct bce_tx_buf) * TOTAL_TX_BD(txr),
	    M_DEVBUF, M_WAITOK | M_ZERO);

	/*
	 * Create a DMA tag for the TX buffer descriptor chain,
	 * allocate and clear the memory, and fetch the
	 * physical address of the block.
	 */
	rc = bus_dma_tag_create(txr->sc->parent_tag, BCM_PAGE_SIZE, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    BCE_TX_CHAIN_PAGE_SZ, 1, BCE_TX_CHAIN_PAGE_SZ,
	    0, &txr->tx_bd_chain_tag);
	if (rc != 0) {
		device_printf(txr->sc->bce_dev, "Could not allocate "
		    "TX descriptor chain DMA tag!\n");
		return rc;
	}

	for (i = 0; i < txr->tx_pages; i++) {
		bus_addr_t busaddr;

		rc = bus_dmamem_alloc(txr->tx_bd_chain_tag,
		    (void **)&txr->tx_bd_chain[i],
		    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
		    &txr->tx_bd_chain_map[i]);
		if (rc != 0) {
			device_printf(txr->sc->bce_dev,
			    "Could not allocate %dth TX descriptor "
			    "chain DMA memory!\n", i);
			return rc;
		}

		rc = bus_dmamap_load(txr->tx_bd_chain_tag,
		    txr->tx_bd_chain_map[i],
		    txr->tx_bd_chain[i],
		    BCE_TX_CHAIN_PAGE_SZ,
		    bce_dma_map_addr, &busaddr,
		    BUS_DMA_WAITOK);
		if (rc != 0) {
			/* Coherent memory must load synchronously. */
			if (rc == EINPROGRESS) {
				panic("%s coherent memory loading "
				    "is still in progress!",
				    txr->sc->arpcom.ac_if.if_xname);
			}
			device_printf(txr->sc->bce_dev, "Could not map %dth "
			    "TX descriptor chain DMA memory!\n", i);
			bus_dmamem_free(txr->tx_bd_chain_tag,
			    txr->tx_bd_chain[i],
			    txr->tx_bd_chain_map[i]);
			txr->tx_bd_chain[i] = NULL;
			return rc;
		}

		txr->tx_bd_chain_paddr[i] = busaddr;
	}

	/* Create a DMA tag for TX mbufs. */
	rc = bus_dma_tag_create(txr->sc->parent_tag, 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    IP_MAXPACKET + sizeof(struct ether_vlan_header),
	    BCE_MAX_SEGMENTS, PAGE_SIZE,
	    BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
	    &txr->tx_mbuf_tag);
	if (rc != 0) {
		device_printf(txr->sc->bce_dev,
		    "Could not allocate TX mbuf DMA tag!\n");
		return rc;
	}

	/* Create DMA maps for the TX mbufs clusters.
	 */
	for (i = 0; i < TOTAL_TX_BD(txr); i++) {
		rc = bus_dmamap_create(txr->tx_mbuf_tag,
		    BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
		    &txr->tx_bufs[i].tx_mbuf_map);
		if (rc != 0) {
			int j;

			/* Unwind the maps created so far. */
			for (j = 0; j < i; ++j) {
				bus_dmamap_destroy(txr->tx_mbuf_tag,
				    txr->tx_bufs[j].tx_mbuf_map);
			}
			bus_dma_tag_destroy(txr->tx_mbuf_tag);
			txr->tx_mbuf_tag = NULL;

			device_printf(txr->sc->bce_dev, "Unable to create "
			    "%dth TX mbuf DMA map!\n", i);
			return rc;
		}
	}
	return 0;
}

/*
 * Allocate and initialize the DMA resources of an RX ring: the
 * descriptor chain pages, the per-mbuf DMA maps and the bookkeeping
 * arrays.  Returns 0 on success, a positive errno on failure.
 */
static int
bce_create_rx_ring(struct bce_rx_ring *rxr)
{
	int pages, rc, i;

	lwkt_serialize_init(&rxr->rx_serialize);

	/* Tunable page count; fall back to the default if out of range. */
	pages = device_getenv_int(rxr->sc->bce_dev, "rx_pages", bce_rx_pages);
	if (pages <= 0 || pages > RX_PAGES_MAX || !powerof2(pages)) {
		device_printf(rxr->sc->bce_dev, "invalid # of RX pages\n");
		pages = RX_PAGES_DEFAULT;
	}
	rxr->rx_pages = pages;

	rxr->rx_bd_chain_map = kmalloc(sizeof(bus_dmamap_t) * rxr->rx_pages,
	    M_DEVBUF, M_WAITOK | M_ZERO);
	rxr->rx_bd_chain = kmalloc(sizeof(struct rx_bd *) * rxr->rx_pages,
	    M_DEVBUF, M_WAITOK | M_ZERO);
	rxr->rx_bd_chain_paddr = kmalloc(sizeof(bus_addr_t) * rxr->rx_pages,
	    M_DEVBUF, M_WAITOK | M_ZERO);

	rxr->rx_bufs = kmalloc_cachealign(
	    sizeof(struct bce_rx_buf) * TOTAL_RX_BD(rxr),
	    M_DEVBUF, M_WAITOK | M_ZERO);

	/*
	 * Create a DMA tag for the RX buffer descriptor chain,
	 * allocate and clear the memory, and fetch the physical
	 * address of the blocks.
	 */
	rc = bus_dma_tag_create(rxr->sc->parent_tag, BCM_PAGE_SIZE, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    BCE_RX_CHAIN_PAGE_SZ, 1, BCE_RX_CHAIN_PAGE_SZ,
	    0, &rxr->rx_bd_chain_tag);
	if (rc != 0) {
		device_printf(rxr->sc->bce_dev, "Could not allocate "
		    "RX descriptor chain DMA tag!\n");
		return rc;
	}

	for (i = 0; i < rxr->rx_pages; i++) {
		bus_addr_t busaddr;

		rc = bus_dmamem_alloc(rxr->rx_bd_chain_tag,
		    (void **)&rxr->rx_bd_chain[i],
		    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
		    &rxr->rx_bd_chain_map[i]);
		if (rc != 0) {
			device_printf(rxr->sc->bce_dev,
			    "Could not allocate %dth RX descriptor "
			    "chain DMA memory!\n", i);
			return rc;
		}

		rc = bus_dmamap_load(rxr->rx_bd_chain_tag,
		    rxr->rx_bd_chain_map[i],
		    rxr->rx_bd_chain[i],
		    BCE_RX_CHAIN_PAGE_SZ,
		    bce_dma_map_addr, &busaddr,
		    BUS_DMA_WAITOK);
		if (rc != 0) {
			/* Coherent memory must load synchronously. */
			if (rc == EINPROGRESS) {
				panic("%s coherent memory loading "
				    "is still in progress!",
				    rxr->sc->arpcom.ac_if.if_xname);
			}
			device_printf(rxr->sc->bce_dev,
			    "Could not map %dth RX descriptor "
			    "chain DMA memory!\n", i);
			bus_dmamem_free(rxr->rx_bd_chain_tag,
			    rxr->rx_bd_chain[i],
			    rxr->rx_bd_chain_map[i]);
			rxr->rx_bd_chain[i] = NULL;
			return rc;
		}

		rxr->rx_bd_chain_paddr[i] = busaddr;
	}

	/* Create a DMA tag for RX mbufs. */
	rc = bus_dma_tag_create(rxr->sc->parent_tag, BCE_DMA_RX_ALIGN, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    MCLBYTES, 1, MCLBYTES,
	    BUS_DMA_ALLOCNOW | BUS_DMA_ALIGNED | BUS_DMA_WAITOK,
	    &rxr->rx_mbuf_tag);
	if (rc != 0) {
		device_printf(rxr->sc->bce_dev,
		    "Could not allocate RX mbuf DMA tag!\n");
		return rc;
	}

	/* Create tmp DMA map for RX mbuf clusters.
	 */
	rc = bus_dmamap_create(rxr->rx_mbuf_tag, BUS_DMA_WAITOK,
	    &rxr->rx_mbuf_tmpmap);
	if (rc != 0) {
		bus_dma_tag_destroy(rxr->rx_mbuf_tag);
		rxr->rx_mbuf_tag = NULL;

		device_printf(rxr->sc->bce_dev,
		    "Could not create RX mbuf tmp DMA map!\n");
		return rc;
	}

	/* Create DMA maps for the RX mbuf clusters. */
	for (i = 0; i < TOTAL_RX_BD(rxr); i++) {
		rc = bus_dmamap_create(rxr->rx_mbuf_tag, BUS_DMA_WAITOK,
		    &rxr->rx_bufs[i].rx_mbuf_map);
		if (rc != 0) {
			int j;

			/* Unwind the maps created so far. */
			for (j = 0; j < i; ++j) {
				bus_dmamap_destroy(rxr->rx_mbuf_tag,
				    rxr->rx_bufs[j].rx_mbuf_map);
			}
			bus_dma_tag_destroy(rxr->rx_mbuf_tag);
			rxr->rx_mbuf_tag = NULL;

			device_printf(rxr->sc->bce_dev, "Unable to create "
			    "%dth RX mbuf DMA map!\n", i);
			return rc;
		}
	}
	return 0;
}

/****************************************************************************/
/* Allocate any DMA memory needed by the driver.                            */
/*                                                                          */
/* Allocates DMA memory needed for the various global structures needed by  */
/* hardware.                                                                */
/*                                                                          */
/* Memory alignment requirements:                                           */
/* -----------------+----------+----------+----------+----------+          */
/* Data Structure   |   5706   |   5708   |   5709   |   5716   |          */
/* -----------------+----------+----------+----------+----------+          */
/* Status Block     | 8 bytes  | 8 bytes  | 16 bytes | 16 bytes |          */
/* Statistics Block | 8 bytes  | 8 bytes  | 16 bytes | 16 bytes |          */
/* RX Buffers       | 16 bytes | 16 bytes | 16 bytes | 16 bytes |          */
/* PG Buffers       |   none   |   none   |   none   |   none   |          */
/* TX Buffers       |   none   |   none   |   none   |   none   |          */
/* Chain Pages(1)   |   4KiB   |   4KiB   |   4KiB   |   4KiB   |          */
/* Context Pages(1) |   N/A    |   N/A    |   4KiB   |   4KiB   |          */
/* -----------------+----------+----------+----------+----------+          */
/*                                                                          */
/* (1) Must align with CPU page size (BCM_PAGE_SIZE).
 */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
static int
bce_dma_alloc(struct bce_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int i, rc = 0;
	bus_addr_t busaddr, max_busaddr;
	bus_size_t status_align, stats_align, status_size;

	/*
	 * The embedded PCIe to PCI-X bridge (EPB)
	 * in the 5708 cannot address memory above
	 * 40 bits (E7_5708CB1_23043 & E6_5708SB1_23043).
	 */
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708)
		max_busaddr = BCE_BUS_SPACE_MAXADDR;
	else
		max_busaddr = BUS_SPACE_MAXADDR;

	/*
	 * BCM5709 and BCM5716 uses host memory as cache for context memory.
	 */
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
		sc->ctx_pages = BCE_CTX_BLK_SZ / BCM_PAGE_SIZE;
		if (sc->ctx_pages == 0)
			sc->ctx_pages = 1;
		if (sc->ctx_pages > BCE_CTX_PAGES) {
			device_printf(sc->bce_dev, "excessive ctx pages %d\n",
			    sc->ctx_pages);
			return ENOMEM;
		}
		status_align = 16;
		stats_align = 16;
	} else {
		status_align = 8;
		stats_align = 8;
	}

	/*
	 * Each MSI-X vector needs a status block; each status block
	 * consumes 128bytes and is 128bytes aligned.
	 */
	if (sc->rx_ring_cnt > 1) {
		status_size = BCE_MSIX_MAX * BCE_STATUS_BLK_MSIX_ALIGN;
		status_align = BCE_STATUS_BLK_MSIX_ALIGN;
	} else {
		status_size = BCE_STATUS_BLK_SZ;
	}

	/*
	 * Allocate the parent bus DMA tag appropriate for PCI.
	 */
	rc = bus_dma_tag_create(NULL, 1, BCE_DMA_BOUNDARY,
	    max_busaddr, BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    BUS_SPACE_MAXSIZE_32BIT, 0,
	    BUS_SPACE_MAXSIZE_32BIT,
	    0, &sc->parent_tag);
	if (rc != 0) {
		if_printf(ifp, "Could not allocate parent DMA tag!\n");
		return rc;
	}

	/*
	 * Allocate status block.
	 */
	sc->status_block = bus_dmamem_coherent_any(sc->parent_tag,
	    status_align, status_size,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO,
	    &sc->status_tag, &sc->status_map,
	    &sc->status_block_paddr);
	if (sc->status_block == NULL) {
		if_printf(ifp, "Could not allocate status block!\n");
		return ENOMEM;
	}

	/*
	 * Allocate statistics block.
	 */
	sc->stats_block = bus_dmamem_coherent_any(sc->parent_tag,
	    stats_align, BCE_STATS_BLK_SZ,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO,
	    &sc->stats_tag, &sc->stats_map,
	    &sc->stats_block_paddr);
	if (sc->stats_block == NULL) {
		if_printf(ifp, "Could not allocate statistics block!\n");
		return ENOMEM;
	}

	/*
	 * Allocate context block, if needed
	 */
	if (sc->ctx_pages != 0) {
		rc = bus_dma_tag_create(sc->parent_tag, BCM_PAGE_SIZE, 0,
		    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
		    NULL, NULL,
		    BCM_PAGE_SIZE, 1, BCM_PAGE_SIZE,
		    0, &sc->ctx_tag);
		if (rc != 0) {
			if_printf(ifp, "Could not allocate "
			    "context block DMA tag!\n");
			return rc;
		}

		for (i = 0; i < sc->ctx_pages; i++) {
			rc = bus_dmamem_alloc(sc->ctx_tag,
			    (void **)&sc->ctx_block[i],
			    BUS_DMA_WAITOK | BUS_DMA_ZERO |
			    BUS_DMA_COHERENT,
			    &sc->ctx_map[i]);
			if (rc != 0) {
				if_printf(ifp, "Could not allocate %dth context "
				    "DMA memory!\n", i);
				return rc;
			}

			rc = bus_dmamap_load(sc->ctx_tag, sc->ctx_map[i],
			    sc->ctx_block[i], BCM_PAGE_SIZE,
			    bce_dma_map_addr, &busaddr,
			    BUS_DMA_WAITOK);
			if (rc != 0) {
				/* Coherent memory must load synchronously. */
				if (rc == EINPROGRESS) {
					panic("%s coherent memory loading "
					    "is still in progress!", ifp->if_xname);
				}
				if_printf(ifp, "Could not map %dth context "
				    "DMA memory!\n", i);
				bus_dmamem_free(sc->ctx_tag, sc->ctx_block[i],
				    sc->ctx_map[i]);
				sc->ctx_block[i] = NULL;
				return rc;
			}
			sc->ctx_paddr[i] = busaddr;
		}
	}

	/*
	 * Create the TX rings.  Ring 0 uses the primary status block;
	 * additional (MSI-X) rings each use their own 128-byte aligned
	 * status block inside the status block array.
	 */
	sc->tx_rings = kmalloc_cachealign(
	    sizeof(struct bce_tx_ring) * sc->tx_ring_cnt, M_DEVBUF,
	    M_WAITOK | M_ZERO);
	for (i = 0; i < sc->tx_ring_cnt; ++i) {
		sc->tx_rings[i].sc = sc;
		if (i == 0) {
			sc->tx_rings[i].tx_cid = TX_CID;
			sc->tx_rings[i].tx_hw_cons =
			    &sc->status_block->status_tx_quick_consumer_index0;
		} else {
			struct status_block_msix *sblk =
			    (struct status_block_msix *)
			    (((uint8_t *)(sc->status_block)) +
			     (i * BCE_STATUS_BLK_MSIX_ALIGN));

			sc->tx_rings[i].tx_cid = TX_TSS_CID + i - 1;
			sc->tx_rings[i].tx_hw_cons =
			    &sblk->status_tx_quick_consumer_index;
		}

		rc = bce_create_tx_ring(&sc->tx_rings[i]);
		if (rc != 0) {
			device_printf(sc->bce_dev,
			    "can't create %dth tx ring\n", i);
			return rc;
		}
	}

	/* Create the RX rings; same status block association as TX. */
	sc->rx_rings = kmalloc_cachealign(
	    sizeof(struct bce_rx_ring) * sc->rx_ring_cnt, M_DEVBUF,
	    M_WAITOK | M_ZERO);
	for (i = 0; i < sc->rx_ring_cnt; ++i) {
		sc->rx_rings[i].sc = sc;
		sc->rx_rings[i].idx = i;
		if (i == 0) {
			sc->rx_rings[i].rx_cid = RX_CID;
			sc->rx_rings[i].rx_hw_cons =
			    &sc->status_block->status_rx_quick_consumer_index0;
			sc->rx_rings[i].hw_status_idx =
			    &sc->status_block->status_idx;
		} else {
			struct status_block_msix *sblk =
			    (struct status_block_msix *)
			    (((uint8_t *)(sc->status_block)) +
			     (i * BCE_STATUS_BLK_MSIX_ALIGN));

			sc->rx_rings[i].rx_cid = RX_RSS_CID + i - 1;
			sc->rx_rings[i].rx_hw_cons =
			    &sblk->status_rx_quick_consumer_index;
			sc->rx_rings[i].hw_status_idx = &sblk->status_idx;
		}

		rc = bce_create_rx_ring(&sc->rx_rings[i]);
		if (rc != 0) {
			device_printf(sc->bce_dev,
			    "can't create %dth rx ring\n", i);
			return rc;
		}
	}

	return 0;
}

/****************************************************************************/
/* Firmware synchronization.                                                */
/*                                                                          */
/* Before performing certain events such as a chip reset, synchronize with  */
/* the firmware first.                                                      */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
static int
bce_fw_sync(struct bce_softc *sc, uint32_t msg_data)
{
	int i, rc = 0;
	uint32_t val;

	/* Don't waste any time if we've timed out before. */
	if (sc->bce_fw_timed_out)
		return EBUSY;

	/* Increment the message sequence number. */
	sc->bce_fw_wr_seq++;
	msg_data |= sc->bce_fw_wr_seq;

	/* Send the message to the bootcode driver mailbox. */
	bce_shmem_wr(sc, BCE_DRV_MB, msg_data);

	/* Wait for the bootcode to acknowledge the message. */
	for (i = 0; i < FW_ACK_TIME_OUT_MS; i++) {
		/* Check for a response in the bootcode firmware mailbox. */
		val = bce_shmem_rd(sc, BCE_FW_MB);
		if ((val & BCE_FW_MSG_ACK) == (msg_data & BCE_DRV_MSG_SEQ))
			break;
		DELAY(1000);
	}

	/* If we've timed out, tell the bootcode that we've stopped waiting. */
	if ((val & BCE_FW_MSG_ACK) != (msg_data & BCE_DRV_MSG_SEQ) &&
	    (msg_data & BCE_DRV_MSG_DATA) != BCE_DRV_MSG_DATA_WAIT0) {
		if_printf(&sc->arpcom.ac_if,
		    "Firmware synchronization timeout! "
		    "msg_data = 0x%08X\n", msg_data);

		msg_data &= ~BCE_DRV_MSG_CODE;
		msg_data |= BCE_DRV_MSG_CODE_FW_TIMEOUT;

		bce_shmem_wr(sc, BCE_DRV_MB, msg_data);

		sc->bce_fw_timed_out = 1;
		rc = EBUSY;
	}
	return rc;
}

/****************************************************************************/
/* Load Receive Virtual 2 Physical (RV2P) processor firmware.               */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_load_rv2p_fw(struct bce_softc *sc, uint32_t *rv2p_code,
    uint32_t rv2p_code_len, uint32_t rv2p_proc)
{
	int i;
	uint32_t val;

	/* Each RV2P instruction is a 64-bit (high/low dword) pair. */
	for (i = 0; i < rv2p_code_len; i += 8) {
		REG_WR(sc, BCE_RV2P_INSTR_HIGH, *rv2p_code);
		rv2p_code++;
		REG_WR(sc, BCE_RV2P_INSTR_LOW, *rv2p_code);
		rv2p_code++;

		if (rv2p_proc == RV2P_PROC1) {
			val = (i / 8) | BCE_RV2P_PROC1_ADDR_CMD_RDWR;
			REG_WR(sc, BCE_RV2P_PROC1_ADDR_CMD, val);
		} else {
			val = (i / 8) | BCE_RV2P_PROC2_ADDR_CMD_RDWR;
			REG_WR(sc, BCE_RV2P_PROC2_ADDR_CMD, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1)
		REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC1_RESET);
	else
		REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC2_RESET);
}

/****************************************************************************/
/* Load RISC processor firmware.                                            */
/*                                                                          */
/* Loads firmware from the file if_bcefw.h into the scratchpad memory       */
/* associated with a particular processor.                                  */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
 */
/****************************************************************************/
static void
bce_load_cpu_fw(struct bce_softc *sc, struct cpu_reg *cpu_reg,
    struct fw_info *fw)
{
	uint32_t offset;
	int j;

	/* Keep the CPU halted while its scratchpad is rewritten. */
	bce_halt_cpu(sc, cpu_reg);

	/* Load the Text area. */
	offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
	if (fw->text) {
		for (j = 0; j < (fw->text_len / 4); j++, offset += 4)
			REG_WR_IND(sc, offset, fw->text[j]);
	}

	/* Load the Data area. */
	offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
	if (fw->data) {
		for (j = 0; j < (fw->data_len / 4); j++, offset += 4)
			REG_WR_IND(sc, offset, fw->data[j]);
	}

	/* Load the SBSS area. */
	offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
	if (fw->sbss) {
		for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4)
			REG_WR_IND(sc, offset, fw->sbss[j]);
	}

	/* Load the BSS area. */
	offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
	if (fw->bss) {
		for (j = 0; j < (fw->bss_len/4); j++, offset += 4)
			REG_WR_IND(sc, offset, fw->bss[j]);
	}

	/* Load the Read-Only area. */
	offset = cpu_reg->spad_base +
	    (fw->rodata_addr - cpu_reg->mips_view_base);
	if (fw->rodata) {
		for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4)
			REG_WR_IND(sc, offset, fw->rodata[j]);
	}

	/* Clear the pre-fetch instruction and set the FW start address. */
	REG_WR_IND(sc, cpu_reg->inst, 0);
	REG_WR_IND(sc, cpu_reg->pc, fw->start_addr);
}

/****************************************************************************/
/* Starts the RISC processor.                                               */
/*                                                                          */
/* Assumes the CPU starting address has already been set.                   */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
 */
/****************************************************************************/
static void
bce_start_cpu(struct bce_softc *sc, struct cpu_reg *cpu_reg)
{
	uint32_t val;

	/* Start the CPU: clear the halt bit and any pending events. */
	val = REG_RD_IND(sc, cpu_reg->mode);
	val &= ~cpu_reg->mode_value_halt;
	REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
	REG_WR_IND(sc, cpu_reg->mode, val);
}

/****************************************************************************/
/* Halts the RISC processor.                                                */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_halt_cpu(struct bce_softc *sc, struct cpu_reg *cpu_reg)
{
	uint32_t val;

	/* Halt the CPU: set the halt bit, then clear any pending events. */
	val = REG_RD_IND(sc, cpu_reg->mode);
	val |= cpu_reg->mode_value_halt;
	REG_WR_IND(sc, cpu_reg->mode, val);
	REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
}

/****************************************************************************/
/* Start the RX CPU.                                                        */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
 */
/****************************************************************************/
static void
bce_start_rxp_cpu(struct bce_softc *sc)
{
	struct cpu_reg cpu_reg;

	/* Describe the RXP register file for the generic CPU helpers. */
	cpu_reg.mode = BCE_RXP_CPU_MODE;
	cpu_reg.mode_value_halt = BCE_RXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BCE_RXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BCE_RXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BCE_RXP_CPU_REG_FILE;
	cpu_reg.evmask = BCE_RXP_CPU_EVENT_MASK;
	cpu_reg.pc = BCE_RXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BCE_RXP_CPU_INSTRUCTION;
	cpu_reg.bp = BCE_RXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BCE_RXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	bce_start_cpu(sc, &cpu_reg);
}

/****************************************************************************/
/* Initialize the RX CPU.                                                   */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_init_rxp_cpu(struct bce_softc *sc)
{
	struct cpu_reg cpu_reg;
	struct fw_info fw;

	cpu_reg.mode = BCE_RXP_CPU_MODE;
	cpu_reg.mode_value_halt = BCE_RXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BCE_RXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BCE_RXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BCE_RXP_CPU_REG_FILE;
	cpu_reg.evmask = BCE_RXP_CPU_EVENT_MASK;
	cpu_reg.pc = BCE_RXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BCE_RXP_CPU_INSTRUCTION;
	cpu_reg.bp = BCE_RXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BCE_RXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	/* Select the firmware image matching the controller family. */
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
		fw.ver_major = bce_RXP_b09FwReleaseMajor;
		fw.ver_minor = bce_RXP_b09FwReleaseMinor;
		fw.ver_fix = bce_RXP_b09FwReleaseFix;
		fw.start_addr = bce_RXP_b09FwStartAddr;

		fw.text_addr = bce_RXP_b09FwTextAddr;
		fw.text_len = bce_RXP_b09FwTextLen;
		fw.text_index = 0;
		fw.text = bce_RXP_b09FwText;

		fw.data_addr = bce_RXP_b09FwDataAddr;
		fw.data_len = bce_RXP_b09FwDataLen;
		fw.data_index = 0;
		fw.data = bce_RXP_b09FwData;

		fw.sbss_addr = bce_RXP_b09FwSbssAddr;
		fw.sbss_len = bce_RXP_b09FwSbssLen;
		fw.sbss_index = 0;
		fw.sbss = bce_RXP_b09FwSbss;

		fw.bss_addr = bce_RXP_b09FwBssAddr;
		fw.bss_len = bce_RXP_b09FwBssLen;
		fw.bss_index = 0;
		fw.bss = bce_RXP_b09FwBss;

		fw.rodata_addr = bce_RXP_b09FwRodataAddr;
		fw.rodata_len = bce_RXP_b09FwRodataLen;
		fw.rodata_index = 0;
		fw.rodata = bce_RXP_b09FwRodata;
	} else {
		fw.ver_major = bce_RXP_b06FwReleaseMajor;
		fw.ver_minor = bce_RXP_b06FwReleaseMinor;
		fw.ver_fix = bce_RXP_b06FwReleaseFix;
		fw.start_addr = bce_RXP_b06FwStartAddr;

		fw.text_addr = bce_RXP_b06FwTextAddr;
		fw.text_len = bce_RXP_b06FwTextLen;
		fw.text_index = 0;
		fw.text = bce_RXP_b06FwText;

		fw.data_addr = bce_RXP_b06FwDataAddr;
		fw.data_len = bce_RXP_b06FwDataLen;
		fw.data_index = 0;
		fw.data = bce_RXP_b06FwData;

		fw.sbss_addr = bce_RXP_b06FwSbssAddr;
		fw.sbss_len = bce_RXP_b06FwSbssLen;
		fw.sbss_index = 0;
		fw.sbss = bce_RXP_b06FwSbss;

		fw.bss_addr = bce_RXP_b06FwBssAddr;
		fw.bss_len = bce_RXP_b06FwBssLen;
		fw.bss_index = 0;
		fw.bss = bce_RXP_b06FwBss;

		fw.rodata_addr = bce_RXP_b06FwRodataAddr;
		fw.rodata_len = bce_RXP_b06FwRodataLen;
		fw.rodata_index = 0;
		fw.rodata = bce_RXP_b06FwRodata;
	}

	bce_load_cpu_fw(sc, &cpu_reg, &fw);
	/* Delay RXP start until initialization is complete. */
}

/****************************************************************************/
/* Initialize the TX CPU.                                                   */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
 */
/****************************************************************************/
static void
bce_init_txp_cpu(struct bce_softc *sc)
{
	struct cpu_reg cpu_reg;
	struct fw_info fw;

	/* Describe the TXP register file for the generic CPU helpers. */
	cpu_reg.mode = BCE_TXP_CPU_MODE;
	cpu_reg.mode_value_halt = BCE_TXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BCE_TXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BCE_TXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BCE_TXP_CPU_REG_FILE;
	cpu_reg.evmask = BCE_TXP_CPU_EVENT_MASK;
	cpu_reg.pc = BCE_TXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BCE_TXP_CPU_INSTRUCTION;
	cpu_reg.bp = BCE_TXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BCE_TXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	/* Select the firmware image matching the controller family. */
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
		fw.ver_major = bce_TXP_b09FwReleaseMajor;
		fw.ver_minor = bce_TXP_b09FwReleaseMinor;
		fw.ver_fix = bce_TXP_b09FwReleaseFix;
		fw.start_addr = bce_TXP_b09FwStartAddr;

		fw.text_addr = bce_TXP_b09FwTextAddr;
		fw.text_len = bce_TXP_b09FwTextLen;
		fw.text_index = 0;
		fw.text = bce_TXP_b09FwText;

		fw.data_addr = bce_TXP_b09FwDataAddr;
		fw.data_len = bce_TXP_b09FwDataLen;
		fw.data_index = 0;
		fw.data = bce_TXP_b09FwData;

		fw.sbss_addr = bce_TXP_b09FwSbssAddr;
		fw.sbss_len = bce_TXP_b09FwSbssLen;
		fw.sbss_index = 0;
		fw.sbss = bce_TXP_b09FwSbss;

		fw.bss_addr = bce_TXP_b09FwBssAddr;
		fw.bss_len = bce_TXP_b09FwBssLen;
		fw.bss_index = 0;
		fw.bss = bce_TXP_b09FwBss;

		fw.rodata_addr = bce_TXP_b09FwRodataAddr;
		fw.rodata_len = bce_TXP_b09FwRodataLen;
		fw.rodata_index = 0;
		fw.rodata = bce_TXP_b09FwRodata;
	} else {
		fw.ver_major = bce_TXP_b06FwReleaseMajor;
		fw.ver_minor = bce_TXP_b06FwReleaseMinor;
		fw.ver_fix = bce_TXP_b06FwReleaseFix;
		fw.start_addr = bce_TXP_b06FwStartAddr;

fw.text_addr = bce_TXP_b06FwTextAddr; 2950 fw.text_len = bce_TXP_b06FwTextLen; 2951 fw.text_index = 0; 2952 fw.text = bce_TXP_b06FwText; 2953 2954 fw.data_addr = bce_TXP_b06FwDataAddr; 2955 fw.data_len = bce_TXP_b06FwDataLen; 2956 fw.data_index = 0; 2957 fw.data = bce_TXP_b06FwData; 2958 2959 fw.sbss_addr = bce_TXP_b06FwSbssAddr; 2960 fw.sbss_len = bce_TXP_b06FwSbssLen; 2961 fw.sbss_index = 0; 2962 fw.sbss = bce_TXP_b06FwSbss; 2963 2964 fw.bss_addr = bce_TXP_b06FwBssAddr; 2965 fw.bss_len = bce_TXP_b06FwBssLen; 2966 fw.bss_index = 0; 2967 fw.bss = bce_TXP_b06FwBss; 2968 2969 fw.rodata_addr = bce_TXP_b06FwRodataAddr; 2970 fw.rodata_len = bce_TXP_b06FwRodataLen; 2971 fw.rodata_index = 0; 2972 fw.rodata = bce_TXP_b06FwRodata; 2973 } 2974 2975 bce_load_cpu_fw(sc, &cpu_reg, &fw); 2976 bce_start_cpu(sc, &cpu_reg); 2977 } 2978 2979 /****************************************************************************/ 2980 /* Initialize the TPAT CPU. */ 2981 /* */ 2982 /* Returns: */ 2983 /* Nothing. */ 2984 /****************************************************************************/ 2985 static void 2986 bce_init_tpat_cpu(struct bce_softc *sc) 2987 { 2988 struct cpu_reg cpu_reg; 2989 struct fw_info fw; 2990 2991 cpu_reg.mode = BCE_TPAT_CPU_MODE; 2992 cpu_reg.mode_value_halt = BCE_TPAT_CPU_MODE_SOFT_HALT; 2993 cpu_reg.mode_value_sstep = BCE_TPAT_CPU_MODE_STEP_ENA; 2994 cpu_reg.state = BCE_TPAT_CPU_STATE; 2995 cpu_reg.state_value_clear = 0xffffff; 2996 cpu_reg.gpr0 = BCE_TPAT_CPU_REG_FILE; 2997 cpu_reg.evmask = BCE_TPAT_CPU_EVENT_MASK; 2998 cpu_reg.pc = BCE_TPAT_CPU_PROGRAM_COUNTER; 2999 cpu_reg.inst = BCE_TPAT_CPU_INSTRUCTION; 3000 cpu_reg.bp = BCE_TPAT_CPU_HW_BREAKPOINT; 3001 cpu_reg.spad_base = BCE_TPAT_SCRATCH; 3002 cpu_reg.mips_view_base = 0x8000000; 3003 3004 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 || 3005 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) { 3006 fw.ver_major = bce_TPAT_b09FwReleaseMajor; 3007 fw.ver_minor = bce_TPAT_b09FwReleaseMinor; 3008 fw.ver_fix = 
bce_TPAT_b09FwReleaseFix; 3009 fw.start_addr = bce_TPAT_b09FwStartAddr; 3010 3011 fw.text_addr = bce_TPAT_b09FwTextAddr; 3012 fw.text_len = bce_TPAT_b09FwTextLen; 3013 fw.text_index = 0; 3014 fw.text = bce_TPAT_b09FwText; 3015 3016 fw.data_addr = bce_TPAT_b09FwDataAddr; 3017 fw.data_len = bce_TPAT_b09FwDataLen; 3018 fw.data_index = 0; 3019 fw.data = bce_TPAT_b09FwData; 3020 3021 fw.sbss_addr = bce_TPAT_b09FwSbssAddr; 3022 fw.sbss_len = bce_TPAT_b09FwSbssLen; 3023 fw.sbss_index = 0; 3024 fw.sbss = bce_TPAT_b09FwSbss; 3025 3026 fw.bss_addr = bce_TPAT_b09FwBssAddr; 3027 fw.bss_len = bce_TPAT_b09FwBssLen; 3028 fw.bss_index = 0; 3029 fw.bss = bce_TPAT_b09FwBss; 3030 3031 fw.rodata_addr = bce_TPAT_b09FwRodataAddr; 3032 fw.rodata_len = bce_TPAT_b09FwRodataLen; 3033 fw.rodata_index = 0; 3034 fw.rodata = bce_TPAT_b09FwRodata; 3035 } else { 3036 fw.ver_major = bce_TPAT_b06FwReleaseMajor; 3037 fw.ver_minor = bce_TPAT_b06FwReleaseMinor; 3038 fw.ver_fix = bce_TPAT_b06FwReleaseFix; 3039 fw.start_addr = bce_TPAT_b06FwStartAddr; 3040 3041 fw.text_addr = bce_TPAT_b06FwTextAddr; 3042 fw.text_len = bce_TPAT_b06FwTextLen; 3043 fw.text_index = 0; 3044 fw.text = bce_TPAT_b06FwText; 3045 3046 fw.data_addr = bce_TPAT_b06FwDataAddr; 3047 fw.data_len = bce_TPAT_b06FwDataLen; 3048 fw.data_index = 0; 3049 fw.data = bce_TPAT_b06FwData; 3050 3051 fw.sbss_addr = bce_TPAT_b06FwSbssAddr; 3052 fw.sbss_len = bce_TPAT_b06FwSbssLen; 3053 fw.sbss_index = 0; 3054 fw.sbss = bce_TPAT_b06FwSbss; 3055 3056 fw.bss_addr = bce_TPAT_b06FwBssAddr; 3057 fw.bss_len = bce_TPAT_b06FwBssLen; 3058 fw.bss_index = 0; 3059 fw.bss = bce_TPAT_b06FwBss; 3060 3061 fw.rodata_addr = bce_TPAT_b06FwRodataAddr; 3062 fw.rodata_len = bce_TPAT_b06FwRodataLen; 3063 fw.rodata_index = 0; 3064 fw.rodata = bce_TPAT_b06FwRodata; 3065 } 3066 3067 bce_load_cpu_fw(sc, &cpu_reg, &fw); 3068 bce_start_cpu(sc, &cpu_reg); 3069 } 3070 3071 /****************************************************************************/ 3072 /* Initialize the CP CPU. 
*/
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_init_cp_cpu(struct bce_softc *sc)
{
	struct cpu_reg cpu_reg;
	struct fw_info fw;

	/* Register map of the command processor's embedded RISC CPU. */
	cpu_reg.mode = BCE_CP_CPU_MODE;
	cpu_reg.mode_value_halt = BCE_CP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BCE_CP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BCE_CP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BCE_CP_CPU_REG_FILE;
	cpu_reg.evmask = BCE_CP_CPU_EVENT_MASK;
	cpu_reg.pc = BCE_CP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BCE_CP_CPU_INSTRUCTION;
	cpu_reg.bp = BCE_CP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BCE_CP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	/* Select the firmware image matching the controller generation. */
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
		fw.ver_major = bce_CP_b09FwReleaseMajor;
		fw.ver_minor = bce_CP_b09FwReleaseMinor;
		fw.ver_fix = bce_CP_b09FwReleaseFix;
		fw.start_addr = bce_CP_b09FwStartAddr;

		fw.text_addr = bce_CP_b09FwTextAddr;
		fw.text_len = bce_CP_b09FwTextLen;
		fw.text_index = 0;
		fw.text = bce_CP_b09FwText;

		fw.data_addr = bce_CP_b09FwDataAddr;
		fw.data_len = bce_CP_b09FwDataLen;
		fw.data_index = 0;
		fw.data = bce_CP_b09FwData;

		fw.sbss_addr = bce_CP_b09FwSbssAddr;
		fw.sbss_len = bce_CP_b09FwSbssLen;
		fw.sbss_index = 0;
		fw.sbss = bce_CP_b09FwSbss;

		fw.bss_addr = bce_CP_b09FwBssAddr;
		fw.bss_len = bce_CP_b09FwBssLen;
		fw.bss_index = 0;
		fw.bss = bce_CP_b09FwBss;

		fw.rodata_addr = bce_CP_b09FwRodataAddr;
		fw.rodata_len = bce_CP_b09FwRodataLen;
		fw.rodata_index = 0;
		fw.rodata = bce_CP_b09FwRodata;
	} else {
		fw.ver_major = bce_CP_b06FwReleaseMajor;
		fw.ver_minor = bce_CP_b06FwReleaseMinor;
		fw.ver_fix = bce_CP_b06FwReleaseFix;
		fw.start_addr = bce_CP_b06FwStartAddr;

		fw.text_addr = bce_CP_b06FwTextAddr;
		fw.text_len = bce_CP_b06FwTextLen;
		fw.text_index = 0;
		fw.text = bce_CP_b06FwText;

		fw.data_addr = bce_CP_b06FwDataAddr;
		fw.data_len = bce_CP_b06FwDataLen;
		fw.data_index = 0;
		fw.data = bce_CP_b06FwData;

		fw.sbss_addr = bce_CP_b06FwSbssAddr;
		fw.sbss_len = bce_CP_b06FwSbssLen;
		fw.sbss_index = 0;
		fw.sbss = bce_CP_b06FwSbss;

		fw.bss_addr = bce_CP_b06FwBssAddr;
		fw.bss_len = bce_CP_b06FwBssLen;
		fw.bss_index = 0;
		fw.bss = bce_CP_b06FwBss;

		fw.rodata_addr = bce_CP_b06FwRodataAddr;
		fw.rodata_len = bce_CP_b06FwRodataLen;
		fw.rodata_index = 0;
		fw.rodata = bce_CP_b06FwRodata;
	}

	/* Download the firmware and let the CP processor run. */
	bce_load_cpu_fw(sc, &cpu_reg, &fw);
	bce_start_cpu(sc, &cpu_reg);
}

/****************************************************************************/
/* Initialize the COM CPU.                                                  */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_init_com_cpu(struct bce_softc *sc)
{
	struct cpu_reg cpu_reg;
	struct fw_info fw;

	/* Register map of the completion processor's embedded RISC CPU. */
	cpu_reg.mode = BCE_COM_CPU_MODE;
	cpu_reg.mode_value_halt = BCE_COM_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BCE_COM_CPU_MODE_STEP_ENA;
	cpu_reg.state = BCE_COM_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BCE_COM_CPU_REG_FILE;
	cpu_reg.evmask = BCE_COM_CPU_EVENT_MASK;
	cpu_reg.pc = BCE_COM_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BCE_COM_CPU_INSTRUCTION;
	cpu_reg.bp = BCE_COM_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BCE_COM_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	/* Select the firmware image matching the controller generation. */
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
		fw.ver_major = bce_COM_b09FwReleaseMajor;
		fw.ver_minor = bce_COM_b09FwReleaseMinor;
		fw.ver_fix = bce_COM_b09FwReleaseFix;
		fw.start_addr = bce_COM_b09FwStartAddr;

		fw.text_addr = bce_COM_b09FwTextAddr;
		fw.text_len = bce_COM_b09FwTextLen;
		fw.text_index = 0;
		fw.text = bce_COM_b09FwText;

		fw.data_addr = bce_COM_b09FwDataAddr;
		fw.data_len = bce_COM_b09FwDataLen;
		fw.data_index = 0;
		fw.data = bce_COM_b09FwData;

		fw.sbss_addr = bce_COM_b09FwSbssAddr;
		fw.sbss_len = bce_COM_b09FwSbssLen;
		fw.sbss_index = 0;
		fw.sbss = bce_COM_b09FwSbss;

		fw.bss_addr = bce_COM_b09FwBssAddr;
		fw.bss_len = bce_COM_b09FwBssLen;
		fw.bss_index = 0;
		fw.bss = bce_COM_b09FwBss;

		fw.rodata_addr = bce_COM_b09FwRodataAddr;
		fw.rodata_len = bce_COM_b09FwRodataLen;
		fw.rodata_index = 0;
		fw.rodata = bce_COM_b09FwRodata;
	} else {
		fw.ver_major = bce_COM_b06FwReleaseMajor;
		fw.ver_minor = bce_COM_b06FwReleaseMinor;
		fw.ver_fix = bce_COM_b06FwReleaseFix;
		fw.start_addr = bce_COM_b06FwStartAddr;

		fw.text_addr = bce_COM_b06FwTextAddr;
		fw.text_len = bce_COM_b06FwTextLen;
		fw.text_index = 0;
		fw.text = bce_COM_b06FwText;

		fw.data_addr = bce_COM_b06FwDataAddr;
		fw.data_len = bce_COM_b06FwDataLen;
		fw.data_index = 0;
		fw.data = bce_COM_b06FwData;

		fw.sbss_addr = bce_COM_b06FwSbssAddr;
		fw.sbss_len = bce_COM_b06FwSbssLen;
		fw.sbss_index = 0;
		fw.sbss = bce_COM_b06FwSbss;

		fw.bss_addr = bce_COM_b06FwBssAddr;
		fw.bss_len = bce_COM_b06FwBssLen;
		fw.bss_index = 0;
		fw.bss = bce_COM_b06FwBss;

		fw.rodata_addr = bce_COM_b06FwRodataAddr;
		fw.rodata_len = bce_COM_b06FwRodataLen;
		fw.rodata_index = 0;
		fw.rodata = bce_COM_b06FwRodata;
	}

	/* Download the firmware and let the COM processor run. */
	bce_load_cpu_fw(sc, &cpu_reg, &fw);
	bce_start_cpu(sc, &cpu_reg);
}

/****************************************************************************/
/* Initialize the RV2P, RX, TX, TPAT, COM, and CP CPUs.
*/
/*                                                                          */
/* Loads the firmware for each CPU and starts the CPU.                      */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_init_cpus(struct bce_softc *sc)
{
	/*
	 * The RV2P (RX-to-V2P) processors take raw firmware arrays rather
	 * than a fw_info descriptor; pick the image matching the chip
	 * generation (and, for 5709/5716, the Ax vs. later revision).
	 */
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
		if (BCE_CHIP_REV(sc) == BCE_CHIP_REV_Ax) {
			bce_load_rv2p_fw(sc, bce_xi90_rv2p_proc1,
			    sizeof(bce_xi90_rv2p_proc1), RV2P_PROC1);
			bce_load_rv2p_fw(sc, bce_xi90_rv2p_proc2,
			    sizeof(bce_xi90_rv2p_proc2), RV2P_PROC2);
		} else {
			bce_load_rv2p_fw(sc, bce_xi_rv2p_proc1,
			    sizeof(bce_xi_rv2p_proc1), RV2P_PROC1);
			bce_load_rv2p_fw(sc, bce_xi_rv2p_proc2,
			    sizeof(bce_xi_rv2p_proc2), RV2P_PROC2);
		}
	} else {
		bce_load_rv2p_fw(sc, bce_rv2p_proc1,
		    sizeof(bce_rv2p_proc1), RV2P_PROC1);
		bce_load_rv2p_fw(sc, bce_rv2p_proc2,
		    sizeof(bce_rv2p_proc2), RV2P_PROC2);
	}

	/*
	 * Load firmware into each on-chip processor.  Note that the RXP
	 * is only loaded here; it is started later (see bce_blockinit()).
	 */
	bce_init_rxp_cpu(sc);
	bce_init_txp_cpu(sc);
	bce_init_tpat_cpu(sc);
	bce_init_com_cpu(sc);
	bce_init_cp_cpu(sc);
}

/****************************************************************************/
/* Initialize context memory.                                               */
/*                                                                          */
/* Clears the memory associated with each Context ID (CID).                 */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value (ETIMEDOUT) for failure.                 */
/****************************************************************************/
static int
bce_init_ctx(struct bce_softc *sc)
{
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
		/* DRC: Replace this constant value with a #define. */
		int i, retry_cnt = 10;
		uint32_t val;

		/*
		 * BCM5709 context memory may be cached
		 * in host memory so prepare the host memory
		 * for access.
		 */
		val = BCE_CTX_COMMAND_ENABLED | BCE_CTX_COMMAND_MEM_INIT |
		    (1 << 12);
		val |= (BCM_PAGE_BITS - 8) << 16;
		REG_WR(sc, BCE_CTX_COMMAND, val);

		/* Wait for mem init command to complete. */
		for (i = 0; i < retry_cnt; i++) {
			val = REG_RD(sc, BCE_CTX_COMMAND);
			if (!(val & BCE_CTX_COMMAND_MEM_INIT))
				break;
			DELAY(2);
		}
		if (i == retry_cnt) {
			device_printf(sc->bce_dev,
			    "Context memory initialization failed!\n");
			return ETIMEDOUT;
		}

		/* Install each host context page into the page table. */
		for (i = 0; i < sc->ctx_pages; i++) {
			int j;

			/*
			 * Set the physical address of the context
			 * memory cache.
			 */
			REG_WR(sc, BCE_CTX_HOST_PAGE_TBL_DATA0,
			    BCE_ADDR_LO(sc->ctx_paddr[i] & 0xfffffff0) |
			    BCE_CTX_HOST_PAGE_TBL_DATA0_VALID);
			REG_WR(sc, BCE_CTX_HOST_PAGE_TBL_DATA1,
			    BCE_ADDR_HI(sc->ctx_paddr[i]));
			REG_WR(sc, BCE_CTX_HOST_PAGE_TBL_CTRL,
			    i | BCE_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);

			/*
			 * Verify that the context memory write was
			 * successful (hardware clears WRITE_REQ when done).
			 */
			for (j = 0; j < retry_cnt; j++) {
				val = REG_RD(sc, BCE_CTX_HOST_PAGE_TBL_CTRL);
				if ((val &
				    BCE_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) == 0)
					break;
				DELAY(5);
			}
			if (j == retry_cnt) {
				device_printf(sc->bce_dev,
				    "Failed to initialize context page!\n");
				return ETIMEDOUT;
			}
		}
	} else {
		uint32_t vcid_addr, offset;

		/*
		 * For the 5706/5708, context memory is local to
		 * the controller, so initialize the controller
		 * context memory.
		 */

		vcid_addr = GET_CID_ADDR(96);
		/* Walk the context space backwards, zeroing each page. */
		while (vcid_addr) {
			vcid_addr -= PHY_CTX_SIZE;

			REG_WR(sc, BCE_CTX_VIRT_ADDR, 0);
			REG_WR(sc, BCE_CTX_PAGE_TBL, vcid_addr);

			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
				CTX_WR(sc, 0x00, offset, 0);

			REG_WR(sc, BCE_CTX_VIRT_ADDR, vcid_addr);
			REG_WR(sc, BCE_CTX_PAGE_TBL, vcid_addr);
		}
	}
	return 0;
}

/****************************************************************************/
/* Fetch the permanent MAC address of the controller.                       */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_get_mac_addr(struct bce_softc *sc)
{
	uint32_t mac_lo = 0, mac_hi = 0;

	/*
	 * The NetXtreme II bootcode populates various NIC
	 * power-on and runtime configuration items in a
	 * shared memory area.  The factory configured MAC
	 * address is available from both NVRAM and the
	 * shared memory area so we'll read the value from
	 * shared memory for speed.
	 */

	mac_hi = bce_shmem_rd(sc, BCE_PORT_HW_CFG_MAC_UPPER);
	mac_lo = bce_shmem_rd(sc, BCE_PORT_HW_CFG_MAC_LOWER);

	if (mac_lo == 0 && mac_hi == 0) {
		if_printf(&sc->arpcom.ac_if, "Invalid Ethernet address!\n");
	} else {
		/* Unpack the 48-bit address: 2 bytes in hi, 4 in lo. */
		sc->eaddr[0] = (u_char)(mac_hi >> 8);
		sc->eaddr[1] = (u_char)(mac_hi >> 0);
		sc->eaddr[2] = (u_char)(mac_lo >> 24);
		sc->eaddr[3] = (u_char)(mac_lo >> 16);
		sc->eaddr[4] = (u_char)(mac_lo >> 8);
		sc->eaddr[5] = (u_char)(mac_lo >> 0);
	}
}

/****************************************************************************/
/* Program the MAC address.                                                 */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
*/ 3430 /****************************************************************************/ 3431 static void 3432 bce_set_mac_addr(struct bce_softc *sc) 3433 { 3434 const uint8_t *mac_addr = sc->eaddr; 3435 uint32_t val; 3436 3437 val = (mac_addr[0] << 8) | mac_addr[1]; 3438 REG_WR(sc, BCE_EMAC_MAC_MATCH0, val); 3439 3440 val = (mac_addr[2] << 24) | 3441 (mac_addr[3] << 16) | 3442 (mac_addr[4] << 8) | 3443 mac_addr[5]; 3444 REG_WR(sc, BCE_EMAC_MAC_MATCH1, val); 3445 } 3446 3447 /****************************************************************************/ 3448 /* Stop the controller. */ 3449 /* */ 3450 /* Returns: */ 3451 /* Nothing. */ 3452 /****************************************************************************/ 3453 static void 3454 bce_stop(struct bce_softc *sc) 3455 { 3456 struct ifnet *ifp = &sc->arpcom.ac_if; 3457 int i; 3458 3459 ASSERT_IFNET_SERIALIZED_ALL(ifp); 3460 3461 callout_stop(&sc->bce_tick_callout); 3462 3463 /* Disable the transmit/receive blocks. */ 3464 REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS, BCE_MISC_ENABLE_CLR_DEFAULT); 3465 REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS); 3466 DELAY(20); 3467 3468 bce_disable_intr(sc); 3469 3470 ifp->if_flags &= ~IFF_RUNNING; 3471 for (i = 0; i < sc->tx_ring_cnt; ++i) { 3472 ifsq_clr_oactive(sc->tx_rings[i].ifsq); 3473 ifsq_watchdog_stop(&sc->tx_rings[i].tx_watchdog); 3474 } 3475 3476 /* Free the RX lists. */ 3477 for (i = 0; i < sc->rx_ring_cnt; ++i) 3478 bce_free_rx_chain(&sc->rx_rings[i]); 3479 3480 /* Free TX buffers. */ 3481 for (i = 0; i < sc->tx_ring_cnt; ++i) 3482 bce_free_tx_chain(&sc->tx_rings[i]); 3483 3484 sc->bce_link = 0; 3485 sc->bce_coalchg_mask = 0; 3486 } 3487 3488 static int 3489 bce_reset(struct bce_softc *sc, uint32_t reset_code) 3490 { 3491 uint32_t val; 3492 int i, rc = 0; 3493 3494 /* Wait for pending PCI transactions to complete. 
*/ 3495 REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS, 3496 BCE_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE | 3497 BCE_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE | 3498 BCE_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE | 3499 BCE_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE); 3500 val = REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS); 3501 DELAY(5); 3502 3503 /* Disable DMA */ 3504 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 || 3505 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) { 3506 val = REG_RD(sc, BCE_MISC_NEW_CORE_CTL); 3507 val &= ~BCE_MISC_NEW_CORE_CTL_DMA_ENABLE; 3508 REG_WR(sc, BCE_MISC_NEW_CORE_CTL, val); 3509 } 3510 3511 /* Assume bootcode is running. */ 3512 sc->bce_fw_timed_out = 0; 3513 sc->bce_drv_cardiac_arrest = 0; 3514 3515 /* Give the firmware a chance to prepare for the reset. */ 3516 rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT0 | reset_code); 3517 if (rc) { 3518 if_printf(&sc->arpcom.ac_if, 3519 "Firmware is not ready for reset\n"); 3520 return rc; 3521 } 3522 3523 /* Set a firmware reminder that this is a soft reset. */ 3524 bce_shmem_wr(sc, BCE_DRV_RESET_SIGNATURE, 3525 BCE_DRV_RESET_SIGNATURE_MAGIC); 3526 3527 /* Dummy read to force the chip to complete all current transactions. */ 3528 val = REG_RD(sc, BCE_MISC_ID); 3529 3530 /* Chip reset. */ 3531 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 || 3532 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) { 3533 REG_WR(sc, BCE_MISC_COMMAND, BCE_MISC_COMMAND_SW_RESET); 3534 REG_RD(sc, BCE_MISC_COMMAND); 3535 DELAY(5); 3536 3537 val = BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA | 3538 BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP; 3539 3540 pci_write_config(sc->bce_dev, BCE_PCICFG_MISC_CONFIG, val, 4); 3541 } else { 3542 val = BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ | 3543 BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA | 3544 BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP; 3545 REG_WR(sc, BCE_PCICFG_MISC_CONFIG, val); 3546 3547 /* Allow up to 30us for reset to complete. 
*/ 3548 for (i = 0; i < 10; i++) { 3549 val = REG_RD(sc, BCE_PCICFG_MISC_CONFIG); 3550 if ((val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ | 3551 BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) 3552 break; 3553 DELAY(10); 3554 } 3555 3556 /* Check that reset completed successfully. */ 3557 if (val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ | 3558 BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) { 3559 if_printf(&sc->arpcom.ac_if, "Reset failed!\n"); 3560 return EBUSY; 3561 } 3562 } 3563 3564 /* Make sure byte swapping is properly configured. */ 3565 val = REG_RD(sc, BCE_PCI_SWAP_DIAG0); 3566 if (val != 0x01020304) { 3567 if_printf(&sc->arpcom.ac_if, "Byte swap is incorrect!\n"); 3568 return ENODEV; 3569 } 3570 3571 /* Just completed a reset, assume that firmware is running again. */ 3572 sc->bce_fw_timed_out = 0; 3573 sc->bce_drv_cardiac_arrest = 0; 3574 3575 /* Wait for the firmware to finish its initialization. */ 3576 rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT1 | reset_code); 3577 if (rc) { 3578 if_printf(&sc->arpcom.ac_if, 3579 "Firmware did not complete initialization!\n"); 3580 } 3581 3582 if (sc->bce_irq_type == PCI_INTR_TYPE_MSIX) { 3583 bce_setup_msix_table(sc); 3584 /* Prevent MSIX table reads and write from timing out */ 3585 REG_WR(sc, BCE_MISC_ECO_HW_CTL, 3586 BCE_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN); 3587 3588 } 3589 return rc; 3590 } 3591 3592 static int 3593 bce_chipinit(struct bce_softc *sc) 3594 { 3595 uint32_t val; 3596 int rc = 0; 3597 3598 /* Make sure the interrupt is not active. */ 3599 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, BCE_PCICFG_INT_ACK_CMD_MASK_INT); 3600 REG_RD(sc, BCE_PCICFG_INT_ACK_CMD); 3601 3602 /* 3603 * Initialize DMA byte/word swapping, configure the number of DMA 3604 * channels and PCI clock compensation delay. 
3605 */ 3606 val = BCE_DMA_CONFIG_DATA_BYTE_SWAP | 3607 BCE_DMA_CONFIG_DATA_WORD_SWAP | 3608 #if BYTE_ORDER == BIG_ENDIAN 3609 BCE_DMA_CONFIG_CNTL_BYTE_SWAP | 3610 #endif 3611 BCE_DMA_CONFIG_CNTL_WORD_SWAP | 3612 DMA_READ_CHANS << 12 | 3613 DMA_WRITE_CHANS << 16; 3614 3615 val |= (0x2 << 20) | BCE_DMA_CONFIG_CNTL_PCI_COMP_DLY; 3616 3617 if ((sc->bce_flags & BCE_PCIX_FLAG) && sc->bus_speed_mhz == 133) 3618 val |= BCE_DMA_CONFIG_PCI_FAST_CLK_CMP; 3619 3620 /* 3621 * This setting resolves a problem observed on certain Intel PCI 3622 * chipsets that cannot handle multiple outstanding DMA operations. 3623 * See errata E9_5706A1_65. 3624 */ 3625 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706 && 3626 BCE_CHIP_ID(sc) != BCE_CHIP_ID_5706_A0 && 3627 !(sc->bce_flags & BCE_PCIX_FLAG)) 3628 val |= BCE_DMA_CONFIG_CNTL_PING_PONG_DMA; 3629 3630 REG_WR(sc, BCE_DMA_CONFIG, val); 3631 3632 /* Enable the RX_V2P and Context state machines before access. */ 3633 REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, 3634 BCE_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE | 3635 BCE_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE | 3636 BCE_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE); 3637 3638 /* Initialize context mapping and zero out the quick contexts. */ 3639 rc = bce_init_ctx(sc); 3640 if (rc != 0) 3641 return rc; 3642 3643 /* Initialize the on-boards CPUs */ 3644 bce_init_cpus(sc); 3645 3646 /* Enable management frames (NC-SI) to flow to the MCP. */ 3647 if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) { 3648 val = REG_RD(sc, BCE_RPM_MGMT_PKT_CTRL) | 3649 BCE_RPM_MGMT_PKT_CTRL_MGMT_EN; 3650 REG_WR(sc, BCE_RPM_MGMT_PKT_CTRL, val); 3651 } 3652 3653 /* Prepare NVRAM for access. */ 3654 rc = bce_init_nvram(sc); 3655 if (rc != 0) 3656 return rc; 3657 3658 /* Set the kernel bypass block size */ 3659 val = REG_RD(sc, BCE_MQ_CONFIG); 3660 val &= ~BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE; 3661 val |= BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE_256; 3662 3663 /* Enable bins used on the 5709/5716. 
*/ 3664 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 || 3665 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) { 3666 val |= BCE_MQ_CONFIG_BIN_MQ_MODE; 3667 if (BCE_CHIP_ID(sc) == BCE_CHIP_ID_5709_A1) 3668 val |= BCE_MQ_CONFIG_HALT_DIS; 3669 } 3670 3671 REG_WR(sc, BCE_MQ_CONFIG, val); 3672 3673 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE); 3674 REG_WR(sc, BCE_MQ_KNL_BYP_WIND_START, val); 3675 REG_WR(sc, BCE_MQ_KNL_WIND_END, val); 3676 3677 /* Set the page size and clear the RV2P processor stall bits. */ 3678 val = (BCM_PAGE_BITS - 8) << 24; 3679 REG_WR(sc, BCE_RV2P_CONFIG, val); 3680 3681 /* Configure page size. */ 3682 val = REG_RD(sc, BCE_TBDR_CONFIG); 3683 val &= ~BCE_TBDR_CONFIG_PAGE_SIZE; 3684 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40; 3685 REG_WR(sc, BCE_TBDR_CONFIG, val); 3686 3687 /* Set the perfect match control register to default. */ 3688 REG_WR_IND(sc, BCE_RXP_PM_CTRL, 0); 3689 3690 return 0; 3691 } 3692 3693 /****************************************************************************/ 3694 /* Initialize the controller in preparation to send/receive traffic. */ 3695 /* */ 3696 /* Returns: */ 3697 /* 0 for success, positive value for failure. */ 3698 /****************************************************************************/ 3699 static int 3700 bce_blockinit(struct bce_softc *sc) 3701 { 3702 uint32_t reg, val; 3703 int i; 3704 3705 /* Load the hardware default MAC address. */ 3706 bce_set_mac_addr(sc); 3707 3708 /* Set the Ethernet backoff seed value */ 3709 val = sc->eaddr[0] + (sc->eaddr[1] << 8) + (sc->eaddr[2] << 16) + 3710 sc->eaddr[3] + (sc->eaddr[4] << 8) + (sc->eaddr[5] << 16); 3711 REG_WR(sc, BCE_EMAC_BACKOFF_SEED, val); 3712 3713 sc->rx_mode = BCE_EMAC_RX_MODE_SORT_MODE; 3714 3715 /* Set up link change interrupt generation. */ 3716 REG_WR(sc, BCE_EMAC_ATTENTION_ENA, BCE_EMAC_ATTENTION_ENA_LINK); 3717 3718 /* Program the physical address of the status block. 
*/ 3719 REG_WR(sc, BCE_HC_STATUS_ADDR_L, BCE_ADDR_LO(sc->status_block_paddr)); 3720 REG_WR(sc, BCE_HC_STATUS_ADDR_H, BCE_ADDR_HI(sc->status_block_paddr)); 3721 3722 /* Program the physical address of the statistics block. */ 3723 REG_WR(sc, BCE_HC_STATISTICS_ADDR_L, 3724 BCE_ADDR_LO(sc->stats_block_paddr)); 3725 REG_WR(sc, BCE_HC_STATISTICS_ADDR_H, 3726 BCE_ADDR_HI(sc->stats_block_paddr)); 3727 3728 /* Program various host coalescing parameters. */ 3729 REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP, 3730 (sc->bce_tx_quick_cons_trip_int << 16) | 3731 sc->bce_tx_quick_cons_trip); 3732 REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP, 3733 (sc->bce_rx_quick_cons_trip_int << 16) | 3734 sc->bce_rx_quick_cons_trip); 3735 REG_WR(sc, BCE_HC_COMP_PROD_TRIP, 3736 (sc->bce_comp_prod_trip_int << 16) | sc->bce_comp_prod_trip); 3737 REG_WR(sc, BCE_HC_TX_TICKS, 3738 (sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks); 3739 REG_WR(sc, BCE_HC_RX_TICKS, 3740 (sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks); 3741 REG_WR(sc, BCE_HC_COM_TICKS, 3742 (sc->bce_com_ticks_int << 16) | sc->bce_com_ticks); 3743 REG_WR(sc, BCE_HC_CMD_TICKS, 3744 (sc->bce_cmd_ticks_int << 16) | sc->bce_cmd_ticks); 3745 REG_WR(sc, BCE_HC_STATS_TICKS, (sc->bce_stats_ticks & 0xffff00)); 3746 REG_WR(sc, BCE_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */ 3747 3748 if (sc->bce_irq_type == PCI_INTR_TYPE_MSIX) 3749 REG_WR(sc, BCE_HC_MSIX_BIT_VECTOR, BCE_HC_MSIX_BIT_VECTOR_VAL); 3750 3751 val = BCE_HC_CONFIG_TX_TMR_MODE | BCE_HC_CONFIG_COLLECT_STATS; 3752 if ((sc->bce_flags & BCE_ONESHOT_MSI_FLAG) || 3753 sc->bce_irq_type == PCI_INTR_TYPE_MSIX) { 3754 if (bootverbose) { 3755 if (sc->bce_irq_type == PCI_INTR_TYPE_MSIX) { 3756 if_printf(&sc->arpcom.ac_if, 3757 "using MSI-X\n"); 3758 } else { 3759 if_printf(&sc->arpcom.ac_if, 3760 "using oneshot MSI\n"); 3761 } 3762 } 3763 val |= BCE_HC_CONFIG_ONE_SHOT | BCE_HC_CONFIG_USE_INT_PARAM; 3764 if (sc->bce_irq_type == PCI_INTR_TYPE_MSIX) 3765 val |= BCE_HC_CONFIG_SB_ADDR_INC_128B; 3766 } 3767 REG_WR(sc, 
BCE_HC_CONFIG, val); 3768 3769 for (i = 1; i < sc->rx_ring_cnt; ++i) { 3770 uint32_t base; 3771 3772 base = ((i - 1) * BCE_HC_SB_CONFIG_SIZE) + BCE_HC_SB_CONFIG_1; 3773 KKASSERT(base <= BCE_HC_SB_CONFIG_8); 3774 3775 REG_WR(sc, base, 3776 BCE_HC_SB_CONFIG_1_TX_TMR_MODE | 3777 /* BCE_HC_SB_CONFIG_1_RX_TMR_MODE | */ 3778 BCE_HC_SB_CONFIG_1_ONE_SHOT); 3779 3780 REG_WR(sc, base + BCE_HC_TX_QUICK_CONS_TRIP_OFF, 3781 (sc->bce_tx_quick_cons_trip_int << 16) | 3782 sc->bce_tx_quick_cons_trip); 3783 REG_WR(sc, base + BCE_HC_RX_QUICK_CONS_TRIP_OFF, 3784 (sc->bce_rx_quick_cons_trip_int << 16) | 3785 sc->bce_rx_quick_cons_trip); 3786 REG_WR(sc, base + BCE_HC_TX_TICKS_OFF, 3787 (sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks); 3788 REG_WR(sc, base + BCE_HC_RX_TICKS_OFF, 3789 (sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks); 3790 } 3791 3792 /* Clear the internal statistics counters. */ 3793 REG_WR(sc, BCE_HC_COMMAND, BCE_HC_COMMAND_CLR_STAT_NOW); 3794 3795 /* Verify that bootcode is running. */ 3796 reg = bce_shmem_rd(sc, BCE_DEV_INFO_SIGNATURE); 3797 3798 if ((reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK) != 3799 BCE_DEV_INFO_SIGNATURE_MAGIC) { 3800 if_printf(&sc->arpcom.ac_if, 3801 "Bootcode not running! Found: 0x%08X, " 3802 "Expected: 08%08X\n", 3803 reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK, 3804 BCE_DEV_INFO_SIGNATURE_MAGIC); 3805 return ENODEV; 3806 } 3807 3808 /* Enable DMA */ 3809 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 || 3810 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) { 3811 val = REG_RD(sc, BCE_MISC_NEW_CORE_CTL); 3812 val |= BCE_MISC_NEW_CORE_CTL_DMA_ENABLE; 3813 REG_WR(sc, BCE_MISC_NEW_CORE_CTL, val); 3814 } 3815 3816 /* Allow bootcode to apply any additional fixes before enabling MAC. */ 3817 bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT2 | BCE_DRV_MSG_CODE_RESET); 3818 3819 /* Enable link state change interrupt generation. */ 3820 REG_WR(sc, BCE_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE); 3821 3822 /* Enable the RXP. 
*/ 3823 bce_start_rxp_cpu(sc); 3824 3825 /* Disable management frames (NC-SI) from flowing to the MCP. */ 3826 if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) { 3827 val = REG_RD(sc, BCE_RPM_MGMT_PKT_CTRL) & 3828 ~BCE_RPM_MGMT_PKT_CTRL_MGMT_EN; 3829 REG_WR(sc, BCE_RPM_MGMT_PKT_CTRL, val); 3830 } 3831 3832 /* Enable all remaining blocks in the MAC. */ 3833 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 || 3834 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) { 3835 REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, 3836 BCE_MISC_ENABLE_DEFAULT_XI); 3837 } else { 3838 REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, BCE_MISC_ENABLE_DEFAULT); 3839 } 3840 REG_RD(sc, BCE_MISC_ENABLE_SET_BITS); 3841 DELAY(20); 3842 3843 /* Save the current host coalescing block settings. */ 3844 sc->hc_command = REG_RD(sc, BCE_HC_COMMAND); 3845 3846 return 0; 3847 } 3848 3849 /****************************************************************************/ 3850 /* Encapsulate an mbuf cluster into the rx_bd chain. */ 3851 /* */ 3852 /* The NetXtreme II can support Jumbo frames by using multiple rx_bd's. */ 3853 /* This routine will map an mbuf cluster into 1 or more rx_bd's as */ 3854 /* necessary. */ 3855 /* */ 3856 /* Returns: */ 3857 /* 0 for success, positive value for failure. */ 3858 /****************************************************************************/ 3859 static int 3860 bce_newbuf_std(struct bce_rx_ring *rxr, uint16_t *prod, uint16_t chain_prod, 3861 uint32_t *prod_bseq, int init) 3862 { 3863 struct bce_rx_buf *rx_buf; 3864 bus_dmamap_t map; 3865 bus_dma_segment_t seg; 3866 struct mbuf *m_new; 3867 int error, nseg; 3868 3869 /* This is a new mbuf allocation. */ 3870 m_new = m_getcl(init ? M_WAITOK : M_NOWAIT, MT_DATA, M_PKTHDR); 3871 if (m_new == NULL) 3872 return ENOBUFS; 3873 3874 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; 3875 3876 /* Map the mbuf cluster into device memory. 
 */

/*
 * NOTE(review): this is the tail of bce_newbuf_std(); the opening of the
 * function (new-mbuf allocation) lies before this chunk.  From this point
 * on the freshly allocated mbuf 'm_new' is DMA-mapped and installed into
 * the RX chain slot 'chain_prod'.
 */
	error = bus_dmamap_load_mbuf_segment(rxr->rx_mbuf_tag,
	    rxr->rx_mbuf_tmpmap, m_new, &seg, 1, &nseg, BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m_new);
		if (init) {
			/* Only complain during initial ring population. */
			if_printf(&rxr->sc->arpcom.ac_if,
			    "Error mapping mbuf into RX chain!\n");
		}
		return error;
	}

	rx_buf = &rxr->rx_bufs[chain_prod];
	/* Drop the DMA mapping of any mbuf previously in this slot. */
	if (rx_buf->rx_mbuf_ptr != NULL)
		bus_dmamap_unload(rxr->rx_mbuf_tag, rx_buf->rx_mbuf_map);

	/*
	 * Swap the slot's (now idle) map with the temporary map that
	 * carries the new mbuf's mapping, so the slot ends up owning
	 * the live mapping and the ring keeps a spare temporary map.
	 */
	map = rx_buf->rx_mbuf_map;
	rx_buf->rx_mbuf_map = rxr->rx_mbuf_tmpmap;
	rxr->rx_mbuf_tmpmap = map;

	/* Save the mbuf and update our counter. */
	rx_buf->rx_mbuf_ptr = m_new;
	rx_buf->rx_mbuf_paddr = seg.ds_addr;
	rxr->free_rx_bd--;

	bce_setup_rxdesc_std(rxr, chain_prod, prod_bseq);

	return 0;
}

/****************************************************************************/
/* Program one standard rx_bd from the bookkeeping saved in rx_bufs[].      */
/* Also advances *prod_bseq by the buffer length (the running byte          */
/* sequence the chip uses for RX flow control).                             */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_setup_rxdesc_std(struct bce_rx_ring *rxr, uint16_t chain_prod,
    uint32_t *prod_bseq)
{
	const struct bce_rx_buf *rx_buf;
	struct rx_bd *rxbd;
	bus_addr_t paddr;
	int len;

	/* Fetch the DMA address/length recorded for this chain slot. */
	rx_buf = &rxr->rx_bufs[chain_prod];
	paddr = rx_buf->rx_mbuf_paddr;
	len = rx_buf->rx_mbuf_ptr->m_len;

	/* Setup the rx_bd for the first segment. */
	rxbd = &rxr->rx_bd_chain[RX_PAGE(chain_prod)][RX_IDX(chain_prod)];

	rxbd->rx_bd_haddr_lo = htole32(BCE_ADDR_LO(paddr));
	rxbd->rx_bd_haddr_hi = htole32(BCE_ADDR_HI(paddr));
	rxbd->rx_bd_len = htole32(len);
	rxbd->rx_bd_flags = htole32(RX_BD_FLAGS_START);
	*prod_bseq += len;

	/* Single-BD receive: this descriptor both starts and ends a frame. */
	rxbd->rx_bd_flags |= htole32(RX_BD_FLAGS_END);
}

/****************************************************************************/
/* Initialize the TX context memory.                                        */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing                                                                */
/****************************************************************************/
static void
bce_init_tx_context(struct bce_tx_ring *txr)
{
	uint32_t val;

	/*
	 * Initialize the context ID for an L2 TX chain.  The 5709/5716
	 * (XI) parts use a different set of context offsets than the
	 * older 5706/5708.
	 */
	if (BCE_CHIP_NUM(txr->sc) == BCE_CHIP_NUM_5709 ||
	    BCE_CHIP_NUM(txr->sc) == BCE_CHIP_NUM_5716) {
		/* Set the CID type to support an L2 connection. */
		val = BCE_L2CTX_TX_TYPE_TYPE_L2 | BCE_L2CTX_TX_TYPE_SIZE_L2;
		CTX_WR(txr->sc, GET_CID_ADDR(txr->tx_cid),
		    BCE_L2CTX_TX_TYPE_XI, val);
		/* NOTE(review): (8 << 16) is a stock-driver magic value;
		 * presumably a BD pre-read count — confirm against the
		 * NetXtreme II programmer's reference. */
		val = BCE_L2CTX_TX_CMD_TYPE_TYPE_L2 | (8 << 16);
		CTX_WR(txr->sc, GET_CID_ADDR(txr->tx_cid),
		    BCE_L2CTX_TX_CMD_TYPE_XI, val);

		/* Point the hardware to the first page in the chain. */
		val = BCE_ADDR_HI(txr->tx_bd_chain_paddr[0]);
		CTX_WR(txr->sc, GET_CID_ADDR(txr->tx_cid),
		    BCE_L2CTX_TX_TBDR_BHADDR_HI_XI, val);
		val = BCE_ADDR_LO(txr->tx_bd_chain_paddr[0]);
		CTX_WR(txr->sc, GET_CID_ADDR(txr->tx_cid),
		    BCE_L2CTX_TX_TBDR_BHADDR_LO_XI, val);
	} else {
		/* Set the CID type to support an L2 connection. */
		val = BCE_L2CTX_TX_TYPE_TYPE_L2 | BCE_L2CTX_TX_TYPE_SIZE_L2;
		CTX_WR(txr->sc, GET_CID_ADDR(txr->tx_cid),
		    BCE_L2CTX_TX_TYPE, val);
		val = BCE_L2CTX_TX_CMD_TYPE_TYPE_L2 | (8 << 16);
		CTX_WR(txr->sc, GET_CID_ADDR(txr->tx_cid),
		    BCE_L2CTX_TX_CMD_TYPE, val);

		/* Point the hardware to the first page in the chain. */
		val = BCE_ADDR_HI(txr->tx_bd_chain_paddr[0]);
		CTX_WR(txr->sc, GET_CID_ADDR(txr->tx_cid),
		    BCE_L2CTX_TX_TBDR_BHADDR_HI, val);
		val = BCE_ADDR_LO(txr->tx_bd_chain_paddr[0]);
		CTX_WR(txr->sc, GET_CID_ADDR(txr->tx_cid),
		    BCE_L2CTX_TX_TBDR_BHADDR_LO, val);
	}
}

/****************************************************************************/
/* Allocate memory and initialize the TX data structures.                   */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.
 */
/****************************************************************************/
static int
bce_init_tx_chain(struct bce_tx_ring *txr)
{
	struct tx_bd *txbd;
	int i, rc = 0;

	/* Set the initial TX producer/consumer indices. */
	txr->tx_prod = 0;
	txr->tx_cons = 0;
	txr->tx_prod_bseq = 0;
	txr->used_tx_bd = 0;
	txr->max_tx_bd = USABLE_TX_BD(txr);

	/*
	 * The NetXtreme II supports a linked-list structure called
	 * a Buffer Descriptor Chain (or BD chain).  A BD chain
	 * consists of a series of 1 or more chain pages, each of which
	 * consists of a fixed number of BD entries.
	 * The last BD entry on each page is a pointer to the next page
	 * in the chain, and the last pointer in the BD chain
	 * points back to the beginning of the chain.
	 */

	/* Set the TX next pointer chain entries. */
	for (i = 0; i < txr->tx_pages; i++) {
		int j;

		/* The last BD slot of each page holds the next-page link. */
		txbd = &txr->tx_bd_chain[i][USABLE_TX_BD_PER_PAGE];

		/* Check if we've reached the last page. */
		if (i == (txr->tx_pages - 1))
			j = 0;	/* wrap back to the first page */
		else
			j = i + 1;

		txbd->tx_bd_haddr_hi =
		    htole32(BCE_ADDR_HI(txr->tx_bd_chain_paddr[j]));
		txbd->tx_bd_haddr_lo =
		    htole32(BCE_ADDR_LO(txr->tx_bd_chain_paddr[j]));
	}
	bce_init_tx_context(txr);

	return(rc);
}

/****************************************************************************/
/* Free memory and clear the TX data structures.                            */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_free_tx_chain(struct bce_tx_ring *txr)
{
	int i;

	/* Unmap, unload, and free any mbufs still in the TX mbuf chain. */
	for (i = 0; i < TOTAL_TX_BD(txr); i++) {
		struct bce_tx_buf *tx_buf = &txr->tx_bufs[i];

		if (tx_buf->tx_mbuf_ptr != NULL) {
			bus_dmamap_unload(txr->tx_mbuf_tag,
			    tx_buf->tx_mbuf_map);
			m_freem(tx_buf->tx_mbuf_ptr);
			tx_buf->tx_mbuf_ptr = NULL;
		}
	}

	/* Clear each TX chain page. */
	for (i = 0; i < txr->tx_pages; i++)
		bzero(txr->tx_bd_chain[i], BCE_TX_CHAIN_PAGE_SZ);
	txr->used_tx_bd = 0;
}

/****************************************************************************/
/* Initialize the RX context memory.                                        */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing                                                                */
/****************************************************************************/
static void
bce_init_rx_context(struct bce_rx_ring *rxr)
{
	uint32_t val;

	/* Initialize the context ID for an L2 RX chain. */
	val = BCE_L2CTX_RX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE |
	    BCE_L2CTX_RX_CTX_TYPE_SIZE_L2 | (0x02 << 8);

	/*
	 * Set the level for generating pause frames
	 * when the number of available rx_bd's gets
	 * too low (the low watermark) and the level
	 * when pause frames can be stopped (the high
	 * watermark).
	 */
	if (BCE_CHIP_NUM(rxr->sc) == BCE_CHIP_NUM_5709 ||
	    BCE_CHIP_NUM(rxr->sc) == BCE_CHIP_NUM_5716) {
		uint32_t lo_water, hi_water;

		lo_water = BCE_L2CTX_RX_LO_WATER_MARK_DEFAULT;
		hi_water = USABLE_RX_BD(rxr) / 4;

		/* Scale the watermarks into the register's units. */
		lo_water /= BCE_L2CTX_RX_LO_WATER_MARK_SCALE;
		hi_water /= BCE_L2CTX_RX_HI_WATER_MARK_SCALE;

		/* The hi watermark field is only 4 bits wide. */
		if (hi_water > 0xf)
			hi_water = 0xf;
		else if (hi_water == 0)
			lo_water = 0;	/* disable pause when hi is unusable */
		val |= lo_water |
		    (hi_water << BCE_L2CTX_RX_HI_WATER_MARK_SHIFT);
	}

	CTX_WR(rxr->sc, GET_CID_ADDR(rxr->rx_cid),
	    BCE_L2CTX_RX_CTX_TYPE, val);

	/* Setup the MQ BIN mapping for l2_ctx_host_bseq. */
	if (BCE_CHIP_NUM(rxr->sc) == BCE_CHIP_NUM_5709 ||
	    BCE_CHIP_NUM(rxr->sc) == BCE_CHIP_NUM_5716) {
		val = REG_RD(rxr->sc, BCE_MQ_MAP_L2_5);
		REG_WR(rxr->sc, BCE_MQ_MAP_L2_5, val | BCE_MQ_MAP_L2_5_ARM);
	}

	/* Point the hardware to the first page in the chain. */
	val = BCE_ADDR_HI(rxr->rx_bd_chain_paddr[0]);
	CTX_WR(rxr->sc, GET_CID_ADDR(rxr->rx_cid),
	    BCE_L2CTX_RX_NX_BDHADDR_HI, val);
	val = BCE_ADDR_LO(rxr->rx_bd_chain_paddr[0]);
	CTX_WR(rxr->sc, GET_CID_ADDR(rxr->rx_cid),
	    BCE_L2CTX_RX_NX_BDHADDR_LO, val);
}

/****************************************************************************/
/* Allocate memory and initialize the RX data structures.                   */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
static int
bce_init_rx_chain(struct bce_rx_ring *rxr)
{
	struct rx_bd *rxbd;
	int i, rc = 0;
	uint16_t prod, chain_prod;
	uint32_t prod_bseq;

	/* Initialize the RX producer and consumer indices. */
	rxr->rx_prod = 0;
	rxr->rx_cons = 0;
	rxr->rx_prod_bseq = 0;
	rxr->free_rx_bd = USABLE_RX_BD(rxr);
	rxr->max_rx_bd = USABLE_RX_BD(rxr);

	/* Clear cache status index */
	rxr->last_status_idx = 0;

	/* Initialize the RX next pointer chain entries. */
	for (i = 0; i < rxr->rx_pages; i++) {
		int j;

		/* The last BD slot of each page holds the next-page link. */
		rxbd = &rxr->rx_bd_chain[i][USABLE_RX_BD_PER_PAGE];

		/* Check if we've reached the last page. */
		if (i == (rxr->rx_pages - 1))
			j = 0;	/* wrap back to the first page */
		else
			j = i + 1;

		/* Setup the chain page pointers. */
		rxbd->rx_bd_haddr_hi =
		    htole32(BCE_ADDR_HI(rxr->rx_bd_chain_paddr[j]));
		rxbd->rx_bd_haddr_lo =
		    htole32(BCE_ADDR_LO(rxr->rx_bd_chain_paddr[j]));
	}

	/* Allocate mbuf clusters for the rx_bd chain. */
	prod = prod_bseq = 0;
	while (prod < TOTAL_RX_BD(rxr)) {
		chain_prod = RX_CHAIN_IDX(rxr, prod);
		if (bce_newbuf_std(rxr, &prod, chain_prod, &prod_bseq, 1)) {
			if_printf(&rxr->sc->arpcom.ac_if,
			    "Error filling RX chain: rx_bd[0x%04X]!\n",
			    chain_prod);
			rc = ENOBUFS;
			break;
		}
		prod = NEXT_RX_BD(prod);
	}

	/* Save the RX chain producer index. */
	rxr->rx_prod = prod;
	rxr->rx_prod_bseq = prod_bseq;

	/* Tell the chip about the waiting rx_bd's. */
	REG_WR16(rxr->sc, MB_GET_CID_ADDR(rxr->rx_cid) + BCE_L2MQ_RX_HOST_BDIDX,
	    rxr->rx_prod);
	REG_WR(rxr->sc, MB_GET_CID_ADDR(rxr->rx_cid) + BCE_L2MQ_RX_HOST_BSEQ,
	    rxr->rx_prod_bseq);

	bce_init_rx_context(rxr);

	return(rc);
}

/****************************************************************************/
/* Free memory and clear the RX data structures.                            */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_free_rx_chain(struct bce_rx_ring *rxr)
{
	int i;

	/* Free any mbufs still in the RX mbuf chain. */
	for (i = 0; i < TOTAL_RX_BD(rxr); i++) {
		struct bce_rx_buf *rx_buf = &rxr->rx_bufs[i];

		if (rx_buf->rx_mbuf_ptr != NULL) {
			bus_dmamap_unload(rxr->rx_mbuf_tag,
			    rx_buf->rx_mbuf_map);
			m_freem(rx_buf->rx_mbuf_ptr);
			rx_buf->rx_mbuf_ptr = NULL;
		}
	}

	/* Clear each RX chain page. */
	for (i = 0; i < rxr->rx_pages; i++)
		bzero(rxr->rx_bd_chain[i], BCE_RX_CHAIN_PAGE_SZ);
}

/****************************************************************************/
/* Set media options.                                                       */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.
*/ 4223 /****************************************************************************/ 4224 static int 4225 bce_ifmedia_upd(struct ifnet *ifp) 4226 { 4227 struct bce_softc *sc = ifp->if_softc; 4228 struct mii_data *mii = device_get_softc(sc->bce_miibus); 4229 int error = 0; 4230 4231 /* 4232 * 'mii' will be NULL, when this function is called on following 4233 * code path: bce_attach() -> bce_mgmt_init() 4234 */ 4235 if (mii != NULL) { 4236 /* Make sure the MII bus has been enumerated. */ 4237 sc->bce_link = 0; 4238 if (mii->mii_instance) { 4239 struct mii_softc *miisc; 4240 4241 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 4242 mii_phy_reset(miisc); 4243 } 4244 error = mii_mediachg(mii); 4245 } 4246 return error; 4247 } 4248 4249 /****************************************************************************/ 4250 /* Reports current media status. */ 4251 /* */ 4252 /* Returns: */ 4253 /* Nothing. */ 4254 /****************************************************************************/ 4255 static void 4256 bce_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 4257 { 4258 struct bce_softc *sc = ifp->if_softc; 4259 struct mii_data *mii = device_get_softc(sc->bce_miibus); 4260 4261 mii_pollstat(mii); 4262 ifmr->ifm_active = mii->mii_media_active; 4263 ifmr->ifm_status = mii->mii_media_status; 4264 } 4265 4266 /****************************************************************************/ 4267 /* Handles PHY generated interrupt events. */ 4268 /* */ 4269 /* Returns: */ 4270 /* Nothing. 
*/ 4271 /****************************************************************************/ 4272 static void 4273 bce_phy_intr(struct bce_softc *sc) 4274 { 4275 uint32_t new_link_state, old_link_state; 4276 struct ifnet *ifp = &sc->arpcom.ac_if; 4277 4278 ASSERT_SERIALIZED(&sc->main_serialize); 4279 4280 new_link_state = sc->status_block->status_attn_bits & 4281 STATUS_ATTN_BITS_LINK_STATE; 4282 old_link_state = sc->status_block->status_attn_bits_ack & 4283 STATUS_ATTN_BITS_LINK_STATE; 4284 4285 /* Handle any changes if the link state has changed. */ 4286 if (new_link_state != old_link_state) { /* XXX redundant? */ 4287 /* Update the status_attn_bits_ack field in the status block. */ 4288 if (new_link_state) { 4289 REG_WR(sc, BCE_PCICFG_STATUS_BIT_SET_CMD, 4290 STATUS_ATTN_BITS_LINK_STATE); 4291 if (bootverbose) 4292 if_printf(ifp, "Link is now UP.\n"); 4293 } else { 4294 REG_WR(sc, BCE_PCICFG_STATUS_BIT_CLEAR_CMD, 4295 STATUS_ATTN_BITS_LINK_STATE); 4296 if (bootverbose) 4297 if_printf(ifp, "Link is now DOWN.\n"); 4298 } 4299 4300 /* 4301 * Assume link is down and allow tick routine to 4302 * update the state based on the actual media state. 4303 */ 4304 sc->bce_link = 0; 4305 callout_stop(&sc->bce_tick_callout); 4306 bce_tick_serialized(sc); 4307 } 4308 4309 /* Acknowledge the link change interrupt. */ 4310 REG_WR(sc, BCE_EMAC_STATUS, BCE_EMAC_STATUS_LINK_CHANGE); 4311 } 4312 4313 /****************************************************************************/ 4314 /* Reads the receive consumer value from the status block (skipping over */ 4315 /* chain page pointer if necessary). 
 */
/*                                                                          */
/* Returns:                                                                 */
/*   hw_cons                                                                */
/****************************************************************************/
static __inline uint16_t
bce_get_hw_rx_cons(struct bce_rx_ring *rxr)
{
	uint16_t hw_cons = *rxr->rx_hw_cons;

	/*
	 * The last BD slot on each chain page is a next-page pointer,
	 * not a packet descriptor; step over it when the hardware
	 * consumer index lands there.
	 */
	if ((hw_cons & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
		hw_cons++;
	return hw_cons;
}

/****************************************************************************/
/* Handles received frame interrupt events.                                 */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_rx_intr(struct bce_rx_ring *rxr, int count, uint16_t hw_cons)
{
	struct ifnet *ifp = &rxr->sc->arpcom.ac_if;
	uint16_t sw_cons, sw_chain_cons, sw_prod, sw_chain_prod;
	uint32_t sw_prod_bseq;
	int cpuid = mycpuid;

	ASSERT_SERIALIZED(&rxr->rx_serialize);

	/* Get working copies of the driver's view of the RX indices. */
	sw_cons = rxr->rx_cons;
	sw_prod = rxr->rx_prod;
	sw_prod_bseq = rxr->rx_prod_bseq;

	/* Scan through the receive chain as long as there is work to do. */
	while (sw_cons != hw_cons) {
		struct pktinfo pi0, *pi = NULL;
		struct bce_rx_buf *rx_buf;
		struct mbuf *m = NULL;
		struct l2_fhdr *l2fhdr = NULL;
		unsigned int len;
		uint32_t status = 0;

#ifdef IFPOLL_ENABLE
		/* In polling mode, honor the caller-supplied burst budget. */
		if (count >= 0 && count-- == 0)
			break;
#endif

		/*
		 * Convert the producer/consumer indices
		 * to an actual rx_bd index.
		 */
		sw_chain_cons = RX_CHAIN_IDX(rxr, sw_cons);
		sw_chain_prod = RX_CHAIN_IDX(rxr, sw_prod);
		rx_buf = &rxr->rx_bufs[sw_chain_cons];

		rxr->free_rx_bd++;

		/* The mbuf is stored with the last rx_bd entry of a packet. */
		if (rx_buf->rx_mbuf_ptr != NULL) {
			if (sw_chain_cons != sw_chain_prod) {
				/* Should not happen: indices out of sync. */
				if_printf(ifp, "RX cons(%d) != prod(%d), "
				    "drop!\n", sw_chain_cons, sw_chain_prod);
				IFNET_STAT_INC(ifp, ierrors, 1);

				bce_setup_rxdesc_std(rxr, sw_chain_cons,
				    &sw_prod_bseq);
				m = NULL;
				goto bce_rx_int_next_rx;
			}

			/* Unmap the mbuf from DMA space. */
			bus_dmamap_sync(rxr->rx_mbuf_tag, rx_buf->rx_mbuf_map,
			    BUS_DMASYNC_POSTREAD);

			/* Save the mbuf from the driver's chain. */
			m = rx_buf->rx_mbuf_ptr;

			/*
			 * Frames received on the NetXtreme II are prepended
			 * with an l2_fhdr structure which provides status
			 * information about the received frame (including
			 * VLAN tags and checksum info).  The frames are also
			 * automatically adjusted to align the IP header
			 * (i.e. two null bytes are inserted before the
			 * Ethernet header).  As a result the data DMA'd by
			 * the controller into the mbuf is as follows:
			 *
			 * +---------+-----+---------------------+-----+
			 * | l2_fhdr | pad | packet data         | FCS |
			 * +---------+-----+---------------------+-----+
			 *
			 * The l2_fhdr needs to be checked and skipped and the
			 * FCS needs to be stripped before sending the packet
			 * up the stack.
			 */
			l2fhdr = mtod(m, struct l2_fhdr *);

			len = l2fhdr->l2_fhdr_pkt_len;
			status = l2fhdr->l2_fhdr_status;

			/* Strip the FCS from the reported length. */
			len -= ETHER_CRC_LEN;

			/* Check the received frame for errors. */
			if (status & (L2_FHDR_ERRORS_BAD_CRC |
			    L2_FHDR_ERRORS_PHY_DECODE |
			    L2_FHDR_ERRORS_ALIGNMENT |
			    L2_FHDR_ERRORS_TOO_SHORT |
			    L2_FHDR_ERRORS_GIANT_FRAME)) {
				IFNET_STAT_INC(ifp, ierrors, 1);

				/* Reuse the mbuf for a new frame. */
				bce_setup_rxdesc_std(rxr, sw_chain_prod,
				    &sw_prod_bseq);
				m = NULL;
				goto bce_rx_int_next_rx;
			}

			/*
			 * Get a new mbuf for the rx_bd.  If no new
			 * mbufs are available then reuse the current mbuf,
			 * log an ierror on the interface, and generate
			 * an error in the system log.
			 */
			if (bce_newbuf_std(rxr, &sw_prod, sw_chain_prod,
			    &sw_prod_bseq, 0)) {
				IFNET_STAT_INC(ifp, ierrors, 1);

				/* Try and reuse the existing mbuf. */
				bce_setup_rxdesc_std(rxr, sw_chain_prod,
				    &sw_prod_bseq);
				m = NULL;
				goto bce_rx_int_next_rx;
			}

			/*
			 * Skip over the l2_fhdr when passing
			 * the data up the stack.
			 */
			m_adj(m, sizeof(struct l2_fhdr) + ETHER_ALIGN);

			m->m_pkthdr.len = m->m_len = len;
			m->m_pkthdr.rcvif = ifp;

			/* Validate the checksum if offload enabled. */
			if (ifp->if_capenable & IFCAP_RXCSUM) {
				/* Check for an IP datagram. */
				if (status & L2_FHDR_STATUS_IP_DATAGRAM) {
					m->m_pkthdr.csum_flags |=
					    CSUM_IP_CHECKED;

					/* Check if the IP checksum is valid. */
					if ((l2fhdr->l2_fhdr_ip_xsum ^
					    0xffff) == 0) {
						m->m_pkthdr.csum_flags |=
						    CSUM_IP_VALID;
					}
				}

				/* Check for a valid TCP/UDP frame. */
				if (status & (L2_FHDR_STATUS_TCP_SEGMENT |
				    L2_FHDR_STATUS_UDP_DATAGRAM)) {

					/* Check for a good TCP/UDP checksum. */
					if ((status &
					    (L2_FHDR_ERRORS_TCP_XSUM |
					    L2_FHDR_ERRORS_UDP_XSUM)) == 0) {
						m->m_pkthdr.csum_data =
						    l2fhdr->l2_fhdr_tcp_udp_xsum;
						m->m_pkthdr.csum_flags |=
						    CSUM_DATA_VALID |
						    CSUM_PSEUDO_HDR;
					}
				}
			}
			if (ifp->if_capenable & IFCAP_RSS) {
				/* Extract hash info for upper-layer dispatch. */
				pi = bce_rss_pktinfo(&pi0, status, l2fhdr);
				if (pi != NULL &&
				    (status & L2_FHDR_STATUS_RSS_HASH)) {
					m->m_flags |= M_HASH;
					m->m_pkthdr.hash =
					    toeplitz_hash(l2fhdr->l2_fhdr_hash);
				}
			}

			IFNET_STAT_INC(ifp, ipackets, 1);
bce_rx_int_next_rx:
			sw_prod = NEXT_RX_BD(sw_prod);
		}

		sw_cons = NEXT_RX_BD(sw_cons);

		/* If we have a packet, pass it up the stack */
		if (m) {
			if (status & L2_FHDR_STATUS_L2_VLAN_TAG) {
				m->m_flags |= M_VLANTAG;
				m->m_pkthdr.ether_vlantag =
				    l2fhdr->l2_fhdr_vlan_tag;
			}
			ifp->if_input(ifp, m, pi, cpuid);
#ifdef BCE_RSS_DEBUG
			rxr->rx_pkts++;
#endif
		}
	}

	/* Publish the updated indices back to the ring. */
	rxr->rx_cons = sw_cons;
	rxr->rx_prod = sw_prod;
	rxr->rx_prod_bseq = sw_prod_bseq;

	/* Ring the doorbell: tell the chip about the refilled rx_bd's. */
	REG_WR16(rxr->sc, MB_GET_CID_ADDR(rxr->rx_cid) + BCE_L2MQ_RX_HOST_BDIDX,
	    rxr->rx_prod);
	REG_WR(rxr->sc, MB_GET_CID_ADDR(rxr->rx_cid) + BCE_L2MQ_RX_HOST_BSEQ,
	    rxr->rx_prod_bseq);
}

/****************************************************************************/
/* Reads the transmit consumer value from the status block (skipping over   */
/* chain page pointer if necessary.
 */
/*                                                                          */
/* Returns:                                                                 */
/*   hw_cons                                                                */
/****************************************************************************/
static __inline uint16_t
bce_get_hw_tx_cons(struct bce_tx_ring *txr)
{
	uint16_t hw_cons = *txr->tx_hw_cons;

	/*
	 * The last BD slot on each chain page is a next-page pointer,
	 * not a packet descriptor; step over it when the hardware
	 * consumer index lands there.
	 */
	if ((hw_cons & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
		hw_cons++;
	return hw_cons;
}

/****************************************************************************/
/* Handles transmit completion interrupt events.                            */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_tx_intr(struct bce_tx_ring *txr, uint16_t hw_tx_cons)
{
	struct ifnet *ifp = &txr->sc->arpcom.ac_if;
	uint16_t sw_tx_cons, sw_tx_chain_cons;

	ASSERT_SERIALIZED(&txr->tx_serialize);

	/* Get the hardware's view of the TX consumer index. */
	sw_tx_cons = txr->tx_cons;

	/* Cycle through any completed TX chain page entries. */
	while (sw_tx_cons != hw_tx_cons) {
		struct bce_tx_buf *tx_buf;

		sw_tx_chain_cons = TX_CHAIN_IDX(txr, sw_tx_cons);
		tx_buf = &txr->tx_bufs[sw_tx_chain_cons];

		/*
		 * Free the associated mbuf.  Remember
		 * that only the last tx_bd of a packet
		 * has an mbuf pointer and DMA map.
		 */
		if (tx_buf->tx_mbuf_ptr != NULL) {
			/* Unmap the mbuf. */
			bus_dmamap_unload(txr->tx_mbuf_tag,
			    tx_buf->tx_mbuf_map);

			/* Free the mbuf. */
			m_freem(tx_buf->tx_mbuf_ptr);
			tx_buf->tx_mbuf_ptr = NULL;

			IFNET_STAT_INC(ifp, opackets, 1);
#ifdef BCE_TSS_DEBUG
			txr->tx_pkts++;
#endif
		}

		txr->used_tx_bd--;
		sw_tx_cons = NEXT_TX_BD(sw_tx_cons);
	}

	if (txr->used_tx_bd == 0) {
		/* Clear the TX timeout timer. */
		txr->tx_watchdog.wd_timer = 0;
	}

	/* Clear the tx hardware queue full flag. */
	if (txr->max_tx_bd - txr->used_tx_bd >= BCE_TX_SPARE_SPACE)
		ifsq_clr_oactive(txr->ifsq);
	txr->tx_cons = sw_tx_cons;
}

/****************************************************************************/
/* Disables interrupt generation.                                           */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_disable_intr(struct bce_softc *sc)
{
	int i;

	/* Mask interrupts on every RX ring's vector. */
	for (i = 0; i < sc->rx_ring_cnt; ++i) {
		REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
		    (sc->rx_rings[i].idx << 24) |
		    BCE_PCICFG_INT_ACK_CMD_MASK_INT);
	}
	/* Flush the preceding writes with a read. */
	REG_RD(sc, BCE_PCICFG_INT_ACK_CMD);

	/* Stop the MSI-lost workaround and reset its bookkeeping. */
	callout_stop(&sc->bce_ckmsi_callout);
	sc->bce_msi_maylose = FALSE;
	sc->bce_check_rx_cons = 0;
	sc->bce_check_tx_cons = 0;
	sc->bce_check_status_idx = 0xffff;

	for (i = 0; i < sc->rx_ring_cnt; ++i)
		lwkt_serialize_handler_disable(sc->bce_msix[i].msix_serialize);
}

/****************************************************************************/
/* Enables interrupt generation.                                            */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
*/ 4642 /****************************************************************************/ 4643 static void 4644 bce_enable_intr(struct bce_softc *sc) 4645 { 4646 int i; 4647 4648 for (i = 0; i < sc->rx_ring_cnt; ++i) 4649 lwkt_serialize_handler_enable(sc->bce_msix[i].msix_serialize); 4650 4651 for (i = 0; i < sc->rx_ring_cnt; ++i) { 4652 struct bce_rx_ring *rxr = &sc->rx_rings[i]; 4653 4654 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, (rxr->idx << 24) | 4655 BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | 4656 BCE_PCICFG_INT_ACK_CMD_MASK_INT | 4657 rxr->last_status_idx); 4658 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, (rxr->idx << 24) | 4659 BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | 4660 rxr->last_status_idx); 4661 } 4662 REG_WR(sc, BCE_HC_COMMAND, sc->hc_command | BCE_HC_COMMAND_COAL_NOW); 4663 4664 if (sc->bce_flags & BCE_CHECK_MSI_FLAG) { 4665 sc->bce_msi_maylose = FALSE; 4666 sc->bce_check_rx_cons = 0; 4667 sc->bce_check_tx_cons = 0; 4668 sc->bce_check_status_idx = 0xffff; 4669 4670 if (bootverbose) 4671 if_printf(&sc->arpcom.ac_if, "check msi\n"); 4672 4673 callout_reset_bycpu(&sc->bce_ckmsi_callout, BCE_MSI_CKINTVL, 4674 bce_check_msi, sc, sc->bce_msix[0].msix_cpuid); 4675 } 4676 } 4677 4678 /****************************************************************************/ 4679 /* Reenables interrupt generation during interrupt handling. */ 4680 /* */ 4681 /* Returns: */ 4682 /* Nothing. */ 4683 /****************************************************************************/ 4684 static void 4685 bce_reenable_intr(struct bce_rx_ring *rxr) 4686 { 4687 REG_WR(rxr->sc, BCE_PCICFG_INT_ACK_CMD, (rxr->idx << 24) | 4688 BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | rxr->last_status_idx); 4689 } 4690 4691 /****************************************************************************/ 4692 /* Handles controller initialization. */ 4693 /* */ 4694 /* Returns: */ 4695 /* Nothing. 
 */
/****************************************************************************/
static void
bce_init(void *xsc)
{
	struct bce_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t ether_mtu;
	int error, i;
	boolean_t polling;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	/* Check if the driver is still running and bail out if it is. */
	if (ifp->if_flags & IFF_RUNNING)
		return;

	bce_stop(sc);

	error = bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
	if (error) {
		if_printf(ifp, "Controller reset failed!\n");
		goto back;
	}

	error = bce_chipinit(sc);
	if (error) {
		if_printf(ifp, "Controller initialization failed!\n");
		goto back;
	}

	error = bce_blockinit(sc);
	if (error) {
		if_printf(ifp, "Block initialization failed!\n");
		goto back;
	}

	/* Load our MAC address. */
	bcopy(IF_LLADDR(ifp), sc->eaddr, ETHER_ADDR_LEN);
	bce_set_mac_addr(sc);

	/* Calculate and program the Ethernet MTU size. */
	ether_mtu = ETHER_HDR_LEN + EVL_ENCAPLEN + ifp->if_mtu + ETHER_CRC_LEN;

	/*
	 * Program the mtu, enabling jumbo frame
	 * support if necessary.  Also set the mbuf
	 * allocation count for RX frames.
	 */
	if (ether_mtu > ETHER_MAX_LEN + EVL_ENCAPLEN) {
#ifdef notyet
		REG_WR(sc, BCE_EMAC_RX_MTU_SIZE,
		    min(ether_mtu, BCE_MAX_JUMBO_ETHER_MTU) |
		    BCE_EMAC_RX_MTU_SIZE_JUMBO_ENA);
#else
		panic("jumbo buffer is not supported yet");
#endif
	} else {
		REG_WR(sc, BCE_EMAC_RX_MTU_SIZE, ether_mtu);
	}

	/* Program appropriate promiscuous/multicast filtering. */
	bce_set_rx_mode(sc);

	/*
	 * Init RX buffer descriptor chain.
	 */
	/* Disable RSS before (re)filling the rings. */
	REG_WR(sc, BCE_RLUP_RSS_CONFIG, 0);
	bce_reg_wr_ind(sc, BCE_RXP_SCRATCH_RSS_TBL_SZ, 0);

	for (i = 0; i < sc->rx_ring_cnt; ++i)
		bce_init_rx_chain(&sc->rx_rings[i]);	/* XXX return value */

	if (sc->rx_ring_cnt > 1)
		bce_init_rss(sc);

	/*
	 * Init TX buffer descriptor chain.
	 */
	REG_WR(sc, BCE_TSCH_TSS_CFG, 0);

	for (i = 0; i < sc->tx_ring_cnt; ++i)
		bce_init_tx_chain(&sc->tx_rings[i]);

	if (sc->tx_ring_cnt > 1) {
		/* Enable TX-side multiqueue steering (TSS). */
		REG_WR(sc, BCE_TSCH_TSS_CFG,
		    ((sc->tx_ring_cnt - 1) << 24) | (TX_TSS_CID << 7));
	}

	polling = FALSE;
#ifdef IFPOLL_ENABLE
	if (ifp->if_flags & IFF_NPOLLING)
		polling = TRUE;
#endif

	if (polling) {
		/* Disable interrupts if we are polling. */
		bce_disable_intr(sc);

		/* Change coalesce parameters */
		bce_npoll_coal_change(sc);
	} else {
		/* Enable host interrupts. */
		bce_enable_intr(sc);
	}
	bce_set_timer_cpuid(sc, polling);

	bce_ifmedia_upd(ifp);

	ifp->if_flags |= IFF_RUNNING;
	for (i = 0; i < sc->tx_ring_cnt; ++i) {
		ifsq_clr_oactive(sc->tx_rings[i].ifsq);
		ifsq_watchdog_start(&sc->tx_rings[i].tx_watchdog);
	}

	callout_reset_bycpu(&sc->bce_tick_callout, hz, bce_tick, sc,
	    sc->bce_timer_cpuid);
back:
	if (error)
		bce_stop(sc);
}

/****************************************************************************/
/* Initialize the controller just enough so that any management firmware    */
/* running on the device will continue to operate correctly.                */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_mgmt_init(struct bce_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;

	/* Bail out if management firmware is not running. */
	if (!(sc->bce_flags & BCE_MFW_ENABLE_FLAG))
		return;

	/* Enable all critical blocks in the MAC. */
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
		REG_WR(sc, BCE_MISC_ENABLE_SET_BITS,
		    BCE_MISC_ENABLE_DEFAULT_XI);
	} else {
		REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, BCE_MISC_ENABLE_DEFAULT);
	}
	/* Flush the write, then let the blocks come up. */
	REG_RD(sc, BCE_MISC_ENABLE_SET_BITS);
	DELAY(20);

	bce_ifmedia_upd(ifp);
}

/****************************************************************************/
/* Encapsulates an mbuf cluster into the tx_bd chain structure and makes    */
/* the memory visible to the controller.                                    */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
static int
bce_encap(struct bce_tx_ring *txr, struct mbuf **m_head, int *nsegs_used)
{
	bus_dma_segment_t segs[BCE_MAX_SEGMENTS];
	bus_dmamap_t map, tmp_map;
	struct mbuf *m0 = *m_head;
	struct tx_bd *txbd = NULL;
	uint16_t vlan_tag = 0, flags = 0, mss = 0;
	uint16_t chain_prod, chain_prod_start, prod;
	uint32_t prod_bseq;
	int i, error, maxsegs, nsegs;

	/* Transfer any checksum offload flags to the bd. */
	if (m0->m_pkthdr.csum_flags & CSUM_TSO) {
		error = bce_tso_setup(txr, m_head, &flags, &mss);
		if (error)
			return ENOBUFS;
		m0 = *m_head;	/* bce_tso_setup() may replace the mbuf */
	} else if (m0->m_pkthdr.csum_flags & BCE_CSUM_FEATURES) {
		if (m0->m_pkthdr.csum_flags & CSUM_IP)
			flags |= TX_BD_FLAGS_IP_CKSUM;
		if (m0->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
			flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}

	/* Transfer any VLAN tags to the bd. */
	if (m0->m_flags & M_VLANTAG) {
		flags |= TX_BD_FLAGS_VLAN_TAG;
		vlan_tag = m0->m_pkthdr.ether_vlantag;
	}

	prod = txr->tx_prod;
	chain_prod_start = chain_prod = TX_CHAIN_IDX(txr, prod);

	/* Map the mbuf into DMAable memory. */
	map = txr->tx_bufs[chain_prod_start].tx_mbuf_map;

	maxsegs = txr->max_tx_bd - txr->used_tx_bd;
	KASSERT(maxsegs >= BCE_TX_SPARE_SPACE,
	    ("not enough segments %d", maxsegs));
	if (maxsegs > BCE_MAX_SEGMENTS)
		maxsegs = BCE_MAX_SEGMENTS;

	/* Map the mbuf into our DMA address space. */
	error = bus_dmamap_load_mbuf_defrag(txr->tx_mbuf_tag, map, m_head,
	    segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
	if (error)
		goto back;
	bus_dmamap_sync(txr->tx_mbuf_tag, map, BUS_DMASYNC_PREWRITE);

	*nsegs_used += nsegs;

	/* Reset m0; defrag may have replaced the chain. */
	m0 = *m_head;

	/* prod points to an empty tx_bd at this point. */
	prod_bseq = txr->tx_prod_bseq;

	/*
	 * Cycle through each mbuf segment that makes up
	 * the outgoing frame, gathering the mapping info
	 * for that segment and creating a tx_bd to for
	 * the mbuf.
	 */
	for (i = 0; i < nsegs; i++) {
		chain_prod = TX_CHAIN_IDX(txr, prod);
		txbd =
		    &txr->tx_bd_chain[TX_PAGE(chain_prod)][TX_IDX(chain_prod)];

		txbd->tx_bd_haddr_lo = htole32(BCE_ADDR_LO(segs[i].ds_addr));
		txbd->tx_bd_haddr_hi = htole32(BCE_ADDR_HI(segs[i].ds_addr));
		/* MSS occupies the high 16 bits, segment length the low. */
		txbd->tx_bd_mss_nbytes = htole32(mss << 16) |
		    htole16(segs[i].ds_len);
		txbd->tx_bd_vlan_tag = htole16(vlan_tag);
		txbd->tx_bd_flags = htole16(flags);

		prod_bseq += segs[i].ds_len;
		if (i == 0)
			txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_START);
		prod = NEXT_TX_BD(prod);
	}

	/* Set the END flag on the last TX buffer descriptor. */
	txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_END);

	/*
	 * Ensure that the mbuf pointer for this transmission
	 * is placed at the array index of the last
	 * descriptor in this chain.  This is done
	 * because a single map is used for all
	 * segments of the mbuf and we don't want to
	 * unload the map before all of the segments
	 * have been freed.
	 */
	txr->tx_bufs[chain_prod].tx_mbuf_ptr = m0;

	/* Swap the first slot's map into the last slot alongside the mbuf. */
	tmp_map = txr->tx_bufs[chain_prod].tx_mbuf_map;
	txr->tx_bufs[chain_prod].tx_mbuf_map = map;
	txr->tx_bufs[chain_prod_start].tx_mbuf_map = tmp_map;

	txr->used_tx_bd += nsegs;

	/* prod points to the next free tx_bd at this point. */
	txr->tx_prod = prod;
	txr->tx_prod_bseq = prod_bseq;
back:
	if (error) {
		m_freem(*m_head);
		*m_head = NULL;
	}
	return error;
}

/*
 * Ring the TX doorbell: publish the producer index and byte sequence
 * so the chip starts (or continues) transmitting.
 */
static void
bce_xmit(struct bce_tx_ring *txr)
{
	/* Start the transmit. */
	REG_WR16(txr->sc, MB_GET_CID_ADDR(txr->tx_cid) + BCE_L2CTX_TX_HOST_BIDX,
	    txr->tx_prod);
	REG_WR(txr->sc, MB_GET_CID_ADDR(txr->tx_cid) + BCE_L2CTX_TX_HOST_BSEQ,
	    txr->tx_prod_bseq);
}

/****************************************************************************/
/* Main transmit routine when called from another routine with a lock.      */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
{
	struct bce_softc *sc = ifp->if_softc;
	struct bce_tx_ring *txr = ifsq_get_priv(ifsq);
	int count = 0;

	KKASSERT(txr->ifsq == ifsq);
	ASSERT_SERIALIZED(&txr->tx_serialize);

	/* If there's no link or the transmit queue is empty then just exit. */
	if (!sc->bce_link) {
		ifsq_purge(ifsq);
		return;
	}

	if ((ifp->if_flags & IFF_RUNNING) == 0 || ifsq_is_oactive(ifsq))
		return;

	for (;;) {
		struct mbuf *m_head;

		/*
		 * We keep BCE_TX_SPARE_SPACE entries, so bce_encap() is
		 * unlikely to fail.
		 */
		if (txr->max_tx_bd - txr->used_tx_bd < BCE_TX_SPARE_SPACE) {
			ifsq_set_oactive(ifsq);
			break;
		}

		/* Check for any frames to send. */
		m_head = ifsq_dequeue(ifsq);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring.  If we
		 * don't have room, place the mbuf back at the
		 * head of the queue and set the OACTIVE flag
		 * to wait for the NIC to drain the chain.
		 */
		if (bce_encap(txr, &m_head, &count)) {
			IFNET_STAT_INC(ifp, oerrors, 1);
			if (txr->used_tx_bd == 0) {
				continue;
			} else {
				ifsq_set_oactive(ifsq);
				break;
			}
		}

		/* Batch doorbell writes: ring once per tx_wreg frames. */
		if (count >= txr->tx_wreg) {
			bce_xmit(txr);
			count = 0;
		}

		/* Send a copy of the frame to any BPF listeners. */
		ETHER_BPF_MTAP(ifp, m_head);

		/* Set the tx timeout. */
		txr->tx_watchdog.wd_timer = BCE_TX_TIMEOUT;
	}
	/* Flush any frames not yet announced to the chip. */
	if (count > 0)
		bce_xmit(txr);
}

/****************************************************************************/
/* Handles any IOCTL calls from the operating system.                       */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
static int
bce_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
{
	struct bce_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct mii_data *mii;
	int mask, error = 0;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	switch(command) {
	case SIOCSIFMTU:
		/* Check that the MTU setting is supported. */
		if (ifr->ifr_mtu < BCE_MIN_MTU ||
#ifdef notyet
		    ifr->ifr_mtu > BCE_MAX_JUMBO_MTU
#else
		    ifr->ifr_mtu > ETHERMTU	/* no jumbo support yet */
#endif
		    ) {
			error = EINVAL;
			break;
		}

		ifp->if_mtu = ifr->ifr_mtu;
		ifp->if_flags &= ~IFF_RUNNING;	/* Force reinitialize */
		bce_init(sc);
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING) {
				mask = ifp->if_flags ^ sc->bce_if_flags;

				if (mask & (IFF_PROMISC | IFF_ALLMULTI))
					bce_set_rx_mode(sc);
			} else {
				bce_init(sc);
			}
		} else if (ifp->if_flags & IFF_RUNNING) {
			bce_stop(sc);

			/* If MFW is running, restart the controller a bit. */
			if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) {
				bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
				bce_chipinit(sc);
				bce_mgmt_init(sc);
			}
		}
		sc->bce_if_flags = ifp->if_flags;
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING)
			bce_set_rx_mode(sc);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->bce_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;

	case SIOCSIFCAP:
		/* Toggle only the capability bits that actually changed. */
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_HWCSUM) {
			ifp->if_capenable ^= (mask & IFCAP_HWCSUM);
			if (ifp->if_capenable & IFCAP_TXCSUM)
				ifp->if_hwassist |= BCE_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~BCE_CSUM_FEATURES;
		}
		if (mask & IFCAP_TSO) {
			ifp->if_capenable ^= IFCAP_TSO;
			if (ifp->if_capenable & IFCAP_TSO)
				ifp->if_hwassist |= CSUM_TSO;
			else
				ifp->if_hwassist &= ~CSUM_TSO;
		}
		if (mask & IFCAP_RSS)
			ifp->if_capenable ^= IFCAP_RSS;
		break;

	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}
	return error;
}
/****************************************************************************/ 5148 /* Transmit timeout handler. */ 5149 /* */ 5150 /* Returns: */ 5151 /* Nothing. */ 5152 /****************************************************************************/ 5153 static void 5154 bce_watchdog(struct ifaltq_subque *ifsq) 5155 { 5156 struct ifnet *ifp = ifsq_get_ifp(ifsq); 5157 struct bce_softc *sc = ifp->if_softc; 5158 int i; 5159 5160 ASSERT_IFNET_SERIALIZED_ALL(ifp); 5161 5162 /* 5163 * If we are in this routine because of pause frames, then 5164 * don't reset the hardware. 5165 */ 5166 if (REG_RD(sc, BCE_EMAC_TX_STATUS) & BCE_EMAC_TX_STATUS_XOFFED) 5167 return; 5168 5169 if_printf(ifp, "Watchdog timeout occurred, resetting!\n"); 5170 5171 ifp->if_flags &= ~IFF_RUNNING; /* Force reinitialize */ 5172 bce_init(sc); 5173 5174 IFNET_STAT_INC(ifp, oerrors, 1); 5175 5176 for (i = 0; i < sc->tx_ring_cnt; ++i) 5177 ifsq_devstart_sched(sc->tx_rings[i].ifsq); 5178 } 5179 5180 #ifdef IFPOLL_ENABLE 5181 5182 static void 5183 bce_npoll_status(struct ifnet *ifp) 5184 { 5185 struct bce_softc *sc = ifp->if_softc; 5186 struct status_block *sblk = sc->status_block; 5187 uint32_t status_attn_bits; 5188 5189 ASSERT_SERIALIZED(&sc->main_serialize); 5190 5191 status_attn_bits = sblk->status_attn_bits; 5192 5193 /* Was it a link change interrupt? */ 5194 if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) != 5195 (sblk->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE)) { 5196 bce_phy_intr(sc); 5197 5198 /* 5199 * Clear any transient status updates during link state change. 5200 */ 5201 REG_WR(sc, BCE_HC_COMMAND, 5202 sc->hc_command | BCE_HC_COMMAND_COAL_NOW_WO_INT); 5203 REG_RD(sc, BCE_HC_COMMAND); 5204 } 5205 5206 /* 5207 * If any other attention is asserted then the chip is toast. 
5208 */ 5209 if ((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) != 5210 (sblk->status_attn_bits_ack & ~STATUS_ATTN_BITS_LINK_STATE)) { 5211 if_printf(ifp, "Fatal attention detected: 0x%08X\n", 5212 sblk->status_attn_bits); 5213 bce_serialize_skipmain(sc); 5214 bce_init(sc); 5215 bce_deserialize_skipmain(sc); 5216 } 5217 } 5218 5219 static void 5220 bce_npoll_rx(struct ifnet *ifp, void *arg, int count) 5221 { 5222 struct bce_rx_ring *rxr = arg; 5223 uint16_t hw_rx_cons; 5224 5225 ASSERT_SERIALIZED(&rxr->rx_serialize); 5226 5227 /* 5228 * Save the status block index value for use when enabling 5229 * the interrupt. 5230 */ 5231 rxr->last_status_idx = *rxr->hw_status_idx; 5232 5233 /* Make sure status index is extracted before RX/TX cons */ 5234 cpu_lfence(); 5235 5236 hw_rx_cons = bce_get_hw_rx_cons(rxr); 5237 5238 /* Check for any completed RX frames. */ 5239 if (hw_rx_cons != rxr->rx_cons) 5240 bce_rx_intr(rxr, count, hw_rx_cons); 5241 } 5242 5243 static void 5244 bce_npoll_rx_pack(struct ifnet *ifp, void *arg, int count) 5245 { 5246 struct bce_rx_ring *rxr = arg; 5247 5248 KASSERT(rxr->idx == 0, ("not the first RX ring, but %d", rxr->idx)); 5249 bce_npoll_rx(ifp, rxr, count); 5250 5251 KASSERT(rxr->sc->rx_ring_cnt != rxr->sc->rx_ring_cnt2, 5252 ("RX ring count %d, count2 %d", rxr->sc->rx_ring_cnt, 5253 rxr->sc->rx_ring_cnt2)); 5254 5255 /* Last ring carries packets whose masked hash is 0 */ 5256 rxr = &rxr->sc->rx_rings[rxr->sc->rx_ring_cnt - 1]; 5257 5258 lwkt_serialize_enter(&rxr->rx_serialize); 5259 bce_npoll_rx(ifp, rxr, count); 5260 lwkt_serialize_exit(&rxr->rx_serialize); 5261 } 5262 5263 static void 5264 bce_npoll_tx(struct ifnet *ifp, void *arg, int count __unused) 5265 { 5266 struct bce_tx_ring *txr = arg; 5267 uint16_t hw_tx_cons; 5268 5269 ASSERT_SERIALIZED(&txr->tx_serialize); 5270 5271 hw_tx_cons = bce_get_hw_tx_cons(txr); 5272 5273 /* Check for any completed TX frames. 
*/ 5274 if (hw_tx_cons != txr->tx_cons) { 5275 bce_tx_intr(txr, hw_tx_cons); 5276 if (!ifsq_is_empty(txr->ifsq)) 5277 ifsq_devstart(txr->ifsq); 5278 } 5279 } 5280 5281 static void 5282 bce_npoll(struct ifnet *ifp, struct ifpoll_info *info) 5283 { 5284 struct bce_softc *sc = ifp->if_softc; 5285 int i; 5286 5287 ASSERT_IFNET_SERIALIZED_ALL(ifp); 5288 5289 if (info != NULL) { 5290 info->ifpi_status.status_func = bce_npoll_status; 5291 info->ifpi_status.serializer = &sc->main_serialize; 5292 5293 for (i = 0; i < sc->tx_ring_cnt; ++i) { 5294 struct bce_tx_ring *txr = &sc->tx_rings[i]; 5295 int idx = i + sc->npoll_ofs; 5296 5297 KKASSERT(idx < ncpus2); 5298 info->ifpi_tx[idx].poll_func = bce_npoll_tx; 5299 info->ifpi_tx[idx].arg = txr; 5300 info->ifpi_tx[idx].serializer = &txr->tx_serialize; 5301 ifsq_set_cpuid(txr->ifsq, idx); 5302 } 5303 5304 for (i = 0; i < sc->rx_ring_cnt2; ++i) { 5305 struct bce_rx_ring *rxr = &sc->rx_rings[i]; 5306 int idx = i + sc->npoll_ofs; 5307 5308 KKASSERT(idx < ncpus2); 5309 if (i == 0 && sc->rx_ring_cnt2 != sc->rx_ring_cnt) { 5310 /* 5311 * If RSS is enabled, the packets whose 5312 * masked hash are 0 are queued to the 5313 * last RX ring; piggyback the last RX 5314 * ring's processing in the first RX 5315 * polling handler. 
(see also: comment 5316 * in bce_setup_ring_cnt()) 5317 */ 5318 if (bootverbose) { 5319 if_printf(ifp, "npoll pack last " 5320 "RX ring on cpu%d\n", idx); 5321 } 5322 info->ifpi_rx[idx].poll_func = 5323 bce_npoll_rx_pack; 5324 } else { 5325 info->ifpi_rx[idx].poll_func = bce_npoll_rx; 5326 } 5327 info->ifpi_rx[idx].arg = rxr; 5328 info->ifpi_rx[idx].serializer = &rxr->rx_serialize; 5329 } 5330 5331 if (ifp->if_flags & IFF_RUNNING) { 5332 bce_set_timer_cpuid(sc, TRUE); 5333 bce_disable_intr(sc); 5334 bce_npoll_coal_change(sc); 5335 } 5336 } else { 5337 for (i = 0; i < sc->tx_ring_cnt; ++i) { 5338 ifsq_set_cpuid(sc->tx_rings[i].ifsq, 5339 sc->bce_msix[i].msix_cpuid); 5340 } 5341 5342 if (ifp->if_flags & IFF_RUNNING) { 5343 bce_set_timer_cpuid(sc, FALSE); 5344 bce_enable_intr(sc); 5345 5346 sc->bce_coalchg_mask |= BCE_COALMASK_TX_BDS_INT | 5347 BCE_COALMASK_RX_BDS_INT; 5348 bce_coal_change(sc); 5349 } 5350 } 5351 } 5352 5353 #endif /* IFPOLL_ENABLE */ 5354 5355 /* 5356 * Interrupt handler. 5357 */ 5358 /****************************************************************************/ 5359 /* Main interrupt entry point. Verifies that the controller generated the */ 5360 /* interrupt and then calls a separate routine for handle the various */ 5361 /* interrupt causes (PHY, TX, RX). */ 5362 /* */ 5363 /* Returns: */ 5364 /* 0 for success, positive value for failure. */ 5365 /****************************************************************************/ 5366 static void 5367 bce_intr(struct bce_softc *sc) 5368 { 5369 struct ifnet *ifp = &sc->arpcom.ac_if; 5370 struct status_block *sblk; 5371 uint16_t hw_rx_cons, hw_tx_cons; 5372 uint32_t status_attn_bits; 5373 struct bce_tx_ring *txr = &sc->tx_rings[0]; 5374 struct bce_rx_ring *rxr = &sc->rx_rings[0]; 5375 5376 ASSERT_SERIALIZED(&sc->main_serialize); 5377 5378 sblk = sc->status_block; 5379 5380 /* 5381 * Save the status block index value for use during 5382 * the next interrupt. 
5383 */ 5384 rxr->last_status_idx = *rxr->hw_status_idx; 5385 5386 /* Make sure status index is extracted before RX/TX cons */ 5387 cpu_lfence(); 5388 5389 /* Check if the hardware has finished any work. */ 5390 hw_rx_cons = bce_get_hw_rx_cons(rxr); 5391 hw_tx_cons = bce_get_hw_tx_cons(txr); 5392 5393 status_attn_bits = sblk->status_attn_bits; 5394 5395 /* Was it a link change interrupt? */ 5396 if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) != 5397 (sblk->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE)) { 5398 bce_phy_intr(sc); 5399 5400 /* 5401 * Clear any transient status updates during link state 5402 * change. 5403 */ 5404 REG_WR(sc, BCE_HC_COMMAND, 5405 sc->hc_command | BCE_HC_COMMAND_COAL_NOW_WO_INT); 5406 REG_RD(sc, BCE_HC_COMMAND); 5407 } 5408 5409 /* 5410 * If any other attention is asserted then 5411 * the chip is toast. 5412 */ 5413 if ((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) != 5414 (sblk->status_attn_bits_ack & ~STATUS_ATTN_BITS_LINK_STATE)) { 5415 if_printf(ifp, "Fatal attention detected: 0x%08X\n", 5416 sblk->status_attn_bits); 5417 bce_serialize_skipmain(sc); 5418 bce_init(sc); 5419 bce_deserialize_skipmain(sc); 5420 return; 5421 } 5422 5423 /* Check for any completed RX frames. */ 5424 lwkt_serialize_enter(&rxr->rx_serialize); 5425 if (hw_rx_cons != rxr->rx_cons) 5426 bce_rx_intr(rxr, -1, hw_rx_cons); 5427 lwkt_serialize_exit(&rxr->rx_serialize); 5428 5429 /* Check for any completed TX frames. 
*/ 5430 lwkt_serialize_enter(&txr->tx_serialize); 5431 if (hw_tx_cons != txr->tx_cons) { 5432 bce_tx_intr(txr, hw_tx_cons); 5433 if (!ifsq_is_empty(txr->ifsq)) 5434 ifsq_devstart(txr->ifsq); 5435 } 5436 lwkt_serialize_exit(&txr->tx_serialize); 5437 } 5438 5439 static void 5440 bce_intr_legacy(void *xsc) 5441 { 5442 struct bce_softc *sc = xsc; 5443 struct bce_rx_ring *rxr = &sc->rx_rings[0]; 5444 struct status_block *sblk; 5445 5446 sblk = sc->status_block; 5447 5448 /* 5449 * If the hardware status block index matches the last value 5450 * read by the driver and we haven't asserted our interrupt 5451 * then there's nothing to do. 5452 */ 5453 if (sblk->status_idx == rxr->last_status_idx && 5454 (REG_RD(sc, BCE_PCICFG_MISC_STATUS) & 5455 BCE_PCICFG_MISC_STATUS_INTA_VALUE)) 5456 return; 5457 5458 /* Ack the interrupt and stop others from occuring. */ 5459 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, 5460 BCE_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM | 5461 BCE_PCICFG_INT_ACK_CMD_MASK_INT); 5462 5463 /* 5464 * Read back to deassert IRQ immediately to avoid too 5465 * many spurious interrupts. 5466 */ 5467 REG_RD(sc, BCE_PCICFG_INT_ACK_CMD); 5468 5469 bce_intr(sc); 5470 5471 /* Re-enable interrupts. */ 5472 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, 5473 BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | 5474 BCE_PCICFG_INT_ACK_CMD_MASK_INT | rxr->last_status_idx); 5475 bce_reenable_intr(rxr); 5476 } 5477 5478 static void 5479 bce_intr_msi(void *xsc) 5480 { 5481 struct bce_softc *sc = xsc; 5482 5483 /* Ack the interrupt and stop others from occuring. 
*/ 5484 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, 5485 BCE_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM | 5486 BCE_PCICFG_INT_ACK_CMD_MASK_INT); 5487 5488 bce_intr(sc); 5489 5490 /* Re-enable interrupts */ 5491 bce_reenable_intr(&sc->rx_rings[0]); 5492 } 5493 5494 static void 5495 bce_intr_msi_oneshot(void *xsc) 5496 { 5497 struct bce_softc *sc = xsc; 5498 5499 bce_intr(sc); 5500 5501 /* Re-enable interrupts */ 5502 bce_reenable_intr(&sc->rx_rings[0]); 5503 } 5504 5505 static void 5506 bce_intr_msix_rxtx(void *xrxr) 5507 { 5508 struct bce_rx_ring *rxr = xrxr; 5509 struct bce_tx_ring *txr; 5510 uint16_t hw_rx_cons, hw_tx_cons; 5511 5512 ASSERT_SERIALIZED(&rxr->rx_serialize); 5513 5514 KKASSERT(rxr->idx < rxr->sc->tx_ring_cnt); 5515 txr = &rxr->sc->tx_rings[rxr->idx]; 5516 5517 /* 5518 * Save the status block index value for use during 5519 * the next interrupt. 5520 */ 5521 rxr->last_status_idx = *rxr->hw_status_idx; 5522 5523 /* Make sure status index is extracted before RX/TX cons */ 5524 cpu_lfence(); 5525 5526 /* Check if the hardware has finished any work. */ 5527 hw_rx_cons = bce_get_hw_rx_cons(rxr); 5528 if (hw_rx_cons != rxr->rx_cons) 5529 bce_rx_intr(rxr, -1, hw_rx_cons); 5530 5531 /* Check for any completed TX frames. */ 5532 hw_tx_cons = bce_get_hw_tx_cons(txr); 5533 lwkt_serialize_enter(&txr->tx_serialize); 5534 if (hw_tx_cons != txr->tx_cons) { 5535 bce_tx_intr(txr, hw_tx_cons); 5536 if (!ifsq_is_empty(txr->ifsq)) 5537 ifsq_devstart(txr->ifsq); 5538 } 5539 lwkt_serialize_exit(&txr->tx_serialize); 5540 5541 /* Re-enable interrupts */ 5542 bce_reenable_intr(rxr); 5543 } 5544 5545 static void 5546 bce_intr_msix_rx(void *xrxr) 5547 { 5548 struct bce_rx_ring *rxr = xrxr; 5549 uint16_t hw_rx_cons; 5550 5551 ASSERT_SERIALIZED(&rxr->rx_serialize); 5552 5553 /* 5554 * Save the status block index value for use during 5555 * the next interrupt. 
5556 */ 5557 rxr->last_status_idx = *rxr->hw_status_idx; 5558 5559 /* Make sure status index is extracted before RX cons */ 5560 cpu_lfence(); 5561 5562 /* Check if the hardware has finished any work. */ 5563 hw_rx_cons = bce_get_hw_rx_cons(rxr); 5564 if (hw_rx_cons != rxr->rx_cons) 5565 bce_rx_intr(rxr, -1, hw_rx_cons); 5566 5567 /* Re-enable interrupts */ 5568 bce_reenable_intr(rxr); 5569 } 5570 5571 /****************************************************************************/ 5572 /* Programs the various packet receive modes (broadcast and multicast). */ 5573 /* */ 5574 /* Returns: */ 5575 /* Nothing. */ 5576 /****************************************************************************/ 5577 static void 5578 bce_set_rx_mode(struct bce_softc *sc) 5579 { 5580 struct ifnet *ifp = &sc->arpcom.ac_if; 5581 struct ifmultiaddr *ifma; 5582 uint32_t hashes[NUM_MC_HASH_REGISTERS] = { 0, 0, 0, 0, 0, 0, 0, 0 }; 5583 uint32_t rx_mode, sort_mode; 5584 int h, i; 5585 5586 ASSERT_IFNET_SERIALIZED_ALL(ifp); 5587 5588 /* Initialize receive mode default settings. */ 5589 rx_mode = sc->rx_mode & 5590 ~(BCE_EMAC_RX_MODE_PROMISCUOUS | 5591 BCE_EMAC_RX_MODE_KEEP_VLAN_TAG); 5592 sort_mode = 1 | BCE_RPM_SORT_USER0_BC_EN; 5593 5594 /* 5595 * ASF/IPMI/UMP firmware requires that VLAN tag stripping 5596 * be enbled. 5597 */ 5598 if (!(BCE_IF_CAPABILITIES & IFCAP_VLAN_HWTAGGING) && 5599 !(sc->bce_flags & BCE_MFW_ENABLE_FLAG)) 5600 rx_mode |= BCE_EMAC_RX_MODE_KEEP_VLAN_TAG; 5601 5602 /* 5603 * Check for promiscuous, all multicast, or selected 5604 * multicast address filtering. 5605 */ 5606 if (ifp->if_flags & IFF_PROMISC) { 5607 /* Enable promiscuous mode. */ 5608 rx_mode |= BCE_EMAC_RX_MODE_PROMISCUOUS; 5609 sort_mode |= BCE_RPM_SORT_USER0_PROM_EN; 5610 } else if (ifp->if_flags & IFF_ALLMULTI) { 5611 /* Enable all multicast addresses. 
*/ 5612 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) { 5613 REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4), 5614 0xffffffff); 5615 } 5616 sort_mode |= BCE_RPM_SORT_USER0_MC_EN; 5617 } else { 5618 /* Accept one or more multicast(s). */ 5619 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 5620 if (ifma->ifma_addr->sa_family != AF_LINK) 5621 continue; 5622 h = ether_crc32_le( 5623 LLADDR((struct sockaddr_dl *)ifma->ifma_addr), 5624 ETHER_ADDR_LEN) & 0xFF; 5625 hashes[(h & 0xE0) >> 5] |= 1 << (h & 0x1F); 5626 } 5627 5628 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) { 5629 REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4), 5630 hashes[i]); 5631 } 5632 sort_mode |= BCE_RPM_SORT_USER0_MC_HSH_EN; 5633 } 5634 5635 /* Only make changes if the recive mode has actually changed. */ 5636 if (rx_mode != sc->rx_mode) { 5637 sc->rx_mode = rx_mode; 5638 REG_WR(sc, BCE_EMAC_RX_MODE, rx_mode); 5639 } 5640 5641 /* Disable and clear the exisitng sort before enabling a new sort. */ 5642 REG_WR(sc, BCE_RPM_SORT_USER0, 0x0); 5643 REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode); 5644 REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode | BCE_RPM_SORT_USER0_ENA); 5645 } 5646 5647 /****************************************************************************/ 5648 /* Called periodically to updates statistics from the controllers */ 5649 /* statistics block. */ 5650 /* */ 5651 /* Returns: */ 5652 /* Nothing. */ 5653 /****************************************************************************/ 5654 static void 5655 bce_stats_update(struct bce_softc *sc) 5656 { 5657 struct ifnet *ifp = &sc->arpcom.ac_if; 5658 struct statistics_block *stats = sc->stats_block; 5659 5660 ASSERT_SERIALIZED(&sc->main_serialize); 5661 5662 /* 5663 * Certain controllers don't report carrier sense errors correctly. 5664 * See errata E11_5708CA0_1165. 
5665 */ 5666 if (!(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) && 5667 !(BCE_CHIP_ID(sc) == BCE_CHIP_ID_5708_A0)) { 5668 IFNET_STAT_INC(ifp, oerrors, 5669 (u_long)stats->stat_Dot3StatsCarrierSenseErrors); 5670 } 5671 5672 /* 5673 * Update the sysctl statistics from the hardware statistics. 5674 */ 5675 sc->stat_IfHCInOctets = 5676 ((uint64_t)stats->stat_IfHCInOctets_hi << 32) + 5677 (uint64_t)stats->stat_IfHCInOctets_lo; 5678 5679 sc->stat_IfHCInBadOctets = 5680 ((uint64_t)stats->stat_IfHCInBadOctets_hi << 32) + 5681 (uint64_t)stats->stat_IfHCInBadOctets_lo; 5682 5683 sc->stat_IfHCOutOctets = 5684 ((uint64_t)stats->stat_IfHCOutOctets_hi << 32) + 5685 (uint64_t)stats->stat_IfHCOutOctets_lo; 5686 5687 sc->stat_IfHCOutBadOctets = 5688 ((uint64_t)stats->stat_IfHCOutBadOctets_hi << 32) + 5689 (uint64_t)stats->stat_IfHCOutBadOctets_lo; 5690 5691 sc->stat_IfHCInUcastPkts = 5692 ((uint64_t)stats->stat_IfHCInUcastPkts_hi << 32) + 5693 (uint64_t)stats->stat_IfHCInUcastPkts_lo; 5694 5695 sc->stat_IfHCInMulticastPkts = 5696 ((uint64_t)stats->stat_IfHCInMulticastPkts_hi << 32) + 5697 (uint64_t)stats->stat_IfHCInMulticastPkts_lo; 5698 5699 sc->stat_IfHCInBroadcastPkts = 5700 ((uint64_t)stats->stat_IfHCInBroadcastPkts_hi << 32) + 5701 (uint64_t)stats->stat_IfHCInBroadcastPkts_lo; 5702 5703 sc->stat_IfHCOutUcastPkts = 5704 ((uint64_t)stats->stat_IfHCOutUcastPkts_hi << 32) + 5705 (uint64_t)stats->stat_IfHCOutUcastPkts_lo; 5706 5707 sc->stat_IfHCOutMulticastPkts = 5708 ((uint64_t)stats->stat_IfHCOutMulticastPkts_hi << 32) + 5709 (uint64_t)stats->stat_IfHCOutMulticastPkts_lo; 5710 5711 sc->stat_IfHCOutBroadcastPkts = 5712 ((uint64_t)stats->stat_IfHCOutBroadcastPkts_hi << 32) + 5713 (uint64_t)stats->stat_IfHCOutBroadcastPkts_lo; 5714 5715 sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors = 5716 stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors; 5717 5718 sc->stat_Dot3StatsCarrierSenseErrors = 5719 stats->stat_Dot3StatsCarrierSenseErrors; 5720 5721 
sc->stat_Dot3StatsFCSErrors = 5722 stats->stat_Dot3StatsFCSErrors; 5723 5724 sc->stat_Dot3StatsAlignmentErrors = 5725 stats->stat_Dot3StatsAlignmentErrors; 5726 5727 sc->stat_Dot3StatsSingleCollisionFrames = 5728 stats->stat_Dot3StatsSingleCollisionFrames; 5729 5730 sc->stat_Dot3StatsMultipleCollisionFrames = 5731 stats->stat_Dot3StatsMultipleCollisionFrames; 5732 5733 sc->stat_Dot3StatsDeferredTransmissions = 5734 stats->stat_Dot3StatsDeferredTransmissions; 5735 5736 sc->stat_Dot3StatsExcessiveCollisions = 5737 stats->stat_Dot3StatsExcessiveCollisions; 5738 5739 sc->stat_Dot3StatsLateCollisions = 5740 stats->stat_Dot3StatsLateCollisions; 5741 5742 sc->stat_EtherStatsCollisions = 5743 stats->stat_EtherStatsCollisions; 5744 5745 sc->stat_EtherStatsFragments = 5746 stats->stat_EtherStatsFragments; 5747 5748 sc->stat_EtherStatsJabbers = 5749 stats->stat_EtherStatsJabbers; 5750 5751 sc->stat_EtherStatsUndersizePkts = 5752 stats->stat_EtherStatsUndersizePkts; 5753 5754 sc->stat_EtherStatsOverrsizePkts = 5755 stats->stat_EtherStatsOverrsizePkts; 5756 5757 sc->stat_EtherStatsPktsRx64Octets = 5758 stats->stat_EtherStatsPktsRx64Octets; 5759 5760 sc->stat_EtherStatsPktsRx65Octetsto127Octets = 5761 stats->stat_EtherStatsPktsRx65Octetsto127Octets; 5762 5763 sc->stat_EtherStatsPktsRx128Octetsto255Octets = 5764 stats->stat_EtherStatsPktsRx128Octetsto255Octets; 5765 5766 sc->stat_EtherStatsPktsRx256Octetsto511Octets = 5767 stats->stat_EtherStatsPktsRx256Octetsto511Octets; 5768 5769 sc->stat_EtherStatsPktsRx512Octetsto1023Octets = 5770 stats->stat_EtherStatsPktsRx512Octetsto1023Octets; 5771 5772 sc->stat_EtherStatsPktsRx1024Octetsto1522Octets = 5773 stats->stat_EtherStatsPktsRx1024Octetsto1522Octets; 5774 5775 sc->stat_EtherStatsPktsRx1523Octetsto9022Octets = 5776 stats->stat_EtherStatsPktsRx1523Octetsto9022Octets; 5777 5778 sc->stat_EtherStatsPktsTx64Octets = 5779 stats->stat_EtherStatsPktsTx64Octets; 5780 5781 sc->stat_EtherStatsPktsTx65Octetsto127Octets = 5782 
stats->stat_EtherStatsPktsTx65Octetsto127Octets; 5783 5784 sc->stat_EtherStatsPktsTx128Octetsto255Octets = 5785 stats->stat_EtherStatsPktsTx128Octetsto255Octets; 5786 5787 sc->stat_EtherStatsPktsTx256Octetsto511Octets = 5788 stats->stat_EtherStatsPktsTx256Octetsto511Octets; 5789 5790 sc->stat_EtherStatsPktsTx512Octetsto1023Octets = 5791 stats->stat_EtherStatsPktsTx512Octetsto1023Octets; 5792 5793 sc->stat_EtherStatsPktsTx1024Octetsto1522Octets = 5794 stats->stat_EtherStatsPktsTx1024Octetsto1522Octets; 5795 5796 sc->stat_EtherStatsPktsTx1523Octetsto9022Octets = 5797 stats->stat_EtherStatsPktsTx1523Octetsto9022Octets; 5798 5799 sc->stat_XonPauseFramesReceived = 5800 stats->stat_XonPauseFramesReceived; 5801 5802 sc->stat_XoffPauseFramesReceived = 5803 stats->stat_XoffPauseFramesReceived; 5804 5805 sc->stat_OutXonSent = 5806 stats->stat_OutXonSent; 5807 5808 sc->stat_OutXoffSent = 5809 stats->stat_OutXoffSent; 5810 5811 sc->stat_FlowControlDone = 5812 stats->stat_FlowControlDone; 5813 5814 sc->stat_MacControlFramesReceived = 5815 stats->stat_MacControlFramesReceived; 5816 5817 sc->stat_XoffStateEntered = 5818 stats->stat_XoffStateEntered; 5819 5820 sc->stat_IfInFramesL2FilterDiscards = 5821 stats->stat_IfInFramesL2FilterDiscards; 5822 5823 sc->stat_IfInRuleCheckerDiscards = 5824 stats->stat_IfInRuleCheckerDiscards; 5825 5826 sc->stat_IfInFTQDiscards = 5827 stats->stat_IfInFTQDiscards; 5828 5829 sc->stat_IfInMBUFDiscards = 5830 stats->stat_IfInMBUFDiscards; 5831 5832 sc->stat_IfInRuleCheckerP4Hit = 5833 stats->stat_IfInRuleCheckerP4Hit; 5834 5835 sc->stat_CatchupInRuleCheckerDiscards = 5836 stats->stat_CatchupInRuleCheckerDiscards; 5837 5838 sc->stat_CatchupInFTQDiscards = 5839 stats->stat_CatchupInFTQDiscards; 5840 5841 sc->stat_CatchupInMBUFDiscards = 5842 stats->stat_CatchupInMBUFDiscards; 5843 5844 sc->stat_CatchupInRuleCheckerP4Hit = 5845 stats->stat_CatchupInRuleCheckerP4Hit; 5846 5847 sc->com_no_buffers = REG_RD_IND(sc, 0x120084); 5848 5849 /* 5850 * Update the 
interface statistics from the 5851 * hardware statistics. 5852 */ 5853 IFNET_STAT_SET(ifp, collisions, (u_long)sc->stat_EtherStatsCollisions); 5854 5855 IFNET_STAT_SET(ifp, ierrors, (u_long)sc->stat_EtherStatsUndersizePkts + 5856 (u_long)sc->stat_EtherStatsOverrsizePkts + 5857 (u_long)sc->stat_IfInMBUFDiscards + 5858 (u_long)sc->stat_Dot3StatsAlignmentErrors + 5859 (u_long)sc->stat_Dot3StatsFCSErrors + 5860 (u_long)sc->stat_IfInRuleCheckerDiscards + 5861 (u_long)sc->stat_IfInFTQDiscards + 5862 (u_long)sc->com_no_buffers); 5863 5864 IFNET_STAT_SET(ifp, oerrors, 5865 (u_long)sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors + 5866 (u_long)sc->stat_Dot3StatsExcessiveCollisions + 5867 (u_long)sc->stat_Dot3StatsLateCollisions); 5868 } 5869 5870 /****************************************************************************/ 5871 /* Periodic function to notify the bootcode that the driver is still */ 5872 /* present. */ 5873 /* */ 5874 /* Returns: */ 5875 /* Nothing. */ 5876 /****************************************************************************/ 5877 static void 5878 bce_pulse(void *xsc) 5879 { 5880 struct bce_softc *sc = xsc; 5881 struct ifnet *ifp = &sc->arpcom.ac_if; 5882 uint32_t msg; 5883 5884 lwkt_serialize_enter(&sc->main_serialize); 5885 5886 /* Tell the firmware that the driver is still running. */ 5887 msg = (uint32_t)++sc->bce_fw_drv_pulse_wr_seq; 5888 bce_shmem_wr(sc, BCE_DRV_PULSE_MB, msg); 5889 5890 /* Update the bootcode condition. */ 5891 sc->bc_state = bce_shmem_rd(sc, BCE_BC_STATE_CONDITION); 5892 5893 /* Report whether the bootcode still knows the driver is running. */ 5894 if (!sc->bce_drv_cardiac_arrest) { 5895 if (!(sc->bc_state & BCE_CONDITION_DRV_PRESENT)) { 5896 sc->bce_drv_cardiac_arrest = 1; 5897 if_printf(ifp, "Bootcode lost the driver pulse! " 5898 "(bc_state = 0x%08X)\n", sc->bc_state); 5899 } 5900 } else { 5901 /* 5902 * Not supported by all bootcode versions. 
5903 * (v5.0.11+ and v5.2.1+) Older bootcode 5904 * will require the driver to reset the 5905 * controller to clear this condition. 5906 */ 5907 if (sc->bc_state & BCE_CONDITION_DRV_PRESENT) { 5908 sc->bce_drv_cardiac_arrest = 0; 5909 if_printf(ifp, "Bootcode found the driver pulse! " 5910 "(bc_state = 0x%08X)\n", sc->bc_state); 5911 } 5912 } 5913 5914 /* Schedule the next pulse. */ 5915 callout_reset_bycpu(&sc->bce_pulse_callout, hz, bce_pulse, sc, 5916 sc->bce_timer_cpuid); 5917 5918 lwkt_serialize_exit(&sc->main_serialize); 5919 } 5920 5921 /****************************************************************************/ 5922 /* Periodic function to check whether MSI is lost */ 5923 /* */ 5924 /* Returns: */ 5925 /* Nothing. */ 5926 /****************************************************************************/ 5927 static void 5928 bce_check_msi(void *xsc) 5929 { 5930 struct bce_softc *sc = xsc; 5931 struct ifnet *ifp = &sc->arpcom.ac_if; 5932 struct status_block *sblk = sc->status_block; 5933 struct bce_tx_ring *txr = &sc->tx_rings[0]; 5934 struct bce_rx_ring *rxr = &sc->rx_rings[0]; 5935 5936 lwkt_serialize_enter(&sc->main_serialize); 5937 5938 KKASSERT(mycpuid == sc->bce_msix[0].msix_cpuid); 5939 5940 if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) != IFF_RUNNING) { 5941 lwkt_serialize_exit(&sc->main_serialize); 5942 return; 5943 } 5944 5945 if (bce_get_hw_rx_cons(rxr) != rxr->rx_cons || 5946 bce_get_hw_tx_cons(txr) != txr->tx_cons || 5947 (sblk->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) != 5948 (sblk->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE)) { 5949 if (sc->bce_check_rx_cons == rxr->rx_cons && 5950 sc->bce_check_tx_cons == txr->tx_cons && 5951 sc->bce_check_status_idx == rxr->last_status_idx) { 5952 uint32_t msi_ctrl; 5953 5954 if (!sc->bce_msi_maylose) { 5955 sc->bce_msi_maylose = TRUE; 5956 goto done; 5957 } 5958 5959 msi_ctrl = REG_RD(sc, BCE_PCICFG_MSI_CONTROL); 5960 if (msi_ctrl & BCE_PCICFG_MSI_CONTROL_ENABLE) { 5961 if (bootverbose) 
5962 if_printf(ifp, "lost MSI\n"); 5963 5964 REG_WR(sc, BCE_PCICFG_MSI_CONTROL, 5965 msi_ctrl & ~BCE_PCICFG_MSI_CONTROL_ENABLE); 5966 REG_WR(sc, BCE_PCICFG_MSI_CONTROL, msi_ctrl); 5967 5968 bce_intr_msi(sc); 5969 } else if (bootverbose) { 5970 if_printf(ifp, "MSI may be lost\n"); 5971 } 5972 } 5973 } 5974 sc->bce_msi_maylose = FALSE; 5975 sc->bce_check_rx_cons = rxr->rx_cons; 5976 sc->bce_check_tx_cons = txr->tx_cons; 5977 sc->bce_check_status_idx = rxr->last_status_idx; 5978 5979 done: 5980 callout_reset(&sc->bce_ckmsi_callout, BCE_MSI_CKINTVL, 5981 bce_check_msi, sc); 5982 lwkt_serialize_exit(&sc->main_serialize); 5983 } 5984 5985 /****************************************************************************/ 5986 /* Periodic function to perform maintenance tasks. */ 5987 /* */ 5988 /* Returns: */ 5989 /* Nothing. */ 5990 /****************************************************************************/ 5991 static void 5992 bce_tick_serialized(struct bce_softc *sc) 5993 { 5994 struct mii_data *mii; 5995 5996 ASSERT_SERIALIZED(&sc->main_serialize); 5997 5998 /* Update the statistics from the hardware statistics block. */ 5999 bce_stats_update(sc); 6000 6001 /* Schedule the next tick. */ 6002 callout_reset_bycpu(&sc->bce_tick_callout, hz, bce_tick, sc, 6003 sc->bce_timer_cpuid); 6004 6005 /* If link is up already up then we're done. */ 6006 if (sc->bce_link) 6007 return; 6008 6009 mii = device_get_softc(sc->bce_miibus); 6010 mii_tick(mii); 6011 6012 /* Check if the link has come up. */ 6013 if ((mii->mii_media_status & IFM_ACTIVE) && 6014 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { 6015 int i; 6016 6017 sc->bce_link++; 6018 /* Now that link is up, handle any outstanding TX traffic. 
*/ 6019 for (i = 0; i < sc->tx_ring_cnt; ++i) 6020 ifsq_devstart_sched(sc->tx_rings[i].ifsq); 6021 } 6022 } 6023 6024 static void 6025 bce_tick(void *xsc) 6026 { 6027 struct bce_softc *sc = xsc; 6028 6029 lwkt_serialize_enter(&sc->main_serialize); 6030 bce_tick_serialized(sc); 6031 lwkt_serialize_exit(&sc->main_serialize); 6032 } 6033 6034 /****************************************************************************/ 6035 /* Adds any sysctl parameters for tuning or debugging purposes. */ 6036 /* */ 6037 /* Returns: */ 6038 /* 0 for success, positive value for failure. */ 6039 /****************************************************************************/ 6040 static void 6041 bce_add_sysctls(struct bce_softc *sc) 6042 { 6043 struct sysctl_ctx_list *ctx; 6044 struct sysctl_oid_list *children; 6045 #if defined(BCE_TSS_DEBUG) || defined(BCE_RSS_DEBUG) 6046 char node[32]; 6047 int i; 6048 #endif 6049 6050 ctx = device_get_sysctl_ctx(sc->bce_dev); 6051 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->bce_dev)); 6052 6053 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_bds_int", 6054 CTLTYPE_INT | CTLFLAG_RW, 6055 sc, 0, bce_sysctl_tx_bds_int, "I", 6056 "Send max coalesced BD count during interrupt"); 6057 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_bds", 6058 CTLTYPE_INT | CTLFLAG_RW, 6059 sc, 0, bce_sysctl_tx_bds, "I", 6060 "Send max coalesced BD count"); 6061 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_ticks_int", 6062 CTLTYPE_INT | CTLFLAG_RW, 6063 sc, 0, bce_sysctl_tx_ticks_int, "I", 6064 "Send coalescing ticks during interrupt"); 6065 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_ticks", 6066 CTLTYPE_INT | CTLFLAG_RW, 6067 sc, 0, bce_sysctl_tx_ticks, "I", 6068 "Send coalescing ticks"); 6069 6070 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_bds_int", 6071 CTLTYPE_INT | CTLFLAG_RW, 6072 sc, 0, bce_sysctl_rx_bds_int, "I", 6073 "Receive max coalesced BD count during interrupt"); 6074 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_bds", 6075 CTLTYPE_INT | CTLFLAG_RW, 
6076 sc, 0, bce_sysctl_rx_bds, "I", 6077 "Receive max coalesced BD count"); 6078 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_ticks_int", 6079 CTLTYPE_INT | CTLFLAG_RW, 6080 sc, 0, bce_sysctl_rx_ticks_int, "I", 6081 "Receive coalescing ticks during interrupt"); 6082 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_ticks", 6083 CTLTYPE_INT | CTLFLAG_RW, 6084 sc, 0, bce_sysctl_rx_ticks, "I", 6085 "Receive coalescing ticks"); 6086 6087 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_rings", 6088 CTLFLAG_RD, &sc->rx_ring_cnt, 0, "# of RX rings"); 6089 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_pages", 6090 CTLFLAG_RD, &sc->rx_rings[0].rx_pages, 0, "# of RX pages"); 6091 6092 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_rings", 6093 CTLFLAG_RD, &sc->tx_ring_cnt, 0, "# of TX rings"); 6094 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_pages", 6095 CTLFLAG_RD, &sc->tx_rings[0].tx_pages, 0, "# of TX pages"); 6096 6097 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_wreg", 6098 CTLFLAG_RW, &sc->tx_rings[0].tx_wreg, 0, 6099 "# segments before write to hardware registers"); 6100 6101 #ifdef IFPOLL_ENABLE 6102 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "npoll_offset", 6103 CTLTYPE_INT|CTLFLAG_RW, sc, 0, bce_sysctl_npoll_offset, 6104 "I", "NPOLLING cpu offset"); 6105 #endif 6106 6107 #ifdef BCE_RSS_DEBUG 6108 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rss_debug", 6109 CTLFLAG_RW, &sc->rss_debug, 0, "RSS debug level"); 6110 for (i = 0; i < sc->rx_ring_cnt; ++i) { 6111 ksnprintf(node, sizeof(node), "rx%d_pkt", i); 6112 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, node, 6113 CTLFLAG_RW, &sc->rx_rings[i].rx_pkts, 6114 "RXed packets"); 6115 } 6116 #endif 6117 6118 #ifdef BCE_TSS_DEBUG 6119 for (i = 0; i < sc->tx_ring_cnt; ++i) { 6120 ksnprintf(node, sizeof(node), "tx%d_pkt", i); 6121 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, node, 6122 CTLFLAG_RW, &sc->tx_rings[i].tx_pkts, 6123 "TXed packets"); 6124 } 6125 #endif 6126 6127 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 6128 "stat_IfHCInOctets", 6129 
CTLFLAG_RD, &sc->stat_IfHCInOctets, 6130 "Bytes received"); 6131 6132 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 6133 "stat_IfHCInBadOctets", 6134 CTLFLAG_RD, &sc->stat_IfHCInBadOctets, 6135 "Bad bytes received"); 6136 6137 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 6138 "stat_IfHCOutOctets", 6139 CTLFLAG_RD, &sc->stat_IfHCOutOctets, 6140 "Bytes sent"); 6141 6142 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 6143 "stat_IfHCOutBadOctets", 6144 CTLFLAG_RD, &sc->stat_IfHCOutBadOctets, 6145 "Bad bytes sent"); 6146 6147 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 6148 "stat_IfHCInUcastPkts", 6149 CTLFLAG_RD, &sc->stat_IfHCInUcastPkts, 6150 "Unicast packets received"); 6151 6152 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 6153 "stat_IfHCInMulticastPkts", 6154 CTLFLAG_RD, &sc->stat_IfHCInMulticastPkts, 6155 "Multicast packets received"); 6156 6157 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 6158 "stat_IfHCInBroadcastPkts", 6159 CTLFLAG_RD, &sc->stat_IfHCInBroadcastPkts, 6160 "Broadcast packets received"); 6161 6162 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 6163 "stat_IfHCOutUcastPkts", 6164 CTLFLAG_RD, &sc->stat_IfHCOutUcastPkts, 6165 "Unicast packets sent"); 6166 6167 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 6168 "stat_IfHCOutMulticastPkts", 6169 CTLFLAG_RD, &sc->stat_IfHCOutMulticastPkts, 6170 "Multicast packets sent"); 6171 6172 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 6173 "stat_IfHCOutBroadcastPkts", 6174 CTLFLAG_RD, &sc->stat_IfHCOutBroadcastPkts, 6175 "Broadcast packets sent"); 6176 6177 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6178 "stat_emac_tx_stat_dot3statsinternalmactransmiterrors", 6179 CTLFLAG_RD, &sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors, 6180 0, "Internal MAC transmit errors"); 6181 6182 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6183 "stat_Dot3StatsCarrierSenseErrors", 6184 CTLFLAG_RD, &sc->stat_Dot3StatsCarrierSenseErrors, 6185 0, "Carrier sense errors"); 6186 6187 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6188 "stat_Dot3StatsFCSErrors", 6189 
CTLFLAG_RD, &sc->stat_Dot3StatsFCSErrors, 6190 0, "Frame check sequence errors"); 6191 6192 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6193 "stat_Dot3StatsAlignmentErrors", 6194 CTLFLAG_RD, &sc->stat_Dot3StatsAlignmentErrors, 6195 0, "Alignment errors"); 6196 6197 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6198 "stat_Dot3StatsSingleCollisionFrames", 6199 CTLFLAG_RD, &sc->stat_Dot3StatsSingleCollisionFrames, 6200 0, "Single Collision Frames"); 6201 6202 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6203 "stat_Dot3StatsMultipleCollisionFrames", 6204 CTLFLAG_RD, &sc->stat_Dot3StatsMultipleCollisionFrames, 6205 0, "Multiple Collision Frames"); 6206 6207 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6208 "stat_Dot3StatsDeferredTransmissions", 6209 CTLFLAG_RD, &sc->stat_Dot3StatsDeferredTransmissions, 6210 0, "Deferred Transmissions"); 6211 6212 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6213 "stat_Dot3StatsExcessiveCollisions", 6214 CTLFLAG_RD, &sc->stat_Dot3StatsExcessiveCollisions, 6215 0, "Excessive Collisions"); 6216 6217 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6218 "stat_Dot3StatsLateCollisions", 6219 CTLFLAG_RD, &sc->stat_Dot3StatsLateCollisions, 6220 0, "Late Collisions"); 6221 6222 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6223 "stat_EtherStatsCollisions", 6224 CTLFLAG_RD, &sc->stat_EtherStatsCollisions, 6225 0, "Collisions"); 6226 6227 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6228 "stat_EtherStatsFragments", 6229 CTLFLAG_RD, &sc->stat_EtherStatsFragments, 6230 0, "Fragments"); 6231 6232 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6233 "stat_EtherStatsJabbers", 6234 CTLFLAG_RD, &sc->stat_EtherStatsJabbers, 6235 0, "Jabbers"); 6236 6237 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6238 "stat_EtherStatsUndersizePkts", 6239 CTLFLAG_RD, &sc->stat_EtherStatsUndersizePkts, 6240 0, "Undersize packets"); 6241 6242 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6243 "stat_EtherStatsOverrsizePkts", 6244 CTLFLAG_RD, &sc->stat_EtherStatsOverrsizePkts, 6245 0, "stat_EtherStatsOverrsizePkts"); 6246 6247 
SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6248 "stat_EtherStatsPktsRx64Octets", 6249 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx64Octets, 6250 0, "Bytes received in 64 byte packets"); 6251 6252 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6253 "stat_EtherStatsPktsRx65Octetsto127Octets", 6254 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx65Octetsto127Octets, 6255 0, "Bytes received in 65 to 127 byte packets"); 6256 6257 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6258 "stat_EtherStatsPktsRx128Octetsto255Octets", 6259 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx128Octetsto255Octets, 6260 0, "Bytes received in 128 to 255 byte packets"); 6261 6262 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6263 "stat_EtherStatsPktsRx256Octetsto511Octets", 6264 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx256Octetsto511Octets, 6265 0, "Bytes received in 256 to 511 byte packets"); 6266 6267 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6268 "stat_EtherStatsPktsRx512Octetsto1023Octets", 6269 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx512Octetsto1023Octets, 6270 0, "Bytes received in 512 to 1023 byte packets"); 6271 6272 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6273 "stat_EtherStatsPktsRx1024Octetsto1522Octets", 6274 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1024Octetsto1522Octets, 6275 0, "Bytes received in 1024 t0 1522 byte packets"); 6276 6277 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6278 "stat_EtherStatsPktsRx1523Octetsto9022Octets", 6279 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1523Octetsto9022Octets, 6280 0, "Bytes received in 1523 to 9022 byte packets"); 6281 6282 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6283 "stat_EtherStatsPktsTx64Octets", 6284 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx64Octets, 6285 0, "Bytes sent in 64 byte packets"); 6286 6287 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6288 "stat_EtherStatsPktsTx65Octetsto127Octets", 6289 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx65Octetsto127Octets, 6290 0, "Bytes sent in 65 to 127 byte packets"); 6291 6292 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6293 
"stat_EtherStatsPktsTx128Octetsto255Octets", 6294 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx128Octetsto255Octets, 6295 0, "Bytes sent in 128 to 255 byte packets"); 6296 6297 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6298 "stat_EtherStatsPktsTx256Octetsto511Octets", 6299 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx256Octetsto511Octets, 6300 0, "Bytes sent in 256 to 511 byte packets"); 6301 6302 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6303 "stat_EtherStatsPktsTx512Octetsto1023Octets", 6304 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx512Octetsto1023Octets, 6305 0, "Bytes sent in 512 to 1023 byte packets"); 6306 6307 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6308 "stat_EtherStatsPktsTx1024Octetsto1522Octets", 6309 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1024Octetsto1522Octets, 6310 0, "Bytes sent in 1024 to 1522 byte packets"); 6311 6312 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6313 "stat_EtherStatsPktsTx1523Octetsto9022Octets", 6314 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1523Octetsto9022Octets, 6315 0, "Bytes sent in 1523 to 9022 byte packets"); 6316 6317 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6318 "stat_XonPauseFramesReceived", 6319 CTLFLAG_RD, &sc->stat_XonPauseFramesReceived, 6320 0, "XON pause frames receved"); 6321 6322 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6323 "stat_XoffPauseFramesReceived", 6324 CTLFLAG_RD, &sc->stat_XoffPauseFramesReceived, 6325 0, "XOFF pause frames received"); 6326 6327 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6328 "stat_OutXonSent", 6329 CTLFLAG_RD, &sc->stat_OutXonSent, 6330 0, "XON pause frames sent"); 6331 6332 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6333 "stat_OutXoffSent", 6334 CTLFLAG_RD, &sc->stat_OutXoffSent, 6335 0, "XOFF pause frames sent"); 6336 6337 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6338 "stat_FlowControlDone", 6339 CTLFLAG_RD, &sc->stat_FlowControlDone, 6340 0, "Flow control done"); 6341 6342 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6343 "stat_MacControlFramesReceived", 6344 CTLFLAG_RD, &sc->stat_MacControlFramesReceived, 6345 0, 
"MAC control frames received"); 6346 6347 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6348 "stat_XoffStateEntered", 6349 CTLFLAG_RD, &sc->stat_XoffStateEntered, 6350 0, "XOFF state entered"); 6351 6352 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6353 "stat_IfInFramesL2FilterDiscards", 6354 CTLFLAG_RD, &sc->stat_IfInFramesL2FilterDiscards, 6355 0, "Received L2 packets discarded"); 6356 6357 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6358 "stat_IfInRuleCheckerDiscards", 6359 CTLFLAG_RD, &sc->stat_IfInRuleCheckerDiscards, 6360 0, "Received packets discarded by rule"); 6361 6362 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6363 "stat_IfInFTQDiscards", 6364 CTLFLAG_RD, &sc->stat_IfInFTQDiscards, 6365 0, "Received packet FTQ discards"); 6366 6367 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6368 "stat_IfInMBUFDiscards", 6369 CTLFLAG_RD, &sc->stat_IfInMBUFDiscards, 6370 0, "Received packets discarded due to lack of controller buffer memory"); 6371 6372 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6373 "stat_IfInRuleCheckerP4Hit", 6374 CTLFLAG_RD, &sc->stat_IfInRuleCheckerP4Hit, 6375 0, "Received packets rule checker hits"); 6376 6377 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6378 "stat_CatchupInRuleCheckerDiscards", 6379 CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerDiscards, 6380 0, "Received packets discarded in Catchup path"); 6381 6382 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6383 "stat_CatchupInFTQDiscards", 6384 CTLFLAG_RD, &sc->stat_CatchupInFTQDiscards, 6385 0, "Received packets discarded in FTQ in Catchup path"); 6386 6387 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6388 "stat_CatchupInMBUFDiscards", 6389 CTLFLAG_RD, &sc->stat_CatchupInMBUFDiscards, 6390 0, "Received packets discarded in controller buffer memory in Catchup path"); 6391 6392 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6393 "stat_CatchupInRuleCheckerP4Hit", 6394 CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerP4Hit, 6395 0, "Received packets rule checker hits in Catchup path"); 6396 6397 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6398 
"com_no_buffers", 6399 CTLFLAG_RD, &sc->com_no_buffers, 6400 0, "Valid packets received but no RX buffers available"); 6401 } 6402 6403 static int 6404 bce_sysctl_tx_bds_int(SYSCTL_HANDLER_ARGS) 6405 { 6406 struct bce_softc *sc = arg1; 6407 6408 return bce_sysctl_coal_change(oidp, arg1, arg2, req, 6409 &sc->bce_tx_quick_cons_trip_int, 6410 BCE_COALMASK_TX_BDS_INT); 6411 } 6412 6413 static int 6414 bce_sysctl_tx_bds(SYSCTL_HANDLER_ARGS) 6415 { 6416 struct bce_softc *sc = arg1; 6417 6418 return bce_sysctl_coal_change(oidp, arg1, arg2, req, 6419 &sc->bce_tx_quick_cons_trip, 6420 BCE_COALMASK_TX_BDS); 6421 } 6422 6423 static int 6424 bce_sysctl_tx_ticks_int(SYSCTL_HANDLER_ARGS) 6425 { 6426 struct bce_softc *sc = arg1; 6427 6428 return bce_sysctl_coal_change(oidp, arg1, arg2, req, 6429 &sc->bce_tx_ticks_int, 6430 BCE_COALMASK_TX_TICKS_INT); 6431 } 6432 6433 static int 6434 bce_sysctl_tx_ticks(SYSCTL_HANDLER_ARGS) 6435 { 6436 struct bce_softc *sc = arg1; 6437 6438 return bce_sysctl_coal_change(oidp, arg1, arg2, req, 6439 &sc->bce_tx_ticks, 6440 BCE_COALMASK_TX_TICKS); 6441 } 6442 6443 static int 6444 bce_sysctl_rx_bds_int(SYSCTL_HANDLER_ARGS) 6445 { 6446 struct bce_softc *sc = arg1; 6447 6448 return bce_sysctl_coal_change(oidp, arg1, arg2, req, 6449 &sc->bce_rx_quick_cons_trip_int, 6450 BCE_COALMASK_RX_BDS_INT); 6451 } 6452 6453 static int 6454 bce_sysctl_rx_bds(SYSCTL_HANDLER_ARGS) 6455 { 6456 struct bce_softc *sc = arg1; 6457 6458 return bce_sysctl_coal_change(oidp, arg1, arg2, req, 6459 &sc->bce_rx_quick_cons_trip, 6460 BCE_COALMASK_RX_BDS); 6461 } 6462 6463 static int 6464 bce_sysctl_rx_ticks_int(SYSCTL_HANDLER_ARGS) 6465 { 6466 struct bce_softc *sc = arg1; 6467 6468 return bce_sysctl_coal_change(oidp, arg1, arg2, req, 6469 &sc->bce_rx_ticks_int, 6470 BCE_COALMASK_RX_TICKS_INT); 6471 } 6472 6473 static int 6474 bce_sysctl_rx_ticks(SYSCTL_HANDLER_ARGS) 6475 { 6476 struct bce_softc *sc = arg1; 6477 6478 return bce_sysctl_coal_change(oidp, arg1, arg2, req, 6479 
	    &sc->bce_rx_ticks,
	    BCE_COALMASK_RX_TICKS);
}

/*
 * Common handler for all coalescing sysctls: read/modify one coalescing
 * parameter under the ifnet serializers, reject negative values, and
 * push the change to hardware via bce_coal_change().
 */
static int
bce_sysctl_coal_change(SYSCTL_HANDLER_ARGS, uint32_t *coal,
    uint32_t coalchg_mask)
{
	struct bce_softc *sc = arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error = 0, v;

	ifnet_serialize_all(ifp);

	v = *coal;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (!error && req->newptr != NULL) {
		if (v < 0) {
			error = EINVAL;
		} else {
			*coal = v;
			sc->bce_coalchg_mask |= coalchg_mask;

			/* Commit changes */
			bce_coal_change(sc);
		}
	}

	ifnet_deserialize_all(ifp);
	return error;
}

/*
 * Write the pending coalescing parameters (recorded in bce_coalchg_mask)
 * into the host coalescing registers of the default status block and of
 * every additional MSI-X status block, then clear the pending mask.
 * If the interface is down only the mask is cleared; bce_init() programs
 * the registers on the next up.
 */
static void
bce_coal_change(struct bce_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int i;

	ASSERT_SERIALIZED(&sc->main_serialize);

	if ((ifp->if_flags & IFF_RUNNING) == 0) {
		sc->bce_coalchg_mask = 0;
		return;
	}

	if (sc->bce_coalchg_mask &
	    (BCE_COALMASK_TX_BDS | BCE_COALMASK_TX_BDS_INT)) {
		/* Per-register layout: "during interrupt" value in the
		 * high 16 bits, normal value in the low 16 bits. */
		REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
		    (sc->bce_tx_quick_cons_trip_int << 16) |
		    sc->bce_tx_quick_cons_trip);
		for (i = 1; i < sc->rx_ring_cnt; ++i) {
			uint32_t base;

			base = ((i - 1) * BCE_HC_SB_CONFIG_SIZE) +
			    BCE_HC_SB_CONFIG_1;
			REG_WR(sc, base + BCE_HC_TX_QUICK_CONS_TRIP_OFF,
			    (sc->bce_tx_quick_cons_trip_int << 16) |
			    sc->bce_tx_quick_cons_trip);
		}
		if (bootverbose) {
			if_printf(ifp, "tx_bds %u, tx_bds_int %u\n",
			    sc->bce_tx_quick_cons_trip,
			    sc->bce_tx_quick_cons_trip_int);
		}
	}

	if (sc->bce_coalchg_mask &
	    (BCE_COALMASK_TX_TICKS | BCE_COALMASK_TX_TICKS_INT)) {
		REG_WR(sc, BCE_HC_TX_TICKS,
		    (sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks);
		for (i = 1; i < sc->rx_ring_cnt; ++i) {
			uint32_t base;

			base = ((i - 1) * BCE_HC_SB_CONFIG_SIZE) +
			    BCE_HC_SB_CONFIG_1;
			REG_WR(sc, base + BCE_HC_TX_TICKS_OFF,
			    (sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks);
		}
		if (bootverbose) {
			if_printf(ifp, "tx_ticks %u, tx_ticks_int %u\n",
			    sc->bce_tx_ticks, sc->bce_tx_ticks_int);
		}
	}

	if (sc->bce_coalchg_mask &
	    (BCE_COALMASK_RX_BDS | BCE_COALMASK_RX_BDS_INT)) {
		REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
		    (sc->bce_rx_quick_cons_trip_int << 16) |
		    sc->bce_rx_quick_cons_trip);
		for (i = 1; i < sc->rx_ring_cnt; ++i) {
			uint32_t base;

			base = ((i - 1) * BCE_HC_SB_CONFIG_SIZE) +
			    BCE_HC_SB_CONFIG_1;
			REG_WR(sc, base + BCE_HC_RX_QUICK_CONS_TRIP_OFF,
			    (sc->bce_rx_quick_cons_trip_int << 16) |
			    sc->bce_rx_quick_cons_trip);
		}
		if (bootverbose) {
			if_printf(ifp, "rx_bds %u, rx_bds_int %u\n",
			    sc->bce_rx_quick_cons_trip,
			    sc->bce_rx_quick_cons_trip_int);
		}
	}

	if (sc->bce_coalchg_mask &
	    (BCE_COALMASK_RX_TICKS | BCE_COALMASK_RX_TICKS_INT)) {
		REG_WR(sc, BCE_HC_RX_TICKS,
		    (sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks);
		for (i = 1; i < sc->rx_ring_cnt; ++i) {
			uint32_t base;

			base = ((i - 1) * BCE_HC_SB_CONFIG_SIZE) +
			    BCE_HC_SB_CONFIG_1;
			REG_WR(sc, base + BCE_HC_RX_TICKS_OFF,
			    (sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks);
		}
		if (bootverbose) {
			if_printf(ifp, "rx_ticks %u, rx_ticks_int %u\n",
			    sc->bce_rx_ticks, sc->bce_rx_ticks_int);
		}
	}

	sc->bce_coalchg_mask = 0;
}

/*
 * Prepare a TSO mbuf for transmission: validate the header lengths that
 * the stack recorded in the packet header, make the full header chain
 * contiguous, and compute the TX BD LSO flags and MSS.
 *
 * Returns 0 on success or ENOBUFS if m_pullup() fails (in which case
 * *mp is set to NULL and the mbuf has been freed by m_pullup()).
 */
static int
bce_tso_setup(struct bce_tx_ring *txr, struct mbuf **mp,
    uint16_t *flags0, uint16_t *mss0)
{
	struct mbuf *m;
	uint16_t flags;
	int thoff, iphlen, hoff;

	m = *mp;
	KASSERT(M_WRITABLE(m), ("TSO mbuf not writable"));

	hoff = m->m_pkthdr.csum_lhlen;
	iphlen = m->m_pkthdr.csum_iphlen;
	thoff = m->m_pkthdr.csum_thlen;

	KASSERT(hoff >= sizeof(struct ether_header),
	    ("invalid ether header len %d", hoff));
	KASSERT(iphlen >= sizeof(struct ip),
	    ("invalid ip header len %d", iphlen));
	KASSERT(thoff >= sizeof(struct tcphdr),
	    ("invalid tcp header len %d", thoff));

	if (__predict_false(m->m_len < hoff + iphlen + thoff)) {
		m = m_pullup(m, hoff + iphlen + thoff);
		if (m == NULL) {
			*mp = NULL;
			return ENOBUFS;
		}
		*mp = m;
	}

	/* Set the LSO flag in the TX BD */
	flags = TX_BD_FLAGS_SW_LSO;

	/* Set the length of IP + TCP options (in 32 bit words) */
	flags |= (((iphlen + thoff -
	    sizeof(struct ip) - sizeof(struct tcphdr)) >> 2) << 8);

	*mss0 = htole16(m->m_pkthdr.tso_segsz);
	*flags0 = flags;

	return 0;
}

/*
 * Build the serializer array used by the ifnet (de)serialize methods.
 * Layout is main serializer first, then all RX ring serializers, then
 * all TX ring serializers; the order is relied upon by
 * bce_serialize_skipmain()/bce_deserialize_skipmain().
 */
static void
bce_setup_serialize(struct bce_softc *sc)
{
	int i, j;

	/*
	 * Allocate serializer array
	 */

	/* Main + TX + RX */
	sc->serialize_cnt = 1 + sc->tx_ring_cnt + sc->rx_ring_cnt;

	sc->serializes =
	    kmalloc(sc->serialize_cnt * sizeof(struct lwkt_serialize *),
	        M_DEVBUF, M_WAITOK | M_ZERO);

	/*
	 * Setup serializers
	 *
	 * NOTE: Order is critical
	 */

	i = 0;

	KKASSERT(i < sc->serialize_cnt);
	sc->serializes[i++] = &sc->main_serialize;

	for (j = 0; j < sc->rx_ring_cnt; ++j) {
		KKASSERT(i < sc->serialize_cnt);
		sc->serializes[i++] = &sc->rx_rings[j].rx_serialize;
	}

	for (j = 0; j < sc->tx_ring_cnt; ++j) {
		KKASSERT(i < sc->serialize_cnt);
		sc->serializes[i++] = &sc->tx_rings[j].tx_serialize;
	}

	KKASSERT(i == sc->serialize_cnt);
}

/* ifnet if_serialize method: enter the serializer set selected by slz. */
static void
bce_serialize(struct ifnet *ifp, enum ifnet_serialize slz)
{
	struct bce_softc *sc = ifp->if_softc;

	ifnet_serialize_array_enter(sc->serializes, sc->serialize_cnt, slz);
}

/* ifnet if_deserialize method: leave the serializer set selected by slz. */
static void
bce_deserialize(struct ifnet *ifp, enum ifnet_serialize slz)
{
	struct bce_softc *sc = ifp->if_softc;

	ifnet_serialize_array_exit(sc->serializes, sc->serialize_cnt, slz);
}

/* ifnet if_tryserialize method: non-blocking variant of bce_serialize(). */
static int
bce_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz)
{
	struct bce_softc *sc = ifp->if_softc;

	return ifnet_serialize_array_try(sc->serializes, sc->serialize_cnt,
	    slz);
}

#ifdef INVARIANTS

/* Debug-only: assert (de)serialized state of the selected serializers. */
static void
bce_serialize_assert(struct ifnet *ifp, enum ifnet_serialize slz,
    boolean_t serialized)
{
	struct bce_softc *sc = ifp->if_softc;

	ifnet_serialize_array_assert(sc->serializes, sc->serialize_cnt,
	    slz, serialized);
}

#endif	/* INVARIANTS */

/*
 * Enter/exit every serializer except the first array entry (the main
 * serializer); used when the caller already holds main_serialize.
 */
static void
bce_serialize_skipmain(struct bce_softc *sc)
{
	lwkt_serialize_array_enter(sc->serializes, sc->serialize_cnt, 1);
}

static void
bce_deserialize_skipmain(struct bce_softc *sc)
{
	lwkt_serialize_array_exit(sc->serializes, sc->serialize_cnt, 1);
}

#ifdef IFPOLL_ENABLE

/*
 * Sysctl handler for the NPOLLING cpu offset; the new offset must be
 * non-negative, below ncpus2 and a multiple of the RSS RX ring count.
 */
static int
bce_sysctl_npoll_offset(SYSCTL_HANDLER_ARGS)
{
	struct bce_softc *sc = (void *)arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error, off;

	off = sc->npoll_ofs;
	error = sysctl_handle_int(oidp, &off, 0, req);
	if (error || req->newptr == NULL)
		return error;
	if (off < 0)
		return EINVAL;

	ifnet_serialize_all(ifp);
	if (off >= ncpus2 || off % sc->rx_ring_cnt2 != 0) {
		error = EINVAL;
	} else {
		error = 0;
		sc->npoll_ofs = off;
	}
	ifnet_deserialize_all(ifp);

	return error;
}

#endif	/* IFPOLL_ENABLE */

/*
 * Select the cpu on which the tick callout runs: cpu0 while polling,
 * otherwise the cpu servicing the first interrupt vector.
 */
static void
bce_set_timer_cpuid(struct bce_softc *sc, boolean_t polling)
{
	if (polling)
		sc->bce_timer_cpuid = 0; /* XXX */
	else
		sc->bce_timer_cpuid = sc->bce_msix[0].msix_cpuid;
}

/*
 * Allocate the interrupt resources: try MSI-X first, then fall back to
 * one MSI or legacy INTx interrupt.  Returns 0 or ENXIO.
 */
static int
bce_alloc_intr(struct bce_softc *sc)
{
	u_int irq_flags;

	bce_try_alloc_msix(sc);
	if (sc->bce_irq_type == PCI_INTR_TYPE_MSIX)
		return 0;

	sc->bce_irq_type = pci_alloc_1intr(sc->bce_dev, bce_msi_enable,
	    &sc->bce_irq_rid, &irq_flags);

	sc->bce_res_irq = bus_alloc_resource_any(sc->bce_dev, SYS_RES_IRQ,
	    &sc->bce_irq_rid, irq_flags);
	if (sc->bce_res_irq == NULL) {
		device_printf(sc->bce_dev, "PCI map interrupt failed\n");
		return ENXIO;
	}
	sc->bce_msix[0].msix_cpuid = rman_get_cpuid(sc->bce_res_irq);
	sc->bce_msix[0].msix_serialize = &sc->main_serialize;

	return 0;
}

/*
 * Attempt to set up MSI-X: bind vector 0 (first RX/TX ring pair) to the
 * main serializer and each further vector to its RX (or RX/TX) ring,
 * program the chip's MSI-X table windows, then allocate one vector per
 * ring on its target cpu.  On any failure everything is released and the
 * caller falls back to MSI/legacy (bce_irq_type is left unchanged).
 */
static void
bce_try_alloc_msix(struct bce_softc *sc)
{
	struct bce_msix_data *msix;
	int offset, i, error;
	boolean_t setup = FALSE;

	if (sc->rx_ring_cnt == 1)
		return;

	if (sc->rx_ring_cnt2 == ncpus2) {
		offset = 0;
	} else {
		int offset_def =
		    (sc->rx_ring_cnt2 * device_get_unit(sc->bce_dev)) % ncpus2;

		offset = device_getenv_int(sc->bce_dev,
		    "msix.offset", offset_def);
		if (offset >= ncpus2 || offset % sc->rx_ring_cnt2 != 0) {
			device_printf(sc->bce_dev,
			    "invalid msix.offset %d, use %d\n",
			    offset, offset_def);
			offset = offset_def;
		}
	}

	msix = &sc->bce_msix[0];
	msix->msix_serialize = &sc->main_serialize;
	msix->msix_func = bce_intr_msi_oneshot;
	msix->msix_arg = sc;
	KKASSERT(offset < ncpus2);
	msix->msix_cpuid = offset;
	ksnprintf(msix->msix_desc, sizeof(msix->msix_desc), "%s combo",
	    device_get_nameunit(sc->bce_dev));

	for (i = 1; i < sc->rx_ring_cnt; ++i) {
		struct bce_rx_ring *rxr = &sc->rx_rings[i];

		msix = &sc->bce_msix[i];

		msix->msix_serialize = &rxr->rx_serialize;
		msix->msix_arg = rxr;
		msix->msix_cpuid = offset + (i % sc->rx_ring_cnt2);
		KKASSERT(msix->msix_cpuid < ncpus2);

		if (i < sc->tx_ring_cnt) {
			/* Ring pair: this vector handles both RX and TX. */
			msix->msix_func = bce_intr_msix_rxtx;
			ksnprintf(msix->msix_desc, sizeof(msix->msix_desc),
			    "%s rxtx%d", device_get_nameunit(sc->bce_dev), i);
		} else {
			msix->msix_func = bce_intr_msix_rx;
			ksnprintf(msix->msix_desc, sizeof(msix->msix_desc),
			    "%s rx%d", device_get_nameunit(sc->bce_dev), i);
		}
	}

	/*
	 * Setup MSI-X table
	 */
	bce_setup_msix_table(sc);
	REG_WR(sc, BCE_PCI_MSIX_CONTROL, BCE_MSIX_MAX - 1);
	REG_WR(sc, BCE_PCI_MSIX_TBL_OFF_BIR, BCE_PCI_GRC_WINDOW2_BASE);
	REG_WR(sc, BCE_PCI_MSIX_PBA_OFF_BIT, BCE_PCI_GRC_WINDOW3_BASE);
	/* Flush */
	REG_RD(sc, BCE_PCI_MSIX_CONTROL);

	error = pci_setup_msix(sc->bce_dev);
	if (error) {
		device_printf(sc->bce_dev, "Setup MSI-X failed\n");
		goto back;
	}
	setup = TRUE;

	for (i = 0; i < sc->rx_ring_cnt; ++i) {
		msix = &sc->bce_msix[i];

		error = pci_alloc_msix_vector(sc->bce_dev, i, &msix->msix_rid,
		    msix->msix_cpuid);
		if (error) {
			device_printf(sc->bce_dev,
			    "Unable to allocate MSI-X %d on cpu%d\n",
			    i, msix->msix_cpuid);
			goto back;
		}

		msix->msix_res = bus_alloc_resource_any(sc->bce_dev,
		    SYS_RES_IRQ, &msix->msix_rid, RF_ACTIVE);
		if (msix->msix_res == NULL) {
			device_printf(sc->bce_dev,
			    "Unable to allocate MSI-X %d resource\n", i);
			error = ENOMEM;
			goto back;
		}
	}

	pci_enable_msix(sc->bce_dev);
	sc->bce_irq_type = PCI_INTR_TYPE_MSIX;
back:
	if (error)
		bce_free_msix(sc, setup);
}

/*
 * Decide the RX/TX ring counts.  Single ring unless the chip is a
 * 5709/5716, MSI-X is enabled and more than one cpu/vector is available;
 * otherwise derive power-of-2 ring counts from the MSI-X vector count,
 * cpu count and "rx_rings"/"tx_rings" tunables.
 */
static void
bce_setup_ring_cnt(struct bce_softc *sc)
{
	int msix_enable, ring_max, msix_cnt2, msix_cnt, i;

	sc->rx_ring_cnt = 1;
	sc->rx_ring_cnt2 = 1;
	sc->tx_ring_cnt = 1;

	if (BCE_CHIP_NUM(sc) != BCE_CHIP_NUM_5709 &&
	    BCE_CHIP_NUM(sc) != BCE_CHIP_NUM_5716)
		return;

	msix_enable = device_getenv_int(sc->bce_dev, "msix.enable",
	    bce_msix_enable);
	if (!msix_enable)
		return;

	if (ncpus2 == 1)
		return;

	msix_cnt =
	    pci_msix_count(sc->bce_dev);
	if (msix_cnt <= 1)
		return;

	/* Largest power of 2 that is <= msix_cnt. */
	i = 0;
	while ((1 << (i + 1)) <= msix_cnt)
		++i;
	msix_cnt2 = 1 << i;

	/*
	 * One extra RX ring will be needed (see below), so make sure
	 * that there are enough MSI-X vectors.
	 */
	if (msix_cnt == msix_cnt2) {
		/*
		 * XXX
		 * This probably will not happen; 5709/5716
		 * come with 9 MSI-X vectors.
		 */
		msix_cnt2 >>= 1;
		if (msix_cnt2 <= 1) {
			device_printf(sc->bce_dev,
			    "MSI-X count %d could not be used\n", msix_cnt);
			return;
		}
		device_printf(sc->bce_dev, "MSI-X count %d is power of 2\n",
		    msix_cnt);
	}

	/*
	 * Setup RX ring count
	 */
	ring_max = BCE_RX_RING_MAX;
	if (ring_max > msix_cnt2)
		ring_max = msix_cnt2;
	sc->rx_ring_cnt2 = device_getenv_int(sc->bce_dev, "rx_rings",
	    bce_rx_rings);
	sc->rx_ring_cnt2 = if_ring_count2(sc->rx_ring_cnt2, ring_max);

	/*
	 * Don't use MSI-X, if the effective RX ring count is 1.
	 * Since if the effective RX ring count is 1, the TX ring
	 * count will be 1.  This RX ring and the TX ring must be
	 * bundled into one MSI-X vector, so the hot path will be
	 * exactly the same as using MSI.  Besides, the first RX ring
	 * must be fully populated, which only accepts packets whose
	 * RSS hash can't be calculated, e.g. ARP packets; a waste of
	 * resources at least.
	 */
	if (sc->rx_ring_cnt2 == 1)
		return;

	/*
	 * One extra RX ring is allocated, since the first RX ring
	 * could not be used for RSS hashed packets whose masked
	 * hash is 0.  The first RX ring is only used for packets
	 * whose RSS hash could not be calculated, e.g. ARP packets.
	 * This extra RX ring will be used for packets whose masked
	 * hash is 0.  The effective RX ring count involved in RSS
	 * is still sc->rx_ring_cnt2.
	 */
	KKASSERT(sc->rx_ring_cnt2 + 1 <= msix_cnt);
	sc->rx_ring_cnt = sc->rx_ring_cnt2 + 1;

	/*
	 * Setup TX ring count
	 *
	 * NOTE:
	 * TX ring count must be less than the effective RSS RX ring
	 * count, since we use RX ring software data struct to save
	 * status index and various other MSI-X related stuffs.
	 */
	ring_max = BCE_TX_RING_MAX;
	if (ring_max > msix_cnt2)
		ring_max = msix_cnt2;
	if (ring_max > sc->rx_ring_cnt2)
		ring_max = sc->rx_ring_cnt2;
	sc->tx_ring_cnt = device_getenv_int(sc->bce_dev, "tx_rings",
	    bce_tx_rings);
	sc->tx_ring_cnt = if_ring_count2(sc->tx_ring_cnt, ring_max);
}

/*
 * Release all MSI-X vectors and their bus resources; if 'setup' the
 * MSI-X state established by pci_setup_msix() is torn down as well.
 */
static void
bce_free_msix(struct bce_softc *sc, boolean_t setup)
{
	int i;

	KKASSERT(sc->rx_ring_cnt > 1);

	for (i = 0; i < sc->rx_ring_cnt; ++i) {
		struct bce_msix_data *msix = &sc->bce_msix[i];

		if (msix->msix_res != NULL) {
			bus_release_resource(sc->bce_dev, SYS_RES_IRQ,
			    msix->msix_rid, msix->msix_res);
		}
		if (msix->msix_rid >= 0)
			pci_release_msix_vector(sc->bce_dev, msix->msix_rid);
	}
	if (setup)
		pci_teardown_msix(sc->bce_dev);
}

/*
 * Release whatever interrupt resources bce_alloc_intr() obtained
 * (single MSI/legacy IRQ, or the whole MSI-X set).
 */
static void
bce_free_intr(struct bce_softc *sc)
{
	if (sc->bce_irq_type != PCI_INTR_TYPE_MSIX) {
		if (sc->bce_res_irq != NULL) {
			bus_release_resource(sc->bce_dev, SYS_RES_IRQ,
			    sc->bce_irq_rid, sc->bce_res_irq);
		}
		if (sc->bce_irq_type == PCI_INTR_TYPE_MSI)
			pci_release_msi(sc->bce_dev);
	} else {
		bce_free_msix(sc, TRUE);
	}
}

/*
 * Map the chip's MSI-X table and PBA into GRC windows 2 and 3 so they
 * are reachable through BAR0.
 */
static void
bce_setup_msix_table(struct bce_softc *sc)
{
	REG_WR(sc, BCE_PCI_GRC_WINDOW_ADDR, BCE_PCI_GRC_WINDOW_ADDR_SEP_WIN);
	REG_WR(sc, BCE_PCI_GRC_WINDOW2_ADDR, BCE_MSIX_TABLE_ADDR);
	REG_WR(sc, BCE_PCI_GRC_WINDOW3_ADDR, BCE_MSIX_PBA_ADDR);
}

/*
 * Install the interrupt handler(s).  For MSI-X this defers to
 * bce_setup_msix(); otherwise the handler matching the interrupt type
 * (legacy, one-shot MSI on 5709/5716, or checked MSI) is hooked up on
 * the single IRQ under the main serializer.
 */
static int
bce_setup_intr(struct bce_softc *sc)
{
	void (*irq_handle)(void *);
	int error;

	if (sc->bce_irq_type == PCI_INTR_TYPE_MSIX)
		return bce_setup_msix(sc);

	if (sc->bce_irq_type == PCI_INTR_TYPE_LEGACY) {
		irq_handle = bce_intr_legacy;
	} else if (sc->bce_irq_type == PCI_INTR_TYPE_MSI) {
		if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
		    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
			irq_handle = bce_intr_msi_oneshot;
			sc->bce_flags |= BCE_ONESHOT_MSI_FLAG;
		} else {
			/* Older chips need the lost-MSI check callout. */
			irq_handle = bce_intr_msi;
			sc->bce_flags |= BCE_CHECK_MSI_FLAG;
		}
	} else {
		panic("%s: unsupported intr type %d",
		    device_get_nameunit(sc->bce_dev), sc->bce_irq_type);
	}

	error = bus_setup_intr(sc->bce_dev, sc->bce_res_irq, INTR_MPSAFE,
	    irq_handle, sc, &sc->bce_intrhand, &sc->main_serialize);
	if (error != 0) {
		device_printf(sc->bce_dev, "Failed to setup IRQ!\n");
		return error;
	}

	return 0;
}

/* Remove the installed interrupt handler(s). */
static void
bce_teardown_intr(struct bce_softc *sc)
{
	if (sc->bce_irq_type != PCI_INTR_TYPE_MSIX)
		bus_teardown_intr(sc->bce_dev, sc->bce_res_irq, sc->bce_intrhand);
	else
		bce_teardown_msix(sc, sc->rx_ring_cnt);
}

/*
 * Hook up one handler per MSI-X vector, each under its own serializer.
 * On failure the vectors already set up are torn down again.
 */
static int
bce_setup_msix(struct bce_softc *sc)
{
	int i;

	for (i = 0; i < sc->rx_ring_cnt; ++i) {
		struct bce_msix_data *msix = &sc->bce_msix[i];
		int error;

		error = bus_setup_intr_descr(sc->bce_dev, msix->msix_res,
		    INTR_MPSAFE, msix->msix_func, msix->msix_arg,
		    &msix->msix_handle, msix->msix_serialize, msix->msix_desc);
		if (error) {
			device_printf(sc->bce_dev, "could not set up %s "
			    "interrupt handler.\n", msix->msix_desc);
			bce_teardown_msix(sc, i);
			return error;
		}
	}
	return 0;
}

/* Remove the handlers of the first msix_cnt MSI-X vectors. */
static void
bce_teardown_msix(struct bce_softc *sc, int msix_cnt)
{
	int i;

	for (i = 0; i < msix_cnt; ++i) {
		struct bce_msix_data *msix = &sc->bce_msix[i];

		bus_teardown_intr(sc->bce_dev, msix->msix_res,
		    msix->msix_handle);
	}
}

/*
 * Program the RSS Toeplitz keys and the indirection (redirect) table,
 * then enable IPv4 RSS.  Only called when multiple RX rings are in use.
 */
static void
bce_init_rss(struct bce_softc *sc)
{
	uint8_t key[BCE_RLUP_RSS_KEY_CNT * BCE_RLUP_RSS_KEY_SIZE];
	uint32_t tbl = 0;
	int i;

	KKASSERT(sc->rx_ring_cnt > 2);

	/*
	 * Configure RSS keys
	 */
	toeplitz_get_key(key, sizeof(key));
	for (i = 0; i < BCE_RLUP_RSS_KEY_CNT; ++i) {
		uint32_t rss_key;

		rss_key = BCE_RLUP_RSS_KEYVAL(key, i);
		BCE_RSS_DPRINTF(sc, 1, "rss_key%d 0x%08x\n", i, rss_key);

		REG_WR(sc, BCE_RLUP_RSS_KEY(i), rss_key);
	}

	/*
	 * Configure the redirect table
	 *
	 * NOTE:
	 * - The "queue ID" in the redirect table is the software RX ring's
	 *   index _minus_ one.
	 * - The last RX ring, whose "queue ID" is (sc->rx_ring_cnt - 2),
	 *   will be used for packets whose masked hash is 0.
	 *   (see also: comment in bce_setup_ring_cnt())
	 *
	 * The redirect table is configured in the following fashion, except
	 * for the masked hash 0, which is noted above:
	 * (hash & ring_cnt_mask) == rdr_table[(hash & rdr_table_mask)]
	 */
	for (i = 0; i < BCE_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
		/* Eight 4-bit queue IDs are packed into each 32-bit word. */
		int shift = (i % 8) << 2, qid;

		qid = i % sc->rx_ring_cnt2;
		if (qid > 0)
			--qid;
		else
			qid = sc->rx_ring_cnt - 2;
		KKASSERT(qid < (sc->rx_ring_cnt - 1));

		tbl |= qid << shift;
		if (i % 8 == 7) {
			BCE_RSS_DPRINTF(sc, 1, "tbl 0x%08x\n", tbl);
			REG_WR(sc, BCE_RLUP_RSS_DATA, tbl);
			REG_WR(sc, BCE_RLUP_RSS_COMMAND, (i >> 3) |
			    BCE_RLUP_RSS_COMMAND_RSS_WRITE_MASK |
			    BCE_RLUP_RSS_COMMAND_WRITE |
			    BCE_RLUP_RSS_COMMAND_HASH_MASK);
			tbl = 0;
		}
	}
	REG_WR(sc, BCE_RLUP_RSS_CONFIG,
	    BCE_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI);
}

/*
 * Temporarily force the "during interrupt" RX/TX BD coalescing trips to
 * 1 for polling mode, commit them to hardware, then restore the saved
 * values in the softc (the softc keeps the user-configured settings).
 */
static void
bce_npoll_coal_change(struct bce_softc *sc)
{
	uint32_t old_rx_cons, old_tx_cons;

	old_rx_cons = sc->bce_rx_quick_cons_trip_int;
	old_tx_cons = sc->bce_tx_quick_cons_trip_int;
	sc->bce_rx_quick_cons_trip_int = 1;
	sc->bce_tx_quick_cons_trip_int = 1;

	sc->bce_coalchg_mask |= BCE_COALMASK_TX_BDS_INT |
	    BCE_COALMASK_RX_BDS_INT;
	bce_coal_change(sc);

	sc->bce_rx_quick_cons_trip_int = old_rx_cons;
	sc->bce_tx_quick_cons_trip_int = old_tx_cons;
}

/*
 * Fill in the packet info used for RSS hash verification from the RX
 * status word and the L2 frame header.  Returns pi on a valid TCP/UDP
 * over IPv4 frame with good checksums, otherwise NULL.
 */
static struct pktinfo *
bce_rss_pktinfo(struct pktinfo *pi, uint32_t status,
    const struct l2_fhdr *l2fhdr)
{
	/* Check for an IP datagram. */
	if ((status & L2_FHDR_STATUS_IP_DATAGRAM) == 0)
		return NULL;

	/* Check if the IP checksum is valid. */
	if (l2fhdr->l2_fhdr_ip_xsum != 0xffff)
		return NULL;

	/* Check for a valid TCP/UDP frame. */
	if (status & L2_FHDR_STATUS_TCP_SEGMENT) {
		if (status & L2_FHDR_ERRORS_TCP_XSUM)
			return NULL;
		if (l2fhdr->l2_fhdr_tcp_udp_xsum != 0xffff)
			return NULL;
		pi->pi_l3proto = IPPROTO_TCP;
	} else if (status & L2_FHDR_STATUS_UDP_DATAGRAM) {
		if (status & L2_FHDR_ERRORS_UDP_XSUM)
			return NULL;
		if (l2fhdr->l2_fhdr_tcp_udp_xsum != 0xffff)
			return NULL;
		pi->pi_l3proto = IPPROTO_UDP;
	} else {
		return NULL;
	}
	pi->pi_netisr = NETISR_IP;
	pi->pi_flags = 0;

	return pi;
}