/*-
 * Copyright (c) 2006-2007 Broadcom Corporation
 *	David Christensen <davidch@broadcom.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Broadcom Corporation nor the name of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written consent.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
29 * 30 * $FreeBSD: src/sys/dev/bce/if_bce.c,v 1.31 2007/05/16 23:34:11 davidch Exp $ 31 */ 32 33 /* 34 * The following controllers are supported by this driver: 35 * BCM5706C A2, A3 36 * BCM5706S A2, A3 37 * BCM5708C B1, B2 38 * BCM5708S B1, B2 39 * BCM5709C A1, C0 40 * BCM5716 C0 41 * 42 * The following controllers are not supported by this driver: 43 * BCM5706C A0, A1 44 * BCM5706S A0, A1 45 * BCM5708C A0, B0 46 * BCM5708S A0, B0 47 * BCM5709C A0, B0, B1 48 * BCM5709S A0, A1, B0, B1, B2, C0 49 */ 50 51 #include "opt_bce.h" 52 #include "opt_polling.h" 53 54 #include <sys/param.h> 55 #include <sys/bus.h> 56 #include <sys/endian.h> 57 #include <sys/kernel.h> 58 #include <sys/interrupt.h> 59 #include <sys/mbuf.h> 60 #include <sys/malloc.h> 61 #include <sys/queue.h> 62 #ifdef BCE_DEBUG 63 #include <sys/random.h> 64 #endif 65 #include <sys/rman.h> 66 #include <sys/serialize.h> 67 #include <sys/socket.h> 68 #include <sys/sockio.h> 69 #include <sys/sysctl.h> 70 71 #include <net/bpf.h> 72 #include <net/ethernet.h> 73 #include <net/if.h> 74 #include <net/if_arp.h> 75 #include <net/if_dl.h> 76 #include <net/if_media.h> 77 #include <net/if_types.h> 78 #include <net/ifq_var.h> 79 #include <net/vlan/if_vlan_var.h> 80 #include <net/vlan/if_vlan_ether.h> 81 82 #include <dev/netif/mii_layer/mii.h> 83 #include <dev/netif/mii_layer/miivar.h> 84 85 #include <bus/pci/pcireg.h> 86 #include <bus/pci/pcivar.h> 87 88 #include "miibus_if.h" 89 90 #include <dev/netif/bce/if_bcereg.h> 91 #include <dev/netif/bce/if_bcefw.h> 92 93 /****************************************************************************/ 94 /* BCE Debug Options */ 95 /****************************************************************************/ 96 #ifdef BCE_DEBUG 97 98 static uint32_t bce_debug = BCE_WARN; 99 100 /* 101 * 0 = Never 102 * 1 = 1 in 2,147,483,648 103 * 256 = 1 in 8,388,608 104 * 2048 = 1 in 1,048,576 105 * 65536 = 1 in 32,768 106 * 1048576 = 1 in 2,048 107 * 268435456 = 1 in 8 108 * 536870912 = 1 in 4 109 
 * 1073741824 = 1 in 2
 *
 * bce_debug_l2fhdr_status_check:
 *	How often the l2_fhdr frame error check will fail.
 *
 * bce_debug_unexpected_attention:
 *	How often the unexpected attention check will fail.
 *
 * bce_debug_mbuf_allocation_failure:
 *	How often to simulate an mbuf allocation failure.
 *
 * bce_debug_dma_map_addr_failure:
 *	How often to simulate a DMA mapping failure.
 *
 * bce_debug_bootcode_running_failure:
 *	How often to simulate a bootcode failure.
 */
/* Fault-injection knobs; 0 disables the corresponding simulated failure. */
static int bce_debug_l2fhdr_status_check = 0;
static int bce_debug_unexpected_attention = 0;
static int bce_debug_mbuf_allocation_failure = 0;
static int bce_debug_dma_map_addr_failure = 0;
static int bce_debug_bootcode_running_failure = 0;

#endif /* BCE_DEBUG */


/****************************************************************************/
/* PCI Device ID Table                                                      */
/*                                                                          */
/* Used by bce_probe() to identify the devices supported by this driver.    */
/****************************************************************************/
/* Size of the device-description buffer built in bce_probe(). */
#define BCE_DEVDESC_MAX		64

/*
 * Each entry is { vendor, device, subvendor, subdevice, description }.
 * PCI_ANY_ID in the subvendor/subdevice slots acts as a wildcard, so the
 * OEM-specific entries must precede the generic catch-all for each chip.
 */
static struct bce_type bce_devs[] = {
	/* BCM5706C Controllers and OEM boards. */
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x3101,
		"HP NC370T Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x3106,
		"HP NC370i Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x3070,
		"HP NC380T PCIe DP Multifunc Gig Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x1709,
		"HP NC371i Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706, PCI_ANY_ID, PCI_ANY_ID,
		"Broadcom NetXtreme II BCM5706 1000Base-T" },

	/* BCM5706S controllers and OEM boards. */
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, HP_VENDORID, 0x3102,
		"HP NC370F Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, PCI_ANY_ID, PCI_ANY_ID,
		"Broadcom NetXtreme II BCM5706 1000Base-SX" },

	/* BCM5708C controllers and OEM boards. */
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708, HP_VENDORID, 0x7037,
		"HP NC373T PCIe Multifunction Gig Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708, HP_VENDORID, 0x7038,
		"HP NC373i Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708, HP_VENDORID, 0x7045,
		"HP NC374m PCIe Multifunction Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708, PCI_ANY_ID, PCI_ANY_ID,
		"Broadcom NetXtreme II BCM5708 1000Base-T" },

	/* BCM5708S controllers and OEM boards. */
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, HP_VENDORID, 0x1706,
		"HP NC373m Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, HP_VENDORID, 0x703b,
		"HP NC373i Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, HP_VENDORID, 0x703d,
		"HP NC373F PCIe Multifunc Giga Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, PCI_ANY_ID, PCI_ANY_ID,
		"Broadcom NetXtreme II BCM5708S 1000Base-T" },

	/* BCM5709C controllers and OEM boards. */
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709, HP_VENDORID, 0x7055,
		"HP NC382i DP Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709, HP_VENDORID, 0x7059,
		"HP NC382T PCIe DP Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709, PCI_ANY_ID, PCI_ANY_ID,
		"Broadcom NetXtreme II BCM5709 1000Base-T" },

	/* BCM5709S controllers and OEM boards. */
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709S, HP_VENDORID, 0x171d,
		"HP NC382m DP 1GbE Multifunction BL-c Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709S, HP_VENDORID, 0x7056,
		"HP NC382i DP Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709S, PCI_ANY_ID, PCI_ANY_ID,
		"Broadcom NetXtreme II BCM5709 1000Base-SX" },

	/* BCM5716 controllers and OEM boards. */
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5716, PCI_ANY_ID, PCI_ANY_ID,
		"Broadcom NetXtreme II BCM5716 1000Base-T" },

	{ 0, 0, 0, 0, NULL }
};


/****************************************************************************/
/* Supported Flash NVRAM device data.                                       */
/****************************************************************************/
/*
 * Each entry: { strap value, config1, write1 command, ...}, flags,
 * page geometry, byte-address mask, total size and a human-readable name.
 * The strap value in the first field is matched against the NVRAM straps
 * read from the hardware in bce_init_nvram().
 */
static const struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS		(BCE_NV_BUFFERED | BCE_NV_TRANSLATE)
#define NONBUFFERED_FLAGS	(BCE_NV_WREN)

	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Ateml Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};

/*
 * The BCM5709 controllers transparently handle the
 * differences between Atmel 264 byte pages and all
 * flash devices which use 256 byte pages, so no
 * logical-to-physical mapping is required in the
 * driver.
 */
static struct flash_spec flash_5709 = {
	.flags		= BCE_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE * 2,
	.name		= "5709/5716 buffered flash (256kB)",
};


/****************************************************************************/
/* DragonFly device entry points.
*/ 317 /****************************************************************************/ 318 static int bce_probe(device_t); 319 static int bce_attach(device_t); 320 static int bce_detach(device_t); 321 static void bce_shutdown(device_t); 322 323 /****************************************************************************/ 324 /* BCE Debug Data Structure Dump Routines */ 325 /****************************************************************************/ 326 #ifdef BCE_DEBUG 327 static void bce_dump_mbuf(struct bce_softc *, struct mbuf *); 328 static void bce_dump_tx_mbuf_chain(struct bce_softc *, int, int); 329 static void bce_dump_rx_mbuf_chain(struct bce_softc *, int, int); 330 static void bce_dump_txbd(struct bce_softc *, int, struct tx_bd *); 331 static void bce_dump_rxbd(struct bce_softc *, int, struct rx_bd *); 332 static void bce_dump_l2fhdr(struct bce_softc *, int, 333 struct l2_fhdr *) __unused; 334 static void bce_dump_tx_chain(struct bce_softc *, int, int); 335 static void bce_dump_rx_chain(struct bce_softc *, int, int); 336 static void bce_dump_status_block(struct bce_softc *); 337 static void bce_dump_driver_state(struct bce_softc *); 338 static void bce_dump_stats_block(struct bce_softc *) __unused; 339 static void bce_dump_hw_state(struct bce_softc *); 340 static void bce_dump_txp_state(struct bce_softc *); 341 static void bce_dump_rxp_state(struct bce_softc *) __unused; 342 static void bce_dump_tpat_state(struct bce_softc *) __unused; 343 static void bce_freeze_controller(struct bce_softc *) __unused; 344 static void bce_unfreeze_controller(struct bce_softc *) __unused; 345 static void bce_breakpoint(struct bce_softc *); 346 #endif /* BCE_DEBUG */ 347 348 349 /****************************************************************************/ 350 /* BCE Register/Memory Access Routines */ 351 /****************************************************************************/ 352 static uint32_t bce_reg_rd_ind(struct bce_softc *, uint32_t); 353 static void 
bce_reg_wr_ind(struct bce_softc *, uint32_t, uint32_t); 354 static void bce_shmem_wr(struct bce_softc *, uint32_t, uint32_t); 355 static uint32_t bce_shmem_rd(struct bce_softc *, u32); 356 static void bce_ctx_wr(struct bce_softc *, uint32_t, uint32_t, uint32_t); 357 static int bce_miibus_read_reg(device_t, int, int); 358 static int bce_miibus_write_reg(device_t, int, int, int); 359 static void bce_miibus_statchg(device_t); 360 361 362 /****************************************************************************/ 363 /* BCE NVRAM Access Routines */ 364 /****************************************************************************/ 365 static int bce_acquire_nvram_lock(struct bce_softc *); 366 static int bce_release_nvram_lock(struct bce_softc *); 367 static void bce_enable_nvram_access(struct bce_softc *); 368 static void bce_disable_nvram_access(struct bce_softc *); 369 static int bce_nvram_read_dword(struct bce_softc *, uint32_t, uint8_t *, 370 uint32_t); 371 static int bce_init_nvram(struct bce_softc *); 372 static int bce_nvram_read(struct bce_softc *, uint32_t, uint8_t *, int); 373 static int bce_nvram_test(struct bce_softc *); 374 375 /****************************************************************************/ 376 /* BCE DMA Allocate/Free Routines */ 377 /****************************************************************************/ 378 static int bce_dma_alloc(struct bce_softc *); 379 static void bce_dma_free(struct bce_softc *); 380 static void bce_dma_map_addr(void *, bus_dma_segment_t *, int, int); 381 382 /****************************************************************************/ 383 /* BCE Firmware Synchronization and Load */ 384 /****************************************************************************/ 385 static int bce_fw_sync(struct bce_softc *, uint32_t); 386 static void bce_load_rv2p_fw(struct bce_softc *, uint32_t *, 387 uint32_t, uint32_t); 388 static void bce_load_cpu_fw(struct bce_softc *, struct cpu_reg *, 389 struct fw_info *); 390 
static void bce_start_cpu(struct bce_softc *, struct cpu_reg *); 391 static void bce_halt_cpu(struct bce_softc *, struct cpu_reg *); 392 static void bce_start_rxp_cpu(struct bce_softc *); 393 static void bce_init_rxp_cpu(struct bce_softc *); 394 static void bce_init_txp_cpu(struct bce_softc *); 395 static void bce_init_tpat_cpu(struct bce_softc *); 396 static void bce_init_cp_cpu(struct bce_softc *); 397 static void bce_init_com_cpu(struct bce_softc *); 398 static void bce_init_cpus(struct bce_softc *); 399 400 static void bce_stop(struct bce_softc *); 401 static int bce_reset(struct bce_softc *, uint32_t); 402 static int bce_chipinit(struct bce_softc *); 403 static int bce_blockinit(struct bce_softc *); 404 static int bce_newbuf_std(struct bce_softc *, uint16_t *, uint16_t *, 405 uint32_t *, int); 406 static void bce_setup_rxdesc_std(struct bce_softc *, uint16_t, uint32_t *); 407 static void bce_probe_pci_caps(struct bce_softc *); 408 static void bce_print_adapter_info(struct bce_softc *); 409 static void bce_get_media(struct bce_softc *); 410 411 static void bce_init_tx_context(struct bce_softc *); 412 static int bce_init_tx_chain(struct bce_softc *); 413 static void bce_init_rx_context(struct bce_softc *); 414 static int bce_init_rx_chain(struct bce_softc *); 415 static void bce_free_rx_chain(struct bce_softc *); 416 static void bce_free_tx_chain(struct bce_softc *); 417 418 static int bce_encap(struct bce_softc *, struct mbuf **); 419 static void bce_start(struct ifnet *); 420 static int bce_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *); 421 static void bce_watchdog(struct ifnet *); 422 static int bce_ifmedia_upd(struct ifnet *); 423 static void bce_ifmedia_sts(struct ifnet *, struct ifmediareq *); 424 static void bce_init(void *); 425 static void bce_mgmt_init(struct bce_softc *); 426 427 static int bce_init_ctx(struct bce_softc *); 428 static void bce_get_mac_addr(struct bce_softc *); 429 static void bce_set_mac_addr(struct bce_softc *); 430 static 
void bce_phy_intr(struct bce_softc *); 431 static void bce_rx_intr(struct bce_softc *, int); 432 static void bce_tx_intr(struct bce_softc *); 433 static void bce_disable_intr(struct bce_softc *); 434 static void bce_enable_intr(struct bce_softc *, int); 435 436 #ifdef DEVICE_POLLING 437 static void bce_poll(struct ifnet *, enum poll_cmd, int); 438 #endif 439 static void bce_intr(struct bce_softc *); 440 static void bce_intr_legacy(void *); 441 static void bce_intr_msi(void *); 442 static void bce_intr_msi_oneshot(void *); 443 static void bce_set_rx_mode(struct bce_softc *); 444 static void bce_stats_update(struct bce_softc *); 445 static void bce_tick(void *); 446 static void bce_tick_serialized(struct bce_softc *); 447 static void bce_pulse(void *); 448 static void bce_pulse_check_msi(struct bce_softc *); 449 static void bce_add_sysctls(struct bce_softc *); 450 451 static void bce_coal_change(struct bce_softc *); 452 static int bce_sysctl_tx_bds_int(SYSCTL_HANDLER_ARGS); 453 static int bce_sysctl_tx_bds(SYSCTL_HANDLER_ARGS); 454 static int bce_sysctl_tx_ticks_int(SYSCTL_HANDLER_ARGS); 455 static int bce_sysctl_tx_ticks(SYSCTL_HANDLER_ARGS); 456 static int bce_sysctl_rx_bds_int(SYSCTL_HANDLER_ARGS); 457 static int bce_sysctl_rx_bds(SYSCTL_HANDLER_ARGS); 458 static int bce_sysctl_rx_ticks_int(SYSCTL_HANDLER_ARGS); 459 static int bce_sysctl_rx_ticks(SYSCTL_HANDLER_ARGS); 460 static int bce_sysctl_coal_change(SYSCTL_HANDLER_ARGS, 461 uint32_t *, uint32_t); 462 463 /* 464 * NOTE: 465 * Don't set bce_tx_ticks_int/bce_tx_ticks to 1023. Linux's bnx2 466 * takes 1023 as the TX ticks limit. However, using 1023 will 467 * cause 5708(B2) to generate extra interrupts (~2000/s) even when 468 * there is _no_ network activity on the NIC. 
469 */ 470 static uint32_t bce_tx_bds_int = 255; /* bcm: 20 */ 471 static uint32_t bce_tx_bds = 255; /* bcm: 20 */ 472 static uint32_t bce_tx_ticks_int = 1022; /* bcm: 80 */ 473 static uint32_t bce_tx_ticks = 1022; /* bcm: 80 */ 474 static uint32_t bce_rx_bds_int = 128; /* bcm: 6 */ 475 static uint32_t bce_rx_bds = 128; /* bcm: 6 */ 476 static uint32_t bce_rx_ticks_int = 125; /* bcm: 18 */ 477 static uint32_t bce_rx_ticks = 125; /* bcm: 18 */ 478 479 static int bce_msi_enable = 1; 480 481 TUNABLE_INT("hw.bce.tx_bds_int", &bce_tx_bds_int); 482 TUNABLE_INT("hw.bce.tx_bds", &bce_tx_bds); 483 TUNABLE_INT("hw.bce.tx_ticks_int", &bce_tx_ticks_int); 484 TUNABLE_INT("hw.bce.tx_ticks", &bce_tx_ticks); 485 TUNABLE_INT("hw.bce.rx_bds_int", &bce_rx_bds_int); 486 TUNABLE_INT("hw.bce.rx_bds", &bce_rx_bds); 487 TUNABLE_INT("hw.bce.rx_ticks_int", &bce_rx_ticks_int); 488 TUNABLE_INT("hw.bce.rx_ticks", &bce_rx_ticks); 489 TUNABLE_INT("hw.bce.msi.enable", &bce_msi_enable); 490 491 /****************************************************************************/ 492 /* DragonFly device dispatch table. 
                                                                            */
/****************************************************************************/
static device_method_t bce_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		bce_probe),
	DEVMETHOD(device_attach,	bce_attach),
	DEVMETHOD(device_detach,	bce_detach),
	DEVMETHOD(device_shutdown,	bce_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	bce_miibus_read_reg),
	DEVMETHOD(miibus_writereg,	bce_miibus_write_reg),
	DEVMETHOD(miibus_statchg,	bce_miibus_statchg),

	{ 0, 0 }
};

static driver_t bce_driver = {
	"bce",
	bce_methods,
	sizeof(struct bce_softc)
};

static devclass_t bce_devclass;


DECLARE_DUMMY_MODULE(if_bce);
MODULE_DEPEND(bce, miibus, 1, 1, 1);
DRIVER_MODULE(if_bce, pci, bce_driver, bce_devclass, NULL, NULL);
DRIVER_MODULE(miibus, bce, miibus_driver, miibus_devclass, NULL, NULL);


/****************************************************************************/
/* Device probe function.                                                   */
/*                                                                          */
/* Compares the device to the driver's list of supported devices and        */
/* reports back to the OS whether this is the right driver for the device.  */
/*                                                                          */
/* Returns:                                                                 */
/*   BUS_PROBE_DEFAULT on success, positive value on failure.               */
/****************************************************************************/
static int
bce_probe(device_t dev)
{
	struct bce_type *t;
	uint16_t vid, did, svid, sdid;

	/* Get the data for the device to be probed. */
	vid  = pci_get_vendor(dev);
	did  = pci_get_device(dev);
	svid = pci_get_subvendor(dev);
	sdid = pci_get_subdevice(dev);

	/*
	 * Look through the list of known devices for a match.
	 * PCI_ANY_ID entries in bce_devs act as wildcards for the
	 * subsystem IDs, so OEM entries must match first.
	 */
	for (t = bce_devs; t->bce_name != NULL; ++t) {
		if (vid == t->bce_vid && did == t->bce_did &&
		    (svid == t->bce_svid || t->bce_svid == PCI_ANY_ID) &&
		    (sdid == t->bce_sdid || t->bce_sdid == PCI_ANY_ID)) {
			/*
			 * 4-byte read starting at the 1-byte revision ID
			 * register; only the low byte is used below, so
			 * the extra bytes are masked off harmlessly.
			 */
			uint32_t revid = pci_read_config(dev, PCIR_REVID, 4);
			char *descbuf;

			descbuf = kmalloc(BCE_DEVDESC_MAX, M_TEMP, M_WAITOK);

			/*
			 * Build the device identity, e.g. "... (B2)":
			 * high nibble of the revision is the letter,
			 * low nibble the metal rev.
			 */
			ksnprintf(descbuf, BCE_DEVDESC_MAX, "%s (%c%d)",
				  t->bce_name,
				  ((revid & 0xf0) >> 4) + 'A', revid & 0xf);

			device_set_desc_copy(dev, descbuf);
			kfree(descbuf, M_TEMP);
			return 0;
		}
	}
	return ENXIO;
}


/****************************************************************************/
/* Adapter Information Print Function.                                      */
/*                                                                          */
/* Prints the ASIC revision, bus type/speed, bootcode version and feature   */
/* flags of the attached controller to the console.                         */
/*                                                                          */
/* Returns:                                                                 */
/*   None.                                                                  */
/****************************************************************************/
static void
bce_print_adapter_info(struct bce_softc *sc)
{
	device_printf(sc->bce_dev, "ASIC (0x%08X); ", sc->bce_chipid);

	kprintf("Rev (%c%d); ", ((BCE_CHIP_ID(sc) & 0xf000) >> 12) + 'A',
		((BCE_CHIP_ID(sc) & 0x0ff0) >> 4));

	/* Bus info. */
	if (sc->bce_flags & BCE_PCIE_FLAG) {
		/* link_speed/link_width were read in bce_probe_pci_caps(). */
		kprintf("Bus (PCIe x%d, ", sc->link_width);
		switch (sc->link_speed) {
		case 1:
			kprintf("2.5Gbps); ");
			break;
		case 2:
			kprintf("5Gbps); ");
			break;
		default:
			kprintf("Unknown link speed); ");
			break;
		}
	} else {
		kprintf("Bus (PCI%s, %s, %dMHz); ",
		    ((sc->bce_flags & BCE_PCIX_FLAG) ? "-X" : ""),
		    ((sc->bce_flags & BCE_PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
		    sc->bus_speed_mhz);
	}

	/* Firmware version and device features. */
	kprintf("B/C (%s)", sc->bce_bc_ver);

	if ((sc->bce_flags & BCE_MFW_ENABLE_FLAG) ||
	    (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG)) {
		kprintf("; Flags(");
		if (sc->bce_flags & BCE_MFW_ENABLE_FLAG)
			kprintf("MFW[%s]", sc->bce_mfw_ver);
		if (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG)
			kprintf(" 2.5G");
		kprintf(")");
	}
	kprintf("\n");
}


/****************************************************************************/
/* PCI Capabilities Probe Function.                                         */
/*                                                                          */
/* Walks the PCI capabilities list for the device to find what features     */
/* are supported, recording PCI-X/PCIe capability and the PCIe link         */
/* speed/width in the softc.                                                */
/*                                                                          */
/* Returns:                                                                 */
/*   None.                                                                  */
/****************************************************************************/
static void
bce_probe_pci_caps(struct bce_softc *sc)
{
	device_t dev = sc->bce_dev;
	uint8_t ptr;

	if (pci_is_pcix(dev))
		sc->bce_cap_flags |= BCE_PCIX_CAPABLE_FLAG;

	ptr = pci_get_pciecap_ptr(dev);
	if (ptr) {
		/* Offset 0x12 in the PCIe capability is the Link Status
		 * register; bits 3:0 are link speed, 9:4 link width. */
		uint16_t link_status = pci_read_config(dev, ptr + 0x12, 2);

		sc->link_speed = link_status & 0xf;
		sc->link_width = (link_status >> 4) & 0x3f;
		sc->bce_cap_flags |= BCE_PCIE_CAPABLE_FLAG;
		sc->bce_flags |= BCE_PCIE_FLAG;
	}
}


/****************************************************************************/
/* Device attach function.                                                  */
/*                                                                          */
/* Allocates device resources, performs secondary chip identification,     */
/* resets and initializes the hardware, and initializes driver instance    */
/* variables.                                                               */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.
*/ 666 /****************************************************************************/ 667 static int 668 bce_attach(device_t dev) 669 { 670 struct bce_softc *sc = device_get_softc(dev); 671 struct ifnet *ifp = &sc->arpcom.ac_if; 672 uint32_t val; 673 u_int irq_flags; 674 void (*irq_handle)(void *); 675 int rid, rc = 0; 676 int i, j; 677 678 sc->bce_dev = dev; 679 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 680 681 pci_enable_busmaster(dev); 682 683 bce_probe_pci_caps(sc); 684 685 /* Allocate PCI memory resources. */ 686 rid = PCIR_BAR(0); 687 sc->bce_res_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 688 RF_ACTIVE | PCI_RF_DENSE); 689 if (sc->bce_res_mem == NULL) { 690 device_printf(dev, "PCI memory allocation failed\n"); 691 return ENXIO; 692 } 693 sc->bce_btag = rman_get_bustag(sc->bce_res_mem); 694 sc->bce_bhandle = rman_get_bushandle(sc->bce_res_mem); 695 696 /* Allocate PCI IRQ resources. */ 697 sc->bce_irq_type = pci_alloc_1intr(dev, bce_msi_enable, 698 &sc->bce_irq_rid, &irq_flags); 699 700 sc->bce_res_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, 701 &sc->bce_irq_rid, irq_flags); 702 if (sc->bce_res_irq == NULL) { 703 device_printf(dev, "PCI map interrupt failed\n"); 704 rc = ENXIO; 705 goto fail; 706 } 707 708 /* 709 * Configure byte swap and enable indirect register access. 710 * Rely on CPU to do target byte swapping on big endian systems. 711 * Access to registers outside of PCI configurtion space are not 712 * valid until this is done. 713 */ 714 pci_write_config(dev, BCE_PCICFG_MISC_CONFIG, 715 BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA | 716 BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP, 4); 717 718 /* Save ASIC revsion info. */ 719 sc->bce_chipid = REG_RD(sc, BCE_MISC_ID); 720 721 /* Weed out any non-production controller revisions. 
*/ 722 switch (BCE_CHIP_ID(sc)) { 723 case BCE_CHIP_ID_5706_A0: 724 case BCE_CHIP_ID_5706_A1: 725 case BCE_CHIP_ID_5708_A0: 726 case BCE_CHIP_ID_5708_B0: 727 case BCE_CHIP_ID_5709_A0: 728 case BCE_CHIP_ID_5709_B0: 729 case BCE_CHIP_ID_5709_B1: 730 #ifdef foo 731 /* 5709C B2 seems to work fine */ 732 case BCE_CHIP_ID_5709_B2: 733 #endif 734 device_printf(dev, "Unsupported chip id 0x%08x!\n", 735 BCE_CHIP_ID(sc)); 736 rc = ENODEV; 737 goto fail; 738 } 739 740 if (sc->bce_irq_type == PCI_INTR_TYPE_LEGACY) { 741 irq_handle = bce_intr_legacy; 742 } else if (sc->bce_irq_type == PCI_INTR_TYPE_MSI) { 743 irq_handle = bce_intr_msi; 744 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) { 745 irq_handle = bce_intr_msi_oneshot; 746 sc->bce_flags |= BCE_ONESHOT_MSI_FLAG; 747 } 748 } else { 749 panic("%s: unsupported intr type %d\n", 750 device_get_nameunit(dev), sc->bce_irq_type); 751 } 752 753 /* 754 * Find the base address for shared memory access. 755 * Newer versions of bootcode use a signature and offset 756 * while older versions use a fixed address. 757 */ 758 val = REG_RD_IND(sc, BCE_SHM_HDR_SIGNATURE); 759 if ((val & BCE_SHM_HDR_SIGNATURE_SIG_MASK) == 760 BCE_SHM_HDR_SIGNATURE_SIG) { 761 /* Multi-port devices use different offsets in shared memory. */ 762 sc->bce_shmem_base = REG_RD_IND(sc, 763 BCE_SHM_HDR_ADDR_0 + (pci_get_function(sc->bce_dev) << 2)); 764 } else { 765 sc->bce_shmem_base = HOST_VIEW_SHMEM_BASE; 766 } 767 DBPRINT(sc, BCE_INFO, "bce_shmem_base = 0x%08X\n", sc->bce_shmem_base); 768 769 /* Fetch the bootcode revision. */ 770 val = bce_shmem_rd(sc, BCE_DEV_INFO_BC_REV); 771 for (i = 0, j = 0; i < 3; i++) { 772 uint8_t num; 773 int k, skip0; 774 775 num = (uint8_t)(val >> (24 - (i * 8))); 776 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) { 777 if (num >= k || !skip0 || k == 1) { 778 sc->bce_bc_ver[j++] = (num / k) + '0'; 779 skip0 = 0; 780 } 781 } 782 if (i != 2) 783 sc->bce_bc_ver[j++] = '.'; 784 } 785 786 /* Check if any management firwmare is running. 
*/ 787 val = bce_shmem_rd(sc, BCE_PORT_FEATURE); 788 if (val & BCE_PORT_FEATURE_ASF_ENABLED) { 789 sc->bce_flags |= BCE_MFW_ENABLE_FLAG; 790 791 /* Allow time for firmware to enter the running state. */ 792 for (i = 0; i < 30; i++) { 793 val = bce_shmem_rd(sc, BCE_BC_STATE_CONDITION); 794 if (val & BCE_CONDITION_MFW_RUN_MASK) 795 break; 796 DELAY(10000); 797 } 798 } 799 800 /* Check the current bootcode state. */ 801 val = bce_shmem_rd(sc, BCE_BC_STATE_CONDITION) & 802 BCE_CONDITION_MFW_RUN_MASK; 803 if (val != BCE_CONDITION_MFW_RUN_UNKNOWN && 804 val != BCE_CONDITION_MFW_RUN_NONE) { 805 uint32_t addr = bce_shmem_rd(sc, BCE_MFW_VER_PTR); 806 807 for (i = 0, j = 0; j < 3; j++) { 808 val = bce_reg_rd_ind(sc, addr + j * 4); 809 val = bswap32(val); 810 memcpy(&sc->bce_mfw_ver[i], &val, 4); 811 i += 4; 812 } 813 } 814 815 /* Get PCI bus information (speed and type). */ 816 val = REG_RD(sc, BCE_PCICFG_MISC_STATUS); 817 if (val & BCE_PCICFG_MISC_STATUS_PCIX_DET) { 818 uint32_t clkreg; 819 820 sc->bce_flags |= BCE_PCIX_FLAG; 821 822 clkreg = REG_RD(sc, BCE_PCICFG_PCI_CLOCK_CONTROL_BITS) & 823 BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET; 824 switch (clkreg) { 825 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ: 826 sc->bus_speed_mhz = 133; 827 break; 828 829 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ: 830 sc->bus_speed_mhz = 100; 831 break; 832 833 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ: 834 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ: 835 sc->bus_speed_mhz = 66; 836 break; 837 838 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ: 839 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ: 840 sc->bus_speed_mhz = 50; 841 break; 842 843 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW: 844 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ: 845 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ: 846 sc->bus_speed_mhz = 33; 847 break; 848 } 849 } else { 
850 if (val & BCE_PCICFG_MISC_STATUS_M66EN) 851 sc->bus_speed_mhz = 66; 852 else 853 sc->bus_speed_mhz = 33; 854 } 855 856 if (val & BCE_PCICFG_MISC_STATUS_32BIT_DET) 857 sc->bce_flags |= BCE_PCI_32BIT_FLAG; 858 859 /* Reset the controller. */ 860 rc = bce_reset(sc, BCE_DRV_MSG_CODE_RESET); 861 if (rc != 0) 862 goto fail; 863 864 /* Initialize the controller. */ 865 rc = bce_chipinit(sc); 866 if (rc != 0) { 867 device_printf(dev, "Controller initialization failed!\n"); 868 goto fail; 869 } 870 871 /* Perform NVRAM test. */ 872 rc = bce_nvram_test(sc); 873 if (rc != 0) { 874 device_printf(dev, "NVRAM test failed!\n"); 875 goto fail; 876 } 877 878 /* Fetch the permanent Ethernet MAC address. */ 879 bce_get_mac_addr(sc); 880 881 /* 882 * Trip points control how many BDs 883 * should be ready before generating an 884 * interrupt while ticks control how long 885 * a BD can sit in the chain before 886 * generating an interrupt. Set the default 887 * values for the RX and TX rings. 888 */ 889 890 #ifdef BCE_DRBUG 891 /* Force more frequent interrupts. */ 892 sc->bce_tx_quick_cons_trip_int = 1; 893 sc->bce_tx_quick_cons_trip = 1; 894 sc->bce_tx_ticks_int = 0; 895 sc->bce_tx_ticks = 0; 896 897 sc->bce_rx_quick_cons_trip_int = 1; 898 sc->bce_rx_quick_cons_trip = 1; 899 sc->bce_rx_ticks_int = 0; 900 sc->bce_rx_ticks = 0; 901 #else 902 sc->bce_tx_quick_cons_trip_int = bce_tx_bds_int; 903 sc->bce_tx_quick_cons_trip = bce_tx_bds; 904 sc->bce_tx_ticks_int = bce_tx_ticks_int; 905 sc->bce_tx_ticks = bce_tx_ticks; 906 907 sc->bce_rx_quick_cons_trip_int = bce_rx_bds_int; 908 sc->bce_rx_quick_cons_trip = bce_rx_bds; 909 sc->bce_rx_ticks_int = bce_rx_ticks_int; 910 sc->bce_rx_ticks = bce_rx_ticks; 911 #endif 912 913 /* Update statistics once every second. */ 914 sc->bce_stats_ticks = 1000000 & 0xffff00; 915 916 /* Find the media type for the adapter. */ 917 bce_get_media(sc); 918 919 /* Allocate DMA memory resources. 
*/ 920 rc = bce_dma_alloc(sc); 921 if (rc != 0) { 922 device_printf(dev, "DMA resource allocation failed!\n"); 923 goto fail; 924 } 925 926 /* Initialize the ifnet interface. */ 927 ifp->if_softc = sc; 928 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 929 ifp->if_ioctl = bce_ioctl; 930 ifp->if_start = bce_start; 931 ifp->if_init = bce_init; 932 ifp->if_watchdog = bce_watchdog; 933 #ifdef DEVICE_POLLING 934 ifp->if_poll = bce_poll; 935 #endif 936 ifp->if_mtu = ETHERMTU; 937 ifp->if_hwassist = BCE_IF_HWASSIST; 938 ifp->if_capabilities = BCE_IF_CAPABILITIES; 939 ifp->if_capenable = ifp->if_capabilities; 940 ifq_set_maxlen(&ifp->if_snd, USABLE_TX_BD); 941 ifq_set_ready(&ifp->if_snd); 942 943 if (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG) 944 ifp->if_baudrate = IF_Gbps(2.5); 945 else 946 ifp->if_baudrate = IF_Gbps(1); 947 948 /* Assume a standard 1500 byte MTU size for mbuf allocations. */ 949 sc->mbuf_alloc_size = MCLBYTES; 950 951 /* Look for our PHY. */ 952 rc = mii_phy_probe(dev, &sc->bce_miibus, 953 bce_ifmedia_upd, bce_ifmedia_sts); 954 if (rc != 0) { 955 device_printf(dev, "PHY probe failed!\n"); 956 goto fail; 957 } 958 959 /* Attach to the Ethernet interface list. */ 960 ether_ifattach(ifp, sc->eaddr, NULL); 961 962 callout_init_mp(&sc->bce_tick_callout); 963 callout_init_mp(&sc->bce_pulse_callout); 964 965 /* Hookup IRQ last. */ 966 rc = bus_setup_intr(dev, sc->bce_res_irq, INTR_MPSAFE, irq_handle, sc, 967 &sc->bce_intrhand, ifp->if_serializer); 968 if (rc != 0) { 969 device_printf(dev, "Failed to setup IRQ!\n"); 970 ether_ifdetach(ifp); 971 goto fail; 972 } 973 974 ifp->if_cpuid = rman_get_cpuid(sc->bce_res_irq); 975 KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus); 976 977 /* Print some important debugging info. */ 978 DBRUN(BCE_INFO, bce_dump_driver_state(sc)); 979 980 /* Add the supported sysctls to the kernel. */ 981 bce_add_sysctls(sc); 982 983 /* 984 * The chip reset earlier notified the bootcode that 985 * a driver is present. 
We now need to start our pulse
	 * routine so that the bootcode is reminded that we're
	 * still running.
	 */
	bce_pulse(sc);

	/* Get the firmware running so IPMI still works */
	bce_mgmt_init(sc);

	if (bootverbose)
		bce_print_adapter_info(sc);

	return 0;
fail:
	bce_detach(dev);
	return(rc);
}


/****************************************************************************/
/* Device detach function.                                                  */
/*                                                                          */
/* Stops the controller, resets the controller, and releases resources.    */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/
static int
bce_detach(device_t dev)
{
	struct bce_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = &sc->arpcom.ac_if;
		uint32_t msg;

		/*
		 * Stop and reset the controller.  The pulse callout is
		 * stopped first so it cannot touch the hardware after the
		 * reset; everything runs under the interface serializer to
		 * exclude the interrupt handler.
		 */
		lwkt_serialize_enter(ifp->if_serializer);
		callout_stop(&sc->bce_pulse_callout);
		bce_stop(sc);
		/* Tell the bootcode why the driver is unloading. */
		if (sc->bce_flags & BCE_NO_WOL_FLAG)
			msg = BCE_DRV_MSG_CODE_UNLOAD_LNK_DN;
		else
			msg = BCE_DRV_MSG_CODE_UNLOAD;
		bce_reset(sc, msg);
		bus_teardown_intr(dev, sc->bce_res_irq, sc->bce_intrhand);
		lwkt_serialize_exit(ifp->if_serializer);

		ether_ifdetach(ifp);
	}

	/* If we have a child device on the MII bus remove it too. */
	if (sc->bce_miibus)
		device_delete_child(dev, sc->bce_miibus);
	bus_generic_detach(dev);

	/*
	 * Release bus resources.  Each release is guarded so the function
	 * is safe to call from the attach failure path, where only some
	 * resources may have been allocated.
	 */
	if (sc->bce_res_irq != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, sc->bce_irq_rid,
		    sc->bce_res_irq);
	}

	if (sc->bce_irq_type == PCI_INTR_TYPE_MSI)
		pci_release_msi(dev);

	if (sc->bce_res_mem != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0),
		    sc->bce_res_mem);
	}

	bce_dma_free(sc);

	if (sc->bce_sysctl_tree != NULL)
		sysctl_ctx_free(&sc->bce_sysctl_ctx);

	return 0;
}


/****************************************************************************/
/* Device shutdown function.                                                */
/*                                                                          */
/* Stops and resets the controller.                                         */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing                                                                */
/****************************************************************************/
static void
bce_shutdown(device_t dev)
{
	struct bce_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t msg;

	lwkt_serialize_enter(ifp->if_serializer);
	bce_stop(sc);
	/* Tell the bootcode why the driver is going down. */
	if (sc->bce_flags & BCE_NO_WOL_FLAG)
		msg = BCE_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else
		msg = BCE_DRV_MSG_CODE_UNLOAD;
	bce_reset(sc, msg);
	lwkt_serialize_exit(ifp->if_serializer);
}


/****************************************************************************/
/* Indirect register read.                                                  */
/*                                                                          */
/* Reads NetXtreme II registers using an index/data register pair in PCI   */
/* configuration space.  Using this mechanism avoids issues with posted    */
/* reads but is much slower than memory-mapped I/O.                        */
/*                                                                          */
/* Returns:                                                                 */
/*   The value of the register.
*/
/****************************************************************************/
static uint32_t
bce_reg_rd_ind(struct bce_softc *sc, uint32_t offset)
{
	device_t dev = sc->bce_dev;

	/* Select the target register through the PCI window address. */
	pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4);
#ifdef BCE_DEBUG
	{
		uint32_t val;
		val = pci_read_config(dev, BCE_PCICFG_REG_WINDOW, 4);
		DBPRINT(sc, BCE_EXCESSIVE,
			"%s(); offset = 0x%08X, val = 0x%08X\n",
			__func__, offset, val);
		return val;
	}
#else
	return pci_read_config(dev, BCE_PCICFG_REG_WINDOW, 4);
#endif
}


/****************************************************************************/
/* Indirect register write.                                                 */
/*                                                                          */
/* Writes NetXtreme II registers using an index/data register pair in PCI  */
/* configuration space.  Using this mechanism avoids issues with posted    */
/* writes but is much slower than memory-mapped I/O.                       */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_reg_wr_ind(struct bce_softc *sc, uint32_t offset, uint32_t val)
{
	device_t dev = sc->bce_dev;

	DBPRINT(sc, BCE_EXCESSIVE, "%s(); offset = 0x%08X, val = 0x%08X\n",
		__func__, offset, val);

	/* Select the target register, then write the data through the window. */
	pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4);
	pci_write_config(dev, BCE_PCICFG_REG_WINDOW, val, 4);
}


/****************************************************************************/
/* Shared memory write.                                                     */
/*                                                                          */
/* Writes NetXtreme II shared memory region.                                */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
*/ 1150 /****************************************************************************/ 1151 static void 1152 bce_shmem_wr(struct bce_softc *sc, uint32_t offset, uint32_t val) 1153 { 1154 bce_reg_wr_ind(sc, sc->bce_shmem_base + offset, val); 1155 } 1156 1157 1158 /****************************************************************************/ 1159 /* Shared memory read. */ 1160 /* */ 1161 /* Reads NetXtreme II shared memory region. */ 1162 /* */ 1163 /* Returns: */ 1164 /* The 32 bit value read. */ 1165 /****************************************************************************/ 1166 static u32 1167 bce_shmem_rd(struct bce_softc *sc, uint32_t offset) 1168 { 1169 return bce_reg_rd_ind(sc, sc->bce_shmem_base + offset); 1170 } 1171 1172 1173 /****************************************************************************/ 1174 /* Context memory write. */ 1175 /* */ 1176 /* The NetXtreme II controller uses context memory to track connection */ 1177 /* information for L2 and higher network protocols. */ 1178 /* */ 1179 /* Returns: */ 1180 /* Nothing. 
*/
/****************************************************************************/
static void
bce_ctx_wr(struct bce_softc *sc, uint32_t cid_addr, uint32_t ctx_offset,
	   uint32_t ctx_val)
{
	uint32_t idx, offset = ctx_offset + cid_addr;
	uint32_t val, retry_cnt = 5;

	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
		/*
		 * 5709/5716: context memory is written through a data/control
		 * register pair; the WRITE_REQ bit self-clears when the
		 * controller has consumed the write (polled up to 5 x 5us).
		 */
		REG_WR(sc, BCE_CTX_CTX_DATA, ctx_val);
		REG_WR(sc, BCE_CTX_CTX_CTRL, (offset | BCE_CTX_CTX_CTRL_WRITE_REQ));

		for (idx = 0; idx < retry_cnt; idx++) {
			val = REG_RD(sc, BCE_CTX_CTX_CTRL);
			if ((val & BCE_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			DELAY(5);
		}

		if (val & BCE_CTX_CTX_CTRL_WRITE_REQ) {
			device_printf(sc->bce_dev,
			    "Unable to write CTX memory: "
			    "cid_addr = 0x%08X, offset = 0x%08X!\n",
			    cid_addr, ctx_offset);
		}
	} else {
		/* Older chips: simple address/data pair, no handshake. */
		REG_WR(sc, BCE_CTX_DATA_ADR, offset);
		REG_WR(sc, BCE_CTX_DATA, ctx_val);
	}
}


/****************************************************************************/
/* PHY register read.                                                       */
/*                                                                          */
/* Implements register reads on the MII bus.                                */
/*                                                                          */
/* Returns:                                                                 */
/*   The value of the register.                                             */
/****************************************************************************/
static int
bce_miibus_read_reg(device_t dev, int phy, int reg)
{
	struct bce_softc *sc = device_get_softc(dev);
	uint32_t val;
	int i;

	/* Make sure we are accessing the correct PHY address. */
	if (phy != sc->bce_phy_addr) {
		DBPRINT(sc, BCE_VERBOSE,
			"Invalid PHY address %d for PHY read!\n", phy);
		return 0;
	}

	/*
	 * Hardware auto-polling owns the MDIO interface; it must be turned
	 * off (with a settle delay) before a manual access, and restored
	 * afterwards.  The read-back of MDIO_MODE flushes the posted write.
	 */
	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val = REG_RD(sc, BCE_EMAC_MDIO_MODE);
		val &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BCE_EMAC_MDIO_MODE, val);
		REG_RD(sc, BCE_EMAC_MDIO_MODE);

		DELAY(40);
	}

	/* Issue the read command; START_BUSY self-clears on completion. */
	val = BCE_MIPHY(phy) | BCE_MIREG(reg) |
	      BCE_EMAC_MDIO_COMM_COMMAND_READ | BCE_EMAC_MDIO_COMM_DISEXT |
	      BCE_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(sc, BCE_EMAC_MDIO_COMM, val);

	for (i = 0; i < BCE_PHY_TIMEOUT; i++) {
		DELAY(10);

		val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
		if (!(val & BCE_EMAC_MDIO_COMM_START_BUSY)) {
			DELAY(5);

			val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
			val &= BCE_EMAC_MDIO_COMM_DATA;
			break;
		}
	}

	/* If still busy after the full timeout, report and return 0. */
	if (val & BCE_EMAC_MDIO_COMM_START_BUSY) {
		if_printf(&sc->arpcom.ac_if,
			  "Error: PHY read timeout! phy = %d, reg = 0x%04X\n",
			  phy, reg);
		val = 0x0;
	} else {
		val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
	}

	DBPRINT(sc, BCE_EXCESSIVE,
		"%s(): phy = %d, reg = 0x%04X, val = 0x%04X\n",
		__func__, phy, (uint16_t)reg & 0xffff, (uint16_t) val & 0xffff);

	/* Restore hardware auto-polling if it was enabled. */
	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val = REG_RD(sc, BCE_EMAC_MDIO_MODE);
		val |= BCE_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BCE_EMAC_MDIO_MODE, val);
		REG_RD(sc, BCE_EMAC_MDIO_MODE);

		DELAY(40);
	}
	return (val & 0xffff);
}


/****************************************************************************/
/* PHY register write.                                                      */
/*                                                                          */
/* Implements register writes on the MII bus.                               */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success (write failures are logged, not returned).
*/
/****************************************************************************/
static int
bce_miibus_write_reg(device_t dev, int phy, int reg, int val)
{
	struct bce_softc *sc = device_get_softc(dev);
	uint32_t val1;
	int i;

	/* Make sure we are accessing the correct PHY address. */
	if (phy != sc->bce_phy_addr) {
		DBPRINT(sc, BCE_WARN,
			"Invalid PHY address %d for PHY write!\n", phy);
		return(0);
	}

	DBPRINT(sc, BCE_EXCESSIVE,
		"%s(): phy = %d, reg = 0x%04X, val = 0x%04X\n",
		__func__, phy, (uint16_t)(reg & 0xffff),
		(uint16_t)(val & 0xffff));

	/*
	 * Suspend hardware auto-polling while we drive the MDIO interface
	 * manually; the MDIO_MODE read-back flushes the posted write.
	 */
	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE);
		val1 &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BCE_EMAC_MDIO_MODE, val1);
		REG_RD(sc, BCE_EMAC_MDIO_MODE);

		DELAY(40);
	}

	/* Issue the write command; START_BUSY self-clears on completion. */
	val1 = BCE_MIPHY(phy) | BCE_MIREG(reg) | val |
	       BCE_EMAC_MDIO_COMM_COMMAND_WRITE |
	       BCE_EMAC_MDIO_COMM_START_BUSY | BCE_EMAC_MDIO_COMM_DISEXT;
	REG_WR(sc, BCE_EMAC_MDIO_COMM, val1);

	for (i = 0; i < BCE_PHY_TIMEOUT; i++) {
		DELAY(10);

		val1 = REG_RD(sc, BCE_EMAC_MDIO_COMM);
		if (!(val1 & BCE_EMAC_MDIO_COMM_START_BUSY)) {
			DELAY(5);
			break;
		}
	}

	if (val1 & BCE_EMAC_MDIO_COMM_START_BUSY)
		if_printf(&sc->arpcom.ac_if, "PHY write timeout!\n");

	/* Restore hardware auto-polling if it was enabled. */
	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE);
		val1 |= BCE_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BCE_EMAC_MDIO_MODE, val1);
		REG_RD(sc, BCE_EMAC_MDIO_MODE);

		DELAY(40);
	}
	return 0;
}


/****************************************************************************/
/* MII bus status change.                                                   */
/*                                                                          */
/* Called by the MII bus driver when the PHY establishes link to set the   */
/* MAC interface registers.
*/ 1363 /* */ 1364 /* Returns: */ 1365 /* Nothing. */ 1366 /****************************************************************************/ 1367 static void 1368 bce_miibus_statchg(device_t dev) 1369 { 1370 struct bce_softc *sc = device_get_softc(dev); 1371 struct mii_data *mii = device_get_softc(sc->bce_miibus); 1372 1373 DBPRINT(sc, BCE_INFO, "mii_media_active = 0x%08X\n", 1374 mii->mii_media_active); 1375 1376 #ifdef BCE_DEBUG 1377 /* Decode the interface media flags. */ 1378 if_printf(&sc->arpcom.ac_if, "Media: ( "); 1379 switch(IFM_TYPE(mii->mii_media_active)) { 1380 case IFM_ETHER: 1381 kprintf("Ethernet )"); 1382 break; 1383 default: 1384 kprintf("Unknown )"); 1385 break; 1386 } 1387 1388 kprintf(" Media Options: ( "); 1389 switch(IFM_SUBTYPE(mii->mii_media_active)) { 1390 case IFM_AUTO: 1391 kprintf("Autoselect )"); 1392 break; 1393 case IFM_MANUAL: 1394 kprintf("Manual )"); 1395 break; 1396 case IFM_NONE: 1397 kprintf("None )"); 1398 break; 1399 case IFM_10_T: 1400 kprintf("10Base-T )"); 1401 break; 1402 case IFM_100_TX: 1403 kprintf("100Base-TX )"); 1404 break; 1405 case IFM_1000_SX: 1406 kprintf("1000Base-SX )"); 1407 break; 1408 case IFM_1000_T: 1409 kprintf("1000Base-T )"); 1410 break; 1411 default: 1412 kprintf("Other )"); 1413 break; 1414 } 1415 1416 kprintf(" Global Options: ("); 1417 if (mii->mii_media_active & IFM_FDX) 1418 kprintf(" FullDuplex"); 1419 if (mii->mii_media_active & IFM_HDX) 1420 kprintf(" HalfDuplex"); 1421 if (mii->mii_media_active & IFM_LOOP) 1422 kprintf(" Loopback"); 1423 if (mii->mii_media_active & IFM_FLAG0) 1424 kprintf(" Flag0"); 1425 if (mii->mii_media_active & IFM_FLAG1) 1426 kprintf(" Flag1"); 1427 if (mii->mii_media_active & IFM_FLAG2) 1428 kprintf(" Flag2"); 1429 kprintf(" )\n"); 1430 #endif 1431 1432 BCE_CLRBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT); 1433 1434 /* 1435 * Set MII or GMII interface based on the speed negotiated 1436 * by the PHY. 
1437 */ 1438 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T || 1439 IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) { 1440 DBPRINT(sc, BCE_INFO, "Setting GMII interface.\n"); 1441 BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT_GMII); 1442 } else { 1443 DBPRINT(sc, BCE_INFO, "Setting MII interface.\n"); 1444 BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT_MII); 1445 } 1446 1447 /* 1448 * Set half or full duplex based on the duplicity negotiated 1449 * by the PHY. 1450 */ 1451 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) { 1452 DBPRINT(sc, BCE_INFO, "Setting Full-Duplex interface.\n"); 1453 BCE_CLRBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_HALF_DUPLEX); 1454 } else { 1455 DBPRINT(sc, BCE_INFO, "Setting Half-Duplex interface.\n"); 1456 BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_HALF_DUPLEX); 1457 } 1458 } 1459 1460 1461 /****************************************************************************/ 1462 /* Acquire NVRAM lock. */ 1463 /* */ 1464 /* Before the NVRAM can be accessed the caller must acquire an NVRAM lock. */ 1465 /* Locks 0 and 2 are reserved, lock 1 is used by firmware and lock 2 is */ 1466 /* for use by the driver. */ 1467 /* */ 1468 /* Returns: */ 1469 /* 0 on success, positive value on failure. */ 1470 /****************************************************************************/ 1471 static int 1472 bce_acquire_nvram_lock(struct bce_softc *sc) 1473 { 1474 uint32_t val; 1475 int j; 1476 1477 DBPRINT(sc, BCE_VERBOSE, "Acquiring NVRAM lock.\n"); 1478 1479 /* Request access to the flash interface. 
*/ 1480 REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_SET2); 1481 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) { 1482 val = REG_RD(sc, BCE_NVM_SW_ARB); 1483 if (val & BCE_NVM_SW_ARB_ARB_ARB2) 1484 break; 1485 1486 DELAY(5); 1487 } 1488 1489 if (j >= NVRAM_TIMEOUT_COUNT) { 1490 DBPRINT(sc, BCE_WARN, "Timeout acquiring NVRAM lock!\n"); 1491 return EBUSY; 1492 } 1493 return 0; 1494 } 1495 1496 1497 /****************************************************************************/ 1498 /* Release NVRAM lock. */ 1499 /* */ 1500 /* When the caller is finished accessing NVRAM the lock must be released. */ 1501 /* Locks 0 and 2 are reserved, lock 1 is used by firmware and lock 2 is */ 1502 /* for use by the driver. */ 1503 /* */ 1504 /* Returns: */ 1505 /* 0 on success, positive value on failure. */ 1506 /****************************************************************************/ 1507 static int 1508 bce_release_nvram_lock(struct bce_softc *sc) 1509 { 1510 int j; 1511 uint32_t val; 1512 1513 DBPRINT(sc, BCE_VERBOSE, "Releasing NVRAM lock.\n"); 1514 1515 /* 1516 * Relinquish nvram interface. 1517 */ 1518 REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_CLR2); 1519 1520 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) { 1521 val = REG_RD(sc, BCE_NVM_SW_ARB); 1522 if (!(val & BCE_NVM_SW_ARB_ARB_ARB2)) 1523 break; 1524 1525 DELAY(5); 1526 } 1527 1528 if (j >= NVRAM_TIMEOUT_COUNT) { 1529 DBPRINT(sc, BCE_WARN, "Timeout reeasing NVRAM lock!\n"); 1530 return EBUSY; 1531 } 1532 return 0; 1533 } 1534 1535 1536 /****************************************************************************/ 1537 /* Enable NVRAM access. */ 1538 /* */ 1539 /* Before accessing NVRAM for read or write operations the caller must */ 1540 /* enabled NVRAM access. */ 1541 /* */ 1542 /* Returns: */ 1543 /* Nothing. 
*/
/****************************************************************************/
static void
bce_enable_nvram_access(struct bce_softc *sc)
{
	uint32_t val;

	DBPRINT(sc, BCE_VERBOSE, "Enabling NVRAM access.\n");

	val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE);
	/* Enable both bits, even on read. */
	REG_WR(sc, BCE_NVM_ACCESS_ENABLE,
	       val | BCE_NVM_ACCESS_ENABLE_EN | BCE_NVM_ACCESS_ENABLE_WR_EN);
}


/****************************************************************************/
/* Disable NVRAM access.                                                    */
/*                                                                          */
/* When the caller is finished accessing NVRAM access must be disabled.    */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_disable_nvram_access(struct bce_softc *sc)
{
	uint32_t val;

	DBPRINT(sc, BCE_VERBOSE, "Disabling NVRAM access.\n");

	val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE);

	/* Disable both bits, even after read. */
	REG_WR(sc, BCE_NVM_ACCESS_ENABLE,
	       val & ~(BCE_NVM_ACCESS_ENABLE_EN | BCE_NVM_ACCESS_ENABLE_WR_EN));
}


/****************************************************************************/
/* Read a dword (32 bits) from NVRAM.                                       */
/*                                                                          */
/* Read a 32 bit word from NVRAM.  The caller is assumed to have already  */
/* obtained the NVRAM lock and enabled the controller for NVRAM access.    */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success and the 32 bit value read, positive value on failure.    */
/****************************************************************************/
static int
bce_nvram_read_dword(struct bce_softc *sc, uint32_t offset, uint8_t *ret_val,
		     uint32_t cmd_flags)
{
	uint32_t cmd;
	int i, rc = 0;

	/* Build the command word. */
	cmd = BCE_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate the offset for buffered flash. */
	if (sc->bce_flash_info->flags & BCE_NV_TRANSLATE) {
		offset = ((offset / sc->bce_flash_info->page_size) <<
			  sc->bce_flash_info->page_bits) +
			 (offset % sc->bce_flash_info->page_size);
	}

	/*
	 * Clear the DONE bit separately, set the address to read,
	 * and issue the read.
	 */
	REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
	REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
	REG_WR(sc, BCE_NVM_COMMAND, cmd);

	/*
	 * Wait for completion; the result is byte-swapped from
	 * big-endian before being stored through ret_val.
	 */
	for (i = 0; i < NVRAM_TIMEOUT_COUNT; i++) {
		uint32_t val;

		DELAY(5);

		val = REG_RD(sc, BCE_NVM_COMMAND);
		if (val & BCE_NVM_COMMAND_DONE) {
			val = REG_RD(sc, BCE_NVM_READ);

			val = be32toh(val);
			memcpy(ret_val, &val, 4);
			break;
		}
	}

	/* Check for errors. */
	if (i >= NVRAM_TIMEOUT_COUNT) {
		if_printf(&sc->arpcom.ac_if,
			  "Timeout error reading NVRAM at offset 0x%08X!\n",
			  offset);
		rc = EBUSY;
	}
	return rc;
}


/****************************************************************************/
/* Initialize NVRAM access.                                                 */
/*                                                                          */
/* Identify the NVRAM device in use and prepare the NVRAM interface to     */
/* access that device.                                                      */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/
static int
bce_init_nvram(struct bce_softc *sc)
{
	uint32_t val;
	int j, entry_count, rc = 0;
	const struct flash_spec *flash;

	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __func__);

	/* 5709/5716 always use the 5709 flash spec; skip detection. */
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
		sc->bce_flash_info = &flash_5709;
		goto bce_init_nvram_get_flash_size;
	}

	/* Determine the selected interface.
*/
	val = REG_RD(sc, BCE_NVM_CFG1);

	entry_count = sizeof(flash_table) / sizeof(struct flash_spec);

	/*
	 * Flash reconfiguration is required to support additional
	 * NVRAM devices not directly supported in hardware.
	 * Check if the flash interface was reconfigured
	 * by the bootcode.
	 */

	if (val & 0x40000000) {
		/* Flash interface reconfigured by bootcode. */

		DBPRINT(sc, BCE_INFO_LOAD,
			"%s(): Flash WAS reconfigured.\n", __func__);

		/* Match against the backup strapping only. */
		for (j = 0, flash = flash_table; j < entry_count;
		     j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				sc->bce_flash_info = flash;
				break;
			}
		}
	} else {
		/* Flash interface not yet reconfigured. */
		uint32_t mask;

		DBPRINT(sc, BCE_INFO_LOAD,
			"%s(): Flash was NOT reconfigured.\n", __func__);

		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		/* Look for the matching NVRAM device configuration data. */
		for (j = 0, flash = flash_table; j < entry_count;
		     j++, flash++) {
			/* Check if the device matches any of the known devices. */
			if ((val & mask) == (flash->strapping & mask)) {
				/* Found a device match. */
				sc->bce_flash_info = flash;

				/* Request access to the flash interface. */
				rc = bce_acquire_nvram_lock(sc);
				if (rc != 0)
					return rc;

				/* Reconfigure the flash interface. */
				bce_enable_nvram_access(sc);
				REG_WR(sc, BCE_NVM_CFG1, flash->config1);
				REG_WR(sc, BCE_NVM_CFG2, flash->config2);
				REG_WR(sc, BCE_NVM_CFG3, flash->config3);
				REG_WR(sc, BCE_NVM_WRITE1, flash->write1);
				bce_disable_nvram_access(sc);
				bce_release_nvram_lock(sc);
				break;
			}
		}
	}

	/* Check if a matching device was found.
*/ 1732 if (j == entry_count) { 1733 sc->bce_flash_info = NULL; 1734 if_printf(&sc->arpcom.ac_if, "Unknown Flash NVRAM found!\n"); 1735 return ENODEV; 1736 } 1737 1738 bce_init_nvram_get_flash_size: 1739 /* Write the flash config data to the shared memory interface. */ 1740 val = bce_shmem_rd(sc, BCE_SHARED_HW_CFG_CONFIG2) & 1741 BCE_SHARED_HW_CFG2_NVM_SIZE_MASK; 1742 if (val) 1743 sc->bce_flash_size = val; 1744 else 1745 sc->bce_flash_size = sc->bce_flash_info->total_size; 1746 1747 DBPRINT(sc, BCE_INFO_LOAD, "%s() flash->total_size = 0x%08X\n", 1748 __func__, sc->bce_flash_info->total_size); 1749 1750 DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __func__); 1751 1752 return rc; 1753 } 1754 1755 1756 /****************************************************************************/ 1757 /* Read an arbitrary range of data from NVRAM. */ 1758 /* */ 1759 /* Prepares the NVRAM interface for access and reads the requested data */ 1760 /* into the supplied buffer. */ 1761 /* */ 1762 /* Returns: */ 1763 /* 0 on success and the data read, positive value on failure. */ 1764 /****************************************************************************/ 1765 static int 1766 bce_nvram_read(struct bce_softc *sc, uint32_t offset, uint8_t *ret_buf, 1767 int buf_size) 1768 { 1769 uint32_t cmd_flags, offset32, len32, extra; 1770 int rc = 0; 1771 1772 if (buf_size == 0) 1773 return 0; 1774 1775 /* Request access to the flash interface. */ 1776 rc = bce_acquire_nvram_lock(sc); 1777 if (rc != 0) 1778 return rc; 1779 1780 /* Enable access to flash interface */ 1781 bce_enable_nvram_access(sc); 1782 1783 len32 = buf_size; 1784 offset32 = offset; 1785 extra = 0; 1786 1787 cmd_flags = 0; 1788 1789 /* XXX should we release nvram lock if read_dword() fails? 
*/ 1790 if (offset32 & 3) { 1791 uint8_t buf[4]; 1792 uint32_t pre_len; 1793 1794 offset32 &= ~3; 1795 pre_len = 4 - (offset & 3); 1796 1797 if (pre_len >= len32) { 1798 pre_len = len32; 1799 cmd_flags = BCE_NVM_COMMAND_FIRST | BCE_NVM_COMMAND_LAST; 1800 } else { 1801 cmd_flags = BCE_NVM_COMMAND_FIRST; 1802 } 1803 1804 rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags); 1805 if (rc) 1806 return rc; 1807 1808 memcpy(ret_buf, buf + (offset & 3), pre_len); 1809 1810 offset32 += 4; 1811 ret_buf += pre_len; 1812 len32 -= pre_len; 1813 } 1814 1815 if (len32 & 3) { 1816 extra = 4 - (len32 & 3); 1817 len32 = (len32 + 4) & ~3; 1818 } 1819 1820 if (len32 == 4) { 1821 uint8_t buf[4]; 1822 1823 if (cmd_flags) 1824 cmd_flags = BCE_NVM_COMMAND_LAST; 1825 else 1826 cmd_flags = BCE_NVM_COMMAND_FIRST | 1827 BCE_NVM_COMMAND_LAST; 1828 1829 rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags); 1830 1831 memcpy(ret_buf, buf, 4 - extra); 1832 } else if (len32 > 0) { 1833 uint8_t buf[4]; 1834 1835 /* Read the first word. */ 1836 if (cmd_flags) 1837 cmd_flags = 0; 1838 else 1839 cmd_flags = BCE_NVM_COMMAND_FIRST; 1840 1841 rc = bce_nvram_read_dword(sc, offset32, ret_buf, cmd_flags); 1842 1843 /* Advance to the next dword. */ 1844 offset32 += 4; 1845 ret_buf += 4; 1846 len32 -= 4; 1847 1848 while (len32 > 4 && rc == 0) { 1849 rc = bce_nvram_read_dword(sc, offset32, ret_buf, 0); 1850 1851 /* Advance to the next dword. */ 1852 offset32 += 4; 1853 ret_buf += 4; 1854 len32 -= 4; 1855 } 1856 1857 if (rc) 1858 goto bce_nvram_read_locked_exit; 1859 1860 cmd_flags = BCE_NVM_COMMAND_LAST; 1861 rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags); 1862 1863 memcpy(ret_buf, buf, 4 - extra); 1864 } 1865 1866 bce_nvram_read_locked_exit: 1867 /* Disable access to flash interface and release the lock. 
*/ 1868 bce_disable_nvram_access(sc); 1869 bce_release_nvram_lock(sc); 1870 1871 return rc; 1872 } 1873 1874 1875 /****************************************************************************/ 1876 /* Verifies that NVRAM is accessible and contains valid data. */ 1877 /* */ 1878 /* Reads the configuration data from NVRAM and verifies that the CRC is */ 1879 /* correct. */ 1880 /* */ 1881 /* Returns: */ 1882 /* 0 on success, positive value on failure. */ 1883 /****************************************************************************/ 1884 static int 1885 bce_nvram_test(struct bce_softc *sc) 1886 { 1887 uint32_t buf[BCE_NVRAM_SIZE / 4]; 1888 uint32_t magic, csum; 1889 uint8_t *data = (uint8_t *)buf; 1890 int rc = 0; 1891 1892 /* 1893 * Check that the device NVRAM is valid by reading 1894 * the magic value at offset 0. 1895 */ 1896 rc = bce_nvram_read(sc, 0, data, 4); 1897 if (rc != 0) 1898 return rc; 1899 1900 magic = be32toh(buf[0]); 1901 if (magic != BCE_NVRAM_MAGIC) { 1902 if_printf(&sc->arpcom.ac_if, 1903 "Invalid NVRAM magic value! Expected: 0x%08X, " 1904 "Found: 0x%08X\n", BCE_NVRAM_MAGIC, magic); 1905 return ENODEV; 1906 } 1907 1908 /* 1909 * Verify that the device NVRAM includes valid 1910 * configuration data. 1911 */ 1912 rc = bce_nvram_read(sc, 0x100, data, BCE_NVRAM_SIZE); 1913 if (rc != 0) 1914 return rc; 1915 1916 csum = ether_crc32_le(data, 0x100); 1917 if (csum != BCE_CRC32_RESIDUAL) { 1918 if_printf(&sc->arpcom.ac_if, 1919 "Invalid Manufacturing Information NVRAM CRC! " 1920 "Expected: 0x%08X, Found: 0x%08X\n", 1921 BCE_CRC32_RESIDUAL, csum); 1922 return ENODEV; 1923 } 1924 1925 csum = ether_crc32_le(data + 0x100, 0x100); 1926 if (csum != BCE_CRC32_RESIDUAL) { 1927 if_printf(&sc->arpcom.ac_if, 1928 "Invalid Feature Configuration Information " 1929 "NVRAM CRC! 
Expected: 0x%08X, Found: 08%08X\n", 1930 BCE_CRC32_RESIDUAL, csum); 1931 rc = ENODEV; 1932 } 1933 return rc; 1934 } 1935 1936 1937 /****************************************************************************/ 1938 /* Identifies the current media type of the controller and sets the PHY */ 1939 /* address. */ 1940 /* */ 1941 /* Returns: */ 1942 /* Nothing. */ 1943 /****************************************************************************/ 1944 static void 1945 bce_get_media(struct bce_softc *sc) 1946 { 1947 uint32_t val; 1948 1949 sc->bce_phy_addr = 1; 1950 1951 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 || 1952 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) { 1953 uint32_t val = REG_RD(sc, BCE_MISC_DUAL_MEDIA_CTRL); 1954 uint32_t bond_id = val & BCE_MISC_DUAL_MEDIA_CTRL_BOND_ID; 1955 uint32_t strap; 1956 1957 /* 1958 * The BCM5709S is software configurable 1959 * for Copper or SerDes operation. 1960 */ 1961 if (bond_id == BCE_MISC_DUAL_MEDIA_CTRL_BOND_ID_C) { 1962 return; 1963 } else if (bond_id == BCE_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) { 1964 sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG; 1965 return; 1966 } 1967 1968 if (val & BCE_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE) { 1969 strap = (val & BCE_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21; 1970 } else { 1971 strap = 1972 (val & BCE_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8; 1973 } 1974 1975 if (pci_get_function(sc->bce_dev) == 0) { 1976 switch (strap) { 1977 case 0x4: 1978 case 0x5: 1979 case 0x6: 1980 sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG; 1981 break; 1982 } 1983 } else { 1984 switch (strap) { 1985 case 0x1: 1986 case 0x2: 1987 case 0x4: 1988 sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG; 1989 break; 1990 } 1991 } 1992 } else if (BCE_CHIP_BOND_ID(sc) & BCE_CHIP_BOND_ID_SERDES_BIT) { 1993 sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG; 1994 } 1995 1996 if (sc->bce_phy_flags & BCE_PHY_SERDES_FLAG) { 1997 sc->bce_flags |= BCE_NO_WOL_FLAG; 1998 if (BCE_CHIP_NUM(sc) != BCE_CHIP_NUM_5706) { 1999 sc->bce_phy_addr = 2; 2000 val = bce_shmem_rd(sc, 
BCE_SHARED_HW_CFG_CONFIG); 2001 if (val & BCE_SHARED_HW_CFG_PHY_2_5G) 2002 sc->bce_phy_flags |= BCE_PHY_2_5G_CAPABLE_FLAG; 2003 } 2004 } else if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) || 2005 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708)) { 2006 sc->bce_phy_flags |= BCE_PHY_CRC_FIX_FLAG; 2007 } 2008 } 2009 2010 2011 /****************************************************************************/ 2012 /* Free any DMA memory owned by the driver. */ 2013 /* */ 2014 /* Scans through each data structre that requires DMA memory and frees */ 2015 /* the memory if allocated. */ 2016 /* */ 2017 /* Returns: */ 2018 /* Nothing. */ 2019 /****************************************************************************/ 2020 static void 2021 bce_dma_free(struct bce_softc *sc) 2022 { 2023 int i; 2024 2025 /* Destroy the status block. */ 2026 if (sc->status_tag != NULL) { 2027 if (sc->status_block != NULL) { 2028 bus_dmamap_unload(sc->status_tag, sc->status_map); 2029 bus_dmamem_free(sc->status_tag, sc->status_block, 2030 sc->status_map); 2031 } 2032 bus_dma_tag_destroy(sc->status_tag); 2033 } 2034 2035 2036 /* Destroy the statistics block. */ 2037 if (sc->stats_tag != NULL) { 2038 if (sc->stats_block != NULL) { 2039 bus_dmamap_unload(sc->stats_tag, sc->stats_map); 2040 bus_dmamem_free(sc->stats_tag, sc->stats_block, 2041 sc->stats_map); 2042 } 2043 bus_dma_tag_destroy(sc->stats_tag); 2044 } 2045 2046 /* Destroy the CTX DMA stuffs. */ 2047 if (sc->ctx_tag != NULL) { 2048 for (i = 0; i < sc->ctx_pages; i++) { 2049 if (sc->ctx_block[i] != NULL) { 2050 bus_dmamap_unload(sc->ctx_tag, sc->ctx_map[i]); 2051 bus_dmamem_free(sc->ctx_tag, sc->ctx_block[i], 2052 sc->ctx_map[i]); 2053 } 2054 } 2055 bus_dma_tag_destroy(sc->ctx_tag); 2056 } 2057 2058 /* Destroy the TX buffer descriptor DMA stuffs. 
*/ 2059 if (sc->tx_bd_chain_tag != NULL) { 2060 for (i = 0; i < TX_PAGES; i++) { 2061 if (sc->tx_bd_chain[i] != NULL) { 2062 bus_dmamap_unload(sc->tx_bd_chain_tag, 2063 sc->tx_bd_chain_map[i]); 2064 bus_dmamem_free(sc->tx_bd_chain_tag, 2065 sc->tx_bd_chain[i], 2066 sc->tx_bd_chain_map[i]); 2067 } 2068 } 2069 bus_dma_tag_destroy(sc->tx_bd_chain_tag); 2070 } 2071 2072 /* Destroy the RX buffer descriptor DMA stuffs. */ 2073 if (sc->rx_bd_chain_tag != NULL) { 2074 for (i = 0; i < RX_PAGES; i++) { 2075 if (sc->rx_bd_chain[i] != NULL) { 2076 bus_dmamap_unload(sc->rx_bd_chain_tag, 2077 sc->rx_bd_chain_map[i]); 2078 bus_dmamem_free(sc->rx_bd_chain_tag, 2079 sc->rx_bd_chain[i], 2080 sc->rx_bd_chain_map[i]); 2081 } 2082 } 2083 bus_dma_tag_destroy(sc->rx_bd_chain_tag); 2084 } 2085 2086 /* Destroy the TX mbuf DMA stuffs. */ 2087 if (sc->tx_mbuf_tag != NULL) { 2088 for (i = 0; i < TOTAL_TX_BD; i++) { 2089 /* Must have been unloaded in bce_stop() */ 2090 KKASSERT(sc->tx_mbuf_ptr[i] == NULL); 2091 bus_dmamap_destroy(sc->tx_mbuf_tag, 2092 sc->tx_mbuf_map[i]); 2093 } 2094 bus_dma_tag_destroy(sc->tx_mbuf_tag); 2095 } 2096 2097 /* Destroy the RX mbuf DMA stuffs. */ 2098 if (sc->rx_mbuf_tag != NULL) { 2099 for (i = 0; i < TOTAL_RX_BD; i++) { 2100 /* Must have been unloaded in bce_stop() */ 2101 KKASSERT(sc->rx_mbuf_ptr[i] == NULL); 2102 bus_dmamap_destroy(sc->rx_mbuf_tag, 2103 sc->rx_mbuf_map[i]); 2104 } 2105 bus_dmamap_destroy(sc->rx_mbuf_tag, sc->rx_mbuf_tmpmap); 2106 bus_dma_tag_destroy(sc->rx_mbuf_tag); 2107 } 2108 2109 /* Destroy the parent tag */ 2110 if (sc->parent_tag != NULL) 2111 bus_dma_tag_destroy(sc->parent_tag); 2112 } 2113 2114 2115 /****************************************************************************/ 2116 /* Get DMA memory from the OS. */ 2117 /* */ 2118 /* Validates that the OS has provided DMA buffers in response to a */ 2119 /* bus_dmamap_load() call and saves the physical address of those buffers. 
*/ 2120 /* When the callback is used the OS will return 0 for the mapping function */ 2121 /* (bus_dmamap_load()) so we use the value of map_arg->maxsegs to pass any */ 2122 /* failures back to the caller. */ 2123 /* */ 2124 /* Returns: */ 2125 /* Nothing. */ 2126 /****************************************************************************/ 2127 static void 2128 bce_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error) 2129 { 2130 bus_addr_t *busaddr = arg; 2131 2132 /* 2133 * Simulate a mapping failure. 2134 * XXX not correct. 2135 */ 2136 DBRUNIF(DB_RANDOMTRUE(bce_debug_dma_map_addr_failure), 2137 kprintf("bce: %s(%d): Simulating DMA mapping error.\n", 2138 __FILE__, __LINE__); 2139 error = ENOMEM); 2140 2141 /* Check for an error and signal the caller that an error occurred. */ 2142 if (error) 2143 return; 2144 2145 KASSERT(nseg == 1, ("only one segment is allowed\n")); 2146 *busaddr = segs->ds_addr; 2147 } 2148 2149 2150 /****************************************************************************/ 2151 /* Allocate any DMA memory needed by the driver. */ 2152 /* */ 2153 /* Allocates DMA memory needed for the various global structures needed by */ 2154 /* hardware. 
*/ 2155 /* */ 2156 /* Memory alignment requirements: */ 2157 /* -----------------+----------+----------+----------+----------+ */ 2158 /* Data Structure | 5706 | 5708 | 5709 | 5716 | */ 2159 /* -----------------+----------+----------+----------+----------+ */ 2160 /* Status Block | 8 bytes | 8 bytes | 16 bytes | 16 bytes | */ 2161 /* Statistics Block | 8 bytes | 8 bytes | 16 bytes | 16 bytes | */ 2162 /* RX Buffers | 16 bytes | 16 bytes | 16 bytes | 16 bytes | */ 2163 /* PG Buffers | none | none | none | none | */ 2164 /* TX Buffers | none | none | none | none | */ 2165 /* Chain Pages(1) | 4KiB | 4KiB | 4KiB | 4KiB | */ 2166 /* Context Pages(1) | N/A | N/A | 4KiB | 4KiB | */ 2167 /* -----------------+----------+----------+----------+----------+ */ 2168 /* */ 2169 /* (1) Must align with CPU page size (BCM_PAGE_SZIE). */ 2170 /* */ 2171 /* Returns: */ 2172 /* 0 for success, positive value for failure. */ 2173 /****************************************************************************/ 2174 static int 2175 bce_dma_alloc(struct bce_softc *sc) 2176 { 2177 struct ifnet *ifp = &sc->arpcom.ac_if; 2178 int i, j, rc = 0; 2179 bus_addr_t busaddr, max_busaddr; 2180 bus_size_t status_align, stats_align; 2181 2182 /* 2183 * The embedded PCIe to PCI-X bridge (EPB) 2184 * in the 5708 cannot address memory above 2185 * 40 bits (E7_5708CB1_23043 & E6_5708SB1_23043). 2186 */ 2187 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708) 2188 max_busaddr = BCE_BUS_SPACE_MAXADDR; 2189 else 2190 max_busaddr = BUS_SPACE_MAXADDR; 2191 2192 /* 2193 * BCM5709 and BCM5716 uses host memory as cache for context memory. 
2194 */ 2195 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 || 2196 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) { 2197 sc->ctx_pages = BCE_CTX_BLK_SZ / BCM_PAGE_SIZE; 2198 if (sc->ctx_pages == 0) 2199 sc->ctx_pages = 1; 2200 if (sc->ctx_pages > BCE_CTX_PAGES) { 2201 device_printf(sc->bce_dev, "excessive ctx pages %d\n", 2202 sc->ctx_pages); 2203 return ENOMEM; 2204 } 2205 status_align = 16; 2206 stats_align = 16; 2207 } else { 2208 status_align = 8; 2209 stats_align = 8; 2210 } 2211 2212 /* 2213 * Allocate the parent bus DMA tag appropriate for PCI. 2214 */ 2215 rc = bus_dma_tag_create(NULL, 1, BCE_DMA_BOUNDARY, 2216 max_busaddr, BUS_SPACE_MAXADDR, 2217 NULL, NULL, 2218 BUS_SPACE_MAXSIZE_32BIT, 0, 2219 BUS_SPACE_MAXSIZE_32BIT, 2220 0, &sc->parent_tag); 2221 if (rc != 0) { 2222 if_printf(ifp, "Could not allocate parent DMA tag!\n"); 2223 return rc; 2224 } 2225 2226 /* 2227 * Allocate status block. 2228 */ 2229 sc->status_block = bus_dmamem_coherent_any(sc->parent_tag, 2230 status_align, BCE_STATUS_BLK_SZ, 2231 BUS_DMA_WAITOK | BUS_DMA_ZERO, 2232 &sc->status_tag, &sc->status_map, 2233 &sc->status_block_paddr); 2234 if (sc->status_block == NULL) { 2235 if_printf(ifp, "Could not allocate status block!\n"); 2236 return ENOMEM; 2237 } 2238 2239 /* 2240 * Allocate statistics block. 
2241 */ 2242 sc->stats_block = bus_dmamem_coherent_any(sc->parent_tag, 2243 stats_align, BCE_STATS_BLK_SZ, 2244 BUS_DMA_WAITOK | BUS_DMA_ZERO, 2245 &sc->stats_tag, &sc->stats_map, 2246 &sc->stats_block_paddr); 2247 if (sc->stats_block == NULL) { 2248 if_printf(ifp, "Could not allocate statistics block!\n"); 2249 return ENOMEM; 2250 } 2251 2252 /* 2253 * Allocate context block, if needed 2254 */ 2255 if (sc->ctx_pages != 0) { 2256 rc = bus_dma_tag_create(sc->parent_tag, BCM_PAGE_SIZE, 0, 2257 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 2258 NULL, NULL, 2259 BCM_PAGE_SIZE, 1, BCM_PAGE_SIZE, 2260 0, &sc->ctx_tag); 2261 if (rc != 0) { 2262 if_printf(ifp, "Could not allocate " 2263 "context block DMA tag!\n"); 2264 return rc; 2265 } 2266 2267 for (i = 0; i < sc->ctx_pages; i++) { 2268 rc = bus_dmamem_alloc(sc->ctx_tag, 2269 (void **)&sc->ctx_block[i], 2270 BUS_DMA_WAITOK | BUS_DMA_ZERO | 2271 BUS_DMA_COHERENT, 2272 &sc->ctx_map[i]); 2273 if (rc != 0) { 2274 if_printf(ifp, "Could not allocate %dth context " 2275 "DMA memory!\n", i); 2276 return rc; 2277 } 2278 2279 rc = bus_dmamap_load(sc->ctx_tag, sc->ctx_map[i], 2280 sc->ctx_block[i], BCM_PAGE_SIZE, 2281 bce_dma_map_addr, &busaddr, 2282 BUS_DMA_WAITOK); 2283 if (rc != 0) { 2284 if (rc == EINPROGRESS) { 2285 panic("%s coherent memory loading " 2286 "is still in progress!", ifp->if_xname); 2287 } 2288 if_printf(ifp, "Could not map %dth context " 2289 "DMA memory!\n", i); 2290 bus_dmamem_free(sc->ctx_tag, sc->ctx_block[i], 2291 sc->ctx_map[i]); 2292 sc->ctx_block[i] = NULL; 2293 return rc; 2294 } 2295 sc->ctx_paddr[i] = busaddr; 2296 } 2297 } 2298 2299 /* 2300 * Create a DMA tag for the TX buffer descriptor chain, 2301 * allocate and clear the memory, and fetch the 2302 * physical address of the block. 
2303 */ 2304 rc = bus_dma_tag_create(sc->parent_tag, BCM_PAGE_SIZE, 0, 2305 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 2306 NULL, NULL, 2307 BCE_TX_CHAIN_PAGE_SZ, 1, BCE_TX_CHAIN_PAGE_SZ, 2308 0, &sc->tx_bd_chain_tag); 2309 if (rc != 0) { 2310 if_printf(ifp, "Could not allocate " 2311 "TX descriptor chain DMA tag!\n"); 2312 return rc; 2313 } 2314 2315 for (i = 0; i < TX_PAGES; i++) { 2316 rc = bus_dmamem_alloc(sc->tx_bd_chain_tag, 2317 (void **)&sc->tx_bd_chain[i], 2318 BUS_DMA_WAITOK | BUS_DMA_ZERO | 2319 BUS_DMA_COHERENT, 2320 &sc->tx_bd_chain_map[i]); 2321 if (rc != 0) { 2322 if_printf(ifp, "Could not allocate %dth TX descriptor " 2323 "chain DMA memory!\n", i); 2324 return rc; 2325 } 2326 2327 rc = bus_dmamap_load(sc->tx_bd_chain_tag, 2328 sc->tx_bd_chain_map[i], 2329 sc->tx_bd_chain[i], BCE_TX_CHAIN_PAGE_SZ, 2330 bce_dma_map_addr, &busaddr, 2331 BUS_DMA_WAITOK); 2332 if (rc != 0) { 2333 if (rc == EINPROGRESS) { 2334 panic("%s coherent memory loading " 2335 "is still in progress!", ifp->if_xname); 2336 } 2337 if_printf(ifp, "Could not map %dth TX descriptor " 2338 "chain DMA memory!\n", i); 2339 bus_dmamem_free(sc->tx_bd_chain_tag, 2340 sc->tx_bd_chain[i], 2341 sc->tx_bd_chain_map[i]); 2342 sc->tx_bd_chain[i] = NULL; 2343 return rc; 2344 } 2345 2346 sc->tx_bd_chain_paddr[i] = busaddr; 2347 /* DRC - Fix for 64 bit systems. */ 2348 DBPRINT(sc, BCE_INFO, "tx_bd_chain_paddr[%d] = 0x%08X\n", 2349 i, (uint32_t)sc->tx_bd_chain_paddr[i]); 2350 } 2351 2352 /* Create a DMA tag for TX mbufs. */ 2353 rc = bus_dma_tag_create(sc->parent_tag, 1, 0, 2354 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 2355 NULL, NULL, 2356 /* BCE_MAX_JUMBO_ETHER_MTU_VLAN */MCLBYTES, 2357 BCE_MAX_SEGMENTS, MCLBYTES, 2358 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | 2359 BUS_DMA_ONEBPAGE, 2360 &sc->tx_mbuf_tag); 2361 if (rc != 0) { 2362 if_printf(ifp, "Could not allocate TX mbuf DMA tag!\n"); 2363 return rc; 2364 } 2365 2366 /* Create DMA maps for the TX mbufs clusters. 
*/ 2367 for (i = 0; i < TOTAL_TX_BD; i++) { 2368 rc = bus_dmamap_create(sc->tx_mbuf_tag, 2369 BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE, 2370 &sc->tx_mbuf_map[i]); 2371 if (rc != 0) { 2372 for (j = 0; j < i; ++j) { 2373 bus_dmamap_destroy(sc->tx_mbuf_tag, 2374 sc->tx_mbuf_map[i]); 2375 } 2376 bus_dma_tag_destroy(sc->tx_mbuf_tag); 2377 sc->tx_mbuf_tag = NULL; 2378 2379 if_printf(ifp, "Unable to create " 2380 "%dth TX mbuf DMA map!\n", i); 2381 return rc; 2382 } 2383 } 2384 2385 /* 2386 * Create a DMA tag for the RX buffer descriptor chain, 2387 * allocate and clear the memory, and fetch the physical 2388 * address of the blocks. 2389 */ 2390 rc = bus_dma_tag_create(sc->parent_tag, BCM_PAGE_SIZE, 0, 2391 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 2392 NULL, NULL, 2393 BCE_RX_CHAIN_PAGE_SZ, 1, BCE_RX_CHAIN_PAGE_SZ, 2394 0, &sc->rx_bd_chain_tag); 2395 if (rc != 0) { 2396 if_printf(ifp, "Could not allocate " 2397 "RX descriptor chain DMA tag!\n"); 2398 return rc; 2399 } 2400 2401 for (i = 0; i < RX_PAGES; i++) { 2402 rc = bus_dmamem_alloc(sc->rx_bd_chain_tag, 2403 (void **)&sc->rx_bd_chain[i], 2404 BUS_DMA_WAITOK | BUS_DMA_ZERO | 2405 BUS_DMA_COHERENT, 2406 &sc->rx_bd_chain_map[i]); 2407 if (rc != 0) { 2408 if_printf(ifp, "Could not allocate %dth RX descriptor " 2409 "chain DMA memory!\n", i); 2410 return rc; 2411 } 2412 2413 rc = bus_dmamap_load(sc->rx_bd_chain_tag, 2414 sc->rx_bd_chain_map[i], 2415 sc->rx_bd_chain[i], BCE_RX_CHAIN_PAGE_SZ, 2416 bce_dma_map_addr, &busaddr, 2417 BUS_DMA_WAITOK); 2418 if (rc != 0) { 2419 if (rc == EINPROGRESS) { 2420 panic("%s coherent memory loading " 2421 "is still in progress!", ifp->if_xname); 2422 } 2423 if_printf(ifp, "Could not map %dth RX descriptor " 2424 "chain DMA memory!\n", i); 2425 bus_dmamem_free(sc->rx_bd_chain_tag, 2426 sc->rx_bd_chain[i], 2427 sc->rx_bd_chain_map[i]); 2428 sc->rx_bd_chain[i] = NULL; 2429 return rc; 2430 } 2431 2432 sc->rx_bd_chain_paddr[i] = busaddr; 2433 /* DRC - Fix for 64 bit systems. 
*/ 2434 DBPRINT(sc, BCE_INFO, "rx_bd_chain_paddr[%d] = 0x%08X\n", 2435 i, (uint32_t)sc->rx_bd_chain_paddr[i]); 2436 } 2437 2438 /* Create a DMA tag for RX mbufs. */ 2439 rc = bus_dma_tag_create(sc->parent_tag, BCE_DMA_RX_ALIGN, 0, 2440 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 2441 NULL, NULL, 2442 MCLBYTES, 1, MCLBYTES, 2443 BUS_DMA_ALLOCNOW | BUS_DMA_ALIGNED | 2444 BUS_DMA_WAITOK, 2445 &sc->rx_mbuf_tag); 2446 if (rc != 0) { 2447 if_printf(ifp, "Could not allocate RX mbuf DMA tag!\n"); 2448 return rc; 2449 } 2450 2451 /* Create tmp DMA map for RX mbuf clusters. */ 2452 rc = bus_dmamap_create(sc->rx_mbuf_tag, BUS_DMA_WAITOK, 2453 &sc->rx_mbuf_tmpmap); 2454 if (rc != 0) { 2455 bus_dma_tag_destroy(sc->rx_mbuf_tag); 2456 sc->rx_mbuf_tag = NULL; 2457 2458 if_printf(ifp, "Could not create RX mbuf tmp DMA map!\n"); 2459 return rc; 2460 } 2461 2462 /* Create DMA maps for the RX mbuf clusters. */ 2463 for (i = 0; i < TOTAL_RX_BD; i++) { 2464 rc = bus_dmamap_create(sc->rx_mbuf_tag, BUS_DMA_WAITOK, 2465 &sc->rx_mbuf_map[i]); 2466 if (rc != 0) { 2467 for (j = 0; j < i; ++j) { 2468 bus_dmamap_destroy(sc->rx_mbuf_tag, 2469 sc->rx_mbuf_map[j]); 2470 } 2471 bus_dma_tag_destroy(sc->rx_mbuf_tag); 2472 sc->rx_mbuf_tag = NULL; 2473 2474 if_printf(ifp, "Unable to create " 2475 "%dth RX mbuf DMA map!\n", i); 2476 return rc; 2477 } 2478 } 2479 return 0; 2480 } 2481 2482 2483 /****************************************************************************/ 2484 /* Firmware synchronization. */ 2485 /* */ 2486 /* Before performing certain events such as a chip reset, synchronize with */ 2487 /* the firmware first. */ 2488 /* */ 2489 /* Returns: */ 2490 /* 0 for success, positive value for failure. */ 2491 /****************************************************************************/ 2492 static int 2493 bce_fw_sync(struct bce_softc *sc, uint32_t msg_data) 2494 { 2495 int i, rc = 0; 2496 uint32_t val; 2497 2498 /* Don't waste any time if we've timed out before. 
 */
    if (sc->bce_fw_timed_out)
        return EBUSY;

    /* Increment the message sequence number. */
    sc->bce_fw_wr_seq++;
    msg_data |= sc->bce_fw_wr_seq;

    DBPRINT(sc, BCE_VERBOSE, "bce_fw_sync(): msg_data = 0x%08X\n", msg_data);

    /* Send the message to the bootcode driver mailbox. */
    bce_shmem_wr(sc, BCE_DRV_MB, msg_data);

    /* Wait for the bootcode to acknowledge the message. */
    for (i = 0; i < FW_ACK_TIME_OUT_MS; i++) {
        /* Check for a response in the bootcode firmware mailbox. */
        val = bce_shmem_rd(sc, BCE_FW_MB);
        if ((val & BCE_FW_MSG_ACK) == (msg_data & BCE_DRV_MSG_SEQ))
            break;
        DELAY(1000);
    }

    /*
     * If we've timed out, tell the bootcode that we've stopped waiting.
     * NOTE(review): 'val' is only defined here if the loop above ran at
     * least once, i.e. FW_ACK_TIME_OUT_MS > 0 — confirm the constant.
     */
    if ((val & BCE_FW_MSG_ACK) != (msg_data & BCE_DRV_MSG_SEQ) &&
        (msg_data & BCE_DRV_MSG_DATA) != BCE_DRV_MSG_DATA_WAIT0) {
        if_printf(&sc->arpcom.ac_if,
            "Firmware synchronization timeout! "
            "msg_data = 0x%08X\n", msg_data);

        msg_data &= ~BCE_DRV_MSG_CODE;
        msg_data |= BCE_DRV_MSG_CODE_FW_TIMEOUT;

        bce_shmem_wr(sc, BCE_DRV_MB, msg_data);

        sc->bce_fw_timed_out = 1;
        rc = EBUSY;
    }
    return rc;
}


/****************************************************************************/
/* Load Receive Virtual 2 Physical (RV2P) processor firmware.               */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_load_rv2p_fw(struct bce_softc *sc, uint32_t *rv2p_code,
    uint32_t rv2p_code_len, uint32_t rv2p_proc)
{
    int i;
    uint32_t val;

    /*
     * Each RV2P instruction is a 64-bit (high/low word) pair; write the
     * pair into the instruction registers, then commit it to instruction
     * slot i/8 of the selected processor (PROC1 or PROC2).
     */
    for (i = 0; i < rv2p_code_len; i += 8) {
        REG_WR(sc, BCE_RV2P_INSTR_HIGH, *rv2p_code);
        rv2p_code++;
        REG_WR(sc, BCE_RV2P_INSTR_LOW, *rv2p_code);
        rv2p_code++;

        if (rv2p_proc == RV2P_PROC1) {
            val = (i / 8) | BCE_RV2P_PROC1_ADDR_CMD_RDWR;
            REG_WR(sc, BCE_RV2P_PROC1_ADDR_CMD, val);
        } else {
            val = (i / 8) | BCE_RV2P_PROC2_ADDR_CMD_RDWR;
            REG_WR(sc, BCE_RV2P_PROC2_ADDR_CMD, val);
        }
    }

    /* Reset the processor, un-stall is done later. */
    if (rv2p_proc == RV2P_PROC1)
        REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC1_RESET);
    else
        REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC2_RESET);
}


/****************************************************************************/
/* Load RISC processor firmware.                                            */
/*                                                                          */
/* Loads firmware from the file if_bcefw.h into the scratchpad memory       */
/* associated with a particular processor.                                  */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_load_cpu_fw(struct bce_softc *sc, struct cpu_reg *cpu_reg,
    struct fw_info *fw)
{
    uint32_t offset;
    int j;

    /* CPU must be halted while its scratchpad is rewritten. */
    bce_halt_cpu(sc, cpu_reg);

    /* Load the Text area. */
    offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
    if (fw->text) {
        for (j = 0; j < (fw->text_len / 4); j++, offset += 4)
            REG_WR_IND(sc, offset, fw->text[j]);
    }

    /* Load the Data area. */
    offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
    if (fw->data) {
        for (j = 0; j < (fw->data_len / 4); j++, offset += 4)
            REG_WR_IND(sc, offset, fw->data[j]);
    }

    /* Load the SBSS area. */
    offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
    if (fw->sbss) {
        for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4)
            REG_WR_IND(sc, offset, fw->sbss[j]);
    }

    /* Load the BSS area. */
    offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
    if (fw->bss) {
        for (j = 0; j < (fw->bss_len/4); j++, offset += 4)
            REG_WR_IND(sc, offset, fw->bss[j]);
    }

    /* Load the Read-Only area. */
    offset = cpu_reg->spad_base +
        (fw->rodata_addr - cpu_reg->mips_view_base);
    if (fw->rodata) {
        for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4)
            REG_WR_IND(sc, offset, fw->rodata[j]);
    }

    /* Clear the pre-fetch instruction and set the FW start address. */
    REG_WR_IND(sc, cpu_reg->inst, 0);
    REG_WR_IND(sc, cpu_reg->pc, fw->start_addr);
}


/****************************************************************************/
/* Starts the RISC processor.                                               */
/*                                                                          */
/* Assumes the CPU starting address has already been set.                   */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_start_cpu(struct bce_softc *sc, struct cpu_reg *cpu_reg)
{
    uint32_t val;

    /* Start the CPU: clear the halt bit, clear pending state, resume. */
    val = REG_RD_IND(sc, cpu_reg->mode);
    val &= ~cpu_reg->mode_value_halt;
    REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
    REG_WR_IND(sc, cpu_reg->mode, val);
}


/****************************************************************************/
/* Halts the RISC processor.                                                */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
 */
/****************************************************************************/
static void
bce_halt_cpu(struct bce_softc *sc, struct cpu_reg *cpu_reg)
{
    uint32_t val;

    /* Halt the CPU: set the halt bit, then clear pending state. */
    val = REG_RD_IND(sc, cpu_reg->mode);
    val |= cpu_reg->mode_value_halt;
    REG_WR_IND(sc, cpu_reg->mode, val);
    REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
}


/****************************************************************************/
/* Start the RX CPU.                                                        */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_start_rxp_cpu(struct bce_softc *sc)
{
    struct cpu_reg cpu_reg;

    /* Describe the RXP processor's register file for bce_start_cpu(). */
    cpu_reg.mode = BCE_RXP_CPU_MODE;
    cpu_reg.mode_value_halt = BCE_RXP_CPU_MODE_SOFT_HALT;
    cpu_reg.mode_value_sstep = BCE_RXP_CPU_MODE_STEP_ENA;
    cpu_reg.state = BCE_RXP_CPU_STATE;
    cpu_reg.state_value_clear = 0xffffff;
    cpu_reg.gpr0 = BCE_RXP_CPU_REG_FILE;
    cpu_reg.evmask = BCE_RXP_CPU_EVENT_MASK;
    cpu_reg.pc = BCE_RXP_CPU_PROGRAM_COUNTER;
    cpu_reg.inst = BCE_RXP_CPU_INSTRUCTION;
    cpu_reg.bp = BCE_RXP_CPU_HW_BREAKPOINT;
    cpu_reg.spad_base = BCE_RXP_SCRATCH;
    cpu_reg.mips_view_base = 0x8000000;

    bce_start_cpu(sc, &cpu_reg);
}


/****************************************************************************/
/* Initialize the RX CPU.                                                   */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_init_rxp_cpu(struct bce_softc *sc)
{
    struct cpu_reg cpu_reg;
    struct fw_info fw;

    cpu_reg.mode = BCE_RXP_CPU_MODE;
    cpu_reg.mode_value_halt = BCE_RXP_CPU_MODE_SOFT_HALT;
    cpu_reg.mode_value_sstep = BCE_RXP_CPU_MODE_STEP_ENA;
    cpu_reg.state = BCE_RXP_CPU_STATE;
    cpu_reg.state_value_clear = 0xffffff;
    cpu_reg.gpr0 = BCE_RXP_CPU_REG_FILE;
    cpu_reg.evmask = BCE_RXP_CPU_EVENT_MASK;
    cpu_reg.pc = BCE_RXP_CPU_PROGRAM_COUNTER;
    cpu_reg.inst = BCE_RXP_CPU_INSTRUCTION;
    cpu_reg.bp = BCE_RXP_CPU_HW_BREAKPOINT;
    cpu_reg.spad_base = BCE_RXP_SCRATCH;
    cpu_reg.mips_view_base = 0x8000000;

    /* Select the b09 image for 5709/5716, the b06 image otherwise. */
    if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
        BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
        fw.ver_major = bce_RXP_b09FwReleaseMajor;
        fw.ver_minor = bce_RXP_b09FwReleaseMinor;
        fw.ver_fix = bce_RXP_b09FwReleaseFix;
        fw.start_addr = bce_RXP_b09FwStartAddr;

        fw.text_addr = bce_RXP_b09FwTextAddr;
        fw.text_len = bce_RXP_b09FwTextLen;
        fw.text_index = 0;
        fw.text = bce_RXP_b09FwText;

        fw.data_addr = bce_RXP_b09FwDataAddr;
        fw.data_len = bce_RXP_b09FwDataLen;
        fw.data_index = 0;
        fw.data = bce_RXP_b09FwData;

        fw.sbss_addr = bce_RXP_b09FwSbssAddr;
        fw.sbss_len = bce_RXP_b09FwSbssLen;
        fw.sbss_index = 0;
        fw.sbss = bce_RXP_b09FwSbss;

        fw.bss_addr = bce_RXP_b09FwBssAddr;
        fw.bss_len = bce_RXP_b09FwBssLen;
        fw.bss_index = 0;
        fw.bss = bce_RXP_b09FwBss;

        fw.rodata_addr = bce_RXP_b09FwRodataAddr;
        fw.rodata_len = bce_RXP_b09FwRodataLen;
        fw.rodata_index = 0;
        fw.rodata = bce_RXP_b09FwRodata;
    } else {
        fw.ver_major = bce_RXP_b06FwReleaseMajor;
        fw.ver_minor = bce_RXP_b06FwReleaseMinor;
        fw.ver_fix = bce_RXP_b06FwReleaseFix;
        fw.start_addr = bce_RXP_b06FwStartAddr;

        fw.text_addr = bce_RXP_b06FwTextAddr;
        fw.text_len = bce_RXP_b06FwTextLen;
        fw.text_index = 0;
        fw.text = bce_RXP_b06FwText;

        fw.data_addr = bce_RXP_b06FwDataAddr;
        fw.data_len = bce_RXP_b06FwDataLen;
        fw.data_index = 0;
        fw.data = bce_RXP_b06FwData;

        fw.sbss_addr = bce_RXP_b06FwSbssAddr;
        fw.sbss_len = bce_RXP_b06FwSbssLen;
        fw.sbss_index = 0;
        fw.sbss = bce_RXP_b06FwSbss;

        fw.bss_addr = bce_RXP_b06FwBssAddr;
        fw.bss_len = bce_RXP_b06FwBssLen;
        fw.bss_index = 0;
        fw.bss = bce_RXP_b06FwBss;

        fw.rodata_addr = bce_RXP_b06FwRodataAddr;
        fw.rodata_len = bce_RXP_b06FwRodataLen;
        fw.rodata_index = 0;
        fw.rodata = bce_RXP_b06FwRodata;
    }

    DBPRINT(sc, BCE_INFO_RESET, "Loading RX firmware.\n");
    bce_load_cpu_fw(sc, &cpu_reg, &fw);
    /* Delay RXP start until initialization is complete. */
}


/****************************************************************************/
/* Initialize the TX CPU.                                                   */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
 */
/****************************************************************************/
static void
bce_init_txp_cpu(struct bce_softc *sc)
{
    struct cpu_reg cpu_reg;
    struct fw_info fw;

    /* Describe the TXP processor's register file. */
    cpu_reg.mode = BCE_TXP_CPU_MODE;
    cpu_reg.mode_value_halt = BCE_TXP_CPU_MODE_SOFT_HALT;
    cpu_reg.mode_value_sstep = BCE_TXP_CPU_MODE_STEP_ENA;
    cpu_reg.state = BCE_TXP_CPU_STATE;
    cpu_reg.state_value_clear = 0xffffff;
    cpu_reg.gpr0 = BCE_TXP_CPU_REG_FILE;
    cpu_reg.evmask = BCE_TXP_CPU_EVENT_MASK;
    cpu_reg.pc = BCE_TXP_CPU_PROGRAM_COUNTER;
    cpu_reg.inst = BCE_TXP_CPU_INSTRUCTION;
    cpu_reg.bp = BCE_TXP_CPU_HW_BREAKPOINT;
    cpu_reg.spad_base = BCE_TXP_SCRATCH;
    cpu_reg.mips_view_base = 0x8000000;

    /* Select the b09 image for 5709/5716, the b06 image otherwise. */
    if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
        BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
        fw.ver_major = bce_TXP_b09FwReleaseMajor;
        fw.ver_minor = bce_TXP_b09FwReleaseMinor;
        fw.ver_fix = bce_TXP_b09FwReleaseFix;
        fw.start_addr = bce_TXP_b09FwStartAddr;

        fw.text_addr = bce_TXP_b09FwTextAddr;
        fw.text_len = bce_TXP_b09FwTextLen;
        fw.text_index = 0;
        fw.text = bce_TXP_b09FwText;

        fw.data_addr = bce_TXP_b09FwDataAddr;
        fw.data_len = bce_TXP_b09FwDataLen;
        fw.data_index = 0;
        fw.data = bce_TXP_b09FwData;

        fw.sbss_addr = bce_TXP_b09FwSbssAddr;
        fw.sbss_len = bce_TXP_b09FwSbssLen;
        fw.sbss_index = 0;
        fw.sbss = bce_TXP_b09FwSbss;

        fw.bss_addr = bce_TXP_b09FwBssAddr;
        fw.bss_len = bce_TXP_b09FwBssLen;
        fw.bss_index = 0;
        fw.bss = bce_TXP_b09FwBss;

        fw.rodata_addr = bce_TXP_b09FwRodataAddr;
        fw.rodata_len = bce_TXP_b09FwRodataLen;
        fw.rodata_index = 0;
        fw.rodata = bce_TXP_b09FwRodata;
    } else {
        fw.ver_major = bce_TXP_b06FwReleaseMajor;
        fw.ver_minor = bce_TXP_b06FwReleaseMinor;
        fw.ver_fix = bce_TXP_b06FwReleaseFix;
        fw.start_addr = bce_TXP_b06FwStartAddr;

        fw.text_addr = bce_TXP_b06FwTextAddr;
        fw.text_len = bce_TXP_b06FwTextLen;
        fw.text_index = 0;
        fw.text = bce_TXP_b06FwText;

        fw.data_addr = bce_TXP_b06FwDataAddr;
        fw.data_len = bce_TXP_b06FwDataLen;
        fw.data_index = 0;
        fw.data = bce_TXP_b06FwData;

        fw.sbss_addr = bce_TXP_b06FwSbssAddr;
        fw.sbss_len = bce_TXP_b06FwSbssLen;
        fw.sbss_index = 0;
        fw.sbss = bce_TXP_b06FwSbss;

        fw.bss_addr = bce_TXP_b06FwBssAddr;
        fw.bss_len = bce_TXP_b06FwBssLen;
        fw.bss_index = 0;
        fw.bss = bce_TXP_b06FwBss;

        fw.rodata_addr = bce_TXP_b06FwRodataAddr;
        fw.rodata_len = bce_TXP_b06FwRodataLen;
        fw.rodata_index = 0;
        fw.rodata = bce_TXP_b06FwRodata;
    }

    DBPRINT(sc, BCE_INFO_RESET, "Loading TX firmware.\n");
    bce_load_cpu_fw(sc, &cpu_reg, &fw);
    bce_start_cpu(sc, &cpu_reg);
}


/****************************************************************************/
/* Initialize the TPAT CPU.                                                 */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
 */
/****************************************************************************/
static void
bce_init_tpat_cpu(struct bce_softc *sc)
{
    struct cpu_reg cpu_reg;
    struct fw_info fw;

    /* Describe the TPAT processor's register file. */
    cpu_reg.mode = BCE_TPAT_CPU_MODE;
    cpu_reg.mode_value_halt = BCE_TPAT_CPU_MODE_SOFT_HALT;
    cpu_reg.mode_value_sstep = BCE_TPAT_CPU_MODE_STEP_ENA;
    cpu_reg.state = BCE_TPAT_CPU_STATE;
    cpu_reg.state_value_clear = 0xffffff;
    cpu_reg.gpr0 = BCE_TPAT_CPU_REG_FILE;
    cpu_reg.evmask = BCE_TPAT_CPU_EVENT_MASK;
    cpu_reg.pc = BCE_TPAT_CPU_PROGRAM_COUNTER;
    cpu_reg.inst = BCE_TPAT_CPU_INSTRUCTION;
    cpu_reg.bp = BCE_TPAT_CPU_HW_BREAKPOINT;
    cpu_reg.spad_base = BCE_TPAT_SCRATCH;
    cpu_reg.mips_view_base = 0x8000000;

    /* Select the b09 image for 5709/5716, the b06 image otherwise. */
    if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
        BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
        fw.ver_major = bce_TPAT_b09FwReleaseMajor;
        fw.ver_minor = bce_TPAT_b09FwReleaseMinor;
        fw.ver_fix = bce_TPAT_b09FwReleaseFix;
        fw.start_addr = bce_TPAT_b09FwStartAddr;

        fw.text_addr = bce_TPAT_b09FwTextAddr;
        fw.text_len = bce_TPAT_b09FwTextLen;
        fw.text_index = 0;
        fw.text = bce_TPAT_b09FwText;

        fw.data_addr = bce_TPAT_b09FwDataAddr;
        fw.data_len = bce_TPAT_b09FwDataLen;
        fw.data_index = 0;
        fw.data = bce_TPAT_b09FwData;

        fw.sbss_addr = bce_TPAT_b09FwSbssAddr;
        fw.sbss_len = bce_TPAT_b09FwSbssLen;
        fw.sbss_index = 0;
        fw.sbss = bce_TPAT_b09FwSbss;

        fw.bss_addr = bce_TPAT_b09FwBssAddr;
        fw.bss_len = bce_TPAT_b09FwBssLen;
        fw.bss_index = 0;
        fw.bss = bce_TPAT_b09FwBss;

        fw.rodata_addr = bce_TPAT_b09FwRodataAddr;
        fw.rodata_len = bce_TPAT_b09FwRodataLen;
        fw.rodata_index = 0;
        fw.rodata = bce_TPAT_b09FwRodata;
    } else {
        fw.ver_major = bce_TPAT_b06FwReleaseMajor;
        fw.ver_minor = bce_TPAT_b06FwReleaseMinor;
        fw.ver_fix = bce_TPAT_b06FwReleaseFix;
        fw.start_addr = bce_TPAT_b06FwStartAddr;

        fw.text_addr = bce_TPAT_b06FwTextAddr;
        fw.text_len = bce_TPAT_b06FwTextLen;
        fw.text_index = 0;
        fw.text = bce_TPAT_b06FwText;

        fw.data_addr = bce_TPAT_b06FwDataAddr;
        fw.data_len = bce_TPAT_b06FwDataLen;
        fw.data_index = 0;
        fw.data = bce_TPAT_b06FwData;

        fw.sbss_addr = bce_TPAT_b06FwSbssAddr;
        fw.sbss_len = bce_TPAT_b06FwSbssLen;
        fw.sbss_index = 0;
        fw.sbss = bce_TPAT_b06FwSbss;

        fw.bss_addr = bce_TPAT_b06FwBssAddr;
        fw.bss_len = bce_TPAT_b06FwBssLen;
        fw.bss_index = 0;
        fw.bss = bce_TPAT_b06FwBss;

        fw.rodata_addr = bce_TPAT_b06FwRodataAddr;
        fw.rodata_len = bce_TPAT_b06FwRodataLen;
        fw.rodata_index = 0;
        fw.rodata = bce_TPAT_b06FwRodata;
    }

    DBPRINT(sc, BCE_INFO_RESET, "Loading TPAT firmware.\n");
    bce_load_cpu_fw(sc, &cpu_reg, &fw);
    bce_start_cpu(sc, &cpu_reg);
}


/****************************************************************************/
/* Initialize the CP CPU.                                                   */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
 */
/****************************************************************************/
static void
bce_init_cp_cpu(struct bce_softc *sc)
{
    struct cpu_reg cpu_reg;
    struct fw_info fw;

    /* Describe the CP processor's register file. */
    cpu_reg.mode = BCE_CP_CPU_MODE;
    cpu_reg.mode_value_halt = BCE_CP_CPU_MODE_SOFT_HALT;
    cpu_reg.mode_value_sstep = BCE_CP_CPU_MODE_STEP_ENA;
    cpu_reg.state = BCE_CP_CPU_STATE;
    cpu_reg.state_value_clear = 0xffffff;
    cpu_reg.gpr0 = BCE_CP_CPU_REG_FILE;
    cpu_reg.evmask = BCE_CP_CPU_EVENT_MASK;
    cpu_reg.pc = BCE_CP_CPU_PROGRAM_COUNTER;
    cpu_reg.inst = BCE_CP_CPU_INSTRUCTION;
    cpu_reg.bp = BCE_CP_CPU_HW_BREAKPOINT;
    cpu_reg.spad_base = BCE_CP_SCRATCH;
    cpu_reg.mips_view_base = 0x8000000;

    /* Select the b09 image for 5709/5716, the b06 image otherwise. */
    if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
        BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
        fw.ver_major = bce_CP_b09FwReleaseMajor;
        fw.ver_minor = bce_CP_b09FwReleaseMinor;
        fw.ver_fix = bce_CP_b09FwReleaseFix;
        fw.start_addr = bce_CP_b09FwStartAddr;

        fw.text_addr = bce_CP_b09FwTextAddr;
        fw.text_len = bce_CP_b09FwTextLen;
        fw.text_index = 0;
        fw.text = bce_CP_b09FwText;

        fw.data_addr = bce_CP_b09FwDataAddr;
        fw.data_len = bce_CP_b09FwDataLen;
        fw.data_index = 0;
        fw.data = bce_CP_b09FwData;

        fw.sbss_addr = bce_CP_b09FwSbssAddr;
        fw.sbss_len = bce_CP_b09FwSbssLen;
        fw.sbss_index = 0;
        fw.sbss = bce_CP_b09FwSbss;

        fw.bss_addr = bce_CP_b09FwBssAddr;
        fw.bss_len = bce_CP_b09FwBssLen;
        fw.bss_index = 0;
        fw.bss = bce_CP_b09FwBss;

        fw.rodata_addr = bce_CP_b09FwRodataAddr;
        fw.rodata_len = bce_CP_b09FwRodataLen;
        fw.rodata_index = 0;
        fw.rodata = bce_CP_b09FwRodata;
    } else {
        fw.ver_major = bce_CP_b06FwReleaseMajor;
        fw.ver_minor = bce_CP_b06FwReleaseMinor;
        fw.ver_fix = bce_CP_b06FwReleaseFix;
        fw.start_addr = bce_CP_b06FwStartAddr;

        fw.text_addr = bce_CP_b06FwTextAddr;
        fw.text_len = bce_CP_b06FwTextLen;
        fw.text_index = 0;
        fw.text = bce_CP_b06FwText;

        fw.data_addr = bce_CP_b06FwDataAddr;
        fw.data_len = bce_CP_b06FwDataLen;
        fw.data_index = 0;
        fw.data = bce_CP_b06FwData;

        fw.sbss_addr = bce_CP_b06FwSbssAddr;
        fw.sbss_len = bce_CP_b06FwSbssLen;
        fw.sbss_index = 0;
        fw.sbss = bce_CP_b06FwSbss;

        fw.bss_addr = bce_CP_b06FwBssAddr;
        fw.bss_len = bce_CP_b06FwBssLen;
        fw.bss_index = 0;
        fw.bss = bce_CP_b06FwBss;

        fw.rodata_addr = bce_CP_b06FwRodataAddr;
        fw.rodata_len = bce_CP_b06FwRodataLen;
        fw.rodata_index = 0;
        fw.rodata = bce_CP_b06FwRodata;
    }

    DBPRINT(sc, BCE_INFO_RESET, "Loading CP firmware.\n");
    bce_load_cpu_fw(sc, &cpu_reg, &fw);
    bce_start_cpu(sc, &cpu_reg);
}


/****************************************************************************/
/* Initialize the COM CPU.                                                  */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
 */
/****************************************************************************/
static void
bce_init_com_cpu(struct bce_softc *sc)
{
	struct cpu_reg cpu_reg;
	struct fw_info fw;

	/* Describe the COM processor's control/debug register layout. */
	cpu_reg.mode = BCE_COM_CPU_MODE;
	cpu_reg.mode_value_halt = BCE_COM_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BCE_COM_CPU_MODE_STEP_ENA;
	cpu_reg.state = BCE_COM_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BCE_COM_CPU_REG_FILE;
	cpu_reg.evmask = BCE_COM_CPU_EVENT_MASK;
	cpu_reg.pc = BCE_COM_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BCE_COM_CPU_INSTRUCTION;
	cpu_reg.bp = BCE_COM_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BCE_COM_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	/*
	 * Select the firmware image to load: 5709/5716 parts use the
	 * b09 build, all other supported chips use the b06 build.
	 */
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
		fw.ver_major = bce_COM_b09FwReleaseMajor;
		fw.ver_minor = bce_COM_b09FwReleaseMinor;
		fw.ver_fix = bce_COM_b09FwReleaseFix;
		fw.start_addr = bce_COM_b09FwStartAddr;

		fw.text_addr = bce_COM_b09FwTextAddr;
		fw.text_len = bce_COM_b09FwTextLen;
		fw.text_index = 0;
		fw.text = bce_COM_b09FwText;

		fw.data_addr = bce_COM_b09FwDataAddr;
		fw.data_len = bce_COM_b09FwDataLen;
		fw.data_index = 0;
		fw.data = bce_COM_b09FwData;

		fw.sbss_addr = bce_COM_b09FwSbssAddr;
		fw.sbss_len = bce_COM_b09FwSbssLen;
		fw.sbss_index = 0;
		fw.sbss = bce_COM_b09FwSbss;

		fw.bss_addr = bce_COM_b09FwBssAddr;
		fw.bss_len = bce_COM_b09FwBssLen;
		fw.bss_index = 0;
		fw.bss = bce_COM_b09FwBss;

		fw.rodata_addr = bce_COM_b09FwRodataAddr;
		fw.rodata_len = bce_COM_b09FwRodataLen;
		fw.rodata_index = 0;
		fw.rodata = bce_COM_b09FwRodata;
	} else {
		fw.ver_major = bce_COM_b06FwReleaseMajor;
		fw.ver_minor = bce_COM_b06FwReleaseMinor;
		fw.ver_fix = bce_COM_b06FwReleaseFix;
		fw.start_addr = bce_COM_b06FwStartAddr;

		fw.text_addr = bce_COM_b06FwTextAddr;
		fw.text_len = bce_COM_b06FwTextLen;
		fw.text_index = 0;
		fw.text = bce_COM_b06FwText;

		fw.data_addr = bce_COM_b06FwDataAddr;
		fw.data_len = bce_COM_b06FwDataLen;
		fw.data_index = 0;
		fw.data = bce_COM_b06FwData;

		fw.sbss_addr = bce_COM_b06FwSbssAddr;
		fw.sbss_len = bce_COM_b06FwSbssLen;
		fw.sbss_index = 0;
		fw.sbss = bce_COM_b06FwSbss;

		fw.bss_addr = bce_COM_b06FwBssAddr;
		fw.bss_len = bce_COM_b06FwBssLen;
		fw.bss_index = 0;
		fw.bss = bce_COM_b06FwBss;

		fw.rodata_addr = bce_COM_b06FwRodataAddr;
		fw.rodata_len = bce_COM_b06FwRodataLen;
		fw.rodata_index = 0;
		fw.rodata = bce_COM_b06FwRodata;
	}

	/* Download the image and release the CPU from reset. */
	DBPRINT(sc, BCE_INFO_RESET, "Loading COM firmware.\n");
	bce_load_cpu_fw(sc, &cpu_reg, &fw);
	bce_start_cpu(sc, &cpu_reg);
}


/****************************************************************************/
/* Initialize the RV2P, RX, TX, TPAT, COM, and CP CPUs.                     */
/*                                                                          */
/* Loads the firmware for each CPU and starts the CPU.                      */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
 */
/****************************************************************************/
static void
bce_init_cpus(struct bce_softc *sc)
{
	/*
	 * The RV2P engine takes a raw firmware blob rather than a
	 * struct fw_info; pick the image matching the chip: the
	 * "xi90" image for 5709/5716 Ax steppings, the "xi" image
	 * for later 5709/5716 steppings, and the base image for
	 * 5706/5708.
	 */
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
		if (BCE_CHIP_REV(sc) == BCE_CHIP_REV_Ax) {
			bce_load_rv2p_fw(sc, bce_xi90_rv2p_proc1,
			    sizeof(bce_xi90_rv2p_proc1), RV2P_PROC1);
			bce_load_rv2p_fw(sc, bce_xi90_rv2p_proc2,
			    sizeof(bce_xi90_rv2p_proc2), RV2P_PROC2);
		} else {
			bce_load_rv2p_fw(sc, bce_xi_rv2p_proc1,
			    sizeof(bce_xi_rv2p_proc1), RV2P_PROC1);
			bce_load_rv2p_fw(sc, bce_xi_rv2p_proc2,
			    sizeof(bce_xi_rv2p_proc2), RV2P_PROC2);
		}
	} else {
		bce_load_rv2p_fw(sc, bce_rv2p_proc1,
		    sizeof(bce_rv2p_proc1), RV2P_PROC1);
		bce_load_rv2p_fw(sc, bce_rv2p_proc2,
		    sizeof(bce_rv2p_proc2), RV2P_PROC2);
	}

	/* Load and start each of the remaining on-chip processors. */
	bce_init_rxp_cpu(sc);
	bce_init_txp_cpu(sc);
	bce_init_tpat_cpu(sc);
	bce_init_com_cpu(sc);
	bce_init_cp_cpu(sc);
}


/****************************************************************************/
/* Initialize context memory.                                               */
/*                                                                          */
/* Clears the memory associated with each Context ID (CID).                 */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, ETIMEDOUT on failure.                                   */
/****************************************************************************/
static int
bce_init_ctx(struct bce_softc *sc)
{
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
		/* DRC: Replace this constant value with a #define. */
		int i, retry_cnt = 10;
		uint32_t val;

		/*
		 * BCM5709 context memory may be cached
		 * in host memory so prepare the host memory
		 * for access.
		 */
		val = BCE_CTX_COMMAND_ENABLED | BCE_CTX_COMMAND_MEM_INIT |
		    (1 << 12);
		val |= (BCM_PAGE_BITS - 8) << 16;
		REG_WR(sc, BCE_CTX_COMMAND, val);

		/* Wait for mem init command to complete. */
		for (i = 0; i < retry_cnt; i++) {
			val = REG_RD(sc, BCE_CTX_COMMAND);
			if (!(val & BCE_CTX_COMMAND_MEM_INIT))
				break;
			DELAY(2);
		}
		if (i == retry_cnt) {
			device_printf(sc->bce_dev,
			    "Context memory initialization failed!\n");
			return ETIMEDOUT;
		}

		for (i = 0; i < sc->ctx_pages; i++) {
			int j;

			/*
			 * Set the physical address of the context
			 * memory cache.
			 */
			REG_WR(sc, BCE_CTX_HOST_PAGE_TBL_DATA0,
			    BCE_ADDR_LO(sc->ctx_paddr[i] & 0xfffffff0) |
			    BCE_CTX_HOST_PAGE_TBL_DATA0_VALID);
			REG_WR(sc, BCE_CTX_HOST_PAGE_TBL_DATA1,
			    BCE_ADDR_HI(sc->ctx_paddr[i]));
			REG_WR(sc, BCE_CTX_HOST_PAGE_TBL_CTRL,
			    i | BCE_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);

			/*
			 * Verify that the context memory write was successful.
			 */
			for (j = 0; j < retry_cnt; j++) {
				val = REG_RD(sc, BCE_CTX_HOST_PAGE_TBL_CTRL);
				if ((val &
				    BCE_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) == 0)
					break;
				DELAY(5);
			}
			if (j == retry_cnt) {
				device_printf(sc->bce_dev,
				    "Failed to initialize context page!\n");
				return ETIMEDOUT;
			}
		}
	} else {
		uint32_t vcid_addr, offset;

		/*
		 * For the 5706/5708, context memory is local to
		 * the controller, so initialize the controller
		 * context memory.
		 */

		/* Walk the context area backwards, one PHY context at a time. */
		vcid_addr = GET_CID_ADDR(96);
		while (vcid_addr) {
			vcid_addr -= PHY_CTX_SIZE;

			REG_WR(sc, BCE_CTX_VIRT_ADDR, 0);
			REG_WR(sc, BCE_CTX_PAGE_TBL, vcid_addr);

			/* Zero the entire context page. */
			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
				CTX_WR(sc, 0x00, offset, 0);

			REG_WR(sc, BCE_CTX_VIRT_ADDR, vcid_addr);
			REG_WR(sc, BCE_CTX_PAGE_TBL, vcid_addr);
		}
	}
	return 0;
}


/****************************************************************************/
/* Fetch the permanent MAC address of the controller.                       */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
*/ 3315 /****************************************************************************/ 3316 static void 3317 bce_get_mac_addr(struct bce_softc *sc) 3318 { 3319 uint32_t mac_lo = 0, mac_hi = 0; 3320 3321 /* 3322 * The NetXtreme II bootcode populates various NIC 3323 * power-on and runtime configuration items in a 3324 * shared memory area. The factory configured MAC 3325 * address is available from both NVRAM and the 3326 * shared memory area so we'll read the value from 3327 * shared memory for speed. 3328 */ 3329 3330 mac_hi = bce_shmem_rd(sc, BCE_PORT_HW_CFG_MAC_UPPER); 3331 mac_lo = bce_shmem_rd(sc, BCE_PORT_HW_CFG_MAC_LOWER); 3332 3333 if (mac_lo == 0 && mac_hi == 0) { 3334 if_printf(&sc->arpcom.ac_if, "Invalid Ethernet address!\n"); 3335 } else { 3336 sc->eaddr[0] = (u_char)(mac_hi >> 8); 3337 sc->eaddr[1] = (u_char)(mac_hi >> 0); 3338 sc->eaddr[2] = (u_char)(mac_lo >> 24); 3339 sc->eaddr[3] = (u_char)(mac_lo >> 16); 3340 sc->eaddr[4] = (u_char)(mac_lo >> 8); 3341 sc->eaddr[5] = (u_char)(mac_lo >> 0); 3342 } 3343 3344 DBPRINT(sc, BCE_INFO, "Permanent Ethernet address = %6D\n", sc->eaddr, ":"); 3345 } 3346 3347 3348 /****************************************************************************/ 3349 /* Program the MAC address. */ 3350 /* */ 3351 /* Returns: */ 3352 /* Nothing. */ 3353 /****************************************************************************/ 3354 static void 3355 bce_set_mac_addr(struct bce_softc *sc) 3356 { 3357 const uint8_t *mac_addr = sc->eaddr; 3358 uint32_t val; 3359 3360 DBPRINT(sc, BCE_INFO, "Setting Ethernet address = %6D\n", 3361 sc->eaddr, ":"); 3362 3363 val = (mac_addr[0] << 8) | mac_addr[1]; 3364 REG_WR(sc, BCE_EMAC_MAC_MATCH0, val); 3365 3366 val = (mac_addr[2] << 24) | 3367 (mac_addr[3] << 16) | 3368 (mac_addr[4] << 8) | 3369 mac_addr[5]; 3370 REG_WR(sc, BCE_EMAC_MAC_MATCH1, val); 3371 } 3372 3373 3374 /****************************************************************************/ 3375 /* Stop the controller. 
 */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_stop(struct bce_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ASSERT_SERIALIZED(ifp->if_serializer);

	/* Stop the periodic tick callout before touching the hardware. */
	callout_stop(&sc->bce_tick_callout);

	/* Disable the transmit/receive blocks. */
	REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS, BCE_MISC_ENABLE_CLR_DEFAULT);
	REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS);	/* flush the write */
	DELAY(20);

	bce_disable_intr(sc);

	/* Free the RX lists. */
	bce_free_rx_chain(sc);

	/* Free TX buffers. */
	bce_free_tx_chain(sc);

	/* Forget link state and any pending coalescing changes. */
	sc->bce_link = 0;
	sc->bce_coalchg_mask = 0;

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
}


/*
 * Reset the chip via the indicated reset_code, coordinating with the
 * bootcode firmware before and after.  Returns 0 on success or a
 * positive errno (EBUSY, ENODEV, or a bce_fw_sync() error).
 */
static int
bce_reset(struct bce_softc *sc, uint32_t reset_code)
{
	uint32_t val;
	int i, rc = 0;

	/* Wait for pending PCI transactions to complete. */
	REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS,
	    BCE_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	    BCE_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	    BCE_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	    BCE_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	val = REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS);
	DELAY(5);

	/* Disable DMA */
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
		val = REG_RD(sc, BCE_MISC_NEW_CORE_CTL);
		val &= ~BCE_MISC_NEW_CORE_CTL_DMA_ENABLE;
		REG_WR(sc, BCE_MISC_NEW_CORE_CTL, val);
	}

	/* Assume bootcode is running. */
	sc->bce_fw_timed_out = 0;
	sc->bce_drv_cardiac_arrest = 0;

	/* Give the firmware a chance to prepare for the reset. */
	rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT0 | reset_code);
	if (rc) {
		if_printf(&sc->arpcom.ac_if,
		    "Firmware is not ready for reset\n");
		return rc;
	}

	/* Set a firmware reminder that this is a soft reset. */
	bce_shmem_wr(sc, BCE_DRV_RESET_SIGNATURE,
	    BCE_DRV_RESET_SIGNATURE_MAGIC);

	/* Dummy read to force the chip to complete all current transactions. */
	val = REG_RD(sc, BCE_MISC_ID);

	/* Chip reset. */
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
		/* 5709/5716: reset via the MISC command register. */
		REG_WR(sc, BCE_MISC_COMMAND, BCE_MISC_COMMAND_SW_RESET);
		REG_RD(sc, BCE_MISC_COMMAND);
		DELAY(5);

		val = BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		    BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		pci_write_config(sc->bce_dev, BCE_PCICFG_MISC_CONFIG, val, 4);
	} else {
		/* 5706/5708: reset via the PCI config MISC register. */
		val = BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		    BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		    BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
		REG_WR(sc, BCE_PCICFG_MISC_CONFIG, val);

		/* Allow up to 30us for reset to complete. */
		for (i = 0; i < 10; i++) {
			val = REG_RD(sc, BCE_PCICFG_MISC_CONFIG);
			if ((val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			    BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
				break;
			DELAY(10);
		}

		/* Check that reset completed successfully. */
		if (val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		    BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
			if_printf(&sc->arpcom.ac_if, "Reset failed!\n");
			return EBUSY;
		}
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(sc, BCE_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		if_printf(&sc->arpcom.ac_if, "Byte swap is incorrect!\n");
		return ENODEV;
	}

	/* Just completed a reset, assume that firmware is running again. */
	sc->bce_fw_timed_out = 0;
	sc->bce_drv_cardiac_arrest = 0;

	/* Wait for the firmware to finish its initialization. */
	rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT1 | reset_code);
	if (rc) {
		if_printf(&sc->arpcom.ac_if,
		    "Firmware did not complete initialization!\n");
	}
	return rc;
}


/*
 * One-time chip initialization after reset: DMA configuration, context
 * memory, on-chip CPU firmware, NVRAM and MQ setup.  Returns 0 on
 * success or a positive errno from the helpers.
 */
static int
bce_chipinit(struct bce_softc *sc)
{
	uint32_t val;
	int rc = 0;

	/* Make sure the interrupt is not active. */
	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, BCE_PCICFG_INT_ACK_CMD_MASK_INT);
	REG_RD(sc, BCE_PCICFG_INT_ACK_CMD);	/* flush the write */

	/*
	 * Initialize DMA byte/word swapping, configure the number of DMA
	 * channels and PCI clock compensation delay.
	 */
	val = BCE_DMA_CONFIG_DATA_BYTE_SWAP |
	    BCE_DMA_CONFIG_DATA_WORD_SWAP |
#if BYTE_ORDER == BIG_ENDIAN
	    BCE_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	    BCE_DMA_CONFIG_CNTL_WORD_SWAP |
	    DMA_READ_CHANS << 12 |
	    DMA_WRITE_CHANS << 16;

	val |= (0x2 << 20) | BCE_DMA_CONFIG_CNTL_PCI_COMP_DLY;

	if ((sc->bce_flags & BCE_PCIX_FLAG) && sc->bus_speed_mhz == 133)
		val |= BCE_DMA_CONFIG_PCI_FAST_CLK_CMP;

	/*
	 * This setting resolves a problem observed on certain Intel PCI
	 * chipsets that cannot handle multiple outstanding DMA operations.
	 * See errata E9_5706A1_65.
	 */
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706 &&
	    BCE_CHIP_ID(sc) != BCE_CHIP_ID_5706_A0 &&
	    !(sc->bce_flags & BCE_PCIX_FLAG))
		val |= BCE_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(sc, BCE_DMA_CONFIG, val);

	/* Enable the RX_V2P and Context state machines before access. */
	REG_WR(sc, BCE_MISC_ENABLE_SET_BITS,
	    BCE_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	    BCE_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	    BCE_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts. */
	rc = bce_init_ctx(sc);
	if (rc != 0)
		return rc;

	/* Initialize the on-boards CPUs */
	bce_init_cpus(sc);

	/* Enable management frames (NC-SI) to flow to the MCP. */
	if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) {
		val = REG_RD(sc, BCE_RPM_MGMT_PKT_CTRL) |
		    BCE_RPM_MGMT_PKT_CTRL_MGMT_EN;
		REG_WR(sc, BCE_RPM_MGMT_PKT_CTRL, val);
	}

	/* Prepare NVRAM for access. */
	rc = bce_init_nvram(sc);
	if (rc != 0)
		return rc;

	/* Set the kernel bypass block size */
	val = REG_RD(sc, BCE_MQ_CONFIG);
	val &= ~BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;

	/* Enable bins used on the 5709/5716. */
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
		val |= BCE_MQ_CONFIG_BIN_MQ_MODE;
		if (BCE_CHIP_ID(sc) == BCE_CHIP_ID_5709_A1)
			val |= BCE_MQ_CONFIG_HALT_DIS;
	}

	REG_WR(sc, BCE_MQ_CONFIG, val);

	/* Program the kernel bypass window above the kernel contexts. */
	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(sc, BCE_MQ_KNL_BYP_WIND_START, val);
	REG_WR(sc, BCE_MQ_KNL_WIND_END, val);

	/* Set the page size and clear the RV2P processor stall bits. */
	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(sc, BCE_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(sc, BCE_TBDR_CONFIG);
	val &= ~BCE_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(sc, BCE_TBDR_CONFIG, val);

	/* Set the perfect match control register to default. */
	REG_WR_IND(sc, BCE_RXP_PM_CTRL, 0);

	return 0;
}


/****************************************************************************/
/* Initialize the controller in preparation to send/receive traffic.        */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.
*/ 3614 /****************************************************************************/ 3615 static int 3616 bce_blockinit(struct bce_softc *sc) 3617 { 3618 uint32_t reg, val; 3619 int rc = 0; 3620 3621 /* Load the hardware default MAC address. */ 3622 bce_set_mac_addr(sc); 3623 3624 /* Set the Ethernet backoff seed value */ 3625 val = sc->eaddr[0] + (sc->eaddr[1] << 8) + (sc->eaddr[2] << 16) + 3626 sc->eaddr[3] + (sc->eaddr[4] << 8) + (sc->eaddr[5] << 16); 3627 REG_WR(sc, BCE_EMAC_BACKOFF_SEED, val); 3628 3629 sc->last_status_idx = 0; 3630 sc->rx_mode = BCE_EMAC_RX_MODE_SORT_MODE; 3631 3632 sc->pulse_check_status_idx = 0xffff; 3633 3634 /* Set up link change interrupt generation. */ 3635 REG_WR(sc, BCE_EMAC_ATTENTION_ENA, BCE_EMAC_ATTENTION_ENA_LINK); 3636 3637 /* Program the physical address of the status block. */ 3638 REG_WR(sc, BCE_HC_STATUS_ADDR_L, BCE_ADDR_LO(sc->status_block_paddr)); 3639 REG_WR(sc, BCE_HC_STATUS_ADDR_H, BCE_ADDR_HI(sc->status_block_paddr)); 3640 3641 /* Program the physical address of the statistics block. */ 3642 REG_WR(sc, BCE_HC_STATISTICS_ADDR_L, 3643 BCE_ADDR_LO(sc->stats_block_paddr)); 3644 REG_WR(sc, BCE_HC_STATISTICS_ADDR_H, 3645 BCE_ADDR_HI(sc->stats_block_paddr)); 3646 3647 /* Program various host coalescing parameters. 
*/ 3648 REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP, 3649 (sc->bce_tx_quick_cons_trip_int << 16) | 3650 sc->bce_tx_quick_cons_trip); 3651 REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP, 3652 (sc->bce_rx_quick_cons_trip_int << 16) | 3653 sc->bce_rx_quick_cons_trip); 3654 REG_WR(sc, BCE_HC_COMP_PROD_TRIP, 3655 (sc->bce_comp_prod_trip_int << 16) | sc->bce_comp_prod_trip); 3656 REG_WR(sc, BCE_HC_TX_TICKS, 3657 (sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks); 3658 REG_WR(sc, BCE_HC_RX_TICKS, 3659 (sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks); 3660 REG_WR(sc, BCE_HC_COM_TICKS, 3661 (sc->bce_com_ticks_int << 16) | sc->bce_com_ticks); 3662 REG_WR(sc, BCE_HC_CMD_TICKS, 3663 (sc->bce_cmd_ticks_int << 16) | sc->bce_cmd_ticks); 3664 REG_WR(sc, BCE_HC_STATS_TICKS, (sc->bce_stats_ticks & 0xffff00)); 3665 REG_WR(sc, BCE_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */ 3666 3667 val = BCE_HC_CONFIG_TX_TMR_MODE | BCE_HC_CONFIG_COLLECT_STATS; 3668 if (sc->bce_flags & BCE_ONESHOT_MSI_FLAG) { 3669 if (bootverbose) 3670 if_printf(&sc->arpcom.ac_if, "oneshot MSI\n"); 3671 val |= BCE_HC_CONFIG_ONE_SHOT | BCE_HC_CONFIG_USE_INT_PARAM; 3672 } 3673 REG_WR(sc, BCE_HC_CONFIG, val); 3674 3675 /* Clear the internal statistics counters. */ 3676 REG_WR(sc, BCE_HC_COMMAND, BCE_HC_COMMAND_CLR_STAT_NOW); 3677 3678 /* Verify that bootcode is running. */ 3679 reg = bce_shmem_rd(sc, BCE_DEV_INFO_SIGNATURE); 3680 3681 DBRUNIF(DB_RANDOMTRUE(bce_debug_bootcode_running_failure), 3682 if_printf(&sc->arpcom.ac_if, 3683 "%s(%d): Simulating bootcode failure.\n", 3684 __FILE__, __LINE__); 3685 reg = 0); 3686 3687 if ((reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK) != 3688 BCE_DEV_INFO_SIGNATURE_MAGIC) { 3689 if_printf(&sc->arpcom.ac_if, 3690 "Bootcode not running! 
Found: 0x%08X, " 3691 "Expected: 08%08X\n", 3692 reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK, 3693 BCE_DEV_INFO_SIGNATURE_MAGIC); 3694 return ENODEV; 3695 } 3696 3697 /* Enable DMA */ 3698 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 || 3699 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) { 3700 val = REG_RD(sc, BCE_MISC_NEW_CORE_CTL); 3701 val |= BCE_MISC_NEW_CORE_CTL_DMA_ENABLE; 3702 REG_WR(sc, BCE_MISC_NEW_CORE_CTL, val); 3703 } 3704 3705 /* Allow bootcode to apply any additional fixes before enabling MAC. */ 3706 rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT2 | BCE_DRV_MSG_CODE_RESET); 3707 3708 /* Enable link state change interrupt generation. */ 3709 REG_WR(sc, BCE_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE); 3710 3711 /* Enable the RXP. */ 3712 bce_start_rxp_cpu(sc); 3713 3714 /* Disable management frames (NC-SI) from flowing to the MCP. */ 3715 if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) { 3716 val = REG_RD(sc, BCE_RPM_MGMT_PKT_CTRL) & 3717 ~BCE_RPM_MGMT_PKT_CTRL_MGMT_EN; 3718 REG_WR(sc, BCE_RPM_MGMT_PKT_CTRL, val); 3719 } 3720 3721 /* Enable all remaining blocks in the MAC. */ 3722 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 || 3723 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) { 3724 REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, 3725 BCE_MISC_ENABLE_DEFAULT_XI); 3726 } else { 3727 REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, BCE_MISC_ENABLE_DEFAULT); 3728 } 3729 REG_RD(sc, BCE_MISC_ENABLE_SET_BITS); 3730 DELAY(20); 3731 3732 /* Save the current host coalescing block settings. */ 3733 sc->hc_command = REG_RD(sc, BCE_HC_COMMAND); 3734 3735 return 0; 3736 } 3737 3738 3739 /****************************************************************************/ 3740 /* Encapsulate an mbuf cluster into the rx_bd chain. */ 3741 /* */ 3742 /* The NetXtreme II can support Jumbo frames by using multiple rx_bd's. */ 3743 /* This routine will map an mbuf cluster into 1 or more rx_bd's as */ 3744 /* necessary. */ 3745 /* */ 3746 /* Returns: */ 3747 /* 0 for success, positive value for failure. 
 */
/****************************************************************************/
static int
bce_newbuf_std(struct bce_softc *sc, uint16_t *prod, uint16_t *chain_prod,
	       uint32_t *prod_bseq, int init)
{
	bus_dmamap_t map;
	bus_dma_segment_t seg;
	struct mbuf *m_new;
	int error, nseg;
#ifdef BCE_DEBUG
	uint16_t debug_chain_prod = *chain_prod;
#endif

	/* Make sure the inputs are valid. */
	DBRUNIF((*chain_prod > MAX_RX_BD),
	    if_printf(&sc->arpcom.ac_if, "%s(%d): "
	        "RX producer out of range: 0x%04X > 0x%04X\n",
	        __FILE__, __LINE__,
	        *chain_prod, (uint16_t)MAX_RX_BD));

	DBPRINT(sc, BCE_VERBOSE_RECV, "%s(enter): prod = 0x%04X, chain_prod = 0x%04X, "
	    "prod_bseq = 0x%08X\n", __func__, *prod, *chain_prod, *prod_bseq);

	DBRUNIF(DB_RANDOMTRUE(bce_debug_mbuf_allocation_failure),
	    if_printf(&sc->arpcom.ac_if, "%s(%d): "
	        "Simulating mbuf allocation failure.\n",
	        __FILE__, __LINE__);
	    sc->mbuf_alloc_failed++;
	    return ENOBUFS);

	/*
	 * This is a new mbuf allocation.  Only block waiting for the
	 * cluster during initial ring fill (init != 0).
	 */
	m_new = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m_new == NULL)
		return ENOBUFS;
	DBRUNIF(1, sc->rx_mbuf_alloc++);

	m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;

	/*
	 * Map the mbuf cluster into device memory.  Load into the spare
	 * (tmp) map first so the slot's current mapping stays intact if
	 * the load fails.
	 */
	error = bus_dmamap_load_mbuf_segment(sc->rx_mbuf_tag,
	    sc->rx_mbuf_tmpmap, m_new, &seg, 1, &nseg,
	    BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m_new);
		if (init) {
			if_printf(&sc->arpcom.ac_if,
			    "Error mapping mbuf into RX chain!\n");
		}
		DBRUNIF(1, sc->rx_mbuf_alloc--);
		return error;
	}

	/* Unload any previous mapping occupying this ring slot. */
	if (sc->rx_mbuf_ptr[*chain_prod] != NULL) {
		bus_dmamap_unload(sc->rx_mbuf_tag,
		    sc->rx_mbuf_map[*chain_prod]);
	}

	/* Swap the slot's map with the temporary map just loaded. */
	map = sc->rx_mbuf_map[*chain_prod];
	sc->rx_mbuf_map[*chain_prod] = sc->rx_mbuf_tmpmap;
	sc->rx_mbuf_tmpmap = map;

	/* Watch for overflow. */
	DBRUNIF((sc->free_rx_bd > USABLE_RX_BD),
	    if_printf(&sc->arpcom.ac_if, "%s(%d): "
	        "Too many free rx_bd (0x%04X > 0x%04X)!\n",
	        __FILE__, __LINE__, sc->free_rx_bd,
	        (uint16_t)USABLE_RX_BD));

	/* Update some debug statistic counters */
	DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark),
	    sc->rx_low_watermark = sc->free_rx_bd);
	DBRUNIF((sc->free_rx_bd == 0), sc->rx_empty_count++);

	/* Save the mbuf and update our counter. */
	sc->rx_mbuf_ptr[*chain_prod] = m_new;
	sc->rx_mbuf_paddr[*chain_prod] = seg.ds_addr;
	sc->free_rx_bd--;

	bce_setup_rxdesc_std(sc, *chain_prod, prod_bseq);

	DBRUN(BCE_VERBOSE_RECV,
	    bce_dump_rx_mbuf_chain(sc, debug_chain_prod, 1));

	DBPRINT(sc, BCE_VERBOSE_RECV, "%s(exit): prod = 0x%04X, chain_prod = 0x%04X, "
	    "prod_bseq = 0x%08X\n", __func__, *prod, *chain_prod, *prod_bseq);

	return 0;
}


/*
 * Write the rx_bd descriptor for the mbuf previously saved at
 * chain_prod, and advance *prod_bseq by the buffer length.
 */
static void
bce_setup_rxdesc_std(struct bce_softc *sc, uint16_t chain_prod, uint32_t *prod_bseq)
{
	struct rx_bd *rxbd;
	bus_addr_t paddr;
	int len;

	paddr = sc->rx_mbuf_paddr[chain_prod];
	len = sc->rx_mbuf_ptr[chain_prod]->m_len;

	/* Setup the rx_bd for the first segment. */
	rxbd = &sc->rx_bd_chain[RX_PAGE(chain_prod)][RX_IDX(chain_prod)];

	rxbd->rx_bd_haddr_lo = htole32(BCE_ADDR_LO(paddr));
	rxbd->rx_bd_haddr_hi = htole32(BCE_ADDR_HI(paddr));
	rxbd->rx_bd_len = htole32(len);
	rxbd->rx_bd_flags = htole32(RX_BD_FLAGS_START);
	*prod_bseq += len;

	/* Single-segment buffer: the descriptor is both START and END. */
	rxbd->rx_bd_flags |= htole32(RX_BD_FLAGS_END);
}


/****************************************************************************/
/* Initialize the TX context memory.                                        */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing                                                                */
/****************************************************************************/
static void
bce_init_tx_context(struct bce_softc *sc)
{
	uint32_t val;

	/*
	 * Initialize the context ID for an L2 TX chain.  The 5709/5716
	 * use a different (XI) context register layout.
	 */
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
		/* Set the CID type to support an L2 connection. */
		val = BCE_L2CTX_TX_TYPE_TYPE_L2 | BCE_L2CTX_TX_TYPE_SIZE_L2;
		CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TX_TYPE_XI, val);
		val = BCE_L2CTX_TX_CMD_TYPE_TYPE_L2 | (8 << 16);
		CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TX_CMD_TYPE_XI, val);

		/* Point the hardware to the first page in the chain. */
		val = BCE_ADDR_HI(sc->tx_bd_chain_paddr[0]);
		CTX_WR(sc, GET_CID_ADDR(TX_CID),
		    BCE_L2CTX_TX_TBDR_BHADDR_HI_XI, val);
		val = BCE_ADDR_LO(sc->tx_bd_chain_paddr[0]);
		CTX_WR(sc, GET_CID_ADDR(TX_CID),
		    BCE_L2CTX_TX_TBDR_BHADDR_LO_XI, val);
	} else {
		/* Set the CID type to support an L2 connection. */
		val = BCE_L2CTX_TX_TYPE_TYPE_L2 | BCE_L2CTX_TX_TYPE_SIZE_L2;
		CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TX_TYPE, val);
		val = BCE_L2CTX_TX_CMD_TYPE_TYPE_L2 | (8 << 16);
		CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TX_CMD_TYPE, val);

		/* Point the hardware to the first page in the chain. */
		val = BCE_ADDR_HI(sc->tx_bd_chain_paddr[0]);
		CTX_WR(sc, GET_CID_ADDR(TX_CID),
		    BCE_L2CTX_TX_TBDR_BHADDR_HI, val);
		val = BCE_ADDR_LO(sc->tx_bd_chain_paddr[0]);
		CTX_WR(sc, GET_CID_ADDR(TX_CID),
		    BCE_L2CTX_TX_TBDR_BHADDR_LO, val);
	}
}


/****************************************************************************/
/* Allocate memory and initialize the TX data structures.                   */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
static int
bce_init_tx_chain(struct bce_softc *sc)
{
	struct tx_bd *txbd;
	int i, rc = 0;

	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __func__);

	/* Set the initial TX producer/consumer indices. */
	sc->tx_prod = 0;
	sc->tx_cons = 0;
	sc->tx_prod_bseq = 0;
	sc->used_tx_bd = 0;
	sc->max_tx_bd = USABLE_TX_BD;
	DBRUNIF(1, sc->tx_hi_watermark = USABLE_TX_BD);
	DBRUNIF(1, sc->tx_full_count = 0);

	/*
	 * The NetXtreme II supports a linked-list structure called
	 * a Buffer Descriptor Chain (or BD chain).  A BD chain
	 * consists of a series of 1 or more chain pages, each of which
	 * consists of a fixed number of BD entries.
	 * The last BD entry on each page is a pointer to the next page
	 * in the chain, and the last pointer in the BD chain
	 * points back to the beginning of the chain.
	 */

	/* Set the TX next pointer chain entries. */
	for (i = 0; i < TX_PAGES; i++) {
		int j;

		txbd = &sc->tx_bd_chain[i][USABLE_TX_BD_PER_PAGE];

		/* Check if we've reached the last page. */
		if (i == (TX_PAGES - 1))
			j = 0;
		else
			j = i + 1;

		txbd->tx_bd_haddr_hi =
		    htole32(BCE_ADDR_HI(sc->tx_bd_chain_paddr[j]));
		txbd->tx_bd_haddr_lo =
		    htole32(BCE_ADDR_LO(sc->tx_bd_chain_paddr[j]));
	}
	bce_init_tx_context(sc);

	return(rc);
}


/****************************************************************************/
/* Free memory and clear the TX data structures.                            */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_free_tx_chain(struct bce_softc *sc)
{
	int i;

	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __func__);

	/* Unmap, unload, and free any mbufs still in the TX mbuf chain. */
	for (i = 0; i < TOTAL_TX_BD; i++) {
		if (sc->tx_mbuf_ptr[i] != NULL) {
			bus_dmamap_unload(sc->tx_mbuf_tag, sc->tx_mbuf_map[i]);
			m_freem(sc->tx_mbuf_ptr[i]);
			sc->tx_mbuf_ptr[i] = NULL;
			DBRUNIF(1, sc->tx_mbuf_alloc--);
		}
	}

	/* Clear each TX chain page. */
	for (i = 0; i < TX_PAGES; i++)
		bzero(sc->tx_bd_chain[i], BCE_TX_CHAIN_PAGE_SZ);
	sc->used_tx_bd = 0;

	/* Check if we lost any mbufs in the process. */
	DBRUNIF((sc->tx_mbuf_alloc),
	    if_printf(&sc->arpcom.ac_if,
	        "%s(%d): Memory leak! "
	        "Lost %d mbufs from tx chain!\n",
	        __FILE__, __LINE__, sc->tx_mbuf_alloc));

	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __func__);
}


/****************************************************************************/
/* Initialize the RX context memory.                                        */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing                                                                */
/****************************************************************************/
static void
bce_init_rx_context(struct bce_softc *sc)
{
	uint32_t val;

	/* Initialize the context ID for an L2 RX chain. */
	val = BCE_L2CTX_RX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE |
	    BCE_L2CTX_RX_CTX_TYPE_SIZE_L2 | (0x02 << 8);

	/*
	 * Set the level for generating pause frames
	 * when the number of available rx_bd's gets
	 * too low (the low watermark) and the level
	 * when pause frames can be stopped (the high
	 * watermark).
	 */
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
		uint32_t lo_water, hi_water;

		lo_water = BCE_L2CTX_RX_LO_WATER_MARK_DEFAULT;
		hi_water = USABLE_RX_BD / 4;

		lo_water /= BCE_L2CTX_RX_LO_WATER_MARK_SCALE;
		hi_water /= BCE_L2CTX_RX_HI_WATER_MARK_SCALE;

		/* Clamp to the 4-bit field; disable entirely if zero. */
		if (hi_water > 0xf)
			hi_water = 0xf;
		else if (hi_water == 0)
			lo_water = 0;
		val |= lo_water |
		    (hi_water << BCE_L2CTX_RX_HI_WATER_MARK_SHIFT);
	}

	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_CTX_TYPE, val);

	/* Setup the MQ BIN mapping for l2_ctx_host_bseq. */
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
		val = REG_RD(sc, BCE_MQ_MAP_L2_5);
		REG_WR(sc, BCE_MQ_MAP_L2_5, val | BCE_MQ_MAP_L2_5_ARM);
	}

	/* Point the hardware to the first page in the chain. */
	val = BCE_ADDR_HI(sc->rx_bd_chain_paddr[0]);
	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_NX_BDHADDR_HI, val);
	val = BCE_ADDR_LO(sc->rx_bd_chain_paddr[0]);
	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_NX_BDHADDR_LO, val);
}


/****************************************************************************/
/* Allocate memory and initialize the RX data structures.                   */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.
*/
/****************************************************************************/
static int
bce_init_rx_chain(struct bce_softc *sc)
{
	struct rx_bd *rxbd;
	int i, rc = 0;
	uint16_t prod, chain_prod;
	uint32_t prod_bseq;

	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __func__);

	/* Initialize the RX producer and consumer indices. */
	sc->rx_prod = 0;
	sc->rx_cons = 0;
	sc->rx_prod_bseq = 0;
	sc->free_rx_bd = USABLE_RX_BD;
	sc->max_rx_bd = USABLE_RX_BD;
	DBRUNIF(1, sc->rx_low_watermark = USABLE_RX_BD);
	DBRUNIF(1, sc->rx_empty_count = 0);

	/*
	 * Initialize the RX next pointer chain entries: the final entry of
	 * each page is a "chain page pointer" rx_bd that holds the physical
	 * address of the next page, and the last page wraps back to the
	 * first, forming a hardware-followable ring.
	 */
	for (i = 0; i < RX_PAGES; i++) {
		int j;

		rxbd = &sc->rx_bd_chain[i][USABLE_RX_BD_PER_PAGE];

		/* Check if we've reached the last page. */
		if (i == (RX_PAGES - 1))
			j = 0;
		else
			j = i + 1;

		/* Setup the chain page pointers. */
		rxbd->rx_bd_haddr_hi =
		    htole32(BCE_ADDR_HI(sc->rx_bd_chain_paddr[j]));
		rxbd->rx_bd_haddr_lo =
		    htole32(BCE_ADDR_LO(sc->rx_bd_chain_paddr[j]));
	}

	/*
	 * Allocate mbuf clusters for the rx_bd chain.  On the first
	 * allocation failure, stop filling and report ENOBUFS — whatever
	 * was filled so far remains usable.
	 */
	prod = prod_bseq = 0;
	while (prod < TOTAL_RX_BD) {
		chain_prod = RX_CHAIN_IDX(prod);
		if (bce_newbuf_std(sc, &prod, &chain_prod, &prod_bseq, 1)) {
			if_printf(&sc->arpcom.ac_if,
			    "Error filling RX chain: rx_bd[0x%04X]!\n",
			    chain_prod);
			rc = ENOBUFS;
			break;
		}
		prod = NEXT_RX_BD(prod);
	}

	/* Save the RX chain producer index. */
	sc->rx_prod = prod;
	sc->rx_prod_bseq = prod_bseq;

	/* Tell the chip about the waiting rx_bd's. */
	REG_WR16(sc, MB_GET_CID_ADDR(RX_CID) + BCE_L2MQ_RX_HOST_BDIDX,
	    sc->rx_prod);
	REG_WR(sc, MB_GET_CID_ADDR(RX_CID) + BCE_L2MQ_RX_HOST_BSEQ,
	    sc->rx_prod_bseq);

	bce_init_rx_context(sc);

	return(rc);
}


/****************************************************************************/
/* Free memory and clear the RX data structures.                            */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_free_rx_chain(struct bce_softc *sc)
{
	int i;

	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __func__);

	/* Unload the DMA map and free any mbufs still in the RX mbuf chain. */
	for (i = 0; i < TOTAL_RX_BD; i++) {
		if (sc->rx_mbuf_ptr[i] != NULL) {
			bus_dmamap_unload(sc->rx_mbuf_tag, sc->rx_mbuf_map[i]);
			m_freem(sc->rx_mbuf_ptr[i]);
			sc->rx_mbuf_ptr[i] = NULL;
			DBRUNIF(1, sc->rx_mbuf_alloc--);
		}
	}

	/* Clear each RX chain page. */
	for (i = 0; i < RX_PAGES; i++)
		bzero(sc->rx_bd_chain[i], BCE_RX_CHAIN_PAGE_SZ);

	/* Check if we lost any mbufs in the process (debug builds only). */
	DBRUNIF((sc->rx_mbuf_alloc),
		if_printf(&sc->arpcom.ac_if,
			  "%s(%d): Memory leak! "
			  "Lost %d mbufs from rx chain!\n",
			  __FILE__, __LINE__, sc->rx_mbuf_alloc));

	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __func__);
}


/****************************************************************************/
/* Set media options.                                                       */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.
*/
/****************************************************************************/
static int
bce_ifmedia_upd(struct ifnet *ifp)
{
	struct bce_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->bce_miibus);
	int error = 0;

	/*
	 * 'mii' will be NULL, when this function is called on following
	 * code path: bce_attach() -> bce_mgmt_init()
	 */
	if (mii != NULL) {
		/* Make sure the MII bus has been enumerated. */
		sc->bce_link = 0;
		if (mii->mii_instance) {
			struct mii_softc *miisc;

			/* Reset every PHY instance before renegotiating. */
			LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
				mii_phy_reset(miisc);
		}
		error = mii_mediachg(mii);
	}
	return error;
}


/****************************************************************************/
/* Reports current media status.                                            */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct bce_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->bce_miibus);

	/*
	 * NOTE(review): unlike bce_ifmedia_upd() there is no NULL check on
	 * 'mii' here — presumably this path is only reachable via ioctl
	 * after miibus attach succeeds; confirm against bce_attach().
	 */
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}


/****************************************************************************/
/* Handles PHY generated interrupt events.                                  */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_phy_intr(struct bce_softc *sc)
{
	uint32_t new_link_state, old_link_state;
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ASSERT_SERIALIZED(ifp->if_serializer);

	/*
	 * The current link state is reported in status_attn_bits; the last
	 * acknowledged state lives in status_attn_bits_ack.  A difference
	 * between the two means the link changed since the last ack.
	 */
	new_link_state = sc->status_block->status_attn_bits &
			 STATUS_ATTN_BITS_LINK_STATE;
	old_link_state = sc->status_block->status_attn_bits_ack &
			 STATUS_ATTN_BITS_LINK_STATE;

	/* Handle any changes if the link state has changed. */
	if (new_link_state != old_link_state) {	/* XXX redundant? */
		DBRUN(BCE_VERBOSE_INTR, bce_dump_status_block(sc));

		/* Update the status_attn_bits_ack field in the status block. */
		if (new_link_state) {
			REG_WR(sc, BCE_PCICFG_STATUS_BIT_SET_CMD,
			       STATUS_ATTN_BITS_LINK_STATE);
			if (bootverbose)
				if_printf(ifp, "Link is now UP.\n");
		} else {
			REG_WR(sc, BCE_PCICFG_STATUS_BIT_CLEAR_CMD,
			       STATUS_ATTN_BITS_LINK_STATE);
			if (bootverbose)
				if_printf(ifp, "Link is now DOWN.\n");
		}

		/*
		 * Assume link is down and allow tick routine to
		 * update the state based on the actual media state.
		 */
		sc->bce_link = 0;
		callout_stop(&sc->bce_tick_callout);
		bce_tick_serialized(sc);
	}

	/* Acknowledge the link change interrupt. */
	REG_WR(sc, BCE_EMAC_STATUS, BCE_EMAC_STATUS_LINK_CHANGE);
}


/****************************************************************************/
/* Reads the receive consumer value from the status block (skipping over    */
/* chain page pointer if necessary.
*/
/*                                                                          */
/* Returns:                                                                 */
/*   hw_cons                                                                */
/****************************************************************************/
static __inline uint16_t
bce_get_hw_rx_cons(struct bce_softc *sc)
{
	uint16_t hw_cons = sc->status_block->status_rx_quick_consumer_index0;

	/*
	 * The last entry of each page is a chain page pointer, not a real
	 * rx_bd; when the raw index lands on one, step past it.
	 */
	if ((hw_cons & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
		hw_cons++;
	return hw_cons;
}


/****************************************************************************/
/* Handles received frame interrupt events.                                 */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_rx_intr(struct bce_softc *sc, int count)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint16_t hw_cons, sw_cons, sw_chain_cons, sw_prod, sw_chain_prod;
	uint32_t sw_prod_bseq;
	struct mbuf_chain chain[MAXCPU];

	ASSERT_SERIALIZED(ifp->if_serializer);

	ether_input_chain_init(chain);

	DBRUNIF(1, sc->rx_interrupts++);

	/* Get the hardware's view of the RX consumer index. */
	hw_cons = sc->hw_rx_cons = bce_get_hw_rx_cons(sc);

	/* Get working copies of the driver's view of the RX indices. */
	sw_cons = sc->rx_cons;
	sw_prod = sc->rx_prod;
	sw_prod_bseq = sc->rx_prod_bseq;

	DBPRINT(sc, BCE_INFO_RECV, "%s(enter): sw_prod = 0x%04X, "
		"sw_cons = 0x%04X, sw_prod_bseq = 0x%08X\n",
		__func__, sw_prod, sw_cons, sw_prod_bseq);

	/* Prevent speculative reads from getting ahead of the status block. */
	bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
			  BUS_SPACE_BARRIER_READ);

	/* Update some debug statistics counters */
	DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark),
		sc->rx_low_watermark = sc->free_rx_bd);
	DBRUNIF((sc->free_rx_bd == 0), sc->rx_empty_count++);

	/* Scan through the receive chain as long as there is work to do. */
	while (sw_cons != hw_cons) {
		struct mbuf *m = NULL;
		struct l2_fhdr *l2fhdr = NULL;
		struct rx_bd *rxbd;
		unsigned int len;
		uint32_t status = 0;

#ifdef DEVICE_POLLING
		/*
		 * 'count' is the polling(4) budget; it is negative on the
		 * interrupt path (no budget).  Stop once the budget is
		 * exhausted, remembering where we left off.
		 */
		if (count >= 0 && count-- == 0) {
			sc->hw_rx_cons = sw_cons;
			break;
		}
#endif

		/*
		 * Convert the producer/consumer indices
		 * to an actual rx_bd index.
		 */
		sw_chain_cons = RX_CHAIN_IDX(sw_cons);
		sw_chain_prod = RX_CHAIN_IDX(sw_prod);

		/* Get the used rx_bd. */
		rxbd = &sc->rx_bd_chain[RX_PAGE(sw_chain_cons)]
				       [RX_IDX(sw_chain_cons)];
		sc->free_rx_bd++;

		DBRUN(BCE_VERBOSE_RECV,
		      if_printf(ifp, "%s(): ", __func__);
		      bce_dump_rxbd(sc, sw_chain_cons, rxbd));

		/* The mbuf is stored with the last rx_bd entry of a packet. */
		if (sc->rx_mbuf_ptr[sw_chain_cons] != NULL) {
			/* Validate that this is the last rx_bd. */
			DBRUNIF((!(rxbd->rx_bd_flags & RX_BD_FLAGS_END)),
				if_printf(ifp, "%s(%d): "
					  "Unexpected mbuf found in rx_bd[0x%04X]!\n",
					  __FILE__, __LINE__, sw_chain_cons);
				bce_breakpoint(sc));

			if (sw_chain_cons != sw_chain_prod) {
				if_printf(ifp, "RX cons(%d) != prod(%d), "
					  "drop!\n", sw_chain_cons,
					  sw_chain_prod);
				ifp->if_ierrors++;

				/* Recycle the descriptor in place. */
				bce_setup_rxdesc_std(sc, sw_chain_cons,
						     &sw_prod_bseq);
				m = NULL;
				goto bce_rx_int_next_rx;
			}

			/* Unmap the mbuf from DMA space. */
			bus_dmamap_sync(sc->rx_mbuf_tag,
					sc->rx_mbuf_map[sw_chain_cons],
					BUS_DMASYNC_POSTREAD);

			/* Save the mbuf from the driver's chain. */
			m = sc->rx_mbuf_ptr[sw_chain_cons];

			/*
			 * Frames received on the NetXteme II are prepended
			 * with an l2_fhdr structure which provides status
			 * information about the received frame (including
			 * VLAN tags and checksum info).  The frames are also
			 * automatically adjusted to align the IP header
			 * (i.e. two null bytes are inserted before the
			 * Ethernet header).  As a result the data DMA'd by
			 * the controller into the mbuf is as follows:
			 *
			 * +---------+-----+---------------------+-----+
			 * | l2_fhdr | pad | packet data         | FCS |
			 * +---------+-----+---------------------+-----+
			 *
			 * The l2_fhdr needs to be checked and skipped and the
			 * FCS needs to be stripped before sending the packet
			 * up the stack.
			 */
			l2fhdr = mtod(m, struct l2_fhdr *);

			len = l2fhdr->l2_fhdr_pkt_len;
			status = l2fhdr->l2_fhdr_status;

			DBRUNIF(DB_RANDOMTRUE(bce_debug_l2fhdr_status_check),
				if_printf(ifp,
					  "Simulating l2_fhdr status error.\n");
				status = status | L2_FHDR_ERRORS_PHY_DECODE);

			/* Watch for unusual sized frames. */
			DBRUNIF((len < BCE_MIN_MTU ||
				 len > BCE_MAX_JUMBO_ETHER_MTU_VLAN),
				if_printf(ifp,
					  "%s(%d): Unusual frame size found. "
					  "Min(%d), Actual(%d), Max(%d)\n",
					  __FILE__, __LINE__,
					  (int)BCE_MIN_MTU, len,
					  (int)BCE_MAX_JUMBO_ETHER_MTU_VLAN);
				bce_dump_mbuf(sc, m);
				bce_breakpoint(sc));

			/* Strip the trailing FCS from the reported length. */
			len -= ETHER_CRC_LEN;

			/* Check the received frame for errors. */
			if (status & (L2_FHDR_ERRORS_BAD_CRC |
				      L2_FHDR_ERRORS_PHY_DECODE |
				      L2_FHDR_ERRORS_ALIGNMENT |
				      L2_FHDR_ERRORS_TOO_SHORT |
				      L2_FHDR_ERRORS_GIANT_FRAME)) {
				ifp->if_ierrors++;
				DBRUNIF(1, sc->l2fhdr_status_errors++);

				/* Reuse the mbuf for a new frame. */
				bce_setup_rxdesc_std(sc, sw_chain_prod,
						     &sw_prod_bseq);
				m = NULL;
				goto bce_rx_int_next_rx;
			}

			/*
			 * Get a new mbuf for the rx_bd.  If no new
			 * mbufs are available then reuse the current mbuf,
			 * log an ierror on the interface, and generate
			 * an error in the system log.
			 */
			if (bce_newbuf_std(sc, &sw_prod, &sw_chain_prod,
					   &sw_prod_bseq, 0)) {
				DBRUN(BCE_WARN,
				      if_printf(ifp,
						"%s(%d): Failed to allocate new mbuf, "
						"incoming frame dropped!\n",
						__FILE__, __LINE__));

				ifp->if_ierrors++;

				/* Try and reuse the existing mbuf. */
				bce_setup_rxdesc_std(sc, sw_chain_prod,
						     &sw_prod_bseq);
				m = NULL;
				goto bce_rx_int_next_rx;
			}

			/*
			 * Skip over the l2_fhdr when passing
			 * the data up the stack.
			 */
			m_adj(m, sizeof(struct l2_fhdr) + ETHER_ALIGN);

			m->m_pkthdr.len = m->m_len = len;
			m->m_pkthdr.rcvif = ifp;

			DBRUN(BCE_VERBOSE_RECV,
			      struct ether_header *eh;
			      eh = mtod(m, struct ether_header *);
			      if_printf(ifp, "%s(): to: %6D, from: %6D, "
					"type: 0x%04X\n", __func__,
					eh->ether_dhost, ":",
					eh->ether_shost, ":",
					htons(eh->ether_type)));

			/* Validate the checksum if offload enabled. */
			if (ifp->if_capenable & IFCAP_RXCSUM) {
				/* Check for an IP datagram. */
				if (status & L2_FHDR_STATUS_IP_DATAGRAM) {
					m->m_pkthdr.csum_flags |=
						CSUM_IP_CHECKED;

					/* Check if the IP checksum is valid. */
					if ((l2fhdr->l2_fhdr_ip_xsum ^
					     0xffff) == 0) {
						m->m_pkthdr.csum_flags |=
							CSUM_IP_VALID;
					} else {
						DBPRINT(sc, BCE_WARN_RECV,
							"%s(): Invalid IP checksum = 0x%04X!\n",
							__func__, l2fhdr->l2_fhdr_ip_xsum);
					}
				}

				/* Check for a valid TCP/UDP frame. */
				if (status & (L2_FHDR_STATUS_TCP_SEGMENT |
					      L2_FHDR_STATUS_UDP_DATAGRAM)) {

					/* Check for a good TCP/UDP checksum. */
					if ((status &
					     (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0) {
						m->m_pkthdr.csum_data =
							l2fhdr->l2_fhdr_tcp_udp_xsum;
						m->m_pkthdr.csum_flags |=
							CSUM_DATA_VALID |
							CSUM_PSEUDO_HDR;
					} else {
						DBPRINT(sc, BCE_WARN_RECV,
							"%s(): Invalid TCP/UDP checksum = 0x%04X!\n",
							__func__, l2fhdr->l2_fhdr_tcp_udp_xsum);
					}
				}
			}

			ifp->if_ipackets++;
bce_rx_int_next_rx:
			sw_prod = NEXT_RX_BD(sw_prod);
		}

		sw_cons = NEXT_RX_BD(sw_cons);

		/* If we have a packet, pass it up the stack */
		if (m) {
			DBPRINT(sc, BCE_VERBOSE_RECV,
				"%s(): Passing received frame up.\n", __func__);

			if (status & L2_FHDR_STATUS_L2_VLAN_TAG) {
				m->m_flags |= M_VLANTAG;
				m->m_pkthdr.ether_vlantag =
					l2fhdr->l2_fhdr_vlan_tag;
			}
			ether_input_chain(ifp, m, NULL, chain);

			DBRUNIF(1, sc->rx_mbuf_alloc--);
		}

		/*
		 * If polling(4) is not enabled, refresh hw_cons to see
		 * whether there's new work.
		 *
		 * If polling(4) is enabled, i.e count >= 0, refreshing
		 * should not be performed, so that we would not spend
		 * too much time in RX processing.
		 */
		if (count < 0 && sw_cons == hw_cons)
			hw_cons = sc->hw_rx_cons = bce_get_hw_rx_cons(sc);

		/*
		 * Prevent speculative reads from getting ahead
		 * of the status block.
		 */
		bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
				  BUS_SPACE_BARRIER_READ);
	}

	ether_input_dispatch(chain);

	/* Publish the updated indices back to the softc. */
	sc->rx_cons = sw_cons;
	sc->rx_prod = sw_prod;
	sc->rx_prod_bseq = sw_prod_bseq;

	/* Tell the chip about the newly refilled rx_bd's. */
	REG_WR16(sc, MB_GET_CID_ADDR(RX_CID) + BCE_L2MQ_RX_HOST_BDIDX,
	    sc->rx_prod);
	REG_WR(sc, MB_GET_CID_ADDR(RX_CID) + BCE_L2MQ_RX_HOST_BSEQ,
	    sc->rx_prod_bseq);

	DBPRINT(sc, BCE_INFO_RECV, "%s(exit): rx_prod = 0x%04X, "
		"rx_cons = 0x%04X, rx_prod_bseq = 0x%08X\n",
		__func__, sc->rx_prod, sc->rx_cons, sc->rx_prod_bseq);
}


/****************************************************************************/
/* Reads the transmit consumer value from the status block (skipping over   */
/* chain page pointer if necessary).                                        */
/*                                                                          */
/* Returns:                                                                 */
/*   hw_cons                                                                */
/****************************************************************************/
static __inline uint16_t
bce_get_hw_tx_cons(struct bce_softc *sc)
{
	uint16_t hw_cons = sc->status_block->status_tx_quick_consumer_index0;

	/* Skip the per-page chain page pointer entry, as in the RX case. */
	if ((hw_cons & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
		hw_cons++;
	return hw_cons;
}


/****************************************************************************/
/* Handles transmit completion interrupt events.                            */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_tx_intr(struct bce_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint16_t hw_tx_cons, sw_tx_cons, sw_tx_chain_cons;

	ASSERT_SERIALIZED(ifp->if_serializer);

	DBRUNIF(1, sc->tx_interrupts++);

	/* Get the hardware's view of the TX consumer index. */
	hw_tx_cons = sc->hw_tx_cons = bce_get_hw_tx_cons(sc);
	sw_tx_cons = sc->tx_cons;

	/* Prevent speculative reads from getting ahead of the status block. */
	bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
			  BUS_SPACE_BARRIER_READ);

	/* Cycle through any completed TX chain page entries. */
	while (sw_tx_cons != hw_tx_cons) {
#ifdef BCE_DEBUG
		struct tx_bd *txbd = NULL;
#endif
		sw_tx_chain_cons = TX_CHAIN_IDX(sw_tx_cons);

		DBPRINT(sc, BCE_INFO_SEND,
			"%s(): hw_tx_cons = 0x%04X, sw_tx_cons = 0x%04X, "
			"sw_tx_chain_cons = 0x%04X\n",
			__func__, hw_tx_cons, sw_tx_cons, sw_tx_chain_cons);

		DBRUNIF((sw_tx_chain_cons > MAX_TX_BD),
			if_printf(ifp, "%s(%d): "
				  "TX chain consumer out of range! "
				  " 0x%04X > 0x%04X\n",
				  __FILE__, __LINE__, sw_tx_chain_cons,
				  (int)MAX_TX_BD);
			bce_breakpoint(sc));

		DBRUNIF(1, txbd = &sc->tx_bd_chain[TX_PAGE(sw_tx_chain_cons)]
				  [TX_IDX(sw_tx_chain_cons)]);

		DBRUNIF((txbd == NULL),
			if_printf(ifp, "%s(%d): "
				  "Unexpected NULL tx_bd[0x%04X]!\n",
				  __FILE__, __LINE__, sw_tx_chain_cons);
			bce_breakpoint(sc));

		DBRUN(BCE_INFO_SEND,
		      if_printf(ifp, "%s(): ", __func__);
		      bce_dump_txbd(sc, sw_tx_chain_cons, txbd));

		/*
		 * Free the associated mbuf.  Remember
		 * that only the last tx_bd of a packet
		 * has an mbuf pointer and DMA map.
		 */
		if (sc->tx_mbuf_ptr[sw_tx_chain_cons] != NULL) {
			/* Validate that this is the last tx_bd. */
			DBRUNIF((!(txbd->tx_bd_flags & TX_BD_FLAGS_END)),
				if_printf(ifp, "%s(%d): "
					  "tx_bd END flag not set but "
					  "txmbuf == NULL!\n", __FILE__, __LINE__);
				bce_breakpoint(sc));

			DBRUN(BCE_INFO_SEND,
			      if_printf(ifp, "%s(): Unloading map/freeing mbuf "
					"from tx_bd[0x%04X]\n", __func__,
					sw_tx_chain_cons));

			/* Unmap the mbuf. */
			bus_dmamap_unload(sc->tx_mbuf_tag,
					  sc->tx_mbuf_map[sw_tx_chain_cons]);

			/* Free the mbuf. */
			m_freem(sc->tx_mbuf_ptr[sw_tx_chain_cons]);
			sc->tx_mbuf_ptr[sw_tx_chain_cons] = NULL;
			DBRUNIF(1, sc->tx_mbuf_alloc--);

			ifp->if_opackets++;
		}

		sc->used_tx_bd--;
		sw_tx_cons = NEXT_TX_BD(sw_tx_cons);

		if (sw_tx_cons == hw_tx_cons) {
			/* Refresh hw_cons to see if there's new work. */
			hw_tx_cons = sc->hw_tx_cons = bce_get_hw_tx_cons(sc);
		}

		/*
		 * Prevent speculative reads from getting
		 * ahead of the status block.
		 */
		bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
				  BUS_SPACE_BARRIER_READ);
	}

	if (sc->used_tx_bd == 0) {
		/* Clear the TX timeout timer. */
		ifp->if_timer = 0;
	}

	/* Clear the tx hardware queue full flag. */
	if (sc->max_tx_bd - sc->used_tx_bd >= BCE_TX_SPARE_SPACE) {
		DBRUNIF((ifp->if_flags & IFF_OACTIVE),
			DBPRINT(sc, BCE_WARN_SEND,
				"%s(): Open TX chain! %d/%d (used/total)\n",
				__func__, sc->used_tx_bd, sc->max_tx_bd));
		ifp->if_flags &= ~IFF_OACTIVE;
	}
	sc->tx_cons = sw_tx_cons;
}


/****************************************************************************/
/* Disables interrupt generation.                                           */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_disable_intr(struct bce_softc *sc)
{
	/* Mask interrupts; the read flushes the posted write. */
	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, BCE_PCICFG_INT_ACK_CMD_MASK_INT);
	REG_RD(sc, BCE_PCICFG_INT_ACK_CMD);
	lwkt_serialize_handler_disable(sc->arpcom.ac_if.if_serializer);
}


/****************************************************************************/
/* Enables interrupt generation.                                            */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
*/
/****************************************************************************/
static void
bce_enable_intr(struct bce_softc *sc, int coal_now)
{
	lwkt_serialize_handler_enable(sc->arpcom.ac_if.if_serializer);

	/*
	 * Write the last seen status index twice: first with the mask bit
	 * set, then with it cleared, re-arming interrupt generation.
	 */
	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
	       BCE_PCICFG_INT_ACK_CMD_INDEX_VALID |
	       BCE_PCICFG_INT_ACK_CMD_MASK_INT | sc->last_status_idx);

	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
	       BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx);

	/* Optionally force an immediate coalescing pass. */
	if (coal_now) {
		REG_WR(sc, BCE_HC_COMMAND,
		       sc->hc_command | BCE_HC_COMMAND_COAL_NOW);
	}
}


/****************************************************************************/
/* Handles controller initialization.                                       */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_init(void *xsc)
{
	struct bce_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t ether_mtu;
	int error;

	ASSERT_SERIALIZED(ifp->if_serializer);

	/* Check if the driver is still running and bail out if it is. */
	if (ifp->if_flags & IFF_RUNNING)
		return;

	bce_stop(sc);

	error = bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
	if (error) {
		if_printf(ifp, "Controller reset failed!\n");
		goto back;
	}

	error = bce_chipinit(sc);
	if (error) {
		if_printf(ifp, "Controller initialization failed!\n");
		goto back;
	}

	error = bce_blockinit(sc);
	if (error) {
		if_printf(ifp, "Block initialization failed!\n");
		goto back;
	}

	/* Load our MAC address. */
	bcopy(IF_LLADDR(ifp), sc->eaddr, ETHER_ADDR_LEN);
	bce_set_mac_addr(sc);

	/* Calculate and program the Ethernet MTU size. */
	ether_mtu = ETHER_HDR_LEN + EVL_ENCAPLEN + ifp->if_mtu + ETHER_CRC_LEN;

	DBPRINT(sc, BCE_INFO, "%s(): setting mtu = %d\n", __func__, ether_mtu);

	/*
	 * Program the mtu, enabling jumbo frame
	 * support if necessary.  Also set the mbuf
	 * allocation count for RX frames.
	 */
	if (ether_mtu > ETHER_MAX_LEN + EVL_ENCAPLEN) {
#ifdef notyet
		REG_WR(sc, BCE_EMAC_RX_MTU_SIZE,
		       min(ether_mtu, BCE_MAX_JUMBO_ETHER_MTU) |
		       BCE_EMAC_RX_MTU_SIZE_JUMBO_ENA);
		sc->mbuf_alloc_size = MJUM9BYTES;
#else
		panic("jumbo buffer is not supported yet\n");
#endif
	} else {
		REG_WR(sc, BCE_EMAC_RX_MTU_SIZE, ether_mtu);
		sc->mbuf_alloc_size = MCLBYTES;
	}

	/*
	 * Calculate the RX Ethernet frame size for rx_bd's.
	 * NOTE(review): the "+ 2" is presumably the IP-header alignment pad
	 * and "+ 8" trailing slack — confirm against bce_newbuf_std().
	 */
	sc->max_frame_size = sizeof(struct l2_fhdr) + 2 + ether_mtu + 8;

	DBPRINT(sc, BCE_INFO,
		"%s(): mclbytes = %d, mbuf_alloc_size = %d, "
		"max_frame_size = %d\n",
		__func__, (int)MCLBYTES, sc->mbuf_alloc_size,
		sc->max_frame_size);

	/* Program appropriate promiscuous/multicast filtering. */
	bce_set_rx_mode(sc);

	/* Init RX buffer descriptor chain. */
	bce_init_rx_chain(sc);	/* XXX return value */

	/* Init TX buffer descriptor chain. */
	bce_init_tx_chain(sc);	/* XXX return value */

#ifdef DEVICE_POLLING
	/* Disable interrupts if we are polling. */
	if (ifp->if_flags & IFF_POLLING) {
		bce_disable_intr(sc);

		REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
		       (1 << 16) | sc->bce_rx_quick_cons_trip);
		REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
		       (1 << 16) | sc->bce_tx_quick_cons_trip);
	} else
#endif
	/* Enable host interrupts. */
	bce_enable_intr(sc, 1);

	bce_ifmedia_upd(ifp);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	callout_reset(&sc->bce_tick_callout, hz, bce_tick, sc);
back:
	if (error)
		bce_stop(sc);
}


/****************************************************************************/
/* Initialize the controller just enough so that any management firmware    */
/* running on the device will continue to operate correctly.                */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_mgmt_init(struct bce_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;

	/* Bail out if management firmware is not running. */
	if (!(sc->bce_flags & BCE_MFW_ENABLE_FLAG))
		return;

	/* Enable all critical blocks in the MAC. */
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
		REG_WR(sc, BCE_MISC_ENABLE_SET_BITS,
		       BCE_MISC_ENABLE_DEFAULT_XI);
	} else {
		REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, BCE_MISC_ENABLE_DEFAULT);
	}
	/* The read flushes the posted write before the delay. */
	REG_RD(sc, BCE_MISC_ENABLE_SET_BITS);
	DELAY(20);

	bce_ifmedia_upd(ifp);
}


/****************************************************************************/
/* Encapsulates an mbuf cluster into the tx_bd chain structure and makes the*/
/* memory visible to the controller.                                        */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.
*/
/****************************************************************************/
static int
bce_encap(struct bce_softc *sc, struct mbuf **m_head)
{
	bus_dma_segment_t segs[BCE_MAX_SEGMENTS];
	bus_dmamap_t map, tmp_map;
	struct mbuf *m0 = *m_head;
	struct tx_bd *txbd = NULL;
	uint16_t vlan_tag = 0, flags = 0;
	uint16_t chain_prod, chain_prod_start, prod;
	uint32_t prod_bseq;
	int i, error, maxsegs, nsegs;
#ifdef BCE_DEBUG
	uint16_t debug_prod;
#endif

	/*
	 * NOTE: on failure this routine frees the mbuf and sets *m_head to
	 * NULL (see the 'back' label) — the caller must not free it again.
	 */

	/* Transfer any checksum offload flags to the bd. */
	if (m0->m_pkthdr.csum_flags) {
		if (m0->m_pkthdr.csum_flags & CSUM_IP)
			flags |= TX_BD_FLAGS_IP_CKSUM;
		if (m0->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
			flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}

	/* Transfer any VLAN tags to the bd. */
	if (m0->m_flags & M_VLANTAG) {
		flags |= TX_BD_FLAGS_VLAN_TAG;
		vlan_tag = m0->m_pkthdr.ether_vlantag;
	}

	prod = sc->tx_prod;
	chain_prod_start = chain_prod = TX_CHAIN_IDX(prod);

	/* Map the mbuf into DMAable memory. */
	map = sc->tx_mbuf_map[chain_prod_start];

	/* The caller (bce_start) guarantees at least this much free space. */
	maxsegs = sc->max_tx_bd - sc->used_tx_bd;
	KASSERT(maxsegs >= BCE_TX_SPARE_SPACE,
		("not enough segements %d\n", maxsegs));
	if (maxsegs > BCE_MAX_SEGMENTS)
		maxsegs = BCE_MAX_SEGMENTS;

	/* Map the mbuf into our DMA address space. */
	error = bus_dmamap_load_mbuf_defrag(sc->tx_mbuf_tag, map, m_head,
			segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
	if (error)
		goto back;
	bus_dmamap_sync(sc->tx_mbuf_tag, map, BUS_DMASYNC_PREWRITE);

	/* Reset m0 (the defrag may have replaced the mbuf chain). */
	m0 = *m_head;

	/* prod points to an empty tx_bd at this point. */
	prod_bseq = sc->tx_prod_bseq;

#ifdef BCE_DEBUG
	debug_prod = chain_prod;
#endif

	DBPRINT(sc, BCE_INFO_SEND,
		"%s(): Start: prod = 0x%04X, chain_prod = %04X, "
		"prod_bseq = 0x%08X\n",
		__func__, prod, chain_prod, prod_bseq);

	/*
	 * Cycle through each mbuf segment that makes up
	 * the outgoing frame, gathering the mapping info
	 * for that segment and creating a tx_bd to for
	 * the mbuf.
	 */
	for (i = 0; i < nsegs; i++) {
		chain_prod = TX_CHAIN_IDX(prod);
		txbd = &sc->tx_bd_chain[TX_PAGE(chain_prod)][TX_IDX(chain_prod)];

		txbd->tx_bd_haddr_lo = htole32(BCE_ADDR_LO(segs[i].ds_addr));
		txbd->tx_bd_haddr_hi = htole32(BCE_ADDR_HI(segs[i].ds_addr));
		txbd->tx_bd_mss_nbytes = htole16(segs[i].ds_len);
		txbd->tx_bd_vlan_tag = htole16(vlan_tag);
		txbd->tx_bd_flags = htole16(flags);
		prod_bseq += segs[i].ds_len;
		if (i == 0)
			txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_START);
		prod = NEXT_TX_BD(prod);
	}

	/* Set the END flag on the last TX buffer descriptor. */
	txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_END);

	DBRUN(BCE_EXCESSIVE_SEND,
	      bce_dump_tx_chain(sc, debug_prod, nsegs));

	DBPRINT(sc, BCE_INFO_SEND,
		"%s(): End: prod = 0x%04X, chain_prod = %04X, "
		"prod_bseq = 0x%08X\n",
		__func__, prod, chain_prod, prod_bseq);

	/*
	 * Ensure that the mbuf pointer for this transmission
	 * is placed at the array index of the last
	 * descriptor in this chain.  This is done
	 * because a single map is used for all
	 * segments of the mbuf and we don't want to
	 * unload the map before all of the segments
	 * have been freed.
	 */
	sc->tx_mbuf_ptr[chain_prod] = m0;

	/* Swap the maps so the loaded map ends up at the last descriptor. */
	tmp_map = sc->tx_mbuf_map[chain_prod];
	sc->tx_mbuf_map[chain_prod] = map;
	sc->tx_mbuf_map[chain_prod_start] = tmp_map;

	sc->used_tx_bd += nsegs;

	/* Update some debug statistic counters */
	DBRUNIF((sc->used_tx_bd > sc->tx_hi_watermark),
		sc->tx_hi_watermark = sc->used_tx_bd);
	DBRUNIF((sc->used_tx_bd == sc->max_tx_bd), sc->tx_full_count++);
	DBRUNIF(1, sc->tx_mbuf_alloc++);

	DBRUN(BCE_VERBOSE_SEND,
	      bce_dump_tx_mbuf_chain(sc, chain_prod, nsegs));

	/* prod points to the next free tx_bd at this point. */
	sc->tx_prod = prod;
	sc->tx_prod_bseq = prod_bseq;
back:
	if (error) {
		m_freem(*m_head);
		*m_head = NULL;
	}
	return error;
}


/****************************************************************************/
/* Main transmit routine when called from another routine with a lock.      */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_start(struct ifnet *ifp)
{
	struct bce_softc *sc = ifp->if_softc;
	int count = 0;

	ASSERT_SERIALIZED(ifp->if_serializer);

	/* If there's no link or the transmit queue is empty then just exit. */
	if (!sc->bce_link) {
		ifq_purge(&ifp->if_snd);
		return;
	}

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	DBPRINT(sc, BCE_INFO_SEND,
		"%s(): Start: tx_prod = 0x%04X, tx_chain_prod = %04zX, "
		"tx_prod_bseq = 0x%08X\n",
		__func__,
		sc->tx_prod, TX_CHAIN_IDX(sc->tx_prod), sc->tx_prod_bseq);

	for (;;) {
		struct mbuf *m_head;

		/*
		 * We keep BCE_TX_SPARE_SPACE entries, so bce_encap() is
		 * unlikely to fail.
		 */
		if (sc->max_tx_bd - sc->used_tx_bd < BCE_TX_SPARE_SPACE) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* Check for any frames to send. */
		m_head = ifq_dequeue(&ifp->if_snd, NULL);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring.  On failure
		 * bce_encap() has already freed the mbuf; count the
		 * error, and if the ring is busy set OACTIVE and wait
		 * for the NIC to drain the chain.
		 */
		if (bce_encap(sc, &m_head)) {
			ifp->if_oerrors++;
			if (sc->used_tx_bd == 0) {
				continue;
			} else {
				ifp->if_flags |= IFF_OACTIVE;
				break;
			}
		}

		count++;

		/* Send a copy of the frame to any BPF listeners. */
		ETHER_BPF_MTAP(ifp, m_head);
	}

	if (count == 0) {
		/* no packets were dequeued */
		DBPRINT(sc, BCE_VERBOSE_SEND,
			"%s(): No packets were dequeued\n", __func__);
		return;
	}

	DBPRINT(sc, BCE_INFO_SEND,
		"%s(): End: tx_prod = 0x%04X, tx_chain_prod = 0x%04zX, "
		"tx_prod_bseq = 0x%08X\n",
		__func__,
		sc->tx_prod, TX_CHAIN_IDX(sc->tx_prod), sc->tx_prod_bseq);

	REG_WR(sc, BCE_MQ_COMMAND,
	       REG_RD(sc, BCE_MQ_COMMAND) | BCE_MQ_COMMAND_NO_MAP_ERROR);

	/* Start the transmit. */
	REG_WR16(sc, MB_GET_CID_ADDR(TX_CID) + BCE_L2CTX_TX_HOST_BIDX, sc->tx_prod);
	REG_WR(sc, MB_GET_CID_ADDR(TX_CID) + BCE_L2CTX_TX_HOST_BSEQ, sc->tx_prod_bseq);

	/* Set the tx timeout. */
	ifp->if_timer = BCE_TX_TIMEOUT;
}


/****************************************************************************/
/* Handles any IOCTL calls from the operating system.                       */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.
 */
/****************************************************************************/
static int
bce_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
{
	struct bce_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct mii_data *mii;
	int mask, error = 0;

	ASSERT_SERIALIZED(ifp->if_serializer);

	switch(command) {
	case SIOCSIFMTU:
		/* Check that the MTU setting is supported. */
		if (ifr->ifr_mtu < BCE_MIN_MTU ||
#ifdef notyet
		    ifr->ifr_mtu > BCE_MAX_JUMBO_MTU
#else
		    /* Jumbo frames are not supported yet. */
		    ifr->ifr_mtu > ETHERMTU
#endif
		   ) {
			error = EINVAL;
			break;
		}

		DBPRINT(sc, BCE_INFO, "Setting new MTU of %d\n", ifr->ifr_mtu);

		ifp->if_mtu = ifr->ifr_mtu;
		ifp->if_flags &= ~IFF_RUNNING;	/* Force reinitialize */
		bce_init(sc);
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING) {
				/* Only reprogram the RX filters when the
				 * filtering-related flags actually changed. */
				mask = ifp->if_flags ^ sc->bce_if_flags;

				if (mask & (IFF_PROMISC | IFF_ALLMULTI))
					bce_set_rx_mode(sc);
			} else {
				bce_init(sc);
			}
		} else if (ifp->if_flags & IFF_RUNNING) {
			bce_stop(sc);

			/* If MFW is running, restart the controller a bit. */
			if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) {
				bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
				bce_chipinit(sc);
				bce_mgmt_init(sc);
			}
		}
		sc->bce_if_flags = ifp->if_flags;
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING)
			bce_set_rx_mode(sc);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		DBPRINT(sc, BCE_VERBOSE, "bce_phy_flags = 0x%08X\n",
			sc->bce_phy_flags);
		DBPRINT(sc, BCE_VERBOSE, "Copper media set/get\n");

		/* Media selection is delegated to the MII layer. */
		mii = device_get_softc(sc->bce_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;

	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		DBPRINT(sc, BCE_INFO, "Received SIOCSIFCAP = 0x%08X\n",
			(uint32_t) mask);

		if (mask & IFCAP_HWCSUM) {
			ifp->if_capenable ^= (mask & IFCAP_HWCSUM);
			if (IFCAP_HWCSUM & ifp->if_capenable)
				ifp->if_hwassist = BCE_IF_HWASSIST;
			else
				ifp->if_hwassist = 0;
		}
		break;

	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}
	return error;
}


/****************************************************************************/
/* Transmit timeout handler.                                                */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_watchdog(struct ifnet *ifp)
{
	struct bce_softc *sc = ifp->if_softc;

	ASSERT_SERIALIZED(ifp->if_serializer);

	DBRUN(BCE_VERBOSE_SEND,
	      bce_dump_driver_state(sc);
	      bce_dump_status_block(sc));

	/*
	 * If we are in this routine because of pause frames, then
	 * don't reset the hardware.
	 */
	if (REG_RD(sc, BCE_EMAC_TX_STATUS) & BCE_EMAC_TX_STATUS_XOFFED)
		return;

	if_printf(ifp, "Watchdog timeout occurred, resetting!\n");

	/* DBRUN(BCE_FATAL, bce_breakpoint(sc)); */

	ifp->if_flags &= ~IFF_RUNNING;	/* Force reinitialize */
	bce_init(sc);

	ifp->if_oerrors++;

	/* Kick off any traffic that queued up while the chip was wedged. */
	if (!ifq_is_empty(&ifp->if_snd))
		if_devstart(ifp);
}


#ifdef DEVICE_POLLING

static void
bce_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct bce_softc *sc = ifp->if_softc;
	struct status_block *sblk = sc->status_block;
	uint16_t hw_tx_cons, hw_rx_cons;

	ASSERT_SERIALIZED(ifp->if_serializer);

	switch (cmd) {
	case POLL_REGISTER:
		/* Polling mode: mask the interrupt and force the coalescing
		 * trip counts to 1 so the status block updates promptly. */
		bce_disable_intr(sc);

		REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
		       (1 << 16) | sc->bce_rx_quick_cons_trip);
		REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
		       (1 << 16) | sc->bce_tx_quick_cons_trip);
		return;
	case POLL_DEREGISTER:
		/* Back to interrupt mode; restore the tuned coalescing. */
		bce_enable_intr(sc, 1);

		REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
		       (sc->bce_tx_quick_cons_trip_int << 16) |
		       sc->bce_tx_quick_cons_trip);
		REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
		       (sc->bce_rx_quick_cons_trip_int << 16) |
		       sc->bce_rx_quick_cons_trip);
		return;
	default:
		break;
	}

	if (cmd == POLL_AND_CHECK_STATUS) {
		uint32_t status_attn_bits;

		status_attn_bits = sblk->status_attn_bits;

		DBRUNIF(DB_RANDOMTRUE(bce_debug_unexpected_attention),
			if_printf(ifp,
			"Simulating unexpected status attention bit set.");
			status_attn_bits |= STATUS_ATTN_BITS_PARITY_ERROR);

		/* Was it a link change interrupt? */
		if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
		    (sblk->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE))
			bce_phy_intr(sc);

		/*
		 * Clear any transient status updates during link state
		 * change.
		 */
		REG_WR(sc, BCE_HC_COMMAND,
		       sc->hc_command | BCE_HC_COMMAND_COAL_NOW_WO_INT);
		REG_RD(sc, BCE_HC_COMMAND);

		/*
		 * If any other attention is asserted then
		 * the chip is toast.
		 */
		if ((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) !=
		    (sblk->status_attn_bits_ack &
		     ~STATUS_ATTN_BITS_LINK_STATE)) {
			DBRUN(1, sc->unexpected_attentions++);

			if_printf(ifp, "Fatal attention detected: 0x%08X\n",
				  sblk->status_attn_bits);

			DBRUN(BCE_FATAL,
			      if (bce_debug_unexpected_attention == 0)
				      bce_breakpoint(sc));

			bce_init(sc);
			return;
		}
	}

	hw_rx_cons = bce_get_hw_rx_cons(sc);
	hw_tx_cons = bce_get_hw_tx_cons(sc);

	/* Check for any completed RX frames. */
	if (hw_rx_cons != sc->hw_rx_cons)
		bce_rx_intr(sc, count);

	/* Check for any completed TX frames. */
	if (hw_tx_cons != sc->hw_tx_cons)
		bce_tx_intr(sc);

	/* Check for new frames to transmit. */
	if (!ifq_is_empty(&ifp->if_snd))
		if_devstart(ifp);
}

#endif	/* DEVICE_POLLING */


/*
 * Interrupt handler.
 */
/****************************************************************************/
/* Main interrupt entry point.  Verifies that the controller generated the  */
/* interrupt and then calls a separate routine to handle the various        */
/* interrupt causes (PHY, TX, RX).                                          */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.
 */
/****************************************************************************/
static void
bce_intr(struct bce_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct status_block *sblk;
	uint16_t hw_rx_cons, hw_tx_cons;

	ASSERT_SERIALIZED(ifp->if_serializer);

	DBPRINT(sc, BCE_EXCESSIVE, "Entering %s()\n", __func__);
	DBRUNIF(1, sc->interrupts_generated++);

	sblk = sc->status_block;

	/* Check if the hardware has finished any work. */
	hw_rx_cons = bce_get_hw_rx_cons(sc);
	hw_tx_cons = bce_get_hw_tx_cons(sc);

	/* Keep processing data as long as there is work to do. */
	for (;;) {
		uint32_t status_attn_bits;

		status_attn_bits = sblk->status_attn_bits;

		DBRUNIF(DB_RANDOMTRUE(bce_debug_unexpected_attention),
			if_printf(ifp,
			"Simulating unexpected status attention bit set.");
			status_attn_bits |= STATUS_ATTN_BITS_PARITY_ERROR);

		/* Was it a link change interrupt? */
		if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
		    (sblk->status_attn_bits_ack &
		     STATUS_ATTN_BITS_LINK_STATE)) {
			bce_phy_intr(sc);

			/*
			 * Clear any transient status updates during link state
			 * change.
			 */
			REG_WR(sc, BCE_HC_COMMAND,
			       sc->hc_command | BCE_HC_COMMAND_COAL_NOW_WO_INT);
			REG_RD(sc, BCE_HC_COMMAND);
		}

		/*
		 * If any other attention is asserted then
		 * the chip is toast.
		 */
		if ((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) !=
		    (sblk->status_attn_bits_ack &
		     ~STATUS_ATTN_BITS_LINK_STATE)) {
			DBRUN(1, sc->unexpected_attentions++);

			if_printf(ifp, "Fatal attention detected: 0x%08X\n",
				  sblk->status_attn_bits);

			DBRUN(BCE_FATAL,
			      if (bce_debug_unexpected_attention == 0)
				      bce_breakpoint(sc));

			bce_init(sc);
			return;
		}

		/* Check for any completed RX frames. */
		if (hw_rx_cons != sc->hw_rx_cons)
			bce_rx_intr(sc, -1);

		/* Check for any completed TX frames. */
		if (hw_tx_cons != sc->hw_tx_cons)
			bce_tx_intr(sc);

		/*
		 * Save the status block index value
		 * for use during the next interrupt.
		 */
		sc->last_status_idx = sblk->status_idx;

		/*
		 * Prevent speculative reads from getting
		 * ahead of the status block.
		 */
		bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
				  BUS_SPACE_BARRIER_READ);

		/*
		 * If there's no work left then exit the
		 * interrupt service routine.
		 */
		hw_rx_cons = bce_get_hw_rx_cons(sc);
		hw_tx_cons = bce_get_hw_tx_cons(sc);
		if ((hw_rx_cons == sc->hw_rx_cons) &&
		    (hw_tx_cons == sc->hw_tx_cons))
			break;
	}

	/* Re-enable interrupts. */
	bce_enable_intr(sc, 0);

	if (sc->bce_coalchg_mask)
		bce_coal_change(sc);

	/* Handle any frames that arrived while handling the interrupt. */
	if (!ifq_is_empty(&ifp->if_snd))
		if_devstart(ifp);
}

/*
 * Legacy (INTx) interrupt handler: the line may be shared, so first verify
 * that this device actually asserted the interrupt.
 */
static void
bce_intr_legacy(void *xsc)
{
	struct bce_softc *sc = xsc;
	struct status_block *sblk;

	sblk = sc->status_block;

	/*
	 * If the hardware status block index matches the last value
	 * read by the driver and we haven't asserted our interrupt
	 * then there's nothing to do.
	 */
	if (sblk->status_idx == sc->last_status_idx &&
	    (REG_RD(sc, BCE_PCICFG_MISC_STATUS) &
	     BCE_PCICFG_MISC_STATUS_INTA_VALUE))
		return;

	/* Ack the interrupt and stop others from occurring. */
	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
	       BCE_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
	       BCE_PCICFG_INT_ACK_CMD_MASK_INT);

	/*
	 * Read back to deassert IRQ immediately to avoid too
	 * many spurious interrupts.
	 */
	REG_RD(sc, BCE_PCICFG_INT_ACK_CMD);

	bce_intr(sc);
}

/* MSI interrupt handler: the interrupt is ours by definition, so just ack. */
static void
bce_intr_msi(void *xsc)
{
	struct bce_softc *sc = xsc;

	/* Ack the interrupt and stop others from occurring. */
	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
	       BCE_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
	       BCE_PCICFG_INT_ACK_CMD_MASK_INT);

	bce_intr(sc);
}

/* One-shot MSI handler: the chip masks itself, no explicit ack needed. */
static void
bce_intr_msi_oneshot(void *xsc)
{
	bce_intr(xsc);
}


/****************************************************************************/
/* Programs the various packet receive modes (broadcast and multicast).     */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_set_rx_mode(struct bce_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct ifmultiaddr *ifma;
	uint32_t hashes[NUM_MC_HASH_REGISTERS] = { 0, 0, 0, 0, 0, 0, 0, 0 };
	uint32_t rx_mode, sort_mode;
	int h, i;

	ASSERT_SERIALIZED(ifp->if_serializer);

	/* Initialize receive mode default settings. */
	rx_mode = sc->rx_mode &
		~(BCE_EMAC_RX_MODE_PROMISCUOUS |
		  BCE_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BCE_RPM_SORT_USER0_BC_EN;

	/*
	 * ASF/IPMI/UMP firmware requires that VLAN tag stripping
	 * be enabled.
	 */
	if (!(BCE_IF_CAPABILITIES & IFCAP_VLAN_HWTAGGING) &&
	    !(sc->bce_flags & BCE_MFW_ENABLE_FLAG))
		rx_mode |= BCE_EMAC_RX_MODE_KEEP_VLAN_TAG;

	/*
	 * Check for promiscuous, all multicast, or selected
	 * multicast address filtering.
	 */
	if (ifp->if_flags & IFF_PROMISC) {
		DBPRINT(sc, BCE_INFO, "Enabling promiscuous mode.\n");

		/* Enable promiscuous mode. */
		rx_mode |= BCE_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BCE_RPM_SORT_USER0_PROM_EN;
	} else if (ifp->if_flags & IFF_ALLMULTI) {
		DBPRINT(sc, BCE_INFO, "Enabling all multicast mode.\n");

		/* Enable all multicast addresses. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BCE_RPM_SORT_USER0_MC_EN;
	} else {
		/* Accept one or more multicast(s). */
		DBPRINT(sc, BCE_INFO, "Enabling selective multicast mode.\n");

		/* 256-bit hash filter: low 8 CRC bits select one bit in
		 * the 8 x 32-bit hash registers. */
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			h = ether_crc32_le(
			    LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
			    ETHER_ADDR_LEN) & 0xFF;
			hashes[(h & 0xE0) >> 5] |= 1 << (h & 0x1F);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4),
			       hashes[i]);
		}
		sort_mode |= BCE_RPM_SORT_USER0_MC_HSH_EN;
	}

	/* Only make changes if the receive mode has actually changed. */
	if (rx_mode != sc->rx_mode) {
		DBPRINT(sc, BCE_VERBOSE, "Enabling new receive mode: 0x%08X\n",
			rx_mode);

		sc->rx_mode = rx_mode;
		REG_WR(sc, BCE_EMAC_RX_MODE, rx_mode);
	}

	/* Disable and clear the existing sort before enabling a new sort. */
	REG_WR(sc, BCE_RPM_SORT_USER0, 0x0);
	REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode);
	REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode | BCE_RPM_SORT_USER0_ENA);
}


/****************************************************************************/
/* Called periodically to update statistics from the controller's           */
/* statistics block.                                                        */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
 */
/****************************************************************************/
static void
bce_stats_update(struct bce_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct statistics_block *stats = sc->stats_block;

	DBPRINT(sc, BCE_EXCESSIVE, "Entering %s()\n", __func__);

	ASSERT_SERIALIZED(ifp->if_serializer);

	/*
	 * Certain controllers don't report carrier sense errors correctly.
	 * See errata E11_5708CA0_1165.
	 */
	if (!(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) &&
	    !(BCE_CHIP_ID(sc) == BCE_CHIP_ID_5708_A0)) {
		ifp->if_oerrors +=
			(u_long)stats->stat_Dot3StatsCarrierSenseErrors;
	}

	/*
	 * Update the sysctl statistics from the hardware statistics.
	 * 64-bit counters are delivered as hi/lo register pairs.
	 */
	sc->stat_IfHCInOctets =
		((uint64_t)stats->stat_IfHCInOctets_hi << 32) +
		(uint64_t)stats->stat_IfHCInOctets_lo;

	sc->stat_IfHCInBadOctets =
		((uint64_t)stats->stat_IfHCInBadOctets_hi << 32) +
		(uint64_t)stats->stat_IfHCInBadOctets_lo;

	sc->stat_IfHCOutOctets =
		((uint64_t)stats->stat_IfHCOutOctets_hi << 32) +
		(uint64_t)stats->stat_IfHCOutOctets_lo;

	sc->stat_IfHCOutBadOctets =
		((uint64_t)stats->stat_IfHCOutBadOctets_hi << 32) +
		(uint64_t)stats->stat_IfHCOutBadOctets_lo;

	sc->stat_IfHCInUcastPkts =
		((uint64_t)stats->stat_IfHCInUcastPkts_hi << 32) +
		(uint64_t)stats->stat_IfHCInUcastPkts_lo;

	sc->stat_IfHCInMulticastPkts =
		((uint64_t)stats->stat_IfHCInMulticastPkts_hi << 32) +
		(uint64_t)stats->stat_IfHCInMulticastPkts_lo;

	sc->stat_IfHCInBroadcastPkts =
		((uint64_t)stats->stat_IfHCInBroadcastPkts_hi << 32) +
		(uint64_t)stats->stat_IfHCInBroadcastPkts_lo;

	sc->stat_IfHCOutUcastPkts =
		((uint64_t)stats->stat_IfHCOutUcastPkts_hi << 32) +
		(uint64_t)stats->stat_IfHCOutUcastPkts_lo;

	sc->stat_IfHCOutMulticastPkts =
		((uint64_t)stats->stat_IfHCOutMulticastPkts_hi << 32) +
		(uint64_t)stats->stat_IfHCOutMulticastPkts_lo;

	sc->stat_IfHCOutBroadcastPkts =
		((uint64_t)stats->stat_IfHCOutBroadcastPkts_hi << 32) +
		(uint64_t)stats->stat_IfHCOutBroadcastPkts_lo;

	sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors =
		stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors;

	sc->stat_Dot3StatsCarrierSenseErrors =
		stats->stat_Dot3StatsCarrierSenseErrors;

	sc->stat_Dot3StatsFCSErrors =
		stats->stat_Dot3StatsFCSErrors;

	sc->stat_Dot3StatsAlignmentErrors =
		stats->stat_Dot3StatsAlignmentErrors;

	sc->stat_Dot3StatsSingleCollisionFrames =
		stats->stat_Dot3StatsSingleCollisionFrames;

	sc->stat_Dot3StatsMultipleCollisionFrames =
		stats->stat_Dot3StatsMultipleCollisionFrames;

	sc->stat_Dot3StatsDeferredTransmissions =
		stats->stat_Dot3StatsDeferredTransmissions;

	sc->stat_Dot3StatsExcessiveCollisions =
		stats->stat_Dot3StatsExcessiveCollisions;

	sc->stat_Dot3StatsLateCollisions =
		stats->stat_Dot3StatsLateCollisions;

	sc->stat_EtherStatsCollisions =
		stats->stat_EtherStatsCollisions;

	sc->stat_EtherStatsFragments =
		stats->stat_EtherStatsFragments;

	sc->stat_EtherStatsJabbers =
		stats->stat_EtherStatsJabbers;

	sc->stat_EtherStatsUndersizePkts =
		stats->stat_EtherStatsUndersizePkts;

	sc->stat_EtherStatsOverrsizePkts =
		stats->stat_EtherStatsOverrsizePkts;

	sc->stat_EtherStatsPktsRx64Octets =
		stats->stat_EtherStatsPktsRx64Octets;

	sc->stat_EtherStatsPktsRx65Octetsto127Octets =
		stats->stat_EtherStatsPktsRx65Octetsto127Octets;

	sc->stat_EtherStatsPktsRx128Octetsto255Octets =
		stats->stat_EtherStatsPktsRx128Octetsto255Octets;

	sc->stat_EtherStatsPktsRx256Octetsto511Octets =
		stats->stat_EtherStatsPktsRx256Octetsto511Octets;

	sc->stat_EtherStatsPktsRx512Octetsto1023Octets =
		stats->stat_EtherStatsPktsRx512Octetsto1023Octets;

	sc->stat_EtherStatsPktsRx1024Octetsto1522Octets =
		stats->stat_EtherStatsPktsRx1024Octetsto1522Octets;

	sc->stat_EtherStatsPktsRx1523Octetsto9022Octets =
		stats->stat_EtherStatsPktsRx1523Octetsto9022Octets;

	sc->stat_EtherStatsPktsTx64Octets =
		stats->stat_EtherStatsPktsTx64Octets;

	sc->stat_EtherStatsPktsTx65Octetsto127Octets =
		stats->stat_EtherStatsPktsTx65Octetsto127Octets;

	sc->stat_EtherStatsPktsTx128Octetsto255Octets =
		stats->stat_EtherStatsPktsTx128Octetsto255Octets;

	sc->stat_EtherStatsPktsTx256Octetsto511Octets =
		stats->stat_EtherStatsPktsTx256Octetsto511Octets;

	sc->stat_EtherStatsPktsTx512Octetsto1023Octets =
		stats->stat_EtherStatsPktsTx512Octetsto1023Octets;

	sc->stat_EtherStatsPktsTx1024Octetsto1522Octets =
		stats->stat_EtherStatsPktsTx1024Octetsto1522Octets;

	sc->stat_EtherStatsPktsTx1523Octetsto9022Octets =
		stats->stat_EtherStatsPktsTx1523Octetsto9022Octets;

	sc->stat_XonPauseFramesReceived =
		stats->stat_XonPauseFramesReceived;

	sc->stat_XoffPauseFramesReceived =
		stats->stat_XoffPauseFramesReceived;

	sc->stat_OutXonSent =
		stats->stat_OutXonSent;

	sc->stat_OutXoffSent =
		stats->stat_OutXoffSent;

	sc->stat_FlowControlDone =
		stats->stat_FlowControlDone;

	sc->stat_MacControlFramesReceived =
		stats->stat_MacControlFramesReceived;

	sc->stat_XoffStateEntered =
		stats->stat_XoffStateEntered;

	sc->stat_IfInFramesL2FilterDiscards =
		stats->stat_IfInFramesL2FilterDiscards;

	sc->stat_IfInRuleCheckerDiscards =
		stats->stat_IfInRuleCheckerDiscards;

	sc->stat_IfInFTQDiscards =
		stats->stat_IfInFTQDiscards;

	sc->stat_IfInMBUFDiscards =
		stats->stat_IfInMBUFDiscards;

	sc->stat_IfInRuleCheckerP4Hit =
		stats->stat_IfInRuleCheckerP4Hit;

	sc->stat_CatchupInRuleCheckerDiscards =
		stats->stat_CatchupInRuleCheckerDiscards;

	sc->stat_CatchupInFTQDiscards =
		stats->stat_CatchupInFTQDiscards;

	sc->stat_CatchupInMBUFDiscards =
		stats->stat_CatchupInMBUFDiscards;

	sc->stat_CatchupInRuleCheckerP4Hit =
		stats->stat_CatchupInRuleCheckerP4Hit;

	/* NOTE(review): 0x120084 is read indirectly as a "completion unit
	 * out of RX buffers" count — magic offset, confirm against the
	 * chip's register map. */
	sc->com_no_buffers = REG_RD_IND(sc, 0x120084);

	/*
	 * Update the interface statistics from the
	 * hardware statistics.
	 */
	ifp->if_collisions = (u_long)sc->stat_EtherStatsCollisions;

	ifp->if_ierrors = (u_long)sc->stat_EtherStatsUndersizePkts +
	    (u_long)sc->stat_EtherStatsOverrsizePkts +
	    (u_long)sc->stat_IfInMBUFDiscards +
	    (u_long)sc->stat_Dot3StatsAlignmentErrors +
	    (u_long)sc->stat_Dot3StatsFCSErrors +
	    (u_long)sc->stat_IfInRuleCheckerDiscards +
	    (u_long)sc->stat_IfInFTQDiscards +
	    (u_long)sc->com_no_buffers;

	ifp->if_oerrors =
	    (u_long)sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors +
	    (u_long)sc->stat_Dot3StatsExcessiveCollisions +
	    (u_long)sc->stat_Dot3StatsLateCollisions;

	DBPRINT(sc, BCE_EXCESSIVE, "Exiting %s()\n", __func__);
}


/****************************************************************************/
/* Periodic function to notify the bootcode that the driver is still        */
/* present.                                                                 */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_pulse(void *xsc)
{
	struct bce_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t msg;

	lwkt_serialize_enter(ifp->if_serializer);

	/* Non-oneshot MSI can be lost; probe for a stuck interrupt. */
	if (ifp->if_flags & IFF_RUNNING) {
		if (sc->bce_irq_type == PCI_INTR_TYPE_MSI &&
		    (sc->bce_flags & BCE_ONESHOT_MSI_FLAG) == 0)
			bce_pulse_check_msi(sc);
	}

	/* Tell the firmware that the driver is still running. */
	msg = (uint32_t)++sc->bce_fw_drv_pulse_wr_seq;
	bce_shmem_wr(sc, BCE_DRV_PULSE_MB, msg);

	/* Update the bootcode condition. */
	sc->bc_state = bce_shmem_rd(sc, BCE_BC_STATE_CONDITION);

	/* Report whether the bootcode still knows the driver is running. */
	if (!sc->bce_drv_cardiac_arrest) {
		if (!(sc->bc_state & BCE_CONDITION_DRV_PRESENT)) {
			sc->bce_drv_cardiac_arrest = 1;
			if_printf(ifp, "Bootcode lost the driver pulse! "
				  "(bc_state = 0x%08X)\n", sc->bc_state);
		}
	} else {
		/*
		 * Not supported by all bootcode versions.
		 * (v5.0.11+ and v5.2.1+)  Older bootcode
		 * will require the driver to reset the
		 * controller to clear this condition.
		 */
		if (sc->bc_state & BCE_CONDITION_DRV_PRESENT) {
			sc->bce_drv_cardiac_arrest = 0;
			if_printf(ifp, "Bootcode found the driver pulse! "
				  "(bc_state = 0x%08X)\n", sc->bc_state);
		}
	}

	/* Schedule the next pulse. */
	callout_reset(&sc->bce_pulse_callout, hz, bce_pulse, sc);

	lwkt_serialize_exit(ifp->if_serializer);
}

/*
 * Detect a lost (non-oneshot) MSI: if there is pending RX/TX/link work but
 * the status block index has not advanced since the last check, toggle the
 * MSI enable bit to re-arm and run the handler by hand.
 */
static void
bce_pulse_check_msi(struct bce_softc *sc)
{
	int check = 0;

	if (bce_get_hw_rx_cons(sc) != sc->hw_rx_cons) {
		check = 1;
	} else if (bce_get_hw_tx_cons(sc) != sc->hw_tx_cons) {
		check = 1;
	} else {
		struct status_block *sblk = sc->status_block;

		if ((sblk->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
		    (sblk->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE))
			check = 1;
	}

	if (check) {
		uint32_t msi_ctrl;

		msi_ctrl = REG_RD(sc, BCE_PCICFG_MSI_CONTROL);
		if ((msi_ctrl & BCE_PCICFG_MSI_CONTROL_ENABLE) == 0)
			return;

		if (sc->pulse_check_status_idx == sc->last_status_idx) {
			if_printf(&sc->arpcom.ac_if, "missing MSI\n");

			/* Toggle MSI off/on to re-arm the message. */
			REG_WR(sc, BCE_PCICFG_MSI_CONTROL,
			       msi_ctrl & ~BCE_PCICFG_MSI_CONTROL_ENABLE);
			REG_WR(sc, BCE_PCICFG_MSI_CONTROL, msi_ctrl);

			bce_intr_msi(sc);
		}
	}
	sc->pulse_check_status_idx = sc->last_status_idx;
}

/****************************************************************************/
/* Periodic function to perform maintenance tasks.                          */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_tick_serialized(struct bce_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii;

	ASSERT_SERIALIZED(ifp->if_serializer);

	/* Update the statistics from the hardware statistics block. */
	bce_stats_update(sc);

	/* Schedule the next tick. */
	callout_reset(&sc->bce_tick_callout, hz, bce_tick, sc);

	/* If link is already up then we're done. */
	if (sc->bce_link)
		return;

	mii = device_get_softc(sc->bce_miibus);
	mii_tick(mii);

	/* Check if the link has come up. */
	if ((mii->mii_media_status & IFM_ACTIVE) &&
	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
		sc->bce_link++;
		/* Now that link is up, handle any outstanding TX traffic. */
		if (!ifq_is_empty(&ifp->if_snd))
			if_devstart(ifp);
	}
}


/* Callout wrapper: enter the serializer and run the real tick handler. */
static void
bce_tick(void *xsc)
{
	struct bce_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	bce_tick_serialized(sc);
	lwkt_serialize_exit(ifp->if_serializer);
}


#ifdef BCE_DEBUG
/****************************************************************************/
/* Allows the driver state to be dumped through the sysctl interface.       */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.
*/ 6001 /****************************************************************************/ 6002 static int 6003 bce_sysctl_driver_state(SYSCTL_HANDLER_ARGS) 6004 { 6005 int error; 6006 int result; 6007 struct bce_softc *sc; 6008 6009 result = -1; 6010 error = sysctl_handle_int(oidp, &result, 0, req); 6011 6012 if (error || !req->newptr) 6013 return (error); 6014 6015 if (result == 1) { 6016 sc = (struct bce_softc *)arg1; 6017 bce_dump_driver_state(sc); 6018 } 6019 6020 return error; 6021 } 6022 6023 6024 /****************************************************************************/ 6025 /* Allows the hardware state to be dumped through the sysctl interface. */ 6026 /* */ 6027 /* Returns: */ 6028 /* 0 for success, positive value for failure. */ 6029 /****************************************************************************/ 6030 static int 6031 bce_sysctl_hw_state(SYSCTL_HANDLER_ARGS) 6032 { 6033 int error; 6034 int result; 6035 struct bce_softc *sc; 6036 6037 result = -1; 6038 error = sysctl_handle_int(oidp, &result, 0, req); 6039 6040 if (error || !req->newptr) 6041 return (error); 6042 6043 if (result == 1) { 6044 sc = (struct bce_softc *)arg1; 6045 bce_dump_hw_state(sc); 6046 } 6047 6048 return error; 6049 } 6050 6051 6052 /****************************************************************************/ 6053 /* Provides a sysctl interface to allows dumping the RX chain. */ 6054 /* */ 6055 /* Returns: */ 6056 /* 0 for success, positive value for failure. 
*/ 6057 /****************************************************************************/ 6058 static int 6059 bce_sysctl_dump_rx_chain(SYSCTL_HANDLER_ARGS) 6060 { 6061 int error; 6062 int result; 6063 struct bce_softc *sc; 6064 6065 result = -1; 6066 error = sysctl_handle_int(oidp, &result, 0, req); 6067 6068 if (error || !req->newptr) 6069 return (error); 6070 6071 if (result == 1) { 6072 sc = (struct bce_softc *)arg1; 6073 bce_dump_rx_chain(sc, 0, USABLE_RX_BD); 6074 } 6075 6076 return error; 6077 } 6078 6079 6080 /****************************************************************************/ 6081 /* Provides a sysctl interface to allows dumping the TX chain. */ 6082 /* */ 6083 /* Returns: */ 6084 /* 0 for success, positive value for failure. */ 6085 /****************************************************************************/ 6086 static int 6087 bce_sysctl_dump_tx_chain(SYSCTL_HANDLER_ARGS) 6088 { 6089 int error; 6090 int result; 6091 struct bce_softc *sc; 6092 6093 result = -1; 6094 error = sysctl_handle_int(oidp, &result, 0, req); 6095 6096 if (error || !req->newptr) 6097 return (error); 6098 6099 if (result == 1) { 6100 sc = (struct bce_softc *)arg1; 6101 bce_dump_tx_chain(sc, 0, USABLE_TX_BD); 6102 } 6103 6104 return error; 6105 } 6106 6107 6108 /****************************************************************************/ 6109 /* Provides a sysctl interface to allow reading arbitrary registers in the */ 6110 /* device. DO NOT ENABLE ON PRODUCTION SYSTEMS! */ 6111 /* */ 6112 /* Returns: */ 6113 /* 0 for success, positive value for failure. */ 6114 /****************************************************************************/ 6115 static int 6116 bce_sysctl_reg_read(SYSCTL_HANDLER_ARGS) 6117 { 6118 struct bce_softc *sc; 6119 int error; 6120 uint32_t val, result; 6121 6122 result = -1; 6123 error = sysctl_handle_int(oidp, &result, 0, req); 6124 if (error || (req->newptr == NULL)) 6125 return (error); 6126 6127 /* Make sure the register is accessible. 
*/ 6128 if (result < 0x8000) { 6129 sc = (struct bce_softc *)arg1; 6130 val = REG_RD(sc, result); 6131 if_printf(&sc->arpcom.ac_if, "reg 0x%08X = 0x%08X\n", 6132 result, val); 6133 } else if (result < 0x0280000) { 6134 sc = (struct bce_softc *)arg1; 6135 val = REG_RD_IND(sc, result); 6136 if_printf(&sc->arpcom.ac_if, "reg 0x%08X = 0x%08X\n", 6137 result, val); 6138 } 6139 return (error); 6140 } 6141 6142 6143 /****************************************************************************/ 6144 /* Provides a sysctl interface to allow reading arbitrary PHY registers in */ 6145 /* the device. DO NOT ENABLE ON PRODUCTION SYSTEMS! */ 6146 /* */ 6147 /* Returns: */ 6148 /* 0 for success, positive value for failure. */ 6149 /****************************************************************************/ 6150 static int 6151 bce_sysctl_phy_read(SYSCTL_HANDLER_ARGS) 6152 { 6153 struct bce_softc *sc; 6154 device_t dev; 6155 int error, result; 6156 uint16_t val; 6157 6158 result = -1; 6159 error = sysctl_handle_int(oidp, &result, 0, req); 6160 if (error || (req->newptr == NULL)) 6161 return (error); 6162 6163 /* Make sure the register is accessible. */ 6164 if (result < 0x20) { 6165 sc = (struct bce_softc *)arg1; 6166 dev = sc->bce_dev; 6167 val = bce_miibus_read_reg(dev, sc->bce_phy_addr, result); 6168 if_printf(&sc->arpcom.ac_if, 6169 "phy 0x%02X = 0x%04X\n", result, val); 6170 } 6171 return (error); 6172 } 6173 6174 6175 /****************************************************************************/ 6176 /* Provides a sysctl interface to forcing the driver to dump state and */ 6177 /* enter the debugger. DO NOT ENABLE ON PRODUCTION SYSTEMS! */ 6178 /* */ 6179 /* Returns: */ 6180 /* 0 for success, positive value for failure. 
*/ 6181 /****************************************************************************/ 6182 static int 6183 bce_sysctl_breakpoint(SYSCTL_HANDLER_ARGS) 6184 { 6185 int error; 6186 int result; 6187 struct bce_softc *sc; 6188 6189 result = -1; 6190 error = sysctl_handle_int(oidp, &result, 0, req); 6191 6192 if (error || !req->newptr) 6193 return (error); 6194 6195 if (result == 1) { 6196 sc = (struct bce_softc *)arg1; 6197 bce_breakpoint(sc); 6198 } 6199 6200 return error; 6201 } 6202 #endif 6203 6204 6205 /****************************************************************************/ 6206 /* Adds any sysctl parameters for tuning or debugging purposes. */ 6207 /* */ 6208 /* Returns: */ 6209 /* 0 for success, positive value for failure. */ 6210 /****************************************************************************/ 6211 static void 6212 bce_add_sysctls(struct bce_softc *sc) 6213 { 6214 struct sysctl_ctx_list *ctx; 6215 struct sysctl_oid_list *children; 6216 6217 sysctl_ctx_init(&sc->bce_sysctl_ctx); 6218 sc->bce_sysctl_tree = SYSCTL_ADD_NODE(&sc->bce_sysctl_ctx, 6219 SYSCTL_STATIC_CHILDREN(_hw), 6220 OID_AUTO, 6221 device_get_nameunit(sc->bce_dev), 6222 CTLFLAG_RD, 0, ""); 6223 if (sc->bce_sysctl_tree == NULL) { 6224 device_printf(sc->bce_dev, "can't add sysctl node\n"); 6225 return; 6226 } 6227 6228 ctx = &sc->bce_sysctl_ctx; 6229 children = SYSCTL_CHILDREN(sc->bce_sysctl_tree); 6230 6231 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_bds_int", 6232 CTLTYPE_INT | CTLFLAG_RW, 6233 sc, 0, bce_sysctl_tx_bds_int, "I", 6234 "Send max coalesced BD count during interrupt"); 6235 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_bds", 6236 CTLTYPE_INT | CTLFLAG_RW, 6237 sc, 0, bce_sysctl_tx_bds, "I", 6238 "Send max coalesced BD count"); 6239 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_ticks_int", 6240 CTLTYPE_INT | CTLFLAG_RW, 6241 sc, 0, bce_sysctl_tx_ticks_int, "I", 6242 "Send coalescing ticks during interrupt"); 6243 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_ticks", 
6244 CTLTYPE_INT | CTLFLAG_RW, 6245 sc, 0, bce_sysctl_tx_ticks, "I", 6246 "Send coalescing ticks"); 6247 6248 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_bds_int", 6249 CTLTYPE_INT | CTLFLAG_RW, 6250 sc, 0, bce_sysctl_rx_bds_int, "I", 6251 "Receive max coalesced BD count during interrupt"); 6252 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_bds", 6253 CTLTYPE_INT | CTLFLAG_RW, 6254 sc, 0, bce_sysctl_rx_bds, "I", 6255 "Receive max coalesced BD count"); 6256 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_ticks_int", 6257 CTLTYPE_INT | CTLFLAG_RW, 6258 sc, 0, bce_sysctl_rx_ticks_int, "I", 6259 "Receive coalescing ticks during interrupt"); 6260 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_ticks", 6261 CTLTYPE_INT | CTLFLAG_RW, 6262 sc, 0, bce_sysctl_rx_ticks, "I", 6263 "Receive coalescing ticks"); 6264 6265 #ifdef BCE_DEBUG 6266 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 6267 "rx_low_watermark", 6268 CTLFLAG_RD, &sc->rx_low_watermark, 6269 0, "Lowest level of free rx_bd's"); 6270 6271 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 6272 "rx_empty_count", 6273 CTLFLAG_RD, &sc->rx_empty_count, 6274 0, "Number of times the RX chain was empty"); 6275 6276 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 6277 "tx_hi_watermark", 6278 CTLFLAG_RD, &sc->tx_hi_watermark, 6279 0, "Highest level of used tx_bd's"); 6280 6281 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 6282 "tx_full_count", 6283 CTLFLAG_RD, &sc->tx_full_count, 6284 0, "Number of times the TX chain was full"); 6285 6286 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 6287 "l2fhdr_status_errors", 6288 CTLFLAG_RD, &sc->l2fhdr_status_errors, 6289 0, "l2_fhdr status errors"); 6290 6291 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 6292 "unexpected_attentions", 6293 CTLFLAG_RD, &sc->unexpected_attentions, 6294 0, "unexpected attentions"); 6295 6296 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 6297 "lost_status_block_updates", 6298 CTLFLAG_RD, &sc->lost_status_block_updates, 6299 0, "lost status block updates"); 6300 6301 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 6302 
"mbuf_alloc_failed", 6303 CTLFLAG_RD, &sc->mbuf_alloc_failed, 6304 0, "mbuf cluster allocation failures"); 6305 #endif 6306 6307 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 6308 "stat_IfHCInOctets", 6309 CTLFLAG_RD, &sc->stat_IfHCInOctets, 6310 "Bytes received"); 6311 6312 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 6313 "stat_IfHCInBadOctets", 6314 CTLFLAG_RD, &sc->stat_IfHCInBadOctets, 6315 "Bad bytes received"); 6316 6317 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 6318 "stat_IfHCOutOctets", 6319 CTLFLAG_RD, &sc->stat_IfHCOutOctets, 6320 "Bytes sent"); 6321 6322 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 6323 "stat_IfHCOutBadOctets", 6324 CTLFLAG_RD, &sc->stat_IfHCOutBadOctets, 6325 "Bad bytes sent"); 6326 6327 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 6328 "stat_IfHCInUcastPkts", 6329 CTLFLAG_RD, &sc->stat_IfHCInUcastPkts, 6330 "Unicast packets received"); 6331 6332 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 6333 "stat_IfHCInMulticastPkts", 6334 CTLFLAG_RD, &sc->stat_IfHCInMulticastPkts, 6335 "Multicast packets received"); 6336 6337 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 6338 "stat_IfHCInBroadcastPkts", 6339 CTLFLAG_RD, &sc->stat_IfHCInBroadcastPkts, 6340 "Broadcast packets received"); 6341 6342 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 6343 "stat_IfHCOutUcastPkts", 6344 CTLFLAG_RD, &sc->stat_IfHCOutUcastPkts, 6345 "Unicast packets sent"); 6346 6347 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 6348 "stat_IfHCOutMulticastPkts", 6349 CTLFLAG_RD, &sc->stat_IfHCOutMulticastPkts, 6350 "Multicast packets sent"); 6351 6352 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 6353 "stat_IfHCOutBroadcastPkts", 6354 CTLFLAG_RD, &sc->stat_IfHCOutBroadcastPkts, 6355 "Broadcast packets sent"); 6356 6357 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6358 "stat_emac_tx_stat_dot3statsinternalmactransmiterrors", 6359 CTLFLAG_RD, &sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors, 6360 0, "Internal MAC transmit errors"); 6361 6362 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6363 
"stat_Dot3StatsCarrierSenseErrors", 6364 CTLFLAG_RD, &sc->stat_Dot3StatsCarrierSenseErrors, 6365 0, "Carrier sense errors"); 6366 6367 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6368 "stat_Dot3StatsFCSErrors", 6369 CTLFLAG_RD, &sc->stat_Dot3StatsFCSErrors, 6370 0, "Frame check sequence errors"); 6371 6372 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6373 "stat_Dot3StatsAlignmentErrors", 6374 CTLFLAG_RD, &sc->stat_Dot3StatsAlignmentErrors, 6375 0, "Alignment errors"); 6376 6377 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6378 "stat_Dot3StatsSingleCollisionFrames", 6379 CTLFLAG_RD, &sc->stat_Dot3StatsSingleCollisionFrames, 6380 0, "Single Collision Frames"); 6381 6382 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6383 "stat_Dot3StatsMultipleCollisionFrames", 6384 CTLFLAG_RD, &sc->stat_Dot3StatsMultipleCollisionFrames, 6385 0, "Multiple Collision Frames"); 6386 6387 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6388 "stat_Dot3StatsDeferredTransmissions", 6389 CTLFLAG_RD, &sc->stat_Dot3StatsDeferredTransmissions, 6390 0, "Deferred Transmissions"); 6391 6392 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6393 "stat_Dot3StatsExcessiveCollisions", 6394 CTLFLAG_RD, &sc->stat_Dot3StatsExcessiveCollisions, 6395 0, "Excessive Collisions"); 6396 6397 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6398 "stat_Dot3StatsLateCollisions", 6399 CTLFLAG_RD, &sc->stat_Dot3StatsLateCollisions, 6400 0, "Late Collisions"); 6401 6402 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6403 "stat_EtherStatsCollisions", 6404 CTLFLAG_RD, &sc->stat_EtherStatsCollisions, 6405 0, "Collisions"); 6406 6407 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6408 "stat_EtherStatsFragments", 6409 CTLFLAG_RD, &sc->stat_EtherStatsFragments, 6410 0, "Fragments"); 6411 6412 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6413 "stat_EtherStatsJabbers", 6414 CTLFLAG_RD, &sc->stat_EtherStatsJabbers, 6415 0, "Jabbers"); 6416 6417 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6418 "stat_EtherStatsUndersizePkts", 6419 CTLFLAG_RD, &sc->stat_EtherStatsUndersizePkts, 6420 0, 
"Undersize packets"); 6421 6422 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6423 "stat_EtherStatsOverrsizePkts", 6424 CTLFLAG_RD, &sc->stat_EtherStatsOverrsizePkts, 6425 0, "stat_EtherStatsOverrsizePkts"); 6426 6427 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6428 "stat_EtherStatsPktsRx64Octets", 6429 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx64Octets, 6430 0, "Bytes received in 64 byte packets"); 6431 6432 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6433 "stat_EtherStatsPktsRx65Octetsto127Octets", 6434 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx65Octetsto127Octets, 6435 0, "Bytes received in 65 to 127 byte packets"); 6436 6437 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6438 "stat_EtherStatsPktsRx128Octetsto255Octets", 6439 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx128Octetsto255Octets, 6440 0, "Bytes received in 128 to 255 byte packets"); 6441 6442 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6443 "stat_EtherStatsPktsRx256Octetsto511Octets", 6444 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx256Octetsto511Octets, 6445 0, "Bytes received in 256 to 511 byte packets"); 6446 6447 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6448 "stat_EtherStatsPktsRx512Octetsto1023Octets", 6449 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx512Octetsto1023Octets, 6450 0, "Bytes received in 512 to 1023 byte packets"); 6451 6452 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6453 "stat_EtherStatsPktsRx1024Octetsto1522Octets", 6454 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1024Octetsto1522Octets, 6455 0, "Bytes received in 1024 t0 1522 byte packets"); 6456 6457 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6458 "stat_EtherStatsPktsRx1523Octetsto9022Octets", 6459 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1523Octetsto9022Octets, 6460 0, "Bytes received in 1523 to 9022 byte packets"); 6461 6462 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6463 "stat_EtherStatsPktsTx64Octets", 6464 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx64Octets, 6465 0, "Bytes sent in 64 byte packets"); 6466 6467 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6468 
"stat_EtherStatsPktsTx65Octetsto127Octets", 6469 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx65Octetsto127Octets, 6470 0, "Bytes sent in 65 to 127 byte packets"); 6471 6472 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6473 "stat_EtherStatsPktsTx128Octetsto255Octets", 6474 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx128Octetsto255Octets, 6475 0, "Bytes sent in 128 to 255 byte packets"); 6476 6477 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6478 "stat_EtherStatsPktsTx256Octetsto511Octets", 6479 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx256Octetsto511Octets, 6480 0, "Bytes sent in 256 to 511 byte packets"); 6481 6482 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6483 "stat_EtherStatsPktsTx512Octetsto1023Octets", 6484 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx512Octetsto1023Octets, 6485 0, "Bytes sent in 512 to 1023 byte packets"); 6486 6487 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6488 "stat_EtherStatsPktsTx1024Octetsto1522Octets", 6489 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1024Octetsto1522Octets, 6490 0, "Bytes sent in 1024 to 1522 byte packets"); 6491 6492 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6493 "stat_EtherStatsPktsTx1523Octetsto9022Octets", 6494 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1523Octetsto9022Octets, 6495 0, "Bytes sent in 1523 to 9022 byte packets"); 6496 6497 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6498 "stat_XonPauseFramesReceived", 6499 CTLFLAG_RD, &sc->stat_XonPauseFramesReceived, 6500 0, "XON pause frames receved"); 6501 6502 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6503 "stat_XoffPauseFramesReceived", 6504 CTLFLAG_RD, &sc->stat_XoffPauseFramesReceived, 6505 0, "XOFF pause frames received"); 6506 6507 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6508 "stat_OutXonSent", 6509 CTLFLAG_RD, &sc->stat_OutXonSent, 6510 0, "XON pause frames sent"); 6511 6512 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6513 "stat_OutXoffSent", 6514 CTLFLAG_RD, &sc->stat_OutXoffSent, 6515 0, "XOFF pause frames sent"); 6516 6517 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6518 "stat_FlowControlDone", 6519 
CTLFLAG_RD, &sc->stat_FlowControlDone, 6520 0, "Flow control done"); 6521 6522 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6523 "stat_MacControlFramesReceived", 6524 CTLFLAG_RD, &sc->stat_MacControlFramesReceived, 6525 0, "MAC control frames received"); 6526 6527 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6528 "stat_XoffStateEntered", 6529 CTLFLAG_RD, &sc->stat_XoffStateEntered, 6530 0, "XOFF state entered"); 6531 6532 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6533 "stat_IfInFramesL2FilterDiscards", 6534 CTLFLAG_RD, &sc->stat_IfInFramesL2FilterDiscards, 6535 0, "Received L2 packets discarded"); 6536 6537 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6538 "stat_IfInRuleCheckerDiscards", 6539 CTLFLAG_RD, &sc->stat_IfInRuleCheckerDiscards, 6540 0, "Received packets discarded by rule"); 6541 6542 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6543 "stat_IfInFTQDiscards", 6544 CTLFLAG_RD, &sc->stat_IfInFTQDiscards, 6545 0, "Received packet FTQ discards"); 6546 6547 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6548 "stat_IfInMBUFDiscards", 6549 CTLFLAG_RD, &sc->stat_IfInMBUFDiscards, 6550 0, "Received packets discarded due to lack of controller buffer memory"); 6551 6552 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6553 "stat_IfInRuleCheckerP4Hit", 6554 CTLFLAG_RD, &sc->stat_IfInRuleCheckerP4Hit, 6555 0, "Received packets rule checker hits"); 6556 6557 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6558 "stat_CatchupInRuleCheckerDiscards", 6559 CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerDiscards, 6560 0, "Received packets discarded in Catchup path"); 6561 6562 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6563 "stat_CatchupInFTQDiscards", 6564 CTLFLAG_RD, &sc->stat_CatchupInFTQDiscards, 6565 0, "Received packets discarded in FTQ in Catchup path"); 6566 6567 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6568 "stat_CatchupInMBUFDiscards", 6569 CTLFLAG_RD, &sc->stat_CatchupInMBUFDiscards, 6570 0, "Received packets discarded in controller buffer memory in Catchup path"); 6571 6572 SYSCTL_ADD_UINT(ctx, children, 
OID_AUTO, 6573 "stat_CatchupInRuleCheckerP4Hit", 6574 CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerP4Hit, 6575 0, "Received packets rule checker hits in Catchup path"); 6576 6577 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6578 "com_no_buffers", 6579 CTLFLAG_RD, &sc->com_no_buffers, 6580 0, "Valid packets received but no RX buffers available"); 6581 6582 #ifdef BCE_DEBUG 6583 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 6584 "driver_state", CTLTYPE_INT | CTLFLAG_RW, 6585 (void *)sc, 0, 6586 bce_sysctl_driver_state, "I", "Drive state information"); 6587 6588 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 6589 "hw_state", CTLTYPE_INT | CTLFLAG_RW, 6590 (void *)sc, 0, 6591 bce_sysctl_hw_state, "I", "Hardware state information"); 6592 6593 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 6594 "dump_rx_chain", CTLTYPE_INT | CTLFLAG_RW, 6595 (void *)sc, 0, 6596 bce_sysctl_dump_rx_chain, "I", "Dump rx_bd chain"); 6597 6598 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 6599 "dump_tx_chain", CTLTYPE_INT | CTLFLAG_RW, 6600 (void *)sc, 0, 6601 bce_sysctl_dump_tx_chain, "I", "Dump tx_bd chain"); 6602 6603 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 6604 "breakpoint", CTLTYPE_INT | CTLFLAG_RW, 6605 (void *)sc, 0, 6606 bce_sysctl_breakpoint, "I", "Driver breakpoint"); 6607 6608 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 6609 "reg_read", CTLTYPE_INT | CTLFLAG_RW, 6610 (void *)sc, 0, 6611 bce_sysctl_reg_read, "I", "Register read"); 6612 6613 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 6614 "phy_read", CTLTYPE_INT | CTLFLAG_RW, 6615 (void *)sc, 0, 6616 bce_sysctl_phy_read, "I", "PHY register read"); 6617 6618 #endif 6619 6620 } 6621 6622 6623 /****************************************************************************/ 6624 /* BCE Debug Routines */ 6625 /****************************************************************************/ 6626 #ifdef BCE_DEBUG 6627 6628 /****************************************************************************/ 6629 /* Freezes the controller to allow for a cohesive state dump. 
*/ 6630 /* */ 6631 /* Returns: */ 6632 /* Nothing. */ 6633 /****************************************************************************/ 6634 static void 6635 bce_freeze_controller(struct bce_softc *sc) 6636 { 6637 uint32_t val; 6638 6639 val = REG_RD(sc, BCE_MISC_COMMAND); 6640 val |= BCE_MISC_COMMAND_DISABLE_ALL; 6641 REG_WR(sc, BCE_MISC_COMMAND, val); 6642 } 6643 6644 6645 /****************************************************************************/ 6646 /* Unfreezes the controller after a freeze operation. This may not always */ 6647 /* work and the controller will require a reset! */ 6648 /* */ 6649 /* Returns: */ 6650 /* Nothing. */ 6651 /****************************************************************************/ 6652 static void 6653 bce_unfreeze_controller(struct bce_softc *sc) 6654 { 6655 uint32_t val; 6656 6657 val = REG_RD(sc, BCE_MISC_COMMAND); 6658 val |= BCE_MISC_COMMAND_ENABLE_ALL; 6659 REG_WR(sc, BCE_MISC_COMMAND, val); 6660 } 6661 6662 6663 /****************************************************************************/ 6664 /* Prints out information about an mbuf. */ 6665 /* */ 6666 /* Returns: */ 6667 /* Nothing. */ 6668 /****************************************************************************/ 6669 static void 6670 bce_dump_mbuf(struct bce_softc *sc, struct mbuf *m) 6671 { 6672 struct ifnet *ifp = &sc->arpcom.ac_if; 6673 uint32_t val_hi, val_lo; 6674 struct mbuf *mp = m; 6675 6676 if (m == NULL) { 6677 /* Index out of range. 
*/ 6678 if_printf(ifp, "mbuf: null pointer\n"); 6679 return; 6680 } 6681 6682 while (mp) { 6683 val_hi = BCE_ADDR_HI(mp); 6684 val_lo = BCE_ADDR_LO(mp); 6685 if_printf(ifp, "mbuf: vaddr = 0x%08X:%08X, m_len = %d, " 6686 "m_flags = ( ", val_hi, val_lo, mp->m_len); 6687 6688 if (mp->m_flags & M_EXT) 6689 kprintf("M_EXT "); 6690 if (mp->m_flags & M_PKTHDR) 6691 kprintf("M_PKTHDR "); 6692 if (mp->m_flags & M_EOR) 6693 kprintf("M_EOR "); 6694 #ifdef M_RDONLY 6695 if (mp->m_flags & M_RDONLY) 6696 kprintf("M_RDONLY "); 6697 #endif 6698 6699 val_hi = BCE_ADDR_HI(mp->m_data); 6700 val_lo = BCE_ADDR_LO(mp->m_data); 6701 kprintf(") m_data = 0x%08X:%08X\n", val_hi, val_lo); 6702 6703 if (mp->m_flags & M_PKTHDR) { 6704 if_printf(ifp, "- m_pkthdr: flags = ( "); 6705 if (mp->m_flags & M_BCAST) 6706 kprintf("M_BCAST "); 6707 if (mp->m_flags & M_MCAST) 6708 kprintf("M_MCAST "); 6709 if (mp->m_flags & M_FRAG) 6710 kprintf("M_FRAG "); 6711 if (mp->m_flags & M_FIRSTFRAG) 6712 kprintf("M_FIRSTFRAG "); 6713 if (mp->m_flags & M_LASTFRAG) 6714 kprintf("M_LASTFRAG "); 6715 #ifdef M_VLANTAG 6716 if (mp->m_flags & M_VLANTAG) 6717 kprintf("M_VLANTAG "); 6718 #endif 6719 #ifdef M_PROMISC 6720 if (mp->m_flags & M_PROMISC) 6721 kprintf("M_PROMISC "); 6722 #endif 6723 kprintf(") csum_flags = ( "); 6724 if (mp->m_pkthdr.csum_flags & CSUM_IP) 6725 kprintf("CSUM_IP "); 6726 if (mp->m_pkthdr.csum_flags & CSUM_TCP) 6727 kprintf("CSUM_TCP "); 6728 if (mp->m_pkthdr.csum_flags & CSUM_UDP) 6729 kprintf("CSUM_UDP "); 6730 if (mp->m_pkthdr.csum_flags & CSUM_IP_FRAGS) 6731 kprintf("CSUM_IP_FRAGS "); 6732 if (mp->m_pkthdr.csum_flags & CSUM_FRAGMENT) 6733 kprintf("CSUM_FRAGMENT "); 6734 #ifdef CSUM_TSO 6735 if (mp->m_pkthdr.csum_flags & CSUM_TSO) 6736 kprintf("CSUM_TSO "); 6737 #endif 6738 if (mp->m_pkthdr.csum_flags & CSUM_IP_CHECKED) 6739 kprintf("CSUM_IP_CHECKED "); 6740 if (mp->m_pkthdr.csum_flags & CSUM_IP_VALID) 6741 kprintf("CSUM_IP_VALID "); 6742 if (mp->m_pkthdr.csum_flags & CSUM_DATA_VALID) 6743 
kprintf("CSUM_DATA_VALID "); 6744 kprintf(")\n"); 6745 } 6746 6747 if (mp->m_flags & M_EXT) { 6748 val_hi = BCE_ADDR_HI(mp->m_ext.ext_buf); 6749 val_lo = BCE_ADDR_LO(mp->m_ext.ext_buf); 6750 if_printf(ifp, "- m_ext: vaddr = 0x%08X:%08X, " 6751 "ext_size = %d\n", 6752 val_hi, val_lo, mp->m_ext.ext_size); 6753 } 6754 mp = mp->m_next; 6755 } 6756 } 6757 6758 6759 /****************************************************************************/ 6760 /* Prints out the mbufs in the TX mbuf chain. */ 6761 /* */ 6762 /* Returns: */ 6763 /* Nothing. */ 6764 /****************************************************************************/ 6765 static void 6766 bce_dump_tx_mbuf_chain(struct bce_softc *sc, int chain_prod, int count) 6767 { 6768 struct ifnet *ifp = &sc->arpcom.ac_if; 6769 int i; 6770 6771 if_printf(ifp, 6772 "----------------------------" 6773 " tx mbuf data " 6774 "----------------------------\n"); 6775 6776 for (i = 0; i < count; i++) { 6777 if_printf(ifp, "txmbuf[%d]\n", chain_prod); 6778 bce_dump_mbuf(sc, sc->tx_mbuf_ptr[chain_prod]); 6779 chain_prod = TX_CHAIN_IDX(NEXT_TX_BD(chain_prod)); 6780 } 6781 6782 if_printf(ifp, 6783 "----------------------------" 6784 "----------------" 6785 "----------------------------\n"); 6786 } 6787 6788 6789 /****************************************************************************/ 6790 /* Prints out the mbufs in the RX mbuf chain. */ 6791 /* */ 6792 /* Returns: */ 6793 /* Nothing. 
*/ 6794 /****************************************************************************/ 6795 static void 6796 bce_dump_rx_mbuf_chain(struct bce_softc *sc, int chain_prod, int count) 6797 { 6798 struct ifnet *ifp = &sc->arpcom.ac_if; 6799 int i; 6800 6801 if_printf(ifp, 6802 "----------------------------" 6803 " rx mbuf data " 6804 "----------------------------\n"); 6805 6806 for (i = 0; i < count; i++) { 6807 if_printf(ifp, "rxmbuf[0x%04X]\n", chain_prod); 6808 bce_dump_mbuf(sc, sc->rx_mbuf_ptr[chain_prod]); 6809 chain_prod = RX_CHAIN_IDX(NEXT_RX_BD(chain_prod)); 6810 } 6811 6812 if_printf(ifp, 6813 "----------------------------" 6814 "----------------" 6815 "----------------------------\n"); 6816 } 6817 6818 6819 /****************************************************************************/ 6820 /* Prints out a tx_bd structure. */ 6821 /* */ 6822 /* Returns: */ 6823 /* Nothing. */ 6824 /****************************************************************************/ 6825 static void 6826 bce_dump_txbd(struct bce_softc *sc, int idx, struct tx_bd *txbd) 6827 { 6828 struct ifnet *ifp = &sc->arpcom.ac_if; 6829 6830 if (idx > MAX_TX_BD) { 6831 /* Index out of range. */ 6832 if_printf(ifp, "tx_bd[0x%04X]: Invalid tx_bd index!\n", idx); 6833 } else if ((idx & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE) { 6834 /* TX Chain page pointer. */ 6835 if_printf(ifp, "tx_bd[0x%04X]: haddr = 0x%08X:%08X, " 6836 "chain page pointer\n", 6837 idx, txbd->tx_bd_haddr_hi, txbd->tx_bd_haddr_lo); 6838 } else { 6839 /* Normal tx_bd entry. 
*/ 6840 if_printf(ifp, "tx_bd[0x%04X]: haddr = 0x%08X:%08X, " 6841 "nbytes = 0x%08X, " 6842 "vlan tag= 0x%04X, flags = 0x%04X (", 6843 idx, txbd->tx_bd_haddr_hi, txbd->tx_bd_haddr_lo, 6844 txbd->tx_bd_mss_nbytes, 6845 txbd->tx_bd_vlan_tag, txbd->tx_bd_flags); 6846 6847 if (txbd->tx_bd_flags & TX_BD_FLAGS_CONN_FAULT) 6848 kprintf(" CONN_FAULT"); 6849 6850 if (txbd->tx_bd_flags & TX_BD_FLAGS_TCP_UDP_CKSUM) 6851 kprintf(" TCP_UDP_CKSUM"); 6852 6853 if (txbd->tx_bd_flags & TX_BD_FLAGS_IP_CKSUM) 6854 kprintf(" IP_CKSUM"); 6855 6856 if (txbd->tx_bd_flags & TX_BD_FLAGS_VLAN_TAG) 6857 kprintf(" VLAN"); 6858 6859 if (txbd->tx_bd_flags & TX_BD_FLAGS_COAL_NOW) 6860 kprintf(" COAL_NOW"); 6861 6862 if (txbd->tx_bd_flags & TX_BD_FLAGS_DONT_GEN_CRC) 6863 kprintf(" DONT_GEN_CRC"); 6864 6865 if (txbd->tx_bd_flags & TX_BD_FLAGS_START) 6866 kprintf(" START"); 6867 6868 if (txbd->tx_bd_flags & TX_BD_FLAGS_END) 6869 kprintf(" END"); 6870 6871 if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_LSO) 6872 kprintf(" LSO"); 6873 6874 if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_OPTION_WORD) 6875 kprintf(" OPTION_WORD"); 6876 6877 if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_FLAGS) 6878 kprintf(" FLAGS"); 6879 6880 if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_SNAP) 6881 kprintf(" SNAP"); 6882 6883 kprintf(" )\n"); 6884 } 6885 } 6886 6887 6888 /****************************************************************************/ 6889 /* Prints out a rx_bd structure. */ 6890 /* */ 6891 /* Returns: */ 6892 /* Nothing. */ 6893 /****************************************************************************/ 6894 static void 6895 bce_dump_rxbd(struct bce_softc *sc, int idx, struct rx_bd *rxbd) 6896 { 6897 struct ifnet *ifp = &sc->arpcom.ac_if; 6898 6899 if (idx > MAX_RX_BD) { 6900 /* Index out of range. */ 6901 if_printf(ifp, "rx_bd[0x%04X]: Invalid rx_bd index!\n", idx); 6902 } else if ((idx & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE) { 6903 /* TX Chain page pointer. 
*/ 6904 if_printf(ifp, "rx_bd[0x%04X]: haddr = 0x%08X:%08X, " 6905 "chain page pointer\n", 6906 idx, rxbd->rx_bd_haddr_hi, rxbd->rx_bd_haddr_lo); 6907 } else { 6908 /* Normal tx_bd entry. */ 6909 if_printf(ifp, "rx_bd[0x%04X]: haddr = 0x%08X:%08X, " 6910 "nbytes = 0x%08X, flags = 0x%08X\n", 6911 idx, rxbd->rx_bd_haddr_hi, rxbd->rx_bd_haddr_lo, 6912 rxbd->rx_bd_len, rxbd->rx_bd_flags); 6913 } 6914 } 6915 6916 6917 /****************************************************************************/ 6918 /* Prints out a l2_fhdr structure. */ 6919 /* */ 6920 /* Returns: */ 6921 /* Nothing. */ 6922 /****************************************************************************/ 6923 static void 6924 bce_dump_l2fhdr(struct bce_softc *sc, int idx, struct l2_fhdr *l2fhdr) 6925 { 6926 if_printf(&sc->arpcom.ac_if, "l2_fhdr[0x%04X]: status = 0x%08X, " 6927 "pkt_len = 0x%04X, vlan = 0x%04x, " 6928 "ip_xsum = 0x%04X, tcp_udp_xsum = 0x%04X\n", 6929 idx, l2fhdr->l2_fhdr_status, 6930 l2fhdr->l2_fhdr_pkt_len, l2fhdr->l2_fhdr_vlan_tag, 6931 l2fhdr->l2_fhdr_ip_xsum, l2fhdr->l2_fhdr_tcp_udp_xsum); 6932 } 6933 6934 6935 /****************************************************************************/ 6936 /* Prints out the tx chain. */ 6937 /* */ 6938 /* Returns: */ 6939 /* Nothing. */ 6940 /****************************************************************************/ 6941 static void 6942 bce_dump_tx_chain(struct bce_softc *sc, int tx_prod, int count) 6943 { 6944 struct ifnet *ifp = &sc->arpcom.ac_if; 6945 int i; 6946 6947 /* First some info about the tx_bd chain structure. 
*/ 6948 if_printf(ifp, 6949 "----------------------------" 6950 " tx_bd chain " 6951 "----------------------------\n"); 6952 6953 if_printf(ifp, "page size = 0x%08X, " 6954 "tx chain pages = 0x%08X\n", 6955 (uint32_t)BCM_PAGE_SIZE, (uint32_t)TX_PAGES); 6956 6957 if_printf(ifp, "tx_bd per page = 0x%08X, " 6958 "usable tx_bd per page = 0x%08X\n", 6959 (uint32_t)TOTAL_TX_BD_PER_PAGE, 6960 (uint32_t)USABLE_TX_BD_PER_PAGE); 6961 6962 if_printf(ifp, "total tx_bd = 0x%08X\n", (uint32_t)TOTAL_TX_BD); 6963 6964 if_printf(ifp, 6965 "----------------------------" 6966 " tx_bd data " 6967 "----------------------------\n"); 6968 6969 /* Now print out the tx_bd's themselves. */ 6970 for (i = 0; i < count; i++) { 6971 struct tx_bd *txbd; 6972 6973 txbd = &sc->tx_bd_chain[TX_PAGE(tx_prod)][TX_IDX(tx_prod)]; 6974 bce_dump_txbd(sc, tx_prod, txbd); 6975 tx_prod = TX_CHAIN_IDX(NEXT_TX_BD(tx_prod)); 6976 } 6977 6978 if_printf(ifp, 6979 "----------------------------" 6980 "----------------" 6981 "----------------------------\n"); 6982 } 6983 6984 6985 /****************************************************************************/ 6986 /* Prints out the rx chain. */ 6987 /* */ 6988 /* Returns: */ 6989 /* Nothing. */ 6990 /****************************************************************************/ 6991 static void 6992 bce_dump_rx_chain(struct bce_softc *sc, int rx_prod, int count) 6993 { 6994 struct ifnet *ifp = &sc->arpcom.ac_if; 6995 int i; 6996 6997 /* First some info about the tx_bd chain structure. 
*/ 6998 if_printf(ifp, 6999 "----------------------------" 7000 " rx_bd chain " 7001 "----------------------------\n"); 7002 7003 if_printf(ifp, "page size = 0x%08X, " 7004 "rx chain pages = 0x%08X\n", 7005 (uint32_t)BCM_PAGE_SIZE, (uint32_t)RX_PAGES); 7006 7007 if_printf(ifp, "rx_bd per page = 0x%08X, " 7008 "usable rx_bd per page = 0x%08X\n", 7009 (uint32_t)TOTAL_RX_BD_PER_PAGE, 7010 (uint32_t)USABLE_RX_BD_PER_PAGE); 7011 7012 if_printf(ifp, "total rx_bd = 0x%08X\n", (uint32_t)TOTAL_RX_BD); 7013 7014 if_printf(ifp, 7015 "----------------------------" 7016 " rx_bd data " 7017 "----------------------------\n"); 7018 7019 /* Now print out the rx_bd's themselves. */ 7020 for (i = 0; i < count; i++) { 7021 struct rx_bd *rxbd; 7022 7023 rxbd = &sc->rx_bd_chain[RX_PAGE(rx_prod)][RX_IDX(rx_prod)]; 7024 bce_dump_rxbd(sc, rx_prod, rxbd); 7025 rx_prod = RX_CHAIN_IDX(NEXT_RX_BD(rx_prod)); 7026 } 7027 7028 if_printf(ifp, 7029 "----------------------------" 7030 "----------------" 7031 "----------------------------\n"); 7032 } 7033 7034 7035 /****************************************************************************/ 7036 /* Prints out the status block from host memory. */ 7037 /* */ 7038 /* Returns: */ 7039 /* Nothing. 
*/ 7040 /****************************************************************************/ 7041 static void 7042 bce_dump_status_block(struct bce_softc *sc) 7043 { 7044 struct status_block *sblk = sc->status_block; 7045 struct ifnet *ifp = &sc->arpcom.ac_if; 7046 7047 if_printf(ifp, 7048 "----------------------------" 7049 " Status Block " 7050 "----------------------------\n"); 7051 7052 if_printf(ifp, " 0x%08X - attn_bits\n", sblk->status_attn_bits); 7053 7054 if_printf(ifp, " 0x%08X - attn_bits_ack\n", 7055 sblk->status_attn_bits_ack); 7056 7057 if_printf(ifp, "0x%04X(0x%04X) - rx_cons0\n", 7058 sblk->status_rx_quick_consumer_index0, 7059 (uint16_t)RX_CHAIN_IDX(sblk->status_rx_quick_consumer_index0)); 7060 7061 if_printf(ifp, "0x%04X(0x%04X) - tx_cons0\n", 7062 sblk->status_tx_quick_consumer_index0, 7063 (uint16_t)TX_CHAIN_IDX(sblk->status_tx_quick_consumer_index0)); 7064 7065 if_printf(ifp, " 0x%04X - status_idx\n", sblk->status_idx); 7066 7067 /* Theses indices are not used for normal L2 drivers. 
*/ 7068 if (sblk->status_rx_quick_consumer_index1) { 7069 if_printf(ifp, "0x%04X(0x%04X) - rx_cons1\n", 7070 sblk->status_rx_quick_consumer_index1, 7071 (uint16_t)RX_CHAIN_IDX(sblk->status_rx_quick_consumer_index1)); 7072 } 7073 7074 if (sblk->status_tx_quick_consumer_index1) { 7075 if_printf(ifp, "0x%04X(0x%04X) - tx_cons1\n", 7076 sblk->status_tx_quick_consumer_index1, 7077 (uint16_t)TX_CHAIN_IDX(sblk->status_tx_quick_consumer_index1)); 7078 } 7079 7080 if (sblk->status_rx_quick_consumer_index2) { 7081 if_printf(ifp, "0x%04X(0x%04X)- rx_cons2\n", 7082 sblk->status_rx_quick_consumer_index2, 7083 (uint16_t)RX_CHAIN_IDX(sblk->status_rx_quick_consumer_index2)); 7084 } 7085 7086 if (sblk->status_tx_quick_consumer_index2) { 7087 if_printf(ifp, "0x%04X(0x%04X) - tx_cons2\n", 7088 sblk->status_tx_quick_consumer_index2, 7089 (uint16_t)TX_CHAIN_IDX(sblk->status_tx_quick_consumer_index2)); 7090 } 7091 7092 if (sblk->status_rx_quick_consumer_index3) { 7093 if_printf(ifp, "0x%04X(0x%04X) - rx_cons3\n", 7094 sblk->status_rx_quick_consumer_index3, 7095 (uint16_t)RX_CHAIN_IDX(sblk->status_rx_quick_consumer_index3)); 7096 } 7097 7098 if (sblk->status_tx_quick_consumer_index3) { 7099 if_printf(ifp, "0x%04X(0x%04X) - tx_cons3\n", 7100 sblk->status_tx_quick_consumer_index3, 7101 (uint16_t)TX_CHAIN_IDX(sblk->status_tx_quick_consumer_index3)); 7102 } 7103 7104 if (sblk->status_rx_quick_consumer_index4 || 7105 sblk->status_rx_quick_consumer_index5) { 7106 if_printf(ifp, "rx_cons4 = 0x%08X, rx_cons5 = 0x%08X\n", 7107 sblk->status_rx_quick_consumer_index4, 7108 sblk->status_rx_quick_consumer_index5); 7109 } 7110 7111 if (sblk->status_rx_quick_consumer_index6 || 7112 sblk->status_rx_quick_consumer_index7) { 7113 if_printf(ifp, "rx_cons6 = 0x%08X, rx_cons7 = 0x%08X\n", 7114 sblk->status_rx_quick_consumer_index6, 7115 sblk->status_rx_quick_consumer_index7); 7116 } 7117 7118 if (sblk->status_rx_quick_consumer_index8 || 7119 sblk->status_rx_quick_consumer_index9) { 7120 if_printf(ifp, 
"rx_cons8 = 0x%08X, rx_cons9 = 0x%08X\n", 7121 sblk->status_rx_quick_consumer_index8, 7122 sblk->status_rx_quick_consumer_index9); 7123 } 7124 7125 if (sblk->status_rx_quick_consumer_index10 || 7126 sblk->status_rx_quick_consumer_index11) { 7127 if_printf(ifp, "rx_cons10 = 0x%08X, rx_cons11 = 0x%08X\n", 7128 sblk->status_rx_quick_consumer_index10, 7129 sblk->status_rx_quick_consumer_index11); 7130 } 7131 7132 if (sblk->status_rx_quick_consumer_index12 || 7133 sblk->status_rx_quick_consumer_index13) { 7134 if_printf(ifp, "rx_cons12 = 0x%08X, rx_cons13 = 0x%08X\n", 7135 sblk->status_rx_quick_consumer_index12, 7136 sblk->status_rx_quick_consumer_index13); 7137 } 7138 7139 if (sblk->status_rx_quick_consumer_index14 || 7140 sblk->status_rx_quick_consumer_index15) { 7141 if_printf(ifp, "rx_cons14 = 0x%08X, rx_cons15 = 0x%08X\n", 7142 sblk->status_rx_quick_consumer_index14, 7143 sblk->status_rx_quick_consumer_index15); 7144 } 7145 7146 if (sblk->status_completion_producer_index || 7147 sblk->status_cmd_consumer_index) { 7148 if_printf(ifp, "com_prod = 0x%08X, cmd_cons = 0x%08X\n", 7149 sblk->status_completion_producer_index, 7150 sblk->status_cmd_consumer_index); 7151 } 7152 7153 if_printf(ifp, 7154 "----------------------------" 7155 "----------------" 7156 "----------------------------\n"); 7157 } 7158 7159 7160 /****************************************************************************/ 7161 /* Prints out the statistics block. */ 7162 /* */ 7163 /* Returns: */ 7164 /* Nothing. 
*/ 7165 /****************************************************************************/ 7166 static void 7167 bce_dump_stats_block(struct bce_softc *sc) 7168 { 7169 struct statistics_block *sblk = sc->stats_block; 7170 struct ifnet *ifp = &sc->arpcom.ac_if; 7171 7172 if_printf(ifp, 7173 "---------------" 7174 " Stats Block (All Stats Not Shown Are 0) " 7175 "---------------\n"); 7176 7177 if (sblk->stat_IfHCInOctets_hi || sblk->stat_IfHCInOctets_lo) { 7178 if_printf(ifp, "0x%08X:%08X : IfHcInOctets\n", 7179 sblk->stat_IfHCInOctets_hi, 7180 sblk->stat_IfHCInOctets_lo); 7181 } 7182 7183 if (sblk->stat_IfHCInBadOctets_hi || sblk->stat_IfHCInBadOctets_lo) { 7184 if_printf(ifp, "0x%08X:%08X : IfHcInBadOctets\n", 7185 sblk->stat_IfHCInBadOctets_hi, 7186 sblk->stat_IfHCInBadOctets_lo); 7187 } 7188 7189 if (sblk->stat_IfHCOutOctets_hi || sblk->stat_IfHCOutOctets_lo) { 7190 if_printf(ifp, "0x%08X:%08X : IfHcOutOctets\n", 7191 sblk->stat_IfHCOutOctets_hi, 7192 sblk->stat_IfHCOutOctets_lo); 7193 } 7194 7195 if (sblk->stat_IfHCOutBadOctets_hi || sblk->stat_IfHCOutBadOctets_lo) { 7196 if_printf(ifp, "0x%08X:%08X : IfHcOutBadOctets\n", 7197 sblk->stat_IfHCOutBadOctets_hi, 7198 sblk->stat_IfHCOutBadOctets_lo); 7199 } 7200 7201 if (sblk->stat_IfHCInUcastPkts_hi || sblk->stat_IfHCInUcastPkts_lo) { 7202 if_printf(ifp, "0x%08X:%08X : IfHcInUcastPkts\n", 7203 sblk->stat_IfHCInUcastPkts_hi, 7204 sblk->stat_IfHCInUcastPkts_lo); 7205 } 7206 7207 if (sblk->stat_IfHCInBroadcastPkts_hi || 7208 sblk->stat_IfHCInBroadcastPkts_lo) { 7209 if_printf(ifp, "0x%08X:%08X : IfHcInBroadcastPkts\n", 7210 sblk->stat_IfHCInBroadcastPkts_hi, 7211 sblk->stat_IfHCInBroadcastPkts_lo); 7212 } 7213 7214 if (sblk->stat_IfHCInMulticastPkts_hi || 7215 sblk->stat_IfHCInMulticastPkts_lo) { 7216 if_printf(ifp, "0x%08X:%08X : IfHcInMulticastPkts\n", 7217 sblk->stat_IfHCInMulticastPkts_hi, 7218 sblk->stat_IfHCInMulticastPkts_lo); 7219 } 7220 7221 if (sblk->stat_IfHCOutUcastPkts_hi || sblk->stat_IfHCOutUcastPkts_lo) { 
7222 if_printf(ifp, "0x%08X:%08X : IfHcOutUcastPkts\n", 7223 sblk->stat_IfHCOutUcastPkts_hi, 7224 sblk->stat_IfHCOutUcastPkts_lo); 7225 } 7226 7227 if (sblk->stat_IfHCOutBroadcastPkts_hi || 7228 sblk->stat_IfHCOutBroadcastPkts_lo) { 7229 if_printf(ifp, "0x%08X:%08X : IfHcOutBroadcastPkts\n", 7230 sblk->stat_IfHCOutBroadcastPkts_hi, 7231 sblk->stat_IfHCOutBroadcastPkts_lo); 7232 } 7233 7234 if (sblk->stat_IfHCOutMulticastPkts_hi || 7235 sblk->stat_IfHCOutMulticastPkts_lo) { 7236 if_printf(ifp, "0x%08X:%08X : IfHcOutMulticastPkts\n", 7237 sblk->stat_IfHCOutMulticastPkts_hi, 7238 sblk->stat_IfHCOutMulticastPkts_lo); 7239 } 7240 7241 if (sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors) { 7242 if_printf(ifp, " 0x%08X : " 7243 "emac_tx_stat_dot3statsinternalmactransmiterrors\n", 7244 sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors); 7245 } 7246 7247 if (sblk->stat_Dot3StatsCarrierSenseErrors) { 7248 if_printf(ifp, " 0x%08X : " 7249 "Dot3StatsCarrierSenseErrors\n", 7250 sblk->stat_Dot3StatsCarrierSenseErrors); 7251 } 7252 7253 if (sblk->stat_Dot3StatsFCSErrors) { 7254 if_printf(ifp, " 0x%08X : Dot3StatsFCSErrors\n", 7255 sblk->stat_Dot3StatsFCSErrors); 7256 } 7257 7258 if (sblk->stat_Dot3StatsAlignmentErrors) { 7259 if_printf(ifp, " 0x%08X : Dot3StatsAlignmentErrors\n", 7260 sblk->stat_Dot3StatsAlignmentErrors); 7261 } 7262 7263 if (sblk->stat_Dot3StatsSingleCollisionFrames) { 7264 if_printf(ifp, " 0x%08X : " 7265 "Dot3StatsSingleCollisionFrames\n", 7266 sblk->stat_Dot3StatsSingleCollisionFrames); 7267 } 7268 7269 if (sblk->stat_Dot3StatsMultipleCollisionFrames) { 7270 if_printf(ifp, " 0x%08X : " 7271 "Dot3StatsMultipleCollisionFrames\n", 7272 sblk->stat_Dot3StatsMultipleCollisionFrames); 7273 } 7274 7275 if (sblk->stat_Dot3StatsDeferredTransmissions) { 7276 if_printf(ifp, " 0x%08X : " 7277 "Dot3StatsDeferredTransmissions\n", 7278 sblk->stat_Dot3StatsDeferredTransmissions); 7279 } 7280 7281 if (sblk->stat_Dot3StatsExcessiveCollisions) { 7282 
if_printf(ifp, " 0x%08X : " 7283 "Dot3StatsExcessiveCollisions\n", 7284 sblk->stat_Dot3StatsExcessiveCollisions); 7285 } 7286 7287 if (sblk->stat_Dot3StatsLateCollisions) { 7288 if_printf(ifp, " 0x%08X : Dot3StatsLateCollisions\n", 7289 sblk->stat_Dot3StatsLateCollisions); 7290 } 7291 7292 if (sblk->stat_EtherStatsCollisions) { 7293 if_printf(ifp, " 0x%08X : EtherStatsCollisions\n", 7294 sblk->stat_EtherStatsCollisions); 7295 } 7296 7297 if (sblk->stat_EtherStatsFragments) { 7298 if_printf(ifp, " 0x%08X : EtherStatsFragments\n", 7299 sblk->stat_EtherStatsFragments); 7300 } 7301 7302 if (sblk->stat_EtherStatsJabbers) { 7303 if_printf(ifp, " 0x%08X : EtherStatsJabbers\n", 7304 sblk->stat_EtherStatsJabbers); 7305 } 7306 7307 if (sblk->stat_EtherStatsUndersizePkts) { 7308 if_printf(ifp, " 0x%08X : EtherStatsUndersizePkts\n", 7309 sblk->stat_EtherStatsUndersizePkts); 7310 } 7311 7312 if (sblk->stat_EtherStatsOverrsizePkts) { 7313 if_printf(ifp, " 0x%08X : EtherStatsOverrsizePkts\n", 7314 sblk->stat_EtherStatsOverrsizePkts); 7315 } 7316 7317 if (sblk->stat_EtherStatsPktsRx64Octets) { 7318 if_printf(ifp, " 0x%08X : EtherStatsPktsRx64Octets\n", 7319 sblk->stat_EtherStatsPktsRx64Octets); 7320 } 7321 7322 if (sblk->stat_EtherStatsPktsRx65Octetsto127Octets) { 7323 if_printf(ifp, " 0x%08X : " 7324 "EtherStatsPktsRx65Octetsto127Octets\n", 7325 sblk->stat_EtherStatsPktsRx65Octetsto127Octets); 7326 } 7327 7328 if (sblk->stat_EtherStatsPktsRx128Octetsto255Octets) { 7329 if_printf(ifp, " 0x%08X : " 7330 "EtherStatsPktsRx128Octetsto255Octets\n", 7331 sblk->stat_EtherStatsPktsRx128Octetsto255Octets); 7332 } 7333 7334 if (sblk->stat_EtherStatsPktsRx256Octetsto511Octets) { 7335 if_printf(ifp, " 0x%08X : " 7336 "EtherStatsPktsRx256Octetsto511Octets\n", 7337 sblk->stat_EtherStatsPktsRx256Octetsto511Octets); 7338 } 7339 7340 if (sblk->stat_EtherStatsPktsRx512Octetsto1023Octets) { 7341 if_printf(ifp, " 0x%08X : " 7342 "EtherStatsPktsRx512Octetsto1023Octets\n", 7343 
sblk->stat_EtherStatsPktsRx512Octetsto1023Octets); 7344 } 7345 7346 if (sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets) { 7347 if_printf(ifp, " 0x%08X : " 7348 "EtherStatsPktsRx1024Octetsto1522Octets\n", 7349 sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets); 7350 } 7351 7352 if (sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets) { 7353 if_printf(ifp, " 0x%08X : " 7354 "EtherStatsPktsRx1523Octetsto9022Octets\n", 7355 sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets); 7356 } 7357 7358 if (sblk->stat_EtherStatsPktsTx64Octets) { 7359 if_printf(ifp, " 0x%08X : EtherStatsPktsTx64Octets\n", 7360 sblk->stat_EtherStatsPktsTx64Octets); 7361 } 7362 7363 if (sblk->stat_EtherStatsPktsTx65Octetsto127Octets) { 7364 if_printf(ifp, " 0x%08X : " 7365 "EtherStatsPktsTx65Octetsto127Octets\n", 7366 sblk->stat_EtherStatsPktsTx65Octetsto127Octets); 7367 } 7368 7369 if (sblk->stat_EtherStatsPktsTx128Octetsto255Octets) { 7370 if_printf(ifp, " 0x%08X : " 7371 "EtherStatsPktsTx128Octetsto255Octets\n", 7372 sblk->stat_EtherStatsPktsTx128Octetsto255Octets); 7373 } 7374 7375 if (sblk->stat_EtherStatsPktsTx256Octetsto511Octets) { 7376 if_printf(ifp, " 0x%08X : " 7377 "EtherStatsPktsTx256Octetsto511Octets\n", 7378 sblk->stat_EtherStatsPktsTx256Octetsto511Octets); 7379 } 7380 7381 if (sblk->stat_EtherStatsPktsTx512Octetsto1023Octets) { 7382 if_printf(ifp, " 0x%08X : " 7383 "EtherStatsPktsTx512Octetsto1023Octets\n", 7384 sblk->stat_EtherStatsPktsTx512Octetsto1023Octets); 7385 } 7386 7387 if (sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets) { 7388 if_printf(ifp, " 0x%08X : " 7389 "EtherStatsPktsTx1024Octetsto1522Octets\n", 7390 sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets); 7391 } 7392 7393 if (sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets) { 7394 if_printf(ifp, " 0x%08X : " 7395 "EtherStatsPktsTx1523Octetsto9022Octets\n", 7396 sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets); 7397 } 7398 7399 if (sblk->stat_XonPauseFramesReceived) { 7400 if_printf(ifp, " 0x%08X : 
XonPauseFramesReceived\n", 7401 sblk->stat_XonPauseFramesReceived); 7402 } 7403 7404 if (sblk->stat_XoffPauseFramesReceived) { 7405 if_printf(ifp, " 0x%08X : XoffPauseFramesReceived\n", 7406 sblk->stat_XoffPauseFramesReceived); 7407 } 7408 7409 if (sblk->stat_OutXonSent) { 7410 if_printf(ifp, " 0x%08X : OutXoffSent\n", 7411 sblk->stat_OutXonSent); 7412 } 7413 7414 if (sblk->stat_OutXoffSent) { 7415 if_printf(ifp, " 0x%08X : OutXoffSent\n", 7416 sblk->stat_OutXoffSent); 7417 } 7418 7419 if (sblk->stat_FlowControlDone) { 7420 if_printf(ifp, " 0x%08X : FlowControlDone\n", 7421 sblk->stat_FlowControlDone); 7422 } 7423 7424 if (sblk->stat_MacControlFramesReceived) { 7425 if_printf(ifp, " 0x%08X : MacControlFramesReceived\n", 7426 sblk->stat_MacControlFramesReceived); 7427 } 7428 7429 if (sblk->stat_XoffStateEntered) { 7430 if_printf(ifp, " 0x%08X : XoffStateEntered\n", 7431 sblk->stat_XoffStateEntered); 7432 } 7433 7434 if (sblk->stat_IfInFramesL2FilterDiscards) { 7435 if_printf(ifp, " 0x%08X : IfInFramesL2FilterDiscards\n", sblk->stat_IfInFramesL2FilterDiscards); 7436 } 7437 7438 if (sblk->stat_IfInRuleCheckerDiscards) { 7439 if_printf(ifp, " 0x%08X : IfInRuleCheckerDiscards\n", 7440 sblk->stat_IfInRuleCheckerDiscards); 7441 } 7442 7443 if (sblk->stat_IfInFTQDiscards) { 7444 if_printf(ifp, " 0x%08X : IfInFTQDiscards\n", 7445 sblk->stat_IfInFTQDiscards); 7446 } 7447 7448 if (sblk->stat_IfInMBUFDiscards) { 7449 if_printf(ifp, " 0x%08X : IfInMBUFDiscards\n", 7450 sblk->stat_IfInMBUFDiscards); 7451 } 7452 7453 if (sblk->stat_IfInRuleCheckerP4Hit) { 7454 if_printf(ifp, " 0x%08X : IfInRuleCheckerP4Hit\n", 7455 sblk->stat_IfInRuleCheckerP4Hit); 7456 } 7457 7458 if (sblk->stat_CatchupInRuleCheckerDiscards) { 7459 if_printf(ifp, " 0x%08X : " 7460 "CatchupInRuleCheckerDiscards\n", 7461 sblk->stat_CatchupInRuleCheckerDiscards); 7462 } 7463 7464 if (sblk->stat_CatchupInFTQDiscards) { 7465 if_printf(ifp, " 0x%08X : CatchupInFTQDiscards\n", 7466 sblk->stat_CatchupInFTQDiscards); 
7467 } 7468 7469 if (sblk->stat_CatchupInMBUFDiscards) { 7470 if_printf(ifp, " 0x%08X : CatchupInMBUFDiscards\n", 7471 sblk->stat_CatchupInMBUFDiscards); 7472 } 7473 7474 if (sblk->stat_CatchupInRuleCheckerP4Hit) { 7475 if_printf(ifp, " 0x%08X : CatchupInRuleCheckerP4Hit\n", 7476 sblk->stat_CatchupInRuleCheckerP4Hit); 7477 } 7478 7479 if_printf(ifp, 7480 "----------------------------" 7481 "----------------" 7482 "----------------------------\n"); 7483 } 7484 7485 7486 /****************************************************************************/ 7487 /* Prints out a summary of the driver state. */ 7488 /* */ 7489 /* Returns: */ 7490 /* Nothing. */ 7491 /****************************************************************************/ 7492 static void 7493 bce_dump_driver_state(struct bce_softc *sc) 7494 { 7495 struct ifnet *ifp = &sc->arpcom.ac_if; 7496 uint32_t val_hi, val_lo; 7497 7498 if_printf(ifp, 7499 "-----------------------------" 7500 " Driver State " 7501 "-----------------------------\n"); 7502 7503 val_hi = BCE_ADDR_HI(sc); 7504 val_lo = BCE_ADDR_LO(sc); 7505 if_printf(ifp, "0x%08X:%08X - (sc) driver softc structure " 7506 "virtual address\n", val_hi, val_lo); 7507 7508 val_hi = BCE_ADDR_HI(sc->status_block); 7509 val_lo = BCE_ADDR_LO(sc->status_block); 7510 if_printf(ifp, "0x%08X:%08X - (sc->status_block) status block " 7511 "virtual address\n", val_hi, val_lo); 7512 7513 val_hi = BCE_ADDR_HI(sc->stats_block); 7514 val_lo = BCE_ADDR_LO(sc->stats_block); 7515 if_printf(ifp, "0x%08X:%08X - (sc->stats_block) statistics block " 7516 "virtual address\n", val_hi, val_lo); 7517 7518 val_hi = BCE_ADDR_HI(sc->tx_bd_chain); 7519 val_lo = BCE_ADDR_LO(sc->tx_bd_chain); 7520 if_printf(ifp, "0x%08X:%08X - (sc->tx_bd_chain) tx_bd chain " 7521 "virtual adddress\n", val_hi, val_lo); 7522 7523 val_hi = BCE_ADDR_HI(sc->rx_bd_chain); 7524 val_lo = BCE_ADDR_LO(sc->rx_bd_chain); 7525 if_printf(ifp, "0x%08X:%08X - (sc->rx_bd_chain) rx_bd chain " 7526 "virtual address\n", 
val_hi, val_lo); 7527 7528 val_hi = BCE_ADDR_HI(sc->tx_mbuf_ptr); 7529 val_lo = BCE_ADDR_LO(sc->tx_mbuf_ptr); 7530 if_printf(ifp, "0x%08X:%08X - (sc->tx_mbuf_ptr) tx mbuf chain " 7531 "virtual address\n", val_hi, val_lo); 7532 7533 val_hi = BCE_ADDR_HI(sc->rx_mbuf_ptr); 7534 val_lo = BCE_ADDR_LO(sc->rx_mbuf_ptr); 7535 if_printf(ifp, "0x%08X:%08X - (sc->rx_mbuf_ptr) rx mbuf chain " 7536 "virtual address\n", val_hi, val_lo); 7537 7538 if_printf(ifp, " 0x%08X - (sc->interrupts_generated) " 7539 "h/w intrs\n", sc->interrupts_generated); 7540 7541 if_printf(ifp, " 0x%08X - (sc->rx_interrupts) " 7542 "rx interrupts handled\n", sc->rx_interrupts); 7543 7544 if_printf(ifp, " 0x%08X - (sc->tx_interrupts) " 7545 "tx interrupts handled\n", sc->tx_interrupts); 7546 7547 if_printf(ifp, " 0x%08X - (sc->last_status_idx) " 7548 "status block index\n", sc->last_status_idx); 7549 7550 if_printf(ifp, " 0x%04X(0x%04X) - (sc->tx_prod) " 7551 "tx producer index\n", 7552 sc->tx_prod, (uint16_t)TX_CHAIN_IDX(sc->tx_prod)); 7553 7554 if_printf(ifp, " 0x%04X(0x%04X) - (sc->tx_cons) " 7555 "tx consumer index\n", 7556 sc->tx_cons, (uint16_t)TX_CHAIN_IDX(sc->tx_cons)); 7557 7558 if_printf(ifp, " 0x%08X - (sc->tx_prod_bseq) " 7559 "tx producer bseq index\n", sc->tx_prod_bseq); 7560 7561 if_printf(ifp, " 0x%04X(0x%04X) - (sc->rx_prod) " 7562 "rx producer index\n", 7563 sc->rx_prod, (uint16_t)RX_CHAIN_IDX(sc->rx_prod)); 7564 7565 if_printf(ifp, " 0x%04X(0x%04X) - (sc->rx_cons) " 7566 "rx consumer index\n", 7567 sc->rx_cons, (uint16_t)RX_CHAIN_IDX(sc->rx_cons)); 7568 7569 if_printf(ifp, " 0x%08X - (sc->rx_prod_bseq) " 7570 "rx producer bseq index\n", sc->rx_prod_bseq); 7571 7572 if_printf(ifp, " 0x%08X - (sc->rx_mbuf_alloc) " 7573 "rx mbufs allocated\n", sc->rx_mbuf_alloc); 7574 7575 if_printf(ifp, " 0x%08X - (sc->free_rx_bd) " 7576 "free rx_bd's\n", sc->free_rx_bd); 7577 7578 if_printf(ifp, "0x%08X/%08X - (sc->rx_low_watermark) rx " 7579 "low watermark\n", sc->rx_low_watermark, sc->max_rx_bd); 
7580 7581 if_printf(ifp, " 0x%08X - (sc->txmbuf_alloc) " 7582 "tx mbufs allocated\n", sc->tx_mbuf_alloc); 7583 7584 if_printf(ifp, " 0x%08X - (sc->rx_mbuf_alloc) " 7585 "rx mbufs allocated\n", sc->rx_mbuf_alloc); 7586 7587 if_printf(ifp, " 0x%08X - (sc->used_tx_bd) used tx_bd's\n", 7588 sc->used_tx_bd); 7589 7590 if_printf(ifp, "0x%08X/%08X - (sc->tx_hi_watermark) tx hi watermark\n", 7591 sc->tx_hi_watermark, sc->max_tx_bd); 7592 7593 if_printf(ifp, " 0x%08X - (sc->mbuf_alloc_failed) " 7594 "failed mbuf alloc\n", sc->mbuf_alloc_failed); 7595 7596 if_printf(ifp, 7597 "----------------------------" 7598 "----------------" 7599 "----------------------------\n"); 7600 } 7601 7602 7603 /****************************************************************************/ 7604 /* Prints out the hardware state through a summary of important registers, */ 7605 /* followed by a complete register dump. */ 7606 /* */ 7607 /* Returns: */ 7608 /* Nothing. */ 7609 /****************************************************************************/ 7610 static void 7611 bce_dump_hw_state(struct bce_softc *sc) 7612 { 7613 struct ifnet *ifp = &sc->arpcom.ac_if; 7614 uint32_t val1; 7615 int i; 7616 7617 if_printf(ifp, 7618 "----------------------------" 7619 " Hardware State " 7620 "----------------------------\n"); 7621 7622 if_printf(ifp, "%s - bootcode version\n", sc->bce_bc_ver); 7623 7624 val1 = REG_RD(sc, BCE_MISC_ENABLE_STATUS_BITS); 7625 if_printf(ifp, "0x%08X - (0x%06X) misc_enable_status_bits\n", 7626 val1, BCE_MISC_ENABLE_STATUS_BITS); 7627 7628 val1 = REG_RD(sc, BCE_DMA_STATUS); 7629 if_printf(ifp, "0x%08X - (0x%04X) dma_status\n", val1, BCE_DMA_STATUS); 7630 7631 val1 = REG_RD(sc, BCE_CTX_STATUS); 7632 if_printf(ifp, "0x%08X - (0x%04X) ctx_status\n", val1, BCE_CTX_STATUS); 7633 7634 val1 = REG_RD(sc, BCE_EMAC_STATUS); 7635 if_printf(ifp, "0x%08X - (0x%04X) emac_status\n", 7636 val1, BCE_EMAC_STATUS); 7637 7638 val1 = REG_RD(sc, BCE_RPM_STATUS); 7639 if_printf(ifp, "0x%08X - (0x%04X) 
rpm_status\n", val1, BCE_RPM_STATUS); 7640 7641 val1 = REG_RD(sc, BCE_TBDR_STATUS); 7642 if_printf(ifp, "0x%08X - (0x%04X) tbdr_status\n", 7643 val1, BCE_TBDR_STATUS); 7644 7645 val1 = REG_RD(sc, BCE_TDMA_STATUS); 7646 if_printf(ifp, "0x%08X - (0x%04X) tdma_status\n", 7647 val1, BCE_TDMA_STATUS); 7648 7649 val1 = REG_RD(sc, BCE_HC_STATUS); 7650 if_printf(ifp, "0x%08X - (0x%06X) hc_status\n", val1, BCE_HC_STATUS); 7651 7652 val1 = REG_RD_IND(sc, BCE_TXP_CPU_STATE); 7653 if_printf(ifp, "0x%08X - (0x%06X) txp_cpu_state\n", 7654 val1, BCE_TXP_CPU_STATE); 7655 7656 val1 = REG_RD_IND(sc, BCE_TPAT_CPU_STATE); 7657 if_printf(ifp, "0x%08X - (0x%06X) tpat_cpu_state\n", 7658 val1, BCE_TPAT_CPU_STATE); 7659 7660 val1 = REG_RD_IND(sc, BCE_RXP_CPU_STATE); 7661 if_printf(ifp, "0x%08X - (0x%06X) rxp_cpu_state\n", 7662 val1, BCE_RXP_CPU_STATE); 7663 7664 val1 = REG_RD_IND(sc, BCE_COM_CPU_STATE); 7665 if_printf(ifp, "0x%08X - (0x%06X) com_cpu_state\n", 7666 val1, BCE_COM_CPU_STATE); 7667 7668 val1 = REG_RD_IND(sc, BCE_MCP_CPU_STATE); 7669 if_printf(ifp, "0x%08X - (0x%06X) mcp_cpu_state\n", 7670 val1, BCE_MCP_CPU_STATE); 7671 7672 val1 = REG_RD_IND(sc, BCE_CP_CPU_STATE); 7673 if_printf(ifp, "0x%08X - (0x%06X) cp_cpu_state\n", 7674 val1, BCE_CP_CPU_STATE); 7675 7676 if_printf(ifp, 7677 "----------------------------" 7678 "----------------" 7679 "----------------------------\n"); 7680 7681 if_printf(ifp, 7682 "----------------------------" 7683 " Register Dump " 7684 "----------------------------\n"); 7685 7686 for (i = 0x400; i < 0x8000; i += 0x10) { 7687 if_printf(ifp, "0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n", i, 7688 REG_RD(sc, i), 7689 REG_RD(sc, i + 0x4), 7690 REG_RD(sc, i + 0x8), 7691 REG_RD(sc, i + 0xc)); 7692 } 7693 7694 if_printf(ifp, 7695 "----------------------------" 7696 "----------------" 7697 "----------------------------\n"); 7698 } 7699 7700 7701 /****************************************************************************/ 7702 /* Prints out the TXP state. 
 */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_dump_txp_state(struct bce_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t val1;
	int i;

	if_printf(ifp,
	"----------------------------"
	" TXP State "
	"----------------------------\n");

	/* TXP processor control registers (indirect access). */
	val1 = REG_RD_IND(sc, BCE_TXP_CPU_MODE);
	if_printf(ifp, "0x%08X - (0x%06X) txp_cpu_mode\n",
	    val1, BCE_TXP_CPU_MODE);

	val1 = REG_RD_IND(sc, BCE_TXP_CPU_STATE);
	if_printf(ifp, "0x%08X - (0x%06X) txp_cpu_state\n",
	    val1, BCE_TXP_CPU_STATE);

	val1 = REG_RD_IND(sc, BCE_TXP_CPU_EVENT_MASK);
	if_printf(ifp, "0x%08X - (0x%06X) txp_cpu_event_mask\n",
	    val1, BCE_TXP_CPU_EVENT_MASK);

	if_printf(ifp,
	"----------------------------"
	" Register Dump "
	"----------------------------\n");

	for (i = BCE_TXP_CPU_MODE; i < 0x68000; i += 0x10) {
		/*
		 * Skip the big blank spaces.
		 * NOTE(review): with i always below 0x68000 the test
		 * "i < 0x454000" is vacuously true, so only offsets
		 * 0x60000-0x67ff0 are actually printed — confirm that this
		 * is the intended skip window.
		 */
		if (i < 0x454000 && i > 0x5ffff) {
			if_printf(ifp, "0x%04X: "
			    "0x%08X 0x%08X 0x%08X 0x%08X\n", i,
			    REG_RD_IND(sc, i),
			    REG_RD_IND(sc, i + 0x4),
			    REG_RD_IND(sc, i + 0x8),
			    REG_RD_IND(sc, i + 0xc));
		}
	}

	if_printf(ifp,
	"----------------------------"
	"----------------"
	"----------------------------\n");
}


/****************************************************************************/
/* Prints out the RXP state.                                                */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
 */
/****************************************************************************/
static void
bce_dump_rxp_state(struct bce_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t val1;
	int i;

	if_printf(ifp,
	"----------------------------"
	" RXP State "
	"----------------------------\n");

	/* RXP processor control registers (indirect access). */
	val1 = REG_RD_IND(sc, BCE_RXP_CPU_MODE);
	if_printf(ifp, "0x%08X - (0x%06X) rxp_cpu_mode\n",
	    val1, BCE_RXP_CPU_MODE);

	val1 = REG_RD_IND(sc, BCE_RXP_CPU_STATE);
	if_printf(ifp, "0x%08X - (0x%06X) rxp_cpu_state\n",
	    val1, BCE_RXP_CPU_STATE);

	val1 = REG_RD_IND(sc, BCE_RXP_CPU_EVENT_MASK);
	if_printf(ifp, "0x%08X - (0x%06X) rxp_cpu_event_mask\n",
	    val1, BCE_RXP_CPU_EVENT_MASK);

	if_printf(ifp,
	"----------------------------"
	" Register Dump "
	"----------------------------\n");

	for (i = BCE_RXP_CPU_MODE; i < 0xe8fff; i += 0x10) {
		/*
		 * Skip the big blank spaces.
		 * NOTE(review): "i < 0xc5400 && i > 0xdffff" can never be
		 * true (0xc5400 < 0xdffff), so this loop prints nothing.
		 * The bounds look mistyped — confirm the intended skip
		 * window before relying on this dump.
		 */
		if (i < 0xc5400 && i > 0xdffff) {
			if_printf(ifp, "0x%04X: "
			    "0x%08X 0x%08X 0x%08X 0x%08X\n", i,
			    REG_RD_IND(sc, i),
			    REG_RD_IND(sc, i + 0x4),
			    REG_RD_IND(sc, i + 0x8),
			    REG_RD_IND(sc, i + 0xc));
		}
	}

	if_printf(ifp,
	"----------------------------"
	"----------------"
	"----------------------------\n");
}


/****************************************************************************/
/* Prints out the TPAT state.                                               */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
 */
/****************************************************************************/
static void
bce_dump_tpat_state(struct bce_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t val1;
	int i;

	if_printf(ifp,
	"----------------------------"
	" TPAT State "
	"----------------------------\n");

	/* TPAT processor control registers (indirect access). */
	val1 = REG_RD_IND(sc, BCE_TPAT_CPU_MODE);
	if_printf(ifp, "0x%08X - (0x%06X) tpat_cpu_mode\n",
	    val1, BCE_TPAT_CPU_MODE);

	val1 = REG_RD_IND(sc, BCE_TPAT_CPU_STATE);
	if_printf(ifp, "0x%08X - (0x%06X) tpat_cpu_state\n",
	    val1, BCE_TPAT_CPU_STATE);

	val1 = REG_RD_IND(sc, BCE_TPAT_CPU_EVENT_MASK);
	if_printf(ifp, "0x%08X - (0x%06X) tpat_cpu_event_mask\n",
	    val1, BCE_TPAT_CPU_EVENT_MASK);

	if_printf(ifp,
	"----------------------------"
	" Register Dump "
	"----------------------------\n");

	for (i = BCE_TPAT_CPU_MODE; i < 0xa3fff; i += 0x10) {
		/*
		 * Skip the big blank spaces.
		 * NOTE(review): with i always below 0xa3fff the test
		 * "i < 0x854000" is vacuously true, so only offsets
		 * 0xa0000-0xa3ff0 are actually printed — confirm that this
		 * is the intended skip window.
		 */
		if (i < 0x854000 && i > 0x9ffff) {
			if_printf(ifp, "0x%04X: "
			    "0x%08X 0x%08X 0x%08X 0x%08X\n", i,
			    REG_RD_IND(sc, i),
			    REG_RD_IND(sc, i + 0x4),
			    REG_RD_IND(sc, i + 0x8),
			    REG_RD_IND(sc, i + 0xc));
		}
	}

	if_printf(ifp,
	"----------------------------"
	"----------------"
	"----------------------------\n");
}


/****************************************************************************/
/* Prints out the driver state and then enters the debugger.                */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
 */
/****************************************************************************/
static void
bce_breakpoint(struct bce_softc *sc)
{
#if 0
	bce_freeze_controller(sc);
#endif

	/* Dump both the driver's and the chip's view of the state. */
	bce_dump_driver_state(sc);
	bce_dump_status_block(sc);
	bce_dump_tx_chain(sc, 0, TOTAL_TX_BD);
	bce_dump_hw_state(sc);
	bce_dump_txp_state(sc);

#if 0
	bce_unfreeze_controller(sc);
#endif

	/* Call the debugger. */
	breakpoint();
}

#endif /* BCE_DEBUG */

/*
 * Sysctl handlers for the interrupt coalescing parameters.  Each wrapper
 * forwards to bce_sysctl_coal_change() with the softc field to update and
 * the BCE_COALMASK_* bit identifying which parameter changed.
 */
static int
bce_sysctl_tx_bds_int(SYSCTL_HANDLER_ARGS)
{
	struct bce_softc *sc = arg1;

	return bce_sysctl_coal_change(oidp, arg1, arg2, req,
	    &sc->bce_tx_quick_cons_trip_int,
	    BCE_COALMASK_TX_BDS_INT);
}

static int
bce_sysctl_tx_bds(SYSCTL_HANDLER_ARGS)
{
	struct bce_softc *sc = arg1;

	return bce_sysctl_coal_change(oidp, arg1, arg2, req,
	    &sc->bce_tx_quick_cons_trip,
	    BCE_COALMASK_TX_BDS);
}

static int
bce_sysctl_tx_ticks_int(SYSCTL_HANDLER_ARGS)
{
	struct bce_softc *sc = arg1;

	return bce_sysctl_coal_change(oidp, arg1, arg2, req,
	    &sc->bce_tx_ticks_int,
	    BCE_COALMASK_TX_TICKS_INT);
}

static int
bce_sysctl_tx_ticks(SYSCTL_HANDLER_ARGS)
{
	struct bce_softc *sc = arg1;

	return bce_sysctl_coal_change(oidp, arg1, arg2, req,
	    &sc->bce_tx_ticks,
	    BCE_COALMASK_TX_TICKS);
}

static int
bce_sysctl_rx_bds_int(SYSCTL_HANDLER_ARGS)
{
	struct bce_softc *sc = arg1;

	return bce_sysctl_coal_change(oidp, arg1, arg2, req,
	    &sc->bce_rx_quick_cons_trip_int,
	    BCE_COALMASK_RX_BDS_INT);
}

static int
bce_sysctl_rx_bds(SYSCTL_HANDLER_ARGS)
{
	struct bce_softc *sc = arg1;

	return bce_sysctl_coal_change(oidp, arg1, arg2, req,
	    &sc->bce_rx_quick_cons_trip,
	    BCE_COALMASK_RX_BDS);
}

static
int
bce_sysctl_rx_ticks_int(SYSCTL_HANDLER_ARGS)
{
	struct bce_softc *sc = arg1;

	return bce_sysctl_coal_change(oidp, arg1, arg2, req,
	    &sc->bce_rx_ticks_int,
	    BCE_COALMASK_RX_TICKS_INT);
}

static int
bce_sysctl_rx_ticks(SYSCTL_HANDLER_ARGS)
{
	struct bce_softc *sc = arg1;

	return bce_sysctl_coal_change(oidp, arg1, arg2, req,
	    &sc->bce_rx_ticks,
	    BCE_COALMASK_RX_TICKS);
}

/*
 * Common handler behind the coalescing sysctls.  Reads/updates *coal under
 * the interface serializer, rejects negative values with EINVAL, and
 * records the changed parameter in sc->bce_coalchg_mask so that
 * bce_coal_change() can push it to the hardware later.
 */
static int
bce_sysctl_coal_change(SYSCTL_HANDLER_ARGS, uint32_t *coal,
    uint32_t coalchg_mask)
{
	struct bce_softc *sc = arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error = 0, v;

	lwkt_serialize_enter(ifp->if_serializer);

	v = *coal;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (!error && req->newptr != NULL) {
		if (v < 0) {
			error = EINVAL;
		} else {
			/* Commit the new value and mark it pending. */
			*coal = v;
			sc->bce_coalchg_mask |= coalchg_mask;
		}
	}

	lwkt_serialize_exit(ifp->if_serializer);
	return error;
}

/*
 * Apply any pending coalescing parameter changes to the host coalescing
 * registers.  Caller must hold the interface serializer.  When the
 * interface is not running the pending mask is simply cleared — the
 * values are programmed by the regular init path instead.
 */
static void
bce_coal_change(struct bce_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if ((ifp->if_flags & IFF_RUNNING) == 0) {
		sc->bce_coalchg_mask = 0;
		return;
	}

	/* Each register packs the "during interrupt" value in the high
	 * 16 bits and the normal value in the low 16 bits. */
	if (sc->bce_coalchg_mask &
	    (BCE_COALMASK_TX_BDS | BCE_COALMASK_TX_BDS_INT)) {
		REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
		    (sc->bce_tx_quick_cons_trip_int << 16) |
		    sc->bce_tx_quick_cons_trip);
		if (bootverbose) {
			if_printf(ifp, "tx_bds %u, tx_bds_int %u\n",
			    sc->bce_tx_quick_cons_trip,
			    sc->bce_tx_quick_cons_trip_int);
		}
	}

	if (sc->bce_coalchg_mask &
	    (BCE_COALMASK_TX_TICKS | BCE_COALMASK_TX_TICKS_INT)) {
		REG_WR(sc, BCE_HC_TX_TICKS,
		    (sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks);
		if (bootverbose) {
			if_printf(ifp, "tx_ticks %u, tx_ticks_int %u\n",
			    sc->bce_tx_ticks, sc->bce_tx_ticks_int);
		}
	}

	if (sc->bce_coalchg_mask &
	    (BCE_COALMASK_RX_BDS | BCE_COALMASK_RX_BDS_INT)) {
		REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
		    (sc->bce_rx_quick_cons_trip_int << 16) |
		    sc->bce_rx_quick_cons_trip);
		if (bootverbose) {
			if_printf(ifp, "rx_bds %u, rx_bds_int %u\n",
			    sc->bce_rx_quick_cons_trip,
			    sc->bce_rx_quick_cons_trip_int);
		}
	}

	if (sc->bce_coalchg_mask &
	    (BCE_COALMASK_RX_TICKS | BCE_COALMASK_RX_TICKS_INT)) {
		REG_WR(sc, BCE_HC_RX_TICKS,
		    (sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks);
		if (bootverbose) {
			if_printf(ifp, "rx_ticks %u, rx_ticks_int %u\n",
			    sc->bce_rx_ticks, sc->bce_rx_ticks_int);
		}
	}

	sc->bce_coalchg_mask = 0;
}