/*-
 * Copyright (c) 2006-2007 Broadcom Corporation
 *	David Christensen <davidch@broadcom.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Broadcom Corporation nor the name of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written consent.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
29 * 30 * $FreeBSD: src/sys/dev/bce/if_bce.c,v 1.31 2007/05/16 23:34:11 davidch Exp $ 31 */ 32 33 /* 34 * The following controllers are supported by this driver: 35 * BCM5706C A2, A3 36 * BCM5706S A2, A3 37 * BCM5708C B1, B2 38 * BCM5708S B1, B2 39 * BCM5709C A1, C0 40 * BCM5716 C0 41 * 42 * The following controllers are not supported by this driver: 43 * BCM5706C A0, A1 44 * BCM5706S A0, A1 45 * BCM5708C A0, B0 46 * BCM5708S A0, B0 47 * BCM5709C A0, B0, B1 48 * BCM5709S A0, A1, B0, B1, B2, C0 49 */ 50 51 #include "opt_bce.h" 52 #include "opt_polling.h" 53 54 #include <sys/param.h> 55 #include <sys/bus.h> 56 #include <sys/endian.h> 57 #include <sys/kernel.h> 58 #include <sys/interrupt.h> 59 #include <sys/mbuf.h> 60 #include <sys/malloc.h> 61 #include <sys/queue.h> 62 #ifdef BCE_DEBUG 63 #include <sys/random.h> 64 #endif 65 #include <sys/rman.h> 66 #include <sys/serialize.h> 67 #include <sys/socket.h> 68 #include <sys/sockio.h> 69 #include <sys/sysctl.h> 70 71 #include <netinet/ip.h> 72 #include <netinet/tcp.h> 73 74 #include <net/bpf.h> 75 #include <net/ethernet.h> 76 #include <net/if.h> 77 #include <net/if_arp.h> 78 #include <net/if_dl.h> 79 #include <net/if_media.h> 80 #include <net/if_types.h> 81 #include <net/ifq_var.h> 82 #include <net/vlan/if_vlan_var.h> 83 #include <net/vlan/if_vlan_ether.h> 84 85 #include <dev/netif/mii_layer/mii.h> 86 #include <dev/netif/mii_layer/miivar.h> 87 #include <dev/netif/mii_layer/brgphyreg.h> 88 89 #include <bus/pci/pcireg.h> 90 #include <bus/pci/pcivar.h> 91 92 #include "miibus_if.h" 93 94 #include <dev/netif/bce/if_bcereg.h> 95 #include <dev/netif/bce/if_bcefw.h> 96 97 #define BCE_MSI_CKINTVL ((10 * hz) / 1000) /* 10ms */ 98 99 /****************************************************************************/ 100 /* BCE Debug Options */ 101 /****************************************************************************/ 102 #ifdef BCE_DEBUG 103 104 static uint32_t bce_debug = BCE_WARN; 105 106 /* 107 * 0 = Never 108 * 1 = 1 in 
2,147,483,648 109 * 256 = 1 in 8,388,608 110 * 2048 = 1 in 1,048,576 111 * 65536 = 1 in 32,768 112 * 1048576 = 1 in 2,048 113 * 268435456 = 1 in 8 114 * 536870912 = 1 in 4 115 * 1073741824 = 1 in 2 116 * 117 * bce_debug_mbuf_allocation_failure: 118 * How often to simulate an mbuf allocation failure. 119 * 120 * bce_debug_dma_map_addr_failure: 121 * How often to simulate a DMA mapping failure. 122 * 123 * bce_debug_bootcode_running_failure: 124 * How often to simulate a bootcode failure. 125 */ 126 static int bce_debug_mbuf_allocation_failure = 0; 127 static int bce_debug_dma_map_addr_failure = 0; 128 static int bce_debug_bootcode_running_failure = 0; 129 130 #endif /* BCE_DEBUG */ 131 132 133 /****************************************************************************/ 134 /* PCI Device ID Table */ 135 /* */ 136 /* Used by bce_probe() to identify the devices supported by this driver. */ 137 /****************************************************************************/ 138 #define BCE_DEVDESC_MAX 64 139 140 static struct bce_type bce_devs[] = { 141 /* BCM5706C Controllers and OEM boards. */ 142 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x3101, 143 "HP NC370T Multifunction Gigabit Server Adapter" }, 144 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x3106, 145 "HP NC370i Multifunction Gigabit Server Adapter" }, 146 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x3070, 147 "HP NC380T PCIe DP Multifunc Gig Server Adapter" }, 148 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x1709, 149 "HP NC371i Multifunction Gigabit Server Adapter" }, 150 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706, PCI_ANY_ID, PCI_ANY_ID, 151 "Broadcom NetXtreme II BCM5706 1000Base-T" }, 152 153 /* BCM5706S controllers and OEM boards. 
*/ 154 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, HP_VENDORID, 0x3102, 155 "HP NC370F Multifunction Gigabit Server Adapter" }, 156 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, PCI_ANY_ID, PCI_ANY_ID, 157 "Broadcom NetXtreme II BCM5706 1000Base-SX" }, 158 159 /* BCM5708C controllers and OEM boards. */ 160 { BRCM_VENDORID, BRCM_DEVICEID_BCM5708, HP_VENDORID, 0x7037, 161 "HP NC373T PCIe Multifunction Gig Server Adapter" }, 162 { BRCM_VENDORID, BRCM_DEVICEID_BCM5708, HP_VENDORID, 0x7038, 163 "HP NC373i Multifunction Gigabit Server Adapter" }, 164 { BRCM_VENDORID, BRCM_DEVICEID_BCM5708, HP_VENDORID, 0x7045, 165 "HP NC374m PCIe Multifunction Adapter" }, 166 { BRCM_VENDORID, BRCM_DEVICEID_BCM5708, PCI_ANY_ID, PCI_ANY_ID, 167 "Broadcom NetXtreme II BCM5708 1000Base-T" }, 168 169 /* BCM5708S controllers and OEM boards. */ 170 { BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, HP_VENDORID, 0x1706, 171 "HP NC373m Multifunction Gigabit Server Adapter" }, 172 { BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, HP_VENDORID, 0x703b, 173 "HP NC373i Multifunction Gigabit Server Adapter" }, 174 { BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, HP_VENDORID, 0x703d, 175 "HP NC373F PCIe Multifunc Giga Server Adapter" }, 176 { BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, PCI_ANY_ID, PCI_ANY_ID, 177 "Broadcom NetXtreme II BCM5708S 1000Base-T" }, 178 179 /* BCM5709C controllers and OEM boards. */ 180 { BRCM_VENDORID, BRCM_DEVICEID_BCM5709, HP_VENDORID, 0x7055, 181 "HP NC382i DP Multifunction Gigabit Server Adapter" }, 182 { BRCM_VENDORID, BRCM_DEVICEID_BCM5709, HP_VENDORID, 0x7059, 183 "HP NC382T PCIe DP Multifunction Gigabit Server Adapter" }, 184 { BRCM_VENDORID, BRCM_DEVICEID_BCM5709, PCI_ANY_ID, PCI_ANY_ID, 185 "Broadcom NetXtreme II BCM5709 1000Base-T" }, 186 187 /* BCM5709S controllers and OEM boards. 
*/ 188 { BRCM_VENDORID, BRCM_DEVICEID_BCM5709S, HP_VENDORID, 0x171d, 189 "HP NC382m DP 1GbE Multifunction BL-c Adapter" }, 190 { BRCM_VENDORID, BRCM_DEVICEID_BCM5709S, HP_VENDORID, 0x7056, 191 "HP NC382i DP Multifunction Gigabit Server Adapter" }, 192 { BRCM_VENDORID, BRCM_DEVICEID_BCM5709S, PCI_ANY_ID, PCI_ANY_ID, 193 "Broadcom NetXtreme II BCM5709 1000Base-SX" }, 194 195 /* BCM5716 controllers and OEM boards. */ 196 { BRCM_VENDORID, BRCM_DEVICEID_BCM5716, PCI_ANY_ID, PCI_ANY_ID, 197 "Broadcom NetXtreme II BCM5716 1000Base-T" }, 198 199 { 0, 0, 0, 0, NULL } 200 }; 201 202 203 /****************************************************************************/ 204 /* Supported Flash NVRAM device data. */ 205 /****************************************************************************/ 206 static const struct flash_spec flash_table[] = 207 { 208 #define BUFFERED_FLAGS (BCE_NV_BUFFERED | BCE_NV_TRANSLATE) 209 #define NONBUFFERED_FLAGS (BCE_NV_WREN) 210 211 /* Slow EEPROM */ 212 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400, 213 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE, 214 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE, 215 "EEPROM - slow"}, 216 /* Expansion entry 0001 */ 217 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406, 218 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, 219 SAIFUN_FLASH_BYTE_ADDR_MASK, 0, 220 "Entry 0001"}, 221 /* Saifun SA25F010 (non-buffered flash) */ 222 /* strap, cfg1, & write1 need updates */ 223 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406, 224 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, 225 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2, 226 "Non-buffered flash (128kB)"}, 227 /* Saifun SA25F020 (non-buffered flash) */ 228 /* strap, cfg1, & write1 need updates */ 229 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406, 230 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, 231 SAIFUN_FLASH_BYTE_ADDR_MASK, 
SAIFUN_FLASH_BASE_TOTAL_SIZE*4, 232 "Non-buffered flash (256kB)"}, 233 /* Expansion entry 0100 */ 234 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406, 235 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, 236 SAIFUN_FLASH_BYTE_ADDR_MASK, 0, 237 "Entry 0100"}, 238 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */ 239 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406, 240 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE, 241 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2, 242 "Entry 0101: ST M45PE10 (128kB non-bufferred)"}, 243 /* Entry 0110: ST M45PE20 (non-buffered flash)*/ 244 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406, 245 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE, 246 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4, 247 "Entry 0110: ST M45PE20 (256kB non-bufferred)"}, 248 /* Saifun SA25F005 (non-buffered flash) */ 249 /* strap, cfg1, & write1 need updates */ 250 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406, 251 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, 252 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE, 253 "Non-buffered flash (64kB)"}, 254 /* Fast EEPROM */ 255 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400, 256 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE, 257 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE, 258 "EEPROM - fast"}, 259 /* Expansion entry 1001 */ 260 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406, 261 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, 262 SAIFUN_FLASH_BYTE_ADDR_MASK, 0, 263 "Entry 1001"}, 264 /* Expansion entry 1010 */ 265 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406, 266 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, 267 SAIFUN_FLASH_BYTE_ADDR_MASK, 0, 268 "Entry 1010"}, 269 /* ATMEL AT45DB011B (buffered flash) */ 270 {0x2e000003, 0x6e808273, 
0x00570081, 0x68848353, 0xaf000400, 271 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE, 272 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE, 273 "Buffered flash (128kB)"}, 274 /* Expansion entry 1100 */ 275 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406, 276 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, 277 SAIFUN_FLASH_BYTE_ADDR_MASK, 0, 278 "Entry 1100"}, 279 /* Expansion entry 1101 */ 280 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406, 281 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, 282 SAIFUN_FLASH_BYTE_ADDR_MASK, 0, 283 "Entry 1101"}, 284 /* Ateml Expansion entry 1110 */ 285 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400, 286 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE, 287 BUFFERED_FLASH_BYTE_ADDR_MASK, 0, 288 "Entry 1110 (Atmel)"}, 289 /* ATMEL AT45DB021B (buffered flash) */ 290 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400, 291 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE, 292 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2, 293 "Buffered flash (256kB)"}, 294 }; 295 296 /* 297 * The BCM5709 controllers transparently handle the 298 * differences between Atmel 264 byte pages and all 299 * flash devices which use 256 byte pages, so no 300 * logical-to-physical mapping is required in the 301 * driver. 302 */ 303 static struct flash_spec flash_5709 = { 304 .flags = BCE_NV_BUFFERED, 305 .page_bits = BCM5709_FLASH_PAGE_BITS, 306 .page_size = BCM5709_FLASH_PAGE_SIZE, 307 .addr_mask = BCM5709_FLASH_BYTE_ADDR_MASK, 308 .total_size = BUFFERED_FLASH_TOTAL_SIZE * 2, 309 .name = "5709/5716 buffered flash (256kB)", 310 }; 311 312 313 /****************************************************************************/ 314 /* DragonFly device entry points. 
*/ 315 /****************************************************************************/ 316 static int bce_probe(device_t); 317 static int bce_attach(device_t); 318 static int bce_detach(device_t); 319 static void bce_shutdown(device_t); 320 321 /****************************************************************************/ 322 /* BCE Debug Data Structure Dump Routines */ 323 /****************************************************************************/ 324 #ifdef BCE_DEBUG 325 static void bce_dump_mbuf(struct bce_softc *, struct mbuf *); 326 static void bce_dump_rx_mbuf_chain(struct bce_softc *, int, int); 327 static void bce_dump_txbd(struct bce_softc *, int, struct tx_bd *); 328 static void bce_dump_rxbd(struct bce_softc *, int, struct rx_bd *); 329 static void bce_dump_l2fhdr(struct bce_softc *, int, 330 struct l2_fhdr *) __unused; 331 static void bce_dump_tx_chain(struct bce_softc *, int, int); 332 static void bce_dump_rx_chain(struct bce_softc *, int, int); 333 static void bce_dump_status_block(struct bce_softc *); 334 static void bce_dump_driver_state(struct bce_softc *); 335 static void bce_dump_stats_block(struct bce_softc *) __unused; 336 static void bce_dump_hw_state(struct bce_softc *); 337 static void bce_dump_txp_state(struct bce_softc *); 338 static void bce_dump_rxp_state(struct bce_softc *) __unused; 339 static void bce_dump_tpat_state(struct bce_softc *) __unused; 340 static void bce_freeze_controller(struct bce_softc *) __unused; 341 static void bce_unfreeze_controller(struct bce_softc *) __unused; 342 static void bce_breakpoint(struct bce_softc *); 343 #endif /* BCE_DEBUG */ 344 345 346 /****************************************************************************/ 347 /* BCE Register/Memory Access Routines */ 348 /****************************************************************************/ 349 static uint32_t bce_reg_rd_ind(struct bce_softc *, uint32_t); 350 static void bce_reg_wr_ind(struct bce_softc *, uint32_t, uint32_t); 351 static void 
bce_shmem_wr(struct bce_softc *, uint32_t, uint32_t); 352 static uint32_t bce_shmem_rd(struct bce_softc *, u32); 353 static void bce_ctx_wr(struct bce_softc *, uint32_t, uint32_t, uint32_t); 354 static int bce_miibus_read_reg(device_t, int, int); 355 static int bce_miibus_write_reg(device_t, int, int, int); 356 static void bce_miibus_statchg(device_t); 357 358 359 /****************************************************************************/ 360 /* BCE NVRAM Access Routines */ 361 /****************************************************************************/ 362 static int bce_acquire_nvram_lock(struct bce_softc *); 363 static int bce_release_nvram_lock(struct bce_softc *); 364 static void bce_enable_nvram_access(struct bce_softc *); 365 static void bce_disable_nvram_access(struct bce_softc *); 366 static int bce_nvram_read_dword(struct bce_softc *, uint32_t, uint8_t *, 367 uint32_t); 368 static int bce_init_nvram(struct bce_softc *); 369 static int bce_nvram_read(struct bce_softc *, uint32_t, uint8_t *, int); 370 static int bce_nvram_test(struct bce_softc *); 371 372 /****************************************************************************/ 373 /* BCE DMA Allocate/Free Routines */ 374 /****************************************************************************/ 375 static int bce_dma_alloc(struct bce_softc *); 376 static void bce_dma_free(struct bce_softc *); 377 static void bce_dma_map_addr(void *, bus_dma_segment_t *, int, int); 378 379 /****************************************************************************/ 380 /* BCE Firmware Synchronization and Load */ 381 /****************************************************************************/ 382 static int bce_fw_sync(struct bce_softc *, uint32_t); 383 static void bce_load_rv2p_fw(struct bce_softc *, uint32_t *, 384 uint32_t, uint32_t); 385 static void bce_load_cpu_fw(struct bce_softc *, struct cpu_reg *, 386 struct fw_info *); 387 static void bce_start_cpu(struct bce_softc *, struct cpu_reg *); 388 static 
void bce_halt_cpu(struct bce_softc *, struct cpu_reg *); 389 static void bce_start_rxp_cpu(struct bce_softc *); 390 static void bce_init_rxp_cpu(struct bce_softc *); 391 static void bce_init_txp_cpu(struct bce_softc *); 392 static void bce_init_tpat_cpu(struct bce_softc *); 393 static void bce_init_cp_cpu(struct bce_softc *); 394 static void bce_init_com_cpu(struct bce_softc *); 395 static void bce_init_cpus(struct bce_softc *); 396 397 static void bce_stop(struct bce_softc *); 398 static int bce_reset(struct bce_softc *, uint32_t); 399 static int bce_chipinit(struct bce_softc *); 400 static int bce_blockinit(struct bce_softc *); 401 static int bce_newbuf_std(struct bce_softc *, uint16_t *, uint16_t *, 402 uint32_t *, int); 403 static void bce_setup_rxdesc_std(struct bce_softc *, uint16_t, uint32_t *); 404 static void bce_probe_pci_caps(struct bce_softc *); 405 static void bce_print_adapter_info(struct bce_softc *); 406 static void bce_get_media(struct bce_softc *); 407 408 static void bce_init_tx_context(struct bce_softc *); 409 static int bce_init_tx_chain(struct bce_softc *); 410 static void bce_init_rx_context(struct bce_softc *); 411 static int bce_init_rx_chain(struct bce_softc *); 412 static void bce_free_rx_chain(struct bce_softc *); 413 static void bce_free_tx_chain(struct bce_softc *); 414 415 static int bce_encap(struct bce_softc *, struct mbuf **); 416 static int bce_tso_setup(struct bce_softc *, struct mbuf **, 417 uint16_t *, uint16_t *); 418 static void bce_start(struct ifnet *); 419 static int bce_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *); 420 static void bce_watchdog(struct ifnet *); 421 static int bce_ifmedia_upd(struct ifnet *); 422 static void bce_ifmedia_sts(struct ifnet *, struct ifmediareq *); 423 static void bce_init(void *); 424 static void bce_mgmt_init(struct bce_softc *); 425 426 static int bce_init_ctx(struct bce_softc *); 427 static void bce_get_mac_addr(struct bce_softc *); 428 static void bce_set_mac_addr(struct 
bce_softc *); 429 static void bce_phy_intr(struct bce_softc *); 430 static void bce_rx_intr(struct bce_softc *, int, uint16_t); 431 static void bce_tx_intr(struct bce_softc *, uint16_t); 432 static void bce_disable_intr(struct bce_softc *); 433 static void bce_enable_intr(struct bce_softc *); 434 static void bce_reenable_intr(struct bce_softc *); 435 436 #ifdef DEVICE_POLLING 437 static void bce_poll(struct ifnet *, enum poll_cmd, int); 438 #endif 439 static void bce_intr(struct bce_softc *); 440 static void bce_intr_legacy(void *); 441 static void bce_intr_msi(void *); 442 static void bce_intr_msi_oneshot(void *); 443 static void bce_set_rx_mode(struct bce_softc *); 444 static void bce_stats_update(struct bce_softc *); 445 static void bce_tick(void *); 446 static void bce_tick_serialized(struct bce_softc *); 447 static void bce_pulse(void *); 448 static void bce_check_msi(void *); 449 static void bce_add_sysctls(struct bce_softc *); 450 451 static void bce_coal_change(struct bce_softc *); 452 static int bce_sysctl_tx_bds_int(SYSCTL_HANDLER_ARGS); 453 static int bce_sysctl_tx_bds(SYSCTL_HANDLER_ARGS); 454 static int bce_sysctl_tx_ticks_int(SYSCTL_HANDLER_ARGS); 455 static int bce_sysctl_tx_ticks(SYSCTL_HANDLER_ARGS); 456 static int bce_sysctl_rx_bds_int(SYSCTL_HANDLER_ARGS); 457 static int bce_sysctl_rx_bds(SYSCTL_HANDLER_ARGS); 458 static int bce_sysctl_rx_ticks_int(SYSCTL_HANDLER_ARGS); 459 static int bce_sysctl_rx_ticks(SYSCTL_HANDLER_ARGS); 460 static int bce_sysctl_coal_change(SYSCTL_HANDLER_ARGS, 461 uint32_t *, uint32_t); 462 463 /* 464 * NOTE: 465 * Don't set bce_tx_ticks_int/bce_tx_ticks to 1023. Linux's bnx2 466 * takes 1023 as the TX ticks limit. However, using 1023 will 467 * cause 5708(B2) to generate extra interrupts (~2000/s) even when 468 * there is _no_ network activity on the NIC. 
469 */ 470 static uint32_t bce_tx_bds_int = 255; /* bcm: 20 */ 471 static uint32_t bce_tx_bds = 255; /* bcm: 20 */ 472 static uint32_t bce_tx_ticks_int = 1022; /* bcm: 80 */ 473 static uint32_t bce_tx_ticks = 1022; /* bcm: 80 */ 474 static uint32_t bce_rx_bds_int = 128; /* bcm: 6 */ 475 static uint32_t bce_rx_bds = 128; /* bcm: 6 */ 476 static uint32_t bce_rx_ticks_int = 150; /* bcm: 18 */ 477 static uint32_t bce_rx_ticks = 150; /* bcm: 18 */ 478 479 static int bce_msi_enable = 1; 480 481 static int bce_rx_pages = RX_PAGES_DEFAULT; 482 static int bce_tx_pages = TX_PAGES_DEFAULT; 483 484 TUNABLE_INT("hw.bce.tx_bds_int", &bce_tx_bds_int); 485 TUNABLE_INT("hw.bce.tx_bds", &bce_tx_bds); 486 TUNABLE_INT("hw.bce.tx_ticks_int", &bce_tx_ticks_int); 487 TUNABLE_INT("hw.bce.tx_ticks", &bce_tx_ticks); 488 TUNABLE_INT("hw.bce.rx_bds_int", &bce_rx_bds_int); 489 TUNABLE_INT("hw.bce.rx_bds", &bce_rx_bds); 490 TUNABLE_INT("hw.bce.rx_ticks_int", &bce_rx_ticks_int); 491 TUNABLE_INT("hw.bce.rx_ticks", &bce_rx_ticks); 492 TUNABLE_INT("hw.bce.msi.enable", &bce_msi_enable); 493 TUNABLE_INT("hw.bce.rx_pages", &bce_rx_pages); 494 TUNABLE_INT("hw.bce.tx_pages", &bce_tx_pages); 495 496 /****************************************************************************/ 497 /* DragonFly device dispatch table. 
*/ 498 /****************************************************************************/ 499 static device_method_t bce_methods[] = { 500 /* Device interface */ 501 DEVMETHOD(device_probe, bce_probe), 502 DEVMETHOD(device_attach, bce_attach), 503 DEVMETHOD(device_detach, bce_detach), 504 DEVMETHOD(device_shutdown, bce_shutdown), 505 506 /* bus interface */ 507 DEVMETHOD(bus_print_child, bus_generic_print_child), 508 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 509 510 /* MII interface */ 511 DEVMETHOD(miibus_readreg, bce_miibus_read_reg), 512 DEVMETHOD(miibus_writereg, bce_miibus_write_reg), 513 DEVMETHOD(miibus_statchg, bce_miibus_statchg), 514 515 { 0, 0 } 516 }; 517 518 static driver_t bce_driver = { 519 "bce", 520 bce_methods, 521 sizeof(struct bce_softc) 522 }; 523 524 static devclass_t bce_devclass; 525 526 527 DECLARE_DUMMY_MODULE(if_bce); 528 MODULE_DEPEND(bce, miibus, 1, 1, 1); 529 DRIVER_MODULE(if_bce, pci, bce_driver, bce_devclass, NULL, NULL); 530 DRIVER_MODULE(miibus, bce, miibus_driver, miibus_devclass, NULL, NULL); 531 532 533 /****************************************************************************/ 534 /* Device probe function. */ 535 /* */ 536 /* Compares the device to the driver's list of supported devices and */ 537 /* reports back to the OS whether this is the right driver for the device. */ 538 /* */ 539 /* Returns: */ 540 /* BUS_PROBE_DEFAULT on success, positive value on failure. */ 541 /****************************************************************************/ 542 static int 543 bce_probe(device_t dev) 544 { 545 struct bce_type *t; 546 uint16_t vid, did, svid, sdid; 547 548 /* Get the data for the device to be probed. */ 549 vid = pci_get_vendor(dev); 550 did = pci_get_device(dev); 551 svid = pci_get_subvendor(dev); 552 sdid = pci_get_subdevice(dev); 553 554 /* Look through the list of known devices for a match. 
*/ 555 for (t = bce_devs; t->bce_name != NULL; ++t) { 556 if (vid == t->bce_vid && did == t->bce_did && 557 (svid == t->bce_svid || t->bce_svid == PCI_ANY_ID) && 558 (sdid == t->bce_sdid || t->bce_sdid == PCI_ANY_ID)) { 559 uint32_t revid = pci_read_config(dev, PCIR_REVID, 4); 560 char *descbuf; 561 562 descbuf = kmalloc(BCE_DEVDESC_MAX, M_TEMP, M_WAITOK); 563 564 /* Print out the device identity. */ 565 ksnprintf(descbuf, BCE_DEVDESC_MAX, "%s (%c%d)", 566 t->bce_name, 567 ((revid & 0xf0) >> 4) + 'A', revid & 0xf); 568 569 device_set_desc_copy(dev, descbuf); 570 kfree(descbuf, M_TEMP); 571 return 0; 572 } 573 } 574 return ENXIO; 575 } 576 577 578 /****************************************************************************/ 579 /* PCI Capabilities Probe Function. */ 580 /* */ 581 /* Walks the PCI capabiites list for the device to find what features are */ 582 /* supported. */ 583 /* */ 584 /* Returns: */ 585 /* None. */ 586 /****************************************************************************/ 587 static void 588 bce_print_adapter_info(struct bce_softc *sc) 589 { 590 device_printf(sc->bce_dev, "ASIC (0x%08X); ", sc->bce_chipid); 591 592 kprintf("Rev (%c%d); ", ((BCE_CHIP_ID(sc) & 0xf000) >> 12) + 'A', 593 ((BCE_CHIP_ID(sc) & 0x0ff0) >> 4)); 594 595 /* Bus info. */ 596 if (sc->bce_flags & BCE_PCIE_FLAG) { 597 kprintf("Bus (PCIe x%d, ", sc->link_width); 598 switch (sc->link_speed) { 599 case 1: 600 kprintf("2.5Gbps); "); 601 break; 602 case 2: 603 kprintf("5Gbps); "); 604 break; 605 default: 606 kprintf("Unknown link speed); "); 607 break; 608 } 609 } else { 610 kprintf("Bus (PCI%s, %s, %dMHz); ", 611 ((sc->bce_flags & BCE_PCIX_FLAG) ? "-X" : ""), 612 ((sc->bce_flags & BCE_PCI_32BIT_FLAG) ? "32-bit" : "64-bit"), 613 sc->bus_speed_mhz); 614 } 615 616 /* Firmware version and device features. 
*/ 617 kprintf("B/C (%s)", sc->bce_bc_ver); 618 619 if ((sc->bce_flags & BCE_MFW_ENABLE_FLAG) || 620 (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG)) { 621 kprintf("; Flags("); 622 if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) 623 kprintf("MFW[%s]", sc->bce_mfw_ver); 624 if (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG) 625 kprintf(" 2.5G"); 626 kprintf(")"); 627 } 628 kprintf("\n"); 629 } 630 631 632 /****************************************************************************/ 633 /* PCI Capabilities Probe Function. */ 634 /* */ 635 /* Walks the PCI capabiites list for the device to find what features are */ 636 /* supported. */ 637 /* */ 638 /* Returns: */ 639 /* None. */ 640 /****************************************************************************/ 641 static void 642 bce_probe_pci_caps(struct bce_softc *sc) 643 { 644 device_t dev = sc->bce_dev; 645 uint8_t ptr; 646 647 if (pci_is_pcix(dev)) 648 sc->bce_cap_flags |= BCE_PCIX_CAPABLE_FLAG; 649 650 ptr = pci_get_pciecap_ptr(dev); 651 if (ptr) { 652 uint16_t link_status = pci_read_config(dev, ptr + 0x12, 2); 653 654 sc->link_speed = link_status & 0xf; 655 sc->link_width = (link_status >> 4) & 0x3f; 656 sc->bce_cap_flags |= BCE_PCIE_CAPABLE_FLAG; 657 sc->bce_flags |= BCE_PCIE_FLAG; 658 } 659 } 660 661 662 /****************************************************************************/ 663 /* Device attach function. */ 664 /* */ 665 /* Allocates device resources, performs secondary chip identification, */ 666 /* resets and initializes the hardware, and initializes driver instance */ 667 /* variables. */ 668 /* */ 669 /* Returns: */ 670 /* 0 on success, positive value on failure. 
*/ 671 /****************************************************************************/ 672 static int 673 bce_attach(device_t dev) 674 { 675 struct bce_softc *sc = device_get_softc(dev); 676 struct ifnet *ifp = &sc->arpcom.ac_if; 677 uint32_t val; 678 u_int irq_flags; 679 void (*irq_handle)(void *); 680 int rid, rc = 0; 681 int i, j; 682 struct mii_probe_args mii_args; 683 uintptr_t mii_priv = 0; 684 685 sc->bce_dev = dev; 686 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 687 688 pci_enable_busmaster(dev); 689 690 bce_probe_pci_caps(sc); 691 692 /* Allocate PCI memory resources. */ 693 rid = PCIR_BAR(0); 694 sc->bce_res_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 695 RF_ACTIVE | PCI_RF_DENSE); 696 if (sc->bce_res_mem == NULL) { 697 device_printf(dev, "PCI memory allocation failed\n"); 698 return ENXIO; 699 } 700 sc->bce_btag = rman_get_bustag(sc->bce_res_mem); 701 sc->bce_bhandle = rman_get_bushandle(sc->bce_res_mem); 702 703 /* Allocate PCI IRQ resources. */ 704 sc->bce_irq_type = pci_alloc_1intr(dev, bce_msi_enable, 705 &sc->bce_irq_rid, &irq_flags); 706 707 sc->bce_res_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, 708 &sc->bce_irq_rid, irq_flags); 709 if (sc->bce_res_irq == NULL) { 710 device_printf(dev, "PCI map interrupt failed\n"); 711 rc = ENXIO; 712 goto fail; 713 } 714 715 /* 716 * Configure byte swap and enable indirect register access. 717 * Rely on CPU to do target byte swapping on big endian systems. 718 * Access to registers outside of PCI configurtion space are not 719 * valid until this is done. 720 */ 721 pci_write_config(dev, BCE_PCICFG_MISC_CONFIG, 722 BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA | 723 BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP, 4); 724 725 /* Save ASIC revsion info. */ 726 sc->bce_chipid = REG_RD(sc, BCE_MISC_ID); 727 728 /* Weed out any non-production controller revisions. 
*/ 729 switch (BCE_CHIP_ID(sc)) { 730 case BCE_CHIP_ID_5706_A0: 731 case BCE_CHIP_ID_5706_A1: 732 case BCE_CHIP_ID_5708_A0: 733 case BCE_CHIP_ID_5708_B0: 734 case BCE_CHIP_ID_5709_A0: 735 case BCE_CHIP_ID_5709_B0: 736 case BCE_CHIP_ID_5709_B1: 737 #ifdef foo 738 /* 5709C B2 seems to work fine */ 739 case BCE_CHIP_ID_5709_B2: 740 #endif 741 device_printf(dev, "Unsupported chip id 0x%08x!\n", 742 BCE_CHIP_ID(sc)); 743 rc = ENODEV; 744 goto fail; 745 } 746 747 mii_priv |= BRGPHY_FLAG_WIRESPEED; 748 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) { 749 if (BCE_CHIP_REV(sc) == BCE_CHIP_REV_Ax || 750 BCE_CHIP_REV(sc) == BCE_CHIP_REV_Bx) 751 mii_priv |= BRGPHY_FLAG_NO_EARLYDAC; 752 } else { 753 mii_priv |= BRGPHY_FLAG_BER_BUG; 754 } 755 756 if (sc->bce_irq_type == PCI_INTR_TYPE_LEGACY) { 757 irq_handle = bce_intr_legacy; 758 } else if (sc->bce_irq_type == PCI_INTR_TYPE_MSI) { 759 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) { 760 irq_handle = bce_intr_msi_oneshot; 761 sc->bce_flags |= BCE_ONESHOT_MSI_FLAG; 762 } else { 763 irq_handle = bce_intr_msi; 764 sc->bce_flags |= BCE_CHECK_MSI_FLAG; 765 } 766 } else { 767 panic("%s: unsupported intr type %d", 768 device_get_nameunit(dev), sc->bce_irq_type); 769 } 770 771 /* 772 * Find the base address for shared memory access. 773 * Newer versions of bootcode use a signature and offset 774 * while older versions use a fixed address. 775 */ 776 val = REG_RD_IND(sc, BCE_SHM_HDR_SIGNATURE); 777 if ((val & BCE_SHM_HDR_SIGNATURE_SIG_MASK) == 778 BCE_SHM_HDR_SIGNATURE_SIG) { 779 /* Multi-port devices use different offsets in shared memory. */ 780 sc->bce_shmem_base = REG_RD_IND(sc, 781 BCE_SHM_HDR_ADDR_0 + (pci_get_function(sc->bce_dev) << 2)); 782 } else { 783 sc->bce_shmem_base = HOST_VIEW_SHMEM_BASE; 784 } 785 DBPRINT(sc, BCE_INFO, "bce_shmem_base = 0x%08X\n", sc->bce_shmem_base); 786 787 /* Fetch the bootcode revision. 
*/ 788 val = bce_shmem_rd(sc, BCE_DEV_INFO_BC_REV); 789 for (i = 0, j = 0; i < 3; i++) { 790 uint8_t num; 791 int k, skip0; 792 793 num = (uint8_t)(val >> (24 - (i * 8))); 794 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) { 795 if (num >= k || !skip0 || k == 1) { 796 sc->bce_bc_ver[j++] = (num / k) + '0'; 797 skip0 = 0; 798 } 799 } 800 if (i != 2) 801 sc->bce_bc_ver[j++] = '.'; 802 } 803 804 /* Check if any management firwmare is running. */ 805 val = bce_shmem_rd(sc, BCE_PORT_FEATURE); 806 if (val & BCE_PORT_FEATURE_ASF_ENABLED) { 807 sc->bce_flags |= BCE_MFW_ENABLE_FLAG; 808 809 /* Allow time for firmware to enter the running state. */ 810 for (i = 0; i < 30; i++) { 811 val = bce_shmem_rd(sc, BCE_BC_STATE_CONDITION); 812 if (val & BCE_CONDITION_MFW_RUN_MASK) 813 break; 814 DELAY(10000); 815 } 816 } 817 818 /* Check the current bootcode state. */ 819 val = bce_shmem_rd(sc, BCE_BC_STATE_CONDITION) & 820 BCE_CONDITION_MFW_RUN_MASK; 821 if (val != BCE_CONDITION_MFW_RUN_UNKNOWN && 822 val != BCE_CONDITION_MFW_RUN_NONE) { 823 uint32_t addr = bce_shmem_rd(sc, BCE_MFW_VER_PTR); 824 825 for (i = 0, j = 0; j < 3; j++) { 826 val = bce_reg_rd_ind(sc, addr + j * 4); 827 val = bswap32(val); 828 memcpy(&sc->bce_mfw_ver[i], &val, 4); 829 i += 4; 830 } 831 } 832 833 /* Get PCI bus information (speed and type). 
*/ 834 val = REG_RD(sc, BCE_PCICFG_MISC_STATUS); 835 if (val & BCE_PCICFG_MISC_STATUS_PCIX_DET) { 836 uint32_t clkreg; 837 838 sc->bce_flags |= BCE_PCIX_FLAG; 839 840 clkreg = REG_RD(sc, BCE_PCICFG_PCI_CLOCK_CONTROL_BITS) & 841 BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET; 842 switch (clkreg) { 843 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ: 844 sc->bus_speed_mhz = 133; 845 break; 846 847 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ: 848 sc->bus_speed_mhz = 100; 849 break; 850 851 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ: 852 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ: 853 sc->bus_speed_mhz = 66; 854 break; 855 856 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ: 857 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ: 858 sc->bus_speed_mhz = 50; 859 break; 860 861 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW: 862 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ: 863 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ: 864 sc->bus_speed_mhz = 33; 865 break; 866 } 867 } else { 868 if (val & BCE_PCICFG_MISC_STATUS_M66EN) 869 sc->bus_speed_mhz = 66; 870 else 871 sc->bus_speed_mhz = 33; 872 } 873 874 if (val & BCE_PCICFG_MISC_STATUS_32BIT_DET) 875 sc->bce_flags |= BCE_PCI_32BIT_FLAG; 876 877 /* Reset the controller. */ 878 rc = bce_reset(sc, BCE_DRV_MSG_CODE_RESET); 879 if (rc != 0) 880 goto fail; 881 882 /* Initialize the controller. */ 883 rc = bce_chipinit(sc); 884 if (rc != 0) { 885 device_printf(dev, "Controller initialization failed!\n"); 886 goto fail; 887 } 888 889 /* Perform NVRAM test. */ 890 rc = bce_nvram_test(sc); 891 if (rc != 0) { 892 device_printf(dev, "NVRAM test failed!\n"); 893 goto fail; 894 } 895 896 /* Fetch the permanent Ethernet MAC address. 
*/ 897 bce_get_mac_addr(sc); 898 899 /* 900 * Trip points control how many BDs 901 * should be ready before generating an 902 * interrupt while ticks control how long 903 * a BD can sit in the chain before 904 * generating an interrupt. Set the default 905 * values for the RX and TX rings. 906 */ 907 908 #ifdef BCE_DRBUG 909 /* Force more frequent interrupts. */ 910 sc->bce_tx_quick_cons_trip_int = 1; 911 sc->bce_tx_quick_cons_trip = 1; 912 sc->bce_tx_ticks_int = 0; 913 sc->bce_tx_ticks = 0; 914 915 sc->bce_rx_quick_cons_trip_int = 1; 916 sc->bce_rx_quick_cons_trip = 1; 917 sc->bce_rx_ticks_int = 0; 918 sc->bce_rx_ticks = 0; 919 #else 920 sc->bce_tx_quick_cons_trip_int = bce_tx_bds_int; 921 sc->bce_tx_quick_cons_trip = bce_tx_bds; 922 sc->bce_tx_ticks_int = bce_tx_ticks_int; 923 sc->bce_tx_ticks = bce_tx_ticks; 924 925 sc->bce_rx_quick_cons_trip_int = bce_rx_bds_int; 926 sc->bce_rx_quick_cons_trip = bce_rx_bds; 927 sc->bce_rx_ticks_int = bce_rx_ticks_int; 928 sc->bce_rx_ticks = bce_rx_ticks; 929 #endif 930 931 /* Update statistics once every second. */ 932 sc->bce_stats_ticks = 1000000 & 0xffff00; 933 934 /* Find the media type for the adapter. */ 935 bce_get_media(sc); 936 937 /* Allocate DMA memory resources. */ 938 rc = bce_dma_alloc(sc); 939 if (rc != 0) { 940 device_printf(dev, "DMA resource allocation failed!\n"); 941 goto fail; 942 } 943 944 /* Initialize the ifnet interface. 
*/ 945 ifp->if_softc = sc; 946 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 947 ifp->if_ioctl = bce_ioctl; 948 ifp->if_start = bce_start; 949 ifp->if_init = bce_init; 950 ifp->if_watchdog = bce_watchdog; 951 #ifdef DEVICE_POLLING 952 ifp->if_poll = bce_poll; 953 #endif 954 ifp->if_mtu = ETHERMTU; 955 ifp->if_hwassist = BCE_CSUM_FEATURES | CSUM_TSO; 956 ifp->if_capabilities = BCE_IF_CAPABILITIES; 957 ifp->if_capenable = ifp->if_capabilities; 958 ifq_set_maxlen(&ifp->if_snd, USABLE_TX_BD(sc)); 959 ifq_set_ready(&ifp->if_snd); 960 961 if (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG) 962 ifp->if_baudrate = IF_Gbps(2.5); 963 else 964 ifp->if_baudrate = IF_Gbps(1); 965 966 /* Assume a standard 1500 byte MTU size for mbuf allocations. */ 967 sc->mbuf_alloc_size = MCLBYTES; 968 969 /* 970 * Look for our PHY. 971 */ 972 mii_probe_args_init(&mii_args, bce_ifmedia_upd, bce_ifmedia_sts); 973 mii_args.mii_probemask = 1 << sc->bce_phy_addr; 974 mii_args.mii_privtag = MII_PRIVTAG_BRGPHY; 975 mii_args.mii_priv = mii_priv; 976 977 rc = mii_probe(dev, &sc->bce_miibus, &mii_args); 978 if (rc != 0) { 979 device_printf(dev, "PHY probe failed!\n"); 980 goto fail; 981 } 982 983 /* Attach to the Ethernet interface list. */ 984 ether_ifattach(ifp, sc->eaddr, NULL); 985 986 callout_init_mp(&sc->bce_tick_callout); 987 callout_init_mp(&sc->bce_pulse_callout); 988 callout_init_mp(&sc->bce_ckmsi_callout); 989 990 /* Hookup IRQ last. */ 991 rc = bus_setup_intr(dev, sc->bce_res_irq, INTR_MPSAFE, irq_handle, sc, 992 &sc->bce_intrhand, ifp->if_serializer); 993 if (rc != 0) { 994 device_printf(dev, "Failed to setup IRQ!\n"); 995 ether_ifdetach(ifp); 996 goto fail; 997 } 998 999 ifp->if_cpuid = rman_get_cpuid(sc->bce_res_irq); 1000 KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus); 1001 sc->bce_intr_cpuid = ifp->if_cpuid; 1002 1003 /* Print some important debugging info. */ 1004 DBRUN(BCE_INFO, bce_dump_driver_state(sc)); 1005 1006 /* Add the supported sysctls to the kernel. 
*/ 1007 bce_add_sysctls(sc); 1008 1009 /* 1010 * The chip reset earlier notified the bootcode that 1011 * a driver is present. We now need to start our pulse 1012 * routine so that the bootcode is reminded that we're 1013 * still running. 1014 */ 1015 bce_pulse(sc); 1016 1017 /* Get the firmware running so IPMI still works */ 1018 bce_mgmt_init(sc); 1019 1020 if (bootverbose) 1021 bce_print_adapter_info(sc); 1022 1023 return 0; 1024 fail: 1025 bce_detach(dev); 1026 return(rc); 1027 } 1028 1029 1030 /****************************************************************************/ 1031 /* Device detach function. */ 1032 /* */ 1033 /* Stops the controller, resets the controller, and releases resources. */ 1034 /* */ 1035 /* Returns: */ 1036 /* 0 on success, positive value on failure. */ 1037 /****************************************************************************/ 1038 static int 1039 bce_detach(device_t dev) 1040 { 1041 struct bce_softc *sc = device_get_softc(dev); 1042 1043 if (device_is_attached(dev)) { 1044 struct ifnet *ifp = &sc->arpcom.ac_if; 1045 uint32_t msg; 1046 1047 /* Stop and reset the controller. */ 1048 lwkt_serialize_enter(ifp->if_serializer); 1049 callout_stop(&sc->bce_pulse_callout); 1050 bce_stop(sc); 1051 if (sc->bce_flags & BCE_NO_WOL_FLAG) 1052 msg = BCE_DRV_MSG_CODE_UNLOAD_LNK_DN; 1053 else 1054 msg = BCE_DRV_MSG_CODE_UNLOAD; 1055 bce_reset(sc, msg); 1056 bus_teardown_intr(dev, sc->bce_res_irq, sc->bce_intrhand); 1057 lwkt_serialize_exit(ifp->if_serializer); 1058 1059 ether_ifdetach(ifp); 1060 } 1061 1062 /* If we have a child device on the MII bus remove it too. 
*/ 1063 if (sc->bce_miibus) 1064 device_delete_child(dev, sc->bce_miibus); 1065 bus_generic_detach(dev); 1066 1067 if (sc->bce_res_irq != NULL) { 1068 bus_release_resource(dev, SYS_RES_IRQ, sc->bce_irq_rid, 1069 sc->bce_res_irq); 1070 } 1071 1072 if (sc->bce_irq_type == PCI_INTR_TYPE_MSI) 1073 pci_release_msi(dev); 1074 1075 if (sc->bce_res_mem != NULL) { 1076 bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0), 1077 sc->bce_res_mem); 1078 } 1079 1080 bce_dma_free(sc); 1081 1082 if (sc->bce_sysctl_tree != NULL) 1083 sysctl_ctx_free(&sc->bce_sysctl_ctx); 1084 1085 return 0; 1086 } 1087 1088 1089 /****************************************************************************/ 1090 /* Device shutdown function. */ 1091 /* */ 1092 /* Stops and resets the controller. */ 1093 /* */ 1094 /* Returns: */ 1095 /* Nothing */ 1096 /****************************************************************************/ 1097 static void 1098 bce_shutdown(device_t dev) 1099 { 1100 struct bce_softc *sc = device_get_softc(dev); 1101 struct ifnet *ifp = &sc->arpcom.ac_if; 1102 uint32_t msg; 1103 1104 lwkt_serialize_enter(ifp->if_serializer); 1105 bce_stop(sc); 1106 if (sc->bce_flags & BCE_NO_WOL_FLAG) 1107 msg = BCE_DRV_MSG_CODE_UNLOAD_LNK_DN; 1108 else 1109 msg = BCE_DRV_MSG_CODE_UNLOAD; 1110 bce_reset(sc, msg); 1111 lwkt_serialize_exit(ifp->if_serializer); 1112 } 1113 1114 1115 /****************************************************************************/ 1116 /* Indirect register read. */ 1117 /* */ 1118 /* Reads NetXtreme II registers using an index/data register pair in PCI */ 1119 /* configuration space. Using this mechanism avoids issues with posted */ 1120 /* reads but is much slower than memory-mapped I/O. */ 1121 /* */ 1122 /* Returns: */ 1123 /* The value of the register. 
*/ 1124 /****************************************************************************/ 1125 static uint32_t 1126 bce_reg_rd_ind(struct bce_softc *sc, uint32_t offset) 1127 { 1128 device_t dev = sc->bce_dev; 1129 1130 pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4); 1131 #ifdef BCE_DEBUG 1132 { 1133 uint32_t val; 1134 val = pci_read_config(dev, BCE_PCICFG_REG_WINDOW, 4); 1135 DBPRINT(sc, BCE_EXCESSIVE, 1136 "%s(); offset = 0x%08X, val = 0x%08X\n", 1137 __func__, offset, val); 1138 return val; 1139 } 1140 #else 1141 return pci_read_config(dev, BCE_PCICFG_REG_WINDOW, 4); 1142 #endif 1143 } 1144 1145 1146 /****************************************************************************/ 1147 /* Indirect register write. */ 1148 /* */ 1149 /* Writes NetXtreme II registers using an index/data register pair in PCI */ 1150 /* configuration space. Using this mechanism avoids issues with posted */ 1151 /* writes but is muchh slower than memory-mapped I/O. */ 1152 /* */ 1153 /* Returns: */ 1154 /* Nothing. */ 1155 /****************************************************************************/ 1156 static void 1157 bce_reg_wr_ind(struct bce_softc *sc, uint32_t offset, uint32_t val) 1158 { 1159 device_t dev = sc->bce_dev; 1160 1161 DBPRINT(sc, BCE_EXCESSIVE, "%s(); offset = 0x%08X, val = 0x%08X\n", 1162 __func__, offset, val); 1163 1164 pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4); 1165 pci_write_config(dev, BCE_PCICFG_REG_WINDOW, val, 4); 1166 } 1167 1168 1169 /****************************************************************************/ 1170 /* Shared memory write. */ 1171 /* */ 1172 /* Writes NetXtreme II shared memory region. */ 1173 /* */ 1174 /* Returns: */ 1175 /* Nothing. 
*/ 1176 /****************************************************************************/ 1177 static void 1178 bce_shmem_wr(struct bce_softc *sc, uint32_t offset, uint32_t val) 1179 { 1180 bce_reg_wr_ind(sc, sc->bce_shmem_base + offset, val); 1181 } 1182 1183 1184 /****************************************************************************/ 1185 /* Shared memory read. */ 1186 /* */ 1187 /* Reads NetXtreme II shared memory region. */ 1188 /* */ 1189 /* Returns: */ 1190 /* The 32 bit value read. */ 1191 /****************************************************************************/ 1192 static u32 1193 bce_shmem_rd(struct bce_softc *sc, uint32_t offset) 1194 { 1195 return bce_reg_rd_ind(sc, sc->bce_shmem_base + offset); 1196 } 1197 1198 1199 /****************************************************************************/ 1200 /* Context memory write. */ 1201 /* */ 1202 /* The NetXtreme II controller uses context memory to track connection */ 1203 /* information for L2 and higher network protocols. */ 1204 /* */ 1205 /* Returns: */ 1206 /* Nothing. 
*/ 1207 /****************************************************************************/ 1208 static void 1209 bce_ctx_wr(struct bce_softc *sc, uint32_t cid_addr, uint32_t ctx_offset, 1210 uint32_t ctx_val) 1211 { 1212 uint32_t idx, offset = ctx_offset + cid_addr; 1213 uint32_t val, retry_cnt = 5; 1214 1215 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 || 1216 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) { 1217 REG_WR(sc, BCE_CTX_CTX_DATA, ctx_val); 1218 REG_WR(sc, BCE_CTX_CTX_CTRL, (offset | BCE_CTX_CTX_CTRL_WRITE_REQ)); 1219 1220 for (idx = 0; idx < retry_cnt; idx++) { 1221 val = REG_RD(sc, BCE_CTX_CTX_CTRL); 1222 if ((val & BCE_CTX_CTX_CTRL_WRITE_REQ) == 0) 1223 break; 1224 DELAY(5); 1225 } 1226 1227 if (val & BCE_CTX_CTX_CTRL_WRITE_REQ) { 1228 device_printf(sc->bce_dev, 1229 "Unable to write CTX memory: " 1230 "cid_addr = 0x%08X, offset = 0x%08X!\n", 1231 cid_addr, ctx_offset); 1232 } 1233 } else { 1234 REG_WR(sc, BCE_CTX_DATA_ADR, offset); 1235 REG_WR(sc, BCE_CTX_DATA, ctx_val); 1236 } 1237 } 1238 1239 1240 /****************************************************************************/ 1241 /* PHY register read. */ 1242 /* */ 1243 /* Implements register reads on the MII bus. */ 1244 /* */ 1245 /* Returns: */ 1246 /* The value of the register. */ 1247 /****************************************************************************/ 1248 static int 1249 bce_miibus_read_reg(device_t dev, int phy, int reg) 1250 { 1251 struct bce_softc *sc = device_get_softc(dev); 1252 uint32_t val; 1253 int i; 1254 1255 /* Make sure we are accessing the correct PHY address. 
*/ 1256 KASSERT(phy == sc->bce_phy_addr, 1257 ("invalid phyno %d, should be %d\n", phy, sc->bce_phy_addr)); 1258 1259 if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) { 1260 val = REG_RD(sc, BCE_EMAC_MDIO_MODE); 1261 val &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL; 1262 1263 REG_WR(sc, BCE_EMAC_MDIO_MODE, val); 1264 REG_RD(sc, BCE_EMAC_MDIO_MODE); 1265 1266 DELAY(40); 1267 } 1268 1269 val = BCE_MIPHY(phy) | BCE_MIREG(reg) | 1270 BCE_EMAC_MDIO_COMM_COMMAND_READ | BCE_EMAC_MDIO_COMM_DISEXT | 1271 BCE_EMAC_MDIO_COMM_START_BUSY; 1272 REG_WR(sc, BCE_EMAC_MDIO_COMM, val); 1273 1274 for (i = 0; i < BCE_PHY_TIMEOUT; i++) { 1275 DELAY(10); 1276 1277 val = REG_RD(sc, BCE_EMAC_MDIO_COMM); 1278 if (!(val & BCE_EMAC_MDIO_COMM_START_BUSY)) { 1279 DELAY(5); 1280 1281 val = REG_RD(sc, BCE_EMAC_MDIO_COMM); 1282 val &= BCE_EMAC_MDIO_COMM_DATA; 1283 break; 1284 } 1285 } 1286 1287 if (val & BCE_EMAC_MDIO_COMM_START_BUSY) { 1288 if_printf(&sc->arpcom.ac_if, 1289 "Error: PHY read timeout! phy = %d, reg = 0x%04X\n", 1290 phy, reg); 1291 val = 0x0; 1292 } else { 1293 val = REG_RD(sc, BCE_EMAC_MDIO_COMM); 1294 } 1295 1296 DBPRINT(sc, BCE_EXCESSIVE, 1297 "%s(): phy = %d, reg = 0x%04X, val = 0x%04X\n", 1298 __func__, phy, (uint16_t)reg & 0xffff, (uint16_t) val & 0xffff); 1299 1300 if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) { 1301 val = REG_RD(sc, BCE_EMAC_MDIO_MODE); 1302 val |= BCE_EMAC_MDIO_MODE_AUTO_POLL; 1303 1304 REG_WR(sc, BCE_EMAC_MDIO_MODE, val); 1305 REG_RD(sc, BCE_EMAC_MDIO_MODE); 1306 1307 DELAY(40); 1308 } 1309 return (val & 0xffff); 1310 } 1311 1312 1313 /****************************************************************************/ 1314 /* PHY register write. */ 1315 /* */ 1316 /* Implements register writes on the MII bus. */ 1317 /* */ 1318 /* Returns: */ 1319 /* The value of the register. 
*/ 1320 /****************************************************************************/ 1321 static int 1322 bce_miibus_write_reg(device_t dev, int phy, int reg, int val) 1323 { 1324 struct bce_softc *sc = device_get_softc(dev); 1325 uint32_t val1; 1326 int i; 1327 1328 /* Make sure we are accessing the correct PHY address. */ 1329 KASSERT(phy == sc->bce_phy_addr, 1330 ("invalid phyno %d, should be %d\n", phy, sc->bce_phy_addr)); 1331 1332 DBPRINT(sc, BCE_EXCESSIVE, 1333 "%s(): phy = %d, reg = 0x%04X, val = 0x%04X\n", 1334 __func__, phy, (uint16_t)(reg & 0xffff), 1335 (uint16_t)(val & 0xffff)); 1336 1337 if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) { 1338 val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE); 1339 val1 &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL; 1340 1341 REG_WR(sc, BCE_EMAC_MDIO_MODE, val1); 1342 REG_RD(sc, BCE_EMAC_MDIO_MODE); 1343 1344 DELAY(40); 1345 } 1346 1347 val1 = BCE_MIPHY(phy) | BCE_MIREG(reg) | val | 1348 BCE_EMAC_MDIO_COMM_COMMAND_WRITE | 1349 BCE_EMAC_MDIO_COMM_START_BUSY | BCE_EMAC_MDIO_COMM_DISEXT; 1350 REG_WR(sc, BCE_EMAC_MDIO_COMM, val1); 1351 1352 for (i = 0; i < BCE_PHY_TIMEOUT; i++) { 1353 DELAY(10); 1354 1355 val1 = REG_RD(sc, BCE_EMAC_MDIO_COMM); 1356 if (!(val1 & BCE_EMAC_MDIO_COMM_START_BUSY)) { 1357 DELAY(5); 1358 break; 1359 } 1360 } 1361 1362 if (val1 & BCE_EMAC_MDIO_COMM_START_BUSY) 1363 if_printf(&sc->arpcom.ac_if, "PHY write timeout!\n"); 1364 1365 if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) { 1366 val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE); 1367 val1 |= BCE_EMAC_MDIO_MODE_AUTO_POLL; 1368 1369 REG_WR(sc, BCE_EMAC_MDIO_MODE, val1); 1370 REG_RD(sc, BCE_EMAC_MDIO_MODE); 1371 1372 DELAY(40); 1373 } 1374 return 0; 1375 } 1376 1377 1378 /****************************************************************************/ 1379 /* MII bus status change. */ 1380 /* */ 1381 /* Called by the MII bus driver when the PHY establishes link to set the */ 1382 /* MAC interface registers. */ 1383 /* */ 1384 /* Returns: */ 1385 /* Nothing. 
*/ 1386 /****************************************************************************/ 1387 static void 1388 bce_miibus_statchg(device_t dev) 1389 { 1390 struct bce_softc *sc = device_get_softc(dev); 1391 struct mii_data *mii = device_get_softc(sc->bce_miibus); 1392 1393 DBPRINT(sc, BCE_INFO, "mii_media_active = 0x%08X\n", 1394 mii->mii_media_active); 1395 1396 #ifdef BCE_DEBUG 1397 /* Decode the interface media flags. */ 1398 if_printf(&sc->arpcom.ac_if, "Media: ( "); 1399 switch(IFM_TYPE(mii->mii_media_active)) { 1400 case IFM_ETHER: 1401 kprintf("Ethernet )"); 1402 break; 1403 default: 1404 kprintf("Unknown )"); 1405 break; 1406 } 1407 1408 kprintf(" Media Options: ( "); 1409 switch(IFM_SUBTYPE(mii->mii_media_active)) { 1410 case IFM_AUTO: 1411 kprintf("Autoselect )"); 1412 break; 1413 case IFM_MANUAL: 1414 kprintf("Manual )"); 1415 break; 1416 case IFM_NONE: 1417 kprintf("None )"); 1418 break; 1419 case IFM_10_T: 1420 kprintf("10Base-T )"); 1421 break; 1422 case IFM_100_TX: 1423 kprintf("100Base-TX )"); 1424 break; 1425 case IFM_1000_SX: 1426 kprintf("1000Base-SX )"); 1427 break; 1428 case IFM_1000_T: 1429 kprintf("1000Base-T )"); 1430 break; 1431 default: 1432 kprintf("Other )"); 1433 break; 1434 } 1435 1436 kprintf(" Global Options: ("); 1437 if (mii->mii_media_active & IFM_FDX) 1438 kprintf(" FullDuplex"); 1439 if (mii->mii_media_active & IFM_HDX) 1440 kprintf(" HalfDuplex"); 1441 if (mii->mii_media_active & IFM_LOOP) 1442 kprintf(" Loopback"); 1443 if (mii->mii_media_active & IFM_FLAG0) 1444 kprintf(" Flag0"); 1445 if (mii->mii_media_active & IFM_FLAG1) 1446 kprintf(" Flag1"); 1447 if (mii->mii_media_active & IFM_FLAG2) 1448 kprintf(" Flag2"); 1449 kprintf(" )\n"); 1450 #endif 1451 1452 BCE_CLRBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT); 1453 1454 /* 1455 * Set MII or GMII interface based on the speed negotiated 1456 * by the PHY. 
1457 */ 1458 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T || 1459 IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) { 1460 DBPRINT(sc, BCE_INFO, "Setting GMII interface.\n"); 1461 BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT_GMII); 1462 } else { 1463 DBPRINT(sc, BCE_INFO, "Setting MII interface.\n"); 1464 BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT_MII); 1465 } 1466 1467 /* 1468 * Set half or full duplex based on the duplicity negotiated 1469 * by the PHY. 1470 */ 1471 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) { 1472 DBPRINT(sc, BCE_INFO, "Setting Full-Duplex interface.\n"); 1473 BCE_CLRBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_HALF_DUPLEX); 1474 } else { 1475 DBPRINT(sc, BCE_INFO, "Setting Half-Duplex interface.\n"); 1476 BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_HALF_DUPLEX); 1477 } 1478 } 1479 1480 1481 /****************************************************************************/ 1482 /* Acquire NVRAM lock. */ 1483 /* */ 1484 /* Before the NVRAM can be accessed the caller must acquire an NVRAM lock. */ 1485 /* Locks 0 and 2 are reserved, lock 1 is used by firmware and lock 2 is */ 1486 /* for use by the driver. */ 1487 /* */ 1488 /* Returns: */ 1489 /* 0 on success, positive value on failure. */ 1490 /****************************************************************************/ 1491 static int 1492 bce_acquire_nvram_lock(struct bce_softc *sc) 1493 { 1494 uint32_t val; 1495 int j; 1496 1497 DBPRINT(sc, BCE_VERBOSE, "Acquiring NVRAM lock.\n"); 1498 1499 /* Request access to the flash interface. 
*/ 1500 REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_SET2); 1501 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) { 1502 val = REG_RD(sc, BCE_NVM_SW_ARB); 1503 if (val & BCE_NVM_SW_ARB_ARB_ARB2) 1504 break; 1505 1506 DELAY(5); 1507 } 1508 1509 if (j >= NVRAM_TIMEOUT_COUNT) { 1510 DBPRINT(sc, BCE_WARN, "Timeout acquiring NVRAM lock!\n"); 1511 return EBUSY; 1512 } 1513 return 0; 1514 } 1515 1516 1517 /****************************************************************************/ 1518 /* Release NVRAM lock. */ 1519 /* */ 1520 /* When the caller is finished accessing NVRAM the lock must be released. */ 1521 /* Locks 0 and 2 are reserved, lock 1 is used by firmware and lock 2 is */ 1522 /* for use by the driver. */ 1523 /* */ 1524 /* Returns: */ 1525 /* 0 on success, positive value on failure. */ 1526 /****************************************************************************/ 1527 static int 1528 bce_release_nvram_lock(struct bce_softc *sc) 1529 { 1530 int j; 1531 uint32_t val; 1532 1533 DBPRINT(sc, BCE_VERBOSE, "Releasing NVRAM lock.\n"); 1534 1535 /* 1536 * Relinquish nvram interface. 1537 */ 1538 REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_CLR2); 1539 1540 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) { 1541 val = REG_RD(sc, BCE_NVM_SW_ARB); 1542 if (!(val & BCE_NVM_SW_ARB_ARB_ARB2)) 1543 break; 1544 1545 DELAY(5); 1546 } 1547 1548 if (j >= NVRAM_TIMEOUT_COUNT) { 1549 DBPRINT(sc, BCE_WARN, "Timeout reeasing NVRAM lock!\n"); 1550 return EBUSY; 1551 } 1552 return 0; 1553 } 1554 1555 1556 /****************************************************************************/ 1557 /* Enable NVRAM access. */ 1558 /* */ 1559 /* Before accessing NVRAM for read or write operations the caller must */ 1560 /* enabled NVRAM access. */ 1561 /* */ 1562 /* Returns: */ 1563 /* Nothing. 
*/ 1564 /****************************************************************************/ 1565 static void 1566 bce_enable_nvram_access(struct bce_softc *sc) 1567 { 1568 uint32_t val; 1569 1570 DBPRINT(sc, BCE_VERBOSE, "Enabling NVRAM access.\n"); 1571 1572 val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE); 1573 /* Enable both bits, even on read. */ 1574 REG_WR(sc, BCE_NVM_ACCESS_ENABLE, 1575 val | BCE_NVM_ACCESS_ENABLE_EN | BCE_NVM_ACCESS_ENABLE_WR_EN); 1576 } 1577 1578 1579 /****************************************************************************/ 1580 /* Disable NVRAM access. */ 1581 /* */ 1582 /* When the caller is finished accessing NVRAM access must be disabled. */ 1583 /* */ 1584 /* Returns: */ 1585 /* Nothing. */ 1586 /****************************************************************************/ 1587 static void 1588 bce_disable_nvram_access(struct bce_softc *sc) 1589 { 1590 uint32_t val; 1591 1592 DBPRINT(sc, BCE_VERBOSE, "Disabling NVRAM access.\n"); 1593 1594 val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE); 1595 1596 /* Disable both bits, even after read. */ 1597 REG_WR(sc, BCE_NVM_ACCESS_ENABLE, 1598 val & ~(BCE_NVM_ACCESS_ENABLE_EN | BCE_NVM_ACCESS_ENABLE_WR_EN)); 1599 } 1600 1601 1602 /****************************************************************************/ 1603 /* Read a dword (32 bits) from NVRAM. */ 1604 /* */ 1605 /* Read a 32 bit word from NVRAM. The caller is assumed to have already */ 1606 /* obtained the NVRAM lock and enabled the controller for NVRAM access. */ 1607 /* */ 1608 /* Returns: */ 1609 /* 0 on success and the 32 bit value read, positive value on failure. */ 1610 /****************************************************************************/ 1611 static int 1612 bce_nvram_read_dword(struct bce_softc *sc, uint32_t offset, uint8_t *ret_val, 1613 uint32_t cmd_flags) 1614 { 1615 uint32_t cmd; 1616 int i, rc = 0; 1617 1618 /* Build the command word. */ 1619 cmd = BCE_NVM_COMMAND_DOIT | cmd_flags; 1620 1621 /* Calculate the offset for buffered flash. 
*/ 1622 if (sc->bce_flash_info->flags & BCE_NV_TRANSLATE) { 1623 offset = ((offset / sc->bce_flash_info->page_size) << 1624 sc->bce_flash_info->page_bits) + 1625 (offset % sc->bce_flash_info->page_size); 1626 } 1627 1628 /* 1629 * Clear the DONE bit separately, set the address to read, 1630 * and issue the read. 1631 */ 1632 REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE); 1633 REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE); 1634 REG_WR(sc, BCE_NVM_COMMAND, cmd); 1635 1636 /* Wait for completion. */ 1637 for (i = 0; i < NVRAM_TIMEOUT_COUNT; i++) { 1638 uint32_t val; 1639 1640 DELAY(5); 1641 1642 val = REG_RD(sc, BCE_NVM_COMMAND); 1643 if (val & BCE_NVM_COMMAND_DONE) { 1644 val = REG_RD(sc, BCE_NVM_READ); 1645 1646 val = be32toh(val); 1647 memcpy(ret_val, &val, 4); 1648 break; 1649 } 1650 } 1651 1652 /* Check for errors. */ 1653 if (i >= NVRAM_TIMEOUT_COUNT) { 1654 if_printf(&sc->arpcom.ac_if, 1655 "Timeout error reading NVRAM at offset 0x%08X!\n", 1656 offset); 1657 rc = EBUSY; 1658 } 1659 return rc; 1660 } 1661 1662 1663 /****************************************************************************/ 1664 /* Initialize NVRAM access. */ 1665 /* */ 1666 /* Identify the NVRAM device in use and prepare the NVRAM interface to */ 1667 /* access that device. */ 1668 /* */ 1669 /* Returns: */ 1670 /* 0 on success, positive value on failure. */ 1671 /****************************************************************************/ 1672 static int 1673 bce_init_nvram(struct bce_softc *sc) 1674 { 1675 uint32_t val; 1676 int j, entry_count, rc = 0; 1677 const struct flash_spec *flash; 1678 1679 DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __func__); 1680 1681 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 || 1682 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) { 1683 sc->bce_flash_info = &flash_5709; 1684 goto bce_init_nvram_get_flash_size; 1685 } 1686 1687 /* Determine the selected interface. 
*/ 1688 val = REG_RD(sc, BCE_NVM_CFG1); 1689 1690 entry_count = sizeof(flash_table) / sizeof(struct flash_spec); 1691 1692 /* 1693 * Flash reconfiguration is required to support additional 1694 * NVRAM devices not directly supported in hardware. 1695 * Check if the flash interface was reconfigured 1696 * by the bootcode. 1697 */ 1698 1699 if (val & 0x40000000) { 1700 /* Flash interface reconfigured by bootcode. */ 1701 1702 DBPRINT(sc, BCE_INFO_LOAD, 1703 "%s(): Flash WAS reconfigured.\n", __func__); 1704 1705 for (j = 0, flash = flash_table; j < entry_count; 1706 j++, flash++) { 1707 if ((val & FLASH_BACKUP_STRAP_MASK) == 1708 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) { 1709 sc->bce_flash_info = flash; 1710 break; 1711 } 1712 } 1713 } else { 1714 /* Flash interface not yet reconfigured. */ 1715 uint32_t mask; 1716 1717 DBPRINT(sc, BCE_INFO_LOAD, 1718 "%s(): Flash was NOT reconfigured.\n", __func__); 1719 1720 if (val & (1 << 23)) 1721 mask = FLASH_BACKUP_STRAP_MASK; 1722 else 1723 mask = FLASH_STRAP_MASK; 1724 1725 /* Look for the matching NVRAM device configuration data. */ 1726 for (j = 0, flash = flash_table; j < entry_count; 1727 j++, flash++) { 1728 /* Check if the device matches any of the known devices. */ 1729 if ((val & mask) == (flash->strapping & mask)) { 1730 /* Found a device match. */ 1731 sc->bce_flash_info = flash; 1732 1733 /* Request access to the flash interface. */ 1734 rc = bce_acquire_nvram_lock(sc); 1735 if (rc != 0) 1736 return rc; 1737 1738 /* Reconfigure the flash interface. */ 1739 bce_enable_nvram_access(sc); 1740 REG_WR(sc, BCE_NVM_CFG1, flash->config1); 1741 REG_WR(sc, BCE_NVM_CFG2, flash->config2); 1742 REG_WR(sc, BCE_NVM_CFG3, flash->config3); 1743 REG_WR(sc, BCE_NVM_WRITE1, flash->write1); 1744 bce_disable_nvram_access(sc); 1745 bce_release_nvram_lock(sc); 1746 break; 1747 } 1748 } 1749 } 1750 1751 /* Check if a matching device was found. 
*/ 1752 if (j == entry_count) { 1753 sc->bce_flash_info = NULL; 1754 if_printf(&sc->arpcom.ac_if, "Unknown Flash NVRAM found!\n"); 1755 return ENODEV; 1756 } 1757 1758 bce_init_nvram_get_flash_size: 1759 /* Write the flash config data to the shared memory interface. */ 1760 val = bce_shmem_rd(sc, BCE_SHARED_HW_CFG_CONFIG2) & 1761 BCE_SHARED_HW_CFG2_NVM_SIZE_MASK; 1762 if (val) 1763 sc->bce_flash_size = val; 1764 else 1765 sc->bce_flash_size = sc->bce_flash_info->total_size; 1766 1767 DBPRINT(sc, BCE_INFO_LOAD, "%s() flash->total_size = 0x%08X\n", 1768 __func__, sc->bce_flash_info->total_size); 1769 1770 DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __func__); 1771 1772 return rc; 1773 } 1774 1775 1776 /****************************************************************************/ 1777 /* Read an arbitrary range of data from NVRAM. */ 1778 /* */ 1779 /* Prepares the NVRAM interface for access and reads the requested data */ 1780 /* into the supplied buffer. */ 1781 /* */ 1782 /* Returns: */ 1783 /* 0 on success and the data read, positive value on failure. */ 1784 /****************************************************************************/ 1785 static int 1786 bce_nvram_read(struct bce_softc *sc, uint32_t offset, uint8_t *ret_buf, 1787 int buf_size) 1788 { 1789 uint32_t cmd_flags, offset32, len32, extra; 1790 int rc = 0; 1791 1792 if (buf_size == 0) 1793 return 0; 1794 1795 /* Request access to the flash interface. */ 1796 rc = bce_acquire_nvram_lock(sc); 1797 if (rc != 0) 1798 return rc; 1799 1800 /* Enable access to flash interface */ 1801 bce_enable_nvram_access(sc); 1802 1803 len32 = buf_size; 1804 offset32 = offset; 1805 extra = 0; 1806 1807 cmd_flags = 0; 1808 1809 /* XXX should we release nvram lock if read_dword() fails? 
*/ 1810 if (offset32 & 3) { 1811 uint8_t buf[4]; 1812 uint32_t pre_len; 1813 1814 offset32 &= ~3; 1815 pre_len = 4 - (offset & 3); 1816 1817 if (pre_len >= len32) { 1818 pre_len = len32; 1819 cmd_flags = BCE_NVM_COMMAND_FIRST | BCE_NVM_COMMAND_LAST; 1820 } else { 1821 cmd_flags = BCE_NVM_COMMAND_FIRST; 1822 } 1823 1824 rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags); 1825 if (rc) 1826 return rc; 1827 1828 memcpy(ret_buf, buf + (offset & 3), pre_len); 1829 1830 offset32 += 4; 1831 ret_buf += pre_len; 1832 len32 -= pre_len; 1833 } 1834 1835 if (len32 & 3) { 1836 extra = 4 - (len32 & 3); 1837 len32 = (len32 + 4) & ~3; 1838 } 1839 1840 if (len32 == 4) { 1841 uint8_t buf[4]; 1842 1843 if (cmd_flags) 1844 cmd_flags = BCE_NVM_COMMAND_LAST; 1845 else 1846 cmd_flags = BCE_NVM_COMMAND_FIRST | 1847 BCE_NVM_COMMAND_LAST; 1848 1849 rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags); 1850 1851 memcpy(ret_buf, buf, 4 - extra); 1852 } else if (len32 > 0) { 1853 uint8_t buf[4]; 1854 1855 /* Read the first word. */ 1856 if (cmd_flags) 1857 cmd_flags = 0; 1858 else 1859 cmd_flags = BCE_NVM_COMMAND_FIRST; 1860 1861 rc = bce_nvram_read_dword(sc, offset32, ret_buf, cmd_flags); 1862 1863 /* Advance to the next dword. */ 1864 offset32 += 4; 1865 ret_buf += 4; 1866 len32 -= 4; 1867 1868 while (len32 > 4 && rc == 0) { 1869 rc = bce_nvram_read_dword(sc, offset32, ret_buf, 0); 1870 1871 /* Advance to the next dword. */ 1872 offset32 += 4; 1873 ret_buf += 4; 1874 len32 -= 4; 1875 } 1876 1877 if (rc) 1878 goto bce_nvram_read_locked_exit; 1879 1880 cmd_flags = BCE_NVM_COMMAND_LAST; 1881 rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags); 1882 1883 memcpy(ret_buf, buf, 4 - extra); 1884 } 1885 1886 bce_nvram_read_locked_exit: 1887 /* Disable access to flash interface and release the lock. 
*/ 1888 bce_disable_nvram_access(sc); 1889 bce_release_nvram_lock(sc); 1890 1891 return rc; 1892 } 1893 1894 1895 /****************************************************************************/ 1896 /* Verifies that NVRAM is accessible and contains valid data. */ 1897 /* */ 1898 /* Reads the configuration data from NVRAM and verifies that the CRC is */ 1899 /* correct. */ 1900 /* */ 1901 /* Returns: */ 1902 /* 0 on success, positive value on failure. */ 1903 /****************************************************************************/ 1904 static int 1905 bce_nvram_test(struct bce_softc *sc) 1906 { 1907 uint32_t buf[BCE_NVRAM_SIZE / 4]; 1908 uint32_t magic, csum; 1909 uint8_t *data = (uint8_t *)buf; 1910 int rc = 0; 1911 1912 /* 1913 * Check that the device NVRAM is valid by reading 1914 * the magic value at offset 0. 1915 */ 1916 rc = bce_nvram_read(sc, 0, data, 4); 1917 if (rc != 0) 1918 return rc; 1919 1920 magic = be32toh(buf[0]); 1921 if (magic != BCE_NVRAM_MAGIC) { 1922 if_printf(&sc->arpcom.ac_if, 1923 "Invalid NVRAM magic value! Expected: 0x%08X, " 1924 "Found: 0x%08X\n", BCE_NVRAM_MAGIC, magic); 1925 return ENODEV; 1926 } 1927 1928 /* 1929 * Verify that the device NVRAM includes valid 1930 * configuration data. 1931 */ 1932 rc = bce_nvram_read(sc, 0x100, data, BCE_NVRAM_SIZE); 1933 if (rc != 0) 1934 return rc; 1935 1936 csum = ether_crc32_le(data, 0x100); 1937 if (csum != BCE_CRC32_RESIDUAL) { 1938 if_printf(&sc->arpcom.ac_if, 1939 "Invalid Manufacturing Information NVRAM CRC! " 1940 "Expected: 0x%08X, Found: 0x%08X\n", 1941 BCE_CRC32_RESIDUAL, csum); 1942 return ENODEV; 1943 } 1944 1945 csum = ether_crc32_le(data + 0x100, 0x100); 1946 if (csum != BCE_CRC32_RESIDUAL) { 1947 if_printf(&sc->arpcom.ac_if, 1948 "Invalid Feature Configuration Information " 1949 "NVRAM CRC! 
Expected: 0x%08X, Found: 08%08X\n", 1950 BCE_CRC32_RESIDUAL, csum); 1951 rc = ENODEV; 1952 } 1953 return rc; 1954 } 1955 1956 1957 /****************************************************************************/ 1958 /* Identifies the current media type of the controller and sets the PHY */ 1959 /* address. */ 1960 /* */ 1961 /* Returns: */ 1962 /* Nothing. */ 1963 /****************************************************************************/ 1964 static void 1965 bce_get_media(struct bce_softc *sc) 1966 { 1967 uint32_t val; 1968 1969 sc->bce_phy_addr = 1; 1970 1971 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 || 1972 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) { 1973 uint32_t val = REG_RD(sc, BCE_MISC_DUAL_MEDIA_CTRL); 1974 uint32_t bond_id = val & BCE_MISC_DUAL_MEDIA_CTRL_BOND_ID; 1975 uint32_t strap; 1976 1977 /* 1978 * The BCM5709S is software configurable 1979 * for Copper or SerDes operation. 1980 */ 1981 if (bond_id == BCE_MISC_DUAL_MEDIA_CTRL_BOND_ID_C) { 1982 return; 1983 } else if (bond_id == BCE_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) { 1984 sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG; 1985 return; 1986 } 1987 1988 if (val & BCE_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE) { 1989 strap = (val & BCE_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21; 1990 } else { 1991 strap = 1992 (val & BCE_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8; 1993 } 1994 1995 if (pci_get_function(sc->bce_dev) == 0) { 1996 switch (strap) { 1997 case 0x4: 1998 case 0x5: 1999 case 0x6: 2000 sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG; 2001 break; 2002 } 2003 } else { 2004 switch (strap) { 2005 case 0x1: 2006 case 0x2: 2007 case 0x4: 2008 sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG; 2009 break; 2010 } 2011 } 2012 } else if (BCE_CHIP_BOND_ID(sc) & BCE_CHIP_BOND_ID_SERDES_BIT) { 2013 sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG; 2014 } 2015 2016 if (sc->bce_phy_flags & BCE_PHY_SERDES_FLAG) { 2017 sc->bce_flags |= BCE_NO_WOL_FLAG; 2018 if (BCE_CHIP_NUM(sc) != BCE_CHIP_NUM_5706) { 2019 sc->bce_phy_addr = 2; 2020 val = bce_shmem_rd(sc, 
BCE_SHARED_HW_CFG_CONFIG); 2021 if (val & BCE_SHARED_HW_CFG_PHY_2_5G) 2022 sc->bce_phy_flags |= BCE_PHY_2_5G_CAPABLE_FLAG; 2023 } 2024 } else if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) || 2025 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708)) { 2026 sc->bce_phy_flags |= BCE_PHY_CRC_FIX_FLAG; 2027 } 2028 } 2029 2030 2031 /****************************************************************************/ 2032 /* Free any DMA memory owned by the driver. */ 2033 /* */ 2034 /* Scans through each data structre that requires DMA memory and frees */ 2035 /* the memory if allocated. */ 2036 /* */ 2037 /* Returns: */ 2038 /* Nothing. */ 2039 /****************************************************************************/ 2040 static void 2041 bce_dma_free(struct bce_softc *sc) 2042 { 2043 int i; 2044 2045 /* Destroy the status block. */ 2046 if (sc->status_tag != NULL) { 2047 if (sc->status_block != NULL) { 2048 bus_dmamap_unload(sc->status_tag, sc->status_map); 2049 bus_dmamem_free(sc->status_tag, sc->status_block, 2050 sc->status_map); 2051 } 2052 bus_dma_tag_destroy(sc->status_tag); 2053 } 2054 2055 /* Destroy the statistics block. */ 2056 if (sc->stats_tag != NULL) { 2057 if (sc->stats_block != NULL) { 2058 bus_dmamap_unload(sc->stats_tag, sc->stats_map); 2059 bus_dmamem_free(sc->stats_tag, sc->stats_block, 2060 sc->stats_map); 2061 } 2062 bus_dma_tag_destroy(sc->stats_tag); 2063 } 2064 2065 /* Destroy the CTX DMA stuffs. */ 2066 if (sc->ctx_tag != NULL) { 2067 for (i = 0; i < sc->ctx_pages; i++) { 2068 if (sc->ctx_block[i] != NULL) { 2069 bus_dmamap_unload(sc->ctx_tag, sc->ctx_map[i]); 2070 bus_dmamem_free(sc->ctx_tag, sc->ctx_block[i], 2071 sc->ctx_map[i]); 2072 } 2073 } 2074 bus_dma_tag_destroy(sc->ctx_tag); 2075 } 2076 2077 /* Destroy the TX buffer descriptor DMA stuffs. 
*/ 2078 if (sc->tx_bd_chain_tag != NULL) { 2079 for (i = 0; i < sc->tx_pages; i++) { 2080 if (sc->tx_bd_chain[i] != NULL) { 2081 bus_dmamap_unload(sc->tx_bd_chain_tag, 2082 sc->tx_bd_chain_map[i]); 2083 bus_dmamem_free(sc->tx_bd_chain_tag, 2084 sc->tx_bd_chain[i], 2085 sc->tx_bd_chain_map[i]); 2086 } 2087 } 2088 bus_dma_tag_destroy(sc->tx_bd_chain_tag); 2089 } 2090 2091 /* Destroy the RX buffer descriptor DMA stuffs. */ 2092 if (sc->rx_bd_chain_tag != NULL) { 2093 for (i = 0; i < sc->rx_pages; i++) { 2094 if (sc->rx_bd_chain[i] != NULL) { 2095 bus_dmamap_unload(sc->rx_bd_chain_tag, 2096 sc->rx_bd_chain_map[i]); 2097 bus_dmamem_free(sc->rx_bd_chain_tag, 2098 sc->rx_bd_chain[i], 2099 sc->rx_bd_chain_map[i]); 2100 } 2101 } 2102 bus_dma_tag_destroy(sc->rx_bd_chain_tag); 2103 } 2104 2105 /* Destroy the TX mbuf DMA stuffs. */ 2106 if (sc->tx_mbuf_tag != NULL) { 2107 for (i = 0; i < TOTAL_TX_BD(sc); i++) { 2108 /* Must have been unloaded in bce_stop() */ 2109 KKASSERT(sc->tx_mbuf_ptr[i] == NULL); 2110 bus_dmamap_destroy(sc->tx_mbuf_tag, 2111 sc->tx_mbuf_map[i]); 2112 } 2113 bus_dma_tag_destroy(sc->tx_mbuf_tag); 2114 } 2115 2116 /* Destroy the RX mbuf DMA stuffs. 
*/ 2117 if (sc->rx_mbuf_tag != NULL) { 2118 for (i = 0; i < TOTAL_RX_BD(sc); i++) { 2119 /* Must have been unloaded in bce_stop() */ 2120 KKASSERT(sc->rx_mbuf_ptr[i] == NULL); 2121 bus_dmamap_destroy(sc->rx_mbuf_tag, 2122 sc->rx_mbuf_map[i]); 2123 } 2124 bus_dmamap_destroy(sc->rx_mbuf_tag, sc->rx_mbuf_tmpmap); 2125 bus_dma_tag_destroy(sc->rx_mbuf_tag); 2126 } 2127 2128 /* Destroy the parent tag */ 2129 if (sc->parent_tag != NULL) 2130 bus_dma_tag_destroy(sc->parent_tag); 2131 2132 if (sc->tx_bd_chain_map != NULL) 2133 kfree(sc->tx_bd_chain_map, M_DEVBUF); 2134 if (sc->tx_bd_chain != NULL) 2135 kfree(sc->tx_bd_chain, M_DEVBUF); 2136 if (sc->tx_bd_chain_paddr != NULL) 2137 kfree(sc->tx_bd_chain_paddr, M_DEVBUF); 2138 2139 if (sc->rx_bd_chain_map != NULL) 2140 kfree(sc->rx_bd_chain_map, M_DEVBUF); 2141 if (sc->rx_bd_chain != NULL) 2142 kfree(sc->rx_bd_chain, M_DEVBUF); 2143 if (sc->rx_bd_chain_paddr != NULL) 2144 kfree(sc->rx_bd_chain_paddr, M_DEVBUF); 2145 2146 if (sc->tx_mbuf_map != NULL) 2147 kfree(sc->tx_mbuf_map, M_DEVBUF); 2148 if (sc->tx_mbuf_ptr != NULL) 2149 kfree(sc->tx_mbuf_ptr, M_DEVBUF); 2150 2151 if (sc->rx_mbuf_map != NULL) 2152 kfree(sc->rx_mbuf_map, M_DEVBUF); 2153 if (sc->rx_mbuf_ptr != NULL) 2154 kfree(sc->rx_mbuf_ptr, M_DEVBUF); 2155 if (sc->rx_mbuf_paddr != NULL) 2156 kfree(sc->rx_mbuf_paddr, M_DEVBUF); 2157 } 2158 2159 2160 /****************************************************************************/ 2161 /* Get DMA memory from the OS. */ 2162 /* */ 2163 /* Validates that the OS has provided DMA buffers in response to a */ 2164 /* bus_dmamap_load() call and saves the physical address of those buffers. */ 2165 /* When the callback is used the OS will return 0 for the mapping function */ 2166 /* (bus_dmamap_load()) so we use the value of map_arg->maxsegs to pass any */ 2167 /* failures back to the caller. */ 2168 /* */ 2169 /* Returns: */ 2170 /* Nothing. 
*/
/****************************************************************************/
static void
bce_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	/* 'arg' is the address of a bus_addr_t supplied by the caller. */
	bus_addr_t *busaddr = arg;

	/*
	 * Simulate a mapping failure.
	 * XXX not correct.
	 */
	DBRUNIF(DB_RANDOMTRUE(bce_debug_dma_map_addr_failure),
		kprintf("bce: %s(%d): Simulating DMA mapping error.\n",
			__FILE__, __LINE__);
		error = ENOMEM);

	/* Check for an error and signal the caller that an error occurred. */
	if (error)
		return;

	/*
	 * All loads in this driver are single-segment; record the bus
	 * address of that one segment for the caller.
	 */
	KASSERT(nseg == 1, ("only one segment is allowed"));
	*busaddr = segs->ds_addr;
}


/****************************************************************************/
/* Allocate any DMA memory needed by the driver.                            */
/*                                                                          */
/* Allocates DMA memory needed for the various global structures needed by  */
/* hardware.                                                                */
/*                                                                          */
/* Memory alignment requirements:                                           */
/* -----------------+----------+----------+----------+----------+           */
/* Data Structure   |   5706   |   5708   |   5709   |   5716   |           */
/* -----------------+----------+----------+----------+----------+           */
/* Status Block     | 8 bytes  | 8 bytes  | 16 bytes | 16 bytes |           */
/* Statistics Block | 8 bytes  | 8 bytes  | 16 bytes | 16 bytes |           */
/* RX Buffers       | 16 bytes | 16 bytes | 16 bytes | 16 bytes |           */
/* PG Buffers       |   none   |   none   |   none   |   none   |           */
/* TX Buffers       |   none   |   none   |   none   |   none   |           */
/* Chain Pages(1)   |   4KiB   |   4KiB   |   4KiB   |   4KiB   |           */
/* Context Pages(1) |   N/A    |   N/A    |   4KiB   |   4KiB   |           */
/* -----------------+----------+----------+----------+----------+           */
/*                                                                          */
/* (1) Must align with CPU page size (BCM_PAGE_SIZE).                       */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.
*/ 2218 /****************************************************************************/ 2219 static int 2220 bce_dma_alloc(struct bce_softc *sc) 2221 { 2222 struct ifnet *ifp = &sc->arpcom.ac_if; 2223 int i, j, rc = 0, pages; 2224 bus_addr_t busaddr, max_busaddr; 2225 bus_size_t status_align, stats_align; 2226 2227 pages = device_getenv_int(sc->bce_dev, "rx_pages", bce_rx_pages); 2228 if (pages <= 0 || pages > RX_PAGES_MAX || !powerof2(pages)) { 2229 device_printf(sc->bce_dev, "invalid # of RX pages\n"); 2230 pages = RX_PAGES_DEFAULT; 2231 } 2232 sc->rx_pages = pages; 2233 2234 pages = device_getenv_int(sc->bce_dev, "tx_pages", bce_tx_pages); 2235 if (pages <= 0 || pages > TX_PAGES_MAX || !powerof2(pages)) { 2236 device_printf(sc->bce_dev, "invalid # of TX pages\n"); 2237 pages = TX_PAGES_DEFAULT; 2238 } 2239 sc->tx_pages = pages; 2240 2241 sc->tx_bd_chain_map = kmalloc(sizeof(bus_dmamap_t) * sc->tx_pages, 2242 M_DEVBUF, M_WAITOK | M_ZERO); 2243 sc->tx_bd_chain = kmalloc(sizeof(struct tx_bd *) * sc->tx_pages, 2244 M_DEVBUF, M_WAITOK | M_ZERO); 2245 sc->tx_bd_chain_paddr = kmalloc(sizeof(bus_addr_t) * sc->tx_pages, 2246 M_DEVBUF, M_WAITOK | M_ZERO); 2247 2248 sc->rx_bd_chain_map = kmalloc(sizeof(bus_dmamap_t) * sc->rx_pages, 2249 M_DEVBUF, M_WAITOK | M_ZERO); 2250 sc->rx_bd_chain = kmalloc(sizeof(struct rx_bd *) * sc->rx_pages, 2251 M_DEVBUF, M_WAITOK | M_ZERO); 2252 sc->rx_bd_chain_paddr = kmalloc(sizeof(bus_addr_t) * sc->rx_pages, 2253 M_DEVBUF, M_WAITOK | M_ZERO); 2254 2255 sc->tx_mbuf_map = kmalloc(sizeof(bus_dmamap_t) * TOTAL_TX_BD(sc), 2256 M_DEVBUF, M_WAITOK | M_ZERO); 2257 sc->tx_mbuf_ptr = kmalloc(sizeof(struct mbuf *) * TOTAL_TX_BD(sc), 2258 M_DEVBUF, M_WAITOK | M_ZERO); 2259 2260 sc->rx_mbuf_map = kmalloc(sizeof(bus_dmamap_t) * TOTAL_RX_BD(sc), 2261 M_DEVBUF, M_WAITOK | M_ZERO); 2262 sc->rx_mbuf_ptr = kmalloc(sizeof(struct mbuf *) * TOTAL_RX_BD(sc), 2263 M_DEVBUF, M_WAITOK | M_ZERO); 2264 sc->rx_mbuf_paddr = kmalloc(sizeof(bus_addr_t) * TOTAL_RX_BD(sc), 
2265 M_DEVBUF, M_WAITOK | M_ZERO); 2266 2267 /* 2268 * The embedded PCIe to PCI-X bridge (EPB) 2269 * in the 5708 cannot address memory above 2270 * 40 bits (E7_5708CB1_23043 & E6_5708SB1_23043). 2271 */ 2272 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708) 2273 max_busaddr = BCE_BUS_SPACE_MAXADDR; 2274 else 2275 max_busaddr = BUS_SPACE_MAXADDR; 2276 2277 /* 2278 * BCM5709 and BCM5716 uses host memory as cache for context memory. 2279 */ 2280 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 || 2281 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) { 2282 sc->ctx_pages = BCE_CTX_BLK_SZ / BCM_PAGE_SIZE; 2283 if (sc->ctx_pages == 0) 2284 sc->ctx_pages = 1; 2285 if (sc->ctx_pages > BCE_CTX_PAGES) { 2286 device_printf(sc->bce_dev, "excessive ctx pages %d\n", 2287 sc->ctx_pages); 2288 return ENOMEM; 2289 } 2290 status_align = 16; 2291 stats_align = 16; 2292 } else { 2293 status_align = 8; 2294 stats_align = 8; 2295 } 2296 2297 /* 2298 * Allocate the parent bus DMA tag appropriate for PCI. 2299 */ 2300 rc = bus_dma_tag_create(NULL, 1, BCE_DMA_BOUNDARY, 2301 max_busaddr, BUS_SPACE_MAXADDR, 2302 NULL, NULL, 2303 BUS_SPACE_MAXSIZE_32BIT, 0, 2304 BUS_SPACE_MAXSIZE_32BIT, 2305 0, &sc->parent_tag); 2306 if (rc != 0) { 2307 if_printf(ifp, "Could not allocate parent DMA tag!\n"); 2308 return rc; 2309 } 2310 2311 /* 2312 * Allocate status block. 2313 */ 2314 sc->status_block = bus_dmamem_coherent_any(sc->parent_tag, 2315 status_align, BCE_STATUS_BLK_SZ, 2316 BUS_DMA_WAITOK | BUS_DMA_ZERO, 2317 &sc->status_tag, &sc->status_map, 2318 &sc->status_block_paddr); 2319 if (sc->status_block == NULL) { 2320 if_printf(ifp, "Could not allocate status block!\n"); 2321 return ENOMEM; 2322 } 2323 2324 /* 2325 * Allocate statistics block. 
2326 */ 2327 sc->stats_block = bus_dmamem_coherent_any(sc->parent_tag, 2328 stats_align, BCE_STATS_BLK_SZ, 2329 BUS_DMA_WAITOK | BUS_DMA_ZERO, 2330 &sc->stats_tag, &sc->stats_map, 2331 &sc->stats_block_paddr); 2332 if (sc->stats_block == NULL) { 2333 if_printf(ifp, "Could not allocate statistics block!\n"); 2334 return ENOMEM; 2335 } 2336 2337 /* 2338 * Allocate context block, if needed 2339 */ 2340 if (sc->ctx_pages != 0) { 2341 rc = bus_dma_tag_create(sc->parent_tag, BCM_PAGE_SIZE, 0, 2342 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 2343 NULL, NULL, 2344 BCM_PAGE_SIZE, 1, BCM_PAGE_SIZE, 2345 0, &sc->ctx_tag); 2346 if (rc != 0) { 2347 if_printf(ifp, "Could not allocate " 2348 "context block DMA tag!\n"); 2349 return rc; 2350 } 2351 2352 for (i = 0; i < sc->ctx_pages; i++) { 2353 rc = bus_dmamem_alloc(sc->ctx_tag, 2354 (void **)&sc->ctx_block[i], 2355 BUS_DMA_WAITOK | BUS_DMA_ZERO | 2356 BUS_DMA_COHERENT, 2357 &sc->ctx_map[i]); 2358 if (rc != 0) { 2359 if_printf(ifp, "Could not allocate %dth context " 2360 "DMA memory!\n", i); 2361 return rc; 2362 } 2363 2364 rc = bus_dmamap_load(sc->ctx_tag, sc->ctx_map[i], 2365 sc->ctx_block[i], BCM_PAGE_SIZE, 2366 bce_dma_map_addr, &busaddr, 2367 BUS_DMA_WAITOK); 2368 if (rc != 0) { 2369 if (rc == EINPROGRESS) { 2370 panic("%s coherent memory loading " 2371 "is still in progress!", ifp->if_xname); 2372 } 2373 if_printf(ifp, "Could not map %dth context " 2374 "DMA memory!\n", i); 2375 bus_dmamem_free(sc->ctx_tag, sc->ctx_block[i], 2376 sc->ctx_map[i]); 2377 sc->ctx_block[i] = NULL; 2378 return rc; 2379 } 2380 sc->ctx_paddr[i] = busaddr; 2381 } 2382 } 2383 2384 /* 2385 * Create a DMA tag for the TX buffer descriptor chain, 2386 * allocate and clear the memory, and fetch the 2387 * physical address of the block. 
2388 */ 2389 rc = bus_dma_tag_create(sc->parent_tag, BCM_PAGE_SIZE, 0, 2390 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 2391 NULL, NULL, 2392 BCE_TX_CHAIN_PAGE_SZ, 1, BCE_TX_CHAIN_PAGE_SZ, 2393 0, &sc->tx_bd_chain_tag); 2394 if (rc != 0) { 2395 if_printf(ifp, "Could not allocate " 2396 "TX descriptor chain DMA tag!\n"); 2397 return rc; 2398 } 2399 2400 for (i = 0; i < sc->tx_pages; i++) { 2401 rc = bus_dmamem_alloc(sc->tx_bd_chain_tag, 2402 (void **)&sc->tx_bd_chain[i], 2403 BUS_DMA_WAITOK | BUS_DMA_ZERO | 2404 BUS_DMA_COHERENT, 2405 &sc->tx_bd_chain_map[i]); 2406 if (rc != 0) { 2407 if_printf(ifp, "Could not allocate %dth TX descriptor " 2408 "chain DMA memory!\n", i); 2409 return rc; 2410 } 2411 2412 rc = bus_dmamap_load(sc->tx_bd_chain_tag, 2413 sc->tx_bd_chain_map[i], 2414 sc->tx_bd_chain[i], BCE_TX_CHAIN_PAGE_SZ, 2415 bce_dma_map_addr, &busaddr, 2416 BUS_DMA_WAITOK); 2417 if (rc != 0) { 2418 if (rc == EINPROGRESS) { 2419 panic("%s coherent memory loading " 2420 "is still in progress!", ifp->if_xname); 2421 } 2422 if_printf(ifp, "Could not map %dth TX descriptor " 2423 "chain DMA memory!\n", i); 2424 bus_dmamem_free(sc->tx_bd_chain_tag, 2425 sc->tx_bd_chain[i], 2426 sc->tx_bd_chain_map[i]); 2427 sc->tx_bd_chain[i] = NULL; 2428 return rc; 2429 } 2430 2431 sc->tx_bd_chain_paddr[i] = busaddr; 2432 /* DRC - Fix for 64 bit systems. */ 2433 DBPRINT(sc, BCE_INFO, "tx_bd_chain_paddr[%d] = 0x%08X\n", 2434 i, (uint32_t)sc->tx_bd_chain_paddr[i]); 2435 } 2436 2437 /* Create a DMA tag for TX mbufs. */ 2438 rc = bus_dma_tag_create(sc->parent_tag, 1, 0, 2439 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 2440 NULL, NULL, 2441 IP_MAXPACKET + sizeof(struct ether_vlan_header), 2442 BCE_MAX_SEGMENTS, PAGE_SIZE, 2443 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | 2444 BUS_DMA_ONEBPAGE, 2445 &sc->tx_mbuf_tag); 2446 if (rc != 0) { 2447 if_printf(ifp, "Could not allocate TX mbuf DMA tag!\n"); 2448 return rc; 2449 } 2450 2451 /* Create DMA maps for the TX mbufs clusters. 
*/ 2452 for (i = 0; i < TOTAL_TX_BD(sc); i++) { 2453 rc = bus_dmamap_create(sc->tx_mbuf_tag, 2454 BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE, 2455 &sc->tx_mbuf_map[i]); 2456 if (rc != 0) { 2457 for (j = 0; j < i; ++j) { 2458 bus_dmamap_destroy(sc->tx_mbuf_tag, 2459 sc->tx_mbuf_map[i]); 2460 } 2461 bus_dma_tag_destroy(sc->tx_mbuf_tag); 2462 sc->tx_mbuf_tag = NULL; 2463 2464 if_printf(ifp, "Unable to create " 2465 "%dth TX mbuf DMA map!\n", i); 2466 return rc; 2467 } 2468 } 2469 2470 /* 2471 * Create a DMA tag for the RX buffer descriptor chain, 2472 * allocate and clear the memory, and fetch the physical 2473 * address of the blocks. 2474 */ 2475 rc = bus_dma_tag_create(sc->parent_tag, BCM_PAGE_SIZE, 0, 2476 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 2477 NULL, NULL, 2478 BCE_RX_CHAIN_PAGE_SZ, 1, BCE_RX_CHAIN_PAGE_SZ, 2479 0, &sc->rx_bd_chain_tag); 2480 if (rc != 0) { 2481 if_printf(ifp, "Could not allocate " 2482 "RX descriptor chain DMA tag!\n"); 2483 return rc; 2484 } 2485 2486 for (i = 0; i < sc->rx_pages; i++) { 2487 rc = bus_dmamem_alloc(sc->rx_bd_chain_tag, 2488 (void **)&sc->rx_bd_chain[i], 2489 BUS_DMA_WAITOK | BUS_DMA_ZERO | 2490 BUS_DMA_COHERENT, 2491 &sc->rx_bd_chain_map[i]); 2492 if (rc != 0) { 2493 if_printf(ifp, "Could not allocate %dth RX descriptor " 2494 "chain DMA memory!\n", i); 2495 return rc; 2496 } 2497 2498 rc = bus_dmamap_load(sc->rx_bd_chain_tag, 2499 sc->rx_bd_chain_map[i], 2500 sc->rx_bd_chain[i], BCE_RX_CHAIN_PAGE_SZ, 2501 bce_dma_map_addr, &busaddr, 2502 BUS_DMA_WAITOK); 2503 if (rc != 0) { 2504 if (rc == EINPROGRESS) { 2505 panic("%s coherent memory loading " 2506 "is still in progress!", ifp->if_xname); 2507 } 2508 if_printf(ifp, "Could not map %dth RX descriptor " 2509 "chain DMA memory!\n", i); 2510 bus_dmamem_free(sc->rx_bd_chain_tag, 2511 sc->rx_bd_chain[i], 2512 sc->rx_bd_chain_map[i]); 2513 sc->rx_bd_chain[i] = NULL; 2514 return rc; 2515 } 2516 2517 sc->rx_bd_chain_paddr[i] = busaddr; 2518 /* DRC - Fix for 64 bit systems. 
*/ 2519 DBPRINT(sc, BCE_INFO, "rx_bd_chain_paddr[%d] = 0x%08X\n", 2520 i, (uint32_t)sc->rx_bd_chain_paddr[i]); 2521 } 2522 2523 /* Create a DMA tag for RX mbufs. */ 2524 rc = bus_dma_tag_create(sc->parent_tag, BCE_DMA_RX_ALIGN, 0, 2525 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 2526 NULL, NULL, 2527 MCLBYTES, 1, MCLBYTES, 2528 BUS_DMA_ALLOCNOW | BUS_DMA_ALIGNED | 2529 BUS_DMA_WAITOK, 2530 &sc->rx_mbuf_tag); 2531 if (rc != 0) { 2532 if_printf(ifp, "Could not allocate RX mbuf DMA tag!\n"); 2533 return rc; 2534 } 2535 2536 /* Create tmp DMA map for RX mbuf clusters. */ 2537 rc = bus_dmamap_create(sc->rx_mbuf_tag, BUS_DMA_WAITOK, 2538 &sc->rx_mbuf_tmpmap); 2539 if (rc != 0) { 2540 bus_dma_tag_destroy(sc->rx_mbuf_tag); 2541 sc->rx_mbuf_tag = NULL; 2542 2543 if_printf(ifp, "Could not create RX mbuf tmp DMA map!\n"); 2544 return rc; 2545 } 2546 2547 /* Create DMA maps for the RX mbuf clusters. */ 2548 for (i = 0; i < TOTAL_RX_BD(sc); i++) { 2549 rc = bus_dmamap_create(sc->rx_mbuf_tag, BUS_DMA_WAITOK, 2550 &sc->rx_mbuf_map[i]); 2551 if (rc != 0) { 2552 for (j = 0; j < i; ++j) { 2553 bus_dmamap_destroy(sc->rx_mbuf_tag, 2554 sc->rx_mbuf_map[j]); 2555 } 2556 bus_dma_tag_destroy(sc->rx_mbuf_tag); 2557 sc->rx_mbuf_tag = NULL; 2558 2559 if_printf(ifp, "Unable to create " 2560 "%dth RX mbuf DMA map!\n", i); 2561 return rc; 2562 } 2563 } 2564 return 0; 2565 } 2566 2567 2568 /****************************************************************************/ 2569 /* Firmware synchronization. */ 2570 /* */ 2571 /* Before performing certain events such as a chip reset, synchronize with */ 2572 /* the firmware first. */ 2573 /* */ 2574 /* Returns: */ 2575 /* 0 for success, positive value for failure. */ 2576 /****************************************************************************/ 2577 static int 2578 bce_fw_sync(struct bce_softc *sc, uint32_t msg_data) 2579 { 2580 int i, rc = 0; 2581 uint32_t val; 2582 2583 /* Don't waste any time if we've timed out before. 
*/ 2584 if (sc->bce_fw_timed_out) 2585 return EBUSY; 2586 2587 /* Increment the message sequence number. */ 2588 sc->bce_fw_wr_seq++; 2589 msg_data |= sc->bce_fw_wr_seq; 2590 2591 DBPRINT(sc, BCE_VERBOSE, "bce_fw_sync(): msg_data = 0x%08X\n", msg_data); 2592 2593 /* Send the message to the bootcode driver mailbox. */ 2594 bce_shmem_wr(sc, BCE_DRV_MB, msg_data); 2595 2596 /* Wait for the bootcode to acknowledge the message. */ 2597 for (i = 0; i < FW_ACK_TIME_OUT_MS; i++) { 2598 /* Check for a response in the bootcode firmware mailbox. */ 2599 val = bce_shmem_rd(sc, BCE_FW_MB); 2600 if ((val & BCE_FW_MSG_ACK) == (msg_data & BCE_DRV_MSG_SEQ)) 2601 break; 2602 DELAY(1000); 2603 } 2604 2605 /* If we've timed out, tell the bootcode that we've stopped waiting. */ 2606 if ((val & BCE_FW_MSG_ACK) != (msg_data & BCE_DRV_MSG_SEQ) && 2607 (msg_data & BCE_DRV_MSG_DATA) != BCE_DRV_MSG_DATA_WAIT0) { 2608 if_printf(&sc->arpcom.ac_if, 2609 "Firmware synchronization timeout! " 2610 "msg_data = 0x%08X\n", msg_data); 2611 2612 msg_data &= ~BCE_DRV_MSG_CODE; 2613 msg_data |= BCE_DRV_MSG_CODE_FW_TIMEOUT; 2614 2615 bce_shmem_wr(sc, BCE_DRV_MB, msg_data); 2616 2617 sc->bce_fw_timed_out = 1; 2618 rc = EBUSY; 2619 } 2620 return rc; 2621 } 2622 2623 2624 /****************************************************************************/ 2625 /* Load Receive Virtual 2 Physical (RV2P) processor firmware. */ 2626 /* */ 2627 /* Returns: */ 2628 /* Nothing. 
*/
/****************************************************************************/
static void
bce_load_rv2p_fw(struct bce_softc *sc, uint32_t *rv2p_code,
		 uint32_t rv2p_code_len, uint32_t rv2p_proc)
{
	int i;
	uint32_t val;

	/* Each RV2P instruction is 8 bytes: a high and a low dword. */
	for (i = 0; i < rv2p_code_len; i += 8) {
		REG_WR(sc, BCE_RV2P_INSTR_HIGH, *rv2p_code);
		rv2p_code++;
		REG_WR(sc, BCE_RV2P_INSTR_LOW, *rv2p_code);
		rv2p_code++;

		/* Commit the instruction pair at index i/8 to the
		 * selected processor's instruction memory. */
		if (rv2p_proc == RV2P_PROC1) {
			val = (i / 8) | BCE_RV2P_PROC1_ADDR_CMD_RDWR;
			REG_WR(sc, BCE_RV2P_PROC1_ADDR_CMD, val);
		} else {
			val = (i / 8) | BCE_RV2P_PROC2_ADDR_CMD_RDWR;
			REG_WR(sc, BCE_RV2P_PROC2_ADDR_CMD, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1)
		REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC1_RESET);
	else
		REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC2_RESET);
}


/****************************************************************************/
/* Load RISC processor firmware.                                            */
/*                                                                          */
/* Loads firmware from the file if_bcefw.h into the scratchpad memory       */
/* associated with a particular processor.                                  */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_load_cpu_fw(struct bce_softc *sc, struct cpu_reg *cpu_reg,
		struct fw_info *fw)
{
	uint32_t offset;
	int j;

	/* The CPU must be halted while its scratchpad is rewritten. */
	bce_halt_cpu(sc, cpu_reg);

	/*
	 * Each section below translates the firmware's MIPS view
	 * address into a scratchpad offset, then writes the section
	 * one dword at a time via indirect register access.
	 */

	/* Load the Text area. */
	offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
	if (fw->text) {
		for (j = 0; j < (fw->text_len / 4); j++, offset += 4)
			REG_WR_IND(sc, offset, fw->text[j]);
	}

	/* Load the Data area. */
	offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
	if (fw->data) {
		for (j = 0; j < (fw->data_len / 4); j++, offset += 4)
			REG_WR_IND(sc, offset, fw->data[j]);
	}

	/* Load the SBSS area. */
	offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
	if (fw->sbss) {
		for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4)
			REG_WR_IND(sc, offset, fw->sbss[j]);
	}

	/* Load the BSS area. */
	offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
	if (fw->bss) {
		for (j = 0; j < (fw->bss_len/4); j++, offset += 4)
			REG_WR_IND(sc, offset, fw->bss[j]);
	}

	/* Load the Read-Only area. */
	offset = cpu_reg->spad_base +
		(fw->rodata_addr - cpu_reg->mips_view_base);
	if (fw->rodata) {
		for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4)
			REG_WR_IND(sc, offset, fw->rodata[j]);
	}

	/* Clear the pre-fetch instruction and set the FW start address. */
	REG_WR_IND(sc, cpu_reg->inst, 0);
	REG_WR_IND(sc, cpu_reg->pc, fw->start_addr);
}


/****************************************************************************/
/* Starts the RISC processor.                                               */
/*                                                                          */
/* Assumes the CPU starting address has already been set.                   */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_start_cpu(struct bce_softc *sc, struct cpu_reg *cpu_reg)
{
	uint32_t val;

	/* Start the CPU: clear the halt bit and any pending state. */
	val = REG_RD_IND(sc, cpu_reg->mode);
	val &= ~cpu_reg->mode_value_halt;
	REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
	REG_WR_IND(sc, cpu_reg->mode, val);
}


/****************************************************************************/
/* Halts the RISC processor.                                                */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
*/
/****************************************************************************/
static void
bce_halt_cpu(struct bce_softc *sc, struct cpu_reg *cpu_reg)
{
	uint32_t val;

	/* Halt the CPU: set the soft-halt bit, then clear CPU state. */
	val = REG_RD_IND(sc, cpu_reg->mode);
	val |= cpu_reg->mode_value_halt;
	REG_WR_IND(sc, cpu_reg->mode, val);
	REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
}


/****************************************************************************/
/* Start the RX CPU.                                                        */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_start_rxp_cpu(struct bce_softc *sc)
{
	struct cpu_reg cpu_reg;

	/* Describe the RXP register map for the generic CPU starter. */
	cpu_reg.mode = BCE_RXP_CPU_MODE;
	cpu_reg.mode_value_halt = BCE_RXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BCE_RXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BCE_RXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BCE_RXP_CPU_REG_FILE;
	cpu_reg.evmask = BCE_RXP_CPU_EVENT_MASK;
	cpu_reg.pc = BCE_RXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BCE_RXP_CPU_INSTRUCTION;
	cpu_reg.bp = BCE_RXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BCE_RXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	bce_start_cpu(sc, &cpu_reg);
}


/****************************************************************************/
/* Initialize the RX CPU.                                                   */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
*/
/****************************************************************************/
static void
bce_init_rxp_cpu(struct bce_softc *sc)
{
	struct cpu_reg cpu_reg;
	struct fw_info fw;

	/* Describe the RXP register map for the firmware loader. */
	cpu_reg.mode = BCE_RXP_CPU_MODE;
	cpu_reg.mode_value_halt = BCE_RXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BCE_RXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BCE_RXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BCE_RXP_CPU_REG_FILE;
	cpu_reg.evmask = BCE_RXP_CPU_EVENT_MASK;
	cpu_reg.pc = BCE_RXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BCE_RXP_CPU_INSTRUCTION;
	cpu_reg.bp = BCE_RXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BCE_RXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	/* Select the firmware image: b09 for 5709/5716, b06 otherwise. */
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
		fw.ver_major = bce_RXP_b09FwReleaseMajor;
		fw.ver_minor = bce_RXP_b09FwReleaseMinor;
		fw.ver_fix = bce_RXP_b09FwReleaseFix;
		fw.start_addr = bce_RXP_b09FwStartAddr;

		fw.text_addr = bce_RXP_b09FwTextAddr;
		fw.text_len = bce_RXP_b09FwTextLen;
		fw.text_index = 0;
		fw.text = bce_RXP_b09FwText;

		fw.data_addr = bce_RXP_b09FwDataAddr;
		fw.data_len = bce_RXP_b09FwDataLen;
		fw.data_index = 0;
		fw.data = bce_RXP_b09FwData;

		fw.sbss_addr = bce_RXP_b09FwSbssAddr;
		fw.sbss_len = bce_RXP_b09FwSbssLen;
		fw.sbss_index = 0;
		fw.sbss = bce_RXP_b09FwSbss;

		fw.bss_addr = bce_RXP_b09FwBssAddr;
		fw.bss_len = bce_RXP_b09FwBssLen;
		fw.bss_index = 0;
		fw.bss = bce_RXP_b09FwBss;

		fw.rodata_addr = bce_RXP_b09FwRodataAddr;
		fw.rodata_len = bce_RXP_b09FwRodataLen;
		fw.rodata_index = 0;
		fw.rodata = bce_RXP_b09FwRodata;
	} else {
		fw.ver_major = bce_RXP_b06FwReleaseMajor;
		fw.ver_minor = bce_RXP_b06FwReleaseMinor;
		fw.ver_fix = bce_RXP_b06FwReleaseFix;
		fw.start_addr = bce_RXP_b06FwStartAddr;

		fw.text_addr = bce_RXP_b06FwTextAddr;
		fw.text_len = bce_RXP_b06FwTextLen;
		fw.text_index = 0;
		fw.text = bce_RXP_b06FwText;

		fw.data_addr = bce_RXP_b06FwDataAddr;
		fw.data_len = bce_RXP_b06FwDataLen;
		fw.data_index = 0;
		fw.data = bce_RXP_b06FwData;

		fw.sbss_addr = bce_RXP_b06FwSbssAddr;
		fw.sbss_len = bce_RXP_b06FwSbssLen;
		fw.sbss_index = 0;
		fw.sbss = bce_RXP_b06FwSbss;

		fw.bss_addr = bce_RXP_b06FwBssAddr;
		fw.bss_len = bce_RXP_b06FwBssLen;
		fw.bss_index = 0;
		fw.bss = bce_RXP_b06FwBss;

		fw.rodata_addr = bce_RXP_b06FwRodataAddr;
		fw.rodata_len = bce_RXP_b06FwRodataLen;
		fw.rodata_index = 0;
		fw.rodata = bce_RXP_b06FwRodata;
	}

	DBPRINT(sc, BCE_INFO_RESET, "Loading RX firmware.\n");
	bce_load_cpu_fw(sc, &cpu_reg, &fw);
	/* Delay RXP start until initialization is complete. */
}


/****************************************************************************/
/* Initialize the TX CPU.                                                   */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
*/
/****************************************************************************/
static void
bce_init_txp_cpu(struct bce_softc *sc)
{
	/* TXP register map handed to the generic firmware loader. */
	struct cpu_reg cpu_reg = {
		.mode			= BCE_TXP_CPU_MODE,
		.mode_value_halt	= BCE_TXP_CPU_MODE_SOFT_HALT,
		.mode_value_sstep	= BCE_TXP_CPU_MODE_STEP_ENA,
		.state			= BCE_TXP_CPU_STATE,
		.state_value_clear	= 0xffffff,
		.gpr0			= BCE_TXP_CPU_REG_FILE,
		.evmask			= BCE_TXP_CPU_EVENT_MASK,
		.pc			= BCE_TXP_CPU_PROGRAM_COUNTER,
		.inst			= BCE_TXP_CPU_INSTRUCTION,
		.bp			= BCE_TXP_CPU_HW_BREAKPOINT,
		.spad_base		= BCE_TXP_SCRATCH,
		.mips_view_base		= 0x8000000,
	};
	struct fw_info fw;

	/* Pick the firmware image matching the controller family. */
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
		fw = (struct fw_info){
			.ver_major	= bce_TXP_b09FwReleaseMajor,
			.ver_minor	= bce_TXP_b09FwReleaseMinor,
			.ver_fix	= bce_TXP_b09FwReleaseFix,
			.start_addr	= bce_TXP_b09FwStartAddr,

			.text_addr	= bce_TXP_b09FwTextAddr,
			.text_len	= bce_TXP_b09FwTextLen,
			.text_index	= 0,
			.text		= bce_TXP_b09FwText,

			.data_addr	= bce_TXP_b09FwDataAddr,
			.data_len	= bce_TXP_b09FwDataLen,
			.data_index	= 0,
			.data		= bce_TXP_b09FwData,

			.sbss_addr	= bce_TXP_b09FwSbssAddr,
			.sbss_len	= bce_TXP_b09FwSbssLen,
			.sbss_index	= 0,
			.sbss		= bce_TXP_b09FwSbss,

			.bss_addr	= bce_TXP_b09FwBssAddr,
			.bss_len	= bce_TXP_b09FwBssLen,
			.bss_index	= 0,
			.bss		= bce_TXP_b09FwBss,

			.rodata_addr	= bce_TXP_b09FwRodataAddr,
			.rodata_len	= bce_TXP_b09FwRodataLen,
			.rodata_index	= 0,
			.rodata		= bce_TXP_b09FwRodata,
		};
	} else {
		fw = (struct fw_info){
			.ver_major	= bce_TXP_b06FwReleaseMajor,
			.ver_minor	= bce_TXP_b06FwReleaseMinor,
			.ver_fix	= bce_TXP_b06FwReleaseFix,
			.start_addr	= bce_TXP_b06FwStartAddr,

			.text_addr	= bce_TXP_b06FwTextAddr,
			.text_len	= bce_TXP_b06FwTextLen,
			.text_index	= 0,
			.text		= bce_TXP_b06FwText,

			.data_addr	= bce_TXP_b06FwDataAddr,
			.data_len	= bce_TXP_b06FwDataLen,
			.data_index	= 0,
			.data		= bce_TXP_b06FwData,

			.sbss_addr	= bce_TXP_b06FwSbssAddr,
			.sbss_len	= bce_TXP_b06FwSbssLen,
			.sbss_index	= 0,
			.sbss		= bce_TXP_b06FwSbss,

			.bss_addr	= bce_TXP_b06FwBssAddr,
			.bss_len	= bce_TXP_b06FwBssLen,
			.bss_index	= 0,
			.bss		= bce_TXP_b06FwBss,

			.rodata_addr	= bce_TXP_b06FwRodataAddr,
			.rodata_len	= bce_TXP_b06FwRodataLen,
			.rodata_index	= 0,
			.rodata		= bce_TXP_b06FwRodata,
		};
	}

	DBPRINT(sc, BCE_INFO_RESET, "Loading TX firmware.\n");
	bce_load_cpu_fw(sc, &cpu_reg, &fw);
	bce_start_cpu(sc, &cpu_reg);
}


/****************************************************************************/
/* Initialize the TPAT CPU.                                                 */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
 */
/****************************************************************************/
static void
bce_init_tpat_cpu(struct bce_softc *sc)
{
	struct cpu_reg cpu_reg;
	struct fw_info fw;

	/*
	 * Describe the TPAT RISC processor's control/debug registers so
	 * the generic bce_load_cpu_fw()/bce_start_cpu() helpers know
	 * where to halt, load, and restart this CPU.
	 */
	cpu_reg.mode = BCE_TPAT_CPU_MODE;
	cpu_reg.mode_value_halt = BCE_TPAT_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BCE_TPAT_CPU_MODE_STEP_ENA;
	cpu_reg.state = BCE_TPAT_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BCE_TPAT_CPU_REG_FILE;
	cpu_reg.evmask = BCE_TPAT_CPU_EVENT_MASK;
	cpu_reg.pc = BCE_TPAT_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BCE_TPAT_CPU_INSTRUCTION;
	cpu_reg.bp = BCE_TPAT_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BCE_TPAT_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	/* 5709/5716 use the b09 firmware image; other chips use b06. */
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
		fw.ver_major = bce_TPAT_b09FwReleaseMajor;
		fw.ver_minor = bce_TPAT_b09FwReleaseMinor;
		fw.ver_fix = bce_TPAT_b09FwReleaseFix;
		fw.start_addr = bce_TPAT_b09FwStartAddr;

		fw.text_addr = bce_TPAT_b09FwTextAddr;
		fw.text_len = bce_TPAT_b09FwTextLen;
		fw.text_index = 0;
		fw.text = bce_TPAT_b09FwText;

		fw.data_addr = bce_TPAT_b09FwDataAddr;
		fw.data_len = bce_TPAT_b09FwDataLen;
		fw.data_index = 0;
		fw.data = bce_TPAT_b09FwData;

		fw.sbss_addr = bce_TPAT_b09FwSbssAddr;
		fw.sbss_len = bce_TPAT_b09FwSbssLen;
		fw.sbss_index = 0;
		fw.sbss = bce_TPAT_b09FwSbss;

		fw.bss_addr = bce_TPAT_b09FwBssAddr;
		fw.bss_len = bce_TPAT_b09FwBssLen;
		fw.bss_index = 0;
		fw.bss = bce_TPAT_b09FwBss;

		fw.rodata_addr = bce_TPAT_b09FwRodataAddr;
		fw.rodata_len = bce_TPAT_b09FwRodataLen;
		fw.rodata_index = 0;
		fw.rodata = bce_TPAT_b09FwRodata;
	} else {
		fw.ver_major = bce_TPAT_b06FwReleaseMajor;
		fw.ver_minor = bce_TPAT_b06FwReleaseMinor;
		fw.ver_fix = bce_TPAT_b06FwReleaseFix;
		fw.start_addr = bce_TPAT_b06FwStartAddr;

		fw.text_addr = bce_TPAT_b06FwTextAddr;
		fw.text_len = bce_TPAT_b06FwTextLen;
		fw.text_index = 0;
		fw.text = bce_TPAT_b06FwText;

		fw.data_addr = bce_TPAT_b06FwDataAddr;
		fw.data_len = bce_TPAT_b06FwDataLen;
		fw.data_index = 0;
		fw.data = bce_TPAT_b06FwData;

		fw.sbss_addr = bce_TPAT_b06FwSbssAddr;
		fw.sbss_len = bce_TPAT_b06FwSbssLen;
		fw.sbss_index = 0;
		fw.sbss = bce_TPAT_b06FwSbss;

		fw.bss_addr = bce_TPAT_b06FwBssAddr;
		fw.bss_len = bce_TPAT_b06FwBssLen;
		fw.bss_index = 0;
		fw.bss = bce_TPAT_b06FwBss;

		fw.rodata_addr = bce_TPAT_b06FwRodataAddr;
		fw.rodata_len = bce_TPAT_b06FwRodataLen;
		fw.rodata_index = 0;
		fw.rodata = bce_TPAT_b06FwRodata;
	}

	DBPRINT(sc, BCE_INFO_RESET, "Loading TPAT firmware.\n");
	bce_load_cpu_fw(sc, &cpu_reg, &fw);
	bce_start_cpu(sc, &cpu_reg);
}


/****************************************************************************/
/* Initialize the CP CPU.                                                   */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
 */
/****************************************************************************/
static void
bce_init_cp_cpu(struct bce_softc *sc)
{
	struct cpu_reg cpu_reg;
	struct fw_info fw;

	/*
	 * Describe the CP RISC processor's control/debug registers so
	 * the generic bce_load_cpu_fw()/bce_start_cpu() helpers know
	 * where to halt, load, and restart this CPU.
	 */
	cpu_reg.mode = BCE_CP_CPU_MODE;
	cpu_reg.mode_value_halt = BCE_CP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BCE_CP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BCE_CP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BCE_CP_CPU_REG_FILE;
	cpu_reg.evmask = BCE_CP_CPU_EVENT_MASK;
	cpu_reg.pc = BCE_CP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BCE_CP_CPU_INSTRUCTION;
	cpu_reg.bp = BCE_CP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BCE_CP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	/* 5709/5716 use the b09 firmware image; other chips use b06. */
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
		fw.ver_major = bce_CP_b09FwReleaseMajor;
		fw.ver_minor = bce_CP_b09FwReleaseMinor;
		fw.ver_fix = bce_CP_b09FwReleaseFix;
		fw.start_addr = bce_CP_b09FwStartAddr;

		fw.text_addr = bce_CP_b09FwTextAddr;
		fw.text_len = bce_CP_b09FwTextLen;
		fw.text_index = 0;
		fw.text = bce_CP_b09FwText;

		fw.data_addr = bce_CP_b09FwDataAddr;
		fw.data_len = bce_CP_b09FwDataLen;
		fw.data_index = 0;
		fw.data = bce_CP_b09FwData;

		fw.sbss_addr = bce_CP_b09FwSbssAddr;
		fw.sbss_len = bce_CP_b09FwSbssLen;
		fw.sbss_index = 0;
		fw.sbss = bce_CP_b09FwSbss;

		fw.bss_addr = bce_CP_b09FwBssAddr;
		fw.bss_len = bce_CP_b09FwBssLen;
		fw.bss_index = 0;
		fw.bss = bce_CP_b09FwBss;

		fw.rodata_addr = bce_CP_b09FwRodataAddr;
		fw.rodata_len = bce_CP_b09FwRodataLen;
		fw.rodata_index = 0;
		fw.rodata = bce_CP_b09FwRodata;
	} else {
		fw.ver_major = bce_CP_b06FwReleaseMajor;
		fw.ver_minor = bce_CP_b06FwReleaseMinor;
		fw.ver_fix = bce_CP_b06FwReleaseFix;
		fw.start_addr = bce_CP_b06FwStartAddr;

		fw.text_addr = bce_CP_b06FwTextAddr;
		fw.text_len = bce_CP_b06FwTextLen;
		fw.text_index = 0;
		fw.text = bce_CP_b06FwText;

		fw.data_addr = bce_CP_b06FwDataAddr;
		fw.data_len = bce_CP_b06FwDataLen;
		fw.data_index = 0;
		fw.data = bce_CP_b06FwData;

		fw.sbss_addr = bce_CP_b06FwSbssAddr;
		fw.sbss_len = bce_CP_b06FwSbssLen;
		fw.sbss_index = 0;
		fw.sbss = bce_CP_b06FwSbss;

		fw.bss_addr = bce_CP_b06FwBssAddr;
		fw.bss_len = bce_CP_b06FwBssLen;
		fw.bss_index = 0;
		fw.bss = bce_CP_b06FwBss;

		fw.rodata_addr = bce_CP_b06FwRodataAddr;
		fw.rodata_len = bce_CP_b06FwRodataLen;
		fw.rodata_index = 0;
		fw.rodata = bce_CP_b06FwRodata;
	}

	DBPRINT(sc, BCE_INFO_RESET, "Loading CP firmware.\n");
	bce_load_cpu_fw(sc, &cpu_reg, &fw);
	bce_start_cpu(sc, &cpu_reg);
}


/****************************************************************************/
/* Initialize the COM CPU.                                                  */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
 */
/****************************************************************************/
static void
bce_init_com_cpu(struct bce_softc *sc)
{
	struct cpu_reg cpu_reg;
	struct fw_info fw;

	/*
	 * Describe the COM RISC processor's control/debug registers so
	 * the generic bce_load_cpu_fw()/bce_start_cpu() helpers know
	 * where to halt, load, and restart this CPU.
	 */
	cpu_reg.mode = BCE_COM_CPU_MODE;
	cpu_reg.mode_value_halt = BCE_COM_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BCE_COM_CPU_MODE_STEP_ENA;
	cpu_reg.state = BCE_COM_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BCE_COM_CPU_REG_FILE;
	cpu_reg.evmask = BCE_COM_CPU_EVENT_MASK;
	cpu_reg.pc = BCE_COM_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BCE_COM_CPU_INSTRUCTION;
	cpu_reg.bp = BCE_COM_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BCE_COM_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	/* 5709/5716 use the b09 firmware image; other chips use b06. */
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
		fw.ver_major = bce_COM_b09FwReleaseMajor;
		fw.ver_minor = bce_COM_b09FwReleaseMinor;
		fw.ver_fix = bce_COM_b09FwReleaseFix;
		fw.start_addr = bce_COM_b09FwStartAddr;

		fw.text_addr = bce_COM_b09FwTextAddr;
		fw.text_len = bce_COM_b09FwTextLen;
		fw.text_index = 0;
		fw.text = bce_COM_b09FwText;

		fw.data_addr = bce_COM_b09FwDataAddr;
		fw.data_len = bce_COM_b09FwDataLen;
		fw.data_index = 0;
		fw.data = bce_COM_b09FwData;

		fw.sbss_addr = bce_COM_b09FwSbssAddr;
		fw.sbss_len = bce_COM_b09FwSbssLen;
		fw.sbss_index = 0;
		fw.sbss = bce_COM_b09FwSbss;

		fw.bss_addr = bce_COM_b09FwBssAddr;
		fw.bss_len = bce_COM_b09FwBssLen;
		fw.bss_index = 0;
		fw.bss = bce_COM_b09FwBss;

		fw.rodata_addr = bce_COM_b09FwRodataAddr;
		fw.rodata_len = bce_COM_b09FwRodataLen;
		fw.rodata_index = 0;
		fw.rodata = bce_COM_b09FwRodata;
	} else {
		fw.ver_major = bce_COM_b06FwReleaseMajor;
		fw.ver_minor = bce_COM_b06FwReleaseMinor;
		fw.ver_fix = bce_COM_b06FwReleaseFix;
		fw.start_addr = bce_COM_b06FwStartAddr;

		fw.text_addr = bce_COM_b06FwTextAddr;
		fw.text_len = bce_COM_b06FwTextLen;
		fw.text_index = 0;
		fw.text = bce_COM_b06FwText;

		fw.data_addr = bce_COM_b06FwDataAddr;
		fw.data_len = bce_COM_b06FwDataLen;
		fw.data_index = 0;
		fw.data = bce_COM_b06FwData;

		fw.sbss_addr = bce_COM_b06FwSbssAddr;
		fw.sbss_len = bce_COM_b06FwSbssLen;
		fw.sbss_index = 0;
		fw.sbss = bce_COM_b06FwSbss;

		fw.bss_addr = bce_COM_b06FwBssAddr;
		fw.bss_len = bce_COM_b06FwBssLen;
		fw.bss_index = 0;
		fw.bss = bce_COM_b06FwBss;

		fw.rodata_addr = bce_COM_b06FwRodataAddr;
		fw.rodata_len = bce_COM_b06FwRodataLen;
		fw.rodata_index = 0;
		fw.rodata = bce_COM_b06FwRodata;
	}

	DBPRINT(sc, BCE_INFO_RESET, "Loading COM firmware.\n");
	bce_load_cpu_fw(sc, &cpu_reg, &fw);
	bce_start_cpu(sc, &cpu_reg);
}


/****************************************************************************/
/* Initialize the RV2P, RX, TX, TPAT, COM, and CP CPUs.                     */
/*                                                                          */
/* Loads the firmware for each CPU and starts the CPU.                      */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
 */
/****************************************************************************/
static void
bce_init_cpus(struct bce_softc *sc)
{
	/*
	 * Load the RV2P processor microcode first.  5709/5716 parts use
	 * the "xi" images, with a separate build ("xi90") for the Ax
	 * revision; all other supported chips use the base images.
	 */
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
		if (BCE_CHIP_REV(sc) == BCE_CHIP_REV_Ax) {
			bce_load_rv2p_fw(sc, bce_xi90_rv2p_proc1,
			    sizeof(bce_xi90_rv2p_proc1), RV2P_PROC1);
			bce_load_rv2p_fw(sc, bce_xi90_rv2p_proc2,
			    sizeof(bce_xi90_rv2p_proc2), RV2P_PROC2);
		} else {
			bce_load_rv2p_fw(sc, bce_xi_rv2p_proc1,
			    sizeof(bce_xi_rv2p_proc1), RV2P_PROC1);
			bce_load_rv2p_fw(sc, bce_xi_rv2p_proc2,
			    sizeof(bce_xi_rv2p_proc2), RV2P_PROC2);
		}
	} else {
		bce_load_rv2p_fw(sc, bce_rv2p_proc1,
		    sizeof(bce_rv2p_proc1), RV2P_PROC1);
		bce_load_rv2p_fw(sc, bce_rv2p_proc2,
		    sizeof(bce_rv2p_proc2), RV2P_PROC2);
	}

	/* Load firmware into, and start, each of the other on-chip CPUs. */
	bce_init_rxp_cpu(sc);
	bce_init_txp_cpu(sc);
	bce_init_tpat_cpu(sc);
	bce_init_com_cpu(sc);
	bce_init_cp_cpu(sc);
}


/****************************************************************************/
/* Initialize context memory.                                               */
/*                                                                          */
/* Clears the memory associated with each Context ID (CID).                 */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, ETIMEDOUT on failure.                                   */
/****************************************************************************/
static int
bce_init_ctx(struct bce_softc *sc)
{
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
		/* DRC: Replace this constant value with a #define. */
		int i, retry_cnt = 10;
		uint32_t val;

		/*
		 * BCM5709 context memory may be cached
		 * in host memory so prepare the host memory
		 * for access.
		 */
		val = BCE_CTX_COMMAND_ENABLED | BCE_CTX_COMMAND_MEM_INIT |
		    (1 << 12);
		val |= (BCM_PAGE_BITS - 8) << 16;
		REG_WR(sc, BCE_CTX_COMMAND, val);

		/* Wait for mem init command to complete. */
		for (i = 0; i < retry_cnt; i++) {
			val = REG_RD(sc, BCE_CTX_COMMAND);
			if (!(val & BCE_CTX_COMMAND_MEM_INIT))
				break;
			DELAY(2);
		}
		if (i == retry_cnt) {
			device_printf(sc->bce_dev,
			    "Context memory initialization failed!\n");
			return ETIMEDOUT;
		}

		/* Point the chip at each host-resident context page. */
		for (i = 0; i < sc->ctx_pages; i++) {
			int j;

			/*
			 * Set the physical address of the context
			 * memory cache.
			 */
			REG_WR(sc, BCE_CTX_HOST_PAGE_TBL_DATA0,
			    BCE_ADDR_LO(sc->ctx_paddr[i] & 0xfffffff0) |
			    BCE_CTX_HOST_PAGE_TBL_DATA0_VALID);
			REG_WR(sc, BCE_CTX_HOST_PAGE_TBL_DATA1,
			    BCE_ADDR_HI(sc->ctx_paddr[i]));
			REG_WR(sc, BCE_CTX_HOST_PAGE_TBL_CTRL,
			    i | BCE_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);

			/*
			 * Verify that the context memory write was successful.
			 */
			for (j = 0; j < retry_cnt; j++) {
				val = REG_RD(sc, BCE_CTX_HOST_PAGE_TBL_CTRL);
				if ((val &
				    BCE_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) == 0)
					break;
				DELAY(5);
			}
			if (j == retry_cnt) {
				device_printf(sc->bce_dev,
				    "Failed to initialize context page!\n");
				return ETIMEDOUT;
			}
		}
	} else {
		uint32_t vcid_addr, offset;

		/*
		 * For the 5706/5708, context memory is local to
		 * the controller, so initialize the controller
		 * context memory.
		 */

		/* Walk the CID address space backwards, zeroing each one. */
		vcid_addr = GET_CID_ADDR(96);
		while (vcid_addr) {
			vcid_addr -= PHY_CTX_SIZE;

			REG_WR(sc, BCE_CTX_VIRT_ADDR, 0);
			REG_WR(sc, BCE_CTX_PAGE_TBL, vcid_addr);

			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
				CTX_WR(sc, 0x00, offset, 0);

			REG_WR(sc, BCE_CTX_VIRT_ADDR, vcid_addr);
			REG_WR(sc, BCE_CTX_PAGE_TBL, vcid_addr);
		}
	}
	return 0;
}


/****************************************************************************/
/* Fetch the permanent MAC address of the controller.                       */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
 */
/****************************************************************************/
static void
bce_get_mac_addr(struct bce_softc *sc)
{
	uint32_t mac_lo = 0, mac_hi = 0;

	/*
	 * The NetXtreme II bootcode populates various NIC
	 * power-on and runtime configuration items in a
	 * shared memory area.  The factory configured MAC
	 * address is available from both NVRAM and the
	 * shared memory area so we'll read the value from
	 * shared memory for speed.
	 */

	mac_hi = bce_shmem_rd(sc, BCE_PORT_HW_CFG_MAC_UPPER);
	mac_lo = bce_shmem_rd(sc, BCE_PORT_HW_CFG_MAC_LOWER);

	if (mac_lo == 0 && mac_hi == 0) {
		if_printf(&sc->arpcom.ac_if, "Invalid Ethernet address!\n");
	} else {
		/* Unpack 2+4 bytes into the 6-byte station address. */
		sc->eaddr[0] = (u_char)(mac_hi >> 8);
		sc->eaddr[1] = (u_char)(mac_hi >> 0);
		sc->eaddr[2] = (u_char)(mac_lo >> 24);
		sc->eaddr[3] = (u_char)(mac_lo >> 16);
		sc->eaddr[4] = (u_char)(mac_lo >> 8);
		sc->eaddr[5] = (u_char)(mac_lo >> 0);
	}

	DBPRINT(sc, BCE_INFO, "Permanent Ethernet address = %6D\n", sc->eaddr, ":");
}


/****************************************************************************/
/* Program the MAC address.                                                 */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_set_mac_addr(struct bce_softc *sc)
{
	const uint8_t *mac_addr = sc->eaddr;
	uint32_t val;

	DBPRINT(sc, BCE_INFO, "Setting Ethernet address = %6D\n",
	    sc->eaddr, ":");

	/* High 2 bytes of the station address. */
	val = (mac_addr[0] << 8) | mac_addr[1];
	REG_WR(sc, BCE_EMAC_MAC_MATCH0, val);

	/* Low 4 bytes of the station address. */
	val = (mac_addr[2] << 24) |
	    (mac_addr[3] << 16) |
	    (mac_addr[4] << 8) |
	    mac_addr[5];
	REG_WR(sc, BCE_EMAC_MAC_MATCH1, val);
}


/****************************************************************************/
/* Stop the controller.
 */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_stop(struct bce_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ASSERT_SERIALIZED(ifp->if_serializer);

	callout_stop(&sc->bce_tick_callout);

	/* Disable the transmit/receive blocks. */
	REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS, BCE_MISC_ENABLE_CLR_DEFAULT);
	REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS);
	DELAY(20);

	bce_disable_intr(sc);

	/* Free the RX lists. */
	bce_free_rx_chain(sc);

	/* Free TX buffers. */
	bce_free_tx_chain(sc);

	sc->bce_link = 0;
	sc->bce_coalchg_mask = 0;

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
}


/****************************************************************************/
/* Reset the controller, informing the bootcode before and after.           */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
static int
bce_reset(struct bce_softc *sc, uint32_t reset_code)
{
	uint32_t val;
	int i, rc = 0;

	/* Wait for pending PCI transactions to complete. */
	REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS,
	    BCE_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	    BCE_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	    BCE_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	    BCE_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	val = REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS);
	DELAY(5);

	/* Disable DMA */
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
		val = REG_RD(sc, BCE_MISC_NEW_CORE_CTL);
		val &= ~BCE_MISC_NEW_CORE_CTL_DMA_ENABLE;
		REG_WR(sc, BCE_MISC_NEW_CORE_CTL, val);
	}

	/* Assume bootcode is running. */
	sc->bce_fw_timed_out = 0;
	sc->bce_drv_cardiac_arrest = 0;

	/* Give the firmware a chance to prepare for the reset. */
	rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT0 | reset_code);
	if (rc) {
		if_printf(&sc->arpcom.ac_if,
		    "Firmware is not ready for reset\n");
		return rc;
	}

	/* Set a firmware reminder that this is a soft reset. */
	bce_shmem_wr(sc, BCE_DRV_RESET_SIGNATURE,
	    BCE_DRV_RESET_SIGNATURE_MAGIC);

	/* Dummy read to force the chip to complete all current transactions. */
	val = REG_RD(sc, BCE_MISC_ID);

	/* Chip reset. */
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
		/* 5709/5716: reset via the MISC command register. */
		REG_WR(sc, BCE_MISC_COMMAND, BCE_MISC_COMMAND_SW_RESET);
		REG_RD(sc, BCE_MISC_COMMAND);
		DELAY(5);

		val = BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		    BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		pci_write_config(sc->bce_dev, BCE_PCICFG_MISC_CONFIG, val, 4);
	} else {
		/* Older chips: reset via PCI configuration space. */
		val = BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		    BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		    BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
		REG_WR(sc, BCE_PCICFG_MISC_CONFIG, val);

		/*
		 * Allow up to 30us for reset to complete.
		 * NOTE(review): the loop below actually polls 10 times
		 * with a 10us delay (~100us total) — confirm intent.
		 */
		for (i = 0; i < 10; i++) {
			val = REG_RD(sc, BCE_PCICFG_MISC_CONFIG);
			if ((val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			    BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
				break;
			DELAY(10);
		}

		/* Check that reset completed successfully. */
		if (val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		    BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
			if_printf(&sc->arpcom.ac_if, "Reset failed!\n");
			return EBUSY;
		}
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(sc, BCE_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		if_printf(&sc->arpcom.ac_if, "Byte swap is incorrect!\n");
		return ENODEV;
	}

	/* Just completed a reset, assume that firmware is running again. */
	sc->bce_fw_timed_out = 0;
	sc->bce_drv_cardiac_arrest = 0;

	/* Wait for the firmware to finish its initialization. */
	rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT1 | reset_code);
	if (rc) {
		if_printf(&sc->arpcom.ac_if,
		    "Firmware did not complete initialization!\n");
	}
	return rc;
}


/****************************************************************************/
/* Perform one-time chip initialization after reset.                        */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
static int
bce_chipinit(struct bce_softc *sc)
{
	uint32_t val;
	int rc = 0;

	/* Make sure the interrupt is not active. */
	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, BCE_PCICFG_INT_ACK_CMD_MASK_INT);
	REG_RD(sc, BCE_PCICFG_INT_ACK_CMD);

	/*
	 * Initialize DMA byte/word swapping, configure the number of DMA
	 * channels and PCI clock compensation delay.
	 */
	val = BCE_DMA_CONFIG_DATA_BYTE_SWAP |
	    BCE_DMA_CONFIG_DATA_WORD_SWAP |
#if BYTE_ORDER == BIG_ENDIAN
	    BCE_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	    BCE_DMA_CONFIG_CNTL_WORD_SWAP |
	    DMA_READ_CHANS << 12 |
	    DMA_WRITE_CHANS << 16;

	val |= (0x2 << 20) | BCE_DMA_CONFIG_CNTL_PCI_COMP_DLY;

	if ((sc->bce_flags & BCE_PCIX_FLAG) && sc->bus_speed_mhz == 133)
		val |= BCE_DMA_CONFIG_PCI_FAST_CLK_CMP;

	/*
	 * This setting resolves a problem observed on certain Intel PCI
	 * chipsets that cannot handle multiple outstanding DMA operations.
	 * See errata E9_5706A1_65.
	 */
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706 &&
	    BCE_CHIP_ID(sc) != BCE_CHIP_ID_5706_A0 &&
	    !(sc->bce_flags & BCE_PCIX_FLAG))
		val |= BCE_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(sc, BCE_DMA_CONFIG, val);

	/* Enable the RX_V2P and Context state machines before access. */
	REG_WR(sc, BCE_MISC_ENABLE_SET_BITS,
	    BCE_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	    BCE_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	    BCE_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts. */
	rc = bce_init_ctx(sc);
	if (rc != 0)
		return rc;

	/* Initialize the on-boards CPUs */
	bce_init_cpus(sc);

	/* Enable management frames (NC-SI) to flow to the MCP. */
	if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) {
		val = REG_RD(sc, BCE_RPM_MGMT_PKT_CTRL) |
		    BCE_RPM_MGMT_PKT_CTRL_MGMT_EN;
		REG_WR(sc, BCE_RPM_MGMT_PKT_CTRL, val);
	}

	/* Prepare NVRAM for access. */
	rc = bce_init_nvram(sc);
	if (rc != 0)
		return rc;

	/* Set the kernel bypass block size */
	val = REG_RD(sc, BCE_MQ_CONFIG);
	val &= ~BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;

	/* Enable bins used on the 5709/5716. */
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
		val |= BCE_MQ_CONFIG_BIN_MQ_MODE;
		if (BCE_CHIP_ID(sc) == BCE_CHIP_ID_5709_A1)
			val |= BCE_MQ_CONFIG_HALT_DIS;
	}

	REG_WR(sc, BCE_MQ_CONFIG, val);

	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(sc, BCE_MQ_KNL_BYP_WIND_START, val);
	REG_WR(sc, BCE_MQ_KNL_WIND_END, val);

	/* Set the page size and clear the RV2P processor stall bits. */
	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(sc, BCE_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(sc, BCE_TBDR_CONFIG);
	val &= ~BCE_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(sc, BCE_TBDR_CONFIG, val);

	/* Set the perfect match control register to default. */
	REG_WR_IND(sc, BCE_RXP_PM_CTRL, 0);

	return 0;
}


/****************************************************************************/
/* Initialize the controller in preparation to send/receive traffic.        */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.
*/ 3699 /****************************************************************************/ 3700 static int 3701 bce_blockinit(struct bce_softc *sc) 3702 { 3703 uint32_t reg, val; 3704 int rc = 0; 3705 3706 /* Load the hardware default MAC address. */ 3707 bce_set_mac_addr(sc); 3708 3709 /* Set the Ethernet backoff seed value */ 3710 val = sc->eaddr[0] + (sc->eaddr[1] << 8) + (sc->eaddr[2] << 16) + 3711 sc->eaddr[3] + (sc->eaddr[4] << 8) + (sc->eaddr[5] << 16); 3712 REG_WR(sc, BCE_EMAC_BACKOFF_SEED, val); 3713 3714 sc->last_status_idx = 0; 3715 sc->rx_mode = BCE_EMAC_RX_MODE_SORT_MODE; 3716 3717 /* Set up link change interrupt generation. */ 3718 REG_WR(sc, BCE_EMAC_ATTENTION_ENA, BCE_EMAC_ATTENTION_ENA_LINK); 3719 3720 /* Program the physical address of the status block. */ 3721 REG_WR(sc, BCE_HC_STATUS_ADDR_L, BCE_ADDR_LO(sc->status_block_paddr)); 3722 REG_WR(sc, BCE_HC_STATUS_ADDR_H, BCE_ADDR_HI(sc->status_block_paddr)); 3723 3724 /* Program the physical address of the statistics block. */ 3725 REG_WR(sc, BCE_HC_STATISTICS_ADDR_L, 3726 BCE_ADDR_LO(sc->stats_block_paddr)); 3727 REG_WR(sc, BCE_HC_STATISTICS_ADDR_H, 3728 BCE_ADDR_HI(sc->stats_block_paddr)); 3729 3730 /* Program various host coalescing parameters. 
*/ 3731 REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP, 3732 (sc->bce_tx_quick_cons_trip_int << 16) | 3733 sc->bce_tx_quick_cons_trip); 3734 REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP, 3735 (sc->bce_rx_quick_cons_trip_int << 16) | 3736 sc->bce_rx_quick_cons_trip); 3737 REG_WR(sc, BCE_HC_COMP_PROD_TRIP, 3738 (sc->bce_comp_prod_trip_int << 16) | sc->bce_comp_prod_trip); 3739 REG_WR(sc, BCE_HC_TX_TICKS, 3740 (sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks); 3741 REG_WR(sc, BCE_HC_RX_TICKS, 3742 (sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks); 3743 REG_WR(sc, BCE_HC_COM_TICKS, 3744 (sc->bce_com_ticks_int << 16) | sc->bce_com_ticks); 3745 REG_WR(sc, BCE_HC_CMD_TICKS, 3746 (sc->bce_cmd_ticks_int << 16) | sc->bce_cmd_ticks); 3747 REG_WR(sc, BCE_HC_STATS_TICKS, (sc->bce_stats_ticks & 0xffff00)); 3748 REG_WR(sc, BCE_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */ 3749 3750 val = BCE_HC_CONFIG_TX_TMR_MODE | BCE_HC_CONFIG_COLLECT_STATS; 3751 if (sc->bce_flags & BCE_ONESHOT_MSI_FLAG) { 3752 if (bootverbose) 3753 if_printf(&sc->arpcom.ac_if, "oneshot MSI\n"); 3754 val |= BCE_HC_CONFIG_ONE_SHOT | BCE_HC_CONFIG_USE_INT_PARAM; 3755 } 3756 REG_WR(sc, BCE_HC_CONFIG, val); 3757 3758 /* Clear the internal statistics counters. */ 3759 REG_WR(sc, BCE_HC_COMMAND, BCE_HC_COMMAND_CLR_STAT_NOW); 3760 3761 /* Verify that bootcode is running. */ 3762 reg = bce_shmem_rd(sc, BCE_DEV_INFO_SIGNATURE); 3763 3764 DBRUNIF(DB_RANDOMTRUE(bce_debug_bootcode_running_failure), 3765 if_printf(&sc->arpcom.ac_if, 3766 "%s(%d): Simulating bootcode failure.\n", 3767 __FILE__, __LINE__); 3768 reg = 0); 3769 3770 if ((reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK) != 3771 BCE_DEV_INFO_SIGNATURE_MAGIC) { 3772 if_printf(&sc->arpcom.ac_if, 3773 "Bootcode not running! 
Found: 0x%08X, " 3774 "Expected: 08%08X\n", 3775 reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK, 3776 BCE_DEV_INFO_SIGNATURE_MAGIC); 3777 return ENODEV; 3778 } 3779 3780 /* Enable DMA */ 3781 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 || 3782 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) { 3783 val = REG_RD(sc, BCE_MISC_NEW_CORE_CTL); 3784 val |= BCE_MISC_NEW_CORE_CTL_DMA_ENABLE; 3785 REG_WR(sc, BCE_MISC_NEW_CORE_CTL, val); 3786 } 3787 3788 /* Allow bootcode to apply any additional fixes before enabling MAC. */ 3789 rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT2 | BCE_DRV_MSG_CODE_RESET); 3790 3791 /* Enable link state change interrupt generation. */ 3792 REG_WR(sc, BCE_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE); 3793 3794 /* Enable the RXP. */ 3795 bce_start_rxp_cpu(sc); 3796 3797 /* Disable management frames (NC-SI) from flowing to the MCP. */ 3798 if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) { 3799 val = REG_RD(sc, BCE_RPM_MGMT_PKT_CTRL) & 3800 ~BCE_RPM_MGMT_PKT_CTRL_MGMT_EN; 3801 REG_WR(sc, BCE_RPM_MGMT_PKT_CTRL, val); 3802 } 3803 3804 /* Enable all remaining blocks in the MAC. */ 3805 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 || 3806 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) { 3807 REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, 3808 BCE_MISC_ENABLE_DEFAULT_XI); 3809 } else { 3810 REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, BCE_MISC_ENABLE_DEFAULT); 3811 } 3812 REG_RD(sc, BCE_MISC_ENABLE_SET_BITS); 3813 DELAY(20); 3814 3815 /* Save the current host coalescing block settings. */ 3816 sc->hc_command = REG_RD(sc, BCE_HC_COMMAND); 3817 3818 return 0; 3819 } 3820 3821 3822 /****************************************************************************/ 3823 /* Encapsulate an mbuf cluster into the rx_bd chain. */ 3824 /* */ 3825 /* The NetXtreme II can support Jumbo frames by using multiple rx_bd's. */ 3826 /* This routine will map an mbuf cluster into 1 or more rx_bd's as */ 3827 /* necessary. */ 3828 /* */ 3829 /* Returns: */ 3830 /* 0 for success, positive value for failure. 
*/ 3831 /****************************************************************************/ 3832 static int 3833 bce_newbuf_std(struct bce_softc *sc, uint16_t *prod, uint16_t *chain_prod, 3834 uint32_t *prod_bseq, int init) 3835 { 3836 bus_dmamap_t map; 3837 bus_dma_segment_t seg; 3838 struct mbuf *m_new; 3839 int error, nseg; 3840 #ifdef BCE_DEBUG 3841 uint16_t debug_chain_prod = *chain_prod; 3842 #endif 3843 3844 /* Make sure the inputs are valid. */ 3845 DBRUNIF((*chain_prod > MAX_RX_BD(sc)), 3846 if_printf(&sc->arpcom.ac_if, "%s(%d): " 3847 "RX producer out of range: 0x%04X > 0x%04X\n", 3848 __FILE__, __LINE__, 3849 *chain_prod, (uint16_t)MAX_RX_BD(sc))); 3850 3851 DBPRINT(sc, BCE_VERBOSE_RECV, "%s(enter): prod = 0x%04X, chain_prod = 0x%04X, " 3852 "prod_bseq = 0x%08X\n", __func__, *prod, *chain_prod, *prod_bseq); 3853 3854 DBRUNIF(DB_RANDOMTRUE(bce_debug_mbuf_allocation_failure), 3855 if_printf(&sc->arpcom.ac_if, "%s(%d): " 3856 "Simulating mbuf allocation failure.\n", 3857 __FILE__, __LINE__); 3858 sc->mbuf_alloc_failed++; 3859 return ENOBUFS); 3860 3861 /* This is a new mbuf allocation. */ 3862 m_new = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR); 3863 if (m_new == NULL) 3864 return ENOBUFS; 3865 DBRUNIF(1, sc->rx_mbuf_alloc++); 3866 3867 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; 3868 3869 /* Map the mbuf cluster into device memory. 
*/ 3870 error = bus_dmamap_load_mbuf_segment(sc->rx_mbuf_tag, 3871 sc->rx_mbuf_tmpmap, m_new, &seg, 1, &nseg, 3872 BUS_DMA_NOWAIT); 3873 if (error) { 3874 m_freem(m_new); 3875 if (init) { 3876 if_printf(&sc->arpcom.ac_if, 3877 "Error mapping mbuf into RX chain!\n"); 3878 } 3879 DBRUNIF(1, sc->rx_mbuf_alloc--); 3880 return error; 3881 } 3882 3883 if (sc->rx_mbuf_ptr[*chain_prod] != NULL) { 3884 bus_dmamap_unload(sc->rx_mbuf_tag, 3885 sc->rx_mbuf_map[*chain_prod]); 3886 } 3887 3888 map = sc->rx_mbuf_map[*chain_prod]; 3889 sc->rx_mbuf_map[*chain_prod] = sc->rx_mbuf_tmpmap; 3890 sc->rx_mbuf_tmpmap = map; 3891 3892 /* Watch for overflow. */ 3893 DBRUNIF((sc->free_rx_bd > USABLE_RX_BD(sc)), 3894 if_printf(&sc->arpcom.ac_if, "%s(%d): " 3895 "Too many free rx_bd (0x%04X > 0x%04X)!\n", 3896 __FILE__, __LINE__, sc->free_rx_bd, 3897 (uint16_t)USABLE_RX_BD(sc))); 3898 3899 /* Update some debug statistic counters */ 3900 DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark), 3901 sc->rx_low_watermark = sc->free_rx_bd); 3902 DBRUNIF((sc->free_rx_bd == 0), sc->rx_empty_count++); 3903 3904 /* Save the mbuf and update our counter. */ 3905 sc->rx_mbuf_ptr[*chain_prod] = m_new; 3906 sc->rx_mbuf_paddr[*chain_prod] = seg.ds_addr; 3907 sc->free_rx_bd--; 3908 3909 bce_setup_rxdesc_std(sc, *chain_prod, prod_bseq); 3910 3911 DBRUN(BCE_VERBOSE_RECV, 3912 bce_dump_rx_mbuf_chain(sc, debug_chain_prod, 1)); 3913 3914 DBPRINT(sc, BCE_VERBOSE_RECV, "%s(exit): prod = 0x%04X, chain_prod = 0x%04X, " 3915 "prod_bseq = 0x%08X\n", __func__, *prod, *chain_prod, *prod_bseq); 3916 3917 return 0; 3918 } 3919 3920 3921 static void 3922 bce_setup_rxdesc_std(struct bce_softc *sc, uint16_t chain_prod, uint32_t *prod_bseq) 3923 { 3924 struct rx_bd *rxbd; 3925 bus_addr_t paddr; 3926 int len; 3927 3928 paddr = sc->rx_mbuf_paddr[chain_prod]; 3929 len = sc->rx_mbuf_ptr[chain_prod]->m_len; 3930 3931 /* Setup the rx_bd for the first segment. 
*/ 3932 rxbd = &sc->rx_bd_chain[RX_PAGE(chain_prod)][RX_IDX(chain_prod)]; 3933 3934 rxbd->rx_bd_haddr_lo = htole32(BCE_ADDR_LO(paddr)); 3935 rxbd->rx_bd_haddr_hi = htole32(BCE_ADDR_HI(paddr)); 3936 rxbd->rx_bd_len = htole32(len); 3937 rxbd->rx_bd_flags = htole32(RX_BD_FLAGS_START); 3938 *prod_bseq += len; 3939 3940 rxbd->rx_bd_flags |= htole32(RX_BD_FLAGS_END); 3941 } 3942 3943 3944 /****************************************************************************/ 3945 /* Initialize the TX context memory. */ 3946 /* */ 3947 /* Returns: */ 3948 /* Nothing */ 3949 /****************************************************************************/ 3950 static void 3951 bce_init_tx_context(struct bce_softc *sc) 3952 { 3953 uint32_t val; 3954 3955 /* Initialize the context ID for an L2 TX chain. */ 3956 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 || 3957 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) { 3958 /* Set the CID type to support an L2 connection. */ 3959 val = BCE_L2CTX_TX_TYPE_TYPE_L2 | BCE_L2CTX_TX_TYPE_SIZE_L2; 3960 CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TX_TYPE_XI, val); 3961 val = BCE_L2CTX_TX_CMD_TYPE_TYPE_L2 | (8 << 16); 3962 CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TX_CMD_TYPE_XI, val); 3963 3964 /* Point the hardware to the first page in the chain. */ 3965 val = BCE_ADDR_HI(sc->tx_bd_chain_paddr[0]); 3966 CTX_WR(sc, GET_CID_ADDR(TX_CID), 3967 BCE_L2CTX_TX_TBDR_BHADDR_HI_XI, val); 3968 val = BCE_ADDR_LO(sc->tx_bd_chain_paddr[0]); 3969 CTX_WR(sc, GET_CID_ADDR(TX_CID), 3970 BCE_L2CTX_TX_TBDR_BHADDR_LO_XI, val); 3971 } else { 3972 /* Set the CID type to support an L2 connection. */ 3973 val = BCE_L2CTX_TX_TYPE_TYPE_L2 | BCE_L2CTX_TX_TYPE_SIZE_L2; 3974 CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TX_TYPE, val); 3975 val = BCE_L2CTX_TX_CMD_TYPE_TYPE_L2 | (8 << 16); 3976 CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TX_CMD_TYPE, val); 3977 3978 /* Point the hardware to the first page in the chain. 
/****************************************************************************/
/* Initialize the TX context memory.                                        */
/*                                                                          */
/* Programs the on-chip context for the L2 TX connection (CID type and     */
/* the host address of the first tx_bd chain page).  The 5709/5716         */
/* ("Xinan") controllers use a different set of context register offsets   */
/* (the _XI variants) than the older 5706/5708 parts.                      */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing                                                                */
/****************************************************************************/
static void
bce_init_tx_context(struct bce_softc *sc)
{
	uint32_t val;

	/* Initialize the context ID for an L2 TX chain. */
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
		/* Set the CID type to support an L2 connection. */
		val = BCE_L2CTX_TX_TYPE_TYPE_L2 | BCE_L2CTX_TX_TYPE_SIZE_L2;
		CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TX_TYPE_XI, val);
		/* (8 << 16) sets the bd_pre read-ahead; value from vendor code. */
		val = BCE_L2CTX_TX_CMD_TYPE_TYPE_L2 | (8 << 16);
		CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TX_CMD_TYPE_XI, val);

		/* Point the hardware to the first page in the chain. */
		val = BCE_ADDR_HI(sc->tx_bd_chain_paddr[0]);
		CTX_WR(sc, GET_CID_ADDR(TX_CID),
		    BCE_L2CTX_TX_TBDR_BHADDR_HI_XI, val);
		val = BCE_ADDR_LO(sc->tx_bd_chain_paddr[0]);
		CTX_WR(sc, GET_CID_ADDR(TX_CID),
		    BCE_L2CTX_TX_TBDR_BHADDR_LO_XI, val);
	} else {
		/* Set the CID type to support an L2 connection. */
		val = BCE_L2CTX_TX_TYPE_TYPE_L2 | BCE_L2CTX_TX_TYPE_SIZE_L2;
		CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TX_TYPE, val);
		val = BCE_L2CTX_TX_CMD_TYPE_TYPE_L2 | (8 << 16);
		CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TX_CMD_TYPE, val);

		/* Point the hardware to the first page in the chain. */
		val = BCE_ADDR_HI(sc->tx_bd_chain_paddr[0]);
		CTX_WR(sc, GET_CID_ADDR(TX_CID),
		    BCE_L2CTX_TX_TBDR_BHADDR_HI, val);
		val = BCE_ADDR_LO(sc->tx_bd_chain_paddr[0]);
		CTX_WR(sc, GET_CID_ADDR(TX_CID),
		    BCE_L2CTX_TX_TBDR_BHADDR_LO, val);
	}
}


/****************************************************************************/
/* Allocate memory and initialize the TX data structures.                   */
/*                                                                          */
/* Resets the software producer/consumer indices, links the tx_bd chain    */
/* pages into a ring via their trailing next-page pointer entries, and     */
/* programs the TX context.                                                 */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
static int
bce_init_tx_chain(struct bce_softc *sc)
{
	struct tx_bd *txbd;
	int i, rc = 0;

	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __func__);

	/* Set the initial TX producer/consumer indices. */
	sc->tx_prod = 0;
	sc->tx_cons = 0;
	sc->tx_prod_bseq = 0;
	sc->used_tx_bd = 0;
	sc->max_tx_bd = USABLE_TX_BD(sc);
	DBRUNIF(1, sc->tx_hi_watermark = USABLE_TX_BD(sc));
	DBRUNIF(1, sc->tx_full_count = 0);

	/*
	 * The NetXtreme II supports a linked-list structre called
	 * a Buffer Descriptor Chain (or BD chain).  A BD chain
	 * consists of a series of 1 or more chain pages, each of which
	 * consists of a fixed number of BD entries.
	 * The last BD entry on each page is a pointer to the next page
	 * in the chain, and the last pointer in the BD chain
	 * points back to the beginning of the chain.
	 */

	/* Set the TX next pointer chain entries. */
	for (i = 0; i < sc->tx_pages; i++) {
		int j;

		/* The last entry of each page is the next-page pointer. */
		txbd = &sc->tx_bd_chain[i][USABLE_TX_BD_PER_PAGE];

		/* Check if we've reached the last page. */
		if (i == (sc->tx_pages - 1))
			j = 0;	/* wrap back to the first page */
		else
			j = i + 1;

		txbd->tx_bd_haddr_hi =
		    htole32(BCE_ADDR_HI(sc->tx_bd_chain_paddr[j]));
		txbd->tx_bd_haddr_lo =
		    htole32(BCE_ADDR_LO(sc->tx_bd_chain_paddr[j]));
	}
	bce_init_tx_context(sc);

	return(rc);
}


/****************************************************************************/
/* Free memory and clear the TX data structures.                            */
/*                                                                          */
/* Unloads and frees any mbufs still attached to the TX chain and zeroes   */
/* every chain page.  Safe to call on an already-empty chain.              */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_free_tx_chain(struct bce_softc *sc)
{
	int i;

	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __func__);

	/* Unmap, unload, and free any mbufs still in the TX mbuf chain. */
	for (i = 0; i < TOTAL_TX_BD(sc); i++) {
		if (sc->tx_mbuf_ptr[i] != NULL) {
			bus_dmamap_unload(sc->tx_mbuf_tag, sc->tx_mbuf_map[i]);
			m_freem(sc->tx_mbuf_ptr[i]);
			sc->tx_mbuf_ptr[i] = NULL;
			DBRUNIF(1, sc->tx_mbuf_alloc--);
		}
	}

	/* Clear each TX chain page. */
	for (i = 0; i < sc->tx_pages; i++)
		bzero(sc->tx_bd_chain[i], BCE_TX_CHAIN_PAGE_SZ);
	sc->used_tx_bd = 0;

	/* Check if we lost any mbufs in the process. */
	DBRUNIF((sc->tx_mbuf_alloc),
	    if_printf(&sc->arpcom.ac_if,
		"%s(%d): Memory leak! "
		"Lost %d mbufs from tx chain!\n",
		__FILE__, __LINE__, sc->tx_mbuf_alloc));

	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __func__);
}
/****************************************************************************/
/* Initialize the RX context memory.                                        */
/*                                                                          */
/* Programs the on-chip context for the L2 RX connection: BD chain type,   */
/* pause-frame watermarks (5709/5716 only), the MQ BIN mapping, and the    */
/* host address of the first rx_bd chain page.                             */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing                                                                */
/****************************************************************************/
static void
bce_init_rx_context(struct bce_softc *sc)
{
	uint32_t val;

	/* Initialize the context ID for an L2 RX chain. */
	val = BCE_L2CTX_RX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE |
	    BCE_L2CTX_RX_CTX_TYPE_SIZE_L2 | (0x02 << 8);

	/*
	 * Set the level for generating pause frames
	 * when the number of available rx_bd's gets
	 * too low (the low watermark) and the level
	 * when pause frames can be stopped (the high
	 * watermark).
	 */
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
		uint32_t lo_water, hi_water;

		lo_water = BCE_L2CTX_RX_LO_WATER_MARK_DEFAULT;
		hi_water = USABLE_RX_BD(sc) / 4;

		/* Scale the raw BD counts into the context field units. */
		lo_water /= BCE_L2CTX_RX_LO_WATER_MARK_SCALE;
		hi_water /= BCE_L2CTX_RX_HI_WATER_MARK_SCALE;

		/* Clamp the high watermark to its 4-bit field;
		 * disable pause entirely if it would be zero. */
		if (hi_water > 0xf)
			hi_water = 0xf;
		else if (hi_water == 0)
			lo_water = 0;
		val |= lo_water |
		    (hi_water << BCE_L2CTX_RX_HI_WATER_MARK_SHIFT);
	}

	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_CTX_TYPE, val);

	/* Setup the MQ BIN mapping for l2_ctx_host_bseq. */
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
		val = REG_RD(sc, BCE_MQ_MAP_L2_5);
		REG_WR(sc, BCE_MQ_MAP_L2_5, val | BCE_MQ_MAP_L2_5_ARM);
	}

	/* Point the hardware to the first page in the chain. */
	val = BCE_ADDR_HI(sc->rx_bd_chain_paddr[0]);
	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_NX_BDHADDR_HI, val);
	val = BCE_ADDR_LO(sc->rx_bd_chain_paddr[0]);
	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_NX_BDHADDR_LO, val);
}


/****************************************************************************/
/* Allocate memory and initialize the RX data structures.                   */
/*                                                                          */
/* Resets the software indices, links the rx_bd pages into a ring via      */
/* their next-page pointer entries, fills the ring with mbuf clusters,     */
/* publishes the producer index/bseq to the chip and programs the RX       */
/* context.                                                                 */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
static int
bce_init_rx_chain(struct bce_softc *sc)
{
	struct rx_bd *rxbd;
	int i, rc = 0;
	uint16_t prod, chain_prod;
	uint32_t prod_bseq;

	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __func__);

	/* Initialize the RX producer and consumer indices. */
	sc->rx_prod = 0;
	sc->rx_cons = 0;
	sc->rx_prod_bseq = 0;
	sc->free_rx_bd = USABLE_RX_BD(sc);
	sc->max_rx_bd = USABLE_RX_BD(sc);
	DBRUNIF(1, sc->rx_low_watermark = USABLE_RX_BD(sc));
	DBRUNIF(1, sc->rx_empty_count = 0);

	/* Initialize the RX next pointer chain entries. */
	for (i = 0; i < sc->rx_pages; i++) {
		int j;

		/* The last entry of each page is the next-page pointer. */
		rxbd = &sc->rx_bd_chain[i][USABLE_RX_BD_PER_PAGE];

		/* Check if we've reached the last page. */
		if (i == (sc->rx_pages - 1))
			j = 0;	/* wrap back to the first page */
		else
			j = i + 1;

		/* Setup the chain page pointers. */
		rxbd->rx_bd_haddr_hi =
		    htole32(BCE_ADDR_HI(sc->rx_bd_chain_paddr[j]));
		rxbd->rx_bd_haddr_lo =
		    htole32(BCE_ADDR_LO(sc->rx_bd_chain_paddr[j]));
	}

	/* Allocate mbuf clusters for the rx_bd chain. */
	prod = prod_bseq = 0;
	while (prod < TOTAL_RX_BD(sc)) {
		chain_prod = RX_CHAIN_IDX(sc, prod);
		if (bce_newbuf_std(sc, &prod, &chain_prod, &prod_bseq, 1)) {
			if_printf(&sc->arpcom.ac_if,
			    "Error filling RX chain: rx_bd[0x%04X]!\n",
			    chain_prod);
			rc = ENOBUFS;
			break;
		}
		prod = NEXT_RX_BD(prod);
	}

	/* Save the RX chain producer index. */
	sc->rx_prod = prod;
	sc->rx_prod_bseq = prod_bseq;

	/* Tell the chip about the waiting rx_bd's. */
	REG_WR16(sc, MB_GET_CID_ADDR(RX_CID) + BCE_L2MQ_RX_HOST_BDIDX,
	    sc->rx_prod);
	REG_WR(sc, MB_GET_CID_ADDR(RX_CID) + BCE_L2MQ_RX_HOST_BSEQ,
	    sc->rx_prod_bseq);

	bce_init_rx_context(sc);

	return(rc);
}


/****************************************************************************/
/* Free memory and clear the RX data structures.                            */
/*                                                                          */
/* Unloads and frees any mbufs still attached to the RX chain and zeroes   */
/* every chain page.  Safe to call on an already-empty chain.              */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_free_rx_chain(struct bce_softc *sc)
{
	int i;

	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __func__);

	/* Free any mbufs still in the RX mbuf chain. */
	for (i = 0; i < TOTAL_RX_BD(sc); i++) {
		if (sc->rx_mbuf_ptr[i] != NULL) {
			bus_dmamap_unload(sc->rx_mbuf_tag, sc->rx_mbuf_map[i]);
			m_freem(sc->rx_mbuf_ptr[i]);
			sc->rx_mbuf_ptr[i] = NULL;
			DBRUNIF(1, sc->rx_mbuf_alloc--);
		}
	}

	/* Clear each RX chain page. */
	for (i = 0; i < sc->rx_pages; i++)
		bzero(sc->rx_bd_chain[i], BCE_RX_CHAIN_PAGE_SZ);

	/* Check if we lost any mbufs in the process. */
	DBRUNIF((sc->rx_mbuf_alloc),
	    if_printf(&sc->arpcom.ac_if,
		"%s(%d): Memory leak! "
		"Lost %d mbufs from rx chain!\n",
		__FILE__, __LINE__, sc->rx_mbuf_alloc));

	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __func__);
}
*/ 4258 /****************************************************************************/ 4259 static int 4260 bce_ifmedia_upd(struct ifnet *ifp) 4261 { 4262 struct bce_softc *sc = ifp->if_softc; 4263 struct mii_data *mii = device_get_softc(sc->bce_miibus); 4264 int error = 0; 4265 4266 /* 4267 * 'mii' will be NULL, when this function is called on following 4268 * code path: bce_attach() -> bce_mgmt_init() 4269 */ 4270 if (mii != NULL) { 4271 /* Make sure the MII bus has been enumerated. */ 4272 sc->bce_link = 0; 4273 if (mii->mii_instance) { 4274 struct mii_softc *miisc; 4275 4276 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 4277 mii_phy_reset(miisc); 4278 } 4279 error = mii_mediachg(mii); 4280 } 4281 return error; 4282 } 4283 4284 4285 /****************************************************************************/ 4286 /* Reports current media status. */ 4287 /* */ 4288 /* Returns: */ 4289 /* Nothing. */ 4290 /****************************************************************************/ 4291 static void 4292 bce_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 4293 { 4294 struct bce_softc *sc = ifp->if_softc; 4295 struct mii_data *mii = device_get_softc(sc->bce_miibus); 4296 4297 mii_pollstat(mii); 4298 ifmr->ifm_active = mii->mii_media_active; 4299 ifmr->ifm_status = mii->mii_media_status; 4300 } 4301 4302 4303 /****************************************************************************/ 4304 /* Handles PHY generated interrupt events. */ 4305 /* */ 4306 /* Returns: */ 4307 /* Nothing. 
/****************************************************************************/
/* Handles PHY generated interrupt events.                                  */
/*                                                                          */
/* Compares the link-state attention bit against its ack'd copy in the     */
/* status block; on a change, updates the ack bit in hardware, logs the    */
/* transition, and kicks the tick routine to re-evaluate the real media    */
/* state.  Must be called with the interface serializer held.              */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_phy_intr(struct bce_softc *sc)
{
	uint32_t new_link_state, old_link_state;
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ASSERT_SERIALIZED(ifp->if_serializer);

	/* Current link-state attention bit vs. the last acknowledged one. */
	new_link_state = sc->status_block->status_attn_bits &
	    STATUS_ATTN_BITS_LINK_STATE;
	old_link_state = sc->status_block->status_attn_bits_ack &
	    STATUS_ATTN_BITS_LINK_STATE;

	/* Handle any changes if the link state has changed. */
	if (new_link_state != old_link_state) {	/* XXX redundant? */
		DBRUN(BCE_VERBOSE_INTR, bce_dump_status_block(sc));

		/* Update the status_attn_bits_ack field in the status block. */
		if (new_link_state) {
			REG_WR(sc, BCE_PCICFG_STATUS_BIT_SET_CMD,
			    STATUS_ATTN_BITS_LINK_STATE);
			if (bootverbose)
				if_printf(ifp, "Link is now UP.\n");
		} else {
			REG_WR(sc, BCE_PCICFG_STATUS_BIT_CLEAR_CMD,
			    STATUS_ATTN_BITS_LINK_STATE);
			if (bootverbose)
				if_printf(ifp, "Link is now DOWN.\n");
		}

		/*
		 * Assume link is down and allow tick routine to
		 * update the state based on the actual media state.
		 */
		sc->bce_link = 0;
		callout_stop(&sc->bce_tick_callout);
		bce_tick_serialized(sc);
	}

	/* Acknowledge the link change interrupt. */
	REG_WR(sc, BCE_EMAC_STATUS, BCE_EMAC_STATUS_LINK_CHANGE);
}
*/ 4356 /* */ 4357 /* Returns: */ 4358 /* hw_cons */ 4359 /****************************************************************************/ 4360 static __inline uint16_t 4361 bce_get_hw_rx_cons(struct bce_softc *sc) 4362 { 4363 uint16_t hw_cons = sc->status_block->status_rx_quick_consumer_index0; 4364 4365 if ((hw_cons & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE) 4366 hw_cons++; 4367 return hw_cons; 4368 } 4369 4370 4371 /****************************************************************************/ 4372 /* Handles received frame interrupt events. */ 4373 /* */ 4374 /* Returns: */ 4375 /* Nothing. */ 4376 /****************************************************************************/ 4377 static void 4378 bce_rx_intr(struct bce_softc *sc, int count, uint16_t hw_cons) 4379 { 4380 struct ifnet *ifp = &sc->arpcom.ac_if; 4381 uint16_t sw_cons, sw_chain_cons, sw_prod, sw_chain_prod; 4382 uint32_t sw_prod_bseq; 4383 4384 ASSERT_SERIALIZED(ifp->if_serializer); 4385 4386 /* Get working copies of the driver's view of the RX indices. */ 4387 sw_cons = sc->rx_cons; 4388 sw_prod = sc->rx_prod; 4389 sw_prod_bseq = sc->rx_prod_bseq; 4390 4391 /* Scan through the receive chain as long as there is work to do. */ 4392 while (sw_cons != hw_cons) { 4393 struct mbuf *m = NULL; 4394 struct l2_fhdr *l2fhdr = NULL; 4395 struct rx_bd *rxbd; 4396 unsigned int len; 4397 uint32_t status = 0; 4398 4399 #ifdef DEVICE_POLLING 4400 if (count >= 0 && count-- == 0) 4401 break; 4402 #endif 4403 4404 /* 4405 * Convert the producer/consumer indices 4406 * to an actual rx_bd index. 4407 */ 4408 sw_chain_cons = RX_CHAIN_IDX(sc, sw_cons); 4409 sw_chain_prod = RX_CHAIN_IDX(sc, sw_prod); 4410 4411 /* Get the used rx_bd. */ 4412 rxbd = &sc->rx_bd_chain[RX_PAGE(sw_chain_cons)] 4413 [RX_IDX(sw_chain_cons)]; 4414 sc->free_rx_bd++; 4415 4416 /* The mbuf is stored with the last rx_bd entry of a packet. 
/****************************************************************************/
/* Handles received frame interrupt events.                                 */
/*                                                                          */
/* Walks the RX chain from the software consumer index up to the hardware  */
/* consumer index `hw_cons`, validating each frame's l2_fhdr, recycling    */
/* the descriptor (with a fresh mbuf on success, the old one on error),    */
/* and passing good frames up the stack.  `count` bounds the number of     */
/* frames processed when DEVICE_POLLING is in effect (negative = no        */
/* limit).  Must be called with the interface serializer held.             */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_rx_intr(struct bce_softc *sc, int count, uint16_t hw_cons)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint16_t sw_cons, sw_chain_cons, sw_prod, sw_chain_prod;
	uint32_t sw_prod_bseq;

	ASSERT_SERIALIZED(ifp->if_serializer);

	/* Get working copies of the driver's view of the RX indices. */
	sw_cons = sc->rx_cons;
	sw_prod = sc->rx_prod;
	sw_prod_bseq = sc->rx_prod_bseq;

	/* Scan through the receive chain as long as there is work to do. */
	while (sw_cons != hw_cons) {
		struct mbuf *m = NULL;
		struct l2_fhdr *l2fhdr = NULL;
		struct rx_bd *rxbd;
		unsigned int len;
		uint32_t status = 0;

#ifdef DEVICE_POLLING
		/* Honor the polling budget; count < 0 means unlimited. */
		if (count >= 0 && count-- == 0)
			break;
#endif

		/*
		 * Convert the producer/consumer indices
		 * to an actual rx_bd index.
		 */
		sw_chain_cons = RX_CHAIN_IDX(sc, sw_cons);
		sw_chain_prod = RX_CHAIN_IDX(sc, sw_prod);

		/* Get the used rx_bd. */
		rxbd = &sc->rx_bd_chain[RX_PAGE(sw_chain_cons)]
		    [RX_IDX(sw_chain_cons)];
		sc->free_rx_bd++;

		/* The mbuf is stored with the last rx_bd entry of a packet. */
		if (sc->rx_mbuf_ptr[sw_chain_cons] != NULL) {
			/* cons and prod should advance in lockstep here;
			 * a mismatch indicates a chain accounting bug. */
			if (sw_chain_cons != sw_chain_prod) {
				if_printf(ifp, "RX cons(%d) != prod(%d), "
				    "drop!\n", sw_chain_cons,
				    sw_chain_prod);
				ifp->if_ierrors++;

				bce_setup_rxdesc_std(sc, sw_chain_cons,
				    &sw_prod_bseq);
				m = NULL;
				goto bce_rx_int_next_rx;
			}

			/* Unmap the mbuf from DMA space. */
			bus_dmamap_sync(sc->rx_mbuf_tag,
			    sc->rx_mbuf_map[sw_chain_cons],
			    BUS_DMASYNC_POSTREAD);

			/* Save the mbuf from the driver's chain. */
			m = sc->rx_mbuf_ptr[sw_chain_cons];

			/*
			 * Frames received on the NetXteme II are prepended
			 * with an l2_fhdr structure which provides status
			 * information about the received frame (including
			 * VLAN tags and checksum info).  The frames are also
			 * automatically adjusted to align the IP header
			 * (i.e. two null bytes are inserted before the
			 * Ethernet header).  As a result the data DMA'd by
			 * the controller into the mbuf is as follows:
			 *
			 * +---------+-----+---------------------+-----+
			 * | l2_fhdr | pad | packet data         | FCS |
			 * +---------+-----+---------------------+-----+
			 *
			 * The l2_fhdr needs to be checked and skipped and the
			 * FCS needs to be stripped before sending the packet
			 * up the stack.
			 */
			l2fhdr = mtod(m, struct l2_fhdr *);

			len = l2fhdr->l2_fhdr_pkt_len;
			status = l2fhdr->l2_fhdr_status;

			/* Strip the trailing FCS from the reported length. */
			len -= ETHER_CRC_LEN;

			/* Check the received frame for errors. */
			if (status & (L2_FHDR_ERRORS_BAD_CRC |
			    L2_FHDR_ERRORS_PHY_DECODE |
			    L2_FHDR_ERRORS_ALIGNMENT |
			    L2_FHDR_ERRORS_TOO_SHORT |
			    L2_FHDR_ERRORS_GIANT_FRAME)) {
				ifp->if_ierrors++;

				/* Reuse the mbuf for a new frame. */
				bce_setup_rxdesc_std(sc, sw_chain_prod,
				    &sw_prod_bseq);
				m = NULL;
				goto bce_rx_int_next_rx;
			}

			/*
			 * Get a new mbuf for the rx_bd.  If no new
			 * mbufs are available then reuse the current mbuf,
			 * log an ierror on the interface, and generate
			 * an error in the system log.
			 */
			if (bce_newbuf_std(sc, &sw_prod, &sw_chain_prod,
			    &sw_prod_bseq, 0)) {
				ifp->if_ierrors++;

				/* Try and reuse the existing mbuf. */
				bce_setup_rxdesc_std(sc, sw_chain_prod,
				    &sw_prod_bseq);
				m = NULL;
				goto bce_rx_int_next_rx;
			}

			/*
			 * Skip over the l2_fhdr when passing
			 * the data up the stack.
			 */
			m_adj(m, sizeof(struct l2_fhdr) + ETHER_ALIGN);

			m->m_pkthdr.len = m->m_len = len;
			m->m_pkthdr.rcvif = ifp;

			/* Validate the checksum if offload enabled. */
			if (ifp->if_capenable & IFCAP_RXCSUM) {
				/* Check for an IP datagram. */
				if (status & L2_FHDR_STATUS_IP_DATAGRAM) {
					m->m_pkthdr.csum_flags |=
					    CSUM_IP_CHECKED;

					/* Check if the IP checksum is valid. */
					if ((l2fhdr->l2_fhdr_ip_xsum ^
					    0xffff) == 0) {
						m->m_pkthdr.csum_flags |=
						    CSUM_IP_VALID;
					}
				}

				/* Check for a valid TCP/UDP frame. */
				if (status & (L2_FHDR_STATUS_TCP_SEGMENT |
				    L2_FHDR_STATUS_UDP_DATAGRAM)) {

					/* Check for a good TCP/UDP checksum. */
					if ((status &
					    (L2_FHDR_ERRORS_TCP_XSUM |
					    L2_FHDR_ERRORS_UDP_XSUM)) == 0) {
						m->m_pkthdr.csum_data =
						    l2fhdr->l2_fhdr_tcp_udp_xsum;
						m->m_pkthdr.csum_flags |=
						    CSUM_DATA_VALID |
						    CSUM_PSEUDO_HDR;
					}
				}
			}

			ifp->if_ipackets++;
bce_rx_int_next_rx:
			sw_prod = NEXT_RX_BD(sw_prod);
		}

		sw_cons = NEXT_RX_BD(sw_cons);

		/* If we have a packet, pass it up the stack */
		if (m) {
			if (status & L2_FHDR_STATUS_L2_VLAN_TAG) {
				m->m_flags |= M_VLANTAG;
				m->m_pkthdr.ether_vlantag =
				    l2fhdr->l2_fhdr_vlan_tag;
			}
			ifp->if_input(ifp, m);
		}
	}

	/* Commit the updated indices back to the softc... */
	sc->rx_cons = sw_cons;
	sc->rx_prod = sw_prod;
	sc->rx_prod_bseq = sw_prod_bseq;

	/* ...and publish them to the hardware mailbox. */
	REG_WR16(sc, MB_GET_CID_ADDR(RX_CID) + BCE_L2MQ_RX_HOST_BDIDX,
	    sc->rx_prod);
	REG_WR(sc, MB_GET_CID_ADDR(RX_CID) + BCE_L2MQ_RX_HOST_BSEQ,
	    sc->rx_prod_bseq);
}


/****************************************************************************/
/* Reads the transmit consumer value from the status block (skipping over  */
/* chain page pointer if necessary).                                        */
/*                                                                          */
/* Returns:                                                                 */
/*   hw_cons                                                                */
/****************************************************************************/
static __inline uint16_t
bce_get_hw_tx_cons(struct bce_softc *sc)
{
	uint16_t hw_cons = sc->status_block->status_tx_quick_consumer_index0;

	/* Skip the per-page next-page pointer entry. */
	if ((hw_cons & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
		hw_cons++;
	return hw_cons;
}
*/ 4588 /****************************************************************************/ 4589 static void 4590 bce_tx_intr(struct bce_softc *sc, uint16_t hw_tx_cons) 4591 { 4592 struct ifnet *ifp = &sc->arpcom.ac_if; 4593 uint16_t sw_tx_cons, sw_tx_chain_cons; 4594 4595 ASSERT_SERIALIZED(ifp->if_serializer); 4596 4597 /* Get the hardware's view of the TX consumer index. */ 4598 sw_tx_cons = sc->tx_cons; 4599 4600 /* Cycle through any completed TX chain page entries. */ 4601 while (sw_tx_cons != hw_tx_cons) { 4602 sw_tx_chain_cons = TX_CHAIN_IDX(sc, sw_tx_cons); 4603 4604 /* 4605 * Free the associated mbuf. Remember 4606 * that only the last tx_bd of a packet 4607 * has an mbuf pointer and DMA map. 4608 */ 4609 if (sc->tx_mbuf_ptr[sw_tx_chain_cons] != NULL) { 4610 /* Unmap the mbuf. */ 4611 bus_dmamap_unload(sc->tx_mbuf_tag, 4612 sc->tx_mbuf_map[sw_tx_chain_cons]); 4613 4614 /* Free the mbuf. */ 4615 m_freem(sc->tx_mbuf_ptr[sw_tx_chain_cons]); 4616 sc->tx_mbuf_ptr[sw_tx_chain_cons] = NULL; 4617 4618 ifp->if_opackets++; 4619 } 4620 4621 sc->used_tx_bd--; 4622 sw_tx_cons = NEXT_TX_BD(sw_tx_cons); 4623 } 4624 4625 if (sc->used_tx_bd == 0) { 4626 /* Clear the TX timeout timer. */ 4627 ifp->if_timer = 0; 4628 } 4629 4630 /* Clear the tx hardware queue full flag. */ 4631 if (sc->max_tx_bd - sc->used_tx_bd >= BCE_TX_SPARE_SPACE) 4632 ifp->if_flags &= ~IFF_OACTIVE; 4633 sc->tx_cons = sw_tx_cons; 4634 } 4635 4636 4637 /****************************************************************************/ 4638 /* Disables interrupt generation. */ 4639 /* */ 4640 /* Returns: */ 4641 /* Nothing. 
/****************************************************************************/
/* Disables interrupt generation.                                           */
/*                                                                          */
/* Masks the chip's interrupt, stops the MSI-workaround callout, and       */
/* disables the serialized interrupt handler.                               */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_disable_intr(struct bce_softc *sc)
{
	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, BCE_PCICFG_INT_ACK_CMD_MASK_INT);
	/* Read back to flush the posted write before returning. */
	REG_RD(sc, BCE_PCICFG_INT_ACK_CMD);

	/* Stop the lost-MSI detection state machine. */
	callout_stop(&sc->bce_ckmsi_callout);
	sc->bce_msi_maylose = FALSE;
	sc->bce_check_rx_cons = 0;
	sc->bce_check_tx_cons = 0;
	sc->bce_check_status_idx = 0xffff;

	lwkt_serialize_handler_disable(sc->arpcom.ac_if.if_serializer);
}


/****************************************************************************/
/* Enables interrupt generation.                                            */
/*                                                                          */
/* Re-enables the serialized handler, acknowledges up to the last seen     */
/* status index, unmasks the interrupt, and forces a coalescing "now"      */
/* event so any pending work is picked up immediately.  If the lost-MSI    */
/* workaround is active, (re)arms its periodic check callout.              */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_enable_intr(struct bce_softc *sc)
{
	lwkt_serialize_handler_enable(sc->arpcom.ac_if.if_serializer);

	/* Ack with the interrupt still masked, then unmask. */
	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
	    BCE_PCICFG_INT_ACK_CMD_INDEX_VALID |
	    BCE_PCICFG_INT_ACK_CMD_MASK_INT | sc->last_status_idx);
	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
	    BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx);

	/* Force an immediate coalescing event. */
	REG_WR(sc, BCE_HC_COMMAND, sc->hc_command | BCE_HC_COMMAND_COAL_NOW);

	if (sc->bce_flags & BCE_CHECK_MSI_FLAG) {
		/* Reset the lost-MSI detection state before arming it. */
		sc->bce_msi_maylose = FALSE;
		sc->bce_check_rx_cons = 0;
		sc->bce_check_tx_cons = 0;
		sc->bce_check_status_idx = 0xffff;

		if (bootverbose)
			if_printf(&sc->arpcom.ac_if, "check msi\n");

		callout_reset_bycpu(&sc->bce_ckmsi_callout, BCE_MSI_CKINTVL,
		    bce_check_msi, sc, sc->bce_intr_cpuid);
	}
}
/****************************************************************************/
/* Reenables interrupt generation during interrupt handling.                */
/*                                                                          */
/* For legacy INTx the interrupt is first re-acked with the mask bit set   */
/* to deassert the line before the final unmasking ack.                    */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_reenable_intr(struct bce_softc *sc)
{
	if (sc->bce_irq_type == PCI_INTR_TYPE_LEGACY) {
		REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
		    BCE_PCICFG_INT_ACK_CMD_INDEX_VALID |
		    BCE_PCICFG_INT_ACK_CMD_MASK_INT | sc->last_status_idx);
	}
	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
	    BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx);
}


/****************************************************************************/
/* Handles controller initialization.                                       */
/*                                                                          */
/* Full (re)initialization path: reset, chip and block init, MAC address,  */
/* MTU programming, RX/TX chain setup, interrupt enable, media selection,  */
/* and the periodic tick callout.  Must be called with the interface       */
/* serializer held; on any failure the controller is stopped again.        */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_init(void *xsc)
{
	struct bce_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t ether_mtu;
	int error;

	ASSERT_SERIALIZED(ifp->if_serializer);

	/* Check if the driver is still running and bail out if it is. */
	if (ifp->if_flags & IFF_RUNNING)
		return;

	bce_stop(sc);

	error = bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
	if (error) {
		if_printf(ifp, "Controller reset failed!\n");
		goto back;
	}

	error = bce_chipinit(sc);
	if (error) {
		if_printf(ifp, "Controller initialization failed!\n");
		goto back;
	}

	error = bce_blockinit(sc);
	if (error) {
		if_printf(ifp, "Block initialization failed!\n");
		goto back;
	}

	/* Load our MAC address. */
	bcopy(IF_LLADDR(ifp), sc->eaddr, ETHER_ADDR_LEN);
	bce_set_mac_addr(sc);

	/* Calculate and program the Ethernet MTU size. */
	ether_mtu = ETHER_HDR_LEN + EVL_ENCAPLEN + ifp->if_mtu + ETHER_CRC_LEN;

	DBPRINT(sc, BCE_INFO, "%s(): setting mtu = %d\n", __func__, ether_mtu);

	/*
	 * Program the mtu, enabling jumbo frame
	 * support if necessary.  Also set the mbuf
	 * allocation count for RX frames.
	 */
	if (ether_mtu > ETHER_MAX_LEN + EVL_ENCAPLEN) {
#ifdef notyet
		REG_WR(sc, BCE_EMAC_RX_MTU_SIZE,
		    min(ether_mtu, BCE_MAX_JUMBO_ETHER_MTU) |
		    BCE_EMAC_RX_MTU_SIZE_JUMBO_ENA);
		sc->mbuf_alloc_size = MJUM9BYTES;
#else
		panic("jumbo buffer is not supported yet");
#endif
	} else {
		REG_WR(sc, BCE_EMAC_RX_MTU_SIZE, ether_mtu);
		sc->mbuf_alloc_size = MCLBYTES;
	}

	/* Calculate the RX Ethernet frame size for rx_bd's. */
	/* NOTE(review): "+ 2" is the IP-alignment pad, "+ 8" looks like
	 * slack for the FCS/rounding — confirm against chip docs. */
	sc->max_frame_size = sizeof(struct l2_fhdr) + 2 + ether_mtu + 8;

	DBPRINT(sc, BCE_INFO,
	    "%s(): mclbytes = %d, mbuf_alloc_size = %d, "
	    "max_frame_size = %d\n",
	    __func__, (int)MCLBYTES, sc->mbuf_alloc_size,
	    sc->max_frame_size);

	/* Program appropriate promiscuous/multicast filtering. */
	bce_set_rx_mode(sc);

	/* Init RX buffer descriptor chain. */
	bce_init_rx_chain(sc);	/* XXX return value */

	/* Init TX buffer descriptor chain. */
	bce_init_tx_chain(sc);	/* XXX return value */

#ifdef DEVICE_POLLING
	/* Disable interrupts if we are polling. */
	if (ifp->if_flags & IFF_POLLING) {
		bce_disable_intr(sc);

		REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
		    (1 << 16) | sc->bce_rx_quick_cons_trip);
		REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
		    (1 << 16) | sc->bce_tx_quick_cons_trip);
	} else
#endif
	/* Enable host interrupts. */
	bce_enable_intr(sc);

	bce_ifmedia_upd(ifp);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	callout_reset_bycpu(&sc->bce_tick_callout, hz, bce_tick, sc,
	    sc->bce_intr_cpuid);
back:
	if (error)
		bce_stop(sc);
}


/****************************************************************************/
/* Initialize the controller just enough so that any management firmware   */
/* running on the device will continue to operate corectly.                 */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_mgmt_init(struct bce_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;

	/* Bail out if management firmware is not running. */
	if (!(sc->bce_flags & BCE_MFW_ENABLE_FLAG))
		return;

	/* Enable all critical blocks in the MAC. */
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
		REG_WR(sc, BCE_MISC_ENABLE_SET_BITS,
		    BCE_MISC_ENABLE_DEFAULT_XI);
	} else {
		REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, BCE_MISC_ENABLE_DEFAULT);
	}
	/* Read back to flush the posted write, then let the blocks settle. */
	REG_RD(sc, BCE_MISC_ENABLE_SET_BITS);
	DELAY(20);

	bce_ifmedia_upd(ifp);
}
*/ 4862 /****************************************************************************/ 4863 static int 4864 bce_encap(struct bce_softc *sc, struct mbuf **m_head) 4865 { 4866 bus_dma_segment_t segs[BCE_MAX_SEGMENTS]; 4867 bus_dmamap_t map, tmp_map; 4868 struct mbuf *m0 = *m_head; 4869 struct tx_bd *txbd = NULL; 4870 uint16_t vlan_tag = 0, flags = 0, mss = 0; 4871 uint16_t chain_prod, chain_prod_start, prod; 4872 uint32_t prod_bseq; 4873 int i, error, maxsegs, nsegs; 4874 4875 /* Transfer any checksum offload flags to the bd. */ 4876 if (m0->m_pkthdr.csum_flags & CSUM_TSO) { 4877 error = bce_tso_setup(sc, m_head, &flags, &mss); 4878 if (error) 4879 return ENOBUFS; 4880 m0 = *m_head; 4881 } else if (m0->m_pkthdr.csum_flags & BCE_CSUM_FEATURES) { 4882 if (m0->m_pkthdr.csum_flags & CSUM_IP) 4883 flags |= TX_BD_FLAGS_IP_CKSUM; 4884 if (m0->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) 4885 flags |= TX_BD_FLAGS_TCP_UDP_CKSUM; 4886 } 4887 4888 /* Transfer any VLAN tags to the bd. */ 4889 if (m0->m_flags & M_VLANTAG) { 4890 flags |= TX_BD_FLAGS_VLAN_TAG; 4891 vlan_tag = m0->m_pkthdr.ether_vlantag; 4892 } 4893 4894 prod = sc->tx_prod; 4895 chain_prod_start = chain_prod = TX_CHAIN_IDX(sc, prod); 4896 4897 /* Map the mbuf into DMAable memory. */ 4898 map = sc->tx_mbuf_map[chain_prod_start]; 4899 4900 maxsegs = sc->max_tx_bd - sc->used_tx_bd; 4901 KASSERT(maxsegs >= BCE_TX_SPARE_SPACE, 4902 ("not enough segments %d", maxsegs)); 4903 if (maxsegs > BCE_MAX_SEGMENTS) 4904 maxsegs = BCE_MAX_SEGMENTS; 4905 4906 /* Map the mbuf into our DMA address space. */ 4907 error = bus_dmamap_load_mbuf_defrag(sc->tx_mbuf_tag, map, m_head, 4908 segs, maxsegs, &nsegs, BUS_DMA_NOWAIT); 4909 if (error) 4910 goto back; 4911 bus_dmamap_sync(sc->tx_mbuf_tag, map, BUS_DMASYNC_PREWRITE); 4912 4913 /* Reset m0 */ 4914 m0 = *m_head; 4915 4916 /* prod points to an empty tx_bd at this point. 
*/ 4917 prod_bseq = sc->tx_prod_bseq; 4918 4919 /* 4920 * Cycle through each mbuf segment that makes up 4921 * the outgoing frame, gathering the mapping info 4922 * for that segment and creating a tx_bd to for 4923 * the mbuf. 4924 */ 4925 for (i = 0; i < nsegs; i++) { 4926 chain_prod = TX_CHAIN_IDX(sc, prod); 4927 txbd= &sc->tx_bd_chain[TX_PAGE(chain_prod)][TX_IDX(chain_prod)]; 4928 4929 txbd->tx_bd_haddr_lo = htole32(BCE_ADDR_LO(segs[i].ds_addr)); 4930 txbd->tx_bd_haddr_hi = htole32(BCE_ADDR_HI(segs[i].ds_addr)); 4931 txbd->tx_bd_mss_nbytes = htole32(mss << 16) | 4932 htole16(segs[i].ds_len); 4933 txbd->tx_bd_vlan_tag = htole16(vlan_tag); 4934 txbd->tx_bd_flags = htole16(flags); 4935 4936 prod_bseq += segs[i].ds_len; 4937 if (i == 0) 4938 txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_START); 4939 prod = NEXT_TX_BD(prod); 4940 } 4941 4942 /* Set the END flag on the last TX buffer descriptor. */ 4943 txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_END); 4944 4945 /* 4946 * Ensure that the mbuf pointer for this transmission 4947 * is placed at the array index of the last 4948 * descriptor in this chain. This is done 4949 * because a single map is used for all 4950 * segments of the mbuf and we don't want to 4951 * unload the map before all of the segments 4952 * have been freed. 4953 */ 4954 sc->tx_mbuf_ptr[chain_prod] = m0; 4955 4956 tmp_map = sc->tx_mbuf_map[chain_prod]; 4957 sc->tx_mbuf_map[chain_prod] = map; 4958 sc->tx_mbuf_map[chain_prod_start] = tmp_map; 4959 4960 sc->used_tx_bd += nsegs; 4961 4962 /* prod points to the next free tx_bd at this point. */ 4963 sc->tx_prod = prod; 4964 sc->tx_prod_bseq = prod_bseq; 4965 back: 4966 if (error) { 4967 m_freem(*m_head); 4968 *m_head = NULL; 4969 } 4970 return error; 4971 } 4972 4973 4974 /****************************************************************************/ 4975 /* Main transmit routine when called from another routine with a lock. */ 4976 /* */ 4977 /* Returns: */ 4978 /* Nothing. 
/****************************************************************************/
/* Main transmit routine when called from another routine with a lock.      */
/*                                                                          */
/* Dequeues frames from the interface send queue, encapsulates them into   */
/* the tx_bd chain, and rings the TX mailbox doorbell once per batch.      */
/* Must be called with the interface serializer held.                       */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_start(struct ifnet *ifp)
{
	struct bce_softc *sc = ifp->if_softc;
	int count = 0;

	ASSERT_SERIALIZED(ifp->if_serializer);

	/* If there's no link, drain the queue and just exit. */
	if (!sc->bce_link) {
		ifq_purge(&ifp->if_snd);
		return;
	}

	/* Nothing to do unless running and not already throttled. */
	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		struct mbuf *m_head;

		/*
		 * We keep BCE_TX_SPARE_SPACE entries, so bce_encap() is
		 * unlikely to fail.
		 */
		if (sc->max_tx_bd - sc->used_tx_bd < BCE_TX_SPARE_SPACE) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* Check for any frames to send. */
		m_head = ifq_dequeue(&ifp->if_snd, NULL);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring.  If we
		 * don't have room, place the mbuf back at the
		 * head of the queue and set the OACTIVE flag
		 * to wait for the NIC to drain the chain.
		 */
		if (bce_encap(sc, &m_head)) {
			ifp->if_oerrors++;
			if (sc->used_tx_bd == 0) {
				/* Chain empty: keep trying other frames. */
				continue;
			} else {
				ifp->if_flags |= IFF_OACTIVE;
				break;
			}
		}

		count++;

		/* Send a copy of the frame to any BPF listeners. */
		ETHER_BPF_MTAP(ifp, m_head);
	}

	if (count == 0) {
		/* no packets were dequeued */
		return;
	}

	REG_WR(sc, BCE_MQ_COMMAND,
	    REG_RD(sc, BCE_MQ_COMMAND) | BCE_MQ_COMMAND_NO_MAP_ERROR);

	/* Start the transmit. */
	REG_WR16(sc, MB_GET_CID_ADDR(TX_CID) + BCE_L2CTX_TX_HOST_BIDX,
	    sc->tx_prod);
	REG_WR(sc, MB_GET_CID_ADDR(TX_CID) + BCE_L2CTX_TX_HOST_BSEQ,
	    sc->tx_prod_bseq);

	/* Set the tx timeout. */
	ifp->if_timer = BCE_TX_TIMEOUT;
}
*/ 5106 if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) { 5107 bce_reset(sc, BCE_DRV_MSG_CODE_RESET); 5108 bce_chipinit(sc); 5109 bce_mgmt_init(sc); 5110 } 5111 } 5112 sc->bce_if_flags = ifp->if_flags; 5113 break; 5114 5115 case SIOCADDMULTI: 5116 case SIOCDELMULTI: 5117 if (ifp->if_flags & IFF_RUNNING) 5118 bce_set_rx_mode(sc); 5119 break; 5120 5121 case SIOCSIFMEDIA: 5122 case SIOCGIFMEDIA: 5123 DBPRINT(sc, BCE_VERBOSE, "bce_phy_flags = 0x%08X\n", 5124 sc->bce_phy_flags); 5125 DBPRINT(sc, BCE_VERBOSE, "Copper media set/get\n"); 5126 5127 mii = device_get_softc(sc->bce_miibus); 5128 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); 5129 break; 5130 5131 case SIOCSIFCAP: 5132 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 5133 DBPRINT(sc, BCE_INFO, "Received SIOCSIFCAP = 0x%08X\n", 5134 (uint32_t) mask); 5135 5136 if (mask & IFCAP_HWCSUM) { 5137 ifp->if_capenable ^= (mask & IFCAP_HWCSUM); 5138 if (ifp->if_capenable & IFCAP_TXCSUM) 5139 ifp->if_hwassist |= BCE_CSUM_FEATURES; 5140 else 5141 ifp->if_hwassist &= ~BCE_CSUM_FEATURES; 5142 } 5143 if (mask & IFCAP_TSO) { 5144 ifp->if_capenable ^= IFCAP_TSO; 5145 if (ifp->if_capenable & IFCAP_TSO) 5146 ifp->if_hwassist |= CSUM_TSO; 5147 else 5148 ifp->if_hwassist &= ~CSUM_TSO; 5149 } 5150 break; 5151 5152 default: 5153 error = ether_ioctl(ifp, command, data); 5154 break; 5155 } 5156 return error; 5157 } 5158 5159 5160 /****************************************************************************/ 5161 /* Transmit timeout handler. */ 5162 /* */ 5163 /* Returns: */ 5164 /* Nothing. */ 5165 /****************************************************************************/ 5166 static void 5167 bce_watchdog(struct ifnet *ifp) 5168 { 5169 struct bce_softc *sc = ifp->if_softc; 5170 5171 ASSERT_SERIALIZED(ifp->if_serializer); 5172 5173 DBRUN(BCE_VERBOSE_SEND, 5174 bce_dump_driver_state(sc); 5175 bce_dump_status_block(sc)); 5176 5177 /* 5178 * If we are in this routine because of pause frames, then 5179 * don't reset the hardware. 
5180 */ 5181 if (REG_RD(sc, BCE_EMAC_TX_STATUS) & BCE_EMAC_TX_STATUS_XOFFED) 5182 return; 5183 5184 if_printf(ifp, "Watchdog timeout occurred, resetting!\n"); 5185 5186 /* DBRUN(BCE_FATAL, bce_breakpoint(sc)); */ 5187 5188 ifp->if_flags &= ~IFF_RUNNING; /* Force reinitialize */ 5189 bce_init(sc); 5190 5191 ifp->if_oerrors++; 5192 5193 if (!ifq_is_empty(&ifp->if_snd)) 5194 if_devstart(ifp); 5195 } 5196 5197 5198 #ifdef DEVICE_POLLING 5199 5200 static void 5201 bce_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) 5202 { 5203 struct bce_softc *sc = ifp->if_softc; 5204 struct status_block *sblk = sc->status_block; 5205 uint16_t hw_tx_cons, hw_rx_cons; 5206 5207 ASSERT_SERIALIZED(ifp->if_serializer); 5208 5209 switch (cmd) { 5210 case POLL_REGISTER: 5211 bce_disable_intr(sc); 5212 5213 REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP, 5214 (1 << 16) | sc->bce_rx_quick_cons_trip); 5215 REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP, 5216 (1 << 16) | sc->bce_tx_quick_cons_trip); 5217 return; 5218 case POLL_DEREGISTER: 5219 bce_enable_intr(sc); 5220 5221 REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP, 5222 (sc->bce_tx_quick_cons_trip_int << 16) | 5223 sc->bce_tx_quick_cons_trip); 5224 REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP, 5225 (sc->bce_rx_quick_cons_trip_int << 16) | 5226 sc->bce_rx_quick_cons_trip); 5227 return; 5228 default: 5229 break; 5230 } 5231 5232 /* 5233 * Save the status block index value for use when enabling 5234 * the interrupt. 5235 */ 5236 sc->last_status_idx = sblk->status_idx; 5237 5238 /* Make sure status index is extracted before rx/tx cons */ 5239 cpu_lfence(); 5240 5241 if (cmd == POLL_AND_CHECK_STATUS) { 5242 uint32_t status_attn_bits; 5243 5244 status_attn_bits = sblk->status_attn_bits; 5245 5246 /* Was it a link change interrupt? */ 5247 if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) != 5248 (sblk->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE)) 5249 bce_phy_intr(sc); 5250 5251 /* Clear any transient status updates during link state change. 
*/ 5252 REG_WR(sc, BCE_HC_COMMAND, 5253 sc->hc_command | BCE_HC_COMMAND_COAL_NOW_WO_INT); 5254 REG_RD(sc, BCE_HC_COMMAND); 5255 5256 /* 5257 * If any other attention is asserted then 5258 * the chip is toast. 5259 */ 5260 if ((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) != 5261 (sblk->status_attn_bits_ack & 5262 ~STATUS_ATTN_BITS_LINK_STATE)) { 5263 if_printf(ifp, "Fatal attention detected: 0x%08X\n", 5264 sblk->status_attn_bits); 5265 bce_init(sc); 5266 return; 5267 } 5268 } 5269 5270 hw_rx_cons = bce_get_hw_rx_cons(sc); 5271 hw_tx_cons = bce_get_hw_tx_cons(sc); 5272 5273 /* Check for any completed RX frames. */ 5274 if (hw_rx_cons != sc->rx_cons) 5275 bce_rx_intr(sc, count, hw_rx_cons); 5276 5277 /* Check for any completed TX frames. */ 5278 if (hw_tx_cons != sc->tx_cons) 5279 bce_tx_intr(sc, hw_tx_cons); 5280 5281 /* Check for new frames to transmit. */ 5282 if (!ifq_is_empty(&ifp->if_snd)) 5283 if_devstart(ifp); 5284 } 5285 5286 #endif /* DEVICE_POLLING */ 5287 5288 5289 /* 5290 * Interrupt handler. 5291 */ 5292 /****************************************************************************/ 5293 /* Main interrupt entry point. Verifies that the controller generated the */ 5294 /* interrupt and then calls a separate routine for handle the various */ 5295 /* interrupt causes (PHY, TX, RX). */ 5296 /* */ 5297 /* Returns: */ 5298 /* 0 for success, positive value for failure. */ 5299 /****************************************************************************/ 5300 static void 5301 bce_intr(struct bce_softc *sc) 5302 { 5303 struct ifnet *ifp = &sc->arpcom.ac_if; 5304 struct status_block *sblk; 5305 uint16_t hw_rx_cons, hw_tx_cons; 5306 uint32_t status_attn_bits; 5307 5308 ASSERT_SERIALIZED(ifp->if_serializer); 5309 5310 sblk = sc->status_block; 5311 5312 /* 5313 * Save the status block index value for use during 5314 * the next interrupt. 
5315 */ 5316 sc->last_status_idx = sblk->status_idx; 5317 5318 /* Make sure status index is extracted before rx/tx cons */ 5319 cpu_lfence(); 5320 5321 /* Check if the hardware has finished any work. */ 5322 hw_rx_cons = bce_get_hw_rx_cons(sc); 5323 hw_tx_cons = bce_get_hw_tx_cons(sc); 5324 5325 status_attn_bits = sblk->status_attn_bits; 5326 5327 /* Was it a link change interrupt? */ 5328 if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) != 5329 (sblk->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE)) { 5330 bce_phy_intr(sc); 5331 5332 /* 5333 * Clear any transient status updates during link state 5334 * change. 5335 */ 5336 REG_WR(sc, BCE_HC_COMMAND, 5337 sc->hc_command | BCE_HC_COMMAND_COAL_NOW_WO_INT); 5338 REG_RD(sc, BCE_HC_COMMAND); 5339 } 5340 5341 /* 5342 * If any other attention is asserted then 5343 * the chip is toast. 5344 */ 5345 if ((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) != 5346 (sblk->status_attn_bits_ack & ~STATUS_ATTN_BITS_LINK_STATE)) { 5347 if_printf(ifp, "Fatal attention detected: 0x%08X\n", 5348 sblk->status_attn_bits); 5349 bce_init(sc); 5350 return; 5351 } 5352 5353 /* Check for any completed RX frames. */ 5354 if (hw_rx_cons != sc->rx_cons) 5355 bce_rx_intr(sc, -1, hw_rx_cons); 5356 5357 /* Check for any completed TX frames. */ 5358 if (hw_tx_cons != sc->tx_cons) 5359 bce_tx_intr(sc, hw_tx_cons); 5360 5361 /* Re-enable interrupts. */ 5362 bce_reenable_intr(sc); 5363 5364 if (sc->bce_coalchg_mask) 5365 bce_coal_change(sc); 5366 5367 /* Handle any frames that arrived while handling the interrupt. */ 5368 if (!ifq_is_empty(&ifp->if_snd)) 5369 if_devstart(ifp); 5370 } 5371 5372 static void 5373 bce_intr_legacy(void *xsc) 5374 { 5375 struct bce_softc *sc = xsc; 5376 struct status_block *sblk; 5377 5378 sblk = sc->status_block; 5379 5380 /* 5381 * If the hardware status block index matches the last value 5382 * read by the driver and we haven't asserted our interrupt 5383 * then there's nothing to do. 
5384 */ 5385 if (sblk->status_idx == sc->last_status_idx && 5386 (REG_RD(sc, BCE_PCICFG_MISC_STATUS) & 5387 BCE_PCICFG_MISC_STATUS_INTA_VALUE)) 5388 return; 5389 5390 /* Ack the interrupt and stop others from occuring. */ 5391 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, 5392 BCE_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM | 5393 BCE_PCICFG_INT_ACK_CMD_MASK_INT); 5394 5395 /* 5396 * Read back to deassert IRQ immediately to avoid too 5397 * many spurious interrupts. 5398 */ 5399 REG_RD(sc, BCE_PCICFG_INT_ACK_CMD); 5400 5401 bce_intr(sc); 5402 } 5403 5404 static void 5405 bce_intr_msi(void *xsc) 5406 { 5407 struct bce_softc *sc = xsc; 5408 5409 /* Ack the interrupt and stop others from occuring. */ 5410 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, 5411 BCE_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM | 5412 BCE_PCICFG_INT_ACK_CMD_MASK_INT); 5413 5414 bce_intr(sc); 5415 } 5416 5417 static void 5418 bce_intr_msi_oneshot(void *xsc) 5419 { 5420 bce_intr(xsc); 5421 } 5422 5423 5424 /****************************************************************************/ 5425 /* Programs the various packet receive modes (broadcast and multicast). */ 5426 /* */ 5427 /* Returns: */ 5428 /* Nothing. */ 5429 /****************************************************************************/ 5430 static void 5431 bce_set_rx_mode(struct bce_softc *sc) 5432 { 5433 struct ifnet *ifp = &sc->arpcom.ac_if; 5434 struct ifmultiaddr *ifma; 5435 uint32_t hashes[NUM_MC_HASH_REGISTERS] = { 0, 0, 0, 0, 0, 0, 0, 0 }; 5436 uint32_t rx_mode, sort_mode; 5437 int h, i; 5438 5439 ASSERT_SERIALIZED(ifp->if_serializer); 5440 5441 /* Initialize receive mode default settings. */ 5442 rx_mode = sc->rx_mode & 5443 ~(BCE_EMAC_RX_MODE_PROMISCUOUS | 5444 BCE_EMAC_RX_MODE_KEEP_VLAN_TAG); 5445 sort_mode = 1 | BCE_RPM_SORT_USER0_BC_EN; 5446 5447 /* 5448 * ASF/IPMI/UMP firmware requires that VLAN tag stripping 5449 * be enbled. 
5450 */ 5451 if (!(BCE_IF_CAPABILITIES & IFCAP_VLAN_HWTAGGING) && 5452 !(sc->bce_flags & BCE_MFW_ENABLE_FLAG)) 5453 rx_mode |= BCE_EMAC_RX_MODE_KEEP_VLAN_TAG; 5454 5455 /* 5456 * Check for promiscuous, all multicast, or selected 5457 * multicast address filtering. 5458 */ 5459 if (ifp->if_flags & IFF_PROMISC) { 5460 DBPRINT(sc, BCE_INFO, "Enabling promiscuous mode.\n"); 5461 5462 /* Enable promiscuous mode. */ 5463 rx_mode |= BCE_EMAC_RX_MODE_PROMISCUOUS; 5464 sort_mode |= BCE_RPM_SORT_USER0_PROM_EN; 5465 } else if (ifp->if_flags & IFF_ALLMULTI) { 5466 DBPRINT(sc, BCE_INFO, "Enabling all multicast mode.\n"); 5467 5468 /* Enable all multicast addresses. */ 5469 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) { 5470 REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4), 5471 0xffffffff); 5472 } 5473 sort_mode |= BCE_RPM_SORT_USER0_MC_EN; 5474 } else { 5475 /* Accept one or more multicast(s). */ 5476 DBPRINT(sc, BCE_INFO, "Enabling selective multicast mode.\n"); 5477 5478 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 5479 if (ifma->ifma_addr->sa_family != AF_LINK) 5480 continue; 5481 h = ether_crc32_le( 5482 LLADDR((struct sockaddr_dl *)ifma->ifma_addr), 5483 ETHER_ADDR_LEN) & 0xFF; 5484 hashes[(h & 0xE0) >> 5] |= 1 << (h & 0x1F); 5485 } 5486 5487 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) { 5488 REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4), 5489 hashes[i]); 5490 } 5491 sort_mode |= BCE_RPM_SORT_USER0_MC_HSH_EN; 5492 } 5493 5494 /* Only make changes if the recive mode has actually changed. */ 5495 if (rx_mode != sc->rx_mode) { 5496 DBPRINT(sc, BCE_VERBOSE, "Enabling new receive mode: 0x%08X\n", 5497 rx_mode); 5498 5499 sc->rx_mode = rx_mode; 5500 REG_WR(sc, BCE_EMAC_RX_MODE, rx_mode); 5501 } 5502 5503 /* Disable and clear the exisitng sort before enabling a new sort. 
*/ 5504 REG_WR(sc, BCE_RPM_SORT_USER0, 0x0); 5505 REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode); 5506 REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode | BCE_RPM_SORT_USER0_ENA); 5507 } 5508 5509 5510 /****************************************************************************/ 5511 /* Called periodically to updates statistics from the controllers */ 5512 /* statistics block. */ 5513 /* */ 5514 /* Returns: */ 5515 /* Nothing. */ 5516 /****************************************************************************/ 5517 static void 5518 bce_stats_update(struct bce_softc *sc) 5519 { 5520 struct ifnet *ifp = &sc->arpcom.ac_if; 5521 struct statistics_block *stats = sc->stats_block; 5522 5523 DBPRINT(sc, BCE_EXCESSIVE, "Entering %s()\n", __func__); 5524 5525 ASSERT_SERIALIZED(ifp->if_serializer); 5526 5527 /* 5528 * Certain controllers don't report carrier sense errors correctly. 5529 * See errata E11_5708CA0_1165. 5530 */ 5531 if (!(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) && 5532 !(BCE_CHIP_ID(sc) == BCE_CHIP_ID_5708_A0)) { 5533 ifp->if_oerrors += 5534 (u_long)stats->stat_Dot3StatsCarrierSenseErrors; 5535 } 5536 5537 /* 5538 * Update the sysctl statistics from the hardware statistics. 
5539 */ 5540 sc->stat_IfHCInOctets = 5541 ((uint64_t)stats->stat_IfHCInOctets_hi << 32) + 5542 (uint64_t)stats->stat_IfHCInOctets_lo; 5543 5544 sc->stat_IfHCInBadOctets = 5545 ((uint64_t)stats->stat_IfHCInBadOctets_hi << 32) + 5546 (uint64_t)stats->stat_IfHCInBadOctets_lo; 5547 5548 sc->stat_IfHCOutOctets = 5549 ((uint64_t)stats->stat_IfHCOutOctets_hi << 32) + 5550 (uint64_t)stats->stat_IfHCOutOctets_lo; 5551 5552 sc->stat_IfHCOutBadOctets = 5553 ((uint64_t)stats->stat_IfHCOutBadOctets_hi << 32) + 5554 (uint64_t)stats->stat_IfHCOutBadOctets_lo; 5555 5556 sc->stat_IfHCInUcastPkts = 5557 ((uint64_t)stats->stat_IfHCInUcastPkts_hi << 32) + 5558 (uint64_t)stats->stat_IfHCInUcastPkts_lo; 5559 5560 sc->stat_IfHCInMulticastPkts = 5561 ((uint64_t)stats->stat_IfHCInMulticastPkts_hi << 32) + 5562 (uint64_t)stats->stat_IfHCInMulticastPkts_lo; 5563 5564 sc->stat_IfHCInBroadcastPkts = 5565 ((uint64_t)stats->stat_IfHCInBroadcastPkts_hi << 32) + 5566 (uint64_t)stats->stat_IfHCInBroadcastPkts_lo; 5567 5568 sc->stat_IfHCOutUcastPkts = 5569 ((uint64_t)stats->stat_IfHCOutUcastPkts_hi << 32) + 5570 (uint64_t)stats->stat_IfHCOutUcastPkts_lo; 5571 5572 sc->stat_IfHCOutMulticastPkts = 5573 ((uint64_t)stats->stat_IfHCOutMulticastPkts_hi << 32) + 5574 (uint64_t)stats->stat_IfHCOutMulticastPkts_lo; 5575 5576 sc->stat_IfHCOutBroadcastPkts = 5577 ((uint64_t)stats->stat_IfHCOutBroadcastPkts_hi << 32) + 5578 (uint64_t)stats->stat_IfHCOutBroadcastPkts_lo; 5579 5580 sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors = 5581 stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors; 5582 5583 sc->stat_Dot3StatsCarrierSenseErrors = 5584 stats->stat_Dot3StatsCarrierSenseErrors; 5585 5586 sc->stat_Dot3StatsFCSErrors = 5587 stats->stat_Dot3StatsFCSErrors; 5588 5589 sc->stat_Dot3StatsAlignmentErrors = 5590 stats->stat_Dot3StatsAlignmentErrors; 5591 5592 sc->stat_Dot3StatsSingleCollisionFrames = 5593 stats->stat_Dot3StatsSingleCollisionFrames; 5594 5595 sc->stat_Dot3StatsMultipleCollisionFrames = 
5596 stats->stat_Dot3StatsMultipleCollisionFrames; 5597 5598 sc->stat_Dot3StatsDeferredTransmissions = 5599 stats->stat_Dot3StatsDeferredTransmissions; 5600 5601 sc->stat_Dot3StatsExcessiveCollisions = 5602 stats->stat_Dot3StatsExcessiveCollisions; 5603 5604 sc->stat_Dot3StatsLateCollisions = 5605 stats->stat_Dot3StatsLateCollisions; 5606 5607 sc->stat_EtherStatsCollisions = 5608 stats->stat_EtherStatsCollisions; 5609 5610 sc->stat_EtherStatsFragments = 5611 stats->stat_EtherStatsFragments; 5612 5613 sc->stat_EtherStatsJabbers = 5614 stats->stat_EtherStatsJabbers; 5615 5616 sc->stat_EtherStatsUndersizePkts = 5617 stats->stat_EtherStatsUndersizePkts; 5618 5619 sc->stat_EtherStatsOverrsizePkts = 5620 stats->stat_EtherStatsOverrsizePkts; 5621 5622 sc->stat_EtherStatsPktsRx64Octets = 5623 stats->stat_EtherStatsPktsRx64Octets; 5624 5625 sc->stat_EtherStatsPktsRx65Octetsto127Octets = 5626 stats->stat_EtherStatsPktsRx65Octetsto127Octets; 5627 5628 sc->stat_EtherStatsPktsRx128Octetsto255Octets = 5629 stats->stat_EtherStatsPktsRx128Octetsto255Octets; 5630 5631 sc->stat_EtherStatsPktsRx256Octetsto511Octets = 5632 stats->stat_EtherStatsPktsRx256Octetsto511Octets; 5633 5634 sc->stat_EtherStatsPktsRx512Octetsto1023Octets = 5635 stats->stat_EtherStatsPktsRx512Octetsto1023Octets; 5636 5637 sc->stat_EtherStatsPktsRx1024Octetsto1522Octets = 5638 stats->stat_EtherStatsPktsRx1024Octetsto1522Octets; 5639 5640 sc->stat_EtherStatsPktsRx1523Octetsto9022Octets = 5641 stats->stat_EtherStatsPktsRx1523Octetsto9022Octets; 5642 5643 sc->stat_EtherStatsPktsTx64Octets = 5644 stats->stat_EtherStatsPktsTx64Octets; 5645 5646 sc->stat_EtherStatsPktsTx65Octetsto127Octets = 5647 stats->stat_EtherStatsPktsTx65Octetsto127Octets; 5648 5649 sc->stat_EtherStatsPktsTx128Octetsto255Octets = 5650 stats->stat_EtherStatsPktsTx128Octetsto255Octets; 5651 5652 sc->stat_EtherStatsPktsTx256Octetsto511Octets = 5653 stats->stat_EtherStatsPktsTx256Octetsto511Octets; 5654 5655 
sc->stat_EtherStatsPktsTx512Octetsto1023Octets = 5656 stats->stat_EtherStatsPktsTx512Octetsto1023Octets; 5657 5658 sc->stat_EtherStatsPktsTx1024Octetsto1522Octets = 5659 stats->stat_EtherStatsPktsTx1024Octetsto1522Octets; 5660 5661 sc->stat_EtherStatsPktsTx1523Octetsto9022Octets = 5662 stats->stat_EtherStatsPktsTx1523Octetsto9022Octets; 5663 5664 sc->stat_XonPauseFramesReceived = 5665 stats->stat_XonPauseFramesReceived; 5666 5667 sc->stat_XoffPauseFramesReceived = 5668 stats->stat_XoffPauseFramesReceived; 5669 5670 sc->stat_OutXonSent = 5671 stats->stat_OutXonSent; 5672 5673 sc->stat_OutXoffSent = 5674 stats->stat_OutXoffSent; 5675 5676 sc->stat_FlowControlDone = 5677 stats->stat_FlowControlDone; 5678 5679 sc->stat_MacControlFramesReceived = 5680 stats->stat_MacControlFramesReceived; 5681 5682 sc->stat_XoffStateEntered = 5683 stats->stat_XoffStateEntered; 5684 5685 sc->stat_IfInFramesL2FilterDiscards = 5686 stats->stat_IfInFramesL2FilterDiscards; 5687 5688 sc->stat_IfInRuleCheckerDiscards = 5689 stats->stat_IfInRuleCheckerDiscards; 5690 5691 sc->stat_IfInFTQDiscards = 5692 stats->stat_IfInFTQDiscards; 5693 5694 sc->stat_IfInMBUFDiscards = 5695 stats->stat_IfInMBUFDiscards; 5696 5697 sc->stat_IfInRuleCheckerP4Hit = 5698 stats->stat_IfInRuleCheckerP4Hit; 5699 5700 sc->stat_CatchupInRuleCheckerDiscards = 5701 stats->stat_CatchupInRuleCheckerDiscards; 5702 5703 sc->stat_CatchupInFTQDiscards = 5704 stats->stat_CatchupInFTQDiscards; 5705 5706 sc->stat_CatchupInMBUFDiscards = 5707 stats->stat_CatchupInMBUFDiscards; 5708 5709 sc->stat_CatchupInRuleCheckerP4Hit = 5710 stats->stat_CatchupInRuleCheckerP4Hit; 5711 5712 sc->com_no_buffers = REG_RD_IND(sc, 0x120084); 5713 5714 /* 5715 * Update the interface statistics from the 5716 * hardware statistics. 
5717 */ 5718 ifp->if_collisions = (u_long)sc->stat_EtherStatsCollisions; 5719 5720 ifp->if_ierrors = (u_long)sc->stat_EtherStatsUndersizePkts + 5721 (u_long)sc->stat_EtherStatsOverrsizePkts + 5722 (u_long)sc->stat_IfInMBUFDiscards + 5723 (u_long)sc->stat_Dot3StatsAlignmentErrors + 5724 (u_long)sc->stat_Dot3StatsFCSErrors + 5725 (u_long)sc->stat_IfInRuleCheckerDiscards + 5726 (u_long)sc->stat_IfInFTQDiscards + 5727 (u_long)sc->com_no_buffers; 5728 5729 ifp->if_oerrors = 5730 (u_long)sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors + 5731 (u_long)sc->stat_Dot3StatsExcessiveCollisions + 5732 (u_long)sc->stat_Dot3StatsLateCollisions; 5733 5734 DBPRINT(sc, BCE_EXCESSIVE, "Exiting %s()\n", __func__); 5735 } 5736 5737 5738 /****************************************************************************/ 5739 /* Periodic function to notify the bootcode that the driver is still */ 5740 /* present. */ 5741 /* */ 5742 /* Returns: */ 5743 /* Nothing. */ 5744 /****************************************************************************/ 5745 static void 5746 bce_pulse(void *xsc) 5747 { 5748 struct bce_softc *sc = xsc; 5749 struct ifnet *ifp = &sc->arpcom.ac_if; 5750 uint32_t msg; 5751 5752 lwkt_serialize_enter(ifp->if_serializer); 5753 5754 /* Tell the firmware that the driver is still running. */ 5755 msg = (uint32_t)++sc->bce_fw_drv_pulse_wr_seq; 5756 bce_shmem_wr(sc, BCE_DRV_PULSE_MB, msg); 5757 5758 /* Update the bootcode condition. */ 5759 sc->bc_state = bce_shmem_rd(sc, BCE_BC_STATE_CONDITION); 5760 5761 /* Report whether the bootcode still knows the driver is running. */ 5762 if (!sc->bce_drv_cardiac_arrest) { 5763 if (!(sc->bc_state & BCE_CONDITION_DRV_PRESENT)) { 5764 sc->bce_drv_cardiac_arrest = 1; 5765 if_printf(ifp, "Bootcode lost the driver pulse! " 5766 "(bc_state = 0x%08X)\n", sc->bc_state); 5767 } 5768 } else { 5769 /* 5770 * Not supported by all bootcode versions. 
5771 * (v5.0.11+ and v5.2.1+) Older bootcode 5772 * will require the driver to reset the 5773 * controller to clear this condition. 5774 */ 5775 if (sc->bc_state & BCE_CONDITION_DRV_PRESENT) { 5776 sc->bce_drv_cardiac_arrest = 0; 5777 if_printf(ifp, "Bootcode found the driver pulse! " 5778 "(bc_state = 0x%08X)\n", sc->bc_state); 5779 } 5780 } 5781 5782 /* Schedule the next pulse. */ 5783 callout_reset_bycpu(&sc->bce_pulse_callout, hz, bce_pulse, sc, 5784 sc->bce_intr_cpuid); 5785 5786 lwkt_serialize_exit(ifp->if_serializer); 5787 } 5788 5789 5790 /****************************************************************************/ 5791 /* Periodic function to check whether MSI is lost */ 5792 /* */ 5793 /* Returns: */ 5794 /* Nothing. */ 5795 /****************************************************************************/ 5796 static void 5797 bce_check_msi(void *xsc) 5798 { 5799 struct bce_softc *sc = xsc; 5800 struct ifnet *ifp = &sc->arpcom.ac_if; 5801 struct status_block *sblk = sc->status_block; 5802 5803 5804 lwkt_serialize_enter(ifp->if_serializer); 5805 5806 KKASSERT(mycpuid == sc->bce_intr_cpuid); 5807 5808 if ((ifp->if_flags & (IFF_RUNNING | IFF_POLLING)) != IFF_RUNNING) { 5809 lwkt_serialize_exit(ifp->if_serializer); 5810 return; 5811 } 5812 5813 if (bce_get_hw_rx_cons(sc) != sc->rx_cons || 5814 bce_get_hw_tx_cons(sc) != sc->tx_cons || 5815 (sblk->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) != 5816 (sblk->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE)) { 5817 if (sc->bce_check_rx_cons == sc->rx_cons && 5818 sc->bce_check_tx_cons == sc->tx_cons && 5819 sc->bce_check_status_idx == sc->last_status_idx) { 5820 uint32_t msi_ctrl; 5821 5822 if (!sc->bce_msi_maylose) { 5823 sc->bce_msi_maylose = TRUE; 5824 goto done; 5825 } 5826 5827 msi_ctrl = REG_RD(sc, BCE_PCICFG_MSI_CONTROL); 5828 if (msi_ctrl & BCE_PCICFG_MSI_CONTROL_ENABLE) { 5829 if (bootverbose) 5830 if_printf(ifp, "lost MSI\n"); 5831 5832 REG_WR(sc, BCE_PCICFG_MSI_CONTROL, 5833 msi_ctrl & 
~BCE_PCICFG_MSI_CONTROL_ENABLE); 5834 REG_WR(sc, BCE_PCICFG_MSI_CONTROL, msi_ctrl); 5835 5836 bce_intr_msi(sc); 5837 } else if (bootverbose) { 5838 if_printf(ifp, "MSI may be lost\n"); 5839 } 5840 } 5841 } 5842 sc->bce_msi_maylose = FALSE; 5843 sc->bce_check_rx_cons = sc->rx_cons; 5844 sc->bce_check_tx_cons = sc->tx_cons; 5845 sc->bce_check_status_idx = sc->last_status_idx; 5846 5847 done: 5848 callout_reset(&sc->bce_ckmsi_callout, BCE_MSI_CKINTVL, 5849 bce_check_msi, sc); 5850 lwkt_serialize_exit(ifp->if_serializer); 5851 } 5852 5853 5854 /****************************************************************************/ 5855 /* Periodic function to perform maintenance tasks. */ 5856 /* */ 5857 /* Returns: */ 5858 /* Nothing. */ 5859 /****************************************************************************/ 5860 static void 5861 bce_tick_serialized(struct bce_softc *sc) 5862 { 5863 struct ifnet *ifp = &sc->arpcom.ac_if; 5864 struct mii_data *mii; 5865 5866 ASSERT_SERIALIZED(ifp->if_serializer); 5867 5868 /* Update the statistics from the hardware statistics block. */ 5869 bce_stats_update(sc); 5870 5871 /* Schedule the next tick. */ 5872 callout_reset_bycpu(&sc->bce_tick_callout, hz, bce_tick, sc, 5873 sc->bce_intr_cpuid); 5874 5875 /* If link is up already up then we're done. */ 5876 if (sc->bce_link) 5877 return; 5878 5879 mii = device_get_softc(sc->bce_miibus); 5880 mii_tick(mii); 5881 5882 /* Check if the link has come up. */ 5883 if ((mii->mii_media_status & IFM_ACTIVE) && 5884 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { 5885 sc->bce_link++; 5886 /* Now that link is up, handle any outstanding TX traffic. 
*/ 5887 if (!ifq_is_empty(&ifp->if_snd)) 5888 if_devstart(ifp); 5889 } 5890 } 5891 5892 5893 static void 5894 bce_tick(void *xsc) 5895 { 5896 struct bce_softc *sc = xsc; 5897 struct ifnet *ifp = &sc->arpcom.ac_if; 5898 5899 lwkt_serialize_enter(ifp->if_serializer); 5900 bce_tick_serialized(sc); 5901 lwkt_serialize_exit(ifp->if_serializer); 5902 } 5903 5904 5905 #ifdef BCE_DEBUG 5906 /****************************************************************************/ 5907 /* Allows the driver state to be dumped through the sysctl interface. */ 5908 /* */ 5909 /* Returns: */ 5910 /* 0 for success, positive value for failure. */ 5911 /****************************************************************************/ 5912 static int 5913 bce_sysctl_driver_state(SYSCTL_HANDLER_ARGS) 5914 { 5915 int error; 5916 int result; 5917 struct bce_softc *sc; 5918 5919 result = -1; 5920 error = sysctl_handle_int(oidp, &result, 0, req); 5921 5922 if (error || !req->newptr) 5923 return (error); 5924 5925 if (result == 1) { 5926 sc = (struct bce_softc *)arg1; 5927 bce_dump_driver_state(sc); 5928 } 5929 5930 return error; 5931 } 5932 5933 5934 /****************************************************************************/ 5935 /* Allows the hardware state to be dumped through the sysctl interface. */ 5936 /* */ 5937 /* Returns: */ 5938 /* 0 for success, positive value for failure. 
*/ 5939 /****************************************************************************/ 5940 static int 5941 bce_sysctl_hw_state(SYSCTL_HANDLER_ARGS) 5942 { 5943 int error; 5944 int result; 5945 struct bce_softc *sc; 5946 5947 result = -1; 5948 error = sysctl_handle_int(oidp, &result, 0, req); 5949 5950 if (error || !req->newptr) 5951 return (error); 5952 5953 if (result == 1) { 5954 sc = (struct bce_softc *)arg1; 5955 bce_dump_hw_state(sc); 5956 } 5957 5958 return error; 5959 } 5960 5961 5962 /****************************************************************************/ 5963 /* Provides a sysctl interface to allows dumping the RX chain. */ 5964 /* */ 5965 /* Returns: */ 5966 /* 0 for success, positive value for failure. */ 5967 /****************************************************************************/ 5968 static int 5969 bce_sysctl_dump_rx_chain(SYSCTL_HANDLER_ARGS) 5970 { 5971 int error; 5972 int result; 5973 struct bce_softc *sc; 5974 5975 result = -1; 5976 error = sysctl_handle_int(oidp, &result, 0, req); 5977 5978 if (error || !req->newptr) 5979 return (error); 5980 5981 if (result == 1) { 5982 sc = (struct bce_softc *)arg1; 5983 bce_dump_rx_chain(sc, 0, USABLE_RX_BD(sc)); 5984 } 5985 5986 return error; 5987 } 5988 5989 5990 /****************************************************************************/ 5991 /* Provides a sysctl interface to allows dumping the TX chain. */ 5992 /* */ 5993 /* Returns: */ 5994 /* 0 for success, positive value for failure. 
*/ 5995 /****************************************************************************/ 5996 static int 5997 bce_sysctl_dump_tx_chain(SYSCTL_HANDLER_ARGS) 5998 { 5999 int error; 6000 int result; 6001 struct bce_softc *sc; 6002 6003 result = -1; 6004 error = sysctl_handle_int(oidp, &result, 0, req); 6005 6006 if (error || !req->newptr) 6007 return (error); 6008 6009 if (result == 1) { 6010 sc = (struct bce_softc *)arg1; 6011 bce_dump_tx_chain(sc, 0, USABLE_TX_BD(sc)); 6012 } 6013 6014 return error; 6015 } 6016 6017 6018 /****************************************************************************/ 6019 /* Provides a sysctl interface to allow reading arbitrary registers in the */ 6020 /* device. DO NOT ENABLE ON PRODUCTION SYSTEMS! */ 6021 /* */ 6022 /* Returns: */ 6023 /* 0 for success, positive value for failure. */ 6024 /****************************************************************************/ 6025 static int 6026 bce_sysctl_reg_read(SYSCTL_HANDLER_ARGS) 6027 { 6028 struct bce_softc *sc; 6029 int error; 6030 uint32_t val, result; 6031 6032 result = -1; 6033 error = sysctl_handle_int(oidp, &result, 0, req); 6034 if (error || (req->newptr == NULL)) 6035 return (error); 6036 6037 /* Make sure the register is accessible. */ 6038 if (result < 0x8000) { 6039 sc = (struct bce_softc *)arg1; 6040 val = REG_RD(sc, result); 6041 if_printf(&sc->arpcom.ac_if, "reg 0x%08X = 0x%08X\n", 6042 result, val); 6043 } else if (result < 0x0280000) { 6044 sc = (struct bce_softc *)arg1; 6045 val = REG_RD_IND(sc, result); 6046 if_printf(&sc->arpcom.ac_if, "reg 0x%08X = 0x%08X\n", 6047 result, val); 6048 } 6049 return (error); 6050 } 6051 6052 6053 /****************************************************************************/ 6054 /* Provides a sysctl interface to allow reading arbitrary PHY registers in */ 6055 /* the device. DO NOT ENABLE ON PRODUCTION SYSTEMS! */ 6056 /* */ 6057 /* Returns: */ 6058 /* 0 for success, positive value for failure. 
*/ 6059 /****************************************************************************/ 6060 static int 6061 bce_sysctl_phy_read(SYSCTL_HANDLER_ARGS) 6062 { 6063 struct bce_softc *sc; 6064 device_t dev; 6065 int error, result; 6066 uint16_t val; 6067 6068 result = -1; 6069 error = sysctl_handle_int(oidp, &result, 0, req); 6070 if (error || (req->newptr == NULL)) 6071 return (error); 6072 6073 /* Make sure the register is accessible. */ 6074 if (result < 0x20) { 6075 sc = (struct bce_softc *)arg1; 6076 dev = sc->bce_dev; 6077 val = bce_miibus_read_reg(dev, sc->bce_phy_addr, result); 6078 if_printf(&sc->arpcom.ac_if, 6079 "phy 0x%02X = 0x%04X\n", result, val); 6080 } 6081 return (error); 6082 } 6083 6084 6085 /****************************************************************************/ 6086 /* Provides a sysctl interface to forcing the driver to dump state and */ 6087 /* enter the debugger. DO NOT ENABLE ON PRODUCTION SYSTEMS! */ 6088 /* */ 6089 /* Returns: */ 6090 /* 0 for success, positive value for failure. */ 6091 /****************************************************************************/ 6092 static int 6093 bce_sysctl_breakpoint(SYSCTL_HANDLER_ARGS) 6094 { 6095 int error; 6096 int result; 6097 struct bce_softc *sc; 6098 6099 result = -1; 6100 error = sysctl_handle_int(oidp, &result, 0, req); 6101 6102 if (error || !req->newptr) 6103 return (error); 6104 6105 if (result == 1) { 6106 sc = (struct bce_softc *)arg1; 6107 bce_breakpoint(sc); 6108 } 6109 6110 return error; 6111 } 6112 #endif 6113 6114 6115 /****************************************************************************/ 6116 /* Adds any sysctl parameters for tuning or debugging purposes. */ 6117 /* */ 6118 /* Returns: */ 6119 /* 0 for success, positive value for failure. 
*/ 6120 /****************************************************************************/ 6121 static void 6122 bce_add_sysctls(struct bce_softc *sc) 6123 { 6124 struct sysctl_ctx_list *ctx; 6125 struct sysctl_oid_list *children; 6126 6127 sysctl_ctx_init(&sc->bce_sysctl_ctx); 6128 sc->bce_sysctl_tree = SYSCTL_ADD_NODE(&sc->bce_sysctl_ctx, 6129 SYSCTL_STATIC_CHILDREN(_hw), 6130 OID_AUTO, 6131 device_get_nameunit(sc->bce_dev), 6132 CTLFLAG_RD, 0, ""); 6133 if (sc->bce_sysctl_tree == NULL) { 6134 device_printf(sc->bce_dev, "can't add sysctl node\n"); 6135 return; 6136 } 6137 6138 ctx = &sc->bce_sysctl_ctx; 6139 children = SYSCTL_CHILDREN(sc->bce_sysctl_tree); 6140 6141 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_bds_int", 6142 CTLTYPE_INT | CTLFLAG_RW, 6143 sc, 0, bce_sysctl_tx_bds_int, "I", 6144 "Send max coalesced BD count during interrupt"); 6145 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_bds", 6146 CTLTYPE_INT | CTLFLAG_RW, 6147 sc, 0, bce_sysctl_tx_bds, "I", 6148 "Send max coalesced BD count"); 6149 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_ticks_int", 6150 CTLTYPE_INT | CTLFLAG_RW, 6151 sc, 0, bce_sysctl_tx_ticks_int, "I", 6152 "Send coalescing ticks during interrupt"); 6153 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_ticks", 6154 CTLTYPE_INT | CTLFLAG_RW, 6155 sc, 0, bce_sysctl_tx_ticks, "I", 6156 "Send coalescing ticks"); 6157 6158 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_bds_int", 6159 CTLTYPE_INT | CTLFLAG_RW, 6160 sc, 0, bce_sysctl_rx_bds_int, "I", 6161 "Receive max coalesced BD count during interrupt"); 6162 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_bds", 6163 CTLTYPE_INT | CTLFLAG_RW, 6164 sc, 0, bce_sysctl_rx_bds, "I", 6165 "Receive max coalesced BD count"); 6166 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_ticks_int", 6167 CTLTYPE_INT | CTLFLAG_RW, 6168 sc, 0, bce_sysctl_rx_ticks_int, "I", 6169 "Receive coalescing ticks during interrupt"); 6170 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_ticks", 6171 CTLTYPE_INT | CTLFLAG_RW, 6172 sc, 0, 
bce_sysctl_rx_ticks, "I", 6173 "Receive coalescing ticks"); 6174 6175 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_pages", 6176 CTLFLAG_RD, &sc->rx_pages, 0, "# of RX pages"); 6177 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_pages", 6178 CTLFLAG_RD, &sc->tx_pages, 0, "# of TX pages"); 6179 6180 #ifdef BCE_DEBUG 6181 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 6182 "rx_low_watermark", 6183 CTLFLAG_RD, &sc->rx_low_watermark, 6184 0, "Lowest level of free rx_bd's"); 6185 6186 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 6187 "rx_empty_count", 6188 CTLFLAG_RD, &sc->rx_empty_count, 6189 0, "Number of times the RX chain was empty"); 6190 6191 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 6192 "tx_hi_watermark", 6193 CTLFLAG_RD, &sc->tx_hi_watermark, 6194 0, "Highest level of used tx_bd's"); 6195 6196 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 6197 "tx_full_count", 6198 CTLFLAG_RD, &sc->tx_full_count, 6199 0, "Number of times the TX chain was full"); 6200 6201 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 6202 "l2fhdr_status_errors", 6203 CTLFLAG_RD, &sc->l2fhdr_status_errors, 6204 0, "l2_fhdr status errors"); 6205 6206 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 6207 "unexpected_attentions", 6208 CTLFLAG_RD, &sc->unexpected_attentions, 6209 0, "unexpected attentions"); 6210 6211 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 6212 "lost_status_block_updates", 6213 CTLFLAG_RD, &sc->lost_status_block_updates, 6214 0, "lost status block updates"); 6215 6216 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 6217 "mbuf_alloc_failed", 6218 CTLFLAG_RD, &sc->mbuf_alloc_failed, 6219 0, "mbuf cluster allocation failures"); 6220 #endif 6221 6222 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 6223 "stat_IfHCInOctets", 6224 CTLFLAG_RD, &sc->stat_IfHCInOctets, 6225 "Bytes received"); 6226 6227 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 6228 "stat_IfHCInBadOctets", 6229 CTLFLAG_RD, &sc->stat_IfHCInBadOctets, 6230 "Bad bytes received"); 6231 6232 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 6233 "stat_IfHCOutOctets", 6234 CTLFLAG_RD, 
&sc->stat_IfHCOutOctets, 6235 "Bytes sent"); 6236 6237 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 6238 "stat_IfHCOutBadOctets", 6239 CTLFLAG_RD, &sc->stat_IfHCOutBadOctets, 6240 "Bad bytes sent"); 6241 6242 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 6243 "stat_IfHCInUcastPkts", 6244 CTLFLAG_RD, &sc->stat_IfHCInUcastPkts, 6245 "Unicast packets received"); 6246 6247 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 6248 "stat_IfHCInMulticastPkts", 6249 CTLFLAG_RD, &sc->stat_IfHCInMulticastPkts, 6250 "Multicast packets received"); 6251 6252 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 6253 "stat_IfHCInBroadcastPkts", 6254 CTLFLAG_RD, &sc->stat_IfHCInBroadcastPkts, 6255 "Broadcast packets received"); 6256 6257 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 6258 "stat_IfHCOutUcastPkts", 6259 CTLFLAG_RD, &sc->stat_IfHCOutUcastPkts, 6260 "Unicast packets sent"); 6261 6262 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 6263 "stat_IfHCOutMulticastPkts", 6264 CTLFLAG_RD, &sc->stat_IfHCOutMulticastPkts, 6265 "Multicast packets sent"); 6266 6267 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 6268 "stat_IfHCOutBroadcastPkts", 6269 CTLFLAG_RD, &sc->stat_IfHCOutBroadcastPkts, 6270 "Broadcast packets sent"); 6271 6272 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6273 "stat_emac_tx_stat_dot3statsinternalmactransmiterrors", 6274 CTLFLAG_RD, &sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors, 6275 0, "Internal MAC transmit errors"); 6276 6277 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6278 "stat_Dot3StatsCarrierSenseErrors", 6279 CTLFLAG_RD, &sc->stat_Dot3StatsCarrierSenseErrors, 6280 0, "Carrier sense errors"); 6281 6282 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6283 "stat_Dot3StatsFCSErrors", 6284 CTLFLAG_RD, &sc->stat_Dot3StatsFCSErrors, 6285 0, "Frame check sequence errors"); 6286 6287 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6288 "stat_Dot3StatsAlignmentErrors", 6289 CTLFLAG_RD, &sc->stat_Dot3StatsAlignmentErrors, 6290 0, "Alignment errors"); 6291 6292 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6293 
"stat_Dot3StatsSingleCollisionFrames", 6294 CTLFLAG_RD, &sc->stat_Dot3StatsSingleCollisionFrames, 6295 0, "Single Collision Frames"); 6296 6297 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6298 "stat_Dot3StatsMultipleCollisionFrames", 6299 CTLFLAG_RD, &sc->stat_Dot3StatsMultipleCollisionFrames, 6300 0, "Multiple Collision Frames"); 6301 6302 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6303 "stat_Dot3StatsDeferredTransmissions", 6304 CTLFLAG_RD, &sc->stat_Dot3StatsDeferredTransmissions, 6305 0, "Deferred Transmissions"); 6306 6307 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6308 "stat_Dot3StatsExcessiveCollisions", 6309 CTLFLAG_RD, &sc->stat_Dot3StatsExcessiveCollisions, 6310 0, "Excessive Collisions"); 6311 6312 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6313 "stat_Dot3StatsLateCollisions", 6314 CTLFLAG_RD, &sc->stat_Dot3StatsLateCollisions, 6315 0, "Late Collisions"); 6316 6317 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6318 "stat_EtherStatsCollisions", 6319 CTLFLAG_RD, &sc->stat_EtherStatsCollisions, 6320 0, "Collisions"); 6321 6322 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6323 "stat_EtherStatsFragments", 6324 CTLFLAG_RD, &sc->stat_EtherStatsFragments, 6325 0, "Fragments"); 6326 6327 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6328 "stat_EtherStatsJabbers", 6329 CTLFLAG_RD, &sc->stat_EtherStatsJabbers, 6330 0, "Jabbers"); 6331 6332 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6333 "stat_EtherStatsUndersizePkts", 6334 CTLFLAG_RD, &sc->stat_EtherStatsUndersizePkts, 6335 0, "Undersize packets"); 6336 6337 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6338 "stat_EtherStatsOverrsizePkts", 6339 CTLFLAG_RD, &sc->stat_EtherStatsOverrsizePkts, 6340 0, "stat_EtherStatsOverrsizePkts"); 6341 6342 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6343 "stat_EtherStatsPktsRx64Octets", 6344 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx64Octets, 6345 0, "Bytes received in 64 byte packets"); 6346 6347 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6348 "stat_EtherStatsPktsRx65Octetsto127Octets", 6349 CTLFLAG_RD, 
&sc->stat_EtherStatsPktsRx65Octetsto127Octets, 6350 0, "Bytes received in 65 to 127 byte packets"); 6351 6352 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6353 "stat_EtherStatsPktsRx128Octetsto255Octets", 6354 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx128Octetsto255Octets, 6355 0, "Bytes received in 128 to 255 byte packets"); 6356 6357 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6358 "stat_EtherStatsPktsRx256Octetsto511Octets", 6359 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx256Octetsto511Octets, 6360 0, "Bytes received in 256 to 511 byte packets"); 6361 6362 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6363 "stat_EtherStatsPktsRx512Octetsto1023Octets", 6364 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx512Octetsto1023Octets, 6365 0, "Bytes received in 512 to 1023 byte packets"); 6366 6367 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6368 "stat_EtherStatsPktsRx1024Octetsto1522Octets", 6369 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1024Octetsto1522Octets, 6370 0, "Bytes received in 1024 t0 1522 byte packets"); 6371 6372 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6373 "stat_EtherStatsPktsRx1523Octetsto9022Octets", 6374 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1523Octetsto9022Octets, 6375 0, "Bytes received in 1523 to 9022 byte packets"); 6376 6377 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6378 "stat_EtherStatsPktsTx64Octets", 6379 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx64Octets, 6380 0, "Bytes sent in 64 byte packets"); 6381 6382 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6383 "stat_EtherStatsPktsTx65Octetsto127Octets", 6384 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx65Octetsto127Octets, 6385 0, "Bytes sent in 65 to 127 byte packets"); 6386 6387 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6388 "stat_EtherStatsPktsTx128Octetsto255Octets", 6389 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx128Octetsto255Octets, 6390 0, "Bytes sent in 128 to 255 byte packets"); 6391 6392 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6393 "stat_EtherStatsPktsTx256Octetsto511Octets", 6394 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx256Octetsto511Octets, 6395 
0, "Bytes sent in 256 to 511 byte packets"); 6396 6397 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6398 "stat_EtherStatsPktsTx512Octetsto1023Octets", 6399 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx512Octetsto1023Octets, 6400 0, "Bytes sent in 512 to 1023 byte packets"); 6401 6402 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6403 "stat_EtherStatsPktsTx1024Octetsto1522Octets", 6404 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1024Octetsto1522Octets, 6405 0, "Bytes sent in 1024 to 1522 byte packets"); 6406 6407 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6408 "stat_EtherStatsPktsTx1523Octetsto9022Octets", 6409 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1523Octetsto9022Octets, 6410 0, "Bytes sent in 1523 to 9022 byte packets"); 6411 6412 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6413 "stat_XonPauseFramesReceived", 6414 CTLFLAG_RD, &sc->stat_XonPauseFramesReceived, 6415 0, "XON pause frames receved"); 6416 6417 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6418 "stat_XoffPauseFramesReceived", 6419 CTLFLAG_RD, &sc->stat_XoffPauseFramesReceived, 6420 0, "XOFF pause frames received"); 6421 6422 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6423 "stat_OutXonSent", 6424 CTLFLAG_RD, &sc->stat_OutXonSent, 6425 0, "XON pause frames sent"); 6426 6427 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6428 "stat_OutXoffSent", 6429 CTLFLAG_RD, &sc->stat_OutXoffSent, 6430 0, "XOFF pause frames sent"); 6431 6432 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6433 "stat_FlowControlDone", 6434 CTLFLAG_RD, &sc->stat_FlowControlDone, 6435 0, "Flow control done"); 6436 6437 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6438 "stat_MacControlFramesReceived", 6439 CTLFLAG_RD, &sc->stat_MacControlFramesReceived, 6440 0, "MAC control frames received"); 6441 6442 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6443 "stat_XoffStateEntered", 6444 CTLFLAG_RD, &sc->stat_XoffStateEntered, 6445 0, "XOFF state entered"); 6446 6447 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6448 "stat_IfInFramesL2FilterDiscards", 6449 CTLFLAG_RD, &sc->stat_IfInFramesL2FilterDiscards, 
6450 0, "Received L2 packets discarded"); 6451 6452 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6453 "stat_IfInRuleCheckerDiscards", 6454 CTLFLAG_RD, &sc->stat_IfInRuleCheckerDiscards, 6455 0, "Received packets discarded by rule"); 6456 6457 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6458 "stat_IfInFTQDiscards", 6459 CTLFLAG_RD, &sc->stat_IfInFTQDiscards, 6460 0, "Received packet FTQ discards"); 6461 6462 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6463 "stat_IfInMBUFDiscards", 6464 CTLFLAG_RD, &sc->stat_IfInMBUFDiscards, 6465 0, "Received packets discarded due to lack of controller buffer memory"); 6466 6467 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6468 "stat_IfInRuleCheckerP4Hit", 6469 CTLFLAG_RD, &sc->stat_IfInRuleCheckerP4Hit, 6470 0, "Received packets rule checker hits"); 6471 6472 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6473 "stat_CatchupInRuleCheckerDiscards", 6474 CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerDiscards, 6475 0, "Received packets discarded in Catchup path"); 6476 6477 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6478 "stat_CatchupInFTQDiscards", 6479 CTLFLAG_RD, &sc->stat_CatchupInFTQDiscards, 6480 0, "Received packets discarded in FTQ in Catchup path"); 6481 6482 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6483 "stat_CatchupInMBUFDiscards", 6484 CTLFLAG_RD, &sc->stat_CatchupInMBUFDiscards, 6485 0, "Received packets discarded in controller buffer memory in Catchup path"); 6486 6487 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6488 "stat_CatchupInRuleCheckerP4Hit", 6489 CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerP4Hit, 6490 0, "Received packets rule checker hits in Catchup path"); 6491 6492 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6493 "com_no_buffers", 6494 CTLFLAG_RD, &sc->com_no_buffers, 6495 0, "Valid packets received but no RX buffers available"); 6496 6497 #ifdef BCE_DEBUG 6498 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 6499 "driver_state", CTLTYPE_INT | CTLFLAG_RW, 6500 (void *)sc, 0, 6501 bce_sysctl_driver_state, "I", "Drive state information"); 6502 6503 
SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 6504 "hw_state", CTLTYPE_INT | CTLFLAG_RW, 6505 (void *)sc, 0, 6506 bce_sysctl_hw_state, "I", "Hardware state information"); 6507 6508 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 6509 "dump_rx_chain", CTLTYPE_INT | CTLFLAG_RW, 6510 (void *)sc, 0, 6511 bce_sysctl_dump_rx_chain, "I", "Dump rx_bd chain"); 6512 6513 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 6514 "dump_tx_chain", CTLTYPE_INT | CTLFLAG_RW, 6515 (void *)sc, 0, 6516 bce_sysctl_dump_tx_chain, "I", "Dump tx_bd chain"); 6517 6518 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 6519 "breakpoint", CTLTYPE_INT | CTLFLAG_RW, 6520 (void *)sc, 0, 6521 bce_sysctl_breakpoint, "I", "Driver breakpoint"); 6522 6523 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 6524 "reg_read", CTLTYPE_INT | CTLFLAG_RW, 6525 (void *)sc, 0, 6526 bce_sysctl_reg_read, "I", "Register read"); 6527 6528 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 6529 "phy_read", CTLTYPE_INT | CTLFLAG_RW, 6530 (void *)sc, 0, 6531 bce_sysctl_phy_read, "I", "PHY register read"); 6532 6533 #endif 6534 6535 } 6536 6537 6538 /****************************************************************************/ 6539 /* BCE Debug Routines */ 6540 /****************************************************************************/ 6541 #ifdef BCE_DEBUG 6542 6543 /****************************************************************************/ 6544 /* Freezes the controller to allow for a cohesive state dump. */ 6545 /* */ 6546 /* Returns: */ 6547 /* Nothing. */ 6548 /****************************************************************************/ 6549 static void 6550 bce_freeze_controller(struct bce_softc *sc) 6551 { 6552 uint32_t val; 6553 6554 val = REG_RD(sc, BCE_MISC_COMMAND); 6555 val |= BCE_MISC_COMMAND_DISABLE_ALL; 6556 REG_WR(sc, BCE_MISC_COMMAND, val); 6557 } 6558 6559 6560 /****************************************************************************/ 6561 /* Unfreezes the controller after a freeze operation. 
This may not always */
/* work and the controller will require a reset!                            */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_unfreeze_controller(struct bce_softc *sc)
{
        uint32_t val;

        val = REG_RD(sc, BCE_MISC_COMMAND);
        val |= BCE_MISC_COMMAND_ENABLE_ALL;
        REG_WR(sc, BCE_MISC_COMMAND, val);
}


/****************************************************************************/
/* Prints out information about an mbuf.                                    */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_dump_mbuf(struct bce_softc *sc, struct mbuf *m)
{
        struct ifnet *ifp = &sc->arpcom.ac_if;
        uint32_t val_hi, val_lo;
        struct mbuf *mp = m;

        if (m == NULL) {
                /* Nothing to dump; the caller passed a NULL mbuf pointer. */
                if_printf(ifp, "mbuf: null pointer\n");
                return;
        }

        /* Walk every mbuf in the chain, printing one summary line each. */
        while (mp) {
                val_hi = BCE_ADDR_HI(mp);
                val_lo = BCE_ADDR_LO(mp);
                if_printf(ifp, "mbuf: vaddr = 0x%08X:%08X, m_len = %d, "
                          "m_flags = ( ", val_hi, val_lo, mp->m_len);

                /* Decode the generic mbuf flags. */
                if (mp->m_flags & M_EXT)
                        kprintf("M_EXT ");
                if (mp->m_flags & M_PKTHDR)
                        kprintf("M_PKTHDR ");
                if (mp->m_flags & M_EOR)
                        kprintf("M_EOR ");
#ifdef M_RDONLY
                if (mp->m_flags & M_RDONLY)
                        kprintf("M_RDONLY ");
#endif

                val_hi = BCE_ADDR_HI(mp->m_data);
                val_lo = BCE_ADDR_LO(mp->m_data);
                kprintf(") m_data = 0x%08X:%08X\n", val_hi, val_lo);

                /* Packet-header mbufs get their pkthdr flags decoded too. */
                if (mp->m_flags & M_PKTHDR) {
                        if_printf(ifp, "- m_pkthdr: flags = ( ");
                        if (mp->m_flags & M_BCAST)
                                kprintf("M_BCAST ");
                        if (mp->m_flags & M_MCAST)
                                kprintf("M_MCAST ");
                        if (mp->m_flags & M_FRAG)
                                kprintf("M_FRAG ");
                        if (mp->m_flags & M_FIRSTFRAG)
                                kprintf("M_FIRSTFRAG ");
                        if (mp->m_flags & M_LASTFRAG)
                                kprintf("M_LASTFRAG ");
#ifdef M_VLANTAG
                        if (mp->m_flags & M_VLANTAG)
                                kprintf("M_VLANTAG ");
#endif
#ifdef M_PROMISC
                        if (mp->m_flags & M_PROMISC)
                                kprintf("M_PROMISC ");
#endif
                        /* Decode the checksum-offload flags. */
                        kprintf(") csum_flags = ( ");
                        if (mp->m_pkthdr.csum_flags & CSUM_IP)
                                kprintf("CSUM_IP ");
                        if (mp->m_pkthdr.csum_flags & CSUM_TCP)
                                kprintf("CSUM_TCP ");
                        if (mp->m_pkthdr.csum_flags & CSUM_UDP)
                                kprintf("CSUM_UDP ");
                        if (mp->m_pkthdr.csum_flags & CSUM_IP_FRAGS)
                                kprintf("CSUM_IP_FRAGS ");
                        if (mp->m_pkthdr.csum_flags & CSUM_FRAGMENT)
                                kprintf("CSUM_FRAGMENT ");
#ifdef CSUM_TSO
                        if (mp->m_pkthdr.csum_flags & CSUM_TSO)
                                kprintf("CSUM_TSO ");
#endif
                        if (mp->m_pkthdr.csum_flags & CSUM_IP_CHECKED)
                                kprintf("CSUM_IP_CHECKED ");
                        if (mp->m_pkthdr.csum_flags & CSUM_IP_VALID)
                                kprintf("CSUM_IP_VALID ");
                        if (mp->m_pkthdr.csum_flags & CSUM_DATA_VALID)
                                kprintf("CSUM_DATA_VALID ");
                        kprintf(")\n");
                }

                /* Show the external buffer, if any. */
                if (mp->m_flags & M_EXT) {
                        val_hi = BCE_ADDR_HI(mp->m_ext.ext_buf);
                        val_lo = BCE_ADDR_LO(mp->m_ext.ext_buf);
                        if_printf(ifp, "- m_ext: vaddr = 0x%08X:%08X, "
                                  "ext_size = %d\n",
                                  val_hi, val_lo, mp->m_ext.ext_size);
                }
                mp = mp->m_next;
        }
}


/****************************************************************************/
/* Prints out the mbufs in the RX mbuf chain.                               */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
 */
/****************************************************************************/
static void
bce_dump_rx_mbuf_chain(struct bce_softc *sc, int chain_prod, int count)
{
        struct ifnet *ifp = &sc->arpcom.ac_if;
        int i;

        if_printf(ifp,
                  "----------------------------"
                  " rx mbuf data "
                  "----------------------------\n");

        /* Dump 'count' RX mbufs, starting at producer index 'chain_prod'. */
        for (i = 0; i < count; i++) {
                if_printf(ifp, "rxmbuf[0x%04X]\n", chain_prod);
                bce_dump_mbuf(sc, sc->rx_mbuf_ptr[chain_prod]);
                chain_prod = RX_CHAIN_IDX(sc, NEXT_RX_BD(chain_prod));
        }

        if_printf(ifp,
                  "----------------------------"
                  "----------------"
                  "----------------------------\n");
}


/****************************************************************************/
/* Prints out a tx_bd structure.                                            */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_dump_txbd(struct bce_softc *sc, int idx, struct tx_bd *txbd)
{
        struct ifnet *ifp = &sc->arpcom.ac_if;

        if (idx > MAX_TX_BD(sc)) {
                /* Index out of range. */
                if_printf(ifp, "tx_bd[0x%04X]: Invalid tx_bd index!\n", idx);
        } else if ((idx & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE) {
                /* TX Chain page pointer (last BD of each page). */
                if_printf(ifp, "tx_bd[0x%04X]: haddr = 0x%08X:%08X, "
                          "chain page pointer\n",
                          idx, txbd->tx_bd_haddr_hi, txbd->tx_bd_haddr_lo);
        } else {
                /* Normal tx_bd entry: print fields then decode the flags. */
                if_printf(ifp, "tx_bd[0x%04X]: haddr = 0x%08X:%08X, "
                          "nbytes = 0x%08X, "
                          "vlan tag= 0x%04X, flags = 0x%04X (",
                          idx, txbd->tx_bd_haddr_hi, txbd->tx_bd_haddr_lo,
                          txbd->tx_bd_mss_nbytes,
                          txbd->tx_bd_vlan_tag, txbd->tx_bd_flags);

                if (txbd->tx_bd_flags & TX_BD_FLAGS_CONN_FAULT)
                        kprintf(" CONN_FAULT");

                if (txbd->tx_bd_flags & TX_BD_FLAGS_TCP_UDP_CKSUM)
                        kprintf(" TCP_UDP_CKSUM");

                if (txbd->tx_bd_flags & TX_BD_FLAGS_IP_CKSUM)
                        kprintf(" IP_CKSUM");

                if (txbd->tx_bd_flags & TX_BD_FLAGS_VLAN_TAG)
                        kprintf(" VLAN");

                if (txbd->tx_bd_flags & TX_BD_FLAGS_COAL_NOW)
                        kprintf(" COAL_NOW");

                if (txbd->tx_bd_flags & TX_BD_FLAGS_DONT_GEN_CRC)
                        kprintf(" DONT_GEN_CRC");

                if (txbd->tx_bd_flags & TX_BD_FLAGS_START)
                        kprintf(" START");

                if (txbd->tx_bd_flags & TX_BD_FLAGS_END)
                        kprintf(" END");

                if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_LSO)
                        kprintf(" LSO");

                if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_OPTION_WORD)
                        kprintf(" OPTION_WORD");

                if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_FLAGS)
                        kprintf(" FLAGS");

                if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_SNAP)
                        kprintf(" SNAP");

                kprintf(" )\n");
        }
}


/****************************************************************************/
/* Prints out a rx_bd structure.                                            */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_dump_rxbd(struct bce_softc *sc, int idx, struct rx_bd *rxbd)
{
        struct ifnet *ifp = &sc->arpcom.ac_if;

        if (idx > MAX_RX_BD(sc)) {
                /* Index out of range. */
                if_printf(ifp, "rx_bd[0x%04X]: Invalid rx_bd index!\n", idx);
        } else if ((idx & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE) {
                /* RX Chain page pointer (last BD of each page). */
                if_printf(ifp, "rx_bd[0x%04X]: haddr = 0x%08X:%08X, "
                          "chain page pointer\n",
                          idx, rxbd->rx_bd_haddr_hi, rxbd->rx_bd_haddr_lo);
        } else {
                /* Normal rx_bd entry. */
                if_printf(ifp, "rx_bd[0x%04X]: haddr = 0x%08X:%08X, "
                          "nbytes = 0x%08X, flags = 0x%08X\n",
                          idx, rxbd->rx_bd_haddr_hi, rxbd->rx_bd_haddr_lo,
                          rxbd->rx_bd_len, rxbd->rx_bd_flags);
        }
}


/****************************************************************************/
/* Prints out a l2_fhdr structure.                                          */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_dump_l2fhdr(struct bce_softc *sc, int idx, struct l2_fhdr *l2fhdr)
{
        if_printf(&sc->arpcom.ac_if, "l2_fhdr[0x%04X]: status = 0x%08X, "
                  "pkt_len = 0x%04X, vlan = 0x%04x, "
                  "ip_xsum = 0x%04X, tcp_udp_xsum = 0x%04X\n",
                  idx, l2fhdr->l2_fhdr_status,
                  l2fhdr->l2_fhdr_pkt_len, l2fhdr->l2_fhdr_vlan_tag,
                  l2fhdr->l2_fhdr_ip_xsum, l2fhdr->l2_fhdr_tcp_udp_xsum);
}


/****************************************************************************/
/* Prints out the tx chain.                                                 */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_dump_tx_chain(struct bce_softc *sc, int tx_prod, int count)
{
        struct ifnet *ifp = &sc->arpcom.ac_if;
        int i;

        /* First some info about the tx_bd chain structure. */
        if_printf(ifp,
                  "----------------------------"
                  " tx_bd chain "
                  "----------------------------\n");

        if_printf(ifp, "page size = 0x%08X, "
                  "tx chain pages = 0x%08X\n",
                  (uint32_t)BCM_PAGE_SIZE, (uint32_t)sc->tx_pages);

        if_printf(ifp, "tx_bd per page = 0x%08X, "
                  "usable tx_bd per page = 0x%08X\n",
                  (uint32_t)TOTAL_TX_BD_PER_PAGE,
                  (uint32_t)USABLE_TX_BD_PER_PAGE);

        if_printf(ifp, "total tx_bd = 0x%08X\n", (uint32_t)TOTAL_TX_BD(sc));

        if_printf(ifp,
                  "----------------------------"
                  " tx_bd data "
                  "----------------------------\n");

        /* Now print out the tx_bd's themselves. */
        for (i = 0; i < count; i++) {
                struct tx_bd *txbd;

                txbd = &sc->tx_bd_chain[TX_PAGE(tx_prod)][TX_IDX(tx_prod)];
                bce_dump_txbd(sc, tx_prod, txbd);
                tx_prod = TX_CHAIN_IDX(sc, NEXT_TX_BD(tx_prod));
        }

        if_printf(ifp,
                  "----------------------------"
                  "----------------"
                  "----------------------------\n");
}


/****************************************************************************/
/* Prints out the rx chain.                                                 */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_dump_rx_chain(struct bce_softc *sc, int rx_prod, int count)
{
        struct ifnet *ifp = &sc->arpcom.ac_if;
        int i;

        /* First some info about the rx_bd chain structure. */
        if_printf(ifp,
                  "----------------------------"
                  " rx_bd chain "
                  "----------------------------\n");

        if_printf(ifp, "page size = 0x%08X, "
                  "rx chain pages = 0x%08X\n",
                  (uint32_t)BCM_PAGE_SIZE, (uint32_t)sc->rx_pages);

        if_printf(ifp, "rx_bd per page = 0x%08X, "
                  "usable rx_bd per page = 0x%08X\n",
                  (uint32_t)TOTAL_RX_BD_PER_PAGE,
                  (uint32_t)USABLE_RX_BD_PER_PAGE);

        if_printf(ifp, "total rx_bd = 0x%08X\n", (uint32_t)TOTAL_RX_BD(sc));

        if_printf(ifp,
                  "----------------------------"
                  " rx_bd data "
                  "----------------------------\n");

        /* Now print out the rx_bd's themselves. */
        for (i = 0; i < count; i++) {
                struct rx_bd *rxbd;

                rxbd = &sc->rx_bd_chain[RX_PAGE(rx_prod)][RX_IDX(rx_prod)];
                bce_dump_rxbd(sc, rx_prod, rxbd);
                rx_prod = RX_CHAIN_IDX(sc, NEXT_RX_BD(rx_prod));
        }

        if_printf(ifp,
                  "----------------------------"
                  "----------------"
                  "----------------------------\n");
}


/****************************************************************************/
/* Prints out the status block from host memory.                            */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
 */
/****************************************************************************/
static void
bce_dump_status_block(struct bce_softc *sc)
{
        struct status_block *sblk = sc->status_block;
        struct ifnet *ifp = &sc->arpcom.ac_if;

        if_printf(ifp,
                  "----------------------------"
                  " Status Block "
                  "----------------------------\n");

        if_printf(ifp, " 0x%08X - attn_bits\n", sblk->status_attn_bits);

        if_printf(ifp, " 0x%08X - attn_bits_ack\n",
                  sblk->status_attn_bits_ack);

        /* Raw index plus the masked (in-chain) index for queue 0. */
        if_printf(ifp, "0x%04X(0x%04X) - rx_cons0\n",
                  sblk->status_rx_quick_consumer_index0,
                  (uint16_t)RX_CHAIN_IDX(sc, sblk->status_rx_quick_consumer_index0));

        if_printf(ifp, "0x%04X(0x%04X) - tx_cons0\n",
                  sblk->status_tx_quick_consumer_index0,
                  (uint16_t)TX_CHAIN_IDX(sc, sblk->status_tx_quick_consumer_index0));

        if_printf(ifp, " 0x%04X - status_idx\n", sblk->status_idx);

        /* These indices are not used for normal L2 drivers;
           print them only when non-zero. */
        if (sblk->status_rx_quick_consumer_index1) {
                if_printf(ifp, "0x%04X(0x%04X) - rx_cons1\n",
                          sblk->status_rx_quick_consumer_index1,
                          (uint16_t)RX_CHAIN_IDX(sc,
                          sblk->status_rx_quick_consumer_index1));
        }

        if (sblk->status_tx_quick_consumer_index1) {
                if_printf(ifp, "0x%04X(0x%04X) - tx_cons1\n",
                          sblk->status_tx_quick_consumer_index1,
                          (uint16_t)TX_CHAIN_IDX(sc,
                          sblk->status_tx_quick_consumer_index1));
        }

        if (sblk->status_rx_quick_consumer_index2) {
                if_printf(ifp, "0x%04X(0x%04X)- rx_cons2\n",
                          sblk->status_rx_quick_consumer_index2,
                          (uint16_t)RX_CHAIN_IDX(sc,
                          sblk->status_rx_quick_consumer_index2));
        }

        if (sblk->status_tx_quick_consumer_index2) {
                if_printf(ifp, "0x%04X(0x%04X) - tx_cons2\n",
                          sblk->status_tx_quick_consumer_index2,
                          (uint16_t)TX_CHAIN_IDX(sc,
                          sblk->status_tx_quick_consumer_index2));
        }

        if (sblk->status_rx_quick_consumer_index3) {
                if_printf(ifp, "0x%04X(0x%04X) - rx_cons3\n",
                          sblk->status_rx_quick_consumer_index3,
                          (uint16_t)RX_CHAIN_IDX(sc,
                          sblk->status_rx_quick_consumer_index3));
        }

        if (sblk->status_tx_quick_consumer_index3) {
                if_printf(ifp, "0x%04X(0x%04X) - tx_cons3\n",
                          sblk->status_tx_quick_consumer_index3,
                          (uint16_t)TX_CHAIN_IDX(sc,
                          sblk->status_tx_quick_consumer_index3));
        }

        /* Higher RX queues are dumped in pairs, raw values only. */
        if (sblk->status_rx_quick_consumer_index4 ||
            sblk->status_rx_quick_consumer_index5) {
                if_printf(ifp, "rx_cons4 = 0x%08X, rx_cons5 = 0x%08X\n",
                          sblk->status_rx_quick_consumer_index4,
                          sblk->status_rx_quick_consumer_index5);
        }

        if (sblk->status_rx_quick_consumer_index6 ||
            sblk->status_rx_quick_consumer_index7) {
                if_printf(ifp, "rx_cons6 = 0x%08X, rx_cons7 = 0x%08X\n",
                          sblk->status_rx_quick_consumer_index6,
                          sblk->status_rx_quick_consumer_index7);
        }

        if (sblk->status_rx_quick_consumer_index8 ||
            sblk->status_rx_quick_consumer_index9) {
                if_printf(ifp, "rx_cons8 = 0x%08X, rx_cons9 = 0x%08X\n",
                          sblk->status_rx_quick_consumer_index8,
                          sblk->status_rx_quick_consumer_index9);
        }

        if (sblk->status_rx_quick_consumer_index10 ||
            sblk->status_rx_quick_consumer_index11) {
                if_printf(ifp, "rx_cons10 = 0x%08X, rx_cons11 = 0x%08X\n",
                          sblk->status_rx_quick_consumer_index10,
                          sblk->status_rx_quick_consumer_index11);
        }

        if (sblk->status_rx_quick_consumer_index12 ||
            sblk->status_rx_quick_consumer_index13) {
                if_printf(ifp, "rx_cons12 = 0x%08X, rx_cons13 = 0x%08X\n",
                          sblk->status_rx_quick_consumer_index12,
                          sblk->status_rx_quick_consumer_index13);
        }

        if (sblk->status_rx_quick_consumer_index14 ||
            sblk->status_rx_quick_consumer_index15) {
                if_printf(ifp, "rx_cons14 = 0x%08X, rx_cons15 = 0x%08X\n",
                          sblk->status_rx_quick_consumer_index14,
                          sblk->status_rx_quick_consumer_index15);
        }

        if (sblk->status_completion_producer_index ||
            sblk->status_cmd_consumer_index) {
                if_printf(ifp, "com_prod = 0x%08X, cmd_cons = 0x%08X\n",
                          sblk->status_completion_producer_index,
                          sblk->status_cmd_consumer_index);
        }

        if_printf(ifp,
                  "----------------------------"
                  "----------------"
                  "----------------------------\n");
}


/****************************************************************************/
/* Prints out the statistics block.                                         */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
*/ 7056 /****************************************************************************/ 7057 static void 7058 bce_dump_stats_block(struct bce_softc *sc) 7059 { 7060 struct statistics_block *sblk = sc->stats_block; 7061 struct ifnet *ifp = &sc->arpcom.ac_if; 7062 7063 if_printf(ifp, 7064 "---------------" 7065 " Stats Block (All Stats Not Shown Are 0) " 7066 "---------------\n"); 7067 7068 if (sblk->stat_IfHCInOctets_hi || sblk->stat_IfHCInOctets_lo) { 7069 if_printf(ifp, "0x%08X:%08X : IfHcInOctets\n", 7070 sblk->stat_IfHCInOctets_hi, 7071 sblk->stat_IfHCInOctets_lo); 7072 } 7073 7074 if (sblk->stat_IfHCInBadOctets_hi || sblk->stat_IfHCInBadOctets_lo) { 7075 if_printf(ifp, "0x%08X:%08X : IfHcInBadOctets\n", 7076 sblk->stat_IfHCInBadOctets_hi, 7077 sblk->stat_IfHCInBadOctets_lo); 7078 } 7079 7080 if (sblk->stat_IfHCOutOctets_hi || sblk->stat_IfHCOutOctets_lo) { 7081 if_printf(ifp, "0x%08X:%08X : IfHcOutOctets\n", 7082 sblk->stat_IfHCOutOctets_hi, 7083 sblk->stat_IfHCOutOctets_lo); 7084 } 7085 7086 if (sblk->stat_IfHCOutBadOctets_hi || sblk->stat_IfHCOutBadOctets_lo) { 7087 if_printf(ifp, "0x%08X:%08X : IfHcOutBadOctets\n", 7088 sblk->stat_IfHCOutBadOctets_hi, 7089 sblk->stat_IfHCOutBadOctets_lo); 7090 } 7091 7092 if (sblk->stat_IfHCInUcastPkts_hi || sblk->stat_IfHCInUcastPkts_lo) { 7093 if_printf(ifp, "0x%08X:%08X : IfHcInUcastPkts\n", 7094 sblk->stat_IfHCInUcastPkts_hi, 7095 sblk->stat_IfHCInUcastPkts_lo); 7096 } 7097 7098 if (sblk->stat_IfHCInBroadcastPkts_hi || 7099 sblk->stat_IfHCInBroadcastPkts_lo) { 7100 if_printf(ifp, "0x%08X:%08X : IfHcInBroadcastPkts\n", 7101 sblk->stat_IfHCInBroadcastPkts_hi, 7102 sblk->stat_IfHCInBroadcastPkts_lo); 7103 } 7104 7105 if (sblk->stat_IfHCInMulticastPkts_hi || 7106 sblk->stat_IfHCInMulticastPkts_lo) { 7107 if_printf(ifp, "0x%08X:%08X : IfHcInMulticastPkts\n", 7108 sblk->stat_IfHCInMulticastPkts_hi, 7109 sblk->stat_IfHCInMulticastPkts_lo); 7110 } 7111 7112 if (sblk->stat_IfHCOutUcastPkts_hi || sblk->stat_IfHCOutUcastPkts_lo) { 
7113 if_printf(ifp, "0x%08X:%08X : IfHcOutUcastPkts\n", 7114 sblk->stat_IfHCOutUcastPkts_hi, 7115 sblk->stat_IfHCOutUcastPkts_lo); 7116 } 7117 7118 if (sblk->stat_IfHCOutBroadcastPkts_hi || 7119 sblk->stat_IfHCOutBroadcastPkts_lo) { 7120 if_printf(ifp, "0x%08X:%08X : IfHcOutBroadcastPkts\n", 7121 sblk->stat_IfHCOutBroadcastPkts_hi, 7122 sblk->stat_IfHCOutBroadcastPkts_lo); 7123 } 7124 7125 if (sblk->stat_IfHCOutMulticastPkts_hi || 7126 sblk->stat_IfHCOutMulticastPkts_lo) { 7127 if_printf(ifp, "0x%08X:%08X : IfHcOutMulticastPkts\n", 7128 sblk->stat_IfHCOutMulticastPkts_hi, 7129 sblk->stat_IfHCOutMulticastPkts_lo); 7130 } 7131 7132 if (sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors) { 7133 if_printf(ifp, " 0x%08X : " 7134 "emac_tx_stat_dot3statsinternalmactransmiterrors\n", 7135 sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors); 7136 } 7137 7138 if (sblk->stat_Dot3StatsCarrierSenseErrors) { 7139 if_printf(ifp, " 0x%08X : " 7140 "Dot3StatsCarrierSenseErrors\n", 7141 sblk->stat_Dot3StatsCarrierSenseErrors); 7142 } 7143 7144 if (sblk->stat_Dot3StatsFCSErrors) { 7145 if_printf(ifp, " 0x%08X : Dot3StatsFCSErrors\n", 7146 sblk->stat_Dot3StatsFCSErrors); 7147 } 7148 7149 if (sblk->stat_Dot3StatsAlignmentErrors) { 7150 if_printf(ifp, " 0x%08X : Dot3StatsAlignmentErrors\n", 7151 sblk->stat_Dot3StatsAlignmentErrors); 7152 } 7153 7154 if (sblk->stat_Dot3StatsSingleCollisionFrames) { 7155 if_printf(ifp, " 0x%08X : " 7156 "Dot3StatsSingleCollisionFrames\n", 7157 sblk->stat_Dot3StatsSingleCollisionFrames); 7158 } 7159 7160 if (sblk->stat_Dot3StatsMultipleCollisionFrames) { 7161 if_printf(ifp, " 0x%08X : " 7162 "Dot3StatsMultipleCollisionFrames\n", 7163 sblk->stat_Dot3StatsMultipleCollisionFrames); 7164 } 7165 7166 if (sblk->stat_Dot3StatsDeferredTransmissions) { 7167 if_printf(ifp, " 0x%08X : " 7168 "Dot3StatsDeferredTransmissions\n", 7169 sblk->stat_Dot3StatsDeferredTransmissions); 7170 } 7171 7172 if (sblk->stat_Dot3StatsExcessiveCollisions) { 7173 
if_printf(ifp, " 0x%08X : " 7174 "Dot3StatsExcessiveCollisions\n", 7175 sblk->stat_Dot3StatsExcessiveCollisions); 7176 } 7177 7178 if (sblk->stat_Dot3StatsLateCollisions) { 7179 if_printf(ifp, " 0x%08X : Dot3StatsLateCollisions\n", 7180 sblk->stat_Dot3StatsLateCollisions); 7181 } 7182 7183 if (sblk->stat_EtherStatsCollisions) { 7184 if_printf(ifp, " 0x%08X : EtherStatsCollisions\n", 7185 sblk->stat_EtherStatsCollisions); 7186 } 7187 7188 if (sblk->stat_EtherStatsFragments) { 7189 if_printf(ifp, " 0x%08X : EtherStatsFragments\n", 7190 sblk->stat_EtherStatsFragments); 7191 } 7192 7193 if (sblk->stat_EtherStatsJabbers) { 7194 if_printf(ifp, " 0x%08X : EtherStatsJabbers\n", 7195 sblk->stat_EtherStatsJabbers); 7196 } 7197 7198 if (sblk->stat_EtherStatsUndersizePkts) { 7199 if_printf(ifp, " 0x%08X : EtherStatsUndersizePkts\n", 7200 sblk->stat_EtherStatsUndersizePkts); 7201 } 7202 7203 if (sblk->stat_EtherStatsOverrsizePkts) { 7204 if_printf(ifp, " 0x%08X : EtherStatsOverrsizePkts\n", 7205 sblk->stat_EtherStatsOverrsizePkts); 7206 } 7207 7208 if (sblk->stat_EtherStatsPktsRx64Octets) { 7209 if_printf(ifp, " 0x%08X : EtherStatsPktsRx64Octets\n", 7210 sblk->stat_EtherStatsPktsRx64Octets); 7211 } 7212 7213 if (sblk->stat_EtherStatsPktsRx65Octetsto127Octets) { 7214 if_printf(ifp, " 0x%08X : " 7215 "EtherStatsPktsRx65Octetsto127Octets\n", 7216 sblk->stat_EtherStatsPktsRx65Octetsto127Octets); 7217 } 7218 7219 if (sblk->stat_EtherStatsPktsRx128Octetsto255Octets) { 7220 if_printf(ifp, " 0x%08X : " 7221 "EtherStatsPktsRx128Octetsto255Octets\n", 7222 sblk->stat_EtherStatsPktsRx128Octetsto255Octets); 7223 } 7224 7225 if (sblk->stat_EtherStatsPktsRx256Octetsto511Octets) { 7226 if_printf(ifp, " 0x%08X : " 7227 "EtherStatsPktsRx256Octetsto511Octets\n", 7228 sblk->stat_EtherStatsPktsRx256Octetsto511Octets); 7229 } 7230 7231 if (sblk->stat_EtherStatsPktsRx512Octetsto1023Octets) { 7232 if_printf(ifp, " 0x%08X : " 7233 "EtherStatsPktsRx512Octetsto1023Octets\n", 7234 
sblk->stat_EtherStatsPktsRx512Octetsto1023Octets); 7235 } 7236 7237 if (sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets) { 7238 if_printf(ifp, " 0x%08X : " 7239 "EtherStatsPktsRx1024Octetsto1522Octets\n", 7240 sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets); 7241 } 7242 7243 if (sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets) { 7244 if_printf(ifp, " 0x%08X : " 7245 "EtherStatsPktsRx1523Octetsto9022Octets\n", 7246 sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets); 7247 } 7248 7249 if (sblk->stat_EtherStatsPktsTx64Octets) { 7250 if_printf(ifp, " 0x%08X : EtherStatsPktsTx64Octets\n", 7251 sblk->stat_EtherStatsPktsTx64Octets); 7252 } 7253 7254 if (sblk->stat_EtherStatsPktsTx65Octetsto127Octets) { 7255 if_printf(ifp, " 0x%08X : " 7256 "EtherStatsPktsTx65Octetsto127Octets\n", 7257 sblk->stat_EtherStatsPktsTx65Octetsto127Octets); 7258 } 7259 7260 if (sblk->stat_EtherStatsPktsTx128Octetsto255Octets) { 7261 if_printf(ifp, " 0x%08X : " 7262 "EtherStatsPktsTx128Octetsto255Octets\n", 7263 sblk->stat_EtherStatsPktsTx128Octetsto255Octets); 7264 } 7265 7266 if (sblk->stat_EtherStatsPktsTx256Octetsto511Octets) { 7267 if_printf(ifp, " 0x%08X : " 7268 "EtherStatsPktsTx256Octetsto511Octets\n", 7269 sblk->stat_EtherStatsPktsTx256Octetsto511Octets); 7270 } 7271 7272 if (sblk->stat_EtherStatsPktsTx512Octetsto1023Octets) { 7273 if_printf(ifp, " 0x%08X : " 7274 "EtherStatsPktsTx512Octetsto1023Octets\n", 7275 sblk->stat_EtherStatsPktsTx512Octetsto1023Octets); 7276 } 7277 7278 if (sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets) { 7279 if_printf(ifp, " 0x%08X : " 7280 "EtherStatsPktsTx1024Octetsto1522Octets\n", 7281 sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets); 7282 } 7283 7284 if (sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets) { 7285 if_printf(ifp, " 0x%08X : " 7286 "EtherStatsPktsTx1523Octetsto9022Octets\n", 7287 sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets); 7288 } 7289 7290 if (sblk->stat_XonPauseFramesReceived) { 7291 if_printf(ifp, " 0x%08X : 
XonPauseFramesReceived\n", 7292 sblk->stat_XonPauseFramesReceived); 7293 } 7294 7295 if (sblk->stat_XoffPauseFramesReceived) { 7296 if_printf(ifp, " 0x%08X : XoffPauseFramesReceived\n", 7297 sblk->stat_XoffPauseFramesReceived); 7298 } 7299 7300 if (sblk->stat_OutXonSent) { 7301 if_printf(ifp, " 0x%08X : OutXoffSent\n", 7302 sblk->stat_OutXonSent); 7303 } 7304 7305 if (sblk->stat_OutXoffSent) { 7306 if_printf(ifp, " 0x%08X : OutXoffSent\n", 7307 sblk->stat_OutXoffSent); 7308 } 7309 7310 if (sblk->stat_FlowControlDone) { 7311 if_printf(ifp, " 0x%08X : FlowControlDone\n", 7312 sblk->stat_FlowControlDone); 7313 } 7314 7315 if (sblk->stat_MacControlFramesReceived) { 7316 if_printf(ifp, " 0x%08X : MacControlFramesReceived\n", 7317 sblk->stat_MacControlFramesReceived); 7318 } 7319 7320 if (sblk->stat_XoffStateEntered) { 7321 if_printf(ifp, " 0x%08X : XoffStateEntered\n", 7322 sblk->stat_XoffStateEntered); 7323 } 7324 7325 if (sblk->stat_IfInFramesL2FilterDiscards) { 7326 if_printf(ifp, " 0x%08X : IfInFramesL2FilterDiscards\n", sblk->stat_IfInFramesL2FilterDiscards); 7327 } 7328 7329 if (sblk->stat_IfInRuleCheckerDiscards) { 7330 if_printf(ifp, " 0x%08X : IfInRuleCheckerDiscards\n", 7331 sblk->stat_IfInRuleCheckerDiscards); 7332 } 7333 7334 if (sblk->stat_IfInFTQDiscards) { 7335 if_printf(ifp, " 0x%08X : IfInFTQDiscards\n", 7336 sblk->stat_IfInFTQDiscards); 7337 } 7338 7339 if (sblk->stat_IfInMBUFDiscards) { 7340 if_printf(ifp, " 0x%08X : IfInMBUFDiscards\n", 7341 sblk->stat_IfInMBUFDiscards); 7342 } 7343 7344 if (sblk->stat_IfInRuleCheckerP4Hit) { 7345 if_printf(ifp, " 0x%08X : IfInRuleCheckerP4Hit\n", 7346 sblk->stat_IfInRuleCheckerP4Hit); 7347 } 7348 7349 if (sblk->stat_CatchupInRuleCheckerDiscards) { 7350 if_printf(ifp, " 0x%08X : " 7351 "CatchupInRuleCheckerDiscards\n", 7352 sblk->stat_CatchupInRuleCheckerDiscards); 7353 } 7354 7355 if (sblk->stat_CatchupInFTQDiscards) { 7356 if_printf(ifp, " 0x%08X : CatchupInFTQDiscards\n", 7357 sblk->stat_CatchupInFTQDiscards); 
7358 } 7359 7360 if (sblk->stat_CatchupInMBUFDiscards) { 7361 if_printf(ifp, " 0x%08X : CatchupInMBUFDiscards\n", 7362 sblk->stat_CatchupInMBUFDiscards); 7363 } 7364 7365 if (sblk->stat_CatchupInRuleCheckerP4Hit) { 7366 if_printf(ifp, " 0x%08X : CatchupInRuleCheckerP4Hit\n", 7367 sblk->stat_CatchupInRuleCheckerP4Hit); 7368 } 7369 7370 if_printf(ifp, 7371 "----------------------------" 7372 "----------------" 7373 "----------------------------\n"); 7374 } 7375 7376 7377 /****************************************************************************/ 7378 /* Prints out a summary of the driver state. */ 7379 /* */ 7380 /* Returns: */ 7381 /* Nothing. */ 7382 /****************************************************************************/ 7383 static void 7384 bce_dump_driver_state(struct bce_softc *sc) 7385 { 7386 struct ifnet *ifp = &sc->arpcom.ac_if; 7387 uint32_t val_hi, val_lo; 7388 7389 if_printf(ifp, 7390 "-----------------------------" 7391 " Driver State " 7392 "-----------------------------\n"); 7393 7394 val_hi = BCE_ADDR_HI(sc); 7395 val_lo = BCE_ADDR_LO(sc); 7396 if_printf(ifp, "0x%08X:%08X - (sc) driver softc structure " 7397 "virtual address\n", val_hi, val_lo); 7398 7399 val_hi = BCE_ADDR_HI(sc->status_block); 7400 val_lo = BCE_ADDR_LO(sc->status_block); 7401 if_printf(ifp, "0x%08X:%08X - (sc->status_block) status block " 7402 "virtual address\n", val_hi, val_lo); 7403 7404 val_hi = BCE_ADDR_HI(sc->stats_block); 7405 val_lo = BCE_ADDR_LO(sc->stats_block); 7406 if_printf(ifp, "0x%08X:%08X - (sc->stats_block) statistics block " 7407 "virtual address\n", val_hi, val_lo); 7408 7409 val_hi = BCE_ADDR_HI(sc->tx_bd_chain); 7410 val_lo = BCE_ADDR_LO(sc->tx_bd_chain); 7411 if_printf(ifp, "0x%08X:%08X - (sc->tx_bd_chain) tx_bd chain " 7412 "virtual adddress\n", val_hi, val_lo); 7413 7414 val_hi = BCE_ADDR_HI(sc->rx_bd_chain); 7415 val_lo = BCE_ADDR_LO(sc->rx_bd_chain); 7416 if_printf(ifp, "0x%08X:%08X - (sc->rx_bd_chain) rx_bd chain " 7417 "virtual address\n", 
val_hi, val_lo); 7418 7419 val_hi = BCE_ADDR_HI(sc->tx_mbuf_ptr); 7420 val_lo = BCE_ADDR_LO(sc->tx_mbuf_ptr); 7421 if_printf(ifp, "0x%08X:%08X - (sc->tx_mbuf_ptr) tx mbuf chain " 7422 "virtual address\n", val_hi, val_lo); 7423 7424 val_hi = BCE_ADDR_HI(sc->rx_mbuf_ptr); 7425 val_lo = BCE_ADDR_LO(sc->rx_mbuf_ptr); 7426 if_printf(ifp, "0x%08X:%08X - (sc->rx_mbuf_ptr) rx mbuf chain " 7427 "virtual address\n", val_hi, val_lo); 7428 7429 if_printf(ifp, " 0x%08X - (sc->interrupts_generated) " 7430 "h/w intrs\n", sc->interrupts_generated); 7431 7432 if_printf(ifp, " 0x%08X - (sc->rx_interrupts) " 7433 "rx interrupts handled\n", sc->rx_interrupts); 7434 7435 if_printf(ifp, " 0x%08X - (sc->tx_interrupts) " 7436 "tx interrupts handled\n", sc->tx_interrupts); 7437 7438 if_printf(ifp, " 0x%08X - (sc->last_status_idx) " 7439 "status block index\n", sc->last_status_idx); 7440 7441 if_printf(ifp, " 0x%04X(0x%04X) - (sc->tx_prod) " 7442 "tx producer index\n", 7443 sc->tx_prod, (uint16_t)TX_CHAIN_IDX(sc, sc->tx_prod)); 7444 7445 if_printf(ifp, " 0x%04X(0x%04X) - (sc->tx_cons) " 7446 "tx consumer index\n", 7447 sc->tx_cons, (uint16_t)TX_CHAIN_IDX(sc, sc->tx_cons)); 7448 7449 if_printf(ifp, " 0x%08X - (sc->tx_prod_bseq) " 7450 "tx producer bseq index\n", sc->tx_prod_bseq); 7451 7452 if_printf(ifp, " 0x%04X(0x%04X) - (sc->rx_prod) " 7453 "rx producer index\n", 7454 sc->rx_prod, (uint16_t)RX_CHAIN_IDX(sc, sc->rx_prod)); 7455 7456 if_printf(ifp, " 0x%04X(0x%04X) - (sc->rx_cons) " 7457 "rx consumer index\n", 7458 sc->rx_cons, (uint16_t)RX_CHAIN_IDX(sc, sc->rx_cons)); 7459 7460 if_printf(ifp, " 0x%08X - (sc->rx_prod_bseq) " 7461 "rx producer bseq index\n", sc->rx_prod_bseq); 7462 7463 if_printf(ifp, " 0x%08X - (sc->rx_mbuf_alloc) " 7464 "rx mbufs allocated\n", sc->rx_mbuf_alloc); 7465 7466 if_printf(ifp, " 0x%08X - (sc->free_rx_bd) " 7467 "free rx_bd's\n", sc->free_rx_bd); 7468 7469 if_printf(ifp, "0x%08X/%08X - (sc->rx_low_watermark) rx " 7470 "low watermark\n", sc->rx_low_watermark, 
sc->max_rx_bd); 7471 7472 if_printf(ifp, " 0x%08X - (sc->txmbuf_alloc) " 7473 "tx mbufs allocated\n", sc->tx_mbuf_alloc); 7474 7475 if_printf(ifp, " 0x%08X - (sc->rx_mbuf_alloc) " 7476 "rx mbufs allocated\n", sc->rx_mbuf_alloc); 7477 7478 if_printf(ifp, " 0x%08X - (sc->used_tx_bd) used tx_bd's\n", 7479 sc->used_tx_bd); 7480 7481 if_printf(ifp, "0x%08X/%08X - (sc->tx_hi_watermark) tx hi watermark\n", 7482 sc->tx_hi_watermark, sc->max_tx_bd); 7483 7484 if_printf(ifp, " 0x%08X - (sc->mbuf_alloc_failed) " 7485 "failed mbuf alloc\n", sc->mbuf_alloc_failed); 7486 7487 if_printf(ifp, 7488 "----------------------------" 7489 "----------------" 7490 "----------------------------\n"); 7491 } 7492 7493 7494 /****************************************************************************/ 7495 /* Prints out the hardware state through a summary of important registers, */ 7496 /* followed by a complete register dump. */ 7497 /* */ 7498 /* Returns: */ 7499 /* Nothing. */ 7500 /****************************************************************************/ 7501 static void 7502 bce_dump_hw_state(struct bce_softc *sc) 7503 { 7504 struct ifnet *ifp = &sc->arpcom.ac_if; 7505 uint32_t val1; 7506 int i; 7507 7508 if_printf(ifp, 7509 "----------------------------" 7510 " Hardware State " 7511 "----------------------------\n"); 7512 7513 if_printf(ifp, "%s - bootcode version\n", sc->bce_bc_ver); 7514 7515 val1 = REG_RD(sc, BCE_MISC_ENABLE_STATUS_BITS); 7516 if_printf(ifp, "0x%08X - (0x%06X) misc_enable_status_bits\n", 7517 val1, BCE_MISC_ENABLE_STATUS_BITS); 7518 7519 val1 = REG_RD(sc, BCE_DMA_STATUS); 7520 if_printf(ifp, "0x%08X - (0x%04X) dma_status\n", val1, BCE_DMA_STATUS); 7521 7522 val1 = REG_RD(sc, BCE_CTX_STATUS); 7523 if_printf(ifp, "0x%08X - (0x%04X) ctx_status\n", val1, BCE_CTX_STATUS); 7524 7525 val1 = REG_RD(sc, BCE_EMAC_STATUS); 7526 if_printf(ifp, "0x%08X - (0x%04X) emac_status\n", 7527 val1, BCE_EMAC_STATUS); 7528 7529 val1 = REG_RD(sc, BCE_RPM_STATUS); 7530 if_printf(ifp, 
"0x%08X - (0x%04X) rpm_status\n", val1, BCE_RPM_STATUS); 7531 7532 val1 = REG_RD(sc, BCE_TBDR_STATUS); 7533 if_printf(ifp, "0x%08X - (0x%04X) tbdr_status\n", 7534 val1, BCE_TBDR_STATUS); 7535 7536 val1 = REG_RD(sc, BCE_TDMA_STATUS); 7537 if_printf(ifp, "0x%08X - (0x%04X) tdma_status\n", 7538 val1, BCE_TDMA_STATUS); 7539 7540 val1 = REG_RD(sc, BCE_HC_STATUS); 7541 if_printf(ifp, "0x%08X - (0x%06X) hc_status\n", val1, BCE_HC_STATUS); 7542 7543 val1 = REG_RD_IND(sc, BCE_TXP_CPU_STATE); 7544 if_printf(ifp, "0x%08X - (0x%06X) txp_cpu_state\n", 7545 val1, BCE_TXP_CPU_STATE); 7546 7547 val1 = REG_RD_IND(sc, BCE_TPAT_CPU_STATE); 7548 if_printf(ifp, "0x%08X - (0x%06X) tpat_cpu_state\n", 7549 val1, BCE_TPAT_CPU_STATE); 7550 7551 val1 = REG_RD_IND(sc, BCE_RXP_CPU_STATE); 7552 if_printf(ifp, "0x%08X - (0x%06X) rxp_cpu_state\n", 7553 val1, BCE_RXP_CPU_STATE); 7554 7555 val1 = REG_RD_IND(sc, BCE_COM_CPU_STATE); 7556 if_printf(ifp, "0x%08X - (0x%06X) com_cpu_state\n", 7557 val1, BCE_COM_CPU_STATE); 7558 7559 val1 = REG_RD_IND(sc, BCE_MCP_CPU_STATE); 7560 if_printf(ifp, "0x%08X - (0x%06X) mcp_cpu_state\n", 7561 val1, BCE_MCP_CPU_STATE); 7562 7563 val1 = REG_RD_IND(sc, BCE_CP_CPU_STATE); 7564 if_printf(ifp, "0x%08X - (0x%06X) cp_cpu_state\n", 7565 val1, BCE_CP_CPU_STATE); 7566 7567 if_printf(ifp, 7568 "----------------------------" 7569 "----------------" 7570 "----------------------------\n"); 7571 7572 if_printf(ifp, 7573 "----------------------------" 7574 " Register Dump " 7575 "----------------------------\n"); 7576 7577 for (i = 0x400; i < 0x8000; i += 0x10) { 7578 if_printf(ifp, "0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n", i, 7579 REG_RD(sc, i), 7580 REG_RD(sc, i + 0x4), 7581 REG_RD(sc, i + 0x8), 7582 REG_RD(sc, i + 0xc)); 7583 } 7584 7585 if_printf(ifp, 7586 "----------------------------" 7587 "----------------" 7588 "----------------------------\n"); 7589 } 7590 7591 7592 /****************************************************************************/ 7593 /* Prints out the TXP 
state.                                                                   */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_dump_txp_state(struct bce_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t val1;
	int i;

	if_printf(ifp,
	    "----------------------------"
	    " TXP State "
	    "----------------------------\n");

	val1 = REG_RD_IND(sc, BCE_TXP_CPU_MODE);
	if_printf(ifp, "0x%08X - (0x%06X) txp_cpu_mode\n",
	    val1, BCE_TXP_CPU_MODE);

	val1 = REG_RD_IND(sc, BCE_TXP_CPU_STATE);
	if_printf(ifp, "0x%08X - (0x%06X) txp_cpu_state\n",
	    val1, BCE_TXP_CPU_STATE);

	val1 = REG_RD_IND(sc, BCE_TXP_CPU_EVENT_MASK);
	if_printf(ifp, "0x%08X - (0x%06X) txp_cpu_event_mask\n",
	    val1, BCE_TXP_CPU_EVENT_MASK);

	if_printf(ifp,
	    "----------------------------"
	    " Register Dump "
	    "----------------------------\n");

	for (i = BCE_TXP_CPU_MODE; i < 0x68000; i += 0x10) {
		/*
		 * Skip the big blank space in the middle of the TXP
		 * register range.  The original test, "i < 0x454000 &&
		 * i > 0x5ffff", had one zero too many and the wrong
		 * operator: since i < 0x68000 it reduced to "i > 0x5ffff"
		 * and silently dropped every register below the gap.
		 * Use the same skip logic as the RXP dump.
		 */
		if (i < 0x45400 || i > 0x5ffff) {
			if_printf(ifp, "0x%04X: "
			    "0x%08X 0x%08X 0x%08X 0x%08X\n", i,
			    REG_RD_IND(sc, i),
			    REG_RD_IND(sc, i + 0x4),
			    REG_RD_IND(sc, i + 0x8),
			    REG_RD_IND(sc, i + 0xc));
		}
	}

	if_printf(ifp,
	    "----------------------------"
	    "----------------"
	    "----------------------------\n");
}


/****************************************************************************/
/* Prints out the RXP state.                                                */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
*/ 7651 /****************************************************************************/ 7652 static void 7653 bce_dump_rxp_state(struct bce_softc *sc) 7654 { 7655 struct ifnet *ifp = &sc->arpcom.ac_if; 7656 uint32_t val1; 7657 int i; 7658 7659 if_printf(ifp, 7660 "----------------------------" 7661 " RXP State " 7662 "----------------------------\n"); 7663 7664 val1 = REG_RD_IND(sc, BCE_RXP_CPU_MODE); 7665 if_printf(ifp, "0x%08X - (0x%06X) rxp_cpu_mode\n", 7666 val1, BCE_RXP_CPU_MODE); 7667 7668 val1 = REG_RD_IND(sc, BCE_RXP_CPU_STATE); 7669 if_printf(ifp, "0x%08X - (0x%06X) rxp_cpu_state\n", 7670 val1, BCE_RXP_CPU_STATE); 7671 7672 val1 = REG_RD_IND(sc, BCE_RXP_CPU_EVENT_MASK); 7673 if_printf(ifp, "0x%08X - (0x%06X) rxp_cpu_event_mask\n", 7674 val1, BCE_RXP_CPU_EVENT_MASK); 7675 7676 if_printf(ifp, 7677 "----------------------------" 7678 " Register Dump " 7679 "----------------------------\n"); 7680 7681 for (i = BCE_RXP_CPU_MODE; i < 0xe8fff; i += 0x10) { 7682 /* Skip the big blank sapces */ 7683 if (i < 0xc5400 || i > 0xdffff) { 7684 if_printf(ifp, "0x%04X: " 7685 "0x%08X 0x%08X 0x%08X 0x%08X\n", i, 7686 REG_RD_IND(sc, i), 7687 REG_RD_IND(sc, i + 0x4), 7688 REG_RD_IND(sc, i + 0x8), 7689 REG_RD_IND(sc, i + 0xc)); 7690 } 7691 } 7692 7693 if_printf(ifp, 7694 "----------------------------" 7695 "----------------" 7696 "----------------------------\n"); 7697 } 7698 7699 7700 /****************************************************************************/ 7701 /* Prints out the TPAT state. */ 7702 /* */ 7703 /* Returns: */ 7704 /* Nothing. 
*/ 7705 /****************************************************************************/ 7706 static void 7707 bce_dump_tpat_state(struct bce_softc *sc) 7708 { 7709 struct ifnet *ifp = &sc->arpcom.ac_if; 7710 uint32_t val1; 7711 int i; 7712 7713 if_printf(ifp, 7714 "----------------------------" 7715 " TPAT State " 7716 "----------------------------\n"); 7717 7718 val1 = REG_RD_IND(sc, BCE_TPAT_CPU_MODE); 7719 if_printf(ifp, "0x%08X - (0x%06X) tpat_cpu_mode\n", 7720 val1, BCE_TPAT_CPU_MODE); 7721 7722 val1 = REG_RD_IND(sc, BCE_TPAT_CPU_STATE); 7723 if_printf(ifp, "0x%08X - (0x%06X) tpat_cpu_state\n", 7724 val1, BCE_TPAT_CPU_STATE); 7725 7726 val1 = REG_RD_IND(sc, BCE_TPAT_CPU_EVENT_MASK); 7727 if_printf(ifp, "0x%08X - (0x%06X) tpat_cpu_event_mask\n", 7728 val1, BCE_TPAT_CPU_EVENT_MASK); 7729 7730 if_printf(ifp, 7731 "----------------------------" 7732 " Register Dump " 7733 "----------------------------\n"); 7734 7735 for (i = BCE_TPAT_CPU_MODE; i < 0xa3fff; i += 0x10) { 7736 /* Skip the big blank spaces */ 7737 if (i < 0x854000 && i > 0x9ffff) { 7738 if_printf(ifp, "0x%04X: " 7739 "0x%08X 0x%08X 0x%08X 0x%08X\n", i, 7740 REG_RD_IND(sc, i), 7741 REG_RD_IND(sc, i + 0x4), 7742 REG_RD_IND(sc, i + 0x8), 7743 REG_RD_IND(sc, i + 0xc)); 7744 } 7745 } 7746 7747 if_printf(ifp, 7748 "----------------------------" 7749 "----------------" 7750 "----------------------------\n"); 7751 } 7752 7753 7754 /****************************************************************************/ 7755 /* Prints out the driver state and then enters the debugger. */ 7756 /* */ 7757 /* Returns: */ 7758 /* Nothing. 
*/ 7759 /****************************************************************************/ 7760 static void 7761 bce_breakpoint(struct bce_softc *sc) 7762 { 7763 #if 0 7764 bce_freeze_controller(sc); 7765 #endif 7766 7767 bce_dump_driver_state(sc); 7768 bce_dump_status_block(sc); 7769 bce_dump_tx_chain(sc, 0, TOTAL_TX_BD(sc)); 7770 bce_dump_hw_state(sc); 7771 bce_dump_txp_state(sc); 7772 7773 #if 0 7774 bce_unfreeze_controller(sc); 7775 #endif 7776 7777 /* Call the debugger. */ 7778 breakpoint(); 7779 } 7780 7781 #endif /* BCE_DEBUG */ 7782 7783 static int 7784 bce_sysctl_tx_bds_int(SYSCTL_HANDLER_ARGS) 7785 { 7786 struct bce_softc *sc = arg1; 7787 7788 return bce_sysctl_coal_change(oidp, arg1, arg2, req, 7789 &sc->bce_tx_quick_cons_trip_int, 7790 BCE_COALMASK_TX_BDS_INT); 7791 } 7792 7793 static int 7794 bce_sysctl_tx_bds(SYSCTL_HANDLER_ARGS) 7795 { 7796 struct bce_softc *sc = arg1; 7797 7798 return bce_sysctl_coal_change(oidp, arg1, arg2, req, 7799 &sc->bce_tx_quick_cons_trip, 7800 BCE_COALMASK_TX_BDS); 7801 } 7802 7803 static int 7804 bce_sysctl_tx_ticks_int(SYSCTL_HANDLER_ARGS) 7805 { 7806 struct bce_softc *sc = arg1; 7807 7808 return bce_sysctl_coal_change(oidp, arg1, arg2, req, 7809 &sc->bce_tx_ticks_int, 7810 BCE_COALMASK_TX_TICKS_INT); 7811 } 7812 7813 static int 7814 bce_sysctl_tx_ticks(SYSCTL_HANDLER_ARGS) 7815 { 7816 struct bce_softc *sc = arg1; 7817 7818 return bce_sysctl_coal_change(oidp, arg1, arg2, req, 7819 &sc->bce_tx_ticks, 7820 BCE_COALMASK_TX_TICKS); 7821 } 7822 7823 static int 7824 bce_sysctl_rx_bds_int(SYSCTL_HANDLER_ARGS) 7825 { 7826 struct bce_softc *sc = arg1; 7827 7828 return bce_sysctl_coal_change(oidp, arg1, arg2, req, 7829 &sc->bce_rx_quick_cons_trip_int, 7830 BCE_COALMASK_RX_BDS_INT); 7831 } 7832 7833 static int 7834 bce_sysctl_rx_bds(SYSCTL_HANDLER_ARGS) 7835 { 7836 struct bce_softc *sc = arg1; 7837 7838 return bce_sysctl_coal_change(oidp, arg1, arg2, req, 7839 &sc->bce_rx_quick_cons_trip, 7840 BCE_COALMASK_RX_BDS); 7841 } 7842 7843 
static int 7844 bce_sysctl_rx_ticks_int(SYSCTL_HANDLER_ARGS) 7845 { 7846 struct bce_softc *sc = arg1; 7847 7848 return bce_sysctl_coal_change(oidp, arg1, arg2, req, 7849 &sc->bce_rx_ticks_int, 7850 BCE_COALMASK_RX_TICKS_INT); 7851 } 7852 7853 static int 7854 bce_sysctl_rx_ticks(SYSCTL_HANDLER_ARGS) 7855 { 7856 struct bce_softc *sc = arg1; 7857 7858 return bce_sysctl_coal_change(oidp, arg1, arg2, req, 7859 &sc->bce_rx_ticks, 7860 BCE_COALMASK_RX_TICKS); 7861 } 7862 7863 static int 7864 bce_sysctl_coal_change(SYSCTL_HANDLER_ARGS, uint32_t *coal, 7865 uint32_t coalchg_mask) 7866 { 7867 struct bce_softc *sc = arg1; 7868 struct ifnet *ifp = &sc->arpcom.ac_if; 7869 int error = 0, v; 7870 7871 lwkt_serialize_enter(ifp->if_serializer); 7872 7873 v = *coal; 7874 error = sysctl_handle_int(oidp, &v, 0, req); 7875 if (!error && req->newptr != NULL) { 7876 if (v < 0) { 7877 error = EINVAL; 7878 } else { 7879 *coal = v; 7880 sc->bce_coalchg_mask |= coalchg_mask; 7881 } 7882 } 7883 7884 lwkt_serialize_exit(ifp->if_serializer); 7885 return error; 7886 } 7887 7888 static void 7889 bce_coal_change(struct bce_softc *sc) 7890 { 7891 struct ifnet *ifp = &sc->arpcom.ac_if; 7892 7893 ASSERT_SERIALIZED(ifp->if_serializer); 7894 7895 if ((ifp->if_flags & IFF_RUNNING) == 0) { 7896 sc->bce_coalchg_mask = 0; 7897 return; 7898 } 7899 7900 if (sc->bce_coalchg_mask & 7901 (BCE_COALMASK_TX_BDS | BCE_COALMASK_TX_BDS_INT)) { 7902 REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP, 7903 (sc->bce_tx_quick_cons_trip_int << 16) | 7904 sc->bce_tx_quick_cons_trip); 7905 if (bootverbose) { 7906 if_printf(ifp, "tx_bds %u, tx_bds_int %u\n", 7907 sc->bce_tx_quick_cons_trip, 7908 sc->bce_tx_quick_cons_trip_int); 7909 } 7910 } 7911 7912 if (sc->bce_coalchg_mask & 7913 (BCE_COALMASK_TX_TICKS | BCE_COALMASK_TX_TICKS_INT)) { 7914 REG_WR(sc, BCE_HC_TX_TICKS, 7915 (sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks); 7916 if (bootverbose) { 7917 if_printf(ifp, "tx_ticks %u, tx_ticks_int %u\n", 7918 sc->bce_tx_ticks, 
sc->bce_tx_ticks_int); 7919 } 7920 } 7921 7922 if (sc->bce_coalchg_mask & 7923 (BCE_COALMASK_RX_BDS | BCE_COALMASK_RX_BDS_INT)) { 7924 REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP, 7925 (sc->bce_rx_quick_cons_trip_int << 16) | 7926 sc->bce_rx_quick_cons_trip); 7927 if (bootverbose) { 7928 if_printf(ifp, "rx_bds %u, rx_bds_int %u\n", 7929 sc->bce_rx_quick_cons_trip, 7930 sc->bce_rx_quick_cons_trip_int); 7931 } 7932 } 7933 7934 if (sc->bce_coalchg_mask & 7935 (BCE_COALMASK_RX_TICKS | BCE_COALMASK_RX_TICKS_INT)) { 7936 REG_WR(sc, BCE_HC_RX_TICKS, 7937 (sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks); 7938 if (bootverbose) { 7939 if_printf(ifp, "rx_ticks %u, rx_ticks_int %u\n", 7940 sc->bce_rx_ticks, sc->bce_rx_ticks_int); 7941 } 7942 } 7943 7944 sc->bce_coalchg_mask = 0; 7945 } 7946 7947 static int 7948 bce_tso_setup(struct bce_softc *sc, struct mbuf **mp, 7949 uint16_t *flags0, uint16_t *mss0) 7950 { 7951 struct mbuf *m; 7952 uint16_t flags; 7953 int thoff, iphlen, hoff; 7954 7955 m = *mp; 7956 KASSERT(M_WRITABLE(m), ("TSO mbuf not writable")); 7957 7958 hoff = m->m_pkthdr.csum_lhlen; 7959 iphlen = m->m_pkthdr.csum_iphlen; 7960 thoff = m->m_pkthdr.csum_thlen; 7961 7962 KASSERT(hoff >= sizeof(struct ether_header), 7963 ("invalid ether header len %d", hoff)); 7964 KASSERT(iphlen >= sizeof(struct ip), 7965 ("invalid ip header len %d", iphlen)); 7966 KASSERT(thoff >= sizeof(struct tcphdr), 7967 ("invalid tcp header len %d", thoff)); 7968 7969 if (__predict_false(m->m_len < hoff + iphlen + thoff)) { 7970 m = m_pullup(m, hoff + iphlen + thoff); 7971 if (m == NULL) { 7972 *mp = NULL; 7973 return ENOBUFS; 7974 } 7975 *mp = m; 7976 } 7977 7978 /* Set the LSO flag in the TX BD */ 7979 flags = TX_BD_FLAGS_SW_LSO; 7980 7981 /* Set the length of IP + TCP options (in 32 bit words) */ 7982 flags |= (((iphlen + thoff - 7983 sizeof(struct ip) - sizeof(struct tcphdr)) >> 2) << 8); 7984 7985 *mss0 = htole16(m->m_pkthdr.tso_segsz); 7986 *flags0 = flags; 7987 7988 return 0; 7989 } 7990