/*-
 * Copyright (c) 2006-2007 Broadcom Corporation
 *	David Christensen <davidch@broadcom.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Broadcom Corporation nor the name of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written consent.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
29 * 30 * $FreeBSD: src/sys/dev/bce/if_bce.c,v 1.31 2007/05/16 23:34:11 davidch Exp $ 31 */ 32 33 /* 34 * The following controllers are supported by this driver: 35 * BCM5706C A2, A3 36 * BCM5706S A2, A3 37 * BCM5708C B1, B2 38 * BCM5708S B1, B2 39 * BCM5709C A1, C0 40 * BCM5716 C0 41 * 42 * The following controllers are not supported by this driver: 43 * BCM5706C A0, A1 44 * BCM5706S A0, A1 45 * BCM5708C A0, B0 46 * BCM5708S A0, B0 47 * BCM5709C A0, B0, B1 48 * BCM5709S A0, A1, B0, B1, B2, C0 49 */ 50 51 #include "opt_bce.h" 52 #include "opt_ifpoll.h" 53 54 #include <sys/param.h> 55 #include <sys/bus.h> 56 #include <sys/endian.h> 57 #include <sys/kernel.h> 58 #include <sys/interrupt.h> 59 #include <sys/mbuf.h> 60 #include <sys/malloc.h> 61 #include <sys/queue.h> 62 #ifdef BCE_DEBUG 63 #include <sys/random.h> 64 #endif 65 #include <sys/rman.h> 66 #include <sys/serialize.h> 67 #include <sys/socket.h> 68 #include <sys/sockio.h> 69 #include <sys/sysctl.h> 70 71 #include <netinet/ip.h> 72 #include <netinet/tcp.h> 73 74 #include <net/bpf.h> 75 #include <net/ethernet.h> 76 #include <net/if.h> 77 #include <net/if_arp.h> 78 #include <net/if_dl.h> 79 #include <net/if_media.h> 80 #include <net/if_poll.h> 81 #include <net/if_types.h> 82 #include <net/ifq_var.h> 83 #include <net/vlan/if_vlan_var.h> 84 #include <net/vlan/if_vlan_ether.h> 85 86 #include <dev/netif/mii_layer/mii.h> 87 #include <dev/netif/mii_layer/miivar.h> 88 #include <dev/netif/mii_layer/brgphyreg.h> 89 90 #include <bus/pci/pcireg.h> 91 #include <bus/pci/pcivar.h> 92 93 #include "miibus_if.h" 94 95 #include <dev/netif/bce/if_bcereg.h> 96 #include <dev/netif/bce/if_bcefw.h> 97 98 #define BCE_MSI_CKINTVL ((10 * hz) / 1000) /* 10ms */ 99 100 /****************************************************************************/ 101 /* BCE Debug Options */ 102 /****************************************************************************/ 103 #ifdef BCE_DEBUG 104 105 static uint32_t bce_debug = BCE_WARN; 106 107 /* 108 * 0 = 
Never 109 * 1 = 1 in 2,147,483,648 110 * 256 = 1 in 8,388,608 111 * 2048 = 1 in 1,048,576 112 * 65536 = 1 in 32,768 113 * 1048576 = 1 in 2,048 114 * 268435456 = 1 in 8 115 * 536870912 = 1 in 4 116 * 1073741824 = 1 in 2 117 * 118 * bce_debug_mbuf_allocation_failure: 119 * How often to simulate an mbuf allocation failure. 120 * 121 * bce_debug_dma_map_addr_failure: 122 * How often to simulate a DMA mapping failure. 123 * 124 * bce_debug_bootcode_running_failure: 125 * How often to simulate a bootcode failure. 126 */ 127 static int bce_debug_mbuf_allocation_failure = 0; 128 static int bce_debug_dma_map_addr_failure = 0; 129 static int bce_debug_bootcode_running_failure = 0; 130 131 #endif /* BCE_DEBUG */ 132 133 134 /****************************************************************************/ 135 /* PCI Device ID Table */ 136 /* */ 137 /* Used by bce_probe() to identify the devices supported by this driver. */ 138 /****************************************************************************/ 139 #define BCE_DEVDESC_MAX 64 140 141 static struct bce_type bce_devs[] = { 142 /* BCM5706C Controllers and OEM boards. */ 143 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x3101, 144 "HP NC370T Multifunction Gigabit Server Adapter" }, 145 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x3106, 146 "HP NC370i Multifunction Gigabit Server Adapter" }, 147 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x3070, 148 "HP NC380T PCIe DP Multifunc Gig Server Adapter" }, 149 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x1709, 150 "HP NC371i Multifunction Gigabit Server Adapter" }, 151 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706, PCI_ANY_ID, PCI_ANY_ID, 152 "Broadcom NetXtreme II BCM5706 1000Base-T" }, 153 154 /* BCM5706S controllers and OEM boards. 
*/ 155 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, HP_VENDORID, 0x3102, 156 "HP NC370F Multifunction Gigabit Server Adapter" }, 157 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, PCI_ANY_ID, PCI_ANY_ID, 158 "Broadcom NetXtreme II BCM5706 1000Base-SX" }, 159 160 /* BCM5708C controllers and OEM boards. */ 161 { BRCM_VENDORID, BRCM_DEVICEID_BCM5708, HP_VENDORID, 0x7037, 162 "HP NC373T PCIe Multifunction Gig Server Adapter" }, 163 { BRCM_VENDORID, BRCM_DEVICEID_BCM5708, HP_VENDORID, 0x7038, 164 "HP NC373i Multifunction Gigabit Server Adapter" }, 165 { BRCM_VENDORID, BRCM_DEVICEID_BCM5708, HP_VENDORID, 0x7045, 166 "HP NC374m PCIe Multifunction Adapter" }, 167 { BRCM_VENDORID, BRCM_DEVICEID_BCM5708, PCI_ANY_ID, PCI_ANY_ID, 168 "Broadcom NetXtreme II BCM5708 1000Base-T" }, 169 170 /* BCM5708S controllers and OEM boards. */ 171 { BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, HP_VENDORID, 0x1706, 172 "HP NC373m Multifunction Gigabit Server Adapter" }, 173 { BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, HP_VENDORID, 0x703b, 174 "HP NC373i Multifunction Gigabit Server Adapter" }, 175 { BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, HP_VENDORID, 0x703d, 176 "HP NC373F PCIe Multifunc Giga Server Adapter" }, 177 { BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, PCI_ANY_ID, PCI_ANY_ID, 178 "Broadcom NetXtreme II BCM5708S 1000Base-T" }, 179 180 /* BCM5709C controllers and OEM boards. */ 181 { BRCM_VENDORID, BRCM_DEVICEID_BCM5709, HP_VENDORID, 0x7055, 182 "HP NC382i DP Multifunction Gigabit Server Adapter" }, 183 { BRCM_VENDORID, BRCM_DEVICEID_BCM5709, HP_VENDORID, 0x7059, 184 "HP NC382T PCIe DP Multifunction Gigabit Server Adapter" }, 185 { BRCM_VENDORID, BRCM_DEVICEID_BCM5709, PCI_ANY_ID, PCI_ANY_ID, 186 "Broadcom NetXtreme II BCM5709 1000Base-T" }, 187 188 /* BCM5709S controllers and OEM boards. 
*/ 189 { BRCM_VENDORID, BRCM_DEVICEID_BCM5709S, HP_VENDORID, 0x171d, 190 "HP NC382m DP 1GbE Multifunction BL-c Adapter" }, 191 { BRCM_VENDORID, BRCM_DEVICEID_BCM5709S, HP_VENDORID, 0x7056, 192 "HP NC382i DP Multifunction Gigabit Server Adapter" }, 193 { BRCM_VENDORID, BRCM_DEVICEID_BCM5709S, PCI_ANY_ID, PCI_ANY_ID, 194 "Broadcom NetXtreme II BCM5709 1000Base-SX" }, 195 196 /* BCM5716 controllers and OEM boards. */ 197 { BRCM_VENDORID, BRCM_DEVICEID_BCM5716, PCI_ANY_ID, PCI_ANY_ID, 198 "Broadcom NetXtreme II BCM5716 1000Base-T" }, 199 200 { 0, 0, 0, 0, NULL } 201 }; 202 203 204 /****************************************************************************/ 205 /* Supported Flash NVRAM device data. */ 206 /****************************************************************************/ 207 static const struct flash_spec flash_table[] = 208 { 209 #define BUFFERED_FLAGS (BCE_NV_BUFFERED | BCE_NV_TRANSLATE) 210 #define NONBUFFERED_FLAGS (BCE_NV_WREN) 211 212 /* Slow EEPROM */ 213 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400, 214 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE, 215 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE, 216 "EEPROM - slow"}, 217 /* Expansion entry 0001 */ 218 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406, 219 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, 220 SAIFUN_FLASH_BYTE_ADDR_MASK, 0, 221 "Entry 0001"}, 222 /* Saifun SA25F010 (non-buffered flash) */ 223 /* strap, cfg1, & write1 need updates */ 224 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406, 225 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, 226 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2, 227 "Non-buffered flash (128kB)"}, 228 /* Saifun SA25F020 (non-buffered flash) */ 229 /* strap, cfg1, & write1 need updates */ 230 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406, 231 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, 232 SAIFUN_FLASH_BYTE_ADDR_MASK, 
SAIFUN_FLASH_BASE_TOTAL_SIZE*4, 233 "Non-buffered flash (256kB)"}, 234 /* Expansion entry 0100 */ 235 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406, 236 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, 237 SAIFUN_FLASH_BYTE_ADDR_MASK, 0, 238 "Entry 0100"}, 239 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */ 240 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406, 241 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE, 242 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2, 243 "Entry 0101: ST M45PE10 (128kB non-bufferred)"}, 244 /* Entry 0110: ST M45PE20 (non-buffered flash)*/ 245 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406, 246 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE, 247 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4, 248 "Entry 0110: ST M45PE20 (256kB non-bufferred)"}, 249 /* Saifun SA25F005 (non-buffered flash) */ 250 /* strap, cfg1, & write1 need updates */ 251 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406, 252 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, 253 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE, 254 "Non-buffered flash (64kB)"}, 255 /* Fast EEPROM */ 256 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400, 257 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE, 258 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE, 259 "EEPROM - fast"}, 260 /* Expansion entry 1001 */ 261 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406, 262 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, 263 SAIFUN_FLASH_BYTE_ADDR_MASK, 0, 264 "Entry 1001"}, 265 /* Expansion entry 1010 */ 266 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406, 267 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, 268 SAIFUN_FLASH_BYTE_ADDR_MASK, 0, 269 "Entry 1010"}, 270 /* ATMEL AT45DB011B (buffered flash) */ 271 {0x2e000003, 0x6e808273, 
0x00570081, 0x68848353, 0xaf000400, 272 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE, 273 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE, 274 "Buffered flash (128kB)"}, 275 /* Expansion entry 1100 */ 276 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406, 277 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, 278 SAIFUN_FLASH_BYTE_ADDR_MASK, 0, 279 "Entry 1100"}, 280 /* Expansion entry 1101 */ 281 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406, 282 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, 283 SAIFUN_FLASH_BYTE_ADDR_MASK, 0, 284 "Entry 1101"}, 285 /* Ateml Expansion entry 1110 */ 286 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400, 287 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE, 288 BUFFERED_FLASH_BYTE_ADDR_MASK, 0, 289 "Entry 1110 (Atmel)"}, 290 /* ATMEL AT45DB021B (buffered flash) */ 291 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400, 292 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE, 293 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2, 294 "Buffered flash (256kB)"}, 295 }; 296 297 /* 298 * The BCM5709 controllers transparently handle the 299 * differences between Atmel 264 byte pages and all 300 * flash devices which use 256 byte pages, so no 301 * logical-to-physical mapping is required in the 302 * driver. 303 */ 304 static struct flash_spec flash_5709 = { 305 .flags = BCE_NV_BUFFERED, 306 .page_bits = BCM5709_FLASH_PAGE_BITS, 307 .page_size = BCM5709_FLASH_PAGE_SIZE, 308 .addr_mask = BCM5709_FLASH_BYTE_ADDR_MASK, 309 .total_size = BUFFERED_FLASH_TOTAL_SIZE * 2, 310 .name = "5709/5716 buffered flash (256kB)", 311 }; 312 313 314 /****************************************************************************/ 315 /* DragonFly device entry points. 
*/ 316 /****************************************************************************/ 317 static int bce_probe(device_t); 318 static int bce_attach(device_t); 319 static int bce_detach(device_t); 320 static void bce_shutdown(device_t); 321 322 /****************************************************************************/ 323 /* BCE Debug Data Structure Dump Routines */ 324 /****************************************************************************/ 325 #ifdef BCE_DEBUG 326 static void bce_dump_mbuf(struct bce_softc *, struct mbuf *); 327 static void bce_dump_rx_mbuf_chain(struct bce_softc *, int, int); 328 static void bce_dump_txbd(struct bce_softc *, int, struct tx_bd *); 329 static void bce_dump_rxbd(struct bce_softc *, int, struct rx_bd *); 330 static void bce_dump_l2fhdr(struct bce_softc *, int, 331 struct l2_fhdr *) __unused; 332 static void bce_dump_tx_chain(struct bce_softc *, int, int); 333 static void bce_dump_rx_chain(struct bce_softc *, int, int); 334 static void bce_dump_status_block(struct bce_softc *); 335 static void bce_dump_driver_state(struct bce_softc *); 336 static void bce_dump_stats_block(struct bce_softc *) __unused; 337 static void bce_dump_hw_state(struct bce_softc *); 338 static void bce_dump_txp_state(struct bce_softc *); 339 static void bce_dump_rxp_state(struct bce_softc *) __unused; 340 static void bce_dump_tpat_state(struct bce_softc *) __unused; 341 static void bce_freeze_controller(struct bce_softc *) __unused; 342 static void bce_unfreeze_controller(struct bce_softc *) __unused; 343 static void bce_breakpoint(struct bce_softc *); 344 #endif /* BCE_DEBUG */ 345 346 347 /****************************************************************************/ 348 /* BCE Register/Memory Access Routines */ 349 /****************************************************************************/ 350 static uint32_t bce_reg_rd_ind(struct bce_softc *, uint32_t); 351 static void bce_reg_wr_ind(struct bce_softc *, uint32_t, uint32_t); 352 static void 
bce_shmem_wr(struct bce_softc *, uint32_t, uint32_t); 353 static uint32_t bce_shmem_rd(struct bce_softc *, u32); 354 static void bce_ctx_wr(struct bce_softc *, uint32_t, uint32_t, uint32_t); 355 static int bce_miibus_read_reg(device_t, int, int); 356 static int bce_miibus_write_reg(device_t, int, int, int); 357 static void bce_miibus_statchg(device_t); 358 359 360 /****************************************************************************/ 361 /* BCE NVRAM Access Routines */ 362 /****************************************************************************/ 363 static int bce_acquire_nvram_lock(struct bce_softc *); 364 static int bce_release_nvram_lock(struct bce_softc *); 365 static void bce_enable_nvram_access(struct bce_softc *); 366 static void bce_disable_nvram_access(struct bce_softc *); 367 static int bce_nvram_read_dword(struct bce_softc *, uint32_t, uint8_t *, 368 uint32_t); 369 static int bce_init_nvram(struct bce_softc *); 370 static int bce_nvram_read(struct bce_softc *, uint32_t, uint8_t *, int); 371 static int bce_nvram_test(struct bce_softc *); 372 373 /****************************************************************************/ 374 /* BCE DMA Allocate/Free Routines */ 375 /****************************************************************************/ 376 static int bce_dma_alloc(struct bce_softc *); 377 static void bce_dma_free(struct bce_softc *); 378 static void bce_dma_map_addr(void *, bus_dma_segment_t *, int, int); 379 380 /****************************************************************************/ 381 /* BCE Firmware Synchronization and Load */ 382 /****************************************************************************/ 383 static int bce_fw_sync(struct bce_softc *, uint32_t); 384 static void bce_load_rv2p_fw(struct bce_softc *, uint32_t *, 385 uint32_t, uint32_t); 386 static void bce_load_cpu_fw(struct bce_softc *, struct cpu_reg *, 387 struct fw_info *); 388 static void bce_start_cpu(struct bce_softc *, struct cpu_reg *); 389 static 
void bce_halt_cpu(struct bce_softc *, struct cpu_reg *); 390 static void bce_start_rxp_cpu(struct bce_softc *); 391 static void bce_init_rxp_cpu(struct bce_softc *); 392 static void bce_init_txp_cpu(struct bce_softc *); 393 static void bce_init_tpat_cpu(struct bce_softc *); 394 static void bce_init_cp_cpu(struct bce_softc *); 395 static void bce_init_com_cpu(struct bce_softc *); 396 static void bce_init_cpus(struct bce_softc *); 397 398 static void bce_stop(struct bce_softc *); 399 static int bce_reset(struct bce_softc *, uint32_t); 400 static int bce_chipinit(struct bce_softc *); 401 static int bce_blockinit(struct bce_softc *); 402 static int bce_newbuf_std(struct bce_softc *, uint16_t *, uint16_t *, 403 uint32_t *, int); 404 static void bce_setup_rxdesc_std(struct bce_softc *, uint16_t, uint32_t *); 405 static void bce_probe_pci_caps(struct bce_softc *); 406 static void bce_print_adapter_info(struct bce_softc *); 407 static void bce_get_media(struct bce_softc *); 408 409 static void bce_init_tx_context(struct bce_softc *); 410 static int bce_init_tx_chain(struct bce_softc *); 411 static void bce_init_rx_context(struct bce_softc *); 412 static int bce_init_rx_chain(struct bce_softc *); 413 static void bce_free_rx_chain(struct bce_softc *); 414 static void bce_free_tx_chain(struct bce_softc *); 415 416 static int bce_encap(struct bce_softc *, struct mbuf **, int *); 417 static int bce_tso_setup(struct bce_softc *, struct mbuf **, 418 uint16_t *, uint16_t *); 419 static void bce_start(struct ifnet *); 420 static int bce_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *); 421 static void bce_watchdog(struct ifnet *); 422 static int bce_ifmedia_upd(struct ifnet *); 423 static void bce_ifmedia_sts(struct ifnet *, struct ifmediareq *); 424 static void bce_init(void *); 425 static void bce_mgmt_init(struct bce_softc *); 426 427 static int bce_init_ctx(struct bce_softc *); 428 static void bce_get_mac_addr(struct bce_softc *); 429 static void bce_set_mac_addr(struct 
bce_softc *); 430 static void bce_phy_intr(struct bce_softc *); 431 static void bce_rx_intr(struct bce_softc *, int, uint16_t); 432 static void bce_tx_intr(struct bce_softc *, uint16_t); 433 static void bce_disable_intr(struct bce_softc *); 434 static void bce_enable_intr(struct bce_softc *); 435 static void bce_reenable_intr(struct bce_softc *); 436 437 #ifdef IFPOLL_ENABLE 438 static void bce_npoll(struct ifnet *, struct ifpoll_info *); 439 static void bce_npoll_compat(struct ifnet *, void *, int); 440 #endif 441 static void bce_intr(struct bce_softc *); 442 static void bce_intr_legacy(void *); 443 static void bce_intr_msi(void *); 444 static void bce_intr_msi_oneshot(void *); 445 static void bce_set_rx_mode(struct bce_softc *); 446 static void bce_stats_update(struct bce_softc *); 447 static void bce_tick(void *); 448 static void bce_tick_serialized(struct bce_softc *); 449 static void bce_pulse(void *); 450 static void bce_check_msi(void *); 451 static void bce_add_sysctls(struct bce_softc *); 452 453 static void bce_coal_change(struct bce_softc *); 454 static int bce_sysctl_tx_bds_int(SYSCTL_HANDLER_ARGS); 455 static int bce_sysctl_tx_bds(SYSCTL_HANDLER_ARGS); 456 static int bce_sysctl_tx_ticks_int(SYSCTL_HANDLER_ARGS); 457 static int bce_sysctl_tx_ticks(SYSCTL_HANDLER_ARGS); 458 static int bce_sysctl_rx_bds_int(SYSCTL_HANDLER_ARGS); 459 static int bce_sysctl_rx_bds(SYSCTL_HANDLER_ARGS); 460 static int bce_sysctl_rx_ticks_int(SYSCTL_HANDLER_ARGS); 461 static int bce_sysctl_rx_ticks(SYSCTL_HANDLER_ARGS); 462 static int bce_sysctl_coal_change(SYSCTL_HANDLER_ARGS, 463 uint32_t *, uint32_t); 464 465 /* 466 * NOTE: 467 * Don't set bce_tx_ticks_int/bce_tx_ticks to 1023. Linux's bnx2 468 * takes 1023 as the TX ticks limit. However, using 1023 will 469 * cause 5708(B2) to generate extra interrupts (~2000/s) even when 470 * there is _no_ network activity on the NIC. 
471 */ 472 static uint32_t bce_tx_bds_int = 255; /* bcm: 20 */ 473 static uint32_t bce_tx_bds = 255; /* bcm: 20 */ 474 static uint32_t bce_tx_ticks_int = 1022; /* bcm: 80 */ 475 static uint32_t bce_tx_ticks = 1022; /* bcm: 80 */ 476 static uint32_t bce_rx_bds_int = 128; /* bcm: 6 */ 477 static uint32_t bce_rx_bds = 0; /* bcm: 6 */ 478 static uint32_t bce_rx_ticks_int = 150; /* bcm: 18 */ 479 static uint32_t bce_rx_ticks = 150; /* bcm: 18 */ 480 481 static int bce_msi_enable = 1; 482 483 static int bce_rx_pages = RX_PAGES_DEFAULT; 484 static int bce_tx_pages = TX_PAGES_DEFAULT; 485 486 TUNABLE_INT("hw.bce.tx_bds_int", &bce_tx_bds_int); 487 TUNABLE_INT("hw.bce.tx_bds", &bce_tx_bds); 488 TUNABLE_INT("hw.bce.tx_ticks_int", &bce_tx_ticks_int); 489 TUNABLE_INT("hw.bce.tx_ticks", &bce_tx_ticks); 490 TUNABLE_INT("hw.bce.rx_bds_int", &bce_rx_bds_int); 491 TUNABLE_INT("hw.bce.rx_bds", &bce_rx_bds); 492 TUNABLE_INT("hw.bce.rx_ticks_int", &bce_rx_ticks_int); 493 TUNABLE_INT("hw.bce.rx_ticks", &bce_rx_ticks); 494 TUNABLE_INT("hw.bce.msi.enable", &bce_msi_enable); 495 TUNABLE_INT("hw.bce.rx_pages", &bce_rx_pages); 496 TUNABLE_INT("hw.bce.tx_pages", &bce_tx_pages); 497 498 /****************************************************************************/ 499 /* DragonFly device dispatch table. 
*/ 500 /****************************************************************************/ 501 static device_method_t bce_methods[] = { 502 /* Device interface */ 503 DEVMETHOD(device_probe, bce_probe), 504 DEVMETHOD(device_attach, bce_attach), 505 DEVMETHOD(device_detach, bce_detach), 506 DEVMETHOD(device_shutdown, bce_shutdown), 507 508 /* bus interface */ 509 DEVMETHOD(bus_print_child, bus_generic_print_child), 510 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 511 512 /* MII interface */ 513 DEVMETHOD(miibus_readreg, bce_miibus_read_reg), 514 DEVMETHOD(miibus_writereg, bce_miibus_write_reg), 515 DEVMETHOD(miibus_statchg, bce_miibus_statchg), 516 517 { 0, 0 } 518 }; 519 520 static driver_t bce_driver = { 521 "bce", 522 bce_methods, 523 sizeof(struct bce_softc) 524 }; 525 526 static devclass_t bce_devclass; 527 528 529 DECLARE_DUMMY_MODULE(if_bce); 530 MODULE_DEPEND(bce, miibus, 1, 1, 1); 531 DRIVER_MODULE(if_bce, pci, bce_driver, bce_devclass, NULL, NULL); 532 DRIVER_MODULE(miibus, bce, miibus_driver, miibus_devclass, NULL, NULL); 533 534 535 /****************************************************************************/ 536 /* Device probe function. */ 537 /* */ 538 /* Compares the device to the driver's list of supported devices and */ 539 /* reports back to the OS whether this is the right driver for the device. */ 540 /* */ 541 /* Returns: */ 542 /* BUS_PROBE_DEFAULT on success, positive value on failure. */ 543 /****************************************************************************/ 544 static int 545 bce_probe(device_t dev) 546 { 547 struct bce_type *t; 548 uint16_t vid, did, svid, sdid; 549 550 /* Get the data for the device to be probed. */ 551 vid = pci_get_vendor(dev); 552 did = pci_get_device(dev); 553 svid = pci_get_subvendor(dev); 554 sdid = pci_get_subdevice(dev); 555 556 /* Look through the list of known devices for a match. 
*/ 557 for (t = bce_devs; t->bce_name != NULL; ++t) { 558 if (vid == t->bce_vid && did == t->bce_did && 559 (svid == t->bce_svid || t->bce_svid == PCI_ANY_ID) && 560 (sdid == t->bce_sdid || t->bce_sdid == PCI_ANY_ID)) { 561 uint32_t revid = pci_read_config(dev, PCIR_REVID, 4); 562 char *descbuf; 563 564 descbuf = kmalloc(BCE_DEVDESC_MAX, M_TEMP, M_WAITOK); 565 566 /* Print out the device identity. */ 567 ksnprintf(descbuf, BCE_DEVDESC_MAX, "%s (%c%d)", 568 t->bce_name, 569 ((revid & 0xf0) >> 4) + 'A', revid & 0xf); 570 571 device_set_desc_copy(dev, descbuf); 572 kfree(descbuf, M_TEMP); 573 return 0; 574 } 575 } 576 return ENXIO; 577 } 578 579 580 /****************************************************************************/ 581 /* PCI Capabilities Probe Function. */ 582 /* */ 583 /* Walks the PCI capabiites list for the device to find what features are */ 584 /* supported. */ 585 /* */ 586 /* Returns: */ 587 /* None. */ 588 /****************************************************************************/ 589 static void 590 bce_print_adapter_info(struct bce_softc *sc) 591 { 592 device_printf(sc->bce_dev, "ASIC (0x%08X); ", sc->bce_chipid); 593 594 kprintf("Rev (%c%d); ", ((BCE_CHIP_ID(sc) & 0xf000) >> 12) + 'A', 595 ((BCE_CHIP_ID(sc) & 0x0ff0) >> 4)); 596 597 /* Bus info. */ 598 if (sc->bce_flags & BCE_PCIE_FLAG) { 599 kprintf("Bus (PCIe x%d, ", sc->link_width); 600 switch (sc->link_speed) { 601 case 1: 602 kprintf("2.5Gbps); "); 603 break; 604 case 2: 605 kprintf("5Gbps); "); 606 break; 607 default: 608 kprintf("Unknown link speed); "); 609 break; 610 } 611 } else { 612 kprintf("Bus (PCI%s, %s, %dMHz); ", 613 ((sc->bce_flags & BCE_PCIX_FLAG) ? "-X" : ""), 614 ((sc->bce_flags & BCE_PCI_32BIT_FLAG) ? "32-bit" : "64-bit"), 615 sc->bus_speed_mhz); 616 } 617 618 /* Firmware version and device features. 
*/ 619 kprintf("B/C (%s)", sc->bce_bc_ver); 620 621 if ((sc->bce_flags & BCE_MFW_ENABLE_FLAG) || 622 (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG)) { 623 kprintf("; Flags("); 624 if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) 625 kprintf("MFW[%s]", sc->bce_mfw_ver); 626 if (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG) 627 kprintf(" 2.5G"); 628 kprintf(")"); 629 } 630 kprintf("\n"); 631 } 632 633 634 /****************************************************************************/ 635 /* PCI Capabilities Probe Function. */ 636 /* */ 637 /* Walks the PCI capabiites list for the device to find what features are */ 638 /* supported. */ 639 /* */ 640 /* Returns: */ 641 /* None. */ 642 /****************************************************************************/ 643 static void 644 bce_probe_pci_caps(struct bce_softc *sc) 645 { 646 device_t dev = sc->bce_dev; 647 uint8_t ptr; 648 649 if (pci_is_pcix(dev)) 650 sc->bce_cap_flags |= BCE_PCIX_CAPABLE_FLAG; 651 652 ptr = pci_get_pciecap_ptr(dev); 653 if (ptr) { 654 uint16_t link_status = pci_read_config(dev, ptr + 0x12, 2); 655 656 sc->link_speed = link_status & 0xf; 657 sc->link_width = (link_status >> 4) & 0x3f; 658 sc->bce_cap_flags |= BCE_PCIE_CAPABLE_FLAG; 659 sc->bce_flags |= BCE_PCIE_FLAG; 660 } 661 } 662 663 664 /****************************************************************************/ 665 /* Device attach function. */ 666 /* */ 667 /* Allocates device resources, performs secondary chip identification, */ 668 /* resets and initializes the hardware, and initializes driver instance */ 669 /* variables. */ 670 /* */ 671 /* Returns: */ 672 /* 0 on success, positive value on failure. 
*/ 673 /****************************************************************************/ 674 static int 675 bce_attach(device_t dev) 676 { 677 struct bce_softc *sc = device_get_softc(dev); 678 struct ifnet *ifp = &sc->arpcom.ac_if; 679 uint32_t val; 680 u_int irq_flags; 681 void (*irq_handle)(void *); 682 int rid, rc = 0; 683 int i, j; 684 struct mii_probe_args mii_args; 685 uintptr_t mii_priv = 0; 686 687 sc->bce_dev = dev; 688 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 689 690 pci_enable_busmaster(dev); 691 692 bce_probe_pci_caps(sc); 693 694 /* Allocate PCI memory resources. */ 695 rid = PCIR_BAR(0); 696 sc->bce_res_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 697 RF_ACTIVE | PCI_RF_DENSE); 698 if (sc->bce_res_mem == NULL) { 699 device_printf(dev, "PCI memory allocation failed\n"); 700 return ENXIO; 701 } 702 sc->bce_btag = rman_get_bustag(sc->bce_res_mem); 703 sc->bce_bhandle = rman_get_bushandle(sc->bce_res_mem); 704 705 /* Allocate PCI IRQ resources. */ 706 sc->bce_irq_type = pci_alloc_1intr(dev, bce_msi_enable, 707 &sc->bce_irq_rid, &irq_flags); 708 709 sc->bce_res_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, 710 &sc->bce_irq_rid, irq_flags); 711 if (sc->bce_res_irq == NULL) { 712 device_printf(dev, "PCI map interrupt failed\n"); 713 rc = ENXIO; 714 goto fail; 715 } 716 717 /* 718 * Configure byte swap and enable indirect register access. 719 * Rely on CPU to do target byte swapping on big endian systems. 720 * Access to registers outside of PCI configurtion space are not 721 * valid until this is done. 722 */ 723 pci_write_config(dev, BCE_PCICFG_MISC_CONFIG, 724 BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA | 725 BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP, 4); 726 727 /* Save ASIC revsion info. */ 728 sc->bce_chipid = REG_RD(sc, BCE_MISC_ID); 729 730 /* Weed out any non-production controller revisions. 
*/ 731 switch (BCE_CHIP_ID(sc)) { 732 case BCE_CHIP_ID_5706_A0: 733 case BCE_CHIP_ID_5706_A1: 734 case BCE_CHIP_ID_5708_A0: 735 case BCE_CHIP_ID_5708_B0: 736 case BCE_CHIP_ID_5709_A0: 737 case BCE_CHIP_ID_5709_B0: 738 case BCE_CHIP_ID_5709_B1: 739 #ifdef foo 740 /* 5709C B2 seems to work fine */ 741 case BCE_CHIP_ID_5709_B2: 742 #endif 743 device_printf(dev, "Unsupported chip id 0x%08x!\n", 744 BCE_CHIP_ID(sc)); 745 rc = ENODEV; 746 goto fail; 747 } 748 749 mii_priv |= BRGPHY_FLAG_WIRESPEED; 750 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) { 751 if (BCE_CHIP_REV(sc) == BCE_CHIP_REV_Ax || 752 BCE_CHIP_REV(sc) == BCE_CHIP_REV_Bx) 753 mii_priv |= BRGPHY_FLAG_NO_EARLYDAC; 754 } else { 755 mii_priv |= BRGPHY_FLAG_BER_BUG; 756 } 757 758 if (sc->bce_irq_type == PCI_INTR_TYPE_LEGACY) { 759 irq_handle = bce_intr_legacy; 760 } else if (sc->bce_irq_type == PCI_INTR_TYPE_MSI) { 761 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) { 762 irq_handle = bce_intr_msi_oneshot; 763 sc->bce_flags |= BCE_ONESHOT_MSI_FLAG; 764 } else { 765 irq_handle = bce_intr_msi; 766 sc->bce_flags |= BCE_CHECK_MSI_FLAG; 767 } 768 } else { 769 panic("%s: unsupported intr type %d", 770 device_get_nameunit(dev), sc->bce_irq_type); 771 } 772 773 /* 774 * Find the base address for shared memory access. 775 * Newer versions of bootcode use a signature and offset 776 * while older versions use a fixed address. 777 */ 778 val = REG_RD_IND(sc, BCE_SHM_HDR_SIGNATURE); 779 if ((val & BCE_SHM_HDR_SIGNATURE_SIG_MASK) == 780 BCE_SHM_HDR_SIGNATURE_SIG) { 781 /* Multi-port devices use different offsets in shared memory. */ 782 sc->bce_shmem_base = REG_RD_IND(sc, 783 BCE_SHM_HDR_ADDR_0 + (pci_get_function(sc->bce_dev) << 2)); 784 } else { 785 sc->bce_shmem_base = HOST_VIEW_SHMEM_BASE; 786 } 787 DBPRINT(sc, BCE_INFO, "bce_shmem_base = 0x%08X\n", sc->bce_shmem_base); 788 789 /* Fetch the bootcode revision. 
*/ 790 val = bce_shmem_rd(sc, BCE_DEV_INFO_BC_REV); 791 for (i = 0, j = 0; i < 3; i++) { 792 uint8_t num; 793 int k, skip0; 794 795 num = (uint8_t)(val >> (24 - (i * 8))); 796 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) { 797 if (num >= k || !skip0 || k == 1) { 798 sc->bce_bc_ver[j++] = (num / k) + '0'; 799 skip0 = 0; 800 } 801 } 802 if (i != 2) 803 sc->bce_bc_ver[j++] = '.'; 804 } 805 806 /* Check if any management firwmare is running. */ 807 val = bce_shmem_rd(sc, BCE_PORT_FEATURE); 808 if (val & BCE_PORT_FEATURE_ASF_ENABLED) { 809 sc->bce_flags |= BCE_MFW_ENABLE_FLAG; 810 811 /* Allow time for firmware to enter the running state. */ 812 for (i = 0; i < 30; i++) { 813 val = bce_shmem_rd(sc, BCE_BC_STATE_CONDITION); 814 if (val & BCE_CONDITION_MFW_RUN_MASK) 815 break; 816 DELAY(10000); 817 } 818 } 819 820 /* Check the current bootcode state. */ 821 val = bce_shmem_rd(sc, BCE_BC_STATE_CONDITION) & 822 BCE_CONDITION_MFW_RUN_MASK; 823 if (val != BCE_CONDITION_MFW_RUN_UNKNOWN && 824 val != BCE_CONDITION_MFW_RUN_NONE) { 825 uint32_t addr = bce_shmem_rd(sc, BCE_MFW_VER_PTR); 826 827 for (i = 0, j = 0; j < 3; j++) { 828 val = bce_reg_rd_ind(sc, addr + j * 4); 829 val = bswap32(val); 830 memcpy(&sc->bce_mfw_ver[i], &val, 4); 831 i += 4; 832 } 833 } 834 835 /* Get PCI bus information (speed and type). 
*/ 836 val = REG_RD(sc, BCE_PCICFG_MISC_STATUS); 837 if (val & BCE_PCICFG_MISC_STATUS_PCIX_DET) { 838 uint32_t clkreg; 839 840 sc->bce_flags |= BCE_PCIX_FLAG; 841 842 clkreg = REG_RD(sc, BCE_PCICFG_PCI_CLOCK_CONTROL_BITS) & 843 BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET; 844 switch (clkreg) { 845 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ: 846 sc->bus_speed_mhz = 133; 847 break; 848 849 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ: 850 sc->bus_speed_mhz = 100; 851 break; 852 853 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ: 854 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ: 855 sc->bus_speed_mhz = 66; 856 break; 857 858 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ: 859 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ: 860 sc->bus_speed_mhz = 50; 861 break; 862 863 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW: 864 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ: 865 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ: 866 sc->bus_speed_mhz = 33; 867 break; 868 } 869 } else { 870 if (val & BCE_PCICFG_MISC_STATUS_M66EN) 871 sc->bus_speed_mhz = 66; 872 else 873 sc->bus_speed_mhz = 33; 874 } 875 876 if (val & BCE_PCICFG_MISC_STATUS_32BIT_DET) 877 sc->bce_flags |= BCE_PCI_32BIT_FLAG; 878 879 /* Reset the controller. */ 880 rc = bce_reset(sc, BCE_DRV_MSG_CODE_RESET); 881 if (rc != 0) 882 goto fail; 883 884 /* Initialize the controller. */ 885 rc = bce_chipinit(sc); 886 if (rc != 0) { 887 device_printf(dev, "Controller initialization failed!\n"); 888 goto fail; 889 } 890 891 /* Perform NVRAM test. */ 892 rc = bce_nvram_test(sc); 893 if (rc != 0) { 894 device_printf(dev, "NVRAM test failed!\n"); 895 goto fail; 896 } 897 898 /* Fetch the permanent Ethernet MAC address. 
*/ 899 bce_get_mac_addr(sc); 900 901 /* 902 * Trip points control how many BDs 903 * should be ready before generating an 904 * interrupt while ticks control how long 905 * a BD can sit in the chain before 906 * generating an interrupt. Set the default 907 * values for the RX and TX rings. 908 */ 909 910 #ifdef BCE_DRBUG 911 /* Force more frequent interrupts. */ 912 sc->bce_tx_quick_cons_trip_int = 1; 913 sc->bce_tx_quick_cons_trip = 1; 914 sc->bce_tx_ticks_int = 0; 915 sc->bce_tx_ticks = 0; 916 917 sc->bce_rx_quick_cons_trip_int = 1; 918 sc->bce_rx_quick_cons_trip = 1; 919 sc->bce_rx_ticks_int = 0; 920 sc->bce_rx_ticks = 0; 921 #else 922 sc->bce_tx_quick_cons_trip_int = bce_tx_bds_int; 923 sc->bce_tx_quick_cons_trip = bce_tx_bds; 924 sc->bce_tx_ticks_int = bce_tx_ticks_int; 925 sc->bce_tx_ticks = bce_tx_ticks; 926 927 sc->bce_rx_quick_cons_trip_int = bce_rx_bds_int; 928 sc->bce_rx_quick_cons_trip = bce_rx_bds; 929 sc->bce_rx_ticks_int = bce_rx_ticks_int; 930 sc->bce_rx_ticks = bce_rx_ticks; 931 #endif 932 sc->tx_wreg = 8; 933 934 /* Update statistics once every second. */ 935 sc->bce_stats_ticks = 1000000 & 0xffff00; 936 937 /* Find the media type for the adapter. */ 938 bce_get_media(sc); 939 940 /* Allocate DMA memory resources. */ 941 rc = bce_dma_alloc(sc); 942 if (rc != 0) { 943 device_printf(dev, "DMA resource allocation failed!\n"); 944 goto fail; 945 } 946 947 /* Initialize the ifnet interface. 
*/ 948 ifp->if_softc = sc; 949 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 950 ifp->if_ioctl = bce_ioctl; 951 ifp->if_start = bce_start; 952 ifp->if_init = bce_init; 953 ifp->if_watchdog = bce_watchdog; 954 #ifdef IFPOLL_ENABLE 955 ifp->if_npoll = bce_npoll; 956 #endif 957 ifp->if_mtu = ETHERMTU; 958 ifp->if_hwassist = BCE_CSUM_FEATURES | CSUM_TSO; 959 ifp->if_capabilities = BCE_IF_CAPABILITIES; 960 ifp->if_capenable = ifp->if_capabilities; 961 ifq_set_maxlen(&ifp->if_snd, USABLE_TX_BD(sc)); 962 ifq_set_ready(&ifp->if_snd); 963 964 if (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG) 965 ifp->if_baudrate = IF_Gbps(2.5); 966 else 967 ifp->if_baudrate = IF_Gbps(1); 968 969 /* Assume a standard 1500 byte MTU size for mbuf allocations. */ 970 sc->mbuf_alloc_size = MCLBYTES; 971 972 /* 973 * Look for our PHY. 974 */ 975 mii_probe_args_init(&mii_args, bce_ifmedia_upd, bce_ifmedia_sts); 976 mii_args.mii_probemask = 1 << sc->bce_phy_addr; 977 mii_args.mii_privtag = MII_PRIVTAG_BRGPHY; 978 mii_args.mii_priv = mii_priv; 979 980 rc = mii_probe(dev, &sc->bce_miibus, &mii_args); 981 if (rc != 0) { 982 device_printf(dev, "PHY probe failed!\n"); 983 goto fail; 984 } 985 986 /* Attach to the Ethernet interface list. */ 987 ether_ifattach(ifp, sc->eaddr, NULL); 988 989 callout_init_mp(&sc->bce_tick_callout); 990 callout_init_mp(&sc->bce_pulse_callout); 991 callout_init_mp(&sc->bce_ckmsi_callout); 992 993 /* Hookup IRQ last. */ 994 rc = bus_setup_intr(dev, sc->bce_res_irq, INTR_MPSAFE, irq_handle, sc, 995 &sc->bce_intrhand, ifp->if_serializer); 996 if (rc != 0) { 997 device_printf(dev, "Failed to setup IRQ!\n"); 998 ether_ifdetach(ifp); 999 goto fail; 1000 } 1001 1002 sc->bce_intr_cpuid = rman_get_cpuid(sc->bce_res_irq); 1003 ifq_set_cpuid(&ifp->if_snd, sc->bce_intr_cpuid); 1004 1005 /* Print some important debugging info. */ 1006 DBRUN(BCE_INFO, bce_dump_driver_state(sc)); 1007 1008 /* Add the supported sysctls to the kernel. 
*/ 1009 bce_add_sysctls(sc); 1010 1011 #ifdef IFPOLL_ENABLE 1012 ifpoll_compat_setup(&sc->bce_npoll, 1013 &sc->bce_sysctl_ctx, sc->bce_sysctl_tree, device_get_unit(dev), 1014 ifp->if_serializer); 1015 #endif 1016 1017 /* 1018 * The chip reset earlier notified the bootcode that 1019 * a driver is present. We now need to start our pulse 1020 * routine so that the bootcode is reminded that we're 1021 * still running. 1022 */ 1023 bce_pulse(sc); 1024 1025 /* Get the firmware running so IPMI still works */ 1026 bce_mgmt_init(sc); 1027 1028 if (bootverbose) 1029 bce_print_adapter_info(sc); 1030 1031 return 0; 1032 fail: 1033 bce_detach(dev); 1034 return(rc); 1035 } 1036 1037 1038 /****************************************************************************/ 1039 /* Device detach function. */ 1040 /* */ 1041 /* Stops the controller, resets the controller, and releases resources. */ 1042 /* */ 1043 /* Returns: */ 1044 /* 0 on success, positive value on failure. */ 1045 /****************************************************************************/ 1046 static int 1047 bce_detach(device_t dev) 1048 { 1049 struct bce_softc *sc = device_get_softc(dev); 1050 1051 if (device_is_attached(dev)) { 1052 struct ifnet *ifp = &sc->arpcom.ac_if; 1053 uint32_t msg; 1054 1055 /* Stop and reset the controller. */ 1056 lwkt_serialize_enter(ifp->if_serializer); 1057 callout_stop(&sc->bce_pulse_callout); 1058 bce_stop(sc); 1059 if (sc->bce_flags & BCE_NO_WOL_FLAG) 1060 msg = BCE_DRV_MSG_CODE_UNLOAD_LNK_DN; 1061 else 1062 msg = BCE_DRV_MSG_CODE_UNLOAD; 1063 bce_reset(sc, msg); 1064 bus_teardown_intr(dev, sc->bce_res_irq, sc->bce_intrhand); 1065 lwkt_serialize_exit(ifp->if_serializer); 1066 1067 ether_ifdetach(ifp); 1068 } 1069 1070 /* If we have a child device on the MII bus remove it too. 
*/ 1071 if (sc->bce_miibus) 1072 device_delete_child(dev, sc->bce_miibus); 1073 bus_generic_detach(dev); 1074 1075 if (sc->bce_res_irq != NULL) { 1076 bus_release_resource(dev, SYS_RES_IRQ, sc->bce_irq_rid, 1077 sc->bce_res_irq); 1078 } 1079 1080 if (sc->bce_irq_type == PCI_INTR_TYPE_MSI) 1081 pci_release_msi(dev); 1082 1083 if (sc->bce_res_mem != NULL) { 1084 bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0), 1085 sc->bce_res_mem); 1086 } 1087 1088 bce_dma_free(sc); 1089 1090 if (sc->bce_sysctl_tree != NULL) 1091 sysctl_ctx_free(&sc->bce_sysctl_ctx); 1092 1093 return 0; 1094 } 1095 1096 1097 /****************************************************************************/ 1098 /* Device shutdown function. */ 1099 /* */ 1100 /* Stops and resets the controller. */ 1101 /* */ 1102 /* Returns: */ 1103 /* Nothing */ 1104 /****************************************************************************/ 1105 static void 1106 bce_shutdown(device_t dev) 1107 { 1108 struct bce_softc *sc = device_get_softc(dev); 1109 struct ifnet *ifp = &sc->arpcom.ac_if; 1110 uint32_t msg; 1111 1112 lwkt_serialize_enter(ifp->if_serializer); 1113 bce_stop(sc); 1114 if (sc->bce_flags & BCE_NO_WOL_FLAG) 1115 msg = BCE_DRV_MSG_CODE_UNLOAD_LNK_DN; 1116 else 1117 msg = BCE_DRV_MSG_CODE_UNLOAD; 1118 bce_reset(sc, msg); 1119 lwkt_serialize_exit(ifp->if_serializer); 1120 } 1121 1122 1123 /****************************************************************************/ 1124 /* Indirect register read. */ 1125 /* */ 1126 /* Reads NetXtreme II registers using an index/data register pair in PCI */ 1127 /* configuration space. Using this mechanism avoids issues with posted */ 1128 /* reads but is much slower than memory-mapped I/O. */ 1129 /* */ 1130 /* Returns: */ 1131 /* The value of the register. 
*/ 1132 /****************************************************************************/ 1133 static uint32_t 1134 bce_reg_rd_ind(struct bce_softc *sc, uint32_t offset) 1135 { 1136 device_t dev = sc->bce_dev; 1137 1138 pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4); 1139 #ifdef BCE_DEBUG 1140 { 1141 uint32_t val; 1142 val = pci_read_config(dev, BCE_PCICFG_REG_WINDOW, 4); 1143 DBPRINT(sc, BCE_EXCESSIVE, 1144 "%s(); offset = 0x%08X, val = 0x%08X\n", 1145 __func__, offset, val); 1146 return val; 1147 } 1148 #else 1149 return pci_read_config(dev, BCE_PCICFG_REG_WINDOW, 4); 1150 #endif 1151 } 1152 1153 1154 /****************************************************************************/ 1155 /* Indirect register write. */ 1156 /* */ 1157 /* Writes NetXtreme II registers using an index/data register pair in PCI */ 1158 /* configuration space. Using this mechanism avoids issues with posted */ 1159 /* writes but is muchh slower than memory-mapped I/O. */ 1160 /* */ 1161 /* Returns: */ 1162 /* Nothing. */ 1163 /****************************************************************************/ 1164 static void 1165 bce_reg_wr_ind(struct bce_softc *sc, uint32_t offset, uint32_t val) 1166 { 1167 device_t dev = sc->bce_dev; 1168 1169 DBPRINT(sc, BCE_EXCESSIVE, "%s(); offset = 0x%08X, val = 0x%08X\n", 1170 __func__, offset, val); 1171 1172 pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4); 1173 pci_write_config(dev, BCE_PCICFG_REG_WINDOW, val, 4); 1174 } 1175 1176 1177 /****************************************************************************/ 1178 /* Shared memory write. */ 1179 /* */ 1180 /* Writes NetXtreme II shared memory region. */ 1181 /* */ 1182 /* Returns: */ 1183 /* Nothing. 
*/ 1184 /****************************************************************************/ 1185 static void 1186 bce_shmem_wr(struct bce_softc *sc, uint32_t offset, uint32_t val) 1187 { 1188 bce_reg_wr_ind(sc, sc->bce_shmem_base + offset, val); 1189 } 1190 1191 1192 /****************************************************************************/ 1193 /* Shared memory read. */ 1194 /* */ 1195 /* Reads NetXtreme II shared memory region. */ 1196 /* */ 1197 /* Returns: */ 1198 /* The 32 bit value read. */ 1199 /****************************************************************************/ 1200 static u32 1201 bce_shmem_rd(struct bce_softc *sc, uint32_t offset) 1202 { 1203 return bce_reg_rd_ind(sc, sc->bce_shmem_base + offset); 1204 } 1205 1206 1207 /****************************************************************************/ 1208 /* Context memory write. */ 1209 /* */ 1210 /* The NetXtreme II controller uses context memory to track connection */ 1211 /* information for L2 and higher network protocols. */ 1212 /* */ 1213 /* Returns: */ 1214 /* Nothing. 
*/ 1215 /****************************************************************************/ 1216 static void 1217 bce_ctx_wr(struct bce_softc *sc, uint32_t cid_addr, uint32_t ctx_offset, 1218 uint32_t ctx_val) 1219 { 1220 uint32_t idx, offset = ctx_offset + cid_addr; 1221 uint32_t val, retry_cnt = 5; 1222 1223 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 || 1224 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) { 1225 REG_WR(sc, BCE_CTX_CTX_DATA, ctx_val); 1226 REG_WR(sc, BCE_CTX_CTX_CTRL, (offset | BCE_CTX_CTX_CTRL_WRITE_REQ)); 1227 1228 for (idx = 0; idx < retry_cnt; idx++) { 1229 val = REG_RD(sc, BCE_CTX_CTX_CTRL); 1230 if ((val & BCE_CTX_CTX_CTRL_WRITE_REQ) == 0) 1231 break; 1232 DELAY(5); 1233 } 1234 1235 if (val & BCE_CTX_CTX_CTRL_WRITE_REQ) { 1236 device_printf(sc->bce_dev, 1237 "Unable to write CTX memory: " 1238 "cid_addr = 0x%08X, offset = 0x%08X!\n", 1239 cid_addr, ctx_offset); 1240 } 1241 } else { 1242 REG_WR(sc, BCE_CTX_DATA_ADR, offset); 1243 REG_WR(sc, BCE_CTX_DATA, ctx_val); 1244 } 1245 } 1246 1247 1248 /****************************************************************************/ 1249 /* PHY register read. */ 1250 /* */ 1251 /* Implements register reads on the MII bus. */ 1252 /* */ 1253 /* Returns: */ 1254 /* The value of the register. */ 1255 /****************************************************************************/ 1256 static int 1257 bce_miibus_read_reg(device_t dev, int phy, int reg) 1258 { 1259 struct bce_softc *sc = device_get_softc(dev); 1260 uint32_t val; 1261 int i; 1262 1263 /* Make sure we are accessing the correct PHY address. 
*/ 1264 KASSERT(phy == sc->bce_phy_addr, 1265 ("invalid phyno %d, should be %d\n", phy, sc->bce_phy_addr)); 1266 1267 if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) { 1268 val = REG_RD(sc, BCE_EMAC_MDIO_MODE); 1269 val &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL; 1270 1271 REG_WR(sc, BCE_EMAC_MDIO_MODE, val); 1272 REG_RD(sc, BCE_EMAC_MDIO_MODE); 1273 1274 DELAY(40); 1275 } 1276 1277 val = BCE_MIPHY(phy) | BCE_MIREG(reg) | 1278 BCE_EMAC_MDIO_COMM_COMMAND_READ | BCE_EMAC_MDIO_COMM_DISEXT | 1279 BCE_EMAC_MDIO_COMM_START_BUSY; 1280 REG_WR(sc, BCE_EMAC_MDIO_COMM, val); 1281 1282 for (i = 0; i < BCE_PHY_TIMEOUT; i++) { 1283 DELAY(10); 1284 1285 val = REG_RD(sc, BCE_EMAC_MDIO_COMM); 1286 if (!(val & BCE_EMAC_MDIO_COMM_START_BUSY)) { 1287 DELAY(5); 1288 1289 val = REG_RD(sc, BCE_EMAC_MDIO_COMM); 1290 val &= BCE_EMAC_MDIO_COMM_DATA; 1291 break; 1292 } 1293 } 1294 1295 if (val & BCE_EMAC_MDIO_COMM_START_BUSY) { 1296 if_printf(&sc->arpcom.ac_if, 1297 "Error: PHY read timeout! phy = %d, reg = 0x%04X\n", 1298 phy, reg); 1299 val = 0x0; 1300 } else { 1301 val = REG_RD(sc, BCE_EMAC_MDIO_COMM); 1302 } 1303 1304 DBPRINT(sc, BCE_EXCESSIVE, 1305 "%s(): phy = %d, reg = 0x%04X, val = 0x%04X\n", 1306 __func__, phy, (uint16_t)reg & 0xffff, (uint16_t) val & 0xffff); 1307 1308 if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) { 1309 val = REG_RD(sc, BCE_EMAC_MDIO_MODE); 1310 val |= BCE_EMAC_MDIO_MODE_AUTO_POLL; 1311 1312 REG_WR(sc, BCE_EMAC_MDIO_MODE, val); 1313 REG_RD(sc, BCE_EMAC_MDIO_MODE); 1314 1315 DELAY(40); 1316 } 1317 return (val & 0xffff); 1318 } 1319 1320 1321 /****************************************************************************/ 1322 /* PHY register write. */ 1323 /* */ 1324 /* Implements register writes on the MII bus. */ 1325 /* */ 1326 /* Returns: */ 1327 /* The value of the register. 
*/ 1328 /****************************************************************************/ 1329 static int 1330 bce_miibus_write_reg(device_t dev, int phy, int reg, int val) 1331 { 1332 struct bce_softc *sc = device_get_softc(dev); 1333 uint32_t val1; 1334 int i; 1335 1336 /* Make sure we are accessing the correct PHY address. */ 1337 KASSERT(phy == sc->bce_phy_addr, 1338 ("invalid phyno %d, should be %d\n", phy, sc->bce_phy_addr)); 1339 1340 DBPRINT(sc, BCE_EXCESSIVE, 1341 "%s(): phy = %d, reg = 0x%04X, val = 0x%04X\n", 1342 __func__, phy, (uint16_t)(reg & 0xffff), 1343 (uint16_t)(val & 0xffff)); 1344 1345 if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) { 1346 val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE); 1347 val1 &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL; 1348 1349 REG_WR(sc, BCE_EMAC_MDIO_MODE, val1); 1350 REG_RD(sc, BCE_EMAC_MDIO_MODE); 1351 1352 DELAY(40); 1353 } 1354 1355 val1 = BCE_MIPHY(phy) | BCE_MIREG(reg) | val | 1356 BCE_EMAC_MDIO_COMM_COMMAND_WRITE | 1357 BCE_EMAC_MDIO_COMM_START_BUSY | BCE_EMAC_MDIO_COMM_DISEXT; 1358 REG_WR(sc, BCE_EMAC_MDIO_COMM, val1); 1359 1360 for (i = 0; i < BCE_PHY_TIMEOUT; i++) { 1361 DELAY(10); 1362 1363 val1 = REG_RD(sc, BCE_EMAC_MDIO_COMM); 1364 if (!(val1 & BCE_EMAC_MDIO_COMM_START_BUSY)) { 1365 DELAY(5); 1366 break; 1367 } 1368 } 1369 1370 if (val1 & BCE_EMAC_MDIO_COMM_START_BUSY) 1371 if_printf(&sc->arpcom.ac_if, "PHY write timeout!\n"); 1372 1373 if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) { 1374 val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE); 1375 val1 |= BCE_EMAC_MDIO_MODE_AUTO_POLL; 1376 1377 REG_WR(sc, BCE_EMAC_MDIO_MODE, val1); 1378 REG_RD(sc, BCE_EMAC_MDIO_MODE); 1379 1380 DELAY(40); 1381 } 1382 return 0; 1383 } 1384 1385 1386 /****************************************************************************/ 1387 /* MII bus status change. */ 1388 /* */ 1389 /* Called by the MII bus driver when the PHY establishes link to set the */ 1390 /* MAC interface registers. */ 1391 /* */ 1392 /* Returns: */ 1393 /* Nothing. 
*/ 1394 /****************************************************************************/ 1395 static void 1396 bce_miibus_statchg(device_t dev) 1397 { 1398 struct bce_softc *sc = device_get_softc(dev); 1399 struct mii_data *mii = device_get_softc(sc->bce_miibus); 1400 1401 DBPRINT(sc, BCE_INFO, "mii_media_active = 0x%08X\n", 1402 mii->mii_media_active); 1403 1404 #ifdef BCE_DEBUG 1405 /* Decode the interface media flags. */ 1406 if_printf(&sc->arpcom.ac_if, "Media: ( "); 1407 switch(IFM_TYPE(mii->mii_media_active)) { 1408 case IFM_ETHER: 1409 kprintf("Ethernet )"); 1410 break; 1411 default: 1412 kprintf("Unknown )"); 1413 break; 1414 } 1415 1416 kprintf(" Media Options: ( "); 1417 switch(IFM_SUBTYPE(mii->mii_media_active)) { 1418 case IFM_AUTO: 1419 kprintf("Autoselect )"); 1420 break; 1421 case IFM_MANUAL: 1422 kprintf("Manual )"); 1423 break; 1424 case IFM_NONE: 1425 kprintf("None )"); 1426 break; 1427 case IFM_10_T: 1428 kprintf("10Base-T )"); 1429 break; 1430 case IFM_100_TX: 1431 kprintf("100Base-TX )"); 1432 break; 1433 case IFM_1000_SX: 1434 kprintf("1000Base-SX )"); 1435 break; 1436 case IFM_1000_T: 1437 kprintf("1000Base-T )"); 1438 break; 1439 default: 1440 kprintf("Other )"); 1441 break; 1442 } 1443 1444 kprintf(" Global Options: ("); 1445 if (mii->mii_media_active & IFM_FDX) 1446 kprintf(" FullDuplex"); 1447 if (mii->mii_media_active & IFM_HDX) 1448 kprintf(" HalfDuplex"); 1449 if (mii->mii_media_active & IFM_LOOP) 1450 kprintf(" Loopback"); 1451 if (mii->mii_media_active & IFM_FLAG0) 1452 kprintf(" Flag0"); 1453 if (mii->mii_media_active & IFM_FLAG1) 1454 kprintf(" Flag1"); 1455 if (mii->mii_media_active & IFM_FLAG2) 1456 kprintf(" Flag2"); 1457 kprintf(" )\n"); 1458 #endif 1459 1460 BCE_CLRBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT); 1461 1462 /* 1463 * Set MII or GMII interface based on the speed negotiated 1464 * by the PHY. 
1465 */ 1466 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T || 1467 IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) { 1468 DBPRINT(sc, BCE_INFO, "Setting GMII interface.\n"); 1469 BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT_GMII); 1470 } else { 1471 DBPRINT(sc, BCE_INFO, "Setting MII interface.\n"); 1472 BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT_MII); 1473 } 1474 1475 /* 1476 * Set half or full duplex based on the duplicity negotiated 1477 * by the PHY. 1478 */ 1479 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) { 1480 DBPRINT(sc, BCE_INFO, "Setting Full-Duplex interface.\n"); 1481 BCE_CLRBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_HALF_DUPLEX); 1482 } else { 1483 DBPRINT(sc, BCE_INFO, "Setting Half-Duplex interface.\n"); 1484 BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_HALF_DUPLEX); 1485 } 1486 } 1487 1488 1489 /****************************************************************************/ 1490 /* Acquire NVRAM lock. */ 1491 /* */ 1492 /* Before the NVRAM can be accessed the caller must acquire an NVRAM lock. */ 1493 /* Locks 0 and 2 are reserved, lock 1 is used by firmware and lock 2 is */ 1494 /* for use by the driver. */ 1495 /* */ 1496 /* Returns: */ 1497 /* 0 on success, positive value on failure. */ 1498 /****************************************************************************/ 1499 static int 1500 bce_acquire_nvram_lock(struct bce_softc *sc) 1501 { 1502 uint32_t val; 1503 int j; 1504 1505 DBPRINT(sc, BCE_VERBOSE, "Acquiring NVRAM lock.\n"); 1506 1507 /* Request access to the flash interface. 
*/ 1508 REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_SET2); 1509 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) { 1510 val = REG_RD(sc, BCE_NVM_SW_ARB); 1511 if (val & BCE_NVM_SW_ARB_ARB_ARB2) 1512 break; 1513 1514 DELAY(5); 1515 } 1516 1517 if (j >= NVRAM_TIMEOUT_COUNT) { 1518 DBPRINT(sc, BCE_WARN, "Timeout acquiring NVRAM lock!\n"); 1519 return EBUSY; 1520 } 1521 return 0; 1522 } 1523 1524 1525 /****************************************************************************/ 1526 /* Release NVRAM lock. */ 1527 /* */ 1528 /* When the caller is finished accessing NVRAM the lock must be released. */ 1529 /* Locks 0 and 2 are reserved, lock 1 is used by firmware and lock 2 is */ 1530 /* for use by the driver. */ 1531 /* */ 1532 /* Returns: */ 1533 /* 0 on success, positive value on failure. */ 1534 /****************************************************************************/ 1535 static int 1536 bce_release_nvram_lock(struct bce_softc *sc) 1537 { 1538 int j; 1539 uint32_t val; 1540 1541 DBPRINT(sc, BCE_VERBOSE, "Releasing NVRAM lock.\n"); 1542 1543 /* 1544 * Relinquish nvram interface. 1545 */ 1546 REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_CLR2); 1547 1548 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) { 1549 val = REG_RD(sc, BCE_NVM_SW_ARB); 1550 if (!(val & BCE_NVM_SW_ARB_ARB_ARB2)) 1551 break; 1552 1553 DELAY(5); 1554 } 1555 1556 if (j >= NVRAM_TIMEOUT_COUNT) { 1557 DBPRINT(sc, BCE_WARN, "Timeout reeasing NVRAM lock!\n"); 1558 return EBUSY; 1559 } 1560 return 0; 1561 } 1562 1563 1564 /****************************************************************************/ 1565 /* Enable NVRAM access. */ 1566 /* */ 1567 /* Before accessing NVRAM for read or write operations the caller must */ 1568 /* enabled NVRAM access. */ 1569 /* */ 1570 /* Returns: */ 1571 /* Nothing. 
*/ 1572 /****************************************************************************/ 1573 static void 1574 bce_enable_nvram_access(struct bce_softc *sc) 1575 { 1576 uint32_t val; 1577 1578 DBPRINT(sc, BCE_VERBOSE, "Enabling NVRAM access.\n"); 1579 1580 val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE); 1581 /* Enable both bits, even on read. */ 1582 REG_WR(sc, BCE_NVM_ACCESS_ENABLE, 1583 val | BCE_NVM_ACCESS_ENABLE_EN | BCE_NVM_ACCESS_ENABLE_WR_EN); 1584 } 1585 1586 1587 /****************************************************************************/ 1588 /* Disable NVRAM access. */ 1589 /* */ 1590 /* When the caller is finished accessing NVRAM access must be disabled. */ 1591 /* */ 1592 /* Returns: */ 1593 /* Nothing. */ 1594 /****************************************************************************/ 1595 static void 1596 bce_disable_nvram_access(struct bce_softc *sc) 1597 { 1598 uint32_t val; 1599 1600 DBPRINT(sc, BCE_VERBOSE, "Disabling NVRAM access.\n"); 1601 1602 val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE); 1603 1604 /* Disable both bits, even after read. */ 1605 REG_WR(sc, BCE_NVM_ACCESS_ENABLE, 1606 val & ~(BCE_NVM_ACCESS_ENABLE_EN | BCE_NVM_ACCESS_ENABLE_WR_EN)); 1607 } 1608 1609 1610 /****************************************************************************/ 1611 /* Read a dword (32 bits) from NVRAM. */ 1612 /* */ 1613 /* Read a 32 bit word from NVRAM. The caller is assumed to have already */ 1614 /* obtained the NVRAM lock and enabled the controller for NVRAM access. */ 1615 /* */ 1616 /* Returns: */ 1617 /* 0 on success and the 32 bit value read, positive value on failure. */ 1618 /****************************************************************************/ 1619 static int 1620 bce_nvram_read_dword(struct bce_softc *sc, uint32_t offset, uint8_t *ret_val, 1621 uint32_t cmd_flags) 1622 { 1623 uint32_t cmd; 1624 int i, rc = 0; 1625 1626 /* Build the command word. */ 1627 cmd = BCE_NVM_COMMAND_DOIT | cmd_flags; 1628 1629 /* Calculate the offset for buffered flash. 
*/ 1630 if (sc->bce_flash_info->flags & BCE_NV_TRANSLATE) { 1631 offset = ((offset / sc->bce_flash_info->page_size) << 1632 sc->bce_flash_info->page_bits) + 1633 (offset % sc->bce_flash_info->page_size); 1634 } 1635 1636 /* 1637 * Clear the DONE bit separately, set the address to read, 1638 * and issue the read. 1639 */ 1640 REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE); 1641 REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE); 1642 REG_WR(sc, BCE_NVM_COMMAND, cmd); 1643 1644 /* Wait for completion. */ 1645 for (i = 0; i < NVRAM_TIMEOUT_COUNT; i++) { 1646 uint32_t val; 1647 1648 DELAY(5); 1649 1650 val = REG_RD(sc, BCE_NVM_COMMAND); 1651 if (val & BCE_NVM_COMMAND_DONE) { 1652 val = REG_RD(sc, BCE_NVM_READ); 1653 1654 val = be32toh(val); 1655 memcpy(ret_val, &val, 4); 1656 break; 1657 } 1658 } 1659 1660 /* Check for errors. */ 1661 if (i >= NVRAM_TIMEOUT_COUNT) { 1662 if_printf(&sc->arpcom.ac_if, 1663 "Timeout error reading NVRAM at offset 0x%08X!\n", 1664 offset); 1665 rc = EBUSY; 1666 } 1667 return rc; 1668 } 1669 1670 1671 /****************************************************************************/ 1672 /* Initialize NVRAM access. */ 1673 /* */ 1674 /* Identify the NVRAM device in use and prepare the NVRAM interface to */ 1675 /* access that device. */ 1676 /* */ 1677 /* Returns: */ 1678 /* 0 on success, positive value on failure. */ 1679 /****************************************************************************/ 1680 static int 1681 bce_init_nvram(struct bce_softc *sc) 1682 { 1683 uint32_t val; 1684 int j, entry_count, rc = 0; 1685 const struct flash_spec *flash; 1686 1687 DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __func__); 1688 1689 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 || 1690 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) { 1691 sc->bce_flash_info = &flash_5709; 1692 goto bce_init_nvram_get_flash_size; 1693 } 1694 1695 /* Determine the selected interface. 
*/ 1696 val = REG_RD(sc, BCE_NVM_CFG1); 1697 1698 entry_count = sizeof(flash_table) / sizeof(struct flash_spec); 1699 1700 /* 1701 * Flash reconfiguration is required to support additional 1702 * NVRAM devices not directly supported in hardware. 1703 * Check if the flash interface was reconfigured 1704 * by the bootcode. 1705 */ 1706 1707 if (val & 0x40000000) { 1708 /* Flash interface reconfigured by bootcode. */ 1709 1710 DBPRINT(sc, BCE_INFO_LOAD, 1711 "%s(): Flash WAS reconfigured.\n", __func__); 1712 1713 for (j = 0, flash = flash_table; j < entry_count; 1714 j++, flash++) { 1715 if ((val & FLASH_BACKUP_STRAP_MASK) == 1716 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) { 1717 sc->bce_flash_info = flash; 1718 break; 1719 } 1720 } 1721 } else { 1722 /* Flash interface not yet reconfigured. */ 1723 uint32_t mask; 1724 1725 DBPRINT(sc, BCE_INFO_LOAD, 1726 "%s(): Flash was NOT reconfigured.\n", __func__); 1727 1728 if (val & (1 << 23)) 1729 mask = FLASH_BACKUP_STRAP_MASK; 1730 else 1731 mask = FLASH_STRAP_MASK; 1732 1733 /* Look for the matching NVRAM device configuration data. */ 1734 for (j = 0, flash = flash_table; j < entry_count; 1735 j++, flash++) { 1736 /* Check if the device matches any of the known devices. */ 1737 if ((val & mask) == (flash->strapping & mask)) { 1738 /* Found a device match. */ 1739 sc->bce_flash_info = flash; 1740 1741 /* Request access to the flash interface. */ 1742 rc = bce_acquire_nvram_lock(sc); 1743 if (rc != 0) 1744 return rc; 1745 1746 /* Reconfigure the flash interface. */ 1747 bce_enable_nvram_access(sc); 1748 REG_WR(sc, BCE_NVM_CFG1, flash->config1); 1749 REG_WR(sc, BCE_NVM_CFG2, flash->config2); 1750 REG_WR(sc, BCE_NVM_CFG3, flash->config3); 1751 REG_WR(sc, BCE_NVM_WRITE1, flash->write1); 1752 bce_disable_nvram_access(sc); 1753 bce_release_nvram_lock(sc); 1754 break; 1755 } 1756 } 1757 } 1758 1759 /* Check if a matching device was found. 
*/ 1760 if (j == entry_count) { 1761 sc->bce_flash_info = NULL; 1762 if_printf(&sc->arpcom.ac_if, "Unknown Flash NVRAM found!\n"); 1763 return ENODEV; 1764 } 1765 1766 bce_init_nvram_get_flash_size: 1767 /* Write the flash config data to the shared memory interface. */ 1768 val = bce_shmem_rd(sc, BCE_SHARED_HW_CFG_CONFIG2) & 1769 BCE_SHARED_HW_CFG2_NVM_SIZE_MASK; 1770 if (val) 1771 sc->bce_flash_size = val; 1772 else 1773 sc->bce_flash_size = sc->bce_flash_info->total_size; 1774 1775 DBPRINT(sc, BCE_INFO_LOAD, "%s() flash->total_size = 0x%08X\n", 1776 __func__, sc->bce_flash_info->total_size); 1777 1778 DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __func__); 1779 1780 return rc; 1781 } 1782 1783 1784 /****************************************************************************/ 1785 /* Read an arbitrary range of data from NVRAM. */ 1786 /* */ 1787 /* Prepares the NVRAM interface for access and reads the requested data */ 1788 /* into the supplied buffer. */ 1789 /* */ 1790 /* Returns: */ 1791 /* 0 on success and the data read, positive value on failure. */ 1792 /****************************************************************************/ 1793 static int 1794 bce_nvram_read(struct bce_softc *sc, uint32_t offset, uint8_t *ret_buf, 1795 int buf_size) 1796 { 1797 uint32_t cmd_flags, offset32, len32, extra; 1798 int rc = 0; 1799 1800 if (buf_size == 0) 1801 return 0; 1802 1803 /* Request access to the flash interface. */ 1804 rc = bce_acquire_nvram_lock(sc); 1805 if (rc != 0) 1806 return rc; 1807 1808 /* Enable access to flash interface */ 1809 bce_enable_nvram_access(sc); 1810 1811 len32 = buf_size; 1812 offset32 = offset; 1813 extra = 0; 1814 1815 cmd_flags = 0; 1816 1817 /* XXX should we release nvram lock if read_dword() fails? 
*/ 1818 if (offset32 & 3) { 1819 uint8_t buf[4]; 1820 uint32_t pre_len; 1821 1822 offset32 &= ~3; 1823 pre_len = 4 - (offset & 3); 1824 1825 if (pre_len >= len32) { 1826 pre_len = len32; 1827 cmd_flags = BCE_NVM_COMMAND_FIRST | BCE_NVM_COMMAND_LAST; 1828 } else { 1829 cmd_flags = BCE_NVM_COMMAND_FIRST; 1830 } 1831 1832 rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags); 1833 if (rc) 1834 return rc; 1835 1836 memcpy(ret_buf, buf + (offset & 3), pre_len); 1837 1838 offset32 += 4; 1839 ret_buf += pre_len; 1840 len32 -= pre_len; 1841 } 1842 1843 if (len32 & 3) { 1844 extra = 4 - (len32 & 3); 1845 len32 = (len32 + 4) & ~3; 1846 } 1847 1848 if (len32 == 4) { 1849 uint8_t buf[4]; 1850 1851 if (cmd_flags) 1852 cmd_flags = BCE_NVM_COMMAND_LAST; 1853 else 1854 cmd_flags = BCE_NVM_COMMAND_FIRST | 1855 BCE_NVM_COMMAND_LAST; 1856 1857 rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags); 1858 1859 memcpy(ret_buf, buf, 4 - extra); 1860 } else if (len32 > 0) { 1861 uint8_t buf[4]; 1862 1863 /* Read the first word. */ 1864 if (cmd_flags) 1865 cmd_flags = 0; 1866 else 1867 cmd_flags = BCE_NVM_COMMAND_FIRST; 1868 1869 rc = bce_nvram_read_dword(sc, offset32, ret_buf, cmd_flags); 1870 1871 /* Advance to the next dword. */ 1872 offset32 += 4; 1873 ret_buf += 4; 1874 len32 -= 4; 1875 1876 while (len32 > 4 && rc == 0) { 1877 rc = bce_nvram_read_dword(sc, offset32, ret_buf, 0); 1878 1879 /* Advance to the next dword. */ 1880 offset32 += 4; 1881 ret_buf += 4; 1882 len32 -= 4; 1883 } 1884 1885 if (rc) 1886 goto bce_nvram_read_locked_exit; 1887 1888 cmd_flags = BCE_NVM_COMMAND_LAST; 1889 rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags); 1890 1891 memcpy(ret_buf, buf, 4 - extra); 1892 } 1893 1894 bce_nvram_read_locked_exit: 1895 /* Disable access to flash interface and release the lock. 
*/ 1896 bce_disable_nvram_access(sc); 1897 bce_release_nvram_lock(sc); 1898 1899 return rc; 1900 } 1901 1902 1903 /****************************************************************************/ 1904 /* Verifies that NVRAM is accessible and contains valid data. */ 1905 /* */ 1906 /* Reads the configuration data from NVRAM and verifies that the CRC is */ 1907 /* correct. */ 1908 /* */ 1909 /* Returns: */ 1910 /* 0 on success, positive value on failure. */ 1911 /****************************************************************************/ 1912 static int 1913 bce_nvram_test(struct bce_softc *sc) 1914 { 1915 uint32_t buf[BCE_NVRAM_SIZE / 4]; 1916 uint32_t magic, csum; 1917 uint8_t *data = (uint8_t *)buf; 1918 int rc = 0; 1919 1920 /* 1921 * Check that the device NVRAM is valid by reading 1922 * the magic value at offset 0. 1923 */ 1924 rc = bce_nvram_read(sc, 0, data, 4); 1925 if (rc != 0) 1926 return rc; 1927 1928 magic = be32toh(buf[0]); 1929 if (magic != BCE_NVRAM_MAGIC) { 1930 if_printf(&sc->arpcom.ac_if, 1931 "Invalid NVRAM magic value! Expected: 0x%08X, " 1932 "Found: 0x%08X\n", BCE_NVRAM_MAGIC, magic); 1933 return ENODEV; 1934 } 1935 1936 /* 1937 * Verify that the device NVRAM includes valid 1938 * configuration data. 1939 */ 1940 rc = bce_nvram_read(sc, 0x100, data, BCE_NVRAM_SIZE); 1941 if (rc != 0) 1942 return rc; 1943 1944 csum = ether_crc32_le(data, 0x100); 1945 if (csum != BCE_CRC32_RESIDUAL) { 1946 if_printf(&sc->arpcom.ac_if, 1947 "Invalid Manufacturing Information NVRAM CRC! " 1948 "Expected: 0x%08X, Found: 0x%08X\n", 1949 BCE_CRC32_RESIDUAL, csum); 1950 return ENODEV; 1951 } 1952 1953 csum = ether_crc32_le(data + 0x100, 0x100); 1954 if (csum != BCE_CRC32_RESIDUAL) { 1955 if_printf(&sc->arpcom.ac_if, 1956 "Invalid Feature Configuration Information " 1957 "NVRAM CRC! 
Expected: 0x%08X, Found: 08%08X\n", 1958 BCE_CRC32_RESIDUAL, csum); 1959 rc = ENODEV; 1960 } 1961 return rc; 1962 } 1963 1964 1965 /****************************************************************************/ 1966 /* Identifies the current media type of the controller and sets the PHY */ 1967 /* address. */ 1968 /* */ 1969 /* Returns: */ 1970 /* Nothing. */ 1971 /****************************************************************************/ 1972 static void 1973 bce_get_media(struct bce_softc *sc) 1974 { 1975 uint32_t val; 1976 1977 sc->bce_phy_addr = 1; 1978 1979 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 || 1980 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) { 1981 uint32_t val = REG_RD(sc, BCE_MISC_DUAL_MEDIA_CTRL); 1982 uint32_t bond_id = val & BCE_MISC_DUAL_MEDIA_CTRL_BOND_ID; 1983 uint32_t strap; 1984 1985 /* 1986 * The BCM5709S is software configurable 1987 * for Copper or SerDes operation. 1988 */ 1989 if (bond_id == BCE_MISC_DUAL_MEDIA_CTRL_BOND_ID_C) { 1990 return; 1991 } else if (bond_id == BCE_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) { 1992 sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG; 1993 return; 1994 } 1995 1996 if (val & BCE_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE) { 1997 strap = (val & BCE_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21; 1998 } else { 1999 strap = 2000 (val & BCE_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8; 2001 } 2002 2003 if (pci_get_function(sc->bce_dev) == 0) { 2004 switch (strap) { 2005 case 0x4: 2006 case 0x5: 2007 case 0x6: 2008 sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG; 2009 break; 2010 } 2011 } else { 2012 switch (strap) { 2013 case 0x1: 2014 case 0x2: 2015 case 0x4: 2016 sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG; 2017 break; 2018 } 2019 } 2020 } else if (BCE_CHIP_BOND_ID(sc) & BCE_CHIP_BOND_ID_SERDES_BIT) { 2021 sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG; 2022 } 2023 2024 if (sc->bce_phy_flags & BCE_PHY_SERDES_FLAG) { 2025 sc->bce_flags |= BCE_NO_WOL_FLAG; 2026 if (BCE_CHIP_NUM(sc) != BCE_CHIP_NUM_5706) { 2027 sc->bce_phy_addr = 2; 2028 val = bce_shmem_rd(sc, 
BCE_SHARED_HW_CFG_CONFIG); 2029 if (val & BCE_SHARED_HW_CFG_PHY_2_5G) 2030 sc->bce_phy_flags |= BCE_PHY_2_5G_CAPABLE_FLAG; 2031 } 2032 } else if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) || 2033 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708)) { 2034 sc->bce_phy_flags |= BCE_PHY_CRC_FIX_FLAG; 2035 } 2036 } 2037 2038 2039 /****************************************************************************/ 2040 /* Free any DMA memory owned by the driver. */ 2041 /* */ 2042 /* Scans through each data structre that requires DMA memory and frees */ 2043 /* the memory if allocated. */ 2044 /* */ 2045 /* Returns: */ 2046 /* Nothing. */ 2047 /****************************************************************************/ 2048 static void 2049 bce_dma_free(struct bce_softc *sc) 2050 { 2051 int i; 2052 2053 /* Destroy the status block. */ 2054 if (sc->status_tag != NULL) { 2055 if (sc->status_block != NULL) { 2056 bus_dmamap_unload(sc->status_tag, sc->status_map); 2057 bus_dmamem_free(sc->status_tag, sc->status_block, 2058 sc->status_map); 2059 } 2060 bus_dma_tag_destroy(sc->status_tag); 2061 } 2062 2063 /* Destroy the statistics block. */ 2064 if (sc->stats_tag != NULL) { 2065 if (sc->stats_block != NULL) { 2066 bus_dmamap_unload(sc->stats_tag, sc->stats_map); 2067 bus_dmamem_free(sc->stats_tag, sc->stats_block, 2068 sc->stats_map); 2069 } 2070 bus_dma_tag_destroy(sc->stats_tag); 2071 } 2072 2073 /* Destroy the CTX DMA stuffs. */ 2074 if (sc->ctx_tag != NULL) { 2075 for (i = 0; i < sc->ctx_pages; i++) { 2076 if (sc->ctx_block[i] != NULL) { 2077 bus_dmamap_unload(sc->ctx_tag, sc->ctx_map[i]); 2078 bus_dmamem_free(sc->ctx_tag, sc->ctx_block[i], 2079 sc->ctx_map[i]); 2080 } 2081 } 2082 bus_dma_tag_destroy(sc->ctx_tag); 2083 } 2084 2085 /* Destroy the TX buffer descriptor DMA stuffs. 
*/ 2086 if (sc->tx_bd_chain_tag != NULL) { 2087 for (i = 0; i < sc->tx_pages; i++) { 2088 if (sc->tx_bd_chain[i] != NULL) { 2089 bus_dmamap_unload(sc->tx_bd_chain_tag, 2090 sc->tx_bd_chain_map[i]); 2091 bus_dmamem_free(sc->tx_bd_chain_tag, 2092 sc->tx_bd_chain[i], 2093 sc->tx_bd_chain_map[i]); 2094 } 2095 } 2096 bus_dma_tag_destroy(sc->tx_bd_chain_tag); 2097 } 2098 2099 /* Destroy the RX buffer descriptor DMA stuffs. */ 2100 if (sc->rx_bd_chain_tag != NULL) { 2101 for (i = 0; i < sc->rx_pages; i++) { 2102 if (sc->rx_bd_chain[i] != NULL) { 2103 bus_dmamap_unload(sc->rx_bd_chain_tag, 2104 sc->rx_bd_chain_map[i]); 2105 bus_dmamem_free(sc->rx_bd_chain_tag, 2106 sc->rx_bd_chain[i], 2107 sc->rx_bd_chain_map[i]); 2108 } 2109 } 2110 bus_dma_tag_destroy(sc->rx_bd_chain_tag); 2111 } 2112 2113 /* Destroy the TX mbuf DMA stuffs. */ 2114 if (sc->tx_mbuf_tag != NULL) { 2115 for (i = 0; i < TOTAL_TX_BD(sc); i++) { 2116 /* Must have been unloaded in bce_stop() */ 2117 KKASSERT(sc->tx_mbuf_ptr[i] == NULL); 2118 bus_dmamap_destroy(sc->tx_mbuf_tag, 2119 sc->tx_mbuf_map[i]); 2120 } 2121 bus_dma_tag_destroy(sc->tx_mbuf_tag); 2122 } 2123 2124 /* Destroy the RX mbuf DMA stuffs. 
*/ 2125 if (sc->rx_mbuf_tag != NULL) { 2126 for (i = 0; i < TOTAL_RX_BD(sc); i++) { 2127 /* Must have been unloaded in bce_stop() */ 2128 KKASSERT(sc->rx_mbuf_ptr[i] == NULL); 2129 bus_dmamap_destroy(sc->rx_mbuf_tag, 2130 sc->rx_mbuf_map[i]); 2131 } 2132 bus_dmamap_destroy(sc->rx_mbuf_tag, sc->rx_mbuf_tmpmap); 2133 bus_dma_tag_destroy(sc->rx_mbuf_tag); 2134 } 2135 2136 /* Destroy the parent tag */ 2137 if (sc->parent_tag != NULL) 2138 bus_dma_tag_destroy(sc->parent_tag); 2139 2140 if (sc->tx_bd_chain_map != NULL) 2141 kfree(sc->tx_bd_chain_map, M_DEVBUF); 2142 if (sc->tx_bd_chain != NULL) 2143 kfree(sc->tx_bd_chain, M_DEVBUF); 2144 if (sc->tx_bd_chain_paddr != NULL) 2145 kfree(sc->tx_bd_chain_paddr, M_DEVBUF); 2146 2147 if (sc->rx_bd_chain_map != NULL) 2148 kfree(sc->rx_bd_chain_map, M_DEVBUF); 2149 if (sc->rx_bd_chain != NULL) 2150 kfree(sc->rx_bd_chain, M_DEVBUF); 2151 if (sc->rx_bd_chain_paddr != NULL) 2152 kfree(sc->rx_bd_chain_paddr, M_DEVBUF); 2153 2154 if (sc->tx_mbuf_map != NULL) 2155 kfree(sc->tx_mbuf_map, M_DEVBUF); 2156 if (sc->tx_mbuf_ptr != NULL) 2157 kfree(sc->tx_mbuf_ptr, M_DEVBUF); 2158 2159 if (sc->rx_mbuf_map != NULL) 2160 kfree(sc->rx_mbuf_map, M_DEVBUF); 2161 if (sc->rx_mbuf_ptr != NULL) 2162 kfree(sc->rx_mbuf_ptr, M_DEVBUF); 2163 if (sc->rx_mbuf_paddr != NULL) 2164 kfree(sc->rx_mbuf_paddr, M_DEVBUF); 2165 } 2166 2167 2168 /****************************************************************************/ 2169 /* Get DMA memory from the OS. */ 2170 /* */ 2171 /* Validates that the OS has provided DMA buffers in response to a */ 2172 /* bus_dmamap_load() call and saves the physical address of those buffers. */ 2173 /* When the callback is used the OS will return 0 for the mapping function */ 2174 /* (bus_dmamap_load()) so we use the value of map_arg->maxsegs to pass any */ 2175 /* failures back to the caller. */ 2176 /* */ 2177 /* Returns: */ 2178 /* Nothing. 
 */
/****************************************************************************/
static void
bce_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	/*
	 * bus_dmamap_load() callback: 'arg' is the caller's bus_addr_t
	 * slot which receives the single segment's physical address.
	 */
	bus_addr_t *busaddr = arg;

	/*
	 * Simulate a mapping failure (debug builds only).
	 * XXX not correct.
	 */
	DBRUNIF(DB_RANDOMTRUE(bce_debug_dma_map_addr_failure),
		kprintf("bce: %s(%d): Simulating DMA mapping error.\n",
			__FILE__, __LINE__);
		error = ENOMEM);

	/*
	 * On error, return without touching *busaddr; the caller
	 * presumably detects the failure through bus_dmamap_load()'s
	 * own return value (see the callers in bce_dma_alloc()).
	 */
	if (error)
		return;

	/* All loads through this callback expect exactly one segment. */
	KASSERT(nseg == 1, ("only one segment is allowed"));
	*busaddr = segs->ds_addr;
}


/****************************************************************************/
/* Allocate any DMA memory needed by the driver.                            */
/*                                                                          */
/* Allocates DMA memory needed for the various global structures needed by */
/* hardware.                                                                */
/*                                                                          */
/* Memory alignment requirements:                                           */
/* -----------------+----------+----------+----------+----------+           */
/*  Data Structure  |   5706   |   5708   |   5709   |   5716   |           */
/* -----------------+----------+----------+----------+----------+           */
/* Status Block     | 8 bytes  | 8 bytes  | 16 bytes | 16 bytes |           */
/* Statistics Block | 8 bytes  | 8 bytes  | 16 bytes | 16 bytes |           */
/* RX Buffers       | 16 bytes | 16 bytes | 16 bytes | 16 bytes |           */
/* PG Buffers       |   none   |   none   |   none   |   none   |           */
/* TX Buffers       |   none   |   none   |   none   |   none   |           */
/* Chain Pages(1)   |   4KiB   |   4KiB   |   4KiB   |   4KiB   |           */
/* Context Pages(1) |   N/A    |   N/A    |   4KiB   |   4KiB   |           */
/* -----------------+----------+----------+----------+----------+           */
/*                                                                          */
/* (1) Must align with CPU page size (BCM_PAGE_SIZE).                       */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.
*/ 2226 /****************************************************************************/ 2227 static int 2228 bce_dma_alloc(struct bce_softc *sc) 2229 { 2230 struct ifnet *ifp = &sc->arpcom.ac_if; 2231 int i, j, rc = 0, pages; 2232 bus_addr_t busaddr, max_busaddr; 2233 bus_size_t status_align, stats_align; 2234 2235 pages = device_getenv_int(sc->bce_dev, "rx_pages", bce_rx_pages); 2236 if (pages <= 0 || pages > RX_PAGES_MAX || !powerof2(pages)) { 2237 device_printf(sc->bce_dev, "invalid # of RX pages\n"); 2238 pages = RX_PAGES_DEFAULT; 2239 } 2240 sc->rx_pages = pages; 2241 2242 pages = device_getenv_int(sc->bce_dev, "tx_pages", bce_tx_pages); 2243 if (pages <= 0 || pages > TX_PAGES_MAX || !powerof2(pages)) { 2244 device_printf(sc->bce_dev, "invalid # of TX pages\n"); 2245 pages = TX_PAGES_DEFAULT; 2246 } 2247 sc->tx_pages = pages; 2248 2249 sc->tx_bd_chain_map = kmalloc(sizeof(bus_dmamap_t) * sc->tx_pages, 2250 M_DEVBUF, M_WAITOK | M_ZERO); 2251 sc->tx_bd_chain = kmalloc(sizeof(struct tx_bd *) * sc->tx_pages, 2252 M_DEVBUF, M_WAITOK | M_ZERO); 2253 sc->tx_bd_chain_paddr = kmalloc(sizeof(bus_addr_t) * sc->tx_pages, 2254 M_DEVBUF, M_WAITOK | M_ZERO); 2255 2256 sc->rx_bd_chain_map = kmalloc(sizeof(bus_dmamap_t) * sc->rx_pages, 2257 M_DEVBUF, M_WAITOK | M_ZERO); 2258 sc->rx_bd_chain = kmalloc(sizeof(struct rx_bd *) * sc->rx_pages, 2259 M_DEVBUF, M_WAITOK | M_ZERO); 2260 sc->rx_bd_chain_paddr = kmalloc(sizeof(bus_addr_t) * sc->rx_pages, 2261 M_DEVBUF, M_WAITOK | M_ZERO); 2262 2263 sc->tx_mbuf_map = kmalloc(sizeof(bus_dmamap_t) * TOTAL_TX_BD(sc), 2264 M_DEVBUF, M_WAITOK | M_ZERO); 2265 sc->tx_mbuf_ptr = kmalloc(sizeof(struct mbuf *) * TOTAL_TX_BD(sc), 2266 M_DEVBUF, M_WAITOK | M_ZERO); 2267 2268 sc->rx_mbuf_map = kmalloc(sizeof(bus_dmamap_t) * TOTAL_RX_BD(sc), 2269 M_DEVBUF, M_WAITOK | M_ZERO); 2270 sc->rx_mbuf_ptr = kmalloc(sizeof(struct mbuf *) * TOTAL_RX_BD(sc), 2271 M_DEVBUF, M_WAITOK | M_ZERO); 2272 sc->rx_mbuf_paddr = kmalloc(sizeof(bus_addr_t) * TOTAL_RX_BD(sc), 
2273 M_DEVBUF, M_WAITOK | M_ZERO); 2274 2275 /* 2276 * The embedded PCIe to PCI-X bridge (EPB) 2277 * in the 5708 cannot address memory above 2278 * 40 bits (E7_5708CB1_23043 & E6_5708SB1_23043). 2279 */ 2280 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708) 2281 max_busaddr = BCE_BUS_SPACE_MAXADDR; 2282 else 2283 max_busaddr = BUS_SPACE_MAXADDR; 2284 2285 /* 2286 * BCM5709 and BCM5716 uses host memory as cache for context memory. 2287 */ 2288 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 || 2289 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) { 2290 sc->ctx_pages = BCE_CTX_BLK_SZ / BCM_PAGE_SIZE; 2291 if (sc->ctx_pages == 0) 2292 sc->ctx_pages = 1; 2293 if (sc->ctx_pages > BCE_CTX_PAGES) { 2294 device_printf(sc->bce_dev, "excessive ctx pages %d\n", 2295 sc->ctx_pages); 2296 return ENOMEM; 2297 } 2298 status_align = 16; 2299 stats_align = 16; 2300 } else { 2301 status_align = 8; 2302 stats_align = 8; 2303 } 2304 2305 /* 2306 * Allocate the parent bus DMA tag appropriate for PCI. 2307 */ 2308 rc = bus_dma_tag_create(NULL, 1, BCE_DMA_BOUNDARY, 2309 max_busaddr, BUS_SPACE_MAXADDR, 2310 NULL, NULL, 2311 BUS_SPACE_MAXSIZE_32BIT, 0, 2312 BUS_SPACE_MAXSIZE_32BIT, 2313 0, &sc->parent_tag); 2314 if (rc != 0) { 2315 if_printf(ifp, "Could not allocate parent DMA tag!\n"); 2316 return rc; 2317 } 2318 2319 /* 2320 * Allocate status block. 2321 */ 2322 sc->status_block = bus_dmamem_coherent_any(sc->parent_tag, 2323 status_align, BCE_STATUS_BLK_SZ, 2324 BUS_DMA_WAITOK | BUS_DMA_ZERO, 2325 &sc->status_tag, &sc->status_map, 2326 &sc->status_block_paddr); 2327 if (sc->status_block == NULL) { 2328 if_printf(ifp, "Could not allocate status block!\n"); 2329 return ENOMEM; 2330 } 2331 2332 /* 2333 * Allocate statistics block. 
2334 */ 2335 sc->stats_block = bus_dmamem_coherent_any(sc->parent_tag, 2336 stats_align, BCE_STATS_BLK_SZ, 2337 BUS_DMA_WAITOK | BUS_DMA_ZERO, 2338 &sc->stats_tag, &sc->stats_map, 2339 &sc->stats_block_paddr); 2340 if (sc->stats_block == NULL) { 2341 if_printf(ifp, "Could not allocate statistics block!\n"); 2342 return ENOMEM; 2343 } 2344 2345 /* 2346 * Allocate context block, if needed 2347 */ 2348 if (sc->ctx_pages != 0) { 2349 rc = bus_dma_tag_create(sc->parent_tag, BCM_PAGE_SIZE, 0, 2350 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 2351 NULL, NULL, 2352 BCM_PAGE_SIZE, 1, BCM_PAGE_SIZE, 2353 0, &sc->ctx_tag); 2354 if (rc != 0) { 2355 if_printf(ifp, "Could not allocate " 2356 "context block DMA tag!\n"); 2357 return rc; 2358 } 2359 2360 for (i = 0; i < sc->ctx_pages; i++) { 2361 rc = bus_dmamem_alloc(sc->ctx_tag, 2362 (void **)&sc->ctx_block[i], 2363 BUS_DMA_WAITOK | BUS_DMA_ZERO | 2364 BUS_DMA_COHERENT, 2365 &sc->ctx_map[i]); 2366 if (rc != 0) { 2367 if_printf(ifp, "Could not allocate %dth context " 2368 "DMA memory!\n", i); 2369 return rc; 2370 } 2371 2372 rc = bus_dmamap_load(sc->ctx_tag, sc->ctx_map[i], 2373 sc->ctx_block[i], BCM_PAGE_SIZE, 2374 bce_dma_map_addr, &busaddr, 2375 BUS_DMA_WAITOK); 2376 if (rc != 0) { 2377 if (rc == EINPROGRESS) { 2378 panic("%s coherent memory loading " 2379 "is still in progress!", ifp->if_xname); 2380 } 2381 if_printf(ifp, "Could not map %dth context " 2382 "DMA memory!\n", i); 2383 bus_dmamem_free(sc->ctx_tag, sc->ctx_block[i], 2384 sc->ctx_map[i]); 2385 sc->ctx_block[i] = NULL; 2386 return rc; 2387 } 2388 sc->ctx_paddr[i] = busaddr; 2389 } 2390 } 2391 2392 /* 2393 * Create a DMA tag for the TX buffer descriptor chain, 2394 * allocate and clear the memory, and fetch the 2395 * physical address of the block. 
2396 */ 2397 rc = bus_dma_tag_create(sc->parent_tag, BCM_PAGE_SIZE, 0, 2398 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 2399 NULL, NULL, 2400 BCE_TX_CHAIN_PAGE_SZ, 1, BCE_TX_CHAIN_PAGE_SZ, 2401 0, &sc->tx_bd_chain_tag); 2402 if (rc != 0) { 2403 if_printf(ifp, "Could not allocate " 2404 "TX descriptor chain DMA tag!\n"); 2405 return rc; 2406 } 2407 2408 for (i = 0; i < sc->tx_pages; i++) { 2409 rc = bus_dmamem_alloc(sc->tx_bd_chain_tag, 2410 (void **)&sc->tx_bd_chain[i], 2411 BUS_DMA_WAITOK | BUS_DMA_ZERO | 2412 BUS_DMA_COHERENT, 2413 &sc->tx_bd_chain_map[i]); 2414 if (rc != 0) { 2415 if_printf(ifp, "Could not allocate %dth TX descriptor " 2416 "chain DMA memory!\n", i); 2417 return rc; 2418 } 2419 2420 rc = bus_dmamap_load(sc->tx_bd_chain_tag, 2421 sc->tx_bd_chain_map[i], 2422 sc->tx_bd_chain[i], BCE_TX_CHAIN_PAGE_SZ, 2423 bce_dma_map_addr, &busaddr, 2424 BUS_DMA_WAITOK); 2425 if (rc != 0) { 2426 if (rc == EINPROGRESS) { 2427 panic("%s coherent memory loading " 2428 "is still in progress!", ifp->if_xname); 2429 } 2430 if_printf(ifp, "Could not map %dth TX descriptor " 2431 "chain DMA memory!\n", i); 2432 bus_dmamem_free(sc->tx_bd_chain_tag, 2433 sc->tx_bd_chain[i], 2434 sc->tx_bd_chain_map[i]); 2435 sc->tx_bd_chain[i] = NULL; 2436 return rc; 2437 } 2438 2439 sc->tx_bd_chain_paddr[i] = busaddr; 2440 /* DRC - Fix for 64 bit systems. */ 2441 DBPRINT(sc, BCE_INFO, "tx_bd_chain_paddr[%d] = 0x%08X\n", 2442 i, (uint32_t)sc->tx_bd_chain_paddr[i]); 2443 } 2444 2445 /* Create a DMA tag for TX mbufs. */ 2446 rc = bus_dma_tag_create(sc->parent_tag, 1, 0, 2447 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 2448 NULL, NULL, 2449 IP_MAXPACKET + sizeof(struct ether_vlan_header), 2450 BCE_MAX_SEGMENTS, PAGE_SIZE, 2451 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | 2452 BUS_DMA_ONEBPAGE, 2453 &sc->tx_mbuf_tag); 2454 if (rc != 0) { 2455 if_printf(ifp, "Could not allocate TX mbuf DMA tag!\n"); 2456 return rc; 2457 } 2458 2459 /* Create DMA maps for the TX mbufs clusters. 
*/ 2460 for (i = 0; i < TOTAL_TX_BD(sc); i++) { 2461 rc = bus_dmamap_create(sc->tx_mbuf_tag, 2462 BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE, 2463 &sc->tx_mbuf_map[i]); 2464 if (rc != 0) { 2465 for (j = 0; j < i; ++j) { 2466 bus_dmamap_destroy(sc->tx_mbuf_tag, 2467 sc->tx_mbuf_map[i]); 2468 } 2469 bus_dma_tag_destroy(sc->tx_mbuf_tag); 2470 sc->tx_mbuf_tag = NULL; 2471 2472 if_printf(ifp, "Unable to create " 2473 "%dth TX mbuf DMA map!\n", i); 2474 return rc; 2475 } 2476 } 2477 2478 /* 2479 * Create a DMA tag for the RX buffer descriptor chain, 2480 * allocate and clear the memory, and fetch the physical 2481 * address of the blocks. 2482 */ 2483 rc = bus_dma_tag_create(sc->parent_tag, BCM_PAGE_SIZE, 0, 2484 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 2485 NULL, NULL, 2486 BCE_RX_CHAIN_PAGE_SZ, 1, BCE_RX_CHAIN_PAGE_SZ, 2487 0, &sc->rx_bd_chain_tag); 2488 if (rc != 0) { 2489 if_printf(ifp, "Could not allocate " 2490 "RX descriptor chain DMA tag!\n"); 2491 return rc; 2492 } 2493 2494 for (i = 0; i < sc->rx_pages; i++) { 2495 rc = bus_dmamem_alloc(sc->rx_bd_chain_tag, 2496 (void **)&sc->rx_bd_chain[i], 2497 BUS_DMA_WAITOK | BUS_DMA_ZERO | 2498 BUS_DMA_COHERENT, 2499 &sc->rx_bd_chain_map[i]); 2500 if (rc != 0) { 2501 if_printf(ifp, "Could not allocate %dth RX descriptor " 2502 "chain DMA memory!\n", i); 2503 return rc; 2504 } 2505 2506 rc = bus_dmamap_load(sc->rx_bd_chain_tag, 2507 sc->rx_bd_chain_map[i], 2508 sc->rx_bd_chain[i], BCE_RX_CHAIN_PAGE_SZ, 2509 bce_dma_map_addr, &busaddr, 2510 BUS_DMA_WAITOK); 2511 if (rc != 0) { 2512 if (rc == EINPROGRESS) { 2513 panic("%s coherent memory loading " 2514 "is still in progress!", ifp->if_xname); 2515 } 2516 if_printf(ifp, "Could not map %dth RX descriptor " 2517 "chain DMA memory!\n", i); 2518 bus_dmamem_free(sc->rx_bd_chain_tag, 2519 sc->rx_bd_chain[i], 2520 sc->rx_bd_chain_map[i]); 2521 sc->rx_bd_chain[i] = NULL; 2522 return rc; 2523 } 2524 2525 sc->rx_bd_chain_paddr[i] = busaddr; 2526 /* DRC - Fix for 64 bit systems. 
*/ 2527 DBPRINT(sc, BCE_INFO, "rx_bd_chain_paddr[%d] = 0x%08X\n", 2528 i, (uint32_t)sc->rx_bd_chain_paddr[i]); 2529 } 2530 2531 /* Create a DMA tag for RX mbufs. */ 2532 rc = bus_dma_tag_create(sc->parent_tag, BCE_DMA_RX_ALIGN, 0, 2533 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 2534 NULL, NULL, 2535 MCLBYTES, 1, MCLBYTES, 2536 BUS_DMA_ALLOCNOW | BUS_DMA_ALIGNED | 2537 BUS_DMA_WAITOK, 2538 &sc->rx_mbuf_tag); 2539 if (rc != 0) { 2540 if_printf(ifp, "Could not allocate RX mbuf DMA tag!\n"); 2541 return rc; 2542 } 2543 2544 /* Create tmp DMA map for RX mbuf clusters. */ 2545 rc = bus_dmamap_create(sc->rx_mbuf_tag, BUS_DMA_WAITOK, 2546 &sc->rx_mbuf_tmpmap); 2547 if (rc != 0) { 2548 bus_dma_tag_destroy(sc->rx_mbuf_tag); 2549 sc->rx_mbuf_tag = NULL; 2550 2551 if_printf(ifp, "Could not create RX mbuf tmp DMA map!\n"); 2552 return rc; 2553 } 2554 2555 /* Create DMA maps for the RX mbuf clusters. */ 2556 for (i = 0; i < TOTAL_RX_BD(sc); i++) { 2557 rc = bus_dmamap_create(sc->rx_mbuf_tag, BUS_DMA_WAITOK, 2558 &sc->rx_mbuf_map[i]); 2559 if (rc != 0) { 2560 for (j = 0; j < i; ++j) { 2561 bus_dmamap_destroy(sc->rx_mbuf_tag, 2562 sc->rx_mbuf_map[j]); 2563 } 2564 bus_dma_tag_destroy(sc->rx_mbuf_tag); 2565 sc->rx_mbuf_tag = NULL; 2566 2567 if_printf(ifp, "Unable to create " 2568 "%dth RX mbuf DMA map!\n", i); 2569 return rc; 2570 } 2571 } 2572 return 0; 2573 } 2574 2575 2576 /****************************************************************************/ 2577 /* Firmware synchronization. */ 2578 /* */ 2579 /* Before performing certain events such as a chip reset, synchronize with */ 2580 /* the firmware first. */ 2581 /* */ 2582 /* Returns: */ 2583 /* 0 for success, positive value for failure. */ 2584 /****************************************************************************/ 2585 static int 2586 bce_fw_sync(struct bce_softc *sc, uint32_t msg_data) 2587 { 2588 int i, rc = 0; 2589 uint32_t val; 2590 2591 /* Don't waste any time if we've timed out before. 
*/ 2592 if (sc->bce_fw_timed_out) 2593 return EBUSY; 2594 2595 /* Increment the message sequence number. */ 2596 sc->bce_fw_wr_seq++; 2597 msg_data |= sc->bce_fw_wr_seq; 2598 2599 DBPRINT(sc, BCE_VERBOSE, "bce_fw_sync(): msg_data = 0x%08X\n", msg_data); 2600 2601 /* Send the message to the bootcode driver mailbox. */ 2602 bce_shmem_wr(sc, BCE_DRV_MB, msg_data); 2603 2604 /* Wait for the bootcode to acknowledge the message. */ 2605 for (i = 0; i < FW_ACK_TIME_OUT_MS; i++) { 2606 /* Check for a response in the bootcode firmware mailbox. */ 2607 val = bce_shmem_rd(sc, BCE_FW_MB); 2608 if ((val & BCE_FW_MSG_ACK) == (msg_data & BCE_DRV_MSG_SEQ)) 2609 break; 2610 DELAY(1000); 2611 } 2612 2613 /* If we've timed out, tell the bootcode that we've stopped waiting. */ 2614 if ((val & BCE_FW_MSG_ACK) != (msg_data & BCE_DRV_MSG_SEQ) && 2615 (msg_data & BCE_DRV_MSG_DATA) != BCE_DRV_MSG_DATA_WAIT0) { 2616 if_printf(&sc->arpcom.ac_if, 2617 "Firmware synchronization timeout! " 2618 "msg_data = 0x%08X\n", msg_data); 2619 2620 msg_data &= ~BCE_DRV_MSG_CODE; 2621 msg_data |= BCE_DRV_MSG_CODE_FW_TIMEOUT; 2622 2623 bce_shmem_wr(sc, BCE_DRV_MB, msg_data); 2624 2625 sc->bce_fw_timed_out = 1; 2626 rc = EBUSY; 2627 } 2628 return rc; 2629 } 2630 2631 2632 /****************************************************************************/ 2633 /* Load Receive Virtual 2 Physical (RV2P) processor firmware. */ 2634 /* */ 2635 /* Returns: */ 2636 /* Nothing. 
 */
/****************************************************************************/
static void
bce_load_rv2p_fw(struct bce_softc *sc, uint32_t *rv2p_code,
    uint32_t rv2p_code_len, uint32_t rv2p_proc)
{
	int i;
	uint32_t val;

	/*
	 * Each RV2P instruction is 8 bytes: write the high then low words
	 * into the instruction registers, then commit them to instruction
	 * slot i/8 of the selected processor (PROC1 or PROC2).
	 */
	for (i = 0; i < rv2p_code_len; i += 8) {
		REG_WR(sc, BCE_RV2P_INSTR_HIGH, *rv2p_code);
		rv2p_code++;
		REG_WR(sc, BCE_RV2P_INSTR_LOW, *rv2p_code);
		rv2p_code++;

		if (rv2p_proc == RV2P_PROC1) {
			val = (i / 8) | BCE_RV2P_PROC1_ADDR_CMD_RDWR;
			REG_WR(sc, BCE_RV2P_PROC1_ADDR_CMD, val);
		} else {
			val = (i / 8) | BCE_RV2P_PROC2_ADDR_CMD_RDWR;
			REG_WR(sc, BCE_RV2P_PROC2_ADDR_CMD, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1)
		REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC1_RESET);
	else
		REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC2_RESET);
}


/****************************************************************************/
/* Load RISC processor firmware.                                            */
/*                                                                          */
/* Loads firmware from the file if_bcefw.h into the scratchpad memory      */
/* associated with a particular processor.                                  */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_load_cpu_fw(struct bce_softc *sc, struct cpu_reg *cpu_reg,
    struct fw_info *fw)
{
	uint32_t offset;
	int j;

	/* Halt the CPU before touching its scratchpad memory. */
	bce_halt_cpu(sc, cpu_reg);

	/*
	 * Each section below is copied word-by-word into the processor's
	 * scratchpad; the section's MIPS virtual address is translated to
	 * a scratchpad offset by subtracting mips_view_base.
	 */

	/* Load the Text area. */
	offset = cpu_reg->spad_base +
	    (fw->text_addr - cpu_reg->mips_view_base);
	if (fw->text) {
		for (j = 0; j < (fw->text_len / 4); j++, offset += 4)
			REG_WR_IND(sc, offset, fw->text[j]);
	}

	/* Load the Data area. */
	offset = cpu_reg->spad_base +
	    (fw->data_addr - cpu_reg->mips_view_base);
	if (fw->data) {
		for (j = 0; j < (fw->data_len / 4); j++, offset += 4)
			REG_WR_IND(sc, offset, fw->data[j]);
	}

	/* Load the SBSS area. */
	offset = cpu_reg->spad_base +
	    (fw->sbss_addr - cpu_reg->mips_view_base);
	if (fw->sbss) {
		for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4)
			REG_WR_IND(sc, offset, fw->sbss[j]);
	}

	/* Load the BSS area. */
	offset = cpu_reg->spad_base +
	    (fw->bss_addr - cpu_reg->mips_view_base);
	if (fw->bss) {
		for (j = 0; j < (fw->bss_len/4); j++, offset += 4)
			REG_WR_IND(sc, offset, fw->bss[j]);
	}

	/* Load the Read-Only area. */
	offset = cpu_reg->spad_base +
	    (fw->rodata_addr - cpu_reg->mips_view_base);
	if (fw->rodata) {
		for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4)
			REG_WR_IND(sc, offset, fw->rodata[j]);
	}

	/* Clear the pre-fetch instruction and set the FW start address. */
	REG_WR_IND(sc, cpu_reg->inst, 0);
	REG_WR_IND(sc, cpu_reg->pc, fw->start_addr);
}


/****************************************************************************/
/* Starts the RISC processor.                                               */
/*                                                                          */
/* Assumes the CPU starting address has already been set.                   */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_start_cpu(struct bce_softc *sc, struct cpu_reg *cpu_reg)
{
	uint32_t val;

	/* Start the CPU: clear the halt bit and any pending state. */
	val = REG_RD_IND(sc, cpu_reg->mode);
	val &= ~cpu_reg->mode_value_halt;
	REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
	REG_WR_IND(sc, cpu_reg->mode, val);
}


/****************************************************************************/
/* Halts the RISC processor.                                                */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_halt_cpu(struct bce_softc *sc, struct cpu_reg *cpu_reg)
{
	uint32_t val;

	/* Halt the CPU: set the halt bit, then clear pending state. */
	val = REG_RD_IND(sc, cpu_reg->mode);
	val |= cpu_reg->mode_value_halt;
	REG_WR_IND(sc, cpu_reg->mode, val);
	REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
}


/****************************************************************************/
/* Start the RX CPU.                                                        */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_start_rxp_cpu(struct bce_softc *sc)
{
	struct cpu_reg cpu_reg;

	/* Describe the RXP processor's register file for bce_start_cpu(). */
	cpu_reg.mode = BCE_RXP_CPU_MODE;
	cpu_reg.mode_value_halt = BCE_RXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BCE_RXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BCE_RXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BCE_RXP_CPU_REG_FILE;
	cpu_reg.evmask = BCE_RXP_CPU_EVENT_MASK;
	cpu_reg.pc = BCE_RXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BCE_RXP_CPU_INSTRUCTION;
	cpu_reg.bp = BCE_RXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BCE_RXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	bce_start_cpu(sc, &cpu_reg);
}


/****************************************************************************/
/* Initialize the RX CPU.                                                   */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
 */
/****************************************************************************/
static void
bce_init_rxp_cpu(struct bce_softc *sc)
{
	struct cpu_reg cpu_reg;
	struct fw_info fw;

	/* Describe the RXP processor's register file. */
	cpu_reg.mode = BCE_RXP_CPU_MODE;
	cpu_reg.mode_value_halt = BCE_RXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BCE_RXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BCE_RXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BCE_RXP_CPU_REG_FILE;
	cpu_reg.evmask = BCE_RXP_CPU_EVENT_MASK;
	cpu_reg.pc = BCE_RXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BCE_RXP_CPU_INSTRUCTION;
	cpu_reg.bp = BCE_RXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BCE_RXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	/* BCM5709/BCM5716 use the b09 firmware image; others use b06. */
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
		fw.ver_major = bce_RXP_b09FwReleaseMajor;
		fw.ver_minor = bce_RXP_b09FwReleaseMinor;
		fw.ver_fix = bce_RXP_b09FwReleaseFix;
		fw.start_addr = bce_RXP_b09FwStartAddr;

		fw.text_addr = bce_RXP_b09FwTextAddr;
		fw.text_len = bce_RXP_b09FwTextLen;
		fw.text_index = 0;
		fw.text = bce_RXP_b09FwText;

		fw.data_addr = bce_RXP_b09FwDataAddr;
		fw.data_len = bce_RXP_b09FwDataLen;
		fw.data_index = 0;
		fw.data = bce_RXP_b09FwData;

		fw.sbss_addr = bce_RXP_b09FwSbssAddr;
		fw.sbss_len = bce_RXP_b09FwSbssLen;
		fw.sbss_index = 0;
		fw.sbss = bce_RXP_b09FwSbss;

		fw.bss_addr = bce_RXP_b09FwBssAddr;
		fw.bss_len = bce_RXP_b09FwBssLen;
		fw.bss_index = 0;
		fw.bss = bce_RXP_b09FwBss;

		fw.rodata_addr = bce_RXP_b09FwRodataAddr;
		fw.rodata_len = bce_RXP_b09FwRodataLen;
		fw.rodata_index = 0;
		fw.rodata = bce_RXP_b09FwRodata;
	} else {
		fw.ver_major = bce_RXP_b06FwReleaseMajor;
		fw.ver_minor = bce_RXP_b06FwReleaseMinor;
		fw.ver_fix = bce_RXP_b06FwReleaseFix;
		fw.start_addr = bce_RXP_b06FwStartAddr;

		fw.text_addr = bce_RXP_b06FwTextAddr;
		fw.text_len = bce_RXP_b06FwTextLen;
		fw.text_index = 0;
		fw.text = bce_RXP_b06FwText;

		fw.data_addr = bce_RXP_b06FwDataAddr;
		fw.data_len = bce_RXP_b06FwDataLen;
		fw.data_index = 0;
		fw.data = bce_RXP_b06FwData;

		fw.sbss_addr = bce_RXP_b06FwSbssAddr;
		fw.sbss_len = bce_RXP_b06FwSbssLen;
		fw.sbss_index = 0;
		fw.sbss = bce_RXP_b06FwSbss;

		fw.bss_addr = bce_RXP_b06FwBssAddr;
		fw.bss_len = bce_RXP_b06FwBssLen;
		fw.bss_index = 0;
		fw.bss = bce_RXP_b06FwBss;

		fw.rodata_addr = bce_RXP_b06FwRodataAddr;
		fw.rodata_len = bce_RXP_b06FwRodataLen;
		fw.rodata_index = 0;
		fw.rodata = bce_RXP_b06FwRodata;
	}

	DBPRINT(sc, BCE_INFO_RESET, "Loading RX firmware.\n");
	bce_load_cpu_fw(sc, &cpu_reg, &fw);
	/* Delay RXP start until initialization is complete. */
}


/****************************************************************************/
/* Initialize the TX CPU.                                                   */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
 */
/****************************************************************************/
static void
bce_init_txp_cpu(struct bce_softc *sc)
{
	struct cpu_reg cpu_reg;
	struct fw_info fw;

	/* Describe the TXP processor's register file. */
	cpu_reg.mode = BCE_TXP_CPU_MODE;
	cpu_reg.mode_value_halt = BCE_TXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BCE_TXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BCE_TXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BCE_TXP_CPU_REG_FILE;
	cpu_reg.evmask = BCE_TXP_CPU_EVENT_MASK;
	cpu_reg.pc = BCE_TXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BCE_TXP_CPU_INSTRUCTION;
	cpu_reg.bp = BCE_TXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BCE_TXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	/* BCM5709/BCM5716 use the b09 firmware image; others use b06. */
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
		fw.ver_major = bce_TXP_b09FwReleaseMajor;
		fw.ver_minor = bce_TXP_b09FwReleaseMinor;
		fw.ver_fix = bce_TXP_b09FwReleaseFix;
		fw.start_addr = bce_TXP_b09FwStartAddr;

		fw.text_addr = bce_TXP_b09FwTextAddr;
		fw.text_len = bce_TXP_b09FwTextLen;
		fw.text_index = 0;
		fw.text = bce_TXP_b09FwText;

		fw.data_addr = bce_TXP_b09FwDataAddr;
		fw.data_len = bce_TXP_b09FwDataLen;
		fw.data_index = 0;
		fw.data = bce_TXP_b09FwData;

		fw.sbss_addr = bce_TXP_b09FwSbssAddr;
		fw.sbss_len = bce_TXP_b09FwSbssLen;
		fw.sbss_index = 0;
		fw.sbss = bce_TXP_b09FwSbss;

		fw.bss_addr = bce_TXP_b09FwBssAddr;
		fw.bss_len = bce_TXP_b09FwBssLen;
		fw.bss_index = 0;
		fw.bss = bce_TXP_b09FwBss;

		fw.rodata_addr = bce_TXP_b09FwRodataAddr;
		fw.rodata_len = bce_TXP_b09FwRodataLen;
		fw.rodata_index = 0;
		fw.rodata = bce_TXP_b09FwRodata;
	} else {
		fw.ver_major = bce_TXP_b06FwReleaseMajor;
		fw.ver_minor = bce_TXP_b06FwReleaseMinor;
		fw.ver_fix = bce_TXP_b06FwReleaseFix;
		fw.start_addr = bce_TXP_b06FwStartAddr;

		fw.text_addr = bce_TXP_b06FwTextAddr;
		fw.text_len = bce_TXP_b06FwTextLen;
		fw.text_index = 0;
		fw.text = bce_TXP_b06FwText;

		fw.data_addr = bce_TXP_b06FwDataAddr;
		fw.data_len = bce_TXP_b06FwDataLen;
		fw.data_index = 0;
		fw.data = bce_TXP_b06FwData;

		fw.sbss_addr = bce_TXP_b06FwSbssAddr;
		fw.sbss_len = bce_TXP_b06FwSbssLen;
		fw.sbss_index = 0;
		fw.sbss = bce_TXP_b06FwSbss;

		fw.bss_addr = bce_TXP_b06FwBssAddr;
		fw.bss_len = bce_TXP_b06FwBssLen;
		fw.bss_index = 0;
		fw.bss = bce_TXP_b06FwBss;

		fw.rodata_addr = bce_TXP_b06FwRodataAddr;
		fw.rodata_len = bce_TXP_b06FwRodataLen;
		fw.rodata_index = 0;
		fw.rodata = bce_TXP_b06FwRodata;
	}

	DBPRINT(sc, BCE_INFO_RESET, "Loading TX firmware.\n");
	bce_load_cpu_fw(sc, &cpu_reg, &fw);
	/* Unlike RXP, the TX processor is started immediately. */
	bce_start_cpu(sc, &cpu_reg);
}


/****************************************************************************/
/* Initialize the TPAT CPU.                                                 */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
 */
/****************************************************************************/
static void
bce_init_tpat_cpu(struct bce_softc *sc)
{
	struct cpu_reg cpu_reg;
	struct fw_info fw;

	/* Describe the TPAT processor's control/firmware-load register window. */
	cpu_reg.mode = BCE_TPAT_CPU_MODE;
	cpu_reg.mode_value_halt = BCE_TPAT_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BCE_TPAT_CPU_MODE_STEP_ENA;
	cpu_reg.state = BCE_TPAT_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BCE_TPAT_CPU_REG_FILE;
	cpu_reg.evmask = BCE_TPAT_CPU_EVENT_MASK;
	cpu_reg.pc = BCE_TPAT_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BCE_TPAT_CPU_INSTRUCTION;
	cpu_reg.bp = BCE_TPAT_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BCE_TPAT_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	/* 5709/5716 parts use the b09 firmware image; all others use b06. */
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
		fw.ver_major = bce_TPAT_b09FwReleaseMajor;
		fw.ver_minor = bce_TPAT_b09FwReleaseMinor;
		fw.ver_fix = bce_TPAT_b09FwReleaseFix;
		fw.start_addr = bce_TPAT_b09FwStartAddr;

		fw.text_addr = bce_TPAT_b09FwTextAddr;
		fw.text_len = bce_TPAT_b09FwTextLen;
		fw.text_index = 0;
		fw.text = bce_TPAT_b09FwText;

		fw.data_addr = bce_TPAT_b09FwDataAddr;
		fw.data_len = bce_TPAT_b09FwDataLen;
		fw.data_index = 0;
		fw.data = bce_TPAT_b09FwData;

		fw.sbss_addr = bce_TPAT_b09FwSbssAddr;
		fw.sbss_len = bce_TPAT_b09FwSbssLen;
		fw.sbss_index = 0;
		fw.sbss = bce_TPAT_b09FwSbss;

		fw.bss_addr = bce_TPAT_b09FwBssAddr;
		fw.bss_len = bce_TPAT_b09FwBssLen;
		fw.bss_index = 0;
		fw.bss = bce_TPAT_b09FwBss;

		fw.rodata_addr = bce_TPAT_b09FwRodataAddr;
		fw.rodata_len = bce_TPAT_b09FwRodataLen;
		fw.rodata_index = 0;
		fw.rodata = bce_TPAT_b09FwRodata;
	} else {
		fw.ver_major = bce_TPAT_b06FwReleaseMajor;
		fw.ver_minor = bce_TPAT_b06FwReleaseMinor;
		fw.ver_fix = bce_TPAT_b06FwReleaseFix;
		fw.start_addr = bce_TPAT_b06FwStartAddr;

		fw.text_addr = bce_TPAT_b06FwTextAddr;
		fw.text_len = bce_TPAT_b06FwTextLen;
		fw.text_index = 0;
		fw.text = bce_TPAT_b06FwText;

		fw.data_addr = bce_TPAT_b06FwDataAddr;
		fw.data_len = bce_TPAT_b06FwDataLen;
		fw.data_index = 0;
		fw.data = bce_TPAT_b06FwData;

		fw.sbss_addr = bce_TPAT_b06FwSbssAddr;
		fw.sbss_len = bce_TPAT_b06FwSbssLen;
		fw.sbss_index = 0;
		fw.sbss = bce_TPAT_b06FwSbss;

		fw.bss_addr = bce_TPAT_b06FwBssAddr;
		fw.bss_len = bce_TPAT_b06FwBssLen;
		fw.bss_index = 0;
		fw.bss = bce_TPAT_b06FwBss;

		fw.rodata_addr = bce_TPAT_b06FwRodataAddr;
		fw.rodata_len = bce_TPAT_b06FwRodataLen;
		fw.rodata_index = 0;
		fw.rodata = bce_TPAT_b06FwRodata;
	}

	/* Download the image, then take the CPU out of halt. */
	DBPRINT(sc, BCE_INFO_RESET, "Loading TPAT firmware.\n");
	bce_load_cpu_fw(sc, &cpu_reg, &fw);
	bce_start_cpu(sc, &cpu_reg);
}


/****************************************************************************/
/* Initialize the CP CPU.                                                   */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
 */
/****************************************************************************/
static void
bce_init_cp_cpu(struct bce_softc *sc)
{
	struct cpu_reg cpu_reg;
	struct fw_info fw;

	/* Describe the CP processor's control/firmware-load register window. */
	cpu_reg.mode = BCE_CP_CPU_MODE;
	cpu_reg.mode_value_halt = BCE_CP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BCE_CP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BCE_CP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BCE_CP_CPU_REG_FILE;
	cpu_reg.evmask = BCE_CP_CPU_EVENT_MASK;
	cpu_reg.pc = BCE_CP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BCE_CP_CPU_INSTRUCTION;
	cpu_reg.bp = BCE_CP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BCE_CP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	/* 5709/5716 parts use the b09 firmware image; all others use b06. */
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
		fw.ver_major = bce_CP_b09FwReleaseMajor;
		fw.ver_minor = bce_CP_b09FwReleaseMinor;
		fw.ver_fix = bce_CP_b09FwReleaseFix;
		fw.start_addr = bce_CP_b09FwStartAddr;

		fw.text_addr = bce_CP_b09FwTextAddr;
		fw.text_len = bce_CP_b09FwTextLen;
		fw.text_index = 0;
		fw.text = bce_CP_b09FwText;

		fw.data_addr = bce_CP_b09FwDataAddr;
		fw.data_len = bce_CP_b09FwDataLen;
		fw.data_index = 0;
		fw.data = bce_CP_b09FwData;

		fw.sbss_addr = bce_CP_b09FwSbssAddr;
		fw.sbss_len = bce_CP_b09FwSbssLen;
		fw.sbss_index = 0;
		fw.sbss = bce_CP_b09FwSbss;

		fw.bss_addr = bce_CP_b09FwBssAddr;
		fw.bss_len = bce_CP_b09FwBssLen;
		fw.bss_index = 0;
		fw.bss = bce_CP_b09FwBss;

		fw.rodata_addr = bce_CP_b09FwRodataAddr;
		fw.rodata_len = bce_CP_b09FwRodataLen;
		fw.rodata_index = 0;
		fw.rodata = bce_CP_b09FwRodata;
	} else {
		fw.ver_major = bce_CP_b06FwReleaseMajor;
		fw.ver_minor = bce_CP_b06FwReleaseMinor;
		fw.ver_fix = bce_CP_b06FwReleaseFix;
		fw.start_addr = bce_CP_b06FwStartAddr;

		fw.text_addr = bce_CP_b06FwTextAddr;
		fw.text_len = bce_CP_b06FwTextLen;
		fw.text_index = 0;
		fw.text = bce_CP_b06FwText;

		fw.data_addr = bce_CP_b06FwDataAddr;
		fw.data_len = bce_CP_b06FwDataLen;
		fw.data_index = 0;
		fw.data = bce_CP_b06FwData;

		fw.sbss_addr = bce_CP_b06FwSbssAddr;
		fw.sbss_len = bce_CP_b06FwSbssLen;
		fw.sbss_index = 0;
		fw.sbss = bce_CP_b06FwSbss;

		fw.bss_addr = bce_CP_b06FwBssAddr;
		fw.bss_len = bce_CP_b06FwBssLen;
		fw.bss_index = 0;
		fw.bss = bce_CP_b06FwBss;

		fw.rodata_addr = bce_CP_b06FwRodataAddr;
		fw.rodata_len = bce_CP_b06FwRodataLen;
		fw.rodata_index = 0;
		fw.rodata = bce_CP_b06FwRodata;
	}

	/* Download the image, then take the CPU out of halt. */
	DBPRINT(sc, BCE_INFO_RESET, "Loading CP firmware.\n");
	bce_load_cpu_fw(sc, &cpu_reg, &fw);
	bce_start_cpu(sc, &cpu_reg);
}


/****************************************************************************/
/* Initialize the COM CPU.                                                  */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
 */
/****************************************************************************/
static void
bce_init_com_cpu(struct bce_softc *sc)
{
	struct cpu_reg cpu_reg;
	struct fw_info fw;

	/* Describe the COM processor's control/firmware-load register window. */
	cpu_reg.mode = BCE_COM_CPU_MODE;
	cpu_reg.mode_value_halt = BCE_COM_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BCE_COM_CPU_MODE_STEP_ENA;
	cpu_reg.state = BCE_COM_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BCE_COM_CPU_REG_FILE;
	cpu_reg.evmask = BCE_COM_CPU_EVENT_MASK;
	cpu_reg.pc = BCE_COM_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BCE_COM_CPU_INSTRUCTION;
	cpu_reg.bp = BCE_COM_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BCE_COM_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	/* 5709/5716 parts use the b09 firmware image; all others use b06. */
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
		fw.ver_major = bce_COM_b09FwReleaseMajor;
		fw.ver_minor = bce_COM_b09FwReleaseMinor;
		fw.ver_fix = bce_COM_b09FwReleaseFix;
		fw.start_addr = bce_COM_b09FwStartAddr;

		fw.text_addr = bce_COM_b09FwTextAddr;
		fw.text_len = bce_COM_b09FwTextLen;
		fw.text_index = 0;
		fw.text = bce_COM_b09FwText;

		fw.data_addr = bce_COM_b09FwDataAddr;
		fw.data_len = bce_COM_b09FwDataLen;
		fw.data_index = 0;
		fw.data = bce_COM_b09FwData;

		fw.sbss_addr = bce_COM_b09FwSbssAddr;
		fw.sbss_len = bce_COM_b09FwSbssLen;
		fw.sbss_index = 0;
		fw.sbss = bce_COM_b09FwSbss;

		fw.bss_addr = bce_COM_b09FwBssAddr;
		fw.bss_len = bce_COM_b09FwBssLen;
		fw.bss_index = 0;
		fw.bss = bce_COM_b09FwBss;

		fw.rodata_addr = bce_COM_b09FwRodataAddr;
		fw.rodata_len = bce_COM_b09FwRodataLen;
		fw.rodata_index = 0;
		fw.rodata = bce_COM_b09FwRodata;
	} else {
		fw.ver_major = bce_COM_b06FwReleaseMajor;
		fw.ver_minor = bce_COM_b06FwReleaseMinor;
		fw.ver_fix = bce_COM_b06FwReleaseFix;
		fw.start_addr = bce_COM_b06FwStartAddr;

		fw.text_addr = bce_COM_b06FwTextAddr;
		fw.text_len = bce_COM_b06FwTextLen;
		fw.text_index = 0;
		fw.text = bce_COM_b06FwText;

		fw.data_addr = bce_COM_b06FwDataAddr;
		fw.data_len = bce_COM_b06FwDataLen;
		fw.data_index = 0;
		fw.data = bce_COM_b06FwData;

		fw.sbss_addr = bce_COM_b06FwSbssAddr;
		fw.sbss_len = bce_COM_b06FwSbssLen;
		fw.sbss_index = 0;
		fw.sbss = bce_COM_b06FwSbss;

		fw.bss_addr = bce_COM_b06FwBssAddr;
		fw.bss_len = bce_COM_b06FwBssLen;
		fw.bss_index = 0;
		fw.bss = bce_COM_b06FwBss;

		fw.rodata_addr = bce_COM_b06FwRodataAddr;
		fw.rodata_len = bce_COM_b06FwRodataLen;
		fw.rodata_index = 0;
		fw.rodata = bce_COM_b06FwRodata;
	}

	/* Download the image, then take the CPU out of halt. */
	DBPRINT(sc, BCE_INFO_RESET, "Loading COM firmware.\n");
	bce_load_cpu_fw(sc, &cpu_reg, &fw);
	bce_start_cpu(sc, &cpu_reg);
}


/****************************************************************************/
/* Initialize the RV2P, RX, TX, TPAT, COM, and CP CPUs.                     */
/*                                                                          */
/* Loads the firmware for each CPU and starts the CPU.                      */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
 */
/****************************************************************************/
static void
bce_init_cpus(struct bce_softc *sc)
{
	/*
	 * Select the RV2P microcode variant: 5709/5716 A-step parts use
	 * the xi90 images, later 5709/5716 steppings the xi images, and
	 * all other chips the base images.
	 */
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
		if (BCE_CHIP_REV(sc) == BCE_CHIP_REV_Ax) {
			bce_load_rv2p_fw(sc, bce_xi90_rv2p_proc1,
			    sizeof(bce_xi90_rv2p_proc1), RV2P_PROC1);
			bce_load_rv2p_fw(sc, bce_xi90_rv2p_proc2,
			    sizeof(bce_xi90_rv2p_proc2), RV2P_PROC2);
		} else {
			bce_load_rv2p_fw(sc, bce_xi_rv2p_proc1,
			    sizeof(bce_xi_rv2p_proc1), RV2P_PROC1);
			bce_load_rv2p_fw(sc, bce_xi_rv2p_proc2,
			    sizeof(bce_xi_rv2p_proc2), RV2P_PROC2);
		}
	} else {
		bce_load_rv2p_fw(sc, bce_rv2p_proc1,
		    sizeof(bce_rv2p_proc1), RV2P_PROC1);
		bce_load_rv2p_fw(sc, bce_rv2p_proc2,
		    sizeof(bce_rv2p_proc2), RV2P_PROC2);
	}

	/* Load firmware into, and start, each on-chip processor. */
	bce_init_rxp_cpu(sc);
	bce_init_txp_cpu(sc);
	bce_init_tpat_cpu(sc);
	bce_init_com_cpu(sc);
	bce_init_cp_cpu(sc);
}


/****************************************************************************/
/* Initialize context memory.                                               */
/*                                                                          */
/* Clears the memory associated with each Context ID (CID).                 */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, ETIMEDOUT for failure.                                  */
/****************************************************************************/
static int
bce_init_ctx(struct bce_softc *sc)
{
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
		/* DRC: Replace this constant value with a #define. */
		int i, retry_cnt = 10;
		uint32_t val;

		/*
		 * BCM5709 context memory may be cached
		 * in host memory so prepare the host memory
		 * for access.
		 */
		val = BCE_CTX_COMMAND_ENABLED | BCE_CTX_COMMAND_MEM_INIT |
		    (1 << 12);
		val |= (BCM_PAGE_BITS - 8) << 16;
		REG_WR(sc, BCE_CTX_COMMAND, val);

		/* Wait for mem init command to complete.
		 */
		for (i = 0; i < retry_cnt; i++) {
			val = REG_RD(sc, BCE_CTX_COMMAND);
			if (!(val & BCE_CTX_COMMAND_MEM_INIT))
				break;
			DELAY(2);
		}
		if (i == retry_cnt) {
			device_printf(sc->bce_dev,
			    "Context memory initialization failed!\n");
			return ETIMEDOUT;
		}

		for (i = 0; i < sc->ctx_pages; i++) {
			int j;

			/*
			 * Set the physical address of the context
			 * memory cache.
			 */
			REG_WR(sc, BCE_CTX_HOST_PAGE_TBL_DATA0,
			    BCE_ADDR_LO(sc->ctx_paddr[i] & 0xfffffff0) |
			    BCE_CTX_HOST_PAGE_TBL_DATA0_VALID);
			REG_WR(sc, BCE_CTX_HOST_PAGE_TBL_DATA1,
			    BCE_ADDR_HI(sc->ctx_paddr[i]));
			REG_WR(sc, BCE_CTX_HOST_PAGE_TBL_CTRL,
			    i | BCE_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);

			/*
			 * Verify that the context memory write was successful.
			 */
			for (j = 0; j < retry_cnt; j++) {
				val = REG_RD(sc, BCE_CTX_HOST_PAGE_TBL_CTRL);
				if ((val &
				    BCE_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) == 0)
					break;
				DELAY(5);
			}
			if (j == retry_cnt) {
				device_printf(sc->bce_dev,
				    "Failed to initialize context page!\n");
				return ETIMEDOUT;
			}
		}
	} else {
		uint32_t vcid_addr, offset;

		/*
		 * For the 5706/5708, context memory is local to
		 * the controller, so initialize the controller
		 * context memory.
		 */

		vcid_addr = GET_CID_ADDR(96);
		while (vcid_addr) {
			vcid_addr -= PHY_CTX_SIZE;

			REG_WR(sc, BCE_CTX_VIRT_ADDR, 0);
			REG_WR(sc, BCE_CTX_PAGE_TBL, vcid_addr);

			/* Zero the entire context page through the window. */
			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
				CTX_WR(sc, 0x00, offset, 0);

			REG_WR(sc, BCE_CTX_VIRT_ADDR, vcid_addr);
			REG_WR(sc, BCE_CTX_PAGE_TBL, vcid_addr);
		}
	}
	return 0;
}


/****************************************************************************/
/* Fetch the permanent MAC address of the controller.                       */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
 */
/****************************************************************************/
static void
bce_get_mac_addr(struct bce_softc *sc)
{
	uint32_t mac_lo = 0, mac_hi = 0;

	/*
	 * The NetXtreme II bootcode populates various NIC
	 * power-on and runtime configuration items in a
	 * shared memory area.  The factory configured MAC
	 * address is available from both NVRAM and the
	 * shared memory area so we'll read the value from
	 * shared memory for speed.
	 */

	mac_hi = bce_shmem_rd(sc, BCE_PORT_HW_CFG_MAC_UPPER);
	mac_lo = bce_shmem_rd(sc, BCE_PORT_HW_CFG_MAC_LOWER);

	if (mac_lo == 0 && mac_hi == 0) {
		if_printf(&sc->arpcom.ac_if, "Invalid Ethernet address!\n");
	} else {
		/* Unpack 2+4 shared-memory words into the 6-byte address. */
		sc->eaddr[0] = (u_char)(mac_hi >> 8);
		sc->eaddr[1] = (u_char)(mac_hi >> 0);
		sc->eaddr[2] = (u_char)(mac_lo >> 24);
		sc->eaddr[3] = (u_char)(mac_lo >> 16);
		sc->eaddr[4] = (u_char)(mac_lo >> 8);
		sc->eaddr[5] = (u_char)(mac_lo >> 0);
	}

	DBPRINT(sc, BCE_INFO, "Permanent Ethernet address = %6D\n", sc->eaddr, ":");
}


/****************************************************************************/
/* Program the MAC address.                                                 */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_set_mac_addr(struct bce_softc *sc)
{
	const uint8_t *mac_addr = sc->eaddr;
	uint32_t val;

	DBPRINT(sc, BCE_INFO, "Setting Ethernet address = %6D\n",
	    sc->eaddr, ":");

	/* The hardware takes the address as two match registers (2+4 bytes). */
	val = (mac_addr[0] << 8) | mac_addr[1];
	REG_WR(sc, BCE_EMAC_MAC_MATCH0, val);

	val = (mac_addr[2] << 24) |
	    (mac_addr[3] << 16) |
	    (mac_addr[4] << 8) |
	    mac_addr[5];
	REG_WR(sc, BCE_EMAC_MAC_MATCH1, val);
}


/****************************************************************************/
/* Stop the controller.
 */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_stop(struct bce_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ASSERT_SERIALIZED(ifp->if_serializer);

	callout_stop(&sc->bce_tick_callout);

	/* Disable the transmit/receive blocks. */
	REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS, BCE_MISC_ENABLE_CLR_DEFAULT);
	REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS);
	DELAY(20);

	bce_disable_intr(sc);

	/* Free the RX lists. */
	bce_free_rx_chain(sc);

	/* Free TX buffers. */
	bce_free_tx_chain(sc);

	sc->bce_link = 0;
	sc->bce_coalchg_mask = 0;

	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_timer = 0;
}


/*
 * Issue a chip reset after coordinating with the bootcode firmware.
 * Returns 0 on success or a positive errno (EBUSY, ENODEV, or a
 * bce_fw_sync() result) on failure.
 */
static int
bce_reset(struct bce_softc *sc, uint32_t reset_code)
{
	uint32_t val;
	int i, rc = 0;

	/* Wait for pending PCI transactions to complete. */
	REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS,
	    BCE_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	    BCE_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	    BCE_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	    BCE_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	val = REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS);
	DELAY(5);

	/* Disable DMA */
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
		val = REG_RD(sc, BCE_MISC_NEW_CORE_CTL);
		val &= ~BCE_MISC_NEW_CORE_CTL_DMA_ENABLE;
		REG_WR(sc, BCE_MISC_NEW_CORE_CTL, val);
	}

	/* Assume bootcode is running. */
	sc->bce_fw_timed_out = 0;
	sc->bce_drv_cardiac_arrest = 0;

	/* Give the firmware a chance to prepare for the reset.
	 */
	rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT0 | reset_code);
	if (rc) {
		if_printf(&sc->arpcom.ac_if,
		    "Firmware is not ready for reset\n");
		return rc;
	}

	/* Set a firmware reminder that this is a soft reset. */
	bce_shmem_wr(sc, BCE_DRV_RESET_SIGNATURE,
	    BCE_DRV_RESET_SIGNATURE_MAGIC);

	/* Dummy read to force the chip to complete all current transactions. */
	val = REG_RD(sc, BCE_MISC_ID);

	/* Chip reset. */
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
		REG_WR(sc, BCE_MISC_COMMAND, BCE_MISC_COMMAND_SW_RESET);
		REG_RD(sc, BCE_MISC_COMMAND);
		DELAY(5);

		val = BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		    BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		pci_write_config(sc->bce_dev, BCE_PCICFG_MISC_CONFIG, val, 4);
	} else {
		val = BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		    BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		    BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
		REG_WR(sc, BCE_PCICFG_MISC_CONFIG, val);

		/* Allow up to 30us for reset to complete. */
		for (i = 0; i < 10; i++) {
			val = REG_RD(sc, BCE_PCICFG_MISC_CONFIG);
			if ((val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			    BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
				break;
			DELAY(10);
		}

		/* Check that reset completed successfully. */
		if (val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		    BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
			if_printf(&sc->arpcom.ac_if, "Reset failed!\n");
			return EBUSY;
		}
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(sc, BCE_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		if_printf(&sc->arpcom.ac_if, "Byte swap is incorrect!\n");
		return ENODEV;
	}

	/* Just completed a reset, assume that firmware is running again.
	 */
	sc->bce_fw_timed_out = 0;
	sc->bce_drv_cardiac_arrest = 0;

	/* Wait for the firmware to finish its initialization. */
	rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT1 | reset_code);
	if (rc) {
		if_printf(&sc->arpcom.ac_if,
		    "Firmware did not complete initialization!\n");
	}
	return rc;
}


/*
 * Perform one-time chip initialization after reset: DMA configuration,
 * context memory, on-chip CPU firmware, NVRAM, and MQ/RV2P/TBDR setup.
 * Returns 0 on success or a positive errno on failure.
 */
static int
bce_chipinit(struct bce_softc *sc)
{
	uint32_t val;
	int rc = 0;

	/* Make sure the interrupt is not active. */
	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, BCE_PCICFG_INT_ACK_CMD_MASK_INT);
	REG_RD(sc, BCE_PCICFG_INT_ACK_CMD);

	/*
	 * Initialize DMA byte/word swapping, configure the number of DMA
	 * channels and PCI clock compensation delay.
	 */
	val = BCE_DMA_CONFIG_DATA_BYTE_SWAP |
	    BCE_DMA_CONFIG_DATA_WORD_SWAP |
#if BYTE_ORDER == BIG_ENDIAN
	    BCE_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	    BCE_DMA_CONFIG_CNTL_WORD_SWAP |
	    DMA_READ_CHANS << 12 |
	    DMA_WRITE_CHANS << 16;

	val |= (0x2 << 20) | BCE_DMA_CONFIG_CNTL_PCI_COMP_DLY;

	if ((sc->bce_flags & BCE_PCIX_FLAG) && sc->bus_speed_mhz == 133)
		val |= BCE_DMA_CONFIG_PCI_FAST_CLK_CMP;

	/*
	 * This setting resolves a problem observed on certain Intel PCI
	 * chipsets that cannot handle multiple outstanding DMA operations.
	 * See errata E9_5706A1_65.
	 */
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706 &&
	    BCE_CHIP_ID(sc) != BCE_CHIP_ID_5706_A0 &&
	    !(sc->bce_flags & BCE_PCIX_FLAG))
		val |= BCE_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(sc, BCE_DMA_CONFIG, val);

	/* Enable the RX_V2P and Context state machines before access. */
	REG_WR(sc, BCE_MISC_ENABLE_SET_BITS,
	    BCE_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	    BCE_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	    BCE_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts.
	 */
	rc = bce_init_ctx(sc);
	if (rc != 0)
		return rc;

	/* Initialize the on-boards CPUs */
	bce_init_cpus(sc);

	/* Enable management frames (NC-SI) to flow to the MCP. */
	if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) {
		val = REG_RD(sc, BCE_RPM_MGMT_PKT_CTRL) |
		    BCE_RPM_MGMT_PKT_CTRL_MGMT_EN;
		REG_WR(sc, BCE_RPM_MGMT_PKT_CTRL, val);
	}

	/* Prepare NVRAM for access. */
	rc = bce_init_nvram(sc);
	if (rc != 0)
		return rc;

	/* Set the kernel bypass block size */
	val = REG_RD(sc, BCE_MQ_CONFIG);
	val &= ~BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;

	/* Enable bins used on the 5709/5716. */
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
		val |= BCE_MQ_CONFIG_BIN_MQ_MODE;
		if (BCE_CHIP_ID(sc) == BCE_CHIP_ID_5709_A1)
			val |= BCE_MQ_CONFIG_HALT_DIS;
	}

	REG_WR(sc, BCE_MQ_CONFIG, val);

	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(sc, BCE_MQ_KNL_BYP_WIND_START, val);
	REG_WR(sc, BCE_MQ_KNL_WIND_END, val);

	/* Set the page size and clear the RV2P processor stall bits. */
	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(sc, BCE_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(sc, BCE_TBDR_CONFIG);
	val &= ~BCE_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(sc, BCE_TBDR_CONFIG, val);

	/* Set the perfect match control register to default. */
	REG_WR_IND(sc, BCE_RXP_PM_CTRL, 0);

	return 0;
}


/****************************************************************************/
/* Initialize the controller in preparation to send/receive traffic.        */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.
*/ 3708 /****************************************************************************/ 3709 static int 3710 bce_blockinit(struct bce_softc *sc) 3711 { 3712 uint32_t reg, val; 3713 int rc = 0; 3714 3715 /* Load the hardware default MAC address. */ 3716 bce_set_mac_addr(sc); 3717 3718 /* Set the Ethernet backoff seed value */ 3719 val = sc->eaddr[0] + (sc->eaddr[1] << 8) + (sc->eaddr[2] << 16) + 3720 sc->eaddr[3] + (sc->eaddr[4] << 8) + (sc->eaddr[5] << 16); 3721 REG_WR(sc, BCE_EMAC_BACKOFF_SEED, val); 3722 3723 sc->last_status_idx = 0; 3724 sc->rx_mode = BCE_EMAC_RX_MODE_SORT_MODE; 3725 3726 /* Set up link change interrupt generation. */ 3727 REG_WR(sc, BCE_EMAC_ATTENTION_ENA, BCE_EMAC_ATTENTION_ENA_LINK); 3728 3729 /* Program the physical address of the status block. */ 3730 REG_WR(sc, BCE_HC_STATUS_ADDR_L, BCE_ADDR_LO(sc->status_block_paddr)); 3731 REG_WR(sc, BCE_HC_STATUS_ADDR_H, BCE_ADDR_HI(sc->status_block_paddr)); 3732 3733 /* Program the physical address of the statistics block. */ 3734 REG_WR(sc, BCE_HC_STATISTICS_ADDR_L, 3735 BCE_ADDR_LO(sc->stats_block_paddr)); 3736 REG_WR(sc, BCE_HC_STATISTICS_ADDR_H, 3737 BCE_ADDR_HI(sc->stats_block_paddr)); 3738 3739 /* Program various host coalescing parameters. 
*/ 3740 REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP, 3741 (sc->bce_tx_quick_cons_trip_int << 16) | 3742 sc->bce_tx_quick_cons_trip); 3743 REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP, 3744 (sc->bce_rx_quick_cons_trip_int << 16) | 3745 sc->bce_rx_quick_cons_trip); 3746 REG_WR(sc, BCE_HC_COMP_PROD_TRIP, 3747 (sc->bce_comp_prod_trip_int << 16) | sc->bce_comp_prod_trip); 3748 REG_WR(sc, BCE_HC_TX_TICKS, 3749 (sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks); 3750 REG_WR(sc, BCE_HC_RX_TICKS, 3751 (sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks); 3752 REG_WR(sc, BCE_HC_COM_TICKS, 3753 (sc->bce_com_ticks_int << 16) | sc->bce_com_ticks); 3754 REG_WR(sc, BCE_HC_CMD_TICKS, 3755 (sc->bce_cmd_ticks_int << 16) | sc->bce_cmd_ticks); 3756 REG_WR(sc, BCE_HC_STATS_TICKS, (sc->bce_stats_ticks & 0xffff00)); 3757 REG_WR(sc, BCE_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */ 3758 3759 val = BCE_HC_CONFIG_TX_TMR_MODE | BCE_HC_CONFIG_COLLECT_STATS; 3760 if (sc->bce_flags & BCE_ONESHOT_MSI_FLAG) { 3761 if (bootverbose) 3762 if_printf(&sc->arpcom.ac_if, "oneshot MSI\n"); 3763 val |= BCE_HC_CONFIG_ONE_SHOT | BCE_HC_CONFIG_USE_INT_PARAM; 3764 } 3765 REG_WR(sc, BCE_HC_CONFIG, val); 3766 3767 /* Clear the internal statistics counters. */ 3768 REG_WR(sc, BCE_HC_COMMAND, BCE_HC_COMMAND_CLR_STAT_NOW); 3769 3770 /* Verify that bootcode is running. */ 3771 reg = bce_shmem_rd(sc, BCE_DEV_INFO_SIGNATURE); 3772 3773 DBRUNIF(DB_RANDOMTRUE(bce_debug_bootcode_running_failure), 3774 if_printf(&sc->arpcom.ac_if, 3775 "%s(%d): Simulating bootcode failure.\n", 3776 __FILE__, __LINE__); 3777 reg = 0); 3778 3779 if ((reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK) != 3780 BCE_DEV_INFO_SIGNATURE_MAGIC) { 3781 if_printf(&sc->arpcom.ac_if, 3782 "Bootcode not running! 
Found: 0x%08X, " 3783 "Expected: 08%08X\n", 3784 reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK, 3785 BCE_DEV_INFO_SIGNATURE_MAGIC); 3786 return ENODEV; 3787 } 3788 3789 /* Enable DMA */ 3790 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 || 3791 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) { 3792 val = REG_RD(sc, BCE_MISC_NEW_CORE_CTL); 3793 val |= BCE_MISC_NEW_CORE_CTL_DMA_ENABLE; 3794 REG_WR(sc, BCE_MISC_NEW_CORE_CTL, val); 3795 } 3796 3797 /* Allow bootcode to apply any additional fixes before enabling MAC. */ 3798 rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT2 | BCE_DRV_MSG_CODE_RESET); 3799 3800 /* Enable link state change interrupt generation. */ 3801 REG_WR(sc, BCE_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE); 3802 3803 /* Enable the RXP. */ 3804 bce_start_rxp_cpu(sc); 3805 3806 /* Disable management frames (NC-SI) from flowing to the MCP. */ 3807 if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) { 3808 val = REG_RD(sc, BCE_RPM_MGMT_PKT_CTRL) & 3809 ~BCE_RPM_MGMT_PKT_CTRL_MGMT_EN; 3810 REG_WR(sc, BCE_RPM_MGMT_PKT_CTRL, val); 3811 } 3812 3813 /* Enable all remaining blocks in the MAC. */ 3814 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 || 3815 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) { 3816 REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, 3817 BCE_MISC_ENABLE_DEFAULT_XI); 3818 } else { 3819 REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, BCE_MISC_ENABLE_DEFAULT); 3820 } 3821 REG_RD(sc, BCE_MISC_ENABLE_SET_BITS); 3822 DELAY(20); 3823 3824 /* Save the current host coalescing block settings. */ 3825 sc->hc_command = REG_RD(sc, BCE_HC_COMMAND); 3826 3827 return 0; 3828 } 3829 3830 3831 /****************************************************************************/ 3832 /* Encapsulate an mbuf cluster into the rx_bd chain. */ 3833 /* */ 3834 /* The NetXtreme II can support Jumbo frames by using multiple rx_bd's. */ 3835 /* This routine will map an mbuf cluster into 1 or more rx_bd's as */ 3836 /* necessary. */ 3837 /* */ 3838 /* Returns: */ 3839 /* 0 for success, positive value for failure. 
 */
/****************************************************************************/
static int
bce_newbuf_std(struct bce_softc *sc, uint16_t *prod, uint16_t *chain_prod,
    uint32_t *prod_bseq, int init)
{
	bus_dmamap_t map;
	bus_dma_segment_t seg;
	struct mbuf *m_new;
	int error, nseg;
#ifdef BCE_DEBUG
	uint16_t debug_chain_prod = *chain_prod;
#endif

	/* Make sure the inputs are valid. */
	DBRUNIF((*chain_prod > MAX_RX_BD(sc)),
	    if_printf(&sc->arpcom.ac_if, "%s(%d): "
	        "RX producer out of range: 0x%04X > 0x%04X\n",
	        __FILE__, __LINE__,
	        *chain_prod, (uint16_t)MAX_RX_BD(sc)));

	DBPRINT(sc, BCE_VERBOSE_RECV, "%s(enter): prod = 0x%04X, chain_prod = 0x%04X, "
	    "prod_bseq = 0x%08X\n", __func__, *prod, *chain_prod, *prod_bseq);

	DBRUNIF(DB_RANDOMTRUE(bce_debug_mbuf_allocation_failure),
	    if_printf(&sc->arpcom.ac_if, "%s(%d): "
	        "Simulating mbuf allocation failure.\n",
	        __FILE__, __LINE__);
	    sc->mbuf_alloc_failed++;
	    return ENOBUFS);

	/*
	 * This is a new mbuf allocation.  During initialization we may
	 * sleep for the cluster; at runtime we must not.
	 */
	m_new = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m_new == NULL)
		return ENOBUFS;
	DBRUNIF(1, sc->rx_mbuf_alloc++);

	m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;

	/* Map the mbuf cluster into device memory.
	 */
	error = bus_dmamap_load_mbuf_segment(sc->rx_mbuf_tag,
	    sc->rx_mbuf_tmpmap, m_new, &seg, 1, &nseg,
	    BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m_new);
		if (init) {
			if_printf(&sc->arpcom.ac_if,
			    "Error mapping mbuf into RX chain!\n");
		}
		DBRUNIF(1, sc->rx_mbuf_alloc--);
		return error;
	}

	/* Drop any previous mapping at this slot before swapping maps in. */
	if (sc->rx_mbuf_ptr[*chain_prod] != NULL) {
		bus_dmamap_unload(sc->rx_mbuf_tag,
		    sc->rx_mbuf_map[*chain_prod]);
	}

	/* Swap the temporary map with the slot's map. */
	map = sc->rx_mbuf_map[*chain_prod];
	sc->rx_mbuf_map[*chain_prod] = sc->rx_mbuf_tmpmap;
	sc->rx_mbuf_tmpmap = map;

	/* Watch for overflow. */
	DBRUNIF((sc->free_rx_bd > USABLE_RX_BD(sc)),
	    if_printf(&sc->arpcom.ac_if, "%s(%d): "
	        "Too many free rx_bd (0x%04X > 0x%04X)!\n",
	        __FILE__, __LINE__, sc->free_rx_bd,
	        (uint16_t)USABLE_RX_BD(sc)));

	/* Update some debug statistic counters */
	DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark),
	    sc->rx_low_watermark = sc->free_rx_bd);
	DBRUNIF((sc->free_rx_bd == 0), sc->rx_empty_count++);

	/* Save the mbuf and update our counter. */
	sc->rx_mbuf_ptr[*chain_prod] = m_new;
	sc->rx_mbuf_paddr[*chain_prod] = seg.ds_addr;
	sc->free_rx_bd--;

	bce_setup_rxdesc_std(sc, *chain_prod, prod_bseq);

	DBRUN(BCE_VERBOSE_RECV,
	    bce_dump_rx_mbuf_chain(sc, debug_chain_prod, 1));

	DBPRINT(sc, BCE_VERBOSE_RECV, "%s(exit): prod = 0x%04X, chain_prod = 0x%04X, "
	    "prod_bseq = 0x%08X\n", __func__, *prod, *chain_prod, *prod_bseq);

	return 0;
}


/*
 * Write the rx_bd descriptor for the mbuf previously stored at chain_prod
 * and advance the producer byte-sequence counter.
 */
static void
bce_setup_rxdesc_std(struct bce_softc *sc, uint16_t chain_prod, uint32_t *prod_bseq)
{
	struct rx_bd *rxbd;
	bus_addr_t paddr;
	int len;

	paddr = sc->rx_mbuf_paddr[chain_prod];
	len = sc->rx_mbuf_ptr[chain_prod]->m_len;

	/* Setup the rx_bd for the first segment.
*/ 3941 rxbd = &sc->rx_bd_chain[RX_PAGE(chain_prod)][RX_IDX(chain_prod)]; 3942 3943 rxbd->rx_bd_haddr_lo = htole32(BCE_ADDR_LO(paddr)); 3944 rxbd->rx_bd_haddr_hi = htole32(BCE_ADDR_HI(paddr)); 3945 rxbd->rx_bd_len = htole32(len); 3946 rxbd->rx_bd_flags = htole32(RX_BD_FLAGS_START); 3947 *prod_bseq += len; 3948 3949 rxbd->rx_bd_flags |= htole32(RX_BD_FLAGS_END); 3950 } 3951 3952 3953 /****************************************************************************/ 3954 /* Initialize the TX context memory. */ 3955 /* */ 3956 /* Returns: */ 3957 /* Nothing */ 3958 /****************************************************************************/ 3959 static void 3960 bce_init_tx_context(struct bce_softc *sc) 3961 { 3962 uint32_t val; 3963 3964 /* Initialize the context ID for an L2 TX chain. */ 3965 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 || 3966 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) { 3967 /* Set the CID type to support an L2 connection. */ 3968 val = BCE_L2CTX_TX_TYPE_TYPE_L2 | BCE_L2CTX_TX_TYPE_SIZE_L2; 3969 CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TX_TYPE_XI, val); 3970 val = BCE_L2CTX_TX_CMD_TYPE_TYPE_L2 | (8 << 16); 3971 CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TX_CMD_TYPE_XI, val); 3972 3973 /* Point the hardware to the first page in the chain. */ 3974 val = BCE_ADDR_HI(sc->tx_bd_chain_paddr[0]); 3975 CTX_WR(sc, GET_CID_ADDR(TX_CID), 3976 BCE_L2CTX_TX_TBDR_BHADDR_HI_XI, val); 3977 val = BCE_ADDR_LO(sc->tx_bd_chain_paddr[0]); 3978 CTX_WR(sc, GET_CID_ADDR(TX_CID), 3979 BCE_L2CTX_TX_TBDR_BHADDR_LO_XI, val); 3980 } else { 3981 /* Set the CID type to support an L2 connection. */ 3982 val = BCE_L2CTX_TX_TYPE_TYPE_L2 | BCE_L2CTX_TX_TYPE_SIZE_L2; 3983 CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TX_TYPE, val); 3984 val = BCE_L2CTX_TX_CMD_TYPE_TYPE_L2 | (8 << 16); 3985 CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TX_CMD_TYPE, val); 3986 3987 /* Point the hardware to the first page in the chain. 
 */
		val = BCE_ADDR_HI(sc->tx_bd_chain_paddr[0]);
		CTX_WR(sc, GET_CID_ADDR(TX_CID),
		    BCE_L2CTX_TX_TBDR_BHADDR_HI, val);
		val = BCE_ADDR_LO(sc->tx_bd_chain_paddr[0]);
		CTX_WR(sc, GET_CID_ADDR(TX_CID),
		    BCE_L2CTX_TX_TBDR_BHADDR_LO, val);
	}
}


/****************************************************************************/
/* Allocate memory and initialize the TX data structures.                   */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
static int
bce_init_tx_chain(struct bce_softc *sc)
{
	struct tx_bd *txbd;
	int i, rc = 0;

	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __func__);

	/* Set the initial TX producer/consumer indices. */
	sc->tx_prod = 0;
	sc->tx_cons = 0;
	sc->tx_prod_bseq = 0;
	sc->used_tx_bd = 0;
	sc->max_tx_bd = USABLE_TX_BD(sc);
	DBRUNIF(1, sc->tx_hi_watermark = USABLE_TX_BD(sc));
	DBRUNIF(1, sc->tx_full_count = 0);

	/*
	 * The NetXtreme II supports a linked-list structure called
	 * a Buffer Descriptor Chain (or BD chain).  A BD chain
	 * consists of a series of 1 or more chain pages, each of which
	 * consists of a fixed number of BD entries.
	 * The last BD entry on each page is a pointer to the next page
	 * in the chain, and the last pointer in the BD chain
	 * points back to the beginning of the chain.
	 */

	/* Set the TX next pointer chain entries. */
	for (i = 0; i < sc->tx_pages; i++) {
		int j;

		/* The slot past the last usable BD holds the next-page
		 * pointer. */
		txbd = &sc->tx_bd_chain[i][USABLE_TX_BD_PER_PAGE];

		/* Check if we've reached the last page; if so, wrap to
		 * page 0 so the chain forms a ring. */
		if (i == (sc->tx_pages - 1))
			j = 0;
		else
			j = i + 1;

		txbd->tx_bd_haddr_hi =
		    htole32(BCE_ADDR_HI(sc->tx_bd_chain_paddr[j]));
		txbd->tx_bd_haddr_lo =
		    htole32(BCE_ADDR_LO(sc->tx_bd_chain_paddr[j]));
	}
	bce_init_tx_context(sc);

	return(rc);
}


/****************************************************************************/
/* Free memory and clear the TX data structures.                            */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_free_tx_chain(struct bce_softc *sc)
{
	int i;

	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __func__);

	/* Unmap, unload, and free any mbufs still in the TX mbuf chain. */
	for (i = 0; i < TOTAL_TX_BD(sc); i++) {
		if (sc->tx_mbuf_ptr[i] != NULL) {
			bus_dmamap_unload(sc->tx_mbuf_tag, sc->tx_mbuf_map[i]);
			m_freem(sc->tx_mbuf_ptr[i]);
			sc->tx_mbuf_ptr[i] = NULL;
			DBRUNIF(1, sc->tx_mbuf_alloc--);
		}
	}

	/* Clear each TX chain page. */
	for (i = 0; i < sc->tx_pages; i++)
		bzero(sc->tx_bd_chain[i], BCE_TX_CHAIN_PAGE_SZ);
	sc->used_tx_bd = 0;

	/* Check if we lost any mbufs in the process. */
	DBRUNIF((sc->tx_mbuf_alloc),
	    if_printf(&sc->arpcom.ac_if,
	    "%s(%d): Memory leak! "
	    "Lost %d mbufs from tx chain!\n",
	    __FILE__, __LINE__, sc->tx_mbuf_alloc));

	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __func__);
}


/****************************************************************************/
/* Initialize the RX context memory.                                        */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing                                                                */
/****************************************************************************/
static void
bce_init_rx_context(struct bce_softc *sc)
{
	uint32_t val;

	/* Initialize the context ID for an L2 RX chain.
 */
	val = BCE_L2CTX_RX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE |
	    BCE_L2CTX_RX_CTX_TYPE_SIZE_L2 | (0x02 << 8);

	/*
	 * Set the level for generating pause frames
	 * when the number of available rx_bd's gets
	 * too low (the low watermark) and the level
	 * when pause frames can be stopped (the high
	 * watermark).
	 */
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
		uint32_t lo_water, hi_water;

		lo_water = BCE_L2CTX_RX_LO_WATER_MARK_DEFAULT;
		hi_water = USABLE_RX_BD(sc) / 4;

		lo_water /= BCE_L2CTX_RX_LO_WATER_MARK_SCALE;
		hi_water /= BCE_L2CTX_RX_HI_WATER_MARK_SCALE;

		/*
		 * Clamp the scaled high watermark to its 4-bit field;
		 * if it scales to zero, disable pause by zeroing the
		 * low watermark as well.
		 */
		if (hi_water > 0xf)
			hi_water = 0xf;
		else if (hi_water == 0)
			lo_water = 0;
		val |= lo_water |
		    (hi_water << BCE_L2CTX_RX_HI_WATER_MARK_SHIFT);
	}

	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_CTX_TYPE, val);

	/* Setup the MQ BIN mapping for l2_ctx_host_bseq. */
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
		val = REG_RD(sc, BCE_MQ_MAP_L2_5);
		REG_WR(sc, BCE_MQ_MAP_L2_5, val | BCE_MQ_MAP_L2_5_ARM);
	}

	/* Point the hardware to the first page in the chain. */
	val = BCE_ADDR_HI(sc->rx_bd_chain_paddr[0]);
	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_NX_BDHADDR_HI, val);
	val = BCE_ADDR_LO(sc->rx_bd_chain_paddr[0]);
	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_NX_BDHADDR_LO, val);
}


/****************************************************************************/
/* Allocate memory and initialize the RX data structures.                   */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
static int
bce_init_rx_chain(struct bce_softc *sc)
{
	struct rx_bd *rxbd;
	int i, rc = 0;
	uint16_t prod, chain_prod;
	uint32_t prod_bseq;

	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __func__);

	/* Initialize the RX producer and consumer indices. */
	sc->rx_prod = 0;
	sc->rx_cons = 0;
	sc->rx_prod_bseq = 0;
	sc->free_rx_bd = USABLE_RX_BD(sc);
	sc->max_rx_bd = USABLE_RX_BD(sc);
	DBRUNIF(1, sc->rx_low_watermark = USABLE_RX_BD(sc));
	DBRUNIF(1, sc->rx_empty_count = 0);

	/* Initialize the RX next pointer chain entries. */
	for (i = 0; i < sc->rx_pages; i++) {
		int j;

		rxbd = &sc->rx_bd_chain[i][USABLE_RX_BD_PER_PAGE];

		/* Check if we've reached the last page; wrap to page 0
		 * so the chain forms a ring. */
		if (i == (sc->rx_pages - 1))
			j = 0;
		else
			j = i + 1;

		/* Setup the chain page pointers. */
		rxbd->rx_bd_haddr_hi =
		    htole32(BCE_ADDR_HI(sc->rx_bd_chain_paddr[j]));
		rxbd->rx_bd_haddr_lo =
		    htole32(BCE_ADDR_LO(sc->rx_bd_chain_paddr[j]));
	}

	/* Allocate mbuf clusters for the rx_bd chain. */
	prod = prod_bseq = 0;
	while (prod < TOTAL_RX_BD(sc)) {
		chain_prod = RX_CHAIN_IDX(sc, prod);
		if (bce_newbuf_std(sc, &prod, &chain_prod, &prod_bseq, 1)) {
			if_printf(&sc->arpcom.ac_if,
			    "Error filling RX chain: rx_bd[0x%04X]!\n",
			    chain_prod);
			rc = ENOBUFS;
			break;
		}
		prod = NEXT_RX_BD(prod);
	}

	/* Save the RX chain producer index. */
	sc->rx_prod = prod;
	sc->rx_prod_bseq = prod_bseq;

	/* Tell the chip about the waiting rx_bd's. */
	REG_WR16(sc, MB_GET_CID_ADDR(RX_CID) + BCE_L2MQ_RX_HOST_BDIDX,
	    sc->rx_prod);
	REG_WR(sc, MB_GET_CID_ADDR(RX_CID) + BCE_L2MQ_RX_HOST_BSEQ,
	    sc->rx_prod_bseq);

	bce_init_rx_context(sc);

	return(rc);
}


/****************************************************************************/
/* Free memory and clear the RX data structures.                            */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_free_rx_chain(struct bce_softc *sc)
{
	int i;

	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __func__);

	/* Free any mbufs still in the RX mbuf chain. */
	for (i = 0; i < TOTAL_RX_BD(sc); i++) {
		if (sc->rx_mbuf_ptr[i] != NULL) {
			bus_dmamap_unload(sc->rx_mbuf_tag, sc->rx_mbuf_map[i]);
			m_freem(sc->rx_mbuf_ptr[i]);
			sc->rx_mbuf_ptr[i] = NULL;
			DBRUNIF(1, sc->rx_mbuf_alloc--);
		}
	}

	/* Clear each RX chain page. */
	for (i = 0; i < sc->rx_pages; i++)
		bzero(sc->rx_bd_chain[i], BCE_RX_CHAIN_PAGE_SZ);

	/* Check if we lost any mbufs in the process. */
	DBRUNIF((sc->rx_mbuf_alloc),
	    if_printf(&sc->arpcom.ac_if,
	    "%s(%d): Memory leak! "
	    "Lost %d mbufs from rx chain!\n",
	    __FILE__, __LINE__, sc->rx_mbuf_alloc));

	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __func__);
}


/****************************************************************************/
/* Set media options.                                                       */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.
 */
/****************************************************************************/
static int
bce_ifmedia_upd(struct ifnet *ifp)
{
	struct bce_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->bce_miibus);
	int error = 0;

	/*
	 * 'mii' will be NULL, when this function is called on following
	 * code path: bce_attach() -> bce_mgmt_init()
	 */
	if (mii != NULL) {
		/* Make sure the MII bus has been enumerated. */
		sc->bce_link = 0;
		if (mii->mii_instance) {
			struct mii_softc *miisc;

			LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
				mii_phy_reset(miisc);
		}
		error = mii_mediachg(mii);
	}
	return error;
}


/****************************************************************************/
/* Reports current media status.                                            */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct bce_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->bce_miibus);

	/*
	 * NOTE(review): unlike bce_ifmedia_upd(), 'mii' is not checked
	 * for NULL here; presumably this path is only reachable once the
	 * MII bus has attached -- confirm against callers.
	 */
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}


/****************************************************************************/
/* Handles PHY generated interrupt events.                                  */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_phy_intr(struct bce_softc *sc)
{
	uint32_t new_link_state, old_link_state;
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ASSERT_SERIALIZED(ifp->if_serializer);

	/*
	 * Compare the attention bit (current link state) against its
	 * acknowledged copy to detect a transition.
	 */
	new_link_state = sc->status_block->status_attn_bits &
	    STATUS_ATTN_BITS_LINK_STATE;
	old_link_state = sc->status_block->status_attn_bits_ack &
	    STATUS_ATTN_BITS_LINK_STATE;

	/* Handle any changes if the link state has changed. */
	if (new_link_state != old_link_state) {	/* XXX redundant? */
		DBRUN(BCE_VERBOSE_INTR, bce_dump_status_block(sc));

		/* Update the status_attn_bits_ack field in the status block. */
		if (new_link_state) {
			REG_WR(sc, BCE_PCICFG_STATUS_BIT_SET_CMD,
			    STATUS_ATTN_BITS_LINK_STATE);
			if (bootverbose)
				if_printf(ifp, "Link is now UP.\n");
		} else {
			REG_WR(sc, BCE_PCICFG_STATUS_BIT_CLEAR_CMD,
			    STATUS_ATTN_BITS_LINK_STATE);
			if (bootverbose)
				if_printf(ifp, "Link is now DOWN.\n");
		}

		/*
		 * Assume link is down and allow tick routine to
		 * update the state based on the actual media state.
		 */
		sc->bce_link = 0;
		callout_stop(&sc->bce_tick_callout);
		bce_tick_serialized(sc);
	}

	/* Acknowledge the link change interrupt. */
	REG_WR(sc, BCE_EMAC_STATUS, BCE_EMAC_STATUS_LINK_CHANGE);
}


/****************************************************************************/
/* Reads the receive consumer value from the status block (skipping over    */
/* chain page pointer if necessary).
 */
/*                                                                          */
/* Returns:                                                                 */
/*   hw_cons                                                                */
/****************************************************************************/
static __inline uint16_t
bce_get_hw_rx_cons(struct bce_softc *sc)
{
	uint16_t hw_cons = sc->status_block->status_rx_quick_consumer_index0;

	/* The consumer index never rests on a next-page pointer slot;
	 * step past it. */
	if ((hw_cons & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
		hw_cons++;
	return hw_cons;
}


/****************************************************************************/
/* Handles received frame interrupt events.                                 */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_rx_intr(struct bce_softc *sc, int count, uint16_t hw_cons)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint16_t sw_cons, sw_chain_cons, sw_prod, sw_chain_prod;
	uint32_t sw_prod_bseq;

	ASSERT_SERIALIZED(ifp->if_serializer);

	/* Get working copies of the driver's view of the RX indices. */
	sw_cons = sc->rx_cons;
	sw_prod = sc->rx_prod;
	sw_prod_bseq = sc->rx_prod_bseq;

	/* Scan through the receive chain as long as there is work to do. */
	while (sw_cons != hw_cons) {
		struct mbuf *m = NULL;
		struct l2_fhdr *l2fhdr = NULL;
		struct rx_bd *rxbd;
		unsigned int len;
		uint32_t status = 0;

#ifdef IFPOLL_ENABLE
		/* A negative count means no polling budget limit. */
		if (count >= 0 && count-- == 0)
			break;
#endif

		/*
		 * Convert the producer/consumer indices
		 * to an actual rx_bd index.
		 */
		sw_chain_cons = RX_CHAIN_IDX(sc, sw_cons);
		sw_chain_prod = RX_CHAIN_IDX(sc, sw_prod);

		/* Get the used rx_bd.  NOTE(review): 'rxbd' is computed
		 * but not referenced afterwards in this build. */
		rxbd = &sc->rx_bd_chain[RX_PAGE(sw_chain_cons)]
		    [RX_IDX(sw_chain_cons)];
		sc->free_rx_bd++;

		/* The mbuf is stored with the last rx_bd entry of a packet. */
		if (sc->rx_mbuf_ptr[sw_chain_cons] != NULL) {
			/* Consumer and producer slots must coincide here;
			 * anything else indicates ring corruption. */
			if (sw_chain_cons != sw_chain_prod) {
				if_printf(ifp, "RX cons(%d) != prod(%d), "
				    "drop!\n", sw_chain_cons,
				    sw_chain_prod);
				ifp->if_ierrors++;

				bce_setup_rxdesc_std(sc, sw_chain_cons,
				    &sw_prod_bseq);
				m = NULL;
				goto bce_rx_int_next_rx;
			}

			/* Unmap the mbuf from DMA space. */
			bus_dmamap_sync(sc->rx_mbuf_tag,
			    sc->rx_mbuf_map[sw_chain_cons],
			    BUS_DMASYNC_POSTREAD);

			/* Save the mbuf from the driver's chain. */
			m = sc->rx_mbuf_ptr[sw_chain_cons];

			/*
			 * Frames received on the NetXteme II are prepended
			 * with an l2_fhdr structure which provides status
			 * information about the received frame (including
			 * VLAN tags and checksum info).  The frames are also
			 * automatically adjusted to align the IP header
			 * (i.e. two null bytes are inserted before the
			 * Ethernet header).  As a result the data DMA'd by
			 * the controller into the mbuf is as follows:
			 *
			 * +---------+-----+---------------------+-----+
			 * | l2_fhdr | pad | packet data         | FCS |
			 * +---------+-----+---------------------+-----+
			 *
			 * The l2_fhdr needs to be checked and skipped and the
			 * FCS needs to be stripped before sending the packet
			 * up the stack.
			 */
			l2fhdr = mtod(m, struct l2_fhdr *);

			len = l2fhdr->l2_fhdr_pkt_len;
			status = l2fhdr->l2_fhdr_status;

			/* Strip the trailing FCS from the reported length. */
			len -= ETHER_CRC_LEN;

			/* Check the received frame for errors. */
			if (status & (L2_FHDR_ERRORS_BAD_CRC |
			    L2_FHDR_ERRORS_PHY_DECODE |
			    L2_FHDR_ERRORS_ALIGNMENT |
			    L2_FHDR_ERRORS_TOO_SHORT |
			    L2_FHDR_ERRORS_GIANT_FRAME)) {
				ifp->if_ierrors++;

				/* Reuse the mbuf for a new frame. */
				bce_setup_rxdesc_std(sc, sw_chain_prod,
				    &sw_prod_bseq);
				m = NULL;
				goto bce_rx_int_next_rx;
			}

			/*
			 * Get a new mbuf for the rx_bd.  If no new
			 * mbufs are available then reuse the current mbuf,
			 * log an ierror on the interface, and generate
			 * an error in the system log.
			 */
			if (bce_newbuf_std(sc, &sw_prod, &sw_chain_prod,
			    &sw_prod_bseq, 0)) {
				ifp->if_ierrors++;

				/* Try and reuse the existing mbuf. */
				bce_setup_rxdesc_std(sc, sw_chain_prod,
				    &sw_prod_bseq);
				m = NULL;
				goto bce_rx_int_next_rx;
			}

			/*
			 * Skip over the l2_fhdr when passing
			 * the data up the stack.
			 */
			m_adj(m, sizeof(struct l2_fhdr) + ETHER_ALIGN);

			m->m_pkthdr.len = m->m_len = len;
			m->m_pkthdr.rcvif = ifp;

			/* Validate the checksum if offload enabled. */
			if (ifp->if_capenable & IFCAP_RXCSUM) {
				/* Check for an IP datagram. */
				if (status & L2_FHDR_STATUS_IP_DATAGRAM) {
					m->m_pkthdr.csum_flags |=
					    CSUM_IP_CHECKED;

					/* Check if the IP checksum is valid. */
					if ((l2fhdr->l2_fhdr_ip_xsum ^
					    0xffff) == 0) {
						m->m_pkthdr.csum_flags |=
						    CSUM_IP_VALID;
					}
				}

				/* Check for a valid TCP/UDP frame. */
				if (status & (L2_FHDR_STATUS_TCP_SEGMENT |
				    L2_FHDR_STATUS_UDP_DATAGRAM)) {

					/* Check for a good TCP/UDP checksum. */
					if ((status &
					    (L2_FHDR_ERRORS_TCP_XSUM |
					    L2_FHDR_ERRORS_UDP_XSUM)) == 0) {
						m->m_pkthdr.csum_data =
						    l2fhdr->l2_fhdr_tcp_udp_xsum;
						m->m_pkthdr.csum_flags |=
						    CSUM_DATA_VALID |
						    CSUM_PSEUDO_HDR;
					}
				}
			}

			ifp->if_ipackets++;
bce_rx_int_next_rx:
			sw_prod = NEXT_RX_BD(sw_prod);
		}

		sw_cons = NEXT_RX_BD(sw_cons);

		/* If we have a packet, pass it up the stack */
		if (m) {
			if (status & L2_FHDR_STATUS_L2_VLAN_TAG) {
				m->m_flags |= M_VLANTAG;
				m->m_pkthdr.ether_vlantag =
				    l2fhdr->l2_fhdr_vlan_tag;
			}
			ifp->if_input(ifp, m);
		}
	}

	/* Commit the working copies back to the softc. */
	sc->rx_cons = sw_cons;
	sc->rx_prod = sw_prod;
	sc->rx_prod_bseq = sw_prod_bseq;

	/* Tell the chip how far the producer index has advanced. */
	REG_WR16(sc, MB_GET_CID_ADDR(RX_CID) + BCE_L2MQ_RX_HOST_BDIDX,
	    sc->rx_prod);
	REG_WR(sc, MB_GET_CID_ADDR(RX_CID) + BCE_L2MQ_RX_HOST_BSEQ,
	    sc->rx_prod_bseq);
}


/****************************************************************************/
/* Reads the transmit consumer value from the status block (skipping over   */
/* chain page pointer if necessary).                                        */
/*                                                                          */
/* Returns:                                                                 */
/*   hw_cons                                                                */
/****************************************************************************/
static __inline uint16_t
bce_get_hw_tx_cons(struct bce_softc *sc)
{
	uint16_t hw_cons = sc->status_block->status_tx_quick_consumer_index0;

	/* Step past a next-page pointer slot, as in bce_get_hw_rx_cons(). */
	if ((hw_cons & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
		hw_cons++;
	return hw_cons;
}


/****************************************************************************/
/* Handles transmit completion interrupt events.                            */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
 */
/****************************************************************************/
static void
bce_tx_intr(struct bce_softc *sc, uint16_t hw_tx_cons)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint16_t sw_tx_cons, sw_tx_chain_cons;

	ASSERT_SERIALIZED(ifp->if_serializer);

	/* Get the hardware's view of the TX consumer index. */
	sw_tx_cons = sc->tx_cons;

	/* Cycle through any completed TX chain page entries. */
	while (sw_tx_cons != hw_tx_cons) {
		sw_tx_chain_cons = TX_CHAIN_IDX(sc, sw_tx_cons);

		/*
		 * Free the associated mbuf. Remember
		 * that only the last tx_bd of a packet
		 * has an mbuf pointer and DMA map.
		 */
		if (sc->tx_mbuf_ptr[sw_tx_chain_cons] != NULL) {
			/* Unmap the mbuf. */
			bus_dmamap_unload(sc->tx_mbuf_tag,
			    sc->tx_mbuf_map[sw_tx_chain_cons]);

			/* Free the mbuf. */
			m_freem(sc->tx_mbuf_ptr[sw_tx_chain_cons]);
			sc->tx_mbuf_ptr[sw_tx_chain_cons] = NULL;

			ifp->if_opackets++;
		}

		sc->used_tx_bd--;
		sw_tx_cons = NEXT_TX_BD(sw_tx_cons);
	}

	if (sc->used_tx_bd == 0) {
		/* Clear the TX timeout timer. */
		ifp->if_timer = 0;
	}

	/* Clear the tx hardware queue full flag. */
	if (sc->max_tx_bd - sc->used_tx_bd >= BCE_TX_SPARE_SPACE)
		ifq_clr_oactive(&ifp->if_snd);
	sc->tx_cons = sw_tx_cons;
}


/****************************************************************************/
/* Disables interrupt generation.                                           */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_disable_intr(struct bce_softc *sc)
{
	/* Mask interrupts; the read-back flushes the posted write. */
	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, BCE_PCICFG_INT_ACK_CMD_MASK_INT);
	REG_RD(sc, BCE_PCICFG_INT_ACK_CMD);

	/* Stop and reset the MSI-loss detection state. */
	callout_stop(&sc->bce_ckmsi_callout);
	sc->bce_msi_maylose = FALSE;
	sc->bce_check_rx_cons = 0;
	sc->bce_check_tx_cons = 0;
	sc->bce_check_status_idx = 0xffff;

	sc->bce_npoll.ifpc_stcount = 0;

	lwkt_serialize_handler_disable(sc->arpcom.ac_if.if_serializer);
}


/****************************************************************************/
/* Enables interrupt generation.                                            */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_enable_intr(struct bce_softc *sc)
{
	lwkt_serialize_handler_enable(sc->arpcom.ac_if.if_serializer);

	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
	    BCE_PCICFG_INT_ACK_CMD_INDEX_VALID |
	    BCE_PCICFG_INT_ACK_CMD_MASK_INT | sc->last_status_idx);
	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
	    BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx);

	/* Request an immediate coalescing event so any pending work is
	 * serviced right away. */
	REG_WR(sc, BCE_HC_COMMAND, sc->hc_command | BCE_HC_COMMAND_COAL_NOW);

	if (sc->bce_flags & BCE_CHECK_MSI_FLAG) {
		/* (Re)arm the periodic MSI-loss check. */
		sc->bce_msi_maylose = FALSE;
		sc->bce_check_rx_cons = 0;
		sc->bce_check_tx_cons = 0;
		sc->bce_check_status_idx = 0xffff;

		if (bootverbose)
			if_printf(&sc->arpcom.ac_if, "check msi\n");

		callout_reset_bycpu(&sc->bce_ckmsi_callout, BCE_MSI_CKINTVL,
		    bce_check_msi, sc, sc->bce_intr_cpuid);
	}
}


/****************************************************************************/
/* Reenables interrupt generation during interrupt handling.                */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
 */
/****************************************************************************/
static void
bce_reenable_intr(struct bce_softc *sc)
{
	/* Legacy INTx additionally needs the mask bit written first. */
	if (sc->bce_irq_type == PCI_INTR_TYPE_LEGACY) {
		REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
		    BCE_PCICFG_INT_ACK_CMD_INDEX_VALID |
		    BCE_PCICFG_INT_ACK_CMD_MASK_INT | sc->last_status_idx);
	}
	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
	    BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx);
}


/****************************************************************************/
/* Handles controller initialization.                                       */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_init(void *xsc)
{
	struct bce_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t ether_mtu;
	int error;

	ASSERT_SERIALIZED(ifp->if_serializer);

	/* Check if the driver is still running and bail out if it is. */
	if (ifp->if_flags & IFF_RUNNING)
		return;

	bce_stop(sc);

	error = bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
	if (error) {
		if_printf(ifp, "Controller reset failed!\n");
		goto back;
	}

	error = bce_chipinit(sc);
	if (error) {
		if_printf(ifp, "Controller initialization failed!\n");
		goto back;
	}

	error = bce_blockinit(sc);
	if (error) {
		if_printf(ifp, "Block initialization failed!\n");
		goto back;
	}

	/* Load our MAC address. */
	bcopy(IF_LLADDR(ifp), sc->eaddr, ETHER_ADDR_LEN);
	bce_set_mac_addr(sc);

	/* Calculate and program the Ethernet MTU size. */
	ether_mtu = ETHER_HDR_LEN + EVL_ENCAPLEN + ifp->if_mtu + ETHER_CRC_LEN;

	DBPRINT(sc, BCE_INFO, "%s(): setting mtu = %d\n", __func__, ether_mtu);

	/*
	 * Program the mtu, enabling jumbo frame
	 * support if necessary.  Also set the mbuf
	 * allocation count for RX frames.
	 */
	if (ether_mtu > ETHER_MAX_LEN + EVL_ENCAPLEN) {
#ifdef notyet
		REG_WR(sc, BCE_EMAC_RX_MTU_SIZE,
		    min(ether_mtu, BCE_MAX_JUMBO_ETHER_MTU) |
		    BCE_EMAC_RX_MTU_SIZE_JUMBO_ENA);
		sc->mbuf_alloc_size = MJUM9BYTES;
#else
		panic("jumbo buffer is not supported yet");
#endif
	} else {
		REG_WR(sc, BCE_EMAC_RX_MTU_SIZE, ether_mtu);
		sc->mbuf_alloc_size = MCLBYTES;
	}

	/* Calculate the RX Ethernet frame size for rx_bd's. */
	sc->max_frame_size = sizeof(struct l2_fhdr) + 2 + ether_mtu + 8;

	DBPRINT(sc, BCE_INFO,
	    "%s(): mclbytes = %d, mbuf_alloc_size = %d, "
	    "max_frame_size = %d\n",
	    __func__, (int)MCLBYTES, sc->mbuf_alloc_size,
	    sc->max_frame_size);

	/* Program appropriate promiscuous/multicast filtering. */
	bce_set_rx_mode(sc);

	/* Init RX buffer descriptor chain. */
	bce_init_rx_chain(sc);	/* XXX return value */

	/* Init TX buffer descriptor chain. */
	bce_init_tx_chain(sc);	/* XXX return value */

#ifdef IFPOLL_ENABLE
	/* Disable interrupts if we are polling. */
	if (ifp->if_flags & IFF_NPOLLING) {
		bce_disable_intr(sc);

		REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
		    (1 << 16) | sc->bce_rx_quick_cons_trip);
		REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
		    (1 << 16) | sc->bce_tx_quick_cons_trip);
	} else
#endif
	/* Enable host interrupts. */
	bce_enable_intr(sc);

	bce_ifmedia_upd(ifp);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	callout_reset_bycpu(&sc->bce_tick_callout, hz, bce_tick, sc,
	    sc->bce_intr_cpuid);
back:
	if (error)
		bce_stop(sc);
}


/****************************************************************************/
/* Initialize the controller just enough so that any management firmware    */
/* running on the device will continue to operate correctly.                */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_mgmt_init(struct bce_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;

	/* Bail out if management firmware is not running. */
	if (!(sc->bce_flags & BCE_MFW_ENABLE_FLAG))
		return;

	/* Enable all critical blocks in the MAC. */
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
		REG_WR(sc, BCE_MISC_ENABLE_SET_BITS,
		    BCE_MISC_ENABLE_DEFAULT_XI);
	} else {
		REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, BCE_MISC_ENABLE_DEFAULT);
	}
	REG_RD(sc, BCE_MISC_ENABLE_SET_BITS);
	DELAY(20);

	bce_ifmedia_upd(ifp);
}


/****************************************************************************/
/* Encapsulates an mbuf cluster into the tx_bd chain structure and makes    */
/* the memory visible to the controller.                                    */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.
 */
/****************************************************************************/
static int
bce_encap(struct bce_softc *sc, struct mbuf **m_head, int *nsegs_used)
{
	bus_dma_segment_t segs[BCE_MAX_SEGMENTS];
	bus_dmamap_t map, tmp_map;
	struct mbuf *m0 = *m_head;
	struct tx_bd *txbd = NULL;
	uint16_t vlan_tag = 0, flags = 0, mss = 0;
	uint16_t chain_prod, chain_prod_start, prod;
	uint32_t prod_bseq;
	int i, error, maxsegs, nsegs;

	/* Transfer any checksum offload flags to the bd. */
	if (m0->m_pkthdr.csum_flags & CSUM_TSO) {
		error = bce_tso_setup(sc, m_head, &flags, &mss);
		if (error)
			return ENOBUFS;
		m0 = *m_head;
	} else if (m0->m_pkthdr.csum_flags & BCE_CSUM_FEATURES) {
		if (m0->m_pkthdr.csum_flags & CSUM_IP)
			flags |= TX_BD_FLAGS_IP_CKSUM;
		if (m0->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
			flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}

	/* Transfer any VLAN tags to the bd. */
	if (m0->m_flags & M_VLANTAG) {
		flags |= TX_BD_FLAGS_VLAN_TAG;
		vlan_tag = m0->m_pkthdr.ether_vlantag;
	}

	prod = sc->tx_prod;
	chain_prod_start = chain_prod = TX_CHAIN_IDX(sc, prod);

	/* Map the mbuf into DMAable memory. */
	map = sc->tx_mbuf_map[chain_prod_start];

	/* Never offer more segments than the ring has free. */
	maxsegs = sc->max_tx_bd - sc->used_tx_bd;
	KASSERT(maxsegs >= BCE_TX_SPARE_SPACE,
	    ("not enough segments %d", maxsegs));
	if (maxsegs > BCE_MAX_SEGMENTS)
		maxsegs = BCE_MAX_SEGMENTS;

	/* Map the mbuf into our DMA address space. */
	error = bus_dmamap_load_mbuf_defrag(sc->tx_mbuf_tag, map, m_head,
	    segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
	if (error)
		goto back;
	bus_dmamap_sync(sc->tx_mbuf_tag, map, BUS_DMASYNC_PREWRITE);

	*nsegs_used += nsegs;

	/* Reset m0; the defrag above may have replaced the chain. */
	m0 = *m_head;

	/* prod points to an empty tx_bd at this point. */
	prod_bseq = sc->tx_prod_bseq;

	/*
	 * Cycle through each mbuf segment that makes up
	 * the outgoing frame, gathering the mapping info
	 * for that segment and creating a tx_bd to for
	 * the mbuf.
	 */
	for (i = 0; i < nsegs; i++) {
		chain_prod = TX_CHAIN_IDX(sc, prod);
		txbd = &sc->tx_bd_chain[TX_PAGE(chain_prod)][TX_IDX(chain_prod)];

		txbd->tx_bd_haddr_lo = htole32(BCE_ADDR_LO(segs[i].ds_addr));
		txbd->tx_bd_haddr_hi = htole32(BCE_ADDR_HI(segs[i].ds_addr));
		/*
		 * NOTE(review): mixes htole32() on the mss half with
		 * htole16() on the length half of a single 32-bit field;
		 * verify this yields the intended layout on big-endian
		 * hosts -- TODO confirm.
		 */
		txbd->tx_bd_mss_nbytes = htole32(mss << 16) |
		    htole16(segs[i].ds_len);
		txbd->tx_bd_vlan_tag = htole16(vlan_tag);
		txbd->tx_bd_flags = htole16(flags);

		prod_bseq += segs[i].ds_len;
		if (i == 0)
			txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_START);
		prod = NEXT_TX_BD(prod);
	}

	/* Set the END flag on the last TX buffer descriptor. */
	txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_END);

	/*
	 * Ensure that the mbuf pointer for this transmission
	 * is placed at the array index of the last
	 * descriptor in this chain.  This is done
	 * because a single map is used for all
	 * segments of the mbuf and we don't want to
	 * unload the map before all of the segments
	 * have been freed.
	 */
	sc->tx_mbuf_ptr[chain_prod] = m0;

	/* Swap the start slot's (loaded) map into the last slot. */
	tmp_map = sc->tx_mbuf_map[chain_prod];
	sc->tx_mbuf_map[chain_prod] = map;
	sc->tx_mbuf_map[chain_prod_start] = tmp_map;

	sc->used_tx_bd += nsegs;

	/* prod points to the next free tx_bd at this point. */
	sc->tx_prod = prod;
	sc->tx_prod_bseq = prod_bseq;
back:
	if (error) {
		m_freem(*m_head);
		*m_head = NULL;
	}
	return error;
}


/****************************************************************************/
/* Main transmit routine when called from another routine with a lock.      */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
 */
/****************************************************************************/
static void
bce_start(struct ifnet *ifp)
{
	struct bce_softc *sc = ifp->if_softc;
	int count = 0;		/* BDs queued since last doorbell write */

	ASSERT_SERIALIZED(ifp->if_serializer);

	/* If there's no link or the transmit queue is empty then just exit. */
	if (!sc->bce_link) {
		ifq_purge(&ifp->if_snd);
		return;
	}

	if ((ifp->if_flags & IFF_RUNNING) == 0 || ifq_is_oactive(&ifp->if_snd))
		return;

	for (;;) {
		struct mbuf *m_head;

		/*
		 * We keep BCE_TX_SPARE_SPACE entries, so bce_encap() is
		 * unlikely to fail.
		 */
		if (sc->max_tx_bd - sc->used_tx_bd < BCE_TX_SPARE_SPACE) {
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		/* Check for any frames to send. */
		m_head = ifq_dequeue(&ifp->if_snd, NULL);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, place the mbuf back at the
		 * head of the queue and set the OACTIVE flag
		 * to wait for the NIC to drain the chain.
		 */
		if (bce_encap(sc, &m_head, &count)) {
			ifp->if_oerrors++;
			if (sc->used_tx_bd == 0) {
				/* Nothing in flight; retry with next frame. */
				continue;
			} else {
				ifq_set_oactive(&ifp->if_snd);
				break;
			}
		}

		if (count >= sc->tx_wreg) {
			/*
			 * Start the transmit.  Batch doorbell writes: only
			 * touch the host producer index/bseq mailboxes every
			 * tx_wreg segments to reduce register write overhead.
			 */
			REG_WR16(sc, MB_GET_CID_ADDR(TX_CID) +
			    BCE_L2CTX_TX_HOST_BIDX, sc->tx_prod);
			REG_WR(sc, MB_GET_CID_ADDR(TX_CID) +
			    BCE_L2CTX_TX_HOST_BSEQ, sc->tx_prod_bseq);
			count = 0;
		}

		/* Send a copy of the frame to any BPF listeners. */
		ETHER_BPF_MTAP(ifp, m_head);

		/* Set the tx timeout. */
		ifp->if_timer = BCE_TX_TIMEOUT;
	}
	if (count > 0) {
		/* Start the transmit: flush any remaining batched BDs. */
		REG_WR16(sc, MB_GET_CID_ADDR(TX_CID) + BCE_L2CTX_TX_HOST_BIDX,
		    sc->tx_prod);
		REG_WR(sc, MB_GET_CID_ADDR(TX_CID) + BCE_L2CTX_TX_HOST_BSEQ,
		    sc->tx_prod_bseq);
	}
}


/****************************************************************************/
/* Handles any IOCTL calls from the operating system.                       */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
static int
bce_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
{
	struct bce_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct mii_data *mii;
	int mask, error = 0;

	ASSERT_SERIALIZED(ifp->if_serializer);

	switch(command) {
	case SIOCSIFMTU:
		/* Check that the MTU setting is supported. */
		if (ifr->ifr_mtu < BCE_MIN_MTU ||
#ifdef notyet
		    ifr->ifr_mtu > BCE_MAX_JUMBO_MTU
#else
		    ifr->ifr_mtu > ETHERMTU
#endif
		    ) {
			error = EINVAL;
			break;
		}

		DBPRINT(sc, BCE_INFO, "Setting new MTU of %d\n", ifr->ifr_mtu);

		ifp->if_mtu = ifr->ifr_mtu;
		ifp->if_flags &= ~IFF_RUNNING;	/* Force reinitialize */
		bce_init(sc);
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING) {
				/*
				 * Only reprogram the RX filter if the
				 * filter-related flags actually changed.
				 */
				mask = ifp->if_flags ^ sc->bce_if_flags;

				if (mask & (IFF_PROMISC | IFF_ALLMULTI))
					bce_set_rx_mode(sc);
			} else {
				bce_init(sc);
			}
		} else if (ifp->if_flags & IFF_RUNNING) {
			bce_stop(sc);

			/* If MFW is running, restart the controller a bit. */
			if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) {
				bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
				bce_chipinit(sc);
				bce_mgmt_init(sc);
			}
		}
		sc->bce_if_flags = ifp->if_flags;
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING)
			bce_set_rx_mode(sc);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		DBPRINT(sc, BCE_VERBOSE, "bce_phy_flags = 0x%08X\n",
			sc->bce_phy_flags);
		DBPRINT(sc, BCE_VERBOSE, "Copper media set/get\n");

		/* Hand media requests to the PHY layer. */
		mii = device_get_softc(sc->bce_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;

	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		DBPRINT(sc, BCE_INFO, "Received SIOCSIFCAP = 0x%08X\n",
			(uint32_t) mask);

		if (mask & IFCAP_HWCSUM) {
			ifp->if_capenable ^= (mask & IFCAP_HWCSUM);
			if (ifp->if_capenable & IFCAP_TXCSUM)
				ifp->if_hwassist |= BCE_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~BCE_CSUM_FEATURES;
		}
		if (mask & IFCAP_TSO) {
			ifp->if_capenable ^= IFCAP_TSO;
			if (ifp->if_capenable & IFCAP_TSO)
				ifp->if_hwassist |= CSUM_TSO;
			else
				ifp->if_hwassist &= ~CSUM_TSO;
		}
		break;

	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}
	return error;
}


/****************************************************************************/
/* Transmit timeout handler.                                                */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_watchdog(struct ifnet *ifp)
{
	struct bce_softc *sc = ifp->if_softc;

	ASSERT_SERIALIZED(ifp->if_serializer);

	DBRUN(BCE_VERBOSE_SEND,
	      bce_dump_driver_state(sc);
	      bce_dump_status_block(sc));

	/*
	 * If we are in this routine because of pause frames, then
	 * don't reset the hardware.
	 */
	if (REG_RD(sc, BCE_EMAC_TX_STATUS) & BCE_EMAC_TX_STATUS_XOFFED)
		return;

	if_printf(ifp, "Watchdog timeout occurred, resetting!\n");

	/* DBRUN(BCE_FATAL, bce_breakpoint(sc)); */

	ifp->if_flags &= ~IFF_RUNNING;	/* Force reinitialize */
	bce_init(sc);

	ifp->if_oerrors++;

	/* Restart transmission of anything still queued. */
	if (!ifq_is_empty(&ifp->if_snd))
		if_devstart(ifp);
}


#ifdef IFPOLL_ENABLE

/*
 * Polling-mode work loop: performs the same status-block processing as the
 * interrupt handler, but invoked from the ifpoll framework.  The attention
 * bits are only sampled every ifpc_stfrac polls to limit overhead.
 */
static void
bce_npoll_compat(struct ifnet *ifp, void *arg __unused, int count)
{
	struct bce_softc *sc = ifp->if_softc;
	struct status_block *sblk = sc->status_block;
	uint16_t hw_tx_cons, hw_rx_cons;

	ASSERT_SERIALIZED(ifp->if_serializer);

	/*
	 * Save the status block index value for use when enabling
	 * the interrupt.
	 */
	sc->last_status_idx = sblk->status_idx;

	/* Make sure status index is extracted before rx/tx cons */
	cpu_lfence();

	if (sc->bce_npoll.ifpc_stcount-- == 0) {
		uint32_t status_attn_bits;

		sc->bce_npoll.ifpc_stcount = sc->bce_npoll.ifpc_stfrac;

		status_attn_bits = sblk->status_attn_bits;

		/* Was it a link change interrupt? */
		if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
		    (sblk->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE))
			bce_phy_intr(sc);

		/*
		 * Clear any transient status updates during link state change.
		 */
		REG_WR(sc, BCE_HC_COMMAND,
		    sc->hc_command | BCE_HC_COMMAND_COAL_NOW_WO_INT);
		REG_RD(sc, BCE_HC_COMMAND);

		/*
		 * If any other attention is asserted then the chip is toast.
		 */
		if ((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) !=
		    (sblk->status_attn_bits_ack &
		     ~STATUS_ATTN_BITS_LINK_STATE)) {
			if_printf(ifp, "Fatal attention detected: 0x%08X\n",
			    sblk->status_attn_bits);
			bce_init(sc);
			return;
		}
	}

	hw_rx_cons = bce_get_hw_rx_cons(sc);
	hw_tx_cons = bce_get_hw_tx_cons(sc);

	/* Check for any completed RX frames. */
	if (hw_rx_cons != sc->rx_cons)
		bce_rx_intr(sc, count, hw_rx_cons);

	/* Check for any completed TX frames. */
	if (hw_tx_cons != sc->tx_cons)
		bce_tx_intr(sc, hw_tx_cons);

	if (sc->bce_coalchg_mask)
		bce_coal_change(sc);

	/* Check for new frames to transmit. */
	if (!ifq_is_empty(&ifp->if_snd))
		if_devstart(ifp);
}

/*
 * Register/deregister the polling handler with the ifpoll framework.
 * info != NULL means polling is being enabled; NULL means disabled.
 * Coalescing trip points are reprogrammed accordingly (polling uses a
 * fixed count of 1 with no interrupt-mode trip values).
 */
static void
bce_npoll(struct ifnet *ifp, struct ifpoll_info *info)
{
	struct bce_softc *sc = ifp->if_softc;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if (info != NULL) {
		int cpuid = sc->bce_npoll.ifpc_cpuid;

		info->ifpi_rx[cpuid].poll_func = bce_npoll_compat;
		info->ifpi_rx[cpuid].arg = NULL;
		info->ifpi_rx[cpuid].serializer = ifp->if_serializer;

		if (ifp->if_flags & IFF_RUNNING) {
			bce_disable_intr(sc);

			REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
			    (1 << 16) | sc->bce_rx_quick_cons_trip);
			REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
			    (1 << 16) | sc->bce_tx_quick_cons_trip);
		}
		ifq_set_cpuid(&ifp->if_snd, cpuid);
	} else {
		if (ifp->if_flags & IFF_RUNNING) {
			bce_enable_intr(sc);

			REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
			    (sc->bce_tx_quick_cons_trip_int << 16) |
			    sc->bce_tx_quick_cons_trip);
			REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
			    (sc->bce_rx_quick_cons_trip_int << 16) |
			    sc->bce_rx_quick_cons_trip);
		}
		ifq_set_cpuid(&ifp->if_snd, sc->bce_intr_cpuid);
	}
}

#endif	/* IFPOLL_ENABLE */


/*
 * Interrupt handler.
5325 */ 5326 /****************************************************************************/ 5327 /* Main interrupt entry point. Verifies that the controller generated the */ 5328 /* interrupt and then calls a separate routine for handle the various */ 5329 /* interrupt causes (PHY, TX, RX). */ 5330 /* */ 5331 /* Returns: */ 5332 /* 0 for success, positive value for failure. */ 5333 /****************************************************************************/ 5334 static void 5335 bce_intr(struct bce_softc *sc) 5336 { 5337 struct ifnet *ifp = &sc->arpcom.ac_if; 5338 struct status_block *sblk; 5339 uint16_t hw_rx_cons, hw_tx_cons; 5340 uint32_t status_attn_bits; 5341 5342 ASSERT_SERIALIZED(ifp->if_serializer); 5343 5344 sblk = sc->status_block; 5345 5346 /* 5347 * Save the status block index value for use during 5348 * the next interrupt. 5349 */ 5350 sc->last_status_idx = sblk->status_idx; 5351 5352 /* Make sure status index is extracted before rx/tx cons */ 5353 cpu_lfence(); 5354 5355 /* Check if the hardware has finished any work. */ 5356 hw_rx_cons = bce_get_hw_rx_cons(sc); 5357 hw_tx_cons = bce_get_hw_tx_cons(sc); 5358 5359 status_attn_bits = sblk->status_attn_bits; 5360 5361 /* Was it a link change interrupt? */ 5362 if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) != 5363 (sblk->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE)) { 5364 bce_phy_intr(sc); 5365 5366 /* 5367 * Clear any transient status updates during link state 5368 * change. 5369 */ 5370 REG_WR(sc, BCE_HC_COMMAND, 5371 sc->hc_command | BCE_HC_COMMAND_COAL_NOW_WO_INT); 5372 REG_RD(sc, BCE_HC_COMMAND); 5373 } 5374 5375 /* 5376 * If any other attention is asserted then 5377 * the chip is toast. 
5378 */ 5379 if ((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) != 5380 (sblk->status_attn_bits_ack & ~STATUS_ATTN_BITS_LINK_STATE)) { 5381 if_printf(ifp, "Fatal attention detected: 0x%08X\n", 5382 sblk->status_attn_bits); 5383 bce_init(sc); 5384 return; 5385 } 5386 5387 /* Check for any completed RX frames. */ 5388 if (hw_rx_cons != sc->rx_cons) 5389 bce_rx_intr(sc, -1, hw_rx_cons); 5390 5391 /* Check for any completed TX frames. */ 5392 if (hw_tx_cons != sc->tx_cons) 5393 bce_tx_intr(sc, hw_tx_cons); 5394 5395 /* Re-enable interrupts. */ 5396 bce_reenable_intr(sc); 5397 5398 if (sc->bce_coalchg_mask) 5399 bce_coal_change(sc); 5400 5401 /* Handle any frames that arrived while handling the interrupt. */ 5402 if (!ifq_is_empty(&ifp->if_snd)) 5403 if_devstart(ifp); 5404 } 5405 5406 static void 5407 bce_intr_legacy(void *xsc) 5408 { 5409 struct bce_softc *sc = xsc; 5410 struct status_block *sblk; 5411 5412 sblk = sc->status_block; 5413 5414 /* 5415 * If the hardware status block index matches the last value 5416 * read by the driver and we haven't asserted our interrupt 5417 * then there's nothing to do. 5418 */ 5419 if (sblk->status_idx == sc->last_status_idx && 5420 (REG_RD(sc, BCE_PCICFG_MISC_STATUS) & 5421 BCE_PCICFG_MISC_STATUS_INTA_VALUE)) 5422 return; 5423 5424 /* Ack the interrupt and stop others from occuring. */ 5425 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, 5426 BCE_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM | 5427 BCE_PCICFG_INT_ACK_CMD_MASK_INT); 5428 5429 /* 5430 * Read back to deassert IRQ immediately to avoid too 5431 * many spurious interrupts. 5432 */ 5433 REG_RD(sc, BCE_PCICFG_INT_ACK_CMD); 5434 5435 bce_intr(sc); 5436 } 5437 5438 static void 5439 bce_intr_msi(void *xsc) 5440 { 5441 struct bce_softc *sc = xsc; 5442 5443 /* Ack the interrupt and stop others from occuring. 
*/ 5444 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, 5445 BCE_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM | 5446 BCE_PCICFG_INT_ACK_CMD_MASK_INT); 5447 5448 bce_intr(sc); 5449 } 5450 5451 static void 5452 bce_intr_msi_oneshot(void *xsc) 5453 { 5454 bce_intr(xsc); 5455 } 5456 5457 5458 /****************************************************************************/ 5459 /* Programs the various packet receive modes (broadcast and multicast). */ 5460 /* */ 5461 /* Returns: */ 5462 /* Nothing. */ 5463 /****************************************************************************/ 5464 static void 5465 bce_set_rx_mode(struct bce_softc *sc) 5466 { 5467 struct ifnet *ifp = &sc->arpcom.ac_if; 5468 struct ifmultiaddr *ifma; 5469 uint32_t hashes[NUM_MC_HASH_REGISTERS] = { 0, 0, 0, 0, 0, 0, 0, 0 }; 5470 uint32_t rx_mode, sort_mode; 5471 int h, i; 5472 5473 ASSERT_SERIALIZED(ifp->if_serializer); 5474 5475 /* Initialize receive mode default settings. */ 5476 rx_mode = sc->rx_mode & 5477 ~(BCE_EMAC_RX_MODE_PROMISCUOUS | 5478 BCE_EMAC_RX_MODE_KEEP_VLAN_TAG); 5479 sort_mode = 1 | BCE_RPM_SORT_USER0_BC_EN; 5480 5481 /* 5482 * ASF/IPMI/UMP firmware requires that VLAN tag stripping 5483 * be enbled. 5484 */ 5485 if (!(BCE_IF_CAPABILITIES & IFCAP_VLAN_HWTAGGING) && 5486 !(sc->bce_flags & BCE_MFW_ENABLE_FLAG)) 5487 rx_mode |= BCE_EMAC_RX_MODE_KEEP_VLAN_TAG; 5488 5489 /* 5490 * Check for promiscuous, all multicast, or selected 5491 * multicast address filtering. 5492 */ 5493 if (ifp->if_flags & IFF_PROMISC) { 5494 DBPRINT(sc, BCE_INFO, "Enabling promiscuous mode.\n"); 5495 5496 /* Enable promiscuous mode. */ 5497 rx_mode |= BCE_EMAC_RX_MODE_PROMISCUOUS; 5498 sort_mode |= BCE_RPM_SORT_USER0_PROM_EN; 5499 } else if (ifp->if_flags & IFF_ALLMULTI) { 5500 DBPRINT(sc, BCE_INFO, "Enabling all multicast mode.\n"); 5501 5502 /* Enable all multicast addresses. 
*/ 5503 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) { 5504 REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4), 5505 0xffffffff); 5506 } 5507 sort_mode |= BCE_RPM_SORT_USER0_MC_EN; 5508 } else { 5509 /* Accept one or more multicast(s). */ 5510 DBPRINT(sc, BCE_INFO, "Enabling selective multicast mode.\n"); 5511 5512 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 5513 if (ifma->ifma_addr->sa_family != AF_LINK) 5514 continue; 5515 h = ether_crc32_le( 5516 LLADDR((struct sockaddr_dl *)ifma->ifma_addr), 5517 ETHER_ADDR_LEN) & 0xFF; 5518 hashes[(h & 0xE0) >> 5] |= 1 << (h & 0x1F); 5519 } 5520 5521 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) { 5522 REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4), 5523 hashes[i]); 5524 } 5525 sort_mode |= BCE_RPM_SORT_USER0_MC_HSH_EN; 5526 } 5527 5528 /* Only make changes if the recive mode has actually changed. */ 5529 if (rx_mode != sc->rx_mode) { 5530 DBPRINT(sc, BCE_VERBOSE, "Enabling new receive mode: 0x%08X\n", 5531 rx_mode); 5532 5533 sc->rx_mode = rx_mode; 5534 REG_WR(sc, BCE_EMAC_RX_MODE, rx_mode); 5535 } 5536 5537 /* Disable and clear the exisitng sort before enabling a new sort. */ 5538 REG_WR(sc, BCE_RPM_SORT_USER0, 0x0); 5539 REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode); 5540 REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode | BCE_RPM_SORT_USER0_ENA); 5541 } 5542 5543 5544 /****************************************************************************/ 5545 /* Called periodically to updates statistics from the controllers */ 5546 /* statistics block. */ 5547 /* */ 5548 /* Returns: */ 5549 /* Nothing. 
 */
/****************************************************************************/
static void
bce_stats_update(struct bce_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct statistics_block *stats = sc->stats_block;

	DBPRINT(sc, BCE_EXCESSIVE, "Entering %s()\n", __func__);

	ASSERT_SERIALIZED(ifp->if_serializer);

	/*
	 * Certain controllers don't report carrier sense errors correctly.
	 * See errata E11_5708CA0_1165.
	 */
	if (!(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) &&
	    !(BCE_CHIP_ID(sc) == BCE_CHIP_ID_5708_A0)) {
		ifp->if_oerrors +=
		    (u_long)stats->stat_Dot3StatsCarrierSenseErrors;
	}

	/*
	 * Update the sysctl statistics from the hardware statistics.
	 * The 64-bit octet/packet counters are split into hi/lo 32-bit
	 * halves in the DMA'd statistics block and recombined here.
	 */
	sc->stat_IfHCInOctets =
	    ((uint64_t)stats->stat_IfHCInOctets_hi << 32) +
	    (uint64_t)stats->stat_IfHCInOctets_lo;

	sc->stat_IfHCInBadOctets =
	    ((uint64_t)stats->stat_IfHCInBadOctets_hi << 32) +
	    (uint64_t)stats->stat_IfHCInBadOctets_lo;

	sc->stat_IfHCOutOctets =
	    ((uint64_t)stats->stat_IfHCOutOctets_hi << 32) +
	    (uint64_t)stats->stat_IfHCOutOctets_lo;

	sc->stat_IfHCOutBadOctets =
	    ((uint64_t)stats->stat_IfHCOutBadOctets_hi << 32) +
	    (uint64_t)stats->stat_IfHCOutBadOctets_lo;

	sc->stat_IfHCInUcastPkts =
	    ((uint64_t)stats->stat_IfHCInUcastPkts_hi << 32) +
	    (uint64_t)stats->stat_IfHCInUcastPkts_lo;

	sc->stat_IfHCInMulticastPkts =
	    ((uint64_t)stats->stat_IfHCInMulticastPkts_hi << 32) +
	    (uint64_t)stats->stat_IfHCInMulticastPkts_lo;

	sc->stat_IfHCInBroadcastPkts =
	    ((uint64_t)stats->stat_IfHCInBroadcastPkts_hi << 32) +
	    (uint64_t)stats->stat_IfHCInBroadcastPkts_lo;

	sc->stat_IfHCOutUcastPkts =
	    ((uint64_t)stats->stat_IfHCOutUcastPkts_hi << 32) +
	    (uint64_t)stats->stat_IfHCOutUcastPkts_lo;

	sc->stat_IfHCOutMulticastPkts =
	    ((uint64_t)stats->stat_IfHCOutMulticastPkts_hi << 32) +
	    (uint64_t)stats->stat_IfHCOutMulticastPkts_lo;

	sc->stat_IfHCOutBroadcastPkts =
	    ((uint64_t)stats->stat_IfHCOutBroadcastPkts_hi << 32) +
	    (uint64_t)stats->stat_IfHCOutBroadcastPkts_lo;

	/* The remaining counters are plain 32-bit copies. */
	sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors =
	    stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors;

	sc->stat_Dot3StatsCarrierSenseErrors =
	    stats->stat_Dot3StatsCarrierSenseErrors;

	sc->stat_Dot3StatsFCSErrors =
	    stats->stat_Dot3StatsFCSErrors;

	sc->stat_Dot3StatsAlignmentErrors =
	    stats->stat_Dot3StatsAlignmentErrors;

	sc->stat_Dot3StatsSingleCollisionFrames =
	    stats->stat_Dot3StatsSingleCollisionFrames;

	sc->stat_Dot3StatsMultipleCollisionFrames =
	    stats->stat_Dot3StatsMultipleCollisionFrames;

	sc->stat_Dot3StatsDeferredTransmissions =
	    stats->stat_Dot3StatsDeferredTransmissions;

	sc->stat_Dot3StatsExcessiveCollisions =
	    stats->stat_Dot3StatsExcessiveCollisions;

	sc->stat_Dot3StatsLateCollisions =
	    stats->stat_Dot3StatsLateCollisions;

	sc->stat_EtherStatsCollisions =
	    stats->stat_EtherStatsCollisions;

	sc->stat_EtherStatsFragments =
	    stats->stat_EtherStatsFragments;

	sc->stat_EtherStatsJabbers =
	    stats->stat_EtherStatsJabbers;

	sc->stat_EtherStatsUndersizePkts =
	    stats->stat_EtherStatsUndersizePkts;

	sc->stat_EtherStatsOverrsizePkts =
	    stats->stat_EtherStatsOverrsizePkts;

	sc->stat_EtherStatsPktsRx64Octets =
	    stats->stat_EtherStatsPktsRx64Octets;

	sc->stat_EtherStatsPktsRx65Octetsto127Octets =
	    stats->stat_EtherStatsPktsRx65Octetsto127Octets;

	sc->stat_EtherStatsPktsRx128Octetsto255Octets =
	    stats->stat_EtherStatsPktsRx128Octetsto255Octets;

	sc->stat_EtherStatsPktsRx256Octetsto511Octets =
	    stats->stat_EtherStatsPktsRx256Octetsto511Octets;

	sc->stat_EtherStatsPktsRx512Octetsto1023Octets =
	    stats->stat_EtherStatsPktsRx512Octetsto1023Octets;

	sc->stat_EtherStatsPktsRx1024Octetsto1522Octets =
	    stats->stat_EtherStatsPktsRx1024Octetsto1522Octets;

	sc->stat_EtherStatsPktsRx1523Octetsto9022Octets =
	    stats->stat_EtherStatsPktsRx1523Octetsto9022Octets;

	sc->stat_EtherStatsPktsTx64Octets =
	    stats->stat_EtherStatsPktsTx64Octets;

	sc->stat_EtherStatsPktsTx65Octetsto127Octets =
	    stats->stat_EtherStatsPktsTx65Octetsto127Octets;

	sc->stat_EtherStatsPktsTx128Octetsto255Octets =
	    stats->stat_EtherStatsPktsTx128Octetsto255Octets;

	sc->stat_EtherStatsPktsTx256Octetsto511Octets =
	    stats->stat_EtherStatsPktsTx256Octetsto511Octets;

	sc->stat_EtherStatsPktsTx512Octetsto1023Octets =
	    stats->stat_EtherStatsPktsTx512Octetsto1023Octets;

	sc->stat_EtherStatsPktsTx1024Octetsto1522Octets =
	    stats->stat_EtherStatsPktsTx1024Octetsto1522Octets;

	sc->stat_EtherStatsPktsTx1523Octetsto9022Octets =
	    stats->stat_EtherStatsPktsTx1523Octetsto9022Octets;

	sc->stat_XonPauseFramesReceived =
	    stats->stat_XonPauseFramesReceived;

	sc->stat_XoffPauseFramesReceived =
	    stats->stat_XoffPauseFramesReceived;

	sc->stat_OutXonSent =
	    stats->stat_OutXonSent;

	sc->stat_OutXoffSent =
	    stats->stat_OutXoffSent;

	sc->stat_FlowControlDone =
	    stats->stat_FlowControlDone;

	sc->stat_MacControlFramesReceived =
	    stats->stat_MacControlFramesReceived;

	sc->stat_XoffStateEntered =
	    stats->stat_XoffStateEntered;

	sc->stat_IfInFramesL2FilterDiscards =
	    stats->stat_IfInFramesL2FilterDiscards;

	sc->stat_IfInRuleCheckerDiscards =
	    stats->stat_IfInRuleCheckerDiscards;

	sc->stat_IfInFTQDiscards =
	    stats->stat_IfInFTQDiscards;

	sc->stat_IfInMBUFDiscards =
	    stats->stat_IfInMBUFDiscards;

	sc->stat_IfInRuleCheckerP4Hit =
	    stats->stat_IfInRuleCheckerP4Hit;

	sc->stat_CatchupInRuleCheckerDiscards =
	    stats->stat_CatchupInRuleCheckerDiscards;

	sc->stat_CatchupInFTQDiscards =
	    stats->stat_CatchupInFTQDiscards;

	sc->stat_CatchupInMBUFDiscards =
	    stats->stat_CatchupInMBUFDiscards;

	sc->stat_CatchupInRuleCheckerP4Hit =
	    stats->stat_CatchupInRuleCheckerP4Hit;

	/* NOTE(review): 0x120084 appears to be the COM "no buffers"
	 * counter in device memory -- confirm against chip docs. */
	sc->com_no_buffers = REG_RD_IND(sc, 0x120084);

	/*
	 * Update the interface statistics from the
	 * hardware statistics.
	 */
	ifp->if_collisions = (u_long)sc->stat_EtherStatsCollisions;

	ifp->if_ierrors = (u_long)sc->stat_EtherStatsUndersizePkts +
	    (u_long)sc->stat_EtherStatsOverrsizePkts +
	    (u_long)sc->stat_IfInMBUFDiscards +
	    (u_long)sc->stat_Dot3StatsAlignmentErrors +
	    (u_long)sc->stat_Dot3StatsFCSErrors +
	    (u_long)sc->stat_IfInRuleCheckerDiscards +
	    (u_long)sc->stat_IfInFTQDiscards +
	    (u_long)sc->com_no_buffers;

	ifp->if_oerrors =
	    (u_long)sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors +
	    (u_long)sc->stat_Dot3StatsExcessiveCollisions +
	    (u_long)sc->stat_Dot3StatsLateCollisions;

	DBPRINT(sc, BCE_EXCESSIVE, "Exiting %s()\n", __func__);
}


/****************************************************************************/
/* Periodic function to notify the bootcode that the driver is still        */
/* present.                                                                 */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_pulse(void *xsc)
{
	struct bce_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t msg;

	lwkt_serialize_enter(ifp->if_serializer);

	/* Tell the firmware that the driver is still running. */
	msg = (uint32_t)++sc->bce_fw_drv_pulse_wr_seq;
	bce_shmem_wr(sc, BCE_DRV_PULSE_MB, msg);

	/* Update the bootcode condition. */
	sc->bc_state = bce_shmem_rd(sc, BCE_BC_STATE_CONDITION);

	/* Report whether the bootcode still knows the driver is running. */
	if (!sc->bce_drv_cardiac_arrest) {
		if (!(sc->bc_state & BCE_CONDITION_DRV_PRESENT)) {
			sc->bce_drv_cardiac_arrest = 1;
			if_printf(ifp, "Bootcode lost the driver pulse! "
			    "(bc_state = 0x%08X)\n", sc->bc_state);
		}
	} else {
		/*
		 * Not supported by all bootcode versions.
		 * (v5.0.11+ and v5.2.1+) Older bootcode
		 * will require the driver to reset the
		 * controller to clear this condition.
		 */
		if (sc->bc_state & BCE_CONDITION_DRV_PRESENT) {
			sc->bce_drv_cardiac_arrest = 0;
			if_printf(ifp, "Bootcode found the driver pulse! "
			    "(bc_state = 0x%08X)\n", sc->bc_state);
		}
	}

	/* Schedule the next pulse. */
	callout_reset_bycpu(&sc->bce_pulse_callout, hz, bce_pulse, sc,
	    sc->bce_intr_cpuid);

	lwkt_serialize_exit(ifp->if_serializer);
}


/****************************************************************************/
/* Periodic function to check whether MSI is lost                           */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
*/ 5829 /****************************************************************************/ 5830 static void 5831 bce_check_msi(void *xsc) 5832 { 5833 struct bce_softc *sc = xsc; 5834 struct ifnet *ifp = &sc->arpcom.ac_if; 5835 struct status_block *sblk = sc->status_block; 5836 5837 lwkt_serialize_enter(ifp->if_serializer); 5838 5839 KKASSERT(mycpuid == sc->bce_intr_cpuid); 5840 5841 if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) != IFF_RUNNING) { 5842 lwkt_serialize_exit(ifp->if_serializer); 5843 return; 5844 } 5845 5846 if (bce_get_hw_rx_cons(sc) != sc->rx_cons || 5847 bce_get_hw_tx_cons(sc) != sc->tx_cons || 5848 (sblk->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) != 5849 (sblk->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE)) { 5850 if (sc->bce_check_rx_cons == sc->rx_cons && 5851 sc->bce_check_tx_cons == sc->tx_cons && 5852 sc->bce_check_status_idx == sc->last_status_idx) { 5853 uint32_t msi_ctrl; 5854 5855 if (!sc->bce_msi_maylose) { 5856 sc->bce_msi_maylose = TRUE; 5857 goto done; 5858 } 5859 5860 msi_ctrl = REG_RD(sc, BCE_PCICFG_MSI_CONTROL); 5861 if (msi_ctrl & BCE_PCICFG_MSI_CONTROL_ENABLE) { 5862 if (bootverbose) 5863 if_printf(ifp, "lost MSI\n"); 5864 5865 REG_WR(sc, BCE_PCICFG_MSI_CONTROL, 5866 msi_ctrl & ~BCE_PCICFG_MSI_CONTROL_ENABLE); 5867 REG_WR(sc, BCE_PCICFG_MSI_CONTROL, msi_ctrl); 5868 5869 bce_intr_msi(sc); 5870 } else if (bootverbose) { 5871 if_printf(ifp, "MSI may be lost\n"); 5872 } 5873 } 5874 } 5875 sc->bce_msi_maylose = FALSE; 5876 sc->bce_check_rx_cons = sc->rx_cons; 5877 sc->bce_check_tx_cons = sc->tx_cons; 5878 sc->bce_check_status_idx = sc->last_status_idx; 5879 5880 done: 5881 callout_reset(&sc->bce_ckmsi_callout, BCE_MSI_CKINTVL, 5882 bce_check_msi, sc); 5883 lwkt_serialize_exit(ifp->if_serializer); 5884 } 5885 5886 5887 /****************************************************************************/ 5888 /* Periodic function to perform maintenance tasks. */ 5889 /* */ 5890 /* Returns: */ 5891 /* Nothing. 
*/ 5892 /****************************************************************************/ 5893 static void 5894 bce_tick_serialized(struct bce_softc *sc) 5895 { 5896 struct ifnet *ifp = &sc->arpcom.ac_if; 5897 struct mii_data *mii; 5898 5899 ASSERT_SERIALIZED(ifp->if_serializer); 5900 5901 /* Update the statistics from the hardware statistics block. */ 5902 bce_stats_update(sc); 5903 5904 /* Schedule the next tick. */ 5905 callout_reset_bycpu(&sc->bce_tick_callout, hz, bce_tick, sc, 5906 sc->bce_intr_cpuid); 5907 5908 /* If link is up already up then we're done. */ 5909 if (sc->bce_link) 5910 return; 5911 5912 mii = device_get_softc(sc->bce_miibus); 5913 mii_tick(mii); 5914 5915 /* Check if the link has come up. */ 5916 if ((mii->mii_media_status & IFM_ACTIVE) && 5917 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { 5918 sc->bce_link++; 5919 /* Now that link is up, handle any outstanding TX traffic. */ 5920 if (!ifq_is_empty(&ifp->if_snd)) 5921 if_devstart(ifp); 5922 } 5923 } 5924 5925 5926 static void 5927 bce_tick(void *xsc) 5928 { 5929 struct bce_softc *sc = xsc; 5930 struct ifnet *ifp = &sc->arpcom.ac_if; 5931 5932 lwkt_serialize_enter(ifp->if_serializer); 5933 bce_tick_serialized(sc); 5934 lwkt_serialize_exit(ifp->if_serializer); 5935 } 5936 5937 5938 #ifdef BCE_DEBUG 5939 /****************************************************************************/ 5940 /* Allows the driver state to be dumped through the sysctl interface. */ 5941 /* */ 5942 /* Returns: */ 5943 /* 0 for success, positive value for failure. 
*/ 5944 /****************************************************************************/ 5945 static int 5946 bce_sysctl_driver_state(SYSCTL_HANDLER_ARGS) 5947 { 5948 int error; 5949 int result; 5950 struct bce_softc *sc; 5951 5952 result = -1; 5953 error = sysctl_handle_int(oidp, &result, 0, req); 5954 5955 if (error || !req->newptr) 5956 return (error); 5957 5958 if (result == 1) { 5959 sc = (struct bce_softc *)arg1; 5960 bce_dump_driver_state(sc); 5961 } 5962 5963 return error; 5964 } 5965 5966 5967 /****************************************************************************/ 5968 /* Allows the hardware state to be dumped through the sysctl interface. */ 5969 /* */ 5970 /* Returns: */ 5971 /* 0 for success, positive value for failure. */ 5972 /****************************************************************************/ 5973 static int 5974 bce_sysctl_hw_state(SYSCTL_HANDLER_ARGS) 5975 { 5976 int error; 5977 int result; 5978 struct bce_softc *sc; 5979 5980 result = -1; 5981 error = sysctl_handle_int(oidp, &result, 0, req); 5982 5983 if (error || !req->newptr) 5984 return (error); 5985 5986 if (result == 1) { 5987 sc = (struct bce_softc *)arg1; 5988 bce_dump_hw_state(sc); 5989 } 5990 5991 return error; 5992 } 5993 5994 5995 /****************************************************************************/ 5996 /* Provides a sysctl interface to allows dumping the RX chain. */ 5997 /* */ 5998 /* Returns: */ 5999 /* 0 for success, positive value for failure. 
*/ 6000 /****************************************************************************/ 6001 static int 6002 bce_sysctl_dump_rx_chain(SYSCTL_HANDLER_ARGS) 6003 { 6004 int error; 6005 int result; 6006 struct bce_softc *sc; 6007 6008 result = -1; 6009 error = sysctl_handle_int(oidp, &result, 0, req); 6010 6011 if (error || !req->newptr) 6012 return (error); 6013 6014 if (result == 1) { 6015 sc = (struct bce_softc *)arg1; 6016 bce_dump_rx_chain(sc, 0, USABLE_RX_BD(sc)); 6017 } 6018 6019 return error; 6020 } 6021 6022 6023 /****************************************************************************/ 6024 /* Provides a sysctl interface to allows dumping the TX chain. */ 6025 /* */ 6026 /* Returns: */ 6027 /* 0 for success, positive value for failure. */ 6028 /****************************************************************************/ 6029 static int 6030 bce_sysctl_dump_tx_chain(SYSCTL_HANDLER_ARGS) 6031 { 6032 int error; 6033 int result; 6034 struct bce_softc *sc; 6035 6036 result = -1; 6037 error = sysctl_handle_int(oidp, &result, 0, req); 6038 6039 if (error || !req->newptr) 6040 return (error); 6041 6042 if (result == 1) { 6043 sc = (struct bce_softc *)arg1; 6044 bce_dump_tx_chain(sc, 0, USABLE_TX_BD(sc)); 6045 } 6046 6047 return error; 6048 } 6049 6050 6051 /****************************************************************************/ 6052 /* Provides a sysctl interface to allow reading arbitrary registers in the */ 6053 /* device. DO NOT ENABLE ON PRODUCTION SYSTEMS! */ 6054 /* */ 6055 /* Returns: */ 6056 /* 0 for success, positive value for failure. */ 6057 /****************************************************************************/ 6058 static int 6059 bce_sysctl_reg_read(SYSCTL_HANDLER_ARGS) 6060 { 6061 struct bce_softc *sc; 6062 int error; 6063 uint32_t val, result; 6064 6065 result = -1; 6066 error = sysctl_handle_int(oidp, &result, 0, req); 6067 if (error || (req->newptr == NULL)) 6068 return (error); 6069 6070 /* Make sure the register is accessible. 
*/ 6071 if (result < 0x8000) { 6072 sc = (struct bce_softc *)arg1; 6073 val = REG_RD(sc, result); 6074 if_printf(&sc->arpcom.ac_if, "reg 0x%08X = 0x%08X\n", 6075 result, val); 6076 } else if (result < 0x0280000) { 6077 sc = (struct bce_softc *)arg1; 6078 val = REG_RD_IND(sc, result); 6079 if_printf(&sc->arpcom.ac_if, "reg 0x%08X = 0x%08X\n", 6080 result, val); 6081 } 6082 return (error); 6083 } 6084 6085 6086 /****************************************************************************/ 6087 /* Provides a sysctl interface to allow reading arbitrary PHY registers in */ 6088 /* the device. DO NOT ENABLE ON PRODUCTION SYSTEMS! */ 6089 /* */ 6090 /* Returns: */ 6091 /* 0 for success, positive value for failure. */ 6092 /****************************************************************************/ 6093 static int 6094 bce_sysctl_phy_read(SYSCTL_HANDLER_ARGS) 6095 { 6096 struct bce_softc *sc; 6097 device_t dev; 6098 int error, result; 6099 uint16_t val; 6100 6101 result = -1; 6102 error = sysctl_handle_int(oidp, &result, 0, req); 6103 if (error || (req->newptr == NULL)) 6104 return (error); 6105 6106 /* Make sure the register is accessible. */ 6107 if (result < 0x20) { 6108 sc = (struct bce_softc *)arg1; 6109 dev = sc->bce_dev; 6110 val = bce_miibus_read_reg(dev, sc->bce_phy_addr, result); 6111 if_printf(&sc->arpcom.ac_if, 6112 "phy 0x%02X = 0x%04X\n", result, val); 6113 } 6114 return (error); 6115 } 6116 6117 6118 /****************************************************************************/ 6119 /* Provides a sysctl interface to forcing the driver to dump state and */ 6120 /* enter the debugger. DO NOT ENABLE ON PRODUCTION SYSTEMS! */ 6121 /* */ 6122 /* Returns: */ 6123 /* 0 for success, positive value for failure. 
 */
/****************************************************************************/
static int
bce_sysctl_breakpoint(SYSCTL_HANDLER_ARGS)
{
	int error;
	int result;
	struct bce_softc *sc;

	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);

	if (error || !req->newptr)
		return (error);

	/* A write of 1 dumps driver state and drops into the debugger. */
	if (result == 1) {
		sc = (struct bce_softc *)arg1;
		bce_breakpoint(sc);
	}

	return error;
}
#endif


/****************************************************************************/
/* Adds any sysctl parameters for tuning or debugging purposes.             */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_add_sysctls(struct bce_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *children;

	/* Create the hw.<unit> tree all nodes below hang off of. */
	sysctl_ctx_init(&sc->bce_sysctl_ctx);
	sc->bce_sysctl_tree = SYSCTL_ADD_NODE(&sc->bce_sysctl_ctx,
					      SYSCTL_STATIC_CHILDREN(_hw),
					      OID_AUTO,
					      device_get_nameunit(sc->bce_dev),
					      CTLFLAG_RD, 0, "");
	if (sc->bce_sysctl_tree == NULL) {
		device_printf(sc->bce_dev, "can't add sysctl node\n");
		return;
	}

	ctx = &sc->bce_sysctl_ctx;
	children = SYSCTL_CHILDREN(sc->bce_sysctl_tree);

	/* Interrupt coalescing tunables (writable). */
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_bds_int",
			CTLTYPE_INT | CTLFLAG_RW,
			sc, 0, bce_sysctl_tx_bds_int, "I",
			"Send max coalesced BD count during interrupt");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_bds",
			CTLTYPE_INT | CTLFLAG_RW,
			sc, 0, bce_sysctl_tx_bds, "I",
			"Send max coalesced BD count");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_ticks_int",
			CTLTYPE_INT | CTLFLAG_RW,
			sc, 0, bce_sysctl_tx_ticks_int, "I",
			"Send coalescing ticks during interrupt");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_ticks",
			CTLTYPE_INT | CTLFLAG_RW,
			sc, 0, bce_sysctl_tx_ticks, "I",
			"Send coalescing ticks");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_bds_int",
			CTLTYPE_INT | CTLFLAG_RW,
			sc, 0, bce_sysctl_rx_bds_int, "I",
			"Receive max coalesced BD count during interrupt");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_bds",
			CTLTYPE_INT | CTLFLAG_RW,
			sc, 0, bce_sysctl_rx_bds, "I",
			"Receive max coalesced BD count");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_ticks_int",
			CTLTYPE_INT | CTLFLAG_RW,
			sc, 0, bce_sysctl_rx_ticks_int, "I",
			"Receive coalescing ticks during interrupt");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_ticks",
			CTLTYPE_INT | CTLFLAG_RW,
			sc, 0, bce_sysctl_rx_ticks, "I",
			"Receive coalescing ticks");

	/* Ring geometry (read-only) and TX doorbell batching. */
	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_pages",
		CTLFLAG_RD, &sc->rx_pages, 0, "# of RX pages");
	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_pages",
		CTLFLAG_RD, &sc->tx_pages, 0, "# of TX pages");

	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_wreg",
		CTLFLAG_RW, &sc->tx_wreg, 0,
		"# segments before write to hardware registers");

#ifdef BCE_DEBUG
	/* Driver-maintained debug counters. */
	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
		"rx_low_watermark",
		CTLFLAG_RD, &sc->rx_low_watermark,
		0, "Lowest level of free rx_bd's");

	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
		"rx_empty_count",
		CTLFLAG_RD, &sc->rx_empty_count,
		0, "Number of times the RX chain was empty");

	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
		"tx_hi_watermark",
		CTLFLAG_RD, &sc->tx_hi_watermark,
		0, "Highest level of used tx_bd's");

	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
		"tx_full_count",
		CTLFLAG_RD, &sc->tx_full_count,
		0, "Number of times the TX chain was full");

	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
		"l2fhdr_status_errors",
		CTLFLAG_RD, &sc->l2fhdr_status_errors,
		0, "l2_fhdr status errors");

	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
		"unexpected_attentions",
		CTLFLAG_RD, &sc->unexpected_attentions,
		0, "unexpected attentions");

	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
		"lost_status_block_updates",
		CTLFLAG_RD, &sc->lost_status_block_updates,
		0, "lost status block updates");

	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
		"mbuf_alloc_failed",
		CTLFLAG_RD, &sc->mbuf_alloc_failed,
		0, "mbuf cluster allocation failures");
#endif

	/* Hardware statistics block counters (copied from the chip). */
	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
		"stat_IfHCInOctets",
		CTLFLAG_RD, &sc->stat_IfHCInOctets,
		"Bytes received");

	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
		"stat_IfHCInBadOctets",
		CTLFLAG_RD, &sc->stat_IfHCInBadOctets,
		"Bad bytes received");

	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
		"stat_IfHCOutOctets",
		CTLFLAG_RD, &sc->stat_IfHCOutOctets,
		"Bytes sent");

	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
		"stat_IfHCOutBadOctets",
		CTLFLAG_RD, &sc->stat_IfHCOutBadOctets,
		"Bad bytes sent");

	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
		"stat_IfHCInUcastPkts",
		CTLFLAG_RD, &sc->stat_IfHCInUcastPkts,
		"Unicast packets received");

	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
		"stat_IfHCInMulticastPkts",
		CTLFLAG_RD, &sc->stat_IfHCInMulticastPkts,
		"Multicast packets received");

	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
		"stat_IfHCInBroadcastPkts",
		CTLFLAG_RD, &sc->stat_IfHCInBroadcastPkts,
		"Broadcast packets received");

	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
		"stat_IfHCOutUcastPkts",
		CTLFLAG_RD, &sc->stat_IfHCOutUcastPkts,
		"Unicast packets sent");

	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
		"stat_IfHCOutMulticastPkts",
		CTLFLAG_RD, &sc->stat_IfHCOutMulticastPkts,
		"Multicast packets sent");

	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
		"stat_IfHCOutBroadcastPkts",
		CTLFLAG_RD, &sc->stat_IfHCOutBroadcastPkts,
		"Broadcast packets sent");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
		"stat_emac_tx_stat_dot3statsinternalmactransmiterrors",
		CTLFLAG_RD, &sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors,
		0, "Internal MAC transmit errors");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
		"stat_Dot3StatsCarrierSenseErrors",
		CTLFLAG_RD, &sc->stat_Dot3StatsCarrierSenseErrors,
		0, "Carrier sense errors");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
		"stat_Dot3StatsFCSErrors",
		CTLFLAG_RD, &sc->stat_Dot3StatsFCSErrors,
		0, "Frame check sequence errors");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
		"stat_Dot3StatsAlignmentErrors",
		CTLFLAG_RD, &sc->stat_Dot3StatsAlignmentErrors,
		0, "Alignment errors");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
		"stat_Dot3StatsSingleCollisionFrames",
		CTLFLAG_RD, &sc->stat_Dot3StatsSingleCollisionFrames,
		0, "Single Collision Frames");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
		"stat_Dot3StatsMultipleCollisionFrames",
		CTLFLAG_RD, &sc->stat_Dot3StatsMultipleCollisionFrames,
		0, "Multiple Collision Frames");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
		"stat_Dot3StatsDeferredTransmissions",
		CTLFLAG_RD, &sc->stat_Dot3StatsDeferredTransmissions,
		0, "Deferred Transmissions");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
		"stat_Dot3StatsExcessiveCollisions",
		CTLFLAG_RD, &sc->stat_Dot3StatsExcessiveCollisions,
		0, "Excessive Collisions");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
		"stat_Dot3StatsLateCollisions",
		CTLFLAG_RD, &sc->stat_Dot3StatsLateCollisions,
		0, "Late Collisions");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
		"stat_EtherStatsCollisions",
		CTLFLAG_RD, &sc->stat_EtherStatsCollisions,
		0, "Collisions");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
		"stat_EtherStatsFragments",
		CTLFLAG_RD, &sc->stat_EtherStatsFragments,
		0, "Fragments");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
		"stat_EtherStatsJabbers",
		CTLFLAG_RD, &sc->stat_EtherStatsJabbers,
		0, "Jabbers");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
		"stat_EtherStatsUndersizePkts",
		CTLFLAG_RD, &sc->stat_EtherStatsUndersizePkts,
		0, "Undersize packets");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
		"stat_EtherStatsOverrsizePkts",
		CTLFLAG_RD, &sc->stat_EtherStatsOverrsizePkts,
		0, "stat_EtherStatsOverrsizePkts");

	/* RX/TX size-binned packet counters. */
	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
		"stat_EtherStatsPktsRx64Octets",
		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx64Octets,
		0, "Bytes received in 64 byte packets");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
		"stat_EtherStatsPktsRx65Octetsto127Octets",
		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx65Octetsto127Octets,
		0, "Bytes received in 65 to 127 byte packets");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
		"stat_EtherStatsPktsRx128Octetsto255Octets",
		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx128Octetsto255Octets,
		0, "Bytes received in 128 to 255 byte packets");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
		"stat_EtherStatsPktsRx256Octetsto511Octets",
		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx256Octetsto511Octets,
		0, "Bytes received in 256 to 511 byte packets");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
		"stat_EtherStatsPktsRx512Octetsto1023Octets",
		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx512Octetsto1023Octets,
		0, "Bytes received in 512 to 1023 byte packets");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
		"stat_EtherStatsPktsRx1024Octetsto1522Octets",
		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1024Octetsto1522Octets,
		0, "Bytes received in 1024 t0 1522 byte packets");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
		"stat_EtherStatsPktsRx1523Octetsto9022Octets",
		CTLFLAG_RD,
		&sc->stat_EtherStatsPktsRx1523Octetsto9022Octets,
		0, "Bytes received in 1523 to 9022 byte packets");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
		"stat_EtherStatsPktsTx64Octets",
		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx64Octets,
		0, "Bytes sent in 64 byte packets");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
		"stat_EtherStatsPktsTx65Octetsto127Octets",
		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx65Octetsto127Octets,
		0, "Bytes sent in 65 to 127 byte packets");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
		"stat_EtherStatsPktsTx128Octetsto255Octets",
		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx128Octetsto255Octets,
		0, "Bytes sent in 128 to 255 byte packets");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
		"stat_EtherStatsPktsTx256Octetsto511Octets",
		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx256Octetsto511Octets,
		0, "Bytes sent in 256 to 511 byte packets");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
		"stat_EtherStatsPktsTx512Octetsto1023Octets",
		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx512Octetsto1023Octets,
		0, "Bytes sent in 512 to 1023 byte packets");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
		"stat_EtherStatsPktsTx1024Octetsto1522Octets",
		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1024Octetsto1522Octets,
		0, "Bytes sent in 1024 to 1522 byte packets");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
		"stat_EtherStatsPktsTx1523Octetsto9022Octets",
		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1523Octetsto9022Octets,
		0, "Bytes sent in 1523 to 9022 byte packets");

	/* Flow control / pause statistics. */
	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
		"stat_XonPauseFramesReceived",
		CTLFLAG_RD, &sc->stat_XonPauseFramesReceived,
		0, "XON pause frames receved");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
		"stat_XoffPauseFramesReceived",
		CTLFLAG_RD, &sc->stat_XoffPauseFramesReceived,
		0, "XOFF pause frames received");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
		"stat_OutXonSent",
		CTLFLAG_RD, &sc->stat_OutXonSent,
		0, "XON pause frames sent");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
		"stat_OutXoffSent",
		CTLFLAG_RD, &sc->stat_OutXoffSent,
		0, "XOFF pause frames sent");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
		"stat_FlowControlDone",
		CTLFLAG_RD, &sc->stat_FlowControlDone,
		0, "Flow control done");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
		"stat_MacControlFramesReceived",
		CTLFLAG_RD, &sc->stat_MacControlFramesReceived,
		0, "MAC control frames received");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
		"stat_XoffStateEntered",
		CTLFLAG_RD, &sc->stat_XoffStateEntered,
		0, "XOFF state entered");

	/* Discard / rule-checker statistics. */
	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
		"stat_IfInFramesL2FilterDiscards",
		CTLFLAG_RD, &sc->stat_IfInFramesL2FilterDiscards,
		0, "Received L2 packets discarded");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
		"stat_IfInRuleCheckerDiscards",
		CTLFLAG_RD, &sc->stat_IfInRuleCheckerDiscards,
		0, "Received packets discarded by rule");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
		"stat_IfInFTQDiscards",
		CTLFLAG_RD, &sc->stat_IfInFTQDiscards,
		0, "Received packet FTQ discards");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
		"stat_IfInMBUFDiscards",
		CTLFLAG_RD, &sc->stat_IfInMBUFDiscards,
		0, "Received packets discarded due to lack of controller buffer memory");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
		"stat_IfInRuleCheckerP4Hit",
		CTLFLAG_RD, &sc->stat_IfInRuleCheckerP4Hit,
		0, "Received packets rule checker hits");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
		"stat_CatchupInRuleCheckerDiscards",
		CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerDiscards,
		0, "Received packets discarded in Catchup path");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
		"stat_CatchupInFTQDiscards",
		CTLFLAG_RD,
		&sc->stat_CatchupInFTQDiscards,
		0, "Received packets discarded in FTQ in Catchup path");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
		"stat_CatchupInMBUFDiscards",
		CTLFLAG_RD, &sc->stat_CatchupInMBUFDiscards,
		0, "Received packets discarded in controller buffer memory in Catchup path");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
		"stat_CatchupInRuleCheckerP4Hit",
		CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerP4Hit,
		0, "Received packets rule checker hits in Catchup path");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
		"com_no_buffers",
		CTLFLAG_RD, &sc->com_no_buffers,
		0, "Valid packets received but no RX buffers available");

#ifdef BCE_DEBUG
	/* Debug-only state dump / breakpoint hooks. */
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
		"driver_state", CTLTYPE_INT | CTLFLAG_RW,
		(void *)sc, 0,
		bce_sysctl_driver_state, "I", "Drive state information");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
		"hw_state", CTLTYPE_INT | CTLFLAG_RW,
		(void *)sc, 0,
		bce_sysctl_hw_state, "I", "Hardware state information");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
		"dump_rx_chain", CTLTYPE_INT | CTLFLAG_RW,
		(void *)sc, 0,
		bce_sysctl_dump_rx_chain, "I", "Dump rx_bd chain");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
		"dump_tx_chain", CTLTYPE_INT | CTLFLAG_RW,
		(void *)sc, 0,
		bce_sysctl_dump_tx_chain, "I", "Dump tx_bd chain");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
		"breakpoint", CTLTYPE_INT | CTLFLAG_RW,
		(void *)sc, 0,
		bce_sysctl_breakpoint, "I", "Driver breakpoint");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
		"reg_read", CTLTYPE_INT | CTLFLAG_RW,
		(void *)sc, 0,
		bce_sysctl_reg_read, "I", "Register read");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
		"phy_read", CTLTYPE_INT | CTLFLAG_RW,
		(void *)sc, 0,
		bce_sysctl_phy_read, "I", "PHY register read");

#endif

}

/****************************************************************************/
/* BCE Debug Routines                                                       */
/****************************************************************************/
#ifdef BCE_DEBUG

/****************************************************************************/
/* Freezes the controller to allow for a cohesive state dump.               */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_freeze_controller(struct bce_softc *sc)
{
	uint32_t val;

	val = REG_RD(sc, BCE_MISC_COMMAND);
	val |= BCE_MISC_COMMAND_DISABLE_ALL;
	REG_WR(sc, BCE_MISC_COMMAND, val);
}


/****************************************************************************/
/* Unfreezes the controller after a freeze operation.  This may not always  */
/* work and the controller will require a reset!                            */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_unfreeze_controller(struct bce_softc *sc)
{
	uint32_t val;

	val = REG_RD(sc, BCE_MISC_COMMAND);
	val |= BCE_MISC_COMMAND_ENABLE_ALL;
	REG_WR(sc, BCE_MISC_COMMAND, val);
}


/****************************************************************************/
/* Prints out information about an mbuf.                                    */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_dump_mbuf(struct bce_softc *sc, struct mbuf *m)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t val_hi, val_lo;
	struct mbuf *mp = m;

	if (m == NULL) {
		/* Nothing to dump for a NULL mbuf pointer. */
		if_printf(ifp, "mbuf: null pointer\n");
		return;
	}

	/* Walk the mbuf chain, printing each mbuf in turn. */
	while (mp) {
		val_hi = BCE_ADDR_HI(mp);
		val_lo = BCE_ADDR_LO(mp);
		if_printf(ifp, "mbuf: vaddr = 0x%08X:%08X, m_len = %d, "
			  "m_flags = ( ", val_hi, val_lo, mp->m_len);

		if (mp->m_flags & M_EXT)
			kprintf("M_EXT ");
		if (mp->m_flags & M_PKTHDR)
			kprintf("M_PKTHDR ");
		if (mp->m_flags & M_EOR)
			kprintf("M_EOR ");
#ifdef M_RDONLY
		if (mp->m_flags & M_RDONLY)
			kprintf("M_RDONLY ");
#endif

		val_hi = BCE_ADDR_HI(mp->m_data);
		val_lo = BCE_ADDR_LO(mp->m_data);
		kprintf(") m_data = 0x%08X:%08X\n", val_hi, val_lo);

		/* Packet header mbufs carry additional flag/csum state. */
		if (mp->m_flags & M_PKTHDR) {
			if_printf(ifp, "- m_pkthdr: flags = ( ");
			if (mp->m_flags & M_BCAST)
				kprintf("M_BCAST ");
			if (mp->m_flags & M_MCAST)
				kprintf("M_MCAST ");
			if (mp->m_flags & M_FRAG)
				kprintf("M_FRAG ");
			if (mp->m_flags & M_FIRSTFRAG)
				kprintf("M_FIRSTFRAG ");
			if (mp->m_flags & M_LASTFRAG)
				kprintf("M_LASTFRAG ");
#ifdef M_VLANTAG
			if (mp->m_flags & M_VLANTAG)
				kprintf("M_VLANTAG ");
#endif
#ifdef M_PROMISC
			if (mp->m_flags & M_PROMISC)
				kprintf("M_PROMISC ");
#endif
			kprintf(") csum_flags = ( ");
			if (mp->m_pkthdr.csum_flags & CSUM_IP)
				kprintf("CSUM_IP ");
			if (mp->m_pkthdr.csum_flags & CSUM_TCP)
				kprintf("CSUM_TCP ");
			if (mp->m_pkthdr.csum_flags & CSUM_UDP)
				kprintf("CSUM_UDP ");
			if (mp->m_pkthdr.csum_flags & CSUM_IP_FRAGS)
				kprintf("CSUM_IP_FRAGS ");
			if (mp->m_pkthdr.csum_flags & CSUM_FRAGMENT)
				kprintf("CSUM_FRAGMENT ");
#ifdef CSUM_TSO
			if (mp->m_pkthdr.csum_flags & CSUM_TSO)
				kprintf("CSUM_TSO ");
#endif
			if (mp->m_pkthdr.csum_flags & CSUM_IP_CHECKED)
				kprintf("CSUM_IP_CHECKED ");
			if (mp->m_pkthdr.csum_flags & CSUM_IP_VALID)
				kprintf("CSUM_IP_VALID ");
			if (mp->m_pkthdr.csum_flags & CSUM_DATA_VALID)
				kprintf("CSUM_DATA_VALID ");
			kprintf(")\n");
		}

		/* External storage (cluster) details, if present. */
		if (mp->m_flags & M_EXT) {
			val_hi = BCE_ADDR_HI(mp->m_ext.ext_buf);
			val_lo = BCE_ADDR_LO(mp->m_ext.ext_buf);
			if_printf(ifp, "- m_ext: vaddr = 0x%08X:%08X, "
				  "ext_size = %d\n",
				  val_hi, val_lo, mp->m_ext.ext_size);
		}
		mp = mp->m_next;
	}
}


/****************************************************************************/
/* Prints out the mbufs in the RX mbuf chain.                               */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_dump_rx_mbuf_chain(struct bce_softc *sc, int chain_prod, int count)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int i;

	if_printf(ifp,
	"----------------------------"
	"  rx mbuf data  "
	"----------------------------\n");

	for (i = 0; i < count; i++) {
		if_printf(ifp, "rxmbuf[0x%04X]\n", chain_prod);
		bce_dump_mbuf(sc, sc->rx_mbuf_ptr[chain_prod]);
		chain_prod = RX_CHAIN_IDX(sc, NEXT_RX_BD(chain_prod));
	}

	if_printf(ifp,
	"----------------------------"
	"----------------"
	"----------------------------\n");
}


/****************************************************************************/
/* Prints out a tx_bd structure.                                            */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_dump_txbd(struct bce_softc *sc, int idx, struct tx_bd *txbd)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;

	if (idx > MAX_TX_BD(sc)) {
		/* Index out of range. */
		if_printf(ifp, "tx_bd[0x%04X]: Invalid tx_bd index!\n", idx);
	} else if ((idx & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE) {
		/* TX Chain page pointer. */
		if_printf(ifp, "tx_bd[0x%04X]: haddr = 0x%08X:%08X, "
			  "chain page pointer\n",
			  idx, txbd->tx_bd_haddr_hi, txbd->tx_bd_haddr_lo);
	} else {
		/* Normal tx_bd entry; decode each flag bit by name. */
		if_printf(ifp, "tx_bd[0x%04X]: haddr = 0x%08X:%08X, "
			  "nbytes = 0x%08X, "
			  "vlan tag= 0x%04X, flags = 0x%04X (",
			  idx, txbd->tx_bd_haddr_hi, txbd->tx_bd_haddr_lo,
			  txbd->tx_bd_mss_nbytes,
			  txbd->tx_bd_vlan_tag, txbd->tx_bd_flags);

		if (txbd->tx_bd_flags & TX_BD_FLAGS_CONN_FAULT)
			kprintf(" CONN_FAULT");

		if (txbd->tx_bd_flags & TX_BD_FLAGS_TCP_UDP_CKSUM)
			kprintf(" TCP_UDP_CKSUM");

		if (txbd->tx_bd_flags & TX_BD_FLAGS_IP_CKSUM)
			kprintf(" IP_CKSUM");

		if (txbd->tx_bd_flags & TX_BD_FLAGS_VLAN_TAG)
			kprintf(" VLAN");

		if (txbd->tx_bd_flags & TX_BD_FLAGS_COAL_NOW)
			kprintf(" COAL_NOW");

		if (txbd->tx_bd_flags & TX_BD_FLAGS_DONT_GEN_CRC)
			kprintf(" DONT_GEN_CRC");

		if (txbd->tx_bd_flags & TX_BD_FLAGS_START)
			kprintf(" START");

		if (txbd->tx_bd_flags & TX_BD_FLAGS_END)
			kprintf(" END");

		if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_LSO)
			kprintf(" LSO");

		if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_OPTION_WORD)
			kprintf(" OPTION_WORD");

		if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_FLAGS)
			kprintf(" FLAGS");

		if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_SNAP)
			kprintf(" SNAP");

		kprintf(" )\n");
	}
}


/****************************************************************************/
/* Prints out a rx_bd structure.                                            */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_dump_rxbd(struct bce_softc *sc, int idx, struct rx_bd *rxbd)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;

	if (idx > MAX_RX_BD(sc)) {
		/* Index out of range. */
		if_printf(ifp, "rx_bd[0x%04X]: Invalid rx_bd index!\n", idx);
	} else if ((idx & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE) {
		/* RX chain page pointer. */
		if_printf(ifp, "rx_bd[0x%04X]: haddr = 0x%08X:%08X, "
			  "chain page pointer\n",
			  idx, rxbd->rx_bd_haddr_hi, rxbd->rx_bd_haddr_lo);
	} else {
		/* Normal rx_bd entry. */
		if_printf(ifp, "rx_bd[0x%04X]: haddr = 0x%08X:%08X, "
			  "nbytes = 0x%08X, flags = 0x%08X\n",
			  idx, rxbd->rx_bd_haddr_hi, rxbd->rx_bd_haddr_lo,
			  rxbd->rx_bd_len, rxbd->rx_bd_flags);
	}
}


/****************************************************************************/
/* Prints out a l2_fhdr structure.                                          */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_dump_l2fhdr(struct bce_softc *sc, int idx, struct l2_fhdr *l2fhdr)
{
	if_printf(&sc->arpcom.ac_if, "l2_fhdr[0x%04X]: status = 0x%08X, "
		  "pkt_len = 0x%04X, vlan = 0x%04x, "
		  "ip_xsum = 0x%04X, tcp_udp_xsum = 0x%04X\n",
		  idx, l2fhdr->l2_fhdr_status,
		  l2fhdr->l2_fhdr_pkt_len, l2fhdr->l2_fhdr_vlan_tag,
		  l2fhdr->l2_fhdr_ip_xsum, l2fhdr->l2_fhdr_tcp_udp_xsum);
}


/****************************************************************************/
/* Prints out the tx chain.                                                 */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_dump_tx_chain(struct bce_softc *sc, int tx_prod, int count)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int i;

	/* First some info about the tx_bd chain structure. */
	if_printf(ifp,
	"----------------------------"
	"  tx_bd chain  "
	"----------------------------\n");

	if_printf(ifp, "page size      = 0x%08X, "
		  "tx chain pages        = 0x%08X\n",
		  (uint32_t)BCM_PAGE_SIZE, (uint32_t)sc->tx_pages);

	if_printf(ifp, "tx_bd per page = 0x%08X, "
		  "usable tx_bd per page = 0x%08X\n",
		  (uint32_t)TOTAL_TX_BD_PER_PAGE,
		  (uint32_t)USABLE_TX_BD_PER_PAGE);

	if_printf(ifp, "total tx_bd    = 0x%08X\n", (uint32_t)TOTAL_TX_BD(sc));

	if_printf(ifp,
	"----------------------------"
	"   tx_bd data   "
	"----------------------------\n");

	/* Now print out the tx_bd's themselves. */
	for (i = 0; i < count; i++) {
		struct tx_bd *txbd;

		txbd = &sc->tx_bd_chain[TX_PAGE(tx_prod)][TX_IDX(tx_prod)];
		bce_dump_txbd(sc, tx_prod, txbd);
		tx_prod = TX_CHAIN_IDX(sc, NEXT_TX_BD(tx_prod));
	}

	if_printf(ifp,
	"----------------------------"
	"----------------"
	"----------------------------\n");
}


/****************************************************************************/
/* Prints out the rx chain.                                                 */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_dump_rx_chain(struct bce_softc *sc, int rx_prod, int count)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int i;

	/* First some info about the rx_bd chain structure. */
	if_printf(ifp,
	"----------------------------"
	"  rx_bd chain  "
	"----------------------------\n");

	if_printf(ifp, "page size      = 0x%08X, "
		  "rx chain pages        = 0x%08X\n",
		  (uint32_t)BCM_PAGE_SIZE, (uint32_t)sc->rx_pages);

	if_printf(ifp, "rx_bd per page = 0x%08X, "
		  "usable rx_bd per page = 0x%08X\n",
		  (uint32_t)TOTAL_RX_BD_PER_PAGE,
		  (uint32_t)USABLE_RX_BD_PER_PAGE);

	if_printf(ifp, "total rx_bd    = 0x%08X\n", (uint32_t)TOTAL_RX_BD(sc));

	if_printf(ifp,
	"----------------------------"
	"   rx_bd data   "
	"----------------------------\n");

	/* Now print out the rx_bd's themselves. */
	for (i = 0; i < count; i++) {
		struct rx_bd *rxbd;

		rxbd = &sc->rx_bd_chain[RX_PAGE(rx_prod)][RX_IDX(rx_prod)];
		bce_dump_rxbd(sc, rx_prod, rxbd);
		rx_prod = RX_CHAIN_IDX(sc, NEXT_RX_BD(rx_prod));
	}

	if_printf(ifp,
	"----------------------------"
	"----------------"
	"----------------------------\n");
}


/****************************************************************************/
/* Prints out the status block from host memory.                            */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
 */
/****************************************************************************/
static void
bce_dump_status_block(struct bce_softc *sc)
{
	struct status_block *sblk = sc->status_block;
	struct ifnet *ifp = &sc->arpcom.ac_if;

	if_printf(ifp,
	"----------------------------"
	"  Status Block  "
	"----------------------------\n");

	if_printf(ifp, "    0x%08X - attn_bits\n", sblk->status_attn_bits);

	if_printf(ifp, "    0x%08X - attn_bits_ack\n",
		  sblk->status_attn_bits_ack);

	if_printf(ifp, "0x%04X(0x%04X) - rx_cons0\n",
	    sblk->status_rx_quick_consumer_index0,
	    (uint16_t)RX_CHAIN_IDX(sc, sblk->status_rx_quick_consumer_index0));

	if_printf(ifp, "0x%04X(0x%04X) - tx_cons0\n",
	    sblk->status_tx_quick_consumer_index0,
	    (uint16_t)TX_CHAIN_IDX(sc, sblk->status_tx_quick_consumer_index0));

	if_printf(ifp, "        0x%04X - status_idx\n", sblk->status_idx);

	/* These indices are not used for normal L2 drivers;
	 * only print the ones that are non-zero. */
	if (sblk->status_rx_quick_consumer_index1) {
		if_printf(ifp, "0x%04X(0x%04X) - rx_cons1\n",
		    sblk->status_rx_quick_consumer_index1,
		    (uint16_t)RX_CHAIN_IDX(sc,
		    sblk->status_rx_quick_consumer_index1));
	}

	if (sblk->status_tx_quick_consumer_index1) {
		if_printf(ifp, "0x%04X(0x%04X) - tx_cons1\n",
		    sblk->status_tx_quick_consumer_index1,
		    (uint16_t)TX_CHAIN_IDX(sc,
		    sblk->status_tx_quick_consumer_index1));
	}

	if (sblk->status_rx_quick_consumer_index2) {
		if_printf(ifp, "0x%04X(0x%04X)- rx_cons2\n",
		    sblk->status_rx_quick_consumer_index2,
		    (uint16_t)RX_CHAIN_IDX(sc,
		    sblk->status_rx_quick_consumer_index2));
	}

	if (sblk->status_tx_quick_consumer_index2) {
		if_printf(ifp, "0x%04X(0x%04X) - tx_cons2\n",
		    sblk->status_tx_quick_consumer_index2,
		    (uint16_t)TX_CHAIN_IDX(sc,
		    sblk->status_tx_quick_consumer_index2));
	}

	if (sblk->status_rx_quick_consumer_index3) {
		if_printf(ifp, "0x%04X(0x%04X) - rx_cons3\n",
		    sblk->status_rx_quick_consumer_index3,
		    (uint16_t)RX_CHAIN_IDX(sc,
		    sblk->status_rx_quick_consumer_index3));
	}

	if (sblk->status_tx_quick_consumer_index3) {
		if_printf(ifp, "0x%04X(0x%04X) - tx_cons3\n",
		    sblk->status_tx_quick_consumer_index3,
		    (uint16_t)TX_CHAIN_IDX(sc,
		    sblk->status_tx_quick_consumer_index3));
	}

	if (sblk->status_rx_quick_consumer_index4 ||
	    sblk->status_rx_quick_consumer_index5) {
		if_printf(ifp, "rx_cons4  = 0x%08X, rx_cons5  = 0x%08X\n",
			  sblk->status_rx_quick_consumer_index4,
			  sblk->status_rx_quick_consumer_index5);
	}

	if (sblk->status_rx_quick_consumer_index6 ||
	    sblk->status_rx_quick_consumer_index7) {
		if_printf(ifp, "rx_cons6  = 0x%08X, rx_cons7  = 0x%08X\n",
			  sblk->status_rx_quick_consumer_index6,
			  sblk->status_rx_quick_consumer_index7);
	}

	if (sblk->status_rx_quick_consumer_index8 ||
	    sblk->status_rx_quick_consumer_index9) {
		if_printf(ifp, "rx_cons8  = 0x%08X, rx_cons9  = 0x%08X\n",
			  sblk->status_rx_quick_consumer_index8,
			  sblk->status_rx_quick_consumer_index9);
	}

	if (sblk->status_rx_quick_consumer_index10 ||
	    sblk->status_rx_quick_consumer_index11) {
		if_printf(ifp, "rx_cons10 = 0x%08X, rx_cons11 = 0x%08X\n",
			  sblk->status_rx_quick_consumer_index10,
			  sblk->status_rx_quick_consumer_index11);
	}

	if (sblk->status_rx_quick_consumer_index12 ||
	    sblk->status_rx_quick_consumer_index13) {
		if_printf(ifp, "rx_cons12 = 0x%08X, rx_cons13 = 0x%08X\n",
			  sblk->status_rx_quick_consumer_index12,
			  sblk->status_rx_quick_consumer_index13);
	}

	if (sblk->status_rx_quick_consumer_index14 ||
	    sblk->status_rx_quick_consumer_index15) {
		if_printf(ifp, "rx_cons14 = 0x%08X, rx_cons15 = 0x%08X\n",
			  sblk->status_rx_quick_consumer_index14,
			  sblk->status_rx_quick_consumer_index15);
	}

	if (sblk->status_completion_producer_index ||
	    sblk->status_cmd_consumer_index) {
		if_printf(ifp, "com_prod  = 0x%08X, cmd_cons  = 0x%08X\n",
			  sblk->status_completion_producer_index,
			  sblk->status_cmd_consumer_index);
	}

	if_printf(ifp,
	"----------------------------"
	"----------------"
	"----------------------------\n");
}


/****************************************************************************/
/* Prints out the statistics block.                                         */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
*/ 7093 /****************************************************************************/ 7094 static void 7095 bce_dump_stats_block(struct bce_softc *sc) 7096 { 7097 struct statistics_block *sblk = sc->stats_block; 7098 struct ifnet *ifp = &sc->arpcom.ac_if; 7099 7100 if_printf(ifp, 7101 "---------------" 7102 " Stats Block (All Stats Not Shown Are 0) " 7103 "---------------\n"); 7104 7105 if (sblk->stat_IfHCInOctets_hi || sblk->stat_IfHCInOctets_lo) { 7106 if_printf(ifp, "0x%08X:%08X : IfHcInOctets\n", 7107 sblk->stat_IfHCInOctets_hi, 7108 sblk->stat_IfHCInOctets_lo); 7109 } 7110 7111 if (sblk->stat_IfHCInBadOctets_hi || sblk->stat_IfHCInBadOctets_lo) { 7112 if_printf(ifp, "0x%08X:%08X : IfHcInBadOctets\n", 7113 sblk->stat_IfHCInBadOctets_hi, 7114 sblk->stat_IfHCInBadOctets_lo); 7115 } 7116 7117 if (sblk->stat_IfHCOutOctets_hi || sblk->stat_IfHCOutOctets_lo) { 7118 if_printf(ifp, "0x%08X:%08X : IfHcOutOctets\n", 7119 sblk->stat_IfHCOutOctets_hi, 7120 sblk->stat_IfHCOutOctets_lo); 7121 } 7122 7123 if (sblk->stat_IfHCOutBadOctets_hi || sblk->stat_IfHCOutBadOctets_lo) { 7124 if_printf(ifp, "0x%08X:%08X : IfHcOutBadOctets\n", 7125 sblk->stat_IfHCOutBadOctets_hi, 7126 sblk->stat_IfHCOutBadOctets_lo); 7127 } 7128 7129 if (sblk->stat_IfHCInUcastPkts_hi || sblk->stat_IfHCInUcastPkts_lo) { 7130 if_printf(ifp, "0x%08X:%08X : IfHcInUcastPkts\n", 7131 sblk->stat_IfHCInUcastPkts_hi, 7132 sblk->stat_IfHCInUcastPkts_lo); 7133 } 7134 7135 if (sblk->stat_IfHCInBroadcastPkts_hi || 7136 sblk->stat_IfHCInBroadcastPkts_lo) { 7137 if_printf(ifp, "0x%08X:%08X : IfHcInBroadcastPkts\n", 7138 sblk->stat_IfHCInBroadcastPkts_hi, 7139 sblk->stat_IfHCInBroadcastPkts_lo); 7140 } 7141 7142 if (sblk->stat_IfHCInMulticastPkts_hi || 7143 sblk->stat_IfHCInMulticastPkts_lo) { 7144 if_printf(ifp, "0x%08X:%08X : IfHcInMulticastPkts\n", 7145 sblk->stat_IfHCInMulticastPkts_hi, 7146 sblk->stat_IfHCInMulticastPkts_lo); 7147 } 7148 7149 if (sblk->stat_IfHCOutUcastPkts_hi || sblk->stat_IfHCOutUcastPkts_lo) { 
7150 if_printf(ifp, "0x%08X:%08X : IfHcOutUcastPkts\n", 7151 sblk->stat_IfHCOutUcastPkts_hi, 7152 sblk->stat_IfHCOutUcastPkts_lo); 7153 } 7154 7155 if (sblk->stat_IfHCOutBroadcastPkts_hi || 7156 sblk->stat_IfHCOutBroadcastPkts_lo) { 7157 if_printf(ifp, "0x%08X:%08X : IfHcOutBroadcastPkts\n", 7158 sblk->stat_IfHCOutBroadcastPkts_hi, 7159 sblk->stat_IfHCOutBroadcastPkts_lo); 7160 } 7161 7162 if (sblk->stat_IfHCOutMulticastPkts_hi || 7163 sblk->stat_IfHCOutMulticastPkts_lo) { 7164 if_printf(ifp, "0x%08X:%08X : IfHcOutMulticastPkts\n", 7165 sblk->stat_IfHCOutMulticastPkts_hi, 7166 sblk->stat_IfHCOutMulticastPkts_lo); 7167 } 7168 7169 if (sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors) { 7170 if_printf(ifp, " 0x%08X : " 7171 "emac_tx_stat_dot3statsinternalmactransmiterrors\n", 7172 sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors); 7173 } 7174 7175 if (sblk->stat_Dot3StatsCarrierSenseErrors) { 7176 if_printf(ifp, " 0x%08X : " 7177 "Dot3StatsCarrierSenseErrors\n", 7178 sblk->stat_Dot3StatsCarrierSenseErrors); 7179 } 7180 7181 if (sblk->stat_Dot3StatsFCSErrors) { 7182 if_printf(ifp, " 0x%08X : Dot3StatsFCSErrors\n", 7183 sblk->stat_Dot3StatsFCSErrors); 7184 } 7185 7186 if (sblk->stat_Dot3StatsAlignmentErrors) { 7187 if_printf(ifp, " 0x%08X : Dot3StatsAlignmentErrors\n", 7188 sblk->stat_Dot3StatsAlignmentErrors); 7189 } 7190 7191 if (sblk->stat_Dot3StatsSingleCollisionFrames) { 7192 if_printf(ifp, " 0x%08X : " 7193 "Dot3StatsSingleCollisionFrames\n", 7194 sblk->stat_Dot3StatsSingleCollisionFrames); 7195 } 7196 7197 if (sblk->stat_Dot3StatsMultipleCollisionFrames) { 7198 if_printf(ifp, " 0x%08X : " 7199 "Dot3StatsMultipleCollisionFrames\n", 7200 sblk->stat_Dot3StatsMultipleCollisionFrames); 7201 } 7202 7203 if (sblk->stat_Dot3StatsDeferredTransmissions) { 7204 if_printf(ifp, " 0x%08X : " 7205 "Dot3StatsDeferredTransmissions\n", 7206 sblk->stat_Dot3StatsDeferredTransmissions); 7207 } 7208 7209 if (sblk->stat_Dot3StatsExcessiveCollisions) { 7210 
if_printf(ifp, " 0x%08X : " 7211 "Dot3StatsExcessiveCollisions\n", 7212 sblk->stat_Dot3StatsExcessiveCollisions); 7213 } 7214 7215 if (sblk->stat_Dot3StatsLateCollisions) { 7216 if_printf(ifp, " 0x%08X : Dot3StatsLateCollisions\n", 7217 sblk->stat_Dot3StatsLateCollisions); 7218 } 7219 7220 if (sblk->stat_EtherStatsCollisions) { 7221 if_printf(ifp, " 0x%08X : EtherStatsCollisions\n", 7222 sblk->stat_EtherStatsCollisions); 7223 } 7224 7225 if (sblk->stat_EtherStatsFragments) { 7226 if_printf(ifp, " 0x%08X : EtherStatsFragments\n", 7227 sblk->stat_EtherStatsFragments); 7228 } 7229 7230 if (sblk->stat_EtherStatsJabbers) { 7231 if_printf(ifp, " 0x%08X : EtherStatsJabbers\n", 7232 sblk->stat_EtherStatsJabbers); 7233 } 7234 7235 if (sblk->stat_EtherStatsUndersizePkts) { 7236 if_printf(ifp, " 0x%08X : EtherStatsUndersizePkts\n", 7237 sblk->stat_EtherStatsUndersizePkts); 7238 } 7239 7240 if (sblk->stat_EtherStatsOverrsizePkts) { 7241 if_printf(ifp, " 0x%08X : EtherStatsOverrsizePkts\n", 7242 sblk->stat_EtherStatsOverrsizePkts); 7243 } 7244 7245 if (sblk->stat_EtherStatsPktsRx64Octets) { 7246 if_printf(ifp, " 0x%08X : EtherStatsPktsRx64Octets\n", 7247 sblk->stat_EtherStatsPktsRx64Octets); 7248 } 7249 7250 if (sblk->stat_EtherStatsPktsRx65Octetsto127Octets) { 7251 if_printf(ifp, " 0x%08X : " 7252 "EtherStatsPktsRx65Octetsto127Octets\n", 7253 sblk->stat_EtherStatsPktsRx65Octetsto127Octets); 7254 } 7255 7256 if (sblk->stat_EtherStatsPktsRx128Octetsto255Octets) { 7257 if_printf(ifp, " 0x%08X : " 7258 "EtherStatsPktsRx128Octetsto255Octets\n", 7259 sblk->stat_EtherStatsPktsRx128Octetsto255Octets); 7260 } 7261 7262 if (sblk->stat_EtherStatsPktsRx256Octetsto511Octets) { 7263 if_printf(ifp, " 0x%08X : " 7264 "EtherStatsPktsRx256Octetsto511Octets\n", 7265 sblk->stat_EtherStatsPktsRx256Octetsto511Octets); 7266 } 7267 7268 if (sblk->stat_EtherStatsPktsRx512Octetsto1023Octets) { 7269 if_printf(ifp, " 0x%08X : " 7270 "EtherStatsPktsRx512Octetsto1023Octets\n", 7271 
sblk->stat_EtherStatsPktsRx512Octetsto1023Octets); 7272 } 7273 7274 if (sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets) { 7275 if_printf(ifp, " 0x%08X : " 7276 "EtherStatsPktsRx1024Octetsto1522Octets\n", 7277 sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets); 7278 } 7279 7280 if (sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets) { 7281 if_printf(ifp, " 0x%08X : " 7282 "EtherStatsPktsRx1523Octetsto9022Octets\n", 7283 sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets); 7284 } 7285 7286 if (sblk->stat_EtherStatsPktsTx64Octets) { 7287 if_printf(ifp, " 0x%08X : EtherStatsPktsTx64Octets\n", 7288 sblk->stat_EtherStatsPktsTx64Octets); 7289 } 7290 7291 if (sblk->stat_EtherStatsPktsTx65Octetsto127Octets) { 7292 if_printf(ifp, " 0x%08X : " 7293 "EtherStatsPktsTx65Octetsto127Octets\n", 7294 sblk->stat_EtherStatsPktsTx65Octetsto127Octets); 7295 } 7296 7297 if (sblk->stat_EtherStatsPktsTx128Octetsto255Octets) { 7298 if_printf(ifp, " 0x%08X : " 7299 "EtherStatsPktsTx128Octetsto255Octets\n", 7300 sblk->stat_EtherStatsPktsTx128Octetsto255Octets); 7301 } 7302 7303 if (sblk->stat_EtherStatsPktsTx256Octetsto511Octets) { 7304 if_printf(ifp, " 0x%08X : " 7305 "EtherStatsPktsTx256Octetsto511Octets\n", 7306 sblk->stat_EtherStatsPktsTx256Octetsto511Octets); 7307 } 7308 7309 if (sblk->stat_EtherStatsPktsTx512Octetsto1023Octets) { 7310 if_printf(ifp, " 0x%08X : " 7311 "EtherStatsPktsTx512Octetsto1023Octets\n", 7312 sblk->stat_EtherStatsPktsTx512Octetsto1023Octets); 7313 } 7314 7315 if (sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets) { 7316 if_printf(ifp, " 0x%08X : " 7317 "EtherStatsPktsTx1024Octetsto1522Octets\n", 7318 sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets); 7319 } 7320 7321 if (sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets) { 7322 if_printf(ifp, " 0x%08X : " 7323 "EtherStatsPktsTx1523Octetsto9022Octets\n", 7324 sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets); 7325 } 7326 7327 if (sblk->stat_XonPauseFramesReceived) { 7328 if_printf(ifp, " 0x%08X : 
XonPauseFramesReceived\n", 7329 sblk->stat_XonPauseFramesReceived); 7330 } 7331 7332 if (sblk->stat_XoffPauseFramesReceived) { 7333 if_printf(ifp, " 0x%08X : XoffPauseFramesReceived\n", 7334 sblk->stat_XoffPauseFramesReceived); 7335 } 7336 7337 if (sblk->stat_OutXonSent) { 7338 if_printf(ifp, " 0x%08X : OutXoffSent\n", 7339 sblk->stat_OutXonSent); 7340 } 7341 7342 if (sblk->stat_OutXoffSent) { 7343 if_printf(ifp, " 0x%08X : OutXoffSent\n", 7344 sblk->stat_OutXoffSent); 7345 } 7346 7347 if (sblk->stat_FlowControlDone) { 7348 if_printf(ifp, " 0x%08X : FlowControlDone\n", 7349 sblk->stat_FlowControlDone); 7350 } 7351 7352 if (sblk->stat_MacControlFramesReceived) { 7353 if_printf(ifp, " 0x%08X : MacControlFramesReceived\n", 7354 sblk->stat_MacControlFramesReceived); 7355 } 7356 7357 if (sblk->stat_XoffStateEntered) { 7358 if_printf(ifp, " 0x%08X : XoffStateEntered\n", 7359 sblk->stat_XoffStateEntered); 7360 } 7361 7362 if (sblk->stat_IfInFramesL2FilterDiscards) { 7363 if_printf(ifp, " 0x%08X : IfInFramesL2FilterDiscards\n", sblk->stat_IfInFramesL2FilterDiscards); 7364 } 7365 7366 if (sblk->stat_IfInRuleCheckerDiscards) { 7367 if_printf(ifp, " 0x%08X : IfInRuleCheckerDiscards\n", 7368 sblk->stat_IfInRuleCheckerDiscards); 7369 } 7370 7371 if (sblk->stat_IfInFTQDiscards) { 7372 if_printf(ifp, " 0x%08X : IfInFTQDiscards\n", 7373 sblk->stat_IfInFTQDiscards); 7374 } 7375 7376 if (sblk->stat_IfInMBUFDiscards) { 7377 if_printf(ifp, " 0x%08X : IfInMBUFDiscards\n", 7378 sblk->stat_IfInMBUFDiscards); 7379 } 7380 7381 if (sblk->stat_IfInRuleCheckerP4Hit) { 7382 if_printf(ifp, " 0x%08X : IfInRuleCheckerP4Hit\n", 7383 sblk->stat_IfInRuleCheckerP4Hit); 7384 } 7385 7386 if (sblk->stat_CatchupInRuleCheckerDiscards) { 7387 if_printf(ifp, " 0x%08X : " 7388 "CatchupInRuleCheckerDiscards\n", 7389 sblk->stat_CatchupInRuleCheckerDiscards); 7390 } 7391 7392 if (sblk->stat_CatchupInFTQDiscards) { 7393 if_printf(ifp, " 0x%08X : CatchupInFTQDiscards\n", 7394 sblk->stat_CatchupInFTQDiscards); 
7395 } 7396 7397 if (sblk->stat_CatchupInMBUFDiscards) { 7398 if_printf(ifp, " 0x%08X : CatchupInMBUFDiscards\n", 7399 sblk->stat_CatchupInMBUFDiscards); 7400 } 7401 7402 if (sblk->stat_CatchupInRuleCheckerP4Hit) { 7403 if_printf(ifp, " 0x%08X : CatchupInRuleCheckerP4Hit\n", 7404 sblk->stat_CatchupInRuleCheckerP4Hit); 7405 } 7406 7407 if_printf(ifp, 7408 "----------------------------" 7409 "----------------" 7410 "----------------------------\n"); 7411 } 7412 7413 7414 /****************************************************************************/ 7415 /* Prints out a summary of the driver state. */ 7416 /* */ 7417 /* Returns: */ 7418 /* Nothing. */ 7419 /****************************************************************************/ 7420 static void 7421 bce_dump_driver_state(struct bce_softc *sc) 7422 { 7423 struct ifnet *ifp = &sc->arpcom.ac_if; 7424 uint32_t val_hi, val_lo; 7425 7426 if_printf(ifp, 7427 "-----------------------------" 7428 " Driver State " 7429 "-----------------------------\n"); 7430 7431 val_hi = BCE_ADDR_HI(sc); 7432 val_lo = BCE_ADDR_LO(sc); 7433 if_printf(ifp, "0x%08X:%08X - (sc) driver softc structure " 7434 "virtual address\n", val_hi, val_lo); 7435 7436 val_hi = BCE_ADDR_HI(sc->status_block); 7437 val_lo = BCE_ADDR_LO(sc->status_block); 7438 if_printf(ifp, "0x%08X:%08X - (sc->status_block) status block " 7439 "virtual address\n", val_hi, val_lo); 7440 7441 val_hi = BCE_ADDR_HI(sc->stats_block); 7442 val_lo = BCE_ADDR_LO(sc->stats_block); 7443 if_printf(ifp, "0x%08X:%08X - (sc->stats_block) statistics block " 7444 "virtual address\n", val_hi, val_lo); 7445 7446 val_hi = BCE_ADDR_HI(sc->tx_bd_chain); 7447 val_lo = BCE_ADDR_LO(sc->tx_bd_chain); 7448 if_printf(ifp, "0x%08X:%08X - (sc->tx_bd_chain) tx_bd chain " 7449 "virtual address\n", val_hi, val_lo); 7450 7451 val_hi = BCE_ADDR_HI(sc->rx_bd_chain); 7452 val_lo = BCE_ADDR_LO(sc->rx_bd_chain); 7453 if_printf(ifp, "0x%08X:%08X - (sc->rx_bd_chain) rx_bd chain " 7454 "virtual address\n", val_hi, 
val_lo); 7455 7456 val_hi = BCE_ADDR_HI(sc->tx_mbuf_ptr); 7457 val_lo = BCE_ADDR_LO(sc->tx_mbuf_ptr); 7458 if_printf(ifp, "0x%08X:%08X - (sc->tx_mbuf_ptr) tx mbuf chain " 7459 "virtual address\n", val_hi, val_lo); 7460 7461 val_hi = BCE_ADDR_HI(sc->rx_mbuf_ptr); 7462 val_lo = BCE_ADDR_LO(sc->rx_mbuf_ptr); 7463 if_printf(ifp, "0x%08X:%08X - (sc->rx_mbuf_ptr) rx mbuf chain " 7464 "virtual address\n", val_hi, val_lo); 7465 7466 if_printf(ifp, " 0x%08X - (sc->interrupts_generated) " 7467 "h/w intrs\n", sc->interrupts_generated); 7468 7469 if_printf(ifp, " 0x%08X - (sc->rx_interrupts) " 7470 "rx interrupts handled\n", sc->rx_interrupts); 7471 7472 if_printf(ifp, " 0x%08X - (sc->tx_interrupts) " 7473 "tx interrupts handled\n", sc->tx_interrupts); 7474 7475 if_printf(ifp, " 0x%08X - (sc->last_status_idx) " 7476 "status block index\n", sc->last_status_idx); 7477 7478 if_printf(ifp, " 0x%04X(0x%04X) - (sc->tx_prod) " 7479 "tx producer index\n", 7480 sc->tx_prod, (uint16_t)TX_CHAIN_IDX(sc, sc->tx_prod)); 7481 7482 if_printf(ifp, " 0x%04X(0x%04X) - (sc->tx_cons) " 7483 "tx consumer index\n", 7484 sc->tx_cons, (uint16_t)TX_CHAIN_IDX(sc, sc->tx_cons)); 7485 7486 if_printf(ifp, " 0x%08X - (sc->tx_prod_bseq) " 7487 "tx producer bseq index\n", sc->tx_prod_bseq); 7488 7489 if_printf(ifp, " 0x%04X(0x%04X) - (sc->rx_prod) " 7490 "rx producer index\n", 7491 sc->rx_prod, (uint16_t)RX_CHAIN_IDX(sc, sc->rx_prod)); 7492 7493 if_printf(ifp, " 0x%04X(0x%04X) - (sc->rx_cons) " 7494 "rx consumer index\n", 7495 sc->rx_cons, (uint16_t)RX_CHAIN_IDX(sc, sc->rx_cons)); 7496 7497 if_printf(ifp, " 0x%08X - (sc->rx_prod_bseq) " 7498 "rx producer bseq index\n", sc->rx_prod_bseq); 7499 7500 if_printf(ifp, " 0x%08X - (sc->rx_mbuf_alloc) " 7501 "rx mbufs allocated\n", sc->rx_mbuf_alloc); 7502 7503 if_printf(ifp, " 0x%08X - (sc->free_rx_bd) " 7504 "free rx_bd's\n", sc->free_rx_bd); 7505 7506 if_printf(ifp, "0x%08X/%08X - (sc->rx_low_watermark) rx " 7507 "low watermark\n", sc->rx_low_watermark, 
sc->max_rx_bd); 7508 7509 if_printf(ifp, " 0x%08X - (sc->txmbuf_alloc) " 7510 "tx mbufs allocated\n", sc->tx_mbuf_alloc); 7511 7512 if_printf(ifp, " 0x%08X - (sc->rx_mbuf_alloc) " 7513 "rx mbufs allocated\n", sc->rx_mbuf_alloc); 7514 7515 if_printf(ifp, " 0x%08X - (sc->used_tx_bd) used tx_bd's\n", 7516 sc->used_tx_bd); 7517 7518 if_printf(ifp, "0x%08X/%08X - (sc->tx_hi_watermark) tx hi watermark\n", 7519 sc->tx_hi_watermark, sc->max_tx_bd); 7520 7521 if_printf(ifp, " 0x%08X - (sc->mbuf_alloc_failed) " 7522 "failed mbuf alloc\n", sc->mbuf_alloc_failed); 7523 7524 if_printf(ifp, 7525 "----------------------------" 7526 "----------------" 7527 "----------------------------\n"); 7528 } 7529 7530 7531 /****************************************************************************/ 7532 /* Prints out the hardware state through a summary of important registers, */ 7533 /* followed by a complete register dump. */ 7534 /* */ 7535 /* Returns: */ 7536 /* Nothing. */ 7537 /****************************************************************************/ 7538 static void 7539 bce_dump_hw_state(struct bce_softc *sc) 7540 { 7541 struct ifnet *ifp = &sc->arpcom.ac_if; 7542 uint32_t val1; 7543 int i; 7544 7545 if_printf(ifp, 7546 "----------------------------" 7547 " Hardware State " 7548 "----------------------------\n"); 7549 7550 if_printf(ifp, "%s - bootcode version\n", sc->bce_bc_ver); 7551 7552 val1 = REG_RD(sc, BCE_MISC_ENABLE_STATUS_BITS); 7553 if_printf(ifp, "0x%08X - (0x%06X) misc_enable_status_bits\n", 7554 val1, BCE_MISC_ENABLE_STATUS_BITS); 7555 7556 val1 = REG_RD(sc, BCE_DMA_STATUS); 7557 if_printf(ifp, "0x%08X - (0x%04X) dma_status\n", val1, BCE_DMA_STATUS); 7558 7559 val1 = REG_RD(sc, BCE_CTX_STATUS); 7560 if_printf(ifp, "0x%08X - (0x%04X) ctx_status\n", val1, BCE_CTX_STATUS); 7561 7562 val1 = REG_RD(sc, BCE_EMAC_STATUS); 7563 if_printf(ifp, "0x%08X - (0x%04X) emac_status\n", 7564 val1, BCE_EMAC_STATUS); 7565 7566 val1 = REG_RD(sc, BCE_RPM_STATUS); 7567 if_printf(ifp, 
"0x%08X - (0x%04X) rpm_status\n", val1, BCE_RPM_STATUS); 7568 7569 val1 = REG_RD(sc, BCE_TBDR_STATUS); 7570 if_printf(ifp, "0x%08X - (0x%04X) tbdr_status\n", 7571 val1, BCE_TBDR_STATUS); 7572 7573 val1 = REG_RD(sc, BCE_TDMA_STATUS); 7574 if_printf(ifp, "0x%08X - (0x%04X) tdma_status\n", 7575 val1, BCE_TDMA_STATUS); 7576 7577 val1 = REG_RD(sc, BCE_HC_STATUS); 7578 if_printf(ifp, "0x%08X - (0x%06X) hc_status\n", val1, BCE_HC_STATUS); 7579 7580 val1 = REG_RD_IND(sc, BCE_TXP_CPU_STATE); 7581 if_printf(ifp, "0x%08X - (0x%06X) txp_cpu_state\n", 7582 val1, BCE_TXP_CPU_STATE); 7583 7584 val1 = REG_RD_IND(sc, BCE_TPAT_CPU_STATE); 7585 if_printf(ifp, "0x%08X - (0x%06X) tpat_cpu_state\n", 7586 val1, BCE_TPAT_CPU_STATE); 7587 7588 val1 = REG_RD_IND(sc, BCE_RXP_CPU_STATE); 7589 if_printf(ifp, "0x%08X - (0x%06X) rxp_cpu_state\n", 7590 val1, BCE_RXP_CPU_STATE); 7591 7592 val1 = REG_RD_IND(sc, BCE_COM_CPU_STATE); 7593 if_printf(ifp, "0x%08X - (0x%06X) com_cpu_state\n", 7594 val1, BCE_COM_CPU_STATE); 7595 7596 val1 = REG_RD_IND(sc, BCE_MCP_CPU_STATE); 7597 if_printf(ifp, "0x%08X - (0x%06X) mcp_cpu_state\n", 7598 val1, BCE_MCP_CPU_STATE); 7599 7600 val1 = REG_RD_IND(sc, BCE_CP_CPU_STATE); 7601 if_printf(ifp, "0x%08X - (0x%06X) cp_cpu_state\n", 7602 val1, BCE_CP_CPU_STATE); 7603 7604 if_printf(ifp, 7605 "----------------------------" 7606 "----------------" 7607 "----------------------------\n"); 7608 7609 if_printf(ifp, 7610 "----------------------------" 7611 " Register Dump " 7612 "----------------------------\n"); 7613 7614 for (i = 0x400; i < 0x8000; i += 0x10) { 7615 if_printf(ifp, "0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n", i, 7616 REG_RD(sc, i), 7617 REG_RD(sc, i + 0x4), 7618 REG_RD(sc, i + 0x8), 7619 REG_RD(sc, i + 0xc)); 7620 } 7621 7622 if_printf(ifp, 7623 "----------------------------" 7624 "----------------" 7625 "----------------------------\n"); 7626 } 7627 7628 7629 /****************************************************************************/ 7630 /* Prints out the TXP 
state. */ 7631 /* */ 7632 /* Returns: */ 7633 /* Nothing. */ 7634 /****************************************************************************/ 7635 static void 7636 bce_dump_txp_state(struct bce_softc *sc) 7637 { 7638 struct ifnet *ifp = &sc->arpcom.ac_if; 7639 uint32_t val1; 7640 int i; 7641 7642 if_printf(ifp, 7643 "----------------------------" 7644 " TXP State " 7645 "----------------------------\n"); 7646 7647 val1 = REG_RD_IND(sc, BCE_TXP_CPU_MODE); 7648 if_printf(ifp, "0x%08X - (0x%06X) txp_cpu_mode\n", 7649 val1, BCE_TXP_CPU_MODE); 7650 7651 val1 = REG_RD_IND(sc, BCE_TXP_CPU_STATE); 7652 if_printf(ifp, "0x%08X - (0x%06X) txp_cpu_state\n", 7653 val1, BCE_TXP_CPU_STATE); 7654 7655 val1 = REG_RD_IND(sc, BCE_TXP_CPU_EVENT_MASK); 7656 if_printf(ifp, "0x%08X - (0x%06X) txp_cpu_event_mask\n", 7657 val1, BCE_TXP_CPU_EVENT_MASK); 7658 7659 if_printf(ifp, 7660 "----------------------------" 7661 " Register Dump " 7662 "----------------------------\n"); 7663 7664 for (i = BCE_TXP_CPU_MODE; i < 0x68000; i += 0x10) { 7665 /* Skip the big blank spaces */ 7666 if (i < 0x454000 && i > 0x5ffff) { 7667 if_printf(ifp, "0x%04X: " 7668 "0x%08X 0x%08X 0x%08X 0x%08X\n", i, 7669 REG_RD_IND(sc, i), 7670 REG_RD_IND(sc, i + 0x4), 7671 REG_RD_IND(sc, i + 0x8), 7672 REG_RD_IND(sc, i + 0xc)); 7673 } 7674 } 7675 7676 if_printf(ifp, 7677 "----------------------------" 7678 "----------------" 7679 "----------------------------\n"); 7680 } 7681 7682 7683 /****************************************************************************/ 7684 /* Prints out the RXP state. */ 7685 /* */ 7686 /* Returns: */ 7687 /* Nothing. 
*/ 7688 /****************************************************************************/ 7689 static void 7690 bce_dump_rxp_state(struct bce_softc *sc) 7691 { 7692 struct ifnet *ifp = &sc->arpcom.ac_if; 7693 uint32_t val1; 7694 int i; 7695 7696 if_printf(ifp, 7697 "----------------------------" 7698 " RXP State " 7699 "----------------------------\n"); 7700 7701 val1 = REG_RD_IND(sc, BCE_RXP_CPU_MODE); 7702 if_printf(ifp, "0x%08X - (0x%06X) rxp_cpu_mode\n", 7703 val1, BCE_RXP_CPU_MODE); 7704 7705 val1 = REG_RD_IND(sc, BCE_RXP_CPU_STATE); 7706 if_printf(ifp, "0x%08X - (0x%06X) rxp_cpu_state\n", 7707 val1, BCE_RXP_CPU_STATE); 7708 7709 val1 = REG_RD_IND(sc, BCE_RXP_CPU_EVENT_MASK); 7710 if_printf(ifp, "0x%08X - (0x%06X) rxp_cpu_event_mask\n", 7711 val1, BCE_RXP_CPU_EVENT_MASK); 7712 7713 if_printf(ifp, 7714 "----------------------------" 7715 " Register Dump " 7716 "----------------------------\n"); 7717 7718 for (i = BCE_RXP_CPU_MODE; i < 0xe8fff; i += 0x10) { 7719 /* Skip the big blank sapces */ 7720 if (i < 0xc5400 || i > 0xdffff) { 7721 if_printf(ifp, "0x%04X: " 7722 "0x%08X 0x%08X 0x%08X 0x%08X\n", i, 7723 REG_RD_IND(sc, i), 7724 REG_RD_IND(sc, i + 0x4), 7725 REG_RD_IND(sc, i + 0x8), 7726 REG_RD_IND(sc, i + 0xc)); 7727 } 7728 } 7729 7730 if_printf(ifp, 7731 "----------------------------" 7732 "----------------" 7733 "----------------------------\n"); 7734 } 7735 7736 7737 /****************************************************************************/ 7738 /* Prints out the TPAT state. */ 7739 /* */ 7740 /* Returns: */ 7741 /* Nothing. 
*/ 7742 /****************************************************************************/ 7743 static void 7744 bce_dump_tpat_state(struct bce_softc *sc) 7745 { 7746 struct ifnet *ifp = &sc->arpcom.ac_if; 7747 uint32_t val1; 7748 int i; 7749 7750 if_printf(ifp, 7751 "----------------------------" 7752 " TPAT State " 7753 "----------------------------\n"); 7754 7755 val1 = REG_RD_IND(sc, BCE_TPAT_CPU_MODE); 7756 if_printf(ifp, "0x%08X - (0x%06X) tpat_cpu_mode\n", 7757 val1, BCE_TPAT_CPU_MODE); 7758 7759 val1 = REG_RD_IND(sc, BCE_TPAT_CPU_STATE); 7760 if_printf(ifp, "0x%08X - (0x%06X) tpat_cpu_state\n", 7761 val1, BCE_TPAT_CPU_STATE); 7762 7763 val1 = REG_RD_IND(sc, BCE_TPAT_CPU_EVENT_MASK); 7764 if_printf(ifp, "0x%08X - (0x%06X) tpat_cpu_event_mask\n", 7765 val1, BCE_TPAT_CPU_EVENT_MASK); 7766 7767 if_printf(ifp, 7768 "----------------------------" 7769 " Register Dump " 7770 "----------------------------\n"); 7771 7772 for (i = BCE_TPAT_CPU_MODE; i < 0xa3fff; i += 0x10) { 7773 /* Skip the big blank spaces */ 7774 if (i < 0x854000 && i > 0x9ffff) { 7775 if_printf(ifp, "0x%04X: " 7776 "0x%08X 0x%08X 0x%08X 0x%08X\n", i, 7777 REG_RD_IND(sc, i), 7778 REG_RD_IND(sc, i + 0x4), 7779 REG_RD_IND(sc, i + 0x8), 7780 REG_RD_IND(sc, i + 0xc)); 7781 } 7782 } 7783 7784 if_printf(ifp, 7785 "----------------------------" 7786 "----------------" 7787 "----------------------------\n"); 7788 } 7789 7790 7791 /****************************************************************************/ 7792 /* Prints out the driver state and then enters the debugger. */ 7793 /* */ 7794 /* Returns: */ 7795 /* Nothing. 
*/ 7796 /****************************************************************************/ 7797 static void 7798 bce_breakpoint(struct bce_softc *sc) 7799 { 7800 #if 0 7801 bce_freeze_controller(sc); 7802 #endif 7803 7804 bce_dump_driver_state(sc); 7805 bce_dump_status_block(sc); 7806 bce_dump_tx_chain(sc, 0, TOTAL_TX_BD(sc)); 7807 bce_dump_hw_state(sc); 7808 bce_dump_txp_state(sc); 7809 7810 #if 0 7811 bce_unfreeze_controller(sc); 7812 #endif 7813 7814 /* Call the debugger. */ 7815 breakpoint(); 7816 } 7817 7818 #endif /* BCE_DEBUG */ 7819 7820 static int 7821 bce_sysctl_tx_bds_int(SYSCTL_HANDLER_ARGS) 7822 { 7823 struct bce_softc *sc = arg1; 7824 7825 return bce_sysctl_coal_change(oidp, arg1, arg2, req, 7826 &sc->bce_tx_quick_cons_trip_int, 7827 BCE_COALMASK_TX_BDS_INT); 7828 } 7829 7830 static int 7831 bce_sysctl_tx_bds(SYSCTL_HANDLER_ARGS) 7832 { 7833 struct bce_softc *sc = arg1; 7834 7835 return bce_sysctl_coal_change(oidp, arg1, arg2, req, 7836 &sc->bce_tx_quick_cons_trip, 7837 BCE_COALMASK_TX_BDS); 7838 } 7839 7840 static int 7841 bce_sysctl_tx_ticks_int(SYSCTL_HANDLER_ARGS) 7842 { 7843 struct bce_softc *sc = arg1; 7844 7845 return bce_sysctl_coal_change(oidp, arg1, arg2, req, 7846 &sc->bce_tx_ticks_int, 7847 BCE_COALMASK_TX_TICKS_INT); 7848 } 7849 7850 static int 7851 bce_sysctl_tx_ticks(SYSCTL_HANDLER_ARGS) 7852 { 7853 struct bce_softc *sc = arg1; 7854 7855 return bce_sysctl_coal_change(oidp, arg1, arg2, req, 7856 &sc->bce_tx_ticks, 7857 BCE_COALMASK_TX_TICKS); 7858 } 7859 7860 static int 7861 bce_sysctl_rx_bds_int(SYSCTL_HANDLER_ARGS) 7862 { 7863 struct bce_softc *sc = arg1; 7864 7865 return bce_sysctl_coal_change(oidp, arg1, arg2, req, 7866 &sc->bce_rx_quick_cons_trip_int, 7867 BCE_COALMASK_RX_BDS_INT); 7868 } 7869 7870 static int 7871 bce_sysctl_rx_bds(SYSCTL_HANDLER_ARGS) 7872 { 7873 struct bce_softc *sc = arg1; 7874 7875 return bce_sysctl_coal_change(oidp, arg1, arg2, req, 7876 &sc->bce_rx_quick_cons_trip, 7877 BCE_COALMASK_RX_BDS); 7878 } 7879 7880 
/* Sysctl handler: interrupt-mode RX ticks coalescing parameter. */
static int
bce_sysctl_rx_ticks_int(SYSCTL_HANDLER_ARGS)
{
	struct bce_softc *sc = arg1;

	return bce_sysctl_coal_change(oidp, arg1, arg2, req,
	    &sc->bce_rx_ticks_int,
	    BCE_COALMASK_RX_TICKS_INT);
}

/* Sysctl handler: RX ticks coalescing parameter. */
static int
bce_sysctl_rx_ticks(SYSCTL_HANDLER_ARGS)
{
	struct bce_softc *sc = arg1;

	return bce_sysctl_coal_change(oidp, arg1, arg2, req,
	    &sc->bce_rx_ticks,
	    BCE_COALMASK_RX_TICKS);
}

/*
 * Common sysctl handler for all host-coalescing tunables.
 *
 * Reads or updates the softc field pointed to by 'coal' under the
 * interface serializer.  On a successful write, 'coalchg_mask' is OR'ed
 * into sc->bce_coalchg_mask so bce_coal_change() later knows which
 * hardware register(s) to rewrite.  Negative values are rejected with
 * EINVAL; the hardware register itself is NOT touched here.
 */
static int
bce_sysctl_coal_change(SYSCTL_HANDLER_ARGS, uint32_t *coal,
    uint32_t coalchg_mask)
{
	struct bce_softc *sc = arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error = 0, v;

	/* Serialize against the interrupt/ioctl paths. */
	lwkt_serialize_enter(ifp->if_serializer);

	v = *coal;
	error = sysctl_handle_int(oidp, &v, 0, req);
	/* newptr != NULL means this was a write, not just a read. */
	if (!error && req->newptr != NULL) {
		if (v < 0) {
			error = EINVAL;
		} else {
			*coal = v;
			/* Mark which coalescing register needs rewriting. */
			sc->bce_coalchg_mask |= coalchg_mask;
		}
	}

	lwkt_serialize_exit(ifp->if_serializer);
	return error;
}

/*
 * Push any pending coalescing parameter changes (recorded in
 * sc->bce_coalchg_mask by the sysctl handlers) into the host coalescing
 * registers.  Each register packs the interrupt-mode value in the high
 * 16 bits and the normal value in the low 16 bits.  Must be called with
 * the interface serializer held; does nothing but clear the mask when
 * the interface is not running.
 */
static void
bce_coal_change(struct bce_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if ((ifp->if_flags & IFF_RUNNING) == 0) {
		/* Values will be programmed at the next init. */
		sc->bce_coalchg_mask = 0;
		return;
	}

	if (sc->bce_coalchg_mask &
	    (BCE_COALMASK_TX_BDS | BCE_COALMASK_TX_BDS_INT)) {
		REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
		    (sc->bce_tx_quick_cons_trip_int << 16) |
		    sc->bce_tx_quick_cons_trip);
		if (bootverbose) {
			if_printf(ifp, "tx_bds %u, tx_bds_int %u\n",
			    sc->bce_tx_quick_cons_trip,
			    sc->bce_tx_quick_cons_trip_int);
		}
	}

	if (sc->bce_coalchg_mask &
	    (BCE_COALMASK_TX_TICKS | BCE_COALMASK_TX_TICKS_INT)) {
		REG_WR(sc, BCE_HC_TX_TICKS,
		    (sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks);
		if (bootverbose) {
			if_printf(ifp, "tx_ticks %u, tx_ticks_int %u\n",
			    sc->bce_tx_ticks,
			    sc->bce_tx_ticks_int);
		}
	}

	if (sc->bce_coalchg_mask &
	    (BCE_COALMASK_RX_BDS | BCE_COALMASK_RX_BDS_INT)) {
		REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
		    (sc->bce_rx_quick_cons_trip_int << 16) |
		    sc->bce_rx_quick_cons_trip);
		if (bootverbose) {
			if_printf(ifp, "rx_bds %u, rx_bds_int %u\n",
			    sc->bce_rx_quick_cons_trip,
			    sc->bce_rx_quick_cons_trip_int);
		}
	}

	if (sc->bce_coalchg_mask &
	    (BCE_COALMASK_RX_TICKS | BCE_COALMASK_RX_TICKS_INT)) {
		REG_WR(sc, BCE_HC_RX_TICKS,
		    (sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks);
		if (bootverbose) {
			if_printf(ifp, "rx_ticks %u, rx_ticks_int %u\n",
			    sc->bce_rx_ticks, sc->bce_rx_ticks_int);
		}
	}

	sc->bce_coalchg_mask = 0;
}

/*
 * Prepare a TSO frame for transmission: compute the TX buffer descriptor
 * flags and the MSS field from the mbuf's checksum/TSO metadata.
 *
 * *mp may be replaced (m_pullup) so that the Ethernet + IP + TCP headers
 * are contiguous in the first mbuf; on pullup failure *mp is set to NULL
 * and ENOBUFS is returned.  On success, *flags0 receives the TX BD flags
 * (LSO bit plus the IP+TCP option length, in 32-bit words, in bits 8+)
 * and *mss0 the little-endian segment size.  Returns 0 on success.
 */
static int
bce_tso_setup(struct bce_softc *sc, struct mbuf **mp,
    uint16_t *flags0, uint16_t *mss0)
{
	struct mbuf *m;
	uint16_t flags;
	int thoff, iphlen, hoff;

	m = *mp;
	KASSERT(M_WRITABLE(m), ("TSO mbuf not writable"));

	/* Header lengths recorded by the stack in the packet header. */
	hoff = m->m_pkthdr.csum_lhlen;		/* link (Ethernet) header */
	iphlen = m->m_pkthdr.csum_iphlen;	/* IP header incl. options */
	thoff = m->m_pkthdr.csum_thlen;		/* TCP header incl. options */

	KASSERT(hoff >= sizeof(struct ether_header),
	    ("invalid ether header len %d", hoff));
	KASSERT(iphlen >= sizeof(struct ip),
	    ("invalid ip header len %d", iphlen));
	KASSERT(thoff >= sizeof(struct tcphdr),
	    ("invalid tcp header len %d", thoff));

	/* Make sure all headers live contiguously in the first mbuf. */
	if (__predict_false(m->m_len < hoff + iphlen + thoff)) {
		m = m_pullup(m, hoff + iphlen + thoff);
		if (m == NULL) {
			/* m_pullup freed the chain; tell the caller. */
			*mp = NULL;
			return ENOBUFS;
		}
		*mp = m;
	}

	/* Set the LSO flag in the TX BD */
	flags = TX_BD_FLAGS_SW_LSO;

	/* Set the length of IP + TCP options (in 32 bit words) */
	flags |= (((iphlen + thoff -
	    sizeof(struct ip) - sizeof(struct tcphdr)) >> 2) << 8);

	*mss0 = htole16(m->m_pkthdr.tso_segsz);
	*flags0 = flags;

	return 0;
}