/*-
 * Copyright (c) 2006-2007 Broadcom Corporation
 *	David Christensen <davidch@broadcom.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Broadcom Corporation nor the name of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written consent.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
29 * 30 * $FreeBSD: src/sys/dev/bce/if_bce.c,v 1.31 2007/05/16 23:34:11 davidch Exp $ 31 * $DragonFly: src/sys/dev/netif/bce/if_bce.c,v 1.21 2008/11/19 13:57:49 sephe Exp $ 32 */ 33 34 /* 35 * The following controllers are supported by this driver: 36 * BCM5706C A2, A3 37 * BCM5708C B1, B2 38 * 39 * The following controllers are not supported by this driver: 40 * BCM5706C A0, A1 41 * BCM5706S A0, A1, A2, A3 42 * BCM5708C A0, B0 43 * BCM5708S A0, B0, B1, B2 44 */ 45 46 #include "opt_bce.h" 47 #include "opt_polling.h" 48 49 #include <sys/param.h> 50 #include <sys/bus.h> 51 #include <sys/endian.h> 52 #include <sys/kernel.h> 53 #include <sys/interrupt.h> 54 #include <sys/mbuf.h> 55 #include <sys/malloc.h> 56 #include <sys/queue.h> 57 #ifdef BCE_DEBUG 58 #include <sys/random.h> 59 #endif 60 #include <sys/rman.h> 61 #include <sys/serialize.h> 62 #include <sys/socket.h> 63 #include <sys/sockio.h> 64 #include <sys/sysctl.h> 65 66 #include <net/bpf.h> 67 #include <net/ethernet.h> 68 #include <net/if.h> 69 #include <net/if_arp.h> 70 #include <net/if_dl.h> 71 #include <net/if_media.h> 72 #include <net/if_types.h> 73 #include <net/ifq_var.h> 74 #include <net/vlan/if_vlan_var.h> 75 #include <net/vlan/if_vlan_ether.h> 76 77 #include <dev/netif/mii_layer/mii.h> 78 #include <dev/netif/mii_layer/miivar.h> 79 80 #include <bus/pci/pcireg.h> 81 #include <bus/pci/pcivar.h> 82 83 #include "miibus_if.h" 84 85 #include <dev/netif/bce/if_bcereg.h> 86 #include <dev/netif/bce/if_bcefw.h> 87 88 /****************************************************************************/ 89 /* BCE Debug Options */ 90 /****************************************************************************/ 91 #ifdef BCE_DEBUG 92 93 static uint32_t bce_debug = BCE_WARN; 94 95 /* 96 * 0 = Never 97 * 1 = 1 in 2,147,483,648 98 * 256 = 1 in 8,388,608 99 * 2048 = 1 in 1,048,576 100 * 65536 = 1 in 32,768 101 * 1048576 = 1 in 2,048 102 * 268435456 = 1 in 8 103 * 536870912 = 1 in 4 104 * 1073741824 = 1 in 2 105 * 106 * 
bce_debug_l2fhdr_status_check: 107 * How often the l2_fhdr frame error check will fail. 108 * 109 * bce_debug_unexpected_attention: 110 * How often the unexpected attention check will fail. 111 * 112 * bce_debug_mbuf_allocation_failure: 113 * How often to simulate an mbuf allocation failure. 114 * 115 * bce_debug_dma_map_addr_failure: 116 * How often to simulate a DMA mapping failure. 117 * 118 * bce_debug_bootcode_running_failure: 119 * How often to simulate a bootcode failure. 120 */ 121 static int bce_debug_l2fhdr_status_check = 0; 122 static int bce_debug_unexpected_attention = 0; 123 static int bce_debug_mbuf_allocation_failure = 0; 124 static int bce_debug_dma_map_addr_failure = 0; 125 static int bce_debug_bootcode_running_failure = 0; 126 127 #endif /* BCE_DEBUG */ 128 129 130 /****************************************************************************/ 131 /* PCI Device ID Table */ 132 /* */ 133 /* Used by bce_probe() to identify the devices supported by this driver. */ 134 /****************************************************************************/ 135 #define BCE_DEVDESC_MAX 64 136 137 static struct bce_type bce_devs[] = { 138 /* BCM5706C Controllers and OEM boards. */ 139 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x3101, 140 "HP NC370T Multifunction Gigabit Server Adapter" }, 141 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x3106, 142 "HP NC370i Multifunction Gigabit Server Adapter" }, 143 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706, PCI_ANY_ID, PCI_ANY_ID, 144 "Broadcom NetXtreme II BCM5706 1000Base-T" }, 145 146 /* BCM5706S controllers and OEM boards. */ 147 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, HP_VENDORID, 0x3102, 148 "HP NC370F Multifunction Gigabit Server Adapter" }, 149 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, PCI_ANY_ID, PCI_ANY_ID, 150 "Broadcom NetXtreme II BCM5706 1000Base-SX" }, 151 152 /* BCM5708C controllers and OEM boards. 
*/ 153 { BRCM_VENDORID, BRCM_DEVICEID_BCM5708, PCI_ANY_ID, PCI_ANY_ID, 154 "Broadcom NetXtreme II BCM5708 1000Base-T" }, 155 156 /* BCM5708S controllers and OEM boards. */ 157 { BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, PCI_ANY_ID, PCI_ANY_ID, 158 "Broadcom NetXtreme II BCM5708S 1000Base-T" }, 159 { 0, 0, 0, 0, NULL } 160 }; 161 162 163 /****************************************************************************/ 164 /* Supported Flash NVRAM device data. */ 165 /****************************************************************************/ 166 static const struct flash_spec flash_table[] = 167 { 168 /* Slow EEPROM */ 169 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400, 170 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE, 171 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE, 172 "EEPROM - slow"}, 173 /* Expansion entry 0001 */ 174 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406, 175 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, 176 SAIFUN_FLASH_BYTE_ADDR_MASK, 0, 177 "Entry 0001"}, 178 /* Saifun SA25F010 (non-buffered flash) */ 179 /* strap, cfg1, & write1 need updates */ 180 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406, 181 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, 182 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2, 183 "Non-buffered flash (128kB)"}, 184 /* Saifun SA25F020 (non-buffered flash) */ 185 /* strap, cfg1, & write1 need updates */ 186 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406, 187 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, 188 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4, 189 "Non-buffered flash (256kB)"}, 190 /* Expansion entry 0100 */ 191 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406, 192 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, 193 SAIFUN_FLASH_BYTE_ADDR_MASK, 0, 194 "Entry 0100"}, 195 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */ 196 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406, 197 0, 
ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE, 198 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2, 199 "Entry 0101: ST M45PE10 (128kB non-bufferred)"}, 200 /* Entry 0110: ST M45PE20 (non-buffered flash)*/ 201 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406, 202 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE, 203 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4, 204 "Entry 0110: ST M45PE20 (256kB non-bufferred)"}, 205 /* Saifun SA25F005 (non-buffered flash) */ 206 /* strap, cfg1, & write1 need updates */ 207 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406, 208 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, 209 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE, 210 "Non-buffered flash (64kB)"}, 211 /* Fast EEPROM */ 212 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400, 213 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE, 214 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE, 215 "EEPROM - fast"}, 216 /* Expansion entry 1001 */ 217 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406, 218 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, 219 SAIFUN_FLASH_BYTE_ADDR_MASK, 0, 220 "Entry 1001"}, 221 /* Expansion entry 1010 */ 222 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406, 223 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, 224 SAIFUN_FLASH_BYTE_ADDR_MASK, 0, 225 "Entry 1010"}, 226 /* ATMEL AT45DB011B (buffered flash) */ 227 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400, 228 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE, 229 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE, 230 "Buffered flash (128kB)"}, 231 /* Expansion entry 1100 */ 232 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406, 233 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, 234 SAIFUN_FLASH_BYTE_ADDR_MASK, 0, 235 "Entry 1100"}, 236 /* Expansion entry 1101 */ 237 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406, 238 0, SAIFUN_FLASH_PAGE_BITS, 
SAIFUN_FLASH_PAGE_SIZE, 239 SAIFUN_FLASH_BYTE_ADDR_MASK, 0, 240 "Entry 1101"}, 241 /* Ateml Expansion entry 1110 */ 242 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400, 243 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE, 244 BUFFERED_FLASH_BYTE_ADDR_MASK, 0, 245 "Entry 1110 (Atmel)"}, 246 /* ATMEL AT45DB021B (buffered flash) */ 247 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400, 248 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE, 249 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2, 250 "Buffered flash (256kB)"}, 251 }; 252 253 254 /****************************************************************************/ 255 /* DragonFly device entry points. */ 256 /****************************************************************************/ 257 static int bce_probe(device_t); 258 static int bce_attach(device_t); 259 static int bce_detach(device_t); 260 static void bce_shutdown(device_t); 261 262 /****************************************************************************/ 263 /* BCE Debug Data Structure Dump Routines */ 264 /****************************************************************************/ 265 #ifdef BCE_DEBUG 266 static void bce_dump_mbuf(struct bce_softc *, struct mbuf *); 267 static void bce_dump_tx_mbuf_chain(struct bce_softc *, int, int); 268 static void bce_dump_rx_mbuf_chain(struct bce_softc *, int, int); 269 static void bce_dump_txbd(struct bce_softc *, int, struct tx_bd *); 270 static void bce_dump_rxbd(struct bce_softc *, int, struct rx_bd *); 271 static void bce_dump_l2fhdr(struct bce_softc *, int, 272 struct l2_fhdr *) __unused; 273 static void bce_dump_tx_chain(struct bce_softc *, int, int); 274 static void bce_dump_rx_chain(struct bce_softc *, int, int); 275 static void bce_dump_status_block(struct bce_softc *); 276 static void bce_dump_driver_state(struct bce_softc *); 277 static void bce_dump_stats_block(struct bce_softc *) __unused; 278 static void bce_dump_hw_state(struct bce_softc *); 279 static 
void bce_dump_txp_state(struct bce_softc *); 280 static void bce_dump_rxp_state(struct bce_softc *) __unused; 281 static void bce_dump_tpat_state(struct bce_softc *) __unused; 282 static void bce_freeze_controller(struct bce_softc *) __unused; 283 static void bce_unfreeze_controller(struct bce_softc *) __unused; 284 static void bce_breakpoint(struct bce_softc *); 285 #endif /* BCE_DEBUG */ 286 287 288 /****************************************************************************/ 289 /* BCE Register/Memory Access Routines */ 290 /****************************************************************************/ 291 static uint32_t bce_reg_rd_ind(struct bce_softc *, uint32_t); 292 static void bce_reg_wr_ind(struct bce_softc *, uint32_t, uint32_t); 293 static void bce_ctx_wr(struct bce_softc *, uint32_t, uint32_t, uint32_t); 294 static int bce_miibus_read_reg(device_t, int, int); 295 static int bce_miibus_write_reg(device_t, int, int, int); 296 static void bce_miibus_statchg(device_t); 297 298 299 /****************************************************************************/ 300 /* BCE NVRAM Access Routines */ 301 /****************************************************************************/ 302 static int bce_acquire_nvram_lock(struct bce_softc *); 303 static int bce_release_nvram_lock(struct bce_softc *); 304 static void bce_enable_nvram_access(struct bce_softc *); 305 static void bce_disable_nvram_access(struct bce_softc *); 306 static int bce_nvram_read_dword(struct bce_softc *, uint32_t, uint8_t *, 307 uint32_t); 308 static int bce_init_nvram(struct bce_softc *); 309 static int bce_nvram_read(struct bce_softc *, uint32_t, uint8_t *, int); 310 static int bce_nvram_test(struct bce_softc *); 311 #ifdef BCE_NVRAM_WRITE_SUPPORT 312 static int bce_enable_nvram_write(struct bce_softc *); 313 static void bce_disable_nvram_write(struct bce_softc *); 314 static int bce_nvram_erase_page(struct bce_softc *, uint32_t); 315 static int bce_nvram_write_dword(struct bce_softc *, 
uint32_t, uint8_t *, 316 uint32_t); 317 static int bce_nvram_write(struct bce_softc *, uint32_t, uint8_t *, 318 int) __unused; 319 #endif 320 321 /****************************************************************************/ 322 /* BCE DMA Allocate/Free Routines */ 323 /****************************************************************************/ 324 static int bce_dma_alloc(struct bce_softc *); 325 static void bce_dma_free(struct bce_softc *); 326 static void bce_dma_map_addr(void *, bus_dma_segment_t *, int, int); 327 static void bce_dma_map_mbuf(void *, bus_dma_segment_t *, int, 328 bus_size_t, int); 329 330 /****************************************************************************/ 331 /* BCE Firmware Synchronization and Load */ 332 /****************************************************************************/ 333 static int bce_fw_sync(struct bce_softc *, uint32_t); 334 static void bce_load_rv2p_fw(struct bce_softc *, uint32_t *, 335 uint32_t, uint32_t); 336 static void bce_load_cpu_fw(struct bce_softc *, struct cpu_reg *, 337 struct fw_info *); 338 static void bce_init_cpus(struct bce_softc *); 339 340 static void bce_stop(struct bce_softc *); 341 static int bce_reset(struct bce_softc *, uint32_t); 342 static int bce_chipinit(struct bce_softc *); 343 static int bce_blockinit(struct bce_softc *); 344 static int bce_newbuf_std(struct bce_softc *, struct mbuf *, 345 uint16_t *, uint16_t *, uint32_t *); 346 347 static int bce_init_tx_chain(struct bce_softc *); 348 static int bce_init_rx_chain(struct bce_softc *); 349 static void bce_free_rx_chain(struct bce_softc *); 350 static void bce_free_tx_chain(struct bce_softc *); 351 352 static int bce_encap(struct bce_softc *, struct mbuf **); 353 static void bce_start(struct ifnet *); 354 static int bce_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *); 355 static void bce_watchdog(struct ifnet *); 356 static int bce_ifmedia_upd(struct ifnet *); 357 static void bce_ifmedia_sts(struct ifnet *, struct ifmediareq 
*); 358 static void bce_init(void *); 359 static void bce_mgmt_init(struct bce_softc *); 360 361 static void bce_init_ctx(struct bce_softc *); 362 static void bce_get_mac_addr(struct bce_softc *); 363 static void bce_set_mac_addr(struct bce_softc *); 364 static void bce_phy_intr(struct bce_softc *); 365 static void bce_rx_intr(struct bce_softc *, int); 366 static void bce_tx_intr(struct bce_softc *); 367 static void bce_disable_intr(struct bce_softc *); 368 static void bce_enable_intr(struct bce_softc *); 369 370 #ifdef DEVICE_POLLING 371 static void bce_poll(struct ifnet *, enum poll_cmd, int); 372 #endif 373 static void bce_intr(void *); 374 static void bce_set_rx_mode(struct bce_softc *); 375 static void bce_stats_update(struct bce_softc *); 376 static void bce_tick(void *); 377 static void bce_tick_serialized(struct bce_softc *); 378 static void bce_add_sysctls(struct bce_softc *); 379 380 static void bce_coal_change(struct bce_softc *); 381 static int bce_sysctl_tx_bds_int(SYSCTL_HANDLER_ARGS); 382 static int bce_sysctl_tx_bds(SYSCTL_HANDLER_ARGS); 383 static int bce_sysctl_tx_ticks_int(SYSCTL_HANDLER_ARGS); 384 static int bce_sysctl_tx_ticks(SYSCTL_HANDLER_ARGS); 385 static int bce_sysctl_rx_bds_int(SYSCTL_HANDLER_ARGS); 386 static int bce_sysctl_rx_bds(SYSCTL_HANDLER_ARGS); 387 static int bce_sysctl_rx_ticks_int(SYSCTL_HANDLER_ARGS); 388 static int bce_sysctl_rx_ticks(SYSCTL_HANDLER_ARGS); 389 static int bce_sysctl_coal_change(SYSCTL_HANDLER_ARGS, 390 uint32_t *, uint32_t); 391 392 /* 393 * NOTE: 394 * Don't set bce_tx_ticks_int/bce_tx_ticks to 1023. Linux's bnx2 395 * takes 1023 as the TX ticks limit. However, using 1023 will 396 * cause 5708(B2) to generate extra interrupts (~2000/s) even when 397 * there is _no_ network activity on the NIC. 
398 */ 399 static uint32_t bce_tx_bds_int = 255; /* bcm: 20 */ 400 static uint32_t bce_tx_bds = 255; /* bcm: 20 */ 401 static uint32_t bce_tx_ticks_int = 1022; /* bcm: 80 */ 402 static uint32_t bce_tx_ticks = 1022; /* bcm: 80 */ 403 static uint32_t bce_rx_bds_int = 128; /* bcm: 6 */ 404 static uint32_t bce_rx_bds = 128; /* bcm: 6 */ 405 static uint32_t bce_rx_ticks_int = 125; /* bcm: 18 */ 406 static uint32_t bce_rx_ticks = 125; /* bcm: 18 */ 407 408 TUNABLE_INT("hw.bce.tx_bds_int", &bce_tx_bds_int); 409 TUNABLE_INT("hw.bce.tx_bds", &bce_tx_bds); 410 TUNABLE_INT("hw.bce.tx_ticks_int", &bce_tx_ticks_int); 411 TUNABLE_INT("hw.bce.tx_ticks", &bce_tx_ticks); 412 TUNABLE_INT("hw.bce.rx_bds_int", &bce_rx_bds_int); 413 TUNABLE_INT("hw.bce.rx_bds", &bce_rx_bds); 414 TUNABLE_INT("hw.bce.rx_ticks_int", &bce_rx_ticks_int); 415 TUNABLE_INT("hw.bce.rx_ticks", &bce_rx_ticks); 416 417 /****************************************************************************/ 418 /* DragonFly device dispatch table. 
*/ 419 /****************************************************************************/ 420 static device_method_t bce_methods[] = { 421 /* Device interface */ 422 DEVMETHOD(device_probe, bce_probe), 423 DEVMETHOD(device_attach, bce_attach), 424 DEVMETHOD(device_detach, bce_detach), 425 DEVMETHOD(device_shutdown, bce_shutdown), 426 427 /* bus interface */ 428 DEVMETHOD(bus_print_child, bus_generic_print_child), 429 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 430 431 /* MII interface */ 432 DEVMETHOD(miibus_readreg, bce_miibus_read_reg), 433 DEVMETHOD(miibus_writereg, bce_miibus_write_reg), 434 DEVMETHOD(miibus_statchg, bce_miibus_statchg), 435 436 { 0, 0 } 437 }; 438 439 static driver_t bce_driver = { 440 "bce", 441 bce_methods, 442 sizeof(struct bce_softc) 443 }; 444 445 static devclass_t bce_devclass; 446 447 448 DECLARE_DUMMY_MODULE(if_xl); 449 MODULE_DEPEND(bce, miibus, 1, 1, 1); 450 DRIVER_MODULE(if_bce, pci, bce_driver, bce_devclass, 0, 0); 451 DRIVER_MODULE(miibus, bce, miibus_driver, miibus_devclass, 0, 0); 452 453 454 /****************************************************************************/ 455 /* Device probe function. */ 456 /* */ 457 /* Compares the device to the driver's list of supported devices and */ 458 /* reports back to the OS whether this is the right driver for the device. */ 459 /* */ 460 /* Returns: */ 461 /* BUS_PROBE_DEFAULT on success, positive value on failure. */ 462 /****************************************************************************/ 463 static int 464 bce_probe(device_t dev) 465 { 466 struct bce_type *t; 467 uint16_t vid, did, svid, sdid; 468 469 /* Get the data for the device to be probed. */ 470 vid = pci_get_vendor(dev); 471 did = pci_get_device(dev); 472 svid = pci_get_subvendor(dev); 473 sdid = pci_get_subdevice(dev); 474 475 /* Look through the list of known devices for a match. 
*/ 476 for (t = bce_devs; t->bce_name != NULL; ++t) { 477 if (vid == t->bce_vid && did == t->bce_did && 478 (svid == t->bce_svid || t->bce_svid == PCI_ANY_ID) && 479 (sdid == t->bce_sdid || t->bce_sdid == PCI_ANY_ID)) { 480 uint32_t revid = pci_read_config(dev, PCIR_REVID, 4); 481 char *descbuf; 482 483 descbuf = kmalloc(BCE_DEVDESC_MAX, M_TEMP, M_WAITOK); 484 485 /* Print out the device identity. */ 486 ksnprintf(descbuf, BCE_DEVDESC_MAX, "%s (%c%d)", 487 t->bce_name, 488 ((revid & 0xf0) >> 4) + 'A', revid & 0xf); 489 490 device_set_desc_copy(dev, descbuf); 491 kfree(descbuf, M_TEMP); 492 return 0; 493 } 494 } 495 return ENXIO; 496 } 497 498 499 /****************************************************************************/ 500 /* Device attach function. */ 501 /* */ 502 /* Allocates device resources, performs secondary chip identification, */ 503 /* resets and initializes the hardware, and initializes driver instance */ 504 /* variables. */ 505 /* */ 506 /* Returns: */ 507 /* 0 on success, positive value on failure. */ 508 /****************************************************************************/ 509 static int 510 bce_attach(device_t dev) 511 { 512 struct bce_softc *sc = device_get_softc(dev); 513 struct ifnet *ifp = &sc->arpcom.ac_if; 514 uint32_t val; 515 int rid, rc = 0; 516 #ifdef notyet 517 int count; 518 #endif 519 520 sc->bce_dev = dev; 521 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 522 523 pci_enable_busmaster(dev); 524 525 /* Allocate PCI memory resources. */ 526 rid = PCIR_BAR(0); 527 sc->bce_res_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 528 RF_ACTIVE | PCI_RF_DENSE); 529 if (sc->bce_res_mem == NULL) { 530 device_printf(dev, "PCI memory allocation failed\n"); 531 return ENXIO; 532 } 533 sc->bce_btag = rman_get_bustag(sc->bce_res_mem); 534 sc->bce_bhandle = rman_get_bushandle(sc->bce_res_mem); 535 536 /* Allocate PCI IRQ resources. 
*/ 537 #ifdef notyet 538 count = pci_msi_count(dev); 539 if (count == 1 && pci_alloc_msi(dev, &count) == 0) { 540 rid = 1; 541 sc->bce_flags |= BCE_USING_MSI_FLAG; 542 } else 543 #endif 544 rid = 0; 545 sc->bce_res_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 546 RF_SHAREABLE | RF_ACTIVE); 547 if (sc->bce_res_irq == NULL) { 548 device_printf(dev, "PCI map interrupt failed\n"); 549 rc = ENXIO; 550 goto fail; 551 } 552 553 /* 554 * Configure byte swap and enable indirect register access. 555 * Rely on CPU to do target byte swapping on big endian systems. 556 * Access to registers outside of PCI configurtion space are not 557 * valid until this is done. 558 */ 559 pci_write_config(dev, BCE_PCICFG_MISC_CONFIG, 560 BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA | 561 BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP, 4); 562 563 /* Save ASIC revsion info. */ 564 sc->bce_chipid = REG_RD(sc, BCE_MISC_ID); 565 566 /* Weed out any non-production controller revisions. */ 567 switch(BCE_CHIP_ID(sc)) { 568 case BCE_CHIP_ID_5706_A0: 569 case BCE_CHIP_ID_5706_A1: 570 case BCE_CHIP_ID_5708_A0: 571 case BCE_CHIP_ID_5708_B0: 572 device_printf(dev, "Unsupported chip id 0x%08x!\n", 573 BCE_CHIP_ID(sc)); 574 rc = ENODEV; 575 goto fail; 576 } 577 578 /* 579 * The embedded PCIe to PCI-X bridge (EPB) 580 * in the 5708 cannot address memory above 581 * 40 bits (E7_5708CB1_23043 & E6_5708SB1_23043). 582 */ 583 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708) 584 sc->max_bus_addr = BCE_BUS_SPACE_MAXADDR; 585 else 586 sc->max_bus_addr = BUS_SPACE_MAXADDR; 587 588 /* 589 * Find the base address for shared memory access. 590 * Newer versions of bootcode use a signature and offset 591 * while older versions use a fixed address. 
592 */ 593 val = REG_RD_IND(sc, BCE_SHM_HDR_SIGNATURE); 594 if ((val & BCE_SHM_HDR_SIGNATURE_SIG_MASK) == BCE_SHM_HDR_SIGNATURE_SIG) 595 sc->bce_shmem_base = REG_RD_IND(sc, BCE_SHM_HDR_ADDR_0); 596 else 597 sc->bce_shmem_base = HOST_VIEW_SHMEM_BASE; 598 599 DBPRINT(sc, BCE_INFO, "bce_shmem_base = 0x%08X\n", sc->bce_shmem_base); 600 601 /* Get PCI bus information (speed and type). */ 602 val = REG_RD(sc, BCE_PCICFG_MISC_STATUS); 603 if (val & BCE_PCICFG_MISC_STATUS_PCIX_DET) { 604 uint32_t clkreg; 605 606 sc->bce_flags |= BCE_PCIX_FLAG; 607 608 clkreg = REG_RD(sc, BCE_PCICFG_PCI_CLOCK_CONTROL_BITS) & 609 BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET; 610 switch (clkreg) { 611 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ: 612 sc->bus_speed_mhz = 133; 613 break; 614 615 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ: 616 sc->bus_speed_mhz = 100; 617 break; 618 619 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ: 620 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ: 621 sc->bus_speed_mhz = 66; 622 break; 623 624 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ: 625 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ: 626 sc->bus_speed_mhz = 50; 627 break; 628 629 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW: 630 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ: 631 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ: 632 sc->bus_speed_mhz = 33; 633 break; 634 } 635 } else { 636 if (val & BCE_PCICFG_MISC_STATUS_M66EN) 637 sc->bus_speed_mhz = 66; 638 else 639 sc->bus_speed_mhz = 33; 640 } 641 642 if (val & BCE_PCICFG_MISC_STATUS_32BIT_DET) 643 sc->bce_flags |= BCE_PCI_32BIT_FLAG; 644 645 device_printf(dev, "ASIC ID 0x%08X; Revision (%c%d); PCI%s %s %dMHz\n", 646 sc->bce_chipid, 647 ((BCE_CHIP_ID(sc) & 0xf000) >> 12) + 'A', 648 (BCE_CHIP_ID(sc) & 0x0ff0) >> 4, 649 (sc->bce_flags & BCE_PCIX_FLAG) ? "-X" : "", 650 (sc->bce_flags & BCE_PCI_32BIT_FLAG) ? 
651 "32-bit" : "64-bit", sc->bus_speed_mhz); 652 653 /* Reset the controller. */ 654 rc = bce_reset(sc, BCE_DRV_MSG_CODE_RESET); 655 if (rc != 0) 656 goto fail; 657 658 /* Initialize the controller. */ 659 rc = bce_chipinit(sc); 660 if (rc != 0) { 661 device_printf(dev, "Controller initialization failed!\n"); 662 goto fail; 663 } 664 665 /* Perform NVRAM test. */ 666 rc = bce_nvram_test(sc); 667 if (rc != 0) { 668 device_printf(dev, "NVRAM test failed!\n"); 669 goto fail; 670 } 671 672 /* Fetch the permanent Ethernet MAC address. */ 673 bce_get_mac_addr(sc); 674 675 /* 676 * Trip points control how many BDs 677 * should be ready before generating an 678 * interrupt while ticks control how long 679 * a BD can sit in the chain before 680 * generating an interrupt. Set the default 681 * values for the RX and TX rings. 682 */ 683 684 #ifdef BCE_DRBUG 685 /* Force more frequent interrupts. */ 686 sc->bce_tx_quick_cons_trip_int = 1; 687 sc->bce_tx_quick_cons_trip = 1; 688 sc->bce_tx_ticks_int = 0; 689 sc->bce_tx_ticks = 0; 690 691 sc->bce_rx_quick_cons_trip_int = 1; 692 sc->bce_rx_quick_cons_trip = 1; 693 sc->bce_rx_ticks_int = 0; 694 sc->bce_rx_ticks = 0; 695 #else 696 sc->bce_tx_quick_cons_trip_int = bce_tx_bds_int; 697 sc->bce_tx_quick_cons_trip = bce_tx_bds; 698 sc->bce_tx_ticks_int = bce_tx_ticks_int; 699 sc->bce_tx_ticks = bce_tx_ticks; 700 701 sc->bce_rx_quick_cons_trip_int = bce_rx_bds_int; 702 sc->bce_rx_quick_cons_trip = bce_rx_bds; 703 sc->bce_rx_ticks_int = bce_rx_ticks_int; 704 sc->bce_rx_ticks = bce_rx_ticks; 705 #endif 706 707 /* Update statistics once every second. */ 708 sc->bce_stats_ticks = 1000000 & 0xffff00; 709 710 /* 711 * The copper based NetXtreme II controllers 712 * use an integrated PHY at address 1 while 713 * the SerDes controllers use a PHY at 714 * address 2. 
715 */ 716 sc->bce_phy_addr = 1; 717 718 if (BCE_CHIP_BOND_ID(sc) & BCE_CHIP_BOND_ID_SERDES_BIT) { 719 sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG; 720 sc->bce_flags |= BCE_NO_WOL_FLAG; 721 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708) { 722 sc->bce_phy_addr = 2; 723 val = REG_RD_IND(sc, sc->bce_shmem_base + 724 BCE_SHARED_HW_CFG_CONFIG); 725 if (val & BCE_SHARED_HW_CFG_PHY_2_5G) 726 sc->bce_phy_flags |= BCE_PHY_2_5G_CAPABLE_FLAG; 727 } 728 } 729 730 /* Allocate DMA memory resources. */ 731 rc = bce_dma_alloc(sc); 732 if (rc != 0) { 733 device_printf(dev, "DMA resource allocation failed!\n"); 734 goto fail; 735 } 736 737 /* Initialize the ifnet interface. */ 738 ifp->if_softc = sc; 739 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 740 ifp->if_ioctl = bce_ioctl; 741 ifp->if_start = bce_start; 742 ifp->if_init = bce_init; 743 ifp->if_watchdog = bce_watchdog; 744 #ifdef DEVICE_POLLING 745 ifp->if_poll = bce_poll; 746 #endif 747 ifp->if_mtu = ETHERMTU; 748 ifp->if_hwassist = BCE_IF_HWASSIST; 749 ifp->if_capabilities = BCE_IF_CAPABILITIES; 750 ifp->if_capenable = ifp->if_capabilities; 751 ifq_set_maxlen(&ifp->if_snd, USABLE_TX_BD); 752 ifq_set_ready(&ifp->if_snd); 753 754 if (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG) 755 ifp->if_baudrate = IF_Gbps(2.5); 756 else 757 ifp->if_baudrate = IF_Gbps(1); 758 759 /* Assume a standard 1500 byte MTU size for mbuf allocations. */ 760 sc->mbuf_alloc_size = MCLBYTES; 761 762 /* Look for our PHY. */ 763 rc = mii_phy_probe(dev, &sc->bce_miibus, 764 bce_ifmedia_upd, bce_ifmedia_sts); 765 if (rc != 0) { 766 device_printf(dev, "PHY probe failed!\n"); 767 goto fail; 768 } 769 770 /* Attach to the Ethernet interface list. */ 771 ether_ifattach(ifp, sc->eaddr, NULL); 772 773 callout_init(&sc->bce_stat_ch); 774 775 /* Hookup IRQ last. 
*/ 776 rc = bus_setup_intr(dev, sc->bce_res_irq, INTR_MPSAFE, bce_intr, sc, 777 &sc->bce_intrhand, ifp->if_serializer); 778 if (rc != 0) { 779 device_printf(dev, "Failed to setup IRQ!\n"); 780 ether_ifdetach(ifp); 781 goto fail; 782 } 783 784 ifp->if_cpuid = ithread_cpuid(rman_get_start(sc->bce_res_irq)); 785 KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus); 786 787 /* Print some important debugging info. */ 788 DBRUN(BCE_INFO, bce_dump_driver_state(sc)); 789 790 /* Add the supported sysctls to the kernel. */ 791 bce_add_sysctls(sc); 792 793 /* Get the firmware running so IPMI still works */ 794 bce_mgmt_init(sc); 795 796 return 0; 797 fail: 798 bce_detach(dev); 799 return(rc); 800 } 801 802 803 /****************************************************************************/ 804 /* Device detach function. */ 805 /* */ 806 /* Stops the controller, resets the controller, and releases resources. */ 807 /* */ 808 /* Returns: */ 809 /* 0 on success, positive value on failure. */ 810 /****************************************************************************/ 811 static int 812 bce_detach(device_t dev) 813 { 814 struct bce_softc *sc = device_get_softc(dev); 815 816 if (device_is_attached(dev)) { 817 struct ifnet *ifp = &sc->arpcom.ac_if; 818 819 /* Stop and reset the controller. */ 820 lwkt_serialize_enter(ifp->if_serializer); 821 bce_stop(sc); 822 bce_reset(sc, BCE_DRV_MSG_CODE_RESET); 823 bus_teardown_intr(dev, sc->bce_res_irq, sc->bce_intrhand); 824 lwkt_serialize_exit(ifp->if_serializer); 825 826 ether_ifdetach(ifp); 827 } 828 829 /* If we have a child device on the MII bus remove it too. */ 830 if (sc->bce_miibus) 831 device_delete_child(dev, sc->bce_miibus); 832 bus_generic_detach(dev); 833 834 if (sc->bce_res_irq != NULL) { 835 bus_release_resource(dev, SYS_RES_IRQ, 836 sc->bce_flags & BCE_USING_MSI_FLAG ? 
1 : 0, 837 sc->bce_res_irq); 838 } 839 840 #ifdef notyet 841 if (sc->bce_flags & BCE_USING_MSI_FLAG) 842 pci_release_msi(dev); 843 #endif 844 845 if (sc->bce_res_mem != NULL) { 846 bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0), 847 sc->bce_res_mem); 848 } 849 850 bce_dma_free(sc); 851 852 if (sc->bce_sysctl_tree != NULL) 853 sysctl_ctx_free(&sc->bce_sysctl_ctx); 854 855 return 0; 856 } 857 858 859 /****************************************************************************/ 860 /* Device shutdown function. */ 861 /* */ 862 /* Stops and resets the controller. */ 863 /* */ 864 /* Returns: */ 865 /* Nothing */ 866 /****************************************************************************/ 867 static void 868 bce_shutdown(device_t dev) 869 { 870 struct bce_softc *sc = device_get_softc(dev); 871 struct ifnet *ifp = &sc->arpcom.ac_if; 872 873 lwkt_serialize_enter(ifp->if_serializer); 874 bce_stop(sc); 875 bce_reset(sc, BCE_DRV_MSG_CODE_RESET); 876 lwkt_serialize_exit(ifp->if_serializer); 877 } 878 879 880 /****************************************************************************/ 881 /* Indirect register read. */ 882 /* */ 883 /* Reads NetXtreme II registers using an index/data register pair in PCI */ 884 /* configuration space. Using this mechanism avoids issues with posted */ 885 /* reads but is much slower than memory-mapped I/O. */ 886 /* */ 887 /* Returns: */ 888 /* The value of the register. 
 */
/****************************************************************************/
static uint32_t
bce_reg_rd_ind(struct bce_softc *sc, uint32_t offset)
{
	device_t dev = sc->bce_dev;

	/* Select the register through the PCI config window, then read. */
	pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4);
#ifdef BCE_DEBUG
	{
		uint32_t val;
		val = pci_read_config(dev, BCE_PCICFG_REG_WINDOW, 4);
		DBPRINT(sc, BCE_EXCESSIVE,
			"%s(); offset = 0x%08X, val = 0x%08X\n",
			__func__, offset, val);
		return val;
	}
#else
	return pci_read_config(dev, BCE_PCICFG_REG_WINDOW, 4);
#endif
}


/****************************************************************************/
/* Indirect register write.                                                 */
/*                                                                          */
/* Writes NetXtreme II registers using an index/data register pair in PCI   */
/* configuration space.  Using this mechanism avoids issues with posted     */
/* writes but is much slower than memory-mapped I/O.                        */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_reg_wr_ind(struct bce_softc *sc, uint32_t offset, uint32_t val)
{
	device_t dev = sc->bce_dev;

	DBPRINT(sc, BCE_EXCESSIVE, "%s(); offset = 0x%08X, val = 0x%08X\n",
		__func__, offset, val);

	/* Select the register through the PCI config window, then write. */
	pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4);
	pci_write_config(dev, BCE_PCICFG_REG_WINDOW, val, 4);
}


/****************************************************************************/
/* Context memory write.                                                    */
/*                                                                          */
/* The NetXtreme II controller uses context memory to track connection      */
/* information for L2 and higher network protocols.                         */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
 */
/****************************************************************************/
static void
bce_ctx_wr(struct bce_softc *sc, uint32_t cid_addr, uint32_t offset,
	   uint32_t val)
{
	DBPRINT(sc, BCE_EXCESSIVE, "%s(); cid_addr = 0x%08X, offset = 0x%08X, "
		"val = 0x%08X\n", __func__, cid_addr, offset, val);

	/* The context address is the CID base plus the register offset. */
	offset += cid_addr;
	REG_WR(sc, BCE_CTX_DATA_ADR, offset);
	REG_WR(sc, BCE_CTX_DATA, val);
}


/****************************************************************************/
/* PHY register read.                                                       */
/*                                                                          */
/* Implements register reads on the MII bus.                                */
/*                                                                          */
/* Returns:                                                                 */
/*   The value of the register.                                             */
/****************************************************************************/
static int
bce_miibus_read_reg(device_t dev, int phy, int reg)
{
	struct bce_softc *sc = device_get_softc(dev);
	uint32_t val;
	int i;

	/* Make sure we are accessing the correct PHY address. */
	if (phy != sc->bce_phy_addr) {
		DBPRINT(sc, BCE_VERBOSE,
			"Invalid PHY address %d for PHY read!\n", phy);
		return 0;
	}

	/*
	 * Auto-polling and manual MDIO access can't coexist; temporarily
	 * turn auto-polling off while we own the MDIO interface.
	 */
	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val = REG_RD(sc, BCE_EMAC_MDIO_MODE);
		val &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BCE_EMAC_MDIO_MODE, val);
		REG_RD(sc, BCE_EMAC_MDIO_MODE);

		DELAY(40);
	}

	/* Issue the read command and poll for completion. */
	val = BCE_MIPHY(phy) | BCE_MIREG(reg) |
	      BCE_EMAC_MDIO_COMM_COMMAND_READ | BCE_EMAC_MDIO_COMM_DISEXT |
	      BCE_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(sc, BCE_EMAC_MDIO_COMM, val);

	for (i = 0; i < BCE_PHY_TIMEOUT; i++) {
		DELAY(10);

		val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
		if (!(val & BCE_EMAC_MDIO_COMM_START_BUSY)) {
			DELAY(5);

			val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
			val &= BCE_EMAC_MDIO_COMM_DATA;
			break;
		}
	}

	if (val & BCE_EMAC_MDIO_COMM_START_BUSY) {
		if_printf(&sc->arpcom.ac_if,
			  "Error: PHY read timeout! phy = %d, reg = 0x%04X\n",
			  phy, reg);
		val = 0x0;
	} else {
		val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
	}

	DBPRINT(sc, BCE_EXCESSIVE,
		"%s(): phy = %d, reg = 0x%04X, val = 0x%04X\n",
		__func__, phy, (uint16_t)reg & 0xffff, (uint16_t) val & 0xffff);

	/* Restore auto-polling if it was enabled on entry. */
	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val = REG_RD(sc, BCE_EMAC_MDIO_MODE);
		val |= BCE_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BCE_EMAC_MDIO_MODE, val);
		REG_RD(sc, BCE_EMAC_MDIO_MODE);

		DELAY(40);
	}
	return (val & 0xffff);
}


/****************************************************************************/
/* PHY register write.                                                      */
/*                                                                          */
/* Implements register writes on the MII bus.                               */
/*                                                                          */
/* Returns:                                                                 */
/*   The value of the register.                                             */
/****************************************************************************/
static int
bce_miibus_write_reg(device_t dev, int phy, int reg, int val)
{
	struct bce_softc *sc = device_get_softc(dev);
	uint32_t val1;
	int i;

	/* Make sure we are accessing the correct PHY address.
 */
	if (phy != sc->bce_phy_addr) {
		DBPRINT(sc, BCE_WARN,
			"Invalid PHY address %d for PHY write!\n", phy);
		return(0);
	}

	DBPRINT(sc, BCE_EXCESSIVE,
		"%s(): phy = %d, reg = 0x%04X, val = 0x%04X\n",
		__func__, phy, (uint16_t)(reg & 0xffff),
		(uint16_t)(val & 0xffff));

	/* Temporarily disable auto-polling while we own the MDIO interface. */
	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE);
		val1 &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BCE_EMAC_MDIO_MODE, val1);
		REG_RD(sc, BCE_EMAC_MDIO_MODE);

		DELAY(40);
	}

	/* Issue the write command and poll for completion. */
	val1 = BCE_MIPHY(phy) | BCE_MIREG(reg) | val |
	       BCE_EMAC_MDIO_COMM_COMMAND_WRITE |
	       BCE_EMAC_MDIO_COMM_START_BUSY | BCE_EMAC_MDIO_COMM_DISEXT;
	REG_WR(sc, BCE_EMAC_MDIO_COMM, val1);

	for (i = 0; i < BCE_PHY_TIMEOUT; i++) {
		DELAY(10);

		val1 = REG_RD(sc, BCE_EMAC_MDIO_COMM);
		if (!(val1 & BCE_EMAC_MDIO_COMM_START_BUSY)) {
			DELAY(5);
			break;
		}
	}

	if (val1 & BCE_EMAC_MDIO_COMM_START_BUSY)
		if_printf(&sc->arpcom.ac_if, "PHY write timeout!\n");

	/* Restore auto-polling if it was enabled on entry. */
	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE);
		val1 |= BCE_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BCE_EMAC_MDIO_MODE, val1);
		REG_RD(sc, BCE_EMAC_MDIO_MODE);

		DELAY(40);
	}
	return 0;
}


/****************************************************************************/
/* MII bus status change.                                                   */
/*                                                                          */
/* Called by the MII bus driver when the PHY establishes link to set the    */
/* MAC interface registers.                                                 */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_miibus_statchg(device_t dev)
{
	struct bce_softc *sc = device_get_softc(dev);
	struct mii_data *mii = device_get_softc(sc->bce_miibus);

	DBPRINT(sc, BCE_INFO, "mii_media_active = 0x%08X\n",
		mii->mii_media_active);

#ifdef BCE_DEBUG
	/* Decode the interface media flags. */
	if_printf(&sc->arpcom.ac_if, "Media: ( ");
	switch(IFM_TYPE(mii->mii_media_active)) {
	case IFM_ETHER:
		kprintf("Ethernet )");
		break;
	default:
		kprintf("Unknown )");
		break;
	}

	kprintf(" Media Options: ( ");
	switch(IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_AUTO:
		kprintf("Autoselect )");
		break;
	case IFM_MANUAL:
		kprintf("Manual )");
		break;
	case IFM_NONE:
		kprintf("None )");
		break;
	case IFM_10_T:
		kprintf("10Base-T )");
		break;
	case IFM_100_TX:
		kprintf("100Base-TX )");
		break;
	case IFM_1000_SX:
		kprintf("1000Base-SX )");
		break;
	case IFM_1000_T:
		kprintf("1000Base-T )");
		break;
	default:
		kprintf("Other )");
		break;
	}

	kprintf(" Global Options: (");
	if (mii->mii_media_active & IFM_FDX)
		kprintf(" FullDuplex");
	if (mii->mii_media_active & IFM_HDX)
		kprintf(" HalfDuplex");
	if (mii->mii_media_active & IFM_LOOP)
		kprintf(" Loopback");
	if (mii->mii_media_active & IFM_FLAG0)
		kprintf(" Flag0");
	if (mii->mii_media_active & IFM_FLAG1)
		kprintf(" Flag1");
	if (mii->mii_media_active & IFM_FLAG2)
		kprintf(" Flag2");
	kprintf(" )\n");
#endif

	BCE_CLRBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT);

	/*
	 * Set MII or GMII interface based on the speed negotiated
	 * by the PHY.
	 */
	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
	    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) {
		DBPRINT(sc, BCE_INFO, "Setting GMII interface.\n");
		BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT_GMII);
	} else {
		DBPRINT(sc, BCE_INFO, "Setting MII interface.\n");
		BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT_MII);
	}

	/*
	 * Set half or full duplex based on the duplex mode negotiated
	 * by the PHY.
	 */
	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
		DBPRINT(sc, BCE_INFO, "Setting Full-Duplex interface.\n");
		BCE_CLRBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_HALF_DUPLEX);
	} else {
		DBPRINT(sc, BCE_INFO, "Setting Half-Duplex interface.\n");
		BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_HALF_DUPLEX);
	}
}


/****************************************************************************/
/* Acquire NVRAM lock.                                                      */
/*                                                                          */
/* Before the NVRAM can be accessed the caller must acquire an NVRAM lock.  */
/* The driver uses arbitration lock 2; the remaining locks are reserved     */
/* for firmware use.                                                        */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/
static int
bce_acquire_nvram_lock(struct bce_softc *sc)
{
	uint32_t val;
	int j;

	DBPRINT(sc, BCE_VERBOSE, "Acquiring NVRAM lock.\n");

	/* Request access to the flash interface.
*/ 1222 REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_SET2); 1223 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) { 1224 val = REG_RD(sc, BCE_NVM_SW_ARB); 1225 if (val & BCE_NVM_SW_ARB_ARB_ARB2) 1226 break; 1227 1228 DELAY(5); 1229 } 1230 1231 if (j >= NVRAM_TIMEOUT_COUNT) { 1232 DBPRINT(sc, BCE_WARN, "Timeout acquiring NVRAM lock!\n"); 1233 return EBUSY; 1234 } 1235 return 0; 1236 } 1237 1238 1239 /****************************************************************************/ 1240 /* Release NVRAM lock. */ 1241 /* */ 1242 /* When the caller is finished accessing NVRAM the lock must be released. */ 1243 /* Locks 0 and 2 are reserved, lock 1 is used by firmware and lock 2 is */ 1244 /* for use by the driver. */ 1245 /* */ 1246 /* Returns: */ 1247 /* 0 on success, positive value on failure. */ 1248 /****************************************************************************/ 1249 static int 1250 bce_release_nvram_lock(struct bce_softc *sc) 1251 { 1252 int j; 1253 uint32_t val; 1254 1255 DBPRINT(sc, BCE_VERBOSE, "Releasing NVRAM lock.\n"); 1256 1257 /* 1258 * Relinquish nvram interface. 1259 */ 1260 REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_CLR2); 1261 1262 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) { 1263 val = REG_RD(sc, BCE_NVM_SW_ARB); 1264 if (!(val & BCE_NVM_SW_ARB_ARB_ARB2)) 1265 break; 1266 1267 DELAY(5); 1268 } 1269 1270 if (j >= NVRAM_TIMEOUT_COUNT) { 1271 DBPRINT(sc, BCE_WARN, "Timeout reeasing NVRAM lock!\n"); 1272 return EBUSY; 1273 } 1274 return 0; 1275 } 1276 1277 1278 #ifdef BCE_NVRAM_WRITE_SUPPORT 1279 /****************************************************************************/ 1280 /* Enable NVRAM write access. */ 1281 /* */ 1282 /* Before writing to NVRAM the caller must enable NVRAM writes. */ 1283 /* */ 1284 /* Returns: */ 1285 /* 0 on success, positive value on failure. 
 */
/****************************************************************************/
static int
bce_enable_nvram_write(struct bce_softc *sc)
{
	uint32_t val;

	DBPRINT(sc, BCE_VERBOSE, "Enabling NVRAM write.\n");

	val = REG_RD(sc, BCE_MISC_CFG);
	REG_WR(sc, BCE_MISC_CFG, val | BCE_MISC_CFG_NVM_WR_EN_PCI);

	/* Non-buffered flash additionally requires a WREN command. */
	if (!sc->bce_flash_info->buffered) {
		int j;

		REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
		REG_WR(sc, BCE_NVM_COMMAND,
		       BCE_NVM_COMMAND_WREN | BCE_NVM_COMMAND_DOIT);

		/* Poll for command completion. */
		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
			DELAY(5);

			val = REG_RD(sc, BCE_NVM_COMMAND);
			if (val & BCE_NVM_COMMAND_DONE)
				break;
		}

		if (j >= NVRAM_TIMEOUT_COUNT) {
			DBPRINT(sc, BCE_WARN, "Timeout writing NVRAM!\n");
			return EBUSY;
		}
	}
	return 0;
}


/****************************************************************************/
/* Disable NVRAM write access.                                              */
/*                                                                          */
/* When the caller is finished writing to NVRAM write access must be        */
/* disabled.                                                                */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_disable_nvram_write(struct bce_softc *sc)
{
	uint32_t val;

	DBPRINT(sc, BCE_VERBOSE, "Disabling NVRAM write.\n");

	val = REG_RD(sc, BCE_MISC_CFG);
	REG_WR(sc, BCE_MISC_CFG, val & ~BCE_MISC_CFG_NVM_WR_EN);
}
#endif	/* BCE_NVRAM_WRITE_SUPPORT */


/****************************************************************************/
/* Enable NVRAM access.                                                     */
/*                                                                          */
/* Before accessing NVRAM for read or write operations the caller must      */
/* enable NVRAM access.                                                     */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
 */
/****************************************************************************/
static void
bce_enable_nvram_access(struct bce_softc *sc)
{
	uint32_t val;

	DBPRINT(sc, BCE_VERBOSE, "Enabling NVRAM access.\n");

	val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE);
	/* Enable both bits, even on read. */
	REG_WR(sc, BCE_NVM_ACCESS_ENABLE,
	       val | BCE_NVM_ACCESS_ENABLE_EN | BCE_NVM_ACCESS_ENABLE_WR_EN);
}


/****************************************************************************/
/* Disable NVRAM access.                                                    */
/*                                                                          */
/* When the caller is finished accessing NVRAM access must be disabled.     */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_disable_nvram_access(struct bce_softc *sc)
{
	uint32_t val;

	DBPRINT(sc, BCE_VERBOSE, "Disabling NVRAM access.\n");

	val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE);

	/* Disable both bits, even after read. */
	REG_WR(sc, BCE_NVM_ACCESS_ENABLE,
	       val & ~(BCE_NVM_ACCESS_ENABLE_EN | BCE_NVM_ACCESS_ENABLE_WR_EN));
}


#ifdef BCE_NVRAM_WRITE_SUPPORT
/****************************************************************************/
/* Erase NVRAM page before writing.                                         */
/*                                                                          */
/* Non-buffered flash parts require that a page be erased before it is      */
/* written.                                                                 */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/
static int
bce_nvram_erase_page(struct bce_softc *sc, uint32_t offset)
{
	uint32_t cmd;
	int j;

	/* Buffered flash doesn't require an erase. */
	if (sc->bce_flash_info->buffered)
		return 0;

	DBPRINT(sc, BCE_VERBOSE, "Erasing NVRAM page.\n");

	/* Build an erase command.
 */
	cmd = BCE_NVM_COMMAND_ERASE | BCE_NVM_COMMAND_WR |
	      BCE_NVM_COMMAND_DOIT;

	/*
	 * Clear the DONE bit separately, set the NVRAM address to erase,
	 * and issue the erase command.
	 */
	REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
	REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
	REG_WR(sc, BCE_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		uint32_t val;

		DELAY(5);

		val = REG_RD(sc, BCE_NVM_COMMAND);
		if (val & BCE_NVM_COMMAND_DONE)
			break;
	}

	if (j >= NVRAM_TIMEOUT_COUNT) {
		DBPRINT(sc, BCE_WARN, "Timeout erasing NVRAM.\n");
		return EBUSY;
	}
	return 0;
}
#endif	/* BCE_NVRAM_WRITE_SUPPORT */


/****************************************************************************/
/* Read a dword (32 bits) from NVRAM.                                       */
/*                                                                          */
/* Read a 32 bit word from NVRAM.  The caller is assumed to have already    */
/* obtained the NVRAM lock and enabled the controller for NVRAM access.     */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success and the 32 bit value read, positive value on failure.     */
/****************************************************************************/
static int
bce_nvram_read_dword(struct bce_softc *sc, uint32_t offset, uint8_t *ret_val,
		     uint32_t cmd_flags)
{
	uint32_t cmd;
	int i, rc = 0;

	/* Build the command word. */
	cmd = BCE_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate the offset for buffered flash. */
	if (sc->bce_flash_info->buffered) {
		offset = ((offset / sc->bce_flash_info->page_size) <<
			  sc->bce_flash_info->page_bits) +
			 (offset % sc->bce_flash_info->page_size);
	}

	/*
	 * Clear the DONE bit separately, set the address to read,
	 * and issue the read.
 */
	REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
	REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
	REG_WR(sc, BCE_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (i = 0; i < NVRAM_TIMEOUT_COUNT; i++) {
		uint32_t val;

		DELAY(5);

		val = REG_RD(sc, BCE_NVM_COMMAND);
		if (val & BCE_NVM_COMMAND_DONE) {
			val = REG_RD(sc, BCE_NVM_READ);

			/* NVRAM data is stored big-endian. */
			val = be32toh(val);
			memcpy(ret_val, &val, 4);
			break;
		}
	}

	/* Check for errors. */
	if (i >= NVRAM_TIMEOUT_COUNT) {
		if_printf(&sc->arpcom.ac_if,
			  "Timeout error reading NVRAM at offset 0x%08X!\n",
			  offset);
		rc = EBUSY;
	}
	return rc;
}


#ifdef BCE_NVRAM_WRITE_SUPPORT
/****************************************************************************/
/* Write a dword (32 bits) to NVRAM.                                        */
/*                                                                          */
/* Write a 32 bit word to NVRAM.  The caller is assumed to have already     */
/* obtained the NVRAM lock, enabled the controller for NVRAM access, and    */
/* enabled NVRAM write access.                                              */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/
static int
bce_nvram_write_dword(struct bce_softc *sc, uint32_t offset, uint8_t *val,
		      uint32_t cmd_flags)
{
	uint32_t cmd, val32;
	int j;

	/* Build the command word. */
	cmd = BCE_NVM_COMMAND_DOIT | BCE_NVM_COMMAND_WR | cmd_flags;

	/* Calculate the offset for buffered flash.
*/ 1526 if (sc->bce_flash_info->buffered) { 1527 offset = ((offset / sc->bce_flash_info->page_size) << 1528 sc->bce_flash_info->page_bits) + 1529 (offset % sc->bce_flash_info->page_size); 1530 } 1531 1532 /* 1533 * Clear the DONE bit separately, convert NVRAM data to big-endian, 1534 * set the NVRAM address to write, and issue the write command 1535 */ 1536 REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE); 1537 memcpy(&val32, val, 4); 1538 val32 = htobe32(val32); 1539 REG_WR(sc, BCE_NVM_WRITE, val32); 1540 REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE); 1541 REG_WR(sc, BCE_NVM_COMMAND, cmd); 1542 1543 /* Wait for completion. */ 1544 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) { 1545 DELAY(5); 1546 1547 if (REG_RD(sc, BCE_NVM_COMMAND) & BCE_NVM_COMMAND_DONE) 1548 break; 1549 } 1550 if (j >= NVRAM_TIMEOUT_COUNT) { 1551 if_printf(&sc->arpcom.ac_if, 1552 "Timeout error writing NVRAM at offset 0x%08X\n", 1553 offset); 1554 return EBUSY; 1555 } 1556 return 0; 1557 } 1558 #endif /* BCE_NVRAM_WRITE_SUPPORT */ 1559 1560 1561 /****************************************************************************/ 1562 /* Initialize NVRAM access. */ 1563 /* */ 1564 /* Identify the NVRAM device in use and prepare the NVRAM interface to */ 1565 /* access that device. */ 1566 /* */ 1567 /* Returns: */ 1568 /* 0 on success, positive value on failure. */ 1569 /****************************************************************************/ 1570 static int 1571 bce_init_nvram(struct bce_softc *sc) 1572 { 1573 uint32_t val; 1574 int j, entry_count, rc = 0; 1575 const struct flash_spec *flash; 1576 1577 DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __func__); 1578 1579 /* Determine the selected interface. */ 1580 val = REG_RD(sc, BCE_NVM_CFG1); 1581 1582 entry_count = sizeof(flash_table) / sizeof(struct flash_spec); 1583 1584 /* 1585 * Flash reconfiguration is required to support additional 1586 * NVRAM devices not directly supported in hardware. 
1587 * Check if the flash interface was reconfigured 1588 * by the bootcode. 1589 */ 1590 1591 if (val & 0x40000000) { 1592 /* Flash interface reconfigured by bootcode. */ 1593 1594 DBPRINT(sc, BCE_INFO_LOAD, 1595 "%s(): Flash WAS reconfigured.\n", __func__); 1596 1597 for (j = 0, flash = flash_table; j < entry_count; 1598 j++, flash++) { 1599 if ((val & FLASH_BACKUP_STRAP_MASK) == 1600 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) { 1601 sc->bce_flash_info = flash; 1602 break; 1603 } 1604 } 1605 } else { 1606 /* Flash interface not yet reconfigured. */ 1607 uint32_t mask; 1608 1609 DBPRINT(sc, BCE_INFO_LOAD, 1610 "%s(): Flash was NOT reconfigured.\n", __func__); 1611 1612 if (val & (1 << 23)) 1613 mask = FLASH_BACKUP_STRAP_MASK; 1614 else 1615 mask = FLASH_STRAP_MASK; 1616 1617 /* Look for the matching NVRAM device configuration data. */ 1618 for (j = 0, flash = flash_table; j < entry_count; 1619 j++, flash++) { 1620 /* Check if the device matches any of the known devices. */ 1621 if ((val & mask) == (flash->strapping & mask)) { 1622 /* Found a device match. */ 1623 sc->bce_flash_info = flash; 1624 1625 /* Request access to the flash interface. */ 1626 rc = bce_acquire_nvram_lock(sc); 1627 if (rc != 0) 1628 return rc; 1629 1630 /* Reconfigure the flash interface. */ 1631 bce_enable_nvram_access(sc); 1632 REG_WR(sc, BCE_NVM_CFG1, flash->config1); 1633 REG_WR(sc, BCE_NVM_CFG2, flash->config2); 1634 REG_WR(sc, BCE_NVM_CFG3, flash->config3); 1635 REG_WR(sc, BCE_NVM_WRITE1, flash->write1); 1636 bce_disable_nvram_access(sc); 1637 bce_release_nvram_lock(sc); 1638 break; 1639 } 1640 } 1641 } 1642 1643 /* Check if a matching device was found. */ 1644 if (j == entry_count) { 1645 sc->bce_flash_info = NULL; 1646 if_printf(&sc->arpcom.ac_if, "Unknown Flash NVRAM found!\n"); 1647 rc = ENODEV; 1648 } 1649 1650 /* Write the flash config data to the shared memory interface. 
*/ 1651 val = REG_RD_IND(sc, sc->bce_shmem_base + BCE_SHARED_HW_CFG_CONFIG2) & 1652 BCE_SHARED_HW_CFG2_NVM_SIZE_MASK; 1653 if (val) 1654 sc->bce_flash_size = val; 1655 else 1656 sc->bce_flash_size = sc->bce_flash_info->total_size; 1657 1658 DBPRINT(sc, BCE_INFO_LOAD, "%s() flash->total_size = 0x%08X\n", 1659 __func__, sc->bce_flash_info->total_size); 1660 1661 DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __func__); 1662 1663 return rc; 1664 } 1665 1666 1667 /****************************************************************************/ 1668 /* Read an arbitrary range of data from NVRAM. */ 1669 /* */ 1670 /* Prepares the NVRAM interface for access and reads the requested data */ 1671 /* into the supplied buffer. */ 1672 /* */ 1673 /* Returns: */ 1674 /* 0 on success and the data read, positive value on failure. */ 1675 /****************************************************************************/ 1676 static int 1677 bce_nvram_read(struct bce_softc *sc, uint32_t offset, uint8_t *ret_buf, 1678 int buf_size) 1679 { 1680 uint32_t cmd_flags, offset32, len32, extra; 1681 int rc = 0; 1682 1683 if (buf_size == 0) 1684 return 0; 1685 1686 /* Request access to the flash interface. */ 1687 rc = bce_acquire_nvram_lock(sc); 1688 if (rc != 0) 1689 return rc; 1690 1691 /* Enable access to flash interface */ 1692 bce_enable_nvram_access(sc); 1693 1694 len32 = buf_size; 1695 offset32 = offset; 1696 extra = 0; 1697 1698 cmd_flags = 0; 1699 1700 /* XXX should we release nvram lock if read_dword() fails? 
*/ 1701 if (offset32 & 3) { 1702 uint8_t buf[4]; 1703 uint32_t pre_len; 1704 1705 offset32 &= ~3; 1706 pre_len = 4 - (offset & 3); 1707 1708 if (pre_len >= len32) { 1709 pre_len = len32; 1710 cmd_flags = BCE_NVM_COMMAND_FIRST | BCE_NVM_COMMAND_LAST; 1711 } else { 1712 cmd_flags = BCE_NVM_COMMAND_FIRST; 1713 } 1714 1715 rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags); 1716 if (rc) 1717 return rc; 1718 1719 memcpy(ret_buf, buf + (offset & 3), pre_len); 1720 1721 offset32 += 4; 1722 ret_buf += pre_len; 1723 len32 -= pre_len; 1724 } 1725 1726 if (len32 & 3) { 1727 extra = 4 - (len32 & 3); 1728 len32 = (len32 + 4) & ~3; 1729 } 1730 1731 if (len32 == 4) { 1732 uint8_t buf[4]; 1733 1734 if (cmd_flags) 1735 cmd_flags = BCE_NVM_COMMAND_LAST; 1736 else 1737 cmd_flags = BCE_NVM_COMMAND_FIRST | 1738 BCE_NVM_COMMAND_LAST; 1739 1740 rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags); 1741 1742 memcpy(ret_buf, buf, 4 - extra); 1743 } else if (len32 > 0) { 1744 uint8_t buf[4]; 1745 1746 /* Read the first word. */ 1747 if (cmd_flags) 1748 cmd_flags = 0; 1749 else 1750 cmd_flags = BCE_NVM_COMMAND_FIRST; 1751 1752 rc = bce_nvram_read_dword(sc, offset32, ret_buf, cmd_flags); 1753 1754 /* Advance to the next dword. */ 1755 offset32 += 4; 1756 ret_buf += 4; 1757 len32 -= 4; 1758 1759 while (len32 > 4 && rc == 0) { 1760 rc = bce_nvram_read_dword(sc, offset32, ret_buf, 0); 1761 1762 /* Advance to the next dword. */ 1763 offset32 += 4; 1764 ret_buf += 4; 1765 len32 -= 4; 1766 } 1767 1768 if (rc) 1769 return rc; 1770 1771 cmd_flags = BCE_NVM_COMMAND_LAST; 1772 rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags); 1773 1774 memcpy(ret_buf, buf, 4 - extra); 1775 } 1776 1777 /* Disable access to flash interface and release the lock. 
 */
	bce_disable_nvram_access(sc);
	bce_release_nvram_lock(sc);

	return rc;
}


#ifdef BCE_NVRAM_WRITE_SUPPORT
/****************************************************************************/
/* Write an arbitrary range of data from NVRAM.                             */
/*                                                                          */
/* Prepares the NVRAM interface for write access and writes the requested   */
/* data from the supplied buffer.  The caller is responsible for            */
/* calculating any appropriate CRCs.                                        */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/
static int
bce_nvram_write(struct bce_softc *sc, uint32_t offset, uint8_t *data_buf,
		int buf_size)
{
	uint32_t written, offset32, len32;
	uint8_t *buf, start[4], end[4];
	int rc = 0;
	int align_start, align_end;

	buf = data_buf;
	offset32 = offset;
	len32 = buf_size;
	align_end = 0;
	align_start = (offset32 & 3);

	/* Read back the dword containing a leading unaligned fragment. */
	if (align_start) {
		offset32 &= ~3;
		len32 += align_start;
		rc = bce_nvram_read(sc, offset32, start, 4);
		if (rc)
			return rc;
	}

	/* Read back the dword containing a trailing unaligned fragment. */
	if (len32 & 3) {
		if (len32 > 4 || !align_start) {
			align_end = 4 - (len32 & 3);
			len32 += align_end;
			rc = bce_nvram_read(sc, offset32 + len32 - 4, end, 4);
			if (rc)
				return rc;
		}
	}

	/*
	 * Build an aligned scratch buffer combining the preserved edge
	 * bytes with the caller's data.
	 */
	if (align_start || align_end) {
		buf = kmalloc(len32, M_DEVBUF, M_NOWAIT);
		if (buf == NULL)
			return ENOMEM;
		if (align_start)
			memcpy(buf, start, 4);
		if (align_end)
			memcpy(buf + len32 - 4, end, 4);
		memcpy(buf + align_start, data_buf, buf_size);
	}

	/* Write the data one flash page at a time. */
	written = 0;
	while (written < len32 && rc == 0) {
		uint32_t page_start, page_end, data_start, data_end;
		uint32_t addr, cmd_flags;
		int i;
		uint8_t flash_buffer[264];

		/* Find the page_start addr */
		page_start = offset32 + written;
		page_start -= (page_start % sc->bce_flash_info->page_size);
		/* Find the page_end addr */
		page_end = page_start + sc->bce_flash_info->page_size;
		/* Find the data_start addr */
		data_start = (written == 0) ? offset32 : page_start;
		/* Find the data_end addr */
		data_end = (page_end > offset32 + len32) ? (offset32 + len32)
							 : page_end;

		/* Request access to the flash interface. */
		rc = bce_acquire_nvram_lock(sc);
		if (rc != 0)
			goto nvram_write_end;

		/* Enable access to flash interface */
		bce_enable_nvram_access(sc);

		cmd_flags = BCE_NVM_COMMAND_FIRST;
		if (sc->bce_flash_info->buffered == 0) {
			int j;

			/*
			 * Read the whole page into the buffer
			 * (non-buffer flash only)
			 */
			for (j = 0; j < sc->bce_flash_info->page_size; j += 4) {
				if (j == (sc->bce_flash_info->page_size - 4))
					cmd_flags |= BCE_NVM_COMMAND_LAST;

				rc = bce_nvram_read_dword(sc, page_start + j,
							  &flash_buffer[j],
							  cmd_flags);
				if (rc)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Enable writes to flash interface (unlock write-protect) */
		rc = bce_enable_nvram_write(sc);
		if (rc != 0)
			goto nvram_write_end;

		/* Erase the page */
		rc = bce_nvram_erase_page(sc, page_start);
		if (rc != 0)
			goto nvram_write_end;

		/* Re-enable the write again for the actual write */
		bce_enable_nvram_write(sc);

		/* Loop to write back the buffer data from page_start to
		 * data_start */
		i = 0;
		if (sc->bce_flash_info->buffered == 0) {
			for (addr = page_start; addr < data_start;
			     addr += 4, i += 4) {
				rc = bce_nvram_write_dword(sc, addr,
							   &flash_buffer[i],
							   cmd_flags);
				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Loop to write the new data from data_start to data_end */
		for (addr = data_start; addr <
		     data_end; addr += 4, i++) {
			if (addr == page_end - 4 ||
			    (sc->bce_flash_info->buffered &&
			     addr == data_end - 4))
				cmd_flags |= BCE_NVM_COMMAND_LAST;

			rc = bce_nvram_write_dword(sc, addr, buf, cmd_flags);
			if (rc != 0)
				goto nvram_write_end;

			cmd_flags = 0;
			buf += 4;
		}

		/* Loop to write back the buffer data from data_end
		 * to page_end */
		if (sc->bce_flash_info->buffered == 0) {
			for (addr = data_end; addr < page_end;
			     addr += 4, i += 4) {
				if (addr == page_end-4)
					cmd_flags = BCE_NVM_COMMAND_LAST;

				rc = bce_nvram_write_dword(sc, addr,
							   &flash_buffer[i],
							   cmd_flags);
				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Disable writes to flash interface (lock write-protect) */
		bce_disable_nvram_write(sc);

		/* Disable access to flash interface */
		bce_disable_nvram_access(sc);
		bce_release_nvram_lock(sc);

		/* Increment written */
		written += data_end - data_start;
	}

nvram_write_end:
	/* Free the scratch buffer allocated for unaligned writes. */
	if (align_start || align_end)
		kfree(buf, M_DEVBUF);
	return rc;
}
#endif	/* BCE_NVRAM_WRITE_SUPPORT */


/****************************************************************************/
/* Verifies that NVRAM is accessible and contains valid data.               */
/*                                                                          */
/* Reads the configuration data from NVRAM and verifies that the CRC is     */
/* correct.                                                                 */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/
static int
bce_nvram_test(struct bce_softc *sc)
{
	uint32_t buf[BCE_NVRAM_SIZE / 4];
	uint32_t magic, csum;
	uint8_t *data = (uint8_t *)buf;
	int rc = 0;

	/*
	 * Check that the device NVRAM is valid by reading
	 * the magic value at offset 0.
1988 */ 1989 rc = bce_nvram_read(sc, 0, data, 4); 1990 if (rc != 0) 1991 return rc; 1992 1993 magic = be32toh(buf[0]); 1994 if (magic != BCE_NVRAM_MAGIC) { 1995 if_printf(&sc->arpcom.ac_if, 1996 "Invalid NVRAM magic value! Expected: 0x%08X, " 1997 "Found: 0x%08X\n", BCE_NVRAM_MAGIC, magic); 1998 return ENODEV; 1999 } 2000 2001 /* 2002 * Verify that the device NVRAM includes valid 2003 * configuration data. 2004 */ 2005 rc = bce_nvram_read(sc, 0x100, data, BCE_NVRAM_SIZE); 2006 if (rc != 0) 2007 return rc; 2008 2009 csum = ether_crc32_le(data, 0x100); 2010 if (csum != BCE_CRC32_RESIDUAL) { 2011 if_printf(&sc->arpcom.ac_if, 2012 "Invalid Manufacturing Information NVRAM CRC! " 2013 "Expected: 0x%08X, Found: 0x%08X\n", 2014 BCE_CRC32_RESIDUAL, csum); 2015 return ENODEV; 2016 } 2017 2018 csum = ether_crc32_le(data + 0x100, 0x100); 2019 if (csum != BCE_CRC32_RESIDUAL) { 2020 if_printf(&sc->arpcom.ac_if, 2021 "Invalid Feature Configuration Information " 2022 "NVRAM CRC! Expected: 0x%08X, Found: 08%08X\n", 2023 BCE_CRC32_RESIDUAL, csum); 2024 rc = ENODEV; 2025 } 2026 return rc; 2027 } 2028 2029 2030 /****************************************************************************/ 2031 /* Free any DMA memory owned by the driver. */ 2032 /* */ 2033 /* Scans through each data structre that requires DMA memory and frees */ 2034 /* the memory if allocated. */ 2035 /* */ 2036 /* Returns: */ 2037 /* Nothing. */ 2038 /****************************************************************************/ 2039 static void 2040 bce_dma_free(struct bce_softc *sc) 2041 { 2042 int i; 2043 2044 /* Destroy the status block. */ 2045 if (sc->status_tag != NULL) { 2046 if (sc->status_block != NULL) { 2047 bus_dmamap_unload(sc->status_tag, sc->status_map); 2048 bus_dmamem_free(sc->status_tag, sc->status_block, 2049 sc->status_map); 2050 } 2051 bus_dma_tag_destroy(sc->status_tag); 2052 } 2053 2054 2055 /* Destroy the statistics block. 
	 */
	if (sc->stats_tag != NULL) {
		if (sc->stats_block != NULL) {
			bus_dmamap_unload(sc->stats_tag, sc->stats_map);
			bus_dmamem_free(sc->stats_tag, sc->stats_block,
					sc->stats_map);
		}
		bus_dma_tag_destroy(sc->stats_tag);
	}

	/* Destroy the TX buffer descriptor DMA stuffs. */
	if (sc->tx_bd_chain_tag != NULL) {
		for (i = 0; i < TX_PAGES; i++) {
			if (sc->tx_bd_chain[i] != NULL) {
				bus_dmamap_unload(sc->tx_bd_chain_tag,
						  sc->tx_bd_chain_map[i]);
				bus_dmamem_free(sc->tx_bd_chain_tag,
						sc->tx_bd_chain[i],
						sc->tx_bd_chain_map[i]);
			}
		}
		bus_dma_tag_destroy(sc->tx_bd_chain_tag);
	}

	/* Destroy the RX buffer descriptor DMA stuffs. */
	if (sc->rx_bd_chain_tag != NULL) {
		for (i = 0; i < RX_PAGES; i++) {
			if (sc->rx_bd_chain[i] != NULL) {
				bus_dmamap_unload(sc->rx_bd_chain_tag,
						  sc->rx_bd_chain_map[i]);
				bus_dmamem_free(sc->rx_bd_chain_tag,
						sc->rx_bd_chain[i],
						sc->rx_bd_chain_map[i]);
			}
		}
		bus_dma_tag_destroy(sc->rx_bd_chain_tag);
	}

	/* Destroy the TX mbuf DMA stuffs. */
	if (sc->tx_mbuf_tag != NULL) {
		for (i = 0; i < TOTAL_TX_BD; i++) {
			/* Must have been unloaded in bce_stop() */
			KKASSERT(sc->tx_mbuf_ptr[i] == NULL);
			bus_dmamap_destroy(sc->tx_mbuf_tag,
					   sc->tx_mbuf_map[i]);
		}
		bus_dma_tag_destroy(sc->tx_mbuf_tag);
	}

	/* Destroy the RX mbuf DMA stuffs. */
	if (sc->rx_mbuf_tag != NULL) {
		for (i = 0; i < TOTAL_RX_BD; i++) {
			/* Must have been unloaded in bce_stop() */
			KKASSERT(sc->rx_mbuf_ptr[i] == NULL);
			bus_dmamap_destroy(sc->rx_mbuf_tag,
					   sc->rx_mbuf_map[i]);
		}
		bus_dma_tag_destroy(sc->rx_mbuf_tag);
	}

	/* Destroy the parent tag last, after all child tags are gone. */
	if (sc->parent_tag != NULL)
		bus_dma_tag_destroy(sc->parent_tag);
}


/****************************************************************************/
/* Get DMA memory from the OS.                                              */
/*                                                                          */
/* Validates that the OS has provided DMA buffers in response to a          */
/* bus_dmamap_load() call and saves the physical address of those buffers.  */
/* When the callback is used the OS will return 0 for the mapping function  */
/* (bus_dmamap_load()) so we use the value of map_arg->maxsegs to pass any  */
/* failures back to the caller.                                             */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	/* Caller passes the address of a bus_addr_t to receive the result. */
	bus_addr_t *busaddr = arg;

	/*
	 * Simulate a mapping failure.
	 * XXX not correct.
	 */
	DBRUNIF(DB_RANDOMTRUE(bce_debug_dma_map_addr_failure),
		kprintf("bce: %s(%d): Simulating DMA mapping error.\n",
			__FILE__, __LINE__);
		error = ENOMEM);

	/* Check for an error and signal the caller that an error occurred. */
	if (error)
		return;

	/* This callback is only used for single-segment loads. */
	KASSERT(nseg == 1, ("only one segment is allowed\n"));
	*busaddr = segs->ds_addr;
}


/*
 * bus_dmamap_load_mbuf() callback: copy the segment list into the
 * caller-supplied bce_dmamap_arg.  bce_maxsegs is set to 0 when the
 * mapping produced more segments than the caller can accept, otherwise
 * it is updated to the actual segment count.
 */
static void
bce_dma_map_mbuf(void *arg, bus_dma_segment_t *segs, int nsegs,
		 bus_size_t mapsz __unused, int error)
{
	struct bce_dmamap_arg *ctx = arg;
	int i;

	if (error)
		return;

	if (nsegs > ctx->bce_maxsegs) {
		/* Too many segments: signal failure via bce_maxsegs == 0. */
		ctx->bce_maxsegs = 0;
		return;
	}

	ctx->bce_maxsegs = nsegs;
	for (i = 0; i < nsegs; ++i)
		ctx->bce_segs[i] = segs[i];
}


/****************************************************************************/
/* Allocate any DMA memory needed by the driver.                            */
/*                                                                          */
/* Allocates DMA memory needed for the various global structures needed by  */
/* hardware.                                                                */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
static int
bce_dma_alloc(struct bce_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int i, j, rc = 0;
	bus_addr_t busaddr;

	/*
	 * Allocate the parent bus DMA tag appropriate for PCI.
	 */
	rc = bus_dma_tag_create(NULL, 1, BCE_DMA_BOUNDARY,
				sc->max_bus_addr, BUS_SPACE_MAXADDR,
				NULL, NULL,
				MAXBSIZE, BUS_SPACE_UNRESTRICTED,
				BUS_SPACE_MAXSIZE_32BIT,
				0, &sc->parent_tag);
	if (rc != 0) {
		if_printf(ifp, "Could not allocate parent DMA tag!\n");
		return rc;
	}

	/*
	 * Create a DMA tag for the status block, allocate and clear the
	 * memory, map the memory into DMA space, and fetch the physical
	 * address of the block.
2211 */ 2212 rc = bus_dma_tag_create(sc->parent_tag, 2213 BCE_DMA_ALIGN, BCE_DMA_BOUNDARY, 2214 sc->max_bus_addr, BUS_SPACE_MAXADDR, 2215 NULL, NULL, 2216 BCE_STATUS_BLK_SZ, 1, BCE_STATUS_BLK_SZ, 2217 0, &sc->status_tag); 2218 if (rc != 0) { 2219 if_printf(ifp, "Could not allocate status block DMA tag!\n"); 2220 return rc; 2221 } 2222 2223 rc = bus_dmamem_alloc(sc->status_tag, (void **)&sc->status_block, 2224 BUS_DMA_WAITOK | BUS_DMA_ZERO, 2225 &sc->status_map); 2226 if (rc != 0) { 2227 if_printf(ifp, "Could not allocate status block DMA memory!\n"); 2228 return rc; 2229 } 2230 2231 rc = bus_dmamap_load(sc->status_tag, sc->status_map, 2232 sc->status_block, BCE_STATUS_BLK_SZ, 2233 bce_dma_map_addr, &busaddr, BUS_DMA_WAITOK); 2234 if (rc != 0) { 2235 if_printf(ifp, "Could not map status block DMA memory!\n"); 2236 bus_dmamem_free(sc->status_tag, sc->status_block, 2237 sc->status_map); 2238 sc->status_block = NULL; 2239 return rc; 2240 } 2241 2242 sc->status_block_paddr = busaddr; 2243 /* DRC - Fix for 64 bit addresses. */ 2244 DBPRINT(sc, BCE_INFO, "status_block_paddr = 0x%08X\n", 2245 (uint32_t)sc->status_block_paddr); 2246 2247 /* 2248 * Create a DMA tag for the statistics block, allocate and clear the 2249 * memory, map the memory into DMA space, and fetch the physical 2250 * address of the block. 
2251 */ 2252 rc = bus_dma_tag_create(sc->parent_tag, 2253 BCE_DMA_ALIGN, BCE_DMA_BOUNDARY, 2254 sc->max_bus_addr, BUS_SPACE_MAXADDR, 2255 NULL, NULL, 2256 BCE_STATS_BLK_SZ, 1, BCE_STATS_BLK_SZ, 2257 0, &sc->stats_tag); 2258 if (rc != 0) { 2259 if_printf(ifp, "Could not allocate " 2260 "statistics block DMA tag!\n"); 2261 return rc; 2262 } 2263 2264 rc = bus_dmamem_alloc(sc->stats_tag, (void **)&sc->stats_block, 2265 BUS_DMA_WAITOK | BUS_DMA_ZERO, 2266 &sc->stats_map); 2267 if (rc != 0) { 2268 if_printf(ifp, "Could not allocate " 2269 "statistics block DMA memory!\n"); 2270 return rc; 2271 } 2272 2273 rc = bus_dmamap_load(sc->stats_tag, sc->stats_map, 2274 sc->stats_block, BCE_STATS_BLK_SZ, 2275 bce_dma_map_addr, &busaddr, BUS_DMA_WAITOK); 2276 if (rc != 0) { 2277 if_printf(ifp, "Could not map statistics block DMA memory!\n"); 2278 bus_dmamem_free(sc->stats_tag, sc->stats_block, sc->stats_map); 2279 sc->stats_block = NULL; 2280 return rc; 2281 } 2282 2283 sc->stats_block_paddr = busaddr; 2284 /* DRC - Fix for 64 bit address. */ 2285 DBPRINT(sc, BCE_INFO, "stats_block_paddr = 0x%08X\n", 2286 (uint32_t)sc->stats_block_paddr); 2287 2288 /* 2289 * Create a DMA tag for the TX buffer descriptor chain, 2290 * allocate and clear the memory, and fetch the 2291 * physical address of the block. 
2292 */ 2293 rc = bus_dma_tag_create(sc->parent_tag, 2294 BCM_PAGE_SIZE, BCE_DMA_BOUNDARY, 2295 sc->max_bus_addr, BUS_SPACE_MAXADDR, 2296 NULL, NULL, 2297 BCE_TX_CHAIN_PAGE_SZ, 1, BCE_TX_CHAIN_PAGE_SZ, 2298 0, &sc->tx_bd_chain_tag); 2299 if (rc != 0) { 2300 if_printf(ifp, "Could not allocate " 2301 "TX descriptor chain DMA tag!\n"); 2302 return rc; 2303 } 2304 2305 for (i = 0; i < TX_PAGES; i++) { 2306 rc = bus_dmamem_alloc(sc->tx_bd_chain_tag, 2307 (void **)&sc->tx_bd_chain[i], 2308 BUS_DMA_WAITOK, &sc->tx_bd_chain_map[i]); 2309 if (rc != 0) { 2310 if_printf(ifp, "Could not allocate %dth TX descriptor " 2311 "chain DMA memory!\n", i); 2312 return rc; 2313 } 2314 2315 rc = bus_dmamap_load(sc->tx_bd_chain_tag, 2316 sc->tx_bd_chain_map[i], 2317 sc->tx_bd_chain[i], BCE_TX_CHAIN_PAGE_SZ, 2318 bce_dma_map_addr, &busaddr, 2319 BUS_DMA_WAITOK); 2320 if (rc != 0) { 2321 if_printf(ifp, "Could not map %dth TX descriptor " 2322 "chain DMA memory!\n", i); 2323 bus_dmamem_free(sc->tx_bd_chain_tag, 2324 sc->tx_bd_chain[i], 2325 sc->tx_bd_chain_map[i]); 2326 sc->tx_bd_chain[i] = NULL; 2327 return rc; 2328 } 2329 2330 sc->tx_bd_chain_paddr[i] = busaddr; 2331 /* DRC - Fix for 64 bit systems. */ 2332 DBPRINT(sc, BCE_INFO, "tx_bd_chain_paddr[%d] = 0x%08X\n", 2333 i, (uint32_t)sc->tx_bd_chain_paddr[i]); 2334 } 2335 2336 /* Create a DMA tag for TX mbufs. */ 2337 rc = bus_dma_tag_create(sc->parent_tag, 1, BCE_DMA_BOUNDARY, 2338 sc->max_bus_addr, BUS_SPACE_MAXADDR, 2339 NULL, NULL, 2340 MCLBYTES * BCE_MAX_SEGMENTS, 2341 BCE_MAX_SEGMENTS, MCLBYTES, 2342 0, &sc->tx_mbuf_tag); 2343 if (rc != 0) { 2344 if_printf(ifp, "Could not allocate TX mbuf DMA tag!\n"); 2345 return rc; 2346 } 2347 2348 /* Create DMA maps for the TX mbufs clusters. 
*/ 2349 for (i = 0; i < TOTAL_TX_BD; i++) { 2350 rc = bus_dmamap_create(sc->tx_mbuf_tag, BUS_DMA_WAITOK, 2351 &sc->tx_mbuf_map[i]); 2352 if (rc != 0) { 2353 for (j = 0; j < i; ++j) { 2354 bus_dmamap_destroy(sc->tx_mbuf_tag, 2355 sc->tx_mbuf_map[i]); 2356 } 2357 bus_dma_tag_destroy(sc->tx_mbuf_tag); 2358 sc->tx_mbuf_tag = NULL; 2359 2360 if_printf(ifp, "Unable to create " 2361 "%dth TX mbuf DMA map!\n", i); 2362 return rc; 2363 } 2364 } 2365 2366 /* 2367 * Create a DMA tag for the RX buffer descriptor chain, 2368 * allocate and clear the memory, and fetch the physical 2369 * address of the blocks. 2370 */ 2371 rc = bus_dma_tag_create(sc->parent_tag, 2372 BCM_PAGE_SIZE, BCE_DMA_BOUNDARY, 2373 sc->max_bus_addr, BUS_SPACE_MAXADDR, 2374 NULL, NULL, 2375 BCE_RX_CHAIN_PAGE_SZ, 1, BCE_RX_CHAIN_PAGE_SZ, 2376 0, &sc->rx_bd_chain_tag); 2377 if (rc != 0) { 2378 if_printf(ifp, "Could not allocate " 2379 "RX descriptor chain DMA tag!\n"); 2380 return rc; 2381 } 2382 2383 for (i = 0; i < RX_PAGES; i++) { 2384 rc = bus_dmamem_alloc(sc->rx_bd_chain_tag, 2385 (void **)&sc->rx_bd_chain[i], 2386 BUS_DMA_WAITOK | BUS_DMA_ZERO, 2387 &sc->rx_bd_chain_map[i]); 2388 if (rc != 0) { 2389 if_printf(ifp, "Could not allocate %dth RX descriptor " 2390 "chain DMA memory!\n", i); 2391 return rc; 2392 } 2393 2394 rc = bus_dmamap_load(sc->rx_bd_chain_tag, 2395 sc->rx_bd_chain_map[i], 2396 sc->rx_bd_chain[i], BCE_RX_CHAIN_PAGE_SZ, 2397 bce_dma_map_addr, &busaddr, 2398 BUS_DMA_WAITOK); 2399 if (rc != 0) { 2400 if_printf(ifp, "Could not map %dth RX descriptor " 2401 "chain DMA memory!\n", i); 2402 bus_dmamem_free(sc->rx_bd_chain_tag, 2403 sc->rx_bd_chain[i], 2404 sc->rx_bd_chain_map[i]); 2405 sc->rx_bd_chain[i] = NULL; 2406 return rc; 2407 } 2408 2409 sc->rx_bd_chain_paddr[i] = busaddr; 2410 /* DRC - Fix for 64 bit systems. */ 2411 DBPRINT(sc, BCE_INFO, "rx_bd_chain_paddr[%d] = 0x%08X\n", 2412 i, (uint32_t)sc->rx_bd_chain_paddr[i]); 2413 } 2414 2415 /* Create a DMA tag for RX mbufs. 
*/ 2416 rc = bus_dma_tag_create(sc->parent_tag, 1, BCE_DMA_BOUNDARY, 2417 sc->max_bus_addr, BUS_SPACE_MAXADDR, 2418 NULL, NULL, 2419 MCLBYTES, 1/* BCE_MAX_SEGMENTS */, MCLBYTES, 2420 0, &sc->rx_mbuf_tag); 2421 if (rc != 0) { 2422 if_printf(ifp, "Could not allocate RX mbuf DMA tag!\n"); 2423 return rc; 2424 } 2425 2426 /* Create DMA maps for the RX mbuf clusters. */ 2427 for (i = 0; i < TOTAL_RX_BD; i++) { 2428 rc = bus_dmamap_create(sc->rx_mbuf_tag, BUS_DMA_WAITOK, 2429 &sc->rx_mbuf_map[i]); 2430 if (rc != 0) { 2431 for (j = 0; j < i; ++j) { 2432 bus_dmamap_destroy(sc->rx_mbuf_tag, 2433 sc->rx_mbuf_map[j]); 2434 } 2435 bus_dma_tag_destroy(sc->rx_mbuf_tag); 2436 sc->rx_mbuf_tag = NULL; 2437 2438 if_printf(ifp, "Unable to create " 2439 "%dth RX mbuf DMA map!\n", i); 2440 return rc; 2441 } 2442 } 2443 return 0; 2444 } 2445 2446 2447 /****************************************************************************/ 2448 /* Firmware synchronization. */ 2449 /* */ 2450 /* Before performing certain events such as a chip reset, synchronize with */ 2451 /* the firmware first. */ 2452 /* */ 2453 /* Returns: */ 2454 /* 0 for success, positive value for failure. */ 2455 /****************************************************************************/ 2456 static int 2457 bce_fw_sync(struct bce_softc *sc, uint32_t msg_data) 2458 { 2459 int i, rc = 0; 2460 uint32_t val; 2461 2462 /* Don't waste any time if we've timed out before. */ 2463 if (sc->bce_fw_timed_out) 2464 return EBUSY; 2465 2466 /* Increment the message sequence number. */ 2467 sc->bce_fw_wr_seq++; 2468 msg_data |= sc->bce_fw_wr_seq; 2469 2470 DBPRINT(sc, BCE_VERBOSE, "bce_fw_sync(): msg_data = 0x%08X\n", msg_data); 2471 2472 /* Send the message to the bootcode driver mailbox. */ 2473 REG_WR_IND(sc, sc->bce_shmem_base + BCE_DRV_MB, msg_data); 2474 2475 /* Wait for the bootcode to acknowledge the message. */ 2476 for (i = 0; i < FW_ACK_TIME_OUT_MS; i++) { 2477 /* Check for a response in the bootcode firmware mailbox. 
		 */
		val = REG_RD_IND(sc, sc->bce_shmem_base + BCE_FW_MB);
		if ((val & BCE_FW_MSG_ACK) == (msg_data & BCE_DRV_MSG_SEQ))
			break;
		DELAY(1000);
	}

	/* If we've timed out, tell the bootcode that we've stopped waiting. */
	if ((val & BCE_FW_MSG_ACK) != (msg_data & BCE_DRV_MSG_SEQ) &&
	    (msg_data & BCE_DRV_MSG_DATA) != BCE_DRV_MSG_DATA_WAIT0) {
		if_printf(&sc->arpcom.ac_if,
			  "Firmware synchronization timeout! "
			  "msg_data = 0x%08X\n", msg_data);

		msg_data &= ~BCE_DRV_MSG_CODE;
		msg_data |= BCE_DRV_MSG_CODE_FW_TIMEOUT;

		REG_WR_IND(sc, sc->bce_shmem_base + BCE_DRV_MB, msg_data);

		/* Remember the timeout so later syncs fail fast. */
		sc->bce_fw_timed_out = 1;
		rc = EBUSY;
	}
	return rc;
}


/****************************************************************************/
/* Load Receive Virtual 2 Physical (RV2P) processor firmware.               */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_load_rv2p_fw(struct bce_softc *sc, uint32_t *rv2p_code,
		 uint32_t rv2p_code_len, uint32_t rv2p_proc)
{
	int i;
	uint32_t val;

	/*
	 * Each RV2P instruction is 8 bytes: write the high and low words,
	 * then issue the write command for instruction slot i/8.
	 */
	for (i = 0; i < rv2p_code_len; i += 8) {
		REG_WR(sc, BCE_RV2P_INSTR_HIGH, *rv2p_code);
		rv2p_code++;
		REG_WR(sc, BCE_RV2P_INSTR_LOW, *rv2p_code);
		rv2p_code++;

		if (rv2p_proc == RV2P_PROC1) {
			val = (i / 8) | BCE_RV2P_PROC1_ADDR_CMD_RDWR;
			REG_WR(sc, BCE_RV2P_PROC1_ADDR_CMD, val);
		} else {
			val = (i / 8) | BCE_RV2P_PROC2_ADDR_CMD_RDWR;
			REG_WR(sc, BCE_RV2P_PROC2_ADDR_CMD, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1)
		REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC1_RESET);
	else
		REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC2_RESET);
}


/****************************************************************************/
/* Load RISC processor firmware.                                            */
/*                                                                          */
/* Loads firmware from the file if_bcefw.h into the scratchpad memory       */
/* associated with a particular processor.                                  */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_load_cpu_fw(struct bce_softc *sc, struct cpu_reg *cpu_reg,
		struct fw_info *fw)
{
	uint32_t offset, val;
	int j;

	/* Halt the CPU. */
	val = REG_RD_IND(sc, cpu_reg->mode);
	val |= cpu_reg->mode_value_halt;
	REG_WR_IND(sc, cpu_reg->mode, val);
	REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);

	/*
	 * Each firmware section is copied into the CPU scratchpad at the
	 * offset derived from its MIPS link address.
	 */

	/* Load the Text area. */
	offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
	if (fw->text) {
		for (j = 0; j < (fw->text_len / 4); j++, offset += 4)
			REG_WR_IND(sc, offset, fw->text[j]);
	}

	/* Load the Data area. */
	offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
	if (fw->data) {
		for (j = 0; j < (fw->data_len / 4); j++, offset += 4)
			REG_WR_IND(sc, offset, fw->data[j]);
	}

	/* Load the SBSS area. */
	offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
	if (fw->sbss) {
		for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4)
			REG_WR_IND(sc, offset, fw->sbss[j]);
	}

	/* Load the BSS area. */
	offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
	if (fw->bss) {
		for (j = 0; j < (fw->bss_len/4); j++, offset += 4)
			REG_WR_IND(sc, offset, fw->bss[j]);
	}

	/* Load the Read-Only area. */
	offset = cpu_reg->spad_base +
		 (fw->rodata_addr - cpu_reg->mips_view_base);
	if (fw->rodata) {
		for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4)
			REG_WR_IND(sc, offset, fw->rodata[j]);
	}

	/* Clear the pre-fetch instruction. */
	REG_WR_IND(sc, cpu_reg->inst, 0);
	REG_WR_IND(sc, cpu_reg->pc, fw->start_addr);

	/* Start the CPU. */
	val = REG_RD_IND(sc, cpu_reg->mode);
	val &= ~cpu_reg->mode_value_halt;
	REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
	REG_WR_IND(sc, cpu_reg->mode, val);
}


/****************************************************************************/
/* Initialize the RV2P, RX, TX, TPAT, and COM CPUs.                         */
/*                                                                          */
/* Loads the firmware for each CPU and starts the CPU.                      */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_init_cpus(struct bce_softc *sc)
{
	struct cpu_reg cpu_reg;
	struct fw_info fw;

	/* Initialize the RV2P processor. */
	bce_load_rv2p_fw(sc, bce_rv2p_proc1, sizeof(bce_rv2p_proc1),
			 RV2P_PROC1);
	bce_load_rv2p_fw(sc, bce_rv2p_proc2, sizeof(bce_rv2p_proc2),
			 RV2P_PROC2);

	/* Initialize the RX Processor.
	 */
	cpu_reg.mode = BCE_RXP_CPU_MODE;
	cpu_reg.mode_value_halt = BCE_RXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BCE_RXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BCE_RXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BCE_RXP_CPU_REG_FILE;
	cpu_reg.evmask = BCE_RXP_CPU_EVENT_MASK;
	cpu_reg.pc = BCE_RXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BCE_RXP_CPU_INSTRUCTION;
	cpu_reg.bp = BCE_RXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BCE_RXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw.ver_major = bce_RXP_b06FwReleaseMajor;
	fw.ver_minor = bce_RXP_b06FwReleaseMinor;
	fw.ver_fix = bce_RXP_b06FwReleaseFix;
	fw.start_addr = bce_RXP_b06FwStartAddr;

	fw.text_addr = bce_RXP_b06FwTextAddr;
	fw.text_len = bce_RXP_b06FwTextLen;
	fw.text_index = 0;
	fw.text = bce_RXP_b06FwText;

	fw.data_addr = bce_RXP_b06FwDataAddr;
	fw.data_len = bce_RXP_b06FwDataLen;
	fw.data_index = 0;
	fw.data = bce_RXP_b06FwData;

	fw.sbss_addr = bce_RXP_b06FwSbssAddr;
	fw.sbss_len = bce_RXP_b06FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bce_RXP_b06FwSbss;

	fw.bss_addr = bce_RXP_b06FwBssAddr;
	fw.bss_len = bce_RXP_b06FwBssLen;
	fw.bss_index = 0;
	fw.bss = bce_RXP_b06FwBss;

	fw.rodata_addr = bce_RXP_b06FwRodataAddr;
	fw.rodata_len = bce_RXP_b06FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bce_RXP_b06FwRodata;

	DBPRINT(sc, BCE_INFO_RESET, "Loading RX firmware.\n");
	bce_load_cpu_fw(sc, &cpu_reg, &fw);

	/* Initialize the TX Processor. */
	cpu_reg.mode = BCE_TXP_CPU_MODE;
	cpu_reg.mode_value_halt = BCE_TXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BCE_TXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BCE_TXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BCE_TXP_CPU_REG_FILE;
	cpu_reg.evmask = BCE_TXP_CPU_EVENT_MASK;
	cpu_reg.pc = BCE_TXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BCE_TXP_CPU_INSTRUCTION;
	cpu_reg.bp = BCE_TXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BCE_TXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw.ver_major = bce_TXP_b06FwReleaseMajor;
	fw.ver_minor = bce_TXP_b06FwReleaseMinor;
	fw.ver_fix = bce_TXP_b06FwReleaseFix;
	fw.start_addr = bce_TXP_b06FwStartAddr;

	fw.text_addr = bce_TXP_b06FwTextAddr;
	fw.text_len = bce_TXP_b06FwTextLen;
	fw.text_index = 0;
	fw.text = bce_TXP_b06FwText;

	fw.data_addr = bce_TXP_b06FwDataAddr;
	fw.data_len = bce_TXP_b06FwDataLen;
	fw.data_index = 0;
	fw.data = bce_TXP_b06FwData;

	fw.sbss_addr = bce_TXP_b06FwSbssAddr;
	fw.sbss_len = bce_TXP_b06FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bce_TXP_b06FwSbss;

	fw.bss_addr = bce_TXP_b06FwBssAddr;
	fw.bss_len = bce_TXP_b06FwBssLen;
	fw.bss_index = 0;
	fw.bss = bce_TXP_b06FwBss;

	fw.rodata_addr = bce_TXP_b06FwRodataAddr;
	fw.rodata_len = bce_TXP_b06FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bce_TXP_b06FwRodata;

	DBPRINT(sc, BCE_INFO_RESET, "Loading TX firmware.\n");
	bce_load_cpu_fw(sc, &cpu_reg, &fw);

	/* Initialize the TX Patch-up Processor. */
	cpu_reg.mode = BCE_TPAT_CPU_MODE;
	cpu_reg.mode_value_halt = BCE_TPAT_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BCE_TPAT_CPU_MODE_STEP_ENA;
	cpu_reg.state = BCE_TPAT_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BCE_TPAT_CPU_REG_FILE;
	cpu_reg.evmask = BCE_TPAT_CPU_EVENT_MASK;
	cpu_reg.pc = BCE_TPAT_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BCE_TPAT_CPU_INSTRUCTION;
	cpu_reg.bp = BCE_TPAT_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BCE_TPAT_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw.ver_major = bce_TPAT_b06FwReleaseMajor;
	fw.ver_minor = bce_TPAT_b06FwReleaseMinor;
	fw.ver_fix = bce_TPAT_b06FwReleaseFix;
	fw.start_addr = bce_TPAT_b06FwStartAddr;

	fw.text_addr = bce_TPAT_b06FwTextAddr;
	fw.text_len = bce_TPAT_b06FwTextLen;
	fw.text_index = 0;
	fw.text = bce_TPAT_b06FwText;

	fw.data_addr = bce_TPAT_b06FwDataAddr;
	fw.data_len = bce_TPAT_b06FwDataLen;
	fw.data_index = 0;
	fw.data = bce_TPAT_b06FwData;

	fw.sbss_addr = bce_TPAT_b06FwSbssAddr;
	fw.sbss_len = bce_TPAT_b06FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bce_TPAT_b06FwSbss;

	fw.bss_addr = bce_TPAT_b06FwBssAddr;
	fw.bss_len = bce_TPAT_b06FwBssLen;
	fw.bss_index = 0;
	fw.bss = bce_TPAT_b06FwBss;

	fw.rodata_addr = bce_TPAT_b06FwRodataAddr;
	fw.rodata_len = bce_TPAT_b06FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bce_TPAT_b06FwRodata;

	DBPRINT(sc, BCE_INFO_RESET, "Loading TPAT firmware.\n");
	bce_load_cpu_fw(sc, &cpu_reg, &fw);

	/* Initialize the Completion Processor. */
	cpu_reg.mode = BCE_COM_CPU_MODE;
	cpu_reg.mode_value_halt = BCE_COM_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BCE_COM_CPU_MODE_STEP_ENA;
	cpu_reg.state = BCE_COM_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BCE_COM_CPU_REG_FILE;
	cpu_reg.evmask = BCE_COM_CPU_EVENT_MASK;
	cpu_reg.pc = BCE_COM_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BCE_COM_CPU_INSTRUCTION;
	cpu_reg.bp = BCE_COM_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BCE_COM_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw.ver_major = bce_COM_b06FwReleaseMajor;
	fw.ver_minor = bce_COM_b06FwReleaseMinor;
	fw.ver_fix = bce_COM_b06FwReleaseFix;
	fw.start_addr = bce_COM_b06FwStartAddr;

	fw.text_addr = bce_COM_b06FwTextAddr;
	fw.text_len = bce_COM_b06FwTextLen;
	fw.text_index = 0;
	fw.text = bce_COM_b06FwText;

	fw.data_addr = bce_COM_b06FwDataAddr;
	fw.data_len = bce_COM_b06FwDataLen;
	fw.data_index = 0;
	fw.data = bce_COM_b06FwData;

	fw.sbss_addr = bce_COM_b06FwSbssAddr;
	fw.sbss_len = bce_COM_b06FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bce_COM_b06FwSbss;

	fw.bss_addr = bce_COM_b06FwBssAddr;
	fw.bss_len = bce_COM_b06FwBssLen;
	fw.bss_index = 0;
	fw.bss = bce_COM_b06FwBss;

	fw.rodata_addr = bce_COM_b06FwRodataAddr;
	fw.rodata_len = bce_COM_b06FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bce_COM_b06FwRodata;

	DBPRINT(sc, BCE_INFO_RESET, "Loading COM firmware.\n");
	bce_load_cpu_fw(sc, &cpu_reg, &fw);
}


/****************************************************************************/
/* Initialize context memory.                                               */
/*                                                                          */
/* Clears the memory associated with each Context ID (CID).                 */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
 */
/****************************************************************************/
static void
bce_init_ctx(struct bce_softc *sc)
{
	/* Walk all 96 context IDs, highest first. */
	uint32_t vcid = 96;

	while (vcid) {
		uint32_t vcid_addr, pcid_addr, offset;
		int i;

		vcid--;

		vcid_addr = GET_CID_ADDR(vcid);
		pcid_addr = vcid_addr;

		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
			vcid_addr += (i << PHY_CTX_SHIFT);
			pcid_addr += (i << PHY_CTX_SHIFT);

			/* Point the virtual/physical context windows. */
			REG_WR(sc, BCE_CTX_VIRT_ADDR, vcid_addr);
			REG_WR(sc, BCE_CTX_PAGE_TBL, pcid_addr);

			/* Zero out the context. */
			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
				CTX_WR(sc, vcid_addr, offset, 0);
		}
	}
}


/****************************************************************************/
/* Fetch the permanent MAC address of the controller.                       */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_get_mac_addr(struct bce_softc *sc)
{
	uint32_t mac_lo = 0, mac_hi = 0;

	/*
	 * The NetXtreme II bootcode populates various NIC
	 * power-on and runtime configuration items in a
	 * shared memory area.  The factory configured MAC
	 * address is available from both NVRAM and the
	 * shared memory area so we'll read the value from
	 * shared memory for speed.
	 */

	mac_hi = REG_RD_IND(sc, sc->bce_shmem_base +
			    BCE_PORT_HW_CFG_MAC_UPPER);
	mac_lo = REG_RD_IND(sc, sc->bce_shmem_base +
			    BCE_PORT_HW_CFG_MAC_LOWER);

	if (mac_lo == 0 && mac_hi == 0) {
		if_printf(&sc->arpcom.ac_if, "Invalid Ethernet address!\n");
	} else {
		/* Unpack: high word holds bytes 0-1, low word bytes 2-5. */
		sc->eaddr[0] = (u_char)(mac_hi >> 8);
		sc->eaddr[1] = (u_char)(mac_hi >> 0);
		sc->eaddr[2] = (u_char)(mac_lo >> 24);
		sc->eaddr[3] = (u_char)(mac_lo >> 16);
		sc->eaddr[4] = (u_char)(mac_lo >> 8);
		sc->eaddr[5] = (u_char)(mac_lo >> 0);
	}

	DBPRINT(sc, BCE_INFO, "Permanent Ethernet address = %6D\n",
		sc->eaddr, ":");
}


/****************************************************************************/
/* Program the MAC address.                                                 */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_set_mac_addr(struct bce_softc *sc)
{
	const uint8_t *mac_addr = sc->eaddr;
	uint32_t val;

	DBPRINT(sc, BCE_INFO, "Setting Ethernet address = %6D\n",
		sc->eaddr, ":");

	/* Pack the 6-byte address into the two MATCH registers. */
	val = (mac_addr[0] << 8) | mac_addr[1];
	REG_WR(sc, BCE_EMAC_MAC_MATCH0, val);

	val = (mac_addr[2] << 24) |
	      (mac_addr[3] << 16) |
	      (mac_addr[4] << 8) |
	      mac_addr[5];
	REG_WR(sc, BCE_EMAC_MAC_MATCH1, val);
}


/****************************************************************************/
/* Stop the controller.                                                     */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
 */
/****************************************************************************/
static void
bce_stop(struct bce_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii = device_get_softc(sc->bce_miibus);
	struct ifmedia_entry *ifm;
	int mtmp, itmp;

	ASSERT_SERIALIZED(ifp->if_serializer);

	/* Stop the periodic statistics/tick callout. */
	callout_stop(&sc->bce_stat_ch);

	/* Disable the transmit/receive blocks. */
	REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS, 0x5ffffff);
	REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS);
	DELAY(20);

	bce_disable_intr(sc);

	/* Tell firmware that the driver is going away. */
	bce_reset(sc, BCE_DRV_MSG_CODE_SUSPEND_NO_WOL);

	/* Free the RX lists. */
	bce_free_rx_chain(sc);

	/* Free TX buffers. */
	bce_free_tx_chain(sc);

	/*
	 * Isolate/power down the PHY, but leave the media selection
	 * unchanged so that things will be put back to normal when
	 * we bring the interface back up.
	 *
	 * 'mii' may be NULL if bce_stop() is called by bce_detach().
	 */
	if (mii != NULL) {
		itmp = ifp->if_flags;
		ifp->if_flags |= IFF_UP;
		ifm = mii->mii_media.ifm_cur;
		mtmp = ifm->ifm_media;
		ifm->ifm_media = IFM_ETHER | IFM_NONE;
		mii_mediachg(mii);
		ifm->ifm_media = mtmp;
		ifp->if_flags = itmp;
	}

	sc->bce_link = 0;
	sc->bce_coalchg_mask = 0;

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	bce_mgmt_init(sc);
}


/*
 * Reset the controller.  'reset_code' is the BCE_DRV_MSG_CODE_* value
 * handed to the bootcode so it knows why the reset is happening.
 * Returns 0 on success, positive errno on failure.
 */
static int
bce_reset(struct bce_softc *sc, uint32_t reset_code)
{
	uint32_t val;
	int i, rc = 0;

	/* Wait for pending PCI transactions to complete. */
	REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS,
	       BCE_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	       BCE_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	       BCE_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	       BCE_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	val = REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS);
	DELAY(5);

	/* Assume bootcode is running. */
	sc->bce_fw_timed_out = 0;

	/* Give the firmware a chance to prepare for the reset. */
	rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT0 | reset_code);
	if (rc) {
		if_printf(&sc->arpcom.ac_if,
			  "Firmware is not ready for reset\n");
		return rc;
	}

	/* Set a firmware reminder that this is a soft reset. */
	REG_WR_IND(sc, sc->bce_shmem_base + BCE_DRV_RESET_SIGNATURE,
		   BCE_DRV_RESET_SIGNATURE_MAGIC);

	/* Dummy read to force the chip to complete all current transactions. */
	val = REG_RD(sc, BCE_MISC_ID);

	/* Chip reset. */
	val = BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
	      BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
	      BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
	REG_WR(sc, BCE_PCICFG_MISC_CONFIG, val);

	/* Allow up to 30us for reset to complete. */
	for (i = 0; i < 10; i++) {
		val = REG_RD(sc, BCE_PCICFG_MISC_CONFIG);
		if ((val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			    BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) {
			break;
		}
		DELAY(10);
	}

	/* Check that reset completed successfully. */
	if (val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		   BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
		if_printf(&sc->arpcom.ac_if, "Reset failed!\n");
		return EBUSY;
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(sc, BCE_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		if_printf(&sc->arpcom.ac_if, "Byte swap is incorrect!\n");
		return ENODEV;
	}

	/* Just completed a reset, assume that firmware is running again. */
	sc->bce_fw_timed_out = 0;

	/* Wait for the firmware to finish its initialization. */
	rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT1 | reset_code);
	if (rc) {
		if_printf(&sc->arpcom.ac_if,
			  "Firmware did not complete initialization!\n");
	}
	return rc;
}


/*
 * One-time chip setup after reset: DMA configuration, context memory,
 * on-chip CPU firmware, NVRAM access and queue/page-size registers.
 * Returns 0 on success, positive errno on failure.
 */
static int
bce_chipinit(struct bce_softc *sc)
{
	uint32_t val;
	int rc = 0;

	/* Make sure the interrupt is not active. */
	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, BCE_PCICFG_INT_ACK_CMD_MASK_INT);

	/*
	 * Initialize DMA byte/word swapping, configure the number of DMA
	 * channels and PCI clock compensation delay.
	 */
	val = BCE_DMA_CONFIG_DATA_BYTE_SWAP |
	      BCE_DMA_CONFIG_DATA_WORD_SWAP |
#if BYTE_ORDER == BIG_ENDIAN
	      BCE_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BCE_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	val |= (0x2 << 20) | BCE_DMA_CONFIG_CNTL_PCI_COMP_DLY;

	if ((sc->bce_flags & BCE_PCIX_FLAG) && sc->bus_speed_mhz == 133)
		val |= BCE_DMA_CONFIG_PCI_FAST_CLK_CMP;

	/*
	 * This setting resolves a problem observed on certain Intel PCI
	 * chipsets that cannot handle multiple outstanding DMA operations.
	 * See errata E9_5706A1_65.
	 */
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706 &&
	    BCE_CHIP_ID(sc) != BCE_CHIP_ID_5706_A0 &&
	    !(sc->bce_flags & BCE_PCIX_FLAG))
		val |= BCE_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(sc, BCE_DMA_CONFIG, val);

	/* Clear the PCI-X relaxed ordering bit. See errata E3_5708CA0_570. */
	if (sc->bce_flags & BCE_PCIX_FLAG) {
		uint16_t cmd;

		cmd = pci_read_config(sc->bce_dev, BCE_PCI_PCIX_CMD, 2);
		pci_write_config(sc->bce_dev, BCE_PCI_PCIX_CMD, cmd & ~0x2, 2);
	}

	/* Enable the RX_V2P and Context state machines before access. */
	REG_WR(sc, BCE_MISC_ENABLE_SET_BITS,
	       BCE_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	       BCE_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	       BCE_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts. */
	bce_init_ctx(sc);

	/* Initialize the on-boards CPUs */
	bce_init_cpus(sc);

	/* Prepare NVRAM for access. */
	rc = bce_init_nvram(sc);
	if (rc != 0)
		return rc;

	/* Set the kernel bypass block size */
	val = REG_RD(sc, BCE_MQ_CONFIG);
	val &= ~BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	REG_WR(sc, BCE_MQ_CONFIG, val);

	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(sc, BCE_MQ_KNL_BYP_WIND_START, val);
	REG_WR(sc, BCE_MQ_KNL_WIND_END, val);

	/* Set the page size and clear the RV2P processor stall bits. */
	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(sc, BCE_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(sc, BCE_TBDR_CONFIG);
	val &= ~BCE_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(sc, BCE_TBDR_CONFIG, val);

	return 0;
}


/****************************************************************************/
/* Initialize the controller in preparation to send/receive traffic.        */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
static int
bce_blockinit(struct bce_softc *sc)
{
	uint32_t reg, val;
	int rc = 0;

	/* Load the hardware default MAC address.
*/ 3157 bce_set_mac_addr(sc); 3158 3159 /* Set the Ethernet backoff seed value */ 3160 val = sc->eaddr[0] + (sc->eaddr[1] << 8) + (sc->eaddr[2] << 16) + 3161 sc->eaddr[3] + (sc->eaddr[4] << 8) + (sc->eaddr[5] << 16); 3162 REG_WR(sc, BCE_EMAC_BACKOFF_SEED, val); 3163 3164 sc->last_status_idx = 0; 3165 sc->rx_mode = BCE_EMAC_RX_MODE_SORT_MODE; 3166 3167 /* Set up link change interrupt generation. */ 3168 REG_WR(sc, BCE_EMAC_ATTENTION_ENA, BCE_EMAC_ATTENTION_ENA_LINK); 3169 3170 /* Program the physical address of the status block. */ 3171 REG_WR(sc, BCE_HC_STATUS_ADDR_L, BCE_ADDR_LO(sc->status_block_paddr)); 3172 REG_WR(sc, BCE_HC_STATUS_ADDR_H, BCE_ADDR_HI(sc->status_block_paddr)); 3173 3174 /* Program the physical address of the statistics block. */ 3175 REG_WR(sc, BCE_HC_STATISTICS_ADDR_L, 3176 BCE_ADDR_LO(sc->stats_block_paddr)); 3177 REG_WR(sc, BCE_HC_STATISTICS_ADDR_H, 3178 BCE_ADDR_HI(sc->stats_block_paddr)); 3179 3180 /* Program various host coalescing parameters. */ 3181 REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP, 3182 (sc->bce_tx_quick_cons_trip_int << 16) | 3183 sc->bce_tx_quick_cons_trip); 3184 REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP, 3185 (sc->bce_rx_quick_cons_trip_int << 16) | 3186 sc->bce_rx_quick_cons_trip); 3187 REG_WR(sc, BCE_HC_COMP_PROD_TRIP, 3188 (sc->bce_comp_prod_trip_int << 16) | sc->bce_comp_prod_trip); 3189 REG_WR(sc, BCE_HC_TX_TICKS, 3190 (sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks); 3191 REG_WR(sc, BCE_HC_RX_TICKS, 3192 (sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks); 3193 REG_WR(sc, BCE_HC_COM_TICKS, 3194 (sc->bce_com_ticks_int << 16) | sc->bce_com_ticks); 3195 REG_WR(sc, BCE_HC_CMD_TICKS, 3196 (sc->bce_cmd_ticks_int << 16) | sc->bce_cmd_ticks); 3197 REG_WR(sc, BCE_HC_STATS_TICKS, (sc->bce_stats_ticks & 0xffff00)); 3198 REG_WR(sc, BCE_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */ 3199 REG_WR(sc, BCE_HC_CONFIG, 3200 BCE_HC_CONFIG_TX_TMR_MODE | 3201 BCE_HC_CONFIG_COLLECT_STATS); 3202 3203 /* Clear the internal statistics counters. 
*/ 3204 REG_WR(sc, BCE_HC_COMMAND, BCE_HC_COMMAND_CLR_STAT_NOW); 3205 3206 /* Verify that bootcode is running. */ 3207 reg = REG_RD_IND(sc, sc->bce_shmem_base + BCE_DEV_INFO_SIGNATURE); 3208 3209 DBRUNIF(DB_RANDOMTRUE(bce_debug_bootcode_running_failure), 3210 if_printf(&sc->arpcom.ac_if, 3211 "%s(%d): Simulating bootcode failure.\n", 3212 __FILE__, __LINE__); 3213 reg = 0); 3214 3215 if ((reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK) != 3216 BCE_DEV_INFO_SIGNATURE_MAGIC) { 3217 if_printf(&sc->arpcom.ac_if, 3218 "Bootcode not running! Found: 0x%08X, " 3219 "Expected: 08%08X\n", 3220 reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK, 3221 BCE_DEV_INFO_SIGNATURE_MAGIC); 3222 return ENODEV; 3223 } 3224 3225 /* Check if any management firmware is running. */ 3226 reg = REG_RD_IND(sc, sc->bce_shmem_base + BCE_PORT_FEATURE); 3227 if (reg & (BCE_PORT_FEATURE_ASF_ENABLED | 3228 BCE_PORT_FEATURE_IMD_ENABLED)) { 3229 DBPRINT(sc, BCE_INFO, "Management F/W Enabled.\n"); 3230 sc->bce_flags |= BCE_MFW_ENABLE_FLAG; 3231 } 3232 3233 sc->bce_fw_ver = 3234 REG_RD_IND(sc, sc->bce_shmem_base + BCE_DEV_INFO_BC_REV); 3235 DBPRINT(sc, BCE_INFO, "bootcode rev = 0x%08X\n", sc->bce_fw_ver); 3236 3237 /* Allow bootcode to apply any additional fixes before enabling MAC. */ 3238 rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT2 | BCE_DRV_MSG_CODE_RESET); 3239 3240 /* Enable link state change interrupt generation. */ 3241 REG_WR(sc, BCE_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE); 3242 3243 /* Enable all remaining blocks in the MAC. */ 3244 REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, 0x5ffffff); 3245 REG_RD(sc, BCE_MISC_ENABLE_SET_BITS); 3246 DELAY(20); 3247 3248 return 0; 3249 } 3250 3251 3252 /****************************************************************************/ 3253 /* Encapsulate an mbuf cluster into the rx_bd chain. */ 3254 /* */ 3255 /* The NetXtreme II can support Jumbo frames by using multiple rx_bd's. */ 3256 /* This routine will map an mbuf cluster into 1 or more rx_bd's as */ 3257 /* necessary. 
 */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
static int
bce_newbuf_std(struct bce_softc *sc, struct mbuf *m,
	       uint16_t *prod, uint16_t *chain_prod, uint32_t *prod_bseq)
{
	bus_dmamap_t map;
	struct bce_dmamap_arg ctx;
	bus_dma_segment_t seg;
	struct mbuf *m_new;
	struct rx_bd *rxbd;
	int error;
#ifdef BCE_DEBUG
	uint16_t debug_chain_prod = *chain_prod;
#endif

	/* Make sure the inputs are valid. */
	DBRUNIF((*chain_prod > MAX_RX_BD),
		if_printf(&sc->arpcom.ac_if, "%s(%d): "
			  "RX producer out of range: 0x%04X > 0x%04X\n",
			  __FILE__, __LINE__,
			  *chain_prod, (uint16_t)MAX_RX_BD));

	DBPRINT(sc, BCE_VERBOSE_RECV, "%s(enter): prod = 0x%04X, chain_prod = 0x%04X, "
		"prod_bseq = 0x%08X\n", __func__, *prod, *chain_prod, *prod_bseq);

	/*
	 * If 'm' is NULL a fresh cluster is allocated; otherwise the
	 * caller's mbuf is recycled (its data pointer is reset to the
	 * start of the external cluster buffer).
	 */
	if (m == NULL) {
		/* Debug-only: randomly simulate an allocation failure. */
		DBRUNIF(DB_RANDOMTRUE(bce_debug_mbuf_allocation_failure),
			if_printf(&sc->arpcom.ac_if, "%s(%d): "
				  "Simulating mbuf allocation failure.\n",
				  __FILE__, __LINE__);
			sc->mbuf_alloc_failed++;
			return ENOBUFS);

		/* This is a new mbuf allocation. */
		m_new = m_getcl(MB_DONTWAIT, MT_DATA, M_PKTHDR);
		if (m_new == NULL)
			return ENOBUFS;
		DBRUNIF(1, sc->rx_mbuf_alloc++);
	} else {
		m_new = m;
		m_new->m_data = m_new->m_ext.ext_buf;
	}
	m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;

	/* Map the mbuf cluster into device memory. */
	map = sc->rx_mbuf_map[*chain_prod];

	/* Expect the cluster to fit in a single DMA segment. */
	ctx.bce_maxsegs = 1;
	ctx.bce_segs = &seg;
	error = bus_dmamap_load_mbuf(sc->rx_mbuf_tag, map, m_new,
				     bce_dma_map_mbuf, &ctx, BUS_DMA_NOWAIT);
	if (error || ctx.bce_maxsegs == 0) {
		if_printf(&sc->arpcom.ac_if,
			  "Error mapping mbuf into RX chain!\n");

		/* Only free the mbuf if we allocated it here. */
		if (m == NULL)
			m_freem(m_new);

		DBRUNIF(1, sc->rx_mbuf_alloc--);
		return ENOBUFS;
	}

	/* Watch for overflow. */
	DBRUNIF((sc->free_rx_bd > USABLE_RX_BD),
		if_printf(&sc->arpcom.ac_if, "%s(%d): "
			  "Too many free rx_bd (0x%04X > 0x%04X)!\n",
			  __FILE__, __LINE__, sc->free_rx_bd,
			  (uint16_t)USABLE_RX_BD));

	/* Update some debug statistic counters */
	DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark),
		sc->rx_low_watermark = sc->free_rx_bd);
	DBRUNIF((sc->free_rx_bd == 0), sc->rx_empty_count++);

	/* Setup the rx_bd for the first segment. */
	rxbd = &sc->rx_bd_chain[RX_PAGE(*chain_prod)][RX_IDX(*chain_prod)];

	rxbd->rx_bd_haddr_lo = htole32(BCE_ADDR_LO(seg.ds_addr));
	rxbd->rx_bd_haddr_hi = htole32(BCE_ADDR_HI(seg.ds_addr));
	rxbd->rx_bd_len = htole32(seg.ds_len);
	rxbd->rx_bd_flags = htole32(RX_BD_FLAGS_START);
	*prod_bseq += seg.ds_len;

	/* Single-segment mapping, so this rx_bd is also the last one. */
	rxbd->rx_bd_flags |= htole32(RX_BD_FLAGS_END);

	/* Save the mbuf and update our counter. */
	sc->rx_mbuf_ptr[*chain_prod] = m_new;
	sc->free_rx_bd--;

	DBRUN(BCE_VERBOSE_RECV,
	      bce_dump_rx_mbuf_chain(sc, debug_chain_prod, 1));

	DBPRINT(sc, BCE_VERBOSE_RECV, "%s(exit): prod = 0x%04X, chain_prod = 0x%04X, "
		"prod_bseq = 0x%08X\n", __func__, *prod, *chain_prod, *prod_bseq);

	return 0;
}


/****************************************************************************/
/* Allocate memory and initialize the TX data structures.                   */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.
 */
/****************************************************************************/
static int
bce_init_tx_chain(struct bce_softc *sc)
{
	struct tx_bd *txbd;
	uint32_t val;
	int i, rc = 0;

	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __func__);

	/* Set the initial TX producer/consumer indices. */
	sc->tx_prod = 0;
	sc->tx_cons = 0;
	sc->tx_prod_bseq = 0;
	sc->used_tx_bd = 0;
	sc->max_tx_bd = USABLE_TX_BD;
	DBRUNIF(1, sc->tx_hi_watermark = USABLE_TX_BD);
	DBRUNIF(1, sc->tx_full_count = 0);

	/*
	 * The NetXtreme II supports a linked-list structre called
	 * a Buffer Descriptor Chain (or BD chain).  A BD chain
	 * consists of a series of 1 or more chain pages, each of which
	 * consists of a fixed number of BD entries.
	 * The last BD entry on each page is a pointer to the next page
	 * in the chain, and the last pointer in the BD chain
	 * points back to the beginning of the chain.
	 */

	/* Set the TX next pointer chain entries. */
	for (i = 0; i < TX_PAGES; i++) {
		int j;

		/* The last BD slot of each page holds the next-page link. */
		txbd = &sc->tx_bd_chain[i][USABLE_TX_BD_PER_PAGE];

		/* Check if we've reached the last page. */
		if (i == (TX_PAGES - 1))
			j = 0;	/* wrap back to the first page */
		else
			j = i + 1;

		txbd->tx_bd_haddr_hi =
			htole32(BCE_ADDR_HI(sc->tx_bd_chain_paddr[j]));
		txbd->tx_bd_haddr_lo =
			htole32(BCE_ADDR_LO(sc->tx_bd_chain_paddr[j]));
	}

	/* Flush the chain pages to memory before the chip reads them. */
	for (i = 0; i < TX_PAGES; ++i) {
		bus_dmamap_sync(sc->tx_bd_chain_tag, sc->tx_bd_chain_map[i],
				BUS_DMASYNC_PREWRITE);
	}

	/* Initialize the context ID for an L2 TX chain. */
	val = BCE_L2CTX_TYPE_TYPE_L2;
	val |= BCE_L2CTX_TYPE_SIZE_L2;
	CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TYPE, val);

	val = BCE_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
	CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_CMD_TYPE, val);

	/* Point the hardware to the first page in the chain. */
	val = BCE_ADDR_HI(sc->tx_bd_chain_paddr[0]);
	CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TBDR_BHADDR_HI, val);
	val = BCE_ADDR_LO(sc->tx_bd_chain_paddr[0]);
	CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TBDR_BHADDR_LO, val);

	DBRUN(BCE_VERBOSE_SEND, bce_dump_tx_chain(sc, 0, TOTAL_TX_BD));

	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __func__);

	/* rc is always 0 here; kept for symmetry with bce_init_rx_chain(). */
	return(rc);
}


/****************************************************************************/
/* Free memory and clear the TX data structures.                            */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_free_tx_chain(struct bce_softc *sc)
{
	int i;

	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __func__);

	/* Unmap, unload, and free any mbufs still in the TX mbuf chain. */
	for (i = 0; i < TOTAL_TX_BD; i++) {
		if (sc->tx_mbuf_ptr[i] != NULL) {
			bus_dmamap_sync(sc->tx_mbuf_tag, sc->tx_mbuf_map[i],
					BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->tx_mbuf_tag, sc->tx_mbuf_map[i]);
			m_freem(sc->tx_mbuf_ptr[i]);
			sc->tx_mbuf_ptr[i] = NULL;
			DBRUNIF(1, sc->tx_mbuf_alloc--);
		}
	}

	/* Clear each TX chain page. */
	for (i = 0; i < TX_PAGES; i++)
		bzero(sc->tx_bd_chain[i], BCE_TX_CHAIN_PAGE_SZ);
	sc->used_tx_bd = 0;

	/* Check if we lost any mbufs in the process. */
	DBRUNIF((sc->tx_mbuf_alloc),
		if_printf(&sc->arpcom.ac_if,
			  "%s(%d): Memory leak! "
			  "Lost %d mbufs from tx chain!\n",
			  __FILE__, __LINE__, sc->tx_mbuf_alloc));

	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __func__);
}


/****************************************************************************/
/* Allocate memory and initialize the RX data structures.                   */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.
 */
/****************************************************************************/
static int
bce_init_rx_chain(struct bce_softc *sc)
{
	struct rx_bd *rxbd;
	int i, rc = 0;
	uint16_t prod, chain_prod;
	uint32_t prod_bseq, val;

	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __func__);

	/* Initialize the RX producer and consumer indices. */
	sc->rx_prod = 0;
	sc->rx_cons = 0;
	sc->rx_prod_bseq = 0;
	sc->free_rx_bd = USABLE_RX_BD;
	sc->max_rx_bd = USABLE_RX_BD;
	DBRUNIF(1, sc->rx_low_watermark = USABLE_RX_BD);
	DBRUNIF(1, sc->rx_empty_count = 0);

	/* Initialize the RX next pointer chain entries. */
	for (i = 0; i < RX_PAGES; i++) {
		int j;

		/* The last BD slot of each page holds the next-page link. */
		rxbd = &sc->rx_bd_chain[i][USABLE_RX_BD_PER_PAGE];

		/* Check if we've reached the last page. */
		if (i == (RX_PAGES - 1))
			j = 0;	/* wrap back to the first page */
		else
			j = i + 1;

		/* Setup the chain page pointers. */
		rxbd->rx_bd_haddr_hi =
			htole32(BCE_ADDR_HI(sc->rx_bd_chain_paddr[j]));
		rxbd->rx_bd_haddr_lo =
			htole32(BCE_ADDR_LO(sc->rx_bd_chain_paddr[j]));
	}

	/* Initialize the context ID for an L2 RX chain. */
	val = BCE_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BCE_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;
	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_CTX_TYPE, val);

	/* Point the hardware to the first page in the chain. */
	/* XXX shouldn't this after RX descriptor initialization? */
	val = BCE_ADDR_HI(sc->rx_bd_chain_paddr[0]);
	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_NX_BDHADDR_HI, val);
	val = BCE_ADDR_LO(sc->rx_bd_chain_paddr[0]);
	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_NX_BDHADDR_LO, val);

	/* Allocate mbuf clusters for the rx_bd chain. */
	prod = prod_bseq = 0;
	while (prod < TOTAL_RX_BD) {
		chain_prod = RX_CHAIN_IDX(prod);
		if (bce_newbuf_std(sc, NULL, &prod, &chain_prod, &prod_bseq)) {
			if_printf(&sc->arpcom.ac_if,
				  "Error filling RX chain: rx_bd[0x%04X]!\n",
				  chain_prod);
			rc = ENOBUFS;
			break;
		}
		prod = NEXT_RX_BD(prod);
	}

	/* Save the RX chain producer index. */
	sc->rx_prod = prod;
	sc->rx_prod_bseq = prod_bseq;

	/* Flush the chain pages to memory before the chip reads them. */
	for (i = 0; i < RX_PAGES; i++) {
		bus_dmamap_sync(sc->rx_bd_chain_tag, sc->rx_bd_chain_map[i],
				BUS_DMASYNC_PREWRITE);
	}

	/* Tell the chip about the waiting rx_bd's. */
	REG_WR16(sc, MB_RX_CID_ADDR + BCE_L2CTX_HOST_BDIDX, sc->rx_prod);
	REG_WR(sc, MB_RX_CID_ADDR + BCE_L2CTX_HOST_BSEQ, sc->rx_prod_bseq);

	DBRUN(BCE_VERBOSE_RECV, bce_dump_rx_chain(sc, 0, TOTAL_RX_BD));

	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __func__);

	return(rc);
}


/****************************************************************************/
/* Free memory and clear the RX data structures.                            */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_free_rx_chain(struct bce_softc *sc)
{
	int i;

	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __func__);

	/* Free any mbufs still in the RX mbuf chain. */
	for (i = 0; i < TOTAL_RX_BD; i++) {
		if (sc->rx_mbuf_ptr[i] != NULL) {
			bus_dmamap_sync(sc->rx_mbuf_tag, sc->rx_mbuf_map[i],
					BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->rx_mbuf_tag, sc->rx_mbuf_map[i]);
			m_freem(sc->rx_mbuf_ptr[i]);
			sc->rx_mbuf_ptr[i] = NULL;
			DBRUNIF(1, sc->rx_mbuf_alloc--);
		}
	}

	/* Clear each RX chain page. */
	for (i = 0; i < RX_PAGES; i++)
		bzero(sc->rx_bd_chain[i], BCE_RX_CHAIN_PAGE_SZ);

	/* Check if we lost any mbufs in the process. */
	DBRUNIF((sc->rx_mbuf_alloc),
		if_printf(&sc->arpcom.ac_if,
			  "%s(%d): Memory leak! "
			  "Lost %d mbufs from rx chain!\n",
			  __FILE__, __LINE__, sc->rx_mbuf_alloc));

	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __func__);
}


/****************************************************************************/
/* Set media options.                                                       */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
static int
bce_ifmedia_upd(struct ifnet *ifp)
{
	struct bce_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->bce_miibus);

	/*
	 * 'mii' will be NULL, when this function is called on following
	 * code path: bce_attach() -> bce_mgmt_init()
	 */
	if (mii != NULL) {
		/* Make sure the MII bus has been enumerated. */
		sc->bce_link = 0;
		if (mii->mii_instance) {
			struct mii_softc *miisc;

			/* Reset every PHY instance on the bus. */
			LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
				mii_phy_reset(miisc);
		}
		mii_mediachg(mii);
	}
	return 0;
}


/****************************************************************************/
/* Reports current media status.                                            */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
*/ 3648 /****************************************************************************/ 3649 static void 3650 bce_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 3651 { 3652 struct bce_softc *sc = ifp->if_softc; 3653 struct mii_data *mii = device_get_softc(sc->bce_miibus); 3654 3655 mii_pollstat(mii); 3656 ifmr->ifm_active = mii->mii_media_active; 3657 ifmr->ifm_status = mii->mii_media_status; 3658 } 3659 3660 3661 /****************************************************************************/ 3662 /* Handles PHY generated interrupt events. */ 3663 /* */ 3664 /* Returns: */ 3665 /* Nothing. */ 3666 /****************************************************************************/ 3667 static void 3668 bce_phy_intr(struct bce_softc *sc) 3669 { 3670 uint32_t new_link_state, old_link_state; 3671 struct ifnet *ifp = &sc->arpcom.ac_if; 3672 3673 ASSERT_SERIALIZED(ifp->if_serializer); 3674 3675 new_link_state = sc->status_block->status_attn_bits & 3676 STATUS_ATTN_BITS_LINK_STATE; 3677 old_link_state = sc->status_block->status_attn_bits_ack & 3678 STATUS_ATTN_BITS_LINK_STATE; 3679 3680 /* Handle any changes if the link state has changed. */ 3681 if (new_link_state != old_link_state) { /* XXX redundant? */ 3682 DBRUN(BCE_VERBOSE_INTR, bce_dump_status_block(sc)); 3683 3684 sc->bce_link = 0; 3685 callout_stop(&sc->bce_stat_ch); 3686 bce_tick_serialized(sc); 3687 3688 /* Update the status_attn_bits_ack field in the status block. */ 3689 if (new_link_state) { 3690 REG_WR(sc, BCE_PCICFG_STATUS_BIT_SET_CMD, 3691 STATUS_ATTN_BITS_LINK_STATE); 3692 if (bootverbose) 3693 if_printf(ifp, "Link is now UP.\n"); 3694 } else { 3695 REG_WR(sc, BCE_PCICFG_STATUS_BIT_CLEAR_CMD, 3696 STATUS_ATTN_BITS_LINK_STATE); 3697 if (bootverbose) 3698 if_printf(ifp, "Link is now DOWN.\n"); 3699 } 3700 } 3701 3702 /* Acknowledge the link change interrupt. 
*/ 3703 REG_WR(sc, BCE_EMAC_STATUS, BCE_EMAC_STATUS_LINK_CHANGE); 3704 } 3705 3706 3707 /****************************************************************************/ 3708 /* Reads the receive consumer value from the status block (skipping over */ 3709 /* chain page pointer if necessary). */ 3710 /* */ 3711 /* Returns: */ 3712 /* hw_cons */ 3713 /****************************************************************************/ 3714 static __inline uint16_t 3715 bce_get_hw_rx_cons(struct bce_softc *sc) 3716 { 3717 uint16_t hw_cons = sc->status_block->status_rx_quick_consumer_index0; 3718 3719 if ((hw_cons & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE) 3720 hw_cons++; 3721 return hw_cons; 3722 } 3723 3724 3725 /****************************************************************************/ 3726 /* Handles received frame interrupt events. */ 3727 /* */ 3728 /* Returns: */ 3729 /* Nothing. */ 3730 /****************************************************************************/ 3731 static void 3732 bce_rx_intr(struct bce_softc *sc, int count) 3733 { 3734 struct ifnet *ifp = &sc->arpcom.ac_if; 3735 uint16_t hw_cons, sw_cons, sw_chain_cons, sw_prod, sw_chain_prod; 3736 uint32_t sw_prod_bseq; 3737 int i; 3738 struct mbuf_chain chain[MAXCPU]; 3739 3740 ASSERT_SERIALIZED(ifp->if_serializer); 3741 3742 ether_input_chain_init(chain); 3743 3744 DBRUNIF(1, sc->rx_interrupts++); 3745 3746 /* Prepare the RX chain pages to be accessed by the host CPU. */ 3747 for (i = 0; i < RX_PAGES; i++) { 3748 bus_dmamap_sync(sc->rx_bd_chain_tag, 3749 sc->rx_bd_chain_map[i], BUS_DMASYNC_POSTREAD); 3750 } 3751 3752 /* Get the hardware's view of the RX consumer index. */ 3753 hw_cons = sc->hw_rx_cons = bce_get_hw_rx_cons(sc); 3754 3755 /* Get working copies of the driver's view of the RX indices. 
*/ 3756 sw_cons = sc->rx_cons; 3757 sw_prod = sc->rx_prod; 3758 sw_prod_bseq = sc->rx_prod_bseq; 3759 3760 DBPRINT(sc, BCE_INFO_RECV, "%s(enter): sw_prod = 0x%04X, " 3761 "sw_cons = 0x%04X, sw_prod_bseq = 0x%08X\n", 3762 __func__, sw_prod, sw_cons, sw_prod_bseq); 3763 3764 /* Prevent speculative reads from getting ahead of the status block. */ 3765 bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0, 3766 BUS_SPACE_BARRIER_READ); 3767 3768 /* Update some debug statistics counters */ 3769 DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark), 3770 sc->rx_low_watermark = sc->free_rx_bd); 3771 DBRUNIF((sc->free_rx_bd == 0), sc->rx_empty_count++); 3772 3773 /* Scan through the receive chain as long as there is work to do. */ 3774 while (sw_cons != hw_cons) { 3775 struct mbuf *m = NULL; 3776 struct l2_fhdr *l2fhdr = NULL; 3777 struct rx_bd *rxbd; 3778 unsigned int len; 3779 uint32_t status = 0; 3780 3781 #ifdef DEVICE_POLLING 3782 if (count >= 0 && count-- == 0) { 3783 sc->hw_rx_cons = sw_cons; 3784 break; 3785 } 3786 #endif 3787 3788 /* 3789 * Convert the producer/consumer indices 3790 * to an actual rx_bd index. 3791 */ 3792 sw_chain_cons = RX_CHAIN_IDX(sw_cons); 3793 sw_chain_prod = RX_CHAIN_IDX(sw_prod); 3794 3795 /* Get the used rx_bd. */ 3796 rxbd = &sc->rx_bd_chain[RX_PAGE(sw_chain_cons)] 3797 [RX_IDX(sw_chain_cons)]; 3798 sc->free_rx_bd++; 3799 3800 DBRUN(BCE_VERBOSE_RECV, 3801 if_printf(ifp, "%s(): ", __func__); 3802 bce_dump_rxbd(sc, sw_chain_cons, rxbd)); 3803 3804 /* The mbuf is stored with the last rx_bd entry of a packet. */ 3805 if (sc->rx_mbuf_ptr[sw_chain_cons] != NULL) { 3806 /* Validate that this is the last rx_bd. 
*/ 3807 DBRUNIF((!(rxbd->rx_bd_flags & RX_BD_FLAGS_END)), 3808 if_printf(ifp, "%s(%d): " 3809 "Unexpected mbuf found in rx_bd[0x%04X]!\n", 3810 __FILE__, __LINE__, sw_chain_cons); 3811 bce_breakpoint(sc)); 3812 3813 /* 3814 * ToDo: If the received packet is small enough 3815 * to fit into a single, non-M_EXT mbuf, 3816 * allocate a new mbuf here, copy the data to 3817 * that mbuf, and recycle the mapped jumbo frame. 3818 */ 3819 3820 /* Unmap the mbuf from DMA space. */ 3821 bus_dmamap_sync(sc->rx_mbuf_tag, 3822 sc->rx_mbuf_map[sw_chain_cons], 3823 BUS_DMASYNC_POSTREAD); 3824 bus_dmamap_unload(sc->rx_mbuf_tag, 3825 sc->rx_mbuf_map[sw_chain_cons]); 3826 3827 /* Remove the mbuf from the driver's chain. */ 3828 m = sc->rx_mbuf_ptr[sw_chain_cons]; 3829 sc->rx_mbuf_ptr[sw_chain_cons] = NULL; 3830 3831 /* 3832 * Frames received on the NetXteme II are prepended 3833 * with an l2_fhdr structure which provides status 3834 * information about the received frame (including 3835 * VLAN tags and checksum info). The frames are also 3836 * automatically adjusted to align the IP header 3837 * (i.e. two null bytes are inserted before the 3838 * Ethernet header). 3839 */ 3840 l2fhdr = mtod(m, struct l2_fhdr *); 3841 3842 len = l2fhdr->l2_fhdr_pkt_len; 3843 status = l2fhdr->l2_fhdr_status; 3844 3845 DBRUNIF(DB_RANDOMTRUE(bce_debug_l2fhdr_status_check), 3846 if_printf(ifp, 3847 "Simulating l2_fhdr status error.\n"); 3848 status = status | L2_FHDR_ERRORS_PHY_DECODE); 3849 3850 /* Watch for unusual sized frames. */ 3851 DBRUNIF((len < BCE_MIN_MTU || 3852 len > BCE_MAX_JUMBO_ETHER_MTU_VLAN), 3853 if_printf(ifp, 3854 "%s(%d): Unusual frame size found. " 3855 "Min(%d), Actual(%d), Max(%d)\n", 3856 __FILE__, __LINE__, 3857 (int)BCE_MIN_MTU, len, 3858 (int)BCE_MAX_JUMBO_ETHER_MTU_VLAN); 3859 bce_dump_mbuf(sc, m); 3860 bce_breakpoint(sc)); 3861 3862 len -= ETHER_CRC_LEN; 3863 3864 /* Check the received frame for errors. 
*/ 3865 if (status & (L2_FHDR_ERRORS_BAD_CRC | 3866 L2_FHDR_ERRORS_PHY_DECODE | 3867 L2_FHDR_ERRORS_ALIGNMENT | 3868 L2_FHDR_ERRORS_TOO_SHORT | 3869 L2_FHDR_ERRORS_GIANT_FRAME)) { 3870 ifp->if_ierrors++; 3871 DBRUNIF(1, sc->l2fhdr_status_errors++); 3872 3873 /* Reuse the mbuf for a new frame. */ 3874 if (bce_newbuf_std(sc, m, &sw_prod, 3875 &sw_chain_prod, 3876 &sw_prod_bseq)) { 3877 DBRUNIF(1, bce_breakpoint(sc)); 3878 /* XXX */ 3879 panic("%s: Can't reuse RX mbuf!\n", 3880 ifp->if_xname); 3881 } 3882 m = NULL; 3883 goto bce_rx_int_next_rx; 3884 } 3885 3886 /* 3887 * Get a new mbuf for the rx_bd. If no new 3888 * mbufs are available then reuse the current mbuf, 3889 * log an ierror on the interface, and generate 3890 * an error in the system log. 3891 */ 3892 if (bce_newbuf_std(sc, NULL, &sw_prod, &sw_chain_prod, 3893 &sw_prod_bseq)) { 3894 DBRUN(BCE_WARN, 3895 if_printf(ifp, 3896 "%s(%d): Failed to allocate new mbuf, " 3897 "incoming frame dropped!\n", 3898 __FILE__, __LINE__)); 3899 3900 ifp->if_ierrors++; 3901 3902 /* Try and reuse the exisitng mbuf. */ 3903 if (bce_newbuf_std(sc, m, &sw_prod, 3904 &sw_chain_prod, 3905 &sw_prod_bseq)) { 3906 DBRUNIF(1, bce_breakpoint(sc)); 3907 /* XXX */ 3908 panic("%s: Double mbuf allocation " 3909 "failure!", ifp->if_xname); 3910 } 3911 m = NULL; 3912 goto bce_rx_int_next_rx; 3913 } 3914 3915 /* 3916 * Skip over the l2_fhdr when passing 3917 * the data up the stack. 3918 */ 3919 m_adj(m, sizeof(struct l2_fhdr) + ETHER_ALIGN); 3920 3921 m->m_pkthdr.len = m->m_len = len; 3922 m->m_pkthdr.rcvif = ifp; 3923 3924 DBRUN(BCE_VERBOSE_RECV, 3925 struct ether_header *eh; 3926 eh = mtod(m, struct ether_header *); 3927 if_printf(ifp, "%s(): to: %6D, from: %6D, " 3928 "type: 0x%04X\n", __func__, 3929 eh->ether_dhost, ":", 3930 eh->ether_shost, ":", 3931 htons(eh->ether_type))); 3932 3933 /* Validate the checksum if offload enabled. */ 3934 if (ifp->if_capenable & IFCAP_RXCSUM) { 3935 /* Check for an IP datagram. 
*/ 3936 if (status & L2_FHDR_STATUS_IP_DATAGRAM) { 3937 m->m_pkthdr.csum_flags |= 3938 CSUM_IP_CHECKED; 3939 3940 /* Check if the IP checksum is valid. */ 3941 if ((l2fhdr->l2_fhdr_ip_xsum ^ 3942 0xffff) == 0) { 3943 m->m_pkthdr.csum_flags |= 3944 CSUM_IP_VALID; 3945 } else { 3946 DBPRINT(sc, BCE_WARN_RECV, 3947 "%s(): Invalid IP checksum = 0x%04X!\n", 3948 __func__, l2fhdr->l2_fhdr_ip_xsum); 3949 } 3950 } 3951 3952 /* Check for a valid TCP/UDP frame. */ 3953 if (status & (L2_FHDR_STATUS_TCP_SEGMENT | 3954 L2_FHDR_STATUS_UDP_DATAGRAM)) { 3955 3956 /* Check for a good TCP/UDP checksum. */ 3957 if ((status & 3958 (L2_FHDR_ERRORS_TCP_XSUM | 3959 L2_FHDR_ERRORS_UDP_XSUM)) == 0) { 3960 m->m_pkthdr.csum_data = 3961 l2fhdr->l2_fhdr_tcp_udp_xsum; 3962 m->m_pkthdr.csum_flags |= 3963 CSUM_DATA_VALID | 3964 CSUM_PSEUDO_HDR; 3965 } else { 3966 DBPRINT(sc, BCE_WARN_RECV, 3967 "%s(): Invalid TCP/UDP checksum = 0x%04X!\n", 3968 __func__, l2fhdr->l2_fhdr_tcp_udp_xsum); 3969 } 3970 } 3971 } 3972 3973 ifp->if_ipackets++; 3974 bce_rx_int_next_rx: 3975 sw_prod = NEXT_RX_BD(sw_prod); 3976 } 3977 3978 sw_cons = NEXT_RX_BD(sw_cons); 3979 3980 /* If we have a packet, pass it up the stack */ 3981 if (m) { 3982 DBPRINT(sc, BCE_VERBOSE_RECV, 3983 "%s(): Passing received frame up.\n", __func__); 3984 3985 if (status & L2_FHDR_STATUS_L2_VLAN_TAG) { 3986 m->m_flags |= M_VLANTAG; 3987 m->m_pkthdr.ether_vlantag = 3988 l2fhdr->l2_fhdr_vlan_tag; 3989 } 3990 ether_input_chain(ifp, m, chain); 3991 3992 DBRUNIF(1, sc->rx_mbuf_alloc--); 3993 } 3994 3995 /* 3996 * If polling(4) is not enabled, refresh hw_cons to see 3997 * whether there's new work. 3998 * 3999 * If polling(4) is enabled, i.e count >= 0, refreshing 4000 * should not be performed, so that we would not spend 4001 * too much time in RX processing. 
		 */
		if (count < 0 && sw_cons == hw_cons)
			hw_cons = sc->hw_rx_cons = bce_get_hw_rx_cons(sc);

		/*
		 * Prevent speculative reads from getting ahead
		 * of the status block.
		 */
		bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
		    BUS_SPACE_BARRIER_READ);
	}

	/* Hand the whole batch of received frames to the network stack. */
	ether_input_dispatch(chain);

	/* Give the RX buffer descriptor pages back to the hardware. */
	for (i = 0; i < RX_PAGES; i++) {
		bus_dmamap_sync(sc->rx_bd_chain_tag,
		    sc->rx_bd_chain_map[i], BUS_DMASYNC_PREWRITE);
	}

	sc->rx_cons = sw_cons;
	sc->rx_prod = sw_prod;
	sc->rx_prod_bseq = sw_prod_bseq;

	/* Notify the controller of the new RX producer index/byte sequence. */
	REG_WR16(sc, MB_RX_CID_ADDR + BCE_L2CTX_HOST_BDIDX, sc->rx_prod);
	REG_WR(sc, MB_RX_CID_ADDR + BCE_L2CTX_HOST_BSEQ, sc->rx_prod_bseq);

	DBPRINT(sc, BCE_INFO_RECV, "%s(exit): rx_prod = 0x%04X, "
		"rx_cons = 0x%04X, rx_prod_bseq = 0x%08X\n",
		__func__, sc->rx_prod, sc->rx_cons, sc->rx_prod_bseq);
}


/****************************************************************************/
/* Reads the transmit consumer value from the status block (skipping over   */
/* chain page pointer if necessary).                                        */
/*                                                                          */
/* Returns:                                                                 */
/*   hw_cons                                                                */
/****************************************************************************/
static __inline uint16_t
bce_get_hw_tx_cons(struct bce_softc *sc)
{
	uint16_t hw_cons = sc->status_block->status_tx_quick_consumer_index0;

	/*
	 * The last entry of each TX page is a "next page" pointer, not a
	 * usable descriptor; step the consumer index over it.
	 */
	if ((hw_cons & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
		hw_cons++;
	return hw_cons;
}


/****************************************************************************/
/* Handles transmit completion interrupt events.                            */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_tx_intr(struct bce_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint16_t hw_tx_cons, sw_tx_cons, sw_tx_chain_cons;

	/* Caller must hold the interface serializer. */
	ASSERT_SERIALIZED(ifp->if_serializer);

	DBRUNIF(1, sc->tx_interrupts++);

	/* Get the hardware's view of the TX consumer index. */
	hw_tx_cons = sc->hw_tx_cons = bce_get_hw_tx_cons(sc);
	sw_tx_cons = sc->tx_cons;

	/* Prevent speculative reads from getting ahead of the status block. */
	bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
	    BUS_SPACE_BARRIER_READ);

	/* Cycle through any completed TX chain page entries. */
	while (sw_tx_cons != hw_tx_cons) {
#ifdef BCE_DEBUG
		struct tx_bd *txbd = NULL;
#endif
		sw_tx_chain_cons = TX_CHAIN_IDX(sw_tx_cons);

		DBPRINT(sc, BCE_INFO_SEND,
			"%s(): hw_tx_cons = 0x%04X, sw_tx_cons = 0x%04X, "
			"sw_tx_chain_cons = 0x%04X\n",
			__func__, hw_tx_cons, sw_tx_cons, sw_tx_chain_cons);

		DBRUNIF((sw_tx_chain_cons > MAX_TX_BD),
			if_printf(ifp, "%s(%d): "
				  "TX chain consumer out of range! "
				  " 0x%04X > 0x%04X\n",
				  __FILE__, __LINE__, sw_tx_chain_cons,
				  (int)MAX_TX_BD);
			bce_breakpoint(sc));

		DBRUNIF(1, txbd = &sc->tx_bd_chain[TX_PAGE(sw_tx_chain_cons)]
				  [TX_IDX(sw_tx_chain_cons)]);

		DBRUNIF((txbd == NULL),
			if_printf(ifp, "%s(%d): "
				  "Unexpected NULL tx_bd[0x%04X]!\n",
				  __FILE__, __LINE__, sw_tx_chain_cons);
			bce_breakpoint(sc));

		DBRUN(BCE_INFO_SEND,
		      if_printf(ifp, "%s(): ", __func__);
		      bce_dump_txbd(sc, sw_tx_chain_cons, txbd));

		/*
		 * Free the associated mbuf. Remember
		 * that only the last tx_bd of a packet
		 * has an mbuf pointer and DMA map.
		 */
		if (sc->tx_mbuf_ptr[sw_tx_chain_cons] != NULL) {
			/* Validate that this is the last tx_bd. */
			DBRUNIF((!(txbd->tx_bd_flags & TX_BD_FLAGS_END)),
				if_printf(ifp, "%s(%d): "
					  "tx_bd END flag not set but "
					  "txmbuf == NULL!\n", __FILE__, __LINE__);
				bce_breakpoint(sc));

			DBRUN(BCE_INFO_SEND,
			      if_printf(ifp, "%s(): Unloading map/freeing mbuf "
					"from tx_bd[0x%04X]\n", __func__,
					sw_tx_chain_cons));

			/* Unmap the mbuf. */
			bus_dmamap_unload(sc->tx_mbuf_tag,
					  sc->tx_mbuf_map[sw_tx_chain_cons]);

			/* Free the mbuf. */
			m_freem(sc->tx_mbuf_ptr[sw_tx_chain_cons]);
			sc->tx_mbuf_ptr[sw_tx_chain_cons] = NULL;
			DBRUNIF(1, sc->tx_mbuf_alloc--);

			ifp->if_opackets++;
		}

		sc->used_tx_bd--;
		sw_tx_cons = NEXT_TX_BD(sw_tx_cons);

		if (sw_tx_cons == hw_tx_cons) {
			/* Refresh hw_cons to see if there's new work. */
			hw_tx_cons = sc->hw_tx_cons = bce_get_hw_tx_cons(sc);
		}

		/*
		 * Prevent speculative reads from getting
		 * ahead of the status block.
		 */
		bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
		    BUS_SPACE_BARRIER_READ);
	}

	if (sc->used_tx_bd == 0) {
		/* Clear the TX timeout timer. */
		ifp->if_timer = 0;
	}

	/* Clear the tx hardware queue full flag. */
	if (sc->max_tx_bd - sc->used_tx_bd >= BCE_TX_SPARE_SPACE) {
		DBRUNIF((ifp->if_flags & IFF_OACTIVE),
			DBPRINT(sc, BCE_WARN_SEND,
				"%s(): Open TX chain! %d/%d (used/total)\n",
				__func__, sc->used_tx_bd, sc->max_tx_bd));
		ifp->if_flags &= ~IFF_OACTIVE;
	}
	sc->tx_cons = sw_tx_cons;
}


/****************************************************************************/
/* Disables interrupt generation.                                           */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
 */
/****************************************************************************/
static void
bce_disable_intr(struct bce_softc *sc)
{
	/* Mask interrupts; the read-back flushes the posted write. */
	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, BCE_PCICFG_INT_ACK_CMD_MASK_INT);
	REG_RD(sc, BCE_PCICFG_INT_ACK_CMD);
	lwkt_serialize_handler_disable(sc->arpcom.ac_if.if_serializer);
}


/****************************************************************************/
/* Enables interrupt generation.                                            */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_enable_intr(struct bce_softc *sc)
{
	uint32_t val;

	lwkt_serialize_handler_enable(sc->arpcom.ac_if.if_serializer);

	/* Ack up to the last seen status index, then unmask interrupts. */
	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
	       BCE_PCICFG_INT_ACK_CMD_INDEX_VALID |
	       BCE_PCICFG_INT_ACK_CMD_MASK_INT | sc->last_status_idx);

	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
	       BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx);

	/* Force an immediate coalescing pass so pending work is noticed. */
	val = REG_RD(sc, BCE_HC_COMMAND);
	REG_WR(sc, BCE_HC_COMMAND, val | BCE_HC_COMMAND_COAL_NOW);
}


/****************************************************************************/
/* Handles controller initialization.                                       */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_init(void *xsc)
{
	struct bce_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t ether_mtu;
	int error;

	ASSERT_SERIALIZED(ifp->if_serializer);

	/* Check if the driver is still running and bail out if it is. */
	if (ifp->if_flags & IFF_RUNNING)
		return;

	bce_stop(sc);

	error = bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
	if (error) {
		if_printf(ifp, "Controller reset failed!\n");
		goto back;
	}

	error = bce_chipinit(sc);
	if (error) {
		if_printf(ifp, "Controller initialization failed!\n");
		goto back;
	}

	error = bce_blockinit(sc);
	if (error) {
		if_printf(ifp, "Block initialization failed!\n");
		goto back;
	}

	/* Load our MAC address. */
	bcopy(IF_LLADDR(ifp), sc->eaddr, ETHER_ADDR_LEN);
	bce_set_mac_addr(sc);

	/* Calculate and program the Ethernet MTU size. */
	ether_mtu = ETHER_HDR_LEN + EVL_ENCAPLEN + ifp->if_mtu + ETHER_CRC_LEN;

	DBPRINT(sc, BCE_INFO, "%s(): setting mtu = %d\n", __func__, ether_mtu);

	/*
	 * Program the mtu, enabling jumbo frame
	 * support if necessary.  Also set the mbuf
	 * allocation count for RX frames.
	 */
	if (ether_mtu > ETHER_MAX_LEN + EVL_ENCAPLEN) {
#ifdef notyet
		REG_WR(sc, BCE_EMAC_RX_MTU_SIZE,
		       min(ether_mtu, BCE_MAX_JUMBO_ETHER_MTU) |
		       BCE_EMAC_RX_MTU_SIZE_JUMBO_ENA);
		sc->mbuf_alloc_size = MJUM9BYTES;
#else
		panic("jumbo buffer is not supported yet\n");
#endif
	} else {
		REG_WR(sc, BCE_EMAC_RX_MTU_SIZE, ether_mtu);
		sc->mbuf_alloc_size = MCLBYTES;
	}

	/* Calculate the RX Ethernet frame size for rx_bd's. */
	sc->max_frame_size = sizeof(struct l2_fhdr) + 2 + ether_mtu + 8;

	DBPRINT(sc, BCE_INFO,
		"%s(): mclbytes = %d, mbuf_alloc_size = %d, "
		"max_frame_size = %d\n",
		__func__, (int)MCLBYTES, sc->mbuf_alloc_size,
		sc->max_frame_size);

	/* Program appropriate promiscuous/multicast filtering. */
	bce_set_rx_mode(sc);

	/* Init RX buffer descriptor chain. */
	bce_init_rx_chain(sc);	/* XXX return value */

	/* Init TX buffer descriptor chain. */
	bce_init_tx_chain(sc);	/* XXX return value */

#ifdef DEVICE_POLLING
	/* Disable interrupts if we are polling. */
	if (ifp->if_flags & IFF_POLLING) {
		bce_disable_intr(sc);

		REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
		       (1 << 16) | sc->bce_rx_quick_cons_trip);
		REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
		       (1 << 16) | sc->bce_tx_quick_cons_trip);
	} else
#endif
	/* Enable host interrupts. */
	bce_enable_intr(sc);

	bce_ifmedia_upd(ifp);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	callout_reset(&sc->bce_stat_ch, hz, bce_tick, sc);
back:
	if (error)
		bce_stop(sc);
}


/****************************************************************************/
/* Initialize the controller just enough so that any management firmware    */
/* running on the device will continue to operate correctly.                */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_mgmt_init(struct bce_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t val;

	/* Check if the driver is still running and bail out if it is. */
	if (ifp->if_flags & IFF_RUNNING)
		return;

	/* Initialize the on-board CPUs */
	bce_init_cpus(sc);

	/* Set the page size and clear the RV2P processor stall bits. */
	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(sc, BCE_RV2P_CONFIG, val);

	/* Enable all critical blocks in the MAC. */
	REG_WR(sc, BCE_MISC_ENABLE_SET_BITS,
	       BCE_MISC_ENABLE_SET_BITS_RX_V2P_ENABLE |
	       BCE_MISC_ENABLE_SET_BITS_RX_DMA_ENABLE |
	       BCE_MISC_ENABLE_SET_BITS_COMPLETION_ENABLE);
	REG_RD(sc, BCE_MISC_ENABLE_SET_BITS);
	DELAY(20);

	bce_ifmedia_upd(ifp);
}


/****************************************************************************/
/* Encapsulates an mbuf cluster into the tx_bd chain structure and makes    */
/* the memory visible to the controller.                                    */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
static int
bce_encap(struct bce_softc *sc, struct mbuf **m_head)
{
	struct bce_dmamap_arg ctx;
	bus_dma_segment_t segs[BCE_MAX_SEGMENTS];
	bus_dmamap_t map, tmp_map;
	struct mbuf *m0 = *m_head;
	struct tx_bd *txbd = NULL;
	uint16_t vlan_tag = 0, flags = 0;
	uint16_t chain_prod, chain_prod_start, prod;
	uint32_t prod_bseq;
	int i, error, maxsegs;
#ifdef BCE_DEBUG
	uint16_t debug_prod;
#endif

	/* Transfer any checksum offload flags to the bd. */
	if (m0->m_pkthdr.csum_flags) {
		if (m0->m_pkthdr.csum_flags & CSUM_IP)
			flags |= TX_BD_FLAGS_IP_CKSUM;
		if (m0->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
			flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}

	/* Transfer any VLAN tags to the bd. */
	if (m0->m_flags & M_VLANTAG) {
		flags |= TX_BD_FLAGS_VLAN_TAG;
		vlan_tag = m0->m_pkthdr.ether_vlantag;
	}

	prod = sc->tx_prod;
	chain_prod_start = chain_prod = TX_CHAIN_IDX(prod);

	/* Map the mbuf into DMAable memory.
 */
	map = sc->tx_mbuf_map[chain_prod_start];

	maxsegs = sc->max_tx_bd - sc->used_tx_bd;
	KASSERT(maxsegs >= BCE_TX_SPARE_SPACE,
		("not enough segements %d\n", maxsegs));
	if (maxsegs > BCE_MAX_SEGMENTS)
		maxsegs = BCE_MAX_SEGMENTS;

	/* Map the mbuf into our DMA address space. */
	ctx.bce_maxsegs = maxsegs;
	ctx.bce_segs = segs;
	error = bus_dmamap_load_mbuf(sc->tx_mbuf_tag, map, m0,
				     bce_dma_map_mbuf, &ctx, BUS_DMA_NOWAIT);
	if (error == EFBIG || ctx.bce_maxsegs == 0) {
		DBPRINT(sc, BCE_WARN, "%s(): fragmented mbuf\n", __func__);
		DBRUNIF(1, bce_dump_mbuf(sc, m0););

		/* Too many segments; linearize the mbuf and retry once. */
		m0 = m_defrag(*m_head, MB_DONTWAIT);
		if (m0 == NULL) {
			error = ENOBUFS;
			goto back;
		}
		*m_head = m0;

		ctx.bce_maxsegs = maxsegs;
		ctx.bce_segs = segs;
		error = bus_dmamap_load_mbuf(sc->tx_mbuf_tag, map, m0,
					     bce_dma_map_mbuf, &ctx,
					     BUS_DMA_NOWAIT);
		if (error || ctx.bce_maxsegs == 0) {
			if_printf(&sc->arpcom.ac_if,
				  "Error mapping mbuf into TX chain\n");
			if (error == 0)
				error = EFBIG;
			goto back;
		}
	} else if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "Error mapping mbuf into TX chain\n");
		goto back;
	}

	/* prod points to an empty tx_bd at this point. */
	prod_bseq = sc->tx_prod_bseq;

#ifdef BCE_DEBUG
	debug_prod = chain_prod;
#endif

	DBPRINT(sc, BCE_INFO_SEND,
		"%s(): Start: prod = 0x%04X, chain_prod = %04X, "
		"prod_bseq = 0x%08X\n",
		__func__, prod, chain_prod, prod_bseq);

	/*
	 * Cycle through each mbuf segment that makes up
	 * the outgoing frame, gathering the mapping info
	 * for that segment and creating a tx_bd for
	 * the mbuf.
	 */
	for (i = 0; i < ctx.bce_maxsegs; i++) {
		chain_prod = TX_CHAIN_IDX(prod);
		txbd= &sc->tx_bd_chain[TX_PAGE(chain_prod)][TX_IDX(chain_prod)];

		txbd->tx_bd_haddr_lo = htole32(BCE_ADDR_LO(segs[i].ds_addr));
		txbd->tx_bd_haddr_hi = htole32(BCE_ADDR_HI(segs[i].ds_addr));
		txbd->tx_bd_mss_nbytes = htole16(segs[i].ds_len);
		txbd->tx_bd_vlan_tag = htole16(vlan_tag);
		txbd->tx_bd_flags = htole16(flags);
		prod_bseq += segs[i].ds_len;
		if (i == 0)
			txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_START);
		prod = NEXT_TX_BD(prod);
	}

	/* Set the END flag on the last TX buffer descriptor. */
	txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_END);

	DBRUN(BCE_EXCESSIVE_SEND,
	      bce_dump_tx_chain(sc, debug_prod, ctx.bce_maxsegs));

	DBPRINT(sc, BCE_INFO_SEND,
		"%s(): End: prod = 0x%04X, chain_prod = %04X, "
		"prod_bseq = 0x%08X\n",
		__func__, prod, chain_prod, prod_bseq);

	bus_dmamap_sync(sc->tx_mbuf_tag, map, BUS_DMASYNC_PREWRITE);

	/*
	 * Ensure that the mbuf pointer for this transmission
	 * is placed at the array index of the last
	 * descriptor in this chain.  This is done
	 * because a single map is used for all
	 * segments of the mbuf and we don't want to
	 * unload the map before all of the segments
	 * have been freed.
	 */
	sc->tx_mbuf_ptr[chain_prod] = m0;

	/* Swap maps so the spare map stays at the starting index. */
	tmp_map = sc->tx_mbuf_map[chain_prod];
	sc->tx_mbuf_map[chain_prod] = map;
	sc->tx_mbuf_map[chain_prod_start] = tmp_map;

	sc->used_tx_bd += ctx.bce_maxsegs;

	/* Update some debug statistic counters */
	DBRUNIF((sc->used_tx_bd > sc->tx_hi_watermark),
		sc->tx_hi_watermark = sc->used_tx_bd);
	DBRUNIF((sc->used_tx_bd == sc->max_tx_bd), sc->tx_full_count++);
	DBRUNIF(1, sc->tx_mbuf_alloc++);

	DBRUN(BCE_VERBOSE_SEND,
	      bce_dump_tx_mbuf_chain(sc, chain_prod, ctx.bce_maxsegs));

	/* prod points to the next free tx_bd at this point. */
	sc->tx_prod = prod;
	sc->tx_prod_bseq = prod_bseq;
back:
	if (error) {
		/* On failure the caller's mbuf is consumed (freed) here. */
		m_freem(*m_head);
		*m_head = NULL;
	}
	return error;
}


/****************************************************************************/
/* Main transmit routine when called from another routine with a lock.      */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_start(struct ifnet *ifp)
{
	struct bce_softc *sc = ifp->if_softc;
	int count = 0;

	ASSERT_SERIALIZED(ifp->if_serializer);

	/* If there's no link or the transmit queue is empty then just exit. */
	if (!sc->bce_link) {
		ifq_purge(&ifp->if_snd);
		return;
	}

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	DBPRINT(sc, BCE_INFO_SEND,
		"%s(): Start: tx_prod = 0x%04X, tx_chain_prod = %04X, "
		"tx_prod_bseq = 0x%08X\n",
		__func__,
		sc->tx_prod, TX_CHAIN_IDX(sc->tx_prod), sc->tx_prod_bseq);

	for (;;) {
		struct mbuf *m_head;

		/*
		 * We keep BCE_TX_SPARE_SPACE entries, so bce_encap() is
		 * unlikely to fail.
		 */
		if (sc->max_tx_bd - sc->used_tx_bd < BCE_TX_SPARE_SPACE) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* Check for any frames to send. */
		m_head = ifq_dequeue(&ifp->if_snd, NULL);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring.  If bce_encap()
		 * fails it has already freed the mbuf, so just set the
		 * OACTIVE flag and wait for the NIC to drain the chain.
		 */
		if (bce_encap(sc, &m_head)) {
			ifp->if_flags |= IFF_OACTIVE;
			DBPRINT(sc, BCE_INFO_SEND,
				"TX chain is closed for business! "
				"Total tx_bd used = %d\n",
				sc->used_tx_bd);
			break;
		}

		count++;

		/* Send a copy of the frame to any BPF listeners. */
		ETHER_BPF_MTAP(ifp, m_head);
	}

	if (count == 0) {
		/* no packets were dequeued */
		DBPRINT(sc, BCE_VERBOSE_SEND,
			"%s(): No packets were dequeued\n", __func__);
		return;
	}

	DBPRINT(sc, BCE_INFO_SEND,
		"%s(): End: tx_prod = 0x%04X, tx_chain_prod = 0x%04X, "
		"tx_prod_bseq = 0x%08X\n",
		__func__,
		sc->tx_prod, TX_CHAIN_IDX(sc->tx_prod), sc->tx_prod_bseq);

	/* Start the transmit. */
	REG_WR16(sc, MB_TX_CID_ADDR + BCE_L2CTX_TX_HOST_BIDX, sc->tx_prod);
	REG_WR(sc, MB_TX_CID_ADDR + BCE_L2CTX_TX_HOST_BSEQ, sc->tx_prod_bseq);

	/* Set the tx timeout. */
	ifp->if_timer = BCE_TX_TIMEOUT;
}


/****************************************************************************/
/* Handles any IOCTL calls from the operating system.                       */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
static int
bce_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
{
	struct bce_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct mii_data *mii;
	int mask, error = 0;

	ASSERT_SERIALIZED(ifp->if_serializer);

	switch(command) {
	case SIOCSIFMTU:
		/* Check that the MTU setting is supported. */
		if (ifr->ifr_mtu < BCE_MIN_MTU ||
#ifdef notyet
		    ifr->ifr_mtu > BCE_MAX_JUMBO_MTU
#else
		    ifr->ifr_mtu > ETHERMTU
#endif
		   ) {
			error = EINVAL;
			break;
		}

		DBPRINT(sc, BCE_INFO, "Setting new MTU of %d\n", ifr->ifr_mtu);

		ifp->if_mtu = ifr->ifr_mtu;
		ifp->if_flags &= ~IFF_RUNNING;	/* Force reinitialize */
		bce_init(sc);
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING) {
				mask = ifp->if_flags ^ sc->bce_if_flags;

				if (mask & (IFF_PROMISC | IFF_ALLMULTI))
					bce_set_rx_mode(sc);
			} else {
				bce_init(sc);
			}
		} else if (ifp->if_flags & IFF_RUNNING) {
			bce_stop(sc);
		}
		sc->bce_if_flags = ifp->if_flags;
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING)
			bce_set_rx_mode(sc);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		DBPRINT(sc, BCE_VERBOSE, "bce_phy_flags = 0x%08X\n",
			sc->bce_phy_flags);
		DBPRINT(sc, BCE_VERBOSE, "Copper media set/get\n");

		mii = device_get_softc(sc->bce_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;

	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		DBPRINT(sc, BCE_INFO, "Received SIOCSIFCAP = 0x%08X\n",
			(uint32_t) mask);

		if (mask & IFCAP_HWCSUM) {
			ifp->if_capenable ^= IFCAP_HWCSUM;
			if (IFCAP_HWCSUM & ifp->if_capenable)
				ifp->if_hwassist = BCE_IF_HWASSIST;
			else
				ifp->if_hwassist = 0;
		}
		break;

	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}
	return error;
}


/****************************************************************************/
/* Transmit timeout handler.                                                */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_watchdog(struct ifnet *ifp)
{
	struct bce_softc *sc = ifp->if_softc;

	ASSERT_SERIALIZED(ifp->if_serializer);

	DBRUN(BCE_VERBOSE_SEND,
	      bce_dump_driver_state(sc);
	      bce_dump_status_block(sc));

	/*
	 * If we are in this routine because of pause frames, then
	 * don't reset the hardware.
	 */
	if (REG_RD(sc, BCE_EMAC_TX_STATUS) & BCE_EMAC_TX_STATUS_XOFFED)
		return;

	if_printf(ifp, "Watchdog timeout occurred, resetting!\n");

	/* DBRUN(BCE_FATAL, bce_breakpoint(sc)); */

	ifp->if_flags &= ~IFF_RUNNING;	/* Force reinitialize */
	bce_init(sc);

	ifp->if_oerrors++;

	/* Restart transmission of any still-queued frames. */
	if (!ifq_is_empty(&ifp->if_snd))
		if_devstart(ifp);
}


#ifdef DEVICE_POLLING

/*
 * polling(4) handler: registers/deregisters polling mode and services
 * the RX/TX rings; attention bits are checked on POLL_AND_CHECK_STATUS.
 */
static void
bce_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct bce_softc *sc = ifp->if_softc;
	struct status_block *sblk = sc->status_block;
	uint16_t hw_tx_cons, hw_rx_cons;

	ASSERT_SERIALIZED(ifp->if_serializer);

	switch (cmd) {
	case POLL_REGISTER:
		bce_disable_intr(sc);

		REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
		       (1 << 16) | sc->bce_rx_quick_cons_trip);
		REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
		       (1 << 16) | sc->bce_tx_quick_cons_trip);
		return;
	case POLL_DEREGISTER:
		bce_enable_intr(sc);

		/* Restore the interrupt-mode coalescing parameters. */
		REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
		       (sc->bce_tx_quick_cons_trip_int << 16) |
		       sc->bce_tx_quick_cons_trip);
		REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
		       (sc->bce_rx_quick_cons_trip_int << 16) |
		       sc->bce_rx_quick_cons_trip);
		return;
	default:
		break;
	}

	bus_dmamap_sync(sc->status_tag, sc->status_map, BUS_DMASYNC_POSTREAD);

	if (cmd == POLL_AND_CHECK_STATUS) {
		uint32_t status_attn_bits;

		status_attn_bits = sblk->status_attn_bits;

		DBRUNIF(DB_RANDOMTRUE(bce_debug_unexpected_attention),
			if_printf(ifp,
			"Simulating unexpected status attention bit set.");
			status_attn_bits |= STATUS_ATTN_BITS_PARITY_ERROR);

		/* Was it a link change interrupt? */
		if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
		    (sblk->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE))
			bce_phy_intr(sc);

		/*
		 * If any other attention is asserted then
		 * the chip is toast.
		 */
		if ((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) !=
		    (sblk->status_attn_bits_ack &
		     ~STATUS_ATTN_BITS_LINK_STATE)) {
			DBRUN(1, sc->unexpected_attentions++);

			if_printf(ifp, "Fatal attention detected: 0x%08X\n",
				  sblk->status_attn_bits);

			DBRUN(BCE_FATAL,
			      if (bce_debug_unexpected_attention == 0)
				      bce_breakpoint(sc));

			/* Reinitialize the controller to recover. */
			bce_init(sc);
			return;
		}
	}

	hw_rx_cons = bce_get_hw_rx_cons(sc);
	hw_tx_cons = bce_get_hw_tx_cons(sc);

	/* Check for any completed RX frames. */
	if (hw_rx_cons != sc->hw_rx_cons)
		bce_rx_intr(sc, count);

	/* Check for any completed TX frames. */
	if (hw_tx_cons != sc->hw_tx_cons)
		bce_tx_intr(sc);

	bus_dmamap_sync(sc->status_tag, sc->status_map, BUS_DMASYNC_PREWRITE);

	/* Check for new frames to transmit. */
	if (!ifq_is_empty(&ifp->if_snd))
		if_devstart(ifp);
}

#endif	/* DEVICE_POLLING */


/*
 * Interrupt handler.
 */
/****************************************************************************/
/* Main interrupt entry point.  Verifies that the controller generated the  */
/* interrupt and then calls a separate routine to handle the various        */
/* interrupt causes (PHY, TX, RX).                                          */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
static void
bce_intr(void *xsc)
{
	struct bce_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct status_block *sblk;
	uint16_t hw_rx_cons, hw_tx_cons;

	ASSERT_SERIALIZED(ifp->if_serializer);

	DBPRINT(sc, BCE_EXCESSIVE, "Entering %s()\n", __func__);
	DBRUNIF(1, sc->interrupts_generated++);

	bus_dmamap_sync(sc->status_tag, sc->status_map, BUS_DMASYNC_POSTREAD);
	sblk = sc->status_block;

	/*
	 * If the hardware status block index matches the last value
	 * read by the driver and we haven't asserted our interrupt
	 * then there's nothing to do.
	 */
	if (sblk->status_idx == sc->last_status_idx &&
	    (REG_RD(sc, BCE_PCICFG_MISC_STATUS) &
	     BCE_PCICFG_MISC_STATUS_INTA_VALUE))
		return;

	/* Ack the interrupt and stop others from occurring. */
	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
	       BCE_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
	       BCE_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Check if the hardware has finished any work. */
	hw_rx_cons = bce_get_hw_rx_cons(sc);
	hw_tx_cons = bce_get_hw_tx_cons(sc);

	/* Keep processing data as long as there is work to do. */
	for (;;) {
		uint32_t status_attn_bits;

		status_attn_bits = sblk->status_attn_bits;

		DBRUNIF(DB_RANDOMTRUE(bce_debug_unexpected_attention),
			if_printf(ifp,
			"Simulating unexpected status attention bit set.");
			status_attn_bits |= STATUS_ATTN_BITS_PARITY_ERROR);

		/* Was it a link change interrupt? */
		if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
		    (sblk->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE))
			bce_phy_intr(sc);

		/*
		 * If any other attention is asserted then
		 * the chip is toast.
		 */
		if ((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) !=
		    (sblk->status_attn_bits_ack &
		     ~STATUS_ATTN_BITS_LINK_STATE)) {
			DBRUN(1, sc->unexpected_attentions++);

			if_printf(ifp, "Fatal attention detected: 0x%08X\n",
				  sblk->status_attn_bits);

			DBRUN(BCE_FATAL,
			      if (bce_debug_unexpected_attention == 0)
				      bce_breakpoint(sc));

			/* Reinitialize the controller to recover. */
			bce_init(sc);
			return;
		}

		/* Check for any completed RX frames. */
		if (hw_rx_cons != sc->hw_rx_cons)
			bce_rx_intr(sc, -1);

		/* Check for any completed TX frames. */
		if (hw_tx_cons != sc->hw_tx_cons)
			bce_tx_intr(sc);

		/*
		 * Save the status block index value
		 * for use during the next interrupt.
		 */
		sc->last_status_idx = sblk->status_idx;

		/*
		 * Prevent speculative reads from getting
		 * ahead of the status block.
		 */
		bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
		    BUS_SPACE_BARRIER_READ);

		/*
		 * If there's no work left then exit the
		 * interrupt service routine.
		 */
		hw_rx_cons = bce_get_hw_rx_cons(sc);
		hw_tx_cons = bce_get_hw_tx_cons(sc);
		if ((hw_rx_cons == sc->hw_rx_cons) && (hw_tx_cons == sc->hw_tx_cons))
			break;
	}

	bus_dmamap_sync(sc->status_tag, sc->status_map, BUS_DMASYNC_PREWRITE);

	/* Re-enable interrupts. */
	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
	       BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx |
	       BCE_PCICFG_INT_ACK_CMD_MASK_INT);
	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
	       BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx);

	if (sc->bce_coalchg_mask)
		bce_coal_change(sc);

	/* Handle any frames that arrived while handling the interrupt. */
	if (!ifq_is_empty(&ifp->if_snd))
		if_devstart(ifp);
}


/****************************************************************************/
/* Programs the various packet receive modes (broadcast and multicast).     */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_set_rx_mode(struct bce_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct ifmultiaddr *ifma;
	uint32_t hashes[NUM_MC_HASH_REGISTERS] = { 0, 0, 0, 0, 0, 0, 0, 0 };
	uint32_t rx_mode, sort_mode;
	int h, i;

	ASSERT_SERIALIZED(ifp->if_serializer);

	/* Initialize receive mode default settings. */
	rx_mode = sc->rx_mode &
		  ~(BCE_EMAC_RX_MODE_PROMISCUOUS |
		    BCE_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BCE_RPM_SORT_USER0_BC_EN;

	/*
	 * ASF/IPMI/UMP firmware requires that VLAN tag stripping
	 * be enabled.
	 */
	if (!(BCE_IF_CAPABILITIES & IFCAP_VLAN_HWTAGGING) &&
	    !(sc->bce_flags & BCE_MFW_ENABLE_FLAG))
		rx_mode |= BCE_EMAC_RX_MODE_KEEP_VLAN_TAG;

	/*
	 * Check for promiscuous, all multicast, or selected
	 * multicast address filtering.
	 */
	if (ifp->if_flags & IFF_PROMISC) {
		DBPRINT(sc, BCE_INFO, "Enabling promiscuous mode.\n");

		/* Enable promiscuous mode. */
		rx_mode |= BCE_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BCE_RPM_SORT_USER0_PROM_EN;
	} else if (ifp->if_flags & IFF_ALLMULTI) {
		DBPRINT(sc, BCE_INFO, "Enabling all multicast mode.\n");

		/* Enable all multicast addresses. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BCE_RPM_SORT_USER0_MC_EN;
	} else {
		/* Accept one or more multicast(s). */
		DBPRINT(sc, BCE_INFO, "Enabling selective multicast mode.\n");

		/* Fold each multicast address into an 8-bit hash bucket. */
		LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			h = ether_crc32_le(
			    LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
			    ETHER_ADDR_LEN) & 0xFF;
			hashes[(h & 0xE0) >> 5] |= 1 << (h & 0x1F);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4),
			       hashes[i]);
		}
		sort_mode |= BCE_RPM_SORT_USER0_MC_HSH_EN;
	}

	/* Only make changes if the receive mode has actually changed. */
	if (rx_mode != sc->rx_mode) {
		DBPRINT(sc, BCE_VERBOSE, "Enabling new receive mode: 0x%08X\n",
			rx_mode);

		sc->rx_mode = rx_mode;
		REG_WR(sc, BCE_EMAC_RX_MODE, rx_mode);
	}

	/* Disable and clear the existing sort before enabling a new sort. */
	REG_WR(sc, BCE_RPM_SORT_USER0, 0x0);
	REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode);
	REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode | BCE_RPM_SORT_USER0_ENA);
}


/****************************************************************************/
/* Called periodically to update statistics from the controller's           */
/* statistics block.                                                        */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
 */
/****************************************************************************/
static void
bce_stats_update(struct bce_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct statistics_block *stats = sc->stats_block;

	DBPRINT(sc, BCE_EXCESSIVE, "Entering %s()\n", __func__);

	/* Caller must hold the interface serializer. */
	ASSERT_SERIALIZED(ifp->if_serializer);

	/*
	 * Update the interface statistics from the hardware statistics.
	 */
	ifp->if_collisions = (u_long)stats->stat_EtherStatsCollisions;

	/* Input errors: runts, giants, controller drops, frame errors. */
	ifp->if_ierrors = (u_long)stats->stat_EtherStatsUndersizePkts +
	    (u_long)stats->stat_EtherStatsOverrsizePkts +
	    (u_long)stats->stat_IfInMBUFDiscards +
	    (u_long)stats->stat_Dot3StatsAlignmentErrors +
	    (u_long)stats->stat_Dot3StatsFCSErrors;

	ifp->if_oerrors =
	(u_long)stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors +
	    (u_long)stats->stat_Dot3StatsExcessiveCollisions +
	    (u_long)stats->stat_Dot3StatsLateCollisions;

	/*
	 * Certain controllers don't report carrier sense errors correctly.
	 * See errata E11_5708CA0_1165.
	 */
	if (!(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) &&
	    !(BCE_CHIP_ID(sc) == BCE_CHIP_ID_5708_A0)) {
		ifp->if_oerrors +=
		    (u_long)stats->stat_Dot3StatsCarrierSenseErrors;
	}

	/*
	 * Update the sysctl statistics from the hardware statistics.
	 *
	 * The 64-bit ("HC") counters are exported by the controller as
	 * separate high/low 32-bit halves; recombine them here.
	 */
	sc->stat_IfHCInOctets =
	    ((uint64_t)stats->stat_IfHCInOctets_hi << 32) +
	    (uint64_t)stats->stat_IfHCInOctets_lo;

	sc->stat_IfHCInBadOctets =
	    ((uint64_t)stats->stat_IfHCInBadOctets_hi << 32) +
	    (uint64_t)stats->stat_IfHCInBadOctets_lo;

	sc->stat_IfHCOutOctets =
	    ((uint64_t)stats->stat_IfHCOutOctets_hi << 32) +
	    (uint64_t)stats->stat_IfHCOutOctets_lo;

	sc->stat_IfHCOutBadOctets =
	    ((uint64_t)stats->stat_IfHCOutBadOctets_hi << 32) +
	    (uint64_t)stats->stat_IfHCOutBadOctets_lo;

	sc->stat_IfHCInUcastPkts =
	    ((uint64_t)stats->stat_IfHCInUcastPkts_hi << 32) +
	    (uint64_t)stats->stat_IfHCInUcastPkts_lo;

	sc->stat_IfHCInMulticastPkts =
	    ((uint64_t)stats->stat_IfHCInMulticastPkts_hi << 32) +
	    (uint64_t)stats->stat_IfHCInMulticastPkts_lo;

	sc->stat_IfHCInBroadcastPkts =
	    ((uint64_t)stats->stat_IfHCInBroadcastPkts_hi << 32) +
	    (uint64_t)stats->stat_IfHCInBroadcastPkts_lo;

	sc->stat_IfHCOutUcastPkts =
	    ((uint64_t)stats->stat_IfHCOutUcastPkts_hi << 32) +
	    (uint64_t)stats->stat_IfHCOutUcastPkts_lo;

	sc->stat_IfHCOutMulticastPkts =
	    ((uint64_t)stats->stat_IfHCOutMulticastPkts_hi << 32) +
	    (uint64_t)stats->stat_IfHCOutMulticastPkts_lo;

	sc->stat_IfHCOutBroadcastPkts =
	    ((uint64_t)stats->stat_IfHCOutBroadcastPkts_hi << 32) +
	    (uint64_t)stats->stat_IfHCOutBroadcastPkts_lo;

	/* The remaining counters are plain 32-bit values; copy verbatim. */
	sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors =
	    stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors;

	sc->stat_Dot3StatsCarrierSenseErrors =
	    stats->stat_Dot3StatsCarrierSenseErrors;

	sc->stat_Dot3StatsFCSErrors =
	    stats->stat_Dot3StatsFCSErrors;

	sc->stat_Dot3StatsAlignmentErrors =
	    stats->stat_Dot3StatsAlignmentErrors;

	sc->stat_Dot3StatsSingleCollisionFrames =
	    stats->stat_Dot3StatsSingleCollisionFrames;

	sc->stat_Dot3StatsMultipleCollisionFrames =
	    stats->stat_Dot3StatsMultipleCollisionFrames;

	sc->stat_Dot3StatsDeferredTransmissions =
	    stats->stat_Dot3StatsDeferredTransmissions;

	sc->stat_Dot3StatsExcessiveCollisions =
	    stats->stat_Dot3StatsExcessiveCollisions;

	sc->stat_Dot3StatsLateCollisions =
	    stats->stat_Dot3StatsLateCollisions;

	sc->stat_EtherStatsCollisions =
	    stats->stat_EtherStatsCollisions;

	sc->stat_EtherStatsFragments =
	    stats->stat_EtherStatsFragments;

	sc->stat_EtherStatsJabbers =
	    stats->stat_EtherStatsJabbers;

	sc->stat_EtherStatsUndersizePkts =
	    stats->stat_EtherStatsUndersizePkts;

	sc->stat_EtherStatsOverrsizePkts =
	    stats->stat_EtherStatsOverrsizePkts;

	sc->stat_EtherStatsPktsRx64Octets =
	    stats->stat_EtherStatsPktsRx64Octets;

	sc->stat_EtherStatsPktsRx65Octetsto127Octets =
	    stats->stat_EtherStatsPktsRx65Octetsto127Octets;

	sc->stat_EtherStatsPktsRx128Octetsto255Octets =
	    stats->stat_EtherStatsPktsRx128Octetsto255Octets;

	sc->stat_EtherStatsPktsRx256Octetsto511Octets =
	    stats->stat_EtherStatsPktsRx256Octetsto511Octets;

	sc->stat_EtherStatsPktsRx512Octetsto1023Octets =
	    stats->stat_EtherStatsPktsRx512Octetsto1023Octets;

	sc->stat_EtherStatsPktsRx1024Octetsto1522Octets =
	    stats->stat_EtherStatsPktsRx1024Octetsto1522Octets;

	sc->stat_EtherStatsPktsRx1523Octetsto9022Octets =
	    stats->stat_EtherStatsPktsRx1523Octetsto9022Octets;

	sc->stat_EtherStatsPktsTx64Octets =
	    stats->stat_EtherStatsPktsTx64Octets;

	sc->stat_EtherStatsPktsTx65Octetsto127Octets =
	    stats->stat_EtherStatsPktsTx65Octetsto127Octets;

	sc->stat_EtherStatsPktsTx128Octetsto255Octets =
	    stats->stat_EtherStatsPktsTx128Octetsto255Octets;

	sc->stat_EtherStatsPktsTx256Octetsto511Octets =
	    stats->stat_EtherStatsPktsTx256Octetsto511Octets;

	sc->stat_EtherStatsPktsTx512Octetsto1023Octets =
	    stats->stat_EtherStatsPktsTx512Octetsto1023Octets;

	sc->stat_EtherStatsPktsTx1024Octetsto1522Octets =
	    stats->stat_EtherStatsPktsTx1024Octetsto1522Octets;

	sc->stat_EtherStatsPktsTx1523Octetsto9022Octets =
	    stats->stat_EtherStatsPktsTx1523Octetsto9022Octets;

	sc->stat_XonPauseFramesReceived =
	    stats->stat_XonPauseFramesReceived;

	sc->stat_XoffPauseFramesReceived =
	    stats->stat_XoffPauseFramesReceived;

	sc->stat_OutXonSent =
	    stats->stat_OutXonSent;

	sc->stat_OutXoffSent =
	    stats->stat_OutXoffSent;

	sc->stat_FlowControlDone =
	    stats->stat_FlowControlDone;

	sc->stat_MacControlFramesReceived =
	    stats->stat_MacControlFramesReceived;

	sc->stat_XoffStateEntered =
	    stats->stat_XoffStateEntered;

	sc->stat_IfInFramesL2FilterDiscards =
	    stats->stat_IfInFramesL2FilterDiscards;

	sc->stat_IfInRuleCheckerDiscards =
	    stats->stat_IfInRuleCheckerDiscards;

	sc->stat_IfInFTQDiscards =
	    stats->stat_IfInFTQDiscards;

	sc->stat_IfInMBUFDiscards =
	    stats->stat_IfInMBUFDiscards;

	sc->stat_IfInRuleCheckerP4Hit =
	    stats->stat_IfInRuleCheckerP4Hit;

	sc->stat_CatchupInRuleCheckerDiscards =
	    stats->stat_CatchupInRuleCheckerDiscards;

	sc->stat_CatchupInFTQDiscards =
	    stats->stat_CatchupInFTQDiscards;

	sc->stat_CatchupInMBUFDiscards =
	    stats->stat_CatchupInMBUFDiscards;

	sc->stat_CatchupInRuleCheckerP4Hit =
	    stats->stat_CatchupInRuleCheckerP4Hit;

	/*
	 * NOTE(review): 0x120084 looks like a COM processor scratchpad
	 * location holding the "no RX buffer" count — confirm against the
	 * controller firmware documentation.
	 */
	sc->com_no_buffers = REG_RD_IND(sc, 0x120084);

	DBPRINT(sc, BCE_EXCESSIVE, "Exiting %s()\n", __func__);
}


/****************************************************************************/
/* Periodic function to perform maintenance tasks.                          */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
 */
/****************************************************************************/
static void
bce_tick_serialized(struct bce_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii;
	uint32_t msg;

	/* Caller (bce_tick) must hold the interface serializer. */
	ASSERT_SERIALIZED(ifp->if_serializer);

	/*
	 * Tell the firmware that the driver is still running.  The pulse
	 * is written to the shared-memory mailbox BCE_DRV_PULSE_MB; debug
	 * builds write the "always alive" code instead of a sequence
	 * number.
	 */
#ifdef BCE_DEBUG
	msg = (uint32_t)BCE_DRV_MSG_DATA_PULSE_CODE_ALWAYS_ALIVE;
#else
	msg = (uint32_t)++sc->bce_fw_drv_pulse_wr_seq;
#endif
	REG_WR_IND(sc, sc->bce_shmem_base + BCE_DRV_PULSE_MB, msg);

	/* Update the statistics from the hardware statistics block. */
	bce_stats_update(sc);

	/* Schedule the next tick (1 second from now). */
	callout_reset(&sc->bce_stat_ch, hz, bce_tick, sc);

	/* If link is already up then we're done. */
	if (sc->bce_link)
		return;

	mii = device_get_softc(sc->bce_miibus);
	mii_tick(mii);

	/* Check if the link has come up. */
	if (!sc->bce_link && (mii->mii_media_status & IFM_ACTIVE) &&
	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
		sc->bce_link++;
		/* Now that link is up, handle any outstanding TX traffic. */
		if (!ifq_is_empty(&ifp->if_snd))
			if_devstart(ifp);
	}
}


/*
 * Callout wrapper for bce_tick_serialized(): acquires the interface
 * serializer around the real work.
 */
static void
bce_tick(void *xsc)
{
	struct bce_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	bce_tick_serialized(sc);
	lwkt_serialize_exit(ifp->if_serializer);
}


#ifdef BCE_DEBUG
/****************************************************************************/
/* Allows the driver state to be dumped through the sysctl interface.       */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.
*/ 5344 /****************************************************************************/ 5345 static int 5346 bce_sysctl_driver_state(SYSCTL_HANDLER_ARGS) 5347 { 5348 int error; 5349 int result; 5350 struct bce_softc *sc; 5351 5352 result = -1; 5353 error = sysctl_handle_int(oidp, &result, 0, req); 5354 5355 if (error || !req->newptr) 5356 return (error); 5357 5358 if (result == 1) { 5359 sc = (struct bce_softc *)arg1; 5360 bce_dump_driver_state(sc); 5361 } 5362 5363 return error; 5364 } 5365 5366 5367 /****************************************************************************/ 5368 /* Allows the hardware state to be dumped through the sysctl interface. */ 5369 /* */ 5370 /* Returns: */ 5371 /* 0 for success, positive value for failure. */ 5372 /****************************************************************************/ 5373 static int 5374 bce_sysctl_hw_state(SYSCTL_HANDLER_ARGS) 5375 { 5376 int error; 5377 int result; 5378 struct bce_softc *sc; 5379 5380 result = -1; 5381 error = sysctl_handle_int(oidp, &result, 0, req); 5382 5383 if (error || !req->newptr) 5384 return (error); 5385 5386 if (result == 1) { 5387 sc = (struct bce_softc *)arg1; 5388 bce_dump_hw_state(sc); 5389 } 5390 5391 return error; 5392 } 5393 5394 5395 /****************************************************************************/ 5396 /* Provides a sysctl interface to allows dumping the RX chain. */ 5397 /* */ 5398 /* Returns: */ 5399 /* 0 for success, positive value for failure. 
*/ 5400 /****************************************************************************/ 5401 static int 5402 bce_sysctl_dump_rx_chain(SYSCTL_HANDLER_ARGS) 5403 { 5404 int error; 5405 int result; 5406 struct bce_softc *sc; 5407 5408 result = -1; 5409 error = sysctl_handle_int(oidp, &result, 0, req); 5410 5411 if (error || !req->newptr) 5412 return (error); 5413 5414 if (result == 1) { 5415 sc = (struct bce_softc *)arg1; 5416 bce_dump_rx_chain(sc, 0, USABLE_RX_BD); 5417 } 5418 5419 return error; 5420 } 5421 5422 5423 /****************************************************************************/ 5424 /* Provides a sysctl interface to allows dumping the TX chain. */ 5425 /* */ 5426 /* Returns: */ 5427 /* 0 for success, positive value for failure. */ 5428 /****************************************************************************/ 5429 static int 5430 bce_sysctl_dump_tx_chain(SYSCTL_HANDLER_ARGS) 5431 { 5432 int error; 5433 int result; 5434 struct bce_softc *sc; 5435 5436 result = -1; 5437 error = sysctl_handle_int(oidp, &result, 0, req); 5438 5439 if (error || !req->newptr) 5440 return (error); 5441 5442 if (result == 1) { 5443 sc = (struct bce_softc *)arg1; 5444 bce_dump_tx_chain(sc, 0, USABLE_TX_BD); 5445 } 5446 5447 return error; 5448 } 5449 5450 5451 /****************************************************************************/ 5452 /* Provides a sysctl interface to allow reading arbitrary registers in the */ 5453 /* device. DO NOT ENABLE ON PRODUCTION SYSTEMS! */ 5454 /* */ 5455 /* Returns: */ 5456 /* 0 for success, positive value for failure. */ 5457 /****************************************************************************/ 5458 static int 5459 bce_sysctl_reg_read(SYSCTL_HANDLER_ARGS) 5460 { 5461 struct bce_softc *sc; 5462 int error; 5463 uint32_t val, result; 5464 5465 result = -1; 5466 error = sysctl_handle_int(oidp, &result, 0, req); 5467 if (error || (req->newptr == NULL)) 5468 return (error); 5469 5470 /* Make sure the register is accessible. 
*/ 5471 if (result < 0x8000) { 5472 sc = (struct bce_softc *)arg1; 5473 val = REG_RD(sc, result); 5474 if_printf(&sc->arpcom.ac_if, "reg 0x%08X = 0x%08X\n", 5475 result, val); 5476 } else if (result < 0x0280000) { 5477 sc = (struct bce_softc *)arg1; 5478 val = REG_RD_IND(sc, result); 5479 if_printf(&sc->arpcom.ac_if, "reg 0x%08X = 0x%08X\n", 5480 result, val); 5481 } 5482 return (error); 5483 } 5484 5485 5486 /****************************************************************************/ 5487 /* Provides a sysctl interface to allow reading arbitrary PHY registers in */ 5488 /* the device. DO NOT ENABLE ON PRODUCTION SYSTEMS! */ 5489 /* */ 5490 /* Returns: */ 5491 /* 0 for success, positive value for failure. */ 5492 /****************************************************************************/ 5493 static int 5494 bce_sysctl_phy_read(SYSCTL_HANDLER_ARGS) 5495 { 5496 struct bce_softc *sc; 5497 device_t dev; 5498 int error, result; 5499 uint16_t val; 5500 5501 result = -1; 5502 error = sysctl_handle_int(oidp, &result, 0, req); 5503 if (error || (req->newptr == NULL)) 5504 return (error); 5505 5506 /* Make sure the register is accessible. */ 5507 if (result < 0x20) { 5508 sc = (struct bce_softc *)arg1; 5509 dev = sc->bce_dev; 5510 val = bce_miibus_read_reg(dev, sc->bce_phy_addr, result); 5511 if_printf(&sc->arpcom.ac_if, 5512 "phy 0x%02X = 0x%04X\n", result, val); 5513 } 5514 return (error); 5515 } 5516 5517 5518 /****************************************************************************/ 5519 /* Provides a sysctl interface to forcing the driver to dump state and */ 5520 /* enter the debugger. DO NOT ENABLE ON PRODUCTION SYSTEMS! */ 5521 /* */ 5522 /* Returns: */ 5523 /* 0 for success, positive value for failure. 
*/ 5524 /****************************************************************************/ 5525 static int 5526 bce_sysctl_breakpoint(SYSCTL_HANDLER_ARGS) 5527 { 5528 int error; 5529 int result; 5530 struct bce_softc *sc; 5531 5532 result = -1; 5533 error = sysctl_handle_int(oidp, &result, 0, req); 5534 5535 if (error || !req->newptr) 5536 return (error); 5537 5538 if (result == 1) { 5539 sc = (struct bce_softc *)arg1; 5540 bce_breakpoint(sc); 5541 } 5542 5543 return error; 5544 } 5545 #endif 5546 5547 5548 /****************************************************************************/ 5549 /* Adds any sysctl parameters for tuning or debugging purposes. */ 5550 /* */ 5551 /* Returns: */ 5552 /* 0 for success, positive value for failure. */ 5553 /****************************************************************************/ 5554 static void 5555 bce_add_sysctls(struct bce_softc *sc) 5556 { 5557 struct sysctl_ctx_list *ctx; 5558 struct sysctl_oid_list *children; 5559 5560 sysctl_ctx_init(&sc->bce_sysctl_ctx); 5561 sc->bce_sysctl_tree = SYSCTL_ADD_NODE(&sc->bce_sysctl_ctx, 5562 SYSCTL_STATIC_CHILDREN(_hw), 5563 OID_AUTO, 5564 device_get_nameunit(sc->bce_dev), 5565 CTLFLAG_RD, 0, ""); 5566 if (sc->bce_sysctl_tree == NULL) { 5567 device_printf(sc->bce_dev, "can't add sysctl node\n"); 5568 return; 5569 } 5570 5571 ctx = &sc->bce_sysctl_ctx; 5572 children = SYSCTL_CHILDREN(sc->bce_sysctl_tree); 5573 5574 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_bds_int", 5575 CTLTYPE_INT | CTLFLAG_RW, 5576 sc, 0, bce_sysctl_tx_bds_int, "I", 5577 "Send max coalesced BD count during interrupt"); 5578 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_bds", 5579 CTLTYPE_INT | CTLFLAG_RW, 5580 sc, 0, bce_sysctl_tx_bds, "I", 5581 "Send max coalesced BD count"); 5582 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_ticks_int", 5583 CTLTYPE_INT | CTLFLAG_RW, 5584 sc, 0, bce_sysctl_tx_ticks_int, "I", 5585 "Send coalescing ticks during interrupt"); 5586 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_ticks", 
5587 CTLTYPE_INT | CTLFLAG_RW, 5588 sc, 0, bce_sysctl_tx_ticks, "I", 5589 "Send coalescing ticks"); 5590 5591 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_bds_int", 5592 CTLTYPE_INT | CTLFLAG_RW, 5593 sc, 0, bce_sysctl_rx_bds_int, "I", 5594 "Receive max coalesced BD count during interrupt"); 5595 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_bds", 5596 CTLTYPE_INT | CTLFLAG_RW, 5597 sc, 0, bce_sysctl_rx_bds, "I", 5598 "Receive max coalesced BD count"); 5599 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_ticks_int", 5600 CTLTYPE_INT | CTLFLAG_RW, 5601 sc, 0, bce_sysctl_rx_ticks_int, "I", 5602 "Receive coalescing ticks during interrupt"); 5603 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_ticks", 5604 CTLTYPE_INT | CTLFLAG_RW, 5605 sc, 0, bce_sysctl_rx_ticks, "I", 5606 "Receive coalescing ticks"); 5607 5608 #ifdef BCE_DEBUG 5609 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 5610 "rx_low_watermark", 5611 CTLFLAG_RD, &sc->rx_low_watermark, 5612 0, "Lowest level of free rx_bd's"); 5613 5614 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 5615 "rx_empty_count", 5616 CTLFLAG_RD, &sc->rx_empty_count, 5617 0, "Number of times the RX chain was empty"); 5618 5619 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 5620 "tx_hi_watermark", 5621 CTLFLAG_RD, &sc->tx_hi_watermark, 5622 0, "Highest level of used tx_bd's"); 5623 5624 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 5625 "tx_full_count", 5626 CTLFLAG_RD, &sc->tx_full_count, 5627 0, "Number of times the TX chain was full"); 5628 5629 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 5630 "l2fhdr_status_errors", 5631 CTLFLAG_RD, &sc->l2fhdr_status_errors, 5632 0, "l2_fhdr status errors"); 5633 5634 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 5635 "unexpected_attentions", 5636 CTLFLAG_RD, &sc->unexpected_attentions, 5637 0, "unexpected attentions"); 5638 5639 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 5640 "lost_status_block_updates", 5641 CTLFLAG_RD, &sc->lost_status_block_updates, 5642 0, "lost status block updates"); 5643 5644 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 5645 
"mbuf_alloc_failed", 5646 CTLFLAG_RD, &sc->mbuf_alloc_failed, 5647 0, "mbuf cluster allocation failures"); 5648 #endif 5649 5650 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 5651 "stat_IfHCInOctets", 5652 CTLFLAG_RD, &sc->stat_IfHCInOctets, 5653 "Bytes received"); 5654 5655 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 5656 "stat_IfHCInBadOctets", 5657 CTLFLAG_RD, &sc->stat_IfHCInBadOctets, 5658 "Bad bytes received"); 5659 5660 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 5661 "stat_IfHCOutOctets", 5662 CTLFLAG_RD, &sc->stat_IfHCOutOctets, 5663 "Bytes sent"); 5664 5665 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 5666 "stat_IfHCOutBadOctets", 5667 CTLFLAG_RD, &sc->stat_IfHCOutBadOctets, 5668 "Bad bytes sent"); 5669 5670 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 5671 "stat_IfHCInUcastPkts", 5672 CTLFLAG_RD, &sc->stat_IfHCInUcastPkts, 5673 "Unicast packets received"); 5674 5675 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 5676 "stat_IfHCInMulticastPkts", 5677 CTLFLAG_RD, &sc->stat_IfHCInMulticastPkts, 5678 "Multicast packets received"); 5679 5680 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 5681 "stat_IfHCInBroadcastPkts", 5682 CTLFLAG_RD, &sc->stat_IfHCInBroadcastPkts, 5683 "Broadcast packets received"); 5684 5685 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 5686 "stat_IfHCOutUcastPkts", 5687 CTLFLAG_RD, &sc->stat_IfHCOutUcastPkts, 5688 "Unicast packets sent"); 5689 5690 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 5691 "stat_IfHCOutMulticastPkts", 5692 CTLFLAG_RD, &sc->stat_IfHCOutMulticastPkts, 5693 "Multicast packets sent"); 5694 5695 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 5696 "stat_IfHCOutBroadcastPkts", 5697 CTLFLAG_RD, &sc->stat_IfHCOutBroadcastPkts, 5698 "Broadcast packets sent"); 5699 5700 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 5701 "stat_emac_tx_stat_dot3statsinternalmactransmiterrors", 5702 CTLFLAG_RD, &sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors, 5703 0, "Internal MAC transmit errors"); 5704 5705 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 5706 
"stat_Dot3StatsCarrierSenseErrors", 5707 CTLFLAG_RD, &sc->stat_Dot3StatsCarrierSenseErrors, 5708 0, "Carrier sense errors"); 5709 5710 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 5711 "stat_Dot3StatsFCSErrors", 5712 CTLFLAG_RD, &sc->stat_Dot3StatsFCSErrors, 5713 0, "Frame check sequence errors"); 5714 5715 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 5716 "stat_Dot3StatsAlignmentErrors", 5717 CTLFLAG_RD, &sc->stat_Dot3StatsAlignmentErrors, 5718 0, "Alignment errors"); 5719 5720 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 5721 "stat_Dot3StatsSingleCollisionFrames", 5722 CTLFLAG_RD, &sc->stat_Dot3StatsSingleCollisionFrames, 5723 0, "Single Collision Frames"); 5724 5725 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 5726 "stat_Dot3StatsMultipleCollisionFrames", 5727 CTLFLAG_RD, &sc->stat_Dot3StatsMultipleCollisionFrames, 5728 0, "Multiple Collision Frames"); 5729 5730 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 5731 "stat_Dot3StatsDeferredTransmissions", 5732 CTLFLAG_RD, &sc->stat_Dot3StatsDeferredTransmissions, 5733 0, "Deferred Transmissions"); 5734 5735 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 5736 "stat_Dot3StatsExcessiveCollisions", 5737 CTLFLAG_RD, &sc->stat_Dot3StatsExcessiveCollisions, 5738 0, "Excessive Collisions"); 5739 5740 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 5741 "stat_Dot3StatsLateCollisions", 5742 CTLFLAG_RD, &sc->stat_Dot3StatsLateCollisions, 5743 0, "Late Collisions"); 5744 5745 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 5746 "stat_EtherStatsCollisions", 5747 CTLFLAG_RD, &sc->stat_EtherStatsCollisions, 5748 0, "Collisions"); 5749 5750 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 5751 "stat_EtherStatsFragments", 5752 CTLFLAG_RD, &sc->stat_EtherStatsFragments, 5753 0, "Fragments"); 5754 5755 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 5756 "stat_EtherStatsJabbers", 5757 CTLFLAG_RD, &sc->stat_EtherStatsJabbers, 5758 0, "Jabbers"); 5759 5760 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 5761 "stat_EtherStatsUndersizePkts", 5762 CTLFLAG_RD, &sc->stat_EtherStatsUndersizePkts, 5763 0, 
"Undersize packets"); 5764 5765 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 5766 "stat_EtherStatsOverrsizePkts", 5767 CTLFLAG_RD, &sc->stat_EtherStatsOverrsizePkts, 5768 0, "stat_EtherStatsOverrsizePkts"); 5769 5770 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 5771 "stat_EtherStatsPktsRx64Octets", 5772 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx64Octets, 5773 0, "Bytes received in 64 byte packets"); 5774 5775 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 5776 "stat_EtherStatsPktsRx65Octetsto127Octets", 5777 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx65Octetsto127Octets, 5778 0, "Bytes received in 65 to 127 byte packets"); 5779 5780 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 5781 "stat_EtherStatsPktsRx128Octetsto255Octets", 5782 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx128Octetsto255Octets, 5783 0, "Bytes received in 128 to 255 byte packets"); 5784 5785 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 5786 "stat_EtherStatsPktsRx256Octetsto511Octets", 5787 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx256Octetsto511Octets, 5788 0, "Bytes received in 256 to 511 byte packets"); 5789 5790 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 5791 "stat_EtherStatsPktsRx512Octetsto1023Octets", 5792 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx512Octetsto1023Octets, 5793 0, "Bytes received in 512 to 1023 byte packets"); 5794 5795 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 5796 "stat_EtherStatsPktsRx1024Octetsto1522Octets", 5797 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1024Octetsto1522Octets, 5798 0, "Bytes received in 1024 t0 1522 byte packets"); 5799 5800 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 5801 "stat_EtherStatsPktsRx1523Octetsto9022Octets", 5802 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1523Octetsto9022Octets, 5803 0, "Bytes received in 1523 to 9022 byte packets"); 5804 5805 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 5806 "stat_EtherStatsPktsTx64Octets", 5807 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx64Octets, 5808 0, "Bytes sent in 64 byte packets"); 5809 5810 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 5811 
"stat_EtherStatsPktsTx65Octetsto127Octets", 5812 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx65Octetsto127Octets, 5813 0, "Bytes sent in 65 to 127 byte packets"); 5814 5815 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 5816 "stat_EtherStatsPktsTx128Octetsto255Octets", 5817 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx128Octetsto255Octets, 5818 0, "Bytes sent in 128 to 255 byte packets"); 5819 5820 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 5821 "stat_EtherStatsPktsTx256Octetsto511Octets", 5822 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx256Octetsto511Octets, 5823 0, "Bytes sent in 256 to 511 byte packets"); 5824 5825 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 5826 "stat_EtherStatsPktsTx512Octetsto1023Octets", 5827 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx512Octetsto1023Octets, 5828 0, "Bytes sent in 512 to 1023 byte packets"); 5829 5830 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 5831 "stat_EtherStatsPktsTx1024Octetsto1522Octets", 5832 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1024Octetsto1522Octets, 5833 0, "Bytes sent in 1024 to 1522 byte packets"); 5834 5835 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 5836 "stat_EtherStatsPktsTx1523Octetsto9022Octets", 5837 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1523Octetsto9022Octets, 5838 0, "Bytes sent in 1523 to 9022 byte packets"); 5839 5840 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 5841 "stat_XonPauseFramesReceived", 5842 CTLFLAG_RD, &sc->stat_XonPauseFramesReceived, 5843 0, "XON pause frames receved"); 5844 5845 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 5846 "stat_XoffPauseFramesReceived", 5847 CTLFLAG_RD, &sc->stat_XoffPauseFramesReceived, 5848 0, "XOFF pause frames received"); 5849 5850 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 5851 "stat_OutXonSent", 5852 CTLFLAG_RD, &sc->stat_OutXonSent, 5853 0, "XON pause frames sent"); 5854 5855 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 5856 "stat_OutXoffSent", 5857 CTLFLAG_RD, &sc->stat_OutXoffSent, 5858 0, "XOFF pause frames sent"); 5859 5860 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 5861 "stat_FlowControlDone", 5862 
CTLFLAG_RD, &sc->stat_FlowControlDone, 5863 0, "Flow control done"); 5864 5865 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 5866 "stat_MacControlFramesReceived", 5867 CTLFLAG_RD, &sc->stat_MacControlFramesReceived, 5868 0, "MAC control frames received"); 5869 5870 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 5871 "stat_XoffStateEntered", 5872 CTLFLAG_RD, &sc->stat_XoffStateEntered, 5873 0, "XOFF state entered"); 5874 5875 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 5876 "stat_IfInFramesL2FilterDiscards", 5877 CTLFLAG_RD, &sc->stat_IfInFramesL2FilterDiscards, 5878 0, "Received L2 packets discarded"); 5879 5880 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 5881 "stat_IfInRuleCheckerDiscards", 5882 CTLFLAG_RD, &sc->stat_IfInRuleCheckerDiscards, 5883 0, "Received packets discarded by rule"); 5884 5885 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 5886 "stat_IfInFTQDiscards", 5887 CTLFLAG_RD, &sc->stat_IfInFTQDiscards, 5888 0, "Received packet FTQ discards"); 5889 5890 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 5891 "stat_IfInMBUFDiscards", 5892 CTLFLAG_RD, &sc->stat_IfInMBUFDiscards, 5893 0, "Received packets discarded due to lack of controller buffer memory"); 5894 5895 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 5896 "stat_IfInRuleCheckerP4Hit", 5897 CTLFLAG_RD, &sc->stat_IfInRuleCheckerP4Hit, 5898 0, "Received packets rule checker hits"); 5899 5900 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 5901 "stat_CatchupInRuleCheckerDiscards", 5902 CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerDiscards, 5903 0, "Received packets discarded in Catchup path"); 5904 5905 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 5906 "stat_CatchupInFTQDiscards", 5907 CTLFLAG_RD, &sc->stat_CatchupInFTQDiscards, 5908 0, "Received packets discarded in FTQ in Catchup path"); 5909 5910 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 5911 "stat_CatchupInMBUFDiscards", 5912 CTLFLAG_RD, &sc->stat_CatchupInMBUFDiscards, 5913 0, "Received packets discarded in controller buffer memory in Catchup path"); 5914 5915 SYSCTL_ADD_UINT(ctx, children, 
OID_AUTO, 5916 "stat_CatchupInRuleCheckerP4Hit", 5917 CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerP4Hit, 5918 0, "Received packets rule checker hits in Catchup path"); 5919 5920 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 5921 "com_no_buffers", 5922 CTLFLAG_RD, &sc->com_no_buffers, 5923 0, "Valid packets received but no RX buffers available"); 5924 5925 #ifdef BCE_DEBUG 5926 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 5927 "driver_state", CTLTYPE_INT | CTLFLAG_RW, 5928 (void *)sc, 0, 5929 bce_sysctl_driver_state, "I", "Drive state information"); 5930 5931 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 5932 "hw_state", CTLTYPE_INT | CTLFLAG_RW, 5933 (void *)sc, 0, 5934 bce_sysctl_hw_state, "I", "Hardware state information"); 5935 5936 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 5937 "dump_rx_chain", CTLTYPE_INT | CTLFLAG_RW, 5938 (void *)sc, 0, 5939 bce_sysctl_dump_rx_chain, "I", "Dump rx_bd chain"); 5940 5941 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 5942 "dump_tx_chain", CTLTYPE_INT | CTLFLAG_RW, 5943 (void *)sc, 0, 5944 bce_sysctl_dump_tx_chain, "I", "Dump tx_bd chain"); 5945 5946 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 5947 "breakpoint", CTLTYPE_INT | CTLFLAG_RW, 5948 (void *)sc, 0, 5949 bce_sysctl_breakpoint, "I", "Driver breakpoint"); 5950 5951 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 5952 "reg_read", CTLTYPE_INT | CTLFLAG_RW, 5953 (void *)sc, 0, 5954 bce_sysctl_reg_read, "I", "Register read"); 5955 5956 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 5957 "phy_read", CTLTYPE_INT | CTLFLAG_RW, 5958 (void *)sc, 0, 5959 bce_sysctl_phy_read, "I", "PHY register read"); 5960 5961 #endif 5962 5963 } 5964 5965 5966 /****************************************************************************/ 5967 /* BCE Debug Routines */ 5968 /****************************************************************************/ 5969 #ifdef BCE_DEBUG 5970 5971 /****************************************************************************/ 5972 /* Freezes the controller to allow for a cohesive state dump. 
 */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_freeze_controller(struct bce_softc *sc)
{
	uint32_t val;

	/* Setting DISABLE_ALL in the MISC command register halts the
	 * controller's internal processing. */
	val = REG_RD(sc, BCE_MISC_COMMAND);
	val |= BCE_MISC_COMMAND_DISABLE_ALL;
	REG_WR(sc, BCE_MISC_COMMAND, val);
}


/****************************************************************************/
/* Unfreezes the controller after a freeze operation.  This may not always  */
/* work and the controller will require a reset!                            */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_unfreeze_controller(struct bce_softc *sc)
{
	uint32_t val;

	val = REG_RD(sc, BCE_MISC_COMMAND);
	val |= BCE_MISC_COMMAND_ENABLE_ALL;
	REG_WR(sc, BCE_MISC_COMMAND, val);
}


/****************************************************************************/
/* Prints out information about an mbuf.                                    */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_dump_mbuf(struct bce_softc *sc, struct mbuf *m)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t val_hi, val_lo;
	struct mbuf *mp = m;

	if (m == NULL) {
		/* NULL mbuf pointer; nothing to dump. */
		if_printf(ifp, "mbuf: null pointer\n");
		return;
	}

	/* Walk the mbuf chain, printing each link's address, length,
	 * flags, and (if present) packet header and external storage. */
	while (mp) {
		val_hi = BCE_ADDR_HI(mp);
		val_lo = BCE_ADDR_LO(mp);
		if_printf(ifp, "mbuf: vaddr = 0x%08X:%08X, m_len = %d, "
			  "m_flags = ( ", val_hi, val_lo, mp->m_len);

		if (mp->m_flags & M_EXT)
			kprintf("M_EXT ");
		if (mp->m_flags & M_PKTHDR)
			kprintf("M_PKTHDR ");
		if (mp->m_flags & M_EOR)
			kprintf("M_EOR ");
#ifdef M_RDONLY
		if (mp->m_flags & M_RDONLY)
			kprintf("M_RDONLY ");
#endif

		val_hi = BCE_ADDR_HI(mp->m_data);
		val_lo = BCE_ADDR_LO(mp->m_data);
		kprintf(") m_data = 0x%08X:%08X\n", val_hi, val_lo);

		if (mp->m_flags & M_PKTHDR) {
			if_printf(ifp, "- m_pkthdr: flags = ( ");
			if (mp->m_flags & M_BCAST)
				kprintf("M_BCAST ");
			if (mp->m_flags & M_MCAST)
				kprintf("M_MCAST ");
			if (mp->m_flags & M_FRAG)
				kprintf("M_FRAG ");
			if (mp->m_flags & M_FIRSTFRAG)
				kprintf("M_FIRSTFRAG ");
			if (mp->m_flags & M_LASTFRAG)
				kprintf("M_LASTFRAG ");
#ifdef M_VLANTAG
			if (mp->m_flags & M_VLANTAG)
				kprintf("M_VLANTAG ");
#endif
#ifdef M_PROMISC
			if (mp->m_flags & M_PROMISC)
				kprintf("M_PROMISC ");
#endif
			kprintf(") csum_flags = ( ");
			if (mp->m_pkthdr.csum_flags & CSUM_IP)
				kprintf("CSUM_IP ");
			if (mp->m_pkthdr.csum_flags & CSUM_TCP)
				kprintf("CSUM_TCP ");
			if (mp->m_pkthdr.csum_flags & CSUM_UDP)
				kprintf("CSUM_UDP ");
			if (mp->m_pkthdr.csum_flags & CSUM_IP_FRAGS)
				kprintf("CSUM_IP_FRAGS ");
			if (mp->m_pkthdr.csum_flags & CSUM_FRAGMENT)
				kprintf("CSUM_FRAGMENT ");
#ifdef CSUM_TSO
			if (mp->m_pkthdr.csum_flags & CSUM_TSO)
				kprintf("CSUM_TSO ");
#endif
			if (mp->m_pkthdr.csum_flags & CSUM_IP_CHECKED)
				kprintf("CSUM_IP_CHECKED ");
			if (mp->m_pkthdr.csum_flags & CSUM_IP_VALID)
				kprintf("CSUM_IP_VALID ");
			if (mp->m_pkthdr.csum_flags & CSUM_DATA_VALID)
				kprintf("CSUM_DATA_VALID ");
			kprintf(")\n");
		}

		if (mp->m_flags & M_EXT) {
			val_hi = BCE_ADDR_HI(mp->m_ext.ext_buf);
			val_lo = BCE_ADDR_LO(mp->m_ext.ext_buf);
			if_printf(ifp, "- m_ext: vaddr = 0x%08X:%08X, "
				  "ext_size = %d\n",
				  val_hi, val_lo, mp->m_ext.ext_size);
		}
		mp = mp->m_next;
	}
}


/****************************************************************************/
/* Prints out the mbufs in the TX mbuf chain.                               */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_dump_tx_mbuf_chain(struct bce_softc *sc, int chain_prod, int count)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int i;

	if_printf(ifp,
	"----------------------------"
	"  tx mbuf data  "
	"----------------------------\n");

	/* Dump 'count' TX mbufs starting at index 'chain_prod'. */
	for (i = 0; i < count; i++) {
		if_printf(ifp, "txmbuf[%d]\n", chain_prod);
		bce_dump_mbuf(sc, sc->tx_mbuf_ptr[chain_prod]);
		chain_prod = TX_CHAIN_IDX(NEXT_TX_BD(chain_prod));
	}

	if_printf(ifp,
	"----------------------------"
	"----------------"
	"----------------------------\n");
}


/****************************************************************************/
/* Prints out the mbufs in the RX mbuf chain.                               */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
 */
/****************************************************************************/
static void
bce_dump_rx_mbuf_chain(struct bce_softc *sc, int chain_prod, int count)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int i;

	if_printf(ifp,
		  "----------------------------"
		  " rx mbuf data "
		  "----------------------------\n");

	/* Dump "count" consecutive RX mbufs starting at chain_prod. */
	for (i = 0; i < count; i++) {
		if_printf(ifp, "rxmbuf[0x%04X]\n", chain_prod);
		bce_dump_mbuf(sc, sc->rx_mbuf_ptr[chain_prod]);
		chain_prod = RX_CHAIN_IDX(NEXT_RX_BD(chain_prod));
	}

	if_printf(ifp,
		  "----------------------------"
		  "----------------"
		  "----------------------------\n");
}


/****************************************************************************/
/* Prints out a tx_bd structure.                                            */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_dump_txbd(struct bce_softc *sc, int idx, struct tx_bd *txbd)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;

	if (idx > MAX_TX_BD) {
		/* Index out of range. */
		if_printf(ifp, "tx_bd[0x%04X]: Invalid tx_bd index!\n", idx);
	} else if ((idx & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE) {
		/*
		 * TX chain page pointer: the last bd of each page links to
		 * the next page rather than describing a buffer.
		 */
		if_printf(ifp, "tx_bd[0x%04X]: haddr = 0x%08X:%08X, "
			  "chain page pointer\n",
			  idx, txbd->tx_bd_haddr_hi, txbd->tx_bd_haddr_lo);
	} else {
		/* Normal tx_bd entry: print the header, then decode flags. */
		if_printf(ifp, "tx_bd[0x%04X]: haddr = 0x%08X:%08X, "
			  "nbytes = 0x%08X, "
			  "vlan tag= 0x%04X, flags = 0x%04X (",
			  idx, txbd->tx_bd_haddr_hi, txbd->tx_bd_haddr_lo,
			  txbd->tx_bd_mss_nbytes,
			  txbd->tx_bd_vlan_tag, txbd->tx_bd_flags);

		if (txbd->tx_bd_flags & TX_BD_FLAGS_CONN_FAULT)
			kprintf(" CONN_FAULT");

		if (txbd->tx_bd_flags & TX_BD_FLAGS_TCP_UDP_CKSUM)
			kprintf(" TCP_UDP_CKSUM");

		if (txbd->tx_bd_flags & TX_BD_FLAGS_IP_CKSUM)
			kprintf(" IP_CKSUM");

		if (txbd->tx_bd_flags & TX_BD_FLAGS_VLAN_TAG)
			kprintf(" VLAN");

		if (txbd->tx_bd_flags & TX_BD_FLAGS_COAL_NOW)
			kprintf(" COAL_NOW");

		if (txbd->tx_bd_flags & TX_BD_FLAGS_DONT_GEN_CRC)
			kprintf(" DONT_GEN_CRC");

		if (txbd->tx_bd_flags & TX_BD_FLAGS_START)
			kprintf(" START");

		if (txbd->tx_bd_flags & TX_BD_FLAGS_END)
			kprintf(" END");

		if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_LSO)
			kprintf(" LSO");

		if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_OPTION_WORD)
			kprintf(" OPTION_WORD");

		if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_FLAGS)
			kprintf(" FLAGS");

		if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_SNAP)
			kprintf(" SNAP");

		kprintf(" )\n");
	}
}


/****************************************************************************/
/* Prints out a rx_bd structure.                                            */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_dump_rxbd(struct bce_softc *sc, int idx, struct rx_bd *rxbd)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;

	if (idx > MAX_RX_BD) {
		/* Index out of range. */
		if_printf(ifp, "rx_bd[0x%04X]: Invalid rx_bd index!\n", idx);
	} else if ((idx & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE) {
		/* RX chain page pointer.
 */
		if_printf(ifp, "rx_bd[0x%04X]: haddr = 0x%08X:%08X, "
			  "chain page pointer\n",
			  idx, rxbd->rx_bd_haddr_hi, rxbd->rx_bd_haddr_lo);
	} else {
		/* Normal rx_bd entry. */
		if_printf(ifp, "rx_bd[0x%04X]: haddr = 0x%08X:%08X, "
			  "nbytes = 0x%08X, flags = 0x%08X\n",
			  idx, rxbd->rx_bd_haddr_hi, rxbd->rx_bd_haddr_lo,
			  rxbd->rx_bd_len, rxbd->rx_bd_flags);
	}
}


/****************************************************************************/
/* Prints out a l2_fhdr structure.                                          */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_dump_l2fhdr(struct bce_softc *sc, int idx, struct l2_fhdr *l2fhdr)
{
	/* One-line summary of the hardware-prepended L2 frame header. */
	if_printf(&sc->arpcom.ac_if, "l2_fhdr[0x%04X]: status = 0x%08X, "
		  "pkt_len = 0x%04X, vlan = 0x%04x, "
		  "ip_xsum = 0x%04X, tcp_udp_xsum = 0x%04X\n",
		  idx, l2fhdr->l2_fhdr_status,
		  l2fhdr->l2_fhdr_pkt_len, l2fhdr->l2_fhdr_vlan_tag,
		  l2fhdr->l2_fhdr_ip_xsum, l2fhdr->l2_fhdr_tcp_udp_xsum);
}


/****************************************************************************/
/* Prints out the tx chain.                                                 */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_dump_tx_chain(struct bce_softc *sc, int tx_prod, int count)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int i;

	/* First some info about the tx_bd chain structure. */
	if_printf(ifp,
		  "----------------------------"
		  " tx_bd chain "
		  "----------------------------\n");

	if_printf(ifp, "page size = 0x%08X, "
		  "tx chain pages = 0x%08X\n",
		  (uint32_t)BCM_PAGE_SIZE, (uint32_t)TX_PAGES);

	if_printf(ifp, "tx_bd per page = 0x%08X, "
		  "usable tx_bd per page = 0x%08X\n",
		  (uint32_t)TOTAL_TX_BD_PER_PAGE,
		  (uint32_t)USABLE_TX_BD_PER_PAGE);

	if_printf(ifp, "total tx_bd = 0x%08X\n", (uint32_t)TOTAL_TX_BD);

	if_printf(ifp,
		  "----------------------------"
		  " tx_bd data "
		  "----------------------------\n");

	/* Now print out the tx_bd's themselves. */
	for (i = 0; i < count; i++) {
		struct tx_bd *txbd;

		txbd = &sc->tx_bd_chain[TX_PAGE(tx_prod)][TX_IDX(tx_prod)];
		bce_dump_txbd(sc, tx_prod, txbd);
		tx_prod = TX_CHAIN_IDX(NEXT_TX_BD(tx_prod));
	}

	if_printf(ifp,
		  "----------------------------"
		  "----------------"
		  "----------------------------\n");
}


/****************************************************************************/
/* Prints out the rx chain.                                                 */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_dump_rx_chain(struct bce_softc *sc, int rx_prod, int count)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int i;

	/* First some info about the rx_bd chain structure.
 */
	if_printf(ifp,
		  "----------------------------"
		  " rx_bd chain "
		  "----------------------------\n");

	if_printf(ifp, "page size = 0x%08X, "
		  "rx chain pages = 0x%08X\n",
		  (uint32_t)BCM_PAGE_SIZE, (uint32_t)RX_PAGES);

	if_printf(ifp, "rx_bd per page = 0x%08X, "
		  "usable rx_bd per page = 0x%08X\n",
		  (uint32_t)TOTAL_RX_BD_PER_PAGE,
		  (uint32_t)USABLE_RX_BD_PER_PAGE);

	if_printf(ifp, "total rx_bd = 0x%08X\n", (uint32_t)TOTAL_RX_BD);

	if_printf(ifp,
		  "----------------------------"
		  " rx_bd data "
		  "----------------------------\n");

	/* Now print out the rx_bd's themselves. */
	for (i = 0; i < count; i++) {
		struct rx_bd *rxbd;

		rxbd = &sc->rx_bd_chain[RX_PAGE(rx_prod)][RX_IDX(rx_prod)];
		bce_dump_rxbd(sc, rx_prod, rxbd);
		rx_prod = RX_CHAIN_IDX(NEXT_RX_BD(rx_prod));
	}

	if_printf(ifp,
		  "----------------------------"
		  "----------------"
		  "----------------------------\n");
}


/****************************************************************************/
/* Prints out the status block from host memory.                            */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_dump_status_block(struct bce_softc *sc)
{
	struct status_block *sblk = sc->status_block;
	struct ifnet *ifp = &sc->arpcom.ac_if;

	if_printf(ifp,
		  "----------------------------"
		  " Status Block "
		  "----------------------------\n");

	if_printf(ifp, " 0x%08X - attn_bits\n", sblk->status_attn_bits);

	if_printf(ifp, " 0x%08X - attn_bits_ack\n",
		  sblk->status_attn_bits_ack);

	/* Raw index plus its wrapped chain index for quick ring 0. */
	if_printf(ifp, "0x%04X(0x%04X) - rx_cons0\n",
		  sblk->status_rx_quick_consumer_index0,
		  (uint16_t)RX_CHAIN_IDX(sblk->status_rx_quick_consumer_index0));

	if_printf(ifp, "0x%04X(0x%04X) - tx_cons0\n",
		  sblk->status_tx_quick_consumer_index0,
		  (uint16_t)TX_CHAIN_IDX(sblk->status_tx_quick_consumer_index0));

	if_printf(ifp, " 0x%04X - status_idx\n", sblk->status_idx);

	/* These indices are not used for normal L2 drivers. */
	if (sblk->status_rx_quick_consumer_index1) {
		if_printf(ifp, "0x%04X(0x%04X) - rx_cons1\n",
			  sblk->status_rx_quick_consumer_index1,
			  (uint16_t)RX_CHAIN_IDX(sblk->status_rx_quick_consumer_index1));
	}

	if (sblk->status_tx_quick_consumer_index1) {
		if_printf(ifp, "0x%04X(0x%04X) - tx_cons1\n",
			  sblk->status_tx_quick_consumer_index1,
			  (uint16_t)TX_CHAIN_IDX(sblk->status_tx_quick_consumer_index1));
	}

	if (sblk->status_rx_quick_consumer_index2) {
		if_printf(ifp, "0x%04X(0x%04X)- rx_cons2\n",
			  sblk->status_rx_quick_consumer_index2,
			  (uint16_t)RX_CHAIN_IDX(sblk->status_rx_quick_consumer_index2));
	}

	if (sblk->status_tx_quick_consumer_index2) {
		if_printf(ifp, "0x%04X(0x%04X) - tx_cons2\n",
			  sblk->status_tx_quick_consumer_index2,
			  (uint16_t)TX_CHAIN_IDX(sblk->status_tx_quick_consumer_index2));
	}

	if (sblk->status_rx_quick_consumer_index3) {
		if_printf(ifp, "0x%04X(0x%04X) - rx_cons3\n",
			  sblk->status_rx_quick_consumer_index3,
			  (uint16_t)RX_CHAIN_IDX(sblk->status_rx_quick_consumer_index3));
	}

	if (sblk->status_tx_quick_consumer_index3) {
		if_printf(ifp, "0x%04X(0x%04X) - tx_cons3\n",
			  sblk->status_tx_quick_consumer_index3,
			  (uint16_t)TX_CHAIN_IDX(sblk->status_tx_quick_consumer_index3));
	}

	if (sblk->status_rx_quick_consumer_index4 ||
	    sblk->status_rx_quick_consumer_index5) {
		if_printf(ifp, "rx_cons4 = 0x%08X, rx_cons5 = 0x%08X\n",
			  sblk->status_rx_quick_consumer_index4,
			  sblk->status_rx_quick_consumer_index5);
	}

	if (sblk->status_rx_quick_consumer_index6 ||
	    sblk->status_rx_quick_consumer_index7) {
		if_printf(ifp, "rx_cons6 = 0x%08X, rx_cons7 = 0x%08X\n",
			  sblk->status_rx_quick_consumer_index6,
			  sblk->status_rx_quick_consumer_index7);
	}

	if (sblk->status_rx_quick_consumer_index8 ||
	    sblk->status_rx_quick_consumer_index9) {
		if_printf(ifp, "rx_cons8 = 0x%08X, rx_cons9 = 0x%08X\n",
			  sblk->status_rx_quick_consumer_index8,
			  sblk->status_rx_quick_consumer_index9);
	}

	if (sblk->status_rx_quick_consumer_index10 ||
	    sblk->status_rx_quick_consumer_index11) {
		if_printf(ifp, "rx_cons10 = 0x%08X, rx_cons11 = 0x%08X\n",
			  sblk->status_rx_quick_consumer_index10,
			  sblk->status_rx_quick_consumer_index11);
	}

	if (sblk->status_rx_quick_consumer_index12 ||
	    sblk->status_rx_quick_consumer_index13) {
		if_printf(ifp, "rx_cons12 = 0x%08X, rx_cons13 = 0x%08X\n",
			  sblk->status_rx_quick_consumer_index12,
			  sblk->status_rx_quick_consumer_index13);
	}

	if (sblk->status_rx_quick_consumer_index14 ||
	    sblk->status_rx_quick_consumer_index15) {
		if_printf(ifp, "rx_cons14 = 0x%08X, rx_cons15 = 0x%08X\n",
			  sblk->status_rx_quick_consumer_index14,
			  sblk->status_rx_quick_consumer_index15);
	}

	if (sblk->status_completion_producer_index ||
	    sblk->status_cmd_consumer_index) {
		if_printf(ifp, "com_prod = 0x%08X, cmd_cons = 0x%08X\n",
			  sblk->status_completion_producer_index,
			  sblk->status_cmd_consumer_index);
	}

	if_printf(ifp,
		  "----------------------------"
		  "----------------"
		  "----------------------------\n");
}


/****************************************************************************/
/* Prints out the statistics block.                                         */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
*/ 6508 /****************************************************************************/ 6509 static void 6510 bce_dump_stats_block(struct bce_softc *sc) 6511 { 6512 struct statistics_block *sblk = sc->stats_block; 6513 struct ifnet *ifp = &sc->arpcom.ac_if; 6514 6515 if_printf(ifp, 6516 "---------------" 6517 " Stats Block (All Stats Not Shown Are 0) " 6518 "---------------\n"); 6519 6520 if (sblk->stat_IfHCInOctets_hi || sblk->stat_IfHCInOctets_lo) { 6521 if_printf(ifp, "0x%08X:%08X : IfHcInOctets\n", 6522 sblk->stat_IfHCInOctets_hi, 6523 sblk->stat_IfHCInOctets_lo); 6524 } 6525 6526 if (sblk->stat_IfHCInBadOctets_hi || sblk->stat_IfHCInBadOctets_lo) { 6527 if_printf(ifp, "0x%08X:%08X : IfHcInBadOctets\n", 6528 sblk->stat_IfHCInBadOctets_hi, 6529 sblk->stat_IfHCInBadOctets_lo); 6530 } 6531 6532 if (sblk->stat_IfHCOutOctets_hi || sblk->stat_IfHCOutOctets_lo) { 6533 if_printf(ifp, "0x%08X:%08X : IfHcOutOctets\n", 6534 sblk->stat_IfHCOutOctets_hi, 6535 sblk->stat_IfHCOutOctets_lo); 6536 } 6537 6538 if (sblk->stat_IfHCOutBadOctets_hi || sblk->stat_IfHCOutBadOctets_lo) { 6539 if_printf(ifp, "0x%08X:%08X : IfHcOutBadOctets\n", 6540 sblk->stat_IfHCOutBadOctets_hi, 6541 sblk->stat_IfHCOutBadOctets_lo); 6542 } 6543 6544 if (sblk->stat_IfHCInUcastPkts_hi || sblk->stat_IfHCInUcastPkts_lo) { 6545 if_printf(ifp, "0x%08X:%08X : IfHcInUcastPkts\n", 6546 sblk->stat_IfHCInUcastPkts_hi, 6547 sblk->stat_IfHCInUcastPkts_lo); 6548 } 6549 6550 if (sblk->stat_IfHCInBroadcastPkts_hi || 6551 sblk->stat_IfHCInBroadcastPkts_lo) { 6552 if_printf(ifp, "0x%08X:%08X : IfHcInBroadcastPkts\n", 6553 sblk->stat_IfHCInBroadcastPkts_hi, 6554 sblk->stat_IfHCInBroadcastPkts_lo); 6555 } 6556 6557 if (sblk->stat_IfHCInMulticastPkts_hi || 6558 sblk->stat_IfHCInMulticastPkts_lo) { 6559 if_printf(ifp, "0x%08X:%08X : IfHcInMulticastPkts\n", 6560 sblk->stat_IfHCInMulticastPkts_hi, 6561 sblk->stat_IfHCInMulticastPkts_lo); 6562 } 6563 6564 if (sblk->stat_IfHCOutUcastPkts_hi || sblk->stat_IfHCOutUcastPkts_lo) { 
6565 if_printf(ifp, "0x%08X:%08X : IfHcOutUcastPkts\n", 6566 sblk->stat_IfHCOutUcastPkts_hi, 6567 sblk->stat_IfHCOutUcastPkts_lo); 6568 } 6569 6570 if (sblk->stat_IfHCOutBroadcastPkts_hi || 6571 sblk->stat_IfHCOutBroadcastPkts_lo) { 6572 if_printf(ifp, "0x%08X:%08X : IfHcOutBroadcastPkts\n", 6573 sblk->stat_IfHCOutBroadcastPkts_hi, 6574 sblk->stat_IfHCOutBroadcastPkts_lo); 6575 } 6576 6577 if (sblk->stat_IfHCOutMulticastPkts_hi || 6578 sblk->stat_IfHCOutMulticastPkts_lo) { 6579 if_printf(ifp, "0x%08X:%08X : IfHcOutMulticastPkts\n", 6580 sblk->stat_IfHCOutMulticastPkts_hi, 6581 sblk->stat_IfHCOutMulticastPkts_lo); 6582 } 6583 6584 if (sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors) { 6585 if_printf(ifp, " 0x%08X : " 6586 "emac_tx_stat_dot3statsinternalmactransmiterrors\n", 6587 sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors); 6588 } 6589 6590 if (sblk->stat_Dot3StatsCarrierSenseErrors) { 6591 if_printf(ifp, " 0x%08X : " 6592 "Dot3StatsCarrierSenseErrors\n", 6593 sblk->stat_Dot3StatsCarrierSenseErrors); 6594 } 6595 6596 if (sblk->stat_Dot3StatsFCSErrors) { 6597 if_printf(ifp, " 0x%08X : Dot3StatsFCSErrors\n", 6598 sblk->stat_Dot3StatsFCSErrors); 6599 } 6600 6601 if (sblk->stat_Dot3StatsAlignmentErrors) { 6602 if_printf(ifp, " 0x%08X : Dot3StatsAlignmentErrors\n", 6603 sblk->stat_Dot3StatsAlignmentErrors); 6604 } 6605 6606 if (sblk->stat_Dot3StatsSingleCollisionFrames) { 6607 if_printf(ifp, " 0x%08X : " 6608 "Dot3StatsSingleCollisionFrames\n", 6609 sblk->stat_Dot3StatsSingleCollisionFrames); 6610 } 6611 6612 if (sblk->stat_Dot3StatsMultipleCollisionFrames) { 6613 if_printf(ifp, " 0x%08X : " 6614 "Dot3StatsMultipleCollisionFrames\n", 6615 sblk->stat_Dot3StatsMultipleCollisionFrames); 6616 } 6617 6618 if (sblk->stat_Dot3StatsDeferredTransmissions) { 6619 if_printf(ifp, " 0x%08X : " 6620 "Dot3StatsDeferredTransmissions\n", 6621 sblk->stat_Dot3StatsDeferredTransmissions); 6622 } 6623 6624 if (sblk->stat_Dot3StatsExcessiveCollisions) { 6625 
if_printf(ifp, " 0x%08X : " 6626 "Dot3StatsExcessiveCollisions\n", 6627 sblk->stat_Dot3StatsExcessiveCollisions); 6628 } 6629 6630 if (sblk->stat_Dot3StatsLateCollisions) { 6631 if_printf(ifp, " 0x%08X : Dot3StatsLateCollisions\n", 6632 sblk->stat_Dot3StatsLateCollisions); 6633 } 6634 6635 if (sblk->stat_EtherStatsCollisions) { 6636 if_printf(ifp, " 0x%08X : EtherStatsCollisions\n", 6637 sblk->stat_EtherStatsCollisions); 6638 } 6639 6640 if (sblk->stat_EtherStatsFragments) { 6641 if_printf(ifp, " 0x%08X : EtherStatsFragments\n", 6642 sblk->stat_EtherStatsFragments); 6643 } 6644 6645 if (sblk->stat_EtherStatsJabbers) { 6646 if_printf(ifp, " 0x%08X : EtherStatsJabbers\n", 6647 sblk->stat_EtherStatsJabbers); 6648 } 6649 6650 if (sblk->stat_EtherStatsUndersizePkts) { 6651 if_printf(ifp, " 0x%08X : EtherStatsUndersizePkts\n", 6652 sblk->stat_EtherStatsUndersizePkts); 6653 } 6654 6655 if (sblk->stat_EtherStatsOverrsizePkts) { 6656 if_printf(ifp, " 0x%08X : EtherStatsOverrsizePkts\n", 6657 sblk->stat_EtherStatsOverrsizePkts); 6658 } 6659 6660 if (sblk->stat_EtherStatsPktsRx64Octets) { 6661 if_printf(ifp, " 0x%08X : EtherStatsPktsRx64Octets\n", 6662 sblk->stat_EtherStatsPktsRx64Octets); 6663 } 6664 6665 if (sblk->stat_EtherStatsPktsRx65Octetsto127Octets) { 6666 if_printf(ifp, " 0x%08X : " 6667 "EtherStatsPktsRx65Octetsto127Octets\n", 6668 sblk->stat_EtherStatsPktsRx65Octetsto127Octets); 6669 } 6670 6671 if (sblk->stat_EtherStatsPktsRx128Octetsto255Octets) { 6672 if_printf(ifp, " 0x%08X : " 6673 "EtherStatsPktsRx128Octetsto255Octets\n", 6674 sblk->stat_EtherStatsPktsRx128Octetsto255Octets); 6675 } 6676 6677 if (sblk->stat_EtherStatsPktsRx256Octetsto511Octets) { 6678 if_printf(ifp, " 0x%08X : " 6679 "EtherStatsPktsRx256Octetsto511Octets\n", 6680 sblk->stat_EtherStatsPktsRx256Octetsto511Octets); 6681 } 6682 6683 if (sblk->stat_EtherStatsPktsRx512Octetsto1023Octets) { 6684 if_printf(ifp, " 0x%08X : " 6685 "EtherStatsPktsRx512Octetsto1023Octets\n", 6686 
sblk->stat_EtherStatsPktsRx512Octetsto1023Octets); 6687 } 6688 6689 if (sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets) { 6690 if_printf(ifp, " 0x%08X : " 6691 "EtherStatsPktsRx1024Octetsto1522Octets\n", 6692 sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets); 6693 } 6694 6695 if (sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets) { 6696 if_printf(ifp, " 0x%08X : " 6697 "EtherStatsPktsRx1523Octetsto9022Octets\n", 6698 sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets); 6699 } 6700 6701 if (sblk->stat_EtherStatsPktsTx64Octets) { 6702 if_printf(ifp, " 0x%08X : EtherStatsPktsTx64Octets\n", 6703 sblk->stat_EtherStatsPktsTx64Octets); 6704 } 6705 6706 if (sblk->stat_EtherStatsPktsTx65Octetsto127Octets) { 6707 if_printf(ifp, " 0x%08X : " 6708 "EtherStatsPktsTx65Octetsto127Octets\n", 6709 sblk->stat_EtherStatsPktsTx65Octetsto127Octets); 6710 } 6711 6712 if (sblk->stat_EtherStatsPktsTx128Octetsto255Octets) { 6713 if_printf(ifp, " 0x%08X : " 6714 "EtherStatsPktsTx128Octetsto255Octets\n", 6715 sblk->stat_EtherStatsPktsTx128Octetsto255Octets); 6716 } 6717 6718 if (sblk->stat_EtherStatsPktsTx256Octetsto511Octets) { 6719 if_printf(ifp, " 0x%08X : " 6720 "EtherStatsPktsTx256Octetsto511Octets\n", 6721 sblk->stat_EtherStatsPktsTx256Octetsto511Octets); 6722 } 6723 6724 if (sblk->stat_EtherStatsPktsTx512Octetsto1023Octets) { 6725 if_printf(ifp, " 0x%08X : " 6726 "EtherStatsPktsTx512Octetsto1023Octets\n", 6727 sblk->stat_EtherStatsPktsTx512Octetsto1023Octets); 6728 } 6729 6730 if (sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets) { 6731 if_printf(ifp, " 0x%08X : " 6732 "EtherStatsPktsTx1024Octetsto1522Octets\n", 6733 sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets); 6734 } 6735 6736 if (sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets) { 6737 if_printf(ifp, " 0x%08X : " 6738 "EtherStatsPktsTx1523Octetsto9022Octets\n", 6739 sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets); 6740 } 6741 6742 if (sblk->stat_XonPauseFramesReceived) { 6743 if_printf(ifp, " 0x%08X : 
XonPauseFramesReceived\n", 6744 sblk->stat_XonPauseFramesReceived); 6745 } 6746 6747 if (sblk->stat_XoffPauseFramesReceived) { 6748 if_printf(ifp, " 0x%08X : XoffPauseFramesReceived\n", 6749 sblk->stat_XoffPauseFramesReceived); 6750 } 6751 6752 if (sblk->stat_OutXonSent) { 6753 if_printf(ifp, " 0x%08X : OutXoffSent\n", 6754 sblk->stat_OutXonSent); 6755 } 6756 6757 if (sblk->stat_OutXoffSent) { 6758 if_printf(ifp, " 0x%08X : OutXoffSent\n", 6759 sblk->stat_OutXoffSent); 6760 } 6761 6762 if (sblk->stat_FlowControlDone) { 6763 if_printf(ifp, " 0x%08X : FlowControlDone\n", 6764 sblk->stat_FlowControlDone); 6765 } 6766 6767 if (sblk->stat_MacControlFramesReceived) { 6768 if_printf(ifp, " 0x%08X : MacControlFramesReceived\n", 6769 sblk->stat_MacControlFramesReceived); 6770 } 6771 6772 if (sblk->stat_XoffStateEntered) { 6773 if_printf(ifp, " 0x%08X : XoffStateEntered\n", 6774 sblk->stat_XoffStateEntered); 6775 } 6776 6777 if (sblk->stat_IfInFramesL2FilterDiscards) { 6778 if_printf(ifp, " 0x%08X : IfInFramesL2FilterDiscards\n", sblk->stat_IfInFramesL2FilterDiscards); 6779 } 6780 6781 if (sblk->stat_IfInRuleCheckerDiscards) { 6782 if_printf(ifp, " 0x%08X : IfInRuleCheckerDiscards\n", 6783 sblk->stat_IfInRuleCheckerDiscards); 6784 } 6785 6786 if (sblk->stat_IfInFTQDiscards) { 6787 if_printf(ifp, " 0x%08X : IfInFTQDiscards\n", 6788 sblk->stat_IfInFTQDiscards); 6789 } 6790 6791 if (sblk->stat_IfInMBUFDiscards) { 6792 if_printf(ifp, " 0x%08X : IfInMBUFDiscards\n", 6793 sblk->stat_IfInMBUFDiscards); 6794 } 6795 6796 if (sblk->stat_IfInRuleCheckerP4Hit) { 6797 if_printf(ifp, " 0x%08X : IfInRuleCheckerP4Hit\n", 6798 sblk->stat_IfInRuleCheckerP4Hit); 6799 } 6800 6801 if (sblk->stat_CatchupInRuleCheckerDiscards) { 6802 if_printf(ifp, " 0x%08X : " 6803 "CatchupInRuleCheckerDiscards\n", 6804 sblk->stat_CatchupInRuleCheckerDiscards); 6805 } 6806 6807 if (sblk->stat_CatchupInFTQDiscards) { 6808 if_printf(ifp, " 0x%08X : CatchupInFTQDiscards\n", 6809 sblk->stat_CatchupInFTQDiscards); 
6810 } 6811 6812 if (sblk->stat_CatchupInMBUFDiscards) { 6813 if_printf(ifp, " 0x%08X : CatchupInMBUFDiscards\n", 6814 sblk->stat_CatchupInMBUFDiscards); 6815 } 6816 6817 if (sblk->stat_CatchupInRuleCheckerP4Hit) { 6818 if_printf(ifp, " 0x%08X : CatchupInRuleCheckerP4Hit\n", 6819 sblk->stat_CatchupInRuleCheckerP4Hit); 6820 } 6821 6822 if_printf(ifp, 6823 "----------------------------" 6824 "----------------" 6825 "----------------------------\n"); 6826 } 6827 6828 6829 /****************************************************************************/ 6830 /* Prints out a summary of the driver state. */ 6831 /* */ 6832 /* Returns: */ 6833 /* Nothing. */ 6834 /****************************************************************************/ 6835 static void 6836 bce_dump_driver_state(struct bce_softc *sc) 6837 { 6838 struct ifnet *ifp = &sc->arpcom.ac_if; 6839 uint32_t val_hi, val_lo; 6840 6841 if_printf(ifp, 6842 "-----------------------------" 6843 " Driver State " 6844 "-----------------------------\n"); 6845 6846 val_hi = BCE_ADDR_HI(sc); 6847 val_lo = BCE_ADDR_LO(sc); 6848 if_printf(ifp, "0x%08X:%08X - (sc) driver softc structure " 6849 "virtual address\n", val_hi, val_lo); 6850 6851 val_hi = BCE_ADDR_HI(sc->status_block); 6852 val_lo = BCE_ADDR_LO(sc->status_block); 6853 if_printf(ifp, "0x%08X:%08X - (sc->status_block) status block " 6854 "virtual address\n", val_hi, val_lo); 6855 6856 val_hi = BCE_ADDR_HI(sc->stats_block); 6857 val_lo = BCE_ADDR_LO(sc->stats_block); 6858 if_printf(ifp, "0x%08X:%08X - (sc->stats_block) statistics block " 6859 "virtual address\n", val_hi, val_lo); 6860 6861 val_hi = BCE_ADDR_HI(sc->tx_bd_chain); 6862 val_lo = BCE_ADDR_LO(sc->tx_bd_chain); 6863 if_printf(ifp, "0x%08X:%08X - (sc->tx_bd_chain) tx_bd chain " 6864 "virtual adddress\n", val_hi, val_lo); 6865 6866 val_hi = BCE_ADDR_HI(sc->rx_bd_chain); 6867 val_lo = BCE_ADDR_LO(sc->rx_bd_chain); 6868 if_printf(ifp, "0x%08X:%08X - (sc->rx_bd_chain) rx_bd chain " 6869 "virtual address\n", 
val_hi, val_lo); 6870 6871 val_hi = BCE_ADDR_HI(sc->tx_mbuf_ptr); 6872 val_lo = BCE_ADDR_LO(sc->tx_mbuf_ptr); 6873 if_printf(ifp, "0x%08X:%08X - (sc->tx_mbuf_ptr) tx mbuf chain " 6874 "virtual address\n", val_hi, val_lo); 6875 6876 val_hi = BCE_ADDR_HI(sc->rx_mbuf_ptr); 6877 val_lo = BCE_ADDR_LO(sc->rx_mbuf_ptr); 6878 if_printf(ifp, "0x%08X:%08X - (sc->rx_mbuf_ptr) rx mbuf chain " 6879 "virtual address\n", val_hi, val_lo); 6880 6881 if_printf(ifp, " 0x%08X - (sc->interrupts_generated) " 6882 "h/w intrs\n", sc->interrupts_generated); 6883 6884 if_printf(ifp, " 0x%08X - (sc->rx_interrupts) " 6885 "rx interrupts handled\n", sc->rx_interrupts); 6886 6887 if_printf(ifp, " 0x%08X - (sc->tx_interrupts) " 6888 "tx interrupts handled\n", sc->tx_interrupts); 6889 6890 if_printf(ifp, " 0x%08X - (sc->last_status_idx) " 6891 "status block index\n", sc->last_status_idx); 6892 6893 if_printf(ifp, " 0x%04X(0x%04X) - (sc->tx_prod) " 6894 "tx producer index\n", 6895 sc->tx_prod, (uint16_t)TX_CHAIN_IDX(sc->tx_prod)); 6896 6897 if_printf(ifp, " 0x%04X(0x%04X) - (sc->tx_cons) " 6898 "tx consumer index\n", 6899 sc->tx_cons, (uint16_t)TX_CHAIN_IDX(sc->tx_cons)); 6900 6901 if_printf(ifp, " 0x%08X - (sc->tx_prod_bseq) " 6902 "tx producer bseq index\n", sc->tx_prod_bseq); 6903 6904 if_printf(ifp, " 0x%04X(0x%04X) - (sc->rx_prod) " 6905 "rx producer index\n", 6906 sc->rx_prod, (uint16_t)RX_CHAIN_IDX(sc->rx_prod)); 6907 6908 if_printf(ifp, " 0x%04X(0x%04X) - (sc->rx_cons) " 6909 "rx consumer index\n", 6910 sc->rx_cons, (uint16_t)RX_CHAIN_IDX(sc->rx_cons)); 6911 6912 if_printf(ifp, " 0x%08X - (sc->rx_prod_bseq) " 6913 "rx producer bseq index\n", sc->rx_prod_bseq); 6914 6915 if_printf(ifp, " 0x%08X - (sc->rx_mbuf_alloc) " 6916 "rx mbufs allocated\n", sc->rx_mbuf_alloc); 6917 6918 if_printf(ifp, " 0x%08X - (sc->free_rx_bd) " 6919 "free rx_bd's\n", sc->free_rx_bd); 6920 6921 if_printf(ifp, "0x%08X/%08X - (sc->rx_low_watermark) rx " 6922 "low watermark\n", sc->rx_low_watermark, sc->max_rx_bd); 
6923 6924 if_printf(ifp, " 0x%08X - (sc->txmbuf_alloc) " 6925 "tx mbufs allocated\n", sc->tx_mbuf_alloc); 6926 6927 if_printf(ifp, " 0x%08X - (sc->rx_mbuf_alloc) " 6928 "rx mbufs allocated\n", sc->rx_mbuf_alloc); 6929 6930 if_printf(ifp, " 0x%08X - (sc->used_tx_bd) used tx_bd's\n", 6931 sc->used_tx_bd); 6932 6933 if_printf(ifp, "0x%08X/%08X - (sc->tx_hi_watermark) tx hi watermark\n", 6934 sc->tx_hi_watermark, sc->max_tx_bd); 6935 6936 if_printf(ifp, " 0x%08X - (sc->mbuf_alloc_failed) " 6937 "failed mbuf alloc\n", sc->mbuf_alloc_failed); 6938 6939 if_printf(ifp, 6940 "----------------------------" 6941 "----------------" 6942 "----------------------------\n"); 6943 } 6944 6945 6946 /****************************************************************************/ 6947 /* Prints out the hardware state through a summary of important registers, */ 6948 /* followed by a complete register dump. */ 6949 /* */ 6950 /* Returns: */ 6951 /* Nothing. */ 6952 /****************************************************************************/ 6953 static void 6954 bce_dump_hw_state(struct bce_softc *sc) 6955 { 6956 struct ifnet *ifp = &sc->arpcom.ac_if; 6957 uint32_t val1; 6958 int i; 6959 6960 if_printf(ifp, 6961 "----------------------------" 6962 " Hardware State " 6963 "----------------------------\n"); 6964 6965 if_printf(ifp, "0x%08X - bootcode version\n", sc->bce_fw_ver); 6966 6967 val1 = REG_RD(sc, BCE_MISC_ENABLE_STATUS_BITS); 6968 if_printf(ifp, "0x%08X - (0x%06X) misc_enable_status_bits\n", 6969 val1, BCE_MISC_ENABLE_STATUS_BITS); 6970 6971 val1 = REG_RD(sc, BCE_DMA_STATUS); 6972 if_printf(ifp, "0x%08X - (0x%04X) dma_status\n", val1, BCE_DMA_STATUS); 6973 6974 val1 = REG_RD(sc, BCE_CTX_STATUS); 6975 if_printf(ifp, "0x%08X - (0x%04X) ctx_status\n", val1, BCE_CTX_STATUS); 6976 6977 val1 = REG_RD(sc, BCE_EMAC_STATUS); 6978 if_printf(ifp, "0x%08X - (0x%04X) emac_status\n", 6979 val1, BCE_EMAC_STATUS); 6980 6981 val1 = REG_RD(sc, BCE_RPM_STATUS); 6982 if_printf(ifp, "0x%08X - 
(0x%04X) rpm_status\n", val1, BCE_RPM_STATUS); 6983 6984 val1 = REG_RD(sc, BCE_TBDR_STATUS); 6985 if_printf(ifp, "0x%08X - (0x%04X) tbdr_status\n", 6986 val1, BCE_TBDR_STATUS); 6987 6988 val1 = REG_RD(sc, BCE_TDMA_STATUS); 6989 if_printf(ifp, "0x%08X - (0x%04X) tdma_status\n", 6990 val1, BCE_TDMA_STATUS); 6991 6992 val1 = REG_RD(sc, BCE_HC_STATUS); 6993 if_printf(ifp, "0x%08X - (0x%06X) hc_status\n", val1, BCE_HC_STATUS); 6994 6995 val1 = REG_RD_IND(sc, BCE_TXP_CPU_STATE); 6996 if_printf(ifp, "0x%08X - (0x%06X) txp_cpu_state\n", 6997 val1, BCE_TXP_CPU_STATE); 6998 6999 val1 = REG_RD_IND(sc, BCE_TPAT_CPU_STATE); 7000 if_printf(ifp, "0x%08X - (0x%06X) tpat_cpu_state\n", 7001 val1, BCE_TPAT_CPU_STATE); 7002 7003 val1 = REG_RD_IND(sc, BCE_RXP_CPU_STATE); 7004 if_printf(ifp, "0x%08X - (0x%06X) rxp_cpu_state\n", 7005 val1, BCE_RXP_CPU_STATE); 7006 7007 val1 = REG_RD_IND(sc, BCE_COM_CPU_STATE); 7008 if_printf(ifp, "0x%08X - (0x%06X) com_cpu_state\n", 7009 val1, BCE_COM_CPU_STATE); 7010 7011 val1 = REG_RD_IND(sc, BCE_MCP_CPU_STATE); 7012 if_printf(ifp, "0x%08X - (0x%06X) mcp_cpu_state\n", 7013 val1, BCE_MCP_CPU_STATE); 7014 7015 val1 = REG_RD_IND(sc, BCE_CP_CPU_STATE); 7016 if_printf(ifp, "0x%08X - (0x%06X) cp_cpu_state\n", 7017 val1, BCE_CP_CPU_STATE); 7018 7019 if_printf(ifp, 7020 "----------------------------" 7021 "----------------" 7022 "----------------------------\n"); 7023 7024 if_printf(ifp, 7025 "----------------------------" 7026 " Register Dump " 7027 "----------------------------\n"); 7028 7029 for (i = 0x400; i < 0x8000; i += 0x10) { 7030 if_printf(ifp, "0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n", i, 7031 REG_RD(sc, i), 7032 REG_RD(sc, i + 0x4), 7033 REG_RD(sc, i + 0x8), 7034 REG_RD(sc, i + 0xc)); 7035 } 7036 7037 if_printf(ifp, 7038 "----------------------------" 7039 "----------------" 7040 "----------------------------\n"); 7041 } 7042 7043 7044 /****************************************************************************/ 7045 /* Prints out the TXP state. 
*/ 7046 /* */ 7047 /* Returns: */ 7048 /* Nothing. */ 7049 /****************************************************************************/ 7050 static void 7051 bce_dump_txp_state(struct bce_softc *sc) 7052 { 7053 struct ifnet *ifp = &sc->arpcom.ac_if; 7054 uint32_t val1; 7055 int i; 7056 7057 if_printf(ifp, 7058 "----------------------------" 7059 " TXP State " 7060 "----------------------------\n"); 7061 7062 val1 = REG_RD_IND(sc, BCE_TXP_CPU_MODE); 7063 if_printf(ifp, "0x%08X - (0x%06X) txp_cpu_mode\n", 7064 val1, BCE_TXP_CPU_MODE); 7065 7066 val1 = REG_RD_IND(sc, BCE_TXP_CPU_STATE); 7067 if_printf(ifp, "0x%08X - (0x%06X) txp_cpu_state\n", 7068 val1, BCE_TXP_CPU_STATE); 7069 7070 val1 = REG_RD_IND(sc, BCE_TXP_CPU_EVENT_MASK); 7071 if_printf(ifp, "0x%08X - (0x%06X) txp_cpu_event_mask\n", 7072 val1, BCE_TXP_CPU_EVENT_MASK); 7073 7074 if_printf(ifp, 7075 "----------------------------" 7076 " Register Dump " 7077 "----------------------------\n"); 7078 7079 for (i = BCE_TXP_CPU_MODE; i < 0x68000; i += 0x10) { 7080 /* Skip the big blank spaces */ 7081 if (i < 0x454000 && i > 0x5ffff) { 7082 if_printf(ifp, "0x%04X: " 7083 "0x%08X 0x%08X 0x%08X 0x%08X\n", i, 7084 REG_RD_IND(sc, i), 7085 REG_RD_IND(sc, i + 0x4), 7086 REG_RD_IND(sc, i + 0x8), 7087 REG_RD_IND(sc, i + 0xc)); 7088 } 7089 } 7090 7091 if_printf(ifp, 7092 "----------------------------" 7093 "----------------" 7094 "----------------------------\n"); 7095 } 7096 7097 7098 /****************************************************************************/ 7099 /* Prints out the RXP state. */ 7100 /* */ 7101 /* Returns: */ 7102 /* Nothing. 
*/ 7103 /****************************************************************************/ 7104 static void 7105 bce_dump_rxp_state(struct bce_softc *sc) 7106 { 7107 struct ifnet *ifp = &sc->arpcom.ac_if; 7108 uint32_t val1; 7109 int i; 7110 7111 if_printf(ifp, 7112 "----------------------------" 7113 " RXP State " 7114 "----------------------------\n"); 7115 7116 val1 = REG_RD_IND(sc, BCE_RXP_CPU_MODE); 7117 if_printf(ifp, "0x%08X - (0x%06X) rxp_cpu_mode\n", 7118 val1, BCE_RXP_CPU_MODE); 7119 7120 val1 = REG_RD_IND(sc, BCE_RXP_CPU_STATE); 7121 if_printf(ifp, "0x%08X - (0x%06X) rxp_cpu_state\n", 7122 val1, BCE_RXP_CPU_STATE); 7123 7124 val1 = REG_RD_IND(sc, BCE_RXP_CPU_EVENT_MASK); 7125 if_printf(ifp, "0x%08X - (0x%06X) rxp_cpu_event_mask\n", 7126 val1, BCE_RXP_CPU_EVENT_MASK); 7127 7128 if_printf(ifp, 7129 "----------------------------" 7130 " Register Dump " 7131 "----------------------------\n"); 7132 7133 for (i = BCE_RXP_CPU_MODE; i < 0xe8fff; i += 0x10) { 7134 /* Skip the big blank sapces */ 7135 if (i < 0xc5400 && i > 0xdffff) { 7136 if_printf(ifp, "0x%04X: " 7137 "0x%08X 0x%08X 0x%08X 0x%08X\n", i, 7138 REG_RD_IND(sc, i), 7139 REG_RD_IND(sc, i + 0x4), 7140 REG_RD_IND(sc, i + 0x8), 7141 REG_RD_IND(sc, i + 0xc)); 7142 } 7143 } 7144 7145 if_printf(ifp, 7146 "----------------------------" 7147 "----------------" 7148 "----------------------------\n"); 7149 } 7150 7151 7152 /****************************************************************************/ 7153 /* Prints out the TPAT state. */ 7154 /* */ 7155 /* Returns: */ 7156 /* Nothing. 
*/ 7157 /****************************************************************************/ 7158 static void 7159 bce_dump_tpat_state(struct bce_softc *sc) 7160 { 7161 struct ifnet *ifp = &sc->arpcom.ac_if; 7162 uint32_t val1; 7163 int i; 7164 7165 if_printf(ifp, 7166 "----------------------------" 7167 " TPAT State " 7168 "----------------------------\n"); 7169 7170 val1 = REG_RD_IND(sc, BCE_TPAT_CPU_MODE); 7171 if_printf(ifp, "0x%08X - (0x%06X) tpat_cpu_mode\n", 7172 val1, BCE_TPAT_CPU_MODE); 7173 7174 val1 = REG_RD_IND(sc, BCE_TPAT_CPU_STATE); 7175 if_printf(ifp, "0x%08X - (0x%06X) tpat_cpu_state\n", 7176 val1, BCE_TPAT_CPU_STATE); 7177 7178 val1 = REG_RD_IND(sc, BCE_TPAT_CPU_EVENT_MASK); 7179 if_printf(ifp, "0x%08X - (0x%06X) tpat_cpu_event_mask\n", 7180 val1, BCE_TPAT_CPU_EVENT_MASK); 7181 7182 if_printf(ifp, 7183 "----------------------------" 7184 " Register Dump " 7185 "----------------------------\n"); 7186 7187 for (i = BCE_TPAT_CPU_MODE; i < 0xa3fff; i += 0x10) { 7188 /* Skip the big blank spaces */ 7189 if (i < 0x854000 && i > 0x9ffff) { 7190 if_printf(ifp, "0x%04X: " 7191 "0x%08X 0x%08X 0x%08X 0x%08X\n", i, 7192 REG_RD_IND(sc, i), 7193 REG_RD_IND(sc, i + 0x4), 7194 REG_RD_IND(sc, i + 0x8), 7195 REG_RD_IND(sc, i + 0xc)); 7196 } 7197 } 7198 7199 if_printf(ifp, 7200 "----------------------------" 7201 "----------------" 7202 "----------------------------\n"); 7203 } 7204 7205 7206 /****************************************************************************/ 7207 /* Prints out the driver state and then enters the debugger. */ 7208 /* */ 7209 /* Returns: */ 7210 /* Nothing. 
*/ 7211 /****************************************************************************/ 7212 static void 7213 bce_breakpoint(struct bce_softc *sc) 7214 { 7215 #if 0 7216 bce_freeze_controller(sc); 7217 #endif 7218 7219 bce_dump_driver_state(sc); 7220 bce_dump_status_block(sc); 7221 bce_dump_tx_chain(sc, 0, TOTAL_TX_BD); 7222 bce_dump_hw_state(sc); 7223 bce_dump_txp_state(sc); 7224 7225 #if 0 7226 bce_unfreeze_controller(sc); 7227 #endif 7228 7229 /* Call the debugger. */ 7230 breakpoint(); 7231 } 7232 7233 #endif /* BCE_DEBUG */ 7234 7235 static int 7236 bce_sysctl_tx_bds_int(SYSCTL_HANDLER_ARGS) 7237 { 7238 struct bce_softc *sc = arg1; 7239 7240 return bce_sysctl_coal_change(oidp, arg1, arg2, req, 7241 &sc->bce_tx_quick_cons_trip_int, 7242 BCE_COALMASK_TX_BDS_INT); 7243 } 7244 7245 static int 7246 bce_sysctl_tx_bds(SYSCTL_HANDLER_ARGS) 7247 { 7248 struct bce_softc *sc = arg1; 7249 7250 return bce_sysctl_coal_change(oidp, arg1, arg2, req, 7251 &sc->bce_tx_quick_cons_trip, 7252 BCE_COALMASK_TX_BDS); 7253 } 7254 7255 static int 7256 bce_sysctl_tx_ticks_int(SYSCTL_HANDLER_ARGS) 7257 { 7258 struct bce_softc *sc = arg1; 7259 7260 return bce_sysctl_coal_change(oidp, arg1, arg2, req, 7261 &sc->bce_tx_ticks_int, 7262 BCE_COALMASK_TX_TICKS_INT); 7263 } 7264 7265 static int 7266 bce_sysctl_tx_ticks(SYSCTL_HANDLER_ARGS) 7267 { 7268 struct bce_softc *sc = arg1; 7269 7270 return bce_sysctl_coal_change(oidp, arg1, arg2, req, 7271 &sc->bce_tx_ticks, 7272 BCE_COALMASK_TX_TICKS); 7273 } 7274 7275 static int 7276 bce_sysctl_rx_bds_int(SYSCTL_HANDLER_ARGS) 7277 { 7278 struct bce_softc *sc = arg1; 7279 7280 return bce_sysctl_coal_change(oidp, arg1, arg2, req, 7281 &sc->bce_rx_quick_cons_trip_int, 7282 BCE_COALMASK_RX_BDS_INT); 7283 } 7284 7285 static int 7286 bce_sysctl_rx_bds(SYSCTL_HANDLER_ARGS) 7287 { 7288 struct bce_softc *sc = arg1; 7289 7290 return bce_sysctl_coal_change(oidp, arg1, arg2, req, 7291 &sc->bce_rx_quick_cons_trip, 7292 BCE_COALMASK_RX_BDS); 7293 } 7294 7295 static 
int 7296 bce_sysctl_rx_ticks_int(SYSCTL_HANDLER_ARGS) 7297 { 7298 struct bce_softc *sc = arg1; 7299 7300 return bce_sysctl_coal_change(oidp, arg1, arg2, req, 7301 &sc->bce_rx_ticks_int, 7302 BCE_COALMASK_RX_TICKS_INT); 7303 } 7304 7305 static int 7306 bce_sysctl_rx_ticks(SYSCTL_HANDLER_ARGS) 7307 { 7308 struct bce_softc *sc = arg1; 7309 7310 return bce_sysctl_coal_change(oidp, arg1, arg2, req, 7311 &sc->bce_rx_ticks, 7312 BCE_COALMASK_RX_TICKS); 7313 } 7314 7315 static int 7316 bce_sysctl_coal_change(SYSCTL_HANDLER_ARGS, uint32_t *coal, 7317 uint32_t coalchg_mask) 7318 { 7319 struct bce_softc *sc = arg1; 7320 struct ifnet *ifp = &sc->arpcom.ac_if; 7321 int error = 0, v; 7322 7323 lwkt_serialize_enter(ifp->if_serializer); 7324 7325 v = *coal; 7326 error = sysctl_handle_int(oidp, &v, 0, req); 7327 if (!error && req->newptr != NULL) { 7328 if (v < 0) { 7329 error = EINVAL; 7330 } else { 7331 *coal = v; 7332 sc->bce_coalchg_mask |= coalchg_mask; 7333 } 7334 } 7335 7336 lwkt_serialize_exit(ifp->if_serializer); 7337 return error; 7338 } 7339 7340 static void 7341 bce_coal_change(struct bce_softc *sc) 7342 { 7343 struct ifnet *ifp = &sc->arpcom.ac_if; 7344 7345 ASSERT_SERIALIZED(ifp->if_serializer); 7346 7347 if ((ifp->if_flags & IFF_RUNNING) == 0) { 7348 sc->bce_coalchg_mask = 0; 7349 return; 7350 } 7351 7352 if (sc->bce_coalchg_mask & 7353 (BCE_COALMASK_TX_BDS | BCE_COALMASK_TX_BDS_INT)) { 7354 REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP, 7355 (sc->bce_tx_quick_cons_trip_int << 16) | 7356 sc->bce_tx_quick_cons_trip); 7357 if (bootverbose) { 7358 if_printf(ifp, "tx_bds %u, tx_bds_int %u\n", 7359 sc->bce_tx_quick_cons_trip, 7360 sc->bce_tx_quick_cons_trip_int); 7361 } 7362 } 7363 7364 if (sc->bce_coalchg_mask & 7365 (BCE_COALMASK_TX_TICKS | BCE_COALMASK_TX_TICKS_INT)) { 7366 REG_WR(sc, BCE_HC_TX_TICKS, 7367 (sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks); 7368 if (bootverbose) { 7369 if_printf(ifp, "tx_ticks %u, tx_ticks_int %u\n", 7370 sc->bce_tx_ticks, sc->bce_tx_ticks_int); 
7371 } 7372 } 7373 7374 if (sc->bce_coalchg_mask & 7375 (BCE_COALMASK_RX_BDS | BCE_COALMASK_RX_BDS_INT)) { 7376 REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP, 7377 (sc->bce_rx_quick_cons_trip_int << 16) | 7378 sc->bce_rx_quick_cons_trip); 7379 if (bootverbose) { 7380 if_printf(ifp, "rx_bds %u, rx_bds_int %u\n", 7381 sc->bce_rx_quick_cons_trip, 7382 sc->bce_rx_quick_cons_trip_int); 7383 } 7384 } 7385 7386 if (sc->bce_coalchg_mask & 7387 (BCE_COALMASK_RX_TICKS | BCE_COALMASK_RX_TICKS_INT)) { 7388 REG_WR(sc, BCE_HC_RX_TICKS, 7389 (sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks); 7390 if (bootverbose) { 7391 if_printf(ifp, "rx_ticks %u, rx_ticks_int %u\n", 7392 sc->bce_rx_ticks, sc->bce_rx_ticks_int); 7393 } 7394 } 7395 7396 sc->bce_coalchg_mask = 0; 7397 } 7398