/*
 * Copyright (c) 2001 Wind River Systems
 * Copyright (c) 1997, 1998, 1999, 2001
 *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/bge/if_bge.c,v 1.3.2.39 2005/07/03 03:41:18 silby Exp $
 */

/*
 * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
 *
 * Written by Bill Paul <wpaul@windriver.com>
 * Senior Engineer, Wind River Systems
 */

/*
 * The Broadcom BCM5700 is based on technology originally developed by
 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
 * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
 * frames, highly configurable RX filtering, and 16 RX and TX queues
 * (which, along with RX filter rules, can be used for QOS applications).
 * Other features, such as TCP segmentation, may be available as part
 * of value-added firmware updates. Unlike the Tigon I and Tigon II,
 * firmware images can be stored in hardware and need not be compiled
 * into the driver.
 *
 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
 *
 * The BCM5701 is a single-chip solution incorporating both the BCM5700
 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
 * does not support external SSRAM.
 *
 * Broadcom also produces a variation of the BCM5700 under the "Altima"
 * brand name, which is functionally similar but lacks PCI-X support.
 *
 * Without external SSRAM, you can only have at most 4 TX rings,
 * and the use of the mini RX ring is disabled.  This seems to imply
 * that these features are simply not available on the BCM5701. As a
 * result, this driver does not implement any support for the mini RX
 * ring.
 */

#include "opt_polling.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/interrupt.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/ifq_var.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>

#include <dev/netif/mii_layer/mii.h>
#include <dev/netif/mii_layer/miivar.h>
#include <dev/netif/mii_layer/brgphyreg.h>

#include <bus/pci/pcidevs.h>
#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>

#include <dev/netif/bge/if_bgereg.h>

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

#define BGE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
#define BGE_MIN_FRAME		60

static const struct bge_type bge_devs[] = {
	{ PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3C996,
		"3COM 3C996 Gigabit Ethernet" },

	{ PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_BCM5700,
		"Alteon BCM5700 Gigabit Ethernet" },
	{ PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_BCM5701,
		"Alteon BCM5701 Gigabit Ethernet" },

	{ PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1000,
		"Altima AC1000 Gigabit Ethernet" },
	{ PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1001,
		"Altima AC1002 Gigabit Ethernet" },
	{ PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC9100,
		"Altima AC9100 Gigabit Ethernet" },

	{ PCI_VENDOR_APPLE, PCI_PRODUCT_APPLE_BCM5701,
		"Apple BCM5701 Gigabit Ethernet" },

	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5700,
		"Broadcom BCM5700 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5701,
		"Broadcom BCM5701 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702,
		"Broadcom BCM5702 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702X,
		"Broadcom BCM5702X Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702_ALT,
		"Broadcom BCM5702 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703,
		"Broadcom BCM5703 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703X,
		"Broadcom BCM5703X Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703A3,
		"Broadcom BCM5703 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704C,
		"Broadcom BCM5704C Dual Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704S,
		"Broadcom BCM5704S Dual Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704S_ALT,
		"Broadcom BCM5704S Dual Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705,
		"Broadcom BCM5705 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705F,
		"Broadcom BCM5705F Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705K,
		"Broadcom BCM5705K Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705M,
		"Broadcom BCM5705M Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705M_ALT,
		"Broadcom BCM5705M Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5714,
		"Broadcom BCM5714C Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5714S,
		"Broadcom BCM5714S Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5715,
		"Broadcom BCM5715 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5715S,
		"Broadcom BCM5715S Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5720,
		"Broadcom BCM5720 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5721,
		"Broadcom BCM5721 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5722,
		"Broadcom BCM5722 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5723,
		"Broadcom BCM5723 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5750,
		"Broadcom BCM5750 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5750M,
		"Broadcom BCM5750M Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751,
		"Broadcom BCM5751 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751F,
		"Broadcom BCM5751F Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751M,
		"Broadcom BCM5751M Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5752,
		"Broadcom BCM5752 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5752M,
		"Broadcom BCM5752M Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5753,
		"Broadcom BCM5753 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5753F,
		"Broadcom BCM5753F Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5753M,
		"Broadcom BCM5753M Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5754,
		"Broadcom BCM5754 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5754M,
		"Broadcom BCM5754M Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5755,
		"Broadcom BCM5755 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5755M,
		"Broadcom BCM5755M Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5756,
		"Broadcom BCM5756 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5761,
		"Broadcom BCM5761 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5761E,
		"Broadcom BCM5761E Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5761S,
		"Broadcom BCM5761S Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5761SE,
		"Broadcom BCM5761SE Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5764,
		"Broadcom BCM5764 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5780,
		"Broadcom BCM5780 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5780S,
		"Broadcom BCM5780S Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5781,
		"Broadcom BCM5781 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5782,
		"Broadcom BCM5782 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5784,
		"Broadcom BCM5784 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5785F,
		"Broadcom BCM5785F Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5785G,
		"Broadcom BCM5785G Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5786,
		"Broadcom BCM5786 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5787,
		"Broadcom BCM5787 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5787F,
		"Broadcom BCM5787F Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5787M,
		"Broadcom BCM5787M Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5788,
		"Broadcom BCM5788 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5789,
		"Broadcom BCM5789 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5901,
		"Broadcom BCM5901 Fast Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5901A2,
		"Broadcom BCM5901A2 Fast Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5903M,
		"Broadcom BCM5903M Fast Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5906,
		"Broadcom BCM5906 Fast Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5906M,
		"Broadcom BCM5906M Fast Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57760,
		"Broadcom BCM57760 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57780,
		"Broadcom BCM57780 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57788,
		"Broadcom BCM57788 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57790,
		"Broadcom BCM57790 Gigabit Ethernet" },

	{ PCI_VENDOR_SCHNEIDERKOCH, PCI_PRODUCT_SCHNEIDERKOCH_SK_9DX1,
		"SysKonnect Gigabit Ethernet" },

	{ 0, 0, NULL }
};

#define BGE_IS_JUMBO_CAPABLE(sc)	((sc)->bge_flags & BGE_FLAG_JUMBO)
#define BGE_IS_5700_FAMILY(sc)		((sc)->bge_flags & BGE_FLAG_5700_FAMILY)
#define BGE_IS_5705_PLUS(sc)		((sc)->bge_flags & BGE_FLAG_5705_PLUS)
#define BGE_IS_5714_FAMILY(sc)		((sc)->bge_flags & BGE_FLAG_5714_FAMILY)
#define BGE_IS_575X_PLUS(sc)		((sc)->bge_flags & BGE_FLAG_575X_PLUS)
#define BGE_IS_5755_PLUS(sc)		((sc)->bge_flags & BGE_FLAG_5755_PLUS)

typedef int	(*bge_eaddr_fcn_t)(struct bge_softc *, uint8_t[]);

static int	bge_probe(device_t);
static int	bge_attach(device_t);
static int	bge_detach(device_t);
static void	bge_txeof(struct bge_softc *);
static void	bge_rxeof(struct bge_softc *);

static void	bge_tick(void *);
static void	bge_stats_update(struct bge_softc *);
static void	bge_stats_update_regs(struct bge_softc *);
static int	bge_encap(struct bge_softc *, struct mbuf **, uint32_t *);

#ifdef DEVICE_POLLING
static void	bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count);
#endif
static void	bge_intr(void *);
static void	bge_enable_intr(struct bge_softc *);
static void	bge_disable_intr(struct bge_softc *);
static void	bge_start(struct ifnet *);
static int	bge_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	bge_init(void *);
static void	bge_stop(struct bge_softc *);
static void	bge_watchdog(struct ifnet *);
static void	bge_shutdown(device_t);
static int	bge_suspend(device_t);
static int	bge_resume(device_t);
static int	bge_ifmedia_upd(struct ifnet *);
static void	bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);

static uint8_t	bge_nvram_getbyte(struct bge_softc *, int, uint8_t *);
static int	bge_read_nvram(struct bge_softc *, caddr_t, int, int);

static uint8_t	bge_eeprom_getbyte(struct bge_softc *, uint32_t, uint8_t *);
static int	bge_read_eeprom(struct bge_softc *, caddr_t, uint32_t, size_t);

static void	bge_setmulti(struct bge_softc *);
static void	bge_setpromisc(struct bge_softc *);

static int	bge_alloc_jumbo_mem(struct bge_softc *);
static void	bge_free_jumbo_mem(struct bge_softc *);
static struct bge_jslot
		*bge_jalloc(struct bge_softc *);
static void	bge_jfree(void *);
static void	bge_jref(void *);
static int	bge_newbuf_std(struct bge_softc *, int, int);
static int	bge_newbuf_jumbo(struct bge_softc *, int, int);
static void	bge_setup_rxdesc_std(struct bge_softc *, int);
static void	bge_setup_rxdesc_jumbo(struct bge_softc *, int);
static int	bge_init_rx_ring_std(struct bge_softc *);
static void	bge_free_rx_ring_std(struct bge_softc *);
static int	bge_init_rx_ring_jumbo(struct bge_softc *);
static void	bge_free_rx_ring_jumbo(struct bge_softc *);
static void	bge_free_tx_ring(struct bge_softc *);
static int	bge_init_tx_ring(struct bge_softc *);

static int	bge_chipinit(struct bge_softc *);
static int	bge_blockinit(struct bge_softc *);

static uint32_t	bge_readmem_ind(struct bge_softc *, uint32_t);
static void	bge_writemem_ind(struct bge_softc *, uint32_t, uint32_t);
#ifdef notdef
static uint32_t	bge_readreg_ind(struct bge_softc *, uint32_t);
#endif
static void	bge_writereg_ind(struct bge_softc *, uint32_t, uint32_t);
static void	bge_writemem_direct(struct bge_softc *, uint32_t, uint32_t);
static void	bge_writembx(struct bge_softc *, int, int);

static int	bge_miibus_readreg(device_t, int, int);
static int	bge_miibus_writereg(device_t, int, int, int);
static void	bge_miibus_statchg(device_t);
static void	bge_bcm5700_link_upd(struct bge_softc *, uint32_t);
static void	bge_tbi_link_upd(struct bge_softc *, uint32_t);
static void	bge_copper_link_upd(struct bge_softc *, uint32_t);

static void	bge_reset(struct bge_softc *);

static int	bge_dma_alloc(struct bge_softc *);
static void	bge_dma_free(struct bge_softc *);
static int	bge_dma_block_alloc(struct bge_softc *, bus_size_t,
				    bus_dma_tag_t *, bus_dmamap_t *,
				    void **, bus_addr_t *);
static void	bge_dma_block_free(bus_dma_tag_t, bus_dmamap_t, void *);

static int	bge_get_eaddr_mem(struct bge_softc *, uint8_t[]);
static int	bge_get_eaddr_nvram(struct bge_softc *, uint8_t[]);
static int	bge_get_eaddr_eeprom(struct bge_softc *, uint8_t[]);
static int	bge_get_eaddr(struct bge_softc *, uint8_t[]);

static void	bge_coal_change(struct bge_softc *);
static int	bge_sysctl_rx_coal_ticks(SYSCTL_HANDLER_ARGS);
static int	bge_sysctl_tx_coal_ticks(SYSCTL_HANDLER_ARGS);
static int	bge_sysctl_rx_max_coal_bds(SYSCTL_HANDLER_ARGS);
static int	bge_sysctl_tx_max_coal_bds(SYSCTL_HANDLER_ARGS);
static int	bge_sysctl_coal_chg(SYSCTL_HANDLER_ARGS, uint32_t *, uint32_t);

/*
 * Set following tunable to 1 for some IBM blade servers with the DNLK
 * switch module. Auto negotiation is broken for those configurations.
 */
static int	bge_fake_autoneg = 0;
TUNABLE_INT("hw.bge.fake_autoneg", &bge_fake_autoneg);

/*
 * Interrupt moderation control variables.
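 *
 * The defaults below can be overridden from the loader environment
 * (e.g. in /boot/loader.conf):
 *
 *	hw.bge.rx_coal_ticks="100"
 *	hw.bge.tx_coal_ticks="1023"
 *	hw.bge.rx_max_coal_bds="80"
 *	hw.bge.tx_max_coal_bds="128"
 *
 * and changed at run time through the bge_sysctl_*_coal_* handlers
 * declared above.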
 */
static int	bge_rx_coal_ticks = 100;	/* usec */
static int	bge_tx_coal_ticks = 1023;	/* usec */
static int	bge_rx_max_coal_bds = 80;
static int	bge_tx_max_coal_bds = 128;

TUNABLE_INT("hw.bge.rx_coal_ticks", &bge_rx_coal_ticks);
TUNABLE_INT("hw.bge.tx_coal_ticks", &bge_tx_coal_ticks);
TUNABLE_INT("hw.bge.rx_max_coal_bds", &bge_rx_max_coal_bds);
TUNABLE_INT("hw.bge.tx_max_coal_bds", &bge_tx_max_coal_bds);

#if !defined(KTR_IF_BGE)
#define KTR_IF_BGE	KTR_ALL
#endif
KTR_INFO_MASTER(if_bge);
KTR_INFO(KTR_IF_BGE, if_bge, intr, 0, "intr", 0);
KTR_INFO(KTR_IF_BGE, if_bge, rx_pkt, 1, "rx_pkt", 0);
KTR_INFO(KTR_IF_BGE, if_bge, tx_pkt, 2, "tx_pkt", 0);
#define logif(name)	KTR_LOG(if_bge_ ## name)

static device_method_t bge_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		bge_probe),
	DEVMETHOD(device_attach,	bge_attach),
	DEVMETHOD(device_detach,	bge_detach),
	DEVMETHOD(device_shutdown,	bge_shutdown),
	DEVMETHOD(device_suspend,	bge_suspend),
	DEVMETHOD(device_resume,	bge_resume),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	bge_miibus_readreg),
	DEVMETHOD(miibus_writereg,	bge_miibus_writereg),
	DEVMETHOD(miibus_statchg,	bge_miibus_statchg),

	{ 0, 0 }
};

static DEFINE_CLASS_0(bge, bge_driver, bge_methods, sizeof(struct bge_softc));
static devclass_t bge_devclass;

DECLARE_DUMMY_MODULE(if_bge);
DRIVER_MODULE(if_bge, pci, bge_driver, bge_devclass, NULL, NULL);
DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, NULL, NULL);

static uint32_t
bge_readmem_ind(struct bge_softc *sc, uint32_t off)
{
	device_t dev = sc->bge_dev;
	uint32_t val;

	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
	val = pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4);
	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
	return (val);
}

static void
bge_writemem_ind(struct bge_softc *sc, uint32_t off, uint32_t val)
{
	device_t dev = sc->bge_dev;

	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
	pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
}

#ifdef notdef
static uint32_t
bge_readreg_ind(struct bge_softc *sc, uint32_t off)
{
	device_t dev = sc->bge_dev;

	pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
	return(pci_read_config(dev, BGE_PCI_REG_DATA, 4));
}
#endif

static void
bge_writereg_ind(struct bge_softc *sc, uint32_t off, uint32_t val)
{
	device_t dev = sc->bge_dev;

	pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
	pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
}

static void
bge_writemem_direct(struct bge_softc *sc, uint32_t off, uint32_t val)
{
	CSR_WRITE_4(sc, off, val);
}

static void
bge_writembx(struct bge_softc *sc, int off, int val)
{
	if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
		off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI;

	CSR_WRITE_4(sc, off, val);
}

static uint8_t
bge_nvram_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
{
	uint32_t access, byte = 0;
	int i;

	/*
	 * Lock.
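	 * Request the NVRAM software arbitration semaphore (SET1) and
	 * poll for the GNT1 grant bit for up to 8000 * 20us before
	 * touching the NVRAM interface.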
	 */
	CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
	for (i = 0; i < 8000; i++) {
		if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1)
			break;
		DELAY(20);
	}
	if (i == 8000)
		return (1);

	/* Enable access. */
	access = CSR_READ_4(sc, BGE_NVRAM_ACCESS);
	CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE);

	CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc);
	CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD);
	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
		DELAY(10);
		if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) {
			DELAY(10);
			break;
		}
	}

	if (i == BGE_TIMEOUT * 10) {
		if_printf(&sc->arpcom.ac_if, "nvram read timed out\n");
		return (1);
	}

	/* Get result. */
	byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA);

	*dest = (bswap32(byte) >> ((addr % 4) * 8)) & 0xFF;

	/* Disable access. */
	CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access);

	/* Unlock. */
	CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1);
	CSR_READ_4(sc, BGE_NVRAM_SWARB);

	return (0);
}

/*
 * Read a sequence of bytes from NVRAM.
 */
static int
bge_read_nvram(struct bge_softc *sc, caddr_t dest, int off, int cnt)
{
	int err = 0, i;
	uint8_t byte = 0;

	if (sc->bge_asicrev != BGE_ASICREV_BCM5906)
		return (1);

	for (i = 0; i < cnt; i++) {
		err = bge_nvram_getbyte(sc, off + i, &byte);
		if (err)
			break;
		*(dest + i) = byte;
	}

	return (err ? 1 : 0);
}

/*
 * Read a byte of data stored in the EEPROM at address 'addr.' The
 * BCM570x supports both the traditional bitbang interface and an
 * auto access interface for reading the EEPROM. We use the auto
 * access method.
 */
static uint8_t
bge_eeprom_getbyte(struct bge_softc *sc, uint32_t addr, uint8_t *dest)
{
	int i;
	uint32_t byte = 0;

	/*
	 * Enable use of auto EEPROM access so we can avoid
	 * having to use the bitbang method.
	 */
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);

	/* Reset the EEPROM, load the clock period. */
	CSR_WRITE_4(sc, BGE_EE_ADDR,
	    BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
	DELAY(20);

	/* Issue the read EEPROM command. */
	CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);

	/* Wait for completion */
	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
		DELAY(10);
		if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
			break;
	}

	if (i == BGE_TIMEOUT * 10) {
		if_printf(&sc->arpcom.ac_if, "eeprom read timed out\n");
		return(1);
	}

	/* Get result. */
	byte = CSR_READ_4(sc, BGE_EE_DATA);

	*dest = (byte >> ((addr % 4) * 8)) & 0xFF;

	return(0);
}

/*
 * Read a sequence of bytes from the EEPROM.
 */
static int
bge_read_eeprom(struct bge_softc *sc, caddr_t dest, uint32_t off, size_t len)
{
	size_t i;
	int err;
	uint8_t byte;

	for (byte = 0, err = 0, i = 0; i < len; i++) {
		err = bge_eeprom_getbyte(sc, off + i, &byte);
		if (err)
			break;
		*(dest + i) = byte;
	}

	return(err ? 1 : 0);
}

static int
bge_miibus_readreg(device_t dev, int phy, int reg)
{
	struct bge_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t val, autopoll;
	int i;

	/*
	 * Broadcom's own driver always assumes the internal
	 * PHY is at GMII address 1.  On some chips, the PHY responds
	 * to accesses at all addresses, which could cause us to
	 * bogusly attach the PHY 32 times at probe time. Always
	 * restricting the lookup to address 1 is simpler than
	 * trying to figure out which chip revisions should be
	 * special-cased.
	 */
	if (phy != 1)
		return(0);

	/* Reading with autopolling on may trigger PCI errors */
	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY|
	    BGE_MIPHY(phy)|BGE_MIREG(reg));

	for (i = 0; i < BGE_TIMEOUT; i++) {
		DELAY(10);
		val = CSR_READ_4(sc, BGE_MI_COMM);
		if (!(val & BGE_MICOMM_BUSY))
			break;
	}

	if (i == BGE_TIMEOUT) {
		if_printf(ifp, "PHY read timed out "
			  "(phy %d, reg %d, val 0x%08x)\n", phy, reg, val);
		val = 0;
		goto done;
	}

	DELAY(5);
	val = CSR_READ_4(sc, BGE_MI_COMM);

done:
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	if (val & BGE_MICOMM_READFAIL)
		return(0);

	return(val & 0xFFFF);
}

static int
bge_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct bge_softc *sc = device_get_softc(dev);
	uint32_t autopoll;
	int i;

	/*
	 * See the related comment in bge_miibus_readreg()
	 */
	if (phy != 1)
		return(0);

	if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
	    (reg == BRGPHY_MII_1000CTL || reg == BRGPHY_MII_AUXCTL))
		return(0);

	/* Reading with autopolling on may trigger PCI errors */
	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY|
	    BGE_MIPHY(phy)|BGE_MIREG(reg)|val);

	for (i = 0; i < BGE_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) {
			DELAY(5);
			CSR_READ_4(sc, BGE_MI_COMM); /* dummy read */
			break;
		}
	}

	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	if (i == BGE_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if, "PHY write timed out "
			  "(phy %d, reg %d, val %d)\n", phy, reg, val);
		return(0);
	}

	return(0);
}

static void
bge_miibus_statchg(device_t dev)
{
	struct bge_softc *sc;
	struct mii_data *mii;

	sc = device_get_softc(dev);
	mii = device_get_softc(sc->bge_miibus);

	BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
	} else {
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
	}

	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
		BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
	} else {
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
	}
}

/*
 * Memory management for jumbo frames.
 */
static int
bge_alloc_jumbo_mem(struct bge_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct bge_jslot *entry;
	uint8_t *ptr;
	bus_addr_t paddr;
	int i, error;

	/*
	 * Create tag for jumbo mbufs.
	 * This is really a bit of a kludge. We allocate a special
	 * jumbo buffer pool which (thanks to the way our DMA
	 * memory allocation works) will consist of contiguous
	 * pages. This means that even though a jumbo buffer might
	 * be larger than a page size, we don't really need to
	 * map it into more than one DMA segment. However, the
	 * default mbuf tag will result in multi-segment mappings,
	 * so we have to create a special jumbo mbuf tag that
	 * lets us get away with mapping the jumbo buffers as
	 * a single segment. I think eventually the driver should
	 * be changed so that it uses ordinary mbufs and cluster
	 * buffers, i.e. jumbo frames can span multiple DMA
	 * descriptors. But that's a project for another day.
	 */

	/*
	 * Create DMA stuffs for jumbo RX ring.
	 */
	error = bge_dma_block_alloc(sc, BGE_JUMBO_RX_RING_SZ,
				    &sc->bge_cdata.bge_rx_jumbo_ring_tag,
				    &sc->bge_cdata.bge_rx_jumbo_ring_map,
				    (void *)&sc->bge_ldata.bge_rx_jumbo_ring,
				    &sc->bge_ldata.bge_rx_jumbo_ring_paddr);
	if (error) {
		if_printf(ifp, "could not create jumbo RX ring\n");
		return error;
	}

	/*
	 * Create DMA stuffs for jumbo buffer block.
	 */
	error = bge_dma_block_alloc(sc, BGE_JMEM,
				    &sc->bge_cdata.bge_jumbo_tag,
				    &sc->bge_cdata.bge_jumbo_map,
				    (void **)&sc->bge_ldata.bge_jumbo_buf,
				    &paddr);
	if (error) {
		if_printf(ifp, "could not create jumbo buffer\n");
		return error;
	}

	SLIST_INIT(&sc->bge_jfree_listhead);

	/*
	 * Now divide it up into 9K pieces and save the addresses
	 * in an array. Note that we play an evil trick here by using
	 * the first few bytes in the buffer to hold the address
	 * of the softc structure for this interface. This is because
	 * bge_jfree() needs it, but it is called by the mbuf management
	 * code which will not pass it to us explicitly.
	 */
	for (i = 0, ptr = sc->bge_ldata.bge_jumbo_buf; i < BGE_JSLOTS; i++) {
		entry = &sc->bge_cdata.bge_jslots[i];
		entry->bge_sc = sc;
		entry->bge_buf = ptr;
		entry->bge_paddr = paddr;
		entry->bge_inuse = 0;
		entry->bge_slot = i;
		SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, entry, jslot_link);

		ptr += BGE_JLEN;
		paddr += BGE_JLEN;
	}
	return 0;
}

static void
bge_free_jumbo_mem(struct bge_softc *sc)
{
	/* Destroy jumbo RX ring. */
	bge_dma_block_free(sc->bge_cdata.bge_rx_jumbo_ring_tag,
			   sc->bge_cdata.bge_rx_jumbo_ring_map,
			   sc->bge_ldata.bge_rx_jumbo_ring);

	/* Destroy jumbo buffer block. */
	bge_dma_block_free(sc->bge_cdata.bge_jumbo_tag,
			   sc->bge_cdata.bge_jumbo_map,
			   sc->bge_ldata.bge_jumbo_buf);
}

/*
 * Allocate a jumbo buffer.
 */
static struct bge_jslot *
bge_jalloc(struct bge_softc *sc)
{
	struct bge_jslot *entry;

	lwkt_serialize_enter(&sc->bge_jslot_serializer);
	entry = SLIST_FIRST(&sc->bge_jfree_listhead);
	if (entry) {
		SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jslot_link);
		entry->bge_inuse = 1;
	} else {
		if_printf(&sc->arpcom.ac_if, "no free jumbo buffers\n");
	}
	lwkt_serialize_exit(&sc->bge_jslot_serializer);
	return(entry);
}

/*
 * Adjust usage count on a jumbo buffer.
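 *
 * Each jumbo slot keeps a reference count in bge_inuse: bge_jalloc()
 * hands a slot out with the count at 1, bge_jref() bumps it for every
 * additional reference, and bge_jfree() drops it, returning the slot
 * to the free list once the count reaches zero.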
 */
static void
bge_jref(void *arg)
{
	struct bge_jslot *entry = (struct bge_jslot *)arg;
	struct bge_softc *sc = entry->bge_sc;

	if (sc == NULL)
		panic("bge_jref: can't find softc pointer!");

	if (&sc->bge_cdata.bge_jslots[entry->bge_slot] != entry) {
		panic("bge_jref: asked to reference buffer "
		    "that we don't manage!");
	} else if (entry->bge_inuse == 0) {
		panic("bge_jref: buffer already free!");
	} else {
		atomic_add_int(&entry->bge_inuse, 1);
	}
}

/*
 * Release a jumbo buffer.
 */
static void
bge_jfree(void *arg)
{
	struct bge_jslot *entry = (struct bge_jslot *)arg;
	struct bge_softc *sc = entry->bge_sc;

	if (sc == NULL)
		panic("bge_jfree: can't find softc pointer!");

	if (&sc->bge_cdata.bge_jslots[entry->bge_slot] != entry) {
		panic("bge_jfree: asked to free buffer that we don't manage!");
	} else if (entry->bge_inuse == 0) {
		panic("bge_jfree: buffer already free!");
	} else {
		/*
		 * Possible MP race to 0, use the serializer.  The atomic insn
		 * is still needed for races against bge_jref().
		 */
		lwkt_serialize_enter(&sc->bge_jslot_serializer);
		atomic_subtract_int(&entry->bge_inuse, 1);
		if (entry->bge_inuse == 0) {
			SLIST_INSERT_HEAD(&sc->bge_jfree_listhead,
					  entry, jslot_link);
		}
		lwkt_serialize_exit(&sc->bge_jslot_serializer);
	}
}


/*
 * Initialize a standard receive ring descriptor.
 */
static int
bge_newbuf_std(struct bge_softc *sc, int i, int init)
{
	struct mbuf *m_new = NULL;
	bus_dma_segment_t seg;
	bus_dmamap_t map;
	int error, nsegs;

	m_new = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m_new == NULL)
		return ENOBUFS;
	m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;

	if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
		m_adj(m_new, ETHER_ALIGN);

	error = bus_dmamap_load_mbuf_segment(sc->bge_cdata.bge_rx_mtag,
			sc->bge_cdata.bge_rx_tmpmap, m_new,
			&seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m_new);
		return error;
	}

	if (!init) {
		bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
				sc->bge_cdata.bge_rx_std_dmamap[i],
				BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag,
				sc->bge_cdata.bge_rx_std_dmamap[i]);
	}

	map = sc->bge_cdata.bge_rx_tmpmap;
	sc->bge_cdata.bge_rx_tmpmap = sc->bge_cdata.bge_rx_std_dmamap[i];
	sc->bge_cdata.bge_rx_std_dmamap[i] = map;

	sc->bge_cdata.bge_rx_std_chain[i].bge_mbuf = m_new;
	sc->bge_cdata.bge_rx_std_chain[i].bge_paddr = seg.ds_addr;

	bge_setup_rxdesc_std(sc, i);
	return 0;
}

static void
bge_setup_rxdesc_std(struct bge_softc *sc, int i)
{
	struct bge_rxchain *rc;
	struct bge_rx_bd *r;

	rc = &sc->bge_cdata.bge_rx_std_chain[i];
	r = &sc->bge_ldata.bge_rx_std_ring[i];

	r->bge_addr.bge_addr_lo = BGE_ADDR_LO(rc->bge_paddr);
	r->bge_addr.bge_addr_hi = BGE_ADDR_HI(rc->bge_paddr);
	r->bge_len = rc->bge_mbuf->m_len;
	r->bge_idx = i;
	r->bge_flags = BGE_RXBDFLAG_END;
}

/*
 * Initialize a jumbo receive ring descriptor. This allocates
 * a jumbo buffer from the pool managed internally by the driver.
 */
static int
bge_newbuf_jumbo(struct bge_softc *sc, int i, int init)
{
	struct mbuf *m_new = NULL;
	struct bge_jslot *buf;
	bus_addr_t paddr;

	/* Allocate the mbuf. */
	MGETHDR(m_new, init ? MB_WAIT : MB_DONTWAIT, MT_DATA);
	if (m_new == NULL)
		return ENOBUFS;

	/* Allocate the jumbo buffer */
	buf = bge_jalloc(sc);
	if (buf == NULL) {
		m_freem(m_new);
		return ENOBUFS;
	}

	/* Attach the buffer to the mbuf. */
	m_new->m_ext.ext_arg = buf;
	m_new->m_ext.ext_buf = buf->bge_buf;
	m_new->m_ext.ext_free = bge_jfree;
	m_new->m_ext.ext_ref = bge_jref;
	m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN;

	m_new->m_flags |= M_EXT;

	m_new->m_data = m_new->m_ext.ext_buf;
	m_new->m_len = m_new->m_pkthdr.len = m_new->m_ext.ext_size;

	paddr = buf->bge_paddr;
	if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0) {
		m_adj(m_new, ETHER_ALIGN);
		paddr += ETHER_ALIGN;
	}

	/* Save necessary information */
	sc->bge_cdata.bge_rx_jumbo_chain[i].bge_mbuf = m_new;
	sc->bge_cdata.bge_rx_jumbo_chain[i].bge_paddr = paddr;

	/* Set up the descriptor. */
	bge_setup_rxdesc_jumbo(sc, i);
	return 0;
}

static void
bge_setup_rxdesc_jumbo(struct bge_softc *sc, int i)
{
	struct bge_rx_bd *r;
	struct bge_rxchain *rc;

	r = &sc->bge_ldata.bge_rx_jumbo_ring[i];
	rc = &sc->bge_cdata.bge_rx_jumbo_chain[i];

	r->bge_addr.bge_addr_lo = BGE_ADDR_LO(rc->bge_paddr);
	r->bge_addr.bge_addr_hi = BGE_ADDR_HI(rc->bge_paddr);
	r->bge_len = rc->bge_mbuf->m_len;
	r->bge_idx = i;
	r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING;
}

static int
bge_init_rx_ring_std(struct bge_softc *sc)
{
	int i, error;

	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
		error = bge_newbuf_std(sc, i, 1);
		if (error)
			return error;
	}

	sc->bge_std = BGE_STD_RX_RING_CNT - 1;
	bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);

	return(0);
}

static void
bge_free_rx_ring_std(struct bge_softc *sc)
{
	int i;

	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
		struct bge_rxchain *rc = &sc->bge_cdata.bge_rx_std_chain[i];

		if (rc->bge_mbuf != NULL) {
			bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag,
					  sc->bge_cdata.bge_rx_std_dmamap[i]);
			m_freem(rc->bge_mbuf);
			rc->bge_mbuf = NULL;
		}
		bzero(&sc->bge_ldata.bge_rx_std_ring[i],
		      sizeof(struct bge_rx_bd));
	}
}

static int
bge_init_rx_ring_jumbo(struct bge_softc *sc)
{
	struct bge_rcb *rcb;
	int i, error;

	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		error = bge_newbuf_jumbo(sc, i, 1);
		if (error)
			return error;
	}

	sc->bge_jumbo = BGE_JUMBO_RX_RING_CNT - 1;

	rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
	rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 0);
	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);

	bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);

	return(0);
}

static void
bge_free_rx_ring_jumbo(struct bge_softc *sc)
{
	int i;

	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		struct bge_rxchain *rc = &sc->bge_cdata.bge_rx_jumbo_chain[i];

		if (rc->bge_mbuf != NULL) {
			m_freem(rc->bge_mbuf);
			rc->bge_mbuf = NULL;
		}
		bzero(&sc->bge_ldata.bge_rx_jumbo_ring[i],
		      sizeof(struct bge_rx_bd));
	}
}

static void
bge_free_tx_ring(struct bge_softc *sc)
{
	int i;

	for (i = 0; i < BGE_TX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
			bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag,
					  sc->bge_cdata.bge_tx_dmamap[i]);
			m_freem(sc->bge_cdata.bge_tx_chain[i]);
			sc->bge_cdata.bge_tx_chain[i] = NULL;
		}
		bzero(&sc->bge_ldata.bge_tx_ring[i],
		      sizeof(struct bge_tx_bd));
	}
}

static int
bge_init_tx_ring(struct bge_softc *sc)
{
	sc->bge_txcnt = 0;
	sc->bge_tx_saved_considx = 0;
	sc->bge_tx_prodidx = 0;

	/* Initialize transmit producer index for host-memory send ring. */
	bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);

	/* 5700 b2 errata */
	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
		bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);

	bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
	/* 5700 b2 errata */
	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
		bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);

	return(0);
}

static void
bge_setmulti(struct bge_softc *sc)
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	uint32_t hashes[4] = { 0, 0, 0, 0 };
	int h, i;

	ifp = &sc->arpcom.ac_if;

	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		for (i = 0; i < 4; i++)
			CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
		return;
	}

	/* First, zot all the existing filters. */
	for (i = 0; i < 4; i++)
		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);

	/* Now program new ones. */
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = ether_crc32_le(
		    LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
		    ETHER_ADDR_LEN) & 0x7f;
		hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
	}

	for (i = 0; i < 4; i++)
		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
}

/*
 * Do endian, PCI and DMA initialization. Also check the on-board ROM
 * self-test results.
 */
static int
bge_chipinit(struct bge_softc *sc)
{
	int i;
	uint32_t dma_rw_ctl;

	/* Set endian type before we access any non-PCI registers. */
	pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, BGE_INIT, 4);

	/* Clear the MAC control register */
	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);

	/*
	 * Clear the MAC statistics block in the NIC's
	 * internal memory.
	 */
	for (i = BGE_STATS_BLOCK;
	    i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
		BGE_MEMWIN_WRITE(sc, i, 0);

	for (i = BGE_STATUS_BLOCK;
	    i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
		BGE_MEMWIN_WRITE(sc, i, 0);

	/*
	 * Set up the PCI DMA control register.
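	 *
	 * The read/write DMA watermarks and workaround bits chosen below
	 * depend on the bus the chip sits on (PCI Express, PCI-X or
	 * conventional PCI), as indicated by the BGE_FLAG_PCIE and
	 * BGE_FLAG_PCIX flags.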
	 */
	if (sc->bge_flags & BGE_FLAG_PCIE) {
		/* PCI Express */
		dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
		    (0xf << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
		    (0x2 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
	} else if (sc->bge_flags & BGE_FLAG_PCIX) {
		/* PCI-X bus */
		if (BGE_IS_5714_FAMILY(sc)) {
			dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD;
			dma_rw_ctl &= ~BGE_PCIDMARWCTL_ONEDMA_ATONCE; /* XXX */
			/* XXX magic values, Broadcom-supplied Linux driver */
			if (sc->bge_asicrev == BGE_ASICREV_BCM5780) {
				dma_rw_ctl |= (1 << 20) | (1 << 18) |
				    BGE_PCIDMARWCTL_ONEDMA_ATONCE;
			} else {
				dma_rw_ctl |= (1 << 20) | (1 << 18) | (1 << 15);
			}
		} else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
			/*
			 * The 5704 uses a different encoding of read/write
			 * watermarks.
			 */
			dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
			    (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
			    (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
		} else {
			dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
			    (0x3 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
			    (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
			    (0x0F);
		}

		/*
		 * 5703 and 5704 need ONEDMA_AT_ONCE as a workaround
		 * for hardware bugs.
		 */
		if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
		    sc->bge_asicrev == BGE_ASICREV_BCM5704) {
			uint32_t tmp;

			tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f;
			if (tmp == 0x6 || tmp == 0x7)
				dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE;
		}
	} else {
		/* Conventional PCI bus */
		dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
		    (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
		    (0x7 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
		    (0x0F);
	}

	if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5704 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5705)
		dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
	pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);

	/*
	 * Set up general mode register.
	 */
	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS|
	    BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS|
	    BGE_MODECTL_TX_NO_PHDR_CSUM);

	/*
	 * Disable memory write invalidate.  Apparently it is not supported
	 * properly by these devices.
	 */
	PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD, PCIM_CMD_MWIEN, 4);

	/* Set the timer prescaler (always 66MHz) */
	CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);

	if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
		DELAY(40);	/* XXX */

		/* Put PHY into ready state */
		BGE_CLRBIT(sc, BGE_MISC_CFG, BGE_MISCCFG_EPHY_IDDQ);
		CSR_READ_4(sc, BGE_MISC_CFG); /* Flush */
		DELAY(40);
	}

	return(0);
}

static int
bge_blockinit(struct bge_softc *sc)
{
	struct bge_rcb *rcb;
	bus_size_t vrcb;
	bge_hostaddr taddr;
	uint32_t val;
	int i;

	/*
	 * Initialize the memory window pointer register so that
	 * we can access the first 32K of internal NIC RAM. This will
	 * allow us to set up the TX send ring RCBs and the RX return
	 * ring RCBs, plus other things which live in NIC memory.
	 */
	CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);

	/*
	 * Note: the BCM5704 has a smaller mbuf space than other chips.
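	 * (0x10000 bytes instead of the 0x18000 programmed for the other
	 * non-5705 chips below.)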
	 */

	if (!BGE_IS_5705_PLUS(sc)) {
		/* Configure mbuf memory pool */
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_BUFFPOOL_1);
		if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
		else
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);

		/* Configure DMA resource pool */
		CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
		    BGE_DMA_DESCRIPTORS);
		CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
	}

	/* Configure mbuf pool watermarks */
	if (!BGE_IS_5705_PLUS(sc)) {
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
	} else if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10);
	} else {
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
	}

	/* Configure DMA resource watermarks */
	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);

	/* Enable buffer manager */
	if (!BGE_IS_5705_PLUS(sc)) {
		CSR_WRITE_4(sc, BGE_BMAN_MODE,
		    BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN);

		/* Poll for buffer manager start indication */
		for (i = 0; i < BGE_TIMEOUT; i++) {
			if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
				break;
			DELAY(10);
		}

		if (i == BGE_TIMEOUT) {
			if_printf(&sc->arpcom.ac_if,
				  "buffer manager failed to start\n");
			return(ENXIO);
		}
	}

	/* Enable flow-through queues */
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);

	/* Wait until queue initialization is complete */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
			break;
		DELAY(10);
	}

	if (i == BGE_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if,
			  "flow-through queue init failed\n");
		return(ENXIO);
	}

	/* Initialize the standard RX ring control block */
	rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb;
	rcb->bge_hostaddr.bge_addr_lo =
	    BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr);
	rcb->bge_hostaddr.bge_addr_hi =
	    BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr);
	if (BGE_IS_5705_PLUS(sc))
		rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
	else
		rcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
	rcb->bge_nicaddr = BGE_STD_RX_RINGS;
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);

	/*
	 * Initialize the jumbo RX ring control block
	 * We set the 'ring disabled' bit in the flags
	 * field until we're actually ready to start
	 * using this ring (i.e. once we set the MTU
	 * high enough to require it).
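	 *
	 * bge_init_rx_ring_jumbo() rewrites BGE_RX_JUMBO_RCB_MAXLEN_FLAGS
	 * with the disabled bit cleared once the ring has been populated.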
	 */
	if (BGE_IS_JUMBO_CAPABLE(sc)) {
		rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;

		rcb->bge_hostaddr.bge_addr_lo =
		    BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
		rcb->bge_hostaddr.bge_addr_hi =
		    BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
		rcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN,
		    BGE_RCB_FLAG_RING_DISABLED);
		rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
		    rcb->bge_hostaddr.bge_addr_hi);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
		    rcb->bge_hostaddr.bge_addr_lo);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
		    rcb->bge_maxlen_flags);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);

		/* Set up dummy disabled mini ring RCB */
		rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb;
		rcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
		CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
		    rcb->bge_maxlen_flags);
	}

	/*
	 * Set the BD ring replenish thresholds. The recommended
	 * values are 1/8th the number of descriptors allocated to
	 * each ring.
	 */
	if (BGE_IS_5705_PLUS(sc))
		val = 8;
	else
		val = BGE_STD_RX_RING_CNT / 8;
	CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, val);
	CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8);

	/*
	 * Disable all unused send rings by setting the 'ring disabled'
	 * bit in the flags field of all the TX send ring control blocks.
	 * These are located in NIC memory.
	 */
	vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
	for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) {
		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
		RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
		vrcb += sizeof(struct bge_rcb);
	}

	/* Configure TX RCB 0 (we use only the first ring) */
	vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
	BGE_HOSTADDR(taddr, sc->bge_ldata.bge_tx_ring_paddr);
	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
	RCB_WRITE_4(sc, vrcb, bge_nicaddr,
	    BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
	if (!BGE_IS_5705_PLUS(sc)) {
		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
		    BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
	}

	/* Disable all unused RX return rings */
	vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
	for (i = 0; i < BGE_RX_RINGS_MAX; i++) {
		RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0);
		RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0);
		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
		    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt,
		    BGE_RCB_FLAG_RING_DISABLED));
		RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
		bge_writembx(sc, BGE_MBX_RX_CONS0_LO +
		    (i * (sizeof(uint64_t))), 0);
		vrcb += sizeof(struct bge_rcb);
	}

	/* Initialize RX ring indexes */
	bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0);
	bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
	bge_writembx(sc, BGE_MBX_RX_MINI_PROD_LO, 0);

	/*
	 * Set up RX return ring 0
	 * Note that the NIC address for RX return rings is 0x00000000.
	 * The return rings live entirely within the host, so the
	 * nicaddr field in the RCB isn't used.
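	 *
	 * The number of usable return ring descriptors programmed below
	 * comes from sc->bge_return_ring_cnt, which is established at
	 * attach time.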
	 */
	vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
	BGE_HOSTADDR(taddr, sc->bge_ldata.bge_rx_return_ring_paddr);
	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
	RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0x00000000);
	RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
	    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));

	/* Set random backoff seed for TX */
	CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
	    sc->arpcom.ac_enaddr[0] + sc->arpcom.ac_enaddr[1] +
	    sc->arpcom.ac_enaddr[2] + sc->arpcom.ac_enaddr[3] +
	    sc->arpcom.ac_enaddr[4] + sc->arpcom.ac_enaddr[5] +
	    BGE_TX_BACKOFF_SEED_MASK);

	/* Set inter-packet gap */
	CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);

	/*
	 * Specify which ring to use for packets that don't match
	 * any RX rules.
	 */
	CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);

	/*
	 * Configure number of RX lists. One interrupt distribution
	 * list, sixteen active lists, one bad frames class.
	 */
	CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);

	/* Initialize RX list placement stats mask. */
	CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
	CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);

	/* Disable host coalescing until we get it set up */
	CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);

	/* Poll to make sure it's shut down. */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
			break;
		DELAY(10);
	}

	if (i == BGE_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if,
			  "host coalescing engine failed to idle\n");
		return(ENXIO);
	}

	/* Set up host coalescing defaults */
	CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
	CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
	if (!BGE_IS_5705_PLUS(sc)) {
		CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
		CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
	}
	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 1);
	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 1);

	/* Set up address of statistics block */
	if (!BGE_IS_5705_PLUS(sc)) {
		CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI,
		    BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr));
		CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
		    BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr));

		CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
		CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
		CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
	}

	/* Set up address of status block */
	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
	    BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr));
	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
	    BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr));
	sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx = 0;
	sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx = 0;

	/* Turn on host coalescing state machine */
	CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);

	/* Turn on RX BD completion state machine and enable attentions */
	CSR_WRITE_4(sc, BGE_RBDC_MODE,
	    BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);

	/* Turn on RX list placement state machine */
	CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
	/* Turn on RX list selector state machine. */
	if (!BGE_IS_5705_PLUS(sc))
		CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);

	/* Turn on DMA, clear stats */
	CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB|
	    BGE_MACMODE_RXDMA_ENB|BGE_MACMODE_RX_STATS_CLEAR|
	    BGE_MACMODE_TX_STATS_CLEAR|BGE_MACMODE_RX_STATS_ENB|
	    BGE_MACMODE_TX_STATS_ENB|BGE_MACMODE_FRMHDR_DMA_ENB|
	    ((sc->bge_flags & BGE_FLAG_TBI) ?
	     BGE_PORTMODE_TBI : BGE_PORTMODE_MII));

	/* Set misc. local control, enable interrupts on attentions */
	CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);

#ifdef notdef
	/* Assert GPIO pins for PHY reset */
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
	    BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
	    BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
#endif

	/* Turn on DMA completion state machine */
	if (!BGE_IS_5705_PLUS(sc))
		CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);

	/* Turn on write DMA state machine */
	val = BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS;
	if (BGE_IS_5755_PLUS(sc))
		val |= (1 << 29);	/* Enable host coalescing bug fix. */
	CSR_WRITE_4(sc, BGE_WDMA_MODE, val);
	DELAY(40);

	/* Turn on read DMA state machine */
	val = BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS;
	if (sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM57780)
		val |= BGE_RDMAMODE_BD_SBD_CRPT_ATTN |
		    BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN |
		    BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN;
	if (sc->bge_flags & BGE_FLAG_PCIE)
		val |= BGE_RDMAMODE_FIFO_LONG_BURST;
	CSR_WRITE_4(sc, BGE_RDMA_MODE, val);
	DELAY(40);

	/* Turn on RX data completion state machine */
	CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);

	/* Turn on RX BD initiator state machine */
	CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);

	/* Turn on RX data and RX BD initiator state machine */
	CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);

	/* Turn on Mbuf cluster free state machine */
	if (!BGE_IS_5705_PLUS(sc))
		CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);

	/* Turn on send BD completion state machine */
	CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);

	/* Turn on send data completion state machine */
	val = BGE_SDCMODE_ENABLE;
	if (sc->bge_asicrev == BGE_ASICREV_BCM5761)
		val |= BGE_SDCMODE_CDELAY;
	CSR_WRITE_4(sc, BGE_SDC_MODE, val);

	/* Turn on send data initiator state machine */
	CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);

	/* Turn on send BD initiator state machine */
	CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);

	/* Turn on send BD selector state machine */
	CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);

	CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
	CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
	    BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);

	/* ack/clear link change events */
	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
	    BGE_MACSTAT_LINK_CHANGED);
	CSR_WRITE_4(sc, BGE_MI_STS, 0);

	/* Enable PHY auto polling (for MII/GMII only) */
	if (sc->bge_flags & BGE_FLAG_TBI) {
		CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
BGE_MI_STS, BGE_MISTS_LINK); 1703 } else { 1704 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL|10<<16); 1705 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 && 1706 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) { 1707 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB, 1708 BGE_EVTENB_MI_INTERRUPT); 1709 } 1710 } 1711 1712 /* 1713 * Clear any pending link state attention. 1714 * Otherwise some link state change events may be lost until attention 1715 * is cleared by bge_intr() -> bge_softc.bge_link_upd() sequence. 1716 * It's not necessary on newer BCM chips - perhaps enabling link 1717 * state change attentions implies clearing pending attention. 1718 */ 1719 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED| 1720 BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE| 1721 BGE_MACSTAT_LINK_CHANGED); 1722 1723 /* Enable link state change attentions. */ 1724 BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED); 1725 1726 return(0); 1727 } 1728 1729 /* 1730 * Probe for a Broadcom chip. Check the PCI vendor and device IDs 1731 * against our list and return its name if we find a match. Note 1732 * that since the Broadcom controller contains VPD support, we 1733 * can get the device name string from the controller itself instead 1734 * of the compiled-in string. This is a little slow, but it guarantees 1735 * we'll always announce the right product name. 1736 */ 1737 static int 1738 bge_probe(device_t dev) 1739 { 1740 const struct bge_type *t; 1741 uint16_t product, vendor; 1742 1743 product = pci_get_device(dev); 1744 vendor = pci_get_vendor(dev); 1745 1746 for (t = bge_devs; t->bge_name != NULL; t++) { 1747 if (vendor == t->bge_vid && product == t->bge_did) 1748 break; 1749 } 1750 if (t->bge_name == NULL) 1751 return(ENXIO); 1752 1753 device_set_desc(dev, t->bge_name); 1754 if (pci_get_subvendor(dev) == PCI_VENDOR_DELL) { 1755 struct bge_softc *sc = device_get_softc(dev); 1756 sc->bge_flags |= BGE_FLAG_NO_3LED; 1757 } 1758 return(0); 1759 } 1760 1761 static int 1762 bge_attach(device_t dev) 1763 { 1764 struct ifnet *ifp; 1765 struct bge_softc *sc; 1766 uint32_t hwcfg = 0; 1767 int error = 0, rid; 1768 uint8_t ether_addr[ETHER_ADDR_LEN]; 1769 1770 sc = device_get_softc(dev); 1771 sc->bge_dev = dev; 1772 callout_init(&sc->bge_stat_timer); 1773 lwkt_serialize_init(&sc->bge_jslot_serializer); 1774 1775 #ifndef BURN_BRIDGES 1776 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) { 1777 uint32_t irq, mem; 1778 1779 irq = pci_read_config(dev, PCIR_INTLINE, 4); 1780 mem = pci_read_config(dev, BGE_PCI_BAR0, 4); 1781 1782 device_printf(dev, "chip is in D%d power mode " 1783 "-- setting to D0\n", pci_get_powerstate(dev)); 1784 1785 pci_set_powerstate(dev, PCI_POWERSTATE_D0); 1786 1787 pci_write_config(dev, PCIR_INTLINE, irq, 4); 1788 pci_write_config(dev, BGE_PCI_BAR0, mem, 4); 1789 } 1790 #endif /* !BURN_BRIDGE */ 1791 1792 /* 1793 * Map control/status registers. 
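 * The entire register file sits behind a single memory BAR
 * (BGE_PCI_BAR0).  The bus tag/handle saved below back the register
 * access macros, which are essentially
 *	CSR_READ_4(sc, reg) ->
 *	    bus_space_read_4(sc->bge_btag, sc->bge_bhandle, reg)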
1794 */ 1795 pci_enable_busmaster(dev); 1796 1797 rid = BGE_PCI_BAR0; 1798 sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 1799 RF_ACTIVE); 1800 1801 if (sc->bge_res == NULL) { 1802 device_printf(dev, "couldn't map memory\n"); 1803 return ENXIO; 1804 } 1805 1806 sc->bge_btag = rman_get_bustag(sc->bge_res); 1807 sc->bge_bhandle = rman_get_bushandle(sc->bge_res); 1808 1809 /* Save various chip information */ 1810 sc->bge_chipid = 1811 pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >> 1812 BGE_PCIMISCCTL_ASICREV_SHIFT; 1813 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_USE_PRODID_REG) 1814 sc->bge_chipid = pci_read_config(dev, BGE_PCI_PRODID_ASICREV, 4); 1815 sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid); 1816 sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid); 1817 1818 /* Save chipset family. */ 1819 switch (sc->bge_asicrev) { 1820 case BGE_ASICREV_BCM5755: 1821 case BGE_ASICREV_BCM5761: 1822 case BGE_ASICREV_BCM5784: 1823 case BGE_ASICREV_BCM5785: 1824 case BGE_ASICREV_BCM5787: 1825 case BGE_ASICREV_BCM57780: 1826 sc->bge_flags |= BGE_FLAG_5755_PLUS | BGE_FLAG_575X_PLUS | 1827 BGE_FLAG_5705_PLUS; 1828 break; 1829 1830 case BGE_ASICREV_BCM5700: 1831 case BGE_ASICREV_BCM5701: 1832 case BGE_ASICREV_BCM5703: 1833 case BGE_ASICREV_BCM5704: 1834 sc->bge_flags |= BGE_FLAG_5700_FAMILY | BGE_FLAG_JUMBO; 1835 break; 1836 1837 case BGE_ASICREV_BCM5714_A0: 1838 case BGE_ASICREV_BCM5780: 1839 case BGE_ASICREV_BCM5714: 1840 sc->bge_flags |= BGE_FLAG_5714_FAMILY; 1841 /* Fall through */ 1842 1843 case BGE_ASICREV_BCM5750: 1844 case BGE_ASICREV_BCM5752: 1845 case BGE_ASICREV_BCM5906: 1846 sc->bge_flags |= BGE_FLAG_575X_PLUS; 1847 /* Fall through */ 1848 1849 case BGE_ASICREV_BCM5705: 1850 sc->bge_flags |= BGE_FLAG_5705_PLUS; 1851 break; 1852 } 1853 1854 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) 1855 sc->bge_flags |= BGE_FLAG_NO_EEPROM; 1856 1857 /* 1858 * Set various quirk flags. 1859 */ 1860 1861 sc->bge_flags |= BGE_FLAG_ETH_WIRESPEED; 1862 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 || 1863 (sc->bge_asicrev == BGE_ASICREV_BCM5705 && 1864 (sc->bge_chipid != BGE_CHIPID_BCM5705_A0 && 1865 sc->bge_chipid != BGE_CHIPID_BCM5705_A1)) || 1866 sc->bge_asicrev == BGE_ASICREV_BCM5906) 1867 sc->bge_flags &= ~BGE_FLAG_ETH_WIRESPEED; 1868 1869 if (sc->bge_chipid == BGE_CHIPID_BCM5701_A0 || 1870 sc->bge_chipid == BGE_CHIPID_BCM5701_B0) 1871 sc->bge_flags |= BGE_FLAG_CRC_BUG; 1872 1873 if (sc->bge_chiprev == BGE_CHIPREV_5703_AX || 1874 sc->bge_chiprev == BGE_CHIPREV_5704_AX) 1875 sc->bge_flags |= BGE_FLAG_ADC_BUG; 1876 1877 if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0) 1878 sc->bge_flags |= BGE_FLAG_5704_A0_BUG; 1879 1880 if (BGE_IS_5705_PLUS(sc) && 1881 !(sc->bge_flags & BGE_FLAG_ADJUST_TRIM)) { 1882 if (sc->bge_asicrev == BGE_ASICREV_BCM5755 || 1883 sc->bge_asicrev == BGE_ASICREV_BCM5761 || 1884 sc->bge_asicrev == BGE_ASICREV_BCM5784 || 1885 sc->bge_asicrev == BGE_ASICREV_BCM5787) { 1886 if (sc->bge_chipid != BGE_CHIPID_BCM5722_A0) 1887 sc->bge_flags |= BGE_FLAG_JITTER_BUG; 1888 } else if (sc->bge_asicrev != BGE_ASICREV_BCM5906) { 1889 sc->bge_flags |= BGE_FLAG_BER_BUG; 1890 } 1891 } 1892 1893 /* Allocate interrupt */ 1894 rid = 0; 1895 1896 sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 1897 RF_SHAREABLE | RF_ACTIVE); 1898 1899 if (sc->bge_irq == NULL) { 1900 device_printf(dev, "couldn't map interrupt\n"); 1901 error = ENXIO; 1902 goto fail; 1903 } 1904 1905 /* 1906 * Check if this is a PCI-X or PCI Express device. 
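 * 5705 and newer parts are probed for the PCI Express capability
 * with pci_is_pcie(); on older parts a clear BGE_PCISTATE_PCI_BUSMODE
 * bit in the PCI state register indicates PCI-X operation.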
1907 */ 1908 if (BGE_IS_5705_PLUS(sc)) { 1909 if (pci_is_pcie(dev)) { 1910 sc->bge_flags |= BGE_FLAG_PCIE; 1911 pcie_set_max_readrq(dev, PCIEM_DEVCTL_MAX_READRQ_4096); 1912 } 1913 } else { 1914 /* 1915 * Check if the device is in PCI-X Mode. 1916 * (This bit is not valid on PCI Express controllers.) 1917 */ 1918 if ((pci_read_config(sc->bge_dev, BGE_PCI_PCISTATE, 4) & 1919 BGE_PCISTATE_PCI_BUSMODE) == 0) 1920 sc->bge_flags |= BGE_FLAG_PCIX; 1921 } 1922 1923 device_printf(dev, "CHIP ID 0x%08x; " 1924 "ASIC REV 0x%02x; CHIP REV 0x%02x; %s\n", 1925 sc->bge_chipid, sc->bge_asicrev, sc->bge_chiprev, 1926 (sc->bge_flags & BGE_FLAG_PCIX) ? "PCI-X" 1927 : ((sc->bge_flags & BGE_FLAG_PCIE) ? 1928 "PCI-E" : "PCI")); 1929 1930 ifp = &sc->arpcom.ac_if; 1931 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 1932 1933 /* Try to reset the chip. */ 1934 bge_reset(sc); 1935 1936 if (bge_chipinit(sc)) { 1937 device_printf(dev, "chip initialization failed\n"); 1938 error = ENXIO; 1939 goto fail; 1940 } 1941 1942 /* 1943 * Get station address 1944 */ 1945 error = bge_get_eaddr(sc, ether_addr); 1946 if (error) { 1947 device_printf(dev, "failed to read station address\n"); 1948 goto fail; 1949 } 1950 1951 /* 5705/5750 limits RX return ring to 512 entries. */ 1952 if (BGE_IS_5705_PLUS(sc)) 1953 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705; 1954 else 1955 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT; 1956 1957 error = bge_dma_alloc(sc); 1958 if (error) 1959 goto fail; 1960 1961 /* Set default tuneable values. */ 1962 sc->bge_stat_ticks = BGE_TICKS_PER_SEC; 1963 sc->bge_rx_coal_ticks = bge_rx_coal_ticks; 1964 sc->bge_tx_coal_ticks = bge_tx_coal_ticks; 1965 sc->bge_rx_max_coal_bds = bge_rx_max_coal_bds; 1966 sc->bge_tx_max_coal_bds = bge_tx_max_coal_bds; 1967 1968 /* Set up ifnet structure */ 1969 ifp->if_softc = sc; 1970 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1971 ifp->if_ioctl = bge_ioctl; 1972 ifp->if_start = bge_start; 1973 #ifdef DEVICE_POLLING 1974 ifp->if_poll = bge_poll; 1975 #endif 1976 ifp->if_watchdog = bge_watchdog; 1977 ifp->if_init = bge_init; 1978 ifp->if_mtu = ETHERMTU; 1979 ifp->if_capabilities = IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU; 1980 ifq_set_maxlen(&ifp->if_snd, BGE_TX_RING_CNT - 1); 1981 ifq_set_ready(&ifp->if_snd); 1982 1983 /* 1984 * 5700 B0 chips do not support checksumming correctly due 1985 * to hardware bugs. 1986 */ 1987 if (sc->bge_chipid != BGE_CHIPID_BCM5700_B0) { 1988 ifp->if_capabilities |= IFCAP_HWCSUM; 1989 ifp->if_hwassist = BGE_CSUM_FEATURES; 1990 } 1991 ifp->if_capenable = ifp->if_capabilities; 1992 1993 /* 1994 * Figure out what sort of media we have by checking the 1995 * hardware config word in the first 32k of NIC internal memory, 1996 * or fall back to examining the EEPROM if necessary. 1997 * Note: on some BCM5700 cards, this value appears to be unset. 1998 * If that's the case, we have to rely on identifying the NIC 1999 * by its PCI subsystem ID, as we do below for the SysKonnect 2000 * SK-9D41. 2001 */ 2002 if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER) 2003 hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG); 2004 else { 2005 if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET, 2006 sizeof(hwcfg))) { 2007 device_printf(dev, "failed to read EEPROM\n"); 2008 error = ENXIO; 2009 goto fail; 2010 } 2011 hwcfg = ntohl(hwcfg); 2012 } 2013 2014 if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) 2015 sc->bge_flags |= BGE_FLAG_TBI; 2016 2017 /* The SysKonnect SK-9D41 is a 1000baseSX card. 
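 * Some of these boards leave the hardware config word unset, so the
 * PCI subsystem ID is used to force TBI (fiber) mode as well.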
*/ 2018 if (pci_get_subvendor(dev) == PCI_PRODUCT_SCHNEIDERKOCH_SK_9D41) 2019 sc->bge_flags |= BGE_FLAG_TBI; 2020 2021 if (sc->bge_flags & BGE_FLAG_TBI) { 2022 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, 2023 bge_ifmedia_upd, bge_ifmedia_sts); 2024 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL); 2025 ifmedia_add(&sc->bge_ifmedia, 2026 IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL); 2027 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL); 2028 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO); 2029 sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media; 2030 } else { 2031 /* 2032 * Do transceiver setup. 2033 */ 2034 if (mii_phy_probe(dev, &sc->bge_miibus, 2035 bge_ifmedia_upd, bge_ifmedia_sts)) { 2036 device_printf(dev, "MII without any PHY!\n"); 2037 error = ENXIO; 2038 goto fail; 2039 } 2040 } 2041 2042 /* 2043 * When using the BCM5701 in PCI-X mode, data corruption has 2044 * been observed in the first few bytes of some received packets. 2045 * Aligning the packet buffer in memory eliminates the corruption. 2046 * Unfortunately, this misaligns the packet payloads. On platforms 2047 * which do not support unaligned accesses, we will realign the 2048 * payloads by copying the received packets. 2049 */ 2050 if (sc->bge_asicrev == BGE_ASICREV_BCM5701 && 2051 (sc->bge_flags & BGE_FLAG_PCIX)) 2052 sc->bge_flags |= BGE_FLAG_RX_ALIGNBUG; 2053 2054 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 && 2055 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) { 2056 sc->bge_link_upd = bge_bcm5700_link_upd; 2057 sc->bge_link_chg = BGE_MACSTAT_MI_INTERRUPT; 2058 } else if (sc->bge_flags & BGE_FLAG_TBI) { 2059 sc->bge_link_upd = bge_tbi_link_upd; 2060 sc->bge_link_chg = BGE_MACSTAT_LINK_CHANGED; 2061 } else { 2062 sc->bge_link_upd = bge_copper_link_upd; 2063 sc->bge_link_chg = BGE_MACSTAT_LINK_CHANGED; 2064 } 2065 2066 /* 2067 * Create sysctl nodes. 2068 */ 2069 sysctl_ctx_init(&sc->bge_sysctl_ctx); 2070 sc->bge_sysctl_tree = SYSCTL_ADD_NODE(&sc->bge_sysctl_ctx, 2071 SYSCTL_STATIC_CHILDREN(_hw), 2072 OID_AUTO, 2073 device_get_nameunit(dev), 2074 CTLFLAG_RD, 0, ""); 2075 if (sc->bge_sysctl_tree == NULL) { 2076 device_printf(dev, "can't add sysctl node\n"); 2077 error = ENXIO; 2078 goto fail; 2079 } 2080 2081 SYSCTL_ADD_PROC(&sc->bge_sysctl_ctx, 2082 SYSCTL_CHILDREN(sc->bge_sysctl_tree), 2083 OID_AUTO, "rx_coal_ticks", 2084 CTLTYPE_INT | CTLFLAG_RW, 2085 sc, 0, bge_sysctl_rx_coal_ticks, "I", 2086 "Receive coalescing ticks (usec)."); 2087 SYSCTL_ADD_PROC(&sc->bge_sysctl_ctx, 2088 SYSCTL_CHILDREN(sc->bge_sysctl_tree), 2089 OID_AUTO, "tx_coal_ticks", 2090 CTLTYPE_INT | CTLFLAG_RW, 2091 sc, 0, bge_sysctl_tx_coal_ticks, "I", 2092 "Transmit coalescing ticks (usec)."); 2093 SYSCTL_ADD_PROC(&sc->bge_sysctl_ctx, 2094 SYSCTL_CHILDREN(sc->bge_sysctl_tree), 2095 OID_AUTO, "rx_max_coal_bds", 2096 CTLTYPE_INT | CTLFLAG_RW, 2097 sc, 0, bge_sysctl_rx_max_coal_bds, "I", 2098 "Receive max coalesced BD count."); 2099 SYSCTL_ADD_PROC(&sc->bge_sysctl_ctx, 2100 SYSCTL_CHILDREN(sc->bge_sysctl_tree), 2101 OID_AUTO, "tx_max_coal_bds", 2102 CTLTYPE_INT | CTLFLAG_RW, 2103 sc, 0, bge_sysctl_tx_max_coal_bds, "I", 2104 "Transmit max coalesced BD count."); 2105 2106 /* 2107 * Call MI attach routine. 
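 * ether_ifattach() is called before the interrupt is hooked up so
 * that ifp->if_serializer is valid when bus_setup_intr() references
 * it; if the interrupt cannot be established the interface is
 * detached again below.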
2108 */ 2109 ether_ifattach(ifp, ether_addr, NULL); 2110 2111 error = bus_setup_intr(dev, sc->bge_irq, INTR_MPSAFE, 2112 bge_intr, sc, &sc->bge_intrhand, 2113 ifp->if_serializer); 2114 if (error) { 2115 ether_ifdetach(ifp); 2116 device_printf(dev, "couldn't set up irq\n"); 2117 goto fail; 2118 } 2119 2120 ifp->if_cpuid = ithread_cpuid(rman_get_start(sc->bge_irq)); 2121 KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus); 2122 2123 return(0); 2124 fail: 2125 bge_detach(dev); 2126 return(error); 2127 } 2128 2129 static int 2130 bge_detach(device_t dev) 2131 { 2132 struct bge_softc *sc = device_get_softc(dev); 2133 2134 if (device_is_attached(dev)) { 2135 struct ifnet *ifp = &sc->arpcom.ac_if; 2136 2137 lwkt_serialize_enter(ifp->if_serializer); 2138 bge_stop(sc); 2139 bge_reset(sc); 2140 bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand); 2141 lwkt_serialize_exit(ifp->if_serializer); 2142 2143 ether_ifdetach(ifp); 2144 } 2145 2146 if (sc->bge_flags & BGE_FLAG_TBI) 2147 ifmedia_removeall(&sc->bge_ifmedia); 2148 if (sc->bge_miibus) 2149 device_delete_child(dev, sc->bge_miibus); 2150 bus_generic_detach(dev); 2151 2152 if (sc->bge_irq != NULL) 2153 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->bge_irq); 2154 2155 if (sc->bge_res != NULL) 2156 bus_release_resource(dev, SYS_RES_MEMORY, 2157 BGE_PCI_BAR0, sc->bge_res); 2158 2159 if (sc->bge_sysctl_tree != NULL) 2160 sysctl_ctx_free(&sc->bge_sysctl_ctx); 2161 2162 bge_dma_free(sc); 2163 2164 return 0; 2165 } 2166 2167 static void 2168 bge_reset(struct bge_softc *sc) 2169 { 2170 device_t dev; 2171 uint32_t cachesize, command, pcistate, reset; 2172 void (*write_op)(struct bge_softc *, uint32_t, uint32_t); 2173 int i, val = 0; 2174 2175 dev = sc->bge_dev; 2176 2177 if (BGE_IS_575X_PLUS(sc) && !BGE_IS_5714_FAMILY(sc) && 2178 sc->bge_asicrev != BGE_ASICREV_BCM5906) { 2179 if (sc->bge_flags & BGE_FLAG_PCIE) 2180 write_op = bge_writemem_direct; 2181 else 2182 write_op = bge_writemem_ind; 2183 } else { 2184 write_op = bge_writereg_ind; 2185 } 2186 2187 /* Save some important PCI state. */ 2188 cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4); 2189 command = pci_read_config(dev, BGE_PCI_CMD, 4); 2190 pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4); 2191 2192 pci_write_config(dev, BGE_PCI_MISC_CTL, 2193 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR| 2194 BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW, 4); 2195 2196 /* Disable fastboot on controllers that support it. */ 2197 if (sc->bge_asicrev == BGE_ASICREV_BCM5752 || 2198 sc->bge_asicrev == BGE_ASICREV_BCM5755 || 2199 sc->bge_asicrev == BGE_ASICREV_BCM5787) { 2200 if (bootverbose) 2201 if_printf(&sc->arpcom.ac_if, "Disabling fastboot\n"); 2202 CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0x0); 2203 } 2204 2205 /* 2206 * Write the magic number to SRAM at offset 0xB50. 2207 * When firmware finishes its initialization it will 2208 * write ~BGE_MAGIC_NUMBER to the same location. 2209 */ 2210 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER); 2211 2212 reset = BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1); 2213 2214 /* XXX: Broadcom Linux driver. */ 2215 if (sc->bge_flags & BGE_FLAG_PCIE) { 2216 if (CSR_READ_4(sc, 0x7e2c) == 0x60) /* PCIE 1.0 */ 2217 CSR_WRITE_4(sc, 0x7e2c, 0x20); 2218 if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) { 2219 /* Prevent PCIE link training during global reset */ 2220 CSR_WRITE_4(sc, BGE_MISC_CFG, (1<<29)); 2221 reset |= (1<<29); 2222 } 2223 } 2224 2225 /* 2226 * Set GPHY Power Down Override to leave GPHY 2227 * powered up in D0 uninitialized. 
2228 */ 2229 if (BGE_IS_5705_PLUS(sc)) 2230 reset |= 0x04000000; 2231 2232 /* Issue global reset */ 2233 write_op(sc, BGE_MISC_CFG, reset); 2234 2235 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) { 2236 uint32_t status, ctrl; 2237 2238 status = CSR_READ_4(sc, BGE_VCPU_STATUS); 2239 CSR_WRITE_4(sc, BGE_VCPU_STATUS, 2240 status | BGE_VCPU_STATUS_DRV_RESET); 2241 ctrl = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL); 2242 CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL, 2243 ctrl & ~BGE_VCPU_EXT_CTRL_HALT_CPU); 2244 } 2245 2246 DELAY(1000); 2247 2248 /* XXX: Broadcom Linux driver. */ 2249 if (sc->bge_flags & BGE_FLAG_PCIE) { 2250 if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) { 2251 uint32_t v; 2252 2253 DELAY(500000); /* wait for link training to complete */ 2254 v = pci_read_config(dev, 0xc4, 4); 2255 pci_write_config(dev, 0xc4, v | (1<<15), 4); 2256 } 2257 /* 2258 * Set PCIE max payload size to 128 bytes and 2259 * clear error status. 2260 */ 2261 pci_write_config(dev, 0xd8, 0xf5000, 4); 2262 } 2263 2264 /* Reset some of the PCI state that got zapped by reset */ 2265 pci_write_config(dev, BGE_PCI_MISC_CTL, 2266 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR| 2267 BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW, 4); 2268 pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4); 2269 pci_write_config(dev, BGE_PCI_CMD, command, 4); 2270 write_op(sc, BGE_MISC_CFG, (65 << 1)); 2271 2272 /* Enable memory arbiter. */ 2273 if (BGE_IS_5714_FAMILY(sc)) { 2274 uint32_t val; 2275 2276 val = CSR_READ_4(sc, BGE_MARB_MODE); 2277 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val); 2278 } else { 2279 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE); 2280 } 2281 2282 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) { 2283 for (i = 0; i < BGE_TIMEOUT; i++) { 2284 val = CSR_READ_4(sc, BGE_VCPU_STATUS); 2285 if (val & BGE_VCPU_STATUS_INIT_DONE) 2286 break; 2287 DELAY(100); 2288 } 2289 if (i == BGE_TIMEOUT) { 2290 if_printf(&sc->arpcom.ac_if, "reset timed out\n"); 2291 return; 2292 } 2293 } else { 2294 /* 2295 * Poll until we see the 1's complement of the magic number. 2296 * This indicates that the firmware initialization 2297 * is complete. 2298 */ 2299 for (i = 0; i < BGE_FIRMWARE_TIMEOUT; i++) { 2300 val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM); 2301 if (val == ~BGE_MAGIC_NUMBER) 2302 break; 2303 DELAY(10); 2304 } 2305 if (i == BGE_FIRMWARE_TIMEOUT) { 2306 if_printf(&sc->arpcom.ac_if, "firmware handshake " 2307 "timed out, found 0x%08x\n", val); 2308 return; 2309 } 2310 } 2311 2312 /* 2313 * XXX Wait for the value of the PCISTATE register to 2314 * return to its original pre-reset state. This is a 2315 * fairly good indicator of reset completion. If we don't 2316 * wait for the reset to fully complete, trying to read 2317 * from the device's non-PCI registers may yield garbage 2318 * results. 2319 */ 2320 for (i = 0; i < BGE_TIMEOUT; i++) { 2321 if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate) 2322 break; 2323 DELAY(10); 2324 } 2325 2326 if (sc->bge_flags & BGE_FLAG_PCIE) { 2327 reset = bge_readmem_ind(sc, 0x7c00); 2328 bge_writemem_ind(sc, 0x7c00, reset | (1 << 25)); 2329 } 2330 2331 /* Fix up byte swapping */ 2332 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS | 2333 BGE_MODECTL_BYTESWAP_DATA); 2334 2335 CSR_WRITE_4(sc, BGE_MAC_MODE, 0); 2336 2337 /* 2338 * The 5704 in TBI mode apparently needs some special 2339 * adjustment to insure the SERDES drive level is set 2340 * to 1.2V. 
2341 */ 2342 if (sc->bge_asicrev == BGE_ASICREV_BCM5704 && 2343 (sc->bge_flags & BGE_FLAG_TBI)) { 2344 uint32_t serdescfg; 2345 2346 serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG); 2347 serdescfg = (serdescfg & ~0xFFF) | 0x880; 2348 CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg); 2349 } 2350 2351 /* XXX: Broadcom Linux driver. */ 2352 if ((sc->bge_flags & BGE_FLAG_PCIE) && 2353 sc->bge_chipid != BGE_CHIPID_BCM5750_A0) { 2354 uint32_t v; 2355 2356 v = CSR_READ_4(sc, 0x7c00); 2357 CSR_WRITE_4(sc, 0x7c00, v | (1<<25)); 2358 } 2359 2360 DELAY(10000); 2361 } 2362 2363 /* 2364 * Frame reception handling. This is called if there's a frame 2365 * on the receive return list. 2366 * 2367 * Note: we have to be able to handle two possibilities here: 2368 * 1) the frame is from the jumbo recieve ring 2369 * 2) the frame is from the standard receive ring 2370 */ 2371 2372 static void 2373 bge_rxeof(struct bge_softc *sc) 2374 { 2375 struct ifnet *ifp; 2376 int stdcnt = 0, jumbocnt = 0; 2377 struct mbuf_chain chain[MAXCPU]; 2378 2379 if (sc->bge_rx_saved_considx == 2380 sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx) 2381 return; 2382 2383 ether_input_chain_init(chain); 2384 2385 ifp = &sc->arpcom.ac_if; 2386 2387 while (sc->bge_rx_saved_considx != 2388 sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx) { 2389 struct bge_rx_bd *cur_rx; 2390 uint32_t rxidx; 2391 struct mbuf *m = NULL; 2392 uint16_t vlan_tag = 0; 2393 int have_tag = 0; 2394 2395 cur_rx = 2396 &sc->bge_ldata.bge_rx_return_ring[sc->bge_rx_saved_considx]; 2397 2398 rxidx = cur_rx->bge_idx; 2399 BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt); 2400 logif(rx_pkt); 2401 2402 if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) { 2403 have_tag = 1; 2404 vlan_tag = cur_rx->bge_vlan_tag; 2405 } 2406 2407 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) { 2408 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT); 2409 jumbocnt++; 2410 2411 if (rxidx != sc->bge_jumbo) { 2412 ifp->if_ierrors++; 2413 if_printf(ifp, "sw jumbo index(%d) " 2414 "and hw jumbo index(%d) mismatch, drop!\n", 2415 sc->bge_jumbo, rxidx); 2416 bge_setup_rxdesc_jumbo(sc, rxidx); 2417 continue; 2418 } 2419 2420 m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx].bge_mbuf; 2421 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) { 2422 ifp->if_ierrors++; 2423 bge_setup_rxdesc_jumbo(sc, sc->bge_jumbo); 2424 continue; 2425 } 2426 if (bge_newbuf_jumbo(sc, sc->bge_jumbo, 0)) { 2427 ifp->if_ierrors++; 2428 bge_setup_rxdesc_jumbo(sc, sc->bge_jumbo); 2429 continue; 2430 } 2431 } else { 2432 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT); 2433 stdcnt++; 2434 2435 if (rxidx != sc->bge_std) { 2436 ifp->if_ierrors++; 2437 if_printf(ifp, "sw std index(%d) " 2438 "and hw std index(%d) mismatch, drop!\n", 2439 sc->bge_std, rxidx); 2440 bge_setup_rxdesc_std(sc, rxidx); 2441 continue; 2442 } 2443 2444 m = sc->bge_cdata.bge_rx_std_chain[rxidx].bge_mbuf; 2445 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) { 2446 ifp->if_ierrors++; 2447 bge_setup_rxdesc_std(sc, sc->bge_std); 2448 continue; 2449 } 2450 if (bge_newbuf_std(sc, sc->bge_std, 0)) { 2451 ifp->if_ierrors++; 2452 bge_setup_rxdesc_std(sc, sc->bge_std); 2453 continue; 2454 } 2455 } 2456 2457 ifp->if_ipackets++; 2458 #ifndef __i386__ 2459 /* 2460 * The i386 allows unaligned accesses, but for other 2461 * platforms we must make sure the payload is aligned. 
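 * The fixup below copies the frame up by ETHER_ALIGN (2) bytes and
 * advances m_data, so the IP header that follows the 14-byte
 * Ethernet header ends up 32-bit aligned again.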
2462 */ 2463 if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) { 2464 bcopy(m->m_data, m->m_data + ETHER_ALIGN, 2465 cur_rx->bge_len); 2466 m->m_data += ETHER_ALIGN; 2467 } 2468 #endif 2469 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN; 2470 m->m_pkthdr.rcvif = ifp; 2471 2472 if (ifp->if_capenable & IFCAP_RXCSUM) { 2473 if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) { 2474 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 2475 if ((cur_rx->bge_ip_csum ^ 0xffff) == 0) 2476 m->m_pkthdr.csum_flags |= CSUM_IP_VALID; 2477 } 2478 if ((cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) && 2479 m->m_pkthdr.len >= BGE_MIN_FRAME) { 2480 m->m_pkthdr.csum_data = 2481 cur_rx->bge_tcp_udp_csum; 2482 m->m_pkthdr.csum_flags |= 2483 CSUM_DATA_VALID | CSUM_PSEUDO_HDR; 2484 } 2485 } 2486 2487 /* 2488 * If we received a packet with a vlan tag, pass it 2489 * to vlan_input() instead of ether_input(). 2490 */ 2491 if (have_tag) { 2492 m->m_flags |= M_VLANTAG; 2493 m->m_pkthdr.ether_vlantag = vlan_tag; 2494 have_tag = vlan_tag = 0; 2495 } 2496 ether_input_chain(ifp, m, NULL, chain); 2497 } 2498 2499 ether_input_dispatch(chain); 2500 2501 bge_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx); 2502 if (stdcnt) 2503 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std); 2504 if (jumbocnt) 2505 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo); 2506 } 2507 2508 static void 2509 bge_txeof(struct bge_softc *sc) 2510 { 2511 struct bge_tx_bd *cur_tx = NULL; 2512 struct ifnet *ifp; 2513 2514 if (sc->bge_tx_saved_considx == 2515 sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx) 2516 return; 2517 2518 ifp = &sc->arpcom.ac_if; 2519 2520 /* 2521 * Go through our tx ring and free mbufs for those 2522 * frames that have been sent. 2523 */ 2524 while (sc->bge_tx_saved_considx != 2525 sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx) { 2526 uint32_t idx = 0; 2527 2528 idx = sc->bge_tx_saved_considx; 2529 cur_tx = &sc->bge_ldata.bge_tx_ring[idx]; 2530 if (cur_tx->bge_flags & BGE_TXBDFLAG_END) 2531 ifp->if_opackets++; 2532 if (sc->bge_cdata.bge_tx_chain[idx] != NULL) { 2533 bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag, 2534 sc->bge_cdata.bge_tx_dmamap[idx]); 2535 m_freem(sc->bge_cdata.bge_tx_chain[idx]); 2536 sc->bge_cdata.bge_tx_chain[idx] = NULL; 2537 } 2538 sc->bge_txcnt--; 2539 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT); 2540 logif(tx_pkt); 2541 } 2542 2543 if (cur_tx != NULL && 2544 (BGE_TX_RING_CNT - sc->bge_txcnt) >= 2545 (BGE_NSEG_RSVD + BGE_NSEG_SPARE)) 2546 ifp->if_flags &= ~IFF_OACTIVE; 2547 2548 if (sc->bge_txcnt == 0) 2549 ifp->if_timer = 0; 2550 2551 if (!ifq_is_empty(&ifp->if_snd)) 2552 if_devstart(ifp); 2553 } 2554 2555 #ifdef DEVICE_POLLING 2556 2557 static void 2558 bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) 2559 { 2560 struct bge_softc *sc = ifp->if_softc; 2561 uint32_t status; 2562 2563 switch(cmd) { 2564 case POLL_REGISTER: 2565 bge_disable_intr(sc); 2566 break; 2567 case POLL_DEREGISTER: 2568 bge_enable_intr(sc); 2569 break; 2570 case POLL_AND_CHECK_STATUS: 2571 /* 2572 * Process link state changes. 
2573 */ 2574 status = CSR_READ_4(sc, BGE_MAC_STS); 2575 if ((status & sc->bge_link_chg) || sc->bge_link_evt) { 2576 sc->bge_link_evt = 0; 2577 sc->bge_link_upd(sc, status); 2578 } 2579 /* fall through */ 2580 case POLL_ONLY: 2581 if (ifp->if_flags & IFF_RUNNING) { 2582 bge_rxeof(sc); 2583 bge_txeof(sc); 2584 } 2585 break; 2586 } 2587 } 2588 2589 #endif 2590 2591 static void 2592 bge_intr(void *xsc) 2593 { 2594 struct bge_softc *sc = xsc; 2595 struct ifnet *ifp = &sc->arpcom.ac_if; 2596 uint32_t status; 2597 2598 logif(intr); 2599 2600 /* 2601 * Ack the interrupt by writing something to BGE_MBX_IRQ0_LO. Don't 2602 * disable interrupts by writing nonzero like we used to, since with 2603 * our current organization this just gives complications and 2604 * pessimizations for re-enabling interrupts. We used to have races 2605 * instead of the necessary complications. Disabling interrupts 2606 * would just reduce the chance of a status update while we are 2607 * running (by switching to the interrupt-mode coalescence 2608 * parameters), but this chance is already very low so it is more 2609 * efficient to get another interrupt than prevent it. 2610 * 2611 * We do the ack first to ensure another interrupt if there is a 2612 * status update after the ack. We don't check for the status 2613 * changing later because it is more efficient to get another 2614 * interrupt than prevent it, not quite as above (not checking is 2615 * a smaller optimization than not toggling the interrupt enable, 2616 * since checking doesn't involve PCI accesses and toggling require 2617 * the status check). So toggling would probably be a pessimization 2618 * even with MSI. It would only be needed for using a task queue. 2619 */ 2620 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0); 2621 2622 /* 2623 * Process link state changes. 2624 */ 2625 status = CSR_READ_4(sc, BGE_MAC_STS); 2626 if ((status & sc->bge_link_chg) || sc->bge_link_evt) { 2627 sc->bge_link_evt = 0; 2628 sc->bge_link_upd(sc, status); 2629 } 2630 2631 if (ifp->if_flags & IFF_RUNNING) { 2632 /* Check RX return ring producer/consumer */ 2633 bge_rxeof(sc); 2634 2635 /* Check TX ring producer/consumer */ 2636 bge_txeof(sc); 2637 } 2638 2639 if (sc->bge_coal_chg) 2640 bge_coal_change(sc); 2641 } 2642 2643 static void 2644 bge_tick(void *xsc) 2645 { 2646 struct bge_softc *sc = xsc; 2647 struct ifnet *ifp = &sc->arpcom.ac_if; 2648 2649 lwkt_serialize_enter(ifp->if_serializer); 2650 2651 if (BGE_IS_5705_PLUS(sc)) 2652 bge_stats_update_regs(sc); 2653 else 2654 bge_stats_update(sc); 2655 2656 if (sc->bge_flags & BGE_FLAG_TBI) { 2657 /* 2658 * Since in TBI mode auto-polling can't be used we should poll 2659 * link status manually. Here we register pending link event 2660 * and trigger interrupt. 
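 * Setting BGE_MLC_INTR_SET below forces an interrupt, so the actual
 * link check runs in bge_intr() via bge_tbi_link_upd().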
2661 */ 2662 sc->bge_link_evt++; 2663 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET); 2664 } else if (!sc->bge_link) { 2665 mii_tick(device_get_softc(sc->bge_miibus)); 2666 } 2667 2668 callout_reset(&sc->bge_stat_timer, hz, bge_tick, sc); 2669 2670 lwkt_serialize_exit(ifp->if_serializer); 2671 } 2672 2673 static void 2674 bge_stats_update_regs(struct bge_softc *sc) 2675 { 2676 struct ifnet *ifp = &sc->arpcom.ac_if; 2677 struct bge_mac_stats_regs stats; 2678 uint32_t *s; 2679 int i; 2680 2681 s = (uint32_t *)&stats; 2682 for (i = 0; i < sizeof(struct bge_mac_stats_regs); i += 4) { 2683 *s = CSR_READ_4(sc, BGE_RX_STATS + i); 2684 s++; 2685 } 2686 2687 ifp->if_collisions += 2688 (stats.dot3StatsSingleCollisionFrames + 2689 stats.dot3StatsMultipleCollisionFrames + 2690 stats.dot3StatsExcessiveCollisions + 2691 stats.dot3StatsLateCollisions) - 2692 ifp->if_collisions; 2693 } 2694 2695 static void 2696 bge_stats_update(struct bge_softc *sc) 2697 { 2698 struct ifnet *ifp = &sc->arpcom.ac_if; 2699 bus_size_t stats; 2700 2701 stats = BGE_MEMWIN_START + BGE_STATS_BLOCK; 2702 2703 #define READ_STAT(sc, stats, stat) \ 2704 CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat)) 2705 2706 ifp->if_collisions += 2707 (READ_STAT(sc, stats, 2708 txstats.dot3StatsSingleCollisionFrames.bge_addr_lo) + 2709 READ_STAT(sc, stats, 2710 txstats.dot3StatsMultipleCollisionFrames.bge_addr_lo) + 2711 READ_STAT(sc, stats, 2712 txstats.dot3StatsExcessiveCollisions.bge_addr_lo) + 2713 READ_STAT(sc, stats, 2714 txstats.dot3StatsLateCollisions.bge_addr_lo)) - 2715 ifp->if_collisions; 2716 2717 #undef READ_STAT 2718 2719 #ifdef notdef 2720 ifp->if_collisions += 2721 (sc->bge_rdata->bge_info.bge_stats.dot3StatsSingleCollisionFrames + 2722 sc->bge_rdata->bge_info.bge_stats.dot3StatsMultipleCollisionFrames + 2723 sc->bge_rdata->bge_info.bge_stats.dot3StatsExcessiveCollisions + 2724 sc->bge_rdata->bge_info.bge_stats.dot3StatsLateCollisions) - 2725 ifp->if_collisions; 2726 #endif 2727 } 2728 2729 /* 2730 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data 2731 * pointers to descriptors. 2732 */ 2733 static int 2734 bge_encap(struct bge_softc *sc, struct mbuf **m_head0, uint32_t *txidx) 2735 { 2736 struct bge_tx_bd *d = NULL; 2737 uint16_t csum_flags = 0; 2738 bus_dma_segment_t segs[BGE_NSEG_NEW]; 2739 bus_dmamap_t map; 2740 int error, maxsegs, nsegs, idx, i; 2741 struct mbuf *m_head = *m_head0; 2742 2743 if (m_head->m_pkthdr.csum_flags) { 2744 if (m_head->m_pkthdr.csum_flags & CSUM_IP) 2745 csum_flags |= BGE_TXBDFLAG_IP_CSUM; 2746 if (m_head->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) 2747 csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM; 2748 if (m_head->m_flags & M_LASTFRAG) 2749 csum_flags |= BGE_TXBDFLAG_IP_FRAG_END; 2750 else if (m_head->m_flags & M_FRAG) 2751 csum_flags |= BGE_TXBDFLAG_IP_FRAG; 2752 } 2753 2754 idx = *txidx; 2755 map = sc->bge_cdata.bge_tx_dmamap[idx]; 2756 2757 maxsegs = (BGE_TX_RING_CNT - sc->bge_txcnt) - BGE_NSEG_RSVD; 2758 KASSERT(maxsegs >= BGE_NSEG_SPARE, 2759 ("not enough segments %d\n", maxsegs)); 2760 2761 if (maxsegs > BGE_NSEG_NEW) 2762 maxsegs = BGE_NSEG_NEW; 2763 2764 /* 2765 * Pad outbound frame to BGE_MIN_FRAME for an unusual reason. 2766 * The bge hardware will pad out Tx runts to BGE_MIN_FRAME, 2767 * but when such padded frames employ the bge IP/TCP checksum 2768 * offload, the hardware checksum assist gives incorrect results 2769 * (possibly from incorporating its own padding into the UDP/TCP 2770 * checksum; who knows). 
If we pad such runts with zeros, the 2771 * onboard checksum comes out correct. 2772 */ 2773 if ((csum_flags & BGE_TXBDFLAG_TCP_UDP_CSUM) && 2774 m_head->m_pkthdr.len < BGE_MIN_FRAME) { 2775 error = m_devpad(m_head, BGE_MIN_FRAME); 2776 if (error) 2777 goto back; 2778 } 2779 2780 error = bus_dmamap_load_mbuf_defrag(sc->bge_cdata.bge_tx_mtag, map, 2781 m_head0, segs, maxsegs, &nsegs, BUS_DMA_NOWAIT); 2782 if (error) 2783 goto back; 2784 2785 m_head = *m_head0; 2786 bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag, map, BUS_DMASYNC_PREWRITE); 2787 2788 for (i = 0; ; i++) { 2789 d = &sc->bge_ldata.bge_tx_ring[idx]; 2790 2791 d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr); 2792 d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr); 2793 d->bge_len = segs[i].ds_len; 2794 d->bge_flags = csum_flags; 2795 2796 if (i == nsegs - 1) 2797 break; 2798 BGE_INC(idx, BGE_TX_RING_CNT); 2799 } 2800 /* Mark the last segment as end of packet... */ 2801 d->bge_flags |= BGE_TXBDFLAG_END; 2802 2803 /* Set vlan tag to the first segment of the packet. */ 2804 d = &sc->bge_ldata.bge_tx_ring[*txidx]; 2805 if (m_head->m_flags & M_VLANTAG) { 2806 d->bge_flags |= BGE_TXBDFLAG_VLAN_TAG; 2807 d->bge_vlan_tag = m_head->m_pkthdr.ether_vlantag; 2808 } else { 2809 d->bge_vlan_tag = 0; 2810 } 2811 2812 /* 2813 * Insure that the map for this transmission is placed at 2814 * the array index of the last descriptor in this chain. 2815 */ 2816 sc->bge_cdata.bge_tx_dmamap[*txidx] = sc->bge_cdata.bge_tx_dmamap[idx]; 2817 sc->bge_cdata.bge_tx_dmamap[idx] = map; 2818 sc->bge_cdata.bge_tx_chain[idx] = m_head; 2819 sc->bge_txcnt += nsegs; 2820 2821 BGE_INC(idx, BGE_TX_RING_CNT); 2822 *txidx = idx; 2823 back: 2824 if (error) { 2825 m_freem(*m_head0); 2826 *m_head0 = NULL; 2827 } 2828 return error; 2829 } 2830 2831 /* 2832 * Main transmit routine. To avoid having to do mbuf copies, we put pointers 2833 * to the mbuf data regions directly in the transmit descriptors. 2834 */ 2835 static void 2836 bge_start(struct ifnet *ifp) 2837 { 2838 struct bge_softc *sc = ifp->if_softc; 2839 struct mbuf *m_head = NULL; 2840 uint32_t prodidx; 2841 int need_trans; 2842 2843 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) 2844 return; 2845 2846 prodidx = sc->bge_tx_prodidx; 2847 2848 need_trans = 0; 2849 while (sc->bge_cdata.bge_tx_chain[prodidx] == NULL) { 2850 m_head = ifq_dequeue(&ifp->if_snd, NULL); 2851 if (m_head == NULL) 2852 break; 2853 2854 /* 2855 * XXX 2856 * The code inside the if() block is never reached since we 2857 * must mark CSUM_IP_FRAGS in our if_hwassist to start getting 2858 * requests to checksum TCP/UDP in a fragmented packet. 2859 * 2860 * XXX 2861 * safety overkill. If this is a fragmented packet chain 2862 * with delayed TCP/UDP checksums, then only encapsulate 2863 * it if we have enough descriptors to handle the entire 2864 * chain at once. 2865 * (paranoia -- may not actually be needed) 2866 */ 2867 if ((m_head->m_flags & M_FIRSTFRAG) && 2868 (m_head->m_pkthdr.csum_flags & CSUM_DELAY_DATA)) { 2869 if ((BGE_TX_RING_CNT - sc->bge_txcnt) < 2870 m_head->m_pkthdr.csum_data + BGE_NSEG_RSVD) { 2871 ifp->if_flags |= IFF_OACTIVE; 2872 ifq_prepend(&ifp->if_snd, m_head); 2873 break; 2874 } 2875 } 2876 2877 /* 2878 * Sanity check: avoid coming within BGE_NSEG_RSVD 2879 * descriptors of the end of the ring. Also make 2880 * sure there are BGE_NSEG_SPARE descriptors for 2881 * jumbo buffers' defragmentation. 
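 * If the ring is that full, set IFF_OACTIVE, put the mbuf back on
 * the send queue and wait for bge_txeof() to reclaim descriptors.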
2882 */ 2883 if ((BGE_TX_RING_CNT - sc->bge_txcnt) < 2884 (BGE_NSEG_RSVD + BGE_NSEG_SPARE)) { 2885 ifp->if_flags |= IFF_OACTIVE; 2886 ifq_prepend(&ifp->if_snd, m_head); 2887 break; 2888 } 2889 2890 /* 2891 * Pack the data into the transmit ring. If we 2892 * don't have room, set the OACTIVE flag and wait 2893 * for the NIC to drain the ring. 2894 */ 2895 if (bge_encap(sc, &m_head, &prodidx)) { 2896 ifp->if_flags |= IFF_OACTIVE; 2897 ifp->if_oerrors++; 2898 break; 2899 } 2900 need_trans = 1; 2901 2902 ETHER_BPF_MTAP(ifp, m_head); 2903 } 2904 2905 if (!need_trans) 2906 return; 2907 2908 /* Transmit */ 2909 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx); 2910 /* 5700 b2 errata */ 2911 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX) 2912 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx); 2913 2914 sc->bge_tx_prodidx = prodidx; 2915 2916 /* 2917 * Set a timeout in case the chip goes out to lunch. 2918 */ 2919 ifp->if_timer = 5; 2920 } 2921 2922 static void 2923 bge_init(void *xsc) 2924 { 2925 struct bge_softc *sc = xsc; 2926 struct ifnet *ifp = &sc->arpcom.ac_if; 2927 uint16_t *m; 2928 2929 ASSERT_SERIALIZED(ifp->if_serializer); 2930 2931 if (ifp->if_flags & IFF_RUNNING) 2932 return; 2933 2934 /* Cancel pending I/O and flush buffers. */ 2935 bge_stop(sc); 2936 bge_reset(sc); 2937 bge_chipinit(sc); 2938 2939 /* 2940 * Init the various state machines, ring 2941 * control blocks and firmware. 2942 */ 2943 if (bge_blockinit(sc)) { 2944 if_printf(ifp, "initialization failure\n"); 2945 bge_stop(sc); 2946 return; 2947 } 2948 2949 /* Specify MTU. */ 2950 CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu + 2951 ETHER_HDR_LEN + ETHER_CRC_LEN + EVL_ENCAPLEN); 2952 2953 /* Load our MAC address. */ 2954 m = (uint16_t *)&sc->arpcom.ac_enaddr[0]; 2955 CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0])); 2956 CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2])); 2957 2958 /* Enable or disable promiscuous mode as needed. */ 2959 bge_setpromisc(sc); 2960 2961 /* Program multicast filter. */ 2962 bge_setmulti(sc); 2963 2964 /* Init RX ring. */ 2965 if (bge_init_rx_ring_std(sc)) { 2966 if_printf(ifp, "RX ring initialization failed\n"); 2967 bge_stop(sc); 2968 return; 2969 } 2970 2971 /* 2972 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's 2973 * memory to insure that the chip has in fact read the first 2974 * entry of the ring. 2975 */ 2976 if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) { 2977 uint32_t v, i; 2978 for (i = 0; i < 10; i++) { 2979 DELAY(20); 2980 v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8); 2981 if (v == (MCLBYTES - ETHER_ALIGN)) 2982 break; 2983 } 2984 if (i == 10) 2985 if_printf(ifp, "5705 A0 chip failed to load RX ring\n"); 2986 } 2987 2988 /* Init jumbo RX ring. */ 2989 if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN)) { 2990 if (bge_init_rx_ring_jumbo(sc)) { 2991 if_printf(ifp, "Jumbo RX ring initialization failed\n"); 2992 bge_stop(sc); 2993 return; 2994 } 2995 } 2996 2997 /* Init our RX return ring index */ 2998 sc->bge_rx_saved_considx = 0; 2999 3000 /* Init TX ring. */ 3001 bge_init_tx_ring(sc); 3002 3003 /* Turn on transmitter */ 3004 BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE); 3005 3006 /* Turn on receiver */ 3007 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE); 3008 3009 /* Tell firmware we're alive. */ 3010 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 3011 3012 /* Enable host interrupts if polling(4) is not enabled. 
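 * With polling(4) active the interrupt stays masked and bge_poll()
 * drives RX/TX completion instead.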
*/ 3013 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA); 3014 #ifdef DEVICE_POLLING 3015 if (ifp->if_flags & IFF_POLLING) 3016 bge_disable_intr(sc); 3017 else 3018 #endif 3019 bge_enable_intr(sc); 3020 3021 bge_ifmedia_upd(ifp); 3022 3023 ifp->if_flags |= IFF_RUNNING; 3024 ifp->if_flags &= ~IFF_OACTIVE; 3025 3026 callout_reset(&sc->bge_stat_timer, hz, bge_tick, sc); 3027 } 3028 3029 /* 3030 * Set media options. 3031 */ 3032 static int 3033 bge_ifmedia_upd(struct ifnet *ifp) 3034 { 3035 struct bge_softc *sc = ifp->if_softc; 3036 3037 /* If this is a 1000baseX NIC, enable the TBI port. */ 3038 if (sc->bge_flags & BGE_FLAG_TBI) { 3039 struct ifmedia *ifm = &sc->bge_ifmedia; 3040 3041 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 3042 return(EINVAL); 3043 3044 switch(IFM_SUBTYPE(ifm->ifm_media)) { 3045 case IFM_AUTO: 3046 /* 3047 * The BCM5704 ASIC appears to have a special 3048 * mechanism for programming the autoneg 3049 * advertisement registers in TBI mode. 3050 */ 3051 if (!bge_fake_autoneg && 3052 sc->bge_asicrev == BGE_ASICREV_BCM5704) { 3053 uint32_t sgdig; 3054 3055 CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0); 3056 sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG); 3057 sgdig |= BGE_SGDIGCFG_AUTO | 3058 BGE_SGDIGCFG_PAUSE_CAP | 3059 BGE_SGDIGCFG_ASYM_PAUSE; 3060 CSR_WRITE_4(sc, BGE_SGDIG_CFG, 3061 sgdig | BGE_SGDIGCFG_SEND); 3062 DELAY(5); 3063 CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig); 3064 } 3065 break; 3066 case IFM_1000_SX: 3067 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) { 3068 BGE_CLRBIT(sc, BGE_MAC_MODE, 3069 BGE_MACMODE_HALF_DUPLEX); 3070 } else { 3071 BGE_SETBIT(sc, BGE_MAC_MODE, 3072 BGE_MACMODE_HALF_DUPLEX); 3073 } 3074 break; 3075 default: 3076 return(EINVAL); 3077 } 3078 } else { 3079 struct mii_data *mii = device_get_softc(sc->bge_miibus); 3080 3081 sc->bge_link_evt++; 3082 sc->bge_link = 0; 3083 if (mii->mii_instance) { 3084 struct mii_softc *miisc; 3085 3086 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 3087 mii_phy_reset(miisc); 3088 } 3089 mii_mediachg(mii); 3090 } 3091 return(0); 3092 } 3093 3094 /* 3095 * Report current media status. 
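 * TBI (fiber) interfaces report link and duplex straight from the
 * MAC status/mode registers; copper interfaces defer to
 * mii_pollstat().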
3096 */ 3097 static void 3098 bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 3099 { 3100 struct bge_softc *sc = ifp->if_softc; 3101 3102 if (sc->bge_flags & BGE_FLAG_TBI) { 3103 ifmr->ifm_status = IFM_AVALID; 3104 ifmr->ifm_active = IFM_ETHER; 3105 if (CSR_READ_4(sc, BGE_MAC_STS) & 3106 BGE_MACSTAT_TBI_PCS_SYNCHED) { 3107 ifmr->ifm_status |= IFM_ACTIVE; 3108 } else { 3109 ifmr->ifm_active |= IFM_NONE; 3110 return; 3111 } 3112 3113 ifmr->ifm_active |= IFM_1000_SX; 3114 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX) 3115 ifmr->ifm_active |= IFM_HDX; 3116 else 3117 ifmr->ifm_active |= IFM_FDX; 3118 } else { 3119 struct mii_data *mii = device_get_softc(sc->bge_miibus); 3120 3121 mii_pollstat(mii); 3122 ifmr->ifm_active = mii->mii_media_active; 3123 ifmr->ifm_status = mii->mii_media_status; 3124 } 3125 } 3126 3127 static int 3128 bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr) 3129 { 3130 struct bge_softc *sc = ifp->if_softc; 3131 struct ifreq *ifr = (struct ifreq *)data; 3132 int mask, error = 0; 3133 3134 ASSERT_SERIALIZED(ifp->if_serializer); 3135 3136 switch (command) { 3137 case SIOCSIFMTU: 3138 if ((!BGE_IS_JUMBO_CAPABLE(sc) && ifr->ifr_mtu > ETHERMTU) || 3139 (BGE_IS_JUMBO_CAPABLE(sc) && 3140 ifr->ifr_mtu > BGE_JUMBO_MTU)) { 3141 error = EINVAL; 3142 } else if (ifp->if_mtu != ifr->ifr_mtu) { 3143 ifp->if_mtu = ifr->ifr_mtu; 3144 ifp->if_flags &= ~IFF_RUNNING; 3145 bge_init(sc); 3146 } 3147 break; 3148 case SIOCSIFFLAGS: 3149 if (ifp->if_flags & IFF_UP) { 3150 if (ifp->if_flags & IFF_RUNNING) { 3151 mask = ifp->if_flags ^ sc->bge_if_flags; 3152 3153 /* 3154 * If only the state of the PROMISC flag 3155 * changed, then just use the 'set promisc 3156 * mode' command instead of reinitializing 3157 * the entire NIC. Doing a full re-init 3158 * means reloading the firmware and waiting 3159 * for it to start up, which may take a 3160 * second or two. Similarly for ALLMULTI. 3161 */ 3162 if (mask & IFF_PROMISC) 3163 bge_setpromisc(sc); 3164 if (mask & IFF_ALLMULTI) 3165 bge_setmulti(sc); 3166 } else { 3167 bge_init(sc); 3168 } 3169 } else { 3170 if (ifp->if_flags & IFF_RUNNING) 3171 bge_stop(sc); 3172 } 3173 sc->bge_if_flags = ifp->if_flags; 3174 break; 3175 case SIOCADDMULTI: 3176 case SIOCDELMULTI: 3177 if (ifp->if_flags & IFF_RUNNING) 3178 bge_setmulti(sc); 3179 break; 3180 case SIOCSIFMEDIA: 3181 case SIOCGIFMEDIA: 3182 if (sc->bge_flags & BGE_FLAG_TBI) { 3183 error = ifmedia_ioctl(ifp, ifr, 3184 &sc->bge_ifmedia, command); 3185 } else { 3186 struct mii_data *mii; 3187 3188 mii = device_get_softc(sc->bge_miibus); 3189 error = ifmedia_ioctl(ifp, ifr, 3190 &mii->mii_media, command); 3191 } 3192 break; 3193 case SIOCSIFCAP: 3194 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 3195 if (mask & IFCAP_HWCSUM) { 3196 ifp->if_capenable ^= (mask & IFCAP_HWCSUM); 3197 if (IFCAP_HWCSUM & ifp->if_capenable) 3198 ifp->if_hwassist = BGE_CSUM_FEATURES; 3199 else 3200 ifp->if_hwassist = 0; 3201 } 3202 break; 3203 default: 3204 error = ether_ioctl(ifp, command, data); 3205 break; 3206 } 3207 return error; 3208 } 3209 3210 static void 3211 bge_watchdog(struct ifnet *ifp) 3212 { 3213 struct bge_softc *sc = ifp->if_softc; 3214 3215 if_printf(ifp, "watchdog timeout -- resetting\n"); 3216 3217 ifp->if_flags &= ~IFF_RUNNING; 3218 bge_init(sc); 3219 3220 ifp->if_oerrors++; 3221 3222 if (!ifq_is_empty(&ifp->if_snd)) 3223 if_devstart(ifp); 3224 } 3225 3226 /* 3227 * Stop the adapter and free any mbufs allocated to the 3228 * RX and TX lists. 
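 * Called with the serializer held, e.g. on interface-down and from
 * detach/shutdown/suspend.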
3229 */ 3230 static void 3231 bge_stop(struct bge_softc *sc) 3232 { 3233 struct ifnet *ifp = &sc->arpcom.ac_if; 3234 struct ifmedia_entry *ifm; 3235 struct mii_data *mii = NULL; 3236 int mtmp, itmp; 3237 3238 ASSERT_SERIALIZED(ifp->if_serializer); 3239 3240 if ((sc->bge_flags & BGE_FLAG_TBI) == 0) 3241 mii = device_get_softc(sc->bge_miibus); 3242 3243 callout_stop(&sc->bge_stat_timer); 3244 3245 /* 3246 * Disable all of the receiver blocks 3247 */ 3248 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE); 3249 BGE_CLRBIT(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE); 3250 BGE_CLRBIT(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE); 3251 if (!BGE_IS_5705_PLUS(sc)) 3252 BGE_CLRBIT(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE); 3253 BGE_CLRBIT(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE); 3254 BGE_CLRBIT(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE); 3255 BGE_CLRBIT(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE); 3256 3257 /* 3258 * Disable all of the transmit blocks 3259 */ 3260 BGE_CLRBIT(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE); 3261 BGE_CLRBIT(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE); 3262 BGE_CLRBIT(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE); 3263 BGE_CLRBIT(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE); 3264 BGE_CLRBIT(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE); 3265 if (!BGE_IS_5705_PLUS(sc)) 3266 BGE_CLRBIT(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE); 3267 BGE_CLRBIT(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE); 3268 3269 /* 3270 * Shut down all of the memory managers and related 3271 * state machines. 3272 */ 3273 BGE_CLRBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE); 3274 BGE_CLRBIT(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE); 3275 if (!BGE_IS_5705_PLUS(sc)) 3276 BGE_CLRBIT(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE); 3277 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF); 3278 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0); 3279 if (!BGE_IS_5705_PLUS(sc)) { 3280 BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE); 3281 BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE); 3282 } 3283 3284 /* Disable host interrupts. */ 3285 bge_disable_intr(sc); 3286 3287 /* 3288 * Tell firmware we're shutting down. 3289 */ 3290 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 3291 3292 /* Free the RX lists. */ 3293 bge_free_rx_ring_std(sc); 3294 3295 /* Free jumbo RX list. */ 3296 if (BGE_IS_JUMBO_CAPABLE(sc)) 3297 bge_free_rx_ring_jumbo(sc); 3298 3299 /* Free TX buffers. */ 3300 bge_free_tx_ring(sc); 3301 3302 /* 3303 * Isolate/power down the PHY, but leave the media selection 3304 * unchanged so that things will be put back to normal when 3305 * we bring the interface back up. 3306 * 3307 * 'mii' may be NULL in the following cases: 3308 * - The device uses TBI. 3309 * - bge_stop() is called by bge_detach(). 3310 */ 3311 if (mii != NULL) { 3312 itmp = ifp->if_flags; 3313 ifp->if_flags |= IFF_UP; 3314 ifm = mii->mii_media.ifm_cur; 3315 mtmp = ifm->ifm_media; 3316 ifm->ifm_media = IFM_ETHER|IFM_NONE; 3317 mii_mediachg(mii); 3318 ifm->ifm_media = mtmp; 3319 ifp->if_flags = itmp; 3320 } 3321 3322 sc->bge_link = 0; 3323 sc->bge_coal_chg = 0; 3324 3325 sc->bge_tx_saved_considx = BGE_TXCONS_UNSET; 3326 3327 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 3328 ifp->if_timer = 0; 3329 } 3330 3331 /* 3332 * Stop all chip I/O so that the kernel's probe routines don't 3333 * get confused by errant DMAs when rebooting. 
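 * This is just bge_stop() plus a chip reset, run under the
 * interface serializer.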
3334 */ 3335 static void 3336 bge_shutdown(device_t dev) 3337 { 3338 struct bge_softc *sc = device_get_softc(dev); 3339 struct ifnet *ifp = &sc->arpcom.ac_if; 3340 3341 lwkt_serialize_enter(ifp->if_serializer); 3342 bge_stop(sc); 3343 bge_reset(sc); 3344 lwkt_serialize_exit(ifp->if_serializer); 3345 } 3346 3347 static int 3348 bge_suspend(device_t dev) 3349 { 3350 struct bge_softc *sc = device_get_softc(dev); 3351 struct ifnet *ifp = &sc->arpcom.ac_if; 3352 3353 lwkt_serialize_enter(ifp->if_serializer); 3354 bge_stop(sc); 3355 lwkt_serialize_exit(ifp->if_serializer); 3356 3357 return 0; 3358 } 3359 3360 static int 3361 bge_resume(device_t dev) 3362 { 3363 struct bge_softc *sc = device_get_softc(dev); 3364 struct ifnet *ifp = &sc->arpcom.ac_if; 3365 3366 lwkt_serialize_enter(ifp->if_serializer); 3367 3368 if (ifp->if_flags & IFF_UP) { 3369 bge_init(sc); 3370 3371 if (!ifq_is_empty(&ifp->if_snd)) 3372 if_devstart(ifp); 3373 } 3374 3375 lwkt_serialize_exit(ifp->if_serializer); 3376 3377 return 0; 3378 } 3379 3380 static void 3381 bge_setpromisc(struct bge_softc *sc) 3382 { 3383 struct ifnet *ifp = &sc->arpcom.ac_if; 3384 3385 if (ifp->if_flags & IFF_PROMISC) 3386 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); 3387 else 3388 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); 3389 } 3390 3391 static void 3392 bge_dma_free(struct bge_softc *sc) 3393 { 3394 int i; 3395 3396 /* Destroy RX mbuf DMA stuffs. */ 3397 if (sc->bge_cdata.bge_rx_mtag != NULL) { 3398 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { 3399 bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag, 3400 sc->bge_cdata.bge_rx_std_dmamap[i]); 3401 } 3402 bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag, 3403 sc->bge_cdata.bge_rx_tmpmap); 3404 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_mtag); 3405 } 3406 3407 /* Destroy TX mbuf DMA stuffs. */ 3408 if (sc->bge_cdata.bge_tx_mtag != NULL) { 3409 for (i = 0; i < BGE_TX_RING_CNT; i++) { 3410 bus_dmamap_destroy(sc->bge_cdata.bge_tx_mtag, 3411 sc->bge_cdata.bge_tx_dmamap[i]); 3412 } 3413 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_mtag); 3414 } 3415 3416 /* Destroy standard RX ring */ 3417 bge_dma_block_free(sc->bge_cdata.bge_rx_std_ring_tag, 3418 sc->bge_cdata.bge_rx_std_ring_map, 3419 sc->bge_ldata.bge_rx_std_ring); 3420 3421 if (BGE_IS_JUMBO_CAPABLE(sc)) 3422 bge_free_jumbo_mem(sc); 3423 3424 /* Destroy RX return ring */ 3425 bge_dma_block_free(sc->bge_cdata.bge_rx_return_ring_tag, 3426 sc->bge_cdata.bge_rx_return_ring_map, 3427 sc->bge_ldata.bge_rx_return_ring); 3428 3429 /* Destroy TX ring */ 3430 bge_dma_block_free(sc->bge_cdata.bge_tx_ring_tag, 3431 sc->bge_cdata.bge_tx_ring_map, 3432 sc->bge_ldata.bge_tx_ring); 3433 3434 /* Destroy status block */ 3435 bge_dma_block_free(sc->bge_cdata.bge_status_tag, 3436 sc->bge_cdata.bge_status_map, 3437 sc->bge_ldata.bge_status_block); 3438 3439 /* Destroy statistics block */ 3440 bge_dma_block_free(sc->bge_cdata.bge_stats_tag, 3441 sc->bge_cdata.bge_stats_map, 3442 sc->bge_ldata.bge_stats); 3443 3444 /* Destroy the parent tag */ 3445 if (sc->bge_cdata.bge_parent_tag != NULL) 3446 bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag); 3447 } 3448 3449 static int 3450 bge_dma_alloc(struct bge_softc *sc) 3451 { 3452 struct ifnet *ifp = &sc->arpcom.ac_if; 3453 int i, error; 3454 3455 /* 3456 * Allocate the parent bus DMA tag appropriate for PCI. 
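 * The mbuf, ring and status/statistics block tags created below are
 * all derived from this parent tag.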
3457 */ 3458 error = bus_dma_tag_create(NULL, 1, 0, 3459 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 3460 NULL, NULL, 3461 BUS_SPACE_MAXSIZE_32BIT, 0, 3462 BUS_SPACE_MAXSIZE_32BIT, 3463 0, &sc->bge_cdata.bge_parent_tag); 3464 if (error) { 3465 if_printf(ifp, "could not allocate parent dma tag\n"); 3466 return error; 3467 } 3468 3469 /* 3470 * Create DMA tag and maps for RX mbufs. 3471 */ 3472 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1, 0, 3473 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 3474 NULL, NULL, MCLBYTES, 1, MCLBYTES, 3475 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK, 3476 &sc->bge_cdata.bge_rx_mtag); 3477 if (error) { 3478 if_printf(ifp, "could not allocate RX mbuf dma tag\n"); 3479 return error; 3480 } 3481 3482 error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag, 3483 BUS_DMA_WAITOK, &sc->bge_cdata.bge_rx_tmpmap); 3484 if (error) { 3485 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_mtag); 3486 sc->bge_cdata.bge_rx_mtag = NULL; 3487 return error; 3488 } 3489 3490 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { 3491 error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag, 3492 BUS_DMA_WAITOK, 3493 &sc->bge_cdata.bge_rx_std_dmamap[i]); 3494 if (error) { 3495 int j; 3496 3497 for (j = 0; j < i; ++j) { 3498 bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag, 3499 sc->bge_cdata.bge_rx_std_dmamap[j]); 3500 } 3501 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_mtag); 3502 sc->bge_cdata.bge_rx_mtag = NULL; 3503 3504 if_printf(ifp, "could not create DMA map for RX\n"); 3505 return error; 3506 } 3507 } 3508 3509 /* 3510 * Create DMA tag and maps for TX mbufs. 3511 */ 3512 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1, 0, 3513 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 3514 NULL, NULL, 3515 BGE_JUMBO_FRAMELEN, BGE_NSEG_NEW, MCLBYTES, 3516 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | 3517 BUS_DMA_ONEBPAGE, 3518 &sc->bge_cdata.bge_tx_mtag); 3519 if (error) { 3520 if_printf(ifp, "could not allocate TX mbuf dma tag\n"); 3521 return error; 3522 } 3523 3524 for (i = 0; i < BGE_TX_RING_CNT; i++) { 3525 error = bus_dmamap_create(sc->bge_cdata.bge_tx_mtag, 3526 BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE, 3527 &sc->bge_cdata.bge_tx_dmamap[i]); 3528 if (error) { 3529 int j; 3530 3531 for (j = 0; j < i; ++j) { 3532 bus_dmamap_destroy(sc->bge_cdata.bge_tx_mtag, 3533 sc->bge_cdata.bge_tx_dmamap[j]); 3534 } 3535 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_mtag); 3536 sc->bge_cdata.bge_tx_mtag = NULL; 3537 3538 if_printf(ifp, "could not create DMA map for TX\n"); 3539 return error; 3540 } 3541 } 3542 3543 /* 3544 * Create DMA stuffs for standard RX ring. 3545 */ 3546 error = bge_dma_block_alloc(sc, BGE_STD_RX_RING_SZ, 3547 &sc->bge_cdata.bge_rx_std_ring_tag, 3548 &sc->bge_cdata.bge_rx_std_ring_map, 3549 (void *)&sc->bge_ldata.bge_rx_std_ring, 3550 &sc->bge_ldata.bge_rx_std_ring_paddr); 3551 if (error) { 3552 if_printf(ifp, "could not create std RX ring\n"); 3553 return error; 3554 } 3555 3556 /* 3557 * Create jumbo buffer pool. 3558 */ 3559 if (BGE_IS_JUMBO_CAPABLE(sc)) { 3560 error = bge_alloc_jumbo_mem(sc); 3561 if (error) { 3562 if_printf(ifp, "could not create jumbo buffer pool\n"); 3563 return error; 3564 } 3565 } 3566 3567 /* 3568 * Create DMA stuffs for RX return ring. 
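 * The return ring size is chip-dependent (5705-class parts are
 * limited to 512 entries), which is why BGE_RX_RTN_RING_SZ() takes
 * the softc.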
3569 */ 3570 error = bge_dma_block_alloc(sc, BGE_RX_RTN_RING_SZ(sc), 3571 &sc->bge_cdata.bge_rx_return_ring_tag, 3572 &sc->bge_cdata.bge_rx_return_ring_map, 3573 (void *)&sc->bge_ldata.bge_rx_return_ring, 3574 &sc->bge_ldata.bge_rx_return_ring_paddr); 3575 if (error) { 3576 if_printf(ifp, "could not create RX ret ring\n"); 3577 return error; 3578 } 3579 3580 /* 3581 * Create DMA stuffs for TX ring. 3582 */ 3583 error = bge_dma_block_alloc(sc, BGE_TX_RING_SZ, 3584 &sc->bge_cdata.bge_tx_ring_tag, 3585 &sc->bge_cdata.bge_tx_ring_map, 3586 (void *)&sc->bge_ldata.bge_tx_ring, 3587 &sc->bge_ldata.bge_tx_ring_paddr); 3588 if (error) { 3589 if_printf(ifp, "could not create TX ring\n"); 3590 return error; 3591 } 3592 3593 /* 3594 * Create DMA stuffs for status block. 3595 */ 3596 error = bge_dma_block_alloc(sc, BGE_STATUS_BLK_SZ, 3597 &sc->bge_cdata.bge_status_tag, 3598 &sc->bge_cdata.bge_status_map, 3599 (void *)&sc->bge_ldata.bge_status_block, 3600 &sc->bge_ldata.bge_status_block_paddr); 3601 if (error) { 3602 if_printf(ifp, "could not create status block\n"); 3603 return error; 3604 } 3605 3606 /* 3607 * Create DMA stuffs for statistics block. 3608 */ 3609 error = bge_dma_block_alloc(sc, BGE_STATS_SZ, 3610 &sc->bge_cdata.bge_stats_tag, 3611 &sc->bge_cdata.bge_stats_map, 3612 (void *)&sc->bge_ldata.bge_stats, 3613 &sc->bge_ldata.bge_stats_paddr); 3614 if (error) { 3615 if_printf(ifp, "could not create stats block\n"); 3616 return error; 3617 } 3618 return 0; 3619 } 3620 3621 static int 3622 bge_dma_block_alloc(struct bge_softc *sc, bus_size_t size, bus_dma_tag_t *tag, 3623 bus_dmamap_t *map, void **addr, bus_addr_t *paddr) 3624 { 3625 bus_dmamem_t dmem; 3626 int error; 3627 3628 error = bus_dmamem_coherent(sc->bge_cdata.bge_parent_tag, PAGE_SIZE, 0, 3629 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 3630 size, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem); 3631 if (error) 3632 return error; 3633 3634 *tag = dmem.dmem_tag; 3635 *map = dmem.dmem_map; 3636 *addr = dmem.dmem_addr; 3637 *paddr = dmem.dmem_busaddr; 3638 3639 return 0; 3640 } 3641 3642 static void 3643 bge_dma_block_free(bus_dma_tag_t tag, bus_dmamap_t map, void *addr) 3644 { 3645 if (tag != NULL) { 3646 bus_dmamap_unload(tag, map); 3647 bus_dmamem_free(tag, addr, map); 3648 bus_dma_tag_destroy(tag); 3649 } 3650 } 3651 3652 /* 3653 * Grrr. The link status word in the status block does 3654 * not work correctly on the BCM5700 rev AX and BX chips, 3655 * according to all available information. Hence, we have 3656 * to enable MII interrupts in order to properly obtain 3657 * async link changes. Unfortunately, this also means that 3658 * we have to read the MAC status register to detect link 3659 * changes, thereby adding an additional register access to 3660 * the interrupt handler. 3661 * 3662 * XXX: perhaps link state detection procedure used for 3663 * BGE_CHIPID_BCM5700_B2 can be used for others BCM5700 revisions. 
/*
 * Grrr. The link status word in the status block does
 * not work correctly on the BCM5700 rev AX and BX chips,
 * according to all available information. Hence, we have
 * to enable MII interrupts in order to properly obtain
 * async link changes. Unfortunately, this also means that
 * we have to read the MAC status register to detect link
 * changes, thereby adding an additional register access to
 * the interrupt handler.
 *
 * XXX: perhaps the link state detection procedure used for
 * BGE_CHIPID_BCM5700_B2 can be used for other BCM5700 revisions.
 */
static void
bge_bcm5700_link_upd(struct bge_softc *sc, uint32_t status __unused)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii = device_get_softc(sc->bge_miibus);

	mii_pollstat(mii);

	if (!sc->bge_link &&
	    (mii->mii_media_status & IFM_ACTIVE) &&
	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
		sc->bge_link++;
		if (bootverbose)
			if_printf(ifp, "link UP\n");
	} else if (sc->bge_link &&
	    (!(mii->mii_media_status & IFM_ACTIVE) ||
	    IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
		sc->bge_link = 0;
		if (bootverbose)
			if_printf(ifp, "link DOWN\n");
	}

	/* Clear the interrupt. */
	CSR_WRITE_4(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_MI_INTERRUPT);
	bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR);
	bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR, BRGPHY_INTRS);
}

static void
bge_tbi_link_upd(struct bge_softc *sc, uint32_t status)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;

#define PCS_ENCODE_ERR	(BGE_MACSTAT_PORT_DECODE_ERROR|BGE_MACSTAT_MI_COMPLETE)

	/*
	 * Sometimes PCS encoding errors are detected in
	 * TBI mode (on fiber NICs), and for some reason
	 * the chip will signal them as link changes.
	 * If we get a link change event, but the 'PCS
	 * encoding error' bit in the MAC status register
	 * is set, don't bother doing a link check.
	 * This avoids spurious "gigabit link up" messages
	 * that sometimes appear on fiber NICs during
	 * periods of heavy traffic.
	 */
	if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
		if (!sc->bge_link) {
			sc->bge_link++;
			if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
				BGE_CLRBIT(sc, BGE_MAC_MODE,
				    BGE_MACMODE_TBI_SEND_CFGS);
			}
			CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);

			if (bootverbose)
				if_printf(ifp, "link UP\n");

			ifp->if_link_state = LINK_STATE_UP;
			if_link_state_change(ifp);
		}
	} else if ((status & PCS_ENCODE_ERR) != PCS_ENCODE_ERR) {
		if (sc->bge_link) {
			sc->bge_link = 0;

			if (bootverbose)
				if_printf(ifp, "link DOWN\n");

			ifp->if_link_state = LINK_STATE_DOWN;
			if_link_state_change(ifp);
		}
	}

#undef PCS_ENCODE_ERR

	/* Clear the attention. */
	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
	    BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
	    BGE_MACSTAT_LINK_CHANGED);
}

static void
bge_copper_link_upd(struct bge_softc *sc, uint32_t status __unused)
{
	/*
	 * Check that the AUTOPOLL bit is set before
	 * processing the event as a real link change.
	 * Turning AUTOPOLL on and off in the MII read/write
	 * functions will often trigger a link status
	 * interrupt for no reason.
	 */
	if (CSR_READ_4(sc, BGE_MI_MODE) & BGE_MIMODE_AUTOPOLL) {
		struct ifnet *ifp = &sc->arpcom.ac_if;
		struct mii_data *mii = device_get_softc(sc->bge_miibus);

		mii_pollstat(mii);

		if (!sc->bge_link &&
		    (mii->mii_media_status & IFM_ACTIVE) &&
		    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
			sc->bge_link++;
			if (bootverbose)
				if_printf(ifp, "link UP\n");
		} else if (sc->bge_link &&
		    (!(mii->mii_media_status & IFM_ACTIVE) ||
		    IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
			sc->bge_link = 0;
			if (bootverbose)
				if_printf(ifp, "link DOWN\n");
		}
	}

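	/*
	 * Clear the attention bits below even when AUTOPOLL is off;
	 * otherwise the pending link-change status would presumably keep
	 * the attention asserted and re-trigger this handler.
	 */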
	/* Clear the attention. */
	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
	    BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
	    BGE_MACSTAT_LINK_CHANGED);
}

static int
bge_sysctl_rx_coal_ticks(SYSCTL_HANDLER_ARGS)
{
	struct bge_softc *sc = arg1;

	return bge_sysctl_coal_chg(oidp, arg1, arg2, req,
			&sc->bge_rx_coal_ticks,
			BGE_RX_COAL_TICKS_CHG);
}

static int
bge_sysctl_tx_coal_ticks(SYSCTL_HANDLER_ARGS)
{
	struct bge_softc *sc = arg1;

	return bge_sysctl_coal_chg(oidp, arg1, arg2, req,
			&sc->bge_tx_coal_ticks,
			BGE_TX_COAL_TICKS_CHG);
}

static int
bge_sysctl_rx_max_coal_bds(SYSCTL_HANDLER_ARGS)
{
	struct bge_softc *sc = arg1;

	return bge_sysctl_coal_chg(oidp, arg1, arg2, req,
			&sc->bge_rx_max_coal_bds,
			BGE_RX_MAX_COAL_BDS_CHG);
}

static int
bge_sysctl_tx_max_coal_bds(SYSCTL_HANDLER_ARGS)
{
	struct bge_softc *sc = arg1;

	return bge_sysctl_coal_chg(oidp, arg1, arg2, req,
			&sc->bge_tx_max_coal_bds,
			BGE_TX_MAX_COAL_BDS_CHG);
}

static int
bge_sysctl_coal_chg(SYSCTL_HANDLER_ARGS, uint32_t *coal,
		    uint32_t coal_chg_mask)
{
	struct bge_softc *sc = arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error = 0, v;

	lwkt_serialize_enter(ifp->if_serializer);

	v = *coal;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (!error && req->newptr != NULL) {
		if (v < 0) {
			error = EINVAL;
		} else {
			*coal = v;
			sc->bge_coal_chg |= coal_chg_mask;
		}
	}

	lwkt_serialize_exit(ifp->if_serializer);
	return error;
}

static void
bge_coal_change(struct bge_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t val;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if (sc->bge_coal_chg & BGE_RX_COAL_TICKS_CHG) {
		CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS,
			    sc->bge_rx_coal_ticks);
		DELAY(10);
		val = CSR_READ_4(sc, BGE_HCC_RX_COAL_TICKS);

		if (bootverbose) {
			if_printf(ifp, "rx_coal_ticks -> %u\n",
				  sc->bge_rx_coal_ticks);
		}
	}

	if (sc->bge_coal_chg & BGE_TX_COAL_TICKS_CHG) {
		CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS,
			    sc->bge_tx_coal_ticks);
		DELAY(10);
		val = CSR_READ_4(sc, BGE_HCC_TX_COAL_TICKS);

		if (bootverbose) {
			if_printf(ifp, "tx_coal_ticks -> %u\n",
				  sc->bge_tx_coal_ticks);
		}
	}

	if (sc->bge_coal_chg & BGE_RX_MAX_COAL_BDS_CHG) {
		CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS,
			    sc->bge_rx_max_coal_bds);
		DELAY(10);
		val = CSR_READ_4(sc, BGE_HCC_RX_MAX_COAL_BDS);

		if (bootverbose) {
			if_printf(ifp, "rx_max_coal_bds -> %u\n",
				  sc->bge_rx_max_coal_bds);
		}
	}

	if (sc->bge_coal_chg & BGE_TX_MAX_COAL_BDS_CHG) {
		CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS,
			    sc->bge_tx_max_coal_bds);
		DELAY(10);
		val = CSR_READ_4(sc, BGE_HCC_TX_MAX_COAL_BDS);

		if (bootverbose) {
			if_printf(ifp, "tx_max_coal_bds -> %u\n",
				  sc->bge_tx_max_coal_bds);
		}
	}

	sc->bge_coal_chg = 0;
}

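/*
 * bge_enable_intr()/bge_disable_intr() below are used when leaving and
 * entering polling mode: writing 0 to the IRQ0 mailbox re-arms the
 * chip's interrupt while writing 1 acknowledges and holds it off, and
 * BGE_PCIMISCCTL_MASK_PCI_INTR additionally masks the interrupt at the
 * PCI level.  The serializer handler is enabled/disabled in step so the
 * interrupt handler and the polling code do not run concurrently.
 */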
3921 */ 3922 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR); 3923 3924 /* 3925 * Trigger another interrupt, since above writing 3926 * to interrupt mailbox0 may acknowledge pending 3927 * interrupt. 3928 */ 3929 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET); 3930 } 3931 3932 static void 3933 bge_disable_intr(struct bge_softc *sc) 3934 { 3935 struct ifnet *ifp = &sc->arpcom.ac_if; 3936 3937 /* 3938 * Mask the interrupt when we start polling. 3939 */ 3940 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR); 3941 3942 /* 3943 * Acknowledge possible asserted interrupt. 3944 */ 3945 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1); 3946 3947 lwkt_serialize_handler_disable(ifp->if_serializer); 3948 } 3949 3950 static int 3951 bge_get_eaddr_mem(struct bge_softc *sc, uint8_t ether_addr[]) 3952 { 3953 uint32_t mac_addr; 3954 int ret = 1; 3955 3956 mac_addr = bge_readmem_ind(sc, 0x0c14); 3957 if ((mac_addr >> 16) == 0x484b) { 3958 ether_addr[0] = (uint8_t)(mac_addr >> 8); 3959 ether_addr[1] = (uint8_t)mac_addr; 3960 mac_addr = bge_readmem_ind(sc, 0x0c18); 3961 ether_addr[2] = (uint8_t)(mac_addr >> 24); 3962 ether_addr[3] = (uint8_t)(mac_addr >> 16); 3963 ether_addr[4] = (uint8_t)(mac_addr >> 8); 3964 ether_addr[5] = (uint8_t)mac_addr; 3965 ret = 0; 3966 } 3967 return ret; 3968 } 3969 3970 static int 3971 bge_get_eaddr_nvram(struct bge_softc *sc, uint8_t ether_addr[]) 3972 { 3973 int mac_offset = BGE_EE_MAC_OFFSET; 3974 3975 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) 3976 mac_offset = BGE_EE_MAC_OFFSET_5906; 3977 3978 return bge_read_nvram(sc, ether_addr, mac_offset + 2, ETHER_ADDR_LEN); 3979 } 3980 3981 static int 3982 bge_get_eaddr_eeprom(struct bge_softc *sc, uint8_t ether_addr[]) 3983 { 3984 if (sc->bge_flags & BGE_FLAG_NO_EEPROM) 3985 return 1; 3986 3987 return bge_read_eeprom(sc, ether_addr, BGE_EE_MAC_OFFSET + 2, 3988 ETHER_ADDR_LEN); 3989 } 3990 3991 static int 3992 bge_get_eaddr(struct bge_softc *sc, uint8_t eaddr[]) 3993 { 3994 static const bge_eaddr_fcn_t bge_eaddr_funcs[] = { 3995 /* NOTE: Order is critical */ 3996 bge_get_eaddr_mem, 3997 bge_get_eaddr_nvram, 3998 bge_get_eaddr_eeprom, 3999 NULL 4000 }; 4001 const bge_eaddr_fcn_t *func; 4002 4003 for (func = bge_eaddr_funcs; *func != NULL; ++func) { 4004 if ((*func)(sc, eaddr) == 0) 4005 break; 4006 } 4007 return (*func == NULL ? ENXIO : 0); 4008 } 4009