1 /* $NetBSD: if_bge.c,v 1.371 2022/08/07 08:24:23 skrll Exp $ */ 2 3 /* 4 * Copyright (c) 2001 Wind River Systems 5 * Copyright (c) 1997, 1998, 1999, 2001 6 * Bill Paul <wpaul@windriver.com>. All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. All advertising materials mentioning features or use of this software 17 * must display the following acknowledgement: 18 * This product includes software developed by Bill Paul. 19 * 4. Neither the name of the author nor the names of any co-contributors 20 * may be used to endorse or promote products derived from this software 21 * without specific prior written permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 26 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 27 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 30 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 31 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 32 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 33 * THE POSSIBILITY OF SUCH DAMAGE. 
34 * 35 * $FreeBSD: if_bge.c,v 1.13 2002/04/04 06:01:31 wpaul Exp $ 36 */ 37 38 /* 39 * Broadcom BCM570x family gigabit ethernet driver for NetBSD. 40 * 41 * NetBSD version by: 42 * 43 * Frank van der Linden <fvdl@wasabisystems.com> 44 * Jason Thorpe <thorpej@wasabisystems.com> 45 * Jonathan Stone <jonathan@dsg.stanford.edu> 46 * 47 * Originally written for FreeBSD by Bill Paul <wpaul@windriver.com> 48 * Senior Engineer, Wind River Systems 49 */ 50 51 /* 52 * The Broadcom BCM5700 is based on technology originally developed by 53 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet 54 * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has 55 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external 56 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo 57 * frames, highly configurable RX filtering, and 16 RX and TX queues 58 * (which, along with RX filter rules, can be used for QOS applications). 59 * Other features, such as TCP segmentation, may be available as part 60 * of value-added firmware updates. Unlike the Tigon I and Tigon II, 61 * firmware images can be stored in hardware and need not be compiled 62 * into the driver. 63 * 64 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will 65 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus. 66 * 67 * The BCM5701 is a single-chip solution incorporating both the BCM5700 68 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701 69 * does not support external SSRAM. 70 * 71 * Broadcom also produces a variation of the BCM5700 under the "Altima" 72 * brand name, which is functionally similar but lacks PCI-X support. 73 * 74 * Without external SSRAM, you can only have at most 4 TX rings, 75 * and the use of the mini RX ring is disabled. This seems to imply 76 * that these features are simply not available on the BCM5701. As a 77 * result, this driver does not implement any support for the mini RX 78 * ring. 
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_bge.c,v 1.371 2022/08/07 08:24:23 skrll Exp $");

#include <sys/param.h>
#include <sys/types.h>

#include <sys/callout.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/mbuf.h>
#include <sys/rndsource.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>
#include <net/bpf.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#endif

/* Headers for TCP Segmentation Offload (TSO) */
#include <netinet/in_systm.h>	/* n_time for <netinet/ip.h>... */
#include <netinet/in.h>		/* ip_{src,dst}, for <netinet/ip.h> */
#include <netinet/ip.h>		/* for struct ip */
#include <netinet/tcp.h>	/* for struct tcphdr */

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/brgphyreg.h>

#include <dev/pci/if_bgereg.h>
#include <dev/pci/if_bgevar.h>

#include <prop/proplib.h>

/* Shortest frame (sans CRC) that needs no pad bytes, i.e., 60. */
#define ETHER_MIN_NOPAD		(ETHER_MIN_LEN - ETHER_CRC_LEN)


/*
 * Tunable thresholds for rx-side bge interrupt mitigation.
 */

/*
 * The pairs of values below were obtained from empirical measurement
 * on bcm5700 rev B2; they are designed to give roughly 1 receive
 * interrupt for every N packets received, where N is, approximately,
 * the second value (rx_max_bds) in each pair.  The values are chosen
 * such that moving from one pair to the succeeding pair was observed
 * to roughly halve interrupt rate under sustained input packet load.
 * The values were empirically chosen to avoid overflowing internal
 * limits on the bcm5700: increasing rx_ticks much beyond 600
 * results in internal wrapping and higher interrupt rates.
 * The limit of 46 frames was chosen to match NFS workloads.
 *
 * These values also work well on bcm5701, bcm5704C, and (less
 * tested) bcm5703.  On other chipsets, (including the Altima chip
 * family), the larger values may overflow internal chip limits,
 * leading to increasing interrupt rates rather than lower interrupt
 * rates.
 *
 * Applications using heavy interrupt mitigation (interrupting every
 * 32 or 46 frames) in both directions may need to increase the TCP
 * windowsize to above 131072 bytes (e.g., to 199608 bytes) to sustain
 * full link bandwidth, due to ACKs and window updates lingering
 * in the RX queue during the 30-to-40-frame interrupt-mitigation window.
 */
/* One mitigation level: coalescing ticks and max buffered descriptors. */
static const struct bge_load_rx_thresh {
	int rx_ticks;
	int rx_max_bds;
}
bge_rx_threshes[] = {
	{ 16,	1 },	/* rx_max_bds = 1 disables interrupt mitigation */
	{ 32,	2 },
	{ 50,	4 },
	{ 100,	8 },
	{ 192,	16 },
	{ 416,	32 },
	{ 598,	46 }
};
#define NBGE_RX_THRESH (sizeof(bge_rx_threshes) / sizeof(bge_rx_threshes[0]))

/* XXX patchable; should be sysctl'able */
static int bge_auto_thresh = 1;		/* nonzero: adapt level to rx load */
static int bge_rx_thresh_lvl;		/* current index into bge_rx_threshes */

static int bge_rxthresh_nodenum;	/* sysctl node number for the level */

/* One possible source for reading the Ethernet (MAC) address. */
typedef int (*bge_eaddr_fcn_t)(struct bge_softc *, uint8_t[]);

static uint32_t	bge_chipid(const struct pci_attach_args *);
static int	bge_can_use_msi(struct bge_softc *);
static int	bge_probe(device_t, cfdata_t, void *);
static void	bge_attach(device_t, device_t, void *);
static int	bge_detach(device_t, int);
static void	bge_release_resources(struct bge_softc *);

static int	bge_get_eaddr_fw(struct bge_softc *, uint8_t[]);
static int	bge_get_eaddr_mem(struct bge_softc *, uint8_t[]);
static int	bge_get_eaddr_nvram(struct bge_softc *, uint8_t[]);
static int	bge_get_eaddr_eeprom(struct bge_softc *, uint8_t[]);
static int	bge_get_eaddr(struct bge_softc *, uint8_t[]);

static void	bge_txeof(struct bge_softc *);
static void	bge_rxcsum(struct bge_softc *, struct bge_rx_bd *, struct mbuf *);
static void	bge_rxeof(struct bge_softc *);

static void	bge_asf_driver_up(struct bge_softc *);
static void	bge_tick(void *);
static void	bge_stats_update(struct bge_softc *);
static void	bge_stats_update_regs(struct bge_softc *);
static int	bge_encap(struct bge_softc *, struct mbuf *, uint32_t *);

static int	bge_intr(void *);
static void	bge_start(struct ifnet *);
static int	bge_ifflags_cb(struct ethercom *);
static int	bge_ioctl(struct ifnet *, u_long, void *);
static int	bge_init(struct ifnet *);
static void	bge_stop(struct ifnet *, int);
static void	bge_watchdog(struct ifnet *);
static int	bge_ifmedia_upd(struct ifnet *);
static void	bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);

static uint8_t	bge_nvram_getbyte(struct bge_softc *, int, uint8_t *);
static int	bge_read_nvram(struct bge_softc *, uint8_t *, int, int);

static uint8_t	bge_eeprom_getbyte(struct bge_softc *, int, uint8_t *);
static int	bge_read_eeprom(struct bge_softc *, void *, int, int);
static void	bge_setmulti(struct bge_softc *);

static void	bge_handle_events(struct bge_softc *);
static int	bge_alloc_jumbo_mem(struct bge_softc *);
#if 0 /* XXX */
static void	bge_free_jumbo_mem(struct bge_softc *);
#endif
static void	*bge_jalloc(struct bge_softc *);
static void	bge_jfree(struct mbuf *, void *, size_t, void *);
static int	bge_newbuf_std(struct bge_softc *, int, struct mbuf *,
		    bus_dmamap_t);
static int	bge_newbuf_jumbo(struct bge_softc *, int, struct mbuf *);
static int	bge_init_rx_ring_std(struct bge_softc *);
static void	bge_free_rx_ring_std(struct bge_softc *m, bool);
static int	bge_init_rx_ring_jumbo(struct bge_softc *);
static void	bge_free_rx_ring_jumbo(struct bge_softc *);
static void	bge_free_tx_ring(struct bge_softc *m, bool);
static int	bge_init_tx_ring(struct bge_softc *);

static int	bge_chipinit(struct bge_softc *);
static int	bge_blockinit(struct bge_softc *);
static int	bge_phy_addr(struct bge_softc *);
static uint32_t	bge_readmem_ind(struct bge_softc *, int);
static void	bge_writemem_ind(struct bge_softc *, int, int);
static void	bge_writembx(struct bge_softc *, int, int);
static void	bge_writembx_flush(struct bge_softc *, int, int);
static void	bge_writemem_direct(struct bge_softc *, int, int);
static void	bge_writereg_ind(struct bge_softc *, int, int);
static void	bge_set_max_readrq(struct bge_softc *);

static int	bge_miibus_readreg(device_t, int, int, uint16_t *);
static int	bge_miibus_writereg(device_t, int, int, uint16_t);
static void	bge_miibus_statchg(struct ifnet *);

/* Reset kinds passed to the firmware handshake routines below. */
#define BGE_RESET_SHUTDOWN	0
#define BGE_RESET_START		1
#define BGE_RESET_SUSPEND	2
static void	bge_sig_post_reset(struct bge_softc *, int);
static void	bge_sig_legacy(struct bge_softc *, int);
static void	bge_sig_pre_reset(struct bge_softc *, int);
static void	bge_wait_for_event_ack(struct bge_softc *);
static void	bge_stop_fw(struct bge_softc *);
static int	bge_reset(struct bge_softc *);
static void	bge_link_upd(struct bge_softc *);
static void	bge_sysctl_init(struct bge_softc *);
static int	bge_sysctl_verify(SYSCTLFN_PROTO);

static void	bge_ape_lock_init(struct bge_softc *);
static void	bge_ape_read_fw_ver(struct bge_softc *);
static int	bge_ape_lock(struct bge_softc *, int);
static void	bge_ape_unlock(struct bge_softc *, int);
static void	bge_ape_send_event(struct bge_softc *, uint32_t);
static void	bge_ape_driver_state_change(struct bge_softc *, int);

#ifdef BGE_DEBUG
#define DPRINTF(x)	if (bgedebug) printf x
#define DPRINTFN(n, x)	if (bgedebug >= (n)) printf x
#define BGE_TSO_PRINTF(x)  do { if (bge_tso_debug) printf x ;} while (0)
int	bgedebug = 0;
int	bge_tso_debug = 0;
void		bge_debug_info(struct bge_softc *);
#else
#define DPRINTF(x)
#define DPRINTFN(n, x)
#define BGE_TSO_PRINTF(x)
#endif

#ifdef BGE_EVENT_COUNTERS
#define BGE_EVCNT_INCR(ev)	(ev).ev_count++
#define BGE_EVCNT_ADD(ev, val)	(ev).ev_count += (val)
#define BGE_EVCNT_UPD(ev, val)	(ev).ev_count = (val)
#else
#define BGE_EVCNT_INCR(ev)	/* nothing */
#define BGE_EVCNT_ADD(ev, val)	/* nothing */
#define BGE_EVCNT_UPD(ev, val)	/* nothing */
#endif

/* Expand to a vendor, product ID pair using the pcidevs names. */
#define VIDDID(a, b) PCI_VENDOR_ ## a, PCI_PRODUCT_ ## a ## _ ## b
/*
 * The BCM5700 documentation seems to indicate that the hardware still has the
 * Alteon vendor ID burned into it, though it should always be overridden by
 * the value in the EEPROM.  We'll check for it anyway.
 */
static const struct bge_product {
	pci_vendor_id_t		bp_vendor;
	pci_product_id_t	bp_product;
	const char		*bp_name;
} bge_products[] = {
	{ VIDDID(ALTEON, BCM5700),	"Broadcom BCM5700 Gigabit" },
	{ VIDDID(ALTEON, BCM5701),	"Broadcom BCM5701 Gigabit" },
	{ VIDDID(ALTIMA, AC1000),	"Altima AC1000 Gigabit" },
	{ VIDDID(ALTIMA, AC1001),	"Altima AC1001 Gigabit" },
	{ VIDDID(ALTIMA, AC1003),	"Altima AC1003 Gigabit" },
	{ VIDDID(ALTIMA, AC9100),	"Altima AC9100 Gigabit" },
	{ VIDDID(APPLE, BCM5701),	"APPLE BCM5701 Gigabit" },
	{ VIDDID(BROADCOM, BCM5700),	"Broadcom BCM5700 Gigabit" },
	{ VIDDID(BROADCOM, BCM5701),	"Broadcom BCM5701 Gigabit" },
	{ VIDDID(BROADCOM, BCM5702),	"Broadcom BCM5702 Gigabit" },
	{ VIDDID(BROADCOM, BCM5702FE),	"Broadcom BCM5702FE Fast" },
	{ VIDDID(BROADCOM, BCM5702X),	"Broadcom BCM5702X Gigabit" },
	{ VIDDID(BROADCOM, BCM5703),	"Broadcom BCM5703 Gigabit" },
	{ VIDDID(BROADCOM, BCM5703X),	"Broadcom BCM5703X Gigabit" },
	{ VIDDID(BROADCOM, BCM5703_ALT),"Broadcom BCM5703 Gigabit" },
	{ VIDDID(BROADCOM, BCM5704C),	"Broadcom BCM5704C Dual Gigabit" },
	{ VIDDID(BROADCOM, BCM5704S),	"Broadcom BCM5704S Dual Gigabit" },
	{ VIDDID(BROADCOM, BCM5704S_ALT),"Broadcom BCM5704S Dual Gigabit" },
	{ VIDDID(BROADCOM, BCM5705),	"Broadcom BCM5705 Gigabit" },
	{ VIDDID(BROADCOM, BCM5705F),	"Broadcom BCM5705F Gigabit" },
	{ VIDDID(BROADCOM, BCM5705K),	"Broadcom BCM5705K Gigabit" },
	{ VIDDID(BROADCOM, BCM5705M),	"Broadcom BCM5705M Gigabit" },
	{ VIDDID(BROADCOM, BCM5705M_ALT),"Broadcom BCM5705M Gigabit" },
	{ VIDDID(BROADCOM, BCM5714),	"Broadcom BCM5714 Gigabit" },
	{ VIDDID(BROADCOM, BCM5714S),	"Broadcom BCM5714S Gigabit" },
	{ VIDDID(BROADCOM, BCM5715),	"Broadcom BCM5715 Gigabit" },
	{ VIDDID(BROADCOM, BCM5715S),	"Broadcom BCM5715S Gigabit" },
	{ VIDDID(BROADCOM, BCM5717),	"Broadcom BCM5717 Gigabit" },
	{ VIDDID(BROADCOM, BCM5717C),	"Broadcom BCM5717 Gigabit" },
	{ VIDDID(BROADCOM, BCM5718),	"Broadcom BCM5718 Gigabit" },
	{ VIDDID(BROADCOM, BCM5719),	"Broadcom BCM5719 Gigabit" },
	{ VIDDID(BROADCOM, BCM5720),	"Broadcom BCM5720 Gigabit" },
	{ VIDDID(BROADCOM, BCM5721),	"Broadcom BCM5721 Gigabit" },
	{ VIDDID(BROADCOM, BCM5722),	"Broadcom BCM5722 Gigabit" },
	{ VIDDID(BROADCOM, BCM5723),	"Broadcom BCM5723 Gigabit" },
	{ VIDDID(BROADCOM, BCM5725),	"Broadcom BCM5725 Gigabit" },
	{ VIDDID(BROADCOM, BCM5727),	"Broadcom BCM5727 Gigabit" },
	{ VIDDID(BROADCOM, BCM5750),	"Broadcom BCM5750 Gigabit" },
	{ VIDDID(BROADCOM, BCM5751),	"Broadcom BCM5751 Gigabit" },
	{ VIDDID(BROADCOM, BCM5751F),	"Broadcom BCM5751F Gigabit" },
	{ VIDDID(BROADCOM, BCM5751M),	"Broadcom BCM5751M Gigabit" },
	{ VIDDID(BROADCOM, BCM5752),	"Broadcom BCM5752 Gigabit" },
	{ VIDDID(BROADCOM, BCM5752M),	"Broadcom BCM5752M Gigabit" },
	{ VIDDID(BROADCOM, BCM5753),	"Broadcom BCM5753 Gigabit" },
	{ VIDDID(BROADCOM, BCM5753F),	"Broadcom BCM5753F Gigabit" },
	{ VIDDID(BROADCOM, BCM5753M),	"Broadcom BCM5753M Gigabit" },
	{ VIDDID(BROADCOM, BCM5754),	"Broadcom BCM5754 Gigabit" },
	{ VIDDID(BROADCOM, BCM5754M),	"Broadcom BCM5754M Gigabit" },
	{ VIDDID(BROADCOM, BCM5755),	"Broadcom BCM5755 Gigabit" },
	{ VIDDID(BROADCOM, BCM5755M),	"Broadcom BCM5755M Gigabit" },
	{ VIDDID(BROADCOM, BCM5756),	"Broadcom BCM5756 Gigabit" },
	{ VIDDID(BROADCOM, BCM5761),	"Broadcom BCM5761 Gigabit" },
	{ VIDDID(BROADCOM, BCM5761E),	"Broadcom BCM5761E Gigabit" },
	{ VIDDID(BROADCOM, BCM5761S),	"Broadcom BCM5761S Gigabit" },
	{ VIDDID(BROADCOM, BCM5761SE),	"Broadcom BCM5761SE Gigabit" },
	{ VIDDID(BROADCOM, BCM5762),	"Broadcom BCM5762 Gigabit" },
	{ VIDDID(BROADCOM, BCM5764),	"Broadcom BCM5764 Gigabit" },
	{ VIDDID(BROADCOM, BCM5780),	"Broadcom BCM5780 Gigabit" },
	{ VIDDID(BROADCOM, BCM5780S),	"Broadcom BCM5780S Gigabit" },
	{ VIDDID(BROADCOM, BCM5781),	"Broadcom BCM5781 Gigabit" },
	{ VIDDID(BROADCOM, BCM5782),	"Broadcom BCM5782 Gigabit" },
	{ VIDDID(BROADCOM, BCM5784M),	"BCM5784M NetLink 1000baseT" },
	{ VIDDID(BROADCOM, BCM5785F),	"BCM5785F NetLink 10/100" },
	{ VIDDID(BROADCOM, BCM5785G),	"BCM5785G NetLink 1000baseT" },
	{ VIDDID(BROADCOM, BCM5786),	"Broadcom BCM5786 Gigabit" },
	{ VIDDID(BROADCOM, BCM5787),	"Broadcom BCM5787 Gigabit" },
	{ VIDDID(BROADCOM, BCM5787F),	"Broadcom BCM5787F 10/100" },
	{ VIDDID(BROADCOM, BCM5787M),	"Broadcom BCM5787M Gigabit" },
	{ VIDDID(BROADCOM, BCM5788),	"Broadcom BCM5788 Gigabit" },
	{ VIDDID(BROADCOM, BCM5789),	"Broadcom BCM5789 Gigabit" },
	{ VIDDID(BROADCOM, BCM5901),	"Broadcom BCM5901 Fast" },
	{ VIDDID(BROADCOM, BCM5901A2),	"Broadcom BCM5901A2 Fast" },
	{ VIDDID(BROADCOM, BCM5903M),	"Broadcom BCM5903M Fast" },
	{ VIDDID(BROADCOM, BCM5906),	"Broadcom BCM5906 Fast" },
	{ VIDDID(BROADCOM, BCM5906M),	"Broadcom BCM5906M Fast" },
	{ VIDDID(BROADCOM, BCM57760),	"Broadcom BCM57760 Gigabit" },
	{ VIDDID(BROADCOM, BCM57761),	"Broadcom BCM57761 Gigabit" },
	{ VIDDID(BROADCOM, BCM57762),	"Broadcom BCM57762 Gigabit" },
	{ VIDDID(BROADCOM, BCM57764),	"Broadcom BCM57764 Gigabit" },
	{ VIDDID(BROADCOM, BCM57765),	"Broadcom BCM57765 Gigabit" },
	{ VIDDID(BROADCOM, BCM57766),	"Broadcom BCM57766 Gigabit" },
	{ VIDDID(BROADCOM, BCM57767),	"Broadcom BCM57767 Gigabit" },
	{ VIDDID(BROADCOM, BCM57780),	"Broadcom BCM57780 Gigabit" },
	{ VIDDID(BROADCOM, BCM57781),	"Broadcom BCM57781 Gigabit" },
	{ VIDDID(BROADCOM, BCM57782),	"Broadcom BCM57782 Gigabit" },
	{ VIDDID(BROADCOM, BCM57785),	"Broadcom BCM57785 Gigabit" },
	{ VIDDID(BROADCOM, BCM57786),	"Broadcom BCM57786 Gigabit" },
	{ VIDDID(BROADCOM, BCM57787),	"Broadcom BCM57787 Gigabit" },
	{ VIDDID(BROADCOM, BCM57788),	"Broadcom BCM57788 Gigabit" },
	{ VIDDID(BROADCOM, BCM57790),	"Broadcom BCM57790 Gigabit" },
	{ VIDDID(BROADCOM, BCM57791),	"Broadcom BCM57791 Gigabit" },
	{ VIDDID(BROADCOM, BCM57795),	"Broadcom BCM57795 Gigabit" },
	{ VIDDID(SCHNEIDERKOCH, SK_9DX1),"SysKonnect SK-9Dx1 Gigabit" },
	{ VIDDID(SCHNEIDERKOCH, SK_9MXX),"SysKonnect SK-9Mxx Gigabit" },
	{ VIDDID(3COM, 3C996),		"3Com 3c996 Gigabit" },
	{ VIDDID(FUJITSU4, PW008GE4),	"Fujitsu PW008GE4 Gigabit" },
	{ VIDDID(FUJITSU4, PW008GE5),	"Fujitsu PW008GE5 Gigabit" },
	{ VIDDID(FUJITSU4, PP250_450_LAN),"Fujitsu Primepower 250/450 Gigabit" },
	{ 0, 0, NULL },
};

/* Convenience tests on the chip-family flags set at attach time. */
#define BGE_IS_JUMBO_CAPABLE(sc)	((sc)->bge_flags & BGEF_JUMBO_CAPABLE)
#define BGE_IS_5700_FAMILY(sc)		((sc)->bge_flags & BGEF_5700_FAMILY)
#define BGE_IS_5705_PLUS(sc)		((sc)->bge_flags & BGEF_5705_PLUS)
#define BGE_IS_5714_FAMILY(sc)		((sc)->bge_flags & BGEF_5714_FAMILY)
#define BGE_IS_575X_PLUS(sc)		((sc)->bge_flags & BGEF_575X_PLUS)
#define BGE_IS_5755_PLUS(sc)		((sc)->bge_flags & BGEF_5755_PLUS)
#define BGE_IS_57765_FAMILY(sc)		((sc)->bge_flags & BGEF_57765_FAMILY)
#define BGE_IS_57765_PLUS(sc)		((sc)->bge_flags & BGEF_57765_PLUS)
#define BGE_IS_5717_PLUS(sc)		((sc)->bge_flags & BGEF_5717_PLUS)

/* Known chip revisions, mapped to printable names. */
static const struct bge_revision {
	uint32_t	br_chipid;
	const char	*br_name;
} bge_revisions[] = {
	{ BGE_CHIPID_BCM5700_A0, "BCM5700 A0" },
	{ BGE_CHIPID_BCM5700_A1, "BCM5700 A1" },
	{ BGE_CHIPID_BCM5700_B0, "BCM5700 B0" },
	{ BGE_CHIPID_BCM5700_B1, "BCM5700 B1" },
	{ BGE_CHIPID_BCM5700_B2, "BCM5700 B2" },
	{ BGE_CHIPID_BCM5700_B3, "BCM5700 B3" },
	{ BGE_CHIPID_BCM5700_ALTIMA, "BCM5700 Altima" },
	{ BGE_CHIPID_BCM5700_C0, "BCM5700 C0" },
	{ BGE_CHIPID_BCM5701_A0, "BCM5701 A0" },
	{ BGE_CHIPID_BCM5701_B0, "BCM5701 B0" },
	{ BGE_CHIPID_BCM5701_B2, "BCM5701 B2" },
	{ BGE_CHIPID_BCM5701_B5, "BCM5701 B5" },
	{ BGE_CHIPID_BCM5703_A0, "BCM5702/5703 A0" },
	{ BGE_CHIPID_BCM5703_A1, "BCM5702/5703 A1" },
	{ BGE_CHIPID_BCM5703_A2, "BCM5702/5703 A2" },
	{ BGE_CHIPID_BCM5703_A3, "BCM5702/5703 A3" },
	{ BGE_CHIPID_BCM5703_B0, "BCM5702/5703 B0" },
	{ BGE_CHIPID_BCM5704_A0, "BCM5704 A0" },
	{ BGE_CHIPID_BCM5704_A1, "BCM5704 A1" },
	{ BGE_CHIPID_BCM5704_A2, "BCM5704 A2" },
	{ BGE_CHIPID_BCM5704_A3, "BCM5704 A3" },
	{ BGE_CHIPID_BCM5704_B0, "BCM5704 B0" },
	{ BGE_CHIPID_BCM5705_A0, "BCM5705 A0" },
	{ BGE_CHIPID_BCM5705_A1, "BCM5705 A1" },
	{ BGE_CHIPID_BCM5705_A2, "BCM5705 A2" },
	{ BGE_CHIPID_BCM5705_A3, "BCM5705 A3" },
	{ BGE_CHIPID_BCM5750_A0, "BCM5750 A0" },
	{ BGE_CHIPID_BCM5750_A1, "BCM5750 A1" },
	{ BGE_CHIPID_BCM5750_A3, "BCM5750 A3" },
	{ BGE_CHIPID_BCM5750_B0, "BCM5750 B0" },
	{ BGE_CHIPID_BCM5750_B1, "BCM5750 B1" },
	{ BGE_CHIPID_BCM5750_C0, "BCM5750 C0" },
	{ BGE_CHIPID_BCM5750_C1, "BCM5750 C1" },
	{ BGE_CHIPID_BCM5750_C2, "BCM5750 C2" },
	{ BGE_CHIPID_BCM5752_A0, "BCM5752 A0" },
	{ BGE_CHIPID_BCM5752_A1, "BCM5752 A1" },
	{ BGE_CHIPID_BCM5752_A2, "BCM5752 A2" },
	{ BGE_CHIPID_BCM5714_A0, "BCM5714 A0" },
	{ BGE_CHIPID_BCM5714_B0, "BCM5714 B0" },
	{ BGE_CHIPID_BCM5714_B3, "BCM5714 B3" },
	{ BGE_CHIPID_BCM5715_A0, "BCM5715 A0" },
	{ BGE_CHIPID_BCM5715_A1, "BCM5715 A1" },
	{ BGE_CHIPID_BCM5715_A3, "BCM5715 A3" },
	{ BGE_CHIPID_BCM5717_A0, "BCM5717 A0" },
	{ BGE_CHIPID_BCM5717_B0, "BCM5717 B0" },
	{ BGE_CHIPID_BCM5719_A0, "BCM5719 A0" },
	{ BGE_CHIPID_BCM5720_A0, "BCM5720 A0" },
	{ BGE_CHIPID_BCM5755_A0, "BCM5755 A0" },
	{ BGE_CHIPID_BCM5755_A1, "BCM5755 A1" },
	{ BGE_CHIPID_BCM5755_A2, "BCM5755 A2" },
	{ BGE_CHIPID_BCM5755_C0, "BCM5755 C0" },
	{ BGE_CHIPID_BCM5761_A0, "BCM5761 A0" },
	{ BGE_CHIPID_BCM5761_A1, "BCM5761 A1" },
	{ BGE_CHIPID_BCM5762_A0, "BCM5762 A0" },
	{ BGE_CHIPID_BCM5762_B0, "BCM5762 B0" },
	{ BGE_CHIPID_BCM5784_A0, "BCM5784 A0" },
	{ BGE_CHIPID_BCM5784_A1, "BCM5784 A1" },
	{ BGE_CHIPID_BCM5784_B0, "BCM5784 B0" },
	/* 5754 and 5787 share the same ASIC ID */
	{ BGE_CHIPID_BCM5787_A0, "BCM5754/5787 A0" },
	{ BGE_CHIPID_BCM5787_A1, "BCM5754/5787 A1" },
	{ BGE_CHIPID_BCM5787_A2, "BCM5754/5787 A2" },
	{ BGE_CHIPID_BCM5906_A0, "BCM5906 A0" },
	{ BGE_CHIPID_BCM5906_A1, "BCM5906 A1" },
	{ BGE_CHIPID_BCM5906_A2, "BCM5906 A2" },
	{ BGE_CHIPID_BCM57765_A0, "BCM57765 A0" },
	{ BGE_CHIPID_BCM57765_B0, "BCM57765 B0" },
	{ BGE_CHIPID_BCM57766_A0, "BCM57766 A0" },
	{ BGE_CHIPID_BCM57780_A0, "BCM57780 A0" },
	{ BGE_CHIPID_BCM57780_A1, "BCM57780 A1" },

	{ 0, NULL }
};

/*
 * Some defaults for major revisions, so that newer steppings
 * that we don't know about have a shot at working.
 */
static const struct bge_revision bge_majorrevs[] = {
	{ BGE_ASICREV_BCM5700, "unknown BCM5700" },
	{ BGE_ASICREV_BCM5701, "unknown BCM5701" },
	{ BGE_ASICREV_BCM5703, "unknown BCM5703" },
	{ BGE_ASICREV_BCM5704, "unknown BCM5704" },
	{ BGE_ASICREV_BCM5705, "unknown BCM5705" },
	{ BGE_ASICREV_BCM5750, "unknown BCM5750" },
	{ BGE_ASICREV_BCM5714, "unknown BCM5714" },
	{ BGE_ASICREV_BCM5714_A0, "unknown BCM5714" },
	{ BGE_ASICREV_BCM5752, "unknown BCM5752" },
	{ BGE_ASICREV_BCM5780, "unknown BCM5780" },
	{ BGE_ASICREV_BCM5755, "unknown BCM5755" },
	{ BGE_ASICREV_BCM5761, "unknown BCM5761" },
	{ BGE_ASICREV_BCM5784, "unknown BCM5784" },
	{ BGE_ASICREV_BCM5785, "unknown BCM5785" },
	/* 5754 and 5787 share the same ASIC ID */
	{ BGE_ASICREV_BCM5787, "unknown BCM5754/5787" },
	{ BGE_ASICREV_BCM5906, "unknown BCM5906" },
	{ BGE_ASICREV_BCM57765, "unknown BCM57765" },
	{ BGE_ASICREV_BCM57766, "unknown BCM57766" },
	{ BGE_ASICREV_BCM57780, "unknown BCM57780" },
	{ BGE_ASICREV_BCM5717, "unknown BCM5717" },
	{ BGE_ASICREV_BCM5719, "unknown BCM5719" },
	{ BGE_ASICREV_BCM5720, "unknown BCM5720" },
	{ BGE_ASICREV_BCM5762, "unknown BCM5762" },

	{ 0, NULL }
};

/* Nonzero: allow ASF/IPMI firmware to keep running across our resets. */
static int bge_allow_asf = 1;

CFATTACH_DECL3_NEW(bge, sizeof(struct bge_softc),
    bge_probe, bge_attach, bge_detach, NULL,
    NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Read a word of NIC-internal memory indirectly, through the PCI
 * memory-window registers in configuration space.  The BCM5906 has no
 * on-chip memory in the statistics-block range, so reads there return 0.
 */
static uint32_t
bge_readmem_ind(struct bge_softc *sc, int off)
{
	pcireg_t val;

	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906 &&
	    off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4)
		return 0;

	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_BASEADDR, off);
	val = pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_DATA);
	/* Restore the window base so other code sees a known state. */
	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_BASEADDR, 0);
	return val;
}

/*
 * Write a word of NIC-internal memory indirectly, through the PCI
 * memory-window registers in configuration space.
 */
static void
bge_writemem_ind(struct bge_softc *sc, int off, int val)
{

	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_BASEADDR, off);
	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_DATA, val);
	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_BASEADDR, 0);
}

/*
 * PCI Express only
 *
 * Program the Max_Read_Request_Size field of the PCIe Device Control
 * register from sc->bge_expmrq.  Only 2048 and 4096 bytes are valid for
 * this driver; any other value indicates a driver bug, hence the panic.
 */
static void
bge_set_max_readrq(struct bge_softc *sc)
{
	pcireg_t val;

	val = pci_conf_read(sc->sc_pc, sc->sc_pcitag, sc->bge_pciecap
	    + PCIE_DCSR);
	val &= ~PCIE_DCSR_MAX_READ_REQ;
	switch (sc->bge_expmrq) {
	case 2048:
		val |= BGE_PCIE_DEVCTL_MAX_READRQ_2048;
		break;
	case 4096:
		val |= BGE_PCIE_DEVCTL_MAX_READRQ_4096;
		break;
	default:
		panic("incorrect expmrq value(%d)", sc->bge_expmrq);
		break;
	}
	pci_conf_write(sc->sc_pc, sc->sc_pcitag, sc->bge_pciecap
	    + PCIE_DCSR, val);
}

#ifdef notdef
/* Indirect read of a MAC register through the register window. */
static uint32_t
bge_readreg_ind(struct bge_softc *sc, int off)
{
	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_REG_BASEADDR, off);
	return pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_REG_DATA);
}
#endif

/* Indirect write of a MAC register through the register window. */
static void
bge_writereg_ind(struct bge_softc *sc, int off, int val)
{
	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_REG_BASEADDR, off);
	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_REG_DATA, val);
}

/* Direct (memory-mapped) write of a chip register. */
static void
bge_writemem_direct(struct bge_softc *sc, int off, int val)
{
	CSR_WRITE_4(sc, off, val);
}

/*
 * Write a mailbox register.  The BCM5906 uses the low-priority mailbox
 * aperture, so its offsets are rebased accordingly.
 */
static void
bge_writembx(struct bge_softc *sc, int off, int val)
{
	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
		off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI;

	CSR_WRITE_4(sc, off, val);
}

/*
 * As bge_writembx(), but the write is followed by a read-back so the
 * write is posted before the caller proceeds.
 */
static void
bge_writembx_flush(struct bge_softc *sc, int off, int val)
{
	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
		off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI;

	CSR_WRITE_4_FLUSH(sc, off, val);
}

/*
 * Clear all stale locks and select the lock for this driver instance.
 *
 * The BCM5761 keeps its grant registers at a different base than the
 * later per-function layout; PHY locks always use the DRIVER0 bit,
 * while other locks use a per-PCI-function bit.
 */
void
bge_ape_lock_init(struct bge_softc *sc)
{
	struct pci_attach_args *pa = &(sc->bge_pa);
	uint32_t bit, regbase;
	int i;

	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761)
		regbase = BGE_APE_LOCK_GRANT;
	else
		regbase = BGE_APE_PER_LOCK_GRANT;

	/* Clear any stale locks. */
	for (i = BGE_APE_LOCK_PHY0; i <= BGE_APE_LOCK_GPIO; i++) {
		switch (i) {
		case BGE_APE_LOCK_PHY0:
		case BGE_APE_LOCK_PHY1:
		case BGE_APE_LOCK_PHY2:
		case BGE_APE_LOCK_PHY3:
			bit = BGE_APE_LOCK_GRANT_DRIVER0;
			break;
		default:
			if (pa->pa_function == 0)
				bit = BGE_APE_LOCK_GRANT_DRIVER0;
			else
				bit = (1 << pa->pa_function);
		}
		APE_WRITE_4(sc, regbase + 4 * i, bit);
	}

	/* Select the PHY lock based on the device's function number. */
	switch (pa->pa_function) {
	case 0:
		sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY0;
		break;
	case 1:
		sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY1;
		break;
	case 2:
		sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY2;
		break;
	case 3:
		sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY3;
		break;
	default:
		printf("%s: PHY lock not supported on function\n",
		    device_xname(sc->bge_dev));
		break;
	}
}

/*
 * Check for APE firmware, set flags, and print version info.
 */
void
bge_ape_read_fw_ver(struct bge_softc *sc)
{
	const char *fwtype;
	uint32_t apedata, features;

	/* Check for a valid APE signature in shared memory. */
	apedata = APE_READ_4(sc, BGE_APE_SEG_SIG);
	if (apedata != BGE_APE_SEG_SIG_MAGIC) {
		sc->bge_mfw_flags &= ~BGE_MFW_ON_APE;
		return;
	}

	/* Check if APE firmware is running. */
	apedata = APE_READ_4(sc, BGE_APE_FW_STATUS);
	if ((apedata & BGE_APE_FW_STATUS_READY) == 0) {
		printf("%s: APE signature found but FW status not ready! "
		    "0x%08x\n", device_xname(sc->bge_dev), apedata);
		return;
	}

	sc->bge_mfw_flags |= BGE_MFW_ON_APE;

	/* Fetch the APE firmware type and version. */
	apedata = APE_READ_4(sc, BGE_APE_FW_VERSION);
	features = APE_READ_4(sc, BGE_APE_FW_FEATURES);
	if ((features & BGE_APE_FW_FEATURE_NCSI) != 0) {
		sc->bge_mfw_flags |= BGE_MFW_TYPE_NCSI;
		fwtype = "NCSI";
	} else if ((features & BGE_APE_FW_FEATURE_DASH) != 0) {
		sc->bge_mfw_flags |= BGE_MFW_TYPE_DASH;
		fwtype = "DASH";
	} else
		fwtype = "UNKN";

	/* Print the APE firmware version. */
	aprint_normal_dev(sc->bge_dev, "APE firmware %s %d.%d.%d.%d\n", fwtype,
	    (apedata & BGE_APE_FW_VERSION_MAJMSK) >> BGE_APE_FW_VERSION_MAJSFT,
	    (apedata & BGE_APE_FW_VERSION_MINMSK) >> BGE_APE_FW_VERSION_MINSFT,
	    (apedata & BGE_APE_FW_VERSION_REVMSK) >> BGE_APE_FW_VERSION_REVSFT,
	    (apedata & BGE_APE_FW_VERSION_BLDMSK));
}

/*
 * Acquire one of the mutexes shared with the APE firmware.  Returns 0
 * on success (or when no APE firmware is present), EINVAL for an
 * unknown lock number, or EBUSY if the lock could not be obtained
 * within roughly one second.
 */
int
bge_ape_lock(struct bge_softc *sc, int locknum)
{
	struct pci_attach_args *pa = &(sc->bge_pa);
	uint32_t bit, gnt, req, status;
	int i, off;

	if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0)
		return 0;

	/* Lock request/grant registers have different bases. */
	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761) {
		req = BGE_APE_LOCK_REQ;
		gnt = BGE_APE_LOCK_GRANT;
	} else {
		req = BGE_APE_PER_LOCK_REQ;
		gnt = BGE_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	switch (locknum) {
	case BGE_APE_LOCK_GPIO:
		/* Lock required when using GPIO. */
		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761)
			return 0;
		if (pa->pa_function == 0)
			bit = BGE_APE_LOCK_REQ_DRIVER0;
		else
			bit = (1 << pa->pa_function);
		break;
	case BGE_APE_LOCK_GRC:
		/* Lock required to reset the device. */
		if (pa->pa_function == 0)
			bit = BGE_APE_LOCK_REQ_DRIVER0;
		else
			bit = (1 << pa->pa_function);
		break;
	case BGE_APE_LOCK_MEM:
		/* Lock required when accessing certain APE memory. */
		if (pa->pa_function == 0)
			bit = BGE_APE_LOCK_REQ_DRIVER0;
		else
			bit = (1 << pa->pa_function);
		break;
	case BGE_APE_LOCK_PHY0:
	case BGE_APE_LOCK_PHY1:
	case BGE_APE_LOCK_PHY2:
	case BGE_APE_LOCK_PHY3:
		/* Lock required when accessing PHYs. */
		bit = BGE_APE_LOCK_REQ_DRIVER0;
		break;
	default:
		return EINVAL;
	}

	/* Request a lock. */
	APE_WRITE_4_FLUSH(sc, req + off, bit);

	/* Wait up to 1 second (20000 * 50us) to acquire lock. */
	for (i = 0; i < 20000; i++) {
		status = APE_READ_4(sc, gnt + off);
		if (status == bit)
			break;
		DELAY(50);
	}

	/* Handle any errors. */
	if (status != bit) {
		printf("%s: APE lock %d request failed! "
		    "request = 0x%04x[0x%04x], status = 0x%04x[0x%04x]\n",
		    device_xname(sc->bge_dev),
		    locknum, req + off, bit & 0xFFFF, gnt + off,
		    status & 0xFFFF);
		/* Revoke the lock request. */
		APE_WRITE_4(sc, gnt + off, bit);
		return EBUSY;
	}

	return 0;
}

/*
 * Release a mutex previously acquired with bge_ape_lock().  Writing
 * our bit to the grant register returns the lock to the firmware.
 */
void
bge_ape_unlock(struct bge_softc *sc, int locknum)
{
	struct pci_attach_args *pa = &(sc->bge_pa);
	uint32_t bit, gnt;
	int off;

	if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0)
		return;

	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761)
		gnt = BGE_APE_LOCK_GRANT;
	else
		gnt = BGE_APE_PER_LOCK_GRANT;

	off = 4 * locknum;

	switch (locknum) {
	case BGE_APE_LOCK_GPIO:
		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761)
			return;
		if (pa->pa_function == 0)
			bit = BGE_APE_LOCK_GRANT_DRIVER0;
		else
			bit = (1 << pa->pa_function);
		break;
	case BGE_APE_LOCK_GRC:
		if (pa->pa_function == 0)
			bit = BGE_APE_LOCK_GRANT_DRIVER0;
		else
			bit = (1 << pa->pa_function);
		break;
	case BGE_APE_LOCK_MEM:
		if (pa->pa_function == 0)
			bit = BGE_APE_LOCK_GRANT_DRIVER0;
		else
			bit = (1 << pa->pa_function);
		break;
	case BGE_APE_LOCK_PHY0:
	case BGE_APE_LOCK_PHY1:
	case BGE_APE_LOCK_PHY2:
	case BGE_APE_LOCK_PHY3:
		bit = BGE_APE_LOCK_GRANT_DRIVER0;
		break;
	default:
		return;
	}

	/* Write and flush for consecutive bge_ape_lock() */
	APE_WRITE_4_FLUSH(sc, gnt + off, bit);
}

/*
 * Send an event to the APE firmware.
 */
void
bge_ape_send_event(struct bge_softc *sc, uint32_t event)
{
	uint32_t apedata;
	int i;

	/* NCSI does not support APE events. */
	if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0)
		return;

	/* Wait up to 1ms for APE to service previous event.
*/ 875 for (i = 10; i > 0; i--) { 876 if (bge_ape_lock(sc, BGE_APE_LOCK_MEM) != 0) 877 break; 878 apedata = APE_READ_4(sc, BGE_APE_EVENT_STATUS); 879 if ((apedata & BGE_APE_EVENT_STATUS_EVENT_PENDING) == 0) { 880 APE_WRITE_4(sc, BGE_APE_EVENT_STATUS, event | 881 BGE_APE_EVENT_STATUS_EVENT_PENDING); 882 bge_ape_unlock(sc, BGE_APE_LOCK_MEM); 883 APE_WRITE_4(sc, BGE_APE_EVENT, BGE_APE_EVENT_1); 884 break; 885 } 886 bge_ape_unlock(sc, BGE_APE_LOCK_MEM); 887 DELAY(100); 888 } 889 if (i == 0) { 890 printf("%s: APE event 0x%08x send timed out\n", 891 device_xname(sc->bge_dev), event); 892 } 893 } 894 895 void 896 bge_ape_driver_state_change(struct bge_softc *sc, int kind) 897 { 898 uint32_t apedata, event; 899 900 if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0) 901 return; 902 903 switch (kind) { 904 case BGE_RESET_START: 905 /* If this is the first load, clear the load counter. */ 906 apedata = APE_READ_4(sc, BGE_APE_HOST_SEG_SIG); 907 if (apedata != BGE_APE_HOST_SEG_SIG_MAGIC) 908 APE_WRITE_4(sc, BGE_APE_HOST_INIT_COUNT, 0); 909 else { 910 apedata = APE_READ_4(sc, BGE_APE_HOST_INIT_COUNT); 911 APE_WRITE_4(sc, BGE_APE_HOST_INIT_COUNT, ++apedata); 912 } 913 APE_WRITE_4(sc, BGE_APE_HOST_SEG_SIG, 914 BGE_APE_HOST_SEG_SIG_MAGIC); 915 APE_WRITE_4(sc, BGE_APE_HOST_SEG_LEN, 916 BGE_APE_HOST_SEG_LEN_MAGIC); 917 918 /* Add some version info if bge(4) supports it. 
*/ 919 APE_WRITE_4(sc, BGE_APE_HOST_DRIVER_ID, 920 BGE_APE_HOST_DRIVER_ID_MAGIC(1, 0)); 921 APE_WRITE_4(sc, BGE_APE_HOST_BEHAVIOR, 922 BGE_APE_HOST_BEHAV_NO_PHYLOCK); 923 APE_WRITE_4(sc, BGE_APE_HOST_HEARTBEAT_INT_MS, 924 BGE_APE_HOST_HEARTBEAT_INT_DISABLE); 925 APE_WRITE_4(sc, BGE_APE_HOST_DRVR_STATE, 926 BGE_APE_HOST_DRVR_STATE_START); 927 event = BGE_APE_EVENT_STATUS_STATE_START; 928 break; 929 case BGE_RESET_SHUTDOWN: 930 APE_WRITE_4(sc, BGE_APE_HOST_DRVR_STATE, 931 BGE_APE_HOST_DRVR_STATE_UNLOAD); 932 event = BGE_APE_EVENT_STATUS_STATE_UNLOAD; 933 break; 934 case BGE_RESET_SUSPEND: 935 event = BGE_APE_EVENT_STATUS_STATE_SUSPEND; 936 break; 937 default: 938 return; 939 } 940 941 bge_ape_send_event(sc, event | BGE_APE_EVENT_STATUS_DRIVER_EVNT | 942 BGE_APE_EVENT_STATUS_STATE_CHNGE); 943 } 944 945 static uint8_t 946 bge_nvram_getbyte(struct bge_softc *sc, int addr, uint8_t *dest) 947 { 948 uint32_t access, byte = 0; 949 int i; 950 951 /* Lock. */ 952 CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1); 953 for (i = 0; i < 8000; i++) { 954 if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1) 955 break; 956 DELAY(20); 957 } 958 if (i == 8000) 959 return 1; 960 961 /* Enable access. */ 962 access = CSR_READ_4(sc, BGE_NVRAM_ACCESS); 963 CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE); 964 965 CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc); 966 CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD); 967 for (i = 0; i < BGE_TIMEOUT * 10; i++) { 968 DELAY(10); 969 if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) { 970 DELAY(10); 971 break; 972 } 973 } 974 975 if (i == BGE_TIMEOUT * 10) { 976 aprint_error_dev(sc->bge_dev, "nvram read timed out\n"); 977 return 1; 978 } 979 980 /* Get result. */ 981 byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA); 982 983 *dest = (bswap32(byte) >> ((addr % 4) * 8)) & 0xFF; 984 985 /* Disable access. */ 986 CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access); 987 988 /* Unlock. 
*/ 989 CSR_WRITE_4_FLUSH(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1); 990 991 return 0; 992 } 993 994 /* 995 * Read a sequence of bytes from NVRAM. 996 */ 997 static int 998 bge_read_nvram(struct bge_softc *sc, uint8_t *dest, int off, int cnt) 999 { 1000 int error = 0, i; 1001 uint8_t byte = 0; 1002 1003 if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5906) 1004 return 1; 1005 1006 for (i = 0; i < cnt; i++) { 1007 error = bge_nvram_getbyte(sc, off + i, &byte); 1008 if (error) 1009 break; 1010 *(dest + i) = byte; 1011 } 1012 1013 return error ? 1 : 0; 1014 } 1015 1016 /* 1017 * Read a byte of data stored in the EEPROM at address 'addr.' The 1018 * BCM570x supports both the traditional bitbang interface and an 1019 * auto access interface for reading the EEPROM. We use the auto 1020 * access method. 1021 */ 1022 static uint8_t 1023 bge_eeprom_getbyte(struct bge_softc *sc, int addr, uint8_t *dest) 1024 { 1025 int i; 1026 uint32_t byte = 0; 1027 1028 /* 1029 * Enable use of auto EEPROM access so we can avoid 1030 * having to use the bitbang method. 1031 */ 1032 BGE_SETBIT_FLUSH(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM); 1033 1034 /* Reset the EEPROM, load the clock period. */ 1035 CSR_WRITE_4_FLUSH(sc, BGE_EE_ADDR, 1036 BGE_EEADDR_RESET | BGE_EEHALFCLK(BGE_HALFCLK_384SCL)); 1037 DELAY(20); 1038 1039 /* Issue the read EEPROM command. */ 1040 CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr); 1041 1042 /* Wait for completion */ 1043 for (i = 0; i < BGE_TIMEOUT * 10; i++) { 1044 DELAY(10); 1045 if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE) 1046 break; 1047 } 1048 1049 if (i == BGE_TIMEOUT * 10) { 1050 aprint_error_dev(sc->bge_dev, "eeprom read timed out\n"); 1051 return 1; 1052 } 1053 1054 /* Get result. */ 1055 byte = CSR_READ_4(sc, BGE_EE_DATA); 1056 1057 *dest = (byte >> ((addr % 4) * 8)) & 0xFF; 1058 1059 return 0; 1060 } 1061 1062 /* 1063 * Read a sequence of bytes from the EEPROM. 
1064 */ 1065 static int 1066 bge_read_eeprom(struct bge_softc *sc, void *destv, int off, int cnt) 1067 { 1068 int error = 0, i; 1069 uint8_t byte = 0; 1070 char *dest = destv; 1071 1072 for (i = 0; i < cnt; i++) { 1073 error = bge_eeprom_getbyte(sc, off + i, &byte); 1074 if (error) 1075 break; 1076 *(dest + i) = byte; 1077 } 1078 1079 return error ? 1 : 0; 1080 } 1081 1082 static int 1083 bge_miibus_readreg(device_t dev, int phy, int reg, uint16_t *val) 1084 { 1085 struct bge_softc * const sc = device_private(dev); 1086 uint32_t data; 1087 uint32_t autopoll; 1088 int rv = 0; 1089 int i; 1090 1091 if (bge_ape_lock(sc, sc->bge_phy_ape_lock) != 0) 1092 return -1; 1093 1094 /* Reading with autopolling on may trigger PCI errors */ 1095 autopoll = CSR_READ_4(sc, BGE_MI_MODE); 1096 if (autopoll & BGE_MIMODE_AUTOPOLL) { 1097 BGE_STS_CLRBIT(sc, BGE_STS_AUTOPOLL); 1098 BGE_CLRBIT_FLUSH(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL); 1099 DELAY(80); 1100 } 1101 1102 CSR_WRITE_4_FLUSH(sc, BGE_MI_COMM, BGE_MICMD_READ | BGE_MICOMM_BUSY | 1103 BGE_MIPHY(phy) | BGE_MIREG(reg)); 1104 1105 for (i = 0; i < BGE_TIMEOUT; i++) { 1106 delay(10); 1107 data = CSR_READ_4(sc, BGE_MI_COMM); 1108 if (!(data & BGE_MICOMM_BUSY)) { 1109 DELAY(5); 1110 data = CSR_READ_4(sc, BGE_MI_COMM); 1111 break; 1112 } 1113 } 1114 1115 if (i == BGE_TIMEOUT) { 1116 aprint_error_dev(sc->bge_dev, "PHY read timed out\n"); 1117 rv = ETIMEDOUT; 1118 } else if ((data & BGE_MICOMM_READFAIL) != 0) { 1119 /* XXX This error occurs on some devices while attaching. 
*/ 1120 aprint_debug_dev(sc->bge_dev, "PHY read I/O error\n"); 1121 rv = EIO; 1122 } else 1123 *val = data & BGE_MICOMM_DATA; 1124 1125 if (autopoll & BGE_MIMODE_AUTOPOLL) { 1126 BGE_STS_SETBIT(sc, BGE_STS_AUTOPOLL); 1127 BGE_SETBIT_FLUSH(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL); 1128 DELAY(80); 1129 } 1130 1131 bge_ape_unlock(sc, sc->bge_phy_ape_lock); 1132 1133 return rv; 1134 } 1135 1136 static int 1137 bge_miibus_writereg(device_t dev, int phy, int reg, uint16_t val) 1138 { 1139 struct bge_softc * const sc = device_private(dev); 1140 uint32_t data, autopoll; 1141 int rv = 0; 1142 int i; 1143 1144 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906 && 1145 (reg == MII_GTCR || reg == BRGPHY_MII_AUXCTL)) 1146 return 0; 1147 1148 if (bge_ape_lock(sc, sc->bge_phy_ape_lock) != 0) 1149 return -1; 1150 1151 /* Reading with autopolling on may trigger PCI errors */ 1152 autopoll = CSR_READ_4(sc, BGE_MI_MODE); 1153 if (autopoll & BGE_MIMODE_AUTOPOLL) { 1154 BGE_STS_CLRBIT(sc, BGE_STS_AUTOPOLL); 1155 BGE_CLRBIT_FLUSH(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL); 1156 DELAY(80); 1157 } 1158 1159 CSR_WRITE_4_FLUSH(sc, BGE_MI_COMM, BGE_MICMD_WRITE | BGE_MICOMM_BUSY | 1160 BGE_MIPHY(phy) | BGE_MIREG(reg) | val); 1161 1162 for (i = 0; i < BGE_TIMEOUT; i++) { 1163 delay(10); 1164 data = CSR_READ_4(sc, BGE_MI_COMM); 1165 if (!(data & BGE_MICOMM_BUSY)) { 1166 delay(5); 1167 data = CSR_READ_4(sc, BGE_MI_COMM); 1168 break; 1169 } 1170 } 1171 1172 if (i == BGE_TIMEOUT) { 1173 aprint_error_dev(sc->bge_dev, "PHY write timed out\n"); 1174 rv = ETIMEDOUT; 1175 } else if ((data & BGE_MICOMM_READFAIL) != 0) { 1176 aprint_error_dev(sc->bge_dev, "PHY write I/O error\n"); 1177 rv = EIO; 1178 } 1179 1180 if (autopoll & BGE_MIMODE_AUTOPOLL) { 1181 BGE_STS_SETBIT(sc, BGE_STS_AUTOPOLL); 1182 BGE_SETBIT_FLUSH(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL); 1183 delay(80); 1184 } 1185 1186 bge_ape_unlock(sc, sc->bge_phy_ape_lock); 1187 1188 return rv; 1189 } 1190 1191 static void 1192 bge_miibus_statchg(struct 
ifnet *ifp) 1193 { 1194 struct bge_softc * const sc = ifp->if_softc; 1195 struct mii_data *mii = &sc->bge_mii; 1196 uint32_t mac_mode, rx_mode, tx_mode; 1197 1198 /* 1199 * Get flow control negotiation result. 1200 */ 1201 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO && 1202 (mii->mii_media_active & IFM_ETH_FMASK) != sc->bge_flowflags) 1203 sc->bge_flowflags = mii->mii_media_active & IFM_ETH_FMASK; 1204 1205 if (!BGE_STS_BIT(sc, BGE_STS_LINK) && 1206 mii->mii_media_status & IFM_ACTIVE && 1207 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) 1208 BGE_STS_SETBIT(sc, BGE_STS_LINK); 1209 else if (BGE_STS_BIT(sc, BGE_STS_LINK) && 1210 (!(mii->mii_media_status & IFM_ACTIVE) || 1211 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) 1212 BGE_STS_CLRBIT(sc, BGE_STS_LINK); 1213 1214 if (!BGE_STS_BIT(sc, BGE_STS_LINK)) 1215 return; 1216 1217 /* Set the port mode (MII/GMII) to match the link speed. */ 1218 mac_mode = CSR_READ_4(sc, BGE_MAC_MODE) & 1219 ~(BGE_MACMODE_PORTMODE | BGE_MACMODE_HALF_DUPLEX); 1220 tx_mode = CSR_READ_4(sc, BGE_TX_MODE); 1221 rx_mode = CSR_READ_4(sc, BGE_RX_MODE); 1222 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T || 1223 IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) 1224 mac_mode |= BGE_PORTMODE_GMII; 1225 else 1226 mac_mode |= BGE_PORTMODE_MII; 1227 1228 tx_mode &= ~BGE_TXMODE_FLOWCTL_ENABLE; 1229 rx_mode &= ~BGE_RXMODE_FLOWCTL_ENABLE; 1230 if ((mii->mii_media_active & IFM_FDX) != 0) { 1231 if (sc->bge_flowflags & IFM_ETH_TXPAUSE) 1232 tx_mode |= BGE_TXMODE_FLOWCTL_ENABLE; 1233 if (sc->bge_flowflags & IFM_ETH_RXPAUSE) 1234 rx_mode |= BGE_RXMODE_FLOWCTL_ENABLE; 1235 } else 1236 mac_mode |= BGE_MACMODE_HALF_DUPLEX; 1237 1238 CSR_WRITE_4_FLUSH(sc, BGE_MAC_MODE, mac_mode); 1239 DELAY(40); 1240 CSR_WRITE_4(sc, BGE_TX_MODE, tx_mode); 1241 CSR_WRITE_4(sc, BGE_RX_MODE, rx_mode); 1242 } 1243 1244 /* 1245 * Update rx threshold levels to values in a particular slot 1246 * of the interrupt-mitigation table bge_rx_threshes. 
1247 */ 1248 static void 1249 bge_set_thresh(struct ifnet *ifp, int lvl) 1250 { 1251 struct bge_softc * const sc = ifp->if_softc; 1252 int s; 1253 1254 /* 1255 * For now, just save the new Rx-intr thresholds and record 1256 * that a threshold update is pending. Updating the hardware 1257 * registers here (even at splhigh()) is observed to 1258 * occasionally cause glitches where Rx-interrupts are not 1259 * honoured for up to 10 seconds. jonathan@NetBSD.org, 2003-04-05 1260 */ 1261 s = splnet(); 1262 sc->bge_rx_coal_ticks = bge_rx_threshes[lvl].rx_ticks; 1263 sc->bge_rx_max_coal_bds = bge_rx_threshes[lvl].rx_max_bds; 1264 sc->bge_pending_rxintr_change = 1; 1265 splx(s); 1266 } 1267 1268 1269 /* 1270 * Update Rx thresholds of all bge devices 1271 */ 1272 static void 1273 bge_update_all_threshes(int lvl) 1274 { 1275 const char * const namebuf = "bge"; 1276 const size_t namelen = strlen(namebuf); 1277 struct ifnet *ifp; 1278 1279 if (lvl < 0) 1280 lvl = 0; 1281 else if (lvl >= NBGE_RX_THRESH) 1282 lvl = NBGE_RX_THRESH - 1; 1283 1284 /* 1285 * Now search all the interfaces for this name/number 1286 */ 1287 int s = pserialize_read_enter(); 1288 IFNET_READER_FOREACH(ifp) { 1289 if (strncmp(ifp->if_xname, namebuf, namelen) != 0) 1290 continue; 1291 /* We got a match: update if doing auto-threshold-tuning */ 1292 if (bge_auto_thresh) 1293 bge_set_thresh(ifp, lvl); 1294 } 1295 pserialize_read_exit(s); 1296 } 1297 1298 /* 1299 * Handle events that have triggered interrupts. 1300 */ 1301 static void 1302 bge_handle_events(struct bge_softc *sc) 1303 { 1304 1305 return; 1306 } 1307 1308 /* 1309 * Memory management for jumbo frames. 1310 */ 1311 1312 static int 1313 bge_alloc_jumbo_mem(struct bge_softc *sc) 1314 { 1315 char *ptr, *kva; 1316 bus_dma_segment_t seg; 1317 int i, rseg, state, error; 1318 struct bge_jpool_entry *entry; 1319 1320 state = error = 0; 1321 1322 /* Grab a big chunk o' storage. 
*/ 1323 if (bus_dmamem_alloc(sc->bge_dmatag, BGE_JMEM, PAGE_SIZE, 0, 1324 &seg, 1, &rseg, BUS_DMA_NOWAIT)) { 1325 aprint_error_dev(sc->bge_dev, "can't alloc rx buffers\n"); 1326 return ENOBUFS; 1327 } 1328 1329 state = 1; 1330 if (bus_dmamem_map(sc->bge_dmatag, &seg, rseg, BGE_JMEM, (void **)&kva, 1331 BUS_DMA_NOWAIT)) { 1332 aprint_error_dev(sc->bge_dev, 1333 "can't map DMA buffers (%d bytes)\n", (int)BGE_JMEM); 1334 error = ENOBUFS; 1335 goto out; 1336 } 1337 1338 state = 2; 1339 if (bus_dmamap_create(sc->bge_dmatag, BGE_JMEM, 1, BGE_JMEM, 0, 1340 BUS_DMA_NOWAIT, &sc->bge_cdata.bge_rx_jumbo_map)) { 1341 aprint_error_dev(sc->bge_dev, "can't create DMA map\n"); 1342 error = ENOBUFS; 1343 goto out; 1344 } 1345 1346 state = 3; 1347 if (bus_dmamap_load(sc->bge_dmatag, sc->bge_cdata.bge_rx_jumbo_map, 1348 kva, BGE_JMEM, NULL, BUS_DMA_NOWAIT)) { 1349 aprint_error_dev(sc->bge_dev, "can't load DMA map\n"); 1350 error = ENOBUFS; 1351 goto out; 1352 } 1353 1354 state = 4; 1355 sc->bge_cdata.bge_jumbo_buf = (void *)kva; 1356 DPRINTFN(1,("bge_jumbo_buf = %p\n", sc->bge_cdata.bge_jumbo_buf)); 1357 1358 SLIST_INIT(&sc->bge_jfree_listhead); 1359 SLIST_INIT(&sc->bge_jinuse_listhead); 1360 1361 /* 1362 * Now divide it up into 9K pieces and save the addresses 1363 * in an array. 
1364 */ 1365 ptr = sc->bge_cdata.bge_jumbo_buf; 1366 for (i = 0; i < BGE_JSLOTS; i++) { 1367 sc->bge_cdata.bge_jslots[i] = ptr; 1368 ptr += BGE_JLEN; 1369 entry = kmem_alloc(sizeof(*entry), KM_SLEEP); 1370 entry->slot = i; 1371 SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, 1372 entry, jpool_entries); 1373 } 1374 out: 1375 if (error != 0) { 1376 switch (state) { 1377 case 4: 1378 bus_dmamap_unload(sc->bge_dmatag, 1379 sc->bge_cdata.bge_rx_jumbo_map); 1380 /* FALLTHROUGH */ 1381 case 3: 1382 bus_dmamap_destroy(sc->bge_dmatag, 1383 sc->bge_cdata.bge_rx_jumbo_map); 1384 /* FALLTHROUGH */ 1385 case 2: 1386 bus_dmamem_unmap(sc->bge_dmatag, kva, BGE_JMEM); 1387 /* FALLTHROUGH */ 1388 case 1: 1389 bus_dmamem_free(sc->bge_dmatag, &seg, rseg); 1390 break; 1391 default: 1392 break; 1393 } 1394 } 1395 1396 return error; 1397 } 1398 1399 /* 1400 * Allocate a jumbo buffer. 1401 */ 1402 static void * 1403 bge_jalloc(struct bge_softc *sc) 1404 { 1405 struct bge_jpool_entry *entry; 1406 1407 entry = SLIST_FIRST(&sc->bge_jfree_listhead); 1408 1409 if (entry == NULL) { 1410 aprint_error_dev(sc->bge_dev, "no free jumbo buffers\n"); 1411 return NULL; 1412 } 1413 1414 SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jpool_entries); 1415 SLIST_INSERT_HEAD(&sc->bge_jinuse_listhead, entry, jpool_entries); 1416 return sc->bge_cdata.bge_jslots[entry->slot]; 1417 } 1418 1419 /* 1420 * Release a jumbo buffer. 
1421 */ 1422 static void 1423 bge_jfree(struct mbuf *m, void *buf, size_t size, void *arg) 1424 { 1425 struct bge_jpool_entry *entry; 1426 struct bge_softc * const sc = arg; 1427 int s; 1428 1429 if (sc == NULL) 1430 panic("bge_jfree: can't find softc pointer!"); 1431 1432 /* calculate the slot this buffer belongs to */ 1433 int i = ((char *)buf - (char *)sc->bge_cdata.bge_jumbo_buf) / BGE_JLEN; 1434 1435 if (i < 0 || i >= BGE_JSLOTS) 1436 panic("bge_jfree: asked to free buffer that we don't manage!"); 1437 1438 s = splvm(); 1439 entry = SLIST_FIRST(&sc->bge_jinuse_listhead); 1440 if (entry == NULL) 1441 panic("bge_jfree: buffer not in use!"); 1442 entry->slot = i; 1443 SLIST_REMOVE_HEAD(&sc->bge_jinuse_listhead, jpool_entries); 1444 SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, entry, jpool_entries); 1445 1446 if (__predict_true(m != NULL)) 1447 pool_cache_put(mb_cache, m); 1448 splx(s); 1449 } 1450 1451 1452 /* 1453 * Initialize a standard receive ring descriptor. 1454 */ 1455 static int 1456 bge_newbuf_std(struct bge_softc *sc, int i, struct mbuf *m, 1457 bus_dmamap_t dmamap) 1458 { 1459 struct mbuf *m_new = NULL; 1460 struct bge_rx_bd *r; 1461 int error; 1462 1463 if (dmamap == NULL) 1464 dmamap = sc->bge_cdata.bge_rx_std_map[i]; 1465 1466 if (dmamap == NULL) { 1467 error = bus_dmamap_create(sc->bge_dmatag, MCLBYTES, 1, 1468 MCLBYTES, 0, BUS_DMA_NOWAIT, &dmamap); 1469 if (error != 0) 1470 return error; 1471 } 1472 1473 sc->bge_cdata.bge_rx_std_map[i] = dmamap; 1474 1475 if (m == NULL) { 1476 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 1477 if (m_new == NULL) 1478 return ENOBUFS; 1479 1480 MCLGET(m_new, M_DONTWAIT); 1481 if (!(m_new->m_flags & M_EXT)) { 1482 m_freem(m_new); 1483 return ENOBUFS; 1484 } 1485 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; 1486 1487 } else { 1488 m_new = m; 1489 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; 1490 m_new->m_data = m_new->m_ext.ext_buf; 1491 } 1492 if (!(sc->bge_flags & BGEF_RX_ALIGNBUG)) 1493 m_adj(m_new, ETHER_ALIGN); 1494 if 
(bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m_new, 1495 BUS_DMA_READ | BUS_DMA_NOWAIT)) { 1496 m_freem(m_new); 1497 return ENOBUFS; 1498 } 1499 bus_dmamap_sync(sc->bge_dmatag, dmamap, 0, dmamap->dm_mapsize, 1500 BUS_DMASYNC_PREREAD); 1501 1502 sc->bge_cdata.bge_rx_std_chain[i] = m_new; 1503 r = &sc->bge_rdata->bge_rx_std_ring[i]; 1504 BGE_HOSTADDR(r->bge_addr, dmamap->dm_segs[0].ds_addr); 1505 r->bge_flags = BGE_RXBDFLAG_END; 1506 r->bge_len = m_new->m_len; 1507 r->bge_idx = i; 1508 1509 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 1510 offsetof(struct bge_ring_data, bge_rx_std_ring) + 1511 i * sizeof(struct bge_rx_bd), 1512 sizeof(struct bge_rx_bd), 1513 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 1514 1515 return 0; 1516 } 1517 1518 /* 1519 * Initialize a jumbo receive ring descriptor. This allocates 1520 * a jumbo buffer from the pool managed internally by the driver. 1521 */ 1522 static int 1523 bge_newbuf_jumbo(struct bge_softc *sc, int i, struct mbuf *m) 1524 { 1525 struct mbuf *m_new = NULL; 1526 struct bge_rx_bd *r; 1527 void *buf = NULL; 1528 1529 if (m == NULL) { 1530 1531 /* Allocate the mbuf. */ 1532 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 1533 if (m_new == NULL) 1534 return ENOBUFS; 1535 1536 /* Allocate the jumbo buffer */ 1537 buf = bge_jalloc(sc); 1538 if (buf == NULL) { 1539 m_freem(m_new); 1540 aprint_error_dev(sc->bge_dev, 1541 "jumbo allocation failed -- packet dropped!\n"); 1542 return ENOBUFS; 1543 } 1544 1545 /* Attach the buffer to the mbuf. 
*/ 1546 m_new->m_len = m_new->m_pkthdr.len = BGE_JUMBO_FRAMELEN; 1547 MEXTADD(m_new, buf, BGE_JUMBO_FRAMELEN, M_DEVBUF, 1548 bge_jfree, sc); 1549 m_new->m_flags |= M_EXT_RW; 1550 } else { 1551 m_new = m; 1552 buf = m_new->m_data = m_new->m_ext.ext_buf; 1553 m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN; 1554 } 1555 if (!(sc->bge_flags & BGEF_RX_ALIGNBUG)) 1556 m_adj(m_new, ETHER_ALIGN); 1557 bus_dmamap_sync(sc->bge_dmatag, sc->bge_cdata.bge_rx_jumbo_map, 1558 mtod(m_new, char *) - (char *)sc->bge_cdata.bge_jumbo_buf, 1559 BGE_JLEN, BUS_DMASYNC_PREREAD); 1560 /* Set up the descriptor. */ 1561 r = &sc->bge_rdata->bge_rx_jumbo_ring[i]; 1562 sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new; 1563 BGE_HOSTADDR(r->bge_addr, BGE_JUMBO_DMA_ADDR(sc, m_new)); 1564 r->bge_flags = BGE_RXBDFLAG_END | BGE_RXBDFLAG_JUMBO_RING; 1565 r->bge_len = m_new->m_len; 1566 r->bge_idx = i; 1567 1568 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 1569 offsetof(struct bge_ring_data, bge_rx_jumbo_ring) + 1570 i * sizeof(struct bge_rx_bd), 1571 sizeof(struct bge_rx_bd), 1572 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 1573 1574 return 0; 1575 } 1576 1577 /* 1578 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster, 1579 * that's 1MB or memory, which is a lot. For now, we fill only the first 1580 * 256 ring entries and hope that our CPU is fast enough to keep up with 1581 * the NIC. 
1582 */ 1583 static int 1584 bge_init_rx_ring_std(struct bge_softc *sc) 1585 { 1586 int i; 1587 1588 if (sc->bge_flags & BGEF_RXRING_VALID) 1589 return 0; 1590 1591 for (i = 0; i < BGE_SSLOTS; i++) { 1592 if (bge_newbuf_std(sc, i, NULL, 0) == ENOBUFS) 1593 return ENOBUFS; 1594 } 1595 1596 sc->bge_std = i - 1; 1597 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std); 1598 1599 sc->bge_flags |= BGEF_RXRING_VALID; 1600 1601 return 0; 1602 } 1603 1604 static void 1605 bge_free_rx_ring_std(struct bge_softc *sc, bool disable) 1606 { 1607 int i; 1608 1609 if (!(sc->bge_flags & BGEF_RXRING_VALID)) 1610 return; 1611 1612 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { 1613 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) { 1614 m_freem(sc->bge_cdata.bge_rx_std_chain[i]); 1615 sc->bge_cdata.bge_rx_std_chain[i] = NULL; 1616 if (disable) { 1617 bus_dmamap_destroy(sc->bge_dmatag, 1618 sc->bge_cdata.bge_rx_std_map[i]); 1619 sc->bge_cdata.bge_rx_std_map[i] = NULL; 1620 } 1621 } 1622 memset((char *)&sc->bge_rdata->bge_rx_std_ring[i], 0, 1623 sizeof(struct bge_rx_bd)); 1624 } 1625 1626 sc->bge_flags &= ~BGEF_RXRING_VALID; 1627 } 1628 1629 static int 1630 bge_init_rx_ring_jumbo(struct bge_softc *sc) 1631 { 1632 int i; 1633 volatile struct bge_rcb *rcb; 1634 1635 if (sc->bge_flags & BGEF_JUMBO_RXRING_VALID) 1636 return 0; 1637 1638 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { 1639 if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS) 1640 return ENOBUFS; 1641 } 1642 1643 sc->bge_jumbo = i - 1; 1644 sc->bge_flags |= BGEF_JUMBO_RXRING_VALID; 1645 1646 rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb; 1647 rcb->bge_maxlen_flags = 0; 1648 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags); 1649 1650 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo); 1651 1652 return 0; 1653 } 1654 1655 static void 1656 bge_free_rx_ring_jumbo(struct bge_softc *sc) 1657 { 1658 int i; 1659 1660 if (!(sc->bge_flags & BGEF_JUMBO_RXRING_VALID)) 1661 return; 1662 1663 for (i = 0; i < 
BGE_JUMBO_RX_RING_CNT; i++) { 1664 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) { 1665 m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]); 1666 sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL; 1667 } 1668 memset((char *)&sc->bge_rdata->bge_rx_jumbo_ring[i], 0, 1669 sizeof(struct bge_rx_bd)); 1670 } 1671 1672 sc->bge_flags &= ~BGEF_JUMBO_RXRING_VALID; 1673 } 1674 1675 static void 1676 bge_free_tx_ring(struct bge_softc *sc, bool disable) 1677 { 1678 int i; 1679 struct txdmamap_pool_entry *dma; 1680 1681 if (!(sc->bge_flags & BGEF_TXRING_VALID)) 1682 return; 1683 1684 for (i = 0; i < BGE_TX_RING_CNT; i++) { 1685 if (sc->bge_cdata.bge_tx_chain[i] != NULL) { 1686 m_freem(sc->bge_cdata.bge_tx_chain[i]); 1687 sc->bge_cdata.bge_tx_chain[i] = NULL; 1688 SLIST_INSERT_HEAD(&sc->txdma_list, sc->txdma[i], 1689 link); 1690 sc->txdma[i] = 0; 1691 } 1692 memset((char *)&sc->bge_rdata->bge_tx_ring[i], 0, 1693 sizeof(struct bge_tx_bd)); 1694 } 1695 1696 if (disable) { 1697 while ((dma = SLIST_FIRST(&sc->txdma_list))) { 1698 SLIST_REMOVE_HEAD(&sc->txdma_list, link); 1699 bus_dmamap_destroy(sc->bge_dmatag, dma->dmamap); 1700 if (sc->bge_dma64) { 1701 bus_dmamap_destroy(sc->bge_dmatag32, 1702 dma->dmamap32); 1703 } 1704 kmem_free(dma, sizeof(*dma)); 1705 } 1706 SLIST_INIT(&sc->txdma_list); 1707 } 1708 1709 sc->bge_flags &= ~BGEF_TXRING_VALID; 1710 } 1711 1712 static int 1713 bge_init_tx_ring(struct bge_softc *sc) 1714 { 1715 struct ifnet * const ifp = &sc->ethercom.ec_if; 1716 int i; 1717 bus_dmamap_t dmamap, dmamap32; 1718 bus_size_t maxsegsz; 1719 struct txdmamap_pool_entry *dma; 1720 1721 if (sc->bge_flags & BGEF_TXRING_VALID) 1722 return 0; 1723 1724 sc->bge_txcnt = 0; 1725 sc->bge_tx_saved_considx = 0; 1726 1727 /* Initialize transmit producer index for host-memory send ring. 
*/ 1728 sc->bge_tx_prodidx = 0; 1729 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx); 1730 /* 5700 b2 errata */ 1731 if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX) 1732 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx); 1733 1734 /* NIC-memory send ring not used; initialize to zero. */ 1735 bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0); 1736 /* 5700 b2 errata */ 1737 if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX) 1738 bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0); 1739 1740 /* Limit DMA segment size for some chips */ 1741 if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57766) && 1742 (ifp->if_mtu <= ETHERMTU)) 1743 maxsegsz = 2048; 1744 else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719) 1745 maxsegsz = 4096; 1746 else 1747 maxsegsz = ETHER_MAX_LEN_JUMBO; 1748 1749 if (SLIST_FIRST(&sc->txdma_list) != NULL) 1750 goto alloc_done; 1751 1752 for (i = 0; i < BGE_TX_RING_CNT; i++) { 1753 if (bus_dmamap_create(sc->bge_dmatag, BGE_TXDMA_MAX, 1754 BGE_NTXSEG, maxsegsz, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, 1755 &dmamap)) 1756 return ENOBUFS; 1757 if (dmamap == NULL) 1758 panic("dmamap NULL in bge_init_tx_ring"); 1759 if (sc->bge_dma64) { 1760 if (bus_dmamap_create(sc->bge_dmatag32, BGE_TXDMA_MAX, 1761 BGE_NTXSEG, maxsegsz, 0, 1762 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, 1763 &dmamap32)) { 1764 bus_dmamap_destroy(sc->bge_dmatag, dmamap); 1765 return ENOBUFS; 1766 } 1767 if (dmamap32 == NULL) 1768 panic("dmamap32 NULL in bge_init_tx_ring"); 1769 } else 1770 dmamap32 = dmamap; 1771 dma = kmem_alloc(sizeof(*dma), KM_NOSLEEP); 1772 if (dma == NULL) { 1773 aprint_error_dev(sc->bge_dev, 1774 "can't alloc txdmamap_pool_entry\n"); 1775 bus_dmamap_destroy(sc->bge_dmatag, dmamap); 1776 if (sc->bge_dma64) 1777 bus_dmamap_destroy(sc->bge_dmatag32, dmamap32); 1778 return ENOMEM; 1779 } 1780 dma->dmamap = dmamap; 1781 dma->dmamap32 = dmamap32; 1782 SLIST_INSERT_HEAD(&sc->txdma_list, dma, link); 1783 } 1784 alloc_done: 1785 sc->bge_flags 
|= BGEF_TXRING_VALID; 1786 1787 return 0; 1788 } 1789 1790 static void 1791 bge_setmulti(struct bge_softc *sc) 1792 { 1793 struct ethercom * const ec = &sc->ethercom; 1794 struct ifnet * const ifp = &ec->ec_if; 1795 struct ether_multi *enm; 1796 struct ether_multistep step; 1797 uint32_t hashes[4] = { 0, 0, 0, 0 }; 1798 uint32_t h; 1799 int i; 1800 1801 if (ifp->if_flags & IFF_PROMISC) 1802 goto allmulti; 1803 1804 /* Now program new ones. */ 1805 ETHER_LOCK(ec); 1806 ETHER_FIRST_MULTI(step, ec, enm); 1807 while (enm != NULL) { 1808 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) { 1809 /* 1810 * We must listen to a range of multicast addresses. 1811 * For now, just accept all multicasts, rather than 1812 * trying to set only those filter bits needed to match 1813 * the range. (At this time, the only use of address 1814 * ranges is for IP multicast routing, for which the 1815 * range is big enough to require all bits set.) 1816 */ 1817 ETHER_UNLOCK(ec); 1818 goto allmulti; 1819 } 1820 1821 h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN); 1822 1823 /* Just want the 7 least-significant bits. 
*/ 1824 h &= 0x7f; 1825 1826 hashes[(h & 0x60) >> 5] |= 1U << (h & 0x1F); 1827 ETHER_NEXT_MULTI(step, enm); 1828 } 1829 ETHER_UNLOCK(ec); 1830 1831 ifp->if_flags &= ~IFF_ALLMULTI; 1832 goto setit; 1833 1834 allmulti: 1835 ifp->if_flags |= IFF_ALLMULTI; 1836 hashes[0] = hashes[1] = hashes[2] = hashes[3] = 0xffffffff; 1837 1838 setit: 1839 for (i = 0; i < 4; i++) 1840 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]); 1841 } 1842 1843 static void 1844 bge_sig_pre_reset(struct bge_softc *sc, int type) 1845 { 1846 1847 /* 1848 * Some chips don't like this so only do this if ASF is enabled 1849 */ 1850 if (sc->bge_asf_mode) 1851 bge_writemem_ind(sc, BGE_SRAM_FW_MB, BGE_SRAM_FW_MB_MAGIC); 1852 1853 if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) { 1854 switch (type) { 1855 case BGE_RESET_START: 1856 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB, 1857 BGE_FW_DRV_STATE_START); 1858 break; 1859 case BGE_RESET_SHUTDOWN: 1860 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB, 1861 BGE_FW_DRV_STATE_UNLOAD); 1862 break; 1863 case BGE_RESET_SUSPEND: 1864 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB, 1865 BGE_FW_DRV_STATE_SUSPEND); 1866 break; 1867 } 1868 } 1869 1870 if (type == BGE_RESET_START || type == BGE_RESET_SUSPEND) 1871 bge_ape_driver_state_change(sc, type); 1872 } 1873 1874 static void 1875 bge_sig_post_reset(struct bge_softc *sc, int type) 1876 { 1877 1878 if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) { 1879 switch (type) { 1880 case BGE_RESET_START: 1881 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB, 1882 BGE_FW_DRV_STATE_START_DONE); 1883 /* START DONE */ 1884 break; 1885 case BGE_RESET_SHUTDOWN: 1886 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB, 1887 BGE_FW_DRV_STATE_UNLOAD_DONE); 1888 break; 1889 } 1890 } 1891 1892 if (type == BGE_RESET_SHUTDOWN) 1893 bge_ape_driver_state_change(sc, type); 1894 } 1895 1896 static void 1897 bge_sig_legacy(struct bge_softc *sc, int type) 1898 { 1899 1900 if (sc->bge_asf_mode) { 1901 switch (type) { 1902 case BGE_RESET_START: 1903 
bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB, 1904 BGE_FW_DRV_STATE_START); 1905 break; 1906 case BGE_RESET_SHUTDOWN: 1907 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB, 1908 BGE_FW_DRV_STATE_UNLOAD); 1909 break; 1910 } 1911 } 1912 } 1913 1914 static void 1915 bge_wait_for_event_ack(struct bge_softc *sc) 1916 { 1917 int i; 1918 1919 /* wait up to 2500usec */ 1920 for (i = 0; i < 250; i++) { 1921 if (!(CSR_READ_4(sc, BGE_RX_CPU_EVENT) & 1922 BGE_RX_CPU_DRV_EVENT)) 1923 break; 1924 DELAY(10); 1925 } 1926 } 1927 1928 static void 1929 bge_stop_fw(struct bge_softc *sc) 1930 { 1931 1932 if (sc->bge_asf_mode) { 1933 bge_wait_for_event_ack(sc); 1934 1935 bge_writemem_ind(sc, BGE_SRAM_FW_CMD_MB, BGE_FW_CMD_PAUSE); 1936 CSR_WRITE_4_FLUSH(sc, BGE_RX_CPU_EVENT, 1937 CSR_READ_4(sc, BGE_RX_CPU_EVENT) | BGE_RX_CPU_DRV_EVENT); 1938 1939 bge_wait_for_event_ack(sc); 1940 } 1941 } 1942 1943 static int 1944 bge_poll_fw(struct bge_softc *sc) 1945 { 1946 uint32_t val; 1947 int i; 1948 1949 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) { 1950 for (i = 0; i < BGE_TIMEOUT; i++) { 1951 val = CSR_READ_4(sc, BGE_VCPU_STATUS); 1952 if (val & BGE_VCPU_STATUS_INIT_DONE) 1953 break; 1954 DELAY(100); 1955 } 1956 if (i >= BGE_TIMEOUT) { 1957 aprint_error_dev(sc->bge_dev, "reset timed out\n"); 1958 return -1; 1959 } 1960 } else { 1961 /* 1962 * Poll the value location we just wrote until 1963 * we see the 1's complement of the magic number. 1964 * This indicates that the firmware initialization 1965 * is complete. 1966 * XXX 1000ms for Flash and 10000ms for SEEPROM. 
		 */
		for (i = 0; i < BGE_TIMEOUT; i++) {
			val = bge_readmem_ind(sc, BGE_SRAM_FW_MB);
			if (val == ~BGE_SRAM_FW_MB_MAGIC)
				break;
			DELAY(10);
		}

		/*
		 * Only complain when the device actually has firmware to
		 * handshake with; with no EEPROM there is nothing to wait
		 * for.
		 */
		if ((i >= BGE_TIMEOUT)
		    && ((sc->bge_flags & BGEF_NO_EEPROM) == 0)) {
			aprint_error_dev(sc->bge_dev,
			    "firmware handshake timed out, val = %x\n", val);
			return -1;
		}
	}

	if (sc->bge_chipid == BGE_CHIPID_BCM57765_A0) {
		/* tg3 says we have to wait extra time */
		delay(10 * 1000);
	}

	return 0;
}

/*
 * bge_phy_addr:
 *
 *	Return the MII address of the chip's internal PHY.  Most chips
 *	use address 1; on 5717/5719/5720 the address is derived from the
 *	PCI function number plus a copper-vs-SerDes offset read from the
 *	hardware strap registers.
 */
int
bge_phy_addr(struct bge_softc *sc)
{
	struct pci_attach_args *pa = &(sc->bge_pa);
	int phy_addr = 1;

	/*
	 * PHY address mapping for various devices.
	 *
	 *	  | F0 Cu | F0 Sr | F1 Cu | F1 Sr |
	 * ---------+-------+-------+-------+-------+
	 * BCM57XX  |   1   |   X   |   X   |   X   |
	 * BCM5704  |   1   |   X   |   1   |   X   |
	 * BCM5717  |   1   |   8   |   2   |   9   |
	 * BCM5719  |   1   |   8   |   2   |   9   |
	 * BCM5720  |   1   |   8   |   2   |   9   |
	 *
	 *	  | F2 Cu | F2 Sr | F3 Cu | F3 Sr |
	 * ---------+-------+-------+-------+-------+
	 * BCM57XX  |   X   |   X   |   X   |   X   |
	 * BCM5704  |   X   |   X   |   X   |   X   |
	 * BCM5717  |   X   |   X   |   X   |   X   |
	 * BCM5719  |   3   |   10  |   4   |   11  |
	 * BCM5720  |   X   |   X   |   X   |   X   |
	 *
	 * Other addresses may respond but they are not
	 * IEEE compliant PHYs and should be ignored.
	 */
	switch (BGE_ASICREV(sc->bge_chipid)) {
	case BGE_ASICREV_BCM5717:
	case BGE_ASICREV_BCM5719:
	case BGE_ASICREV_BCM5720:
		phy_addr = pa->pa_function;
		/* 5717 A0 exposes the SerDes strap in a different register. */
		if (sc->bge_chipid != BGE_CHIPID_BCM5717_A0) {
			phy_addr += (CSR_READ_4(sc, BGE_SGDIG_STS) &
			    BGE_SGDIGSTS_IS_SERDES) ? 8 : 1;
		} else {
			phy_addr += (CSR_READ_4(sc, BGE_CPMU_PHY_STRAP) &
			    BGE_CPMU_PHY_STRAP_IS_SERDES) ? 8 : 1;
		}
	}

	return phy_addr;
}

/*
 * Do endian, PCI and DMA initialization. Also check the on-board ROM
 * self-test results.
 */
static int
bge_chipinit(struct bge_softc *sc)
{
	uint32_t dma_rw_ctl, misc_ctl, mode_ctl, reg;
	int i;

	/* Set endianness before we access any non-PCI registers. */
	misc_ctl = BGE_INIT;
	if (sc->bge_flags & BGEF_TAGGED_STATUS)
		misc_ctl |= BGE_PCIMISCCTL_TAGGED_STATUS;
	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MISC_CTL,
	    misc_ctl);

	/*
	 * Clear the MAC statistics block in the NIC's
	 * internal memory.
	 */
	for (i = BGE_STATS_BLOCK;
	    i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
		BGE_MEMWIN_WRITE(sc->sc_pc, sc->sc_pcitag, i, 0);

	/* ... and likewise the status block. */
	for (i = BGE_STATUS_BLOCK;
	    i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
		BGE_MEMWIN_WRITE(sc->sc_pc, sc->sc_pcitag, i, 0);

	/* 5717 workaround from tg3 */
	if (sc->bge_chipid == BGE_CHIPID_BCM5717_A0) {
		/* Save */
		mode_ctl = CSR_READ_4(sc, BGE_MODE_CTL);

		/* Temporarily modify MODE_CTL to gain access to the TLP
		 * register window. */
		reg = mode_ctl & ~BGE_MODECTL_PCIE_TLPADDRMASK;
		CSR_WRITE_4(sc, BGE_MODE_CTL, reg | BGE_MODECTL_PCIE_TLPADDR1);

		/* Control TLP */
		reg = CSR_READ_4(sc, BGE_TLP_CONTROL_REG +
		    BGE_TLP_PHYCTL1);
		CSR_WRITE_4(sc, BGE_TLP_CONTROL_REG + BGE_TLP_PHYCTL1,
		    reg | BGE_TLP_PHYCTL1_EN_L1PLLPD);

		/* Restore */
		CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl);
	}

	if (BGE_IS_57765_FAMILY(sc)) {
		if (sc->bge_chipid == BGE_CHIPID_BCM57765_A0) {
			/* Save */
			mode_ctl = CSR_READ_4(sc, BGE_MODE_CTL);

			/* Temporarily modify MODE_CTL to control TLP */
			reg = mode_ctl & ~BGE_MODECTL_PCIE_TLPADDRMASK;
			CSR_WRITE_4(sc, BGE_MODE_CTL,
			    reg | BGE_MODECTL_PCIE_TLPADDR1);

			/* Control TLP */
			reg = CSR_READ_4(sc, BGE_TLP_CONTROL_REG +
			    BGE_TLP_PHYCTL5);
			CSR_WRITE_4(sc, BGE_TLP_CONTROL_REG + BGE_TLP_PHYCTL5,
			    reg | BGE_TLP_PHYCTL5_DIS_L2CLKREQ);

			/* Restore */
			CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl);
		}
		if (BGE_CHIPREV(sc->bge_chipid) != BGE_CHIPREV_57765_AX) {
			/*
			 * For the 57766 and non Ax versions of 57765, bootcode
			 * needs to setup the PCIE Fast Training Sequence (FTS)
			 * value to prevent transmit hangs.
			 */
			reg = CSR_READ_4(sc, BGE_CPMU_PADRNG_CTL);
			CSR_WRITE_4(sc, BGE_CPMU_PADRNG_CTL,
			    reg | BGE_CPMU_PADRNG_CTL_RDIV2);

			/* Save */
			mode_ctl = CSR_READ_4(sc, BGE_MODE_CTL);

			/* Temporarily modify MODE_CTL to control TLP */
			reg = mode_ctl & ~BGE_MODECTL_PCIE_TLPADDRMASK;
			CSR_WRITE_4(sc, BGE_MODE_CTL,
			    reg | BGE_MODECTL_PCIE_TLPADDR0);

			/* Control TLP */
			reg = CSR_READ_4(sc, BGE_TLP_CONTROL_REG +
			    BGE_TLP_FTSMAX);
			reg &= ~BGE_TLP_FTSMAX_MSK;
			CSR_WRITE_4(sc, BGE_TLP_CONTROL_REG + BGE_TLP_FTSMAX,
			    reg | BGE_TLP_FTSMAX_VAL);

			/* Restore */
			CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl);
		}

		/* Select the 6.25MHz MAC clock for 10Mb operation. */
		reg = CSR_READ_4(sc, BGE_CPMU_LSPD_10MB_CLK);
		reg &= ~BGE_CPMU_LSPD_10MB_MACCLK_MASK;
		reg |= BGE_CPMU_LSPD_10MB_MACCLK_6_25;
		CSR_WRITE_4(sc, BGE_CPMU_LSPD_10MB_CLK, reg);
	}

	/* Set up the PCI DMA control register. */
	dma_rw_ctl = BGE_PCI_READ_CMD | BGE_PCI_WRITE_CMD;
	if (sc->bge_flags & BGEF_PCIE) {
		/* Read watermark not used, 128 bytes for write. */
		DPRINTFN(4, ("(%s: PCI-Express DMA setting)\n",
		    device_xname(sc->bge_dev)));
		if (sc->bge_mps >= 256)
			dma_rw_ctl |= BGE_PCIDMARWCTL_WR_WAT_SHIFT(7);
		else
			dma_rw_ctl |= BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
	} else if (sc->bge_flags & BGEF_PCIX) {
		DPRINTFN(4, ("(:%s: PCI-X DMA setting)\n",
		    device_xname(sc->bge_dev)));
		/* PCI-X bus */
		if (BGE_IS_5714_FAMILY(sc)) {
			/* 256 bytes for read and write. */
			dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(2) |
			    BGE_PCIDMARWCTL_WR_WAT_SHIFT(2);

			if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5780)
				dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL;
			else
				dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE_LOCAL;
		} else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703) {
			/*
			 * In the BCM5703, the DMA read watermark should
			 * be set to less than or equal to the maximum
			 * memory read byte count of the PCI-X command
			 * register.
			 */
			dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(4) |
			    BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
		} else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
			/* 1536 bytes for read, 384 bytes for write. */
			dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
			    BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
		} else {
			/* 384 bytes for read and write. */
			dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(3) |
			    BGE_PCIDMARWCTL_WR_WAT_SHIFT(3) |
			    (0x0F);
		}

		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 ||
		    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
			uint32_t tmp;

			/* Set ONEDMA_ATONCE for hardware workaround. */
			tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f;
			if (tmp == 6 || tmp == 7)
				dma_rw_ctl |=
				    BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL;

			/* Set PCI-X DMA write workaround. */
			dma_rw_ctl |= BGE_PCIDMARWCTL_ASRT_ALL_BE;
		}
	} else {
		/* Conventional PCI bus: 256 bytes for read and write. */
		DPRINTFN(4, ("(%s: PCI 2.2 DMA setting)\n",
		    device_xname(sc->bge_dev)));
		dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
		    BGE_PCIDMARWCTL_WR_WAT_SHIFT(7);

		if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5705 &&
		    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5750)
			dma_rw_ctl |= 0x0F;
	}

	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701)
		dma_rw_ctl |= BGE_PCIDMARWCTL_USE_MRM |
		    BGE_PCIDMARWCTL_ASRT_ALL_BE;

	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 ||
	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704)
		dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;

	if (BGE_IS_57765_PLUS(sc)) {
		dma_rw_ctl &= ~BGE_PCIDMARWCTL_DIS_CACHE_ALIGNMENT;
		if (sc->bge_chipid == BGE_CHIPID_BCM57765_A0)
			dma_rw_ctl &= ~BGE_PCIDMARWCTL_CRDRDR_RDMA_MRRS_MSK;

		/*
		 * Enable HW workaround for controllers that misinterpret
		 * a status tag update and leave interrupts permanently
		 * disabled.
		 */
		if (!BGE_IS_57765_FAMILY(sc) &&
		    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5717 &&
		    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5762)
			dma_rw_ctl |= BGE_PCIDMARWCTL_TAGGED_STATUS_WA;
	}

	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_DMA_RW_CTL,
	    dma_rw_ctl);

	/*
	 * Set up general mode register.
	 */
	mode_ctl = BGE_DMA_SWAP_OPTIONS;
	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720 ||
	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762) {
		/* Retain Host-2-BMC settings written by APE firmware. */
		mode_ctl |= CSR_READ_4(sc, BGE_MODE_CTL) &
		    (BGE_MODECTL_BYTESWAP_B2HRX_DATA |
		    BGE_MODECTL_WORDSWAP_B2HRX_DATA |
		    BGE_MODECTL_B2HRX_ENABLE | BGE_MODECTL_HTX2B_ENABLE);
	}
	mode_ctl |= BGE_MODECTL_MAC_ATTN_INTR | BGE_MODECTL_HOST_SEND_BDS |
	    BGE_MODECTL_TX_NO_PHDR_CSUM;

	/*
	 * BCM5701 B5 has a bug causing data corruption when using
	 * 64-bit DMA reads, which can be terminated early and then
	 * completed later as 32-bit accesses, in combination with
	 * certain bridges.
	 */
	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701 &&
	    sc->bge_chipid == BGE_CHIPID_BCM5701_B5)
		mode_ctl |= BGE_MODECTL_FORCE_PCI32;

	/*
	 * Tell the firmware the driver is running
	 */
	if (sc->bge_asf_mode & ASF_STACKUP)
		mode_ctl |= BGE_MODECTL_STACKUP;

	CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl);

	/*
	 * Disable memory write invalidate. Apparently it is not supported
	 * properly by these devices.
	 */
	PCI_CLRBIT(sc->sc_pc, sc->sc_pcitag, PCI_COMMAND_STATUS_REG,
	    PCI_COMMAND_INVALIDATE_ENABLE);

#ifdef __brokenalpha__
	/*
	 * Must ensure that we do not cross an 8K (bytes) boundary
	 * for DMA reads. Our highest limit is 1K bytes. This is a
	 * restriction on some ALPHA platforms with early revision
	 * 21174 PCI chipsets, such as the AlphaPC 164lx
	 */
	PCI_SETBIT(sc, BGE_PCI_DMA_RW_CTL, BGE_PCI_READ_BNDRY_1024, 4);
#endif

	/* Set the timer prescaler (always 66MHz) */
	CSR_WRITE_4_FLUSH(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ);

	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
		DELAY(40);	/* XXX */

		/* Put PHY into ready state */
		BGE_CLRBIT_FLUSH(sc, BGE_MISC_CFG, BGE_MISCCFG_EPHY_IDDQ);
		DELAY(40);
	}

	/* No failure path past the watchdogs above; always succeeds. */
	return 0;
}

static int
bge_blockinit(struct bge_softc *sc)
{
	volatile struct bge_rcb *rcb;
	bus_size_t rcb_addr;
	struct ifnet * const ifp = &sc->ethercom.ec_if;
	bge_hostaddr taddr;
	uint32_t dmactl, rdmareg, mimode, val;
	int i, limit;

	/*
	 * Initialize the memory window pointer register so that
	 * we can access the first 32K of internal NIC RAM. This will
	 * allow us to set up the TX send ring RCBs and the RX return
	 * ring RCBs, plus other things which live in NIC memory.
	 */
	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_BASEADDR, 0);

	if (!BGE_IS_5705_PLUS(sc)) {
		/* 57XX step 33 */
		/* Configure mbuf memory pool */
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_BUFFPOOL_1);

		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704)
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
		else
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);

		/* 57XX step 34 */
		/* Configure DMA resource pool */
		CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
		    BGE_DMA_DESCRIPTORS);
		CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
	}

	/* 5718 step 11, 57XX step 35 */
	/*
	 * Configure mbuf pool watermarks. New broadcom docs strongly
	 * recommend these.
2337 */ 2338 if (BGE_IS_5717_PLUS(sc)) { 2339 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0); 2340 if (ifp->if_mtu > ETHERMTU) { 2341 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x7e); 2342 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xea); 2343 } else { 2344 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x2a); 2345 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xa0); 2346 } 2347 } else if (BGE_IS_5705_PLUS(sc)) { 2348 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0); 2349 2350 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) { 2351 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04); 2352 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10); 2353 } else { 2354 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10); 2355 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60); 2356 } 2357 } else { 2358 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50); 2359 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20); 2360 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60); 2361 } 2362 2363 /* 57XX step 36 */ 2364 /* Configure DMA resource watermarks */ 2365 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5); 2366 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10); 2367 2368 /* 5718 step 13, 57XX step 38 */ 2369 /* Enable buffer manager */ 2370 val = BGE_BMANMODE_ENABLE | BGE_BMANMODE_ATTN; 2371 /* 2372 * Change the arbitration algorithm of TXMBUF read request to 2373 * round-robin instead of priority based for BCM5719. When 2374 * TXFIFO is almost empty, RDMA will hold its request until 2375 * TXFIFO is not almost empty. 
2376 */ 2377 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719) 2378 val |= BGE_BMANMODE_NO_TX_UNDERRUN; 2379 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 || 2380 sc->bge_chipid == BGE_CHIPID_BCM5719_A0 || 2381 sc->bge_chipid == BGE_CHIPID_BCM5720_A0) 2382 val |= BGE_BMANMODE_LOMBUF_ATTN; 2383 CSR_WRITE_4(sc, BGE_BMAN_MODE, val); 2384 2385 /* 57XX step 39 */ 2386 /* Poll for buffer manager start indication */ 2387 for (i = 0; i < BGE_TIMEOUT * 2; i++) { 2388 DELAY(10); 2389 if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE) 2390 break; 2391 } 2392 2393 if (i == BGE_TIMEOUT * 2) { 2394 aprint_error_dev(sc->bge_dev, 2395 "buffer manager failed to start\n"); 2396 return ENXIO; 2397 } 2398 2399 /* 57XX step 40 */ 2400 /* Enable flow-through queues */ 2401 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF); 2402 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0); 2403 2404 /* Wait until queue initialization is complete */ 2405 for (i = 0; i < BGE_TIMEOUT * 2; i++) { 2406 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0) 2407 break; 2408 DELAY(10); 2409 } 2410 2411 if (i == BGE_TIMEOUT * 2) { 2412 aprint_error_dev(sc->bge_dev, 2413 "flow-through queue init failed\n"); 2414 return ENXIO; 2415 } 2416 2417 /* 2418 * Summary of rings supported by the controller: 2419 * 2420 * Standard Receive Producer Ring 2421 * - This ring is used to feed receive buffers for "standard" 2422 * sized frames (typically 1536 bytes) to the controller. 2423 * 2424 * Jumbo Receive Producer Ring 2425 * - This ring is used to feed receive buffers for jumbo sized 2426 * frames (i.e. anything bigger than the "standard" frames) 2427 * to the controller. 2428 * 2429 * Mini Receive Producer Ring 2430 * - This ring is used to feed receive buffers for "mini" 2431 * sized frames to the controller. 2432 * - This feature required external memory for the controller 2433 * but was never used in a production system. Should always 2434 * be disabled. 
2435 * 2436 * Receive Return Ring 2437 * - After the controller has placed an incoming frame into a 2438 * receive buffer that buffer is moved into a receive return 2439 * ring. The driver is then responsible to passing the 2440 * buffer up to the stack. Many versions of the controller 2441 * support multiple RR rings. 2442 * 2443 * Send Ring 2444 * - This ring is used for outgoing frames. Many versions of 2445 * the controller support multiple send rings. 2446 */ 2447 2448 /* 5718 step 15, 57XX step 41 */ 2449 /* Initialize the standard RX ring control block */ 2450 rcb = &sc->bge_rdata->bge_info.bge_std_rx_rcb; 2451 BGE_HOSTADDR(rcb->bge_hostaddr, BGE_RING_DMA_ADDR(sc, bge_rx_std_ring)); 2452 /* 5718 step 16 */ 2453 if (BGE_IS_57765_PLUS(sc)) { 2454 /* 2455 * Bits 31-16: Programmable ring size (2048, 1024, 512, .., 32) 2456 * Bits 15-2 : Maximum RX frame size 2457 * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled 2458 * Bit 0 : Reserved 2459 */ 2460 rcb->bge_maxlen_flags = 2461 BGE_RCB_MAXLEN_FLAGS(512, BGE_MAX_FRAMELEN << 2); 2462 } else if (BGE_IS_5705_PLUS(sc)) { 2463 /* 2464 * Bits 31-16: Programmable ring size (512, 256, 128, 64, 32) 2465 * Bits 15-2 : Reserved (should be 0) 2466 * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled 2467 * Bit 0 : Reserved 2468 */ 2469 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0); 2470 } else { 2471 /* 2472 * Ring size is always XXX entries 2473 * Bits 31-16: Maximum RX frame size 2474 * Bits 15-2 : Reserved (should be 0) 2475 * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled 2476 * Bit 0 : Reserved 2477 */ 2478 rcb->bge_maxlen_flags = 2479 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0); 2480 } 2481 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 || 2482 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 || 2483 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) 2484 rcb->bge_nicaddr = BGE_STD_RX_RINGS_5717; 2485 else 2486 rcb->bge_nicaddr = BGE_STD_RX_RINGS; 2487 /* Write the standard receive producer ring control block. 
*/ 2488 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi); 2489 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo); 2490 CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags); 2491 CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr); 2492 2493 /* Reset the standard receive producer ring producer index. */ 2494 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0); 2495 2496 /* 57XX step 42 */ 2497 /* 2498 * Initialize the jumbo RX ring control block 2499 * We set the 'ring disabled' bit in the flags 2500 * field until we're actually ready to start 2501 * using this ring (i.e. once we set the MTU 2502 * high enough to require it). 2503 */ 2504 if (BGE_IS_JUMBO_CAPABLE(sc)) { 2505 rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb; 2506 BGE_HOSTADDR(rcb->bge_hostaddr, 2507 BGE_RING_DMA_ADDR(sc, bge_rx_jumbo_ring)); 2508 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 2509 BGE_RCB_FLAG_USE_EXT_RX_BD | BGE_RCB_FLAG_RING_DISABLED); 2510 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 || 2511 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 || 2512 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) 2513 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS_5717; 2514 else 2515 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS; 2516 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI, 2517 rcb->bge_hostaddr.bge_addr_hi); 2518 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO, 2519 rcb->bge_hostaddr.bge_addr_lo); 2520 /* Program the jumbo receive producer ring RCB parameters. */ 2521 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, 2522 rcb->bge_maxlen_flags); 2523 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr); 2524 /* Reset the jumbo receive producer ring producer index. */ 2525 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0); 2526 } 2527 2528 /* 57XX step 43 */ 2529 /* Disable the mini receive producer ring RCB. 
*/ 2530 if (BGE_IS_5700_FAMILY(sc)) { 2531 /* Set up dummy disabled mini ring RCB */ 2532 rcb = &sc->bge_rdata->bge_info.bge_mini_rx_rcb; 2533 rcb->bge_maxlen_flags = 2534 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED); 2535 CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS, 2536 rcb->bge_maxlen_flags); 2537 /* Reset the mini receive producer ring producer index. */ 2538 bge_writembx(sc, BGE_MBX_RX_MINI_PROD_LO, 0); 2539 2540 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 2541 offsetof(struct bge_ring_data, bge_info), 2542 sizeof(struct bge_gib), 2543 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2544 } 2545 2546 /* Choose de-pipeline mode for BCM5906 A0, A1 and A2. */ 2547 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) { 2548 if (sc->bge_chipid == BGE_CHIPID_BCM5906_A0 || 2549 sc->bge_chipid == BGE_CHIPID_BCM5906_A1 || 2550 sc->bge_chipid == BGE_CHIPID_BCM5906_A2) 2551 CSR_WRITE_4(sc, BGE_ISO_PKT_TX, 2552 (CSR_READ_4(sc, BGE_ISO_PKT_TX) & ~3) | 2); 2553 } 2554 /* 5718 step 14, 57XX step 44 */ 2555 /* 2556 * The BD ring replenish thresholds control how often the 2557 * hardware fetches new BD's from the producer rings in host 2558 * memory. Setting the value too low on a busy system can 2559 * starve the hardware and recue the throughpout. 2560 * 2561 * Set the BD ring replenish thresholds. The recommended 2562 * values are 1/8th the number of descriptors allocated to 2563 * each ring, but since we try to avoid filling the entire 2564 * ring we set these to the minimal value of 8. This needs to 2565 * be done on several of the supported chip revisions anyway, 2566 * to work around HW bugs. 
2567 */ 2568 CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, 8); 2569 if (BGE_IS_JUMBO_CAPABLE(sc)) 2570 CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, 8); 2571 2572 /* 5718 step 18 */ 2573 if (BGE_IS_5717_PLUS(sc)) { 2574 CSR_WRITE_4(sc, BGE_STD_REPL_LWM, 4); 2575 CSR_WRITE_4(sc, BGE_JUMBO_REPL_LWM, 4); 2576 } 2577 2578 /* 57XX step 45 */ 2579 /* 2580 * Disable all send rings by setting the 'ring disabled' bit 2581 * in the flags field of all the TX send ring control blocks, 2582 * located in NIC memory. 2583 */ 2584 if (BGE_IS_5700_FAMILY(sc)) { 2585 /* 5700 to 5704 had 16 send rings. */ 2586 limit = BGE_TX_RINGS_EXTSSRAM_MAX; 2587 } else if (BGE_IS_5717_PLUS(sc)) { 2588 limit = BGE_TX_RINGS_5717_MAX; 2589 } else if (BGE_IS_57765_FAMILY(sc) || 2590 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762) { 2591 limit = BGE_TX_RINGS_57765_MAX; 2592 } else 2593 limit = 1; 2594 rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB; 2595 for (i = 0; i < limit; i++) { 2596 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags, 2597 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED)); 2598 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0); 2599 rcb_addr += sizeof(struct bge_rcb); 2600 } 2601 2602 /* 57XX step 46 and 47 */ 2603 /* Configure send ring RCB 0 (we use only the first ring) */ 2604 rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB; 2605 BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_tx_ring)); 2606 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi); 2607 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo); 2608 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 || 2609 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 || 2610 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) 2611 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, BGE_SEND_RING_5717); 2612 else 2613 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 2614 BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT)); 2615 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags, 2616 BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0)); 2617 2618 /* 57XX 
step 48 */ 2619 /* 2620 * Disable all receive return rings by setting the 2621 * 'ring diabled' bit in the flags field of all the receive 2622 * return ring control blocks, located in NIC memory. 2623 */ 2624 if (BGE_IS_5717_PLUS(sc)) { 2625 /* Should be 17, use 16 until we get an SRAM map. */ 2626 limit = 16; 2627 } else if (BGE_IS_5700_FAMILY(sc)) 2628 limit = BGE_RX_RINGS_MAX; 2629 else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5755 || 2630 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762 || 2631 BGE_IS_57765_FAMILY(sc)) 2632 limit = 4; 2633 else 2634 limit = 1; 2635 /* Disable all receive return rings */ 2636 rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB; 2637 for (i = 0; i < limit; i++) { 2638 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, 0); 2639 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, 0); 2640 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags, 2641 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 2642 BGE_RCB_FLAG_RING_DISABLED)); 2643 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0); 2644 bge_writembx(sc, BGE_MBX_RX_CONS0_LO + 2645 (i * (sizeof(uint64_t))), 0); 2646 rcb_addr += sizeof(struct bge_rcb); 2647 } 2648 2649 /* 57XX step 49 */ 2650 /* 2651 * Set up receive return ring 0. Note that the NIC address 2652 * for RX return rings is 0x0. The return rings live entirely 2653 * within the host, so the nicaddr field in the RCB isn't used. 
2654 */ 2655 rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB; 2656 BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_rx_return_ring)); 2657 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi); 2658 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo); 2659 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0x00000000); 2660 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags, 2661 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0)); 2662 2663 /* 5718 step 24, 57XX step 53 */ 2664 /* Set random backoff seed for TX */ 2665 CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF, 2666 (CLLADDR(ifp->if_sadl)[0] + CLLADDR(ifp->if_sadl)[1] + 2667 CLLADDR(ifp->if_sadl)[2] + CLLADDR(ifp->if_sadl)[3] + 2668 CLLADDR(ifp->if_sadl)[4] + CLLADDR(ifp->if_sadl)[5]) & 2669 BGE_TX_BACKOFF_SEED_MASK); 2670 2671 /* 5718 step 26, 57XX step 55 */ 2672 /* Set inter-packet gap */ 2673 val = 0x2620; 2674 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720 || 2675 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762) 2676 val |= CSR_READ_4(sc, BGE_TX_LENGTHS) & 2677 (BGE_TXLEN_JMB_FRM_LEN_MSK | BGE_TXLEN_CNT_DN_VAL_MSK); 2678 CSR_WRITE_4(sc, BGE_TX_LENGTHS, val); 2679 2680 /* 5718 step 27, 57XX step 56 */ 2681 /* 2682 * Specify which ring to use for packets that don't match 2683 * any RX rules. 2684 */ 2685 CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08); 2686 2687 /* 5718 step 28, 57XX step 57 */ 2688 /* 2689 * Configure number of RX lists. One interrupt distribution 2690 * list, sixteen active lists, one bad frames class. 2691 */ 2692 CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181); 2693 2694 /* 5718 step 29, 57XX step 58 */ 2695 /* Inialize RX list placement stats mask. 
*/ 2696 if (BGE_IS_575X_PLUS(sc)) { 2697 val = CSR_READ_4(sc, BGE_RXLP_STATS_ENABLE_MASK); 2698 val &= ~BGE_RXLPSTATCONTROL_DACK_FIX; 2699 CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, val); 2700 } else 2701 CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF); 2702 2703 /* 5718 step 30, 57XX step 59 */ 2704 CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1); 2705 2706 /* 5718 step 33, 57XX step 62 */ 2707 /* Disable host coalescing until we get it set up */ 2708 CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000); 2709 2710 /* 5718 step 34, 57XX step 63 */ 2711 /* Poll to make sure it's shut down. */ 2712 for (i = 0; i < BGE_TIMEOUT * 2; i++) { 2713 DELAY(10); 2714 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE)) 2715 break; 2716 } 2717 2718 if (i == BGE_TIMEOUT * 2) { 2719 aprint_error_dev(sc->bge_dev, 2720 "host coalescing engine failed to idle\n"); 2721 return ENXIO; 2722 } 2723 2724 /* 5718 step 35, 36, 37 */ 2725 /* Set up host coalescing defaults */ 2726 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks); 2727 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks); 2728 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds); 2729 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds); 2730 if (!(BGE_IS_5705_PLUS(sc))) { 2731 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0); 2732 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0); 2733 } 2734 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0); 2735 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0); 2736 2737 /* Set up address of statistics block */ 2738 if (BGE_IS_5700_FAMILY(sc)) { 2739 BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_info.bge_stats)); 2740 CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks); 2741 CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK); 2742 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI, taddr.bge_addr_hi); 2743 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO, taddr.bge_addr_lo); 2744 } 2745 2746 /* 5718 step 38 */ 2747 /* Set up address of status block */ 2748 
BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_status_block)); 2749 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK); 2750 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI, taddr.bge_addr_hi); 2751 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO, taddr.bge_addr_lo); 2752 sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx = 0; 2753 sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx = 0; 2754 2755 /* Set up status block size. */ 2756 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 && 2757 sc->bge_chipid != BGE_CHIPID_BCM5700_C0) { 2758 val = BGE_STATBLKSZ_FULL; 2759 bzero(&sc->bge_rdata->bge_status_block, BGE_STATUS_BLK_SZ); 2760 } else { 2761 val = BGE_STATBLKSZ_32BYTE; 2762 bzero(&sc->bge_rdata->bge_status_block, 32); 2763 } 2764 2765 /* 5718 step 39, 57XX step 73 */ 2766 /* Turn on host coalescing state machine */ 2767 CSR_WRITE_4(sc, BGE_HCC_MODE, val | BGE_HCCMODE_ENABLE); 2768 2769 /* 5718 step 40, 57XX step 74 */ 2770 /* Turn on RX BD completion state machine and enable attentions */ 2771 CSR_WRITE_4(sc, BGE_RBDC_MODE, 2772 BGE_RBDCMODE_ENABLE | BGE_RBDCMODE_ATTN); 2773 2774 /* 5718 step 41, 57XX step 75 */ 2775 /* Turn on RX list placement state machine */ 2776 CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE); 2777 2778 /* 57XX step 76 */ 2779 /* Turn on RX list selector state machine. */ 2780 if (!(BGE_IS_5705_PLUS(sc))) 2781 CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE); 2782 2783 val = BGE_MACMODE_TXDMA_ENB | BGE_MACMODE_RXDMA_ENB | 2784 BGE_MACMODE_RX_STATS_CLEAR | BGE_MACMODE_TX_STATS_CLEAR | 2785 BGE_MACMODE_RX_STATS_ENB | BGE_MACMODE_TX_STATS_ENB | 2786 BGE_MACMODE_FRMHDR_DMA_ENB; 2787 2788 if (sc->bge_flags & BGEF_FIBER_TBI) 2789 val |= BGE_PORTMODE_TBI; 2790 else if (sc->bge_flags & BGEF_FIBER_MII) 2791 val |= BGE_PORTMODE_GMII; 2792 else 2793 val |= BGE_PORTMODE_MII; 2794 2795 /* 5718 step 42 and 43, 57XX step 77 and 78 */ 2796 /* Allow APE to send/receive frames. 
*/ 2797 if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) != 0) 2798 val |= BGE_MACMODE_APE_RX_EN | BGE_MACMODE_APE_TX_EN; 2799 2800 /* Turn on DMA, clear stats */ 2801 CSR_WRITE_4_FLUSH(sc, BGE_MAC_MODE, val); 2802 /* 5718 step 44 */ 2803 DELAY(40); 2804 2805 /* 5718 step 45, 57XX step 79 */ 2806 /* Set misc. local control, enable interrupts on attentions */ 2807 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN); 2808 if (BGE_IS_5717_PLUS(sc)) { 2809 CSR_READ_4(sc, BGE_MISC_LOCAL_CTL); /* Flush */ 2810 /* 5718 step 46 */ 2811 DELAY(100); 2812 } 2813 2814 /* 57XX step 81 */ 2815 /* Turn on DMA completion state machine */ 2816 if (!(BGE_IS_5705_PLUS(sc))) 2817 CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE); 2818 2819 /* 5718 step 47, 57XX step 82 */ 2820 val = BGE_WDMAMODE_ENABLE | BGE_WDMAMODE_ALL_ATTNS; 2821 2822 /* 5718 step 48 */ 2823 /* Enable host coalescing bug fix. */ 2824 if (BGE_IS_5755_PLUS(sc)) 2825 val |= BGE_WDMAMODE_STATUS_TAG_FIX; 2826 2827 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785) 2828 val |= BGE_WDMAMODE_BURST_ALL_DATA; 2829 2830 /* Turn on write DMA state machine */ 2831 CSR_WRITE_4_FLUSH(sc, BGE_WDMA_MODE, val); 2832 /* 5718 step 49 */ 2833 DELAY(40); 2834 2835 val = BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS; 2836 2837 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717) 2838 val |= BGE_RDMAMODE_MULT_DMA_RD_DIS; 2839 2840 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 || 2841 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785 || 2842 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57780) 2843 val |= BGE_RDMAMODE_BD_SBD_CRPT_ATTN | 2844 BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN | 2845 BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN; 2846 2847 if (sc->bge_flags & BGEF_PCIE) 2848 val |= BGE_RDMAMODE_FIFO_LONG_BURST; 2849 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57766) { 2850 if (ifp->if_mtu <= ETHERMTU) 2851 val |= BGE_RDMAMODE_JMB_2K_MMRR; 2852 } 2853 if (sc->bge_flags & BGEF_TSO) { 2854 val |= BGE_RDMAMODE_TSO4_ENABLE; 2855 if 
(BGE_IS_5717_PLUS(sc)) 2856 val |= BGE_RDMAMODE_TSO6_ENABLE; 2857 } 2858 2859 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720 || 2860 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762) { 2861 val |= CSR_READ_4(sc, BGE_RDMA_MODE) & 2862 BGE_RDMAMODE_H2BNC_VLAN_DET; 2863 /* 2864 * Allow multiple outstanding read requests from 2865 * non-LSO read DMA engine. 2866 */ 2867 val &= ~BGE_RDMAMODE_MULT_DMA_RD_DIS; 2868 } 2869 2870 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761 || 2871 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 || 2872 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785 || 2873 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57780 || 2874 BGE_IS_57765_PLUS(sc)) { 2875 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762) 2876 rdmareg = BGE_RDMA_RSRVCTRL_REG2; 2877 else 2878 rdmareg = BGE_RDMA_RSRVCTRL; 2879 dmactl = CSR_READ_4(sc, rdmareg); 2880 /* 2881 * Adjust tx margin to prevent TX data corruption and 2882 * fix internal FIFO overflow. 2883 */ 2884 if (sc->bge_chipid == BGE_CHIPID_BCM5719_A0 || 2885 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762) { 2886 dmactl &= ~(BGE_RDMA_RSRVCTRL_FIFO_LWM_MASK | 2887 BGE_RDMA_RSRVCTRL_FIFO_HWM_MASK | 2888 BGE_RDMA_RSRVCTRL_TXMRGN_MASK); 2889 dmactl |= BGE_RDMA_RSRVCTRL_FIFO_LWM_1_5K | 2890 BGE_RDMA_RSRVCTRL_FIFO_HWM_1_5K | 2891 BGE_RDMA_RSRVCTRL_TXMRGN_320B; 2892 } 2893 /* 2894 * Enable fix for read DMA FIFO overruns. 2895 * The fix is to limit the number of RX BDs 2896 * the hardware would fetch at a time. 2897 */ 2898 CSR_WRITE_4(sc, rdmareg, dmactl | 2899 BGE_RDMA_RSRVCTRL_FIFO_OFLW_FIX); 2900 } 2901 2902 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719) { 2903 CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL, 2904 CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) | 2905 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_4K | 2906 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K); 2907 } else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) { 2908 /* 2909 * Allow 4KB burst length reads for non-LSO frames. 
2910 * Enable 512B burst length reads for buffer descriptors. 2911 */ 2912 CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL, 2913 CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) | 2914 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_512 | 2915 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K); 2916 } else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762) { 2917 CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL_REG2, 2918 CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL_REG2) | 2919 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_4K | 2920 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K); 2921 } 2922 /* Turn on read DMA state machine */ 2923 CSR_WRITE_4_FLUSH(sc, BGE_RDMA_MODE, val); 2924 /* 5718 step 52 */ 2925 delay(40); 2926 2927 if (sc->bge_flags & BGEF_RDMA_BUG) { 2928 for (i = 0; i < BGE_NUM_RDMA_CHANNELS / 2; i++) { 2929 val = CSR_READ_4(sc, BGE_RDMA_LENGTH + i * 4); 2930 if ((val & 0xFFFF) > BGE_FRAMELEN) 2931 break; 2932 if (((val >> 16) & 0xFFFF) > BGE_FRAMELEN) 2933 break; 2934 } 2935 if (i != BGE_NUM_RDMA_CHANNELS / 2) { 2936 val = CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL); 2937 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719) 2938 val |= BGE_RDMA_TX_LENGTH_WA_5719; 2939 else 2940 val |= BGE_RDMA_TX_LENGTH_WA_5720; 2941 CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL, val); 2942 } 2943 } 2944 2945 /* 5718 step 56, 57XX step 84 */ 2946 /* Turn on RX data completion state machine */ 2947 CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE); 2948 2949 /* Turn on RX data and RX BD initiator state machine */ 2950 CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE); 2951 2952 /* 57XX step 85 */ 2953 /* Turn on Mbuf cluster free state machine */ 2954 if (!BGE_IS_5705_PLUS(sc)) 2955 CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE); 2956 2957 /* 5718 step 57, 57XX step 86 */ 2958 /* Turn on send data completion state machine */ 2959 val = BGE_SDCMODE_ENABLE; 2960 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761) 2961 val |= BGE_SDCMODE_CDELAY; 2962 CSR_WRITE_4(sc, BGE_SDC_MODE, val); 2963 2964 /* 5718 step 58 */ 2965 /* Turn on send 
BD completion state machine */ 2966 CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE); 2967 2968 /* 57XX step 88 */ 2969 /* Turn on RX BD initiator state machine */ 2970 CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE); 2971 2972 /* 5718 step 60, 57XX step 90 */ 2973 /* Turn on send data initiator state machine */ 2974 if (sc->bge_flags & BGEF_TSO) { 2975 /* XXX: magic value from Linux driver */ 2976 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE | 2977 BGE_SDIMODE_HW_LSO_PRE_DMA); 2978 } else 2979 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE); 2980 2981 /* 5718 step 61, 57XX step 91 */ 2982 /* Turn on send BD initiator state machine */ 2983 CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE); 2984 2985 /* 5718 step 62, 57XX step 92 */ 2986 /* Turn on send BD selector state machine */ 2987 CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE); 2988 2989 /* 5718 step 31, 57XX step 60 */ 2990 CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF); 2991 /* 5718 step 32, 57XX step 61 */ 2992 CSR_WRITE_4(sc, BGE_SDI_STATS_CTL, 2993 BGE_SDISTATSCTL_ENABLE | BGE_SDISTATSCTL_FASTER); 2994 2995 /* ack/clear link change events */ 2996 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED | 2997 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE | 2998 BGE_MACSTAT_LINK_CHANGED); 2999 CSR_WRITE_4(sc, BGE_MI_STS, 0); 3000 3001 /* 3002 * Enable attention when the link has changed state for 3003 * devices that use auto polling. 3004 */ 3005 if (sc->bge_flags & BGEF_FIBER_TBI) { 3006 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK); 3007 } else { 3008 if ((sc->bge_flags & BGEF_CPMU_PRESENT) != 0) 3009 mimode = BGE_MIMODE_500KHZ_CONST; 3010 else 3011 mimode = BGE_MIMODE_BASE; 3012 /* 5718 step 68. 5718 step 69 (optionally). 
*/
		if (BGE_IS_5700_FAMILY(sc) ||
		    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705) {
			/* 5700 family and 5705 use MI auto-polling. */
			mimode |= BGE_MIMODE_AUTOPOLL;
			BGE_STS_SETBIT(sc, BGE_STS_AUTOPOLL);
		}
		mimode |= BGE_MIMODE_PHYADDR(sc->bge_phy_addr);
		CSR_WRITE_4(sc, BGE_MI_MODE, mimode);
		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700)
			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
			    BGE_EVTENB_MI_INTERRUPT);
	}

	/*
	 * Clear any pending link state attention.
	 * Otherwise some link state change events may be lost until attention
	 * is cleared by bge_intr() -> bge_link_upd() sequence.
	 * It's not necessary on newer BCM chips - perhaps enabling link
	 * state change attentions implies clearing pending attention.
	 */
	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
	    BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
	    BGE_MACSTAT_LINK_CHANGED);

	/* Enable link state change attentions. */
	BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);

	return 0;
}

/*
 * Look up a chip revision by exact chip id first; if no entry matches,
 * fall back to a match on the ASIC major revision alone.
 * Returns NULL when the revision is unknown.
 */
static const struct bge_revision *
bge_lookup_rev(uint32_t chipid)
{
	const struct bge_revision *br;

	/* Exact chip-id match takes precedence. */
	for (br = bge_revisions; br->br_name != NULL; br++) {
		if (br->br_chipid == chipid)
			return br;
	}

	/* Fall back to the ASIC major revision table. */
	for (br = bge_majorrevs; br->br_name != NULL; br++) {
		if (br->br_chipid == BGE_ASICREV(chipid))
			return br;
	}

	return NULL;
}

/*
 * Match the device's PCI vendor/product pair against our product table.
 * Returns NULL when this is not a device we support.
 */
static const struct bge_product *
bge_lookup(const struct pci_attach_args *pa)
{
	const struct bge_product *bp;

	for (bp = bge_products; bp->bp_name != NULL; bp++) {
		if (PCI_VENDOR(pa->pa_id) == bp->bp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == bp->bp_product)
			return bp;
	}

	return NULL;
}

/*
 * Read the chip id (ASIC revision) from PCI config space.  On newer
 * devices the ASICREV field in BGE_PCI_MISC_CTL is a sentinel and the
 * real id lives in a generation-specific product-id register.
 */
static uint32_t
bge_chipid(const struct pci_attach_args *pa)
{
	uint32_t id;

	id = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL)
	    >>
BGE_PCIMISCCTL_ASICREV_SHIFT;

	if (BGE_ASICREV(id) == BGE_ASICREV_USE_PRODID_REG) {
		/*
		 * The ASICREV field is the "use product id reg" sentinel:
		 * fetch the real chip id from the register appropriate to
		 * this device generation.
		 */
		switch (PCI_PRODUCT(pa->pa_id)) {
		case PCI_PRODUCT_BROADCOM_BCM5717:
		case PCI_PRODUCT_BROADCOM_BCM5718:
		case PCI_PRODUCT_BROADCOM_BCM5719:
		case PCI_PRODUCT_BROADCOM_BCM5720:
		case PCI_PRODUCT_BROADCOM_BCM5725:
		case PCI_PRODUCT_BROADCOM_BCM5727:
		case PCI_PRODUCT_BROADCOM_BCM5762:
		case PCI_PRODUCT_BROADCOM_BCM57764:
		case PCI_PRODUCT_BROADCOM_BCM57767:
		case PCI_PRODUCT_BROADCOM_BCM57787:
			id = pci_conf_read(pa->pa_pc, pa->pa_tag,
			    BGE_PCI_GEN2_PRODID_ASICREV);
			break;
		case PCI_PRODUCT_BROADCOM_BCM57761:
		case PCI_PRODUCT_BROADCOM_BCM57762:
		case PCI_PRODUCT_BROADCOM_BCM57765:
		case PCI_PRODUCT_BROADCOM_BCM57766:
		case PCI_PRODUCT_BROADCOM_BCM57781:
		case PCI_PRODUCT_BROADCOM_BCM57782:
		case PCI_PRODUCT_BROADCOM_BCM57785:
		case PCI_PRODUCT_BROADCOM_BCM57786:
		case PCI_PRODUCT_BROADCOM_BCM57791:
		case PCI_PRODUCT_BROADCOM_BCM57795:
			id = pci_conf_read(pa->pa_pc, pa->pa_tag,
			    BGE_PCI_GEN15_PRODID_ASICREV);
			break;
		default:
			id = pci_conf_read(pa->pa_pc, pa->pa_tag,
			    BGE_PCI_PRODID_ASICREV);
			break;
		}
	}

	return id;
}

/*
 * Return true if MSI can be used with this device.
 */
static int
bge_can_use_msi(struct bge_softc *sc)
{
	int can_use_msi = 0;

	switch (BGE_ASICREV(sc->bge_chipid)) {
	case BGE_ASICREV_BCM5714_A0:
	case BGE_ASICREV_BCM5714:
		/*
		 * Apparently, MSI doesn't work when these chips are
		 * configured in single-port mode.
*/
		break;
	case BGE_ASICREV_BCM5750:
		/* Only the 5750 AX/BX steppings are excluded. */
		if (BGE_CHIPREV(sc->bge_chipid) != BGE_CHIPREV_5750_AX &&
		    BGE_CHIPREV(sc->bge_chipid) != BGE_CHIPREV_5750_BX)
			can_use_msi = 1;
		break;
	default:
		/* All other 575X-plus chips are assumed MSI-capable. */
		if (BGE_IS_575X_PLUS(sc))
			can_use_msi = 1;
	}
	return can_use_msi;
}

/*
 * Probe for a Broadcom chip. Check the PCI vendor and device IDs
 * against our list and return its name if we find a match. Note
 * that since the Broadcom controller contains VPD support, we
 * can get the device name string from the controller itself instead
 * of the compiled-in string. This is a little slow, but it guarantees
 * we'll always announce the right product name.
 */
static int
bge_probe(device_t parent, cfdata_t match, void *aux)
{
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;

	if (bge_lookup(pa) != NULL)
		return 1;

	return 0;
}

/*
 * Attach routine: enable PCI memory/bus-master access, map the
 * control/status registers, identify the chip id, and bring up the
 * rest of the device (continues past this excerpt).
 */
static void
bge_attach(device_t parent, device_t self, void *aux)
{
	struct bge_softc * const sc = device_private(self);
	struct pci_attach_args * const pa = aux;
	prop_dictionary_t dict;
	const struct bge_product *bp;
	const struct bge_revision *br;
	pci_chipset_tag_t pc;
	const char *intrstr = NULL;
	uint32_t hwcfg, hwcfg2, hwcfg3, hwcfg4, hwcfg5;
	uint32_t command;
	struct ifnet *ifp;
	struct mii_data * const mii = &sc->bge_mii;
	uint32_t misccfg, mimode, macmode;
	void * kva;
	u_char eaddr[ETHER_ADDR_LEN];
	pcireg_t memtype, subid, reg;
	bus_addr_t memaddr;
	uint32_t pm_ctl;
	bool no_seeprom;
	int capmask, trys;
	int mii_flags;
	int map_flags;
	char intrbuf[PCI_INTRSTR_LEN];

	bp = bge_lookup(pa);
	KASSERT(bp != NULL);

	sc->sc_pc = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;
	sc->bge_dev = self;

	sc->bge_pa = *pa;
	pc = sc->sc_pc;
	subid = pci_conf_read(pc,
sc->sc_pcitag, PCI_SUBSYS_ID_REG); 3203 3204 aprint_naive(": Ethernet controller\n"); 3205 aprint_normal(": %s Ethernet\n", bp->bp_name); 3206 3207 /* 3208 * Map control/status registers. 3209 */ 3210 DPRINTFN(5, ("Map control/status regs\n")); 3211 command = pci_conf_read(pc, sc->sc_pcitag, PCI_COMMAND_STATUS_REG); 3212 command |= PCI_COMMAND_MEM_ENABLE | PCI_COMMAND_MASTER_ENABLE; 3213 pci_conf_write(pc, sc->sc_pcitag, PCI_COMMAND_STATUS_REG, command); 3214 command = pci_conf_read(pc, sc->sc_pcitag, PCI_COMMAND_STATUS_REG); 3215 3216 if (!(command & PCI_COMMAND_MEM_ENABLE)) { 3217 aprint_error_dev(sc->bge_dev, 3218 "failed to enable memory mapping!\n"); 3219 return; 3220 } 3221 3222 DPRINTFN(5, ("pci_mem_find\n")); 3223 memtype = pci_mapreg_type(sc->sc_pc, sc->sc_pcitag, BGE_PCI_BAR0); 3224 switch (memtype) { 3225 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT: 3226 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT: 3227 #if 0 3228 if (pci_mapreg_map(pa, BGE_PCI_BAR0, 3229 memtype, 0, &sc->bge_btag, &sc->bge_bhandle, 3230 &memaddr, &sc->bge_bsize) == 0) 3231 break; 3232 #else 3233 /* 3234 * Workaround for PCI prefetchable bit. Some BCM5717-5720 based 3235 * system get NMI on boot (PR#48451). This problem might not be 3236 * the driver's bug but our PCI common part's bug. Until we 3237 * find a real reason, we ignore the prefetchable bit. 3238 */ 3239 if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, BGE_PCI_BAR0, 3240 memtype, &memaddr, &sc->bge_bsize, &map_flags) == 0) { 3241 map_flags &= ~BUS_SPACE_MAP_PREFETCHABLE; 3242 if (bus_space_map(pa->pa_memt, memaddr, sc->bge_bsize, 3243 map_flags, &sc->bge_bhandle) == 0) { 3244 sc->bge_btag = pa->pa_memt; 3245 break; 3246 } 3247 } 3248 #endif 3249 /* FALLTHROUGH */ 3250 default: 3251 aprint_error_dev(sc->bge_dev, "can't find mem space\n"); 3252 return; 3253 } 3254 3255 /* Save various chip information. 
*/ 3256 sc->bge_chipid = bge_chipid(pa); 3257 sc->bge_phy_addr = bge_phy_addr(sc); 3258 3259 if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PCIEXPRESS, 3260 &sc->bge_pciecap, NULL) != 0) { 3261 /* PCIe */ 3262 sc->bge_flags |= BGEF_PCIE; 3263 /* Extract supported maximum payload size. */ 3264 reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, 3265 sc->bge_pciecap + PCIE_DCAP); 3266 sc->bge_mps = 128 << (reg & PCIE_DCAP_MAX_PAYLOAD); 3267 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 || 3268 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) 3269 sc->bge_expmrq = 2048; 3270 else 3271 sc->bge_expmrq = 4096; 3272 bge_set_max_readrq(sc); 3273 } else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785) { 3274 /* PCIe without PCIe cap */ 3275 sc->bge_flags |= BGEF_PCIE; 3276 } else if ((pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_PCISTATE) & 3277 BGE_PCISTATE_PCI_BUSMODE) == 0) { 3278 /* PCI-X */ 3279 sc->bge_flags |= BGEF_PCIX; 3280 if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIX, 3281 &sc->bge_pcixcap, NULL) == 0) 3282 aprint_error_dev(sc->bge_dev, 3283 "unable to find PCIX capability\n"); 3284 } 3285 3286 if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX) { 3287 /* 3288 * Kludge for 5700 Bx bug: a hardware bug (PCIX byte enable?) 3289 * can clobber the chip's PCI config-space power control 3290 * registers, leaving the card in D3 powersave state. We do 3291 * not have memory-mapped registers in this state, so force 3292 * device into D0 state before starting initialization. 3293 */ 3294 pm_ctl = pci_conf_read(pc, sc->sc_pcitag, BGE_PCI_PWRMGMT_CMD); 3295 pm_ctl &= ~(PCI_PWR_D0 | PCI_PWR_D1 | PCI_PWR_D2 | PCI_PWR_D3); 3296 pm_ctl |= (1 << 8) | PCI_PWR_D0 ; /* D0 state */ 3297 pci_conf_write(pc, sc->sc_pcitag, BGE_PCI_PWRMGMT_CMD, pm_ctl); 3298 DELAY(1000); /* 27 usec is allegedly sufficient */ 3299 } 3300 3301 /* Save chipset family. 
*/ 3302 switch (BGE_ASICREV(sc->bge_chipid)) { 3303 case BGE_ASICREV_BCM5717: 3304 case BGE_ASICREV_BCM5719: 3305 case BGE_ASICREV_BCM5720: 3306 sc->bge_flags |= BGEF_5717_PLUS; 3307 /* FALLTHROUGH */ 3308 case BGE_ASICREV_BCM5762: 3309 case BGE_ASICREV_BCM57765: 3310 case BGE_ASICREV_BCM57766: 3311 if (!BGE_IS_5717_PLUS(sc)) 3312 sc->bge_flags |= BGEF_57765_FAMILY; 3313 sc->bge_flags |= BGEF_57765_PLUS | BGEF_5755_PLUS | 3314 BGEF_575X_PLUS | BGEF_5705_PLUS | BGEF_JUMBO_CAPABLE; 3315 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 || 3316 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) { 3317 /* 3318 * Enable work around for DMA engine miscalculation 3319 * of TXMBUF available space. 3320 */ 3321 sc->bge_flags |= BGEF_RDMA_BUG; 3322 3323 if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719) && 3324 (sc->bge_chipid == BGE_CHIPID_BCM5719_A0)) { 3325 /* Jumbo frame on BCM5719 A0 does not work. */ 3326 sc->bge_flags &= ~BGEF_JUMBO_CAPABLE; 3327 } 3328 } 3329 break; 3330 case BGE_ASICREV_BCM5755: 3331 case BGE_ASICREV_BCM5761: 3332 case BGE_ASICREV_BCM5784: 3333 case BGE_ASICREV_BCM5785: 3334 case BGE_ASICREV_BCM5787: 3335 case BGE_ASICREV_BCM57780: 3336 sc->bge_flags |= BGEF_5755_PLUS | BGEF_575X_PLUS | BGEF_5705_PLUS; 3337 break; 3338 case BGE_ASICREV_BCM5700: 3339 case BGE_ASICREV_BCM5701: 3340 case BGE_ASICREV_BCM5703: 3341 case BGE_ASICREV_BCM5704: 3342 sc->bge_flags |= BGEF_5700_FAMILY | BGEF_JUMBO_CAPABLE; 3343 break; 3344 case BGE_ASICREV_BCM5714_A0: 3345 case BGE_ASICREV_BCM5780: 3346 case BGE_ASICREV_BCM5714: 3347 sc->bge_flags |= BGEF_5714_FAMILY | BGEF_JUMBO_CAPABLE; 3348 /* FALLTHROUGH */ 3349 case BGE_ASICREV_BCM5750: 3350 case BGE_ASICREV_BCM5752: 3351 case BGE_ASICREV_BCM5906: 3352 sc->bge_flags |= BGEF_575X_PLUS; 3353 /* FALLTHROUGH */ 3354 case BGE_ASICREV_BCM5705: 3355 sc->bge_flags |= BGEF_5705_PLUS; 3356 break; 3357 } 3358 3359 /* Identify chips with APE processor. 
*/ 3360 switch (BGE_ASICREV(sc->bge_chipid)) { 3361 case BGE_ASICREV_BCM5717: 3362 case BGE_ASICREV_BCM5719: 3363 case BGE_ASICREV_BCM5720: 3364 case BGE_ASICREV_BCM5761: 3365 case BGE_ASICREV_BCM5762: 3366 sc->bge_flags |= BGEF_APE; 3367 break; 3368 } 3369 3370 /* 3371 * The 40bit DMA bug applies to the 5714/5715 controllers and is 3372 * not actually a MAC controller bug but an issue with the embedded 3373 * PCIe to PCI-X bridge in the device. Use 40bit DMA workaround. 3374 */ 3375 if (BGE_IS_5714_FAMILY(sc) && ((sc->bge_flags & BGEF_PCIX) != 0)) 3376 sc->bge_flags |= BGEF_40BIT_BUG; 3377 3378 /* Chips with APE need BAR2 access for APE registers/memory. */ 3379 if ((sc->bge_flags & BGEF_APE) != 0) { 3380 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BGE_PCI_BAR2); 3381 #if 0 3382 if (pci_mapreg_map(pa, BGE_PCI_BAR2, memtype, 0, 3383 &sc->bge_apetag, &sc->bge_apehandle, NULL, 3384 &sc->bge_apesize)) { 3385 aprint_error_dev(sc->bge_dev, 3386 "couldn't map BAR2 memory\n"); 3387 return; 3388 } 3389 #else 3390 /* 3391 * Workaround for PCI prefetchable bit. Some BCM5717-5720 based 3392 * system get NMI on boot (PR#48451). This problem might not be 3393 * the driver's bug but our PCI common part's bug. Until we 3394 * find a real reason, we ignore the prefetchable bit. 3395 */ 3396 if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, BGE_PCI_BAR2, 3397 memtype, &memaddr, &sc->bge_apesize, &map_flags) != 0) { 3398 aprint_error_dev(sc->bge_dev, 3399 "couldn't map BAR2 memory\n"); 3400 return; 3401 } 3402 3403 map_flags &= ~BUS_SPACE_MAP_PREFETCHABLE; 3404 if (bus_space_map(pa->pa_memt, memaddr, 3405 sc->bge_apesize, map_flags, &sc->bge_apehandle) != 0) { 3406 aprint_error_dev(sc->bge_dev, 3407 "couldn't map BAR2 memory\n"); 3408 return; 3409 } 3410 sc->bge_apetag = pa->pa_memt; 3411 #endif 3412 3413 /* Enable APE register/memory access by host driver. 
*/ 3414 reg = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE); 3415 reg |= BGE_PCISTATE_ALLOW_APE_CTLSPC_WR | 3416 BGE_PCISTATE_ALLOW_APE_SHMEM_WR | 3417 BGE_PCISTATE_ALLOW_APE_PSPACE_WR; 3418 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE, reg); 3419 3420 bge_ape_lock_init(sc); 3421 bge_ape_read_fw_ver(sc); 3422 } 3423 3424 /* Identify the chips that use an CPMU. */ 3425 if (BGE_IS_5717_PLUS(sc) || 3426 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 || 3427 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761 || 3428 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785 || 3429 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57780) 3430 sc->bge_flags |= BGEF_CPMU_PRESENT; 3431 3432 /* 3433 * When using the BCM5701 in PCI-X mode, data corruption has 3434 * been observed in the first few bytes of some received packets. 3435 * Aligning the packet buffer in memory eliminates the corruption. 3436 * Unfortunately, this misaligns the packet payloads. On platforms 3437 * which do not support unaligned accesses, we will realign the 3438 * payloads by copying the received packets. 3439 */ 3440 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701 && 3441 sc->bge_flags & BGEF_PCIX) 3442 sc->bge_flags |= BGEF_RX_ALIGNBUG; 3443 3444 if (BGE_IS_5700_FAMILY(sc)) 3445 sc->bge_flags |= BGEF_JUMBO_CAPABLE; 3446 3447 misccfg = CSR_READ_4(sc, BGE_MISC_CFG); 3448 misccfg &= BGE_MISCCFG_BOARD_ID_MASK; 3449 3450 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 && 3451 (misccfg == BGE_MISCCFG_BOARD_ID_5788 || 3452 misccfg == BGE_MISCCFG_BOARD_ID_5788M)) 3453 sc->bge_flags |= BGEF_IS_5788; 3454 3455 /* 3456 * Some controllers seem to require a special firmware to use 3457 * TSO. But the firmware is not available to FreeBSD and Linux 3458 * claims that the TSO performed by the firmware is slower than 3459 * hardware based TSO. 
Moreover the firmware based TSO has one 3460 * known bug which can't handle TSO if ethernet header + IP/TCP 3461 * header is greater than 80 bytes. The workaround for the TSO 3462 * bug exist but it seems it's too expensive than not using 3463 * TSO at all. Some hardwares also have the TSO bug so limit 3464 * the TSO to the controllers that are not affected TSO issues 3465 * (e.g. 5755 or higher). 3466 */ 3467 if (BGE_IS_5755_PLUS(sc)) { 3468 /* 3469 * BCM5754 and BCM5787 shares the same ASIC id so 3470 * explicit device id check is required. 3471 */ 3472 if ((PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5754) && 3473 (PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5754M)) 3474 sc->bge_flags |= BGEF_TSO; 3475 /* TSO on BCM5719 A0 does not work. */ 3476 if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719) && 3477 (sc->bge_chipid == BGE_CHIPID_BCM5719_A0)) 3478 sc->bge_flags &= ~BGEF_TSO; 3479 } 3480 3481 capmask = 0xffffffff; /* XXX BMSR_DEFCAPMASK */ 3482 if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 && 3483 (misccfg == 0x4000 || misccfg == 0x8000)) || 3484 (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 && 3485 PCI_VENDOR(pa->pa_id) == PCI_VENDOR_BROADCOM && 3486 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5901 || 3487 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5901A2 || 3488 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5705F)) || 3489 (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_BROADCOM && 3490 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5751F || 3491 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5753F || 3492 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5787F)) || 3493 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57790 || 3494 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57791 || 3495 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57795 || 3496 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) { 3497 /* These chips are 10/100 only. 
*/ 3498 capmask &= ~BMSR_EXTSTAT; 3499 sc->bge_phy_flags |= BGEPHYF_NO_WIRESPEED; 3500 } 3501 3502 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 || 3503 (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 && 3504 (sc->bge_chipid != BGE_CHIPID_BCM5705_A0 && 3505 sc->bge_chipid != BGE_CHIPID_BCM5705_A1))) 3506 sc->bge_phy_flags |= BGEPHYF_NO_WIRESPEED; 3507 3508 /* Set various PHY bug flags. */ 3509 if (sc->bge_chipid == BGE_CHIPID_BCM5701_A0 || 3510 sc->bge_chipid == BGE_CHIPID_BCM5701_B0) 3511 sc->bge_phy_flags |= BGEPHYF_CRC_BUG; 3512 if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5703_AX || 3513 BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5704_AX) 3514 sc->bge_phy_flags |= BGEPHYF_ADC_BUG; 3515 if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0) 3516 sc->bge_phy_flags |= BGEPHYF_5704_A0_BUG; 3517 if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 || 3518 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701) && 3519 PCI_VENDOR(subid) == PCI_VENDOR_DELL) 3520 sc->bge_phy_flags |= BGEPHYF_NO_3LED; 3521 if (BGE_IS_5705_PLUS(sc) && 3522 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5906 && 3523 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5785 && 3524 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM57780 && 3525 !BGE_IS_57765_PLUS(sc)) { 3526 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5755 || 3527 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761 || 3528 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 || 3529 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5787) { 3530 if (PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5722 && 3531 PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5756) 3532 sc->bge_phy_flags |= BGEPHYF_JITTER_BUG; 3533 if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5755M) 3534 sc->bge_phy_flags |= BGEPHYF_ADJUST_TRIM; 3535 } else 3536 sc->bge_phy_flags |= BGEPHYF_BER_BUG; 3537 } 3538 3539 /* 3540 * SEEPROM check. 3541 * First check if firmware knows we do not have SEEPROM. 
3542 */ 3543 if (prop_dictionary_get_bool(device_properties(self), 3544 "without-seeprom", &no_seeprom) && no_seeprom) 3545 sc->bge_flags |= BGEF_NO_EEPROM; 3546 3547 else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) 3548 sc->bge_flags |= BGEF_NO_EEPROM; 3549 3550 /* Now check the 'ROM failed' bit on the RX CPU */ 3551 else if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) 3552 sc->bge_flags |= BGEF_NO_EEPROM; 3553 3554 sc->bge_asf_mode = 0; 3555 /* No ASF if APE present. */ 3556 if ((sc->bge_flags & BGEF_APE) == 0) { 3557 if (bge_allow_asf && (bge_readmem_ind(sc, BGE_SRAM_DATA_SIG) == 3558 BGE_SRAM_DATA_SIG_MAGIC)) { 3559 if (bge_readmem_ind(sc, BGE_SRAM_DATA_CFG) & 3560 BGE_HWCFG_ASF) { 3561 sc->bge_asf_mode |= ASF_ENABLE; 3562 sc->bge_asf_mode |= ASF_STACKUP; 3563 if (BGE_IS_575X_PLUS(sc)) 3564 sc->bge_asf_mode |= ASF_NEW_HANDSHAKE; 3565 } 3566 } 3567 } 3568 3569 int counts[PCI_INTR_TYPE_SIZE] = { 3570 [PCI_INTR_TYPE_INTX] = 1, 3571 [PCI_INTR_TYPE_MSI] = 1, 3572 [PCI_INTR_TYPE_MSIX] = 1, 3573 }; 3574 int max_type = PCI_INTR_TYPE_MSIX; 3575 3576 if (!bge_can_use_msi(sc)) { 3577 /* MSI broken, allow only INTx */ 3578 max_type = PCI_INTR_TYPE_INTX; 3579 } 3580 3581 if (pci_intr_alloc(pa, &sc->bge_pihp, counts, max_type) != 0) { 3582 aprint_error_dev(sc->bge_dev, "couldn't alloc interrupt\n"); 3583 return; 3584 } 3585 3586 DPRINTFN(5, ("pci_intr_string\n")); 3587 intrstr = pci_intr_string(pc, sc->bge_pihp[0], intrbuf, 3588 sizeof(intrbuf)); 3589 DPRINTFN(5, ("pci_intr_establish\n")); 3590 sc->bge_intrhand = pci_intr_establish_xname(pc, sc->bge_pihp[0], 3591 IPL_NET, bge_intr, sc, device_xname(sc->bge_dev)); 3592 if (sc->bge_intrhand == NULL) { 3593 pci_intr_release(pc, sc->bge_pihp, 1); 3594 sc->bge_pihp = NULL; 3595 3596 aprint_error_dev(self, "couldn't establish interrupt"); 3597 if (intrstr != NULL) 3598 aprint_error(" at %s", intrstr); 3599 aprint_error("\n"); 3600 return; 3601 } 3602 aprint_normal_dev(sc->bge_dev, "interrupting at %s\n", 
intrstr); 3603 3604 switch (pci_intr_type(pc, sc->bge_pihp[0])) { 3605 case PCI_INTR_TYPE_MSIX: 3606 case PCI_INTR_TYPE_MSI: 3607 KASSERT(bge_can_use_msi(sc)); 3608 sc->bge_flags |= BGEF_MSI; 3609 break; 3610 default: 3611 /* nothing to do */ 3612 break; 3613 } 3614 3615 /* 3616 * All controllers except BCM5700 supports tagged status but 3617 * we use tagged status only for MSI case on BCM5717. Otherwise 3618 * MSI on BCM5717 does not work. 3619 */ 3620 if (BGE_IS_57765_PLUS(sc) && sc->bge_flags & BGEF_MSI) 3621 sc->bge_flags |= BGEF_TAGGED_STATUS; 3622 3623 /* 3624 * Reset NVRAM before bge_reset(). It's required to acquire NVRAM 3625 * lock in bge_reset(). 3626 */ 3627 CSR_WRITE_4_FLUSH(sc, BGE_EE_ADDR, 3628 BGE_EEADDR_RESET | BGE_EEHALFCLK(BGE_HALFCLK_384SCL)); 3629 delay(1000); 3630 BGE_SETBIT_FLUSH(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM); 3631 3632 bge_stop_fw(sc); 3633 bge_sig_pre_reset(sc, BGE_RESET_SHUTDOWN); 3634 if (bge_reset(sc)) 3635 aprint_error_dev(sc->bge_dev, "chip reset failed\n"); 3636 3637 /* 3638 * Read the hardware config word in the first 32k of NIC internal 3639 * memory, or fall back to the config word in the EEPROM. 3640 * Note: on some BCM5700 cards, this value appears to be unset. 
3641 */ 3642 hwcfg = hwcfg2 = hwcfg3 = hwcfg4 = hwcfg5 = 0; 3643 if (bge_readmem_ind(sc, BGE_SRAM_DATA_SIG) == 3644 BGE_SRAM_DATA_SIG_MAGIC) { 3645 uint32_t tmp; 3646 3647 hwcfg = bge_readmem_ind(sc, BGE_SRAM_DATA_CFG); 3648 tmp = bge_readmem_ind(sc, BGE_SRAM_DATA_VER) >> 3649 BGE_SRAM_DATA_VER_SHIFT; 3650 if ((0 < tmp) && (tmp < 0x100)) 3651 hwcfg2 = bge_readmem_ind(sc, BGE_SRAM_DATA_CFG_2); 3652 if (sc->bge_flags & BGEF_PCIE) 3653 hwcfg3 = bge_readmem_ind(sc, BGE_SRAM_DATA_CFG_3); 3654 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785) 3655 hwcfg4 = bge_readmem_ind(sc, BGE_SRAM_DATA_CFG_4); 3656 if (BGE_IS_5717_PLUS(sc)) 3657 hwcfg5 = bge_readmem_ind(sc, BGE_SRAM_DATA_CFG_5); 3658 } else if (!(sc->bge_flags & BGEF_NO_EEPROM)) { 3659 bge_read_eeprom(sc, (void *)&hwcfg, 3660 BGE_EE_HWCFG_OFFSET, sizeof(hwcfg)); 3661 hwcfg = be32toh(hwcfg); 3662 } 3663 aprint_normal_dev(sc->bge_dev, 3664 "HW config %08x, %08x, %08x, %08x %08x\n", 3665 hwcfg, hwcfg2, hwcfg3, hwcfg4, hwcfg5); 3666 3667 bge_sig_legacy(sc, BGE_RESET_SHUTDOWN); 3668 bge_sig_post_reset(sc, BGE_RESET_SHUTDOWN); 3669 3670 if (bge_chipinit(sc)) { 3671 aprint_error_dev(sc->bge_dev, "chip initialization failed\n"); 3672 bge_release_resources(sc); 3673 return; 3674 } 3675 3676 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700) { 3677 BGE_SETBIT_FLUSH(sc, BGE_MISC_LOCAL_CTL, 3678 BGE_MLC_MISCIO_OUT1 | BGE_MLC_MISCIO_OUTEN1); 3679 DELAY(100); 3680 } 3681 3682 /* Set MI_MODE */ 3683 mimode = BGE_MIMODE_PHYADDR(sc->bge_phy_addr); 3684 if ((sc->bge_flags & BGEF_CPMU_PRESENT) != 0) 3685 mimode |= BGE_MIMODE_500KHZ_CONST; 3686 else 3687 mimode |= BGE_MIMODE_BASE; 3688 CSR_WRITE_4_FLUSH(sc, BGE_MI_MODE, mimode); 3689 DELAY(80); 3690 3691 /* 3692 * Get station address from the EEPROM. 
3693 */ 3694 if (bge_get_eaddr(sc, eaddr)) { 3695 aprint_error_dev(sc->bge_dev, 3696 "failed to read station address\n"); 3697 bge_release_resources(sc); 3698 return; 3699 } 3700 3701 br = bge_lookup_rev(sc->bge_chipid); 3702 3703 if (br == NULL) { 3704 aprint_normal_dev(sc->bge_dev, "unknown ASIC (0x%x)", 3705 sc->bge_chipid); 3706 } else { 3707 aprint_normal_dev(sc->bge_dev, "ASIC %s (0x%x)", 3708 br->br_name, sc->bge_chipid); 3709 } 3710 aprint_normal(", Ethernet address %s\n", ether_sprintf(eaddr)); 3711 3712 /* Allocate the general information block and ring buffers. */ 3713 if (pci_dma64_available(pa)) { 3714 sc->bge_dmatag = pa->pa_dmat64; 3715 sc->bge_dmatag32 = pa->pa_dmat; 3716 sc->bge_dma64 = true; 3717 } else { 3718 sc->bge_dmatag = pa->pa_dmat; 3719 sc->bge_dmatag32 = pa->pa_dmat; 3720 sc->bge_dma64 = false; 3721 } 3722 3723 /* 40bit DMA workaround */ 3724 if (sizeof(bus_addr_t) > 4) { 3725 if ((sc->bge_flags & BGEF_40BIT_BUG) != 0) { 3726 bus_dma_tag_t olddmatag = sc->bge_dmatag; /* save */ 3727 3728 if (bus_dmatag_subregion(olddmatag, 0, 3729 (bus_addr_t)__MASK(40), 3730 &(sc->bge_dmatag), BUS_DMA_NOWAIT) != 0) { 3731 aprint_error_dev(self, 3732 "WARNING: failed to restrict dma range," 3733 " falling back to parent bus dma range\n"); 3734 sc->bge_dmatag = olddmatag; 3735 } 3736 } 3737 } 3738 SLIST_INIT(&sc->txdma_list); 3739 DPRINTFN(5, ("bus_dmamem_alloc\n")); 3740 if (bus_dmamem_alloc(sc->bge_dmatag, sizeof(struct bge_ring_data), 3741 PAGE_SIZE, 0, &sc->bge_ring_seg, 1, 3742 &sc->bge_ring_rseg, BUS_DMA_NOWAIT)) { 3743 aprint_error_dev(sc->bge_dev, "can't alloc rx buffers\n"); 3744 return; 3745 } 3746 DPRINTFN(5, ("bus_dmamem_map\n")); 3747 if (bus_dmamem_map(sc->bge_dmatag, &sc->bge_ring_seg, 3748 sc->bge_ring_rseg, sizeof(struct bge_ring_data), &kva, 3749 BUS_DMA_NOWAIT)) { 3750 aprint_error_dev(sc->bge_dev, 3751 "can't map DMA buffers (%zu bytes)\n", 3752 sizeof(struct bge_ring_data)); 3753 bus_dmamem_free(sc->bge_dmatag, &sc->bge_ring_seg, 3754 
sc->bge_ring_rseg); 3755 return; 3756 } 3757 DPRINTFN(5, ("bus_dmamem_create\n")); 3758 if (bus_dmamap_create(sc->bge_dmatag, sizeof(struct bge_ring_data), 1, 3759 sizeof(struct bge_ring_data), 0, 3760 BUS_DMA_NOWAIT, &sc->bge_ring_map)) { 3761 aprint_error_dev(sc->bge_dev, "can't create DMA map\n"); 3762 bus_dmamem_unmap(sc->bge_dmatag, kva, 3763 sizeof(struct bge_ring_data)); 3764 bus_dmamem_free(sc->bge_dmatag, &sc->bge_ring_seg, 3765 sc->bge_ring_rseg); 3766 return; 3767 } 3768 DPRINTFN(5, ("bus_dmamem_load\n")); 3769 if (bus_dmamap_load(sc->bge_dmatag, sc->bge_ring_map, kva, 3770 sizeof(struct bge_ring_data), NULL, 3771 BUS_DMA_NOWAIT)) { 3772 bus_dmamap_destroy(sc->bge_dmatag, sc->bge_ring_map); 3773 bus_dmamem_unmap(sc->bge_dmatag, kva, 3774 sizeof(struct bge_ring_data)); 3775 bus_dmamem_free(sc->bge_dmatag, &sc->bge_ring_seg, 3776 sc->bge_ring_rseg); 3777 return; 3778 } 3779 3780 DPRINTFN(5, ("bzero\n")); 3781 sc->bge_rdata = (struct bge_ring_data *)kva; 3782 3783 memset(sc->bge_rdata, 0, sizeof(struct bge_ring_data)); 3784 3785 /* Try to allocate memory for jumbo buffers. */ 3786 if (BGE_IS_JUMBO_CAPABLE(sc)) { 3787 if (bge_alloc_jumbo_mem(sc)) { 3788 aprint_error_dev(sc->bge_dev, 3789 "jumbo buffer allocation failed\n"); 3790 } else 3791 sc->ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU; 3792 } 3793 3794 /* Set default tuneable values. 
*/ 3795 sc->bge_stat_ticks = BGE_TICKS_PER_SEC; 3796 sc->bge_rx_coal_ticks = 150; 3797 sc->bge_rx_max_coal_bds = 64; 3798 sc->bge_tx_coal_ticks = 300; 3799 sc->bge_tx_max_coal_bds = 400; 3800 if (BGE_IS_5705_PLUS(sc)) { 3801 sc->bge_tx_coal_ticks = (12 * 5); 3802 sc->bge_tx_max_coal_bds = (12 * 5); 3803 aprint_verbose_dev(sc->bge_dev, 3804 "setting short Tx thresholds\n"); 3805 } 3806 3807 if (BGE_IS_5717_PLUS(sc)) 3808 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT; 3809 else if (BGE_IS_5705_PLUS(sc)) 3810 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705; 3811 else 3812 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT; 3813 3814 /* Set up ifnet structure */ 3815 ifp = &sc->ethercom.ec_if; 3816 ifp->if_softc = sc; 3817 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 3818 ifp->if_ioctl = bge_ioctl; 3819 ifp->if_stop = bge_stop; 3820 ifp->if_start = bge_start; 3821 ifp->if_init = bge_init; 3822 ifp->if_watchdog = bge_watchdog; 3823 IFQ_SET_MAXLEN(&ifp->if_snd, uimax(BGE_TX_RING_CNT - 1, IFQ_MAXLEN)); 3824 IFQ_SET_READY(&ifp->if_snd); 3825 DPRINTFN(5, ("strcpy if_xname\n")); 3826 strcpy(ifp->if_xname, device_xname(sc->bge_dev)); 3827 3828 if (sc->bge_chipid != BGE_CHIPID_BCM5700_B0) 3829 sc->ethercom.ec_if.if_capabilities |= 3830 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx; 3831 #if 1 /* XXX TCP/UDP checksum offload breaks with pf(4) */ 3832 sc->ethercom.ec_if.if_capabilities |= 3833 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx | 3834 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx; 3835 #endif 3836 sc->ethercom.ec_capabilities |= 3837 ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_MTU; 3838 sc->ethercom.ec_capenable |= ETHERCAP_VLAN_HWTAGGING; 3839 3840 if (sc->bge_flags & BGEF_TSO) 3841 sc->ethercom.ec_if.if_capabilities |= IFCAP_TSOv4; 3842 3843 /* 3844 * Do MII setup. 
3845 */ 3846 DPRINTFN(5, ("mii setup\n")); 3847 mii->mii_ifp = ifp; 3848 mii->mii_readreg = bge_miibus_readreg; 3849 mii->mii_writereg = bge_miibus_writereg; 3850 mii->mii_statchg = bge_miibus_statchg; 3851 3852 /* 3853 * Figure out what sort of media we have by checking the hardware 3854 * config word. Note: on some BCM5700 cards, this value appears to be 3855 * unset. If that's the case, we have to rely on identifying the NIC 3856 * by its PCI subsystem ID, as we do below for the SysKonnect SK-9D41. 3857 * The SysKonnect SK-9D41 is a 1000baseSX card. 3858 */ 3859 if (PCI_PRODUCT(subid) == SK_SUBSYSID_9D41 || 3860 (hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) { 3861 if (BGE_IS_5705_PLUS(sc)) { 3862 sc->bge_flags |= BGEF_FIBER_MII; 3863 sc->bge_phy_flags |= BGEPHYF_NO_WIRESPEED; 3864 } else 3865 sc->bge_flags |= BGEF_FIBER_TBI; 3866 } 3867 3868 /* Set bge_phy_flags before prop_dictionary_set_uint32() */ 3869 if (BGE_IS_JUMBO_CAPABLE(sc)) 3870 sc->bge_phy_flags |= BGEPHYF_JUMBO_CAPABLE; 3871 3872 /* set phyflags and chipid before mii_attach() */ 3873 dict = device_properties(self); 3874 prop_dictionary_set_uint32(dict, "phyflags", sc->bge_phy_flags); 3875 prop_dictionary_set_uint32(dict, "chipid", sc->bge_chipid); 3876 3877 macmode = CSR_READ_4(sc, BGE_MAC_MODE); 3878 macmode &= ~BGE_MACMODE_PORTMODE; 3879 /* Initialize ifmedia structures. 
*/ 3880 if (sc->bge_flags & BGEF_FIBER_TBI) { 3881 CSR_WRITE_4_FLUSH(sc, BGE_MAC_MODE, 3882 macmode | BGE_PORTMODE_TBI); 3883 DELAY(40); 3884 3885 sc->ethercom.ec_ifmedia = &sc->bge_ifmedia; 3886 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd, 3887 bge_ifmedia_sts); 3888 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER |IFM_1000_SX, 0, NULL); 3889 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX |IFM_FDX, 3890 0, NULL); 3891 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL); 3892 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO); 3893 /* Pretend the user requested this setting */ 3894 sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media; 3895 } else { 3896 uint16_t phyreg; 3897 int rv; 3898 /* 3899 * Do transceiver setup and tell the firmware the 3900 * driver is down so we can try to get access the 3901 * probe if ASF is running. Retry a couple of times 3902 * if we get a conflict with the ASF firmware accessing 3903 * the PHY. 3904 */ 3905 if (sc->bge_flags & BGEF_FIBER_MII) 3906 macmode |= BGE_PORTMODE_GMII; 3907 else 3908 macmode |= BGE_PORTMODE_MII; 3909 CSR_WRITE_4_FLUSH(sc, BGE_MAC_MODE, macmode); 3910 DELAY(40); 3911 3912 /* 3913 * Do transceiver setup and tell the firmware the 3914 * driver is down so we can try to get access the 3915 * probe if ASF is running. Retry a couple of times 3916 * if we get a conflict with the ASF firmware accessing 3917 * the PHY. 
3918 */ 3919 trys = 0; 3920 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 3921 sc->ethercom.ec_mii = mii; 3922 ifmedia_init(&mii->mii_media, 0, bge_ifmedia_upd, 3923 bge_ifmedia_sts); 3924 mii_flags = MIIF_DOPAUSE; 3925 if (sc->bge_flags & BGEF_FIBER_MII) 3926 mii_flags |= MIIF_HAVEFIBER; 3927 again: 3928 bge_asf_driver_up(sc); 3929 rv = bge_miibus_readreg(sc->bge_dev, sc->bge_phy_addr, 3930 MII_BMCR, &phyreg); 3931 if ((rv != 0) || ((phyreg & BMCR_PDOWN) != 0)) { 3932 int i; 3933 3934 bge_miibus_writereg(sc->bge_dev, sc->bge_phy_addr, 3935 MII_BMCR, BMCR_RESET); 3936 /* Wait up to 500ms for it to complete. */ 3937 for (i = 0; i < 500; i++) { 3938 bge_miibus_readreg(sc->bge_dev, 3939 sc->bge_phy_addr, MII_BMCR, &phyreg); 3940 if ((phyreg & BMCR_RESET) == 0) 3941 break; 3942 DELAY(1000); 3943 } 3944 } 3945 3946 mii_attach(sc->bge_dev, mii, capmask, sc->bge_phy_addr, 3947 MII_OFFSET_ANY, mii_flags); 3948 3949 if (LIST_EMPTY(&mii->mii_phys) && (trys++ < 4)) 3950 goto again; 3951 3952 if (LIST_EMPTY(&mii->mii_phys)) { 3953 aprint_error_dev(sc->bge_dev, "no PHY found!\n"); 3954 ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_MANUAL, 3955 0, NULL); 3956 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_MANUAL); 3957 } else 3958 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO); 3959 3960 /* 3961 * Now tell the firmware we are going up after probing the PHY 3962 */ 3963 if (sc->bge_asf_mode & ASF_STACKUP) 3964 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 3965 } 3966 3967 /* 3968 * Call MI attach routine. 3969 */ 3970 DPRINTFN(5, ("if_attach\n")); 3971 if_attach(ifp); 3972 if_deferred_start_init(ifp, NULL); 3973 DPRINTFN(5, ("ether_ifattach\n")); 3974 ether_ifattach(ifp, eaddr); 3975 ether_set_ifflags_cb(&sc->ethercom, bge_ifflags_cb); 3976 rnd_attach_source(&sc->rnd_source, device_xname(sc->bge_dev), 3977 RND_TYPE_NET, RND_FLAG_DEFAULT); 3978 #ifdef BGE_EVENT_COUNTERS 3979 /* 3980 * Attach event counters. 
 */
	evcnt_attach_dynamic(&sc->bge_ev_intr, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->bge_dev), "intr");
	evcnt_attach_dynamic(&sc->bge_ev_intr_spurious, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->bge_dev), "intr_spurious");
	evcnt_attach_dynamic(&sc->bge_ev_intr_spurious2, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->bge_dev), "intr_spurious2");
	evcnt_attach_dynamic(&sc->bge_ev_tx_xoff, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->bge_dev), "tx_xoff");
	evcnt_attach_dynamic(&sc->bge_ev_tx_xon, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->bge_dev), "tx_xon");
	evcnt_attach_dynamic(&sc->bge_ev_rx_xoff, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->bge_dev), "rx_xoff");
	evcnt_attach_dynamic(&sc->bge_ev_rx_xon, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->bge_dev), "rx_xon");
	evcnt_attach_dynamic(&sc->bge_ev_rx_macctl, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->bge_dev), "rx_macctl");
	evcnt_attach_dynamic(&sc->bge_ev_xoffentered, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->bge_dev), "xoffentered");
#endif /* BGE_EVENT_COUNTERS */
	DPRINTFN(5, ("callout_init\n"));
	/* Periodic tick timer; armed by bge_tick() itself and by bge_init. */
	callout_init(&sc->bge_timeout, 0);
	callout_setfunc(&sc->bge_timeout, bge_tick, sc);

	if (pmf_device_register(self, NULL, NULL))
		pmf_class_network_register(self, ifp);
	else
		aprint_error_dev(self, "couldn't establish power handler\n");

	bge_sysctl_init(sc);

#ifdef BGE_DEBUG
	bge_debug_info(sc);
#endif
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static int
bge_detach(device_t self, int flags __unused)
{
	struct bge_softc * const sc = device_private(self);
	struct ifnet * const ifp = &sc->ethercom.ec_if;
	int s;

	s = splnet();
	/* Stop the interface. Callouts are stopped in it. */
	bge_stop(ifp, 1);
	splx(s);

	mii_detach(&sc->bge_mii, MII_PHY_ANY, MII_OFFSET_ANY);

	/* Detach from the network stack before freeing any resources. */
	ether_ifdetach(ifp);
	if_detach(ifp);

	/* Delete all remaining media. */
	ifmedia_fini(&sc->bge_mii.mii_media);

	bge_release_resources(sc);

	return 0;
}

/*
 * Tear down everything acquired during attach: the sysctl tree, event
 * counters, the interrupt handler, the ring-data DMA memory, and the
 * register and APE BAR mappings.  Each step is individually guarded
 * (NULL pointer / zero size checks), so this is safe to call from a
 * partially-completed attach as well as from detach.
 */
static void
bge_release_resources(struct bge_softc *sc)
{

	/* Detach sysctl */
	if (sc->bge_log != NULL)
		sysctl_teardown(&sc->bge_log);

#ifdef BGE_EVENT_COUNTERS
	/* Detach event counters. */
	evcnt_detach(&sc->bge_ev_intr);
	evcnt_detach(&sc->bge_ev_intr_spurious);
	evcnt_detach(&sc->bge_ev_intr_spurious2);
	evcnt_detach(&sc->bge_ev_tx_xoff);
	evcnt_detach(&sc->bge_ev_tx_xon);
	evcnt_detach(&sc->bge_ev_rx_xoff);
	evcnt_detach(&sc->bge_ev_rx_xon);
	evcnt_detach(&sc->bge_ev_rx_macctl);
	evcnt_detach(&sc->bge_ev_xoffentered);
#endif /* BGE_EVENT_COUNTERS */

	/* Disestablish the interrupt handler */
	if (sc->bge_intrhand != NULL) {
		pci_intr_disestablish(sc->sc_pc, sc->bge_intrhand);
		pci_intr_release(sc->sc_pc, sc->bge_pihp, 1);
		sc->bge_intrhand = NULL;
	}

	/* Unload/unmap/free the ring-data DMA area in reverse setup order. */
	if (sc->bge_dmatag != NULL) {
		bus_dmamap_unload(sc->bge_dmatag, sc->bge_ring_map);
		bus_dmamap_destroy(sc->bge_dmatag, sc->bge_ring_map);
		bus_dmamem_unmap(sc->bge_dmatag, (void *)sc->bge_rdata,
		    sizeof(struct bge_ring_data));
		bus_dmamem_free(sc->bge_dmatag, &sc->bge_ring_seg,
		    sc->bge_ring_rseg);
	}

	/* Unmap the device registers */
	if (sc->bge_bsize != 0) {
		bus_space_unmap(sc->bge_btag, sc->bge_bhandle, sc->bge_bsize);
		sc->bge_bsize = 0;
	}

	/* Unmap the APE registers */
	if (sc->bge_apesize != 0) {
		bus_space_unmap(sc->bge_apetag, sc->bge_apehandle,
		    sc->bge_apesize);
		sc->bge_apesize = 0;
	}
}

/*
 * Perform a global chip reset.  Follows the numbered reset sequences
 * from the Broadcom programming documentation (the "57XX step N" /
 * "5718 reset step N" comments below): save critical PCI config state,
 * arm the firmware handshake magic, issue the core-clock reset, then
 * restore PCI state and wait for firmware initialization to finish.
 * Always returns 0.
 */
static int
bge_reset(struct bge_softc *sc)
{
	uint32_t cachesize, command;
	uint32_t reset, mac_mode, mac_mode_mask;
	pcireg_t devctl, reg;
	int i, val;
	void (*write_op)(struct bge_softc *, int, int);

	/* Make mask for BGE_MAC_MODE register. */
	mac_mode_mask = BGE_MACMODE_HALF_DUPLEX | BGE_MACMODE_PORTMODE;
	if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) != 0)
		mac_mode_mask |= BGE_MACMODE_APE_RX_EN | BGE_MACMODE_APE_TX_EN;
	/* Keep mac_mode_mask's bits of BGE_MAC_MODE register into mac_mode */
	mac_mode = CSR_READ_4(sc, BGE_MAC_MODE) & mac_mode_mask;

	/* Pick the register-write method that survives the reset. */
	if (BGE_IS_575X_PLUS(sc) && !BGE_IS_5714_FAMILY(sc) &&
	    (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5906)) {
		if (sc->bge_flags & BGEF_PCIE)
			write_op = bge_writemem_direct;
		else
			write_op = bge_writemem_ind;
	} else
		write_op = bge_writereg_ind;

	/* 57XX step 4 */
	/* Acquire the NVM lock */
	if ((sc->bge_flags & BGEF_NO_EEPROM) == 0 &&
	    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5700 &&
	    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5701) {
		CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
		/* Poll for the grant: up to 8000 * 20us = 160ms. */
		for (i = 0; i < 8000; i++) {
			if (CSR_READ_4(sc, BGE_NVRAM_SWARB) &
			    BGE_NVRAMSWARB_GNT1)
				break;
			DELAY(20);
		}
		if (i == 8000) {
			printf("%s: NVRAM lock timedout!\n",
			    device_xname(sc->bge_dev));
		}
	}

	/* Take APE lock when performing reset. */
	bge_ape_lock(sc, BGE_APE_LOCK_GRC);

	/* 57XX step 3 */
	/* Save some important PCI state. */
	cachesize = pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CACHESZ);
	/* 5718 reset step 3 */
	command = pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CMD);

	/* 5718 reset step 5, 57XX step 5b-5d */
	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MISC_CTL,
	    BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
	    BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW);

	/* XXX ???: Disable fastboot on controllers that support it. */
	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5752 ||
	    BGE_IS_5755_PLUS(sc))
		CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0);

	/* 5718 reset step 2, 57XX step 6 */
	/*
	 * Write the magic number to SRAM at offset 0xB50.
	 * When firmware finishes its initialization it will
	 * write ~BGE_MAGIC_NUMBER to the same location.
	 */
	bge_writemem_ind(sc, BGE_SRAM_FW_MB, BGE_SRAM_FW_MB_MAGIC);

	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57780) {
		val = CSR_READ_4(sc, BGE_PCIE_LINKCTL);
		val = (val & ~BGE_PCIE_LINKCTL_L1_PLL_PDEN)
		    | BGE_PCIE_LINKCTL_L1_PLL_PDDIS;
		CSR_WRITE_4(sc, BGE_PCIE_LINKCTL, val);
	}

	/* 5718 reset step 6, 57XX step 7 */
	reset = BGE_MISCCFG_RESET_CORE_CLOCKS | BGE_32BITTIME_66MHZ;
	/*
	 * XXX: from FreeBSD/Linux; no documentation
	 */
	if (sc->bge_flags & BGEF_PCIE) {
		if ((BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5785) &&
		    !BGE_IS_57765_PLUS(sc) &&
		    (CSR_READ_4(sc, BGE_PHY_TEST_CTRL_REG) ==
		    (BGE_PHY_PCIE_LTASS_MODE | BGE_PHY_PCIE_SCRAM_MODE))) {
			/* PCI Express 1.0 system */
			CSR_WRITE_4(sc, BGE_PHY_TEST_CTRL_REG,
			    BGE_PHY_PCIE_SCRAM_MODE);
		}
		if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
			/*
			 * Prevent PCI Express link training
			 * during global reset.
			 */
			CSR_WRITE_4(sc, BGE_MISC_CFG, 1 << 29);
			reset |= (1 << 29);
		}
	}

	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
		i = CSR_READ_4(sc, BGE_VCPU_STATUS);
		CSR_WRITE_4(sc, BGE_VCPU_STATUS,
		    i | BGE_VCPU_STATUS_DRV_RESET);
		i = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL);
		CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL,
		    i & ~BGE_VCPU_EXT_CTRL_HALT_CPU);
	}

	/*
	 * Set GPHY Power Down Override to leave GPHY
	 * powered up in D0 uninitialized.
	 */
	if (BGE_IS_5705_PLUS(sc) &&
	    (sc->bge_flags & BGEF_CPMU_PRESENT) == 0)
		reset |= BGE_MISCCFG_GPHY_PD_OVERRIDE;

	/* Issue global reset */
	write_op(sc, BGE_MISC_CFG, reset);

	/* 5718 reset step 7, 57XX step 8 */
	if (sc->bge_flags & BGEF_PCIE)
		delay(100*1000); /* too big */
	else
		delay(1000);

	if (sc->bge_flags & BGEF_PCIE) {
		if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
			DELAY(500000);
			/* XXX: Magic Numbers */
			reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
			    BGE_PCI_UNKNOWN0);
			pci_conf_write(sc->sc_pc, sc->sc_pcitag,
			    BGE_PCI_UNKNOWN0,
			    reg | (1 << 15));
		}
		devctl = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
		    sc->bge_pciecap + PCIE_DCSR);
		/* Clear enable no snoop and disable relaxed ordering. */
		devctl &= ~(PCIE_DCSR_ENA_RELAX_ORD |
		    PCIE_DCSR_ENA_NO_SNOOP);

		/* Set PCIE max payload size to 128 for older PCIe devices */
		if ((sc->bge_flags & BGEF_CPMU_PRESENT) == 0)
			devctl &= ~(0x00e0);
		/* Clear device status register. Write 1b to clear */
		devctl |= PCIE_DCSR_URD | PCIE_DCSR_FED
		    | PCIE_DCSR_NFED | PCIE_DCSR_CED;
		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
		    sc->bge_pciecap + PCIE_DCSR, devctl);
		bge_set_max_readrq(sc);
	}

	/* From Linux: dummy read to flush PCI posted writes */
	reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CMD);

	/*
	 * Reset some of the PCI state that got zapped by reset
	 * To modify the PCISTATE register, BGE_PCIMISCCTL_PCISTATE_RW must be
	 * set, too.
	 */
	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MISC_CTL,
	    BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
	    BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW);
	val = BGE_PCISTATE_ROM_ENABLE | BGE_PCISTATE_ROM_RETRY_ENABLE;
	if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0 &&
	    (sc->bge_flags & BGEF_PCIX) != 0)
		val |= BGE_PCISTATE_RETRY_SAME_DMA;
	if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) != 0)
		val |= BGE_PCISTATE_ALLOW_APE_CTLSPC_WR |
		    BGE_PCISTATE_ALLOW_APE_SHMEM_WR |
		    BGE_PCISTATE_ALLOW_APE_PSPACE_WR;
	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_PCISTATE, val);
	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CACHESZ, cachesize);
	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CMD, command);

	/* 57xx step 11: disable PCI-X Relaxed Ordering. */
	if (sc->bge_flags & BGEF_PCIX) {
		reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, sc->bge_pcixcap
		    + PCIX_CMD);
		/* Set max memory read byte count to 2K */
		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703) {
			reg &= ~PCIX_CMD_BYTECNT_MASK;
			reg |= PCIX_CMD_BCNT_2048;
		} else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704){
			/*
			 * For 5704, set max outstanding split transaction
			 * field to 0 (0 means it supports 1 request)
			 */
			reg &= ~(PCIX_CMD_SPLTRANS_MASK
			    | PCIX_CMD_BYTECNT_MASK);
			reg |= PCIX_CMD_BCNT_2048;
		}
		pci_conf_write(sc->sc_pc, sc->sc_pcitag, sc->bge_pcixcap
		    + PCIX_CMD, reg & ~PCIX_CMD_RELAXED_ORDER);
	}

	/* 5718 reset step 10, 57XX step 12 */
	/* Enable memory arbiter. */
	if (BGE_IS_5714_FAMILY(sc)) {
		val = CSR_READ_4(sc, BGE_MARB_MODE);
		CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val);
	} else
		CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);

	/* XXX 5721, 5751 and 5752 */
	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5750) {
		/* Step 19: */
		BGE_SETBIT(sc, BGE_TLP_CONTROL_REG, 1 << 29 | 1 << 25);
		/* Step 20: */
		BGE_SETBIT(sc, BGE_TLP_CONTROL_REG, BGE_TLP_DATA_FIFO_PROTECT);
	}

	/* 5718 reset step 12, 57XX step 15 and 16 */
	/* Fix up byte swapping */
	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS);

	/* 5718 reset step 13, 57XX step 17 */
	/* Poll until the firmware initialization is complete */
	bge_poll_fw(sc);

	/* 57XX step 21 */
	if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5704_BX) {
		pcireg_t msidata;

		msidata = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
		    BGE_PCI_MSI_DATA);
		msidata |= ((1 << 13 | 1 << 12 | 1 << 10) << 16);
		pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MSI_DATA,
		    msidata);
	}

	/* 57XX step 18 */
	/* Write mac mode. */
	val = CSR_READ_4(sc, BGE_MAC_MODE);
	/* Restore mac_mode_mask's bits using mac_mode */
	val = (val & ~mac_mode_mask) | mac_mode;
	CSR_WRITE_4_FLUSH(sc, BGE_MAC_MODE, val);
	DELAY(40);

	bge_ape_unlock(sc, BGE_APE_LOCK_GRC);

	/*
	 * The 5704 in TBI mode apparently needs some special
	 * adjustment to insure the SERDES drive level is set
	 * to 1.2V.
	 */
	if (sc->bge_flags & BGEF_FIBER_TBI &&
	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
		uint32_t serdescfg;

		serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG);
		serdescfg = (serdescfg & ~0xFFF) | 0x880;
		CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg);
	}

	if (sc->bge_flags & BGEF_PCIE &&
	    !BGE_IS_57765_PLUS(sc) &&
	    sc->bge_chipid != BGE_CHIPID_BCM5750_A0 &&
	    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5785) {
		uint32_t v;

		/* Enable PCI Express bug fix */
		v = CSR_READ_4(sc, BGE_TLP_CONTROL_REG);
		CSR_WRITE_4(sc, BGE_TLP_CONTROL_REG,
		    v | BGE_TLP_DATA_FIFO_PROTECT);
	}

	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720)
		BGE_CLRBIT(sc, BGE_CPMU_CLCK_ORIDE,
		    CPMU_CLCK_ORIDE_MAC_ORIDE_EN);

	return 0;
}

/*
 * Frame reception handling. This is called if there's a frame
 * on the receive return list.
4376 * 4377 * Note: we have to be able to handle two possibilities here: 4378 * 1) the frame is from the jumbo receive ring 4379 * 2) the frame is from the standard receive ring 4380 */ 4381 4382 static void 4383 bge_rxeof(struct bge_softc *sc) 4384 { 4385 struct ifnet * const ifp = &sc->ethercom.ec_if; 4386 uint16_t rx_prod, rx_cons; 4387 int stdcnt = 0, jumbocnt = 0; 4388 bus_dmamap_t dmamap; 4389 bus_addr_t offset, toff; 4390 bus_size_t tlen; 4391 int tosync; 4392 4393 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 4394 offsetof(struct bge_ring_data, bge_status_block), 4395 sizeof(struct bge_status_block), 4396 BUS_DMASYNC_POSTREAD); 4397 4398 rx_cons = sc->bge_rx_saved_considx; 4399 rx_prod = sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx; 4400 4401 /* Nothing to do */ 4402 if (rx_cons == rx_prod) 4403 return; 4404 4405 offset = offsetof(struct bge_ring_data, bge_rx_return_ring); 4406 tosync = rx_prod - rx_cons; 4407 4408 if (tosync != 0) 4409 rnd_add_uint32(&sc->rnd_source, tosync); 4410 4411 toff = offset + (rx_cons * sizeof(struct bge_rx_bd)); 4412 4413 if (tosync < 0) { 4414 tlen = (sc->bge_return_ring_cnt - rx_cons) * 4415 sizeof(struct bge_rx_bd); 4416 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 4417 toff, tlen, BUS_DMASYNC_POSTREAD); 4418 tosync = -tosync; 4419 } 4420 4421 if (tosync != 0) { 4422 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 4423 offset, tosync * sizeof(struct bge_rx_bd), 4424 BUS_DMASYNC_POSTREAD); 4425 } 4426 4427 while (rx_cons != rx_prod) { 4428 struct bge_rx_bd *cur_rx; 4429 uint32_t rxidx; 4430 struct mbuf *m = NULL; 4431 4432 cur_rx = &sc->bge_rdata->bge_rx_return_ring[rx_cons]; 4433 4434 rxidx = cur_rx->bge_idx; 4435 BGE_INC(rx_cons, sc->bge_return_ring_cnt); 4436 4437 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) { 4438 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT); 4439 m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx]; 4440 sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL; 4441 jumbocnt++; 4442 
bus_dmamap_sync(sc->bge_dmatag, 4443 sc->bge_cdata.bge_rx_jumbo_map, 4444 mtod(m, char *) - (char *)sc->bge_cdata.bge_jumbo_buf, 4445 BGE_JLEN, BUS_DMASYNC_POSTREAD); 4446 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) { 4447 if_statinc(ifp, if_ierrors); 4448 bge_newbuf_jumbo(sc, sc->bge_jumbo, m); 4449 continue; 4450 } 4451 if (bge_newbuf_jumbo(sc, sc->bge_jumbo, 4452 NULL) == ENOBUFS) { 4453 if_statinc(ifp, if_ierrors); 4454 bge_newbuf_jumbo(sc, sc->bge_jumbo, m); 4455 continue; 4456 } 4457 } else { 4458 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT); 4459 m = sc->bge_cdata.bge_rx_std_chain[rxidx]; 4460 4461 sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL; 4462 stdcnt++; 4463 dmamap = sc->bge_cdata.bge_rx_std_map[rxidx]; 4464 sc->bge_cdata.bge_rx_std_map[rxidx] = NULL; 4465 if (dmamap == NULL) { 4466 if_statinc(ifp, if_ierrors); 4467 bge_newbuf_std(sc, sc->bge_std, m, dmamap); 4468 continue; 4469 } 4470 bus_dmamap_sync(sc->bge_dmatag, dmamap, 0, 4471 dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD); 4472 bus_dmamap_unload(sc->bge_dmatag, dmamap); 4473 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) { 4474 if_statinc(ifp, if_ierrors); 4475 bge_newbuf_std(sc, sc->bge_std, m, dmamap); 4476 continue; 4477 } 4478 if (bge_newbuf_std(sc, sc->bge_std, 4479 NULL, dmamap) == ENOBUFS) { 4480 if_statinc(ifp, if_ierrors); 4481 bge_newbuf_std(sc, sc->bge_std, m, dmamap); 4482 continue; 4483 } 4484 } 4485 4486 #ifndef __NO_STRICT_ALIGNMENT 4487 /* 4488 * XXX: if the 5701 PCIX-Rx-DMA workaround is in effect, 4489 * the Rx buffer has the layer-2 header unaligned. 4490 * If our CPU requires alignment, re-align by copying. 
4491 */ 4492 if (sc->bge_flags & BGEF_RX_ALIGNBUG) { 4493 memmove(mtod(m, char *) + ETHER_ALIGN, m->m_data, 4494 cur_rx->bge_len); 4495 m->m_data += ETHER_ALIGN; 4496 } 4497 #endif 4498 4499 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN; 4500 m_set_rcvif(m, ifp); 4501 4502 bge_rxcsum(sc, cur_rx, m); 4503 4504 /* 4505 * If we received a packet with a vlan tag, pass it 4506 * to vlan_input() instead of ether_input(). 4507 */ 4508 if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) 4509 vlan_set_tag(m, cur_rx->bge_vlan_tag); 4510 4511 if_percpuq_enqueue(ifp->if_percpuq, m); 4512 } 4513 4514 sc->bge_rx_saved_considx = rx_cons; 4515 bge_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx); 4516 if (stdcnt) 4517 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std); 4518 if (jumbocnt) 4519 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo); 4520 } 4521 4522 static void 4523 bge_rxcsum(struct bge_softc *sc, struct bge_rx_bd *cur_rx, struct mbuf *m) 4524 { 4525 4526 if (BGE_IS_57765_PLUS(sc)) { 4527 if ((cur_rx->bge_flags & BGE_RXBDFLAG_IPV6) == 0) { 4528 if ((cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) != 0) 4529 m->m_pkthdr.csum_flags = M_CSUM_IPv4; 4530 if ((cur_rx->bge_error_flag & 4531 BGE_RXERRFLAG_IP_CSUM_NOK) != 0) 4532 m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD; 4533 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) { 4534 m->m_pkthdr.csum_data = 4535 cur_rx->bge_tcp_udp_csum; 4536 m->m_pkthdr.csum_flags |= 4537 (M_CSUM_TCPv4 | M_CSUM_UDPv4 |M_CSUM_DATA); 4538 } 4539 } 4540 } else { 4541 if ((cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) != 0) 4542 m->m_pkthdr.csum_flags = M_CSUM_IPv4; 4543 if ((cur_rx->bge_ip_csum ^ 0xffff) != 0) 4544 m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD; 4545 /* 4546 * Rx transport checksum-offload may also 4547 * have bugs with packets which, when transmitted, 4548 * were `runts' requiring padding. 
4549 */ 4550 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM && 4551 (/* (sc->_bge_quirks & BGE_QUIRK_SHORT_CKSUM_BUG) == 0 ||*/ 4552 m->m_pkthdr.len >= ETHER_MIN_NOPAD)) { 4553 m->m_pkthdr.csum_data = 4554 cur_rx->bge_tcp_udp_csum; 4555 m->m_pkthdr.csum_flags |= 4556 (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_DATA); 4557 } 4558 } 4559 } 4560 4561 static void 4562 bge_txeof(struct bge_softc *sc) 4563 { 4564 struct ifnet * const ifp = &sc->ethercom.ec_if; 4565 struct bge_tx_bd *cur_tx = NULL; 4566 struct txdmamap_pool_entry *dma; 4567 bus_addr_t offset, toff; 4568 bus_size_t tlen; 4569 int tosync; 4570 struct mbuf *m; 4571 4572 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 4573 offsetof(struct bge_ring_data, bge_status_block), 4574 sizeof(struct bge_status_block), 4575 BUS_DMASYNC_POSTREAD); 4576 4577 offset = offsetof(struct bge_ring_data, bge_tx_ring); 4578 tosync = sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx - 4579 sc->bge_tx_saved_considx; 4580 4581 if (tosync != 0) 4582 rnd_add_uint32(&sc->rnd_source, tosync); 4583 4584 toff = offset + (sc->bge_tx_saved_considx * sizeof(struct bge_tx_bd)); 4585 4586 if (tosync < 0) { 4587 tlen = (BGE_TX_RING_CNT - sc->bge_tx_saved_considx) * 4588 sizeof(struct bge_tx_bd); 4589 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 4590 toff, tlen, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 4591 tosync = -tosync; 4592 } 4593 4594 if (tosync != 0) { 4595 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 4596 offset, tosync * sizeof(struct bge_tx_bd), 4597 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 4598 } 4599 4600 /* 4601 * Go through our tx ring and free mbufs for those 4602 * frames that have been sent. 
4603 */ 4604 while (sc->bge_tx_saved_considx != 4605 sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx) { 4606 uint32_t idx = sc->bge_tx_saved_considx; 4607 cur_tx = &sc->bge_rdata->bge_tx_ring[idx]; 4608 if (cur_tx->bge_flags & BGE_TXBDFLAG_END) 4609 if_statinc(ifp, if_opackets); 4610 m = sc->bge_cdata.bge_tx_chain[idx]; 4611 if (m != NULL) { 4612 sc->bge_cdata.bge_tx_chain[idx] = NULL; 4613 dma = sc->txdma[idx]; 4614 if (dma->is_dma32) { 4615 bus_dmamap_sync(sc->bge_dmatag32, dma->dmamap32, 4616 0, dma->dmamap32->dm_mapsize, 4617 BUS_DMASYNC_POSTWRITE); 4618 bus_dmamap_unload( 4619 sc->bge_dmatag32, dma->dmamap32); 4620 } else { 4621 bus_dmamap_sync(sc->bge_dmatag, dma->dmamap, 4622 0, dma->dmamap->dm_mapsize, 4623 BUS_DMASYNC_POSTWRITE); 4624 bus_dmamap_unload(sc->bge_dmatag, dma->dmamap); 4625 } 4626 SLIST_INSERT_HEAD(&sc->txdma_list, dma, link); 4627 sc->txdma[idx] = NULL; 4628 4629 m_freem(m); 4630 } 4631 sc->bge_txcnt--; 4632 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT); 4633 ifp->if_timer = 0; 4634 } 4635 4636 if (cur_tx != NULL) 4637 ifp->if_flags &= ~IFF_OACTIVE; 4638 } 4639 4640 static int 4641 bge_intr(void *xsc) 4642 { 4643 struct bge_softc * const sc = xsc; 4644 struct ifnet * const ifp = &sc->ethercom.ec_if; 4645 uint32_t pcistate, statusword, statustag; 4646 uint32_t intrmask = BGE_PCISTATE_INTR_NOT_ACTIVE; 4647 4648 4649 /* 5717 and newer chips have no BGE_PCISTATE_INTR_NOT_ACTIVE bit */ 4650 if (BGE_IS_5717_PLUS(sc)) 4651 intrmask = 0; 4652 4653 /* 4654 * It is possible for the interrupt to arrive before 4655 * the status block is updated prior to the interrupt. 4656 * Reading the PCI State register will confirm whether the 4657 * interrupt is ours and will flush the status block. 
4658 */ 4659 pcistate = CSR_READ_4(sc, BGE_PCI_PCISTATE); 4660 4661 /* read status word from status block */ 4662 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 4663 offsetof(struct bge_ring_data, bge_status_block), 4664 sizeof(struct bge_status_block), 4665 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 4666 statusword = sc->bge_rdata->bge_status_block.bge_status; 4667 statustag = sc->bge_rdata->bge_status_block.bge_status_tag << 24; 4668 4669 if (sc->bge_flags & BGEF_TAGGED_STATUS) { 4670 if (sc->bge_lasttag == statustag && 4671 (~pcistate & intrmask)) { 4672 BGE_EVCNT_INCR(sc->bge_ev_intr_spurious); 4673 return 0; 4674 } 4675 sc->bge_lasttag = statustag; 4676 } else { 4677 if (!(statusword & BGE_STATFLAG_UPDATED) && 4678 !(~pcistate & intrmask)) { 4679 BGE_EVCNT_INCR(sc->bge_ev_intr_spurious2); 4680 return 0; 4681 } 4682 statustag = 0; 4683 } 4684 /* Ack interrupt and stop others from occurring. */ 4685 bge_writembx_flush(sc, BGE_MBX_IRQ0_LO, 1); 4686 BGE_EVCNT_INCR(sc->bge_ev_intr); 4687 4688 /* clear status word */ 4689 sc->bge_rdata->bge_status_block.bge_status = 0; 4690 4691 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 4692 offsetof(struct bge_ring_data, bge_status_block), 4693 sizeof(struct bge_status_block), 4694 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 4695 4696 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 || 4697 statusword & BGE_STATFLAG_LINKSTATE_CHANGED || 4698 BGE_STS_BIT(sc, BGE_STS_LINK_EVT)) 4699 bge_link_upd(sc); 4700 4701 if (ifp->if_flags & IFF_RUNNING) { 4702 /* Check RX return ring producer/consumer */ 4703 bge_rxeof(sc); 4704 4705 /* Check TX ring producer/consumer */ 4706 bge_txeof(sc); 4707 } 4708 4709 if (sc->bge_pending_rxintr_change) { 4710 uint32_t rx_ticks = sc->bge_rx_coal_ticks; 4711 uint32_t rx_bds = sc->bge_rx_max_coal_bds; 4712 4713 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, rx_ticks); 4714 DELAY(10); 4715 (void)CSR_READ_4(sc, BGE_HCC_RX_COAL_TICKS); 4716 4717 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, rx_bds); 
		DELAY(10);
		(void)CSR_READ_4(sc, BGE_HCC_RX_MAX_COAL_BDS);

		sc->bge_pending_rxintr_change = 0;
	}
	bge_handle_events(sc);

	/* Re-enable interrupts. */
	bge_writembx_flush(sc, BGE_MBX_IRQ0_LO, statustag);

	if (ifp->if_flags & IFF_RUNNING)
		if_schedule_deferred_start(ifp);

	return 1;
}

/*
 * Send a "driver alive" heartbeat to the on-chip ASF/IPMI firmware.
 * Called once per second from bge_tick(); the countdown makes the
 * actual mailbox write happen approximately every 2 seconds.
 */
static void
bge_asf_driver_up(struct bge_softc *sc)
{
	if (sc->bge_asf_mode & ASF_STACKUP) {
		/* Send ASF heartbeat aprox. every 2s */
		if (sc->bge_asf_count)
			sc->bge_asf_count --;
		else {
			sc->bge_asf_count = 2;

			bge_wait_for_event_ack(sc);

			bge_writemem_ind(sc, BGE_SRAM_FW_CMD_MB,
			    BGE_FW_CMD_DRV_ALIVE3);
			bge_writemem_ind(sc, BGE_SRAM_FW_CMD_LEN_MB, 4);
			bge_writemem_ind(sc, BGE_SRAM_FW_CMD_DATA_MB,
			    BGE_FW_HB_TIMEOUT_SEC);
			/* Poke the RX CPU so firmware notices the event. */
			CSR_WRITE_4_FLUSH(sc, BGE_RX_CPU_EVENT,
			    CSR_READ_4(sc, BGE_RX_CPU_EVENT) |
			    BGE_RX_CPU_DRV_EVENT);
		}
	}
}

/*
 * Once-a-second timer callout: update statistics counters, poll link
 * state (manually for TBI fiber, via MII otherwise), send the ASF
 * heartbeat, and reschedule unless the device is detaching.
 */
static void
bge_tick(void *xsc)
{
	struct bge_softc * const sc = xsc;
	struct mii_data * const mii = &sc->bge_mii;
	int s;

	s = splnet();

	if (BGE_IS_5705_PLUS(sc))
		bge_stats_update_regs(sc);
	else
		bge_stats_update(sc);

	if (sc->bge_flags & BGEF_FIBER_TBI) {
		/*
		 * Since in TBI mode auto-polling can't be used we should poll
		 * link status manually. Here we register pending link event
		 * and trigger interrupt.
		 */
		BGE_STS_SETBIT(sc, BGE_STS_LINK_EVT);
		BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
	} else {
		/*
		 * Do not touch PHY if we have link up. This could break
		 * IPMI/ASF mode or produce extra input errors.
		 * (extra input errors were reported for bcm5701 & bcm5704).
		 */
		if (!BGE_STS_BIT(sc, BGE_STS_LINK))
			mii_tick(mii);
	}

	bge_asf_driver_up(sc);

	if (!sc->bge_detaching)
		callout_schedule(&sc->bge_timeout, hz);

	splx(s);
}

/*
 * Update interface counters from the on-chip statistics registers.
 * Used on 5705-and-later chips (see bge_tick), which expose the MAC
 * statistics through register reads rather than a shared-memory block.
 */
static void
bge_stats_update_regs(struct bge_softc *sc)
{
	struct ifnet *const ifp = &sc->ethercom.ec_if;

	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);

	if_statadd_ref(nsr, if_collisions,
	    CSR_READ_4(sc, BGE_MAC_STATS +
	    offsetof(struct bge_mac_stats_regs, etherStatsCollisions)));

	/*
	 * On BCM5717, BCM5718, BCM5719 A0 and BCM5720 A0,
	 * RXLP_LOCSTAT_IFIN_DROPS includes unwanted multicast frames
	 * (silicon bug). There's no reliable workaround so just
	 * ignore the counter
	 */
	if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5717 &&
	    sc->bge_chipid != BGE_CHIPID_BCM5719_A0 &&
	    sc->bge_chipid != BGE_CHIPID_BCM5720_A0) {
		if_statadd_ref(nsr, if_ierrors,
		    CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS));
	}
	if_statadd_ref(nsr, if_ierrors,
	    CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS));
	if_statadd_ref(nsr, if_ierrors,
	    CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS));

	IF_STAT_PUTREF(ifp);

	if (sc->bge_flags & BGEF_RDMA_BUG) {
		uint32_t val, ucast, mcast, bcast;

		ucast = CSR_READ_4(sc, BGE_MAC_STATS +
		    offsetof(struct bge_mac_stats_regs, ifHCOutUcastPkts));
		mcast = CSR_READ_4(sc, BGE_MAC_STATS +
		    offsetof(struct bge_mac_stats_regs, ifHCOutMulticastPkts));
		bcast = CSR_READ_4(sc, BGE_MAC_STATS +
		    offsetof(struct bge_mac_stats_regs, ifHCOutBroadcastPkts));

		/*
		 * If controller transmitted more than BGE_NUM_RDMA_CHANNELS
		 * frames, it's safe to disable workaround for DMA engine's
		 * miscalculation of TXMBUF space.
		 */
		if (ucast + mcast + bcast > BGE_NUM_RDMA_CHANNELS) {
			val = CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL);
			if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719)
				val &= ~BGE_RDMA_TX_LENGTH_WA_5719;
			else
				val &= ~BGE_RDMA_TX_LENGTH_WA_5720;
			CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL, val);
			sc->bge_flags &= ~BGEF_RDMA_BUG;
		}
	}
}

/*
 * Update interface counters from the NIC statistics block that the
 * chip DMAs into shared memory. Used on pre-5705 chips (see bge_tick).
 * Collisions are kept as a running total in sc->bge_if_collisions so
 * only the delta since the last poll is added to the ifnet counter.
 */
static void
bge_stats_update(struct bge_softc *sc)
{
	struct ifnet * const ifp = &sc->ethercom.ec_if;
	bus_size_t stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;

#define READ_STAT(sc, stats, stat) \
	CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))

	uint64_t collisions =
	    (READ_STAT(sc, stats, dot3StatsSingleCollisionFrames.bge_addr_lo) +
	     READ_STAT(sc, stats, dot3StatsMultipleCollisionFrames.bge_addr_lo) +
	     READ_STAT(sc, stats, dot3StatsExcessiveCollisions.bge_addr_lo) +
	     READ_STAT(sc, stats, dot3StatsLateCollisions.bge_addr_lo));

	if_statadd(ifp, if_collisions, collisions - sc->bge_if_collisions);
	sc->bge_if_collisions = collisions;


	BGE_EVCNT_UPD(sc->bge_ev_tx_xoff,
	    READ_STAT(sc, stats, outXoffSent.bge_addr_lo));
	BGE_EVCNT_UPD(sc->bge_ev_tx_xon,
	    READ_STAT(sc, stats, outXonSent.bge_addr_lo));
	BGE_EVCNT_UPD(sc->bge_ev_rx_xoff,
	    READ_STAT(sc, stats,
	    xoffPauseFramesReceived.bge_addr_lo));
	BGE_EVCNT_UPD(sc->bge_ev_rx_xon,
	    READ_STAT(sc, stats, xonPauseFramesReceived.bge_addr_lo));
	BGE_EVCNT_UPD(sc->bge_ev_rx_macctl,
	    READ_STAT(sc, stats,
	    macControlFramesReceived.bge_addr_lo));
	BGE_EVCNT_UPD(sc->bge_ev_xoffentered,
	    READ_STAT(sc, stats, xoffStateEntered.bge_addr_lo));

#undef READ_STAT

#ifdef notdef
	ifp->if_collisions +=
	    (sc->bge_rdata->bge_info.bge_stats.dot3StatsSingleCollisionFrames +
	     sc->bge_rdata->bge_info.bge_stats.dot3StatsMultipleCollisionFrames +
	     sc->bge_rdata->bge_info.bge_stats.dot3StatsExcessiveCollisions +
	     sc->bge_rdata->bge_info.bge_stats.dot3StatsLateCollisions) -
	    ifp->if_collisions;
#endif
}

/*
 * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason.
 * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD,
 * but when such padded frames employ the bge IP/TCP checksum offload,
 * the hardware checksum assist gives incorrect results (possibly
 * from incorporating its own padding into the UDP/TCP checksum; who knows).
 * If we pad such runts with zeros, the onboard checksum comes out correct.
 *
 * Returns 0 on success, ENOBUFS if a needed mbuf could not be allocated.
 */
static inline int
bge_cksum_pad(struct mbuf *pkt)
{
	struct mbuf *last = NULL;
	int padlen;

	padlen = ETHER_MIN_NOPAD - pkt->m_pkthdr.len;

	/* if there's only the packet-header and we can pad there, use it. */
	if (pkt->m_pkthdr.len == pkt->m_len &&
	    M_TRAILINGSPACE(pkt) >= padlen) {
		last = pkt;
	} else {
		/*
		 * Walk packet chain to find last mbuf. We will either
		 * pad there, or append a new mbuf and pad it
		 * (thus perhaps avoiding the bcm5700 dma-min bug).
		 */
		for (last = pkt; last->m_next != NULL; last = last->m_next) {
			continue; /* do nothing */
		}

		/* `last' now points to last in chain. */
		if (M_TRAILINGSPACE(last) < padlen) {
			/* Allocate new empty mbuf, pad it. Compact later. */
			struct mbuf *n;
			MGET(n, M_DONTWAIT, MT_DATA);
			if (n == NULL)
				return ENOBUFS;
			n->m_len = 0;
			last->m_next = n;
			last = n;
		}
	}

	KDASSERT(!M_READONLY(last));
	KDASSERT(M_TRAILINGSPACE(last) >= padlen);

	/* Now zero the pad area, to avoid the bge cksum-assist bug */
	memset(mtod(last, char *) + last->m_len, 0, padlen);
	last->m_len += padlen;
	pkt->m_pkthdr.len += padlen;
	return 0;
}

/*
 * Compact outbound packets to avoid bug with DMA segments less than 8 bytes.
 * Returns 0 on success, ENOBUFS if a needed mbuf could not be allocated.
 */
static inline int
bge_compact_dma_runt(struct mbuf *pkt)
{
	struct mbuf *m, *prev;
	int totlen;

	prev = NULL;
	totlen = 0;

	for (m = pkt; m != NULL; prev = m, m = m->m_next) {
		int mlen = m->m_len;
		int shortfall = 8 - mlen ;

		totlen += mlen;
		if (mlen == 0)
			continue;
		if (mlen >= 8)
			continue;

		/*
		 * If we get here, mbuf data is too small for DMA engine.
		 * Try to fix by shuffling data to prev or next in chain.
		 * If that fails, do a compacting deep-copy of the whole chain.
		 */

		/* Internal frag. If fits in prev, copy it there. */
		if (prev && M_TRAILINGSPACE(prev) >= m->m_len) {
			memcpy(prev->m_data + prev->m_len, m->m_data, mlen);
			prev->m_len += mlen;
			m->m_len = 0;
			/* XXX stitch chain */
			prev->m_next = m_free(m);
			m = prev;
			continue;
		} else if (m->m_next != NULL &&
		    M_TRAILINGSPACE(m) >= shortfall &&
		    m->m_next->m_len >= (8 + shortfall)) {
			/* m is writable and has enough data in next; pull up. */

			memcpy(m->m_data + m->m_len, m->m_next->m_data,
			    shortfall);
			m->m_len += shortfall;
			m->m_next->m_len -= shortfall;
			m->m_next->m_data += shortfall;
		} else if (m->m_next == NULL || 1) {
			/*
			 * XXX the "|| 1" makes this branch handle every
			 * remaining case, not just a trailing runt.
			 *
			 * Got a runt at the very end of the packet.
5004 * borrow data from the tail of the preceding mbuf and 5005 * update its length in-place. (The original data is 5006 * still valid, so we can do this even if prev is not 5007 * writable.) 5008 */ 5009 5010 /* 5011 * If we'd make prev a runt, just move all of its data. 5012 */ 5013 KASSERT(prev != NULL /*, ("runt but null PREV")*/); 5014 KASSERT(prev->m_len >= 8 /*, ("runt prev")*/); 5015 5016 if ((prev->m_len - shortfall) < 8) 5017 shortfall = prev->m_len; 5018 5019 #ifdef notyet /* just do the safe slow thing for now */ 5020 if (!M_READONLY(m)) { 5021 if (M_LEADINGSPACE(m) < shorfall) { 5022 void *m_dat; 5023 m_dat = M_BUFADDR(m); 5024 memmove(m_dat, mtod(m, void*), 5025 m->m_len); 5026 m->m_data = m_dat; 5027 } 5028 } else 5029 #endif /* just do the safe slow thing */ 5030 { 5031 struct mbuf * n = NULL; 5032 int newprevlen = prev->m_len - shortfall; 5033 5034 MGET(n, M_NOWAIT, MT_DATA); 5035 if (n == NULL) 5036 return ENOBUFS; 5037 KASSERT(m->m_len + shortfall < MLEN 5038 /*, 5039 ("runt %d +prev %d too big\n", m->m_len, shortfall)*/); 5040 5041 /* first copy the data we're stealing from prev */ 5042 memcpy(n->m_data, prev->m_data + newprevlen, 5043 shortfall); 5044 5045 /* update prev->m_len accordingly */ 5046 prev->m_len -= shortfall; 5047 5048 /* copy data from runt m */ 5049 memcpy(n->m_data + shortfall, m->m_data, 5050 m->m_len); 5051 5052 /* n holds what we stole from prev, plus m */ 5053 n->m_len = shortfall + m->m_len; 5054 5055 /* stitch n into chain and free m */ 5056 n->m_next = m->m_next; 5057 prev->m_next = n; 5058 /* KASSERT(m->m_next == NULL); */ 5059 m->m_next = NULL; 5060 m_free(m); 5061 m = n; /* for continuing loop */ 5062 } 5063 } 5064 } 5065 return 0; 5066 } 5067 5068 /* 5069 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data 5070 * pointers to descriptors. 
 *
 * Returns 0 on success; ENOBUFS (after setting IFF_OACTIVE when the
 * ring/DMA-map pool is exhausted) or the bus_dmamap_load_mbuf() error
 * otherwise. On success *txidx is advanced past the descriptors used.
 */
static int
bge_encap(struct bge_softc *sc, struct mbuf *m_head, uint32_t *txidx)
{
	struct ifnet * const ifp = &sc->ethercom.ec_if;
	struct bge_tx_bd *f, *prev_f;
	uint32_t frag, cur;
	uint16_t csum_flags = 0;
	uint16_t txbd_tso_flags = 0;
	struct txdmamap_pool_entry *dma;
	bus_dmamap_t dmamap;
	bus_dma_tag_t dmatag;
	int i = 0;
	int use_tso, maxsegsize, error;
	bool have_vtag;
	uint16_t vtag;
	bool remap;

	/* Translate the requested software offloads into descriptor flags. */
	if (m_head->m_pkthdr.csum_flags) {
		if (m_head->m_pkthdr.csum_flags & M_CSUM_IPv4)
			csum_flags |= BGE_TXBDFLAG_IP_CSUM;
		if (m_head->m_pkthdr.csum_flags & (M_CSUM_TCPv4 |M_CSUM_UDPv4))
			csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
	}

	/*
	 * If we were asked to do an outboard checksum, and the NIC
	 * has the bug where it sometimes adds in the Ethernet padding,
	 * explicitly pad with zeros so the cksum will be correct either way.
	 * (For now, do this for all chip versions, until newer
	 * are confirmed to not require the workaround.)
	 */
	if ((csum_flags & BGE_TXBDFLAG_TCP_UDP_CSUM) == 0 ||
#ifdef notyet
	    (sc->bge_quirks & BGE_QUIRK_SHORT_CKSUM_BUG) == 0 ||
#endif
	    m_head->m_pkthdr.len >= ETHER_MIN_NOPAD)
		goto check_dma_bug;

	if (bge_cksum_pad(m_head) != 0)
		return ENOBUFS;

check_dma_bug:
	if (!(BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX))
		goto doit;

	/*
	 * bcm5700 Revision B silicon cannot handle DMA descriptors with
	 * less than eight bytes. If we encounter a teeny mbuf
	 * at the end of a chain, we can pad. Otherwise, copy.
	 */
	if (bge_compact_dma_runt(m_head) != 0)
		return ENOBUFS;

doit:
	dma = SLIST_FIRST(&sc->txdma_list);
	if (dma == NULL) {
		/* No free DMA maps: ask for the ring to drain. */
		ifp->if_flags |= IFF_OACTIVE;
		return ENOBUFS;
	}
	dmamap = dma->dmamap;
	dmatag = sc->bge_dmatag;
	dma->is_dma32 = false;

	/*
	 * Set up any necessary TSO state before we start packing...
	 */
	use_tso = (m_head->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
	if (!use_tso) {
		maxsegsize = 0;
	} else {	/* TSO setup */
		unsigned mss;
		struct ether_header *eh;
		unsigned ip_tcp_hlen, iptcp_opt_words, tcp_seg_flags, offset;
		unsigned bge_hlen;
		struct mbuf * m0 = m_head;
		struct ip *ip;
		struct tcphdr *th;
		int iphl, hlen;

		/*
		 * XXX It would be nice if the mbuf pkthdr had offset
		 * fields for the protocol headers.
		 */

		eh = mtod(m0, struct ether_header *);
		/*
		 * htons() here acts as a byte-swap of the wire-order
		 * ether_type so it can be compared against the host-order
		 * ETHERTYPE_* constants (ntohs() would read identically).
		 */
		switch (htons(eh->ether_type)) {
		case ETHERTYPE_IP:
			offset = ETHER_HDR_LEN;
			break;

		case ETHERTYPE_VLAN:
			offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
			break;

		default:
			/*
			 * Don't support this protocol or encapsulation.
			 */
			return ENOBUFS;
		}

		/*
		 * TCP/IP headers are in the first mbuf; we can do
		 * this the easy way.
		 */
		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
		hlen = iphl + offset;
		if (__predict_false(m0->m_len <
		    (hlen + sizeof(struct tcphdr)))) {

			aprint_error_dev(sc->bge_dev,
			    "TSO: hard case m0->m_len == %d < ip/tcp hlen %zd,"
			    "not handled yet\n",
			    m0->m_len, hlen+ sizeof(struct tcphdr));
#ifdef NOTYET
			/*
			 * XXX jonathan@NetBSD.org: untested.
			 * how to force this branch to be taken?
			 */
			BGE_EVCNT_INCR(sc->bge_ev_txtsopain);

			m_copydata(m0, offset, sizeof(ip), &ip);
			m_copydata(m0, hlen, sizeof(th), &th);

			ip.ip_len = 0;

			m_copyback(m0, hlen + offsetof(struct ip, ip_len),
			    sizeof(ip.ip_len), &ip.ip_len);

			th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
			    ip.ip_dst.s_addr, htons(IPPROTO_TCP));

			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
			    sizeof(th.th_sum), &th.th_sum);

			hlen += th.th_off << 2;
			iptcp_opt_words	= hlen;
#else
			/*
			 * if_wm "hard" case not yet supported, can we not
			 * mandate it out of existence?
			 */
			(void) ip; (void)th; (void) ip_tcp_hlen;

			return ENOBUFS;
#endif
		} else {
			ip = (struct ip *) (mtod(m0, char *) + offset);
			th = (struct tcphdr *) (mtod(m0, char *) + hlen);
			ip_tcp_hlen = iphl + (th->th_off << 2);

			/* Total IP/TCP options, in 32-bit words */
			iptcp_opt_words = (ip_tcp_hlen
			    - sizeof(struct tcphdr)
			    - sizeof(struct ip)) >> 2;
		}
		if (BGE_IS_575X_PLUS(sc)) {
			th->th_sum = 0;
			csum_flags = 0;
		} else {
			/*
			 * XXX jonathan@NetBSD.org: 5705 untested.
			 * Requires TSO firmware patch for 5701/5703/5704.
			 */
			th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
			    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
		}

		mss = m_head->m_pkthdr.segsz;
		txbd_tso_flags |=
		    BGE_TXBDFLAG_CPU_PRE_DMA |
		    BGE_TXBDFLAG_CPU_POST_DMA;

		/*
		 * Our NIC TSO-assist assumes TSO has standard, optionless
		 * IPv4 and TCP headers, which total 40 bytes. By default,
		 * the NIC copies 40 bytes of IP/TCP header from the
		 * supplied header into the IP/TCP header portion of
		 * each post-TSO-segment. If the supplied packet has IP or
		 * TCP options, we need to tell the NIC to copy those extra
		 * bytes into each post-TSO header, in addition to the normal
		 * 40-byte IP/TCP header (and to leave space accordingly).
		 * Unfortunately, the driver encoding of option length
		 * varies across different ASIC families.
		 */
		tcp_seg_flags = 0;
		bge_hlen = ip_tcp_hlen >> 2;
		if (BGE_IS_5717_PLUS(sc)) {
			tcp_seg_flags = (bge_hlen & 0x3) << 14;
			txbd_tso_flags |=
			    ((bge_hlen & 0xF8) << 7) | ((bge_hlen & 0x4) << 2);
		} else if (BGE_IS_5705_PLUS(sc)) {
			tcp_seg_flags = bge_hlen << 11;
		} else {
			/* XXX iptcp_opt_words or bge_hlen ? */
			txbd_tso_flags |= iptcp_opt_words << 12;
		}
		maxsegsize = mss | tcp_seg_flags;
		ip->ip_len = htons(mss + ip_tcp_hlen);
		ip->ip_sum = 0;

	}	/* TSO setup */

	have_vtag = vlan_has_tag(m_head);
	if (have_vtag)
		vtag = vlan_get_tag(m_head);

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	remap = true;
load_again:
	error = bus_dmamap_load_mbuf(dmatag, dmamap, m_head, BUS_DMA_NOWAIT);
	if (__predict_false(error)) {
		/* On EFBIG, try once to defragment the chain in place. */
		if (error == EFBIG && remap) {
			struct mbuf *m;
			remap = false;
			m = m_defrag(m_head, M_NOWAIT);
			if (m != NULL) {
				KASSERT(m == m_head);
				goto load_again;
			}
		}
		return error;
	}
	/*
	 * Sanity check: avoid coming within 16 descriptors
	 * of the end of the ring.
	 */
	if (dmamap->dm_nsegs > (BGE_TX_RING_CNT - sc->bge_txcnt - 16)) {
		BGE_TSO_PRINTF(("%s: "
		    " dmamap_load_mbuf too close to ring wrap\n",
		    device_xname(sc->bge_dev)));
		goto fail_unload;
	}

	/* Iterate over dmap-map fragments. */
	f = prev_f = NULL;
	cur = frag = *txidx;

	for (i = 0; i < dmamap->dm_nsegs; i++) {
		f = &sc->bge_rdata->bge_tx_ring[frag];
		if (sc->bge_cdata.bge_tx_chain[frag] != NULL)
			break;

		BGE_HOSTADDR(f->bge_addr, dmamap->dm_segs[i].ds_addr);
		f->bge_len = dmamap->dm_segs[i].ds_len;
		if (sizeof(bus_addr_t) > 4 && dma->is_dma32 == false && use_tso && (
			(dmamap->dm_segs[i].ds_addr & 0xffffffff00000000) !=
			((dmamap->dm_segs[i].ds_addr + f->bge_len) & 0xffffffff00000000) ||
			(prev_f != NULL &&
			 prev_f->bge_addr.bge_addr_hi != f->bge_addr.bge_addr_hi))
		) {
			/*
			 * watchdog timeout issue was observed with TSO,
			 * limiting DMA address space to 32bits seems to
			 * address the issue.
			 */
			bus_dmamap_unload(dmatag, dmamap);
			dmatag = sc->bge_dmatag32;
			dmamap = dma->dmamap32;
			dma->is_dma32 = true;
			remap = true;
			goto load_again;
		}

		/*
		 * For 5751 and follow-ons, for TSO we must turn
		 * off checksum-assist flag in the tx-descr, and
		 * supply the ASIC-revision-specific encoding
		 * of TSO flags and segsize.
		 */
		if (use_tso) {
			if (BGE_IS_575X_PLUS(sc) || i == 0) {
				f->bge_rsvd = maxsegsize;
				f->bge_flags = csum_flags | txbd_tso_flags;
			} else {
				f->bge_rsvd = 0;
				f->bge_flags =
				    (csum_flags | txbd_tso_flags) & 0x0fff;
			}
		} else {
			f->bge_rsvd = 0;
			f->bge_flags = csum_flags;
		}

		if (have_vtag) {
			f->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
			f->bge_vlan_tag = vtag;
		} else {
			f->bge_vlan_tag = 0;
		}
		prev_f = f;
		cur = frag;
		BGE_INC(frag, BGE_TX_RING_CNT);
	}

	/* The loop above broke early: a ring slot was still occupied. */
	if (i < dmamap->dm_nsegs) {
		BGE_TSO_PRINTF(("%s: reached %d < dm_nsegs %d\n",
		    device_xname(sc->bge_dev), i, dmamap->dm_nsegs));
		goto fail_unload;
	}

	bus_dmamap_sync(dmatag, dmamap, 0, dmamap->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	if (frag == sc->bge_tx_saved_considx) {
		BGE_TSO_PRINTF(("%s: frag %d = wrapped id %d?\n",
		    device_xname(sc->bge_dev), frag, sc->bge_tx_saved_considx));

		goto fail_unload;
	}

	/* Commit: mark the final descriptor and record ownership. */
	sc->bge_rdata->bge_tx_ring[cur].bge_flags |= BGE_TXBDFLAG_END;
	sc->bge_cdata.bge_tx_chain[cur] = m_head;
	SLIST_REMOVE_HEAD(&sc->txdma_list, link);
	sc->txdma[cur] = dma;
	sc->bge_txcnt += dmamap->dm_nsegs;

	*txidx = frag;

	return 0;

fail_unload:
	bus_dmamap_unload(dmatag, dmamap);
	ifp->if_flags |= IFF_OACTIVE;

	return ENOBUFS;
}

/*
 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit descriptors.
 */
static void
bge_start(struct ifnet *ifp)
{
	struct bge_softc * const sc = ifp->if_softc;
	struct mbuf *m_head = NULL;
	struct mbuf *m;
	uint32_t prodidx;
	int pkts = 0;
	int error;

	/* Bail unless the interface is running and not flow-blocked. */
	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	prodidx = sc->bge_tx_prodidx;

	while (sc->bge_cdata.bge_tx_chain[prodidx] == NULL) {
		IFQ_POLL(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

#if 0
		/*
		 * XXX
		 * safety overkill. If this is a fragmented packet chain
		 * with delayed TCP/UDP checksums, then only encapsulate
		 * it if we have enough descriptors to handle the entire
		 * chain at once.
		 * (paranoia -- may not actually be needed)
		 */
		if (m_head->m_flags & M_FIRSTFRAG &&
		    m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
			if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
			    M_CSUM_DATA_IPv4_OFFSET(m_head->m_pkthdr.csum_data) + 16) {
				ifp->if_flags |= IFF_OACTIVE;
				break;
			}
		}
#endif

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		error = bge_encap(sc, m_head, &prodidx);
		if (__predict_false(error)) {
			if (ifp->if_flags & IFF_OACTIVE) {
				/* just wait for the transmit ring to drain */
				break;
			}
			/* Unrecoverable for this packet: drop it. */
			IFQ_DEQUEUE(&ifp->if_snd, m);
			KASSERT(m == m_head);
			m_freem(m_head);
			continue;
		}

		/* now we are committed to transmit the packet */
		IFQ_DEQUEUE(&ifp->if_snd, m);
		KASSERT(m == m_head);
		pkts++;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		bpf_mtap(ifp, m_head, BPF_D_OUT);
	}
	if (pkts == 0)
		return;

	/* Transmit */
	bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
	/* 5700 b2 errata */
	if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX)
		bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);

	sc->bge_tx_prodidx = prodidx;

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}

/*
 * Bring the interface up: reset and reinitialize the chip, program
 * MAC address and RX filters, set up the rings, and enable interrupts.
 * Returns 0 on success or an error from block initialization/media.
 */
static int
bge_init(struct ifnet *ifp)
{
	struct bge_softc * const sc = ifp->if_softc;
	const uint16_t *m;
	uint32_t mode, reg;
	int s, error = 0;

	s = splnet();

	KASSERT(ifp == &sc->ethercom.ec_if);

	/* Cancel pending I/O and flush buffers. */
	bge_stop(ifp, 0);

	bge_stop_fw(sc);
	bge_sig_pre_reset(sc, BGE_RESET_START);
	bge_reset(sc);
	bge_sig_legacy(sc, BGE_RESET_START);

	/* 5784 A-step: take the CPMU out of link-aware/idle clocking. */
	if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5784_AX) {
		reg = CSR_READ_4(sc, BGE_CPMU_CTRL);
		reg &= ~(BGE_CPMU_CTRL_LINK_AWARE_MODE |
		    BGE_CPMU_CTRL_LINK_IDLE_MODE);
		CSR_WRITE_4(sc, BGE_CPMU_CTRL, reg);

		reg = CSR_READ_4(sc, BGE_CPMU_LSPD_10MB_CLK);
		/*
		 * NOTE(review): clearing with the register name
		 * BGE_CPMU_LSPD_10MB_CLK rather than a *_MASK constant
		 * looks suspicious — confirm against the register layout.
		 */
		reg &= ~BGE_CPMU_LSPD_10MB_CLK;
		reg |= BGE_CPMU_LSPD_10MB_MACCLK_6_25;
		CSR_WRITE_4(sc, BGE_CPMU_LSPD_10MB_CLK, reg);

		reg = CSR_READ_4(sc, BGE_CPMU_LNK_AWARE_PWRMD);
		reg &= ~BGE_CPMU_LNK_AWARE_MACCLK_MASK;
		reg |= BGE_CPMU_LNK_AWARE_MACCLK_6_25;
		CSR_WRITE_4(sc, BGE_CPMU_LNK_AWARE_PWRMD, reg);

		reg = CSR_READ_4(sc, BGE_CPMU_HST_ACC);
		reg &= ~BGE_CPMU_HST_ACC_MACCLK_MASK;
		reg |= BGE_CPMU_HST_ACC_MACCLK_6_25;
		CSR_WRITE_4(sc, BGE_CPMU_HST_ACC, reg);
	}

	/* 57780: PCIe power-management and electrical-idle tuning. */
	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57780) {
		pcireg_t aercap;

		reg = CSR_READ_4(sc, BGE_PCIE_PWRMNG_THRESH);
		reg = (reg & ~BGE_PCIE_PWRMNG_L1THRESH_MASK)
		    | BGE_PCIE_PWRMNG_L1THRESH_4MS
		    | BGE_PCIE_PWRMNG_EXTASPMTMR_EN;
		CSR_WRITE_4(sc, BGE_PCIE_PWRMNG_THRESH, reg);

		reg = CSR_READ_4(sc, BGE_PCIE_EIDLE_DELAY);
		reg = (reg & ~BGE_PCIE_EIDLE_DELAY_MASK)
		    | BGE_PCIE_EIDLE_DELAY_13CLK;
		CSR_WRITE_4(sc, BGE_PCIE_EIDLE_DELAY, reg);

		/* Clear correctable error */
		if (pci_get_ext_capability(sc->sc_pc, sc->sc_pcitag,
		    PCI_EXTCAP_AER, &aercap, NULL) != 0)
			pci_conf_write(sc->sc_pc, sc->sc_pcitag,
			    aercap + PCI_AER_COR_STATUS, 0xffffffff);

		reg = CSR_READ_4(sc, BGE_PCIE_LINKCTL);
		reg = (reg & ~BGE_PCIE_LINKCTL_L1_PLL_PDEN)
		    | BGE_PCIE_LINKCTL_L1_PLL_PDDIS;
		CSR_WRITE_4(sc, BGE_PCIE_LINKCTL, reg);
	}

	bge_sig_post_reset(sc, BGE_RESET_START);

	bge_chipinit(sc);

	/*
	 * Init the various state machines, ring
	 * control blocks and firmware.
	 */
	error = bge_blockinit(sc);
	if (error != 0) {
		aprint_error_dev(sc->bge_dev, "initialization error %d\n",
		    error);
		splx(s);
		return error;
	}

	/* 5718 step 25, 57XX step 54 */
	/* Specify MTU. */
	CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
	    ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN);

	/* 5718 step 23 */
	/* Load our MAC address. */
	m = (const uint16_t *)&(CLLADDR(ifp->if_sadl)[0]);
	CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
	CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI,
	    ((uint32_t)htons(m[1]) << 16) | htons(m[2]));

	/* Enable or disable promiscuous mode as needed. */
	if (ifp->if_flags & IFF_PROMISC)
		BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
	else
		BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);

	/* Program multicast filter. */
	bge_setmulti(sc);

	/* Init RX ring. */
	bge_init_rx_ring_std(sc);

	/*
	 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
	 * memory to ensure that the chip has in fact read the first
	 * entry of the ring.
	 */
	if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
		uint32_t v, i;
		for (i = 0; i < 10; i++) {
			DELAY(20);
			v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
			if (v == (MCLBYTES - ETHER_ALIGN))
				break;
		}
		if (i == 10)
			aprint_error_dev(sc->bge_dev,
			    "5705 A0 chip failed to load RX ring\n");
	}

	/* Init jumbo RX ring. */
	if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
		bge_init_rx_ring_jumbo(sc);

	/* Init our RX return ring index */
	sc->bge_rx_saved_considx = 0;

	/* Init TX ring. */
	bge_init_tx_ring(sc);

	/* 5718 step 63, 57XX step 94 */
	/* Enable TX MAC state machine lockup fix. */
	mode = CSR_READ_4(sc, BGE_TX_MODE);
	if (BGE_IS_5755_PLUS(sc) ||
	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
		mode |= BGE_TXMODE_MBUF_LOCKUP_FIX;
	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720 ||
	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762) {
		mode &= ~(BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE);
		mode |= CSR_READ_4(sc, BGE_TX_MODE) &
		    (BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE);
	}

	/* Turn on transmitter */
	CSR_WRITE_4_FLUSH(sc, BGE_TX_MODE, mode | BGE_TXMODE_ENABLE);
	/* 5718 step 64 */
	DELAY(100);

	/* 5718 step 65, 57XX step 95 */
	/* Turn on receiver */
	mode = CSR_READ_4(sc, BGE_RX_MODE);
	if (BGE_IS_5755_PLUS(sc))
		mode |= BGE_RXMODE_IPV6_ENABLE;
	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762)
		mode |= BGE_RXMODE_IPV4_FRAG_FIX;
	CSR_WRITE_4_FLUSH(sc, BGE_RX_MODE, mode | BGE_RXMODE_ENABLE);
	/* 5718 step 66 */
	DELAY(10);

	/* 5718 step 12, 57XX step 37 */
	/*
	 * XXX Documents of 5718 series and 577xx say the recommended value
	 * is 1, but tg3 set 1 only on 57765 series.
	 */
	if (BGE_IS_57765_PLUS(sc))
		reg = 1;
	else
		reg = 2;
	CSR_WRITE_4_FLUSH(sc, BGE_MAX_RX_FRAME_LOWAT, reg);

	/* Tell firmware we're alive. */
	BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);

	/* Enable host interrupts. */
	BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
	BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
	bge_writembx_flush(sc, BGE_MBX_IRQ0_LO, 0);

	if ((error = bge_ifmedia_upd(ifp)) != 0)
		goto out;

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	callout_schedule(&sc->bge_timeout, hz);

out:
	sc->bge_if_flags = ifp->if_flags;
	splx(s);

	return error;
}

/*
 * Set media options. For TBI fiber NICs the media is programmed
 * directly on the MAC; otherwise the request is handed to the MII
 * layer. Returns 0 on success or EINVAL for unsupported media.
 */
static int
bge_ifmedia_upd(struct ifnet *ifp)
{
	struct bge_softc * const sc = ifp->if_softc;
	struct mii_data * const mii = &sc->bge_mii;
	struct ifmedia * const ifm = &sc->bge_ifmedia;
	int rc;

	/* If this is a 1000baseX NIC, enable the TBI port. */
	if (sc->bge_flags & BGEF_FIBER_TBI) {
		if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
			return EINVAL;
		switch (IFM_SUBTYPE(ifm->ifm_media)) {
		case IFM_AUTO:
			/*
			 * The BCM5704 ASIC appears to have a special
			 * mechanism for programming the autoneg
			 * advertisement registers in TBI mode.
			 */
			if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
				uint32_t sgdig;
				sgdig = CSR_READ_4(sc, BGE_SGDIG_STS);
				if (sgdig & BGE_SGDIGSTS_DONE) {
					CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
					sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
					sgdig |= BGE_SGDIGCFG_AUTO |
					    BGE_SGDIGCFG_PAUSE_CAP |
					    BGE_SGDIGCFG_ASYM_PAUSE;
					CSR_WRITE_4_FLUSH(sc, BGE_SGDIG_CFG,
					    sgdig | BGE_SGDIGCFG_SEND);
					DELAY(5);
					CSR_WRITE_4_FLUSH(sc, BGE_SGDIG_CFG,
					    sgdig);
				}
			}
			break;
		case IFM_1000_SX:
			if ((ifm->ifm_media & IFM_FDX) != 0) {
				BGE_CLRBIT_FLUSH(sc, BGE_MAC_MODE,
				    BGE_MACMODE_HALF_DUPLEX);
			} else {
				BGE_SETBIT_FLUSH(sc, BGE_MAC_MODE,
				    BGE_MACMODE_HALF_DUPLEX);
			}
			DELAY(40);
			break;
		default:
			return EINVAL;
		}
		/* XXX 802.3x flow control for 1000BASE-SX */
		return 0;
	}

	/* 5784 (non A-step): undo GPHY 10Mb RX-only mode if set. */
	if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784) &&
	    (BGE_CHIPREV(sc->bge_chipid) != BGE_CHIPREV_5784_AX)) {
		uint32_t reg;

		reg = CSR_READ_4(sc, BGE_CPMU_CTRL);
		if ((reg & BGE_CPMU_CTRL_GPHY_10MB_RXONLY) != 0) {
			reg &= ~BGE_CPMU_CTRL_GPHY_10MB_RXONLY;
			CSR_WRITE_4(sc, BGE_CPMU_CTRL, reg);
		}
	}

	BGE_STS_SETBIT(sc, BGE_STS_LINK_EVT);
	if ((rc = mii_mediachg(mii)) == ENXIO)
		return 0;

	/* 5784 A-step: drop the 1000Mb MAC clock back if it was raised. */
	if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5784_AX) {
		uint32_t reg;

		reg = CSR_READ_4(sc, BGE_CPMU_LSPD_1000MB_CLK);
		if ((reg & BGE_CPMU_LSPD_1000MB_MACCLK_MASK)
		    == (BGE_CPMU_LSPD_1000MB_MACCLK_12_5)) {
			reg &= ~BGE_CPMU_LSPD_1000MB_MACCLK_MASK;
			delay(40);
			CSR_WRITE_4(sc, BGE_CPMU_LSPD_1000MB_CLK, reg);
		}
	}

	/*
	 * Force an interrupt so that we will call bge_link_upd
	 * if needed and clear any pending link state attention.
	 * Without this we are not getting any further interrupts
	 * for link state changes and thus will not UP the link and
	 * not be able to send in bge_start. The only way to get
	 * things working was to receive a packet and get a RX intr.
	 */
	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
	    sc->bge_flags & BGEF_IS_5788)
		BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
	else
		BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);

	return rc;
}

/*
 * Report current media status.
 */
static void
bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct bge_softc * const sc = ifp->if_softc;
	struct mii_data * const mii = &sc->bge_mii;

	/* TBI fiber: derive status directly from the MAC registers. */
	if (sc->bge_flags & BGEF_FIBER_TBI) {
		ifmr->ifm_status = IFM_AVALID;
		ifmr->ifm_active = IFM_ETHER;
		if (CSR_READ_4(sc, BGE_MAC_STS) &
		    BGE_MACSTAT_TBI_PCS_SYNCHED)
			ifmr->ifm_status |= IFM_ACTIVE;
		ifmr->ifm_active |= IFM_1000_SX;
		if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
			ifmr->ifm_active |= IFM_HDX;
		else
			ifmr->ifm_active |= IFM_FDX;
		return;
	}

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = (mii->mii_media_active & ~IFM_ETH_FMASK) |
	    sc->bge_flowflags;
}

/*
 * Interface-flags change callback: apply PROMISC/ALLMULTI changes
 * without a full reinit; anything else forces ENETRESET.
 */
static int
bge_ifflags_cb(struct ethercom *ec)
{
	struct ifnet * const ifp = &ec->ec_if;
	struct bge_softc * const sc = ifp->if_softc;
	u_short change = ifp->if_flags ^ sc->bge_if_flags;

	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0)
		return ENETRESET;
	else if ((change & (IFF_PROMISC | IFF_ALLMULTI)) == 0)
		return 0;

	if ((ifp->if_flags & IFF_PROMISC) == 0)
		BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
	else
		BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);

	bge_setmulti(sc);

	sc->bge_if_flags = ifp->if_flags;
	return 0;
}

/*
 * Interface ioctl handler: media selection (with flow-control
 * sanitizing) is handled here; everything else defers to ether_ioctl,
 * reprogramming the multicast filter on SIOC(ADD|DEL)MULTI.
 */
static int
bge_ioctl(struct ifnet *ifp, u_long command, void *data)
{
	struct bge_softc * const sc = ifp->if_softc;
	struct ifreq * const ifr = (struct ifreq *) data;
	int s, error = 0;
	struct mii_data *mii;

	s = splnet();

	switch (command) {
	case SIOCSIFMEDIA:
		/* XXX Flow control is not supported for 1000BASE-SX */
		if (sc->bge_flags & BGEF_FIBER_TBI) {
			ifr->ifr_media &= ~IFM_ETH_FMASK;
			sc->bge_flowflags = 0;
		}

		/* Flow control requires full-duplex mode. */
		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
		    (ifr->ifr_media & IFM_FDX) == 0) {
			ifr->ifr_media &= ~IFM_ETH_FMASK;
		}
		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
				/* We can do both TXPAUSE and RXPAUSE. */
				ifr->ifr_media |=
				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
			}
			sc->bge_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
		}

		if (sc->bge_flags & BGEF_FIBER_TBI) {
			error = ifmedia_ioctl(ifp, ifr, &sc->bge_ifmedia,
			    command);
		} else {
			mii = &sc->bge_mii;
			error = ifmedia_ioctl(ifp, ifr, &mii->mii_media,
			    command);
		}
		break;
	default:
		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
			break;

		error = 0;

		if (command != SIOCADDMULTI && command != SIOCDELMULTI)
			;
		else if (ifp->if_flags & IFF_RUNNING)
			bge_setmulti(sc);
		break;
	}

	splx(s);

	return error;
}

/*
 * Transmit watchdog: called when a TX completion has not arrived
 * within ifp->if_timer seconds (set in bge_start).
 */
static void
bge_watchdog(struct ifnet *ifp)
{
	struct bge_softc * const sc = ifp->if_softc;
	uint32_t status;

	/* If pause frames are active then don't reset the hardware.
*/ 5909 if ((CSR_READ_4(sc, BGE_RX_MODE) & BGE_RXMODE_FLOWCTL_ENABLE) != 0) { 5910 status = CSR_READ_4(sc, BGE_RX_STS); 5911 if ((status & BGE_RXSTAT_REMOTE_XOFFED) != 0) { 5912 /* 5913 * If link partner has us in XOFF state then wait for 5914 * the condition to clear. 5915 */ 5916 CSR_WRITE_4(sc, BGE_RX_STS, status); 5917 ifp->if_timer = 5; 5918 return; 5919 } else if ((status & BGE_RXSTAT_RCVD_XOFF) != 0 && 5920 (status & BGE_RXSTAT_RCVD_XON) != 0) { 5921 /* 5922 * If link partner has us in XOFF state then wait for 5923 * the condition to clear. 5924 */ 5925 CSR_WRITE_4(sc, BGE_RX_STS, status); 5926 ifp->if_timer = 5; 5927 return; 5928 } 5929 /* 5930 * Any other condition is unexpected and the controller 5931 * should be reset. 5932 */ 5933 } 5934 5935 aprint_error_dev(sc->bge_dev, "watchdog timeout -- resetting\n"); 5936 5937 ifp->if_flags &= ~IFF_RUNNING; 5938 bge_init(ifp); 5939 5940 if_statinc(ifp, if_oerrors); 5941 } 5942 5943 static void 5944 bge_stop_block(struct bge_softc *sc, bus_addr_t reg, uint32_t bit) 5945 { 5946 int i; 5947 5948 BGE_CLRBIT_FLUSH(sc, reg, bit); 5949 5950 for (i = 0; i < 1000; i++) { 5951 delay(100); 5952 if ((CSR_READ_4(sc, reg) & bit) == 0) 5953 return; 5954 } 5955 5956 /* 5957 * Doesn't print only when the register is BGE_SRS_MODE. It occurs 5958 * on some environment (and once after boot?) 5959 */ 5960 if (reg != BGE_SRS_MODE) 5961 aprint_error_dev(sc->bge_dev, 5962 "block failed to stop: reg 0x%lx, bit 0x%08x\n", 5963 (u_long)reg, bit); 5964 } 5965 5966 /* 5967 * Stop the adapter and free any mbufs allocated to the 5968 * RX and TX lists. 5969 */ 5970 static void 5971 bge_stop(struct ifnet *ifp, int disable) 5972 { 5973 struct bge_softc * const sc = ifp->if_softc; 5974 5975 if (disable) { 5976 sc->bge_detaching = 1; 5977 callout_halt(&sc->bge_timeout, NULL); 5978 } else 5979 callout_stop(&sc->bge_timeout); 5980 5981 /* Disable host interrupts. 
*/ 5982 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR); 5983 bge_writembx_flush(sc, BGE_MBX_IRQ0_LO, 1); 5984 5985 /* 5986 * Tell firmware we're shutting down. 5987 */ 5988 bge_stop_fw(sc); 5989 bge_sig_pre_reset(sc, BGE_RESET_SHUTDOWN); 5990 5991 /* 5992 * Disable all of the receiver blocks. 5993 */ 5994 bge_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE); 5995 bge_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE); 5996 bge_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE); 5997 if (BGE_IS_5700_FAMILY(sc)) 5998 bge_stop_block(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE); 5999 bge_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE); 6000 bge_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE); 6001 bge_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE); 6002 6003 /* 6004 * Disable all of the transmit blocks. 6005 */ 6006 bge_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE); 6007 bge_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE); 6008 bge_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE); 6009 bge_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE); 6010 bge_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE); 6011 if (BGE_IS_5700_FAMILY(sc)) 6012 bge_stop_block(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE); 6013 bge_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE); 6014 6015 BGE_CLRBIT_FLUSH(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB); 6016 delay(40); 6017 6018 bge_stop_block(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE); 6019 6020 /* 6021 * Shut down all of the memory managers and related 6022 * state machines. 
6023 */ 6024 /* 5718 step 5a,5b */ 6025 bge_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE); 6026 bge_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE); 6027 if (BGE_IS_5700_FAMILY(sc)) 6028 bge_stop_block(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE); 6029 6030 /* 5718 step 5c,5d */ 6031 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF); 6032 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0); 6033 6034 if (BGE_IS_5700_FAMILY(sc)) { 6035 bge_stop_block(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE); 6036 bge_stop_block(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE); 6037 } 6038 6039 bge_reset(sc); 6040 bge_sig_legacy(sc, BGE_RESET_SHUTDOWN); 6041 bge_sig_post_reset(sc, BGE_RESET_SHUTDOWN); 6042 6043 /* 6044 * Keep the ASF firmware running if up. 6045 */ 6046 if (sc->bge_asf_mode & ASF_STACKUP) 6047 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 6048 else 6049 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 6050 6051 /* Free the RX lists. */ 6052 bge_free_rx_ring_std(sc, disable); 6053 6054 /* Free jumbo RX list. */ 6055 if (BGE_IS_JUMBO_CAPABLE(sc)) 6056 bge_free_rx_ring_jumbo(sc); 6057 6058 /* Free TX buffers. */ 6059 bge_free_tx_ring(sc, disable); 6060 6061 /* 6062 * Isolate/power down the PHY. 6063 */ 6064 if (!(sc->bge_flags & BGEF_FIBER_TBI)) 6065 mii_down(&sc->bge_mii); 6066 6067 sc->bge_tx_saved_considx = BGE_TXCONS_UNSET; 6068 6069 /* Clear MAC's link state (PHY may still have link UP). */ 6070 BGE_STS_CLRBIT(sc, BGE_STS_LINK); 6071 6072 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 6073 } 6074 6075 static void 6076 bge_link_upd(struct bge_softc *sc) 6077 { 6078 struct ifnet * const ifp = &sc->ethercom.ec_if; 6079 struct mii_data * const mii = &sc->bge_mii; 6080 uint32_t status; 6081 uint16_t phyval; 6082 int link; 6083 6084 /* Clear 'pending link event' flag */ 6085 BGE_STS_CLRBIT(sc, BGE_STS_LINK_EVT); 6086 6087 /* 6088 * Process link state changes. 6089 * Grrr. 
The link status word in the status block does 6090 * not work correctly on the BCM5700 rev AX and BX chips, 6091 * according to all available information. Hence, we have 6092 * to enable MII interrupts in order to properly obtain 6093 * async link changes. Unfortunately, this also means that 6094 * we have to read the MAC status register to detect link 6095 * changes, thereby adding an additional register access to 6096 * the interrupt handler. 6097 */ 6098 6099 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700) { 6100 status = CSR_READ_4(sc, BGE_MAC_STS); 6101 if (status & BGE_MACSTAT_MI_INTERRUPT) { 6102 mii_pollstat(mii); 6103 6104 if (!BGE_STS_BIT(sc, BGE_STS_LINK) && 6105 mii->mii_media_status & IFM_ACTIVE && 6106 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) 6107 BGE_STS_SETBIT(sc, BGE_STS_LINK); 6108 else if (BGE_STS_BIT(sc, BGE_STS_LINK) && 6109 (!(mii->mii_media_status & IFM_ACTIVE) || 6110 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) 6111 BGE_STS_CLRBIT(sc, BGE_STS_LINK); 6112 6113 /* Clear the interrupt */ 6114 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB, 6115 BGE_EVTENB_MI_INTERRUPT); 6116 bge_miibus_readreg(sc->bge_dev, sc->bge_phy_addr, 6117 BRGPHY_MII_ISR, &phyval); 6118 bge_miibus_writereg(sc->bge_dev, sc->bge_phy_addr, 6119 BRGPHY_MII_IMR, BRGPHY_INTRS); 6120 } 6121 return; 6122 } 6123 6124 if (sc->bge_flags & BGEF_FIBER_TBI) { 6125 status = CSR_READ_4(sc, BGE_MAC_STS); 6126 if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) { 6127 if (!BGE_STS_BIT(sc, BGE_STS_LINK)) { 6128 BGE_STS_SETBIT(sc, BGE_STS_LINK); 6129 if (BGE_ASICREV(sc->bge_chipid) 6130 == BGE_ASICREV_BCM5704) { 6131 BGE_CLRBIT_FLUSH(sc, BGE_MAC_MODE, 6132 BGE_MACMODE_TBI_SEND_CFGS); 6133 DELAY(40); 6134 } 6135 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF); 6136 if_link_state_change(ifp, LINK_STATE_UP); 6137 } 6138 } else if (BGE_STS_BIT(sc, BGE_STS_LINK)) { 6139 BGE_STS_CLRBIT(sc, BGE_STS_LINK); 6140 if_link_state_change(ifp, LINK_STATE_DOWN); 6141 } 6142 } else if (BGE_STS_BIT(sc, BGE_STS_AUTOPOLL)) 
{ 6143 /* 6144 * Some broken BCM chips have BGE_STATFLAG_LINKSTATE_CHANGED 6145 * bit in status word always set. Workaround this bug by 6146 * reading PHY link status directly. 6147 */ 6148 link = (CSR_READ_4(sc, BGE_MI_STS) & BGE_MISTS_LINK)? 6149 BGE_STS_LINK : 0; 6150 6151 if (BGE_STS_BIT(sc, BGE_STS_LINK) != link) { 6152 mii_pollstat(mii); 6153 6154 if (!BGE_STS_BIT(sc, BGE_STS_LINK) && 6155 mii->mii_media_status & IFM_ACTIVE && 6156 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) 6157 BGE_STS_SETBIT(sc, BGE_STS_LINK); 6158 else if (BGE_STS_BIT(sc, BGE_STS_LINK) && 6159 (!(mii->mii_media_status & IFM_ACTIVE) || 6160 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) 6161 BGE_STS_CLRBIT(sc, BGE_STS_LINK); 6162 } 6163 } else { 6164 /* 6165 * For controllers that call mii_tick, we have to poll 6166 * link status. 6167 */ 6168 mii_pollstat(mii); 6169 } 6170 6171 if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5784_AX) { 6172 uint32_t reg, scale; 6173 6174 reg = CSR_READ_4(sc, BGE_CPMU_CLCK_STAT) & 6175 BGE_CPMU_CLCK_STAT_MAC_CLCK_MASK; 6176 if (reg == BGE_CPMU_CLCK_STAT_MAC_CLCK_62_5) 6177 scale = 65; 6178 else if (reg == BGE_CPMU_CLCK_STAT_MAC_CLCK_6_25) 6179 scale = 6; 6180 else 6181 scale = 12; 6182 6183 reg = CSR_READ_4(sc, BGE_MISC_CFG) & 6184 ~BGE_MISCCFG_TIMER_PRESCALER; 6185 reg |= scale << 1; 6186 CSR_WRITE_4(sc, BGE_MISC_CFG, reg); 6187 } 6188 /* Clear the attention */ 6189 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED | 6190 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE | 6191 BGE_MACSTAT_LINK_CHANGED); 6192 } 6193 6194 static int 6195 bge_sysctl_verify(SYSCTLFN_ARGS) 6196 { 6197 int error, t; 6198 struct sysctlnode node; 6199 6200 node = *rnode; 6201 t = *(int*)rnode->sysctl_data; 6202 node.sysctl_data = &t; 6203 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 6204 if (error || newp == NULL) 6205 return error; 6206 6207 #if 0 6208 DPRINTF2(("%s: t = %d, nodenum = %d, rnodenum = %d\n", __func__, t, 6209 node.sysctl_num, rnode->sysctl_num)); 6210 
#endif 6211 6212 if (node.sysctl_num == bge_rxthresh_nodenum) { 6213 if (t < 0 || t >= NBGE_RX_THRESH) 6214 return EINVAL; 6215 bge_update_all_threshes(t); 6216 } else 6217 return EINVAL; 6218 6219 *(int*)rnode->sysctl_data = t; 6220 6221 return 0; 6222 } 6223 6224 /* 6225 * Set up sysctl(3) MIB, hw.bge.*. 6226 */ 6227 static void 6228 bge_sysctl_init(struct bge_softc *sc) 6229 { 6230 int rc, bge_root_num; 6231 const struct sysctlnode *node; 6232 6233 if ((rc = sysctl_createv(&sc->bge_log, 0, NULL, &node, 6234 0, CTLTYPE_NODE, "bge", 6235 SYSCTL_DESCR("BGE interface controls"), 6236 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0) { 6237 goto out; 6238 } 6239 6240 bge_root_num = node->sysctl_num; 6241 6242 /* BGE Rx interrupt mitigation level */ 6243 if ((rc = sysctl_createv(&sc->bge_log, 0, NULL, &node, 6244 CTLFLAG_READWRITE, 6245 CTLTYPE_INT, "rx_lvl", 6246 SYSCTL_DESCR("BGE receive interrupt mitigation level"), 6247 bge_sysctl_verify, 0, 6248 &bge_rx_thresh_lvl, 6249 0, CTL_HW, bge_root_num, CTL_CREATE, 6250 CTL_EOL)) != 0) { 6251 goto out; 6252 } 6253 6254 bge_rxthresh_nodenum = node->sysctl_num; 6255 6256 return; 6257 6258 out: 6259 aprint_error("%s: sysctl_createv failed (rc = %d)\n", __func__, rc); 6260 } 6261 6262 #ifdef BGE_DEBUG 6263 void 6264 bge_debug_info(struct bge_softc *sc) 6265 { 6266 6267 printf("Hardware Flags:\n"); 6268 if (BGE_IS_57765_PLUS(sc)) 6269 printf(" - 57765 Plus\n"); 6270 if (BGE_IS_5717_PLUS(sc)) 6271 printf(" - 5717 Plus\n"); 6272 if (BGE_IS_5755_PLUS(sc)) 6273 printf(" - 5755 Plus\n"); 6274 if (BGE_IS_575X_PLUS(sc)) 6275 printf(" - 575X Plus\n"); 6276 if (BGE_IS_5705_PLUS(sc)) 6277 printf(" - 5705 Plus\n"); 6278 if (BGE_IS_5714_FAMILY(sc)) 6279 printf(" - 5714 Family\n"); 6280 if (BGE_IS_5700_FAMILY(sc)) 6281 printf(" - 5700 Family\n"); 6282 if (sc->bge_flags & BGEF_IS_5788) 6283 printf(" - 5788\n"); 6284 if (sc->bge_flags & BGEF_JUMBO_CAPABLE) 6285 printf(" - Supports Jumbo Frames\n"); 6286 if (sc->bge_flags & 
BGEF_NO_EEPROM) 6287 printf(" - No EEPROM\n"); 6288 if (sc->bge_flags & BGEF_PCIX) 6289 printf(" - PCI-X Bus\n"); 6290 if (sc->bge_flags & BGEF_PCIE) 6291 printf(" - PCI Express Bus\n"); 6292 if (sc->bge_flags & BGEF_RX_ALIGNBUG) 6293 printf(" - RX Alignment Bug\n"); 6294 if (sc->bge_flags & BGEF_APE) 6295 printf(" - APE\n"); 6296 if (sc->bge_flags & BGEF_CPMU_PRESENT) 6297 printf(" - CPMU\n"); 6298 if (sc->bge_flags & BGEF_TSO) 6299 printf(" - TSO\n"); 6300 if (sc->bge_flags & BGEF_TAGGED_STATUS) 6301 printf(" - TAGGED_STATUS\n"); 6302 6303 /* PHY related */ 6304 if (sc->bge_phy_flags & BGEPHYF_NO_3LED) 6305 printf(" - No 3 LEDs\n"); 6306 if (sc->bge_phy_flags & BGEPHYF_CRC_BUG) 6307 printf(" - CRC bug\n"); 6308 if (sc->bge_phy_flags & BGEPHYF_ADC_BUG) 6309 printf(" - ADC bug\n"); 6310 if (sc->bge_phy_flags & BGEPHYF_5704_A0_BUG) 6311 printf(" - 5704 A0 bug\n"); 6312 if (sc->bge_phy_flags & BGEPHYF_JITTER_BUG) 6313 printf(" - jitter bug\n"); 6314 if (sc->bge_phy_flags & BGEPHYF_BER_BUG) 6315 printf(" - BER bug\n"); 6316 if (sc->bge_phy_flags & BGEPHYF_ADJUST_TRIM) 6317 printf(" - adjust trim\n"); 6318 if (sc->bge_phy_flags & BGEPHYF_NO_WIRESPEED) 6319 printf(" - no wirespeed\n"); 6320 6321 /* ASF related */ 6322 if (sc->bge_asf_mode & ASF_ENABLE) 6323 printf(" - ASF enable\n"); 6324 if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) 6325 printf(" - ASF new handshake\n"); 6326 if (sc->bge_asf_mode & ASF_STACKUP) 6327 printf(" - ASF stackup\n"); 6328 } 6329 #endif /* BGE_DEBUG */ 6330 6331 static int 6332 bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[]) 6333 { 6334 prop_dictionary_t dict; 6335 prop_data_t ea; 6336 6337 if ((sc->bge_flags & BGEF_NO_EEPROM) == 0) 6338 return 1; 6339 6340 dict = device_properties(sc->bge_dev); 6341 ea = prop_dictionary_get(dict, "mac-address"); 6342 if (ea != NULL) { 6343 KASSERT(prop_object_type(ea) == PROP_TYPE_DATA); 6344 KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN); 6345 memcpy(ether_addr, prop_data_value(ea), ETHER_ADDR_LEN); 
6346 return 0; 6347 } 6348 6349 return 1; 6350 } 6351 6352 static int 6353 bge_get_eaddr_mem(struct bge_softc *sc, uint8_t ether_addr[]) 6354 { 6355 uint32_t mac_addr; 6356 6357 mac_addr = bge_readmem_ind(sc, BGE_SRAM_MAC_ADDR_HIGH_MB); 6358 if ((mac_addr >> 16) == 0x484b) { 6359 ether_addr[0] = (uint8_t)(mac_addr >> 8); 6360 ether_addr[1] = (uint8_t)mac_addr; 6361 mac_addr = bge_readmem_ind(sc, BGE_SRAM_MAC_ADDR_LOW_MB); 6362 ether_addr[2] = (uint8_t)(mac_addr >> 24); 6363 ether_addr[3] = (uint8_t)(mac_addr >> 16); 6364 ether_addr[4] = (uint8_t)(mac_addr >> 8); 6365 ether_addr[5] = (uint8_t)mac_addr; 6366 return 0; 6367 } 6368 return 1; 6369 } 6370 6371 static int 6372 bge_get_eaddr_nvram(struct bge_softc *sc, uint8_t ether_addr[]) 6373 { 6374 int mac_offset = BGE_EE_MAC_OFFSET; 6375 6376 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) 6377 mac_offset = BGE_EE_MAC_OFFSET_5906; 6378 6379 return (bge_read_nvram(sc, ether_addr, mac_offset + 2, 6380 ETHER_ADDR_LEN)); 6381 } 6382 6383 static int 6384 bge_get_eaddr_eeprom(struct bge_softc *sc, uint8_t ether_addr[]) 6385 { 6386 6387 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) 6388 return 1; 6389 6390 return (bge_read_eeprom(sc, ether_addr, BGE_EE_MAC_OFFSET + 2, 6391 ETHER_ADDR_LEN)); 6392 } 6393 6394 static int 6395 bge_get_eaddr(struct bge_softc *sc, uint8_t eaddr[]) 6396 { 6397 static const bge_eaddr_fcn_t bge_eaddr_funcs[] = { 6398 /* NOTE: Order is critical */ 6399 bge_get_eaddr_fw, 6400 bge_get_eaddr_mem, 6401 bge_get_eaddr_nvram, 6402 bge_get_eaddr_eeprom, 6403 NULL 6404 }; 6405 const bge_eaddr_fcn_t *func; 6406 6407 for (func = bge_eaddr_funcs; *func != NULL; ++func) { 6408 if ((*func)(sc, eaddr) == 0) 6409 break; 6410 } 6411 return *func == NULL ? ENXIO : 0; 6412 } 6413