/*	$NetBSD: if_bge.c,v 1.190 2011/01/09 13:01:03 jruoho Exp $	*/

/*
 * Copyright (c) 2001 Wind River Systems
 * Copyright (c) 1997, 1998, 1999, 2001
 *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: if_bge.c,v 1.13 2002/04/04 06:01:31 wpaul Exp $
 */

/*
 * Broadcom BCM570x family gigabit ethernet driver for NetBSD.
 *
 * NetBSD version by:
 *
 *	Frank van der Linden <fvdl@wasabisystems.com>
 *	Jason Thorpe <thorpej@wasabisystems.com>
 *	Jonathan Stone <jonathan@dsg.stanford.edu>
 *
 * Originally written for FreeBSD by Bill Paul <wpaul@windriver.com>
 * Senior Engineer, Wind River Systems
 */

/*
 * The Broadcom BCM5700 is based on technology originally developed by
 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
 * MAC chips.  The BCM5700, sometimes referred to as the Tigon III, has
 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
 * SSRAM.  The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
 * frames, highly configurable RX filtering, and 16 RX and TX queues
 * (which, along with RX filter rules, can be used for QOS applications).
 * Other features, such as TCP segmentation, may be available as part
 * of value-added firmware updates.  Unlike the Tigon I and Tigon II,
 * firmware images can be stored in hardware and need not be compiled
 * into the driver.
 *
 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
 *
 * The BCM5701 is a single-chip solution incorporating both the BCM5700
 * MAC and a BCM5401 10/100/1000 PHY.  Unlike the BCM5700, the BCM5701
 * does not support external SSRAM.
 *
 * Broadcom also produces a variation of the BCM5700 under the "Altima"
 * brand name, which is functionally similar but lacks PCI-X support.
 *
 * Without external SSRAM, you can only have at most 4 TX rings,
 * and the use of the mini RX ring is disabled.  This seems to imply
 * that these features are simply not available on the BCM5701.  As a
 * result, this driver does not implement any support for the mini RX
 * ring.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_bge.c,v 1.190 2011/01/09 13:01:03 jruoho Exp $");

#include "vlan.h"
#include "rnd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/socket.h>
#include <sys/sysctl.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#if NRND > 0
#include <sys/rnd.h>
#endif

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#endif

/* Headers for TCP Segmentation Offload (TSO) */
#include <netinet/in_systm.h>	/* n_time for <netinet/ip.h>... */
#include <netinet/in.h>		/* ip_{src,dst}, for <netinet/ip.h> */
#include <netinet/ip.h>		/* for struct ip */
#include <netinet/tcp.h>	/* for struct tcphdr */

#include <net/bpf.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/brgphyreg.h>

#include <dev/pci/if_bgereg.h>
#include <dev/pci/if_bgevar.h>

#include <prop/proplib.h>

#define ETHER_MIN_NOPAD	(ETHER_MIN_LEN - ETHER_CRC_LEN)	/* i.e., 60 */

/*
 * Tunable thresholds for rx-side bge interrupt mitigation.
 */

/*
 * The pairs of values below were obtained from empirical measurement
 * on bcm5700 rev B2; they are designed to give roughly 1 receive
 * interrupt for every N packets received, where N is, approximately,
 * the second value (rx_max_bds) in each pair.  The values are chosen
 * such that moving from one pair to the succeeding pair was observed
 * to roughly halve interrupt rate under sustained input packet load.
 * The values were empirically chosen to avoid overflowing internal
 * limits on the bcm5700: increasing rx_ticks much beyond 600
 * results in internal wrapping and higher interrupt rates.
 * The limit of 46 frames was chosen to match NFS workloads.
 *
 * These values also work well on bcm5701, bcm5704C, and (less
 * tested) bcm5703.  On other chipsets, (including the Altima chip
 * family), the larger values may overflow internal chip limits,
 * leading to increasing interrupt rates rather than lower interrupt
 * rates.
 *
 * Applications using heavy interrupt mitigation (interrupting every
 * 32 or 46 frames) in both directions may need to increase the TCP
 * window size to above 131072 bytes (e.g., to 199608 bytes) to sustain
 * full link bandwidth, due to ACKs and window updates lingering
 * in the RX queue during the 30-to-40-frame interrupt-mitigation window.
 */
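/*
 * Rough illustration of the arithmetic (not a measured figure): with
 * the largest pair below, { 598, 46 }, a sustained inbound load of
 * ~50,000 packets/sec should coalesce to roughly 50000/46, or ~1100,
 * rx interrupts/sec, versus ~50,000/sec with mitigation disabled.
 */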
static const struct bge_load_rx_thresh {
	int rx_ticks;
	int rx_max_bds;
} bge_rx_threshes[] = {
	{ 32, 2 },
	{ 50, 4 },
	{ 100, 8 },
	{ 192, 16 },
	{ 416, 32 },
	{ 598, 46 }
};
#define NBGE_RX_THRESH (sizeof(bge_rx_threshes) / sizeof(bge_rx_threshes[0]))

/* XXX patchable; should be sysctl'able */
static int bge_auto_thresh = 1;
static int bge_rx_thresh_lvl;

static int bge_rxthresh_nodenum;

typedef int (*bge_eaddr_fcn_t)(struct bge_softc *, uint8_t[]);

static int bge_probe(device_t, cfdata_t, void *);
static void bge_attach(device_t, device_t, void *);
static void bge_release_resources(struct bge_softc *);

static int bge_get_eaddr_fw(struct bge_softc *, uint8_t[]);
static int bge_get_eaddr_mem(struct bge_softc *, uint8_t[]);
static int bge_get_eaddr_nvram(struct bge_softc *, uint8_t[]);
static int bge_get_eaddr_eeprom(struct bge_softc *, uint8_t[]);
static int bge_get_eaddr(struct bge_softc *, uint8_t[]);

static void bge_txeof(struct bge_softc *);
static void bge_rxeof(struct bge_softc *);

static void bge_asf_driver_up (struct bge_softc *);
static void bge_tick(void *);
static void bge_stats_update(struct bge_softc *);
static void bge_stats_update_regs(struct bge_softc *);
static int bge_encap(struct bge_softc *, struct mbuf *, uint32_t *);

static int bge_intr(void *);
static void bge_start(struct ifnet *);
static int bge_ifflags_cb(struct ethercom *);
static int bge_ioctl(struct ifnet *, u_long, void *);
static int bge_init(struct ifnet *);
static void bge_stop(struct ifnet *, int);
static void bge_watchdog(struct ifnet *);
static int bge_ifmedia_upd(struct ifnet *);
static void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);

static uint8_t bge_nvram_getbyte(struct bge_softc *, int, uint8_t *);
static int bge_read_nvram(struct bge_softc *, uint8_t *, int, int);

static uint8_t bge_eeprom_getbyte(struct bge_softc *, int, uint8_t *);
static int bge_read_eeprom(struct bge_softc *, void *, int, int);
static void bge_setmulti(struct bge_softc *);

static void bge_handle_events(struct bge_softc *);
static int bge_alloc_jumbo_mem(struct bge_softc *);
#if 0 /* XXX */
static void bge_free_jumbo_mem(struct bge_softc *);
#endif
static void *bge_jalloc(struct bge_softc *);
static void bge_jfree(struct mbuf *, void *, size_t, void *);
static int bge_newbuf_std(struct bge_softc *, int, struct mbuf *,
	    bus_dmamap_t);
static int bge_newbuf_jumbo(struct bge_softc *, int, struct mbuf *);
static int bge_init_rx_ring_std(struct bge_softc *);
static void bge_free_rx_ring_std(struct bge_softc *);
static int bge_init_rx_ring_jumbo(struct bge_softc *);
static void bge_free_rx_ring_jumbo(struct bge_softc *);
static void bge_free_tx_ring(struct bge_softc *);
static int bge_init_tx_ring(struct bge_softc *);

static int bge_chipinit(struct bge_softc *);
static int bge_blockinit(struct bge_softc *);
static int bge_setpowerstate(struct bge_softc *, int);
static uint32_t bge_readmem_ind(struct bge_softc *, int);
static void bge_writemem_ind(struct bge_softc *, int, int);
static void bge_writembx(struct bge_softc *, int, int);
static void bge_writemem_direct(struct bge_softc *, int, int);
static void bge_writereg_ind(struct bge_softc *, int, int);
static void bge_set_max_readrq(struct bge_softc *);

static int bge_miibus_readreg(device_t, int, int);
static void bge_miibus_writereg(device_t, int, int, int);
static void bge_miibus_statchg(device_t);

#define BGE_RESET_START 1
#define BGE_RESET_STOP  2
static void bge_sig_post_reset(struct bge_softc *, int);
static void bge_sig_legacy(struct bge_softc *, int);
static void bge_sig_pre_reset(struct bge_softc *, int);
static void bge_stop_fw(struct bge_softc *);
static int bge_reset(struct bge_softc *);
static void bge_link_upd(struct bge_softc *);
static void sysctl_bge_init(struct bge_softc *);
static int sysctl_bge_verify(SYSCTLFN_PROTO);

#ifdef BGE_DEBUG
#define DPRINTF(x)	if (bgedebug) printf x
#define DPRINTFN(n,x)	if (bgedebug >= (n)) printf x
#define BGE_TSO_PRINTF(x)  do { if (bge_tso_debug) printf x ;} while (0)
int	bgedebug = 0;
int	bge_tso_debug = 0;
void	bge_debug_info(struct bge_softc *);
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#define BGE_TSO_PRINTF(x)
#endif

#ifdef BGE_EVENT_COUNTERS
#define	BGE_EVCNT_INCR(ev)	(ev).ev_count++
#define	BGE_EVCNT_ADD(ev, val)	(ev).ev_count += (val)
#define	BGE_EVCNT_UPD(ev, val)	(ev).ev_count = (val)
#else
#define	BGE_EVCNT_INCR(ev)	/* nothing */
#define	BGE_EVCNT_ADD(ev, val)	/* nothing */
#define	BGE_EVCNT_UPD(ev, val)	/* nothing */
#endif
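/*
 * Device match table: the attach code compares the candidate device's
 * (vendor, product) pair against these entries; the all-zero entry
 * terminates the table.
 */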
static const struct bge_product {
	pci_vendor_id_t		bp_vendor;
	pci_product_id_t	bp_product;
	const char		*bp_name;
} bge_products[] = {
	/*
	 * The BCM5700 documentation seems to indicate that the hardware
	 * still has the Alteon vendor ID burned into it, though it
	 * should always be overridden by the value in the EEPROM.  We'll
	 * check for it anyway.
	 */
	{ PCI_VENDOR_ALTEON,
	  PCI_PRODUCT_ALTEON_BCM5700,
	  "Broadcom BCM5700 Gigabit Ethernet",
	},
	{ PCI_VENDOR_ALTEON,
	  PCI_PRODUCT_ALTEON_BCM5701,
	  "Broadcom BCM5701 Gigabit Ethernet",
	},
	{ PCI_VENDOR_ALTIMA,
	  PCI_PRODUCT_ALTIMA_AC1000,
	  "Altima AC1000 Gigabit Ethernet",
	},
	{ PCI_VENDOR_ALTIMA,
	  PCI_PRODUCT_ALTIMA_AC1001,
	  "Altima AC1001 Gigabit Ethernet",
	},
	{ PCI_VENDOR_ALTIMA,
	  PCI_PRODUCT_ALTIMA_AC9100,
	  "Altima AC9100 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5700,
	  "Broadcom BCM5700 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5701,
	  "Broadcom BCM5701 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5702,
	  "Broadcom BCM5702 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5702X,
	  "Broadcom BCM5702X Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5703,
	  "Broadcom BCM5703 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5703X,
	  "Broadcom BCM5703X Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5703_ALT,
	  "Broadcom BCM5703 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5704C,
	  "Broadcom BCM5704C Dual Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5704S,
	  "Broadcom BCM5704S Dual Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5705,
	  "Broadcom BCM5705 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5705F,
	  "Broadcom BCM5705F Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5705K,
	  "Broadcom BCM5705K Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5705M,
	  "Broadcom BCM5705M Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5705M_ALT,
	  "Broadcom BCM5705M Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5714,
	  "Broadcom BCM5714 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5714S,
	  "Broadcom BCM5714S Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5715,
	  "Broadcom BCM5715 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5715S,
	  "Broadcom BCM5715S Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5717,
	  "Broadcom BCM5717 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5718,
	  "Broadcom BCM5718 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5720,
	  "Broadcom BCM5720 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5721,
	  "Broadcom BCM5721 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5722,
	  "Broadcom BCM5722 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5723,
	  "Broadcom BCM5723 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5724,
	  "Broadcom BCM5724 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5750,
	  "Broadcom BCM5750 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5750M,
	  "Broadcom BCM5750M Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5751,
	  "Broadcom BCM5751 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5751F,
	  "Broadcom BCM5751F Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5751M,
	  "Broadcom BCM5751M Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5752,
	  "Broadcom BCM5752 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5752M,
	  "Broadcom BCM5752M Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5753,
	  "Broadcom BCM5753 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5753F,
	  "Broadcom BCM5753F Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5753M,
	  "Broadcom BCM5753M Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5754,
	  "Broadcom BCM5754 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5754M,
	  "Broadcom BCM5754M Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5755,
	  "Broadcom BCM5755 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5755M,
	  "Broadcom BCM5755M Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5756,
	  "Broadcom BCM5756 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5761,
	  "Broadcom BCM5761 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5761E,
	  "Broadcom BCM5761E Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5761S,
	  "Broadcom BCM5761S Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5761SE,
	  "Broadcom BCM5761SE Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5764,
	  "Broadcom BCM5764 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5780,
	  "Broadcom BCM5780 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5780S,
	  "Broadcom BCM5780S Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5781,
	  "Broadcom BCM5781 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5782,
	  "Broadcom BCM5782 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5784M,
	  "BCM5784M NetLink 1000baseT Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5786,
	  "Broadcom BCM5786 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5787,
	  "Broadcom BCM5787 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5787M,
	  "Broadcom BCM5787M Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5788,
	  "Broadcom BCM5788 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5789,
	  "Broadcom BCM5789 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5901,
	  "Broadcom BCM5901 Fast Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5901A2,
	  "Broadcom BCM5901A2 Fast Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5903M,
	  "Broadcom BCM5903M Fast Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5906,
	  "Broadcom BCM5906 Fast Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5906M,
"Broadcom BCM5906M Fast Ethernet", 559 }, 560 { PCI_VENDOR_BROADCOM, 561 PCI_PRODUCT_BROADCOM_BCM57760, 562 "Broadcom BCM57760 Fast Ethernet", 563 }, 564 { PCI_VENDOR_BROADCOM, 565 PCI_PRODUCT_BROADCOM_BCM57761, 566 "Broadcom BCM57761 Fast Ethernet", 567 }, 568 { PCI_VENDOR_BROADCOM, 569 PCI_PRODUCT_BROADCOM_BCM57765, 570 "Broadcom BCM57765 Fast Ethernet", 571 }, 572 { PCI_VENDOR_BROADCOM, 573 PCI_PRODUCT_BROADCOM_BCM57780, 574 "Broadcom BCM57780 Fast Ethernet", 575 }, 576 { PCI_VENDOR_BROADCOM, 577 PCI_PRODUCT_BROADCOM_BCM57781, 578 "Broadcom BCM57781 Fast Ethernet", 579 }, 580 { PCI_VENDOR_BROADCOM, 581 PCI_PRODUCT_BROADCOM_BCM57785, 582 "Broadcom BCM57785 Fast Ethernet", 583 }, 584 { PCI_VENDOR_BROADCOM, 585 PCI_PRODUCT_BROADCOM_BCM57788, 586 "Broadcom BCM57788 Fast Ethernet", 587 }, 588 { PCI_VENDOR_BROADCOM, 589 PCI_PRODUCT_BROADCOM_BCM57790, 590 "Broadcom BCM57790 Fast Ethernet", 591 }, 592 { PCI_VENDOR_BROADCOM, 593 PCI_PRODUCT_BROADCOM_BCM57791, 594 "Broadcom BCM57791 Fast Ethernet", 595 }, 596 { PCI_VENDOR_BROADCOM, 597 PCI_PRODUCT_BROADCOM_BCM57795, 598 "Broadcom BCM57795 Fast Ethernet", 599 }, 600 { PCI_VENDOR_SCHNEIDERKOCH, 601 PCI_PRODUCT_SCHNEIDERKOCH_SK_9DX1, 602 "SysKonnect SK-9Dx1 Gigabit Ethernet", 603 }, 604 { PCI_VENDOR_3COM, 605 PCI_PRODUCT_3COM_3C996, 606 "3Com 3c996 Gigabit Ethernet", 607 }, 608 { 0, 609 0, 610 NULL }, 611 }; 612 613 /* 614 * XXX: how to handle variants based on 5750 and derivatives: 615 * 5750 5751, 5721, possibly 5714, 5752, and 5708?, which 616 * in general behave like a 5705, except with additional quirks. 617 * This driver's current handling of the 5721 is wrong; 618 * how we map ASIC revision to "quirks" needs more thought. 619 * (defined here until the thought is done). 620 */ 621 #define BGE_IS_5700_FAMILY(sc) ((sc)->bge_flags & BGE_5700_FAMILY) 622 #define BGE_IS_5714_FAMILY(sc) ((sc)->bge_flags & BGE_5714_FAMILY) 623 #define BGE_IS_5705_PLUS(sc) ((sc)->bge_flags & BGE_5705_PLUS) 624 #define BGE_IS_5750_OR_BEYOND(sc) ((sc)->bge_flags & BGE_5750_PLUS) 625 #define BGE_IS_5755_PLUS(sc) ((sc)->bge_flags & BGE_5755_PLUS) 626 #define BGE_IS_JUMBO_CAPABLE(sc) ((sc)->bge_flags & BGE_JUMBO_CAPABLE) 627 628 static const struct bge_revision { 629 uint32_t br_chipid; 630 const char *br_name; 631 } bge_revisions[] = { 632 { BGE_CHIPID_BCM5700_A0, "BCM5700 A0" }, 633 { BGE_CHIPID_BCM5700_A1, "BCM5700 A1" }, 634 { BGE_CHIPID_BCM5700_B0, "BCM5700 B0" }, 635 { BGE_CHIPID_BCM5700_B1, "BCM5700 B1" }, 636 { BGE_CHIPID_BCM5700_B2, "BCM5700 B2" }, 637 { BGE_CHIPID_BCM5700_B3, "BCM5700 B3" }, 638 /* This is treated like a BCM5700 Bx */ 639 { BGE_CHIPID_BCM5700_ALTIMA, "BCM5700 Altima" }, 640 { BGE_CHIPID_BCM5700_C0, "BCM5700 C0" }, 641 { BGE_CHIPID_BCM5701_A0, "BCM5701 A0" }, 642 { BGE_CHIPID_BCM5701_B0, "BCM5701 B0" }, 643 { BGE_CHIPID_BCM5701_B2, "BCM5701 B2" }, 644 { BGE_CHIPID_BCM5701_B5, "BCM5701 B5" }, 645 { BGE_CHIPID_BCM5703_A0, "BCM5702/5703 A0" }, 646 { BGE_CHIPID_BCM5703_A1, "BCM5702/5703 A1" }, 647 { BGE_CHIPID_BCM5703_A2, "BCM5702/5703 A2" }, 648 { BGE_CHIPID_BCM5703_A3, "BCM5702/5703 A3" }, 649 { BGE_CHIPID_BCM5703_B0, "BCM5702/5703 B0" }, 650 { BGE_CHIPID_BCM5704_A0, "BCM5704 A0" }, 651 { BGE_CHIPID_BCM5704_A1, "BCM5704 A1" }, 652 { BGE_CHIPID_BCM5704_A2, "BCM5704 A2" }, 653 { BGE_CHIPID_BCM5704_A3, "BCM5704 A3" }, 654 { BGE_CHIPID_BCM5704_B0, "BCM5704 B0" }, 655 { BGE_CHIPID_BCM5705_A0, "BCM5705 A0" }, 656 { BGE_CHIPID_BCM5705_A1, "BCM5705 A1" }, 657 { BGE_CHIPID_BCM5705_A2, "BCM5705 A2" }, 658 { BGE_CHIPID_BCM5705_A3, "BCM5705 A3" }, 659 { 
	{ BGE_CHIPID_BCM5750_A0, "BCM5750 A0" },
	{ BGE_CHIPID_BCM5750_A1, "BCM5750 A1" },
	{ BGE_CHIPID_BCM5750_A3, "BCM5750 A3" },
	{ BGE_CHIPID_BCM5750_B0, "BCM5750 B0" },
	{ BGE_CHIPID_BCM5750_B1, "BCM5750 B1" },
	{ BGE_CHIPID_BCM5750_C0, "BCM5750 C0" },
	{ BGE_CHIPID_BCM5750_C1, "BCM5750 C1" },
	{ BGE_CHIPID_BCM5750_C2, "BCM5750 C2" },
	{ BGE_CHIPID_BCM5752_A0, "BCM5752 A0" },
	{ BGE_CHIPID_BCM5752_A1, "BCM5752 A1" },
	{ BGE_CHIPID_BCM5752_A2, "BCM5752 A2" },
	{ BGE_CHIPID_BCM5714_A0, "BCM5714 A0" },
	{ BGE_CHIPID_BCM5714_B0, "BCM5714 B0" },
	{ BGE_CHIPID_BCM5714_B3, "BCM5714 B3" },
	{ BGE_CHIPID_BCM5715_A0, "BCM5715 A0" },
	{ BGE_CHIPID_BCM5715_A1, "BCM5715 A1" },
	{ BGE_CHIPID_BCM5715_A3, "BCM5715 A3" },
	{ BGE_CHIPID_BCM5755_A0, "BCM5755 A0" },
	{ BGE_CHIPID_BCM5755_A1, "BCM5755 A1" },
	{ BGE_CHIPID_BCM5755_A2, "BCM5755 A2" },
	{ BGE_CHIPID_BCM5755_C0, "BCM5755 C0" },
	{ BGE_CHIPID_BCM5761_A0, "BCM5761 A0" },
	{ BGE_CHIPID_BCM5761_A1, "BCM5761 A1" },
	{ BGE_CHIPID_BCM5784_A0, "BCM5784 A0" },
	{ BGE_CHIPID_BCM5784_A1, "BCM5784 A1" },
	/* 5754 and 5787 share the same ASIC ID */
	{ BGE_CHIPID_BCM5787_A0, "BCM5754/5787 A0" },
	{ BGE_CHIPID_BCM5787_A1, "BCM5754/5787 A1" },
	{ BGE_CHIPID_BCM5787_A2, "BCM5754/5787 A2" },
	{ BGE_CHIPID_BCM5906_A1, "BCM5906 A1" },
	{ BGE_CHIPID_BCM5906_A2, "BCM5906 A2" },
	{ BGE_CHIPID_BCM57780_A0, "BCM57780 A0" },
	{ BGE_CHIPID_BCM57780_A1, "BCM57780 A1" },

	{ 0, NULL }
};

/*
 * Some defaults for major revisions, so that newer steppings
 * that we don't know about have a shot at working.
 */
static const struct bge_revision bge_majorrevs[] = {
	{ BGE_ASICREV_BCM5700, "unknown BCM5700" },
	{ BGE_ASICREV_BCM5701, "unknown BCM5701" },
	{ BGE_ASICREV_BCM5703, "unknown BCM5703" },
	{ BGE_ASICREV_BCM5704, "unknown BCM5704" },
	{ BGE_ASICREV_BCM5705, "unknown BCM5705" },
	{ BGE_ASICREV_BCM5750, "unknown BCM5750" },
	{ BGE_ASICREV_BCM5714_A0, "unknown BCM5714" },
	{ BGE_ASICREV_BCM5752, "unknown BCM5752" },
	{ BGE_ASICREV_BCM5780, "unknown BCM5780" },
	{ BGE_ASICREV_BCM5714, "unknown BCM5714" },
	{ BGE_ASICREV_BCM5755, "unknown BCM5755" },
	{ BGE_ASICREV_BCM5761, "unknown BCM5761" },
	{ BGE_ASICREV_BCM5784, "unknown BCM5784" },
	{ BGE_ASICREV_BCM5785, "unknown BCM5785" },
	/* 5754 and 5787 share the same ASIC ID */
	{ BGE_ASICREV_BCM5787, "unknown BCM5754/5787" },
	{ BGE_ASICREV_BCM5906, "unknown BCM5906" },
	{ BGE_ASICREV_BCM57780, "unknown BCM57780" },
	{ BGE_ASICREV_BCM5717, "unknown BCM5717" },
	{ BGE_ASICREV_BCM57765, "unknown BCM57765" },

	{ 0, NULL }
};

static int bge_allow_asf = 1;

CFATTACH_DECL_NEW(bge, sizeof(struct bge_softc),
    bge_probe, bge_attach, NULL, NULL);

static uint32_t
bge_readmem_ind(struct bge_softc *sc, int off)
{
	pcireg_t val;

	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_BASEADDR, off);
	val = pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_DATA);
	return val;
}

static void
bge_writemem_ind(struct bge_softc *sc, int off, int val)
{
	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_BASEADDR, off);
	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_DATA, val);
}
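/*
 * The two routines above reach NIC-local memory through a window in
 * PCI config space: the target offset is written to
 * BGE_PCI_MEMWIN_BASEADDR and the datum is then transferred through
 * BGE_PCI_MEMWIN_DATA.  For example, bge_poll_fw() below uses
 * bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM) to fetch the firmware
 * handshake word from NIC memory.
 */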
/*
 * PCI Express only
 */
static void
bge_set_max_readrq(struct bge_softc *sc)
{
	pcireg_t val;

	val = pci_conf_read(sc->sc_pc, sc->sc_pcitag, sc->bge_pciecap
	    + PCI_PCIE_DCSR);
	if ((val & PCI_PCIE_DCSR_MAX_READ_REQ) !=
	    BGE_PCIE_DEVCTL_MAX_READRQ_4096) {
		aprint_verbose_dev(sc->bge_dev,
		    "adjust device control 0x%04x ", val);
		val &= ~PCI_PCIE_DCSR_MAX_READ_REQ;
		val |= BGE_PCIE_DEVCTL_MAX_READRQ_4096;
		pci_conf_write(sc->sc_pc, sc->sc_pcitag, sc->bge_pciecap
		    + PCI_PCIE_DCSR, val);
		aprint_verbose("-> 0x%04x\n", val);
	}
}

#ifdef notdef
static uint32_t
bge_readreg_ind(struct bge_softc *sc, int off)
{
	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_REG_BASEADDR, off);
	return (pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_REG_DATA));
}
#endif

static void
bge_writereg_ind(struct bge_softc *sc, int off, int val)
{
	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_REG_BASEADDR, off);
	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_REG_DATA, val);
}

static void
bge_writemem_direct(struct bge_softc *sc, int off, int val)
{
	CSR_WRITE_4(sc, off, val);
}

static void
bge_writembx(struct bge_softc *sc, int off, int val)
{
	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
		off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI;

	CSR_WRITE_4(sc, off, val);
}

static uint8_t
bge_nvram_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
{
	uint32_t access, byte = 0;
	int i;

	/* Lock. */
	CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
	for (i = 0; i < 8000; i++) {
		if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1)
			break;
		DELAY(20);
	}
	if (i == 8000)
		return 1;

	/* Enable access. */
	access = CSR_READ_4(sc, BGE_NVRAM_ACCESS);
	CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE);

	CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc);
	CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD);
	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
		DELAY(10);
		if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) {
			DELAY(10);
			break;
		}
	}

	if (i == BGE_TIMEOUT * 10) {
		aprint_error_dev(sc->bge_dev, "nvram read timed out\n");
		return 1;
	}

	/* Get result. */
	byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA);

	*dest = (bswap32(byte) >> ((addr % 4) * 8)) & 0xFF;

	/* Disable access. */
	CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access);

	/* Unlock. */
	CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1);
	CSR_READ_4(sc, BGE_NVRAM_SWARB);

	return 0;
}

/*
 * Read a sequence of bytes from NVRAM.
 */
static int
bge_read_nvram(struct bge_softc *sc, uint8_t *dest, int off, int cnt)
{
	int err = 0, i;
	uint8_t byte = 0;

	if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5906)
		return 1;

	for (i = 0; i < cnt; i++) {
		err = bge_nvram_getbyte(sc, off + i, &byte);
		if (err)
			break;
		*(dest + i) = byte;
	}

	return (err ? 1 : 0);
}

/*
 * Read a byte of data stored in the EEPROM at address 'addr.'  The
 * BCM570x supports both the traditional bitbang interface and an
 * auto access interface for reading the EEPROM.  We use the auto
 * access method.
 */
static uint8_t
bge_eeprom_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
{
	int i;
	uint32_t byte = 0;

	/*
	 * Enable use of auto EEPROM access so we can avoid
	 * having to use the bitbang method.
	 */
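	/*
	 * (The auto interface returns a full 32-bit word; the requested
	 * byte lane is extracted at the end of this function with
	 * (addr % 4) * 8.)
	 */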
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);

	/* Reset the EEPROM, load the clock period. */
	CSR_WRITE_4(sc, BGE_EE_ADDR,
	    BGE_EEADDR_RESET | BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
	DELAY(20);

	/* Issue the read EEPROM command. */
	CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);

	/* Wait for completion */
	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
		DELAY(10);
		if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
			break;
	}

	if (i == BGE_TIMEOUT * 10) {
		aprint_error_dev(sc->bge_dev, "eeprom read timed out\n");
		return 1;
	}

	/* Get result. */
	byte = CSR_READ_4(sc, BGE_EE_DATA);

	*dest = (byte >> ((addr % 4) * 8)) & 0xFF;

	return 0;
}

/*
 * Read a sequence of bytes from the EEPROM.
 */
static int
bge_read_eeprom(struct bge_softc *sc, void *destv, int off, int cnt)
{
	int err = 0, i;
	uint8_t byte = 0;
	char *dest = destv;

	for (i = 0; i < cnt; i++) {
		err = bge_eeprom_getbyte(sc, off + i, &byte);
		if (err)
			break;
		*(dest + i) = byte;
	}

	return (err ? 1 : 0);
}

static int
bge_miibus_readreg(device_t dev, int phy, int reg)
{
	struct bge_softc *sc = device_private(dev);
	uint32_t val;
	uint32_t autopoll;
	int i;

	/*
	 * Broadcom's own driver always assumes the internal
	 * PHY is at GMII address 1.  On some chips, the PHY responds
	 * to accesses at all addresses, which could cause us to
	 * bogusly attach the PHY 32 times at probe time.  Always
	 * restricting the lookup to address 1 is simpler than
	 * trying to figure out which chip revisions should be
	 * special-cased.
	 */
	if (phy != 1)
		return 0;

	/* Reading with autopolling on may trigger PCI errors */
	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_STS_CLRBIT(sc, BGE_STS_AUTOPOLL);
		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ | BGE_MICOMM_BUSY |
	    BGE_MIPHY(phy) | BGE_MIREG(reg));

	for (i = 0; i < BGE_TIMEOUT; i++) {
		val = CSR_READ_4(sc, BGE_MI_COMM);
		if (!(val & BGE_MICOMM_BUSY))
			break;
		delay(10);
	}

	if (i == BGE_TIMEOUT) {
		aprint_error_dev(sc->bge_dev, "PHY read timed out\n");
		val = 0;
		goto done;
	}

	val = CSR_READ_4(sc, BGE_MI_COMM);

done:
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_STS_SETBIT(sc, BGE_STS_AUTOPOLL);
		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	if (val & BGE_MICOMM_READFAIL)
		return 0;

	return (val & 0xFFFF);
}

static void
bge_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct bge_softc *sc = device_private(dev);
	uint32_t autopoll;
	int i;

	if (phy != 1)
		return;

	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906 &&
	    (reg == BRGPHY_MII_1000CTL || reg == BRGPHY_MII_AUXCTL))
		return;

	/* Reading with autopolling on may trigger PCI errors */
	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		delay(40);
		BGE_STS_CLRBIT(sc, BGE_STS_AUTOPOLL);
		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		delay(10); /* 40 usec is supposed to be adequate */
	}

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE | BGE_MICOMM_BUSY |
	    BGE_MIPHY(phy) | BGE_MIREG(reg) | val);
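	/*
	 * Poll for completion of the write; once BGE_MICOMM_BUSY
	 * clears, a short delay and a dummy read of BGE_MI_COMM
	 * follow before autopolling is re-enabled below.
	 */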
	for (i = 0; i < BGE_TIMEOUT; i++) {
		delay(10);
		if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) {
			delay(5);
			CSR_READ_4(sc, BGE_MI_COMM);
			break;
		}
	}

	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_STS_SETBIT(sc, BGE_STS_AUTOPOLL);
		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		delay(40);
	}

	if (i == BGE_TIMEOUT)
		aprint_error_dev(sc->bge_dev, "PHY write timed out\n");
}

static void
bge_miibus_statchg(device_t dev)
{
	struct bge_softc *sc = device_private(dev);
	struct mii_data *mii = &sc->bge_mii;

	/*
	 * Get flow control negotiation result.
	 */
	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->bge_flowflags) {
		sc->bge_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
		mii->mii_media_active &= ~IFM_ETH_FMASK;
	}

	BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
	    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
	else
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);

	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
		BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
	else
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);

	/*
	 * 802.3x flow control
	 */
	if (sc->bge_flowflags & IFM_ETH_RXPAUSE)
		BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);
	else
		BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);

	if (sc->bge_flowflags & IFM_ETH_TXPAUSE)
		BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE);
	else
		BGE_CLRBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE);
}

/*
 * Update rx threshold levels to values in a particular slot
 * of the interrupt-mitigation table bge_rx_threshes.
 */
static void
bge_set_thresh(struct ifnet *ifp, int lvl)
{
	struct bge_softc *sc = ifp->if_softc;
	int s;

	/* For now, just save the new Rx-intr thresholds and record
	 * that a threshold update is pending.  Updating the hardware
	 * registers here (even at splhigh()) is observed to
	 * occasionally cause glitches where Rx-interrupts are not
	 * honoured for up to 10 seconds. jonathan@NetBSD.org, 2003-04-05
	 */
	s = splnet();
	sc->bge_rx_coal_ticks = bge_rx_threshes[lvl].rx_ticks;
	sc->bge_rx_max_coal_bds = bge_rx_threshes[lvl].rx_max_bds;
	sc->bge_pending_rxintr_change = 1;
	splx(s);

	return;
}

/*
 * Update Rx thresholds of all bge devices
 */
static void
bge_update_all_threshes(int lvl)
{
	struct ifnet *ifp;
	const char * const namebuf = "bge";
	int namelen;

	if (lvl < 0)
		lvl = 0;
	else if (lvl >= NBGE_RX_THRESH)
		lvl = NBGE_RX_THRESH - 1;

	namelen = strlen(namebuf);
	/*
	 * Now search all the interfaces for this name/number
	 */
	IFNET_FOREACH(ifp) {
		if (strncmp(ifp->if_xname, namebuf, namelen) != 0)
			continue;
		/* We got a match: update if doing auto-threshold-tuning */
		if (bge_auto_thresh)
			bge_set_thresh(ifp, lvl);
	}
}
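/*
 * Illustration: bge_update_all_threshes(3) clamps the level into
 * range, walks every interface whose name begins with "bge" and, if
 * bge_auto_thresh is set, queues the { 192, 16 } pair from
 * bge_rx_threshes[] for each of them.
 */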
/*
 * Handle events that have triggered interrupts.
 */
static void
bge_handle_events(struct bge_softc *sc)
{

	return;
}

/*
 * Memory management for jumbo frames.
 */

static int
bge_alloc_jumbo_mem(struct bge_softc *sc)
{
	char *ptr, *kva;
	bus_dma_segment_t seg;
	int i, rseg, state, error;
	struct bge_jpool_entry *entry;

	state = error = 0;

	/* Grab a big chunk o' storage. */
	if (bus_dmamem_alloc(sc->bge_dmatag, BGE_JMEM, PAGE_SIZE, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->bge_dev, "can't alloc rx buffers\n");
		return ENOBUFS;
	}

	state = 1;
	if (bus_dmamem_map(sc->bge_dmatag, &seg, rseg, BGE_JMEM, (void **)&kva,
	    BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->bge_dev,
		    "can't map DMA buffers (%d bytes)\n", (int)BGE_JMEM);
		error = ENOBUFS;
		goto out;
	}

	state = 2;
	if (bus_dmamap_create(sc->bge_dmatag, BGE_JMEM, 1, BGE_JMEM, 0,
	    BUS_DMA_NOWAIT, &sc->bge_cdata.bge_rx_jumbo_map)) {
		aprint_error_dev(sc->bge_dev, "can't create DMA map\n");
		error = ENOBUFS;
		goto out;
	}

	state = 3;
	if (bus_dmamap_load(sc->bge_dmatag, sc->bge_cdata.bge_rx_jumbo_map,
	    kva, BGE_JMEM, NULL, BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->bge_dev, "can't load DMA map\n");
		error = ENOBUFS;
		goto out;
	}

	state = 4;
	sc->bge_cdata.bge_jumbo_buf = (void *)kva;
	DPRINTFN(1,("bge_jumbo_buf = %p\n", sc->bge_cdata.bge_jumbo_buf));

	SLIST_INIT(&sc->bge_jfree_listhead);
	SLIST_INIT(&sc->bge_jinuse_listhead);

	/*
	 * Now divide it up into 9K pieces and save the addresses
	 * in an array.
	 */
	ptr = sc->bge_cdata.bge_jumbo_buf;
	for (i = 0; i < BGE_JSLOTS; i++) {
		sc->bge_cdata.bge_jslots[i] = ptr;
		ptr += BGE_JLEN;
		entry = malloc(sizeof(struct bge_jpool_entry),
		    M_DEVBUF, M_NOWAIT);
		if (entry == NULL) {
			aprint_error_dev(sc->bge_dev,
			    "no memory for jumbo buffer queue!\n");
			error = ENOBUFS;
			goto out;
		}
		entry->slot = i;
		SLIST_INSERT_HEAD(&sc->bge_jfree_listhead,
		    entry, jpool_entries);
	}
out:
	if (error != 0) {
		switch (state) {
		case 4:
			bus_dmamap_unload(sc->bge_dmatag,
			    sc->bge_cdata.bge_rx_jumbo_map);
		case 3:
			bus_dmamap_destroy(sc->bge_dmatag,
			    sc->bge_cdata.bge_rx_jumbo_map);
		case 2:
			bus_dmamem_unmap(sc->bge_dmatag, kva, BGE_JMEM);
		case 1:
			bus_dmamem_free(sc->bge_dmatag, &seg, rseg);
			break;
		default:
			break;
		}
	}

	return error;
}

/*
 * Allocate a jumbo buffer.
 */
static void *
bge_jalloc(struct bge_softc *sc)
{
	struct bge_jpool_entry *entry;

	entry = SLIST_FIRST(&sc->bge_jfree_listhead);

	if (entry == NULL) {
		aprint_error_dev(sc->bge_dev, "no free jumbo buffers\n");
		return NULL;
	}

	SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jpool_entries);
	SLIST_INSERT_HEAD(&sc->bge_jinuse_listhead, entry, jpool_entries);
	return (sc->bge_cdata.bge_jslots[entry->slot]);
}
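/*
 * (Pool design note: the jumbo pool is a single DMA chunk carved into
 * BGE_JSLOTS slots of BGE_JLEN bytes; bge_jalloc() and bge_jfree()
 * merely move bge_jpool_entry records between the free and in-use
 * lists, so no per-buffer DMA (un)mapping happens in the rx path.)
 */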
/*
 * Release a jumbo buffer.
 */
static void
bge_jfree(struct mbuf *m, void *buf, size_t size, void *arg)
{
	struct bge_jpool_entry *entry;
	struct bge_softc *sc;
	int i, s;

	/* Extract the softc struct pointer. */
	sc = (struct bge_softc *)arg;

	if (sc == NULL)
		panic("bge_jfree: can't find softc pointer!");

	/* calculate the slot this buffer belongs to */
	i = ((char *)buf
	    - (char *)sc->bge_cdata.bge_jumbo_buf) / BGE_JLEN;

	if ((i < 0) || (i >= BGE_JSLOTS))
		panic("bge_jfree: asked to free buffer that we don't manage!");

	s = splvm();
	entry = SLIST_FIRST(&sc->bge_jinuse_listhead);
	if (entry == NULL)
		panic("bge_jfree: buffer not in use!");
	entry->slot = i;
	SLIST_REMOVE_HEAD(&sc->bge_jinuse_listhead, jpool_entries);
	SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, entry, jpool_entries);

	if (__predict_true(m != NULL))
		pool_cache_put(mb_cache, m);
	splx(s);
}

/*
 * Initialize a standard receive ring descriptor.
 */
static int
bge_newbuf_std(struct bge_softc *sc, int i, struct mbuf *m,
    bus_dmamap_t dmamap)
{
	struct mbuf *m_new = NULL;
	struct bge_rx_bd *r;
	int error;

	if (dmamap == NULL) {
		error = bus_dmamap_create(sc->bge_dmatag, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &dmamap);
		if (error != 0)
			return error;
	}

	sc->bge_cdata.bge_rx_std_map[i] = dmamap;

	if (m == NULL) {
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL)
			return ENOBUFS;

		MCLGET(m_new, M_DONTWAIT);
		if (!(m_new->m_flags & M_EXT)) {
			m_freem(m_new);
			return ENOBUFS;
		}
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;

	} else {
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
		m_new->m_data = m_new->m_ext.ext_buf;
	}
	if (!(sc->bge_flags & BGE_RX_ALIGNBUG))
		m_adj(m_new, ETHER_ALIGN);
	if (bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m_new,
	    BUS_DMA_READ|BUS_DMA_NOWAIT))
		return ENOBUFS;
	bus_dmamap_sync(sc->bge_dmatag, dmamap, 0, dmamap->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	sc->bge_cdata.bge_rx_std_chain[i] = m_new;
	r = &sc->bge_rdata->bge_rx_std_ring[i];
	BGE_HOSTADDR(r->bge_addr, dmamap->dm_segs[0].ds_addr);
	r->bge_flags = BGE_RXBDFLAG_END;
	r->bge_len = m_new->m_len;
	r->bge_idx = i;

	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
	    offsetof(struct bge_ring_data, bge_rx_std_ring) +
		i * sizeof (struct bge_rx_bd),
	    sizeof (struct bge_rx_bd),
	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

	return 0;
}

/*
 * Initialize a jumbo receive ring descriptor.  This allocates
 * a jumbo buffer from the pool managed internally by the driver.
 */
static int
bge_newbuf_jumbo(struct bge_softc *sc, int i, struct mbuf *m)
{
	struct mbuf *m_new = NULL;
	struct bge_rx_bd *r;
	void *buf = NULL;

	if (m == NULL) {

		/* Allocate the mbuf. */
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL)
			return ENOBUFS;

		/* Allocate the jumbo buffer */
		buf = bge_jalloc(sc);
		if (buf == NULL) {
			m_freem(m_new);
			aprint_error_dev(sc->bge_dev,
			    "jumbo allocation failed -- packet dropped!\n");
			return ENOBUFS;
		}

		/* Attach the buffer to the mbuf. */
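		/*
		 * (MEXTADD below attaches the jumbo slot to the mbuf as
		 * external storage and registers bge_jfree/sc as the
		 * free callback and argument, so the slot returns to
		 * the free list when the mbuf is released.)
		 */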
		m_new->m_len = m_new->m_pkthdr.len = BGE_JUMBO_FRAMELEN;
		MEXTADD(m_new, buf, BGE_JUMBO_FRAMELEN, M_DEVBUF,
		    bge_jfree, sc);
		m_new->m_flags |= M_EXT_RW;
	} else {
		m_new = m;
		buf = m_new->m_data = m_new->m_ext.ext_buf;
		m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN;
	}
	if (!(sc->bge_flags & BGE_RX_ALIGNBUG))
		m_adj(m_new, ETHER_ALIGN);
	bus_dmamap_sync(sc->bge_dmatag, sc->bge_cdata.bge_rx_jumbo_map,
	    mtod(m_new, char *) - (char *)sc->bge_cdata.bge_jumbo_buf, BGE_JLEN,
	    BUS_DMASYNC_PREREAD);
	/* Set up the descriptor. */
	r = &sc->bge_rdata->bge_rx_jumbo_ring[i];
	sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new;
	BGE_HOSTADDR(r->bge_addr, BGE_JUMBO_DMA_ADDR(sc, m_new));
	r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING;
	r->bge_len = m_new->m_len;
	r->bge_idx = i;

	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
	    offsetof(struct bge_ring_data, bge_rx_jumbo_ring) +
		i * sizeof (struct bge_rx_bd),
	    sizeof (struct bge_rx_bd),
	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

	return 0;
}

/*
 * The standard receive ring has 512 entries in it.  At 2K per mbuf cluster,
 * that's 1MB of memory, which is a lot.  For now, we fill only the first
 * 256 ring entries and hope that our CPU is fast enough to keep up with
 * the NIC.
 */
static int
bge_init_rx_ring_std(struct bge_softc *sc)
{
	int i;

	if (sc->bge_flags & BGE_RXRING_VALID)
		return 0;

	for (i = 0; i < BGE_SSLOTS; i++) {
		if (bge_newbuf_std(sc, i, NULL, 0) == ENOBUFS)
			return ENOBUFS;
	}

	sc->bge_std = i - 1;
	bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);

	sc->bge_flags |= BGE_RXRING_VALID;

	return 0;
}

static void
bge_free_rx_ring_std(struct bge_softc *sc)
{
	int i;

	if (!(sc->bge_flags & BGE_RXRING_VALID))
		return;

	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
			m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
			sc->bge_cdata.bge_rx_std_chain[i] = NULL;
			bus_dmamap_destroy(sc->bge_dmatag,
			    sc->bge_cdata.bge_rx_std_map[i]);
		}
		memset((char *)&sc->bge_rdata->bge_rx_std_ring[i], 0,
		    sizeof(struct bge_rx_bd));
	}

	sc->bge_flags &= ~BGE_RXRING_VALID;
}

static int
bge_init_rx_ring_jumbo(struct bge_softc *sc)
{
	int i;
	volatile struct bge_rcb *rcb;

	if (sc->bge_flags & BGE_JUMBO_RXRING_VALID)
		return 0;

	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
			return ENOBUFS;
	}

	sc->bge_jumbo = i - 1;
	sc->bge_flags |= BGE_JUMBO_RXRING_VALID;

	rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
	rcb->bge_maxlen_flags = 0;
	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);

	bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);

	return 0;
}

static void
bge_free_rx_ring_jumbo(struct bge_softc *sc)
{
	int i;

	if (!(sc->bge_flags & BGE_JUMBO_RXRING_VALID))
		return;

	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
			m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
			sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
		}
		memset((char *)&sc->bge_rdata->bge_rx_jumbo_ring[i], 0,
		    sizeof(struct bge_rx_bd));
	}

	sc->bge_flags &= ~BGE_JUMBO_RXRING_VALID;
}

static void
bge_free_tx_ring(struct bge_softc *sc)
{
	int i, freed;
	struct txdmamap_pool_entry *dma;

	if (!(sc->bge_flags & BGE_TXRING_VALID))
		return;

	freed = 0;

	for (i = 0; i < BGE_TX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
			freed++;
			m_freem(sc->bge_cdata.bge_tx_chain[i]);
			sc->bge_cdata.bge_tx_chain[i] = NULL;
			SLIST_INSERT_HEAD(&sc->txdma_list, sc->txdma[i],
			    link);
			sc->txdma[i] = 0;
		}
		memset((char *)&sc->bge_rdata->bge_tx_ring[i], 0,
		    sizeof(struct bge_tx_bd));
	}

	while ((dma = SLIST_FIRST(&sc->txdma_list))) {
		SLIST_REMOVE_HEAD(&sc->txdma_list, link);
		bus_dmamap_destroy(sc->bge_dmatag, dma->dmamap);
		free(dma, M_DEVBUF);
	}

	sc->bge_flags &= ~BGE_TXRING_VALID;
}

static int
bge_init_tx_ring(struct bge_softc *sc)
{
	int i;
	bus_dmamap_t dmamap;
	struct txdmamap_pool_entry *dma;

	if (sc->bge_flags & BGE_TXRING_VALID)
		return 0;

	sc->bge_txcnt = 0;
	sc->bge_tx_saved_considx = 0;

	/* Initialize transmit producer index for host-memory send ring. */
	sc->bge_tx_prodidx = 0;
	bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
	/* 5700 b2 errata */
	if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX)
		bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);

	/* NIC-memory send ring not used; initialize to zero. */
	bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
	/* 5700 b2 errata */
	if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX)
		bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);

	SLIST_INIT(&sc->txdma_list);
	for (i = 0; i < BGE_RSLOTS; i++) {
		if (bus_dmamap_create(sc->bge_dmatag, BGE_TXDMA_MAX,
		    BGE_NTXSEG, ETHER_MAX_LEN_JUMBO, 0, BUS_DMA_NOWAIT,
		    &dmamap))
			return ENOBUFS;
		if (dmamap == NULL)
			panic("dmamap NULL in bge_init_tx_ring");
		dma = malloc(sizeof(*dma), M_DEVBUF, M_NOWAIT);
		if (dma == NULL) {
			aprint_error_dev(sc->bge_dev,
			    "can't alloc txdmamap_pool_entry\n");
			bus_dmamap_destroy(sc->bge_dmatag, dmamap);
			return ENOMEM;
		}
		dma->dmamap = dmamap;
		SLIST_INSERT_HEAD(&sc->txdma_list, dma, link);
	}

	sc->bge_flags |= BGE_TXRING_VALID;

	return 0;
}

static void
bge_setmulti(struct bge_softc *sc)
{
	struct ethercom *ac = &sc->ethercom;
	struct ifnet *ifp = &ac->ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t hashes[4] = { 0, 0, 0, 0 };
	uint32_t h;
	int i;

	if (ifp->if_flags & IFF_PROMISC)
		goto allmulti;

	/* Now program new ones. */
	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range.  (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			goto allmulti;
		}

		h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);

		/* Just want the 7 least-significant bits. */
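		/*
		 * Worked example (an illustration, not from the chip
		 * docs): for a CRC of 0x2f, (0x2f & 0x60) >> 5 == 1 and
		 * 0x2f & 0x1f == 0x0f, so the insertion below sets bit
		 * 15 of hashes[1], i.e. bit 47 of the 128-bit filter.
		 */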
		h &= 0x7f;

		hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
		ETHER_NEXT_MULTI(step, enm);
	}

	ifp->if_flags &= ~IFF_ALLMULTI;
	goto setit;

allmulti:
	ifp->if_flags |= IFF_ALLMULTI;
	hashes[0] = hashes[1] = hashes[2] = hashes[3] = 0xffffffff;

setit:
	for (i = 0; i < 4; i++)
		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
}

static void
bge_sig_pre_reset(struct bge_softc *sc, int type)
{
	/*
	 * Some chips don't like this, so only do this if ASF is enabled.
	 */
	if (sc->bge_asf_mode)
		bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);

	if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
		switch (type) {
		case BGE_RESET_START:
			bge_writemem_ind(sc, BGE_SDI_STATUS, 0x1); /* START */
			break;
		case BGE_RESET_STOP:
			bge_writemem_ind(sc, BGE_SDI_STATUS, 0x2); /* UNLOAD */
			break;
		}
	}
}

static void
bge_sig_post_reset(struct bge_softc *sc, int type)
{

	if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
		switch (type) {
		case BGE_RESET_START:
			bge_writemem_ind(sc, BGE_SDI_STATUS, 0x80000001);
			/* START DONE */
			break;
		case BGE_RESET_STOP:
			bge_writemem_ind(sc, BGE_SDI_STATUS, 0x80000002);
			break;
		}
	}
}

static void
bge_sig_legacy(struct bge_softc *sc, int type)
{

	if (sc->bge_asf_mode) {
		switch (type) {
		case BGE_RESET_START:
			bge_writemem_ind(sc, BGE_SDI_STATUS, 0x1); /* START */
			break;
		case BGE_RESET_STOP:
			bge_writemem_ind(sc, BGE_SDI_STATUS, 0x2); /* UNLOAD */
			break;
		}
	}
}

static void
bge_stop_fw(struct bge_softc *sc)
{
	int i;

	if (sc->bge_asf_mode) {
		bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM_FW, BGE_FW_PAUSE);
		CSR_WRITE_4(sc, BGE_CPU_EVENT,
		    CSR_READ_4(sc, BGE_CPU_EVENT) | (1 << 14));

		for (i = 0; i < 100; i++) {
			if (!(CSR_READ_4(sc, BGE_CPU_EVENT) & (1 << 14)))
				break;
			DELAY(10);
		}
	}
}

static int
bge_poll_fw(struct bge_softc *sc)
{
	uint32_t val;
	int i;

	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
		for (i = 0; i < BGE_TIMEOUT; i++) {
			val = CSR_READ_4(sc, BGE_VCPU_STATUS);
			if (val & BGE_VCPU_STATUS_INIT_DONE)
				break;
			DELAY(100);
		}
		if (i >= BGE_TIMEOUT) {
			aprint_error_dev(sc->bge_dev, "reset timed out\n");
			return -1;
		}
	} else if ((sc->bge_flags & BGE_NO_EEPROM) == 0) {
		/*
		 * Poll the value location we just wrote until
		 * we see the 1's complement of the magic number.
		 * This indicates that the firmware initialization
		 * is complete.
		 * XXX 1000ms for Flash and 10000ms for SEEPROM.
		 */
		for (i = 0; i < BGE_TIMEOUT; i++) {
			val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
			if (val == ~BGE_MAGIC_NUMBER)
				break;
			DELAY(10);
		}

		if (i >= BGE_TIMEOUT) {
			aprint_error_dev(sc->bge_dev,
			    "firmware handshake timed out, val = %x\n", val);
			return -1;
		}
	}

	return 0;
}

/*
 * Do endian, PCI and DMA initialization.  Also check the on-board ROM
 * self-test results.
 */
static int
bge_chipinit(struct bge_softc *sc)
{
	int i;
	uint32_t dma_rw_ctl;

	/* Set endianness before we access any non-PCI registers. */
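	/*
	 * (The write below goes through PCI config space rather than
	 * the memory-mapped CSRs: correct CSR access itself depends on
	 * the byte/word-swap settings being programmed here.)
	 */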
	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MISC_CTL,
	    BGE_INIT);

	/* Set power state to D0. */
	bge_setpowerstate(sc, 0);

	/* Clear the MAC control register */
	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);

	/*
	 * Clear the MAC statistics block in the NIC's
	 * internal memory.
	 */
	for (i = BGE_STATS_BLOCK;
	    i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
		BGE_MEMWIN_WRITE(sc->sc_pc, sc->sc_pcitag, i, 0);

	for (i = BGE_STATUS_BLOCK;
	    i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
		BGE_MEMWIN_WRITE(sc->sc_pc, sc->sc_pcitag, i, 0);

	/* Set up the PCI DMA control register. */
	dma_rw_ctl = BGE_PCI_READ_CMD | BGE_PCI_WRITE_CMD;
	if (sc->bge_flags & BGE_PCIE) {
		/* Read watermark not used, 128 bytes for write. */
		DPRINTFN(4, ("(%s: PCI-Express DMA setting)\n",
		    device_xname(sc->bge_dev)));
		dma_rw_ctl |= (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
	} else if (sc->bge_flags & BGE_PCIX) {
		DPRINTFN(4, ("(%s: PCI-X DMA setting)\n",
		    device_xname(sc->bge_dev)));
		/* PCI-X bus */
		if (BGE_IS_5714_FAMILY(sc)) {
			/* 256 bytes for read and write. */
			dma_rw_ctl |= (0x02 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
			    (0x02 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);

			if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5780)
				dma_rw_ctl |=
				    BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL;
			else
				dma_rw_ctl |=
				    BGE_PCIDMARWCTL_ONEDMA_ATONCE_LOCAL;
		} else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
			/* 1536 bytes for read, 384 bytes for write. */
			dma_rw_ctl |=
			    (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
			    (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
		} else {
			/* 384 bytes for read and write. */
			dma_rw_ctl |= (0x03 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
			    (0x03 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
			    (0x0F);
		}

		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 ||
		    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
			uint32_t tmp;

			/* Set ONEDMA_ATONCE for hardware workaround. */
			tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f;
			if (tmp == 6 || tmp == 7)
				dma_rw_ctl |=
				    BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL;

			/* Set PCI-X DMA write workaround. */
			dma_rw_ctl |= BGE_PCIDMARWCTL_ASRT_ALL_BE;
		}
	} else {
		/* Conventional PCI bus: 256 bytes for read and write. */
		DPRINTFN(4, ("(%s: PCI 2.2 DMA setting)\n",
		    device_xname(sc->bge_dev)));
		dma_rw_ctl |= (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
		    (0x7 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
		if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5705 &&
		    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5750)
			dma_rw_ctl |= 0x0F;
	}

	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701)
		dma_rw_ctl |= BGE_PCIDMARWCTL_USE_MRM |
		    BGE_PCIDMARWCTL_ASRT_ALL_BE;

	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 ||
	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704)
		dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;

	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_DMA_RW_CTL,
	    dma_rw_ctl);
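	/*
	 * Illustrative decode of the value written above: on a PCI-X
	 * BCM5704 it is BGE_PCI_READ_CMD | BGE_PCI_WRITE_CMD with
	 * read/write watermarks of 0x7/0x3 (1536/384 bytes), plus
	 * BGE_PCIDMARWCTL_ASRT_ALL_BE and with BGE_PCIDMARWCTL_MINDMA
	 * cleared, per the 5703/5704 workarounds handled above.
	 */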
1866 */
1867 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS |
1868 BGE_MODECTL_MAC_ATTN_INTR | BGE_MODECTL_HOST_SEND_BDS |
1869 BGE_MODECTL_TX_NO_PHDR_CSUM | BGE_MODECTL_RX_NO_PHDR_CSUM);
1870
1871 /*
1872 * The BCM5701 B5 has a bug causing data corruption when using
1873 * 64-bit DMA reads, which can be terminated early and then
1874 * completed later as 32-bit accesses, in combination with
1875 * certain bridges.
1876 */
1877 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701 &&
1878 sc->bge_chipid == BGE_CHIPID_BCM5701_B5)
1879 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_FORCE_PCI32);
1880
1881 /*
1882 * Tell the firmware the driver is running.
1883 */
1884 if (sc->bge_asf_mode & ASF_STACKUP)
1885 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
1886
1887 /*
1888 * Disable memory write invalidate. Apparently it is not supported
1889 * properly by these devices.
1890 */
1891 PCI_CLRBIT(sc->sc_pc, sc->sc_pcitag, PCI_COMMAND_STATUS_REG,
1892 PCI_COMMAND_INVALIDATE_ENABLE);
1893
1894 #ifdef __brokenalpha__
1895 /*
1896 * Must ensure that we do not cross an 8K (bytes) boundary
1897 * for DMA reads. Our highest limit is 1K bytes. This is a
1898 * restriction on some ALPHA platforms with early revision
1899 * 21174 PCI chipsets, such as the AlphaPC 164lx
1900 */
1901 PCI_SETBIT(sc, BGE_PCI_DMA_RW_CTL, BGE_PCI_READ_BNDRY_1024, 4);
1902 #endif
1903
1904 /* Set the timer prescaler (always 66MHz) */
1905 CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);
1906
1907 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
1908 DELAY(40); /* XXX */
1909
1910 /* Put PHY into ready state */
1911 BGE_CLRBIT(sc, BGE_MISC_CFG, BGE_MISCCFG_EPHY_IDDQ);
1912 CSR_READ_4(sc, BGE_MISC_CFG); /* Flush */
1913 DELAY(40);
1914 }
1915
1916 return 0;
1917 }
1918
1919 static int
1920 bge_blockinit(struct bge_softc *sc)
1921 {
1922 volatile struct bge_rcb *rcb;
1923 bus_size_t rcb_addr;
1924 int i;
1925 struct ifnet *ifp = &sc->ethercom.ec_if;
1926 bge_hostaddr taddr;
1927 uint32_t val;
1928
1929 /*
1930 * Initialize the memory window pointer register so that
1931 * we can access the first 32K of internal NIC RAM. This will
1932 * allow us to set up the TX send ring RCBs and the RX return
1933 * ring RCBs, plus other things which live in NIC memory.
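 * The window works like any indirect-access scheme; illustratively,
 * this is roughly what bge_writemem_ind() does through PCI config
 * space:
 *
 *	pci_conf_write(pc, tag, BGE_PCI_MEMWIN_BASEADDR, off);
 *	pci_conf_write(pc, tag, BGE_PCI_MEMWIN_DATA, val);
 *
 * Clearing the base address below simply parks the window at the
 * start of NIC RAM.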
1934 */ 1935 1936 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_BASEADDR, 0); 1937 1938 /* Step 33: Configure mbuf memory pool */ 1939 if (BGE_IS_5700_FAMILY(sc)) { 1940 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, 1941 BGE_BUFFPOOL_1); 1942 1943 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) 1944 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000); 1945 else 1946 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000); 1947 1948 /* Configure DMA resource pool */ 1949 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR, 1950 BGE_DMA_DESCRIPTORS); 1951 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000); 1952 } 1953 1954 /* Step 35: Configure mbuf pool watermarks */ 1955 #ifdef ORIG_WPAUL_VALUES 1956 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 24); 1957 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 24); 1958 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 48); 1959 #else 1960 1961 /* new broadcom docs strongly recommend these: */ 1962 if (!BGE_IS_5705_PLUS(sc)) { 1963 if (ifp->if_mtu > ETHER_MAX_LEN) { 1964 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50); 1965 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20); 1966 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60); 1967 } else { 1968 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 304); 1969 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 152); 1970 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 380); 1971 } 1972 } else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) { 1973 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0); 1974 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04); 1975 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10); 1976 } else { 1977 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0); 1978 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10); 1979 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60); 1980 } 1981 #endif 1982 1983 /* Step 36: Configure DMA resource watermarks */ 1984 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5); 1985 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10); 1986 1987 /* Step 38: Enable buffer manager */ 1988 CSR_WRITE_4(sc, BGE_BMAN_MODE, 1989 BGE_BMANMODE_ENABLE | BGE_BMANMODE_LOMBUF_ATTN); 1990 1991 /* Step 39: Poll for buffer manager start indication */ 1992 for (i = 0; i < BGE_TIMEOUT * 2; i++) { 1993 if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE) 1994 break; 1995 DELAY(10); 1996 } 1997 1998 if (i == BGE_TIMEOUT * 2) { 1999 aprint_error_dev(sc->bge_dev, 2000 "buffer manager failed to start\n"); 2001 return ENXIO; 2002 } 2003 2004 /* Step 40: Enable flow-through queues */ 2005 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF); 2006 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0); 2007 2008 /* Wait until queue initialization is complete */ 2009 for (i = 0; i < BGE_TIMEOUT * 2; i++) { 2010 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0) 2011 break; 2012 DELAY(10); 2013 } 2014 2015 if (i == BGE_TIMEOUT * 2) { 2016 aprint_error_dev(sc->bge_dev, 2017 "flow-through queue init failed\n"); 2018 return ENXIO; 2019 } 2020 2021 /* Step 41: Initialize the standard RX ring control block */ 2022 rcb = &sc->bge_rdata->bge_info.bge_std_rx_rcb; 2023 BGE_HOSTADDR(rcb->bge_hostaddr, BGE_RING_DMA_ADDR(sc, bge_rx_std_ring)); 2024 if (BGE_IS_5705_PLUS(sc)) 2025 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0); 2026 else 2027 rcb->bge_maxlen_flags = 2028 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0); 2029 rcb->bge_nicaddr = BGE_STD_RX_RINGS; 2030 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi); 2031 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo); 2032 CSR_WRITE_4(sc, 
BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
2033 CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
2034
2035 /*
2036 * Step 42: Initialize the jumbo RX ring control block.
2037 * We set the 'ring disabled' bit in the flags
2038 * field until we're actually ready to start
2039 * using this ring (i.e. once we set the MTU
2040 * high enough to require it).
2041 */
2042 if (BGE_IS_JUMBO_CAPABLE(sc)) {
2043 rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
2044 BGE_HOSTADDR(rcb->bge_hostaddr,
2045 BGE_RING_DMA_ADDR(sc, bge_rx_jumbo_ring));
2046 rcb->bge_maxlen_flags =
2047 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN,
2048 BGE_RCB_FLAG_RING_DISABLED);
2049 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
2050 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
2051 rcb->bge_hostaddr.bge_addr_hi);
2052 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
2053 rcb->bge_hostaddr.bge_addr_lo);
2054 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
2055 rcb->bge_maxlen_flags);
2056 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
2057
2058 /* Set up dummy disabled mini ring RCB */
2059 rcb = &sc->bge_rdata->bge_info.bge_mini_rx_rcb;
2060 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
2061 BGE_RCB_FLAG_RING_DISABLED);
2062 CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
2063 rcb->bge_maxlen_flags);
2064
2065 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
2066 offsetof(struct bge_ring_data, bge_info),
2067 sizeof (struct bge_gib),
2068 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2069 }
2070
2071 /*
2072 * Set the BD ring replenish thresholds. The recommended
2073 * values are 1/8th the number of descriptors allocated to
2074 * each ring.
2075 */
2076 i = BGE_STD_RX_RING_CNT / 8;
2077
2078 /*
2079 * Use a value of 8 for the following chips to work around HW
2080 * errata. Some of these chips have been added based on empirical
2081 * evidence (they don't work unless this is done).
2082 */
2083 if (BGE_IS_5705_PLUS(sc))
2084 i = 8;
2085
2086 CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, i);
2087 CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT / 8);
2088
2089 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 ||
2090 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57765) {
2091 CSR_WRITE_4(sc, BGE_STD_REPL_LWM, 4);
2092 CSR_WRITE_4(sc, BGE_JUMBO_REPL_LWM, 4);
2093 }
2094
2095 /*
2096 * Disable all unused send rings by setting the 'ring disabled'
2097 * bit in the flags field of all the TX send ring control blocks.
2098 * These are located in NIC memory.
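 * For reference, each RCB is a 16-byte record; sketched here from the
 * fields this driver touches (see if_bgereg.h for the real layout):
 *
 *	struct bge_rcb {
 *		bge_hostaddr	bge_hostaddr;	  -- 64-bit host ring base
 *		uint32_t	bge_maxlen_flags; -- (maxlen << 16) | flags
 *		uint32_t	bge_nicaddr;	  -- ring address in NIC RAM
 *	};
 *
 * which is why the loop below steps rcb_addr by sizeof(struct bge_rcb)
 * and writes BGE_RCB_FLAG_RING_DISABLED into the maxlen_flags word.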
2099 */
2100 rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
2101 for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) {
2102 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
2103 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
2104 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0);
2105 rcb_addr += sizeof(struct bge_rcb);
2106 }
2107
2108 /* Configure TX RCB 0 (we use only the first ring) */
2109 rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
2110 BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_tx_ring));
2111 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
2112 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
2113 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr,
2114 BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
2115 if (BGE_IS_5700_FAMILY(sc))
2116 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
2117 BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
2118
2119 /* Disable all unused RX return rings */
2120 rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
2121 for (i = 0; i < BGE_RX_RINGS_MAX; i++) {
2122 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, 0);
2123 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, 0);
2124 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
2125 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt,
2126 BGE_RCB_FLAG_RING_DISABLED));
2127 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0);
2128 bge_writembx(sc, BGE_MBX_RX_CONS0_LO +
2129 (i * (sizeof(uint64_t))), 0);
2130 rcb_addr += sizeof(struct bge_rcb);
2131 }
2132
2133 /* Initialize RX ring indexes */
2134 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0);
2135 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
2136 bge_writembx(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
2137
2138 /*
2139 * Set up RX return ring 0.
2140 * Note that the NIC address for RX return rings is 0x00000000.
2141 * The return rings live entirely within the host, so the
2142 * nicaddr field in the RCB isn't used.
2143 */
2144 rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
2145 BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_rx_return_ring));
2146 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
2147 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
2148 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0x00000000);
2149 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
2150 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));
2151
2152 /* Set random backoff seed for TX */
2153 CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
2154 CLLADDR(ifp->if_sadl)[0] + CLLADDR(ifp->if_sadl)[1] +
2155 CLLADDR(ifp->if_sadl)[2] + CLLADDR(ifp->if_sadl)[3] +
2156 CLLADDR(ifp->if_sadl)[4] + CLLADDR(ifp->if_sadl)[5] +
2157 BGE_TX_BACKOFF_SEED_MASK);
2158
2159 /* Set inter-packet gap */
2160 CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);
2161
2162 /*
2163 * Specify which ring to use for packets that don't match
2164 * any RX rules.
2165 */
2166 CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
2167
2168 /*
2169 * Configure number of RX lists. One interrupt distribution
2170 * list, sixteen active lists, one bad frames class.
2171 */
2172 CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
2173
2174 /* Initialize RX list placement stats mask. */
2175 CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
2176 CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
2177
2178 /* Disable host coalescing until we get it set up */
2179 CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
2180
2181 /* Poll to make sure it's shut down.
*/ 2182 for (i = 0; i < BGE_TIMEOUT * 2; i++) { 2183 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE)) 2184 break; 2185 DELAY(10); 2186 } 2187 2188 if (i == BGE_TIMEOUT * 2) { 2189 aprint_error_dev(sc->bge_dev, 2190 "host coalescing engine failed to idle\n"); 2191 return ENXIO; 2192 } 2193 2194 /* Set up host coalescing defaults */ 2195 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks); 2196 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks); 2197 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds); 2198 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds); 2199 if (BGE_IS_5700_FAMILY(sc)) { 2200 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0); 2201 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0); 2202 } 2203 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0); 2204 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0); 2205 2206 /* Set up address of statistics block */ 2207 if (BGE_IS_5700_FAMILY(sc)) { 2208 BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_info.bge_stats)); 2209 CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks); 2210 CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK); 2211 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI, taddr.bge_addr_hi); 2212 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO, taddr.bge_addr_lo); 2213 } 2214 2215 /* Set up address of status block */ 2216 BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_status_block)); 2217 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK); 2218 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI, taddr.bge_addr_hi); 2219 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO, taddr.bge_addr_lo); 2220 sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx = 0; 2221 sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx = 0; 2222 2223 /* Turn on host coalescing state machine */ 2224 CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE); 2225 2226 /* Turn on RX BD completion state machine and enable attentions */ 2227 CSR_WRITE_4(sc, BGE_RBDC_MODE, 2228 BGE_RBDCMODE_ENABLE | BGE_RBDCMODE_ATTN); 2229 2230 /* Turn on RX list placement state machine */ 2231 CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE); 2232 2233 /* Turn on RX list selector state machine. */ 2234 if (BGE_IS_5700_FAMILY(sc)) 2235 CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE); 2236 2237 val = BGE_MACMODE_TXDMA_ENB | BGE_MACMODE_RXDMA_ENB | 2238 BGE_MACMODE_RX_STATS_CLEAR | BGE_MACMODE_TX_STATS_CLEAR | 2239 BGE_MACMODE_RX_STATS_ENB | BGE_MACMODE_TX_STATS_ENB | 2240 BGE_MACMODE_FRMHDR_DMA_ENB; 2241 2242 if (sc->bge_flags & BGE_PHY_FIBER_TBI) 2243 val |= BGE_PORTMODE_TBI; 2244 else if (sc->bge_flags & BGE_PHY_FIBER_MII) 2245 val |= BGE_PORTMODE_GMII; 2246 else 2247 val |= BGE_PORTMODE_MII; 2248 2249 /* Turn on DMA, clear stats */ 2250 CSR_WRITE_4(sc, BGE_MAC_MODE, val); 2251 2252 /* Set misc. 
local control, enable interrupts on attentions */
2253 sc->bge_local_ctrl_reg = BGE_MLC_INTR_ONATTN | BGE_MLC_AUTO_EEPROM;
2254
2255 #ifdef notdef
2256 /* Assert GPIO pins for PHY reset */
2257 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
2258 BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
2259 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
2260 BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
2261 #endif
2262
2263 #if defined(not_quite_yet)
2264 /* The Linux driver enables gpio pin #1 on 5700s */
2265 if (sc->bge_chipid == BGE_CHIPID_BCM5700) {
2266 sc->bge_local_ctrl_reg |=
2267 (BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUTEN1);
2268 }
2269 #endif
2270 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, sc->bge_local_ctrl_reg);
2271
2272 /* Turn on DMA completion state machine */
2273 if (BGE_IS_5700_FAMILY(sc))
2274 CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
2275
2276 /* Turn on write DMA state machine */
2277 {
2278 uint32_t bge_wdma_mode =
2279 BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS;
2280
2281 /* Enable host coalescing bug fix; see Linux tg3.c */
2282 if (BGE_IS_5755_PLUS(sc))
2283 bge_wdma_mode |= BGE_WDMAMODE_STATUS_TAG_FIX;
2284
2285 CSR_WRITE_4(sc, BGE_WDMA_MODE, bge_wdma_mode);
2286 }
2287
2288 /* Turn on read DMA state machine */
2289 {
2290 uint32_t dma_read_modebits;
2291
2292 dma_read_modebits =
2293 BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS;
2294
2295 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 ||
2296 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785 ||
2297 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57780)
2298 dma_read_modebits |= BGE_RDMAMODE_BD_SBD_CRPT_ATTN |
2299 BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN |
2300 BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN;
2301
2302 if (sc->bge_flags & BGE_PCIE)
2303 dma_read_modebits |= BGE_RDMA_MODE_FIFO_LONG_BURST;
2304 if (sc->bge_flags & BGE_TSO)
2305 dma_read_modebits |= BGE_RDMAMODE_TSO4_ENABLE;
2306 CSR_WRITE_4(sc, BGE_RDMA_MODE, dma_read_modebits);
2307 delay(40);
2308 }
2309
2310 /* Turn on RX data completion state machine */
2311 CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
2312
2313 /* Turn on RX BD initiator state machine */
2314 CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
2315
2316 /* Turn on RX data and RX BD initiator state machine */
2317 CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
2318
2319 /* Turn on Mbuf cluster free state machine */
2320 if (BGE_IS_5700_FAMILY(sc))
2321 CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
2322
2323 /* Turn on send BD completion state machine */
2324 CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
2325
2326 /* Turn on send data completion state machine */
2327 val = BGE_SDCMODE_ENABLE;
2328 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761)
2329 val |= BGE_SDCMODE_CDELAY;
2330 CSR_WRITE_4(sc, BGE_SDC_MODE, val);
2331
2332 /* Turn on send data initiator state machine */
2333 if (sc->bge_flags & BGE_TSO) {
2334 /* XXX: magic value from Linux driver */
2335 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE | 0x08);
2336 } else
2337 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
2338
2339 /* Turn on send BD initiator state machine */
2340 CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
2341
2342 /* Turn on send BD selector state machine */
2343 CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
2344
2345 CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
2346 CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
2347 BGE_SDISTATSCTL_ENABLE | BGE_SDISTATSCTL_FASTER);
2348
2349 /* ack/clear link change events */
2350 CSR_WRITE_4(sc, BGE_MAC_STS,
BGE_MACSTAT_SYNC_CHANGED | 2351 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE | 2352 BGE_MACSTAT_LINK_CHANGED); 2353 CSR_WRITE_4(sc, BGE_MI_STS, 0); 2354 2355 /* Enable PHY auto polling (for MII/GMII only) */ 2356 if (sc->bge_flags & BGE_PHY_FIBER_TBI) { 2357 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK); 2358 } else { 2359 BGE_STS_SETBIT(sc, BGE_STS_AUTOPOLL); 2360 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL | (10 << 16)); 2361 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700) 2362 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB, 2363 BGE_EVTENB_MI_INTERRUPT); 2364 } 2365 2366 /* 2367 * Clear any pending link state attention. 2368 * Otherwise some link state change events may be lost until attention 2369 * is cleared by bge_intr() -> bge_link_upd() sequence. 2370 * It's not necessary on newer BCM chips - perhaps enabling link 2371 * state change attentions implies clearing pending attention. 2372 */ 2373 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED | 2374 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE | 2375 BGE_MACSTAT_LINK_CHANGED); 2376 2377 /* Enable link state change attentions. */ 2378 BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED); 2379 2380 return 0; 2381 } 2382 2383 static const struct bge_revision * 2384 bge_lookup_rev(uint32_t chipid) 2385 { 2386 const struct bge_revision *br; 2387 2388 for (br = bge_revisions; br->br_name != NULL; br++) { 2389 if (br->br_chipid == chipid) 2390 return br; 2391 } 2392 2393 for (br = bge_majorrevs; br->br_name != NULL; br++) { 2394 if (br->br_chipid == BGE_ASICREV(chipid)) 2395 return br; 2396 } 2397 2398 return NULL; 2399 } 2400 2401 static const struct bge_product * 2402 bge_lookup(const struct pci_attach_args *pa) 2403 { 2404 const struct bge_product *bp; 2405 2406 for (bp = bge_products; bp->bp_name != NULL; bp++) { 2407 if (PCI_VENDOR(pa->pa_id) == bp->bp_vendor && 2408 PCI_PRODUCT(pa->pa_id) == bp->bp_product) 2409 return bp; 2410 } 2411 2412 return NULL; 2413 } 2414 2415 static int 2416 bge_setpowerstate(struct bge_softc *sc, int powerlevel) 2417 { 2418 #ifdef NOTYET 2419 uint32_t pm_ctl = 0; 2420 2421 /* XXX FIXME: make sure indirect accesses enabled? */ 2422 pm_ctl = pci_conf_read(sc->bge_dev, BGE_PCI_MISC_CTL, 4); 2423 pm_ctl |= BGE_PCIMISCCTL_INDIRECT_ACCESS; 2424 pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, pm_ctl, 4); 2425 2426 /* clear the PME_assert bit and power state bits, enable PME */ 2427 pm_ctl = pci_conf_read(sc->bge_dev, BGE_PCI_PWRMGMT_CMD, 2); 2428 pm_ctl &= ~PCIM_PSTAT_DMASK; 2429 pm_ctl |= (1 << 8); 2430 2431 if (powerlevel == 0) { 2432 pm_ctl |= PCIM_PSTAT_D0; 2433 pci_write_config(sc->bge_dev, BGE_PCI_PWRMGMT_CMD, 2434 pm_ctl, 2); 2435 DELAY(10000); 2436 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, sc->bge_local_ctrl_reg); 2437 DELAY(10000); 2438 2439 #ifdef NOTYET 2440 /* XXX FIXME: write 0x02 to phy aux_Ctrl reg */ 2441 bge_miibus_writereg(sc->bge_dev, 1, 0x18, 0x02); 2442 #endif 2443 DELAY(40); DELAY(40); DELAY(40); 2444 DELAY(10000); /* above not quite adequate on 5700 */ 2445 return 0; 2446 } 2447 2448 2449 /* 2450 * Entering ACPI power states D1-D3 is achieved by wiggling 2451 * GMII gpio pins. Example code assumes all hardware vendors 2452 * followed Broadcom's sample pcb layout. Until we verify that 2453 * for all supported OEM cards, states D1-D3 are unsupported. 2454 */ 2455 aprint_error_dev(sc->bge_dev, 2456 "power state %d unimplemented; check GPIO pins\n", 2457 powerlevel); 2458 #endif 2459 return EOPNOTSUPP; 2460 } 2461 2462 2463 /* 2464 * Probe for a Broadcom chip. 
Check the PCI vendor and device IDs 2465 * against our list and return its name if we find a match. Note 2466 * that since the Broadcom controller contains VPD support, we 2467 * can get the device name string from the controller itself instead 2468 * of the compiled-in string. This is a little slow, but it guarantees 2469 * we'll always announce the right product name. 2470 */ 2471 static int 2472 bge_probe(device_t parent, cfdata_t match, void *aux) 2473 { 2474 struct pci_attach_args *pa = (struct pci_attach_args *)aux; 2475 2476 if (bge_lookup(pa) != NULL) 2477 return 1; 2478 2479 return 0; 2480 } 2481 2482 static void 2483 bge_attach(device_t parent, device_t self, void *aux) 2484 { 2485 struct bge_softc *sc = device_private(self); 2486 struct pci_attach_args *pa = aux; 2487 prop_dictionary_t dict; 2488 const struct bge_product *bp; 2489 const struct bge_revision *br; 2490 pci_chipset_tag_t pc; 2491 pci_intr_handle_t ih; 2492 const char *intrstr = NULL; 2493 bus_dma_segment_t seg; 2494 int rseg; 2495 uint32_t hwcfg = 0; 2496 uint32_t command; 2497 struct ifnet *ifp; 2498 uint32_t misccfg; 2499 void * kva; 2500 u_char eaddr[ETHER_ADDR_LEN]; 2501 pcireg_t memtype, subid; 2502 bus_addr_t memaddr; 2503 bus_size_t memsize; 2504 uint32_t pm_ctl; 2505 bool no_seeprom; 2506 2507 bp = bge_lookup(pa); 2508 KASSERT(bp != NULL); 2509 2510 sc->sc_pc = pa->pa_pc; 2511 sc->sc_pcitag = pa->pa_tag; 2512 sc->bge_dev = self; 2513 2514 pc = sc->sc_pc; 2515 subid = pci_conf_read(pc, sc->sc_pcitag, PCI_SUBSYS_ID_REG); 2516 2517 aprint_naive(": Ethernet controller\n"); 2518 aprint_normal(": %s\n", bp->bp_name); 2519 2520 /* 2521 * Map control/status registers. 2522 */ 2523 DPRINTFN(5, ("Map control/status regs\n")); 2524 command = pci_conf_read(pc, sc->sc_pcitag, PCI_COMMAND_STATUS_REG); 2525 command |= PCI_COMMAND_MEM_ENABLE | PCI_COMMAND_MASTER_ENABLE; 2526 pci_conf_write(pc, sc->sc_pcitag, PCI_COMMAND_STATUS_REG, command); 2527 command = pci_conf_read(pc, sc->sc_pcitag, PCI_COMMAND_STATUS_REG); 2528 2529 if (!(command & PCI_COMMAND_MEM_ENABLE)) { 2530 aprint_error_dev(sc->bge_dev, 2531 "failed to enable memory mapping!\n"); 2532 return; 2533 } 2534 2535 DPRINTFN(5, ("pci_mem_find\n")); 2536 memtype = pci_mapreg_type(sc->sc_pc, sc->sc_pcitag, BGE_PCI_BAR0); 2537 switch (memtype) { 2538 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT: 2539 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT: 2540 if (pci_mapreg_map(pa, BGE_PCI_BAR0, 2541 memtype, 0, &sc->bge_btag, &sc->bge_bhandle, 2542 &memaddr, &memsize) == 0) 2543 break; 2544 default: 2545 aprint_error_dev(sc->bge_dev, "can't find mem space\n"); 2546 return; 2547 } 2548 2549 DPRINTFN(5, ("pci_intr_map\n")); 2550 if (pci_intr_map(pa, &ih)) { 2551 aprint_error_dev(sc->bge_dev, "couldn't map interrupt\n"); 2552 return; 2553 } 2554 2555 DPRINTFN(5, ("pci_intr_string\n")); 2556 intrstr = pci_intr_string(pc, ih); 2557 2558 DPRINTFN(5, ("pci_intr_establish\n")); 2559 sc->bge_intrhand = pci_intr_establish(pc, ih, IPL_NET, bge_intr, sc); 2560 2561 if (sc->bge_intrhand == NULL) { 2562 aprint_error_dev(sc->bge_dev, 2563 "couldn't establish interrupt%s%s\n", 2564 intrstr ? " at " : "", intrstr ? intrstr : ""); 2565 return; 2566 } 2567 aprint_normal_dev(sc->bge_dev, "interrupting at %s\n", intrstr); 2568 2569 /* 2570 * Kludge for 5700 Bx bug: a hardware bug (PCIX byte enable?) 2571 * can clobber the chip's PCI config-space power control registers, 2572 * leaving the card in D3 powersave state. 
2573 * We do not have memory-mapped registers in this state,
2574 * so force device into D0 state before starting initialization.
2575 */
2576 pm_ctl = pci_conf_read(pc, sc->sc_pcitag, BGE_PCI_PWRMGMT_CMD);
2577 pm_ctl &= ~(PCI_PWR_D0|PCI_PWR_D1|PCI_PWR_D2|PCI_PWR_D3);
2578 pm_ctl |= (1 << 8) | PCI_PWR_D0 ; /* D0 state */
2579 pci_conf_write(pc, sc->sc_pcitag, BGE_PCI_PWRMGMT_CMD, pm_ctl);
2580 DELAY(1000); /* 27 usec is allegedly sufficient */
2581
2582 /*
2583 * Save ASIC rev.
2584 */
2585 sc->bge_chipid =
2586 pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL)
2587 >> BGE_PCIMISCCTL_ASICREV_SHIFT;
2588
2589 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_USE_PRODID_REG) {
2590 if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5717 ||
2591 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5718 ||
2592 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5724)
2593 sc->bge_chipid = pci_conf_read(pc, pa->pa_tag,
2594 BGE_PCI_GEN2_PRODID_ASICREV);
2595 else if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57761 ||
2596 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57765 ||
2597 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57781 ||
2598 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57785 ||
2599 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57791 ||
2600 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57795)
2601 sc->bge_chipid = pci_conf_read(pc, pa->pa_tag,
2602 BGE_PCI_GEN15_PRODID_ASICREV);
2603 else
2604 sc->bge_chipid = pci_conf_read(pc, pa->pa_tag,
2605 BGE_PCI_PRODID_ASICREV);
2606 }
2607
2608 if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PCIEXPRESS,
2609 &sc->bge_pciecap, NULL) != 0) {
2610 /* PCIe */
2611 sc->bge_flags |= BGE_PCIE;
2612 bge_set_max_readrq(sc);
2613 } else if ((pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_PCISTATE) &
2614 BGE_PCISTATE_PCI_BUSMODE) == 0) {
2615 /* PCI-X */
2616 sc->bge_flags |= BGE_PCIX;
2617 if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIX,
2618 &sc->bge_pcixcap, NULL) == 0)
2619 aprint_error_dev(sc->bge_dev,
2620 "unable to find PCIX capability\n");
2621 }
2622
2623 /* chipid */
2624 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
2625 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701 ||
2626 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 ||
2627 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704)
2628 sc->bge_flags |= BGE_5700_FAMILY;
2629
2630 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5714_A0 ||
2631 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5780 ||
2632 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5714)
2633 sc->bge_flags |= BGE_5714_FAMILY;
2634
2635 /* Intentionally exclude BGE_ASICREV_BCM5906 */
2636 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 ||
2637 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5755 ||
2638 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761 ||
2639 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 ||
2640 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785 ||
2641 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5787 ||
2642 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57765 ||
2643 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57780)
2644 sc->bge_flags |= BGE_5755_PLUS;
2645
2646 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5750 ||
2647 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5752 ||
2648 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906 ||
2649 BGE_IS_5755_PLUS(sc) ||
2650 BGE_IS_5714_FAMILY(sc))
2651 sc->bge_flags |= BGE_5750_PLUS;
2652
2653 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 ||
2654
BGE_IS_5750_OR_BEYOND(sc))
2655 sc->bge_flags |= BGE_5705_PLUS;
2656
2657 /*
2658 * When using the BCM5701 in PCI-X mode, data corruption has
2659 * been observed in the first few bytes of some received packets.
2660 * Aligning the packet buffer in memory eliminates the corruption.
2661 * Unfortunately, this misaligns the packet payloads. On platforms
2662 * which do not support unaligned accesses, we will realign the
2663 * payloads by copying the received packets.
2664 */
2665 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701 &&
2666 sc->bge_flags & BGE_PCIX)
2667 sc->bge_flags |= BGE_RX_ALIGNBUG;
2668
2669 if (BGE_IS_5700_FAMILY(sc))
2670 sc->bge_flags |= BGE_JUMBO_CAPABLE;
2671
2672 if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
2673 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701) &&
2674 PCI_VENDOR(subid) == PCI_VENDOR_DELL)
2675 sc->bge_flags |= BGE_NO_3LED;
2676
2677 misccfg = CSR_READ_4(sc, BGE_MISC_CFG);
2678 misccfg &= BGE_MISCCFG_BOARD_ID_MASK;
2679
2680 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 &&
2681 (misccfg == BGE_MISCCFG_BOARD_ID_5788 ||
2682 misccfg == BGE_MISCCFG_BOARD_ID_5788M))
2683 sc->bge_flags |= BGE_IS_5788;
2684
2685 /*
2686 * Some controllers seem to require a special firmware to use
2687 * TSO. But that firmware is not available to FreeBSD, and Linux
2688 * claims that TSO performed by the firmware is slower than
2689 * hardware-based TSO. Moreover, the firmware-based TSO has a
2690 * known bug: it cannot handle TSO if the Ethernet header plus
2691 * the IP/TCP header is greater than 80 bytes. A workaround for
2692 * the bug exists, but it seems more expensive than simply not
2693 * using TSO. Some hardware also has the bug, so limit TSO to
2694 * the controllers that are not affected by it
2695 * (e.g. 5755 or higher).
2696 */
2697 if (BGE_IS_5755_PLUS(sc)) {
2698 /*
2699 * BCM5754 and BCM5787 share the same ASIC ID, so an
2700 * explicit device ID check is required.
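 * (For scale, on the 80-byte limit above: 14 bytes of Ethernet
 * header plus a 20-byte IPv4 header and a TCP header that can
 * legally grow to 60 bytes with options already comes to 94 bytes,
 * so the firmware bug is not merely theoretical.)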
2701 */ 2702 if ((PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5754) && 2703 (PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5754M)) 2704 sc->bge_flags |= BGE_TSO; 2705 } 2706 2707 if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 && 2708 (misccfg == 0x4000 || misccfg == 0x8000)) || 2709 (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 && 2710 PCI_VENDOR(pa->pa_id) == PCI_VENDOR_BROADCOM && 2711 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5901 || 2712 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5901A2 || 2713 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5705F)) || 2714 (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_BROADCOM && 2715 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5751F || 2716 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5753F || 2717 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5787F)) || 2718 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57790 || 2719 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) 2720 sc->bge_flags |= BGE_10_100_ONLY; 2721 2722 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 || 2723 (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 && 2724 (sc->bge_chipid != BGE_CHIPID_BCM5705_A0 && 2725 sc->bge_chipid != BGE_CHIPID_BCM5705_A1)) || 2726 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) 2727 sc->bge_flags |= BGE_NO_ETH_WIRE_SPEED; 2728 2729 if (sc->bge_chipid == BGE_CHIPID_BCM5701_A0 || 2730 sc->bge_chipid == BGE_CHIPID_BCM5701_B0) 2731 sc->bge_flags |= BGE_PHY_CRC_BUG; 2732 if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5703_AX || 2733 BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5704_AX) 2734 sc->bge_flags |= BGE_PHY_ADC_BUG; 2735 if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0) 2736 sc->bge_flags |= BGE_PHY_5704_A0_BUG; 2737 2738 if (BGE_IS_5705_PLUS(sc) && 2739 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5906 && 2740 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5717 && 2741 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5785 && 2742 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM57765 && 2743 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM57780) { 2744 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5755 || 2745 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761 || 2746 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 || 2747 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5787) { 2748 if (PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5722 && 2749 PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5756) 2750 sc->bge_flags |= BGE_PHY_JITTER_BUG; 2751 if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5755M) 2752 sc->bge_flags |= BGE_PHY_ADJUST_TRIM; 2753 } else if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5906) 2754 sc->bge_flags |= BGE_PHY_BER_BUG; 2755 } 2756 2757 /* 2758 * SEEPROM check. 2759 * First check if firmware knows we do not have SEEPROM. 2760 */ 2761 if (prop_dictionary_get_bool(device_properties(self), 2762 "without-seeprom", &no_seeprom) && no_seeprom) 2763 sc->bge_flags |= BGE_NO_EEPROM; 2764 2765 /* Now check the 'ROM failed' bit on the RX CPU */ 2766 else if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) 2767 sc->bge_flags |= BGE_NO_EEPROM; 2768 2769 /* Try to reset the chip. 
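 * The chip is deliberately reset twice during attach: once cold here,
 * before we know whether ASF/IPMI firmware is present, and again below
 * with the stop/start signalling once bge_asf_mode has been probed, so
 * that any management firmware sees an orderly transition.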
*/ 2770 DPRINTFN(5, ("bge_reset\n")); 2771 bge_reset(sc); 2772 2773 sc->bge_asf_mode = 0; 2774 if (bge_allow_asf && (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) 2775 == BGE_MAGIC_NUMBER)) { 2776 if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG) 2777 & BGE_HWCFG_ASF) { 2778 sc->bge_asf_mode |= ASF_ENABLE; 2779 sc->bge_asf_mode |= ASF_STACKUP; 2780 if (BGE_IS_5750_OR_BEYOND(sc)) { 2781 sc->bge_asf_mode |= ASF_NEW_HANDSHAKE; 2782 } 2783 } 2784 } 2785 2786 /* Try to reset the chip again the nice way. */ 2787 bge_stop_fw(sc); 2788 bge_sig_pre_reset(sc, BGE_RESET_STOP); 2789 if (bge_reset(sc)) 2790 aprint_error_dev(sc->bge_dev, "chip reset failed\n"); 2791 2792 bge_sig_legacy(sc, BGE_RESET_STOP); 2793 bge_sig_post_reset(sc, BGE_RESET_STOP); 2794 2795 if (bge_chipinit(sc)) { 2796 aprint_error_dev(sc->bge_dev, "chip initialization failed\n"); 2797 bge_release_resources(sc); 2798 return; 2799 } 2800 2801 /* 2802 * Get station address from the EEPROM 2803 */ 2804 if (bge_get_eaddr(sc, eaddr)) { 2805 aprint_error_dev(sc->bge_dev, 2806 "failed to read station address\n"); 2807 bge_release_resources(sc); 2808 return; 2809 } 2810 2811 br = bge_lookup_rev(sc->bge_chipid); 2812 2813 if (br == NULL) { 2814 aprint_normal_dev(sc->bge_dev, "unknown ASIC (0x%x)", 2815 sc->bge_chipid); 2816 } else { 2817 aprint_normal_dev(sc->bge_dev, "ASIC %s (0x%x)", 2818 br->br_name, sc->bge_chipid); 2819 } 2820 aprint_normal(", Ethernet address %s\n", ether_sprintf(eaddr)); 2821 2822 /* Allocate the general information block and ring buffers. */ 2823 if (pci_dma64_available(pa)) 2824 sc->bge_dmatag = pa->pa_dmat64; 2825 else 2826 sc->bge_dmatag = pa->pa_dmat; 2827 DPRINTFN(5, ("bus_dmamem_alloc\n")); 2828 if (bus_dmamem_alloc(sc->bge_dmatag, sizeof(struct bge_ring_data), 2829 PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) { 2830 aprint_error_dev(sc->bge_dev, "can't alloc rx buffers\n"); 2831 return; 2832 } 2833 DPRINTFN(5, ("bus_dmamem_map\n")); 2834 if (bus_dmamem_map(sc->bge_dmatag, &seg, rseg, 2835 sizeof(struct bge_ring_data), &kva, 2836 BUS_DMA_NOWAIT)) { 2837 aprint_error_dev(sc->bge_dev, 2838 "can't map DMA buffers (%zu bytes)\n", 2839 sizeof(struct bge_ring_data)); 2840 bus_dmamem_free(sc->bge_dmatag, &seg, rseg); 2841 return; 2842 } 2843 DPRINTFN(5, ("bus_dmamem_create\n")); 2844 if (bus_dmamap_create(sc->bge_dmatag, sizeof(struct bge_ring_data), 1, 2845 sizeof(struct bge_ring_data), 0, 2846 BUS_DMA_NOWAIT, &sc->bge_ring_map)) { 2847 aprint_error_dev(sc->bge_dev, "can't create DMA map\n"); 2848 bus_dmamem_unmap(sc->bge_dmatag, kva, 2849 sizeof(struct bge_ring_data)); 2850 bus_dmamem_free(sc->bge_dmatag, &seg, rseg); 2851 return; 2852 } 2853 DPRINTFN(5, ("bus_dmamem_load\n")); 2854 if (bus_dmamap_load(sc->bge_dmatag, sc->bge_ring_map, kva, 2855 sizeof(struct bge_ring_data), NULL, 2856 BUS_DMA_NOWAIT)) { 2857 bus_dmamap_destroy(sc->bge_dmatag, sc->bge_ring_map); 2858 bus_dmamem_unmap(sc->bge_dmatag, kva, 2859 sizeof(struct bge_ring_data)); 2860 bus_dmamem_free(sc->bge_dmatag, &seg, rseg); 2861 return; 2862 } 2863 2864 DPRINTFN(5, ("bzero\n")); 2865 sc->bge_rdata = (struct bge_ring_data *)kva; 2866 2867 memset(sc->bge_rdata, 0, sizeof(struct bge_ring_data)); 2868 2869 /* Try to allocate memory for jumbo buffers. */ 2870 if (BGE_IS_JUMBO_CAPABLE(sc)) { 2871 if (bge_alloc_jumbo_mem(sc)) { 2872 aprint_error_dev(sc->bge_dev, 2873 "jumbo buffer allocation failed\n"); 2874 } else 2875 sc->ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU; 2876 } 2877 2878 /* Set default tuneable values. 
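 * (The coalescing values come in pairs: *_coal_ticks is a time bound,
 * in microseconds as far as we know, and *_max_coal_bds a descriptor
 * count; the host coalescing engine appears to interrupt as soon as
 * either bound is reached.)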
*/ 2879 sc->bge_stat_ticks = BGE_TICKS_PER_SEC; 2880 sc->bge_rx_coal_ticks = 150; 2881 sc->bge_rx_max_coal_bds = 64; 2882 #ifdef ORIG_WPAUL_VALUES 2883 sc->bge_tx_coal_ticks = 150; 2884 sc->bge_tx_max_coal_bds = 128; 2885 #else 2886 sc->bge_tx_coal_ticks = 300; 2887 sc->bge_tx_max_coal_bds = 400; 2888 #endif 2889 if (BGE_IS_5705_PLUS(sc)) { 2890 sc->bge_tx_coal_ticks = (12 * 5); 2891 sc->bge_tx_max_coal_bds = (12 * 5); 2892 aprint_verbose_dev(sc->bge_dev, 2893 "setting short Tx thresholds\n"); 2894 } 2895 2896 if (BGE_IS_5705_PLUS(sc)) 2897 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705; 2898 else 2899 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT; 2900 2901 /* Set up ifnet structure */ 2902 ifp = &sc->ethercom.ec_if; 2903 ifp->if_softc = sc; 2904 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 2905 ifp->if_ioctl = bge_ioctl; 2906 ifp->if_stop = bge_stop; 2907 ifp->if_start = bge_start; 2908 ifp->if_init = bge_init; 2909 ifp->if_watchdog = bge_watchdog; 2910 IFQ_SET_MAXLEN(&ifp->if_snd, max(BGE_TX_RING_CNT - 1, IFQ_MAXLEN)); 2911 IFQ_SET_READY(&ifp->if_snd); 2912 DPRINTFN(5, ("strcpy if_xname\n")); 2913 strcpy(ifp->if_xname, device_xname(sc->bge_dev)); 2914 2915 if (sc->bge_chipid != BGE_CHIPID_BCM5700_B0) 2916 sc->ethercom.ec_if.if_capabilities |= 2917 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx; 2918 #if 1 /* XXX TCP/UDP checksum offload breaks with pf(4) */ 2919 sc->ethercom.ec_if.if_capabilities |= 2920 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx | 2921 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx; 2922 #endif 2923 sc->ethercom.ec_capabilities |= 2924 ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_MTU; 2925 2926 if (sc->bge_flags & BGE_TSO) 2927 sc->ethercom.ec_if.if_capabilities |= IFCAP_TSOv4; 2928 2929 /* 2930 * Do MII setup. 2931 */ 2932 DPRINTFN(5, ("mii setup\n")); 2933 sc->bge_mii.mii_ifp = ifp; 2934 sc->bge_mii.mii_readreg = bge_miibus_readreg; 2935 sc->bge_mii.mii_writereg = bge_miibus_writereg; 2936 sc->bge_mii.mii_statchg = bge_miibus_statchg; 2937 2938 /* 2939 * Figure out what sort of media we have by checking the 2940 * hardware config word in the first 32k of NIC internal memory, 2941 * or fall back to the config word in the EEPROM. Note: on some BCM5700 2942 * cards, this value appears to be unset. If that's the 2943 * case, we have to rely on identifying the NIC by its PCI 2944 * subsystem ID, as we do below for the SysKonnect SK-9D41. 2945 */ 2946 if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER) { 2947 hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG); 2948 } else if (!(sc->bge_flags & BGE_NO_EEPROM)) { 2949 bge_read_eeprom(sc, (void *)&hwcfg, 2950 BGE_EE_HWCFG_OFFSET, sizeof(hwcfg)); 2951 hwcfg = be32toh(hwcfg); 2952 } 2953 /* The SysKonnect SK-9D41 is a 1000baseSX card. 
*/
2954 if (PCI_PRODUCT(subid) == SK_SUBSYSID_9D41 ||
2955 (hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) {
2956 if (BGE_IS_5714_FAMILY(sc))
2957 sc->bge_flags |= BGE_PHY_FIBER_MII;
2958 else
2959 sc->bge_flags |= BGE_PHY_FIBER_TBI;
2960 }
2961
2962 /* set phyflags before mii_attach() */
2963 dict = device_properties(self);
2964 prop_dictionary_set_uint32(dict, "phyflags", sc->bge_flags);
2965
2966 if (sc->bge_flags & BGE_PHY_FIBER_TBI) {
2967 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd,
2968 bge_ifmedia_sts);
2969 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER |IFM_1000_SX, 0, NULL);
2970 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX|IFM_FDX,
2971 0, NULL);
2972 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
2973 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO);
2974 /* Pretend the user requested this setting */
2975 sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
2976 } else {
2977 /*
2978 * Do transceiver setup and tell the firmware the
2979 * driver is down so we can try to get access to the
2980 * PHY during the probe if ASF is running. Retry a couple
2981 * of times if we get a conflict with the ASF firmware
2982 * accessing the PHY.
2983 */
2984 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
2985 bge_asf_driver_up(sc);
2986
2987 ifmedia_init(&sc->bge_mii.mii_media, 0, bge_ifmedia_upd,
2988 bge_ifmedia_sts);
2989 mii_attach(sc->bge_dev, &sc->bge_mii, 0xffffffff,
2990 MII_PHY_ANY, MII_OFFSET_ANY,
2991 MIIF_FORCEANEG|MIIF_DOPAUSE);
2992
2993 if (LIST_EMPTY(&sc->bge_mii.mii_phys)) {
2994 aprint_error_dev(sc->bge_dev, "no PHY found!\n");
2995 ifmedia_add(&sc->bge_mii.mii_media,
2996 IFM_ETHER|IFM_MANUAL, 0, NULL);
2997 ifmedia_set(&sc->bge_mii.mii_media,
2998 IFM_ETHER|IFM_MANUAL);
2999 } else
3000 ifmedia_set(&sc->bge_mii.mii_media,
3001 IFM_ETHER|IFM_AUTO);
3002
3003 /*
3004 * Now tell the firmware we are going up after probing the PHY
3005 */
3006 if (sc->bge_asf_mode & ASF_STACKUP)
3007 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3008 }
3009
3010 /*
3011 * Call MI attach routine.
3012 */
3013 DPRINTFN(5, ("if_attach\n"));
3014 if_attach(ifp);
3015 DPRINTFN(5, ("ether_ifattach\n"));
3016 ether_ifattach(ifp, eaddr);
3017 ether_set_ifflags_cb(&sc->ethercom, bge_ifflags_cb);
3018 #if NRND > 0
3019 rnd_attach_source(&sc->rnd_source, device_xname(sc->bge_dev),
3020 RND_TYPE_NET, 0);
3021 #endif
3022 #ifdef BGE_EVENT_COUNTERS
3023 /*
3024 * Attach event counters.
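 * (When the kernel is built with BGE_EVENT_COUNTERS these show up in
 * vmstat -e output, e.g. "bge0 intr" or "bge0 rx_xoff", which makes
 * the flow-control behaviour easy to watch in the field.)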
3025 */ 3026 evcnt_attach_dynamic(&sc->bge_ev_intr, EVCNT_TYPE_INTR, 3027 NULL, device_xname(sc->bge_dev), "intr"); 3028 evcnt_attach_dynamic(&sc->bge_ev_tx_xoff, EVCNT_TYPE_MISC, 3029 NULL, device_xname(sc->bge_dev), "tx_xoff"); 3030 evcnt_attach_dynamic(&sc->bge_ev_tx_xon, EVCNT_TYPE_MISC, 3031 NULL, device_xname(sc->bge_dev), "tx_xon"); 3032 evcnt_attach_dynamic(&sc->bge_ev_rx_xoff, EVCNT_TYPE_MISC, 3033 NULL, device_xname(sc->bge_dev), "rx_xoff"); 3034 evcnt_attach_dynamic(&sc->bge_ev_rx_xon, EVCNT_TYPE_MISC, 3035 NULL, device_xname(sc->bge_dev), "rx_xon"); 3036 evcnt_attach_dynamic(&sc->bge_ev_rx_macctl, EVCNT_TYPE_MISC, 3037 NULL, device_xname(sc->bge_dev), "rx_macctl"); 3038 evcnt_attach_dynamic(&sc->bge_ev_xoffentered, EVCNT_TYPE_MISC, 3039 NULL, device_xname(sc->bge_dev), "xoffentered"); 3040 #endif /* BGE_EVENT_COUNTERS */ 3041 DPRINTFN(5, ("callout_init\n")); 3042 callout_init(&sc->bge_timeout, 0); 3043 3044 if (pmf_device_register(self, NULL, NULL)) 3045 pmf_class_network_register(self, ifp); 3046 else 3047 aprint_error_dev(self, "couldn't establish power handler\n"); 3048 3049 sysctl_bge_init(sc); 3050 3051 #ifdef BGE_DEBUG 3052 bge_debug_info(sc); 3053 #endif 3054 } 3055 3056 static void 3057 bge_release_resources(struct bge_softc *sc) 3058 { 3059 if (sc->bge_vpd_prodname != NULL) 3060 free(sc->bge_vpd_prodname, M_DEVBUF); 3061 3062 if (sc->bge_vpd_readonly != NULL) 3063 free(sc->bge_vpd_readonly, M_DEVBUF); 3064 } 3065 3066 static int 3067 bge_reset(struct bge_softc *sc) 3068 { 3069 uint32_t cachesize, command, pcistate, marbmode; 3070 #if 0 3071 uint32_t new_pcistate; 3072 #endif 3073 pcireg_t devctl, reg; 3074 int i, val; 3075 void (*write_op)(struct bge_softc *, int, int); 3076 3077 if (BGE_IS_5750_OR_BEYOND(sc) && !BGE_IS_5714_FAMILY(sc) 3078 && (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5906)) { 3079 if (sc->bge_flags & BGE_PCIE) 3080 write_op = bge_writemem_direct; 3081 else 3082 write_op = bge_writemem_ind; 3083 } else 3084 write_op = bge_writereg_ind; 3085 3086 /* Save some important PCI state. */ 3087 cachesize = pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CACHESZ); 3088 command = pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CMD); 3089 pcistate = pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_PCISTATE); 3090 3091 /* Step 5a: Enable memory arbiter. */ 3092 marbmode = 0; 3093 if (BGE_IS_5714_FAMILY(sc)) 3094 marbmode = CSR_READ_4(sc, BGE_MARB_MODE); 3095 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | marbmode); 3096 3097 /* Step 5b-5d: */ 3098 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MISC_CTL, 3099 BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR | 3100 BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW); 3101 3102 /* XXX ???: Disable fastboot on controllers that support it. */ 3103 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5752 || 3104 BGE_IS_5755_PLUS(sc)) 3105 CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0); 3106 3107 /* 3108 * Step 6: Write the magic number to SRAM at offset 0xB50. 3109 * When firmware finishes its initialization it will 3110 * write ~BGE_MAGIC_NUMBER to the same location. 
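 * bge_poll_fw() above spins on exactly this location, along the
 * lines of:
 *
 *	val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
 *	if (val == ~BGE_MAGIC_NUMBER)
 *		break;	-- firmware is up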
3111 */ 3112 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER); 3113 3114 /* Step 7: */ 3115 val = BGE_MISCCFG_RESET_CORE_CLOCKS | (65<<1); 3116 /* 3117 * XXX: from FreeBSD/Linux; no documentation 3118 */ 3119 if (sc->bge_flags & BGE_PCIE) { 3120 if (CSR_READ_4(sc, BGE_PCIE_CTL1) == 0x60) 3121 /* PCI Express 1.0 system */ 3122 CSR_WRITE_4(sc, BGE_PCIE_CTL1, 0x20); 3123 if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) { 3124 /* 3125 * Prevent PCI Express link training 3126 * during global reset. 3127 */ 3128 CSR_WRITE_4(sc, BGE_MISC_CFG, 1 << 29); 3129 val |= (1<<29); 3130 } 3131 } 3132 3133 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) { 3134 i = CSR_READ_4(sc, BGE_VCPU_STATUS); 3135 CSR_WRITE_4(sc, BGE_VCPU_STATUS, 3136 i | BGE_VCPU_STATUS_DRV_RESET); 3137 i = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL); 3138 CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL, 3139 i & ~BGE_VCPU_EXT_CTRL_HALT_CPU); 3140 } 3141 3142 /* 3143 * Set GPHY Power Down Override to leave GPHY 3144 * powered up in D0 uninitialized. 3145 */ 3146 if (BGE_IS_5705_PLUS(sc)) 3147 val |= BGE_MISCCFG_KEEP_GPHY_POWER; 3148 3149 /* XXX 5721, 5751 and 5752 */ 3150 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5750) 3151 val |= BGE_MISCCFG_GRC_RESET_DISABLE; 3152 3153 /* Issue global reset */ 3154 write_op(sc, BGE_MISC_CFG, val); 3155 3156 /* Step 8: wait for complete */ 3157 if (sc->bge_flags & BGE_PCIE) 3158 delay(100*1000); /* too big */ 3159 else 3160 delay(100); 3161 3162 /* From Linux: dummy read to flush PCI posted writes */ 3163 reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CMD); 3164 3165 /* Step 9-10: Reset some of the PCI state that got zapped by reset */ 3166 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MISC_CTL, 3167 BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR | 3168 BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW 3169 | BGE_PCIMISCCTL_CLOCKCTL_RW); 3170 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CMD, command); 3171 write_op(sc, BGE_MISC_CFG, (65 << 1)); 3172 3173 /* Step 11: disable PCI-X Relaxed Ordering. */ 3174 if (sc->bge_flags & BGE_PCIX) { 3175 reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, sc->bge_pcixcap 3176 + PCI_PCIX_CMD); 3177 pci_conf_write(sc->sc_pc, sc->sc_pcitag, sc->bge_pcixcap 3178 + PCI_PCIX_CMD, reg & ~PCI_PCIX_CMD_RELAXED_ORDER); 3179 } 3180 3181 if (sc->bge_flags & BGE_PCIE) { 3182 if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) { 3183 DELAY(500000); 3184 /* XXX: Magic Numbers */ 3185 reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, 3186 BGE_PCI_UNKNOWN0); 3187 pci_conf_write(sc->sc_pc, sc->sc_pcitag, 3188 BGE_PCI_UNKNOWN0, 3189 reg | (1 << 15)); 3190 } 3191 devctl = pci_conf_read(sc->sc_pc, sc->sc_pcitag, 3192 sc->bge_pciecap + PCI_PCIE_DCSR); 3193 /* Clear enable no snoop and disable relaxed ordering. */ 3194 devctl &= ~(0x0010 | PCI_PCIE_DCSR_ENA_NO_SNOOP); 3195 /* Set PCIE max payload size to 128. */ 3196 devctl &= ~(0x00e0); 3197 /* Clear device status register. Write 1b to clear */ 3198 devctl |= PCI_PCIE_DCSR_URD | PCI_PCIE_DCSR_FED 3199 | PCI_PCIE_DCSR_NFED | PCI_PCIE_DCSR_CED; 3200 pci_conf_write(sc->sc_pc, sc->sc_pcitag, 3201 sc->bge_pciecap + PCI_PCIE_DCSR, devctl); 3202 } 3203 3204 /* Step 12: Enable memory arbiter. 
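 * (On the 5714 family the mode register apparently carries other live
 * bits, hence the read-modify-write below rather than a plain enable.)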
*/
3205 marbmode = 0;
3206 if (BGE_IS_5714_FAMILY(sc))
3207 marbmode = CSR_READ_4(sc, BGE_MARB_MODE);
3208 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | marbmode);
3209
3210 /* Step 17: Poll until the firmware initialization is complete */
3211 bge_poll_fw(sc);
3212
3213 /* XXX 5721, 5751 and 5752 */
3214 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5750) {
3215 /* Step 19: */
3216 BGE_SETBIT(sc, BGE_TLP_CONTROL_REG, 1 << 29 | 1 << 25);
3217 /* Step 20: */
3218 BGE_SETBIT(sc, BGE_TLP_CONTROL_REG, BGE_TLP_DATA_FIFO_PROTECT);
3219 }
3220
3221 /*
3222 * Step 18: write MAC mode.
3223 * XXX Write 0x0c for 5703S and 5704S
3224 */
3225 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
3226
3227
3228 /* Step 21: 5704 B0 errata */
3229 if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5704_BX) {
3230 pcireg_t msidata;
3231
3232 msidata = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
3233 BGE_PCI_MSI_DATA);
3234 msidata |= ((1 << 13 | 1 << 12 | 1 << 10) << 16);
3235 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MSI_DATA,
3236 msidata);
3237 }
3238
3239 /* Step 23: restore cache line size */
3240 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CACHESZ, cachesize);
3241
3242 #if 0
3243 /*
3244 * XXX Wait for the value of the PCISTATE register to
3245 * return to its original pre-reset state. This is a
3246 * fairly good indicator of reset completion. If we don't
3247 * wait for the reset to fully complete, trying to read
3248 * from the device's non-PCI registers may yield garbage
3249 * results.
3250 */
3251 for (i = 0; i < BGE_TIMEOUT; i++) {
3252 new_pcistate = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
3253 BGE_PCI_PCISTATE);
3254 if ((new_pcistate & ~BGE_PCISTATE_RESERVED) ==
3255 (pcistate & ~BGE_PCISTATE_RESERVED))
3256 break;
3257 DELAY(10);
3258 }
3259 if ((new_pcistate & ~BGE_PCISTATE_RESERVED) !=
3260 (pcistate & ~BGE_PCISTATE_RESERVED)) {
3261 aprint_error_dev(sc->bge_dev, "pcistate failed to revert\n");
3262 }
3263 #endif
3264
3265 /* Step 28: Fix up byte swapping */
3266 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS);
3267
3268 /* Tell the ASF firmware we are up */
3269 if (sc->bge_asf_mode & ASF_STACKUP)
3270 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3271
3272 /*
3273 * The 5704 in TBI mode apparently needs some special
3274 * adjustment to ensure the SERDES drive level is set
3275 * to 1.2V.
3276 */
3277 if (sc->bge_flags & BGE_PHY_FIBER_TBI &&
3278 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
3279 uint32_t serdescfg;
3280
3281 serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG);
3282 serdescfg = (serdescfg & ~0xFFF) | 0x880;
3283 CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg);
3284 }
3285
3286 if (sc->bge_flags & BGE_PCIE &&
3287 sc->bge_chipid != BGE_CHIPID_BCM5750_A0 &&
3288 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5717 &&
3289 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5785 &&
3290 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM57765) {
3291 uint32_t v;
3292
3293 /* Enable PCI Express bug fix */
3294 v = CSR_READ_4(sc, 0x7c00);
3295 CSR_WRITE_4(sc, 0x7c00, v | (1<<25));
3296 }
3297 DELAY(10000);
3298
3299 return 0;
3300 }
3301
3302 /*
3303 * Frame reception handling. This is called if there's a frame
3304 * on the receive return list.
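 * The hand-off is a plain producer/consumer scheme: the chip advances
 * bge_rx_prod_idx in the host status block as it returns frames, the
 * driver chases it with bge_rx_saved_considx, and acknowledges
 * progress by writing the consumer index back through the
 * BGE_MBX_RX_CONS0_LO mailbox.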
3305 * 3306 * Note: we have to be able to handle two possibilities here: 3307 * 1) the frame is from the jumbo receive ring 3308 * 2) the frame is from the standard receive ring 3309 */ 3310 3311 static void 3312 bge_rxeof(struct bge_softc *sc) 3313 { 3314 struct ifnet *ifp; 3315 uint16_t rx_prod, rx_cons; 3316 int stdcnt = 0, jumbocnt = 0; 3317 bus_dmamap_t dmamap; 3318 bus_addr_t offset, toff; 3319 bus_size_t tlen; 3320 int tosync; 3321 3322 rx_cons = sc->bge_rx_saved_considx; 3323 rx_prod = sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx; 3324 3325 /* Nothing to do */ 3326 if (rx_cons == rx_prod) 3327 return; 3328 3329 ifp = &sc->ethercom.ec_if; 3330 3331 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 3332 offsetof(struct bge_ring_data, bge_status_block), 3333 sizeof (struct bge_status_block), 3334 BUS_DMASYNC_POSTREAD); 3335 3336 offset = offsetof(struct bge_ring_data, bge_rx_return_ring); 3337 tosync = rx_prod - rx_cons; 3338 3339 #if NRND > 0 3340 if (tosync != 0 && RND_ENABLED(&sc->rnd_source)) 3341 rnd_add_uint32(&sc->rnd_source, tosync); 3342 #endif 3343 3344 toff = offset + (rx_cons * sizeof (struct bge_rx_bd)); 3345 3346 if (tosync < 0) { 3347 tlen = (sc->bge_return_ring_cnt - rx_cons) * 3348 sizeof (struct bge_rx_bd); 3349 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 3350 toff, tlen, BUS_DMASYNC_POSTREAD); 3351 tosync = -tosync; 3352 } 3353 3354 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 3355 offset, tosync * sizeof (struct bge_rx_bd), 3356 BUS_DMASYNC_POSTREAD); 3357 3358 while (rx_cons != rx_prod) { 3359 struct bge_rx_bd *cur_rx; 3360 uint32_t rxidx; 3361 struct mbuf *m = NULL; 3362 3363 cur_rx = &sc->bge_rdata->bge_rx_return_ring[rx_cons]; 3364 3365 rxidx = cur_rx->bge_idx; 3366 BGE_INC(rx_cons, sc->bge_return_ring_cnt); 3367 3368 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) { 3369 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT); 3370 m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx]; 3371 sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL; 3372 jumbocnt++; 3373 bus_dmamap_sync(sc->bge_dmatag, 3374 sc->bge_cdata.bge_rx_jumbo_map, 3375 mtod(m, char *) - (char *)sc->bge_cdata.bge_jumbo_buf, 3376 BGE_JLEN, BUS_DMASYNC_POSTREAD); 3377 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) { 3378 ifp->if_ierrors++; 3379 bge_newbuf_jumbo(sc, sc->bge_jumbo, m); 3380 continue; 3381 } 3382 if (bge_newbuf_jumbo(sc, sc->bge_jumbo, 3383 NULL)== ENOBUFS) { 3384 ifp->if_ierrors++; 3385 bge_newbuf_jumbo(sc, sc->bge_jumbo, m); 3386 continue; 3387 } 3388 } else { 3389 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT); 3390 m = sc->bge_cdata.bge_rx_std_chain[rxidx]; 3391 3392 sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL; 3393 stdcnt++; 3394 dmamap = sc->bge_cdata.bge_rx_std_map[rxidx]; 3395 sc->bge_cdata.bge_rx_std_map[rxidx] = 0; 3396 bus_dmamap_sync(sc->bge_dmatag, dmamap, 0, 3397 dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD); 3398 bus_dmamap_unload(sc->bge_dmatag, dmamap); 3399 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) { 3400 ifp->if_ierrors++; 3401 bge_newbuf_std(sc, sc->bge_std, m, dmamap); 3402 continue; 3403 } 3404 if (bge_newbuf_std(sc, sc->bge_std, 3405 NULL, dmamap) == ENOBUFS) { 3406 ifp->if_ierrors++; 3407 bge_newbuf_std(sc, sc->bge_std, m, dmamap); 3408 continue; 3409 } 3410 } 3411 3412 ifp->if_ipackets++; 3413 #ifndef __NO_STRICT_ALIGNMENT 3414 /* 3415 * XXX: if the 5701 PCIX-Rx-DMA workaround is in effect, 3416 * the Rx buffer has the layer-2 header unaligned. 3417 * If our CPU requires alignment, re-align by copying. 
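 * (Concretely: with the buffer forced to 4-byte alignment by the
 * workaround, the 14-byte Ethernet header leaves the IP header on a
 * 2-byte boundary; the memmove() below shifts the frame up by
 * ETHER_ALIGN (2) bytes to restore alignment, at the cost of a copy.)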
3418 */ 3419 if (sc->bge_flags & BGE_RX_ALIGNBUG) { 3420 memmove(mtod(m, char *) + ETHER_ALIGN, m->m_data, 3421 cur_rx->bge_len); 3422 m->m_data += ETHER_ALIGN; 3423 } 3424 #endif 3425 3426 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN; 3427 m->m_pkthdr.rcvif = ifp; 3428 3429 /* 3430 * Handle BPF listeners. Let the BPF user see the packet. 3431 */ 3432 bpf_mtap(ifp, m); 3433 3434 m->m_pkthdr.csum_flags = M_CSUM_IPv4; 3435 3436 if ((cur_rx->bge_ip_csum ^ 0xffff) != 0) 3437 m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD; 3438 /* 3439 * Rx transport checksum-offload may also 3440 * have bugs with packets which, when transmitted, 3441 * were `runts' requiring padding. 3442 */ 3443 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM && 3444 (/* (sc->_bge_quirks & BGE_QUIRK_SHORT_CKSUM_BUG) == 0 ||*/ 3445 m->m_pkthdr.len >= ETHER_MIN_NOPAD)) { 3446 m->m_pkthdr.csum_data = 3447 cur_rx->bge_tcp_udp_csum; 3448 m->m_pkthdr.csum_flags |= 3449 (M_CSUM_TCPv4|M_CSUM_UDPv4| 3450 M_CSUM_DATA|M_CSUM_NO_PSEUDOHDR); 3451 } 3452 3453 /* 3454 * If we received a packet with a vlan tag, pass it 3455 * to vlan_input() instead of ether_input(). 3456 */ 3457 if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) { 3458 VLAN_INPUT_TAG(ifp, m, cur_rx->bge_vlan_tag, continue); 3459 } 3460 3461 (*ifp->if_input)(ifp, m); 3462 } 3463 3464 sc->bge_rx_saved_considx = rx_cons; 3465 bge_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx); 3466 if (stdcnt) 3467 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std); 3468 if (jumbocnt) 3469 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo); 3470 } 3471 3472 static void 3473 bge_txeof(struct bge_softc *sc) 3474 { 3475 struct bge_tx_bd *cur_tx = NULL; 3476 struct ifnet *ifp; 3477 struct txdmamap_pool_entry *dma; 3478 bus_addr_t offset, toff; 3479 bus_size_t tlen; 3480 int tosync; 3481 struct mbuf *m; 3482 3483 ifp = &sc->ethercom.ec_if; 3484 3485 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 3486 offsetof(struct bge_ring_data, bge_status_block), 3487 sizeof (struct bge_status_block), 3488 BUS_DMASYNC_POSTREAD); 3489 3490 offset = offsetof(struct bge_ring_data, bge_tx_ring); 3491 tosync = sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx - 3492 sc->bge_tx_saved_considx; 3493 3494 #if NRND > 0 3495 if (tosync != 0 && RND_ENABLED(&sc->rnd_source)) 3496 rnd_add_uint32(&sc->rnd_source, tosync); 3497 #endif 3498 3499 toff = offset + (sc->bge_tx_saved_considx * sizeof (struct bge_tx_bd)); 3500 3501 if (tosync < 0) { 3502 tlen = (BGE_TX_RING_CNT - sc->bge_tx_saved_considx) * 3503 sizeof (struct bge_tx_bd); 3504 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 3505 toff, tlen, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 3506 tosync = -tosync; 3507 } 3508 3509 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 3510 offset, tosync * sizeof (struct bge_tx_bd), 3511 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 3512 3513 /* 3514 * Go through our tx ring and free mbufs for those 3515 * frames that have been sent. 
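 * As on the receive side, bge_tx_saved_considx chases the chip's
 * bge_tx_cons_idx from the status block; each reclaimed slot has its
 * mbuf freed and its DMA map pushed back onto txdma_list for reuse by
 * bge_start().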
	 */
	while (sc->bge_tx_saved_considx !=
	    sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx) {
		uint32_t idx = 0;

		idx = sc->bge_tx_saved_considx;
		cur_tx = &sc->bge_rdata->bge_tx_ring[idx];
		if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
			ifp->if_opackets++;
		m = sc->bge_cdata.bge_tx_chain[idx];
		if (m != NULL) {
			sc->bge_cdata.bge_tx_chain[idx] = NULL;
			dma = sc->txdma[idx];
			bus_dmamap_sync(sc->bge_dmatag, dma->dmamap, 0,
			    dma->dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->bge_dmatag, dma->dmamap);
			SLIST_INSERT_HEAD(&sc->txdma_list, dma, link);
			sc->txdma[idx] = NULL;

			m_freem(m);
		}
		sc->bge_txcnt--;
		BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
		ifp->if_timer = 0;
	}

	if (cur_tx != NULL)
		ifp->if_flags &= ~IFF_OACTIVE;
}

static int
bge_intr(void *xsc)
{
	struct bge_softc *sc;
	struct ifnet *ifp;
	uint32_t statusword;

	sc = xsc;
	ifp = &sc->ethercom.ec_if;

	/* It is possible for the interrupt to arrive before
	 * the status block has been updated.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */

	/* read status word from status block */
	statusword = sc->bge_rdata->bge_status_block.bge_status;

	if ((statusword & BGE_STATFLAG_UPDATED) ||
	    (!(CSR_READ_4(sc, BGE_PCI_PCISTATE) & BGE_PCISTATE_INTR_NOT_ACTIVE))) {
		/* Ack interrupt and stop others from occurring. */
		bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);

		BGE_EVCNT_INCR(sc->bge_ev_intr);

		/* clear status word */
		sc->bge_rdata->bge_status_block.bge_status = 0;

		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
		    statusword & BGE_STATFLAG_LINKSTATE_CHANGED ||
		    BGE_STS_BIT(sc, BGE_STS_LINK_EVT))
			bge_link_upd(sc);

		if (ifp->if_flags & IFF_RUNNING) {
			/* Check RX return ring producer/consumer */
			bge_rxeof(sc);

			/* Check TX ring producer/consumer */
			bge_txeof(sc);
		}

		if (sc->bge_pending_rxintr_change) {
			uint32_t rx_ticks = sc->bge_rx_coal_ticks;
			uint32_t rx_bds = sc->bge_rx_max_coal_bds;
			uint32_t junk;

			CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, rx_ticks);
			DELAY(10);
			junk = CSR_READ_4(sc, BGE_HCC_RX_COAL_TICKS);

			CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, rx_bds);
			DELAY(10);
			junk = CSR_READ_4(sc, BGE_HCC_RX_MAX_COAL_BDS);

			sc->bge_pending_rxintr_change = 0;
		}
		bge_handle_events(sc);

		/* Re-enable interrupts. */
		bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);

		if (ifp->if_flags & IFF_RUNNING && !IFQ_IS_EMPTY(&ifp->if_snd))
			bge_start(ifp);

		return 1;
	} else
		return 0;
}

static void
bge_asf_driver_up(struct bge_softc *sc)
{
	if (sc->bge_asf_mode & ASF_STACKUP) {
		/* Send ASF heartbeat approx. every 2s. */
		if (sc->bge_asf_count)
			sc->bge_asf_count--;
		else {
			sc->bge_asf_count = 2;
			bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM_FW,
			    BGE_FW_DRV_ALIVE);
			bge_writemem_ind(sc, BGE_SOFTWARE_GENNCOMM_FW_LEN, 4);
			bge_writemem_ind(sc, BGE_SOFTWARE_GENNCOMM_FW_DATA, 3);
			CSR_WRITE_4(sc, BGE_CPU_EVENT,
			    CSR_READ_4(sc, BGE_CPU_EVENT) | (1 << 14));
		}
	}
}

static void
bge_tick(void *xsc)
{
	struct bge_softc *sc = xsc;
	struct mii_data *mii = &sc->bge_mii;
	int s;

	s = splnet();

	if (BGE_IS_5705_PLUS(sc))
		bge_stats_update_regs(sc);
	else
		bge_stats_update(sc);

	if (sc->bge_flags & BGE_PHY_FIBER_TBI) {
		/*
		 * Since auto-polling can't be used in TBI mode, we have to
		 * poll the link status manually.  Register a pending link
		 * event here and trigger an interrupt.
		 */
		BGE_STS_SETBIT(sc, BGE_STS_LINK_EVT);
		BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
	} else {
		/*
		 * Do not touch the PHY if we have link up.  Doing so could
		 * break IPMI/ASF mode or produce extra input errors
		 * (extra input errors were reported for bcm5701 & bcm5704).
		 */
		if (!BGE_STS_BIT(sc, BGE_STS_LINK))
			mii_tick(mii);
	}

	callout_reset(&sc->bge_timeout, hz, bge_tick, sc);

	splx(s);
}

static void
bge_stats_update_regs(struct bge_softc *sc)
{
	struct ifnet *ifp = &sc->ethercom.ec_if;

	ifp->if_collisions += CSR_READ_4(sc, BGE_MAC_STATS +
	    offsetof(struct bge_mac_stats_regs, etherStatsCollisions));

	ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
	ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS);
	ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS);
}

static void
bge_stats_update(struct bge_softc *sc)
{
	struct ifnet *ifp = &sc->ethercom.ec_if;
	bus_size_t stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;

#define READ_STAT(sc, stats, stat) \
	CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))

	ifp->if_collisions +=
	    (READ_STAT(sc, stats, dot3StatsSingleCollisionFrames.bge_addr_lo) +
	     READ_STAT(sc, stats, dot3StatsMultipleCollisionFrames.bge_addr_lo) +
	     READ_STAT(sc, stats, dot3StatsExcessiveCollisions.bge_addr_lo) +
	     READ_STAT(sc, stats, dot3StatsLateCollisions.bge_addr_lo)) -
	    ifp->if_collisions;

	BGE_EVCNT_UPD(sc->bge_ev_tx_xoff,
	    READ_STAT(sc, stats, outXoffSent.bge_addr_lo));
	BGE_EVCNT_UPD(sc->bge_ev_tx_xon,
	    READ_STAT(sc, stats, outXonSent.bge_addr_lo));
	BGE_EVCNT_UPD(sc->bge_ev_rx_xoff,
	    READ_STAT(sc, stats,
		xoffPauseFramesReceived.bge_addr_lo));
	BGE_EVCNT_UPD(sc->bge_ev_rx_xon,
	    READ_STAT(sc, stats, xonPauseFramesReceived.bge_addr_lo));
	BGE_EVCNT_UPD(sc->bge_ev_rx_macctl,
	    READ_STAT(sc, stats,
		macControlFramesReceived.bge_addr_lo));
	BGE_EVCNT_UPD(sc->bge_ev_xoffentered,
	    READ_STAT(sc, stats, xoffStateEntered.bge_addr_lo));

#undef READ_STAT

#ifdef notdef
	ifp->if_collisions +=
	   (sc->bge_rdata->bge_info.bge_stats.dot3StatsSingleCollisionFrames +
	    sc->bge_rdata->bge_info.bge_stats.dot3StatsMultipleCollisionFrames +
	    sc->bge_rdata->bge_info.bge_stats.dot3StatsExcessiveCollisions +
	    sc->bge_rdata->bge_info.bge_stats.dot3StatsLateCollisions) -
	   ifp->if_collisions;
#endif
}

/*
 * Pad outbound frame to ETHER_MIN_NOPAD for an
 * unusual reason.
 * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD,
 * but when such padded frames employ the bge IP/TCP checksum offload,
 * the hardware checksum assist gives incorrect results (possibly
 * from incorporating its own padding into the UDP/TCP checksum; who knows).
 * If we pad such runts with zeros, the onboard checksum comes out correct.
 */
static inline int
bge_cksum_pad(struct mbuf *pkt)
{
	struct mbuf *last = NULL;
	int padlen;

	padlen = ETHER_MIN_NOPAD - pkt->m_pkthdr.len;

	/* If there's only the packet-header and we can pad there, use it. */
	if (pkt->m_pkthdr.len == pkt->m_len &&
	    M_TRAILINGSPACE(pkt) >= padlen) {
		last = pkt;
	} else {
		/*
		 * Walk packet chain to find last mbuf. We will either
		 * pad there, or append a new mbuf and pad it
		 * (thus perhaps avoiding the bcm5700 dma-min bug).
		 */
		for (last = pkt; last->m_next != NULL; last = last->m_next) {
			continue; /* do nothing */
		}

		/* `last' now points to last in chain. */
		if (M_TRAILINGSPACE(last) < padlen) {
			/* Allocate new empty mbuf, pad it. Compact later. */
			struct mbuf *n;
			MGET(n, M_DONTWAIT, MT_DATA);
			if (n == NULL)
				return ENOBUFS;
			n->m_len = 0;
			last->m_next = n;
			last = n;
		}
	}

	KDASSERT(!M_READONLY(last));
	KDASSERT(M_TRAILINGSPACE(last) >= padlen);

	/* Now zero the pad area, to avoid the bge cksum-assist bug. */
	memset(mtod(last, char *) + last->m_len, 0, padlen);
	last->m_len += padlen;
	pkt->m_pkthdr.len += padlen;
	return 0;
}

/*
 * Compact outbound packets to avoid bug with DMA segments less than 8 bytes.
 */
static inline int
bge_compact_dma_runt(struct mbuf *pkt)
{
	struct mbuf *m, *prev;
	int totlen, prevlen;

	prev = NULL;
	totlen = 0;
	prevlen = -1;

	for (m = pkt; m != NULL; prev = m, m = m->m_next) {
		int mlen = m->m_len;
		int shortfall = 8 - mlen;

		totlen += mlen;
		if (mlen == 0) {
			continue;
		}
		if (mlen >= 8)
			continue;

		/*
		 * If we get here, the mbuf data is too small for the DMA
		 * engine.  Try to fix it by shuffling data to prev or next
		 * in the chain.  If that fails, do a compacting deep-copy
		 * of the whole chain.
		 */

		/* Internal frag. If it fits in prev, copy it there. */
		if (prev && M_TRAILINGSPACE(prev) >= m->m_len) {
			memcpy(prev->m_data + prev->m_len, m->m_data, mlen);
			prev->m_len += mlen;
			m->m_len = 0;
			/* XXX stitch chain */
			prev->m_next = m_free(m);
			m = prev;
			continue;
		} else if (m->m_next != NULL &&
		    M_TRAILINGSPACE(m) >= shortfall &&
		    m->m_next->m_len >= (8 + shortfall)) {
			/* m is writable and has enough data in next; pull up. */

			memcpy(m->m_data + m->m_len, m->m_next->m_data,
			    shortfall);
			m->m_len += shortfall;
			m->m_next->m_len -= shortfall;
			m->m_next->m_data += shortfall;
		} else {
			/*
			 * Got a runt we cannot fix in place.
			 * Borrow data from the tail of the preceding mbuf and
			 * update its length in-place. (The original data is
			 * still valid, so we can do this even if prev is not
			 * writable.)
			 */

			/* If we'd make prev a runt, just move all of its data.
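			 * Worked example: a 5-byte runt needs shortfall = 3
			 * bytes from prev; if prev->m_len is 9, taking 3
			 * would leave only 6 (< 8) in prev, so we take all
			 * 9 bytes instead and leave prev empty (zero-length
			 * mbufs are skipped by the loop above).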
			 */
			KASSERT(prev != NULL /*, ("runt but null PREV")*/);
			KASSERT(prev->m_len >= 8 /*, ("runt prev")*/);

			if ((prev->m_len - shortfall) < 8)
				shortfall = prev->m_len;

#ifdef notyet	/* just do the safe slow thing for now */
			if (!M_READONLY(m)) {
				if (M_LEADINGSPACE(m) < shortfall) {
					void *m_dat;
					m_dat = (m->m_flags & M_PKTHDR) ?
					    m->m_pktdat : m->m_dat;
					memmove(m_dat, mtod(m, void*),
					    m->m_len);
					m->m_data = m_dat;
				}
			} else
#endif	/* just do the safe slow thing */
			{
				struct mbuf *n = NULL;
				int newprevlen = prev->m_len - shortfall;

				MGET(n, M_DONTWAIT, MT_DATA);
				if (n == NULL)
					return ENOBUFS;
				KASSERT(m->m_len + shortfall < MLEN
				    /*,
				      ("runt %d +prev %d too big\n", m->m_len, shortfall)*/);

				/* first copy the data we're stealing from prev */
				memcpy(n->m_data, prev->m_data + newprevlen,
				    shortfall);

				/* update prev->m_len accordingly */
				prev->m_len -= shortfall;

				/* copy data from runt m */
				memcpy(n->m_data + shortfall, m->m_data,
				    m->m_len);

				/* n holds what we stole from prev, plus m */
				n->m_len = shortfall + m->m_len;

				/* stitch n into chain and free m */
				n->m_next = m->m_next;
				prev->m_next = n;
				/* KASSERT(m->m_next == NULL); */
				m->m_next = NULL;
				m_free(m);
				m = n;	/* for continuing loop */
			}
		}
		prevlen = m->m_len;
	}
	return 0;
}

/*
 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
 * pointers to descriptors.
 */
static int
bge_encap(struct bge_softc *sc, struct mbuf *m_head, uint32_t *txidx)
{
	struct bge_tx_bd *f = NULL;
	uint32_t frag, cur;
	uint16_t csum_flags = 0;
	uint16_t txbd_tso_flags = 0;
	struct txdmamap_pool_entry *dma;
	bus_dmamap_t dmamap;
	int i = 0;
	struct m_tag *mtag;
	int use_tso, maxsegsize, error;

	cur = frag = *txidx;

	if (m_head->m_pkthdr.csum_flags) {
		if (m_head->m_pkthdr.csum_flags & M_CSUM_IPv4)
			csum_flags |= BGE_TXBDFLAG_IP_CSUM;
		if (m_head->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4))
			csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
	}

	/*
	 * If we were asked to do an outboard checksum, and the NIC
	 * has the bug where it sometimes adds in the Ethernet padding,
	 * explicitly pad with zeros so the cksum will be correct either way.
	 * (For now, do this for all chip versions, until newer
	 * ones are confirmed to not require the workaround.)
	 */
	if ((csum_flags & BGE_TXBDFLAG_TCP_UDP_CSUM) == 0 ||
#ifdef notyet
	    (sc->bge_quirks & BGE_QUIRK_SHORT_CKSUM_BUG) == 0 ||
#endif
	    m_head->m_pkthdr.len >= ETHER_MIN_NOPAD)
		goto check_dma_bug;

	if (bge_cksum_pad(m_head) != 0)
		return ENOBUFS;

check_dma_bug:
	if (BGE_CHIPREV(sc->bge_chipid) != BGE_CHIPREV_5700_BX)
		goto doit;

	/*
	 * bcm5700 Revision B silicon cannot handle DMA descriptors with
	 * less than eight bytes.  If we encounter a teeny mbuf
	 * at the end of a chain, we can pad.  Otherwise, copy.
	 */
	if (bge_compact_dma_runt(m_head) != 0)
		return ENOBUFS;

doit:
	dma = SLIST_FIRST(&sc->txdma_list);
	if (dma == NULL)
		return ENOBUFS;
	dmamap = dma->dmamap;

	/*
	 * Set up any necessary TSO state before we start packing...
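	 * (Here "TSO state" means: whether M_CSUM_TSOv4 was requested,
	 * the pseudo-header checksum fixups, and the ASIC-specific
	 * encoding of MSS and IP/TCP option length computed below.)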
	 */
	use_tso = (m_head->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
	if (!use_tso) {
		maxsegsize = 0;
	} else {	/* TSO setup */
		unsigned mss;
		struct ether_header *eh;
		unsigned ip_tcp_hlen, iptcp_opt_words, tcp_seg_flags, offset;
		struct mbuf *m0 = m_head;
		struct ip *ip;
		struct tcphdr *th;
		int iphl, hlen;

		/*
		 * XXX It would be nice if the mbuf pkthdr had offset
		 * fields for the protocol headers.
		 */

		eh = mtod(m0, struct ether_header *);
		switch (ntohs(eh->ether_type)) {
		case ETHERTYPE_IP:
			offset = ETHER_HDR_LEN;
			break;

		case ETHERTYPE_VLAN:
			offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
			break;

		default:
			/*
			 * Don't support this protocol or encapsulation.
			 */
			return ENOBUFS;
		}

		/*
		 * TCP/IP headers are in the first mbuf; we can do
		 * this the easy way.
		 */
		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
		hlen = iphl + offset;
		if (__predict_false(m0->m_len <
		    (hlen + sizeof(struct tcphdr)))) {

			aprint_debug_dev(sc->bge_dev,
			    "TSO: hard case m0->m_len == %d < ip/tcp hlen %zu, "
			    "not handled yet\n",
			    m0->m_len, hlen + sizeof(struct tcphdr));
#ifdef NOTYET
			/*
			 * XXX jonathan@NetBSD.org: untested.
			 * how to force this branch to be taken?
			 */
			BGE_EVCNT_INCR(&sc->sc_ev_txtsopain);

			m_copydata(m0, offset, sizeof(ip), &ip);
			m_copydata(m0, hlen, sizeof(th), &th);

			ip.ip_len = 0;

			m_copyback(m0, hlen + offsetof(struct ip, ip_len),
			    sizeof(ip.ip_len), &ip.ip_len);

			th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
			    ip.ip_dst.s_addr, htons(IPPROTO_TCP));

			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
			    sizeof(th.th_sum), &th.th_sum);

			hlen += th.th_off << 2;
			iptcp_opt_words = hlen;
#else
			/*
			 * if_wm "hard" case not yet supported, can we not
			 * mandate it out of existence?
			 */
			(void) ip; (void) th; (void) ip_tcp_hlen;

			return ENOBUFS;
#endif
		} else {
			ip = (struct ip *) (mtod(m0, char *) + offset);
			th = (struct tcphdr *) (mtod(m0, char *) + hlen);
			ip_tcp_hlen = iphl + (th->th_off << 2);

			/* Total IP/TCP options, in 32-bit words */
			iptcp_opt_words = (ip_tcp_hlen
			    - sizeof(struct tcphdr)
			    - sizeof(struct ip)) >> 2;
		}
		if (BGE_IS_5750_OR_BEYOND(sc)) {
			th->th_sum = 0;
			csum_flags &= ~(BGE_TXBDFLAG_TCP_UDP_CSUM);
		} else {
			/*
			 * XXX jonathan@NetBSD.org: 5705 untested.
			 * Requires TSO firmware patch for 5701/5703/5704.
			 */
			th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
			    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
		}

		mss = m_head->m_pkthdr.segsz;
		txbd_tso_flags |=
		    BGE_TXBDFLAG_CPU_PRE_DMA |
		    BGE_TXBDFLAG_CPU_POST_DMA;

		/*
		 * Our NIC TSO-assist assumes TSO has standard, optionless
		 * IPv4 and TCP headers, which total 40 bytes. By default,
		 * the NIC copies 40 bytes of IP/TCP header from the
		 * supplied header into the IP/TCP header portion of
		 * each post-TSO-segment. If the supplied packet has IP or
		 * TCP options, we need to tell the NIC to copy those extra
		 * bytes into each post-TSO header, in addition to the normal
		 * 40-byte IP/TCP header (and to leave space accordingly).
		 * Unfortunately, the driver encoding of option length
		 * varies across different ASIC families.
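		 * A worked example (assuming 12 bytes of TCP options and
		 * no IP options): iptcp_opt_words = 12 >> 2 = 3.  On
		 * 5705-plus parts the 3 is merged into the segment-size
		 * word as (3 << 11); on older parts it is folded into the
		 * descriptor flags as (3 << 12), as done just below.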
		 */
		tcp_seg_flags = 0;
		if (iptcp_opt_words) {
			if (BGE_IS_5705_PLUS(sc)) {
				tcp_seg_flags =
				    iptcp_opt_words << 11;
			} else {
				txbd_tso_flags |=
				    iptcp_opt_words << 12;
			}
		}
		maxsegsize = mss | tcp_seg_flags;
		ip->ip_len = htons(mss + ip_tcp_hlen);

	}	/* TSO setup */

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	error = bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m_head,
	    BUS_DMA_NOWAIT);
	if (error)
		return ENOBUFS;
	/*
	 * Sanity check: avoid coming within 16 descriptors
	 * of the end of the ring.
	 */
	if (dmamap->dm_nsegs > (BGE_TX_RING_CNT - sc->bge_txcnt - 16)) {
		BGE_TSO_PRINTF(("%s: dmamap_load_mbuf too close to ring wrap\n",
		    device_xname(sc->bge_dev)));
		goto fail_unload;
	}

	mtag = sc->ethercom.ec_nvlans ?
	    m_tag_find(m_head, PACKET_TAG_VLAN, NULL) : NULL;

	/* Iterate over dma-map fragments. */
	for (i = 0; i < dmamap->dm_nsegs; i++) {
		f = &sc->bge_rdata->bge_tx_ring[frag];
		if (sc->bge_cdata.bge_tx_chain[frag] != NULL)
			break;

		BGE_HOSTADDR(f->bge_addr, dmamap->dm_segs[i].ds_addr);
		f->bge_len = dmamap->dm_segs[i].ds_len;

		/*
		 * For 5750 and follow-ons, for TSO we must turn
		 * off the checksum-assist flag in the tx-descr, and
		 * supply the ASIC-revision-specific encoding
		 * of TSO flags and segsize.
		 */
		if (use_tso) {
			if (BGE_IS_5750_OR_BEYOND(sc) || i == 0) {
				f->bge_rsvd = maxsegsize;
				f->bge_flags = csum_flags | txbd_tso_flags;
			} else {
				f->bge_rsvd = 0;
				f->bge_flags =
				    (csum_flags | txbd_tso_flags) & 0x0fff;
			}
		} else {
			f->bge_rsvd = 0;
			f->bge_flags = csum_flags;
		}

		if (mtag != NULL) {
			f->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
			f->bge_vlan_tag = VLAN_TAG_VALUE(mtag);
		} else {
			f->bge_vlan_tag = 0;
		}
		cur = frag;
		BGE_INC(frag, BGE_TX_RING_CNT);
	}

	if (i < dmamap->dm_nsegs) {
		BGE_TSO_PRINTF(("%s: reached %d < dm_nsegs %d\n",
		    device_xname(sc->bge_dev), i, dmamap->dm_nsegs));
		goto fail_unload;
	}

	bus_dmamap_sync(sc->bge_dmatag, dmamap, 0, dmamap->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	if (frag == sc->bge_tx_saved_considx) {
		BGE_TSO_PRINTF(("%s: frag %d = wrapped id %d?\n",
		    device_xname(sc->bge_dev), frag, sc->bge_tx_saved_considx));

		goto fail_unload;
	}

	sc->bge_rdata->bge_tx_ring[cur].bge_flags |= BGE_TXBDFLAG_END;
	sc->bge_cdata.bge_tx_chain[cur] = m_head;
	SLIST_REMOVE_HEAD(&sc->txdma_list, link);
	sc->txdma[cur] = dma;
	sc->bge_txcnt += dmamap->dm_nsegs;

	*txidx = frag;

	return 0;

fail_unload:
	bus_dmamap_unload(sc->bge_dmatag, dmamap);

	return ENOBUFS;
}

/*
 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit descriptors.
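 * A frame is dequeued (IFQ_DEQUEUE) only after bge_encap() succeeds;
 * on failure bge_start() sets IFF_OACTIVE and leaves the frame on the
 * send queue (it was only IFQ_POLLed), so it is retried once the ring
 * drains.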
 */
static void
bge_start(struct ifnet *ifp)
{
	struct bge_softc *sc;
	struct mbuf *m_head = NULL;
	uint32_t prodidx;
	int pkts = 0;

	sc = ifp->if_softc;

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	prodidx = sc->bge_tx_prodidx;

	while (sc->bge_cdata.bge_tx_chain[prodidx] == NULL) {
		IFQ_POLL(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

#if 0
		/*
		 * XXX
		 * safety overkill.  If this is a fragmented packet chain
		 * with delayed TCP/UDP checksums, then only encapsulate
		 * it if we have enough descriptors to handle the entire
		 * chain at once.
		 * (paranoia -- may not actually be needed)
		 */
		if (m_head->m_flags & M_FIRSTFRAG &&
		    m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
			if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
			    M_CSUM_DATA_IPv4_OFFSET(m_head->m_pkthdr.csum_data) +
			    16) {
				ifp->if_flags |= IFF_OACTIVE;
				break;
			}
		}
#endif

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (bge_encap(sc, m_head, &prodidx)) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* now we are committed to transmitting the packet */
		IFQ_DEQUEUE(&ifp->if_snd, m_head);
		pkts++;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		bpf_mtap(ifp, m_head);
	}
	if (pkts == 0)
		return;

	/* Transmit */
	bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
	/* 5700 b2 errata */
	if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX)
		bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);

	sc->bge_tx_prodidx = prodidx;

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}

static int
bge_init(struct ifnet *ifp)
{
	struct bge_softc *sc = ifp->if_softc;
	const uint16_t *m;
	int s, error = 0;

	s = splnet();

	ifp = &sc->ethercom.ec_if;

	/* Cancel pending I/O and flush buffers. */
	bge_stop(ifp, 0);

	bge_stop_fw(sc);
	bge_sig_pre_reset(sc, BGE_RESET_START);
	bge_reset(sc);
	bge_sig_legacy(sc, BGE_RESET_START);
	bge_sig_post_reset(sc, BGE_RESET_START);

	bge_chipinit(sc);

	/*
	 * Init the various state machines, ring
	 * control blocks and firmware.
	 */
	error = bge_blockinit(sc);
	if (error != 0) {
		aprint_error_dev(sc->bge_dev, "initialization error %d\n",
		    error);
		splx(s);
		return error;
	}

	ifp = &sc->ethercom.ec_if;

	/* Specify MTU. */
	CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
	    ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN);

	/* Load our MAC address. */
	m = (const uint16_t *)&(CLLADDR(ifp->if_sadl)[0]);
	CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
	CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));

	/* Enable or disable promiscuous mode as needed. */
	if (ifp->if_flags & IFF_PROMISC)
		BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
	else
		BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);

	/* Program multicast filter. */
	bge_setmulti(sc);

	/* Init RX ring. */
	bge_init_rx_ring_std(sc);

	/*
	 * Workaround for a bug in 5705 ASIC rev A0.  Poll the NIC's
	 * memory to ensure that the chip has in fact read the first
	 * entry of the ring.
	 */
	if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
		uint32_t v, i;
		for (i = 0; i < 10; i++) {
			DELAY(20);
			v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
			if (v == (MCLBYTES - ETHER_ALIGN))
				break;
		}
		if (i == 10)
			aprint_error_dev(sc->bge_dev,
			    "5705 A0 chip failed to load RX ring\n");
	}

	/* Init jumbo RX ring. */
	if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
		bge_init_rx_ring_jumbo(sc);

	/* Init our RX return ring index */
	sc->bge_rx_saved_considx = 0;

	/* Init TX ring. */
	bge_init_tx_ring(sc);

	/* Turn on transmitter */
	BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE);

	/* Turn on receiver */
	BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);

	CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 2);

	/* Tell firmware we're alive. */
	BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);

	/* Enable host interrupts. */
	BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
	BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
	bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);

	if ((error = bge_ifmedia_upd(ifp)) != 0)
		goto out;

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	callout_reset(&sc->bge_timeout, hz, bge_tick, sc);

out:
	sc->bge_if_flags = ifp->if_flags;
	splx(s);

	return error;
}

/*
 * Set media options.
 */
static int
bge_ifmedia_upd(struct ifnet *ifp)
{
	struct bge_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->bge_mii;
	struct ifmedia *ifm = &sc->bge_ifmedia;
	int rc;

	/* If this is a 1000baseX NIC, enable the TBI port. */
	if (sc->bge_flags & BGE_PHY_FIBER_TBI) {
		if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
			return EINVAL;
		switch (IFM_SUBTYPE(ifm->ifm_media)) {
		case IFM_AUTO:
			/*
			 * The BCM5704 ASIC appears to have a special
			 * mechanism for programming the autoneg
			 * advertisement registers in TBI mode.
			 */
			if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
				uint32_t sgdig;
				sgdig = CSR_READ_4(sc, BGE_SGDIG_STS);
				if (sgdig & BGE_SGDIGSTS_DONE) {
					CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
					sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
					sgdig |= BGE_SGDIGCFG_AUTO |
					    BGE_SGDIGCFG_PAUSE_CAP |
					    BGE_SGDIGCFG_ASYM_PAUSE;
					CSR_WRITE_4(sc, BGE_SGDIG_CFG,
					    sgdig | BGE_SGDIGCFG_SEND);
					DELAY(5);
					CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
				}
			}
			break;
		case IFM_1000_SX:
			if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
				BGE_CLRBIT(sc, BGE_MAC_MODE,
				    BGE_MACMODE_HALF_DUPLEX);
			} else {
				BGE_SETBIT(sc, BGE_MAC_MODE,
				    BGE_MACMODE_HALF_DUPLEX);
			}
			break;
		default:
			return EINVAL;
		}
		/* XXX 802.3x flow control for 1000BASE-SX */
		return 0;
	}

	BGE_STS_SETBIT(sc, BGE_STS_LINK_EVT);
	if ((rc = mii_mediachg(mii)) == ENXIO)
		return 0;

	/*
	 * Force an interrupt so that we will call bge_link_upd
	 * if needed and clear any pending link state attention.
	 * Without this we would not get any further interrupts
	 * for link state changes, would never mark the link UP, and
	 * would not be able to send in bge_start.
	 * The only way to get things working was to receive a packet
	 * and get an RX intr.
	 */
	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
	    sc->bge_flags & BGE_IS_5788)
		BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
	else
		BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);

	return rc;
}

/*
 * Report current media status.
 */
static void
bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct bge_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->bge_mii;

	if (sc->bge_flags & BGE_PHY_FIBER_TBI) {
		ifmr->ifm_status = IFM_AVALID;
		ifmr->ifm_active = IFM_ETHER;
		if (CSR_READ_4(sc, BGE_MAC_STS) &
		    BGE_MACSTAT_TBI_PCS_SYNCHED)
			ifmr->ifm_status |= IFM_ACTIVE;
		ifmr->ifm_active |= IFM_1000_SX;
		if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
			ifmr->ifm_active |= IFM_HDX;
		else
			ifmr->ifm_active |= IFM_FDX;
		return;
	}

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = (mii->mii_media_active & ~IFM_ETH_FMASK) |
	    sc->bge_flowflags;
}

static int
bge_ifflags_cb(struct ethercom *ec)
{
	struct ifnet *ifp = &ec->ec_if;
	struct bge_softc *sc = ifp->if_softc;
	int change = ifp->if_flags ^ sc->bge_if_flags;

	if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
		return ENETRESET;
	else if ((change & (IFF_PROMISC | IFF_ALLMULTI)) == 0)
		return 0;

	if ((ifp->if_flags & IFF_PROMISC) == 0)
		BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
	else
		BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);

	bge_setmulti(sc);

	sc->bge_if_flags = ifp->if_flags;
	return 0;
}

static int
bge_ioctl(struct ifnet *ifp, u_long command, void *data)
{
	struct bge_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	int s, error = 0;
	struct mii_data *mii;

	s = splnet();

	switch (command) {
	case SIOCSIFMEDIA:
		/* XXX Flow control is not supported for 1000BASE-SX */
		if (sc->bge_flags & BGE_PHY_FIBER_TBI) {
			ifr->ifr_media &= ~IFM_ETH_FMASK;
			sc->bge_flowflags = 0;
		}

		/* Flow control requires full-duplex mode. */
		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
		    (ifr->ifr_media & IFM_FDX) == 0) {
			ifr->ifr_media &= ~IFM_ETH_FMASK;
		}
		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
				/* We can do both TXPAUSE and RXPAUSE.
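				 * An unqualified IFM_FLOW request is
				 * therefore widened to both directions
				 * before being cached in bge_flowflags.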
				 */
				ifr->ifr_media |=
				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
			}
			sc->bge_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
		}
		/* FALLTHROUGH */
	case SIOCGIFMEDIA:
		if (sc->bge_flags & BGE_PHY_FIBER_TBI) {
			error = ifmedia_ioctl(ifp, ifr, &sc->bge_ifmedia,
			    command);
		} else {
			mii = &sc->bge_mii;
			error = ifmedia_ioctl(ifp, ifr, &mii->mii_media,
			    command);
		}
		break;
	default:
		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
			break;

		error = 0;

		if ((command == SIOCADDMULTI || command == SIOCDELMULTI) &&
		    (ifp->if_flags & IFF_RUNNING))
			bge_setmulti(sc);
		break;
	}

	splx(s);

	return error;
}

static void
bge_watchdog(struct ifnet *ifp)
{
	struct bge_softc *sc;

	sc = ifp->if_softc;

	aprint_error_dev(sc->bge_dev, "watchdog timeout -- resetting\n");

	ifp->if_flags &= ~IFF_RUNNING;
	bge_init(ifp);

	ifp->if_oerrors++;
}

static void
bge_stop_block(struct bge_softc *sc, bus_addr_t reg, uint32_t bit)
{
	int i;

	BGE_CLRBIT(sc, reg, bit);

	for (i = 0; i < 1000; i++) {
		if ((CSR_READ_4(sc, reg) & bit) == 0)
			return;
		delay(100);
	}

	/*
	 * Don't print the error when the register is BGE_SRS_MODE:
	 * that block is known to fail to stop in some environments
	 * (and once right after boot?).
	 */
	if (reg != BGE_SRS_MODE)
		aprint_error_dev(sc->bge_dev,
		    "block failed to stop: reg 0x%lx, bit 0x%08x\n",
		    (u_long)reg, bit);
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
static void
bge_stop(struct ifnet *ifp, int disable)
{
	struct bge_softc *sc = ifp->if_softc;

	callout_stop(&sc->bge_timeout);

	/*
	 * Tell firmware we're shutting down.
	 */
	bge_stop_fw(sc);
	bge_sig_pre_reset(sc, BGE_RESET_STOP);

	/* Disable host interrupts. */
	BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
	bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);

	/*
	 * Disable all of the receiver blocks.
	 */
	bge_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
	bge_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
	bge_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
	if (BGE_IS_5700_FAMILY(sc))
		bge_stop_block(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
	bge_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
	bge_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
	bge_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);

	/*
	 * Disable all of the transmit blocks.
	 */
	bge_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
	bge_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
	bge_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
	bge_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
	bge_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
	if (BGE_IS_5700_FAMILY(sc))
		bge_stop_block(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
	bge_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);

	/*
	 * Shut down all of the memory managers and related
	 * state machines.
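	 * The receiver and transmit blocks were quiesced above; once
	 * these managers are idle, the FTQ reset and (on 5700-family
	 * chips) the buffer manager and memory arbiter below finish
	 * the shutdown.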
	 */
	bge_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
	bge_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
	if (BGE_IS_5700_FAMILY(sc))
		bge_stop_block(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);

	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);

	if (BGE_IS_5700_FAMILY(sc)) {
		bge_stop_block(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
		bge_stop_block(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
	}

	bge_reset(sc);
	bge_sig_legacy(sc, BGE_RESET_STOP);
	bge_sig_post_reset(sc, BGE_RESET_STOP);

	/*
	 * Keep the ASF firmware running if up.
	 */
	if (sc->bge_asf_mode & ASF_STACKUP)
		BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
	else
		BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);

	/* Free the RX lists. */
	bge_free_rx_ring_std(sc);

	/* Free jumbo RX list. */
	if (BGE_IS_JUMBO_CAPABLE(sc))
		bge_free_rx_ring_jumbo(sc);

	/* Free TX buffers. */
	bge_free_tx_ring(sc);

	/*
	 * Isolate/power down the PHY.
	 */
	if (!(sc->bge_flags & BGE_PHY_FIBER_TBI))
		mii_down(&sc->bge_mii);

	sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;

	/* Clear MAC's link state (PHY may still have link UP). */
	BGE_STS_CLRBIT(sc, BGE_STS_LINK);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
}

static void
bge_link_upd(struct bge_softc *sc)
{
	struct ifnet *ifp = &sc->ethercom.ec_if;
	struct mii_data *mii = &sc->bge_mii;
	uint32_t status;
	int link;

	/* Clear 'pending link event' flag. */
	BGE_STS_CLRBIT(sc, BGE_STS_LINK_EVT);

	/*
	 * Process link state changes.
	 * Grrr. The link status word in the status block does
	 * not work correctly on the BCM5700 rev AX and BX chips,
	 * according to all available information. Hence, we have
	 * to enable MII interrupts in order to properly obtain
	 * async link changes. Unfortunately, this also means that
	 * we have to read the MAC status register to detect link
	 * changes, thereby adding an additional register access to
	 * the interrupt handler.
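	 * The three branches below reflect this: BCM5700 uses the MI
	 * interrupt plus mii_pollstat(); TBI cards read BGE_MAC_STS
	 * directly; and the auto-poll case re-reads BGE_MI_STS to work
	 * around chips that leave the link-changed status bit stuck on.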
	 */

	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700) {
		status = CSR_READ_4(sc, BGE_MAC_STS);
		if (status & BGE_MACSTAT_MI_INTERRUPT) {
			mii_pollstat(mii);

			if (!BGE_STS_BIT(sc, BGE_STS_LINK) &&
			    mii->mii_media_status & IFM_ACTIVE &&
			    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
				BGE_STS_SETBIT(sc, BGE_STS_LINK);
			else if (BGE_STS_BIT(sc, BGE_STS_LINK) &&
			    (!(mii->mii_media_status & IFM_ACTIVE) ||
			    IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE))
				BGE_STS_CLRBIT(sc, BGE_STS_LINK);

			/* Clear the interrupt */
			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
			    BGE_EVTENB_MI_INTERRUPT);
			bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR);
			bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR,
			    BRGPHY_INTRS);
		}
		return;
	}

	if (sc->bge_flags & BGE_PHY_FIBER_TBI) {
		status = CSR_READ_4(sc, BGE_MAC_STS);
		if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
			if (!BGE_STS_BIT(sc, BGE_STS_LINK)) {
				BGE_STS_SETBIT(sc, BGE_STS_LINK);
				if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704)
					BGE_CLRBIT(sc, BGE_MAC_MODE,
					    BGE_MACMODE_TBI_SEND_CFGS);
				CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
				if_link_state_change(ifp, LINK_STATE_UP);
			}
		} else if (BGE_STS_BIT(sc, BGE_STS_LINK)) {
			BGE_STS_CLRBIT(sc, BGE_STS_LINK);
			if_link_state_change(ifp, LINK_STATE_DOWN);
		}
	/*
	 * Discard link events for MII/GMII cards if MI auto-polling is
	 * disabled.  This should not happen since mii callouts are locked
	 * now, but we keep this check for debugging.
	 */
	} else if (BGE_STS_BIT(sc, BGE_STS_AUTOPOLL)) {
		/*
		 * Some broken BCM chips have the BGE_STATFLAG_LINKSTATE_CHANGED
		 * bit in the status word always set. Work around this bug by
		 * reading the PHY link status directly.
		 */
		link = (CSR_READ_4(sc, BGE_MI_STS) & BGE_MISTS_LINK) ?
		    BGE_STS_LINK : 0;

		if (BGE_STS_BIT(sc, BGE_STS_LINK) != link) {
			mii_pollstat(mii);

			if (!BGE_STS_BIT(sc, BGE_STS_LINK) &&
			    mii->mii_media_status & IFM_ACTIVE &&
			    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
				BGE_STS_SETBIT(sc, BGE_STS_LINK);
			else if (BGE_STS_BIT(sc, BGE_STS_LINK) &&
			    (!(mii->mii_media_status & IFM_ACTIVE) ||
			    IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE))
				BGE_STS_CLRBIT(sc, BGE_STS_LINK);
		}
	}

	/* Clear the attention. */
	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
	    BGE_MACSTAT_LINK_CHANGED);
}

static int
sysctl_bge_verify(SYSCTLFN_ARGS)
{
	int error, t;
	struct sysctlnode node;

	node = *rnode;
	t = *(int*)rnode->sysctl_data;
	node.sysctl_data = &t;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

#if 0
	DPRINTF2(("%s: t = %d, nodenum = %d, rnodenum = %d\n", __func__, t,
	    node.sysctl_num, rnode->sysctl_num));
#endif

	if (node.sysctl_num == bge_rxthresh_nodenum) {
		if (t < 0 || t >= NBGE_RX_THRESH)
			return EINVAL;
		bge_update_all_threshes(t);
	} else
		return EINVAL;

	*(int*)rnode->sysctl_data = t;

	return 0;
}

/*
 * Set up sysctl(3) MIB, hw.bge.*.
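 *
 * Usage sketch (node names as created below):
 *	sysctl -w hw.bge.rx_lvl=2
 * selects the third rx interrupt-mitigation threshold pair;
 * sysctl_bge_verify() above rejects values outside
 * [0, NBGE_RX_THRESH) with EINVAL.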
 */
static void
sysctl_bge_init(struct bge_softc *sc)
{
	int rc, bge_root_num;
	const struct sysctlnode *node;

	if ((rc = sysctl_createv(&sc->bge_log, 0, NULL, NULL,
	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "hw", NULL,
	    NULL, 0, NULL, 0, CTL_HW, CTL_EOL)) != 0) {
		goto err;
	}

	if ((rc = sysctl_createv(&sc->bge_log, 0, NULL, &node,
	    0, CTLTYPE_NODE, "bge",
	    SYSCTL_DESCR("BGE interface controls"),
	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0) {
		goto err;
	}

	bge_root_num = node->sysctl_num;

	/* BGE Rx interrupt mitigation level */
	if ((rc = sysctl_createv(&sc->bge_log, 0, NULL, &node,
	    CTLFLAG_READWRITE,
	    CTLTYPE_INT, "rx_lvl",
	    SYSCTL_DESCR("BGE receive interrupt mitigation level"),
	    sysctl_bge_verify, 0,
	    &bge_rx_thresh_lvl,
	    0, CTL_HW, bge_root_num, CTL_CREATE,
	    CTL_EOL)) != 0) {
		goto err;
	}

	bge_rxthresh_nodenum = node->sysctl_num;

	return;

err:
	aprint_error("%s: sysctl_createv failed (rc = %d)\n", __func__, rc);
}

#ifdef BGE_DEBUG
void
bge_debug_info(struct bge_softc *sc)
{

	printf("Hardware Flags:\n");
	if (BGE_IS_5755_PLUS(sc))
		printf(" - 5755 Plus\n");
	if (BGE_IS_5750_OR_BEYOND(sc))
		printf(" - 5750 Plus\n");
	if (BGE_IS_5705_PLUS(sc))
		printf(" - 5705 Plus\n");
	if (BGE_IS_5714_FAMILY(sc))
		printf(" - 5714 Family\n");
	if (BGE_IS_5700_FAMILY(sc))
		printf(" - 5700 Family\n");
	if (sc->bge_flags & BGE_IS_5788)
		printf(" - 5788\n");
	if (sc->bge_flags & BGE_JUMBO_CAPABLE)
		printf(" - Supports Jumbo Frames\n");
	if (sc->bge_flags & BGE_NO_EEPROM)
		printf(" - No EEPROM\n");
	if (sc->bge_flags & BGE_PCIX)
		printf(" - PCI-X Bus\n");
	if (sc->bge_flags & BGE_PCIE)
		printf(" - PCI Express Bus\n");
	if (sc->bge_flags & BGE_NO_3LED)
		printf(" - No 3 LEDs\n");
	if (sc->bge_flags & BGE_RX_ALIGNBUG)
		printf(" - RX Alignment Bug\n");
	if (sc->bge_flags & BGE_TSO)
		printf(" - TSO\n");
}
#endif /* BGE_DEBUG */

static int
bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[])
{
	prop_dictionary_t dict;
	prop_data_t ea;

	if ((sc->bge_flags & BGE_NO_EEPROM) == 0)
		return 1;

	dict = device_properties(sc->bge_dev);
	ea = prop_dictionary_get(dict, "mac-address");
	if (ea != NULL) {
		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
		memcpy(ether_addr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
		return 0;
	}

	return 1;
}

static int
bge_get_eaddr_mem(struct bge_softc *sc, uint8_t ether_addr[])
{
	uint32_t mac_addr;

	mac_addr = bge_readmem_ind(sc, 0x0c14);
	if ((mac_addr >> 16) == 0x484b) {
		ether_addr[0] = (uint8_t)(mac_addr >> 8);
		ether_addr[1] = (uint8_t)mac_addr;
		mac_addr = bge_readmem_ind(sc, 0x0c18);
		ether_addr[2] = (uint8_t)(mac_addr >> 24);
		ether_addr[3] = (uint8_t)(mac_addr >> 16);
		ether_addr[4] = (uint8_t)(mac_addr >> 8);
		ether_addr[5] = (uint8_t)mac_addr;
		return 0;
	}
	return 1;
}

static int
bge_get_eaddr_nvram(struct bge_softc *sc, uint8_t ether_addr[])
{
	int mac_offset = BGE_EE_MAC_OFFSET;

	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
		mac_offset = BGE_EE_MAC_OFFSET_5906;

	return (bge_read_nvram(sc, ether_addr, mac_offset + 2,
	    ETHER_ADDR_LEN));
}

static int
bge_get_eaddr_eeprom(struct bge_softc *sc, uint8_t ether_addr[])
{

	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
		return 1;

	return (bge_read_eeprom(sc, ether_addr, BGE_EE_MAC_OFFSET + 2,
	    ETHER_ADDR_LEN));
}

static int
bge_get_eaddr(struct bge_softc *sc, uint8_t eaddr[])
{
	static const bge_eaddr_fcn_t bge_eaddr_funcs[] = {
		/* NOTE: Order is critical */
		bge_get_eaddr_fw,
		bge_get_eaddr_mem,
		bge_get_eaddr_nvram,
		bge_get_eaddr_eeprom,
		NULL
	};
	const bge_eaddr_fcn_t *func;

	for (func = bge_eaddr_funcs; *func != NULL; ++func) {
		if ((*func)(sc, eaddr) == 0)
			break;
	}
	return (*func == NULL ? ENXIO : 0);
}