1 /*	$OpenBSD: if_bge.c,v 1.403 2024/02/11 06:40:46 jmc Exp $	*/
2 
3 /*
4  * Copyright (c) 2001 Wind River Systems
5  * Copyright (c) 1997, 1998, 1999, 2001
6  *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. All advertising materials mentioning features or use of this software
17  *    must display the following acknowledgement:
18  *	This product includes software developed by Bill Paul.
19  * 4. Neither the name of the author nor the names of any co-contributors
20  *    may be used to endorse or promote products derived from this software
21  *    without specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
24  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
27  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
30  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
31  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
32  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
33  * THE POSSIBILITY OF SUCH DAMAGE.
34  *
35  * $FreeBSD: if_bge.c,v 1.25 2002/11/14 23:54:49 sam Exp $
36  */
37 
38 /*
39  * Broadcom BCM57xx/BCM590x family ethernet driver for OpenBSD.
40  *
41  * Written by Bill Paul <wpaul@windriver.com>
42  * Senior Engineer, Wind River Systems
43  */
44 
45 /*
46  * The Broadcom BCM5700 is based on technology originally developed by
47  * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
48  * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
49  * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
50  * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
51  * frames, highly configurable RX filtering, and 16 RX and TX queues
52  * (which, along with RX filter rules, can be used for QOS applications).
53  * Other features, such as TCP segmentation, may be available as part
54  * of value-added firmware updates. Unlike the Tigon I and Tigon II,
55  * firmware images can be stored in hardware and need not be compiled
56  * into the driver.
57  *
58  * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
59  * function on a 32-bit or 64-bit, 33/66 MHz bus, or on a 64-bit, 133 MHz bus.
60  *
61  * The BCM5701 is a single-chip solution incorporating both the BCM5700
62  * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
63  * does not support external SSRAM.
64  *
65  * Broadcom also produces a variation of the BCM5700 under the "Altima"
66  * brand name, which is functionally similar but lacks PCI-X support.
67  *
68  * Without external SSRAM, you can have at most 4 TX rings,
69  * and the use of the mini RX ring is disabled. This seems to imply
70  * that these features are simply not available on the BCM5701. As a
71  * result, this driver does not implement any support for the mini RX
72  * ring.
73  */
74 
75 #include "bpfilter.h"
76 #include "vlan.h"
77 #include "kstat.h"
78 
79 #include <sys/param.h>
80 #include <sys/systm.h>
81 #include <sys/sockio.h>
82 #include <sys/mbuf.h>
83 #include <sys/malloc.h>
84 #include <sys/kernel.h>
85 #include <sys/device.h>
86 #include <sys/timeout.h>
87 #include <sys/socket.h>
88 #include <sys/atomic.h>
89 #include <sys/kstat.h>
90 
91 #include <net/if.h>
92 #include <net/if_media.h>
93 
94 #include <netinet/in.h>
95 #include <netinet/if_ether.h>
96 
97 #if NBPFILTER > 0
98 #include <net/bpf.h>
99 #endif
100 
101 #if defined(__sparc64__) || defined(__HAVE_FDT)
102 #include <dev/ofw/openfirm.h>
103 #endif
104 
105 #include <dev/pci/pcireg.h>
106 #include <dev/pci/pcivar.h>
107 #include <dev/pci/pcidevs.h>
108 
109 #include <dev/mii/mii.h>
110 #include <dev/mii/miivar.h>
111 #include <dev/mii/miidevs.h>
112 #include <dev/mii/brgphyreg.h>
113 
114 #include <dev/pci/if_bgereg.h>
115 
116 #define ETHER_MIN_NOPAD		(ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */
117 
118 const struct bge_revision *bge_lookup_rev(u_int32_t);
119 int bge_can_use_msi(struct bge_softc *);
120 int bge_probe(struct device *, void *, void *);
121 void bge_attach(struct device *, struct device *, void *);
122 int bge_detach(struct device *, int);
123 int bge_activate(struct device *, int);
124 
125 const struct cfattach bge_ca = {
126 	sizeof(struct bge_softc), bge_probe, bge_attach, bge_detach,
127 	bge_activate
128 };
129 
130 struct cfdriver bge_cd = {
131 	NULL, "bge", DV_IFNET
132 };
133 
134 void bge_txeof(struct bge_softc *);
135 void bge_rxcsum(struct bge_softc *, struct bge_rx_bd *, struct mbuf *);
136 void bge_rxeof(struct bge_softc *);
137 
138 void bge_tick(void *);
139 void bge_stats_update(struct bge_softc *);
140 void bge_stats_update_regs(struct bge_softc *);
141 int bge_cksum_pad(struct mbuf *);
142 int bge_encap(struct bge_softc *, struct mbuf *, int *);
143 int bge_compact_dma_runt(struct mbuf *);
144 
145 int bge_intr(void *);
146 void bge_start(struct ifqueue *);
147 int bge_ioctl(struct ifnet *, u_long, caddr_t);
148 int bge_rxrinfo(struct bge_softc *, struct if_rxrinfo *);
149 void bge_init(void *);
150 void bge_stop_block(struct bge_softc *, bus_size_t, u_int32_t);
151 void bge_stop(struct bge_softc *, int);
152 void bge_watchdog(struct ifnet *);
153 int bge_ifmedia_upd(struct ifnet *);
154 void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
155 
156 u_int8_t bge_nvram_getbyte(struct bge_softc *, int, u_int8_t *);
157 int bge_read_nvram(struct bge_softc *, caddr_t, int, int);
158 u_int8_t bge_eeprom_getbyte(struct bge_softc *, int, u_int8_t *);
159 int bge_read_eeprom(struct bge_softc *, caddr_t, int, int);
160 
161 void bge_iff(struct bge_softc *);
162 
163 int bge_newbuf_jumbo(struct bge_softc *, int);
164 int bge_init_rx_ring_jumbo(struct bge_softc *);
165 void bge_fill_rx_ring_jumbo(struct bge_softc *);
166 void bge_free_rx_ring_jumbo(struct bge_softc *);
167 
168 int bge_newbuf(struct bge_softc *, int);
169 int bge_init_rx_ring_std(struct bge_softc *);
170 void bge_rxtick(void *);
171 void bge_fill_rx_ring_std(struct bge_softc *);
172 void bge_free_rx_ring_std(struct bge_softc *);
173 
174 void bge_free_tx_ring(struct bge_softc *);
175 int bge_init_tx_ring(struct bge_softc *);
176 
177 void bge_chipinit(struct bge_softc *);
178 int bge_blockinit(struct bge_softc *);
179 u_int32_t bge_dma_swap_options(struct bge_softc *);
180 int bge_phy_addr(struct bge_softc *);
181 
182 u_int32_t bge_readmem_ind(struct bge_softc *, int);
183 void bge_writemem_ind(struct bge_softc *, int, int);
184 void bge_writereg_ind(struct bge_softc *, int, int);
185 void bge_writembx(struct bge_softc *, int, int);
186 
187 int bge_miibus_readreg(struct device *, int, int);
188 void bge_miibus_writereg(struct device *, int, int, int);
189 void bge_miibus_statchg(struct device *);
190 
191 #define BGE_RESET_SHUTDOWN	0
192 #define BGE_RESET_START		1
193 #define BGE_RESET_SUSPEND	2
194 void bge_sig_post_reset(struct bge_softc *, int);
195 void bge_sig_legacy(struct bge_softc *, int);
196 void bge_sig_pre_reset(struct bge_softc *, int);
197 void bge_stop_fw(struct bge_softc *, int);
198 void bge_reset(struct bge_softc *);
199 void bge_link_upd(struct bge_softc *);
200 
201 void bge_ape_lock_init(struct bge_softc *);
202 void bge_ape_read_fw_ver(struct bge_softc *);
203 int bge_ape_lock(struct bge_softc *, int);
204 void bge_ape_unlock(struct bge_softc *, int);
205 void bge_ape_send_event(struct bge_softc *, uint32_t);
206 void bge_ape_driver_state_change(struct bge_softc *, int);
207 
208 #if NKSTAT > 0
209 void bge_kstat_attach(struct bge_softc *);
210 
211 enum {
212 	bge_stat_out_octets = 0,
213 	bge_stat_collisions,
214 	bge_stat_xon_sent,
215 	bge_stat_xoff_sent,
216 	bge_stat_xmit_errors,
217 	bge_stat_coll_frames,
218 	bge_stat_multicoll_frames,
219 	bge_stat_deferred_xmit,
220 	bge_stat_excess_coll,
221 	bge_stat_late_coll,
222 	bge_stat_out_ucast_pkt,
223 	bge_stat_out_mcast_pkt,
224 	bge_stat_out_bcast_pkt,
225 	bge_stat_in_octets,
226 	bge_stat_fragments,
227 	bge_stat_in_ucast_pkt,
228 	bge_stat_in_mcast_pkt,
229 	bge_stat_in_bcast_pkt,
230 	bge_stat_fcs_errors,
231 	bge_stat_align_errors,
232 	bge_stat_xon_rcvd,
233 	bge_stat_xoff_rcvd,
234 	bge_stat_ctrl_frame_rcvd,
235 	bge_stat_xoff_entered,
236 	bge_stat_too_long_frames,
237 	bge_stat_jabbers,
238 	bge_stat_too_short_pkts,
239 
240 	bge_stat_dma_rq_full,
241 	bge_stat_dma_hprq_full,
242 	bge_stat_sdc_queue_full,
243 	bge_stat_nic_sendprod_set,
244 	bge_stat_status_updated,
245 	bge_stat_irqs,
246 	bge_stat_avoided_irqs,
247 	bge_stat_tx_thresh_hit,
248 
249 	bge_stat_filtdrop,
250 	bge_stat_dma_wrq_full,
251 	bge_stat_dma_hpwrq_full,
252 	bge_stat_out_of_bds,
253 	bge_stat_if_in_drops,
254 	bge_stat_if_in_errors,
255 	bge_stat_rx_thresh_hit,
256 };
257 
258 #endif
259 
260 #ifdef BGE_DEBUG
261 #define DPRINTF(x)	do { if (bgedebug) printf x; } while (0)
262 #define DPRINTFN(n,x)	do { if (bgedebug >= (n)) printf x; } while (0)
263 int	bgedebug = 0;
264 #else
265 #define DPRINTF(x)
266 #define DPRINTFN(n,x)
267 #endif
268 
269 /*
270  * Various supported device vendors/types and their names. Note: the
271  * spec seems to indicate that the hardware still has Alteon's vendor
272  * ID burned into it, though it will always be overridden by the vendor
273  * ID in the EEPROM. Just to be safe, we cover all possibilities.
274  */
275 const struct pci_matchid bge_devices[] = {
276 	{ PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_BCM5700 },
277 	{ PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_BCM5701 },
278 
279 	{ PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1000 },
280 	{ PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1001 },
281 	{ PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1003 },
282 	{ PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC9100 },
283 
284 	{ PCI_VENDOR_APPLE, PCI_PRODUCT_APPLE_BCM5701 },
285 
286 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5700 },
287 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5701 },
288 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702 },
289 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702_ALT },
290 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702X },
291 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703 },
292 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703_ALT },
293 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703X },
294 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704C },
295 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704S },
296 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704S_ALT },
297 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705 },
298 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705F },
299 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705K },
300 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705M },
301 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705M_ALT },
302 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5714 },
303 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5714S },
304 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5715 },
305 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5715S },
306 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5717 },
307 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5717C },
308 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5718 },
309 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5719 },
310 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5720 },
311 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5721 },
312 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5722 },
313 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5723 },
314 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5725 },
315 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5727 },
316 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751 },
317 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751F },
318 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751M },
319 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5752 },
320 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5752M },
321 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5753 },
322 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5753F },
323 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5753M },
324 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5754 },
325 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5754M },
326 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5755 },
327 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5755M },
328 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5756 },
329 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5761 },
330 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5761E },
331 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5761S },
332 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5761SE },
333 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5762 },
334 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5764 },
335 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5780 },
336 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5780S },
337 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5781 },
338 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5782 },
339 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5784 },
340 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5785F },
341 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5785G },
342 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5786 },
343 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5787 },
344 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5787F },
345 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5787M },
346 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5788 },
347 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5789 },
348 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5901 },
349 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5901A2 },
350 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5903M },
351 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5906 },
352 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5906M },
353 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57760 },
354 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57761 },
355 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57762 },
356 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57764 },
357 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57765 },
358 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57766 },
359 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57767 },
360 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57780 },
361 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57781 },
362 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57782 },
363 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57785 },
364 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57786 },
365 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57787 },
366 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57788 },
367 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57790 },
368 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57791 },
369 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57795 },
370 
371 	{ PCI_VENDOR_FUJITSU, PCI_PRODUCT_FUJITSU_PW008GE4 },
372 	{ PCI_VENDOR_FUJITSU, PCI_PRODUCT_FUJITSU_PW008GE5 },
373 	{ PCI_VENDOR_FUJITSU, PCI_PRODUCT_FUJITSU_PP250_450_LAN },
374 
375 	{ PCI_VENDOR_SCHNEIDERKOCH, PCI_PRODUCT_SCHNEIDERKOCH_SK9D21 },
376 
377 	{ PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3C996 }
378 };
379 
380 #define BGE_IS_JUMBO_CAPABLE(sc)	((sc)->bge_flags & BGE_JUMBO_CAPABLE)
381 #define BGE_IS_5700_FAMILY(sc)		((sc)->bge_flags & BGE_5700_FAMILY)
382 #define BGE_IS_5705_PLUS(sc)		((sc)->bge_flags & BGE_5705_PLUS)
383 #define BGE_IS_5714_FAMILY(sc)		((sc)->bge_flags & BGE_5714_FAMILY)
384 #define BGE_IS_575X_PLUS(sc)		((sc)->bge_flags & BGE_575X_PLUS)
385 #define BGE_IS_5755_PLUS(sc)		((sc)->bge_flags & BGE_5755_PLUS)
386 #define BGE_IS_5717_PLUS(sc)		((sc)->bge_flags & BGE_5717_PLUS)
387 #define BGE_IS_57765_PLUS(sc)		((sc)->bge_flags & BGE_57765_PLUS)
388 
389 static const struct bge_revision {
390 	u_int32_t		br_chipid;
391 	const char		*br_name;
392 } bge_revisions[] = {
393 	{ BGE_CHIPID_BCM5700_A0, "BCM5700 A0" },
394 	{ BGE_CHIPID_BCM5700_A1, "BCM5700 A1" },
395 	{ BGE_CHIPID_BCM5700_B0, "BCM5700 B0" },
396 	{ BGE_CHIPID_BCM5700_B1, "BCM5700 B1" },
397 	{ BGE_CHIPID_BCM5700_B2, "BCM5700 B2" },
398 	{ BGE_CHIPID_BCM5700_B3, "BCM5700 B3" },
399 	{ BGE_CHIPID_BCM5700_ALTIMA, "BCM5700 Altima" },
400 	{ BGE_CHIPID_BCM5700_C0, "BCM5700 C0" },
401 	{ BGE_CHIPID_BCM5701_A0, "BCM5701 A0" },
402 	{ BGE_CHIPID_BCM5701_B0, "BCM5701 B0" },
403 	{ BGE_CHIPID_BCM5701_B2, "BCM5701 B2" },
404 	{ BGE_CHIPID_BCM5701_B5, "BCM5701 B5" },
405 	/* the 5702 and 5703 share the same ASIC ID */
406 	{ BGE_CHIPID_BCM5703_A0, "BCM5702/5703 A0" },
407 	{ BGE_CHIPID_BCM5703_A1, "BCM5702/5703 A1" },
408 	{ BGE_CHIPID_BCM5703_A2, "BCM5702/5703 A2" },
409 	{ BGE_CHIPID_BCM5703_A3, "BCM5702/5703 A3" },
410 	{ BGE_CHIPID_BCM5703_B0, "BCM5702/5703 B0" },
411 	{ BGE_CHIPID_BCM5704_A0, "BCM5704 A0" },
412 	{ BGE_CHIPID_BCM5704_A1, "BCM5704 A1" },
413 	{ BGE_CHIPID_BCM5704_A2, "BCM5704 A2" },
414 	{ BGE_CHIPID_BCM5704_A3, "BCM5704 A3" },
415 	{ BGE_CHIPID_BCM5704_B0, "BCM5704 B0" },
416 	{ BGE_CHIPID_BCM5705_A0, "BCM5705 A0" },
417 	{ BGE_CHIPID_BCM5705_A1, "BCM5705 A1" },
418 	{ BGE_CHIPID_BCM5705_A2, "BCM5705 A2" },
419 	{ BGE_CHIPID_BCM5705_A3, "BCM5705 A3" },
420 	{ BGE_CHIPID_BCM5750_A0, "BCM5750 A0" },
421 	{ BGE_CHIPID_BCM5750_A1, "BCM5750 A1" },
422 	{ BGE_CHIPID_BCM5750_A3, "BCM5750 A3" },
423 	{ BGE_CHIPID_BCM5750_B0, "BCM5750 B0" },
424 	{ BGE_CHIPID_BCM5750_B1, "BCM5750 B1" },
425 	{ BGE_CHIPID_BCM5750_C0, "BCM5750 C0" },
426 	{ BGE_CHIPID_BCM5750_C1, "BCM5750 C1" },
427 	{ BGE_CHIPID_BCM5750_C2, "BCM5750 C2" },
428 	{ BGE_CHIPID_BCM5714_A0, "BCM5714 A0" },
429 	{ BGE_CHIPID_BCM5752_A0, "BCM5752 A0" },
430 	{ BGE_CHIPID_BCM5752_A1, "BCM5752 A1" },
431 	{ BGE_CHIPID_BCM5752_A2, "BCM5752 A2" },
432 	{ BGE_CHIPID_BCM5714_B0, "BCM5714 B0" },
433 	{ BGE_CHIPID_BCM5714_B3, "BCM5714 B3" },
434 	{ BGE_CHIPID_BCM5715_A0, "BCM5715 A0" },
435 	{ BGE_CHIPID_BCM5715_A1, "BCM5715 A1" },
436 	{ BGE_CHIPID_BCM5715_A3, "BCM5715 A3" },
437 	{ BGE_CHIPID_BCM5717_A0, "BCM5717 A0" },
438 	{ BGE_CHIPID_BCM5717_B0, "BCM5717 B0" },
439 	{ BGE_CHIPID_BCM5719_A0, "BCM5719 A0" },
440 	{ BGE_CHIPID_BCM5719_A1, "BCM5719 A1" },
441 	{ BGE_CHIPID_BCM5720_A0, "BCM5720 A0" },
442 	{ BGE_CHIPID_BCM5755_A0, "BCM5755 A0" },
443 	{ BGE_CHIPID_BCM5755_A1, "BCM5755 A1" },
444 	{ BGE_CHIPID_BCM5755_A2, "BCM5755 A2" },
445 	{ BGE_CHIPID_BCM5755_C0, "BCM5755 C0" },
446 	{ BGE_CHIPID_BCM5761_A0, "BCM5761 A0" },
447 	{ BGE_CHIPID_BCM5761_A1, "BCM5761 A1" },
448 	{ BGE_CHIPID_BCM5762_A0, "BCM5762 A0" },
449 	{ BGE_CHIPID_BCM5762_B0, "BCM5762 B0" },
450 	{ BGE_CHIPID_BCM5784_A0, "BCM5784 A0" },
451 	{ BGE_CHIPID_BCM5784_A1, "BCM5784 A1" },
452 	/* the 5754 and 5787 share the same ASIC ID */
453 	{ BGE_CHIPID_BCM5787_A0, "BCM5754/5787 A0" },
454 	{ BGE_CHIPID_BCM5787_A1, "BCM5754/5787 A1" },
455 	{ BGE_CHIPID_BCM5787_A2, "BCM5754/5787 A2" },
456 	{ BGE_CHIPID_BCM5906_A1, "BCM5906 A1" },
457 	{ BGE_CHIPID_BCM5906_A2, "BCM5906 A2" },
458 	{ BGE_CHIPID_BCM57765_A0, "BCM57765 A0" },
459 	{ BGE_CHIPID_BCM57765_B0, "BCM57765 B0" },
460 	{ BGE_CHIPID_BCM57766_A0, "BCM57766 A0" },
461 	{ BGE_CHIPID_BCM57766_A1, "BCM57766 A1" },
462 	{ BGE_CHIPID_BCM57780_A0, "BCM57780 A0" },
463 	{ BGE_CHIPID_BCM57780_A1, "BCM57780 A1" },
464 
465 	{ 0, NULL }
466 };
467 
468 /*
469  * Some defaults for major revisions, so that newer steppings
470  * that we don't know about have a shot at working.
471  */
472 static const struct bge_revision bge_majorrevs[] = {
473 	{ BGE_ASICREV_BCM5700, "unknown BCM5700" },
474 	{ BGE_ASICREV_BCM5701, "unknown BCM5701" },
475 	/* 5702 and 5703 share the same ASIC ID */
476 	{ BGE_ASICREV_BCM5703, "unknown BCM5703" },
477 	{ BGE_ASICREV_BCM5704, "unknown BCM5704" },
478 	{ BGE_ASICREV_BCM5705, "unknown BCM5705" },
479 	{ BGE_ASICREV_BCM5750, "unknown BCM5750" },
480 	{ BGE_ASICREV_BCM5714, "unknown BCM5714" },
481 	{ BGE_ASICREV_BCM5714_A0, "unknown BCM5714" },
482 	{ BGE_ASICREV_BCM5752, "unknown BCM5752" },
483 	{ BGE_ASICREV_BCM5780, "unknown BCM5780" },
484 	{ BGE_ASICREV_BCM5755, "unknown BCM5755" },
485 	{ BGE_ASICREV_BCM5761, "unknown BCM5761" },
486 	{ BGE_ASICREV_BCM5784, "unknown BCM5784" },
487 	{ BGE_ASICREV_BCM5785, "unknown BCM5785" },
488 	/* 5754 and 5787 share the same ASIC ID */
489 	{ BGE_ASICREV_BCM5787, "unknown BCM5754/5787" },
490 	{ BGE_ASICREV_BCM5906, "unknown BCM5906" },
491 	{ BGE_ASICREV_BCM57765, "unknown BCM57765" },
492 	{ BGE_ASICREV_BCM57766, "unknown BCM57766" },
493 	{ BGE_ASICREV_BCM57780, "unknown BCM57780" },
494 	{ BGE_ASICREV_BCM5717, "unknown BCM5717" },
495 	{ BGE_ASICREV_BCM5719, "unknown BCM5719" },
496 	{ BGE_ASICREV_BCM5720, "unknown BCM5720" },
497 	{ BGE_ASICREV_BCM5762, "unknown BCM5762" },
498 
499 	{ 0, NULL }
500 };
501 
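/*
 * Read a word of NIC-internal memory. The memory window in PCI
 * config space is pointed at the target offset, the data register
 * is read through it, and the window is restored afterwards.
 */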
502 u_int32_t
503 bge_readmem_ind(struct bge_softc *sc, int off)
504 {
505 	struct pci_attach_args	*pa = &(sc->bge_pa);
506 	u_int32_t val;
507 
508 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906 &&
509 	    off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4)
510 		return (0);
511 
512 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_BASEADDR, off);
513 	val = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_DATA);
514 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_BASEADDR, 0);
515 	return (val);
516 }
517 
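/*
 * Write a word of NIC-internal memory through the same PCI config
 * space memory window.
 */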
518 void
519 bge_writemem_ind(struct bge_softc *sc, int off, int val)
520 {
521 	struct pci_attach_args	*pa = &(sc->bge_pa);
522 
523 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906 &&
524 	    off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4)
525 		return;
526 
527 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_BASEADDR, off);
528 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_DATA, val);
529 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_BASEADDR, 0);
530 }
531 
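/*
 * Write a MAC register indirectly, through the register window in
 * PCI config space.
 */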
532 void
533 bge_writereg_ind(struct bge_softc *sc, int off, int val)
534 {
535 	struct pci_attach_args	*pa = &(sc->bge_pa);
536 
537 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_BASEADDR, off);
538 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_DATA, val);
539 }
540 
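/*
 * Write a mailbox register. On the BCM5906 the mailboxes live in
 * the low-priority mailbox region, so rebase the offset first.
 */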
541 void
542 bge_writembx(struct bge_softc *sc, int off, int val)
543 {
544 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
545 		off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI;
546 
547 	CSR_WRITE_4(sc, off, val);
548 }
549 
550 /*
551  * Clear all stale locks and select the lock for this driver instance.
552  */
553 void
554 bge_ape_lock_init(struct bge_softc *sc)
555 {
556 	struct pci_attach_args *pa = &(sc->bge_pa);
557 	uint32_t bit, regbase;
558 	int i;
559 
560 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761)
561 		regbase = BGE_APE_LOCK_GRANT;
562 	else
563 		regbase = BGE_APE_PER_LOCK_GRANT;
564 
565 	/* Clear any stale locks. */
566 	for (i = BGE_APE_LOCK_PHY0; i <= BGE_APE_LOCK_GPIO; i++) {
567 		switch (i) {
568 		case BGE_APE_LOCK_PHY0:
569 		case BGE_APE_LOCK_PHY1:
570 		case BGE_APE_LOCK_PHY2:
571 		case BGE_APE_LOCK_PHY3:
572 			bit = BGE_APE_LOCK_GRANT_DRIVER0;
573 			break;
574 		default:
575 			if (pa->pa_function == 0)
576 				bit = BGE_APE_LOCK_GRANT_DRIVER0;
577 			else
578 				bit = (1 << pa->pa_function);
579 		}
580 		APE_WRITE_4(sc, regbase + 4 * i, bit);
581 	}
582 
583 	/* Select the PHY lock based on the device's function number. */
584 	switch (pa->pa_function) {
585 	case 0:
586 		sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY0;
587 		break;
588 	case 1:
589 		sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY1;
590 		break;
591 	case 2:
592 		sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY2;
593 		break;
594 	case 3:
595 		sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY3;
596 		break;
597 	default:
598 		printf("%s: PHY lock not supported on function %d\n",
599 		    sc->bge_dev.dv_xname, pa->pa_function);
600 		break;
601 	}
602 }
603 
604 /*
605  * Check for APE firmware, set flags, and print version info.
606  */
607 void
608 bge_ape_read_fw_ver(struct bge_softc *sc)
609 {
610 	const char *fwtype;
611 	uint32_t apedata, features;
612 
613 	/* Check for a valid APE signature in shared memory. */
614 	apedata = APE_READ_4(sc, BGE_APE_SEG_SIG);
615 	if (apedata != BGE_APE_SEG_SIG_MAGIC) {
616 		sc->bge_mfw_flags &= ~BGE_MFW_ON_APE;
617 		return;
618 	}
619 
620 	/* Check if APE firmware is running. */
621 	apedata = APE_READ_4(sc, BGE_APE_FW_STATUS);
622 	if ((apedata & BGE_APE_FW_STATUS_READY) == 0) {
623 		printf("%s: APE signature found but FW status not ready! "
624 		    "0x%08x\n", sc->bge_dev.dv_xname, apedata);
625 		return;
626 	}
627 
628 	sc->bge_mfw_flags |= BGE_MFW_ON_APE;
629 
630 	/* Fetch the APE firmware type and version. */
631 	apedata = APE_READ_4(sc, BGE_APE_FW_VERSION);
632 	features = APE_READ_4(sc, BGE_APE_FW_FEATURES);
633 	if ((features & BGE_APE_FW_FEATURE_NCSI) != 0) {
634 		sc->bge_mfw_flags |= BGE_MFW_TYPE_NCSI;
635 		fwtype = "NCSI";
636 	} else if ((features & BGE_APE_FW_FEATURE_DASH) != 0) {
637 		sc->bge_mfw_flags |= BGE_MFW_TYPE_DASH;
638 		fwtype = "DASH";
639 	} else
640 		fwtype = "UNKN";
641 
642 	/* Print the APE firmware version. */
643 	printf(", APE firmware %s %d.%d.%d.%d", fwtype,
644 	    (apedata & BGE_APE_FW_VERSION_MAJMSK) >> BGE_APE_FW_VERSION_MAJSFT,
645 	    (apedata & BGE_APE_FW_VERSION_MINMSK) >> BGE_APE_FW_VERSION_MINSFT,
646 	    (apedata & BGE_APE_FW_VERSION_REVMSK) >> BGE_APE_FW_VERSION_REVSFT,
647 	    (apedata & BGE_APE_FW_VERSION_BLDMSK));
648 }
649 
650 int
651 bge_ape_lock(struct bge_softc *sc, int locknum)
652 {
653 	struct pci_attach_args *pa = &(sc->bge_pa);
654 	uint32_t bit, gnt, req, status;
655 	int i, off;
656 
657 	if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0)
658 		return (0);
659 
660 	/* Lock request/grant registers have different bases. */
661 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761) {
662 		req = BGE_APE_LOCK_REQ;
663 		gnt = BGE_APE_LOCK_GRANT;
664 	} else {
665 		req = BGE_APE_PER_LOCK_REQ;
666 		gnt = BGE_APE_PER_LOCK_GRANT;
667 	}
668 
669 	off = 4 * locknum;
670 
671 	switch (locknum) {
672 	case BGE_APE_LOCK_GPIO:
673 		/* Lock required when using GPIO. */
674 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761)
675 			return (0);
676 		if (pa->pa_function == 0)
677 			bit = BGE_APE_LOCK_REQ_DRIVER0;
678 		else
679 			bit = (1 << pa->pa_function);
680 		break;
681 	case BGE_APE_LOCK_GRC:
682 		/* Lock required to reset the device. */
683 		if (pa->pa_function == 0)
684 			bit = BGE_APE_LOCK_REQ_DRIVER0;
685 		else
686 			bit = (1 << pa->pa_function);
687 		break;
688 	case BGE_APE_LOCK_MEM:
689 		/* Lock required when accessing certain APE memory. */
690 		if (pa->pa_function == 0)
691 			bit = BGE_APE_LOCK_REQ_DRIVER0;
692 		else
693 			bit = (1 << pa->pa_function);
694 		break;
695 	case BGE_APE_LOCK_PHY0:
696 	case BGE_APE_LOCK_PHY1:
697 	case BGE_APE_LOCK_PHY2:
698 	case BGE_APE_LOCK_PHY3:
699 		/* Lock required when accessing PHYs. */
700 		bit = BGE_APE_LOCK_REQ_DRIVER0;
701 		break;
702 	default:
703 		return (EINVAL);
704 	}
705 
706 	/* Request a lock. */
707 	APE_WRITE_4(sc, req + off, bit);
708 
709 	/* Wait up to 1 second to acquire lock. */
710 	for (i = 0; i < 20000; i++) {
711 		status = APE_READ_4(sc, gnt + off);
712 		if (status == bit)
713 			break;
714 		DELAY(50);
715 	}
716 
717 	/* Handle any errors. */
718 	if (status != bit) {
719 		printf("%s: APE lock %d request failed! "
720 		    "request = 0x%04x[0x%04x], status = 0x%04x[0x%04x]\n",
721 		    sc->bge_dev.dv_xname,
722 		    locknum, req + off, bit & 0xFFFF, gnt + off,
723 		    status & 0xFFFF);
724 		/* Revoke the lock request. */
725 		APE_WRITE_4(sc, gnt + off, bit);
726 		return (EBUSY);
727 	}
728 
729 	return (0);
730 }
731 
732 void
733 bge_ape_unlock(struct bge_softc *sc, int locknum)
734 {
735 	struct pci_attach_args *pa = &(sc->bge_pa);
736 	uint32_t bit, gnt;
737 	int off;
738 
739 	if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0)
740 		return;
741 
742 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761)
743 		gnt = BGE_APE_LOCK_GRANT;
744 	else
745 		gnt = BGE_APE_PER_LOCK_GRANT;
746 
747 	off = 4 * locknum;
748 
749 	switch (locknum) {
750 	case BGE_APE_LOCK_GPIO:
751 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761)
752 			return;
753 		if (pa->pa_function == 0)
754 			bit = BGE_APE_LOCK_GRANT_DRIVER0;
755 		else
756 			bit = (1 << pa->pa_function);
757 		break;
758 	case BGE_APE_LOCK_GRC:
759 		if (pa->pa_function == 0)
760 			bit = BGE_APE_LOCK_GRANT_DRIVER0;
761 		else
762 			bit = (1 << pa->pa_function);
763 		break;
764 	case BGE_APE_LOCK_MEM:
765 		if (pa->pa_function == 0)
766 			bit = BGE_APE_LOCK_GRANT_DRIVER0;
767 		else
768 			bit = (1 << pa->pa_function);
769 		break;
770 	case BGE_APE_LOCK_PHY0:
771 	case BGE_APE_LOCK_PHY1:
772 	case BGE_APE_LOCK_PHY2:
773 	case BGE_APE_LOCK_PHY3:
774 		bit = BGE_APE_LOCK_GRANT_DRIVER0;
775 		break;
776 	default:
777 		return;
778 	}
779 
780 	APE_WRITE_4(sc, gnt + off, bit);
781 }
782 
783 /*
784  * Send an event to the APE firmware.
785  */
786 void
787 bge_ape_send_event(struct bge_softc *sc, uint32_t event)
788 {
789 	uint32_t apedata;
790 	int i;
791 
792 	/* NCSI does not support APE events. */
793 	if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0)
794 		return;
795 
796 	/* Wait up to 1ms for APE to service previous event. */
797 	for (i = 10; i > 0; i--) {
798 		if (bge_ape_lock(sc, BGE_APE_LOCK_MEM) != 0)
799 			break;
800 		apedata = APE_READ_4(sc, BGE_APE_EVENT_STATUS);
801 		if ((apedata & BGE_APE_EVENT_STATUS_EVENT_PENDING) == 0) {
802 			APE_WRITE_4(sc, BGE_APE_EVENT_STATUS, event |
803 			    BGE_APE_EVENT_STATUS_EVENT_PENDING);
804 			bge_ape_unlock(sc, BGE_APE_LOCK_MEM);
805 			APE_WRITE_4(sc, BGE_APE_EVENT, BGE_APE_EVENT_1);
806 			break;
807 		}
808 		bge_ape_unlock(sc, BGE_APE_LOCK_MEM);
809 		DELAY(100);
810 	}
811 	if (i == 0) {
812 		printf("%s: APE event 0x%08x send timed out\n",
813 		    sc->bge_dev.dv_xname, event);
814 	}
815 }
816 
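/*
 * Tell the APE firmware that the driver state has changed (start,
 * suspend or shutdown) by filling in the host segment of shared
 * memory and sending a state-change event.
 */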
817 void
818 bge_ape_driver_state_change(struct bge_softc *sc, int kind)
819 {
820 	uint32_t apedata, event;
821 
822 	if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0)
823 		return;
824 
825 	switch (kind) {
826 	case BGE_RESET_START:
827 		/* If this is the first load, clear the load counter. */
828 		apedata = APE_READ_4(sc, BGE_APE_HOST_SEG_SIG);
829 		if (apedata != BGE_APE_HOST_SEG_SIG_MAGIC)
830 			APE_WRITE_4(sc, BGE_APE_HOST_INIT_COUNT, 0);
831 		else {
832 			apedata = APE_READ_4(sc, BGE_APE_HOST_INIT_COUNT);
833 			APE_WRITE_4(sc, BGE_APE_HOST_INIT_COUNT, ++apedata);
834 		}
835 		APE_WRITE_4(sc, BGE_APE_HOST_SEG_SIG,
836 		    BGE_APE_HOST_SEG_SIG_MAGIC);
837 		APE_WRITE_4(sc, BGE_APE_HOST_SEG_LEN,
838 		    BGE_APE_HOST_SEG_LEN_MAGIC);
839 
840 		/* Add some version info if bge(4) supports it. */
841 		APE_WRITE_4(sc, BGE_APE_HOST_DRIVER_ID,
842 		    BGE_APE_HOST_DRIVER_ID_MAGIC(1, 0));
843 		APE_WRITE_4(sc, BGE_APE_HOST_BEHAVIOR,
844 		    BGE_APE_HOST_BEHAV_NO_PHYLOCK);
845 		APE_WRITE_4(sc, BGE_APE_HOST_HEARTBEAT_INT_MS,
846 		    BGE_APE_HOST_HEARTBEAT_INT_DISABLE);
847 		APE_WRITE_4(sc, BGE_APE_HOST_DRVR_STATE,
848 		    BGE_APE_HOST_DRVR_STATE_START);
849 		event = BGE_APE_EVENT_STATUS_STATE_START;
850 		break;
851 	case BGE_RESET_SHUTDOWN:
852 		APE_WRITE_4(sc, BGE_APE_HOST_DRVR_STATE,
853 		    BGE_APE_HOST_DRVR_STATE_UNLOAD);
854 		event = BGE_APE_EVENT_STATUS_STATE_UNLOAD;
855 		break;
856 	case BGE_RESET_SUSPEND:
857 		event = BGE_APE_EVENT_STATUS_STATE_SUSPEND;
858 		break;
859 	default:
860 		return;
861 	}
862 
863 	bge_ape_send_event(sc, event | BGE_APE_EVENT_STATUS_DRIVER_EVNT |
864 	    BGE_APE_EVENT_STATUS_STATE_CHNGE);
865 }
866 
867 
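/*
 * Read a single byte from NVRAM (BCM5906 only): take the software
 * arbitration lock, enable access, issue the read command and pick
 * the wanted byte out of the 32-bit result.
 */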
868 u_int8_t
869 bge_nvram_getbyte(struct bge_softc *sc, int addr, u_int8_t *dest)
870 {
871 	u_int32_t access, byte = 0;
872 	int i;
873 
874 	/* Lock. */
875 	CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
876 	for (i = 0; i < 8000; i++) {
877 		if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1)
878 			break;
879 		DELAY(20);
880 	}
881 	if (i == 8000)
882 		return (1);
883 
884 	/* Enable access. */
885 	access = CSR_READ_4(sc, BGE_NVRAM_ACCESS);
886 	CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE);
887 
888 	CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc);
889 	CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD);
890 	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
891 		DELAY(10);
892 		if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) {
893 			DELAY(10);
894 			break;
895 		}
896 	}
897 
898 	if (i == BGE_TIMEOUT * 10) {
899 		printf("%s: nvram read timed out\n", sc->bge_dev.dv_xname);
900 		return (1);
901 	}
902 
903 	/* Get result. */
904 	byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA);
905 
906 	*dest = (swap32(byte) >> ((addr % 4) * 8)) & 0xFF;
907 
908 	/* Disable access. */
909 	CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access);
910 
911 	/* Unlock. */
912 	CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1);
913 	CSR_READ_4(sc, BGE_NVRAM_SWARB);
914 
915 	return (0);
916 }
917 
918 /*
919  * Read a sequence of bytes from NVRAM.
920  */
921 
922 int
923 bge_read_nvram(struct bge_softc *sc, caddr_t dest, int off, int cnt)
924 {
925 	int err = 0, i;
926 	u_int8_t byte = 0;
927 
928 	if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5906)
929 		return (1);
930 
931 	for (i = 0; i < cnt; i++) {
932 		err = bge_nvram_getbyte(sc, off + i, &byte);
933 		if (err)
934 			break;
935 		*(dest + i) = byte;
936 	}
937 
938 	return (err ? 1 : 0);
939 }
940 
941 /*
942  * Read a byte of data stored in the EEPROM at address 'addr.' The
943  * BCM570x supports both the traditional bitbang interface and an
944  * auto access interface for reading the EEPROM. We use the auto
945  * access method.
946  */
947 u_int8_t
948 bge_eeprom_getbyte(struct bge_softc *sc, int addr, u_int8_t *dest)
949 {
950 	int i;
951 	u_int32_t byte = 0;
952 
953 	/*
954 	 * Enable use of auto EEPROM access so we can avoid
955 	 * having to use the bitbang method.
956 	 */
957 	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);
958 
959 	/* Reset the EEPROM, load the clock period. */
960 	CSR_WRITE_4(sc, BGE_EE_ADDR,
961 	    BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
962 	DELAY(20);
963 
964 	/* Issue the read EEPROM command. */
965 	CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);
966 
967 	/* Wait for completion */
968 	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
969 		DELAY(10);
970 		if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
971 			break;
972 	}
973 
974 	if (i == BGE_TIMEOUT * 10) {
975 		printf("%s: eeprom read timed out\n", sc->bge_dev.dv_xname);
976 		return (1);
977 	}
978 
979 	/* Get result. */
980 	byte = CSR_READ_4(sc, BGE_EE_DATA);
981 
982 	*dest = (byte >> ((addr % 4) * 8)) & 0xFF;
983 
984 	return (0);
985 }
986 
987 /*
988  * Read a sequence of bytes from the EEPROM.
989  */
990 int
991 bge_read_eeprom(struct bge_softc *sc, caddr_t dest, int off, int cnt)
992 {
993 	int i, error = 0;
994 	u_int8_t byte = 0;
995 
996 	for (i = 0; i < cnt; i++) {
997 		error = bge_eeprom_getbyte(sc, off + i, &byte);
998 		if (error)
999 			break;
1000 		*(dest + i) = byte;
1001 	}
1002 
1003 	return (error ? 1 : 0);
1004 }
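
/*
 * Example usage (a sketch, not part of the driver): the attach path
 * fetches the station address with something like
 *
 *	bge_read_eeprom(sc, (caddr_t)&sc->arpcom.ac_enaddr,
 *	    BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN);
 *
 * where a non-zero return value indicates a failed or timed out read.
 */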
1005 
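/*
 * Read a PHY register through the MI communication interface.
 * Autopolling is paused around the access because an MI access with
 * autopolling enabled may trigger PCI errors.
 */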
1006 int
1007 bge_miibus_readreg(struct device *dev, int phy, int reg)
1008 {
1009 	struct bge_softc *sc = (struct bge_softc *)dev;
1010 	u_int32_t val, autopoll;
1011 	int i;
1012 
1013 	if (bge_ape_lock(sc, sc->bge_phy_ape_lock) != 0)
1014 		return (0);
1015 
1016 	/* Reading with autopolling on may trigger PCI errors */
1017 	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
1018 	if (autopoll & BGE_MIMODE_AUTOPOLL) {
1019 		BGE_STS_CLRBIT(sc, BGE_STS_AUTOPOLL);
1020 		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
1021 		DELAY(80);
1022 	}
1023 
1024 	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY|
1025 	    BGE_MIPHY(phy)|BGE_MIREG(reg));
1026 	CSR_READ_4(sc, BGE_MI_COMM); /* force write */
1027 
1028 	for (i = 0; i < 200; i++) {
1029 		delay(1);
1030 		val = CSR_READ_4(sc, BGE_MI_COMM);
1031 		if (!(val & BGE_MICOMM_BUSY))
1032 			break;
1033 		delay(10);
1034 	}
1035 
1036 	if (i == 200) {
1037 		printf("%s: PHY read timed out\n", sc->bge_dev.dv_xname);
1038 		val = 0;
1039 		goto done;
1040 	}
1041 
1042 	val = CSR_READ_4(sc, BGE_MI_COMM);
1043 
1044 done:
1045 	if (autopoll & BGE_MIMODE_AUTOPOLL) {
1046 		BGE_STS_SETBIT(sc, BGE_STS_AUTOPOLL);
1047 		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
1048 		DELAY(80);
1049 	}
1050 
1051 	bge_ape_unlock(sc, sc->bge_phy_ape_lock);
1052 
1053 	if (val & BGE_MICOMM_READFAIL)
1054 		return (0);
1055 
1056 	return (val & 0xFFFF);
1057 }
1058 
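/*
 * Write a PHY register through the MI communication interface,
 * again with autopolling paused around the access.
 */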
1059 void
1060 bge_miibus_writereg(struct device *dev, int phy, int reg, int val)
1061 {
1062 	struct bge_softc *sc = (struct bge_softc *)dev;
1063 	u_int32_t autopoll;
1064 	int i;
1065 
1066 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906 &&
1067 	    (reg == MII_100T2CR || reg == BRGPHY_MII_AUXCTL))
1068 		return;
1069 
1070 	if (bge_ape_lock(sc, sc->bge_phy_ape_lock) != 0)
1071 		return;
1072 
1073 	/* Writing with autopolling on may trigger PCI errors */
1074 	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
1075 	if (autopoll & BGE_MIMODE_AUTOPOLL) {
1076 		DELAY(40);
1077 		BGE_STS_CLRBIT(sc, BGE_STS_AUTOPOLL);
1078 		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
1079 		DELAY(40); /* 40 usec is supposed to be adequate */
1080 	}
1081 
1082 	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY|
1083 	    BGE_MIPHY(phy)|BGE_MIREG(reg)|val);
1084 	CSR_READ_4(sc, BGE_MI_COMM); /* force write */
1085 
1086 	for (i = 0; i < 200; i++) {
1087 		delay(1);
1088 		if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY))
1089 			break;
1090 		delay(10);
1091 	}
1092 
1093 	if (autopoll & BGE_MIMODE_AUTOPOLL) {
1094 		BGE_STS_SETBIT(sc, BGE_STS_AUTOPOLL);
1095 		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
1096 		DELAY(40);
1097 	}
1098 
1099 	bge_ape_unlock(sc, sc->bge_phy_ape_lock);
1100 
1101 	if (i == 200) {
1102 		printf("%s: PHY write timed out\n", sc->bge_dev.dv_xname);
1103 	}
1104 }
1105 
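/*
 * Handle a link state change reported by the PHY: track the link
 * flag, then program the MAC port mode (MII/GMII), duplex and
 * flow control settings to match the negotiated media.
 */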
1106 void
1107 bge_miibus_statchg(struct device *dev)
1108 {
1109 	struct bge_softc *sc = (struct bge_softc *)dev;
1110 	struct mii_data *mii = &sc->bge_mii;
1111 	u_int32_t mac_mode, rx_mode, tx_mode;
1112 
1113 	/*
1114 	 * Get flow control negotiation result.
1115 	 */
1116 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
1117 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->bge_flowflags)
1118 		sc->bge_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
1119 
1120 	if (!BGE_STS_BIT(sc, BGE_STS_LINK) &&
1121 	    mii->mii_media_status & IFM_ACTIVE &&
1122 	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
1123 		BGE_STS_SETBIT(sc, BGE_STS_LINK);
1124 	else if (BGE_STS_BIT(sc, BGE_STS_LINK) &&
1125 	    (!(mii->mii_media_status & IFM_ACTIVE) ||
1126 	    IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE))
1127 		BGE_STS_CLRBIT(sc, BGE_STS_LINK);
1128 
1129 	if (!BGE_STS_BIT(sc, BGE_STS_LINK))
1130 		return;
1131 
1132 	/* Set the port mode (MII/GMII) to match the link speed. */
1133 	mac_mode = CSR_READ_4(sc, BGE_MAC_MODE) &
1134 	    ~(BGE_MACMODE_PORTMODE | BGE_MACMODE_HALF_DUPLEX);
1135 	tx_mode = CSR_READ_4(sc, BGE_TX_MODE);
1136 	rx_mode = CSR_READ_4(sc, BGE_RX_MODE);
1137 
1138 	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
1139 	    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
1140 		mac_mode |= BGE_PORTMODE_GMII;
1141 	else
1142 		mac_mode |= BGE_PORTMODE_MII;
1143 
1144 	/* Set MAC flow control behavior to match link flow control settings. */
1145 	tx_mode &= ~BGE_TXMODE_FLOWCTL_ENABLE;
1146 	rx_mode &= ~BGE_RXMODE_FLOWCTL_ENABLE;
1147 	if (mii->mii_media_active & IFM_FDX) {
1148 		if (sc->bge_flowflags & IFM_ETH_TXPAUSE)
1149 			tx_mode |= BGE_TXMODE_FLOWCTL_ENABLE;
1150 		if (sc->bge_flowflags & IFM_ETH_RXPAUSE)
1151 			rx_mode |= BGE_RXMODE_FLOWCTL_ENABLE;
1152 	} else
1153 		mac_mode |= BGE_MACMODE_HALF_DUPLEX;
1154 
1155 	CSR_WRITE_4(sc, BGE_MAC_MODE, mac_mode);
1156 	DELAY(40);
1157 	CSR_WRITE_4(sc, BGE_TX_MODE, tx_mode);
1158 	CSR_WRITE_4(sc, BGE_RX_MODE, rx_mode);
1159 }
1160 
1161 /*
1162  * Initialize a standard receive ring descriptor.
1163  */
1164 int
1165 bge_newbuf(struct bge_softc *sc, int i)
1166 {
1167 	bus_dmamap_t		dmap = sc->bge_cdata.bge_rx_std_map[i];
1168 	struct bge_rx_bd	*r = &sc->bge_rdata->bge_rx_std_ring[i];
1169 	struct mbuf		*m;
1170 	int			error;
1171 
1172 	m = MCLGETL(NULL, M_DONTWAIT, sc->bge_rx_std_len);
1173 	if (!m)
1174 		return (ENOBUFS);
1175 	m->m_len = m->m_pkthdr.len = sc->bge_rx_std_len;
1176 	if (!(sc->bge_flags & BGE_RX_ALIGNBUG))
1177 		m_adj(m, ETHER_ALIGN);
1178 
1179 	error = bus_dmamap_load_mbuf(sc->bge_dmatag, dmap, m,
1180 	    BUS_DMA_READ|BUS_DMA_NOWAIT);
1181 	if (error) {
1182 		m_freem(m);
1183 		return (ENOBUFS);
1184 	}
1185 
1186 	bus_dmamap_sync(sc->bge_dmatag, dmap, 0, dmap->dm_mapsize,
1187 	    BUS_DMASYNC_PREREAD);
1188 	sc->bge_cdata.bge_rx_std_chain[i] = m;
1189 
1190 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
1191 	    offsetof(struct bge_ring_data, bge_rx_std_ring) +
1192 		i * sizeof (struct bge_rx_bd),
1193 	    sizeof (struct bge_rx_bd),
1194 	    BUS_DMASYNC_POSTWRITE);
1195 
1196 	BGE_HOSTADDR(r->bge_addr, dmap->dm_segs[0].ds_addr);
1197 	r->bge_flags = BGE_RXBDFLAG_END;
1198 	r->bge_len = m->m_len;
1199 	r->bge_idx = i;
1200 
1201 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
1202 	    offsetof(struct bge_ring_data, bge_rx_std_ring) +
1203 		i * sizeof (struct bge_rx_bd),
1204 	    sizeof (struct bge_rx_bd),
1205 	    BUS_DMASYNC_PREWRITE);
1206 
1207 	return (0);
1208 }
1209 
1210 /*
1211  * Initialize a Jumbo receive ring descriptor.
1212  */
1213 int
1214 bge_newbuf_jumbo(struct bge_softc *sc, int i)
1215 {
1216 	bus_dmamap_t		dmap = sc->bge_cdata.bge_rx_jumbo_map[i];
1217 	struct bge_ext_rx_bd	*r = &sc->bge_rdata->bge_rx_jumbo_ring[i];
1218 	struct mbuf		*m;
1219 	int			error;
1220 
1221 	m = MCLGETL(NULL, M_DONTWAIT, BGE_JLEN);
1222 	if (!m)
1223 		return (ENOBUFS);
1224 	m->m_len = m->m_pkthdr.len = BGE_JUMBO_FRAMELEN;
1225 	if (!(sc->bge_flags & BGE_RX_ALIGNBUG))
1226 		m_adj(m, ETHER_ALIGN);
1227 
1228 	error = bus_dmamap_load_mbuf(sc->bge_dmatag, dmap, m,
1229 	    BUS_DMA_READ|BUS_DMA_NOWAIT);
1230 	if (error) {
1231 		m_freem(m);
1232 		return (ENOBUFS);
1233 	}
1234 
1235 	bus_dmamap_sync(sc->bge_dmatag, dmap, 0, dmap->dm_mapsize,
1236 	    BUS_DMASYNC_PREREAD);
1237 	sc->bge_cdata.bge_rx_jumbo_chain[i] = m;
1238 
1239 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
1240 	    offsetof(struct bge_ring_data, bge_rx_jumbo_ring) +
1241 		i * sizeof (struct bge_ext_rx_bd),
1242 	    sizeof (struct bge_ext_rx_bd),
1243 	    BUS_DMASYNC_POSTWRITE);
1244 
1245 	/*
1246 	 * Fill in the extended RX buffer descriptor.
1247 	 */
1248 	r->bge_bd.bge_flags = BGE_RXBDFLAG_JUMBO_RING | BGE_RXBDFLAG_END;
1249 	r->bge_bd.bge_idx = i;
1250 	r->bge_len3 = r->bge_len2 = r->bge_len1 = 0;
1251 	switch (dmap->dm_nsegs) {
1252 	case 4:
1253 		BGE_HOSTADDR(r->bge_addr3, dmap->dm_segs[3].ds_addr);
1254 		r->bge_len3 = dmap->dm_segs[3].ds_len;
1255 		/* FALLTHROUGH */
1256 	case 3:
1257 		BGE_HOSTADDR(r->bge_addr2, dmap->dm_segs[2].ds_addr);
1258 		r->bge_len2 = dmap->dm_segs[2].ds_len;
1259 		/* FALLTHROUGH */
1260 	case 2:
1261 		BGE_HOSTADDR(r->bge_addr1, dmap->dm_segs[1].ds_addr);
1262 		r->bge_len1 = dmap->dm_segs[1].ds_len;
1263 		/* FALLTHROUGH */
1264 	case 1:
1265 		BGE_HOSTADDR(r->bge_bd.bge_addr, dmap->dm_segs[0].ds_addr);
1266 		r->bge_bd.bge_len = dmap->dm_segs[0].ds_len;
1267 		break;
1268 	default:
1269 		panic("%s: %d segments", __func__, dmap->dm_nsegs);
1270 	}
1271 
1272 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
1273 	    offsetof(struct bge_ring_data, bge_rx_jumbo_ring) +
1274 		i * sizeof (struct bge_ext_rx_bd),
1275 	    sizeof (struct bge_ext_rx_bd),
1276 	    BUS_DMASYNC_PREWRITE);
1277 
1278 	return (0);
1279 }
1280 
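/*
 * Create the per-slot DMA maps for the standard receive ring and
 * fill the ring with mbufs for the first time.
 */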
1281 int
1282 bge_init_rx_ring_std(struct bge_softc *sc)
1283 {
1284 	int i;
1285 
1286 	if (ISSET(sc->bge_flags, BGE_RXRING_VALID))
1287 		return (0);
1288 
1289 	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1290 		if (bus_dmamap_create(sc->bge_dmatag, sc->bge_rx_std_len, 1,
1291 		    sc->bge_rx_std_len, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1292 		    &sc->bge_cdata.bge_rx_std_map[i]) != 0) {
1293 			printf("%s: unable to create dmamap for slot %d\n",
1294 			    sc->bge_dev.dv_xname, i);
1295 			goto uncreate;
1296 		}
1297 		bzero(&sc->bge_rdata->bge_rx_std_ring[i],
1298 		    sizeof(struct bge_rx_bd));
1299 	}
1300 
1301 	sc->bge_std = BGE_STD_RX_RING_CNT - 1;
1302 
1303 	/* lwm must be greater than the replenish threshold */
1304 	if_rxr_init(&sc->bge_std_ring, 17, BGE_STD_RX_RING_CNT);
1305 	bge_fill_rx_ring_std(sc);
1306 
1307 	SET(sc->bge_flags, BGE_RXRING_VALID);
1308 
1309 	return (0);
1310 
1311 uncreate:
1312 	while (--i >= 0) {
1313 		bus_dmamap_destroy(sc->bge_dmatag,
1314 		    sc->bge_cdata.bge_rx_std_map[i]);
1315 	}
1316 	return (1);
1317 }
1318 
1319 /*
1320  * When the refill timeout for a ring is active, that ring is so empty
1321  * that no more packets can be received on it, so the interrupt handler
1322  * will not attempt to refill it, meaning we don't need to protect against
1323  * interrupts here.
1324  */
1325 
1326 void
1327 bge_rxtick(void *arg)
1328 {
1329 	struct bge_softc *sc = arg;
1330 
1331 	if (ISSET(sc->bge_flags, BGE_RXRING_VALID) &&
1332 	    if_rxr_inuse(&sc->bge_std_ring) <= 8)
1333 		bge_fill_rx_ring_std(sc);
1334 }
1335 
1336 void
1337 bge_rxtick_jumbo(void *arg)
1338 {
1339 	struct bge_softc *sc = arg;
1340 
1341 	if (ISSET(sc->bge_flags, BGE_JUMBO_RXRING_VALID) &&
1342 	    if_rxr_inuse(&sc->bge_jumbo_ring) <= 8)
1343 		bge_fill_rx_ring_jumbo(sc);
1344 }
1345 
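/*
 * Top up the standard receive ring with fresh mbufs, then advance
 * the producer index mailbox so the chip sees the new buffers.
 */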
1346 void
1347 bge_fill_rx_ring_std(struct bge_softc *sc)
1348 {
1349 	int i;
1350 	int post = 0;
1351 	u_int slots;
1352 
1353 	i = sc->bge_std;
1354 	for (slots = if_rxr_get(&sc->bge_std_ring, BGE_STD_RX_RING_CNT);
1355 	    slots > 0; slots--) {
1356 		BGE_INC(i, BGE_STD_RX_RING_CNT);
1357 
1358 		if (bge_newbuf(sc, i) != 0)
1359 			break;
1360 
1361 		sc->bge_std = i;
1362 		post = 1;
1363 	}
1364 	if_rxr_put(&sc->bge_std_ring, slots);
1365 
1366 	if (post)
1367 		bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
1368 
1369 	/*
1370 	 * bge always needs more than 8 packets on the ring. If we can't do
1371 	 * that now, then try again later.
1372 	 */
1373 	if (if_rxr_inuse(&sc->bge_std_ring) <= 8)
1374 		timeout_add(&sc->bge_rxtimeout, 1);
1375 }
1376 
1377 void
1378 bge_free_rx_ring_std(struct bge_softc *sc)
1379 {
1380 	bus_dmamap_t dmap;
1381 	struct mbuf *m;
1382 	int i;
1383 
1384 	if (!ISSET(sc->bge_flags, BGE_RXRING_VALID))
1385 		return;
1386 
1387 	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1388 		dmap = sc->bge_cdata.bge_rx_std_map[i];
1389 		m = sc->bge_cdata.bge_rx_std_chain[i];
1390 		if (m != NULL) {
1391 			bus_dmamap_sync(sc->bge_dmatag, dmap, 0,
1392 			    dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1393 			bus_dmamap_unload(sc->bge_dmatag, dmap);
1394 			m_freem(m);
1395 			sc->bge_cdata.bge_rx_std_chain[i] = NULL;
1396 		}
1397 		bus_dmamap_destroy(sc->bge_dmatag, dmap);
1398 		sc->bge_cdata.bge_rx_std_map[i] = NULL;
1399 		bzero(&sc->bge_rdata->bge_rx_std_ring[i],
1400 		    sizeof(struct bge_rx_bd));
1401 	}
1402 
1403 	CLR(sc->bge_flags, BGE_RXRING_VALID);
1404 }
1405 
1406 int
1407 bge_init_rx_ring_jumbo(struct bge_softc *sc)
1408 {
1409 	volatile struct bge_rcb *rcb;
1410 	int i;
1411 
1412 	if (ISSET(sc->bge_flags, BGE_JUMBO_RXRING_VALID))
1413 		return (0);
1414 
1415 	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1416 		if (bus_dmamap_create(sc->bge_dmatag, BGE_JLEN, 4, BGE_JLEN, 0,
1417 		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1418 		    &sc->bge_cdata.bge_rx_jumbo_map[i]) != 0) {
1419 			printf("%s: unable to create dmamap for slot %d\n",
1420 			    sc->bge_dev.dv_xname, i);
1421 			goto uncreate;
1422 		}
1423 		bzero(&sc->bge_rdata->bge_rx_jumbo_ring[i],
1424 		    sizeof(struct bge_ext_rx_bd));
1425 	}
1426 
1427 	sc->bge_jumbo = BGE_JUMBO_RX_RING_CNT - 1;
1428 
1429 	/* lwm must be greater than the replenish threshold */
1430 	if_rxr_init(&sc->bge_jumbo_ring, 17, BGE_JUMBO_RX_RING_CNT);
1431 	bge_fill_rx_ring_jumbo(sc);
1432 
1433 	SET(sc->bge_flags, BGE_JUMBO_RXRING_VALID);
1434 
1435 	rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
1436 	rcb->bge_maxlen_flags =
1437 	    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_USE_EXT_RX_BD);
1438 	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1439 
1440 	return (0);
1441 
1442 uncreate:
1443 	while (--i >= 0) {
1444 		bus_dmamap_destroy(sc->bge_dmatag,
1445 		    sc->bge_cdata.bge_rx_jumbo_map[i]);
1446 	}
1447 	return (1);
1448 }
1449 
1450 void
1451 bge_fill_rx_ring_jumbo(struct bge_softc *sc)
1452 {
1453 	int i;
1454 	int post = 0;
1455 	u_int slots;
1456 
1457 	i = sc->bge_jumbo;
1458 	for (slots = if_rxr_get(&sc->bge_jumbo_ring, BGE_JUMBO_RX_RING_CNT);
1459 	    slots > 0; slots--) {
1460 		BGE_INC(i, BGE_JUMBO_RX_RING_CNT);
1461 
1462 		if (bge_newbuf_jumbo(sc, i) != 0)
1463 			break;
1464 
1465 		sc->bge_jumbo = i;
1466 		post = 1;
1467 	}
1468 	if_rxr_put(&sc->bge_jumbo_ring, slots);
1469 
1470 	if (post)
1471 		bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
1472 
1473 	/*
1474 	 * bge always needs more than 8 packets on the ring. If we can't do
1475 	 * that now, then try again later.
1476 	 */
1477 	if (if_rxr_inuse(&sc->bge_jumbo_ring) <= 8)
1478 		timeout_add(&sc->bge_rxtimeout_jumbo, 1);
1479 }
1480 
1481 void
1482 bge_free_rx_ring_jumbo(struct bge_softc *sc)
1483 {
1484 	bus_dmamap_t dmap;
1485 	struct mbuf *m;
1486 	int i;
1487 
1488 	if (!ISSET(sc->bge_flags, BGE_JUMBO_RXRING_VALID))
1489 		return;
1490 
1491 	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1492 		dmap = sc->bge_cdata.bge_rx_jumbo_map[i];
1493 		m = sc->bge_cdata.bge_rx_jumbo_chain[i];
1494 		if (m != NULL) {
1495 			bus_dmamap_sync(sc->bge_dmatag, dmap, 0,
1496 			    dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1497 			bus_dmamap_unload(sc->bge_dmatag, dmap);
1498 			m_freem(m);
1499 			sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
1500 		}
1501 		bus_dmamap_destroy(sc->bge_dmatag, dmap);
1502 		sc->bge_cdata.bge_rx_jumbo_map[i] = NULL;
1503 		bzero(&sc->bge_rdata->bge_rx_jumbo_ring[i],
1504 		    sizeof(struct bge_ext_rx_bd));
1505 	}
1506 
1507 	CLR(sc->bge_flags, BGE_JUMBO_RXRING_VALID);
1508 }
1509 
1510 void
1511 bge_free_tx_ring(struct bge_softc *sc)
1512 {
1513 	int i;
1514 
1515 	if (!(sc->bge_flags & BGE_TXRING_VALID))
1516 		return;
1517 
1518 	for (i = 0; i < BGE_TX_RING_CNT; i++) {
1519 		if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
1520 			m_freem(sc->bge_cdata.bge_tx_chain[i]);
1521 			sc->bge_cdata.bge_tx_chain[i] = NULL;
1522 			sc->bge_cdata.bge_tx_map[i] = NULL;
1523 		}
1524 		bzero(&sc->bge_rdata->bge_tx_ring[i],
1525 		    sizeof(struct bge_tx_bd));
1526 
1527 		bus_dmamap_destroy(sc->bge_dmatag, sc->bge_txdma[i]);
1528 	}
1529 
1530 	sc->bge_flags &= ~BGE_TXRING_VALID;
1531 }
1532 
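/*
 * Set up the transmit ring: reset the producer/consumer indexes and
 * create a DMA map per slot. The producer mailboxes are written
 * twice on 5700_BX parts, apparently to work around a chip erratum.
 */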
1533 int
1534 bge_init_tx_ring(struct bge_softc *sc)
1535 {
1536 	int i;
1537 	bus_size_t txsegsz, txmaxsegsz;
1538 
1539 	if (sc->bge_flags & BGE_TXRING_VALID)
1540 		return (0);
1541 
1542 	sc->bge_txcnt = 0;
1543 	sc->bge_tx_saved_considx = 0;
1544 
1545 	/* Initialize transmit producer index for host-memory send ring. */
1546 	sc->bge_tx_prodidx = 0;
1547 	bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
1548 	if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX)
1549 		bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
1550 
1551 	/* NIC-memory send ring not used; initialize to zero. */
1552 	bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1553 	if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX)
1554 		bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1555 
1556 	if (BGE_IS_JUMBO_CAPABLE(sc)) {
1557 		txsegsz = 4096;
1558 		txmaxsegsz = BGE_JLEN;
1559 	} else {
1560 		txsegsz = MCLBYTES;
1561 		txmaxsegsz = MCLBYTES;
1562 	}
1563 
1564 	for (i = 0; i < BGE_TX_RING_CNT; i++) {
1565 		if (bus_dmamap_create(sc->bge_dmatag, txmaxsegsz,
1566 		    BGE_NTXSEG, txsegsz, 0, BUS_DMA_NOWAIT, &sc->bge_txdma[i]))
1567 			return (ENOBUFS);
1568 	}
1569 
1570 	sc->bge_flags |= BGE_TXRING_VALID;
1571 
1572 	return (0);
1573 }
1574 
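/*
 * Program the receive filter: promiscuous mode, accept-all-multicast
 * or a multicast hash computed over the group addresses.
 */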
1575 void
1576 bge_iff(struct bge_softc *sc)
1577 {
1578 	struct arpcom		*ac = &sc->arpcom;
1579 	struct ifnet		*ifp = &ac->ac_if;
1580 	struct ether_multi	*enm;
1581 	struct ether_multistep  step;
1582 	u_int8_t		hashes[16];
1583 	u_int32_t		h, rxmode;
1584 
1585 	/* First, zot all the existing filters. */
1586 	rxmode = CSR_READ_4(sc, BGE_RX_MODE) & ~BGE_RXMODE_RX_PROMISC;
1587 	ifp->if_flags &= ~IFF_ALLMULTI;
1588 	memset(hashes, 0x00, sizeof(hashes));
1589 
1590 	if (ifp->if_flags & IFF_PROMISC) {
1591 		ifp->if_flags |= IFF_ALLMULTI;
1592 		rxmode |= BGE_RXMODE_RX_PROMISC;
1593 	} else if (ac->ac_multirangecnt > 0) {
1594 		ifp->if_flags |= IFF_ALLMULTI;
1595 		memset(hashes, 0xff, sizeof(hashes));
1596 	} else {
1597 		ETHER_FIRST_MULTI(step, ac, enm);
1598 		while (enm != NULL) {
1599 			h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);
1600 
1601 			setbit(hashes, h & 0x7F);
1602 
1603 			ETHER_NEXT_MULTI(step, enm);
1604 		}
1605 	}
1606 
1607 	bus_space_write_raw_region_4(sc->bge_btag, sc->bge_bhandle, BGE_MAR0,
1608 	    hashes, sizeof(hashes));
1609 	CSR_WRITE_4(sc, BGE_RX_MODE, rxmode);
1610 }
1611 
1612 void
1613 bge_sig_pre_reset(struct bge_softc *sc, int type)
1614 {
1615 	/* no bge_asf_mode. */
1616 
1617 	if (type == BGE_RESET_START || type == BGE_RESET_SUSPEND)
1618 		bge_ape_driver_state_change(sc, type);
1619 }
1620 
1621 void
1622 bge_sig_post_reset(struct bge_softc *sc, int type)
1623 {
1624 	/* no bge_asf_mode. */
1625 
1626 	if (type == BGE_RESET_SHUTDOWN)
1627 		bge_ape_driver_state_change(sc, type);
1628 }
1629 
1630 void
1631 bge_sig_legacy(struct bge_softc *sc, int type)
1632 {
1633 	/* no bge_asf_mode. */
1634 }
1635 
1636 void
1637 bge_stop_fw(struct bge_softc *sc, int type)
1638 {
1639 	/* no bge_asf_mode. */
1640 }
1641 
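/*
 * Return the byte/word swap settings for the mode control register;
 * the BCM5720 needs additional swap bits for its buffer-to-host RX
 * data path.
 */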
1642 u_int32_t
1643 bge_dma_swap_options(struct bge_softc *sc)
1644 {
1645 	u_int32_t dma_options = BGE_DMA_SWAP_OPTIONS;
1646 
1647 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) {
1648 		dma_options |= BGE_MODECTL_BYTESWAP_B2HRX_DATA |
1649 		    BGE_MODECTL_WORDSWAP_B2HRX_DATA | BGE_MODECTL_B2HRX_ENABLE |
1650 		    BGE_MODECTL_HTX2B_ENABLE;
1651 	}
1652 
1653 	return (dma_options);
1654 }
1655 
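/*
 * Work out the PHY address. On the 5717/5719/5720 it depends on the
 * PCI function number and on whether the port is strapped for SerDes;
 * everything else uses PHY address 1.
 */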
1656 int
1657 bge_phy_addr(struct bge_softc *sc)
1658 {
1659 	struct pci_attach_args *pa = &(sc->bge_pa);
1660 	int phy_addr = 1;
1661 
1662 	switch (BGE_ASICREV(sc->bge_chipid)) {
1663 	case BGE_ASICREV_BCM5717:
1664 	case BGE_ASICREV_BCM5719:
1665 	case BGE_ASICREV_BCM5720:
1666 		phy_addr = pa->pa_function;
1667 		if (sc->bge_chipid != BGE_CHIPID_BCM5717_A0) {
1668 			phy_addr += (CSR_READ_4(sc, BGE_SGDIG_STS) &
1669 			    BGE_SGDIGSTS_IS_SERDES) ? 8 : 1;
1670 		} else {
1671 			phy_addr += (CSR_READ_4(sc, BGE_CPMU_PHY_STRAP) &
1672 			    BGE_CPMU_PHY_STRAP_IS_SERDES) ? 8 : 1;
1673 		}
1674 	}
1675 
1676 	return (phy_addr);
1677 }
1678 
1679 /*
1680  * Do endian, PCI and DMA initialization.
1681  */
1682 void
1683 bge_chipinit(struct bge_softc *sc)
1684 {
1685 	struct pci_attach_args	*pa = &(sc->bge_pa);
1686 	u_int32_t dma_rw_ctl, misc_ctl, mode_ctl;
1687 	int i;
1688 
1689 	/* Set endianness before we access any non-PCI registers. */
1690 	misc_ctl = BGE_INIT;
1691 	if (sc->bge_flags & BGE_TAGGED_STATUS)
1692 		misc_ctl |= BGE_PCIMISCCTL_TAGGED_STATUS;
1693 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL,
1694 	    misc_ctl);
1695 
1696 	/*
1697 	 * Clear the MAC statistics block in the NIC's
1698 	 * internal memory.
1699 	 */
1700 	for (i = BGE_STATS_BLOCK;
1701 	    i < BGE_STATS_BLOCK_END + 1; i += sizeof(u_int32_t))
1702 		BGE_MEMWIN_WRITE(pa->pa_pc, pa->pa_tag, i, 0);
1703 
1704 	for (i = BGE_STATUS_BLOCK;
1705 	    i < BGE_STATUS_BLOCK_END + 1; i += sizeof(u_int32_t))
1706 		BGE_MEMWIN_WRITE(pa->pa_pc, pa->pa_tag, i, 0);
1707 
1708 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57765 ||
1709 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57766) {
1710 		/*
1711 		 * For the 57766 and non-Ax versions of the 57765, the bootcode
1712 		 * needs to set up the PCIe Fast Training Sequence (FTS)
1713 		 * value to prevent transmit hangs.
1714 		 */
1715 		if (BGE_CHIPREV(sc->bge_chipid) != BGE_CHIPREV_57765_AX) {
1716 			CSR_WRITE_4(sc, BGE_CPMU_PADRNG_CTL,
1717 			    CSR_READ_4(sc, BGE_CPMU_PADRNG_CTL) |
1718 			    BGE_CPMU_PADRNG_CTL_RDIV2);
1719 		}
1720 	}
1721 
1722 	/*
1723 	 * Set up the PCI DMA control register.
1724 	 */
1725 	dma_rw_ctl = BGE_PCIDMARWCTL_RD_CMD_SHIFT(6) |
1726 	    BGE_PCIDMARWCTL_WR_CMD_SHIFT(7);
1727 
1728 	if (sc->bge_flags & BGE_PCIE) {
1729 		if (sc->bge_mps >= 256)
1730 			dma_rw_ctl |= BGE_PCIDMARWCTL_WR_WAT_SHIFT(7);
1731 		else
1732 			dma_rw_ctl |= BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1733 	} else if (sc->bge_flags & BGE_PCIX) {
1734 		/* PCI-X bus */
1735 		if (BGE_IS_5714_FAMILY(sc)) {
1736 			/* 256 bytes for read and write. */
1737 			dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(2) |
1738 			    BGE_PCIDMARWCTL_WR_WAT_SHIFT(2);
1739 
1740 			if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5780)
1741 				dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL;
1742 			else
1743 				dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE_LOCAL;
1744 		} else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
1745 			/* 1536 bytes for read, 384 bytes for write. */
1746 			dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
1747 			    BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1748 		} else {
1749 			/* 384 bytes for read and write. */
1750 			dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(3) |
1751 			    BGE_PCIDMARWCTL_WR_WAT_SHIFT(3) |
1752 			    (0x0F);
1753 		}
1754 
1755 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 ||
1756 		    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
1757 			u_int32_t tmp;
1758 
1759 			/* Set ONEDMA_ATONCE for hardware workaround. */
1760 			tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f;
1761 			if (tmp == 6 || tmp == 7)
1762 				dma_rw_ctl |=
1763 				    BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL;
1764 
1765 			/* Set PCI-X DMA write workaround. */
1766 			dma_rw_ctl |= BGE_PCIDMARWCTL_ASRT_ALL_BE;
1767 		}
1768 	} else {
1769 		/* Conventional PCI bus: 256 bytes for read and write. */
1770 		dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
1771 		    BGE_PCIDMARWCTL_WR_WAT_SHIFT(7);
1772 
1773 		if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5705 &&
1774 		    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5750)
1775 			dma_rw_ctl |= 0x0F;
1776 	}
1777 
1778 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
1779 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701)
1780 		dma_rw_ctl |= BGE_PCIDMARWCTL_USE_MRM |
1781 		    BGE_PCIDMARWCTL_ASRT_ALL_BE;
1782 
1783 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 ||
1784 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704)
1785 		dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
1786 
1787 	if (BGE_IS_5717_PLUS(sc)) {
1788 		dma_rw_ctl &= ~BGE_PCIDMARWCTL_DIS_CACHE_ALIGNMENT;
1789 		if (sc->bge_chipid == BGE_CHIPID_BCM57765_A0)
1790 			dma_rw_ctl &= ~BGE_PCIDMARWCTL_CRDRDR_RDMA_MRRS_MSK;
1791 
1792 		/*
1793 		 * Enable HW workaround for controllers that misinterpret
1794 		 * a status tag update and leave interrupts permanently
1795 		 * disabled.
1796 		 */
1797 		if (!BGE_IS_57765_PLUS(sc) &&
1798 		    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5717 &&
1799 		    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5762)
1800 			dma_rw_ctl |= BGE_PCIDMARWCTL_TAGGED_STATUS_WA;
1801 	}
1802 
1803 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL, dma_rw_ctl);
1804 
1805 	/*
1806 	 * Set up general mode register.
1807 	 */
1808 	mode_ctl = bge_dma_swap_options(sc);
1809 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720 ||
1810 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762) {
1811 		/* Retain Host-2-BMC settings written by APE firmware. */
1812 		mode_ctl |= CSR_READ_4(sc, BGE_MODE_CTL) &
1813 		    (BGE_MODECTL_BYTESWAP_B2HRX_DATA |
1814 		    BGE_MODECTL_WORDSWAP_B2HRX_DATA |
1815 		    BGE_MODECTL_B2HRX_ENABLE | BGE_MODECTL_HTX2B_ENABLE);
1816 	}
1817 	mode_ctl |= BGE_MODECTL_MAC_ATTN_INTR | BGE_MODECTL_HOST_SEND_BDS |
1818 	    BGE_MODECTL_TX_NO_PHDR_CSUM;
1819 
1820 	/*
1821 	 * The BCM5701 B5 has a bug causing data corruption when using
1822 	 * 64-bit DMA reads, which can be terminated early and then
1823 	 * completed later as 32-bit accesses, in combination with
1824 	 * certain bridges.
1825 	 */
1826 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701 &&
1827 	    sc->bge_chipid == BGE_CHIPID_BCM5701_B5)
1828 		mode_ctl |= BGE_MODECTL_FORCE_PCI32;
1829 
1830 	CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl);
1831 
1832 	/*
1833 	 * Disable memory write invalidate.  Apparently it is not supported
1834 	 * properly by these devices.
1835 	 */
1836 	PCI_CLRBIT(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
1837 	    PCI_COMMAND_INVALIDATE_ENABLE);
1838 
1839 #ifdef __brokenalpha__
1840 	/*
1841 	 * Must ensure that we do not cross an 8KB boundary for DMA
1842 	 * reads.  Our highest limit is 1KB.  This is a restriction on
1843 	 * some Alpha platforms with early revision 21174 PCI chipsets,
1844 	 * such as the AlphaPC 164lx.
1845 	 */
1846 	PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
1847 	    BGE_PCI_READ_BNDRY_1024);
1848 #endif
1849 
1850 	/* Set the timer prescaler (always 66MHz) */
1851 	CSR_WRITE_4(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ);
1852 
1853 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
1854 		DELAY(40);	/* XXX */
1855 
1856 		/* Put PHY into ready state */
1857 		BGE_CLRBIT(sc, BGE_MISC_CFG, BGE_MISCCFG_EPHY_IDDQ);
1858 		CSR_READ_4(sc, BGE_MISC_CFG); /* Flush */
1859 		DELAY(40);
1860 	}
1861 }
1862 
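/*
 * Set up the NIC's buffer pools, ring control blocks, host
 * coalescing and the various DMA state machines.
 */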
1863 int
1864 bge_blockinit(struct bge_softc *sc)
1865 {
1866 	volatile struct bge_rcb		*rcb;
1867 	vaddr_t			rcb_addr;
1868 	bge_hostaddr		taddr;
1869 	u_int32_t		dmactl, rdmareg, mimode, val;
1870 	int			i, limit;
1871 
1872 	/*
1873 	 * Initialize the memory window pointer register so that
1874 	 * we can access the first 32K of internal NIC RAM. This will
1875 	 * allow us to set up the TX send ring RCBs and the RX return
1876 	 * ring RCBs, plus other things which live in NIC memory.
1877 	 */
1878 	CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);
1879 
1880 	/* Configure mbuf memory pool */
1881 	if (!BGE_IS_5705_PLUS(sc)) {
1882 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
1883 		    BGE_BUFFPOOL_1);
1884 
1885 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704)
1886 			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
1887 		else
1888 			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
1889 
1890 		/* Configure DMA resource pool */
1891 		CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
1892 		    BGE_DMA_DESCRIPTORS);
1893 		CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
1894 	}
1895 
1896 	/* Configure mbuf pool watermarks */
1897 	/* New Broadcom docs strongly recommend these: */
1898 	if (BGE_IS_5717_PLUS(sc)) {
1899 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1900 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x2a);
1901 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xa0);
1902 	} else if (BGE_IS_5705_PLUS(sc)) {
1903 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1904 
1905 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
1906 			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04);
1907 			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10);
1908 		} else {
1909 			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
1910 			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1911 		}
1912 	} else {
1913 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
1914 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
1915 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1916 	}
1917 
1918 	/* Configure DMA resource watermarks */
1919 	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
1920 	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);
1921 
1922 	/* Enable buffer manager */
1923 	val = BGE_BMANMODE_ENABLE | BGE_BMANMODE_LOMBUF_ATTN;
1924 	/*
1925 	 * Change the arbitration algorithm for TXMBUF read requests to
1926 	 * round-robin instead of priority-based for the BCM5719.  When
1927 	 * TXFIFO is almost empty, RDMA will hold its request until
1928 	 * TXFIFO is not almost empty.
1929 	 */
1930 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719)
1931 		val |= BGE_BMANMODE_NO_TX_UNDERRUN;
1932 	CSR_WRITE_4(sc, BGE_BMAN_MODE, val);
1933 
1934 	/* Poll for buffer manager start indication */
1935 	for (i = 0; i < 2000; i++) {
1936 		if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
1937 			break;
1938 		DELAY(10);
1939 	}
1940 
1941 	if (i == 2000) {
1942 		printf("%s: buffer manager failed to start\n",
1943 		    sc->bge_dev.dv_xname);
1944 		return (ENXIO);
1945 	}
1946 
1947 	/* Enable flow-through queues */
1948 	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
1949 	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
1950 
1951 	/* Wait until queue initialization is complete */
1952 	for (i = 0; i < 2000; i++) {
1953 		if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
1954 			break;
1955 		DELAY(10);
1956 	}
1957 
1958 	if (i == 2000) {
1959 		printf("%s: flow-through queue init failed\n",
1960 		    sc->bge_dev.dv_xname);
1961 		return (ENXIO);
1962 	}
1963 
1964 	/*
1965 	 * Summary of rings supported by the controller:
1966 	 *
1967 	 * Standard Receive Producer Ring
1968 	 * - This ring is used to feed receive buffers for "standard"
1969 	 *   sized frames (typically 1536 bytes) to the controller.
1970 	 *
1971 	 * Jumbo Receive Producer Ring
1972 	 * - This ring is used to feed receive buffers for jumbo sized
1973 	 *   frames (i.e. anything bigger than the "standard" frames)
1974 	 *   to the controller.
1975 	 *
1976 	 * Mini Receive Producer Ring
1977 	 * - This ring is used to feed receive buffers for "mini"
1978 	 *   sized frames to the controller.
1979 	 * - This feature required external memory for the controller
1980 	 *   but was never used in a production system.  Should always
1981 	 *   be disabled.
1982 	 *
1983 	 * Receive Return Ring
1984 	 * - After the controller has placed an incoming frame into a
1985 	 *   receive buffer, that buffer is moved into a receive return
1986 	 *   ring.  The driver is then responsible for passing the
1987 	 *   buffer up to the stack.  Many versions of the controller
1988 	 *   support multiple RR rings.
1989 	 *
1990 	 * Send Ring
1991 	 * - This ring is used for outgoing frames.  Many versions of
1992 	 *   the controller support multiple send rings.
1993 	 */
1994 
1995 	/* Initialize the standard RX ring control block */
1996 	rcb = &sc->bge_rdata->bge_info.bge_std_rx_rcb;
1997 	BGE_HOSTADDR(rcb->bge_hostaddr, BGE_RING_DMA_ADDR(sc, bge_rx_std_ring));
1998 	if (BGE_IS_5717_PLUS(sc)) {
1999 		/*
2000 		 * Bits 31-16: Programmable ring size (2048, 1024, 512, .., 32)
2001 		 * Bits 15-2 : Maximum RX frame size
2002 		 * Bit 1     : 1 = Ring Disabled, 0 = Ring ENabled
2003 		 * Bit 1     : 1 = Ring Disabled, 0 = Ring Enabled
2004 		 */
2005 		rcb->bge_maxlen_flags =
2006 		    BGE_RCB_MAXLEN_FLAGS(512, ETHER_MAX_DIX_LEN << 2);
2007 	} else if (BGE_IS_5705_PLUS(sc)) {
2008 		/*
2009 		 * Bits 31-16: Programmable ring size (512, 256, 128, 64, 32)
2010 		 * Bits 15-2 : Reserved (should be 0)
2011 		 * Bit 1     : 1 = Ring Disabled, 0 = Ring Enabled
2012 		 * Bit 0     : Reserved
2013 		 */
2014 		rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
2015 	} else {
2016 		/*
2017 		 * Ring size is always XXX entries
2018 		 * Bits 31-16: Maximum RX frame size
2019 		 * Bits 15-2 : Reserved (should be 0)
2020 		 * Bit 1     : 1 = Ring Disabled, 0 = Ring Enabled
2021 		 * Bit 0     : Reserved
2022 		 */
2023 		rcb->bge_maxlen_flags =
2024 		    BGE_RCB_MAXLEN_FLAGS(ETHER_MAX_DIX_LEN, 0);
2025 	}
2026 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 ||
2027 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 ||
2028 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720)
2029 		rcb->bge_nicaddr = BGE_STD_RX_RINGS_5717;
2030 	else
2031 		rcb->bge_nicaddr = BGE_STD_RX_RINGS;
2032 	/* Write the standard receive producer ring control block. */
2033 	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
2034 	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
2035 	CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
2036 	CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
2037 
2038 	/* Reset the standard receive producer ring producer index. */
2039 	bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0);
2040 
2041 	/*
2042 	 * Initialize the Jumbo RX ring control block.
2043 	 * We set the 'ring disabled' bit in the flags
2044 	 * field until we're actually ready to start
2045 	 * using this ring (i.e. once we set the MTU
2046 	 * high enough to require it).
2047 	 */
2048 	if (sc->bge_flags & BGE_JUMBO_RING) {
2049 		rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
2050 		BGE_HOSTADDR(rcb->bge_hostaddr,
2051 		    BGE_RING_DMA_ADDR(sc, bge_rx_jumbo_ring));
2052 		rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
2053 		    BGE_RCB_FLAG_USE_EXT_RX_BD | BGE_RCB_FLAG_RING_DISABLED);
2054 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 ||
2055 		    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 ||
2056 		    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720)
2057 			rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS_5717;
2058 		else
2059 			rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
2060 		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
2061 		    rcb->bge_hostaddr.bge_addr_hi);
2062 		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
2063 		    rcb->bge_hostaddr.bge_addr_lo);
2064 		/* Program the jumbo receive producer ring RCB parameters. */
2065 		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
2066 		    rcb->bge_maxlen_flags);
2067 		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
2068 		/* Reset the jumbo receive producer ring producer index. */
2069 		bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
2070 	}
2071 
2072 	/* Disable the mini receive producer ring RCB. */
2073 	if (BGE_IS_5700_FAMILY(sc)) {
2074 		/* Set up dummy disabled mini ring RCB */
2075 		rcb = &sc->bge_rdata->bge_info.bge_mini_rx_rcb;
2076 		rcb->bge_maxlen_flags =
2077 		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
2078 		CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
2079 		    rcb->bge_maxlen_flags);
2080 		/* Reset the mini receive producer ring producer index. */
2081 		bge_writembx(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
2082 
2083 		/* XXX why? */
2084 		bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
2085 		    offsetof(struct bge_ring_data, bge_info),
2086 		    sizeof (struct bge_gib),
2087 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2088 	}
2089 
2090 	/* Choose de-pipeline mode for BCM5906 A0, A1 and A2. */
2091 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
2092 		if (sc->bge_chipid == BGE_CHIPID_BCM5906_A0 ||
2093 		    sc->bge_chipid == BGE_CHIPID_BCM5906_A1 ||
2094 		    sc->bge_chipid == BGE_CHIPID_BCM5906_A2)
2095 			CSR_WRITE_4(sc, BGE_ISO_PKT_TX,
2096 			    (CSR_READ_4(sc, BGE_ISO_PKT_TX) & ~3) | 2);
2097 	}
2098 	/*
2099 	 * The BD ring replenish thresholds control how often the
2100 	 * hardware fetches new BDs from the producer rings in host
2101 	 * memory.  Setting the value too low on a busy system can
2102 	 * starve the hardware and reduce the throughput.
2103 	 *
2104 	 * Set the BD ring replenish thresholds. The recommended
2105 	 * values are 1/8th the number of descriptors allocated to
2106 	 * each ring, but since we try to avoid filling the entire
2107 	 * ring we set these to the minimal value of 8.  This needs to
2108 	 * be done on several of the supported chip revisions anyway,
2109 	 * to work around HW bugs.
2110 	 */
2111 	CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, 8);
2112 	if (sc->bge_flags & BGE_JUMBO_RING)
2113 		CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, 8);
2114 
2115 	if (BGE_IS_5717_PLUS(sc)) {
2116 		CSR_WRITE_4(sc, BGE_STD_REPL_LWM, 4);
2117 		CSR_WRITE_4(sc, BGE_JUMBO_REPL_LWM, 4);
2118 	}
2119 
2120 	/*
2121 	 * Disable all send rings by setting the 'ring disabled' bit
2122 	 * in the flags field of all the TX send ring control blocks,
2123 	 * located in NIC memory.
2124 	 */
2125 	if (BGE_IS_5700_FAMILY(sc)) {
2126 		/* 5700 to 5704 had 16 send rings. */
2127 		limit = BGE_TX_RINGS_EXTSSRAM_MAX;
2128 	} else if (BGE_IS_57765_PLUS(sc) ||
2129 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762)
2130 		limit = 2;
2131 	else if (BGE_IS_5717_PLUS(sc))
2132 		limit = 4;
2133 	else
2134 		limit = 1;
2135 	rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
2136 	for (i = 0; i < limit; i++) {
2137 		RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
2138 		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
2139 		RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0);
2140 		rcb_addr += sizeof(struct bge_rcb);
2141 	}
2142 
2143 	/* Configure send ring RCB 0 (we use only the first ring) */
2144 	rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
2145 	BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_tx_ring));
2146 	RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
2147 	RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
2148 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 ||
2149 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 ||
2150 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720)
2151 		RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, BGE_SEND_RING_5717);
2152 	else
2153 		RCB_WRITE_4(sc, rcb_addr, bge_nicaddr,
2154 		    BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
2155 	RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
2156 	    BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
2157 
2158 	/*
2159 	 * Disable all receive return rings by setting the
2160 	 * 'ring disabled' bit in the flags field of all the receive
2161 	 * return ring control blocks, located in NIC memory.
2162 	 */
2163 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 ||
2164 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 ||
2165 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) {
2166 		/* Should be 17, use 16 until we get an SRAM map. */
2167 		limit = 16;
2168 	} else if (BGE_IS_5700_FAMILY(sc))
2169 		limit = BGE_RX_RINGS_MAX;
2170 	else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5755 ||
2171 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762 ||
2172 	    BGE_IS_57765_PLUS(sc))
2173 		limit = 4;
2174 	else
2175 		limit = 1;
2176 	/* Disable all receive return rings */
2177 	rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
2178 	for (i = 0; i < limit; i++) {
2179 		RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, 0);
2180 		RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, 0);
2181 		RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
2182 		    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt,
2183 			BGE_RCB_FLAG_RING_DISABLED));
2184 		RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0);
2185 		bge_writembx(sc, BGE_MBX_RX_CONS0_LO +
2186 		    (i * (sizeof(u_int64_t))), 0);
2187 		rcb_addr += sizeof(struct bge_rcb);
2188 	}
2189 
2190 	/*
2191 	 * Set up receive return ring 0.  Note that the NIC address
2192 	 * for RX return rings is 0x0.  The return rings live entirely
2193 	 * within the host, so the nicaddr field in the RCB isn't used.
2194 	 */
2195 	rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
2196 	BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_rx_return_ring));
2197 	RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
2198 	RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
2199 	RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0x00000000);
2200 	RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
2201 	    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));
2202 
2203 	/* Set random backoff seed for TX */
2204 	CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
2205 	    (sc->arpcom.ac_enaddr[0] + sc->arpcom.ac_enaddr[1] +
2206 	     sc->arpcom.ac_enaddr[2] + sc->arpcom.ac_enaddr[3] +
2207 	     sc->arpcom.ac_enaddr[4] + sc->arpcom.ac_enaddr[5]) &
2208 	    BGE_TX_BACKOFF_SEED_MASK);
2209 
2210 	/* Set inter-packet gap: slot time 0x20, IPG 6, IPG CRS 2 */
2211 	val = 0x2620;
2212 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720 ||
2213 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762)
2214 		val |= CSR_READ_4(sc, BGE_TX_LENGTHS) &
2215 		    (BGE_TXLEN_JMB_FRM_LEN_MSK | BGE_TXLEN_CNT_DN_VAL_MSK);
2216 	CSR_WRITE_4(sc, BGE_TX_LENGTHS, val);
2217 
2218 	/*
2219 	 * Specify which ring to use for packets that don't match
2220 	 * any RX rules.
2221 	 */
2222 	CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
2223 
2224 	/*
2225 	 * Configure number of RX lists. One interrupt distribution
2226 	 * list, sixteen active lists, one bad frames class.
2227 	 */
2228 	CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
2229 
2230 	/* Initialize RX list placement stats mask. */
2231 	CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007BFFFF);
2232 	CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
2233 
2234 	/* Disable host coalescing until we get it set up */
2235 	CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
2236 
2237 	/* Poll to make sure it's shut down. */
2238 	for (i = 0; i < 2000; i++) {
2239 		if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
2240 			break;
2241 		DELAY(10);
2242 	}
2243 
2244 	if (i == 2000) {
2245 		printf("%s: host coalescing engine failed to idle\n",
2246 		    sc->bge_dev.dv_xname);
2247 		return (ENXIO);
2248 	}
2249 
2250 	/* Set up host coalescing defaults */
2251 	CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
2252 	CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
2253 	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
2254 	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
2255 	if (!(BGE_IS_5705_PLUS(sc))) {
2256 		CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
2257 		CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
2258 	}
2259 	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
2260 	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);
2261 
2262 	/* Set up address of statistics block */
2263 	if (!(BGE_IS_5705_PLUS(sc))) {
2264 		BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_info.bge_stats));
2265 		CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI, taddr.bge_addr_hi);
2266 		CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO, taddr.bge_addr_lo);
2267 
2268 		CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
2269 		CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
2270 		CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
2271 	}
2272 
2273 	/* Set up address of status block */
2274 	BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_status_block));
2275 	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI, taddr.bge_addr_hi);
2276 	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO, taddr.bge_addr_lo);
2277 
2278 	sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx = 0;
2279 	sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx = 0;
2280 
2281 	/* Set up status block size. */
2282 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 &&
2283 	    sc->bge_chipid != BGE_CHIPID_BCM5700_C0) {
2284 		val = BGE_STATBLKSZ_FULL;
2285 		bzero(&sc->bge_rdata->bge_status_block, BGE_STATUS_BLK_SZ);
2286 	} else {
2287 		val = BGE_STATBLKSZ_32BYTE;
2288 		bzero(&sc->bge_rdata->bge_status_block, 32);
2289 	}
2290 
2291 	/* Turn on host coalescing state machine */
2292 	CSR_WRITE_4(sc, BGE_HCC_MODE, val | BGE_HCCMODE_ENABLE);
2293 
2294 	/* Turn on RX BD completion state machine and enable attentions */
2295 	CSR_WRITE_4(sc, BGE_RBDC_MODE,
2296 	    BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);
2297 
2298 	/* Turn on RX list placement state machine */
2299 	CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
2300 
2301 	/* Turn on RX list selector state machine. */
2302 	if (!(BGE_IS_5705_PLUS(sc)))
2303 		CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
2304 
2305 	val = BGE_MACMODE_TXDMA_ENB | BGE_MACMODE_RXDMA_ENB |
2306 	    BGE_MACMODE_RX_STATS_CLEAR | BGE_MACMODE_TX_STATS_CLEAR |
2307 	    BGE_MACMODE_RX_STATS_ENB | BGE_MACMODE_TX_STATS_ENB |
2308 	    BGE_MACMODE_FRMHDR_DMA_ENB;
2309 
2310 	if (sc->bge_flags & BGE_FIBER_TBI)
2311 		val |= BGE_PORTMODE_TBI;
2312 	else if (sc->bge_flags & BGE_FIBER_MII)
2313 		val |= BGE_PORTMODE_GMII;
2314 	else
2315 		val |= BGE_PORTMODE_MII;
2316 
2317 	/* Allow APE to send/receive frames. */
2318 	if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) != 0)
2319 		val |= BGE_MACMODE_APE_RX_EN | BGE_MACMODE_APE_TX_EN;
2320 
2321 	/* Turn on DMA, clear stats */
2322 	CSR_WRITE_4(sc, BGE_MAC_MODE, val);
2323 	DELAY(40);
2324 
2325 	/* Set misc. local control, enable interrupts on attentions */
2326 	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);
2327 
2328 #ifdef notdef
2329 	/* Assert GPIO pins for PHY reset */
2330 	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
2331 	    BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
2332 	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
2333 	    BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
2334 #endif
2335 
2336 	/* Turn on DMA completion state machine */
2337 	if (!(BGE_IS_5705_PLUS(sc)))
2338 		CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
2339 
2340 	val = BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS;
2341 
2342 	/* Enable host coalescing bug fix. */
2343 	if (BGE_IS_5755_PLUS(sc))
2344 		val |= BGE_WDMAMODE_STATUS_TAG_FIX;
2345 
2346 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785)
2347 		val |= BGE_WDMAMODE_BURST_ALL_DATA;
2348 
2349 	/* Turn on write DMA state machine */
2350 	CSR_WRITE_4(sc, BGE_WDMA_MODE, val);
2351 	DELAY(40);
2352 
2353 	val = BGE_RDMAMODE_ENABLE|BGE_RDMAMODE_ALL_ATTNS;
2354 
2355 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717)
2356 		val |= BGE_RDMAMODE_MULT_DMA_RD_DIS;
2357 
2358 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 ||
2359 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785 ||
2360 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57780)
2361 		val |= BGE_RDMAMODE_BD_SBD_CRPT_ATTN |
2362 		       BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN |
2363 		       BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN;
2364 
2365 	if (sc->bge_flags & BGE_PCIE)
2366 		val |= BGE_RDMAMODE_FIFO_LONG_BURST;
2367 
2368 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720 ||
2369 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762) {
2370 		val |= CSR_READ_4(sc, BGE_RDMA_MODE) &
2371 		    BGE_RDMAMODE_H2BNC_VLAN_DET;
2372 		/*
2373 		 * Allow multiple outstanding read requests from
2374 		 * the non-LSO read DMA engine.
2375 		 */
2376 		val &= ~BGE_RDMAMODE_MULT_DMA_RD_DIS;
2377 	}
2378 
2379 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761 ||
2380 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 ||
2381 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785 ||
2382 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57780 ||
2383 	    BGE_IS_5717_PLUS(sc) || BGE_IS_57765_PLUS(sc)) {
2384 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762)
2385 			rdmareg = BGE_RDMA_RSRVCTRL_REG2;
2386 		else
2387 			rdmareg = BGE_RDMA_RSRVCTRL;
2388 		dmactl = CSR_READ_4(sc, rdmareg);
2389 		/*
2390 		 * Adjust the TX margin to prevent TX data corruption and
2391 		 * fix internal FIFO overflow.
2392 		 */
2393 		if (sc->bge_chipid == BGE_CHIPID_BCM5719_A0 ||
2394 		    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762) {
2395 			dmactl &= ~(BGE_RDMA_RSRVCTRL_FIFO_LWM_MASK |
2396 			    BGE_RDMA_RSRVCTRL_FIFO_HWM_MASK |
2397 			    BGE_RDMA_RSRVCTRL_TXMRGN_MASK);
2398 			dmactl |= BGE_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
2399 			    BGE_RDMA_RSRVCTRL_FIFO_HWM_1_5K |
2400 			    BGE_RDMA_RSRVCTRL_TXMRGN_320B;
2401 		}
2402 		/*
2403 		 * Enable fix for read DMA FIFO overruns.
2404 		 * The fix is to limit the number of RX BDs
2405 		 * the hardware would fetch at a time.
2406 		 */
2407 		CSR_WRITE_4(sc, rdmareg, dmactl |
2408 		    BGE_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
2409 	}
2410 
2411 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719) {
2412 		CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL,
2413 		    CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) |
2414 		    BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_4K |
2415 		    BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K);
2416 	} else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) {
2417 		/*
2418 		 * Allow 4KB burst length reads for non-LSO frames.
2419 		 * Enable 512B burst length reads for buffer descriptors.
2420 		 */
2421 		CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL,
2422 		    CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) |
2423 		    BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_512 |
2424 		    BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K);
2425 	} else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762) {
2426 		CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL_REG2,
2427 		    CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL_REG2) |
2428 		    BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_4K |
2429 		    BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K);
2430 	}
2431 
2432 	CSR_WRITE_4(sc, BGE_RDMA_MODE, val);
2433 	DELAY(40);
2434 
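	/*
	 * TX length workaround for BCM5719/5720: if any read DMA channel
	 * has seen a frame larger than ETHER_MAX_LEN, enable the matching
	 * fix in the LSO/corruption control register.
	 */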
2435 	if (sc->bge_flags & BGE_RDMA_BUG) {
2436 		for (i = 0; i < BGE_NUM_RDMA_CHANNELS / 2; i++) {
2437 			val = CSR_READ_4(sc, BGE_RDMA_LENGTH + i * 4);
2438 			if ((val & 0xFFFF) > ETHER_MAX_LEN)
2439 				break;
2440 			if (((val >> 16) & 0xFFFF) > ETHER_MAX_LEN)
2441 				break;
2442 		}
2443 		if (i != BGE_NUM_RDMA_CHANNELS / 2) {
2444 			val = CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL);
2445 			if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719)
2446 				val |= BGE_RDMA_TX_LENGTH_WA_5719;
2447 			else
2448 				val |= BGE_RDMA_TX_LENGTH_WA_5720;
2449 			CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL, val);
2450 		}
2451 	}
2452 
2453 	/* Turn on RX data completion state machine */
2454 	CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
2455 
2456 	/* Turn on RX BD initiator state machine */
2457 	CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
2458 
2459 	/* Turn on RX data and RX BD initiator state machine */
2460 	CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
2461 
2462 	/* Turn on Mbuf cluster free state machine */
2463 	if (!BGE_IS_5705_PLUS(sc))
2464 		CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
2465 
2466 	/* Turn on send BD completion state machine */
2467 	CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
2468 
2469 	/* Turn on send data completion state machine */
2470 	val = BGE_SDCMODE_ENABLE;
2471 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761)
2472 		val |= BGE_SDCMODE_CDELAY;
2473 	CSR_WRITE_4(sc, BGE_SDC_MODE, val);
2474 
2475 	/* Turn on send data initiator state machine */
2476 	CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
2477 
2478 	/* Turn on send BD initiator state machine */
2479 	CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
2480 
2481 	/* Turn on send BD selector state machine */
2482 	CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
2483 
2484 	CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007BFFFF);
2485 	CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
2486 	    BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);
2487 
2488 	/* ack/clear link change events */
2489 	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
2490 	    BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
2491 	    BGE_MACSTAT_LINK_CHANGED);
2492 
2493 	/* Enable PHY auto polling (for MII/GMII only) */
2494 	if (sc->bge_flags & BGE_FIBER_TBI) {
2495 		CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
2496 	} else {
2497 		if ((sc->bge_flags & BGE_CPMU_PRESENT) != 0)
2498 			mimode = BGE_MIMODE_500KHZ_CONST;
2499 		else
2500 			mimode = BGE_MIMODE_BASE;
2501 		if (BGE_IS_5700_FAMILY(sc) ||
2502 		    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705) {
2503 			mimode |= BGE_MIMODE_AUTOPOLL;
2504 			BGE_STS_SETBIT(sc, BGE_STS_AUTOPOLL);
2505 		}
2506 		mimode |= BGE_MIMODE_PHYADDR(sc->bge_phy_addr);
2507 		CSR_WRITE_4(sc, BGE_MI_MODE, mimode);
2508 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700)
2509 			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
2510 			    BGE_EVTENB_MI_INTERRUPT);
2511 	}
2512 
2513 	/*
2514 	 * Clear any pending link state attention.
2515 	 * Otherwise some link state change events may be lost until the
2516 	 * attention is cleared by the bge_intr() -> bge_link_upd() sequence.
2517 	 * It's not necessary on newer BCM chips - perhaps enabling link
2518 	 * state change attentions implies clearing pending attention.
2519 	 */
2520 	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
2521 	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
2522 	    BGE_MACSTAT_LINK_CHANGED);
2523 
2524 	/* Enable link state change attentions. */
2525 	BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
2526 
2527 	return (0);
2528 }
2529 
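/*
 * Look up the name of a chip revision, first by exact chip id,
 * then by ASIC major revision.
 */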
2530 const struct bge_revision *
2531 bge_lookup_rev(u_int32_t chipid)
2532 {
2533 	const struct bge_revision *br;
2534 
2535 	for (br = bge_revisions; br->br_name != NULL; br++) {
2536 		if (br->br_chipid == chipid)
2537 			return (br);
2538 	}
2539 
2540 	for (br = bge_majorrevs; br->br_name != NULL; br++) {
2541 		if (br->br_chipid == BGE_ASICREV(chipid))
2542 			return (br);
2543 	}
2544 
2545 	return (NULL);
2546 }
2547 
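/*
 * MSI is known to be broken on some chip revisions; only report it
 * as usable where it works reliably.
 */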
2548 int
2549 bge_can_use_msi(struct bge_softc *sc)
2550 {
2551 	int can_use_msi = 0;
2552 
2553 	switch (BGE_ASICREV(sc->bge_chipid)) {
2554 	case BGE_ASICREV_BCM5714_A0:
2555 	case BGE_ASICREV_BCM5714:
2556 		/*
2557 		 * Apparently, MSI doesn't work when these chips are
2558 		 * configured in single-port mode.
2559 		 */
2560 		break;
2561 	case BGE_ASICREV_BCM5750:
2562 		if (BGE_CHIPREV(sc->bge_chipid) != BGE_CHIPREV_5750_AX &&
2563 		    BGE_CHIPREV(sc->bge_chipid) != BGE_CHIPREV_5750_BX)
2564 			can_use_msi = 1;
2565 		break;
2566 	default:
2567 		if (BGE_IS_575X_PLUS(sc))
2568 			can_use_msi = 1;
2569 	}
2570 
2571 	return (can_use_msi);
2572 }
2573 
2574 /*
2575  * Probe for a Broadcom chip. Check the PCI vendor and device IDs
2576  * against our list and return its name if we find a match. Note
2577  * that since the Broadcom controller contains VPD support, we
2578  * can get the device name string from the controller itself instead
2579  * of the compiled-in string. This is a little slow, but it guarantees
2580  * we'll always announce the right product name.
2581  */
2582 int
2583 bge_probe(struct device *parent, void *match, void *aux)
2584 {
2585 	return (pci_matchbyid(aux, bge_devices, nitems(bge_devices)));
2586 }
2587 
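/*
 * Attach the interface: map the registers, identify the chip and its
 * quirks, allocate the DMA rings and hook the driver into the
 * network stack.
 */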
2588 void
2589 bge_attach(struct device *parent, struct device *self, void *aux)
2590 {
2591 	struct bge_softc	*sc = (struct bge_softc *)self;
2592 	struct pci_attach_args	*pa = aux;
2593 	pci_chipset_tag_t	pc = pa->pa_pc;
2594 	const struct bge_revision *br;
2595 	pcireg_t		pm_ctl, memtype, subid, reg;
2596 	pci_intr_handle_t	ih;
2597 	const char		*intrstr = NULL;
2598 	int			gotenaddr = 0;
2599 	u_int32_t		hwcfg = 0;
2600 	u_int32_t		mac_addr = 0;
2601 	u_int32_t		misccfg;
2602 	struct ifnet		*ifp;
2603 	caddr_t			kva;
2604 #ifdef __sparc64__
2605 	char			name[32];
2606 #endif
2607 
2608 	sc->bge_pa = *pa;
2609 
2610 	subid = pci_conf_read(pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
2611 
2612 	/*
2613 	 * Map control/status registers.
2614 	 */
2615 	DPRINTFN(5, ("Map control/status regs\n"));
2616 
2617 	DPRINTFN(5, ("pci_mapreg_map\n"));
2618 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BGE_PCI_BAR0);
2619 	if (pci_mapreg_map(pa, BGE_PCI_BAR0, memtype, 0, &sc->bge_btag,
2620 	    &sc->bge_bhandle, NULL, &sc->bge_bsize, 0)) {
2621 		printf(": can't find mem space\n");
2622 		return;
2623 	}
2624 
2625 	/*
2626 	 * Kludge for 5700 Bx bug: a hardware bug (PCIX byte enable?)
2627 	 * can clobber the chip's PCI config-space power control registers,
2628 	 * leaving the card in D3 powersave state.
2629 	 * We do not have memory-mapped registers in this state,
2630 	 * so force the device into D0 state before starting initialization.
2631 	 */
2632 	pm_ctl = pci_conf_read(pc, pa->pa_tag, BGE_PCI_PWRMGMT_CMD);
2633 	pm_ctl &= ~(PCI_PWR_D0|PCI_PWR_D1|PCI_PWR_D2|PCI_PWR_D3);
2634 	pm_ctl |= (1 << 8) | PCI_PWR_D0; /* D0 state */
2635 	pci_conf_write(pc, pa->pa_tag, BGE_PCI_PWRMGMT_CMD, pm_ctl);
2636 	DELAY(1000);	/* 27 usec is allegedly sufficient */
2637 
2638 	/*
2639 	 * Save ASIC rev.
2640 	 */
2641 	sc->bge_chipid =
2642 	     (pci_conf_read(pc, pa->pa_tag, BGE_PCI_MISC_CTL)
2643 	      >> BGE_PCIMISCCTL_ASICREV_SHIFT);
2644 
2645 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_USE_PRODID_REG) {
2646 		switch (PCI_PRODUCT(pa->pa_id)) {
2647 		case PCI_PRODUCT_BROADCOM_BCM5717:
2648 		case PCI_PRODUCT_BROADCOM_BCM5718:
2649 		case PCI_PRODUCT_BROADCOM_BCM5719:
2650 		case PCI_PRODUCT_BROADCOM_BCM5720:
2651 		case PCI_PRODUCT_BROADCOM_BCM5725:
2652 		case PCI_PRODUCT_BROADCOM_BCM5727:
2653 		case PCI_PRODUCT_BROADCOM_BCM5762:
2654 		case PCI_PRODUCT_BROADCOM_BCM57764:
2655 		case PCI_PRODUCT_BROADCOM_BCM57767:
2656 		case PCI_PRODUCT_BROADCOM_BCM57787:
2657 			sc->bge_chipid = pci_conf_read(pc, pa->pa_tag,
2658 			    BGE_PCI_GEN2_PRODID_ASICREV);
2659 			break;
2660 		case PCI_PRODUCT_BROADCOM_BCM57761:
2661 		case PCI_PRODUCT_BROADCOM_BCM57762:
2662 		case PCI_PRODUCT_BROADCOM_BCM57765:
2663 		case PCI_PRODUCT_BROADCOM_BCM57766:
2664 		case PCI_PRODUCT_BROADCOM_BCM57781:
2665 		case PCI_PRODUCT_BROADCOM_BCM57782:
2666 		case PCI_PRODUCT_BROADCOM_BCM57785:
2667 		case PCI_PRODUCT_BROADCOM_BCM57786:
2668 		case PCI_PRODUCT_BROADCOM_BCM57791:
2669 		case PCI_PRODUCT_BROADCOM_BCM57795:
2670 			sc->bge_chipid = pci_conf_read(pc, pa->pa_tag,
2671 			    BGE_PCI_GEN15_PRODID_ASICREV);
2672 			break;
2673 		default:
2674 			sc->bge_chipid = pci_conf_read(pc, pa->pa_tag,
2675 			    BGE_PCI_PRODID_ASICREV);
2676 			break;
2677 		}
2678 	}
2679 
2680 	sc->bge_phy_addr = bge_phy_addr(sc);
2681 
2682 	printf(", ");
2683 	br = bge_lookup_rev(sc->bge_chipid);
2684 	if (br == NULL)
2685 		printf("unknown ASIC (0x%x)", sc->bge_chipid);
2686 	else
2687 		printf("%s (0x%x)", br->br_name, sc->bge_chipid);
2688 
2689 	/*
2690 	 * PCI Express or PCI-X controller check.
2691 	 */
2692 	if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIEXPRESS,
2693 	    &sc->bge_expcap, NULL) != 0) {
2694 		/* Extract supported maximum payload size. */
2695 		reg = pci_conf_read(pa->pa_pc, pa->pa_tag, sc->bge_expcap +
2696 		    PCI_PCIE_DCAP);
2697 		sc->bge_mps = 128 << (reg & 0x7);
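		/*
		 * Pre-encode the maximum read request size for the PCIe
		 * device control register: log2(size / 128) in bits 14:12,
		 * so 2048 bytes -> 4 and 4096 bytes -> 5.
		 */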
2698 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 ||
2699 		    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720)
2700 			sc->bge_expmrq = (fls(2048) - 8) << 12;
2701 		else
2702 			sc->bge_expmrq = (fls(4096) - 8) << 12;
2703 		/* Disable PCIe Active State Power Management (ASPM). */
2704 		reg = pci_conf_read(pa->pa_pc, pa->pa_tag,
2705 		    sc->bge_expcap + PCI_PCIE_LCSR);
2706 		reg &= ~(PCI_PCIE_LCSR_ASPM_L0S | PCI_PCIE_LCSR_ASPM_L1);
2707 		pci_conf_write(pa->pa_pc, pa->pa_tag,
2708 		    sc->bge_expcap + PCI_PCIE_LCSR, reg);
2709 		sc->bge_flags |= BGE_PCIE;
2710 	} else {
2711 		if ((pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE) &
2712 		    BGE_PCISTATE_PCI_BUSMODE) == 0)
2713 			sc->bge_flags |= BGE_PCIX;
2714 	}
2715 
2716 	/*
2717 	 * SEEPROM check.
2718 	 */
2719 #ifdef __sparc64__
2720 	/*
2721 	 * Onboard interfaces on UltraSPARC systems generally don't
2722 	 * have a SEEPROM fitted.  These interfaces, and cards that
2723 	 * have FCode, are named "network" by the PROM, whereas cards
2724 	 * without FCode show up as "ethernet".  Since we don't really
2725 	 * need the information from the SEEPROM on cards that have
2726 	 * FCode, it's fine to pretend they don't have one.
2727 	 */
2728 	if (OF_getprop(PCITAG_NODE(pa->pa_tag), "name", name,
2729 	    sizeof(name)) > 0 && strcmp(name, "network") == 0)
2730 		sc->bge_flags |= BGE_NO_EEPROM;
2731 #endif
2732 
2733 	/* Save chipset family. */
2734 	switch (BGE_ASICREV(sc->bge_chipid)) {
2735 	case BGE_ASICREV_BCM5762:
2736 	case BGE_ASICREV_BCM57765:
2737 	case BGE_ASICREV_BCM57766:
2738 		sc->bge_flags |= BGE_57765_PLUS;
2739 		/* FALLTHROUGH */
2740 	case BGE_ASICREV_BCM5717:
2741 	case BGE_ASICREV_BCM5719:
2742 	case BGE_ASICREV_BCM5720:
2743 		sc->bge_flags |= BGE_5717_PLUS | BGE_5755_PLUS | BGE_575X_PLUS |
2744 		    BGE_5705_PLUS | BGE_JUMBO_CAPABLE | BGE_JUMBO_RING |
2745 		    BGE_JUMBO_FRAME;
2746 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 ||
2747 		    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) {
2748 			/*
2749 			 * Enable the workaround for the DMA engine's
2750 			 * miscalculation of available TXMBUF space.
2751 			 */
2752 			sc->bge_flags |= BGE_RDMA_BUG;
2753 
2754 			if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 &&
2755 			    sc->bge_chipid == BGE_CHIPID_BCM5719_A0) {
2756 				/* Jumbo frame on BCM5719 A0 does not work. */
2757 				sc->bge_flags &= ~(BGE_JUMBO_CAPABLE |
2758 				    BGE_JUMBO_RING | BGE_JUMBO_FRAME);
2759 			}
2760 		}
2761 		break;
2762 	case BGE_ASICREV_BCM5755:
2763 	case BGE_ASICREV_BCM5761:
2764 	case BGE_ASICREV_BCM5784:
2765 	case BGE_ASICREV_BCM5785:
2766 	case BGE_ASICREV_BCM5787:
2767 	case BGE_ASICREV_BCM57780:
2768 		sc->bge_flags |= BGE_5755_PLUS | BGE_575X_PLUS | BGE_5705_PLUS;
2769 		break;
2770 	case BGE_ASICREV_BCM5700:
2771 	case BGE_ASICREV_BCM5701:
2772 	case BGE_ASICREV_BCM5703:
2773 	case BGE_ASICREV_BCM5704:
2774 		sc->bge_flags |= BGE_5700_FAMILY | BGE_JUMBO_CAPABLE | BGE_JUMBO_RING;
2775 		break;
2776 	case BGE_ASICREV_BCM5714_A0:
2777 	case BGE_ASICREV_BCM5780:
2778 	case BGE_ASICREV_BCM5714:
2779 		sc->bge_flags |= BGE_5714_FAMILY | BGE_JUMBO_CAPABLE | BGE_JUMBO_STD;
2780 		/* FALLTHROUGH */
2781 	case BGE_ASICREV_BCM5750:
2782 	case BGE_ASICREV_BCM5752:
2783 	case BGE_ASICREV_BCM5906:
2784 		sc->bge_flags |= BGE_575X_PLUS;
2785 		/* FALLTHROUGH */
2786 	case BGE_ASICREV_BCM5705:
2787 		sc->bge_flags |= BGE_5705_PLUS;
2788 		break;
2789 	}
2790 
2791 	if (sc->bge_flags & BGE_JUMBO_STD)
2792 		sc->bge_rx_std_len = BGE_JLEN;
2793 	else
2794 		sc->bge_rx_std_len = MCLBYTES;
2795 
2796 	/*
2797 	 * When using the BCM5701 in PCI-X mode, data corruption has
2798 	 * been observed in the first few bytes of some received packets.
2799 	 * Aligning the packet buffer in memory eliminates the corruption.
2800 	 * Unfortunately, this misaligns the packet payloads.  On platforms
2801 	 * which do not support unaligned accesses, we will realign the
2802 	 * payloads by copying the received packets.
2803 	 */
2804 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701 &&
2805 	    sc->bge_flags & BGE_PCIX)
2806 		sc->bge_flags |= BGE_RX_ALIGNBUG;
2807 
2808 	if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
2809 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701) &&
2810 	    PCI_VENDOR(subid) == DELL_VENDORID)
2811 		sc->bge_phy_flags |= BGE_PHY_NO_3LED;
2812 
2813 	misccfg = CSR_READ_4(sc, BGE_MISC_CFG);
2814 	misccfg &= BGE_MISCCFG_BOARD_ID_MASK;
2815 
2816 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 &&
2817 	    (misccfg == BGE_MISCCFG_BOARD_ID_5788 ||
2818 	     misccfg == BGE_MISCCFG_BOARD_ID_5788M))
2819 		sc->bge_flags |= BGE_IS_5788;
2820 
2821 	if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 &&
2822 	     (misccfg == 0x4000 || misccfg == 0x8000)) ||
2823 	    (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 &&
2824 	     PCI_VENDOR(pa->pa_id) == PCI_VENDOR_BROADCOM &&
2825 	     (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5901 ||
2826 	      PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5901A2 ||
2827 	      PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5705F)) ||
2828 	    (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_BROADCOM &&
2829 	     (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5751F ||
2830 	      PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5753F ||
2831 	      PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5787F)) ||
2832 	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57790 ||
2833 	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57791 ||
2834 	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57795 ||
2835 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
2836 		sc->bge_phy_flags |= BGE_PHY_10_100_ONLY;
2837 
2838 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
2839 	    (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 &&
2840 	     (sc->bge_chipid != BGE_CHIPID_BCM5705_A0 &&
2841 	      sc->bge_chipid != BGE_CHIPID_BCM5705_A1)) ||
2842 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
2843 		sc->bge_phy_flags |= BGE_PHY_NO_WIRESPEED;
2844 
2845 	if (sc->bge_chipid == BGE_CHIPID_BCM5701_A0 ||
2846 	    sc->bge_chipid == BGE_CHIPID_BCM5701_B0)
2847 		sc->bge_phy_flags |= BGE_PHY_CRC_BUG;
2848 	if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5703_AX ||
2849 	    BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5704_AX)
2850 		sc->bge_phy_flags |= BGE_PHY_ADC_BUG;
2851 	if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0)
2852 		sc->bge_phy_flags |= BGE_PHY_5704_A0_BUG;
2853 
2854 	if ((BGE_IS_5705_PLUS(sc)) &&
2855 	    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5906 &&
2856 	    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5785 &&
2857 	    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM57780 &&
2858 	    !BGE_IS_5717_PLUS(sc)) {
2859 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5755 ||
2860 		    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761 ||
2861 		    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 ||
2862 		    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5787) {
2863 			if (PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5722 &&
2864 			    PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5756)
2865 				sc->bge_phy_flags |= BGE_PHY_JITTER_BUG;
2866 			if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5755M)
2867 				sc->bge_phy_flags |= BGE_PHY_ADJUST_TRIM;
2868 		} else
2869 			sc->bge_phy_flags |= BGE_PHY_BER_BUG;
2870 	}
2871 
2872 	/* Identify chips with an APE processor. */
2873 	switch (BGE_ASICREV(sc->bge_chipid)) {
2874 	case BGE_ASICREV_BCM5717:
2875 	case BGE_ASICREV_BCM5719:
2876 	case BGE_ASICREV_BCM5720:
2877 	case BGE_ASICREV_BCM5761:
2878 	case BGE_ASICREV_BCM5762:
2879 		sc->bge_flags |= BGE_APE;
2880 		break;
2881 	}
2882 
2883 	/* Chips with APE need BAR2 access for APE registers/memory. */
2884 	if ((sc->bge_flags & BGE_APE) != 0) {
2885 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BGE_PCI_BAR2);
2886 		if (pci_mapreg_map(pa, BGE_PCI_BAR2, memtype, 0,
2887 		    &sc->bge_apetag, &sc->bge_apehandle, NULL,
2888 		    &sc->bge_apesize, 0)) {
2889 			printf(": couldn't map BAR2 memory\n");
2890 			goto fail_1;
2891 		}
2892 
2893 		/* Enable APE register/memory access by host driver. */
2894 		reg = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE);
2895 		reg |= BGE_PCISTATE_ALLOW_APE_CTLSPC_WR |
2896 		    BGE_PCISTATE_ALLOW_APE_SHMEM_WR |
2897 		    BGE_PCISTATE_ALLOW_APE_PSPACE_WR;
2898 		pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE, reg);
2899 
2900 		bge_ape_lock_init(sc);
2901 		bge_ape_read_fw_ver(sc);
2902 	}
2903 
2904 	/* Identify the chips that use a CPMU. */
2905 	if (BGE_IS_5717_PLUS(sc) ||
2906 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 ||
2907 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761 ||
2908 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785 ||
2909 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57780)
2910 		sc->bge_flags |= BGE_CPMU_PRESENT;
2911 
2912 	if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_MSI,
2913 	    &sc->bge_msicap, NULL)) {
2914 		if (bge_can_use_msi(sc) == 0)
2915 			pa->pa_flags &= ~PCI_FLAGS_MSI_ENABLED;
2916 	}
2917 
2918 	DPRINTFN(5, ("pci_intr_map\n"));
2919 	if (pci_intr_map_msi(pa, &ih) == 0)
2920 		sc->bge_flags |= BGE_MSI;
2921 	else if (pci_intr_map(pa, &ih)) {
2922 		printf(": couldn't map interrupt\n");
2923 		goto fail_1;
2924 	}
2925 
2926 	/*
2927 	 * All controllers except the BCM5700 support tagged status, but
2928 	 * we use tagged status only for the MSI case on BCM5717; otherwise
2929 	 * MSI on BCM5717 does not work.
2930 	 */
2931 	if (BGE_IS_5717_PLUS(sc) && sc->bge_flags & BGE_MSI)
2932 		sc->bge_flags |= BGE_TAGGED_STATUS;
2933 
2934 	DPRINTFN(5, ("pci_intr_string\n"));
2935 	intrstr = pci_intr_string(pc, ih);
2936 
2937 	/* Try to reset the chip. */
2938 	DPRINTFN(5, ("bge_reset\n"));
2939 	bge_sig_pre_reset(sc, BGE_RESET_SHUTDOWN);
2940 	bge_reset(sc);
2941 
2942 	bge_sig_legacy(sc, BGE_RESET_SHUTDOWN);
2943 	bge_sig_post_reset(sc, BGE_RESET_SHUTDOWN);
2944 
2945 	bge_chipinit(sc);
2946 
2947 #if defined(__sparc64__) || defined(__HAVE_FDT)
2948 	if (!gotenaddr && PCITAG_NODE(pa->pa_tag)) {
2949 		if (OF_getprop(PCITAG_NODE(pa->pa_tag), "local-mac-address",
2950 		    sc->arpcom.ac_enaddr, ETHER_ADDR_LEN) == ETHER_ADDR_LEN)
2951 			gotenaddr = 1;
2952 	}
2953 #endif
2954 
2955 	/*
2956 	 * Get station address from the EEPROM.
2957 	 */
2958 	if (!gotenaddr) {
2959 		mac_addr = bge_readmem_ind(sc, 0x0c14);
2960 		if ((mac_addr >> 16) == 0x484b) {
2961 			sc->arpcom.ac_enaddr[0] = (u_char)(mac_addr >> 8);
2962 			sc->arpcom.ac_enaddr[1] = (u_char)mac_addr;
2963 			mac_addr = bge_readmem_ind(sc, 0x0c18);
2964 			sc->arpcom.ac_enaddr[2] = (u_char)(mac_addr >> 24);
2965 			sc->arpcom.ac_enaddr[3] = (u_char)(mac_addr >> 16);
2966 			sc->arpcom.ac_enaddr[4] = (u_char)(mac_addr >> 8);
2967 			sc->arpcom.ac_enaddr[5] = (u_char)mac_addr;
2968 			gotenaddr = 1;
2969 		}
2970 	}
2971 	if (!gotenaddr) {
2972 		int mac_offset = BGE_EE_MAC_OFFSET;
2973 
2974 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
2975 			mac_offset = BGE_EE_MAC_OFFSET_5906;
2976 
2977 		if (bge_read_nvram(sc, (caddr_t)&sc->arpcom.ac_enaddr,
2978 		    mac_offset + 2, ETHER_ADDR_LEN) == 0)
2979 			gotenaddr = 1;
2980 	}
2981 	if (!gotenaddr && (!(sc->bge_flags & BGE_NO_EEPROM))) {
2982 		if (bge_read_eeprom(sc, (caddr_t)&sc->arpcom.ac_enaddr,
2983 		    BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN) == 0)
2984 			gotenaddr = 1;
2985 	}
2986 
2987 #ifdef __sparc64__
2988 	if (!gotenaddr) {
2989 		extern void myetheraddr(u_char *);
2990 
2991 		myetheraddr(sc->arpcom.ac_enaddr);
2992 		gotenaddr = 1;
2993 	}
2994 #endif
2995 
2996 	if (!gotenaddr) {
2997 		printf(": failed to read station address\n");
2998 		goto fail_2;
2999 	}
3000 
3001 	/* Allocate the general information block and ring buffers. */
3002 	sc->bge_dmatag = pa->pa_dmat;
3003 	DPRINTFN(5, ("bus_dmamem_alloc\n"));
3004 	if (bus_dmamem_alloc(sc->bge_dmatag, sizeof(struct bge_ring_data),
3005 	    PAGE_SIZE, 0, &sc->bge_ring_seg, 1, &sc->bge_ring_nseg,
3006 	    BUS_DMA_NOWAIT)) {
3007 		printf(": can't alloc rx buffers\n");
3008 		goto fail_2;
3009 	}
3010 	DPRINTFN(5, ("bus_dmamem_map\n"));
3011 	if (bus_dmamem_map(sc->bge_dmatag, &sc->bge_ring_seg,
3012 	    sc->bge_ring_nseg, sizeof(struct bge_ring_data), &kva,
3013 	    BUS_DMA_NOWAIT)) {
3014 		printf(": can't map dma buffers (%lu bytes)\n",
3015 		    sizeof(struct bge_ring_data));
3016 		goto fail_3;
3017 	}
3018 	DPRINTFN(5, ("bus_dmamap_create\n"));
3019 	if (bus_dmamap_create(sc->bge_dmatag, sizeof(struct bge_ring_data), 1,
3020 	    sizeof(struct bge_ring_data), 0,
3021 	    BUS_DMA_NOWAIT, &sc->bge_ring_map)) {
3022 		printf(": can't create dma map\n");
3023 		goto fail_4;
3024 	}
3025 	DPRINTFN(5, ("bus_dmamap_load\n"));
3026 	if (bus_dmamap_load(sc->bge_dmatag, sc->bge_ring_map, kva,
3027 			    sizeof(struct bge_ring_data), NULL,
3028 			    BUS_DMA_NOWAIT)) {
3029 		goto fail_5;
3030 	}
3031 
3032 	DPRINTFN(5, ("bzero\n"));
3033 	sc->bge_rdata = (struct bge_ring_data *)kva;
3034 
3035 	bzero(sc->bge_rdata, sizeof(struct bge_ring_data));
3036 
3037 	/* Set default tuneable values. */
3038 	sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
3039 	sc->bge_rx_coal_ticks = 150;
3040 	sc->bge_rx_max_coal_bds = 64;
3041 	sc->bge_tx_coal_ticks = 300;
3042 	sc->bge_tx_max_coal_bds = 400;
3043 
3044 	/* 5705 limits RX return ring to 512 entries. */
3045 	if (BGE_IS_5700_FAMILY(sc) || BGE_IS_5717_PLUS(sc))
3046 		sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
3047 	else
3048 		sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
3049 
3050 	mtx_init(&sc->bge_kstat_mtx, IPL_SOFTCLOCK);
3051 #if NKSTAT > 0
3052 	if (BGE_IS_5705_PLUS(sc))
3053 		bge_kstat_attach(sc);
3054 #endif
3055 
3056 	/* Set up ifnet structure */
3057 	ifp = &sc->arpcom.ac_if;
3058 	ifp->if_softc = sc;
3059 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
3060 	ifp->if_xflags = IFXF_MPSAFE;
3061 	ifp->if_ioctl = bge_ioctl;
3062 	ifp->if_qstart = bge_start;
3063 	ifp->if_watchdog = bge_watchdog;
3064 	ifq_init_maxlen(&ifp->if_snd, BGE_TX_RING_CNT - 1);
3065 
3066 	DPRINTFN(5, ("bcopy\n"));
3067 	bcopy(sc->bge_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
3068 
3069 	ifp->if_capabilities = IFCAP_VLAN_MTU;
3070 
3071 #if NVLAN > 0
3072 	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
3073 #endif
3074 
3075 	/*
3076 	 * 5700 B0 chips do not support checksumming correctly due
3077 	 * to hardware bugs.
3078 	 *
3079 	 * It seems all controllers have a bug that can generate UDP
3080 	 * datagrams with a checksum value of 0 when TX UDP checksum
3081 	 * offloading is enabled. Generating a UDP checksum of 0 is
3082 	 * a violation of RFC 768.
3083 	 */
3084 	if (sc->bge_chipid != BGE_CHIPID_BCM5700_B0)
3085 		ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4;
3086 
3087 	if (BGE_IS_JUMBO_CAPABLE(sc))
3088 		ifp->if_hardmtu = BGE_JUMBO_MTU;
3089 
3090 	/*
3091 	 * Do MII setup.
3092 	 */
3093 	DPRINTFN(5, ("mii setup\n"));
3094 	sc->bge_mii.mii_ifp = ifp;
3095 	sc->bge_mii.mii_readreg = bge_miibus_readreg;
3096 	sc->bge_mii.mii_writereg = bge_miibus_writereg;
3097 	sc->bge_mii.mii_statchg = bge_miibus_statchg;
3098 
3099 	/*
3100 	 * Figure out what sort of media we have by checking the hardware
3101 	 * config word in the first 32K of internal NIC memory, or fall back to
3102 	 * examining the EEPROM if necessary.  Note: on some BCM5700 cards,
3103 	 * this value seems to be unset. If that's the case, we have to rely on
3104 	 * identifying the NIC by its PCI subsystem ID, as we do below for the
3105 	 * SysKonnect SK-9D41.
3106 	 */
3107 	if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER)
3108 		hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
3109 	else if (!(sc->bge_flags & BGE_NO_EEPROM)) {
3110 		if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
3111 		    sizeof(hwcfg))) {
3112 			printf(": failed to read media type\n");
3113 			goto fail_6;
3114 		}
3115 		hwcfg = ntohl(hwcfg);
3116 	}
3117 
3118 	/* The SysKonnect SK-9D41 is a 1000baseSX card. */
3119 	if (PCI_PRODUCT(subid) == SK_SUBSYSID_9D41 ||
3120 	    (hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) {
3121 		if (BGE_IS_5700_FAMILY(sc))
3122 			sc->bge_flags |= BGE_FIBER_TBI;
3123 		else
3124 			sc->bge_flags |= BGE_FIBER_MII;
3125 	}
3126 
3127 	/* Take advantage of single-shot MSI. */
3128 	if (BGE_IS_5755_PLUS(sc) && sc->bge_flags & BGE_MSI)
3129 		CSR_WRITE_4(sc, BGE_MSI_MODE, CSR_READ_4(sc, BGE_MSI_MODE) &
3130 		    ~BGE_MSIMODE_ONE_SHOT_DISABLE);
3131 
3132 	/* Hookup IRQ last. */
3133 	DPRINTFN(5, ("pci_intr_establish\n"));
3134 	sc->bge_intrhand = pci_intr_establish(pc, ih, IPL_NET | IPL_MPSAFE,
3135 	    bge_intr, sc, sc->bge_dev.dv_xname);
3136 	if (sc->bge_intrhand == NULL) {
3137 		printf(": couldn't establish interrupt");
3138 		if (intrstr != NULL)
3139 			printf(" at %s", intrstr);
3140 		printf("\n");
3141 		goto fail_6;
3142 	}
3143 
3144 	/*
3145 	 * A Broadcom chip was detected. Inform the world.
3146 	 */
3147 	printf(": %s, address %s\n", intrstr,
3148 	    ether_sprintf(sc->arpcom.ac_enaddr));
3149 
3150 	if (sc->bge_flags & BGE_FIBER_TBI) {
3151 		ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd,
3152 		    bge_ifmedia_sts);
3153 		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
3154 		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX|IFM_FDX,
3155 			    0, NULL);
3156 		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
3157 		ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO);
3158 		sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
3159 	} else {
3160 		int mii_flags;
3161 
3162 		/*
3163 		 * Do transceiver setup.
3164 		 */
3165 		ifmedia_init(&sc->bge_mii.mii_media, 0, bge_ifmedia_upd,
3166 			     bge_ifmedia_sts);
3167 		mii_flags = MIIF_DOPAUSE;
3168 		if (sc->bge_flags & BGE_FIBER_MII)
3169 			mii_flags |= MIIF_HAVEFIBER;
3170 		mii_attach(&sc->bge_dev, &sc->bge_mii, 0xffffffff,
3171 		    sc->bge_phy_addr, MII_OFFSET_ANY, mii_flags);
3172 
3173 		if (LIST_FIRST(&sc->bge_mii.mii_phys) == NULL) {
3174 			printf("%s: no PHY found!\n", sc->bge_dev.dv_xname);
3175 			ifmedia_add(&sc->bge_mii.mii_media,
3176 				    IFM_ETHER|IFM_MANUAL, 0, NULL);
3177 			ifmedia_set(&sc->bge_mii.mii_media,
3178 				    IFM_ETHER|IFM_MANUAL);
3179 		} else
3180 			ifmedia_set(&sc->bge_mii.mii_media,
3181 				    IFM_ETHER|IFM_AUTO);
3182 	}
3183 
3184 	/*
3185 	 * Call MI attach routine.
3186 	 */
3187 	if_attach(ifp);
3188 	ether_ifattach(ifp);
3189 
3190 	timeout_set(&sc->bge_timeout, bge_tick, sc);
3191 	timeout_set(&sc->bge_rxtimeout, bge_rxtick, sc);
3192 	timeout_set(&sc->bge_rxtimeout_jumbo, bge_rxtick_jumbo, sc);
3193 	return;
3194 
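/*
 * The fail_* labels below unwind the attach-time allocations in
 * reverse order; each label releases only what had already been
 * set up when the corresponding failure occurred.
 */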
3195 fail_6:
3196 	bus_dmamap_unload(sc->bge_dmatag, sc->bge_ring_map);
3197 
3198 fail_5:
3199 	bus_dmamap_destroy(sc->bge_dmatag, sc->bge_ring_map);
3200 
3201 fail_4:
3202 	bus_dmamem_unmap(sc->bge_dmatag, (caddr_t)sc->bge_rdata,
3203 	    sizeof(struct bge_ring_data));
3204 
3205 fail_3:
3206 	bus_dmamem_free(sc->bge_dmatag, &sc->bge_ring_seg, sc->bge_ring_nseg);
3207 
3208 fail_2:
3209 	if ((sc->bge_flags & BGE_APE) != 0)
3210 		bus_space_unmap(sc->bge_apetag, sc->bge_apehandle,
3211 		    sc->bge_apesize);
3212 
3213 fail_1:
3214 	bus_space_unmap(sc->bge_btag, sc->bge_bhandle, sc->bge_bsize);
3215 }
3216 
3217 int
3218 bge_detach(struct device *self, int flags)
3219 {
3220 	struct bge_softc *sc = (struct bge_softc *)self;
3221 	struct ifnet *ifp = &sc->arpcom.ac_if;
3222 
3223 	bge_stop(sc, 1);
3224 
3225 	if (sc->bge_intrhand)
3226 		pci_intr_disestablish(sc->bge_pa.pa_pc, sc->bge_intrhand);
3227 
3228 	/* Detach any PHYs we might have. */
3229 	if (LIST_FIRST(&sc->bge_mii.mii_phys) != NULL)
3230 		mii_detach(&sc->bge_mii, MII_PHY_ANY, MII_OFFSET_ANY);
3231 
3232 	/* Delete any remaining media. */
3233 	ifmedia_delete_instance(&sc->bge_mii.mii_media, IFM_INST_ANY);
3234 
3235 	ether_ifdetach(ifp);
3236 	if_detach(ifp);
3237 
3238 	bus_dmamap_unload(sc->bge_dmatag, sc->bge_ring_map);
3239 	bus_dmamap_destroy(sc->bge_dmatag, sc->bge_ring_map);
3240 	bus_dmamem_unmap(sc->bge_dmatag, (caddr_t)sc->bge_rdata,
3241 	    sizeof(struct bge_ring_data));
3242 	bus_dmamem_free(sc->bge_dmatag, &sc->bge_ring_seg, sc->bge_ring_nseg);
3243 
3244 	if ((sc->bge_flags & BGE_APE) != 0)
3245 		bus_space_unmap(sc->bge_apetag, sc->bge_apehandle,
3246 		    sc->bge_apesize);
3247 
3248 	bus_space_unmap(sc->bge_btag, sc->bge_bhandle, sc->bge_bsize);
3249 	return (0);
3250 }
3251 
3252 int
3253 bge_activate(struct device *self, int act)
3254 {
3255 	struct bge_softc *sc = (struct bge_softc *)self;
3256 	struct ifnet *ifp = &sc->arpcom.ac_if;
3257 	int rv = 0;
3258 
3259 	switch (act) {
3260 	case DVACT_SUSPEND:
3261 		rv = config_activate_children(self, act);
3262 		if (ifp->if_flags & IFF_RUNNING)
3263 			bge_stop(sc, 0);
3264 		break;
3265 	case DVACT_RESUME:
3266 		if (ifp->if_flags & IFF_UP)
3267 			bge_init(sc);
3268 		break;
3269 	default:
3270 		rv = config_activate_children(self, act);
3271 		break;
3272 	}
3273 	return (rv);
3274 }
3275 
3276 void
3277 bge_reset(struct bge_softc *sc)
3278 {
3279 	struct pci_attach_args *pa = &sc->bge_pa;
3280 	pcireg_t cachesize, command, devctl;
3281 	u_int32_t reset, mac_mode, mac_mode_mask, val;
3282 	void (*write_op)(struct bge_softc *, int, int);
3283 	int i;
3284 
3285 	mac_mode_mask = BGE_MACMODE_HALF_DUPLEX | BGE_MACMODE_PORTMODE;
3286 	if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) != 0)
3287 		mac_mode_mask |= BGE_MACMODE_APE_RX_EN | BGE_MACMODE_APE_TX_EN;
3288 	mac_mode = CSR_READ_4(sc, BGE_MAC_MODE) & mac_mode_mask;
3289 
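	/*
	 * Choose how the reset will be posted: PCIe parts use the
	 * mailbox path, other 575x-class chips indirect memory access,
	 * and everything else indirect register access.  Indirect
	 * access is used because the memory-mapped window may not be
	 * usable while the core is resetting itself.
	 */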
3290 	if (BGE_IS_575X_PLUS(sc) && !BGE_IS_5714_FAMILY(sc) &&
3291 	    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5906) {
3292 		if (sc->bge_flags & BGE_PCIE)
3293 			write_op = bge_writembx;
3294 		else
3295 			write_op = bge_writemem_ind;
3296 	} else
3297 		write_op = bge_writereg_ind;
3298 
3299 	if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5700 &&
3300 	    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5701 &&
3301 	    !(sc->bge_flags & BGE_NO_EEPROM)) {
3302 		CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
3303 		for (i = 0; i < 8000; i++) {
3304 			if (CSR_READ_4(sc, BGE_NVRAM_SWARB) &
3305 			    BGE_NVRAMSWARB_GNT1)
3306 				break;
3307 			DELAY(20);
3308 		}
3309 		if (i == 8000)
3310 			printf("%s: nvram lock timed out\n",
3311 			    sc->bge_dev.dv_xname);
3312 	}
3313 	/* Take APE lock when performing reset. */
3314 	bge_ape_lock(sc, BGE_APE_LOCK_GRC);
3315 
3316 	/* Save some important PCI state. */
3317 	cachesize = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CACHESZ);
3318 	command = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD);
3319 
3320 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL,
3321 	    BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
3322 	    BGE_PCIMISCCTL_ENDIAN_WORDSWAP | BGE_PCIMISCCTL_PCISTATE_RW);
3323 
3324 	/* Disable fastboot on controllers that support it. */
3325 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5752 ||
3326 	    BGE_IS_5755_PLUS(sc))
3327 		CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0);
3328 
3329 	/*
3330 	 * Write the magic number to SRAM at offset 0xB50.
3331 	 * When firmware finishes its initialization it will
3332 	 * write ~BGE_MAGIC_NUMBER to the same location.
3333 	 */
3334 	bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
3335 
3336 	reset = BGE_MISCCFG_RESET_CORE_CLOCKS | BGE_32BITTIME_66MHZ;
3337 
3338 	if (sc->bge_flags & BGE_PCIE) {
3339 		if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5785 &&
3340 		    !BGE_IS_5717_PLUS(sc)) {
3341 			if (CSR_READ_4(sc, 0x7e2c) == 0x60) {
3342 				/* PCI Express 1.0 system */
3343 				CSR_WRITE_4(sc, 0x7e2c, 0x20);
3344 			}
3345 		}
3346 		if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
3347 			/*
3348 			 * Prevent PCI Express link training
3349 			 * during global reset.
3350 			 */
3351 			CSR_WRITE_4(sc, BGE_MISC_CFG, (1<<29));
3352 			reset |= (1<<29);
3353 		}
3354 	}
3355 
3356 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
3357 		val = CSR_READ_4(sc, BGE_VCPU_STATUS);
3358 		CSR_WRITE_4(sc, BGE_VCPU_STATUS,
3359 		    val | BGE_VCPU_STATUS_DRV_RESET);
3360 		val = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL);
3361 		CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL,
3362 		    val & ~BGE_VCPU_EXT_CTRL_HALT_CPU);
3363 
3364 		sc->bge_flags |= BGE_NO_EEPROM;
3365 	}
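	/*
	 * The BCM5906 boots from its internal VCPU rather than from
	 * SEEPROM; flagging BGE_NO_EEPROM here suppresses the firmware
	 * handshake warning, and reset completion is detected through
	 * BGE_VCPU_STATUS below instead.
	 */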
3366 
3367 	/*
3368 	 * Set GPHY Power Down Override to leave GPHY
3369 	 * powered up in D0 uninitialized.
3370 	 */
3371 	if (BGE_IS_5705_PLUS(sc) &&
3372 	    (sc->bge_flags & BGE_CPMU_PRESENT) == 0)
3373 		reset |= BGE_MISCCFG_KEEP_GPHY_POWER;
3374 
3375 	/* Issue global reset */
3376 	write_op(sc, BGE_MISC_CFG, reset);
3377 
3378 	if (sc->bge_flags & BGE_PCIE)
3379 		DELAY(100 * 1000);
3380 	else
3381 		DELAY(1000);
3382 
3383 	if (sc->bge_flags & BGE_PCIE) {
3384 		if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
3385 			pcireg_t v;
3386 
3387 			DELAY(500000); /* wait for link training to complete */
3388 			v = pci_conf_read(pa->pa_pc, pa->pa_tag, 0xc4);
3389 			pci_conf_write(pa->pa_pc, pa->pa_tag, 0xc4, v | (1<<15));
3390 		}
3391 
3392 		devctl = pci_conf_read(pa->pa_pc, pa->pa_tag, sc->bge_expcap +
3393 		    PCI_PCIE_DCSR);
3394 		/* Clear enable no snoop and disable relaxed ordering. */
3395 		devctl &= ~(PCI_PCIE_DCSR_ERO | PCI_PCIE_DCSR_ENS);
3396 		/* Set PCI Express max payload size. */
3397 		devctl = (devctl & ~PCI_PCIE_DCSR_MPS) | sc->bge_expmrq;
3398 		/* Clear error status. */
3399 		devctl |= PCI_PCIE_DCSR_CEE | PCI_PCIE_DCSR_NFE |
3400 		    PCI_PCIE_DCSR_FEE | PCI_PCIE_DCSR_URE;
3401 		pci_conf_write(pa->pa_pc, pa->pa_tag, sc->bge_expcap +
3402 		    PCI_PCIE_DCSR, devctl);
3403 	}
3404 
3405 	/* Reset some of the PCI state that got zapped by reset */
3406 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL,
3407 	    BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
3408 	    BGE_PCIMISCCTL_ENDIAN_WORDSWAP | BGE_PCIMISCCTL_PCISTATE_RW);
3409 	val = BGE_PCISTATE_ROM_ENABLE | BGE_PCISTATE_ROM_RETRY_ENABLE;
3410 	if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0 &&
3411 	    (sc->bge_flags & BGE_PCIX) != 0)
3412 		val |= BGE_PCISTATE_RETRY_SAME_DMA;
3413 	if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) != 0)
3414 		val |= BGE_PCISTATE_ALLOW_APE_CTLSPC_WR |
3415 		    BGE_PCISTATE_ALLOW_APE_SHMEM_WR |
3416 		    BGE_PCISTATE_ALLOW_APE_PSPACE_WR;
3417 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE, val);
3418 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_CACHESZ, cachesize);
3419 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD, command);
3420 
3421 	/* Re-enable MSI, if necessary, and enable memory arbiter. */
3422 	if (BGE_IS_5714_FAMILY(sc)) {
3423 		/* This chip disables MSI on reset. */
3424 		if (sc->bge_flags & BGE_MSI) {
3425 			val = pci_conf_read(pa->pa_pc, pa->pa_tag,
3426 			    sc->bge_msicap + PCI_MSI_MC);
3427 			pci_conf_write(pa->pa_pc, pa->pa_tag,
3428 			    sc->bge_msicap + PCI_MSI_MC,
3429 			    val | PCI_MSI_MC_MSIE);
3430 			val = CSR_READ_4(sc, BGE_MSI_MODE);
3431 			CSR_WRITE_4(sc, BGE_MSI_MODE,
3432 			    val | BGE_MSIMODE_ENABLE);
3433 		}
3434 		val = CSR_READ_4(sc, BGE_MARB_MODE);
3435 		CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val);
3436 	} else
3437 		CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
3438 
3439 	/* Fix up byte swapping */
3440 	CSR_WRITE_4(sc, BGE_MODE_CTL, bge_dma_swap_options(sc));
3441 
3442 	val = CSR_READ_4(sc, BGE_MAC_MODE);
3443 	val = (val & ~mac_mode_mask) | mac_mode;
3444 	CSR_WRITE_4(sc, BGE_MAC_MODE, val);
3445 	DELAY(40);
3446 
3447 	bge_ape_unlock(sc, BGE_APE_LOCK_GRC);
3448 
3449 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
3450 		for (i = 0; i < BGE_TIMEOUT; i++) {
3451 			val = CSR_READ_4(sc, BGE_VCPU_STATUS);
3452 			if (val & BGE_VCPU_STATUS_INIT_DONE)
3453 				break;
3454 			DELAY(100);
3455 		}
3456 
3457 		if (i >= BGE_TIMEOUT)
3458 			printf("%s: reset timed out\n", sc->bge_dev.dv_xname);
3459 	} else {
3460 		/*
3461 		 * Poll until we see 1's complement of the magic number.
3462 		 * This indicates that the firmware initialization
3463 		 * is complete.  We expect this to fail if no SEEPROM
3464 		 * is fitted.
3465 		 */
3466 		for (i = 0; i < BGE_TIMEOUT * 10; i++) {
3467 			val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
3468 			if (val == ~BGE_MAGIC_NUMBER)
3469 				break;
3470 			DELAY(10);
3471 		}
3472 
3473 		if ((i >= BGE_TIMEOUT * 10) &&
3474 		    (!(sc->bge_flags & BGE_NO_EEPROM)))
3475 			printf("%s: firmware handshake timed out\n",
3476 			   sc->bge_dev.dv_xname);
3477 		/* BCM57765 A0 needs additional time before accessing. */
3478 		if (sc->bge_chipid == BGE_CHIPID_BCM57765_A0)
3479 			DELAY(10 * 1000);       /* XXX */
3480 	}
3481 
3482 	/*
3483 	 * The 5704 in TBI mode apparently needs some special
3484 	 * adjustment to ensure the SERDES drive level is set
3485 	 * to 1.2V.
3486 	 */
3487 	if (sc->bge_flags & BGE_FIBER_TBI &&
3488 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
3489 		val = CSR_READ_4(sc, BGE_SERDES_CFG);
3490 		val = (val & ~0xFFF) | 0x880;
3491 		CSR_WRITE_4(sc, BGE_SERDES_CFG, val);
3492 	}
3493 
3494 	if (sc->bge_flags & BGE_PCIE &&
3495 	    !BGE_IS_5717_PLUS(sc) &&
3496 	    sc->bge_chipid != BGE_CHIPID_BCM5750_A0 &&
3497 	    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5785) {
3498 		/* Enable Data FIFO protection. */
3499 		val = CSR_READ_4(sc, 0x7c00);
3500 		CSR_WRITE_4(sc, 0x7c00, val | (1<<25));
3501 	}
3502 
3503 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720)
3504 		BGE_CLRBIT(sc, BGE_CPMU_CLCK_ORIDE,
3505 		    CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
3506 }
3507 
3508 /*
3509  * Frame reception handling. This is called if there's a frame
3510  * on the receive return list.
3511  *
3512  * Note: we have to be able to handle two possibilities here:
3513  * 1) the frame is from the jumbo receive ring
3514  * 2) the frame is from the standard receive ring
3515  */
3516 
3517 void
3518 bge_rxeof(struct bge_softc *sc)
3519 {
3520 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
3521 	struct ifnet *ifp;
3522 	uint16_t rx_prod, rx_cons;
3523 	int stdcnt = 0, jumbocnt = 0;
3524 	bus_dmamap_t dmamap;
3525 	bus_addr_t offset, toff;
3526 	bus_size_t tlen;
3527 	int tosync;
3528 	int livelocked;
3529 
3530 	rx_cons = sc->bge_rx_saved_considx;
3531 	rx_prod = sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx;
3532 
3533 	/* Nothing to do */
3534 	if (rx_cons == rx_prod)
3535 		return;
3536 
3537 	ifp = &sc->arpcom.ac_if;
3538 
3539 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
3540 	    offsetof(struct bge_ring_data, bge_status_block),
3541 	    sizeof (struct bge_status_block),
3542 	    BUS_DMASYNC_POSTREAD);
3543 
3544 	offset = offsetof(struct bge_ring_data, bge_rx_return_ring);
3545 	tosync = rx_prod - rx_cons;
3546 
3547 	toff = offset + (rx_cons * sizeof (struct bge_rx_bd));
3548 
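	/*
	 * The return ring is circular, so when the producer index has
	 * wrapped below the consumer index the sync is split in two:
	 * first the tail of the ring from rx_cons to the end, then the
	 * wrapped head from the start of the ring up to rx_prod (e.g.
	 * with a 1024-entry ring, rx_cons 1020 and rx_prod 4, entries
	 * 1020-1023 are synced first, then 0-3).
	 */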
3549 	if (tosync < 0) {
3550 		tlen = (sc->bge_return_ring_cnt - rx_cons) *
3551 		    sizeof (struct bge_rx_bd);
3552 		bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
3553 		    toff, tlen, BUS_DMASYNC_POSTREAD);
3554 		tosync = -tosync;
3555 	}
3556 
3557 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
3558 	    offset, tosync * sizeof (struct bge_rx_bd),
3559 	    BUS_DMASYNC_POSTREAD);
3560 
3561 	while (rx_cons != rx_prod) {
3562 		struct bge_rx_bd	*cur_rx;
3563 		u_int32_t		rxidx;
3564 		struct mbuf		*m = NULL;
3565 
3566 		cur_rx = &sc->bge_rdata->bge_rx_return_ring[rx_cons];
3567 
3568 		rxidx = cur_rx->bge_idx;
3569 		BGE_INC(rx_cons, sc->bge_return_ring_cnt);
3570 
3571 		if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
3572 			m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
3573 			sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL;
3574 
3575 			jumbocnt++;
3576 
3577 			dmamap = sc->bge_cdata.bge_rx_jumbo_map[rxidx];
3578 			bus_dmamap_sync(sc->bge_dmatag, dmamap, 0,
3579 			    dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
3580 			bus_dmamap_unload(sc->bge_dmatag, dmamap);
3581 
3582 			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
3583 				m_freem(m);
3584 				continue;
3585 			}
3586 		} else {
3587 			m = sc->bge_cdata.bge_rx_std_chain[rxidx];
3588 			sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL;
3589 
3590 			stdcnt++;
3591 
3592 			dmamap = sc->bge_cdata.bge_rx_std_map[rxidx];
3593 			bus_dmamap_sync(sc->bge_dmatag, dmamap, 0,
3594 			    dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
3595 			bus_dmamap_unload(sc->bge_dmatag, dmamap);
3596 
3597 			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
3598 				m_freem(m);
3599 				continue;
3600 			}
3601 		}
3602 
3603 #ifdef __STRICT_ALIGNMENT
3604 		/*
3605 		 * The i386 allows unaligned accesses, but for other
3606 		 * platforms we must make sure the payload is aligned.
3607 		 */
3608 		if (sc->bge_flags & BGE_RX_ALIGNBUG) {
3609 			bcopy(m->m_data, m->m_data + ETHER_ALIGN,
3610 			    cur_rx->bge_len);
3611 			m->m_data += ETHER_ALIGN;
3612 		}
3613 #endif
3614 		m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
3615 
3616 		bge_rxcsum(sc, cur_rx, m);
3617 
3618 #if NVLAN > 0
3619 		if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING &&
3620 		    cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
3621 			m->m_pkthdr.ether_vtag = cur_rx->bge_vlan_tag;
3622 			m->m_flags |= M_VLANTAG;
3623 		}
3624 #endif
3625 
3626 		ml_enqueue(&ml, m);
3627 	}
3628 
3629 	sc->bge_rx_saved_considx = rx_cons;
3630 	bge_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
3631 
3632 	livelocked = ifiq_input(&ifp->if_rcv, &ml);
3633 	if (stdcnt) {
3634 		if_rxr_put(&sc->bge_std_ring, stdcnt);
3635 		if (livelocked)
3636 			if_rxr_livelocked(&sc->bge_std_ring);
3637 		bge_fill_rx_ring_std(sc);
3638 	}
3639 	if (jumbocnt) {
3640 		if_rxr_put(&sc->bge_jumbo_ring, jumbocnt);
3641 		if (livelocked)
3642 			if_rxr_livelocked(&sc->bge_jumbo_ring);
3643 		bge_fill_rx_ring_jumbo(sc);
3644 	}
3645 }
3646 
3647 void
3648 bge_rxcsum(struct bge_softc *sc, struct bge_rx_bd *cur_rx, struct mbuf *m)
3649 {
3650 	if (sc->bge_chipid == BGE_CHIPID_BCM5700_B0) {
3651 		/*
3652 		 * 5700 B0 chips do not support checksumming correctly due
3653 		 * to hardware bugs.
3654 		 */
3655 		return;
3656 	} else if (BGE_IS_5717_PLUS(sc)) {
3657 		if ((cur_rx->bge_flags & BGE_RXBDFLAG_IPV6) == 0) {
3658 			if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM &&
3659 			    (cur_rx->bge_error_flag &
3660 			    BGE_RXERRFLAG_IP_CSUM_NOK) == 0)
3661 				m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
3662 
3663 			if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) {
3664 				m->m_pkthdr.csum_flags |=
3665 				    M_TCP_CSUM_IN_OK|M_UDP_CSUM_IN_OK;
3666 			}
3667 		}
3668 	} else {
3669 		if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM &&
3670 		    cur_rx->bge_ip_csum == 0xFFFF)
3671 			m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
3672 
3673 		if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM &&
3674 		    m->m_pkthdr.len >= ETHER_MIN_NOPAD &&
3675 		    cur_rx->bge_tcp_udp_csum == 0xFFFF) {
3676 			m->m_pkthdr.csum_flags |=
3677 			    M_TCP_CSUM_IN_OK|M_UDP_CSUM_IN_OK;
3678 		}
3679 	}
3680 }
3681 
3682 void
3683 bge_txeof(struct bge_softc *sc)
3684 {
3685 	struct bge_tx_bd *cur_tx = NULL;
3686 	struct ifnet *ifp;
3687 	bus_dmamap_t dmamap;
3688 	bus_addr_t offset, toff;
3689 	bus_size_t tlen;
3690 	int tosync, freed, txcnt;
3691 	u_int32_t cons, newcons;
3692 	struct mbuf *m;
3693 
3694 	/* Nothing to do */
3695 	cons = sc->bge_tx_saved_considx;
3696 	newcons = sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx;
3697 	if (cons == newcons)
3698 		return;
3699 
3700 	ifp = &sc->arpcom.ac_if;
3701 
3702 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
3703 	    offsetof(struct bge_ring_data, bge_status_block),
3704 	    sizeof (struct bge_status_block),
3705 	    BUS_DMASYNC_POSTREAD);
3706 
3707 	offset = offsetof(struct bge_ring_data, bge_tx_ring);
3708 	tosync = newcons - cons;
3709 
3710 	toff = offset + (cons * sizeof (struct bge_tx_bd));
3711 
3712 	if (tosync < 0) {
3713 		tlen = (BGE_TX_RING_CNT - cons) * sizeof (struct bge_tx_bd);
3714 		bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
3715 		    toff, tlen, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3716 		tosync = -tosync;
3717 	}
3718 
3719 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
3720 	    offset, tosync * sizeof (struct bge_tx_bd),
3721 	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3722 
3723 	/*
3724 	 * Go through our tx ring and free mbufs for those
3725 	 * frames that have been sent.
3726 	 */
3727 	freed = 0;
3728 	while (cons != newcons) {
3729 		cur_tx = &sc->bge_rdata->bge_tx_ring[cons];
3730 		m = sc->bge_cdata.bge_tx_chain[cons];
3731 		if (m != NULL) {
3732 			dmamap = sc->bge_cdata.bge_tx_map[cons];
3733 
3734 			sc->bge_cdata.bge_tx_chain[cons] = NULL;
3735 			sc->bge_cdata.bge_tx_map[cons] = NULL;
3736 			bus_dmamap_sync(sc->bge_dmatag, dmamap, 0,
3737 			    dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
3738 			bus_dmamap_unload(sc->bge_dmatag, dmamap);
3739 
3740 			m_freem(m);
3741 		}
3742 		freed++;
3743 		BGE_INC(cons, BGE_TX_RING_CNT);
3744 	}
3745 
3746 	txcnt = atomic_sub_int_nv(&sc->bge_txcnt, freed);
3747 
3748 	sc->bge_tx_saved_considx = cons;
3749 
3750 	if (ifq_is_oactive(&ifp->if_snd))
3751 		ifq_restart(&ifp->if_snd);
3752 	else if (txcnt == 0)
3753 		ifp->if_timer = 0;
3754 }
3755 
3756 int
3757 bge_intr(void *xsc)
3758 {
3759 	struct bge_softc *sc;
3760 	struct ifnet *ifp;
3761 	u_int32_t statusword, statustag;
3762 
3763 	sc = xsc;
3764 	ifp = &sc->arpcom.ac_if;
3765 
3766 	/* read status word from status block */
3767 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
3768 	    offsetof(struct bge_ring_data, bge_status_block),
3769 	    sizeof (struct bge_status_block),
3770 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3771 
3772 	statusword = sc->bge_rdata->bge_status_block.bge_status;
3773 	statustag = sc->bge_rdata->bge_status_block.bge_status_tag << 24;
3774 
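	/*
	 * With tagged status the chip stamps every status block update
	 * with a tag; seeing the same tag as last time while the INTA
	 * line is deasserted means this interrupt was not ours.
	 * Writing the tag back to the IRQ 0 mailbox below both acks the
	 * interrupt and tells the chip which update we have consumed.
	 */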
3775 	if (sc->bge_flags & BGE_TAGGED_STATUS) {
3776 		if (sc->bge_lasttag == statustag &&
3777 		    (CSR_READ_4(sc, BGE_PCI_PCISTATE) &
3778 		     BGE_PCISTATE_INTR_NOT_ACTIVE))
3779 			return (0);
3780 		sc->bge_lasttag = statustag;
3781 	} else {
3782 		if (!(statusword & BGE_STATFLAG_UPDATED) &&
3783 		    (CSR_READ_4(sc, BGE_PCI_PCISTATE) &
3784 		     BGE_PCISTATE_INTR_NOT_ACTIVE))
3785 			return (0);
3786 		/* Ack interrupt and stop others from occurring. */
3787 		bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
3788 		statustag = 0;
3789 	}
3790 
3791 	/* clear status word */
3792 	sc->bge_rdata->bge_status_block.bge_status = 0;
3793 
3794 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
3795 	    offsetof(struct bge_ring_data, bge_status_block),
3796 	    sizeof (struct bge_status_block),
3797 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3798 
3799 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
3800 	    statusword & BGE_STATFLAG_LINKSTATE_CHANGED ||
3801 	    BGE_STS_BIT(sc, BGE_STS_LINK_EVT)) {
3802 		KERNEL_LOCK();
3803 		bge_link_upd(sc);
3804 		KERNEL_UNLOCK();
3805 	}
3806 
3807 	/* Re-enable interrupts. */
3808 	bge_writembx(sc, BGE_MBX_IRQ0_LO, statustag);
3809 
3810 	if (ifp->if_flags & IFF_RUNNING) {
3811 		/* Check RX return ring producer/consumer */
3812 		bge_rxeof(sc);
3813 
3814 		/* Check TX ring producer/consumer */
3815 		bge_txeof(sc);
3816 	}
3817 
3818 	return (1);
3819 }
3820 
3821 void
3822 bge_tick(void *xsc)
3823 {
3824 	struct bge_softc *sc = xsc;
3825 	struct mii_data *mii = &sc->bge_mii;
3826 	int s;
3827 
3828 	s = splnet();
3829 
3830 	if (BGE_IS_5705_PLUS(sc)) {
3831 		mtx_enter(&sc->bge_kstat_mtx);
3832 		bge_stats_update_regs(sc);
3833 		mtx_leave(&sc->bge_kstat_mtx);
3834 	} else
3835 		bge_stats_update(sc);
3836 
3837 	if (sc->bge_flags & BGE_FIBER_TBI) {
3838 		/*
3839 		 * Since auto-polling can't be used in TBI mode, we poll the
3840 		 * link status manually. Here we register a pending link event
3841 		 * and trigger an interrupt.
3842 		 */
3843 		BGE_STS_SETBIT(sc, BGE_STS_LINK_EVT);
3844 		BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
3845 	} else {
3846 		/*
3847 		 * Do not touch the PHY if we have link up. Doing so could
3848 		 * break IPMI/ASF mode or produce extra input errors
3849 		 * (extra input errors were reported for bcm5701 & bcm5704).
3850 		 */
3851 		if (!BGE_STS_BIT(sc, BGE_STS_LINK))
3852 			mii_tick(mii);
3853 	}
3854 
3855 	timeout_add_sec(&sc->bge_timeout, 1);
3856 
3857 	splx(s);
3858 }
3859 
3860 void
3861 bge_stats_update_regs(struct bge_softc *sc)
3862 {
3863 	struct ifnet *ifp = &sc->arpcom.ac_if;
3864 	uint32_t collisions, discards, inerrors;
3865 	uint32_t ucast, mcast, bcast;
3866 	u_int32_t val;
3867 #if NKSTAT > 0
3868 	struct kstat_kv *kvs = sc->bge_kstat->ks_data;
3869 #endif
3870 
3871 	collisions = CSR_READ_4(sc, BGE_MAC_STATS +
3872 	    offsetof(struct bge_mac_stats_regs, etherStatsCollisions));
3873 
3874 	/*
3875 	 * XXX
3876 	 * Unlike other controllers, the BGE_RXLP_LOCSTAT_IFIN_DROPS counter
3877 	 * of the BCM5717, BCM5718, BCM5762, BCM5719 A0 and BCM5720 A0
3878 	 * controllers includes the number of unwanted multicast frames.
3879 	 * This comes from a silicon bug. The known workaround to get a
3880 	 * rough (not exact) count is to enable an interrupt on MBUF low
3881 	 * watermark attention, by setting the BGE_HCCMODE_ATTN bit of
3882 	 * BGE_HDD_MODE, the BGE_BMANMODE_LOMBUF_ATTN bit of BGE_BMAN_MODE
3883 	 * and the BGE_MODECTL_FLOWCTL_ATTN_INTR bit of BGE_MODE_CTL.
3884 	 * However, that change would generate more interrupts, and
3885 	 * multiple frames could still be lost during
3886 	 * BGE_MODECTL_FLOWCTL_ATTN_INTR interrupt handling. Since even
3887 	 * the workaround would not yield a correct count, it is not
3888 	 * worth implementing; simply skip reading the counter on
3889 	 * controllers that have the silicon bug.
3890 	 */
3891 	if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5717 &&
3892 	    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5762 &&
3893 	    sc->bge_chipid != BGE_CHIPID_BCM5719_A0 &&
3894 	    sc->bge_chipid != BGE_CHIPID_BCM5720_A0)
3895 		discards = CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
3896 	else
3897 		discards = 0;
3898 
3899 	inerrors = CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS);
3900 
3901 	ifp->if_collisions += collisions;
3902 	ifp->if_ierrors += discards + inerrors;
3903 
3904 	ucast = CSR_READ_4(sc, BGE_MAC_STATS +
3905 	    offsetof(struct bge_mac_stats_regs, ifHCOutUcastPkts));
3906 	mcast = CSR_READ_4(sc, BGE_MAC_STATS +
3907 	    offsetof(struct bge_mac_stats_regs, ifHCOutMulticastPkts));
3908 	bcast = CSR_READ_4(sc, BGE_MAC_STATS +
3909 	    offsetof(struct bge_mac_stats_regs, ifHCOutBroadcastPkts));
3910 	if (sc->bge_flags & BGE_RDMA_BUG) {
3911 		/*
3912 		 * If controller transmitted more than BGE_NUM_RDMA_CHANNELS
3913 		 * frames, it's safe to disable workaround for DMA engine's
3914 		 * miscalculation of TXMBUF space.
3915 		 */
3916 		if (ucast + mcast + bcast > BGE_NUM_RDMA_CHANNELS) {
3917 			val = CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL);
3918 			if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719)
3919 				val &= ~BGE_RDMA_TX_LENGTH_WA_5719;
3920 			else
3921 				val &= ~BGE_RDMA_TX_LENGTH_WA_5720;
3922 			CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL, val);
3923 			sc->bge_flags &= ~BGE_RDMA_BUG;
3924 		}
3925 	}
3926 
3927 #if NKSTAT > 0
3928 	kstat_kv_u32(&kvs[bge_stat_out_ucast_pkt]) += ucast;
3929 	kstat_kv_u32(&kvs[bge_stat_out_mcast_pkt]) += mcast;
3930 	kstat_kv_u32(&kvs[bge_stat_out_bcast_pkt]) += bcast;
3931 	kstat_kv_u32(&kvs[bge_stat_collisions]) += collisions;
3932 	kstat_kv_u32(&kvs[bge_stat_if_in_drops]) += discards;
3933 	kstat_kv_u32(&kvs[bge_stat_if_in_errors]) += inerrors;
3934 #endif
3935 }
3936 
3937 void
3938 bge_stats_update(struct bge_softc *sc)
3939 {
3940 	struct ifnet *ifp = &sc->arpcom.ac_if;
3941 	bus_size_t stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;
3942 	u_int32_t cnt;
3943 
3944 #define READ_STAT(sc, stats, stat) \
3945 	  CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))
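/*
 * Each statistic in the NIC's statistics block is a 64-bit value;
 * only the low word (bge_addr_lo) is read here.  Accumulating the
 * unsigned delta against the previously saved snapshot keeps the
 * interface counters correct across 32-bit counter wraps.
 */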
3946 
3947 	cnt = READ_STAT(sc, stats, txstats.etherStatsCollisions.bge_addr_lo);
3948 	ifp->if_collisions += (u_int32_t)(cnt - sc->bge_tx_collisions);
3949 	sc->bge_tx_collisions = cnt;
3950 
3951 	cnt = READ_STAT(sc, stats, nicNoMoreRxBDs.bge_addr_lo);
3952 	sc->bge_rx_overruns = cnt;
3953 	cnt = READ_STAT(sc, stats, ifInErrors.bge_addr_lo);
3954 	ifp->if_ierrors += (uint32_t)(cnt - sc->bge_rx_inerrors);
3955 	sc->bge_rx_inerrors = cnt;
3956 	cnt = READ_STAT(sc, stats, ifInDiscards.bge_addr_lo);
3957 	ifp->if_ierrors += (u_int32_t)(cnt - sc->bge_rx_discards);
3958 	sc->bge_rx_discards = cnt;
3959 
3960 	cnt = READ_STAT(sc, stats, txstats.ifOutDiscards.bge_addr_lo);
3961 	ifp->if_oerrors += (u_int32_t)(cnt - sc->bge_tx_discards);
3962 	sc->bge_tx_discards = cnt;
3963 
3964 #undef READ_STAT
3965 }
3966 
3967 /*
3968  * Compact outbound packets to avoid a bug with DMA segments shorter than 8 bytes.
3969  */
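/*
 * Three repairs are tried in order for each runt mbuf: fold its bytes
 * into trailing space in the previous mbuf, pull bytes forward from
 * the next mbuf, or, for a runt at the tail of the chain, allocate a
 * fresh mbuf seeded with enough bytes borrowed from the previous mbuf
 * to reach the 8-byte minimum.
 */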
3970 int
3971 bge_compact_dma_runt(struct mbuf *pkt)
3972 {
3973 	struct mbuf	*m, *prev, *n = NULL;
3974 	int 		totlen, newprevlen;
3975 
3976 	prev = NULL;
3977 	totlen = 0;
3978 
3979 	for (m = pkt; m != NULL; prev = m, m = m->m_next) {
3980 		int mlen = m->m_len;
3981 		int shortfall = 8 - mlen;
3982 
3983 		totlen += mlen;
3984 		if (mlen == 0)
3985 			continue;
3986 		if (mlen >= 8)
3987 			continue;
3988 
3989 		/* If we get here, mbuf data is too small for DMA engine.
3990 		 * Try to fix by shuffling data to prev or next in chain.
3991 		 * If that fails, do a compacting deep-copy of the whole chain.
3992 		 */
3993 
3994 		/* Internal frag. If fits in prev, copy it there. */
3995 		if (prev && m_trailingspace(prev) >= m->m_len) {
3996 			bcopy(m->m_data, prev->m_data+prev->m_len, mlen);
3997 			prev->m_len += mlen;
3998 			m->m_len = 0;
3999 			/* XXX stitch chain */
4000 			prev->m_next = m_free(m);
4001 			m = prev;
4002 			continue;
4003 		} else if (m->m_next != NULL &&
4004 			   m_trailingspace(m) >= shortfall &&
4005 			   m->m_next->m_len >= (8 + shortfall)) {
4006 			/* m is writable and has enough data in next; pull up. */
4007 
4008 			bcopy(m->m_next->m_data, m->m_data+m->m_len, shortfall);
4009 			m->m_len += shortfall;
4010 			m->m_next->m_len -= shortfall;
4011 			m->m_next->m_data += shortfall;
4012 		} else if (m->m_next == NULL || 1) {
4013 			/* Got a runt at the very end of the packet.
4014 			 * Borrow data from the tail of the preceding mbuf and
4015 			 * update its length in-place. (The original data is still
4016 			 * valid, so we can do this even if prev is not writable.)
4017 			 */
4018 
4019 			/* if we'd make prev a runt, just move all of its data. */
4020 #ifdef DEBUG
4021 			KASSERT(prev != NULL /*, ("runt but null PREV")*/);
4022 			KASSERT(prev->m_len >= 8 /*, ("runt prev")*/);
4023 #endif
4024 			if ((prev->m_len - shortfall) < 8)
4025 				shortfall = prev->m_len;
4026 
4027 			newprevlen = prev->m_len - shortfall;
4028 
4029 			MGET(n, M_NOWAIT, MT_DATA);
4030 			if (n == NULL)
4031 				return (ENOBUFS);
4032 			KASSERT(m->m_len + shortfall < MLEN
4033 				/*,
4034 				  ("runt %d +prev %d too big\n", m->m_len, shortfall)*/);
4035 
4036 			/* first copy the data we're stealing from prev */
4037 			bcopy(prev->m_data + newprevlen, n->m_data, shortfall);
4038 
4039 			/* update prev->m_len accordingly */
4040 			prev->m_len -= shortfall;
4041 
4042 			/* copy data from runt m */
4043 			bcopy(m->m_data, n->m_data + shortfall, m->m_len);
4044 
4045 			/* n holds what we stole from prev, plus m */
4046 			n->m_len = shortfall + m->m_len;
4047 
4048 			/* stitch n into chain and free m */
4049 			n->m_next = m->m_next;
4050 			prev->m_next = n;
4051 			/* KASSERT(m->m_next == NULL); */
4052 			m->m_next = NULL;
4053 			m_free(m);
4054 			m = n;	/* for continuing loop */
4055 		}
4056 	}
4057 	return (0);
4058 }
4059 
4060 /*
4061  * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason.
4062  * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD,
4063  * but when such padded frames employ the bge IP/TCP checksum offload,
4064  * the hardware checksum assist gives incorrect results (possibly
4065  * from incorporating its own padding into the UDP/TCP checksum; who knows).
4066  * If we pad such runts with zeros, the onboard checksum comes out correct.
4067  */
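/*
 * For example, a pure TCP ACK over IPv4 is 54 bytes on the wire
 * (14 + 20 + 20), so 6 zero bytes are appended here to reach
 * ETHER_MIN_NOPAD (60) before the descriptors are set up, keeping
 * the hardware's own pad bytes out of the checksummed data.
 */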
4068 int
4069 bge_cksum_pad(struct mbuf *m)
4070 {
4071 	int padlen = ETHER_MIN_NOPAD - m->m_pkthdr.len;
4072 	struct mbuf *last;
4073 
4074 	/* If there's only the packet-header and we can pad there, use it. */
4075 	if (m->m_pkthdr.len == m->m_len && m_trailingspace(m) >= padlen) {
4076 		last = m;
4077 	} else {
4078 		/*
4079 		 * Walk packet chain to find last mbuf. We will either
4080 		 * pad there, or append a new mbuf and pad it.
4081 		 */
4082 		for (last = m; last->m_next != NULL; last = last->m_next);
4083 		if (m_trailingspace(last) < padlen) {
4084 			/* Allocate new empty mbuf, pad it. Compact later. */
4085 			struct mbuf *n;
4086 
4087 			MGET(n, M_DONTWAIT, MT_DATA);
4088 			if (n == NULL)
4089 				return (ENOBUFS);
4090 			n->m_len = 0;
4091 			last->m_next = n;
4092 			last = n;
4093 		}
4094 	}
4095 
4096 	/* Now zero the pad area, to avoid the bge cksum-assist bug. */
4097 	memset(mtod(last, caddr_t) + last->m_len, 0, padlen);
4098 	last->m_len += padlen;
4099 	m->m_pkthdr.len += padlen;
4100 
4101 	return (0);
4102 }
4103 
4104 /*
4105  * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
4106  * pointers to descriptors.
4107  */
4108 int
4109 bge_encap(struct bge_softc *sc, struct mbuf *m, int *txinc)
4110 {
4111 	struct bge_tx_bd	*f = NULL;
4112 	u_int32_t		frag, cur;
4113 	u_int16_t		csum_flags = 0;
4114 	bus_dmamap_t		dmamap;
4115 	int			i = 0;
4116 
4117 	cur = frag = (sc->bge_tx_prodidx + *txinc) % BGE_TX_RING_CNT;
4118 
4119 	if (m->m_pkthdr.csum_flags) {
4120 		if (m->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
4121 			csum_flags |= BGE_TXBDFLAG_IP_CSUM;
4122 		if (m->m_pkthdr.csum_flags &
4123 		    (M_TCP_CSUM_OUT | M_UDP_CSUM_OUT)) {
4124 			csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
4125 			if (m->m_pkthdr.len < ETHER_MIN_NOPAD &&
4126 			    bge_cksum_pad(m) != 0)
4127 				return (ENOBUFS);
4128 		}
4129 	}
4130 
4131 	if (sc->bge_flags & BGE_JUMBO_FRAME &&
4132 	    m->m_pkthdr.len > ETHER_MAX_LEN)
4133 		csum_flags |= BGE_TXBDFLAG_JUMBO_FRAME;
4134 
4135 	if (!(BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX))
4136 		goto doit;
4137 
4138 	/*
4139 	 * bcm5700 Revision B silicon cannot handle DMA descriptors with
4140 	 * less than eight bytes.  If we encounter a teeny mbuf
4141 	 * at the end of a chain, we can pad.  Otherwise, copy.
4142 	 */
4143 	if (bge_compact_dma_runt(m) != 0)
4144 		return (ENOBUFS);
4145 
4146 doit:
4147 	dmamap = sc->bge_txdma[cur];
4148 
4149 	/*
4150 	 * Start packing the mbufs in this chain into
4151 	 * the fragment pointers. Stop when we run out
4152 	 * of fragments or hit the end of the mbuf chain.
4153 	 */
4154 	switch (bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m,
4155 	    BUS_DMA_NOWAIT)) {
4156 	case 0:
4157 		break;
4158 	case EFBIG:
4159 		if (m_defrag(m, M_DONTWAIT) == 0 &&
4160 		    bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m,
4161 		     BUS_DMA_NOWAIT) == 0)
4162 			break;
4163 
4164 		/* FALLTHROUGH */
4165 	default:
4166 		return (ENOBUFS);
4167 	}
4168 
4169 	for (i = 0; i < dmamap->dm_nsegs; i++) {
4170 		f = &sc->bge_rdata->bge_tx_ring[frag];
4171 		if (sc->bge_cdata.bge_tx_chain[frag] != NULL)
4172 			break;
4173 		BGE_HOSTADDR(f->bge_addr, dmamap->dm_segs[i].ds_addr);
4174 		f->bge_len = dmamap->dm_segs[i].ds_len;
4175 		f->bge_flags = csum_flags;
4176 		f->bge_vlan_tag = 0;
4177 #if NVLAN > 0
4178 		if (m->m_flags & M_VLANTAG) {
4179 			f->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
4180 			f->bge_vlan_tag = m->m_pkthdr.ether_vtag;
4181 		}
4182 #endif
4183 		cur = frag;
4184 		BGE_INC(frag, BGE_TX_RING_CNT);
4185 	}
4186 
4187 	if (i < dmamap->dm_nsegs)
4188 		goto fail_unload;
4189 
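	/*
	 * Never let the producer index advance onto the saved consumer
	 * index: a completely full ring would otherwise be
	 * indistinguishable from an empty one.
	 */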
4190 	if (frag == sc->bge_tx_saved_considx)
4191 		goto fail_unload;
4192 
4193 	bus_dmamap_sync(sc->bge_dmatag, dmamap, 0, dmamap->dm_mapsize,
4194 	    BUS_DMASYNC_PREWRITE);
4195 
4196 	sc->bge_rdata->bge_tx_ring[cur].bge_flags |= BGE_TXBDFLAG_END;
4197 	sc->bge_cdata.bge_tx_chain[cur] = m;
4198 	sc->bge_cdata.bge_tx_map[cur] = dmamap;
4199 
4200 	*txinc += dmamap->dm_nsegs;
4201 
4202 	return (0);
4203 
4204 fail_unload:
4205 	bus_dmamap_unload(sc->bge_dmatag, dmamap);
4206 
4207 	return (ENOBUFS);
4208 }
4209 
4210 /*
4211  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
4212  * to the mbuf data regions directly in the transmit descriptors.
4213  */
4214 void
4215 bge_start(struct ifqueue *ifq)
4216 {
4217 	struct ifnet *ifp = ifq->ifq_if;
4218 	struct bge_softc *sc = ifp->if_softc;
4219 	struct mbuf *m;
4220 	int txinc;
4221 
4222 	if (!BGE_STS_BIT(sc, BGE_STS_LINK)) {
4223 		ifq_purge(ifq);
4224 		return;
4225 	}
4226 
4227 	txinc = 0;
4228 	while (1) {
4229 		/* Check if we have enough free send BDs. */
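		/*
		 * BGE_NTXSEG bounds the descriptors one packet can
		 * consume; the additional 16 is a safety margin so the
		 * producer never runs up against the consumer.
		 */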
4230 		if (sc->bge_txcnt + txinc + BGE_NTXSEG + 16 >=
4231 		    BGE_TX_RING_CNT) {
4232 			ifq_set_oactive(ifq);
4233 			break;
4234 		}
4235 
4236 		m = ifq_dequeue(ifq);
4237 		if (m == NULL)
4238 			break;
4239 
4240 		if (bge_encap(sc, m, &txinc) != 0) {
4241 			m_freem(m);
4242 			continue;
4243 		}
4244 
4245 #if NBPFILTER > 0
4246 		if (ifp->if_bpf)
4247 			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT);
4248 #endif
4249 	}
4250 
4251 	if (txinc != 0) {
4252 		/* Transmit */
4253 		sc->bge_tx_prodidx = (sc->bge_tx_prodidx + txinc) %
4254 		    BGE_TX_RING_CNT;
4255 		bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
4256 		if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX)
4257 			bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO,
4258 			    sc->bge_tx_prodidx);
4259 
4260 		atomic_add_int(&sc->bge_txcnt, txinc);
4261 
4262 		/*
4263 		 * Set a timeout in case the chip goes out to lunch.
4264 		 */
4265 		ifp->if_timer = 5;
4266 	}
4267 }
4268 
4269 void
4270 bge_init(void *xsc)
4271 {
4272 	struct bge_softc *sc = xsc;
4273 	struct ifnet *ifp;
4274 	u_int16_t *m;
4275 	u_int32_t mode;
4276 	int s;
4277 
4278 	s = splnet();
4279 
4280 	ifp = &sc->arpcom.ac_if;
4281 
4282 	/* Cancel pending I/O and flush buffers. */
4283 	bge_stop(sc, 0);
4284 	bge_sig_pre_reset(sc, BGE_RESET_START);
4285 	bge_reset(sc);
4286 	bge_sig_legacy(sc, BGE_RESET_START);
4287 	bge_sig_post_reset(sc, BGE_RESET_START);
4288 
4289 	bge_chipinit(sc);
4290 
4291 	/*
4292 	 * Init the various state machines, ring
4293 	 * control blocks and firmware.
4294 	 */
4295 	if (bge_blockinit(sc)) {
4296 		printf("%s: initialization failure\n", sc->bge_dev.dv_xname);
4297 		splx(s);
4298 		return;
4299 	}
4300 
4301 	/* Specify MRU. */
4302 	if (BGE_IS_JUMBO_CAPABLE(sc))
4303 		CSR_WRITE_4(sc, BGE_RX_MTU,
4304 			BGE_JUMBO_FRAMELEN + ETHER_VLAN_ENCAP_LEN);
4305 	else
4306 		CSR_WRITE_4(sc, BGE_RX_MTU,
4307 			ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN);
4308 
4309 	/* Load our MAC address. */
4310 	m = (u_int16_t *)&sc->arpcom.ac_enaddr[0];
4311 	CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
4312 	CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
4313 
4314 	if (!(ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)) {
4315 		/* Disable hardware decapsulation of VLAN frames. */
4316 		BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG);
4317 	}
4318 
4319 	/* Program promiscuous mode and multicast filters. */
4320 	bge_iff(sc);
4321 
4322 	/* Init RX ring. */
4323 	bge_init_rx_ring_std(sc);
4324 
4325 	/*
4326 	 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
4327 	 * memory to ensure that the chip has in fact read the first
4328 	 * entry of the ring.
4329 	 */
4330 	if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
4331 		u_int32_t		v, i;
4332 		for (i = 0; i < 10; i++) {
4333 			DELAY(20);
4334 			v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
4335 			if (v == (MCLBYTES - ETHER_ALIGN))
4336 				break;
4337 		}
4338 		if (i == 10)
4339 			printf("%s: 5705 A0 chip failed to load RX ring\n",
4340 			    sc->bge_dev.dv_xname);
4341 	}
4342 
4343 	/* Init Jumbo RX ring. */
4344 	if (sc->bge_flags & BGE_JUMBO_RING)
4345 		bge_init_rx_ring_jumbo(sc);
4346 
4347 	/* Init our RX return ring index */
4348 	sc->bge_rx_saved_considx = 0;
4349 
4350 	/* Init our RX/TX stat counters. */
4351 	sc->bge_tx_collisions = 0;
4352 	sc->bge_rx_discards = 0;
4353 	sc->bge_rx_inerrors = 0;
4354 	sc->bge_rx_overruns = 0;
4355 	sc->bge_tx_discards = 0;
4356 
4357 	/* Init TX ring. */
4358 	bge_init_tx_ring(sc);
4359 
4360 	/* Enable TX MAC state machine lockup fix. */
4361 	mode = CSR_READ_4(sc, BGE_TX_MODE);
4362 	if (BGE_IS_5755_PLUS(sc) ||
4363 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
4364 		mode |= BGE_TXMODE_MBUF_LOCKUP_FIX;
4365 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720 ||
4366 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762) {
4367 		mode &= ~(BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE);
4368 		mode |= CSR_READ_4(sc, BGE_TX_MODE) &
4369 		    (BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE);
4370 	}
4371 
4372 	/* Turn on transmitter */
4373 	CSR_WRITE_4(sc, BGE_TX_MODE, mode | BGE_TXMODE_ENABLE);
4374 	DELAY(100);
4375 
4376 	mode = CSR_READ_4(sc, BGE_RX_MODE);
4377 	if (BGE_IS_5755_PLUS(sc))
4378 		mode |= BGE_RXMODE_IPV6_ENABLE;
4379 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762)
4380 		mode |= BGE_RXMODE_IPV4_FRAG_FIX;
4381 
4382 	/* Turn on receiver */
4383 	CSR_WRITE_4(sc, BGE_RX_MODE, mode | BGE_RXMODE_ENABLE);
4384 	DELAY(10);
4385 
4386 	/*
4387 	 * Set the number of good frames to receive after RX MBUF
4388 	 * Low Watermark has been reached. After the RX MAC receives
4389 	 * this number of frames, it will drop subsequent incoming
4390 	 * frames until the MBUF High Watermark is reached.
4391 	 */
4392 	if (BGE_IS_57765_PLUS(sc))
4393 		CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 1);
4394 	else
4395 		CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 2);
4396 
4397 	/* Tell firmware we're alive. */
4398 	BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
4399 
4400 	/* Enable host interrupts. */
4401 	BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
4402 	BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
4403 	bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
4404 
4405 	bge_ifmedia_upd(ifp);
4406 
4407 	ifp->if_flags |= IFF_RUNNING;
4408 	ifq_clr_oactive(&ifp->if_snd);
4409 
4410 	splx(s);
4411 
4412 	timeout_add_sec(&sc->bge_timeout, 1);
4413 }
4414 
4415 /*
4416  * Set media options.
4417  */
4418 int
4419 bge_ifmedia_upd(struct ifnet *ifp)
4420 {
4421 	struct bge_softc *sc = ifp->if_softc;
4422 	struct mii_data *mii = &sc->bge_mii;
4423 	struct ifmedia *ifm = &sc->bge_ifmedia;
4424 
4425 	/* If this is a 1000baseX NIC, enable the TBI port. */
4426 	if (sc->bge_flags & BGE_FIBER_TBI) {
4427 		if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
4428 			return (EINVAL);
4429 		switch (IFM_SUBTYPE(ifm->ifm_media)) {
4430 		case IFM_AUTO:
4431 			/*
4432 			 * The BCM5704 ASIC appears to have a special
4433 			 * mechanism for programming the autoneg
4434 			 * advertisement registers in TBI mode.
4435 			 */
4436 			if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
4437 				u_int32_t sgdig;
4438 				sgdig = CSR_READ_4(sc, BGE_SGDIG_STS);
4439 				if (sgdig & BGE_SGDIGSTS_DONE) {
4440 					CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
4441 					sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
4442 					sgdig |= BGE_SGDIGCFG_AUTO |
4443 					    BGE_SGDIGCFG_PAUSE_CAP |
4444 					    BGE_SGDIGCFG_ASYM_PAUSE;
4445 					CSR_WRITE_4(sc, BGE_SGDIG_CFG,
4446 					    sgdig | BGE_SGDIGCFG_SEND);
4447 					DELAY(5);
4448 					CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
4449 				}
4450 			}
4451 			break;
4452 		case IFM_1000_SX:
4453 			if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
4454 				BGE_CLRBIT(sc, BGE_MAC_MODE,
4455 				    BGE_MACMODE_HALF_DUPLEX);
4456 			} else {
4457 				BGE_SETBIT(sc, BGE_MAC_MODE,
4458 				    BGE_MACMODE_HALF_DUPLEX);
4459 			}
4460 			DELAY(40);
4461 			break;
4462 		default:
4463 			return (EINVAL);
4464 		}
4465 		/* XXX 802.3x flow control for 1000BASE-SX */
4466 		return (0);
4467 	}
4468 
4469 	BGE_STS_SETBIT(sc, BGE_STS_LINK_EVT);
4470 	if (mii->mii_instance) {
4471 		struct mii_softc *miisc;
4472 		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
4473 			mii_phy_reset(miisc);
4474 	}
4475 	mii_mediachg(mii);
4476 
4477 	/*
4478 	 * Force an interrupt so that we will call bge_link_upd
4479 	 * if needed and clear any pending link state attention.
4480 	 * Without this we would get no further link state interrupts,
4481 	 * so the link would never come up and bge_start could not
4482 	 * transmit. Previously the only way to recover was to receive
4483 	 * a packet and take an RX interrupt.
4484 	 */
4485 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
4486 	    sc->bge_flags & BGE_IS_5788)
4487 		BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
4488 	else
4489 		BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
4490 
4491 	return (0);
4492 }
4493 
4494 /*
4495  * Report current media status.
4496  */
4497 void
4498 bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
4499 {
4500 	struct bge_softc *sc = ifp->if_softc;
4501 	struct mii_data *mii = &sc->bge_mii;
4502 
4503 	if (sc->bge_flags & BGE_FIBER_TBI) {
4504 		ifmr->ifm_status = IFM_AVALID;
4505 		ifmr->ifm_active = IFM_ETHER;
4506 		if (CSR_READ_4(sc, BGE_MAC_STS) &
4507 		    BGE_MACSTAT_TBI_PCS_SYNCHED) {
4508 			ifmr->ifm_status |= IFM_ACTIVE;
4509 		} else {
4510 			ifmr->ifm_active |= IFM_NONE;
4511 			return;
4512 		}
4513 		ifmr->ifm_active |= IFM_1000_SX;
4514 		if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
4515 			ifmr->ifm_active |= IFM_HDX;
4516 		else
4517 			ifmr->ifm_active |= IFM_FDX;
4518 		return;
4519 	}
4520 
4521 	mii_pollstat(mii);
4522 	ifmr->ifm_status = mii->mii_media_status;
4523 	ifmr->ifm_active = (mii->mii_media_active & ~IFM_ETH_FMASK) |
4524 	    sc->bge_flowflags;
4525 }
4526 
4527 int
4528 bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
4529 {
4530 	struct bge_softc *sc = ifp->if_softc;
4531 	struct ifreq *ifr = (struct ifreq *) data;
4532 	int s, error = 0;
4533 	struct mii_data *mii;
4534 
4535 	s = splnet();
4536 
4537 	switch (command) {
4538 	case SIOCSIFADDR:
4539 		ifp->if_flags |= IFF_UP;
4540 		if (!(ifp->if_flags & IFF_RUNNING))
4541 			bge_init(sc);
4542 		break;
4543 
4544 	case SIOCSIFFLAGS:
4545 		if (ifp->if_flags & IFF_UP) {
4546 			if (ifp->if_flags & IFF_RUNNING)
4547 				error = ENETRESET;
4548 			else
4549 				bge_init(sc);
4550 		} else {
4551 			if (ifp->if_flags & IFF_RUNNING)
4552 				bge_stop(sc, 0);
4553 		}
4554 		break;
4555 
4556 	case SIOCSIFMEDIA:
4557 		/* XXX Flow control is not supported for 1000BASE-SX */
4558 		if (sc->bge_flags & BGE_FIBER_TBI) {
4559 			ifr->ifr_media &= ~IFM_ETH_FMASK;
4560 			sc->bge_flowflags = 0;
4561 		}
4562 
4563 		/* Flow control requires full-duplex mode. */
4564 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
4565 		    (ifr->ifr_media & IFM_FDX) == 0) {
4566 			ifr->ifr_media &= ~IFM_ETH_FMASK;
4567 		}
4568 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
4569 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
4570 				/* We can do both TXPAUSE and RXPAUSE. */
4571 				ifr->ifr_media |=
4572 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
4573 			}
4574 			sc->bge_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
4575 		}
4576 		/* FALLTHROUGH */
4577 	case SIOCGIFMEDIA:
4578 		if (sc->bge_flags & BGE_FIBER_TBI) {
4579 			error = ifmedia_ioctl(ifp, ifr, &sc->bge_ifmedia,
4580 			    command);
4581 		} else {
4582 			mii = &sc->bge_mii;
4583 			error = ifmedia_ioctl(ifp, ifr, &mii->mii_media,
4584 			    command);
4585 		}
4586 		break;
4587 
4588 	case SIOCGIFRXR:
4589 		error = bge_rxrinfo(sc, (struct if_rxrinfo *)ifr->ifr_data);
4590 		break;
4591 
4592 	default:
4593 		error = ether_ioctl(ifp, &sc->arpcom, command, data);
4594 	}
4595 
4596 	if (error == ENETRESET) {
4597 		if (ifp->if_flags & IFF_RUNNING)
4598 			bge_iff(sc);
4599 		error = 0;
4600 	}
4601 
4602 	splx(s);
4603 	return (error);
4604 }
4605 
4606 int
4607 bge_rxrinfo(struct bge_softc *sc, struct if_rxrinfo *ifri)
4608 {
4609 	struct if_rxring_info ifr[2];
4610 	u_int n = 0;
4611 
4612 	memset(ifr, 0, sizeof(ifr));
4613 
4614 	if (ISSET(sc->bge_flags, BGE_RXRING_VALID)) {
4615 		ifr[n].ifr_size = sc->bge_rx_std_len;
4616 		strlcpy(ifr[n].ifr_name, "std", sizeof(ifr[n].ifr_name));
4617 		ifr[n].ifr_info = sc->bge_std_ring;
4618 
4619 		n++;
4620 	}
4621 
4622 	if (ISSET(sc->bge_flags, BGE_JUMBO_RXRING_VALID)) {
4623 		ifr[n].ifr_size = BGE_JLEN;
4624 		strlcpy(ifr[n].ifr_name, "jumbo", sizeof(ifr[n].ifr_name));
4625 		ifr[n].ifr_info = sc->bge_jumbo_ring;
4626 
4627 		n++;
4628 	}
4629 
4630 	return (if_rxr_info_ioctl(ifri, n, ifr));
4631 }
4632 
4633 void
4634 bge_watchdog(struct ifnet *ifp)
4635 {
4636 	struct bge_softc *sc;
4637 
4638 	sc = ifp->if_softc;
4639 
4640 	printf("%s: watchdog timeout -- resetting\n", sc->bge_dev.dv_xname);
4641 
4642 	bge_init(sc);
4643 
4644 	ifp->if_oerrors++;
4645 }
4646 
4647 void
4648 bge_stop_block(struct bge_softc *sc, bus_size_t reg, u_int32_t bit)
4649 {
4650 	int i;
4651 
4652 	BGE_CLRBIT(sc, reg, bit);
4653 
4654 	for (i = 0; i < BGE_TIMEOUT; i++) {
4655 		if ((CSR_READ_4(sc, reg) & bit) == 0)
4656 			return;
4657 		delay(100);
4658 	}
4659 
4660 	DPRINTFN(5, ("%s: block failed to stop: reg 0x%lx, bit 0x%08x\n",
4661 	    sc->bge_dev.dv_xname, (u_long) reg, bit));
4662 }
4663 
4664 /*
4665  * Stop the adapter and free any mbufs allocated to the
4666  * RX and TX lists.
4667  */
4668 void
4669 bge_stop(struct bge_softc *sc, int softonly)
4670 {
4671 	struct ifnet *ifp = &sc->arpcom.ac_if;
4672 	struct ifmedia_entry *ifm;
4673 	struct mii_data *mii;
4674 	int mtmp, itmp;
4675 
4676 	timeout_del(&sc->bge_timeout);
4677 	timeout_del(&sc->bge_rxtimeout);
4678 	timeout_del(&sc->bge_rxtimeout_jumbo);
4679 
4680 	ifp->if_flags &= ~IFF_RUNNING;
4681 	ifp->if_timer = 0;
4682 
4683 	if (!softonly) {
4684 		/*
4685 		 * Tell firmware we're shutting down.
4686 		 */
4687 		/* bge_stop_fw(sc); */
4688 		bge_sig_pre_reset(sc, BGE_RESET_SHUTDOWN);
4689 
4690 		/*
4691 		 * Disable all of the receiver blocks
4692 		 */
4693 		bge_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
4694 		bge_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
4695 		bge_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
4696 		if (BGE_IS_5700_FAMILY(sc))
4697 			bge_stop_block(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
4698 		bge_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
4699 		bge_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
4700 		bge_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
4701 
4702 		/*
4703 		 * Disable all of the transmit blocks
4704 		 */
4705 		bge_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
4706 		bge_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
4707 		bge_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
4708 		bge_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
4709 		bge_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
4710 		if (BGE_IS_5700_FAMILY(sc))
4711 			bge_stop_block(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
4712 		bge_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
4713 
4714 		/*
4715 		 * Shut down all of the memory managers and related
4716 		 * state machines.
4717 		 */
4718 		bge_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
4719 		bge_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
4720 		if (BGE_IS_5700_FAMILY(sc))
4721 			bge_stop_block(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
4722 
4723 		CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
4724 		CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
4725 
4726 		if (!BGE_IS_5705_PLUS(sc)) {
4727 			bge_stop_block(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
4728 			bge_stop_block(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
4729 		}
4730 
4731 		bge_reset(sc);
4732 		bge_sig_legacy(sc, BGE_RESET_SHUTDOWN);
4733 		bge_sig_post_reset(sc, BGE_RESET_SHUTDOWN);
4734 
4735 		/*
4736 		 * Tell firmware we're shutting down.
4737 		 */
4738 		BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
4739 	}
4740 
4741 	intr_barrier(sc->bge_intrhand);
4742 	ifq_barrier(&ifp->if_snd);
4743 
4744 	ifq_clr_oactive(&ifp->if_snd);
4745 
4746 	/* Free the RX lists. */
4747 	bge_free_rx_ring_std(sc);
4748 
4749 	/* Free jumbo RX list. */
4750 	if (sc->bge_flags & BGE_JUMBO_RING)
4751 		bge_free_rx_ring_jumbo(sc);
4752 
4753 	/* Free TX buffers. */
4754 	bge_free_tx_ring(sc);
4755 
4756 	/*
4757 	 * Isolate/power down the PHY, but leave the media selection
4758 	 * unchanged so that things will be put back to normal when
4759 	 * we bring the interface back up.
4760 	 */
4761 	if (!(sc->bge_flags & BGE_FIBER_TBI)) {
4762 		mii = &sc->bge_mii;
4763 		itmp = ifp->if_flags;
4764 		ifp->if_flags |= IFF_UP;
4765 		ifm = mii->mii_media.ifm_cur;
4766 		mtmp = ifm->ifm_media;
4767 		ifm->ifm_media = IFM_ETHER|IFM_NONE;
4768 		mii_mediachg(mii);
4769 		ifm->ifm_media = mtmp;
4770 		ifp->if_flags = itmp;
4771 	}
4772 
4773 	sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
4774 
4775 	if (!softonly) {
4776 		/* Clear MAC's link state (PHY may still have link UP). */
4777 		BGE_STS_CLRBIT(sc, BGE_STS_LINK);
4778 	}
4779 }
4780 
4781 void
4782 bge_link_upd(struct bge_softc *sc)
4783 {
4784 	struct ifnet *ifp = &sc->arpcom.ac_if;
4785 	struct mii_data *mii = &sc->bge_mii;
4786 	u_int32_t status;
4787 	int link;
4788 
4789 	/* Clear 'pending link event' flag */
4790 	BGE_STS_CLRBIT(sc, BGE_STS_LINK_EVT);
4791 
4792 	/*
4793 	 * Process link state changes.
4794 	 * Grrr. The link status word in the status block does
4795 	 * not work correctly on the BCM5700 rev AX and BX chips,
4796 	 * according to all available information. Hence, we have
4797 	 * to enable MII interrupts in order to properly obtain
4798 	 * async link changes. Unfortunately, this also means that
4799 	 * we have to read the MAC status register to detect link
4800 	 * changes, thereby adding an additional register access to
4801 	 * the interrupt handler.
4802 	 *
4803 	 */
4805 		status = CSR_READ_4(sc, BGE_MAC_STS);
4806 		if (status & BGE_MACSTAT_MI_INTERRUPT) {
4807 			mii_pollstat(mii);
4808 
4809 			if (!BGE_STS_BIT(sc, BGE_STS_LINK) &&
4810 			    mii->mii_media_status & IFM_ACTIVE &&
4811 			    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
4812 				BGE_STS_SETBIT(sc, BGE_STS_LINK);
4813 			else if (BGE_STS_BIT(sc, BGE_STS_LINK) &&
4814 			    (!(mii->mii_media_status & IFM_ACTIVE) ||
4815 			    IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE))
4816 				BGE_STS_CLRBIT(sc, BGE_STS_LINK);
4817 
4818 			/* Clear the interrupt */
4819 			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
4820 			    BGE_EVTENB_MI_INTERRUPT);
4821 			bge_miibus_readreg(&sc->bge_dev, sc->bge_phy_addr,
4822 			    BRGPHY_MII_ISR);
4823 			bge_miibus_writereg(&sc->bge_dev, sc->bge_phy_addr,
4824 			    BRGPHY_MII_IMR, BRGPHY_INTRS);
4825 		}
4826 		return;
4827 	}
4828 
4829 	if (sc->bge_flags & BGE_FIBER_TBI) {
4830 		status = CSR_READ_4(sc, BGE_MAC_STS);
4831 		if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
4832 			if (!BGE_STS_BIT(sc, BGE_STS_LINK)) {
4833 				BGE_STS_SETBIT(sc, BGE_STS_LINK);
4834 				if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704)
4835 					BGE_CLRBIT(sc, BGE_MAC_MODE,
4836 					    BGE_MACMODE_TBI_SEND_CFGS);
4837 				CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
4838 				status = CSR_READ_4(sc, BGE_MAC_MODE);
4839 				link = (status & BGE_MACMODE_HALF_DUPLEX) ?
4840 				    LINK_STATE_HALF_DUPLEX :
4841 				    LINK_STATE_FULL_DUPLEX;
4842 				ifp->if_baudrate = IF_Gbps(1);
4843 				if (ifp->if_link_state != link) {
4844 					ifp->if_link_state = link;
4845 					if_link_state_change(ifp);
4846 				}
4847 			}
4848 		} else if (BGE_STS_BIT(sc, BGE_STS_LINK)) {
4849 			BGE_STS_CLRBIT(sc, BGE_STS_LINK);
4850 			link = LINK_STATE_DOWN;
4851 			ifp->if_baudrate = 0;
4852 			if (ifp->if_link_state != link) {
4853 				ifp->if_link_state = link;
4854 				if_link_state_change(ifp);
4855 			}
4856 		}
4857 	} else if (BGE_STS_BIT(sc, BGE_STS_AUTOPOLL)) {
4858 		/*
4859 		 * Some broken BCM chips have the BGE_STATFLAG_LINKSTATE_CHANGED
4860 		 * bit in the status word always set. Work around this bug by
4861 		 * reading the PHY link status directly.
4862 		 */
4863 		link = (CSR_READ_4(sc, BGE_MI_STS) & BGE_MISTS_LINK)?
4864 		    BGE_STS_LINK : 0;
4865 
4866 		if (BGE_STS_BIT(sc, BGE_STS_LINK) != link) {
4867 			mii_pollstat(mii);
4868 
4869 			if (!BGE_STS_BIT(sc, BGE_STS_LINK) &&
4870 			    mii->mii_media_status & IFM_ACTIVE &&
4871 			    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
4872 				BGE_STS_SETBIT(sc, BGE_STS_LINK);
4873 			else if (BGE_STS_BIT(sc, BGE_STS_LINK) &&
4874 			    (!(mii->mii_media_status & IFM_ACTIVE) ||
4875 			    IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE))
4876 				BGE_STS_CLRBIT(sc, BGE_STS_LINK);
4877 		}
4878 	} else {
4879 		/*
4880 		 * For controllers that call mii_tick, we have to poll
4881 		 * link status.
4882 		 */
4883 		mii_pollstat(mii);
4884 	}
4885 
4886 	/* Clear the attention */
4887 	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
4888 	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
4889 	    BGE_MACSTAT_LINK_CHANGED);
4890 }
4891 
4892 #if NKSTAT > 0
4893 
4894 struct bge_stat {
4895 	char			name[KSTAT_KV_NAMELEN];
4896 	enum kstat_kv_unit	unit;
4897 	bus_size_t		reg;
4898 };
4899 
4900 #define MACREG(_f) \
4901 	BGE_MAC_STATS + offsetof(struct bge_mac_stats_regs, _f)
4902 
4903 static const struct bge_stat bge_kstat_tpl[] = {
4904 	/* MAC stats */
4905 	[bge_stat_out_octets] = { "out octets", KSTAT_KV_U_BYTES,
4906 	    MACREG(ifHCOutOctets) },
4907 	[bge_stat_collisions] = { "collisions", KSTAT_KV_U_NONE, 0 },
4908 	[bge_stat_xon_sent] = { "xon sent", KSTAT_KV_U_NONE,
4909 	    MACREG(outXonSent) },
4910 	[bge_stat_xoff_sent] = { "xoff sent", KSTAT_KV_U_NONE,
4911 	    MACREG(outXoffSent) },
4912 	[bge_stat_xmit_errors] = { "xmit errors", KSTAT_KV_U_NONE,
4913 	    MACREG(dot3StatsInternalMacTransmitErrors) },
4914 	[bge_stat_coll_frames] = { "coll frames", KSTAT_KV_U_PACKETS,
4915 	    MACREG(dot3StatsSingleCollisionFrames) },
4916 	[bge_stat_multicoll_frames] = { "multicoll frames", KSTAT_KV_U_PACKETS,
4917 	    MACREG(dot3StatsMultipleCollisionFrames) },
4918 	[bge_stat_deferred_xmit] = { "deferred xmit", KSTAT_KV_U_NONE,
4919 	    MACREG(dot3StatsDeferredTransmissions) },
4920 	[bge_stat_excess_coll] = { "excess coll", KSTAT_KV_U_NONE,
4921 	    MACREG(dot3StatsExcessiveCollisions) },
4922 	[bge_stat_late_coll] = { "late coll", KSTAT_KV_U_NONE,
4923 	    MACREG(dot3StatsLateCollisions) },
4924 	[bge_stat_out_ucast_pkt] = { "out ucast pkts", KSTAT_KV_U_PACKETS, 0 },
4925 	[bge_stat_out_mcast_pkt] = { "out mcast pkts", KSTAT_KV_U_PACKETS, 0 },
4926 	[bge_stat_out_bcast_pkt] = { "out bcast pkts", KSTAT_KV_U_PACKETS, 0 },
4927 	[bge_stat_in_octets] = { "in octets", KSTAT_KV_U_BYTES,
4928 	    MACREG(ifHCInOctets) },
4929 	[bge_stat_fragments] = { "fragments", KSTAT_KV_U_NONE,
4930 	    MACREG(etherStatsFragments) },
4931 	[bge_stat_in_ucast_pkt] = { "in ucast pkts", KSTAT_KV_U_PACKETS,
4932 	    MACREG(ifHCInUcastPkts) },
4933 	[bge_stat_in_mcast_pkt] = { "in mcast pkts", KSTAT_KV_U_PACKETS,
4934 	    MACREG(ifHCInMulticastPkts) },
4935 	[bge_stat_in_bcast_pkt] = { "in bcast pkts", KSTAT_KV_U_PACKETS,
4936 	    MACREG(ifHCInBroadcastPkts) },
4937 	[bge_stat_fcs_errors] = { "FCS errors", KSTAT_KV_U_NONE,
4938 	    MACREG(dot3StatsFCSErrors) },
4939 	[bge_stat_align_errors] = { "align errors", KSTAT_KV_U_NONE,
4940 	    MACREG(dot3StatsAlignmentErrors) },
4941 	[bge_stat_xon_rcvd] = { "xon rcvd", KSTAT_KV_U_NONE,
4942 	    MACREG(xonPauseFramesReceived) },
4943 	[bge_stat_xoff_rcvd] = { "xoff rcvd", KSTAT_KV_U_NONE,
4944 	    MACREG(xoffPauseFramesReceived) },
4945 	[bge_stat_ctrl_frame_rcvd] = { "ctrlframes rcvd", KSTAT_KV_U_NONE,
4946 	    MACREG(macControlFramesReceived) },
4947 	[bge_stat_xoff_entered] = { "xoff entered", KSTAT_KV_U_NONE,
4948 	    MACREG(xoffStateEntered) },
4949 	[bge_stat_too_long_frames] = { "too long frames", KSTAT_KV_U_NONE,
4950 	    MACREG(dot3StatsFramesTooLong) },
4951 	[bge_stat_jabbers] = { "jabbers", KSTAT_KV_U_NONE,
4952 	    MACREG(etherStatsJabbers) },
4953 	[bge_stat_too_short_pkts] = { "too short pkts", KSTAT_KV_U_NONE,
4954 	    MACREG(etherStatsUndersizePkts) },
4955 
4956 	/* Send Data Initiator stats */
4957 	[bge_stat_dma_rq_full] = { "DMA RQ full", KSTAT_KV_U_NONE,
4958 	    BGE_LOCSTATS_DMA_RQ_FULL },
4959 	[bge_stat_dma_hprq_full] = { "DMA HPRQ full", KSTAT_KV_U_NONE,
4960 	    BGE_LOCSTATS_DMA_HIPRIO_RQ_FULL },
4961 	[bge_stat_sdc_queue_full] = { "SDC queue full", KSTAT_KV_U_NONE,
4962 	    BGE_LOCSTATS_SDC_QUEUE_FULL },
4963 	[bge_stat_nic_sendprod_set] = { "sendprod set", KSTAT_KV_U_NONE,
4964 	    BGE_LOCSTATS_NIC_SENDPROD_SET },
4965 	[bge_stat_status_updated] = { "stats updated", KSTAT_KV_U_NONE,
4966 	    BGE_LOCSTATS_STATS_UPDATED },
4967 	[bge_stat_irqs] = { "irqs", KSTAT_KV_U_NONE, BGE_LOCSTATS_IRQS },
4968 	[bge_stat_avoided_irqs] = { "avoided irqs", KSTAT_KV_U_NONE,
4969 	    BGE_LOCSTATS_AVOIDED_IRQS },
4970 	[bge_stat_tx_thresh_hit] = { "tx thresh hit", KSTAT_KV_U_NONE,
4971 	    BGE_LOCSTATS_TX_THRESH_HIT },
4972 
4973 	/* Receive List Placement stats */
4974 	[bge_stat_filtdrop] = { "filtdrop", KSTAT_KV_U_NONE,
4975 	    BGE_RXLP_LOCSTAT_FILTDROP },
4976 	[bge_stat_dma_wrq_full] = { "DMA WRQ full", KSTAT_KV_U_NONE,
4977 	    BGE_RXLP_LOCSTAT_DMA_WRQ_FULL },
4978 	[bge_stat_dma_hpwrq_full] = { "DMA HPWRQ full", KSTAT_KV_U_NONE,
4979 	    BGE_RXLP_LOCSTAT_DMA_HPWRQ_FULL },
4980 	[bge_stat_out_of_bds] = { "out of BDs", KSTAT_KV_U_NONE,
4981 	    BGE_RXLP_LOCSTAT_OUT_OF_BDS },
4982 	[bge_stat_if_in_drops] = { "if in drops", KSTAT_KV_U_NONE, 0 },
4983 	[bge_stat_if_in_errors] = { "if in errors", KSTAT_KV_U_NONE, 0 },
4984 	[bge_stat_rx_thresh_hit] = { "rx thresh hit", KSTAT_KV_U_NONE,
4985 	    BGE_RXLP_LOCSTAT_RXTHRESH_HIT },
4986 };
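
/*
 * The counters exported above can be inspected from userland with
 * kstat(1), e.g. (hypothetical output):
 *
 *	$ kstat bge0:0:bge-stats:0
 *	bge0:0:bge-stats:0
 *	        out octets: 1234567 bytes
 *	        collisions: 0
 */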
4987 
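/*
 * kstat read callback; the kstat framework calls this with
 * sc->bge_kstat_mtx held.  bge_stats_update_regs() refreshes the
 * softc maintained slots first, then the remaining slots are
 * accumulated straight from the statistics registers.  The use of
 * += assumes the registers are clear-on-read.
 */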
4988 int
4989 bge_kstat_read(struct kstat *ks)
4990 {
4991 	struct bge_softc *sc = ks->ks_softc;
4992 	struct kstat_kv *kvs = ks->ks_data;
4993 	int i;
4994 
4995 	bge_stats_update_regs(sc);
4996 
4997 	for (i = 0; i < nitems(bge_kstat_tpl); i++) {
4998 		if (bge_kstat_tpl[i].reg != 0)
4999 			kstat_kv_u32(kvs) += CSR_READ_4(sc,
5000 			    bge_kstat_tpl[i].reg);
5001 		kvs++;
5002 	}
5003 
5004 	getnanouptime(&ks->ks_updated);
5005 	return 0;
5006 }
5007 
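/*
 * Create and install the "bge-stats" kstat.  The key/value array is
 * sized and initialized from bge_kstat_tpl and becomes ks_data.
 * This assumes sc->bge_kstat_mtx has already been initialized
 * elsewhere in attach before it is passed to kstat_set_mutex().
 */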
5008 void
5009 bge_kstat_attach(struct bge_softc *sc)
5010 {
5011 	struct kstat *ks;
5012 	struct kstat_kv *kvs;
5013 	int i;
5014
5016 	ks = kstat_create(sc->bge_dev.dv_xname, 0, "bge-stats", 0,
5017 	    KSTAT_T_KV, 0);
5018 	if (ks == NULL)
5019 		return;
5020 
5021 	kvs = mallocarray(nitems(bge_kstat_tpl), sizeof(*kvs), M_DEVBUF,
5022 	    M_ZERO | M_WAITOK);
5023 	for (i = 0; i < nitems(bge_kstat_tpl); i++) {
5024 		const struct bge_stat *tpl = &bge_kstat_tpl[i];
5025 		kstat_kv_unit_init(&kvs[i], tpl->name, KSTAT_KV_T_UINT32,
5026 		    tpl->unit);
5027 	}
5028 
5029 	kstat_set_mutex(ks, &sc->bge_kstat_mtx);
5030 	ks->ks_softc = sc;
5031 	ks->ks_data = kvs;
5032 	ks->ks_datalen = nitems(bge_kstat_tpl) * sizeof(*kvs);
5033 	ks->ks_read = bge_kstat_read;
5034 
5035 	sc->bge_kstat = ks;
5036 	kstat_install(ks);
5037 }
5038 #endif /* NKSTAT > 0 */
5039