xref: /dragonfly/sys/dev/netif/bge/if_bge.c (revision a32bc35d)
1 /*
2  * Copyright (c) 2001 Wind River Systems
3  * Copyright (c) 1997, 1998, 1999, 2001
4  *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. All advertising materials mentioning features or use of this software
15  *    must display the following acknowledgement:
16  *	This product includes software developed by Bill Paul.
17  * 4. Neither the name of the author nor the names of any co-contributors
18  *    may be used to endorse or promote products derived from this software
19  *    without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
22  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
25  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31  * THE POSSIBILITY OF SUCH DAMAGE.
32  *
33  * $FreeBSD: src/sys/dev/bge/if_bge.c,v 1.3.2.39 2005/07/03 03:41:18 silby Exp $
34  */
35 
36 /*
37  * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
38  *
39  * Written by Bill Paul <wpaul@windriver.com>
40  * Senior Engineer, Wind River Systems
41  */
42 
43 /*
44  * The Broadcom BCM5700 is based on technology originally developed by
45  * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
46  * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
47  * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
48  * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
49  * frames, highly configurable RX filtering, and 16 RX and TX queues
50  * (which, along with RX filter rules, can be used for QOS applications).
51  * Other features, such as TCP segmentation, may be available as part
52  * of value-added firmware updates. Unlike the Tigon I and Tigon II,
53  * firmware images can be stored in hardware and need not be compiled
54  * into the driver.
55  *
56  * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
57  * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
58  *
59  * The BCM5701 is a single-chip solution incorporating both the BCM5700
60  * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
61  * does not support external SSRAM.
62  *
63  * Broadcom also produces a variation of the BCM5700 under the "Altima"
64  * brand name, which is functionally similar but lacks PCI-X support.
65  *
66  * Without external SSRAM, you can only have at most 4 TX rings,
67  * and the use of the mini RX ring is disabled. This seems to imply
68  * that these features are simply not available on the BCM5701. As a
69  * result, this driver does not implement any support for the mini RX
70  * ring.
71  */
72 
73 #include "opt_polling.h"
74 
75 #include <sys/param.h>
76 #include <sys/bus.h>
77 #include <sys/endian.h>
78 #include <sys/kernel.h>
79 #include <sys/ktr.h>
80 #include <sys/interrupt.h>
81 #include <sys/mbuf.h>
82 #include <sys/malloc.h>
83 #include <sys/queue.h>
84 #include <sys/rman.h>
85 #include <sys/serialize.h>
86 #include <sys/socket.h>
87 #include <sys/sockio.h>
88 #include <sys/sysctl.h>
89 
90 #include <net/bpf.h>
91 #include <net/ethernet.h>
92 #include <net/if.h>
93 #include <net/if_arp.h>
94 #include <net/if_dl.h>
95 #include <net/if_media.h>
96 #include <net/if_types.h>
97 #include <net/ifq_var.h>
98 #include <net/vlan/if_vlan_var.h>
99 #include <net/vlan/if_vlan_ether.h>
100 
101 #include <dev/netif/mii_layer/mii.h>
102 #include <dev/netif/mii_layer/miivar.h>
103 #include <dev/netif/mii_layer/brgphyreg.h>
104 
105 #include <bus/pci/pcidevs.h>
106 #include <bus/pci/pcireg.h>
107 #include <bus/pci/pcivar.h>
108 
109 #include <dev/netif/bge/if_bgereg.h>
110 
111 /* "device miibus" required.  See GENERIC if you get errors here. */
112 #include "miibus_if.h"
113 
114 #define BGE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
115 #define BGE_MIN_FRAME		60
116 
117 static const struct bge_type bge_devs[] = {
118 	{ PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3C996,
119 		"3COM 3C996 Gigabit Ethernet" },
120 
121 	{ PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_BCM5700,
122 		"Alteon BCM5700 Gigabit Ethernet" },
123 	{ PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_BCM5701,
124 		"Alteon BCM5701 Gigabit Ethernet" },
125 
126 	{ PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1000,
127 		"Altima AC1000 Gigabit Ethernet" },
128 	{ PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1001,
129 		"Altima AC1002 Gigabit Ethernet" },
130 	{ PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC9100,
131 		"Altima AC9100 Gigabit Ethernet" },
132 
133 	{ PCI_VENDOR_APPLE, PCI_PRODUCT_APPLE_BCM5701,
134 		"Apple BCM5701 Gigabit Ethernet" },
135 
136 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5700,
137 		"Broadcom BCM5700 Gigabit Ethernet" },
138 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5701,
139 		"Broadcom BCM5701 Gigabit Ethernet" },
140 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702,
141 		"Broadcom BCM5702 Gigabit Ethernet" },
142 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702X,
143 		"Broadcom BCM5702X Gigabit Ethernet" },
144 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702_ALT,
145 		"Broadcom BCM5702 Gigabit Ethernet" },
146 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703,
147 		"Broadcom BCM5703 Gigabit Ethernet" },
148 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703X,
149 		"Broadcom BCM5703X Gigabit Ethernet" },
150 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703A3,
151 		"Broadcom BCM5703 Gigabit Ethernet" },
152 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704C,
153 		"Broadcom BCM5704C Dual Gigabit Ethernet" },
154 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704S,
155 		"Broadcom BCM5704S Dual Gigabit Ethernet" },
156 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704S_ALT,
157 		"Broadcom BCM5704S Dual Gigabit Ethernet" },
158 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705,
159 		"Broadcom BCM5705 Gigabit Ethernet" },
160 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705F,
161 		"Broadcom BCM5705F Gigabit Ethernet" },
162 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705K,
163 		"Broadcom BCM5705K Gigabit Ethernet" },
164 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705M,
165 		"Broadcom BCM5705M Gigabit Ethernet" },
166 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705M_ALT,
167 		"Broadcom BCM5705M Gigabit Ethernet" },
168 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5714,
169 		"Broadcom BCM5714C Gigabit Ethernet" },
170 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5714S,
171 		"Broadcom BCM5714S Gigabit Ethernet" },
172 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5715,
173 		"Broadcom BCM5715 Gigabit Ethernet" },
174 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5715S,
175 		"Broadcom BCM5715S Gigabit Ethernet" },
176 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5720,
177 		"Broadcom BCM5720 Gigabit Ethernet" },
178 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5721,
179 		"Broadcom BCM5721 Gigabit Ethernet" },
180 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5722,
181 		"Broadcom BCM5722 Gigabit Ethernet" },
182 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5723,
183 		"Broadcom BCM5723 Gigabit Ethernet" },
184 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5750,
185 		"Broadcom BCM5750 Gigabit Ethernet" },
186 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5750M,
187 		"Broadcom BCM5750M Gigabit Ethernet" },
188 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751,
189 		"Broadcom BCM5751 Gigabit Ethernet" },
190 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751F,
191 		"Broadcom BCM5751F Gigabit Ethernet" },
192 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751M,
193 		"Broadcom BCM5751M Gigabit Ethernet" },
194 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5752,
195 		"Broadcom BCM5752 Gigabit Ethernet" },
196 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5752M,
197 		"Broadcom BCM5752M Gigabit Ethernet" },
198 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5753,
199 		"Broadcom BCM5753 Gigabit Ethernet" },
200 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5753F,
201 		"Broadcom BCM5753F Gigabit Ethernet" },
202 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5753M,
203 		"Broadcom BCM5753M Gigabit Ethernet" },
204 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5754,
205 		"Broadcom BCM5754 Gigabit Ethernet" },
206 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5754M,
207 		"Broadcom BCM5754M Gigabit Ethernet" },
208 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5755,
209 		"Broadcom BCM5755 Gigabit Ethernet" },
210 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5755M,
211 		"Broadcom BCM5755M Gigabit Ethernet" },
212 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5756,
213 		"Broadcom BCM5756 Gigabit Ethernet" },
214 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5761,
215 		"Broadcom BCM5761 Gigabit Ethernet" },
216 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5761E,
217 		"Broadcom BCM5761E Gigabit Ethernet" },
218 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5761S,
219 		"Broadcom BCM5761S Gigabit Ethernet" },
220 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5761SE,
221 		"Broadcom BCM5761SE Gigabit Ethernet" },
222 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5764,
223 		"Broadcom BCM5764 Gigabit Ethernet" },
224 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5780,
225 		"Broadcom BCM5780 Gigabit Ethernet" },
226 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5780S,
227 		"Broadcom BCM5780S Gigabit Ethernet" },
228 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5781,
229 		"Broadcom BCM5781 Gigabit Ethernet" },
230 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5782,
231 		"Broadcom BCM5782 Gigabit Ethernet" },
232 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5784,
233 		"Broadcom BCM5784 Gigabit Ethernet" },
234 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5785F,
235 		"Broadcom BCM5785F Gigabit Ethernet" },
236 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5785G,
237 		"Broadcom BCM5785G Gigabit Ethernet" },
238 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5786,
239 		"Broadcom BCM5786 Gigabit Ethernet" },
240 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5787,
241 		"Broadcom BCM5787 Gigabit Ethernet" },
242 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5787F,
243 		"Broadcom BCM5787F Gigabit Ethernet" },
244 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5787M,
245 		"Broadcom BCM5787M Gigabit Ethernet" },
246 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5788,
247 		"Broadcom BCM5788 Gigabit Ethernet" },
248 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5789,
249 		"Broadcom BCM5789 Gigabit Ethernet" },
250 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5901,
251 		"Broadcom BCM5901 Fast Ethernet" },
252 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5901A2,
253 		"Broadcom BCM5901A2 Fast Ethernet" },
254 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5903M,
255 		"Broadcom BCM5903M Fast Ethernet" },
256 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5906,
257 		"Broadcom BCM5906 Fast Ethernet"},
258 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5906M,
259 		"Broadcom BCM5906M Fast Ethernet"},
260 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57760,
261 		"Broadcom BCM57760 Gigabit Ethernet"},
262 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57780,
263 		"Broadcom BCM57780 Gigabit Ethernet"},
264 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57788,
265 		"Broadcom BCM57788 Gigabit Ethernet"},
266 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57790,
267 		"Broadcom BCM57790 Gigabit Ethernet"},
268 	{ PCI_VENDOR_SCHNEIDERKOCH, PCI_PRODUCT_SCHNEIDERKOCH_SK_9DX1,
269 		"SysKonnect Gigabit Ethernet" },
270 
271 	{ 0, 0, NULL }
272 };
273 
274 #define BGE_IS_JUMBO_CAPABLE(sc)	((sc)->bge_flags & BGE_FLAG_JUMBO)
275 #define BGE_IS_5700_FAMILY(sc)		((sc)->bge_flags & BGE_FLAG_5700_FAMILY)
276 #define BGE_IS_5705_PLUS(sc)		((sc)->bge_flags & BGE_FLAG_5705_PLUS)
277 #define BGE_IS_5714_FAMILY(sc)		((sc)->bge_flags & BGE_FLAG_5714_FAMILY)
278 #define BGE_IS_575X_PLUS(sc)		((sc)->bge_flags & BGE_FLAG_575X_PLUS)
279 #define BGE_IS_5755_PLUS(sc)		((sc)->bge_flags & BGE_FLAG_5755_PLUS)
280 
281 typedef int	(*bge_eaddr_fcn_t)(struct bge_softc *, uint8_t[]);
282 
283 static int	bge_probe(device_t);
284 static int	bge_attach(device_t);
285 static int	bge_detach(device_t);
286 static void	bge_txeof(struct bge_softc *);
287 static void	bge_rxeof(struct bge_softc *);
288 
289 static void	bge_tick(void *);
290 static void	bge_stats_update(struct bge_softc *);
291 static void	bge_stats_update_regs(struct bge_softc *);
292 static int	bge_encap(struct bge_softc *, struct mbuf **, uint32_t *);
293 
294 #ifdef DEVICE_POLLING
295 static void	bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count);
296 #endif
297 static void	bge_intr(void *);
298 static void	bge_enable_intr(struct bge_softc *);
299 static void	bge_disable_intr(struct bge_softc *);
300 static void	bge_start(struct ifnet *);
301 static int	bge_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
302 static void	bge_init(void *);
303 static void	bge_stop(struct bge_softc *);
304 static void	bge_watchdog(struct ifnet *);
305 static void	bge_shutdown(device_t);
306 static int	bge_suspend(device_t);
307 static int	bge_resume(device_t);
308 static int	bge_ifmedia_upd(struct ifnet *);
309 static void	bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
310 
311 static uint8_t	bge_nvram_getbyte(struct bge_softc *, int, uint8_t *);
312 static int	bge_read_nvram(struct bge_softc *, caddr_t, int, int);
313 
314 static uint8_t	bge_eeprom_getbyte(struct bge_softc *, uint32_t, uint8_t *);
315 static int	bge_read_eeprom(struct bge_softc *, caddr_t, uint32_t, size_t);
316 
317 static void	bge_setmulti(struct bge_softc *);
318 static void	bge_setpromisc(struct bge_softc *);
319 
320 static int	bge_alloc_jumbo_mem(struct bge_softc *);
321 static void	bge_free_jumbo_mem(struct bge_softc *);
322 static struct bge_jslot
323 		*bge_jalloc(struct bge_softc *);
324 static void	bge_jfree(void *);
325 static void	bge_jref(void *);
326 static int	bge_newbuf_std(struct bge_softc *, int, int);
327 static int	bge_newbuf_jumbo(struct bge_softc *, int, int);
328 static void	bge_setup_rxdesc_std(struct bge_softc *, int);
329 static void	bge_setup_rxdesc_jumbo(struct bge_softc *, int);
330 static int	bge_init_rx_ring_std(struct bge_softc *);
331 static void	bge_free_rx_ring_std(struct bge_softc *);
332 static int	bge_init_rx_ring_jumbo(struct bge_softc *);
333 static void	bge_free_rx_ring_jumbo(struct bge_softc *);
334 static void	bge_free_tx_ring(struct bge_softc *);
335 static int	bge_init_tx_ring(struct bge_softc *);
336 
337 static int	bge_chipinit(struct bge_softc *);
338 static int	bge_blockinit(struct bge_softc *);
339 
340 static uint32_t	bge_readmem_ind(struct bge_softc *, uint32_t);
341 static void	bge_writemem_ind(struct bge_softc *, uint32_t, uint32_t);
342 #ifdef notdef
343 static uint32_t	bge_readreg_ind(struct bge_softc *, uint32_t);
344 #endif
345 static void	bge_writereg_ind(struct bge_softc *, uint32_t, uint32_t);
346 static void	bge_writemem_direct(struct bge_softc *, uint32_t, uint32_t);
347 static void	bge_writembx(struct bge_softc *, int, int);
348 
349 static int	bge_miibus_readreg(device_t, int, int);
350 static int	bge_miibus_writereg(device_t, int, int, int);
351 static void	bge_miibus_statchg(device_t);
352 static void	bge_bcm5700_link_upd(struct bge_softc *, uint32_t);
353 static void	bge_tbi_link_upd(struct bge_softc *, uint32_t);
354 static void	bge_copper_link_upd(struct bge_softc *, uint32_t);
355 
356 static void	bge_reset(struct bge_softc *);
357 
358 static int	bge_dma_alloc(struct bge_softc *);
359 static void	bge_dma_free(struct bge_softc *);
360 static int	bge_dma_block_alloc(struct bge_softc *, bus_size_t,
361 				    bus_dma_tag_t *, bus_dmamap_t *,
362 				    void **, bus_addr_t *);
363 static void	bge_dma_block_free(bus_dma_tag_t, bus_dmamap_t, void *);
364 
365 static int	bge_get_eaddr_mem(struct bge_softc *, uint8_t[]);
366 static int	bge_get_eaddr_nvram(struct bge_softc *, uint8_t[]);
367 static int	bge_get_eaddr_eeprom(struct bge_softc *, uint8_t[]);
368 static int	bge_get_eaddr(struct bge_softc *, uint8_t[]);
369 
370 static void	bge_coal_change(struct bge_softc *);
371 static int	bge_sysctl_rx_coal_ticks(SYSCTL_HANDLER_ARGS);
372 static int	bge_sysctl_tx_coal_ticks(SYSCTL_HANDLER_ARGS);
373 static int	bge_sysctl_rx_max_coal_bds(SYSCTL_HANDLER_ARGS);
374 static int	bge_sysctl_tx_max_coal_bds(SYSCTL_HANDLER_ARGS);
375 static int	bge_sysctl_coal_chg(SYSCTL_HANDLER_ARGS, uint32_t *, uint32_t);
376 
377 /*
378  * Set following tunable to 1 for some IBM blade servers with the DNLK
379  * switch module. Auto negotiation is broken for those configurations.
380  */
381 static int	bge_fake_autoneg = 0;
382 TUNABLE_INT("hw.bge.fake_autoneg", &bge_fake_autoneg);
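/*
 * Example: to enable this workaround, add the following line to
 * /boot/loader.conf:
 *	hw.bge.fake_autoneg="1"
 */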
383 
384 /* Interrupt moderation control variables. */
385 static int	bge_rx_coal_ticks = 100;	/* usec */
386 static int	bge_tx_coal_ticks = 1023;	/* usec */
387 static int	bge_rx_max_coal_bds = 80;
388 static int	bge_tx_max_coal_bds = 128;
389 
390 TUNABLE_INT("hw.bge.rx_coal_ticks", &bge_rx_coal_ticks);
391 TUNABLE_INT("hw.bge.tx_coal_ticks", &bge_tx_coal_ticks);
392 TUNABLE_INT("hw.bge.rx_max_coal_bds", &bge_rx_max_coal_bds);
393 TUNABLE_INT("hw.bge.tx_max_coal_bds", &bge_tx_max_coal_bds);
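/*
 * Example: the defaults above can be overridden from /boot/loader.conf,
 * e.g. to batch more RX work per interrupt (values are illustrative only):
 *	hw.bge.rx_coal_ticks="150"
 *	hw.bge.rx_max_coal_bds="120"
 * The same parameters are also adjustable at runtime through the
 * bge_sysctl_* handlers declared above.
 */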
394 
395 #if !defined(KTR_IF_BGE)
396 #define KTR_IF_BGE	KTR_ALL
397 #endif
398 KTR_INFO_MASTER(if_bge);
399 KTR_INFO(KTR_IF_BGE, if_bge, intr, 0, "intr");
400 KTR_INFO(KTR_IF_BGE, if_bge, rx_pkt, 1, "rx_pkt");
401 KTR_INFO(KTR_IF_BGE, if_bge, tx_pkt, 2, "tx_pkt");
402 #define logif(name)	KTR_LOG(if_bge_ ## name)
403 
404 static device_method_t bge_methods[] = {
405 	/* Device interface */
406 	DEVMETHOD(device_probe,		bge_probe),
407 	DEVMETHOD(device_attach,	bge_attach),
408 	DEVMETHOD(device_detach,	bge_detach),
409 	DEVMETHOD(device_shutdown,	bge_shutdown),
410 	DEVMETHOD(device_suspend,	bge_suspend),
411 	DEVMETHOD(device_resume,	bge_resume),
412 
413 	/* bus interface */
414 	DEVMETHOD(bus_print_child,	bus_generic_print_child),
415 	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
416 
417 	/* MII interface */
418 	DEVMETHOD(miibus_readreg,	bge_miibus_readreg),
419 	DEVMETHOD(miibus_writereg,	bge_miibus_writereg),
420 	DEVMETHOD(miibus_statchg,	bge_miibus_statchg),
421 
422 	{ 0, 0 }
423 };
424 
425 static DEFINE_CLASS_0(bge, bge_driver, bge_methods, sizeof(struct bge_softc));
426 static devclass_t bge_devclass;
427 
428 DECLARE_DUMMY_MODULE(if_bge);
429 DRIVER_MODULE(if_bge, pci, bge_driver, bge_devclass, NULL, NULL);
430 DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, NULL, NULL);
431 
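/*
 * Indirect access to NIC-internal memory: point the PCI memory window
 * at the requested offset, access the window data register, then reset
 * the window base so other code sees a consistent default.
 */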
432 static uint32_t
433 bge_readmem_ind(struct bge_softc *sc, uint32_t off)
434 {
435 	device_t dev = sc->bge_dev;
436 	uint32_t val;
437 
438 	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
439 	val = pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4);
440 	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
441 	return (val);
442 }
443 
444 static void
445 bge_writemem_ind(struct bge_softc *sc, uint32_t off, uint32_t val)
446 {
447 	device_t dev = sc->bge_dev;
448 
449 	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
450 	pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
451 	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
452 }
453 
454 #ifdef notdef
455 static uint32_t
456 bge_readreg_ind(struct bge_softc *sc, uint32_t off)
457 {
458 	device_t dev = sc->bge_dev;
459 
460 	pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
461 	return(pci_read_config(dev, BGE_PCI_REG_DATA, 4));
462 }
463 #endif
464 
465 static void
466 bge_writereg_ind(struct bge_softc *sc, uint32_t off, uint32_t val)
467 {
468 	device_t dev = sc->bge_dev;
469 
470 	pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
471 	pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
472 }
473 
474 static void
475 bge_writemem_direct(struct bge_softc *sc, uint32_t off, uint32_t val)
476 {
477 	CSR_WRITE_4(sc, off, val);
478 }
479 
480 static void
481 bge_writembx(struct bge_softc *sc, int off, int val)
482 {
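	/*
	 * The BCM5906 expects mailbox writes in the low-priority
	 * (BGE_LPMBX) register range, so remap the offset for that chip.
	 */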
483 	if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
484 		off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI;
485 
486 	CSR_WRITE_4(sc, off, val);
487 }
488 
489 static uint8_t
490 bge_nvram_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
491 {
492 	uint32_t access, byte = 0;
493 	int i;
494 
495 	/* Lock. */
496 	CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
497 	for (i = 0; i < 8000; i++) {
498 		if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1)
499 			break;
500 		DELAY(20);
501 	}
502 	if (i == 8000)
503 		return (1);
504 
505 	/* Enable access. */
506 	access = CSR_READ_4(sc, BGE_NVRAM_ACCESS);
507 	CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE);
508 
509 	CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc);
510 	CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD);
511 	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
512 		DELAY(10);
513 		if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) {
514 			DELAY(10);
515 			break;
516 		}
517 	}
518 
519 	if (i == BGE_TIMEOUT * 10) {
520 		if_printf(&sc->arpcom.ac_if, "nvram read timed out\n");
521 		return (1);
522 	}
523 
524 	/* Get result. */
525 	byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA);
526 
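	/*
	 * The 32-bit word read from BGE_NVRAM_RDDATA is byte-swapped and
	 * the byte selected by the low two address bits is shifted out.
	 */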
527 	*dest = (bswap32(byte) >> ((addr % 4) * 8)) & 0xFF;
528 
529 	/* Disable access. */
530 	CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access);
531 
532 	/* Unlock. */
533 	CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1);
534 	CSR_READ_4(sc, BGE_NVRAM_SWARB);
535 
536 	return (0);
537 }
538 
539 /*
540  * Read a sequence of bytes from NVRAM.
541  */
542 static int
543 bge_read_nvram(struct bge_softc *sc, caddr_t dest, int off, int cnt)
544 {
545 	int err = 0, i;
546 	uint8_t byte = 0;
547 
548 	if (sc->bge_asicrev != BGE_ASICREV_BCM5906)
549 		return (1);
550 
551 	for (i = 0; i < cnt; i++) {
552 		err = bge_nvram_getbyte(sc, off + i, &byte);
553 		if (err)
554 			break;
555 		*(dest + i) = byte;
556 	}
557 
558 	return (err ? 1 : 0);
559 }
560 
561 /*
562  * Read a byte of data stored in the EEPROM at address 'addr.' The
563  * BCM570x supports both the traditional bitbang interface and an
564  * auto access interface for reading the EEPROM. We use the auto
565  * access method.
566  */
567 static uint8_t
568 bge_eeprom_getbyte(struct bge_softc *sc, uint32_t addr, uint8_t *dest)
569 {
570 	int i;
571 	uint32_t byte = 0;
572 
573 	/*
574 	 * Enable use of auto EEPROM access so we can avoid
575 	 * having to use the bitbang method.
576 	 */
577 	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);
578 
579 	/* Reset the EEPROM, load the clock period. */
580 	CSR_WRITE_4(sc, BGE_EE_ADDR,
581 	    BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
582 	DELAY(20);
583 
584 	/* Issue the read EEPROM command. */
585 	CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);
586 
587 	/* Wait for completion */
588 	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
589 		DELAY(10);
590 		if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
591 			break;
592 	}
593 
594 	if (i == BGE_TIMEOUT * 10) {
595 		if_printf(&sc->arpcom.ac_if, "eeprom read timed out\n");
596 		return(1);
597 	}
598 
599 	/* Get result. */
600 	byte = CSR_READ_4(sc, BGE_EE_DATA);
601 
602 	*dest = (byte >> ((addr % 4) * 8)) & 0xFF;
603 
604 	return(0);
605 }
606 
607 /*
608  * Read a sequence of bytes from the EEPROM.
609  */
610 static int
611 bge_read_eeprom(struct bge_softc *sc, caddr_t dest, uint32_t off, size_t len)
612 {
613 	size_t i;
614 	int err;
615 	uint8_t byte;
616 
617 	for (byte = 0, err = 0, i = 0; i < len; i++) {
618 		err = bge_eeprom_getbyte(sc, off + i, &byte);
619 		if (err)
620 			break;
621 		*(dest + i) = byte;
622 	}
623 
624 	return(err ? 1 : 0);
625 }
626 
627 static int
628 bge_miibus_readreg(device_t dev, int phy, int reg)
629 {
630 	struct bge_softc *sc = device_get_softc(dev);
631 	struct ifnet *ifp = &sc->arpcom.ac_if;
632 	uint32_t val, autopoll;
633 	int i;
634 
635 	/*
636 	 * Broadcom's own driver always assumes the internal
637 	 * PHY is at GMII address 1. On some chips, the PHY responds
638 	 * to accesses at all addresses, which could cause us to
639  * bogusly attach the PHY 32 times at probe time. Always
640  * restricting the lookup to address 1 is simpler than
641  * trying to figure out which chip revisions should be
642 	 * special-cased.
643 	 */
644 	if (phy != 1)
645 		return(0);
646 
647 	/* Reading with autopolling on may trigger PCI errors */
648 	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
649 	if (autopoll & BGE_MIMODE_AUTOPOLL) {
650 		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
651 		DELAY(40);
652 	}
653 
654 	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY|
655 	    BGE_MIPHY(phy)|BGE_MIREG(reg));
656 
657 	for (i = 0; i < BGE_TIMEOUT; i++) {
658 		DELAY(10);
659 		val = CSR_READ_4(sc, BGE_MI_COMM);
660 		if (!(val & BGE_MICOMM_BUSY))
661 			break;
662 	}
663 
664 	if (i == BGE_TIMEOUT) {
665 		if_printf(ifp, "PHY read timed out "
666 			  "(phy %d, reg %d, val 0x%08x)\n", phy, reg, val);
667 		val = 0;
668 		goto done;
669 	}
670 
671 	DELAY(5);
672 	val = CSR_READ_4(sc, BGE_MI_COMM);
673 
674 done:
675 	if (autopoll & BGE_MIMODE_AUTOPOLL) {
676 		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
677 		DELAY(40);
678 	}
679 
680 	if (val & BGE_MICOMM_READFAIL)
681 		return(0);
682 
683 	return(val & 0xFFFF);
684 }
685 
686 static int
687 bge_miibus_writereg(device_t dev, int phy, int reg, int val)
688 {
689 	struct bge_softc *sc = device_get_softc(dev);
690 	uint32_t autopoll;
691 	int i;
692 
693 	/*
694 	 * See the related comment in bge_miibus_readreg()
695 	 */
696 	if (phy != 1)
697 		return(0);
698 
699 	if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
700 	    (reg == BRGPHY_MII_1000CTL || reg == BRGPHY_MII_AUXCTL))
701 	       return(0);
702 
703 	/* Reading with autopolling on may trigger PCI errors */
704 	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
705 	if (autopoll & BGE_MIMODE_AUTOPOLL) {
706 		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
707 		DELAY(40);
708 	}
709 
710 	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY|
711 	    BGE_MIPHY(phy)|BGE_MIREG(reg)|val);
712 
713 	for (i = 0; i < BGE_TIMEOUT; i++) {
714 		DELAY(10);
715 		if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) {
716 			DELAY(5);
717 			CSR_READ_4(sc, BGE_MI_COMM); /* dummy read */
718 			break;
719 		}
720 	}
721 
722 	if (autopoll & BGE_MIMODE_AUTOPOLL) {
723 		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
724 		DELAY(40);
725 	}
726 
727 	if (i == BGE_TIMEOUT) {
728 		if_printf(&sc->arpcom.ac_if, "PHY write timed out "
729 			  "(phy %d, reg %d, val %d)\n", phy, reg, val);
730 		return(0);
731 	}
732 
733 	return(0);
734 }
735 
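/*
 * miibus link-state change callback: reprogram the MAC port mode
 * (GMII vs. MII) and half/full duplex bits to match the media that
 * the PHY reports as currently active.
 */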
736 static void
737 bge_miibus_statchg(device_t dev)
738 {
739 	struct bge_softc *sc;
740 	struct mii_data *mii;
741 
742 	sc = device_get_softc(dev);
743 	mii = device_get_softc(sc->bge_miibus);
744 
745 	BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
746 	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
747 		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
748 	} else {
749 		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
750 	}
751 
752 	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
753 		BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
754 	} else {
755 		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
756 	}
757 }
758 
759 /*
760  * Memory management for jumbo frames.
761  */
762 static int
763 bge_alloc_jumbo_mem(struct bge_softc *sc)
764 {
765 	struct ifnet *ifp = &sc->arpcom.ac_if;
766 	struct bge_jslot *entry;
767 	uint8_t *ptr;
768 	bus_addr_t paddr;
769 	int i, error;
770 
771 	/*
772 	 * Create tag for jumbo mbufs.
773 	 * This is really a bit of a kludge. We allocate a special
774 	 * jumbo buffer pool which (thanks to the way our DMA
775 	 * memory allocation works) will consist of contiguous
776 	 * pages. This means that even though a jumbo buffer might
777 	 * be larger than a page size, we don't really need to
778 	 * map it into more than one DMA segment. However, the
779 	 * default mbuf tag will result in multi-segment mappings,
780 	 * so we have to create a special jumbo mbuf tag that
781 	 * lets us get away with mapping the jumbo buffers as
782 	 * a single segment. I think eventually the driver should
783 	 * be changed so that it uses ordinary mbufs and cluster
784 	 * buffers, i.e. jumbo frames can span multiple DMA
785 	 * descriptors. But that's a project for another day.
786 	 */
787 
788 	/*
789 	 * Create DMA stuffs for jumbo RX ring.
790 	 */
791 	error = bge_dma_block_alloc(sc, BGE_JUMBO_RX_RING_SZ,
792 				    &sc->bge_cdata.bge_rx_jumbo_ring_tag,
793 				    &sc->bge_cdata.bge_rx_jumbo_ring_map,
794 				    (void *)&sc->bge_ldata.bge_rx_jumbo_ring,
795 				    &sc->bge_ldata.bge_rx_jumbo_ring_paddr);
796 	if (error) {
797 		if_printf(ifp, "could not create jumbo RX ring\n");
798 		return error;
799 	}
800 
801 	/*
802 	 * Create DMA stuffs for jumbo buffer block.
803 	 */
804 	error = bge_dma_block_alloc(sc, BGE_JMEM,
805 				    &sc->bge_cdata.bge_jumbo_tag,
806 				    &sc->bge_cdata.bge_jumbo_map,
807 				    (void **)&sc->bge_ldata.bge_jumbo_buf,
808 				    &paddr);
809 	if (error) {
810 		if_printf(ifp, "could not create jumbo buffer\n");
811 		return error;
812 	}
813 
814 	SLIST_INIT(&sc->bge_jfree_listhead);
815 
816 	/*
817 	 * Now divide it up into 9K pieces and save the addresses
818 	 * in an array. Note that we play an evil trick here by using
819  * the first few bytes in the buffer to hold the address
820 	 * of the softc structure for this interface. This is because
821 	 * bge_jfree() needs it, but it is called by the mbuf management
822 	 * code which will not pass it to us explicitly.
823 	 */
824 	for (i = 0, ptr = sc->bge_ldata.bge_jumbo_buf; i < BGE_JSLOTS; i++) {
825 		entry = &sc->bge_cdata.bge_jslots[i];
826 		entry->bge_sc = sc;
827 		entry->bge_buf = ptr;
828 		entry->bge_paddr = paddr;
829 		entry->bge_inuse = 0;
830 		entry->bge_slot = i;
831 		SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, entry, jslot_link);
832 
833 		ptr += BGE_JLEN;
834 		paddr += BGE_JLEN;
835 	}
836 	return 0;
837 }
838 
839 static void
840 bge_free_jumbo_mem(struct bge_softc *sc)
841 {
842 	/* Destroy jumbo RX ring. */
843 	bge_dma_block_free(sc->bge_cdata.bge_rx_jumbo_ring_tag,
844 			   sc->bge_cdata.bge_rx_jumbo_ring_map,
845 			   sc->bge_ldata.bge_rx_jumbo_ring);
846 
847 	/* Destroy jumbo buffer block. */
848 	bge_dma_block_free(sc->bge_cdata.bge_jumbo_tag,
849 			   sc->bge_cdata.bge_jumbo_map,
850 			   sc->bge_ldata.bge_jumbo_buf);
851 }
852 
853 /*
854  * Allocate a jumbo buffer.
855  */
856 static struct bge_jslot *
857 bge_jalloc(struct bge_softc *sc)
858 {
859 	struct bge_jslot *entry;
860 
861 	lwkt_serialize_enter(&sc->bge_jslot_serializer);
862 	entry = SLIST_FIRST(&sc->bge_jfree_listhead);
863 	if (entry) {
864 		SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jslot_link);
865 		entry->bge_inuse = 1;
866 	} else {
867 		if_printf(&sc->arpcom.ac_if, "no free jumbo buffers\n");
868 	}
869 	lwkt_serialize_exit(&sc->bge_jslot_serializer);
870 	return(entry);
871 }
872 
873 /*
874  * Adjust usage count on a jumbo buffer.
875  */
876 static void
877 bge_jref(void *arg)
878 {
879 	struct bge_jslot *entry = (struct bge_jslot *)arg;
880 	struct bge_softc *sc = entry->bge_sc;
881 
882 	if (sc == NULL)
883 		panic("bge_jref: can't find softc pointer!");
884 
885 	if (&sc->bge_cdata.bge_jslots[entry->bge_slot] != entry) {
886 		panic("bge_jref: asked to reference buffer "
887 		    "that we don't manage!");
888 	} else if (entry->bge_inuse == 0) {
889 		panic("bge_jref: buffer already free!");
890 	} else {
891 		atomic_add_int(&entry->bge_inuse, 1);
892 	}
893 }
894 
895 /*
896  * Release a jumbo buffer.
897  */
898 static void
899 bge_jfree(void *arg)
900 {
901 	struct bge_jslot *entry = (struct bge_jslot *)arg;
902 	struct bge_softc *sc = entry->bge_sc;
903 
904 	if (sc == NULL)
905 		panic("bge_jfree: can't find softc pointer!");
906 
907 	if (&sc->bge_cdata.bge_jslots[entry->bge_slot] != entry) {
908 		panic("bge_jfree: asked to free buffer that we don't manage!");
909 	} else if (entry->bge_inuse == 0) {
910 		panic("bge_jfree: buffer already free!");
911 	} else {
912 		/*
913 		 * Possible MP race to 0, use the serializer.  The atomic insn
914 		 * is still needed for races against bge_jref().
915 		 */
916 		lwkt_serialize_enter(&sc->bge_jslot_serializer);
917 		atomic_subtract_int(&entry->bge_inuse, 1);
918 		if (entry->bge_inuse == 0) {
919 			SLIST_INSERT_HEAD(&sc->bge_jfree_listhead,
920 					  entry, jslot_link);
921 		}
922 		lwkt_serialize_exit(&sc->bge_jslot_serializer);
923 	}
924 }
925 
926 
927 /*
928  * Initialize a standard receive ring descriptor.
929  */
930 static int
931 bge_newbuf_std(struct bge_softc *sc, int i, int init)
932 {
933 	struct mbuf *m_new = NULL;
934 	bus_dma_segment_t seg;
935 	bus_dmamap_t map;
936 	int error, nsegs;
937 
938 	m_new = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
939 	if (m_new == NULL)
940 		return ENOBUFS;
941 	m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
942 
943 	if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
944 		m_adj(m_new, ETHER_ALIGN);
945 
946 	error = bus_dmamap_load_mbuf_segment(sc->bge_cdata.bge_rx_mtag,
947 			sc->bge_cdata.bge_rx_tmpmap, m_new,
948 			&seg, 1, &nsegs, BUS_DMA_NOWAIT);
949 	if (error) {
950 		m_freem(m_new);
951 		return error;
952 	}
953 
954 	if (!init) {
955 		bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
956 				sc->bge_cdata.bge_rx_std_dmamap[i],
957 				BUS_DMASYNC_POSTREAD);
958 		bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag,
959 			sc->bge_cdata.bge_rx_std_dmamap[i]);
960 	}
961 
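	/*
	 * Swap the freshly loaded temporary map into this ring slot and
	 * keep the slot's old (now unloaded) map as the spare for the
	 * next allocation.
	 */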
962 	map = sc->bge_cdata.bge_rx_tmpmap;
963 	sc->bge_cdata.bge_rx_tmpmap = sc->bge_cdata.bge_rx_std_dmamap[i];
964 	sc->bge_cdata.bge_rx_std_dmamap[i] = map;
965 
966 	sc->bge_cdata.bge_rx_std_chain[i].bge_mbuf = m_new;
967 	sc->bge_cdata.bge_rx_std_chain[i].bge_paddr = seg.ds_addr;
968 
969 	bge_setup_rxdesc_std(sc, i);
970 	return 0;
971 }
972 
973 static void
974 bge_setup_rxdesc_std(struct bge_softc *sc, int i)
975 {
976 	struct bge_rxchain *rc;
977 	struct bge_rx_bd *r;
978 
979 	rc = &sc->bge_cdata.bge_rx_std_chain[i];
980 	r = &sc->bge_ldata.bge_rx_std_ring[i];
981 
982 	r->bge_addr.bge_addr_lo = BGE_ADDR_LO(rc->bge_paddr);
983 	r->bge_addr.bge_addr_hi = BGE_ADDR_HI(rc->bge_paddr);
984 	r->bge_len = rc->bge_mbuf->m_len;
985 	r->bge_idx = i;
986 	r->bge_flags = BGE_RXBDFLAG_END;
987 }
988 
989 /*
990  * Initialize a jumbo receive ring descriptor. This allocates
991  * a jumbo buffer from the pool managed internally by the driver.
992  */
993 static int
994 bge_newbuf_jumbo(struct bge_softc *sc, int i, int init)
995 {
996 	struct mbuf *m_new = NULL;
997 	struct bge_jslot *buf;
998 	bus_addr_t paddr;
999 
1000 	/* Allocate the mbuf. */
1001 	MGETHDR(m_new, init ? MB_WAIT : MB_DONTWAIT, MT_DATA);
1002 	if (m_new == NULL)
1003 		return ENOBUFS;
1004 
1005 	/* Allocate the jumbo buffer */
1006 	buf = bge_jalloc(sc);
1007 	if (buf == NULL) {
1008 		m_freem(m_new);
1009 		return ENOBUFS;
1010 	}
1011 
1012 	/* Attach the buffer to the mbuf. */
1013 	m_new->m_ext.ext_arg = buf;
1014 	m_new->m_ext.ext_buf = buf->bge_buf;
1015 	m_new->m_ext.ext_free = bge_jfree;
1016 	m_new->m_ext.ext_ref = bge_jref;
1017 	m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN;
1018 
1019 	m_new->m_flags |= M_EXT;
1020 
1021 	m_new->m_data = m_new->m_ext.ext_buf;
1022 	m_new->m_len = m_new->m_pkthdr.len = m_new->m_ext.ext_size;
1023 
1024 	paddr = buf->bge_paddr;
1025 	if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0) {
1026 		m_adj(m_new, ETHER_ALIGN);
1027 		paddr += ETHER_ALIGN;
1028 	}
1029 
1030 	/* Save necessary information */
1031 	sc->bge_cdata.bge_rx_jumbo_chain[i].bge_mbuf = m_new;
1032 	sc->bge_cdata.bge_rx_jumbo_chain[i].bge_paddr = paddr;
1033 
1034 	/* Set up the descriptor. */
1035 	bge_setup_rxdesc_jumbo(sc, i);
1036 	return 0;
1037 }
1038 
1039 static void
1040 bge_setup_rxdesc_jumbo(struct bge_softc *sc, int i)
1041 {
1042 	struct bge_rx_bd *r;
1043 	struct bge_rxchain *rc;
1044 
1045 	r = &sc->bge_ldata.bge_rx_jumbo_ring[i];
1046 	rc = &sc->bge_cdata.bge_rx_jumbo_chain[i];
1047 
1048 	r->bge_addr.bge_addr_lo = BGE_ADDR_LO(rc->bge_paddr);
1049 	r->bge_addr.bge_addr_hi = BGE_ADDR_HI(rc->bge_paddr);
1050 	r->bge_len = rc->bge_mbuf->m_len;
1051 	r->bge_idx = i;
1052 	r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING;
1053 }
1054 
1055 static int
1056 bge_init_rx_ring_std(struct bge_softc *sc)
1057 {
1058 	int i, error;
1059 
1060 	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1061 		error = bge_newbuf_std(sc, i, 1);
1062 		if (error)
1063 			return error;
1064 	}
1065 
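	/* Advertise the last filled descriptor via the producer mailbox. */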
1066 	sc->bge_std = BGE_STD_RX_RING_CNT - 1;
1067 	bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
1068 
1069 	return(0);
1070 }
1071 
1072 static void
1073 bge_free_rx_ring_std(struct bge_softc *sc)
1074 {
1075 	int i;
1076 
1077 	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1078 		struct bge_rxchain *rc = &sc->bge_cdata.bge_rx_std_chain[i];
1079 
1080 		if (rc->bge_mbuf != NULL) {
1081 			bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag,
1082 					  sc->bge_cdata.bge_rx_std_dmamap[i]);
1083 			m_freem(rc->bge_mbuf);
1084 			rc->bge_mbuf = NULL;
1085 		}
1086 		bzero(&sc->bge_ldata.bge_rx_std_ring[i],
1087 		    sizeof(struct bge_rx_bd));
1088 	}
1089 }
1090 
1091 static int
1092 bge_init_rx_ring_jumbo(struct bge_softc *sc)
1093 {
1094 	struct bge_rcb *rcb;
1095 	int i, error;
1096 
1097 	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1098 		error = bge_newbuf_jumbo(sc, i, 1);
1099 		if (error)
1100 			return error;
1101 	}
1102 
1103 	sc->bge_jumbo = BGE_JUMBO_RX_RING_CNT - 1;
1104 
1105 	rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1106 	rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 0);
1107 	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1108 
1109 	bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
1110 
1111 	return(0);
1112 }
1113 
1114 static void
1115 bge_free_rx_ring_jumbo(struct bge_softc *sc)
1116 {
1117 	int i;
1118 
1119 	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1120 		struct bge_rxchain *rc = &sc->bge_cdata.bge_rx_jumbo_chain[i];
1121 
1122 		if (rc->bge_mbuf != NULL) {
1123 			m_freem(rc->bge_mbuf);
1124 			rc->bge_mbuf = NULL;
1125 		}
1126 		bzero(&sc->bge_ldata.bge_rx_jumbo_ring[i],
1127 		    sizeof(struct bge_rx_bd));
1128 	}
1129 }
1130 
1131 static void
1132 bge_free_tx_ring(struct bge_softc *sc)
1133 {
1134 	int i;
1135 
1136 	for (i = 0; i < BGE_TX_RING_CNT; i++) {
1137 		if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
1138 			bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag,
1139 					  sc->bge_cdata.bge_tx_dmamap[i]);
1140 			m_freem(sc->bge_cdata.bge_tx_chain[i]);
1141 			sc->bge_cdata.bge_tx_chain[i] = NULL;
1142 		}
1143 		bzero(&sc->bge_ldata.bge_tx_ring[i],
1144 		    sizeof(struct bge_tx_bd));
1145 	}
1146 }
1147 
1148 static int
1149 bge_init_tx_ring(struct bge_softc *sc)
1150 {
1151 	sc->bge_txcnt = 0;
1152 	sc->bge_tx_saved_considx = 0;
1153 	sc->bge_tx_prodidx = 0;
1154 
1155 	/* Initialize transmit producer index for host-memory send ring. */
1156 	bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
1157 
1158 	/* 5700 b2 errata */
1159 	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
1160 		bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
1161 
1162 	bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1163 	/* 5700 b2 errata */
1164 	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
1165 		bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1166 
1167 	return(0);
1168 }
1169 
1170 static void
1171 bge_setmulti(struct bge_softc *sc)
1172 {
1173 	struct ifnet *ifp;
1174 	struct ifmultiaddr *ifma;
1175 	uint32_t hashes[4] = { 0, 0, 0, 0 };
1176 	int h, i;
1177 
1178 	ifp = &sc->arpcom.ac_if;
1179 
1180 	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
1181 		for (i = 0; i < 4; i++)
1182 			CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
1183 		return;
1184 	}
1185 
1186 	/* First, zot all the existing filters. */
1187 	for (i = 0; i < 4; i++)
1188 		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);
1189 
1190 	/* Now program new ones. */
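	/*
	 * The low 7 bits of the little-endian CRC pick one of 128 hash
	 * filter bits: bits 6:5 select one of the four 32-bit MAR
	 * registers and bits 4:0 select the bit within it.
	 */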
1191 	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1192 		if (ifma->ifma_addr->sa_family != AF_LINK)
1193 			continue;
1194 		h = ether_crc32_le(
1195 		    LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
1196 		    ETHER_ADDR_LEN) & 0x7f;
1197 		hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
1198 	}
1199 
1200 	for (i = 0; i < 4; i++)
1201 		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
1202 }
1203 
1204 /*
1205  * Do endian, PCI and DMA initialization. Also check the on-board ROM
1206  * self-test results.
1207  */
1208 static int
1209 bge_chipinit(struct bge_softc *sc)
1210 {
1211 	int i;
1212 	uint32_t dma_rw_ctl;
1213 
1214 	/* Set endian type before we access any non-PCI registers. */
1215 	pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, BGE_INIT, 4);
1216 
1217 	/* Clear the MAC control register */
1218 	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
1219 
1220 	/*
1221 	 * Clear the MAC statistics block in the NIC's
1222 	 * internal memory.
1223 	 */
1224 	for (i = BGE_STATS_BLOCK;
1225 	    i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
1226 		BGE_MEMWIN_WRITE(sc, i, 0);
1227 
1228 	for (i = BGE_STATUS_BLOCK;
1229 	    i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
1230 		BGE_MEMWIN_WRITE(sc, i, 0);
1231 
1232 	/* Set up the PCI DMA control register. */
1233 	if (sc->bge_flags & BGE_FLAG_PCIE) {
1234 		/* PCI Express */
1235 		dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1236 		    (0xf << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1237 		    (0x2 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
1238 	} else if (sc->bge_flags & BGE_FLAG_PCIX) {
1239 		/* PCI-X bus */
1240 		if (BGE_IS_5714_FAMILY(sc)) {
1241 			dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD;
1242 			dma_rw_ctl &= ~BGE_PCIDMARWCTL_ONEDMA_ATONCE; /* XXX */
1243 			/* XXX magic values, Broadcom-supplied Linux driver */
1244 			if (sc->bge_asicrev == BGE_ASICREV_BCM5780) {
1245 				dma_rw_ctl |= (1 << 20) | (1 << 18) |
1246 				    BGE_PCIDMARWCTL_ONEDMA_ATONCE;
1247 			} else {
1248 				dma_rw_ctl |= (1 << 20) | (1 << 18) | (1 << 15);
1249 			}
1250 		} else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
1251 			/*
1252 			 * The 5704 uses a different encoding of read/write
1253 			 * watermarks.
1254 			 */
1255 			dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1256 			    (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1257 			    (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
1258 		} else {
1259 			dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1260 			    (0x3 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1261 			    (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
1262 			    (0x0F);
1263 		}
1264 
1265 		/*
1266 		 * 5703 and 5704 need ONEDMA_AT_ONCE as a workaround
1267 		 * for hardware bugs.
1268 		 */
1269 		if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1270 		    sc->bge_asicrev == BGE_ASICREV_BCM5704) {
1271 			uint32_t tmp;
1272 
1273 			tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f;
1274 			if (tmp == 0x6 || tmp == 0x7)
1275 				dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE;
1276 		}
1277 	} else {
1278 		/* Conventional PCI bus */
1279 		dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1280 		    (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1281 		    (0x7 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
1282 		    (0x0F);
1283 	}
1284 
1285 	if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1286 	    sc->bge_asicrev == BGE_ASICREV_BCM5704 ||
1287 	    sc->bge_asicrev == BGE_ASICREV_BCM5705)
1288 		dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
1289 	pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);
1290 
1291 	/*
1292 	 * Set up general mode register.
1293 	 */
1294 	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS|
1295 	    BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS|
1296 	    BGE_MODECTL_TX_NO_PHDR_CSUM);
1297 
1298 	/*
1299 	 * Disable memory write invalidate.  Apparently it is not supported
1300 	 * properly by these devices.
1301 	 */
1302 	PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD, PCIM_CMD_MWIEN, 4);
1303 
1304 	/* Set the timer prescaler (always 66MHz) */
1305 	CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);
1306 
1307 	if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
1308 		DELAY(40);	/* XXX */
1309 
1310 		/* Put PHY into ready state */
1311 		BGE_CLRBIT(sc, BGE_MISC_CFG, BGE_MISCCFG_EPHY_IDDQ);
1312 		CSR_READ_4(sc, BGE_MISC_CFG); /* Flush */
1313 		DELAY(40);
1314 	}
1315 
1316 	return(0);
1317 }
1318 
1319 static int
1320 bge_blockinit(struct bge_softc *sc)
1321 {
1322 	struct bge_rcb *rcb;
1323 	bus_size_t vrcb;
1324 	bge_hostaddr taddr;
1325 	uint32_t val;
1326 	int i;
1327 
1328 	/*
1329 	 * Initialize the memory window pointer register so that
1330 	 * we can access the first 32K of internal NIC RAM. This will
1331 	 * allow us to set up the TX send ring RCBs and the RX return
1332 	 * ring RCBs, plus other things which live in NIC memory.
1333 	 */
1334 	CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);
1335 
1336 	/* Note: the BCM5704 has a smaller mbuf space than other chips. */
1337 
1338 	if (!BGE_IS_5705_PLUS(sc)) {
1339 		/* Configure mbuf memory pool */
1340 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_BUFFPOOL_1);
1341 		if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
1342 			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
1343 		else
1344 			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
1345 
1346 		/* Configure DMA resource pool */
1347 		CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
1348 		    BGE_DMA_DESCRIPTORS);
1349 		CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
1350 	}
1351 
1352 	/* Configure mbuf pool watermarks */
1353 	if (!BGE_IS_5705_PLUS(sc)) {
1354 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
1355 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
1356 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1357 	} else if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
1358 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1359 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04);
1360 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10);
1361 	} else {
1362 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1363 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
1364 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1365 	}
1366 
1367 	/* Configure DMA resource watermarks */
1368 	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
1369 	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);
1370 
1371 	/* Enable buffer manager */
1372 	if (!BGE_IS_5705_PLUS(sc)) {
1373 		CSR_WRITE_4(sc, BGE_BMAN_MODE,
1374 		    BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN);
1375 
1376 		/* Poll for buffer manager start indication */
1377 		for (i = 0; i < BGE_TIMEOUT; i++) {
1378 			if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
1379 				break;
1380 			DELAY(10);
1381 		}
1382 
1383 		if (i == BGE_TIMEOUT) {
1384 			if_printf(&sc->arpcom.ac_if,
1385 				  "buffer manager failed to start\n");
1386 			return(ENXIO);
1387 		}
1388 	}
1389 
1390 	/* Enable flow-through queues */
1391 	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
1392 	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
1393 
1394 	/* Wait until queue initialization is complete */
1395 	for (i = 0; i < BGE_TIMEOUT; i++) {
1396 		if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
1397 			break;
1398 		DELAY(10);
1399 	}
1400 
1401 	if (i == BGE_TIMEOUT) {
1402 		if_printf(&sc->arpcom.ac_if,
1403 			  "flow-through queue init failed\n");
1404 		return(ENXIO);
1405 	}
1406 
1407 	/* Initialize the standard RX ring control block */
1408 	rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb;
1409 	rcb->bge_hostaddr.bge_addr_lo =
1410 	    BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr);
1411 	rcb->bge_hostaddr.bge_addr_hi =
1412 	    BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr);
1413 	if (BGE_IS_5705_PLUS(sc))
1414 		rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
1415 	else
1416 		rcb->bge_maxlen_flags =
1417 		    BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
1418 	rcb->bge_nicaddr = BGE_STD_RX_RINGS;
1419 	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
1420 	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
1421 	CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1422 	CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
1423 
1424 	/*
1425 	 * Initialize the jumbo RX ring control block
1426 	 * We set the 'ring disabled' bit in the flags
1427 	 * field until we're actually ready to start
1428 	 * using this ring (i.e. once we set the MTU
1429 	 * high enough to require it).
1430 	 */
1431 	if (BGE_IS_JUMBO_CAPABLE(sc)) {
1432 		rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1433 
1434 		rcb->bge_hostaddr.bge_addr_lo =
1435 		    BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1436 		rcb->bge_hostaddr.bge_addr_hi =
1437 		    BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1438 		rcb->bge_maxlen_flags =
1439 		    BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN,
1440 		    BGE_RCB_FLAG_RING_DISABLED);
1441 		rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
1442 		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
1443 		    rcb->bge_hostaddr.bge_addr_hi);
1444 		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
1445 		    rcb->bge_hostaddr.bge_addr_lo);
1446 		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
1447 		    rcb->bge_maxlen_flags);
1448 		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
1449 
1450 		/* Set up dummy disabled mini ring RCB */
1451 		rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb;
1452 		rcb->bge_maxlen_flags =
1453 		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
1454 		CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
1455 		    rcb->bge_maxlen_flags);
1456 	}
1457 
1458 	/*
1459 	 * Set the BD ring replenish thresholds. The recommended
1460 	 * values are 1/8th the number of descriptors allocated to
1461 	 * each ring.
1462 	 */
1463 	if (BGE_IS_5705_PLUS(sc))
1464 		val = 8;
1465 	else
1466 		val = BGE_STD_RX_RING_CNT / 8;
1467 	CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, val);
1468 	CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8);
1469 
1470 	/*
1471 	 * Disable all unused send rings by setting the 'ring disabled'
1472 	 * bit in the flags field of all the TX send ring control blocks.
1473 	 * These are located in NIC memory.
1474 	 */
1475 	vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1476 	for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) {
1477 		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1478 		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
1479 		RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1480 		vrcb += sizeof(struct bge_rcb);
1481 	}
1482 
1483 	/* Configure TX RCB 0 (we use only the first ring) */
1484 	vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1485 	BGE_HOSTADDR(taddr, sc->bge_ldata.bge_tx_ring_paddr);
1486 	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1487 	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1488 	RCB_WRITE_4(sc, vrcb, bge_nicaddr,
1489 	    BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
1490 	if (!BGE_IS_5705_PLUS(sc)) {
1491 		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1492 		    BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
1493 	}
1494 
1495 	/* Disable all unused RX return rings */
1496 	vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1497 	for (i = 0; i < BGE_RX_RINGS_MAX; i++) {
1498 		RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0);
1499 		RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0);
1500 		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1501 		    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt,
1502 		    BGE_RCB_FLAG_RING_DISABLED));
1503 		RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1504 		bge_writembx(sc, BGE_MBX_RX_CONS0_LO +
1505 		    (i * (sizeof(uint64_t))), 0);
1506 		vrcb += sizeof(struct bge_rcb);
1507 	}
1508 
1509 	/* Initialize RX ring indexes */
1510 	bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0);
1511 	bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
1512 	bge_writembx(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
1513 
1514 	/*
1515 	 * Set up RX return ring 0
1516 	 * Note that the NIC address for RX return rings is 0x00000000.
1517 	 * The return rings live entirely within the host, so the
1518 	 * nicaddr field in the RCB isn't used.
1519 	 */
1520 	vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1521 	BGE_HOSTADDR(taddr, sc->bge_ldata.bge_rx_return_ring_paddr);
1522 	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1523 	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1524 	RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0x00000000);
1525 	RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1526 	    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));
1527 
1528 	/* Set random backoff seed for TX */
1529 	CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
1530 	    sc->arpcom.ac_enaddr[0] + sc->arpcom.ac_enaddr[1] +
1531 	    sc->arpcom.ac_enaddr[2] + sc->arpcom.ac_enaddr[3] +
1532 	    sc->arpcom.ac_enaddr[4] + sc->arpcom.ac_enaddr[5] +
1533 	    BGE_TX_BACKOFF_SEED_MASK);
1534 
1535 	/* Set inter-packet gap */
1536 	CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);
1537 
1538 	/*
1539 	 * Specify which ring to use for packets that don't match
1540 	 * any RX rules.
1541 	 */
1542 	CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
1543 
1544 	/*
1545 	 * Configure number of RX lists. One interrupt distribution
1546 	 * list, sixteen active lists, one bad frames class.
1547 	 */
1548 	CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
1549 
1550 	/* Initialize RX list placement stats mask. */
1551 	CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
1552 	CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
1553 
1554 	/* Disable host coalescing until we get it set up */
1555 	CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
1556 
1557 	/* Poll to make sure it's shut down. */
1558 	for (i = 0; i < BGE_TIMEOUT; i++) {
1559 		if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
1560 			break;
1561 		DELAY(10);
1562 	}
1563 
1564 	if (i == BGE_TIMEOUT) {
1565 		if_printf(&sc->arpcom.ac_if,
1566 			  "host coalescing engine failed to idle\n");
1567 		return(ENXIO);
1568 	}
1569 
1570 	/* Set up host coalescing defaults */
1571 	CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
1572 	CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
1573 	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
1574 	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
1575 	if (!BGE_IS_5705_PLUS(sc)) {
1576 		CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
1577 		CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
1578 	}
1579 	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 1);
1580 	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 1);
1581 
1582 	/* Set up address of statistics block */
1583 	if (!BGE_IS_5705_PLUS(sc)) {
1584 		CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI,
1585 		    BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr));
1586 		CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
1587 		    BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr));
1588 
1589 		CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
1590 		CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
1591 		CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
1592 	}
1593 
1594 	/* Set up address of status block */
1595 	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
1596 	    BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr));
1597 	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
1598 	    BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr));
1599 	sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx = 0;
1600 	sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx = 0;
1601 
1602 	/* Turn on host coalescing state machine */
1603 	CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
1604 
1605 	/* Turn on RX BD completion state machine and enable attentions */
1606 	CSR_WRITE_4(sc, BGE_RBDC_MODE,
1607 	    BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);
1608 
1609 	/* Turn on RX list placement state machine */
1610 	CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
1611 
1612 	/* Turn on RX list selector state machine. */
1613 	if (!BGE_IS_5705_PLUS(sc))
1614 		CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
1615 
1616 	/* Turn on DMA, clear stats */
1617 	CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB|
1618 	    BGE_MACMODE_RXDMA_ENB|BGE_MACMODE_RX_STATS_CLEAR|
1619 	    BGE_MACMODE_TX_STATS_CLEAR|BGE_MACMODE_RX_STATS_ENB|
1620 	    BGE_MACMODE_TX_STATS_ENB|BGE_MACMODE_FRMHDR_DMA_ENB|
1621 	    ((sc->bge_flags & BGE_FLAG_TBI) ?
1622 	     BGE_PORTMODE_TBI : BGE_PORTMODE_MII));
1623 
1624 	/* Set misc. local control, enable interrupts on attentions */
1625 	CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);
1626 
1627 #ifdef notdef
1628 	/* Assert GPIO pins for PHY reset */
1629 	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
1630 	    BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
1631 	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
1632 	    BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
1633 #endif
1634 
1635 	/* Turn on DMA completion state machine */
1636 	if (!BGE_IS_5705_PLUS(sc))
1637 		CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
1638 
1639 	/* Turn on write DMA state machine */
1640 	val = BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS;
1641 	if (BGE_IS_5755_PLUS(sc))
1642 		val |= (1 << 29);	/* Enable host coalescing bug fix. */
1643 	CSR_WRITE_4(sc, BGE_WDMA_MODE, val);
1644 	DELAY(40);
1645 
1646 	/* Turn on read DMA state machine */
1647 	val = BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS;
1648 	if (sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
1649 	    sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
1650 	    sc->bge_asicrev == BGE_ASICREV_BCM57780)
1651 		val |= BGE_RDMAMODE_BD_SBD_CRPT_ATTN |
1652 		    BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN |
1653 		    BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN;
1654 	if (sc->bge_flags & BGE_FLAG_PCIE)
1655 		val |= BGE_RDMAMODE_FIFO_LONG_BURST;
1656 	CSR_WRITE_4(sc, BGE_RDMA_MODE, val);
1657 	DELAY(40);
1658 
1659 	/* Turn on RX data completion state machine */
1660 	CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
1661 
1662 	/* Turn on RX BD initiator state machine */
1663 	CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
1664 
1665 	/* Turn on RX data and RX BD initiator state machine */
1666 	CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
1667 
1668 	/* Turn on Mbuf cluster free state machine */
1669 	if (!BGE_IS_5705_PLUS(sc))
1670 		CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
1671 
1672 	/* Turn on send BD completion state machine */
1673 	CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
1674 
1675 	/* Turn on send data completion state machine */
1676 	val = BGE_SDCMODE_ENABLE;
1677 	if (sc->bge_asicrev == BGE_ASICREV_BCM5761)
1678 		val |= BGE_SDCMODE_CDELAY;
1679 	CSR_WRITE_4(sc, BGE_SDC_MODE, val);
1680 
1681 	/* Turn on send data initiator state machine */
1682 	CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
1683 
1684 	/* Turn on send BD initiator state machine */
1685 	CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
1686 
1687 	/* Turn on send BD selector state machine */
1688 	CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
1689 
1690 	CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
1691 	CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
1692 	    BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);
1693 
1694 	/* ack/clear link change events */
1695 	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
1696 	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
1697 	    BGE_MACSTAT_LINK_CHANGED);
1698 	CSR_WRITE_4(sc, BGE_MI_STS, 0);
1699 
1700 	/* Enable PHY auto polling (for MII/GMII only) */
1701 	if (sc->bge_flags & BGE_FLAG_TBI) {
1702 		CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
1703  	} else {
1704 		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL|10<<16);
1705 		if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
1706 		    sc->bge_chipid != BGE_CHIPID_BCM5700_B2) {
1707 			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
1708 			    BGE_EVTENB_MI_INTERRUPT);
1709 		}
1710 	}
1711 
1712 	/*
1713 	 * Clear any pending link state attention.
1714 	 * Otherwise some link state change events may be lost until attention
1715 	 * is cleared by bge_intr() -> bge_softc.bge_link_upd() sequence.
1716 	 * It's not necessary on newer BCM chips - perhaps enabling link
1717 	 * state change attentions implies clearing pending attention.
1718 	 */
1719 	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
1720 	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
1721 	    BGE_MACSTAT_LINK_CHANGED);
1722 
1723 	/* Enable link state change attentions. */
1724 	BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
1725 
1726 	return(0);
1727 }
1728 
1729 /*
1730  * Probe for a Broadcom chip. Check the PCI vendor and device IDs
1731  * against our list and set the device description if we find a
1732  * match. Although the Broadcom controller has VPD support and
1733  * could supply its own product string, reading it is slow, so we
1734  * simply announce the compiled-in name from the match table
1735  * instead.
1736  */
1737 static int
1738 bge_probe(device_t dev)
1739 {
1740 	const struct bge_type *t;
1741 	uint16_t product, vendor;
1742 
1743 	product = pci_get_device(dev);
1744 	vendor = pci_get_vendor(dev);
1745 
1746 	for (t = bge_devs; t->bge_name != NULL; t++) {
1747 		if (vendor == t->bge_vid && product == t->bge_did)
1748 			break;
1749 	}
1750 	if (t->bge_name == NULL)
1751 		return(ENXIO);
1752 
1753 	device_set_desc(dev, t->bge_name);
1754 	if (pci_get_subvendor(dev) == PCI_VENDOR_DELL) {
1755 		struct bge_softc *sc = device_get_softc(dev);
1756 		sc->bge_flags |= BGE_FLAG_NO_3LED;
1757 	}
1758 	return(0);
1759 }
1760 
1761 static int
1762 bge_attach(device_t dev)
1763 {
1764 	struct ifnet *ifp;
1765 	struct bge_softc *sc;
1766 	uint32_t hwcfg = 0;
1767 	int error = 0, rid;
1768 	uint8_t ether_addr[ETHER_ADDR_LEN];
1769 
1770 	sc = device_get_softc(dev);
1771 	sc->bge_dev = dev;
1772 	callout_init(&sc->bge_stat_timer);
1773 	lwkt_serialize_init(&sc->bge_jslot_serializer);
1774 
1775 #ifndef BURN_BRIDGES
1776 	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
1777 		uint32_t irq, mem;
1778 
1779 		irq = pci_read_config(dev, PCIR_INTLINE, 4);
1780 		mem = pci_read_config(dev, BGE_PCI_BAR0, 4);
1781 
1782 		device_printf(dev, "chip is in D%d power mode "
1783 		    "-- setting to D0\n", pci_get_powerstate(dev));
1784 
1785 		pci_set_powerstate(dev, PCI_POWERSTATE_D0);
1786 
1787 		pci_write_config(dev, PCIR_INTLINE, irq, 4);
1788 		pci_write_config(dev, BGE_PCI_BAR0, mem, 4);
1789 	}
1790 #endif	/* !BURN_BRIDGES */
1791 
1792 	/*
1793 	 * Map control/status registers.
1794 	 */
1795 	pci_enable_busmaster(dev);
1796 
1797 	rid = BGE_PCI_BAR0;
1798 	sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
1799 	    RF_ACTIVE);
1800 
1801 	if (sc->bge_res == NULL) {
1802 		device_printf(dev, "couldn't map memory\n");
1803 		return ENXIO;
1804 	}
1805 
1806 	sc->bge_btag = rman_get_bustag(sc->bge_res);
1807 	sc->bge_bhandle = rman_get_bushandle(sc->bge_res);
1808 
1809 	/* Save various chip information */
1810 	sc->bge_chipid =
1811 	    pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >>
1812 	    BGE_PCIMISCCTL_ASICREV_SHIFT;
1813 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_USE_PRODID_REG)
1814 		sc->bge_chipid = pci_read_config(dev, BGE_PCI_PRODID_ASICREV, 4);
1815 	sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
1816 	sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);
1817 
1818 	/* Save chipset family. */
1819 	switch (sc->bge_asicrev) {
1820 	case BGE_ASICREV_BCM5755:
1821 	case BGE_ASICREV_BCM5761:
1822 	case BGE_ASICREV_BCM5784:
1823 	case BGE_ASICREV_BCM5785:
1824 	case BGE_ASICREV_BCM5787:
1825 	case BGE_ASICREV_BCM57780:
1826 		sc->bge_flags |= BGE_FLAG_5755_PLUS | BGE_FLAG_575X_PLUS |
1827 		    BGE_FLAG_5705_PLUS;
1828 		break;
1829 
1830 	case BGE_ASICREV_BCM5700:
1831 	case BGE_ASICREV_BCM5701:
1832 	case BGE_ASICREV_BCM5703:
1833 	case BGE_ASICREV_BCM5704:
1834 		sc->bge_flags |= BGE_FLAG_5700_FAMILY | BGE_FLAG_JUMBO;
1835 		break;
1836 
1837 	case BGE_ASICREV_BCM5714_A0:
1838 	case BGE_ASICREV_BCM5780:
1839 	case BGE_ASICREV_BCM5714:
1840 		sc->bge_flags |= BGE_FLAG_5714_FAMILY;
1841 		/* Fall through */
1842 
1843 	case BGE_ASICREV_BCM5750:
1844 	case BGE_ASICREV_BCM5752:
1845 	case BGE_ASICREV_BCM5906:
1846 		sc->bge_flags |= BGE_FLAG_575X_PLUS;
1847 		/* Fall through */
1848 
1849 	case BGE_ASICREV_BCM5705:
1850 		sc->bge_flags |= BGE_FLAG_5705_PLUS;
1851 		break;
1852 	}
1853 
1854 	if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
1855 		sc->bge_flags |= BGE_FLAG_NO_EEPROM;
1856 
1857 	/*
1858 	 * Set various quirk flags.
1859 	 */
1860 
1861 	sc->bge_flags |= BGE_FLAG_ETH_WIRESPEED;
1862 	if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
1863 	    (sc->bge_asicrev == BGE_ASICREV_BCM5705 &&
1864 	     (sc->bge_chipid != BGE_CHIPID_BCM5705_A0 &&
1865 	      sc->bge_chipid != BGE_CHIPID_BCM5705_A1)) ||
1866 	    sc->bge_asicrev == BGE_ASICREV_BCM5906)
1867 		sc->bge_flags &= ~BGE_FLAG_ETH_WIRESPEED;
1868 
1869 	if (sc->bge_chipid == BGE_CHIPID_BCM5701_A0 ||
1870 	    sc->bge_chipid == BGE_CHIPID_BCM5701_B0)
1871 		sc->bge_flags |= BGE_FLAG_CRC_BUG;
1872 
1873 	if (sc->bge_chiprev == BGE_CHIPREV_5703_AX ||
1874 	    sc->bge_chiprev == BGE_CHIPREV_5704_AX)
1875 		sc->bge_flags |= BGE_FLAG_ADC_BUG;
1876 
1877 	if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0)
1878 		sc->bge_flags |= BGE_FLAG_5704_A0_BUG;
1879 
1880 	if (BGE_IS_5705_PLUS(sc) &&
1881 		!(sc->bge_flags & BGE_FLAG_ADJUST_TRIM)) {
1882 		if (sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
1883 		    sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
1884 		    sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
1885 		    sc->bge_asicrev == BGE_ASICREV_BCM5787) {
1886 			if (sc->bge_chipid != BGE_CHIPID_BCM5722_A0)
1887 				sc->bge_flags |= BGE_FLAG_JITTER_BUG;
1888 		} else if (sc->bge_asicrev != BGE_ASICREV_BCM5906) {
1889 			sc->bge_flags |= BGE_FLAG_BER_BUG;
1890 		}
1891 	}
1892 
1893 	/* Allocate interrupt */
1894 	rid = 0;
1895 
1896 	sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1897 	    RF_SHAREABLE | RF_ACTIVE);
1898 
1899 	if (sc->bge_irq == NULL) {
1900 		device_printf(dev, "couldn't map interrupt\n");
1901 		error = ENXIO;
1902 		goto fail;
1903 	}
1904 
1905 	/*
1906 	 * Check if this is a PCI-X or PCI Express device.
1907 	 */
1908 	if (BGE_IS_5705_PLUS(sc)) {
1909 		if (pci_is_pcie(dev)) {
1910 			sc->bge_flags |= BGE_FLAG_PCIE;
1911 			pcie_set_max_readrq(dev, PCIEM_DEVCTL_MAX_READRQ_4096);
1912 		}
1913 	} else {
1914 		/*
1915 		 * Check if the device is in PCI-X Mode.
1916 		 * (This bit is not valid on PCI Express controllers.)
1917 		 */
1918 		if ((pci_read_config(sc->bge_dev, BGE_PCI_PCISTATE, 4) &
1919 		    BGE_PCISTATE_PCI_BUSMODE) == 0)
1920 			sc->bge_flags |= BGE_FLAG_PCIX;
1921 	}
1922 
1923 	device_printf(dev, "CHIP ID 0x%08x; "
1924 		      "ASIC REV 0x%02x; CHIP REV 0x%02x; %s\n",
1925 		      sc->bge_chipid, sc->bge_asicrev, sc->bge_chiprev,
1926 		      (sc->bge_flags & BGE_FLAG_PCIX) ? "PCI-X"
1927 		      : ((sc->bge_flags & BGE_FLAG_PCIE) ?
1928 			"PCI-E" : "PCI"));
1929 
1930 	ifp = &sc->arpcom.ac_if;
1931 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1932 
1933 	/* Try to reset the chip. */
1934 	bge_reset(sc);
1935 
1936 	if (bge_chipinit(sc)) {
1937 		device_printf(dev, "chip initialization failed\n");
1938 		error = ENXIO;
1939 		goto fail;
1940 	}
1941 
1942 	/*
1943 	 * Get station address
1944 	 */
1945 	error = bge_get_eaddr(sc, ether_addr);
1946 	if (error) {
1947 		device_printf(dev, "failed to read station address\n");
1948 		goto fail;
1949 	}
1950 
1951 	/* 5705/5750 limits RX return ring to 512 entries. */
1952 	if (BGE_IS_5705_PLUS(sc))
1953 		sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
1954 	else
1955 		sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
1956 
1957 	error = bge_dma_alloc(sc);
1958 	if (error)
1959 		goto fail;
1960 
1961 	/* Set default tuneable values. */
1962 	sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
1963 	sc->bge_rx_coal_ticks = bge_rx_coal_ticks;
1964 	sc->bge_tx_coal_ticks = bge_tx_coal_ticks;
1965 	sc->bge_rx_max_coal_bds = bge_rx_max_coal_bds;
1966 	sc->bge_tx_max_coal_bds = bge_tx_max_coal_bds;
1967 
1968 	/* Set up ifnet structure */
1969 	ifp->if_softc = sc;
1970 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1971 	ifp->if_ioctl = bge_ioctl;
1972 	ifp->if_start = bge_start;
1973 #ifdef DEVICE_POLLING
1974 	ifp->if_poll = bge_poll;
1975 #endif
1976 	ifp->if_watchdog = bge_watchdog;
1977 	ifp->if_init = bge_init;
1978 	ifp->if_mtu = ETHERMTU;
1979 	ifp->if_capabilities = IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
1980 	ifq_set_maxlen(&ifp->if_snd, BGE_TX_RING_CNT - 1);
1981 	ifq_set_ready(&ifp->if_snd);
1982 
1983 	/*
1984 	 * 5700 B0 chips do not support checksumming correctly due
1985 	 * to hardware bugs.
1986 	 */
1987 	if (sc->bge_chipid != BGE_CHIPID_BCM5700_B0) {
1988 		ifp->if_capabilities |= IFCAP_HWCSUM;
1989 		ifp->if_hwassist = BGE_CSUM_FEATURES;
1990 	}
1991 	ifp->if_capenable = ifp->if_capabilities;
1992 
1993 	/*
1994 	 * Figure out what sort of media we have by checking the
1995 	 * hardware config word in the first 32k of NIC internal memory,
1996 	 * or fall back to examining the EEPROM if necessary.
1997 	 * Note: on some BCM5700 cards, this value appears to be unset.
1998 	 * If that's the case, we have to rely on identifying the NIC
1999 	 * by its PCI subsystem ID, as we do below for the SysKonnect
2000 	 * SK-9D41.
2001 	 */
2002 	if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER)
2003 		hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
2004 	else {
2005 		if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
2006 				    sizeof(hwcfg))) {
2007 			device_printf(dev, "failed to read EEPROM\n");
2008 			error = ENXIO;
2009 			goto fail;
2010 		}
2011 		hwcfg = ntohl(hwcfg);
2012 	}
2013 
2014 	if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER)
2015 		sc->bge_flags |= BGE_FLAG_TBI;
2016 
2017 	/* The SysKonnect SK-9D41 is a 1000baseSX card. */
2018 	if (pci_get_subvendor(dev) == PCI_PRODUCT_SCHNEIDERKOCH_SK_9D41)
2019 		sc->bge_flags |= BGE_FLAG_TBI;
2020 
2021 	if (sc->bge_flags & BGE_FLAG_TBI) {
2022 		ifmedia_init(&sc->bge_ifmedia, IFM_IMASK,
2023 		    bge_ifmedia_upd, bge_ifmedia_sts);
2024 		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
2025 		ifmedia_add(&sc->bge_ifmedia,
2026 		    IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL);
2027 		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
2028 		ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO);
2029 		sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
2030 	} else {
2031 		/*
2032 		 * Do transceiver setup.
2033 		 */
2034 		if (mii_phy_probe(dev, &sc->bge_miibus,
2035 		    bge_ifmedia_upd, bge_ifmedia_sts)) {
2036 			device_printf(dev, "MII without any PHY!\n");
2037 			error = ENXIO;
2038 			goto fail;
2039 		}
2040 	}
2041 
2042 	/*
2043 	 * When using the BCM5701 in PCI-X mode, data corruption has
2044 	 * been observed in the first few bytes of some received packets.
2045 	 * Aligning the packet buffer in memory eliminates the corruption.
2046 	 * Unfortunately, this misaligns the packet payloads.  On platforms
2047 	 * which do not support unaligned accesses, we will realign the
2048 	 * payloads by copying the received packets.
2049 	 */
2050 	if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
2051 	    (sc->bge_flags & BGE_FLAG_PCIX))
2052 		sc->bge_flags |= BGE_FLAG_RX_ALIGNBUG;
2053 
2054 	if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
2055 	    sc->bge_chipid != BGE_CHIPID_BCM5700_B2) {
2056 		sc->bge_link_upd = bge_bcm5700_link_upd;
2057 		sc->bge_link_chg = BGE_MACSTAT_MI_INTERRUPT;
2058 	} else if (sc->bge_flags & BGE_FLAG_TBI) {
2059 		sc->bge_link_upd = bge_tbi_link_upd;
2060 		sc->bge_link_chg = BGE_MACSTAT_LINK_CHANGED;
2061 	} else {
2062 		sc->bge_link_upd = bge_copper_link_upd;
2063 		sc->bge_link_chg = BGE_MACSTAT_LINK_CHANGED;
2064 	}
2065 
2066 	/*
2067 	 * Create sysctl nodes.
2068 	 */
2069 	sysctl_ctx_init(&sc->bge_sysctl_ctx);
2070 	sc->bge_sysctl_tree = SYSCTL_ADD_NODE(&sc->bge_sysctl_ctx,
2071 					      SYSCTL_STATIC_CHILDREN(_hw),
2072 					      OID_AUTO,
2073 					      device_get_nameunit(dev),
2074 					      CTLFLAG_RD, 0, "");
2075 	if (sc->bge_sysctl_tree == NULL) {
2076 		device_printf(dev, "can't add sysctl node\n");
2077 		error = ENXIO;
2078 		goto fail;
2079 	}
2080 
2081 	SYSCTL_ADD_PROC(&sc->bge_sysctl_ctx,
2082 			SYSCTL_CHILDREN(sc->bge_sysctl_tree),
2083 			OID_AUTO, "rx_coal_ticks",
2084 			CTLTYPE_INT | CTLFLAG_RW,
2085 			sc, 0, bge_sysctl_rx_coal_ticks, "I",
2086 			"Receive coalescing ticks (usec).");
2087 	SYSCTL_ADD_PROC(&sc->bge_sysctl_ctx,
2088 			SYSCTL_CHILDREN(sc->bge_sysctl_tree),
2089 			OID_AUTO, "tx_coal_ticks",
2090 			CTLTYPE_INT | CTLFLAG_RW,
2091 			sc, 0, bge_sysctl_tx_coal_ticks, "I",
2092 			"Transmit coalescing ticks (usec).");
2093 	SYSCTL_ADD_PROC(&sc->bge_sysctl_ctx,
2094 			SYSCTL_CHILDREN(sc->bge_sysctl_tree),
2095 			OID_AUTO, "rx_max_coal_bds",
2096 			CTLTYPE_INT | CTLFLAG_RW,
2097 			sc, 0, bge_sysctl_rx_max_coal_bds, "I",
2098 			"Receive max coalesced BD count.");
2099 	SYSCTL_ADD_PROC(&sc->bge_sysctl_ctx,
2100 			SYSCTL_CHILDREN(sc->bge_sysctl_tree),
2101 			OID_AUTO, "tx_max_coal_bds",
2102 			CTLTYPE_INT | CTLFLAG_RW,
2103 			sc, 0, bge_sysctl_tx_max_coal_bds, "I",
2104 			"Transmit max coalesced BD count.");
2105 
2106 	/*
2107 	 * Call MI attach routine.
2108 	 */
2109 	ether_ifattach(ifp, ether_addr, NULL);
2110 
2111 	error = bus_setup_intr(dev, sc->bge_irq, INTR_MPSAFE,
2112 			       bge_intr, sc, &sc->bge_intrhand,
2113 			       ifp->if_serializer);
2114 	if (error) {
2115 		ether_ifdetach(ifp);
2116 		device_printf(dev, "couldn't set up irq\n");
2117 		goto fail;
2118 	}
2119 
2120 	ifp->if_cpuid = rman_get_cpuid(sc->bge_irq);
2121 	KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus);
2122 
2123 	return(0);
2124 fail:
2125 	bge_detach(dev);
2126 	return(error);
2127 }
2128 
2129 static int
2130 bge_detach(device_t dev)
2131 {
2132 	struct bge_softc *sc = device_get_softc(dev);
2133 
2134 	if (device_is_attached(dev)) {
2135 		struct ifnet *ifp = &sc->arpcom.ac_if;
2136 
2137 		lwkt_serialize_enter(ifp->if_serializer);
2138 		bge_stop(sc);
2139 		bge_reset(sc);
2140 		bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand);
2141 		lwkt_serialize_exit(ifp->if_serializer);
2142 
2143 		ether_ifdetach(ifp);
2144 	}
2145 
2146 	if (sc->bge_flags & BGE_FLAG_TBI)
2147 		ifmedia_removeall(&sc->bge_ifmedia);
2148 	if (sc->bge_miibus)
2149 		device_delete_child(dev, sc->bge_miibus);
2150 	bus_generic_detach(dev);
2151 
2152 	if (sc->bge_irq != NULL)
2153 		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->bge_irq);
2154 
2155 	if (sc->bge_res != NULL)
2156 		bus_release_resource(dev, SYS_RES_MEMORY,
2157 		    BGE_PCI_BAR0, sc->bge_res);
2158 
2159 	if (sc->bge_sysctl_tree != NULL)
2160 		sysctl_ctx_free(&sc->bge_sysctl_ctx);
2161 
2162 	bge_dma_free(sc);
2163 
2164 	return 0;
2165 }
2166 
2167 static void
2168 bge_reset(struct bge_softc *sc)
2169 {
2170 	device_t dev;
2171 	uint32_t cachesize, command, pcistate, reset;
2172 	void (*write_op)(struct bge_softc *, uint32_t, uint32_t);
2173 	int i, val = 0;
2174 
2175 	dev = sc->bge_dev;
2176 
2177 	if (BGE_IS_575X_PLUS(sc) && !BGE_IS_5714_FAMILY(sc) &&
2178 	    sc->bge_asicrev != BGE_ASICREV_BCM5906) {
2179 		if (sc->bge_flags & BGE_FLAG_PCIE)
2180 			write_op = bge_writemem_direct;
2181 		else
2182 			write_op = bge_writemem_ind;
2183 	} else {
2184 		write_op = bge_writereg_ind;
2185 	}
2186 
2187 	/* Save some important PCI state. */
2188 	cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
2189 	command = pci_read_config(dev, BGE_PCI_CMD, 4);
2190 	pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);
2191 
2192 	pci_write_config(dev, BGE_PCI_MISC_CTL,
2193 	    BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2194 	    BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW, 4);
2195 
2196 	/* Disable fastboot on controllers that support it. */
2197 	if (sc->bge_asicrev == BGE_ASICREV_BCM5752 ||
2198 	    sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
2199 	    sc->bge_asicrev == BGE_ASICREV_BCM5787) {
2200 		if (bootverbose)
2201 			if_printf(&sc->arpcom.ac_if, "Disabling fastboot\n");
2202 		CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0x0);
2203 	}
2204 
2205 	/*
2206 	 * Write the magic number to SRAM at offset 0xB50.
2207 	 * When firmware finishes its initialization it will
2208 	 * write ~BGE_MAGIC_NUMBER to the same location.
2209 	 */
2210 	bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
2211 
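	/*
	 * (65<<1) appears to be the 32-bit timer prescaler for a 66MHz
	 * core clock (BGE_32BITTIME_66MHZ in other BCM570x drivers).
	 */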
2212 	reset = BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1);
2213 
2214 	/* XXX: Broadcom Linux driver. */
2215 	if (sc->bge_flags & BGE_FLAG_PCIE) {
2216 		if (CSR_READ_4(sc, 0x7e2c) == 0x60)	/* PCIE 1.0 */
2217 			CSR_WRITE_4(sc, 0x7e2c, 0x20);
2218 		if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
2219 			/* Prevent PCIE link training during global reset */
2220 			CSR_WRITE_4(sc, BGE_MISC_CFG, (1<<29));
2221 			reset |= (1<<29);
2222 		}
2223 	}
2224 
2225 	/*
2226 	 * Set GPHY Power Down Override to leave GPHY
2227 	 * powered up in D0 uninitialized.
2228 	 */
2229 	if (BGE_IS_5705_PLUS(sc))
2230 		reset |= 0x04000000;
2231 
2232 	/* Issue global reset */
2233 	write_op(sc, BGE_MISC_CFG, reset);
2234 
2235 	if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
2236 		uint32_t status, ctrl;
2237 
2238 		status = CSR_READ_4(sc, BGE_VCPU_STATUS);
2239 		CSR_WRITE_4(sc, BGE_VCPU_STATUS,
2240 		    status | BGE_VCPU_STATUS_DRV_RESET);
2241 		ctrl = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL);
2242 		CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL,
2243 		    ctrl & ~BGE_VCPU_EXT_CTRL_HALT_CPU);
2244 	}
2245 
2246 	DELAY(1000);
2247 
2248 	/* XXX: Broadcom Linux driver. */
2249 	if (sc->bge_flags & BGE_FLAG_PCIE) {
2250 		if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
2251 			uint32_t v;
2252 
2253 			DELAY(500000); /* wait for link training to complete */
2254 			v = pci_read_config(dev, 0xc4, 4);
2255 			pci_write_config(dev, 0xc4, v | (1<<15), 4);
2256 		}
2257 		/*
2258 		 * Set PCIE max payload size to 128 bytes and
2259 		 * clear error status.
2260 		 */
2261 		pci_write_config(dev, 0xd8, 0xf5000, 4);
2262 	}
2263 
2264 	/* Reset some of the PCI state that got zapped by reset */
2265 	pci_write_config(dev, BGE_PCI_MISC_CTL,
2266 	    BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2267 	    BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW, 4);
2268 	pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
2269 	pci_write_config(dev, BGE_PCI_CMD, command, 4);
2270 	write_op(sc, BGE_MISC_CFG, (65 << 1));
2271 
2272 	/* Enable memory arbiter. */
2273 	if (BGE_IS_5714_FAMILY(sc)) {
2274 		uint32_t val;
2275 
2276 		val = CSR_READ_4(sc, BGE_MARB_MODE);
2277 		CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val);
2278 	} else {
2279 		CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
2280 	}
2281 
2282 	if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
2283 		for (i = 0; i < BGE_TIMEOUT; i++) {
2284 			val = CSR_READ_4(sc, BGE_VCPU_STATUS);
2285 			if (val & BGE_VCPU_STATUS_INIT_DONE)
2286 				break;
2287 			DELAY(100);
2288 		}
2289 		if (i == BGE_TIMEOUT) {
2290 			if_printf(&sc->arpcom.ac_if, "reset timed out\n");
2291 			return;
2292 		}
2293 	} else {
2294 		/*
2295 		 * Poll until we see the 1's complement of the magic number.
2296 		 * This indicates that the firmware initialization
2297 		 * is complete.
2298 		 */
2299 		for (i = 0; i < BGE_FIRMWARE_TIMEOUT; i++) {
2300 			val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
2301 			if (val == ~BGE_MAGIC_NUMBER)
2302 				break;
2303 			DELAY(10);
2304 		}
2305 		if (i == BGE_FIRMWARE_TIMEOUT) {
2306 			if_printf(&sc->arpcom.ac_if, "firmware handshake "
2307 				  "timed out, found 0x%08x\n", val);
2308 			return;
2309 		}
2310 	}
2311 
2312 	/*
2313 	 * XXX Wait for the value of the PCISTATE register to
2314 	 * return to its original pre-reset state. This is a
2315 	 * fairly good indicator of reset completion. If we don't
2316 	 * wait for the reset to fully complete, trying to read
2317 	 * from the device's non-PCI registers may yield garbage
2318 	 * results.
2319 	 */
2320 	for (i = 0; i < BGE_TIMEOUT; i++) {
2321 		if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
2322 			break;
2323 		DELAY(10);
2324 	}
2325 
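	/*
	 * XXX: undocumented; presumably another PCIE workaround taken
	 * from the Broadcom Linux driver (sets bit 25 of register
	 * 0x7c00, as the CSR write further below does as well).
	 */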
2326 	if (sc->bge_flags & BGE_FLAG_PCIE) {
2327 		reset = bge_readmem_ind(sc, 0x7c00);
2328 		bge_writemem_ind(sc, 0x7c00, reset | (1 << 25));
2329 	}
2330 
2331 	/* Fix up byte swapping */
2332 	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS |
2333 	    BGE_MODECTL_BYTESWAP_DATA);
2334 
2335 	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
2336 
2337 	/*
2338 	 * The 5704 in TBI mode apparently needs some special
2339 	 * adjustment to ensure the SERDES drive level is set
2340 	 * to 1.2V.
2341 	 */
2342 	if (sc->bge_asicrev == BGE_ASICREV_BCM5704 &&
2343 	    (sc->bge_flags & BGE_FLAG_TBI)) {
2344 		uint32_t serdescfg;
2345 
2346 		serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG);
2347 		serdescfg = (serdescfg & ~0xFFF) | 0x880;
2348 		CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg);
2349 	}
2350 
2351 	/* XXX: Broadcom Linux driver. */
2352 	if ((sc->bge_flags & BGE_FLAG_PCIE) &&
2353 	    sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
2354 		uint32_t v;
2355 
2356 		v = CSR_READ_4(sc, 0x7c00);
2357 		CSR_WRITE_4(sc, 0x7c00, v | (1<<25));
2358 	}
2359 
2360 	DELAY(10000);
2361 }
2362 
2363 /*
2364  * Frame reception handling. This is called if there's a frame
2365  * on the receive return list.
2366  *
2367  * Note: we have to be able to handle two possibilities here:
2368  * 1) the frame is from the jumbo receive ring
2369  * 2) the frame is from the standard receive ring
2370  */
2371 
2372 static void
2373 bge_rxeof(struct bge_softc *sc)
2374 {
2375 	struct ifnet *ifp;
2376 	int stdcnt = 0, jumbocnt = 0;
2377 
2378 	if (sc->bge_rx_saved_considx ==
2379 	    sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx)
2380 		return;
2381 
2382 	ifp = &sc->arpcom.ac_if;
2383 
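	/*
	 * Walk the return ring from our saved consumer index up to the
	 * producer index the chip last reported in the status block.
	 */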
2384 	while (sc->bge_rx_saved_considx !=
2385 	       sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx) {
2386 		struct bge_rx_bd	*cur_rx;
2387 		uint32_t		rxidx;
2388 		struct mbuf		*m = NULL;
2389 		uint16_t		vlan_tag = 0;
2390 		int			have_tag = 0;
2391 
2392 		cur_rx =
2393 	    &sc->bge_ldata.bge_rx_return_ring[sc->bge_rx_saved_considx];
2394 
2395 		rxidx = cur_rx->bge_idx;
2396 		BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt);
2397 		logif(rx_pkt);
2398 
2399 		if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
2400 			have_tag = 1;
2401 			vlan_tag = cur_rx->bge_vlan_tag;
2402 		}
2403 
2404 		if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
2405 			BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
2406 			jumbocnt++;
2407 
2408 			if (rxidx != sc->bge_jumbo) {
2409 				ifp->if_ierrors++;
2410 				if_printf(ifp, "sw jumbo index(%d) "
2411 				    "and hw jumbo index(%d) mismatch, drop!\n",
2412 				    sc->bge_jumbo, rxidx);
2413 				bge_setup_rxdesc_jumbo(sc, rxidx);
2414 				continue;
2415 			}
2416 
2417 			m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx].bge_mbuf;
2418 			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2419 				ifp->if_ierrors++;
2420 				bge_setup_rxdesc_jumbo(sc, sc->bge_jumbo);
2421 				continue;
2422 			}
2423 			if (bge_newbuf_jumbo(sc, sc->bge_jumbo, 0)) {
2424 				ifp->if_ierrors++;
2425 				bge_setup_rxdesc_jumbo(sc, sc->bge_jumbo);
2426 				continue;
2427 			}
2428 		} else {
2429 			BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
2430 			stdcnt++;
2431 
2432 			if (rxidx != sc->bge_std) {
2433 				ifp->if_ierrors++;
2434 				if_printf(ifp, "sw std index(%d) "
2435 				    "and hw std index(%d) mismatch, drop!\n",
2436 				    sc->bge_std, rxidx);
2437 				bge_setup_rxdesc_std(sc, rxidx);
2438 				continue;
2439 			}
2440 
2441 			m = sc->bge_cdata.bge_rx_std_chain[rxidx].bge_mbuf;
2442 			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2443 				ifp->if_ierrors++;
2444 				bge_setup_rxdesc_std(sc, sc->bge_std);
2445 				continue;
2446 			}
2447 			if (bge_newbuf_std(sc, sc->bge_std, 0)) {
2448 				ifp->if_ierrors++;
2449 				bge_setup_rxdesc_std(sc, sc->bge_std);
2450 				continue;
2451 			}
2452 		}
2453 
2454 		ifp->if_ipackets++;
2455 #ifndef __i386__
2456 		/*
2457 		 * The i386 allows unaligned accesses, but for other
2458 		 * platforms we must make sure the payload is aligned.
2459 		 */
2460 		if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) {
2461 			bcopy(m->m_data, m->m_data + ETHER_ALIGN,
2462 			    cur_rx->bge_len);
2463 			m->m_data += ETHER_ALIGN;
2464 		}
2465 #endif
2466 		m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
2467 		m->m_pkthdr.rcvif = ifp;
2468 
2469 		if (ifp->if_capenable & IFCAP_RXCSUM) {
2470 			if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
2471 				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
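				/*
				 * The chip hands back its computed IP
				 * header checksum; 0xffff presumably
				 * indicates the header verified OK.
				 */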
2472 				if ((cur_rx->bge_ip_csum ^ 0xffff) == 0)
2473 					m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2474 			}
2475 			if ((cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) &&
2476 			    m->m_pkthdr.len >= BGE_MIN_FRAME) {
2477 				m->m_pkthdr.csum_data =
2478 					cur_rx->bge_tcp_udp_csum;
2479 				m->m_pkthdr.csum_flags |=
2480 					CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2481 			}
2482 		}
2483 
2484 		/*
2485 		 * If we received a packet with a vlan tag, attach the
2486 		 * tag to the mbuf so the vlan layer can demux it.
2487 		 */
2488 		if (have_tag) {
2489 			m->m_flags |= M_VLANTAG;
2490 			m->m_pkthdr.ether_vlantag = vlan_tag;
2491 			have_tag = vlan_tag = 0;
2492 		}
2493 		ifp->if_input(ifp, m);
2494 	}
2495 
2496 	bge_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
2497 	if (stdcnt)
2498 		bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
2499 	if (jumbocnt)
2500 		bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
2501 }
2502 
2503 static void
2504 bge_txeof(struct bge_softc *sc)
2505 {
2506 	struct bge_tx_bd *cur_tx = NULL;
2507 	struct ifnet *ifp;
2508 
2509 	if (sc->bge_tx_saved_considx ==
2510 	    sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx)
2511 		return;
2512 
2513 	ifp = &sc->arpcom.ac_if;
2514 
2515 	/*
2516 	 * Go through our tx ring and free mbufs for those
2517 	 * frames that have been sent.
2518 	 */
2519 	while (sc->bge_tx_saved_considx !=
2520 	       sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx) {
2521 		uint32_t idx = 0;
2522 
2523 		idx = sc->bge_tx_saved_considx;
2524 		cur_tx = &sc->bge_ldata.bge_tx_ring[idx];
2525 		if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
2526 			ifp->if_opackets++;
2527 		if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
2528 			bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag,
2529 			    sc->bge_cdata.bge_tx_dmamap[idx]);
2530 			m_freem(sc->bge_cdata.bge_tx_chain[idx]);
2531 			sc->bge_cdata.bge_tx_chain[idx] = NULL;
2532 		}
2533 		sc->bge_txcnt--;
2534 		BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
2535 		logif(tx_pkt);
2536 	}
2537 
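	/*
	 * Clear OACTIVE once enough descriptors have drained to
	 * encapsulate at least one more frame.
	 */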
2538 	if (cur_tx != NULL &&
2539 	    (BGE_TX_RING_CNT - sc->bge_txcnt) >=
2540 	    (BGE_NSEG_RSVD + BGE_NSEG_SPARE))
2541 		ifp->if_flags &= ~IFF_OACTIVE;
2542 
2543 	if (sc->bge_txcnt == 0)
2544 		ifp->if_timer = 0;
2545 
2546 	if (!ifq_is_empty(&ifp->if_snd))
2547 		if_devstart(ifp);
2548 }
2549 
2550 #ifdef DEVICE_POLLING
2551 
2552 static void
2553 bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
2554 {
2555 	struct bge_softc *sc = ifp->if_softc;
2556  	uint32_t status;
2557 
2558 	switch(cmd) {
2559 	case POLL_REGISTER:
2560 		bge_disable_intr(sc);
2561 		break;
2562 	case POLL_DEREGISTER:
2563 		bge_enable_intr(sc);
2564 		break;
2565 	case POLL_AND_CHECK_STATUS:
2566 		/*
2567 		 * Process link state changes.
2568 		 */
2569 		status = CSR_READ_4(sc, BGE_MAC_STS);
2570 		if ((status & sc->bge_link_chg) || sc->bge_link_evt) {
2571 			sc->bge_link_evt = 0;
2572 			sc->bge_link_upd(sc, status);
2573 		}
2574 		/* fall through */
2575 	case POLL_ONLY:
2576 		if (ifp->if_flags & IFF_RUNNING) {
2577 			bge_rxeof(sc);
2578 			bge_txeof(sc);
2579 		}
2580 		break;
2581 	}
2582 }
2583 
2584 #endif
2585 
2586 static void
2587 bge_intr(void *xsc)
2588 {
2589 	struct bge_softc *sc = xsc;
2590 	struct ifnet *ifp = &sc->arpcom.ac_if;
2591 	uint32_t status;
2592 
2593 	logif(intr);
2594 
2595  	/*
2596 	 * Ack the interrupt by writing something to BGE_MBX_IRQ0_LO.  Don't
2597 	 * disable interrupts by writing nonzero like we used to, since with
2598 	 * our current organization this just gives complications and
2599 	 * pessimizations for re-enabling interrupts.  We used to have races
2600 	 * instead of the necessary complications.  Disabling interrupts
2601 	 * would just reduce the chance of a status update while we are
2602 	 * running (by switching to the interrupt-mode coalescence
2603 	 * parameters), but this chance is already very low so it is more
2604 	 * efficient to get another interrupt than prevent it.
2605 	 *
2606 	 * We do the ack first to ensure another interrupt if there is a
2607 	 * status update after the ack.  We don't check for the status
2608 	 * changing later because it is more efficient to get another
2609 	 * interrupt than prevent it, not quite as above (not checking is
2610 	 * a smaller optimization than not toggling the interrupt enable,
2611 	 * since checking doesn't involve PCI accesses and toggling requires
2612 	 * the status check).  So toggling would probably be a pessimization
2613 	 * even with MSI.  It would only be needed for using a task queue.
2614 	 */
2615 	bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
2616 
2617 	/*
2618 	 * Process link state changes.
2619 	 */
2620 	status = CSR_READ_4(sc, BGE_MAC_STS);
2621 	if ((status & sc->bge_link_chg) || sc->bge_link_evt) {
2622 		sc->bge_link_evt = 0;
2623 		sc->bge_link_upd(sc, status);
2624 	}
2625 
2626 	if (ifp->if_flags & IFF_RUNNING) {
2627 		/* Check RX return ring producer/consumer */
2628 		bge_rxeof(sc);
2629 
2630 		/* Check TX ring producer/consumer */
2631 		bge_txeof(sc);
2632 	}
2633 
2634 	if (sc->bge_coal_chg)
2635 		bge_coal_change(sc);
2636 }
2637 
2638 static void
2639 bge_tick(void *xsc)
2640 {
2641 	struct bge_softc *sc = xsc;
2642 	struct ifnet *ifp = &sc->arpcom.ac_if;
2643 
2644 	lwkt_serialize_enter(ifp->if_serializer);
2645 
2646 	if (BGE_IS_5705_PLUS(sc))
2647 		bge_stats_update_regs(sc);
2648 	else
2649 		bge_stats_update(sc);
2650 
2651 	if (sc->bge_flags & BGE_FLAG_TBI) {
2652 		/*
2653 		 * Since in TBI mode auto-polling can't be used we should poll
2654 		 * link status manually. Here we register pending link event
2655 		 * and trigger interrupt.
2656 		 */
2657 		sc->bge_link_evt++;
2658 		BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
2659 	} else if (!sc->bge_link) {
2660 		mii_tick(device_get_softc(sc->bge_miibus));
2661 	}
2662 
2663 	callout_reset(&sc->bge_stat_timer, hz, bge_tick, sc);
2664 
2665 	lwkt_serialize_exit(ifp->if_serializer);
2666 }
2667 
2668 static void
2669 bge_stats_update_regs(struct bge_softc *sc)
2670 {
2671 	struct ifnet *ifp = &sc->arpcom.ac_if;
2672 	struct bge_mac_stats_regs stats;
2673 	uint32_t *s;
2674 	int i;
2675 
2676 	s = (uint32_t *)&stats;
2677 	for (i = 0; i < sizeof(struct bge_mac_stats_regs); i += 4) {
2678 		*s = CSR_READ_4(sc, BGE_RX_STATS + i);
2679 		s++;
2680 	}
2681 
2682 	ifp->if_collisions +=
2683 	   (stats.dot3StatsSingleCollisionFrames +
2684 	   stats.dot3StatsMultipleCollisionFrames +
2685 	   stats.dot3StatsExcessiveCollisions +
2686 	   stats.dot3StatsLateCollisions) -
2687 	   ifp->if_collisions;
2688 }
2689 
2690 static void
2691 bge_stats_update(struct bge_softc *sc)
2692 {
2693 	struct ifnet *ifp = &sc->arpcom.ac_if;
2694 	bus_size_t stats;
2695 
2696 	stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;
2697 
2698 #define READ_STAT(sc, stats, stat)	\
2699 	CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))
2700 
2701 	ifp->if_collisions +=
2702 	   (READ_STAT(sc, stats,
2703 		txstats.dot3StatsSingleCollisionFrames.bge_addr_lo) +
2704 	    READ_STAT(sc, stats,
2705 		txstats.dot3StatsMultipleCollisionFrames.bge_addr_lo) +
2706 	    READ_STAT(sc, stats,
2707 		txstats.dot3StatsExcessiveCollisions.bge_addr_lo) +
2708 	    READ_STAT(sc, stats,
2709 		txstats.dot3StatsLateCollisions.bge_addr_lo)) -
2710 	   ifp->if_collisions;
2711 
2712 #undef READ_STAT
2713 
2714 #ifdef notdef
2715 	ifp->if_collisions +=
2716 	   (sc->bge_rdata->bge_info.bge_stats.dot3StatsSingleCollisionFrames +
2717 	   sc->bge_rdata->bge_info.bge_stats.dot3StatsMultipleCollisionFrames +
2718 	   sc->bge_rdata->bge_info.bge_stats.dot3StatsExcessiveCollisions +
2719 	   sc->bge_rdata->bge_info.bge_stats.dot3StatsLateCollisions) -
2720 	   ifp->if_collisions;
2721 #endif
2722 }
2723 
2724 /*
2725  * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
2726  * pointers to descriptors.
2727  */
2728 static int
2729 bge_encap(struct bge_softc *sc, struct mbuf **m_head0, uint32_t *txidx)
2730 {
2731 	struct bge_tx_bd *d = NULL;
2732 	uint16_t csum_flags = 0;
2733 	bus_dma_segment_t segs[BGE_NSEG_NEW];
2734 	bus_dmamap_t map;
2735 	int error, maxsegs, nsegs, idx, i;
2736 	struct mbuf *m_head = *m_head0;
2737 
2738 	if (m_head->m_pkthdr.csum_flags) {
2739 		if (m_head->m_pkthdr.csum_flags & CSUM_IP)
2740 			csum_flags |= BGE_TXBDFLAG_IP_CSUM;
2741 		if (m_head->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
2742 			csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
2743 		if (m_head->m_flags & M_LASTFRAG)
2744 			csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
2745 		else if (m_head->m_flags & M_FRAG)
2746 			csum_flags |= BGE_TXBDFLAG_IP_FRAG;
2747 	}
2748 
2749 	idx = *txidx;
2750 	map = sc->bge_cdata.bge_tx_dmamap[idx];
2751 
2752 	maxsegs = (BGE_TX_RING_CNT - sc->bge_txcnt) - BGE_NSEG_RSVD;
2753 	KASSERT(maxsegs >= BGE_NSEG_SPARE,
2754 		("not enough segments %d", maxsegs));
2755 
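	/* The segs[] array above only holds BGE_NSEG_NEW entries. */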
2756 	if (maxsegs > BGE_NSEG_NEW)
2757 		maxsegs = BGE_NSEG_NEW;
2758 
2759 	/*
2760 	 * Pad outbound frame to BGE_MIN_FRAME for an unusual reason.
2761 	 * The bge hardware will pad out Tx runts to BGE_MIN_FRAME,
2762 	 * but when such padded frames employ the bge IP/TCP checksum
2763 	 * offload, the hardware checksum assist gives incorrect results
2764 	 * (possibly from incorporating its own padding into the UDP/TCP
2765 	 * checksum; who knows).  If we pad such runts with zeros, the
2766 	 * onboard checksum comes out correct.
2767 	 */
2768 	if ((csum_flags & BGE_TXBDFLAG_TCP_UDP_CSUM) &&
2769 	    m_head->m_pkthdr.len < BGE_MIN_FRAME) {
2770 		error = m_devpad(m_head, BGE_MIN_FRAME);
2771 		if (error)
2772 			goto back;
2773 	}
2774 
2775 	error = bus_dmamap_load_mbuf_defrag(sc->bge_cdata.bge_tx_mtag, map,
2776 			m_head0, segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
2777 	if (error)
2778 		goto back;
2779 
2780 	m_head = *m_head0;
2781 	bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag, map, BUS_DMASYNC_PREWRITE);
2782 
2783 	for (i = 0; ; i++) {
2784 		d = &sc->bge_ldata.bge_tx_ring[idx];
2785 
2786 		d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr);
2787 		d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr);
2788 		d->bge_len = segs[i].ds_len;
2789 		d->bge_flags = csum_flags;
2790 
2791 		if (i == nsegs - 1)
2792 			break;
2793 		BGE_INC(idx, BGE_TX_RING_CNT);
2794 	}
2795 	/* Mark the last segment as end of packet... */
2796 	d->bge_flags |= BGE_TXBDFLAG_END;
2797 
2798 	/* Set vlan tag to the first segment of the packet. */
2799 	d = &sc->bge_ldata.bge_tx_ring[*txidx];
2800 	if (m_head->m_flags & M_VLANTAG) {
2801 		d->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
2802 		d->bge_vlan_tag = m_head->m_pkthdr.ether_vlantag;
2803 	} else {
2804 		d->bge_vlan_tag = 0;
2805 	}
2806 
2807 	/*
2808 	 * Ensure that the map for this transmission is placed at
2809 	 * the array index of the last descriptor in this chain.
2810 	 */
2811 	sc->bge_cdata.bge_tx_dmamap[*txidx] = sc->bge_cdata.bge_tx_dmamap[idx];
2812 	sc->bge_cdata.bge_tx_dmamap[idx] = map;
2813 	sc->bge_cdata.bge_tx_chain[idx] = m_head;
2814 	sc->bge_txcnt += nsegs;
2815 
2816 	BGE_INC(idx, BGE_TX_RING_CNT);
2817 	*txidx = idx;
2818 back:
2819 	if (error) {
2820 		m_freem(*m_head0);
2821 		*m_head0 = NULL;
2822 	}
2823 	return error;
2824 }
2825 
2826 /*
2827  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
2828  * to the mbuf data regions directly in the transmit descriptors.
2829  */
2830 static void
2831 bge_start(struct ifnet *ifp)
2832 {
2833 	struct bge_softc *sc = ifp->if_softc;
2834 	struct mbuf *m_head = NULL;
2835 	uint32_t prodidx;
2836 	int need_trans;
2837 
2838 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
2839 		return;
2840 
2841 	prodidx = sc->bge_tx_prodidx;
2842 
2843 	need_trans = 0;
2844 	while (sc->bge_cdata.bge_tx_chain[prodidx] == NULL) {
2845 		m_head = ifq_dequeue(&ifp->if_snd, NULL);
2846 		if (m_head == NULL)
2847 			break;
2848 
2849 		/*
2850 		 * XXX
2851 		 * The code inside the if() block is never reached since we
2852 		 * must mark CSUM_IP_FRAGS in our if_hwassist to start getting
2853 		 * requests to checksum TCP/UDP in a fragmented packet.
2854 		 *
2855 		 * XXX
2856 		 * safety overkill.  If this is a fragmented packet chain
2857 		 * with delayed TCP/UDP checksums, then only encapsulate
2858 		 * it if we have enough descriptors to handle the entire
2859 		 * chain at once.
2860 		 * (paranoia -- may not actually be needed)
2861 		 */
2862 		if ((m_head->m_flags & M_FIRSTFRAG) &&
2863 		    (m_head->m_pkthdr.csum_flags & CSUM_DELAY_DATA)) {
2864 			if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
2865 			    m_head->m_pkthdr.csum_data + BGE_NSEG_RSVD) {
2866 				ifp->if_flags |= IFF_OACTIVE;
2867 				ifq_prepend(&ifp->if_snd, m_head);
2868 				break;
2869 			}
2870 		}
2871 
2872 		/*
2873 		 * Sanity check: avoid coming within BGE_NSEG_RSVD
2874 		 * descriptors of the end of the ring.  Also make
2875 		 * sure there are BGE_NSEG_SPARE descriptors for
2876 		 * jumbo buffers' defragmentation.
2877 		 */
2878 		if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
2879 		    (BGE_NSEG_RSVD + BGE_NSEG_SPARE)) {
2880 			ifp->if_flags |= IFF_OACTIVE;
2881 			ifq_prepend(&ifp->if_snd, m_head);
2882 			break;
2883 		}
2884 
2885 		/*
2886 		 * Pack the data into the transmit ring. If we
2887 		 * don't have room, set the OACTIVE flag and wait
2888 		 * for the NIC to drain the ring.
2889 		 */
2890 		if (bge_encap(sc, &m_head, &prodidx)) {
2891 			ifp->if_flags |= IFF_OACTIVE;
2892 			ifp->if_oerrors++;
2893 			break;
2894 		}
2895 		need_trans = 1;
2896 
2897 		ETHER_BPF_MTAP(ifp, m_head);
2898 	}
2899 
2900 	if (!need_trans)
2901 		return;
2902 
2903 	/* Transmit */
2904 	bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
2905 	/* 5700 b2 errata */
2906 	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
2907 		bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
2908 
2909 	sc->bge_tx_prodidx = prodidx;
2910 
2911 	/*
2912 	 * Set a timeout in case the chip goes out to lunch.
2913 	 */
2914 	ifp->if_timer = 5;
2915 }
2916 
2917 static void
2918 bge_init(void *xsc)
2919 {
2920 	struct bge_softc *sc = xsc;
2921 	struct ifnet *ifp = &sc->arpcom.ac_if;
2922 	uint16_t *m;
2923 
2924 	ASSERT_SERIALIZED(ifp->if_serializer);
2925 
2926 	if (ifp->if_flags & IFF_RUNNING)
2927 		return;
2928 
2929 	/* Cancel pending I/O and flush buffers. */
2930 	bge_stop(sc);
2931 	bge_reset(sc);
2932 	bge_chipinit(sc);
2933 
2934 	/*
2935 	 * Init the various state machines, ring
2936 	 * control blocks and firmware.
2937 	 */
2938 	if (bge_blockinit(sc)) {
2939 		if_printf(ifp, "initialization failure\n");
2940 		bge_stop(sc);
2941 		return;
2942 	}
2943 
2944 	/* Specify MTU. */
2945 	CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
2946 	    ETHER_HDR_LEN + ETHER_CRC_LEN + EVL_ENCAPLEN);
2947 
2948 	/* Load our MAC address. */
2949 	m = (uint16_t *)&sc->arpcom.ac_enaddr[0];
2950 	CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
2951 	CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
2952 
2953 	/* Enable or disable promiscuous mode as needed. */
2954 	bge_setpromisc(sc);
2955 
2956 	/* Program multicast filter. */
2957 	bge_setmulti(sc);
2958 
2959 	/* Init RX ring. */
2960 	if (bge_init_rx_ring_std(sc)) {
2961 		if_printf(ifp, "RX ring initialization failed\n");
2962 		bge_stop(sc);
2963 		return;
2964 	}
2965 
2966 	/*
2967 	 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
2968 	 * memory to ensure that the chip has in fact read the first
2969 	 * entry of the ring.
2970 	 */
2971 	if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
2972 		uint32_t		v, i;
2973 		for (i = 0; i < 10; i++) {
2974 			DELAY(20);
2975 			v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
2976 			if (v == (MCLBYTES - ETHER_ALIGN))
2977 				break;
2978 		}
2979 		if (i == 10)
2980 			if_printf(ifp, "5705 A0 chip failed to load RX ring\n");
2981 	}
2982 
2983 	/* Init jumbo RX ring. */
2984 	if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN)) {
2985 		if (bge_init_rx_ring_jumbo(sc)) {
2986 			if_printf(ifp, "Jumbo RX ring initialization failed\n");
2987 			bge_stop(sc);
2988 			return;
2989 		}
2990 	}
2991 
2992 	/* Init our RX return ring index */
2993 	sc->bge_rx_saved_considx = 0;
2994 
2995 	/* Init TX ring. */
2996 	bge_init_tx_ring(sc);
2997 
2998 	/* Turn on transmitter */
2999 	BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE);
3000 
3001 	/* Turn on receiver */
3002 	BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
3003 
3004 	/* Tell firmware we're alive. */
3005 	BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3006 
3007 	/* Enable host interrupts if polling(4) is not enabled. */
3008 	BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
3009 #ifdef DEVICE_POLLING
3010 	if (ifp->if_flags & IFF_POLLING)
3011 		bge_disable_intr(sc);
3012 	else
3013 #endif
3014 	bge_enable_intr(sc);
3015 
3016 	bge_ifmedia_upd(ifp);
3017 
3018 	ifp->if_flags |= IFF_RUNNING;
3019 	ifp->if_flags &= ~IFF_OACTIVE;
3020 
3021 	callout_reset(&sc->bge_stat_timer, hz, bge_tick, sc);
3022 }
3023 
3024 /*
3025  * Set media options.
3026  */
3027 static int
3028 bge_ifmedia_upd(struct ifnet *ifp)
3029 {
3030 	struct bge_softc *sc = ifp->if_softc;
3031 
3032 	/* If this is a 1000baseX NIC, enable the TBI port. */
3033 	if (sc->bge_flags & BGE_FLAG_TBI) {
3034 		struct ifmedia *ifm = &sc->bge_ifmedia;
3035 
3036 		if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
3037 			return(EINVAL);
3038 
3039 		switch(IFM_SUBTYPE(ifm->ifm_media)) {
3040 		case IFM_AUTO:
3041 			/*
3042 			 * The BCM5704 ASIC appears to have a special
3043 			 * mechanism for programming the autoneg
3044 			 * advertisement registers in TBI mode.
3045 			 */
3046 			if (!bge_fake_autoneg &&
3047 			    sc->bge_asicrev == BGE_ASICREV_BCM5704) {
3048 				uint32_t sgdig;
3049 
3050 				CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
3051 				sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
3052 				sgdig |= BGE_SGDIGCFG_AUTO |
3053 					 BGE_SGDIGCFG_PAUSE_CAP |
3054 					 BGE_SGDIGCFG_ASYM_PAUSE;
3055 				CSR_WRITE_4(sc, BGE_SGDIG_CFG,
3056 					    sgdig | BGE_SGDIGCFG_SEND);
3057 				DELAY(5);
3058 				CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
3059 			}
3060 			break;
3061 		case IFM_1000_SX:
3062 			if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
3063 				BGE_CLRBIT(sc, BGE_MAC_MODE,
3064 				    BGE_MACMODE_HALF_DUPLEX);
3065 			} else {
3066 				BGE_SETBIT(sc, BGE_MAC_MODE,
3067 				    BGE_MACMODE_HALF_DUPLEX);
3068 			}
3069 			break;
3070 		default:
3071 			return(EINVAL);
3072 		}
3073 	} else {
3074 		struct mii_data *mii = device_get_softc(sc->bge_miibus);
3075 
3076 		sc->bge_link_evt++;
3077 		sc->bge_link = 0;
3078 		if (mii->mii_instance) {
3079 			struct mii_softc *miisc;
3080 
3081 			LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
3082 				mii_phy_reset(miisc);
3083 		}
3084 		mii_mediachg(mii);
3085 	}
3086 	return(0);
3087 }
3088 
3089 /*
3090  * Report current media status.
3091  */
3092 static void
3093 bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
3094 {
3095 	struct bge_softc *sc = ifp->if_softc;
3096 
3097 	if (sc->bge_flags & BGE_FLAG_TBI) {
3098 		ifmr->ifm_status = IFM_AVALID;
3099 		ifmr->ifm_active = IFM_ETHER;
3100 		if (CSR_READ_4(sc, BGE_MAC_STS) &
3101 		    BGE_MACSTAT_TBI_PCS_SYNCHED) {
3102 			ifmr->ifm_status |= IFM_ACTIVE;
3103 		} else {
3104 			ifmr->ifm_active |= IFM_NONE;
3105 			return;
3106 		}
3107 
3108 		ifmr->ifm_active |= IFM_1000_SX;
3109 		if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
3110 			ifmr->ifm_active |= IFM_HDX;
3111 		else
3112 			ifmr->ifm_active |= IFM_FDX;
3113 	} else {
3114 		struct mii_data *mii = device_get_softc(sc->bge_miibus);
3115 
3116 		mii_pollstat(mii);
3117 		ifmr->ifm_active = mii->mii_media_active;
3118 		ifmr->ifm_status = mii->mii_media_status;
3119 	}
3120 }
3121 
3122 static int
3123 bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
3124 {
3125 	struct bge_softc *sc = ifp->if_softc;
3126 	struct ifreq *ifr = (struct ifreq *)data;
3127 	int mask, error = 0;
3128 
3129 	ASSERT_SERIALIZED(ifp->if_serializer);
3130 
3131 	switch (command) {
3132 	case SIOCSIFMTU:
3133 		if ((!BGE_IS_JUMBO_CAPABLE(sc) && ifr->ifr_mtu > ETHERMTU) ||
3134 		    (BGE_IS_JUMBO_CAPABLE(sc) &&
3135 		     ifr->ifr_mtu > BGE_JUMBO_MTU)) {
3136 			error = EINVAL;
3137 		} else if (ifp->if_mtu != ifr->ifr_mtu) {
3138 			ifp->if_mtu = ifr->ifr_mtu;
3139 			ifp->if_flags &= ~IFF_RUNNING;
3140 			bge_init(sc);
3141 		}
3142 		break;
3143 	case SIOCSIFFLAGS:
3144 		if (ifp->if_flags & IFF_UP) {
3145 			if (ifp->if_flags & IFF_RUNNING) {
3146 				mask = ifp->if_flags ^ sc->bge_if_flags;
3147 
3148 				/*
3149 				 * If only the state of the PROMISC flag
3150 				 * changed, then just use the 'set promisc
3151 				 * mode' command instead of reinitializing
3152 				 * the entire NIC. Doing a full re-init
3153 				 * means reloading the firmware and waiting
3154 				 * for it to start up, which may take a
3155 				 * second or two.  Similarly for ALLMULTI.
3156 				 */
3157 				if (mask & IFF_PROMISC)
3158 					bge_setpromisc(sc);
3159 				if (mask & IFF_ALLMULTI)
3160 					bge_setmulti(sc);
3161 			} else {
3162 				bge_init(sc);
3163 			}
3164 		} else {
3165 			if (ifp->if_flags & IFF_RUNNING)
3166 				bge_stop(sc);
3167 		}
3168 		sc->bge_if_flags = ifp->if_flags;
3169 		break;
3170 	case SIOCADDMULTI:
3171 	case SIOCDELMULTI:
3172 		if (ifp->if_flags & IFF_RUNNING)
3173 			bge_setmulti(sc);
3174 		break;
3175 	case SIOCSIFMEDIA:
3176 	case SIOCGIFMEDIA:
3177 		if (sc->bge_flags & BGE_FLAG_TBI) {
3178 			error = ifmedia_ioctl(ifp, ifr,
3179 			    &sc->bge_ifmedia, command);
3180 		} else {
3181 			struct mii_data *mii;
3182 
3183 			mii = device_get_softc(sc->bge_miibus);
3184 			error = ifmedia_ioctl(ifp, ifr,
3185 					      &mii->mii_media, command);
3186 		}
3187 		break;
3188 	case SIOCSIFCAP:
3189 		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
3190 		if (mask & IFCAP_HWCSUM) {
3191 			ifp->if_capenable ^= (mask & IFCAP_HWCSUM);
3192 			if (IFCAP_HWCSUM & ifp->if_capenable)
3193 				ifp->if_hwassist = BGE_CSUM_FEATURES;
3194 			else
3195 				ifp->if_hwassist = 0;
3196 		}
3197 		break;
3198 	default:
3199 		error = ether_ioctl(ifp, command, data);
3200 		break;
3201 	}
3202 	return error;
3203 }
3204 
3205 static void
3206 bge_watchdog(struct ifnet *ifp)
3207 {
3208 	struct bge_softc *sc = ifp->if_softc;
3209 
3210 	if_printf(ifp, "watchdog timeout -- resetting\n");
3211 
3212 	ifp->if_flags &= ~IFF_RUNNING;
3213 	bge_init(sc);
3214 
3215 	ifp->if_oerrors++;
3216 
3217 	if (!ifq_is_empty(&ifp->if_snd))
3218 		if_devstart(ifp);
3219 }
3220 
3221 /*
3222  * Stop the adapter and free any mbufs allocated to the
3223  * RX and TX lists.
3224  */
3225 static void
3226 bge_stop(struct bge_softc *sc)
3227 {
3228 	struct ifnet *ifp = &sc->arpcom.ac_if;
3229 	struct ifmedia_entry *ifm;
3230 	struct mii_data *mii = NULL;
3231 	int mtmp, itmp;
3232 
3233 	ASSERT_SERIALIZED(ifp->if_serializer);
3234 
3235 	if ((sc->bge_flags & BGE_FLAG_TBI) == 0)
3236 		mii = device_get_softc(sc->bge_miibus);
3237 
3238 	callout_stop(&sc->bge_stat_timer);
3239 
3240 	/*
3241 	 * Disable all of the receiver blocks
3242 	 */
3243 	BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
3244 	BGE_CLRBIT(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
3245 	BGE_CLRBIT(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
3246 	if (!BGE_IS_5705_PLUS(sc))
3247 		BGE_CLRBIT(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
3248 	BGE_CLRBIT(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
3249 	BGE_CLRBIT(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
3250 	BGE_CLRBIT(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
3251 
3252 	/*
3253 	 * Disable all of the transmit blocks
3254 	 */
3255 	BGE_CLRBIT(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
3256 	BGE_CLRBIT(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
3257 	BGE_CLRBIT(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
3258 	BGE_CLRBIT(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
3259 	BGE_CLRBIT(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
3260 	if (!BGE_IS_5705_PLUS(sc))
3261 		BGE_CLRBIT(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
3262 	BGE_CLRBIT(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
3263 
3264 	/*
3265 	 * Shut down all of the memory managers and related
3266 	 * state machines.
3267 	 */
3268 	BGE_CLRBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
3269 	BGE_CLRBIT(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
3270 	if (!BGE_IS_5705_PLUS(sc))
3271 		BGE_CLRBIT(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
3272 	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
3273 	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
3274 	if (!BGE_IS_5705_PLUS(sc)) {
3275 		BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
3276 		BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
3277 	}
3278 
3279 	/* Disable host interrupts. */
3280 	bge_disable_intr(sc);
3281 
3282 	/*
3283 	 * Tell firmware we're shutting down.
3284 	 */
3285 	BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3286 
3287 	/* Free the RX lists. */
3288 	bge_free_rx_ring_std(sc);
3289 
3290 	/* Free jumbo RX list. */
3291 	if (BGE_IS_JUMBO_CAPABLE(sc))
3292 		bge_free_rx_ring_jumbo(sc);
3293 
3294 	/* Free TX buffers. */
3295 	bge_free_tx_ring(sc);
3296 
3297 	/*
3298 	 * Isolate/power down the PHY, but leave the media selection
3299 	 * unchanged so that things will be put back to normal when
3300 	 * we bring the interface back up.
3301 	 *
3302 	 * 'mii' may be NULL in the following cases:
3303 	 * - The device uses TBI.
3304 	 * - bge_stop() is called by bge_detach().
3305 	 */
3306 	if (mii != NULL) {
3307 		itmp = ifp->if_flags;
3308 		ifp->if_flags |= IFF_UP;
3309 		ifm = mii->mii_media.ifm_cur;
3310 		mtmp = ifm->ifm_media;
3311 		ifm->ifm_media = IFM_ETHER|IFM_NONE;
3312 		mii_mediachg(mii);
3313 		ifm->ifm_media = mtmp;
3314 		ifp->if_flags = itmp;
3315 	}
3316 
3317 	sc->bge_link = 0;
3318 	sc->bge_coal_chg = 0;
3319 
3320 	sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
3321 
3322 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
3323 	ifp->if_timer = 0;
3324 }
3325 
3326 /*
3327  * Stop all chip I/O so that the kernel's probe routines don't
3328  * get confused by errant DMAs when rebooting.
3329  */
3330 static void
3331 bge_shutdown(device_t dev)
3332 {
3333 	struct bge_softc *sc = device_get_softc(dev);
3334 	struct ifnet *ifp = &sc->arpcom.ac_if;
3335 
3336 	lwkt_serialize_enter(ifp->if_serializer);
3337 	bge_stop(sc);
3338 	bge_reset(sc);
3339 	lwkt_serialize_exit(ifp->if_serializer);
3340 }
3341 
3342 static int
3343 bge_suspend(device_t dev)
3344 {
3345 	struct bge_softc *sc = device_get_softc(dev);
3346 	struct ifnet *ifp = &sc->arpcom.ac_if;
3347 
3348 	lwkt_serialize_enter(ifp->if_serializer);
3349 	bge_stop(sc);
3350 	lwkt_serialize_exit(ifp->if_serializer);
3351 
3352 	return 0;
3353 }
3354 
3355 static int
3356 bge_resume(device_t dev)
3357 {
3358 	struct bge_softc *sc = device_get_softc(dev);
3359 	struct ifnet *ifp = &sc->arpcom.ac_if;
3360 
3361 	lwkt_serialize_enter(ifp->if_serializer);
3362 
3363 	if (ifp->if_flags & IFF_UP) {
3364 		bge_init(sc);
3365 
3366 		if (!ifq_is_empty(&ifp->if_snd))
3367 			if_devstart(ifp);
3368 	}
3369 
3370 	lwkt_serialize_exit(ifp->if_serializer);
3371 
3372 	return 0;
3373 }
3374 
3375 static void
3376 bge_setpromisc(struct bge_softc *sc)
3377 {
3378 	struct ifnet *ifp = &sc->arpcom.ac_if;
3379 
3380 	if (ifp->if_flags & IFF_PROMISC)
3381 		BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
3382 	else
3383 		BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
3384 }
3385 
3386 static void
3387 bge_dma_free(struct bge_softc *sc)
3388 {
3389 	int i;
3390 
3391 	/* Destroy the RX mbuf DMA tag and maps. */
3392 	if (sc->bge_cdata.bge_rx_mtag != NULL) {
3393 		for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
3394 			bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag,
3395 			    sc->bge_cdata.bge_rx_std_dmamap[i]);
3396 		}
3397 		bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag,
3398 				   sc->bge_cdata.bge_rx_tmpmap);
3399 		bus_dma_tag_destroy(sc->bge_cdata.bge_rx_mtag);
3400 	}
3401 
3402 	/* Destroy TX mbuf DMA resources. */
3403 	if (sc->bge_cdata.bge_tx_mtag != NULL) {
3404 		for (i = 0; i < BGE_TX_RING_CNT; i++) {
3405 			bus_dmamap_destroy(sc->bge_cdata.bge_tx_mtag,
3406 			    sc->bge_cdata.bge_tx_dmamap[i]);
3407 		}
3408 		bus_dma_tag_destroy(sc->bge_cdata.bge_tx_mtag);
3409 	}
3410 
3411 	/* Destroy standard RX ring */
3412 	bge_dma_block_free(sc->bge_cdata.bge_rx_std_ring_tag,
3413 			   sc->bge_cdata.bge_rx_std_ring_map,
3414 			   sc->bge_ldata.bge_rx_std_ring);
3415 
3416 	if (BGE_IS_JUMBO_CAPABLE(sc))
3417 		bge_free_jumbo_mem(sc);
3418 
3419 	/* Destroy RX return ring */
3420 	bge_dma_block_free(sc->bge_cdata.bge_rx_return_ring_tag,
3421 			   sc->bge_cdata.bge_rx_return_ring_map,
3422 			   sc->bge_ldata.bge_rx_return_ring);
3423 
3424 	/* Destroy TX ring */
3425 	bge_dma_block_free(sc->bge_cdata.bge_tx_ring_tag,
3426 			   sc->bge_cdata.bge_tx_ring_map,
3427 			   sc->bge_ldata.bge_tx_ring);
3428 
3429 	/* Destroy status block */
3430 	bge_dma_block_free(sc->bge_cdata.bge_status_tag,
3431 			   sc->bge_cdata.bge_status_map,
3432 			   sc->bge_ldata.bge_status_block);
3433 
3434 	/* Destroy statistics block */
3435 	bge_dma_block_free(sc->bge_cdata.bge_stats_tag,
3436 			   sc->bge_cdata.bge_stats_map,
3437 			   sc->bge_ldata.bge_stats);
3438 
3439 	/* Destroy the parent tag */
3440 	if (sc->bge_cdata.bge_parent_tag != NULL)
3441 		bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag);
3442 }
3443 
3444 static int
3445 bge_dma_alloc(struct bge_softc *sc)
3446 {
3447 	struct ifnet *ifp = &sc->arpcom.ac_if;
3448 	int i, error;
3449 
3450 	/*
3451 	 * Allocate the parent bus DMA tag appropriate for PCI.
3452 	 */
3453 	error = bus_dma_tag_create(NULL, 1, 0,
3454 				   BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3455 				   NULL, NULL,
3456 				   BUS_SPACE_MAXSIZE_32BIT, 0,
3457 				   BUS_SPACE_MAXSIZE_32BIT,
3458 				   0, &sc->bge_cdata.bge_parent_tag);
3459 	if (error) {
3460 		if_printf(ifp, "could not allocate parent dma tag\n");
3461 		return error;
3462 	}
3463 
3464 	/*
3465 	 * Create DMA tag and maps for RX mbufs.
3466 	 */
3467 	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1, 0,
3468 				   BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3469 				   NULL, NULL, MCLBYTES, 1, MCLBYTES,
3470 				   BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK,
3471 				   &sc->bge_cdata.bge_rx_mtag);
3472 	if (error) {
3473 		if_printf(ifp, "could not allocate RX mbuf dma tag\n");
3474 		return error;
3475 	}
3476 
3477 	error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag,
3478 				  BUS_DMA_WAITOK, &sc->bge_cdata.bge_rx_tmpmap);
3479 	if (error) {
3480 		bus_dma_tag_destroy(sc->bge_cdata.bge_rx_mtag);
3481 		sc->bge_cdata.bge_rx_mtag = NULL;
3482 		return error;
3483 	}
3484 
3485 	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
3486 		error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag,
3487 					  BUS_DMA_WAITOK,
3488 					  &sc->bge_cdata.bge_rx_std_dmamap[i]);
3489 		if (error) {
3490 			int j;
3491 
3492 			for (j = 0; j < i; ++j) {
3493 				bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag,
3494 					sc->bge_cdata.bge_rx_std_dmamap[j]);
3495 			}
3496 			bus_dma_tag_destroy(sc->bge_cdata.bge_rx_mtag);
3497 			sc->bge_cdata.bge_rx_mtag = NULL;
3498 
3499 			if_printf(ifp, "could not create DMA map for RX\n");
3500 			return error;
3501 		}
3502 	}
3503 
3504 	/*
3505 	 * Create DMA tag and maps for TX mbufs (up to BGE_NSEG_NEW segments
 	 * of at most MCLBYTES each, BGE_JUMBO_FRAMELEN total).
3506 	 */
3507 	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1, 0,
3508 				   BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3509 				   NULL, NULL,
3510 				   BGE_JUMBO_FRAMELEN, BGE_NSEG_NEW, MCLBYTES,
3511 				   BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK |
3512 				   BUS_DMA_ONEBPAGE,
3513 				   &sc->bge_cdata.bge_tx_mtag);
3514 	if (error) {
3515 		if_printf(ifp, "could not allocate TX mbuf dma tag\n");
3516 		return error;
3517 	}
3518 
3519 	for (i = 0; i < BGE_TX_RING_CNT; i++) {
3520 		error = bus_dmamap_create(sc->bge_cdata.bge_tx_mtag,
3521 					  BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
3522 					  &sc->bge_cdata.bge_tx_dmamap[i]);
3523 		if (error) {
3524 			int j;
3525 
3526 			for (j = 0; j < i; ++j) {
3527 				bus_dmamap_destroy(sc->bge_cdata.bge_tx_mtag,
3528 					sc->bge_cdata.bge_tx_dmamap[j]);
3529 			}
3530 			bus_dma_tag_destroy(sc->bge_cdata.bge_tx_mtag);
3531 			sc->bge_cdata.bge_tx_mtag = NULL;
3532 
3533 			if_printf(ifp, "could not create DMA map for TX\n");
3534 			return error;
3535 		}
3536 	}
3537 
3538 	/*
3539 	 * Create DMA resources for the standard RX ring.
3540 	 */
3541 	error = bge_dma_block_alloc(sc, BGE_STD_RX_RING_SZ,
3542 				    &sc->bge_cdata.bge_rx_std_ring_tag,
3543 				    &sc->bge_cdata.bge_rx_std_ring_map,
3544 				    (void *)&sc->bge_ldata.bge_rx_std_ring,
3545 				    &sc->bge_ldata.bge_rx_std_ring_paddr);
3546 	if (error) {
3547 		if_printf(ifp, "could not create std RX ring\n");
3548 		return error;
3549 	}
3550 
3551 	/*
3552 	 * Create jumbo buffer pool.
3553 	 */
3554 	if (BGE_IS_JUMBO_CAPABLE(sc)) {
3555 		error = bge_alloc_jumbo_mem(sc);
3556 		if (error) {
3557 			if_printf(ifp, "could not create jumbo buffer pool\n");
3558 			return error;
3559 		}
3560 	}
3561 
3562 	/*
3563 	 * Create DMA resources for the RX return ring.
3564 	 */
3565 	error = bge_dma_block_alloc(sc, BGE_RX_RTN_RING_SZ(sc),
3566 				    &sc->bge_cdata.bge_rx_return_ring_tag,
3567 				    &sc->bge_cdata.bge_rx_return_ring_map,
3568 				    (void *)&sc->bge_ldata.bge_rx_return_ring,
3569 				    &sc->bge_ldata.bge_rx_return_ring_paddr);
3570 	if (error) {
3571 		if_printf(ifp, "could not create RX ret ring\n");
3572 		return error;
3573 	}
3574 
3575 	/*
3576 	 * Create DMA resources for the TX ring.
3577 	 */
3578 	error = bge_dma_block_alloc(sc, BGE_TX_RING_SZ,
3579 				    &sc->bge_cdata.bge_tx_ring_tag,
3580 				    &sc->bge_cdata.bge_tx_ring_map,
3581 				    (void *)&sc->bge_ldata.bge_tx_ring,
3582 				    &sc->bge_ldata.bge_tx_ring_paddr);
3583 	if (error) {
3584 		if_printf(ifp, "could not create TX ring\n");
3585 		return error;
3586 	}
3587 
3588 	/*
3589 	 * Create DMA resources for the status block.
3590 	 */
3591 	error = bge_dma_block_alloc(sc, BGE_STATUS_BLK_SZ,
3592 				    &sc->bge_cdata.bge_status_tag,
3593 				    &sc->bge_cdata.bge_status_map,
3594 				    (void *)&sc->bge_ldata.bge_status_block,
3595 				    &sc->bge_ldata.bge_status_block_paddr);
3596 	if (error) {
3597 		if_printf(ifp, "could not create status block\n");
3598 		return error;
3599 	}
3600 
3601 	/*
3602 	 * Create DMA resources for the statistics block.
3603 	 */
3604 	error = bge_dma_block_alloc(sc, BGE_STATS_SZ,
3605 				    &sc->bge_cdata.bge_stats_tag,
3606 				    &sc->bge_cdata.bge_stats_map,
3607 				    (void *)&sc->bge_ldata.bge_stats,
3608 				    &sc->bge_ldata.bge_stats_paddr);
3609 	if (error) {
3610 		if_printf(ifp, "could not create stats block\n");
3611 		return error;
3612 	}
3613 	return 0;
3614 }
3615 
3616 static int
3617 bge_dma_block_alloc(struct bge_softc *sc, bus_size_t size, bus_dma_tag_t *tag,
3618 		    bus_dmamap_t *map, void **addr, bus_addr_t *paddr)
3619 {
3620 	bus_dmamem_t dmem;
3621 	int error;
3622 
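	/*
	 * bus_dmamem_coherent() creates a DMA tag, allocates coherent
	 * (and, with BUS_DMA_ZERO, zeroed) memory for it, creates and
	 * loads the map, and hands everything back in the bus_dmamem_t
	 * bundle unpacked below.
	 */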
3623 	error = bus_dmamem_coherent(sc->bge_cdata.bge_parent_tag, PAGE_SIZE, 0,
3624 				    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3625 				    size, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
3626 	if (error)
3627 		return error;
3628 
3629 	*tag = dmem.dmem_tag;
3630 	*map = dmem.dmem_map;
3631 	*addr = dmem.dmem_addr;
3632 	*paddr = dmem.dmem_busaddr;
3633 
3634 	return 0;
3635 }
3636 
3637 static void
3638 bge_dma_block_free(bus_dma_tag_t tag, bus_dmamap_t map, void *addr)
3639 {
3640 	if (tag != NULL) {
3641 		bus_dmamap_unload(tag, map);
3642 		bus_dmamem_free(tag, addr, map);
3643 		bus_dma_tag_destroy(tag);
3644 	}
3645 }
3646 
3647 /*
3648  * Grrr. The link status word in the status block does
3649  * not work correctly on the BCM5700 rev AX and BX chips,
3650  * according to all available information. Hence, we have
3651  * to enable MII interrupts in order to properly obtain
3652  * async link changes. Unfortunately, this also means that
3653  * we have to read the MAC status register to detect link
3654  * changes, thereby adding an additional register access to
3655  * the interrupt handler.
3656  *
3657 	 * XXX: perhaps the link state detection procedure used for
3658 	 * BGE_CHIPID_BCM5700_B2 can be used for other BCM5700 revisions.
3659  */
3660 static void
3661 bge_bcm5700_link_upd(struct bge_softc *sc, uint32_t status __unused)
3662 {
3663 	struct ifnet *ifp = &sc->arpcom.ac_if;
3664 	struct mii_data *mii = device_get_softc(sc->bge_miibus);
3665 
3666 	mii_pollstat(mii);
3667 
3668 	if (!sc->bge_link &&
3669 	    (mii->mii_media_status & IFM_ACTIVE) &&
3670 	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
3671 		sc->bge_link++;
3672 		if (bootverbose)
3673 			if_printf(ifp, "link UP\n");
3674 	} else if (sc->bge_link &&
3675 	    (!(mii->mii_media_status & IFM_ACTIVE) ||
3676 	    IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
3677 		sc->bge_link = 0;
3678 		if (bootverbose)
3679 			if_printf(ifp, "link DOWN\n");
3680 	}
3681 
3682 	/* Clear the interrupt: reading the PHY ISR acks it, rewriting the IMR re-arms it. */
3683 	CSR_WRITE_4(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_MI_INTERRUPT);
3684 	bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR);
3685 	bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR, BRGPHY_INTRS);
3686 }
3687 
3688 static void
3689 bge_tbi_link_upd(struct bge_softc *sc, uint32_t status)
3690 {
3691 	struct ifnet *ifp = &sc->arpcom.ac_if;
3692 
3693 #define PCS_ENCODE_ERR	(BGE_MACSTAT_PORT_DECODE_ERROR|BGE_MACSTAT_MI_COMPLETE)
3694 
3695 	/*
3696 	 * Sometimes PCS encoding errors are detected in
3697 	 * TBI mode (on fiber NICs), and for some reason
3698 	 * the chip will signal them as link changes.
3699 	 * If we get a link change event, but the 'PCS
3700 	 * encoding error' bit in the MAC status register
3701 	 * is set, don't bother doing a link check.
3702 	 * This avoids spurious "gigabit link up" messages
3703 	 * that sometimes appear on fiber NICs during
3704 	 * periods of heavy traffic.
3705 	 */
3706 	if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
3707 		if (!sc->bge_link) {
3708 			sc->bge_link++;
3709 			if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
3710 				BGE_CLRBIT(sc, BGE_MAC_MODE,
3711 				    BGE_MACMODE_TBI_SEND_CFGS);
3712 			}
3713 			CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
3714 
3715 			if (bootverbose)
3716 				if_printf(ifp, "link UP\n");
3717 
3718 			ifp->if_link_state = LINK_STATE_UP;
3719 			if_link_state_change(ifp);
3720 		}
3721 	} else if ((status & PCS_ENCODE_ERR) != PCS_ENCODE_ERR) {
3722 		if (sc->bge_link) {
3723 			sc->bge_link = 0;
3724 
3725 			if (bootverbose)
3726 				if_printf(ifp, "link DOWN\n");
3727 
3728 			ifp->if_link_state = LINK_STATE_DOWN;
3729 			if_link_state_change(ifp);
3730 		}
3731 	}
3732 
3733 #undef PCS_ENCODE_ERR
3734 
3735 	/* Clear the attention. */
3736 	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
3737 	    BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
3738 	    BGE_MACSTAT_LINK_CHANGED);
3739 }
3740 
3741 static void
3742 bge_copper_link_upd(struct bge_softc *sc, uint32_t status __unused)
3743 {
3744 	/*
3745 	 * Check that the AUTOPOLL bit is set before
3746 	 * processing the event as a real link change.
3747 	 * Turning AUTOPOLL on and off in the MII read/write
3748 	 * functions will often trigger a link status
3749 	 * interrupt for no reason.
3750 	 */
3751 	if (CSR_READ_4(sc, BGE_MI_MODE) & BGE_MIMODE_AUTOPOLL) {
3752 		struct ifnet *ifp = &sc->arpcom.ac_if;
3753 		struct mii_data *mii = device_get_softc(sc->bge_miibus);
3754 
3755 		mii_pollstat(mii);
3756 
3757 		if (!sc->bge_link &&
3758 		    (mii->mii_media_status & IFM_ACTIVE) &&
3759 		    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
3760 			sc->bge_link++;
3761 			if (bootverbose)
3762 				if_printf(ifp, "link UP\n");
3763 		} else if (sc->bge_link &&
3764 		    (!(mii->mii_media_status & IFM_ACTIVE) ||
3765 		    IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
3766 			sc->bge_link = 0;
3767 			if (bootverbose)
3768 				if_printf(ifp, "link DOWN\n");
3769 		}
3770 	}
3771 
3772 	/* Clear the attention. */
3773 	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
3774 	    BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
3775 	    BGE_MACSTAT_LINK_CHANGED);
3776 }
3777 
3778 static int
3779 bge_sysctl_rx_coal_ticks(SYSCTL_HANDLER_ARGS)
3780 {
3781 	struct bge_softc *sc = arg1;
3782 
3783 	return bge_sysctl_coal_chg(oidp, arg1, arg2, req,
3784 				   &sc->bge_rx_coal_ticks,
3785 				   BGE_RX_COAL_TICKS_CHG);
3786 }
3787 
3788 static int
3789 bge_sysctl_tx_coal_ticks(SYSCTL_HANDLER_ARGS)
3790 {
3791 	struct bge_softc *sc = arg1;
3792 
3793 	return bge_sysctl_coal_chg(oidp, arg1, arg2, req,
3794 				   &sc->bge_tx_coal_ticks,
3795 				   BGE_TX_COAL_TICKS_CHG);
3796 }
3797 
3798 static int
3799 bge_sysctl_rx_max_coal_bds(SYSCTL_HANDLER_ARGS)
3800 {
3801 	struct bge_softc *sc = arg1;
3802 
3803 	return bge_sysctl_coal_chg(oidp, arg1, arg2, req,
3804 				   &sc->bge_rx_max_coal_bds,
3805 				   BGE_RX_MAX_COAL_BDS_CHG);
3806 }
3807 
3808 static int
3809 bge_sysctl_tx_max_coal_bds(SYSCTL_HANDLER_ARGS)
3810 {
3811 	struct bge_softc *sc = arg1;
3812 
3813 	return bge_sysctl_coal_chg(oidp, arg1, arg2, req,
3814 				   &sc->bge_tx_max_coal_bds,
3815 				   BGE_TX_MAX_COAL_BDS_CHG);
3816 }
3817 
3818 static int
3819 bge_sysctl_coal_chg(SYSCTL_HANDLER_ARGS, uint32_t *coal,
3820 		    uint32_t coal_chg_mask)
3821 {
3822 	struct bge_softc *sc = arg1;
3823 	struct ifnet *ifp = &sc->arpcom.ac_if;
3824 	int error = 0, v;
3825 
3826 	lwkt_serialize_enter(ifp->if_serializer);
3827 
3828 	v = *coal;
3829 	error = sysctl_handle_int(oidp, &v, 0, req);
3830 	if (!error && req->newptr != NULL) {
3831 		if (v < 0) {
3832 			error = EINVAL;
3833 		} else {
3834 			*coal = v;
3835 			sc->bge_coal_chg |= coal_chg_mask;
3836 		}
3837 	}
3838 
3839 	lwkt_serialize_exit(ifp->if_serializer);
3840 	return error;
3841 }
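
/*
 * Editor's note: the four thin handlers above all funnel into
 * bge_sysctl_coal_chg(), which validates the new value under the interface
 * serializer and records which coalescing parameter changed.  They are
 * presumably registered from the attach path; a hedged sketch of one such
 * registration (the sysctl context/tree names are assumed):
 *
 *	SYSCTL_ADD_PROC(&sc->bge_sysctl_ctx,
 *	    SYSCTL_CHILDREN(sc->bge_sysctl_tree), OID_AUTO,
 *	    "rx_coal_ticks", CTLTYPE_INT | CTLFLAG_RW,
 *	    sc, 0, bge_sysctl_rx_coal_ticks, "I",
 *	    "Receive coalescing ticks");
 */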
3842 
3843 static void
3844 bge_coal_change(struct bge_softc *sc)
3845 {
3846 	struct ifnet *ifp = &sc->arpcom.ac_if;
3847 	uint32_t val;
3848 
3849 	ASSERT_SERIALIZED(ifp->if_serializer);
3850 
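	/*
	 * Editor's note: in each block below the register is read back
	 * after a short delay and the value is discarded; the read-back
	 * presumably only ensures the posted write has reached the chip.
	 */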
3851 	if (sc->bge_coal_chg & BGE_RX_COAL_TICKS_CHG) {
3852 		CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS,
3853 			    sc->bge_rx_coal_ticks);
3854 		DELAY(10);
3855 		val = CSR_READ_4(sc, BGE_HCC_RX_COAL_TICKS);
3856 
3857 		if (bootverbose) {
3858 			if_printf(ifp, "rx_coal_ticks -> %u\n",
3859 				  sc->bge_rx_coal_ticks);
3860 		}
3861 	}
3862 
3863 	if (sc->bge_coal_chg & BGE_TX_COAL_TICKS_CHG) {
3864 		CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS,
3865 			    sc->bge_tx_coal_ticks);
3866 		DELAY(10);
3867 		val = CSR_READ_4(sc, BGE_HCC_TX_COAL_TICKS);
3868 
3869 		if (bootverbose) {
3870 			if_printf(ifp, "tx_coal_ticks -> %u\n",
3871 				  sc->bge_tx_coal_ticks);
3872 		}
3873 	}
3874 
3875 	if (sc->bge_coal_chg & BGE_RX_MAX_COAL_BDS_CHG) {
3876 		CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS,
3877 			    sc->bge_rx_max_coal_bds);
3878 		DELAY(10);
3879 		val = CSR_READ_4(sc, BGE_HCC_RX_MAX_COAL_BDS);
3880 
3881 		if (bootverbose) {
3882 			if_printf(ifp, "rx_max_coal_bds -> %u\n",
3883 				  sc->bge_rx_max_coal_bds);
3884 		}
3885 	}
3886 
3887 	if (sc->bge_coal_chg & BGE_TX_MAX_COAL_BDS_CHG) {
3888 		CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS,
3889 			    sc->bge_tx_max_coal_bds);
3890 		DELAY(10);
3891 		val = CSR_READ_4(sc, BGE_HCC_TX_MAX_COAL_BDS);
3892 
3893 		if (bootverbose) {
3894 			if_printf(ifp, "tx_max_coal_bds -> %u\n",
3895 				  sc->bge_tx_max_coal_bds);
3896 		}
3897 	}
3898 
3899 	sc->bge_coal_chg = 0;
3900 }
3901 
3902 static void
3903 bge_enable_intr(struct bge_softc *sc)
3904 {
3905 	struct ifnet *ifp = &sc->arpcom.ac_if;
3906 
3907 	lwkt_serialize_handler_enable(ifp->if_serializer);
3908 
3909 	/*
3910 	 * Enable interrupt.
3911 	 */
3912 	bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
3913 
3914 	/*
3915 	 * Unmask the interrupt when we stop polling.
3916 	 */
3917 	BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
3918 
3919 	/*
3920 	 * Trigger another interrupt, since the above write to
3921 	 * interrupt mailbox 0 may have acknowledged a pending
3922 	 * interrupt.
3923 	 */
3924 	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
3925 }
3926 
3927 static void
3928 bge_disable_intr(struct bge_softc *sc)
3929 {
3930 	struct ifnet *ifp = &sc->arpcom.ac_if;
3931 
3932 	/*
3933 	 * Mask the interrupt when we start polling.
3934 	 */
3935 	BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
3936 
3937 	/*
3938 	 * Acknowledge any interrupt that may already be asserted.
3939 	 */
3940 	bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
3941 
3942 	lwkt_serialize_handler_disable(ifp->if_serializer);
3943 }
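
/*
 * Editor's note: bge_enable_intr()/bge_disable_intr() are the two halves of
 * the interrupt-vs-polling switch.  A hedged sketch of the usual pairing,
 * assuming a DragonFly DEVICE_POLLING style if_poll hook (not quoted from
 * this driver):
 *
 *	case POLL_REGISTER:
 *		bge_disable_intr(sc);	// mask chip interrupts while polling
 *		break;
 *	case POLL_DEREGISTER:
 *		bge_enable_intr(sc);	// unmask when interrupts take over
 *		break;
 */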
3944 
3945 static int
3946 bge_get_eaddr_mem(struct bge_softc *sc, uint8_t ether_addr[])
3947 {
3948 	uint32_t mac_addr;
3949 	int ret = 1;
3950 
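	/*
	 * Editor's note: the check below looks for the ASCII signature
	 * "HK" (0x48, 0x4b) in the upper 16 bits of the word at 0x0c14,
	 * which apparently marks a firmware-provided MAC address.
	 */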
3951 	mac_addr = bge_readmem_ind(sc, 0x0c14);
3952 	if ((mac_addr >> 16) == 0x484b) {
3953 		ether_addr[0] = (uint8_t)(mac_addr >> 8);
3954 		ether_addr[1] = (uint8_t)mac_addr;
3955 		mac_addr = bge_readmem_ind(sc, 0x0c18);
3956 		ether_addr[2] = (uint8_t)(mac_addr >> 24);
3957 		ether_addr[3] = (uint8_t)(mac_addr >> 16);
3958 		ether_addr[4] = (uint8_t)(mac_addr >> 8);
3959 		ether_addr[5] = (uint8_t)mac_addr;
3960 		ret = 0;
3961 	}
3962 	return ret;
3963 }
3964 
3965 static int
3966 bge_get_eaddr_nvram(struct bge_softc *sc, uint8_t ether_addr[])
3967 {
3968 	int mac_offset = BGE_EE_MAC_OFFSET;
3969 
3970 	if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
3971 		mac_offset = BGE_EE_MAC_OFFSET_5906;
3972 
3973 	return bge_read_nvram(sc, ether_addr, mac_offset + 2, ETHER_ADDR_LEN);
3974 }
3975 
3976 static int
3977 bge_get_eaddr_eeprom(struct bge_softc *sc, uint8_t ether_addr[])
3978 {
3979 	if (sc->bge_flags & BGE_FLAG_NO_EEPROM)
3980 		return 1;
3981 
3982 	return bge_read_eeprom(sc, ether_addr, BGE_EE_MAC_OFFSET + 2,
3983 			       ETHER_ADDR_LEN);
3984 }
3985 
3986 static int
3987 bge_get_eaddr(struct bge_softc *sc, uint8_t eaddr[])
3988 {
3989 	static const bge_eaddr_fcn_t bge_eaddr_funcs[] = {
3990 		/* NOTE: Order is critical */
3991 		bge_get_eaddr_mem,
3992 		bge_get_eaddr_nvram,
3993 		bge_get_eaddr_eeprom,
3994 		NULL
3995 	};
3996 	const bge_eaddr_fcn_t *func;
3997 
3998 	for (func = bge_eaddr_funcs; *func != NULL; ++func) {
3999 		if ((*func)(sc, eaddr) == 0)
4000 			break;
4001 	}
4002 	return (*func == NULL ? ENXIO : 0);
4003 }
4004