/*
 * Copyright (c) 2001 Wind River Systems
 * Copyright (c) 1997, 1998, 1999, 2001
 *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/bge/if_bge.c,v 1.3.2.39 2005/07/03 03:41:18 silby Exp $
 * $DragonFly: src/sys/dev/netif/bge/if_bge.c,v 1.77 2007/05/12 08:39:56 sephe Exp $
 *
 */

/*
 * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
 *
 * Written by Bill Paul <wpaul@windriver.com>
 * Senior Engineer, Wind River Systems
 */

/*
 * The Broadcom BCM5700 is based on technology originally developed by
 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
 * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
 * frames, highly configurable RX filtering, and 16 RX and TX queues
 * (which, along with RX filter rules, can be used for QOS applications).
 * Other features, such as TCP segmentation, may be available as part
 * of value-added firmware updates. Unlike the Tigon I and Tigon II,
 * firmware images can be stored in hardware and need not be compiled
 * into the driver.
 *
 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
 *
 * The BCM5701 is a single-chip solution incorporating both the BCM5700
 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
 * does not support external SSRAM.
 *
 * Broadcom also produces a variation of the BCM5700 under the "Altima"
 * brand name, which is functionally similar but lacks PCI-X support.
 *
 * Without external SSRAM, you can only have at most 4 TX rings,
 * and the use of the mini RX ring is disabled. This seems to imply
 * that these features are simply not available on the BCM5701. As a
 * result, this driver does not implement any support for the mini RX
 * ring.
 */

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/socket.h>
#include <sys/sockio.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/ifq_var.h>
#include <net/vlan/if_vlan_var.h>

#include <dev/netif/mii_layer/mii.h>
#include <dev/netif/mii_layer/miivar.h>
#include <dev/netif/mii_layer/brgphyreg.h>

#include <bus/pci/pcidevs.h>
#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>

#include <dev/netif/bge/if_bgereg.h>

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

#define BGE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
#define BGE_MIN_FRAME		60

/*
 * Various supported device vendors/types and their names. Note: the
 * spec seems to indicate that the hardware still has Alteon's vendor
 * ID burned into it, though it will always be overridden by the vendor
 * ID in the EEPROM. Just to be safe, we cover all possibilities.
 */
#define BGE_DEVDESC_MAX		64	/* Maximum device description length */

static struct bge_type bge_devs[] = {
	{ PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3C996,
		"3COM 3C996 Gigabit Ethernet" },

	{ PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_BCM5700,
		"Alteon BCM5700 Gigabit Ethernet" },
	{ PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_BCM5701,
		"Alteon BCM5701 Gigabit Ethernet" },

	{ PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1000,
		"Altima AC1000 Gigabit Ethernet" },
	{ PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1001,
		"Altima AC1002 Gigabit Ethernet" },
	{ PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC9100,
		"Altima AC9100 Gigabit Ethernet" },

	{ PCI_VENDOR_APPLE, PCI_PRODUCT_APPLE_BCM5701,
		"Apple BCM5701 Gigabit Ethernet" },

	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5700,
		"Broadcom BCM5700 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5701,
		"Broadcom BCM5701 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702,
		"Broadcom BCM5702 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702X,
		"Broadcom BCM5702X Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702_ALT,
		"Broadcom BCM5702 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703,
		"Broadcom BCM5703 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703X,
		"Broadcom BCM5703X Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703A3,
		"Broadcom BCM5703 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704C,
		"Broadcom BCM5704C Dual Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704S,
		"Broadcom BCM5704S Dual Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704S_ALT,
		"Broadcom BCM5704S Dual Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705,
		"Broadcom BCM5705 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705F,
		"Broadcom BCM5705F Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705K,
		"Broadcom BCM5705K Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705M,
		"Broadcom BCM5705M Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705M_ALT,
		"Broadcom BCM5705M Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5714,
		"Broadcom BCM5714C Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5714S,
		"Broadcom BCM5714S Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5715,
		"Broadcom BCM5715 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5715S,
		"Broadcom BCM5715S Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5720,
		"Broadcom BCM5720 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5721,
		"Broadcom BCM5721 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5722,
		"Broadcom BCM5722 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5750,
		"Broadcom BCM5750 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5750M,
		"Broadcom BCM5750M Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751,
		"Broadcom BCM5751 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751F,
		"Broadcom BCM5751F Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751M,
		"Broadcom BCM5751M Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5752,
		"Broadcom BCM5752 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5752M,
		"Broadcom BCM5752M Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5753,
		"Broadcom BCM5753 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5753F,
		"Broadcom BCM5753F Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5753M,
		"Broadcom BCM5753M Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5754,
		"Broadcom BCM5754 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5754M,
		"Broadcom BCM5754M Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5755,
		"Broadcom BCM5755 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5755M,
		"Broadcom BCM5755M Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5756,
		"Broadcom BCM5756 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5780,
		"Broadcom BCM5780 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5780S,
		"Broadcom BCM5780S Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5781,
		"Broadcom BCM5781 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5782,
		"Broadcom BCM5782 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5786,
		"Broadcom BCM5786 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5787,
		"Broadcom BCM5787 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5787F,
		"Broadcom BCM5787F Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5787M,
		"Broadcom BCM5787M Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5788,
		"Broadcom BCM5788 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5789,
		"Broadcom BCM5789 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5901,
		"Broadcom BCM5901 Fast Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5901A2,
		"Broadcom BCM5901A2 Fast Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5903M,
		"Broadcom BCM5903M Fast Ethernet" },

	{ PCI_VENDOR_SCHNEIDERKOCH, PCI_PRODUCT_SCHNEIDERKOCH_SK_9DX1,
		"SysKonnect Gigabit Ethernet" },

	{ 0, 0, NULL }
};
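
/*
 * Note (descriptive): bge_probe() below walks this table until it hits
 * the all-zero sentinel entry, so new devices must be added above it.
 */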

#define BGE_IS_JUMBO_CAPABLE(sc)	((sc)->bge_flags & BGE_FLAG_JUMBO)
#define BGE_IS_5700_FAMILY(sc)		((sc)->bge_flags & BGE_FLAG_5700_FAMILY)
#define BGE_IS_5705_PLUS(sc)		((sc)->bge_flags & BGE_FLAG_5705_PLUS)
#define BGE_IS_5714_FAMILY(sc)		((sc)->bge_flags & BGE_FLAG_5714_FAMILY)
#define BGE_IS_575X_PLUS(sc)		((sc)->bge_flags & BGE_FLAG_575X_PLUS)

static int	bge_probe(device_t);
static int	bge_attach(device_t);
static int	bge_detach(device_t);
static void	bge_txeof(struct bge_softc *);
static void	bge_rxeof(struct bge_softc *);

static void	bge_tick(void *);
static void	bge_stats_update(struct bge_softc *);
static void	bge_stats_update_regs(struct bge_softc *);
static int	bge_encap(struct bge_softc *, struct mbuf *, uint32_t *);

static void	bge_intr(void *);
static void	bge_start(struct ifnet *);
static int	bge_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	bge_init(void *);
static void	bge_stop(struct bge_softc *);
static void	bge_watchdog(struct ifnet *);
static void	bge_shutdown(device_t);
static int	bge_suspend(device_t);
static int	bge_resume(device_t);
static int	bge_ifmedia_upd(struct ifnet *);
static void	bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);

static uint8_t	bge_eeprom_getbyte(struct bge_softc *, uint32_t, uint8_t *);
static int	bge_read_eeprom(struct bge_softc *, caddr_t, uint32_t, size_t);

static void	bge_setmulti(struct bge_softc *);
static void	bge_setpromisc(struct bge_softc *);

static int	bge_alloc_jumbo_mem(struct bge_softc *);
static void	bge_free_jumbo_mem(struct bge_softc *);
static struct bge_jslot
		*bge_jalloc(struct bge_softc *);
static void	bge_jfree(void *);
static void	bge_jref(void *);
static int	bge_newbuf_std(struct bge_softc *, int, struct mbuf *);
static int	bge_newbuf_jumbo(struct bge_softc *, int, struct mbuf *);
static int	bge_init_rx_ring_std(struct bge_softc *);
static void	bge_free_rx_ring_std(struct bge_softc *);
static int	bge_init_rx_ring_jumbo(struct bge_softc *);
static void	bge_free_rx_ring_jumbo(struct bge_softc *);
static void	bge_free_tx_ring(struct bge_softc *);
static int	bge_init_tx_ring(struct bge_softc *);

static int	bge_chipinit(struct bge_softc *);
static int	bge_blockinit(struct bge_softc *);

static uint32_t	bge_readmem_ind(struct bge_softc *, uint32_t);
static void	bge_writemem_ind(struct bge_softc *, uint32_t, uint32_t);
#ifdef notdef
static uint32_t	bge_readreg_ind(struct bge_softc *, uint32_t);
#endif
static void	bge_writereg_ind(struct bge_softc *, uint32_t, uint32_t);
static void	bge_writemem_direct(struct bge_softc *, uint32_t, uint32_t);

static int	bge_miibus_readreg(device_t, int, int);
static int	bge_miibus_writereg(device_t, int, int, int);
static void	bge_miibus_statchg(device_t);
static void	bge_bcm5700_link_upd(struct bge_softc *, uint32_t);
static void	bge_tbi_link_upd(struct bge_softc *, uint32_t);
static void	bge_copper_link_upd(struct bge_softc *, uint32_t);

static void	bge_reset(struct bge_softc *);

static void	bge_dma_map_addr(void *, bus_dma_segment_t *, int, int);
static void	bge_dma_map_mbuf(void *, bus_dma_segment_t *, int,
				 bus_size_t, int);
static int	bge_dma_alloc(struct bge_softc *);
static void	bge_dma_free(struct bge_softc *);
static int	bge_dma_block_alloc(struct bge_softc *, bus_size_t,
				    bus_dma_tag_t *, bus_dmamap_t *,
				    void **, bus_addr_t *);
static void	bge_dma_block_free(bus_dma_tag_t, bus_dmamap_t, void *);

/*
 * Set the following tunable to 1 for some IBM blade servers with the DNLK
 * switch module. Auto negotiation is broken for those configurations.
 */
static int	bge_fake_autoneg = 0;
TUNABLE_INT("hw.bge.fake_autoneg", &bge_fake_autoneg);
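
/*
 * Usage sketch (not part of the driver): loader tunables like this one
 * are normally set from /boot/loader.conf before the module loads, e.g.:
 *
 *	hw.bge.fake_autoneg="1"
 */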

static device_method_t bge_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		bge_probe),
	DEVMETHOD(device_attach,	bge_attach),
	DEVMETHOD(device_detach,	bge_detach),
	DEVMETHOD(device_shutdown,	bge_shutdown),
	DEVMETHOD(device_suspend,	bge_suspend),
	DEVMETHOD(device_resume,	bge_resume),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	bge_miibus_readreg),
	DEVMETHOD(miibus_writereg,	bge_miibus_writereg),
	DEVMETHOD(miibus_statchg,	bge_miibus_statchg),

	{ 0, 0 }
};

static DEFINE_CLASS_0(bge, bge_driver, bge_methods, sizeof(struct bge_softc));
static devclass_t bge_devclass;

DECLARE_DUMMY_MODULE(if_bge);
DRIVER_MODULE(if_bge, pci, bge_driver, bge_devclass, 0, 0);
DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0);

static uint32_t
bge_readmem_ind(struct bge_softc *sc, uint32_t off)
{
	device_t dev = sc->bge_dev;
	uint32_t val;

	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
	val = pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4);
	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
	return (val);
}

static void
bge_writemem_ind(struct bge_softc *sc, uint32_t off, uint32_t val)
{
	device_t dev = sc->bge_dev;

	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
	pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
}
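
/*
 * Usage sketch (illustrative only): the two helpers above tunnel reads
 * and writes to NIC-internal memory through the PCI memory window
 * registers, e.g. to peek at a word of the NIC's statistics area:
 *
 *	uint32_t word;
 *
 *	word = bge_readmem_ind(sc, BGE_STATS_BLOCK);
 *	bge_writemem_ind(sc, BGE_STATS_BLOCK, 0);
 *
 * BGE_STATS_BLOCK is used here purely as an example offset; any
 * NIC-memory address may be passed the same way.
 */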

#ifdef notdef
static uint32_t
bge_readreg_ind(struct bge_softc *sc, uint32_t off)
{
	device_t dev = sc->bge_dev;

	pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
	return(pci_read_config(dev, BGE_PCI_REG_DATA, 4));
}
#endif

static void
bge_writereg_ind(struct bge_softc *sc, uint32_t off, uint32_t val)
{
	device_t dev = sc->bge_dev;

	pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
	pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
}

static void
bge_writemem_direct(struct bge_softc *sc, uint32_t off, uint32_t val)
{
	CSR_WRITE_4(sc, off, val);
}

/*
 * Read a byte of data stored in the EEPROM at address 'addr.' The
 * BCM570x supports both the traditional bitbang interface and an
 * auto access interface for reading the EEPROM. We use the auto
 * access method.
 */
static uint8_t
bge_eeprom_getbyte(struct bge_softc *sc, uint32_t addr, uint8_t *dest)
{
	int i;
	uint32_t byte = 0;

	/*
	 * Enable use of auto EEPROM access so we can avoid
	 * having to use the bitbang method.
	 */
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);

	/* Reset the EEPROM, load the clock period. */
	CSR_WRITE_4(sc, BGE_EE_ADDR,
	    BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
	DELAY(20);

	/* Issue the read EEPROM command. */
	CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);

	/* Wait for completion */
	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
		DELAY(10);
		if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
			break;
	}

	if (i == BGE_TIMEOUT * 10) {
		if_printf(&sc->arpcom.ac_if, "eeprom read timed out\n");
		return(1);
	}

	/* Get result. */
	byte = CSR_READ_4(sc, BGE_EE_DATA);

	/* Each 32-bit data word holds four bytes; pick out the right lane. */
	*dest = (byte >> ((addr % 4) * 8)) & 0xFF;

	return(0);
}

/*
 * Read a sequence of bytes from the EEPROM.
 */
static int
bge_read_eeprom(struct bge_softc *sc, caddr_t dest, uint32_t off, size_t len)
{
	size_t i;
	int err;
	uint8_t byte;

	for (byte = 0, err = 0, i = 0; i < len; i++) {
		err = bge_eeprom_getbyte(sc, off + i, &byte);
		if (err)
			break;
		*(dest + i) = byte;
	}

	return(err ? 1 : 0);
}
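
/*
 * Usage sketch (illustrative only; the offset 0x00 below is a
 * placeholder, not a real location from the chip's EEPROM map):
 *
 *	uint8_t buf[ETHER_ADDR_LEN];
 *
 *	if (bge_read_eeprom(sc, (caddr_t)buf, 0x00, sizeof(buf)) != 0)
 *		if_printf(&sc->arpcom.ac_if, "EEPROM read failed\n");
 *
 * Real callers pass a meaningful offset such as the station-address
 * location.
 */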

static int
bge_miibus_readreg(device_t dev, int phy, int reg)
{
	struct bge_softc *sc;
	struct ifnet *ifp;
	uint32_t val, autopoll;
	int i;

	sc = device_get_softc(dev);
	ifp = &sc->arpcom.ac_if;

	/*
	 * Broadcom's own driver always assumes the internal
	 * PHY is at GMII address 1. On some chips, the PHY responds
	 * to accesses at all addresses, which could cause us to
	 * bogusly attach the PHY 32 times at probe time. Always
	 * restricting the lookup to address 1 is simpler than
	 * trying to figure out which chip revisions should be
	 * special-cased.
	 */
	if (phy != 1)
		return(0);

	/* Reading with autopolling on may trigger PCI errors */
	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY|
	    BGE_MIPHY(phy)|BGE_MIREG(reg));

	for (i = 0; i < BGE_TIMEOUT; i++) {
		val = CSR_READ_4(sc, BGE_MI_COMM);
		if (!(val & BGE_MICOMM_BUSY))
			break;
	}

	if (i == BGE_TIMEOUT) {
		if_printf(ifp, "PHY read timed out\n");
		val = 0;
		goto done;
	}

	val = CSR_READ_4(sc, BGE_MI_COMM);

done:
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	if (val & BGE_MICOMM_READFAIL)
		return(0);

	return(val & 0xFFFF);
}

static int
bge_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct bge_softc *sc;
	uint32_t autopoll;
	int i;

	sc = device_get_softc(dev);

	/* Writing with autopolling on may trigger PCI errors */
	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY|
	    BGE_MIPHY(phy)|BGE_MIREG(reg)|val);

	for (i = 0; i < BGE_TIMEOUT; i++) {
		if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY))
			break;
	}

	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	if (i == BGE_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if, "PHY write timed out\n");
		return(0);
	}

	return(0);
}

static void
bge_miibus_statchg(device_t dev)
{
	struct bge_softc *sc;
	struct mii_data *mii;

	sc = device_get_softc(dev);
	mii = device_get_softc(sc->bge_miibus);

	BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
	} else {
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
	}

	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
		BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
	} else {
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
	}
}

/*
 * Memory management for jumbo frames.
 */
static int
bge_alloc_jumbo_mem(struct bge_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct bge_jslot *entry;
	uint8_t *ptr;
	bus_addr_t paddr;
	int i, error;

	/*
	 * Create tag for jumbo mbufs.
	 * This is really a bit of a kludge. We allocate a special
	 * jumbo buffer pool which (thanks to the way our DMA
	 * memory allocation works) will consist of contiguous
	 * pages. This means that even though a jumbo buffer might
	 * be larger than a page size, we don't really need to
	 * map it into more than one DMA segment. However, the
	 * default mbuf tag will result in multi-segment mappings,
	 * so we have to create a special jumbo mbuf tag that
	 * lets us get away with mapping the jumbo buffers as
	 * a single segment. I think eventually the driver should
	 * be changed so that it uses ordinary mbufs and cluster
	 * buffers, i.e. jumbo frames can span multiple DMA
	 * descriptors. But that's a project for another day.
	 */

	/*
	 * Create DMA resources for the jumbo RX ring.
	 */
	error = bge_dma_block_alloc(sc, BGE_JUMBO_RX_RING_SZ,
				    &sc->bge_cdata.bge_rx_jumbo_ring_tag,
				    &sc->bge_cdata.bge_rx_jumbo_ring_map,
				    (void **)&sc->bge_ldata.bge_rx_jumbo_ring,
				    &sc->bge_ldata.bge_rx_jumbo_ring_paddr);
	if (error) {
		if_printf(ifp, "could not create jumbo RX ring\n");
		return error;
	}

	/*
	 * Create DMA resources for the jumbo buffer block.
	 */
	error = bge_dma_block_alloc(sc, BGE_JMEM,
				    &sc->bge_cdata.bge_jumbo_tag,
				    &sc->bge_cdata.bge_jumbo_map,
				    (void **)&sc->bge_ldata.bge_jumbo_buf,
				    &paddr);
	if (error) {
		if_printf(ifp, "could not create jumbo buffer\n");
		return error;
	}

	SLIST_INIT(&sc->bge_jfree_listhead);

	/*
	 * Now divide it up into 9K pieces and save the addresses
	 * in an array. Note that each jslot entry keeps a pointer
	 * back to the softc for this interface, because bge_jfree()
	 * needs it but is called by the mbuf management code, which
	 * will not pass it to us explicitly.
	 */
	for (i = 0, ptr = sc->bge_ldata.bge_jumbo_buf; i < BGE_JSLOTS; i++) {
		entry = &sc->bge_cdata.bge_jslots[i];
		entry->bge_sc = sc;
		entry->bge_buf = ptr;
		entry->bge_paddr = paddr;
		entry->bge_inuse = 0;
		entry->bge_slot = i;
		SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, entry, jslot_link);

		ptr += BGE_JLEN;
		paddr += BGE_JLEN;
	}
	return 0;
}

static void
bge_free_jumbo_mem(struct bge_softc *sc)
{
	/* Destroy jumbo RX ring. */
	bge_dma_block_free(sc->bge_cdata.bge_rx_jumbo_ring_tag,
			   sc->bge_cdata.bge_rx_jumbo_ring_map,
			   sc->bge_ldata.bge_rx_jumbo_ring);

	/* Destroy jumbo buffer block. */
	bge_dma_block_free(sc->bge_cdata.bge_jumbo_tag,
			   sc->bge_cdata.bge_jumbo_map,
			   sc->bge_ldata.bge_jumbo_buf);
}

/*
 * Allocate a jumbo buffer.
 */
static struct bge_jslot *
bge_jalloc(struct bge_softc *sc)
{
	struct bge_jslot *entry;

	lwkt_serialize_enter(&sc->bge_jslot_serializer);
	entry = SLIST_FIRST(&sc->bge_jfree_listhead);
	if (entry) {
		SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jslot_link);
		entry->bge_inuse = 1;
	} else {
		if_printf(&sc->arpcom.ac_if, "no free jumbo buffers\n");
	}
	lwkt_serialize_exit(&sc->bge_jslot_serializer);
	return(entry);
}

/*
 * Adjust usage count on a jumbo buffer.
 */
static void
bge_jref(void *arg)
{
	struct bge_jslot *entry = (struct bge_jslot *)arg;
	struct bge_softc *sc = entry->bge_sc;

	if (sc == NULL)
		panic("bge_jref: can't find softc pointer!");

	if (&sc->bge_cdata.bge_jslots[entry->bge_slot] != entry) {
		panic("bge_jref: asked to reference buffer "
		    "that we don't manage!");
	} else if (entry->bge_inuse == 0) {
		panic("bge_jref: buffer already free!");
	} else {
		atomic_add_int(&entry->bge_inuse, 1);
	}
}

/*
 * Release a jumbo buffer.
 */
static void
bge_jfree(void *arg)
{
	struct bge_jslot *entry = (struct bge_jslot *)arg;
	struct bge_softc *sc = entry->bge_sc;

	if (sc == NULL)
		panic("bge_jfree: can't find softc pointer!");

	if (&sc->bge_cdata.bge_jslots[entry->bge_slot] != entry) {
		panic("bge_jfree: asked to free buffer that we don't manage!");
	} else if (entry->bge_inuse == 0) {
		panic("bge_jfree: buffer already free!");
	} else {
		/*
		 * Possible MP race to 0, use the serializer.  The atomic insn
		 * is still needed for races against bge_jref().
		 */
		lwkt_serialize_enter(&sc->bge_jslot_serializer);
		atomic_subtract_int(&entry->bge_inuse, 1);
		if (entry->bge_inuse == 0) {
			SLIST_INSERT_HEAD(&sc->bge_jfree_listhead,
					  entry, jslot_link);
		}
		lwkt_serialize_exit(&sc->bge_jslot_serializer);
	}
}
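
/*
 * Lifecycle sketch (descriptive, not code): bge_jalloc() hands out a
 * slot with bge_inuse = 1; bge_newbuf_jumbo() below attaches the slot
 * to an mbuf as external storage with ext_ref = bge_jref and
 * ext_free = bge_jfree, so from then on the mbuf system drives the
 * reference count.  When the count returns to 0 in bge_jfree(), the
 * slot goes back on bge_jfree_listhead for reuse.
 */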

/*
 * Initialize a standard receive ring descriptor.
 */
static int
bge_newbuf_std(struct bge_softc *sc, int i, struct mbuf *m)
{
	struct mbuf *m_new = NULL;
	struct bge_dmamap_arg ctx;
	bus_dma_segment_t seg;
	struct bge_rx_bd *r;
	int error;

	if (m == NULL) {
		m_new = m_getcl(MB_DONTWAIT, MT_DATA, M_PKTHDR);
		if (m_new == NULL)
			return ENOBUFS;
	} else {
		m_new = m;
		m_new->m_data = m_new->m_ext.ext_buf;
	}
	m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;

	if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
		m_adj(m_new, ETHER_ALIGN);

	ctx.bge_maxsegs = 1;
	ctx.bge_segs = &seg;
	error = bus_dmamap_load_mbuf(sc->bge_cdata.bge_mtag,
				     sc->bge_cdata.bge_rx_std_dmamap[i],
				     m_new, bge_dma_map_mbuf, &ctx,
				     BUS_DMA_NOWAIT);
	if (error || ctx.bge_maxsegs == 0) {
		if (m == NULL)
			m_freem(m_new);
		return ENOMEM;
	}

	sc->bge_cdata.bge_rx_std_chain[i] = m_new;

	r = &sc->bge_ldata.bge_rx_std_ring[i];
	r->bge_addr.bge_addr_lo = BGE_ADDR_LO(ctx.bge_segs[0].ds_addr);
	r->bge_addr.bge_addr_hi = BGE_ADDR_HI(ctx.bge_segs[0].ds_addr);
	r->bge_flags = BGE_RXBDFLAG_END;
	r->bge_len = m_new->m_len;
	r->bge_idx = i;

	bus_dmamap_sync(sc->bge_cdata.bge_mtag,
			sc->bge_cdata.bge_rx_std_dmamap[i],
			BUS_DMASYNC_PREREAD);
	return 0;
}
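
/*
 * For reference: bge_dma_map_mbuf() is the bus_dmamap_load_mbuf()
 * callback used above.  A minimal sketch of what such a callback does,
 * assuming the usual (arg, segs, nsegs, mapsize, error) contract and
 * the bge_dmamap_arg layout used in this file -- consult the real
 * definition later in this file for the authoritative version:
 *
 *	struct bge_dmamap_arg *ctx = arg;
 *
 *	if (error || nsegs > ctx->bge_maxsegs) {
 *		ctx->bge_maxsegs = 0;
 *		return;
 *	}
 *	ctx->bge_maxsegs = nsegs;
 *	bcopy(segs, ctx->bge_segs, sizeof(segs[0]) * nsegs);
 */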

/*
 * Initialize a jumbo receive ring descriptor. This allocates
 * a jumbo buffer from the pool managed internally by the driver.
 */
static int
bge_newbuf_jumbo(struct bge_softc *sc, int i, struct mbuf *m)
{
	struct mbuf *m_new = NULL;
	struct bge_jslot *buf;
	struct bge_rx_bd *r;
	bus_addr_t paddr;

	if (m == NULL) {
		/* Allocate the mbuf. */
		MGETHDR(m_new, MB_DONTWAIT, MT_DATA);
		if (m_new == NULL)
			return(ENOBUFS);

		/* Allocate the jumbo buffer */
		buf = bge_jalloc(sc);
		if (buf == NULL) {
			m_freem(m_new);
			if_printf(&sc->arpcom.ac_if, "jumbo allocation failed "
			    "-- packet dropped!\n");
			return ENOBUFS;
		}

		/* Attach the buffer to the mbuf. */
		m_new->m_ext.ext_arg = buf;
		m_new->m_ext.ext_buf = buf->bge_buf;
		m_new->m_ext.ext_free = bge_jfree;
		m_new->m_ext.ext_ref = bge_jref;
		m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN;

		m_new->m_flags |= M_EXT;
	} else {
		KKASSERT(m->m_flags & M_EXT);
		m_new = m;
		buf = m_new->m_ext.ext_arg;
	}
	m_new->m_data = m_new->m_ext.ext_buf;
	m_new->m_len = m_new->m_pkthdr.len = m_new->m_ext.ext_size;

	paddr = buf->bge_paddr;
	if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0) {
		m_adj(m_new, ETHER_ALIGN);
		paddr += ETHER_ALIGN;
	}

	/* Set up the descriptor. */
	sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new;

	r = &sc->bge_ldata.bge_rx_jumbo_ring[i];
	r->bge_addr.bge_addr_lo = BGE_ADDR_LO(paddr);
	r->bge_addr.bge_addr_hi = BGE_ADDR_HI(paddr);
	r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING;
	r->bge_len = m_new->m_len;
	r->bge_idx = i;

	return 0;
}

/*
 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
 * that's 1MB of memory, which is a lot. For now, we fill only the first
 * 256 ring entries and hope that our CPU is fast enough to keep up with
 * the NIC.
 */
static int
bge_init_rx_ring_std(struct bge_softc *sc)
{
	int i;

	for (i = 0; i < BGE_SSLOTS; i++) {
		if (bge_newbuf_std(sc, i, NULL) == ENOBUFS)
			return(ENOBUFS);
	}

	bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
			sc->bge_cdata.bge_rx_std_ring_map,
			BUS_DMASYNC_PREWRITE);

	sc->bge_std = i - 1;
	CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);

	return(0);
}
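
/*
 * Note (descriptive): sc->bge_std ends up at BGE_SSLOTS - 1, the index
 * of the last initialized descriptor; writing it to the standard-ring
 * producer mailbox above tells the chip how far the host has filled
 * the ring.
 */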

static void
bge_free_rx_ring_std(struct bge_softc *sc)
{
	int i;

	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
			bus_dmamap_unload(sc->bge_cdata.bge_mtag,
					  sc->bge_cdata.bge_rx_std_dmamap[i]);
			m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
			sc->bge_cdata.bge_rx_std_chain[i] = NULL;
		}
		bzero(&sc->bge_ldata.bge_rx_std_ring[i],
		    sizeof(struct bge_rx_bd));
	}
}

static int
bge_init_rx_ring_jumbo(struct bge_softc *sc)
{
	int i;
	struct bge_rcb *rcb;

	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
			return(ENOBUFS);
	}

	bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
			sc->bge_cdata.bge_rx_jumbo_ring_map,
			BUS_DMASYNC_PREWRITE);

	sc->bge_jumbo = i - 1;

	rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
	rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 0);
	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);

	CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);

	return(0);
}

static void
bge_free_rx_ring_jumbo(struct bge_softc *sc)
{
	int i;

	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
			m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
			sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
		}
		bzero(&sc->bge_ldata.bge_rx_jumbo_ring[i],
		    sizeof(struct bge_rx_bd));
	}
}

962 
963 static void
964 bge_free_tx_ring(struct bge_softc *sc)
965 {
966 	int i;
967 
968 	for (i = 0; i < BGE_TX_RING_CNT; i++) {
969 		if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
970 			bus_dmamap_unload(sc->bge_cdata.bge_mtag,
971 					  sc->bge_cdata.bge_tx_dmamap[i]);
972 			m_freem(sc->bge_cdata.bge_tx_chain[i]);
973 			sc->bge_cdata.bge_tx_chain[i] = NULL;
974 		}
975 		bzero(&sc->bge_ldata.bge_tx_ring[i],
976 		    sizeof(struct bge_tx_bd));
977 	}
978 }
979 
980 static int
981 bge_init_tx_ring(struct bge_softc *sc)
982 {
983 	sc->bge_txcnt = 0;
984 	sc->bge_tx_saved_considx = 0;
985 	sc->bge_tx_prodidx = 0;
986 
987 	/* Initialize transmit producer index for host-memory send ring. */
988 	CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
989 
990 	/* 5700 b2 errata */
991 	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
992 		CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0);
993 
994 	CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
995 	/* 5700 b2 errata */
996 	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
997 		CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
998 
999 	return(0);
1000 }

static void
bge_setmulti(struct bge_softc *sc)
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	uint32_t hashes[4] = { 0, 0, 0, 0 };
	int h, i;

	ifp = &sc->arpcom.ac_if;

	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		for (i = 0; i < 4; i++)
			CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
		return;
	}

	/* First, zot all the existing filters. */
	for (i = 0; i < 4; i++)
		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);

	/* Now program new ones. */
	LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = ether_crc32_le(
		    LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
		    ETHER_ADDR_LEN) & 0x7f;
		hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
	}

	for (i = 0; i < 4; i++)
		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
}
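
/*
 * Worked example (illustrative): if the low 7 bits of a multicast
 * address's ether_crc32_le() value are h = 0x5a, then the word index
 * is (0x5a & 0x60) >> 5 = 2 and the bit is 1 << (0x5a & 0x1f) =
 * 1 << 26, so bit 26 of the third hash register (BGE_MAR0 + 8) gets
 * set.  The chip computes the same 7-bit CRC slice on incoming
 * multicast frames and accepts those whose hash bit is set.
 */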

/*
 * Do endian, PCI and DMA initialization. Also check the on-board ROM
 * self-test results.
 */
static int
bge_chipinit(struct bge_softc *sc)
{
	int i;
	uint32_t dma_rw_ctl;

	/* Set endian type before we access any non-PCI registers. */
	pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, BGE_INIT, 4);

	/*
	 * Check the 'ROM failed' bit on the RX CPU to see if
	 * self-tests passed.
	 */
	if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) {
		if_printf(&sc->arpcom.ac_if,
			  "RX CPU self-diagnostics failed!\n");
		return(ENODEV);
	}

	/* Clear the MAC control register */
	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);

	/*
	 * Clear the MAC statistics block in the NIC's
	 * internal memory.
	 */
	for (i = BGE_STATS_BLOCK;
	    i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
		BGE_MEMWIN_WRITE(sc, i, 0);

	for (i = BGE_STATUS_BLOCK;
	    i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
		BGE_MEMWIN_WRITE(sc, i, 0);

	/* Set up the PCI DMA control register. */
	if (sc->bge_flags & BGE_FLAG_PCIE) {
		/* PCI Express */
		dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
		    (0xf << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
		    (0x2 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
	} else if (sc->bge_flags & BGE_FLAG_PCIX) {
		/* PCI-X bus */
		if (BGE_IS_5714_FAMILY(sc)) {
			dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD;
			dma_rw_ctl &= ~BGE_PCIDMARWCTL_ONEDMA_ATONCE; /* XXX */
			/* XXX magic values, Broadcom-supplied Linux driver */
			if (sc->bge_asicrev == BGE_ASICREV_BCM5780) {
				dma_rw_ctl |= (1 << 20) | (1 << 18) |
				    BGE_PCIDMARWCTL_ONEDMA_ATONCE;
			} else {
				dma_rw_ctl |= (1 << 20) | (1 << 18) | (1 << 15);
			}
		} else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
			/*
			 * The 5704 uses a different encoding of read/write
			 * watermarks.
			 */
			dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
			    (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
			    (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
		} else {
			dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
			    (0x3 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
			    (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
			    (0x0F);
		}

		/*
		 * 5703 and 5704 need ONEDMA_AT_ONCE as a workaround
		 * for hardware bugs.
		 */
		if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
		    sc->bge_asicrev == BGE_ASICREV_BCM5704) {
			uint32_t tmp;

			tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f;
			if (tmp == 0x6 || tmp == 0x7)
				dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE;
		}
	} else {
		/* Conventional PCI bus */
		dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
		    (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
		    (0x7 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
		    (0x0F);
	}

	if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5704 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5705)
		dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
	pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);

	/*
	 * Set up general mode register.
	 */
	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS|
	    BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS|
	    BGE_MODECTL_TX_NO_PHDR_CSUM);

	/*
	 * Disable memory write invalidate.  Apparently it is not supported
	 * properly by these devices.
	 */
	PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD, PCIM_CMD_MWIEN, 4);

	/* Set the timer prescaler (always 66MHz) */
	CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);

	return(0);
}

static int
bge_blockinit(struct bge_softc *sc)
{
	struct bge_rcb *rcb;
	bus_size_t vrcb;
	bge_hostaddr taddr;
	uint32_t val;
	int i;

	/*
	 * Initialize the memory window pointer register so that
	 * we can access the first 32K of internal NIC RAM. This will
	 * allow us to set up the TX send ring RCBs and the RX return
	 * ring RCBs, plus other things which live in NIC memory.
	 */
	CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);

	/* Note: the BCM5704 has a smaller mbuf space than other chips. */

	if (!BGE_IS_5705_PLUS(sc)) {
		/* Configure mbuf memory pool */
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_BUFFPOOL_1);
		if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
		else
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);

		/* Configure DMA resource pool */
		CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
		    BGE_DMA_DESCRIPTORS);
		CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
	}

	/* Configure mbuf pool watermarks */
	if (BGE_IS_5705_PLUS(sc)) {
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
	} else {
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
	}
	CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);

	/* Configure DMA resource watermarks */
	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);

	/* Enable buffer manager */
	if (!BGE_IS_5705_PLUS(sc)) {
		CSR_WRITE_4(sc, BGE_BMAN_MODE,
		    BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN);

		/* Poll for buffer manager start indication */
		for (i = 0; i < BGE_TIMEOUT; i++) {
			if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
				break;
			DELAY(10);
		}

		if (i == BGE_TIMEOUT) {
			if_printf(&sc->arpcom.ac_if,
				  "buffer manager failed to start\n");
			return(ENXIO);
		}
	}

	/* Enable flow-through queues */
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);

	/* Wait until queue initialization is complete */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
			break;
		DELAY(10);
	}

	if (i == BGE_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if,
			  "flow-through queue init failed\n");
		return(ENXIO);
	}

	/* Initialize the standard RX ring control block */
	rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb;
	rcb->bge_hostaddr.bge_addr_lo =
	    BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr);
	rcb->bge_hostaddr.bge_addr_hi =
	    BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr);
	bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
	    sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREREAD);
	if (BGE_IS_5705_PLUS(sc))
		rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
	else
		rcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
	rcb->bge_nicaddr = BGE_STD_RX_RINGS;
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);

	/*
	 * Initialize the jumbo RX ring control block.
	 * We set the 'ring disabled' bit in the flags
	 * field until we're actually ready to start
	 * using this ring (i.e. once we set the MTU
	 * high enough to require it).
	 */
	if (BGE_IS_JUMBO_CAPABLE(sc)) {
		rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;

		rcb->bge_hostaddr.bge_addr_lo =
		    BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
		rcb->bge_hostaddr.bge_addr_hi =
		    BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
		bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
		    sc->bge_cdata.bge_rx_jumbo_ring_map,
		    BUS_DMASYNC_PREREAD);
		rcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN,
		    BGE_RCB_FLAG_RING_DISABLED);
		rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
		    rcb->bge_hostaddr.bge_addr_hi);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
		    rcb->bge_hostaddr.bge_addr_lo);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
		    rcb->bge_maxlen_flags);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);

		/* Set up dummy disabled mini ring RCB */
		rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb;
		rcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
		CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
		    rcb->bge_maxlen_flags);
	}

	/*
	 * Set the BD ring replenish thresholds. The recommended
	 * values are 1/8th the number of descriptors allocated to
	 * each ring.
	 */
	if (BGE_IS_5705_PLUS(sc))
		val = 8;
	else
		val = BGE_STD_RX_RING_CNT / 8;
	CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, val);
	CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8);

	/*
	 * Disable all unused send rings by setting the 'ring disabled'
	 * bit in the flags field of all the TX send ring control blocks.
	 * These are located in NIC memory.
	 */
	vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
	for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) {
		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
		RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
		vrcb += sizeof(struct bge_rcb);
	}

	/* Configure TX RCB 0 (we use only the first ring) */
	vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
	BGE_HOSTADDR(taddr, sc->bge_ldata.bge_tx_ring_paddr);
	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
	RCB_WRITE_4(sc, vrcb, bge_nicaddr,
	    BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
	if (!BGE_IS_5705_PLUS(sc)) {
		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
		    BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
	}

	/* Disable all unused RX return rings */
	vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
	for (i = 0; i < BGE_RX_RINGS_MAX; i++) {
		RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0);
		RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0);
		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
		    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt,
		    BGE_RCB_FLAG_RING_DISABLED));
		RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
		CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO +
		    (i * (sizeof(uint64_t))), 0);
		vrcb += sizeof(struct bge_rcb);
	}

	/* Initialize RX ring indexes */
	CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, 0);
	CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
	CSR_WRITE_4(sc, BGE_MBX_RX_MINI_PROD_LO, 0);

	/*
	 * Set up RX return ring 0.
	 * Note that the NIC address for RX return rings is 0x00000000.
	 * The return rings live entirely within the host, so the
	 * nicaddr field in the RCB isn't used.
	 */
	vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
	BGE_HOSTADDR(taddr, sc->bge_ldata.bge_rx_return_ring_paddr);
	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
	RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0x00000000);
	RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
	    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));

	/* Set random backoff seed for TX */
	CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
	    sc->arpcom.ac_enaddr[0] + sc->arpcom.ac_enaddr[1] +
	    sc->arpcom.ac_enaddr[2] + sc->arpcom.ac_enaddr[3] +
	    sc->arpcom.ac_enaddr[4] + sc->arpcom.ac_enaddr[5] +
	    BGE_TX_BACKOFF_SEED_MASK);

	/* Set inter-packet gap */
	CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);

	/*
	 * Specify which ring to use for packets that don't match
	 * any RX rules.
	 */
	CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);

	/*
	 * Configure number of RX lists. One interrupt distribution
	 * list, sixteen active lists, one bad frames class.
	 */
	CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);

	/* Initialize RX list placement stats mask. */
	CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
	CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);

	/* Disable host coalescing until we get it set up */
	CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);

	/* Poll to make sure it's shut down. */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
			break;
		DELAY(10);
	}

	if (i == BGE_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if,
			  "host coalescing engine failed to idle\n");
		return(ENXIO);
	}

	/* Set up host coalescing defaults */
	CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
	CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
	if (!BGE_IS_5705_PLUS(sc)) {
		CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
		CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
	}
	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);

	/* Set up address of statistics block */
	if (!BGE_IS_5705_PLUS(sc)) {
		CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI,
		    BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr));
		CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
		    BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr));

		CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
		CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
		CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
	}

	/* Set up address of status block */
	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
	    BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr));
	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
	    BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr));
	sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx = 0;
	sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx = 0;

	/* Turn on host coalescing state machine */
	CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);

	/* Turn on RX BD completion state machine and enable attentions */
	CSR_WRITE_4(sc, BGE_RBDC_MODE,
	    BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);

	/* Turn on RX list placement state machine */
	CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);

	/* Turn on RX list selector state machine. */
	if (!BGE_IS_5705_PLUS(sc))
		CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);

	/* Turn on DMA, clear stats */
	CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB|
	    BGE_MACMODE_RXDMA_ENB|BGE_MACMODE_RX_STATS_CLEAR|
	    BGE_MACMODE_TX_STATS_CLEAR|BGE_MACMODE_RX_STATS_ENB|
	    BGE_MACMODE_TX_STATS_ENB|BGE_MACMODE_FRMHDR_DMA_ENB|
	    ((sc->bge_flags & BGE_FLAG_TBI) ?
	     BGE_PORTMODE_TBI : BGE_PORTMODE_MII));

	/* Set misc. local control, enable interrupts on attentions */
	CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);

#ifdef notdef
	/* Assert GPIO pins for PHY reset */
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
	    BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
	    BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
#endif

	/* Turn on DMA completion state machine */
	if (!BGE_IS_5705_PLUS(sc))
		CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);

	/* Turn on write DMA state machine */
	val = BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS;
	if (sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5787)
		val |= (1 << 29);	/* Enable host coalescing bug fix. */
	CSR_WRITE_4(sc, BGE_WDMA_MODE, val);

	/* Turn on read DMA state machine */
	CSR_WRITE_4(sc, BGE_RDMA_MODE,
	    BGE_RDMAMODE_ENABLE|BGE_RDMAMODE_ALL_ATTNS);

	/* Turn on RX data completion state machine */
	CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);

	/* Turn on RX BD initiator state machine */
	CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);

	/* Turn on RX data and RX BD initiator state machine */
	CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);

	/* Turn on Mbuf cluster free state machine */
	if (!BGE_IS_5705_PLUS(sc))
		CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);

	/* Turn on send BD completion state machine */
	CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);

	/* Turn on send data completion state machine */
	CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);

	/* Turn on send data initiator state machine */
	CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);

	/* Turn on send BD initiator state machine */
	CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);

	/* Turn on send BD selector state machine */
	CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);

	CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
	CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
	    BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);

	/* ack/clear link change events */
	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
	    BGE_MACSTAT_LINK_CHANGED);
	CSR_WRITE_4(sc, BGE_MI_STS, 0);

	/* Enable PHY auto polling (for MII/GMII only) */
	if (sc->bge_flags & BGE_FLAG_TBI) {
		CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
	} else {
		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL|10<<16);
		if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
		    sc->bge_chipid != BGE_CHIPID_BCM5700_B2) {
			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
			    BGE_EVTENB_MI_INTERRUPT);
		}
	}

	/*
	 * Clear any pending link state attention.
	 * Otherwise some link state change events may be lost until attention
	 * is cleared by bge_intr() -> bge_softc.bge_link_upd() sequence.
	 * It's not necessary on newer BCM chips - perhaps enabling link
	 * state change attentions implies clearing pending attention.
	 */
	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
	    BGE_MACSTAT_LINK_CHANGED);

	/* Enable link state change attentions. */
	BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);

	return(0);
}

/*
 * Probe for a Broadcom chip. Check the PCI vendor and device IDs
 * against our list and return the device name if we find a match.
 * The announced description is the compiled-in string with the chip's
 * ASIC revision (read from the PCI misc. control register) appended,
 * so similar devices can still be told apart when they attach.
 */
static int
bge_probe(device_t dev)
{
	struct bge_softc *sc;
	struct bge_type *t;
	char *descbuf;
	uint16_t product, vendor;

	product = pci_get_device(dev);
	vendor = pci_get_vendor(dev);

	for (t = bge_devs; t->bge_name != NULL; t++) {
		if (vendor == t->bge_vid && product == t->bge_did)
			break;
	}

	if (t->bge_name == NULL)
		return(ENXIO);

	sc = device_get_softc(dev);
	descbuf = kmalloc(BGE_DEVDESC_MAX, M_TEMP, M_WAITOK);
	ksnprintf(descbuf, BGE_DEVDESC_MAX, "%s, ASIC rev. %#04x", t->bge_name,
	    pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >> 16);
	device_set_desc_copy(dev, descbuf);
	if (pci_get_subvendor(dev) == PCI_VENDOR_DELL)
		sc->bge_flags |= BGE_FLAG_NO_3LED;
	kfree(descbuf, M_TEMP);
	return(0);
}

static int
bge_attach(device_t dev)
{
	struct ifnet *ifp;
	struct bge_softc *sc;
	uint32_t hwcfg = 0;
	uint32_t mac_addr = 0;
	int error = 0, rid;
	uint8_t ether_addr[ETHER_ADDR_LEN];

	sc = device_get_softc(dev);
	sc->bge_dev = dev;
	callout_init(&sc->bge_stat_timer);
	lwkt_serialize_init(&sc->bge_jslot_serializer);

	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

	rid = BGE_PCI_BAR0;
	sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);

	if (sc->bge_res == NULL) {
		device_printf(dev, "couldn't map memory\n");
		return ENXIO;
	}

	sc->bge_btag = rman_get_bustag(sc->bge_res);
	sc->bge_bhandle = rman_get_bushandle(sc->bge_res);

	/* Save ASIC rev. */
	sc->bge_chipid =
	    pci_read_config(dev, BGE_PCI_MISC_CTL, 4) &
	    BGE_PCIMISCCTL_ASICREV;
	sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
	sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);

	/* Save chipset family. */
	switch (sc->bge_asicrev) {
	case BGE_ASICREV_BCM5700:
	case BGE_ASICREV_BCM5701:
	case BGE_ASICREV_BCM5703:
	case BGE_ASICREV_BCM5704:
		sc->bge_flags |= BGE_FLAG_5700_FAMILY | BGE_FLAG_JUMBO;
		break;

	case BGE_ASICREV_BCM5714_A0:
	case BGE_ASICREV_BCM5780:
	case BGE_ASICREV_BCM5714:
		sc->bge_flags |= BGE_FLAG_5714_FAMILY;
		/* Fall through */

	case BGE_ASICREV_BCM5750:
	case BGE_ASICREV_BCM5752:
	case BGE_ASICREV_BCM5755:
	case BGE_ASICREV_BCM5787:
		sc->bge_flags |= BGE_FLAG_575X_PLUS;
		/* Fall through */

	case BGE_ASICREV_BCM5705:
		sc->bge_flags |= BGE_FLAG_5705_PLUS;
		break;
	}

	/*
	 * Set various quirk flags.
	 */

	sc->bge_flags |= BGE_FLAG_ETH_WIRESPEED;
	if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
	    (sc->bge_asicrev == BGE_ASICREV_BCM5705 &&
	     (sc->bge_chipid != BGE_CHIPID_BCM5705_A0 &&
	      sc->bge_chipid != BGE_CHIPID_BCM5705_A1)) ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5906)
		sc->bge_flags &= ~BGE_FLAG_ETH_WIRESPEED;

	if (sc->bge_chipid == BGE_CHIPID_BCM5701_A0 ||
	    sc->bge_chipid == BGE_CHIPID_BCM5701_B0)
		sc->bge_flags |= BGE_FLAG_CRC_BUG;

	if (sc->bge_chiprev == BGE_CHIPREV_5703_AX ||
	    sc->bge_chiprev == BGE_CHIPREV_5704_AX)
		sc->bge_flags |= BGE_FLAG_ADC_BUG;

	if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0)
		sc->bge_flags |= BGE_FLAG_5704_A0_BUG;

	if (BGE_IS_5705_PLUS(sc)) {
		if (sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
		    sc->bge_asicrev == BGE_ASICREV_BCM5787) {
			uint32_t product = pci_get_device(dev);

			if (product != PCI_PRODUCT_BROADCOM_BCM5722 &&
			    product != PCI_PRODUCT_BROADCOM_BCM5756)
				sc->bge_flags |= BGE_FLAG_JITTER_BUG;
			if (product == PCI_PRODUCT_BROADCOM_BCM5755M)
				sc->bge_flags |= BGE_FLAG_ADJUST_TRIM;
		} else if (sc->bge_asicrev != BGE_ASICREV_BCM5906) {
			sc->bge_flags |= BGE_FLAG_BER_BUG;
		}
	}

	/* Allocate interrupt */
	rid = 0;

	sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);

	if (sc->bge_irq == NULL) {
		device_printf(dev, "couldn't map interrupt\n");
		error = ENXIO;
		goto fail;
	}
1703 
1704 	/*
1705 	 * Check if this is a PCI-X or PCI Express device.
1706 	 */
1707 	if (BGE_IS_5705_PLUS(sc)) {
1708 		uint32_t reg;
1709 
1710 		reg = pci_read_config(dev, BGE_PCIE_CAPID_REG, 4);
1711 		if ((reg & 0xff) == BGE_PCIE_CAPID)
1712 			sc->bge_flags |= BGE_FLAG_PCIE;
1713 	} else {
1714 		/*
1715 		 * Check if the device is in PCI-X Mode.
1716 		 * (This bit is not valid on PCI Express controllers.)
1717 		 */
1718 		if ((pci_read_config(sc->bge_dev, BGE_PCI_PCISTATE, 4) &
1719 		    BGE_PCISTATE_PCI_BUSMODE) == 0)
1720 			sc->bge_flags |= BGE_FLAG_PCIX;
1721 	}
1722 
1723 	ifp = &sc->arpcom.ac_if;
1724 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1725 
1726 	/* Try to reset the chip. */
1727 	bge_reset(sc);
1728 
1729 	if (bge_chipinit(sc)) {
1730 		device_printf(dev, "chip initialization failed\n");
1731 		error = ENXIO;
1732 		goto fail;
1733 	}
1734 
1735 	/*
1736 	 * Get station address from the EEPROM.
1737 	 */
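	/*
	 * Layout note (inferred from the check below, not from Broadcom
	 * documentation): offset 0x0c14 is in the chip's on-board SRAM.
	 * The upper 16 bits of the first word hold a validity signature,
	 * 0x484b (ASCII 'H','K'); the low 16 bits and the following word
	 * supply the six address octets in big-endian order.
	 */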
1738 	mac_addr = bge_readmem_ind(sc, 0x0c14);
1739 	if ((mac_addr >> 16) == 0x484b) {
1740 		ether_addr[0] = (uint8_t)(mac_addr >> 8);
1741 		ether_addr[1] = (uint8_t)mac_addr;
1742 		mac_addr = bge_readmem_ind(sc, 0x0c18);
1743 		ether_addr[2] = (uint8_t)(mac_addr >> 24);
1744 		ether_addr[3] = (uint8_t)(mac_addr >> 16);
1745 		ether_addr[4] = (uint8_t)(mac_addr >> 8);
1746 		ether_addr[5] = (uint8_t)mac_addr;
1747 	} else if (bge_read_eeprom(sc, ether_addr,
1748 	    BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) {
1749 		device_printf(dev, "failed to read station address\n");
1750 		error = ENXIO;
1751 		goto fail;
1752 	}
1753 
1754 	/* 5705/5750 limits RX return ring to 512 entries. */
1755 	if (BGE_IS_5705_PLUS(sc))
1756 		sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
1757 	else
1758 		sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
1759 
1760 	error = bge_dma_alloc(sc);
1761 	if (error)
1762 		goto fail;
1763 
1764 	/* Set default tuneable values. */
1765 	sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
1766 	sc->bge_rx_coal_ticks = 150;
1767 	sc->bge_tx_coal_ticks = 150;
1768 	sc->bge_rx_max_coal_bds = 10;
1769 	sc->bge_tx_max_coal_bds = 10;
1770 
1771 	/* Set up ifnet structure */
1772 	ifp->if_softc = sc;
1773 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1774 	ifp->if_ioctl = bge_ioctl;
1775 	ifp->if_start = bge_start;
1776 	ifp->if_watchdog = bge_watchdog;
1777 	ifp->if_init = bge_init;
1778 	ifp->if_mtu = ETHERMTU;
1779 	ifp->if_capabilities = IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
1780 	ifq_set_maxlen(&ifp->if_snd, BGE_TX_RING_CNT - 1);
1781 	ifq_set_ready(&ifp->if_snd);
1782 
1783 	/*
1784 	 * 5700 B0 chips do not support checksumming correctly due
1785 	 * to hardware bugs.
1786 	 */
1787 	if (sc->bge_chipid != BGE_CHIPID_BCM5700_B0) {
1788 		ifp->if_capabilities |= IFCAP_HWCSUM;
1789 		ifp->if_hwassist = BGE_CSUM_FEATURES;
1790 	}
1791 	ifp->if_capenable = ifp->if_capabilities;
1792 
1793 	/*
1794 	 * Figure out what sort of media we have by checking the
1795 	 * hardware config word in the first 32k of NIC internal memory,
1796 	 * or fall back to examining the EEPROM if necessary.
1797 	 * Note: on some BCM5700 cards, this value appears to be unset.
1798 	 * If that's the case, we have to rely on identifying the NIC
1799 	 * by its PCI subsystem ID, as we do below for the SysKonnect
1800 	 * SK-9D41.
1801 	 */
1802 	if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER)
1803 		hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
1804 	else {
1805 		if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
1806 				    sizeof(hwcfg))) {
1807 			device_printf(dev, "failed to read EEPROM\n");
1808 			error = ENXIO;
1809 			goto fail;
1810 		}
1811 		hwcfg = ntohl(hwcfg);
1812 	}
1813 
1814 	if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER)
1815 		sc->bge_flags |= BGE_FLAG_TBI;
1816 
1817 	/* The SysKonnect SK-9D41 is a 1000baseSX card. */
1818 	if (pci_get_subvendor(dev) == PCI_PRODUCT_SCHNEIDERKOCH_SK_9D41)
1819 		sc->bge_flags |= BGE_FLAG_TBI;
1820 
1821 	if (sc->bge_flags & BGE_FLAG_TBI) {
1822 		ifmedia_init(&sc->bge_ifmedia, IFM_IMASK,
1823 		    bge_ifmedia_upd, bge_ifmedia_sts);
1824 		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
1825 		ifmedia_add(&sc->bge_ifmedia,
1826 		    IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL);
1827 		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
1828 		ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO);
1829 		sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
1830 	} else {
1831 		/*
1832 		 * Do transceiver setup.
1833 		 */
1834 		if (mii_phy_probe(dev, &sc->bge_miibus,
1835 		    bge_ifmedia_upd, bge_ifmedia_sts)) {
1836 			device_printf(dev, "MII without any PHY!\n");
1837 			error = ENXIO;
1838 			goto fail;
1839 		}
1840 	}
1841 
1842 	/*
1843 	 * When using the BCM5701 in PCI-X mode, data corruption has
1844 	 * been observed in the first few bytes of some received packets.
1845 	 * Aligning the packet buffer in memory eliminates the corruption.
1846 	 * Unfortunately, this misaligns the packet payloads.  On platforms
1847 	 * which do not support unaligned accesses, we will realign the
1848 	 * payloads by copying the received packets.
1849 	 */
1850 	if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
1851 	    (sc->bge_flags & BGE_FLAG_PCIX))
1852 		sc->bge_flags |= BGE_FLAG_RX_ALIGNBUG;
1853 
1854 	if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
1855 	    sc->bge_chipid != BGE_CHIPID_BCM5700_B2) {
1856 		sc->bge_link_upd = bge_bcm5700_link_upd;
1857 		sc->bge_link_chg = BGE_MACSTAT_MI_INTERRUPT;
1858 	} else if (sc->bge_flags & BGE_FLAG_TBI) {
1859 		sc->bge_link_upd = bge_tbi_link_upd;
1860 		sc->bge_link_chg = BGE_MACSTAT_LINK_CHANGED;
1861 	} else {
1862 		sc->bge_link_upd = bge_copper_link_upd;
1863 		sc->bge_link_chg = BGE_MACSTAT_LINK_CHANGED;
1864 	}
1865 
1866 	/*
1867 	 * Call MI attach routine.
1868 	 */
1869 	ether_ifattach(ifp, ether_addr, NULL);
1870 
1871 	error = bus_setup_intr(dev, sc->bge_irq, INTR_NETSAFE,
1872 			       bge_intr, sc, &sc->bge_intrhand,
1873 			       ifp->if_serializer);
1874 	if (error) {
1875 		ether_ifdetach(ifp);
1876 		device_printf(dev, "couldn't set up irq\n");
1877 		goto fail;
1878 	}
1879 	return(0);
1880 fail:
1881 	bge_detach(dev);
1882 	return(error);
1883 }
1884 
1885 static int
1886 bge_detach(device_t dev)
1887 {
1888 	struct bge_softc *sc = device_get_softc(dev);
1889 
1890 	if (device_is_attached(dev)) {
1891 		struct ifnet *ifp = &sc->arpcom.ac_if;
1892 
1893 		lwkt_serialize_enter(ifp->if_serializer);
1894 		bge_stop(sc);
1895 		bge_reset(sc);
1896 		bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand);
1897 		lwkt_serialize_exit(ifp->if_serializer);
1898 
1899 		ether_ifdetach(ifp);
1900 	}
1901 
1902 	if (sc->bge_flags & BGE_FLAG_TBI)
1903 		ifmedia_removeall(&sc->bge_ifmedia);
1904 	if (sc->bge_miibus)
1905 		device_delete_child(dev, sc->bge_miibus);
1906 	bus_generic_detach(dev);
1907 
1908 	if (sc->bge_irq != NULL)
1909 		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->bge_irq);
1910 
1911 	if (sc->bge_res != NULL)
1912 		bus_release_resource(dev, SYS_RES_MEMORY,
1913 		    BGE_PCI_BAR0, sc->bge_res);
1914 
1915 	bge_dma_free(sc);
1916 
1917 	return 0;
1918 }
1919 
1920 static void
1921 bge_reset(struct bge_softc *sc)
1922 {
1923 	device_t dev;
1924 	uint32_t cachesize, command, pcistate, reset;
1925 	void (*write_op)(struct bge_softc *, uint32_t, uint32_t);
1926 	int i, val = 0;
1927 
1928 	dev = sc->bge_dev;
1929 
1930 	if (BGE_IS_575X_PLUS(sc) && !BGE_IS_5714_FAMILY(sc)) {
1931 		if (sc->bge_flags & BGE_FLAG_PCIE)
1932 			write_op = bge_writemem_direct;
1933 		else
1934 			write_op = bge_writemem_ind;
1935 	} else {
1936 		write_op = bge_writereg_ind;
1937 	}
1938 
1939 	/* Save some important PCI state. */
1940 	cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
1941 	command = pci_read_config(dev, BGE_PCI_CMD, 4);
1942 	pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);
1943 
1944 	pci_write_config(dev, BGE_PCI_MISC_CTL,
1945 	    BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
1946 	    BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW, 4);
1947 
1948 	/* Disable fastboot on controllers that support it. */
1949 	if (sc->bge_asicrev == BGE_ASICREV_BCM5752 ||
1950 	    sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
1951 	    sc->bge_asicrev == BGE_ASICREV_BCM5787) {
1952 		if (bootverbose)
1953 			if_printf(&sc->arpcom.ac_if, "Disabling fastboot\n");
1954 		CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0x0);
1955 	}
1956 
1957 	/*
1958 	 * Write the magic number to SRAM at offset 0xB50.
1959 	 * When firmware finishes its initialization it will
1960 	 * write ~BGE_MAGIC_NUMBER to the same location.
1961 	 */
1962 	bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
1963 
1964 	reset = BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1);
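	/*
	 * The (65 << 1) component above appears to be the MISC_CFG timer
	 * prescaler field (bits 1-7) programmed for a 66MHz core clock;
	 * the same value is written back once the reset completes below.
	 * This interpretation is inferred from the register layout.
	 */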
1965 
1966 	/* XXX: Broadcom Linux driver. */
1967 	if (sc->bge_flags & BGE_FLAG_PCIE) {
1968 		if (CSR_READ_4(sc, 0x7e2c) == 0x60)	/* PCIE 1.0 */
1969 			CSR_WRITE_4(sc, 0x7e2c, 0x20);
1970 		if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
1971 			/* Prevent PCIE link training during global reset */
1972 			CSR_WRITE_4(sc, BGE_MISC_CFG, (1<<29));
1973 			reset |= (1<<29);
1974 		}
1975 	}
1976 
1977 	/*
1978 	 * Set GPHY Power Down Override to leave GPHY
1979 	 * powered up in D0 uninitialized.
1980 	 */
1981 	if (BGE_IS_5705_PLUS(sc))
1982 		reset |= 0x04000000;
1983 
1984 	/* Issue global reset */
1985 	write_op(sc, BGE_MISC_CFG, reset);
1986 
1987 	DELAY(1000);
1988 
1989 	/* XXX: Broadcom Linux driver. */
1990 	if (sc->bge_flags & BGE_FLAG_PCIE) {
1991 		if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
1992 			uint32_t v;
1993 
1994 			DELAY(500000); /* wait for link training to complete */
1995 			v = pci_read_config(dev, 0xc4, 4);
1996 			pci_write_config(dev, 0xc4, v | (1<<15), 4);
1997 		}
1998 		/*
1999 		 * Set PCIE max payload size to 128 bytes and
2000 		 * clear error status.
2001 		 */
2002 		pci_write_config(dev, 0xd8, 0xf5000, 4);
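		/*
		 * Offset 0xd8 is presumably the PCIe device control/status
		 * pair: the control half selects the 128-byte payload size,
		 * while the 0xf0000 bits land in the write-one-to-clear
		 * status half and reset the device error flags.
		 */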
2003 	}
2004 
2005 	/* Reset some of the PCI state that got zapped by reset */
2006 	pci_write_config(dev, BGE_PCI_MISC_CTL,
2007 	    BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2008 	    BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW, 4);
2009 	pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
2010 	pci_write_config(dev, BGE_PCI_CMD, command, 4);
2011 	write_op(sc, BGE_MISC_CFG, (65 << 1));
2012 
2013 	/* Enable memory arbiter. */
2014 	if (BGE_IS_5714_FAMILY(sc)) {
2015 		uint32_t val;
2016 
2017 		val = CSR_READ_4(sc, BGE_MARB_MODE);
2018 		CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val);
2019 	} else {
2020 		CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
2021 	}
2022 
2023 	/*
2024 	 * Poll until we see the 1's complement of the magic number.
2025 	 * This indicates that the firmware initialization
2026 	 * is complete.
2027 	 */
2028 	for (i = 0; i < BGE_TIMEOUT; i++) {
2029 		val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
2030 		if (val == ~BGE_MAGIC_NUMBER)
2031 			break;
2032 		DELAY(10);
2033 	}
2034 
2035 	if (i == BGE_TIMEOUT) {
2036 		if_printf(&sc->arpcom.ac_if, "firmware handshake timed out, "
2037 			  "found 0x%08x\n", val);
2038 		return;
2039 	}
2040 
2041 	/*
2042 	 * XXX Wait for the value of the PCISTATE register to
2043 	 * return to its original pre-reset state. This is a
2044 	 * fairly good indicator of reset completion. If we don't
2045 	 * wait for the reset to fully complete, trying to read
2046 	 * from the device's non-PCI registers may yield garbage
2047 	 * results.
2048 	 */
2049 	for (i = 0; i < BGE_TIMEOUT; i++) {
2050 		if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
2051 			break;
2052 		DELAY(10);
2053 	}
2054 
2055 	if (sc->bge_flags & BGE_FLAG_PCIE) {
2056 		reset = bge_readmem_ind(sc, 0x7c00);
2057 		bge_writemem_ind(sc, 0x7c00, reset | (1 << 25));
2058 	}
2059 
2060 	/* Fix up byte swapping */
2061 	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS |
2062 	    BGE_MODECTL_BYTESWAP_DATA);
2063 
2064 	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
2065 
2066 	/*
2067 	 * The 5704 in TBI mode apparently needs some special
2068 	 * adjustment to ensure the SERDES drive level is set
2069 	 * to 1.2V.
2070 	 */
2071 	if (sc->bge_asicrev == BGE_ASICREV_BCM5704 &&
2072 	    (sc->bge_flags & BGE_FLAG_TBI)) {
2073 		uint32_t serdescfg;
2074 
2075 		serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG);
2076 		serdescfg = (serdescfg & ~0xFFF) | 0x880;
2077 		CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg);
2078 	}
2079 
2080 	/* XXX: Broadcom Linux driver. */
2081 	if ((sc->bge_flags & BGE_FLAG_PCIE) &&
2082 	    sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
2083 		uint32_t v;
2084 
2085 		v = CSR_READ_4(sc, 0x7c00);
2086 		CSR_WRITE_4(sc, 0x7c00, v | (1<<25));
2087 	}
2088 
2089 	DELAY(10000);
2090 }
2091 
2092 /*
2093  * Frame reception handling. This is called if there's a frame
2094  * on the receive return list.
2095  *
2096  * Note: we have to be able to handle two possibilities here:
2097  * 1) the frame is from the jumbo receive ring
2098  * 2) the frame is from the standard receive ring
2099  */
2100 
2101 static void
2102 bge_rxeof(struct bge_softc *sc)
2103 {
2104 	struct ifnet *ifp;
2105 	int stdcnt = 0, jumbocnt = 0;
2106 
2107 	if (sc->bge_rx_saved_considx ==
2108 	    sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx)
2109 		return;
2110 
2111 	ifp = &sc->arpcom.ac_if;
2112 
2113 	bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
2114 			sc->bge_cdata.bge_rx_return_ring_map,
2115 			BUS_DMASYNC_POSTREAD);
2116 	bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
2117 			sc->bge_cdata.bge_rx_std_ring_map,
2118 			BUS_DMASYNC_POSTREAD);
2119 	if (BGE_IS_JUMBO_CAPABLE(sc)) {
2120 		bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2121 				sc->bge_cdata.bge_rx_jumbo_ring_map,
2122 				BUS_DMASYNC_POSTREAD);
2123 	}
2124 
2125 	while (sc->bge_rx_saved_considx !=
2126 	       sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx) {
2127 		struct bge_rx_bd	*cur_rx;
2128 		uint32_t		rxidx;
2129 		struct mbuf		*m = NULL;
2130 		uint16_t		vlan_tag = 0;
2131 		int			have_tag = 0;
2132 
2133 		cur_rx =
2134 		    &sc->bge_ldata.bge_rx_return_ring[sc->bge_rx_saved_considx];
2135 
2136 		rxidx = cur_rx->bge_idx;
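		/*
		 * BGE_INC() advances a ring index with wraparound, i.e.
		 * roughly (x) = ((x) + 1) % (count).
		 */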
2137 		BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt);
2138 
2139 		if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
2140 			have_tag = 1;
2141 			vlan_tag = cur_rx->bge_vlan_tag;
2142 		}
2143 
2144 		if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
2145 			BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
2146 			m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
2147 			sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL;
2148 			jumbocnt++;
2149 			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2150 				ifp->if_ierrors++;
2151 				bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
2152 				continue;
2153 			}
2154 			if (bge_newbuf_jumbo(sc,
2155 			    sc->bge_jumbo, NULL) == ENOBUFS) {
2156 				ifp->if_ierrors++;
2157 				bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
2158 				continue;
2159 			}
2160 		} else {
2161 			BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
2162 			bus_dmamap_sync(sc->bge_cdata.bge_mtag,
2163 					sc->bge_cdata.bge_rx_std_dmamap[rxidx],
2164 					BUS_DMASYNC_POSTREAD);
2165 			bus_dmamap_unload(sc->bge_cdata.bge_mtag,
2166 				sc->bge_cdata.bge_rx_std_dmamap[rxidx]);
2167 			m = sc->bge_cdata.bge_rx_std_chain[rxidx];
2168 			sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL;
2169 			stdcnt++;
2170 			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2171 				ifp->if_ierrors++;
2172 				bge_newbuf_std(sc, sc->bge_std, m);
2173 				continue;
2174 			}
2175 			if (bge_newbuf_std(sc, sc->bge_std,
2176 			    NULL) == ENOBUFS) {
2177 				ifp->if_ierrors++;
2178 				bge_newbuf_std(sc, sc->bge_std, m);
2179 				continue;
2180 			}
2181 		}
2182 
2183 		ifp->if_ipackets++;
2184 #ifndef __i386__
2185 		/*
2186 		 * The i386 allows unaligned accesses, but for other
2187 		 * platforms we must make sure the payload is aligned.
2188 		 */
2189 		if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) {
2190 			bcopy(m->m_data, m->m_data + ETHER_ALIGN,
2191 			    cur_rx->bge_len);
2192 			m->m_data += ETHER_ALIGN;
2193 		}
2194 #endif
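		/*
		 * Note on the fixup above: ETHER_ALIGN is 2, so shifting
		 * the packet up by two bytes leaves the IP header 32-bit
		 * aligned behind the 14-byte Ethernet header.  The
		 * overlapping copy is safe since bcopy() has memmove()
		 * semantics.
		 */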
2195 		m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
2196 		m->m_pkthdr.rcvif = ifp;
2197 
2198 		if (ifp->if_capenable & IFCAP_RXCSUM) {
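			/*
			 * The chip reports the 16-bit ones-complement sum it
			 * computed over the IP header.  An intact header sums
			 * to 0xffff, so XORing with 0xffff yields zero
			 * exactly when the checksum verifies.
			 */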
2199 			if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
2200 				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2201 				if ((cur_rx->bge_ip_csum ^ 0xffff) == 0)
2202 					m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2203 			}
2204 			if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM &&
2205 			    m->m_pkthdr.len >= BGE_MIN_FRAME) {
2206 				m->m_pkthdr.csum_data =
2207 				    cur_rx->bge_tcp_udp_csum;
2208 				m->m_pkthdr.csum_flags |=
2209 					CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2210 			}
2211 		}
2212 
2213 		/*
2214 		 * If we received a packet with a vlan tag, pass it
2215 		 * to vlan_input() instead of ether_input().
2216 		 */
2217 		if (have_tag) {
2218 			VLAN_INPUT_TAG(m, vlan_tag);
2219 			have_tag = vlan_tag = 0;
2220 		} else {
2221 			ifp->if_input(ifp, m);
2222 		}
2223 	}
2224 
2225 	if (stdcnt > 0) {
2226 		bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
2227 				sc->bge_cdata.bge_rx_std_ring_map,
2228 				BUS_DMASYNC_PREWRITE);
2229 	}
2230 
2231 	if (BGE_IS_JUMBO_CAPABLE(sc) && jumbocnt > 0) {
2232 		bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2233 				sc->bge_cdata.bge_rx_jumbo_ring_map,
2234 				BUS_DMASYNC_PREWRITE);
2235 	}
2236 
2237 	CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
2238 	if (stdcnt)
2239 		CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
2240 	if (jumbocnt)
2241 		CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
2242 }
2243 
2244 static void
2245 bge_txeof(struct bge_softc *sc)
2246 {
2247 	struct bge_tx_bd *cur_tx = NULL;
2248 	struct ifnet *ifp;
2249 
2250 	if (sc->bge_tx_saved_considx ==
2251 	    sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx)
2252 		return;
2253 
2254 	ifp = &sc->arpcom.ac_if;
2255 
2256 	bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
2257 			sc->bge_cdata.bge_tx_ring_map,
2258 			BUS_DMASYNC_POSTREAD);
2259 
2260 	/*
2261 	 * Go through our tx ring and free mbufs for those
2262 	 * frames that have been sent.
2263 	 */
2264 	while (sc->bge_tx_saved_considx !=
2265 	       sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx) {
2266 		uint32_t idx = 0;
2267 
2268 		idx = sc->bge_tx_saved_considx;
2269 		cur_tx = &sc->bge_ldata.bge_tx_ring[idx];
2270 		if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
2271 			ifp->if_opackets++;
2272 		if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
2273 			bus_dmamap_sync(sc->bge_cdata.bge_mtag,
2274 					sc->bge_cdata.bge_tx_dmamap[idx],
2275 					BUS_DMASYNC_POSTWRITE);
2276 			bus_dmamap_unload(sc->bge_cdata.bge_mtag,
2277 			    sc->bge_cdata.bge_tx_dmamap[idx]);
2278 			m_freem(sc->bge_cdata.bge_tx_chain[idx]);
2279 			sc->bge_cdata.bge_tx_chain[idx] = NULL;
2280 		}
2281 		sc->bge_txcnt--;
2282 		BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
2283 	}
2284 
2285 	if (cur_tx != NULL &&
2286 	    (BGE_TX_RING_CNT - sc->bge_txcnt) >=
2287 	    (BGE_NSEG_RSVD + BGE_NSEG_SPARE))
2288 		ifp->if_flags &= ~IFF_OACTIVE;
2289 
2290 	if (sc->bge_txcnt == 0)
2291 		ifp->if_timer = 0;
2292 
2293 	if (!ifq_is_empty(&ifp->if_snd))
2294 		ifp->if_start(ifp);
2295 }
2296 
2297 static void
2298 bge_intr(void *xsc)
2299 {
2300 	struct bge_softc *sc = xsc;
2301 	struct ifnet *ifp = &sc->arpcom.ac_if;
2302 	uint32_t status;
2303 
2304 	/*
2305 	 * Ack the interrupt by writing something to BGE_MBX_IRQ0_LO.  Don't
2306 	 * disable interrupts by writing nonzero like we used to, since with
2307 	 * our current organization this just gives complications and
2308 	 * pessimizations for re-enabling interrupts.  We used to have races
2309 	 * instead of the necessary complications.  Disabling interrupts
2310 	 * would just reduce the chance of a status update while we are
2311 	 * running (by switching to the interrupt-mode coalescence
2312 	 * parameters), but this chance is already very low so it is more
2313 	 * efficient to get another interrupt than prevent it.
2314 	 *
2315 	 * We do the ack first to ensure another interrupt if there is a
2316 	 * status update after the ack.  We don't check for the status
2317 	 * changing later because it is more efficient to get another
2318 	 * interrupt than prevent it, not quite as above (not checking is
2319 	 * a smaller optimization than not toggling the interrupt enable,
2320 	 * since checking doesn't involve PCI accesses and toggling require
2321 	 * the status check).  So toggling would probably be a pessimization
2322 	 * even with MSI.  It would only be needed for using a task queue.
2323 	 */
2324 	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
2325 
2326 	bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
2327 			sc->bge_cdata.bge_status_map,
2328 			BUS_DMASYNC_POSTREAD);
2329 
2330 	/*
2331 	 * Process link state changes.
2332 	 */
2333 	status = CSR_READ_4(sc, BGE_MAC_STS);
2334 	if ((status & sc->bge_link_chg) || sc->bge_link_evt) {
2335 		sc->bge_link_evt = 0;
2336 		sc->bge_link_upd(sc, status);
2337 	}
2338 
2339 	if (ifp->if_flags & IFF_RUNNING) {
2340 		/* Check RX return ring producer/consumer */
2341 		bge_rxeof(sc);
2342 
2343 		/* Check TX ring producer/consumer */
2344 		bge_txeof(sc);
2345 	}
2346 }
2347 
2348 static void
2349 bge_tick(void *xsc)
2350 {
2351 	struct bge_softc *sc = xsc;
2352 	struct ifnet *ifp = &sc->arpcom.ac_if;
2353 
2354 	lwkt_serialize_enter(ifp->if_serializer);
2355 
2356 	if (BGE_IS_5705_PLUS(sc))
2357 		bge_stats_update_regs(sc);
2358 	else
2359 		bge_stats_update(sc);
2360 
2361 	if (sc->bge_flags & BGE_FLAG_TBI) {
2362 		/*
2363 		 * Since auto-polling can't be used in TBI mode, we poll the
2364 		 * link status manually.  Here we register a pending link
2365 		 * event and trigger an interrupt.
2366 		 */
2367 		sc->bge_link_evt++;
2368 		BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
2369 	} else if (!sc->bge_link) {
2370 		mii_tick(device_get_softc(sc->bge_miibus));
2371 	}
2372 
2373 	callout_reset(&sc->bge_stat_timer, hz, bge_tick, sc);
2374 
2375 	lwkt_serialize_exit(ifp->if_serializer);
2376 }
2377 
2378 static void
2379 bge_stats_update_regs(struct bge_softc *sc)
2380 {
2381 	struct ifnet *ifp = &sc->arpcom.ac_if;
2382 	struct bge_mac_stats_regs stats;
2383 	uint32_t *s;
2384 	int i;
2385 
2386 	s = (uint32_t *)&stats;
2387 	for (i = 0; i < sizeof(struct bge_mac_stats_regs); i += 4) {
2388 		*s = CSR_READ_4(sc, BGE_RX_STATS + i);
2389 		s++;
2390 	}
2391 
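	/*
	 * The hardware counters are cumulative, so the "x += total - x"
	 * form below (also used in bge_stats_update()) is an absolute
	 * assignment written as a delta; it keeps if_collisions in step
	 * with the chip's running totals.
	 */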
2392 	ifp->if_collisions +=
2393 	   (stats.dot3StatsSingleCollisionFrames +
2394 	   stats.dot3StatsMultipleCollisionFrames +
2395 	   stats.dot3StatsExcessiveCollisions +
2396 	   stats.dot3StatsLateCollisions) -
2397 	   ifp->if_collisions;
2398 }
2399 
2400 static void
2401 bge_stats_update(struct bge_softc *sc)
2402 {
2403 	struct ifnet *ifp = &sc->arpcom.ac_if;
2404 	bus_size_t stats;
2405 
2406 	stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;
2407 
2408 #define READ_STAT(sc, stats, stat)	\
2409 	CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))
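	/*
	 * READ_STAT() fetches one counter from the statistics block
	 * through the NIC memory window: "stats" is the block's base
	 * offset and offsetof() locates the counter within struct
	 * bge_stats.  Only the low word of each 64-bit counter
	 * (bge_addr_lo) is read below.
	 */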
2410 
2411 	ifp->if_collisions +=
2412 	   (READ_STAT(sc, stats,
2413 		txstats.dot3StatsSingleCollisionFrames.bge_addr_lo) +
2414 	    READ_STAT(sc, stats,
2415 		txstats.dot3StatsMultipleCollisionFrames.bge_addr_lo) +
2416 	    READ_STAT(sc, stats,
2417 		txstats.dot3StatsExcessiveCollisions.bge_addr_lo) +
2418 	    READ_STAT(sc, stats,
2419 		txstats.dot3StatsLateCollisions.bge_addr_lo)) -
2420 	   ifp->if_collisions;
2421 
2422 #undef READ_STAT
2423 
2424 #ifdef notdef
2425 	ifp->if_collisions +=
2426 	   (sc->bge_rdata->bge_info.bge_stats.dot3StatsSingleCollisionFrames +
2427 	   sc->bge_rdata->bge_info.bge_stats.dot3StatsMultipleCollisionFrames +
2428 	   sc->bge_rdata->bge_info.bge_stats.dot3StatsExcessiveCollisions +
2429 	   sc->bge_rdata->bge_info.bge_stats.dot3StatsLateCollisions) -
2430 	   ifp->if_collisions;
2431 #endif
2432 }
2433 
2434 /*
2435  * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
2436  * pointers to descriptors.
2437  */
2438 static int
2439 bge_encap(struct bge_softc *sc, struct mbuf *m_head, uint32_t *txidx)
2440 {
2441 	struct bge_tx_bd *d = NULL;
2442 	uint16_t csum_flags = 0;
2443 	struct ifvlan *ifv = NULL;
2444 	struct bge_dmamap_arg ctx;
2445 	bus_dma_segment_t segs[BGE_NSEG_NEW];
2446 	bus_dmamap_t map;
2447 	int error, maxsegs, idx, i;
2448 
2449 	if ((m_head->m_flags & (M_PROTO1|M_PKTHDR)) == (M_PROTO1|M_PKTHDR) &&
2450 	    m_head->m_pkthdr.rcvif != NULL &&
2451 	    m_head->m_pkthdr.rcvif->if_type == IFT_L2VLAN)
2452 		ifv = m_head->m_pkthdr.rcvif->if_softc;
2453 
2454 	if (m_head->m_pkthdr.csum_flags) {
2455 		if (m_head->m_pkthdr.csum_flags & CSUM_IP)
2456 			csum_flags |= BGE_TXBDFLAG_IP_CSUM;
2457 		if (m_head->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
2458 			csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
2459 		if (m_head->m_flags & M_LASTFRAG)
2460 			csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
2461 		else if (m_head->m_flags & M_FRAG)
2462 			csum_flags |= BGE_TXBDFLAG_IP_FRAG;
2463 	}
2464 
2465 	idx = *txidx;
2466 	map = sc->bge_cdata.bge_tx_dmamap[idx];
2467 
2468 	maxsegs = (BGE_TX_RING_CNT - sc->bge_txcnt) - BGE_NSEG_RSVD;
2469 	KASSERT(maxsegs >= BGE_NSEG_SPARE,
2470 		("not enough segments %d\n", maxsegs));
2471 
2472 	if (maxsegs > BGE_NSEG_NEW)
2473 		maxsegs = BGE_NSEG_NEW;
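	/*
	 * Budget note: BGE_NSEG_RSVD descriptors are held back to keep
	 * the producer from running onto unprocessed entries, and one
	 * mapping is capped at BGE_NSEG_NEW segments to match the segs[]
	 * array above.
	 */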
2474 
2475 	/*
2476 	 * Pad outbound frame to BGE_MIN_FRAME for an unusual reason.
2477 	 * The bge hardware will pad out Tx runts to BGE_MIN_FRAME,
2478 	 * but when such padded frames employ the bge IP/TCP checksum
2479 	 * offload, the hardware checksum assist gives incorrect results
2480 	 * (possibly from incorporating its own padding into the UDP/TCP
2481 	 * checksum; who knows).  If we pad such runts with zeros, the
2482 	 * onboard checksum comes out correct.  We do this by pretending
2483 	 * the mbuf chain has too many fragments so the coalescing code
2484 	 * below can assemble the packet into a single buffer that's
2485 	 * padded out to the minimum frame size.
2486 	 */
2487 	if ((csum_flags & BGE_TXBDFLAG_TCP_UDP_CSUM) &&
2488 	    m_head->m_pkthdr.len < BGE_MIN_FRAME) {
2489 		error = E2BIG;
2490 	} else {
2491 		ctx.bge_segs = segs;
2492 		ctx.bge_maxsegs = maxsegs;
2493 		error = bus_dmamap_load_mbuf(sc->bge_cdata.bge_mtag, map,
2494 					     m_head, bge_dma_map_mbuf, &ctx,
2495 					     BUS_DMA_NOWAIT);
2496 	}
2497 	if (error == E2BIG || ctx.bge_maxsegs == 0) {
2498 		struct mbuf *m_new;
2499 
2500 		m_new = m_defrag(m_head, MB_DONTWAIT);
2501 		if (m_new == NULL) {
2502 			if_printf(&sc->arpcom.ac_if,
2503 				  "could not defrag TX mbuf\n");
2504 			error = ENOBUFS;
2505 			goto back;
2506 		} else {
2507 			m_head = m_new;
2508 		}
2509 
2510 		/*
2511 		 * Manually pad short frames, and zero the pad space
2512 		 * to avoid leaking data.
2513 		 */
2514 		if ((csum_flags & BGE_TXBDFLAG_TCP_UDP_CSUM) &&
2515 		    m_head->m_pkthdr.len < BGE_MIN_FRAME) {
2516 			int pad_len = BGE_MIN_FRAME - m_head->m_pkthdr.len;
2517 
2518 			bzero(mtod(m_head, char *) + m_head->m_pkthdr.len,
2519 			      pad_len);
2520 			m_head->m_pkthdr.len += pad_len;
2521 			m_head->m_len = m_head->m_pkthdr.len;
2522 		}
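		/*
		 * The bzero() above writes past m_len into the mbuf's data
		 * area.  This relies on m_defrag() having produced a single
		 * cluster mbuf with room for the pad, which should hold
		 * since BGE_MIN_FRAME is far smaller than MCLBYTES.
		 */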
2523 
2524 		ctx.bge_segs = segs;
2525 		ctx.bge_maxsegs = maxsegs;
2526 		error = bus_dmamap_load_mbuf(sc->bge_cdata.bge_mtag, map,
2527 					     m_head, bge_dma_map_mbuf, &ctx,
2528 					     BUS_DMA_NOWAIT);
2529 		if (error || ctx.bge_maxsegs == 0) {
2530 			if_printf(&sc->arpcom.ac_if,
2531 				  "could not defrag TX mbuf\n");
2532 			if (error == 0)
2533 				error = E2BIG;
2534 			goto back;
2535 		}
2536 	} else if (error) {
2537 		if_printf(&sc->arpcom.ac_if, "could not map TX mbuf\n");
2538 		goto back;
2539 	}
2540 
2541 	bus_dmamap_sync(sc->bge_cdata.bge_mtag, map, BUS_DMASYNC_PREWRITE);
2542 
2543 	for (i = 0; ; i++) {
2544 		d = &sc->bge_ldata.bge_tx_ring[idx];
2545 
2546 		d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr);
2547 		d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr);
2548 		d->bge_len = segs[i].ds_len;
2549 		d->bge_flags = csum_flags;
2550 
2551 		if (i == ctx.bge_maxsegs - 1)
2552 			break;
2553 		BGE_INC(idx, BGE_TX_RING_CNT);
2554 	}
2555 	/* Mark the last segment as end of packet... */
2556 	d->bge_flags |= BGE_TXBDFLAG_END;
2557 
2558 	/* Set vlan tag to the first segment of the packet. */
2559 	d = &sc->bge_ldata.bge_tx_ring[*txidx];
2560 	if (ifv != NULL) {
2561 		d->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
2562 		d->bge_vlan_tag = ifv->ifv_tag;
2563 	} else {
2564 		d->bge_vlan_tag = 0;
2565 	}
2566 
2567 	/*
2568 	 * Ensure that the map for this transmission is placed at
2569 	 * the array index of the last descriptor in this chain.
2570 	 */
2571 	sc->bge_cdata.bge_tx_dmamap[*txidx] = sc->bge_cdata.bge_tx_dmamap[idx];
2572 	sc->bge_cdata.bge_tx_dmamap[idx] = map;
2573 	sc->bge_cdata.bge_tx_chain[idx] = m_head;
2574 	sc->bge_txcnt += ctx.bge_maxsegs;
2575 
2576 	BGE_INC(idx, BGE_TX_RING_CNT);
2577 	*txidx = idx;
2578 back:
2579 	if (error)
2580 		m_freem(m_head);
2581 	return error;
2582 }
2583 
2584 /*
2585  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
2586  * to the mbuf data regions directly in the transmit descriptors.
2587  */
2588 static void
2589 bge_start(struct ifnet *ifp)
2590 {
2591 	struct bge_softc *sc = ifp->if_softc;
2592 	struct mbuf *m_head = NULL;
2593 	uint32_t prodidx;
2594 	int need_trans;
2595 
2596 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING ||
2597 	    !sc->bge_link)
2598 		return;
2599 
2600 	prodidx = sc->bge_tx_prodidx;
2601 
2602 	need_trans = 0;
2603 	while (sc->bge_cdata.bge_tx_chain[prodidx] == NULL) {
2604 		m_head = ifq_poll(&ifp->if_snd);
2605 		if (m_head == NULL)
2606 			break;
2607 
2608 		/*
2609 		 * XXX
2610 		 * The code inside the if() block is never reached since we
2611 		 * must mark CSUM_IP_FRAGS in our if_hwassist to start getting
2612 		 * requests to checksum TCP/UDP in a fragmented packet.
2613 		 *
2614 		 * XXX
2615 		 * safety overkill.  If this is a fragmented packet chain
2616 		 * with delayed TCP/UDP checksums, then only encapsulate
2617 		 * it if we have enough descriptors to handle the entire
2618 		 * chain at once.
2619 		 * (paranoia -- may not actually be needed)
2620 		 */
2621 		if (m_head->m_flags & M_FIRSTFRAG &&
2622 		    m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
2623 			if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
2624 			    m_head->m_pkthdr.csum_data + 16) {
2625 				ifp->if_flags |= IFF_OACTIVE;
2626 				break;
2627 			}
2628 		}
2629 
2630 		/*
2631 		 * Sanity check: avoid coming within BGE_NSEG_RSVD
2632 		 * descriptors of the end of the ring.  Also make
2633 		 * sure there are BGE_NSEG_SPARE descriptors for
2634 		 * jumbo buffers' defragmentation.
2635 		 */
2636 		if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
2637 		    (BGE_NSEG_RSVD + BGE_NSEG_SPARE)) {
2638 			ifp->if_flags |= IFF_OACTIVE;
2639 			break;
2640 		}
2641 
2642 		/*
2643 		 * Dequeue the packet before encapsulation, since
2644 		 * bge_encap() may free the packet if error happens.
2645 		 */
2646 		ifq_dequeue(&ifp->if_snd, m_head);
2647 
2648 		/*
2649 		 * Pack the data into the transmit ring. If we
2650 		 * don't have room, set the OACTIVE flag and wait
2651 		 * for the NIC to drain the ring.
2652 		 */
2653 		if (bge_encap(sc, m_head, &prodidx)) {
2654 			ifp->if_flags |= IFF_OACTIVE;
2655 			break;
2656 		}
2657 		need_trans = 1;
2658 
2659 		BPF_MTAP(ifp, m_head);
2660 	}
2661 
2662 	if (!need_trans)
2663 		return;
2664 
2665 	/* Transmit */
2666 	CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
2667 	/* 5700 b2 errata */
2668 	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
2669 		CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
2670 
2671 	sc->bge_tx_prodidx = prodidx;
2672 
2673 	/*
2674 	 * Set a timeout in case the chip goes out to lunch.
2675 	 */
2676 	ifp->if_timer = 5;
2677 }
2678 
2679 static void
2680 bge_init(void *xsc)
2681 {
2682 	struct bge_softc *sc = xsc;
2683 	struct ifnet *ifp = &sc->arpcom.ac_if;
2684 	uint16_t *m;
2685 
2686 	ASSERT_SERIALIZED(ifp->if_serializer);
2687 
2688 	if (ifp->if_flags & IFF_RUNNING)
2689 		return;
2690 
2691 	/* Cancel pending I/O and flush buffers. */
2692 	bge_stop(sc);
2693 	bge_reset(sc);
2694 	bge_chipinit(sc);
2695 
2696 	/*
2697 	 * Init the various state machines, ring
2698 	 * control blocks and firmware.
2699 	 */
2700 	if (bge_blockinit(sc)) {
2701 		if_printf(ifp, "initialization failure\n");
2702 		return;
2703 	}
2704 
2705 	/* Specify MTU. */
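	/*
	 * The value written below covers a complete frame on the wire:
	 * payload plus the 14-byte Ethernet header, 4-byte FCS, and a
	 * 4-byte allowance (EVL_ENCAPLEN) for an 802.1Q VLAN tag.
	 */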
2706 	CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
2707 	    ETHER_HDR_LEN + ETHER_CRC_LEN + EVL_ENCAPLEN);
2708 
2709 	/* Load our MAC address. */
2710 	m = (uint16_t *)&sc->arpcom.ac_enaddr[0];
2711 	CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
2712 	CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
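	/*
	 * The station address is programmed as three big-endian 16-bit
	 * words: octets 0-1 in one register, octets 2-5 in the other.
	 * htons() supplies network byte order regardless of host
	 * endianness.
	 */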
2713 
2714 	/* Enable or disable promiscuous mode as needed. */
2715 	bge_setpromisc(sc);
2716 
2717 	/* Program multicast filter. */
2718 	bge_setmulti(sc);
2719 
2720 	/* Init RX ring. */
2721 	bge_init_rx_ring_std(sc);
2722 
2723 	/*
2724 	 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
2725 	 * memory to ensure that the chip has in fact read the first
2726 	 * entry of the ring.
2727 	 */
2728 	if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
2729 		uint32_t		v, i;
2730 		for (i = 0; i < 10; i++) {
2731 			DELAY(20);
2732 			v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
2733 			if (v == (MCLBYTES - ETHER_ALIGN))
2734 				break;
2735 		}
2736 		if (i == 10)
2737 			if_printf(ifp, "5705 A0 chip failed to load RX ring\n");
2738 	}
2739 
2740 	/* Init jumbo RX ring. */
2741 	if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
2742 		bge_init_rx_ring_jumbo(sc);
2743 
2744 	/* Init our RX return ring index */
2745 	sc->bge_rx_saved_considx = 0;
2746 
2747 	/* Init TX ring. */
2748 	bge_init_tx_ring(sc);
2749 
2750 	/* Turn on transmitter */
2751 	BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE);
2752 
2753 	/* Turn on receiver */
2754 	BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
2755 
2756 	/* Tell firmware we're alive. */
2757 	BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
2758 
2759 	/* Enable host interrupts. */
2760 	BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
2761 	BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
2762 	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
2763 
2764 	bge_ifmedia_upd(ifp);
2765 
2766 	ifp->if_flags |= IFF_RUNNING;
2767 	ifp->if_flags &= ~IFF_OACTIVE;
2768 
2769 	callout_reset(&sc->bge_stat_timer, hz, bge_tick, sc);
2770 }
2771 
2772 /*
2773  * Set media options.
2774  */
2775 static int
2776 bge_ifmedia_upd(struct ifnet *ifp)
2777 {
2778 	struct bge_softc *sc = ifp->if_softc;
2779 
2780 	/* If this is a 1000baseX NIC, enable the TBI port. */
2781 	if (sc->bge_flags & BGE_FLAG_TBI) {
2782 		struct ifmedia *ifm = &sc->bge_ifmedia;
2783 
2784 		if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2785 			return(EINVAL);
2786 
2787 		switch(IFM_SUBTYPE(ifm->ifm_media)) {
2788 		case IFM_AUTO:
2789 			/*
2790 			 * The BCM5704 ASIC appears to have a special
2791 			 * mechanism for programming the autoneg
2792 			 * advertisement registers in TBI mode.
2793 			 */
2794 			if (!bge_fake_autoneg &&
2795 			    sc->bge_asicrev == BGE_ASICREV_BCM5704) {
2796 				uint32_t sgdig;
2797 
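				/*
				 * The sequence below acts as a latch pulse:
				 * advertise autoneg plus both pause bits,
				 * assert BGE_SGDIGCFG_SEND for ~5us, then
				 * drop SEND while leaving the advertisement
				 * in place.  (Interpretation inferred from
				 * the code; this path is not documented.)
				 */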
2798 				CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
2799 				sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
2800 				sgdig |= BGE_SGDIGCFG_AUTO |
2801 					 BGE_SGDIGCFG_PAUSE_CAP |
2802 					 BGE_SGDIGCFG_ASYM_PAUSE;
2803 				CSR_WRITE_4(sc, BGE_SGDIG_CFG,
2804 					    sgdig | BGE_SGDIGCFG_SEND);
2805 				DELAY(5);
2806 				CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
2807 			}
2808 			break;
2809 		case IFM_1000_SX:
2810 			if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
2811 				BGE_CLRBIT(sc, BGE_MAC_MODE,
2812 				    BGE_MACMODE_HALF_DUPLEX);
2813 			} else {
2814 				BGE_SETBIT(sc, BGE_MAC_MODE,
2815 				    BGE_MACMODE_HALF_DUPLEX);
2816 			}
2817 			break;
2818 		default:
2819 			return(EINVAL);
2820 		}
2821 	} else {
2822 		struct mii_data *mii = device_get_softc(sc->bge_miibus);
2823 
2824 		sc->bge_link_evt++;
2825 		sc->bge_link = 0;
2826 		if (mii->mii_instance) {
2827 			struct mii_softc *miisc;
2828 
2829 			LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
2830 				mii_phy_reset(miisc);
2831 		}
2832 		mii_mediachg(mii);
2833 	}
2834 	return(0);
2835 }
2836 
2837 /*
2838  * Report current media status.
2839  */
2840 static void
2841 bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2842 {
2843 	struct bge_softc *sc = ifp->if_softc;
2844 
2845 	if (sc->bge_flags & BGE_FLAG_TBI) {
2846 		ifmr->ifm_status = IFM_AVALID;
2847 		ifmr->ifm_active = IFM_ETHER;
2848 		if (CSR_READ_4(sc, BGE_MAC_STS) &
2849 		    BGE_MACSTAT_TBI_PCS_SYNCHED) {
2850 			ifmr->ifm_status |= IFM_ACTIVE;
2851 		} else {
2852 			ifmr->ifm_active |= IFM_NONE;
2853 			return;
2854 		}
2855 
2856 		ifmr->ifm_active |= IFM_1000_SX;
2857 		if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
2858 			ifmr->ifm_active |= IFM_HDX;
2859 		else
2860 			ifmr->ifm_active |= IFM_FDX;
2861 	} else {
2862 		struct mii_data *mii = device_get_softc(sc->bge_miibus);
2863 
2864 		mii_pollstat(mii);
2865 		ifmr->ifm_active = mii->mii_media_active;
2866 		ifmr->ifm_status = mii->mii_media_status;
2867 	}
2868 }
2869 
2870 static int
2871 bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
2872 {
2873 	struct bge_softc *sc = ifp->if_softc;
2874 	struct ifreq *ifr = (struct ifreq *) data;
2875 	int mask, error = 0;
2876 	struct mii_data *mii;
2877 
2878 	ASSERT_SERIALIZED(ifp->if_serializer);
2879 
2880 	switch(command) {
2881 	case SIOCSIFMTU:
2882 		if ((!BGE_IS_JUMBO_CAPABLE(sc) && ifr->ifr_mtu > ETHERMTU) ||
2883 		    (BGE_IS_JUMBO_CAPABLE(sc) &&
2884 		     ifr->ifr_mtu > BGE_JUMBO_MTU)) {
2885 			error = EINVAL;
2886 		} else if (ifp->if_mtu != ifr->ifr_mtu) {
2887 			ifp->if_mtu = ifr->ifr_mtu;
2888 			ifp->if_flags &= ~IFF_RUNNING;
2889 			bge_init(sc);
2890 		}
2891 		break;
2892 	case SIOCSIFFLAGS:
2893 		if (ifp->if_flags & IFF_UP) {
2894 			if (ifp->if_flags & IFF_RUNNING) {
2895 				int flags = ifp->if_flags ^ sc->bge_if_flags;
2896 
2897 				/*
2898 				 * If only the state of the PROMISC flag
2899 				 * changed, then just use the 'set promisc
2900 				 * mode' command instead of reinitializing
2901 				 * the entire NIC. Doing a full re-init
2902 				 * means reloading the firmware and waiting
2903 				 * for it to start up, which may take a
2904 				 * second or two.  Similarly for ALLMULTI.
2905 				 */
2906 				if (flags & IFF_PROMISC)
2907 					bge_setpromisc(sc);
2908 				if (flags & IFF_ALLMULTI)
2909 					bge_setmulti(sc);
2910 			} else {
2911 				bge_init(sc);
2912 			}
2913 		} else {
2914 			if (ifp->if_flags & IFF_RUNNING)
2915 				bge_stop(sc);
2916 		}
2917 		sc->bge_if_flags = ifp->if_flags;
2918 		error = 0;
2919 		break;
2920 	case SIOCADDMULTI:
2921 	case SIOCDELMULTI:
2922 		if (ifp->if_flags & IFF_RUNNING) {
2923 			bge_setmulti(sc);
2924 			error = 0;
2925 		}
2926 		break;
2927 	case SIOCSIFMEDIA:
2928 	case SIOCGIFMEDIA:
2929 		if (sc->bge_flags & BGE_FLAG_TBI) {
2930 			error = ifmedia_ioctl(ifp, ifr,
2931 			    &sc->bge_ifmedia, command);
2932 		} else {
2933 			mii = device_get_softc(sc->bge_miibus);
2934 			error = ifmedia_ioctl(ifp, ifr,
2935 			    &mii->mii_media, command);
2936 		}
2937 		break;
2938 	case SIOCSIFCAP:
2939 		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
2940 		if (mask & IFCAP_HWCSUM) {
2941 			ifp->if_capenable ^= IFCAP_HWCSUM;
2942 			if (IFCAP_HWCSUM & ifp->if_capenable)
2943 				ifp->if_hwassist = BGE_CSUM_FEATURES;
2944 			else
2945 				ifp->if_hwassist = 0;
2946 		}
2947 		error = 0;
2948 		break;
2949 	default:
2950 		error = ether_ioctl(ifp, command, data);
2951 		break;
2952 	}
2953 	return(error);
2954 }
2955 
2956 static void
2957 bge_watchdog(struct ifnet *ifp)
2958 {
2959 	struct bge_softc *sc = ifp->if_softc;
2960 
2961 	if_printf(ifp, "watchdog timeout -- resetting\n");
2962 
2963 	ifp->if_flags &= ~IFF_RUNNING;
2964 	bge_init(sc);
2965 
2966 	ifp->if_oerrors++;
2967 
2968 	if (!ifq_is_empty(&ifp->if_snd))
2969 		ifp->if_start(ifp);
2970 }
2971 
2972 /*
2973  * Stop the adapter and free any mbufs allocated to the
2974  * RX and TX lists.
2975  */
2976 static void
2977 bge_stop(struct bge_softc *sc)
2978 {
2979 	struct ifnet *ifp = &sc->arpcom.ac_if;
2980 	struct ifmedia_entry *ifm;
2981 	struct mii_data *mii = NULL;
2982 	int mtmp, itmp;
2983 
2984 	ASSERT_SERIALIZED(ifp->if_serializer);
2985 
2986 	if ((sc->bge_flags & BGE_FLAG_TBI) == 0)
2987 		mii = device_get_softc(sc->bge_miibus);
2988 
2989 	callout_stop(&sc->bge_stat_timer);
2990 
2991 	/*
2992 	 * Disable all of the receiver blocks
2993 	 */
2994 	BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
2995 	BGE_CLRBIT(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
2996 	BGE_CLRBIT(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
2997 	if (!BGE_IS_5705_PLUS(sc))
2998 		BGE_CLRBIT(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
2999 	BGE_CLRBIT(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
3000 	BGE_CLRBIT(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
3001 	BGE_CLRBIT(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
3002 
3003 	/*
3004 	 * Disable all of the transmit blocks
3005 	 */
3006 	BGE_CLRBIT(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
3007 	BGE_CLRBIT(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
3008 	BGE_CLRBIT(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
3009 	BGE_CLRBIT(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
3010 	BGE_CLRBIT(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
3011 	if (!BGE_IS_5705_PLUS(sc))
3012 		BGE_CLRBIT(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
3013 	BGE_CLRBIT(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
3014 
3015 	/*
3016 	 * Shut down all of the memory managers and related
3017 	 * state machines.
3018 	 */
3019 	BGE_CLRBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
3020 	BGE_CLRBIT(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
3021 	if (!BGE_IS_5705_PLUS(sc))
3022 		BGE_CLRBIT(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
3023 	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
3024 	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
3025 	if (!BGE_IS_5705_PLUS(sc)) {
3026 		BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
3027 		BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
3028 	}
3029 
3030 	/* Disable host interrupts. */
3031 	BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
3032 	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
3033 
3034 	/*
3035 	 * Tell firmware we're shutting down.
3036 	 */
3037 	BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3038 
3039 	/* Free the RX lists. */
3040 	bge_free_rx_ring_std(sc);
3041 
3042 	/* Free jumbo RX list. */
3043 	if (BGE_IS_JUMBO_CAPABLE(sc))
3044 		bge_free_rx_ring_jumbo(sc);
3045 
3046 	/* Free TX buffers. */
3047 	bge_free_tx_ring(sc);
3048 
3049 	/*
3050 	 * Isolate/power down the PHY, but leave the media selection
3051 	 * unchanged so that things will be put back to normal when
3052 	 * we bring the interface back up.
3053 	 */
3054 	if ((sc->bge_flags & BGE_FLAG_TBI) == 0) {
3055 		itmp = ifp->if_flags;
3056 		ifp->if_flags |= IFF_UP;
3057 		ifm = mii->mii_media.ifm_cur;
3058 		mtmp = ifm->ifm_media;
3059 		ifm->ifm_media = IFM_ETHER|IFM_NONE;
3060 		mii_mediachg(mii);
3061 		ifm->ifm_media = mtmp;
3062 		ifp->if_flags = itmp;
3063 	}
3064 
3065 	sc->bge_link = 0;
3066 
3067 	sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
3068 
3069 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
3070 	ifp->if_timer = 0;
3071 }
3072 
3073 /*
3074  * Stop all chip I/O so that the kernel's probe routines don't
3075  * get confused by errant DMAs when rebooting.
3076  */
3077 static void
3078 bge_shutdown(device_t dev)
3079 {
3080 	struct bge_softc *sc = device_get_softc(dev);
3081 	struct ifnet *ifp = &sc->arpcom.ac_if;
3082 
3083 	lwkt_serialize_enter(ifp->if_serializer);
3084 	bge_stop(sc);
3085 	bge_reset(sc);
3086 	lwkt_serialize_exit(ifp->if_serializer);
3087 }
3088 
3089 static int
3090 bge_suspend(device_t dev)
3091 {
3092 	struct bge_softc *sc = device_get_softc(dev);
3093 	struct ifnet *ifp = &sc->arpcom.ac_if;
3094 
3095 	lwkt_serialize_enter(ifp->if_serializer);
3096 	bge_stop(sc);
3097 	lwkt_serialize_exit(ifp->if_serializer);
3098 
3099 	return 0;
3100 }
3101 
3102 static int
3103 bge_resume(device_t dev)
3104 {
3105 	struct bge_softc *sc = device_get_softc(dev);
3106 	struct ifnet *ifp = &sc->arpcom.ac_if;
3107 
3108 	lwkt_serialize_enter(ifp->if_serializer);
3109 
3110 	if (ifp->if_flags & IFF_UP) {
3111 		bge_init(sc);
3112 
3113 		if (!ifq_is_empty(&ifp->if_snd))
3114 			ifp->if_start(ifp);
3115 	}
3116 
3117 	lwkt_serialize_exit(ifp->if_serializer);
3118 
3119 	return 0;
3120 }
3121 
3122 static void
3123 bge_setpromisc(struct bge_softc *sc)
3124 {
3125 	struct ifnet *ifp = &sc->arpcom.ac_if;
3126 
3127 	if (ifp->if_flags & IFF_PROMISC)
3128 		BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
3129 	else
3130 		BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
3131 }
3132 
3133 static void
3134 bge_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
3135 {
3136 	struct bge_dmamap_arg *ctx = arg;
3137 
3138 	if (error)
3139 		return;
3140 
3141 	KASSERT(nsegs == 1 && ctx->bge_maxsegs == 1,
3142 		("only one segment is allowed\n"));
3143 
3144 	ctx->bge_segs[0] = *segs;
3145 }
3146 
3147 static void
3148 bge_dma_map_mbuf(void *arg, bus_dma_segment_t *segs, int nsegs,
3149 		 bus_size_t mapsz __unused, int error)
3150 {
3151 	struct bge_dmamap_arg *ctx = arg;
3152 	int i;
3153 
3154 	if (error)
3155 		return;
3156 
3157 	if (nsegs > ctx->bge_maxsegs) {
3158 		ctx->bge_maxsegs = 0;
3159 		return;
3160 	}
3161 
3162 	ctx->bge_maxsegs = nsegs;
3163 	for (i = 0; i < nsegs; ++i)
3164 		ctx->bge_segs[i] = segs[i];
3165 }
3166 
3167 static void
3168 bge_dma_free(struct bge_softc *sc)
3169 {
3170 	int i;
3171 
3172 	/* Destroy RX/TX mbuf DMA stuffs. */
3173 	if (sc->bge_cdata.bge_mtag != NULL) {
3174 		for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
3175 			if (sc->bge_cdata.bge_rx_std_dmamap[i]) {
3176 				bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
3177 				    sc->bge_cdata.bge_rx_std_dmamap[i]);
3178 			}
3179 		}
3180 
3181 		for (i = 0; i < BGE_TX_RING_CNT; i++) {
3182 			if (sc->bge_cdata.bge_tx_dmamap[i]) {
3183 				bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
3184 				    sc->bge_cdata.bge_tx_dmamap[i]);
3185 			}
3186 		}
3187 		bus_dma_tag_destroy(sc->bge_cdata.bge_mtag);
3188 	}
3189 
3190 	/* Destroy standard RX ring */
3191 	bge_dma_block_free(sc->bge_cdata.bge_rx_std_ring_tag,
3192 			   sc->bge_cdata.bge_rx_std_ring_map,
3193 			   sc->bge_ldata.bge_rx_std_ring);
3194 
3195 	if (BGE_IS_JUMBO_CAPABLE(sc))
3196 		bge_free_jumbo_mem(sc);
3197 
3198 	/* Destroy RX return ring */
3199 	bge_dma_block_free(sc->bge_cdata.bge_rx_return_ring_tag,
3200 			   sc->bge_cdata.bge_rx_return_ring_map,
3201 			   sc->bge_ldata.bge_rx_return_ring);
3202 
3203 	/* Destroy TX ring */
3204 	bge_dma_block_free(sc->bge_cdata.bge_tx_ring_tag,
3205 			   sc->bge_cdata.bge_tx_ring_map,
3206 			   sc->bge_ldata.bge_tx_ring);
3207 
3208 	/* Destroy status block */
3209 	bge_dma_block_free(sc->bge_cdata.bge_status_tag,
3210 			   sc->bge_cdata.bge_status_map,
3211 			   sc->bge_ldata.bge_status_block);
3212 
3213 	/* Destroy statistics block */
3214 	bge_dma_block_free(sc->bge_cdata.bge_stats_tag,
3215 			   sc->bge_cdata.bge_stats_map,
3216 			   sc->bge_ldata.bge_stats);
3217 
3218 	/* Destroy the parent tag */
3219 	if (sc->bge_cdata.bge_parent_tag != NULL)
3220 		bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag);
3221 }
3222 
3223 static int
3224 bge_dma_alloc(struct bge_softc *sc)
3225 {
3226 	struct ifnet *ifp = &sc->arpcom.ac_if;
3227 	int nseg, i, error;
3228 
3229 	/*
3230 	 * Allocate the parent bus DMA tag appropriate for PCI.
3231 	 */
3232 	error = bus_dma_tag_create(NULL, 1, 0,
3233 				   BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3234 				   NULL, NULL,
3235 				   MAXBSIZE, BGE_NSEG_NEW,
3236 				   BUS_SPACE_MAXSIZE_32BIT,
3237 				   0, &sc->bge_cdata.bge_parent_tag);
3238 	if (error) {
3239 		if_printf(ifp, "could not allocate parent dma tag\n");
3240 		return error;
3241 	}
3242 
3243 	/*
3244 	 * Create DMA tag for mbufs.
3245 	 */
3246 	nseg = BGE_NSEG_NEW;
3247 	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1, 0,
3248 				   BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3249 				   NULL, NULL,
3250 				   MCLBYTES * nseg, nseg, MCLBYTES,
3251 				   BUS_DMA_ALLOCNOW, &sc->bge_cdata.bge_mtag);
3252 	if (error) {
3253 		if_printf(ifp, "could not allocate mbuf dma tag\n");
3254 		return error;
3255 	}
3256 
3257 	/*
3258 	 * Create DMA maps for TX/RX mbufs.
3259 	 */
3260 	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
3261 		error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0,
3262 					  &sc->bge_cdata.bge_rx_std_dmamap[i]);
3263 		if (error) {
3264 			int j;
3265 
3266 			for (j = 0; j < i; ++j) {
3267 				bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
3268 					sc->bge_cdata.bge_rx_std_dmamap[j]);
3269 			}
3270 			bus_dma_tag_destroy(sc->bge_cdata.bge_mtag);
3271 			sc->bge_cdata.bge_mtag = NULL;
3272 
3273 			if_printf(ifp, "could not create DMA map for RX\n");
3274 			return error;
3275 		}
3276 	}
3277 
3278 	for (i = 0; i < BGE_TX_RING_CNT; i++) {
3279 		error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0,
3280 					  &sc->bge_cdata.bge_tx_dmamap[i]);
3281 		if (error) {
3282 			int j;
3283 
3284 			for (j = 0; j < BGE_STD_RX_RING_CNT; ++j) {
3285 				bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
3286 					sc->bge_cdata.bge_rx_std_dmamap[j]);
3287 			}
3288 			for (j = 0; j < i; ++j) {
3289 				bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
3290 					sc->bge_cdata.bge_tx_dmamap[j]);
3291 			}
3292 			bus_dma_tag_destroy(sc->bge_cdata.bge_mtag);
3293 			sc->bge_cdata.bge_mtag = NULL;
3294 
3295 			if_printf(ifp, "could not create DMA map for TX\n");
3296 			return error;
3297 		}
3298 	}
3299 
3300 	/*
3301 	 * Create DMA stuffs for standard RX ring.
3302 	 */
3303 	error = bge_dma_block_alloc(sc, BGE_STD_RX_RING_SZ,
3304 				    &sc->bge_cdata.bge_rx_std_ring_tag,
3305 				    &sc->bge_cdata.bge_rx_std_ring_map,
3306 				    (void **)&sc->bge_ldata.bge_rx_std_ring,
3307 				    &sc->bge_ldata.bge_rx_std_ring_paddr);
3308 	if (error) {
3309 		if_printf(ifp, "could not create std RX ring\n");
3310 		return error;
3311 	}
3312 
3313 	/*
3314 	 * Create jumbo buffer pool.
3315 	 */
3316 	if (BGE_IS_JUMBO_CAPABLE(sc)) {
3317 		error = bge_alloc_jumbo_mem(sc);
3318 		if (error) {
3319 			if_printf(ifp, "could not create jumbo buffer pool\n");
3320 			return error;
3321 		}
3322 	}
3323 
3324 	/*
3325 	 * Create DMA stuffs for RX return ring.
3326 	 */
3327 	error = bge_dma_block_alloc(sc, BGE_RX_RTN_RING_SZ(sc),
3328 				    &sc->bge_cdata.bge_rx_return_ring_tag,
3329 				    &sc->bge_cdata.bge_rx_return_ring_map,
3330 				    (void **)&sc->bge_ldata.bge_rx_return_ring,
3331 				    &sc->bge_ldata.bge_rx_return_ring_paddr);
3332 	if (error) {
3333 		if_printf(ifp, "could not create RX ret ring\n");
3334 		return error;
3335 	}
3336 
3337 	/*
3338 	 * Create DMA stuffs for TX ring.
3339 	 */
3340 	error = bge_dma_block_alloc(sc, BGE_TX_RING_SZ,
3341 				    &sc->bge_cdata.bge_tx_ring_tag,
3342 				    &sc->bge_cdata.bge_tx_ring_map,
3343 				    (void **)&sc->bge_ldata.bge_tx_ring,
3344 				    &sc->bge_ldata.bge_tx_ring_paddr);
3345 	if (error) {
3346 		if_printf(ifp, "could not create TX ring\n");
3347 		return error;
3348 	}
3349 
3350 	/*
3351 	 * Create DMA stuffs for status block.
3352 	 */
3353 	error = bge_dma_block_alloc(sc, BGE_STATUS_BLK_SZ,
3354 				    &sc->bge_cdata.bge_status_tag,
3355 				    &sc->bge_cdata.bge_status_map,
3356 				    (void **)&sc->bge_ldata.bge_status_block,
3357 				    &sc->bge_ldata.bge_status_block_paddr);
3358 	if (error) {
3359 		if_printf(ifp, "could not create status block\n");
3360 		return error;
3361 	}
3362 
3363 	/*
3364 	 * Create DMA stuffs for statistics block.
3365 	 */
3366 	error = bge_dma_block_alloc(sc, BGE_STATS_SZ,
3367 				    &sc->bge_cdata.bge_stats_tag,
3368 				    &sc->bge_cdata.bge_stats_map,
3369 				    (void **)&sc->bge_ldata.bge_stats,
3370 				    &sc->bge_ldata.bge_stats_paddr);
3371 	if (error) {
3372 		if_printf(ifp, "could not create stats block\n");
3373 		return error;
3374 	}
3375 	return 0;
3376 }
3377 
3378 static int
3379 bge_dma_block_alloc(struct bge_softc *sc, bus_size_t size, bus_dma_tag_t *tag,
3380 		    bus_dmamap_t *map, void **addr, bus_addr_t *paddr)
3381 {
3382 	struct ifnet *ifp = &sc->arpcom.ac_if;
3383 	struct bge_dmamap_arg ctx;
3384 	bus_dma_segment_t seg;
3385 	int error;
3386 
3387 	/*
3388 	 * Create DMA tag
3389 	 */
3390 	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, PAGE_SIZE, 0,
3391 				   BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3392 				   NULL, NULL, size, 1, size, 0, tag);
3393 	if (error) {
3394 		if_printf(ifp, "could not allocate dma tag\n");
3395 		return error;
3396 	}
3397 
3398 	/*
3399 	 * Allocate DMA'able memory
3400 	 */
3401 	error = bus_dmamem_alloc(*tag, addr, BUS_DMA_WAITOK | BUS_DMA_ZERO,
3402 				 map);
3403 	if (error) {
3404 		if_printf(ifp, "could not allocate dma memory\n");
3405 		bus_dma_tag_destroy(*tag);
3406 		*tag = NULL;
3407 		return error;
3408 	}
3409 
3410 	/*
3411 	 * Load the DMA'able memory
3412 	 */
3413 	ctx.bge_maxsegs = 1;
3414 	ctx.bge_segs = &seg;
3415 	error = bus_dmamap_load(*tag, *map, *addr, size, bge_dma_map_addr, &ctx,
3416 				BUS_DMA_WAITOK);
3417 	if (error) {
3418 		if_printf(ifp, "could not load dma memory\n");
3419 		bus_dmamem_free(*tag, *addr, *map);
3420 		bus_dma_tag_destroy(*tag);
3421 		*tag = NULL;
3422 		return error;
3423 	}
3424 	*paddr = ctx.bge_segs[0].ds_addr;
3425 
3426 	return 0;
3427 }
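
/*
 * Example (sketch only, not compiled in): a typical consumer of
 * bge_dma_block_alloc() keeps the returned tag and map so it can
 * bus_dmamap_sync() the block around device access, and unwinds with
 * bge_dma_block_free().  The foo_* names are illustrative only and
 * are not real softc fields.
 */
#if 0
	bus_dma_tag_t foo_tag;
	bus_dmamap_t foo_map;
	void *foo_block;
	bus_addr_t foo_paddr;
	int error;

	error = bge_dma_block_alloc(sc, BGE_STATUS_BLK_SZ, &foo_tag,
				    &foo_map, &foo_block, &foo_paddr);
	if (error)
		return error;

	/* CPU is about to read data the chip DMA'ed into the block. */
	bus_dmamap_sync(foo_tag, foo_map, BUS_DMASYNC_POSTREAD);

	/* ... read foo_block; program foo_paddr into the chip ... */

	bge_dma_block_free(foo_tag, foo_map, foo_block);
#endif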
3428 
3429 static void
3430 bge_dma_block_free(bus_dma_tag_t tag, bus_dmamap_t map, void *addr)
3431 {
3432 	if (tag != NULL) {
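		/*
		 * Tear down in the reverse order of bge_dma_block_alloc():
		 * unload the map, free the DMA memory, then destroy the tag.
		 */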
3433 		bus_dmamap_unload(tag, map);
3434 		bus_dmamem_free(tag, addr, map);
3435 		bus_dma_tag_destroy(tag);
3436 	}
3437 }
3438 
3439 /*
3440  * Grrr. The link status word in the status block does
3441  * not work correctly on the BCM5700 rev AX and BX chips,
3442  * according to all available information. Hence, we have
3443  * to enable MII interrupts in order to properly obtain
3444  * async link changes. Unfortunately, this also means that
3445  * we have to read the MAC status register to detect link
3446  * changes, thereby adding an additional register access to
3447  * the interrupt handler.
3448  *
3449  * XXX: perhaps link state detection procedure used for
3450 	 * BGE_CHIPID_BCM5700_B2 can be used for other BCM5700 revisions.
3451  */
3452 static void
3453 bge_bcm5700_link_upd(struct bge_softc *sc, uint32_t status __unused)
3454 {
3455 	struct ifnet *ifp = &sc->arpcom.ac_if;
3456 	struct mii_data *mii = device_get_softc(sc->bge_miibus);
3457 
3458 	mii_pollstat(mii);
3459 
3460 	if (!sc->bge_link &&
3461 	    (mii->mii_media_status & IFM_ACTIVE) &&
3462 	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
3463 		sc->bge_link++;
3464 		if (bootverbose)
3465 			if_printf(ifp, "link UP\n");
3466 	} else if (sc->bge_link &&
3467 	    (!(mii->mii_media_status & IFM_ACTIVE) ||
3468 	    IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
3469 		sc->bge_link = 0;
3470 		if (bootverbose)
3471 			if_printf(ifp, "link DOWN\n");
3472 	}
3473 
3474 	/*
	 * Ack the interrupt: re-arm the MI interrupt event, read the
	 * PHY ISR to clear the latched interrupt, then unmask PHY
	 * interrupts again via the IMR.
	 */
3475 	CSR_WRITE_4(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_MI_INTERRUPT);
3476 	bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR);
3477 	bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR, BRGPHY_INTRS);
3478 }
3479 
3480 static void
3481 bge_tbi_link_upd(struct bge_softc *sc, uint32_t status)
3482 {
3483 	struct ifnet *ifp = &sc->arpcom.ac_if;
3484 
3485 #define PCS_ENCODE_ERR	(BGE_MACSTAT_PORT_DECODE_ERROR|BGE_MACSTAT_MI_COMPLETE)
3486 
3487 	/*
3488 	 * Sometimes PCS encoding errors are detected in
3489 	 * TBI mode (on fiber NICs), and for some reason
3490 	 * the chip will signal them as link changes.
3491 	 * If we get a link change event, but the 'PCS
3492 	 * encoding error' bit in the MAC status register
3493 	 * is set, don't bother doing a link check.
3494 	 * This avoids spurious "gigabit link up" messages
3495 	 * that sometimes appear on fiber NICs during
3496 	 * periods of heavy traffic.
3497 	 */
3498 	if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
3499 		if (!sc->bge_link) {
3500 			sc->bge_link++;
3501 			if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
3502 				BGE_CLRBIT(sc, BGE_MAC_MODE,
3503 				    BGE_MACMODE_TBI_SEND_CFGS);
3504 			}
3505 			CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
3506 
3507 			if (bootverbose)
3508 				if_printf(ifp, "link UP\n");
3509 
3510 			ifp->if_link_state = LINK_STATE_UP;
3511 			if_link_state_change(ifp);
3512 		}
3513 	} else if ((status & PCS_ENCODE_ERR) != PCS_ENCODE_ERR) {
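		/*
		 * PCS sync is lost and the decode-error signature tested
		 * above is absent, so treat this as a genuine link drop.
		 */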
3514 		if (sc->bge_link) {
3515 			sc->bge_link = 0;
3516 
3517 			if (bootverbose)
3518 				if_printf(ifp, "link DOWN\n");
3519 
3520 			ifp->if_link_state = LINK_STATE_DOWN;
3521 			if_link_state_change(ifp);
3522 		}
3523 	}
3524 
3525 #undef PCS_ENCODE_ERR
3526 
3527 	/* Clear the attention. */
3528 	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
3529 	    BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
3530 	    BGE_MACSTAT_LINK_CHANGED);
3531 }
3532 
3533 static void
3534 bge_copper_link_upd(struct bge_softc *sc, uint32_t status __unused)
3535 {
3536 	/*
3537 	 * Check that the AUTOPOLL bit is set before
3538 	 * processing the event as a real link change.
3539 	 * Turning AUTOPOLL on and off in the MII read/write
3540 	 * functions will often trigger a link status
3541 	 * interrupt for no reason.
3542 	 */
3543 	if (CSR_READ_4(sc, BGE_MI_MODE) & BGE_MIMODE_AUTOPOLL) {
3544 		struct ifnet *ifp = &sc->arpcom.ac_if;
3545 		struct mii_data *mii = device_get_softc(sc->bge_miibus);
3546 
3547 		mii_pollstat(mii);
3548 
3549 		if (!sc->bge_link &&
3550 		    (mii->mii_media_status & IFM_ACTIVE) &&
3551 		    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
3552 			sc->bge_link++;
3553 			if (bootverbose)
3554 				if_printf(ifp, "link UP\n");
3555 		} else if (sc->bge_link &&
3556 		    (!(mii->mii_media_status & IFM_ACTIVE) ||
3557 		    IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
3558 			sc->bge_link = 0;
3559 			if (bootverbose)
3560 				if_printf(ifp, "link DOWN\n");
3561 		}
3562 	}
3563 
3564 	/* Clear the attention. */
3565 	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
3566 	    BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
3567 	    BGE_MACSTAT_LINK_CHANGED);
3568 }
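
/*
 * Example (sketch only, not compiled in): the three handlers above
 * share a signature so the interrupt path can dispatch link attentions
 * through a hook selected at attach time.  The hook name bge_link_upd
 * and this dispatch shape are assumptions for illustration, not a
 * quote of the interrupt handler.
 */
#if 0
	/* In the interrupt handler, once a link change is flagged: */
	uint32_t status = CSR_READ_4(sc, BGE_MAC_STS);

	sc->bge_link_upd(sc, status);
#endif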
3569