xref: /dragonfly/sys/dev/netif/bge/if_bge.c (revision 956939d5)
1 /*
2  * Copyright (c) 2001 Wind River Systems
3  * Copyright (c) 1997, 1998, 1999, 2001
4  *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. All advertising materials mentioning features or use of this software
15  *    must display the following acknowledgement:
16  *	This product includes software developed by Bill Paul.
17  * 4. Neither the name of the author nor the names of any co-contributors
18  *    may be used to endorse or promote products derived from this software
19  *    without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
22  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
25  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31  * THE POSSIBILITY OF SUCH DAMAGE.
32  *
33  * $FreeBSD: src/sys/dev/bge/if_bge.c,v 1.3.2.39 2005/07/03 03:41:18 silby Exp $
34  * $DragonFly: src/sys/dev/netif/bge/if_bge.c,v 1.111 2008/10/22 14:24:24 sephe Exp $
35  *
36  */
37 
38 /*
39  * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
40  *
41  * Written by Bill Paul <wpaul@windriver.com>
42  * Senior Engineer, Wind River Systems
43  */
44 
45 /*
46  * The Broadcom BCM5700 is based on technology originally developed by
47  * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
48  * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
49  * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
50  * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
51  * frames, highly configurable RX filtering, and 16 RX and TX queues
52  * (which, along with RX filter rules, can be used for QoS applications).
53  * Other features, such as TCP segmentation, may be available as part
54  * of value-added firmware updates. Unlike the Tigon I and Tigon II,
55  * firmware images can be stored in hardware and need not be compiled
56  * into the driver.
57  *
58  * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
59  * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
60  *
61  * The BCM5701 is a single-chip solution incorporating both the BCM5700
62  * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
63  * does not support external SSRAM.
64  *
65  * Broadcom also produces a variation of the BCM5700 under the "Altima"
66  * brand name, which is functionally similar but lacks PCI-X support.
67  *
68  * Without external SSRAM, you can have at most 4 TX rings,
69  * and the use of the mini RX ring is disabled. This seems to imply
70  * that these features are simply not available on the BCM5701. As a
71  * result, this driver does not implement any support for the mini RX
72  * ring.
73  */
74 
75 #include "opt_polling.h"
76 
77 #include <sys/param.h>
78 #include <sys/bus.h>
79 #include <sys/endian.h>
80 #include <sys/kernel.h>
81 #include <sys/ktr.h>
82 #include <sys/interrupt.h>
83 #include <sys/mbuf.h>
84 #include <sys/malloc.h>
85 #include <sys/queue.h>
86 #include <sys/rman.h>
87 #include <sys/serialize.h>
88 #include <sys/socket.h>
89 #include <sys/sockio.h>
90 #include <sys/sysctl.h>
91 
92 #include <net/bpf.h>
93 #include <net/ethernet.h>
94 #include <net/if.h>
95 #include <net/if_arp.h>
96 #include <net/if_dl.h>
97 #include <net/if_media.h>
98 #include <net/if_types.h>
99 #include <net/ifq_var.h>
100 #include <net/vlan/if_vlan_var.h>
101 #include <net/vlan/if_vlan_ether.h>
102 
103 #include <dev/netif/mii_layer/mii.h>
104 #include <dev/netif/mii_layer/miivar.h>
105 #include <dev/netif/mii_layer/brgphyreg.h>
106 
107 #include <bus/pci/pcidevs.h>
108 #include <bus/pci/pcireg.h>
109 #include <bus/pci/pcivar.h>
110 
111 #include <dev/netif/bge/if_bgereg.h>
112 
113 /* "device miibus" required.  See GENERIC if you get errors here. */
114 #include "miibus_if.h"
115 
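/*
 * Checksum offload features we advertise, and the minimum Ethernet
 * frame length (ETHER_MIN_LEN less the 4-byte CRC) that short frames
 * must be padded to.
 */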
116 #define BGE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
117 #define BGE_MIN_FRAME		60
118 
119 static const struct bge_type bge_devs[] = {
120 	{ PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3C996,
121 		"3COM 3C996 Gigabit Ethernet" },
122 
123 	{ PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_BCM5700,
124 		"Alteon BCM5700 Gigabit Ethernet" },
125 	{ PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_BCM5701,
126 		"Alteon BCM5701 Gigabit Ethernet" },
127 
128 	{ PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1000,
129 		"Altima AC1000 Gigabit Ethernet" },
130 	{ PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1001,
131 		"Altima AC1002 Gigabit Ethernet" },
132 	{ PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC9100,
133 		"Altima AC9100 Gigabit Ethernet" },
134 
135 	{ PCI_VENDOR_APPLE, PCI_PRODUCT_APPLE_BCM5701,
136 		"Apple BCM5701 Gigabit Ethernet" },
137 
138 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5700,
139 		"Broadcom BCM5700 Gigabit Ethernet" },
140 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5701,
141 		"Broadcom BCM5701 Gigabit Ethernet" },
142 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702,
143 		"Broadcom BCM5702 Gigabit Ethernet" },
144 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702X,
145 		"Broadcom BCM5702X Gigabit Ethernet" },
146 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702_ALT,
147 		"Broadcom BCM5702 Gigabit Ethernet" },
148 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703,
149 		"Broadcom BCM5703 Gigabit Ethernet" },
150 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703X,
151 		"Broadcom BCM5703X Gigabit Ethernet" },
152 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703A3,
153 		"Broadcom BCM5703 Gigabit Ethernet" },
154 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704C,
155 		"Broadcom BCM5704C Dual Gigabit Ethernet" },
156 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704S,
157 		"Broadcom BCM5704S Dual Gigabit Ethernet" },
158 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704S_ALT,
159 		"Broadcom BCM5704S Dual Gigabit Ethernet" },
160 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705,
161 		"Broadcom BCM5705 Gigabit Ethernet" },
162 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705F,
163 		"Broadcom BCM5705F Gigabit Ethernet" },
164 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705K,
165 		"Broadcom BCM5705K Gigabit Ethernet" },
166 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705M,
167 		"Broadcom BCM5705M Gigabit Ethernet" },
168 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705M_ALT,
169 		"Broadcom BCM5705M Gigabit Ethernet" },
170 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5714,
171 		"Broadcom BCM5714C Gigabit Ethernet" },
172 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5714S,
173 		"Broadcom BCM5714S Gigabit Ethernet" },
174 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5715,
175 		"Broadcom BCM5715 Gigabit Ethernet" },
176 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5715S,
177 		"Broadcom BCM5715S Gigabit Ethernet" },
178 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5720,
179 		"Broadcom BCM5720 Gigabit Ethernet" },
180 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5721,
181 		"Broadcom BCM5721 Gigabit Ethernet" },
182 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5722,
183 		"Broadcom BCM5722 Gigabit Ethernet" },
184 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5750,
185 		"Broadcom BCM5750 Gigabit Ethernet" },
186 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5750M,
187 		"Broadcom BCM5750M Gigabit Ethernet" },
188 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751,
189 		"Broadcom BCM5751 Gigabit Ethernet" },
190 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751F,
191 		"Broadcom BCM5751F Gigabit Ethernet" },
192 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751M,
193 		"Broadcom BCM5751M Gigabit Ethernet" },
194 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5752,
195 		"Broadcom BCM5752 Gigabit Ethernet" },
196 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5752M,
197 		"Broadcom BCM5752M Gigabit Ethernet" },
198 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5753,
199 		"Broadcom BCM5753 Gigabit Ethernet" },
200 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5753F,
201 		"Broadcom BCM5753F Gigabit Ethernet" },
202 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5753M,
203 		"Broadcom BCM5753M Gigabit Ethernet" },
204 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5754,
205 		"Broadcom BCM5754 Gigabit Ethernet" },
206 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5754M,
207 		"Broadcom BCM5754M Gigabit Ethernet" },
208 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5755,
209 		"Broadcom BCM5755 Gigabit Ethernet" },
210 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5755M,
211 		"Broadcom BCM5755M Gigabit Ethernet" },
212 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5756,
213 		"Broadcom BCM5756 Gigabit Ethernet" },
214 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5780,
215 		"Broadcom BCM5780 Gigabit Ethernet" },
216 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5780S,
217 		"Broadcom BCM5780S Gigabit Ethernet" },
218 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5781,
219 		"Broadcom BCM5781 Gigabit Ethernet" },
220 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5782,
221 		"Broadcom BCM5782 Gigabit Ethernet" },
222 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5786,
223 		"Broadcom BCM5786 Gigabit Ethernet" },
224 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5787,
225 		"Broadcom BCM5787 Gigabit Ethernet" },
226 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5787F,
227 		"Broadcom BCM5787F Gigabit Ethernet" },
228 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5787M,
229 		"Broadcom BCM5787M Gigabit Ethernet" },
230 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5788,
231 		"Broadcom BCM5788 Gigabit Ethernet" },
232 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5789,
233 		"Broadcom BCM5789 Gigabit Ethernet" },
234 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5901,
235 		"Broadcom BCM5901 Fast Ethernet" },
236 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5901A2,
237 		"Broadcom BCM5901A2 Fast Ethernet" },
238 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5903M,
239 		"Broadcom BCM5903M Fast Ethernet" },
240 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5906,
241 		"Broadcom BCM5906 Fast Ethernet"},
242 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5906M,
243 		"Broadcom BCM5906M Fast Ethernet"},
244 
245 	{ PCI_VENDOR_SCHNEIDERKOCH, PCI_PRODUCT_SCHNEIDERKOCH_SK_9DX1,
246 		"SysKonnect Gigabit Ethernet" },
247 
248 	{ 0, 0, NULL }
249 };
250 
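/*
 * Convenience tests for the chip-family feature flags recorded in
 * bge_flags when the chip is identified at attach time.
 */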
251 #define BGE_IS_JUMBO_CAPABLE(sc)	((sc)->bge_flags & BGE_FLAG_JUMBO)
252 #define BGE_IS_5700_FAMILY(sc)		((sc)->bge_flags & BGE_FLAG_5700_FAMILY)
253 #define BGE_IS_5705_PLUS(sc)		((sc)->bge_flags & BGE_FLAG_5705_PLUS)
254 #define BGE_IS_5714_FAMILY(sc)		((sc)->bge_flags & BGE_FLAG_5714_FAMILY)
255 #define BGE_IS_575X_PLUS(sc)		((sc)->bge_flags & BGE_FLAG_575X_PLUS)
256 
257 typedef int	(*bge_eaddr_fcn_t)(struct bge_softc *, uint8_t[]);
258 
259 static int	bge_probe(device_t);
260 static int	bge_attach(device_t);
261 static int	bge_detach(device_t);
262 static void	bge_txeof(struct bge_softc *);
263 static void	bge_rxeof(struct bge_softc *);
264 
265 static void	bge_tick(void *);
266 static void	bge_stats_update(struct bge_softc *);
267 static void	bge_stats_update_regs(struct bge_softc *);
268 static int	bge_encap(struct bge_softc *, struct mbuf **, uint32_t *);
269 
270 #ifdef DEVICE_POLLING
271 static void	bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count);
272 #endif
273 static void	bge_intr(void *);
274 static void	bge_enable_intr(struct bge_softc *);
275 static void	bge_disable_intr(struct bge_softc *);
276 static void	bge_start(struct ifnet *);
277 static int	bge_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
278 static void	bge_init(void *);
279 static void	bge_stop(struct bge_softc *);
280 static void	bge_watchdog(struct ifnet *);
281 static void	bge_shutdown(device_t);
282 static int	bge_suspend(device_t);
283 static int	bge_resume(device_t);
284 static int	bge_ifmedia_upd(struct ifnet *);
285 static void	bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
286 
287 static uint8_t	bge_nvram_getbyte(struct bge_softc *, int, uint8_t *);
288 static int	bge_read_nvram(struct bge_softc *, caddr_t, int, int);
289 
290 static uint8_t	bge_eeprom_getbyte(struct bge_softc *, uint32_t, uint8_t *);
291 static int	bge_read_eeprom(struct bge_softc *, caddr_t, uint32_t, size_t);
292 
293 static void	bge_setmulti(struct bge_softc *);
294 static void	bge_setpromisc(struct bge_softc *);
295 
296 static int	bge_alloc_jumbo_mem(struct bge_softc *);
297 static void	bge_free_jumbo_mem(struct bge_softc *);
298 static struct bge_jslot
299 		*bge_jalloc(struct bge_softc *);
300 static void	bge_jfree(void *);
301 static void	bge_jref(void *);
302 static int	bge_newbuf_std(struct bge_softc *, int, int);
303 static int	bge_newbuf_jumbo(struct bge_softc *, int, int);
304 static void	bge_setup_rxdesc_std(struct bge_softc *, int);
305 static void	bge_setup_rxdesc_jumbo(struct bge_softc *, int);
306 static int	bge_init_rx_ring_std(struct bge_softc *);
307 static void	bge_free_rx_ring_std(struct bge_softc *);
308 static int	bge_init_rx_ring_jumbo(struct bge_softc *);
309 static void	bge_free_rx_ring_jumbo(struct bge_softc *);
310 static void	bge_free_tx_ring(struct bge_softc *);
311 static int	bge_init_tx_ring(struct bge_softc *);
312 
313 static int	bge_chipinit(struct bge_softc *);
314 static int	bge_blockinit(struct bge_softc *);
315 
316 static uint32_t	bge_readmem_ind(struct bge_softc *, uint32_t);
317 static void	bge_writemem_ind(struct bge_softc *, uint32_t, uint32_t);
318 #ifdef notdef
319 static uint32_t	bge_readreg_ind(struct bge_softc *, uint32_t);
320 #endif
321 static void	bge_writereg_ind(struct bge_softc *, uint32_t, uint32_t);
322 static void	bge_writemem_direct(struct bge_softc *, uint32_t, uint32_t);
323 static void	bge_writembx(struct bge_softc *, int, int);
324 
325 static int	bge_miibus_readreg(device_t, int, int);
326 static int	bge_miibus_writereg(device_t, int, int, int);
327 static void	bge_miibus_statchg(device_t);
328 static void	bge_bcm5700_link_upd(struct bge_softc *, uint32_t);
329 static void	bge_tbi_link_upd(struct bge_softc *, uint32_t);
330 static void	bge_copper_link_upd(struct bge_softc *, uint32_t);
331 
332 static void	bge_reset(struct bge_softc *);
333 
334 static int	bge_dma_alloc(struct bge_softc *);
335 static void	bge_dma_free(struct bge_softc *);
336 static int	bge_dma_block_alloc(struct bge_softc *, bus_size_t,
337 				    bus_dma_tag_t *, bus_dmamap_t *,
338 				    void **, bus_addr_t *);
339 static void	bge_dma_block_free(bus_dma_tag_t, bus_dmamap_t, void *);
340 
341 static int	bge_get_eaddr_mem(struct bge_softc *, uint8_t[]);
342 static int	bge_get_eaddr_nvram(struct bge_softc *, uint8_t[]);
343 static int	bge_get_eaddr_eeprom(struct bge_softc *, uint8_t[]);
344 static int	bge_get_eaddr(struct bge_softc *, uint8_t[]);
345 
346 static void	bge_coal_change(struct bge_softc *);
347 static int	bge_sysctl_rx_coal_ticks(SYSCTL_HANDLER_ARGS);
348 static int	bge_sysctl_tx_coal_ticks(SYSCTL_HANDLER_ARGS);
349 static int	bge_sysctl_rx_max_coal_bds(SYSCTL_HANDLER_ARGS);
350 static int	bge_sysctl_tx_max_coal_bds(SYSCTL_HANDLER_ARGS);
351 static int	bge_sysctl_coal_chg(SYSCTL_HANDLER_ARGS, uint32_t *, uint32_t);
352 
353 /*
354  * Set the following tunable to 1 for some IBM blade servers with the DNLK
355  * switch module. Autonegotiation is broken for those configurations.
356  */
357 static int	bge_fake_autoneg = 0;
358 TUNABLE_INT("hw.bge.fake_autoneg", &bge_fake_autoneg);
359 
360 /* Interrupt moderation control variables. */
361 static int	bge_rx_coal_ticks = 100;	/* usec */
362 static int	bge_tx_coal_ticks = 1023;	/* usec */
363 static int	bge_rx_max_coal_bds = 80;
364 static int	bge_tx_max_coal_bds = 128;
365 
366 TUNABLE_INT("hw.bge.rx_coal_ticks", &bge_rx_coal_ticks);
367 TUNABLE_INT("hw.bge.tx_coal_ticks", &bge_tx_coal_ticks);
368 TUNABLE_INT("hw.bge.rx_max_coal_bds", &bge_rx_max_coal_bds);
369 TUNABLE_INT("hw.bge.tx_max_coal_bds", &bge_tx_max_coal_bds);
370 
371 #if !defined(KTR_IF_BGE)
372 #define KTR_IF_BGE	KTR_ALL
373 #endif
374 KTR_INFO_MASTER(if_bge);
375 KTR_INFO(KTR_IF_BGE, if_bge, intr, 0, "intr", 0);
376 KTR_INFO(KTR_IF_BGE, if_bge, rx_pkt, 1, "rx_pkt", 0);
377 KTR_INFO(KTR_IF_BGE, if_bge, tx_pkt, 2, "tx_pkt", 0);
378 #define logif(name)	KTR_LOG(if_bge_ ## name)
379 
380 static device_method_t bge_methods[] = {
381 	/* Device interface */
382 	DEVMETHOD(device_probe,		bge_probe),
383 	DEVMETHOD(device_attach,	bge_attach),
384 	DEVMETHOD(device_detach,	bge_detach),
385 	DEVMETHOD(device_shutdown,	bge_shutdown),
386 	DEVMETHOD(device_suspend,	bge_suspend),
387 	DEVMETHOD(device_resume,	bge_resume),
388 
389 	/* bus interface */
390 	DEVMETHOD(bus_print_child,	bus_generic_print_child),
391 	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
392 
393 	/* MII interface */
394 	DEVMETHOD(miibus_readreg,	bge_miibus_readreg),
395 	DEVMETHOD(miibus_writereg,	bge_miibus_writereg),
396 	DEVMETHOD(miibus_statchg,	bge_miibus_statchg),
397 
398 	{ 0, 0 }
399 };
400 
401 static DEFINE_CLASS_0(bge, bge_driver, bge_methods, sizeof(struct bge_softc));
402 static devclass_t bge_devclass;
403 
404 DECLARE_DUMMY_MODULE(if_bge);
405 DRIVER_MODULE(if_bge, pci, bge_driver, bge_devclass, 0, 0);
406 DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0);
407 
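/*
 * Indirect access to the NIC's internal memory: aim the PCI memory
 * window at the desired offset, transfer the word through the window
 * data register, then park the window base at 0 again.
 */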
408 static uint32_t
409 bge_readmem_ind(struct bge_softc *sc, uint32_t off)
410 {
411 	device_t dev = sc->bge_dev;
412 	uint32_t val;
413 
414 	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
415 	val = pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4);
416 	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
417 	return (val);
418 }
419 
420 static void
421 bge_writemem_ind(struct bge_softc *sc, uint32_t off, uint32_t val)
422 {
423 	device_t dev = sc->bge_dev;
424 
425 	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
426 	pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
427 	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
428 }
429 
430 #ifdef notdef
431 static uint32_t
432 bge_readreg_ind(struct bge_softc *sc, uint32_t off)
433 {
434 	device_t dev = sc->bge_dev;
435 
436 	pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
437 	return(pci_read_config(dev, BGE_PCI_REG_DATA, 4));
438 }
439 #endif
440 
441 static void
442 bge_writereg_ind(struct bge_softc *sc, uint32_t off, uint32_t val)
443 {
444 	device_t dev = sc->bge_dev;
445 
446 	pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
447 	pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
448 }
449 
450 static void
451 bge_writemem_direct(struct bge_softc *sc, uint32_t off, uint32_t val)
452 {
453 	CSR_WRITE_4(sc, off, val);
454 }
455 
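/*
 * Write a mailbox register.  The BCM5906 exposes its mailboxes in an
 * alternate (low-power) register range, so rebase the offset first.
 */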
456 static void
457 bge_writembx(struct bge_softc *sc, int off, int val)
458 {
459 	if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
460 		off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI;
461 
462 	CSR_WRITE_4(sc, off, val);
463 }
464 
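/*
 * Read a single byte from NVRAM (BCM5906 only): take the software
 * arbitration lock, enable NVRAM access, issue a word read and pick
 * out the addressed byte, then undo it all in reverse order.
 * Returns 0 on success, nonzero on timeout.
 */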
465 static uint8_t
466 bge_nvram_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
467 {
468 	uint32_t access, byte = 0;
469 	int i;
470 
471 	/* Lock. */
472 	CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
473 	for (i = 0; i < 8000; i++) {
474 		if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1)
475 			break;
476 		DELAY(20);
477 	}
478 	if (i == 8000)
479 		return (1);
480 
481 	/* Enable access. */
482 	access = CSR_READ_4(sc, BGE_NVRAM_ACCESS);
483 	CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE);
484 
485 	CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc);
486 	CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD);
487 	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
488 		DELAY(10);
489 		if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) {
490 			DELAY(10);
491 			break;
492 		}
493 	}
494 
495 	if (i == BGE_TIMEOUT * 10) {
496 		if_printf(&sc->arpcom.ac_if, "nvram read timed out\n");
497 		return (1);
498 	}
499 
500 	/* Get result. */
501 	byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA);
502 
503 	*dest = (bswap32(byte) >> ((addr % 4) * 8)) & 0xFF;
504 
505 	/* Disable access. */
506 	CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access);
507 
508 	/* Unlock. */
509 	CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1);
510 	CSR_READ_4(sc, BGE_NVRAM_SWARB);
511 
512 	return (0);
513 }
514 
515 /*
516  * Read a sequence of bytes from NVRAM.
517  */
518 static int
519 bge_read_nvram(struct bge_softc *sc, caddr_t dest, int off, int cnt)
520 {
521 	int err = 0, i;
522 	uint8_t byte = 0;
523 
524 	if (sc->bge_asicrev != BGE_ASICREV_BCM5906)
525 		return (1);
526 
527 	for (i = 0; i < cnt; i++) {
528 		err = bge_nvram_getbyte(sc, off + i, &byte);
529 		if (err)
530 			break;
531 		*(dest + i) = byte;
532 	}
533 
534 	return (err ? 1 : 0);
535 }
536 
537 /*
538  * Read a byte of data stored in the EEPROM at address 'addr.' The
539  * BCM570x supports both the traditional bitbang interface and an
540  * auto access interface for reading the EEPROM. We use the auto
541  * access method.
542  */
543 static uint8_t
544 bge_eeprom_getbyte(struct bge_softc *sc, uint32_t addr, uint8_t *dest)
545 {
546 	int i;
547 	uint32_t byte = 0;
548 
549 	/*
550 	 * Enable use of auto EEPROM access so we can avoid
551 	 * having to use the bitbang method.
552 	 */
553 	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);
554 
555 	/* Reset the EEPROM, load the clock period. */
556 	CSR_WRITE_4(sc, BGE_EE_ADDR,
557 	    BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
558 	DELAY(20);
559 
560 	/* Issue the read EEPROM command. */
561 	CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);
562 
563 	/* Wait for completion */
564 	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
565 		DELAY(10);
566 		if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
567 			break;
568 	}
569 
570 	if (i == BGE_TIMEOUT * 10) {
571 		if_printf(&sc->arpcom.ac_if, "eeprom read timed out\n");
572 		return(1);
573 	}
574 
575 	/* Get result. */
576 	byte = CSR_READ_4(sc, BGE_EE_DATA);
577 
578 	*dest = (byte >> ((addr % 4) * 8)) & 0xFF;
579 
580 	return(0);
581 }
582 
583 /*
584  * Read a sequence of bytes from the EEPROM.
585  */
586 static int
587 bge_read_eeprom(struct bge_softc *sc, caddr_t dest, uint32_t off, size_t len)
588 {
589 	size_t i;
590 	int err;
591 	uint8_t byte;
592 
593 	for (byte = 0, err = 0, i = 0; i < len; i++) {
594 		err = bge_eeprom_getbyte(sc, off + i, &byte);
595 		if (err)
596 			break;
597 		*(dest + i) = byte;
598 	}
599 
600 	return(err ? 1 : 0);
601 }
602 
603 static int
604 bge_miibus_readreg(device_t dev, int phy, int reg)
605 {
606 	struct bge_softc *sc = device_get_softc(dev);
607 	struct ifnet *ifp = &sc->arpcom.ac_if;
608 	uint32_t val, autopoll;
609 	int i;
610 
611 	/*
612 	 * Broadcom's own driver always assumes the internal
613 	 * PHY is at GMII address 1. On some chips, the PHY responds
614 	 * to accesses at all addresses, which could cause us to
615  * bogusly attach the PHY 32 times at probe time. Always
616  * restricting the lookup to address 1 is simpler than
617  * trying to figure out which chip revisions should be
618 	 * special-cased.
619 	 */
620 	if (phy != 1)
621 		return(0);
622 
623 	/* Reading with autopolling on may trigger PCI errors */
624 	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
625 	if (autopoll & BGE_MIMODE_AUTOPOLL) {
626 		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
627 		DELAY(40);
628 	}
629 
630 	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY|
631 	    BGE_MIPHY(phy)|BGE_MIREG(reg));
632 
633 	for (i = 0; i < BGE_TIMEOUT; i++) {
634 		DELAY(10);
635 		val = CSR_READ_4(sc, BGE_MI_COMM);
636 		if (!(val & BGE_MICOMM_BUSY))
637 			break;
638 	}
639 
640 	if (i == BGE_TIMEOUT) {
641 		if_printf(ifp, "PHY read timed out "
642 			  "(phy %d, reg %d, val 0x%08x)\n", phy, reg, val);
643 		val = 0;
644 		goto done;
645 	}
646 
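	/*
	 * BUSY cleared in time; pause briefly, then fetch the result
	 * (data in the low 16 bits, plus the READFAIL flag) from MI_COMM.
	 */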
647 	DELAY(5);
648 	val = CSR_READ_4(sc, BGE_MI_COMM);
649 
650 done:
651 	if (autopoll & BGE_MIMODE_AUTOPOLL) {
652 		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
653 		DELAY(40);
654 	}
655 
656 	if (val & BGE_MICOMM_READFAIL)
657 		return(0);
658 
659 	return(val & 0xFFFF);
660 }
661 
662 static int
663 bge_miibus_writereg(device_t dev, int phy, int reg, int val)
664 {
665 	struct bge_softc *sc = device_get_softc(dev);
666 	uint32_t autopoll;
667 	int i;
668 
669 	/*
670 	 * See the related comment in bge_miibus_readreg()
671 	 */
672 	if (phy != 1)
673 		return(0);
674 
675 	if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
676 	    (reg == BRGPHY_MII_1000CTL || reg == BRGPHY_MII_AUXCTL))
677 		return(0);
678 
679 	/* Touching the PHY while autopolling is on may trigger PCI errors */
680 	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
681 	if (autopoll & BGE_MIMODE_AUTOPOLL) {
682 		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
683 		DELAY(40);
684 	}
685 
686 	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY|
687 	    BGE_MIPHY(phy)|BGE_MIREG(reg)|val);
688 
689 	for (i = 0; i < BGE_TIMEOUT; i++) {
690 		DELAY(10);
691 		if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) {
692 			DELAY(5);
693 			CSR_READ_4(sc, BGE_MI_COMM); /* dummy read */
694 			break;
695 		}
696 	}
697 
698 	if (autopoll & BGE_MIMODE_AUTOPOLL) {
699 		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
700 		DELAY(40);
701 	}
702 
703 	if (i == BGE_TIMEOUT) {
704 		if_printf(&sc->arpcom.ac_if, "PHY write timed out "
705 			  "(phy %d, reg %d, val %d)\n", phy, reg, val);
706 		return(0);
707 	}
708 
709 	return(0);
710 }
711 
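/*
 * MII status change callback: reprogram the MAC's port mode (GMII
 * vs. MII) and duplex setting to match the media the PHY resolved.
 */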
712 static void
713 bge_miibus_statchg(device_t dev)
714 {
715 	struct bge_softc *sc;
716 	struct mii_data *mii;
717 
718 	sc = device_get_softc(dev);
719 	mii = device_get_softc(sc->bge_miibus);
720 
721 	BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
722 	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
723 		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
724 	} else {
725 		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
726 	}
727 
728 	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
729 		BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
730 	} else {
731 		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
732 	}
733 }
734 
735 /*
736  * Memory management for jumbo frames.
737  */
738 static int
739 bge_alloc_jumbo_mem(struct bge_softc *sc)
740 {
741 	struct ifnet *ifp = &sc->arpcom.ac_if;
742 	struct bge_jslot *entry;
743 	uint8_t *ptr;
744 	bus_addr_t paddr;
745 	int i, error;
746 
747 	/*
748 	 * Create tag for jumbo mbufs.
749 	 * This is really a bit of a kludge. We allocate a special
750 	 * jumbo buffer pool which (thanks to the way our DMA
751 	 * memory allocation works) will consist of contiguous
752 	 * pages. This means that even though a jumbo buffer might
753 	 * be larger than a page size, we don't really need to
754 	 * map it into more than one DMA segment. However, the
755 	 * default mbuf tag will result in multi-segment mappings,
756 	 * so we have to create a special jumbo mbuf tag that
757 	 * lets us get away with mapping the jumbo buffers as
758 	 * a single segment. I think eventually the driver should
759 	 * be changed so that it uses ordinary mbufs and cluster
760 	 * buffers, i.e. jumbo frames can span multiple DMA
761 	 * descriptors. But that's a project for another day.
762 	 */
763 
764 	/*
765 	 * Create DMA stuffs for jumbo RX ring.
766 	 */
767 	error = bge_dma_block_alloc(sc, BGE_JUMBO_RX_RING_SZ,
768 				    &sc->bge_cdata.bge_rx_jumbo_ring_tag,
769 				    &sc->bge_cdata.bge_rx_jumbo_ring_map,
770 				    (void **)&sc->bge_ldata.bge_rx_jumbo_ring,
771 				    &sc->bge_ldata.bge_rx_jumbo_ring_paddr);
772 	if (error) {
773 		if_printf(ifp, "could not create jumbo RX ring\n");
774 		return error;
775 	}
776 
777 	/*
778 	 * Create DMA stuffs for jumbo buffer block.
779 	 */
780 	error = bge_dma_block_alloc(sc, BGE_JMEM,
781 				    &sc->bge_cdata.bge_jumbo_tag,
782 				    &sc->bge_cdata.bge_jumbo_map,
783 				    (void **)&sc->bge_ldata.bge_jumbo_buf,
784 				    &paddr);
785 	if (error) {
786 		if_printf(ifp, "could not create jumbo buffer\n");
787 		return error;
788 	}
789 
790 	SLIST_INIT(&sc->bge_jfree_listhead);
791 
792 	/*
793 	 * Now divide it up into 9K pieces and save the addresses
794 	 * in an array. Note that we play an evil trick here by using
795  * the first few bytes in the buffer to hold the address
796 	 * of the softc structure for this interface. This is because
797 	 * bge_jfree() needs it, but it is called by the mbuf management
798 	 * code which will not pass it to us explicitly.
799 	 */
800 	for (i = 0, ptr = sc->bge_ldata.bge_jumbo_buf; i < BGE_JSLOTS; i++) {
801 		entry = &sc->bge_cdata.bge_jslots[i];
802 		entry->bge_sc = sc;
803 		entry->bge_buf = ptr;
804 		entry->bge_paddr = paddr;
805 		entry->bge_inuse = 0;
806 		entry->bge_slot = i;
807 		SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, entry, jslot_link);
808 
809 		ptr += BGE_JLEN;
810 		paddr += BGE_JLEN;
811 	}
812 	return 0;
813 }
814 
815 static void
816 bge_free_jumbo_mem(struct bge_softc *sc)
817 {
818 	/* Destroy jumbo RX ring. */
819 	bge_dma_block_free(sc->bge_cdata.bge_rx_jumbo_ring_tag,
820 			   sc->bge_cdata.bge_rx_jumbo_ring_map,
821 			   sc->bge_ldata.bge_rx_jumbo_ring);
822 
823 	/* Destroy jumbo buffer block. */
824 	bge_dma_block_free(sc->bge_cdata.bge_jumbo_tag,
825 			   sc->bge_cdata.bge_jumbo_map,
826 			   sc->bge_ldata.bge_jumbo_buf);
827 }
828 
829 /*
830  * Allocate a jumbo buffer.
831  */
832 static struct bge_jslot *
833 bge_jalloc(struct bge_softc *sc)
834 {
835 	struct bge_jslot *entry;
836 
837 	lwkt_serialize_enter(&sc->bge_jslot_serializer);
838 	entry = SLIST_FIRST(&sc->bge_jfree_listhead);
839 	if (entry) {
840 		SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jslot_link);
841 		entry->bge_inuse = 1;
842 	} else {
843 		if_printf(&sc->arpcom.ac_if, "no free jumbo buffers\n");
844 	}
845 	lwkt_serialize_exit(&sc->bge_jslot_serializer);
846 	return(entry);
847 }
848 
849 /*
850  * Adjust usage count on a jumbo buffer.
851  */
852 static void
853 bge_jref(void *arg)
854 {
855 	struct bge_jslot *entry = (struct bge_jslot *)arg;
856 	struct bge_softc *sc = entry->bge_sc;
857 
858 	if (sc == NULL)
859 		panic("bge_jref: can't find softc pointer!");
860 
861 	if (&sc->bge_cdata.bge_jslots[entry->bge_slot] != entry) {
862 		panic("bge_jref: asked to reference buffer "
863 		    "that we don't manage!");
864 	} else if (entry->bge_inuse == 0) {
865 		panic("bge_jref: buffer already free!");
866 	} else {
867 		atomic_add_int(&entry->bge_inuse, 1);
868 	}
869 }
870 
871 /*
872  * Release a jumbo buffer.
873  */
874 static void
875 bge_jfree(void *arg)
876 {
877 	struct bge_jslot *entry = (struct bge_jslot *)arg;
878 	struct bge_softc *sc = entry->bge_sc;
879 
880 	if (sc == NULL)
881 		panic("bge_jfree: can't find softc pointer!");
882 
883 	if (&sc->bge_cdata.bge_jslots[entry->bge_slot] != entry) {
884 		panic("bge_jfree: asked to free buffer that we don't manage!");
885 	} else if (entry->bge_inuse == 0) {
886 		panic("bge_jfree: buffer already free!");
887 	} else {
888 		/*
889 		 * Possible MP race to 0, use the serializer.  The atomic insn
890 		 * is still needed for races against bge_jref().
891 		 */
892 		lwkt_serialize_enter(&sc->bge_jslot_serializer);
893 		atomic_subtract_int(&entry->bge_inuse, 1);
894 		if (entry->bge_inuse == 0) {
895 			SLIST_INSERT_HEAD(&sc->bge_jfree_listhead,
896 					  entry, jslot_link);
897 		}
898 		lwkt_serialize_exit(&sc->bge_jslot_serializer);
899 	}
900 }
901 
902 
903 /*
904  * Initialize a standard receive ring descriptor.
905  */
906 static int
907 bge_newbuf_std(struct bge_softc *sc, int i, int init)
908 {
909 	struct mbuf *m_new = NULL;
910 	bus_dma_segment_t seg;
911 	bus_dmamap_t map;
912 	int error, nsegs;
913 
914 	m_new = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
915 	if (m_new == NULL)
916 		return ENOBUFS;
917 	m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
918 
919 	if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
920 		m_adj(m_new, ETHER_ALIGN);
921 
922 	error = bus_dmamap_load_mbuf_segment(sc->bge_cdata.bge_rx_mtag,
923 			sc->bge_cdata.bge_rx_tmpmap, m_new,
924 			&seg, 1, &nsegs, BUS_DMA_NOWAIT);
925 	if (error) {
926 		m_freem(m_new);
927 		return error;
928 	}
929 
930 	if (!init) {
931 		bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
932 				sc->bge_cdata.bge_rx_std_dmamap[i],
933 				BUS_DMASYNC_POSTREAD);
934 		bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag,
935 			sc->bge_cdata.bge_rx_std_dmamap[i]);
936 	}
937 
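	/*
	 * Swap the spare DMA map with this slot's map: the new mbuf was
	 * loaded through the spare, and the slot's old, now-unloaded map
	 * becomes the spare for the next replacement.
	 */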
938 	map = sc->bge_cdata.bge_rx_tmpmap;
939 	sc->bge_cdata.bge_rx_tmpmap = sc->bge_cdata.bge_rx_std_dmamap[i];
940 	sc->bge_cdata.bge_rx_std_dmamap[i] = map;
941 
942 	sc->bge_cdata.bge_rx_std_chain[i].bge_mbuf = m_new;
943 	sc->bge_cdata.bge_rx_std_chain[i].bge_paddr = seg.ds_addr;
944 
945 	bge_setup_rxdesc_std(sc, i);
946 	return 0;
947 }
948 
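/*
 * (Re)write the standard RX descriptor at index i from the mbuf and
 * physical address cached in the corresponding rxchain slot.
 */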
949 static void
950 bge_setup_rxdesc_std(struct bge_softc *sc, int i)
951 {
952 	struct bge_rxchain *rc;
953 	struct bge_rx_bd *r;
954 
955 	rc = &sc->bge_cdata.bge_rx_std_chain[i];
956 	r = &sc->bge_ldata.bge_rx_std_ring[i];
957 
958 	r->bge_addr.bge_addr_lo = BGE_ADDR_LO(rc->bge_paddr);
959 	r->bge_addr.bge_addr_hi = BGE_ADDR_HI(rc->bge_paddr);
960 	r->bge_len = rc->bge_mbuf->m_len;
961 	r->bge_idx = i;
962 	r->bge_flags = BGE_RXBDFLAG_END;
963 }
964 
965 /*
966  * Initialize a jumbo receive ring descriptor. This allocates
967  * a jumbo buffer from the pool managed internally by the driver.
968  */
969 static int
970 bge_newbuf_jumbo(struct bge_softc *sc, int i, int init)
971 {
972 	struct mbuf *m_new = NULL;
973 	struct bge_jslot *buf;
974 	bus_addr_t paddr;
975 
976 	/* Allocate the mbuf. */
977 	MGETHDR(m_new, init ? MB_WAIT : MB_DONTWAIT, MT_DATA);
978 	if (m_new == NULL)
979 		return ENOBUFS;
980 
981 	/* Allocate the jumbo buffer */
982 	buf = bge_jalloc(sc);
983 	if (buf == NULL) {
984 		m_freem(m_new);
985 		return ENOBUFS;
986 	}
987 
988 	/* Attach the buffer to the mbuf. */
989 	m_new->m_ext.ext_arg = buf;
990 	m_new->m_ext.ext_buf = buf->bge_buf;
991 	m_new->m_ext.ext_free = bge_jfree;
992 	m_new->m_ext.ext_ref = bge_jref;
993 	m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN;
994 
995 	m_new->m_flags |= M_EXT;
996 
997 	m_new->m_data = m_new->m_ext.ext_buf;
998 	m_new->m_len = m_new->m_pkthdr.len = m_new->m_ext.ext_size;
999 
1000 	paddr = buf->bge_paddr;
1001 	if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0) {
1002 		m_adj(m_new, ETHER_ALIGN);
1003 		paddr += ETHER_ALIGN;
1004 	}
1005 
1006 	/* Save necessary information */
1007 	sc->bge_cdata.bge_rx_jumbo_chain[i].bge_mbuf = m_new;
1008 	sc->bge_cdata.bge_rx_jumbo_chain[i].bge_paddr = paddr;
1009 
1010 	/* Set up the descriptor. */
1011 	bge_setup_rxdesc_jumbo(sc, i);
1012 	return 0;
1013 }
1014 
1015 static void
1016 bge_setup_rxdesc_jumbo(struct bge_softc *sc, int i)
1017 {
1018 	struct bge_rx_bd *r;
1019 	struct bge_rxchain *rc;
1020 
1021 	r = &sc->bge_ldata.bge_rx_jumbo_ring[i];
1022 	rc = &sc->bge_cdata.bge_rx_jumbo_chain[i];
1023 
1024 	r->bge_addr.bge_addr_lo = BGE_ADDR_LO(rc->bge_paddr);
1025 	r->bge_addr.bge_addr_hi = BGE_ADDR_HI(rc->bge_paddr);
1026 	r->bge_len = rc->bge_mbuf->m_len;
1027 	r->bge_idx = i;
1028 	r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING;
1029 }
1030 
1031 static int
1032 bge_init_rx_ring_std(struct bge_softc *sc)
1033 {
1034 	int i, error;
1035 
1036 	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1037 		error = bge_newbuf_std(sc, i, 1);
1038 		if (error)
1039 			return error;
1040 	}
1041 
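	/*
	 * Publish the ring by pointing the producer index at the last
	 * initialized descriptor and poking the mailbox.
	 */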
1042 	sc->bge_std = BGE_STD_RX_RING_CNT - 1;
1043 	bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
1044 
1045 	return(0);
1046 }
1047 
1048 static void
1049 bge_free_rx_ring_std(struct bge_softc *sc)
1050 {
1051 	int i;
1052 
1053 	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1054 		struct bge_rxchain *rc = &sc->bge_cdata.bge_rx_std_chain[i];
1055 
1056 		if (rc->bge_mbuf != NULL) {
1057 			bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag,
1058 					  sc->bge_cdata.bge_rx_std_dmamap[i]);
1059 			m_freem(rc->bge_mbuf);
1060 			rc->bge_mbuf = NULL;
1061 		}
1062 		bzero(&sc->bge_ldata.bge_rx_std_ring[i],
1063 		    sizeof(struct bge_rx_bd));
1064 	}
1065 }
1066 
1067 static int
1068 bge_init_rx_ring_jumbo(struct bge_softc *sc)
1069 {
1070 	struct bge_rcb *rcb;
1071 	int i, error;
1072 
1073 	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1074 		error = bge_newbuf_jumbo(sc, i, 1);
1075 		if (error)
1076 			return error;
1077 	}
1078 
1079 	sc->bge_jumbo = BGE_JUMBO_RX_RING_CNT - 1;
1080 
1081 	rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1082 	rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 0);
1083 	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1084 
1085 	bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
1086 
1087 	return(0);
1088 }
1089 
1090 static void
1091 bge_free_rx_ring_jumbo(struct bge_softc *sc)
1092 {
1093 	int i;
1094 
1095 	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1096 		struct bge_rxchain *rc = &sc->bge_cdata.bge_rx_jumbo_chain[i];
1097 
1098 		if (rc->bge_mbuf != NULL) {
1099 			m_freem(rc->bge_mbuf);
1100 			rc->bge_mbuf = NULL;
1101 		}
1102 		bzero(&sc->bge_ldata.bge_rx_jumbo_ring[i],
1103 		    sizeof(struct bge_rx_bd));
1104 	}
1105 }
1106 
1107 static void
1108 bge_free_tx_ring(struct bge_softc *sc)
1109 {
1110 	int i;
1111 
1112 	for (i = 0; i < BGE_TX_RING_CNT; i++) {
1113 		if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
1114 			bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag,
1115 					  sc->bge_cdata.bge_tx_dmamap[i]);
1116 			m_freem(sc->bge_cdata.bge_tx_chain[i]);
1117 			sc->bge_cdata.bge_tx_chain[i] = NULL;
1118 		}
1119 		bzero(&sc->bge_ldata.bge_tx_ring[i],
1120 		    sizeof(struct bge_tx_bd));
1121 	}
1122 }
1123 
1124 static int
1125 bge_init_tx_ring(struct bge_softc *sc)
1126 {
1127 	sc->bge_txcnt = 0;
1128 	sc->bge_tx_saved_considx = 0;
1129 	sc->bge_tx_prodidx = 0;
1130 
1131 	/* Initialize transmit producer index for host-memory send ring. */
1132 	bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
1133 
1134 	/* 5700 b2 errata */
1135 	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
1136 		bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
1137 
1138 	bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1139 	/* 5700 b2 errata */
1140 	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
1141 		bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1142 
1143 	return(0);
1144 }
1145 
1146 static void
1147 bge_setmulti(struct bge_softc *sc)
1148 {
1149 	struct ifnet *ifp;
1150 	struct ifmultiaddr *ifma;
1151 	uint32_t hashes[4] = { 0, 0, 0, 0 };
1152 	int h, i;
1153 
1154 	ifp = &sc->arpcom.ac_if;
1155 
1156 	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
1157 		for (i = 0; i < 4; i++)
1158 			CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
1159 		return;
1160 	}
1161 
1162 	/* First, zot all the existing filters. */
1163 	for (i = 0; i < 4; i++)
1164 		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);
1165 
1166 	/* Now program new ones. */
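	/*
	 * The hash filter is a 128-bit table spread across the four
	 * 32-bit BGE_MAR registers.  The low 7 bits of the little-endian
	 * CRC32 of each address select the bit: bits 6-5 pick the
	 * register, bits 4-0 the bit within it.
	 */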
1167 	LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1168 		if (ifma->ifma_addr->sa_family != AF_LINK)
1169 			continue;
1170 		h = ether_crc32_le(
1171 		    LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
1172 		    ETHER_ADDR_LEN) & 0x7f;
1173 		hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
1174 	}
1175 
1176 	for (i = 0; i < 4; i++)
1177 		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
1178 }
1179 
1180 /*
1181  * Do endian, PCI and DMA initialization. Also check the on-board ROM
1182  * self-test results.
1183  */
1184 static int
1185 bge_chipinit(struct bge_softc *sc)
1186 {
1187 	int i;
1188 	uint32_t dma_rw_ctl;
1189 
1190 	/* Set endian type before we access any non-PCI registers. */
1191 	pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, BGE_INIT, 4);
1192 
1193 	/* Clear the MAC control register */
1194 	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
1195 
1196 	/*
1197 	 * Clear the MAC statistics block in the NIC's
1198 	 * internal memory.
1199 	 */
1200 	for (i = BGE_STATS_BLOCK;
1201 	    i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
1202 		BGE_MEMWIN_WRITE(sc, i, 0);
1203 
1204 	for (i = BGE_STATUS_BLOCK;
1205 	    i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
1206 		BGE_MEMWIN_WRITE(sc, i, 0);
1207 
1208 	/* Set up the PCI DMA control register. */
1209 	if (sc->bge_flags & BGE_FLAG_PCIE) {
1210 		/* PCI Express */
1211 		dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1212 		    (0xf << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1213 		    (0x2 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
1214 	} else if (sc->bge_flags & BGE_FLAG_PCIX) {
1215 		/* PCI-X bus */
1216 		if (BGE_IS_5714_FAMILY(sc)) {
1217 			dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD;
1218 			dma_rw_ctl &= ~BGE_PCIDMARWCTL_ONEDMA_ATONCE; /* XXX */
1219 			/* XXX magic values, Broadcom-supplied Linux driver */
1220 			if (sc->bge_asicrev == BGE_ASICREV_BCM5780) {
1221 				dma_rw_ctl |= (1 << 20) | (1 << 18) |
1222 				    BGE_PCIDMARWCTL_ONEDMA_ATONCE;
1223 			} else {
1224 				dma_rw_ctl |= (1 << 20) | (1 << 18) | (1 << 15);
1225 			}
1226 		} else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
1227 			/*
1228 			 * The 5704 uses a different encoding of read/write
1229 			 * watermarks.
1230 			 */
1231 			dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1232 			    (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1233 			    (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
1234 		} else {
1235 			dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1236 			    (0x3 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1237 			    (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
1238 			    (0x0F);
1239 		}
1240 
1241 		/*
1242 		 * 5703 and 5704 need ONEDMA_AT_ONCE as a workaround
1243 		 * for hardware bugs.
1244 		 */
1245 		if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1246 		    sc->bge_asicrev == BGE_ASICREV_BCM5704) {
1247 			uint32_t tmp;
1248 
1249 			tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f;
1250 			if (tmp == 0x6 || tmp == 0x7)
1251 				dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE;
1252 		}
1253 	} else {
1254 		/* Conventional PCI bus */
1255 		dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1256 		    (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1257 		    (0x7 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
1258 		    (0x0F);
1259 	}
1260 
1261 	if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1262 	    sc->bge_asicrev == BGE_ASICREV_BCM5704 ||
1263 	    sc->bge_asicrev == BGE_ASICREV_BCM5705)
1264 		dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
1265 	pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);
1266 
1267 	/*
1268 	 * Set up general mode register.
1269 	 */
1270 	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS|
1271 	    BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS|
1272 	    BGE_MODECTL_TX_NO_PHDR_CSUM);
1273 
1274 	/*
1275 	 * Disable memory write invalidate.  Apparently it is not supported
1276 	 * properly by these devices.
1277 	 */
1278 	PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD, PCIM_CMD_MWIEN, 4);
1279 
1280 	/* Set the timer prescaler (always 66MHz) */
1281 	CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);
1282 
1283 	if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
1284 		DELAY(40);	/* XXX */
1285 
1286 		/* Put PHY into ready state */
1287 		BGE_CLRBIT(sc, BGE_MISC_CFG, BGE_MISCCFG_EPHY_IDDQ);
1288 		CSR_READ_4(sc, BGE_MISC_CFG); /* Flush */
1289 		DELAY(40);
1290 	}
1291 
1292 	return(0);
1293 }
1294 
1295 static int
1296 bge_blockinit(struct bge_softc *sc)
1297 {
1298 	struct bge_rcb *rcb;
1299 	bus_size_t vrcb;
1300 	bge_hostaddr taddr;
1301 	uint32_t val;
1302 	int i;
1303 
1304 	/*
1305 	 * Initialize the memory window pointer register so that
1306 	 * we can access the first 32K of internal NIC RAM. This will
1307 	 * allow us to set up the TX send ring RCBs and the RX return
1308 	 * ring RCBs, plus other things which live in NIC memory.
1309 	 */
1310 	CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);
1311 
1312 	/* Note: the BCM5704 has a smaller mbuf space than other chips. */
1313 
1314 	if (!BGE_IS_5705_PLUS(sc)) {
1315 		/* Configure mbuf memory pool */
1316 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_BUFFPOOL_1);
1317 		if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
1318 			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
1319 		else
1320 			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
1321 
1322 		/* Configure DMA resource pool */
1323 		CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
1324 		    BGE_DMA_DESCRIPTORS);
1325 		CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
1326 	}
1327 
1328 	/* Configure mbuf pool watermarks */
1329 	if (!BGE_IS_5705_PLUS(sc)) {
1330 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
1331 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
1332 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1333 	} else if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
1334 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1335 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04);
1336 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10);
1337 	} else {
1338 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1339 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
1340 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1341 	}
1342 
1343 	/* Configure DMA resource watermarks */
1344 	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
1345 	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);
1346 
1347 	/* Enable buffer manager */
1348 	if (!BGE_IS_5705_PLUS(sc)) {
1349 		CSR_WRITE_4(sc, BGE_BMAN_MODE,
1350 		    BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN);
1351 
1352 		/* Poll for buffer manager start indication */
1353 		for (i = 0; i < BGE_TIMEOUT; i++) {
1354 			if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
1355 				break;
1356 			DELAY(10);
1357 		}
1358 
1359 		if (i == BGE_TIMEOUT) {
1360 			if_printf(&sc->arpcom.ac_if,
1361 				  "buffer manager failed to start\n");
1362 			return(ENXIO);
1363 		}
1364 	}
1365 
1366 	/* Enable flow-through queues */
1367 	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
1368 	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
1369 
1370 	/* Wait until queue initialization is complete */
1371 	for (i = 0; i < BGE_TIMEOUT; i++) {
1372 		if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
1373 			break;
1374 		DELAY(10);
1375 	}
1376 
1377 	if (i == BGE_TIMEOUT) {
1378 		if_printf(&sc->arpcom.ac_if,
1379 			  "flow-through queue init failed\n");
1380 		return(ENXIO);
1381 	}
1382 
1383 	/* Initialize the standard RX ring control block */
1384 	rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb;
1385 	rcb->bge_hostaddr.bge_addr_lo =
1386 	    BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr);
1387 	rcb->bge_hostaddr.bge_addr_hi =
1388 	    BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr);
1389 	if (BGE_IS_5705_PLUS(sc))
1390 		rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
1391 	else
1392 		rcb->bge_maxlen_flags =
1393 		    BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
1394 	rcb->bge_nicaddr = BGE_STD_RX_RINGS;
1395 	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
1396 	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
1397 	CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1398 	CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
1399 
1400 	/*
1401 	 * Initialize the jumbo RX ring control block
1402 	 * We set the 'ring disabled' bit in the flags
1403 	 * field until we're actually ready to start
1404 	 * using this ring (i.e. once we set the MTU
1405 	 * high enough to require it).
1406 	 */
1407 	if (BGE_IS_JUMBO_CAPABLE(sc)) {
1408 		rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1409 
1410 		rcb->bge_hostaddr.bge_addr_lo =
1411 		    BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1412 		rcb->bge_hostaddr.bge_addr_hi =
1413 		    BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1414 		rcb->bge_maxlen_flags =
1415 		    BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN,
1416 		    BGE_RCB_FLAG_RING_DISABLED);
1417 		rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
1418 		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
1419 		    rcb->bge_hostaddr.bge_addr_hi);
1420 		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
1421 		    rcb->bge_hostaddr.bge_addr_lo);
1422 		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
1423 		    rcb->bge_maxlen_flags);
1424 		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
1425 
1426 		/* Set up dummy disabled mini ring RCB */
1427 		rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb;
1428 		rcb->bge_maxlen_flags =
1429 		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
1430 		CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
1431 		    rcb->bge_maxlen_flags);
1432 	}
1433 
1434 	/*
1435 	 * Set the BD ring replenish thresholds. The recommended
1436 	 * values are 1/8th the number of descriptors allocated to
1437 	 * each ring.
1438 	 */
1439 	if (BGE_IS_5705_PLUS(sc))
1440 		val = 8;
1441 	else
1442 		val = BGE_STD_RX_RING_CNT / 8;
1443 	CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, val);
1444 	CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8);
1445 
1446 	/*
1447 	 * Disable all unused send rings by setting the 'ring disabled'
1448 	 * bit in the flags field of all the TX send ring control blocks.
1449 	 * These are located in NIC memory.
1450 	 */
1451 	vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1452 	for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) {
1453 		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1454 		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
1455 		RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1456 		vrcb += sizeof(struct bge_rcb);
1457 	}
1458 
1459 	/* Configure TX RCB 0 (we use only the first ring) */
1460 	vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1461 	BGE_HOSTADDR(taddr, sc->bge_ldata.bge_tx_ring_paddr);
1462 	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1463 	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1464 	RCB_WRITE_4(sc, vrcb, bge_nicaddr,
1465 	    BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
1466 	if (!BGE_IS_5705_PLUS(sc)) {
1467 		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1468 		    BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
1469 	}
1470 
1471 	/* Disable all unused RX return rings */
1472 	vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1473 	for (i = 0; i < BGE_RX_RINGS_MAX; i++) {
1474 		RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0);
1475 		RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0);
1476 		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1477 		    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt,
1478 		    BGE_RCB_FLAG_RING_DISABLED));
1479 		RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1480 		bge_writembx(sc, BGE_MBX_RX_CONS0_LO +
1481 		    (i * (sizeof(uint64_t))), 0);
1482 		vrcb += sizeof(struct bge_rcb);
1483 	}
1484 
1485 	/* Initialize RX ring indexes */
1486 	bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0);
1487 	bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
1488 	bge_writembx(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
1489 
1490 	/*
1491 	 * Set up RX return ring 0
1492 	 * Note that the NIC address for RX return rings is 0x00000000.
1493 	 * The return rings live entirely within the host, so the
1494 	 * nicaddr field in the RCB isn't used.
1495 	 */
1496 	vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1497 	BGE_HOSTADDR(taddr, sc->bge_ldata.bge_rx_return_ring_paddr);
1498 	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1499 	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1500 	RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0x00000000);
1501 	RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1502 	    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));
1503 
1504 	/* Set random backoff seed for TX */
1505 	CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
1506 	    sc->arpcom.ac_enaddr[0] + sc->arpcom.ac_enaddr[1] +
1507 	    sc->arpcom.ac_enaddr[2] + sc->arpcom.ac_enaddr[3] +
1508 	    sc->arpcom.ac_enaddr[4] + sc->arpcom.ac_enaddr[5] +
1509 	    BGE_TX_BACKOFF_SEED_MASK);
1510 
1511 	/* Set inter-packet gap */
1512 	CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);
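	/*
	 * (0x2620 appears to encode IPG-CRS 2, IPG 6 and a slot time of
	 * 32 bit-times, the same values Broadcom's own drivers program.)
	 */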
1513 
1514 	/*
1515 	 * Specify which ring to use for packets that don't match
1516 	 * any RX rules.
1517 	 */
1518 	CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
1519 
1520 	/*
1521 	 * Configure number of RX lists. One interrupt distribution
1522 	 * list, sixteen active lists, one bad frames class.
1523 	 */
1524 	CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
1525 
1526 	/* Initialize RX list placement stats mask. */
1527 	CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
1528 	CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
1529 
1530 	/* Disable host coalescing until we get it set up */
1531 	CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
1532 
1533 	/* Poll to make sure it's shut down. */
1534 	for (i = 0; i < BGE_TIMEOUT; i++) {
1535 		if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
1536 			break;
1537 		DELAY(10);
1538 	}
1539 
1540 	if (i == BGE_TIMEOUT) {
1541 		if_printf(&sc->arpcom.ac_if,
1542 			  "host coalescing engine failed to idle\n");
1543 		return(ENXIO);
1544 	}
1545 
1546 	/* Set up host coalescing defaults */
1547 	CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
1548 	CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
1549 	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
1550 	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
1551 	if (!BGE_IS_5705_PLUS(sc)) {
1552 		CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
1553 		CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
1554 	}
1555 	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 1);
1556 	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 1);
1557 
1558 	/* Set up address of statistics block */
1559 	if (!BGE_IS_5705_PLUS(sc)) {
1560 		CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI,
1561 		    BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr));
1562 		CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
1563 		    BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr));
1564 
1565 		CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
1566 		CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
1567 		CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
1568 	}
1569 
1570 	/* Set up address of status block */
1571 	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
1572 	    BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr));
1573 	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
1574 	    BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr));
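	/* Start with clean producer/consumer indices in the status block. */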
1575 	sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx = 0;
1576 	sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx = 0;
1577 
1578 	/* Turn on host coalescing state machine */
1579 	CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
1580 
1581 	/* Turn on RX BD completion state machine and enable attentions */
1582 	CSR_WRITE_4(sc, BGE_RBDC_MODE,
1583 	    BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);
1584 
1585 	/* Turn on RX list placement state machine */
1586 	CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
1587 
1588 	/* Turn on RX list selector state machine. */
1589 	if (!BGE_IS_5705_PLUS(sc))
1590 		CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
1591 
1592 	/* Turn on DMA, clear stats */
1593 	CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB|
1594 	    BGE_MACMODE_RXDMA_ENB|BGE_MACMODE_RX_STATS_CLEAR|
1595 	    BGE_MACMODE_TX_STATS_CLEAR|BGE_MACMODE_RX_STATS_ENB|
1596 	    BGE_MACMODE_TX_STATS_ENB|BGE_MACMODE_FRMHDR_DMA_ENB|
1597 	    ((sc->bge_flags & BGE_FLAG_TBI) ?
1598 	     BGE_PORTMODE_TBI : BGE_PORTMODE_MII));
1599 
1600 	/* Set misc. local control, enable interrupts on attentions */
1601 	CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);
1602 
1603 #ifdef notdef
1604 	/* Assert GPIO pins for PHY reset */
1605 	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
1606 	    BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
1607 	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
1608 	    BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
1609 #endif
1610 
1611 	/* Turn on DMA completion state machine */
1612 	if (!BGE_IS_5705_PLUS(sc))
1613 		CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
1614 
1615 	/* Turn on write DMA state machine */
1616 	val = BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS;
1617 	if (sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
1618 	    sc->bge_asicrev == BGE_ASICREV_BCM5787)
1619 		val |= (1 << 29);	/* Enable host coalescing bug fix. */
1620 	CSR_WRITE_4(sc, BGE_WDMA_MODE, val);
1621 	DELAY(40);
1622 
1623 	/* Turn on read DMA state machine */
1624 	val = BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS;
1625 	if (sc->bge_flags & BGE_FLAG_PCIE)
1626 		val |= BGE_RDMAMODE_FIFO_LONG_BURST;
1627 	CSR_WRITE_4(sc, BGE_RDMA_MODE, val);
1628 	DELAY(40);
1629 
1630 	/* Turn on RX data completion state machine */
1631 	CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
1632 
1633 	/* Turn on RX BD initiator state machine */
1634 	CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
1635 
1636 	/* Turn on RX data and RX BD initiator state machine */
1637 	CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
1638 
1639 	/* Turn on Mbuf cluster free state machine */
1640 	if (!BGE_IS_5705_PLUS(sc))
1641 		CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
1642 
1643 	/* Turn on send BD completion state machine */
1644 	CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
1645 
1646 	/* Turn on send data completion state machine */
1647 	CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
1648 
1649 	/* Turn on send data initiator state machine */
1650 	CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
1651 
1652 	/* Turn on send BD initiator state machine */
1653 	CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
1654 
1655 	/* Turn on send BD selector state machine */
1656 	CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
1657 
1658 	CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
1659 	CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
1660 	    BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);
1661 
1662 	/* ack/clear link change events */
1663 	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
1664 	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
1665 	    BGE_MACSTAT_LINK_CHANGED);
1666 	CSR_WRITE_4(sc, BGE_MI_STS, 0);
1667 
1668 	/* Enable PHY auto polling (for MII/GMII only) */
1669 	if (sc->bge_flags & BGE_FLAG_TBI) {
1670 		CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
	} else {
1672 		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL|10<<16);
1673 		if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
1674 		    sc->bge_chipid != BGE_CHIPID_BCM5700_B2) {
1675 			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
1676 			    BGE_EVTENB_MI_INTERRUPT);
1677 		}
1678 	}
1679 
1680 	/*
1681 	 * Clear any pending link state attention.
	 * Otherwise some link state change events may be lost until the
	 * attention is cleared by the bge_intr() -> bge_softc.bge_link_upd()
	 * sequence.  This may not be necessary on newer BCM chips; perhaps
	 * enabling link state change attentions implies clearing any
	 * pending attention.
1686 	 */
1687 	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
1688 	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
1689 	    BGE_MACSTAT_LINK_CHANGED);
1690 
1691 	/* Enable link state change attentions. */
1692 	BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
1693 
1694 	return(0);
1695 }
1696 
1697 /*
1698  * Probe for a Broadcom chip. Check the PCI vendor and device IDs
1699  * against our list and return its name if we find a match. Note
1700  * that since the Broadcom controller contains VPD support, we
1701  * can get the device name string from the controller itself instead
1702  * of the compiled-in string. This is a little slow, but it guarantees
1703  * we'll always announce the right product name.
1704  */
1705 static int
1706 bge_probe(device_t dev)
1707 {
1708 	const struct bge_type *t;
1709 	uint16_t product, vendor;
1710 
1711 	product = pci_get_device(dev);
1712 	vendor = pci_get_vendor(dev);
1713 
1714 	for (t = bge_devs; t->bge_name != NULL; t++) {
1715 		if (vendor == t->bge_vid && product == t->bge_did)
1716 			break;
1717 	}
1718 	if (t->bge_name == NULL)
1719 		return(ENXIO);
1720 
1721 	device_set_desc(dev, t->bge_name);
1722 	if (pci_get_subvendor(dev) == PCI_VENDOR_DELL) {
1723 		struct bge_softc *sc = device_get_softc(dev);
1724 		sc->bge_flags |= BGE_FLAG_NO_3LED;
1725 	}
1726 	return(0);
1727 }
1728 
1729 static int
1730 bge_attach(device_t dev)
1731 {
1732 	struct ifnet *ifp;
1733 	struct bge_softc *sc;
1734 	uint32_t hwcfg = 0;
1735 	int error = 0, rid;
1736 	uint8_t ether_addr[ETHER_ADDR_LEN];
1737 
1738 	sc = device_get_softc(dev);
1739 	sc->bge_dev = dev;
1740 	callout_init(&sc->bge_stat_timer);
1741 	lwkt_serialize_init(&sc->bge_jslot_serializer);
1742 
1743 #ifndef BURN_BRIDGES
1744 	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
1745 		uint32_t irq, mem;
1746 
1747 		irq = pci_read_config(dev, PCIR_INTLINE, 4);
1748 		mem = pci_read_config(dev, BGE_PCI_BAR0, 4);
1749 
1750 		device_printf(dev, "chip is in D%d power mode "
1751 		    "-- setting to D0\n", pci_get_powerstate(dev));
1752 
1753 		pci_set_powerstate(dev, PCI_POWERSTATE_D0);
1754 
1755 		pci_write_config(dev, PCIR_INTLINE, irq, 4);
1756 		pci_write_config(dev, BGE_PCI_BAR0, mem, 4);
1757 	}
#endif	/* !BURN_BRIDGES */
1759 
1760 	/*
1761 	 * Map control/status registers.
1762 	 */
1763 	pci_enable_busmaster(dev);
1764 
1765 	rid = BGE_PCI_BAR0;
1766 	sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
1767 	    RF_ACTIVE);
1768 
1769 	if (sc->bge_res == NULL) {
1770 		device_printf(dev, "couldn't map memory\n");
1771 		return ENXIO;
1772 	}
1773 
1774 	sc->bge_btag = rman_get_bustag(sc->bge_res);
1775 	sc->bge_bhandle = rman_get_bushandle(sc->bge_res);
1776 
1777 	/* Save various chip information */
1778 	sc->bge_chipid =
1779 	    pci_read_config(dev, BGE_PCI_MISC_CTL, 4) &
1780 	    BGE_PCIMISCCTL_ASICREV;
1781 	sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
1782 	sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);
1783 
1784 	/* Save chipset family. */
1785 	switch (sc->bge_asicrev) {
1786 	case BGE_ASICREV_BCM5700:
1787 	case BGE_ASICREV_BCM5701:
1788 	case BGE_ASICREV_BCM5703:
1789 	case BGE_ASICREV_BCM5704:
1790 		sc->bge_flags |= BGE_FLAG_5700_FAMILY | BGE_FLAG_JUMBO;
1791 		break;
1792 
1793 	case BGE_ASICREV_BCM5714_A0:
1794 	case BGE_ASICREV_BCM5780:
1795 	case BGE_ASICREV_BCM5714:
1796 		sc->bge_flags |= BGE_FLAG_5714_FAMILY;
1797 		/* Fall through */
1798 
1799 	case BGE_ASICREV_BCM5750:
1800 	case BGE_ASICREV_BCM5752:
1801 	case BGE_ASICREV_BCM5755:
1802 	case BGE_ASICREV_BCM5787:
1803 	case BGE_ASICREV_BCM5906:
1804 		sc->bge_flags |= BGE_FLAG_575X_PLUS;
1805 		/* Fall through */
1806 
1807 	case BGE_ASICREV_BCM5705:
1808 		sc->bge_flags |= BGE_FLAG_5705_PLUS;
1809 		break;
1810 	}
1811 
1812 	if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
1813 		sc->bge_flags |= BGE_FLAG_NO_EEPROM;
1814 
1815 	/*
1816 	 * Set various quirk flags.
1817 	 */
1818 
1819 	sc->bge_flags |= BGE_FLAG_ETH_WIRESPEED;
1820 	if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
1821 	    (sc->bge_asicrev == BGE_ASICREV_BCM5705 &&
1822 	     (sc->bge_chipid != BGE_CHIPID_BCM5705_A0 &&
1823 	      sc->bge_chipid != BGE_CHIPID_BCM5705_A1)) ||
1824 	    sc->bge_asicrev == BGE_ASICREV_BCM5906)
1825 		sc->bge_flags &= ~BGE_FLAG_ETH_WIRESPEED;
1826 
1827 	if (sc->bge_chipid == BGE_CHIPID_BCM5701_A0 ||
1828 	    sc->bge_chipid == BGE_CHIPID_BCM5701_B0)
1829 		sc->bge_flags |= BGE_FLAG_CRC_BUG;
1830 
1831 	if (sc->bge_chiprev == BGE_CHIPREV_5703_AX ||
1832 	    sc->bge_chiprev == BGE_CHIPREV_5704_AX)
1833 		sc->bge_flags |= BGE_FLAG_ADC_BUG;
1834 
1835 	if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0)
1836 		sc->bge_flags |= BGE_FLAG_5704_A0_BUG;
1837 
1838 	if (BGE_IS_5705_PLUS(sc)) {
1839 		if (sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
1840 		    sc->bge_asicrev == BGE_ASICREV_BCM5787) {
1841 			uint32_t product = pci_get_device(dev);
1842 
1843 			if (product != PCI_PRODUCT_BROADCOM_BCM5722 &&
1844 			    product != PCI_PRODUCT_BROADCOM_BCM5756)
1845 				sc->bge_flags |= BGE_FLAG_JITTER_BUG;
1846 			if (product == PCI_PRODUCT_BROADCOM_BCM5755M)
1847 				sc->bge_flags |= BGE_FLAG_ADJUST_TRIM;
1848 		} else if (sc->bge_asicrev != BGE_ASICREV_BCM5906) {
1849 			sc->bge_flags |= BGE_FLAG_BER_BUG;
1850 		}
1851 	}
1852 
1853 	/* Allocate interrupt */
1854 	rid = 0;
1855 
1856 	sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1857 	    RF_SHAREABLE | RF_ACTIVE);
1858 
1859 	if (sc->bge_irq == NULL) {
1860 		device_printf(dev, "couldn't map interrupt\n");
1861 		error = ENXIO;
1862 		goto fail;
1863 	}
1864 
1865   	/*
1866 	 * Check if this is a PCI-X or PCI Express device.
1867   	 */
1868 	if (BGE_IS_5705_PLUS(sc)) {
1869 		if (pci_is_pcie(dev)) {
1870 			sc->bge_flags |= BGE_FLAG_PCIE;
1871 			pcie_set_max_readrq(dev, PCIEM_DEVCTL_MAX_READRQ_4096);
1872 		}
1873 	} else {
1874 		/*
1875 		 * Check if the device is in PCI-X Mode.
1876 		 * (This bit is not valid on PCI Express controllers.)
1877 		 */
1878 		if ((pci_read_config(sc->bge_dev, BGE_PCI_PCISTATE, 4) &
1879 		    BGE_PCISTATE_PCI_BUSMODE) == 0)
1880 			sc->bge_flags |= BGE_FLAG_PCIX;
1881  	}
1882 
1883 	device_printf(dev, "CHIP ID 0x%08x; "
1884 		      "ASIC REV 0x%02x; CHIP REV 0x%02x; %s\n",
1885 		      sc->bge_chipid, sc->bge_asicrev, sc->bge_chiprev,
1886 		      (sc->bge_flags & BGE_FLAG_PCIX) ? "PCI-X"
1887 		      : ((sc->bge_flags & BGE_FLAG_PCIE) ?
1888 			"PCI-E" : "PCI"));
1889 
1890 	ifp = &sc->arpcom.ac_if;
1891 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1892 
1893 	/* Try to reset the chip. */
1894 	bge_reset(sc);
1895 
1896 	if (bge_chipinit(sc)) {
1897 		device_printf(dev, "chip initialization failed\n");
1898 		error = ENXIO;
1899 		goto fail;
1900 	}
1901 
1902 	/*
1903 	 * Get station address
1904 	 */
1905 	error = bge_get_eaddr(sc, ether_addr);
1906 	if (error) {
1907 		device_printf(dev, "failed to read station address\n");
1908 		goto fail;
1909 	}
1910 
1911 	/* 5705/5750 limits RX return ring to 512 entries. */
1912 	if (BGE_IS_5705_PLUS(sc))
1913 		sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
1914 	else
1915 		sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
1916 
1917 	error = bge_dma_alloc(sc);
1918 	if (error)
1919 		goto fail;
1920 
1921 	/* Set default tuneable values. */
1922 	sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
1923 	sc->bge_rx_coal_ticks = bge_rx_coal_ticks;
1924 	sc->bge_tx_coal_ticks = bge_tx_coal_ticks;
1925 	sc->bge_rx_max_coal_bds = bge_rx_max_coal_bds;
1926 	sc->bge_tx_max_coal_bds = bge_tx_max_coal_bds;
1927 
1928 	/* Set up ifnet structure */
1929 	ifp->if_softc = sc;
1930 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1931 	ifp->if_ioctl = bge_ioctl;
1932 	ifp->if_start = bge_start;
1933 #ifdef DEVICE_POLLING
1934 	ifp->if_poll = bge_poll;
1935 #endif
1936 	ifp->if_watchdog = bge_watchdog;
1937 	ifp->if_init = bge_init;
1938 	ifp->if_mtu = ETHERMTU;
1939 	ifp->if_capabilities = IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
1940 	ifq_set_maxlen(&ifp->if_snd, BGE_TX_RING_CNT - 1);
1941 	ifq_set_ready(&ifp->if_snd);
1942 
1943 	/*
1944 	 * 5700 B0 chips do not support checksumming correctly due
1945 	 * to hardware bugs.
1946 	 */
1947 	if (sc->bge_chipid != BGE_CHIPID_BCM5700_B0) {
1948 		ifp->if_capabilities |= IFCAP_HWCSUM;
1949 		ifp->if_hwassist = BGE_CSUM_FEATURES;
1950 	}
1951 	ifp->if_capenable = ifp->if_capabilities;
1952 
1953 	/*
1954 	 * Figure out what sort of media we have by checking the
1955 	 * hardware config word in the first 32k of NIC internal memory,
1956 	 * or fall back to examining the EEPROM if necessary.
1957 	 * Note: on some BCM5700 cards, this value appears to be unset.
1958 	 * If that's the case, we have to rely on identifying the NIC
1959 	 * by its PCI subsystem ID, as we do below for the SysKonnect
1960 	 * SK-9D41.
1961 	 */
1962 	if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER)
1963 		hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
1964 	else {
1965 		if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
1966 				    sizeof(hwcfg))) {
1967 			device_printf(dev, "failed to read EEPROM\n");
1968 			error = ENXIO;
1969 			goto fail;
1970 		}
1971 		hwcfg = ntohl(hwcfg);
1972 	}
1973 
1974 	if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER)
1975 		sc->bge_flags |= BGE_FLAG_TBI;
1976 
1977 	/* The SysKonnect SK-9D41 is a 1000baseSX card. */
1978 	if (pci_get_subvendor(dev) == PCI_PRODUCT_SCHNEIDERKOCH_SK_9D41)
1979 		sc->bge_flags |= BGE_FLAG_TBI;
1980 
1981 	if (sc->bge_flags & BGE_FLAG_TBI) {
1982 		ifmedia_init(&sc->bge_ifmedia, IFM_IMASK,
1983 		    bge_ifmedia_upd, bge_ifmedia_sts);
1984 		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
1985 		ifmedia_add(&sc->bge_ifmedia,
1986 		    IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL);
1987 		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
1988 		ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO);
1989 		sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
1990 	} else {
1991 		/*
1992 		 * Do transceiver setup.
1993 		 */
1994 		if (mii_phy_probe(dev, &sc->bge_miibus,
1995 		    bge_ifmedia_upd, bge_ifmedia_sts)) {
1996 			device_printf(dev, "MII without any PHY!\n");
1997 			error = ENXIO;
1998 			goto fail;
1999 		}
2000 	}
2001 
2002 	/*
2003 	 * When using the BCM5701 in PCI-X mode, data corruption has
2004 	 * been observed in the first few bytes of some received packets.
2005 	 * Aligning the packet buffer in memory eliminates the corruption.
2006 	 * Unfortunately, this misaligns the packet payloads.  On platforms
2007 	 * which do not support unaligned accesses, we will realign the
2008 	 * payloads by copying the received packets.
2009 	 */
2010 	if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
2011 	    (sc->bge_flags & BGE_FLAG_PCIX))
2012 		sc->bge_flags |= BGE_FLAG_RX_ALIGNBUG;
2013 
2014 	if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
2015 	    sc->bge_chipid != BGE_CHIPID_BCM5700_B2) {
2016 		sc->bge_link_upd = bge_bcm5700_link_upd;
2017 		sc->bge_link_chg = BGE_MACSTAT_MI_INTERRUPT;
2018 	} else if (sc->bge_flags & BGE_FLAG_TBI) {
2019 		sc->bge_link_upd = bge_tbi_link_upd;
2020 		sc->bge_link_chg = BGE_MACSTAT_LINK_CHANGED;
2021 	} else {
2022 		sc->bge_link_upd = bge_copper_link_upd;
2023 		sc->bge_link_chg = BGE_MACSTAT_LINK_CHANGED;
2024 	}
2025 
2026 	/*
2027 	 * Create sysctl nodes.
2028 	 */
2029 	sysctl_ctx_init(&sc->bge_sysctl_ctx);
2030 	sc->bge_sysctl_tree = SYSCTL_ADD_NODE(&sc->bge_sysctl_ctx,
2031 					      SYSCTL_STATIC_CHILDREN(_hw),
2032 					      OID_AUTO,
2033 					      device_get_nameunit(dev),
2034 					      CTLFLAG_RD, 0, "");
2035 	if (sc->bge_sysctl_tree == NULL) {
2036 		device_printf(dev, "can't add sysctl node\n");
2037 		error = ENXIO;
2038 		goto fail;
2039 	}
2040 
2041 	SYSCTL_ADD_PROC(&sc->bge_sysctl_ctx,
2042 			SYSCTL_CHILDREN(sc->bge_sysctl_tree),
2043 			OID_AUTO, "rx_coal_ticks",
2044 			CTLTYPE_INT | CTLFLAG_RW,
2045 			sc, 0, bge_sysctl_rx_coal_ticks, "I",
2046 			"Receive coalescing ticks (usec).");
2047 	SYSCTL_ADD_PROC(&sc->bge_sysctl_ctx,
2048 			SYSCTL_CHILDREN(sc->bge_sysctl_tree),
2049 			OID_AUTO, "tx_coal_ticks",
2050 			CTLTYPE_INT | CTLFLAG_RW,
2051 			sc, 0, bge_sysctl_tx_coal_ticks, "I",
2052 			"Transmit coalescing ticks (usec).");
2053 	SYSCTL_ADD_PROC(&sc->bge_sysctl_ctx,
2054 			SYSCTL_CHILDREN(sc->bge_sysctl_tree),
2055 			OID_AUTO, "rx_max_coal_bds",
2056 			CTLTYPE_INT | CTLFLAG_RW,
2057 			sc, 0, bge_sysctl_rx_max_coal_bds, "I",
2058 			"Receive max coalesced BD count.");
2059 	SYSCTL_ADD_PROC(&sc->bge_sysctl_ctx,
2060 			SYSCTL_CHILDREN(sc->bge_sysctl_tree),
2061 			OID_AUTO, "tx_max_coal_bds",
2062 			CTLTYPE_INT | CTLFLAG_RW,
2063 			sc, 0, bge_sysctl_tx_max_coal_bds, "I",
2064 			"Transmit max coalesced BD count.");
2065 
2066 	/*
2067 	 * Call MI attach routine.
2068 	 */
2069 	ether_ifattach(ifp, ether_addr, NULL);
2070 
2071 	error = bus_setup_intr(dev, sc->bge_irq, INTR_MPSAFE,
2072 			       bge_intr, sc, &sc->bge_intrhand,
2073 			       ifp->if_serializer);
2074 	if (error) {
2075 		ether_ifdetach(ifp);
2076 		device_printf(dev, "couldn't set up irq\n");
2077 		goto fail;
2078 	}
2079 
2080 	ifp->if_cpuid = ithread_cpuid(rman_get_start(sc->bge_irq));
2081 	KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus);
2082 
2083 	return(0);
2084 fail:
2085 	bge_detach(dev);
2086 	return(error);
2087 }
2088 
2089 static int
2090 bge_detach(device_t dev)
2091 {
2092 	struct bge_softc *sc = device_get_softc(dev);
2093 
2094 	if (device_is_attached(dev)) {
2095 		struct ifnet *ifp = &sc->arpcom.ac_if;
2096 
2097 		lwkt_serialize_enter(ifp->if_serializer);
2098 		bge_stop(sc);
2099 		bge_reset(sc);
2100 		bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand);
2101 		lwkt_serialize_exit(ifp->if_serializer);
2102 
2103 		ether_ifdetach(ifp);
2104 	}
2105 
2106 	if (sc->bge_flags & BGE_FLAG_TBI)
2107 		ifmedia_removeall(&sc->bge_ifmedia);
2108 	if (sc->bge_miibus)
2109 		device_delete_child(dev, sc->bge_miibus);
2110 	bus_generic_detach(dev);
2111 
	if (sc->bge_irq != NULL)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->bge_irq);

	if (sc->bge_res != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    BGE_PCI_BAR0, sc->bge_res);
2118 
2119 	if (sc->bge_sysctl_tree != NULL)
2120 		sysctl_ctx_free(&sc->bge_sysctl_ctx);
2121 
2122 	bge_dma_free(sc);
2123 
2124 	return 0;
2125 }
2126 
2127 static void
2128 bge_reset(struct bge_softc *sc)
2129 {
2130 	device_t dev;
2131 	uint32_t cachesize, command, pcistate, reset;
2132 	void (*write_op)(struct bge_softc *, uint32_t, uint32_t);
2133 	int i, val = 0;
2134 
2135 	dev = sc->bge_dev;
2136 
2137 	if (BGE_IS_575X_PLUS(sc) && !BGE_IS_5714_FAMILY(sc) &&
2138 	    sc->bge_asicrev != BGE_ASICREV_BCM5906) {
2139 		if (sc->bge_flags & BGE_FLAG_PCIE)
2140 			write_op = bge_writemem_direct;
2141 		else
2142 			write_op = bge_writemem_ind;
2143 	} else {
2144 		write_op = bge_writereg_ind;
2145 	}
2146 
2147 	/* Save some important PCI state. */
2148 	cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
2149 	command = pci_read_config(dev, BGE_PCI_CMD, 4);
2150 	pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);
2151 
2152 	pci_write_config(dev, BGE_PCI_MISC_CTL,
2153 	    BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2154 	    BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW, 4);
2155 
2156 	/* Disable fastboot on controllers that support it. */
2157 	if (sc->bge_asicrev == BGE_ASICREV_BCM5752 ||
2158 	    sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
2159 	    sc->bge_asicrev == BGE_ASICREV_BCM5787) {
2160 		if (bootverbose)
2161 			if_printf(&sc->arpcom.ac_if, "Disabling fastboot\n");
2162 		CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0x0);
2163 	}
2164 
2165 	/*
2166 	 * Write the magic number to SRAM at offset 0xB50.
	 * When the firmware finishes its initialization, it will
2168 	 * write ~BGE_MAGIC_NUMBER to the same location.
2169 	 */
2170 	bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
2171 
2172 	reset = BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1);
2173 
2174 	/* XXX: Broadcom Linux driver. */
2175 	if (sc->bge_flags & BGE_FLAG_PCIE) {
2176 		if (CSR_READ_4(sc, 0x7e2c) == 0x60)	/* PCIE 1.0 */
2177 			CSR_WRITE_4(sc, 0x7e2c, 0x20);
2178 		if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
2179 			/* Prevent PCIE link training during global reset */
2180 			CSR_WRITE_4(sc, BGE_MISC_CFG, (1<<29));
2181 			reset |= (1<<29);
2182 		}
2183 	}
2184 
2185 	/*
2186 	 * Set GPHY Power Down Override to leave GPHY
2187 	 * powered up in D0 uninitialized.
2188 	 */
2189 	if (BGE_IS_5705_PLUS(sc))
2190 		reset |= 0x04000000;
2191 
2192 	/* Issue global reset */
2193 	write_op(sc, BGE_MISC_CFG, reset);
2194 
2195 	if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
2196 		uint32_t status, ctrl;
2197 
2198 		status = CSR_READ_4(sc, BGE_VCPU_STATUS);
2199 		CSR_WRITE_4(sc, BGE_VCPU_STATUS,
2200 		    status | BGE_VCPU_STATUS_DRV_RESET);
2201 		ctrl = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL);
2202 		CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL,
2203 		    ctrl & ~BGE_VCPU_EXT_CTRL_HALT_CPU);
2204 	}
2205 
2206 	DELAY(1000);
2207 
2208 	/* XXX: Broadcom Linux driver. */
2209 	if (sc->bge_flags & BGE_FLAG_PCIE) {
2210 		if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
2211 			uint32_t v;
2212 
2213 			DELAY(500000); /* wait for link training to complete */
2214 			v = pci_read_config(dev, 0xc4, 4);
2215 			pci_write_config(dev, 0xc4, v | (1<<15), 4);
2216 		}
2217 		/*
2218 		 * Set PCIE max payload size to 128 bytes and
2219 		 * clear error status.
2220 		 */
2221 		pci_write_config(dev, 0xd8, 0xf5000, 4);
2222 	}
2223 
2224 	/* Reset some of the PCI state that got zapped by reset */
2225 	pci_write_config(dev, BGE_PCI_MISC_CTL,
2226 	    BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2227 	    BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW, 4);
2228 	pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
2229 	pci_write_config(dev, BGE_PCI_CMD, command, 4);
2230 	write_op(sc, BGE_MISC_CFG, (65 << 1));
2231 
2232 	/* Enable memory arbiter. */
2233 	if (BGE_IS_5714_FAMILY(sc)) {
2234 		uint32_t val;
2235 
2236 		val = CSR_READ_4(sc, BGE_MARB_MODE);
2237 		CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val);
2238 	} else {
2239 		CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
2240 	}
2241 
2242 	if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
2243 		for (i = 0; i < BGE_TIMEOUT; i++) {
2244 			val = CSR_READ_4(sc, BGE_VCPU_STATUS);
2245 			if (val & BGE_VCPU_STATUS_INIT_DONE)
2246 				break;
2247 			DELAY(100);
2248 		}
2249 		if (i == BGE_TIMEOUT) {
2250 			if_printf(&sc->arpcom.ac_if, "reset timed out\n");
2251 			return;
2252 		}
2253 	} else {
2254 		/*
2255 		 * Poll until we see the 1's complement of the magic number.
2256 		 * This indicates that the firmware initialization
2257 		 * is complete.
2258 		 */
2259 		for (i = 0; i < BGE_FIRMWARE_TIMEOUT; i++) {
2260 			val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
2261 			if (val == ~BGE_MAGIC_NUMBER)
2262 				break;
2263 			DELAY(10);
2264 		}
2265 		if (i == BGE_FIRMWARE_TIMEOUT) {
2266 			if_printf(&sc->arpcom.ac_if, "firmware handshake "
2267 				  "timed out, found 0x%08x\n", val);
2268 			return;
2269 		}
2270 	}
2271 
2272 	/*
2273 	 * XXX Wait for the value of the PCISTATE register to
2274 	 * return to its original pre-reset state. This is a
2275 	 * fairly good indicator of reset completion. If we don't
2276 	 * wait for the reset to fully complete, trying to read
2277 	 * from the device's non-PCI registers may yield garbage
2278 	 * results.
2279 	 */
2280 	for (i = 0; i < BGE_TIMEOUT; i++) {
2281 		if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
2282 			break;
2283 		DELAY(10);
2284 	}
2285 
2286 	if (sc->bge_flags & BGE_FLAG_PCIE) {
2287 		reset = bge_readmem_ind(sc, 0x7c00);
2288 		bge_writemem_ind(sc, 0x7c00, reset | (1 << 25));
2289 	}
2290 
2291 	/* Fix up byte swapping */
2292 	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS |
2293 	    BGE_MODECTL_BYTESWAP_DATA);
2294 
2295 	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
2296 
2297 	/*
2298 	 * The 5704 in TBI mode apparently needs some special
	 * adjustment to ensure the SERDES drive level is set
2300 	 * to 1.2V.
2301 	 */
2302 	if (sc->bge_asicrev == BGE_ASICREV_BCM5704 &&
2303 	    (sc->bge_flags & BGE_FLAG_TBI)) {
2304 		uint32_t serdescfg;
2305 
2306 		serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG);
2307 		serdescfg = (serdescfg & ~0xFFF) | 0x880;
2308 		CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg);
2309 	}
2310 
2311 	/* XXX: Broadcom Linux driver. */
2312 	if ((sc->bge_flags & BGE_FLAG_PCIE) &&
2313 	    sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
2314 		uint32_t v;
2315 
2316 		v = CSR_READ_4(sc, 0x7c00);
2317 		CSR_WRITE_4(sc, 0x7c00, v | (1<<25));
2318 	}
2319 
2320 	DELAY(10000);
2321 }
2322 
2323 /*
2324  * Frame reception handling. This is called if there's a frame
2325  * on the receive return list.
2326  *
2327  * Note: we have to be able to handle two possibilities here:
 * 1) the frame is from the jumbo receive ring
2329  * 2) the frame is from the standard receive ring
2330  */
2331 
2332 static void
2333 bge_rxeof(struct bge_softc *sc)
2334 {
2335 	struct ifnet *ifp;
2336 	int stdcnt = 0, jumbocnt = 0;
2337 	struct mbuf_chain chain[MAXCPU];
2338 
2339 	if (sc->bge_rx_saved_considx ==
2340 	    sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx)
2341 		return;
2342 
2343 	ether_input_chain_init(chain);
2344 
2345 	ifp = &sc->arpcom.ac_if;
2346 
2347 	while (sc->bge_rx_saved_considx !=
2348 	       sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx) {
2349 		struct bge_rx_bd	*cur_rx;
2350 		uint32_t		rxidx;
2351 		struct mbuf		*m = NULL;
2352 		uint16_t		vlan_tag = 0;
2353 		int			have_tag = 0;
2354 
		cur_rx =
		    &sc->bge_ldata.bge_rx_return_ring[sc->bge_rx_saved_considx];
2357 
2358 		rxidx = cur_rx->bge_idx;
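		/* BGE_INC() advances an index modulo the ring size. */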
2359 		BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt);
2360 		logif(rx_pkt);
2361 
2362 		if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
2363 			have_tag = 1;
2364 			vlan_tag = cur_rx->bge_vlan_tag;
2365 		}
2366 
2367 		if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
2368 			BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
2369 			jumbocnt++;
2370 
2371 			if (rxidx != sc->bge_jumbo) {
2372 				ifp->if_ierrors++;
2373 				if_printf(ifp, "sw jumbo index(%d) "
2374 				    "and hw jumbo index(%d) mismatch, drop!\n",
2375 				    sc->bge_jumbo, rxidx);
2376 				bge_setup_rxdesc_jumbo(sc, rxidx);
2377 				continue;
2378 			}
2379 
2380 			m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx].bge_mbuf;
2381 			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2382 				ifp->if_ierrors++;
2383 				bge_setup_rxdesc_jumbo(sc, sc->bge_jumbo);
2384 				continue;
2385 			}
2386 			if (bge_newbuf_jumbo(sc, sc->bge_jumbo, 0)) {
2387 				ifp->if_ierrors++;
2388 				bge_setup_rxdesc_jumbo(sc, sc->bge_jumbo);
2389 				continue;
2390 			}
2391 		} else {
2392 			BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
2393 			stdcnt++;
2394 
2395 			if (rxidx != sc->bge_std) {
2396 				ifp->if_ierrors++;
2397 				if_printf(ifp, "sw std index(%d) "
2398 				    "and hw std index(%d) mismatch, drop!\n",
2399 				    sc->bge_std, rxidx);
2400 				bge_setup_rxdesc_std(sc, rxidx);
2401 				continue;
2402 			}
2403 
2404 			m = sc->bge_cdata.bge_rx_std_chain[rxidx].bge_mbuf;
2405 			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2406 				ifp->if_ierrors++;
2407 				bge_setup_rxdesc_std(sc, sc->bge_std);
2408 				continue;
2409 			}
2410 			if (bge_newbuf_std(sc, sc->bge_std, 0)) {
2411 				ifp->if_ierrors++;
2412 				bge_setup_rxdesc_std(sc, sc->bge_std);
2413 				continue;
2414 			}
2415 		}
2416 
2417 		ifp->if_ipackets++;
2418 #ifndef __i386__
2419 		/*
2420 		 * The i386 allows unaligned accesses, but for other
2421 		 * platforms we must make sure the payload is aligned.
2422 		 */
2423 		if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) {
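			/*
			 * Shift the frame up by ETHER_ALIGN (2) bytes so
			 * the IP header lands back on a 32-bit boundary;
			 * bcopy() is safe here even though the source and
			 * destination regions overlap.
			 */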
2424 			bcopy(m->m_data, m->m_data + ETHER_ALIGN,
2425 			    cur_rx->bge_len);
2426 			m->m_data += ETHER_ALIGN;
2427 		}
2428 #endif
2429 		m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
2430 		m->m_pkthdr.rcvif = ifp;
2431 
2432 		if (ifp->if_capenable & IFCAP_RXCSUM) {
2433 			if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
2434 				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
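				/*
				 * bge_ip_csum holds the raw ones-complement
				 * sum the chip computed over the IP header;
				 * 0xffff means the header checksum verified.
				 */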
2435 				if ((cur_rx->bge_ip_csum ^ 0xffff) == 0)
2436 					m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2437 			}
2438 			if ((cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) &&
2439 			    m->m_pkthdr.len >= BGE_MIN_FRAME) {
2440 				m->m_pkthdr.csum_data =
2441 					cur_rx->bge_tcp_udp_csum;
2442 				m->m_pkthdr.csum_flags |=
2443 					CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2444 			}
2445 		}
2446 
2447 		/*
2448 		 * If we received a packet with a vlan tag, pass it
2449 		 * to vlan_input() instead of ether_input().
2450 		 */
2451 		if (have_tag) {
2452 			m->m_flags |= M_VLANTAG;
2453 			m->m_pkthdr.ether_vlantag = vlan_tag;
2454 			have_tag = vlan_tag = 0;
2455 		}
2456 		ether_input_chain(ifp, m, NULL, chain);
2457 	}
2458 
2459 	ether_input_dispatch(chain);
2460 
2461 	bge_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
2462 	if (stdcnt)
2463 		bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
2464 	if (jumbocnt)
2465 		bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
2466 }
2467 
2468 static void
2469 bge_txeof(struct bge_softc *sc)
2470 {
2471 	struct bge_tx_bd *cur_tx = NULL;
2472 	struct ifnet *ifp;
2473 
2474 	if (sc->bge_tx_saved_considx ==
2475 	    sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx)
2476 		return;
2477 
2478 	ifp = &sc->arpcom.ac_if;
2479 
2480 	/*
2481 	 * Go through our tx ring and free mbufs for those
2482 	 * frames that have been sent.
2483 	 */
2484 	while (sc->bge_tx_saved_considx !=
2485 	       sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx) {
2486 		uint32_t idx = 0;
2487 
2488 		idx = sc->bge_tx_saved_considx;
2489 		cur_tx = &sc->bge_ldata.bge_tx_ring[idx];
2490 		if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
2491 			ifp->if_opackets++;
2492 		if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
2493 			bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag,
2494 			    sc->bge_cdata.bge_tx_dmamap[idx]);
2495 			m_freem(sc->bge_cdata.bge_tx_chain[idx]);
2496 			sc->bge_cdata.bge_tx_chain[idx] = NULL;
2497 		}
2498 		sc->bge_txcnt--;
2499 		BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
2500 		logif(tx_pkt);
2501 	}
2502 
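	/*
	 * Clear OACTIVE once enough descriptors have drained that a
	 * worst-case packet fits again (the same reserve bge_start()
	 * checks before queueing).
	 */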
2503 	if (cur_tx != NULL &&
2504 	    (BGE_TX_RING_CNT - sc->bge_txcnt) >=
2505 	    (BGE_NSEG_RSVD + BGE_NSEG_SPARE))
2506 		ifp->if_flags &= ~IFF_OACTIVE;
2507 
2508 	if (sc->bge_txcnt == 0)
2509 		ifp->if_timer = 0;
2510 
2511 	if (!ifq_is_empty(&ifp->if_snd))
2512 		if_devstart(ifp);
2513 }
2514 
2515 #ifdef DEVICE_POLLING
2516 
2517 static void
2518 bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
2519 {
2520 	struct bge_softc *sc = ifp->if_softc;
	uint32_t status;
2522 
2523 	switch(cmd) {
2524 	case POLL_REGISTER:
2525 		bge_disable_intr(sc);
2526 		break;
2527 	case POLL_DEREGISTER:
2528 		bge_enable_intr(sc);
2529 		break;
2530 	case POLL_AND_CHECK_STATUS:
2531 		/*
2532 		 * Process link state changes.
2533 		 */
2534 		status = CSR_READ_4(sc, BGE_MAC_STS);
2535 		if ((status & sc->bge_link_chg) || sc->bge_link_evt) {
2536 			sc->bge_link_evt = 0;
2537 			sc->bge_link_upd(sc, status);
2538 		}
2539 		/* fall through */
2540 	case POLL_ONLY:
2541 		if (ifp->if_flags & IFF_RUNNING) {
2542 			bge_rxeof(sc);
2543 			bge_txeof(sc);
2544 		}
2545 		break;
2546 	}
2547 }
2548 
2549 #endif
2550 
2551 static void
2552 bge_intr(void *xsc)
2553 {
2554 	struct bge_softc *sc = xsc;
2555 	struct ifnet *ifp = &sc->arpcom.ac_if;
2556 	uint32_t status;
2557 
2558 	logif(intr);
2559 
2560  	/*
2561 	 * Ack the interrupt by writing something to BGE_MBX_IRQ0_LO.  Don't
2562 	 * disable interrupts by writing nonzero like we used to, since with
2563 	 * our current organization this just gives complications and
2564 	 * pessimizations for re-enabling interrupts.  We used to have races
2565 	 * instead of the necessary complications.  Disabling interrupts
2566 	 * would just reduce the chance of a status update while we are
2567 	 * running (by switching to the interrupt-mode coalescence
2568 	 * parameters), but this chance is already very low so it is more
2569 	 * efficient to get another interrupt than prevent it.
2570 	 *
2571 	 * We do the ack first to ensure another interrupt if there is a
2572 	 * status update after the ack.  We don't check for the status
2573 	 * changing later because it is more efficient to get another
2574 	 * interrupt than prevent it, not quite as above (not checking is
2575 	 * a smaller optimization than not toggling the interrupt enable,
	 * since checking doesn't involve PCI accesses and toggling requires
2577 	 * the status check).  So toggling would probably be a pessimization
2578 	 * even with MSI.  It would only be needed for using a task queue.
2579 	 */
2580 	bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
2581 
2582 	/*
2583 	 * Process link state changes.
2584 	 */
2585 	status = CSR_READ_4(sc, BGE_MAC_STS);
2586 	if ((status & sc->bge_link_chg) || sc->bge_link_evt) {
2587 		sc->bge_link_evt = 0;
2588 		sc->bge_link_upd(sc, status);
2589 	}
2590 
2591 	if (ifp->if_flags & IFF_RUNNING) {
2592 		/* Check RX return ring producer/consumer */
2593 		bge_rxeof(sc);
2594 
2595 		/* Check TX ring producer/consumer */
2596 		bge_txeof(sc);
2597 	}
2598 
2599 	if (sc->bge_coal_chg)
2600 		bge_coal_change(sc);
2601 }
2602 
2603 static void
2604 bge_tick(void *xsc)
2605 {
2606 	struct bge_softc *sc = xsc;
2607 	struct ifnet *ifp = &sc->arpcom.ac_if;
2608 
2609 	lwkt_serialize_enter(ifp->if_serializer);
2610 
2611 	if (BGE_IS_5705_PLUS(sc))
2612 		bge_stats_update_regs(sc);
2613 	else
2614 		bge_stats_update(sc);
2615 
2616 	if (sc->bge_flags & BGE_FLAG_TBI) {
2617 		/*
		 * Since auto-polling can't be used in TBI mode, we poll
		 * link status manually.  Here we register a pending link
		 * event and trigger an interrupt.
2621 		 */
2622 		sc->bge_link_evt++;
2623 		BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
2624 	} else if (!sc->bge_link) {
2625 		mii_tick(device_get_softc(sc->bge_miibus));
2626 	}
2627 
2628 	callout_reset(&sc->bge_stat_timer, hz, bge_tick, sc);
2629 
2630 	lwkt_serialize_exit(ifp->if_serializer);
2631 }
2632 
2633 static void
2634 bge_stats_update_regs(struct bge_softc *sc)
2635 {
2636 	struct ifnet *ifp = &sc->arpcom.ac_if;
2637 	struct bge_mac_stats_regs stats;
2638 	uint32_t *s;
2639 	int i;
2640 
2641 	s = (uint32_t *)&stats;
2642 	for (i = 0; i < sizeof(struct bge_mac_stats_regs); i += 4) {
2643 		*s = CSR_READ_4(sc, BGE_RX_STATS + i);
2644 		s++;
2645 	}
2646 
2647 	ifp->if_collisions +=
2648 	   (stats.dot3StatsSingleCollisionFrames +
2649 	   stats.dot3StatsMultipleCollisionFrames +
2650 	   stats.dot3StatsExcessiveCollisions +
2651 	   stats.dot3StatsLateCollisions) -
2652 	   ifp->if_collisions;
2653 }
2654 
2655 static void
2656 bge_stats_update(struct bge_softc *sc)
2657 {
2658 	struct ifnet *ifp = &sc->arpcom.ac_if;
2659 	bus_size_t stats;
2660 
2661 	stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;
2662 
2663 #define READ_STAT(sc, stats, stat)	\
2664 	CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))
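	/*
	 * For example, READ_STAT(sc, stats,
	 * txstats.dot3StatsLateCollisions.bge_addr_lo) reads the CSR at
	 * BGE_MEMWIN_START + BGE_STATS_BLOCK plus that field's offset,
	 * i.e. the statistic is fetched through the PCI memory window
	 * into NIC-local memory rather than from a host-resident
	 * statistics block.
	 */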
2665 
2666 	ifp->if_collisions +=
2667 	   (READ_STAT(sc, stats,
2668 		txstats.dot3StatsSingleCollisionFrames.bge_addr_lo) +
2669 	    READ_STAT(sc, stats,
2670 		txstats.dot3StatsMultipleCollisionFrames.bge_addr_lo) +
2671 	    READ_STAT(sc, stats,
2672 		txstats.dot3StatsExcessiveCollisions.bge_addr_lo) +
2673 	    READ_STAT(sc, stats,
2674 		txstats.dot3StatsLateCollisions.bge_addr_lo)) -
2675 	   ifp->if_collisions;
2676 
2677 #undef READ_STAT
2678 
2679 #ifdef notdef
2680 	ifp->if_collisions +=
2681 	   (sc->bge_rdata->bge_info.bge_stats.dot3StatsSingleCollisionFrames +
2682 	   sc->bge_rdata->bge_info.bge_stats.dot3StatsMultipleCollisionFrames +
2683 	   sc->bge_rdata->bge_info.bge_stats.dot3StatsExcessiveCollisions +
2684 	   sc->bge_rdata->bge_info.bge_stats.dot3StatsLateCollisions) -
2685 	   ifp->if_collisions;
2686 #endif
2687 }
2688 
2689 /*
 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
2691  * pointers to descriptors.
2692  */
2693 static int
2694 bge_encap(struct bge_softc *sc, struct mbuf **m_head0, uint32_t *txidx)
2695 {
2696 	struct bge_tx_bd *d = NULL;
2697 	uint16_t csum_flags = 0;
2698 	bus_dma_segment_t segs[BGE_NSEG_NEW];
2699 	bus_dmamap_t map;
2700 	int error, maxsegs, nsegs, idx, i;
2701 	struct mbuf *m_head = *m_head0;
2702 
2703 	if (m_head->m_pkthdr.csum_flags) {
2704 		if (m_head->m_pkthdr.csum_flags & CSUM_IP)
2705 			csum_flags |= BGE_TXBDFLAG_IP_CSUM;
2706 		if (m_head->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
2707 			csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
2708 		if (m_head->m_flags & M_LASTFRAG)
2709 			csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
2710 		else if (m_head->m_flags & M_FRAG)
2711 			csum_flags |= BGE_TXBDFLAG_IP_FRAG;
2712 	}
2713 
2714 	idx = *txidx;
2715 	map = sc->bge_cdata.bge_tx_dmamap[idx];
2716 
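	/*
	 * Cap the segment count at the free ring space less a reserve,
	 * so the producer never catches up with the consumer; bge_start()
	 * is expected to have verified this headroom already, hence the
	 * assertion below.
	 */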
2717 	maxsegs = (BGE_TX_RING_CNT - sc->bge_txcnt) - BGE_NSEG_RSVD;
2718 	KASSERT(maxsegs >= BGE_NSEG_SPARE,
2719 		("not enough segments %d\n", maxsegs));
2720 
2721 	if (maxsegs > BGE_NSEG_NEW)
2722 		maxsegs = BGE_NSEG_NEW;
2723 
2724 	/*
2725 	 * Pad outbound frame to BGE_MIN_FRAME for an unusual reason.
2726 	 * The bge hardware will pad out Tx runts to BGE_MIN_FRAME,
2727 	 * but when such padded frames employ the bge IP/TCP checksum
2728 	 * offload, the hardware checksum assist gives incorrect results
2729 	 * (possibly from incorporating its own padding into the UDP/TCP
2730 	 * checksum; who knows).  If we pad such runts with zeros, the
2731 	 * onboard checksum comes out correct.
2732 	 */
2733 	if ((csum_flags & BGE_TXBDFLAG_TCP_UDP_CSUM) &&
2734 	    m_head->m_pkthdr.len < BGE_MIN_FRAME) {
2735 		error = m_devpad(m_head, BGE_MIN_FRAME);
2736 		if (error)
2737 			goto back;
2738 	}
2739 
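	/*
	 * Load the mbuf chain; the _defrag variant will collapse the
	 * chain into fewer clusters if it needs more than maxsegs
	 * segments.
	 */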
2740 	error = bus_dmamap_load_mbuf_defrag(sc->bge_cdata.bge_tx_mtag, map,
2741 			m_head0, segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
2742 	if (error)
2743 		goto back;
2744 
2745 	m_head = *m_head0;
2746 	bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag, map, BUS_DMASYNC_PREWRITE);
2747 
2748 	for (i = 0; ; i++) {
2749 		d = &sc->bge_ldata.bge_tx_ring[idx];
2750 
2751 		d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr);
2752 		d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr);
2753 		d->bge_len = segs[i].ds_len;
2754 		d->bge_flags = csum_flags;
2755 
2756 		if (i == nsegs - 1)
2757 			break;
2758 		BGE_INC(idx, BGE_TX_RING_CNT);
2759 	}
2760 	/* Mark the last segment as end of packet... */
2761 	d->bge_flags |= BGE_TXBDFLAG_END;
2762 
2763 	/* Set vlan tag to the first segment of the packet. */
2764 	d = &sc->bge_ldata.bge_tx_ring[*txidx];
2765 	if (m_head->m_flags & M_VLANTAG) {
2766 		d->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
2767 		d->bge_vlan_tag = m_head->m_pkthdr.ether_vlantag;
2768 	} else {
2769 		d->bge_vlan_tag = 0;
2770 	}
2771 
2772 	/*
	 * Ensure that the map for this transmission is placed at
2774 	 * the array index of the last descriptor in this chain.
2775 	 */
2776 	sc->bge_cdata.bge_tx_dmamap[*txidx] = sc->bge_cdata.bge_tx_dmamap[idx];
2777 	sc->bge_cdata.bge_tx_dmamap[idx] = map;
2778 	sc->bge_cdata.bge_tx_chain[idx] = m_head;
2779 	sc->bge_txcnt += nsegs;
2780 
2781 	BGE_INC(idx, BGE_TX_RING_CNT);
2782 	*txidx = idx;
2783 back:
2784 	if (error) {
2785 		m_freem(*m_head0);
2786 		*m_head0 = NULL;
2787 	}
2788 	return error;
2789 }
2790 
2791 /*
2792  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
2793  * to the mbuf data regions directly in the transmit descriptors.
2794  */
2795 static void
2796 bge_start(struct ifnet *ifp)
2797 {
2798 	struct bge_softc *sc = ifp->if_softc;
2799 	struct mbuf *m_head = NULL;
2800 	uint32_t prodidx;
2801 	int need_trans;
2802 
2803 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
2804 		return;
2805 
2806 	prodidx = sc->bge_tx_prodidx;
2807 
2808 	need_trans = 0;
2809 	while (sc->bge_cdata.bge_tx_chain[prodidx] == NULL) {
2810 		m_head = ifq_dequeue(&ifp->if_snd, NULL);
2811 		if (m_head == NULL)
2812 			break;
2813 
2814 		/*
2815 		 * XXX
2816 		 * The code inside the if() block is never reached since we
2817 		 * must mark CSUM_IP_FRAGS in our if_hwassist to start getting
2818 		 * requests to checksum TCP/UDP in a fragmented packet.
2819 		 *
2820 		 * XXX
2821 		 * safety overkill.  If this is a fragmented packet chain
2822 		 * with delayed TCP/UDP checksums, then only encapsulate
2823 		 * it if we have enough descriptors to handle the entire
2824 		 * chain at once.
2825 		 * (paranoia -- may not actually be needed)
2826 		 */
2827 		if ((m_head->m_flags & M_FIRSTFRAG) &&
2828 		    (m_head->m_pkthdr.csum_flags & CSUM_DELAY_DATA)) {
2829 			if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
2830 			    m_head->m_pkthdr.csum_data + BGE_NSEG_RSVD) {
2831 				ifp->if_flags |= IFF_OACTIVE;
2832 				ifq_prepend(&ifp->if_snd, m_head);
2833 				break;
2834 			}
2835 		}
2836 
2837 		/*
2838 		 * Sanity check: avoid coming within BGE_NSEG_RSVD
2839 		 * descriptors of the end of the ring.  Also make
2840 		 * sure there are BGE_NSEG_SPARE descriptors for
2841 		 * jumbo buffers' defragmentation.
2842 		 */
2843 		if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
2844 		    (BGE_NSEG_RSVD + BGE_NSEG_SPARE)) {
2845 			ifp->if_flags |= IFF_OACTIVE;
2846 			ifq_prepend(&ifp->if_snd, m_head);
2847 			break;
2848 		}
2849 
2850 		/*
2851 		 * Pack the data into the transmit ring. If we
2852 		 * don't have room, set the OACTIVE flag and wait
2853 		 * for the NIC to drain the ring.
2854 		 */
2855 		if (bge_encap(sc, &m_head, &prodidx)) {
2856 			ifp->if_flags |= IFF_OACTIVE;
2857 			ifp->if_oerrors++;
2858 			break;
2859 		}
2860 		need_trans = 1;
2861 
2862 		ETHER_BPF_MTAP(ifp, m_head);
2863 	}
2864 
2865 	if (!need_trans)
2866 		return;
2867 
2868 	/* Transmit */
2869 	bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
2870 	/* 5700 b2 errata */
2871 	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
2872 		bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
2873 
2874 	sc->bge_tx_prodidx = prodidx;
2875 
2876 	/*
2877 	 * Set a timeout in case the chip goes out to lunch.
2878 	 */
2879 	ifp->if_timer = 5;
2880 }
2881 
2882 static void
2883 bge_init(void *xsc)
2884 {
2885 	struct bge_softc *sc = xsc;
2886 	struct ifnet *ifp = &sc->arpcom.ac_if;
2887 	uint16_t *m;
2888 
2889 	ASSERT_SERIALIZED(ifp->if_serializer);
2890 
2891 	if (ifp->if_flags & IFF_RUNNING)
2892 		return;
2893 
2894 	/* Cancel pending I/O and flush buffers. */
2895 	bge_stop(sc);
2896 	bge_reset(sc);
2897 	bge_chipinit(sc);
2898 
2899 	/*
2900 	 * Init the various state machines, ring
2901 	 * control blocks and firmware.
2902 	 */
2903 	if (bge_blockinit(sc)) {
2904 		if_printf(ifp, "initialization failure\n");
2905 		bge_stop(sc);
2906 		return;
2907 	}
2908 
2909 	/* Specify MTU. */
2910 	CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
2911 	    ETHER_HDR_LEN + ETHER_CRC_LEN + EVL_ENCAPLEN);
2912 
2913 	/* Load our MAC address. */
2914 	m = (uint16_t *)&sc->arpcom.ac_enaddr[0];
2915 	CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
2916 	CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
2917 
2918 	/* Enable or disable promiscuous mode as needed. */
2919 	bge_setpromisc(sc);
2920 
2921 	/* Program multicast filter. */
2922 	bge_setmulti(sc);
2923 
2924 	/* Init RX ring. */
2925 	if (bge_init_rx_ring_std(sc)) {
2926 		if_printf(ifp, "RX ring initialization failed\n");
2927 		bge_stop(sc);
2928 		return;
2929 	}
2930 
2931 	/*
2932 	 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
	 * memory to ensure that the chip has in fact read the first
2934 	 * entry of the ring.
2935 	 */
2936 	if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
2937 		uint32_t		v, i;
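		/*
		 * BGE_STD_RX_RINGS is the chip's local copy of the std
		 * ring; offset 8 presumably lands on the first
		 * descriptor's length field, so reading back the expected
		 * buffer length confirms the chip fetched the descriptor.
		 */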
2938 		for (i = 0; i < 10; i++) {
2939 			DELAY(20);
2940 			v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
2941 			if (v == (MCLBYTES - ETHER_ALIGN))
2942 				break;
2943 		}
2944 		if (i == 10)
2945 			if_printf(ifp, "5705 A0 chip failed to load RX ring\n");
2946 	}
2947 
2948 	/* Init jumbo RX ring. */
2949 	if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN)) {
2950 		if (bge_init_rx_ring_jumbo(sc)) {
2951 			if_printf(ifp, "Jumbo RX ring initialization failed\n");
2952 			bge_stop(sc);
2953 			return;
2954 		}
2955 	}
2956 
2957 	/* Init our RX return ring index */
2958 	sc->bge_rx_saved_considx = 0;
2959 
2960 	/* Init TX ring. */
2961 	bge_init_tx_ring(sc);
2962 
2963 	/* Turn on transmitter */
2964 	BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE);
2965 
2966 	/* Turn on receiver */
2967 	BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
2968 
2969 	/* Tell firmware we're alive. */
2970 	BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
2971 
2972 	/* Enable host interrupts if polling(4) is not enabled. */
2973 	BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
2974 #ifdef DEVICE_POLLING
2975 	if (ifp->if_flags & IFF_POLLING)
2976 		bge_disable_intr(sc);
2977 	else
2978 #endif
2979 	bge_enable_intr(sc);
2980 
2981 	bge_ifmedia_upd(ifp);
2982 
2983 	ifp->if_flags |= IFF_RUNNING;
2984 	ifp->if_flags &= ~IFF_OACTIVE;
2985 
2986 	callout_reset(&sc->bge_stat_timer, hz, bge_tick, sc);
2987 }
2988 
2989 /*
2990  * Set media options.
2991  */
2992 static int
2993 bge_ifmedia_upd(struct ifnet *ifp)
2994 {
2995 	struct bge_softc *sc = ifp->if_softc;
2996 
2997 	/* If this is a 1000baseX NIC, enable the TBI port. */
2998 	if (sc->bge_flags & BGE_FLAG_TBI) {
2999 		struct ifmedia *ifm = &sc->bge_ifmedia;
3000 
3001 		if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
3002 			return(EINVAL);
3003 
3004 		switch(IFM_SUBTYPE(ifm->ifm_media)) {
3005 		case IFM_AUTO:
3006 			/*
3007 			 * The BCM5704 ASIC appears to have a special
3008 			 * mechanism for programming the autoneg
3009 			 * advertisement registers in TBI mode.
3010 			 */
3011 			if (!bge_fake_autoneg &&
3012 			    sc->bge_asicrev == BGE_ASICREV_BCM5704) {
3013 				uint32_t sgdig;
3014 
3015 				CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
3016 				sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
3017 				sgdig |= BGE_SGDIGCFG_AUTO |
3018 					 BGE_SGDIGCFG_PAUSE_CAP |
3019 					 BGE_SGDIGCFG_ASYM_PAUSE;
3020 				CSR_WRITE_4(sc, BGE_SGDIG_CFG,
3021 					    sgdig | BGE_SGDIGCFG_SEND);
3022 				DELAY(5);
3023 				CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
3024 			}
3025 			break;
3026 		case IFM_1000_SX:
3027 			if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
3028 				BGE_CLRBIT(sc, BGE_MAC_MODE,
3029 				    BGE_MACMODE_HALF_DUPLEX);
3030 			} else {
3031 				BGE_SETBIT(sc, BGE_MAC_MODE,
3032 				    BGE_MACMODE_HALF_DUPLEX);
3033 			}
3034 			break;
3035 		default:
3036 			return(EINVAL);
3037 		}
3038 	} else {
3039 		struct mii_data *mii = device_get_softc(sc->bge_miibus);
3040 
3041 		sc->bge_link_evt++;
3042 		sc->bge_link = 0;
3043 		if (mii->mii_instance) {
3044 			struct mii_softc *miisc;
3045 
3046 			LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
3047 				mii_phy_reset(miisc);
3048 		}
3049 		mii_mediachg(mii);
3050 	}
3051 	return(0);
3052 }
3053 
3054 /*
3055  * Report current media status.
3056  */
3057 static void
3058 bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
3059 {
3060 	struct bge_softc *sc = ifp->if_softc;
3061 
3062 	if (sc->bge_flags & BGE_FLAG_TBI) {
3063 		ifmr->ifm_status = IFM_AVALID;
3064 		ifmr->ifm_active = IFM_ETHER;
3065 		if (CSR_READ_4(sc, BGE_MAC_STS) &
3066 		    BGE_MACSTAT_TBI_PCS_SYNCHED) {
3067 			ifmr->ifm_status |= IFM_ACTIVE;
3068 		} else {
3069 			ifmr->ifm_active |= IFM_NONE;
3070 			return;
3071 		}
3072 
3073 		ifmr->ifm_active |= IFM_1000_SX;
3074 		if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
3075 			ifmr->ifm_active |= IFM_HDX;
3076 		else
3077 			ifmr->ifm_active |= IFM_FDX;
3078 	} else {
3079 		struct mii_data *mii = device_get_softc(sc->bge_miibus);
3080 
3081 		mii_pollstat(mii);
3082 		ifmr->ifm_active = mii->mii_media_active;
3083 		ifmr->ifm_status = mii->mii_media_status;
3084 	}
3085 }
3086 
3087 static int
3088 bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
3089 {
3090 	struct bge_softc *sc = ifp->if_softc;
3091 	struct ifreq *ifr = (struct ifreq *)data;
3092 	int mask, error = 0;
3093 
3094 	ASSERT_SERIALIZED(ifp->if_serializer);
3095 
3096 	switch (command) {
3097 	case SIOCSIFMTU:
3098 		if ((!BGE_IS_JUMBO_CAPABLE(sc) && ifr->ifr_mtu > ETHERMTU) ||
3099 		    (BGE_IS_JUMBO_CAPABLE(sc) &&
3100 		     ifr->ifr_mtu > BGE_JUMBO_MTU)) {
3101 			error = EINVAL;
3102 		} else if (ifp->if_mtu != ifr->ifr_mtu) {
3103 			ifp->if_mtu = ifr->ifr_mtu;
3104 			ifp->if_flags &= ~IFF_RUNNING;
3105 			bge_init(sc);
3106 		}
3107 		break;
3108 	case SIOCSIFFLAGS:
3109 		if (ifp->if_flags & IFF_UP) {
3110 			if (ifp->if_flags & IFF_RUNNING) {
3111 				mask = ifp->if_flags ^ sc->bge_if_flags;
3112 
3113 				/*
3114 				 * If only the state of the PROMISC flag
3115 				 * changed, then just use the 'set promisc
3116 				 * mode' command instead of reinitializing
3117 				 * the entire NIC. Doing a full re-init
3118 				 * means reloading the firmware and waiting
3119 				 * for it to start up, which may take a
3120 				 * second or two.  Similarly for ALLMULTI.
3121 				 */
3122 				if (mask & IFF_PROMISC)
3123 					bge_setpromisc(sc);
3124 				if (mask & IFF_ALLMULTI)
3125 					bge_setmulti(sc);
3126 			} else {
3127 				bge_init(sc);
3128 			}
3129 		} else {
3130 			if (ifp->if_flags & IFF_RUNNING)
3131 				bge_stop(sc);
3132 		}
3133 		sc->bge_if_flags = ifp->if_flags;
3134 		break;
3135 	case SIOCADDMULTI:
3136 	case SIOCDELMULTI:
3137 		if (ifp->if_flags & IFF_RUNNING)
3138 			bge_setmulti(sc);
3139 		break;
3140 	case SIOCSIFMEDIA:
3141 	case SIOCGIFMEDIA:
3142 		if (sc->bge_flags & BGE_FLAG_TBI) {
3143 			error = ifmedia_ioctl(ifp, ifr,
3144 			    &sc->bge_ifmedia, command);
3145 		} else {
3146 			struct mii_data *mii;
3147 
3148 			mii = device_get_softc(sc->bge_miibus);
3149 			error = ifmedia_ioctl(ifp, ifr,
3150 					      &mii->mii_media, command);
3151 		}
3152 		break;
	case SIOCSIFCAP:
3154 		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
3155 		if (mask & IFCAP_HWCSUM) {
3156 			ifp->if_capenable ^= (mask & IFCAP_HWCSUM);
3157 			if (IFCAP_HWCSUM & ifp->if_capenable)
3158 				ifp->if_hwassist = BGE_CSUM_FEATURES;
3159 			else
3160 				ifp->if_hwassist = 0;
3161 		}
3162 		break;
3163 	default:
3164 		error = ether_ioctl(ifp, command, data);
3165 		break;
3166 	}
3167 	return error;
3168 }
3169 
3170 static void
3171 bge_watchdog(struct ifnet *ifp)
3172 {
3173 	struct bge_softc *sc = ifp->if_softc;
3174 
3175 	if_printf(ifp, "watchdog timeout -- resetting\n");
3176 
3177 	ifp->if_flags &= ~IFF_RUNNING;
3178 	bge_init(sc);
3179 
3180 	ifp->if_oerrors++;
3181 
3182 	if (!ifq_is_empty(&ifp->if_snd))
3183 		if_devstart(ifp);
3184 }
3185 
3186 /*
3187  * Stop the adapter and free any mbufs allocated to the
3188  * RX and TX lists.
3189  */
3190 static void
3191 bge_stop(struct bge_softc *sc)
3192 {
3193 	struct ifnet *ifp = &sc->arpcom.ac_if;
3194 	struct ifmedia_entry *ifm;
3195 	struct mii_data *mii = NULL;
3196 	int mtmp, itmp;
3197 
3198 	ASSERT_SERIALIZED(ifp->if_serializer);
3199 
3200 	if ((sc->bge_flags & BGE_FLAG_TBI) == 0)
3201 		mii = device_get_softc(sc->bge_miibus);
3202 
3203 	callout_stop(&sc->bge_stat_timer);
3204 
3205 	/*
3206 	 * Disable all of the receiver blocks
3207 	 */
3208 	BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
3209 	BGE_CLRBIT(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
3210 	BGE_CLRBIT(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
3211 	if (!BGE_IS_5705_PLUS(sc))
3212 		BGE_CLRBIT(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
3213 	BGE_CLRBIT(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
3214 	BGE_CLRBIT(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
3215 	BGE_CLRBIT(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
3216 
3217 	/*
3218 	 * Disable all of the transmit blocks
3219 	 */
3220 	BGE_CLRBIT(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
3221 	BGE_CLRBIT(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
3222 	BGE_CLRBIT(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
3223 	BGE_CLRBIT(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
3224 	BGE_CLRBIT(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
3225 	if (!BGE_IS_5705_PLUS(sc))
3226 		BGE_CLRBIT(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
3227 	BGE_CLRBIT(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
3228 
3229 	/*
3230 	 * Shut down all of the memory managers and related
3231 	 * state machines.
3232 	 */
3233 	BGE_CLRBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
3234 	BGE_CLRBIT(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
3235 	if (!BGE_IS_5705_PLUS(sc))
3236 		BGE_CLRBIT(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
3237 	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
3238 	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
3239 	if (!BGE_IS_5705_PLUS(sc)) {
3240 		BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
3241 		BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
3242 	}
3243 
3244 	/* Disable host interrupts. */
3245 	bge_disable_intr(sc);
3246 
3247 	/*
3248 	 * Tell firmware we're shutting down.
3249 	 */
3250 	BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3251 
3252 	/* Free the RX lists. */
3253 	bge_free_rx_ring_std(sc);
3254 
3255 	/* Free jumbo RX list. */
3256 	if (BGE_IS_JUMBO_CAPABLE(sc))
3257 		bge_free_rx_ring_jumbo(sc);
3258 
3259 	/* Free TX buffers. */
3260 	bge_free_tx_ring(sc);
3261 
3262 	/*
3263 	 * Isolate/power down the PHY, but leave the media selection
3264 	 * unchanged so that things will be put back to normal when
3265 	 * we bring the interface back up.
3266 	 *
3267 	 * 'mii' may be NULL in the following cases:
3268 	 * - The device uses TBI.
3269 	 * - bge_stop() is called by bge_detach().
3270 	 */
3271 	if (mii != NULL) {
3272 		itmp = ifp->if_flags;
3273 		ifp->if_flags |= IFF_UP;
3274 		ifm = mii->mii_media.ifm_cur;
3275 		mtmp = ifm->ifm_media;
3276 		ifm->ifm_media = IFM_ETHER|IFM_NONE;
3277 		mii_mediachg(mii);
3278 		ifm->ifm_media = mtmp;
3279 		ifp->if_flags = itmp;
3280 	}
3281 
3282 	sc->bge_link = 0;
3283 	sc->bge_coal_chg = 0;
3284 
3285 	sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
3286 
3287 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
3288 	ifp->if_timer = 0;
3289 }
3290 
3291 /*
3292  * Stop all chip I/O so that the kernel's probe routines don't
3293  * get confused by errant DMAs when rebooting.
3294  */
3295 static void
3296 bge_shutdown(device_t dev)
3297 {
3298 	struct bge_softc *sc = device_get_softc(dev);
3299 	struct ifnet *ifp = &sc->arpcom.ac_if;
3300 
3301 	lwkt_serialize_enter(ifp->if_serializer);
3302 	bge_stop(sc);
3303 	bge_reset(sc);
3304 	lwkt_serialize_exit(ifp->if_serializer);
3305 }
3306 
3307 static int
3308 bge_suspend(device_t dev)
3309 {
3310 	struct bge_softc *sc = device_get_softc(dev);
3311 	struct ifnet *ifp = &sc->arpcom.ac_if;
3312 
3313 	lwkt_serialize_enter(ifp->if_serializer);
3314 	bge_stop(sc);
3315 	lwkt_serialize_exit(ifp->if_serializer);
3316 
3317 	return 0;
3318 }
3319 
3320 static int
3321 bge_resume(device_t dev)
3322 {
3323 	struct bge_softc *sc = device_get_softc(dev);
3324 	struct ifnet *ifp = &sc->arpcom.ac_if;
3325 
3326 	lwkt_serialize_enter(ifp->if_serializer);
3327 
3328 	if (ifp->if_flags & IFF_UP) {
3329 		bge_init(sc);
3330 
3331 		if (!ifq_is_empty(&ifp->if_snd))
3332 			if_devstart(ifp);
3333 	}
3334 
3335 	lwkt_serialize_exit(ifp->if_serializer);
3336 
3337 	return 0;
3338 }
3339 
3340 static void
3341 bge_setpromisc(struct bge_softc *sc)
3342 {
3343 	struct ifnet *ifp = &sc->arpcom.ac_if;
3344 
3345 	if (ifp->if_flags & IFF_PROMISC)
3346 		BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
3347 	else
3348 		BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
3349 }
3350 
3351 static void
3352 bge_dma_free(struct bge_softc *sc)
3353 {
3354 	int i;
3355 
	/* Destroy RX mbuf DMA resources. */
3357 	if (sc->bge_cdata.bge_rx_mtag != NULL) {
3358 		for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
3359 			bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag,
3360 			    sc->bge_cdata.bge_rx_std_dmamap[i]);
3361 		}
3362 		bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag,
3363 				   sc->bge_cdata.bge_rx_tmpmap);
3364 		bus_dma_tag_destroy(sc->bge_cdata.bge_rx_mtag);
3365 	}
3366 
	/* Destroy TX mbuf DMA resources. */
3368 	if (sc->bge_cdata.bge_tx_mtag != NULL) {
3369 		for (i = 0; i < BGE_TX_RING_CNT; i++) {
3370 			bus_dmamap_destroy(sc->bge_cdata.bge_tx_mtag,
3371 			    sc->bge_cdata.bge_tx_dmamap[i]);
3372 		}
3373 		bus_dma_tag_destroy(sc->bge_cdata.bge_tx_mtag);
3374 	}
3375 
3376 	/* Destroy standard RX ring */
3377 	bge_dma_block_free(sc->bge_cdata.bge_rx_std_ring_tag,
3378 			   sc->bge_cdata.bge_rx_std_ring_map,
3379 			   sc->bge_ldata.bge_rx_std_ring);
3380 
3381 	if (BGE_IS_JUMBO_CAPABLE(sc))
3382 		bge_free_jumbo_mem(sc);
3383 
3384 	/* Destroy RX return ring */
3385 	bge_dma_block_free(sc->bge_cdata.bge_rx_return_ring_tag,
3386 			   sc->bge_cdata.bge_rx_return_ring_map,
3387 			   sc->bge_ldata.bge_rx_return_ring);
3388 
3389 	/* Destroy TX ring */
3390 	bge_dma_block_free(sc->bge_cdata.bge_tx_ring_tag,
3391 			   sc->bge_cdata.bge_tx_ring_map,
3392 			   sc->bge_ldata.bge_tx_ring);
3393 
3394 	/* Destroy status block */
3395 	bge_dma_block_free(sc->bge_cdata.bge_status_tag,
3396 			   sc->bge_cdata.bge_status_map,
3397 			   sc->bge_ldata.bge_status_block);
3398 
3399 	/* Destroy statistics block */
3400 	bge_dma_block_free(sc->bge_cdata.bge_stats_tag,
3401 			   sc->bge_cdata.bge_stats_map,
3402 			   sc->bge_ldata.bge_stats);
3403 
3404 	/* Destroy the parent tag */
3405 	if (sc->bge_cdata.bge_parent_tag != NULL)
3406 		bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag);
3407 }

static int
bge_dma_alloc(struct bge_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int i, error;

	/*
	 * Allocate the parent bus DMA tag appropriate for PCI.
	 */
	error = bus_dma_tag_create(NULL, 1, 0,
				   BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   BUS_SPACE_MAXSIZE_32BIT, 0,
				   BUS_SPACE_MAXSIZE_32BIT,
				   0, &sc->bge_cdata.bge_parent_tag);
	if (error) {
		if_printf(ifp, "could not allocate parent dma tag\n");
		return error;
	}

	/*
	 * Create DMA tag and maps for RX mbufs.
	 */
	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1, 0,
				   BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
				   NULL, NULL, MCLBYTES, 1, MCLBYTES,
				   BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK,
				   &sc->bge_cdata.bge_rx_mtag);
	if (error) {
		if_printf(ifp, "could not allocate RX mbuf dma tag\n");
		return error;
	}

	error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag,
				  BUS_DMA_WAITOK, &sc->bge_cdata.bge_rx_tmpmap);
	if (error) {
		bus_dma_tag_destroy(sc->bge_cdata.bge_rx_mtag);
		sc->bge_cdata.bge_rx_mtag = NULL;
		return error;
	}

	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
		error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag,
					  BUS_DMA_WAITOK,
					  &sc->bge_cdata.bge_rx_std_dmamap[i]);
		if (error) {
			int j;

			for (j = 0; j < i; ++j) {
				bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag,
					sc->bge_cdata.bge_rx_std_dmamap[j]);
			}
			bus_dma_tag_destroy(sc->bge_cdata.bge_rx_mtag);
			sc->bge_cdata.bge_rx_mtag = NULL;

			if_printf(ifp, "could not create DMA map for RX\n");
			return error;
		}
	}

	/*
	 * Create DMA tag and maps for TX mbufs.
	 */
	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1, 0,
				   BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   BGE_JUMBO_FRAMELEN, BGE_NSEG_NEW, MCLBYTES,
				   BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK |
				   BUS_DMA_ONEBPAGE,
				   &sc->bge_cdata.bge_tx_mtag);
	if (error) {
		if_printf(ifp, "could not allocate TX mbuf dma tag\n");
		return error;
	}

	for (i = 0; i < BGE_TX_RING_CNT; i++) {
		error = bus_dmamap_create(sc->bge_cdata.bge_tx_mtag,
					  BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
					  &sc->bge_cdata.bge_tx_dmamap[i]);
		if (error) {
			int j;

			for (j = 0; j < i; ++j) {
				bus_dmamap_destroy(sc->bge_cdata.bge_tx_mtag,
					sc->bge_cdata.bge_tx_dmamap[j]);
			}
			bus_dma_tag_destroy(sc->bge_cdata.bge_tx_mtag);
			sc->bge_cdata.bge_tx_mtag = NULL;

			if_printf(ifp, "could not create DMA map for TX\n");
			return error;
		}
	}

	/*
	 * Create DMA resources for the standard RX ring.
	 */
	error = bge_dma_block_alloc(sc, BGE_STD_RX_RING_SZ,
				    &sc->bge_cdata.bge_rx_std_ring_tag,
				    &sc->bge_cdata.bge_rx_std_ring_map,
				    (void **)&sc->bge_ldata.bge_rx_std_ring,
				    &sc->bge_ldata.bge_rx_std_ring_paddr);
	if (error) {
		if_printf(ifp, "could not create std RX ring\n");
		return error;
	}

	/*
	 * Create the jumbo buffer pool.
	 */
	if (BGE_IS_JUMBO_CAPABLE(sc)) {
		error = bge_alloc_jumbo_mem(sc);
		if (error) {
			if_printf(ifp, "could not create jumbo buffer pool\n");
			return error;
		}
	}

	/*
	 * Create DMA resources for the RX return ring.
	 */
	error = bge_dma_block_alloc(sc, BGE_RX_RTN_RING_SZ(sc),
				    &sc->bge_cdata.bge_rx_return_ring_tag,
				    &sc->bge_cdata.bge_rx_return_ring_map,
				    (void **)&sc->bge_ldata.bge_rx_return_ring,
				    &sc->bge_ldata.bge_rx_return_ring_paddr);
	if (error) {
		if_printf(ifp, "could not create RX ret ring\n");
		return error;
	}

	/*
	 * Create DMA resources for the TX ring.
	 */
	error = bge_dma_block_alloc(sc, BGE_TX_RING_SZ,
				    &sc->bge_cdata.bge_tx_ring_tag,
				    &sc->bge_cdata.bge_tx_ring_map,
				    (void **)&sc->bge_ldata.bge_tx_ring,
				    &sc->bge_ldata.bge_tx_ring_paddr);
	if (error) {
		if_printf(ifp, "could not create TX ring\n");
		return error;
	}

	/*
	 * Create DMA resources for the status block.
	 */
	error = bge_dma_block_alloc(sc, BGE_STATUS_BLK_SZ,
				    &sc->bge_cdata.bge_status_tag,
				    &sc->bge_cdata.bge_status_map,
				    (void **)&sc->bge_ldata.bge_status_block,
				    &sc->bge_ldata.bge_status_block_paddr);
	if (error) {
		if_printf(ifp, "could not create status block\n");
		return error;
	}

	/*
	 * Create DMA resources for the statistics block.
	 */
	error = bge_dma_block_alloc(sc, BGE_STATS_SZ,
				    &sc->bge_cdata.bge_stats_tag,
				    &sc->bge_cdata.bge_stats_map,
				    (void **)&sc->bge_ldata.bge_stats,
				    &sc->bge_ldata.bge_stats_paddr);
	if (error) {
		if_printf(ifp, "could not create stats block\n");
		return error;
	}
	return 0;
}

static int
bge_dma_block_alloc(struct bge_softc *sc, bus_size_t size, bus_dma_tag_t *tag,
		    bus_dmamap_t *map, void **addr, bus_addr_t *paddr)
{
	bus_dmamem_t dmem;
	int error;

	error = bus_dmamem_coherent(sc->bge_cdata.bge_parent_tag, PAGE_SIZE, 0,
				    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
				    size, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
	if (error)
		return error;

	*tag = dmem.dmem_tag;
	*map = dmem.dmem_map;
	*addr = dmem.dmem_addr;
	*paddr = dmem.dmem_busaddr;

	return 0;
}
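
/*
 * Usage sketch (hypothetical caller): bge_dma_block_alloc() hands back
 * all four handles for a coherent block, which the caller keeps around
 * for the matching bge_dma_block_free():
 *
 *	bus_dma_tag_t tag;
 *	bus_dmamap_t map;
 *	void *vaddr;
 *	bus_addr_t paddr;
 *
 *	if (bge_dma_block_alloc(sc, BGE_STATUS_BLK_SZ,
 *				&tag, &map, &vaddr, &paddr) == 0) {
 *		...program 'paddr' into the chip, touch 'vaddr' from the CPU...
 *		bge_dma_block_free(tag, map, vaddr);
 *	}
 */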

static void
bge_dma_block_free(bus_dma_tag_t tag, bus_dmamap_t map, void *addr)
{
	if (tag != NULL) {
		bus_dmamap_unload(tag, map);
		bus_dmamem_free(tag, addr, map);
		bus_dma_tag_destroy(tag);
	}
}
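
/*
 * Note that the NULL-tag check above makes bge_dma_block_free() safe
 * to call for blocks that were never allocated, so bge_dma_free() can
 * be used to unwind a bge_dma_alloc() that failed partway through.
 */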

/*
 * Grrr. The link status word in the status block does
 * not work correctly on the BCM5700 rev AX and BX chips,
 * according to all available information. Hence, we have
 * to enable MII interrupts in order to properly obtain
 * async link changes. Unfortunately, this also means that
 * we have to read the MAC status register to detect link
 * changes, thereby adding an additional register access to
 * the interrupt handler.
 *
 * XXX: perhaps the link state detection procedure used for
 * BGE_CHIPID_BCM5700_B2 can be used for other BCM5700 revisions.
 */
static void
bge_bcm5700_link_upd(struct bge_softc *sc, uint32_t status __unused)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii = device_get_softc(sc->bge_miibus);

	mii_pollstat(mii);

	if (!sc->bge_link &&
	    (mii->mii_media_status & IFM_ACTIVE) &&
	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
		sc->bge_link++;
		if (bootverbose)
			if_printf(ifp, "link UP\n");
	} else if (sc->bge_link &&
	    (!(mii->mii_media_status & IFM_ACTIVE) ||
	    IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
		sc->bge_link = 0;
		if (bootverbose)
			if_printf(ifp, "link DOWN\n");
	}

	/* Clear the interrupt. */
	CSR_WRITE_4(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_MI_INTERRUPT);
	bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR);
	bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR, BRGPHY_INTRS);
}

static void
bge_tbi_link_upd(struct bge_softc *sc, uint32_t status)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;

#define PCS_ENCODE_ERR	(BGE_MACSTAT_PORT_DECODE_ERROR|BGE_MACSTAT_MI_COMPLETE)

	/*
	 * Sometimes PCS encoding errors are detected in
	 * TBI mode (on fiber NICs), and for some reason
	 * the chip will signal them as link changes.
	 * If we get a link change event, but the 'PCS
	 * encoding error' bit in the MAC status register
	 * is set, don't bother doing a link check.
	 * This avoids spurious "gigabit link up" messages
	 * that sometimes appear on fiber NICs during
	 * periods of heavy traffic.
	 */
	if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
		if (!sc->bge_link) {
			sc->bge_link++;
			if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
				BGE_CLRBIT(sc, BGE_MAC_MODE,
				    BGE_MACMODE_TBI_SEND_CFGS);
			}
			CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);

			if (bootverbose)
				if_printf(ifp, "link UP\n");

			ifp->if_link_state = LINK_STATE_UP;
			if_link_state_change(ifp);
		}
	} else if ((status & PCS_ENCODE_ERR) != PCS_ENCODE_ERR) {
		if (sc->bge_link) {
			sc->bge_link = 0;

			if (bootverbose)
				if_printf(ifp, "link DOWN\n");

			ifp->if_link_state = LINK_STATE_DOWN;
			if_link_state_change(ifp);
		}
	}

#undef PCS_ENCODE_ERR

	/* Clear the attention. */
	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
	    BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
	    BGE_MACSTAT_LINK_CHANGED);
}
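
/*
 * A worked example of the PCS_ENCODE_ERR test above, with illustrative
 * values: the link-down path is skipped only when both bits are set at
 * once, which is how the spurious encoding-error events show up.
 *
 *	status = BGE_MACSTAT_PORT_DECODE_ERROR;
 *	(status & PCS_ENCODE_ERR) != PCS_ENCODE_ERR	-> true, do link check
 *
 *	status = BGE_MACSTAT_PORT_DECODE_ERROR | BGE_MACSTAT_MI_COMPLETE;
 *	(status & PCS_ENCODE_ERR) != PCS_ENCODE_ERR	-> false, ignore event
 */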

static void
bge_copper_link_upd(struct bge_softc *sc, uint32_t status __unused)
{
	/*
	 * Check that the AUTOPOLL bit is set before
	 * processing the event as a real link change.
	 * Turning AUTOPOLL on and off in the MII read/write
	 * functions will often trigger a link status
	 * interrupt for no reason.
	 */
	if (CSR_READ_4(sc, BGE_MI_MODE) & BGE_MIMODE_AUTOPOLL) {
		struct ifnet *ifp = &sc->arpcom.ac_if;
		struct mii_data *mii = device_get_softc(sc->bge_miibus);

		mii_pollstat(mii);

		if (!sc->bge_link &&
		    (mii->mii_media_status & IFM_ACTIVE) &&
		    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
			sc->bge_link++;
			if (bootverbose)
				if_printf(ifp, "link UP\n");
		} else if (sc->bge_link &&
		    (!(mii->mii_media_status & IFM_ACTIVE) ||
		    IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
			sc->bge_link = 0;
			if (bootverbose)
				if_printf(ifp, "link DOWN\n");
		}
	}

	/* Clear the attention. */
	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
	    BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
	    BGE_MACSTAT_LINK_CHANGED);
}

static int
bge_sysctl_rx_coal_ticks(SYSCTL_HANDLER_ARGS)
{
	struct bge_softc *sc = arg1;

	return bge_sysctl_coal_chg(oidp, arg1, arg2, req,
				   &sc->bge_rx_coal_ticks,
				   BGE_RX_COAL_TICKS_CHG);
}

static int
bge_sysctl_tx_coal_ticks(SYSCTL_HANDLER_ARGS)
{
	struct bge_softc *sc = arg1;

	return bge_sysctl_coal_chg(oidp, arg1, arg2, req,
				   &sc->bge_tx_coal_ticks,
				   BGE_TX_COAL_TICKS_CHG);
}

static int
bge_sysctl_rx_max_coal_bds(SYSCTL_HANDLER_ARGS)
{
	struct bge_softc *sc = arg1;

	return bge_sysctl_coal_chg(oidp, arg1, arg2, req,
				   &sc->bge_rx_max_coal_bds,
				   BGE_RX_MAX_COAL_BDS_CHG);
}

static int
bge_sysctl_tx_max_coal_bds(SYSCTL_HANDLER_ARGS)
{
	struct bge_softc *sc = arg1;

	return bge_sysctl_coal_chg(oidp, arg1, arg2, req,
				   &sc->bge_tx_max_coal_bds,
				   BGE_TX_MAX_COAL_BDS_CHG);
}
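
/*
 * Registration sketch (assumes the sysctl context/tree members that
 * the attach path conventionally keeps in the softc): each wrapper
 * above is exposed through SYSCTL_ADD_PROC(), e.g.:
 *
 *	SYSCTL_ADD_PROC(&sc->bge_sysctl_ctx,
 *			SYSCTL_CHILDREN(sc->bge_sysctl_tree), OID_AUTO,
 *			"rx_coal_ticks", CTLTYPE_INT | CTLFLAG_RW,
 *			sc, 0, bge_sysctl_rx_coal_ticks, "I",
 *			"Receive coalescing ticks (usec).");
 */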

static int
bge_sysctl_coal_chg(SYSCTL_HANDLER_ARGS, uint32_t *coal,
		    uint32_t coal_chg_mask)
{
	struct bge_softc *sc = arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error = 0, v;

	lwkt_serialize_enter(ifp->if_serializer);

	v = *coal;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (!error && req->newptr != NULL) {
		if (v < 0) {
			error = EINVAL;
		} else {
			*coal = v;
			sc->bge_coal_chg |= coal_chg_mask;
		}
	}

	lwkt_serialize_exit(ifp->if_serializer);
	return error;
}
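
/*
 * Note that an accepted value only records the change in
 * sc->bge_coal_chg; the hardware is reprogrammed later by
 * bge_coal_change().  From userland this looks like the following,
 * assuming the sysctl tree is rooted at the device name unit:
 *
 *	sysctl hw.bge0.rx_coal_ticks=150
 */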

static void
bge_coal_change(struct bge_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t val;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if (sc->bge_coal_chg & BGE_RX_COAL_TICKS_CHG) {
		CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS,
			    sc->bge_rx_coal_ticks);
		DELAY(10);
		/*
		 * Read the register back after the delay; the value
		 * itself is unused.  The same write/delay/read-back
		 * pattern is repeated for each parameter below.
		 */
		val = CSR_READ_4(sc, BGE_HCC_RX_COAL_TICKS);

		if (bootverbose) {
			if_printf(ifp, "rx_coal_ticks -> %u\n",
				  sc->bge_rx_coal_ticks);
		}
	}

	if (sc->bge_coal_chg & BGE_TX_COAL_TICKS_CHG) {
		CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS,
			    sc->bge_tx_coal_ticks);
		DELAY(10);
		val = CSR_READ_4(sc, BGE_HCC_TX_COAL_TICKS);

		if (bootverbose) {
			if_printf(ifp, "tx_coal_ticks -> %u\n",
				  sc->bge_tx_coal_ticks);
		}
	}

	if (sc->bge_coal_chg & BGE_RX_MAX_COAL_BDS_CHG) {
		CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS,
			    sc->bge_rx_max_coal_bds);
		DELAY(10);
		val = CSR_READ_4(sc, BGE_HCC_RX_MAX_COAL_BDS);

		if (bootverbose) {
			if_printf(ifp, "rx_max_coal_bds -> %u\n",
				  sc->bge_rx_max_coal_bds);
		}
	}

	if (sc->bge_coal_chg & BGE_TX_MAX_COAL_BDS_CHG) {
		CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS,
			    sc->bge_tx_max_coal_bds);
		DELAY(10);
		val = CSR_READ_4(sc, BGE_HCC_TX_MAX_COAL_BDS);

		if (bootverbose) {
			if_printf(ifp, "tx_max_coal_bds -> %u\n",
				  sc->bge_tx_max_coal_bds);
		}
	}

	sc->bge_coal_chg = 0;
}

static void
bge_enable_intr(struct bge_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_handler_enable(ifp->if_serializer);

	/*
	 * Enable the interrupt.
	 */
	bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);

	/*
	 * Unmask the interrupt when we stop polling.
	 */
	BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);

	/*
	 * Trigger another interrupt, since the above write to
	 * interrupt mailbox 0 may acknowledge a pending interrupt.
	 */
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
}

static void
bge_disable_intr(struct bge_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;

	/*
	 * Mask the interrupt when we start polling.
	 */
	BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);

	/*
	 * Acknowledge a possibly asserted interrupt.
	 */
	bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);

	lwkt_serialize_handler_disable(ifp->if_serializer);
}
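
/*
 * Pairing sketch (assumes a DEVICE_POLLING-style callback): the
 * mask/unmask comments above reflect how a polling handler would
 * bracket these two helpers:
 *
 *	case POLL_REGISTER:
 *		bge_disable_intr(sc);
 *		break;
 *	case POLL_DEREGISTER:
 *		bge_enable_intr(sc);
 *		break;
 */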

static int
bge_get_eaddr_mem(struct bge_softc *sc, uint8_t ether_addr[])
{
	uint32_t mac_addr;
	int ret = 1;

	mac_addr = bge_readmem_ind(sc, 0x0c14);
	if ((mac_addr >> 16) == 0x484b) {
		ether_addr[0] = (uint8_t)(mac_addr >> 8);
		ether_addr[1] = (uint8_t)mac_addr;
		mac_addr = bge_readmem_ind(sc, 0x0c18);
		ether_addr[2] = (uint8_t)(mac_addr >> 24);
		ether_addr[3] = (uint8_t)(mac_addr >> 16);
		ether_addr[4] = (uint8_t)(mac_addr >> 8);
		ether_addr[5] = (uint8_t)mac_addr;
		ret = 0;
	}
	return ret;
}
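
/*
 * Worked example with illustrative values: the 0x484b signature is
 * ASCII "HK".  Given
 *
 *	bge_readmem_ind(sc, 0x0c14) == 0x484b0102
 *	bge_readmem_ind(sc, 0x0c18) == 0x03040506
 *
 * the unpacking above yields the station address 01:02:03:04:05:06.
 */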

static int
bge_get_eaddr_nvram(struct bge_softc *sc, uint8_t ether_addr[])
{
	int mac_offset = BGE_EE_MAC_OFFSET;

	if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
		mac_offset = BGE_EE_MAC_OFFSET_5906;

	return bge_read_nvram(sc, ether_addr, mac_offset + 2, ETHER_ADDR_LEN);
}

static int
bge_get_eaddr_eeprom(struct bge_softc *sc, uint8_t ether_addr[])
{
	if (sc->bge_flags & BGE_FLAG_NO_EEPROM)
		return 1;

	return bge_read_eeprom(sc, ether_addr, BGE_EE_MAC_OFFSET + 2,
			       ETHER_ADDR_LEN);
}

static int
bge_get_eaddr(struct bge_softc *sc, uint8_t eaddr[])
{
	static const bge_eaddr_fcn_t bge_eaddr_funcs[] = {
		/* NOTE: Order is critical */
		bge_get_eaddr_mem,
		bge_get_eaddr_nvram,
		bge_get_eaddr_eeprom,
		NULL
	};
	const bge_eaddr_fcn_t *func;

	for (func = bge_eaddr_funcs; *func != NULL; ++func) {
		if ((*func)(sc, eaddr) == 0)
			break;
	}
	return (*func == NULL ? ENXIO : 0);
}
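
/*
 * Caller sketch (hypothetical attach-path fragment): the whole
 * fallback chain is consumed with a single call:
 *
 *	uint8_t ether_addr[ETHER_ADDR_LEN];
 *
 *	if (bge_get_eaddr(sc, ether_addr)) {
 *		device_printf(dev, "failed to read station address\n");
 *		error = ENXIO;
 *		goto fail;
 *	}
 */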