1 /*-
2  * Copyright (c) 2006-2007 Broadcom Corporation
3  *	David Christensen <davidch@broadcom.com>.  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  *
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. Neither the name of Broadcom Corporation nor the name of its contributors
15  *    may be used to endorse or promote products derived from this software
16  *    without specific prior written consent.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
22  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28  * THE POSSIBILITY OF SUCH DAMAGE.
29  *
30  * $FreeBSD: src/sys/dev/bce/if_bce.c,v 1.31 2007/05/16 23:34:11 davidch Exp $
31  * $DragonFly: src/sys/dev/netif/bce/if_bce.c,v 1.1 2007/05/26 08:50:49 sephe Exp $
32  */
33 
34 /*
35  * The following controllers are supported by this driver:
36  *   BCM5706C A2, A3
37  *   BCM5708C B1, B2
38  *
39  * The following controllers are not supported by this driver:
40  *   BCM5706C A0, A1
41  *   BCM5706S A0, A1, A2, A3
42  *   BCM5708C A0, B0
43  *   BCM5708S A0, B0, B1, B2
44  */
45 
46 #include "opt_bce.h"
47 #include "opt_polling.h"
48 
49 #include <sys/param.h>
50 #include <sys/bus.h>
51 #include <sys/endian.h>
52 #include <sys/kernel.h>
53 #include <sys/mbuf.h>
54 #include <sys/malloc.h>
55 #include <sys/queue.h>
56 #ifdef BCE_DEBUG
57 #include <sys/random.h>
58 #endif
59 #include <sys/rman.h>
60 #include <sys/serialize.h>
61 #include <sys/socket.h>
62 #include <sys/sockio.h>
63 #include <sys/sysctl.h>
64 
65 #include <net/bpf.h>
66 #include <net/ethernet.h>
67 #include <net/if.h>
68 #include <net/if_arp.h>
69 #include <net/if_dl.h>
70 #include <net/if_media.h>
71 #include <net/if_types.h>
72 #include <net/ifq_var.h>
73 #include <net/vlan/if_vlan_var.h>
74 
75 #include <dev/netif/mii_layer/mii.h>
76 #include <dev/netif/mii_layer/miivar.h>
77 
78 #include <bus/pci/pcireg.h>
79 #include <bus/pci/pcivar.h>
80 
81 #include "miibus_if.h"
82 
83 #include "if_bcereg.h"
84 #include "if_bcefw.h"
85 
86 /****************************************************************************/
87 /* BCE Debug Options                                                        */
88 /****************************************************************************/
89 #ifdef BCE_DEBUG
90 
91 static uint32_t	bce_debug = BCE_WARN;
92 
93 /*
94  *          0 = Never
95  *          1 = 1 in 2,147,483,648
96  *        256 = 1 in     8,388,608
97  *       2048 = 1 in     1,048,576
98  *      65536 = 1 in        32,768
99  *    1048576 = 1 in         2,048
100  *  268435456 = 1 in             8
101  *  536870912 = 1 in             4
102  * 1073741824 = 1 in             2
103  *
104  * bce_debug_l2fhdr_status_check:
105  *     How often the l2_fhdr frame error check will fail.
106  *
107  * bce_debug_unexpected_attention:
108  *     How often the unexpected attention check will fail.
109  *
110  * bce_debug_mbuf_allocation_failure:
111  *     How often to simulate an mbuf allocation failure.
112  *
113  * bce_debug_dma_map_addr_failure:
114  *     How often to simulate a DMA mapping failure.
115  *
116  * bce_debug_bootcode_running_failure:
117  *     How often to simulate a bootcode failure.
118  */
119 static int	bce_debug_l2fhdr_status_check = 0;
120 static int	bce_debug_unexpected_attention = 0;
121 static int	bce_debug_mbuf_allocation_failure = 0;
122 static int	bce_debug_dma_map_addr_failure = 0;
123 static int	bce_debug_bootcode_running_failure = 0;
124 
125 #endif	/* BCE_DEBUG */
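
/*
 * Illustrative note (not part of the driver): each value above is
 * compared against a pseudo-random number drawn from a 31-bit range, so
 * a setting of T simulates a failure roughly T times out of every 2^31
 * checks; e.g. 1048576 (2^20) fires about once per 2^31 / 2^20 = 2048
 * attempts, matching the table.  A sketch of such a check, written with
 * a hypothetical 31-bit PRNG helper prng31() purely for illustration:
 *
 *	if (prng31() < (uint32_t)bce_debug_mbuf_allocation_failure)
 *		return (ENOBUFS);
 */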
126 
127 
128 /****************************************************************************/
129 /* PCI Device ID Table                                                      */
130 /*                                                                          */
131 /* Used by bce_probe() to identify the devices supported by this driver.    */
132 /****************************************************************************/
133 #define BCE_DEVDESC_MAX		64
134 
135 static struct bce_type bce_devs[] = {
136 	/* BCM5706C Controllers and OEM boards. */
137 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706,  HP_VENDORID, 0x3101,
138 		"HP NC370T Multifunction Gigabit Server Adapter" },
139 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706,  HP_VENDORID, 0x3106,
140 		"HP NC370i Multifunction Gigabit Server Adapter" },
141 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706,  PCI_ANY_ID,  PCI_ANY_ID,
142 		"Broadcom NetXtreme II BCM5706 1000Base-T" },
143 
144 	/* BCM5706S controllers and OEM boards. */
145 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, HP_VENDORID, 0x3102,
146 		"HP NC370F Multifunction Gigabit Server Adapter" },
147 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, PCI_ANY_ID,  PCI_ANY_ID,
148 		"Broadcom NetXtreme II BCM5706 1000Base-SX" },
149 
150 	/* BCM5708C controllers and OEM boards. */
151 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708,  PCI_ANY_ID,  PCI_ANY_ID,
152 		"Broadcom NetXtreme II BCM5708 1000Base-T" },
153 
154 	/* BCM5708S controllers and OEM boards. */
155 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708S,  PCI_ANY_ID,  PCI_ANY_ID,
156 		"Broadcom NetXtreme II BCM5708S 1000Base-SX" },
157 	{ 0, 0, 0, 0, NULL }
158 };
159 
160 
161 /****************************************************************************/
162 /* Supported Flash NVRAM device data.                                       */
163 /****************************************************************************/
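/*
 * Editorial summary, inferred from how the fields are used below: each
 * entry lists (in order) the flash strapping value used to match the
 * installed part, the NVM configuration and write command words referred
 * to in the "strap, cfg1, & write1" notes, the buffered-flash flag, the
 * page geometry (address bits and page size), the byte address mask, the
 * total device size, and a human-readable name.  The exact struct
 * flash_spec layout lives in if_bcereg.h.
 */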
164 static const struct flash_spec flash_table[] =
165 {
166 	/* Slow EEPROM */
167 	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
168 	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
169 	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
170 	 "EEPROM - slow"},
171 	/* Expansion entry 0001 */
172 	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
173 	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
174 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
175 	 "Entry 0001"},
176 	/* Saifun SA25F010 (non-buffered flash) */
177 	/* strap, cfg1, & write1 need updates */
178 	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
179 	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
180 	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
181 	 "Non-buffered flash (128kB)"},
182 	/* Saifun SA25F020 (non-buffered flash) */
183 	/* strap, cfg1, & write1 need updates */
184 	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
185 	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
186 	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
187 	 "Non-buffered flash (256kB)"},
188 	/* Expansion entry 0100 */
189 	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
190 	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
191 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
192 	 "Entry 0100"},
193 	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
194 	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
195 	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
196 	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
197 	 "Entry 0101: ST M45PE10 (128kB non-buffered)"},
198 	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
199 	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
200 	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
201 	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
202 	 "Entry 0110: ST M45PE20 (256kB non-buffered)"},
203 	/* Saifun SA25F005 (non-buffered flash) */
204 	/* strap, cfg1, & write1 need updates */
205 	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
206 	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
207 	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
208 	 "Non-buffered flash (64kB)"},
209 	/* Fast EEPROM */
210 	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
211 	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
212 	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
213 	 "EEPROM - fast"},
214 	/* Expansion entry 1001 */
215 	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
216 	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
217 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
218 	 "Entry 1001"},
219 	/* Expansion entry 1010 */
220 	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
221 	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
222 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
223 	 "Entry 1010"},
224 	/* ATMEL AT45DB011B (buffered flash) */
225 	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
226 	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
227 	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
228 	 "Buffered flash (128kB)"},
229 	/* Expansion entry 1100 */
230 	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
231 	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
232 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
233 	 "Entry 1100"},
234 	/* Expansion entry 1101 */
235 	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
236 	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
237 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
238 	 "Entry 1101"},
239 	/* Atmel Expansion entry 1110 */
240 	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
241 	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
242 	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
243 	 "Entry 1110 (Atmel)"},
244 	/* ATMEL AT45DB021B (buffered flash) */
245 	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
246 	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
247 	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
248 	 "Buffered flash (256kB)"},
249 };
250 
251 
252 /****************************************************************************/
253 /* DragonFly device entry points.                                           */
254 /****************************************************************************/
255 static int	bce_probe(device_t);
256 static int	bce_attach(device_t);
257 static int	bce_detach(device_t);
258 static void	bce_shutdown(device_t);
259 
260 /****************************************************************************/
261 /* BCE Debug Data Structure Dump Routines                                   */
262 /****************************************************************************/
263 #ifdef BCE_DEBUG
264 static void	bce_dump_mbuf(struct bce_softc *, struct mbuf *);
265 static void	bce_dump_tx_mbuf_chain(struct bce_softc *, int, int);
266 static void	bce_dump_rx_mbuf_chain(struct bce_softc *, int, int);
267 static void	bce_dump_txbd(struct bce_softc *, int, struct tx_bd *);
268 static void	bce_dump_rxbd(struct bce_softc *, int, struct rx_bd *);
269 static void	bce_dump_l2fhdr(struct bce_softc *, int,
270 				struct l2_fhdr *) __unused;
271 static void	bce_dump_tx_chain(struct bce_softc *, int, int);
272 static void	bce_dump_rx_chain(struct bce_softc *, int, int);
273 static void	bce_dump_status_block(struct bce_softc *);
274 static void	bce_dump_driver_state(struct bce_softc *);
275 static void	bce_dump_stats_block(struct bce_softc *) __unused;
276 static void	bce_dump_hw_state(struct bce_softc *);
277 static void	bce_dump_txp_state(struct bce_softc *);
278 static void	bce_dump_rxp_state(struct bce_softc *) __unused;
279 static void	bce_dump_tpat_state(struct bce_softc *) __unused;
280 static void	bce_freeze_controller(struct bce_softc *) __unused;
281 static void	bce_unfreeze_controller(struct bce_softc *) __unused;
282 static void	bce_breakpoint(struct bce_softc *);
283 #endif	/* BCE_DEBUG */
284 
285 
286 /****************************************************************************/
287 /* BCE Register/Memory Access Routines                                      */
288 /****************************************************************************/
289 static uint32_t	bce_reg_rd_ind(struct bce_softc *, uint32_t);
290 static void	bce_reg_wr_ind(struct bce_softc *, uint32_t, uint32_t);
291 static void	bce_ctx_wr(struct bce_softc *, uint32_t, uint32_t, uint32_t);
292 static int	bce_miibus_read_reg(device_t, int, int);
293 static int	bce_miibus_write_reg(device_t, int, int, int);
294 static void	bce_miibus_statchg(device_t);
295 
296 
297 /****************************************************************************/
298 /* BCE NVRAM Access Routines                                                */
299 /****************************************************************************/
300 static int	bce_acquire_nvram_lock(struct bce_softc *);
301 static int	bce_release_nvram_lock(struct bce_softc *);
302 static void	bce_enable_nvram_access(struct bce_softc *);
303 static void	bce_disable_nvram_access(struct bce_softc *);
304 static int	bce_nvram_read_dword(struct bce_softc *, uint32_t, uint8_t *,
305 				     uint32_t);
306 static int	bce_init_nvram(struct bce_softc *);
307 static int	bce_nvram_read(struct bce_softc *, uint32_t, uint8_t *, int);
308 static int	bce_nvram_test(struct bce_softc *);
309 #ifdef BCE_NVRAM_WRITE_SUPPORT
310 static int	bce_enable_nvram_write(struct bce_softc *);
311 static void	bce_disable_nvram_write(struct bce_softc *);
312 static int	bce_nvram_erase_page(struct bce_softc *, uint32_t);
313 static int	bce_nvram_write_dword(struct bce_softc *, uint32_t, uint8_t *, uint32_t);
314 static int	bce_nvram_write(struct bce_softc *, uint32_t, uint8_t *,
315 				int) __unused;
316 #endif
317 
318 /****************************************************************************/
319 /* BCE DMA Allocate/Free Routines                                           */
320 /****************************************************************************/
321 static int	bce_dma_alloc(struct bce_softc *);
322 static void	bce_dma_free(struct bce_softc *);
323 static void	bce_dma_map_addr(void *, bus_dma_segment_t *, int, int);
324 static void	bce_dma_map_mbuf(void *, bus_dma_segment_t *, int,
325 				 bus_size_t, int);
326 
327 /****************************************************************************/
328 /* BCE Firmware Synchronization and Load                                    */
329 /****************************************************************************/
330 static int	bce_fw_sync(struct bce_softc *, uint32_t);
331 static void	bce_load_rv2p_fw(struct bce_softc *, uint32_t *,
332 				 uint32_t, uint32_t);
333 static void	bce_load_cpu_fw(struct bce_softc *, struct cpu_reg *,
334 				struct fw_info *);
335 static void	bce_init_cpus(struct bce_softc *);
336 
337 static void	bce_stop(struct bce_softc *);
338 static int	bce_reset(struct bce_softc *, uint32_t);
339 static int	bce_chipinit(struct bce_softc *);
340 static int	bce_blockinit(struct bce_softc *);
341 static int	bce_newbuf_std(struct bce_softc *, struct mbuf *,
342 			       uint16_t *, uint16_t *, uint32_t *);
343 
344 static int	bce_init_tx_chain(struct bce_softc *);
345 static int	bce_init_rx_chain(struct bce_softc *);
346 static void	bce_free_rx_chain(struct bce_softc *);
347 static void	bce_free_tx_chain(struct bce_softc *);
348 
349 static int	bce_encap(struct bce_softc *, struct mbuf **);
350 static void	bce_start(struct ifnet *);
351 static int	bce_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
352 static void	bce_watchdog(struct ifnet *);
353 static int	bce_ifmedia_upd(struct ifnet *);
354 static void	bce_ifmedia_sts(struct ifnet *, struct ifmediareq *);
355 static void	bce_init(void *);
356 static void	bce_mgmt_init(struct bce_softc *);
357 
358 static void	bce_init_context(struct bce_softc *);
359 static void	bce_get_mac_addr(struct bce_softc *);
360 static void	bce_set_mac_addr(struct bce_softc *);
361 static void	bce_phy_intr(struct bce_softc *);
362 static void	bce_rx_intr(struct bce_softc *, int);
363 static void	bce_tx_intr(struct bce_softc *);
364 static void	bce_disable_intr(struct bce_softc *);
365 static void	bce_enable_intr(struct bce_softc *);
366 
367 #ifdef DEVICE_POLLING
368 static void	bce_poll(struct ifnet *, enum poll_cmd, int);
369 #endif
370 static void	bce_intr(void *);
371 static void	bce_set_rx_mode(struct bce_softc *);
372 static void	bce_stats_update(struct bce_softc *);
373 static void	bce_tick(void *);
374 static void	bce_tick_serialized(struct bce_softc *);
375 static void	bce_add_sysctls(struct bce_softc *);
376 
377 
378 /****************************************************************************/
379 /* DragonFly device dispatch table.                                         */
380 /****************************************************************************/
381 static device_method_t bce_methods[] = {
382 	/* Device interface */
383 	DEVMETHOD(device_probe,		bce_probe),
384 	DEVMETHOD(device_attach,	bce_attach),
385 	DEVMETHOD(device_detach,	bce_detach),
386 	DEVMETHOD(device_shutdown,	bce_shutdown),
387 
388 	/* bus interface */
389 	DEVMETHOD(bus_print_child,	bus_generic_print_child),
390 	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
391 
392 	/* MII interface */
393 	DEVMETHOD(miibus_readreg,	bce_miibus_read_reg),
394 	DEVMETHOD(miibus_writereg,	bce_miibus_write_reg),
395 	DEVMETHOD(miibus_statchg,	bce_miibus_statchg),
396 
397 	{ 0, 0 }
398 };
399 
400 static driver_t bce_driver = {
401 	"bce",
402 	bce_methods,
403 	sizeof(struct bce_softc)
404 };
405 
406 static devclass_t bce_devclass;
407 
408 MODULE_DEPEND(bce, pci, 1, 1, 1);
409 MODULE_DEPEND(bce, ether, 1, 1, 1);
410 MODULE_DEPEND(bce, miibus, 1, 1, 1);
411 
412 DRIVER_MODULE(bce, pci, bce_driver, bce_devclass, 0, 0);
413 DRIVER_MODULE(miibus, bce, miibus_driver, miibus_devclass, 0, 0);
414 
415 
416 /****************************************************************************/
417 /* Device probe function.                                                   */
418 /*                                                                          */
419 /* Compares the device to the driver's list of supported devices and        */
420 /* reports back to the OS whether this is the right driver for the device.  */
421 /*                                                                          */
422 /* Returns:                                                                 */
423 /*   BUS_PROBE_DEFAULT on success, positive value on failure.               */
424 /****************************************************************************/
425 static int
426 bce_probe(device_t dev)
427 {
428 	struct bce_type *t;
429 	uint16_t vid, did, svid, sdid;
430 
431 	/* Get the data for the device to be probed. */
432 	vid  = pci_get_vendor(dev);
433 	did  = pci_get_device(dev);
434 	svid = pci_get_subvendor(dev);
435 	sdid = pci_get_subdevice(dev);
436 
437 	/* Look through the list of known devices for a match. */
438 	for (t = bce_devs; t->bce_name != NULL; ++t) {
439 		if (vid == t->bce_vid && did == t->bce_did &&
440 		    (svid == t->bce_svid || t->bce_svid == PCI_ANY_ID) &&
441 		    (sdid == t->bce_sdid || t->bce_sdid == PCI_ANY_ID)) {
442 		    	uint32_t revid = pci_read_config(dev, PCIR_REVID, 4);
443 			char *descbuf;
444 
445 			descbuf = kmalloc(BCE_DEVDESC_MAX, M_TEMP, M_WAITOK);
446 
447 			/* Print out the device identity. */
448 			ksnprintf(descbuf, BCE_DEVDESC_MAX, "%s (%c%d)",
449 				  t->bce_name,
450 				  ((revid & 0xf0) >> 4) + 'A', revid & 0xf);
451 
452 			device_set_desc_copy(dev, descbuf);
453 			kfree(descbuf, M_TEMP);
454 			return 0;
455 		}
456 	}
457 	return ENXIO;
458 }
459 
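/*
 * Worked example of the revision decode above: a PCI revision ID of
 * 0x12 gives ((0x12 & 0xf0) >> 4) + 'A' = 'B' and (0x12 & 0xf) = 2, so
 * the probe would report, e.g.,
 * "Broadcom NetXtreme II BCM5708 1000Base-T (B2)".
 */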
460 
461 /****************************************************************************/
462 /* Device attach function.                                                  */
463 /*                                                                          */
464 /* Allocates device resources, performs secondary chip identification,      */
465 /* resets and initializes the hardware, and initializes driver instance     */
466 /* variables.                                                               */
467 /*                                                                          */
468 /* Returns:                                                                 */
469 /*   0 on success, positive value on failure.                               */
470 /****************************************************************************/
471 static int
472 bce_attach(device_t dev)
473 {
474 	struct bce_softc *sc = device_get_softc(dev);
475 	struct ifnet *ifp = &sc->arpcom.ac_if;
476 	uint32_t val;
477 	int rid, rc = 0;
478 #ifdef notyet
479 	int count;
480 #endif
481 
482 	sc->bce_dev = dev;
483 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
484 
485 	pci_enable_busmaster(dev);
486 
487 	/* Allocate PCI memory resources. */
488 	rid = PCIR_BAR(0);
489 	sc->bce_res_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
490 						 RF_ACTIVE | PCI_RF_DENSE);
491 	if (sc->bce_res_mem == NULL) {
492 		device_printf(dev, "PCI memory allocation failed\n");
493 		return ENXIO;
494 	}
495 	sc->bce_btag = rman_get_bustag(sc->bce_res_mem);
496 	sc->bce_bhandle = rman_get_bushandle(sc->bce_res_mem);
497 
498 	/* Allocate PCI IRQ resources. */
499 #ifdef notyet
500 	count = pci_msi_count(dev);
501 	if (count == 1 && pci_alloc_msi(dev, &count) == 0) {
502 		rid = 1;
503 		sc->bce_flags |= BCE_USING_MSI_FLAG;
504 	} else
505 #endif
506 	rid = 0;
507 	sc->bce_res_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
508 						 RF_SHAREABLE | RF_ACTIVE);
509 	if (sc->bce_res_irq == NULL) {
510 		device_printf(dev, "PCI map interrupt failed\n");
511 		rc = ENXIO;
512 		goto fail;
513 	}
514 
515 	/*
516 	 * Configure byte swap and enable indirect register access.
517 	 * Rely on CPU to do target byte swapping on big endian systems.
518 	 * Access to registers outside of PCI configuration space is not
519 	 * valid until this is done.
520 	 */
521 	pci_write_config(dev, BCE_PCICFG_MISC_CONFIG,
522 			 BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
523 			 BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP, 4);
524 
525 	/* Save ASIC revision info. */
526 	sc->bce_chipid = REG_RD(sc, BCE_MISC_ID);
527 
528 	/* Weed out any non-production controller revisions. */
529 	switch(BCE_CHIP_ID(sc)) {
530 	case BCE_CHIP_ID_5706_A0:
531 	case BCE_CHIP_ID_5706_A1:
532 	case BCE_CHIP_ID_5708_A0:
533 	case BCE_CHIP_ID_5708_B0:
534 		device_printf(dev, "Unsupported chip id 0x%08x!\n",
535 			      BCE_CHIP_ID(sc));
536 		rc = ENODEV;
537 		goto fail;
538 	}
539 
540 	/*
541 	 * The embedded PCIe to PCI-X bridge (EPB)
542 	 * in the 5708 cannot address memory above
543 	 * 40 bits (E7_5708CB1_23043 & E6_5708SB1_23043).
544 	 */
545 	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708)
546 		sc->max_bus_addr = BCE_BUS_SPACE_MAXADDR;
547 	else
548 		sc->max_bus_addr = BUS_SPACE_MAXADDR;
549 
550 	/*
551 	 * Find the base address for shared memory access.
552 	 * Newer versions of bootcode use a signature and offset
553 	 * while older versions use a fixed address.
554 	 */
555 	val = REG_RD_IND(sc, BCE_SHM_HDR_SIGNATURE);
556 	if ((val & BCE_SHM_HDR_SIGNATURE_SIG_MASK) == BCE_SHM_HDR_SIGNATURE_SIG)
557 		sc->bce_shmem_base = REG_RD_IND(sc, BCE_SHM_HDR_ADDR_0);
558 	else
559 		sc->bce_shmem_base = HOST_VIEW_SHMEM_BASE;
560 
561 	DBPRINT(sc, BCE_INFO, "bce_shmem_base = 0x%08X\n", sc->bce_shmem_base);
562 
563 	/* Get PCI bus information (speed and type). */
564 	val = REG_RD(sc, BCE_PCICFG_MISC_STATUS);
565 	if (val & BCE_PCICFG_MISC_STATUS_PCIX_DET) {
566 		uint32_t clkreg;
567 
568 		sc->bce_flags |= BCE_PCIX_FLAG;
569 
570 		clkreg = REG_RD(sc, BCE_PCICFG_PCI_CLOCK_CONTROL_BITS) &
571 			 BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
572 		switch (clkreg) {
573 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
574 			sc->bus_speed_mhz = 133;
575 			break;
576 
577 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
578 			sc->bus_speed_mhz = 100;
579 			break;
580 
581 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
582 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
583 			sc->bus_speed_mhz = 66;
584 			break;
585 
586 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
587 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
588 			sc->bus_speed_mhz = 50;
589 			break;
590 
591 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
592 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
593 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
594 			sc->bus_speed_mhz = 33;
595 			break;
596 		}
597 	} else {
598 		if (val & BCE_PCICFG_MISC_STATUS_M66EN)
599 			sc->bus_speed_mhz = 66;
600 		else
601 			sc->bus_speed_mhz = 33;
602 	}
603 
604 	if (val & BCE_PCICFG_MISC_STATUS_32BIT_DET)
605 		sc->bce_flags |= BCE_PCI_32BIT_FLAG;
606 
607 	device_printf(dev, "ASIC ID 0x%08X; Revision (%c%d); PCI%s %s %dMHz\n",
608 		      sc->bce_chipid,
609 		      ((BCE_CHIP_ID(sc) & 0xf000) >> 12) + 'A',
610 		      (BCE_CHIP_ID(sc) & 0x0ff0) >> 4,
611 		      (sc->bce_flags & BCE_PCIX_FLAG) ? "-X" : "",
612 		      (sc->bce_flags & BCE_PCI_32BIT_FLAG) ?
613 		      "32-bit" : "64-bit", sc->bus_speed_mhz);
614 
615 	/* Reset the controller. */
616 	rc = bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
617 	if (rc != 0)
618 		goto fail;
619 
620 	/* Initialize the controller. */
621 	rc = bce_chipinit(sc);
622 	if (rc != 0) {
623 		device_printf(dev, "Controller initialization failed!\n");
624 		goto fail;
625 	}
626 
627 	/* Perform NVRAM test. */
628 	rc = bce_nvram_test(sc);
629 	if (rc != 0) {
630 		device_printf(dev, "NVRAM test failed!\n");
631 		goto fail;
632 	}
633 
634 	/* Fetch the permanent Ethernet MAC address. */
635 	bce_get_mac_addr(sc);
636 
637 	/*
638 	 * Trip points control how many BDs
639 	 * should be ready before generating an
640 	 * interrupt while ticks control how long
641 	 * a BD can sit in the chain before
642 	 * generating an interrupt.  Set the default
643 	 * values for the RX and TX rings.
644 	 */
645 
646 #ifdef BCE_DEBUG
647 	/* Force more frequent interrupts. */
648 	sc->bce_tx_quick_cons_trip_int = 1;
649 	sc->bce_tx_quick_cons_trip     = 1;
650 	sc->bce_tx_ticks_int           = 0;
651 	sc->bce_tx_ticks               = 0;
652 
653 	sc->bce_rx_quick_cons_trip_int = 1;
654 	sc->bce_rx_quick_cons_trip     = 1;
655 	sc->bce_rx_ticks_int           = 0;
656 	sc->bce_rx_ticks               = 0;
657 #else
658 	sc->bce_tx_quick_cons_trip_int = 20;
659 	sc->bce_tx_quick_cons_trip     = 20;
660 	sc->bce_tx_ticks_int           = 80;
661 	sc->bce_tx_ticks               = 80;
662 
663 	sc->bce_rx_quick_cons_trip_int = 6;
664 	sc->bce_rx_quick_cons_trip     = 6;
665 	sc->bce_rx_ticks_int           = 18;
666 	sc->bce_rx_ticks               = 18;
667 #endif
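	/*
	 * With the non-debug defaults above an interrupt is generated
	 * once 6 receive (20 transmit) buffer descriptors have completed,
	 * or once a completed descriptor has waited 18 (80) tick units,
	 * whichever happens first.
	 */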
668 
669 	/* Update statistics once every second. */
670 	sc->bce_stats_ticks = 1000000 & 0xffff00;
671 
672 	/*
673 	 * The copper based NetXtreme II controllers
674 	 * use an integrated PHY at address 1 while
675 	 * the SerDes controllers use a PHY at
676 	 * address 2.
677 	 */
678 	sc->bce_phy_addr = 1;
679 
680 	if (BCE_CHIP_BOND_ID(sc) & BCE_CHIP_BOND_ID_SERDES_BIT) {
681 		sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
682 		sc->bce_flags |= BCE_NO_WOL_FLAG;
683 		if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708) {
684 			sc->bce_phy_addr = 2;
685 			val = REG_RD_IND(sc, sc->bce_shmem_base +
686 					 BCE_SHARED_HW_CFG_CONFIG);
687 			if (val & BCE_SHARED_HW_CFG_PHY_2_5G)
688 				sc->bce_phy_flags |= BCE_PHY_2_5G_CAPABLE_FLAG;
689 		}
690 	}
691 
692 	/* Allocate DMA memory resources. */
693 	rc = bce_dma_alloc(sc);
694 	if (rc != 0) {
695 		device_printf(dev, "DMA resource allocation failed!\n");
696 		goto fail;
697 	}
698 
699 	/* Initialize the ifnet interface. */
700 	ifp->if_softc = sc;
701 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
702 	ifp->if_ioctl = bce_ioctl;
703 	ifp->if_start = bce_start;
704 	ifp->if_init = bce_init;
705 	ifp->if_watchdog = bce_watchdog;
706 #ifdef DEVICE_POLLING
707 	ifp->if_poll = bce_poll;
708 #endif
709 	ifp->if_mtu = ETHERMTU;
710 	ifp->if_hwassist = BCE_IF_HWASSIST;
711 	ifp->if_capabilities = BCE_IF_CAPABILITIES;
712 	ifp->if_capenable = ifp->if_capabilities;
713 	ifq_set_maxlen(&ifp->if_snd, USABLE_TX_BD);
714 	ifq_set_ready(&ifp->if_snd);
715 
716 	if (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG)
717 		ifp->if_baudrate = IF_Gbps(2.5);
718 	else
719 		ifp->if_baudrate = IF_Gbps(1);
720 
721 	/* Assume a standard 1500 byte MTU size for mbuf allocations. */
722 	sc->mbuf_alloc_size  = MCLBYTES;
723 
724 	/* Look for our PHY. */
725 	rc = mii_phy_probe(dev, &sc->bce_miibus,
726 			   bce_ifmedia_upd, bce_ifmedia_sts);
727 	if (rc != 0) {
728 		device_printf(dev, "PHY probe failed!\n");
729 		goto fail;
730 	}
731 
732 	/* Attach to the Ethernet interface list. */
733 	ether_ifattach(ifp, sc->eaddr, NULL);
734 
735 	callout_init(&sc->bce_stat_ch);
736 
737 	/* Hookup IRQ last. */
738 	rc = bus_setup_intr(dev, sc->bce_res_irq, INTR_NETSAFE, bce_intr, sc,
739 			    &sc->bce_intrhand, ifp->if_serializer);
740 	if (rc != 0) {
741 		device_printf(dev, "Failed to setup IRQ!\n");
742 		ether_ifdetach(ifp);
743 		goto fail;
744 	}
745 
746 	/* Print some important debugging info. */
747 	DBRUN(BCE_INFO, bce_dump_driver_state(sc));
748 
749 	/* Add the supported sysctls to the kernel. */
750 	bce_add_sysctls(sc);
751 
752 	/* Get the firmware running so IPMI still works */
753 	bce_mgmt_init(sc);
754 
755 	return 0;
756 fail:
757 	bce_detach(dev);
758 	return(rc);
759 }
760 
761 
762 /****************************************************************************/
763 /* Device detach function.                                                  */
764 /*                                                                          */
765 /* Stops the controller, resets the controller, and releases resources.     */
766 /*                                                                          */
767 /* Returns:                                                                 */
768 /*   0 on success, positive value on failure.                               */
769 /****************************************************************************/
770 static int
771 bce_detach(device_t dev)
772 {
773 	struct bce_softc *sc = device_get_softc(dev);
774 
775 	if (device_is_attached(dev)) {
776 		struct ifnet *ifp = &sc->arpcom.ac_if;
777 
778 		/* Stop and reset the controller. */
779 		lwkt_serialize_enter(ifp->if_serializer);
780 		bce_stop(sc);
781 		bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
782 		bus_teardown_intr(dev, sc->bce_res_irq, sc->bce_intrhand);
783 		lwkt_serialize_exit(ifp->if_serializer);
784 
785 		ether_ifdetach(ifp);
786 	}
787 
788 	/* If we have a child device on the MII bus remove it too. */
789 	if (sc->bce_miibus)
790 		device_delete_child(dev, sc->bce_miibus);
791 	bus_generic_detach(dev);
792 
793 	if (sc->bce_res_irq != NULL) {
794 		bus_release_resource(dev, SYS_RES_IRQ,
795 			sc->bce_flags & BCE_USING_MSI_FLAG ? 1 : 0,
796 			sc->bce_res_irq);
797 	}
798 
799 #ifdef notyet
800 	if (sc->bce_flags & BCE_USING_MSI_FLAG)
801 		pci_release_msi(dev);
802 #endif
803 
804 	if (sc->bce_res_mem != NULL) {
805 		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0),
806 				     sc->bce_res_mem);
807 	}
808 
809 	bce_dma_free(sc);
810 
811 	if (sc->bce_sysctl_tree != NULL)
812 		sysctl_ctx_free(&sc->bce_sysctl_ctx);
813 
814 	return 0;
815 }
816 
817 
818 /****************************************************************************/
819 /* Device shutdown function.                                                */
820 /*                                                                          */
821 /* Stops and resets the controller.                                         */
822 /*                                                                          */
823 /* Returns:                                                                 */
824 /*   Nothing                                                                */
825 /****************************************************************************/
826 static void
827 bce_shutdown(device_t dev)
828 {
829 	struct bce_softc *sc = device_get_softc(dev);
830 	struct ifnet *ifp = &sc->arpcom.ac_if;
831 
832 	lwkt_serialize_enter(ifp->if_serializer);
833 	bce_stop(sc);
834 	bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
835 	lwkt_serialize_exit(ifp->if_serializer);
836 }
837 
838 
839 /****************************************************************************/
840 /* Indirect register read.                                                  */
841 /*                                                                          */
842 /* Reads NetXtreme II registers using an index/data register pair in PCI    */
843 /* configuration space.  Using this mechanism avoids issues with posted     */
844 /* reads but is much slower than memory-mapped I/O.                         */
845 /*                                                                          */
846 /* Returns:                                                                 */
847 /*   The value of the register.                                             */
848 /****************************************************************************/
849 static uint32_t
850 bce_reg_rd_ind(struct bce_softc *sc, uint32_t offset)
851 {
852 	device_t dev = sc->bce_dev;
853 
854 	pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4);
855 #ifdef BCE_DEBUG
856 	{
857 		uint32_t val;
858 		val = pci_read_config(dev, BCE_PCICFG_REG_WINDOW, 4);
859 		DBPRINT(sc, BCE_EXCESSIVE,
860 			"%s(); offset = 0x%08X, val = 0x%08X\n",
861 			__func__, offset, val);
862 		return val;
863 	}
864 #else
865 	return pci_read_config(dev, BCE_PCICFG_REG_WINDOW, 4);
866 #endif
867 }
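/*
 * The rest of the driver normally reaches this helper through the
 * REG_RD_IND()/REG_WR_IND() macros (presumably defined in if_bcereg.h),
 * e.g. the shared memory probing in bce_attach() above.
 */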
868 
869 
870 /****************************************************************************/
871 /* Indirect register write.                                                 */
872 /*                                                                          */
873 /* Writes NetXtreme II registers using an index/data register pair in PCI   */
874 /* configuration space.  Using this mechanism avoids issues with posted     */
875  * writes but is much slower than memory-mapped I/O.                        */
876 /*                                                                          */
877 /* Returns:                                                                 */
878 /*   Nothing.                                                               */
879 /****************************************************************************/
880 static void
881 bce_reg_wr_ind(struct bce_softc *sc, uint32_t offset, uint32_t val)
882 {
883 	device_t dev = sc->bce_dev;
884 
885 	DBPRINT(sc, BCE_EXCESSIVE, "%s(); offset = 0x%08X, val = 0x%08X\n",
886 		__func__, offset, val);
887 
888 	pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4);
889 	pci_write_config(dev, BCE_PCICFG_REG_WINDOW, val, 4);
890 }
891 
892 
893 /****************************************************************************/
894 /* Context memory write.                                                    */
895 /*                                                                          */
896 /* The NetXtreme II controller uses context memory to track connection      */
897 /* information for L2 and higher network protocols.                         */
898 /*                                                                          */
899 /* Returns:                                                                 */
900 /*   Nothing.                                                               */
901 /****************************************************************************/
902 static void
903 bce_ctx_wr(struct bce_softc *sc, uint32_t cid_addr, uint32_t offset,
904 	   uint32_t val)
905 {
906 	DBPRINT(sc, BCE_EXCESSIVE, "%s(); cid_addr = 0x%08X, offset = 0x%08X, "
907 		"val = 0x%08X\n", __func__, cid_addr, offset, val);
908 
909 	offset += cid_addr;
910 	REG_WR(sc, BCE_CTX_DATA_ADR, offset);
911 	REG_WR(sc, BCE_CTX_DATA, val);
912 }
913 
914 
915 /****************************************************************************/
916 /* PHY register read.                                                       */
917 /*                                                                          */
918 /* Implements register reads on the MII bus.                                */
919 /*                                                                          */
920 /* Returns:                                                                 */
921 /*   The value of the register.                                             */
922 /****************************************************************************/
923 static int
924 bce_miibus_read_reg(device_t dev, int phy, int reg)
925 {
926 	struct bce_softc *sc = device_get_softc(dev);
927 	uint32_t val;
928 	int i;
929 
930 	/* Make sure we are accessing the correct PHY address. */
931 	if (phy != sc->bce_phy_addr) {
932 		DBPRINT(sc, BCE_VERBOSE,
933 			"Invalid PHY address %d for PHY read!\n", phy);
934 		return 0;
935 	}
936 
937 	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
938 		val = REG_RD(sc, BCE_EMAC_MDIO_MODE);
939 		val &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL;
940 
941 		REG_WR(sc, BCE_EMAC_MDIO_MODE, val);
942 		REG_RD(sc, BCE_EMAC_MDIO_MODE);
943 
944 		DELAY(40);
945 	}
946 
947 	val = BCE_MIPHY(phy) | BCE_MIREG(reg) |
948 	      BCE_EMAC_MDIO_COMM_COMMAND_READ | BCE_EMAC_MDIO_COMM_DISEXT |
949 	      BCE_EMAC_MDIO_COMM_START_BUSY;
950 	REG_WR(sc, BCE_EMAC_MDIO_COMM, val);
951 
952 	for (i = 0; i < BCE_PHY_TIMEOUT; i++) {
953 		DELAY(10);
954 
955 		val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
956 		if (!(val & BCE_EMAC_MDIO_COMM_START_BUSY)) {
957 			DELAY(5);
958 
959 			val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
960 			val &= BCE_EMAC_MDIO_COMM_DATA;
961 			break;
962 		}
963 	}
964 
965 	if (val & BCE_EMAC_MDIO_COMM_START_BUSY) {
966 		if_printf(&sc->arpcom.ac_if,
967 			  "Error: PHY read timeout! phy = %d, reg = 0x%04X\n",
968 			  phy, reg);
969 		val = 0x0;
970 	} else {
971 		val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
972 	}
973 
974 	DBPRINT(sc, BCE_EXCESSIVE,
975 		"%s(): phy = %d, reg = 0x%04X, val = 0x%04X\n",
976 		__func__, phy, (uint16_t)reg & 0xffff, (uint16_t) val & 0xffff);
977 
978 	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
979 		val = REG_RD(sc, BCE_EMAC_MDIO_MODE);
980 		val |= BCE_EMAC_MDIO_MODE_AUTO_POLL;
981 
982 		REG_WR(sc, BCE_EMAC_MDIO_MODE, val);
983 		REG_RD(sc, BCE_EMAC_MDIO_MODE);
984 
985 		DELAY(40);
986 	}
987 	return (val & 0xffff);
988 }
989 
990 
991 /****************************************************************************/
992 /* PHY register write.                                                      */
993 /*                                                                          */
994 /* Implements register writes on the MII bus.                               */
995 /*                                                                          */
996 /* Returns:                                                                 */
997 /*   0 for success, positive value on failure.                              */
998 /****************************************************************************/
999 static int
1000 bce_miibus_write_reg(device_t dev, int phy, int reg, int val)
1001 {
1002 	struct bce_softc *sc = device_get_softc(dev);
1003 	uint32_t val1;
1004 	int i;
1005 
1006 	/* Make sure we are accessing the correct PHY address. */
1007 	if (phy != sc->bce_phy_addr) {
1008 		DBPRINT(sc, BCE_WARN,
1009 			"Invalid PHY address %d for PHY write!\n", phy);
1010 		return(0);
1011 	}
1012 
1013 	DBPRINT(sc, BCE_EXCESSIVE,
1014 		"%s(): phy = %d, reg = 0x%04X, val = 0x%04X\n",
1015 		__func__, phy, (uint16_t)(reg & 0xffff),
1016 		(uint16_t)(val & 0xffff));
1017 
1018 	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1019 		val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE);
1020 		val1 &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL;
1021 
1022 		REG_WR(sc, BCE_EMAC_MDIO_MODE, val1);
1023 		REG_RD(sc, BCE_EMAC_MDIO_MODE);
1024 
1025 		DELAY(40);
1026 	}
1027 
1028 	val1 = BCE_MIPHY(phy) | BCE_MIREG(reg) | val |
1029 		BCE_EMAC_MDIO_COMM_COMMAND_WRITE |
1030 		BCE_EMAC_MDIO_COMM_START_BUSY | BCE_EMAC_MDIO_COMM_DISEXT;
1031 	REG_WR(sc, BCE_EMAC_MDIO_COMM, val1);
1032 
1033 	for (i = 0; i < BCE_PHY_TIMEOUT; i++) {
1034 		DELAY(10);
1035 
1036 		val1 = REG_RD(sc, BCE_EMAC_MDIO_COMM);
1037 		if (!(val1 & BCE_EMAC_MDIO_COMM_START_BUSY)) {
1038 			DELAY(5);
1039 			break;
1040 		}
1041 	}
1042 
1043 	if (val1 & BCE_EMAC_MDIO_COMM_START_BUSY)
1044 		if_printf(&sc->arpcom.ac_if, "PHY write timeout!\n");
1045 
1046 	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1047 		val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE);
1048 		val1 |= BCE_EMAC_MDIO_MODE_AUTO_POLL;
1049 
1050 		REG_WR(sc, BCE_EMAC_MDIO_MODE, val1);
1051 		REG_RD(sc, BCE_EMAC_MDIO_MODE);
1052 
1053 		DELAY(40);
1054 	}
1055 	return 0;
1056 }
1057 
1058 
1059 /****************************************************************************/
1060 /* MII bus status change.                                                   */
1061 /*                                                                          */
1062 /* Called by the MII bus driver when the PHY establishes link to set the    */
1063 /* MAC interface registers.                                                 */
1064 /*                                                                          */
1065 /* Returns:                                                                 */
1066 /*   Nothing.                                                               */
1067 /****************************************************************************/
1068 static void
1069 bce_miibus_statchg(device_t dev)
1070 {
1071 	struct bce_softc *sc = device_get_softc(dev);
1072 	struct mii_data *mii = device_get_softc(sc->bce_miibus);
1073 
1074 	DBPRINT(sc, BCE_INFO, "mii_media_active = 0x%08X\n",
1075 		mii->mii_media_active);
1076 
1077 #ifdef BCE_DEBUG
1078 	/* Decode the interface media flags. */
1079 	if_printf(&sc->arpcom.ac_if, "Media: ( ");
1080 	switch(IFM_TYPE(mii->mii_media_active)) {
1081 	case IFM_ETHER:
1082 		kprintf("Ethernet )");
1083 		break;
1084 	default:
1085 		kprintf("Unknown )");
1086 		break;
1087 	}
1088 
1089 	kprintf(" Media Options: ( ");
1090 	switch(IFM_SUBTYPE(mii->mii_media_active)) {
1091 	case IFM_AUTO:
1092 		kprintf("Autoselect )");
1093 		break;
1094 	case IFM_MANUAL:
1095 		kprintf("Manual )");
1096 		break;
1097 	case IFM_NONE:
1098 		kprintf("None )");
1099 		break;
1100 	case IFM_10_T:
1101 		kprintf("10Base-T )");
1102 		break;
1103 	case IFM_100_TX:
1104 		kprintf("100Base-TX )");
1105 		break;
1106 	case IFM_1000_SX:
1107 		kprintf("1000Base-SX )");
1108 		break;
1109 	case IFM_1000_T:
1110 		kprintf("1000Base-T )");
1111 		break;
1112 	default:
1113 		kprintf("Other )");
1114 		break;
1115 	}
1116 
1117 	kprintf(" Global Options: (");
1118 	if (mii->mii_media_active & IFM_FDX)
1119 		kprintf(" FullDuplex");
1120 	if (mii->mii_media_active & IFM_HDX)
1121 		kprintf(" HalfDuplex");
1122 	if (mii->mii_media_active & IFM_LOOP)
1123 		kprintf(" Loopback");
1124 	if (mii->mii_media_active & IFM_FLAG0)
1125 		kprintf(" Flag0");
1126 	if (mii->mii_media_active & IFM_FLAG1)
1127 		kprintf(" Flag1");
1128 	if (mii->mii_media_active & IFM_FLAG2)
1129 		kprintf(" Flag2");
1130 	kprintf(" )\n");
1131 #endif
1132 
1133 	BCE_CLRBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT);
1134 
1135 	/*
1136 	 * Set MII or GMII interface based on the speed negotiated
1137 	 * by the PHY.
1138 	 */
1139 	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
1140 	    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) {
1141 		DBPRINT(sc, BCE_INFO, "Setting GMII interface.\n");
1142 		BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT_GMII);
1143 	} else {
1144 		DBPRINT(sc, BCE_INFO, "Setting MII interface.\n");
1145 		BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT_MII);
1146 	}
1147 
1148 	/*
1149 	 * Set half or full duplex based on the duplex mode negotiated
1150 	 * by the PHY.
1151 	 */
1152 	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
1153 		DBPRINT(sc, BCE_INFO, "Setting Full-Duplex interface.\n");
1154 		BCE_CLRBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_HALF_DUPLEX);
1155 	} else {
1156 		DBPRINT(sc, BCE_INFO, "Setting Half-Duplex interface.\n");
1157 		BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_HALF_DUPLEX);
1158 	}
1159 }
1160 
1161 
1162 /****************************************************************************/
1163 /* Acquire NVRAM lock.                                                      */
1164 /*                                                                          */
1165 /* Before the NVRAM can be accessed the caller must acquire an NVRAM lock.  */
1166 /* Lock 0 is reserved, lock 1 is used by firmware and lock 2 is             */
1167 /* for use by the driver.                                                   */
1168 /*                                                                          */
1169 /* Returns:                                                                 */
1170 /*   0 on success, positive value on failure.                               */
1171 /****************************************************************************/
1172 static int
1173 bce_acquire_nvram_lock(struct bce_softc *sc)
1174 {
1175 	uint32_t val;
1176 	int j;
1177 
1178 	DBPRINT(sc, BCE_VERBOSE, "Acquiring NVRAM lock.\n");
1179 
1180 	/* Request access to the flash interface. */
1181 	REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_SET2);
1182 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1183 		val = REG_RD(sc, BCE_NVM_SW_ARB);
1184 		if (val & BCE_NVM_SW_ARB_ARB_ARB2)
1185 			break;
1186 
1187 		DELAY(5);
1188 	}
1189 
1190 	if (j >= NVRAM_TIMEOUT_COUNT) {
1191 		DBPRINT(sc, BCE_WARN, "Timeout acquiring NVRAM lock!\n");
1192 		return EBUSY;
1193 	}
1194 	return 0;
1195 }
1196 
1197 
1198 /****************************************************************************/
1199 /* Release NVRAM lock.                                                      */
1200 /*                                                                          */
1201 /* When the caller is finished accessing NVRAM the lock must be released.   */
1202 /* Lock 0 is reserved, lock 1 is used by firmware and lock 2 is             */
1203 /* for use by the driver.                                                   */
1204 /*                                                                          */
1205 /* Returns:                                                                 */
1206 /*   0 on success, positive value on failure.                               */
1207 /****************************************************************************/
1208 static int
1209 bce_release_nvram_lock(struct bce_softc *sc)
1210 {
1211 	int j;
1212 	uint32_t val;
1213 
1214 	DBPRINT(sc, BCE_VERBOSE, "Releasing NVRAM lock.\n");
1215 
1216 	/*
1217 	 * Relinquish nvram interface.
1218 	 */
1219 	REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_CLR2);
1220 
1221 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1222 		val = REG_RD(sc, BCE_NVM_SW_ARB);
1223 		if (!(val & BCE_NVM_SW_ARB_ARB_ARB2))
1224 			break;
1225 
1226 		DELAY(5);
1227 	}
1228 
1229 	if (j >= NVRAM_TIMEOUT_COUNT) {
1230 		DBPRINT(sc, BCE_WARN, "Timeout releasing NVRAM lock!\n");
1231 		return EBUSY;
1232 	}
1233 	return 0;
1234 }
1235 
1236 
1237 #ifdef BCE_NVRAM_WRITE_SUPPORT
1238 /****************************************************************************/
1239 /* Enable NVRAM write access.                                               */
1240 /*                                                                          */
1241 /* Before writing to NVRAM the caller must enable NVRAM writes.             */
1242 /*                                                                          */
1243 /* Returns:                                                                 */
1244 /*   0 on success, positive value on failure.                               */
1245 /****************************************************************************/
1246 static int
1247 bce_enable_nvram_write(struct bce_softc *sc)
1248 {
1249 	uint32_t val;
1250 
1251 	DBPRINT(sc, BCE_VERBOSE, "Enabling NVRAM write.\n");
1252 
1253 	val = REG_RD(sc, BCE_MISC_CFG);
1254 	REG_WR(sc, BCE_MISC_CFG, val | BCE_MISC_CFG_NVM_WR_EN_PCI);
1255 
1256 	if (!sc->bce_flash_info->buffered) {
1257 		int j;
1258 
1259 		REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
1260 		REG_WR(sc, BCE_NVM_COMMAND,
1261 		       BCE_NVM_COMMAND_WREN | BCE_NVM_COMMAND_DOIT);
1262 
1263 		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1264 			DELAY(5);
1265 
1266 			val = REG_RD(sc, BCE_NVM_COMMAND);
1267 			if (val & BCE_NVM_COMMAND_DONE)
1268 				break;
1269 		}
1270 
1271 		if (j >= NVRAM_TIMEOUT_COUNT) {
1272 			DBPRINT(sc, BCE_WARN, "Timeout writing NVRAM!\n");
1273 			return EBUSY;
1274 		}
1275 	}
1276 	return 0;
1277 }
1278 
1279 
1280 /****************************************************************************/
1281 /* Disable NVRAM write access.                                              */
1282 /*                                                                          */
1283 /* When the caller is finished writing to NVRAM write access must be        */
1284 /* disabled.                                                                */
1285 /*                                                                          */
1286 /* Returns:                                                                 */
1287 /*   Nothing.                                                               */
1288 /****************************************************************************/
1289 static void
1290 bce_disable_nvram_write(struct bce_softc *sc)
1291 {
1292 	uint32_t val;
1293 
1294 	DBPRINT(sc, BCE_VERBOSE, "Disabling NVRAM write.\n");
1295 
1296 	val = REG_RD(sc, BCE_MISC_CFG);
1297 	REG_WR(sc, BCE_MISC_CFG, val & ~BCE_MISC_CFG_NVM_WR_EN);
1298 }
1299 #endif	/* BCE_NVRAM_WRITE_SUPPORT */
1300 
1301 
1302 /****************************************************************************/
1303 /* Enable NVRAM access.                                                     */
1304 /*                                                                          */
1305 /* Before accessing NVRAM for read or write operations the caller must      */
1306 /* enable NVRAM access.                                                      */
1307 /*                                                                          */
1308 /* Returns:                                                                 */
1309 /*   Nothing.                                                               */
1310 /****************************************************************************/
1311 static void
1312 bce_enable_nvram_access(struct bce_softc *sc)
1313 {
1314 	uint32_t val;
1315 
1316 	DBPRINT(sc, BCE_VERBOSE, "Enabling NVRAM access.\n");
1317 
1318 	val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE);
1319 	/* Enable both bits, even on read. */
1320 	REG_WR(sc, BCE_NVM_ACCESS_ENABLE,
1321 	       val | BCE_NVM_ACCESS_ENABLE_EN | BCE_NVM_ACCESS_ENABLE_WR_EN);
1322 }
1323 
1324 
1325 /****************************************************************************/
1326 /* Disable NVRAM access.                                                    */
1327 /*                                                                          */
1328 /* When the caller is finished accessing NVRAM access must be disabled.     */
1329 /*                                                                          */
1330 /* Returns:                                                                 */
1331 /*   Nothing.                                                               */
1332 /****************************************************************************/
1333 static void
1334 bce_disable_nvram_access(struct bce_softc *sc)
1335 {
1336 	uint32_t val;
1337 
1338 	DBPRINT(sc, BCE_VERBOSE, "Disabling NVRAM access.\n");
1339 
1340 	val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE);
1341 
1342 	/* Disable both bits, even after read. */
1343 	REG_WR(sc, BCE_NVM_ACCESS_ENABLE,
1344 	       val & ~(BCE_NVM_ACCESS_ENABLE_EN | BCE_NVM_ACCESS_ENABLE_WR_EN));
1345 }
1346 
1347 
1348 #ifdef BCE_NVRAM_WRITE_SUPPORT
1349 /****************************************************************************/
1350 /* Erase NVRAM page before writing.                                         */
1351 /*                                                                          */
1352 /* Non-buffered flash parts require that a page be erased before it is      */
1353 /* written.                                                                 */
1354 /*                                                                          */
1355 /* Returns:                                                                 */
1356 /*   0 on success, positive value on failure.                               */
1357 /****************************************************************************/
1358 static int
1359 bce_nvram_erase_page(struct bce_softc *sc, uint32_t offset)
1360 {
1361 	uint32_t cmd;
1362 	int j;
1363 
1364 	/* Buffered flash doesn't require an erase. */
1365 	if (sc->bce_flash_info->buffered)
1366 		return 0;
1367 
1368 	DBPRINT(sc, BCE_VERBOSE, "Erasing NVRAM page.\n");
1369 
1370 	/* Build an erase command. */
1371 	cmd = BCE_NVM_COMMAND_ERASE | BCE_NVM_COMMAND_WR |
1372 	      BCE_NVM_COMMAND_DOIT;
1373 
1374 	/*
1375 	 * Clear the DONE bit separately, set the NVRAM address to erase,
1376 	 * and issue the erase command.
1377 	 */
1378 	REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
1379 	REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
1380 	REG_WR(sc, BCE_NVM_COMMAND, cmd);
1381 
1382 	/* Wait for completion. */
1383 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1384 		uint32_t val;
1385 
1386 		DELAY(5);
1387 
1388 		val = REG_RD(sc, BCE_NVM_COMMAND);
1389 		if (val & BCE_NVM_COMMAND_DONE)
1390 			break;
1391 	}
1392 
1393 	if (j >= NVRAM_TIMEOUT_COUNT) {
1394 		DBPRINT(sc, BCE_WARN, "Timeout erasing NVRAM.\n");
1395 		return EBUSY;
1396 	}
1397 	return 0;
1398 }
1399 #endif /* BCE_NVRAM_WRITE_SUPPORT */
1400 
1401 
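/*
 * Sketch of the call order these NVRAM helpers expect, pieced together
 * from the routine headers in this file (the real callers' locking and
 * error handling omitted):
 *
 *	bce_acquire_nvram_lock(sc);
 *	bce_enable_nvram_access(sc);
 *	bce_nvram_read_dword(sc, offset, buf, cmd_flags);
 *	bce_disable_nvram_access(sc);
 *	bce_release_nvram_lock(sc);
 */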
1402 /****************************************************************************/
1403 /* Read a dword (32 bits) from NVRAM.                                       */
1404 /*                                                                          */
1405 /* Read a 32 bit word from NVRAM.  The caller is assumed to have already    */
1406 /* obtained the NVRAM lock and enabled the controller for NVRAM access.     */
1407 /*                                                                          */
1408 /* Returns:                                                                 */
1409 /*   0 on success and the 32 bit value read, positive value on failure.     */
1410 /****************************************************************************/
1411 static int
1412 bce_nvram_read_dword(struct bce_softc *sc, uint32_t offset, uint8_t *ret_val,
1413 		     uint32_t cmd_flags)
1414 {
1415 	uint32_t cmd;
1416 	int i, rc = 0;
1417 
1418 	/* Build the command word. */
1419 	cmd = BCE_NVM_COMMAND_DOIT | cmd_flags;
1420 
1421 	/* Calculate the offset for buffered flash. */
1422 	if (sc->bce_flash_info->buffered) {
1423 		offset = ((offset / sc->bce_flash_info->page_size) <<
1424 			  sc->bce_flash_info->page_bits) +
1425 			 (offset % sc->bce_flash_info->page_size);
1426 	}
1427 
1428 	/*
1429 	 * Clear the DONE bit separately, set the address to read,
1430 	 * and issue the read.
1431 	 */
1432 	REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
1433 	REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
1434 	REG_WR(sc, BCE_NVM_COMMAND, cmd);
1435 
1436 	/* Wait for completion. */
1437 	for (i = 0; i < NVRAM_TIMEOUT_COUNT; i++) {
1438 		uint32_t val;
1439 
1440 		DELAY(5);
1441 
1442 		val = REG_RD(sc, BCE_NVM_COMMAND);
1443 		if (val & BCE_NVM_COMMAND_DONE) {
1444 			val = REG_RD(sc, BCE_NVM_READ);
1445 
1446 			val = be32toh(val);
1447 			memcpy(ret_val, &val, 4);
1448 			break;
1449 		}
1450 	}
1451 
1452 	/* Check for errors. */
1453 	if (i >= NVRAM_TIMEOUT_COUNT) {
1454 		if_printf(&sc->arpcom.ac_if,
1455 			  "Timeout error reading NVRAM at offset 0x%08X!\n",
1456 			  offset);
1457 		rc = EBUSY;
1458 	}
1459 	return rc;
1460 }
1461 
1462 
1463 #ifdef BCE_NVRAM_WRITE_SUPPORT
1464 /****************************************************************************/
1465 /* Write a dword (32 bits) to NVRAM.                                        */
1466 /*                                                                          */
1467 /* Write a 32 bit word to NVRAM.  The caller is assumed to have already     */
1468 /* obtained the NVRAM lock, enabled the controller for NVRAM access, and    */
1469 /* enabled NVRAM write access.                                              */
1470 /*                                                                          */
1471 /* Returns:                                                                 */
1472 /*   0 on success, positive value on failure.                               */
1473 /****************************************************************************/
1474 static int
1475 bce_nvram_write_dword(struct bce_softc *sc, uint32_t offset, uint8_t *val,
1476 		      uint32_t cmd_flags)
1477 {
1478 	uint32_t cmd, val32;
1479 	int j;
1480 
1481 	/* Build the command word. */
1482 	cmd = BCE_NVM_COMMAND_DOIT | BCE_NVM_COMMAND_WR | cmd_flags;
1483 
1484 	/* Calculate the offset for buffered flash. */
1485 	if (sc->bce_flash_info->buffered) {
1486 		offset = ((offset / sc->bce_flash_info->page_size) <<
1487 			  sc->bce_flash_info->page_bits) +
1488 			 (offset % sc->bce_flash_info->page_size);
1489 	}
1490 
1491 	/*
1492 	 * Clear the DONE bit separately, convert NVRAM data to big-endian,
	 * set the NVRAM address to write, and issue the write command.
1494 	 */
1495 	REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
1496 	memcpy(&val32, val, 4);
1497 	val32 = htobe32(val32);
1498 	REG_WR(sc, BCE_NVM_WRITE, val32);
1499 	REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
1500 	REG_WR(sc, BCE_NVM_COMMAND, cmd);
1501 
1502 	/* Wait for completion. */
1503 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1504 		DELAY(5);
1505 
1506 		if (REG_RD(sc, BCE_NVM_COMMAND) & BCE_NVM_COMMAND_DONE)
1507 			break;
1508 	}
1509 	if (j >= NVRAM_TIMEOUT_COUNT) {
1510 		if_printf(&sc->arpcom.ac_if,
1511 			  "Timeout error writing NVRAM at offset 0x%08X\n",
1512 			  offset);
1513 		return EBUSY;
1514 	}
1515 	return 0;
1516 }
1517 #endif /* BCE_NVRAM_WRITE_SUPPORT */
1518 
1519 
1520 /****************************************************************************/
1521 /* Initialize NVRAM access.                                                 */
1522 /*                                                                          */
1523 /* Identify the NVRAM device in use and prepare the NVRAM interface to      */
1524 /* access that device.                                                      */
1525 /*                                                                          */
1526 /* Returns:                                                                 */
1527 /*   0 on success, positive value on failure.                               */
1528 /****************************************************************************/
1529 static int
1530 bce_init_nvram(struct bce_softc *sc)
1531 {
1532 	uint32_t val;
1533 	int j, entry_count, rc = 0;
1534 	const struct flash_spec *flash;
1535 
1536 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __func__);
1537 
1538 	/* Determine the selected interface. */
1539 	val = REG_RD(sc, BCE_NVM_CFG1);
1540 
1541 	entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
1542 
1543 	/*
1544 	 * Flash reconfiguration is required to support additional
1545 	 * NVRAM devices not directly supported in hardware.
1546 	 * Check if the flash interface was reconfigured
1547 	 * by the bootcode.
1548 	 */
1549 
1550 	if (val & 0x40000000) {
1551 		/* Flash interface reconfigured by bootcode. */
1552 
1553 		DBPRINT(sc, BCE_INFO_LOAD,
1554 			"%s(): Flash WAS reconfigured.\n", __func__);
1555 
1556 		for (j = 0, flash = flash_table; j < entry_count;
1557 		     j++, flash++) {
1558 			if ((val & FLASH_BACKUP_STRAP_MASK) ==
1559 			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
1560 				sc->bce_flash_info = flash;
1561 				break;
1562 			}
1563 		}
1564 	} else {
1565 		/* Flash interface not yet reconfigured. */
1566 		uint32_t mask;
1567 
1568 		DBPRINT(sc, BCE_INFO_LOAD,
1569 			"%s(): Flash was NOT reconfigured.\n", __func__);
1570 
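		/*
		 * Bit 23 of NVM_CFG1 is assumed to indicate that the backup
		 * strapping values are in effect; choose the strap mask to
		 * match against accordingly.
		 */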
1571 		if (val & (1 << 23))
1572 			mask = FLASH_BACKUP_STRAP_MASK;
1573 		else
1574 			mask = FLASH_STRAP_MASK;
1575 
1576 		/* Look for the matching NVRAM device configuration data. */
1577 		for (j = 0, flash = flash_table; j < entry_count;
1578 		     j++, flash++) {
1579 			/* Check if the device matches any of the known devices. */
1580 			if ((val & mask) == (flash->strapping & mask)) {
1581 				/* Found a device match. */
1582 				sc->bce_flash_info = flash;
1583 
1584 				/* Request access to the flash interface. */
1585 				rc = bce_acquire_nvram_lock(sc);
1586 				if (rc != 0)
1587 					return rc;
1588 
1589 				/* Reconfigure the flash interface. */
1590 				bce_enable_nvram_access(sc);
1591 				REG_WR(sc, BCE_NVM_CFG1, flash->config1);
1592 				REG_WR(sc, BCE_NVM_CFG2, flash->config2);
1593 				REG_WR(sc, BCE_NVM_CFG3, flash->config3);
1594 				REG_WR(sc, BCE_NVM_WRITE1, flash->write1);
1595 				bce_disable_nvram_access(sc);
1596 				bce_release_nvram_lock(sc);
1597 				break;
1598 			}
1599 		}
1600 	}
1601 
1602 	/* Check if a matching device was found. */
	if (j == entry_count) {
		sc->bce_flash_info = NULL;
		if_printf(&sc->arpcom.ac_if, "Unknown Flash NVRAM found!\n");
		/*
		 * Return here; the code below dereferences bce_flash_info,
		 * which is NULL when no supported device was found.
		 */
		return ENODEV;
	}
1608 
1609 	/* Write the flash config data to the shared memory interface. */
1610 	val = REG_RD_IND(sc, sc->bce_shmem_base + BCE_SHARED_HW_CFG_CONFIG2) &
1611 	      BCE_SHARED_HW_CFG2_NVM_SIZE_MASK;
1612 	if (val)
1613 		sc->bce_flash_size = val;
1614 	else
1615 		sc->bce_flash_size = sc->bce_flash_info->total_size;
1616 
1617 	DBPRINT(sc, BCE_INFO_LOAD, "%s() flash->total_size = 0x%08X\n",
1618 		__func__, sc->bce_flash_info->total_size);
1619 
1620 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __func__);
1621 
1622 	return rc;
1623 }
1624 
1625 
1626 /****************************************************************************/
1627 /* Read an arbitrary range of data from NVRAM.                              */
1628 /*                                                                          */
1629 /* Prepares the NVRAM interface for access and reads the requested data     */
1630 /* into the supplied buffer.                                                */
1631 /*                                                                          */
1632 /* Returns:                                                                 */
1633 /*   0 on success and the data read, positive value on failure.             */
1634 /****************************************************************************/
1635 static int
1636 bce_nvram_read(struct bce_softc *sc, uint32_t offset, uint8_t *ret_buf,
1637 	       int buf_size)
1638 {
1639 	uint32_t cmd_flags, offset32, len32, extra;
1640 	int rc = 0;
1641 
1642 	if (buf_size == 0)
1643 		return 0;
1644 
1645 	/* Request access to the flash interface. */
1646 	rc = bce_acquire_nvram_lock(sc);
1647 	if (rc != 0)
1648 		return rc;
1649 
1650 	/* Enable access to flash interface */
1651 	bce_enable_nvram_access(sc);
1652 
1653 	len32 = buf_size;
1654 	offset32 = offset;
1655 	extra = 0;
1656 
1657 	cmd_flags = 0;
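
	/*
	 * NVRAM is read one dword at a time.  BCE_NVM_COMMAND_FIRST must
	 * accompany the first access of a sequence and BCE_NVM_COMMAND_LAST
	 * the final one, so the unaligned head, the aligned body, and the
	 * unaligned tail are handled separately below to place those flags
	 * on the correct accesses.
	 */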
1658 
1659 	/* XXX should we release nvram lock if read_dword() fails? */
1660 	if (offset32 & 3) {
1661 		uint8_t buf[4];
1662 		uint32_t pre_len;
1663 
1664 		offset32 &= ~3;
1665 		pre_len = 4 - (offset & 3);
1666 
1667 		if (pre_len >= len32) {
1668 			pre_len = len32;
1669 			cmd_flags = BCE_NVM_COMMAND_FIRST | BCE_NVM_COMMAND_LAST;
1670 		} else {
1671 			cmd_flags = BCE_NVM_COMMAND_FIRST;
1672 		}
1673 
1674 		rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
1675 		if (rc)
1676 			return rc;
1677 
1678 		memcpy(ret_buf, buf + (offset & 3), pre_len);
1679 
1680 		offset32 += 4;
1681 		ret_buf += pre_len;
1682 		len32 -= pre_len;
1683 	}
1684 
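	/*
	 * Round the remaining length up to a whole dword; 'extra' counts the
	 * trailing pad bytes that are read from the device but not copied
	 * into the caller's buffer.
	 */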
1685 	if (len32 & 3) {
1686 		extra = 4 - (len32 & 3);
1687 		len32 = (len32 + 4) & ~3;
1688 	}
1689 
1690 	if (len32 == 4) {
1691 		uint8_t buf[4];
1692 
1693 		if (cmd_flags)
1694 			cmd_flags = BCE_NVM_COMMAND_LAST;
1695 		else
1696 			cmd_flags = BCE_NVM_COMMAND_FIRST |
1697 				    BCE_NVM_COMMAND_LAST;
1698 
1699 		rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
1700 
1701 		memcpy(ret_buf, buf, 4 - extra);
1702 	} else if (len32 > 0) {
1703 		uint8_t buf[4];
1704 
1705 		/* Read the first word. */
1706 		if (cmd_flags)
1707 			cmd_flags = 0;
1708 		else
1709 			cmd_flags = BCE_NVM_COMMAND_FIRST;
1710 
1711 		rc = bce_nvram_read_dword(sc, offset32, ret_buf, cmd_flags);
1712 
1713 		/* Advance to the next dword. */
1714 		offset32 += 4;
1715 		ret_buf += 4;
1716 		len32 -= 4;
1717 
1718 		while (len32 > 4 && rc == 0) {
1719 			rc = bce_nvram_read_dword(sc, offset32, ret_buf, 0);
1720 
1721 			/* Advance to the next dword. */
1722 			offset32 += 4;
1723 			ret_buf += 4;
1724 			len32 -= 4;
1725 		}
1726 
1727 		if (rc)
1728 			return rc;
1729 
1730 		cmd_flags = BCE_NVM_COMMAND_LAST;
1731 		rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
1732 
1733 		memcpy(ret_buf, buf, 4 - extra);
1734 	}
1735 
1736 	/* Disable access to flash interface and release the lock. */
1737 	bce_disable_nvram_access(sc);
1738 	bce_release_nvram_lock(sc);
1739 
1740 	return rc;
1741 }
1742 
1743 
1744 #ifdef BCE_NVRAM_WRITE_SUPPORT
1745 /****************************************************************************/
/* Write an arbitrary range of data to NVRAM.                               */
1747 /*                                                                          */
1748 /* Prepares the NVRAM interface for write access and writes the requested   */
1749 /* data from the supplied buffer.  The caller is responsible for            */
1750 /* calculating any appropriate CRCs.                                        */
1751 /*                                                                          */
1752 /* Returns:                                                                 */
1753 /*   0 on success, positive value on failure.                               */
1754 /****************************************************************************/
1755 static int
1756 bce_nvram_write(struct bce_softc *sc, uint32_t offset, uint8_t *data_buf,
1757 		int buf_size)
1758 {
1759 	uint32_t written, offset32, len32;
1760 	uint8_t *buf, start[4], end[4];
1761 	int rc = 0;
1762 	int align_start, align_end;
1763 
1764 	buf = data_buf;
1765 	offset32 = offset;
1766 	len32 = buf_size;
1767 	align_end = 0;
1768 	align_start = (offset32 & 3);
1769 
1770 	if (align_start) {
1771 		offset32 &= ~3;
1772 		len32 += align_start;
1773 		rc = bce_nvram_read(sc, offset32, start, 4);
1774 		if (rc)
1775 			return rc;
1776 	}
1777 
1778 	if (len32 & 3) {
		if (len32 > 4 || !align_start) {
1780 			align_end = 4 - (len32 & 3);
1781 			len32 += align_end;
1782 			rc = bce_nvram_read(sc, offset32 + len32 - 4, end, 4);
1783 			if (rc)
1784 				return rc;
1785 		}
1786 	}
1787 
1788 	if (align_start || align_end) {
1789 		buf = kmalloc(len32, M_DEVBUF, M_NOWAIT);
1790 		if (buf == NULL)
1791 			return ENOMEM;
1792 		if (align_start)
1793 			memcpy(buf, start, 4);
1794 		if (align_end)
1795 			memcpy(buf + len32 - 4, end, 4);
1796 		memcpy(buf + align_start, data_buf, buf_size);
1797 	}
1798 
1799 	written = 0;
1800 	while (written < len32 && rc == 0) {
1801 		uint32_t page_start, page_end, data_start, data_end;
1802 		uint32_t addr, cmd_flags;
1803 		int i;
1804 		uint8_t flash_buffer[264];
1805 
1806 		/* Find the page_start addr */
1807 		page_start = offset32 + written;
1808 		page_start -= (page_start % sc->bce_flash_info->page_size);
1809 		/* Find the page_end addr */
1810 		page_end = page_start + sc->bce_flash_info->page_size;
1811 		/* Find the data_start addr */
1812 		data_start = (written == 0) ? offset32 : page_start;
1813 		/* Find the data_end addr */
1814 		data_end = (page_end > offset32 + len32) ? (offset32 + len32)
1815 							 : page_end;
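
		/*
		 * Non-buffered flash is rewritten a full page at a time: the
		 * existing page is read into flash_buffer, the page is
		 * erased, and the page is written back with the new data
		 * spliced in between data_start and data_end.
		 */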
1816 
1817 		/* Request access to the flash interface. */
1818 		rc = bce_acquire_nvram_lock(sc);
1819 		if (rc != 0)
1820 			goto nvram_write_end;
1821 
1822 		/* Enable access to flash interface */
1823 		bce_enable_nvram_access(sc);
1824 
1825 		cmd_flags = BCE_NVM_COMMAND_FIRST;
1826 		if (sc->bce_flash_info->buffered == 0) {
1827 			int j;
1828 
1829 			/*
1830 			 * Read the whole page into the buffer
1831 			 * (non-buffer flash only)
1832 			 */
1833 			for (j = 0; j < sc->bce_flash_info->page_size; j += 4) {
1834 				if (j == (sc->bce_flash_info->page_size - 4))
1835 					cmd_flags |= BCE_NVM_COMMAND_LAST;
1836 
1837 				rc = bce_nvram_read_dword(sc, page_start + j,
1838 							  &flash_buffer[j],
1839 							  cmd_flags);
1840 				if (rc)
1841 					goto nvram_write_end;
1842 
1843 				cmd_flags = 0;
1844 			}
1845 		}
1846 
1847 		/* Enable writes to flash interface (unlock write-protect) */
1848 		rc = bce_enable_nvram_write(sc);
1849 		if (rc != 0)
1850 			goto nvram_write_end;
1851 
1852 		/* Erase the page */
1853 		rc = bce_nvram_erase_page(sc, page_start);
1854 		if (rc != 0)
1855 			goto nvram_write_end;
1856 
1857 		/* Re-enable the write again for the actual write */
1858 		bce_enable_nvram_write(sc);
1859 
1860 		/* Loop to write back the buffer data from page_start to
1861 		 * data_start */
1862 		i = 0;
1863 		if (sc->bce_flash_info->buffered == 0) {
1864 			for (addr = page_start; addr < data_start;
1865 			     addr += 4, i += 4) {
1866 				rc = bce_nvram_write_dword(sc, addr,
1867 							   &flash_buffer[i],
1868 							   cmd_flags);
1869 				if (rc != 0)
1870 					goto nvram_write_end;
1871 
1872 				cmd_flags = 0;
1873 			}
1874 		}
1875 
1876 		/* Loop to write the new data from data_start to data_end */
1877 		for (addr = data_start; addr < data_end; addr += 4, i++) {
1878 			if (addr == page_end - 4 ||
1879 			    (sc->bce_flash_info->buffered &&
1880 			     addr == data_end - 4))
1881 				cmd_flags |= BCE_NVM_COMMAND_LAST;
1882 
1883 			rc = bce_nvram_write_dword(sc, addr, buf, cmd_flags);
1884 			if (rc != 0)
1885 				goto nvram_write_end;
1886 
1887 			cmd_flags = 0;
1888 			buf += 4;
1889 		}
1890 
1891 		/* Loop to write back the buffer data from data_end
1892 		 * to page_end */
1893 		if (sc->bce_flash_info->buffered == 0) {
1894 			for (addr = data_end; addr < page_end;
1895 			     addr += 4, i += 4) {
				if (addr == page_end - 4)
1897 					cmd_flags = BCE_NVM_COMMAND_LAST;
1898 
1899 				rc = bce_nvram_write_dword(sc, addr,
1900 					&flash_buffer[i], cmd_flags);
1901 				if (rc != 0)
1902 					goto nvram_write_end;
1903 
1904 				cmd_flags = 0;
1905 			}
1906 		}
1907 
1908 		/* Disable writes to flash interface (lock write-protect) */
1909 		bce_disable_nvram_write(sc);
1910 
1911 		/* Disable access to flash interface */
1912 		bce_disable_nvram_access(sc);
1913 		bce_release_nvram_lock(sc);
1914 
1915 		/* Increment written */
1916 		written += data_end - data_start;
1917 	}
1918 
1919 nvram_write_end:
1920 	if (align_start || align_end)
1921 		kfree(buf, M_DEVBUF);
1922 	return rc;
1923 }
1924 #endif /* BCE_NVRAM_WRITE_SUPPORT */
1925 
1926 
1927 /****************************************************************************/
1928 /* Verifies that NVRAM is accessible and contains valid data.               */
1929 /*                                                                          */
1930 /* Reads the configuration data from NVRAM and verifies that the CRC is     */
1931 /* correct.                                                                 */
1932 /*                                                                          */
1933 /* Returns:                                                                 */
1934 /*   0 on success, positive value on failure.                               */
1935 /****************************************************************************/
1936 static int
1937 bce_nvram_test(struct bce_softc *sc)
1938 {
1939 	uint32_t buf[BCE_NVRAM_SIZE / 4];
1940 	uint32_t magic, csum;
1941 	uint8_t *data = (uint8_t *)buf;
1942 	int rc = 0;
1943 
1944 	/*
1945 	 * Check that the device NVRAM is valid by reading
1946 	 * the magic value at offset 0.
1947 	 */
1948 	rc = bce_nvram_read(sc, 0, data, 4);
1949 	if (rc != 0)
1950 		return rc;
1951 
1952 	magic = be32toh(buf[0]);
1953 	if (magic != BCE_NVRAM_MAGIC) {
1954 		if_printf(&sc->arpcom.ac_if,
1955 			  "Invalid NVRAM magic value! Expected: 0x%08X, "
1956 			  "Found: 0x%08X\n", BCE_NVRAM_MAGIC, magic);
1957 		return ENODEV;
1958 	}
1959 
1960 	/*
1961 	 * Verify that the device NVRAM includes valid
1962 	 * configuration data.
1963 	 */
1964 	rc = bce_nvram_read(sc, 0x100, data, BCE_NVRAM_SIZE);
1965 	if (rc != 0)
1966 		return rc;
1967 
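	/*
	 * Each 0x100-byte configuration region carries its own CRC32;
	 * computing the CRC over the whole region, stored CRC included,
	 * yields a fixed residual value when the data is intact.
	 */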
1968 	csum = ether_crc32_le(data, 0x100);
1969 	if (csum != BCE_CRC32_RESIDUAL) {
1970 		if_printf(&sc->arpcom.ac_if,
1971 			  "Invalid Manufacturing Information NVRAM CRC! "
1972 			  "Expected: 0x%08X, Found: 0x%08X\n",
1973 			  BCE_CRC32_RESIDUAL, csum);
1974 		return ENODEV;
1975 	}
1976 
1977 	csum = ether_crc32_le(data + 0x100, 0x100);
1978 	if (csum != BCE_CRC32_RESIDUAL) {
1979 		if_printf(&sc->arpcom.ac_if,
1980 			  "Invalid Feature Configuration Information "
1981 			  "NVRAM CRC! Expected: 0x%08X, Found: 08%08X\n",
1982 			  BCE_CRC32_RESIDUAL, csum);
1983 		rc = ENODEV;
1984 	}
1985 	return rc;
1986 }
1987 
1988 
1989 /****************************************************************************/
1990 /* Free any DMA memory owned by the driver.                                 */
1991 /*                                                                          */
/* Scans through each data structure that requires DMA memory and frees     */
1993 /* the memory if allocated.                                                 */
1994 /*                                                                          */
1995 /* Returns:                                                                 */
1996 /*   Nothing.                                                               */
1997 /****************************************************************************/
1998 static void
1999 bce_dma_free(struct bce_softc *sc)
2000 {
2001 	int i;
2002 
2003 	/* Destroy the status block. */
2004 	if (sc->status_tag != NULL) {
2005 		if (sc->status_block != NULL) {
2006 			bus_dmamap_unload(sc->status_tag, sc->status_map);
2007 			bus_dmamem_free(sc->status_tag, sc->status_block,
2008 					sc->status_map);
2009 		}
2010 		bus_dma_tag_destroy(sc->status_tag);
2011 	}
2012 
2013 
2014 	/* Destroy the statistics block. */
2015 	if (sc->stats_tag != NULL) {
2016 		if (sc->stats_block != NULL) {
2017 			bus_dmamap_unload(sc->stats_tag, sc->stats_map);
2018 			bus_dmamem_free(sc->stats_tag, sc->stats_block,
2019 					sc->stats_map);
2020 		}
2021 		bus_dma_tag_destroy(sc->stats_tag);
2022 	}
2023 
	/* Destroy the TX buffer descriptor DMA resources. */
2025 	if (sc->tx_bd_chain_tag != NULL) {
2026 		for (i = 0; i < TX_PAGES; i++) {
2027 			if (sc->tx_bd_chain[i] != NULL) {
2028 				bus_dmamap_unload(sc->tx_bd_chain_tag,
2029 						  sc->tx_bd_chain_map[i]);
2030 				bus_dmamem_free(sc->tx_bd_chain_tag,
2031 						sc->tx_bd_chain[i],
2032 						sc->tx_bd_chain_map[i]);
2033 			}
2034 		}
2035 		bus_dma_tag_destroy(sc->tx_bd_chain_tag);
2036 	}
2037 
	/* Destroy the RX buffer descriptor DMA resources. */
2039 	if (sc->rx_bd_chain_tag != NULL) {
2040 		for (i = 0; i < RX_PAGES; i++) {
2041 			if (sc->rx_bd_chain[i] != NULL) {
2042 				bus_dmamap_unload(sc->rx_bd_chain_tag,
2043 						  sc->rx_bd_chain_map[i]);
2044 				bus_dmamem_free(sc->rx_bd_chain_tag,
2045 						sc->rx_bd_chain[i],
2046 						sc->rx_bd_chain_map[i]);
2047 			}
2048 		}
2049 		bus_dma_tag_destroy(sc->rx_bd_chain_tag);
2050 	}
2051 
	/* Destroy the TX mbuf DMA resources. */
2053 	if (sc->tx_mbuf_tag != NULL) {
2054 		for (i = 0; i < TOTAL_TX_BD; i++) {
2055 			/* Must have been unloaded in bce_stop() */
2056 			KKASSERT(sc->tx_mbuf_ptr[i] == NULL);
2057 			bus_dmamap_destroy(sc->tx_mbuf_tag,
2058 					   sc->tx_mbuf_map[i]);
2059 		}
2060 		bus_dma_tag_destroy(sc->tx_mbuf_tag);
2061 	}
2062 
	/* Destroy the RX mbuf DMA resources. */
2064 	if (sc->rx_mbuf_tag != NULL) {
2065 		for (i = 0; i < TOTAL_RX_BD; i++) {
2066 			/* Must have been unloaded in bce_stop() */
2067 			KKASSERT(sc->rx_mbuf_ptr[i] == NULL);
2068 			bus_dmamap_destroy(sc->rx_mbuf_tag,
2069 					   sc->rx_mbuf_map[i]);
2070 		}
2071 		bus_dma_tag_destroy(sc->rx_mbuf_tag);
2072 	}
2073 
2074 	/* Destroy the parent tag */
2075 	if (sc->parent_tag != NULL)
2076 		bus_dma_tag_destroy(sc->parent_tag);
2077 }
2078 
2079 
2080 /****************************************************************************/
2081 /* Get DMA memory from the OS.                                              */
2082 /*                                                                          */
2083 /* Validates that the OS has provided DMA buffers in response to a          */
2084 /* bus_dmamap_load() call and saves the physical address of those buffers.  */
/* Only a single segment is expected; its address is stored in the          */
/* bus_addr_t that the caller passes as the callback argument.  On error    */
/* the address is left untouched.                                           */
2088 /*                                                                          */
2089 /* Returns:                                                                 */
2090 /*   Nothing.                                                               */
2091 /****************************************************************************/
2092 static void
2093 bce_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2094 {
2095 	bus_addr_t *busaddr = arg;
2096 
2097 	/*
2098 	 * Simulate a mapping failure.
2099 	 * XXX not correct.
2100 	 */
2101 	DBRUNIF(DB_RANDOMTRUE(bce_debug_dma_map_addr_failure),
2102 		kprintf("bce: %s(%d): Simulating DMA mapping error.\n",
2103 			__FILE__, __LINE__);
2104 		error = ENOMEM);
2105 
2106 	/* Check for an error and signal the caller that an error occurred. */
2107 	if (error)
2108 		return;
2109 
2110 	KASSERT(nseg == 1, ("only one segment is allowed\n"));
2111 	*busaddr = segs->ds_addr;
2112 }
2113 
2114 
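/*
 * Callback for bus_dmamap_load_mbuf().  Copies the returned DMA segments
 * into the caller supplied context; a segment count larger than
 * bce_maxsegs is reported back by setting bce_maxsegs to 0.
 */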
2115 static void
2116 bce_dma_map_mbuf(void *arg, bus_dma_segment_t *segs, int nsegs,
2117 		 bus_size_t mapsz __unused, int error)
2118 {
2119 	struct bce_dmamap_arg *ctx = arg;
2120 	int i;
2121 
2122 	if (error)
2123 		return;
2124 
2125 	if (nsegs > ctx->bce_maxsegs) {
2126 		ctx->bce_maxsegs = 0;
2127 		return;
2128 	}
2129 
2130 	ctx->bce_maxsegs = nsegs;
2131 	for (i = 0; i < nsegs; ++i)
2132 		ctx->bce_segs[i] = segs[i];
2133 }
2134 
2135 
2136 /****************************************************************************/
2137 /* Allocate any DMA memory needed by the driver.                            */
2138 /*                                                                          */
/* Allocates the DMA memory required for the various global structures      */
/* used by the hardware.                                                     */
2141 /*                                                                          */
2142 /* Returns:                                                                 */
2143 /*   0 for success, positive value for failure.                             */
2144 /****************************************************************************/
2145 static int
2146 bce_dma_alloc(struct bce_softc *sc)
2147 {
2148 	struct ifnet *ifp = &sc->arpcom.ac_if;
2149 	int i, j, rc = 0;
2150 	bus_addr_t busaddr;
2151 
2152 	/*
2153 	 * Allocate the parent bus DMA tag appropriate for PCI.
2154 	 */
2155 	rc = bus_dma_tag_create(NULL, 1, BCE_DMA_BOUNDARY,
2156 				sc->max_bus_addr, BUS_SPACE_MAXADDR,
2157 				NULL, NULL,
2158 				MAXBSIZE, BUS_SPACE_UNRESTRICTED,
2159 				BUS_SPACE_MAXSIZE_32BIT,
2160 				0, &sc->parent_tag);
2161 	if (rc != 0) {
2162 		if_printf(ifp, "Could not allocate parent DMA tag!\n");
2163 		return rc;
2164 	}
2165 
2166 	/*
2167 	 * Create a DMA tag for the status block, allocate and clear the
2168 	 * memory, map the memory into DMA space, and fetch the physical
2169 	 * address of the block.
2170 	 */
2171 	rc = bus_dma_tag_create(sc->parent_tag,
2172 				BCE_DMA_ALIGN, BCE_DMA_BOUNDARY,
2173 				sc->max_bus_addr, BUS_SPACE_MAXADDR,
2174 				NULL, NULL,
2175 				BCE_STATUS_BLK_SZ, 1, BCE_STATUS_BLK_SZ,
2176 				0, &sc->status_tag);
2177 	if (rc != 0) {
2178 		if_printf(ifp, "Could not allocate status block DMA tag!\n");
2179 		return rc;
2180 	}
2181 
2182 	rc = bus_dmamem_alloc(sc->status_tag, (void **)&sc->status_block,
2183 			      BUS_DMA_WAITOK | BUS_DMA_ZERO,
2184 			      &sc->status_map);
2185 	if (rc != 0) {
2186 		if_printf(ifp, "Could not allocate status block DMA memory!\n");
2187 		return rc;
2188 	}
2189 
2190 	rc = bus_dmamap_load(sc->status_tag, sc->status_map,
2191 			     sc->status_block, BCE_STATUS_BLK_SZ,
2192 			     bce_dma_map_addr, &busaddr, BUS_DMA_WAITOK);
2193 	if (rc != 0) {
2194 		if_printf(ifp, "Could not map status block DMA memory!\n");
2195 		bus_dmamem_free(sc->status_tag, sc->status_block,
2196 				sc->status_map);
2197 		sc->status_block = NULL;
2198 		return rc;
2199 	}
2200 
2201 	sc->status_block_paddr = busaddr;
2202 	/* DRC - Fix for 64 bit addresses. */
2203 	DBPRINT(sc, BCE_INFO, "status_block_paddr = 0x%08X\n",
2204 		(uint32_t)sc->status_block_paddr);
2205 
2206 	/*
2207 	 * Create a DMA tag for the statistics block, allocate and clear the
2208 	 * memory, map the memory into DMA space, and fetch the physical
2209 	 * address of the block.
2210 	 */
2211 	rc = bus_dma_tag_create(sc->parent_tag,
2212 				BCE_DMA_ALIGN, BCE_DMA_BOUNDARY,
2213 				sc->max_bus_addr, BUS_SPACE_MAXADDR,
2214 				NULL, NULL,
2215 				BCE_STATS_BLK_SZ, 1, BCE_STATS_BLK_SZ,
2216 				0, &sc->stats_tag);
2217 	if (rc != 0) {
2218 		if_printf(ifp, "Could not allocate "
2219 			  "statistics block DMA tag!\n");
2220 		return rc;
2221 	}
2222 
2223 	rc = bus_dmamem_alloc(sc->stats_tag, (void **)&sc->stats_block,
2224 			      BUS_DMA_WAITOK | BUS_DMA_ZERO,
2225 			      &sc->stats_map);
2226 	if (rc != 0) {
2227 		if_printf(ifp, "Could not allocate "
2228 			  "statistics block DMA memory!\n");
2229 		return rc;
2230 	}
2231 
2232 	rc = bus_dmamap_load(sc->stats_tag, sc->stats_map,
2233 			     sc->stats_block, BCE_STATS_BLK_SZ,
2234 			     bce_dma_map_addr, &busaddr, BUS_DMA_WAITOK);
2235 	if (rc != 0) {
2236 		if_printf(ifp, "Could not map statistics block DMA memory!\n");
2237 		bus_dmamem_free(sc->stats_tag, sc->stats_block, sc->stats_map);
2238 		sc->stats_block = NULL;
2239 		return rc;
2240 	}
2241 
2242 	sc->stats_block_paddr = busaddr;
2243 	/* DRC - Fix for 64 bit address. */
2244 	DBPRINT(sc, BCE_INFO, "stats_block_paddr = 0x%08X\n",
2245 		(uint32_t)sc->stats_block_paddr);
2246 
2247 	/*
2248 	 * Create a DMA tag for the TX buffer descriptor chain,
2249 	 * allocate and clear the  memory, and fetch the
2250 	 * physical address of the block.
2251 	 */
2252 	rc = bus_dma_tag_create(sc->parent_tag,
2253 				BCM_PAGE_SIZE, BCE_DMA_BOUNDARY,
2254 				sc->max_bus_addr, BUS_SPACE_MAXADDR,
2255 				NULL, NULL,
2256 				BCE_TX_CHAIN_PAGE_SZ, 1, BCE_TX_CHAIN_PAGE_SZ,
2257 				0, &sc->tx_bd_chain_tag);
2258 	if (rc != 0) {
2259 		if_printf(ifp, "Could not allocate "
2260 			  "TX descriptor chain DMA tag!\n");
2261 		return rc;
2262 	}
2263 
2264 	for (i = 0; i < TX_PAGES; i++) {
2265 		rc = bus_dmamem_alloc(sc->tx_bd_chain_tag,
2266 				      (void **)&sc->tx_bd_chain[i],
2267 				      BUS_DMA_WAITOK, &sc->tx_bd_chain_map[i]);
2268 		if (rc != 0) {
2269 			if_printf(ifp, "Could not allocate %dth TX descriptor "
2270 				  "chain DMA memory!\n", i);
2271 			return rc;
2272 		}
2273 
2274 		rc = bus_dmamap_load(sc->tx_bd_chain_tag,
2275 				     sc->tx_bd_chain_map[i],
2276 				     sc->tx_bd_chain[i], BCE_TX_CHAIN_PAGE_SZ,
2277 				     bce_dma_map_addr, &busaddr,
2278 				     BUS_DMA_WAITOK);
2279 		if (rc != 0) {
2280 			if_printf(ifp, "Could not map %dth TX descriptor "
2281 				  "chain DMA memory!\n", i);
2282 			bus_dmamem_free(sc->tx_bd_chain_tag,
2283 					sc->tx_bd_chain[i],
2284 					sc->tx_bd_chain_map[i]);
2285 			sc->tx_bd_chain[i] = NULL;
2286 			return rc;
2287 		}
2288 
2289 		sc->tx_bd_chain_paddr[i] = busaddr;
2290 		/* DRC - Fix for 64 bit systems. */
2291 		DBPRINT(sc, BCE_INFO, "tx_bd_chain_paddr[%d] = 0x%08X\n",
2292 			i, (uint32_t)sc->tx_bd_chain_paddr[i]);
2293 	}
2294 
2295 	/* Create a DMA tag for TX mbufs. */
2296 	rc = bus_dma_tag_create(sc->parent_tag, 1, BCE_DMA_BOUNDARY,
2297 				sc->max_bus_addr, BUS_SPACE_MAXADDR,
2298 				NULL, NULL,
2299 				MCLBYTES * BCE_MAX_SEGMENTS,
2300 				BCE_MAX_SEGMENTS, MCLBYTES,
2301 				0, &sc->tx_mbuf_tag);
2302 	if (rc != 0) {
2303 		if_printf(ifp, "Could not allocate TX mbuf DMA tag!\n");
2304 		return rc;
2305 	}
2306 
2307 	/* Create DMA maps for the TX mbufs clusters. */
2308 	for (i = 0; i < TOTAL_TX_BD; i++) {
2309 		rc = bus_dmamap_create(sc->tx_mbuf_tag, BUS_DMA_WAITOK,
2310 				       &sc->tx_mbuf_map[i]);
2311 		if (rc != 0) {
2312 			for (j = 0; j < i; ++j) {
2313 				bus_dmamap_destroy(sc->tx_mbuf_tag,
						   sc->tx_mbuf_map[j]);
2315 			}
2316 			bus_dma_tag_destroy(sc->tx_mbuf_tag);
2317 			sc->tx_mbuf_tag = NULL;
2318 
2319 			if_printf(ifp, "Unable to create "
2320 				  "%dth TX mbuf DMA map!\n", i);
2321 			return rc;
2322 		}
2323 	}
2324 
2325 	/*
2326 	 * Create a DMA tag for the RX buffer descriptor chain,
2327 	 * allocate and clear the  memory, and fetch the physical
2328 	 * address of the blocks.
2329 	 */
2330 	rc = bus_dma_tag_create(sc->parent_tag,
2331 				BCM_PAGE_SIZE, BCE_DMA_BOUNDARY,
2332 				sc->max_bus_addr, BUS_SPACE_MAXADDR,
2333 				NULL, NULL,
2334 				BCE_RX_CHAIN_PAGE_SZ, 1, BCE_RX_CHAIN_PAGE_SZ,
2335 				0, &sc->rx_bd_chain_tag);
2336 	if (rc != 0) {
2337 		if_printf(ifp, "Could not allocate "
2338 			  "RX descriptor chain DMA tag!\n");
2339 		return rc;
2340 	}
2341 
2342 	for (i = 0; i < RX_PAGES; i++) {
2343 		rc = bus_dmamem_alloc(sc->rx_bd_chain_tag,
2344 				      (void **)&sc->rx_bd_chain[i],
2345 				      BUS_DMA_WAITOK | BUS_DMA_ZERO,
2346 				      &sc->rx_bd_chain_map[i]);
2347 		if (rc != 0) {
2348 			if_printf(ifp, "Could not allocate %dth RX descriptor "
2349 				  "chain DMA memory!\n", i);
2350 			return rc;
2351 		}
2352 
2353 		rc = bus_dmamap_load(sc->rx_bd_chain_tag,
2354 				     sc->rx_bd_chain_map[i],
2355 				     sc->rx_bd_chain[i], BCE_RX_CHAIN_PAGE_SZ,
2356 				     bce_dma_map_addr, &busaddr,
2357 				     BUS_DMA_WAITOK);
2358 		if (rc != 0) {
2359 			if_printf(ifp, "Could not map %dth RX descriptor "
2360 				  "chain DMA memory!\n", i);
2361 			bus_dmamem_free(sc->rx_bd_chain_tag,
2362 					sc->rx_bd_chain[i],
2363 					sc->rx_bd_chain_map[i]);
2364 			sc->rx_bd_chain[i] = NULL;
2365 			return rc;
2366 		}
2367 
2368 		sc->rx_bd_chain_paddr[i] = busaddr;
2369 		/* DRC - Fix for 64 bit systems. */
2370 		DBPRINT(sc, BCE_INFO, "rx_bd_chain_paddr[%d] = 0x%08X\n",
2371 			i, (uint32_t)sc->rx_bd_chain_paddr[i]);
2372 	}
2373 
2374 	/* Create a DMA tag for RX mbufs. */
2375 	rc = bus_dma_tag_create(sc->parent_tag, 1, BCE_DMA_BOUNDARY,
2376 				sc->max_bus_addr, BUS_SPACE_MAXADDR,
2377 				NULL, NULL,
2378 				MCLBYTES, 1/* BCE_MAX_SEGMENTS */, MCLBYTES,
2379 				0, &sc->rx_mbuf_tag);
2380 	if (rc != 0) {
2381 		if_printf(ifp, "Could not allocate RX mbuf DMA tag!\n");
2382 		return rc;
2383 	}
2384 
2385 	/* Create DMA maps for the RX mbuf clusters. */
2386 	for (i = 0; i < TOTAL_RX_BD; i++) {
2387 		rc = bus_dmamap_create(sc->rx_mbuf_tag, BUS_DMA_WAITOK,
2388 				       &sc->rx_mbuf_map[i]);
2389 		if (rc != 0) {
2390 			for (j = 0; j < i; ++j) {
2391 				bus_dmamap_destroy(sc->rx_mbuf_tag,
2392 						   sc->rx_mbuf_map[j]);
2393 			}
2394 			bus_dma_tag_destroy(sc->rx_mbuf_tag);
2395 			sc->rx_mbuf_tag = NULL;
2396 
2397 			if_printf(ifp, "Unable to create "
2398 				  "%dth RX mbuf DMA map!\n", i);
2399 			return rc;
2400 		}
2401 	}
2402 	return 0;
2403 }
2404 
2405 
2406 /****************************************************************************/
2407 /* Firmware synchronization.                                                */
2408 /*                                                                          */
2409 /* Before performing certain events such as a chip reset, synchronize with  */
2410 /* the firmware first.                                                      */
2411 /*                                                                          */
2412 /* Returns:                                                                 */
2413 /*   0 for success, positive value for failure.                             */
2414 /****************************************************************************/
2415 static int
2416 bce_fw_sync(struct bce_softc *sc, uint32_t msg_data)
2417 {
2418 	int i, rc = 0;
2419 	uint32_t val;
2420 
2421 	/* Don't waste any time if we've timed out before. */
2422 	if (sc->bce_fw_timed_out)
2423 		return EBUSY;
2424 
2425 	/* Increment the message sequence number. */
2426 	sc->bce_fw_wr_seq++;
2427 	msg_data |= sc->bce_fw_wr_seq;
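
	/*
	 * The low bits of the mailbox word carry the sequence number; the
	 * bootcode echoes it back through its own mailbox to acknowledge
	 * the message.
	 */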
2428 
2429  	DBPRINT(sc, BCE_VERBOSE, "bce_fw_sync(): msg_data = 0x%08X\n", msg_data);
2430 
2431 	/* Send the message to the bootcode driver mailbox. */
2432 	REG_WR_IND(sc, sc->bce_shmem_base + BCE_DRV_MB, msg_data);
2433 
2434 	/* Wait for the bootcode to acknowledge the message. */
2435 	for (i = 0; i < FW_ACK_TIME_OUT_MS; i++) {
2436 		/* Check for a response in the bootcode firmware mailbox. */
2437 		val = REG_RD_IND(sc, sc->bce_shmem_base + BCE_FW_MB);
2438 		if ((val & BCE_FW_MSG_ACK) == (msg_data & BCE_DRV_MSG_SEQ))
2439 			break;
2440 		DELAY(1000);
2441 	}
2442 
2443 	/* If we've timed out, tell the bootcode that we've stopped waiting. */
2444 	if ((val & BCE_FW_MSG_ACK) != (msg_data & BCE_DRV_MSG_SEQ) &&
2445 	    (msg_data & BCE_DRV_MSG_DATA) != BCE_DRV_MSG_DATA_WAIT0) {
2446 		if_printf(&sc->arpcom.ac_if,
2447 			  "Firmware synchronization timeout! "
2448 			  "msg_data = 0x%08X\n", msg_data);
2449 
2450 		msg_data &= ~BCE_DRV_MSG_CODE;
2451 		msg_data |= BCE_DRV_MSG_CODE_FW_TIMEOUT;
2452 
2453 		REG_WR_IND(sc, sc->bce_shmem_base + BCE_DRV_MB, msg_data);
2454 
2455 		sc->bce_fw_timed_out = 1;
2456 		rc = EBUSY;
2457 	}
2458 	return rc;
2459 }
2460 
2461 
2462 /****************************************************************************/
2463 /* Load Receive Virtual 2 Physical (RV2P) processor firmware.               */
2464 /*                                                                          */
2465 /* Returns:                                                                 */
2466 /*   Nothing.                                                               */
2467 /****************************************************************************/
2468 static void
2469 bce_load_rv2p_fw(struct bce_softc *sc, uint32_t *rv2p_code,
2470 		 uint32_t rv2p_code_len, uint32_t rv2p_proc)
2471 {
2472 	int i;
2473 	uint32_t val;
2474 
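	/*
	 * RV2P instructions are loaded 64 bits at a time: write the high and
	 * low halves and then latch them into the processor's instruction
	 * memory at index i/8 through the ADDR_CMD register.
	 */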
2475 	for (i = 0; i < rv2p_code_len; i += 8) {
2476 		REG_WR(sc, BCE_RV2P_INSTR_HIGH, *rv2p_code);
2477 		rv2p_code++;
2478 		REG_WR(sc, BCE_RV2P_INSTR_LOW, *rv2p_code);
2479 		rv2p_code++;
2480 
2481 		if (rv2p_proc == RV2P_PROC1) {
2482 			val = (i / 8) | BCE_RV2P_PROC1_ADDR_CMD_RDWR;
2483 			REG_WR(sc, BCE_RV2P_PROC1_ADDR_CMD, val);
2484 		} else {
2485 			val = (i / 8) | BCE_RV2P_PROC2_ADDR_CMD_RDWR;
2486 			REG_WR(sc, BCE_RV2P_PROC2_ADDR_CMD, val);
2487 		}
2488 	}
2489 
2490 	/* Reset the processor, un-stall is done later. */
2491 	if (rv2p_proc == RV2P_PROC1)
2492 		REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC1_RESET);
2493 	else
2494 		REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC2_RESET);
2495 }
2496 
2497 
2498 /****************************************************************************/
2499 /* Load RISC processor firmware.                                            */
2500 /*                                                                          */
2501 /* Loads firmware from the file if_bcefw.h into the scratchpad memory       */
2502 /* associated with a particular processor.                                  */
2503 /*                                                                          */
2504 /* Returns:                                                                 */
2505 /*   Nothing.                                                               */
2506 /****************************************************************************/
2507 static void
2508 bce_load_cpu_fw(struct bce_softc *sc, struct cpu_reg *cpu_reg,
2509 		struct fw_info *fw)
2510 {
2511 	uint32_t offset, val;
2512 	int j;
2513 
2514 	/* Halt the CPU. */
2515 	val = REG_RD_IND(sc, cpu_reg->mode);
2516 	val |= cpu_reg->mode_value_halt;
2517 	REG_WR_IND(sc, cpu_reg->mode, val);
2518 	REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
2519 
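	/*
	 * Firmware section addresses are expressed in the RISC processor's
	 * own address space; subtracting mips_view_base and adding spad_base
	 * converts them into scratchpad offsets reachable through the
	 * indirect register interface.
	 */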
2520 	/* Load the Text area. */
2521 	offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2522 	if (fw->text) {
2523 		for (j = 0; j < (fw->text_len / 4); j++, offset += 4)
2524 			REG_WR_IND(sc, offset, fw->text[j]);
2525 	}
2526 
2527 	/* Load the Data area. */
2528 	offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2529 	if (fw->data) {
2530 		for (j = 0; j < (fw->data_len / 4); j++, offset += 4)
2531 			REG_WR_IND(sc, offset, fw->data[j]);
2532 	}
2533 
2534 	/* Load the SBSS area. */
2535 	offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2536 	if (fw->sbss) {
2537 		for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4)
2538 			REG_WR_IND(sc, offset, fw->sbss[j]);
2539 	}
2540 
2541 	/* Load the BSS area. */
2542 	offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2543 	if (fw->bss) {
2544 		for (j = 0; j < (fw->bss_len/4); j++, offset += 4)
2545 			REG_WR_IND(sc, offset, fw->bss[j]);
2546 	}
2547 
2548 	/* Load the Read-Only area. */
2549 	offset = cpu_reg->spad_base +
2550 		(fw->rodata_addr - cpu_reg->mips_view_base);
2551 	if (fw->rodata) {
2552 		for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4)
2553 			REG_WR_IND(sc, offset, fw->rodata[j]);
2554 	}
2555 
2556 	/* Clear the pre-fetch instruction. */
2557 	REG_WR_IND(sc, cpu_reg->inst, 0);
2558 	REG_WR_IND(sc, cpu_reg->pc, fw->start_addr);
2559 
2560 	/* Start the CPU. */
2561 	val = REG_RD_IND(sc, cpu_reg->mode);
2562 	val &= ~cpu_reg->mode_value_halt;
2563 	REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
2564 	REG_WR_IND(sc, cpu_reg->mode, val);
2565 }
2566 
2567 
2568 /****************************************************************************/
2569 /* Initialize the RV2P, RX, TX, TPAT, and COM CPUs.                         */
2570 /*                                                                          */
2571 /* Loads the firmware for each CPU and starts the CPU.                      */
2572 /*                                                                          */
2573 /* Returns:                                                                 */
2574 /*   Nothing.                                                               */
2575 /****************************************************************************/
2576 static void
2577 bce_init_cpus(struct bce_softc *sc)
2578 {
2579 	struct cpu_reg cpu_reg;
2580 	struct fw_info fw;
2581 
2582 	/* Initialize the RV2P processor. */
2583 	bce_load_rv2p_fw(sc, bce_rv2p_proc1, sizeof(bce_rv2p_proc1), RV2P_PROC1);
2584 	bce_load_rv2p_fw(sc, bce_rv2p_proc2, sizeof(bce_rv2p_proc2), RV2P_PROC2);
2585 
2586 	/* Initialize the RX Processor. */
2587 	cpu_reg.mode = BCE_RXP_CPU_MODE;
2588 	cpu_reg.mode_value_halt = BCE_RXP_CPU_MODE_SOFT_HALT;
2589 	cpu_reg.mode_value_sstep = BCE_RXP_CPU_MODE_STEP_ENA;
2590 	cpu_reg.state = BCE_RXP_CPU_STATE;
2591 	cpu_reg.state_value_clear = 0xffffff;
2592 	cpu_reg.gpr0 = BCE_RXP_CPU_REG_FILE;
2593 	cpu_reg.evmask = BCE_RXP_CPU_EVENT_MASK;
2594 	cpu_reg.pc = BCE_RXP_CPU_PROGRAM_COUNTER;
2595 	cpu_reg.inst = BCE_RXP_CPU_INSTRUCTION;
2596 	cpu_reg.bp = BCE_RXP_CPU_HW_BREAKPOINT;
2597 	cpu_reg.spad_base = BCE_RXP_SCRATCH;
2598 	cpu_reg.mips_view_base = 0x8000000;
2599 
2600 	fw.ver_major = bce_RXP_b06FwReleaseMajor;
2601 	fw.ver_minor = bce_RXP_b06FwReleaseMinor;
2602 	fw.ver_fix = bce_RXP_b06FwReleaseFix;
2603 	fw.start_addr = bce_RXP_b06FwStartAddr;
2604 
2605 	fw.text_addr = bce_RXP_b06FwTextAddr;
2606 	fw.text_len = bce_RXP_b06FwTextLen;
2607 	fw.text_index = 0;
2608 	fw.text = bce_RXP_b06FwText;
2609 
2610 	fw.data_addr = bce_RXP_b06FwDataAddr;
2611 	fw.data_len = bce_RXP_b06FwDataLen;
2612 	fw.data_index = 0;
2613 	fw.data = bce_RXP_b06FwData;
2614 
2615 	fw.sbss_addr = bce_RXP_b06FwSbssAddr;
2616 	fw.sbss_len = bce_RXP_b06FwSbssLen;
2617 	fw.sbss_index = 0;
2618 	fw.sbss = bce_RXP_b06FwSbss;
2619 
2620 	fw.bss_addr = bce_RXP_b06FwBssAddr;
2621 	fw.bss_len = bce_RXP_b06FwBssLen;
2622 	fw.bss_index = 0;
2623 	fw.bss = bce_RXP_b06FwBss;
2624 
2625 	fw.rodata_addr = bce_RXP_b06FwRodataAddr;
2626 	fw.rodata_len = bce_RXP_b06FwRodataLen;
2627 	fw.rodata_index = 0;
2628 	fw.rodata = bce_RXP_b06FwRodata;
2629 
2630 	DBPRINT(sc, BCE_INFO_RESET, "Loading RX firmware.\n");
2631 	bce_load_cpu_fw(sc, &cpu_reg, &fw);
2632 
2633 	/* Initialize the TX Processor. */
2634 	cpu_reg.mode = BCE_TXP_CPU_MODE;
2635 	cpu_reg.mode_value_halt = BCE_TXP_CPU_MODE_SOFT_HALT;
2636 	cpu_reg.mode_value_sstep = BCE_TXP_CPU_MODE_STEP_ENA;
2637 	cpu_reg.state = BCE_TXP_CPU_STATE;
2638 	cpu_reg.state_value_clear = 0xffffff;
2639 	cpu_reg.gpr0 = BCE_TXP_CPU_REG_FILE;
2640 	cpu_reg.evmask = BCE_TXP_CPU_EVENT_MASK;
2641 	cpu_reg.pc = BCE_TXP_CPU_PROGRAM_COUNTER;
2642 	cpu_reg.inst = BCE_TXP_CPU_INSTRUCTION;
2643 	cpu_reg.bp = BCE_TXP_CPU_HW_BREAKPOINT;
2644 	cpu_reg.spad_base = BCE_TXP_SCRATCH;
2645 	cpu_reg.mips_view_base = 0x8000000;
2646 
2647 	fw.ver_major = bce_TXP_b06FwReleaseMajor;
2648 	fw.ver_minor = bce_TXP_b06FwReleaseMinor;
2649 	fw.ver_fix = bce_TXP_b06FwReleaseFix;
2650 	fw.start_addr = bce_TXP_b06FwStartAddr;
2651 
2652 	fw.text_addr = bce_TXP_b06FwTextAddr;
2653 	fw.text_len = bce_TXP_b06FwTextLen;
2654 	fw.text_index = 0;
2655 	fw.text = bce_TXP_b06FwText;
2656 
2657 	fw.data_addr = bce_TXP_b06FwDataAddr;
2658 	fw.data_len = bce_TXP_b06FwDataLen;
2659 	fw.data_index = 0;
2660 	fw.data = bce_TXP_b06FwData;
2661 
2662 	fw.sbss_addr = bce_TXP_b06FwSbssAddr;
2663 	fw.sbss_len = bce_TXP_b06FwSbssLen;
2664 	fw.sbss_index = 0;
2665 	fw.sbss = bce_TXP_b06FwSbss;
2666 
2667 	fw.bss_addr = bce_TXP_b06FwBssAddr;
2668 	fw.bss_len = bce_TXP_b06FwBssLen;
2669 	fw.bss_index = 0;
2670 	fw.bss = bce_TXP_b06FwBss;
2671 
2672 	fw.rodata_addr = bce_TXP_b06FwRodataAddr;
2673 	fw.rodata_len = bce_TXP_b06FwRodataLen;
2674 	fw.rodata_index = 0;
2675 	fw.rodata = bce_TXP_b06FwRodata;
2676 
2677 	DBPRINT(sc, BCE_INFO_RESET, "Loading TX firmware.\n");
2678 	bce_load_cpu_fw(sc, &cpu_reg, &fw);
2679 
2680 	/* Initialize the TX Patch-up Processor. */
2681 	cpu_reg.mode = BCE_TPAT_CPU_MODE;
2682 	cpu_reg.mode_value_halt = BCE_TPAT_CPU_MODE_SOFT_HALT;
2683 	cpu_reg.mode_value_sstep = BCE_TPAT_CPU_MODE_STEP_ENA;
2684 	cpu_reg.state = BCE_TPAT_CPU_STATE;
2685 	cpu_reg.state_value_clear = 0xffffff;
2686 	cpu_reg.gpr0 = BCE_TPAT_CPU_REG_FILE;
2687 	cpu_reg.evmask = BCE_TPAT_CPU_EVENT_MASK;
2688 	cpu_reg.pc = BCE_TPAT_CPU_PROGRAM_COUNTER;
2689 	cpu_reg.inst = BCE_TPAT_CPU_INSTRUCTION;
2690 	cpu_reg.bp = BCE_TPAT_CPU_HW_BREAKPOINT;
2691 	cpu_reg.spad_base = BCE_TPAT_SCRATCH;
2692 	cpu_reg.mips_view_base = 0x8000000;
2693 
2694 	fw.ver_major = bce_TPAT_b06FwReleaseMajor;
2695 	fw.ver_minor = bce_TPAT_b06FwReleaseMinor;
2696 	fw.ver_fix = bce_TPAT_b06FwReleaseFix;
2697 	fw.start_addr = bce_TPAT_b06FwStartAddr;
2698 
2699 	fw.text_addr = bce_TPAT_b06FwTextAddr;
2700 	fw.text_len = bce_TPAT_b06FwTextLen;
2701 	fw.text_index = 0;
2702 	fw.text = bce_TPAT_b06FwText;
2703 
2704 	fw.data_addr = bce_TPAT_b06FwDataAddr;
2705 	fw.data_len = bce_TPAT_b06FwDataLen;
2706 	fw.data_index = 0;
2707 	fw.data = bce_TPAT_b06FwData;
2708 
2709 	fw.sbss_addr = bce_TPAT_b06FwSbssAddr;
2710 	fw.sbss_len = bce_TPAT_b06FwSbssLen;
2711 	fw.sbss_index = 0;
2712 	fw.sbss = bce_TPAT_b06FwSbss;
2713 
2714 	fw.bss_addr = bce_TPAT_b06FwBssAddr;
2715 	fw.bss_len = bce_TPAT_b06FwBssLen;
2716 	fw.bss_index = 0;
2717 	fw.bss = bce_TPAT_b06FwBss;
2718 
2719 	fw.rodata_addr = bce_TPAT_b06FwRodataAddr;
2720 	fw.rodata_len = bce_TPAT_b06FwRodataLen;
2721 	fw.rodata_index = 0;
2722 	fw.rodata = bce_TPAT_b06FwRodata;
2723 
2724 	DBPRINT(sc, BCE_INFO_RESET, "Loading TPAT firmware.\n");
2725 	bce_load_cpu_fw(sc, &cpu_reg, &fw);
2726 
2727 	/* Initialize the Completion Processor. */
2728 	cpu_reg.mode = BCE_COM_CPU_MODE;
2729 	cpu_reg.mode_value_halt = BCE_COM_CPU_MODE_SOFT_HALT;
2730 	cpu_reg.mode_value_sstep = BCE_COM_CPU_MODE_STEP_ENA;
2731 	cpu_reg.state = BCE_COM_CPU_STATE;
2732 	cpu_reg.state_value_clear = 0xffffff;
2733 	cpu_reg.gpr0 = BCE_COM_CPU_REG_FILE;
2734 	cpu_reg.evmask = BCE_COM_CPU_EVENT_MASK;
2735 	cpu_reg.pc = BCE_COM_CPU_PROGRAM_COUNTER;
2736 	cpu_reg.inst = BCE_COM_CPU_INSTRUCTION;
2737 	cpu_reg.bp = BCE_COM_CPU_HW_BREAKPOINT;
2738 	cpu_reg.spad_base = BCE_COM_SCRATCH;
2739 	cpu_reg.mips_view_base = 0x8000000;
2740 
2741 	fw.ver_major = bce_COM_b06FwReleaseMajor;
2742 	fw.ver_minor = bce_COM_b06FwReleaseMinor;
2743 	fw.ver_fix = bce_COM_b06FwReleaseFix;
2744 	fw.start_addr = bce_COM_b06FwStartAddr;
2745 
2746 	fw.text_addr = bce_COM_b06FwTextAddr;
2747 	fw.text_len = bce_COM_b06FwTextLen;
2748 	fw.text_index = 0;
2749 	fw.text = bce_COM_b06FwText;
2750 
2751 	fw.data_addr = bce_COM_b06FwDataAddr;
2752 	fw.data_len = bce_COM_b06FwDataLen;
2753 	fw.data_index = 0;
2754 	fw.data = bce_COM_b06FwData;
2755 
2756 	fw.sbss_addr = bce_COM_b06FwSbssAddr;
2757 	fw.sbss_len = bce_COM_b06FwSbssLen;
2758 	fw.sbss_index = 0;
2759 	fw.sbss = bce_COM_b06FwSbss;
2760 
2761 	fw.bss_addr = bce_COM_b06FwBssAddr;
2762 	fw.bss_len = bce_COM_b06FwBssLen;
2763 	fw.bss_index = 0;
2764 	fw.bss = bce_COM_b06FwBss;
2765 
2766 	fw.rodata_addr = bce_COM_b06FwRodataAddr;
2767 	fw.rodata_len = bce_COM_b06FwRodataLen;
2768 	fw.rodata_index = 0;
2769 	fw.rodata = bce_COM_b06FwRodata;
2770 
2771 	DBPRINT(sc, BCE_INFO_RESET, "Loading COM firmware.\n");
2772 	bce_load_cpu_fw(sc, &cpu_reg, &fw);
2773 }
2774 
2775 
2776 /****************************************************************************/
2777 /* Initialize context memory.                                               */
2778 /*                                                                          */
2779 /* Clears the memory associated with each Context ID (CID).                 */
2780 /*                                                                          */
2781 /* Returns:                                                                 */
2782 /*   Nothing.                                                               */
2783 /****************************************************************************/
2784 static void
2785 bce_init_context(struct bce_softc *sc)
2786 {
2787 	uint32_t vcid;
2788 
2789 	vcid = 96;
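	/*
	 * Walk all 96 context IDs, mapping each one through the context
	 * window in turn and zeroing its PHY_CTX_SIZE bytes of context
	 * memory.
	 */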
2790 	while (vcid) {
2791 		uint32_t vcid_addr, pcid_addr, offset;
2792 
2793 		vcid--;
2794 
2795    		vcid_addr = GET_CID_ADDR(vcid);
2796 		pcid_addr = vcid_addr;
2797 
2798 		REG_WR(sc, BCE_CTX_VIRT_ADDR, 0x00);
2799 		REG_WR(sc, BCE_CTX_PAGE_TBL, pcid_addr);
2800 
2801 		/* Zero out the context. */
2802 		for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2803 			CTX_WR(sc, 0x00, offset, 0);
2804 
2805 		REG_WR(sc, BCE_CTX_VIRT_ADDR, vcid_addr);
2806 		REG_WR(sc, BCE_CTX_PAGE_TBL, pcid_addr);
2807 	}
2808 }
2809 
2810 
2811 /****************************************************************************/
2812 /* Fetch the permanent MAC address of the controller.                       */
2813 /*                                                                          */
2814 /* Returns:                                                                 */
2815 /*   Nothing.                                                               */
2816 /****************************************************************************/
2817 static void
2818 bce_get_mac_addr(struct bce_softc *sc)
2819 {
2820 	uint32_t mac_lo = 0, mac_hi = 0;
2821 
2822 	/*
2823 	 * The NetXtreme II bootcode populates various NIC
2824 	 * power-on and runtime configuration items in a
2825 	 * shared memory area.  The factory configured MAC
2826 	 * address is available from both NVRAM and the
2827 	 * shared memory area so we'll read the value from
2828 	 * shared memory for speed.
2829 	 */
2830 
2831 	mac_hi = REG_RD_IND(sc, sc->bce_shmem_base + BCE_PORT_HW_CFG_MAC_UPPER);
2832 	mac_lo = REG_RD_IND(sc, sc->bce_shmem_base + BCE_PORT_HW_CFG_MAC_LOWER);
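
	/*
	 * The upper word holds the first two bytes of the MAC address and
	 * the lower word holds the remaining four.
	 */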
2833 
2834 	if (mac_lo == 0 && mac_hi == 0) {
2835 		if_printf(&sc->arpcom.ac_if, "Invalid Ethernet address!\n");
2836 	} else {
2837 		sc->eaddr[0] = (u_char)(mac_hi >> 8);
2838 		sc->eaddr[1] = (u_char)(mac_hi >> 0);
2839 		sc->eaddr[2] = (u_char)(mac_lo >> 24);
2840 		sc->eaddr[3] = (u_char)(mac_lo >> 16);
2841 		sc->eaddr[4] = (u_char)(mac_lo >> 8);
2842 		sc->eaddr[5] = (u_char)(mac_lo >> 0);
2843 	}
2844 
2845 	DBPRINT(sc, BCE_INFO, "Permanent Ethernet address = %6D\n", sc->eaddr, ":");
2846 }
2847 
2848 
2849 /****************************************************************************/
2850 /* Program the MAC address.                                                 */
2851 /*                                                                          */
2852 /* Returns:                                                                 */
2853 /*   Nothing.                                                               */
2854 /****************************************************************************/
2855 static void
2856 bce_set_mac_addr(struct bce_softc *sc)
2857 {
2858 	const uint8_t *mac_addr = sc->eaddr;
2859 	uint32_t val;
2860 
2861 	DBPRINT(sc, BCE_INFO, "Setting Ethernet address = %6D\n",
2862 		sc->eaddr, ":");
2863 
2864 	val = (mac_addr[0] << 8) | mac_addr[1];
2865 	REG_WR(sc, BCE_EMAC_MAC_MATCH0, val);
2866 
2867 	val = (mac_addr[2] << 24) |
2868 	      (mac_addr[3] << 16) |
2869 	      (mac_addr[4] << 8) |
2870 	      mac_addr[5];
2871 	REG_WR(sc, BCE_EMAC_MAC_MATCH1, val);
2872 }
2873 
2874 
2875 /****************************************************************************/
2876 /* Stop the controller.                                                     */
2877 /*                                                                          */
2878 /* Returns:                                                                 */
2879 /*   Nothing.                                                               */
2880 /****************************************************************************/
2881 static void
2882 bce_stop(struct bce_softc *sc)
2883 {
2884 	struct ifnet *ifp = &sc->arpcom.ac_if;
2885 	struct mii_data *mii = device_get_softc(sc->bce_miibus);
2886 	struct ifmedia_entry *ifm;
2887 	int mtmp, itmp;
2888 
2889 	ASSERT_SERIALIZED(ifp->if_serializer);
2890 
2891 	callout_stop(&sc->bce_stat_ch);
2892 
2893 	/* Disable the transmit/receive blocks. */
2894 	REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS, 0x5ffffff);
2895 	REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS);
2896 	DELAY(20);
2897 
2898 	bce_disable_intr(sc);
2899 
2900 	/* Tell firmware that the driver is going away. */
2901 	bce_reset(sc, BCE_DRV_MSG_CODE_SUSPEND_NO_WOL);
2902 
2903 	/* Free the RX lists. */
2904 	bce_free_rx_chain(sc);
2905 
2906 	/* Free TX buffers. */
2907 	bce_free_tx_chain(sc);
2908 
2909 	/*
2910 	 * Isolate/power down the PHY, but leave the media selection
2911 	 * unchanged so that things will be put back to normal when
2912 	 * we bring the interface back up.
2913 	 */
2914 	itmp = ifp->if_flags;
2915 	ifp->if_flags |= IFF_UP;
2916 	ifm = mii->mii_media.ifm_cur;
2917 	mtmp = ifm->ifm_media;
2918 	ifm->ifm_media = IFM_ETHER | IFM_NONE;
2919 	mii_mediachg(mii);
2920 	ifm->ifm_media = mtmp;
2921 	ifp->if_flags = itmp;
2922 
2923 	sc->bce_link = 0;
2924 
2925 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2926 	ifp->if_timer = 0;
2927 
2928 	bce_mgmt_init(sc);
2929 }
2930 
2931 
2932 static int
2933 bce_reset(struct bce_softc *sc, uint32_t reset_code)
2934 {
2935 	uint32_t val;
2936 	int i, rc = 0;
2937 
2938 	/* Wait for pending PCI transactions to complete. */
2939 	REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS,
2940 	       BCE_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
2941 	       BCE_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
2942 	       BCE_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
2943 	       BCE_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
2944 	val = REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS);
2945 	DELAY(5);
2946 
2947 	/* Assume bootcode is running. */
2948 	sc->bce_fw_timed_out = 0;
2949 
2950 	/* Give the firmware a chance to prepare for the reset. */
2951 	rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT0 | reset_code);
2952 	if (rc) {
2953 		if_printf(&sc->arpcom.ac_if,
2954 			  "Firmware is not ready for reset\n");
2955 		return rc;
2956 	}
2957 
2958 	/* Set a firmware reminder that this is a soft reset. */
2959 	REG_WR_IND(sc, sc->bce_shmem_base + BCE_DRV_RESET_SIGNATURE,
2960 		   BCE_DRV_RESET_SIGNATURE_MAGIC);
2961 
2962 	/* Dummy read to force the chip to complete all current transactions. */
2963 	val = REG_RD(sc, BCE_MISC_ID);
2964 
2965 	/* Chip reset. */
2966 	val = BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
2967 	      BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
2968 	      BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
2969 	REG_WR(sc, BCE_PCICFG_MISC_CONFIG, val);
2970 
	/* Allow up to 100us (10 x 10us polls) for the reset to complete. */
2972 	for (i = 0; i < 10; i++) {
2973 		val = REG_RD(sc, BCE_PCICFG_MISC_CONFIG);
2974 		if ((val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
2975 			    BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) {
2976 			break;
2977 		}
2978 		DELAY(10);
2979 	}
2980 
2981 	/* Check that reset completed successfully. */
2982 	if (val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
2983 		   BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
2984 		if_printf(&sc->arpcom.ac_if, "Reset failed!\n");
2985 		return EBUSY;
2986 	}
2987 
2988 	/* Make sure byte swapping is properly configured. */
2989 	val = REG_RD(sc, BCE_PCI_SWAP_DIAG0);
2990 	if (val != 0x01020304) {
2991 		if_printf(&sc->arpcom.ac_if, "Byte swap is incorrect!\n");
2992 		return ENODEV;
2993 	}
2994 
2995 	/* Just completed a reset, assume that firmware is running again. */
2996 	sc->bce_fw_timed_out = 0;
2997 
2998 	/* Wait for the firmware to finish its initialization. */
2999 	rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT1 | reset_code);
3000 	if (rc) {
3001 		if_printf(&sc->arpcom.ac_if,
3002 			  "Firmware did not complete initialization!\n");
3003 	}
3004 	return rc;
3005 }
3006 
3007 
3008 static int
3009 bce_chipinit(struct bce_softc *sc)
3010 {
3011 	uint32_t val;
3012 	int rc = 0;
3013 
3014 	/* Make sure the interrupt is not active. */
3015 	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, BCE_PCICFG_INT_ACK_CMD_MASK_INT);
3016 
3017 	/*
3018 	 * Initialize DMA byte/word swapping, configure the number of DMA
3019 	 * channels and PCI clock compensation delay.
3020 	 */
3021 	val = BCE_DMA_CONFIG_DATA_BYTE_SWAP |
3022 	      BCE_DMA_CONFIG_DATA_WORD_SWAP |
3023 #if BYTE_ORDER == BIG_ENDIAN
3024 	      BCE_DMA_CONFIG_CNTL_BYTE_SWAP |
3025 #endif
3026 	      BCE_DMA_CONFIG_CNTL_WORD_SWAP |
3027 	      DMA_READ_CHANS << 12 |
3028 	      DMA_WRITE_CHANS << 16;
3029 
3030 	val |= (0x2 << 20) | BCE_DMA_CONFIG_CNTL_PCI_COMP_DLY;
3031 
3032 	if ((sc->bce_flags & BCE_PCIX_FLAG) && sc->bus_speed_mhz == 133)
3033 		val |= BCE_DMA_CONFIG_PCI_FAST_CLK_CMP;
3034 
3035 	/*
3036 	 * This setting resolves a problem observed on certain Intel PCI
3037 	 * chipsets that cannot handle multiple outstanding DMA operations.
3038 	 * See errata E9_5706A1_65.
3039 	 */
3040 	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706 &&
3041 	    BCE_CHIP_ID(sc) != BCE_CHIP_ID_5706_A0 &&
3042 	    !(sc->bce_flags & BCE_PCIX_FLAG))
3043 		val |= BCE_DMA_CONFIG_CNTL_PING_PONG_DMA;
3044 
3045 	REG_WR(sc, BCE_DMA_CONFIG, val);
3046 
3047 	/* Clear the PCI-X relaxed ordering bit. See errata E3_5708CA0_570. */
3048 	if (sc->bce_flags & BCE_PCIX_FLAG) {
3049 		uint16_t cmd;
3050 
3051 		cmd = pci_read_config(sc->bce_dev, BCE_PCI_PCIX_CMD, 2);
3052 		pci_write_config(sc->bce_dev, BCE_PCI_PCIX_CMD, cmd & ~0x2, 2);
3053 	}
3054 
3055 	/* Enable the RX_V2P and Context state machines before access. */
3056 	REG_WR(sc, BCE_MISC_ENABLE_SET_BITS,
3057 	       BCE_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
3058 	       BCE_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
3059 	       BCE_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
3060 
3061 	/* Initialize context mapping and zero out the quick contexts. */
3062 	bce_init_context(sc);
3063 
3064 	/* Initialize the on-board CPUs. */
3065 	bce_init_cpus(sc);
3066 
3067 	/* Prepare NVRAM for access. */
3068 	rc = bce_init_nvram(sc);
3069 	if (rc != 0)
3070 		return rc;
3071 
3072 	/* Set the kernel bypass block size */
3073 	val = REG_RD(sc, BCE_MQ_CONFIG);
3074 	val &= ~BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE;
3075 	val |= BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
3076 	REG_WR(sc, BCE_MQ_CONFIG, val);
3077 
3078 	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
3079 	REG_WR(sc, BCE_MQ_KNL_BYP_WIND_START, val);
3080 	REG_WR(sc, BCE_MQ_KNL_WIND_END, val);
3081 
3082 	/* Set the page size and clear the RV2P processor stall bits. */
3083 	val = (BCM_PAGE_BITS - 8) << 24;
3084 	REG_WR(sc, BCE_RV2P_CONFIG, val);
3085 
3086 	/* Configure page size. */
3087 	val = REG_RD(sc, BCE_TBDR_CONFIG);
3088 	val &= ~BCE_TBDR_CONFIG_PAGE_SIZE;
3089 	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
3090 	REG_WR(sc, BCE_TBDR_CONFIG, val);
3091 
3092 	return 0;
3093 }
3094 
3095 
3096 /****************************************************************************/
3097 /* Initialize the controller in preparation to send/receive traffic.        */
3098 /*                                                                          */
3099 /* Returns:                                                                 */
3100 /*   0 for success, positive value for failure.                             */
3101 /****************************************************************************/
3102 static int
3103 bce_blockinit(struct bce_softc *sc)
3104 {
3105 	uint32_t reg, val;
3106 	int rc = 0;
3107 
3108 	/* Load the hardware default MAC address. */
3109 	bce_set_mac_addr(sc);
3110 
3111 	/* Set the Ethernet backoff seed value */
3112 	val = sc->eaddr[0] + (sc->eaddr[1] << 8) + (sc->eaddr[2] << 16) +
3113 	      sc->eaddr[3] + (sc->eaddr[4] << 8) + (sc->eaddr[5] << 16);
3114 	REG_WR(sc, BCE_EMAC_BACKOFF_SEED, val);
3115 
3116 	sc->last_status_idx = 0;
3117 	sc->rx_mode = BCE_EMAC_RX_MODE_SORT_MODE;
3118 
3119 	/* Set up link change interrupt generation. */
3120 	REG_WR(sc, BCE_EMAC_ATTENTION_ENA, BCE_EMAC_ATTENTION_ENA_LINK);
3121 
3122 	/* Program the physical address of the status block. */
3123 	REG_WR(sc, BCE_HC_STATUS_ADDR_L, BCE_ADDR_LO(sc->status_block_paddr));
3124 	REG_WR(sc, BCE_HC_STATUS_ADDR_H, BCE_ADDR_HI(sc->status_block_paddr));
3125 
3126 	/* Program the physical address of the statistics block. */
3127 	REG_WR(sc, BCE_HC_STATISTICS_ADDR_L,
3128 	       BCE_ADDR_LO(sc->stats_block_paddr));
3129 	REG_WR(sc, BCE_HC_STATISTICS_ADDR_H,
3130 	       BCE_ADDR_HI(sc->stats_block_paddr));
3131 
3132 	/* Program various host coalescing parameters. */
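	/*
	 * For each register below, the upper 16 bits hold the value used
	 * while an interrupt is asserted (the *_int softc fields) and the
	 * lower 16 bits hold the normal value.
	 */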
3133 	REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
3134 	       (sc->bce_tx_quick_cons_trip_int << 16) |
3135 	       sc->bce_tx_quick_cons_trip);
3136 	REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
3137 	       (sc->bce_rx_quick_cons_trip_int << 16) |
3138 	       sc->bce_rx_quick_cons_trip);
3139 	REG_WR(sc, BCE_HC_COMP_PROD_TRIP,
3140 	       (sc->bce_comp_prod_trip_int << 16) | sc->bce_comp_prod_trip);
3141 	REG_WR(sc, BCE_HC_TX_TICKS,
3142 	       (sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks);
3143 	REG_WR(sc, BCE_HC_RX_TICKS,
3144 	       (sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks);
3145 	REG_WR(sc, BCE_HC_COM_TICKS,
3146 	       (sc->bce_com_ticks_int << 16) | sc->bce_com_ticks);
3147 	REG_WR(sc, BCE_HC_CMD_TICKS,
3148 	       (sc->bce_cmd_ticks_int << 16) | sc->bce_cmd_ticks);
3149 	REG_WR(sc, BCE_HC_STATS_TICKS, (sc->bce_stats_ticks & 0xffff00));
3150 	REG_WR(sc, BCE_HC_STAT_COLLECT_TICKS, 0xbb8);	/* 3ms */
3151 	REG_WR(sc, BCE_HC_CONFIG,
3152 	       BCE_HC_CONFIG_RX_TMR_MODE |
3153 	       BCE_HC_CONFIG_TX_TMR_MODE |
3154 	       BCE_HC_CONFIG_COLLECT_STATS);
3155 
3156 	/* Clear the internal statistics counters. */
3157 	REG_WR(sc, BCE_HC_COMMAND, BCE_HC_COMMAND_CLR_STAT_NOW);
3158 
3159 	/* Verify that bootcode is running. */
3160 	reg = REG_RD_IND(sc, sc->bce_shmem_base + BCE_DEV_INFO_SIGNATURE);
3161 
3162 	DBRUNIF(DB_RANDOMTRUE(bce_debug_bootcode_running_failure),
3163 		if_printf(&sc->arpcom.ac_if,
3164 			  "%s(%d): Simulating bootcode failure.\n",
3165 			  __FILE__, __LINE__);
3166 		reg = 0);
3167 
3168 	if ((reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
3169 	    BCE_DEV_INFO_SIGNATURE_MAGIC) {
3170 		if_printf(&sc->arpcom.ac_if,
3171 			  "Bootcode not running! Found: 0x%08X, "
3172 			  "Expected: 0x%08X\n",
3173 			  reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK,
3174 			  BCE_DEV_INFO_SIGNATURE_MAGIC);
3175 		return ENODEV;
3176 	}
3177 
3178 	/* Check if any management firmware is running. */
3179 	reg = REG_RD_IND(sc, sc->bce_shmem_base + BCE_PORT_FEATURE);
3180 	if (reg & (BCE_PORT_FEATURE_ASF_ENABLED |
3181 		   BCE_PORT_FEATURE_IMD_ENABLED)) {
3182 		DBPRINT(sc, BCE_INFO, "Management F/W Enabled.\n");
3183 		sc->bce_flags |= BCE_MFW_ENABLE_FLAG;
3184 	}
3185 
3186 	sc->bce_fw_ver =
3187 		REG_RD_IND(sc, sc->bce_shmem_base + BCE_DEV_INFO_BC_REV);
3188 	DBPRINT(sc, BCE_INFO, "bootcode rev = 0x%08X\n", sc->bce_fw_ver);
3189 
3190 	/* Allow bootcode to apply any additional fixes before enabling MAC. */
3191 	rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT2 | BCE_DRV_MSG_CODE_RESET);
3192 
3193 	/* Enable link state change interrupt generation. */
3194 	REG_WR(sc, BCE_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);
3195 
3196 	/* Enable all remaining blocks in the MAC. */
3197 	REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, 0x5ffffff);
3198 	REG_RD(sc, BCE_MISC_ENABLE_SET_BITS);
3199 	DELAY(20);
3200 
3201 	return 0;
3202 }
3203 
3204 
3205 /****************************************************************************/
3206 /* Encapsulate an mbuf cluster into the rx_bd chain.                        */
3207 /*                                                                          */
3208 /* The NetXtreme II can support Jumbo frames by using multiple rx_bd's.     */
3209 /* This routine will map an mbuf cluster into 1 or more rx_bd's as          */
3210 /* necessary.                                                               */
3211 /*                                                                          */
3212 /* Returns:                                                                 */
3213 /*   0 for success, positive value for failure.                             */
3214 /****************************************************************************/
3215 static int
3216 bce_newbuf_std(struct bce_softc *sc, struct mbuf *m,
3217 	       uint16_t *prod, uint16_t *chain_prod, uint32_t *prod_bseq)
3218 {
3219 	bus_dmamap_t map;
3220 	struct bce_dmamap_arg ctx;
3221 	bus_dma_segment_t seg;
3222 	struct mbuf *m_new;
3223 	struct rx_bd *rxbd;
3224 	int error;
3225 #ifdef BCE_DEBUG
3226 	uint16_t debug_chain_prod = *chain_prod;
3227 #endif
3228 
3229 	/* Make sure the inputs are valid. */
3230 	DBRUNIF((*chain_prod > MAX_RX_BD),
3231 		if_printf(&sc->arpcom.ac_if, "%s(%d): "
3232 			  "RX producer out of range: 0x%04X > 0x%04X\n",
3233 			  __FILE__, __LINE__,
3234 			  *chain_prod, (uint16_t)MAX_RX_BD));
3235 
3236 	DBPRINT(sc, BCE_VERBOSE_RECV, "%s(enter): prod = 0x%04X, chain_prod = 0x%04X, "
3237 		"prod_bseq = 0x%08X\n", __func__, *prod, *chain_prod, *prod_bseq);
3238 
3239 	if (m == NULL) {
3240 		DBRUNIF(DB_RANDOMTRUE(bce_debug_mbuf_allocation_failure),
3241 			if_printf(&sc->arpcom.ac_if, "%s(%d): "
3242 				  "Simulating mbuf allocation failure.\n",
3243 				  __FILE__, __LINE__);
3244 			sc->mbuf_alloc_failed++;
3245 			return ENOBUFS);
3246 
3247 		/* This is a new mbuf allocation. */
3248 		m_new = m_getcl(MB_DONTWAIT, MT_DATA, M_PKTHDR);
3249 		if (m_new == NULL)
3250 			return ENOBUFS;
3251 		DBRUNIF(1, sc->rx_mbuf_alloc++);
3252 	} else {
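		/* Reuse the caller-supplied mbuf; reset to the cluster start. */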
3253 		m_new = m;
3254 		m_new->m_data = m_new->m_ext.ext_buf;
3255 	}
3256 	m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
3257 
3258 	/* Map the mbuf cluster into device memory. */
3259 	map = sc->rx_mbuf_map[*chain_prod];
3260 
3261 	ctx.bce_maxsegs = 1;
3262 	ctx.bce_segs = &seg;
3263 	error = bus_dmamap_load_mbuf(sc->rx_mbuf_tag, map, m_new,
3264 				     bce_dma_map_mbuf, &ctx, BUS_DMA_NOWAIT);
3265 	if (error || ctx.bce_maxsegs == 0) {
3266 		if_printf(&sc->arpcom.ac_if,
3267 			  "Error mapping mbuf into RX chain!\n");
3268 
3269 		if (m == NULL)
3270 			m_freem(m_new);
3271 
3272 		DBRUNIF(1, sc->rx_mbuf_alloc--);
3273 		return ENOBUFS;
3274 	}
3275 
3276 	/* Watch for overflow. */
3277 	DBRUNIF((sc->free_rx_bd > USABLE_RX_BD),
3278 		if_printf(&sc->arpcom.ac_if, "%s(%d): "
3279 			  "Too many free rx_bd (0x%04X > 0x%04X)!\n",
3280 			  __FILE__, __LINE__, sc->free_rx_bd,
3281 			  (uint16_t)USABLE_RX_BD));
3282 
3283 	/* Update some debug statistics counters */
3284 	DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark),
3285 		sc->rx_low_watermark = sc->free_rx_bd);
3286 	DBRUNIF((sc->free_rx_bd == 0), sc->rx_empty_count++);
3287 
3288 	/* Setup the rx_bd for the first segment. */
3289 	rxbd = &sc->rx_bd_chain[RX_PAGE(*chain_prod)][RX_IDX(*chain_prod)];
3290 
3291 	rxbd->rx_bd_haddr_lo = htole32(BCE_ADDR_LO(seg.ds_addr));
3292 	rxbd->rx_bd_haddr_hi = htole32(BCE_ADDR_HI(seg.ds_addr));
3293 	rxbd->rx_bd_len = htole32(seg.ds_len);
3294 	rxbd->rx_bd_flags = htole32(RX_BD_FLAGS_START);
3295 	*prod_bseq += seg.ds_len;
3296 
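	/*
	 * The cluster was mapped as a single segment (bce_maxsegs is 1
	 * above), so this rx_bd is also the last one for the buffer.
	 */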
3297 	rxbd->rx_bd_flags |= htole32(RX_BD_FLAGS_END);
3298 
3299 	/* Save the mbuf and update our counter. */
3300 	sc->rx_mbuf_ptr[*chain_prod] = m_new;
3301 	sc->free_rx_bd--;
3302 
3303 	DBRUN(BCE_VERBOSE_RECV,
3304 	      bce_dump_rx_mbuf_chain(sc, debug_chain_prod, 1));
3305 
3306 	DBPRINT(sc, BCE_VERBOSE_RECV, "%s(exit): prod = 0x%04X, chain_prod = 0x%04X, "
3307 		"prod_bseq = 0x%08X\n", __func__, *prod, *chain_prod, *prod_bseq);
3308 
3309 	return 0;
3310 }
3311 
3312 
3313 /****************************************************************************/
3314 /* Allocate memory and initialize the TX data structures.                   */
3315 /*                                                                          */
3316 /* Returns:                                                                 */
3317 /*   0 for success, positive value for failure.                             */
3318 /****************************************************************************/
3319 static int
3320 bce_init_tx_chain(struct bce_softc *sc)
3321 {
3322 	struct tx_bd *txbd;
3323 	uint32_t val;
3324 	int i, rc = 0;
3325 
3326 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __func__);
3327 
3328 	/* Set the initial TX producer/consumer indices. */
3329 	sc->tx_prod = 0;
3330 	sc->tx_cons = 0;
3331 	sc->tx_prod_bseq   = 0;
3332 	sc->used_tx_bd = 0;
3333 	sc->max_tx_bd = USABLE_TX_BD;
3334 	DBRUNIF(1, sc->tx_hi_watermark = USABLE_TX_BD);
3335 	DBRUNIF(1, sc->tx_full_count = 0);
3336 
3337 	/*
3338 	 * The NetXtreme II supports a linked-list structure called
3339 	 * a Buffer Descriptor Chain (or BD chain).  A BD chain
3340 	 * consists of a series of 1 or more chain pages, each of which
3341 	 * consists of a fixed number of BD entries.
3342 	 * The last BD entry on each page is a pointer to the next page
3343 	 * in the chain, and the last pointer in the BD chain
3344 	 * points back to the beginning of the chain.
3345 	 */
3346 
3347 	/* Set the TX next pointer chain entries. */
3348 	for (i = 0; i < TX_PAGES; i++) {
3349 		int j;
3350 
3351 		txbd = &sc->tx_bd_chain[i][USABLE_TX_BD_PER_PAGE];
3352 
3353 		/* Check if we've reached the last page. */
3354 		if (i == (TX_PAGES - 1))
3355 			j = 0;
3356 		else
3357 			j = i + 1;
3358 
3359 		txbd->tx_bd_haddr_hi =
3360 			htole32(BCE_ADDR_HI(sc->tx_bd_chain_paddr[j]));
3361 		txbd->tx_bd_haddr_lo =
3362 			htole32(BCE_ADDR_LO(sc->tx_bd_chain_paddr[j]));
3363 	}
3364 
3365 	for (i = 0; i < TX_PAGES; ++i) {
3366 		bus_dmamap_sync(sc->tx_bd_chain_tag, sc->tx_bd_chain_map[i],
3367 				BUS_DMASYNC_PREWRITE);
3368 	}
3369 
3370 	/* Initialize the context ID for an L2 TX chain. */
3371 	val = BCE_L2CTX_TYPE_TYPE_L2;
3372 	val |= BCE_L2CTX_TYPE_SIZE_L2;
3373 	CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TYPE, val);
3374 
3375 	val = BCE_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
3376 	CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_CMD_TYPE, val);
3377 
3378 	/* Point the hardware to the first page in the chain. */
3379 	val = BCE_ADDR_HI(sc->tx_bd_chain_paddr[0]);
3380 	CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TBDR_BHADDR_HI, val);
3381 	val = BCE_ADDR_LO(sc->tx_bd_chain_paddr[0]);
3382 	CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TBDR_BHADDR_LO, val);
3383 
3384 	DBRUN(BCE_VERBOSE_SEND, bce_dump_tx_chain(sc, 0, TOTAL_TX_BD));
3385 
3386 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __func__);
3387 
3388 	return(rc);
3389 }
3390 
3391 
3392 /****************************************************************************/
3393 /* Free memory and clear the TX data structures.                            */
3394 /*                                                                          */
3395 /* Returns:                                                                 */
3396 /*   Nothing.                                                               */
3397 /****************************************************************************/
3398 static void
3399 bce_free_tx_chain(struct bce_softc *sc)
3400 {
3401 	int i;
3402 
3403 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __func__);
3404 
3405 	/* Unmap, unload, and free any mbufs still in the TX mbuf chain. */
3406 	for (i = 0; i < TOTAL_TX_BD; i++) {
3407 		if (sc->tx_mbuf_ptr[i] != NULL) {
3408 			bus_dmamap_sync(sc->tx_mbuf_tag, sc->tx_mbuf_map[i],
3409 					BUS_DMASYNC_POSTWRITE);
3410 			bus_dmamap_unload(sc->tx_mbuf_tag, sc->tx_mbuf_map[i]);
3411 			m_freem(sc->tx_mbuf_ptr[i]);
3412 			sc->tx_mbuf_ptr[i] = NULL;
3413 			DBRUNIF(1, sc->tx_mbuf_alloc--);
3414 		}
3415 	}
3416 
3417 	/* Clear each TX chain page. */
3418 	for (i = 0; i < TX_PAGES; i++)
3419 		bzero(sc->tx_bd_chain[i], BCE_TX_CHAIN_PAGE_SZ);
3420 
3421 	/* Check if we lost any mbufs in the process. */
3422 	DBRUNIF((sc->tx_mbuf_alloc),
3423 		if_printf(&sc->arpcom.ac_if,
3424 			  "%s(%d): Memory leak! "
3425 			  "Lost %d mbufs from tx chain!\n",
3426 			  __FILE__, __LINE__, sc->tx_mbuf_alloc));
3427 
3428 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __func__);
3429 }
3430 
3431 
3432 /****************************************************************************/
3433 /* Allocate memory and initialize the RX data structures.                   */
3434 /*                                                                          */
3435 /* Returns:                                                                 */
3436 /*   0 for success, positive value for failure.                             */
3437 /****************************************************************************/
3438 static int
3439 bce_init_rx_chain(struct bce_softc *sc)
3440 {
3441 	struct rx_bd *rxbd;
3442 	int i, rc = 0;
3443 	uint16_t prod, chain_prod;
3444 	uint32_t prod_bseq, val;
3445 
3446 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __func__);
3447 
3448 	/* Initialize the RX producer and consumer indices. */
3449 	sc->rx_prod = 0;
3450 	sc->rx_cons = 0;
3451 	sc->rx_prod_bseq = 0;
3452 	sc->free_rx_bd = USABLE_RX_BD;
3453 	sc->max_rx_bd = USABLE_RX_BD;
3454 	DBRUNIF(1, sc->rx_low_watermark = USABLE_RX_BD);
3455 	DBRUNIF(1, sc->rx_empty_count = 0);
3456 
3457 	/* Initialize the RX next pointer chain entries. */
3458 	for (i = 0; i < RX_PAGES; i++) {
3459 		int j;
3460 
3461 		rxbd = &sc->rx_bd_chain[i][USABLE_RX_BD_PER_PAGE];
3462 
3463 		/* Check if we've reached the last page. */
3464 		if (i == (RX_PAGES - 1))
3465 			j = 0;
3466 		else
3467 			j = i + 1;
3468 
3469 		/* Setup the chain page pointers. */
3470 		rxbd->rx_bd_haddr_hi =
3471 			htole32(BCE_ADDR_HI(sc->rx_bd_chain_paddr[j]));
3472 		rxbd->rx_bd_haddr_lo =
3473 			htole32(BCE_ADDR_LO(sc->rx_bd_chain_paddr[j]));
3474 	}
3475 
3476 	/* Initialize the context ID for an L2 RX chain. */
3477 	val = BCE_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
3478 	val |= BCE_L2CTX_CTX_TYPE_SIZE_L2;
3479 	val |= 0x02 << 8;
3480 	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_CTX_TYPE, val);
3481 
3482 	/* Point the hardware to the first page in the chain. */
3483 	/* XXX shouldn't this be after RX descriptor initialization? */
3484 	val = BCE_ADDR_HI(sc->rx_bd_chain_paddr[0]);
3485 	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_NX_BDHADDR_HI, val);
3486 	val = BCE_ADDR_LO(sc->rx_bd_chain_paddr[0]);
3487 	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_NX_BDHADDR_LO, val);
3488 
3489 	/* Allocate mbuf clusters for the rx_bd chain. */
3490 	prod = prod_bseq = 0;
3491 	while (prod < TOTAL_RX_BD) {
3492 		chain_prod = RX_CHAIN_IDX(prod);
3493 		if (bce_newbuf_std(sc, NULL, &prod, &chain_prod, &prod_bseq)) {
3494 			if_printf(&sc->arpcom.ac_if,
3495 				  "Error filling RX chain: rx_bd[0x%04X]!\n",
3496 				  chain_prod);
3497 			rc = ENOBUFS;
3498 			break;
3499 		}
3500 		prod = NEXT_RX_BD(prod);
3501 	}
3502 
3503 	/* Save the RX chain producer index. */
3504 	sc->rx_prod = prod;
3505 	sc->rx_prod_bseq = prod_bseq;
3506 
3507 	for (i = 0; i < RX_PAGES; i++) {
3508 		bus_dmamap_sync(sc->rx_bd_chain_tag, sc->rx_bd_chain_map[i],
3509 				BUS_DMASYNC_PREWRITE);
3510 	}
3511 
3512 	/* Tell the chip about the waiting rx_bd's. */
3513 	REG_WR16(sc, MB_RX_CID_ADDR + BCE_L2CTX_HOST_BDIDX, sc->rx_prod);
3514 	REG_WR(sc, MB_RX_CID_ADDR + BCE_L2CTX_HOST_BSEQ, sc->rx_prod_bseq);
3515 
3516 	DBRUN(BCE_VERBOSE_RECV, bce_dump_rx_chain(sc, 0, TOTAL_RX_BD));
3517 
3518 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __func__);
3519 
3520 	return(rc);
3521 }
3522 
3523 
3524 /****************************************************************************/
3525 /* Free memory and clear the RX data structures.                            */
3526 /*                                                                          */
3527 /* Returns:                                                                 */
3528 /*   Nothing.                                                               */
3529 /****************************************************************************/
3530 static void
3531 bce_free_rx_chain(struct bce_softc *sc)
3532 {
3533 	int i;
3534 
3535 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __func__);
3536 
3537 	/* Free any mbufs still in the RX mbuf chain. */
3538 	for (i = 0; i < TOTAL_RX_BD; i++) {
3539 		if (sc->rx_mbuf_ptr[i] != NULL) {
3540 			bus_dmamap_sync(sc->rx_mbuf_tag, sc->rx_mbuf_map[i],
3541 					BUS_DMASYNC_POSTREAD);
3542 			bus_dmamap_unload(sc->rx_mbuf_tag, sc->rx_mbuf_map[i]);
3543 			m_freem(sc->rx_mbuf_ptr[i]);
3544 			sc->rx_mbuf_ptr[i] = NULL;
3545 			DBRUNIF(1, sc->rx_mbuf_alloc--);
3546 		}
3547 	}
3548 
3549 	/* Clear each RX chain page. */
3550 	for (i = 0; i < RX_PAGES; i++)
3551 		bzero(sc->rx_bd_chain[i], BCE_RX_CHAIN_PAGE_SZ);
3552 
3553 	/* Check if we lost any mbufs in the process. */
3554 	DBRUNIF((sc->rx_mbuf_alloc),
3555 		if_printf(&sc->arpcom.ac_if,
3556 			  "%s(%d): Memory leak! "
3557 			  "Lost %d mbufs from rx chain!\n",
3558 			  __FILE__, __LINE__, sc->rx_mbuf_alloc));
3559 
3560 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __func__);
3561 }
3562 
3563 
3564 /****************************************************************************/
3565 /* Set media options.                                                       */
3566 /*                                                                          */
3567 /* Returns:                                                                 */
3568 /*   0 for success, positive value for failure.                             */
3569 /****************************************************************************/
3570 static int
3571 bce_ifmedia_upd(struct ifnet *ifp)
3572 {
3573 	struct bce_softc *sc = ifp->if_softc;
3574 	struct mii_data *mii = device_get_softc(sc->bce_miibus);
3575 
3576 	/*
3577 	 * 'mii' will be NULL when this function is called along the
3578 	 * following code path: bce_attach() -> bce_mgmt_init().
3579 	 */
3580 	if (mii != NULL) {
3581 		/* Make sure the MII bus has been enumerated. */
3582 		sc->bce_link = 0;
3583 		if (mii->mii_instance) {
3584 			struct mii_softc *miisc;
3585 
3586 			LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
3587 				mii_phy_reset(miisc);
3588 		}
3589 		mii_mediachg(mii);
3590 	}
3591 	return 0;
3592 }
3593 
3594 
3595 /****************************************************************************/
3596 /* Reports current media status.                                            */
3597 /*                                                                          */
3598 /* Returns:                                                                 */
3599 /*   Nothing.                                                               */
3600 /****************************************************************************/
3601 static void
3602 bce_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
3603 {
3604 	struct bce_softc *sc = ifp->if_softc;
3605 	struct mii_data *mii = device_get_softc(sc->bce_miibus);
3606 
3607 	mii_pollstat(mii);
3608 	ifmr->ifm_active = mii->mii_media_active;
3609 	ifmr->ifm_status = mii->mii_media_status;
3610 }
3611 
3612 
3613 /****************************************************************************/
3614 /* Handles PHY generated interrupt events.                                  */
3615 /*                                                                          */
3616 /* Returns:                                                                 */
3617 /*   Nothing.                                                               */
3618 /****************************************************************************/
3619 static void
3620 bce_phy_intr(struct bce_softc *sc)
3621 {
3622 	uint32_t new_link_state, old_link_state;
3623 	struct ifnet *ifp = &sc->arpcom.ac_if;
3624 
3625 	ASSERT_SERIALIZED(ifp->if_serializer);
3626 
3627 	new_link_state = sc->status_block->status_attn_bits &
3628 			 STATUS_ATTN_BITS_LINK_STATE;
3629 	old_link_state = sc->status_block->status_attn_bits_ack &
3630 			 STATUS_ATTN_BITS_LINK_STATE;
3631 
3632 	/* Handle any changes if the link state has changed. */
3633 	if (new_link_state != old_link_state) {	/* XXX redundant? */
3634 		DBRUN(BCE_VERBOSE_INTR, bce_dump_status_block(sc));
3635 
3636 		sc->bce_link = 0;
3637 		callout_stop(&sc->bce_stat_ch);
3638 		bce_tick_serialized(sc);
3639 
3640 		/* Update the status_attn_bits_ack field in the status block. */
3641 		if (new_link_state) {
3642 			REG_WR(sc, BCE_PCICFG_STATUS_BIT_SET_CMD,
3643 			       STATUS_ATTN_BITS_LINK_STATE);
3644 			if (bootverbose)
3645 				if_printf(ifp, "Link is now UP.\n");
3646 		} else {
3647 			REG_WR(sc, BCE_PCICFG_STATUS_BIT_CLEAR_CMD,
3648 			       STATUS_ATTN_BITS_LINK_STATE);
3649 			if (bootverbose)
3650 				if_printf(ifp, "Link is now DOWN.\n");
3651 		}
3652 	}
3653 
3654 	/* Acknowledge the link change interrupt. */
3655 	REG_WR(sc, BCE_EMAC_STATUS, BCE_EMAC_STATUS_LINK_CHANGE);
3656 }
3657 
3658 
3659 /****************************************************************************/
3660 /* Handles received frame interrupt events.                                 */
3661 /*                                                                          */
3662 /* Returns:                                                                 */
3663 /*   Nothing.                                                               */
3664 /****************************************************************************/
3665 static void
3666 bce_rx_intr(struct bce_softc *sc, int count)
3667 {
3668 	struct status_block *sblk = sc->status_block;
3669 	struct ifnet *ifp = &sc->arpcom.ac_if;
3670 	uint16_t hw_cons, sw_cons, sw_chain_cons, sw_prod, sw_chain_prod;
3671 	uint32_t sw_prod_bseq;
3672 	int i;
3673 
3674 	ASSERT_SERIALIZED(ifp->if_serializer);
3675 
3676 	DBRUNIF(1, sc->rx_interrupts++);
3677 
3678 	/* Prepare the RX chain pages to be accessed by the host CPU. */
3679 	for (i = 0; i < RX_PAGES; i++) {
3680 		bus_dmamap_sync(sc->rx_bd_chain_tag,
3681 				sc->rx_bd_chain_map[i], BUS_DMASYNC_POSTREAD);
3682 	}
3683 
3684 	/* Get the hardware's view of the RX consumer index. */
3685 	hw_cons = sc->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
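	/* Skip to the next entry if this is a chain page pointer. */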
3686 	if ((hw_cons & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
3687 		hw_cons++;
3688 
3689 	/* Get working copies of the driver's view of the RX indices. */
3690 	sw_cons = sc->rx_cons;
3691 	sw_prod = sc->rx_prod;
3692 	sw_prod_bseq = sc->rx_prod_bseq;
3693 
3694 	DBPRINT(sc, BCE_INFO_RECV, "%s(enter): sw_prod = 0x%04X, "
3695 		"sw_cons = 0x%04X, sw_prod_bseq = 0x%08X\n",
3696 		__func__, sw_prod, sw_cons, sw_prod_bseq);
3697 
3698 	/* Prevent speculative reads from getting ahead of the status block. */
3699 	bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
3700 			  BUS_SPACE_BARRIER_READ);
3701 
3702 	/* Update some debug statistics counters */
3703 	DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark),
3704 		sc->rx_low_watermark = sc->free_rx_bd);
3705 	DBRUNIF((sc->free_rx_bd == 0), sc->rx_empty_count++);
3706 
3707 	/* Scan through the receive chain as long as there is work to do. */
3708 	while (sw_cons != hw_cons) {
3709 		struct mbuf *m = NULL;
3710 		struct l2_fhdr *l2fhdr = NULL;
3711 		struct rx_bd *rxbd;
3712 		unsigned int len;
3713 		uint32_t status = 0;
3714 
3715 #ifdef foo /* DEVICE_POLLING */
3716 		/*
3717 		 * Even if polling(4) is enabled, we can't just reap
3718 		 * 'count' RX descriptors and leave.  It seems that the
3719 		 * RX engine would be left in a wired state if we broke
3720 		 * out of the loop in the middle.
3721 		 */
3722 		if (count >= 0 && count-- == 0)
3723 			break;
3724 #endif
3725 
3726 		/*
3727 		 * Convert the producer/consumer indices
3728 		 * to an actual rx_bd index.
3729 		 */
3730 		sw_chain_cons = RX_CHAIN_IDX(sw_cons);
3731 		sw_chain_prod = RX_CHAIN_IDX(sw_prod);
3732 
3733 		/* Get the used rx_bd. */
3734 		rxbd = &sc->rx_bd_chain[RX_PAGE(sw_chain_cons)]
3735 				       [RX_IDX(sw_chain_cons)];
3736 		sc->free_rx_bd++;
3737 
3738 		DBRUN(BCE_VERBOSE_RECV,
3739 		      if_printf(ifp, "%s(): ", __func__);
3740 		      bce_dump_rxbd(sc, sw_chain_cons, rxbd));
3741 
3742 		/* The mbuf is stored with the last rx_bd entry of a packet. */
3743 		if (sc->rx_mbuf_ptr[sw_chain_cons] != NULL) {
3744 			/* Validate that this is the last rx_bd. */
3745 			DBRUNIF((!(rxbd->rx_bd_flags & RX_BD_FLAGS_END)),
3746 				if_printf(ifp, "%s(%d): "
3747 				"Unexpected mbuf found in rx_bd[0x%04X]!\n",
3748 				__FILE__, __LINE__, sw_chain_cons);
3749 				bce_breakpoint(sc));
3750 
3751 			/*
3752 			 * ToDo: If the received packet is small enough
3753 			 * to fit into a single, non-M_EXT mbuf,
3754 			 * allocate a new mbuf here, copy the data to
3755 			 * that mbuf, and recycle the mapped jumbo frame.
3756 			 */
3757 
3758 			/* Unmap the mbuf from DMA space. */
3759 			bus_dmamap_sync(sc->rx_mbuf_tag,
3760 					sc->rx_mbuf_map[sw_chain_cons],
3761 					BUS_DMASYNC_POSTREAD);
3762 			bus_dmamap_unload(sc->rx_mbuf_tag,
3763 					  sc->rx_mbuf_map[sw_chain_cons]);
3764 
3765 			/* Remove the mbuf from the driver's chain. */
3766 			m = sc->rx_mbuf_ptr[sw_chain_cons];
3767 			sc->rx_mbuf_ptr[sw_chain_cons] = NULL;
3768 
3769 			/*
3770 			 * Frames received on the NetXtreme II are prepended
3771 			 * with an l2_fhdr structure which provides status
3772 			 * information about the received frame (including
3773 			 * VLAN tags and checksum info).  The frames are also
3774 			 * automatically adjusted to align the IP header
3775 			 * (i.e. two null bytes are inserted before the
3776 			 * Ethernet header).
3777 			 */
3778 			l2fhdr = mtod(m, struct l2_fhdr *);
3779 
3780 			len = l2fhdr->l2_fhdr_pkt_len;
3781 			status = l2fhdr->l2_fhdr_status;
3782 
3783 			DBRUNIF(DB_RANDOMTRUE(bce_debug_l2fhdr_status_check),
3784 				if_printf(ifp,
3785 				"Simulating l2_fhdr status error.\n");
3786 				status = status | L2_FHDR_ERRORS_PHY_DECODE);
3787 
3788 			/* Watch for unusually sized frames. */
3789 			DBRUNIF((len < BCE_MIN_MTU ||
3790 				 len > BCE_MAX_JUMBO_ETHER_MTU_VLAN),
3791 				if_printf(ifp,
3792 				"%s(%d): Unusual frame size found. "
3793 				"Min(%d), Actual(%d), Max(%d)\n",
3794 				__FILE__, __LINE__,
3795 				(int)BCE_MIN_MTU, len,
3796 				(int)BCE_MAX_JUMBO_ETHER_MTU_VLAN);
3797 				bce_dump_mbuf(sc, m);
3798 		 		bce_breakpoint(sc));
3799 
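			/* The reported length includes the frame CRC; strip it. */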
3800 			len -= ETHER_CRC_LEN;
3801 
3802 			/* Check the received frame for errors. */
3803 			if (status & (L2_FHDR_ERRORS_BAD_CRC |
3804 				      L2_FHDR_ERRORS_PHY_DECODE |
3805 				      L2_FHDR_ERRORS_ALIGNMENT |
3806 				      L2_FHDR_ERRORS_TOO_SHORT |
3807 				      L2_FHDR_ERRORS_GIANT_FRAME)) {
3808 				ifp->if_ierrors++;
3809 				DBRUNIF(1, sc->l2fhdr_status_errors++);
3810 
3811 				/* Reuse the mbuf for a new frame. */
3812 				if (bce_newbuf_std(sc, m, &sw_prod,
3813 						   &sw_chain_prod,
3814 						   &sw_prod_bseq)) {
3815 					DBRUNIF(1, bce_breakpoint(sc));
3816 					/* XXX */
3817 					panic("%s: Can't reuse RX mbuf!\n",
3818 					      ifp->if_xname);
3819 				}
3820 				m = NULL;
3821 				goto bce_rx_int_next_rx;
3822 			}
3823 
3824 			/*
3825 			 * Get a new mbuf for the rx_bd.   If no new
3826 			 * mbufs are available then reuse the current mbuf,
3827 			 * log an ierror on the interface, and generate
3828 			 * an error in the system log.
3829 			 */
3830 			if (bce_newbuf_std(sc, NULL, &sw_prod, &sw_chain_prod,
3831 					   &sw_prod_bseq)) {
3832 				DBRUN(BCE_WARN,
3833 				      if_printf(ifp,
3834 				      "%s(%d): Failed to allocate new mbuf, "
3835 				      "incoming frame dropped!\n",
3836 				      __FILE__, __LINE__));
3837 
3838 				ifp->if_ierrors++;
3839 
3840 				/* Try to reuse the existing mbuf. */
3841 				if (bce_newbuf_std(sc, m, &sw_prod,
3842 						   &sw_chain_prod,
3843 						   &sw_prod_bseq)) {
3844 					DBRUNIF(1, bce_breakpoint(sc));
3845 					/* XXX */
3846 					panic("%s: Double mbuf allocation "
3847 					      "failure!", ifp->if_xname);
3848 				}
3849 				m = NULL;
3850 				goto bce_rx_int_next_rx;
3851 			}
3852 
3853 			/*
3854 			 * Skip over the l2_fhdr when passing
3855 			 * the data up the stack.
3856 			 */
3857 			m_adj(m, sizeof(struct l2_fhdr) + ETHER_ALIGN);
3858 
3859 			m->m_pkthdr.len = m->m_len = len;
3860 			m->m_pkthdr.rcvif = ifp;
3861 
3862 			DBRUN(BCE_VERBOSE_RECV,
3863 			      struct ether_header *eh;
3864 			      eh = mtod(m, struct ether_header *);
3865 			      if_printf(ifp, "%s(): to: %6D, from: %6D, "
3866 			      		"type: 0x%04X\n", __func__,
3867 					eh->ether_dhost, ":",
3868 					eh->ether_shost, ":",
3869 					htons(eh->ether_type)));
3870 
3871 			/* Validate the checksum if offload enabled. */
3872 			if (ifp->if_capenable & IFCAP_RXCSUM) {
3873 				/* Check for an IP datagram. */
3874 				if (status & L2_FHDR_STATUS_IP_DATAGRAM) {
3875 					m->m_pkthdr.csum_flags |=
3876 						CSUM_IP_CHECKED;
3877 
3878 					/* Check if the IP checksum is valid. */
3879 					if ((l2fhdr->l2_fhdr_ip_xsum ^
3880 					     0xffff) == 0) {
3881 						m->m_pkthdr.csum_flags |=
3882 							CSUM_IP_VALID;
3883 					} else {
3884 						DBPRINT(sc, BCE_WARN_RECV,
3885 							"%s(): Invalid IP checksum = 0x%04X!\n",
3886 							__func__, l2fhdr->l2_fhdr_ip_xsum);
3887 					}
3888 				}
3889 
3890 				/* Check for a valid TCP/UDP frame. */
3891 				if (status & (L2_FHDR_STATUS_TCP_SEGMENT |
3892 					      L2_FHDR_STATUS_UDP_DATAGRAM)) {
3893 
3894 					/* Check for a good TCP/UDP checksum. */
3895 					if ((status &
3896 					     (L2_FHDR_ERRORS_TCP_XSUM |
3897 					      L2_FHDR_ERRORS_UDP_XSUM)) == 0) {
3898 						m->m_pkthdr.csum_data =
3899 						l2fhdr->l2_fhdr_tcp_udp_xsum;
3900 						m->m_pkthdr.csum_flags |=
3901 							CSUM_DATA_VALID |
3902 							CSUM_PSEUDO_HDR;
3903 					} else {
3904 						DBPRINT(sc, BCE_WARN_RECV,
3905 							"%s(): Invalid TCP/UDP checksum = 0x%04X!\n",
3906 							__func__, l2fhdr->l2_fhdr_tcp_udp_xsum);
3907 					}
3908 				}
3909 			}
3910 
3911 			ifp->if_ipackets++;
3912 bce_rx_int_next_rx:
3913 			sw_prod = NEXT_RX_BD(sw_prod);
3914 		}
3915 
3916 		sw_cons = NEXT_RX_BD(sw_cons);
3917 
3918 		/* If we have a packet, pass it up the stack */
3919 		if (m) {
3920 			DBPRINT(sc, BCE_VERBOSE_RECV,
3921 				"%s(): Passing received frame up.\n", __func__);
3922 
3923 			if (status & L2_FHDR_STATUS_L2_VLAN_TAG)
3924 				VLAN_INPUT_TAG(m, l2fhdr->l2_fhdr_vlan_tag);
3925 			else
3926 				ifp->if_input(ifp, m);
3927 
3928 			DBRUNIF(1, sc->rx_mbuf_alloc--);
3929 		}
3930 
3931 		/*
3932 		 * If polling(4) is not enabled, refresh hw_cons to see
3933 		 * whether there's new work.
3934 		 *
3935 		 * If polling(4) is enabled, i.e. count >= 0, refreshing
3936 		 * should not be performed, so that we would not spend
3937 		 * too much time in RX processing.
3938 		 */
3939 		if (count < 0 && sw_cons == hw_cons) {
3940 			hw_cons = sc->hw_rx_cons =
3941 				sblk->status_rx_quick_consumer_index0;
3942 			if ((hw_cons & USABLE_RX_BD_PER_PAGE) ==
3943 			    USABLE_RX_BD_PER_PAGE)
3944 				hw_cons++;
3945 		}
3946 
3947 		/*
3948 		 * Prevent speculative reads from getting ahead
3949 		 * of the status block.
3950 		 */
3951 		bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
3952 				  BUS_SPACE_BARRIER_READ);
3953 	}
3954 
3955 	for (i = 0; i < RX_PAGES; i++) {
3956 		bus_dmamap_sync(sc->rx_bd_chain_tag,
3957 				sc->rx_bd_chain_map[i], BUS_DMASYNC_PREWRITE);
3958 	}
3959 
3960 	sc->rx_cons = sw_cons;
3961 	sc->rx_prod = sw_prod;
3962 	sc->rx_prod_bseq = sw_prod_bseq;
3963 
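	/* Tell the chip about the newly filled rx_bd's. */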
3964 	REG_WR16(sc, MB_RX_CID_ADDR + BCE_L2CTX_HOST_BDIDX, sc->rx_prod);
3965 	REG_WR(sc, MB_RX_CID_ADDR + BCE_L2CTX_HOST_BSEQ, sc->rx_prod_bseq);
3966 
3967 	DBPRINT(sc, BCE_INFO_RECV, "%s(exit): rx_prod = 0x%04X, "
3968 		"rx_cons = 0x%04X, rx_prod_bseq = 0x%08X\n",
3969 		__func__, sc->rx_prod, sc->rx_cons, sc->rx_prod_bseq);
3970 }
3971 
3972 
3973 /****************************************************************************/
3974 /* Handles transmit completion interrupt events.                            */
3975 /*                                                                          */
3976 /* Returns:                                                                 */
3977 /*   Nothing.                                                               */
3978 /****************************************************************************/
3979 static void
3980 bce_tx_intr(struct bce_softc *sc)
3981 {
3982 	struct status_block *sblk = sc->status_block;
3983 	struct ifnet *ifp = &sc->arpcom.ac_if;
3984 	uint16_t hw_tx_cons, sw_tx_cons, sw_tx_chain_cons;
3985 
3986 	ASSERT_SERIALIZED(ifp->if_serializer);
3987 
3988 	DBRUNIF(1, sc->tx_interrupts++);
3989 
3990 	/* Get the hardware's view of the TX consumer index. */
3991 	hw_tx_cons = sc->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
3992 
3993 	/* Skip to the next entry if this is a chain page pointer. */
3994 	if ((hw_tx_cons & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
3995 		hw_tx_cons++;
3996 
3997 	sw_tx_cons = sc->tx_cons;
3998 
3999 	/* Prevent speculative reads from getting ahead of the status block. */
4000 	bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
4001 			  BUS_SPACE_BARRIER_READ);
4002 
4003 	/* Cycle through any completed TX chain page entries. */
4004 	while (sw_tx_cons != hw_tx_cons) {
4005 #ifdef BCE_DEBUG
4006 		struct tx_bd *txbd = NULL;
4007 #endif
4008 		sw_tx_chain_cons = TX_CHAIN_IDX(sw_tx_cons);
4009 
4010 		DBPRINT(sc, BCE_INFO_SEND,
4011 			"%s(): hw_tx_cons = 0x%04X, sw_tx_cons = 0x%04X, "
4012 			"sw_tx_chain_cons = 0x%04X\n",
4013 			__func__, hw_tx_cons, sw_tx_cons, sw_tx_chain_cons);
4014 
4015 		DBRUNIF((sw_tx_chain_cons > MAX_TX_BD),
4016 			if_printf(ifp, "%s(%d): "
4017 				  "TX chain consumer out of range! "
4018 				  " 0x%04X > 0x%04X\n",
4019 				  __FILE__, __LINE__, sw_tx_chain_cons,
4020 				  (int)MAX_TX_BD);
4021 			bce_breakpoint(sc));
4022 
4023 		DBRUNIF(1, txbd = &sc->tx_bd_chain[TX_PAGE(sw_tx_chain_cons)]
4024 				[TX_IDX(sw_tx_chain_cons)]);
4025 
4026 		DBRUNIF((txbd == NULL),
4027 			if_printf(ifp, "%s(%d): "
4028 				  "Unexpected NULL tx_bd[0x%04X]!\n",
4029 				  __FILE__, __LINE__, sw_tx_chain_cons);
4030 			bce_breakpoint(sc));
4031 
4032 		DBRUN(BCE_INFO_SEND,
4033 		      if_printf(ifp, "%s(): ", __func__);
4034 		      bce_dump_txbd(sc, sw_tx_chain_cons, txbd));
4035 
4036 		/*
4037 		 * Free the associated mbuf. Remember
4038 		 * that only the last tx_bd of a packet
4039 		 * has an mbuf pointer and DMA map.
4040 		 */
4041 		if (sc->tx_mbuf_ptr[sw_tx_chain_cons] != NULL) {
4042 			/* Validate that this is the last tx_bd. */
4043 			DBRUNIF((!(txbd->tx_bd_flags & TX_BD_FLAGS_END)),
4044 				if_printf(ifp, "%s(%d): "
4045 				"tx_bd END flag not set but "
4046 				"txmbuf != NULL!\n", __FILE__, __LINE__);
4047 				bce_breakpoint(sc));
4048 
4049 			DBRUN(BCE_INFO_SEND,
4050 			      if_printf(ifp, "%s(): Unloading map/freeing mbuf "
4051 			      		"from tx_bd[0x%04X]\n", __func__,
4052 					sw_tx_chain_cons));
4053 
4054 			/* Unmap the mbuf. */
4055 			bus_dmamap_unload(sc->tx_mbuf_tag,
4056 					  sc->tx_mbuf_map[sw_tx_chain_cons]);
4057 
4058 			/* Free the mbuf. */
4059 			m_freem(sc->tx_mbuf_ptr[sw_tx_chain_cons]);
4060 			sc->tx_mbuf_ptr[sw_tx_chain_cons] = NULL;
4061 			DBRUNIF(1, sc->tx_mbuf_alloc--);
4062 
4063 			ifp->if_opackets++;
4064 		}
4065 
4066 		sc->used_tx_bd--;
4067 		sw_tx_cons = NEXT_TX_BD(sw_tx_cons);
4068 
4069 		if (sw_tx_cons == hw_tx_cons) {
4070 			/* Refresh hw_cons to see if there's new work. */
4071 			hw_tx_cons = sc->hw_tx_cons =
4072 				sblk->status_tx_quick_consumer_index0;
4073 			if ((hw_tx_cons & USABLE_TX_BD_PER_PAGE) ==
4074 			    USABLE_TX_BD_PER_PAGE)
4075 				hw_tx_cons++;
4076 		}
4077 
4078 		/*
4079 		 * Prevent speculative reads from getting
4080 		 * ahead of the status block.
4081 		 */
4082 		bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
4083 				  BUS_SPACE_BARRIER_READ);
4084 	}
4085 
4086 	if (sc->used_tx_bd == 0) {
4087 		/* Clear the TX timeout timer. */
4088 		ifp->if_timer = 0;
4089 	}
4090 
4091 	/* Clear the tx hardware queue full flag. */
4092 	if (sc->max_tx_bd - sc->used_tx_bd >= BCE_TX_SPARE_SPACE) {
4093 		DBRUNIF((ifp->if_flags & IFF_OACTIVE),
4094 			DBPRINT(sc, BCE_WARN_SEND,
4095 				"%s(): Open TX chain! %d/%d (used/total)\n",
4096 				__func__, sc->used_tx_bd, sc->max_tx_bd));
4097 		ifp->if_flags &= ~IFF_OACTIVE;
4098 	}
4099 	sc->tx_cons = sw_tx_cons;
4100 }
4101 
4102 
4103 /****************************************************************************/
4104 /* Disables interrupt generation.                                           */
4105 /*                                                                          */
4106 /* Returns:                                                                 */
4107 /*   Nothing.                                                               */
4108 /****************************************************************************/
4109 static void
4110 bce_disable_intr(struct bce_softc *sc)
4111 {
4112 	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, BCE_PCICFG_INT_ACK_CMD_MASK_INT);
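	/* The read back forces the preceding posted write to complete. */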
4113 	REG_RD(sc, BCE_PCICFG_INT_ACK_CMD);
4114 	lwkt_serialize_handler_disable(sc->arpcom.ac_if.if_serializer);
4115 }
4116 
4117 
4118 /****************************************************************************/
4119 /* Enables interrupt generation.                                            */
4120 /*                                                                          */
4121 /* Returns:                                                                 */
4122 /*   Nothing.                                                               */
4123 /****************************************************************************/
4124 static void
4125 bce_enable_intr(struct bce_softc *sc)
4126 {
4127 	uint32_t val;
4128 
4129 	lwkt_serialize_handler_enable(sc->arpcom.ac_if.if_serializer);
4130 
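	/*
	 * Write the last status index first with the interrupt masked,
	 * then with the mask cleared so interrupt generation resumes.
	 */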
4131 	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
4132 	       BCE_PCICFG_INT_ACK_CMD_INDEX_VALID |
4133 	       BCE_PCICFG_INT_ACK_CMD_MASK_INT | sc->last_status_idx);
4134 
4135 	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
4136 	       BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx);
4137 
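	/* Request an immediate coalesce to pick up any pending status update. */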
4138 	val = REG_RD(sc, BCE_HC_COMMAND);
4139 	REG_WR(sc, BCE_HC_COMMAND, val | BCE_HC_COMMAND_COAL_NOW);
4140 }
4141 
4142 
4143 /****************************************************************************/
4144 /* Handles controller initialization.                                       */
4145 /*                                                                          */
4146 /* Returns:                                                                 */
4147 /*   Nothing.                                                               */
4148 /****************************************************************************/
4149 static void
4150 bce_init(void *xsc)
4151 {
4152 	struct bce_softc *sc = xsc;
4153 	struct ifnet *ifp = &sc->arpcom.ac_if;
4154 	uint32_t ether_mtu;
4155 	int error;
4156 
4157 	ASSERT_SERIALIZED(ifp->if_serializer);
4158 
4159 	/* Check if the driver is still running and bail out if it is. */
4160 	if (ifp->if_flags & IFF_RUNNING)
4161 		return;
4162 
4163 	bce_stop(sc);
4164 
4165 	error = bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
4166 	if (error) {
4167 		if_printf(ifp, "Controller reset failed!\n");
4168 		goto back;
4169 	}
4170 
4171 	error = bce_chipinit(sc);
4172 	if (error) {
4173 		if_printf(ifp, "Controller initialization failed!\n");
4174 		goto back;
4175 	}
4176 
4177 	error = bce_blockinit(sc);
4178 	if (error) {
4179 		if_printf(ifp, "Block initialization failed!\n");
4180 		goto back;
4181 	}
4182 
4183 	/* Load our MAC address. */
4184 	bcopy(IF_LLADDR(ifp), sc->eaddr, ETHER_ADDR_LEN);
4185 	bce_set_mac_addr(sc);
4186 
4187 	/* Calculate and program the Ethernet MTU size. */
4188 	ether_mtu = ETHER_HDR_LEN + EVL_ENCAPLEN + ifp->if_mtu + ETHER_CRC_LEN;
4189 
4190 	DBPRINT(sc, BCE_INFO, "%s(): setting mtu = %d\n", __func__, ether_mtu);
4191 
4192 	/*
4193 	 * Program the mtu, enabling jumbo frame
4194 	 * support if necessary.  Also set the mbuf
4195 	 * allocation size for RX frames.
4196 	 */
4197 	if (ether_mtu > ETHER_MAX_LEN + EVL_ENCAPLEN) {
4198 #ifdef notyet
4199 		REG_WR(sc, BCE_EMAC_RX_MTU_SIZE,
4200 		       min(ether_mtu, BCE_MAX_JUMBO_ETHER_MTU) |
4201 		       BCE_EMAC_RX_MTU_SIZE_JUMBO_ENA);
4202 		sc->mbuf_alloc_size = MJUM9BYTES;
4203 #else
4204 		panic("jumbo buffer is not supported yet\n");
4205 #endif
4206 	} else {
4207 		REG_WR(sc, BCE_EMAC_RX_MTU_SIZE, ether_mtu);
4208 		sc->mbuf_alloc_size = MCLBYTES;
4209 	}
4210 
4211 	/* Calculate the RX Ethernet frame size for rx_bd's. */
4212 	sc->max_frame_size = sizeof(struct l2_fhdr) + 2 + ether_mtu + 8;
4213 
4214 	DBPRINT(sc, BCE_INFO,
4215 		"%s(): mclbytes = %d, mbuf_alloc_size = %d, "
4216 		"max_frame_size = %d\n",
4217 		__func__, (int)MCLBYTES, sc->mbuf_alloc_size,
4218 		sc->max_frame_size);
4219 
4220 	/* Program appropriate promiscuous/multicast filtering. */
4221 	bce_set_rx_mode(sc);
4222 
4223 	/* Init RX buffer descriptor chain. */
4224 	bce_init_rx_chain(sc);	/* XXX return value */
4225 
4226 	/* Init TX buffer descriptor chain. */
4227 	bce_init_tx_chain(sc);	/* XXX return value */
4228 
4229 #ifdef DEVICE_POLLING
4230 	/* Disable interrupts if we are polling. */
4231 	if (ifp->if_flags & IFF_POLLING) {
4232 		bce_disable_intr(sc);
4233 
4234 		REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
4235 		       (1 << 16) | sc->bce_rx_quick_cons_trip);
4236 		REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
4237 		       (1 << 16) | sc->bce_tx_quick_cons_trip);
4238 	} else
4239 #endif
4240 	/* Enable host interrupts. */
4241 	bce_enable_intr(sc);
4242 
4243 	bce_ifmedia_upd(ifp);
4244 
4245 	ifp->if_flags |= IFF_RUNNING;
4246 	ifp->if_flags &= ~IFF_OACTIVE;
4247 
4248 	callout_reset(&sc->bce_stat_ch, hz, bce_tick, sc);
4249 back:
4250 	if (error)
4251 		bce_stop(sc);
4252 }
4253 
4254 
4255 /****************************************************************************/
4256 /* Initialize the controller just enough so that any management firmware    */
4257 /* running on the device will continue to operate correctly.                */
4258 /*                                                                          */
4259 /* Returns:                                                                 */
4260 /*   Nothing.                                                               */
4261 /****************************************************************************/
4262 static void
4263 bce_mgmt_init(struct bce_softc *sc)
4264 {
4265 	struct ifnet *ifp = &sc->arpcom.ac_if;
4266 	uint32_t val;
4267 
4268 	/* Check if the driver is still running and bail out if it is. */
4269 	if (ifp->if_flags & IFF_RUNNING)
4270 		return;
4271 
4272 	/* Initialize the on-board CPUs. */
4273 	bce_init_cpus(sc);
4274 
4275 	/* Set the page size and clear the RV2P processor stall bits. */
4276 	val = (BCM_PAGE_BITS - 8) << 24;
4277 	REG_WR(sc, BCE_RV2P_CONFIG, val);
4278 
4279 	/* Enable all critical blocks in the MAC. */
4280 	REG_WR(sc, BCE_MISC_ENABLE_SET_BITS,
4281 	       BCE_MISC_ENABLE_SET_BITS_RX_V2P_ENABLE |
4282 	       BCE_MISC_ENABLE_SET_BITS_RX_DMA_ENABLE |
4283 	       BCE_MISC_ENABLE_SET_BITS_COMPLETION_ENABLE);
4284 	REG_RD(sc, BCE_MISC_ENABLE_SET_BITS);
4285 	DELAY(20);
4286 
4287 	bce_ifmedia_upd(ifp);
4288 }
4289 
4290 
4291 /****************************************************************************/
4292 /* Encapsulate an mbuf cluster into the tx_bd chain structure and make the  */
4293 /* memory visible to the controller.                                        */
4294 /*                                                                          */
4295 /* Returns:                                                                 */
4296 /*   0 for success, positive value for failure.                             */
4297 /****************************************************************************/
4298 static int
4299 bce_encap(struct bce_softc *sc, struct mbuf **m_head)
4300 {
4301 	struct bce_dmamap_arg ctx;
4302 	bus_dma_segment_t segs[BCE_MAX_SEGMENTS];
4303 	bus_dmamap_t map, tmp_map;
4304 	struct mbuf *m0 = *m_head;
4305 	struct tx_bd *txbd = NULL;
4306 	uint16_t vlan_tag = 0, flags = 0;
4307 	uint16_t chain_prod, chain_prod_start, prod;
4308 	uint32_t prod_bseq;
4309 	int i, error, maxsegs;
4310 #ifdef BCE_DEBUG
4311 	uint16_t debug_prod;
4312 #endif
4313 
4314 	/* Transfer any checksum offload flags to the bd. */
4315 	if (m0->m_pkthdr.csum_flags) {
4316 		if (m0->m_pkthdr.csum_flags & CSUM_IP)
4317 			flags |= TX_BD_FLAGS_IP_CKSUM;
4318 		if (m0->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
4319 			flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
4320 	}
4321 
4322 	/* Transfer any VLAN tags to the bd. */
4323 	if ((m0->m_flags & (M_PROTO1 | M_PKTHDR)) == (M_PROTO1 | M_PKTHDR) &&
4324 	    m0->m_pkthdr.rcvif != NULL &&
4325 	    m0->m_pkthdr.rcvif->if_type == IFT_L2VLAN) {
4326 	    	struct ifvlan *ifv = m0->m_pkthdr.rcvif->if_softc;
4327 
4328 		flags |= TX_BD_FLAGS_VLAN_TAG;
4329 		vlan_tag = ifv->ifv_tag;
4330 	}
4331 
4332 	prod = sc->tx_prod;
4333 	chain_prod_start = chain_prod = TX_CHAIN_IDX(prod);
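	/*
	 * Remember the starting chain index; the DMA map loaded below is
	 * swapped to the last descriptor's slot near the end of this
	 * function so it can be unloaded when the whole frame completes.
	 */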
4334 
4335 	/* Map the mbuf into DMAable memory. */
4336 	map = sc->tx_mbuf_map[chain_prod_start];
4337 
4338 	maxsegs = sc->max_tx_bd - sc->used_tx_bd;
4339 	KASSERT(maxsegs >= BCE_TX_SPARE_SPACE,
4340 		("not enough segments %d\n", maxsegs));
4341 	if (maxsegs > BCE_MAX_SEGMENTS)
4342 		maxsegs = BCE_MAX_SEGMENTS;
4343 
4344 	/* Map the mbuf into our DMA address space. */
4345 	ctx.bce_maxsegs = maxsegs;
4346 	ctx.bce_segs = segs;
4347 	error = bus_dmamap_load_mbuf(sc->tx_mbuf_tag, map, m0,
4348 				     bce_dma_map_mbuf, &ctx, BUS_DMA_NOWAIT);
4349 	if (error == EFBIG || ctx.bce_maxsegs == 0) {
4350 		DBPRINT(sc, BCE_WARN, "%s(): fragmented mbuf\n", __func__);
4351 		DBRUNIF(1, bce_dump_mbuf(sc, m0););
4352 
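		/*
		 * The mbuf chain has too many segments to map directly;
		 * coalesce it with m_defrag() and retry the mapping.
		 */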
4353 		m0 = m_defrag(*m_head, MB_DONTWAIT);
4354 		if (m0 == NULL) {
4355 			error = ENOBUFS;
4356 			goto back;
4357 		}
4358 		*m_head = m0;
4359 
4360 		ctx.bce_maxsegs = maxsegs;
4361 		ctx.bce_segs = segs;
4362 		error = bus_dmamap_load_mbuf(sc->tx_mbuf_tag, map, m0,
4363 					     bce_dma_map_mbuf, &ctx,
4364 					     BUS_DMA_NOWAIT);
4365 		if (error || ctx.bce_maxsegs == 0) {
4366 			if_printf(&sc->arpcom.ac_if,
4367 				  "Error mapping mbuf into TX chain\n");
4368 			if (error == 0)
4369 				error = EFBIG;
4370 			goto back;
4371 		}
4372 	} else if (error) {
4373 		if_printf(&sc->arpcom.ac_if,
4374 			  "Error mapping mbuf into TX chain\n");
4375 		goto back;
4376 	}
4377 
4378 	/* prod points to an empty tx_bd at this point. */
4379 	prod_bseq  = sc->tx_prod_bseq;
4380 
4381 #ifdef BCE_DEBUG
4382 	debug_prod = chain_prod;
4383 #endif
4384 
4385 	DBPRINT(sc, BCE_INFO_SEND,
4386 		"%s(): Start: prod = 0x%04X, chain_prod = %04X, "
4387 		"prod_bseq = 0x%08X\n",
4388 		__func__, prod, chain_prod, prod_bseq);
4389 
4390 	/*
4391 	 * Cycle through each mbuf segment that makes up
4392 	 * the outgoing frame, gathering the mapping info
4393 	 * for that segment and creating a tx_bd for
4394 	 * the mbuf.
4395 	 */
4396 	for (i = 0; i < ctx.bce_maxsegs; i++) {
4397 		chain_prod = TX_CHAIN_IDX(prod);
4398 		txbd= &sc->tx_bd_chain[TX_PAGE(chain_prod)][TX_IDX(chain_prod)];
4399 
4400 		txbd->tx_bd_haddr_lo = htole32(BCE_ADDR_LO(segs[i].ds_addr));
4401 		txbd->tx_bd_haddr_hi = htole32(BCE_ADDR_HI(segs[i].ds_addr));
4402 		txbd->tx_bd_mss_nbytes = htole16(segs[i].ds_len);
4403 		txbd->tx_bd_vlan_tag = htole16(vlan_tag);
4404 		txbd->tx_bd_flags = htole16(flags);
4405 		prod_bseq += segs[i].ds_len;
4406 		if (i == 0)
4407 			txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_START);
4408 		prod = NEXT_TX_BD(prod);
4409 	}
4410 
4411 	/* Set the END flag on the last TX buffer descriptor. */
4412 	txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_END);
4413 
4414 	DBRUN(BCE_EXCESSIVE_SEND,
4415 	      bce_dump_tx_chain(sc, debug_prod, ctx.bce_maxsegs));
4416 
4417 	DBPRINT(sc, BCE_INFO_SEND,
4418 		"%s(): End: prod = 0x%04X, chain_prod = %04X, "
4419 		"prod_bseq = 0x%08X\n",
4420 		__func__, prod, chain_prod, prod_bseq);
4421 
4422 	bus_dmamap_sync(sc->tx_mbuf_tag, map, BUS_DMASYNC_PREWRITE);
4423 
4424 	/*
4425 	 * Ensure that the mbuf pointer for this transmission
4426 	 * is placed at the array index of the last
4427 	 * descriptor in this chain.  This is done
4428 	 * because a single map is used for all
4429 	 * segments of the mbuf and we don't want to
4430 	 * unload the map before all of the segments
4431 	 * have been freed.
4432 	 */
4433 	sc->tx_mbuf_ptr[chain_prod] = m0;
4434 
4435 	tmp_map = sc->tx_mbuf_map[chain_prod];
4436 	sc->tx_mbuf_map[chain_prod] = map;
4437 	sc->tx_mbuf_map[chain_prod_start] = tmp_map;
4438 
4439 	sc->used_tx_bd += ctx.bce_maxsegs;
4440 
4441 	/* Update some debug statistics counters */
4442 	DBRUNIF((sc->used_tx_bd > sc->tx_hi_watermark),
4443 		sc->tx_hi_watermark = sc->used_tx_bd);
4444 	DBRUNIF((sc->used_tx_bd == sc->max_tx_bd), sc->tx_full_count++);
4445 	DBRUNIF(1, sc->tx_mbuf_alloc++);
4446 
4447 	DBRUN(BCE_VERBOSE_SEND,
4448 	      bce_dump_tx_mbuf_chain(sc, chain_prod, ctx.bce_maxsegs));
4449 
4450 	/* prod points to the next free tx_bd at this point. */
4451 	sc->tx_prod = prod;
4452 	sc->tx_prod_bseq = prod_bseq;
4453 back:
4454 	if (error) {
4455 		m_freem(*m_head);
4456 		*m_head = NULL;
4457 	}
4458 	return error;
4459 }
4460 
4461 
4462 /****************************************************************************/
4463 /* Main transmit routine when called from another routine with a lock.      */
4464 /*                                                                          */
4465 /* Returns:                                                                 */
4466 /*   Nothing.                                                               */
4467 /****************************************************************************/
4468 static void
4469 bce_start(struct ifnet *ifp)
4470 {
4471 	struct bce_softc *sc = ifp->if_softc;
4472 	int count = 0;
4473 
4474 	ASSERT_SERIALIZED(ifp->if_serializer);
4475 
4476 	/* If the transmitter is stopped or busy, or there's no link, just exit. */
4477 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING ||
4478 	    !sc->bce_link)
4479 		return;
4480 
4481 	DBPRINT(sc, BCE_INFO_SEND,
4482 		"%s(): Start: tx_prod = 0x%04X, tx_chain_prod = %04X, "
4483 		"tx_prod_bseq = 0x%08X\n",
4484 		__func__,
4485 		sc->tx_prod, TX_CHAIN_IDX(sc->tx_prod), sc->tx_prod_bseq);
4486 
4487 	for (;;) {
4488 		struct mbuf *m_head;
4489 
4490 		/*
4491 		 * We keep BCE_TX_SPARE_SPACE entries, so bce_encap() is
4492 		 * unlikely to fail.
4493 		 */
4494 		if (sc->max_tx_bd - sc->used_tx_bd < BCE_TX_SPARE_SPACE) {
4495 			ifp->if_flags |= IFF_OACTIVE;
4496 			break;
4497 		}
4498 
4499 		/* Check for any frames to send. */
4500 		m_head = ifq_dequeue(&ifp->if_snd, NULL);
4501 		if (m_head == NULL)
4502 			break;
4503 
4504 		/*
4505 		 * Pack the data into the transmit ring.  If we
4506 		 * don't have room, set the OACTIVE flag to wait
4507 		 * for the NIC to drain the chain; bce_encap()
4508 		 * frees the mbuf on failure.
4509 		 */
4510 		if (bce_encap(sc, &m_head)) {
4511 			ifp->if_flags |= IFF_OACTIVE;
4512 			DBPRINT(sc, BCE_INFO_SEND,
4513 				"TX chain is closed for business! "
4514 				"Total tx_bd used = %d\n",
4515 				sc->used_tx_bd);
4516 			break;
4517 		}
4518 
4519 		count++;
4520 
4521 		/* Send a copy of the frame to any BPF listeners. */
4522 		BPF_MTAP(ifp, m_head);
4523 	}
4524 
4525 	if (count == 0) {
4526 		/* no packets were dequeued */
4527 		DBPRINT(sc, BCE_VERBOSE_SEND,
4528 			"%s(): No packets were dequeued\n", __func__);
4529 		return;
4530 	}
4531 
4532 	DBPRINT(sc, BCE_INFO_SEND,
4533 		"%s(): End: tx_prod = 0x%04X, tx_chain_prod = 0x%04X, "
4534 		"tx_prod_bseq = 0x%08X\n",
4535 		__func__,
4536 		sc->tx_prod, TX_CHAIN_IDX(sc->tx_prod), sc->tx_prod_bseq);
4537 
4538 	/* Start the transmit. */
4539 	REG_WR16(sc, MB_TX_CID_ADDR + BCE_L2CTX_TX_HOST_BIDX, sc->tx_prod);
4540 	REG_WR(sc, MB_TX_CID_ADDR + BCE_L2CTX_TX_HOST_BSEQ, sc->tx_prod_bseq);
4541 
4542 	/* Set the tx timeout. */
4543 	ifp->if_timer = BCE_TX_TIMEOUT;
4544 }
4545 
4546 
4547 /****************************************************************************/
4548 /* Handles any IOCTL calls from the operating system.                       */
4549 /*                                                                          */
4550 /* Returns:                                                                 */
4551 /*   0 for success, positive value for failure.                             */
4552 /****************************************************************************/
4553 static int
4554 bce_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
4555 {
4556 	struct bce_softc *sc = ifp->if_softc;
4557 	struct ifreq *ifr = (struct ifreq *)data;
4558 	struct mii_data *mii;
4559 	int mask, error = 0;
4560 
4561 	ASSERT_SERIALIZED(ifp->if_serializer);
4562 
4563 	switch(command) {
4564 	case SIOCSIFMTU:
4565 		/* Check that the MTU setting is supported. */
4566 		if (ifr->ifr_mtu < BCE_MIN_MTU ||
4567 #ifdef notyet
4568 		    ifr->ifr_mtu > BCE_MAX_JUMBO_MTU
4569 #else
4570 		    ifr->ifr_mtu > ETHERMTU
4571 #endif
4572 		   ) {
4573 			error = EINVAL;
4574 			break;
4575 		}
4576 
4577 		DBPRINT(sc, BCE_INFO, "Setting new MTU of %d\n", ifr->ifr_mtu);
4578 
4579 		ifp->if_mtu = ifr->ifr_mtu;
4580 		ifp->if_flags &= ~IFF_RUNNING;	/* Force reinitialize */
4581 		bce_init(sc);
4582 		break;
4583 
4584 	case SIOCSIFFLAGS:
4585 		if (ifp->if_flags & IFF_UP) {
4586 			if (ifp->if_flags & IFF_RUNNING) {
4587 				mask = ifp->if_flags ^ sc->bce_if_flags;
4588 
4589 				if (mask & (IFF_PROMISC | IFF_ALLMULTI))
4590 					bce_set_rx_mode(sc);
4591 			} else {
4592 				bce_init(sc);
4593 			}
4594 		} else if (ifp->if_flags & IFF_RUNNING) {
4595 			bce_stop(sc);
4596 		}
4597 		sc->bce_if_flags = ifp->if_flags;
4598 		break;
4599 
4600 	case SIOCADDMULTI:
4601 	case SIOCDELMULTI:
4602 		if (ifp->if_flags & IFF_RUNNING)
4603 			bce_set_rx_mode(sc);
4604 		break;
4605 
4606 	case SIOCSIFMEDIA:
4607 	case SIOCGIFMEDIA:
4608 		DBPRINT(sc, BCE_VERBOSE, "bce_phy_flags = 0x%08X\n",
4609 			sc->bce_phy_flags);
4610 		DBPRINT(sc, BCE_VERBOSE, "Copper media set/get\n");
4611 
4612 		mii = device_get_softc(sc->bce_miibus);
4613 		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
4614 		break;
4615 
4616 	case SIOCSIFCAP:
4617 		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
4618 		DBPRINT(sc, BCE_INFO, "Received SIOCSIFCAP = 0x%08X\n",
4619 			(uint32_t) mask);
4620 
4621 		if (mask & IFCAP_HWCSUM) {
4622 			ifp->if_capenable ^= IFCAP_HWCSUM;
4623 			if (IFCAP_HWCSUM & ifp->if_capenable)
4624 				ifp->if_hwassist = BCE_IF_HWASSIST;
4625 			else
4626 				ifp->if_hwassist = 0;
4627 		}
4628 		break;
4629 
4630 	default:
4631 		error = ether_ioctl(ifp, command, data);
4632 		break;
4633 	}
4634 	return error;
4635 }
4636 
4637 
4638 /****************************************************************************/
4639 /* Transmit timeout handler.                                                */
4640 /*                                                                          */
4641 /* Returns:                                                                 */
4642 /*   Nothing.                                                               */
4643 /****************************************************************************/
4644 static void
4645 bce_watchdog(struct ifnet *ifp)
4646 {
4647 	struct bce_softc *sc = ifp->if_softc;
4648 
4649 	ASSERT_SERIALIZED(ifp->if_serializer);
4650 
4651 	DBRUN(BCE_VERBOSE_SEND,
4652 	      bce_dump_driver_state(sc);
4653 	      bce_dump_status_block(sc));
4654 
4655 	/*
4656 	 * If we are in this routine because of pause frames, then
4657 	 * don't reset the hardware.
4658 	 */
4659 	if (REG_RD(sc, BCE_EMAC_TX_STATUS) & BCE_EMAC_TX_STATUS_XOFFED)
4660 		return;
4661 
4662 	if_printf(ifp, "Watchdog timeout occurred, resetting!\n");
4663 
4664 	/* DBRUN(BCE_FATAL, bce_breakpoint(sc)); */
4665 
4666 	ifp->if_flags &= ~IFF_RUNNING;	/* Force reinitialize */
4667 	bce_init(sc);
4668 
4669 	ifp->if_oerrors++;
4670 
4671 	if (!ifq_is_empty(&ifp->if_snd))
4672 		ifp->if_start(ifp);
4673 }
4674 
4675 
4676 #ifdef DEVICE_POLLING
4677 
4678 static void
4679 bce_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
4680 {
4681 	struct bce_softc *sc = ifp->if_softc;
4682 	struct status_block *sblk = sc->status_block;
4683 
4684 	ASSERT_SERIALIZED(ifp->if_serializer);
4685 
4686 	switch (cmd) {
4687 	case POLL_REGISTER:
4688 		bce_disable_intr(sc);
4689 
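		/*
		 * While polling, the host coalescing quick consumer trip
		 * registers are reprogrammed here; the interrupt-mode
		 * values saved in the softc are restored in the
		 * POLL_DEREGISTER case below.
		 */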
4690 		REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
4691 		       (1 << 16) | sc->bce_rx_quick_cons_trip);
4692 		REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
4693 		       (1 << 16) | sc->bce_tx_quick_cons_trip);
4694 		return;
4695 	case POLL_DEREGISTER:
4696 		bce_enable_intr(sc);
4697 
4698 		REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
4699 		       (sc->bce_tx_quick_cons_trip_int << 16) |
4700 		       sc->bce_tx_quick_cons_trip);
4701 		REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
4702 		       (sc->bce_rx_quick_cons_trip_int << 16) |
4703 		       sc->bce_rx_quick_cons_trip);
4704 		return;
4705 	default:
4706 		break;
4707 	}
4708 
4709 	bus_dmamap_sync(sc->status_tag, sc->status_map, BUS_DMASYNC_POSTREAD);
4710 
4711 	if (cmd == POLL_AND_CHECK_STATUS) {
4712 		uint32_t status_attn_bits;
4713 
4714 		status_attn_bits = sblk->status_attn_bits;
4715 
4716 		DBRUNIF(DB_RANDOMTRUE(bce_debug_unexpected_attention),
4717 			if_printf(ifp,
4718 			"Simulating unexpected status attention bit set.");
4719 			status_attn_bits |= STATUS_ATTN_BITS_PARITY_ERROR);
4720 
4721 		/* Was it a link change interrupt? */
4722 		if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
4723 		    (sblk->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE))
4724 			bce_phy_intr(sc);
4725 
4726 		/*
4727 		 * If any other attention is asserted then
4728 		 * the chip is toast.
4729 		 */
4730 		if ((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) !=
4731 		     (sblk->status_attn_bits_ack &
4732 		      ~STATUS_ATTN_BITS_LINK_STATE)) {
4733 			DBRUN(1, sc->unexpected_attentions++);
4734 
4735 			if_printf(ifp, "Fatal attention detected: 0x%08X\n",
4736 				  sblk->status_attn_bits);
4737 
4738 			DBRUN(BCE_FATAL,
4739 			if (bce_debug_unexpected_attention == 0)
4740 				bce_breakpoint(sc));
4741 
4742 			bce_init(sc);
4743 			return;
4744 		}
4745 	}
4746 
4747 	/* Check for any completed RX frames. */
4748 	if (sblk->status_rx_quick_consumer_index0 != sc->hw_rx_cons)
4749 		bce_rx_intr(sc, count);
4750 
4751 	/* Check for any completed TX frames. */
4752 	if (sblk->status_tx_quick_consumer_index0 != sc->hw_tx_cons)
4753 		bce_tx_intr(sc);
4754 
4755 	bus_dmamap_sync(sc->status_tag,	sc->status_map, BUS_DMASYNC_PREWRITE);
4756 
4757 	/* Check for new frames to transmit. */
4758 	if (!ifq_is_empty(&ifp->if_snd))
4759 		ifp->if_start(ifp);
4760 }
4761 
4762 #endif	/* DEVICE_POLLING */
4763 
4764 
4765 #if 0
4766 static inline int
4767 bce_has_work(struct bce_softc *sc)
4768 {
4769 	struct status_block *stat = sc->status_block;
4770 
4771 	if ((stat->status_rx_quick_consumer_index0 != sc->hw_rx_cons) ||
4772 	    (stat->status_tx_quick_consumer_index0 != sc->hw_tx_cons))
4773 		return 1;
4774 
4775 	if (((stat->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) != 0) !=
4776 	    sc->bce_link)
4777 		return 1;
4778 
4779 	return 0;
4780 }
4781 #endif
4782 
4783 
4784 /*
4785  * Interrupt handler.
4786  */
4787 /****************************************************************************/
4788 /* Main interrupt entry point.  Verifies that the controller generated the  */
4789 /* interrupt and then calls a separate routine to handle the various        */
4790 /* interrupt causes (PHY, TX, RX).                                          */
4791 /*                                                                          */
4792 /* Returns:                                                                 */
4793 /*   Nothing.                                                               */
4794 /****************************************************************************/
4795 static void
4796 bce_intr(void *xsc)
4797 {
4798 	struct bce_softc *sc = xsc;
4799 	struct ifnet *ifp = &sc->arpcom.ac_if;
4800 	struct status_block *sblk;
4801 
4802 	ASSERT_SERIALIZED(ifp->if_serializer);
4803 
4804 	DBPRINT(sc, BCE_EXCESSIVE, "Entering %s()\n", __func__);
4805 	DBRUNIF(1, sc->interrupts_generated++);
4806 
4807 	bus_dmamap_sync(sc->status_tag, sc->status_map, BUS_DMASYNC_POSTREAD);
4808 	sblk = sc->status_block;
4809 
4810 	/*
4811 	 * If the hardware status block index matches the last value
4812 	 * read by the driver and we haven't asserted our interrupt
4813 	 * then there's nothing to do.
4814 	 */
4815 	if (sblk->status_idx == sc->last_status_idx &&
4816 	    (REG_RD(sc, BCE_PCICFG_MISC_STATUS) &
4817 	     BCE_PCICFG_MISC_STATUS_INTA_VALUE))
4818 		return;
4819 
4820 	/* Ack the interrupt and stop others from occurring. */
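	/*
	 * This write acknowledges the interrupt and sets
	 * BCE_PCICFG_INT_ACK_CMD_MASK_INT so that further interrupts stay
	 * masked until they are explicitly re-enabled at the bottom of
	 * this routine.
	 */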
4821 	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
4822 	       BCE_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
4823 	       BCE_PCICFG_INT_ACK_CMD_MASK_INT);
4824 
4825 	/* Keep processing data as long as there is work to do. */
4826 	for (;;) {
4827 		uint32_t status_attn_bits;
4828 
4829 		status_attn_bits = sblk->status_attn_bits;
4830 
4831 		DBRUNIF(DB_RANDOMTRUE(bce_debug_unexpected_attention),
4832 			if_printf(ifp,
4833 			"Simulating unexpected status attention bit set.");
4834 			status_attn_bits |= STATUS_ATTN_BITS_PARITY_ERROR);
4835 
4836 		/* Was it a link change interrupt? */
4837 		if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
4838 		    (sblk->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE))
4839 			bce_phy_intr(sc);
4840 
4841 		/*
4842 		 * If any other attention is asserted then
4843 		 * the chip is toast.
4844 		 */
4845 		if ((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) !=
4846 		     (sblk->status_attn_bits_ack &
4847 		      ~STATUS_ATTN_BITS_LINK_STATE)) {
4848 			DBRUN(1, sc->unexpected_attentions++);
4849 
4850 			if_printf(ifp, "Fatal attention detected: 0x%08X\n",
4851 				  sblk->status_attn_bits);
4852 
4853 			DBRUN(BCE_FATAL,
4854 			if (bce_debug_unexpected_attention == 0)
4855 				bce_breakpoint(sc));
4856 
4857 			bce_init(sc);
4858 			return;
4859 		}
4860 
4861 		/* Check for any completed RX frames. */
4862 		if (sblk->status_rx_quick_consumer_index0 != sc->hw_rx_cons)
4863 			bce_rx_intr(sc, -1);
4864 
4865 		/* Check for any completed TX frames. */
4866 		if (sblk->status_tx_quick_consumer_index0 != sc->hw_tx_cons)
4867 			bce_tx_intr(sc);
4868 
4869 		/*
4870 		 * Save the status block index value
4871 		 * for use during the next interrupt.
4872 		 */
4873 		sc->last_status_idx = sblk->status_idx;
4874 
4875 		/*
4876 		 * Prevent speculative reads from getting
4877 		 * ahead of the status block.
4878 		 */
4879 		bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
4880 				  BUS_SPACE_BARRIER_READ);
4881 
4882 		/*
4883 		 * If there's no work left then exit the
4884 		 * interrupt service routine.
4885 		 */
4886 		if (sblk->status_rx_quick_consumer_index0 == sc->hw_rx_cons &&
4887 		    sblk->status_tx_quick_consumer_index0 == sc->hw_tx_cons)
4888 			break;
4889 	}
4890 
4891 	bus_dmamap_sync(sc->status_tag,	sc->status_map, BUS_DMASYNC_PREWRITE);
4892 
4893 	/* Re-enable interrupts. */
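	/*
	 * Two writes are used: the first records the latest status block
	 * index while leaving interrupts masked, the second repeats the
	 * index with the mask bit cleared, re-arming the interrupt.
	 */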
4894 	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
4895 	       BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx |
4896 	       BCE_PCICFG_INT_ACK_CMD_MASK_INT);
4897 	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
4898 	       BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx);
4899 
4900 	/* Handle any frames that arrived while handling the interrupt. */
4901 	if (!ifq_is_empty(&ifp->if_snd))
4902 		ifp->if_start(ifp);
4903 }
4904 
4905 
4906 /****************************************************************************/
4907 /* Programs the various packet receive modes (broadcast and multicast).     */
4908 /*                                                                          */
4909 /* Returns:                                                                 */
4910 /*   Nothing.                                                               */
4911 /****************************************************************************/
4912 static void
4913 bce_set_rx_mode(struct bce_softc *sc)
4914 {
4915 	struct ifnet *ifp = &sc->arpcom.ac_if;
4916 	struct ifmultiaddr *ifma;
4917 	uint32_t hashes[NUM_MC_HASH_REGISTERS] = { 0, 0, 0, 0, 0, 0, 0, 0 };
4918 	uint32_t rx_mode, sort_mode;
4919 	int h, i;
4920 
4921 	ASSERT_SERIALIZED(ifp->if_serializer);
4922 
4923 	/* Initialize receive mode default settings. */
4924 	rx_mode = sc->rx_mode &
4925 		  ~(BCE_EMAC_RX_MODE_PROMISCUOUS |
4926 		    BCE_EMAC_RX_MODE_KEEP_VLAN_TAG);
4927 	sort_mode = 1 | BCE_RPM_SORT_USER0_BC_EN;
4928 
4929 	/*
4930 	 * ASF/IPMI/UMP firmware requires that VLAN tag stripping
4931 	 * be enabled.
4932 	 */
4933 	if (!(BCE_IF_CAPABILITIES & IFCAP_VLAN_HWTAGGING) &&
4934 	    !(sc->bce_flags & BCE_MFW_ENABLE_FLAG))
4935 		rx_mode |= BCE_EMAC_RX_MODE_KEEP_VLAN_TAG;
4936 
4937 	/*
4938 	 * Check for promiscuous, all multicast, or selected
4939 	 * multicast address filtering.
4940 	 */
4941 	if (ifp->if_flags & IFF_PROMISC) {
4942 		DBPRINT(sc, BCE_INFO, "Enabling promiscuous mode.\n");
4943 
4944 		/* Enable promiscuous mode. */
4945 		rx_mode |= BCE_EMAC_RX_MODE_PROMISCUOUS;
4946 		sort_mode |= BCE_RPM_SORT_USER0_PROM_EN;
4947 	} else if (ifp->if_flags & IFF_ALLMULTI) {
4948 		DBPRINT(sc, BCE_INFO, "Enabling all multicast mode.\n");
4949 
4950 		/* Enable all multicast addresses. */
4951 		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
4952 			REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4),
4953 			       0xffffffff);
4954 		}
4955 		sort_mode |= BCE_RPM_SORT_USER0_MC_EN;
4956 	} else {
4957 		/* Accept one or more multicast(s). */
4958 		DBPRINT(sc, BCE_INFO, "Enabling selective multicast mode.\n");
4959 
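		/*
		 * Build the 256-bit multicast hash table: the low byte of
		 * each address's CRC32 selects one of 256 bits, with the
		 * upper three bits choosing one of the eight 32-bit hash
		 * registers and the lower five bits the bit within it.
		 */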
4960 		LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
4961 			if (ifma->ifma_addr->sa_family != AF_LINK)
4962 				continue;
4963 			h = ether_crc32_le(
4964 			    LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
4965 			    ETHER_ADDR_LEN) & 0xFF;
4966 			hashes[(h & 0xE0) >> 5] |= 1 << (h & 0x1F);
4967 		}
4968 
4969 		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
4970 			REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4),
4971 			       hashes[i]);
4972 		}
4973 		sort_mode |= BCE_RPM_SORT_USER0_MC_HSH_EN;
4974 	}
4975 
4976 	/* Only make changes if the receive mode has actually changed. */
4977 	if (rx_mode != sc->rx_mode) {
4978 		DBPRINT(sc, BCE_VERBOSE, "Enabling new receive mode: 0x%08X\n",
4979 			rx_mode);
4980 
4981 		sc->rx_mode = rx_mode;
4982 		REG_WR(sc, BCE_EMAC_RX_MODE, rx_mode);
4983 	}
4984 
4985 	/* Disable and clear the existing sort before enabling a new sort. */
4986 	REG_WR(sc, BCE_RPM_SORT_USER0, 0x0);
4987 	REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode);
4988 	REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode | BCE_RPM_SORT_USER0_ENA);
4989 }
4990 
4991 
4992 /****************************************************************************/
4993 /* Called periodically to update statistics from the controller's           */
4994 /* statistics block.                                                        */
4995 /*                                                                          */
4996 /* Returns:                                                                 */
4997 /*   Nothing.                                                               */
4998 /****************************************************************************/
4999 static void
5000 bce_stats_update(struct bce_softc *sc)
5001 {
5002 	struct ifnet *ifp = &sc->arpcom.ac_if;
5003 	struct statistics_block *stats = sc->stats_block;
5004 
5005 	DBPRINT(sc, BCE_EXCESSIVE, "Entering %s()\n", __func__);
5006 
5007 	ASSERT_SERIALIZED(ifp->if_serializer);
5008 
5009 	/*
5010 	 * Update the interface statistics from the hardware statistics.
5011 	 */
5012 	ifp->if_collisions = (u_long)stats->stat_EtherStatsCollisions;
5013 
5014 	ifp->if_ierrors = (u_long)stats->stat_EtherStatsUndersizePkts +
5015 			  (u_long)stats->stat_EtherStatsOverrsizePkts +
5016 			  (u_long)stats->stat_IfInMBUFDiscards +
5017 			  (u_long)stats->stat_Dot3StatsAlignmentErrors +
5018 			  (u_long)stats->stat_Dot3StatsFCSErrors;
5019 
5020 	ifp->if_oerrors =
5021 	(u_long)stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors +
5022 	(u_long)stats->stat_Dot3StatsExcessiveCollisions +
5023 	(u_long)stats->stat_Dot3StatsLateCollisions;
5024 
5025 	/*
5026 	 * Certain controllers don't report carrier sense errors correctly.
5027 	 * See errata E11_5708CA0_1165.
5028 	 */
5029 	if (!(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) &&
5030 	    !(BCE_CHIP_ID(sc) == BCE_CHIP_ID_5708_A0)) {
5031 		ifp->if_oerrors +=
5032 			(u_long)stats->stat_Dot3StatsCarrierSenseErrors;
5033 	}
5034 
5035 	/*
5036 	 * Update the sysctl statistics from the hardware statistics.
5037 	 */
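	/*
	 * The 64-bit counters are exported by the hardware as separate
	 * high and low 32-bit words and are recombined here.
	 */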
5038 	sc->stat_IfHCInOctets =
5039 		((uint64_t)stats->stat_IfHCInOctets_hi << 32) +
5040 		 (uint64_t)stats->stat_IfHCInOctets_lo;
5041 
5042 	sc->stat_IfHCInBadOctets =
5043 		((uint64_t)stats->stat_IfHCInBadOctets_hi << 32) +
5044 		 (uint64_t)stats->stat_IfHCInBadOctets_lo;
5045 
5046 	sc->stat_IfHCOutOctets =
5047 		((uint64_t)stats->stat_IfHCOutOctets_hi << 32) +
5048 		 (uint64_t)stats->stat_IfHCOutOctets_lo;
5049 
5050 	sc->stat_IfHCOutBadOctets =
5051 		((uint64_t)stats->stat_IfHCOutBadOctets_hi << 32) +
5052 		 (uint64_t)stats->stat_IfHCOutBadOctets_lo;
5053 
5054 	sc->stat_IfHCInUcastPkts =
5055 		((uint64_t)stats->stat_IfHCInUcastPkts_hi << 32) +
5056 		 (uint64_t)stats->stat_IfHCInUcastPkts_lo;
5057 
5058 	sc->stat_IfHCInMulticastPkts =
5059 		((uint64_t)stats->stat_IfHCInMulticastPkts_hi << 32) +
5060 		 (uint64_t)stats->stat_IfHCInMulticastPkts_lo;
5061 
5062 	sc->stat_IfHCInBroadcastPkts =
5063 		((uint64_t)stats->stat_IfHCInBroadcastPkts_hi << 32) +
5064 		 (uint64_t)stats->stat_IfHCInBroadcastPkts_lo;
5065 
5066 	sc->stat_IfHCOutUcastPkts =
5067 		((uint64_t)stats->stat_IfHCOutUcastPkts_hi << 32) +
5068 		 (uint64_t)stats->stat_IfHCOutUcastPkts_lo;
5069 
5070 	sc->stat_IfHCOutMulticastPkts =
5071 		((uint64_t)stats->stat_IfHCOutMulticastPkts_hi << 32) +
5072 		 (uint64_t)stats->stat_IfHCOutMulticastPkts_lo;
5073 
5074 	sc->stat_IfHCOutBroadcastPkts =
5075 		((uint64_t)stats->stat_IfHCOutBroadcastPkts_hi << 32) +
5076 		 (uint64_t)stats->stat_IfHCOutBroadcastPkts_lo;
5077 
5078 	sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors =
5079 		stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors;
5080 
5081 	sc->stat_Dot3StatsCarrierSenseErrors =
5082 		stats->stat_Dot3StatsCarrierSenseErrors;
5083 
5084 	sc->stat_Dot3StatsFCSErrors =
5085 		stats->stat_Dot3StatsFCSErrors;
5086 
5087 	sc->stat_Dot3StatsAlignmentErrors =
5088 		stats->stat_Dot3StatsAlignmentErrors;
5089 
5090 	sc->stat_Dot3StatsSingleCollisionFrames =
5091 		stats->stat_Dot3StatsSingleCollisionFrames;
5092 
5093 	sc->stat_Dot3StatsMultipleCollisionFrames =
5094 		stats->stat_Dot3StatsMultipleCollisionFrames;
5095 
5096 	sc->stat_Dot3StatsDeferredTransmissions =
5097 		stats->stat_Dot3StatsDeferredTransmissions;
5098 
5099 	sc->stat_Dot3StatsExcessiveCollisions =
5100 		stats->stat_Dot3StatsExcessiveCollisions;
5101 
5102 	sc->stat_Dot3StatsLateCollisions =
5103 		stats->stat_Dot3StatsLateCollisions;
5104 
5105 	sc->stat_EtherStatsCollisions =
5106 		stats->stat_EtherStatsCollisions;
5107 
5108 	sc->stat_EtherStatsFragments =
5109 		stats->stat_EtherStatsFragments;
5110 
5111 	sc->stat_EtherStatsJabbers =
5112 		stats->stat_EtherStatsJabbers;
5113 
5114 	sc->stat_EtherStatsUndersizePkts =
5115 		stats->stat_EtherStatsUndersizePkts;
5116 
5117 	sc->stat_EtherStatsOverrsizePkts =
5118 		stats->stat_EtherStatsOverrsizePkts;
5119 
5120 	sc->stat_EtherStatsPktsRx64Octets =
5121 		stats->stat_EtherStatsPktsRx64Octets;
5122 
5123 	sc->stat_EtherStatsPktsRx65Octetsto127Octets =
5124 		stats->stat_EtherStatsPktsRx65Octetsto127Octets;
5125 
5126 	sc->stat_EtherStatsPktsRx128Octetsto255Octets =
5127 		stats->stat_EtherStatsPktsRx128Octetsto255Octets;
5128 
5129 	sc->stat_EtherStatsPktsRx256Octetsto511Octets =
5130 		stats->stat_EtherStatsPktsRx256Octetsto511Octets;
5131 
5132 	sc->stat_EtherStatsPktsRx512Octetsto1023Octets =
5133 		stats->stat_EtherStatsPktsRx512Octetsto1023Octets;
5134 
5135 	sc->stat_EtherStatsPktsRx1024Octetsto1522Octets =
5136 		stats->stat_EtherStatsPktsRx1024Octetsto1522Octets;
5137 
5138 	sc->stat_EtherStatsPktsRx1523Octetsto9022Octets =
5139 		stats->stat_EtherStatsPktsRx1523Octetsto9022Octets;
5140 
5141 	sc->stat_EtherStatsPktsTx64Octets =
5142 		stats->stat_EtherStatsPktsTx64Octets;
5143 
5144 	sc->stat_EtherStatsPktsTx65Octetsto127Octets =
5145 		stats->stat_EtherStatsPktsTx65Octetsto127Octets;
5146 
5147 	sc->stat_EtherStatsPktsTx128Octetsto255Octets =
5148 		stats->stat_EtherStatsPktsTx128Octetsto255Octets;
5149 
5150 	sc->stat_EtherStatsPktsTx256Octetsto511Octets =
5151 		stats->stat_EtherStatsPktsTx256Octetsto511Octets;
5152 
5153 	sc->stat_EtherStatsPktsTx512Octetsto1023Octets =
5154 		stats->stat_EtherStatsPktsTx512Octetsto1023Octets;
5155 
5156 	sc->stat_EtherStatsPktsTx1024Octetsto1522Octets =
5157 		stats->stat_EtherStatsPktsTx1024Octetsto1522Octets;
5158 
5159 	sc->stat_EtherStatsPktsTx1523Octetsto9022Octets =
5160 		stats->stat_EtherStatsPktsTx1523Octetsto9022Octets;
5161 
5162 	sc->stat_XonPauseFramesReceived =
5163 		stats->stat_XonPauseFramesReceived;
5164 
5165 	sc->stat_XoffPauseFramesReceived =
5166 		stats->stat_XoffPauseFramesReceived;
5167 
5168 	sc->stat_OutXonSent =
5169 		stats->stat_OutXonSent;
5170 
5171 	sc->stat_OutXoffSent =
5172 		stats->stat_OutXoffSent;
5173 
5174 	sc->stat_FlowControlDone =
5175 		stats->stat_FlowControlDone;
5176 
5177 	sc->stat_MacControlFramesReceived =
5178 		stats->stat_MacControlFramesReceived;
5179 
5180 	sc->stat_XoffStateEntered =
5181 		stats->stat_XoffStateEntered;
5182 
5183 	sc->stat_IfInFramesL2FilterDiscards =
5184 		stats->stat_IfInFramesL2FilterDiscards;
5185 
5186 	sc->stat_IfInRuleCheckerDiscards =
5187 		stats->stat_IfInRuleCheckerDiscards;
5188 
5189 	sc->stat_IfInFTQDiscards =
5190 		stats->stat_IfInFTQDiscards;
5191 
5192 	sc->stat_IfInMBUFDiscards =
5193 		stats->stat_IfInMBUFDiscards;
5194 
5195 	sc->stat_IfInRuleCheckerP4Hit =
5196 		stats->stat_IfInRuleCheckerP4Hit;
5197 
5198 	sc->stat_CatchupInRuleCheckerDiscards =
5199 		stats->stat_CatchupInRuleCheckerDiscards;
5200 
5201 	sc->stat_CatchupInFTQDiscards =
5202 		stats->stat_CatchupInFTQDiscards;
5203 
5204 	sc->stat_CatchupInMBUFDiscards =
5205 		stats->stat_CatchupInMBUFDiscards;
5206 
5207 	sc->stat_CatchupInRuleCheckerP4Hit =
5208 		stats->stat_CatchupInRuleCheckerP4Hit;
5209 
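	/*
	 * 0x120084 is only reachable through the indirect register
	 * interface; judging from the "com_no_buffers" sysctl description
	 * below, it counts valid packets received while no RX buffers
	 * were available.
	 */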
5210 	sc->com_no_buffers = REG_RD_IND(sc, 0x120084);
5211 
5212 	DBPRINT(sc, BCE_EXCESSIVE, "Exiting %s()\n", __func__);
5213 }
5214 
5215 
5216 /****************************************************************************/
5217 /* Periodic function to perform maintenance tasks.                          */
5218 /*                                                                          */
5219 /* Returns:                                                                 */
5220 /*   Nothing.                                                               */
5221 /****************************************************************************/
5222 static void
5223 bce_tick_serialized(struct bce_softc *sc)
5224 {
5225 	struct ifnet *ifp = &sc->arpcom.ac_if;
5226 	struct mii_data *mii;
5227 	uint32_t msg;
5228 
5229 	ASSERT_SERIALIZED(ifp->if_serializer);
5230 
5231 	/* Tell the firmware that the driver is still running. */
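	/*
	 * Debug builds report the ALWAYS_ALIVE pulse code instead of an
	 * incrementing sequence number, presumably so the firmware does
	 * not treat a driver stopped in the debugger as dead.
	 */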
5232 #ifdef BCE_DEBUG
5233 	msg = (uint32_t)BCE_DRV_MSG_DATA_PULSE_CODE_ALWAYS_ALIVE;
5234 #else
5235 	msg = (uint32_t)++sc->bce_fw_drv_pulse_wr_seq;
5236 #endif
5237 	REG_WR_IND(sc, sc->bce_shmem_base + BCE_DRV_PULSE_MB, msg);
5238 
5239 	/* Update the statistics from the hardware statistics block. */
5240 	bce_stats_update(sc);
5241 
5242 	/* Schedule the next tick. */
5243 	callout_reset(&sc->bce_stat_ch, hz, bce_tick, sc);
5244 
5245 	/* If the link is already up then we're done. */
5246 	if (sc->bce_link)
5247 		return;
5248 
5249 	mii = device_get_softc(sc->bce_miibus);
5250 	mii_tick(mii);
5251 
5252 	/* Check if the link has come up. */
5253 	if (!sc->bce_link && (mii->mii_media_status & IFM_ACTIVE) &&
5254 	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
5255 		sc->bce_link++;
5256 		/* Now that link is up, handle any outstanding TX traffic. */
5257 		if (!ifq_is_empty(&ifp->if_snd))
5258 			ifp->if_start(ifp);
5259 	}
5260 }
5261 
5262 
5263 static void
5264 bce_tick(void *xsc)
5265 {
5266 	struct bce_softc *sc = xsc;
5267 	struct ifnet *ifp = &sc->arpcom.ac_if;
5268 
5269 	lwkt_serialize_enter(ifp->if_serializer);
5270 	bce_tick_serialized(sc);
5271 	lwkt_serialize_exit(ifp->if_serializer);
5272 }
5273 
5274 
5275 #ifdef BCE_DEBUG
5276 /****************************************************************************/
5277 /* Allows the driver state to be dumped through the sysctl interface.       */
5278 /*                                                                          */
5279 /* Returns:                                                                 */
5280 /*   0 for success, positive value for failure.                             */
5281 /****************************************************************************/
5282 static int
5283 bce_sysctl_driver_state(SYSCTL_HANDLER_ARGS)
5284 {
5285         int error;
5286         int result;
5287         struct bce_softc *sc;
5288 
5289         result = -1;
5290         error = sysctl_handle_int(oidp, &result, 0, req);
5291 
5292         if (error || !req->newptr)
5293                 return (error);
5294 
5295         if (result == 1) {
5296                 sc = (struct bce_softc *)arg1;
5297                 bce_dump_driver_state(sc);
5298         }
5299 
5300         return error;
5301 }
5302 
5303 
5304 /****************************************************************************/
5305 /* Allows the hardware state to be dumped through the sysctl interface.     */
5306 /*                                                                          */
5307 /* Returns:                                                                 */
5308 /*   0 for success, positive value for failure.                             */
5309 /****************************************************************************/
5310 static int
5311 bce_sysctl_hw_state(SYSCTL_HANDLER_ARGS)
5312 {
5313         int error;
5314         int result;
5315         struct bce_softc *sc;
5316 
5317         result = -1;
5318         error = sysctl_handle_int(oidp, &result, 0, req);
5319 
5320         if (error || !req->newptr)
5321                 return (error);
5322 
5323         if (result == 1) {
5324                 sc = (struct bce_softc *)arg1;
5325                 bce_dump_hw_state(sc);
5326         }
5327 
5328         return error;
5329 }
5330 
5331 
5332 /****************************************************************************/
5333 /* Provides a sysctl interface to allow dumping the RX chain.               */
5334 /*                                                                          */
5335 /* Returns:                                                                 */
5336 /*   0 for success, positive value for failure.                             */
5337 /****************************************************************************/
5338 static int
5339 bce_sysctl_dump_rx_chain(SYSCTL_HANDLER_ARGS)
5340 {
5341         int error;
5342         int result;
5343         struct bce_softc *sc;
5344 
5345         result = -1;
5346         error = sysctl_handle_int(oidp, &result, 0, req);
5347 
5348         if (error || !req->newptr)
5349                 return (error);
5350 
5351         if (result == 1) {
5352                 sc = (struct bce_softc *)arg1;
5353                 bce_dump_rx_chain(sc, 0, USABLE_RX_BD);
5354         }
5355 
5356         return error;
5357 }
5358 
5359 
5360 /****************************************************************************/
5361 /* Provides a sysctl interface to allow dumping the TX chain.               */
5362 /*                                                                          */
5363 /* Returns:                                                                 */
5364 /*   0 for success, positive value for failure.                             */
5365 /****************************************************************************/
5366 static int
5367 bce_sysctl_dump_tx_chain(SYSCTL_HANDLER_ARGS)
5368 {
5369         int error;
5370         int result;
5371         struct bce_softc *sc;
5372 
5373         result = -1;
5374         error = sysctl_handle_int(oidp, &result, 0, req);
5375 
5376         if (error || !req->newptr)
5377                 return (error);
5378 
5379         if (result == 1) {
5380                 sc = (struct bce_softc *)arg1;
5381                 bce_dump_tx_chain(sc, 0, USABLE_TX_BD);
5382         }
5383 
5384         return error;
5385 }
5386 
5387 
5388 /****************************************************************************/
5389 /* Provides a sysctl interface to allow reading arbitrary registers in the  */
5390 /* device.  DO NOT ENABLE ON PRODUCTION SYSTEMS!                            */
5391 /*                                                                          */
5392 /* Returns:                                                                 */
5393 /*   0 for success, positive value for failure.                             */
5394 /****************************************************************************/
5395 static int
5396 bce_sysctl_reg_read(SYSCTL_HANDLER_ARGS)
5397 {
5398 	struct bce_softc *sc;
5399 	int error;
5400 	uint32_t val, result;
5401 
5402 	result = -1;
5403 	error = sysctl_handle_int(oidp, &result, 0, req);
5404 	if (error || (req->newptr == NULL))
5405 		return (error);
5406 
5407 	/* Make sure the register is accessible. */
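	/*
	 * Offsets below 0x8000 are read directly through the register
	 * window; higher offsets (up to 0x280000) go through the
	 * indirect register access path instead.
	 */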
5408 	if (result < 0x8000) {
5409 		sc = (struct bce_softc *)arg1;
5410 		val = REG_RD(sc, result);
5411 		if_printf(&sc->arpcom.ac_if, "reg 0x%08X = 0x%08X\n",
5412 			  result, val);
5413 	} else if (result < 0x0280000) {
5414 		sc = (struct bce_softc *)arg1;
5415 		val = REG_RD_IND(sc, result);
5416 		if_printf(&sc->arpcom.ac_if, "reg 0x%08X = 0x%08X\n",
5417 			  result, val);
5418 	}
5419 	return (error);
5420 }
5421 
5422 
5423 /****************************************************************************/
5424 /* Provides a sysctl interface to allow reading arbitrary PHY registers in  */
5425 /* the device.  DO NOT ENABLE ON PRODUCTION SYSTEMS!                        */
5426 /*                                                                          */
5427 /* Returns:                                                                 */
5428 /*   0 for success, positive value for failure.                             */
5429 /****************************************************************************/
5430 static int
5431 bce_sysctl_phy_read(SYSCTL_HANDLER_ARGS)
5432 {
5433 	struct bce_softc *sc;
5434 	device_t dev;
5435 	int error, result;
5436 	uint16_t val;
5437 
5438 	result = -1;
5439 	error = sysctl_handle_int(oidp, &result, 0, req);
5440 	if (error || (req->newptr == NULL))
5441 		return (error);
5442 
5443 	/* Make sure the register is accessible. */
5444 	if (result < 0x20) {
5445 		sc = (struct bce_softc *)arg1;
5446 		dev = sc->bce_dev;
5447 		val = bce_miibus_read_reg(dev, sc->bce_phy_addr, result);
5448 		if_printf(&sc->arpcom.ac_if,
5449 			  "phy 0x%02X = 0x%04X\n", result, val);
5450 	}
5451 	return (error);
5452 }
5453 
5454 
5455 /****************************************************************************/
5456 /* Provides a sysctl interface to force the driver to dump state and        */
5457 /* enter the debugger.  DO NOT ENABLE ON PRODUCTION SYSTEMS!                */
5458 /*                                                                          */
5459 /* Returns:                                                                 */
5460 /*   0 for success, positive value for failure.                             */
5461 /****************************************************************************/
5462 static int
5463 bce_sysctl_breakpoint(SYSCTL_HANDLER_ARGS)
5464 {
5465         int error;
5466         int result;
5467         struct bce_softc *sc;
5468 
5469         result = -1;
5470         error = sysctl_handle_int(oidp, &result, 0, req);
5471 
5472         if (error || !req->newptr)
5473                 return (error);
5474 
5475         if (result == 1) {
5476                 sc = (struct bce_softc *)arg1;
5477                 bce_breakpoint(sc);
5478         }
5479 
5480         return error;
5481 }
5482 #endif
5483 
5484 
5485 /****************************************************************************/
5486 /* Adds any sysctl parameters for tuning or debugging purposes.             */
5487 /*                                                                          */
5488 /* Returns:                                                                 */
5489 /*   0 for success, positive value for failure.                             */
5490 /****************************************************************************/
5491 static void
5492 bce_add_sysctls(struct bce_softc *sc)
5493 {
5494 	struct sysctl_ctx_list *ctx;
5495 	struct sysctl_oid_list *children;
5496 
5497 	sysctl_ctx_init(&sc->bce_sysctl_ctx);
5498 	sc->bce_sysctl_tree = SYSCTL_ADD_NODE(&sc->bce_sysctl_ctx,
5499 					      SYSCTL_STATIC_CHILDREN(_hw),
5500 					      OID_AUTO,
5501 					      device_get_nameunit(sc->bce_dev),
5502 					      CTLFLAG_RD, 0, "");
5503 	if (sc->bce_sysctl_tree == NULL) {
5504 		device_printf(sc->bce_dev, "can't add sysctl node\n");
5505 		return;
5506 	}
5507 
5508 	ctx = &sc->bce_sysctl_ctx;
5509 	children = SYSCTL_CHILDREN(sc->bce_sysctl_tree);
5510 
5511 #ifdef BCE_DEBUG
5512 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
5513 		"rx_low_watermark",
5514 		CTLFLAG_RD, &sc->rx_low_watermark,
5515 		0, "Lowest level of free rx_bd's");
5516 
5517 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
5518 		"rx_empty_count",
5519 		CTLFLAG_RD, &sc->rx_empty_count,
5520 		0, "Number of times the RX chain was empty");
5521 
5522 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
5523 		"tx_hi_watermark",
5524 		CTLFLAG_RD, &sc->tx_hi_watermark,
5525 		0, "Highest level of used tx_bd's");
5526 
5527 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
5528 		"tx_full_count",
5529 		CTLFLAG_RD, &sc->tx_full_count,
5530 		0, "Number of times the TX chain was full");
5531 
5532 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
5533 		"l2fhdr_status_errors",
5534 		CTLFLAG_RD, &sc->l2fhdr_status_errors,
5535 		0, "l2_fhdr status errors");
5536 
5537 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
5538 		"unexpected_attentions",
5539 		CTLFLAG_RD, &sc->unexpected_attentions,
5540 		0, "unexpected attentions");
5541 
5542 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
5543 		"lost_status_block_updates",
5544 		CTLFLAG_RD, &sc->lost_status_block_updates,
5545 		0, "lost status block updates");
5546 
5547 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
5548 		"mbuf_alloc_failed",
5549 		CTLFLAG_RD, &sc->mbuf_alloc_failed,
5550 		0, "mbuf cluster allocation failures");
5551 #endif
5552 
5553 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5554 		"stat_IfHcInOctets",
5555 		CTLFLAG_RD, &sc->stat_IfHCInOctets,
5556 		"Bytes received");
5557 
5558 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5559 		"stat_IfHCInBadOctets",
5560 		CTLFLAG_RD, &sc->stat_IfHCInBadOctets,
5561 		"Bad bytes received");
5562 
5563 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5564 		"stat_IfHCOutOctets",
5565 		CTLFLAG_RD, &sc->stat_IfHCOutOctets,
5566 		"Bytes sent");
5567 
5568 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5569 		"stat_IfHCOutBadOctets",
5570 		CTLFLAG_RD, &sc->stat_IfHCOutBadOctets,
5571 		"Bad bytes sent");
5572 
5573 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5574 		"stat_IfHCInUcastPkts",
5575 		CTLFLAG_RD, &sc->stat_IfHCInUcastPkts,
5576 		"Unicast packets received");
5577 
5578 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5579 		"stat_IfHCInMulticastPkts",
5580 		CTLFLAG_RD, &sc->stat_IfHCInMulticastPkts,
5581 		"Multicast packets received");
5582 
5583 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5584 		"stat_IfHCInBroadcastPkts",
5585 		CTLFLAG_RD, &sc->stat_IfHCInBroadcastPkts,
5586 		"Broadcast packets received");
5587 
5588 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5589 		"stat_IfHCOutUcastPkts",
5590 		CTLFLAG_RD, &sc->stat_IfHCOutUcastPkts,
5591 		"Unicast packets sent");
5592 
5593 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5594 		"stat_IfHCOutMulticastPkts",
5595 		CTLFLAG_RD, &sc->stat_IfHCOutMulticastPkts,
5596 		"Multicast packets sent");
5597 
5598 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5599 		"stat_IfHCOutBroadcastPkts",
5600 		CTLFLAG_RD, &sc->stat_IfHCOutBroadcastPkts,
5601 		"Broadcast packets sent");
5602 
5603 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5604 		"stat_emac_tx_stat_dot3statsinternalmactransmiterrors",
5605 		CTLFLAG_RD, &sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors,
5606 		0, "Internal MAC transmit errors");
5607 
5608 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5609 		"stat_Dot3StatsCarrierSenseErrors",
5610 		CTLFLAG_RD, &sc->stat_Dot3StatsCarrierSenseErrors,
5611 		0, "Carrier sense errors");
5612 
5613 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5614 		"stat_Dot3StatsFCSErrors",
5615 		CTLFLAG_RD, &sc->stat_Dot3StatsFCSErrors,
5616 		0, "Frame check sequence errors");
5617 
5618 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5619 		"stat_Dot3StatsAlignmentErrors",
5620 		CTLFLAG_RD, &sc->stat_Dot3StatsAlignmentErrors,
5621 		0, "Alignment errors");
5622 
5623 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5624 		"stat_Dot3StatsSingleCollisionFrames",
5625 		CTLFLAG_RD, &sc->stat_Dot3StatsSingleCollisionFrames,
5626 		0, "Single Collision Frames");
5627 
5628 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5629 		"stat_Dot3StatsMultipleCollisionFrames",
5630 		CTLFLAG_RD, &sc->stat_Dot3StatsMultipleCollisionFrames,
5631 		0, "Multiple Collision Frames");
5632 
5633 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5634 		"stat_Dot3StatsDeferredTransmissions",
5635 		CTLFLAG_RD, &sc->stat_Dot3StatsDeferredTransmissions,
5636 		0, "Deferred Transmissions");
5637 
5638 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5639 		"stat_Dot3StatsExcessiveCollisions",
5640 		CTLFLAG_RD, &sc->stat_Dot3StatsExcessiveCollisions,
5641 		0, "Excessive Collisions");
5642 
5643 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5644 		"stat_Dot3StatsLateCollisions",
5645 		CTLFLAG_RD, &sc->stat_Dot3StatsLateCollisions,
5646 		0, "Late Collisions");
5647 
5648 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5649 		"stat_EtherStatsCollisions",
5650 		CTLFLAG_RD, &sc->stat_EtherStatsCollisions,
5651 		0, "Collisions");
5652 
5653 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5654 		"stat_EtherStatsFragments",
5655 		CTLFLAG_RD, &sc->stat_EtherStatsFragments,
5656 		0, "Fragments");
5657 
5658 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5659 		"stat_EtherStatsJabbers",
5660 		CTLFLAG_RD, &sc->stat_EtherStatsJabbers,
5661 		0, "Jabbers");
5662 
5663 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5664 		"stat_EtherStatsUndersizePkts",
5665 		CTLFLAG_RD, &sc->stat_EtherStatsUndersizePkts,
5666 		0, "Undersize packets");
5667 
5668 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5669 		"stat_EtherStatsOverrsizePkts",
5670 		CTLFLAG_RD, &sc->stat_EtherStatsOverrsizePkts,
5671 		0, "Oversize packets received");
5672 
5673 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5674 		"stat_EtherStatsPktsRx64Octets",
5675 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx64Octets,
5676 		0, "Bytes received in 64 byte packets");
5677 
5678 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5679 		"stat_EtherStatsPktsRx65Octetsto127Octets",
5680 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx65Octetsto127Octets,
5681 		0, "Bytes received in 65 to 127 byte packets");
5682 
5683 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5684 		"stat_EtherStatsPktsRx128Octetsto255Octets",
5685 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx128Octetsto255Octets,
5686 		0, "Bytes received in 128 to 255 byte packets");
5687 
5688 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5689 		"stat_EtherStatsPktsRx256Octetsto511Octets",
5690 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx256Octetsto511Octets,
5691 		0, "Bytes received in 256 to 511 byte packets");
5692 
5693 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5694 		"stat_EtherStatsPktsRx512Octetsto1023Octets",
5695 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx512Octetsto1023Octets,
5696 		0, "Bytes received in 512 to 1023 byte packets");
5697 
5698 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5699 		"stat_EtherStatsPktsRx1024Octetsto1522Octets",
5700 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1024Octetsto1522Octets,
5701 		0, "Bytes received in 1024 to 1522 byte packets");
5702 
5703 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5704 		"stat_EtherStatsPktsRx1523Octetsto9022Octets",
5705 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1523Octetsto9022Octets,
5706 		0, "Bytes received in 1523 to 9022 byte packets");
5707 
5708 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5709 		"stat_EtherStatsPktsTx64Octets",
5710 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx64Octets,
5711 		0, "Bytes sent in 64 byte packets");
5712 
5713 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5714 		"stat_EtherStatsPktsTx65Octetsto127Octets",
5715 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx65Octetsto127Octets,
5716 		0, "Bytes sent in 65 to 127 byte packets");
5717 
5718 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5719 		"stat_EtherStatsPktsTx128Octetsto255Octets",
5720 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx128Octetsto255Octets,
5721 		0, "Bytes sent in 128 to 255 byte packets");
5722 
5723 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5724 		"stat_EtherStatsPktsTx256Octetsto511Octets",
5725 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx256Octetsto511Octets,
5726 		0, "Bytes sent in 256 to 511 byte packets");
5727 
5728 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5729 		"stat_EtherStatsPktsTx512Octetsto1023Octets",
5730 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx512Octetsto1023Octets,
5731 		0, "Bytes sent in 512 to 1023 byte packets");
5732 
5733 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5734 		"stat_EtherStatsPktsTx1024Octetsto1522Octets",
5735 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1024Octetsto1522Octets,
5736 		0, "Bytes sent in 1024 to 1522 byte packets");
5737 
5738 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5739 		"stat_EtherStatsPktsTx1523Octetsto9022Octets",
5740 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1523Octetsto9022Octets,
5741 		0, "Bytes sent in 1523 to 9022 byte packets");
5742 
5743 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5744 		"stat_XonPauseFramesReceived",
5745 		CTLFLAG_RD, &sc->stat_XonPauseFramesReceived,
5746 		0, "XON pause frames received");
5747 
5748 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5749 		"stat_XoffPauseFramesReceived",
5750 		CTLFLAG_RD, &sc->stat_XoffPauseFramesReceived,
5751 		0, "XOFF pause frames received");
5752 
5753 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5754 		"stat_OutXonSent",
5755 		CTLFLAG_RD, &sc->stat_OutXonSent,
5756 		0, "XON pause frames sent");
5757 
5758 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5759 		"stat_OutXoffSent",
5760 		CTLFLAG_RD, &sc->stat_OutXoffSent,
5761 		0, "XOFF pause frames sent");
5762 
5763 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5764 		"stat_FlowControlDone",
5765 		CTLFLAG_RD, &sc->stat_FlowControlDone,
5766 		0, "Flow control done");
5767 
5768 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5769 		"stat_MacControlFramesReceived",
5770 		CTLFLAG_RD, &sc->stat_MacControlFramesReceived,
5771 		0, "MAC control frames received");
5772 
5773 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5774 		"stat_XoffStateEntered",
5775 		CTLFLAG_RD, &sc->stat_XoffStateEntered,
5776 		0, "XOFF state entered");
5777 
5778 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5779 		"stat_IfInFramesL2FilterDiscards",
5780 		CTLFLAG_RD, &sc->stat_IfInFramesL2FilterDiscards,
5781 		0, "Received L2 packets discarded");
5782 
5783 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5784 		"stat_IfInRuleCheckerDiscards",
5785 		CTLFLAG_RD, &sc->stat_IfInRuleCheckerDiscards,
5786 		0, "Received packets discarded by rule");
5787 
5788 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5789 		"stat_IfInFTQDiscards",
5790 		CTLFLAG_RD, &sc->stat_IfInFTQDiscards,
5791 		0, "Received packet FTQ discards");
5792 
5793 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5794 		"stat_IfInMBUFDiscards",
5795 		CTLFLAG_RD, &sc->stat_IfInMBUFDiscards,
5796 		0, "Received packets discarded due to lack of controller buffer memory");
5797 
5798 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5799 		"stat_IfInRuleCheckerP4Hit",
5800 		CTLFLAG_RD, &sc->stat_IfInRuleCheckerP4Hit,
5801 		0, "Received packets rule checker hits");
5802 
5803 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5804 		"stat_CatchupInRuleCheckerDiscards",
5805 		CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerDiscards,
5806 		0, "Received packets discarded in Catchup path");
5807 
5808 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5809 		"stat_CatchupInFTQDiscards",
5810 		CTLFLAG_RD, &sc->stat_CatchupInFTQDiscards,
5811 		0, "Received packets discarded in FTQ in Catchup path");
5812 
5813 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5814 		"stat_CatchupInMBUFDiscards",
5815 		CTLFLAG_RD, &sc->stat_CatchupInMBUFDiscards,
5816 		0, "Received packets discarded in controller buffer memory in Catchup path");
5817 
5818 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5819 		"stat_CatchupInRuleCheckerP4Hit",
5820 		CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerP4Hit,
5821 		0, "Received packets rule checker hits in Catchup path");
5822 
5823 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5824 		"com_no_buffers",
5825 		CTLFLAG_RD, &sc->com_no_buffers,
5826 		0, "Valid packets received but no RX buffers available");
5827 
5828 #ifdef BCE_DEBUG
5829 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
5830 		"driver_state", CTLTYPE_INT | CTLFLAG_RW,
5831 		(void *)sc, 0,
5832 		bce_sysctl_driver_state, "I", "Driver state information");
5833 
5834 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
5835 		"hw_state", CTLTYPE_INT | CTLFLAG_RW,
5836 		(void *)sc, 0,
5837 		bce_sysctl_hw_state, "I", "Hardware state information");
5838 
5839 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
5840 		"dump_rx_chain", CTLTYPE_INT | CTLFLAG_RW,
5841 		(void *)sc, 0,
5842 		bce_sysctl_dump_rx_chain, "I", "Dump rx_bd chain");
5843 
5844 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
5845 		"dump_tx_chain", CTLTYPE_INT | CTLFLAG_RW,
5846 		(void *)sc, 0,
5847 		bce_sysctl_dump_tx_chain, "I", "Dump tx_bd chain");
5848 
5849 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
5850 		"breakpoint", CTLTYPE_INT | CTLFLAG_RW,
5851 		(void *)sc, 0,
5852 		bce_sysctl_breakpoint, "I", "Driver breakpoint");
5853 
5854 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
5855 		"reg_read", CTLTYPE_INT | CTLFLAG_RW,
5856 		(void *)sc, 0,
5857 		bce_sysctl_reg_read, "I", "Register read");
5858 
5859 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
5860 		"phy_read", CTLTYPE_INT | CTLFLAG_RW,
5861 		(void *)sc, 0,
5862 		bce_sysctl_phy_read, "I", "PHY register read");
5863 
5864 #endif
5865 
5866 }
5867 
5868 
5869 /****************************************************************************/
5870 /* BCE Debug Routines                                                       */
5871 /****************************************************************************/
5872 #ifdef BCE_DEBUG
5873 
5874 /****************************************************************************/
5875 /* Freezes the controller to allow for a cohesive state dump.               */
5876 /*                                                                          */
5877 /* Returns:                                                                 */
5878 /*   Nothing.                                                               */
5879 /****************************************************************************/
5880 static void
5881 bce_freeze_controller(struct bce_softc *sc)
5882 {
5883 	uint32_t val;
5884 
5885 	val = REG_RD(sc, BCE_MISC_COMMAND);
5886 	val |= BCE_MISC_COMMAND_DISABLE_ALL;
5887 	REG_WR(sc, BCE_MISC_COMMAND, val);
5888 }
5889 
5890 
5891 /****************************************************************************/
5892 /* Unfreezes the controller after a freeze operation.  This may not always  */
5893 /* work and the controller will require a reset!                            */
5894 /*                                                                          */
5895 /* Returns:                                                                 */
5896 /*   Nothing.                                                               */
5897 /****************************************************************************/
5898 static void
5899 bce_unfreeze_controller(struct bce_softc *sc)
5900 {
5901 	uint32_t val;
5902 
5903 	val = REG_RD(sc, BCE_MISC_COMMAND);
5904 	val |= BCE_MISC_COMMAND_ENABLE_ALL;
5905 	REG_WR(sc, BCE_MISC_COMMAND, val);
5906 }
5907 
5908 
5909 /****************************************************************************/
5910 /* Prints out information about an mbuf.                                    */
5911 /*                                                                          */
5912 /* Returns:                                                                 */
5913 /*   Nothing.                                                               */
5914 /****************************************************************************/
5915 static void
5916 bce_dump_mbuf(struct bce_softc *sc, struct mbuf *m)
5917 {
5918 	struct ifnet *ifp = &sc->arpcom.ac_if;
5919 	uint32_t val_hi, val_lo;
5920 	struct mbuf *mp = m;
5921 
5922 	if (m == NULL) {
5923 		/* NULL mbuf pointer. */
5924 		if_printf(ifp, "mbuf: null pointer\n");
5925 		return;
5926 	}
5927 
5928 	while (mp) {
5929 		val_hi = BCE_ADDR_HI(mp);
5930 		val_lo = BCE_ADDR_LO(mp);
5931 		if_printf(ifp, "mbuf: vaddr = 0x%08X:%08X, m_len = %d, "
5932 			  "m_flags = ( ", val_hi, val_lo, mp->m_len);
5933 
5934 		if (mp->m_flags & M_EXT)
5935 			kprintf("M_EXT ");
5936 		if (mp->m_flags & M_PKTHDR)
5937 			kprintf("M_PKTHDR ");
5938 		if (mp->m_flags & M_EOR)
5939 			kprintf("M_EOR ");
5940 #ifdef M_RDONLY
5941 		if (mp->m_flags & M_RDONLY)
5942 			kprintf("M_RDONLY ");
5943 #endif
5944 
5945 		val_hi = BCE_ADDR_HI(mp->m_data);
5946 		val_lo = BCE_ADDR_LO(mp->m_data);
5947 		kprintf(") m_data = 0x%08X:%08X\n", val_hi, val_lo);
5948 
5949 		if (mp->m_flags & M_PKTHDR) {
5950 			if_printf(ifp, "- m_pkthdr: flags = ( ");
5951 			if (mp->m_flags & M_BCAST)
5952 				kprintf("M_BCAST ");
5953 			if (mp->m_flags & M_MCAST)
5954 				kprintf("M_MCAST ");
5955 			if (mp->m_flags & M_FRAG)
5956 				kprintf("M_FRAG ");
5957 			if (mp->m_flags & M_FIRSTFRAG)
5958 				kprintf("M_FIRSTFRAG ");
5959 			if (mp->m_flags & M_LASTFRAG)
5960 				kprintf("M_LASTFRAG ");
5961 #ifdef M_VLANTAG
5962 			if (mp->m_flags & M_VLANTAG)
5963 				kprintf("M_VLANTAG ");
5964 #endif
5965 #ifdef M_PROMISC
5966 			if (mp->m_flags & M_PROMISC)
5967 				kprintf("M_PROMISC ");
5968 #endif
5969 			kprintf(") csum_flags = ( ");
5970 			if (mp->m_pkthdr.csum_flags & CSUM_IP)
5971 				kprintf("CSUM_IP ");
5972 			if (mp->m_pkthdr.csum_flags & CSUM_TCP)
5973 				kprintf("CSUM_TCP ");
5974 			if (mp->m_pkthdr.csum_flags & CSUM_UDP)
5975 				kprintf("CSUM_UDP ");
5976 			if (mp->m_pkthdr.csum_flags & CSUM_IP_FRAGS)
5977 				kprintf("CSUM_IP_FRAGS ");
5978 			if (mp->m_pkthdr.csum_flags & CSUM_FRAGMENT)
5979 				kprintf("CSUM_FRAGMENT ");
5980 #ifdef CSUM_TSO
5981 			if (mp->m_pkthdr.csum_flags & CSUM_TSO)
5982 				kprintf("CSUM_TSO ");
5983 #endif
5984 			if (mp->m_pkthdr.csum_flags & CSUM_IP_CHECKED)
5985 				kprintf("CSUM_IP_CHECKED ");
5986 			if (mp->m_pkthdr.csum_flags & CSUM_IP_VALID)
5987 				kprintf("CSUM_IP_VALID ");
5988 			if (mp->m_pkthdr.csum_flags & CSUM_DATA_VALID)
5989 				kprintf("CSUM_DATA_VALID ");
5990 			kprintf(")\n");
5991 		}
5992 
5993 		if (mp->m_flags & M_EXT) {
5994 			val_hi = BCE_ADDR_HI(mp->m_ext.ext_buf);
5995 			val_lo = BCE_ADDR_LO(mp->m_ext.ext_buf);
5996 			if_printf(ifp, "- m_ext: vaddr = 0x%08X:%08X, "
5997 				  "ext_size = %d\n",
5998 				  val_hi, val_lo, mp->m_ext.ext_size);
5999 		}
6000 		mp = mp->m_next;
6001 	}
6002 }
6003 
6004 
6005 /****************************************************************************/
6006 /* Prints out the mbufs in the TX mbuf chain.                               */
6007 /*                                                                          */
6008 /* Returns:                                                                 */
6009 /*   Nothing.                                                               */
6010 /****************************************************************************/
6011 static void
6012 bce_dump_tx_mbuf_chain(struct bce_softc *sc, int chain_prod, int count)
6013 {
6014 	struct ifnet *ifp = &sc->arpcom.ac_if;
6015 	int i;
6016 
6017 	if_printf(ifp,
6018 	"----------------------------"
6019 	"  tx mbuf data  "
6020 	"----------------------------\n");
6021 
6022 	for (i = 0; i < count; i++) {
6023 		if_printf(ifp, "txmbuf[%d]\n", chain_prod);
6024 		bce_dump_mbuf(sc, sc->tx_mbuf_ptr[chain_prod]);
6025 		chain_prod = TX_CHAIN_IDX(NEXT_TX_BD(chain_prod));
6026 	}
6027 
6028 	if_printf(ifp,
6029 	"----------------------------"
6030 	"----------------"
6031 	"----------------------------\n");
6032 }
6033 
6034 
6035 /****************************************************************************/
6036 /* Prints out the mbufs in the RX mbuf chain.                               */
6037 /*                                                                          */
6038 /* Returns:                                                                 */
6039 /*   Nothing.                                                               */
6040 /****************************************************************************/
6041 static void
6042 bce_dump_rx_mbuf_chain(struct bce_softc *sc, int chain_prod, int count)
6043 {
6044 	struct ifnet *ifp = &sc->arpcom.ac_if;
6045 	int i;
6046 
6047 	if_printf(ifp,
6048 	"----------------------------"
6049 	"  rx mbuf data  "
6050 	"----------------------------\n");
6051 
6052 	for (i = 0; i < count; i++) {
6053 		if_printf(ifp, "rxmbuf[0x%04X]\n", chain_prod);
6054 		bce_dump_mbuf(sc, sc->rx_mbuf_ptr[chain_prod]);
6055 		chain_prod = RX_CHAIN_IDX(NEXT_RX_BD(chain_prod));
6056 	}
6057 
6058 	if_printf(ifp,
6059 	"----------------------------"
6060 	"----------------"
6061 	"----------------------------\n");
6062 }
6063 
6064 
6065 /****************************************************************************/
6066 /* Prints out a tx_bd structure.                                            */
6067 /*                                                                          */
6068 /* Returns:                                                                 */
6069 /*   Nothing.                                                               */
6070 /****************************************************************************/
6071 static void
6072 bce_dump_txbd(struct bce_softc *sc, int idx, struct tx_bd *txbd)
6073 {
6074 	struct ifnet *ifp = &sc->arpcom.ac_if;
6075 
6076 	if (idx > MAX_TX_BD) {
6077 		/* Index out of range. */
6078 		if_printf(ifp, "tx_bd[0x%04X]: Invalid tx_bd index!\n", idx);
6079 	} else if ((idx & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE) {
6080 		/* TX Chain page pointer. */
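		/*
		 * The last descriptor slot in each page carries no data;
		 * it holds the host address of the next page in the chain.
		 */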
6081 		if_printf(ifp, "tx_bd[0x%04X]: haddr = 0x%08X:%08X, "
6082 			  "chain page pointer\n",
6083 			  idx, txbd->tx_bd_haddr_hi, txbd->tx_bd_haddr_lo);
6084 	} else {
6085 		/* Normal tx_bd entry. */
6086 		if_printf(ifp, "tx_bd[0x%04X]: haddr = 0x%08X:%08X, "
6087 			  "nbytes = 0x%08X, "
6088 			  "vlan tag= 0x%04X, flags = 0x%04X (",
6089 			  idx, txbd->tx_bd_haddr_hi, txbd->tx_bd_haddr_lo,
6090 			  txbd->tx_bd_mss_nbytes,
6091 			  txbd->tx_bd_vlan_tag, txbd->tx_bd_flags);
6092 
6093 		if (txbd->tx_bd_flags & TX_BD_FLAGS_CONN_FAULT)
6094 			kprintf(" CONN_FAULT");
6095 
6096 		if (txbd->tx_bd_flags & TX_BD_FLAGS_TCP_UDP_CKSUM)
6097 			kprintf(" TCP_UDP_CKSUM");
6098 
6099 		if (txbd->tx_bd_flags & TX_BD_FLAGS_IP_CKSUM)
6100 			kprintf(" IP_CKSUM");
6101 
6102 		if (txbd->tx_bd_flags & TX_BD_FLAGS_VLAN_TAG)
6103 			kprintf("  VLAN");
6104 
6105 		if (txbd->tx_bd_flags & TX_BD_FLAGS_COAL_NOW)
6106 			kprintf(" COAL_NOW");
6107 
6108 		if (txbd->tx_bd_flags & TX_BD_FLAGS_DONT_GEN_CRC)
6109 			kprintf(" DONT_GEN_CRC");
6110 
6111 		if (txbd->tx_bd_flags & TX_BD_FLAGS_START)
6112 			kprintf(" START");
6113 
6114 		if (txbd->tx_bd_flags & TX_BD_FLAGS_END)
6115 			kprintf(" END");
6116 
6117 		if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_LSO)
6118 			kprintf(" LSO");
6119 
6120 		if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_OPTION_WORD)
6121 			kprintf(" OPTION_WORD");
6122 
6123 		if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_FLAGS)
6124 			kprintf(" FLAGS");
6125 
6126 		if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_SNAP)
6127 			kprintf(" SNAP");
6128 
6129 		kprintf(" )\n");
6130 	}
6131 }
6132 
6133 
6134 /****************************************************************************/
6135 /* Prints out a rx_bd structure.                                            */
6136 /*                                                                          */
6137 /* Returns:                                                                 */
6138 /*   Nothing.                                                               */
6139 /****************************************************************************/
6140 static void
6141 bce_dump_rxbd(struct bce_softc *sc, int idx, struct rx_bd *rxbd)
6142 {
6143 	struct ifnet *ifp = &sc->arpcom.ac_if;
6144 
6145 	if (idx > MAX_RX_BD) {
6146 		/* Index out of range. */
6147 		if_printf(ifp, "rx_bd[0x%04X]: Invalid rx_bd index!\n", idx);
6148 	} else if ((idx & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE) {
6149 		/* RX Chain page pointer. */
6150 		if_printf(ifp, "rx_bd[0x%04X]: haddr = 0x%08X:%08X, "
6151 			  "chain page pointer\n",
6152 			  idx, rxbd->rx_bd_haddr_hi, rxbd->rx_bd_haddr_lo);
6153 	} else {
6154 		/* Normal rx_bd entry. */
6155 		if_printf(ifp, "rx_bd[0x%04X]: haddr = 0x%08X:%08X, "
6156 			  "nbytes = 0x%08X, flags = 0x%08X\n",
6157 			  idx, rxbd->rx_bd_haddr_hi, rxbd->rx_bd_haddr_lo,
6158 			  rxbd->rx_bd_len, rxbd->rx_bd_flags);
6159 	}
6160 }
6161 
6162 
6163 /****************************************************************************/
6164 /* Prints out a l2_fhdr structure.                                          */
6165 /*                                                                          */
6166 /* Returns:                                                                 */
6167 /*   Nothing.                                                               */
6168 /****************************************************************************/
6169 static void
6170 bce_dump_l2fhdr(struct bce_softc *sc, int idx, struct l2_fhdr *l2fhdr)
6171 {
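	/*
	 * The l2_fhdr is the frame header the RX processor places in front of
	 * each received frame; it carries the status bits, packet length,
	 * VLAN tag, and the IP/TCP/UDP checksum results printed below.
	 */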
6172 	if_printf(&sc->arpcom.ac_if, "l2_fhdr[0x%04X]: status = 0x%08X, "
6173 		  "pkt_len = 0x%04X, vlan = 0x%04x, "
6174 		  "ip_xsum = 0x%04X, tcp_udp_xsum = 0x%04X\n",
6175 		  idx, l2fhdr->l2_fhdr_status,
6176 		  l2fhdr->l2_fhdr_pkt_len, l2fhdr->l2_fhdr_vlan_tag,
6177 		  l2fhdr->l2_fhdr_ip_xsum, l2fhdr->l2_fhdr_tcp_udp_xsum);
6178 }
6179 
6180 
6181 /****************************************************************************/
6182 /* Prints out the tx chain.                                                 */
6183 /*                                                                          */
6184 /* Returns:                                                                 */
6185 /*   Nothing.                                                               */
6186 /****************************************************************************/
6187 static void
6188 bce_dump_tx_chain(struct bce_softc *sc, int tx_prod, int count)
6189 {
6190 	struct ifnet *ifp = &sc->arpcom.ac_if;
6191 	int i;
6192 
6193 	/* First some info about the tx_bd chain structure. */
6194 	if_printf(ifp,
6195 	"----------------------------"
6196 	"  tx_bd  chain  "
6197 	"----------------------------\n");
6198 
6199 	if_printf(ifp, "page size      = 0x%08X, "
6200 		  "tx chain pages        = 0x%08X\n",
6201 		  (uint32_t)BCM_PAGE_SIZE, (uint32_t)TX_PAGES);
6202 
6203 	if_printf(ifp, "tx_bd per page = 0x%08X, "
6204 		  "usable tx_bd per page = 0x%08X\n",
6205 		  (uint32_t)TOTAL_TX_BD_PER_PAGE,
6206 		  (uint32_t)USABLE_TX_BD_PER_PAGE);
6207 
6208 	if_printf(ifp, "total tx_bd    = 0x%08X\n", (uint32_t)TOTAL_TX_BD);
6209 
6210 	if_printf(ifp,
6211 	"----------------------------"
6212 	"  tx_bd data    "
6213 	"----------------------------\n");
6214 
6215 	/* Now print out the tx_bd's themselves. */
6216 	for (i = 0; i < count; i++) {
6217 		struct tx_bd *txbd;
6218 
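		/*
		 * TX_PAGE()/TX_IDX() split the flat chain index into a page
		 * number and an offset within that page; NEXT_TX_BD() steps
		 * over the chain page pointer entry at the end of each page
		 * and TX_CHAIN_IDX() wraps the index at the end of the chain.
		 */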
6219 		txbd = &sc->tx_bd_chain[TX_PAGE(tx_prod)][TX_IDX(tx_prod)];
6220 		bce_dump_txbd(sc, tx_prod, txbd);
6221 		tx_prod = TX_CHAIN_IDX(NEXT_TX_BD(tx_prod));
6222 	}
6223 
6224 	if_printf(ifp,
6225 	"----------------------------"
6226 	"----------------"
6227 	"----------------------------\n");
6228 }
6229 
6230 
6231 /****************************************************************************/
6232 /* Prints out the rx chain.                                                 */
6233 /*                                                                          */
6234 /* Returns:                                                                 */
6235 /*   Nothing.                                                               */
6236 /****************************************************************************/
6237 static void
6238 bce_dump_rx_chain(struct bce_softc *sc, int rx_prod, int count)
6239 {
6240 	struct ifnet *ifp = &sc->arpcom.ac_if;
6241 	int i;
6242 
6243 	/* First some info about the rx_bd chain structure. */
6244 	if_printf(ifp,
6245 	"----------------------------"
6246 	"  rx_bd  chain  "
6247 	"----------------------------\n");
6248 
6249 	if_printf(ifp, "page size      = 0x%08X, "
6250 		  "rx chain pages        = 0x%08X\n",
6251 		  (uint32_t)BCM_PAGE_SIZE, (uint32_t)RX_PAGES);
6252 
6253 	if_printf(ifp, "rx_bd per page = 0x%08X, "
6254 		  "usable rx_bd per page = 0x%08X\n",
6255 		  (uint32_t)TOTAL_RX_BD_PER_PAGE,
6256 		  (uint32_t)USABLE_RX_BD_PER_PAGE);
6257 
6258 	if_printf(ifp, "total rx_bd    = 0x%08X\n", (uint32_t)TOTAL_RX_BD);
6259 
6260 	if_printf(ifp,
6261 	"----------------------------"
6262 	"   rx_bd data   "
6263 	"----------------------------\n");
6264 
6265 	/* Now print out the rx_bd's themselves. */
6266 	for (i = 0; i < count; i++) {
6267 		struct rx_bd *rxbd;
6268 
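		/*
		 * Same walk as the tx chain: RX_PAGE()/RX_IDX() locate the
		 * entry, while NEXT_RX_BD()/RX_CHAIN_IDX() advance past the
		 * page pointer entries and wrap at the end of the chain.
		 */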
6269 		rxbd = &sc->rx_bd_chain[RX_PAGE(rx_prod)][RX_IDX(rx_prod)];
6270 		bce_dump_rxbd(sc, rx_prod, rxbd);
6271 		rx_prod = RX_CHAIN_IDX(NEXT_RX_BD(rx_prod));
6272 	}
6273 
6274 	if_printf(ifp,
6275 	"----------------------------"
6276 	"----------------"
6277 	"----------------------------\n");
6278 }
6279 
6280 
6281 /****************************************************************************/
6282 /* Prints out the status block from host memory.                            */
6283 /*                                                                          */
6284 /* Returns:                                                                 */
6285 /*   Nothing.                                                               */
6286 /****************************************************************************/
6287 static void
6288 bce_dump_status_block(struct bce_softc *sc)
6289 {
6290 	struct status_block *sblk = sc->status_block;
6291 	struct ifnet *ifp = &sc->arpcom.ac_if;
6292 
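	/*
	 * The status block is DMA'd into host memory by the controller and
	 * holds the attention bits, the per-ring quick consumer indices, and
	 * status_idx, which the driver tracks as last_status_idx when
	 * servicing interrupts.
	 */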
6293 	if_printf(ifp,
6294 	"----------------------------"
6295 	"  Status Block  "
6296 	"----------------------------\n");
6297 
6298 	if_printf(ifp, "    0x%08X - attn_bits\n", sblk->status_attn_bits);
6299 
6300 	if_printf(ifp, "    0x%08X - attn_bits_ack\n",
6301 		  sblk->status_attn_bits_ack);
6302 
6303 	if_printf(ifp, "0x%04X(0x%04X) - rx_cons0\n",
6304 	    sblk->status_rx_quick_consumer_index0,
6305 	    (uint16_t)RX_CHAIN_IDX(sblk->status_rx_quick_consumer_index0));
6306 
6307 	if_printf(ifp, "0x%04X(0x%04X) - tx_cons0\n",
6308 	    sblk->status_tx_quick_consumer_index0,
6309 	    (uint16_t)TX_CHAIN_IDX(sblk->status_tx_quick_consumer_index0));
6310 
6311 	if_printf(ifp, "        0x%04X - status_idx\n", sblk->status_idx);
6312 
6313 	/* These indices are not used by normal L2 drivers. */
6314 	if (sblk->status_rx_quick_consumer_index1) {
6315 		if_printf(ifp, "0x%04X(0x%04X) - rx_cons1\n",
6316 		sblk->status_rx_quick_consumer_index1,
6317 		(uint16_t)RX_CHAIN_IDX(sblk->status_rx_quick_consumer_index1));
6318 	}
6319 
6320 	if (sblk->status_tx_quick_consumer_index1) {
6321 		if_printf(ifp, "0x%04X(0x%04X) - tx_cons1\n",
6322 		sblk->status_tx_quick_consumer_index1,
6323 		(uint16_t)TX_CHAIN_IDX(sblk->status_tx_quick_consumer_index1));
6324 	}
6325 
6326 	if (sblk->status_rx_quick_consumer_index2) {
6327 		if_printf(ifp, "0x%04X(0x%04X) - rx_cons2\n",
6328 		sblk->status_rx_quick_consumer_index2,
6329 		(uint16_t)RX_CHAIN_IDX(sblk->status_rx_quick_consumer_index2));
6330 	}
6331 
6332 	if (sblk->status_tx_quick_consumer_index2) {
6333 		if_printf(ifp, "0x%04X(0x%04X) - tx_cons2\n",
6334 		sblk->status_tx_quick_consumer_index2,
6335 		(uint16_t)TX_CHAIN_IDX(sblk->status_tx_quick_consumer_index2));
6336 	}
6337 
6338 	if (sblk->status_rx_quick_consumer_index3) {
6339 		if_printf(ifp, "0x%04X(0x%04X) - rx_cons3\n",
6340 		sblk->status_rx_quick_consumer_index3,
6341 		(uint16_t)RX_CHAIN_IDX(sblk->status_rx_quick_consumer_index3));
6342 	}
6343 
6344 	if (sblk->status_tx_quick_consumer_index3) {
6345 		if_printf(ifp, "0x%04X(0x%04X) - tx_cons3\n",
6346 		sblk->status_tx_quick_consumer_index3,
6347 		(uint16_t)TX_CHAIN_IDX(sblk->status_tx_quick_consumer_index3));
6348 	}
6349 
6350 	if (sblk->status_rx_quick_consumer_index4 ||
6351 	    sblk->status_rx_quick_consumer_index5) {
6352 		if_printf(ifp, "rx_cons4  = 0x%08X, rx_cons5      = 0x%08X\n",
6353 			  sblk->status_rx_quick_consumer_index4,
6354 			  sblk->status_rx_quick_consumer_index5);
6355 	}
6356 
6357 	if (sblk->status_rx_quick_consumer_index6 ||
6358 	    sblk->status_rx_quick_consumer_index7) {
6359 		if_printf(ifp, "rx_cons6  = 0x%08X, rx_cons7      = 0x%08X\n",
6360 			  sblk->status_rx_quick_consumer_index6,
6361 			  sblk->status_rx_quick_consumer_index7);
6362 	}
6363 
6364 	if (sblk->status_rx_quick_consumer_index8 ||
6365 	    sblk->status_rx_quick_consumer_index9) {
6366 		if_printf(ifp, "rx_cons8  = 0x%08X, rx_cons9      = 0x%08X\n",
6367 			  sblk->status_rx_quick_consumer_index8,
6368 			  sblk->status_rx_quick_consumer_index9);
6369 	}
6370 
6371 	if (sblk->status_rx_quick_consumer_index10 ||
6372 	    sblk->status_rx_quick_consumer_index11) {
6373 		if_printf(ifp, "rx_cons10 = 0x%08X, rx_cons11     = 0x%08X\n",
6374 			  sblk->status_rx_quick_consumer_index10,
6375 			  sblk->status_rx_quick_consumer_index11);
6376 	}
6377 
6378 	if (sblk->status_rx_quick_consumer_index12 ||
6379 	    sblk->status_rx_quick_consumer_index13) {
6380 		if_printf(ifp, "rx_cons12 = 0x%08X, rx_cons13     = 0x%08X\n",
6381 			  sblk->status_rx_quick_consumer_index12,
6382 			  sblk->status_rx_quick_consumer_index13);
6383 	}
6384 
6385 	if (sblk->status_rx_quick_consumer_index14 ||
6386 	    sblk->status_rx_quick_consumer_index15) {
6387 		if_printf(ifp, "rx_cons14 = 0x%08X, rx_cons15     = 0x%08X\n",
6388 			  sblk->status_rx_quick_consumer_index14,
6389 			  sblk->status_rx_quick_consumer_index15);
6390 	}
6391 
6392 	if (sblk->status_completion_producer_index ||
6393 	    sblk->status_cmd_consumer_index) {
6394 		if_printf(ifp, "com_prod  = 0x%08X, cmd_cons      = 0x%08X\n",
6395 			  sblk->status_completion_producer_index,
6396 			  sblk->status_cmd_consumer_index);
6397 	}
6398 
6399 	if_printf(ifp,
6400 	"----------------------------"
6401 	"----------------"
6402 	"----------------------------\n");
6403 }
6404 
6405 
6406 /****************************************************************************/
6407 /* Prints out the statistics block.                                         */
6408 /*                                                                          */
6409 /* Returns:                                                                 */
6410 /*   Nothing.                                                               */
6411 /****************************************************************************/
6412 static void
6413 bce_dump_stats_block(struct bce_softc *sc)
6414 {
6415 	struct statistics_block *sblk = sc->stats_block;
6416 	struct ifnet *ifp = &sc->arpcom.ac_if;
6417 
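	/*
	 * The statistics block is updated by the controller via DMA; as the
	 * banner below notes, counters that are still zero are not printed.
	 */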
6418 	if_printf(ifp,
6419 	"---------------"
6420 	" Stats Block  (All Stats Not Shown Are 0) "
6421 	"---------------\n");
6422 
6423 	if (sblk->stat_IfHCInOctets_hi || sblk->stat_IfHCInOctets_lo) {
6424 		if_printf(ifp, "0x%08X:%08X : IfHcInOctets\n",
6425 			  sblk->stat_IfHCInOctets_hi,
6426 			  sblk->stat_IfHCInOctets_lo);
6427 	}
6428 
6429 	if (sblk->stat_IfHCInBadOctets_hi || sblk->stat_IfHCInBadOctets_lo) {
6430 		if_printf(ifp, "0x%08X:%08X : IfHcInBadOctets\n",
6431 			  sblk->stat_IfHCInBadOctets_hi,
6432 			  sblk->stat_IfHCInBadOctets_lo);
6433 	}
6434 
6435 	if (sblk->stat_IfHCOutOctets_hi || sblk->stat_IfHCOutOctets_lo) {
6436 		if_printf(ifp, "0x%08X:%08X : IfHcOutOctets\n",
6437 			  sblk->stat_IfHCOutOctets_hi,
6438 			  sblk->stat_IfHCOutOctets_lo);
6439 	}
6440 
6441 	if (sblk->stat_IfHCOutBadOctets_hi || sblk->stat_IfHCOutBadOctets_lo) {
6442 		if_printf(ifp, "0x%08X:%08X : IfHcOutBadOctets\n",
6443 			  sblk->stat_IfHCOutBadOctets_hi,
6444 			  sblk->stat_IfHCOutBadOctets_lo);
6445 	}
6446 
6447 	if (sblk->stat_IfHCInUcastPkts_hi || sblk->stat_IfHCInUcastPkts_lo) {
6448 		if_printf(ifp, "0x%08X:%08X : IfHcInUcastPkts\n",
6449 			  sblk->stat_IfHCInUcastPkts_hi,
6450 			  sblk->stat_IfHCInUcastPkts_lo);
6451 	}
6452 
6453 	if (sblk->stat_IfHCInBroadcastPkts_hi ||
6454 	    sblk->stat_IfHCInBroadcastPkts_lo) {
6455 		if_printf(ifp, "0x%08X:%08X : IfHcInBroadcastPkts\n",
6456 			  sblk->stat_IfHCInBroadcastPkts_hi,
6457 			  sblk->stat_IfHCInBroadcastPkts_lo);
6458 	}
6459 
6460 	if (sblk->stat_IfHCInMulticastPkts_hi ||
6461 	    sblk->stat_IfHCInMulticastPkts_lo) {
6462 		if_printf(ifp, "0x%08X:%08X : IfHcInMulticastPkts\n",
6463 			  sblk->stat_IfHCInMulticastPkts_hi,
6464 			  sblk->stat_IfHCInMulticastPkts_lo);
6465 	}
6466 
6467 	if (sblk->stat_IfHCOutUcastPkts_hi || sblk->stat_IfHCOutUcastPkts_lo) {
6468 		if_printf(ifp, "0x%08X:%08X : IfHcOutUcastPkts\n",
6469 			  sblk->stat_IfHCOutUcastPkts_hi,
6470 			  sblk->stat_IfHCOutUcastPkts_lo);
6471 	}
6472 
6473 	if (sblk->stat_IfHCOutBroadcastPkts_hi ||
6474 	    sblk->stat_IfHCOutBroadcastPkts_lo) {
6475 		if_printf(ifp, "0x%08X:%08X : IfHcOutBroadcastPkts\n",
6476 			  sblk->stat_IfHCOutBroadcastPkts_hi,
6477 			  sblk->stat_IfHCOutBroadcastPkts_lo);
6478 	}
6479 
6480 	if (sblk->stat_IfHCOutMulticastPkts_hi ||
6481 	    sblk->stat_IfHCOutMulticastPkts_lo) {
6482 		if_printf(ifp, "0x%08X:%08X : IfHcOutMulticastPkts\n",
6483 			  sblk->stat_IfHCOutMulticastPkts_hi,
6484 			  sblk->stat_IfHCOutMulticastPkts_lo);
6485 	}
6486 
6487 	if (sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors) {
6488 		if_printf(ifp, "         0x%08X : "
6489 		"emac_tx_stat_dot3statsinternalmactransmiterrors\n",
6490 		sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors);
6491 	}
6492 
6493 	if (sblk->stat_Dot3StatsCarrierSenseErrors) {
6494 		if_printf(ifp, "         0x%08X : "
6495 			  "Dot3StatsCarrierSenseErrors\n",
6496 			  sblk->stat_Dot3StatsCarrierSenseErrors);
6497 	}
6498 
6499 	if (sblk->stat_Dot3StatsFCSErrors) {
6500 		if_printf(ifp, "         0x%08X : Dot3StatsFCSErrors\n",
6501 			  sblk->stat_Dot3StatsFCSErrors);
6502 	}
6503 
6504 	if (sblk->stat_Dot3StatsAlignmentErrors) {
6505 		if_printf(ifp, "         0x%08X : Dot3StatsAlignmentErrors\n",
6506 			  sblk->stat_Dot3StatsAlignmentErrors);
6507 	}
6508 
6509 	if (sblk->stat_Dot3StatsSingleCollisionFrames) {
6510 		if_printf(ifp, "         0x%08X : "
6511 			  "Dot3StatsSingleCollisionFrames\n",
6512 			  sblk->stat_Dot3StatsSingleCollisionFrames);
6513 	}
6514 
6515 	if (sblk->stat_Dot3StatsMultipleCollisionFrames) {
6516 		if_printf(ifp, "         0x%08X : "
6517 			  "Dot3StatsMultipleCollisionFrames\n",
6518 			  sblk->stat_Dot3StatsMultipleCollisionFrames);
6519 	}
6520 
6521 	if (sblk->stat_Dot3StatsDeferredTransmissions) {
6522 		if_printf(ifp, "         0x%08X : "
6523 			  "Dot3StatsDeferredTransmissions\n",
6524 			  sblk->stat_Dot3StatsDeferredTransmissions);
6525 	}
6526 
6527 	if (sblk->stat_Dot3StatsExcessiveCollisions) {
6528 		if_printf(ifp, "         0x%08X : "
6529 			  "Dot3StatsExcessiveCollisions\n",
6530 			  sblk->stat_Dot3StatsExcessiveCollisions);
6531 	}
6532 
6533 	if (sblk->stat_Dot3StatsLateCollisions) {
6534 		if_printf(ifp, "         0x%08X : Dot3StatsLateCollisions\n",
6535 			  sblk->stat_Dot3StatsLateCollisions);
6536 	}
6537 
6538 	if (sblk->stat_EtherStatsCollisions) {
6539 		if_printf(ifp, "         0x%08X : EtherStatsCollisions\n",
6540 			  sblk->stat_EtherStatsCollisions);
6541 	}
6542 
6543 	if (sblk->stat_EtherStatsFragments)  {
6544 		if_printf(ifp, "         0x%08X : EtherStatsFragments\n",
6545 			  sblk->stat_EtherStatsFragments);
6546 	}
6547 
6548 	if (sblk->stat_EtherStatsJabbers) {
6549 		if_printf(ifp, "         0x%08X : EtherStatsJabbers\n",
6550 			  sblk->stat_EtherStatsJabbers);
6551 	}
6552 
6553 	if (sblk->stat_EtherStatsUndersizePkts) {
6554 		if_printf(ifp, "         0x%08X : EtherStatsUndersizePkts\n",
6555 			  sblk->stat_EtherStatsUndersizePkts);
6556 	}
6557 
6558 	if (sblk->stat_EtherStatsOverrsizePkts) {
6559 		if_printf(ifp, "         0x%08X : EtherStatsOverrsizePkts\n",
6560 			  sblk->stat_EtherStatsOverrsizePkts);
6561 	}
6562 
6563 	if (sblk->stat_EtherStatsPktsRx64Octets) {
6564 		if_printf(ifp, "         0x%08X : EtherStatsPktsRx64Octets\n",
6565 			  sblk->stat_EtherStatsPktsRx64Octets);
6566 	}
6567 
6568 	if (sblk->stat_EtherStatsPktsRx65Octetsto127Octets) {
6569 		if_printf(ifp, "         0x%08X : "
6570 			  "EtherStatsPktsRx65Octetsto127Octets\n",
6571 			  sblk->stat_EtherStatsPktsRx65Octetsto127Octets);
6572 	}
6573 
6574 	if (sblk->stat_EtherStatsPktsRx128Octetsto255Octets) {
6575 		if_printf(ifp, "         0x%08X : "
6576 			  "EtherStatsPktsRx128Octetsto255Octets\n",
6577 			  sblk->stat_EtherStatsPktsRx128Octetsto255Octets);
6578 	}
6579 
6580 	if (sblk->stat_EtherStatsPktsRx256Octetsto511Octets) {
6581 		if_printf(ifp, "         0x%08X : "
6582 			  "EtherStatsPktsRx256Octetsto511Octets\n",
6583 			  sblk->stat_EtherStatsPktsRx256Octetsto511Octets);
6584 	}
6585 
6586 	if (sblk->stat_EtherStatsPktsRx512Octetsto1023Octets) {
6587 		if_printf(ifp, "         0x%08X : "
6588 			  "EtherStatsPktsRx512Octetsto1023Octets\n",
6589 			  sblk->stat_EtherStatsPktsRx512Octetsto1023Octets);
6590 	}
6591 
6592 	if (sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets) {
6593 		if_printf(ifp, "         0x%08X : "
6594 			  "EtherStatsPktsRx1024Octetsto1522Octets\n",
6595 			  sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets);
6596 	}
6597 
6598 	if (sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets) {
6599 		if_printf(ifp, "         0x%08X : "
6600 			  "EtherStatsPktsRx1523Octetsto9022Octets\n",
6601 			  sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets);
6602 	}
6603 
6604 	if (sblk->stat_EtherStatsPktsTx64Octets) {
6605 		if_printf(ifp, "         0x%08X : EtherStatsPktsTx64Octets\n",
6606 			  sblk->stat_EtherStatsPktsTx64Octets);
6607 	}
6608 
6609 	if (sblk->stat_EtherStatsPktsTx65Octetsto127Octets) {
6610 		if_printf(ifp, "         0x%08X : "
6611 			  "EtherStatsPktsTx65Octetsto127Octets\n",
6612 			  sblk->stat_EtherStatsPktsTx65Octetsto127Octets);
6613 	}
6614 
6615 	if (sblk->stat_EtherStatsPktsTx128Octetsto255Octets) {
6616 		if_printf(ifp, "         0x%08X : "
6617 			  "EtherStatsPktsTx128Octetsto255Octets\n",
6618 			  sblk->stat_EtherStatsPktsTx128Octetsto255Octets);
6619 	}
6620 
6621 	if (sblk->stat_EtherStatsPktsTx256Octetsto511Octets) {
6622 		if_printf(ifp, "         0x%08X : "
6623 			  "EtherStatsPktsTx256Octetsto511Octets\n",
6624 			  sblk->stat_EtherStatsPktsTx256Octetsto511Octets);
6625 	}
6626 
6627 	if (sblk->stat_EtherStatsPktsTx512Octetsto1023Octets) {
6628 		if_printf(ifp, "         0x%08X : "
6629 			  "EtherStatsPktsTx512Octetsto1023Octets\n",
6630 			  sblk->stat_EtherStatsPktsTx512Octetsto1023Octets);
6631 	}
6632 
6633 	if (sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets) {
6634 		if_printf(ifp, "         0x%08X : "
6635 			  "EtherStatsPktsTx1024Octetsto1522Octets\n",
6636 			  sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets);
6637 	}
6638 
6639 	if (sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets) {
6640 		if_printf(ifp, "         0x%08X : "
6641 			  "EtherStatsPktsTx1523Octetsto9022Octets\n",
6642 			  sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets);
6643 	}
6644 
6645 	if (sblk->stat_XonPauseFramesReceived) {
6646 		if_printf(ifp, "         0x%08X : XonPauseFramesReceived\n",
6647 			  sblk->stat_XonPauseFramesReceived);
6648 	}
6649 
6650 	if (sblk->stat_XoffPauseFramesReceived) {
6651 		if_printf(ifp, "         0x%08X : XoffPauseFramesReceived\n",
6652 			  sblk->stat_XoffPauseFramesReceived);
6653 	}
6654 
6655 	if (sblk->stat_OutXonSent) {
6656 		if_printf(ifp, "         0x%08X : OutXonSent\n",
6657 			  sblk->stat_OutXonSent);
6658 	}
6659 
6660 	if (sblk->stat_OutXoffSent) {
6661 		if_printf(ifp, "         0x%08X : OutXoffSent\n",
6662 			  sblk->stat_OutXoffSent);
6663 	}
6664 
6665 	if (sblk->stat_FlowControlDone) {
6666 		if_printf(ifp, "         0x%08X : FlowControlDone\n",
6667 			  sblk->stat_FlowControlDone);
6668 	}
6669 
6670 	if (sblk->stat_MacControlFramesReceived) {
6671 		if_printf(ifp, "         0x%08X : MacControlFramesReceived\n",
6672 			  sblk->stat_MacControlFramesReceived);
6673 	}
6674 
6675 	if (sblk->stat_XoffStateEntered) {
6676 		if_printf(ifp, "         0x%08X : XoffStateEntered\n",
6677 			  sblk->stat_XoffStateEntered);
6678 	}
6679 
6680 	if (sblk->stat_IfInFramesL2FilterDiscards) {
6681 		if_printf(ifp, "         0x%08X : IfInFramesL2FilterDiscards\n",
			  sblk->stat_IfInFramesL2FilterDiscards);
6682 	}
6683 
6684 	if (sblk->stat_IfInRuleCheckerDiscards) {
6685 		if_printf(ifp, "         0x%08X : IfInRuleCheckerDiscards\n",
6686 			  sblk->stat_IfInRuleCheckerDiscards);
6687 	}
6688 
6689 	if (sblk->stat_IfInFTQDiscards) {
6690 		if_printf(ifp, "         0x%08X : IfInFTQDiscards\n",
6691 			  sblk->stat_IfInFTQDiscards);
6692 	}
6693 
6694 	if (sblk->stat_IfInMBUFDiscards) {
6695 		if_printf(ifp, "         0x%08X : IfInMBUFDiscards\n",
6696 			  sblk->stat_IfInMBUFDiscards);
6697 	}
6698 
6699 	if (sblk->stat_IfInRuleCheckerP4Hit) {
6700 		if_printf(ifp, "         0x%08X : IfInRuleCheckerP4Hit\n",
6701 			  sblk->stat_IfInRuleCheckerP4Hit);
6702 	}
6703 
6704 	if (sblk->stat_CatchupInRuleCheckerDiscards) {
6705 		if_printf(ifp, "         0x%08X : "
6706 			  "CatchupInRuleCheckerDiscards\n",
6707 			  sblk->stat_CatchupInRuleCheckerDiscards);
6708 	}
6709 
6710 	if (sblk->stat_CatchupInFTQDiscards) {
6711 		if_printf(ifp, "         0x%08X : CatchupInFTQDiscards\n",
6712 			  sblk->stat_CatchupInFTQDiscards);
6713 	}
6714 
6715 	if (sblk->stat_CatchupInMBUFDiscards) {
6716 		if_printf(ifp, "         0x%08X : CatchupInMBUFDiscards\n",
6717 			  sblk->stat_CatchupInMBUFDiscards);
6718 	}
6719 
6720 	if (sblk->stat_CatchupInRuleCheckerP4Hit) {
6721 		if_printf(ifp, "         0x%08X : CatchupInRuleCheckerP4Hit\n",
6722 			  sblk->stat_CatchupInRuleCheckerP4Hit);
6723 	}
6724 
6725 	if_printf(ifp,
6726 	"----------------------------"
6727 	"----------------"
6728 	"----------------------------\n");
6729 }
6730 
6731 
6732 /****************************************************************************/
6733 /* Prints out a summary of the driver state.                                */
6734 /*                                                                          */
6735 /* Returns:                                                                 */
6736 /*   Nothing.                                                               */
6737 /****************************************************************************/
6738 static void
6739 bce_dump_driver_state(struct bce_softc *sc)
6740 {
6741 	struct ifnet *ifp = &sc->arpcom.ac_if;
6742 	uint32_t val_hi, val_lo;
6743 
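	/*
	 * BCE_ADDR_HI()/BCE_ADDR_LO() split a (possibly 64-bit) pointer into
	 * two 32-bit halves so it can be printed as 0xHHHHHHHH:LLLLLLLL.
	 */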
6744 	if_printf(ifp,
6745 	"-----------------------------"
6746 	" Driver State "
6747 	"-----------------------------\n");
6748 
6749 	val_hi = BCE_ADDR_HI(sc);
6750 	val_lo = BCE_ADDR_LO(sc);
6751 	if_printf(ifp, "0x%08X:%08X - (sc) driver softc structure "
6752 		  "virtual address\n", val_hi, val_lo);
6753 
6754 	val_hi = BCE_ADDR_HI(sc->status_block);
6755 	val_lo = BCE_ADDR_LO(sc->status_block);
6756 	if_printf(ifp, "0x%08X:%08X - (sc->status_block) status block "
6757 		  "virtual address\n", val_hi, val_lo);
6758 
6759 	val_hi = BCE_ADDR_HI(sc->stats_block);
6760 	val_lo = BCE_ADDR_LO(sc->stats_block);
6761 	if_printf(ifp, "0x%08X:%08X - (sc->stats_block) statistics block "
6762 		  "virtual address\n", val_hi, val_lo);
6763 
6764 	val_hi = BCE_ADDR_HI(sc->tx_bd_chain);
6765 	val_lo = BCE_ADDR_LO(sc->tx_bd_chain);
6766 	if_printf(ifp, "0x%08X:%08X - (sc->tx_bd_chain) tx_bd chain "
6767 		  "virtual address\n", val_hi, val_lo);
6768 
6769 	val_hi = BCE_ADDR_HI(sc->rx_bd_chain);
6770 	val_lo = BCE_ADDR_LO(sc->rx_bd_chain);
6771 	if_printf(ifp, "0x%08X:%08X - (sc->rx_bd_chain) rx_bd chain "
6772 		  "virtual address\n", val_hi, val_lo);
6773 
6774 	val_hi = BCE_ADDR_HI(sc->tx_mbuf_ptr);
6775 	val_lo = BCE_ADDR_LO(sc->tx_mbuf_ptr);
6776 	if_printf(ifp, "0x%08X:%08X - (sc->tx_mbuf_ptr) tx mbuf chain "
6777 		  "virtual address\n", val_hi, val_lo);
6778 
6779 	val_hi = BCE_ADDR_HI(sc->rx_mbuf_ptr);
6780 	val_lo = BCE_ADDR_LO(sc->rx_mbuf_ptr);
6781 	if_printf(ifp, "0x%08X:%08X - (sc->rx_mbuf_ptr) rx mbuf chain "
6782 		  "virtual address\n", val_hi, val_lo);
6783 
6784 	if_printf(ifp, "         0x%08X - (sc->interrupts_generated) "
6785 		  "h/w intrs\n", sc->interrupts_generated);
6786 
6787 	if_printf(ifp, "         0x%08X - (sc->rx_interrupts) "
6788 		  "rx interrupts handled\n", sc->rx_interrupts);
6789 
6790 	if_printf(ifp, "         0x%08X - (sc->tx_interrupts) "
6791 		  "tx interrupts handled\n", sc->tx_interrupts);
6792 
6793 	if_printf(ifp, "         0x%08X - (sc->last_status_idx) "
6794 		  "status block index\n", sc->last_status_idx);
6795 
6796 	if_printf(ifp, "     0x%04X(0x%04X) - (sc->tx_prod) "
6797 		  "tx producer index\n",
6798 		  sc->tx_prod, (uint16_t)TX_CHAIN_IDX(sc->tx_prod));
6799 
6800 	if_printf(ifp, "     0x%04X(0x%04X) - (sc->tx_cons) "
6801 		  "tx consumer index\n",
6802 		  sc->tx_cons, (uint16_t)TX_CHAIN_IDX(sc->tx_cons));
6803 
6804 	if_printf(ifp, "         0x%08X - (sc->tx_prod_bseq) "
6805 		  "tx producer bseq index\n", sc->tx_prod_bseq);
6806 
6807 	if_printf(ifp, "     0x%04X(0x%04X) - (sc->rx_prod) "
6808 		  "rx producer index\n",
6809 		  sc->rx_prod, (uint16_t)RX_CHAIN_IDX(sc->rx_prod));
6810 
6811 	if_printf(ifp, "     0x%04X(0x%04X) - (sc->rx_cons) "
6812 		  "rx consumer index\n",
6813 		  sc->rx_cons, (uint16_t)RX_CHAIN_IDX(sc->rx_cons));
6814 
6815 	if_printf(ifp, "         0x%08X - (sc->rx_prod_bseq) "
6816 		  "rx producer bseq index\n", sc->rx_prod_bseq);
6817 
6818 	if_printf(ifp, "         0x%08X - (sc->rx_mbuf_alloc) "
6819 		  "rx mbufs allocated\n", sc->rx_mbuf_alloc);
6820 
6821 	if_printf(ifp, "         0x%08X - (sc->free_rx_bd) "
6822 		  "free rx_bd's\n", sc->free_rx_bd);
6823 
6824 	if_printf(ifp, "0x%08X/%08X - (sc->rx_low_watermark) rx "
6825 		  "low watermark\n", sc->rx_low_watermark, sc->max_rx_bd);
6826 
6827 	if_printf(ifp, "         0x%08X - (sc->tx_mbuf_alloc) "
6828 		  "tx mbufs allocated\n", sc->tx_mbuf_alloc);
6829 
6830 	if_printf(ifp, "         0x%08X - (sc->rx_mbuf_alloc) "
6831 		  "rx mbufs allocated\n", sc->rx_mbuf_alloc);
6832 
6833 	if_printf(ifp, "         0x%08X - (sc->used_tx_bd) used tx_bd's\n",
6834 		  sc->used_tx_bd);
6835 
6836 	if_printf(ifp, "0x%08X/%08X - (sc->tx_hi_watermark) tx hi watermark\n",
6837 		  sc->tx_hi_watermark, sc->max_tx_bd);
6838 
6839 	if_printf(ifp, "         0x%08X - (sc->mbuf_alloc_failed) "
6840 		  "failed mbuf alloc\n", sc->mbuf_alloc_failed);
6841 
6842 	if_printf(ifp,
6843 	"----------------------------"
6844 	"----------------"
6845 	"----------------------------\n");
6846 }
6847 
6848 
6849 /****************************************************************************/
6850 /* Prints out the hardware state through a summary of important registers,  */
6851 /* followed by a complete register dump.                                    */
6852 /*                                                                          */
6853 /* Returns:                                                                 */
6854 /*   Nothing.                                                               */
6855 /****************************************************************************/
6856 static void
6857 bce_dump_hw_state(struct bce_softc *sc)
6858 {
6859 	struct ifnet *ifp = &sc->arpcom.ac_if;
6860 	uint32_t val1;
6861 	int i;
6862 
6863 	if_printf(ifp,
6864 	"----------------------------"
6865 	" Hardware State "
6866 	"----------------------------\n");
6867 
6868 	if_printf(ifp, "0x%08X - bootcode version\n", sc->bce_fw_ver);
6869 
6870 	val1 = REG_RD(sc, BCE_MISC_ENABLE_STATUS_BITS);
6871 	if_printf(ifp, "0x%08X - (0x%06X) misc_enable_status_bits\n",
6872 		  val1, BCE_MISC_ENABLE_STATUS_BITS);
6873 
6874 	val1 = REG_RD(sc, BCE_DMA_STATUS);
6875 	if_printf(ifp, "0x%08X - (0x%04X) dma_status\n", val1, BCE_DMA_STATUS);
6876 
6877 	val1 = REG_RD(sc, BCE_CTX_STATUS);
6878 	if_printf(ifp, "0x%08X - (0x%04X) ctx_status\n", val1, BCE_CTX_STATUS);
6879 
6880 	val1 = REG_RD(sc, BCE_EMAC_STATUS);
6881 	if_printf(ifp, "0x%08X - (0x%04X) emac_status\n",
6882 		  val1, BCE_EMAC_STATUS);
6883 
6884 	val1 = REG_RD(sc, BCE_RPM_STATUS);
6885 	if_printf(ifp, "0x%08X - (0x%04X) rpm_status\n", val1, BCE_RPM_STATUS);
6886 
6887 	val1 = REG_RD(sc, BCE_TBDR_STATUS);
6888 	if_printf(ifp, "0x%08X - (0x%04X) tbdr_status\n",
6889 		  val1, BCE_TBDR_STATUS);
6890 
6891 	val1 = REG_RD(sc, BCE_TDMA_STATUS);
6892 	if_printf(ifp, "0x%08X - (0x%04X) tdma_status\n",
6893 		  val1, BCE_TDMA_STATUS);
6894 
6895 	val1 = REG_RD(sc, BCE_HC_STATUS);
6896 	if_printf(ifp, "0x%08X - (0x%06X) hc_status\n", val1, BCE_HC_STATUS);
6897 
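	/*
	 * The processor state registers below sit behind the chip's indirect
	 * register window, so they are read with REG_RD_IND() rather than
	 * REG_RD().
	 */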
6898 	val1 = REG_RD_IND(sc, BCE_TXP_CPU_STATE);
6899 	if_printf(ifp, "0x%08X - (0x%06X) txp_cpu_state\n",
6900 		  val1, BCE_TXP_CPU_STATE);
6901 
6902 	val1 = REG_RD_IND(sc, BCE_TPAT_CPU_STATE);
6903 	if_printf(ifp, "0x%08X - (0x%06X) tpat_cpu_state\n",
6904 		  val1, BCE_TPAT_CPU_STATE);
6905 
6906 	val1 = REG_RD_IND(sc, BCE_RXP_CPU_STATE);
6907 	if_printf(ifp, "0x%08X - (0x%06X) rxp_cpu_state\n",
6908 		  val1, BCE_RXP_CPU_STATE);
6909 
6910 	val1 = REG_RD_IND(sc, BCE_COM_CPU_STATE);
6911 	if_printf(ifp, "0x%08X - (0x%06X) com_cpu_state\n",
6912 		  val1, BCE_COM_CPU_STATE);
6913 
6914 	val1 = REG_RD_IND(sc, BCE_MCP_CPU_STATE);
6915 	if_printf(ifp, "0x%08X - (0x%06X) mcp_cpu_state\n",
6916 		  val1, BCE_MCP_CPU_STATE);
6917 
6918 	val1 = REG_RD_IND(sc, BCE_CP_CPU_STATE);
6919 	if_printf(ifp, "0x%08X - (0x%06X) cp_cpu_state\n",
6920 		  val1, BCE_CP_CPU_STATE);
6921 
6922 	if_printf(ifp,
6923 	"----------------------------"
6924 	"----------------"
6925 	"----------------------------\n");
6926 
6927 	if_printf(ifp,
6928 	"----------------------------"
6929 	" Register  Dump "
6930 	"----------------------------\n");
6931 
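	/* Dump the directly mapped register space, four words per row. */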
6932 	for (i = 0x400; i < 0x8000; i += 0x10) {
6933 		if_printf(ifp, "0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n", i,
6934 			  REG_RD(sc, i),
6935 			  REG_RD(sc, i + 0x4),
6936 			  REG_RD(sc, i + 0x8),
6937 			  REG_RD(sc, i + 0xc));
6938 	}
6939 
6940 	if_printf(ifp,
6941 	"----------------------------"
6942 	"----------------"
6943 	"----------------------------\n");
6944 }
6945 
6946 
6947 /****************************************************************************/
6948 /* Prints out the TXP state.                                                */
6949 /*                                                                          */
6950 /* Returns:                                                                 */
6951 /*   Nothing.                                                               */
6952 /****************************************************************************/
6953 static void
6954 bce_dump_txp_state(struct bce_softc *sc)
6955 {
6956 	struct ifnet *ifp = &sc->arpcom.ac_if;
6957 	uint32_t val1;
6958 	int i;
6959 
6960 	if_printf(ifp,
6961 	"----------------------------"
6962 	"   TXP  State   "
6963 	"----------------------------\n");
6964 
6965 	val1 = REG_RD_IND(sc, BCE_TXP_CPU_MODE);
6966 	if_printf(ifp, "0x%08X - (0x%06X) txp_cpu_mode\n",
6967 		  val1, BCE_TXP_CPU_MODE);
6968 
6969 	val1 = REG_RD_IND(sc, BCE_TXP_CPU_STATE);
6970 	if_printf(ifp, "0x%08X - (0x%06X) txp_cpu_state\n",
6971 		  val1, BCE_TXP_CPU_STATE);
6972 
6973 	val1 = REG_RD_IND(sc, BCE_TXP_CPU_EVENT_MASK);
6974 	if_printf(ifp, "0x%08X - (0x%06X) txp_cpu_event_mask\n",
6975 		  val1, BCE_TXP_CPU_EVENT_MASK);
6976 
6977 	if_printf(ifp,
6978 	"----------------------------"
6979 	" Register  Dump "
6980 	"----------------------------\n");
6981 
6982 	for (i = BCE_TXP_CPU_MODE; i < 0x68000; i += 0x10) {
6983 		/* Skip the big blank spaces */
6984 		if (i < 0x45400 || i > 0x5ffff) {
6985 			if_printf(ifp, "0x%04X: "
6986 				  "0x%08X 0x%08X 0x%08X 0x%08X\n", i,
6987 				  REG_RD_IND(sc, i),
6988 				  REG_RD_IND(sc, i + 0x4),
6989 				  REG_RD_IND(sc, i + 0x8),
6990 				  REG_RD_IND(sc, i + 0xc));
6991 		}
6992 	}
6993 
6994 	if_printf(ifp,
6995 	"----------------------------"
6996 	"----------------"
6997 	"----------------------------\n");
6998 }
6999 
7000 
7001 /****************************************************************************/
7002 /* Prints out the RXP state.                                                */
7003 /*                                                                          */
7004 /* Returns:                                                                 */
7005 /*   Nothing.                                                               */
7006 /****************************************************************************/
7007 static void
7008 bce_dump_rxp_state(struct bce_softc *sc)
7009 {
7010 	struct ifnet *ifp = &sc->arpcom.ac_if;
7011 	uint32_t val1;
7012 	int i;
7013 
7014 	if_printf(ifp,
7015 	"----------------------------"
7016 	"   RXP  State   "
7017 	"----------------------------\n");
7018 
7019 	val1 = REG_RD_IND(sc, BCE_RXP_CPU_MODE);
7020 	if_printf(ifp, "0x%08X - (0x%06X) rxp_cpu_mode\n",
7021 		  val1, BCE_RXP_CPU_MODE);
7022 
7023 	val1 = REG_RD_IND(sc, BCE_RXP_CPU_STATE);
7024 	if_printf(ifp, "0x%08X - (0x%06X) rxp_cpu_state\n",
7025 		  val1, BCE_RXP_CPU_STATE);
7026 
7027 	val1 = REG_RD_IND(sc, BCE_RXP_CPU_EVENT_MASK);
7028 	if_printf(ifp, "0x%08X - (0x%06X) rxp_cpu_event_mask\n",
7029 		  val1, BCE_RXP_CPU_EVENT_MASK);
7030 
7031 	if_printf(ifp,
7032 	"----------------------------"
7033 	" Register  Dump "
7034 	"----------------------------\n");
7035 
7036 	for (i = BCE_RXP_CPU_MODE; i < 0xe8fff; i += 0x10) {
7037 		/* Skip the big blank spaces */
7038 		if (i < 0xc5400 || i > 0xdffff) {
7039 			if_printf(ifp, "0x%04X: "
7040 				  "0x%08X 0x%08X 0x%08X 0x%08X\n", i,
7041 				  REG_RD_IND(sc, i),
7042 				  REG_RD_IND(sc, i + 0x4),
7043 				  REG_RD_IND(sc, i + 0x8),
7044 				  REG_RD_IND(sc, i + 0xc));
7045 		}
7046 	}
7047 
7048 	if_printf(ifp,
7049 	"----------------------------"
7050 	"----------------"
7051 	"----------------------------\n");
7052 }
7053 
7054 
7055 /****************************************************************************/
7056 /* Prints out the TPAT state.                                               */
7057 /*                                                                          */
7058 /* Returns:                                                                 */
7059 /*   Nothing.                                                               */
7060 /****************************************************************************/
7061 static void
7062 bce_dump_tpat_state(struct bce_softc *sc)
7063 {
7064 	struct ifnet *ifp = &sc->arpcom.ac_if;
7065 	uint32_t val1;
7066 	int i;
7067 
7068 	if_printf(ifp,
7069 	"----------------------------"
7070 	"   TPAT State   "
7071 	"----------------------------\n");
7072 
7073 	val1 = REG_RD_IND(sc, BCE_TPAT_CPU_MODE);
7074 	if_printf(ifp, "0x%08X - (0x%06X) tpat_cpu_mode\n",
7075 		  val1, BCE_TPAT_CPU_MODE);
7076 
7077 	val1 = REG_RD_IND(sc, BCE_TPAT_CPU_STATE);
7078 	if_printf(ifp, "0x%08X - (0x%06X) tpat_cpu_state\n",
7079 		  val1, BCE_TPAT_CPU_STATE);
7080 
7081 	val1 = REG_RD_IND(sc, BCE_TPAT_CPU_EVENT_MASK);
7082 	if_printf(ifp, "0x%08X - (0x%06X) tpat_cpu_event_mask\n",
7083 		  val1, BCE_TPAT_CPU_EVENT_MASK);
7084 
7085 	if_printf(ifp,
7086 	"----------------------------"
7087 	" Register  Dump "
7088 	"----------------------------\n");
7089 
7090 	for (i = BCE_TPAT_CPU_MODE; i < 0xa3fff; i += 0x10) {
7091 		/* Skip the big blank spaces */
7092 		if (i < 0x85400 || i > 0x9ffff) {
7093 			if_printf(ifp, "0x%04X: "
7094 				  "0x%08X 0x%08X 0x%08X 0x%08X\n", i,
7095 				  REG_RD_IND(sc, i),
7096 				  REG_RD_IND(sc, i + 0x4),
7097 				  REG_RD_IND(sc, i + 0x8),
7098 				  REG_RD_IND(sc, i + 0xc));
7099 		}
7100 	}
7101 
7102 	if_printf(ifp,
7103 	"----------------------------"
7104 	"----------------"
7105 	"----------------------------\n");
7106 }
7107 
7108 
7109 /****************************************************************************/
7110 /* Prints out the driver state and then enters the debugger.                */
7111 /*                                                                          */
7112 /* Returns:                                                                 */
7113 /*   Nothing.                                                               */
7114 /****************************************************************************/
7115 static void
7116 bce_breakpoint(struct bce_softc *sc)
7117 {
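	/*
	 * Dump the driver, status block, tx chain, hardware, and TXP state,
	 * then drop into the kernel debugger.  Only built when BCE_DEBUG is
	 * defined.
	 */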
7118 #if 0
7119 	bce_freeze_controller(sc);
7120 #endif
7121 
7122 	bce_dump_driver_state(sc);
7123 	bce_dump_status_block(sc);
7124 	bce_dump_tx_chain(sc, 0, TOTAL_TX_BD);
7125 	bce_dump_hw_state(sc);
7126 	bce_dump_txp_state(sc);
7127 
7128 #if 0
7129 	bce_unfreeze_controller(sc);
7130 #endif
7131 
7132 	/* Call the debugger. */
7133 	breakpoint();
7134 }
7135 
7136 #endif	/* BCE_DEBUG */
7137