xref: /dragonfly/sys/dev/netif/bce/if_bce.c (revision 0cfebe3d)
1 /*-
2  * Copyright (c) 2006-2007 Broadcom Corporation
3  *	David Christensen <davidch@broadcom.com>.  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  *
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. Neither the name of Broadcom Corporation nor the name of its contributors
15  *    may be used to endorse or promote products derived from this software
16  *    without specific prior written consent.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS'
19  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
22  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28  * THE POSSIBILITY OF SUCH DAMAGE.
29  *
30  * $FreeBSD: src/sys/dev/bce/if_bce.c,v 1.31 2007/05/16 23:34:11 davidch Exp $
31  * $DragonFly: src/sys/dev/netif/bce/if_bce.c,v 1.3 2008/03/10 12:59:51 sephe Exp $
32  */
33 
34 /*
35  * The following controllers are supported by this driver:
36  *   BCM5706C A2, A3
37  *   BCM5708C B1, B2
38  *
39  * The following controllers are not supported by this driver:
40  *   BCM5706C A0, A1
41  *   BCM5706S A0, A1, A2, A3
42  *   BCM5708C A0, B0
43  *   BCM5708S A0, B0, B1, B2
44  */
45 
46 #include "opt_bce.h"
47 #include "opt_polling.h"
48 
49 #include <sys/param.h>
50 #include <sys/bus.h>
51 #include <sys/endian.h>
52 #include <sys/kernel.h>
53 #include <sys/mbuf.h>
54 #include <sys/malloc.h>
55 #include <sys/queue.h>
56 #ifdef BCE_DEBUG
57 #include <sys/random.h>
58 #endif
59 #include <sys/rman.h>
60 #include <sys/serialize.h>
61 #include <sys/socket.h>
62 #include <sys/sockio.h>
63 #include <sys/sysctl.h>
64 
65 #include <net/bpf.h>
66 #include <net/ethernet.h>
67 #include <net/if.h>
68 #include <net/if_arp.h>
69 #include <net/if_dl.h>
70 #include <net/if_media.h>
71 #include <net/if_types.h>
72 #include <net/ifq_var.h>
73 #include <net/vlan/if_vlan_var.h>
74 #include <net/vlan/if_vlan_ether.h>
75 
76 #include <dev/netif/mii_layer/mii.h>
77 #include <dev/netif/mii_layer/miivar.h>
78 
79 #include <bus/pci/pcireg.h>
80 #include <bus/pci/pcivar.h>
81 
82 #include "miibus_if.h"
83 
84 #include "if_bcereg.h"
85 #include "if_bcefw.h"
86 
87 /****************************************************************************/
88 /* BCE Debug Options                                                        */
89 /****************************************************************************/
90 #ifdef BCE_DEBUG
91 
92 static uint32_t	bce_debug = BCE_WARN;
93 
94 /*
95  *          0 = Never
96  *          1 = 1 in 2,147,483,648
97  *        256 = 1 in     8,388,608
98  *       2048 = 1 in     1,048,576
99  *      65536 = 1 in        32,768
100  *    1048576 = 1 in         2,048
101  *  268435456 = 1 in             8
102  *  536870912 = 1 in             4
103  * 1073741824 = 1 in             2
104  *
105  * bce_debug_l2fhdr_status_check:
106  *     How often the l2_fhdr frame error check will fail.
107  *
108  * bce_debug_unexpected_attention:
109  *     How often the unexpected attention check will fail.
110  *
111  * bce_debug_mbuf_allocation_failure:
112  *     How often to simulate an mbuf allocation failure.
113  *
114  * bce_debug_dma_map_addr_failure:
115  *     How often to simulate a DMA mapping failure.
116  *
117  * bce_debug_bootcode_running_failure:
118  *     How often to simulate a bootcode failure.
119  */
120 static int	bce_debug_l2fhdr_status_check = 0;
121 static int	bce_debug_unexpected_attention = 0;
122 static int	bce_debug_mbuf_allocation_failure = 0;
123 static int	bce_debug_dma_map_addr_failure = 0;
124 static int	bce_debug_bootcode_running_failure = 0;
125 
126 #endif	/* BCE_DEBUG */
127 
128 
129 /****************************************************************************/
130 /* PCI Device ID Table                                                      */
131 /*                                                                          */
132 /* Used by bce_probe() to identify the devices supported by this driver.    */
133 /****************************************************************************/
134 #define BCE_DEVDESC_MAX		64
135 
136 static struct bce_type bce_devs[] = {
137 	/* BCM5706C Controllers and OEM boards. */
138 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706,  HP_VENDORID, 0x3101,
139 		"HP NC370T Multifunction Gigabit Server Adapter" },
140 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706,  HP_VENDORID, 0x3106,
141 		"HP NC370i Multifunction Gigabit Server Adapter" },
142 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706,  PCI_ANY_ID,  PCI_ANY_ID,
143 		"Broadcom NetXtreme II BCM5706 1000Base-T" },
144 
145 	/* BCM5706S controllers and OEM boards. */
146 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, HP_VENDORID, 0x3102,
147 		"HP NC370F Multifunction Gigabit Server Adapter" },
148 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, PCI_ANY_ID,  PCI_ANY_ID,
149 		"Broadcom NetXtreme II BCM5706 1000Base-SX" },
150 
151 	/* BCM5708C controllers and OEM boards. */
152 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708,  PCI_ANY_ID,  PCI_ANY_ID,
153 		"Broadcom NetXtreme II BCM5708 1000Base-T" },
154 
155 	/* BCM5708S controllers and OEM boards. */
156 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708S,  PCI_ANY_ID,  PCI_ANY_ID,
157 		"Broadcom NetXtreme II BCM5708S 1000Base-T" },
158 	{ 0, 0, 0, 0, NULL }
159 };
160 
161 
162 /****************************************************************************/
163 /* Supported Flash NVRAM device data.                                       */
164 /****************************************************************************/
165 static const struct flash_spec flash_table[] =
166 {
167 	/* Slow EEPROM */
168 	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
169 	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
170 	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
171 	 "EEPROM - slow"},
172 	/* Expansion entry 0001 */
173 	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
174 	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
175 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
176 	 "Entry 0001"},
177 	/* Saifun SA25F010 (non-buffered flash) */
178 	/* strap, cfg1, & write1 need updates */
179 	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
180 	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
181 	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
182 	 "Non-buffered flash (128kB)"},
183 	/* Saifun SA25F020 (non-buffered flash) */
184 	/* strap, cfg1, & write1 need updates */
185 	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
186 	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
187 	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
188 	 "Non-buffered flash (256kB)"},
189 	/* Expansion entry 0100 */
190 	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
191 	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
192 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
193 	 "Entry 0100"},
194 	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
195 	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
196 	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
197 	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
198 	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
199 	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
200 	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
201 	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
202 	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
203 	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
204 	/* Saifun SA25F005 (non-buffered flash) */
205 	/* strap, cfg1, & write1 need updates */
206 	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
207 	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
208 	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
209 	 "Non-buffered flash (64kB)"},
210 	/* Fast EEPROM */
211 	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
212 	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
213 	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
214 	 "EEPROM - fast"},
215 	/* Expansion entry 1001 */
216 	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
217 	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
218 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
219 	 "Entry 1001"},
220 	/* Expansion entry 1010 */
221 	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
222 	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
223 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
224 	 "Entry 1010"},
225 	/* ATMEL AT45DB011B (buffered flash) */
226 	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
227 	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
228 	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
229 	 "Buffered flash (128kB)"},
230 	/* Expansion entry 1100 */
231 	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
232 	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
233 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
234 	 "Entry 1100"},
235 	/* Expansion entry 1101 */
236 	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
237 	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
238 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
239 	 "Entry 1101"},
240 	/* Ateml Expansion entry 1110 */
241 	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
242 	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
243 	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
244 	 "Entry 1110 (Atmel)"},
245 	/* ATMEL AT45DB021B (buffered flash) */
246 	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
247 	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
248 	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
249 	 "Buffered flash (256kB)"},
250 };
251 
252 
253 /****************************************************************************/
254 /* DragonFly device entry points.                                           */
255 /****************************************************************************/
256 static int	bce_probe(device_t);
257 static int	bce_attach(device_t);
258 static int	bce_detach(device_t);
259 static void	bce_shutdown(device_t);
260 
261 /****************************************************************************/
262 /* BCE Debug Data Structure Dump Routines                                   */
263 /****************************************************************************/
264 #ifdef BCE_DEBUG
265 static void	bce_dump_mbuf(struct bce_softc *, struct mbuf *);
266 static void	bce_dump_tx_mbuf_chain(struct bce_softc *, int, int);
267 static void	bce_dump_rx_mbuf_chain(struct bce_softc *, int, int);
268 static void	bce_dump_txbd(struct bce_softc *, int, struct tx_bd *);
269 static void	bce_dump_rxbd(struct bce_softc *, int, struct rx_bd *);
270 static void	bce_dump_l2fhdr(struct bce_softc *, int,
271 				struct l2_fhdr *) __unused;
272 static void	bce_dump_tx_chain(struct bce_softc *, int, int);
273 static void	bce_dump_rx_chain(struct bce_softc *, int, int);
274 static void	bce_dump_status_block(struct bce_softc *);
275 static void	bce_dump_driver_state(struct bce_softc *);
276 static void	bce_dump_stats_block(struct bce_softc *) __unused;
277 static void	bce_dump_hw_state(struct bce_softc *);
278 static void	bce_dump_txp_state(struct bce_softc *);
279 static void	bce_dump_rxp_state(struct bce_softc *) __unused;
280 static void	bce_dump_tpat_state(struct bce_softc *) __unused;
281 static void	bce_freeze_controller(struct bce_softc *) __unused;
282 static void	bce_unfreeze_controller(struct bce_softc *) __unused;
283 static void	bce_breakpoint(struct bce_softc *);
284 #endif	/* BCE_DEBUG */
285 
286 
287 /****************************************************************************/
288 /* BCE Register/Memory Access Routines                                      */
289 /****************************************************************************/
290 static uint32_t	bce_reg_rd_ind(struct bce_softc *, uint32_t);
291 static void	bce_reg_wr_ind(struct bce_softc *, uint32_t, uint32_t);
292 static void	bce_ctx_wr(struct bce_softc *, uint32_t, uint32_t, uint32_t);
293 static int	bce_miibus_read_reg(device_t, int, int);
294 static int	bce_miibus_write_reg(device_t, int, int, int);
295 static void	bce_miibus_statchg(device_t);
296 
297 
298 /****************************************************************************/
299 /* BCE NVRAM Access Routines                                                */
300 /****************************************************************************/
301 static int	bce_acquire_nvram_lock(struct bce_softc *);
302 static int	bce_release_nvram_lock(struct bce_softc *);
303 static void	bce_enable_nvram_access(struct bce_softc *);
304 static void	bce_disable_nvram_access(struct bce_softc *);
305 static int	bce_nvram_read_dword(struct bce_softc *, uint32_t, uint8_t *,
306 				     uint32_t);
307 static int	bce_init_nvram(struct bce_softc *);
308 static int	bce_nvram_read(struct bce_softc *, uint32_t, uint8_t *, int);
309 static int	bce_nvram_test(struct bce_softc *);
310 #ifdef BCE_NVRAM_WRITE_SUPPORT
311 static int	bce_enable_nvram_write(struct bce_softc *);
312 static void	bce_disable_nvram_write(struct bce_softc *);
313 static int	bce_nvram_erase_page(struct bce_softc *, uint32_t);
314 static int	bce_nvram_write_dword(struct bce_softc *, uint32_t, uint8_t *,					      uint32_t);
315 static int	bce_nvram_write(struct bce_softc *, uint32_t, uint8_t *,
316 				int) __unused;
317 #endif
318 
319 /****************************************************************************/
320 /* BCE DMA Allocate/Free Routines                                           */
321 /****************************************************************************/
322 static int	bce_dma_alloc(struct bce_softc *);
323 static void	bce_dma_free(struct bce_softc *);
324 static void	bce_dma_map_addr(void *, bus_dma_segment_t *, int, int);
325 static void	bce_dma_map_mbuf(void *, bus_dma_segment_t *, int,
326 				 bus_size_t, int);
327 
328 /****************************************************************************/
329 /* BCE Firmware Synchronization and Load                                    */
330 /****************************************************************************/
331 static int	bce_fw_sync(struct bce_softc *, uint32_t);
332 static void	bce_load_rv2p_fw(struct bce_softc *, uint32_t *,
333 				 uint32_t, uint32_t);
334 static void	bce_load_cpu_fw(struct bce_softc *, struct cpu_reg *,
335 				struct fw_info *);
336 static void	bce_init_cpus(struct bce_softc *);
337 
338 static void	bce_stop(struct bce_softc *);
339 static int	bce_reset(struct bce_softc *, uint32_t);
340 static int	bce_chipinit(struct bce_softc *);
341 static int	bce_blockinit(struct bce_softc *);
342 static int	bce_newbuf_std(struct bce_softc *, struct mbuf *,
343 			       uint16_t *, uint16_t *, uint32_t *);
344 
345 static int	bce_init_tx_chain(struct bce_softc *);
346 static int	bce_init_rx_chain(struct bce_softc *);
347 static void	bce_free_rx_chain(struct bce_softc *);
348 static void	bce_free_tx_chain(struct bce_softc *);
349 
350 static int	bce_encap(struct bce_softc *, struct mbuf **);
351 static void	bce_start(struct ifnet *);
352 static int	bce_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
353 static void	bce_watchdog(struct ifnet *);
354 static int	bce_ifmedia_upd(struct ifnet *);
355 static void	bce_ifmedia_sts(struct ifnet *, struct ifmediareq *);
356 static void	bce_init(void *);
357 static void	bce_mgmt_init(struct bce_softc *);
358 
359 static void	bce_init_context(struct bce_softc *);
360 static void	bce_get_mac_addr(struct bce_softc *);
361 static void	bce_set_mac_addr(struct bce_softc *);
362 static void	bce_phy_intr(struct bce_softc *);
363 static void	bce_rx_intr(struct bce_softc *, int);
364 static void	bce_tx_intr(struct bce_softc *);
365 static void	bce_disable_intr(struct bce_softc *);
366 static void	bce_enable_intr(struct bce_softc *);
367 
368 #ifdef DEVICE_POLLING
369 static void	bce_poll(struct ifnet *, enum poll_cmd, int);
370 #endif
371 static void	bce_intr(void *);
372 static void	bce_set_rx_mode(struct bce_softc *);
373 static void	bce_stats_update(struct bce_softc *);
374 static void	bce_tick(void *);
375 static void	bce_tick_serialized(struct bce_softc *);
376 static void	bce_add_sysctls(struct bce_softc *);
377 
378 
379 /****************************************************************************/
380 /* DragonFly device dispatch table.                                         */
381 /****************************************************************************/
382 static device_method_t bce_methods[] = {
383 	/* Device interface */
384 	DEVMETHOD(device_probe,		bce_probe),
385 	DEVMETHOD(device_attach,	bce_attach),
386 	DEVMETHOD(device_detach,	bce_detach),
387 	DEVMETHOD(device_shutdown,	bce_shutdown),
388 
389 	/* bus interface */
390 	DEVMETHOD(bus_print_child,	bus_generic_print_child),
391 	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
392 
393 	/* MII interface */
394 	DEVMETHOD(miibus_readreg,	bce_miibus_read_reg),
395 	DEVMETHOD(miibus_writereg,	bce_miibus_write_reg),
396 	DEVMETHOD(miibus_statchg,	bce_miibus_statchg),
397 
398 	{ 0, 0 }
399 };
400 
401 static driver_t bce_driver = {
402 	"bce",
403 	bce_methods,
404 	sizeof(struct bce_softc)
405 };
406 
407 static devclass_t bce_devclass;
408 
409 MODULE_DEPEND(bce, pci, 1, 1, 1);
410 MODULE_DEPEND(bce, ether, 1, 1, 1);
411 MODULE_DEPEND(bce, miibus, 1, 1, 1);
412 
413 DRIVER_MODULE(bce, pci, bce_driver, bce_devclass, 0, 0);
414 DRIVER_MODULE(miibus, bce, miibus_driver, miibus_devclass, 0, 0);
415 
416 
417 /****************************************************************************/
418 /* Device probe function.                                                   */
419 /*                                                                          */
420 /* Compares the device to the driver's list of supported devices and        */
421 /* reports back to the OS whether this is the right driver for the device.  */
422 /*                                                                          */
423 /* Returns:                                                                 */
424 /*   BUS_PROBE_DEFAULT on success, positive value on failure.               */
425 /****************************************************************************/
426 static int
427 bce_probe(device_t dev)
428 {
429 	struct bce_type *t;
430 	uint16_t vid, did, svid, sdid;
431 
432 	/* Get the data for the device to be probed. */
433 	vid  = pci_get_vendor(dev);
434 	did  = pci_get_device(dev);
435 	svid = pci_get_subvendor(dev);
436 	sdid = pci_get_subdevice(dev);
437 
438 	/* Look through the list of known devices for a match. */
439 	for (t = bce_devs; t->bce_name != NULL; ++t) {
440 		if (vid == t->bce_vid && did == t->bce_did &&
441 		    (svid == t->bce_svid || t->bce_svid == PCI_ANY_ID) &&
442 		    (sdid == t->bce_sdid || t->bce_sdid == PCI_ANY_ID)) {
443 		    	uint32_t revid = pci_read_config(dev, PCIR_REVID, 4);
444 			char *descbuf;
445 
446 			descbuf = kmalloc(BCE_DEVDESC_MAX, M_TEMP, M_WAITOK);
447 
448 			/* Print out the device identity. */
449 			ksnprintf(descbuf, BCE_DEVDESC_MAX, "%s (%c%d)",
450 				  t->bce_name,
451 				  ((revid & 0xf0) >> 4) + 'A', revid & 0xf);
452 
453 			device_set_desc_copy(dev, descbuf);
454 			kfree(descbuf, M_TEMP);
455 			return 0;
456 		}
457 	}
458 	return ENXIO;
459 }
460 
461 
462 /****************************************************************************/
463 /* Device attach function.                                                  */
464 /*                                                                          */
465 /* Allocates device resources, performs secondary chip identification,      */
466 /* resets and initializes the hardware, and initializes driver instance     */
467 /* variables.                                                               */
468 /*                                                                          */
469 /* Returns:                                                                 */
470 /*   0 on success, positive value on failure.                               */
471 /****************************************************************************/
472 static int
473 bce_attach(device_t dev)
474 {
475 	struct bce_softc *sc = device_get_softc(dev);
476 	struct ifnet *ifp = &sc->arpcom.ac_if;
477 	uint32_t val;
478 	int rid, rc = 0;
479 #ifdef notyet
480 	int count;
481 #endif
482 
483 	sc->bce_dev = dev;
484 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
485 
486 	pci_enable_busmaster(dev);
487 
488 	/* Allocate PCI memory resources. */
489 	rid = PCIR_BAR(0);
490 	sc->bce_res_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
491 						 RF_ACTIVE | PCI_RF_DENSE);
492 	if (sc->bce_res_mem == NULL) {
493 		device_printf(dev, "PCI memory allocation failed\n");
494 		return ENXIO;
495 	}
496 	sc->bce_btag = rman_get_bustag(sc->bce_res_mem);
497 	sc->bce_bhandle = rman_get_bushandle(sc->bce_res_mem);
498 
499 	/* Allocate PCI IRQ resources. */
500 #ifdef notyet
501 	count = pci_msi_count(dev);
502 	if (count == 1 && pci_alloc_msi(dev, &count) == 0) {
503 		rid = 1;
504 		sc->bce_flags |= BCE_USING_MSI_FLAG;
505 	} else
506 #endif
507 	rid = 0;
508 	sc->bce_res_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
509 						 RF_SHAREABLE | RF_ACTIVE);
510 	if (sc->bce_res_irq == NULL) {
511 		device_printf(dev, "PCI map interrupt failed\n");
512 		rc = ENXIO;
513 		goto fail;
514 	}
515 
516 	/*
517 	 * Configure byte swap and enable indirect register access.
518 	 * Rely on CPU to do target byte swapping on big endian systems.
519 	 * Access to registers outside of PCI configurtion space are not
520 	 * valid until this is done.
521 	 */
522 	pci_write_config(dev, BCE_PCICFG_MISC_CONFIG,
523 			 BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
524 			 BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP, 4);
525 
526 	/* Save ASIC revsion info. */
527 	sc->bce_chipid =  REG_RD(sc, BCE_MISC_ID);
528 
529 	/* Weed out any non-production controller revisions. */
530 	switch(BCE_CHIP_ID(sc)) {
531 	case BCE_CHIP_ID_5706_A0:
532 	case BCE_CHIP_ID_5706_A1:
533 	case BCE_CHIP_ID_5708_A0:
534 	case BCE_CHIP_ID_5708_B0:
535 		device_printf(dev, "Unsupported chip id 0x%08x!\n",
536 			      BCE_CHIP_ID(sc));
537 		rc = ENODEV;
538 		goto fail;
539 	}
540 
541 	/*
542 	 * The embedded PCIe to PCI-X bridge (EPB)
543 	 * in the 5708 cannot address memory above
544 	 * 40 bits (E7_5708CB1_23043 & E6_5708SB1_23043).
545 	 */
546 	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708)
547 		sc->max_bus_addr = BCE_BUS_SPACE_MAXADDR;
548 	else
549 		sc->max_bus_addr = BUS_SPACE_MAXADDR;
550 
551 	/*
552 	 * Find the base address for shared memory access.
553 	 * Newer versions of bootcode use a signature and offset
554 	 * while older versions use a fixed address.
555 	 */
556 	val = REG_RD_IND(sc, BCE_SHM_HDR_SIGNATURE);
557 	if ((val & BCE_SHM_HDR_SIGNATURE_SIG_MASK) == BCE_SHM_HDR_SIGNATURE_SIG)
558 		sc->bce_shmem_base = REG_RD_IND(sc, BCE_SHM_HDR_ADDR_0);
559 	else
560 		sc->bce_shmem_base = HOST_VIEW_SHMEM_BASE;
561 
562 	DBPRINT(sc, BCE_INFO, "bce_shmem_base = 0x%08X\n", sc->bce_shmem_base);
563 
564 	/* Get PCI bus information (speed and type). */
565 	val = REG_RD(sc, BCE_PCICFG_MISC_STATUS);
566 	if (val & BCE_PCICFG_MISC_STATUS_PCIX_DET) {
567 		uint32_t clkreg;
568 
569 		sc->bce_flags |= BCE_PCIX_FLAG;
570 
571 		clkreg = REG_RD(sc, BCE_PCICFG_PCI_CLOCK_CONTROL_BITS) &
572 			 BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
573 		switch (clkreg) {
574 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
575 			sc->bus_speed_mhz = 133;
576 			break;
577 
578 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
579 			sc->bus_speed_mhz = 100;
580 			break;
581 
582 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
583 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
584 			sc->bus_speed_mhz = 66;
585 			break;
586 
587 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
588 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
589 			sc->bus_speed_mhz = 50;
590 			break;
591 
592 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
593 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
594 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
595 			sc->bus_speed_mhz = 33;
596 			break;
597 		}
598 	} else {
599 		if (val & BCE_PCICFG_MISC_STATUS_M66EN)
600 			sc->bus_speed_mhz = 66;
601 		else
602 			sc->bus_speed_mhz = 33;
603 	}
604 
605 	if (val & BCE_PCICFG_MISC_STATUS_32BIT_DET)
606 		sc->bce_flags |= BCE_PCI_32BIT_FLAG;
607 
608 	device_printf(dev, "ASIC ID 0x%08X; Revision (%c%d); PCI%s %s %dMHz\n",
609 		      sc->bce_chipid,
610 		      ((BCE_CHIP_ID(sc) & 0xf000) >> 12) + 'A',
611 		      (BCE_CHIP_ID(sc) & 0x0ff0) >> 4,
612 		      (sc->bce_flags & BCE_PCIX_FLAG) ? "-X" : "",
613 		      (sc->bce_flags & BCE_PCI_32BIT_FLAG) ?
614 		      "32-bit" : "64-bit", sc->bus_speed_mhz);
615 
616 	/* Reset the controller. */
617 	rc = bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
618 	if (rc != 0)
619 		goto fail;
620 
621 	/* Initialize the controller. */
622 	rc = bce_chipinit(sc);
623 	if (rc != 0) {
624 		device_printf(dev, "Controller initialization failed!\n");
625 		goto fail;
626 	}
627 
628 	/* Perform NVRAM test. */
629 	rc = bce_nvram_test(sc);
630 	if (rc != 0) {
631 		device_printf(dev, "NVRAM test failed!\n");
632 		goto fail;
633 	}
634 
635 	/* Fetch the permanent Ethernet MAC address. */
636 	bce_get_mac_addr(sc);
637 
638 	/*
639 	 * Trip points control how many BDs
640 	 * should be ready before generating an
641 	 * interrupt while ticks control how long
642 	 * a BD can sit in the chain before
643 	 * generating an interrupt.  Set the default
644 	 * values for the RX and TX rings.
645 	 */
646 
647 #ifdef BCE_DRBUG
648 	/* Force more frequent interrupts. */
649 	sc->bce_tx_quick_cons_trip_int = 1;
650 	sc->bce_tx_quick_cons_trip     = 1;
651 	sc->bce_tx_ticks_int           = 0;
652 	sc->bce_tx_ticks               = 0;
653 
654 	sc->bce_rx_quick_cons_trip_int = 1;
655 	sc->bce_rx_quick_cons_trip     = 1;
656 	sc->bce_rx_ticks_int           = 0;
657 	sc->bce_rx_ticks               = 0;
658 #else
659 	sc->bce_tx_quick_cons_trip_int = 20;
660 	sc->bce_tx_quick_cons_trip     = 20;
661 	sc->bce_tx_ticks_int           = 80;
662 	sc->bce_tx_ticks               = 80;
663 
664 	sc->bce_rx_quick_cons_trip_int = 6;
665 	sc->bce_rx_quick_cons_trip     = 6;
666 	sc->bce_rx_ticks_int           = 18;
667 	sc->bce_rx_ticks               = 18;
668 #endif
669 
670 	/* Update statistics once every second. */
671 	sc->bce_stats_ticks = 1000000 & 0xffff00;
672 
673 	/*
674 	 * The copper based NetXtreme II controllers
675 	 * use an integrated PHY at address 1 while
676 	 * the SerDes controllers use a PHY at
677 	 * address 2.
678 	 */
679 	sc->bce_phy_addr = 1;
680 
681 	if (BCE_CHIP_BOND_ID(sc) & BCE_CHIP_BOND_ID_SERDES_BIT) {
682 		sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
683 		sc->bce_flags |= BCE_NO_WOL_FLAG;
684 		if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708) {
685 			sc->bce_phy_addr = 2;
686 			val = REG_RD_IND(sc, sc->bce_shmem_base +
687 					 BCE_SHARED_HW_CFG_CONFIG);
688 			if (val & BCE_SHARED_HW_CFG_PHY_2_5G)
689 				sc->bce_phy_flags |= BCE_PHY_2_5G_CAPABLE_FLAG;
690 		}
691 	}
692 
693 	/* Allocate DMA memory resources. */
694 	rc = bce_dma_alloc(sc);
695 	if (rc != 0) {
696 		device_printf(dev, "DMA resource allocation failed!\n");
697 		goto fail;
698 	}
699 
700 	/* Initialize the ifnet interface. */
701 	ifp->if_softc = sc;
702 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
703 	ifp->if_ioctl = bce_ioctl;
704 	ifp->if_start = bce_start;
705 	ifp->if_init = bce_init;
706 	ifp->if_watchdog = bce_watchdog;
707 #ifdef DEVICE_POLLING
708 	ifp->if_poll = bce_poll;
709 #endif
710 	ifp->if_mtu = ETHERMTU;
711 	ifp->if_hwassist = BCE_IF_HWASSIST;
712 	ifp->if_capabilities = BCE_IF_CAPABILITIES;
713 	ifp->if_capenable = ifp->if_capabilities;
714 	ifq_set_maxlen(&ifp->if_snd, USABLE_TX_BD);
715 	ifq_set_ready(&ifp->if_snd);
716 
717 	if (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG)
718 		ifp->if_baudrate = IF_Gbps(2.5);
719 	else
720 		ifp->if_baudrate = IF_Gbps(1);
721 
722 	/* Assume a standard 1500 byte MTU size for mbuf allocations. */
723 	sc->mbuf_alloc_size  = MCLBYTES;
724 
725 	/* Look for our PHY. */
726 	rc = mii_phy_probe(dev, &sc->bce_miibus,
727 			   bce_ifmedia_upd, bce_ifmedia_sts);
728 	if (rc != 0) {
729 		device_printf(dev, "PHY probe failed!\n");
730 		goto fail;
731 	}
732 
733 	/* Attach to the Ethernet interface list. */
734 	ether_ifattach(ifp, sc->eaddr, NULL);
735 
736 	callout_init(&sc->bce_stat_ch);
737 
738 	/* Hookup IRQ last. */
739 	rc = bus_setup_intr(dev, sc->bce_res_irq, INTR_NETSAFE, bce_intr, sc,
740 			    &sc->bce_intrhand, ifp->if_serializer);
741 	if (rc != 0) {
742 		device_printf(dev, "Failed to setup IRQ!\n");
743 		ether_ifdetach(ifp);
744 		goto fail;
745 	}
746 
747 	/* Print some important debugging info. */
748 	DBRUN(BCE_INFO, bce_dump_driver_state(sc));
749 
750 	/* Add the supported sysctls to the kernel. */
751 	bce_add_sysctls(sc);
752 
753 	/* Get the firmware running so IPMI still works */
754 	bce_mgmt_init(sc);
755 
756 	return 0;
757 fail:
758 	bce_detach(dev);
759 	return(rc);
760 }
761 
762 
763 /****************************************************************************/
764 /* Device detach function.                                                  */
765 /*                                                                          */
766 /* Stops the controller, resets the controller, and releases resources.     */
767 /*                                                                          */
768 /* Returns:                                                                 */
769 /*   0 on success, positive value on failure.                               */
770 /****************************************************************************/
771 static int
772 bce_detach(device_t dev)
773 {
774 	struct bce_softc *sc = device_get_softc(dev);
775 
776 	if (device_is_attached(dev)) {
777 		struct ifnet *ifp = &sc->arpcom.ac_if;
778 
779 		/* Stop and reset the controller. */
780 		lwkt_serialize_enter(ifp->if_serializer);
781 		bce_stop(sc);
782 		bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
783 		bus_teardown_intr(dev, sc->bce_res_irq, sc->bce_intrhand);
784 		lwkt_serialize_exit(ifp->if_serializer);
785 
786 		ether_ifdetach(ifp);
787 	}
788 
789 	/* If we have a child device on the MII bus remove it too. */
790 	if (sc->bce_miibus)
791 		device_delete_child(dev, sc->bce_miibus);
792 	bus_generic_detach(dev);
793 
794 	if (sc->bce_res_irq != NULL) {
795 		bus_release_resource(dev, SYS_RES_IRQ,
796 			sc->bce_flags & BCE_USING_MSI_FLAG ? 1 : 0,
797 			sc->bce_res_irq);
798 	}
799 
800 #ifdef notyet
801 	if (sc->bce_flags & BCE_USING_MSI_FLAG)
802 		pci_release_msi(dev);
803 #endif
804 
805 	if (sc->bce_res_mem != NULL) {
806 		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0),
807 				     sc->bce_res_mem);
808 	}
809 
810 	bce_dma_free(sc);
811 
812 	if (sc->bce_sysctl_tree != NULL)
813 		sysctl_ctx_free(&sc->bce_sysctl_ctx);
814 
815 	return 0;
816 }
817 
818 
819 /****************************************************************************/
820 /* Device shutdown function.                                                */
821 /*                                                                          */
822 /* Stops and resets the controller.                                         */
823 /*                                                                          */
824 /* Returns:                                                                 */
825 /*   Nothing                                                                */
826 /****************************************************************************/
827 static void
828 bce_shutdown(device_t dev)
829 {
830 	struct bce_softc *sc = device_get_softc(dev);
831 	struct ifnet *ifp = &sc->arpcom.ac_if;
832 
833 	lwkt_serialize_enter(ifp->if_serializer);
834 	bce_stop(sc);
835 	bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
836 	lwkt_serialize_exit(ifp->if_serializer);
837 }
838 
839 
840 /****************************************************************************/
841 /* Indirect register read.                                                  */
842 /*                                                                          */
843 /* Reads NetXtreme II registers using an index/data register pair in PCI    */
844 /* configuration space.  Using this mechanism avoids issues with posted     */
845 /* reads but is much slower than memory-mapped I/O.                         */
846 /*                                                                          */
847 /* Returns:                                                                 */
848 /*   The value of the register.                                             */
849 /****************************************************************************/
850 static uint32_t
851 bce_reg_rd_ind(struct bce_softc *sc, uint32_t offset)
852 {
853 	device_t dev = sc->bce_dev;
854 
855 	pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4);
856 #ifdef BCE_DEBUG
857 	{
858 		uint32_t val;
859 		val = pci_read_config(dev, BCE_PCICFG_REG_WINDOW, 4);
860 		DBPRINT(sc, BCE_EXCESSIVE,
861 			"%s(); offset = 0x%08X, val = 0x%08X\n",
862 			__func__, offset, val);
863 		return val;
864 	}
865 #else
866 	return pci_read_config(dev, BCE_PCICFG_REG_WINDOW, 4);
867 #endif
868 }
869 
870 
871 /****************************************************************************/
872 /* Indirect register write.                                                 */
873 /*                                                                          */
874 /* Writes NetXtreme II registers using an index/data register pair in PCI   */
875 /* configuration space.  Using this mechanism avoids issues with posted     */
876 /* writes but is muchh slower than memory-mapped I/O.                       */
877 /*                                                                          */
878 /* Returns:                                                                 */
879 /*   Nothing.                                                               */
880 /****************************************************************************/
881 static void
882 bce_reg_wr_ind(struct bce_softc *sc, uint32_t offset, uint32_t val)
883 {
884 	device_t dev = sc->bce_dev;
885 
886 	DBPRINT(sc, BCE_EXCESSIVE, "%s(); offset = 0x%08X, val = 0x%08X\n",
887 		__func__, offset, val);
888 
889 	pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4);
890 	pci_write_config(dev, BCE_PCICFG_REG_WINDOW, val, 4);
891 }
892 
893 
894 /****************************************************************************/
895 /* Context memory write.                                                    */
896 /*                                                                          */
897 /* The NetXtreme II controller uses context memory to track connection      */
898 /* information for L2 and higher network protocols.                         */
899 /*                                                                          */
900 /* Returns:                                                                 */
901 /*   Nothing.                                                               */
902 /****************************************************************************/
903 static void
904 bce_ctx_wr(struct bce_softc *sc, uint32_t cid_addr, uint32_t offset,
905 	   uint32_t val)
906 {
907 	DBPRINT(sc, BCE_EXCESSIVE, "%s(); cid_addr = 0x%08X, offset = 0x%08X, "
908 		"val = 0x%08X\n", __func__, cid_addr, offset, val);
909 
910 	offset += cid_addr;
911 	REG_WR(sc, BCE_CTX_DATA_ADR, offset);
912 	REG_WR(sc, BCE_CTX_DATA, val);
913 }
914 
915 
916 /****************************************************************************/
917 /* PHY register read.                                                       */
918 /*                                                                          */
919 /* Implements register reads on the MII bus.                                */
920 /*                                                                          */
921 /* Returns:                                                                 */
922 /*   The value of the register.                                             */
923 /****************************************************************************/
924 static int
925 bce_miibus_read_reg(device_t dev, int phy, int reg)
926 {
927 	struct bce_softc *sc = device_get_softc(dev);
928 	uint32_t val;
929 	int i;
930 
931 	/* Make sure we are accessing the correct PHY address. */
932 	if (phy != sc->bce_phy_addr) {
933 		DBPRINT(sc, BCE_VERBOSE,
934 			"Invalid PHY address %d for PHY read!\n", phy);
935 		return 0;
936 	}
937 
938 	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
939 		val = REG_RD(sc, BCE_EMAC_MDIO_MODE);
940 		val &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL;
941 
942 		REG_WR(sc, BCE_EMAC_MDIO_MODE, val);
943 		REG_RD(sc, BCE_EMAC_MDIO_MODE);
944 
945 		DELAY(40);
946 	}
947 
948 	val = BCE_MIPHY(phy) | BCE_MIREG(reg) |
949 	      BCE_EMAC_MDIO_COMM_COMMAND_READ | BCE_EMAC_MDIO_COMM_DISEXT |
950 	      BCE_EMAC_MDIO_COMM_START_BUSY;
951 	REG_WR(sc, BCE_EMAC_MDIO_COMM, val);
952 
953 	for (i = 0; i < BCE_PHY_TIMEOUT; i++) {
954 		DELAY(10);
955 
956 		val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
957 		if (!(val & BCE_EMAC_MDIO_COMM_START_BUSY)) {
958 			DELAY(5);
959 
960 			val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
961 			val &= BCE_EMAC_MDIO_COMM_DATA;
962 			break;
963 		}
964 	}
965 
966 	if (val & BCE_EMAC_MDIO_COMM_START_BUSY) {
967 		if_printf(&sc->arpcom.ac_if,
968 			  "Error: PHY read timeout! phy = %d, reg = 0x%04X\n",
969 			  phy, reg);
970 		val = 0x0;
971 	} else {
972 		val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
973 	}
974 
975 	DBPRINT(sc, BCE_EXCESSIVE,
976 		"%s(): phy = %d, reg = 0x%04X, val = 0x%04X\n",
977 		__func__, phy, (uint16_t)reg & 0xffff, (uint16_t) val & 0xffff);
978 
979 	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
980 		val = REG_RD(sc, BCE_EMAC_MDIO_MODE);
981 		val |= BCE_EMAC_MDIO_MODE_AUTO_POLL;
982 
983 		REG_WR(sc, BCE_EMAC_MDIO_MODE, val);
984 		REG_RD(sc, BCE_EMAC_MDIO_MODE);
985 
986 		DELAY(40);
987 	}
988 	return (val & 0xffff);
989 }
990 
991 
992 /****************************************************************************/
993 /* PHY register write.                                                      */
994 /*                                                                          */
995 /* Implements register writes on the MII bus.                               */
996 /*                                                                          */
997 /* Returns:                                                                 */
998 /*   The value of the register.                                             */
999 /****************************************************************************/
1000 static int
1001 bce_miibus_write_reg(device_t dev, int phy, int reg, int val)
1002 {
1003 	struct bce_softc *sc = device_get_softc(dev);
1004 	uint32_t val1;
1005 	int i;
1006 
1007 	/* Make sure we are accessing the correct PHY address. */
1008 	if (phy != sc->bce_phy_addr) {
1009 		DBPRINT(sc, BCE_WARN,
1010 			"Invalid PHY address %d for PHY write!\n", phy);
1011 		return(0);
1012 	}
1013 
1014 	DBPRINT(sc, BCE_EXCESSIVE,
1015 		"%s(): phy = %d, reg = 0x%04X, val = 0x%04X\n",
1016 		__func__, phy, (uint16_t)(reg & 0xffff),
1017 		(uint16_t)(val & 0xffff));
1018 
1019 	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1020 		val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE);
1021 		val1 &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL;
1022 
1023 		REG_WR(sc, BCE_EMAC_MDIO_MODE, val1);
1024 		REG_RD(sc, BCE_EMAC_MDIO_MODE);
1025 
1026 		DELAY(40);
1027 	}
1028 
1029 	val1 = BCE_MIPHY(phy) | BCE_MIREG(reg) | val |
1030 		BCE_EMAC_MDIO_COMM_COMMAND_WRITE |
1031 		BCE_EMAC_MDIO_COMM_START_BUSY | BCE_EMAC_MDIO_COMM_DISEXT;
1032 	REG_WR(sc, BCE_EMAC_MDIO_COMM, val1);
1033 
1034 	for (i = 0; i < BCE_PHY_TIMEOUT; i++) {
1035 		DELAY(10);
1036 
1037 		val1 = REG_RD(sc, BCE_EMAC_MDIO_COMM);
1038 		if (!(val1 & BCE_EMAC_MDIO_COMM_START_BUSY)) {
1039 			DELAY(5);
1040 			break;
1041 		}
1042 	}
1043 
1044 	if (val1 & BCE_EMAC_MDIO_COMM_START_BUSY)
1045 		if_printf(&sc->arpcom.ac_if, "PHY write timeout!\n");
1046 
1047 	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1048 		val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE);
1049 		val1 |= BCE_EMAC_MDIO_MODE_AUTO_POLL;
1050 
1051 		REG_WR(sc, BCE_EMAC_MDIO_MODE, val1);
1052 		REG_RD(sc, BCE_EMAC_MDIO_MODE);
1053 
1054 		DELAY(40);
1055 	}
1056 	return 0;
1057 }
1058 
1059 
1060 /****************************************************************************/
1061 /* MII bus status change.                                                   */
1062 /*                                                                          */
1063 /* Called by the MII bus driver when the PHY establishes link to set the    */
1064 /* MAC interface registers.                                                 */
1065 /*                                                                          */
1066 /* Returns:                                                                 */
1067 /*   Nothing.                                                               */
1068 /****************************************************************************/
1069 static void
1070 bce_miibus_statchg(device_t dev)
1071 {
1072 	struct bce_softc *sc = device_get_softc(dev);
1073 	struct mii_data *mii = device_get_softc(sc->bce_miibus);
1074 
1075 	DBPRINT(sc, BCE_INFO, "mii_media_active = 0x%08X\n",
1076 		mii->mii_media_active);
1077 
1078 #ifdef BCE_DEBUG
1079 	/* Decode the interface media flags. */
1080 	if_printf(&sc->arpcom.ac_if, "Media: ( ");
1081 	switch(IFM_TYPE(mii->mii_media_active)) {
1082 	case IFM_ETHER:
1083 		kprintf("Ethernet )");
1084 		break;
1085 	default:
1086 		kprintf("Unknown )");
1087 		break;
1088 	}
1089 
1090 	kprintf(" Media Options: ( ");
1091 	switch(IFM_SUBTYPE(mii->mii_media_active)) {
1092 	case IFM_AUTO:
1093 		kprintf("Autoselect )");
1094 		break;
1095 	case IFM_MANUAL:
1096 		kprintf("Manual )");
1097 		break;
1098 	case IFM_NONE:
1099 		kprintf("None )");
1100 		break;
1101 	case IFM_10_T:
1102 		kprintf("10Base-T )");
1103 		break;
1104 	case IFM_100_TX:
1105 		kprintf("100Base-TX )");
1106 		break;
1107 	case IFM_1000_SX:
1108 		kprintf("1000Base-SX )");
1109 		break;
1110 	case IFM_1000_T:
1111 		kprintf("1000Base-T )");
1112 		break;
1113 	default:
1114 		kprintf("Other )");
1115 		break;
1116 	}
1117 
1118 	kprintf(" Global Options: (");
1119 	if (mii->mii_media_active & IFM_FDX)
1120 		kprintf(" FullDuplex");
1121 	if (mii->mii_media_active & IFM_HDX)
1122 		kprintf(" HalfDuplex");
1123 	if (mii->mii_media_active & IFM_LOOP)
1124 		kprintf(" Loopback");
1125 	if (mii->mii_media_active & IFM_FLAG0)
1126 		kprintf(" Flag0");
1127 	if (mii->mii_media_active & IFM_FLAG1)
1128 		kprintf(" Flag1");
1129 	if (mii->mii_media_active & IFM_FLAG2)
1130 		kprintf(" Flag2");
1131 	kprintf(" )\n");
1132 #endif
1133 
1134 	BCE_CLRBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT);
1135 
1136 	/*
1137 	 * Set MII or GMII interface based on the speed negotiated
1138 	 * by the PHY.
1139 	 */
1140 	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
1141 	    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) {
1142 		DBPRINT(sc, BCE_INFO, "Setting GMII interface.\n");
1143 		BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT_GMII);
1144 	} else {
1145 		DBPRINT(sc, BCE_INFO, "Setting MII interface.\n");
1146 		BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT_MII);
1147 	}
1148 
1149 	/*
1150 	 * Set half or full duplex based on the duplicity negotiated
1151 	 * by the PHY.
1152 	 */
1153 	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
1154 		DBPRINT(sc, BCE_INFO, "Setting Full-Duplex interface.\n");
1155 		BCE_CLRBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_HALF_DUPLEX);
1156 	} else {
1157 		DBPRINT(sc, BCE_INFO, "Setting Half-Duplex interface.\n");
1158 		BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_HALF_DUPLEX);
1159 	}
1160 }
1161 
1162 
1163 /****************************************************************************/
1164 /* Acquire NVRAM lock.                                                      */
1165 /*                                                                          */
1166 /* Before the NVRAM can be accessed the caller must acquire an NVRAM lock.  */
1167 /* Locks 0 and 2 are reserved, lock 1 is used by firmware and lock 2 is     */
1168 /* for use by the driver.                                                   */
1169 /*                                                                          */
1170 /* Returns:                                                                 */
1171 /*   0 on success, positive value on failure.                               */
1172 /****************************************************************************/
1173 static int
1174 bce_acquire_nvram_lock(struct bce_softc *sc)
1175 {
1176 	uint32_t val;
1177 	int j;
1178 
1179 	DBPRINT(sc, BCE_VERBOSE, "Acquiring NVRAM lock.\n");
1180 
1181 	/* Request access to the flash interface. */
1182 	REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_SET2);
1183 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1184 		val = REG_RD(sc, BCE_NVM_SW_ARB);
1185 		if (val & BCE_NVM_SW_ARB_ARB_ARB2)
1186 			break;
1187 
1188 		DELAY(5);
1189 	}
1190 
1191 	if (j >= NVRAM_TIMEOUT_COUNT) {
1192 		DBPRINT(sc, BCE_WARN, "Timeout acquiring NVRAM lock!\n");
1193 		return EBUSY;
1194 	}
1195 	return 0;
1196 }
1197 
1198 
1199 /****************************************************************************/
1200 /* Release NVRAM lock.                                                      */
1201 /*                                                                          */
1202 /* When the caller is finished accessing NVRAM the lock must be released.   */
1203 /* Locks 0 and 2 are reserved, lock 1 is used by firmware and lock 2 is     */
1204 /* for use by the driver.                                                   */
1205 /*                                                                          */
1206 /* Returns:                                                                 */
1207 /*   0 on success, positive value on failure.                               */
1208 /****************************************************************************/
1209 static int
1210 bce_release_nvram_lock(struct bce_softc *sc)
1211 {
1212 	int j;
1213 	uint32_t val;
1214 
1215 	DBPRINT(sc, BCE_VERBOSE, "Releasing NVRAM lock.\n");
1216 
1217 	/*
1218 	 * Relinquish nvram interface.
1219 	 */
1220 	REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_CLR2);
1221 
1222 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1223 		val = REG_RD(sc, BCE_NVM_SW_ARB);
1224 		if (!(val & BCE_NVM_SW_ARB_ARB_ARB2))
1225 			break;
1226 
1227 		DELAY(5);
1228 	}
1229 
1230 	if (j >= NVRAM_TIMEOUT_COUNT) {
1231 		DBPRINT(sc, BCE_WARN, "Timeout reeasing NVRAM lock!\n");
1232 		return EBUSY;
1233 	}
1234 	return 0;
1235 }
1236 
1237 
1238 #ifdef BCE_NVRAM_WRITE_SUPPORT
1239 /****************************************************************************/
1240 /* Enable NVRAM write access.                                               */
1241 /*                                                                          */
1242 /* Before writing to NVRAM the caller must enable NVRAM writes.             */
1243 /*                                                                          */
1244 /* Returns:                                                                 */
1245 /*   0 on success, positive value on failure.                               */
1246 /****************************************************************************/
1247 static int
1248 bce_enable_nvram_write(struct bce_softc *sc)
1249 {
1250 	uint32_t val;
1251 
1252 	DBPRINT(sc, BCE_VERBOSE, "Enabling NVRAM write.\n");
1253 
1254 	val = REG_RD(sc, BCE_MISC_CFG);
1255 	REG_WR(sc, BCE_MISC_CFG, val | BCE_MISC_CFG_NVM_WR_EN_PCI);
1256 
1257 	if (!sc->bce_flash_info->buffered) {
1258 		int j;
1259 
1260 		REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
1261 		REG_WR(sc, BCE_NVM_COMMAND,
1262 		       BCE_NVM_COMMAND_WREN | BCE_NVM_COMMAND_DOIT);
1263 
1264 		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1265 			DELAY(5);
1266 
1267 			val = REG_RD(sc, BCE_NVM_COMMAND);
1268 			if (val & BCE_NVM_COMMAND_DONE)
1269 				break;
1270 		}
1271 
1272 		if (j >= NVRAM_TIMEOUT_COUNT) {
1273 			DBPRINT(sc, BCE_WARN, "Timeout writing NVRAM!\n");
1274 			return EBUSY;
1275 		}
1276 	}
1277 	return 0;
1278 }
1279 
1280 
1281 /****************************************************************************/
1282 /* Disable NVRAM write access.                                              */
1283 /*                                                                          */
1284 /* When the caller is finished writing to NVRAM write access must be        */
1285 /* disabled.                                                                */
1286 /*                                                                          */
1287 /* Returns:                                                                 */
1288 /*   Nothing.                                                               */
1289 /****************************************************************************/
1290 static void
1291 bce_disable_nvram_write(struct bce_softc *sc)
1292 {
1293 	uint32_t val;
1294 
1295 	DBPRINT(sc, BCE_VERBOSE, "Disabling NVRAM write.\n");
1296 
1297 	val = REG_RD(sc, BCE_MISC_CFG);
1298 	REG_WR(sc, BCE_MISC_CFG, val & ~BCE_MISC_CFG_NVM_WR_EN);
1299 }
1300 #endif	/* BCE_NVRAM_WRITE_SUPPORT */
1301 
1302 
1303 /****************************************************************************/
1304 /* Enable NVRAM access.                                                     */
1305 /*                                                                          */
1306 /* Before accessing NVRAM for read or write operations the caller must      */
1307 /* enabled NVRAM access.                                                    */
1308 /*                                                                          */
1309 /* Returns:                                                                 */
1310 /*   Nothing.                                                               */
1311 /****************************************************************************/
1312 static void
1313 bce_enable_nvram_access(struct bce_softc *sc)
1314 {
1315 	uint32_t val;
1316 
1317 	DBPRINT(sc, BCE_VERBOSE, "Enabling NVRAM access.\n");
1318 
1319 	val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE);
1320 	/* Enable both bits, even on read. */
1321 	REG_WR(sc, BCE_NVM_ACCESS_ENABLE,
1322 	       val | BCE_NVM_ACCESS_ENABLE_EN | BCE_NVM_ACCESS_ENABLE_WR_EN);
1323 }
1324 
1325 
1326 /****************************************************************************/
1327 /* Disable NVRAM access.                                                    */
1328 /*                                                                          */
1329 /* When the caller is finished accessing NVRAM access must be disabled.     */
1330 /*                                                                          */
1331 /* Returns:                                                                 */
1332 /*   Nothing.                                                               */
1333 /****************************************************************************/
1334 static void
1335 bce_disable_nvram_access(struct bce_softc *sc)
1336 {
1337 	uint32_t val;
1338 
1339 	DBPRINT(sc, BCE_VERBOSE, "Disabling NVRAM access.\n");
1340 
1341 	val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE);
1342 
1343 	/* Disable both bits, even after read. */
1344 	REG_WR(sc, BCE_NVM_ACCESS_ENABLE,
1345 	       val & ~(BCE_NVM_ACCESS_ENABLE_EN | BCE_NVM_ACCESS_ENABLE_WR_EN));
1346 }
1347 
1348 
1349 #ifdef BCE_NVRAM_WRITE_SUPPORT
1350 /****************************************************************************/
1351 /* Erase NVRAM page before writing.                                         */
1352 /*                                                                          */
1353 /* Non-buffered flash parts require that a page be erased before it is      */
1354 /* written.                                                                 */
1355 /*                                                                          */
1356 /* Returns:                                                                 */
1357 /*   0 on success, positive value on failure.                               */
1358 /****************************************************************************/
1359 static int
1360 bce_nvram_erase_page(struct bce_softc *sc, uint32_t offset)
1361 {
1362 	uint32_t cmd;
1363 	int j;
1364 
1365 	/* Buffered flash doesn't require an erase. */
1366 	if (sc->bce_flash_info->buffered)
1367 		return 0;
1368 
1369 	DBPRINT(sc, BCE_VERBOSE, "Erasing NVRAM page.\n");
1370 
1371 	/* Build an erase command. */
1372 	cmd = BCE_NVM_COMMAND_ERASE | BCE_NVM_COMMAND_WR |
1373 	      BCE_NVM_COMMAND_DOIT;
1374 
1375 	/*
1376 	 * Clear the DONE bit separately, set the NVRAM adress to erase,
1377 	 * and issue the erase command.
1378 	 */
1379 	REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
1380 	REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
1381 	REG_WR(sc, BCE_NVM_COMMAND, cmd);
1382 
1383 	/* Wait for completion. */
1384 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1385 		uint32_t val;
1386 
1387 		DELAY(5);
1388 
1389 		val = REG_RD(sc, BCE_NVM_COMMAND);
1390 		if (val & BCE_NVM_COMMAND_DONE)
1391 			break;
1392 	}
1393 
1394 	if (j >= NVRAM_TIMEOUT_COUNT) {
1395 		DBPRINT(sc, BCE_WARN, "Timeout erasing NVRAM.\n");
1396 		return EBUSY;
1397 	}
1398 	return 0;
1399 }
1400 #endif /* BCE_NVRAM_WRITE_SUPPORT */
1401 
1402 
1403 /****************************************************************************/
1404 /* Read a dword (32 bits) from NVRAM.                                       */
1405 /*                                                                          */
1406 /* Read a 32 bit word from NVRAM.  The caller is assumed to have already    */
1407 /* obtained the NVRAM lock and enabled the controller for NVRAM access.     */
1408 /*                                                                          */
1409 /* Returns:                                                                 */
1410 /*   0 on success and the 32 bit value read, positive value on failure.     */
1411 /****************************************************************************/
1412 static int
1413 bce_nvram_read_dword(struct bce_softc *sc, uint32_t offset, uint8_t *ret_val,
1414 		     uint32_t cmd_flags)
1415 {
1416 	uint32_t cmd;
1417 	int i, rc = 0;
1418 
1419 	/* Build the command word. */
1420 	cmd = BCE_NVM_COMMAND_DOIT | cmd_flags;
1421 
1422 	/* Calculate the offset for buffered flash. */
1423 	if (sc->bce_flash_info->buffered) {
1424 		offset = ((offset / sc->bce_flash_info->page_size) <<
1425 			  sc->bce_flash_info->page_bits) +
1426 			 (offset % sc->bce_flash_info->page_size);
1427 	}
1428 
1429 	/*
1430 	 * Clear the DONE bit separately, set the address to read,
1431 	 * and issue the read.
1432 	 */
1433 	REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
1434 	REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
1435 	REG_WR(sc, BCE_NVM_COMMAND, cmd);
1436 
1437 	/* Wait for completion. */
1438 	for (i = 0; i < NVRAM_TIMEOUT_COUNT; i++) {
1439 		uint32_t val;
1440 
1441 		DELAY(5);
1442 
1443 		val = REG_RD(sc, BCE_NVM_COMMAND);
1444 		if (val & BCE_NVM_COMMAND_DONE) {
1445 			val = REG_RD(sc, BCE_NVM_READ);
1446 
1447 			val = be32toh(val);
1448 			memcpy(ret_val, &val, 4);
1449 			break;
1450 		}
1451 	}
1452 
1453 	/* Check for errors. */
1454 	if (i >= NVRAM_TIMEOUT_COUNT) {
1455 		if_printf(&sc->arpcom.ac_if,
1456 			  "Timeout error reading NVRAM at offset 0x%08X!\n",
1457 			  offset);
1458 		rc = EBUSY;
1459 	}
1460 	return rc;
1461 }
1462 
1463 
1464 #ifdef BCE_NVRAM_WRITE_SUPPORT
1465 /****************************************************************************/
1466 /* Write a dword (32 bits) to NVRAM.                                        */
1467 /*                                                                          */
1468 /* Write a 32 bit word to NVRAM.  The caller is assumed to have already     */
1469 /* obtained the NVRAM lock, enabled the controller for NVRAM access, and    */
1470 /* enabled NVRAM write access.                                              */
1471 /*                                                                          */
1472 /* Returns:                                                                 */
1473 /*   0 on success, positive value on failure.                               */
1474 /****************************************************************************/
1475 static int
1476 bce_nvram_write_dword(struct bce_softc *sc, uint32_t offset, uint8_t *val,
1477 		      uint32_t cmd_flags)
1478 {
1479 	uint32_t cmd, val32;
1480 	int j;
1481 
1482 	/* Build the command word. */
1483 	cmd = BCE_NVM_COMMAND_DOIT | BCE_NVM_COMMAND_WR | cmd_flags;
1484 
1485 	/* Calculate the offset for buffered flash. */
1486 	if (sc->bce_flash_info->buffered) {
1487 		offset = ((offset / sc->bce_flash_info->page_size) <<
1488 			  sc->bce_flash_info->page_bits) +
1489 			 (offset % sc->bce_flash_info->page_size);
1490 	}
1491 
1492 	/*
1493 	 * Clear the DONE bit separately, convert NVRAM data to big-endian,
1494 	 * set the NVRAM address to write, and issue the write command
1495 	 */
1496 	REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
1497 	memcpy(&val32, val, 4);
1498 	val32 = htobe32(val32);
1499 	REG_WR(sc, BCE_NVM_WRITE, val32);
1500 	REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
1501 	REG_WR(sc, BCE_NVM_COMMAND, cmd);
1502 
1503 	/* Wait for completion. */
1504 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1505 		DELAY(5);
1506 
1507 		if (REG_RD(sc, BCE_NVM_COMMAND) & BCE_NVM_COMMAND_DONE)
1508 			break;
1509 	}
1510 	if (j >= NVRAM_TIMEOUT_COUNT) {
1511 		if_printf(&sc->arpcom.ac_if,
1512 			  "Timeout error writing NVRAM at offset 0x%08X\n",
1513 			  offset);
1514 		return EBUSY;
1515 	}
1516 	return 0;
1517 }
1518 #endif /* BCE_NVRAM_WRITE_SUPPORT */
1519 
1520 
1521 /****************************************************************************/
1522 /* Initialize NVRAM access.                                                 */
1523 /*                                                                          */
1524 /* Identify the NVRAM device in use and prepare the NVRAM interface to      */
1525 /* access that device.                                                      */
1526 /*                                                                          */
1527 /* Returns:                                                                 */
1528 /*   0 on success, positive value on failure.                               */
1529 /****************************************************************************/
1530 static int
1531 bce_init_nvram(struct bce_softc *sc)
1532 {
1533 	uint32_t val;
1534 	int j, entry_count, rc = 0;
1535 	const struct flash_spec *flash;
1536 
1537 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __func__);
1538 
1539 	/* Determine the selected interface. */
1540 	val = REG_RD(sc, BCE_NVM_CFG1);
1541 
1542 	entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
1543 
1544 	/*
1545 	 * Flash reconfiguration is required to support additional
1546 	 * NVRAM devices not directly supported in hardware.
1547 	 * Check if the flash interface was reconfigured
1548 	 * by the bootcode.
1549 	 */
1550 
1551 	if (val & 0x40000000) {
1552 		/* Flash interface reconfigured by bootcode. */
1553 
1554 		DBPRINT(sc, BCE_INFO_LOAD,
1555 			"%s(): Flash WAS reconfigured.\n", __func__);
1556 
1557 		for (j = 0, flash = flash_table; j < entry_count;
1558 		     j++, flash++) {
1559 			if ((val & FLASH_BACKUP_STRAP_MASK) ==
1560 			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
1561 				sc->bce_flash_info = flash;
1562 				break;
1563 			}
1564 		}
1565 	} else {
1566 		/* Flash interface not yet reconfigured. */
1567 		uint32_t mask;
1568 
1569 		DBPRINT(sc, BCE_INFO_LOAD,
1570 			"%s(): Flash was NOT reconfigured.\n", __func__);
1571 
1572 		if (val & (1 << 23))
1573 			mask = FLASH_BACKUP_STRAP_MASK;
1574 		else
1575 			mask = FLASH_STRAP_MASK;
1576 
1577 		/* Look for the matching NVRAM device configuration data. */
1578 		for (j = 0, flash = flash_table; j < entry_count;
1579 		     j++, flash++) {
1580 			/* Check if the device matches any of the known devices. */
1581 			if ((val & mask) == (flash->strapping & mask)) {
1582 				/* Found a device match. */
1583 				sc->bce_flash_info = flash;
1584 
1585 				/* Request access to the flash interface. */
1586 				rc = bce_acquire_nvram_lock(sc);
1587 				if (rc != 0)
1588 					return rc;
1589 
1590 				/* Reconfigure the flash interface. */
1591 				bce_enable_nvram_access(sc);
1592 				REG_WR(sc, BCE_NVM_CFG1, flash->config1);
1593 				REG_WR(sc, BCE_NVM_CFG2, flash->config2);
1594 				REG_WR(sc, BCE_NVM_CFG3, flash->config3);
1595 				REG_WR(sc, BCE_NVM_WRITE1, flash->write1);
1596 				bce_disable_nvram_access(sc);
1597 				bce_release_nvram_lock(sc);
1598 				break;
1599 			}
1600 		}
1601 	}
1602 
1603 	/* Check if a matching device was found. */
1604 	if (j == entry_count) {
1605 		sc->bce_flash_info = NULL;
1606 		if_printf(&sc->arpcom.ac_if, "Unknown Flash NVRAM found!\n");
1607 		rc = ENODEV;
1608 	}
1609 
1610 	/* Write the flash config data to the shared memory interface. */
1611 	val = REG_RD_IND(sc, sc->bce_shmem_base + BCE_SHARED_HW_CFG_CONFIG2) &
1612 	      BCE_SHARED_HW_CFG2_NVM_SIZE_MASK;
1613 	if (val)
1614 		sc->bce_flash_size = val;
1615 	else
1616 		sc->bce_flash_size = sc->bce_flash_info->total_size;
1617 
1618 	DBPRINT(sc, BCE_INFO_LOAD, "%s() flash->total_size = 0x%08X\n",
1619 		__func__, sc->bce_flash_info->total_size);
1620 
1621 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __func__);
1622 
1623 	return rc;
1624 }
1625 
1626 
1627 /****************************************************************************/
1628 /* Read an arbitrary range of data from NVRAM.                              */
1629 /*                                                                          */
1630 /* Prepares the NVRAM interface for access and reads the requested data     */
1631 /* into the supplied buffer.                                                */
1632 /*                                                                          */
1633 /* Returns:                                                                 */
1634 /*   0 on success and the data read, positive value on failure.             */
1635 /****************************************************************************/
1636 static int
1637 bce_nvram_read(struct bce_softc *sc, uint32_t offset, uint8_t *ret_buf,
1638 	       int buf_size)
1639 {
1640 	uint32_t cmd_flags, offset32, len32, extra;
1641 	int rc = 0;
1642 
1643 	if (buf_size == 0)
1644 		return 0;
1645 
1646 	/* Request access to the flash interface. */
1647 	rc = bce_acquire_nvram_lock(sc);
1648 	if (rc != 0)
1649 		return rc;
1650 
1651 	/* Enable access to flash interface */
1652 	bce_enable_nvram_access(sc);
1653 
1654 	len32 = buf_size;
1655 	offset32 = offset;
1656 	extra = 0;
1657 
1658 	cmd_flags = 0;
1659 
1660 	/* XXX should we release nvram lock if read_dword() fails? */
1661 	if (offset32 & 3) {
1662 		uint8_t buf[4];
1663 		uint32_t pre_len;
1664 
1665 		offset32 &= ~3;
1666 		pre_len = 4 - (offset & 3);
1667 
1668 		if (pre_len >= len32) {
1669 			pre_len = len32;
1670 			cmd_flags = BCE_NVM_COMMAND_FIRST | BCE_NVM_COMMAND_LAST;
1671 		} else {
1672 			cmd_flags = BCE_NVM_COMMAND_FIRST;
1673 		}
1674 
1675 		rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
1676 		if (rc)
1677 			return rc;
1678 
1679 		memcpy(ret_buf, buf + (offset & 3), pre_len);
1680 
1681 		offset32 += 4;
1682 		ret_buf += pre_len;
1683 		len32 -= pre_len;
1684 	}
1685 
1686 	if (len32 & 3) {
1687 		extra = 4 - (len32 & 3);
1688 		len32 = (len32 + 4) & ~3;
1689 	}
1690 
1691 	if (len32 == 4) {
1692 		uint8_t buf[4];
1693 
1694 		if (cmd_flags)
1695 			cmd_flags = BCE_NVM_COMMAND_LAST;
1696 		else
1697 			cmd_flags = BCE_NVM_COMMAND_FIRST |
1698 				    BCE_NVM_COMMAND_LAST;
1699 
1700 		rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
1701 
1702 		memcpy(ret_buf, buf, 4 - extra);
1703 	} else if (len32 > 0) {
1704 		uint8_t buf[4];
1705 
1706 		/* Read the first word. */
1707 		if (cmd_flags)
1708 			cmd_flags = 0;
1709 		else
1710 			cmd_flags = BCE_NVM_COMMAND_FIRST;
1711 
1712 		rc = bce_nvram_read_dword(sc, offset32, ret_buf, cmd_flags);
1713 
1714 		/* Advance to the next dword. */
1715 		offset32 += 4;
1716 		ret_buf += 4;
1717 		len32 -= 4;
1718 
1719 		while (len32 > 4 && rc == 0) {
1720 			rc = bce_nvram_read_dword(sc, offset32, ret_buf, 0);
1721 
1722 			/* Advance to the next dword. */
1723 			offset32 += 4;
1724 			ret_buf += 4;
1725 			len32 -= 4;
1726 		}
1727 
1728 		if (rc)
1729 			return rc;
1730 
1731 		cmd_flags = BCE_NVM_COMMAND_LAST;
1732 		rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
1733 
1734 		memcpy(ret_buf, buf, 4 - extra);
1735 	}
1736 
1737 	/* Disable access to flash interface and release the lock. */
1738 	bce_disable_nvram_access(sc);
1739 	bce_release_nvram_lock(sc);
1740 
1741 	return rc;
1742 }
1743 
1744 
1745 #ifdef BCE_NVRAM_WRITE_SUPPORT
1746 /****************************************************************************/
1747 /* Write an arbitrary range of data from NVRAM.                             */
1748 /*                                                                          */
1749 /* Prepares the NVRAM interface for write access and writes the requested   */
1750 /* data from the supplied buffer.  The caller is responsible for            */
1751 /* calculating any appropriate CRCs.                                        */
1752 /*                                                                          */
1753 /* Returns:                                                                 */
1754 /*   0 on success, positive value on failure.                               */
1755 /****************************************************************************/
1756 static int
1757 bce_nvram_write(struct bce_softc *sc, uint32_t offset, uint8_t *data_buf,
1758 		int buf_size)
1759 {
1760 	uint32_t written, offset32, len32;
1761 	uint8_t *buf, start[4], end[4];
1762 	int rc = 0;
1763 	int align_start, align_end;
1764 
1765 	buf = data_buf;
1766 	offset32 = offset;
1767 	len32 = buf_size;
1768 	align_end = 0;
1769 	align_start = (offset32 & 3);
1770 
1771 	if (align_start) {
1772 		offset32 &= ~3;
1773 		len32 += align_start;
1774 		rc = bce_nvram_read(sc, offset32, start, 4);
1775 		if (rc)
1776 			return rc;
1777 	}
1778 
1779 	if (len32 & 3) {
1780 	       	if (len32 > 4 || !align_start) {
1781 			align_end = 4 - (len32 & 3);
1782 			len32 += align_end;
1783 			rc = bce_nvram_read(sc, offset32 + len32 - 4, end, 4);
1784 			if (rc)
1785 				return rc;
1786 		}
1787 	}
1788 
1789 	if (align_start || align_end) {
1790 		buf = kmalloc(len32, M_DEVBUF, M_NOWAIT);
1791 		if (buf == NULL)
1792 			return ENOMEM;
1793 		if (align_start)
1794 			memcpy(buf, start, 4);
1795 		if (align_end)
1796 			memcpy(buf + len32 - 4, end, 4);
1797 		memcpy(buf + align_start, data_buf, buf_size);
1798 	}
1799 
1800 	written = 0;
1801 	while (written < len32 && rc == 0) {
1802 		uint32_t page_start, page_end, data_start, data_end;
1803 		uint32_t addr, cmd_flags;
1804 		int i;
1805 		uint8_t flash_buffer[264];
1806 
1807 		/* Find the page_start addr */
1808 		page_start = offset32 + written;
1809 		page_start -= (page_start % sc->bce_flash_info->page_size);
1810 		/* Find the page_end addr */
1811 		page_end = page_start + sc->bce_flash_info->page_size;
1812 		/* Find the data_start addr */
1813 		data_start = (written == 0) ? offset32 : page_start;
1814 		/* Find the data_end addr */
1815 		data_end = (page_end > offset32 + len32) ? (offset32 + len32)
1816 							 : page_end;
1817 
1818 		/* Request access to the flash interface. */
1819 		rc = bce_acquire_nvram_lock(sc);
1820 		if (rc != 0)
1821 			goto nvram_write_end;
1822 
1823 		/* Enable access to flash interface */
1824 		bce_enable_nvram_access(sc);
1825 
1826 		cmd_flags = BCE_NVM_COMMAND_FIRST;
1827 		if (sc->bce_flash_info->buffered == 0) {
1828 			int j;
1829 
1830 			/*
1831 			 * Read the whole page into the buffer
1832 			 * (non-buffer flash only)
1833 			 */
1834 			for (j = 0; j < sc->bce_flash_info->page_size; j += 4) {
1835 				if (j == (sc->bce_flash_info->page_size - 4))
1836 					cmd_flags |= BCE_NVM_COMMAND_LAST;
1837 
1838 				rc = bce_nvram_read_dword(sc, page_start + j,
1839 							  &flash_buffer[j],
1840 							  cmd_flags);
1841 				if (rc)
1842 					goto nvram_write_end;
1843 
1844 				cmd_flags = 0;
1845 			}
1846 		}
1847 
1848 		/* Enable writes to flash interface (unlock write-protect) */
1849 		rc = bce_enable_nvram_write(sc);
1850 		if (rc != 0)
1851 			goto nvram_write_end;
1852 
1853 		/* Erase the page */
1854 		rc = bce_nvram_erase_page(sc, page_start);
1855 		if (rc != 0)
1856 			goto nvram_write_end;
1857 
1858 		/* Re-enable the write again for the actual write */
1859 		bce_enable_nvram_write(sc);
1860 
1861 		/* Loop to write back the buffer data from page_start to
1862 		 * data_start */
1863 		i = 0;
1864 		if (sc->bce_flash_info->buffered == 0) {
1865 			for (addr = page_start; addr < data_start;
1866 			     addr += 4, i += 4) {
1867 				rc = bce_nvram_write_dword(sc, addr,
1868 							   &flash_buffer[i],
1869 							   cmd_flags);
1870 				if (rc != 0)
1871 					goto nvram_write_end;
1872 
1873 				cmd_flags = 0;
1874 			}
1875 		}
1876 
1877 		/* Loop to write the new data from data_start to data_end */
1878 		for (addr = data_start; addr < data_end; addr += 4, i++) {
1879 			if (addr == page_end - 4 ||
1880 			    (sc->bce_flash_info->buffered &&
1881 			     addr == data_end - 4))
1882 				cmd_flags |= BCE_NVM_COMMAND_LAST;
1883 
1884 			rc = bce_nvram_write_dword(sc, addr, buf, cmd_flags);
1885 			if (rc != 0)
1886 				goto nvram_write_end;
1887 
1888 			cmd_flags = 0;
1889 			buf += 4;
1890 		}
1891 
1892 		/* Loop to write back the buffer data from data_end
1893 		 * to page_end */
1894 		if (sc->bce_flash_info->buffered == 0) {
1895 			for (addr = data_end; addr < page_end;
1896 			     addr += 4, i += 4) {
1897 				if (addr == page_end-4)
1898 					cmd_flags = BCE_NVM_COMMAND_LAST;
1899 
1900 				rc = bce_nvram_write_dword(sc, addr,
1901 					&flash_buffer[i], cmd_flags);
1902 				if (rc != 0)
1903 					goto nvram_write_end;
1904 
1905 				cmd_flags = 0;
1906 			}
1907 		}
1908 
1909 		/* Disable writes to flash interface (lock write-protect) */
1910 		bce_disable_nvram_write(sc);
1911 
1912 		/* Disable access to flash interface */
1913 		bce_disable_nvram_access(sc);
1914 		bce_release_nvram_lock(sc);
1915 
1916 		/* Increment written */
1917 		written += data_end - data_start;
1918 	}
1919 
1920 nvram_write_end:
1921 	if (align_start || align_end)
1922 		kfree(buf, M_DEVBUF);
1923 	return rc;
1924 }
1925 #endif /* BCE_NVRAM_WRITE_SUPPORT */
1926 
1927 
1928 /****************************************************************************/
1929 /* Verifies that NVRAM is accessible and contains valid data.               */
1930 /*                                                                          */
1931 /* Reads the configuration data from NVRAM and verifies that the CRC is     */
1932 /* correct.                                                                 */
1933 /*                                                                          */
1934 /* Returns:                                                                 */
1935 /*   0 on success, positive value on failure.                               */
1936 /****************************************************************************/
1937 static int
1938 bce_nvram_test(struct bce_softc *sc)
1939 {
1940 	uint32_t buf[BCE_NVRAM_SIZE / 4];
1941 	uint32_t magic, csum;
1942 	uint8_t *data = (uint8_t *)buf;
1943 	int rc = 0;
1944 
1945 	/*
1946 	 * Check that the device NVRAM is valid by reading
1947 	 * the magic value at offset 0.
1948 	 */
1949 	rc = bce_nvram_read(sc, 0, data, 4);
1950 	if (rc != 0)
1951 		return rc;
1952 
1953 	magic = be32toh(buf[0]);
1954 	if (magic != BCE_NVRAM_MAGIC) {
1955 		if_printf(&sc->arpcom.ac_if,
1956 			  "Invalid NVRAM magic value! Expected: 0x%08X, "
1957 			  "Found: 0x%08X\n", BCE_NVRAM_MAGIC, magic);
1958 		return ENODEV;
1959 	}
1960 
1961 	/*
1962 	 * Verify that the device NVRAM includes valid
1963 	 * configuration data.
1964 	 */
1965 	rc = bce_nvram_read(sc, 0x100, data, BCE_NVRAM_SIZE);
1966 	if (rc != 0)
1967 		return rc;
1968 
1969 	csum = ether_crc32_le(data, 0x100);
1970 	if (csum != BCE_CRC32_RESIDUAL) {
1971 		if_printf(&sc->arpcom.ac_if,
1972 			  "Invalid Manufacturing Information NVRAM CRC! "
1973 			  "Expected: 0x%08X, Found: 0x%08X\n",
1974 			  BCE_CRC32_RESIDUAL, csum);
1975 		return ENODEV;
1976 	}
1977 
1978 	csum = ether_crc32_le(data + 0x100, 0x100);
1979 	if (csum != BCE_CRC32_RESIDUAL) {
1980 		if_printf(&sc->arpcom.ac_if,
1981 			  "Invalid Feature Configuration Information "
1982 			  "NVRAM CRC! Expected: 0x%08X, Found: 08%08X\n",
1983 			  BCE_CRC32_RESIDUAL, csum);
1984 		rc = ENODEV;
1985 	}
1986 	return rc;
1987 }
1988 
1989 
1990 /****************************************************************************/
1991 /* Free any DMA memory owned by the driver.                                 */
1992 /*                                                                          */
1993 /* Scans through each data structre that requires DMA memory and frees      */
1994 /* the memory if allocated.                                                 */
1995 /*                                                                          */
1996 /* Returns:                                                                 */
1997 /*   Nothing.                                                               */
1998 /****************************************************************************/
1999 static void
2000 bce_dma_free(struct bce_softc *sc)
2001 {
2002 	int i;
2003 
2004 	/* Destroy the status block. */
2005 	if (sc->status_tag != NULL) {
2006 		if (sc->status_block != NULL) {
2007 			bus_dmamap_unload(sc->status_tag, sc->status_map);
2008 			bus_dmamem_free(sc->status_tag, sc->status_block,
2009 					sc->status_map);
2010 		}
2011 		bus_dma_tag_destroy(sc->status_tag);
2012 	}
2013 
2014 
2015 	/* Destroy the statistics block. */
2016 	if (sc->stats_tag != NULL) {
2017 		if (sc->stats_block != NULL) {
2018 			bus_dmamap_unload(sc->stats_tag, sc->stats_map);
2019 			bus_dmamem_free(sc->stats_tag, sc->stats_block,
2020 					sc->stats_map);
2021 		}
2022 		bus_dma_tag_destroy(sc->stats_tag);
2023 	}
2024 
2025 	/* Destroy the TX buffer descriptor DMA stuffs. */
2026 	if (sc->tx_bd_chain_tag != NULL) {
2027 		for (i = 0; i < TX_PAGES; i++) {
2028 			if (sc->tx_bd_chain[i] != NULL) {
2029 				bus_dmamap_unload(sc->tx_bd_chain_tag,
2030 						  sc->tx_bd_chain_map[i]);
2031 				bus_dmamem_free(sc->tx_bd_chain_tag,
2032 						sc->tx_bd_chain[i],
2033 						sc->tx_bd_chain_map[i]);
2034 			}
2035 		}
2036 		bus_dma_tag_destroy(sc->tx_bd_chain_tag);
2037 	}
2038 
2039 	/* Destroy the RX buffer descriptor DMA stuffs. */
2040 	if (sc->rx_bd_chain_tag != NULL) {
2041 		for (i = 0; i < RX_PAGES; i++) {
2042 			if (sc->rx_bd_chain[i] != NULL) {
2043 				bus_dmamap_unload(sc->rx_bd_chain_tag,
2044 						  sc->rx_bd_chain_map[i]);
2045 				bus_dmamem_free(sc->rx_bd_chain_tag,
2046 						sc->rx_bd_chain[i],
2047 						sc->rx_bd_chain_map[i]);
2048 			}
2049 		}
2050 		bus_dma_tag_destroy(sc->rx_bd_chain_tag);
2051 	}
2052 
2053 	/* Destroy the TX mbuf DMA stuffs. */
2054 	if (sc->tx_mbuf_tag != NULL) {
2055 		for (i = 0; i < TOTAL_TX_BD; i++) {
2056 			/* Must have been unloaded in bce_stop() */
2057 			KKASSERT(sc->tx_mbuf_ptr[i] == NULL);
2058 			bus_dmamap_destroy(sc->tx_mbuf_tag,
2059 					   sc->tx_mbuf_map[i]);
2060 		}
2061 		bus_dma_tag_destroy(sc->tx_mbuf_tag);
2062 	}
2063 
2064 	/* Destroy the RX mbuf DMA stuffs. */
2065 	if (sc->rx_mbuf_tag != NULL) {
2066 		for (i = 0; i < TOTAL_RX_BD; i++) {
2067 			/* Must have been unloaded in bce_stop() */
2068 			KKASSERT(sc->rx_mbuf_ptr[i] == NULL);
2069 			bus_dmamap_destroy(sc->rx_mbuf_tag,
2070 					   sc->rx_mbuf_map[i]);
2071 		}
2072 		bus_dma_tag_destroy(sc->rx_mbuf_tag);
2073 	}
2074 
2075 	/* Destroy the parent tag */
2076 	if (sc->parent_tag != NULL)
2077 		bus_dma_tag_destroy(sc->parent_tag);
2078 }
2079 
2080 
2081 /****************************************************************************/
2082 /* Get DMA memory from the OS.                                              */
2083 /*                                                                          */
2084 /* Validates that the OS has provided DMA buffers in response to a          */
2085 /* bus_dmamap_load() call and saves the physical address of those buffers.  */
2086 /* When the callback is used the OS will return 0 for the mapping function  */
2087 /* (bus_dmamap_load()) so we use the value of map_arg->maxsegs to pass any  */
2088 /* failures back to the caller.                                             */
2089 /*                                                                          */
2090 /* Returns:                                                                 */
2091 /*   Nothing.                                                               */
2092 /****************************************************************************/
2093 static void
2094 bce_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2095 {
2096 	bus_addr_t *busaddr = arg;
2097 
2098 	/*
2099 	 * Simulate a mapping failure.
2100 	 * XXX not correct.
2101 	 */
2102 	DBRUNIF(DB_RANDOMTRUE(bce_debug_dma_map_addr_failure),
2103 		kprintf("bce: %s(%d): Simulating DMA mapping error.\n",
2104 			__FILE__, __LINE__);
2105 		error = ENOMEM);
2106 
2107 	/* Check for an error and signal the caller that an error occurred. */
2108 	if (error)
2109 		return;
2110 
2111 	KASSERT(nseg == 1, ("only one segment is allowed\n"));
2112 	*busaddr = segs->ds_addr;
2113 }
2114 
2115 
2116 static void
2117 bce_dma_map_mbuf(void *arg, bus_dma_segment_t *segs, int nsegs,
2118 		 bus_size_t mapsz __unused, int error)
2119 {
2120 	struct bce_dmamap_arg *ctx = arg;
2121 	int i;
2122 
2123 	if (error)
2124 		return;
2125 
2126 	if (nsegs > ctx->bce_maxsegs) {
2127 		ctx->bce_maxsegs = 0;
2128 		return;
2129 	}
2130 
2131 	ctx->bce_maxsegs = nsegs;
2132 	for (i = 0; i < nsegs; ++i)
2133 		ctx->bce_segs[i] = segs[i];
2134 }
2135 
2136 
2137 /****************************************************************************/
2138 /* Allocate any DMA memory needed by the driver.                            */
2139 /*                                                                          */
2140 /* Allocates DMA memory needed for the various global structures needed by  */
2141 /* hardware.                                                                */
2142 /*                                                                          */
2143 /* Returns:                                                                 */
2144 /*   0 for success, positive value for failure.                             */
2145 /****************************************************************************/
2146 static int
2147 bce_dma_alloc(struct bce_softc *sc)
2148 {
2149 	struct ifnet *ifp = &sc->arpcom.ac_if;
2150 	int i, j, rc = 0;
2151 	bus_addr_t busaddr;
2152 
2153 	/*
2154 	 * Allocate the parent bus DMA tag appropriate for PCI.
2155 	 */
2156 	rc = bus_dma_tag_create(NULL, 1, BCE_DMA_BOUNDARY,
2157 				sc->max_bus_addr, BUS_SPACE_MAXADDR,
2158 				NULL, NULL,
2159 				MAXBSIZE, BUS_SPACE_UNRESTRICTED,
2160 				BUS_SPACE_MAXSIZE_32BIT,
2161 				0, &sc->parent_tag);
2162 	if (rc != 0) {
2163 		if_printf(ifp, "Could not allocate parent DMA tag!\n");
2164 		return rc;
2165 	}
2166 
2167 	/*
2168 	 * Create a DMA tag for the status block, allocate and clear the
2169 	 * memory, map the memory into DMA space, and fetch the physical
2170 	 * address of the block.
2171 	 */
2172 	rc = bus_dma_tag_create(sc->parent_tag,
2173 				BCE_DMA_ALIGN, BCE_DMA_BOUNDARY,
2174 				sc->max_bus_addr, BUS_SPACE_MAXADDR,
2175 				NULL, NULL,
2176 				BCE_STATUS_BLK_SZ, 1, BCE_STATUS_BLK_SZ,
2177 				0, &sc->status_tag);
2178 	if (rc != 0) {
2179 		if_printf(ifp, "Could not allocate status block DMA tag!\n");
2180 		return rc;
2181 	}
2182 
2183 	rc = bus_dmamem_alloc(sc->status_tag, (void **)&sc->status_block,
2184 			      BUS_DMA_WAITOK | BUS_DMA_ZERO,
2185 			      &sc->status_map);
2186 	if (rc != 0) {
2187 		if_printf(ifp, "Could not allocate status block DMA memory!\n");
2188 		return rc;
2189 	}
2190 
2191 	rc = bus_dmamap_load(sc->status_tag, sc->status_map,
2192 			     sc->status_block, BCE_STATUS_BLK_SZ,
2193 			     bce_dma_map_addr, &busaddr, BUS_DMA_WAITOK);
2194 	if (rc != 0) {
2195 		if_printf(ifp, "Could not map status block DMA memory!\n");
2196 		bus_dmamem_free(sc->status_tag, sc->status_block,
2197 				sc->status_map);
2198 		sc->status_block = NULL;
2199 		return rc;
2200 	}
2201 
2202 	sc->status_block_paddr = busaddr;
2203 	/* DRC - Fix for 64 bit addresses. */
2204 	DBPRINT(sc, BCE_INFO, "status_block_paddr = 0x%08X\n",
2205 		(uint32_t)sc->status_block_paddr);
2206 
2207 	/*
2208 	 * Create a DMA tag for the statistics block, allocate and clear the
2209 	 * memory, map the memory into DMA space, and fetch the physical
2210 	 * address of the block.
2211 	 */
2212 	rc = bus_dma_tag_create(sc->parent_tag,
2213 				BCE_DMA_ALIGN, BCE_DMA_BOUNDARY,
2214 				sc->max_bus_addr, BUS_SPACE_MAXADDR,
2215 				NULL, NULL,
2216 				BCE_STATS_BLK_SZ, 1, BCE_STATS_BLK_SZ,
2217 				0, &sc->stats_tag);
2218 	if (rc != 0) {
2219 		if_printf(ifp, "Could not allocate "
2220 			  "statistics block DMA tag!\n");
2221 		return rc;
2222 	}
2223 
2224 	rc = bus_dmamem_alloc(sc->stats_tag, (void **)&sc->stats_block,
2225 			      BUS_DMA_WAITOK | BUS_DMA_ZERO,
2226 			      &sc->stats_map);
2227 	if (rc != 0) {
2228 		if_printf(ifp, "Could not allocate "
2229 			  "statistics block DMA memory!\n");
2230 		return rc;
2231 	}
2232 
2233 	rc = bus_dmamap_load(sc->stats_tag, sc->stats_map,
2234 			     sc->stats_block, BCE_STATS_BLK_SZ,
2235 			     bce_dma_map_addr, &busaddr, BUS_DMA_WAITOK);
2236 	if (rc != 0) {
2237 		if_printf(ifp, "Could not map statistics block DMA memory!\n");
2238 		bus_dmamem_free(sc->stats_tag, sc->stats_block, sc->stats_map);
2239 		sc->stats_block = NULL;
2240 		return rc;
2241 	}
2242 
2243 	sc->stats_block_paddr = busaddr;
2244 	/* DRC - Fix for 64 bit address. */
2245 	DBPRINT(sc, BCE_INFO, "stats_block_paddr = 0x%08X\n",
2246 		(uint32_t)sc->stats_block_paddr);
2247 
2248 	/*
2249 	 * Create a DMA tag for the TX buffer descriptor chain,
2250 	 * allocate and clear the  memory, and fetch the
2251 	 * physical address of the block.
2252 	 */
2253 	rc = bus_dma_tag_create(sc->parent_tag,
2254 				BCM_PAGE_SIZE, BCE_DMA_BOUNDARY,
2255 				sc->max_bus_addr, BUS_SPACE_MAXADDR,
2256 				NULL, NULL,
2257 				BCE_TX_CHAIN_PAGE_SZ, 1, BCE_TX_CHAIN_PAGE_SZ,
2258 				0, &sc->tx_bd_chain_tag);
2259 	if (rc != 0) {
2260 		if_printf(ifp, "Could not allocate "
2261 			  "TX descriptor chain DMA tag!\n");
2262 		return rc;
2263 	}
2264 
2265 	for (i = 0; i < TX_PAGES; i++) {
2266 		rc = bus_dmamem_alloc(sc->tx_bd_chain_tag,
2267 				      (void **)&sc->tx_bd_chain[i],
2268 				      BUS_DMA_WAITOK, &sc->tx_bd_chain_map[i]);
2269 		if (rc != 0) {
2270 			if_printf(ifp, "Could not allocate %dth TX descriptor "
2271 				  "chain DMA memory!\n", i);
2272 			return rc;
2273 		}
2274 
2275 		rc = bus_dmamap_load(sc->tx_bd_chain_tag,
2276 				     sc->tx_bd_chain_map[i],
2277 				     sc->tx_bd_chain[i], BCE_TX_CHAIN_PAGE_SZ,
2278 				     bce_dma_map_addr, &busaddr,
2279 				     BUS_DMA_WAITOK);
2280 		if (rc != 0) {
2281 			if_printf(ifp, "Could not map %dth TX descriptor "
2282 				  "chain DMA memory!\n", i);
2283 			bus_dmamem_free(sc->tx_bd_chain_tag,
2284 					sc->tx_bd_chain[i],
2285 					sc->tx_bd_chain_map[i]);
2286 			sc->tx_bd_chain[i] = NULL;
2287 			return rc;
2288 		}
2289 
2290 		sc->tx_bd_chain_paddr[i] = busaddr;
2291 		/* DRC - Fix for 64 bit systems. */
2292 		DBPRINT(sc, BCE_INFO, "tx_bd_chain_paddr[%d] = 0x%08X\n",
2293 			i, (uint32_t)sc->tx_bd_chain_paddr[i]);
2294 	}
2295 
2296 	/* Create a DMA tag for TX mbufs. */
2297 	rc = bus_dma_tag_create(sc->parent_tag, 1, BCE_DMA_BOUNDARY,
2298 				sc->max_bus_addr, BUS_SPACE_MAXADDR,
2299 				NULL, NULL,
2300 				MCLBYTES * BCE_MAX_SEGMENTS,
2301 				BCE_MAX_SEGMENTS, MCLBYTES,
2302 				0, &sc->tx_mbuf_tag);
2303 	if (rc != 0) {
2304 		if_printf(ifp, "Could not allocate TX mbuf DMA tag!\n");
2305 		return rc;
2306 	}
2307 
2308 	/* Create DMA maps for the TX mbufs clusters. */
2309 	for (i = 0; i < TOTAL_TX_BD; i++) {
2310 		rc = bus_dmamap_create(sc->tx_mbuf_tag, BUS_DMA_WAITOK,
2311 				       &sc->tx_mbuf_map[i]);
2312 		if (rc != 0) {
2313 			for (j = 0; j < i; ++j) {
2314 				bus_dmamap_destroy(sc->tx_mbuf_tag,
2315 						   sc->tx_mbuf_map[i]);
2316 			}
2317 			bus_dma_tag_destroy(sc->tx_mbuf_tag);
2318 			sc->tx_mbuf_tag = NULL;
2319 
2320 			if_printf(ifp, "Unable to create "
2321 				  "%dth TX mbuf DMA map!\n", i);
2322 			return rc;
2323 		}
2324 	}
2325 
2326 	/*
2327 	 * Create a DMA tag for the RX buffer descriptor chain,
2328 	 * allocate and clear the  memory, and fetch the physical
2329 	 * address of the blocks.
2330 	 */
2331 	rc = bus_dma_tag_create(sc->parent_tag,
2332 				BCM_PAGE_SIZE, BCE_DMA_BOUNDARY,
2333 				sc->max_bus_addr, BUS_SPACE_MAXADDR,
2334 				NULL, NULL,
2335 				BCE_RX_CHAIN_PAGE_SZ, 1, BCE_RX_CHAIN_PAGE_SZ,
2336 				0, &sc->rx_bd_chain_tag);
2337 	if (rc != 0) {
2338 		if_printf(ifp, "Could not allocate "
2339 			  "RX descriptor chain DMA tag!\n");
2340 		return rc;
2341 	}
2342 
2343 	for (i = 0; i < RX_PAGES; i++) {
2344 		rc = bus_dmamem_alloc(sc->rx_bd_chain_tag,
2345 				      (void **)&sc->rx_bd_chain[i],
2346 				      BUS_DMA_WAITOK | BUS_DMA_ZERO,
2347 				      &sc->rx_bd_chain_map[i]);
2348 		if (rc != 0) {
2349 			if_printf(ifp, "Could not allocate %dth RX descriptor "
2350 				  "chain DMA memory!\n", i);
2351 			return rc;
2352 		}
2353 
2354 		rc = bus_dmamap_load(sc->rx_bd_chain_tag,
2355 				     sc->rx_bd_chain_map[i],
2356 				     sc->rx_bd_chain[i], BCE_RX_CHAIN_PAGE_SZ,
2357 				     bce_dma_map_addr, &busaddr,
2358 				     BUS_DMA_WAITOK);
2359 		if (rc != 0) {
2360 			if_printf(ifp, "Could not map %dth RX descriptor "
2361 				  "chain DMA memory!\n", i);
2362 			bus_dmamem_free(sc->rx_bd_chain_tag,
2363 					sc->rx_bd_chain[i],
2364 					sc->rx_bd_chain_map[i]);
2365 			sc->rx_bd_chain[i] = NULL;
2366 			return rc;
2367 		}
2368 
2369 		sc->rx_bd_chain_paddr[i] = busaddr;
2370 		/* DRC - Fix for 64 bit systems. */
2371 		DBPRINT(sc, BCE_INFO, "rx_bd_chain_paddr[%d] = 0x%08X\n",
2372 			i, (uint32_t)sc->rx_bd_chain_paddr[i]);
2373 	}
2374 
2375 	/* Create a DMA tag for RX mbufs. */
2376 	rc = bus_dma_tag_create(sc->parent_tag, 1, BCE_DMA_BOUNDARY,
2377 				sc->max_bus_addr, BUS_SPACE_MAXADDR,
2378 				NULL, NULL,
2379 				MCLBYTES, 1/* BCE_MAX_SEGMENTS */, MCLBYTES,
2380 				0, &sc->rx_mbuf_tag);
2381 	if (rc != 0) {
2382 		if_printf(ifp, "Could not allocate RX mbuf DMA tag!\n");
2383 		return rc;
2384 	}
2385 
2386 	/* Create DMA maps for the RX mbuf clusters. */
2387 	for (i = 0; i < TOTAL_RX_BD; i++) {
2388 		rc = bus_dmamap_create(sc->rx_mbuf_tag, BUS_DMA_WAITOK,
2389 				       &sc->rx_mbuf_map[i]);
2390 		if (rc != 0) {
2391 			for (j = 0; j < i; ++j) {
2392 				bus_dmamap_destroy(sc->rx_mbuf_tag,
2393 						   sc->rx_mbuf_map[j]);
2394 			}
2395 			bus_dma_tag_destroy(sc->rx_mbuf_tag);
2396 			sc->rx_mbuf_tag = NULL;
2397 
2398 			if_printf(ifp, "Unable to create "
2399 				  "%dth RX mbuf DMA map!\n", i);
2400 			return rc;
2401 		}
2402 	}
2403 	return 0;
2404 }
2405 
2406 
2407 /****************************************************************************/
2408 /* Firmware synchronization.                                                */
2409 /*                                                                          */
2410 /* Before performing certain events such as a chip reset, synchronize with  */
2411 /* the firmware first.                                                      */
2412 /*                                                                          */
2413 /* Returns:                                                                 */
2414 /*   0 for success, positive value for failure.                             */
2415 /****************************************************************************/
2416 static int
2417 bce_fw_sync(struct bce_softc *sc, uint32_t msg_data)
2418 {
2419 	int i, rc = 0;
2420 	uint32_t val;
2421 
2422 	/* Don't waste any time if we've timed out before. */
2423 	if (sc->bce_fw_timed_out)
2424 		return EBUSY;
2425 
2426 	/* Increment the message sequence number. */
2427 	sc->bce_fw_wr_seq++;
2428 	msg_data |= sc->bce_fw_wr_seq;
2429 
2430  	DBPRINT(sc, BCE_VERBOSE, "bce_fw_sync(): msg_data = 0x%08X\n", msg_data);
2431 
2432 	/* Send the message to the bootcode driver mailbox. */
2433 	REG_WR_IND(sc, sc->bce_shmem_base + BCE_DRV_MB, msg_data);
2434 
2435 	/* Wait for the bootcode to acknowledge the message. */
2436 	for (i = 0; i < FW_ACK_TIME_OUT_MS; i++) {
2437 		/* Check for a response in the bootcode firmware mailbox. */
2438 		val = REG_RD_IND(sc, sc->bce_shmem_base + BCE_FW_MB);
2439 		if ((val & BCE_FW_MSG_ACK) == (msg_data & BCE_DRV_MSG_SEQ))
2440 			break;
2441 		DELAY(1000);
2442 	}
2443 
2444 	/* If we've timed out, tell the bootcode that we've stopped waiting. */
2445 	if ((val & BCE_FW_MSG_ACK) != (msg_data & BCE_DRV_MSG_SEQ) &&
2446 	    (msg_data & BCE_DRV_MSG_DATA) != BCE_DRV_MSG_DATA_WAIT0) {
2447 		if_printf(&sc->arpcom.ac_if,
2448 			  "Firmware synchronization timeout! "
2449 			  "msg_data = 0x%08X\n", msg_data);
2450 
2451 		msg_data &= ~BCE_DRV_MSG_CODE;
2452 		msg_data |= BCE_DRV_MSG_CODE_FW_TIMEOUT;
2453 
2454 		REG_WR_IND(sc, sc->bce_shmem_base + BCE_DRV_MB, msg_data);
2455 
2456 		sc->bce_fw_timed_out = 1;
2457 		rc = EBUSY;
2458 	}
2459 	return rc;
2460 }
2461 
2462 
2463 /****************************************************************************/
2464 /* Load Receive Virtual 2 Physical (RV2P) processor firmware.               */
2465 /*                                                                          */
2466 /* Returns:                                                                 */
2467 /*   Nothing.                                                               */
2468 /****************************************************************************/
2469 static void
2470 bce_load_rv2p_fw(struct bce_softc *sc, uint32_t *rv2p_code,
2471 		 uint32_t rv2p_code_len, uint32_t rv2p_proc)
2472 {
2473 	int i;
2474 	uint32_t val;
2475 
2476 	for (i = 0; i < rv2p_code_len; i += 8) {
2477 		REG_WR(sc, BCE_RV2P_INSTR_HIGH, *rv2p_code);
2478 		rv2p_code++;
2479 		REG_WR(sc, BCE_RV2P_INSTR_LOW, *rv2p_code);
2480 		rv2p_code++;
2481 
2482 		if (rv2p_proc == RV2P_PROC1) {
2483 			val = (i / 8) | BCE_RV2P_PROC1_ADDR_CMD_RDWR;
2484 			REG_WR(sc, BCE_RV2P_PROC1_ADDR_CMD, val);
2485 		} else {
2486 			val = (i / 8) | BCE_RV2P_PROC2_ADDR_CMD_RDWR;
2487 			REG_WR(sc, BCE_RV2P_PROC2_ADDR_CMD, val);
2488 		}
2489 	}
2490 
2491 	/* Reset the processor, un-stall is done later. */
2492 	if (rv2p_proc == RV2P_PROC1)
2493 		REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC1_RESET);
2494 	else
2495 		REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC2_RESET);
2496 }
2497 
2498 
2499 /****************************************************************************/
2500 /* Load RISC processor firmware.                                            */
2501 /*                                                                          */
2502 /* Loads firmware from the file if_bcefw.h into the scratchpad memory       */
2503 /* associated with a particular processor.                                  */
2504 /*                                                                          */
2505 /* Returns:                                                                 */
2506 /*   Nothing.                                                               */
2507 /****************************************************************************/
2508 static void
2509 bce_load_cpu_fw(struct bce_softc *sc, struct cpu_reg *cpu_reg,
2510 		struct fw_info *fw)
2511 {
2512 	uint32_t offset, val;
2513 	int j;
2514 
2515 	/* Halt the CPU. */
2516 	val = REG_RD_IND(sc, cpu_reg->mode);
2517 	val |= cpu_reg->mode_value_halt;
2518 	REG_WR_IND(sc, cpu_reg->mode, val);
2519 	REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
2520 
2521 	/* Load the Text area. */
2522 	offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2523 	if (fw->text) {
2524 		for (j = 0; j < (fw->text_len / 4); j++, offset += 4)
2525 			REG_WR_IND(sc, offset, fw->text[j]);
2526 	}
2527 
2528 	/* Load the Data area. */
2529 	offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2530 	if (fw->data) {
2531 		for (j = 0; j < (fw->data_len / 4); j++, offset += 4)
2532 			REG_WR_IND(sc, offset, fw->data[j]);
2533 	}
2534 
2535 	/* Load the SBSS area. */
2536 	offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2537 	if (fw->sbss) {
2538 		for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4)
2539 			REG_WR_IND(sc, offset, fw->sbss[j]);
2540 	}
2541 
2542 	/* Load the BSS area. */
2543 	offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2544 	if (fw->bss) {
2545 		for (j = 0; j < (fw->bss_len/4); j++, offset += 4)
2546 			REG_WR_IND(sc, offset, fw->bss[j]);
2547 	}
2548 
2549 	/* Load the Read-Only area. */
2550 	offset = cpu_reg->spad_base +
2551 		(fw->rodata_addr - cpu_reg->mips_view_base);
2552 	if (fw->rodata) {
2553 		for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4)
2554 			REG_WR_IND(sc, offset, fw->rodata[j]);
2555 	}
2556 
2557 	/* Clear the pre-fetch instruction. */
2558 	REG_WR_IND(sc, cpu_reg->inst, 0);
2559 	REG_WR_IND(sc, cpu_reg->pc, fw->start_addr);
2560 
2561 	/* Start the CPU. */
2562 	val = REG_RD_IND(sc, cpu_reg->mode);
2563 	val &= ~cpu_reg->mode_value_halt;
2564 	REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
2565 	REG_WR_IND(sc, cpu_reg->mode, val);
2566 }
2567 
2568 
2569 /****************************************************************************/
2570 /* Initialize the RV2P, RX, TX, TPAT, and COM CPUs.                         */
2571 /*                                                                          */
2572 /* Loads the firmware for each CPU and starts the CPU.                      */
2573 /*                                                                          */
2574 /* Returns:                                                                 */
2575 /*   Nothing.                                                               */
2576 /****************************************************************************/
2577 static void
2578 bce_init_cpus(struct bce_softc *sc)
2579 {
2580 	struct cpu_reg cpu_reg;
2581 	struct fw_info fw;
2582 
2583 	/* Initialize the RV2P processor. */
2584 	bce_load_rv2p_fw(sc, bce_rv2p_proc1, sizeof(bce_rv2p_proc1), RV2P_PROC1);
2585 	bce_load_rv2p_fw(sc, bce_rv2p_proc2, sizeof(bce_rv2p_proc2), RV2P_PROC2);
2586 
2587 	/* Initialize the RX Processor. */
2588 	cpu_reg.mode = BCE_RXP_CPU_MODE;
2589 	cpu_reg.mode_value_halt = BCE_RXP_CPU_MODE_SOFT_HALT;
2590 	cpu_reg.mode_value_sstep = BCE_RXP_CPU_MODE_STEP_ENA;
2591 	cpu_reg.state = BCE_RXP_CPU_STATE;
2592 	cpu_reg.state_value_clear = 0xffffff;
2593 	cpu_reg.gpr0 = BCE_RXP_CPU_REG_FILE;
2594 	cpu_reg.evmask = BCE_RXP_CPU_EVENT_MASK;
2595 	cpu_reg.pc = BCE_RXP_CPU_PROGRAM_COUNTER;
2596 	cpu_reg.inst = BCE_RXP_CPU_INSTRUCTION;
2597 	cpu_reg.bp = BCE_RXP_CPU_HW_BREAKPOINT;
2598 	cpu_reg.spad_base = BCE_RXP_SCRATCH;
2599 	cpu_reg.mips_view_base = 0x8000000;
2600 
2601 	fw.ver_major = bce_RXP_b06FwReleaseMajor;
2602 	fw.ver_minor = bce_RXP_b06FwReleaseMinor;
2603 	fw.ver_fix = bce_RXP_b06FwReleaseFix;
2604 	fw.start_addr = bce_RXP_b06FwStartAddr;
2605 
2606 	fw.text_addr = bce_RXP_b06FwTextAddr;
2607 	fw.text_len = bce_RXP_b06FwTextLen;
2608 	fw.text_index = 0;
2609 	fw.text = bce_RXP_b06FwText;
2610 
2611 	fw.data_addr = bce_RXP_b06FwDataAddr;
2612 	fw.data_len = bce_RXP_b06FwDataLen;
2613 	fw.data_index = 0;
2614 	fw.data = bce_RXP_b06FwData;
2615 
2616 	fw.sbss_addr = bce_RXP_b06FwSbssAddr;
2617 	fw.sbss_len = bce_RXP_b06FwSbssLen;
2618 	fw.sbss_index = 0;
2619 	fw.sbss = bce_RXP_b06FwSbss;
2620 
2621 	fw.bss_addr = bce_RXP_b06FwBssAddr;
2622 	fw.bss_len = bce_RXP_b06FwBssLen;
2623 	fw.bss_index = 0;
2624 	fw.bss = bce_RXP_b06FwBss;
2625 
2626 	fw.rodata_addr = bce_RXP_b06FwRodataAddr;
2627 	fw.rodata_len = bce_RXP_b06FwRodataLen;
2628 	fw.rodata_index = 0;
2629 	fw.rodata = bce_RXP_b06FwRodata;
2630 
2631 	DBPRINT(sc, BCE_INFO_RESET, "Loading RX firmware.\n");
2632 	bce_load_cpu_fw(sc, &cpu_reg, &fw);
2633 
2634 	/* Initialize the TX Processor. */
2635 	cpu_reg.mode = BCE_TXP_CPU_MODE;
2636 	cpu_reg.mode_value_halt = BCE_TXP_CPU_MODE_SOFT_HALT;
2637 	cpu_reg.mode_value_sstep = BCE_TXP_CPU_MODE_STEP_ENA;
2638 	cpu_reg.state = BCE_TXP_CPU_STATE;
2639 	cpu_reg.state_value_clear = 0xffffff;
2640 	cpu_reg.gpr0 = BCE_TXP_CPU_REG_FILE;
2641 	cpu_reg.evmask = BCE_TXP_CPU_EVENT_MASK;
2642 	cpu_reg.pc = BCE_TXP_CPU_PROGRAM_COUNTER;
2643 	cpu_reg.inst = BCE_TXP_CPU_INSTRUCTION;
2644 	cpu_reg.bp = BCE_TXP_CPU_HW_BREAKPOINT;
2645 	cpu_reg.spad_base = BCE_TXP_SCRATCH;
2646 	cpu_reg.mips_view_base = 0x8000000;
2647 
2648 	fw.ver_major = bce_TXP_b06FwReleaseMajor;
2649 	fw.ver_minor = bce_TXP_b06FwReleaseMinor;
2650 	fw.ver_fix = bce_TXP_b06FwReleaseFix;
2651 	fw.start_addr = bce_TXP_b06FwStartAddr;
2652 
2653 	fw.text_addr = bce_TXP_b06FwTextAddr;
2654 	fw.text_len = bce_TXP_b06FwTextLen;
2655 	fw.text_index = 0;
2656 	fw.text = bce_TXP_b06FwText;
2657 
2658 	fw.data_addr = bce_TXP_b06FwDataAddr;
2659 	fw.data_len = bce_TXP_b06FwDataLen;
2660 	fw.data_index = 0;
2661 	fw.data = bce_TXP_b06FwData;
2662 
2663 	fw.sbss_addr = bce_TXP_b06FwSbssAddr;
2664 	fw.sbss_len = bce_TXP_b06FwSbssLen;
2665 	fw.sbss_index = 0;
2666 	fw.sbss = bce_TXP_b06FwSbss;
2667 
2668 	fw.bss_addr = bce_TXP_b06FwBssAddr;
2669 	fw.bss_len = bce_TXP_b06FwBssLen;
2670 	fw.bss_index = 0;
2671 	fw.bss = bce_TXP_b06FwBss;
2672 
2673 	fw.rodata_addr = bce_TXP_b06FwRodataAddr;
2674 	fw.rodata_len = bce_TXP_b06FwRodataLen;
2675 	fw.rodata_index = 0;
2676 	fw.rodata = bce_TXP_b06FwRodata;
2677 
2678 	DBPRINT(sc, BCE_INFO_RESET, "Loading TX firmware.\n");
2679 	bce_load_cpu_fw(sc, &cpu_reg, &fw);
2680 
2681 	/* Initialize the TX Patch-up Processor. */
2682 	cpu_reg.mode = BCE_TPAT_CPU_MODE;
2683 	cpu_reg.mode_value_halt = BCE_TPAT_CPU_MODE_SOFT_HALT;
2684 	cpu_reg.mode_value_sstep = BCE_TPAT_CPU_MODE_STEP_ENA;
2685 	cpu_reg.state = BCE_TPAT_CPU_STATE;
2686 	cpu_reg.state_value_clear = 0xffffff;
2687 	cpu_reg.gpr0 = BCE_TPAT_CPU_REG_FILE;
2688 	cpu_reg.evmask = BCE_TPAT_CPU_EVENT_MASK;
2689 	cpu_reg.pc = BCE_TPAT_CPU_PROGRAM_COUNTER;
2690 	cpu_reg.inst = BCE_TPAT_CPU_INSTRUCTION;
2691 	cpu_reg.bp = BCE_TPAT_CPU_HW_BREAKPOINT;
2692 	cpu_reg.spad_base = BCE_TPAT_SCRATCH;
2693 	cpu_reg.mips_view_base = 0x8000000;
2694 
2695 	fw.ver_major = bce_TPAT_b06FwReleaseMajor;
2696 	fw.ver_minor = bce_TPAT_b06FwReleaseMinor;
2697 	fw.ver_fix = bce_TPAT_b06FwReleaseFix;
2698 	fw.start_addr = bce_TPAT_b06FwStartAddr;
2699 
2700 	fw.text_addr = bce_TPAT_b06FwTextAddr;
2701 	fw.text_len = bce_TPAT_b06FwTextLen;
2702 	fw.text_index = 0;
2703 	fw.text = bce_TPAT_b06FwText;
2704 
2705 	fw.data_addr = bce_TPAT_b06FwDataAddr;
2706 	fw.data_len = bce_TPAT_b06FwDataLen;
2707 	fw.data_index = 0;
2708 	fw.data = bce_TPAT_b06FwData;
2709 
2710 	fw.sbss_addr = bce_TPAT_b06FwSbssAddr;
2711 	fw.sbss_len = bce_TPAT_b06FwSbssLen;
2712 	fw.sbss_index = 0;
2713 	fw.sbss = bce_TPAT_b06FwSbss;
2714 
2715 	fw.bss_addr = bce_TPAT_b06FwBssAddr;
2716 	fw.bss_len = bce_TPAT_b06FwBssLen;
2717 	fw.bss_index = 0;
2718 	fw.bss = bce_TPAT_b06FwBss;
2719 
2720 	fw.rodata_addr = bce_TPAT_b06FwRodataAddr;
2721 	fw.rodata_len = bce_TPAT_b06FwRodataLen;
2722 	fw.rodata_index = 0;
2723 	fw.rodata = bce_TPAT_b06FwRodata;
2724 
2725 	DBPRINT(sc, BCE_INFO_RESET, "Loading TPAT firmware.\n");
2726 	bce_load_cpu_fw(sc, &cpu_reg, &fw);
2727 
2728 	/* Initialize the Completion Processor. */
2729 	cpu_reg.mode = BCE_COM_CPU_MODE;
2730 	cpu_reg.mode_value_halt = BCE_COM_CPU_MODE_SOFT_HALT;
2731 	cpu_reg.mode_value_sstep = BCE_COM_CPU_MODE_STEP_ENA;
2732 	cpu_reg.state = BCE_COM_CPU_STATE;
2733 	cpu_reg.state_value_clear = 0xffffff;
2734 	cpu_reg.gpr0 = BCE_COM_CPU_REG_FILE;
2735 	cpu_reg.evmask = BCE_COM_CPU_EVENT_MASK;
2736 	cpu_reg.pc = BCE_COM_CPU_PROGRAM_COUNTER;
2737 	cpu_reg.inst = BCE_COM_CPU_INSTRUCTION;
2738 	cpu_reg.bp = BCE_COM_CPU_HW_BREAKPOINT;
2739 	cpu_reg.spad_base = BCE_COM_SCRATCH;
2740 	cpu_reg.mips_view_base = 0x8000000;
2741 
2742 	fw.ver_major = bce_COM_b06FwReleaseMajor;
2743 	fw.ver_minor = bce_COM_b06FwReleaseMinor;
2744 	fw.ver_fix = bce_COM_b06FwReleaseFix;
2745 	fw.start_addr = bce_COM_b06FwStartAddr;
2746 
2747 	fw.text_addr = bce_COM_b06FwTextAddr;
2748 	fw.text_len = bce_COM_b06FwTextLen;
2749 	fw.text_index = 0;
2750 	fw.text = bce_COM_b06FwText;
2751 
2752 	fw.data_addr = bce_COM_b06FwDataAddr;
2753 	fw.data_len = bce_COM_b06FwDataLen;
2754 	fw.data_index = 0;
2755 	fw.data = bce_COM_b06FwData;
2756 
2757 	fw.sbss_addr = bce_COM_b06FwSbssAddr;
2758 	fw.sbss_len = bce_COM_b06FwSbssLen;
2759 	fw.sbss_index = 0;
2760 	fw.sbss = bce_COM_b06FwSbss;
2761 
2762 	fw.bss_addr = bce_COM_b06FwBssAddr;
2763 	fw.bss_len = bce_COM_b06FwBssLen;
2764 	fw.bss_index = 0;
2765 	fw.bss = bce_COM_b06FwBss;
2766 
2767 	fw.rodata_addr = bce_COM_b06FwRodataAddr;
2768 	fw.rodata_len = bce_COM_b06FwRodataLen;
2769 	fw.rodata_index = 0;
2770 	fw.rodata = bce_COM_b06FwRodata;
2771 
2772 	DBPRINT(sc, BCE_INFO_RESET, "Loading COM firmware.\n");
2773 	bce_load_cpu_fw(sc, &cpu_reg, &fw);
2774 }
2775 
2776 
2777 /****************************************************************************/
2778 /* Initialize context memory.                                               */
2779 /*                                                                          */
2780 /* Clears the memory associated with each Context ID (CID).                 */
2781 /*                                                                          */
2782 /* Returns:                                                                 */
2783 /*   Nothing.                                                               */
2784 /****************************************************************************/
2785 static void
2786 bce_init_context(struct bce_softc *sc)
2787 {
2788 	uint32_t vcid;
2789 
2790 	vcid = 96;
2791 	while (vcid) {
2792 		uint32_t vcid_addr, pcid_addr, offset;
2793 
2794 		vcid--;
2795 
2796    		vcid_addr = GET_CID_ADDR(vcid);
2797 		pcid_addr = vcid_addr;
2798 
2799 		REG_WR(sc, BCE_CTX_VIRT_ADDR, 0x00);
2800 		REG_WR(sc, BCE_CTX_PAGE_TBL, pcid_addr);
2801 
2802 		/* Zero out the context. */
2803 		for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2804 			CTX_WR(sc, 0x00, offset, 0);
2805 
2806 		REG_WR(sc, BCE_CTX_VIRT_ADDR, vcid_addr);
2807 		REG_WR(sc, BCE_CTX_PAGE_TBL, pcid_addr);
2808 	}
2809 }
2810 
2811 
2812 /****************************************************************************/
2813 /* Fetch the permanent MAC address of the controller.                       */
2814 /*                                                                          */
2815 /* Returns:                                                                 */
2816 /*   Nothing.                                                               */
2817 /****************************************************************************/
2818 static void
2819 bce_get_mac_addr(struct bce_softc *sc)
2820 {
2821 	uint32_t mac_lo = 0, mac_hi = 0;
2822 
2823 	/*
2824 	 * The NetXtreme II bootcode populates various NIC
2825 	 * power-on and runtime configuration items in a
2826 	 * shared memory area.  The factory configured MAC
2827 	 * address is available from both NVRAM and the
2828 	 * shared memory area so we'll read the value from
2829 	 * shared memory for speed.
2830 	 */
2831 
2832 	mac_hi = REG_RD_IND(sc, sc->bce_shmem_base + BCE_PORT_HW_CFG_MAC_UPPER);
2833 	mac_lo = REG_RD_IND(sc, sc->bce_shmem_base + BCE_PORT_HW_CFG_MAC_LOWER);
2834 
2835 	if (mac_lo == 0 && mac_hi == 0) {
2836 		if_printf(&sc->arpcom.ac_if, "Invalid Ethernet address!\n");
2837 	} else {
2838 		sc->eaddr[0] = (u_char)(mac_hi >> 8);
2839 		sc->eaddr[1] = (u_char)(mac_hi >> 0);
2840 		sc->eaddr[2] = (u_char)(mac_lo >> 24);
2841 		sc->eaddr[3] = (u_char)(mac_lo >> 16);
2842 		sc->eaddr[4] = (u_char)(mac_lo >> 8);
2843 		sc->eaddr[5] = (u_char)(mac_lo >> 0);
2844 	}
2845 
2846 	DBPRINT(sc, BCE_INFO, "Permanent Ethernet address = %6D\n", sc->eaddr, ":");
2847 }
2848 
2849 
2850 /****************************************************************************/
2851 /* Program the MAC address.                                                 */
2852 /*                                                                          */
2853 /* Returns:                                                                 */
2854 /*   Nothing.                                                               */
2855 /****************************************************************************/
2856 static void
2857 bce_set_mac_addr(struct bce_softc *sc)
2858 {
2859 	const uint8_t *mac_addr = sc->eaddr;
2860 	uint32_t val;
2861 
2862 	DBPRINT(sc, BCE_INFO, "Setting Ethernet address = %6D\n",
2863 		sc->eaddr, ":");
2864 
2865 	val = (mac_addr[0] << 8) | mac_addr[1];
2866 	REG_WR(sc, BCE_EMAC_MAC_MATCH0, val);
2867 
2868 	val = (mac_addr[2] << 24) |
2869 	      (mac_addr[3] << 16) |
2870 	      (mac_addr[4] << 8) |
2871 	      mac_addr[5];
2872 	REG_WR(sc, BCE_EMAC_MAC_MATCH1, val);
2873 }
2874 
2875 
2876 /****************************************************************************/
2877 /* Stop the controller.                                                     */
2878 /*                                                                          */
2879 /* Returns:                                                                 */
2880 /*   Nothing.                                                               */
2881 /****************************************************************************/
2882 static void
2883 bce_stop(struct bce_softc *sc)
2884 {
2885 	struct ifnet *ifp = &sc->arpcom.ac_if;
2886 	struct mii_data *mii = device_get_softc(sc->bce_miibus);
2887 	struct ifmedia_entry *ifm;
2888 	int mtmp, itmp;
2889 
2890 	ASSERT_SERIALIZED(ifp->if_serializer);
2891 
2892 	callout_stop(&sc->bce_stat_ch);
2893 
2894 	/* Disable the transmit/receive blocks. */
2895 	REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS, 0x5ffffff);
2896 	REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS);
2897 	DELAY(20);
2898 
2899 	bce_disable_intr(sc);
2900 
2901 	/* Tell firmware that the driver is going away. */
2902 	bce_reset(sc, BCE_DRV_MSG_CODE_SUSPEND_NO_WOL);
2903 
2904 	/* Free the RX lists. */
2905 	bce_free_rx_chain(sc);
2906 
2907 	/* Free TX buffers. */
2908 	bce_free_tx_chain(sc);
2909 
2910 	/*
2911 	 * Isolate/power down the PHY, but leave the media selection
2912 	 * unchanged so that things will be put back to normal when
2913 	 * we bring the interface back up.
2914 	 */
2915 	itmp = ifp->if_flags;
2916 	ifp->if_flags |= IFF_UP;
2917 	ifm = mii->mii_media.ifm_cur;
2918 	mtmp = ifm->ifm_media;
2919 	ifm->ifm_media = IFM_ETHER | IFM_NONE;
2920 	mii_mediachg(mii);
2921 	ifm->ifm_media = mtmp;
2922 	ifp->if_flags = itmp;
2923 
2924 	sc->bce_link = 0;
2925 
2926 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2927 	ifp->if_timer = 0;
2928 
2929 	bce_mgmt_init(sc);
2930 }
2931 
2932 
2933 static int
2934 bce_reset(struct bce_softc *sc, uint32_t reset_code)
2935 {
2936 	uint32_t val;
2937 	int i, rc = 0;
2938 
2939 	/* Wait for pending PCI transactions to complete. */
2940 	REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS,
2941 	       BCE_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
2942 	       BCE_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
2943 	       BCE_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
2944 	       BCE_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
2945 	val = REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS);
2946 	DELAY(5);
2947 
2948 	/* Assume bootcode is running. */
2949 	sc->bce_fw_timed_out = 0;
2950 
2951 	/* Give the firmware a chance to prepare for the reset. */
2952 	rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT0 | reset_code);
2953 	if (rc) {
2954 		if_printf(&sc->arpcom.ac_if,
2955 			  "Firmware is not ready for reset\n");
2956 		return rc;
2957 	}
2958 
2959 	/* Set a firmware reminder that this is a soft reset. */
2960 	REG_WR_IND(sc, sc->bce_shmem_base + BCE_DRV_RESET_SIGNATURE,
2961 		   BCE_DRV_RESET_SIGNATURE_MAGIC);
2962 
2963 	/* Dummy read to force the chip to complete all current transactions. */
2964 	val = REG_RD(sc, BCE_MISC_ID);
2965 
2966 	/* Chip reset. */
2967 	val = BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
2968 	      BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
2969 	      BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
2970 	REG_WR(sc, BCE_PCICFG_MISC_CONFIG, val);
2971 
2972 	/* Allow up to 30us for reset to complete. */
2973 	for (i = 0; i < 10; i++) {
2974 		val = REG_RD(sc, BCE_PCICFG_MISC_CONFIG);
2975 		if ((val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
2976 			    BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) {
2977 			break;
2978 		}
2979 		DELAY(10);
2980 	}
2981 
2982 	/* Check that reset completed successfully. */
2983 	if (val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
2984 		   BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
2985 		if_printf(&sc->arpcom.ac_if, "Reset failed!\n");
2986 		return EBUSY;
2987 	}
2988 
2989 	/* Make sure byte swapping is properly configured. */
2990 	val = REG_RD(sc, BCE_PCI_SWAP_DIAG0);
2991 	if (val != 0x01020304) {
2992 		if_printf(&sc->arpcom.ac_if, "Byte swap is incorrect!\n");
2993 		return ENODEV;
2994 	}
2995 
2996 	/* Just completed a reset, assume that firmware is running again. */
2997 	sc->bce_fw_timed_out = 0;
2998 
2999 	/* Wait for the firmware to finish its initialization. */
3000 	rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT1 | reset_code);
3001 	if (rc) {
3002 		if_printf(&sc->arpcom.ac_if,
3003 			  "Firmware did not complete initialization!\n");
3004 	}
3005 	return rc;
3006 }
3007 
3008 
3009 static int
3010 bce_chipinit(struct bce_softc *sc)
3011 {
3012 	uint32_t val;
3013 	int rc = 0;
3014 
3015 	/* Make sure the interrupt is not active. */
3016 	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, BCE_PCICFG_INT_ACK_CMD_MASK_INT);
3017 
3018 	/*
3019 	 * Initialize DMA byte/word swapping, configure the number of DMA
3020 	 * channels and PCI clock compensation delay.
3021 	 */
3022 	val = BCE_DMA_CONFIG_DATA_BYTE_SWAP |
3023 	      BCE_DMA_CONFIG_DATA_WORD_SWAP |
3024 #if BYTE_ORDER == BIG_ENDIAN
3025 	      BCE_DMA_CONFIG_CNTL_BYTE_SWAP |
3026 #endif
3027 	      BCE_DMA_CONFIG_CNTL_WORD_SWAP |
3028 	      DMA_READ_CHANS << 12 |
3029 	      DMA_WRITE_CHANS << 16;
3030 
3031 	val |= (0x2 << 20) | BCE_DMA_CONFIG_CNTL_PCI_COMP_DLY;
3032 
3033 	if ((sc->bce_flags & BCE_PCIX_FLAG) && sc->bus_speed_mhz == 133)
3034 		val |= BCE_DMA_CONFIG_PCI_FAST_CLK_CMP;
3035 
3036 	/*
3037 	 * This setting resolves a problem observed on certain Intel PCI
3038 	 * chipsets that cannot handle multiple outstanding DMA operations.
3039 	 * See errata E9_5706A1_65.
3040 	 */
3041 	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706 &&
3042 	    BCE_CHIP_ID(sc) != BCE_CHIP_ID_5706_A0 &&
3043 	    !(sc->bce_flags & BCE_PCIX_FLAG))
3044 		val |= BCE_DMA_CONFIG_CNTL_PING_PONG_DMA;
3045 
3046 	REG_WR(sc, BCE_DMA_CONFIG, val);
3047 
3048 	/* Clear the PCI-X relaxed ordering bit. See errata E3_5708CA0_570. */
3049 	if (sc->bce_flags & BCE_PCIX_FLAG) {
3050 		uint16_t cmd;
3051 
3052 		cmd = pci_read_config(sc->bce_dev, BCE_PCI_PCIX_CMD, 2);
3053 		pci_write_config(sc->bce_dev, BCE_PCI_PCIX_CMD, cmd & ~0x2, 2);
3054 	}
3055 
3056 	/* Enable the RX_V2P and Context state machines before access. */
3057 	REG_WR(sc, BCE_MISC_ENABLE_SET_BITS,
3058 	       BCE_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
3059 	       BCE_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
3060 	       BCE_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
3061 
3062 	/* Initialize context mapping and zero out the quick contexts. */
3063 	bce_init_context(sc);
3064 
3065 	/* Initialize the on-boards CPUs */
3066 	bce_init_cpus(sc);
3067 
3068 	/* Prepare NVRAM for access. */
3069 	rc = bce_init_nvram(sc);
3070 	if (rc != 0)
3071 		return rc;
3072 
3073 	/* Set the kernel bypass block size */
3074 	val = REG_RD(sc, BCE_MQ_CONFIG);
3075 	val &= ~BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE;
3076 	val |= BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
3077 	REG_WR(sc, BCE_MQ_CONFIG, val);
3078 
3079 	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
3080 	REG_WR(sc, BCE_MQ_KNL_BYP_WIND_START, val);
3081 	REG_WR(sc, BCE_MQ_KNL_WIND_END, val);
3082 
3083 	/* Set the page size and clear the RV2P processor stall bits. */
3084 	val = (BCM_PAGE_BITS - 8) << 24;
3085 	REG_WR(sc, BCE_RV2P_CONFIG, val);
3086 
3087 	/* Configure page size. */
3088 	val = REG_RD(sc, BCE_TBDR_CONFIG);
3089 	val &= ~BCE_TBDR_CONFIG_PAGE_SIZE;
3090 	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
3091 	REG_WR(sc, BCE_TBDR_CONFIG, val);
3092 
3093 	return 0;
3094 }
3095 
3096 
3097 /****************************************************************************/
3098 /* Initialize the controller in preparation to send/receive traffic.        */
3099 /*                                                                          */
3100 /* Returns:                                                                 */
3101 /*   0 for success, positive value for failure.                             */
3102 /****************************************************************************/
3103 static int
3104 bce_blockinit(struct bce_softc *sc)
3105 {
3106 	uint32_t reg, val;
3107 	int rc = 0;
3108 
3109 	/* Load the hardware default MAC address. */
3110 	bce_set_mac_addr(sc);
3111 
3112 	/* Set the Ethernet backoff seed value */
3113 	val = sc->eaddr[0] + (sc->eaddr[1] << 8) + (sc->eaddr[2] << 16) +
3114 	      sc->eaddr[3] + (sc->eaddr[4] << 8) + (sc->eaddr[5] << 16);
3115 	REG_WR(sc, BCE_EMAC_BACKOFF_SEED, val);
3116 
3117 	sc->last_status_idx = 0;
3118 	sc->rx_mode = BCE_EMAC_RX_MODE_SORT_MODE;
3119 
3120 	/* Set up link change interrupt generation. */
3121 	REG_WR(sc, BCE_EMAC_ATTENTION_ENA, BCE_EMAC_ATTENTION_ENA_LINK);
3122 
3123 	/* Program the physical address of the status block. */
3124 	REG_WR(sc, BCE_HC_STATUS_ADDR_L, BCE_ADDR_LO(sc->status_block_paddr));
3125 	REG_WR(sc, BCE_HC_STATUS_ADDR_H, BCE_ADDR_HI(sc->status_block_paddr));
3126 
3127 	/* Program the physical address of the statistics block. */
3128 	REG_WR(sc, BCE_HC_STATISTICS_ADDR_L,
3129 	       BCE_ADDR_LO(sc->stats_block_paddr));
3130 	REG_WR(sc, BCE_HC_STATISTICS_ADDR_H,
3131 	       BCE_ADDR_HI(sc->stats_block_paddr));
3132 
3133 	/* Program various host coalescing parameters. */
3134 	REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
3135 	       (sc->bce_tx_quick_cons_trip_int << 16) |
3136 	       sc->bce_tx_quick_cons_trip);
3137 	REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
3138 	       (sc->bce_rx_quick_cons_trip_int << 16) |
3139 	       sc->bce_rx_quick_cons_trip);
3140 	REG_WR(sc, BCE_HC_COMP_PROD_TRIP,
3141 	       (sc->bce_comp_prod_trip_int << 16) | sc->bce_comp_prod_trip);
3142 	REG_WR(sc, BCE_HC_TX_TICKS,
3143 	       (sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks);
3144 	REG_WR(sc, BCE_HC_RX_TICKS,
3145 	       (sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks);
3146 	REG_WR(sc, BCE_HC_COM_TICKS,
3147 	       (sc->bce_com_ticks_int << 16) | sc->bce_com_ticks);
3148 	REG_WR(sc, BCE_HC_CMD_TICKS,
3149 	       (sc->bce_cmd_ticks_int << 16) | sc->bce_cmd_ticks);
3150 	REG_WR(sc, BCE_HC_STATS_TICKS, (sc->bce_stats_ticks & 0xffff00));
3151 	REG_WR(sc, BCE_HC_STAT_COLLECT_TICKS, 0xbb8);	/* 3ms */
3152 	REG_WR(sc, BCE_HC_CONFIG,
3153 	       BCE_HC_CONFIG_RX_TMR_MODE |
3154 	       BCE_HC_CONFIG_TX_TMR_MODE |
3155 	       BCE_HC_CONFIG_COLLECT_STATS);
3156 
3157 	/* Clear the internal statistics counters. */
3158 	REG_WR(sc, BCE_HC_COMMAND, BCE_HC_COMMAND_CLR_STAT_NOW);
3159 
3160 	/* Verify that bootcode is running. */
3161 	reg = REG_RD_IND(sc, sc->bce_shmem_base + BCE_DEV_INFO_SIGNATURE);
3162 
3163 	DBRUNIF(DB_RANDOMTRUE(bce_debug_bootcode_running_failure),
3164 		if_printf(&sc->arpcom.ac_if,
3165 			  "%s(%d): Simulating bootcode failure.\n",
3166 			  __FILE__, __LINE__);
3167 		reg = 0);
3168 
3169 	if ((reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
3170 	    BCE_DEV_INFO_SIGNATURE_MAGIC) {
3171 		if_printf(&sc->arpcom.ac_if,
3172 			  "Bootcode not running! Found: 0x%08X, "
3173 			  "Expected: 08%08X\n",
3174 			  reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK,
3175 			  BCE_DEV_INFO_SIGNATURE_MAGIC);
3176 		return ENODEV;
3177 	}
3178 
3179 	/* Check if any management firmware is running. */
3180 	reg = REG_RD_IND(sc, sc->bce_shmem_base + BCE_PORT_FEATURE);
3181 	if (reg & (BCE_PORT_FEATURE_ASF_ENABLED |
3182 		   BCE_PORT_FEATURE_IMD_ENABLED)) {
3183 		DBPRINT(sc, BCE_INFO, "Management F/W Enabled.\n");
3184 		sc->bce_flags |= BCE_MFW_ENABLE_FLAG;
3185 	}
3186 
3187 	sc->bce_fw_ver =
3188 		REG_RD_IND(sc, sc->bce_shmem_base + BCE_DEV_INFO_BC_REV);
3189 	DBPRINT(sc, BCE_INFO, "bootcode rev = 0x%08X\n", sc->bce_fw_ver);
3190 
3191 	/* Allow bootcode to apply any additional fixes before enabling MAC. */
3192 	rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT2 | BCE_DRV_MSG_CODE_RESET);
3193 
3194 	/* Enable link state change interrupt generation. */
3195 	REG_WR(sc, BCE_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);
3196 
3197 	/* Enable all remaining blocks in the MAC. */
3198 	REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, 0x5ffffff);
3199 	REG_RD(sc, BCE_MISC_ENABLE_SET_BITS);
3200 	DELAY(20);
3201 
3202 	return 0;
3203 }
3204 
3205 
3206 /****************************************************************************/
3207 /* Encapsulate an mbuf cluster into the rx_bd chain.                        */
3208 /*                                                                          */
3209 /* The NetXtreme II can support Jumbo frames by using multiple rx_bd's.     */
3210 /* This routine will map an mbuf cluster into 1 or more rx_bd's as          */
3211 /* necessary.                                                               */
3212 /*                                                                          */
3213 /* Returns:                                                                 */
3214 /*   0 for success, positive value for failure.                             */
3215 /****************************************************************************/
3216 static int
3217 bce_newbuf_std(struct bce_softc *sc, struct mbuf *m,
3218 	       uint16_t *prod, uint16_t *chain_prod, uint32_t *prod_bseq)
3219 {
3220 	bus_dmamap_t map;
3221 	struct bce_dmamap_arg ctx;
3222 	bus_dma_segment_t seg;
3223 	struct mbuf *m_new;
3224 	struct rx_bd *rxbd;
3225 	int error;
3226 #ifdef BCE_DEBUG
3227 	uint16_t debug_chain_prod = *chain_prod;
3228 #endif
3229 
3230 	/* Make sure the inputs are valid. */
3231 	DBRUNIF((*chain_prod > MAX_RX_BD),
3232 		if_printf(&sc->arpcom.ac_if, "%s(%d): "
3233 			  "RX producer out of range: 0x%04X > 0x%04X\n",
3234 			  __FILE__, __LINE__,
3235 			  *chain_prod, (uint16_t)MAX_RX_BD));
3236 
3237 	DBPRINT(sc, BCE_VERBOSE_RECV, "%s(enter): prod = 0x%04X, chain_prod = 0x%04X, "
3238 		"prod_bseq = 0x%08X\n", __func__, *prod, *chain_prod, *prod_bseq);
3239 
3240 	if (m == NULL) {
3241 		DBRUNIF(DB_RANDOMTRUE(bce_debug_mbuf_allocation_failure),
3242 			if_printf(&sc->arpcom.ac_if, "%s(%d): "
3243 				  "Simulating mbuf allocation failure.\n",
3244 				  __FILE__, __LINE__);
3245 			sc->mbuf_alloc_failed++;
3246 			return ENOBUFS);
3247 
3248 		/* This is a new mbuf allocation. */
3249 		m_new = m_getcl(MB_DONTWAIT, MT_DATA, M_PKTHDR);
3250 		if (m_new == NULL)
3251 			return ENOBUFS;
3252 		DBRUNIF(1, sc->rx_mbuf_alloc++);
3253 	} else {
3254 		m_new = m;
3255 		m_new->m_data = m_new->m_ext.ext_buf;
3256 	}
3257 	m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
3258 
3259 	/* Map the mbuf cluster into device memory. */
3260 	map = sc->rx_mbuf_map[*chain_prod];
3261 
3262 	ctx.bce_maxsegs = 1;
3263 	ctx.bce_segs = &seg;
3264 	error = bus_dmamap_load_mbuf(sc->rx_mbuf_tag, map, m_new,
3265 				     bce_dma_map_mbuf, &ctx, BUS_DMA_NOWAIT);
3266 	if (error || ctx.bce_maxsegs == 0) {
3267 		if_printf(&sc->arpcom.ac_if,
3268 			  "Error mapping mbuf into RX chain!\n");
3269 
3270 		if (m == NULL)
3271 			m_freem(m_new);
3272 
3273 		DBRUNIF(1, sc->rx_mbuf_alloc--);
3274 		return ENOBUFS;
3275 	}
3276 
3277 	/* Watch for overflow. */
3278 	DBRUNIF((sc->free_rx_bd > USABLE_RX_BD),
3279 		if_printf(&sc->arpcom.ac_if, "%s(%d): "
3280 			  "Too many free rx_bd (0x%04X > 0x%04X)!\n",
3281 			  __FILE__, __LINE__, sc->free_rx_bd,
3282 			  (uint16_t)USABLE_RX_BD));
3283 
3284 	/* Update some debug statistic counters */
3285 	DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark),
3286 		sc->rx_low_watermark = sc->free_rx_bd);
3287 	DBRUNIF((sc->free_rx_bd == 0), sc->rx_empty_count++);
3288 
3289 	/* Setup the rx_bd for the first segment. */
3290 	rxbd = &sc->rx_bd_chain[RX_PAGE(*chain_prod)][RX_IDX(*chain_prod)];
3291 
3292 	rxbd->rx_bd_haddr_lo = htole32(BCE_ADDR_LO(seg.ds_addr));
3293 	rxbd->rx_bd_haddr_hi = htole32(BCE_ADDR_HI(seg.ds_addr));
3294 	rxbd->rx_bd_len = htole32(seg.ds_len);
3295 	rxbd->rx_bd_flags = htole32(RX_BD_FLAGS_START);
3296 	*prod_bseq += seg.ds_len;
3297 
3298 	rxbd->rx_bd_flags |= htole32(RX_BD_FLAGS_END);
3299 
3300 	/* Save the mbuf and update our counter. */
3301 	sc->rx_mbuf_ptr[*chain_prod] = m_new;
3302 	sc->free_rx_bd--;
3303 
3304 	DBRUN(BCE_VERBOSE_RECV,
3305 	      bce_dump_rx_mbuf_chain(sc, debug_chain_prod, 1));
3306 
3307 	DBPRINT(sc, BCE_VERBOSE_RECV, "%s(exit): prod = 0x%04X, chain_prod = 0x%04X, "
3308 		"prod_bseq = 0x%08X\n", __func__, *prod, *chain_prod, *prod_bseq);
3309 
3310 	return 0;
3311 }
3312 
3313 
3314 /****************************************************************************/
3315 /* Allocate memory and initialize the TX data structures.                   */
3316 /*                                                                          */
3317 /* Returns:                                                                 */
3318 /*   0 for success, positive value for failure.                             */
3319 /****************************************************************************/
3320 static int
3321 bce_init_tx_chain(struct bce_softc *sc)
3322 {
3323 	struct tx_bd *txbd;
3324 	uint32_t val;
3325 	int i, rc = 0;
3326 
3327 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __func__);
3328 
3329 	/* Set the initial TX producer/consumer indices. */
3330 	sc->tx_prod = 0;
3331 	sc->tx_cons = 0;
3332 	sc->tx_prod_bseq   = 0;
3333 	sc->used_tx_bd = 0;
3334 	sc->max_tx_bd = USABLE_TX_BD;
3335 	DBRUNIF(1, sc->tx_hi_watermark = USABLE_TX_BD);
3336 	DBRUNIF(1, sc->tx_full_count = 0);
3337 
3338 	/*
3339 	 * The NetXtreme II supports a linked-list structre called
3340 	 * a Buffer Descriptor Chain (or BD chain).  A BD chain
3341 	 * consists of a series of 1 or more chain pages, each of which
3342 	 * consists of a fixed number of BD entries.
3343 	 * The last BD entry on each page is a pointer to the next page
3344 	 * in the chain, and the last pointer in the BD chain
3345 	 * points back to the beginning of the chain.
3346 	 */
3347 
3348 	/* Set the TX next pointer chain entries. */
3349 	for (i = 0; i < TX_PAGES; i++) {
3350 		int j;
3351 
3352 		txbd = &sc->tx_bd_chain[i][USABLE_TX_BD_PER_PAGE];
3353 
3354 		/* Check if we've reached the last page. */
3355 		if (i == (TX_PAGES - 1))
3356 			j = 0;
3357 		else
3358 			j = i + 1;
3359 
3360 		txbd->tx_bd_haddr_hi =
3361 			htole32(BCE_ADDR_HI(sc->tx_bd_chain_paddr[j]));
3362 		txbd->tx_bd_haddr_lo =
3363 			htole32(BCE_ADDR_LO(sc->tx_bd_chain_paddr[j]));
3364 	}
3365 
3366 	for (i = 0; i < TX_PAGES; ++i) {
3367 		bus_dmamap_sync(sc->tx_bd_chain_tag, sc->tx_bd_chain_map[i],
3368 				BUS_DMASYNC_PREWRITE);
3369 	}
3370 
3371 	/* Initialize the context ID for an L2 TX chain. */
3372 	val = BCE_L2CTX_TYPE_TYPE_L2;
3373 	val |= BCE_L2CTX_TYPE_SIZE_L2;
3374 	CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TYPE, val);
3375 
3376 	val = BCE_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
3377 	CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_CMD_TYPE, val);
3378 
3379 	/* Point the hardware to the first page in the chain. */
3380 	val = BCE_ADDR_HI(sc->tx_bd_chain_paddr[0]);
3381 	CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TBDR_BHADDR_HI, val);
3382 	val = BCE_ADDR_LO(sc->tx_bd_chain_paddr[0]);
3383 	CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TBDR_BHADDR_LO, val);
3384 
3385 	DBRUN(BCE_VERBOSE_SEND, bce_dump_tx_chain(sc, 0, TOTAL_TX_BD));
3386 
3387 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __func__);
3388 
3389 	return(rc);
3390 }
3391 
3392 
3393 /****************************************************************************/
3394 /* Free memory and clear the TX data structures.                            */
3395 /*                                                                          */
3396 /* Returns:                                                                 */
3397 /*   Nothing.                                                               */
3398 /****************************************************************************/
3399 static void
3400 bce_free_tx_chain(struct bce_softc *sc)
3401 {
3402 	int i;
3403 
3404 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __func__);
3405 
3406 	/* Unmap, unload, and free any mbufs still in the TX mbuf chain. */
3407 	for (i = 0; i < TOTAL_TX_BD; i++) {
3408 		if (sc->tx_mbuf_ptr[i] != NULL) {
3409 			bus_dmamap_sync(sc->tx_mbuf_tag, sc->tx_mbuf_map[i],
3410 					BUS_DMASYNC_POSTWRITE);
3411 			bus_dmamap_unload(sc->tx_mbuf_tag, sc->tx_mbuf_map[i]);
3412 			m_freem(sc->tx_mbuf_ptr[i]);
3413 			sc->tx_mbuf_ptr[i] = NULL;
3414 			DBRUNIF(1, sc->tx_mbuf_alloc--);
3415 		}
3416 	}
3417 
3418 	/* Clear each TX chain page. */
3419 	for (i = 0; i < TX_PAGES; i++)
3420 		bzero(sc->tx_bd_chain[i], BCE_TX_CHAIN_PAGE_SZ);
3421 
3422 	/* Check if we lost any mbufs in the process. */
3423 	DBRUNIF((sc->tx_mbuf_alloc),
3424 		if_printf(&sc->arpcom.ac_if,
3425 			  "%s(%d): Memory leak! "
3426 			  "Lost %d mbufs from tx chain!\n",
3427 			  __FILE__, __LINE__, sc->tx_mbuf_alloc));
3428 
3429 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __func__);
3430 }
3431 
3432 
3433 /****************************************************************************/
3434 /* Allocate memory and initialize the RX data structures.                   */
3435 /*                                                                          */
3436 /* Returns:                                                                 */
3437 /*   0 for success, positive value for failure.                             */
3438 /****************************************************************************/
3439 static int
3440 bce_init_rx_chain(struct bce_softc *sc)
3441 {
3442 	struct rx_bd *rxbd;
3443 	int i, rc = 0;
3444 	uint16_t prod, chain_prod;
3445 	uint32_t prod_bseq, val;
3446 
3447 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __func__);
3448 
3449 	/* Initialize the RX producer and consumer indices. */
3450 	sc->rx_prod = 0;
3451 	sc->rx_cons = 0;
3452 	sc->rx_prod_bseq = 0;
3453 	sc->free_rx_bd = USABLE_RX_BD;
3454 	sc->max_rx_bd = USABLE_RX_BD;
3455 	DBRUNIF(1, sc->rx_low_watermark = USABLE_RX_BD);
3456 	DBRUNIF(1, sc->rx_empty_count = 0);
3457 
3458 	/* Initialize the RX next pointer chain entries. */
3459 	for (i = 0; i < RX_PAGES; i++) {
3460 		int j;
3461 
3462 		rxbd = &sc->rx_bd_chain[i][USABLE_RX_BD_PER_PAGE];
3463 
3464 		/* Check if we've reached the last page. */
3465 		if (i == (RX_PAGES - 1))
3466 			j = 0;
3467 		else
3468 			j = i + 1;
3469 
3470 		/* Setup the chain page pointers. */
3471 		rxbd->rx_bd_haddr_hi =
3472 			htole32(BCE_ADDR_HI(sc->rx_bd_chain_paddr[j]));
3473 		rxbd->rx_bd_haddr_lo =
3474 			htole32(BCE_ADDR_LO(sc->rx_bd_chain_paddr[j]));
3475 	}
3476 
3477 	/* Initialize the context ID for an L2 RX chain. */
3478 	val = BCE_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
3479 	val |= BCE_L2CTX_CTX_TYPE_SIZE_L2;
3480 	val |= 0x02 << 8;
3481 	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_CTX_TYPE, val);
3482 
3483 	/* Point the hardware to the first page in the chain. */
3484 	/* XXX shouldn't this after RX descriptor initialization? */
3485 	val = BCE_ADDR_HI(sc->rx_bd_chain_paddr[0]);
3486 	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_NX_BDHADDR_HI, val);
3487 	val = BCE_ADDR_LO(sc->rx_bd_chain_paddr[0]);
3488 	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_NX_BDHADDR_LO, val);
3489 
3490 	/* Allocate mbuf clusters for the rx_bd chain. */
3491 	prod = prod_bseq = 0;
3492 	while (prod < TOTAL_RX_BD) {
3493 		chain_prod = RX_CHAIN_IDX(prod);
3494 		if (bce_newbuf_std(sc, NULL, &prod, &chain_prod, &prod_bseq)) {
3495 			if_printf(&sc->arpcom.ac_if,
3496 				  "Error filling RX chain: rx_bd[0x%04X]!\n",
3497 				  chain_prod);
3498 			rc = ENOBUFS;
3499 			break;
3500 		}
3501 		prod = NEXT_RX_BD(prod);
3502 	}
3503 
3504 	/* Save the RX chain producer index. */
3505 	sc->rx_prod = prod;
3506 	sc->rx_prod_bseq = prod_bseq;
3507 
3508 	for (i = 0; i < RX_PAGES; i++) {
3509 		bus_dmamap_sync(sc->rx_bd_chain_tag, sc->rx_bd_chain_map[i],
3510 				BUS_DMASYNC_PREWRITE);
3511 	}
3512 
3513 	/* Tell the chip about the waiting rx_bd's. */
3514 	REG_WR16(sc, MB_RX_CID_ADDR + BCE_L2CTX_HOST_BDIDX, sc->rx_prod);
3515 	REG_WR(sc, MB_RX_CID_ADDR + BCE_L2CTX_HOST_BSEQ, sc->rx_prod_bseq);
3516 
3517 	DBRUN(BCE_VERBOSE_RECV, bce_dump_rx_chain(sc, 0, TOTAL_RX_BD));
3518 
3519 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __func__);
3520 
3521 	return(rc);
3522 }
3523 
3524 
3525 /****************************************************************************/
3526 /* Free memory and clear the RX data structures.                            */
3527 /*                                                                          */
3528 /* Returns:                                                                 */
3529 /*   Nothing.                                                               */
3530 /****************************************************************************/
3531 static void
3532 bce_free_rx_chain(struct bce_softc *sc)
3533 {
3534 	int i;
3535 
3536 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __func__);
3537 
3538 	/* Free any mbufs still in the RX mbuf chain. */
3539 	for (i = 0; i < TOTAL_RX_BD; i++) {
3540 		if (sc->rx_mbuf_ptr[i] != NULL) {
3541 			bus_dmamap_sync(sc->rx_mbuf_tag, sc->rx_mbuf_map[i],
3542 					BUS_DMASYNC_POSTREAD);
3543 			bus_dmamap_unload(sc->rx_mbuf_tag, sc->rx_mbuf_map[i]);
3544 			m_freem(sc->rx_mbuf_ptr[i]);
3545 			sc->rx_mbuf_ptr[i] = NULL;
3546 			DBRUNIF(1, sc->rx_mbuf_alloc--);
3547 		}
3548 	}
3549 
3550 	/* Clear each RX chain page. */
3551 	for (i = 0; i < RX_PAGES; i++)
3552 		bzero(sc->rx_bd_chain[i], BCE_RX_CHAIN_PAGE_SZ);
3553 
3554 	/* Check if we lost any mbufs in the process. */
3555 	DBRUNIF((sc->rx_mbuf_alloc),
3556 		if_printf(&sc->arpcom.ac_if,
3557 			  "%s(%d): Memory leak! "
3558 			  "Lost %d mbufs from rx chain!\n",
3559 			  __FILE__, __LINE__, sc->rx_mbuf_alloc));
3560 
3561 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __func__);
3562 }
3563 
3564 
3565 /****************************************************************************/
3566 /* Set media options.                                                       */
3567 /*                                                                          */
3568 /* Returns:                                                                 */
3569 /*   0 for success, positive value for failure.                             */
3570 /****************************************************************************/
3571 static int
3572 bce_ifmedia_upd(struct ifnet *ifp)
3573 {
3574 	struct bce_softc *sc = ifp->if_softc;
3575 	struct mii_data *mii = device_get_softc(sc->bce_miibus);
3576 
3577 	/*
3578 	 * 'mii' will be NULL, when this function is called on following
3579 	 * code path: bce_attach() -> bce_mgmt_init()
3580 	 */
3581 	if (mii != NULL) {
3582 		/* Make sure the MII bus has been enumerated. */
3583 		sc->bce_link = 0;
3584 		if (mii->mii_instance) {
3585 			struct mii_softc *miisc;
3586 
3587 			LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
3588 				mii_phy_reset(miisc);
3589 		}
3590 		mii_mediachg(mii);
3591 	}
3592 	return 0;
3593 }
3594 
3595 
3596 /****************************************************************************/
3597 /* Reports current media status.                                            */
3598 /*                                                                          */
3599 /* Returns:                                                                 */
3600 /*   Nothing.                                                               */
3601 /****************************************************************************/
3602 static void
3603 bce_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
3604 {
3605 	struct bce_softc *sc = ifp->if_softc;
3606 	struct mii_data *mii = device_get_softc(sc->bce_miibus);
3607 
3608 	mii_pollstat(mii);
3609 	ifmr->ifm_active = mii->mii_media_active;
3610 	ifmr->ifm_status = mii->mii_media_status;
3611 }
3612 
3613 
3614 /****************************************************************************/
3615 /* Handles PHY generated interrupt events.                                  */
3616 /*                                                                          */
3617 /* Returns:                                                                 */
3618 /*   Nothing.                                                               */
3619 /****************************************************************************/
3620 static void
3621 bce_phy_intr(struct bce_softc *sc)
3622 {
3623 	uint32_t new_link_state, old_link_state;
3624 	struct ifnet *ifp = &sc->arpcom.ac_if;
3625 
3626 	ASSERT_SERIALIZED(ifp->if_serializer);
3627 
3628 	new_link_state = sc->status_block->status_attn_bits &
3629 			 STATUS_ATTN_BITS_LINK_STATE;
3630 	old_link_state = sc->status_block->status_attn_bits_ack &
3631 			 STATUS_ATTN_BITS_LINK_STATE;
3632 
3633 	/* Handle any changes if the link state has changed. */
3634 	if (new_link_state != old_link_state) {	/* XXX redundant? */
3635 		DBRUN(BCE_VERBOSE_INTR, bce_dump_status_block(sc));
3636 
3637 		sc->bce_link = 0;
3638 		callout_stop(&sc->bce_stat_ch);
3639 		bce_tick_serialized(sc);
3640 
3641 		/* Update the status_attn_bits_ack field in the status block. */
3642 		if (new_link_state) {
3643 			REG_WR(sc, BCE_PCICFG_STATUS_BIT_SET_CMD,
3644 			       STATUS_ATTN_BITS_LINK_STATE);
3645 			if (bootverbose)
3646 				if_printf(ifp, "Link is now UP.\n");
3647 		} else {
3648 			REG_WR(sc, BCE_PCICFG_STATUS_BIT_CLEAR_CMD,
3649 			       STATUS_ATTN_BITS_LINK_STATE);
3650 			if (bootverbose)
3651 				if_printf(ifp, "Link is now DOWN.\n");
3652 		}
3653 	}
3654 
3655 	/* Acknowledge the link change interrupt. */
3656 	REG_WR(sc, BCE_EMAC_STATUS, BCE_EMAC_STATUS_LINK_CHANGE);
3657 }
3658 
3659 
3660 /****************************************************************************/
3661 /* Handles received frame interrupt events.                                 */
3662 /*                                                                          */
3663 /* Returns:                                                                 */
3664 /*   Nothing.                                                               */
3665 /****************************************************************************/
3666 static void
3667 bce_rx_intr(struct bce_softc *sc, int count)
3668 {
3669 	struct status_block *sblk = sc->status_block;
3670 	struct ifnet *ifp = &sc->arpcom.ac_if;
3671 	uint16_t hw_cons, sw_cons, sw_chain_cons, sw_prod, sw_chain_prod;
3672 	uint32_t sw_prod_bseq;
3673 	int i;
3674 
3675 	ASSERT_SERIALIZED(ifp->if_serializer);
3676 
3677 	DBRUNIF(1, sc->rx_interrupts++);
3678 
3679 	/* Prepare the RX chain pages to be accessed by the host CPU. */
3680 	for (i = 0; i < RX_PAGES; i++) {
3681 		bus_dmamap_sync(sc->rx_bd_chain_tag,
3682 				sc->rx_bd_chain_map[i], BUS_DMASYNC_POSTREAD);
3683 	}
3684 
3685 	/* Get the hardware's view of the RX consumer index. */
3686 	hw_cons = sc->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
3687 	if ((hw_cons & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
3688 		hw_cons++;
3689 
3690 	/* Get working copies of the driver's view of the RX indices. */
3691 	sw_cons = sc->rx_cons;
3692 	sw_prod = sc->rx_prod;
3693 	sw_prod_bseq = sc->rx_prod_bseq;
3694 
3695 	DBPRINT(sc, BCE_INFO_RECV, "%s(enter): sw_prod = 0x%04X, "
3696 		"sw_cons = 0x%04X, sw_prod_bseq = 0x%08X\n",
3697 		__func__, sw_prod, sw_cons, sw_prod_bseq);
3698 
3699 	/* Prevent speculative reads from getting ahead of the status block. */
3700 	bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
3701 			  BUS_SPACE_BARRIER_READ);
3702 
3703 	/* Update some debug statistics counters */
3704 	DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark),
3705 		sc->rx_low_watermark = sc->free_rx_bd);
3706 	DBRUNIF((sc->free_rx_bd == 0), sc->rx_empty_count++);
3707 
3708 	/* Scan through the receive chain as long as there is work to do. */
3709 	while (sw_cons != hw_cons) {
3710 		struct mbuf *m = NULL;
3711 		struct l2_fhdr *l2fhdr = NULL;
3712 		struct rx_bd *rxbd;
3713 		unsigned int len;
3714 		uint32_t status = 0;
3715 
3716 #ifdef foo /* DEVICE_POLLING */
3717 		/*
3718 		 * Even if polling(4) is enabled, we can't just reap
3719 		 * 'count' RX descriptors and leave.  It seems that RX
3720 		 * engine would be left in a wired state, if we broke
3721 		 * out the loop in the middle.
3722 		 */
3723 		if (count >= 0 && count-- == 0)
3724 			break;
3725 #endif
3726 
3727 		/*
3728 		 * Convert the producer/consumer indices
3729 		 * to an actual rx_bd index.
3730 		 */
3731 		sw_chain_cons = RX_CHAIN_IDX(sw_cons);
3732 		sw_chain_prod = RX_CHAIN_IDX(sw_prod);
3733 
3734 		/* Get the used rx_bd. */
3735 		rxbd = &sc->rx_bd_chain[RX_PAGE(sw_chain_cons)]
3736 				       [RX_IDX(sw_chain_cons)];
3737 		sc->free_rx_bd++;
3738 
3739 		DBRUN(BCE_VERBOSE_RECV,
3740 		      if_printf(ifp, "%s(): ", __func__);
3741 		      bce_dump_rxbd(sc, sw_chain_cons, rxbd));
3742 
3743 		/* The mbuf is stored with the last rx_bd entry of a packet. */
3744 		if (sc->rx_mbuf_ptr[sw_chain_cons] != NULL) {
3745 			/* Validate that this is the last rx_bd. */
3746 			DBRUNIF((!(rxbd->rx_bd_flags & RX_BD_FLAGS_END)),
3747 				if_printf(ifp, "%s(%d): "
3748 				"Unexpected mbuf found in rx_bd[0x%04X]!\n",
3749 				__FILE__, __LINE__, sw_chain_cons);
3750 				bce_breakpoint(sc));
3751 
3752 			/*
3753 			 * ToDo: If the received packet is small enough
3754 			 * to fit into a single, non-M_EXT mbuf,
3755 			 * allocate a new mbuf here, copy the data to
3756 			 * that mbuf, and recycle the mapped jumbo frame.
3757 			 */
3758 
3759 			/* Unmap the mbuf from DMA space. */
3760 			bus_dmamap_sync(sc->rx_mbuf_tag,
3761 					sc->rx_mbuf_map[sw_chain_cons],
3762 					BUS_DMASYNC_POSTREAD);
3763 			bus_dmamap_unload(sc->rx_mbuf_tag,
3764 					  sc->rx_mbuf_map[sw_chain_cons]);
3765 
3766 			/* Remove the mbuf from the driver's chain. */
3767 			m = sc->rx_mbuf_ptr[sw_chain_cons];
3768 			sc->rx_mbuf_ptr[sw_chain_cons] = NULL;
3769 
3770 			/*
3771 			 * Frames received on the NetXteme II are prepended
3772 			 * with an l2_fhdr structure which provides status
3773 			 * information about the received frame (including
3774 			 * VLAN tags and checksum info).  The frames are also
3775 			 * automatically adjusted to align the IP header
3776 			 * (i.e. two null bytes are inserted before the
3777 			 * Ethernet header).
3778 			 */
3779 			l2fhdr = mtod(m, struct l2_fhdr *);
3780 
3781 			len = l2fhdr->l2_fhdr_pkt_len;
3782 			status = l2fhdr->l2_fhdr_status;
3783 
3784 			DBRUNIF(DB_RANDOMTRUE(bce_debug_l2fhdr_status_check),
3785 				if_printf(ifp,
3786 				"Simulating l2_fhdr status error.\n");
3787 				status = status | L2_FHDR_ERRORS_PHY_DECODE);
3788 
3789 			/* Watch for unusual sized frames. */
3790 			DBRUNIF((len < BCE_MIN_MTU ||
3791 				 len > BCE_MAX_JUMBO_ETHER_MTU_VLAN),
3792 				if_printf(ifp,
3793 				"%s(%d): Unusual frame size found. "
3794 				"Min(%d), Actual(%d), Max(%d)\n",
3795 				__FILE__, __LINE__,
3796 				(int)BCE_MIN_MTU, len,
3797 				(int)BCE_MAX_JUMBO_ETHER_MTU_VLAN);
3798 				bce_dump_mbuf(sc, m);
3799 		 		bce_breakpoint(sc));
3800 
3801 			len -= ETHER_CRC_LEN;
3802 
3803 			/* Check the received frame for errors. */
3804 			if (status & (L2_FHDR_ERRORS_BAD_CRC |
3805 				      L2_FHDR_ERRORS_PHY_DECODE |
3806 				      L2_FHDR_ERRORS_ALIGNMENT |
3807 				      L2_FHDR_ERRORS_TOO_SHORT |
3808 				      L2_FHDR_ERRORS_GIANT_FRAME)) {
3809 				ifp->if_ierrors++;
3810 				DBRUNIF(1, sc->l2fhdr_status_errors++);
3811 
3812 				/* Reuse the mbuf for a new frame. */
3813 				if (bce_newbuf_std(sc, m, &sw_prod,
3814 						   &sw_chain_prod,
3815 						   &sw_prod_bseq)) {
3816 					DBRUNIF(1, bce_breakpoint(sc));
3817 					/* XXX */
3818 					panic("%s: Can't reuse RX mbuf!\n",
3819 					      ifp->if_xname);
3820 				}
3821 				m = NULL;
3822 				goto bce_rx_int_next_rx;
3823 			}
3824 
3825 			/*
3826 			 * Get a new mbuf for the rx_bd.   If no new
3827 			 * mbufs are available then reuse the current mbuf,
3828 			 * log an ierror on the interface, and generate
3829 			 * an error in the system log.
3830 			 */
3831 			if (bce_newbuf_std(sc, NULL, &sw_prod, &sw_chain_prod,
3832 					   &sw_prod_bseq)) {
3833 				DBRUN(BCE_WARN,
3834 				      if_printf(ifp,
3835 				      "%s(%d): Failed to allocate new mbuf, "
3836 				      "incoming frame dropped!\n",
3837 				      __FILE__, __LINE__));
3838 
3839 				ifp->if_ierrors++;
3840 
3841 				/* Try and reuse the exisitng mbuf. */
3842 				if (bce_newbuf_std(sc, m, &sw_prod,
3843 						   &sw_chain_prod,
3844 						   &sw_prod_bseq)) {
3845 					DBRUNIF(1, bce_breakpoint(sc));
3846 					/* XXX */
3847 					panic("%s: Double mbuf allocation "
3848 					      "failure!", ifp->if_xname);
3849 				}
3850 				m = NULL;
3851 				goto bce_rx_int_next_rx;
3852 			}
3853 
3854 			/*
3855 			 * Skip over the l2_fhdr when passing
3856 			 * the data up the stack.
3857 			 */
3858 			m_adj(m, sizeof(struct l2_fhdr) + ETHER_ALIGN);
3859 
3860 			m->m_pkthdr.len = m->m_len = len;
3861 			m->m_pkthdr.rcvif = ifp;
3862 
3863 			DBRUN(BCE_VERBOSE_RECV,
3864 			      struct ether_header *eh;
3865 			      eh = mtod(m, struct ether_header *);
3866 			      if_printf(ifp, "%s(): to: %6D, from: %6D, "
3867 			      		"type: 0x%04X\n", __func__,
3868 					eh->ether_dhost, ":",
3869 					eh->ether_shost, ":",
3870 					htons(eh->ether_type)));
3871 
3872 			/* Validate the checksum if offload enabled. */
3873 			if (ifp->if_capenable & IFCAP_RXCSUM) {
3874 				/* Check for an IP datagram. */
3875 				if (status & L2_FHDR_STATUS_IP_DATAGRAM) {
3876 					m->m_pkthdr.csum_flags |=
3877 						CSUM_IP_CHECKED;
3878 
3879 					/* Check if the IP checksum is valid. */
3880 					if ((l2fhdr->l2_fhdr_ip_xsum ^
3881 					     0xffff) == 0) {
3882 						m->m_pkthdr.csum_flags |=
3883 							CSUM_IP_VALID;
3884 					} else {
3885 						DBPRINT(sc, BCE_WARN_RECV,
3886 							"%s(): Invalid IP checksum = 0x%04X!\n",
3887 							__func__, l2fhdr->l2_fhdr_ip_xsum);
3888 					}
3889 				}
3890 
3891 				/* Check for a valid TCP/UDP frame. */
3892 				if (status & (L2_FHDR_STATUS_TCP_SEGMENT |
3893 					      L2_FHDR_STATUS_UDP_DATAGRAM)) {
3894 
3895 					/* Check for a good TCP/UDP checksum. */
3896 					if ((status &
3897 					     (L2_FHDR_ERRORS_TCP_XSUM |
3898 					      L2_FHDR_ERRORS_UDP_XSUM)) == 0) {
3899 						m->m_pkthdr.csum_data =
3900 						l2fhdr->l2_fhdr_tcp_udp_xsum;
3901 						m->m_pkthdr.csum_flags |=
3902 							CSUM_DATA_VALID |
3903 							CSUM_PSEUDO_HDR;
3904 					} else {
3905 						DBPRINT(sc, BCE_WARN_RECV,
3906 							"%s(): Invalid TCP/UDP checksum = 0x%04X!\n",
3907 							__func__, l2fhdr->l2_fhdr_tcp_udp_xsum);
3908 					}
3909 				}
3910 			}
3911 
3912 			ifp->if_ipackets++;
3913 bce_rx_int_next_rx:
3914 			sw_prod = NEXT_RX_BD(sw_prod);
3915 		}
3916 
3917 		sw_cons = NEXT_RX_BD(sw_cons);
3918 
3919 		/* If we have a packet, pass it up the stack */
3920 		if (m) {
3921 			DBPRINT(sc, BCE_VERBOSE_RECV,
3922 				"%s(): Passing received frame up.\n", __func__);
3923 
3924 			if (status & L2_FHDR_STATUS_L2_VLAN_TAG)
3925 				VLAN_INPUT_TAG(m, l2fhdr->l2_fhdr_vlan_tag);
3926 			else
3927 				ifp->if_input(ifp, m);
3928 
3929 			DBRUNIF(1, sc->rx_mbuf_alloc--);
3930 		}
3931 
3932 		/*
3933 		 * If polling(4) is not enabled, refresh hw_cons to see
3934 		 * whether there's new work.
3935 		 *
3936 		 * If polling(4) is enabled, i.e count >= 0, refreshing
3937 		 * should not be performed, so that we would not spend
3938 		 * too much time in RX processing.
3939 		 */
3940 		if (count < 0 && sw_cons == hw_cons) {
3941 			hw_cons = sc->hw_rx_cons =
3942 				sblk->status_rx_quick_consumer_index0;
3943 			if ((hw_cons & USABLE_RX_BD_PER_PAGE) ==
3944 			    USABLE_RX_BD_PER_PAGE)
3945 				hw_cons++;
3946 		}
3947 
3948 		/*
3949 		 * Prevent speculative reads from getting ahead
3950 		 * of the status block.
3951 		 */
3952 		bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
3953 				  BUS_SPACE_BARRIER_READ);
3954 	}
3955 
3956 	for (i = 0; i < RX_PAGES; i++) {
3957 		bus_dmamap_sync(sc->rx_bd_chain_tag,
3958 				sc->rx_bd_chain_map[i], BUS_DMASYNC_PREWRITE);
3959 	}
3960 
3961 	sc->rx_cons = sw_cons;
3962 	sc->rx_prod = sw_prod;
3963 	sc->rx_prod_bseq = sw_prod_bseq;
3964 
3965 	REG_WR16(sc, MB_RX_CID_ADDR + BCE_L2CTX_HOST_BDIDX, sc->rx_prod);
3966 	REG_WR(sc, MB_RX_CID_ADDR + BCE_L2CTX_HOST_BSEQ, sc->rx_prod_bseq);
3967 
3968 	DBPRINT(sc, BCE_INFO_RECV, "%s(exit): rx_prod = 0x%04X, "
3969 		"rx_cons = 0x%04X, rx_prod_bseq = 0x%08X\n",
3970 		__func__, sc->rx_prod, sc->rx_cons, sc->rx_prod_bseq);
3971 }
3972 
3973 
3974 /****************************************************************************/
3975 /* Handles transmit completion interrupt events.                            */
3976 /*                                                                          */
3977 /* Returns:                                                                 */
3978 /*   Nothing.                                                               */
3979 /****************************************************************************/
3980 static void
3981 bce_tx_intr(struct bce_softc *sc)
3982 {
3983 	struct status_block *sblk = sc->status_block;
3984 	struct ifnet *ifp = &sc->arpcom.ac_if;
3985 	uint16_t hw_tx_cons, sw_tx_cons, sw_tx_chain_cons;
3986 
3987 	ASSERT_SERIALIZED(ifp->if_serializer);
3988 
3989 	DBRUNIF(1, sc->tx_interrupts++);
3990 
3991 	/* Get the hardware's view of the TX consumer index. */
3992 	hw_tx_cons = sc->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
3993 
3994 	/* Skip to the next entry if this is a chain page pointer. */
3995 	if ((hw_tx_cons & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
3996 		hw_tx_cons++;
3997 
3998 	sw_tx_cons = sc->tx_cons;
3999 
4000 	/* Prevent speculative reads from getting ahead of the status block. */
4001 	bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
4002 			  BUS_SPACE_BARRIER_READ);
4003 
4004 	/* Cycle through any completed TX chain page entries. */
4005 	while (sw_tx_cons != hw_tx_cons) {
4006 #ifdef BCE_DEBUG
4007 		struct tx_bd *txbd = NULL;
4008 #endif
4009 		sw_tx_chain_cons = TX_CHAIN_IDX(sw_tx_cons);
4010 
4011 		DBPRINT(sc, BCE_INFO_SEND,
4012 			"%s(): hw_tx_cons = 0x%04X, sw_tx_cons = 0x%04X, "
4013 			"sw_tx_chain_cons = 0x%04X\n",
4014 			__func__, hw_tx_cons, sw_tx_cons, sw_tx_chain_cons);
4015 
4016 		DBRUNIF((sw_tx_chain_cons > MAX_TX_BD),
4017 			if_printf(ifp, "%s(%d): "
4018 				  "TX chain consumer out of range! "
4019 				  " 0x%04X > 0x%04X\n",
4020 				  __FILE__, __LINE__, sw_tx_chain_cons,
4021 				  (int)MAX_TX_BD);
4022 			bce_breakpoint(sc));
4023 
4024 		DBRUNIF(1, txbd = &sc->tx_bd_chain[TX_PAGE(sw_tx_chain_cons)]
4025 				[TX_IDX(sw_tx_chain_cons)]);
4026 
4027 		DBRUNIF((txbd == NULL),
4028 			if_printf(ifp, "%s(%d): "
4029 				  "Unexpected NULL tx_bd[0x%04X]!\n",
4030 				  __FILE__, __LINE__, sw_tx_chain_cons);
4031 			bce_breakpoint(sc));
4032 
4033 		DBRUN(BCE_INFO_SEND,
4034 		      if_printf(ifp, "%s(): ", __func__);
4035 		      bce_dump_txbd(sc, sw_tx_chain_cons, txbd));
4036 
4037 		/*
4038 		 * Free the associated mbuf. Remember
4039 		 * that only the last tx_bd of a packet
4040 		 * has an mbuf pointer and DMA map.
4041 		 */
4042 		if (sc->tx_mbuf_ptr[sw_tx_chain_cons] != NULL) {
4043 			/* Validate that this is the last tx_bd. */
4044 			DBRUNIF((!(txbd->tx_bd_flags & TX_BD_FLAGS_END)),
4045 				if_printf(ifp, "%s(%d): "
4046 				"tx_bd END flag not set but "
4047 				"txmbuf == NULL!\n", __FILE__, __LINE__);
4048 				bce_breakpoint(sc));
4049 
4050 			DBRUN(BCE_INFO_SEND,
4051 			      if_printf(ifp, "%s(): Unloading map/freeing mbuf "
4052 			      		"from tx_bd[0x%04X]\n", __func__,
4053 					sw_tx_chain_cons));
4054 
4055 			/* Unmap the mbuf. */
4056 			bus_dmamap_unload(sc->tx_mbuf_tag,
4057 					  sc->tx_mbuf_map[sw_tx_chain_cons]);
4058 
4059 			/* Free the mbuf. */
4060 			m_freem(sc->tx_mbuf_ptr[sw_tx_chain_cons]);
4061 			sc->tx_mbuf_ptr[sw_tx_chain_cons] = NULL;
4062 			DBRUNIF(1, sc->tx_mbuf_alloc--);
4063 
4064 			ifp->if_opackets++;
4065 		}
4066 
4067 		sc->used_tx_bd--;
4068 		sw_tx_cons = NEXT_TX_BD(sw_tx_cons);
4069 
4070 		if (sw_tx_cons == hw_tx_cons) {
4071 			/* Refresh hw_cons to see if there's new work. */
4072 			hw_tx_cons = sc->hw_tx_cons =
4073 				sblk->status_tx_quick_consumer_index0;
4074 			if ((hw_tx_cons & USABLE_TX_BD_PER_PAGE) ==
4075 			    USABLE_TX_BD_PER_PAGE)
4076 				hw_tx_cons++;
4077 		}
4078 
4079 		/*
4080 		 * Prevent speculative reads from getting
4081 		 * ahead of the status block.
4082 		 */
4083 		bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
4084 				  BUS_SPACE_BARRIER_READ);
4085 	}
4086 
4087 	if (sc->used_tx_bd == 0) {
4088 		/* Clear the TX timeout timer. */
4089 		ifp->if_timer = 0;
4090 	}
4091 
4092 	/* Clear the tx hardware queue full flag. */
4093 	if (sc->max_tx_bd - sc->used_tx_bd >= BCE_TX_SPARE_SPACE) {
4094 		DBRUNIF((ifp->if_flags & IFF_OACTIVE),
4095 			DBPRINT(sc, BCE_WARN_SEND,
4096 				"%s(): Open TX chain! %d/%d (used/total)\n",
4097 				__func__, sc->used_tx_bd, sc->max_tx_bd));
4098 		ifp->if_flags &= ~IFF_OACTIVE;
4099 	}
4100 	sc->tx_cons = sw_tx_cons;
4101 }
4102 
4103 
4104 /****************************************************************************/
4105 /* Disables interrupt generation.                                           */
4106 /*                                                                          */
4107 /* Returns:                                                                 */
4108 /*   Nothing.                                                               */
4109 /****************************************************************************/
4110 static void
4111 bce_disable_intr(struct bce_softc *sc)
4112 {
4113 	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, BCE_PCICFG_INT_ACK_CMD_MASK_INT);
4114 	REG_RD(sc, BCE_PCICFG_INT_ACK_CMD);
4115 	lwkt_serialize_handler_disable(sc->arpcom.ac_if.if_serializer);
4116 }
4117 
4118 
4119 /****************************************************************************/
4120 /* Enables interrupt generation.                                            */
4121 /*                                                                          */
4122 /* Returns:                                                                 */
4123 /*   Nothing.                                                               */
4124 /****************************************************************************/
4125 static void
4126 bce_enable_intr(struct bce_softc *sc)
4127 {
4128 	uint32_t val;
4129 
4130 	lwkt_serialize_handler_enable(sc->arpcom.ac_if.if_serializer);
4131 
4132 	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
4133 	       BCE_PCICFG_INT_ACK_CMD_INDEX_VALID |
4134 	       BCE_PCICFG_INT_ACK_CMD_MASK_INT | sc->last_status_idx);
4135 
4136 	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
4137 	       BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx);
4138 
4139 	val = REG_RD(sc, BCE_HC_COMMAND);
4140 	REG_WR(sc, BCE_HC_COMMAND, val | BCE_HC_COMMAND_COAL_NOW);
4141 }
4142 
4143 
4144 /****************************************************************************/
4145 /* Handles controller initialization.                                       */
4146 /*                                                                          */
4147 /* Returns:                                                                 */
4148 /*   Nothing.                                                               */
4149 /****************************************************************************/
4150 static void
4151 bce_init(void *xsc)
4152 {
4153 	struct bce_softc *sc = xsc;
4154 	struct ifnet *ifp = &sc->arpcom.ac_if;
4155 	uint32_t ether_mtu;
4156 	int error;
4157 
4158 	ASSERT_SERIALIZED(ifp->if_serializer);
4159 
4160 	/* Check if the driver is still running and bail out if it is. */
4161 	if (ifp->if_flags & IFF_RUNNING)
4162 		return;
4163 
4164 	bce_stop(sc);
4165 
4166 	error = bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
4167 	if (error) {
4168 		if_printf(ifp, "Controller reset failed!\n");
4169 		goto back;
4170 	}
4171 
4172 	error = bce_chipinit(sc);
4173 	if (error) {
4174 		if_printf(ifp, "Controller initialization failed!\n");
4175 		goto back;
4176 	}
4177 
4178 	error = bce_blockinit(sc);
4179 	if (error) {
4180 		if_printf(ifp, "Block initialization failed!\n");
4181 		goto back;
4182 	}
4183 
4184 	/* Load our MAC address. */
4185 	bcopy(IF_LLADDR(ifp), sc->eaddr, ETHER_ADDR_LEN);
4186 	bce_set_mac_addr(sc);
4187 
4188 	/* Calculate and program the Ethernet MTU size. */
4189 	ether_mtu = ETHER_HDR_LEN + EVL_ENCAPLEN + ifp->if_mtu + ETHER_CRC_LEN;
4190 
4191 	DBPRINT(sc, BCE_INFO, "%s(): setting mtu = %d\n", __func__, ether_mtu);
4192 
4193 	/*
4194 	 * Program the mtu, enabling jumbo frame
4195 	 * support if necessary.  Also set the mbuf
4196 	 * allocation count for RX frames.
4197 	 */
4198 	if (ether_mtu > ETHER_MAX_LEN + EVL_ENCAPLEN) {
4199 #ifdef notyet
4200 		REG_WR(sc, BCE_EMAC_RX_MTU_SIZE,
4201 		       min(ether_mtu, BCE_MAX_JUMBO_ETHER_MTU) |
4202 		       BCE_EMAC_RX_MTU_SIZE_JUMBO_ENA);
4203 		sc->mbuf_alloc_size = MJUM9BYTES;
4204 #else
4205 		panic("jumbo buffer is not supported yet\n");
4206 #endif
4207 	} else {
4208 		REG_WR(sc, BCE_EMAC_RX_MTU_SIZE, ether_mtu);
4209 		sc->mbuf_alloc_size = MCLBYTES;
4210 	}
4211 
4212 	/* Calculate the RX Ethernet frame size for rx_bd's. */
4213 	sc->max_frame_size = sizeof(struct l2_fhdr) + 2 + ether_mtu + 8;
4214 
4215 	DBPRINT(sc, BCE_INFO,
4216 		"%s(): mclbytes = %d, mbuf_alloc_size = %d, "
4217 		"max_frame_size = %d\n",
4218 		__func__, (int)MCLBYTES, sc->mbuf_alloc_size,
4219 		sc->max_frame_size);
4220 
4221 	/* Program appropriate promiscuous/multicast filtering. */
4222 	bce_set_rx_mode(sc);
4223 
4224 	/* Init RX buffer descriptor chain. */
4225 	bce_init_rx_chain(sc);	/* XXX return value */
4226 
4227 	/* Init TX buffer descriptor chain. */
4228 	bce_init_tx_chain(sc);	/* XXX return value */
4229 
4230 #ifdef DEVICE_POLLING
4231 	/* Disable interrupts if we are polling. */
4232 	if (ifp->if_flags & IFF_POLLING) {
4233 		bce_disable_intr(sc);
4234 
4235 		REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
4236 		       (1 << 16) | sc->bce_rx_quick_cons_trip);
4237 		REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
4238 		       (1 << 16) | sc->bce_tx_quick_cons_trip);
4239 	} else
4240 #endif
4241 	/* Enable host interrupts. */
4242 	bce_enable_intr(sc);
4243 
4244 	bce_ifmedia_upd(ifp);
4245 
4246 	ifp->if_flags |= IFF_RUNNING;
4247 	ifp->if_flags &= ~IFF_OACTIVE;
4248 
4249 	callout_reset(&sc->bce_stat_ch, hz, bce_tick, sc);
4250 back:
4251 	if (error)
4252 		bce_stop(sc);
4253 }
4254 
4255 
4256 /****************************************************************************/
4257 /* Initialize the controller just enough so that any management firmware    */
4258 /* running on the device will continue to operate corectly.                 */
4259 /*                                                                          */
4260 /* Returns:                                                                 */
4261 /*   Nothing.                                                               */
4262 /****************************************************************************/
4263 static void
4264 bce_mgmt_init(struct bce_softc *sc)
4265 {
4266 	struct ifnet *ifp = &sc->arpcom.ac_if;
4267 	uint32_t val;
4268 
4269 	/* Check if the driver is still running and bail out if it is. */
4270 	if (ifp->if_flags & IFF_RUNNING)
4271 		return;
4272 
4273 	/* Initialize the on-boards CPUs */
4274 	bce_init_cpus(sc);
4275 
4276 	/* Set the page size and clear the RV2P processor stall bits. */
4277 	val = (BCM_PAGE_BITS - 8) << 24;
4278 	REG_WR(sc, BCE_RV2P_CONFIG, val);
4279 
4280 	/* Enable all critical blocks in the MAC. */
4281 	REG_WR(sc, BCE_MISC_ENABLE_SET_BITS,
4282 	       BCE_MISC_ENABLE_SET_BITS_RX_V2P_ENABLE |
4283 	       BCE_MISC_ENABLE_SET_BITS_RX_DMA_ENABLE |
4284 	       BCE_MISC_ENABLE_SET_BITS_COMPLETION_ENABLE);
4285 	REG_RD(sc, BCE_MISC_ENABLE_SET_BITS);
4286 	DELAY(20);
4287 
4288 	bce_ifmedia_upd(ifp);
4289 }
4290 
4291 
4292 /****************************************************************************/
4293 /* Encapsultes an mbuf cluster into the tx_bd chain structure and makes the */
4294 /* memory visible to the controller.                                        */
4295 /*                                                                          */
4296 /* Returns:                                                                 */
4297 /*   0 for success, positive value for failure.                             */
4298 /****************************************************************************/
4299 static int
4300 bce_encap(struct bce_softc *sc, struct mbuf **m_head)
4301 {
4302 	struct bce_dmamap_arg ctx;
4303 	bus_dma_segment_t segs[BCE_MAX_SEGMENTS];
4304 	bus_dmamap_t map, tmp_map;
4305 	struct mbuf *m0 = *m_head;
4306 	struct tx_bd *txbd = NULL;
4307 	uint16_t vlan_tag = 0, flags = 0;
4308 	uint16_t chain_prod, chain_prod_start, prod;
4309 	uint32_t prod_bseq;
4310 	int i, error, maxsegs;
4311 #ifdef BCE_DEBUG
4312 	uint16_t debug_prod;
4313 #endif
4314 
4315 	/* Transfer any checksum offload flags to the bd. */
4316 	if (m0->m_pkthdr.csum_flags) {
4317 		if (m0->m_pkthdr.csum_flags & CSUM_IP)
4318 			flags |= TX_BD_FLAGS_IP_CKSUM;
4319 		if (m0->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
4320 			flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
4321 	}
4322 
4323 	/* Transfer any VLAN tags to the bd. */
4324 	if (m0->m_flags & M_VLANTAG) {
4325 		flags |= TX_BD_FLAGS_VLAN_TAG;
4326 		vlan_tag = m0->m_pkthdr.ether_vlantag;
4327 	}
4328 
4329 	prod = sc->tx_prod;
4330 	chain_prod_start = chain_prod = TX_CHAIN_IDX(prod);
4331 
4332 	/* Map the mbuf into DMAable memory. */
4333 	map = sc->tx_mbuf_map[chain_prod_start];
4334 
4335 	maxsegs = sc->max_tx_bd - sc->used_tx_bd;
4336 	KASSERT(maxsegs >= BCE_TX_SPARE_SPACE,
4337 		("not enough segements %d\n", maxsegs));
4338 	if (maxsegs > BCE_MAX_SEGMENTS)
4339 		maxsegs = BCE_MAX_SEGMENTS;
4340 
4341 	/* Map the mbuf into our DMA address space. */
4342 	ctx.bce_maxsegs = maxsegs;
4343 	ctx.bce_segs = segs;
4344 	error = bus_dmamap_load_mbuf(sc->tx_mbuf_tag, map, m0,
4345 				     bce_dma_map_mbuf, &ctx, BUS_DMA_NOWAIT);
4346 	if (error == EFBIG || ctx.bce_maxsegs == 0) {
4347 		DBPRINT(sc, BCE_WARN, "%s(): fragmented mbuf\n", __func__);
4348 		DBRUNIF(1, bce_dump_mbuf(sc, m0););
4349 
4350 		m0 = m_defrag(*m_head, MB_DONTWAIT);
4351 		if (m0 == NULL) {
4352 			error = ENOBUFS;
4353 			goto back;
4354 		}
4355 		*m_head = m0;
4356 
4357 		ctx.bce_maxsegs = maxsegs;
4358 		ctx.bce_segs = segs;
4359 		error = bus_dmamap_load_mbuf(sc->tx_mbuf_tag, map, m0,
4360 					     bce_dma_map_mbuf, &ctx,
4361 					     BUS_DMA_NOWAIT);
4362 		if (error || ctx.bce_maxsegs == 0) {
4363 			if_printf(&sc->arpcom.ac_if,
4364 				  "Error mapping mbuf into TX chain\n");
4365 			if (error == 0)
4366 				error = EFBIG;
4367 			goto back;
4368 		}
4369 	} else if (error) {
4370 		if_printf(&sc->arpcom.ac_if,
4371 			  "Error mapping mbuf into TX chain\n");
4372 		goto back;
4373 	}
4374 
4375 	/* prod points to an empty tx_bd at this point. */
4376 	prod_bseq  = sc->tx_prod_bseq;
4377 
4378 #ifdef BCE_DEBUG
4379 	debug_prod = chain_prod;
4380 #endif
4381 
4382 	DBPRINT(sc, BCE_INFO_SEND,
4383 		"%s(): Start: prod = 0x%04X, chain_prod = %04X, "
4384 		"prod_bseq = 0x%08X\n",
4385 		__func__, prod, chain_prod, prod_bseq);
4386 
4387 	/*
4388 	 * Cycle through each mbuf segment that makes up
4389 	 * the outgoing frame, gathering the mapping info
4390 	 * for that segment and creating a tx_bd to for
4391 	 * the mbuf.
4392 	 */
4393 	for (i = 0; i < ctx.bce_maxsegs; i++) {
4394 		chain_prod = TX_CHAIN_IDX(prod);
4395 		txbd= &sc->tx_bd_chain[TX_PAGE(chain_prod)][TX_IDX(chain_prod)];
4396 
4397 		txbd->tx_bd_haddr_lo = htole32(BCE_ADDR_LO(segs[i].ds_addr));
4398 		txbd->tx_bd_haddr_hi = htole32(BCE_ADDR_HI(segs[i].ds_addr));
4399 		txbd->tx_bd_mss_nbytes = htole16(segs[i].ds_len);
4400 		txbd->tx_bd_vlan_tag = htole16(vlan_tag);
4401 		txbd->tx_bd_flags = htole16(flags);
4402 		prod_bseq += segs[i].ds_len;
4403 		if (i == 0)
4404 			txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_START);
4405 		prod = NEXT_TX_BD(prod);
4406 	}
4407 
4408 	/* Set the END flag on the last TX buffer descriptor. */
4409 	txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_END);
4410 
4411 	DBRUN(BCE_EXCESSIVE_SEND,
4412 	      bce_dump_tx_chain(sc, debug_prod, ctx.bce_maxsegs));
4413 
4414 	DBPRINT(sc, BCE_INFO_SEND,
4415 		"%s(): End: prod = 0x%04X, chain_prod = %04X, "
4416 		"prod_bseq = 0x%08X\n",
4417 		__func__, prod, chain_prod, prod_bseq);
4418 
4419 	bus_dmamap_sync(sc->tx_mbuf_tag, map, BUS_DMASYNC_PREWRITE);
4420 
4421 	/*
4422 	 * Ensure that the mbuf pointer for this transmission
4423 	 * is placed at the array index of the last
4424 	 * descriptor in this chain.  This is done
4425 	 * because a single map is used for all
4426 	 * segments of the mbuf and we don't want to
4427 	 * unload the map before all of the segments
4428 	 * have been freed.
4429 	 */
4430 	sc->tx_mbuf_ptr[chain_prod] = m0;
4431 
4432 	tmp_map = sc->tx_mbuf_map[chain_prod];
4433 	sc->tx_mbuf_map[chain_prod] = map;
4434 	sc->tx_mbuf_map[chain_prod_start] = tmp_map;
4435 
4436 	sc->used_tx_bd += ctx.bce_maxsegs;
4437 
4438 	/* Update some debug statistic counters */
4439 	DBRUNIF((sc->used_tx_bd > sc->tx_hi_watermark),
4440 		sc->tx_hi_watermark = sc->used_tx_bd);
4441 	DBRUNIF((sc->used_tx_bd == sc->max_tx_bd), sc->tx_full_count++);
4442 	DBRUNIF(1, sc->tx_mbuf_alloc++);
4443 
4444 	DBRUN(BCE_VERBOSE_SEND,
4445 	      bce_dump_tx_mbuf_chain(sc, chain_prod, ctx.bce_maxsegs));
4446 
4447 	/* prod points to the next free tx_bd at this point. */
4448 	sc->tx_prod = prod;
4449 	sc->tx_prod_bseq = prod_bseq;
4450 back:
4451 	if (error) {
4452 		m_freem(*m_head);
4453 		*m_head = NULL;
4454 	}
4455 	return error;
4456 }
4457 
4458 
4459 /****************************************************************************/
4460 /* Main transmit routine when called from another routine with a lock.      */
4461 /*                                                                          */
4462 /* Returns:                                                                 */
4463 /*   Nothing.                                                               */
4464 /****************************************************************************/
4465 static void
4466 bce_start(struct ifnet *ifp)
4467 {
4468 	struct bce_softc *sc = ifp->if_softc;
4469 	int count = 0;
4470 
4471 	ASSERT_SERIALIZED(ifp->if_serializer);
4472 
4473 	/* If there's no link or the transmit queue is empty then just exit. */
4474 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING ||
4475 	    !sc->bce_link)
4476 		return;
4477 
4478 	DBPRINT(sc, BCE_INFO_SEND,
4479 		"%s(): Start: tx_prod = 0x%04X, tx_chain_prod = %04X, "
4480 		"tx_prod_bseq = 0x%08X\n",
4481 		__func__,
4482 		sc->tx_prod, TX_CHAIN_IDX(sc->tx_prod), sc->tx_prod_bseq);
4483 
4484 	for (;;) {
4485 		struct mbuf *m_head;
4486 
4487 		/*
4488 		 * We keep BCE_TX_SPARE_SPACE entries, so bce_encap() is
4489 		 * unlikely to fail.
4490 		 */
4491 		if (sc->max_tx_bd - sc->used_tx_bd < BCE_TX_SPARE_SPACE) {
4492 			ifp->if_flags |= IFF_OACTIVE;
4493 			break;
4494 		}
4495 
4496 		/* Check for any frames to send. */
4497 		m_head = ifq_dequeue(&ifp->if_snd, NULL);
4498 		if (m_head == NULL)
4499 			break;
4500 
4501 		/*
4502 		 * Pack the data into the transmit ring. If we
4503 		 * don't have room, place the mbuf back at the
4504 		 * head of the queue and set the OACTIVE flag
4505 		 * to wait for the NIC to drain the chain.
4506 		 */
4507 		if (bce_encap(sc, &m_head)) {
4508 			ifp->if_flags |= IFF_OACTIVE;
4509 			DBPRINT(sc, BCE_INFO_SEND,
4510 				"TX chain is closed for business! "
4511 				"Total tx_bd used = %d\n",
4512 				sc->used_tx_bd);
4513 			break;
4514 		}
4515 
4516 		count++;
4517 
4518 		/* Send a copy of the frame to any BPF listeners. */
4519 		ETHER_BPF_MTAP(ifp, m_head);
4520 	}
4521 
4522 	if (count == 0) {
4523 		/* no packets were dequeued */
4524 		DBPRINT(sc, BCE_VERBOSE_SEND,
4525 			"%s(): No packets were dequeued\n", __func__);
4526 		return;
4527 	}
4528 
4529 	DBPRINT(sc, BCE_INFO_SEND,
4530 		"%s(): End: tx_prod = 0x%04X, tx_chain_prod = 0x%04X, "
4531 		"tx_prod_bseq = 0x%08X\n",
4532 		__func__,
4533 		sc->tx_prod, TX_CHAIN_IDX(sc->tx_prod), sc->tx_prod_bseq);
4534 
4535 	/* Start the transmit. */
4536 	REG_WR16(sc, MB_TX_CID_ADDR + BCE_L2CTX_TX_HOST_BIDX, sc->tx_prod);
4537 	REG_WR(sc, MB_TX_CID_ADDR + BCE_L2CTX_TX_HOST_BSEQ, sc->tx_prod_bseq);
4538 
4539 	/* Set the tx timeout. */
4540 	ifp->if_timer = BCE_TX_TIMEOUT;
4541 }
4542 
4543 
4544 /****************************************************************************/
4545 /* Handles any IOCTL calls from the operating system.                       */
4546 /*                                                                          */
4547 /* Returns:                                                                 */
4548 /*   0 for success, positive value for failure.                             */
4549 /****************************************************************************/
4550 static int
4551 bce_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
4552 {
4553 	struct bce_softc *sc = ifp->if_softc;
4554 	struct ifreq *ifr = (struct ifreq *)data;
4555 	struct mii_data *mii;
4556 	int mask, error = 0;
4557 
4558 	ASSERT_SERIALIZED(ifp->if_serializer);
4559 
4560 	switch(command) {
4561 	case SIOCSIFMTU:
4562 		/* Check that the MTU setting is supported. */
4563 		if (ifr->ifr_mtu < BCE_MIN_MTU ||
4564 #ifdef notyet
4565 		    ifr->ifr_mtu > BCE_MAX_JUMBO_MTU
4566 #else
4567 		    ifr->ifr_mtu > ETHERMTU
4568 #endif
4569 		   ) {
4570 			error = EINVAL;
4571 			break;
4572 		}
4573 
4574 		DBPRINT(sc, BCE_INFO, "Setting new MTU of %d\n", ifr->ifr_mtu);
4575 
4576 		ifp->if_mtu = ifr->ifr_mtu;
4577 		ifp->if_flags &= ~IFF_RUNNING;	/* Force reinitialize */
4578 		bce_init(sc);
4579 		break;
4580 
4581 	case SIOCSIFFLAGS:
4582 		if (ifp->if_flags & IFF_UP) {
4583 			if (ifp->if_flags & IFF_RUNNING) {
4584 				mask = ifp->if_flags ^ sc->bce_if_flags;
4585 
4586 				if (mask & (IFF_PROMISC | IFF_ALLMULTI))
4587 					bce_set_rx_mode(sc);
4588 			} else {
4589 				bce_init(sc);
4590 			}
4591 		} else if (ifp->if_flags & IFF_RUNNING) {
4592 			bce_stop(sc);
4593 		}
4594 		sc->bce_if_flags = ifp->if_flags;
4595 		break;
4596 
4597 	case SIOCADDMULTI:
4598 	case SIOCDELMULTI:
4599 		if (ifp->if_flags & IFF_RUNNING)
4600 			bce_set_rx_mode(sc);
4601 		break;
4602 
4603 	case SIOCSIFMEDIA:
4604 	case SIOCGIFMEDIA:
4605 		DBPRINT(sc, BCE_VERBOSE, "bce_phy_flags = 0x%08X\n",
4606 			sc->bce_phy_flags);
4607 		DBPRINT(sc, BCE_VERBOSE, "Copper media set/get\n");
4608 
4609 		mii = device_get_softc(sc->bce_miibus);
4610 		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
4611 		break;
4612 
4613 	case SIOCSIFCAP:
4614 		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
4615 		DBPRINT(sc, BCE_INFO, "Received SIOCSIFCAP = 0x%08X\n",
4616 			(uint32_t) mask);
4617 
4618 		if (mask & IFCAP_HWCSUM) {
4619 			ifp->if_capenable ^= IFCAP_HWCSUM;
4620 			if (IFCAP_HWCSUM & ifp->if_capenable)
4621 				ifp->if_hwassist = BCE_IF_HWASSIST;
4622 			else
4623 				ifp->if_hwassist = 0;
4624 		}
4625 		break;
4626 
4627 	default:
4628 		error = ether_ioctl(ifp, command, data);
4629 		break;
4630 	}
4631 	return error;
4632 }
4633 
4634 
4635 /****************************************************************************/
4636 /* Transmit timeout handler.                                                */
4637 /*                                                                          */
4638 /* Returns:                                                                 */
4639 /*   Nothing.                                                               */
4640 /****************************************************************************/
4641 static void
4642 bce_watchdog(struct ifnet *ifp)
4643 {
4644 	struct bce_softc *sc = ifp->if_softc;
4645 
4646 	ASSERT_SERIALIZED(ifp->if_serializer);
4647 
4648 	DBRUN(BCE_VERBOSE_SEND,
4649 	      bce_dump_driver_state(sc);
4650 	      bce_dump_status_block(sc));
4651 
4652 	/*
4653 	 * If we are in this routine because of pause frames, then
4654 	 * don't reset the hardware.
4655 	 */
4656 	if (REG_RD(sc, BCE_EMAC_TX_STATUS) & BCE_EMAC_TX_STATUS_XOFFED)
4657 		return;
4658 
4659 	if_printf(ifp, "Watchdog timeout occurred, resetting!\n");
4660 
4661 	/* DBRUN(BCE_FATAL, bce_breakpoint(sc)); */
4662 
4663 	ifp->if_flags &= ~IFF_RUNNING;	/* Force reinitialize */
4664 	bce_init(sc);
4665 
4666 	ifp->if_oerrors++;
4667 
4668 	if (!ifq_is_empty(&ifp->if_snd))
4669 		ifp->if_start(ifp);
4670 }
4671 
4672 
4673 #ifdef DEVICE_POLLING
4674 
4675 static void
4676 bce_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
4677 {
4678 	struct bce_softc *sc = ifp->if_softc;
4679 	struct status_block *sblk = sc->status_block;
4680 
4681 	ASSERT_SERIALIZED(ifp->if_serializer);
4682 
4683 	switch (cmd) {
4684 	case POLL_REGISTER:
4685 		bce_disable_intr(sc);
4686 
4687 		REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
4688 		       (1 << 16) | sc->bce_rx_quick_cons_trip);
4689 		REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
4690 		       (1 << 16) | sc->bce_tx_quick_cons_trip);
4691 		return;
4692 	case POLL_DEREGISTER:
4693 		bce_enable_intr(sc);
4694 
4695 		REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
4696 		       (sc->bce_tx_quick_cons_trip_int << 16) |
4697 		       sc->bce_tx_quick_cons_trip);
4698 		REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
4699 		       (sc->bce_rx_quick_cons_trip_int << 16) |
4700 		       sc->bce_rx_quick_cons_trip);
4701 		return;
4702 	default:
4703 		break;
4704 	}
4705 
4706 	bus_dmamap_sync(sc->status_tag, sc->status_map, BUS_DMASYNC_POSTREAD);
4707 
4708 	if (cmd == POLL_AND_CHECK_STATUS) {
4709 		uint32_t status_attn_bits;
4710 
4711 		status_attn_bits = sblk->status_attn_bits;
4712 
4713 		DBRUNIF(DB_RANDOMTRUE(bce_debug_unexpected_attention),
4714 			if_printf(ifp,
4715 			"Simulating unexpected status attention bit set.");
4716 			status_attn_bits |= STATUS_ATTN_BITS_PARITY_ERROR);
4717 
4718 		/* Was it a link change interrupt? */
4719 		if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
4720 		    (sblk->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE))
4721 			bce_phy_intr(sc);
4722 
4723 		/*
4724 		 * If any other attention is asserted then
4725 		 * the chip is toast.
4726 		 */
4727 		if ((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) !=
4728 		     (sblk->status_attn_bits_ack &
4729 		      ~STATUS_ATTN_BITS_LINK_STATE)) {
4730 			DBRUN(1, sc->unexpected_attentions++);
4731 
4732 			if_printf(ifp, "Fatal attention detected: 0x%08X\n",
4733 				  sblk->status_attn_bits);
4734 
4735 			DBRUN(BCE_FATAL,
4736 			if (bce_debug_unexpected_attention == 0)
4737 				bce_breakpoint(sc));
4738 
4739 			bce_init(sc);
4740 			return;
4741 		}
4742 	}
4743 
4744 	/* Check for any completed RX frames. */
4745 	if (sblk->status_rx_quick_consumer_index0 != sc->hw_rx_cons)
4746 		bce_rx_intr(sc, count);
4747 
4748 	/* Check for any completed TX frames. */
4749 	if (sblk->status_tx_quick_consumer_index0 != sc->hw_tx_cons)
4750 		bce_tx_intr(sc);
4751 
4752 	bus_dmamap_sync(sc->status_tag,	sc->status_map, BUS_DMASYNC_PREWRITE);
4753 
4754 	/* Check for new frames to transmit. */
4755 	if (!ifq_is_empty(&ifp->if_snd))
4756 		ifp->if_start(ifp);
4757 }
4758 
4759 #endif	/* DEVICE_POLLING */
4760 
4761 
4762 #if 0
4763 static inline int
4764 bce_has_work(struct bce_softc *sc)
4765 {
4766 	struct status_block *stat = sc->status_block;
4767 
4768 	if ((stat->status_rx_quick_consumer_index0 != sc->hw_rx_cons) ||
4769 	    (stat->status_tx_quick_consumer_index0 != sc->hw_tx_cons))
4770 		return 1;
4771 
4772 	if (((stat->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) != 0) !=
4773 	    bp->link_up)
4774 		return 1;
4775 
4776 	return 0;
4777 }
4778 #endif
4779 
4780 
4781 /*
4782  * Interrupt handler.
4783  */
4784 /****************************************************************************/
4785 /* Main interrupt entry point.  Verifies that the controller generated the  */
4786 /* interrupt and then calls a separate routine for handle the various       */
4787 /* interrupt causes (PHY, TX, RX).                                          */
4788 /*                                                                          */
4789 /* Returns:                                                                 */
4790 /*   0 for success, positive value for failure.                             */
4791 /****************************************************************************/
4792 static void
4793 bce_intr(void *xsc)
4794 {
4795 	struct bce_softc *sc = xsc;
4796 	struct ifnet *ifp = &sc->arpcom.ac_if;
4797 	struct status_block *sblk;
4798 
4799 	ASSERT_SERIALIZED(ifp->if_serializer);
4800 
4801 	DBPRINT(sc, BCE_EXCESSIVE, "Entering %s()\n", __func__);
4802 	DBRUNIF(1, sc->interrupts_generated++);
4803 
4804 	bus_dmamap_sync(sc->status_tag, sc->status_map, BUS_DMASYNC_POSTREAD);
4805 	sblk = sc->status_block;
4806 
4807 	/*
4808 	 * If the hardware status block index matches the last value
4809 	 * read by the driver and we haven't asserted our interrupt
4810 	 * then there's nothing to do.
4811 	 */
4812 	if (sblk->status_idx == sc->last_status_idx &&
4813 	    (REG_RD(sc, BCE_PCICFG_MISC_STATUS) &
4814 	     BCE_PCICFG_MISC_STATUS_INTA_VALUE))
4815 		return;
4816 
4817 	/* Ack the interrupt and stop others from occuring. */
4818 	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
4819 	       BCE_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
4820 	       BCE_PCICFG_INT_ACK_CMD_MASK_INT);
4821 
4822 	/* Keep processing data as long as there is work to do. */
4823 	for (;;) {
4824 		uint32_t status_attn_bits;
4825 
4826 		status_attn_bits = sblk->status_attn_bits;
4827 
4828 		DBRUNIF(DB_RANDOMTRUE(bce_debug_unexpected_attention),
4829 			if_printf(ifp,
4830 			"Simulating unexpected status attention bit set.");
4831 			status_attn_bits |= STATUS_ATTN_BITS_PARITY_ERROR);
4832 
4833 		/* Was it a link change interrupt? */
4834 		if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
4835 		    (sblk->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE))
4836 			bce_phy_intr(sc);
4837 
4838 		/*
4839 		 * If any other attention is asserted then
4840 		 * the chip is toast.
4841 		 */
4842 		if ((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) !=
4843 		     (sblk->status_attn_bits_ack &
4844 		      ~STATUS_ATTN_BITS_LINK_STATE)) {
4845 			DBRUN(1, sc->unexpected_attentions++);
4846 
4847 			if_printf(ifp, "Fatal attention detected: 0x%08X\n",
4848 				  sblk->status_attn_bits);
4849 
4850 			DBRUN(BCE_FATAL,
4851 			if (bce_debug_unexpected_attention == 0)
4852 				bce_breakpoint(sc));
4853 
4854 			bce_init(sc);
4855 			return;
4856 		}
4857 
4858 		/* Check for any completed RX frames. */
4859 		if (sblk->status_rx_quick_consumer_index0 != sc->hw_rx_cons)
4860 			bce_rx_intr(sc, -1);
4861 
4862 		/* Check for any completed TX frames. */
4863 		if (sblk->status_tx_quick_consumer_index0 != sc->hw_tx_cons)
4864 			bce_tx_intr(sc);
4865 
4866 		/*
4867 		 * Save the status block index value
4868 		 * for use during the next interrupt.
4869 		 */
4870 		sc->last_status_idx = sblk->status_idx;
4871 
4872 		/*
4873 		 * Prevent speculative reads from getting
4874 		 * ahead of the status block.
4875 		 */
4876 		bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
4877 				  BUS_SPACE_BARRIER_READ);
4878 
4879 		/*
4880 		 * If there's no work left then exit the
4881 		 * interrupt service routine.
4882 		 */
4883 		if (sblk->status_rx_quick_consumer_index0 == sc->hw_rx_cons &&
4884 		    sblk->status_tx_quick_consumer_index0 == sc->hw_tx_cons)
4885 			break;
4886 	}
4887 
4888 	bus_dmamap_sync(sc->status_tag,	sc->status_map, BUS_DMASYNC_PREWRITE);
4889 
4890 	/* Re-enable interrupts. */
4891 	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
4892 	       BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx |
4893 	       BCE_PCICFG_INT_ACK_CMD_MASK_INT);
4894 	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
4895 	       BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx);
4896 
4897 	/* Handle any frames that arrived while handling the interrupt. */
4898 	if (!ifq_is_empty(&ifp->if_snd))
4899 		ifp->if_start(ifp);
4900 }
4901 
4902 
4903 /****************************************************************************/
4904 /* Programs the various packet receive modes (broadcast and multicast).     */
4905 /*                                                                          */
4906 /* Returns:                                                                 */
4907 /*   Nothing.                                                               */
4908 /****************************************************************************/
4909 static void
4910 bce_set_rx_mode(struct bce_softc *sc)
4911 {
4912 	struct ifnet *ifp = &sc->arpcom.ac_if;
4913 	struct ifmultiaddr *ifma;
4914 	uint32_t hashes[NUM_MC_HASH_REGISTERS] = { 0, 0, 0, 0, 0, 0, 0, 0 };
4915 	uint32_t rx_mode, sort_mode;
4916 	int h, i;
4917 
4918 	ASSERT_SERIALIZED(ifp->if_serializer);
4919 
4920 	/* Initialize receive mode default settings. */
4921 	rx_mode = sc->rx_mode &
4922 		  ~(BCE_EMAC_RX_MODE_PROMISCUOUS |
4923 		    BCE_EMAC_RX_MODE_KEEP_VLAN_TAG);
4924 	sort_mode = 1 | BCE_RPM_SORT_USER0_BC_EN;
4925 
4926 	/*
4927 	 * ASF/IPMI/UMP firmware requires that VLAN tag stripping
4928 	 * be enbled.
4929 	 */
4930 	if (!(BCE_IF_CAPABILITIES & IFCAP_VLAN_HWTAGGING) &&
4931 	    !(sc->bce_flags & BCE_MFW_ENABLE_FLAG))
4932 		rx_mode |= BCE_EMAC_RX_MODE_KEEP_VLAN_TAG;
4933 
4934 	/*
4935 	 * Check for promiscuous, all multicast, or selected
4936 	 * multicast address filtering.
4937 	 */
4938 	if (ifp->if_flags & IFF_PROMISC) {
4939 		DBPRINT(sc, BCE_INFO, "Enabling promiscuous mode.\n");
4940 
4941 		/* Enable promiscuous mode. */
4942 		rx_mode |= BCE_EMAC_RX_MODE_PROMISCUOUS;
4943 		sort_mode |= BCE_RPM_SORT_USER0_PROM_EN;
4944 	} else if (ifp->if_flags & IFF_ALLMULTI) {
4945 		DBPRINT(sc, BCE_INFO, "Enabling all multicast mode.\n");
4946 
4947 		/* Enable all multicast addresses. */
4948 		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
4949 			REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4),
4950 			       0xffffffff);
4951 		}
4952 		sort_mode |= BCE_RPM_SORT_USER0_MC_EN;
4953 	} else {
4954 		/* Accept one or more multicast(s). */
4955 		DBPRINT(sc, BCE_INFO, "Enabling selective multicast mode.\n");
4956 
4957 		LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
4958 			if (ifma->ifma_addr->sa_family != AF_LINK)
4959 				continue;
4960 			h = ether_crc32_le(
4961 			    LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
4962 			    ETHER_ADDR_LEN) & 0xFF;
4963 			hashes[(h & 0xE0) >> 5] |= 1 << (h & 0x1F);
4964 		}
4965 
4966 		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
4967 			REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4),
4968 			       hashes[i]);
4969 		}
4970 		sort_mode |= BCE_RPM_SORT_USER0_MC_HSH_EN;
4971 	}
4972 
4973 	/* Only make changes if the recive mode has actually changed. */
4974 	if (rx_mode != sc->rx_mode) {
4975 		DBPRINT(sc, BCE_VERBOSE, "Enabling new receive mode: 0x%08X\n",
4976 			rx_mode);
4977 
4978 		sc->rx_mode = rx_mode;
4979 		REG_WR(sc, BCE_EMAC_RX_MODE, rx_mode);
4980 	}
4981 
4982 	/* Disable and clear the exisitng sort before enabling a new sort. */
4983 	REG_WR(sc, BCE_RPM_SORT_USER0, 0x0);
4984 	REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode);
4985 	REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode | BCE_RPM_SORT_USER0_ENA);
4986 }
4987 
4988 
4989 /****************************************************************************/
4990 /* Called periodically to updates statistics from the controllers           */
4991 /* statistics block.                                                        */
4992 /*                                                                          */
4993 /* Returns:                                                                 */
4994 /*   Nothing.                                                               */
4995 /****************************************************************************/
4996 static void
4997 bce_stats_update(struct bce_softc *sc)
4998 {
4999 	struct ifnet *ifp = &sc->arpcom.ac_if;
5000 	struct statistics_block *stats = sc->stats_block;
5001 
5002 	DBPRINT(sc, BCE_EXCESSIVE, "Entering %s()\n", __func__);
5003 
5004 	ASSERT_SERIALIZED(ifp->if_serializer);
5005 
5006 	/*
5007 	 * Update the interface statistics from the hardware statistics.
5008 	 */
5009 	ifp->if_collisions = (u_long)stats->stat_EtherStatsCollisions;
5010 
5011 	ifp->if_ierrors = (u_long)stats->stat_EtherStatsUndersizePkts +
5012 			  (u_long)stats->stat_EtherStatsOverrsizePkts +
5013 			  (u_long)stats->stat_IfInMBUFDiscards +
5014 			  (u_long)stats->stat_Dot3StatsAlignmentErrors +
5015 			  (u_long)stats->stat_Dot3StatsFCSErrors;
5016 
5017 	ifp->if_oerrors =
5018 	(u_long)stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors +
5019 	(u_long)stats->stat_Dot3StatsExcessiveCollisions +
5020 	(u_long)stats->stat_Dot3StatsLateCollisions;
5021 
5022 	/*
5023 	 * Certain controllers don't report carrier sense errors correctly.
5024 	 * See errata E11_5708CA0_1165.
5025 	 */
5026 	if (!(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) &&
5027 	    !(BCE_CHIP_ID(sc) == BCE_CHIP_ID_5708_A0)) {
5028 		ifp->if_oerrors +=
5029 			(u_long)stats->stat_Dot3StatsCarrierSenseErrors;
5030 	}
5031 
5032 	/*
5033 	 * Update the sysctl statistics from the hardware statistics.
5034 	 */
5035 	sc->stat_IfHCInOctets =
5036 		((uint64_t)stats->stat_IfHCInOctets_hi << 32) +
5037 		 (uint64_t)stats->stat_IfHCInOctets_lo;
5038 
5039 	sc->stat_IfHCInBadOctets =
5040 		((uint64_t)stats->stat_IfHCInBadOctets_hi << 32) +
5041 		 (uint64_t)stats->stat_IfHCInBadOctets_lo;
5042 
5043 	sc->stat_IfHCOutOctets =
5044 		((uint64_t)stats->stat_IfHCOutOctets_hi << 32) +
5045 		 (uint64_t)stats->stat_IfHCOutOctets_lo;
5046 
5047 	sc->stat_IfHCOutBadOctets =
5048 		((uint64_t)stats->stat_IfHCOutBadOctets_hi << 32) +
5049 		 (uint64_t)stats->stat_IfHCOutBadOctets_lo;
5050 
5051 	sc->stat_IfHCInUcastPkts =
5052 		((uint64_t)stats->stat_IfHCInUcastPkts_hi << 32) +
5053 		 (uint64_t)stats->stat_IfHCInUcastPkts_lo;
5054 
5055 	sc->stat_IfHCInMulticastPkts =
5056 		((uint64_t)stats->stat_IfHCInMulticastPkts_hi << 32) +
5057 		 (uint64_t)stats->stat_IfHCInMulticastPkts_lo;
5058 
5059 	sc->stat_IfHCInBroadcastPkts =
5060 		((uint64_t)stats->stat_IfHCInBroadcastPkts_hi << 32) +
5061 		 (uint64_t)stats->stat_IfHCInBroadcastPkts_lo;
5062 
5063 	sc->stat_IfHCOutUcastPkts =
5064 		((uint64_t)stats->stat_IfHCOutUcastPkts_hi << 32) +
5065 		 (uint64_t)stats->stat_IfHCOutUcastPkts_lo;
5066 
5067 	sc->stat_IfHCOutMulticastPkts =
5068 		((uint64_t)stats->stat_IfHCOutMulticastPkts_hi << 32) +
5069 		 (uint64_t)stats->stat_IfHCOutMulticastPkts_lo;
5070 
5071 	sc->stat_IfHCOutBroadcastPkts =
5072 		((uint64_t)stats->stat_IfHCOutBroadcastPkts_hi << 32) +
5073 		 (uint64_t)stats->stat_IfHCOutBroadcastPkts_lo;
5074 
5075 	sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors =
5076 		stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors;
5077 
5078 	sc->stat_Dot3StatsCarrierSenseErrors =
5079 		stats->stat_Dot3StatsCarrierSenseErrors;
5080 
5081 	sc->stat_Dot3StatsFCSErrors =
5082 		stats->stat_Dot3StatsFCSErrors;
5083 
5084 	sc->stat_Dot3StatsAlignmentErrors =
5085 		stats->stat_Dot3StatsAlignmentErrors;
5086 
5087 	sc->stat_Dot3StatsSingleCollisionFrames =
5088 		stats->stat_Dot3StatsSingleCollisionFrames;
5089 
5090 	sc->stat_Dot3StatsMultipleCollisionFrames =
5091 		stats->stat_Dot3StatsMultipleCollisionFrames;
5092 
5093 	sc->stat_Dot3StatsDeferredTransmissions =
5094 		stats->stat_Dot3StatsDeferredTransmissions;
5095 
5096 	sc->stat_Dot3StatsExcessiveCollisions =
5097 		stats->stat_Dot3StatsExcessiveCollisions;
5098 
5099 	sc->stat_Dot3StatsLateCollisions =
5100 		stats->stat_Dot3StatsLateCollisions;
5101 
5102 	sc->stat_EtherStatsCollisions =
5103 		stats->stat_EtherStatsCollisions;
5104 
5105 	sc->stat_EtherStatsFragments =
5106 		stats->stat_EtherStatsFragments;
5107 
5108 	sc->stat_EtherStatsJabbers =
5109 		stats->stat_EtherStatsJabbers;
5110 
5111 	sc->stat_EtherStatsUndersizePkts =
5112 		stats->stat_EtherStatsUndersizePkts;
5113 
5114 	sc->stat_EtherStatsOverrsizePkts =
5115 		stats->stat_EtherStatsOverrsizePkts;
5116 
5117 	sc->stat_EtherStatsPktsRx64Octets =
5118 		stats->stat_EtherStatsPktsRx64Octets;
5119 
5120 	sc->stat_EtherStatsPktsRx65Octetsto127Octets =
5121 		stats->stat_EtherStatsPktsRx65Octetsto127Octets;
5122 
5123 	sc->stat_EtherStatsPktsRx128Octetsto255Octets =
5124 		stats->stat_EtherStatsPktsRx128Octetsto255Octets;
5125 
5126 	sc->stat_EtherStatsPktsRx256Octetsto511Octets =
5127 		stats->stat_EtherStatsPktsRx256Octetsto511Octets;
5128 
5129 	sc->stat_EtherStatsPktsRx512Octetsto1023Octets =
5130 		stats->stat_EtherStatsPktsRx512Octetsto1023Octets;
5131 
5132 	sc->stat_EtherStatsPktsRx1024Octetsto1522Octets =
5133 		stats->stat_EtherStatsPktsRx1024Octetsto1522Octets;
5134 
5135 	sc->stat_EtherStatsPktsRx1523Octetsto9022Octets =
5136 		stats->stat_EtherStatsPktsRx1523Octetsto9022Octets;
5137 
5138 	sc->stat_EtherStatsPktsTx64Octets =
5139 		stats->stat_EtherStatsPktsTx64Octets;
5140 
5141 	sc->stat_EtherStatsPktsTx65Octetsto127Octets =
5142 		stats->stat_EtherStatsPktsTx65Octetsto127Octets;
5143 
5144 	sc->stat_EtherStatsPktsTx128Octetsto255Octets =
5145 		stats->stat_EtherStatsPktsTx128Octetsto255Octets;
5146 
5147 	sc->stat_EtherStatsPktsTx256Octetsto511Octets =
5148 		stats->stat_EtherStatsPktsTx256Octetsto511Octets;
5149 
5150 	sc->stat_EtherStatsPktsTx512Octetsto1023Octets =
5151 		stats->stat_EtherStatsPktsTx512Octetsto1023Octets;
5152 
5153 	sc->stat_EtherStatsPktsTx1024Octetsto1522Octets =
5154 		stats->stat_EtherStatsPktsTx1024Octetsto1522Octets;
5155 
5156 	sc->stat_EtherStatsPktsTx1523Octetsto9022Octets =
5157 		stats->stat_EtherStatsPktsTx1523Octetsto9022Octets;
5158 
5159 	sc->stat_XonPauseFramesReceived =
5160 		stats->stat_XonPauseFramesReceived;
5161 
5162 	sc->stat_XoffPauseFramesReceived =
5163 		stats->stat_XoffPauseFramesReceived;
5164 
5165 	sc->stat_OutXonSent =
5166 		stats->stat_OutXonSent;
5167 
5168 	sc->stat_OutXoffSent =
5169 		stats->stat_OutXoffSent;
5170 
5171 	sc->stat_FlowControlDone =
5172 		stats->stat_FlowControlDone;
5173 
5174 	sc->stat_MacControlFramesReceived =
5175 		stats->stat_MacControlFramesReceived;
5176 
5177 	sc->stat_XoffStateEntered =
5178 		stats->stat_XoffStateEntered;
5179 
5180 	sc->stat_IfInFramesL2FilterDiscards =
5181 		stats->stat_IfInFramesL2FilterDiscards;
5182 
5183 	sc->stat_IfInRuleCheckerDiscards =
5184 		stats->stat_IfInRuleCheckerDiscards;
5185 
5186 	sc->stat_IfInFTQDiscards =
5187 		stats->stat_IfInFTQDiscards;
5188 
5189 	sc->stat_IfInMBUFDiscards =
5190 		stats->stat_IfInMBUFDiscards;
5191 
5192 	sc->stat_IfInRuleCheckerP4Hit =
5193 		stats->stat_IfInRuleCheckerP4Hit;
5194 
5195 	sc->stat_CatchupInRuleCheckerDiscards =
5196 		stats->stat_CatchupInRuleCheckerDiscards;
5197 
5198 	sc->stat_CatchupInFTQDiscards =
5199 		stats->stat_CatchupInFTQDiscards;
5200 
5201 	sc->stat_CatchupInMBUFDiscards =
5202 		stats->stat_CatchupInMBUFDiscards;
5203 
5204 	sc->stat_CatchupInRuleCheckerP4Hit =
5205 		stats->stat_CatchupInRuleCheckerP4Hit;
5206 
5207 	sc->com_no_buffers = REG_RD_IND(sc, 0x120084);
5208 
5209 	DBPRINT(sc, BCE_EXCESSIVE, "Exiting %s()\n", __func__);
5210 }
5211 
5212 
5213 /****************************************************************************/
5214 /* Periodic function to perform maintenance tasks.                          */
5215 /*                                                                          */
5216 /* Returns:                                                                 */
5217 /*   Nothing.                                                               */
5218 /****************************************************************************/
5219 static void
5220 bce_tick_serialized(struct bce_softc *sc)
5221 {
5222 	struct ifnet *ifp = &sc->arpcom.ac_if;
5223 	struct mii_data *mii;
5224 	uint32_t msg;
5225 
5226 	ASSERT_SERIALIZED(ifp->if_serializer);
5227 
5228 	/* Tell the firmware that the driver is still running. */
5229 #ifdef BCE_DEBUG
5230 	msg = (uint32_t)BCE_DRV_MSG_DATA_PULSE_CODE_ALWAYS_ALIVE;
5231 #else
5232 	msg = (uint32_t)++sc->bce_fw_drv_pulse_wr_seq;
5233 #endif
5234 	REG_WR_IND(sc, sc->bce_shmem_base + BCE_DRV_PULSE_MB, msg);
5235 
5236 	/* Update the statistics from the hardware statistics block. */
5237 	bce_stats_update(sc);
5238 
5239 	/* Schedule the next tick. */
5240 	callout_reset(&sc->bce_stat_ch, hz, bce_tick, sc);
5241 
5242 	/* If link is up already up then we're done. */
5243 	if (sc->bce_link)
5244 		return;
5245 
5246 	mii = device_get_softc(sc->bce_miibus);
5247 	mii_tick(mii);
5248 
5249 	/* Check if the link has come up. */
5250 	if (!sc->bce_link && (mii->mii_media_status & IFM_ACTIVE) &&
5251 	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
5252 		sc->bce_link++;
5253 		/* Now that link is up, handle any outstanding TX traffic. */
5254 		if (!ifq_is_empty(&ifp->if_snd))
5255 			ifp->if_start(ifp);
5256 	}
5257 }
5258 
5259 
5260 static void
5261 bce_tick(void *xsc)
5262 {
5263 	struct bce_softc *sc = xsc;
5264 	struct ifnet *ifp = &sc->arpcom.ac_if;
5265 
5266 	lwkt_serialize_enter(ifp->if_serializer);
5267 	bce_tick_serialized(sc);
5268 	lwkt_serialize_exit(ifp->if_serializer);
5269 }
5270 
5271 
5272 #ifdef BCE_DEBUG
5273 /****************************************************************************/
5274 /* Allows the driver state to be dumped through the sysctl interface.       */
5275 /*                                                                          */
5276 /* Returns:                                                                 */
5277 /*   0 for success, positive value for failure.                             */
5278 /****************************************************************************/
5279 static int
5280 bce_sysctl_driver_state(SYSCTL_HANDLER_ARGS)
5281 {
5282         int error;
5283         int result;
5284         struct bce_softc *sc;
5285 
5286         result = -1;
5287         error = sysctl_handle_int(oidp, &result, 0, req);
5288 
5289         if (error || !req->newptr)
5290                 return (error);
5291 
5292         if (result == 1) {
5293                 sc = (struct bce_softc *)arg1;
5294                 bce_dump_driver_state(sc);
5295         }
5296 
5297         return error;
5298 }
5299 
5300 
5301 /****************************************************************************/
5302 /* Allows the hardware state to be dumped through the sysctl interface.     */
5303 /*                                                                          */
5304 /* Returns:                                                                 */
5305 /*   0 for success, positive value for failure.                             */
5306 /****************************************************************************/
5307 static int
5308 bce_sysctl_hw_state(SYSCTL_HANDLER_ARGS)
5309 {
5310         int error;
5311         int result;
5312         struct bce_softc *sc;
5313 
5314         result = -1;
5315         error = sysctl_handle_int(oidp, &result, 0, req);
5316 
5317         if (error || !req->newptr)
5318                 return (error);
5319 
5320         if (result == 1) {
5321                 sc = (struct bce_softc *)arg1;
5322                 bce_dump_hw_state(sc);
5323         }
5324 
5325         return error;
5326 }
5327 
5328 
5329 /****************************************************************************/
5330 /* Provides a sysctl interface to allows dumping the RX chain.              */
5331 /*                                                                          */
5332 /* Returns:                                                                 */
5333 /*   0 for success, positive value for failure.                             */
5334 /****************************************************************************/
5335 static int
5336 bce_sysctl_dump_rx_chain(SYSCTL_HANDLER_ARGS)
5337 {
5338         int error;
5339         int result;
5340         struct bce_softc *sc;
5341 
5342         result = -1;
5343         error = sysctl_handle_int(oidp, &result, 0, req);
5344 
5345         if (error || !req->newptr)
5346                 return (error);
5347 
5348         if (result == 1) {
5349                 sc = (struct bce_softc *)arg1;
5350                 bce_dump_rx_chain(sc, 0, USABLE_RX_BD);
5351         }
5352 
5353         return error;
5354 }
5355 
5356 
5357 /****************************************************************************/
5358 /* Provides a sysctl interface to allows dumping the TX chain.              */
5359 /*                                                                          */
5360 /* Returns:                                                                 */
5361 /*   0 for success, positive value for failure.                             */
5362 /****************************************************************************/
5363 static int
5364 bce_sysctl_dump_tx_chain(SYSCTL_HANDLER_ARGS)
5365 {
5366         int error;
5367         int result;
5368         struct bce_softc *sc;
5369 
5370         result = -1;
5371         error = sysctl_handle_int(oidp, &result, 0, req);
5372 
5373         if (error || !req->newptr)
5374                 return (error);
5375 
5376         if (result == 1) {
5377                 sc = (struct bce_softc *)arg1;
5378                 bce_dump_tx_chain(sc, 0, USABLE_TX_BD);
5379         }
5380 
5381         return error;
5382 }
5383 
5384 
5385 /****************************************************************************/
5386 /* Provides a sysctl interface to allow reading arbitrary registers in the  */
5387 /* device.  DO NOT ENABLE ON PRODUCTION SYSTEMS!                            */
5388 /*                                                                          */
5389 /* Returns:                                                                 */
5390 /*   0 for success, positive value for failure.                             */
5391 /****************************************************************************/
5392 static int
5393 bce_sysctl_reg_read(SYSCTL_HANDLER_ARGS)
5394 {
5395 	struct bce_softc *sc;
5396 	int error;
5397 	uint32_t val, result;
5398 
5399 	result = -1;
5400 	error = sysctl_handle_int(oidp, &result, 0, req);
5401 	if (error || (req->newptr == NULL))
5402 		return (error);
5403 
5404 	/* Make sure the register is accessible. */
5405 	if (result < 0x8000) {
5406 		sc = (struct bce_softc *)arg1;
5407 		val = REG_RD(sc, result);
5408 		if_printf(&sc->arpcom.ac_if, "reg 0x%08X = 0x%08X\n",
5409 			  result, val);
5410 	} else if (result < 0x0280000) {
5411 		sc = (struct bce_softc *)arg1;
5412 		val = REG_RD_IND(sc, result);
5413 		if_printf(&sc->arpcom.ac_if, "reg 0x%08X = 0x%08X\n",
5414 			  result, val);
5415 	}
5416 	return (error);
5417 }
5418 
5419 
5420 /****************************************************************************/
5421 /* Provides a sysctl interface to allow reading arbitrary PHY registers in  */
5422 /* the device.  DO NOT ENABLE ON PRODUCTION SYSTEMS!                        */
5423 /*                                                                          */
5424 /* Returns:                                                                 */
5425 /*   0 for success, positive value for failure.                             */
5426 /****************************************************************************/
5427 static int
5428 bce_sysctl_phy_read(SYSCTL_HANDLER_ARGS)
5429 {
5430 	struct bce_softc *sc;
5431 	device_t dev;
5432 	int error, result;
5433 	uint16_t val;
5434 
5435 	result = -1;
5436 	error = sysctl_handle_int(oidp, &result, 0, req);
5437 	if (error || (req->newptr == NULL))
5438 		return (error);
5439 
5440 	/* Make sure the register is accessible. */
5441 	if (result < 0x20) {
5442 		sc = (struct bce_softc *)arg1;
5443 		dev = sc->bce_dev;
5444 		val = bce_miibus_read_reg(dev, sc->bce_phy_addr, result);
5445 		if_printf(&sc->arpcom.ac_if,
5446 			  "phy 0x%02X = 0x%04X\n", result, val);
5447 	}
5448 	return (error);
5449 }
5450 
5451 
5452 /****************************************************************************/
5453 /* Provides a sysctl interface to forcing the driver to dump state and      */
5454 /* enter the debugger.  DO NOT ENABLE ON PRODUCTION SYSTEMS!                */
5455 /*                                                                          */
5456 /* Returns:                                                                 */
5457 /*   0 for success, positive value for failure.                             */
5458 /****************************************************************************/
5459 static int
5460 bce_sysctl_breakpoint(SYSCTL_HANDLER_ARGS)
5461 {
5462         int error;
5463         int result;
5464         struct bce_softc *sc;
5465 
5466         result = -1;
5467         error = sysctl_handle_int(oidp, &result, 0, req);
5468 
5469         if (error || !req->newptr)
5470                 return (error);
5471 
5472         if (result == 1) {
5473                 sc = (struct bce_softc *)arg1;
5474                 bce_breakpoint(sc);
5475         }
5476 
5477         return error;
5478 }
5479 #endif
5480 
5481 
5482 /****************************************************************************/
5483 /* Adds any sysctl parameters for tuning or debugging purposes.             */
5484 /*                                                                          */
5485 /* Returns:                                                                 */
5486 /*   0 for success, positive value for failure.                             */
5487 /****************************************************************************/
5488 static void
5489 bce_add_sysctls(struct bce_softc *sc)
5490 {
5491 	struct sysctl_ctx_list *ctx;
5492 	struct sysctl_oid_list *children;
5493 
5494 	sysctl_ctx_init(&sc->bce_sysctl_ctx);
5495 	sc->bce_sysctl_tree = SYSCTL_ADD_NODE(&sc->bce_sysctl_ctx,
5496 					      SYSCTL_STATIC_CHILDREN(_hw),
5497 					      OID_AUTO,
5498 					      device_get_nameunit(sc->bce_dev),
5499 					      CTLFLAG_RD, 0, "");
5500 	if (sc->bce_sysctl_tree == NULL) {
5501 		device_printf(sc->bce_dev, "can't add sysctl node\n");
5502 		return;
5503 	}
5504 
5505 	ctx = &sc->bce_sysctl_ctx;
5506 	children = SYSCTL_CHILDREN(sc->bce_sysctl_tree);
5507 
5508 #ifdef BCE_DEBUG
5509 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
5510 		"rx_low_watermark",
5511 		CTLFLAG_RD, &sc->rx_low_watermark,
5512 		0, "Lowest level of free rx_bd's");
5513 
5514 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
5515 		"rx_empty_count",
5516 		CTLFLAG_RD, &sc->rx_empty_count,
5517 		0, "Number of times the RX chain was empty");
5518 
5519 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
5520 		"tx_hi_watermark",
5521 		CTLFLAG_RD, &sc->tx_hi_watermark,
5522 		0, "Highest level of used tx_bd's");
5523 
5524 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
5525 		"tx_full_count",
5526 		CTLFLAG_RD, &sc->tx_full_count,
5527 		0, "Number of times the TX chain was full");
5528 
5529 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
5530 		"l2fhdr_status_errors",
5531 		CTLFLAG_RD, &sc->l2fhdr_status_errors,
5532 		0, "l2_fhdr status errors");
5533 
5534 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
5535 		"unexpected_attentions",
5536 		CTLFLAG_RD, &sc->unexpected_attentions,
5537 		0, "unexpected attentions");
5538 
5539 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
5540 		"lost_status_block_updates",
5541 		CTLFLAG_RD, &sc->lost_status_block_updates,
5542 		0, "lost status block updates");
5543 
5544 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
5545 		"mbuf_alloc_failed",
5546 		CTLFLAG_RD, &sc->mbuf_alloc_failed,
5547 		0, "mbuf cluster allocation failures");
5548 #endif
5549 
5550 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5551 		"stat_IfHcInOctets",
5552 		CTLFLAG_RD, &sc->stat_IfHCInOctets,
5553 		"Bytes received");
5554 
5555 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5556 		"stat_IfHCInBadOctets",
5557 		CTLFLAG_RD, &sc->stat_IfHCInBadOctets,
5558 		"Bad bytes received");
5559 
5560 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5561 		"stat_IfHCOutOctets",
5562 		CTLFLAG_RD, &sc->stat_IfHCOutOctets,
5563 		"Bytes sent");
5564 
5565 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5566 		"stat_IfHCOutBadOctets",
5567 		CTLFLAG_RD, &sc->stat_IfHCOutBadOctets,
5568 		"Bad bytes sent");
5569 
5570 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5571 		"stat_IfHCInUcastPkts",
5572 		CTLFLAG_RD, &sc->stat_IfHCInUcastPkts,
5573 		"Unicast packets received");
5574 
5575 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5576 		"stat_IfHCInMulticastPkts",
5577 		CTLFLAG_RD, &sc->stat_IfHCInMulticastPkts,
5578 		"Multicast packets received");
5579 
5580 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5581 		"stat_IfHCInBroadcastPkts",
5582 		CTLFLAG_RD, &sc->stat_IfHCInBroadcastPkts,
5583 		"Broadcast packets received");
5584 
5585 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5586 		"stat_IfHCOutUcastPkts",
5587 		CTLFLAG_RD, &sc->stat_IfHCOutUcastPkts,
5588 		"Unicast packets sent");
5589 
5590 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5591 		"stat_IfHCOutMulticastPkts",
5592 		CTLFLAG_RD, &sc->stat_IfHCOutMulticastPkts,
5593 		"Multicast packets sent");
5594 
5595 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5596 		"stat_IfHCOutBroadcastPkts",
5597 		CTLFLAG_RD, &sc->stat_IfHCOutBroadcastPkts,
5598 		"Broadcast packets sent");
5599 
5600 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5601 		"stat_emac_tx_stat_dot3statsinternalmactransmiterrors",
5602 		CTLFLAG_RD, &sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors,
5603 		0, "Internal MAC transmit errors");
5604 
5605 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5606 		"stat_Dot3StatsCarrierSenseErrors",
5607 		CTLFLAG_RD, &sc->stat_Dot3StatsCarrierSenseErrors,
5608 		0, "Carrier sense errors");
5609 
5610 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5611 		"stat_Dot3StatsFCSErrors",
5612 		CTLFLAG_RD, &sc->stat_Dot3StatsFCSErrors,
5613 		0, "Frame check sequence errors");
5614 
5615 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5616 		"stat_Dot3StatsAlignmentErrors",
5617 		CTLFLAG_RD, &sc->stat_Dot3StatsAlignmentErrors,
5618 		0, "Alignment errors");
5619 
5620 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5621 		"stat_Dot3StatsSingleCollisionFrames",
5622 		CTLFLAG_RD, &sc->stat_Dot3StatsSingleCollisionFrames,
5623 		0, "Single Collision Frames");
5624 
5625 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5626 		"stat_Dot3StatsMultipleCollisionFrames",
5627 		CTLFLAG_RD, &sc->stat_Dot3StatsMultipleCollisionFrames,
5628 		0, "Multiple Collision Frames");
5629 
5630 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5631 		"stat_Dot3StatsDeferredTransmissions",
5632 		CTLFLAG_RD, &sc->stat_Dot3StatsDeferredTransmissions,
5633 		0, "Deferred Transmissions");
5634 
5635 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5636 		"stat_Dot3StatsExcessiveCollisions",
5637 		CTLFLAG_RD, &sc->stat_Dot3StatsExcessiveCollisions,
5638 		0, "Excessive Collisions");
5639 
5640 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5641 		"stat_Dot3StatsLateCollisions",
5642 		CTLFLAG_RD, &sc->stat_Dot3StatsLateCollisions,
5643 		0, "Late Collisions");
5644 
5645 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5646 		"stat_EtherStatsCollisions",
5647 		CTLFLAG_RD, &sc->stat_EtherStatsCollisions,
5648 		0, "Collisions");
5649 
5650 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5651 		"stat_EtherStatsFragments",
5652 		CTLFLAG_RD, &sc->stat_EtherStatsFragments,
5653 		0, "Fragments");
5654 
5655 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5656 		"stat_EtherStatsJabbers",
5657 		CTLFLAG_RD, &sc->stat_EtherStatsJabbers,
5658 		0, "Jabbers");
5659 
5660 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5661 		"stat_EtherStatsUndersizePkts",
5662 		CTLFLAG_RD, &sc->stat_EtherStatsUndersizePkts,
5663 		0, "Undersize packets");
5664 
5665 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5666 		"stat_EtherStatsOverrsizePkts",
5667 		CTLFLAG_RD, &sc->stat_EtherStatsOverrsizePkts,
5668 		0, "stat_EtherStatsOverrsizePkts");
5669 
5670 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5671 		"stat_EtherStatsPktsRx64Octets",
5672 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx64Octets,
5673 		0, "Bytes received in 64 byte packets");
5674 
5675 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5676 		"stat_EtherStatsPktsRx65Octetsto127Octets",
5677 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx65Octetsto127Octets,
5678 		0, "Bytes received in 65 to 127 byte packets");
5679 
5680 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5681 		"stat_EtherStatsPktsRx128Octetsto255Octets",
5682 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx128Octetsto255Octets,
5683 		0, "Bytes received in 128 to 255 byte packets");
5684 
5685 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5686 		"stat_EtherStatsPktsRx256Octetsto511Octets",
5687 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx256Octetsto511Octets,
5688 		0, "Bytes received in 256 to 511 byte packets");
5689 
5690 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5691 		"stat_EtherStatsPktsRx512Octetsto1023Octets",
5692 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx512Octetsto1023Octets,
5693 		0, "Bytes received in 512 to 1023 byte packets");
5694 
5695 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5696 		"stat_EtherStatsPktsRx1024Octetsto1522Octets",
5697 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1024Octetsto1522Octets,
5698 		0, "Bytes received in 1024 t0 1522 byte packets");
5699 
5700 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5701 		"stat_EtherStatsPktsRx1523Octetsto9022Octets",
5702 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1523Octetsto9022Octets,
5703 		0, "Bytes received in 1523 to 9022 byte packets");
5704 
5705 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5706 		"stat_EtherStatsPktsTx64Octets",
5707 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx64Octets,
5708 		0, "Bytes sent in 64 byte packets");
5709 
5710 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5711 		"stat_EtherStatsPktsTx65Octetsto127Octets",
5712 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx65Octetsto127Octets,
5713 		0, "Bytes sent in 65 to 127 byte packets");
5714 
5715 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5716 		"stat_EtherStatsPktsTx128Octetsto255Octets",
5717 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx128Octetsto255Octets,
5718 		0, "Bytes sent in 128 to 255 byte packets");
5719 
5720 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5721 		"stat_EtherStatsPktsTx256Octetsto511Octets",
5722 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx256Octetsto511Octets,
5723 		0, "Bytes sent in 256 to 511 byte packets");
5724 
5725 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5726 		"stat_EtherStatsPktsTx512Octetsto1023Octets",
5727 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx512Octetsto1023Octets,
5728 		0, "Bytes sent in 512 to 1023 byte packets");
5729 
5730 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5731 		"stat_EtherStatsPktsTx1024Octetsto1522Octets",
5732 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1024Octetsto1522Octets,
5733 		0, "Bytes sent in 1024 to 1522 byte packets");
5734 
5735 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5736 		"stat_EtherStatsPktsTx1523Octetsto9022Octets",
5737 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1523Octetsto9022Octets,
5738 		0, "Bytes sent in 1523 to 9022 byte packets");
5739 
5740 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5741 		"stat_XonPauseFramesReceived",
5742 		CTLFLAG_RD, &sc->stat_XonPauseFramesReceived,
5743 		0, "XON pause frames receved");
5744 
5745 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5746 		"stat_XoffPauseFramesReceived",
5747 		CTLFLAG_RD, &sc->stat_XoffPauseFramesReceived,
5748 		0, "XOFF pause frames received");
5749 
5750 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5751 		"stat_OutXonSent",
5752 		CTLFLAG_RD, &sc->stat_OutXonSent,
5753 		0, "XON pause frames sent");
5754 
5755 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5756 		"stat_OutXoffSent",
5757 		CTLFLAG_RD, &sc->stat_OutXoffSent,
5758 		0, "XOFF pause frames sent");
5759 
5760 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5761 		"stat_FlowControlDone",
5762 		CTLFLAG_RD, &sc->stat_FlowControlDone,
5763 		0, "Flow control done");
5764 
5765 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5766 		"stat_MacControlFramesReceived",
5767 		CTLFLAG_RD, &sc->stat_MacControlFramesReceived,
5768 		0, "MAC control frames received");
5769 
5770 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5771 		"stat_XoffStateEntered",
5772 		CTLFLAG_RD, &sc->stat_XoffStateEntered,
5773 		0, "XOFF state entered");
5774 
5775 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5776 		"stat_IfInFramesL2FilterDiscards",
5777 		CTLFLAG_RD, &sc->stat_IfInFramesL2FilterDiscards,
5778 		0, "Received L2 packets discarded");
5779 
5780 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5781 		"stat_IfInRuleCheckerDiscards",
5782 		CTLFLAG_RD, &sc->stat_IfInRuleCheckerDiscards,
5783 		0, "Received packets discarded by rule");
5784 
5785 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5786 		"stat_IfInFTQDiscards",
5787 		CTLFLAG_RD, &sc->stat_IfInFTQDiscards,
5788 		0, "Received packet FTQ discards");
5789 
5790 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5791 		"stat_IfInMBUFDiscards",
5792 		CTLFLAG_RD, &sc->stat_IfInMBUFDiscards,
5793 		0, "Received packets discarded due to lack of controller buffer memory");
5794 
5795 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5796 		"stat_IfInRuleCheckerP4Hit",
5797 		CTLFLAG_RD, &sc->stat_IfInRuleCheckerP4Hit,
5798 		0, "Received packets rule checker hits");
5799 
5800 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5801 		"stat_CatchupInRuleCheckerDiscards",
5802 		CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerDiscards,
5803 		0, "Received packets discarded in Catchup path");
5804 
5805 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5806 		"stat_CatchupInFTQDiscards",
5807 		CTLFLAG_RD, &sc->stat_CatchupInFTQDiscards,
5808 		0, "Received packets discarded in FTQ in Catchup path");
5809 
5810 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5811 		"stat_CatchupInMBUFDiscards",
5812 		CTLFLAG_RD, &sc->stat_CatchupInMBUFDiscards,
5813 		0, "Received packets discarded in controller buffer memory in Catchup path");
5814 
5815 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5816 		"stat_CatchupInRuleCheckerP4Hit",
5817 		CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerP4Hit,
5818 		0, "Received packets rule checker hits in Catchup path");
5819 
5820 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5821 		"com_no_buffers",
5822 		CTLFLAG_RD, &sc->com_no_buffers,
5823 		0, "Valid packets received but no RX buffers available");
5824 
5825 #ifdef BCE_DEBUG
5826 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
5827 		"driver_state", CTLTYPE_INT | CTLFLAG_RW,
5828 		(void *)sc, 0,
5829 		bce_sysctl_driver_state, "I", "Drive state information");
5830 
5831 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
5832 		"hw_state", CTLTYPE_INT | CTLFLAG_RW,
5833 		(void *)sc, 0,
5834 		bce_sysctl_hw_state, "I", "Hardware state information");
5835 
5836 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
5837 		"dump_rx_chain", CTLTYPE_INT | CTLFLAG_RW,
5838 		(void *)sc, 0,
5839 		bce_sysctl_dump_rx_chain, "I", "Dump rx_bd chain");
5840 
5841 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
5842 		"dump_tx_chain", CTLTYPE_INT | CTLFLAG_RW,
5843 		(void *)sc, 0,
5844 		bce_sysctl_dump_tx_chain, "I", "Dump tx_bd chain");
5845 
5846 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
5847 		"breakpoint", CTLTYPE_INT | CTLFLAG_RW,
5848 		(void *)sc, 0,
5849 		bce_sysctl_breakpoint, "I", "Driver breakpoint");
5850 
5851 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
5852 		"reg_read", CTLTYPE_INT | CTLFLAG_RW,
5853 		(void *)sc, 0,
5854 		bce_sysctl_reg_read, "I", "Register read");
5855 
5856 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
5857 		"phy_read", CTLTYPE_INT | CTLFLAG_RW,
5858 		(void *)sc, 0,
5859 		bce_sysctl_phy_read, "I", "PHY register read");
5860 
5861 #endif
5862 
5863 }
5864 
5865 
5866 /****************************************************************************/
5867 /* BCE Debug Routines                                                       */
5868 /****************************************************************************/
5869 #ifdef BCE_DEBUG
5870 
5871 /****************************************************************************/
5872 /* Freezes the controller to allow for a cohesive state dump.               */
5873 /*                                                                          */
5874 /* Returns:                                                                 */
5875 /*   Nothing.                                                               */
5876 /****************************************************************************/
5877 static void
5878 bce_freeze_controller(struct bce_softc *sc)
5879 {
5880 	uint32_t val;
5881 
5882 	val = REG_RD(sc, BCE_MISC_COMMAND);
5883 	val |= BCE_MISC_COMMAND_DISABLE_ALL;
5884 	REG_WR(sc, BCE_MISC_COMMAND, val);
5885 }
5886 
5887 
5888 /****************************************************************************/
5889 /* Unfreezes the controller after a freeze operation.  This may not always  */
5890 /* work and the controller will require a reset!                            */
5891 /*                                                                          */
5892 /* Returns:                                                                 */
5893 /*   Nothing.                                                               */
5894 /****************************************************************************/
5895 static void
5896 bce_unfreeze_controller(struct bce_softc *sc)
5897 {
5898 	uint32_t val;
5899 
5900 	val = REG_RD(sc, BCE_MISC_COMMAND);
5901 	val |= BCE_MISC_COMMAND_ENABLE_ALL;
5902 	REG_WR(sc, BCE_MISC_COMMAND, val);
5903 }
5904 
5905 
5906 /****************************************************************************/
5907 /* Prints out information about an mbuf.                                    */
5908 /*                                                                          */
5909 /* Returns:                                                                 */
5910 /*   Nothing.                                                               */
5911 /****************************************************************************/
5912 static void
5913 bce_dump_mbuf(struct bce_softc *sc, struct mbuf *m)
5914 {
5915 	struct ifnet *ifp = &sc->arpcom.ac_if;
5916 	uint32_t val_hi, val_lo;
5917 	struct mbuf *mp = m;
5918 
5919 	if (m == NULL) {
5920 		/* Index out of range. */
5921 		if_printf(ifp, "mbuf: null pointer\n");
5922 		return;
5923 	}
5924 
5925 	while (mp) {
5926 		val_hi = BCE_ADDR_HI(mp);
5927 		val_lo = BCE_ADDR_LO(mp);
5928 		if_printf(ifp, "mbuf: vaddr = 0x%08X:%08X, m_len = %d, "
5929 			  "m_flags = ( ", val_hi, val_lo, mp->m_len);
5930 
5931 		if (mp->m_flags & M_EXT)
5932 			kprintf("M_EXT ");
5933 		if (mp->m_flags & M_PKTHDR)
5934 			kprintf("M_PKTHDR ");
5935 		if (mp->m_flags & M_EOR)
5936 			kprintf("M_EOR ");
5937 #ifdef M_RDONLY
5938 		if (mp->m_flags & M_RDONLY)
5939 			kprintf("M_RDONLY ");
5940 #endif
5941 
5942 		val_hi = BCE_ADDR_HI(mp->m_data);
5943 		val_lo = BCE_ADDR_LO(mp->m_data);
5944 		kprintf(") m_data = 0x%08X:%08X\n", val_hi, val_lo);
5945 
5946 		if (mp->m_flags & M_PKTHDR) {
5947 			if_printf(ifp, "- m_pkthdr: flags = ( ");
5948 			if (mp->m_flags & M_BCAST)
5949 				kprintf("M_BCAST ");
5950 			if (mp->m_flags & M_MCAST)
5951 				kprintf("M_MCAST ");
5952 			if (mp->m_flags & M_FRAG)
5953 				kprintf("M_FRAG ");
5954 			if (mp->m_flags & M_FIRSTFRAG)
5955 				kprintf("M_FIRSTFRAG ");
5956 			if (mp->m_flags & M_LASTFRAG)
5957 				kprintf("M_LASTFRAG ");
5958 #ifdef M_VLANTAG
5959 			if (mp->m_flags & M_VLANTAG)
5960 				kprintf("M_VLANTAG ");
5961 #endif
5962 #ifdef M_PROMISC
5963 			if (mp->m_flags & M_PROMISC)
5964 				kprintf("M_PROMISC ");
5965 #endif
5966 			kprintf(") csum_flags = ( ");
5967 			if (mp->m_pkthdr.csum_flags & CSUM_IP)
5968 				kprintf("CSUM_IP ");
5969 			if (mp->m_pkthdr.csum_flags & CSUM_TCP)
5970 				kprintf("CSUM_TCP ");
5971 			if (mp->m_pkthdr.csum_flags & CSUM_UDP)
5972 				kprintf("CSUM_UDP ");
5973 			if (mp->m_pkthdr.csum_flags & CSUM_IP_FRAGS)
5974 				kprintf("CSUM_IP_FRAGS ");
5975 			if (mp->m_pkthdr.csum_flags & CSUM_FRAGMENT)
5976 				kprintf("CSUM_FRAGMENT ");
5977 #ifdef CSUM_TSO
5978 			if (mp->m_pkthdr.csum_flags & CSUM_TSO)
5979 				kprintf("CSUM_TSO ");
5980 #endif
5981 			if (mp->m_pkthdr.csum_flags & CSUM_IP_CHECKED)
5982 				kprintf("CSUM_IP_CHECKED ");
5983 			if (mp->m_pkthdr.csum_flags & CSUM_IP_VALID)
5984 				kprintf("CSUM_IP_VALID ");
5985 			if (mp->m_pkthdr.csum_flags & CSUM_DATA_VALID)
5986 				kprintf("CSUM_DATA_VALID ");
5987 			kprintf(")\n");
5988 		}
5989 
5990 		if (mp->m_flags & M_EXT) {
5991 			val_hi = BCE_ADDR_HI(mp->m_ext.ext_buf);
5992 			val_lo = BCE_ADDR_LO(mp->m_ext.ext_buf);
5993 			if_printf(ifp, "- m_ext: vaddr = 0x%08X:%08X, "
5994 				  "ext_size = %d\n",
5995 				  val_hi, val_lo, mp->m_ext.ext_size);
5996 		}
5997 		mp = mp->m_next;
5998 	}
5999 }
6000 
6001 
6002 /****************************************************************************/
6003 /* Prints out the mbufs in the TX mbuf chain.                               */
6004 /*                                                                          */
6005 /* Returns:                                                                 */
6006 /*   Nothing.                                                               */
6007 /****************************************************************************/
6008 static void
6009 bce_dump_tx_mbuf_chain(struct bce_softc *sc, int chain_prod, int count)
6010 {
6011 	struct ifnet *ifp = &sc->arpcom.ac_if;
6012 	int i;
6013 
6014 	if_printf(ifp,
6015 	"----------------------------"
6016 	"  tx mbuf data  "
6017 	"----------------------------\n");
6018 
6019 	for (i = 0; i < count; i++) {
6020 		if_printf(ifp, "txmbuf[%d]\n", chain_prod);
6021 		bce_dump_mbuf(sc, sc->tx_mbuf_ptr[chain_prod]);
6022 		chain_prod = TX_CHAIN_IDX(NEXT_TX_BD(chain_prod));
6023 	}
6024 
6025 	if_printf(ifp,
6026 	"----------------------------"
6027 	"----------------"
6028 	"----------------------------\n");
6029 }
6030 
6031 
6032 /****************************************************************************/
6033 /* Prints out the mbufs in the RX mbuf chain.                               */
6034 /*                                                                          */
6035 /* Returns:                                                                 */
6036 /*   Nothing.                                                               */
6037 /****************************************************************************/
6038 static void
6039 bce_dump_rx_mbuf_chain(struct bce_softc *sc, int chain_prod, int count)
6040 {
6041 	struct ifnet *ifp = &sc->arpcom.ac_if;
6042 	int i;
6043 
6044 	if_printf(ifp,
6045 	"----------------------------"
6046 	"  rx mbuf data  "
6047 	"----------------------------\n");
6048 
6049 	for (i = 0; i < count; i++) {
6050 		if_printf(ifp, "rxmbuf[0x%04X]\n", chain_prod);
6051 		bce_dump_mbuf(sc, sc->rx_mbuf_ptr[chain_prod]);
6052 		chain_prod = RX_CHAIN_IDX(NEXT_RX_BD(chain_prod));
6053 	}
6054 
6055 	if_printf(ifp,
6056 	"----------------------------"
6057 	"----------------"
6058 	"----------------------------\n");
6059 }
6060 
6061 
6062 /****************************************************************************/
6063 /* Prints out a tx_bd structure.                                            */
6064 /*                                                                          */
6065 /* Returns:                                                                 */
6066 /*   Nothing.                                                               */
6067 /****************************************************************************/
6068 static void
6069 bce_dump_txbd(struct bce_softc *sc, int idx, struct tx_bd *txbd)
6070 {
6071 	struct ifnet *ifp = &sc->arpcom.ac_if;
6072 
6073 	if (idx > MAX_TX_BD) {
6074 		/* Index out of range. */
6075 		if_printf(ifp, "tx_bd[0x%04X]: Invalid tx_bd index!\n", idx);
6076 	} else if ((idx & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE) {
6077 		/* TX Chain page pointer. */
6078 		if_printf(ifp, "tx_bd[0x%04X]: haddr = 0x%08X:%08X, "
6079 			  "chain page pointer\n",
6080 			  idx, txbd->tx_bd_haddr_hi, txbd->tx_bd_haddr_lo);
6081 	} else {
6082 		/* Normal tx_bd entry. */
6083 		if_printf(ifp, "tx_bd[0x%04X]: haddr = 0x%08X:%08X, "
6084 			  "nbytes = 0x%08X, "
6085 			  "vlan tag= 0x%04X, flags = 0x%04X (",
6086 			  idx, txbd->tx_bd_haddr_hi, txbd->tx_bd_haddr_lo,
6087 			  txbd->tx_bd_mss_nbytes,
6088 			  txbd->tx_bd_vlan_tag, txbd->tx_bd_flags);
6089 
6090 		if (txbd->tx_bd_flags & TX_BD_FLAGS_CONN_FAULT)
6091 			kprintf(" CONN_FAULT");
6092 
6093 		if (txbd->tx_bd_flags & TX_BD_FLAGS_TCP_UDP_CKSUM)
6094 			kprintf(" TCP_UDP_CKSUM");
6095 
6096 		if (txbd->tx_bd_flags & TX_BD_FLAGS_IP_CKSUM)
6097 			kprintf(" IP_CKSUM");
6098 
6099 		if (txbd->tx_bd_flags & TX_BD_FLAGS_VLAN_TAG)
6100 			kprintf("  VLAN");
6101 
6102 		if (txbd->tx_bd_flags & TX_BD_FLAGS_COAL_NOW)
6103 			kprintf(" COAL_NOW");
6104 
6105 		if (txbd->tx_bd_flags & TX_BD_FLAGS_DONT_GEN_CRC)
6106 			kprintf(" DONT_GEN_CRC");
6107 
6108 		if (txbd->tx_bd_flags & TX_BD_FLAGS_START)
6109 			kprintf(" START");
6110 
6111 		if (txbd->tx_bd_flags & TX_BD_FLAGS_END)
6112 			kprintf(" END");
6113 
6114 		if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_LSO)
6115 			kprintf(" LSO");
6116 
6117 		if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_OPTION_WORD)
6118 			kprintf(" OPTION_WORD");
6119 
6120 		if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_FLAGS)
6121 			kprintf(" FLAGS");
6122 
6123 		if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_SNAP)
6124 			kprintf(" SNAP");
6125 
6126 		kprintf(" )\n");
6127 	}
6128 }
6129 
6130 
6131 /****************************************************************************/
6132 /* Prints out a rx_bd structure.                                            */
6133 /*                                                                          */
6134 /* Returns:                                                                 */
6135 /*   Nothing.                                                               */
6136 /****************************************************************************/
6137 static void
6138 bce_dump_rxbd(struct bce_softc *sc, int idx, struct rx_bd *rxbd)
6139 {
6140 	struct ifnet *ifp = &sc->arpcom.ac_if;
6141 
6142 	if (idx > MAX_RX_BD) {
6143 		/* Index out of range. */
6144 		if_printf(ifp, "rx_bd[0x%04X]: Invalid rx_bd index!\n", idx);
6145 	} else if ((idx & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE) {
6146 		/* TX Chain page pointer. */
6147 		if_printf(ifp, "rx_bd[0x%04X]: haddr = 0x%08X:%08X, "
6148 			  "chain page pointer\n",
6149 			  idx, rxbd->rx_bd_haddr_hi, rxbd->rx_bd_haddr_lo);
6150 	} else {
6151 		/* Normal tx_bd entry. */
6152 		if_printf(ifp, "rx_bd[0x%04X]: haddr = 0x%08X:%08X, "
6153 			  "nbytes = 0x%08X, flags = 0x%08X\n",
6154 			  idx, rxbd->rx_bd_haddr_hi, rxbd->rx_bd_haddr_lo,
6155 			  rxbd->rx_bd_len, rxbd->rx_bd_flags);
6156 	}
6157 }
6158 
6159 
6160 /****************************************************************************/
6161 /* Prints out a l2_fhdr structure.                                          */
6162 /*                                                                          */
6163 /* Returns:                                                                 */
6164 /*   Nothing.                                                               */
6165 /****************************************************************************/
6166 static void
6167 bce_dump_l2fhdr(struct bce_softc *sc, int idx, struct l2_fhdr *l2fhdr)
6168 {
6169 	if_printf(&sc->arpcom.ac_if, "l2_fhdr[0x%04X]: status = 0x%08X, "
6170 		  "pkt_len = 0x%04X, vlan = 0x%04x, "
6171 		  "ip_xsum = 0x%04X, tcp_udp_xsum = 0x%04X\n",
6172 		  idx, l2fhdr->l2_fhdr_status,
6173 		  l2fhdr->l2_fhdr_pkt_len, l2fhdr->l2_fhdr_vlan_tag,
6174 		  l2fhdr->l2_fhdr_ip_xsum, l2fhdr->l2_fhdr_tcp_udp_xsum);
6175 }
6176 
6177 
6178 /****************************************************************************/
6179 /* Prints out the tx chain.                                                 */
6180 /*                                                                          */
6181 /* Returns:                                                                 */
6182 /*   Nothing.                                                               */
6183 /****************************************************************************/
6184 static void
6185 bce_dump_tx_chain(struct bce_softc *sc, int tx_prod, int count)
6186 {
6187 	struct ifnet *ifp = &sc->arpcom.ac_if;
6188 	int i;
6189 
6190 	/* First some info about the tx_bd chain structure. */
6191 	if_printf(ifp,
6192 	"----------------------------"
6193 	"  tx_bd  chain  "
6194 	"----------------------------\n");
6195 
6196 	if_printf(ifp, "page size      = 0x%08X, "
6197 		  "tx chain pages        = 0x%08X\n",
6198 		  (uint32_t)BCM_PAGE_SIZE, (uint32_t)TX_PAGES);
6199 
6200 	if_printf(ifp, "tx_bd per page = 0x%08X, "
6201 		  "usable tx_bd per page = 0x%08X\n",
6202 		  (uint32_t)TOTAL_TX_BD_PER_PAGE,
6203 		  (uint32_t)USABLE_TX_BD_PER_PAGE);
6204 
6205 	if_printf(ifp, "total tx_bd    = 0x%08X\n", (uint32_t)TOTAL_TX_BD);
6206 
6207 	if_printf(ifp,
6208 	"----------------------------"
6209 	"  tx_bd data    "
6210 	"----------------------------\n");
6211 
6212 	/* Now print out the tx_bd's themselves. */
6213 	for (i = 0; i < count; i++) {
6214 		struct tx_bd *txbd;
6215 
6216 	 	txbd = &sc->tx_bd_chain[TX_PAGE(tx_prod)][TX_IDX(tx_prod)];
6217 		bce_dump_txbd(sc, tx_prod, txbd);
6218 		tx_prod = TX_CHAIN_IDX(NEXT_TX_BD(tx_prod));
6219 	}
6220 
6221 	if_printf(ifp,
6222 	"----------------------------"
6223 	"----------------"
6224 	"----------------------------\n");
6225 }
6226 
6227 
6228 /****************************************************************************/
6229 /* Prints out the rx chain.                                                 */
6230 /*                                                                          */
6231 /* Returns:                                                                 */
6232 /*   Nothing.                                                               */
6233 /****************************************************************************/
6234 static void
6235 bce_dump_rx_chain(struct bce_softc *sc, int rx_prod, int count)
6236 {
6237 	struct ifnet *ifp = &sc->arpcom.ac_if;
6238 	int i;
6239 
6240 	/* First some info about the tx_bd chain structure. */
6241 	if_printf(ifp,
6242 	"----------------------------"
6243 	"  rx_bd  chain  "
6244 	"----------------------------\n");
6245 
6246 	if_printf(ifp, "page size      = 0x%08X, "
6247 		  "rx chain pages        = 0x%08X\n",
6248 		  (uint32_t)BCM_PAGE_SIZE, (uint32_t)RX_PAGES);
6249 
6250 	if_printf(ifp, "rx_bd per page = 0x%08X, "
6251 		  "usable rx_bd per page = 0x%08X\n",
6252 		  (uint32_t)TOTAL_RX_BD_PER_PAGE,
6253 		  (uint32_t)USABLE_RX_BD_PER_PAGE);
6254 
6255 	if_printf(ifp, "total rx_bd    = 0x%08X\n", (uint32_t)TOTAL_RX_BD);
6256 
6257 	if_printf(ifp,
6258 	"----------------------------"
6259 	"   rx_bd data   "
6260 	"----------------------------\n");
6261 
6262 	/* Now print out the rx_bd's themselves. */
6263 	for (i = 0; i < count; i++) {
6264 		struct rx_bd *rxbd;
6265 
6266 		rxbd = &sc->rx_bd_chain[RX_PAGE(rx_prod)][RX_IDX(rx_prod)];
6267 		bce_dump_rxbd(sc, rx_prod, rxbd);
6268 		rx_prod = RX_CHAIN_IDX(NEXT_RX_BD(rx_prod));
6269 	}
6270 
6271 	if_printf(ifp,
6272 	"----------------------------"
6273 	"----------------"
6274 	"----------------------------\n");
6275 }
6276 
6277 
6278 /****************************************************************************/
6279 /* Prints out the status block from host memory.                            */
6280 /*                                                                          */
6281 /* Returns:                                                                 */
6282 /*   Nothing.                                                               */
6283 /****************************************************************************/
6284 static void
6285 bce_dump_status_block(struct bce_softc *sc)
6286 {
6287 	struct status_block *sblk = sc->status_block;
6288 	struct ifnet *ifp = &sc->arpcom.ac_if;
6289 
6290 	if_printf(ifp,
6291 	"----------------------------"
6292 	"  Status Block  "
6293 	"----------------------------\n");
6294 
6295 	if_printf(ifp, "    0x%08X - attn_bits\n", sblk->status_attn_bits);
6296 
6297 	if_printf(ifp, "    0x%08X - attn_bits_ack\n",
6298 		  sblk->status_attn_bits_ack);
6299 
6300 	if_printf(ifp, "0x%04X(0x%04X) - rx_cons0\n",
6301 	    sblk->status_rx_quick_consumer_index0,
6302 	    (uint16_t)RX_CHAIN_IDX(sblk->status_rx_quick_consumer_index0));
6303 
6304 	if_printf(ifp, "0x%04X(0x%04X) - tx_cons0\n",
6305 	    sblk->status_tx_quick_consumer_index0,
6306 	    (uint16_t)TX_CHAIN_IDX(sblk->status_tx_quick_consumer_index0));
6307 
6308 	if_printf(ifp, "        0x%04X - status_idx\n", sblk->status_idx);
6309 
6310 	/* Theses indices are not used for normal L2 drivers. */
6311 	if (sblk->status_rx_quick_consumer_index1) {
6312 		if_printf(ifp, "0x%04X(0x%04X) - rx_cons1\n",
6313 		sblk->status_rx_quick_consumer_index1,
6314 		(uint16_t)RX_CHAIN_IDX(sblk->status_rx_quick_consumer_index1));
6315 	}
6316 
6317 	if (sblk->status_tx_quick_consumer_index1) {
6318 		if_printf(ifp, "0x%04X(0x%04X) - tx_cons1\n",
6319 		sblk->status_tx_quick_consumer_index1,
6320 		(uint16_t)TX_CHAIN_IDX(sblk->status_tx_quick_consumer_index1));
6321 	}
6322 
6323 	if (sblk->status_rx_quick_consumer_index2) {
6324 		if_printf(ifp, "0x%04X(0x%04X)- rx_cons2\n",
6325 		sblk->status_rx_quick_consumer_index2,
6326 		(uint16_t)RX_CHAIN_IDX(sblk->status_rx_quick_consumer_index2));
6327 	}
6328 
6329 	if (sblk->status_tx_quick_consumer_index2) {
6330 		if_printf(ifp, "0x%04X(0x%04X) - tx_cons2\n",
6331 		sblk->status_tx_quick_consumer_index2,
6332 		(uint16_t)TX_CHAIN_IDX(sblk->status_tx_quick_consumer_index2));
6333 	}
6334 
6335 	if (sblk->status_rx_quick_consumer_index3) {
6336 		if_printf(ifp, "0x%04X(0x%04X) - rx_cons3\n",
6337 		sblk->status_rx_quick_consumer_index3,
6338 		(uint16_t)RX_CHAIN_IDX(sblk->status_rx_quick_consumer_index3));
6339 	}
6340 
6341 	if (sblk->status_tx_quick_consumer_index3) {
6342 		if_printf(ifp, "0x%04X(0x%04X) - tx_cons3\n",
6343 		sblk->status_tx_quick_consumer_index3,
6344 		(uint16_t)TX_CHAIN_IDX(sblk->status_tx_quick_consumer_index3));
6345 	}
6346 
6347 	if (sblk->status_rx_quick_consumer_index4 ||
6348 	    sblk->status_rx_quick_consumer_index5) {
6349 		if_printf(ifp, "rx_cons4  = 0x%08X, rx_cons5      = 0x%08X\n",
6350 			  sblk->status_rx_quick_consumer_index4,
6351 			  sblk->status_rx_quick_consumer_index5);
6352 	}
6353 
6354 	if (sblk->status_rx_quick_consumer_index6 ||
6355 	    sblk->status_rx_quick_consumer_index7) {
6356 		if_printf(ifp, "rx_cons6  = 0x%08X, rx_cons7      = 0x%08X\n",
6357 			  sblk->status_rx_quick_consumer_index6,
6358 			  sblk->status_rx_quick_consumer_index7);
6359 	}
6360 
6361 	if (sblk->status_rx_quick_consumer_index8 ||
6362 	    sblk->status_rx_quick_consumer_index9) {
6363 		if_printf(ifp, "rx_cons8  = 0x%08X, rx_cons9      = 0x%08X\n",
6364 			  sblk->status_rx_quick_consumer_index8,
6365 			  sblk->status_rx_quick_consumer_index9);
6366 	}
6367 
6368 	if (sblk->status_rx_quick_consumer_index10 ||
6369 	    sblk->status_rx_quick_consumer_index11) {
6370 		if_printf(ifp, "rx_cons10 = 0x%08X, rx_cons11     = 0x%08X\n",
6371 			  sblk->status_rx_quick_consumer_index10,
6372 			  sblk->status_rx_quick_consumer_index11);
6373 	}
6374 
6375 	if (sblk->status_rx_quick_consumer_index12 ||
6376 	    sblk->status_rx_quick_consumer_index13) {
6377 		if_printf(ifp, "rx_cons12 = 0x%08X, rx_cons13     = 0x%08X\n",
6378 			  sblk->status_rx_quick_consumer_index12,
6379 			  sblk->status_rx_quick_consumer_index13);
6380 	}
6381 
6382 	if (sblk->status_rx_quick_consumer_index14 ||
6383 	    sblk->status_rx_quick_consumer_index15) {
6384 		if_printf(ifp, "rx_cons14 = 0x%08X, rx_cons15     = 0x%08X\n",
6385 			  sblk->status_rx_quick_consumer_index14,
6386 			  sblk->status_rx_quick_consumer_index15);
6387 	}
6388 
6389 	if (sblk->status_completion_producer_index ||
6390 	    sblk->status_cmd_consumer_index) {
6391 		if_printf(ifp, "com_prod  = 0x%08X, cmd_cons      = 0x%08X\n",
6392 			  sblk->status_completion_producer_index,
6393 			  sblk->status_cmd_consumer_index);
6394 	}
6395 
6396 	if_printf(ifp,
6397 	"----------------------------"
6398 	"----------------"
6399 	"----------------------------\n");
6400 }
6401 
6402 
6403 /****************************************************************************/
6404 /* Prints out the statistics block.                                         */
6405 /*                                                                          */
6406 /* Returns:                                                                 */
6407 /*   Nothing.                                                               */
6408 /****************************************************************************/
6409 static void
6410 bce_dump_stats_block(struct bce_softc *sc)
6411 {
6412 	struct statistics_block *sblk = sc->stats_block;
6413 	struct ifnet *ifp = &sc->arpcom.ac_if;
6414 
6415 	if_printf(ifp,
6416 	"---------------"
6417 	" Stats Block  (All Stats Not Shown Are 0) "
6418 	"---------------\n");
6419 
6420 	if (sblk->stat_IfHCInOctets_hi || sblk->stat_IfHCInOctets_lo) {
6421 		if_printf(ifp, "0x%08X:%08X : IfHcInOctets\n",
6422 			  sblk->stat_IfHCInOctets_hi,
6423 			  sblk->stat_IfHCInOctets_lo);
6424 	}
6425 
6426 	if (sblk->stat_IfHCInBadOctets_hi || sblk->stat_IfHCInBadOctets_lo) {
6427 		if_printf(ifp, "0x%08X:%08X : IfHcInBadOctets\n",
6428 			  sblk->stat_IfHCInBadOctets_hi,
6429 			  sblk->stat_IfHCInBadOctets_lo);
6430 	}
6431 
6432 	if (sblk->stat_IfHCOutOctets_hi || sblk->stat_IfHCOutOctets_lo) {
6433 		if_printf(ifp, "0x%08X:%08X : IfHcOutOctets\n",
6434 			  sblk->stat_IfHCOutOctets_hi,
6435 			  sblk->stat_IfHCOutOctets_lo);
6436 	}
6437 
6438 	if (sblk->stat_IfHCOutBadOctets_hi || sblk->stat_IfHCOutBadOctets_lo) {
6439 		if_printf(ifp, "0x%08X:%08X : IfHcOutBadOctets\n",
6440 			  sblk->stat_IfHCOutBadOctets_hi,
6441 			  sblk->stat_IfHCOutBadOctets_lo);
6442 	}
6443 
6444 	if (sblk->stat_IfHCInUcastPkts_hi || sblk->stat_IfHCInUcastPkts_lo) {
6445 		if_printf(ifp, "0x%08X:%08X : IfHcInUcastPkts\n",
6446 			  sblk->stat_IfHCInUcastPkts_hi,
6447 			  sblk->stat_IfHCInUcastPkts_lo);
6448 	}
6449 
6450 	if (sblk->stat_IfHCInBroadcastPkts_hi ||
6451 	    sblk->stat_IfHCInBroadcastPkts_lo) {
6452 		if_printf(ifp, "0x%08X:%08X : IfHcInBroadcastPkts\n",
6453 			  sblk->stat_IfHCInBroadcastPkts_hi,
6454 			  sblk->stat_IfHCInBroadcastPkts_lo);
6455 	}
6456 
6457 	if (sblk->stat_IfHCInMulticastPkts_hi ||
6458 	    sblk->stat_IfHCInMulticastPkts_lo) {
6459 		if_printf(ifp, "0x%08X:%08X : IfHcInMulticastPkts\n",
6460 			  sblk->stat_IfHCInMulticastPkts_hi,
6461 			  sblk->stat_IfHCInMulticastPkts_lo);
6462 	}
6463 
6464 	if (sblk->stat_IfHCOutUcastPkts_hi || sblk->stat_IfHCOutUcastPkts_lo) {
6465 		if_printf(ifp, "0x%08X:%08X : IfHcOutUcastPkts\n",
6466 			  sblk->stat_IfHCOutUcastPkts_hi,
6467 			  sblk->stat_IfHCOutUcastPkts_lo);
6468 	}
6469 
6470 	if (sblk->stat_IfHCOutBroadcastPkts_hi ||
6471 	    sblk->stat_IfHCOutBroadcastPkts_lo) {
6472 		if_printf(ifp, "0x%08X:%08X : IfHcOutBroadcastPkts\n",
6473 			  sblk->stat_IfHCOutBroadcastPkts_hi,
6474 			  sblk->stat_IfHCOutBroadcastPkts_lo);
6475 	}
6476 
6477 	if (sblk->stat_IfHCOutMulticastPkts_hi ||
6478 	    sblk->stat_IfHCOutMulticastPkts_lo) {
6479 		if_printf(ifp, "0x%08X:%08X : IfHcOutMulticastPkts\n",
6480 			  sblk->stat_IfHCOutMulticastPkts_hi,
6481 			  sblk->stat_IfHCOutMulticastPkts_lo);
6482 	}
6483 
6484 	if (sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors) {
6485 		if_printf(ifp, "         0x%08X : "
6486 		"emac_tx_stat_dot3statsinternalmactransmiterrors\n",
6487 		sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors);
6488 	}
6489 
6490 	if (sblk->stat_Dot3StatsCarrierSenseErrors) {
6491 		if_printf(ifp, "         0x%08X : "
6492 			  "Dot3StatsCarrierSenseErrors\n",
6493 			  sblk->stat_Dot3StatsCarrierSenseErrors);
6494 	}
6495 
6496 	if (sblk->stat_Dot3StatsFCSErrors) {
6497 		if_printf(ifp, "         0x%08X : Dot3StatsFCSErrors\n",
6498 			  sblk->stat_Dot3StatsFCSErrors);
6499 	}
6500 
6501 	if (sblk->stat_Dot3StatsAlignmentErrors) {
6502 		if_printf(ifp, "         0x%08X : Dot3StatsAlignmentErrors\n",
6503 			  sblk->stat_Dot3StatsAlignmentErrors);
6504 	}
6505 
6506 	if (sblk->stat_Dot3StatsSingleCollisionFrames) {
6507 		if_printf(ifp, "         0x%08X : "
6508 			  "Dot3StatsSingleCollisionFrames\n",
6509 			  sblk->stat_Dot3StatsSingleCollisionFrames);
6510 	}
6511 
6512 	if (sblk->stat_Dot3StatsMultipleCollisionFrames) {
6513 		if_printf(ifp, "         0x%08X : "
6514 			  "Dot3StatsMultipleCollisionFrames\n",
6515 			  sblk->stat_Dot3StatsMultipleCollisionFrames);
6516 	}
6517 
6518 	if (sblk->stat_Dot3StatsDeferredTransmissions) {
6519 		if_printf(ifp, "         0x%08X : "
6520 			  "Dot3StatsDeferredTransmissions\n",
6521 			  sblk->stat_Dot3StatsDeferredTransmissions);
6522 	}
6523 
6524 	if (sblk->stat_Dot3StatsExcessiveCollisions) {
6525 		if_printf(ifp, "         0x%08X : "
6526 			  "Dot3StatsExcessiveCollisions\n",
6527 			  sblk->stat_Dot3StatsExcessiveCollisions);
6528 	}
6529 
6530 	if (sblk->stat_Dot3StatsLateCollisions) {
6531 		if_printf(ifp, "         0x%08X : Dot3StatsLateCollisions\n",
6532 			  sblk->stat_Dot3StatsLateCollisions);
6533 	}
6534 
6535 	if (sblk->stat_EtherStatsCollisions) {
6536 		if_printf(ifp, "         0x%08X : EtherStatsCollisions\n",
6537 			  sblk->stat_EtherStatsCollisions);
6538 	}
6539 
6540 	if (sblk->stat_EtherStatsFragments)  {
6541 		if_printf(ifp, "         0x%08X : EtherStatsFragments\n",
6542 			  sblk->stat_EtherStatsFragments);
6543 	}
6544 
6545 	if (sblk->stat_EtherStatsJabbers) {
6546 		if_printf(ifp, "         0x%08X : EtherStatsJabbers\n",
6547 			  sblk->stat_EtherStatsJabbers);
6548 	}
6549 
6550 	if (sblk->stat_EtherStatsUndersizePkts) {
6551 		if_printf(ifp, "         0x%08X : EtherStatsUndersizePkts\n",
6552 			  sblk->stat_EtherStatsUndersizePkts);
6553 	}
6554 
6555 	if (sblk->stat_EtherStatsOverrsizePkts) {
6556 		if_printf(ifp, "         0x%08X : EtherStatsOverrsizePkts\n",
6557 			  sblk->stat_EtherStatsOverrsizePkts);
6558 	}
6559 
6560 	if (sblk->stat_EtherStatsPktsRx64Octets) {
6561 		if_printf(ifp, "         0x%08X : EtherStatsPktsRx64Octets\n",
6562 			  sblk->stat_EtherStatsPktsRx64Octets);
6563 	}
6564 
6565 	if (sblk->stat_EtherStatsPktsRx65Octetsto127Octets) {
6566 		if_printf(ifp, "         0x%08X : "
6567 			  "EtherStatsPktsRx65Octetsto127Octets\n",
6568 			  sblk->stat_EtherStatsPktsRx65Octetsto127Octets);
6569 	}
6570 
6571 	if (sblk->stat_EtherStatsPktsRx128Octetsto255Octets) {
6572 		if_printf(ifp, "         0x%08X : "
6573 			  "EtherStatsPktsRx128Octetsto255Octets\n",
6574 			  sblk->stat_EtherStatsPktsRx128Octetsto255Octets);
6575 	}
6576 
6577 	if (sblk->stat_EtherStatsPktsRx256Octetsto511Octets) {
6578 		if_printf(ifp, "         0x%08X : "
6579 			  "EtherStatsPktsRx256Octetsto511Octets\n",
6580 			  sblk->stat_EtherStatsPktsRx256Octetsto511Octets);
6581 	}
6582 
6583 	if (sblk->stat_EtherStatsPktsRx512Octetsto1023Octets) {
6584 		if_printf(ifp, "         0x%08X : "
6585 			  "EtherStatsPktsRx512Octetsto1023Octets\n",
6586 			  sblk->stat_EtherStatsPktsRx512Octetsto1023Octets);
6587 	}
6588 
6589 	if (sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets) {
6590 		if_printf(ifp, "         0x%08X : "
6591 			  "EtherStatsPktsRx1024Octetsto1522Octets\n",
6592 			  sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets);
6593 	}
6594 
6595 	if (sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets) {
6596 		if_printf(ifp, "         0x%08X : "
6597 			  "EtherStatsPktsRx1523Octetsto9022Octets\n",
6598 			  sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets);
6599 	}
6600 
6601 	if (sblk->stat_EtherStatsPktsTx64Octets) {
6602 		if_printf(ifp, "         0x%08X : EtherStatsPktsTx64Octets\n",
6603 			  sblk->stat_EtherStatsPktsTx64Octets);
6604 	}
6605 
6606 	if (sblk->stat_EtherStatsPktsTx65Octetsto127Octets) {
6607 		if_printf(ifp, "         0x%08X : "
6608 			  "EtherStatsPktsTx65Octetsto127Octets\n",
6609 			  sblk->stat_EtherStatsPktsTx65Octetsto127Octets);
6610 	}
6611 
6612 	if (sblk->stat_EtherStatsPktsTx128Octetsto255Octets) {
6613 		if_printf(ifp, "         0x%08X : "
6614 			  "EtherStatsPktsTx128Octetsto255Octets\n",
6615 			  sblk->stat_EtherStatsPktsTx128Octetsto255Octets);
6616 	}
6617 
6618 	if (sblk->stat_EtherStatsPktsTx256Octetsto511Octets) {
6619 		if_printf(ifp, "         0x%08X : "
6620 			  "EtherStatsPktsTx256Octetsto511Octets\n",
6621 			  sblk->stat_EtherStatsPktsTx256Octetsto511Octets);
6622 	}
6623 
6624 	if (sblk->stat_EtherStatsPktsTx512Octetsto1023Octets) {
6625 		if_printf(ifp, "         0x%08X : "
6626 			  "EtherStatsPktsTx512Octetsto1023Octets\n",
6627 			  sblk->stat_EtherStatsPktsTx512Octetsto1023Octets);
6628 	}
6629 
6630 	if (sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets) {
6631 		if_printf(ifp, "         0x%08X : "
6632 			  "EtherStatsPktsTx1024Octetsto1522Octets\n",
6633 			  sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets);
6634 	}
6635 
6636 	if (sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets) {
6637 		if_printf(ifp, "         0x%08X : "
6638 			  "EtherStatsPktsTx1523Octetsto9022Octets\n",
6639 			  sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets);
6640 	}
6641 
6642 	if (sblk->stat_XonPauseFramesReceived) {
6643 		if_printf(ifp, "         0x%08X : XonPauseFramesReceived\n",
6644 			  sblk->stat_XonPauseFramesReceived);
6645 	}
6646 
6647 	if (sblk->stat_XoffPauseFramesReceived) {
6648 		if_printf(ifp, "          0x%08X : XoffPauseFramesReceived\n",
6649 			  sblk->stat_XoffPauseFramesReceived);
6650 	}
6651 
6652 	if (sblk->stat_OutXonSent) {
6653 		if_printf(ifp, "         0x%08X : OutXoffSent\n",
6654 			  sblk->stat_OutXonSent);
6655 	}
6656 
6657 	if (sblk->stat_OutXoffSent) {
6658 		if_printf(ifp, "         0x%08X : OutXoffSent\n",
6659 			  sblk->stat_OutXoffSent);
6660 	}
6661 
6662 	if (sblk->stat_FlowControlDone) {
6663 		if_printf(ifp, "         0x%08X : FlowControlDone\n",
6664 			  sblk->stat_FlowControlDone);
6665 	}
6666 
6667 	if (sblk->stat_MacControlFramesReceived) {
6668 		if_printf(ifp, "         0x%08X : MacControlFramesReceived\n",
6669 			  sblk->stat_MacControlFramesReceived);
6670 	}
6671 
6672 	if (sblk->stat_XoffStateEntered) {
6673 		if_printf(ifp, "         0x%08X : XoffStateEntered\n",
6674 			  sblk->stat_XoffStateEntered);
6675 	}
6676 
6677 	if (sblk->stat_IfInFramesL2FilterDiscards) {
6678 		if_printf(ifp, "         0x%08X : IfInFramesL2FilterDiscards\n",			  sblk->stat_IfInFramesL2FilterDiscards);
6679 	}
6680 
6681 	if (sblk->stat_IfInRuleCheckerDiscards) {
6682 		if_printf(ifp, "         0x%08X : IfInRuleCheckerDiscards\n",
6683 			  sblk->stat_IfInRuleCheckerDiscards);
6684 	}
6685 
6686 	if (sblk->stat_IfInFTQDiscards) {
6687 		if_printf(ifp, "         0x%08X : IfInFTQDiscards\n",
6688 			  sblk->stat_IfInFTQDiscards);
6689 	}
6690 
6691 	if (sblk->stat_IfInMBUFDiscards) {
6692 		if_printf(ifp, "         0x%08X : IfInMBUFDiscards\n",
6693 			  sblk->stat_IfInMBUFDiscards);
6694 	}
6695 
6696 	if (sblk->stat_IfInRuleCheckerP4Hit) {
6697 		if_printf(ifp, "         0x%08X : IfInRuleCheckerP4Hit\n",
6698 			  sblk->stat_IfInRuleCheckerP4Hit);
6699 	}
6700 
6701 	if (sblk->stat_CatchupInRuleCheckerDiscards) {
6702 		if_printf(ifp, "         0x%08X : "
6703 			  "CatchupInRuleCheckerDiscards\n",
6704 			  sblk->stat_CatchupInRuleCheckerDiscards);
6705 	}
6706 
6707 	if (sblk->stat_CatchupInFTQDiscards) {
6708 		if_printf(ifp, "         0x%08X : CatchupInFTQDiscards\n",
6709 			  sblk->stat_CatchupInFTQDiscards);
6710 	}
6711 
6712 	if (sblk->stat_CatchupInMBUFDiscards) {
6713 		if_printf(ifp, "         0x%08X : CatchupInMBUFDiscards\n",
6714 			  sblk->stat_CatchupInMBUFDiscards);
6715 	}
6716 
6717 	if (sblk->stat_CatchupInRuleCheckerP4Hit) {
6718 		if_printf(ifp, "         0x%08X : CatchupInRuleCheckerP4Hit\n",
6719 			  sblk->stat_CatchupInRuleCheckerP4Hit);
6720 	}
6721 
6722 	if_printf(ifp,
6723 	"----------------------------"
6724 	"----------------"
6725 	"----------------------------\n");
6726 }
6727 
6728 
6729 /****************************************************************************/
6730 /* Prints out a summary of the driver state.                                */
6731 /*                                                                          */
6732 /* Returns:                                                                 */
6733 /*   Nothing.                                                               */
6734 /****************************************************************************/
6735 static void
6736 bce_dump_driver_state(struct bce_softc *sc)
6737 {
6738 	struct ifnet *ifp = &sc->arpcom.ac_if;
6739 	uint32_t val_hi, val_lo;
6740 
6741 	if_printf(ifp,
6742 	"-----------------------------"
6743 	" Driver State "
6744 	"-----------------------------\n");
6745 
6746 	val_hi = BCE_ADDR_HI(sc);
6747 	val_lo = BCE_ADDR_LO(sc);
6748 	if_printf(ifp, "0x%08X:%08X - (sc) driver softc structure "
6749 		  "virtual address\n", val_hi, val_lo);
6750 
6751 	val_hi = BCE_ADDR_HI(sc->status_block);
6752 	val_lo = BCE_ADDR_LO(sc->status_block);
6753 	if_printf(ifp, "0x%08X:%08X - (sc->status_block) status block "
6754 		  "virtual address\n", val_hi, val_lo);
6755 
6756 	val_hi = BCE_ADDR_HI(sc->stats_block);
6757 	val_lo = BCE_ADDR_LO(sc->stats_block);
6758 	if_printf(ifp, "0x%08X:%08X - (sc->stats_block) statistics block "
6759 		  "virtual address\n", val_hi, val_lo);
6760 
6761 	val_hi = BCE_ADDR_HI(sc->tx_bd_chain);
6762 	val_lo = BCE_ADDR_LO(sc->tx_bd_chain);
6763 	if_printf(ifp, "0x%08X:%08X - (sc->tx_bd_chain) tx_bd chain "
6764 		  "virtual adddress\n", val_hi, val_lo);
6765 
6766 	val_hi = BCE_ADDR_HI(sc->rx_bd_chain);
6767 	val_lo = BCE_ADDR_LO(sc->rx_bd_chain);
6768 	if_printf(ifp, "0x%08X:%08X - (sc->rx_bd_chain) rx_bd chain "
6769 		  "virtual address\n", val_hi, val_lo);
6770 
6771 	val_hi = BCE_ADDR_HI(sc->tx_mbuf_ptr);
6772 	val_lo = BCE_ADDR_LO(sc->tx_mbuf_ptr);
6773 	if_printf(ifp, "0x%08X:%08X - (sc->tx_mbuf_ptr) tx mbuf chain "
6774 		  "virtual address\n", val_hi, val_lo);
6775 
6776 	val_hi = BCE_ADDR_HI(sc->rx_mbuf_ptr);
6777 	val_lo = BCE_ADDR_LO(sc->rx_mbuf_ptr);
6778 	if_printf(ifp, "0x%08X:%08X - (sc->rx_mbuf_ptr) rx mbuf chain "
6779 		  "virtual address\n", val_hi, val_lo);
6780 
6781 	if_printf(ifp, "         0x%08X - (sc->interrupts_generated) "
6782 		  "h/w intrs\n", sc->interrupts_generated);
6783 
6784 	if_printf(ifp, "         0x%08X - (sc->rx_interrupts) "
6785 		  "rx interrupts handled\n", sc->rx_interrupts);
6786 
6787 	if_printf(ifp, "         0x%08X - (sc->tx_interrupts) "
6788 		  "tx interrupts handled\n", sc->tx_interrupts);
6789 
6790 	if_printf(ifp, "         0x%08X - (sc->last_status_idx) "
6791 		  "status block index\n", sc->last_status_idx);
6792 
6793 	if_printf(ifp, "     0x%04X(0x%04X) - (sc->tx_prod) "
6794 		  "tx producer index\n",
6795 		  sc->tx_prod, (uint16_t)TX_CHAIN_IDX(sc->tx_prod));
6796 
6797 	if_printf(ifp, "     0x%04X(0x%04X) - (sc->tx_cons) "
6798 		  "tx consumer index\n",
6799 		  sc->tx_cons, (uint16_t)TX_CHAIN_IDX(sc->tx_cons));
6800 
6801 	if_printf(ifp, "         0x%08X - (sc->tx_prod_bseq) "
6802 		  "tx producer bseq index\n", sc->tx_prod_bseq);
6803 
6804 	if_printf(ifp, "     0x%04X(0x%04X) - (sc->rx_prod) "
6805 		  "rx producer index\n",
6806 		  sc->rx_prod, (uint16_t)RX_CHAIN_IDX(sc->rx_prod));
6807 
6808 	if_printf(ifp, "     0x%04X(0x%04X) - (sc->rx_cons) "
6809 		  "rx consumer index\n",
6810 		  sc->rx_cons, (uint16_t)RX_CHAIN_IDX(sc->rx_cons));
6811 
6812 	if_printf(ifp, "         0x%08X - (sc->rx_prod_bseq) "
6813 		  "rx producer bseq index\n", sc->rx_prod_bseq);
6814 
6815 	if_printf(ifp, "         0x%08X - (sc->rx_mbuf_alloc) "
6816 		  "rx mbufs allocated\n", sc->rx_mbuf_alloc);
6817 
6818 	if_printf(ifp, "         0x%08X - (sc->free_rx_bd) "
6819 		  "free rx_bd's\n", sc->free_rx_bd);
6820 
6821 	if_printf(ifp, "0x%08X/%08X - (sc->rx_low_watermark) rx "
6822 		  "low watermark\n", sc->rx_low_watermark, sc->max_rx_bd);
6823 
6824 	if_printf(ifp, "         0x%08X - (sc->txmbuf_alloc) "
6825 		  "tx mbufs allocated\n", sc->tx_mbuf_alloc);
6826 
6827 	if_printf(ifp, "         0x%08X - (sc->rx_mbuf_alloc) "
6828 		  "rx mbufs allocated\n", sc->rx_mbuf_alloc);
6829 
6830 	if_printf(ifp, "         0x%08X - (sc->used_tx_bd) used tx_bd's\n",
6831 		  sc->used_tx_bd);
6832 
6833 	if_printf(ifp, "0x%08X/%08X - (sc->tx_hi_watermark) tx hi watermark\n",
6834 		  sc->tx_hi_watermark, sc->max_tx_bd);
6835 
6836 	if_printf(ifp, "         0x%08X - (sc->mbuf_alloc_failed) "
6837 		  "failed mbuf alloc\n", sc->mbuf_alloc_failed);
6838 
6839 	if_printf(ifp,
6840 	"----------------------------"
6841 	"----------------"
6842 	"----------------------------\n");
6843 }
6844 
6845 
6846 /****************************************************************************/
6847 /* Prints out the hardware state through a summary of important registers,  */
6848 /* followed by a complete register dump.                                    */
6849 /*                                                                          */
6850 /* Returns:                                                                 */
6851 /*   Nothing.                                                               */
6852 /****************************************************************************/
6853 static void
6854 bce_dump_hw_state(struct bce_softc *sc)
6855 {
6856 	struct ifnet *ifp = &sc->arpcom.ac_if;
6857 	uint32_t val1;
6858 	int i;
6859 
6860 	if_printf(ifp,
6861 	"----------------------------"
6862 	" Hardware State "
6863 	"----------------------------\n");
6864 
6865 	if_printf(ifp, "0x%08X - bootcode version\n", sc->bce_fw_ver);
6866 
6867 	val1 = REG_RD(sc, BCE_MISC_ENABLE_STATUS_BITS);
6868 	if_printf(ifp, "0x%08X - (0x%06X) misc_enable_status_bits\n",
6869 		  val1, BCE_MISC_ENABLE_STATUS_BITS);
6870 
6871 	val1 = REG_RD(sc, BCE_DMA_STATUS);
6872 	if_printf(ifp, "0x%08X - (0x%04X) dma_status\n", val1, BCE_DMA_STATUS);
6873 
6874 	val1 = REG_RD(sc, BCE_CTX_STATUS);
6875 	if_printf(ifp, "0x%08X - (0x%04X) ctx_status\n", val1, BCE_CTX_STATUS);
6876 
6877 	val1 = REG_RD(sc, BCE_EMAC_STATUS);
6878 	if_printf(ifp, "0x%08X - (0x%04X) emac_status\n",
6879 		  val1, BCE_EMAC_STATUS);
6880 
6881 	val1 = REG_RD(sc, BCE_RPM_STATUS);
6882 	if_printf(ifp, "0x%08X - (0x%04X) rpm_status\n", val1, BCE_RPM_STATUS);
6883 
6884 	val1 = REG_RD(sc, BCE_TBDR_STATUS);
6885 	if_printf(ifp, "0x%08X - (0x%04X) tbdr_status\n",
6886 		  val1, BCE_TBDR_STATUS);
6887 
6888 	val1 = REG_RD(sc, BCE_TDMA_STATUS);
6889 	if_printf(ifp, "0x%08X - (0x%04X) tdma_status\n",
6890 		  val1, BCE_TDMA_STATUS);
6891 
6892 	val1 = REG_RD(sc, BCE_HC_STATUS);
6893 	if_printf(ifp, "0x%08X - (0x%06X) hc_status\n", val1, BCE_HC_STATUS);
6894 
6895 	val1 = REG_RD_IND(sc, BCE_TXP_CPU_STATE);
6896 	if_printf(ifp, "0x%08X - (0x%06X) txp_cpu_state\n",
6897 		  val1, BCE_TXP_CPU_STATE);
6898 
6899 	val1 = REG_RD_IND(sc, BCE_TPAT_CPU_STATE);
6900 	if_printf(ifp, "0x%08X - (0x%06X) tpat_cpu_state\n",
6901 		  val1, BCE_TPAT_CPU_STATE);
6902 
6903 	val1 = REG_RD_IND(sc, BCE_RXP_CPU_STATE);
6904 	if_printf(ifp, "0x%08X - (0x%06X) rxp_cpu_state\n",
6905 		  val1, BCE_RXP_CPU_STATE);
6906 
6907 	val1 = REG_RD_IND(sc, BCE_COM_CPU_STATE);
6908 	if_printf(ifp, "0x%08X - (0x%06X) com_cpu_state\n",
6909 		  val1, BCE_COM_CPU_STATE);
6910 
6911 	val1 = REG_RD_IND(sc, BCE_MCP_CPU_STATE);
6912 	if_printf(ifp, "0x%08X - (0x%06X) mcp_cpu_state\n",
6913 		  val1, BCE_MCP_CPU_STATE);
6914 
6915 	val1 = REG_RD_IND(sc, BCE_CP_CPU_STATE);
6916 	if_printf(ifp, "0x%08X - (0x%06X) cp_cpu_state\n",
6917 		  val1, BCE_CP_CPU_STATE);
6918 
6919 	if_printf(ifp,
6920 	"----------------------------"
6921 	"----------------"
6922 	"----------------------------\n");
6923 
6924 	if_printf(ifp,
6925 	"----------------------------"
6926 	" Register  Dump "
6927 	"----------------------------\n");
6928 
6929 	for (i = 0x400; i < 0x8000; i += 0x10) {
6930 		if_printf(ifp, "0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n", i,
6931 			  REG_RD(sc, i),
6932 			  REG_RD(sc, i + 0x4),
6933 			  REG_RD(sc, i + 0x8),
6934 			  REG_RD(sc, i + 0xc));
6935 	}
6936 
6937 	if_printf(ifp,
6938 	"----------------------------"
6939 	"----------------"
6940 	"----------------------------\n");
6941 }
6942 
6943 
6944 /****************************************************************************/
6945 /* Prints out the TXP state.                                                */
6946 /*                                                                          */
6947 /* Returns:                                                                 */
6948 /*   Nothing.                                                               */
6949 /****************************************************************************/
6950 static void
6951 bce_dump_txp_state(struct bce_softc *sc)
6952 {
6953 	struct ifnet *ifp = &sc->arpcom.ac_if;
6954 	uint32_t val1;
6955 	int i;
6956 
6957 	if_printf(ifp,
6958 	"----------------------------"
6959 	"   TXP  State   "
6960 	"----------------------------\n");
6961 
6962 	val1 = REG_RD_IND(sc, BCE_TXP_CPU_MODE);
6963 	if_printf(ifp, "0x%08X - (0x%06X) txp_cpu_mode\n",
6964 		  val1, BCE_TXP_CPU_MODE);
6965 
6966 	val1 = REG_RD_IND(sc, BCE_TXP_CPU_STATE);
6967 	if_printf(ifp, "0x%08X - (0x%06X) txp_cpu_state\n",
6968 		  val1, BCE_TXP_CPU_STATE);
6969 
6970 	val1 = REG_RD_IND(sc, BCE_TXP_CPU_EVENT_MASK);
6971 	if_printf(ifp, "0x%08X - (0x%06X) txp_cpu_event_mask\n",
6972 		  val1, BCE_TXP_CPU_EVENT_MASK);
6973 
6974 	if_printf(ifp,
6975 	"----------------------------"
6976 	" Register  Dump "
6977 	"----------------------------\n");
6978 
6979 	for (i = BCE_TXP_CPU_MODE; i < 0x68000; i += 0x10) {
6980 		/* Skip the big blank spaces */
6981 		if (i < 0x454000 && i > 0x5ffff) {
6982 			if_printf(ifp, "0x%04X: "
6983 				  "0x%08X 0x%08X 0x%08X 0x%08X\n", i,
6984 				  REG_RD_IND(sc, i),
6985 				  REG_RD_IND(sc, i + 0x4),
6986 				  REG_RD_IND(sc, i + 0x8),
6987 				  REG_RD_IND(sc, i + 0xc));
6988 		}
6989 	}
6990 
6991 	if_printf(ifp,
6992 	"----------------------------"
6993 	"----------------"
6994 	"----------------------------\n");
6995 }
6996 
6997 
6998 /****************************************************************************/
6999 /* Prints out the RXP state.                                                */
7000 /*                                                                          */
7001 /* Returns:                                                                 */
7002 /*   Nothing.                                                               */
7003 /****************************************************************************/
7004 static void
7005 bce_dump_rxp_state(struct bce_softc *sc)
7006 {
7007 	struct ifnet *ifp = &sc->arpcom.ac_if;
7008 	uint32_t val1;
7009 	int i;
7010 
7011 	if_printf(ifp,
7012 	"----------------------------"
7013 	"   RXP  State   "
7014 	"----------------------------\n");
7015 
7016 	val1 = REG_RD_IND(sc, BCE_RXP_CPU_MODE);
7017 	if_printf(ifp, "0x%08X - (0x%06X) rxp_cpu_mode\n",
7018 		  val1, BCE_RXP_CPU_MODE);
7019 
7020 	val1 = REG_RD_IND(sc, BCE_RXP_CPU_STATE);
7021 	if_printf(ifp, "0x%08X - (0x%06X) rxp_cpu_state\n",
7022 		  val1, BCE_RXP_CPU_STATE);
7023 
7024 	val1 = REG_RD_IND(sc, BCE_RXP_CPU_EVENT_MASK);
7025 	if_printf(ifp, "0x%08X - (0x%06X) rxp_cpu_event_mask\n",
7026 		  val1, BCE_RXP_CPU_EVENT_MASK);
7027 
7028 	if_printf(ifp,
7029 	"----------------------------"
7030 	" Register  Dump "
7031 	"----------------------------\n");
7032 
7033 	for (i = BCE_RXP_CPU_MODE; i < 0xe8fff; i += 0x10) {
7034 		/* Skip the big blank sapces */
7035 		if (i < 0xc5400 && i > 0xdffff) {
7036 			if_printf(ifp, "0x%04X: "
7037 				  "0x%08X 0x%08X 0x%08X 0x%08X\n", i,
7038 				  REG_RD_IND(sc, i),
7039 				  REG_RD_IND(sc, i + 0x4),
7040 				  REG_RD_IND(sc, i + 0x8),
7041 				  REG_RD_IND(sc, i + 0xc));
7042 		}
7043 	}
7044 
7045 	if_printf(ifp,
7046 	"----------------------------"
7047 	"----------------"
7048 	"----------------------------\n");
7049 }
7050 
7051 
7052 /****************************************************************************/
7053 /* Prints out the TPAT state.                                               */
7054 /*                                                                          */
7055 /* Returns:                                                                 */
7056 /*   Nothing.                                                               */
7057 /****************************************************************************/
7058 static void
7059 bce_dump_tpat_state(struct bce_softc *sc)
7060 {
7061 	struct ifnet *ifp = &sc->arpcom.ac_if;
7062 	uint32_t val1;
7063 	int i;
7064 
7065 	if_printf(ifp,
7066 	"----------------------------"
7067 	"   TPAT State   "
7068 	"----------------------------\n");
7069 
7070 	val1 = REG_RD_IND(sc, BCE_TPAT_CPU_MODE);
7071 	if_printf(ifp, "0x%08X - (0x%06X) tpat_cpu_mode\n",
7072 		  val1, BCE_TPAT_CPU_MODE);
7073 
7074 	val1 = REG_RD_IND(sc, BCE_TPAT_CPU_STATE);
7075 	if_printf(ifp, "0x%08X - (0x%06X) tpat_cpu_state\n",
7076 		  val1, BCE_TPAT_CPU_STATE);
7077 
7078 	val1 = REG_RD_IND(sc, BCE_TPAT_CPU_EVENT_MASK);
7079 	if_printf(ifp, "0x%08X - (0x%06X) tpat_cpu_event_mask\n",
7080 		  val1, BCE_TPAT_CPU_EVENT_MASK);
7081 
7082 	if_printf(ifp,
7083 	"----------------------------"
7084 	" Register  Dump "
7085 	"----------------------------\n");
7086 
7087 	for (i = BCE_TPAT_CPU_MODE; i < 0xa3fff; i += 0x10) {
7088 		/* Skip the big blank spaces */
7089 		if (i < 0x854000 && i > 0x9ffff) {
7090 			if_printf(ifp, "0x%04X: "
7091 				  "0x%08X 0x%08X 0x%08X 0x%08X\n", i,
7092 				  REG_RD_IND(sc, i),
7093 				  REG_RD_IND(sc, i + 0x4),
7094 				  REG_RD_IND(sc, i + 0x8),
7095 				  REG_RD_IND(sc, i + 0xc));
7096 		}
7097 	}
7098 
7099 	if_printf(ifp,
7100 	"----------------------------"
7101 	"----------------"
7102 	"----------------------------\n");
7103 }
7104 
7105 
7106 /****************************************************************************/
7107 /* Prints out the driver state and then enters the debugger.                */
7108 /*                                                                          */
7109 /* Returns:                                                                 */
7110 /*   Nothing.                                                               */
7111 /****************************************************************************/
7112 static void
7113 bce_breakpoint(struct bce_softc *sc)
7114 {
7115 #if 0
7116 	bce_freeze_controller(sc);
7117 #endif
7118 
7119 	bce_dump_driver_state(sc);
7120 	bce_dump_status_block(sc);
7121 	bce_dump_tx_chain(sc, 0, TOTAL_TX_BD);
7122 	bce_dump_hw_state(sc);
7123 	bce_dump_txp_state(sc);
7124 
7125 #if 0
7126 	bce_unfreeze_controller(sc);
7127 #endif
7128 
7129 	/* Call the debugger. */
7130 	breakpoint();
7131 }
7132 
7133 #endif	/* BCE_DEBUG */
7134