xref: /dragonfly/sys/dev/netif/bce/if_bce.c (revision a8ca8ac6)
1 /*-
2  * Copyright (c) 2006-2007 Broadcom Corporation
3  *	David Christensen <davidch@broadcom.com>.  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  *
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. Neither the name of Broadcom Corporation nor the name of its contributors
15  *    may be used to endorse or promote products derived from this software
16  *    without specific prior written consent.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
22  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28  * THE POSSIBILITY OF SUCH DAMAGE.
29  *
30  * $FreeBSD: src/sys/dev/bce/if_bce.c,v 1.31 2007/05/16 23:34:11 davidch Exp $
31  * $DragonFly: src/sys/dev/netif/bce/if_bce.c,v 1.21 2008/11/19 13:57:49 sephe Exp $
32  */
33 
34 /*
35  * The following controllers are supported by this driver:
36  *   BCM5706C A2, A3
37  *   BCM5708C B1, B2
38  *
39  * The following controllers are not supported by this driver:
40  *   BCM5706C A0, A1
41  *   BCM5706S A0, A1, A2, A3
42  *   BCM5708C A0, B0
43  *   BCM5708S A0, B0, B1, B2
44  */
45 
46 #include "opt_bce.h"
47 #include "opt_polling.h"
48 
49 #include <sys/param.h>
50 #include <sys/bus.h>
51 #include <sys/endian.h>
52 #include <sys/kernel.h>
53 #include <sys/interrupt.h>
54 #include <sys/mbuf.h>
55 #include <sys/malloc.h>
56 #include <sys/queue.h>
57 #ifdef BCE_DEBUG
58 #include <sys/random.h>
59 #endif
60 #include <sys/rman.h>
61 #include <sys/serialize.h>
62 #include <sys/socket.h>
63 #include <sys/sockio.h>
64 #include <sys/sysctl.h>
65 
66 #include <net/bpf.h>
67 #include <net/ethernet.h>
68 #include <net/if.h>
69 #include <net/if_arp.h>
70 #include <net/if_dl.h>
71 #include <net/if_media.h>
72 #include <net/if_types.h>
73 #include <net/ifq_var.h>
74 #include <net/vlan/if_vlan_var.h>
75 #include <net/vlan/if_vlan_ether.h>
76 
77 #include <dev/netif/mii_layer/mii.h>
78 #include <dev/netif/mii_layer/miivar.h>
79 
80 #include <bus/pci/pcireg.h>
81 #include <bus/pci/pcivar.h>
82 
83 #include "miibus_if.h"
84 
85 #include <dev/netif/bce/if_bcereg.h>
86 #include <dev/netif/bce/if_bcefw.h>
87 
88 /****************************************************************************/
89 /* BCE Debug Options                                                        */
90 /****************************************************************************/
91 #ifdef BCE_DEBUG
92 
93 static uint32_t	bce_debug = BCE_WARN;
94 
95 /*
96  *          0 = Never
97  *          1 = 1 in 2,147,483,648
98  *        256 = 1 in     8,388,608
99  *       2048 = 1 in     1,048,576
100  *      65536 = 1 in        32,768
101  *    1048576 = 1 in         2,048
102  *  268435456 = 1 in             8
103  *  536870912 = 1 in             4
104  * 1073741824 = 1 in             2
105  *
106  * bce_debug_l2fhdr_status_check:
107  *     How often the l2_fhdr frame error check will fail.
108  *
109  * bce_debug_unexpected_attention:
110  *     How often the unexpected attention check will fail.
111  *
112  * bce_debug_mbuf_allocation_failure:
113  *     How often to simulate an mbuf allocation failure.
114  *
115  * bce_debug_dma_map_addr_failure:
116  *     How often to simulate a DMA mapping failure.
117  *
118  * bce_debug_bootcode_running_failure:
119  *     How often to simulate a bootcode failure.
120  */
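/*
 * Illustrative reading of the table above: the failure rate scales
 * linearly with the value, roughly value / 2^31 (2,147,483,648);
 * e.g. a setting of 1048576 simulates about 1 failure in 2,048 checks.
 */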
121 static int	bce_debug_l2fhdr_status_check = 0;
122 static int	bce_debug_unexpected_attention = 0;
123 static int	bce_debug_mbuf_allocation_failure = 0;
124 static int	bce_debug_dma_map_addr_failure = 0;
125 static int	bce_debug_bootcode_running_failure = 0;
126 
127 #endif	/* BCE_DEBUG */
128 
129 
130 /****************************************************************************/
131 /* PCI Device ID Table                                                      */
132 /*                                                                          */
133 /* Used by bce_probe() to identify the devices supported by this driver.    */
134 /****************************************************************************/
135 #define BCE_DEVDESC_MAX		64
136 
137 static struct bce_type bce_devs[] = {
138 	/* BCM5706C Controllers and OEM boards. */
139 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706,  HP_VENDORID, 0x3101,
140 		"HP NC370T Multifunction Gigabit Server Adapter" },
141 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706,  HP_VENDORID, 0x3106,
142 		"HP NC370i Multifunction Gigabit Server Adapter" },
143 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706,  PCI_ANY_ID,  PCI_ANY_ID,
144 		"Broadcom NetXtreme II BCM5706 1000Base-T" },
145 
146 	/* BCM5706S controllers and OEM boards. */
147 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, HP_VENDORID, 0x3102,
148 		"HP NC370F Multifunction Gigabit Server Adapter" },
149 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, PCI_ANY_ID,  PCI_ANY_ID,
150 		"Broadcom NetXtreme II BCM5706 1000Base-SX" },
151 
152 	/* BCM5708C controllers and OEM boards. */
153 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708,  PCI_ANY_ID,  PCI_ANY_ID,
154 		"Broadcom NetXtreme II BCM5708 1000Base-T" },
155 
156 	/* BCM5708S controllers and OEM boards. */
157 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708S,  PCI_ANY_ID,  PCI_ANY_ID,
158 		"Broadcom NetXtreme II BCM5708S 1000Base-SX" },
159 	{ 0, 0, 0, 0, NULL }
160 };
161 
162 
163 /****************************************************************************/
164 /* Supported Flash NVRAM device data.                                       */
165 /****************************************************************************/
166 static const struct flash_spec flash_table[] =
167 {
168 	/* Slow EEPROM */
169 	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
170 	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
171 	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
172 	 "EEPROM - slow"},
173 	/* Expansion entry 0001 */
174 	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
175 	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
176 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
177 	 "Entry 0001"},
178 	/* Saifun SA25F010 (non-buffered flash) */
179 	/* strap, cfg1, & write1 need updates */
180 	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
181 	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
182 	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
183 	 "Non-buffered flash (128kB)"},
184 	/* Saifun SA25F020 (non-buffered flash) */
185 	/* strap, cfg1, & write1 need updates */
186 	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
187 	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
188 	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
189 	 "Non-buffered flash (256kB)"},
190 	/* Expansion entry 0100 */
191 	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
192 	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
193 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
194 	 "Entry 0100"},
195 	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
196 	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
197 	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
198 	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
199 	 "Entry 0101: ST M45PE10 (128kB non-buffered)"},
200 	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
201 	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
202 	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
203 	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
204 	 "Entry 0110: ST M45PE20 (256kB non-buffered)"},
205 	/* Saifun SA25F005 (non-buffered flash) */
206 	/* strap, cfg1, & write1 need updates */
207 	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
208 	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
209 	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
210 	 "Non-buffered flash (64kB)"},
211 	/* Fast EEPROM */
212 	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
213 	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
214 	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
215 	 "EEPROM - fast"},
216 	/* Expansion entry 1001 */
217 	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
218 	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
219 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
220 	 "Entry 1001"},
221 	/* Expansion entry 1010 */
222 	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
223 	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
224 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
225 	 "Entry 1010"},
226 	/* ATMEL AT45DB011B (buffered flash) */
227 	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
228 	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
229 	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
230 	 "Buffered flash (128kB)"},
231 	/* Expansion entry 1100 */
232 	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
233 	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
234 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
235 	 "Entry 1100"},
236 	/* Expansion entry 1101 */
237 	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
238 	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
239 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
240 	 "Entry 1101"},
241 	/* Atmel Expansion entry 1110 */
242 	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
243 	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
244 	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
245 	 "Entry 1110 (Atmel)"},
246 	/* ATMEL AT45DB021B (buffered flash) */
247 	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
248 	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
249 	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
250 	 "Buffered flash (256kB)"},
251 };
252 
253 
254 /****************************************************************************/
255 /* DragonFly device entry points.                                           */
256 /****************************************************************************/
257 static int	bce_probe(device_t);
258 static int	bce_attach(device_t);
259 static int	bce_detach(device_t);
260 static void	bce_shutdown(device_t);
261 
262 /****************************************************************************/
263 /* BCE Debug Data Structure Dump Routines                                   */
264 /****************************************************************************/
265 #ifdef BCE_DEBUG
266 static void	bce_dump_mbuf(struct bce_softc *, struct mbuf *);
267 static void	bce_dump_tx_mbuf_chain(struct bce_softc *, int, int);
268 static void	bce_dump_rx_mbuf_chain(struct bce_softc *, int, int);
269 static void	bce_dump_txbd(struct bce_softc *, int, struct tx_bd *);
270 static void	bce_dump_rxbd(struct bce_softc *, int, struct rx_bd *);
271 static void	bce_dump_l2fhdr(struct bce_softc *, int,
272 				struct l2_fhdr *) __unused;
273 static void	bce_dump_tx_chain(struct bce_softc *, int, int);
274 static void	bce_dump_rx_chain(struct bce_softc *, int, int);
275 static void	bce_dump_status_block(struct bce_softc *);
276 static void	bce_dump_driver_state(struct bce_softc *);
277 static void	bce_dump_stats_block(struct bce_softc *) __unused;
278 static void	bce_dump_hw_state(struct bce_softc *);
279 static void	bce_dump_txp_state(struct bce_softc *);
280 static void	bce_dump_rxp_state(struct bce_softc *) __unused;
281 static void	bce_dump_tpat_state(struct bce_softc *) __unused;
282 static void	bce_freeze_controller(struct bce_softc *) __unused;
283 static void	bce_unfreeze_controller(struct bce_softc *) __unused;
284 static void	bce_breakpoint(struct bce_softc *);
285 #endif	/* BCE_DEBUG */
286 
287 
288 /****************************************************************************/
289 /* BCE Register/Memory Access Routines                                      */
290 /****************************************************************************/
291 static uint32_t	bce_reg_rd_ind(struct bce_softc *, uint32_t);
292 static void	bce_reg_wr_ind(struct bce_softc *, uint32_t, uint32_t);
293 static void	bce_ctx_wr(struct bce_softc *, uint32_t, uint32_t, uint32_t);
294 static int	bce_miibus_read_reg(device_t, int, int);
295 static int	bce_miibus_write_reg(device_t, int, int, int);
296 static void	bce_miibus_statchg(device_t);
297 
298 
299 /****************************************************************************/
300 /* BCE NVRAM Access Routines                                                */
301 /****************************************************************************/
302 static int	bce_acquire_nvram_lock(struct bce_softc *);
303 static int	bce_release_nvram_lock(struct bce_softc *);
304 static void	bce_enable_nvram_access(struct bce_softc *);
305 static void	bce_disable_nvram_access(struct bce_softc *);
306 static int	bce_nvram_read_dword(struct bce_softc *, uint32_t, uint8_t *,
307 				     uint32_t);
308 static int	bce_init_nvram(struct bce_softc *);
309 static int	bce_nvram_read(struct bce_softc *, uint32_t, uint8_t *, int);
310 static int	bce_nvram_test(struct bce_softc *);
311 #ifdef BCE_NVRAM_WRITE_SUPPORT
312 static int	bce_enable_nvram_write(struct bce_softc *);
313 static void	bce_disable_nvram_write(struct bce_softc *);
314 static int	bce_nvram_erase_page(struct bce_softc *, uint32_t);
315 static int	bce_nvram_write_dword(struct bce_softc *, uint32_t, uint8_t *,
316 				      uint32_t);
317 static int	bce_nvram_write(struct bce_softc *, uint32_t, uint8_t *,
318 				int) __unused;
319 #endif
320 
321 /****************************************************************************/
322 /* BCE DMA Allocate/Free Routines                                           */
323 /****************************************************************************/
324 static int	bce_dma_alloc(struct bce_softc *);
325 static void	bce_dma_free(struct bce_softc *);
326 static void	bce_dma_map_addr(void *, bus_dma_segment_t *, int, int);
327 
328 /****************************************************************************/
329 /* BCE Firmware Synchronization and Load                                    */
330 /****************************************************************************/
331 static int	bce_fw_sync(struct bce_softc *, uint32_t);
332 static void	bce_load_rv2p_fw(struct bce_softc *, uint32_t *,
333 				 uint32_t, uint32_t);
334 static void	bce_load_cpu_fw(struct bce_softc *, struct cpu_reg *,
335 				struct fw_info *);
336 static void	bce_init_cpus(struct bce_softc *);
337 
338 static void	bce_stop(struct bce_softc *);
339 static int	bce_reset(struct bce_softc *, uint32_t);
340 static int	bce_chipinit(struct bce_softc *);
341 static int	bce_blockinit(struct bce_softc *);
342 static int	bce_newbuf_std(struct bce_softc *, uint16_t *, uint16_t *,
343 			       uint32_t *, int);
344 static void	bce_setup_rxdesc_std(struct bce_softc *, uint16_t, uint32_t *);
345 
346 static int	bce_init_tx_chain(struct bce_softc *);
347 static int	bce_init_rx_chain(struct bce_softc *);
348 static void	bce_free_rx_chain(struct bce_softc *);
349 static void	bce_free_tx_chain(struct bce_softc *);
350 
351 static int	bce_encap(struct bce_softc *, struct mbuf **);
352 static void	bce_start(struct ifnet *);
353 static int	bce_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
354 static void	bce_watchdog(struct ifnet *);
355 static int	bce_ifmedia_upd(struct ifnet *);
356 static void	bce_ifmedia_sts(struct ifnet *, struct ifmediareq *);
357 static void	bce_init(void *);
358 static void	bce_mgmt_init(struct bce_softc *);
359 
360 static void	bce_init_ctx(struct bce_softc *);
361 static void	bce_get_mac_addr(struct bce_softc *);
362 static void	bce_set_mac_addr(struct bce_softc *);
363 static void	bce_phy_intr(struct bce_softc *);
364 static void	bce_rx_intr(struct bce_softc *, int);
365 static void	bce_tx_intr(struct bce_softc *);
366 static void	bce_disable_intr(struct bce_softc *);
367 static void	bce_enable_intr(struct bce_softc *);
368 
369 #ifdef DEVICE_POLLING
370 static void	bce_poll(struct ifnet *, enum poll_cmd, int);
371 #endif
372 static void	bce_intr(void *);
373 static void	bce_set_rx_mode(struct bce_softc *);
374 static void	bce_stats_update(struct bce_softc *);
375 static void	bce_tick(void *);
376 static void	bce_tick_serialized(struct bce_softc *);
377 static void	bce_add_sysctls(struct bce_softc *);
378 
379 static void	bce_coal_change(struct bce_softc *);
380 static int	bce_sysctl_tx_bds_int(SYSCTL_HANDLER_ARGS);
381 static int	bce_sysctl_tx_bds(SYSCTL_HANDLER_ARGS);
382 static int	bce_sysctl_tx_ticks_int(SYSCTL_HANDLER_ARGS);
383 static int	bce_sysctl_tx_ticks(SYSCTL_HANDLER_ARGS);
384 static int	bce_sysctl_rx_bds_int(SYSCTL_HANDLER_ARGS);
385 static int	bce_sysctl_rx_bds(SYSCTL_HANDLER_ARGS);
386 static int	bce_sysctl_rx_ticks_int(SYSCTL_HANDLER_ARGS);
387 static int	bce_sysctl_rx_ticks(SYSCTL_HANDLER_ARGS);
388 static int	bce_sysctl_coal_change(SYSCTL_HANDLER_ARGS,
389 				       uint32_t *, uint32_t);
390 
391 /*
392  * NOTE:
393  * Don't set bce_tx_ticks_int/bce_tx_ticks to 1023.  Linux's bnx2
394  * takes 1023 as the TX ticks limit.  However, using 1023 will
395  * cause 5708(B2) to generate extra interrupts (~2000/s) even when
396  * there is _no_ network activity on the NIC.
397  */
398 static uint32_t	bce_tx_bds_int = 255;		/* bcm: 20 */
399 static uint32_t	bce_tx_bds = 255;		/* bcm: 20 */
400 static uint32_t	bce_tx_ticks_int = 1022;	/* bcm: 80 */
401 static uint32_t	bce_tx_ticks = 1022;		/* bcm: 80 */
402 static uint32_t	bce_rx_bds_int = 128;		/* bcm: 6 */
403 static uint32_t	bce_rx_bds = 128;		/* bcm: 6 */
404 static uint32_t	bce_rx_ticks_int = 125;		/* bcm: 18 */
405 static uint32_t	bce_rx_ticks = 125;		/* bcm: 18 */
406 
407 TUNABLE_INT("hw.bce.tx_bds_int", &bce_tx_bds_int);
408 TUNABLE_INT("hw.bce.tx_bds", &bce_tx_bds);
409 TUNABLE_INT("hw.bce.tx_ticks_int", &bce_tx_ticks_int);
410 TUNABLE_INT("hw.bce.tx_ticks", &bce_tx_ticks);
411 TUNABLE_INT("hw.bce.rx_bds_int", &bce_rx_bds_int);
412 TUNABLE_INT("hw.bce.rx_bds", &bce_rx_bds);
413 TUNABLE_INT("hw.bce.rx_ticks_int", &bce_rx_ticks_int);
414 TUNABLE_INT("hw.bce.rx_ticks", &bce_rx_ticks);
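/*
 * The TUNABLE_INT() calls above register loader tunables, so the
 * coalescing defaults can be overridden at boot time; an illustrative
 * (hypothetical) /boot/loader.conf snippet:
 *
 *   hw.bce.tx_ticks="512"
 *   hw.bce.rx_bds="64"
 *
 * Per the NOTE above, avoid 1023 for the TX ticks values.
 */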
415 
416 /****************************************************************************/
417 /* DragonFly device dispatch table.                                         */
418 /****************************************************************************/
419 static device_method_t bce_methods[] = {
420 	/* Device interface */
421 	DEVMETHOD(device_probe,		bce_probe),
422 	DEVMETHOD(device_attach,	bce_attach),
423 	DEVMETHOD(device_detach,	bce_detach),
424 	DEVMETHOD(device_shutdown,	bce_shutdown),
425 
426 	/* bus interface */
427 	DEVMETHOD(bus_print_child,	bus_generic_print_child),
428 	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
429 
430 	/* MII interface */
431 	DEVMETHOD(miibus_readreg,	bce_miibus_read_reg),
432 	DEVMETHOD(miibus_writereg,	bce_miibus_write_reg),
433 	DEVMETHOD(miibus_statchg,	bce_miibus_statchg),
434 
435 	{ 0, 0 }
436 };
437 
438 static driver_t bce_driver = {
439 	"bce",
440 	bce_methods,
441 	sizeof(struct bce_softc)
442 };
443 
444 static devclass_t bce_devclass;
445 
446 
447 DECLARE_DUMMY_MODULE(if_bce);
448 MODULE_DEPEND(bce, miibus, 1, 1, 1);
449 DRIVER_MODULE(if_bce, pci, bce_driver, bce_devclass, 0, 0);
450 DRIVER_MODULE(miibus, bce, miibus_driver, miibus_devclass, 0, 0);
451 
452 
453 /****************************************************************************/
454 /* Device probe function.                                                   */
455 /*                                                                          */
456 /* Compares the device to the driver's list of supported devices and        */
457 /* reports back to the OS whether this is the right driver for the device.  */
458 /*                                                                          */
459 /* Returns:                                                                 */
460 /*   0 on success, positive value on failure.                               */
461 /****************************************************************************/
462 static int
463 bce_probe(device_t dev)
464 {
465 	struct bce_type *t;
466 	uint16_t vid, did, svid, sdid;
467 
468 	/* Get the data for the device to be probed. */
469 	vid  = pci_get_vendor(dev);
470 	did  = pci_get_device(dev);
471 	svid = pci_get_subvendor(dev);
472 	sdid = pci_get_subdevice(dev);
473 
474 	/* Look through the list of known devices for a match. */
475 	for (t = bce_devs; t->bce_name != NULL; ++t) {
476 		if (vid == t->bce_vid && did == t->bce_did &&
477 		    (svid == t->bce_svid || t->bce_svid == PCI_ANY_ID) &&
478 		    (sdid == t->bce_sdid || t->bce_sdid == PCI_ANY_ID)) {
479 		    	uint32_t revid = pci_read_config(dev, PCIR_REVID, 4);
480 			char *descbuf;
481 
482 			descbuf = kmalloc(BCE_DEVDESC_MAX, M_TEMP, M_WAITOK);
483 
484 			/* Print out the device identity. */
485 			ksnprintf(descbuf, BCE_DEVDESC_MAX, "%s (%c%d)",
486 				  t->bce_name,
487 				  ((revid & 0xf0) >> 4) + 'A', revid & 0xf);
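			/*
			 * Worked example: a PCI revision ID of 0x12 is
			 * reported as "B2", since (0x12 & 0xf0) >> 4 == 1
			 * ('A' + 1 == 'B') and 0x12 & 0xf == 2.
			 */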
488 
489 			device_set_desc_copy(dev, descbuf);
490 			kfree(descbuf, M_TEMP);
491 			return 0;
492 		}
493 	}
494 	return ENXIO;
495 }
496 
497 
498 /****************************************************************************/
499 /* Device attach function.                                                  */
500 /*                                                                          */
501 /* Allocates device resources, performs secondary chip identification,      */
502 /* resets and initializes the hardware, and initializes driver instance     */
503 /* variables.                                                               */
504 /*                                                                          */
505 /* Returns:                                                                 */
506 /*   0 on success, positive value on failure.                               */
507 /****************************************************************************/
508 static int
509 bce_attach(device_t dev)
510 {
511 	struct bce_softc *sc = device_get_softc(dev);
512 	struct ifnet *ifp = &sc->arpcom.ac_if;
513 	uint32_t val;
514 	int rid, rc = 0;
515 #ifdef notyet
516 	int count;
517 #endif
518 
519 	sc->bce_dev = dev;
520 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
521 
522 	pci_enable_busmaster(dev);
523 
524 	/* Allocate PCI memory resources. */
525 	rid = PCIR_BAR(0);
526 	sc->bce_res_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
527 						 RF_ACTIVE | PCI_RF_DENSE);
528 	if (sc->bce_res_mem == NULL) {
529 		device_printf(dev, "PCI memory allocation failed\n");
530 		return ENXIO;
531 	}
532 	sc->bce_btag = rman_get_bustag(sc->bce_res_mem);
533 	sc->bce_bhandle = rman_get_bushandle(sc->bce_res_mem);
534 
535 	/* Allocate PCI IRQ resources. */
536 #ifdef notyet
537 	count = pci_msi_count(dev);
538 	if (count == 1 && pci_alloc_msi(dev, &count) == 0) {
539 		rid = 1;
540 		sc->bce_flags |= BCE_USING_MSI_FLAG;
541 	} else
542 #endif
543 	rid = 0;
544 	sc->bce_res_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
545 						 RF_SHAREABLE | RF_ACTIVE);
546 	if (sc->bce_res_irq == NULL) {
547 		device_printf(dev, "PCI map interrupt failed\n");
548 		rc = ENXIO;
549 		goto fail;
550 	}
551 
552 	/*
553 	 * Configure byte swap and enable indirect register access.
554 	 * Rely on CPU to do target byte swapping on big endian systems.
555 	 * Access to registers outside of PCI configuration space is not
556 	 * valid until this is done.
557 	 */
558 	pci_write_config(dev, BCE_PCICFG_MISC_CONFIG,
559 			 BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
560 			 BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP, 4);
561 
562 	/* Save ASIC revision info. */
563 	sc->bce_chipid =  REG_RD(sc, BCE_MISC_ID);
564 
565 	/* Weed out any non-production controller revisions. */
566 	switch(BCE_CHIP_ID(sc)) {
567 	case BCE_CHIP_ID_5706_A0:
568 	case BCE_CHIP_ID_5706_A1:
569 	case BCE_CHIP_ID_5708_A0:
570 	case BCE_CHIP_ID_5708_B0:
571 		device_printf(dev, "Unsupported chip id 0x%08x!\n",
572 			      BCE_CHIP_ID(sc));
573 		rc = ENODEV;
574 		goto fail;
575 	}
576 
577 	/*
578 	 * The embedded PCIe to PCI-X bridge (EPB)
579 	 * in the 5708 cannot address memory above
580 	 * 40 bits (E7_5708CB1_23043 & E6_5708SB1_23043).
581 	 */
582 	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708)
583 		sc->max_bus_addr = BCE_BUS_SPACE_MAXADDR;
584 	else
585 		sc->max_bus_addr = BUS_SPACE_MAXADDR;
586 
587 	/*
588 	 * Find the base address for shared memory access.
589 	 * Newer versions of bootcode use a signature and offset
590 	 * while older versions use a fixed address.
591 	 */
592 	val = REG_RD_IND(sc, BCE_SHM_HDR_SIGNATURE);
593 	if ((val & BCE_SHM_HDR_SIGNATURE_SIG_MASK) == BCE_SHM_HDR_SIGNATURE_SIG)
594 		sc->bce_shmem_base = REG_RD_IND(sc, BCE_SHM_HDR_ADDR_0);
595 	else
596 		sc->bce_shmem_base = HOST_VIEW_SHMEM_BASE;
597 
598 	DBPRINT(sc, BCE_INFO, "bce_shmem_base = 0x%08X\n", sc->bce_shmem_base);
599 
600 	/* Get PCI bus information (speed and type). */
601 	val = REG_RD(sc, BCE_PCICFG_MISC_STATUS);
602 	if (val & BCE_PCICFG_MISC_STATUS_PCIX_DET) {
603 		uint32_t clkreg;
604 
605 		sc->bce_flags |= BCE_PCIX_FLAG;
606 
607 		clkreg = REG_RD(sc, BCE_PCICFG_PCI_CLOCK_CONTROL_BITS) &
608 			 BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
609 		switch (clkreg) {
610 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
611 			sc->bus_speed_mhz = 133;
612 			break;
613 
614 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
615 			sc->bus_speed_mhz = 100;
616 			break;
617 
618 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
619 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
620 			sc->bus_speed_mhz = 66;
621 			break;
622 
623 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
624 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
625 			sc->bus_speed_mhz = 50;
626 			break;
627 
628 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
629 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
630 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
631 			sc->bus_speed_mhz = 33;
632 			break;
633 		}
634 	} else {
635 		if (val & BCE_PCICFG_MISC_STATUS_M66EN)
636 			sc->bus_speed_mhz = 66;
637 		else
638 			sc->bus_speed_mhz = 33;
639 	}
640 
641 	if (val & BCE_PCICFG_MISC_STATUS_32BIT_DET)
642 		sc->bce_flags |= BCE_PCI_32BIT_FLAG;
643 
644 	device_printf(dev, "ASIC ID 0x%08X; Revision (%c%d); PCI%s %s %dMHz\n",
645 		      sc->bce_chipid,
646 		      ((BCE_CHIP_ID(sc) & 0xf000) >> 12) + 'A',
647 		      (BCE_CHIP_ID(sc) & 0x0ff0) >> 4,
648 		      (sc->bce_flags & BCE_PCIX_FLAG) ? "-X" : "",
649 		      (sc->bce_flags & BCE_PCI_32BIT_FLAG) ?
650 		      "32-bit" : "64-bit", sc->bus_speed_mhz);
651 
652 	/* Reset the controller. */
653 	rc = bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
654 	if (rc != 0)
655 		goto fail;
656 
657 	/* Initialize the controller. */
658 	rc = bce_chipinit(sc);
659 	if (rc != 0) {
660 		device_printf(dev, "Controller initialization failed!\n");
661 		goto fail;
662 	}
663 
664 	/* Perform NVRAM test. */
665 	rc = bce_nvram_test(sc);
666 	if (rc != 0) {
667 		device_printf(dev, "NVRAM test failed!\n");
668 		goto fail;
669 	}
670 
671 	/* Fetch the permanent Ethernet MAC address. */
672 	bce_get_mac_addr(sc);
673 
674 	/*
675 	 * Trip points control how many BDs
676 	 * should be ready before generating an
677 	 * interrupt while ticks control how long
678 	 * a BD can sit in the chain before
679 	 * generating an interrupt.  Set the default
680 	 * values for the RX and TX rings.
681 	 */
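	/*
	 * Roughly speaking, with the non-debug defaults below (e.g.
	 * bce_rx_bds = 128 and bce_rx_ticks = 125) an RX interrupt is
	 * generated once 128 buffers have completed or once a completed
	 * buffer has waited 125 ticks, whichever happens first.
	 */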
682 
683 #ifdef BCE_DEBUG
684 	/* Force more frequent interrupts. */
685 	sc->bce_tx_quick_cons_trip_int = 1;
686 	sc->bce_tx_quick_cons_trip     = 1;
687 	sc->bce_tx_ticks_int           = 0;
688 	sc->bce_tx_ticks               = 0;
689 
690 	sc->bce_rx_quick_cons_trip_int = 1;
691 	sc->bce_rx_quick_cons_trip     = 1;
692 	sc->bce_rx_ticks_int           = 0;
693 	sc->bce_rx_ticks               = 0;
694 #else
695 	sc->bce_tx_quick_cons_trip_int = bce_tx_bds_int;
696 	sc->bce_tx_quick_cons_trip     = bce_tx_bds;
697 	sc->bce_tx_ticks_int           = bce_tx_ticks_int;
698 	sc->bce_tx_ticks               = bce_tx_ticks;
699 
700 	sc->bce_rx_quick_cons_trip_int = bce_rx_bds_int;
701 	sc->bce_rx_quick_cons_trip     = bce_rx_bds;
702 	sc->bce_rx_ticks_int           = bce_rx_ticks_int;
703 	sc->bce_rx_ticks               = bce_rx_ticks;
704 #endif
705 
706 	/* Update statistics once every second. */
707 	sc->bce_stats_ticks = 1000000 & 0xffff00;
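	/*
	 * 1,000,000us (0xf4240) masked with 0xffff00 gives 0xf4200
	 * (999,936us), i.e. just under one second; the mask clears the
	 * low 8 bits, which the statistics ticks register apparently
	 * does not use.
	 */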
708 
709 	/*
710 	 * The copper based NetXtreme II controllers
711 	 * use an integrated PHY at address 1 while
712 	 * the SerDes controllers use a PHY at
713 	 * address 2.
714 	 */
715 	sc->bce_phy_addr = 1;
716 
717 	if (BCE_CHIP_BOND_ID(sc) & BCE_CHIP_BOND_ID_SERDES_BIT) {
718 		sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
719 		sc->bce_flags |= BCE_NO_WOL_FLAG;
720 		if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708) {
721 			sc->bce_phy_addr = 2;
722 			val = REG_RD_IND(sc, sc->bce_shmem_base +
723 					 BCE_SHARED_HW_CFG_CONFIG);
724 			if (val & BCE_SHARED_HW_CFG_PHY_2_5G)
725 				sc->bce_phy_flags |= BCE_PHY_2_5G_CAPABLE_FLAG;
726 		}
727 	}
728 
729 	/* Allocate DMA memory resources. */
730 	rc = bce_dma_alloc(sc);
731 	if (rc != 0) {
732 		device_printf(dev, "DMA resource allocation failed!\n");
733 		goto fail;
734 	}
735 
736 	/* Initialize the ifnet interface. */
737 	ifp->if_softc = sc;
738 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
739 	ifp->if_ioctl = bce_ioctl;
740 	ifp->if_start = bce_start;
741 	ifp->if_init = bce_init;
742 	ifp->if_watchdog = bce_watchdog;
743 #ifdef DEVICE_POLLING
744 	ifp->if_poll = bce_poll;
745 #endif
746 	ifp->if_mtu = ETHERMTU;
747 	ifp->if_hwassist = BCE_IF_HWASSIST;
748 	ifp->if_capabilities = BCE_IF_CAPABILITIES;
749 	ifp->if_capenable = ifp->if_capabilities;
750 	ifq_set_maxlen(&ifp->if_snd, USABLE_TX_BD);
751 	ifq_set_ready(&ifp->if_snd);
752 
753 	if (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG)
754 		ifp->if_baudrate = IF_Gbps(2.5);
755 	else
756 		ifp->if_baudrate = IF_Gbps(1);
757 
758 	/* Assume a standard 1500 byte MTU size for mbuf allocations. */
759 	sc->mbuf_alloc_size  = MCLBYTES;
760 
761 	/* Look for our PHY. */
762 	rc = mii_phy_probe(dev, &sc->bce_miibus,
763 			   bce_ifmedia_upd, bce_ifmedia_sts);
764 	if (rc != 0) {
765 		device_printf(dev, "PHY probe failed!\n");
766 		goto fail;
767 	}
768 
769 	/* Attach to the Ethernet interface list. */
770 	ether_ifattach(ifp, sc->eaddr, NULL);
771 
772 	callout_init(&sc->bce_stat_ch);
773 
774 	/* Hookup IRQ last. */
775 	rc = bus_setup_intr(dev, sc->bce_res_irq, INTR_MPSAFE, bce_intr, sc,
776 			    &sc->bce_intrhand, ifp->if_serializer);
777 	if (rc != 0) {
778 		device_printf(dev, "Failed to setup IRQ!\n");
779 		ether_ifdetach(ifp);
780 		goto fail;
781 	}
782 
783 	ifp->if_cpuid = ithread_cpuid(rman_get_start(sc->bce_res_irq));
784 	KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus);
785 
786 	/* Print some important debugging info. */
787 	DBRUN(BCE_INFO, bce_dump_driver_state(sc));
788 
789 	/* Add the supported sysctls to the kernel. */
790 	bce_add_sysctls(sc);
791 
792 	/* Get the firmware running so IPMI still works */
793 	bce_mgmt_init(sc);
794 
795 	return 0;
796 fail:
797 	bce_detach(dev);
798 	return(rc);
799 }
800 
801 
802 /****************************************************************************/
803 /* Device detach function.                                                  */
804 /*                                                                          */
805 /* Stops the controller, resets the controller, and releases resources.     */
806 /*                                                                          */
807 /* Returns:                                                                 */
808 /*   0 on success, positive value on failure.                               */
809 /****************************************************************************/
810 static int
811 bce_detach(device_t dev)
812 {
813 	struct bce_softc *sc = device_get_softc(dev);
814 
815 	if (device_is_attached(dev)) {
816 		struct ifnet *ifp = &sc->arpcom.ac_if;
817 
818 		/* Stop and reset the controller. */
819 		lwkt_serialize_enter(ifp->if_serializer);
820 		bce_stop(sc);
821 		bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
822 		bus_teardown_intr(dev, sc->bce_res_irq, sc->bce_intrhand);
823 		lwkt_serialize_exit(ifp->if_serializer);
824 
825 		ether_ifdetach(ifp);
826 	}
827 
828 	/* If we have a child device on the MII bus remove it too. */
829 	if (sc->bce_miibus)
830 		device_delete_child(dev, sc->bce_miibus);
831 	bus_generic_detach(dev);
832 
833 	if (sc->bce_res_irq != NULL) {
834 		bus_release_resource(dev, SYS_RES_IRQ,
835 			sc->bce_flags & BCE_USING_MSI_FLAG ? 1 : 0,
836 			sc->bce_res_irq);
837 	}
838 
839 #ifdef notyet
840 	if (sc->bce_flags & BCE_USING_MSI_FLAG)
841 		pci_release_msi(dev);
842 #endif
843 
844 	if (sc->bce_res_mem != NULL) {
845 		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0),
846 				     sc->bce_res_mem);
847 	}
848 
849 	bce_dma_free(sc);
850 
851 	if (sc->bce_sysctl_tree != NULL)
852 		sysctl_ctx_free(&sc->bce_sysctl_ctx);
853 
854 	return 0;
855 }
856 
857 
858 /****************************************************************************/
859 /* Device shutdown function.                                                */
860 /*                                                                          */
861 /* Stops and resets the controller.                                         */
862 /*                                                                          */
863 /* Returns:                                                                 */
864 /*   Nothing                                                                */
865 /****************************************************************************/
866 static void
867 bce_shutdown(device_t dev)
868 {
869 	struct bce_softc *sc = device_get_softc(dev);
870 	struct ifnet *ifp = &sc->arpcom.ac_if;
871 
872 	lwkt_serialize_enter(ifp->if_serializer);
873 	bce_stop(sc);
874 	bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
875 	lwkt_serialize_exit(ifp->if_serializer);
876 }
877 
878 
879 /****************************************************************************/
880 /* Indirect register read.                                                  */
881 /*                                                                          */
882 /* Reads NetXtreme II registers using an index/data register pair in PCI    */
883 /* configuration space.  Using this mechanism avoids issues with posted     */
884 /* reads but is much slower than memory-mapped I/O.                         */
885 /*                                                                          */
886 /* Returns:                                                                 */
887 /*   The value of the register.                                             */
888 /****************************************************************************/
889 static uint32_t
890 bce_reg_rd_ind(struct bce_softc *sc, uint32_t offset)
891 {
892 	device_t dev = sc->bce_dev;
893 
894 	pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4);
895 #ifdef BCE_DEBUG
896 	{
897 		uint32_t val;
898 		val = pci_read_config(dev, BCE_PCICFG_REG_WINDOW, 4);
899 		DBPRINT(sc, BCE_EXCESSIVE,
900 			"%s(); offset = 0x%08X, val = 0x%08X\n",
901 			__func__, offset, val);
902 		return val;
903 	}
904 #else
905 	return pci_read_config(dev, BCE_PCICFG_REG_WINDOW, 4);
906 #endif
907 }
908 
909 
910 /****************************************************************************/
911 /* Indirect register write.                                                 */
912 /*                                                                          */
913 /* Writes NetXtreme II registers using an index/data register pair in PCI   */
914 /* configuration space.  Using this mechanism avoids issues with posted     */
915 /* writes but is much slower than memory-mapped I/O.                        */
916 /*                                                                          */
917 /* Returns:                                                                 */
918 /*   Nothing.                                                               */
919 /****************************************************************************/
920 static void
921 bce_reg_wr_ind(struct bce_softc *sc, uint32_t offset, uint32_t val)
922 {
923 	device_t dev = sc->bce_dev;
924 
925 	DBPRINT(sc, BCE_EXCESSIVE, "%s(); offset = 0x%08X, val = 0x%08X\n",
926 		__func__, offset, val);
927 
928 	pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4);
929 	pci_write_config(dev, BCE_PCICFG_REG_WINDOW, val, 4);
930 }
931 
932 
933 /****************************************************************************/
934 /* Context memory write.                                                    */
935 /*                                                                          */
936 /* The NetXtreme II controller uses context memory to track connection      */
937 /* information for L2 and higher network protocols.                         */
938 /*                                                                          */
939 /* Returns:                                                                 */
940 /*   Nothing.                                                               */
941 /****************************************************************************/
942 static void
943 bce_ctx_wr(struct bce_softc *sc, uint32_t cid_addr, uint32_t offset,
944 	   uint32_t val)
945 {
946 	DBPRINT(sc, BCE_EXCESSIVE, "%s(); cid_addr = 0x%08X, offset = 0x%08X, "
947 		"val = 0x%08X\n", __func__, cid_addr, offset, val);
948 
949 	offset += cid_addr;
950 	REG_WR(sc, BCE_CTX_DATA_ADR, offset);
951 	REG_WR(sc, BCE_CTX_DATA, val);
952 }
953 
954 
955 /****************************************************************************/
956 /* PHY register read.                                                       */
957 /*                                                                          */
958 /* Implements register reads on the MII bus.                                */
959 /*                                                                          */
960 /* Returns:                                                                 */
961 /*   The value of the register.                                             */
962 /****************************************************************************/
963 static int
964 bce_miibus_read_reg(device_t dev, int phy, int reg)
965 {
966 	struct bce_softc *sc = device_get_softc(dev);
967 	uint32_t val;
968 	int i;
969 
970 	/* Make sure we are accessing the correct PHY address. */
971 	if (phy != sc->bce_phy_addr) {
972 		DBPRINT(sc, BCE_VERBOSE,
973 			"Invalid PHY address %d for PHY read!\n", phy);
974 		return 0;
975 	}
976 
977 	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
978 		val = REG_RD(sc, BCE_EMAC_MDIO_MODE);
979 		val &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL;
980 
981 		REG_WR(sc, BCE_EMAC_MDIO_MODE, val);
982 		REG_RD(sc, BCE_EMAC_MDIO_MODE);
983 
984 		DELAY(40);
985 	}
986 
987 	val = BCE_MIPHY(phy) | BCE_MIREG(reg) |
988 	      BCE_EMAC_MDIO_COMM_COMMAND_READ | BCE_EMAC_MDIO_COMM_DISEXT |
989 	      BCE_EMAC_MDIO_COMM_START_BUSY;
990 	REG_WR(sc, BCE_EMAC_MDIO_COMM, val);
991 
992 	for (i = 0; i < BCE_PHY_TIMEOUT; i++) {
993 		DELAY(10);
994 
995 		val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
996 		if (!(val & BCE_EMAC_MDIO_COMM_START_BUSY)) {
997 			DELAY(5);
998 
999 			val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
1000 			val &= BCE_EMAC_MDIO_COMM_DATA;
1001 			break;
1002 		}
1003 	}
1004 
1005 	if (val & BCE_EMAC_MDIO_COMM_START_BUSY) {
1006 		if_printf(&sc->arpcom.ac_if,
1007 			  "Error: PHY read timeout! phy = %d, reg = 0x%04X\n",
1008 			  phy, reg);
1009 		val = 0x0;
1010 	} else {
1011 		val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
1012 	}
1013 
1014 	DBPRINT(sc, BCE_EXCESSIVE,
1015 		"%s(): phy = %d, reg = 0x%04X, val = 0x%04X\n",
1016 		__func__, phy, (uint16_t)reg & 0xffff, (uint16_t) val & 0xffff);
1017 
1018 	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1019 		val = REG_RD(sc, BCE_EMAC_MDIO_MODE);
1020 		val |= BCE_EMAC_MDIO_MODE_AUTO_POLL;
1021 
1022 		REG_WR(sc, BCE_EMAC_MDIO_MODE, val);
1023 		REG_RD(sc, BCE_EMAC_MDIO_MODE);
1024 
1025 		DELAY(40);
1026 	}
1027 	return (val & 0xffff);
1028 }
1029 
1030 
1031 /****************************************************************************/
1032 /* PHY register write.                                                      */
1033 /*                                                                          */
1034 /* Implements register writes on the MII bus.                               */
1035 /*                                                                          */
1036 /* Returns:                                                                 */
1037 /*   0 on success.                                                          */
1038 /****************************************************************************/
1039 static int
1040 bce_miibus_write_reg(device_t dev, int phy, int reg, int val)
1041 {
1042 	struct bce_softc *sc = device_get_softc(dev);
1043 	uint32_t val1;
1044 	int i;
1045 
1046 	/* Make sure we are accessing the correct PHY address. */
1047 	if (phy != sc->bce_phy_addr) {
1048 		DBPRINT(sc, BCE_WARN,
1049 			"Invalid PHY address %d for PHY write!\n", phy);
1050 		return(0);
1051 	}
1052 
1053 	DBPRINT(sc, BCE_EXCESSIVE,
1054 		"%s(): phy = %d, reg = 0x%04X, val = 0x%04X\n",
1055 		__func__, phy, (uint16_t)(reg & 0xffff),
1056 		(uint16_t)(val & 0xffff));
1057 
1058 	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1059 		val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE);
1060 		val1 &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL;
1061 
1062 		REG_WR(sc, BCE_EMAC_MDIO_MODE, val1);
1063 		REG_RD(sc, BCE_EMAC_MDIO_MODE);
1064 
1065 		DELAY(40);
1066 	}
1067 
1068 	val1 = BCE_MIPHY(phy) | BCE_MIREG(reg) | val |
1069 		BCE_EMAC_MDIO_COMM_COMMAND_WRITE |
1070 		BCE_EMAC_MDIO_COMM_START_BUSY | BCE_EMAC_MDIO_COMM_DISEXT;
1071 	REG_WR(sc, BCE_EMAC_MDIO_COMM, val1);
1072 
1073 	for (i = 0; i < BCE_PHY_TIMEOUT; i++) {
1074 		DELAY(10);
1075 
1076 		val1 = REG_RD(sc, BCE_EMAC_MDIO_COMM);
1077 		if (!(val1 & BCE_EMAC_MDIO_COMM_START_BUSY)) {
1078 			DELAY(5);
1079 			break;
1080 		}
1081 	}
1082 
1083 	if (val1 & BCE_EMAC_MDIO_COMM_START_BUSY)
1084 		if_printf(&sc->arpcom.ac_if, "PHY write timeout!\n");
1085 
1086 	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1087 		val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE);
1088 		val1 |= BCE_EMAC_MDIO_MODE_AUTO_POLL;
1089 
1090 		REG_WR(sc, BCE_EMAC_MDIO_MODE, val1);
1091 		REG_RD(sc, BCE_EMAC_MDIO_MODE);
1092 
1093 		DELAY(40);
1094 	}
1095 	return 0;
1096 }
1097 
1098 
1099 /****************************************************************************/
1100 /* MII bus status change.                                                   */
1101 /*                                                                          */
1102 /* Called by the MII bus driver when the PHY establishes link to set the    */
1103 /* MAC interface registers.                                                 */
1104 /*                                                                          */
1105 /* Returns:                                                                 */
1106 /*   Nothing.                                                               */
1107 /****************************************************************************/
1108 static void
1109 bce_miibus_statchg(device_t dev)
1110 {
1111 	struct bce_softc *sc = device_get_softc(dev);
1112 	struct mii_data *mii = device_get_softc(sc->bce_miibus);
1113 
1114 	DBPRINT(sc, BCE_INFO, "mii_media_active = 0x%08X\n",
1115 		mii->mii_media_active);
1116 
1117 #ifdef BCE_DEBUG
1118 	/* Decode the interface media flags. */
1119 	if_printf(&sc->arpcom.ac_if, "Media: ( ");
1120 	switch(IFM_TYPE(mii->mii_media_active)) {
1121 	case IFM_ETHER:
1122 		kprintf("Ethernet )");
1123 		break;
1124 	default:
1125 		kprintf("Unknown )");
1126 		break;
1127 	}
1128 
1129 	kprintf(" Media Options: ( ");
1130 	switch(IFM_SUBTYPE(mii->mii_media_active)) {
1131 	case IFM_AUTO:
1132 		kprintf("Autoselect )");
1133 		break;
1134 	case IFM_MANUAL:
1135 		kprintf("Manual )");
1136 		break;
1137 	case IFM_NONE:
1138 		kprintf("None )");
1139 		break;
1140 	case IFM_10_T:
1141 		kprintf("10Base-T )");
1142 		break;
1143 	case IFM_100_TX:
1144 		kprintf("100Base-TX )");
1145 		break;
1146 	case IFM_1000_SX:
1147 		kprintf("1000Base-SX )");
1148 		break;
1149 	case IFM_1000_T:
1150 		kprintf("1000Base-T )");
1151 		break;
1152 	default:
1153 		kprintf("Other )");
1154 		break;
1155 	}
1156 
1157 	kprintf(" Global Options: (");
1158 	if (mii->mii_media_active & IFM_FDX)
1159 		kprintf(" FullDuplex");
1160 	if (mii->mii_media_active & IFM_HDX)
1161 		kprintf(" HalfDuplex");
1162 	if (mii->mii_media_active & IFM_LOOP)
1163 		kprintf(" Loopback");
1164 	if (mii->mii_media_active & IFM_FLAG0)
1165 		kprintf(" Flag0");
1166 	if (mii->mii_media_active & IFM_FLAG1)
1167 		kprintf(" Flag1");
1168 	if (mii->mii_media_active & IFM_FLAG2)
1169 		kprintf(" Flag2");
1170 	kprintf(" )\n");
1171 #endif
1172 
1173 	BCE_CLRBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT);
1174 
1175 	/*
1176 	 * Set MII or GMII interface based on the speed negotiated
1177 	 * by the PHY.
1178 	 */
1179 	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
1180 	    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) {
1181 		DBPRINT(sc, BCE_INFO, "Setting GMII interface.\n");
1182 		BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT_GMII);
1183 	} else {
1184 		DBPRINT(sc, BCE_INFO, "Setting MII interface.\n");
1185 		BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT_MII);
1186 	}
1187 
1188 	/*
1189 	 * Set half or full duplex based on the duplex mode negotiated
1190 	 * by the PHY.
1191 	 */
1192 	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
1193 		DBPRINT(sc, BCE_INFO, "Setting Full-Duplex interface.\n");
1194 		BCE_CLRBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_HALF_DUPLEX);
1195 	} else {
1196 		DBPRINT(sc, BCE_INFO, "Setting Half-Duplex interface.\n");
1197 		BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_HALF_DUPLEX);
1198 	}
1199 }
1200 
1201 
1202 /****************************************************************************/
1203 /* Acquire NVRAM lock.                                                      */
1204 /*                                                                          */
1205 /* Before the NVRAM can be accessed the caller must acquire an NVRAM lock.  */
1206 /* Lock 1 is used by the firmware and lock 2 is used by the driver; the     */
1207 /* remaining locks are reserved.                                             */
1208 /*                                                                          */
1209 /* Returns:                                                                 */
1210 /*   0 on success, positive value on failure.                               */
1211 /****************************************************************************/
1212 static int
1213 bce_acquire_nvram_lock(struct bce_softc *sc)
1214 {
1215 	uint32_t val;
1216 	int j;
1217 
1218 	DBPRINT(sc, BCE_VERBOSE, "Acquiring NVRAM lock.\n");
1219 
1220 	/* Request access to the flash interface. */
1221 	REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_SET2);
1222 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1223 		val = REG_RD(sc, BCE_NVM_SW_ARB);
1224 		if (val & BCE_NVM_SW_ARB_ARB_ARB2)
1225 			break;
1226 
1227 		DELAY(5);
1228 	}
1229 
1230 	if (j >= NVRAM_TIMEOUT_COUNT) {
1231 		DBPRINT(sc, BCE_WARN, "Timeout acquiring NVRAM lock!\n");
1232 		return EBUSY;
1233 	}
1234 	return 0;
1235 }
1236 
1237 
1238 /****************************************************************************/
1239 /* Release NVRAM lock.                                                      */
1240 /*                                                                          */
1241 /* When the caller is finished accessing NVRAM the lock must be released.   */
1242 /* Lock 1 is used by the firmware and lock 2 is used by the driver; the     */
1243 /* remaining locks are reserved.                                             */
1244 /*                                                                          */
1245 /* Returns:                                                                 */
1246 /*   0 on success, positive value on failure.                               */
1247 /****************************************************************************/
1248 static int
1249 bce_release_nvram_lock(struct bce_softc *sc)
1250 {
1251 	int j;
1252 	uint32_t val;
1253 
1254 	DBPRINT(sc, BCE_VERBOSE, "Releasing NVRAM lock.\n");
1255 
1256 	/*
1257 	 * Relinquish nvram interface.
1258 	 */
1259 	REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_CLR2);
1260 
1261 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1262 		val = REG_RD(sc, BCE_NVM_SW_ARB);
1263 		if (!(val & BCE_NVM_SW_ARB_ARB_ARB2))
1264 			break;
1265 
1266 		DELAY(5);
1267 	}
1268 
1269 	if (j >= NVRAM_TIMEOUT_COUNT) {
1270 		DBPRINT(sc, BCE_WARN, "Timeout releasing NVRAM lock!\n");
1271 		return EBUSY;
1272 	}
1273 	return 0;
1274 }
1275 
1276 
1277 #ifdef BCE_NVRAM_WRITE_SUPPORT
1278 /****************************************************************************/
1279 /* Enable NVRAM write access.                                               */
1280 /*                                                                          */
1281 /* Before writing to NVRAM the caller must enable NVRAM writes.             */
1282 /*                                                                          */
1283 /* Returns:                                                                 */
1284 /*   0 on success, positive value on failure.                               */
1285 /****************************************************************************/
1286 static int
1287 bce_enable_nvram_write(struct bce_softc *sc)
1288 {
1289 	uint32_t val;
1290 
1291 	DBPRINT(sc, BCE_VERBOSE, "Enabling NVRAM write.\n");
1292 
1293 	val = REG_RD(sc, BCE_MISC_CFG);
1294 	REG_WR(sc, BCE_MISC_CFG, val | BCE_MISC_CFG_NVM_WR_EN_PCI);
1295 
1296 	if (!sc->bce_flash_info->buffered) {
1297 		int j;
1298 
1299 		REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
1300 		REG_WR(sc, BCE_NVM_COMMAND,
1301 		       BCE_NVM_COMMAND_WREN | BCE_NVM_COMMAND_DOIT);
1302 
1303 		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1304 			DELAY(5);
1305 
1306 			val = REG_RD(sc, BCE_NVM_COMMAND);
1307 			if (val & BCE_NVM_COMMAND_DONE)
1308 				break;
1309 		}
1310 
1311 		if (j >= NVRAM_TIMEOUT_COUNT) {
1312 			DBPRINT(sc, BCE_WARN, "Timeout writing NVRAM!\n");
1313 			return EBUSY;
1314 		}
1315 	}
1316 	return 0;
1317 }
1318 
1319 
1320 /****************************************************************************/
1321 /* Disable NVRAM write access.                                              */
1322 /*                                                                          */
1323 /* When the caller is finished writing to NVRAM write access must be        */
1324 /* disabled.                                                                */
1325 /*                                                                          */
1326 /* Returns:                                                                 */
1327 /*   Nothing.                                                               */
1328 /****************************************************************************/
1329 static void
1330 bce_disable_nvram_write(struct bce_softc *sc)
1331 {
1332 	uint32_t val;
1333 
1334 	DBPRINT(sc, BCE_VERBOSE, "Disabling NVRAM write.\n");
1335 
1336 	val = REG_RD(sc, BCE_MISC_CFG);
1337 	REG_WR(sc, BCE_MISC_CFG, val & ~BCE_MISC_CFG_NVM_WR_EN);
1338 }
1339 #endif	/* BCE_NVRAM_WRITE_SUPPORT */
1340 
1341 
1342 /****************************************************************************/
1343 /* Enable NVRAM access.                                                     */
1344 /*                                                                          */
1345 /* Before accessing NVRAM for read or write operations the caller must      */
1346 /* enable NVRAM access.                                                     */
1347 /*                                                                          */
1348 /* Returns:                                                                 */
1349 /*   Nothing.                                                               */
1350 /****************************************************************************/
1351 static void
1352 bce_enable_nvram_access(struct bce_softc *sc)
1353 {
1354 	uint32_t val;
1355 
1356 	DBPRINT(sc, BCE_VERBOSE, "Enabling NVRAM access.\n");
1357 
1358 	val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE);
1359 	/* Enable both bits, even on read. */
1360 	REG_WR(sc, BCE_NVM_ACCESS_ENABLE,
1361 	       val | BCE_NVM_ACCESS_ENABLE_EN | BCE_NVM_ACCESS_ENABLE_WR_EN);
1362 }
1363 
1364 
1365 /****************************************************************************/
1366 /* Disable NVRAM access.                                                    */
1367 /*                                                                          */
1368 /* When the caller is finished accessing NVRAM access must be disabled.     */
1369 /*                                                                          */
1370 /* Returns:                                                                 */
1371 /*   Nothing.                                                               */
1372 /****************************************************************************/
1373 static void
1374 bce_disable_nvram_access(struct bce_softc *sc)
1375 {
1376 	uint32_t val;
1377 
1378 	DBPRINT(sc, BCE_VERBOSE, "Disabling NVRAM access.\n");
1379 
1380 	val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE);
1381 
1382 	/* Disable both bits, even after read. */
1383 	REG_WR(sc, BCE_NVM_ACCESS_ENABLE,
1384 	       val & ~(BCE_NVM_ACCESS_ENABLE_EN | BCE_NVM_ACCESS_ENABLE_WR_EN));
1385 }
1386 
1387 
1388 #ifdef BCE_NVRAM_WRITE_SUPPORT
1389 /****************************************************************************/
1390 /* Erase NVRAM page before writing.                                         */
1391 /*                                                                          */
1392 /* Non-buffered flash parts require that a page be erased before it is      */
1393 /* written.                                                                 */
1394 /*                                                                          */
1395 /* Returns:                                                                 */
1396 /*   0 on success, positive value on failure.                               */
1397 /****************************************************************************/
1398 static int
1399 bce_nvram_erase_page(struct bce_softc *sc, uint32_t offset)
1400 {
1401 	uint32_t cmd;
1402 	int j;
1403 
1404 	/* Buffered flash doesn't require an erase. */
1405 	if (sc->bce_flash_info->buffered)
1406 		return 0;
1407 
1408 	DBPRINT(sc, BCE_VERBOSE, "Erasing NVRAM page.\n");
1409 
1410 	/* Build an erase command. */
1411 	cmd = BCE_NVM_COMMAND_ERASE | BCE_NVM_COMMAND_WR |
1412 	      BCE_NVM_COMMAND_DOIT;
1413 
1414 	/*
1415 	 * Clear the DONE bit separately, set the NVRAM address to erase,
1416 	 * and issue the erase command.
1417 	 */
1418 	REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
1419 	REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
1420 	REG_WR(sc, BCE_NVM_COMMAND, cmd);
1421 
1422 	/* Wait for completion. */
1423 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1424 		uint32_t val;
1425 
1426 		DELAY(5);
1427 
1428 		val = REG_RD(sc, BCE_NVM_COMMAND);
1429 		if (val & BCE_NVM_COMMAND_DONE)
1430 			break;
1431 	}
1432 
1433 	if (j >= NVRAM_TIMEOUT_COUNT) {
1434 		DBPRINT(sc, BCE_WARN, "Timeout erasing NVRAM.\n");
1435 		return EBUSY;
1436 	}
1437 	return 0;
1438 }
1439 #endif /* BCE_NVRAM_WRITE_SUPPORT */
1440 
1441 
1442 /****************************************************************************/
1443 /* Read a dword (32 bits) from NVRAM.                                       */
1444 /*                                                                          */
1445 /* Read a 32 bit word from NVRAM.  The caller is assumed to have already    */
1446 /* obtained the NVRAM lock and enabled the controller for NVRAM access.     */
1447 /*                                                                          */
1448 /* Returns:                                                                 */
1449 /*   0 on success and the 32 bit value read, positive value on failure.     */
1450 /****************************************************************************/
1451 static int
1452 bce_nvram_read_dword(struct bce_softc *sc, uint32_t offset, uint8_t *ret_val,
1453 		     uint32_t cmd_flags)
1454 {
1455 	uint32_t cmd;
1456 	int i, rc = 0;
1457 
1458 	/* Build the command word. */
1459 	cmd = BCE_NVM_COMMAND_DOIT | cmd_flags;
1460 
1461 	/* Calculate the offset for buffered flash. */
1462 	if (sc->bce_flash_info->buffered) {
1463 		offset = ((offset / sc->bce_flash_info->page_size) <<
1464 			  sc->bce_flash_info->page_bits) +
1465 			 (offset % sc->bce_flash_info->page_size);
1466 	}
1467 
1468 	/*
1469 	 * Clear the DONE bit separately, set the address to read,
1470 	 * and issue the read.
1471 	 */
1472 	REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
1473 	REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
1474 	REG_WR(sc, BCE_NVM_COMMAND, cmd);
1475 
1476 	/* Wait for completion. */
1477 	for (i = 0; i < NVRAM_TIMEOUT_COUNT; i++) {
1478 		uint32_t val;
1479 
1480 		DELAY(5);
1481 
1482 		val = REG_RD(sc, BCE_NVM_COMMAND);
1483 		if (val & BCE_NVM_COMMAND_DONE) {
1484 			val = REG_RD(sc, BCE_NVM_READ);
1485 
1486 			val = be32toh(val);
1487 			memcpy(ret_val, &val, 4);
1488 			break;
1489 		}
1490 	}
1491 
1492 	/* Check for errors. */
1493 	if (i >= NVRAM_TIMEOUT_COUNT) {
1494 		if_printf(&sc->arpcom.ac_if,
1495 			  "Timeout error reading NVRAM at offset 0x%08X!\n",
1496 			  offset);
1497 		rc = EBUSY;
1498 	}
1499 	return rc;
1500 }
1501 
1502 
1503 #ifdef BCE_NVRAM_WRITE_SUPPORT
1504 /****************************************************************************/
1505 /* Write a dword (32 bits) to NVRAM.                                        */
1506 /*                                                                          */
1507 /* Write a 32 bit word to NVRAM.  The caller is assumed to have already     */
1508 /* obtained the NVRAM lock, enabled the controller for NVRAM access, and    */
1509 /* enabled NVRAM write access.                                              */
1510 /*                                                                          */
1511 /* Returns:                                                                 */
1512 /*   0 on success, positive value on failure.                               */
1513 /****************************************************************************/
1514 static int
1515 bce_nvram_write_dword(struct bce_softc *sc, uint32_t offset, uint8_t *val,
1516 		      uint32_t cmd_flags)
1517 {
1518 	uint32_t cmd, val32;
1519 	int j;
1520 
1521 	/* Build the command word. */
1522 	cmd = BCE_NVM_COMMAND_DOIT | BCE_NVM_COMMAND_WR | cmd_flags;
1523 
1524 	/* Calculate the offset for buffered flash. */
1525 	if (sc->bce_flash_info->buffered) {
1526 		offset = ((offset / sc->bce_flash_info->page_size) <<
1527 			  sc->bce_flash_info->page_bits) +
1528 			 (offset % sc->bce_flash_info->page_size);
1529 	}
1530 
1531 	/*
1532 	 * Clear the DONE bit separately, convert NVRAM data to big-endian,
1533 	 * set the NVRAM address to write, and issue the write command.
1534 	 */
1535 	REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
1536 	memcpy(&val32, val, 4);
1537 	val32 = htobe32(val32);
1538 	REG_WR(sc, BCE_NVM_WRITE, val32);
1539 	REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
1540 	REG_WR(sc, BCE_NVM_COMMAND, cmd);
1541 
1542 	/* Wait for completion. */
1543 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1544 		DELAY(5);
1545 
1546 		if (REG_RD(sc, BCE_NVM_COMMAND) & BCE_NVM_COMMAND_DONE)
1547 			break;
1548 	}
1549 	if (j >= NVRAM_TIMEOUT_COUNT) {
1550 		if_printf(&sc->arpcom.ac_if,
1551 			  "Timeout error writing NVRAM at offset 0x%08X\n",
1552 			  offset);
1553 		return EBUSY;
1554 	}
1555 	return 0;
1556 }
1557 #endif /* BCE_NVRAM_WRITE_SUPPORT */
1558 
1559 
1560 /****************************************************************************/
1561 /* Initialize NVRAM access.                                                 */
1562 /*                                                                          */
1563 /* Identify the NVRAM device in use and prepare the NVRAM interface to      */
1564 /* access that device.                                                      */
1565 /*                                                                          */
1566 /* Returns:                                                                 */
1567 /*   0 on success, positive value on failure.                               */
1568 /****************************************************************************/
1569 static int
1570 bce_init_nvram(struct bce_softc *sc)
1571 {
1572 	uint32_t val;
1573 	int j, entry_count, rc = 0;
1574 	const struct flash_spec *flash;
1575 
1576 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __func__);
1577 
1578 	/* Determine the selected interface. */
1579 	val = REG_RD(sc, BCE_NVM_CFG1);
1580 
1581 	entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
1582 
1583 	/*
1584 	 * Flash reconfiguration is required to support additional
1585 	 * NVRAM devices not directly supported in hardware.
1586 	 * Check if the flash interface was reconfigured
1587 	 * by the bootcode.
1588 	 */
1589 
1590 	if (val & 0x40000000) {
1591 		/* Flash interface reconfigured by bootcode. */
1592 
1593 		DBPRINT(sc, BCE_INFO_LOAD,
1594 			"%s(): Flash WAS reconfigured.\n", __func__);
1595 
1596 		for (j = 0, flash = flash_table; j < entry_count;
1597 		     j++, flash++) {
1598 			if ((val & FLASH_BACKUP_STRAP_MASK) ==
1599 			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
1600 				sc->bce_flash_info = flash;
1601 				break;
1602 			}
1603 		}
1604 	} else {
1605 		/* Flash interface not yet reconfigured. */
1606 		uint32_t mask;
1607 
1608 		DBPRINT(sc, BCE_INFO_LOAD,
1609 			"%s(): Flash was NOT reconfigured.\n", __func__);
1610 
1611 		if (val & (1 << 23))
1612 			mask = FLASH_BACKUP_STRAP_MASK;
1613 		else
1614 			mask = FLASH_STRAP_MASK;
1615 
1616 		/* Look for the matching NVRAM device configuration data. */
1617 		for (j = 0, flash = flash_table; j < entry_count;
1618 		     j++, flash++) {
1619 			/* Check if the device matches any of the known devices. */
1620 			if ((val & mask) == (flash->strapping & mask)) {
1621 				/* Found a device match. */
1622 				sc->bce_flash_info = flash;
1623 
1624 				/* Request access to the flash interface. */
1625 				rc = bce_acquire_nvram_lock(sc);
1626 				if (rc != 0)
1627 					return rc;
1628 
1629 				/* Reconfigure the flash interface. */
1630 				bce_enable_nvram_access(sc);
1631 				REG_WR(sc, BCE_NVM_CFG1, flash->config1);
1632 				REG_WR(sc, BCE_NVM_CFG2, flash->config2);
1633 				REG_WR(sc, BCE_NVM_CFG3, flash->config3);
1634 				REG_WR(sc, BCE_NVM_WRITE1, flash->write1);
1635 				bce_disable_nvram_access(sc);
1636 				bce_release_nvram_lock(sc);
1637 				break;
1638 			}
1639 		}
1640 	}
1641 
1642 	/* Check if a matching device was found. */
1643 	if (j == entry_count) {
1644 		sc->bce_flash_info = NULL;
1645 		if_printf(&sc->arpcom.ac_if, "Unknown Flash NVRAM found!\n");
1646 		return ENODEV;
1647 	}
1648 
1649 	/* Write the flash config data to the shared memory interface. */
1650 	val = REG_RD_IND(sc, sc->bce_shmem_base + BCE_SHARED_HW_CFG_CONFIG2) &
1651 	      BCE_SHARED_HW_CFG2_NVM_SIZE_MASK;
1652 	if (val)
1653 		sc->bce_flash_size = val;
1654 	else
1655 		sc->bce_flash_size = sc->bce_flash_info->total_size;
1656 
1657 	DBPRINT(sc, BCE_INFO_LOAD, "%s() flash->total_size = 0x%08X\n",
1658 		__func__, sc->bce_flash_info->total_size);
1659 
1660 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __func__);
1661 
1662 	return rc;
1663 }
1664 
1665 
1666 /****************************************************************************/
1667 /* Read an arbitrary range of data from NVRAM.                              */
1668 /*                                                                          */
1669 /* Prepares the NVRAM interface for access and reads the requested data     */
1670 /* into the supplied buffer.                                                */
1671 /*                                                                          */
1672 /* Returns:                                                                 */
1673 /*   0 on success and the data read, positive value on failure.             */
1674 /****************************************************************************/
1675 static int
1676 bce_nvram_read(struct bce_softc *sc, uint32_t offset, uint8_t *ret_buf,
1677 	       int buf_size)
1678 {
1679 	uint32_t cmd_flags, offset32, len32, extra;
1680 	int rc = 0;
1681 
1682 	if (buf_size == 0)
1683 		return 0;
1684 
1685 	/* Request access to the flash interface. */
1686 	rc = bce_acquire_nvram_lock(sc);
1687 	if (rc != 0)
1688 		return rc;
1689 
1690 	/* Enable access to flash interface */
1691 	bce_enable_nvram_access(sc);
1692 
1693 	len32 = buf_size;
1694 	offset32 = offset;
1695 	extra = 0;
1696 
1697 	cmd_flags = 0;
1698 
1699 	/* XXX should we release nvram lock if read_dword() fails? */
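	/*
	 * If the starting offset is not dword aligned, read the dword
	 * containing it and copy back only the bytes the caller asked for.
	 */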
1700 	if (offset32 & 3) {
1701 		uint8_t buf[4];
1702 		uint32_t pre_len;
1703 
1704 		offset32 &= ~3;
1705 		pre_len = 4 - (offset & 3);
1706 
1707 		if (pre_len >= len32) {
1708 			pre_len = len32;
1709 			cmd_flags = BCE_NVM_COMMAND_FIRST | BCE_NVM_COMMAND_LAST;
1710 		} else {
1711 			cmd_flags = BCE_NVM_COMMAND_FIRST;
1712 		}
1713 
1714 		rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
1715 		if (rc)
1716 			return rc;
1717 
1718 		memcpy(ret_buf, buf + (offset & 3), pre_len);
1719 
1720 		offset32 += 4;
1721 		ret_buf += pre_len;
1722 		len32 -= pre_len;
1723 	}
1724 
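	/*
	 * Round the remaining length up to a dword multiple; 'extra'
	 * counts the pad bytes read from NVRAM but not returned to
	 * the caller.
	 */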
1725 	if (len32 & 3) {
1726 		extra = 4 - (len32 & 3);
1727 		len32 = (len32 + 4) & ~3;
1728 	}
1729 
1730 	if (len32 == 4) {
1731 		uint8_t buf[4];
1732 
1733 		if (cmd_flags)
1734 			cmd_flags = BCE_NVM_COMMAND_LAST;
1735 		else
1736 			cmd_flags = BCE_NVM_COMMAND_FIRST |
1737 				    BCE_NVM_COMMAND_LAST;
1738 
1739 		rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
1740 
1741 		memcpy(ret_buf, buf, 4 - extra);
1742 	} else if (len32 > 0) {
1743 		uint8_t buf[4];
1744 
1745 		/* Read the first word. */
1746 		if (cmd_flags)
1747 			cmd_flags = 0;
1748 		else
1749 			cmd_flags = BCE_NVM_COMMAND_FIRST;
1750 
1751 		rc = bce_nvram_read_dword(sc, offset32, ret_buf, cmd_flags);
1752 
1753 		/* Advance to the next dword. */
1754 		offset32 += 4;
1755 		ret_buf += 4;
1756 		len32 -= 4;
1757 
1758 		while (len32 > 4 && rc == 0) {
1759 			rc = bce_nvram_read_dword(sc, offset32, ret_buf, 0);
1760 
1761 			/* Advance to the next dword. */
1762 			offset32 += 4;
1763 			ret_buf += 4;
1764 			len32 -= 4;
1765 		}
1766 
1767 		if (rc)
1768 			return rc;
1769 
1770 		cmd_flags = BCE_NVM_COMMAND_LAST;
1771 		rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
1772 
1773 		memcpy(ret_buf, buf, 4 - extra);
1774 	}
1775 
1776 	/* Disable access to flash interface and release the lock. */
1777 	bce_disable_nvram_access(sc);
1778 	bce_release_nvram_lock(sc);
1779 
1780 	return rc;
1781 }
1782 
1783 
1784 #ifdef BCE_NVRAM_WRITE_SUPPORT
1785 /****************************************************************************/
1786 /* Write an arbitrary range of data to NVRAM.                               */
1787 /*                                                                          */
1788 /* Prepares the NVRAM interface for write access and writes the requested   */
1789 /* data from the supplied buffer.  The caller is responsible for            */
1790 /* calculating any appropriate CRCs.                                        */
1791 /*                                                                          */
1792 /* Returns:                                                                 */
1793 /*   0 on success, positive value on failure.                               */
1794 /****************************************************************************/
1795 static int
1796 bce_nvram_write(struct bce_softc *sc, uint32_t offset, uint8_t *data_buf,
1797 		int buf_size)
1798 {
1799 	uint32_t written, offset32, len32;
1800 	uint8_t *buf, start[4], end[4];
1801 	int rc = 0;
1802 	int align_start, align_end;
1803 
1804 	buf = data_buf;
1805 	offset32 = offset;
1806 	len32 = buf_size;
1807 	align_end = 0;
1808 	align_start = (offset32 & 3);
1809 
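	/*
	 * NVRAM is written a dword at a time, so if the start or end of
	 * the request is not dword aligned, read the surrounding dwords
	 * first and merge the caller's data into them.
	 */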
1810 	if (align_start) {
1811 		offset32 &= ~3;
1812 		len32 += align_start;
1813 		rc = bce_nvram_read(sc, offset32, start, 4);
1814 		if (rc)
1815 			return rc;
1816 	}
1817 
1818 	if (len32 & 3) {
1819 		if (len32 > 4 || !align_start) {
1820 			align_end = 4 - (len32 & 3);
1821 			len32 += align_end;
1822 			rc = bce_nvram_read(sc, offset32 + len32 - 4, end, 4);
1823 			if (rc)
1824 				return rc;
1825 		}
1826 	}
1827 
1828 	if (align_start || align_end) {
1829 		buf = kmalloc(len32, M_DEVBUF, M_NOWAIT);
1830 		if (buf == NULL)
1831 			return ENOMEM;
1832 		if (align_start)
1833 			memcpy(buf, start, 4);
1834 		if (align_end)
1835 			memcpy(buf + len32 - 4, end, 4);
1836 		memcpy(buf + align_start, data_buf, buf_size);
1837 	}
1838 
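	/*
	 * Write the (now dword-aligned) data one flash page at a time,
	 * preserving any bytes in each page that are not being updated.
	 */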
1839 	written = 0;
1840 	while (written < len32 && rc == 0) {
1841 		uint32_t page_start, page_end, data_start, data_end;
1842 		uint32_t addr, cmd_flags;
1843 		int i;
1844 		uint8_t flash_buffer[264];
1845 
1846 		/* Find the page_start addr */
1847 		page_start = offset32 + written;
1848 		page_start -= (page_start % sc->bce_flash_info->page_size);
1849 		/* Find the page_end addr */
1850 		page_end = page_start + sc->bce_flash_info->page_size;
1851 		/* Find the data_start addr */
1852 		data_start = (written == 0) ? offset32 : page_start;
1853 		/* Find the data_end addr */
1854 		data_end = (page_end > offset32 + len32) ? (offset32 + len32)
1855 							 : page_end;
1856 
1857 		/* Request access to the flash interface. */
1858 		rc = bce_acquire_nvram_lock(sc);
1859 		if (rc != 0)
1860 			goto nvram_write_end;
1861 
1862 		/* Enable access to flash interface */
1863 		bce_enable_nvram_access(sc);
1864 
1865 		cmd_flags = BCE_NVM_COMMAND_FIRST;
1866 		if (sc->bce_flash_info->buffered == 0) {
1867 			int j;
1868 
1869 			/*
1870 			 * Read the whole page into the buffer
1871 			 * (non-buffered flash only)
1872 			 */
1873 			for (j = 0; j < sc->bce_flash_info->page_size; j += 4) {
1874 				if (j == (sc->bce_flash_info->page_size - 4))
1875 					cmd_flags |= BCE_NVM_COMMAND_LAST;
1876 
1877 				rc = bce_nvram_read_dword(sc, page_start + j,
1878 							  &flash_buffer[j],
1879 							  cmd_flags);
1880 				if (rc)
1881 					goto nvram_write_end;
1882 
1883 				cmd_flags = 0;
1884 			}
1885 		}
1886 
1887 		/* Enable writes to flash interface (unlock write-protect) */
1888 		rc = bce_enable_nvram_write(sc);
1889 		if (rc != 0)
1890 			goto nvram_write_end;
1891 
1892 		/* Erase the page */
1893 		rc = bce_nvram_erase_page(sc, page_start);
1894 		if (rc != 0)
1895 			goto nvram_write_end;
1896 
1897 		/* Re-enable the write again for the actual write */
1898 		bce_enable_nvram_write(sc);
1899 
1900 		/* Loop to write back the buffer data from page_start to
1901 		 * data_start */
1902 		i = 0;
1903 		if (sc->bce_flash_info->buffered == 0) {
1904 			for (addr = page_start; addr < data_start;
1905 			     addr += 4, i += 4) {
1906 				rc = bce_nvram_write_dword(sc, addr,
1907 							   &flash_buffer[i],
1908 							   cmd_flags);
1909 				if (rc != 0)
1910 					goto nvram_write_end;
1911 
1912 				cmd_flags = 0;
1913 			}
1914 		}
1915 
1916 		/* Loop to write the new data from data_start to data_end */
1917 		for (addr = data_start; addr < data_end; addr += 4, i++) {
1918 			if (addr == page_end - 4 ||
1919 			    (sc->bce_flash_info->buffered &&
1920 			     addr == data_end - 4))
1921 				cmd_flags |= BCE_NVM_COMMAND_LAST;
1922 
1923 			rc = bce_nvram_write_dword(sc, addr, buf, cmd_flags);
1924 			if (rc != 0)
1925 				goto nvram_write_end;
1926 
1927 			cmd_flags = 0;
1928 			buf += 4;
1929 		}
1930 
1931 		/* Loop to write back the buffer data from data_end
1932 		 * to page_end */
1933 		if (sc->bce_flash_info->buffered == 0) {
1934 			for (addr = data_end; addr < page_end;
1935 			     addr += 4, i += 4) {
1936 				if (addr == page_end-4)
1937 					cmd_flags = BCE_NVM_COMMAND_LAST;
1938 
1939 				rc = bce_nvram_write_dword(sc, addr,
1940 					&flash_buffer[i], cmd_flags);
1941 				if (rc != 0)
1942 					goto nvram_write_end;
1943 
1944 				cmd_flags = 0;
1945 			}
1946 		}
1947 
1948 		/* Disable writes to flash interface (lock write-protect) */
1949 		bce_disable_nvram_write(sc);
1950 
1951 		/* Disable access to flash interface */
1952 		bce_disable_nvram_access(sc);
1953 		bce_release_nvram_lock(sc);
1954 
1955 		/* Increment written */
1956 		written += data_end - data_start;
1957 	}
1958 
1959 nvram_write_end:
1960 	if (align_start || align_end)
1961 		kfree(buf, M_DEVBUF);
1962 	return rc;
1963 }
1964 #endif /* BCE_NVRAM_WRITE_SUPPORT */
1965 
1966 
1967 /****************************************************************************/
1968 /* Verifies that NVRAM is accessible and contains valid data.               */
1969 /*                                                                          */
1970 /* Reads the configuration data from NVRAM and verifies that the CRC is     */
1971 /* correct.                                                                 */
1972 /*                                                                          */
1973 /* Returns:                                                                 */
1974 /*   0 on success, positive value on failure.                               */
1975 /****************************************************************************/
1976 static int
1977 bce_nvram_test(struct bce_softc *sc)
1978 {
1979 	uint32_t buf[BCE_NVRAM_SIZE / 4];
1980 	uint32_t magic, csum;
1981 	uint8_t *data = (uint8_t *)buf;
1982 	int rc = 0;
1983 
1984 	/*
1985 	 * Check that the device NVRAM is valid by reading
1986 	 * the magic value at offset 0.
1987 	 */
1988 	rc = bce_nvram_read(sc, 0, data, 4);
1989 	if (rc != 0)
1990 		return rc;
1991 
1992 	magic = be32toh(buf[0]);
1993 	if (magic != BCE_NVRAM_MAGIC) {
1994 		if_printf(&sc->arpcom.ac_if,
1995 			  "Invalid NVRAM magic value! Expected: 0x%08X, "
1996 			  "Found: 0x%08X\n", BCE_NVRAM_MAGIC, magic);
1997 		return ENODEV;
1998 	}
1999 
2000 	/*
2001 	 * Verify that the device NVRAM includes valid
2002 	 * configuration data.
2003 	 */
2004 	rc = bce_nvram_read(sc, 0x100, data, BCE_NVRAM_SIZE);
2005 	if (rc != 0)
2006 		return rc;
2007 
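	/*
	 * Each 256 byte configuration block carries its own CRC32, so
	 * the CRC computed over the whole block reduces to a fixed
	 * residual value when the data is intact.
	 */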
2008 	csum = ether_crc32_le(data, 0x100);
2009 	if (csum != BCE_CRC32_RESIDUAL) {
2010 		if_printf(&sc->arpcom.ac_if,
2011 			  "Invalid Manufacturing Information NVRAM CRC! "
2012 			  "Expected: 0x%08X, Found: 0x%08X\n",
2013 			  BCE_CRC32_RESIDUAL, csum);
2014 		return ENODEV;
2015 	}
2016 
2017 	csum = ether_crc32_le(data + 0x100, 0x100);
2018 	if (csum != BCE_CRC32_RESIDUAL) {
2019 		if_printf(&sc->arpcom.ac_if,
2020 			  "Invalid Feature Configuration Information "
2021 			  "NVRAM CRC! Expected: 0x%08X, Found: 0x%08X\n",
2022 			  BCE_CRC32_RESIDUAL, csum);
2023 		rc = ENODEV;
2024 	}
2025 	return rc;
2026 }
2027 
2028 
2029 /****************************************************************************/
2030 /* Free any DMA memory owned by the driver.                                 */
2031 /*                                                                          */
2032 /* Scans through each data structure that requires DMA memory and frees     */
2033 /* the memory if allocated.                                                 */
2034 /*                                                                          */
2035 /* Returns:                                                                 */
2036 /*   Nothing.                                                               */
2037 /****************************************************************************/
2038 static void
2039 bce_dma_free(struct bce_softc *sc)
2040 {
2041 	int i;
2042 
2043 	/* Destroy the status block. */
2044 	if (sc->status_tag != NULL) {
2045 		if (sc->status_block != NULL) {
2046 			bus_dmamap_unload(sc->status_tag, sc->status_map);
2047 			bus_dmamem_free(sc->status_tag, sc->status_block,
2048 					sc->status_map);
2049 		}
2050 		bus_dma_tag_destroy(sc->status_tag);
2051 	}
2052 
2053 
2054 	/* Destroy the statistics block. */
2055 	if (sc->stats_tag != NULL) {
2056 		if (sc->stats_block != NULL) {
2057 			bus_dmamap_unload(sc->stats_tag, sc->stats_map);
2058 			bus_dmamem_free(sc->stats_tag, sc->stats_block,
2059 					sc->stats_map);
2060 		}
2061 		bus_dma_tag_destroy(sc->stats_tag);
2062 	}
2063 
2064 	/* Destroy the TX buffer descriptor DMA resources. */
2065 	if (sc->tx_bd_chain_tag != NULL) {
2066 		for (i = 0; i < TX_PAGES; i++) {
2067 			if (sc->tx_bd_chain[i] != NULL) {
2068 				bus_dmamap_unload(sc->tx_bd_chain_tag,
2069 						  sc->tx_bd_chain_map[i]);
2070 				bus_dmamem_free(sc->tx_bd_chain_tag,
2071 						sc->tx_bd_chain[i],
2072 						sc->tx_bd_chain_map[i]);
2073 			}
2074 		}
2075 		bus_dma_tag_destroy(sc->tx_bd_chain_tag);
2076 	}
2077 
2078 	/* Destroy the RX buffer descriptor DMA resources. */
2079 	if (sc->rx_bd_chain_tag != NULL) {
2080 		for (i = 0; i < RX_PAGES; i++) {
2081 			if (sc->rx_bd_chain[i] != NULL) {
2082 				bus_dmamap_unload(sc->rx_bd_chain_tag,
2083 						  sc->rx_bd_chain_map[i]);
2084 				bus_dmamem_free(sc->rx_bd_chain_tag,
2085 						sc->rx_bd_chain[i],
2086 						sc->rx_bd_chain_map[i]);
2087 			}
2088 		}
2089 		bus_dma_tag_destroy(sc->rx_bd_chain_tag);
2090 	}
2091 
2092 	/* Destroy the TX mbuf DMA resources. */
2093 	if (sc->tx_mbuf_tag != NULL) {
2094 		for (i = 0; i < TOTAL_TX_BD; i++) {
2095 			/* Must have been unloaded in bce_stop() */
2096 			KKASSERT(sc->tx_mbuf_ptr[i] == NULL);
2097 			bus_dmamap_destroy(sc->tx_mbuf_tag,
2098 					   sc->tx_mbuf_map[i]);
2099 		}
2100 		bus_dma_tag_destroy(sc->tx_mbuf_tag);
2101 	}
2102 
2103 	/* Destroy the RX mbuf DMA resources. */
2104 	if (sc->rx_mbuf_tag != NULL) {
2105 		for (i = 0; i < TOTAL_RX_BD; i++) {
2106 			/* Must have been unloaded in bce_stop() */
2107 			KKASSERT(sc->rx_mbuf_ptr[i] == NULL);
2108 			bus_dmamap_destroy(sc->rx_mbuf_tag,
2109 					   sc->rx_mbuf_map[i]);
2110 		}
2111 		bus_dmamap_destroy(sc->rx_mbuf_tag, sc->rx_mbuf_tmpmap);
2112 		bus_dma_tag_destroy(sc->rx_mbuf_tag);
2113 	}
2114 
2115 	/* Destroy the parent tag */
2116 	if (sc->parent_tag != NULL)
2117 		bus_dma_tag_destroy(sc->parent_tag);
2118 }
2119 
2120 
2121 /****************************************************************************/
2122 /* Get DMA memory from the OS.                                              */
2123 /*                                                                          */
2124 /* Validates that the OS has provided DMA buffers in response to a          */
2125 /* bus_dmamap_load() call and saves the physical address of those buffers.  */
2126 /* If the mapping fails the callback simply returns, leaving the caller's   */
2127 /* bus address untouched; errors are reported back through the return value */
2128 /* of bus_dmamap_load().                                                    */
2129 /*                                                                          */
2130 /* Returns:                                                                 */
2131 /*   Nothing.                                                               */
2132 /****************************************************************************/
2133 static void
2134 bce_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2135 {
2136 	bus_addr_t *busaddr = arg;
2137 
2138 	/*
2139 	 * Simulate a mapping failure.
2140 	 * XXX not correct.
2141 	 */
2142 	DBRUNIF(DB_RANDOMTRUE(bce_debug_dma_map_addr_failure),
2143 		kprintf("bce: %s(%d): Simulating DMA mapping error.\n",
2144 			__FILE__, __LINE__);
2145 		error = ENOMEM);
2146 
2147 	/* Check for an error and signal the caller that an error occurred. */
2148 	if (error)
2149 		return;
2150 
2151 	KASSERT(nseg == 1, ("only one segment is allowed\n"));
2152 	*busaddr = segs->ds_addr;
2153 }
2154 
2155 
2156 /****************************************************************************/
2157 /* Allocate any DMA memory needed by the driver.                            */
2158 /*                                                                          */
2159 /* Allocates DMA memory needed for the various global structures needed by  */
2160 /* hardware.                                                                */
2161 /*                                                                          */
2162 /* Returns:                                                                 */
2163 /*   0 for success, positive value for failure.                             */
2164 /****************************************************************************/
2165 static int
2166 bce_dma_alloc(struct bce_softc *sc)
2167 {
2168 	struct ifnet *ifp = &sc->arpcom.ac_if;
2169 	int i, j, rc = 0;
2170 	bus_addr_t busaddr;
2171 
2172 	/*
2173 	 * Allocate the parent bus DMA tag appropriate for PCI.
2174 	 */
2175 	rc = bus_dma_tag_create(NULL, 1, BCE_DMA_BOUNDARY,
2176 				sc->max_bus_addr, BUS_SPACE_MAXADDR,
2177 				NULL, NULL,
2178 				BUS_SPACE_MAXSIZE_32BIT, 0,
2179 				BUS_SPACE_MAXSIZE_32BIT,
2180 				0, &sc->parent_tag);
2181 	if (rc != 0) {
2182 		if_printf(ifp, "Could not allocate parent DMA tag!\n");
2183 		return rc;
2184 	}
2185 
2186 	/*
2187 	 * Allocate status block.
2188 	 */
2189 	sc->status_block = bus_dmamem_coherent_any(sc->parent_tag,
2190 				BCE_DMA_ALIGN, BCE_STATUS_BLK_SZ,
2191 				BUS_DMA_WAITOK | BUS_DMA_ZERO,
2192 				&sc->status_tag, &sc->status_map,
2193 				&sc->status_block_paddr);
2194 	if (sc->status_block == NULL) {
2195 		if_printf(ifp, "Could not allocate status block!\n");
2196 		return ENOMEM;
2197 	}
2198 
2199 	/*
2200 	 * Allocate statistics block.
2201 	 */
2202 	sc->stats_block = bus_dmamem_coherent_any(sc->parent_tag,
2203 				BCE_DMA_ALIGN, BCE_STATS_BLK_SZ,
2204 				BUS_DMA_WAITOK | BUS_DMA_ZERO,
2205 				&sc->stats_tag, &sc->stats_map,
2206 				&sc->stats_block_paddr);
2207 	if (sc->stats_block == NULL) {
2208 		if_printf(ifp, "Could not allocate statistics block!\n");
2209 		return ENOMEM;
2210 	}
2211 
2212 	/*
2213 	 * Create a DMA tag for the TX buffer descriptor chain,
2214 	 * allocate and clear the memory, and fetch the
2215 	 * physical address of the block.
2216 	 */
2217 	rc = bus_dma_tag_create(sc->parent_tag, BCM_PAGE_SIZE, 0,
2218 				BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
2219 				NULL, NULL,
2220 				BCE_TX_CHAIN_PAGE_SZ, 1, BCE_TX_CHAIN_PAGE_SZ,
2221 				0, &sc->tx_bd_chain_tag);
2222 	if (rc != 0) {
2223 		if_printf(ifp, "Could not allocate "
2224 			  "TX descriptor chain DMA tag!\n");
2225 		return rc;
2226 	}
2227 
2228 	for (i = 0; i < TX_PAGES; i++) {
2229 		rc = bus_dmamem_alloc(sc->tx_bd_chain_tag,
2230 				      (void **)&sc->tx_bd_chain[i],
2231 				      BUS_DMA_WAITOK | BUS_DMA_ZERO |
2232 				      BUS_DMA_COHERENT,
2233 				      &sc->tx_bd_chain_map[i]);
2234 		if (rc != 0) {
2235 			if_printf(ifp, "Could not allocate %dth TX descriptor "
2236 				  "chain DMA memory!\n", i);
2237 			return rc;
2238 		}
2239 
2240 		rc = bus_dmamap_load(sc->tx_bd_chain_tag,
2241 				     sc->tx_bd_chain_map[i],
2242 				     sc->tx_bd_chain[i], BCE_TX_CHAIN_PAGE_SZ,
2243 				     bce_dma_map_addr, &busaddr,
2244 				     BUS_DMA_WAITOK);
2245 		if (rc != 0) {
2246 			if (rc == EINPROGRESS) {
2247 				panic("%s coherent memory loading "
2248 				      "is still in progress!", ifp->if_xname);
2249 			}
2250 			if_printf(ifp, "Could not map %dth TX descriptor "
2251 				  "chain DMA memory!\n", i);
2252 			bus_dmamem_free(sc->tx_bd_chain_tag,
2253 					sc->tx_bd_chain[i],
2254 					sc->tx_bd_chain_map[i]);
2255 			sc->tx_bd_chain[i] = NULL;
2256 			return rc;
2257 		}
2258 
2259 		sc->tx_bd_chain_paddr[i] = busaddr;
2260 		/* DRC - Fix for 64 bit systems. */
2261 		DBPRINT(sc, BCE_INFO, "tx_bd_chain_paddr[%d] = 0x%08X\n",
2262 			i, (uint32_t)sc->tx_bd_chain_paddr[i]);
2263 	}
2264 
2265 	/* Create a DMA tag for TX mbufs. */
2266 	rc = bus_dma_tag_create(sc->parent_tag, 1, 0,
2267 				BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
2268 				NULL, NULL,
2269 				/* BCE_MAX_JUMBO_ETHER_MTU_VLAN */MCLBYTES,
2270 				BCE_MAX_SEGMENTS, MCLBYTES,
2271 				BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK |
2272 				BUS_DMA_ONEBPAGE,
2273 				&sc->tx_mbuf_tag);
2274 	if (rc != 0) {
2275 		if_printf(ifp, "Could not allocate TX mbuf DMA tag!\n");
2276 		return rc;
2277 	}
2278 
2279 	/* Create DMA maps for the TX mbufs clusters. */
2280 	for (i = 0; i < TOTAL_TX_BD; i++) {
2281 		rc = bus_dmamap_create(sc->tx_mbuf_tag,
2282 				       BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
2283 				       &sc->tx_mbuf_map[i]);
2284 		if (rc != 0) {
2285 			for (j = 0; j < i; ++j) {
2286 				bus_dmamap_destroy(sc->tx_mbuf_tag,
2287 						   sc->tx_mbuf_map[j]);
2288 			}
2289 			bus_dma_tag_destroy(sc->tx_mbuf_tag);
2290 			sc->tx_mbuf_tag = NULL;
2291 
2292 			if_printf(ifp, "Unable to create "
2293 				  "%dth TX mbuf DMA map!\n", i);
2294 			return rc;
2295 		}
2296 	}
2297 
2298 	/*
2299 	 * Create a DMA tag for the RX buffer descriptor chain,
2300 	 * allocate and clear the memory, and fetch the physical
2301 	 * address of the blocks.
2302 	 */
2303 	rc = bus_dma_tag_create(sc->parent_tag, BCM_PAGE_SIZE, 0,
2304 				BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
2305 				NULL, NULL,
2306 				BCE_RX_CHAIN_PAGE_SZ, 1, BCE_RX_CHAIN_PAGE_SZ,
2307 				0, &sc->rx_bd_chain_tag);
2308 	if (rc != 0) {
2309 		if_printf(ifp, "Could not allocate "
2310 			  "RX descriptor chain DMA tag!\n");
2311 		return rc;
2312 	}
2313 
2314 	for (i = 0; i < RX_PAGES; i++) {
2315 		rc = bus_dmamem_alloc(sc->rx_bd_chain_tag,
2316 				      (void **)&sc->rx_bd_chain[i],
2317 				      BUS_DMA_WAITOK | BUS_DMA_ZERO |
2318 				      BUS_DMA_COHERENT,
2319 				      &sc->rx_bd_chain_map[i]);
2320 		if (rc != 0) {
2321 			if_printf(ifp, "Could not allocate %dth RX descriptor "
2322 				  "chain DMA memory!\n", i);
2323 			return rc;
2324 		}
2325 
2326 		rc = bus_dmamap_load(sc->rx_bd_chain_tag,
2327 				     sc->rx_bd_chain_map[i],
2328 				     sc->rx_bd_chain[i], BCE_RX_CHAIN_PAGE_SZ,
2329 				     bce_dma_map_addr, &busaddr,
2330 				     BUS_DMA_WAITOK);
2331 		if (rc != 0) {
2332 			if (rc == EINPROGRESS) {
2333 				panic("%s coherent memory loading "
2334 				      "is still in progress!", ifp->if_xname);
2335 			}
2336 			if_printf(ifp, "Could not map %dth RX descriptor "
2337 				  "chain DMA memory!\n", i);
2338 			bus_dmamem_free(sc->rx_bd_chain_tag,
2339 					sc->rx_bd_chain[i],
2340 					sc->rx_bd_chain_map[i]);
2341 			sc->rx_bd_chain[i] = NULL;
2342 			return rc;
2343 		}
2344 
2345 		sc->rx_bd_chain_paddr[i] = busaddr;
2346 		/* DRC - Fix for 64 bit systems. */
2347 		DBPRINT(sc, BCE_INFO, "rx_bd_chain_paddr[%d] = 0x%08X\n",
2348 			i, (uint32_t)sc->rx_bd_chain_paddr[i]);
2349 	}
2350 
2351 	/* Create a DMA tag for RX mbufs. */
2352 	rc = bus_dma_tag_create(sc->parent_tag, 1, 0,
2353 				BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
2354 				NULL, NULL,
2355 				MCLBYTES, 1, MCLBYTES,
2356 				BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK,
2357 				&sc->rx_mbuf_tag);
2358 	if (rc != 0) {
2359 		if_printf(ifp, "Could not allocate RX mbuf DMA tag!\n");
2360 		return rc;
2361 	}
2362 
2363 	/* Create tmp DMA map for RX mbuf clusters. */
2364 	rc = bus_dmamap_create(sc->rx_mbuf_tag, BUS_DMA_WAITOK,
2365 			       &sc->rx_mbuf_tmpmap);
2366 	if (rc != 0) {
2367 		bus_dma_tag_destroy(sc->rx_mbuf_tag);
2368 		sc->rx_mbuf_tag = NULL;
2369 
2370 		if_printf(ifp, "Could not create RX mbuf tmp DMA map!\n");
2371 		return rc;
2372 	}
2373 
2374 	/* Create DMA maps for the RX mbuf clusters. */
2375 	for (i = 0; i < TOTAL_RX_BD; i++) {
2376 		rc = bus_dmamap_create(sc->rx_mbuf_tag, BUS_DMA_WAITOK,
2377 				       &sc->rx_mbuf_map[i]);
2378 		if (rc != 0) {
2379 			for (j = 0; j < i; ++j) {
2380 				bus_dmamap_destroy(sc->rx_mbuf_tag,
2381 						   sc->rx_mbuf_map[j]);
2382 			}
2383 			bus_dma_tag_destroy(sc->rx_mbuf_tag);
2384 			sc->rx_mbuf_tag = NULL;
2385 
2386 			if_printf(ifp, "Unable to create "
2387 				  "%dth RX mbuf DMA map!\n", i);
2388 			return rc;
2389 		}
2390 	}
2391 	return 0;
2392 }
2393 
2394 
2395 /****************************************************************************/
2396 /* Firmware synchronization.                                                */
2397 /*                                                                          */
2398 /* Before performing certain operations such as a chip reset, synchronize   */
2399 /* with the firmware first.                                                 */
2400 /*                                                                          */
2401 /* Returns:                                                                 */
2402 /*   0 for success, positive value for failure.                             */
2403 /****************************************************************************/
2404 static int
2405 bce_fw_sync(struct bce_softc *sc, uint32_t msg_data)
2406 {
2407 	int i, rc = 0;
2408 	uint32_t val;
2409 
2410 	/* Don't waste any time if we've timed out before. */
2411 	if (sc->bce_fw_timed_out)
2412 		return EBUSY;
2413 
2414 	/* Increment the message sequence number. */
2415 	sc->bce_fw_wr_seq++;
2416 	msg_data |= sc->bce_fw_wr_seq;
2417 
2418 	DBPRINT(sc, BCE_VERBOSE, "bce_fw_sync(): msg_data = 0x%08X\n", msg_data);
2419 
2420 	/* Send the message to the bootcode driver mailbox. */
2421 	REG_WR_IND(sc, sc->bce_shmem_base + BCE_DRV_MB, msg_data);
2422 
2423 	/* Wait for the bootcode to acknowledge the message. */
2424 	for (i = 0; i < FW_ACK_TIME_OUT_MS; i++) {
2425 		/* Check for a response in the bootcode firmware mailbox. */
2426 		val = REG_RD_IND(sc, sc->bce_shmem_base + BCE_FW_MB);
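		/* The bootcode echoes the sequence number in the ACK field. */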
2427 		if ((val & BCE_FW_MSG_ACK) == (msg_data & BCE_DRV_MSG_SEQ))
2428 			break;
2429 		DELAY(1000);
2430 	}
2431 
2432 	/* If we've timed out, tell the bootcode that we've stopped waiting. */
2433 	if ((val & BCE_FW_MSG_ACK) != (msg_data & BCE_DRV_MSG_SEQ) &&
2434 	    (msg_data & BCE_DRV_MSG_DATA) != BCE_DRV_MSG_DATA_WAIT0) {
2435 		if_printf(&sc->arpcom.ac_if,
2436 			  "Firmware synchronization timeout! "
2437 			  "msg_data = 0x%08X\n", msg_data);
2438 
2439 		msg_data &= ~BCE_DRV_MSG_CODE;
2440 		msg_data |= BCE_DRV_MSG_CODE_FW_TIMEOUT;
2441 
2442 		REG_WR_IND(sc, sc->bce_shmem_base + BCE_DRV_MB, msg_data);
2443 
2444 		sc->bce_fw_timed_out = 1;
2445 		rc = EBUSY;
2446 	}
2447 	return rc;
2448 }
2449 
2450 
2451 /****************************************************************************/
2452 /* Load Receive Virtual 2 Physical (RV2P) processor firmware.               */
2453 /*                                                                          */
2454 /* Returns:                                                                 */
2455 /*   Nothing.                                                               */
2456 /****************************************************************************/
2457 static void
2458 bce_load_rv2p_fw(struct bce_softc *sc, uint32_t *rv2p_code,
2459 		 uint32_t rv2p_code_len, uint32_t rv2p_proc)
2460 {
2461 	int i;
2462 	uint32_t val;
2463 
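	/*
	 * Each RV2P instruction is 64 bits wide; write it as a high/low
	 * dword pair and commit it to instruction RAM at index (i / 8).
	 */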
2464 	for (i = 0; i < rv2p_code_len; i += 8) {
2465 		REG_WR(sc, BCE_RV2P_INSTR_HIGH, *rv2p_code);
2466 		rv2p_code++;
2467 		REG_WR(sc, BCE_RV2P_INSTR_LOW, *rv2p_code);
2468 		rv2p_code++;
2469 
2470 		if (rv2p_proc == RV2P_PROC1) {
2471 			val = (i / 8) | BCE_RV2P_PROC1_ADDR_CMD_RDWR;
2472 			REG_WR(sc, BCE_RV2P_PROC1_ADDR_CMD, val);
2473 		} else {
2474 			val = (i / 8) | BCE_RV2P_PROC2_ADDR_CMD_RDWR;
2475 			REG_WR(sc, BCE_RV2P_PROC2_ADDR_CMD, val);
2476 		}
2477 	}
2478 
2479 	/* Reset the processor, un-stall is done later. */
2480 	if (rv2p_proc == RV2P_PROC1)
2481 		REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC1_RESET);
2482 	else
2483 		REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC2_RESET);
2484 }
2485 
2486 
2487 /****************************************************************************/
2488 /* Load RISC processor firmware.                                            */
2489 /*                                                                          */
2490 /* Loads firmware from the file if_bcefw.h into the scratchpad memory       */
2491 /* associated with a particular processor.                                  */
2492 /*                                                                          */
2493 /* Returns:                                                                 */
2494 /*   Nothing.                                                               */
2495 /****************************************************************************/
2496 static void
2497 bce_load_cpu_fw(struct bce_softc *sc, struct cpu_reg *cpu_reg,
2498 		struct fw_info *fw)
2499 {
2500 	uint32_t offset, val;
2501 	int j;
2502 
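	/*
	 * Section addresses in the firmware image are given in the CPU's
	 * MIPS view; translate them into the scratchpad window before
	 * writing (spad_base + (addr - mips_view_base)).
	 */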
2503 	/* Halt the CPU. */
2504 	val = REG_RD_IND(sc, cpu_reg->mode);
2505 	val |= cpu_reg->mode_value_halt;
2506 	REG_WR_IND(sc, cpu_reg->mode, val);
2507 	REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
2508 
2509 	/* Load the Text area. */
2510 	offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2511 	if (fw->text) {
2512 		for (j = 0; j < (fw->text_len / 4); j++, offset += 4)
2513 			REG_WR_IND(sc, offset, fw->text[j]);
2514 	}
2515 
2516 	/* Load the Data area. */
2517 	offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2518 	if (fw->data) {
2519 		for (j = 0; j < (fw->data_len / 4); j++, offset += 4)
2520 			REG_WR_IND(sc, offset, fw->data[j]);
2521 	}
2522 
2523 	/* Load the SBSS area. */
2524 	offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2525 	if (fw->sbss) {
2526 		for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4)
2527 			REG_WR_IND(sc, offset, fw->sbss[j]);
2528 	}
2529 
2530 	/* Load the BSS area. */
2531 	offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2532 	if (fw->bss) {
2533 		for (j = 0; j < (fw->bss_len/4); j++, offset += 4)
2534 			REG_WR_IND(sc, offset, fw->bss[j]);
2535 	}
2536 
2537 	/* Load the Read-Only area. */
2538 	offset = cpu_reg->spad_base +
2539 		(fw->rodata_addr - cpu_reg->mips_view_base);
2540 	if (fw->rodata) {
2541 		for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4)
2542 			REG_WR_IND(sc, offset, fw->rodata[j]);
2543 	}
2544 
2545 	/* Clear the pre-fetch instruction. */
2546 	REG_WR_IND(sc, cpu_reg->inst, 0);
2547 	REG_WR_IND(sc, cpu_reg->pc, fw->start_addr);
2548 
2549 	/* Start the CPU. */
2550 	val = REG_RD_IND(sc, cpu_reg->mode);
2551 	val &= ~cpu_reg->mode_value_halt;
2552 	REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
2553 	REG_WR_IND(sc, cpu_reg->mode, val);
2554 }
2555 
2556 
2557 /****************************************************************************/
2558 /* Initialize the RV2P, RX, TX, TPAT, and COM CPUs.                         */
2559 /*                                                                          */
2560 /* Loads the firmware for each CPU and starts the CPU.                      */
2561 /*                                                                          */
2562 /* Returns:                                                                 */
2563 /*   Nothing.                                                               */
2564 /****************************************************************************/
2565 static void
2566 bce_init_cpus(struct bce_softc *sc)
2567 {
2568 	struct cpu_reg cpu_reg;
2569 	struct fw_info fw;
2570 
2571 	/* Initialize the RV2P processor. */
2572 	bce_load_rv2p_fw(sc, bce_rv2p_proc1, sizeof(bce_rv2p_proc1), RV2P_PROC1);
2573 	bce_load_rv2p_fw(sc, bce_rv2p_proc2, sizeof(bce_rv2p_proc2), RV2P_PROC2);
2574 
2575 	/* Initialize the RX Processor. */
2576 	cpu_reg.mode = BCE_RXP_CPU_MODE;
2577 	cpu_reg.mode_value_halt = BCE_RXP_CPU_MODE_SOFT_HALT;
2578 	cpu_reg.mode_value_sstep = BCE_RXP_CPU_MODE_STEP_ENA;
2579 	cpu_reg.state = BCE_RXP_CPU_STATE;
2580 	cpu_reg.state_value_clear = 0xffffff;
2581 	cpu_reg.gpr0 = BCE_RXP_CPU_REG_FILE;
2582 	cpu_reg.evmask = BCE_RXP_CPU_EVENT_MASK;
2583 	cpu_reg.pc = BCE_RXP_CPU_PROGRAM_COUNTER;
2584 	cpu_reg.inst = BCE_RXP_CPU_INSTRUCTION;
2585 	cpu_reg.bp = BCE_RXP_CPU_HW_BREAKPOINT;
2586 	cpu_reg.spad_base = BCE_RXP_SCRATCH;
2587 	cpu_reg.mips_view_base = 0x8000000;
2588 
2589 	fw.ver_major = bce_RXP_b06FwReleaseMajor;
2590 	fw.ver_minor = bce_RXP_b06FwReleaseMinor;
2591 	fw.ver_fix = bce_RXP_b06FwReleaseFix;
2592 	fw.start_addr = bce_RXP_b06FwStartAddr;
2593 
2594 	fw.text_addr = bce_RXP_b06FwTextAddr;
2595 	fw.text_len = bce_RXP_b06FwTextLen;
2596 	fw.text_index = 0;
2597 	fw.text = bce_RXP_b06FwText;
2598 
2599 	fw.data_addr = bce_RXP_b06FwDataAddr;
2600 	fw.data_len = bce_RXP_b06FwDataLen;
2601 	fw.data_index = 0;
2602 	fw.data = bce_RXP_b06FwData;
2603 
2604 	fw.sbss_addr = bce_RXP_b06FwSbssAddr;
2605 	fw.sbss_len = bce_RXP_b06FwSbssLen;
2606 	fw.sbss_index = 0;
2607 	fw.sbss = bce_RXP_b06FwSbss;
2608 
2609 	fw.bss_addr = bce_RXP_b06FwBssAddr;
2610 	fw.bss_len = bce_RXP_b06FwBssLen;
2611 	fw.bss_index = 0;
2612 	fw.bss = bce_RXP_b06FwBss;
2613 
2614 	fw.rodata_addr = bce_RXP_b06FwRodataAddr;
2615 	fw.rodata_len = bce_RXP_b06FwRodataLen;
2616 	fw.rodata_index = 0;
2617 	fw.rodata = bce_RXP_b06FwRodata;
2618 
2619 	DBPRINT(sc, BCE_INFO_RESET, "Loading RX firmware.\n");
2620 	bce_load_cpu_fw(sc, &cpu_reg, &fw);
2621 
2622 	/* Initialize the TX Processor. */
2623 	cpu_reg.mode = BCE_TXP_CPU_MODE;
2624 	cpu_reg.mode_value_halt = BCE_TXP_CPU_MODE_SOFT_HALT;
2625 	cpu_reg.mode_value_sstep = BCE_TXP_CPU_MODE_STEP_ENA;
2626 	cpu_reg.state = BCE_TXP_CPU_STATE;
2627 	cpu_reg.state_value_clear = 0xffffff;
2628 	cpu_reg.gpr0 = BCE_TXP_CPU_REG_FILE;
2629 	cpu_reg.evmask = BCE_TXP_CPU_EVENT_MASK;
2630 	cpu_reg.pc = BCE_TXP_CPU_PROGRAM_COUNTER;
2631 	cpu_reg.inst = BCE_TXP_CPU_INSTRUCTION;
2632 	cpu_reg.bp = BCE_TXP_CPU_HW_BREAKPOINT;
2633 	cpu_reg.spad_base = BCE_TXP_SCRATCH;
2634 	cpu_reg.mips_view_base = 0x8000000;
2635 
2636 	fw.ver_major = bce_TXP_b06FwReleaseMajor;
2637 	fw.ver_minor = bce_TXP_b06FwReleaseMinor;
2638 	fw.ver_fix = bce_TXP_b06FwReleaseFix;
2639 	fw.start_addr = bce_TXP_b06FwStartAddr;
2640 
2641 	fw.text_addr = bce_TXP_b06FwTextAddr;
2642 	fw.text_len = bce_TXP_b06FwTextLen;
2643 	fw.text_index = 0;
2644 	fw.text = bce_TXP_b06FwText;
2645 
2646 	fw.data_addr = bce_TXP_b06FwDataAddr;
2647 	fw.data_len = bce_TXP_b06FwDataLen;
2648 	fw.data_index = 0;
2649 	fw.data = bce_TXP_b06FwData;
2650 
2651 	fw.sbss_addr = bce_TXP_b06FwSbssAddr;
2652 	fw.sbss_len = bce_TXP_b06FwSbssLen;
2653 	fw.sbss_index = 0;
2654 	fw.sbss = bce_TXP_b06FwSbss;
2655 
2656 	fw.bss_addr = bce_TXP_b06FwBssAddr;
2657 	fw.bss_len = bce_TXP_b06FwBssLen;
2658 	fw.bss_index = 0;
2659 	fw.bss = bce_TXP_b06FwBss;
2660 
2661 	fw.rodata_addr = bce_TXP_b06FwRodataAddr;
2662 	fw.rodata_len = bce_TXP_b06FwRodataLen;
2663 	fw.rodata_index = 0;
2664 	fw.rodata = bce_TXP_b06FwRodata;
2665 
2666 	DBPRINT(sc, BCE_INFO_RESET, "Loading TX firmware.\n");
2667 	bce_load_cpu_fw(sc, &cpu_reg, &fw);
2668 
2669 	/* Initialize the TX Patch-up Processor. */
2670 	cpu_reg.mode = BCE_TPAT_CPU_MODE;
2671 	cpu_reg.mode_value_halt = BCE_TPAT_CPU_MODE_SOFT_HALT;
2672 	cpu_reg.mode_value_sstep = BCE_TPAT_CPU_MODE_STEP_ENA;
2673 	cpu_reg.state = BCE_TPAT_CPU_STATE;
2674 	cpu_reg.state_value_clear = 0xffffff;
2675 	cpu_reg.gpr0 = BCE_TPAT_CPU_REG_FILE;
2676 	cpu_reg.evmask = BCE_TPAT_CPU_EVENT_MASK;
2677 	cpu_reg.pc = BCE_TPAT_CPU_PROGRAM_COUNTER;
2678 	cpu_reg.inst = BCE_TPAT_CPU_INSTRUCTION;
2679 	cpu_reg.bp = BCE_TPAT_CPU_HW_BREAKPOINT;
2680 	cpu_reg.spad_base = BCE_TPAT_SCRATCH;
2681 	cpu_reg.mips_view_base = 0x8000000;
2682 
2683 	fw.ver_major = bce_TPAT_b06FwReleaseMajor;
2684 	fw.ver_minor = bce_TPAT_b06FwReleaseMinor;
2685 	fw.ver_fix = bce_TPAT_b06FwReleaseFix;
2686 	fw.start_addr = bce_TPAT_b06FwStartAddr;
2687 
2688 	fw.text_addr = bce_TPAT_b06FwTextAddr;
2689 	fw.text_len = bce_TPAT_b06FwTextLen;
2690 	fw.text_index = 0;
2691 	fw.text = bce_TPAT_b06FwText;
2692 
2693 	fw.data_addr = bce_TPAT_b06FwDataAddr;
2694 	fw.data_len = bce_TPAT_b06FwDataLen;
2695 	fw.data_index = 0;
2696 	fw.data = bce_TPAT_b06FwData;
2697 
2698 	fw.sbss_addr = bce_TPAT_b06FwSbssAddr;
2699 	fw.sbss_len = bce_TPAT_b06FwSbssLen;
2700 	fw.sbss_index = 0;
2701 	fw.sbss = bce_TPAT_b06FwSbss;
2702 
2703 	fw.bss_addr = bce_TPAT_b06FwBssAddr;
2704 	fw.bss_len = bce_TPAT_b06FwBssLen;
2705 	fw.bss_index = 0;
2706 	fw.bss = bce_TPAT_b06FwBss;
2707 
2708 	fw.rodata_addr = bce_TPAT_b06FwRodataAddr;
2709 	fw.rodata_len = bce_TPAT_b06FwRodataLen;
2710 	fw.rodata_index = 0;
2711 	fw.rodata = bce_TPAT_b06FwRodata;
2712 
2713 	DBPRINT(sc, BCE_INFO_RESET, "Loading TPAT firmware.\n");
2714 	bce_load_cpu_fw(sc, &cpu_reg, &fw);
2715 
2716 	/* Initialize the Completion Processor. */
2717 	cpu_reg.mode = BCE_COM_CPU_MODE;
2718 	cpu_reg.mode_value_halt = BCE_COM_CPU_MODE_SOFT_HALT;
2719 	cpu_reg.mode_value_sstep = BCE_COM_CPU_MODE_STEP_ENA;
2720 	cpu_reg.state = BCE_COM_CPU_STATE;
2721 	cpu_reg.state_value_clear = 0xffffff;
2722 	cpu_reg.gpr0 = BCE_COM_CPU_REG_FILE;
2723 	cpu_reg.evmask = BCE_COM_CPU_EVENT_MASK;
2724 	cpu_reg.pc = BCE_COM_CPU_PROGRAM_COUNTER;
2725 	cpu_reg.inst = BCE_COM_CPU_INSTRUCTION;
2726 	cpu_reg.bp = BCE_COM_CPU_HW_BREAKPOINT;
2727 	cpu_reg.spad_base = BCE_COM_SCRATCH;
2728 	cpu_reg.mips_view_base = 0x8000000;
2729 
2730 	fw.ver_major = bce_COM_b06FwReleaseMajor;
2731 	fw.ver_minor = bce_COM_b06FwReleaseMinor;
2732 	fw.ver_fix = bce_COM_b06FwReleaseFix;
2733 	fw.start_addr = bce_COM_b06FwStartAddr;
2734 
2735 	fw.text_addr = bce_COM_b06FwTextAddr;
2736 	fw.text_len = bce_COM_b06FwTextLen;
2737 	fw.text_index = 0;
2738 	fw.text = bce_COM_b06FwText;
2739 
2740 	fw.data_addr = bce_COM_b06FwDataAddr;
2741 	fw.data_len = bce_COM_b06FwDataLen;
2742 	fw.data_index = 0;
2743 	fw.data = bce_COM_b06FwData;
2744 
2745 	fw.sbss_addr = bce_COM_b06FwSbssAddr;
2746 	fw.sbss_len = bce_COM_b06FwSbssLen;
2747 	fw.sbss_index = 0;
2748 	fw.sbss = bce_COM_b06FwSbss;
2749 
2750 	fw.bss_addr = bce_COM_b06FwBssAddr;
2751 	fw.bss_len = bce_COM_b06FwBssLen;
2752 	fw.bss_index = 0;
2753 	fw.bss = bce_COM_b06FwBss;
2754 
2755 	fw.rodata_addr = bce_COM_b06FwRodataAddr;
2756 	fw.rodata_len = bce_COM_b06FwRodataLen;
2757 	fw.rodata_index = 0;
2758 	fw.rodata = bce_COM_b06FwRodata;
2759 
2760 	DBPRINT(sc, BCE_INFO_RESET, "Loading COM firmware.\n");
2761 	bce_load_cpu_fw(sc, &cpu_reg, &fw);
2762 }
2763 
2764 
2765 /****************************************************************************/
2766 /* Initialize context memory.                                               */
2767 /*                                                                          */
2768 /* Clears the memory associated with each Context ID (CID).                 */
2769 /*                                                                          */
2770 /* Returns:                                                                 */
2771 /*   Nothing.                                                               */
2772 /****************************************************************************/
2773 static void
2774 bce_init_ctx(struct bce_softc *sc)
2775 {
2776 	uint32_t vcid = 96;
2777 
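	/*
	 * Walk all 96 context IDs from highest to lowest, mapping each
	 * one and zeroing its context memory page by page.
	 */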
2778 	while (vcid) {
2779 		uint32_t vcid_addr, pcid_addr, offset;
2780 		int i;
2781 
2782 		vcid--;
2783 
2784 		vcid_addr = GET_CID_ADDR(vcid);
2785 		pcid_addr = vcid_addr;
2786 
2787 		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2788 			vcid_addr += (i << PHY_CTX_SHIFT);
2789 			pcid_addr += (i << PHY_CTX_SHIFT);
2790 
2791 			REG_WR(sc, BCE_CTX_VIRT_ADDR, vcid_addr);
2792 			REG_WR(sc, BCE_CTX_PAGE_TBL, pcid_addr);
2793 
2794 			/* Zero out the context. */
2795 			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2796 				CTX_WR(sc, vcid_addr, offset, 0);
2797 		}
2798 	}
2799 }
2800 
2801 
2802 /****************************************************************************/
2803 /* Fetch the permanent MAC address of the controller.                       */
2804 /*                                                                          */
2805 /* Returns:                                                                 */
2806 /*   Nothing.                                                               */
2807 /****************************************************************************/
2808 static void
2809 bce_get_mac_addr(struct bce_softc *sc)
2810 {
2811 	uint32_t mac_lo = 0, mac_hi = 0;
2812 
2813 	/*
2814 	 * The NetXtreme II bootcode populates various NIC
2815 	 * power-on and runtime configuration items in a
2816 	 * shared memory area.  The factory configured MAC
2817 	 * address is available from both NVRAM and the
2818 	 * shared memory area so we'll read the value from
2819 	 * shared memory for speed.
2820 	 */
2821 
2822 	mac_hi = REG_RD_IND(sc, sc->bce_shmem_base + BCE_PORT_HW_CFG_MAC_UPPER);
2823 	mac_lo = REG_RD_IND(sc, sc->bce_shmem_base + BCE_PORT_HW_CFG_MAC_LOWER);
2824 
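	/*
	 * The upper register holds the two most significant address
	 * bytes; the lower register holds the remaining four.
	 */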
2825 	if (mac_lo == 0 && mac_hi == 0) {
2826 		if_printf(&sc->arpcom.ac_if, "Invalid Ethernet address!\n");
2827 	} else {
2828 		sc->eaddr[0] = (u_char)(mac_hi >> 8);
2829 		sc->eaddr[1] = (u_char)(mac_hi >> 0);
2830 		sc->eaddr[2] = (u_char)(mac_lo >> 24);
2831 		sc->eaddr[3] = (u_char)(mac_lo >> 16);
2832 		sc->eaddr[4] = (u_char)(mac_lo >> 8);
2833 		sc->eaddr[5] = (u_char)(mac_lo >> 0);
2834 	}
2835 
2836 	DBPRINT(sc, BCE_INFO, "Permanent Ethernet address = %6D\n", sc->eaddr, ":");
2837 }
2838 
2839 
2840 /****************************************************************************/
2841 /* Program the MAC address.                                                 */
2842 /*                                                                          */
2843 /* Returns:                                                                 */
2844 /*   Nothing.                                                               */
2845 /****************************************************************************/
2846 static void
2847 bce_set_mac_addr(struct bce_softc *sc)
2848 {
2849 	const uint8_t *mac_addr = sc->eaddr;
2850 	uint32_t val;
2851 
2852 	DBPRINT(sc, BCE_INFO, "Setting Ethernet address = %6D\n",
2853 		sc->eaddr, ":");
2854 
2855 	val = (mac_addr[0] << 8) | mac_addr[1];
2856 	REG_WR(sc, BCE_EMAC_MAC_MATCH0, val);
2857 
2858 	val = (mac_addr[2] << 24) |
2859 	      (mac_addr[3] << 16) |
2860 	      (mac_addr[4] << 8) |
2861 	      mac_addr[5];
2862 	REG_WR(sc, BCE_EMAC_MAC_MATCH1, val);
2863 }
2864 
2865 
2866 /****************************************************************************/
2867 /* Stop the controller.                                                     */
2868 /*                                                                          */
2869 /* Returns:                                                                 */
2870 /*   Nothing.                                                               */
2871 /****************************************************************************/
2872 static void
2873 bce_stop(struct bce_softc *sc)
2874 {
2875 	struct ifnet *ifp = &sc->arpcom.ac_if;
2876 	struct mii_data *mii = device_get_softc(sc->bce_miibus);
2877 	struct ifmedia_entry *ifm;
2878 	int mtmp, itmp;
2879 
2880 	ASSERT_SERIALIZED(ifp->if_serializer);
2881 
2882 	callout_stop(&sc->bce_stat_ch);
2883 
2884 	/* Disable the transmit/receive blocks. */
2885 	REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS, 0x5ffffff);
2886 	REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS);
2887 	DELAY(20);
2888 
2889 	bce_disable_intr(sc);
2890 
2891 	/* Tell firmware that the driver is going away. */
2892 	bce_reset(sc, BCE_DRV_MSG_CODE_SUSPEND_NO_WOL);
2893 
2894 	/* Free the RX lists. */
2895 	bce_free_rx_chain(sc);
2896 
2897 	/* Free TX buffers. */
2898 	bce_free_tx_chain(sc);
2899 
2900 	/*
2901 	 * Isolate/power down the PHY, but leave the media selection
2902 	 * unchanged so that things will be put back to normal when
2903 	 * we bring the interface back up.
2904 	 *
2905 	 * 'mii' may be NULL if bce_stop() is called by bce_detach().
2906 	 */
2907 	if (mii != NULL) {
2908 		itmp = ifp->if_flags;
2909 		ifp->if_flags |= IFF_UP;
2910 		ifm = mii->mii_media.ifm_cur;
2911 		mtmp = ifm->ifm_media;
2912 		ifm->ifm_media = IFM_ETHER | IFM_NONE;
2913 		mii_mediachg(mii);
2914 		ifm->ifm_media = mtmp;
2915 		ifp->if_flags = itmp;
2916 	}
2917 
2918 	sc->bce_link = 0;
2919 	sc->bce_coalchg_mask = 0;
2920 
2921 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2922 	ifp->if_timer = 0;
2923 
2924 	bce_mgmt_init(sc);
2925 }
2926 
2927 
2928 static int
2929 bce_reset(struct bce_softc *sc, uint32_t reset_code)
2930 {
2931 	uint32_t val;
2932 	int i, rc = 0;
2933 
2934 	/* Wait for pending PCI transactions to complete. */
2935 	REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS,
2936 	       BCE_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
2937 	       BCE_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
2938 	       BCE_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
2939 	       BCE_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
2940 	val = REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS);
2941 	DELAY(5);
2942 
2943 	/* Assume bootcode is running. */
2944 	sc->bce_fw_timed_out = 0;
2945 
2946 	/* Give the firmware a chance to prepare for the reset. */
2947 	rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT0 | reset_code);
2948 	if (rc) {
2949 		if_printf(&sc->arpcom.ac_if,
2950 			  "Firmware is not ready for reset\n");
2951 		return rc;
2952 	}
2953 
2954 	/* Set a firmware reminder that this is a soft reset. */
2955 	REG_WR_IND(sc, sc->bce_shmem_base + BCE_DRV_RESET_SIGNATURE,
2956 		   BCE_DRV_RESET_SIGNATURE_MAGIC);
2957 
2958 	/* Dummy read to force the chip to complete all current transactions. */
2959 	val = REG_RD(sc, BCE_MISC_ID);
2960 
2961 	/* Chip reset. */
2962 	val = BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
2963 	      BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
2964 	      BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
2965 	REG_WR(sc, BCE_PCICFG_MISC_CONFIG, val);
2966 
2967 	/* Allow up to 100us for the reset to complete. */
2968 	for (i = 0; i < 10; i++) {
2969 		val = REG_RD(sc, BCE_PCICFG_MISC_CONFIG);
2970 		if ((val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
2971 			    BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) {
2972 			break;
2973 		}
2974 		DELAY(10);
2975 	}
2976 
2977 	/* Check that reset completed successfully. */
2978 	if (val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
2979 		   BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
2980 		if_printf(&sc->arpcom.ac_if, "Reset failed!\n");
2981 		return EBUSY;
2982 	}
2983 
2984 	/* Make sure byte swapping is properly configured. */
2985 	val = REG_RD(sc, BCE_PCI_SWAP_DIAG0);
2986 	if (val != 0x01020304) {
2987 		if_printf(&sc->arpcom.ac_if, "Byte swap is incorrect!\n");
2988 		return ENODEV;
2989 	}
2990 
2991 	/* Just completed a reset, assume that firmware is running again. */
2992 	sc->bce_fw_timed_out = 0;
2993 
2994 	/* Wait for the firmware to finish its initialization. */
2995 	rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT1 | reset_code);
2996 	if (rc) {
2997 		if_printf(&sc->arpcom.ac_if,
2998 			  "Firmware did not complete initialization!\n");
2999 	}
3000 	return rc;
3001 }
3002 
3003 
3004 static int
3005 bce_chipinit(struct bce_softc *sc)
3006 {
3007 	uint32_t val;
3008 	int rc = 0;
3009 
3010 	/* Make sure the interrupt is not active. */
3011 	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, BCE_PCICFG_INT_ACK_CMD_MASK_INT);
3012 
3013 	/*
3014 	 * Initialize DMA byte/word swapping, configure the number of DMA
3015 	 * channels and PCI clock compensation delay.
3016 	 */
3017 	val = BCE_DMA_CONFIG_DATA_BYTE_SWAP |
3018 	      BCE_DMA_CONFIG_DATA_WORD_SWAP |
3019 #if BYTE_ORDER == BIG_ENDIAN
3020 	      BCE_DMA_CONFIG_CNTL_BYTE_SWAP |
3021 #endif
3022 	      BCE_DMA_CONFIG_CNTL_WORD_SWAP |
3023 	      DMA_READ_CHANS << 12 |
3024 	      DMA_WRITE_CHANS << 16;
3025 
3026 	val |= (0x2 << 20) | BCE_DMA_CONFIG_CNTL_PCI_COMP_DLY;
3027 
3028 	if ((sc->bce_flags & BCE_PCIX_FLAG) && sc->bus_speed_mhz == 133)
3029 		val |= BCE_DMA_CONFIG_PCI_FAST_CLK_CMP;
3030 
3031 	/*
3032 	 * This setting resolves a problem observed on certain Intel PCI
3033 	 * chipsets that cannot handle multiple outstanding DMA operations.
3034 	 * See errata E9_5706A1_65.
3035 	 */
3036 	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706 &&
3037 	    BCE_CHIP_ID(sc) != BCE_CHIP_ID_5706_A0 &&
3038 	    !(sc->bce_flags & BCE_PCIX_FLAG))
3039 		val |= BCE_DMA_CONFIG_CNTL_PING_PONG_DMA;
3040 
3041 	REG_WR(sc, BCE_DMA_CONFIG, val);
3042 
3043 	/* Clear the PCI-X relaxed ordering bit. See errata E3_5708CA0_570. */
3044 	if (sc->bce_flags & BCE_PCIX_FLAG) {
3045 		uint16_t cmd;
3046 
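		/*
		 * In the PCI-X command register, bit 1 (0x2) is the
		 * "enable relaxed ordering" bit that the mask below clears.
		 */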
3047 		cmd = pci_read_config(sc->bce_dev, BCE_PCI_PCIX_CMD, 2);
3048 		pci_write_config(sc->bce_dev, BCE_PCI_PCIX_CMD, cmd & ~0x2, 2);
3049 	}
3050 
3051 	/* Enable the RX_V2P and Context state machines before access. */
3052 	REG_WR(sc, BCE_MISC_ENABLE_SET_BITS,
3053 	       BCE_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
3054 	       BCE_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
3055 	       BCE_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
3056 
3057 	/* Initialize context mapping and zero out the quick contexts. */
3058 	bce_init_ctx(sc);
3059 
3060 	/* Initialize the on-board CPUs */
3061 	bce_init_cpus(sc);
3062 
3063 	/* Prepare NVRAM for access. */
3064 	rc = bce_init_nvram(sc);
3065 	if (rc != 0)
3066 		return rc;
3067 
3068 	/* Set the kernel bypass block size */
3069 	val = REG_RD(sc, BCE_MQ_CONFIG);
3070 	val &= ~BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE;
3071 	val |= BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
3072 	REG_WR(sc, BCE_MQ_CONFIG, val);
3073 
3074 	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
3075 	REG_WR(sc, BCE_MQ_KNL_BYP_WIND_START, val);
3076 	REG_WR(sc, BCE_MQ_KNL_WIND_END, val);
3077 
3078 	/* Set the page size and clear the RV2P processor stall bits. */
3079 	val = (BCM_PAGE_BITS - 8) << 24;
3080 	REG_WR(sc, BCE_RV2P_CONFIG, val);
3081 
3082 	/* Configure page size. */
3083 	val = REG_RD(sc, BCE_TBDR_CONFIG);
3084 	val &= ~BCE_TBDR_CONFIG_PAGE_SIZE;
3085 	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
3086 	REG_WR(sc, BCE_TBDR_CONFIG, val);
3087 
3088 	return 0;
3089 }
3090 
3091 
3092 /****************************************************************************/
3093 /* Initialize the controller in preparation to send/receive traffic.        */
3094 /*                                                                          */
3095 /* Returns:                                                                 */
3096 /*   0 for success, positive value for failure.                             */
3097 /****************************************************************************/
3098 static int
3099 bce_blockinit(struct bce_softc *sc)
3100 {
3101 	uint32_t reg, val;
3102 	int rc = 0;
3103 
3104 	/* Load the hardware default MAC address. */
3105 	bce_set_mac_addr(sc);
3106 
3107 	/* Set the Ethernet backoff seed value */
3108 	val = sc->eaddr[0] + (sc->eaddr[1] << 8) + (sc->eaddr[2] << 16) +
3109 	      sc->eaddr[3] + (sc->eaddr[4] << 8) + (sc->eaddr[5] << 16);
3110 	REG_WR(sc, BCE_EMAC_BACKOFF_SEED, val);
3111 
3112 	sc->last_status_idx = 0;
3113 	sc->rx_mode = BCE_EMAC_RX_MODE_SORT_MODE;
3114 
3115 	/* Set up link change interrupt generation. */
3116 	REG_WR(sc, BCE_EMAC_ATTENTION_ENA, BCE_EMAC_ATTENTION_ENA_LINK);
3117 
3118 	/* Program the physical address of the status block. */
3119 	REG_WR(sc, BCE_HC_STATUS_ADDR_L, BCE_ADDR_LO(sc->status_block_paddr));
3120 	REG_WR(sc, BCE_HC_STATUS_ADDR_H, BCE_ADDR_HI(sc->status_block_paddr));
3121 
3122 	/* Program the physical address of the statistics block. */
3123 	REG_WR(sc, BCE_HC_STATISTICS_ADDR_L,
3124 	       BCE_ADDR_LO(sc->stats_block_paddr));
3125 	REG_WR(sc, BCE_HC_STATISTICS_ADDR_H,
3126 	       BCE_ADDR_HI(sc->stats_block_paddr));
3127 
3128 	/* Program various host coalescing parameters. */
3129 	REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
3130 	       (sc->bce_tx_quick_cons_trip_int << 16) |
3131 	       sc->bce_tx_quick_cons_trip);
3132 	REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
3133 	       (sc->bce_rx_quick_cons_trip_int << 16) |
3134 	       sc->bce_rx_quick_cons_trip);
3135 	REG_WR(sc, BCE_HC_COMP_PROD_TRIP,
3136 	       (sc->bce_comp_prod_trip_int << 16) | sc->bce_comp_prod_trip);
3137 	REG_WR(sc, BCE_HC_TX_TICKS,
3138 	       (sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks);
3139 	REG_WR(sc, BCE_HC_RX_TICKS,
3140 	       (sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks);
3141 	REG_WR(sc, BCE_HC_COM_TICKS,
3142 	       (sc->bce_com_ticks_int << 16) | sc->bce_com_ticks);
3143 	REG_WR(sc, BCE_HC_CMD_TICKS,
3144 	       (sc->bce_cmd_ticks_int << 16) | sc->bce_cmd_ticks);
3145 	REG_WR(sc, BCE_HC_STATS_TICKS, (sc->bce_stats_ticks & 0xffff00));
3146 	REG_WR(sc, BCE_HC_STAT_COLLECT_TICKS, 0xbb8);	/* 3ms */
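	/*
	 * 0xbb8 is 3000 decimal; with what appears to be a 1us tick
	 * granularity that matches the 3ms noted above.
	 */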
3147 	REG_WR(sc, BCE_HC_CONFIG,
3148 	       BCE_HC_CONFIG_TX_TMR_MODE |
3149 	       BCE_HC_CONFIG_COLLECT_STATS);
3150 
3151 	/* Clear the internal statistics counters. */
3152 	REG_WR(sc, BCE_HC_COMMAND, BCE_HC_COMMAND_CLR_STAT_NOW);
3153 
3154 	/* Verify that bootcode is running. */
3155 	reg = REG_RD_IND(sc, sc->bce_shmem_base + BCE_DEV_INFO_SIGNATURE);
3156 
3157 	DBRUNIF(DB_RANDOMTRUE(bce_debug_bootcode_running_failure),
3158 		if_printf(&sc->arpcom.ac_if,
3159 			  "%s(%d): Simulating bootcode failure.\n",
3160 			  __FILE__, __LINE__);
3161 		reg = 0);
3162 
3163 	if ((reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
3164 	    BCE_DEV_INFO_SIGNATURE_MAGIC) {
3165 		if_printf(&sc->arpcom.ac_if,
3166 			  "Bootcode not running! Found: 0x%08X, "
3167 			  "Expected: 0x%08X\n",
3168 			  reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK,
3169 			  BCE_DEV_INFO_SIGNATURE_MAGIC);
3170 		return ENODEV;
3171 	}
3172 
3173 	/* Check if any management firmware is running. */
3174 	reg = REG_RD_IND(sc, sc->bce_shmem_base + BCE_PORT_FEATURE);
3175 	if (reg & (BCE_PORT_FEATURE_ASF_ENABLED |
3176 		   BCE_PORT_FEATURE_IMD_ENABLED)) {
3177 		DBPRINT(sc, BCE_INFO, "Management F/W Enabled.\n");
3178 		sc->bce_flags |= BCE_MFW_ENABLE_FLAG;
3179 	}
3180 
3181 	sc->bce_fw_ver =
3182 		REG_RD_IND(sc, sc->bce_shmem_base + BCE_DEV_INFO_BC_REV);
3183 	DBPRINT(sc, BCE_INFO, "bootcode rev = 0x%08X\n", sc->bce_fw_ver);
3184 
3185 	/* Allow bootcode to apply any additional fixes before enabling MAC. */
3186 	rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT2 | BCE_DRV_MSG_CODE_RESET);
3187 
3188 	/* Enable link state change interrupt generation. */
3189 	REG_WR(sc, BCE_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);
3190 
3191 	/* Enable all remaining blocks in the MAC. */
3192 	REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, 0x5ffffff);
3193 	REG_RD(sc, BCE_MISC_ENABLE_SET_BITS);
3194 	DELAY(20);
3195 
3196 	return 0;
3197 }
3198 
3199 
3200 /****************************************************************************/
3201 /* Encapsulate an mbuf cluster into the rx_bd chain.                        */
3202 /*                                                                          */
3203 /* The NetXtreme II can support Jumbo frames by using multiple rx_bd's.     */
3204 /* This routine will map an mbuf cluster into 1 or more rx_bd's as          */
3205 /* necessary.                                                               */
3206 /*                                                                          */
3207 /* Returns:                                                                 */
3208 /*   0 for success, positive value for failure.                             */
3209 /****************************************************************************/
3210 static int
3211 bce_newbuf_std(struct bce_softc *sc, uint16_t *prod, uint16_t *chain_prod,
3212 	       uint32_t *prod_bseq, int init)
3213 {
3214 	bus_dmamap_t map;
3215 	bus_dma_segment_t seg;
3216 	struct mbuf *m_new;
3217 	int error, nseg;
3218 #ifdef BCE_DEBUG
3219 	uint16_t debug_chain_prod = *chain_prod;
3220 #endif
3221 
3222 	/* Make sure the inputs are valid. */
3223 	DBRUNIF((*chain_prod > MAX_RX_BD),
3224 		if_printf(&sc->arpcom.ac_if, "%s(%d): "
3225 			  "RX producer out of range: 0x%04X > 0x%04X\n",
3226 			  __FILE__, __LINE__,
3227 			  *chain_prod, (uint16_t)MAX_RX_BD));
3228 
3229 	DBPRINT(sc, BCE_VERBOSE_RECV, "%s(enter): prod = 0x%04X, chain_prod = 0x%04X, "
3230 		"prod_bseq = 0x%08X\n", __func__, *prod, *chain_prod, *prod_bseq);
3231 
3232 	DBRUNIF(DB_RANDOMTRUE(bce_debug_mbuf_allocation_failure),
3233 		if_printf(&sc->arpcom.ac_if, "%s(%d): "
3234 			  "Simulating mbuf allocation failure.\n",
3235 			  __FILE__, __LINE__);
3236 		sc->mbuf_alloc_failed++;
3237 		return ENOBUFS);
3238 
3239 	/* This is a new mbuf allocation. */
3240 	m_new = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
3241 	if (m_new == NULL)
3242 		return ENOBUFS;
3243 	DBRUNIF(1, sc->rx_mbuf_alloc++);
3244 
3245 	m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
3246 
3247 	/* Map the mbuf cluster into device memory. */
3248 	error = bus_dmamap_load_mbuf_segment(sc->rx_mbuf_tag,
3249 			sc->rx_mbuf_tmpmap, m_new, &seg, 1, &nseg,
3250 			BUS_DMA_NOWAIT);
3251 	if (error) {
3252 		m_freem(m_new);
3253 		if (init) {
3254 			if_printf(&sc->arpcom.ac_if,
3255 				  "Error mapping mbuf into RX chain!\n");
3256 		}
3257 		DBRUNIF(1, sc->rx_mbuf_alloc--);
3258 		return error;
3259 	}
3260 
3261 	if (sc->rx_mbuf_ptr[*chain_prod] != NULL) {
3262 		bus_dmamap_unload(sc->rx_mbuf_tag,
3263 				  sc->rx_mbuf_map[*chain_prod]);
3264 	}
3265 
3266 	map = sc->rx_mbuf_map[*chain_prod];
3267 	sc->rx_mbuf_map[*chain_prod] = sc->rx_mbuf_tmpmap;
3268 	sc->rx_mbuf_tmpmap = map;
3269 
3270 	/* Watch for overflow. */
3271 	DBRUNIF((sc->free_rx_bd > USABLE_RX_BD),
3272 		if_printf(&sc->arpcom.ac_if, "%s(%d): "
3273 			  "Too many free rx_bd (0x%04X > 0x%04X)!\n",
3274 			  __FILE__, __LINE__, sc->free_rx_bd,
3275 			  (uint16_t)USABLE_RX_BD));
3276 
3277 	/* Update some debug statistics counters */
3278 	DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark),
3279 		sc->rx_low_watermark = sc->free_rx_bd);
3280 	DBRUNIF((sc->free_rx_bd == 0), sc->rx_empty_count++);
3281 
3282 	/* Save the mbuf and update our counter. */
3283 	sc->rx_mbuf_ptr[*chain_prod] = m_new;
3284 	sc->rx_mbuf_paddr[*chain_prod] = seg.ds_addr;
3285 	sc->free_rx_bd--;
3286 
3287 	bce_setup_rxdesc_std(sc, *chain_prod, prod_bseq);
3288 
3289 	DBRUN(BCE_VERBOSE_RECV,
3290 	      bce_dump_rx_mbuf_chain(sc, debug_chain_prod, 1));
3291 
3292 	DBPRINT(sc, BCE_VERBOSE_RECV, "%s(exit): prod = 0x%04X, chain_prod = 0x%04X, "
3293 		"prod_bseq = 0x%08X\n", __func__, *prod, *chain_prod, *prod_bseq);
3294 
3295 	return 0;
3296 }
3297 
3298 
3299 static void
3300 bce_setup_rxdesc_std(struct bce_softc *sc, uint16_t chain_prod, uint32_t *prod_bseq)
3301 {
3302 	struct rx_bd *rxbd;
3303 	bus_addr_t paddr;
3304 	int len;
3305 
3306 	paddr = sc->rx_mbuf_paddr[chain_prod];
3307 	len = sc->rx_mbuf_ptr[chain_prod]->m_len;
3308 
3309 	/* Setup the rx_bd for the first segment. */
3310 	rxbd = &sc->rx_bd_chain[RX_PAGE(chain_prod)][RX_IDX(chain_prod)];
3311 
3312 	rxbd->rx_bd_haddr_lo = htole32(BCE_ADDR_LO(paddr));
3313 	rxbd->rx_bd_haddr_hi = htole32(BCE_ADDR_HI(paddr));
3314 	rxbd->rx_bd_len = htole32(len);
3315 	rxbd->rx_bd_flags = htole32(RX_BD_FLAGS_START);
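	/*
	 * prod_bseq is a running byte count of buffer space handed to the
	 * chip; the accumulated value is later written to the HOST_BSEQ
	 * mailbox so the hardware knows how much new space is available.
	 */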
3316 	*prod_bseq += len;
3317 
3318 	rxbd->rx_bd_flags |= htole32(RX_BD_FLAGS_END);
3319 }
3320 
3321 
3322 /****************************************************************************/
3323 /* Allocate memory and initialize the TX data structures.                   */
3324 /*                                                                          */
3325 /* Returns:                                                                 */
3326 /*   0 for success, positive value for failure.                             */
3327 /****************************************************************************/
3328 static int
3329 bce_init_tx_chain(struct bce_softc *sc)
3330 {
3331 	struct tx_bd *txbd;
3332 	uint32_t val;
3333 	int i, rc = 0;
3334 
3335 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __func__);
3336 
3337 	/* Set the initial TX producer/consumer indices. */
3338 	sc->tx_prod = 0;
3339 	sc->tx_cons = 0;
3340 	sc->tx_prod_bseq = 0;
3341 	sc->used_tx_bd = 0;
3342 	sc->max_tx_bd = USABLE_TX_BD;
3343 	DBRUNIF(1, sc->tx_hi_watermark = USABLE_TX_BD);
3344 	DBRUNIF(1, sc->tx_full_count = 0);
3345 
3346 	/*
3347 	 * The NetXtreme II supports a linked-list structure called
3348 	 * a Buffer Descriptor Chain (or BD chain).  A BD chain
3349 	 * consists of a series of 1 or more chain pages, each of which
3350 	 * consists of a fixed number of BD entries.
3351 	 * The last BD entry on each page is a pointer to the next page
3352 	 * in the chain, and the last pointer in the BD chain
3353 	 * points back to the beginning of the chain.
3354 	 */
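
	/*
	 * A rough sketch of the resulting ring (TX_PAGES and
	 * USABLE_TX_BD_PER_PAGE come from the driver's header):
	 *
	 *   page 0:           bd[0] .. bd[last usable], next -> page 1
	 *   page 1:           bd[0] .. bd[last usable], next -> page 2
	 *   ...
	 *   page TX_PAGES-1:  bd[0] .. bd[last usable], next -> page 0
	 */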
3355 
3356 	/* Set the TX next pointer chain entries. */
3357 	for (i = 0; i < TX_PAGES; i++) {
3358 		int j;
3359 
3360 		txbd = &sc->tx_bd_chain[i][USABLE_TX_BD_PER_PAGE];
3361 
3362 		/* Check if we've reached the last page. */
3363 		if (i == (TX_PAGES - 1))
3364 			j = 0;
3365 		else
3366 			j = i + 1;
3367 
3368 		txbd->tx_bd_haddr_hi =
3369 			htole32(BCE_ADDR_HI(sc->tx_bd_chain_paddr[j]));
3370 		txbd->tx_bd_haddr_lo =
3371 			htole32(BCE_ADDR_LO(sc->tx_bd_chain_paddr[j]));
3372 	}
3373 
3374 	/* Initialize the context ID for an L2 TX chain. */
3375 	val = BCE_L2CTX_TYPE_TYPE_L2;
3376 	val |= BCE_L2CTX_TYPE_SIZE_L2;
3377 	CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TYPE, val);
3378 
3379 	val = BCE_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
3380 	CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_CMD_TYPE, val);
3381 
3382 	/* Point the hardware to the first page in the chain. */
3383 	val = BCE_ADDR_HI(sc->tx_bd_chain_paddr[0]);
3384 	CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TBDR_BHADDR_HI, val);
3385 	val = BCE_ADDR_LO(sc->tx_bd_chain_paddr[0]);
3386 	CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TBDR_BHADDR_LO, val);
3387 
3388 	DBRUN(BCE_VERBOSE_SEND, bce_dump_tx_chain(sc, 0, TOTAL_TX_BD));
3389 
3390 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __func__);
3391 
3392 	return(rc);
3393 }
3394 
3395 
3396 /****************************************************************************/
3397 /* Free memory and clear the TX data structures.                            */
3398 /*                                                                          */
3399 /* Returns:                                                                 */
3400 /*   Nothing.                                                               */
3401 /****************************************************************************/
3402 static void
3403 bce_free_tx_chain(struct bce_softc *sc)
3404 {
3405 	int i;
3406 
3407 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __func__);
3408 
3409 	/* Unmap, unload, and free any mbufs still in the TX mbuf chain. */
3410 	for (i = 0; i < TOTAL_TX_BD; i++) {
3411 		if (sc->tx_mbuf_ptr[i] != NULL) {
3412 			bus_dmamap_unload(sc->tx_mbuf_tag, sc->tx_mbuf_map[i]);
3413 			m_freem(sc->tx_mbuf_ptr[i]);
3414 			sc->tx_mbuf_ptr[i] = NULL;
3415 			DBRUNIF(1, sc->tx_mbuf_alloc--);
3416 		}
3417 	}
3418 
3419 	/* Clear each TX chain page. */
3420 	for (i = 0; i < TX_PAGES; i++)
3421 		bzero(sc->tx_bd_chain[i], BCE_TX_CHAIN_PAGE_SZ);
3422 	sc->used_tx_bd = 0;
3423 
3424 	/* Check if we lost any mbufs in the process. */
3425 	DBRUNIF((sc->tx_mbuf_alloc),
3426 		if_printf(&sc->arpcom.ac_if,
3427 			  "%s(%d): Memory leak! "
3428 			  "Lost %d mbufs from tx chain!\n",
3429 			  __FILE__, __LINE__, sc->tx_mbuf_alloc));
3430 
3431 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __func__);
3432 }
3433 
3434 
3435 /****************************************************************************/
3436 /* Allocate memory and initialize the RX data structures.                   */
3437 /*                                                                          */
3438 /* Returns:                                                                 */
3439 /*   0 for success, positive value for failure.                             */
3440 /****************************************************************************/
3441 static int
3442 bce_init_rx_chain(struct bce_softc *sc)
3443 {
3444 	struct rx_bd *rxbd;
3445 	int i, rc = 0;
3446 	uint16_t prod, chain_prod;
3447 	uint32_t prod_bseq, val;
3448 
3449 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __func__);
3450 
3451 	/* Initialize the RX producer and consumer indices. */
3452 	sc->rx_prod = 0;
3453 	sc->rx_cons = 0;
3454 	sc->rx_prod_bseq = 0;
3455 	sc->free_rx_bd = USABLE_RX_BD;
3456 	sc->max_rx_bd = USABLE_RX_BD;
3457 	DBRUNIF(1, sc->rx_low_watermark = USABLE_RX_BD);
3458 	DBRUNIF(1, sc->rx_empty_count = 0);
3459 
3460 	/* Initialize the RX next pointer chain entries. */
3461 	for (i = 0; i < RX_PAGES; i++) {
3462 		int j;
3463 
3464 		rxbd = &sc->rx_bd_chain[i][USABLE_RX_BD_PER_PAGE];
3465 
3466 		/* Check if we've reached the last page. */
3467 		if (i == (RX_PAGES - 1))
3468 			j = 0;
3469 		else
3470 			j = i + 1;
3471 
3472 		/* Setup the chain page pointers. */
3473 		rxbd->rx_bd_haddr_hi =
3474 			htole32(BCE_ADDR_HI(sc->rx_bd_chain_paddr[j]));
3475 		rxbd->rx_bd_haddr_lo =
3476 			htole32(BCE_ADDR_LO(sc->rx_bd_chain_paddr[j]));
3477 	}
3478 
3479 	/* Initialize the context ID for an L2 RX chain. */
3480 	val = BCE_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
3481 	val |= BCE_L2CTX_CTX_TYPE_SIZE_L2;
3482 	val |= 0x02 << 8;
3483 	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_CTX_TYPE, val);
3484 
3485 	/* Point the hardware to the first page in the chain. */
3486 	/* XXX shouldn't this be after RX descriptor initialization? */
3487 	val = BCE_ADDR_HI(sc->rx_bd_chain_paddr[0]);
3488 	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_NX_BDHADDR_HI, val);
3489 	val = BCE_ADDR_LO(sc->rx_bd_chain_paddr[0]);
3490 	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_NX_BDHADDR_LO, val);
3491 
3492 	/* Allocate mbuf clusters for the rx_bd chain. */
3493 	prod = prod_bseq = 0;
3494 	while (prod < TOTAL_RX_BD) {
3495 		chain_prod = RX_CHAIN_IDX(prod);
3496 		if (bce_newbuf_std(sc, &prod, &chain_prod, &prod_bseq, 1)) {
3497 			if_printf(&sc->arpcom.ac_if,
3498 				  "Error filling RX chain: rx_bd[0x%04X]!\n",
3499 				  chain_prod);
3500 			rc = ENOBUFS;
3501 			break;
3502 		}
3503 		prod = NEXT_RX_BD(prod);
3504 	}
3505 
3506 	/* Save the RX chain producer index. */
3507 	sc->rx_prod = prod;
3508 	sc->rx_prod_bseq = prod_bseq;
3509 
3510 	/* Tell the chip about the waiting rx_bd's. */
3511 	REG_WR16(sc, MB_RX_CID_ADDR + BCE_L2CTX_HOST_BDIDX, sc->rx_prod);
3512 	REG_WR(sc, MB_RX_CID_ADDR + BCE_L2CTX_HOST_BSEQ, sc->rx_prod_bseq);
3513 
3514 	DBRUN(BCE_VERBOSE_RECV, bce_dump_rx_chain(sc, 0, TOTAL_RX_BD));
3515 
3516 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __func__);
3517 
3518 	return(rc);
3519 }
3520 
3521 
3522 /****************************************************************************/
3523 /* Free memory and clear the RX data structures.                            */
3524 /*                                                                          */
3525 /* Returns:                                                                 */
3526 /*   Nothing.                                                               */
3527 /****************************************************************************/
3528 static void
3529 bce_free_rx_chain(struct bce_softc *sc)
3530 {
3531 	int i;
3532 
3533 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __func__);
3534 
3535 	/* Free any mbufs still in the RX mbuf chain. */
3536 	for (i = 0; i < TOTAL_RX_BD; i++) {
3537 		if (sc->rx_mbuf_ptr[i] != NULL) {
3538 			bus_dmamap_unload(sc->rx_mbuf_tag, sc->rx_mbuf_map[i]);
3539 			m_freem(sc->rx_mbuf_ptr[i]);
3540 			sc->rx_mbuf_ptr[i] = NULL;
3541 			DBRUNIF(1, sc->rx_mbuf_alloc--);
3542 		}
3543 	}
3544 
3545 	/* Clear each RX chain page. */
3546 	for (i = 0; i < RX_PAGES; i++)
3547 		bzero(sc->rx_bd_chain[i], BCE_RX_CHAIN_PAGE_SZ);
3548 
3549 	/* Check if we lost any mbufs in the process. */
3550 	DBRUNIF((sc->rx_mbuf_alloc),
3551 		if_printf(&sc->arpcom.ac_if,
3552 			  "%s(%d): Memory leak! "
3553 			  "Lost %d mbufs from rx chain!\n",
3554 			  __FILE__, __LINE__, sc->rx_mbuf_alloc));
3555 
3556 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __func__);
3557 }
3558 
3559 
3560 /****************************************************************************/
3561 /* Set media options.                                                       */
3562 /*                                                                          */
3563 /* Returns:                                                                 */
3564 /*   0 for success, positive value for failure.                             */
3565 /****************************************************************************/
3566 static int
3567 bce_ifmedia_upd(struct ifnet *ifp)
3568 {
3569 	struct bce_softc *sc = ifp->if_softc;
3570 	struct mii_data *mii = device_get_softc(sc->bce_miibus);
3571 
3572 	/*
3573 	 * 'mii' will be NULL when this function is called via the following
3574 	 * code path: bce_attach() -> bce_mgmt_init().
3575 	 */
3576 	if (mii != NULL) {
3577 		/* Make sure the MII bus has been enumerated. */
3578 		sc->bce_link = 0;
3579 		if (mii->mii_instance) {
3580 			struct mii_softc *miisc;
3581 
3582 			LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
3583 				mii_phy_reset(miisc);
3584 		}
3585 		mii_mediachg(mii);
3586 	}
3587 	return 0;
3588 }
3589 
3590 
3591 /****************************************************************************/
3592 /* Reports current media status.                                            */
3593 /*                                                                          */
3594 /* Returns:                                                                 */
3595 /*   Nothing.                                                               */
3596 /****************************************************************************/
3597 static void
3598 bce_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
3599 {
3600 	struct bce_softc *sc = ifp->if_softc;
3601 	struct mii_data *mii = device_get_softc(sc->bce_miibus);
3602 
3603 	mii_pollstat(mii);
3604 	ifmr->ifm_active = mii->mii_media_active;
3605 	ifmr->ifm_status = mii->mii_media_status;
3606 }
3607 
3608 
3609 /****************************************************************************/
3610 /* Handles PHY generated interrupt events.                                  */
3611 /*                                                                          */
3612 /* Returns:                                                                 */
3613 /*   Nothing.                                                               */
3614 /****************************************************************************/
3615 static void
3616 bce_phy_intr(struct bce_softc *sc)
3617 {
3618 	uint32_t new_link_state, old_link_state;
3619 	struct ifnet *ifp = &sc->arpcom.ac_if;
3620 
3621 	ASSERT_SERIALIZED(ifp->if_serializer);
3622 
3623 	new_link_state = sc->status_block->status_attn_bits &
3624 			 STATUS_ATTN_BITS_LINK_STATE;
3625 	old_link_state = sc->status_block->status_attn_bits_ack &
3626 			 STATUS_ATTN_BITS_LINK_STATE;
3627 
3628 	/* Handle any changes if the link state has changed. */
3629 	/* Take action if the link state has changed. */
3630 		DBRUN(BCE_VERBOSE_INTR, bce_dump_status_block(sc));
3631 
3632 		sc->bce_link = 0;
3633 		callout_stop(&sc->bce_stat_ch);
3634 		bce_tick_serialized(sc);
3635 
3636 		/* Update the status_attn_bits_ack field in the status block. */
3637 		if (new_link_state) {
3638 			REG_WR(sc, BCE_PCICFG_STATUS_BIT_SET_CMD,
3639 			       STATUS_ATTN_BITS_LINK_STATE);
3640 			if (bootverbose)
3641 				if_printf(ifp, "Link is now UP.\n");
3642 		} else {
3643 			REG_WR(sc, BCE_PCICFG_STATUS_BIT_CLEAR_CMD,
3644 			       STATUS_ATTN_BITS_LINK_STATE);
3645 			if (bootverbose)
3646 				if_printf(ifp, "Link is now DOWN.\n");
3647 		}
3648 	}
3649 
3650 	/* Acknowledge the link change interrupt. */
3651 	REG_WR(sc, BCE_EMAC_STATUS, BCE_EMAC_STATUS_LINK_CHANGE);
3652 }
3653 
3654 
3655 /****************************************************************************/
3656 /* Reads the receive consumer value from the status block (skipping over    */
3657 /* chain page pointer if necessary).                                        */
3658 /*                                                                          */
3659 /* Returns:                                                                 */
3660 /*   hw_cons                                                                */
3661 /****************************************************************************/
3662 static __inline uint16_t
3663 bce_get_hw_rx_cons(struct bce_softc *sc)
3664 {
3665 	uint16_t hw_cons = sc->status_block->status_rx_quick_consumer_index0;
3666 
3667 	if ((hw_cons & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
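	/*
	 * The final slot of each chain page holds the next-page pointer
	 * rather than a packet, so skip it when the index lands there.
	 */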
3668 		hw_cons++;
3669 	return hw_cons;
3670 }
3671 
3672 
3673 /****************************************************************************/
3674 /* Handles received frame interrupt events.                                 */
3675 /*                                                                          */
3676 /* Returns:                                                                 */
3677 /*   Nothing.                                                               */
3678 /****************************************************************************/
3679 static void
3680 bce_rx_intr(struct bce_softc *sc, int count)
3681 {
3682 	struct ifnet *ifp = &sc->arpcom.ac_if;
3683 	uint16_t hw_cons, sw_cons, sw_chain_cons, sw_prod, sw_chain_prod;
3684 	uint32_t sw_prod_bseq;
3685 	struct mbuf_chain chain[MAXCPU];
3686 
3687 	ASSERT_SERIALIZED(ifp->if_serializer);
3688 
3689 	ether_input_chain_init(chain);
3690 
3691 	DBRUNIF(1, sc->rx_interrupts++);
3692 
3693 	/* Get the hardware's view of the RX consumer index. */
3694 	hw_cons = sc->hw_rx_cons = bce_get_hw_rx_cons(sc);
3695 
3696 	/* Get working copies of the driver's view of the RX indices. */
3697 	sw_cons = sc->rx_cons;
3698 	sw_prod = sc->rx_prod;
3699 	sw_prod_bseq = sc->rx_prod_bseq;
3700 
3701 	DBPRINT(sc, BCE_INFO_RECV, "%s(enter): sw_prod = 0x%04X, "
3702 		"sw_cons = 0x%04X, sw_prod_bseq = 0x%08X\n",
3703 		__func__, sw_prod, sw_cons, sw_prod_bseq);
3704 
3705 	/* Prevent speculative reads from getting ahead of the status block. */
3706 	bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
3707 			  BUS_SPACE_BARRIER_READ);
3708 
3709 	/* Update some debug statistics counters */
3710 	DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark),
3711 		sc->rx_low_watermark = sc->free_rx_bd);
3712 	DBRUNIF((sc->free_rx_bd == 0), sc->rx_empty_count++);
3713 
3714 	/* Scan through the receive chain as long as there is work to do. */
3715 	while (sw_cons != hw_cons) {
3716 		struct mbuf *m = NULL;
3717 		struct l2_fhdr *l2fhdr = NULL;
3718 		struct rx_bd *rxbd;
3719 		unsigned int len;
3720 		uint32_t status = 0;
3721 
3722 #ifdef DEVICE_POLLING
3723 		if (count >= 0 && count-- == 0) {
3724 			sc->hw_rx_cons = sw_cons;
3725 			break;
3726 		}
3727 #endif
3728 
3729 		/*
3730 		 * Convert the producer/consumer indices
3731 		 * to an actual rx_bd index.
3732 		 */
3733 		sw_chain_cons = RX_CHAIN_IDX(sw_cons);
3734 		sw_chain_prod = RX_CHAIN_IDX(sw_prod);
3735 
3736 		/* Get the used rx_bd. */
3737 		rxbd = &sc->rx_bd_chain[RX_PAGE(sw_chain_cons)]
3738 				       [RX_IDX(sw_chain_cons)];
3739 		sc->free_rx_bd++;
3740 
3741 		DBRUN(BCE_VERBOSE_RECV,
3742 		      if_printf(ifp, "%s(): ", __func__);
3743 		      bce_dump_rxbd(sc, sw_chain_cons, rxbd));
3744 
3745 		/* The mbuf is stored with the last rx_bd entry of a packet. */
3746 		if (sc->rx_mbuf_ptr[sw_chain_cons] != NULL) {
3747 			/* Validate that this is the last rx_bd. */
3748 			DBRUNIF((!(rxbd->rx_bd_flags & RX_BD_FLAGS_END)),
3749 				if_printf(ifp, "%s(%d): "
3750 				"Unexpected mbuf found in rx_bd[0x%04X]!\n",
3751 				__FILE__, __LINE__, sw_chain_cons);
3752 				bce_breakpoint(sc));
3753 
3754 			if (sw_chain_cons != sw_chain_prod) {
3755 				if_printf(ifp, "RX cons(%d) != prod(%d), "
3756 					  "drop!\n", sw_chain_cons,
3757 					  sw_chain_prod);
3758 				ifp->if_ierrors++;
3759 
3760 				bce_setup_rxdesc_std(sc, sw_chain_cons,
3761 						     &sw_prod_bseq);
3762 				m = NULL;
3763 				goto bce_rx_int_next_rx;
3764 			}
3765 
3766 			/* Sync the mbuf data so the CPU sees the received frame. */
3767 			bus_dmamap_sync(sc->rx_mbuf_tag,
3768 					sc->rx_mbuf_map[sw_chain_cons],
3769 					BUS_DMASYNC_POSTREAD);
3770 
3771 			/* Save the mbuf from the driver's chain. */
3772 			m = sc->rx_mbuf_ptr[sw_chain_cons];
3773 
3774 			/*
3775 			 * Frames received on the NetXtreme II are prepended
3776 			 * with an l2_fhdr structure which provides status
3777 			 * information about the received frame (including
3778 			 * VLAN tags and checksum info).  The frames are also
3779 			 * automatically adjusted to align the IP header
3780 			 * (i.e. two null bytes are inserted before the
3781 			 * Ethernet header).
3782 			 */
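			/*
			 * Rough layout of the received buffer (ETHER_ALIGN
			 * is the two-byte pad mentioned above):
			 *
			 *   [l2_fhdr][2 pad bytes][Ethernet header][payload]
			 *
			 * The m_adj() call further below strips the l2_fhdr
			 * and the pad before the frame is passed up.
			 */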
3783 			l2fhdr = mtod(m, struct l2_fhdr *);
3784 
3785 			len = l2fhdr->l2_fhdr_pkt_len;
3786 			status = l2fhdr->l2_fhdr_status;
3787 
3788 			DBRUNIF(DB_RANDOMTRUE(bce_debug_l2fhdr_status_check),
3789 				if_printf(ifp,
3790 				"Simulating l2_fhdr status error.\n");
3791 				status = status | L2_FHDR_ERRORS_PHY_DECODE);
3792 
3793 			/* Watch for unusually sized frames. */
3794 			DBRUNIF((len < BCE_MIN_MTU ||
3795 				 len > BCE_MAX_JUMBO_ETHER_MTU_VLAN),
3796 				if_printf(ifp,
3797 				"%s(%d): Unusual frame size found. "
3798 				"Min(%d), Actual(%d), Max(%d)\n",
3799 				__FILE__, __LINE__,
3800 				(int)BCE_MIN_MTU, len,
3801 				(int)BCE_MAX_JUMBO_ETHER_MTU_VLAN);
3802 				bce_dump_mbuf(sc, m);
3803 		 		bce_breakpoint(sc));
3804 
3805 			len -= ETHER_CRC_LEN;
3806 
3807 			/* Check the received frame for errors. */
3808 			if (status & (L2_FHDR_ERRORS_BAD_CRC |
3809 				      L2_FHDR_ERRORS_PHY_DECODE |
3810 				      L2_FHDR_ERRORS_ALIGNMENT |
3811 				      L2_FHDR_ERRORS_TOO_SHORT |
3812 				      L2_FHDR_ERRORS_GIANT_FRAME)) {
3813 				ifp->if_ierrors++;
3814 				DBRUNIF(1, sc->l2fhdr_status_errors++);
3815 
3816 				/* Reuse the mbuf for a new frame. */
3817 				bce_setup_rxdesc_std(sc, sw_chain_prod,
3818 						     &sw_prod_bseq);
3819 				m = NULL;
3820 				goto bce_rx_int_next_rx;
3821 			}
3822 
3823 			/*
3824 			 * Get a new mbuf for the rx_bd.   If no new
3825 			 * mbufs are available then reuse the current mbuf,
3826 			 * log an ierror on the interface, and generate
3827 			 * an error in the system log.
3828 			 */
3829 			if (bce_newbuf_std(sc, &sw_prod, &sw_chain_prod,
3830 					   &sw_prod_bseq, 0)) {
3831 				DBRUN(BCE_WARN,
3832 				      if_printf(ifp,
3833 				      "%s(%d): Failed to allocate new mbuf, "
3834 				      "incoming frame dropped!\n",
3835 				      __FILE__, __LINE__));
3836 
3837 				ifp->if_ierrors++;
3838 
3839 				/* Try to reuse the existing mbuf. */
3840 				bce_setup_rxdesc_std(sc, sw_chain_prod,
3841 						     &sw_prod_bseq);
3842 				m = NULL;
3843 				goto bce_rx_int_next_rx;
3844 			}
3845 
3846 			/*
3847 			 * Skip over the l2_fhdr when passing
3848 			 * the data up the stack.
3849 			 */
3850 			m_adj(m, sizeof(struct l2_fhdr) + ETHER_ALIGN);
3851 
3852 			m->m_pkthdr.len = m->m_len = len;
3853 			m->m_pkthdr.rcvif = ifp;
3854 
3855 			DBRUN(BCE_VERBOSE_RECV,
3856 			      struct ether_header *eh;
3857 			      eh = mtod(m, struct ether_header *);
3858 			      if_printf(ifp, "%s(): to: %6D, from: %6D, "
3859 			      		"type: 0x%04X\n", __func__,
3860 					eh->ether_dhost, ":",
3861 					eh->ether_shost, ":",
3862 					htons(eh->ether_type)));
3863 
3864 			/* Validate the checksum if offload enabled. */
3865 			if (ifp->if_capenable & IFCAP_RXCSUM) {
3866 				/* Check for an IP datagram. */
3867 				if (status & L2_FHDR_STATUS_IP_DATAGRAM) {
3868 					m->m_pkthdr.csum_flags |=
3869 						CSUM_IP_CHECKED;
3870 
3871 					/* Check if the IP checksum is valid. */
3872 					if ((l2fhdr->l2_fhdr_ip_xsum ^
3873 					     0xffff) == 0) {
3874 						m->m_pkthdr.csum_flags |=
3875 							CSUM_IP_VALID;
3876 					} else {
3877 						DBPRINT(sc, BCE_WARN_RECV,
3878 							"%s(): Invalid IP checksum = 0x%04X!\n",
3879 							__func__, l2fhdr->l2_fhdr_ip_xsum);
3880 					}
3881 				}
3882 
3883 				/* Check for a valid TCP/UDP frame. */
3884 				if (status & (L2_FHDR_STATUS_TCP_SEGMENT |
3885 					      L2_FHDR_STATUS_UDP_DATAGRAM)) {
3886 
3887 					/* Check for a good TCP/UDP checksum. */
3888 					if ((status &
3889 					     (L2_FHDR_ERRORS_TCP_XSUM |
3890 					      L2_FHDR_ERRORS_UDP_XSUM)) == 0) {
3891 						m->m_pkthdr.csum_data =
3892 						l2fhdr->l2_fhdr_tcp_udp_xsum;
3893 						m->m_pkthdr.csum_flags |=
3894 							CSUM_DATA_VALID |
3895 							CSUM_PSEUDO_HDR;
3896 					} else {
3897 						DBPRINT(sc, BCE_WARN_RECV,
3898 							"%s(): Invalid TCP/UDP checksum = 0x%04X!\n",
3899 							__func__, l2fhdr->l2_fhdr_tcp_udp_xsum);
3900 					}
3901 				}
3902 			}
3903 
3904 			ifp->if_ipackets++;
3905 bce_rx_int_next_rx:
3906 			sw_prod = NEXT_RX_BD(sw_prod);
3907 		}
3908 
3909 		sw_cons = NEXT_RX_BD(sw_cons);
3910 
3911 		/* If we have a packet, pass it up the stack */
3912 		if (m) {
3913 			DBPRINT(sc, BCE_VERBOSE_RECV,
3914 				"%s(): Passing received frame up.\n", __func__);
3915 
3916 			if (status & L2_FHDR_STATUS_L2_VLAN_TAG) {
3917 				m->m_flags |= M_VLANTAG;
3918 				m->m_pkthdr.ether_vlantag =
3919 					l2fhdr->l2_fhdr_vlan_tag;
3920 			}
3921 			ether_input_chain(ifp, m, NULL, chain);
3922 
3923 			DBRUNIF(1, sc->rx_mbuf_alloc--);
3924 		}
3925 
3926 		/*
3927 		 * If polling(4) is not enabled, refresh hw_cons to see
3928 		 * whether there's new work.
3929 		 *
3930 		 * If polling(4) is enabled, i.e. count >= 0, refreshing
3931 		 * should not be performed, so that we would not spend
3932 		 * too much time in RX processing.
3933 		 */
3934 		if (count < 0 && sw_cons == hw_cons)
3935 			hw_cons = sc->hw_rx_cons = bce_get_hw_rx_cons(sc);
3936 
3937 		/*
3938 		 * Prevent speculative reads from getting ahead
3939 		 * of the status block.
3940 		 */
3941 		bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
3942 				  BUS_SPACE_BARRIER_READ);
3943 	}
3944 
3945 	ether_input_dispatch(chain);
3946 
3947 	sc->rx_cons = sw_cons;
3948 	sc->rx_prod = sw_prod;
3949 	sc->rx_prod_bseq = sw_prod_bseq;
3950 
3951 	REG_WR16(sc, MB_RX_CID_ADDR + BCE_L2CTX_HOST_BDIDX, sc->rx_prod);
3952 	REG_WR(sc, MB_RX_CID_ADDR + BCE_L2CTX_HOST_BSEQ, sc->rx_prod_bseq);
3953 
3954 	DBPRINT(sc, BCE_INFO_RECV, "%s(exit): rx_prod = 0x%04X, "
3955 		"rx_cons = 0x%04X, rx_prod_bseq = 0x%08X\n",
3956 		__func__, sc->rx_prod, sc->rx_cons, sc->rx_prod_bseq);
3957 }
3958 
3959 
3960 /****************************************************************************/
3961 /* Reads the transmit consumer value from the status block (skipping over   */
3962 /* chain page pointer if necessary).                                        */
3963 /*                                                                          */
3964 /* Returns:                                                                 */
3965 /*   hw_cons                                                                */
3966 /****************************************************************************/
3967 static __inline uint16_t
3968 bce_get_hw_tx_cons(struct bce_softc *sc)
3969 {
3970 	uint16_t hw_cons = sc->status_block->status_tx_quick_consumer_index0;
3971 
3972 	if ((hw_cons & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
3973 		hw_cons++;
3974 	return hw_cons;
3975 }
3976 
3977 
3978 /****************************************************************************/
3979 /* Handles transmit completion interrupt events.                            */
3980 /*                                                                          */
3981 /* Returns:                                                                 */
3982 /*   Nothing.                                                               */
3983 /****************************************************************************/
3984 static void
3985 bce_tx_intr(struct bce_softc *sc)
3986 {
3987 	struct ifnet *ifp = &sc->arpcom.ac_if;
3988 	uint16_t hw_tx_cons, sw_tx_cons, sw_tx_chain_cons;
3989 
3990 	ASSERT_SERIALIZED(ifp->if_serializer);
3991 
3992 	DBRUNIF(1, sc->tx_interrupts++);
3993 
3994 	/* Get the hardware's view of the TX consumer index. */
3995 	hw_tx_cons = sc->hw_tx_cons = bce_get_hw_tx_cons(sc);
3996 	sw_tx_cons = sc->tx_cons;
3997 
3998 	/* Prevent speculative reads from getting ahead of the status block. */
3999 	bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
4000 			  BUS_SPACE_BARRIER_READ);
4001 
4002 	/* Cycle through any completed TX chain page entries. */
4003 	while (sw_tx_cons != hw_tx_cons) {
4004 #ifdef BCE_DEBUG
4005 		struct tx_bd *txbd = NULL;
4006 #endif
4007 		sw_tx_chain_cons = TX_CHAIN_IDX(sw_tx_cons);
4008 
4009 		DBPRINT(sc, BCE_INFO_SEND,
4010 			"%s(): hw_tx_cons = 0x%04X, sw_tx_cons = 0x%04X, "
4011 			"sw_tx_chain_cons = 0x%04X\n",
4012 			__func__, hw_tx_cons, sw_tx_cons, sw_tx_chain_cons);
4013 
4014 		DBRUNIF((sw_tx_chain_cons > MAX_TX_BD),
4015 			if_printf(ifp, "%s(%d): "
4016 				  "TX chain consumer out of range! "
4017 				  " 0x%04X > 0x%04X\n",
4018 				  __FILE__, __LINE__, sw_tx_chain_cons,
4019 				  (int)MAX_TX_BD);
4020 			bce_breakpoint(sc));
4021 
4022 		DBRUNIF(1, txbd = &sc->tx_bd_chain[TX_PAGE(sw_tx_chain_cons)]
4023 				[TX_IDX(sw_tx_chain_cons)]);
4024 
4025 		DBRUNIF((txbd == NULL),
4026 			if_printf(ifp, "%s(%d): "
4027 				  "Unexpected NULL tx_bd[0x%04X]!\n",
4028 				  __FILE__, __LINE__, sw_tx_chain_cons);
4029 			bce_breakpoint(sc));
4030 
4031 		DBRUN(BCE_INFO_SEND,
4032 		      if_printf(ifp, "%s(): ", __func__);
4033 		      bce_dump_txbd(sc, sw_tx_chain_cons, txbd));
4034 
4035 		/*
4036 		 * Free the associated mbuf. Remember
4037 		 * that only the last tx_bd of a packet
4038 		 * has an mbuf pointer and DMA map.
4039 		 */
4040 		if (sc->tx_mbuf_ptr[sw_tx_chain_cons] != NULL) {
4041 			/* Validate that this is the last tx_bd. */
4042 			DBRUNIF((!(txbd->tx_bd_flags & TX_BD_FLAGS_END)),
4043 				if_printf(ifp, "%s(%d): "
4044 				"tx_bd END flag not set but "
4045 				"txmbuf != NULL!\n", __FILE__, __LINE__);
4046 				bce_breakpoint(sc));
4047 
4048 			DBRUN(BCE_INFO_SEND,
4049 			      if_printf(ifp, "%s(): Unloading map/freeing mbuf "
4050 			      		"from tx_bd[0x%04X]\n", __func__,
4051 					sw_tx_chain_cons));
4052 
4053 			/* Unmap the mbuf. */
4054 			bus_dmamap_unload(sc->tx_mbuf_tag,
4055 					  sc->tx_mbuf_map[sw_tx_chain_cons]);
4056 
4057 			/* Free the mbuf. */
4058 			m_freem(sc->tx_mbuf_ptr[sw_tx_chain_cons]);
4059 			sc->tx_mbuf_ptr[sw_tx_chain_cons] = NULL;
4060 			DBRUNIF(1, sc->tx_mbuf_alloc--);
4061 
4062 			ifp->if_opackets++;
4063 		}
4064 
4065 		sc->used_tx_bd--;
4066 		sw_tx_cons = NEXT_TX_BD(sw_tx_cons);
4067 
4068 		if (sw_tx_cons == hw_tx_cons) {
4069 			/* Refresh hw_cons to see if there's new work. */
4070 			hw_tx_cons = sc->hw_tx_cons = bce_get_hw_tx_cons(sc);
4071 		}
4072 
4073 		/*
4074 		 * Prevent speculative reads from getting
4075 		 * ahead of the status block.
4076 		 */
4077 		bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
4078 				  BUS_SPACE_BARRIER_READ);
4079 	}
4080 
4081 	if (sc->used_tx_bd == 0) {
4082 		/* Clear the TX timeout timer. */
4083 		ifp->if_timer = 0;
4084 	}
4085 
4086 	/* Clear the tx hardware queue full flag. */
4087 	if (sc->max_tx_bd - sc->used_tx_bd >= BCE_TX_SPARE_SPACE) {
4088 		DBRUNIF((ifp->if_flags & IFF_OACTIVE),
4089 			DBPRINT(sc, BCE_WARN_SEND,
4090 				"%s(): Open TX chain! %d/%d (used/total)\n",
4091 				__func__, sc->used_tx_bd, sc->max_tx_bd));
4092 		ifp->if_flags &= ~IFF_OACTIVE;
4093 	}
4094 	sc->tx_cons = sw_tx_cons;
4095 }
4096 
4097 
4098 /****************************************************************************/
4099 /* Disables interrupt generation.                                           */
4100 /*                                                                          */
4101 /* Returns:                                                                 */
4102 /*   Nothing.                                                               */
4103 /****************************************************************************/
4104 static void
4105 bce_disable_intr(struct bce_softc *sc)
4106 {
4107 	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, BCE_PCICFG_INT_ACK_CMD_MASK_INT);
4108 	REG_RD(sc, BCE_PCICFG_INT_ACK_CMD);
4109 	lwkt_serialize_handler_disable(sc->arpcom.ac_if.if_serializer);
4110 }
4111 
4112 
4113 /****************************************************************************/
4114 /* Enables interrupt generation.                                            */
4115 /*                                                                          */
4116 /* Returns:                                                                 */
4117 /*   Nothing.                                                               */
4118 /****************************************************************************/
4119 static void
4120 bce_enable_intr(struct bce_softc *sc)
4121 {
4122 	uint32_t val;
4123 
4124 	lwkt_serialize_handler_enable(sc->arpcom.ac_if.if_serializer);
4125 
4126 	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
4127 	       BCE_PCICFG_INT_ACK_CMD_INDEX_VALID |
4128 	       BCE_PCICFG_INT_ACK_CMD_MASK_INT | sc->last_status_idx);
4129 
4130 	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
4131 	       BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx);
4132 
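	/*
	 * The COAL_NOW write below forces an immediate coalescing pass so
	 * that any events which arrived while interrupts were masked are
	 * reported right away.
	 */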
4133 	val = REG_RD(sc, BCE_HC_COMMAND);
4134 	REG_WR(sc, BCE_HC_COMMAND, val | BCE_HC_COMMAND_COAL_NOW);
4135 }
4136 
4137 
4138 /****************************************************************************/
4139 /* Handles controller initialization.                                       */
4140 /*                                                                          */
4141 /* Returns:                                                                 */
4142 /*   Nothing.                                                               */
4143 /****************************************************************************/
4144 static void
4145 bce_init(void *xsc)
4146 {
4147 	struct bce_softc *sc = xsc;
4148 	struct ifnet *ifp = &sc->arpcom.ac_if;
4149 	uint32_t ether_mtu;
4150 	int error;
4151 
4152 	ASSERT_SERIALIZED(ifp->if_serializer);
4153 
4154 	/* Check if the driver is still running and bail out if it is. */
4155 	if (ifp->if_flags & IFF_RUNNING)
4156 		return;
4157 
4158 	bce_stop(sc);
4159 
4160 	error = bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
4161 	if (error) {
4162 		if_printf(ifp, "Controller reset failed!\n");
4163 		goto back;
4164 	}
4165 
4166 	error = bce_chipinit(sc);
4167 	if (error) {
4168 		if_printf(ifp, "Controller initialization failed!\n");
4169 		goto back;
4170 	}
4171 
4172 	error = bce_blockinit(sc);
4173 	if (error) {
4174 		if_printf(ifp, "Block initialization failed!\n");
4175 		goto back;
4176 	}
4177 
4178 	/* Load our MAC address. */
4179 	bcopy(IF_LLADDR(ifp), sc->eaddr, ETHER_ADDR_LEN);
4180 	bce_set_mac_addr(sc);
4181 
4182 	/* Calculate and program the Ethernet MTU size. */
4183 	ether_mtu = ETHER_HDR_LEN + EVL_ENCAPLEN + ifp->if_mtu + ETHER_CRC_LEN;
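	/* e.g. with the default 1500 byte MTU: 14 + 4 + 1500 + 4 = 1522 bytes */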
4184 
4185 	DBPRINT(sc, BCE_INFO, "%s(): setting mtu = %d\n", __func__, ether_mtu);
4186 
4187 	/*
4188 	 * Program the mtu, enabling jumbo frame
4189 	 * support if necessary.  Also set the mbuf
4190 	 * allocation size for RX frames.
4191 	 */
4192 	if (ether_mtu > ETHER_MAX_LEN + EVL_ENCAPLEN) {
4193 #ifdef notyet
4194 		REG_WR(sc, BCE_EMAC_RX_MTU_SIZE,
4195 		       min(ether_mtu, BCE_MAX_JUMBO_ETHER_MTU) |
4196 		       BCE_EMAC_RX_MTU_SIZE_JUMBO_ENA);
4197 		sc->mbuf_alloc_size = MJUM9BYTES;
4198 #else
4199 		panic("jumbo buffer is not supported yet\n");
4200 #endif
4201 	} else {
4202 		REG_WR(sc, BCE_EMAC_RX_MTU_SIZE, ether_mtu);
4203 		sc->mbuf_alloc_size = MCLBYTES;
4204 	}
4205 
4206 	/* Calculate the RX Ethernet frame size for rx_bd's. */
4207 	sc->max_frame_size = sizeof(struct l2_fhdr) + 2 + ether_mtu + 8;
4208 
4209 	DBPRINT(sc, BCE_INFO,
4210 		"%s(): mclbytes = %d, mbuf_alloc_size = %d, "
4211 		"max_frame_size = %d\n",
4212 		__func__, (int)MCLBYTES, sc->mbuf_alloc_size,
4213 		sc->max_frame_size);
4214 
4215 	/* Program appropriate promiscuous/multicast filtering. */
4216 	bce_set_rx_mode(sc);
4217 
4218 	/* Init RX buffer descriptor chain. */
4219 	bce_init_rx_chain(sc);	/* XXX return value */
4220 
4221 	/* Init TX buffer descriptor chain. */
4222 	bce_init_tx_chain(sc);	/* XXX return value */
4223 
4224 #ifdef DEVICE_POLLING
4225 	/* Disable interrupts if we are polling. */
4226 	if (ifp->if_flags & IFF_POLLING) {
4227 		bce_disable_intr(sc);
4228 
4229 		REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
4230 		       (1 << 16) | sc->bce_rx_quick_cons_trip);
4231 		REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
4232 		       (1 << 16) | sc->bce_tx_quick_cons_trip);
4233 	} else
4234 #endif
4235 	/* Enable host interrupts. */
4236 	bce_enable_intr(sc);
4237 
4238 	bce_ifmedia_upd(ifp);
4239 
4240 	ifp->if_flags |= IFF_RUNNING;
4241 	ifp->if_flags &= ~IFF_OACTIVE;
4242 
4243 	callout_reset(&sc->bce_stat_ch, hz, bce_tick, sc);
4244 back:
4245 	if (error)
4246 		bce_stop(sc);
4247 }
4248 
4249 
4250 /****************************************************************************/
4251 /* Initialize the controller just enough so that any management firmware    */
4252 /* running on the device will continue to operate correctly.                */
4253 /*                                                                          */
4254 /* Returns:                                                                 */
4255 /*   Nothing.                                                               */
4256 /****************************************************************************/
4257 static void
4258 bce_mgmt_init(struct bce_softc *sc)
4259 {
4260 	struct ifnet *ifp = &sc->arpcom.ac_if;
4261 	uint32_t val;
4262 
4263 	/* Check if the driver is still running and bail out if it is. */
4264 	if (ifp->if_flags & IFF_RUNNING)
4265 		return;
4266 
4267 	/* Initialize the on-board CPUs */
4268 	bce_init_cpus(sc);
4269 
4270 	/* Set the page size and clear the RV2P processor stall bits. */
4271 	val = (BCM_PAGE_BITS - 8) << 24;
4272 	REG_WR(sc, BCE_RV2P_CONFIG, val);
4273 
4274 	/* Enable all critical blocks in the MAC. */
4275 	REG_WR(sc, BCE_MISC_ENABLE_SET_BITS,
4276 	       BCE_MISC_ENABLE_SET_BITS_RX_V2P_ENABLE |
4277 	       BCE_MISC_ENABLE_SET_BITS_RX_DMA_ENABLE |
4278 	       BCE_MISC_ENABLE_SET_BITS_COMPLETION_ENABLE);
4279 	REG_RD(sc, BCE_MISC_ENABLE_SET_BITS);
4280 	DELAY(20);
4281 
4282 	bce_ifmedia_upd(ifp);
4283 }
4284 
4285 
4286 /****************************************************************************/
4287 /* Encapsulates an mbuf cluster into the tx_bd chain structure and makes   */
4288 /* the memory visible to the controller.                                    */
4289 /*                                                                          */
4290 /* Returns:                                                                 */
4291 /*   0 for success, positive value for failure.                             */
4292 /****************************************************************************/
4293 static int
4294 bce_encap(struct bce_softc *sc, struct mbuf **m_head)
4295 {
4296 	bus_dma_segment_t segs[BCE_MAX_SEGMENTS];
4297 	bus_dmamap_t map, tmp_map;
4298 	struct mbuf *m0 = *m_head;
4299 	struct tx_bd *txbd = NULL;
4300 	uint16_t vlan_tag = 0, flags = 0;
4301 	uint16_t chain_prod, chain_prod_start, prod;
4302 	uint32_t prod_bseq;
4303 	int i, error, maxsegs, nsegs;
4304 #ifdef BCE_DEBUG
4305 	uint16_t debug_prod;
4306 #endif
4307 
4308 	/* Transfer any checksum offload flags to the bd. */
4309 	if (m0->m_pkthdr.csum_flags) {
4310 		if (m0->m_pkthdr.csum_flags & CSUM_IP)
4311 			flags |= TX_BD_FLAGS_IP_CKSUM;
4312 		if (m0->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
4313 			flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
4314 	}
4315 
4316 	/* Transfer any VLAN tags to the bd. */
4317 	if (m0->m_flags & M_VLANTAG) {
4318 		flags |= TX_BD_FLAGS_VLAN_TAG;
4319 		vlan_tag = m0->m_pkthdr.ether_vlantag;
4320 	}
4321 
4322 	prod = sc->tx_prod;
4323 	chain_prod_start = chain_prod = TX_CHAIN_IDX(prod);
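	/*
	 * 'prod' is the free-running producer index; TX_CHAIN_IDX() folds
	 * it into an actual slot within the multi-page tx_bd array.
	 */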
4324 
4325 	/* Grab the DMA map associated with the starting tx_bd slot. */
4326 	map = sc->tx_mbuf_map[chain_prod_start];
4327 
4328 	maxsegs = sc->max_tx_bd - sc->used_tx_bd;
4329 	KASSERT(maxsegs >= BCE_TX_SPARE_SPACE,
4330 		("not enough segments %d\n", maxsegs));
4331 	if (maxsegs > BCE_MAX_SEGMENTS)
4332 		maxsegs = BCE_MAX_SEGMENTS;
4333 
4334 	/* Map the mbuf into our DMA address space. */
4335 	error = bus_dmamap_load_mbuf_defrag(sc->tx_mbuf_tag, map, m_head,
4336 			segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
4337 	if (error)
4338 		goto back;
4339 	bus_dmamap_sync(sc->tx_mbuf_tag, map, BUS_DMASYNC_PREWRITE);
4340 
4341 	/* Reload m0; the defragmenting load above may have replaced the mbuf. */
4342 	m0 = *m_head;
4343 
4344 	/* prod points to an empty tx_bd at this point. */
4345 	prod_bseq  = sc->tx_prod_bseq;
4346 
4347 #ifdef BCE_DEBUG
4348 	debug_prod = chain_prod;
4349 #endif
4350 
4351 	DBPRINT(sc, BCE_INFO_SEND,
4352 		"%s(): Start: prod = 0x%04X, chain_prod = %04X, "
4353 		"prod_bseq = 0x%08X\n",
4354 		__func__, prod, chain_prod, prod_bseq);
4355 
4356 	/*
4357 	 * Cycle through each mbuf segment that makes up
4358 	 * the outgoing frame, gathering the mapping info
4359 	 * for that segment and creating a tx_bd for
4360 	 * the mbuf.
4361 	 */
4362 	for (i = 0; i < nsegs; i++) {
4363 		chain_prod = TX_CHAIN_IDX(prod);
4364 		txbd = &sc->tx_bd_chain[TX_PAGE(chain_prod)][TX_IDX(chain_prod)];
4365 
4366 		txbd->tx_bd_haddr_lo = htole32(BCE_ADDR_LO(segs[i].ds_addr));
4367 		txbd->tx_bd_haddr_hi = htole32(BCE_ADDR_HI(segs[i].ds_addr));
4368 		txbd->tx_bd_mss_nbytes = htole16(segs[i].ds_len);
4369 		txbd->tx_bd_vlan_tag = htole16(vlan_tag);
4370 		txbd->tx_bd_flags = htole16(flags);
4371 		prod_bseq += segs[i].ds_len;
4372 		if (i == 0)
4373 			txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_START);
4374 		prod = NEXT_TX_BD(prod);
4375 	}
4376 
4377 	/* Set the END flag on the last TX buffer descriptor. */
4378 	txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_END);
4379 
4380 	DBRUN(BCE_EXCESSIVE_SEND,
4381 	      bce_dump_tx_chain(sc, debug_prod, nsegs));
4382 
4383 	DBPRINT(sc, BCE_INFO_SEND,
4384 		"%s(): End: prod = 0x%04X, chain_prod = %04X, "
4385 		"prod_bseq = 0x%08X\n",
4386 		__func__, prod, chain_prod, prod_bseq);
4387 
4388 	/*
4389 	 * Ensure that the mbuf pointer for this transmission
4390 	 * is placed at the array index of the last
4391 	 * descriptor in this chain.  This is done
4392 	 * because a single map is used for all
4393 	 * segments of the mbuf and we don't want to
4394 	 * unload the map before all of the segments
4395 	 * have been freed.
4396 	 */
4397 	sc->tx_mbuf_ptr[chain_prod] = m0;
4398 
4399 	tmp_map = sc->tx_mbuf_map[chain_prod];
4400 	sc->tx_mbuf_map[chain_prod] = map;
4401 	sc->tx_mbuf_map[chain_prod_start] = tmp_map;
4402 
4403 	sc->used_tx_bd += nsegs;
4404 
4405 	/* Update some debug statistic counters */
4406 	/* Update some debug statistics counters */
4407 		sc->tx_hi_watermark = sc->used_tx_bd);
4408 	DBRUNIF((sc->used_tx_bd == sc->max_tx_bd), sc->tx_full_count++);
4409 	DBRUNIF(1, sc->tx_mbuf_alloc++);
4410 
4411 	DBRUN(BCE_VERBOSE_SEND,
4412 	      bce_dump_tx_mbuf_chain(sc, chain_prod, nsegs));
4413 
4414 	/* prod points to the next free tx_bd at this point. */
4415 	sc->tx_prod = prod;
4416 	sc->tx_prod_bseq = prod_bseq;
4417 back:
4418 	if (error) {
4419 		m_freem(*m_head);
4420 		*m_head = NULL;
4421 	}
4422 	return error;
4423 }
4424 
4425 
4426 /****************************************************************************/
4427 /* Main transmit routine when called from another routine with a lock.      */
4428 /*                                                                          */
4429 /* Returns:                                                                 */
4430 /*   Nothing.                                                               */
4431 /****************************************************************************/
4432 static void
4433 bce_start(struct ifnet *ifp)
4434 {
4435 	struct bce_softc *sc = ifp->if_softc;
4436 	int count = 0;
4437 
4438 	ASSERT_SERIALIZED(ifp->if_serializer);
4439 
4440 	/* Without a link there is nothing to send; purge the queue and exit. */
4441 	if (!sc->bce_link) {
4442 		ifq_purge(&ifp->if_snd);
4443 		return;
4444 	}
4445 
4446 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
4447 		return;
4448 
4449 	DBPRINT(sc, BCE_INFO_SEND,
4450 		"%s(): Start: tx_prod = 0x%04X, tx_chain_prod = %04X, "
4451 		"tx_prod_bseq = 0x%08X\n",
4452 		__func__,
4453 		sc->tx_prod, TX_CHAIN_IDX(sc->tx_prod), sc->tx_prod_bseq);
4454 
4455 	for (;;) {
4456 		struct mbuf *m_head;
4457 
4458 		/*
4459 		 * We keep BCE_TX_SPARE_SPACE entries, so bce_encap() is
4460 		 * unlikely to fail.
4461 		 */
4462 		if (sc->max_tx_bd - sc->used_tx_bd < BCE_TX_SPARE_SPACE) {
4463 			ifp->if_flags |= IFF_OACTIVE;
4464 			break;
4465 		}
4466 
4467 		/* Check for any frames to send. */
4468 		m_head = ifq_dequeue(&ifp->if_snd, NULL);
4469 		if (m_head == NULL)
4470 			break;
4471 
4472 		/*
4473 		 * Pack the data into the transmit ring. If we
4474 		 * don't have room, place the mbuf back at the
4475 		 * head of the queue and set the OACTIVE flag
4476 		 * to wait for the NIC to drain the chain.
4477 		 */
4478 		if (bce_encap(sc, &m_head)) {
4479 			ifp->if_oerrors++;
4480 			if (sc->used_tx_bd == 0) {
4481 				continue;
4482 			} else {
4483 				ifp->if_flags |= IFF_OACTIVE;
4484 				break;
4485 			}
4486 		}
4487 
4488 		count++;
4489 
4490 		/* Send a copy of the frame to any BPF listeners. */
4491 		ETHER_BPF_MTAP(ifp, m_head);
4492 	}
4493 
4494 	if (count == 0) {
4495 		/* no packets were dequeued */
4496 		DBPRINT(sc, BCE_VERBOSE_SEND,
4497 			"%s(): No packets were dequeued\n", __func__);
4498 		return;
4499 	}
4500 
4501 	DBPRINT(sc, BCE_INFO_SEND,
4502 		"%s(): End: tx_prod = 0x%04X, tx_chain_prod = 0x%04X, "
4503 		"tx_prod_bseq = 0x%08X\n",
4504 		__func__,
4505 		sc->tx_prod, TX_CHAIN_IDX(sc->tx_prod), sc->tx_prod_bseq);
4506 
4507 	/* Start the transmit. */
4508 	REG_WR16(sc, MB_TX_CID_ADDR + BCE_L2CTX_TX_HOST_BIDX, sc->tx_prod);
4509 	REG_WR(sc, MB_TX_CID_ADDR + BCE_L2CTX_TX_HOST_BSEQ, sc->tx_prod_bseq);
4510 
4511 	/* Set the tx timeout. */
4512 	ifp->if_timer = BCE_TX_TIMEOUT;
4513 }
4514 
4515 
4516 /****************************************************************************/
4517 /* Handles any IOCTL calls from the operating system.                       */
4518 /*                                                                          */
4519 /* Returns:                                                                 */
4520 /*   0 for success, positive value for failure.                             */
4521 /****************************************************************************/
4522 static int
4523 bce_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
4524 {
4525 	struct bce_softc *sc = ifp->if_softc;
4526 	struct ifreq *ifr = (struct ifreq *)data;
4527 	struct mii_data *mii;
4528 	int mask, error = 0;
4529 
4530 	ASSERT_SERIALIZED(ifp->if_serializer);
4531 
4532 	switch(command) {
4533 	case SIOCSIFMTU:
4534 		/* Check that the MTU setting is supported. */
4535 		if (ifr->ifr_mtu < BCE_MIN_MTU ||
4536 #ifdef notyet
4537 		    ifr->ifr_mtu > BCE_MAX_JUMBO_MTU
4538 #else
4539 		    ifr->ifr_mtu > ETHERMTU
4540 #endif
4541 		   ) {
4542 			error = EINVAL;
4543 			break;
4544 		}
4545 
4546 		DBPRINT(sc, BCE_INFO, "Setting new MTU of %d\n", ifr->ifr_mtu);
4547 
4548 		ifp->if_mtu = ifr->ifr_mtu;
4549 		ifp->if_flags &= ~IFF_RUNNING;	/* Force reinitialize */
4550 		bce_init(sc);
4551 		break;
4552 
4553 	case SIOCSIFFLAGS:
4554 		if (ifp->if_flags & IFF_UP) {
4555 			if (ifp->if_flags & IFF_RUNNING) {
4556 				mask = ifp->if_flags ^ sc->bce_if_flags;
4557 
4558 				if (mask & (IFF_PROMISC | IFF_ALLMULTI))
4559 					bce_set_rx_mode(sc);
4560 			} else {
4561 				bce_init(sc);
4562 			}
4563 		} else if (ifp->if_flags & IFF_RUNNING) {
4564 			bce_stop(sc);
4565 		}
4566 		sc->bce_if_flags = ifp->if_flags;
4567 		break;
4568 
4569 	case SIOCADDMULTI:
4570 	case SIOCDELMULTI:
4571 		if (ifp->if_flags & IFF_RUNNING)
4572 			bce_set_rx_mode(sc);
4573 		break;
4574 
4575 	case SIOCSIFMEDIA:
4576 	case SIOCGIFMEDIA:
4577 		DBPRINT(sc, BCE_VERBOSE, "bce_phy_flags = 0x%08X\n",
4578 			sc->bce_phy_flags);
4579 		DBPRINT(sc, BCE_VERBOSE, "Copper media set/get\n");
4580 
4581 		mii = device_get_softc(sc->bce_miibus);
4582 		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
4583 		break;
4584 
4585 	case SIOCSIFCAP:
4586 		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
4587 		DBPRINT(sc, BCE_INFO, "Received SIOCSIFCAP = 0x%08X\n",
4588 			(uint32_t) mask);
4589 
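		/*
		 * Toggle hardware checksum offload and keep if_hwassist
		 * in sync with the new capability setting.
		 */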
4590 		if (mask & IFCAP_HWCSUM) {
4591 			ifp->if_capenable ^= (mask & IFCAP_HWCSUM);
4592 			if (IFCAP_HWCSUM & ifp->if_capenable)
4593 				ifp->if_hwassist = BCE_IF_HWASSIST;
4594 			else
4595 				ifp->if_hwassist = 0;
4596 		}
4597 		break;
4598 
4599 	default:
4600 		error = ether_ioctl(ifp, command, data);
4601 		break;
4602 	}
4603 	return error;
4604 }
4605 
4606 
4607 /****************************************************************************/
4608 /* Transmit timeout handler.                                                */
4609 /*                                                                          */
4610 /* Returns:                                                                 */
4611 /*   Nothing.                                                               */
4612 /****************************************************************************/
4613 static void
4614 bce_watchdog(struct ifnet *ifp)
4615 {
4616 	struct bce_softc *sc = ifp->if_softc;
4617 
4618 	ASSERT_SERIALIZED(ifp->if_serializer);
4619 
4620 	DBRUN(BCE_VERBOSE_SEND,
4621 	      bce_dump_driver_state(sc);
4622 	      bce_dump_status_block(sc));
4623 
4624 	/*
4625 	 * If we are in this routine because of pause frames, then
4626 	 * don't reset the hardware.
4627 	 */
4628 	if (REG_RD(sc, BCE_EMAC_TX_STATUS) & BCE_EMAC_TX_STATUS_XOFFED)
4629 		return;
4630 
4631 	if_printf(ifp, "Watchdog timeout occurred, resetting!\n");
4632 
4633 	/* DBRUN(BCE_FATAL, bce_breakpoint(sc)); */
4634 
4635 	ifp->if_flags &= ~IFF_RUNNING;	/* Force reinitialize */
4636 	bce_init(sc);
4637 
4638 	ifp->if_oerrors++;
4639 
4640 	if (!ifq_is_empty(&ifp->if_snd))
4641 		if_devstart(ifp);
4642 }
4643 
4644 
4645 #ifdef DEVICE_POLLING
4646 
4647 static void
4648 bce_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
4649 {
4650 	struct bce_softc *sc = ifp->if_softc;
4651 	struct status_block *sblk = sc->status_block;
4652 	uint16_t hw_tx_cons, hw_rx_cons;
4653 
4654 	ASSERT_SERIALIZED(ifp->if_serializer);
4655 
4656 	switch (cmd) {
4657 	case POLL_REGISTER:
4658 		bce_disable_intr(sc);
4659 
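		/*
		 * While polling, program the interrupt-mode trip counts
		 * (upper 16 bits) to 1; POLL_DEREGISTER below restores
		 * the configured values.
		 */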
4660 		REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
4661 		       (1 << 16) | sc->bce_rx_quick_cons_trip);
4662 		REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
4663 		       (1 << 16) | sc->bce_tx_quick_cons_trip);
4664 		return;
4665 	case POLL_DEREGISTER:
4666 		bce_enable_intr(sc);
4667 
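		/*
		 * Polling is being turned off; restore the trip counts
		 * used when running with interrupts enabled.
		 */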
4668 		REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
4669 		       (sc->bce_tx_quick_cons_trip_int << 16) |
4670 		       sc->bce_tx_quick_cons_trip);
4671 		REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
4672 		       (sc->bce_rx_quick_cons_trip_int << 16) |
4673 		       sc->bce_rx_quick_cons_trip);
4674 		return;
4675 	default:
4676 		break;
4677 	}
4678 
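	/*
	 * POLL_AND_CHECK_STATUS also examines the attention bits for
	 * link changes and fatal errors before servicing the rings.
	 */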
4679 	if (cmd == POLL_AND_CHECK_STATUS) {
4680 		uint32_t status_attn_bits;
4681 
4682 		status_attn_bits = sblk->status_attn_bits;
4683 
4684 		DBRUNIF(DB_RANDOMTRUE(bce_debug_unexpected_attention),
4685 			if_printf(ifp,
4686 			"Simulating unexpected status attention bit set.");
4687 			status_attn_bits |= STATUS_ATTN_BITS_PARITY_ERROR);
4688 
4689 		/* Was it a link change interrupt? */
4690 		if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
4691 		    (sblk->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE))
4692 			bce_phy_intr(sc);
4693 
4694 		/*
4695 		 * If any other attention is asserted then
4696 		 * the chip is toast.
4697 		 */
4698 		if ((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) !=
4699 		     (sblk->status_attn_bits_ack &
4700 		      ~STATUS_ATTN_BITS_LINK_STATE)) {
4701 			DBRUN(1, sc->unexpected_attentions++);
4702 
4703 			if_printf(ifp, "Fatal attention detected: 0x%08X\n",
4704 				  sblk->status_attn_bits);
4705 
4706 			DBRUN(BCE_FATAL,
4707 			if (bce_debug_unexpected_attention == 0)
4708 				bce_breakpoint(sc));
4709 
4710 			bce_init(sc);
4711 			return;
4712 		}
4713 	}
4714 
4715 	hw_rx_cons = bce_get_hw_rx_cons(sc);
4716 	hw_tx_cons = bce_get_hw_tx_cons(sc);
4717 
4718 	/* Check for any completed RX frames. */
4719 	if (hw_rx_cons != sc->hw_rx_cons)
4720 		bce_rx_intr(sc, count);
4721 
4722 	/* Check for any completed TX frames. */
4723 	if (hw_tx_cons != sc->hw_tx_cons)
4724 		bce_tx_intr(sc);
4725 
4726 	/* Check for new frames to transmit. */
4727 	if (!ifq_is_empty(&ifp->if_snd))
4728 		if_devstart(ifp);
4729 }
4730 
4731 #endif	/* DEVICE_POLLING */
4732 
4733 
4734 /*
4735  * Interrupt handler.
4736  */
4737 /****************************************************************************/
4738 /* Main interrupt entry point.  Verifies that the controller generated the  */
4739 /* interrupt and then calls a separate routine for handle the various       */
4740 /* interrupt and then calls a separate routine to handle the various        */
4741 /*                                                                          */
4742 /* Returns:                                                                 */
4743 /*   Nothing.                                                               */
4744 /****************************************************************************/
4745 static void
4746 bce_intr(void *xsc)
4747 {
4748 	struct bce_softc *sc = xsc;
4749 	struct ifnet *ifp = &sc->arpcom.ac_if;
4750 	struct status_block *sblk;
4751 	uint16_t hw_rx_cons, hw_tx_cons;
4752 
4753 	ASSERT_SERIALIZED(ifp->if_serializer);
4754 
4755 	DBPRINT(sc, BCE_EXCESSIVE, "Entering %s()\n", __func__);
4756 	DBRUNIF(1, sc->interrupts_generated++);
4757 
4758 	sblk = sc->status_block;
4759 
4760 	/*
4761 	 * If the hardware status block index matches the last value
4762 	 * read by the driver and we haven't asserted our interrupt
4763 	 * then there's nothing to do.
4764 	 */
4765 	if (sblk->status_idx == sc->last_status_idx &&
4766 	    (REG_RD(sc, BCE_PCICFG_MISC_STATUS) &
4767 	     BCE_PCICFG_MISC_STATUS_INTA_VALUE))
4768 		return;
4769 
4770 	/* Ack the interrupt and stop others from occurring. */
4771 	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
4772 	       BCE_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
4773 	       BCE_PCICFG_INT_ACK_CMD_MASK_INT);
4774 
4775 	/* Check if the hardware has finished any work. */
4776 	hw_rx_cons = bce_get_hw_rx_cons(sc);
4777 	hw_tx_cons = bce_get_hw_tx_cons(sc);
4778 
4779 	/* Keep processing data as long as there is work to do. */
4780 	for (;;) {
4781 		uint32_t status_attn_bits;
4782 
4783 		status_attn_bits = sblk->status_attn_bits;
4784 
4785 		DBRUNIF(DB_RANDOMTRUE(bce_debug_unexpected_attention),
4786 			if_printf(ifp,
4787 			"Simulating unexpected status attention bit set.");
4788 			status_attn_bits |= STATUS_ATTN_BITS_PARITY_ERROR);
4789 
4790 		/* Was it a link change interrupt? */
4791 		if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
4792 		    (sblk->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE))
4793 			bce_phy_intr(sc);
4794 
4795 		/*
4796 		 * If any other attention is asserted then
4797 		 * the chip is toast.
4798 		 */
4799 		if ((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) !=
4800 		     (sblk->status_attn_bits_ack &
4801 		      ~STATUS_ATTN_BITS_LINK_STATE)) {
4802 			DBRUN(1, sc->unexpected_attentions++);
4803 
4804 			if_printf(ifp, "Fatal attention detected: 0x%08X\n",
4805 				  sblk->status_attn_bits);
4806 
4807 			DBRUN(BCE_FATAL,
4808 			if (bce_debug_unexpected_attention == 0)
4809 				bce_breakpoint(sc));
4810 
4811 			bce_init(sc);
4812 			return;
4813 		}
4814 
4815 		/* Check for any completed RX frames. */
4816 		if (hw_rx_cons != sc->hw_rx_cons)
4817 			bce_rx_intr(sc, -1);
4818 
4819 		/* Check for any completed TX frames. */
4820 		if (hw_tx_cons != sc->hw_tx_cons)
4821 			bce_tx_intr(sc);
4822 
4823 		/*
4824 		 * Save the status block index value
4825 		 * for use during the next interrupt.
4826 		 */
4827 		sc->last_status_idx = sblk->status_idx;
4828 
4829 		/*
4830 		 * Prevent speculative reads from getting
4831 		 * ahead of the status block.
4832 		 */
4833 		bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
4834 				  BUS_SPACE_BARRIER_READ);
4835 
4836 		/*
4837 		 * If there's no work left then exit the
4838 		 * interrupt service routine.
4839 		 */
4840 		hw_rx_cons = bce_get_hw_rx_cons(sc);
4841 		hw_tx_cons = bce_get_hw_tx_cons(sc);
4842 		if ((hw_rx_cons == sc->hw_rx_cons) && (hw_tx_cons == sc->hw_tx_cons))
4843 			break;
4844 	}
4845 
4846 	/* Re-enable interrupts. */
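	/*
	 * The status index is written twice: first with the MASK_INT
	 * bit still set, then without it to unmask the interrupt.
	 */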
4847 	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
4848 	       BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx |
4849 	       BCE_PCICFG_INT_ACK_CMD_MASK_INT);
4850 	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
4851 	       BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx);
4852 
4853 	if (sc->bce_coalchg_mask)
4854 		bce_coal_change(sc);
4855 
4856 	/* Handle any frames that arrived while handling the interrupt. */
4857 	if (!ifq_is_empty(&ifp->if_snd))
4858 		if_devstart(ifp);
4859 }
4860 
4861 
4862 /****************************************************************************/
4863 /* Programs the various packet receive modes (broadcast and multicast).     */
4864 /*                                                                          */
4865 /* Returns:                                                                 */
4866 /*   Nothing.                                                               */
4867 /****************************************************************************/
4868 static void
4869 bce_set_rx_mode(struct bce_softc *sc)
4870 {
4871 	struct ifnet *ifp = &sc->arpcom.ac_if;
4872 	struct ifmultiaddr *ifma;
4873 	uint32_t hashes[NUM_MC_HASH_REGISTERS] = { 0, 0, 0, 0, 0, 0, 0, 0 };
4874 	uint32_t rx_mode, sort_mode;
4875 	int h, i;
4876 
4877 	ASSERT_SERIALIZED(ifp->if_serializer);
4878 
4879 	/* Initialize receive mode default settings. */
4880 	rx_mode = sc->rx_mode &
4881 		  ~(BCE_EMAC_RX_MODE_PROMISCUOUS |
4882 		    BCE_EMAC_RX_MODE_KEEP_VLAN_TAG);
4883 	sort_mode = 1 | BCE_RPM_SORT_USER0_BC_EN;
4884 
4885 	/*
4886 	 * ASF/IPMI/UMP firmware requires that VLAN tag stripping
4887 	 * be enabled.
4888 	 */
4889 	if (!(BCE_IF_CAPABILITIES & IFCAP_VLAN_HWTAGGING) &&
4890 	    !(sc->bce_flags & BCE_MFW_ENABLE_FLAG))
4891 		rx_mode |= BCE_EMAC_RX_MODE_KEEP_VLAN_TAG;
4892 
4893 	/*
4894 	 * Check for promiscuous, all multicast, or selected
4895 	 * multicast address filtering.
4896 	 */
4897 	if (ifp->if_flags & IFF_PROMISC) {
4898 		DBPRINT(sc, BCE_INFO, "Enabling promiscuous mode.\n");
4899 
4900 		/* Enable promiscuous mode. */
4901 		rx_mode |= BCE_EMAC_RX_MODE_PROMISCUOUS;
4902 		sort_mode |= BCE_RPM_SORT_USER0_PROM_EN;
4903 	} else if (ifp->if_flags & IFF_ALLMULTI) {
4904 		DBPRINT(sc, BCE_INFO, "Enabling all multicast mode.\n");
4905 
4906 		/* Enable all multicast addresses. */
4907 		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
4908 			REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4),
4909 			       0xffffffff);
4910 		}
4911 		sort_mode |= BCE_RPM_SORT_USER0_MC_EN;
4912 	} else {
4913 		/* Accept one or more multicast(s). */
4914 		DBPRINT(sc, BCE_INFO, "Enabling selective multicast mode.\n");
4915 
4916 		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
4917 			if (ifma->ifma_addr->sa_family != AF_LINK)
4918 				continue;
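			/*
			 * CRC32 the link-level address and use the low
			 * 8 bits to select one of 256 hash bits spread
			 * across the 8 multicast hash registers.
			 */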
4919 			h = ether_crc32_le(
4920 			    LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
4921 			    ETHER_ADDR_LEN) & 0xFF;
4922 			hashes[(h & 0xE0) >> 5] |= 1 << (h & 0x1F);
4923 		}
4924 
4925 		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
4926 			REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4),
4927 			       hashes[i]);
4928 		}
4929 		sort_mode |= BCE_RPM_SORT_USER0_MC_HSH_EN;
4930 	}
4931 
4932 	/* Only make changes if the receive mode has actually changed. */
4933 	if (rx_mode != sc->rx_mode) {
4934 		DBPRINT(sc, BCE_VERBOSE, "Enabling new receive mode: 0x%08X\n",
4935 			rx_mode);
4936 
4937 		sc->rx_mode = rx_mode;
4938 		REG_WR(sc, BCE_EMAC_RX_MODE, rx_mode);
4939 	}
4940 
4941 	/* Disable and clear the existing sort before enabling a new sort. */
4942 	REG_WR(sc, BCE_RPM_SORT_USER0, 0x0);
4943 	REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode);
4944 	REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode | BCE_RPM_SORT_USER0_ENA);
4945 }
4946 
4947 
4948 /****************************************************************************/
4949 /* Called periodically to update statistics from the controller's           */
4950 /* statistics block.                                                        */
4951 /*                                                                          */
4952 /* Returns:                                                                 */
4953 /*   Nothing.                                                               */
4954 /****************************************************************************/
4955 static void
4956 bce_stats_update(struct bce_softc *sc)
4957 {
4958 	struct ifnet *ifp = &sc->arpcom.ac_if;
4959 	struct statistics_block *stats = sc->stats_block;
4960 
4961 	DBPRINT(sc, BCE_EXCESSIVE, "Entering %s()\n", __func__);
4962 
4963 	ASSERT_SERIALIZED(ifp->if_serializer);
4964 
4965 	/*
4966 	 * Update the interface statistics from the hardware statistics.
4967 	 */
4968 	ifp->if_collisions = (u_long)stats->stat_EtherStatsCollisions;
4969 
4970 	ifp->if_ierrors = (u_long)stats->stat_EtherStatsUndersizePkts +
4971 			  (u_long)stats->stat_EtherStatsOverrsizePkts +
4972 			  (u_long)stats->stat_IfInMBUFDiscards +
4973 			  (u_long)stats->stat_Dot3StatsAlignmentErrors +
4974 			  (u_long)stats->stat_Dot3StatsFCSErrors;
4975 
4976 	ifp->if_oerrors =
4977 	(u_long)stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors +
4978 	(u_long)stats->stat_Dot3StatsExcessiveCollisions +
4979 	(u_long)stats->stat_Dot3StatsLateCollisions;
4980 
4981 	/*
4982 	 * Certain controllers don't report carrier sense errors correctly.
4983 	 * See errata E11_5708CA0_1165.
4984 	 */
4985 	if (!(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) &&
4986 	    !(BCE_CHIP_ID(sc) == BCE_CHIP_ID_5708_A0)) {
4987 		ifp->if_oerrors +=
4988 			(u_long)stats->stat_Dot3StatsCarrierSenseErrors;
4989 	}
4990 
4991 	/*
4992 	 * Update the sysctl statistics from the hardware statistics.
4993 	 */
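	/*
	 * The 64-bit IfHC counters are stored as separate high and
	 * low 32-bit words in the statistics block; reassemble them
	 * here.
	 */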
4994 	sc->stat_IfHCInOctets =
4995 		((uint64_t)stats->stat_IfHCInOctets_hi << 32) +
4996 		 (uint64_t)stats->stat_IfHCInOctets_lo;
4997 
4998 	sc->stat_IfHCInBadOctets =
4999 		((uint64_t)stats->stat_IfHCInBadOctets_hi << 32) +
5000 		 (uint64_t)stats->stat_IfHCInBadOctets_lo;
5001 
5002 	sc->stat_IfHCOutOctets =
5003 		((uint64_t)stats->stat_IfHCOutOctets_hi << 32) +
5004 		 (uint64_t)stats->stat_IfHCOutOctets_lo;
5005 
5006 	sc->stat_IfHCOutBadOctets =
5007 		((uint64_t)stats->stat_IfHCOutBadOctets_hi << 32) +
5008 		 (uint64_t)stats->stat_IfHCOutBadOctets_lo;
5009 
5010 	sc->stat_IfHCInUcastPkts =
5011 		((uint64_t)stats->stat_IfHCInUcastPkts_hi << 32) +
5012 		 (uint64_t)stats->stat_IfHCInUcastPkts_lo;
5013 
5014 	sc->stat_IfHCInMulticastPkts =
5015 		((uint64_t)stats->stat_IfHCInMulticastPkts_hi << 32) +
5016 		 (uint64_t)stats->stat_IfHCInMulticastPkts_lo;
5017 
5018 	sc->stat_IfHCInBroadcastPkts =
5019 		((uint64_t)stats->stat_IfHCInBroadcastPkts_hi << 32) +
5020 		 (uint64_t)stats->stat_IfHCInBroadcastPkts_lo;
5021 
5022 	sc->stat_IfHCOutUcastPkts =
5023 		((uint64_t)stats->stat_IfHCOutUcastPkts_hi << 32) +
5024 		 (uint64_t)stats->stat_IfHCOutUcastPkts_lo;
5025 
5026 	sc->stat_IfHCOutMulticastPkts =
5027 		((uint64_t)stats->stat_IfHCOutMulticastPkts_hi << 32) +
5028 		 (uint64_t)stats->stat_IfHCOutMulticastPkts_lo;
5029 
5030 	sc->stat_IfHCOutBroadcastPkts =
5031 		((uint64_t)stats->stat_IfHCOutBroadcastPkts_hi << 32) +
5032 		 (uint64_t)stats->stat_IfHCOutBroadcastPkts_lo;
5033 
5034 	sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors =
5035 		stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors;
5036 
5037 	sc->stat_Dot3StatsCarrierSenseErrors =
5038 		stats->stat_Dot3StatsCarrierSenseErrors;
5039 
5040 	sc->stat_Dot3StatsFCSErrors =
5041 		stats->stat_Dot3StatsFCSErrors;
5042 
5043 	sc->stat_Dot3StatsAlignmentErrors =
5044 		stats->stat_Dot3StatsAlignmentErrors;
5045 
5046 	sc->stat_Dot3StatsSingleCollisionFrames =
5047 		stats->stat_Dot3StatsSingleCollisionFrames;
5048 
5049 	sc->stat_Dot3StatsMultipleCollisionFrames =
5050 		stats->stat_Dot3StatsMultipleCollisionFrames;
5051 
5052 	sc->stat_Dot3StatsDeferredTransmissions =
5053 		stats->stat_Dot3StatsDeferredTransmissions;
5054 
5055 	sc->stat_Dot3StatsExcessiveCollisions =
5056 		stats->stat_Dot3StatsExcessiveCollisions;
5057 
5058 	sc->stat_Dot3StatsLateCollisions =
5059 		stats->stat_Dot3StatsLateCollisions;
5060 
5061 	sc->stat_EtherStatsCollisions =
5062 		stats->stat_EtherStatsCollisions;
5063 
5064 	sc->stat_EtherStatsFragments =
5065 		stats->stat_EtherStatsFragments;
5066 
5067 	sc->stat_EtherStatsJabbers =
5068 		stats->stat_EtherStatsJabbers;
5069 
5070 	sc->stat_EtherStatsUndersizePkts =
5071 		stats->stat_EtherStatsUndersizePkts;
5072 
5073 	sc->stat_EtherStatsOverrsizePkts =
5074 		stats->stat_EtherStatsOverrsizePkts;
5075 
5076 	sc->stat_EtherStatsPktsRx64Octets =
5077 		stats->stat_EtherStatsPktsRx64Octets;
5078 
5079 	sc->stat_EtherStatsPktsRx65Octetsto127Octets =
5080 		stats->stat_EtherStatsPktsRx65Octetsto127Octets;
5081 
5082 	sc->stat_EtherStatsPktsRx128Octetsto255Octets =
5083 		stats->stat_EtherStatsPktsRx128Octetsto255Octets;
5084 
5085 	sc->stat_EtherStatsPktsRx256Octetsto511Octets =
5086 		stats->stat_EtherStatsPktsRx256Octetsto511Octets;
5087 
5088 	sc->stat_EtherStatsPktsRx512Octetsto1023Octets =
5089 		stats->stat_EtherStatsPktsRx512Octetsto1023Octets;
5090 
5091 	sc->stat_EtherStatsPktsRx1024Octetsto1522Octets =
5092 		stats->stat_EtherStatsPktsRx1024Octetsto1522Octets;
5093 
5094 	sc->stat_EtherStatsPktsRx1523Octetsto9022Octets =
5095 		stats->stat_EtherStatsPktsRx1523Octetsto9022Octets;
5096 
5097 	sc->stat_EtherStatsPktsTx64Octets =
5098 		stats->stat_EtherStatsPktsTx64Octets;
5099 
5100 	sc->stat_EtherStatsPktsTx65Octetsto127Octets =
5101 		stats->stat_EtherStatsPktsTx65Octetsto127Octets;
5102 
5103 	sc->stat_EtherStatsPktsTx128Octetsto255Octets =
5104 		stats->stat_EtherStatsPktsTx128Octetsto255Octets;
5105 
5106 	sc->stat_EtherStatsPktsTx256Octetsto511Octets =
5107 		stats->stat_EtherStatsPktsTx256Octetsto511Octets;
5108 
5109 	sc->stat_EtherStatsPktsTx512Octetsto1023Octets =
5110 		stats->stat_EtherStatsPktsTx512Octetsto1023Octets;
5111 
5112 	sc->stat_EtherStatsPktsTx1024Octetsto1522Octets =
5113 		stats->stat_EtherStatsPktsTx1024Octetsto1522Octets;
5114 
5115 	sc->stat_EtherStatsPktsTx1523Octetsto9022Octets =
5116 		stats->stat_EtherStatsPktsTx1523Octetsto9022Octets;
5117 
5118 	sc->stat_XonPauseFramesReceived =
5119 		stats->stat_XonPauseFramesReceived;
5120 
5121 	sc->stat_XoffPauseFramesReceived =
5122 		stats->stat_XoffPauseFramesReceived;
5123 
5124 	sc->stat_OutXonSent =
5125 		stats->stat_OutXonSent;
5126 
5127 	sc->stat_OutXoffSent =
5128 		stats->stat_OutXoffSent;
5129 
5130 	sc->stat_FlowControlDone =
5131 		stats->stat_FlowControlDone;
5132 
5133 	sc->stat_MacControlFramesReceived =
5134 		stats->stat_MacControlFramesReceived;
5135 
5136 	sc->stat_XoffStateEntered =
5137 		stats->stat_XoffStateEntered;
5138 
5139 	sc->stat_IfInFramesL2FilterDiscards =
5140 		stats->stat_IfInFramesL2FilterDiscards;
5141 
5142 	sc->stat_IfInRuleCheckerDiscards =
5143 		stats->stat_IfInRuleCheckerDiscards;
5144 
5145 	sc->stat_IfInFTQDiscards =
5146 		stats->stat_IfInFTQDiscards;
5147 
5148 	sc->stat_IfInMBUFDiscards =
5149 		stats->stat_IfInMBUFDiscards;
5150 
5151 	sc->stat_IfInRuleCheckerP4Hit =
5152 		stats->stat_IfInRuleCheckerP4Hit;
5153 
5154 	sc->stat_CatchupInRuleCheckerDiscards =
5155 		stats->stat_CatchupInRuleCheckerDiscards;
5156 
5157 	sc->stat_CatchupInFTQDiscards =
5158 		stats->stat_CatchupInFTQDiscards;
5159 
5160 	sc->stat_CatchupInMBUFDiscards =
5161 		stats->stat_CatchupInMBUFDiscards;
5162 
5163 	sc->stat_CatchupInRuleCheckerP4Hit =
5164 		stats->stat_CatchupInRuleCheckerP4Hit;
5165 
5166 	sc->com_no_buffers = REG_RD_IND(sc, 0x120084);
5167 
5168 	DBPRINT(sc, BCE_EXCESSIVE, "Exiting %s()\n", __func__);
5169 }
5170 
5171 
5172 /****************************************************************************/
5173 /* Periodic function to perform maintenance tasks.                          */
5174 /*                                                                          */
5175 /* Returns:                                                                 */
5176 /*   Nothing.                                                               */
5177 /****************************************************************************/
5178 static void
5179 bce_tick_serialized(struct bce_softc *sc)
5180 {
5181 	struct ifnet *ifp = &sc->arpcom.ac_if;
5182 	struct mii_data *mii;
5183 	uint32_t msg;
5184 
5185 	ASSERT_SERIALIZED(ifp->if_serializer);
5186 
5187 	/* Tell the firmware that the driver is still running. */
5188 #ifdef BCE_DEBUG
5189 	msg = (uint32_t)BCE_DRV_MSG_DATA_PULSE_CODE_ALWAYS_ALIVE;
5190 #else
5191 	msg = (uint32_t)++sc->bce_fw_drv_pulse_wr_seq;
5192 #endif
5193 	REG_WR_IND(sc, sc->bce_shmem_base + BCE_DRV_PULSE_MB, msg);
5194 
5195 	/* Update the statistics from the hardware statistics block. */
5196 	bce_stats_update(sc);
5197 
5198 	/* Schedule the next tick. */
5199 	callout_reset(&sc->bce_stat_ch, hz, bce_tick, sc);
5200 
5201 	/* If the link is already up then we're done. */
5202 	if (sc->bce_link)
5203 		return;
5204 
5205 	mii = device_get_softc(sc->bce_miibus);
5206 	mii_tick(mii);
5207 
5208 	/* Check if the link has come up. */
5209 	if (!sc->bce_link && (mii->mii_media_status & IFM_ACTIVE) &&
5210 	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
5211 		sc->bce_link++;
5212 		/* Now that link is up, handle any outstanding TX traffic. */
5213 		if (!ifq_is_empty(&ifp->if_snd))
5214 			if_devstart(ifp);
5215 	}
5216 }
5217 
5218 
5219 static void
5220 bce_tick(void *xsc)
5221 {
5222 	struct bce_softc *sc = xsc;
5223 	struct ifnet *ifp = &sc->arpcom.ac_if;
5224 
5225 	lwkt_serialize_enter(ifp->if_serializer);
5226 	bce_tick_serialized(sc);
5227 	lwkt_serialize_exit(ifp->if_serializer);
5228 }
5229 
5230 
5231 #ifdef BCE_DEBUG
5232 /****************************************************************************/
5233 /* Allows the driver state to be dumped through the sysctl interface.       */
5234 /*                                                                          */
5235 /* Returns:                                                                 */
5236 /*   0 for success, positive value for failure.                             */
5237 /****************************************************************************/
5238 static int
5239 bce_sysctl_driver_state(SYSCTL_HANDLER_ARGS)
5240 {
5241         int error;
5242         int result;
5243         struct bce_softc *sc;
5244 
5245         result = -1;
5246         error = sysctl_handle_int(oidp, &result, 0, req);
5247 
5248         if (error || !req->newptr)
5249                 return (error);
5250 
5251         if (result == 1) {
5252                 sc = (struct bce_softc *)arg1;
5253                 bce_dump_driver_state(sc);
5254         }
5255 
5256         return error;
5257 }
5258 
5259 
5260 /****************************************************************************/
5261 /* Allows the hardware state to be dumped through the sysctl interface.     */
5262 /*                                                                          */
5263 /* Returns:                                                                 */
5264 /*   0 for success, positive value for failure.                             */
5265 /****************************************************************************/
5266 static int
5267 bce_sysctl_hw_state(SYSCTL_HANDLER_ARGS)
5268 {
5269         int error;
5270         int result;
5271         struct bce_softc *sc;
5272 
5273         result = -1;
5274         error = sysctl_handle_int(oidp, &result, 0, req);
5275 
5276         if (error || !req->newptr)
5277                 return (error);
5278 
5279         if (result == 1) {
5280                 sc = (struct bce_softc *)arg1;
5281                 bce_dump_hw_state(sc);
5282         }
5283 
5284         return error;
5285 }
5286 
5287 
5288 /****************************************************************************/
5289 /* Provides a sysctl interface to allow dumping the RX chain.               */
5290 /*                                                                          */
5291 /* Returns:                                                                 */
5292 /*   0 for success, positive value for failure.                             */
5293 /****************************************************************************/
5294 static int
5295 bce_sysctl_dump_rx_chain(SYSCTL_HANDLER_ARGS)
5296 {
5297         int error;
5298         int result;
5299         struct bce_softc *sc;
5300 
5301         result = -1;
5302         error = sysctl_handle_int(oidp, &result, 0, req);
5303 
5304         if (error || !req->newptr)
5305                 return (error);
5306 
5307         if (result == 1) {
5308                 sc = (struct bce_softc *)arg1;
5309                 bce_dump_rx_chain(sc, 0, USABLE_RX_BD);
5310         }
5311 
5312         return error;
5313 }
5314 
5315 
5316 /****************************************************************************/
5317 /* Provides a sysctl interface to allow dumping the TX chain.               */
5318 /*                                                                          */
5319 /* Returns:                                                                 */
5320 /*   0 for success, positive value for failure.                             */
5321 /****************************************************************************/
5322 static int
5323 bce_sysctl_dump_tx_chain(SYSCTL_HANDLER_ARGS)
5324 {
5325         int error;
5326         int result;
5327         struct bce_softc *sc;
5328 
5329         result = -1;
5330         error = sysctl_handle_int(oidp, &result, 0, req);
5331 
5332         if (error || !req->newptr)
5333                 return (error);
5334 
5335         if (result == 1) {
5336                 sc = (struct bce_softc *)arg1;
5337                 bce_dump_tx_chain(sc, 0, USABLE_TX_BD);
5338         }
5339 
5340         return error;
5341 }
5342 
5343 
5344 /****************************************************************************/
5345 /* Provides a sysctl interface to allow reading arbitrary registers in the  */
5346 /* device.  DO NOT ENABLE ON PRODUCTION SYSTEMS!                            */
5347 /*                                                                          */
5348 /* Returns:                                                                 */
5349 /*   0 for success, positive value for failure.                             */
5350 /****************************************************************************/
5351 static int
5352 bce_sysctl_reg_read(SYSCTL_HANDLER_ARGS)
5353 {
5354 	struct bce_softc *sc;
5355 	int error;
5356 	uint32_t val, result;
5357 
5358 	result = -1;
5359 	error = sysctl_handle_int(oidp, &result, 0, req);
5360 	if (error || (req->newptr == NULL))
5361 		return (error);
5362 
5363 	/* Make sure the register is accessible. */
5364 	if (result < 0x8000) {
5365 		sc = (struct bce_softc *)arg1;
5366 		val = REG_RD(sc, result);
5367 		if_printf(&sc->arpcom.ac_if, "reg 0x%08X = 0x%08X\n",
5368 			  result, val);
5369 	} else if (result < 0x0280000) {
5370 		sc = (struct bce_softc *)arg1;
5371 		val = REG_RD_IND(sc, result);
5372 		if_printf(&sc->arpcom.ac_if, "reg 0x%08X = 0x%08X\n",
5373 			  result, val);
5374 	}
5375 	return (error);
5376 }
5377 
5378 
5379 /****************************************************************************/
5380 /* Provides a sysctl interface to allow reading arbitrary PHY registers in  */
5381 /* the device.  DO NOT ENABLE ON PRODUCTION SYSTEMS!                        */
5382 /*                                                                          */
5383 /* Returns:                                                                 */
5384 /*   0 for success, positive value for failure.                             */
5385 /****************************************************************************/
5386 static int
5387 bce_sysctl_phy_read(SYSCTL_HANDLER_ARGS)
5388 {
5389 	struct bce_softc *sc;
5390 	device_t dev;
5391 	int error, result;
5392 	uint16_t val;
5393 
5394 	result = -1;
5395 	error = sysctl_handle_int(oidp, &result, 0, req);
5396 	if (error || (req->newptr == NULL))
5397 		return (error);
5398 
5399 	/* Make sure the register is accessible. */
5400 	if (result < 0x20) {
5401 		sc = (struct bce_softc *)arg1;
5402 		dev = sc->bce_dev;
5403 		val = bce_miibus_read_reg(dev, sc->bce_phy_addr, result);
5404 		if_printf(&sc->arpcom.ac_if,
5405 			  "phy 0x%02X = 0x%04X\n", result, val);
5406 	}
5407 	return (error);
5408 }
5409 
5410 
5411 /****************************************************************************/
5412 /* Provides a sysctl interface to force the driver to dump state and        */
5413 /* enter the debugger.  DO NOT ENABLE ON PRODUCTION SYSTEMS!                */
5414 /*                                                                          */
5415 /* Returns:                                                                 */
5416 /*   0 for success, positive value for failure.                             */
5417 /****************************************************************************/
5418 static int
5419 bce_sysctl_breakpoint(SYSCTL_HANDLER_ARGS)
5420 {
5421         int error;
5422         int result;
5423         struct bce_softc *sc;
5424 
5425         result = -1;
5426         error = sysctl_handle_int(oidp, &result, 0, req);
5427 
5428         if (error || !req->newptr)
5429                 return (error);
5430 
5431         if (result == 1) {
5432                 sc = (struct bce_softc *)arg1;
5433                 bce_breakpoint(sc);
5434         }
5435 
5436         return error;
5437 }
5438 #endif
5439 
5440 
5441 /****************************************************************************/
5442 /* Adds any sysctl parameters for tuning or debugging purposes.             */
5443 /*                                                                          */
5444 /* Returns:                                                                 */
5445 /*   Nothing.                                                               */
5446 /****************************************************************************/
5447 static void
5448 bce_add_sysctls(struct bce_softc *sc)
5449 {
5450 	struct sysctl_ctx_list *ctx;
5451 	struct sysctl_oid_list *children;
5452 
5453 	sysctl_ctx_init(&sc->bce_sysctl_ctx);
5454 	sc->bce_sysctl_tree = SYSCTL_ADD_NODE(&sc->bce_sysctl_ctx,
5455 					      SYSCTL_STATIC_CHILDREN(_hw),
5456 					      OID_AUTO,
5457 					      device_get_nameunit(sc->bce_dev),
5458 					      CTLFLAG_RD, 0, "");
5459 	if (sc->bce_sysctl_tree == NULL) {
5460 		device_printf(sc->bce_dev, "can't add sysctl node\n");
5461 		return;
5462 	}
5463 
5464 	ctx = &sc->bce_sysctl_ctx;
5465 	children = SYSCTL_CHILDREN(sc->bce_sysctl_tree);
5466 
5467 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_bds_int",
5468 			CTLTYPE_INT | CTLFLAG_RW,
5469 			sc, 0, bce_sysctl_tx_bds_int, "I",
5470 			"Send max coalesced BD count during interrupt");
5471 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_bds",
5472 			CTLTYPE_INT | CTLFLAG_RW,
5473 			sc, 0, bce_sysctl_tx_bds, "I",
5474 			"Send max coalesced BD count");
5475 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_ticks_int",
5476 			CTLTYPE_INT | CTLFLAG_RW,
5477 			sc, 0, bce_sysctl_tx_ticks_int, "I",
5478 			"Send coalescing ticks during interrupt");
5479 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_ticks",
5480 			CTLTYPE_INT | CTLFLAG_RW,
5481 			sc, 0, bce_sysctl_tx_ticks, "I",
5482 			"Send coalescing ticks");
5483 
5484 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_bds_int",
5485 			CTLTYPE_INT | CTLFLAG_RW,
5486 			sc, 0, bce_sysctl_rx_bds_int, "I",
5487 			"Receive max coalesced BD count during interrupt");
5488 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_bds",
5489 			CTLTYPE_INT | CTLFLAG_RW,
5490 			sc, 0, bce_sysctl_rx_bds, "I",
5491 			"Receive max coalesced BD count");
5492 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_ticks_int",
5493 			CTLTYPE_INT | CTLFLAG_RW,
5494 			sc, 0, bce_sysctl_rx_ticks_int, "I",
5495 			"Receive coalescing ticks during interrupt");
5496 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_ticks",
5497 			CTLTYPE_INT | CTLFLAG_RW,
5498 			sc, 0, bce_sysctl_rx_ticks, "I",
5499 			"Receive coalescing ticks");
5500 
5501 #ifdef BCE_DEBUG
5502 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
5503 		"rx_low_watermark",
5504 		CTLFLAG_RD, &sc->rx_low_watermark,
5505 		0, "Lowest level of free rx_bd's");
5506 
5507 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
5508 		"rx_empty_count",
5509 		CTLFLAG_RD, &sc->rx_empty_count,
5510 		0, "Number of times the RX chain was empty");
5511 
5512 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
5513 		"tx_hi_watermark",
5514 		CTLFLAG_RD, &sc->tx_hi_watermark,
5515 		0, "Highest level of used tx_bd's");
5516 
5517 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
5518 		"tx_full_count",
5519 		CTLFLAG_RD, &sc->tx_full_count,
5520 		0, "Number of times the TX chain was full");
5521 
5522 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
5523 		"l2fhdr_status_errors",
5524 		CTLFLAG_RD, &sc->l2fhdr_status_errors,
5525 		0, "l2_fhdr status errors");
5526 
5527 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
5528 		"unexpected_attentions",
5529 		CTLFLAG_RD, &sc->unexpected_attentions,
5530 		0, "unexpected attentions");
5531 
5532 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
5533 		"lost_status_block_updates",
5534 		CTLFLAG_RD, &sc->lost_status_block_updates,
5535 		0, "lost status block updates");
5536 
5537 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
5538 		"mbuf_alloc_failed",
5539 		CTLFLAG_RD, &sc->mbuf_alloc_failed,
5540 		0, "mbuf cluster allocation failures");
5541 #endif
5542 
5543 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5544 		"stat_IfHCInOctets",
5545 		CTLFLAG_RD, &sc->stat_IfHCInOctets,
5546 		"Bytes received");
5547 
5548 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5549 		"stat_IfHCInBadOctets",
5550 		CTLFLAG_RD, &sc->stat_IfHCInBadOctets,
5551 		"Bad bytes received");
5552 
5553 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5554 		"stat_IfHCOutOctets",
5555 		CTLFLAG_RD, &sc->stat_IfHCOutOctets,
5556 		"Bytes sent");
5557 
5558 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5559 		"stat_IfHCOutBadOctets",
5560 		CTLFLAG_RD, &sc->stat_IfHCOutBadOctets,
5561 		"Bad bytes sent");
5562 
5563 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5564 		"stat_IfHCInUcastPkts",
5565 		CTLFLAG_RD, &sc->stat_IfHCInUcastPkts,
5566 		"Unicast packets received");
5567 
5568 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5569 		"stat_IfHCInMulticastPkts",
5570 		CTLFLAG_RD, &sc->stat_IfHCInMulticastPkts,
5571 		"Multicast packets received");
5572 
5573 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5574 		"stat_IfHCInBroadcastPkts",
5575 		CTLFLAG_RD, &sc->stat_IfHCInBroadcastPkts,
5576 		"Broadcast packets received");
5577 
5578 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5579 		"stat_IfHCOutUcastPkts",
5580 		CTLFLAG_RD, &sc->stat_IfHCOutUcastPkts,
5581 		"Unicast packets sent");
5582 
5583 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5584 		"stat_IfHCOutMulticastPkts",
5585 		CTLFLAG_RD, &sc->stat_IfHCOutMulticastPkts,
5586 		"Multicast packets sent");
5587 
5588 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5589 		"stat_IfHCOutBroadcastPkts",
5590 		CTLFLAG_RD, &sc->stat_IfHCOutBroadcastPkts,
5591 		"Broadcast packets sent");
5592 
5593 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5594 		"stat_emac_tx_stat_dot3statsinternalmactransmiterrors",
5595 		CTLFLAG_RD, &sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors,
5596 		0, "Internal MAC transmit errors");
5597 
5598 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5599 		"stat_Dot3StatsCarrierSenseErrors",
5600 		CTLFLAG_RD, &sc->stat_Dot3StatsCarrierSenseErrors,
5601 		0, "Carrier sense errors");
5602 
5603 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5604 		"stat_Dot3StatsFCSErrors",
5605 		CTLFLAG_RD, &sc->stat_Dot3StatsFCSErrors,
5606 		0, "Frame check sequence errors");
5607 
5608 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5609 		"stat_Dot3StatsAlignmentErrors",
5610 		CTLFLAG_RD, &sc->stat_Dot3StatsAlignmentErrors,
5611 		0, "Alignment errors");
5612 
5613 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5614 		"stat_Dot3StatsSingleCollisionFrames",
5615 		CTLFLAG_RD, &sc->stat_Dot3StatsSingleCollisionFrames,
5616 		0, "Single Collision Frames");
5617 
5618 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5619 		"stat_Dot3StatsMultipleCollisionFrames",
5620 		CTLFLAG_RD, &sc->stat_Dot3StatsMultipleCollisionFrames,
5621 		0, "Multiple Collision Frames");
5622 
5623 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5624 		"stat_Dot3StatsDeferredTransmissions",
5625 		CTLFLAG_RD, &sc->stat_Dot3StatsDeferredTransmissions,
5626 		0, "Deferred Transmissions");
5627 
5628 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5629 		"stat_Dot3StatsExcessiveCollisions",
5630 		CTLFLAG_RD, &sc->stat_Dot3StatsExcessiveCollisions,
5631 		0, "Excessive Collisions");
5632 
5633 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5634 		"stat_Dot3StatsLateCollisions",
5635 		CTLFLAG_RD, &sc->stat_Dot3StatsLateCollisions,
5636 		0, "Late Collisions");
5637 
5638 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5639 		"stat_EtherStatsCollisions",
5640 		CTLFLAG_RD, &sc->stat_EtherStatsCollisions,
5641 		0, "Collisions");
5642 
5643 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5644 		"stat_EtherStatsFragments",
5645 		CTLFLAG_RD, &sc->stat_EtherStatsFragments,
5646 		0, "Fragments");
5647 
5648 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5649 		"stat_EtherStatsJabbers",
5650 		CTLFLAG_RD, &sc->stat_EtherStatsJabbers,
5651 		0, "Jabbers");
5652 
5653 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5654 		"stat_EtherStatsUndersizePkts",
5655 		CTLFLAG_RD, &sc->stat_EtherStatsUndersizePkts,
5656 		0, "Undersize packets");
5657 
5658 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5659 		"stat_EtherStatsOverrsizePkts",
5660 		CTLFLAG_RD, &sc->stat_EtherStatsOverrsizePkts,
5661 		0, "Oversize packets");
5662 
5663 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5664 		"stat_EtherStatsPktsRx64Octets",
5665 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx64Octets,
5666 		0, "Bytes received in 64 byte packets");
5667 
5668 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5669 		"stat_EtherStatsPktsRx65Octetsto127Octets",
5670 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx65Octetsto127Octets,
5671 		0, "Bytes received in 65 to 127 byte packets");
5672 
5673 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5674 		"stat_EtherStatsPktsRx128Octetsto255Octets",
5675 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx128Octetsto255Octets,
5676 		0, "Bytes received in 128 to 255 byte packets");
5677 
5678 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5679 		"stat_EtherStatsPktsRx256Octetsto511Octets",
5680 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx256Octetsto511Octets,
5681 		0, "Bytes received in 256 to 511 byte packets");
5682 
5683 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5684 		"stat_EtherStatsPktsRx512Octetsto1023Octets",
5685 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx512Octetsto1023Octets,
5686 		0, "Bytes received in 512 to 1023 byte packets");
5687 
5688 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5689 		"stat_EtherStatsPktsRx1024Octetsto1522Octets",
5690 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1024Octetsto1522Octets,
5691 		0, "Bytes received in 1024 to 1522 byte packets");
5692 
5693 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5694 		"stat_EtherStatsPktsRx1523Octetsto9022Octets",
5695 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1523Octetsto9022Octets,
5696 		0, "Bytes received in 1523 to 9022 byte packets");
5697 
5698 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5699 		"stat_EtherStatsPktsTx64Octets",
5700 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx64Octets,
5701 		0, "Bytes sent in 64 byte packets");
5702 
5703 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5704 		"stat_EtherStatsPktsTx65Octetsto127Octets",
5705 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx65Octetsto127Octets,
5706 		0, "Bytes sent in 65 to 127 byte packets");
5707 
5708 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5709 		"stat_EtherStatsPktsTx128Octetsto255Octets",
5710 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx128Octetsto255Octets,
5711 		0, "Bytes sent in 128 to 255 byte packets");
5712 
5713 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5714 		"stat_EtherStatsPktsTx256Octetsto511Octets",
5715 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx256Octetsto511Octets,
5716 		0, "Bytes sent in 256 to 511 byte packets");
5717 
5718 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5719 		"stat_EtherStatsPktsTx512Octetsto1023Octets",
5720 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx512Octetsto1023Octets,
5721 		0, "Bytes sent in 512 to 1023 byte packets");
5722 
5723 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5724 		"stat_EtherStatsPktsTx1024Octetsto1522Octets",
5725 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1024Octetsto1522Octets,
5726 		0, "Bytes sent in 1024 to 1522 byte packets");
5727 
5728 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5729 		"stat_EtherStatsPktsTx1523Octetsto9022Octets",
5730 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1523Octetsto9022Octets,
5731 		0, "Bytes sent in 1523 to 9022 byte packets");
5732 
5733 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5734 		"stat_XonPauseFramesReceived",
5735 		CTLFLAG_RD, &sc->stat_XonPauseFramesReceived,
5736 		0, "XON pause frames received");
5737 
5738 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5739 		"stat_XoffPauseFramesReceived",
5740 		CTLFLAG_RD, &sc->stat_XoffPauseFramesReceived,
5741 		0, "XOFF pause frames received");
5742 
5743 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5744 		"stat_OutXonSent",
5745 		CTLFLAG_RD, &sc->stat_OutXonSent,
5746 		0, "XON pause frames sent");
5747 
5748 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5749 		"stat_OutXoffSent",
5750 		CTLFLAG_RD, &sc->stat_OutXoffSent,
5751 		0, "XOFF pause frames sent");
5752 
5753 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5754 		"stat_FlowControlDone",
5755 		CTLFLAG_RD, &sc->stat_FlowControlDone,
5756 		0, "Flow control done");
5757 
5758 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5759 		"stat_MacControlFramesReceived",
5760 		CTLFLAG_RD, &sc->stat_MacControlFramesReceived,
5761 		0, "MAC control frames received");
5762 
5763 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5764 		"stat_XoffStateEntered",
5765 		CTLFLAG_RD, &sc->stat_XoffStateEntered,
5766 		0, "XOFF state entered");
5767 
5768 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5769 		"stat_IfInFramesL2FilterDiscards",
5770 		CTLFLAG_RD, &sc->stat_IfInFramesL2FilterDiscards,
5771 		0, "Received L2 packets discarded");
5772 
5773 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5774 		"stat_IfInRuleCheckerDiscards",
5775 		CTLFLAG_RD, &sc->stat_IfInRuleCheckerDiscards,
5776 		0, "Received packets discarded by rule");
5777 
5778 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5779 		"stat_IfInFTQDiscards",
5780 		CTLFLAG_RD, &sc->stat_IfInFTQDiscards,
5781 		0, "Received packet FTQ discards");
5782 
5783 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5784 		"stat_IfInMBUFDiscards",
5785 		CTLFLAG_RD, &sc->stat_IfInMBUFDiscards,
5786 		0, "Received packets discarded due to lack of controller buffer memory");
5787 
5788 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5789 		"stat_IfInRuleCheckerP4Hit",
5790 		CTLFLAG_RD, &sc->stat_IfInRuleCheckerP4Hit,
5791 		0, "Received packets rule checker hits");
5792 
5793 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5794 		"stat_CatchupInRuleCheckerDiscards",
5795 		CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerDiscards,
5796 		0, "Received packets discarded in Catchup path");
5797 
5798 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5799 		"stat_CatchupInFTQDiscards",
5800 		CTLFLAG_RD, &sc->stat_CatchupInFTQDiscards,
5801 		0, "Received packets discarded in FTQ in Catchup path");
5802 
5803 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5804 		"stat_CatchupInMBUFDiscards",
5805 		CTLFLAG_RD, &sc->stat_CatchupInMBUFDiscards,
5806 		0, "Received packets discarded in controller buffer memory in Catchup path");
5807 
5808 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5809 		"stat_CatchupInRuleCheckerP4Hit",
5810 		CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerP4Hit,
5811 		0, "Received packets rule checker hits in Catchup path");
5812 
5813 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5814 		"com_no_buffers",
5815 		CTLFLAG_RD, &sc->com_no_buffers,
5816 		0, "Valid packets received but no RX buffers available");
5817 
5818 #ifdef BCE_DEBUG
5819 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
5820 		"driver_state", CTLTYPE_INT | CTLFLAG_RW,
5821 		(void *)sc, 0,
5822 		bce_sysctl_driver_state, "I", "Driver state information");
5823 
5824 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
5825 		"hw_state", CTLTYPE_INT | CTLFLAG_RW,
5826 		(void *)sc, 0,
5827 		bce_sysctl_hw_state, "I", "Hardware state information");
5828 
5829 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
5830 		"dump_rx_chain", CTLTYPE_INT | CTLFLAG_RW,
5831 		(void *)sc, 0,
5832 		bce_sysctl_dump_rx_chain, "I", "Dump rx_bd chain");
5833 
5834 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
5835 		"dump_tx_chain", CTLTYPE_INT | CTLFLAG_RW,
5836 		(void *)sc, 0,
5837 		bce_sysctl_dump_tx_chain, "I", "Dump tx_bd chain");
5838 
5839 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
5840 		"breakpoint", CTLTYPE_INT | CTLFLAG_RW,
5841 		(void *)sc, 0,
5842 		bce_sysctl_breakpoint, "I", "Driver breakpoint");
5843 
5844 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
5845 		"reg_read", CTLTYPE_INT | CTLFLAG_RW,
5846 		(void *)sc, 0,
5847 		bce_sysctl_reg_read, "I", "Register read");
5848 
5849 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
5850 		"phy_read", CTLTYPE_INT | CTLFLAG_RW,
5851 		(void *)sc, 0,
5852 		bce_sysctl_phy_read, "I", "PHY register read");
5853 
5854 #endif
5855 
5856 }
5857 
5858 
5859 /****************************************************************************/
5860 /* BCE Debug Routines                                                       */
5861 /****************************************************************************/
5862 #ifdef BCE_DEBUG
5863 
5864 /****************************************************************************/
5865 /* Freezes the controller to allow for a cohesive state dump.               */
5866 /*                                                                          */
5867 /* Returns:                                                                 */
5868 /*   Nothing.                                                               */
5869 /****************************************************************************/
5870 static void
5871 bce_freeze_controller(struct bce_softc *sc)
5872 {
5873 	uint32_t val;
5874 
5875 	val = REG_RD(sc, BCE_MISC_COMMAND);
5876 	val |= BCE_MISC_COMMAND_DISABLE_ALL;
5877 	REG_WR(sc, BCE_MISC_COMMAND, val);
5878 }
5879 
5880 
5881 /****************************************************************************/
5882 /* Unfreezes the controller after a freeze operation.  This may not always  */
5883 /* work, in which case the controller will require a reset!                 */
5884 /*                                                                          */
5885 /* Returns:                                                                 */
5886 /*   Nothing.                                                               */
5887 /****************************************************************************/
5888 static void
5889 bce_unfreeze_controller(struct bce_softc *sc)
5890 {
5891 	uint32_t val;
5892 
5893 	val = REG_RD(sc, BCE_MISC_COMMAND);
5894 	val |= BCE_MISC_COMMAND_ENABLE_ALL;
5895 	REG_WR(sc, BCE_MISC_COMMAND, val);
5896 }
5897 
5898 
5899 /****************************************************************************/
5900 /* Prints out information about an mbuf.                                    */
5901 /*                                                                          */
5902 /* Returns:                                                                 */
5903 /*   Nothing.                                                               */
5904 /****************************************************************************/
5905 static void
5906 bce_dump_mbuf(struct bce_softc *sc, struct mbuf *m)
5907 {
5908 	struct ifnet *ifp = &sc->arpcom.ac_if;
5909 	uint32_t val_hi, val_lo;
5910 	struct mbuf *mp = m;
5911 
5912 	if (m == NULL) {
5913 		/* NULL mbuf pointer. */
5914 		if_printf(ifp, "mbuf: null pointer\n");
5915 		return;
5916 	}
5917 
5918 	while (mp) {
5919 		val_hi = BCE_ADDR_HI(mp);
5920 		val_lo = BCE_ADDR_LO(mp);
5921 		if_printf(ifp, "mbuf: vaddr = 0x%08X:%08X, m_len = %d, "
5922 			  "m_flags = ( ", val_hi, val_lo, mp->m_len);
5923 
5924 		if (mp->m_flags & M_EXT)
5925 			kprintf("M_EXT ");
5926 		if (mp->m_flags & M_PKTHDR)
5927 			kprintf("M_PKTHDR ");
5928 		if (mp->m_flags & M_EOR)
5929 			kprintf("M_EOR ");
5930 #ifdef M_RDONLY
5931 		if (mp->m_flags & M_RDONLY)
5932 			kprintf("M_RDONLY ");
5933 #endif
5934 
5935 		val_hi = BCE_ADDR_HI(mp->m_data);
5936 		val_lo = BCE_ADDR_LO(mp->m_data);
5937 		kprintf(") m_data = 0x%08X:%08X\n", val_hi, val_lo);
5938 
5939 		if (mp->m_flags & M_PKTHDR) {
5940 			if_printf(ifp, "- m_pkthdr: flags = ( ");
5941 			if (mp->m_flags & M_BCAST)
5942 				kprintf("M_BCAST ");
5943 			if (mp->m_flags & M_MCAST)
5944 				kprintf("M_MCAST ");
5945 			if (mp->m_flags & M_FRAG)
5946 				kprintf("M_FRAG ");
5947 			if (mp->m_flags & M_FIRSTFRAG)
5948 				kprintf("M_FIRSTFRAG ");
5949 			if (mp->m_flags & M_LASTFRAG)
5950 				kprintf("M_LASTFRAG ");
5951 #ifdef M_VLANTAG
5952 			if (mp->m_flags & M_VLANTAG)
5953 				kprintf("M_VLANTAG ");
5954 #endif
5955 #ifdef M_PROMISC
5956 			if (mp->m_flags & M_PROMISC)
5957 				kprintf("M_PROMISC ");
5958 #endif
5959 			kprintf(") csum_flags = ( ");
5960 			if (mp->m_pkthdr.csum_flags & CSUM_IP)
5961 				kprintf("CSUM_IP ");
5962 			if (mp->m_pkthdr.csum_flags & CSUM_TCP)
5963 				kprintf("CSUM_TCP ");
5964 			if (mp->m_pkthdr.csum_flags & CSUM_UDP)
5965 				kprintf("CSUM_UDP ");
5966 			if (mp->m_pkthdr.csum_flags & CSUM_IP_FRAGS)
5967 				kprintf("CSUM_IP_FRAGS ");
5968 			if (mp->m_pkthdr.csum_flags & CSUM_FRAGMENT)
5969 				kprintf("CSUM_FRAGMENT ");
5970 #ifdef CSUM_TSO
5971 			if (mp->m_pkthdr.csum_flags & CSUM_TSO)
5972 				kprintf("CSUM_TSO ");
5973 #endif
5974 			if (mp->m_pkthdr.csum_flags & CSUM_IP_CHECKED)
5975 				kprintf("CSUM_IP_CHECKED ");
5976 			if (mp->m_pkthdr.csum_flags & CSUM_IP_VALID)
5977 				kprintf("CSUM_IP_VALID ");
5978 			if (mp->m_pkthdr.csum_flags & CSUM_DATA_VALID)
5979 				kprintf("CSUM_DATA_VALID ");
5980 			kprintf(")\n");
5981 		}
5982 
5983 		if (mp->m_flags & M_EXT) {
5984 			val_hi = BCE_ADDR_HI(mp->m_ext.ext_buf);
5985 			val_lo = BCE_ADDR_LO(mp->m_ext.ext_buf);
5986 			if_printf(ifp, "- m_ext: vaddr = 0x%08X:%08X, "
5987 				  "ext_size = %d\n",
5988 				  val_hi, val_lo, mp->m_ext.ext_size);
5989 		}
5990 		mp = mp->m_next;
5991 	}
5992 }
5993 
5994 
5995 /****************************************************************************/
5996 /* Prints out the mbufs in the TX mbuf chain.                               */
5997 /*                                                                          */
5998 /* Returns:                                                                 */
5999 /*   Nothing.                                                               */
6000 /****************************************************************************/
6001 static void
6002 bce_dump_tx_mbuf_chain(struct bce_softc *sc, int chain_prod, int count)
6003 {
6004 	struct ifnet *ifp = &sc->arpcom.ac_if;
6005 	int i;
6006 
6007 	if_printf(ifp,
6008 	"----------------------------"
6009 	"  tx mbuf data  "
6010 	"----------------------------\n");
6011 
6012 	for (i = 0; i < count; i++) {
6013 		if_printf(ifp, "txmbuf[%d]\n", chain_prod);
6014 		bce_dump_mbuf(sc, sc->tx_mbuf_ptr[chain_prod]);
6015 		chain_prod = TX_CHAIN_IDX(NEXT_TX_BD(chain_prod));
6016 	}
6017 
6018 	if_printf(ifp,
6019 	"----------------------------"
6020 	"----------------"
6021 	"----------------------------\n");
6022 }
6023 
6024 
6025 /****************************************************************************/
6026 /* Prints out the mbufs in the RX mbuf chain.                               */
6027 /*                                                                          */
6028 /* Returns:                                                                 */
6029 /*   Nothing.                                                               */
6030 /****************************************************************************/
6031 static void
6032 bce_dump_rx_mbuf_chain(struct bce_softc *sc, int chain_prod, int count)
6033 {
6034 	struct ifnet *ifp = &sc->arpcom.ac_if;
6035 	int i;
6036 
6037 	if_printf(ifp,
6038 	"----------------------------"
6039 	"  rx mbuf data  "
6040 	"----------------------------\n");
6041 
6042 	for (i = 0; i < count; i++) {
6043 		if_printf(ifp, "rxmbuf[0x%04X]\n", chain_prod);
6044 		bce_dump_mbuf(sc, sc->rx_mbuf_ptr[chain_prod]);
6045 		chain_prod = RX_CHAIN_IDX(NEXT_RX_BD(chain_prod));
6046 	}
6047 
6048 	if_printf(ifp,
6049 	"----------------------------"
6050 	"----------------"
6051 	"----------------------------\n");
6052 }
6053 
6054 
6055 /****************************************************************************/
6056 /* Prints out a tx_bd structure.                                            */
6057 /*                                                                          */
6058 /* Returns:                                                                 */
6059 /*   Nothing.                                                               */
6060 /****************************************************************************/
6061 static void
6062 bce_dump_txbd(struct bce_softc *sc, int idx, struct tx_bd *txbd)
6063 {
6064 	struct ifnet *ifp = &sc->arpcom.ac_if;
6065 
6066 	if (idx > MAX_TX_BD) {
6067 		/* Index out of range. */
6068 		if_printf(ifp, "tx_bd[0x%04X]: Invalid tx_bd index!\n", idx);
6069 	} else if ((idx & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE) {
6070 		/* TX Chain page pointer. */
6071 		if_printf(ifp, "tx_bd[0x%04X]: haddr = 0x%08X:%08X, "
6072 			  "chain page pointer\n",
6073 			  idx, txbd->tx_bd_haddr_hi, txbd->tx_bd_haddr_lo);
6074 	} else {
6075 		/* Normal tx_bd entry. */
6076 		if_printf(ifp, "tx_bd[0x%04X]: haddr = 0x%08X:%08X, "
6077 			  "nbytes = 0x%08X, "
6078 			  "vlan tag = 0x%04X, flags = 0x%04X (",
6079 			  idx, txbd->tx_bd_haddr_hi, txbd->tx_bd_haddr_lo,
6080 			  txbd->tx_bd_mss_nbytes,
6081 			  txbd->tx_bd_vlan_tag, txbd->tx_bd_flags);
6082 
6083 		if (txbd->tx_bd_flags & TX_BD_FLAGS_CONN_FAULT)
6084 			kprintf(" CONN_FAULT");
6085 
6086 		if (txbd->tx_bd_flags & TX_BD_FLAGS_TCP_UDP_CKSUM)
6087 			kprintf(" TCP_UDP_CKSUM");
6088 
6089 		if (txbd->tx_bd_flags & TX_BD_FLAGS_IP_CKSUM)
6090 			kprintf(" IP_CKSUM");
6091 
6092 		if (txbd->tx_bd_flags & TX_BD_FLAGS_VLAN_TAG)
6093 			kprintf("  VLAN");
6094 
6095 		if (txbd->tx_bd_flags & TX_BD_FLAGS_COAL_NOW)
6096 			kprintf(" COAL_NOW");
6097 
6098 		if (txbd->tx_bd_flags & TX_BD_FLAGS_DONT_GEN_CRC)
6099 			kprintf(" DONT_GEN_CRC");
6100 
6101 		if (txbd->tx_bd_flags & TX_BD_FLAGS_START)
6102 			kprintf(" START");
6103 
6104 		if (txbd->tx_bd_flags & TX_BD_FLAGS_END)
6105 			kprintf(" END");
6106 
6107 		if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_LSO)
6108 			kprintf(" LSO");
6109 
6110 		if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_OPTION_WORD)
6111 			kprintf(" OPTION_WORD");
6112 
6113 		if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_FLAGS)
6114 			kprintf(" FLAGS");
6115 
6116 		if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_SNAP)
6117 			kprintf(" SNAP");
6118 
6119 		kprintf(" )\n");
6120 	}
6121 }
6122 
6123 
6124 /****************************************************************************/
6125 /* Prints out a rx_bd structure.                                            */
6126 /*                                                                          */
6127 /* Returns:                                                                 */
6128 /*   Nothing.                                                               */
6129 /****************************************************************************/
6130 static void
6131 bce_dump_rxbd(struct bce_softc *sc, int idx, struct rx_bd *rxbd)
6132 {
6133 	struct ifnet *ifp = &sc->arpcom.ac_if;
6134 
6135 	if (idx > MAX_RX_BD) {
6136 		/* Index out of range. */
6137 		if_printf(ifp, "rx_bd[0x%04X]: Invalid rx_bd index!\n", idx);
6138 	} else if ((idx & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE) {
6139 		/* RX Chain page pointer. */
6140 		if_printf(ifp, "rx_bd[0x%04X]: haddr = 0x%08X:%08X, "
6141 			  "chain page pointer\n",
6142 			  idx, rxbd->rx_bd_haddr_hi, rxbd->rx_bd_haddr_lo);
6143 	} else {
6144 		/* Normal rx_bd entry. */
6145 		if_printf(ifp, "rx_bd[0x%04X]: haddr = 0x%08X:%08X, "
6146 			  "nbytes = 0x%08X, flags = 0x%08X\n",
6147 			  idx, rxbd->rx_bd_haddr_hi, rxbd->rx_bd_haddr_lo,
6148 			  rxbd->rx_bd_len, rxbd->rx_bd_flags);
6149 	}
6150 }
6151 
6152 
6153 /****************************************************************************/
6154 /* Prints out a l2_fhdr structure.                                          */
6155 /*                                                                          */
6156 /* Returns:                                                                 */
6157 /*   Nothing.                                                               */
6158 /****************************************************************************/
6159 static void
6160 bce_dump_l2fhdr(struct bce_softc *sc, int idx, struct l2_fhdr *l2fhdr)
6161 {
6162 	if_printf(&sc->arpcom.ac_if, "l2_fhdr[0x%04X]: status = 0x%08X, "
6163 		  "pkt_len = 0x%04X, vlan = 0x%04X, "
6164 		  "ip_xsum = 0x%04X, tcp_udp_xsum = 0x%04X\n",
6165 		  idx, l2fhdr->l2_fhdr_status,
6166 		  l2fhdr->l2_fhdr_pkt_len, l2fhdr->l2_fhdr_vlan_tag,
6167 		  l2fhdr->l2_fhdr_ip_xsum, l2fhdr->l2_fhdr_tcp_udp_xsum);
6168 }
6169 
6170 
6171 /****************************************************************************/
6172 /* Prints out the tx chain.                                                 */
6173 /*                                                                          */
6174 /* Returns:                                                                 */
6175 /*   Nothing.                                                               */
6176 /****************************************************************************/
6177 static void
6178 bce_dump_tx_chain(struct bce_softc *sc, int tx_prod, int count)
6179 {
6180 	struct ifnet *ifp = &sc->arpcom.ac_if;
6181 	int i;
6182 
6183 	/* First some info about the tx_bd chain structure. */
6184 	if_printf(ifp,
6185 	"----------------------------"
6186 	"  tx_bd  chain  "
6187 	"----------------------------\n");
6188 
6189 	if_printf(ifp, "page size      = 0x%08X, "
6190 		  "tx chain pages        = 0x%08X\n",
6191 		  (uint32_t)BCM_PAGE_SIZE, (uint32_t)TX_PAGES);
6192 
6193 	if_printf(ifp, "tx_bd per page = 0x%08X, "
6194 		  "usable tx_bd per page = 0x%08X\n",
6195 		  (uint32_t)TOTAL_TX_BD_PER_PAGE,
6196 		  (uint32_t)USABLE_TX_BD_PER_PAGE);
6197 
6198 	if_printf(ifp, "total tx_bd    = 0x%08X\n", (uint32_t)TOTAL_TX_BD);
6199 
6200 	if_printf(ifp,
6201 	"----------------------------"
6202 	"  tx_bd data    "
6203 	"----------------------------\n");
6204 
6205 	/* Now print out the tx_bd's themselves. */
6206 	for (i = 0; i < count; i++) {
6207 		struct tx_bd *txbd;
6208 
6209 		txbd = &sc->tx_bd_chain[TX_PAGE(tx_prod)][TX_IDX(tx_prod)];
6210 		bce_dump_txbd(sc, tx_prod, txbd);
6211 		tx_prod = TX_CHAIN_IDX(NEXT_TX_BD(tx_prod));
6212 	}
6213 
6214 	if_printf(ifp,
6215 	"----------------------------"
6216 	"----------------"
6217 	"----------------------------\n");
6218 }
6219 
6220 
6221 /****************************************************************************/
6222 /* Prints out the rx chain.                                                 */
6223 /*                                                                          */
6224 /* Returns:                                                                 */
6225 /*   Nothing.                                                               */
6226 /****************************************************************************/
6227 static void
6228 bce_dump_rx_chain(struct bce_softc *sc, int rx_prod, int count)
6229 {
6230 	struct ifnet *ifp = &sc->arpcom.ac_if;
6231 	int i;
6232 
6233 	/* First some info about the rx_bd chain structure. */
6234 	if_printf(ifp,
6235 	"----------------------------"
6236 	"  rx_bd  chain  "
6237 	"----------------------------\n");
6238 
6239 	if_printf(ifp, "page size      = 0x%08X, "
6240 		  "rx chain pages        = 0x%08X\n",
6241 		  (uint32_t)BCM_PAGE_SIZE, (uint32_t)RX_PAGES);
6242 
6243 	if_printf(ifp, "rx_bd per page = 0x%08X, "
6244 		  "usable rx_bd per page = 0x%08X\n",
6245 		  (uint32_t)TOTAL_RX_BD_PER_PAGE,
6246 		  (uint32_t)USABLE_RX_BD_PER_PAGE);
6247 
6248 	if_printf(ifp, "total rx_bd    = 0x%08X\n", (uint32_t)TOTAL_RX_BD);
6249 
6250 	if_printf(ifp,
6251 	"----------------------------"
6252 	"   rx_bd data   "
6253 	"----------------------------\n");
6254 
6255 	/* Now print out the rx_bd's themselves. */
6256 	for (i = 0; i < count; i++) {
6257 		struct rx_bd *rxbd;
6258 
6259 		rxbd = &sc->rx_bd_chain[RX_PAGE(rx_prod)][RX_IDX(rx_prod)];
6260 		bce_dump_rxbd(sc, rx_prod, rxbd);
6261 		rx_prod = RX_CHAIN_IDX(NEXT_RX_BD(rx_prod));
6262 	}
6263 
6264 	if_printf(ifp,
6265 	"----------------------------"
6266 	"----------------"
6267 	"----------------------------\n");
6268 }
6269 
6270 
6271 /****************************************************************************/
6272 /* Prints out the status block from host memory.                            */
6273 /*                                                                          */
6274 /* Returns:                                                                 */
6275 /*   Nothing.                                                               */
6276 /****************************************************************************/
6277 static void
6278 bce_dump_status_block(struct bce_softc *sc)
6279 {
6280 	struct status_block *sblk = sc->status_block;
6281 	struct ifnet *ifp = &sc->arpcom.ac_if;
6282 
6283 	if_printf(ifp,
6284 	"----------------------------"
6285 	"  Status Block  "
6286 	"----------------------------\n");
6287 
6288 	if_printf(ifp, "    0x%08X - attn_bits\n", sblk->status_attn_bits);
6289 
6290 	if_printf(ifp, "    0x%08X - attn_bits_ack\n",
6291 		  sblk->status_attn_bits_ack);
6292 
6293 	if_printf(ifp, "0x%04X(0x%04X) - rx_cons0\n",
6294 	    sblk->status_rx_quick_consumer_index0,
6295 	    (uint16_t)RX_CHAIN_IDX(sblk->status_rx_quick_consumer_index0));
6296 
6297 	if_printf(ifp, "0x%04X(0x%04X) - tx_cons0\n",
6298 	    sblk->status_tx_quick_consumer_index0,
6299 	    (uint16_t)TX_CHAIN_IDX(sblk->status_tx_quick_consumer_index0));
6300 
6301 	if_printf(ifp, "        0x%04X - status_idx\n", sblk->status_idx);
6302 
6303 	/* These indices are not used by normal L2 drivers. */
6304 	if (sblk->status_rx_quick_consumer_index1) {
6305 		if_printf(ifp, "0x%04X(0x%04X) - rx_cons1\n",
6306 		sblk->status_rx_quick_consumer_index1,
6307 		(uint16_t)RX_CHAIN_IDX(sblk->status_rx_quick_consumer_index1));
6308 	}
6309 
6310 	if (sblk->status_tx_quick_consumer_index1) {
6311 		if_printf(ifp, "0x%04X(0x%04X) - tx_cons1\n",
6312 		sblk->status_tx_quick_consumer_index1,
6313 		(uint16_t)TX_CHAIN_IDX(sblk->status_tx_quick_consumer_index1));
6314 	}
6315 
6316 	if (sblk->status_rx_quick_consumer_index2) {
6317 		if_printf(ifp, "0x%04X(0x%04X) - rx_cons2\n",
6318 		sblk->status_rx_quick_consumer_index2,
6319 		(uint16_t)RX_CHAIN_IDX(sblk->status_rx_quick_consumer_index2));
6320 	}
6321 
6322 	if (sblk->status_tx_quick_consumer_index2) {
6323 		if_printf(ifp, "0x%04X(0x%04X) - tx_cons2\n",
6324 		sblk->status_tx_quick_consumer_index2,
6325 		(uint16_t)TX_CHAIN_IDX(sblk->status_tx_quick_consumer_index2));
6326 	}
6327 
6328 	if (sblk->status_rx_quick_consumer_index3) {
6329 		if_printf(ifp, "0x%04X(0x%04X) - rx_cons3\n",
6330 		sblk->status_rx_quick_consumer_index3,
6331 		(uint16_t)RX_CHAIN_IDX(sblk->status_rx_quick_consumer_index3));
6332 	}
6333 
6334 	if (sblk->status_tx_quick_consumer_index3) {
6335 		if_printf(ifp, "0x%04X(0x%04X) - tx_cons3\n",
6336 		sblk->status_tx_quick_consumer_index3,
6337 		(uint16_t)TX_CHAIN_IDX(sblk->status_tx_quick_consumer_index3));
6338 	}
6339 
6340 	if (sblk->status_rx_quick_consumer_index4 ||
6341 	    sblk->status_rx_quick_consumer_index5) {
6342 		if_printf(ifp, "rx_cons4  = 0x%08X, rx_cons5      = 0x%08X\n",
6343 			  sblk->status_rx_quick_consumer_index4,
6344 			  sblk->status_rx_quick_consumer_index5);
6345 	}
6346 
6347 	if (sblk->status_rx_quick_consumer_index6 ||
6348 	    sblk->status_rx_quick_consumer_index7) {
6349 		if_printf(ifp, "rx_cons6  = 0x%08X, rx_cons7      = 0x%08X\n",
6350 			  sblk->status_rx_quick_consumer_index6,
6351 			  sblk->status_rx_quick_consumer_index7);
6352 	}
6353 
6354 	if (sblk->status_rx_quick_consumer_index8 ||
6355 	    sblk->status_rx_quick_consumer_index9) {
6356 		if_printf(ifp, "rx_cons8  = 0x%08X, rx_cons9      = 0x%08X\n",
6357 			  sblk->status_rx_quick_consumer_index8,
6358 			  sblk->status_rx_quick_consumer_index9);
6359 	}
6360 
6361 	if (sblk->status_rx_quick_consumer_index10 ||
6362 	    sblk->status_rx_quick_consumer_index11) {
6363 		if_printf(ifp, "rx_cons10 = 0x%08X, rx_cons11     = 0x%08X\n",
6364 			  sblk->status_rx_quick_consumer_index10,
6365 			  sblk->status_rx_quick_consumer_index11);
6366 	}
6367 
6368 	if (sblk->status_rx_quick_consumer_index12 ||
6369 	    sblk->status_rx_quick_consumer_index13) {
6370 		if_printf(ifp, "rx_cons12 = 0x%08X, rx_cons13     = 0x%08X\n",
6371 			  sblk->status_rx_quick_consumer_index12,
6372 			  sblk->status_rx_quick_consumer_index13);
6373 	}
6374 
6375 	if (sblk->status_rx_quick_consumer_index14 ||
6376 	    sblk->status_rx_quick_consumer_index15) {
6377 		if_printf(ifp, "rx_cons14 = 0x%08X, rx_cons15     = 0x%08X\n",
6378 			  sblk->status_rx_quick_consumer_index14,
6379 			  sblk->status_rx_quick_consumer_index15);
6380 	}
6381 
6382 	if (sblk->status_completion_producer_index ||
6383 	    sblk->status_cmd_consumer_index) {
6384 		if_printf(ifp, "com_prod  = 0x%08X, cmd_cons      = 0x%08X\n",
6385 			  sblk->status_completion_producer_index,
6386 			  sblk->status_cmd_consumer_index);
6387 	}
6388 
6389 	if_printf(ifp,
6390 	"----------------------------"
6391 	"----------------"
6392 	"----------------------------\n");
6393 }
6394 
6395 
6396 /****************************************************************************/
6397 /* Prints out the statistics block.                                         */
6398 /*                                                                          */
6399 /* Returns:                                                                 */
6400 /*   Nothing.                                                               */
6401 /****************************************************************************/
6402 static void
6403 bce_dump_stats_block(struct bce_softc *sc)
6404 {
6405 	struct statistics_block *sblk = sc->stats_block;
6406 	struct ifnet *ifp = &sc->arpcom.ac_if;
6407 
6408 	if_printf(ifp,
6409 	"---------------"
6410 	" Stats Block  (All Stats Not Shown Are 0) "
6411 	"---------------\n");
6412 
6413 	if (sblk->stat_IfHCInOctets_hi || sblk->stat_IfHCInOctets_lo) {
6414 		if_printf(ifp, "0x%08X:%08X : IfHcInOctets\n",
6415 			  sblk->stat_IfHCInOctets_hi,
6416 			  sblk->stat_IfHCInOctets_lo);
6417 	}
6418 
6419 	if (sblk->stat_IfHCInBadOctets_hi || sblk->stat_IfHCInBadOctets_lo) {
6420 		if_printf(ifp, "0x%08X:%08X : IfHcInBadOctets\n",
6421 			  sblk->stat_IfHCInBadOctets_hi,
6422 			  sblk->stat_IfHCInBadOctets_lo);
6423 	}
6424 
6425 	if (sblk->stat_IfHCOutOctets_hi || sblk->stat_IfHCOutOctets_lo) {
6426 		if_printf(ifp, "0x%08X:%08X : IfHcOutOctets\n",
6427 			  sblk->stat_IfHCOutOctets_hi,
6428 			  sblk->stat_IfHCOutOctets_lo);
6429 	}
6430 
6431 	if (sblk->stat_IfHCOutBadOctets_hi || sblk->stat_IfHCOutBadOctets_lo) {
6432 		if_printf(ifp, "0x%08X:%08X : IfHcOutBadOctets\n",
6433 			  sblk->stat_IfHCOutBadOctets_hi,
6434 			  sblk->stat_IfHCOutBadOctets_lo);
6435 	}
6436 
6437 	if (sblk->stat_IfHCInUcastPkts_hi || sblk->stat_IfHCInUcastPkts_lo) {
6438 		if_printf(ifp, "0x%08X:%08X : IfHcInUcastPkts\n",
6439 			  sblk->stat_IfHCInUcastPkts_hi,
6440 			  sblk->stat_IfHCInUcastPkts_lo);
6441 	}
6442 
6443 	if (sblk->stat_IfHCInBroadcastPkts_hi ||
6444 	    sblk->stat_IfHCInBroadcastPkts_lo) {
6445 		if_printf(ifp, "0x%08X:%08X : IfHcInBroadcastPkts\n",
6446 			  sblk->stat_IfHCInBroadcastPkts_hi,
6447 			  sblk->stat_IfHCInBroadcastPkts_lo);
6448 	}
6449 
6450 	if (sblk->stat_IfHCInMulticastPkts_hi ||
6451 	    sblk->stat_IfHCInMulticastPkts_lo) {
6452 		if_printf(ifp, "0x%08X:%08X : IfHcInMulticastPkts\n",
6453 			  sblk->stat_IfHCInMulticastPkts_hi,
6454 			  sblk->stat_IfHCInMulticastPkts_lo);
6455 	}
6456 
6457 	if (sblk->stat_IfHCOutUcastPkts_hi || sblk->stat_IfHCOutUcastPkts_lo) {
6458 		if_printf(ifp, "0x%08X:%08X : IfHcOutUcastPkts\n",
6459 			  sblk->stat_IfHCOutUcastPkts_hi,
6460 			  sblk->stat_IfHCOutUcastPkts_lo);
6461 	}
6462 
6463 	if (sblk->stat_IfHCOutBroadcastPkts_hi ||
6464 	    sblk->stat_IfHCOutBroadcastPkts_lo) {
6465 		if_printf(ifp, "0x%08X:%08X : IfHcOutBroadcastPkts\n",
6466 			  sblk->stat_IfHCOutBroadcastPkts_hi,
6467 			  sblk->stat_IfHCOutBroadcastPkts_lo);
6468 	}
6469 
6470 	if (sblk->stat_IfHCOutMulticastPkts_hi ||
6471 	    sblk->stat_IfHCOutMulticastPkts_lo) {
6472 		if_printf(ifp, "0x%08X:%08X : IfHcOutMulticastPkts\n",
6473 			  sblk->stat_IfHCOutMulticastPkts_hi,
6474 			  sblk->stat_IfHCOutMulticastPkts_lo);
6475 	}
6476 
6477 	if (sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors) {
6478 		if_printf(ifp, "         0x%08X : "
6479 		"emac_tx_stat_dot3statsinternalmactransmiterrors\n",
6480 		sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors);
6481 	}
6482 
6483 	if (sblk->stat_Dot3StatsCarrierSenseErrors) {
6484 		if_printf(ifp, "         0x%08X : "
6485 			  "Dot3StatsCarrierSenseErrors\n",
6486 			  sblk->stat_Dot3StatsCarrierSenseErrors);
6487 	}
6488 
6489 	if (sblk->stat_Dot3StatsFCSErrors) {
6490 		if_printf(ifp, "         0x%08X : Dot3StatsFCSErrors\n",
6491 			  sblk->stat_Dot3StatsFCSErrors);
6492 	}
6493 
6494 	if (sblk->stat_Dot3StatsAlignmentErrors) {
6495 		if_printf(ifp, "         0x%08X : Dot3StatsAlignmentErrors\n",
6496 			  sblk->stat_Dot3StatsAlignmentErrors);
6497 	}
6498 
6499 	if (sblk->stat_Dot3StatsSingleCollisionFrames) {
6500 		if_printf(ifp, "         0x%08X : "
6501 			  "Dot3StatsSingleCollisionFrames\n",
6502 			  sblk->stat_Dot3StatsSingleCollisionFrames);
6503 	}
6504 
6505 	if (sblk->stat_Dot3StatsMultipleCollisionFrames) {
6506 		if_printf(ifp, "         0x%08X : "
6507 			  "Dot3StatsMultipleCollisionFrames\n",
6508 			  sblk->stat_Dot3StatsMultipleCollisionFrames);
6509 	}
6510 
6511 	if (sblk->stat_Dot3StatsDeferredTransmissions) {
6512 		if_printf(ifp, "         0x%08X : "
6513 			  "Dot3StatsDeferredTransmissions\n",
6514 			  sblk->stat_Dot3StatsDeferredTransmissions);
6515 	}
6516 
6517 	if (sblk->stat_Dot3StatsExcessiveCollisions) {
6518 		if_printf(ifp, "         0x%08X : "
6519 			  "Dot3StatsExcessiveCollisions\n",
6520 			  sblk->stat_Dot3StatsExcessiveCollisions);
6521 	}
6522 
6523 	if (sblk->stat_Dot3StatsLateCollisions) {
6524 		if_printf(ifp, "         0x%08X : Dot3StatsLateCollisions\n",
6525 			  sblk->stat_Dot3StatsLateCollisions);
6526 	}
6527 
6528 	if (sblk->stat_EtherStatsCollisions) {
6529 		if_printf(ifp, "         0x%08X : EtherStatsCollisions\n",
6530 			  sblk->stat_EtherStatsCollisions);
6531 	}
6532 
6533 	if (sblk->stat_EtherStatsFragments) {
6534 		if_printf(ifp, "         0x%08X : EtherStatsFragments\n",
6535 			  sblk->stat_EtherStatsFragments);
6536 	}
6537 
6538 	if (sblk->stat_EtherStatsJabbers) {
6539 		if_printf(ifp, "         0x%08X : EtherStatsJabbers\n",
6540 			  sblk->stat_EtherStatsJabbers);
6541 	}
6542 
6543 	if (sblk->stat_EtherStatsUndersizePkts) {
6544 		if_printf(ifp, "         0x%08X : EtherStatsUndersizePkts\n",
6545 			  sblk->stat_EtherStatsUndersizePkts);
6546 	}
6547 
6548 	if (sblk->stat_EtherStatsOverrsizePkts) {
6549 		if_printf(ifp, "         0x%08X : EtherStatsOverrsizePkts\n",
6550 			  sblk->stat_EtherStatsOverrsizePkts);
6551 	}
6552 
6553 	if (sblk->stat_EtherStatsPktsRx64Octets) {
6554 		if_printf(ifp, "         0x%08X : EtherStatsPktsRx64Octets\n",
6555 			  sblk->stat_EtherStatsPktsRx64Octets);
6556 	}
6557 
6558 	if (sblk->stat_EtherStatsPktsRx65Octetsto127Octets) {
6559 		if_printf(ifp, "         0x%08X : "
6560 			  "EtherStatsPktsRx65Octetsto127Octets\n",
6561 			  sblk->stat_EtherStatsPktsRx65Octetsto127Octets);
6562 	}
6563 
6564 	if (sblk->stat_EtherStatsPktsRx128Octetsto255Octets) {
6565 		if_printf(ifp, "         0x%08X : "
6566 			  "EtherStatsPktsRx128Octetsto255Octets\n",
6567 			  sblk->stat_EtherStatsPktsRx128Octetsto255Octets);
6568 	}
6569 
6570 	if (sblk->stat_EtherStatsPktsRx256Octetsto511Octets) {
6571 		if_printf(ifp, "         0x%08X : "
6572 			  "EtherStatsPktsRx256Octetsto511Octets\n",
6573 			  sblk->stat_EtherStatsPktsRx256Octetsto511Octets);
6574 	}
6575 
6576 	if (sblk->stat_EtherStatsPktsRx512Octetsto1023Octets) {
6577 		if_printf(ifp, "         0x%08X : "
6578 			  "EtherStatsPktsRx512Octetsto1023Octets\n",
6579 			  sblk->stat_EtherStatsPktsRx512Octetsto1023Octets);
6580 	}
6581 
6582 	if (sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets) {
6583 		if_printf(ifp, "         0x%08X : "
6584 			  "EtherStatsPktsRx1024Octetsto1522Octets\n",
6585 			  sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets);
6586 	}
6587 
6588 	if (sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets) {
6589 		if_printf(ifp, "         0x%08X : "
6590 			  "EtherStatsPktsRx1523Octetsto9022Octets\n",
6591 			  sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets);
6592 	}
6593 
6594 	if (sblk->stat_EtherStatsPktsTx64Octets) {
6595 		if_printf(ifp, "         0x%08X : EtherStatsPktsTx64Octets\n",
6596 			  sblk->stat_EtherStatsPktsTx64Octets);
6597 	}
6598 
6599 	if (sblk->stat_EtherStatsPktsTx65Octetsto127Octets) {
6600 		if_printf(ifp, "         0x%08X : "
6601 			  "EtherStatsPktsTx65Octetsto127Octets\n",
6602 			  sblk->stat_EtherStatsPktsTx65Octetsto127Octets);
6603 	}
6604 
6605 	if (sblk->stat_EtherStatsPktsTx128Octetsto255Octets) {
6606 		if_printf(ifp, "         0x%08X : "
6607 			  "EtherStatsPktsTx128Octetsto255Octets\n",
6608 			  sblk->stat_EtherStatsPktsTx128Octetsto255Octets);
6609 	}
6610 
6611 	if (sblk->stat_EtherStatsPktsTx256Octetsto511Octets) {
6612 		if_printf(ifp, "         0x%08X : "
6613 			  "EtherStatsPktsTx256Octetsto511Octets\n",
6614 			  sblk->stat_EtherStatsPktsTx256Octetsto511Octets);
6615 	}
6616 
6617 	if (sblk->stat_EtherStatsPktsTx512Octetsto1023Octets) {
6618 		if_printf(ifp, "         0x%08X : "
6619 			  "EtherStatsPktsTx512Octetsto1023Octets\n",
6620 			  sblk->stat_EtherStatsPktsTx512Octetsto1023Octets);
6621 	}
6622 
6623 	if (sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets) {
6624 		if_printf(ifp, "         0x%08X : "
6625 			  "EtherStatsPktsTx1024Octetsto1522Octets\n",
6626 			  sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets);
6627 	}
6628 
6629 	if (sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets) {
6630 		if_printf(ifp, "         0x%08X : "
6631 			  "EtherStatsPktsTx1523Octetsto9022Octets\n",
6632 			  sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets);
6633 	}
6634 
6635 	if (sblk->stat_XonPauseFramesReceived) {
6636 		if_printf(ifp, "         0x%08X : XonPauseFramesReceived\n",
6637 			  sblk->stat_XonPauseFramesReceived);
6638 	}
6639 
6640 	if (sblk->stat_XoffPauseFramesReceived) {
6641 		if_printf(ifp, "         0x%08X : XoffPauseFramesReceived\n",
6642 			  sblk->stat_XoffPauseFramesReceived);
6643 	}
6644 
6645 	if (sblk->stat_OutXonSent) {
6646 		if_printf(ifp, "         0x%08X : OutXonSent\n",
6647 			  sblk->stat_OutXonSent);
6648 	}
6649 
6650 	if (sblk->stat_OutXoffSent) {
6651 		if_printf(ifp, "         0x%08X : OutXoffSent\n",
6652 			  sblk->stat_OutXoffSent);
6653 	}
6654 
6655 	if (sblk->stat_FlowControlDone) {
6656 		if_printf(ifp, "         0x%08X : FlowControlDone\n",
6657 			  sblk->stat_FlowControlDone);
6658 	}
6659 
6660 	if (sblk->stat_MacControlFramesReceived) {
6661 		if_printf(ifp, "         0x%08X : MacControlFramesReceived\n",
6662 			  sblk->stat_MacControlFramesReceived);
6663 	}
6664 
6665 	if (sblk->stat_XoffStateEntered) {
6666 		if_printf(ifp, "         0x%08X : XoffStateEntered\n",
6667 			  sblk->stat_XoffStateEntered);
6668 	}
6669 
6670 	if (sblk->stat_IfInFramesL2FilterDiscards) {
6671 		if_printf(ifp, "         0x%08X : IfInFramesL2FilterDiscards\n",
			  sblk->stat_IfInFramesL2FilterDiscards);
6672 	}
6673 
6674 	if (sblk->stat_IfInRuleCheckerDiscards) {
6675 		if_printf(ifp, "         0x%08X : IfInRuleCheckerDiscards\n",
6676 			  sblk->stat_IfInRuleCheckerDiscards);
6677 	}
6678 
6679 	if (sblk->stat_IfInFTQDiscards) {
6680 		if_printf(ifp, "         0x%08X : IfInFTQDiscards\n",
6681 			  sblk->stat_IfInFTQDiscards);
6682 	}
6683 
6684 	if (sblk->stat_IfInMBUFDiscards) {
6685 		if_printf(ifp, "         0x%08X : IfInMBUFDiscards\n",
6686 			  sblk->stat_IfInMBUFDiscards);
6687 	}
6688 
6689 	if (sblk->stat_IfInRuleCheckerP4Hit) {
6690 		if_printf(ifp, "         0x%08X : IfInRuleCheckerP4Hit\n",
6691 			  sblk->stat_IfInRuleCheckerP4Hit);
6692 	}
6693 
6694 	if (sblk->stat_CatchupInRuleCheckerDiscards) {
6695 		if_printf(ifp, "         0x%08X : "
6696 			  "CatchupInRuleCheckerDiscards\n",
6697 			  sblk->stat_CatchupInRuleCheckerDiscards);
6698 	}
6699 
6700 	if (sblk->stat_CatchupInFTQDiscards) {
6701 		if_printf(ifp, "         0x%08X : CatchupInFTQDiscards\n",
6702 			  sblk->stat_CatchupInFTQDiscards);
6703 	}
6704 
6705 	if (sblk->stat_CatchupInMBUFDiscards) {
6706 		if_printf(ifp, "         0x%08X : CatchupInMBUFDiscards\n",
6707 			  sblk->stat_CatchupInMBUFDiscards);
6708 	}
6709 
6710 	if (sblk->stat_CatchupInRuleCheckerP4Hit) {
6711 		if_printf(ifp, "         0x%08X : CatchupInRuleCheckerP4Hit\n",
6712 			  sblk->stat_CatchupInRuleCheckerP4Hit);
6713 	}
6714 
6715 	if_printf(ifp,
6716 	"----------------------------"
6717 	"----------------"
6718 	"----------------------------\n");
6719 }
6720 
6721 
6722 /****************************************************************************/
6723 /* Prints out a summary of the driver state.                                */
6724 /*                                                                          */
6725 /* Returns:                                                                 */
6726 /*   Nothing.                                                               */
6727 /****************************************************************************/
6728 static void
6729 bce_dump_driver_state(struct bce_softc *sc)
6730 {
6731 	struct ifnet *ifp = &sc->arpcom.ac_if;
6732 	uint32_t val_hi, val_lo;
6733 
6734 	if_printf(ifp,
6735 	"-----------------------------"
6736 	" Driver State "
6737 	"-----------------------------\n");
6738 
6739 	val_hi = BCE_ADDR_HI(sc);
6740 	val_lo = BCE_ADDR_LO(sc);
6741 	if_printf(ifp, "0x%08X:%08X - (sc) driver softc structure "
6742 		  "virtual address\n", val_hi, val_lo);
6743 
6744 	val_hi = BCE_ADDR_HI(sc->status_block);
6745 	val_lo = BCE_ADDR_LO(sc->status_block);
6746 	if_printf(ifp, "0x%08X:%08X - (sc->status_block) status block "
6747 		  "virtual address\n", val_hi, val_lo);
6748 
6749 	val_hi = BCE_ADDR_HI(sc->stats_block);
6750 	val_lo = BCE_ADDR_LO(sc->stats_block);
6751 	if_printf(ifp, "0x%08X:%08X - (sc->stats_block) statistics block "
6752 		  "virtual address\n", val_hi, val_lo);
6753 
6754 	val_hi = BCE_ADDR_HI(sc->tx_bd_chain);
6755 	val_lo = BCE_ADDR_LO(sc->tx_bd_chain);
6756 	if_printf(ifp, "0x%08X:%08X - (sc->tx_bd_chain) tx_bd chain "
6757 		  "virtual address\n", val_hi, val_lo);
6758 
6759 	val_hi = BCE_ADDR_HI(sc->rx_bd_chain);
6760 	val_lo = BCE_ADDR_LO(sc->rx_bd_chain);
6761 	if_printf(ifp, "0x%08X:%08X - (sc->rx_bd_chain) rx_bd chain "
6762 		  "virtual address\n", val_hi, val_lo);
6763 
6764 	val_hi = BCE_ADDR_HI(sc->tx_mbuf_ptr);
6765 	val_lo = BCE_ADDR_LO(sc->tx_mbuf_ptr);
6766 	if_printf(ifp, "0x%08X:%08X - (sc->tx_mbuf_ptr) tx mbuf chain "
6767 		  "virtual address\n", val_hi, val_lo);
6768 
6769 	val_hi = BCE_ADDR_HI(sc->rx_mbuf_ptr);
6770 	val_lo = BCE_ADDR_LO(sc->rx_mbuf_ptr);
6771 	if_printf(ifp, "0x%08X:%08X - (sc->rx_mbuf_ptr) rx mbuf chain "
6772 		  "virtual address\n", val_hi, val_lo);
6773 
6774 	if_printf(ifp, "         0x%08X - (sc->interrupts_generated) "
6775 		  "h/w intrs\n", sc->interrupts_generated);
6776 
6777 	if_printf(ifp, "         0x%08X - (sc->rx_interrupts) "
6778 		  "rx interrupts handled\n", sc->rx_interrupts);
6779 
6780 	if_printf(ifp, "         0x%08X - (sc->tx_interrupts) "
6781 		  "tx interrupts handled\n", sc->tx_interrupts);
6782 
6783 	if_printf(ifp, "         0x%08X - (sc->last_status_idx) "
6784 		  "status block index\n", sc->last_status_idx);
6785 
6786 	if_printf(ifp, "     0x%04X(0x%04X) - (sc->tx_prod) "
6787 		  "tx producer index\n",
6788 		  sc->tx_prod, (uint16_t)TX_CHAIN_IDX(sc->tx_prod));
6789 
6790 	if_printf(ifp, "     0x%04X(0x%04X) - (sc->tx_cons) "
6791 		  "tx consumer index\n",
6792 		  sc->tx_cons, (uint16_t)TX_CHAIN_IDX(sc->tx_cons));
6793 
6794 	if_printf(ifp, "         0x%08X - (sc->tx_prod_bseq) "
6795 		  "tx producer bseq index\n", sc->tx_prod_bseq);
6796 
6797 	if_printf(ifp, "     0x%04X(0x%04X) - (sc->rx_prod) "
6798 		  "rx producer index\n",
6799 		  sc->rx_prod, (uint16_t)RX_CHAIN_IDX(sc->rx_prod));
6800 
6801 	if_printf(ifp, "     0x%04X(0x%04X) - (sc->rx_cons) "
6802 		  "rx consumer index\n",
6803 		  sc->rx_cons, (uint16_t)RX_CHAIN_IDX(sc->rx_cons));
6804 
6805 	if_printf(ifp, "         0x%08X - (sc->rx_prod_bseq) "
6806 		  "rx producer bseq index\n", sc->rx_prod_bseq);
6807 
6808 	if_printf(ifp, "         0x%08X - (sc->rx_mbuf_alloc) "
6809 		  "rx mbufs allocated\n", sc->rx_mbuf_alloc);
6810 
6811 	if_printf(ifp, "         0x%08X - (sc->free_rx_bd) "
6812 		  "free rx_bd's\n", sc->free_rx_bd);
6813 
6814 	if_printf(ifp, "0x%08X/%08X - (sc->rx_low_watermark) rx "
6815 		  "low watermark\n", sc->rx_low_watermark, sc->max_rx_bd);
6816 
6817 	if_printf(ifp, "         0x%08X - (sc->tx_mbuf_alloc) "
6818 		  "tx mbufs allocated\n", sc->tx_mbuf_alloc);
6819 
6823 	if_printf(ifp, "         0x%08X - (sc->used_tx_bd) used tx_bd's\n",
6824 		  sc->used_tx_bd);
6825 
6826 	if_printf(ifp, "0x%08X/%08X - (sc->tx_hi_watermark) tx hi watermark\n",
6827 		  sc->tx_hi_watermark, sc->max_tx_bd);
6828 
6829 	if_printf(ifp, "         0x%08X - (sc->mbuf_alloc_failed) "
6830 		  "failed mbuf alloc\n", sc->mbuf_alloc_failed);
6831 
6832 	if_printf(ifp,
6833 	"----------------------------"
6834 	"----------------"
6835 	"----------------------------\n");
6836 }
6837 
6838 
6839 /****************************************************************************/
6840 /* Prints out the hardware state through a summary of important registers,  */
6841 /* followed by a complete register dump.                                    */
6842 /*                                                                          */
6843 /* Returns:                                                                 */
6844 /*   Nothing.                                                               */
6845 /****************************************************************************/
6846 static void
6847 bce_dump_hw_state(struct bce_softc *sc)
6848 {
6849 	struct ifnet *ifp = &sc->arpcom.ac_if;
6850 	uint32_t val1;
6851 	int i;
6852 
6853 	if_printf(ifp,
6854 	"----------------------------"
6855 	" Hardware State "
6856 	"----------------------------\n");
6857 
6858 	if_printf(ifp, "0x%08X - bootcode version\n", sc->bce_fw_ver);
6859 
6860 	val1 = REG_RD(sc, BCE_MISC_ENABLE_STATUS_BITS);
6861 	if_printf(ifp, "0x%08X - (0x%06X) misc_enable_status_bits\n",
6862 		  val1, BCE_MISC_ENABLE_STATUS_BITS);
6863 
6864 	val1 = REG_RD(sc, BCE_DMA_STATUS);
6865 	if_printf(ifp, "0x%08X - (0x%04X) dma_status\n", val1, BCE_DMA_STATUS);
6866 
6867 	val1 = REG_RD(sc, BCE_CTX_STATUS);
6868 	if_printf(ifp, "0x%08X - (0x%04X) ctx_status\n", val1, BCE_CTX_STATUS);
6869 
6870 	val1 = REG_RD(sc, BCE_EMAC_STATUS);
6871 	if_printf(ifp, "0x%08X - (0x%04X) emac_status\n",
6872 		  val1, BCE_EMAC_STATUS);
6873 
6874 	val1 = REG_RD(sc, BCE_RPM_STATUS);
6875 	if_printf(ifp, "0x%08X - (0x%04X) rpm_status\n", val1, BCE_RPM_STATUS);
6876 
6877 	val1 = REG_RD(sc, BCE_TBDR_STATUS);
6878 	if_printf(ifp, "0x%08X - (0x%04X) tbdr_status\n",
6879 		  val1, BCE_TBDR_STATUS);
6880 
6881 	val1 = REG_RD(sc, BCE_TDMA_STATUS);
6882 	if_printf(ifp, "0x%08X - (0x%04X) tdma_status\n",
6883 		  val1, BCE_TDMA_STATUS);
6884 
6885 	val1 = REG_RD(sc, BCE_HC_STATUS);
6886 	if_printf(ifp, "0x%08X - (0x%06X) hc_status\n", val1, BCE_HC_STATUS);
6887 
6888 	val1 = REG_RD_IND(sc, BCE_TXP_CPU_STATE);
6889 	if_printf(ifp, "0x%08X - (0x%06X) txp_cpu_state\n",
6890 		  val1, BCE_TXP_CPU_STATE);
6891 
6892 	val1 = REG_RD_IND(sc, BCE_TPAT_CPU_STATE);
6893 	if_printf(ifp, "0x%08X - (0x%06X) tpat_cpu_state\n",
6894 		  val1, BCE_TPAT_CPU_STATE);
6895 
6896 	val1 = REG_RD_IND(sc, BCE_RXP_CPU_STATE);
6897 	if_printf(ifp, "0x%08X - (0x%06X) rxp_cpu_state\n",
6898 		  val1, BCE_RXP_CPU_STATE);
6899 
6900 	val1 = REG_RD_IND(sc, BCE_COM_CPU_STATE);
6901 	if_printf(ifp, "0x%08X - (0x%06X) com_cpu_state\n",
6902 		  val1, BCE_COM_CPU_STATE);
6903 
6904 	val1 = REG_RD_IND(sc, BCE_MCP_CPU_STATE);
6905 	if_printf(ifp, "0x%08X - (0x%06X) mcp_cpu_state\n",
6906 		  val1, BCE_MCP_CPU_STATE);
6907 
6908 	val1 = REG_RD_IND(sc, BCE_CP_CPU_STATE);
6909 	if_printf(ifp, "0x%08X - (0x%06X) cp_cpu_state\n",
6910 		  val1, BCE_CP_CPU_STATE);
6911 
6912 	if_printf(ifp,
6913 	"----------------------------"
6914 	"----------------"
6915 	"----------------------------\n");
6916 
6917 	if_printf(ifp,
6918 	"----------------------------"
6919 	" Register  Dump "
6920 	"----------------------------\n");
6921 
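	/* Dump registers 0x400-0x7ffc, four 32-bit values per line. */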
6922 	for (i = 0x400; i < 0x8000; i += 0x10) {
6923 		if_printf(ifp, "0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n", i,
6924 			  REG_RD(sc, i),
6925 			  REG_RD(sc, i + 0x4),
6926 			  REG_RD(sc, i + 0x8),
6927 			  REG_RD(sc, i + 0xc));
6928 	}
6929 
6930 	if_printf(ifp,
6931 	"----------------------------"
6932 	"----------------"
6933 	"----------------------------\n");
6934 }
6935 
6936 
6937 /****************************************************************************/
6938 /* Prints out the TXP state.                                                */
6939 /*                                                                          */
6940 /* Returns:                                                                 */
6941 /*   Nothing.                                                               */
6942 /****************************************************************************/
6943 static void
6944 bce_dump_txp_state(struct bce_softc *sc)
6945 {
6946 	struct ifnet *ifp = &sc->arpcom.ac_if;
6947 	uint32_t val1;
6948 	int i;
6949 
6950 	if_printf(ifp,
6951 	"----------------------------"
6952 	"   TXP  State   "
6953 	"----------------------------\n");
6954 
6955 	val1 = REG_RD_IND(sc, BCE_TXP_CPU_MODE);
6956 	if_printf(ifp, "0x%08X - (0x%06X) txp_cpu_mode\n",
6957 		  val1, BCE_TXP_CPU_MODE);
6958 
6959 	val1 = REG_RD_IND(sc, BCE_TXP_CPU_STATE);
6960 	if_printf(ifp, "0x%08X - (0x%06X) txp_cpu_state\n",
6961 		  val1, BCE_TXP_CPU_STATE);
6962 
6963 	val1 = REG_RD_IND(sc, BCE_TXP_CPU_EVENT_MASK);
6964 	if_printf(ifp, "0x%08X - (0x%06X) txp_cpu_event_mask\n",
6965 		  val1, BCE_TXP_CPU_EVENT_MASK);
6966 
6967 	if_printf(ifp,
6968 	"----------------------------"
6969 	" Register  Dump "
6970 	"----------------------------\n");
6971 
6972 	for (i = BCE_TXP_CPU_MODE; i < 0x68000; i += 0x10) {
6973 		/* Skip the big blank spaces */
6974 		if (i < 0x45400 || i > 0x5ffff) {
6975 			if_printf(ifp, "0x%04X: "
6976 				  "0x%08X 0x%08X 0x%08X 0x%08X\n", i,
6977 				  REG_RD_IND(sc, i),
6978 				  REG_RD_IND(sc, i + 0x4),
6979 				  REG_RD_IND(sc, i + 0x8),
6980 				  REG_RD_IND(sc, i + 0xc));
6981 		}
6982 	}
6983 
6984 	if_printf(ifp,
6985 	"----------------------------"
6986 	"----------------"
6987 	"----------------------------\n");
6988 }
6989 
6990 
6991 /****************************************************************************/
6992 /* Prints out the RXP state.                                                */
6993 /*                                                                          */
6994 /* Returns:                                                                 */
6995 /*   Nothing.                                                               */
6996 /****************************************************************************/
6997 static void
6998 bce_dump_rxp_state(struct bce_softc *sc)
6999 {
7000 	struct ifnet *ifp = &sc->arpcom.ac_if;
7001 	uint32_t val1;
7002 	int i;
7003 
7004 	if_printf(ifp,
7005 	"----------------------------"
7006 	"   RXP  State   "
7007 	"----------------------------\n");
7008 
7009 	val1 = REG_RD_IND(sc, BCE_RXP_CPU_MODE);
7010 	if_printf(ifp, "0x%08X - (0x%06X) rxp_cpu_mode\n",
7011 		  val1, BCE_RXP_CPU_MODE);
7012 
7013 	val1 = REG_RD_IND(sc, BCE_RXP_CPU_STATE);
7014 	if_printf(ifp, "0x%08X - (0x%06X) rxp_cpu_state\n",
7015 		  val1, BCE_RXP_CPU_STATE);
7016 
7017 	val1 = REG_RD_IND(sc, BCE_RXP_CPU_EVENT_MASK);
7018 	if_printf(ifp, "0x%08X - (0x%06X) rxp_cpu_event_mask\n",
7019 		  val1, BCE_RXP_CPU_EVENT_MASK);
7020 
7021 	if_printf(ifp,
7022 	"----------------------------"
7023 	" Register  Dump "
7024 	"----------------------------\n");
7025 
7026 	for (i = BCE_RXP_CPU_MODE; i < 0xe8fff; i += 0x10) {
7027 		/* Skip the big blank spaces */
7028 		if (i < 0xc5400 || i > 0xdffff) {
7029 			if_printf(ifp, "0x%04X: "
7030 				  "0x%08X 0x%08X 0x%08X 0x%08X\n", i,
7031 				  REG_RD_IND(sc, i),
7032 				  REG_RD_IND(sc, i + 0x4),
7033 				  REG_RD_IND(sc, i + 0x8),
7034 				  REG_RD_IND(sc, i + 0xc));
7035 		}
7036 	}
7037 
7038 	if_printf(ifp,
7039 	"----------------------------"
7040 	"----------------"
7041 	"----------------------------\n");
7042 }
7043 
7044 
7045 /****************************************************************************/
7046 /* Prints out the TPAT state.                                               */
7047 /*                                                                          */
7048 /* Returns:                                                                 */
7049 /*   Nothing.                                                               */
7050 /****************************************************************************/
7051 static void
7052 bce_dump_tpat_state(struct bce_softc *sc)
7053 {
7054 	struct ifnet *ifp = &sc->arpcom.ac_if;
7055 	uint32_t val1;
7056 	int i;
7057 
7058 	if_printf(ifp,
7059 	"----------------------------"
7060 	"   TPAT State   "
7061 	"----------------------------\n");
7062 
7063 	val1 = REG_RD_IND(sc, BCE_TPAT_CPU_MODE);
7064 	if_printf(ifp, "0x%08X - (0x%06X) tpat_cpu_mode\n",
7065 		  val1, BCE_TPAT_CPU_MODE);
7066 
7067 	val1 = REG_RD_IND(sc, BCE_TPAT_CPU_STATE);
7068 	if_printf(ifp, "0x%08X - (0x%06X) tpat_cpu_state\n",
7069 		  val1, BCE_TPAT_CPU_STATE);
7070 
7071 	val1 = REG_RD_IND(sc, BCE_TPAT_CPU_EVENT_MASK);
7072 	if_printf(ifp, "0x%08X - (0x%06X) tpat_cpu_event_mask\n",
7073 		  val1, BCE_TPAT_CPU_EVENT_MASK);
7074 
7075 	if_printf(ifp,
7076 	"----------------------------"
7077 	" Register  Dump "
7078 	"----------------------------\n");
7079 
7080 	for (i = BCE_TPAT_CPU_MODE; i < 0xa3fff; i += 0x10) {
7081 		/* Skip the big blank spaces */
7082 		if (i < 0x85400 || i > 0x9ffff) {
7083 			if_printf(ifp, "0x%04X: "
7084 				  "0x%08X 0x%08X 0x%08X 0x%08X\n", i,
7085 				  REG_RD_IND(sc, i),
7086 				  REG_RD_IND(sc, i + 0x4),
7087 				  REG_RD_IND(sc, i + 0x8),
7088 				  REG_RD_IND(sc, i + 0xc));
7089 		}
7090 	}
7091 
7092 	if_printf(ifp,
7093 	"----------------------------"
7094 	"----------------"
7095 	"----------------------------\n");
7096 }
7097 
7098 
7099 /****************************************************************************/
7100 /* Prints out the driver state and then enters the debugger.                */
7101 /*                                                                          */
7102 /* Returns:                                                                 */
7103 /*   Nothing.                                                               */
7104 /****************************************************************************/
7105 static void
7106 bce_breakpoint(struct bce_softc *sc)
7107 {
7108 #if 0
7109 	bce_freeze_controller(sc);
7110 #endif
7111 
7112 	bce_dump_driver_state(sc);
7113 	bce_dump_status_block(sc);
7114 	bce_dump_tx_chain(sc, 0, TOTAL_TX_BD);
7115 	bce_dump_hw_state(sc);
7116 	bce_dump_txp_state(sc);
7117 
7118 #if 0
7119 	bce_unfreeze_controller(sc);
7120 #endif
7121 
7122 	/* Call the debugger. */
7123 	breakpoint();
7124 }
7125 
7126 #endif	/* BCE_DEBUG */
7127 
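/****************************************************************************/
/* Sysctl handlers for the TX/RX host coalescing tunables.                  */
/*                                                                          */
/* Each handler below is a thin wrapper that hands the matching softc       */
/* field and change mask to bce_sysctl_coal_change().                       */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/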
7128 static int
7129 bce_sysctl_tx_bds_int(SYSCTL_HANDLER_ARGS)
7130 {
7131 	struct bce_softc *sc = arg1;
7132 
7133 	return bce_sysctl_coal_change(oidp, arg1, arg2, req,
7134 			&sc->bce_tx_quick_cons_trip_int,
7135 			BCE_COALMASK_TX_BDS_INT);
7136 }
7137 
7138 static int
7139 bce_sysctl_tx_bds(SYSCTL_HANDLER_ARGS)
7140 {
7141 	struct bce_softc *sc = arg1;
7142 
7143 	return bce_sysctl_coal_change(oidp, arg1, arg2, req,
7144 			&sc->bce_tx_quick_cons_trip,
7145 			BCE_COALMASK_TX_BDS);
7146 }
7147 
7148 static int
7149 bce_sysctl_tx_ticks_int(SYSCTL_HANDLER_ARGS)
7150 {
7151 	struct bce_softc *sc = arg1;
7152 
7153 	return bce_sysctl_coal_change(oidp, arg1, arg2, req,
7154 			&sc->bce_tx_ticks_int,
7155 			BCE_COALMASK_TX_TICKS_INT);
7156 }
7157 
7158 static int
7159 bce_sysctl_tx_ticks(SYSCTL_HANDLER_ARGS)
7160 {
7161 	struct bce_softc *sc = arg1;
7162 
7163 	return bce_sysctl_coal_change(oidp, arg1, arg2, req,
7164 			&sc->bce_tx_ticks,
7165 			BCE_COALMASK_TX_TICKS);
7166 }
7167 
7168 static int
7169 bce_sysctl_rx_bds_int(SYSCTL_HANDLER_ARGS)
7170 {
7171 	struct bce_softc *sc = arg1;
7172 
7173 	return bce_sysctl_coal_change(oidp, arg1, arg2, req,
7174 			&sc->bce_rx_quick_cons_trip_int,
7175 			BCE_COALMASK_RX_BDS_INT);
7176 }
7177 
7178 static int
7179 bce_sysctl_rx_bds(SYSCTL_HANDLER_ARGS)
7180 {
7181 	struct bce_softc *sc = arg1;
7182 
7183 	return bce_sysctl_coal_change(oidp, arg1, arg2, req,
7184 			&sc->bce_rx_quick_cons_trip,
7185 			BCE_COALMASK_RX_BDS);
7186 }
7187 
7188 static int
7189 bce_sysctl_rx_ticks_int(SYSCTL_HANDLER_ARGS)
7190 {
7191 	struct bce_softc *sc = arg1;
7192 
7193 	return bce_sysctl_coal_change(oidp, arg1, arg2, req,
7194 			&sc->bce_rx_ticks_int,
7195 			BCE_COALMASK_RX_TICKS_INT);
7196 }
7197 
7198 static int
7199 bce_sysctl_rx_ticks(SYSCTL_HANDLER_ARGS)
7200 {
7201 	struct bce_softc *sc = arg1;
7202 
7203 	return bce_sysctl_coal_change(oidp, arg1, arg2, req,
7204 			&sc->bce_rx_ticks,
7205 			BCE_COALMASK_RX_TICKS);
7206 }
7207 
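/****************************************************************************/
/* Common handler for the coalescing sysctls.  Reports the current value    */
/* and, when a new value is written, validates it (negative values are      */
/* rejected), stores it in the softc and flags the change in                */
/* bce_coalchg_mask so bce_coal_change() can program it into the chip.      */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/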
7208 static int
7209 bce_sysctl_coal_change(SYSCTL_HANDLER_ARGS, uint32_t *coal,
7210 		       uint32_t coalchg_mask)
7211 {
7212 	struct bce_softc *sc = arg1;
7213 	struct ifnet *ifp = &sc->arpcom.ac_if;
7214 	int error = 0, v;
7215 
7216 	lwkt_serialize_enter(ifp->if_serializer);
7217 
7218 	v = *coal;
7219 	error = sysctl_handle_int(oidp, &v, 0, req);
7220 	if (!error && req->newptr != NULL) {
7221 		if (v < 0) {
7222 			error = EINVAL;
7223 		} else {
7224 			*coal = v;
7225 			sc->bce_coalchg_mask |= coalchg_mask;
7226 		}
7227 	}
7228 
7229 	lwkt_serialize_exit(ifp->if_serializer);
7230 	return error;
7231 }
7232 
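/****************************************************************************/
/* Writes any pending host coalescing parameter changes to the chip and     */
/* then clears the pending-change mask.                                     */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/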
7233 static void
7234 bce_coal_change(struct bce_softc *sc)
7235 {
7236 	struct ifnet *ifp = &sc->arpcom.ac_if;
7237 
7238 	ASSERT_SERIALIZED(ifp->if_serializer);
7239 
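	/*
	 * Nothing to program while the interface is down; the updated
	 * values stay in the softc, only the pending-change mask is
	 * discarded.
	 */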
7240 	if ((ifp->if_flags & IFF_RUNNING) == 0) {
7241 		sc->bce_coalchg_mask = 0;
7242 		return;
7243 	}
7244 
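	/*
	 * Each host coalescing register packs the during-interrupt
	 * value in the upper 16 bits and the normal value in the
	 * lower 16 bits.
	 */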
7245 	if (sc->bce_coalchg_mask &
7246 	    (BCE_COALMASK_TX_BDS | BCE_COALMASK_TX_BDS_INT)) {
7247 		REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
7248 		       (sc->bce_tx_quick_cons_trip_int << 16) |
7249 		       sc->bce_tx_quick_cons_trip);
7250 		if (bootverbose) {
7251 			if_printf(ifp, "tx_bds %u, tx_bds_int %u\n",
7252 				  sc->bce_tx_quick_cons_trip,
7253 				  sc->bce_tx_quick_cons_trip_int);
7254 		}
7255 	}
7256 
7257 	if (sc->bce_coalchg_mask &
7258 	    (BCE_COALMASK_TX_TICKS | BCE_COALMASK_TX_TICKS_INT)) {
7259 		REG_WR(sc, BCE_HC_TX_TICKS,
7260 		       (sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks);
7261 		if (bootverbose) {
7262 			if_printf(ifp, "tx_ticks %u, tx_ticks_int %u\n",
7263 				  sc->bce_tx_ticks, sc->bce_tx_ticks_int);
7264 		}
7265 	}
7266 
7267 	if (sc->bce_coalchg_mask &
7268 	    (BCE_COALMASK_RX_BDS | BCE_COALMASK_RX_BDS_INT)) {
7269 		REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
7270 		       (sc->bce_rx_quick_cons_trip_int << 16) |
7271 		       sc->bce_rx_quick_cons_trip);
7272 		if (bootverbose) {
7273 			if_printf(ifp, "rx_bds %u, rx_bds_int %u\n",
7274 				  sc->bce_rx_quick_cons_trip,
7275 				  sc->bce_rx_quick_cons_trip_int);
7276 		}
7277 	}
7278 
7279 	if (sc->bce_coalchg_mask &
7280 	    (BCE_COALMASK_RX_TICKS | BCE_COALMASK_RX_TICKS_INT)) {
7281 		REG_WR(sc, BCE_HC_RX_TICKS,
7282 		       (sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks);
7283 		if (bootverbose) {
7284 			if_printf(ifp, "rx_ticks %u, rx_ticks_int %u\n",
7285 				  sc->bce_rx_ticks, sc->bce_rx_ticks_int);
7286 		}
7287 	}
7288 
7289 	sc->bce_coalchg_mask = 0;
7290 }
7291