xref: /dragonfly/sys/dev/netif/bce/if_bce.c (revision 18ba9178)
1 /*-
2  * Copyright (c) 2006-2007 Broadcom Corporation
3  *	David Christensen <davidch@broadcom.com>.  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  *
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. Neither the name of Broadcom Corporation nor the name of its contributors
15  *    may be used to endorse or promote products derived from this software
16  *    without specific prior written consent.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
22  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28  * THE POSSIBILITY OF SUCH DAMAGE.
29  *
30  * $FreeBSD: src/sys/dev/bce/if_bce.c,v 1.31 2007/05/16 23:34:11 davidch Exp $
31  * $DragonFly: src/sys/dev/netif/bce/if_bce.c,v 1.21 2008/11/19 13:57:49 sephe Exp $
32  */
33 
34 /*
35  * The following controllers are supported by this driver:
36  *   BCM5706C A2, A3
37  *   BCM5708C B1, B2
38  *
39  * The following controllers are not supported by this driver:
40  *   BCM5706C A0, A1
41  *   BCM5706S A0, A1, A2, A3
42  *   BCM5708C A0, B0
43  *   BCM5708S A0, B0, B1, B2
44  */
45 
46 #include "opt_bce.h"
47 #include "opt_polling.h"
48 
49 #include <sys/param.h>
50 #include <sys/bus.h>
51 #include <sys/endian.h>
52 #include <sys/kernel.h>
53 #include <sys/interrupt.h>
54 #include <sys/mbuf.h>
55 #include <sys/malloc.h>
56 #include <sys/queue.h>
57 #ifdef BCE_DEBUG
58 #include <sys/random.h>
59 #endif
60 #include <sys/rman.h>
61 #include <sys/serialize.h>
62 #include <sys/socket.h>
63 #include <sys/sockio.h>
64 #include <sys/sysctl.h>
65 
66 #include <net/bpf.h>
67 #include <net/ethernet.h>
68 #include <net/if.h>
69 #include <net/if_arp.h>
70 #include <net/if_dl.h>
71 #include <net/if_media.h>
72 #include <net/if_types.h>
73 #include <net/ifq_var.h>
74 #include <net/vlan/if_vlan_var.h>
75 #include <net/vlan/if_vlan_ether.h>
76 
77 #include <dev/netif/mii_layer/mii.h>
78 #include <dev/netif/mii_layer/miivar.h>
79 
80 #include <bus/pci/pcireg.h>
81 #include <bus/pci/pcivar.h>
82 
83 #include "miibus_if.h"
84 
85 #include <dev/netif/bce/if_bcereg.h>
86 #include <dev/netif/bce/if_bcefw.h>
87 
88 /****************************************************************************/
89 /* BCE Debug Options                                                        */
90 /****************************************************************************/
91 #ifdef BCE_DEBUG
92 
93 static uint32_t	bce_debug = BCE_WARN;
94 
95 /*
96  *          0 = Never
97  *          1 = 1 in 2,147,483,648
98  *        256 = 1 in     8,388,608
99  *       2048 = 1 in     1,048,576
100  *      65536 = 1 in        32,768
101  *    1048576 = 1 in         2,048
102  *  268435456 = 1 in             8
103  *  536870912 = 1 in             4
104  * 1073741824 = 1 in             2
105  *
106  * bce_debug_l2fhdr_status_check:
107  *     How often the l2_fhdr frame error check will fail.
108  *
109  * bce_debug_unexpected_attention:
110  *     How often the unexpected attention check will fail.
111  *
112  * bce_debug_mbuf_allocation_failure:
113  *     How often to simulate an mbuf allocation failure.
114  *
115  * bce_debug_dma_map_addr_failure:
116  *     How often to simulate a DMA mapping failure.
117  *
118  * bce_debug_bootcode_running_failure:
119  *     How often to simulate a bootcode failure.
120  */
121 static int	bce_debug_l2fhdr_status_check = 0;
122 static int	bce_debug_unexpected_attention = 0;
123 static int	bce_debug_mbuf_allocation_failure = 0;
124 static int	bce_debug_dma_map_addr_failure = 0;
125 static int	bce_debug_bootcode_running_failure = 0;
126 
127 #endif	/* BCE_DEBUG */
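/*
 * Illustrative sketch (not part of the original driver): each debug tunable
 * above is a threshold out of 2^31, so a value V simulates a failure with
 * probability V / 2,147,483,648 (e.g. 2048 gives roughly 1 in 1,048,576).
 * Assuming a hypothetical helper rand31() that returns a uniform 31-bit
 * random value, an mbuf allocation failure could be injected along the
 * lines of:
 *
 *	if ((uint32_t)rand31() < (uint32_t)bce_debug_mbuf_allocation_failure)
 *		return ENOBUFS;
 *
 * The driver's actual failure-injection checks are implemented by the debug
 * macros in if_bcereg.h.
 */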
128 
129 
130 /****************************************************************************/
131 /* PCI Device ID Table                                                      */
132 /*                                                                          */
133 /* Used by bce_probe() to identify the devices supported by this driver.    */
134 /****************************************************************************/
135 #define BCE_DEVDESC_MAX		64
136 
137 static struct bce_type bce_devs[] = {
138 	/* BCM5706C Controllers and OEM boards. */
139 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706,  HP_VENDORID, 0x3101,
140 		"HP NC370T Multifunction Gigabit Server Adapter" },
141 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706,  HP_VENDORID, 0x3106,
142 		"HP NC370i Multifunction Gigabit Server Adapter" },
143 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706,  PCI_ANY_ID,  PCI_ANY_ID,
144 		"Broadcom NetXtreme II BCM5706 1000Base-T" },
145 
146 	/* BCM5706S controllers and OEM boards. */
147 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, HP_VENDORID, 0x3102,
148 		"HP NC370F Multifunction Gigabit Server Adapter" },
149 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, PCI_ANY_ID,  PCI_ANY_ID,
150 		"Broadcom NetXtreme II BCM5706 1000Base-SX" },
151 
152 	/* BCM5708C controllers and OEM boards. */
153 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708,  PCI_ANY_ID,  PCI_ANY_ID,
154 		"Broadcom NetXtreme II BCM5708 1000Base-T" },
155 
156 	/* BCM5708S controllers and OEM boards. */
157 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708S,  PCI_ANY_ID,  PCI_ANY_ID,
158 		"Broadcom NetXtreme II BCM5708S 1000Base-SX" },
159 	{ 0, 0, 0, 0, NULL }
160 };
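/*
 * Editorial note: the HP OEM entries with specific subvendor/subdevice IDs
 * are listed before the PCI_ANY_ID wildcard entries, so bce_probe() below
 * reports the more specific description when an OEM board matches first.
 */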
161 
162 
163 /****************************************************************************/
164 /* Supported Flash NVRAM device data.                                       */
165 /****************************************************************************/
166 static const struct flash_spec flash_table[] =
167 {
168 	/* Slow EEPROM */
169 	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
170 	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
171 	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
172 	 "EEPROM - slow"},
173 	/* Expansion entry 0001 */
174 	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
175 	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
176 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
177 	 "Entry 0001"},
178 	/* Saifun SA25F010 (non-buffered flash) */
179 	/* strap, cfg1, & write1 need updates */
180 	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
181 	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
182 	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
183 	 "Non-buffered flash (128kB)"},
184 	/* Saifun SA25F020 (non-buffered flash) */
185 	/* strap, cfg1, & write1 need updates */
186 	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
187 	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
188 	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
189 	 "Non-buffered flash (256kB)"},
190 	/* Expansion entry 0100 */
191 	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
192 	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
193 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
194 	 "Entry 0100"},
195 	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
196 	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
197 	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
198 	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
199 	 "Entry 0101: ST M45PE10 (128kB non-buffered)"},
200 	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
201 	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
202 	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
203 	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
204 	 "Entry 0110: ST M45PE20 (256kB non-buffered)"},
205 	/* Saifun SA25F005 (non-buffered flash) */
206 	/* strap, cfg1, & write1 need updates */
207 	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
208 	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
209 	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
210 	 "Non-buffered flash (64kB)"},
211 	/* Fast EEPROM */
212 	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
213 	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
214 	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
215 	 "EEPROM - fast"},
216 	/* Expansion entry 1001 */
217 	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
218 	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
219 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
220 	 "Entry 1001"},
221 	/* Expansion entry 1010 */
222 	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
223 	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
224 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
225 	 "Entry 1010"},
226 	/* ATMEL AT45DB011B (buffered flash) */
227 	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
228 	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
229 	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
230 	 "Buffered flash (128kB)"},
231 	/* Expansion entry 1100 */
232 	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
233 	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
234 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
235 	 "Entry 1100"},
236 	/* Expansion entry 1101 */
237 	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
238 	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
239 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
240 	 "Entry 1101"},
241 	/* Atmel Expansion entry 1110 */
242 	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
243 	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
244 	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
245 	 "Entry 1110 (Atmel)"},
246 	/* ATMEL AT45DB021B (buffered flash) */
247 	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
248 	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
249 	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
250 	 "Buffered flash (256kB)"},
251 };
252 
253 
254 /****************************************************************************/
255 /* DragonFly device entry points.                                           */
256 /****************************************************************************/
257 static int	bce_probe(device_t);
258 static int	bce_attach(device_t);
259 static int	bce_detach(device_t);
260 static void	bce_shutdown(device_t);
261 
262 /****************************************************************************/
263 /* BCE Debug Data Structure Dump Routines                                   */
264 /****************************************************************************/
265 #ifdef BCE_DEBUG
266 static void	bce_dump_mbuf(struct bce_softc *, struct mbuf *);
267 static void	bce_dump_tx_mbuf_chain(struct bce_softc *, int, int);
268 static void	bce_dump_rx_mbuf_chain(struct bce_softc *, int, int);
269 static void	bce_dump_txbd(struct bce_softc *, int, struct tx_bd *);
270 static void	bce_dump_rxbd(struct bce_softc *, int, struct rx_bd *);
271 static void	bce_dump_l2fhdr(struct bce_softc *, int,
272 				struct l2_fhdr *) __unused;
273 static void	bce_dump_tx_chain(struct bce_softc *, int, int);
274 static void	bce_dump_rx_chain(struct bce_softc *, int, int);
275 static void	bce_dump_status_block(struct bce_softc *);
276 static void	bce_dump_driver_state(struct bce_softc *);
277 static void	bce_dump_stats_block(struct bce_softc *) __unused;
278 static void	bce_dump_hw_state(struct bce_softc *);
279 static void	bce_dump_txp_state(struct bce_softc *);
280 static void	bce_dump_rxp_state(struct bce_softc *) __unused;
281 static void	bce_dump_tpat_state(struct bce_softc *) __unused;
282 static void	bce_freeze_controller(struct bce_softc *) __unused;
283 static void	bce_unfreeze_controller(struct bce_softc *) __unused;
284 static void	bce_breakpoint(struct bce_softc *);
285 #endif	/* BCE_DEBUG */
286 
287 
288 /****************************************************************************/
289 /* BCE Register/Memory Access Routines                                      */
290 /****************************************************************************/
291 static uint32_t	bce_reg_rd_ind(struct bce_softc *, uint32_t);
292 static void	bce_reg_wr_ind(struct bce_softc *, uint32_t, uint32_t);
293 static void	bce_ctx_wr(struct bce_softc *, uint32_t, uint32_t, uint32_t);
294 static int	bce_miibus_read_reg(device_t, int, int);
295 static int	bce_miibus_write_reg(device_t, int, int, int);
296 static void	bce_miibus_statchg(device_t);
297 
298 
299 /****************************************************************************/
300 /* BCE NVRAM Access Routines                                                */
301 /****************************************************************************/
302 static int	bce_acquire_nvram_lock(struct bce_softc *);
303 static int	bce_release_nvram_lock(struct bce_softc *);
304 static void	bce_enable_nvram_access(struct bce_softc *);
305 static void	bce_disable_nvram_access(struct bce_softc *);
306 static int	bce_nvram_read_dword(struct bce_softc *, uint32_t, uint8_t *,
307 				     uint32_t);
308 static int	bce_init_nvram(struct bce_softc *);
309 static int	bce_nvram_read(struct bce_softc *, uint32_t, uint8_t *, int);
310 static int	bce_nvram_test(struct bce_softc *);
311 #ifdef BCE_NVRAM_WRITE_SUPPORT
312 static int	bce_enable_nvram_write(struct bce_softc *);
313 static void	bce_disable_nvram_write(struct bce_softc *);
314 static int	bce_nvram_erase_page(struct bce_softc *, uint32_t);
315 static int	bce_nvram_write_dword(struct bce_softc *, uint32_t, uint8_t *,
316 				      uint32_t);
317 static int	bce_nvram_write(struct bce_softc *, uint32_t, uint8_t *,
318 				int) __unused;
319 #endif
320 
321 /****************************************************************************/
322 /* BCE DMA Allocate/Free Routines                                           */
323 /****************************************************************************/
324 static int	bce_dma_alloc(struct bce_softc *);
325 static void	bce_dma_free(struct bce_softc *);
326 static void	bce_dma_map_addr(void *, bus_dma_segment_t *, int, int);
327 static void	bce_dma_map_mbuf(void *, bus_dma_segment_t *, int,
328 				 bus_size_t, int);
329 
330 /****************************************************************************/
331 /* BCE Firmware Synchronization and Load                                    */
332 /****************************************************************************/
333 static int	bce_fw_sync(struct bce_softc *, uint32_t);
334 static void	bce_load_rv2p_fw(struct bce_softc *, uint32_t *,
335 				 uint32_t, uint32_t);
336 static void	bce_load_cpu_fw(struct bce_softc *, struct cpu_reg *,
337 				struct fw_info *);
338 static void	bce_init_cpus(struct bce_softc *);
339 
340 static void	bce_stop(struct bce_softc *);
341 static int	bce_reset(struct bce_softc *, uint32_t);
342 static int	bce_chipinit(struct bce_softc *);
343 static int	bce_blockinit(struct bce_softc *);
344 static int	bce_newbuf_std(struct bce_softc *, struct mbuf *,
345 			       uint16_t *, uint16_t *, uint32_t *);
346 
347 static int	bce_init_tx_chain(struct bce_softc *);
348 static int	bce_init_rx_chain(struct bce_softc *);
349 static void	bce_free_rx_chain(struct bce_softc *);
350 static void	bce_free_tx_chain(struct bce_softc *);
351 
352 static int	bce_encap(struct bce_softc *, struct mbuf **);
353 static void	bce_start(struct ifnet *);
354 static int	bce_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
355 static void	bce_watchdog(struct ifnet *);
356 static int	bce_ifmedia_upd(struct ifnet *);
357 static void	bce_ifmedia_sts(struct ifnet *, struct ifmediareq *);
358 static void	bce_init(void *);
359 static void	bce_mgmt_init(struct bce_softc *);
360 
361 static void	bce_init_ctx(struct bce_softc *);
362 static void	bce_get_mac_addr(struct bce_softc *);
363 static void	bce_set_mac_addr(struct bce_softc *);
364 static void	bce_phy_intr(struct bce_softc *);
365 static void	bce_rx_intr(struct bce_softc *, int);
366 static void	bce_tx_intr(struct bce_softc *);
367 static void	bce_disable_intr(struct bce_softc *);
368 static void	bce_enable_intr(struct bce_softc *);
369 
370 #ifdef DEVICE_POLLING
371 static void	bce_poll(struct ifnet *, enum poll_cmd, int);
372 #endif
373 static void	bce_intr(void *);
374 static void	bce_set_rx_mode(struct bce_softc *);
375 static void	bce_stats_update(struct bce_softc *);
376 static void	bce_tick(void *);
377 static void	bce_tick_serialized(struct bce_softc *);
378 static void	bce_add_sysctls(struct bce_softc *);
379 
380 static void	bce_coal_change(struct bce_softc *);
381 static int	bce_sysctl_tx_bds_int(SYSCTL_HANDLER_ARGS);
382 static int	bce_sysctl_tx_bds(SYSCTL_HANDLER_ARGS);
383 static int	bce_sysctl_tx_ticks_int(SYSCTL_HANDLER_ARGS);
384 static int	bce_sysctl_tx_ticks(SYSCTL_HANDLER_ARGS);
385 static int	bce_sysctl_rx_bds_int(SYSCTL_HANDLER_ARGS);
386 static int	bce_sysctl_rx_bds(SYSCTL_HANDLER_ARGS);
387 static int	bce_sysctl_rx_ticks_int(SYSCTL_HANDLER_ARGS);
388 static int	bce_sysctl_rx_ticks(SYSCTL_HANDLER_ARGS);
389 static int	bce_sysctl_coal_change(SYSCTL_HANDLER_ARGS,
390 				       uint32_t *, uint32_t);
391 
392 /*
393  * NOTE:
394  * Don't set bce_tx_ticks_int/bce_tx_ticks to 1023.  Linux's bnx2
395  * takes 1023 as the TX ticks limit.  However, using 1023 will
396  * cause 5708(B2) to generate extra interrupts (~2000/s) even when
397  * there is _no_ network activity on the NIC.
398  */
399 static uint32_t	bce_tx_bds_int = 255;		/* bcm: 20 */
400 static uint32_t	bce_tx_bds = 255;		/* bcm: 20 */
401 static uint32_t	bce_tx_ticks_int = 1022;	/* bcm: 80 */
402 static uint32_t	bce_tx_ticks = 1022;		/* bcm: 80 */
403 static uint32_t	bce_rx_bds_int = 128;		/* bcm: 6 */
404 static uint32_t	bce_rx_bds = 128;		/* bcm: 6 */
405 static uint32_t	bce_rx_ticks_int = 125;		/* bcm: 18 */
406 static uint32_t	bce_rx_ticks = 125;		/* bcm: 18 */
407 
408 TUNABLE_INT("hw.bce.tx_bds_int", &bce_tx_bds_int);
409 TUNABLE_INT("hw.bce.tx_bds", &bce_tx_bds);
410 TUNABLE_INT("hw.bce.tx_ticks_int", &bce_tx_ticks_int);
411 TUNABLE_INT("hw.bce.tx_ticks", &bce_tx_ticks);
412 TUNABLE_INT("hw.bce.rx_bds_int", &bce_rx_bds_int);
413 TUNABLE_INT("hw.bce.rx_bds", &bce_rx_bds);
414 TUNABLE_INT("hw.bce.rx_ticks_int", &bce_rx_ticks_int);
415 TUNABLE_INT("hw.bce.rx_ticks", &bce_rx_ticks);
416 
417 /****************************************************************************/
418 /* DragonFly device dispatch table.                                         */
419 /****************************************************************************/
420 static device_method_t bce_methods[] = {
421 	/* Device interface */
422 	DEVMETHOD(device_probe,		bce_probe),
423 	DEVMETHOD(device_attach,	bce_attach),
424 	DEVMETHOD(device_detach,	bce_detach),
425 	DEVMETHOD(device_shutdown,	bce_shutdown),
426 
427 	/* bus interface */
428 	DEVMETHOD(bus_print_child,	bus_generic_print_child),
429 	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
430 
431 	/* MII interface */
432 	DEVMETHOD(miibus_readreg,	bce_miibus_read_reg),
433 	DEVMETHOD(miibus_writereg,	bce_miibus_write_reg),
434 	DEVMETHOD(miibus_statchg,	bce_miibus_statchg),
435 
436 	{ 0, 0 }
437 };
438 
439 static driver_t bce_driver = {
440 	"bce",
441 	bce_methods,
442 	sizeof(struct bce_softc)
443 };
444 
445 static devclass_t bce_devclass;
446 
447 MODULE_DEPEND(bce, pci, 1, 1, 1);
448 MODULE_DEPEND(bce, ether, 1, 1, 1);
449 MODULE_DEPEND(bce, miibus, 1, 1, 1);
450 
451 DRIVER_MODULE(bce, pci, bce_driver, bce_devclass, 0, 0);
452 DRIVER_MODULE(miibus, bce, miibus_driver, miibus_devclass, 0, 0);
453 
454 
455 /****************************************************************************/
456 /* Device probe function.                                                   */
457 /*                                                                          */
458 /* Compares the device to the driver's list of supported devices and        */
459 /* reports back to the OS whether this is the right driver for the device.  */
460 /*                                                                          */
461 /* Returns:                                                                 */
462 /*   BUS_PROBE_DEFAULT on success, positive value on failure.               */
463 /****************************************************************************/
464 static int
465 bce_probe(device_t dev)
466 {
467 	struct bce_type *t;
468 	uint16_t vid, did, svid, sdid;
469 
470 	/* Get the data for the device to be probed. */
471 	vid  = pci_get_vendor(dev);
472 	did  = pci_get_device(dev);
473 	svid = pci_get_subvendor(dev);
474 	sdid = pci_get_subdevice(dev);
475 
476 	/* Look through the list of known devices for a match. */
477 	for (t = bce_devs; t->bce_name != NULL; ++t) {
478 		if (vid == t->bce_vid && did == t->bce_did &&
479 		    (svid == t->bce_svid || t->bce_svid == PCI_ANY_ID) &&
480 		    (sdid == t->bce_sdid || t->bce_sdid == PCI_ANY_ID)) {
481 		    	uint32_t revid = pci_read_config(dev, PCIR_REVID, 4);
482 			char *descbuf;
483 
484 			descbuf = kmalloc(BCE_DEVDESC_MAX, M_TEMP, M_WAITOK);
485 
486 			/* Print out the device identity. */
487 			ksnprintf(descbuf, BCE_DEVDESC_MAX, "%s (%c%d)",
488 				  t->bce_name,
489 				  ((revid & 0xf0) >> 4) + 'A', revid & 0xf);
490 
491 			device_set_desc_copy(dev, descbuf);
492 			kfree(descbuf, M_TEMP);
493 			return 0;
494 		}
495 	}
496 	return ENXIO;
497 }
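/*
 * Worked example (illustrative): for a BCM5708 with PCI revision ID 0x12,
 * ((0x12 & 0xf0) >> 4) + 'A' is 'B' and (0x12 & 0xf) is 2, so the device
 * description above is reported as
 * "Broadcom NetXtreme II BCM5708 1000Base-T (B2)".
 */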
498 
499 
500 /****************************************************************************/
501 /* Device attach function.                                                  */
502 /*                                                                          */
503 /* Allocates device resources, performs secondary chip identification,      */
504 /* resets and initializes the hardware, and initializes driver instance     */
505 /* variables.                                                               */
506 /*                                                                          */
507 /* Returns:                                                                 */
508 /*   0 on success, positive value on failure.                               */
509 /****************************************************************************/
510 static int
511 bce_attach(device_t dev)
512 {
513 	struct bce_softc *sc = device_get_softc(dev);
514 	struct ifnet *ifp = &sc->arpcom.ac_if;
515 	uint32_t val;
516 	int rid, rc = 0;
517 #ifdef notyet
518 	int count;
519 #endif
520 
521 	sc->bce_dev = dev;
522 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
523 
524 	pci_enable_busmaster(dev);
525 
526 	/* Allocate PCI memory resources. */
527 	rid = PCIR_BAR(0);
528 	sc->bce_res_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
529 						 RF_ACTIVE | PCI_RF_DENSE);
530 	if (sc->bce_res_mem == NULL) {
531 		device_printf(dev, "PCI memory allocation failed\n");
532 		return ENXIO;
533 	}
534 	sc->bce_btag = rman_get_bustag(sc->bce_res_mem);
535 	sc->bce_bhandle = rman_get_bushandle(sc->bce_res_mem);
536 
537 	/* Allocate PCI IRQ resources. */
538 #ifdef notyet
539 	count = pci_msi_count(dev);
540 	if (count == 1 && pci_alloc_msi(dev, &count) == 0) {
541 		rid = 1;
542 		sc->bce_flags |= BCE_USING_MSI_FLAG;
543 	} else
544 #endif
545 	rid = 0;
546 	sc->bce_res_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
547 						 RF_SHAREABLE | RF_ACTIVE);
548 	if (sc->bce_res_irq == NULL) {
549 		device_printf(dev, "PCI map interrupt failed\n");
550 		rc = ENXIO;
551 		goto fail;
552 	}
553 
554 	/*
555 	 * Configure byte swap and enable indirect register access.
556 	 * Rely on CPU to do target byte swapping on big endian systems.
557 	 * Access to registers outside of PCI configuration space is not
558 	 * valid until this is done.
559 	 */
560 	pci_write_config(dev, BCE_PCICFG_MISC_CONFIG,
561 			 BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
562 			 BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP, 4);
563 
564 	/* Save ASIC revision info. */
565 	sc->bce_chipid =  REG_RD(sc, BCE_MISC_ID);
566 
567 	/* Weed out any non-production controller revisions. */
568 	switch(BCE_CHIP_ID(sc)) {
569 	case BCE_CHIP_ID_5706_A0:
570 	case BCE_CHIP_ID_5706_A1:
571 	case BCE_CHIP_ID_5708_A0:
572 	case BCE_CHIP_ID_5708_B0:
573 		device_printf(dev, "Unsupported chip id 0x%08x!\n",
574 			      BCE_CHIP_ID(sc));
575 		rc = ENODEV;
576 		goto fail;
577 	}
578 
579 	/*
580 	 * The embedded PCIe to PCI-X bridge (EPB)
581 	 * in the 5708 cannot address memory above
582 	 * 40 bits (E7_5708CB1_23043 & E6_5708SB1_23043).
583 	 */
584 	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708)
585 		sc->max_bus_addr = BCE_BUS_SPACE_MAXADDR;
586 	else
587 		sc->max_bus_addr = BUS_SPACE_MAXADDR;
588 
589 	/*
590 	 * Find the base address for shared memory access.
591 	 * Newer versions of bootcode use a signature and offset
592 	 * while older versions use a fixed address.
593 	 */
594 	val = REG_RD_IND(sc, BCE_SHM_HDR_SIGNATURE);
595 	if ((val & BCE_SHM_HDR_SIGNATURE_SIG_MASK) == BCE_SHM_HDR_SIGNATURE_SIG)
596 		sc->bce_shmem_base = REG_RD_IND(sc, BCE_SHM_HDR_ADDR_0);
597 	else
598 		sc->bce_shmem_base = HOST_VIEW_SHMEM_BASE;
599 
600 	DBPRINT(sc, BCE_INFO, "bce_shmem_base = 0x%08X\n", sc->bce_shmem_base);
601 
602 	/* Get PCI bus information (speed and type). */
603 	val = REG_RD(sc, BCE_PCICFG_MISC_STATUS);
604 	if (val & BCE_PCICFG_MISC_STATUS_PCIX_DET) {
605 		uint32_t clkreg;
606 
607 		sc->bce_flags |= BCE_PCIX_FLAG;
608 
609 		clkreg = REG_RD(sc, BCE_PCICFG_PCI_CLOCK_CONTROL_BITS) &
610 			 BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
611 		switch (clkreg) {
612 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
613 			sc->bus_speed_mhz = 133;
614 			break;
615 
616 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
617 			sc->bus_speed_mhz = 100;
618 			break;
619 
620 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
621 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
622 			sc->bus_speed_mhz = 66;
623 			break;
624 
625 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
626 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
627 			sc->bus_speed_mhz = 50;
628 			break;
629 
630 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
631 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
632 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
633 			sc->bus_speed_mhz = 33;
634 			break;
635 		}
636 	} else {
637 		if (val & BCE_PCICFG_MISC_STATUS_M66EN)
638 			sc->bus_speed_mhz = 66;
639 		else
640 			sc->bus_speed_mhz = 33;
641 	}
642 
643 	if (val & BCE_PCICFG_MISC_STATUS_32BIT_DET)
644 		sc->bce_flags |= BCE_PCI_32BIT_FLAG;
645 
646 	device_printf(dev, "ASIC ID 0x%08X; Revision (%c%d); PCI%s %s %dMHz\n",
647 		      sc->bce_chipid,
648 		      ((BCE_CHIP_ID(sc) & 0xf000) >> 12) + 'A',
649 		      (BCE_CHIP_ID(sc) & 0x0ff0) >> 4,
650 		      (sc->bce_flags & BCE_PCIX_FLAG) ? "-X" : "",
651 		      (sc->bce_flags & BCE_PCI_32BIT_FLAG) ?
652 		      "32-bit" : "64-bit", sc->bus_speed_mhz);
653 
654 	/* Reset the controller. */
655 	rc = bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
656 	if (rc != 0)
657 		goto fail;
658 
659 	/* Initialize the controller. */
660 	rc = bce_chipinit(sc);
661 	if (rc != 0) {
662 		device_printf(dev, "Controller initialization failed!\n");
663 		goto fail;
664 	}
665 
666 	/* Perform NVRAM test. */
667 	rc = bce_nvram_test(sc);
668 	if (rc != 0) {
669 		device_printf(dev, "NVRAM test failed!\n");
670 		goto fail;
671 	}
672 
673 	/* Fetch the permanent Ethernet MAC address. */
674 	bce_get_mac_addr(sc);
675 
676 	/*
677 	 * Trip points control how many BDs
678 	 * should be ready before generating an
679 	 * interrupt while ticks control how long
680 	 * a BD can sit in the chain before
681 	 * generating an interrupt.  Set the default
682 	 * values for the RX and TX rings.
683 	 */
684 
685 #ifdef BCE_DEBUG
686 	/* Force more frequent interrupts. */
687 	sc->bce_tx_quick_cons_trip_int = 1;
688 	sc->bce_tx_quick_cons_trip     = 1;
689 	sc->bce_tx_ticks_int           = 0;
690 	sc->bce_tx_ticks               = 0;
691 
692 	sc->bce_rx_quick_cons_trip_int = 1;
693 	sc->bce_rx_quick_cons_trip     = 1;
694 	sc->bce_rx_ticks_int           = 0;
695 	sc->bce_rx_ticks               = 0;
696 #else
697 	sc->bce_tx_quick_cons_trip_int = bce_tx_bds_int;
698 	sc->bce_tx_quick_cons_trip     = bce_tx_bds;
699 	sc->bce_tx_ticks_int           = bce_tx_ticks_int;
700 	sc->bce_tx_ticks               = bce_tx_ticks;
701 
702 	sc->bce_rx_quick_cons_trip_int = bce_rx_bds_int;
703 	sc->bce_rx_quick_cons_trip     = bce_rx_bds;
704 	sc->bce_rx_ticks_int           = bce_rx_ticks_int;
705 	sc->bce_rx_ticks               = bce_rx_ticks;
706 #endif
707 
708 	/* Update statistics once every second. */
709 	sc->bce_stats_ticks = 1000000 & 0xffff00;
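	/*
	 * Illustrative arithmetic: 1000000 & 0xffff00 = 999936; the mask
	 * clears the low 8 bits of the value, which still works out to
	 * roughly once per second as the comment above intends.
	 */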
710 
711 	/*
712 	 * The copper based NetXtreme II controllers
713 	 * use an integrated PHY at address 1 while
714 	 * the SerDes controllers use a PHY at
715 	 * address 2.
716 	 */
717 	sc->bce_phy_addr = 1;
718 
719 	if (BCE_CHIP_BOND_ID(sc) & BCE_CHIP_BOND_ID_SERDES_BIT) {
720 		sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
721 		sc->bce_flags |= BCE_NO_WOL_FLAG;
722 		if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708) {
723 			sc->bce_phy_addr = 2;
724 			val = REG_RD_IND(sc, sc->bce_shmem_base +
725 					 BCE_SHARED_HW_CFG_CONFIG);
726 			if (val & BCE_SHARED_HW_CFG_PHY_2_5G)
727 				sc->bce_phy_flags |= BCE_PHY_2_5G_CAPABLE_FLAG;
728 		}
729 	}
730 
731 	/* Allocate DMA memory resources. */
732 	rc = bce_dma_alloc(sc);
733 	if (rc != 0) {
734 		device_printf(dev, "DMA resource allocation failed!\n");
735 		goto fail;
736 	}
737 
738 	/* Initialize the ifnet interface. */
739 	ifp->if_softc = sc;
740 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
741 	ifp->if_ioctl = bce_ioctl;
742 	ifp->if_start = bce_start;
743 	ifp->if_init = bce_init;
744 	ifp->if_watchdog = bce_watchdog;
745 #ifdef DEVICE_POLLING
746 	ifp->if_poll = bce_poll;
747 #endif
748 	ifp->if_mtu = ETHERMTU;
749 	ifp->if_hwassist = BCE_IF_HWASSIST;
750 	ifp->if_capabilities = BCE_IF_CAPABILITIES;
751 	ifp->if_capenable = ifp->if_capabilities;
752 	ifq_set_maxlen(&ifp->if_snd, USABLE_TX_BD);
753 	ifq_set_ready(&ifp->if_snd);
754 
755 	if (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG)
756 		ifp->if_baudrate = IF_Gbps(2.5);
757 	else
758 		ifp->if_baudrate = IF_Gbps(1);
759 
760 	/* Assume a standard 1500 byte MTU size for mbuf allocations. */
761 	sc->mbuf_alloc_size  = MCLBYTES;
762 
763 	/* Look for our PHY. */
764 	rc = mii_phy_probe(dev, &sc->bce_miibus,
765 			   bce_ifmedia_upd, bce_ifmedia_sts);
766 	if (rc != 0) {
767 		device_printf(dev, "PHY probe failed!\n");
768 		goto fail;
769 	}
770 
771 	/* Attach to the Ethernet interface list. */
772 	ether_ifattach(ifp, sc->eaddr, NULL);
773 
774 	callout_init(&sc->bce_stat_ch);
775 
776 	/* Hookup IRQ last. */
777 	rc = bus_setup_intr(dev, sc->bce_res_irq, INTR_MPSAFE, bce_intr, sc,
778 			    &sc->bce_intrhand, ifp->if_serializer);
779 	if (rc != 0) {
780 		device_printf(dev, "Failed to setup IRQ!\n");
781 		ether_ifdetach(ifp);
782 		goto fail;
783 	}
784 
785 	ifp->if_cpuid = ithread_cpuid(rman_get_start(sc->bce_res_irq));
786 	KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus);
787 
788 	/* Print some important debugging info. */
789 	DBRUN(BCE_INFO, bce_dump_driver_state(sc));
790 
791 	/* Add the supported sysctls to the kernel. */
792 	bce_add_sysctls(sc);
793 
794 	/* Get the firmware running so IPMI still works */
795 	bce_mgmt_init(sc);
796 
797 	return 0;
798 fail:
799 	bce_detach(dev);
800 	return(rc);
801 }
802 
803 
804 /****************************************************************************/
805 /* Device detach function.                                                  */
806 /*                                                                          */
807 /* Stops the controller, resets the controller, and releases resources.     */
808 /*                                                                          */
809 /* Returns:                                                                 */
810 /*   0 on success, positive value on failure.                               */
811 /****************************************************************************/
812 static int
813 bce_detach(device_t dev)
814 {
815 	struct bce_softc *sc = device_get_softc(dev);
816 
817 	if (device_is_attached(dev)) {
818 		struct ifnet *ifp = &sc->arpcom.ac_if;
819 
820 		/* Stop and reset the controller. */
821 		lwkt_serialize_enter(ifp->if_serializer);
822 		bce_stop(sc);
823 		bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
824 		bus_teardown_intr(dev, sc->bce_res_irq, sc->bce_intrhand);
825 		lwkt_serialize_exit(ifp->if_serializer);
826 
827 		ether_ifdetach(ifp);
828 	}
829 
830 	/* If we have a child device on the MII bus remove it too. */
831 	if (sc->bce_miibus)
832 		device_delete_child(dev, sc->bce_miibus);
833 	bus_generic_detach(dev);
834 
835 	if (sc->bce_res_irq != NULL) {
836 		bus_release_resource(dev, SYS_RES_IRQ,
837 			sc->bce_flags & BCE_USING_MSI_FLAG ? 1 : 0,
838 			sc->bce_res_irq);
839 	}
840 
841 #ifdef notyet
842 	if (sc->bce_flags & BCE_USING_MSI_FLAG)
843 		pci_release_msi(dev);
844 #endif
845 
846 	if (sc->bce_res_mem != NULL) {
847 		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0),
848 				     sc->bce_res_mem);
849 	}
850 
851 	bce_dma_free(sc);
852 
853 	if (sc->bce_sysctl_tree != NULL)
854 		sysctl_ctx_free(&sc->bce_sysctl_ctx);
855 
856 	return 0;
857 }
858 
859 
860 /****************************************************************************/
861 /* Device shutdown function.                                                */
862 /*                                                                          */
863 /* Stops and resets the controller.                                         */
864 /*                                                                          */
865 /* Returns:                                                                 */
866 /*   Nothing                                                                */
867 /****************************************************************************/
868 static void
869 bce_shutdown(device_t dev)
870 {
871 	struct bce_softc *sc = device_get_softc(dev);
872 	struct ifnet *ifp = &sc->arpcom.ac_if;
873 
874 	lwkt_serialize_enter(ifp->if_serializer);
875 	bce_stop(sc);
876 	bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
877 	lwkt_serialize_exit(ifp->if_serializer);
878 }
879 
880 
881 /****************************************************************************/
882 /* Indirect register read.                                                  */
883 /*                                                                          */
884 /* Reads NetXtreme II registers using an index/data register pair in PCI    */
885 /* configuration space.  Using this mechanism avoids issues with posted     */
886 /* reads but is much slower than memory-mapped I/O.                         */
887 /*                                                                          */
888 /* Returns:                                                                 */
889 /*   The value of the register.                                             */
890 /****************************************************************************/
891 static uint32_t
892 bce_reg_rd_ind(struct bce_softc *sc, uint32_t offset)
893 {
894 	device_t dev = sc->bce_dev;
895 
896 	pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4);
897 #ifdef BCE_DEBUG
898 	{
899 		uint32_t val;
900 		val = pci_read_config(dev, BCE_PCICFG_REG_WINDOW, 4);
901 		DBPRINT(sc, BCE_EXCESSIVE,
902 			"%s(); offset = 0x%08X, val = 0x%08X\n",
903 			__func__, offset, val);
904 		return val;
905 	}
906 #else
907 	return pci_read_config(dev, BCE_PCICFG_REG_WINDOW, 4);
908 #endif
909 }
910 
911 
912 /****************************************************************************/
913 /* Indirect register write.                                                 */
914 /*                                                                          */
915 /* Writes NetXtreme II registers using an index/data register pair in PCI   */
916 /* configuration space.  Using this mechanism avoids issues with posted     */
917 /* writes but is much slower than memory-mapped I/O.                        */
918 /*                                                                          */
919 /* Returns:                                                                 */
920 /*   Nothing.                                                               */
921 /****************************************************************************/
922 static void
923 bce_reg_wr_ind(struct bce_softc *sc, uint32_t offset, uint32_t val)
924 {
925 	device_t dev = sc->bce_dev;
926 
927 	DBPRINT(sc, BCE_EXCESSIVE, "%s(); offset = 0x%08X, val = 0x%08X\n",
928 		__func__, offset, val);
929 
930 	pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4);
931 	pci_write_config(dev, BCE_PCICFG_REG_WINDOW, val, 4);
932 }
933 
934 
935 /****************************************************************************/
936 /* Context memory write.                                                    */
937 /*                                                                          */
938 /* The NetXtreme II controller uses context memory to track connection      */
939 /* information for L2 and higher network protocols.                         */
940 /*                                                                          */
941 /* Returns:                                                                 */
942 /*   Nothing.                                                               */
943 /****************************************************************************/
944 static void
945 bce_ctx_wr(struct bce_softc *sc, uint32_t cid_addr, uint32_t offset,
946 	   uint32_t val)
947 {
948 	DBPRINT(sc, BCE_EXCESSIVE, "%s(); cid_addr = 0x%08X, offset = 0x%08X, "
949 		"val = 0x%08X\n", __func__, cid_addr, offset, val);
950 
951 	offset += cid_addr;
952 	REG_WR(sc, BCE_CTX_DATA_ADR, offset);
953 	REG_WR(sc, BCE_CTX_DATA, val);
954 }
955 
956 
957 /****************************************************************************/
958 /* PHY register read.                                                       */
959 /*                                                                          */
960 /* Implements register reads on the MII bus.                                */
961 /*                                                                          */
962 /* Returns:                                                                 */
963 /*   The value of the register.                                             */
964 /****************************************************************************/
965 static int
966 bce_miibus_read_reg(device_t dev, int phy, int reg)
967 {
968 	struct bce_softc *sc = device_get_softc(dev);
969 	uint32_t val;
970 	int i;
971 
972 	/* Make sure we are accessing the correct PHY address. */
973 	if (phy != sc->bce_phy_addr) {
974 		DBPRINT(sc, BCE_VERBOSE,
975 			"Invalid PHY address %d for PHY read!\n", phy);
976 		return 0;
977 	}
978 
979 	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
980 		val = REG_RD(sc, BCE_EMAC_MDIO_MODE);
981 		val &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL;
982 
983 		REG_WR(sc, BCE_EMAC_MDIO_MODE, val);
984 		REG_RD(sc, BCE_EMAC_MDIO_MODE);
985 
986 		DELAY(40);
987 	}
988 
989 	val = BCE_MIPHY(phy) | BCE_MIREG(reg) |
990 	      BCE_EMAC_MDIO_COMM_COMMAND_READ | BCE_EMAC_MDIO_COMM_DISEXT |
991 	      BCE_EMAC_MDIO_COMM_START_BUSY;
992 	REG_WR(sc, BCE_EMAC_MDIO_COMM, val);
993 
994 	for (i = 0; i < BCE_PHY_TIMEOUT; i++) {
995 		DELAY(10);
996 
997 		val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
998 		if (!(val & BCE_EMAC_MDIO_COMM_START_BUSY)) {
999 			DELAY(5);
1000 
1001 			val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
1002 			val &= BCE_EMAC_MDIO_COMM_DATA;
1003 			break;
1004 		}
1005 	}
1006 
1007 	if (val & BCE_EMAC_MDIO_COMM_START_BUSY) {
1008 		if_printf(&sc->arpcom.ac_if,
1009 			  "Error: PHY read timeout! phy = %d, reg = 0x%04X\n",
1010 			  phy, reg);
1011 		val = 0x0;
1012 	} else {
1013 		val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
1014 	}
1015 
1016 	DBPRINT(sc, BCE_EXCESSIVE,
1017 		"%s(): phy = %d, reg = 0x%04X, val = 0x%04X\n",
1018 		__func__, phy, (uint16_t)reg & 0xffff, (uint16_t) val & 0xffff);
1019 
1020 	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1021 		val = REG_RD(sc, BCE_EMAC_MDIO_MODE);
1022 		val |= BCE_EMAC_MDIO_MODE_AUTO_POLL;
1023 
1024 		REG_WR(sc, BCE_EMAC_MDIO_MODE, val);
1025 		REG_RD(sc, BCE_EMAC_MDIO_MODE);
1026 
1027 		DELAY(40);
1028 	}
1029 	return (val & 0xffff);
1030 }
1031 
1032 
1033 /****************************************************************************/
1034 /* PHY register write.                                                      */
1035 /*                                                                          */
1036 /* Implements register writes on the MII bus.                               */
1037 /*                                                                          */
1038 /* Returns:                                                                 */
1039 /*   0 on success, positive value on failure.                               */
1040 /****************************************************************************/
1041 static int
1042 bce_miibus_write_reg(device_t dev, int phy, int reg, int val)
1043 {
1044 	struct bce_softc *sc = device_get_softc(dev);
1045 	uint32_t val1;
1046 	int i;
1047 
1048 	/* Make sure we are accessing the correct PHY address. */
1049 	if (phy != sc->bce_phy_addr) {
1050 		DBPRINT(sc, BCE_WARN,
1051 			"Invalid PHY address %d for PHY write!\n", phy);
1052 		return(0);
1053 	}
1054 
1055 	DBPRINT(sc, BCE_EXCESSIVE,
1056 		"%s(): phy = %d, reg = 0x%04X, val = 0x%04X\n",
1057 		__func__, phy, (uint16_t)(reg & 0xffff),
1058 		(uint16_t)(val & 0xffff));
1059 
1060 	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1061 		val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE);
1062 		val1 &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL;
1063 
1064 		REG_WR(sc, BCE_EMAC_MDIO_MODE, val1);
1065 		REG_RD(sc, BCE_EMAC_MDIO_MODE);
1066 
1067 		DELAY(40);
1068 	}
1069 
1070 	val1 = BCE_MIPHY(phy) | BCE_MIREG(reg) | val |
1071 		BCE_EMAC_MDIO_COMM_COMMAND_WRITE |
1072 		BCE_EMAC_MDIO_COMM_START_BUSY | BCE_EMAC_MDIO_COMM_DISEXT;
1073 	REG_WR(sc, BCE_EMAC_MDIO_COMM, val1);
1074 
1075 	for (i = 0; i < BCE_PHY_TIMEOUT; i++) {
1076 		DELAY(10);
1077 
1078 		val1 = REG_RD(sc, BCE_EMAC_MDIO_COMM);
1079 		if (!(val1 & BCE_EMAC_MDIO_COMM_START_BUSY)) {
1080 			DELAY(5);
1081 			break;
1082 		}
1083 	}
1084 
1085 	if (val1 & BCE_EMAC_MDIO_COMM_START_BUSY)
1086 		if_printf(&sc->arpcom.ac_if, "PHY write timeout!\n");
1087 
1088 	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1089 		val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE);
1090 		val1 |= BCE_EMAC_MDIO_MODE_AUTO_POLL;
1091 
1092 		REG_WR(sc, BCE_EMAC_MDIO_MODE, val1);
1093 		REG_RD(sc, BCE_EMAC_MDIO_MODE);
1094 
1095 		DELAY(40);
1096 	}
1097 	return 0;
1098 }
1099 
1100 
1101 /****************************************************************************/
1102 /* MII bus status change.                                                   */
1103 /*                                                                          */
1104 /* Called by the MII bus driver when the PHY establishes link to set the    */
1105 /* MAC interface registers.                                                 */
1106 /*                                                                          */
1107 /* Returns:                                                                 */
1108 /*   Nothing.                                                               */
1109 /****************************************************************************/
1110 static void
1111 bce_miibus_statchg(device_t dev)
1112 {
1113 	struct bce_softc *sc = device_get_softc(dev);
1114 	struct mii_data *mii = device_get_softc(sc->bce_miibus);
1115 
1116 	DBPRINT(sc, BCE_INFO, "mii_media_active = 0x%08X\n",
1117 		mii->mii_media_active);
1118 
1119 #ifdef BCE_DEBUG
1120 	/* Decode the interface media flags. */
1121 	if_printf(&sc->arpcom.ac_if, "Media: ( ");
1122 	switch(IFM_TYPE(mii->mii_media_active)) {
1123 	case IFM_ETHER:
1124 		kprintf("Ethernet )");
1125 		break;
1126 	default:
1127 		kprintf("Unknown )");
1128 		break;
1129 	}
1130 
1131 	kprintf(" Media Options: ( ");
1132 	switch(IFM_SUBTYPE(mii->mii_media_active)) {
1133 	case IFM_AUTO:
1134 		kprintf("Autoselect )");
1135 		break;
1136 	case IFM_MANUAL:
1137 		kprintf("Manual )");
1138 		break;
1139 	case IFM_NONE:
1140 		kprintf("None )");
1141 		break;
1142 	case IFM_10_T:
1143 		kprintf("10Base-T )");
1144 		break;
1145 	case IFM_100_TX:
1146 		kprintf("100Base-TX )");
1147 		break;
1148 	case IFM_1000_SX:
1149 		kprintf("1000Base-SX )");
1150 		break;
1151 	case IFM_1000_T:
1152 		kprintf("1000Base-T )");
1153 		break;
1154 	default:
1155 		kprintf("Other )");
1156 		break;
1157 	}
1158 
1159 	kprintf(" Global Options: (");
1160 	if (mii->mii_media_active & IFM_FDX)
1161 		kprintf(" FullDuplex");
1162 	if (mii->mii_media_active & IFM_HDX)
1163 		kprintf(" HalfDuplex");
1164 	if (mii->mii_media_active & IFM_LOOP)
1165 		kprintf(" Loopback");
1166 	if (mii->mii_media_active & IFM_FLAG0)
1167 		kprintf(" Flag0");
1168 	if (mii->mii_media_active & IFM_FLAG1)
1169 		kprintf(" Flag1");
1170 	if (mii->mii_media_active & IFM_FLAG2)
1171 		kprintf(" Flag2");
1172 	kprintf(" )\n");
1173 #endif
1174 
1175 	BCE_CLRBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT);
1176 
1177 	/*
1178 	 * Set MII or GMII interface based on the speed negotiated
1179 	 * by the PHY.
1180 	 */
1181 	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
1182 	    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) {
1183 		DBPRINT(sc, BCE_INFO, "Setting GMII interface.\n");
1184 		BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT_GMII);
1185 	} else {
1186 		DBPRINT(sc, BCE_INFO, "Setting MII interface.\n");
1187 		BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT_MII);
1188 	}
1189 
1190 	/*
1191 	 * Set half or full duplex based on the duplex mode negotiated
1192 	 * by the PHY.
1193 	 */
1194 	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
1195 		DBPRINT(sc, BCE_INFO, "Setting Full-Duplex interface.\n");
1196 		BCE_CLRBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_HALF_DUPLEX);
1197 	} else {
1198 		DBPRINT(sc, BCE_INFO, "Setting Half-Duplex interface.\n");
1199 		BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_HALF_DUPLEX);
1200 	}
1201 }
1202 
1203 
1204 /****************************************************************************/
1205 /* Acquire NVRAM lock.                                                      */
1206 /*                                                                          */
1207 /* Before the NVRAM can be accessed the caller must acquire an NVRAM lock.  */
1208 /* Lock 0 is reserved, lock 1 is used by firmware, and lock 2 is used by    */
1209 /* the driver.                                                              */
1210 /*                                                                          */
1211 /* Returns:                                                                 */
1212 /*   0 on success, positive value on failure.                               */
1213 /****************************************************************************/
1214 static int
1215 bce_acquire_nvram_lock(struct bce_softc *sc)
1216 {
1217 	uint32_t val;
1218 	int j;
1219 
1220 	DBPRINT(sc, BCE_VERBOSE, "Acquiring NVRAM lock.\n");
1221 
1222 	/* Request access to the flash interface. */
1223 	REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_SET2);
1224 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1225 		val = REG_RD(sc, BCE_NVM_SW_ARB);
1226 		if (val & BCE_NVM_SW_ARB_ARB_ARB2)
1227 			break;
1228 
1229 		DELAY(5);
1230 	}
1231 
1232 	if (j >= NVRAM_TIMEOUT_COUNT) {
1233 		DBPRINT(sc, BCE_WARN, "Timeout acquiring NVRAM lock!\n");
1234 		return EBUSY;
1235 	}
1236 	return 0;
1237 }
1238 
1239 
1240 /****************************************************************************/
1241 /* Release NVRAM lock.                                                      */
1242 /*                                                                          */
1243 /* When the caller is finished accessing NVRAM the lock must be released.   */
1244 /* Lock 0 is reserved, lock 1 is used by firmware, and lock 2 is used by    */
1245 /* the driver.                                                              */
1246 /*                                                                          */
1247 /* Returns:                                                                 */
1248 /*   0 on success, positive value on failure.                               */
1249 /****************************************************************************/
1250 static int
1251 bce_release_nvram_lock(struct bce_softc *sc)
1252 {
1253 	int j;
1254 	uint32_t val;
1255 
1256 	DBPRINT(sc, BCE_VERBOSE, "Releasing NVRAM lock.\n");
1257 
1258 	/*
1259 	 * Relinquish nvram interface.
1260 	 */
1261 	REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_CLR2);
1262 
1263 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1264 		val = REG_RD(sc, BCE_NVM_SW_ARB);
1265 		if (!(val & BCE_NVM_SW_ARB_ARB_ARB2))
1266 			break;
1267 
1268 		DELAY(5);
1269 	}
1270 
1271 	if (j >= NVRAM_TIMEOUT_COUNT) {
1272 		DBPRINT(sc, BCE_WARN, "Timeout releasing NVRAM lock!\n");
1273 		return EBUSY;
1274 	}
1275 	return 0;
1276 }
1277 
1278 
1279 #ifdef BCE_NVRAM_WRITE_SUPPORT
1280 /****************************************************************************/
1281 /* Enable NVRAM write access.                                               */
1282 /*                                                                          */
1283 /* Before writing to NVRAM the caller must enable NVRAM writes.             */
1284 /*                                                                          */
1285 /* Returns:                                                                 */
1286 /*   0 on success, positive value on failure.                               */
1287 /****************************************************************************/
1288 static int
1289 bce_enable_nvram_write(struct bce_softc *sc)
1290 {
1291 	uint32_t val;
1292 
1293 	DBPRINT(sc, BCE_VERBOSE, "Enabling NVRAM write.\n");
1294 
1295 	val = REG_RD(sc, BCE_MISC_CFG);
1296 	REG_WR(sc, BCE_MISC_CFG, val | BCE_MISC_CFG_NVM_WR_EN_PCI);
1297 
1298 	if (!sc->bce_flash_info->buffered) {
1299 		int j;
1300 
1301 		REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
1302 		REG_WR(sc, BCE_NVM_COMMAND,
1303 		       BCE_NVM_COMMAND_WREN | BCE_NVM_COMMAND_DOIT);
1304 
1305 		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1306 			DELAY(5);
1307 
1308 			val = REG_RD(sc, BCE_NVM_COMMAND);
1309 			if (val & BCE_NVM_COMMAND_DONE)
1310 				break;
1311 		}
1312 
1313 		if (j >= NVRAM_TIMEOUT_COUNT) {
1314 			DBPRINT(sc, BCE_WARN, "Timeout writing NVRAM!\n");
1315 			return EBUSY;
1316 		}
1317 	}
1318 	return 0;
1319 }
1320 
1321 
1322 /****************************************************************************/
1323 /* Disable NVRAM write access.                                              */
1324 /*                                                                          */
1325 /* When the caller is finished writing to NVRAM write access must be        */
1326 /* disabled.                                                                */
1327 /*                                                                          */
1328 /* Returns:                                                                 */
1329 /*   Nothing.                                                               */
1330 /****************************************************************************/
1331 static void
1332 bce_disable_nvram_write(struct bce_softc *sc)
1333 {
1334 	uint32_t val;
1335 
1336 	DBPRINT(sc, BCE_VERBOSE, "Disabling NVRAM write.\n");
1337 
1338 	val = REG_RD(sc, BCE_MISC_CFG);
1339 	REG_WR(sc, BCE_MISC_CFG, val & ~BCE_MISC_CFG_NVM_WR_EN);
1340 }
1341 #endif	/* BCE_NVRAM_WRITE_SUPPORT */
1342 
1343 
1344 /****************************************************************************/
1345 /* Enable NVRAM access.                                                     */
1346 /*                                                                          */
1347 /* Before accessing NVRAM for read or write operations the caller must      */
1348 /* enable NVRAM access.                                                      */
1349 /*                                                                          */
1350 /* Returns:                                                                 */
1351 /*   Nothing.                                                               */
1352 /****************************************************************************/
1353 static void
1354 bce_enable_nvram_access(struct bce_softc *sc)
1355 {
1356 	uint32_t val;
1357 
1358 	DBPRINT(sc, BCE_VERBOSE, "Enabling NVRAM access.\n");
1359 
1360 	val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE);
1361 	/* Enable both bits, even on read. */
1362 	REG_WR(sc, BCE_NVM_ACCESS_ENABLE,
1363 	       val | BCE_NVM_ACCESS_ENABLE_EN | BCE_NVM_ACCESS_ENABLE_WR_EN);
1364 }
1365 
1366 
1367 /****************************************************************************/
1368 /* Disable NVRAM access.                                                    */
1369 /*                                                                          */
1370 /* When the caller is finished accessing NVRAM access must be disabled.     */
1371 /*                                                                          */
1372 /* Returns:                                                                 */
1373 /*   Nothing.                                                               */
1374 /****************************************************************************/
1375 static void
1376 bce_disable_nvram_access(struct bce_softc *sc)
1377 {
1378 	uint32_t val;
1379 
1380 	DBPRINT(sc, BCE_VERBOSE, "Disabling NVRAM access.\n");
1381 
1382 	val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE);
1383 
1384 	/* Disable both bits, even after read. */
1385 	REG_WR(sc, BCE_NVM_ACCESS_ENABLE,
1386 	       val & ~(BCE_NVM_ACCESS_ENABLE_EN | BCE_NVM_ACCESS_ENABLE_WR_EN));
1387 }
1388 
1389 
1390 #ifdef BCE_NVRAM_WRITE_SUPPORT
1391 /****************************************************************************/
1392 /* Erase NVRAM page before writing.                                         */
1393 /*                                                                          */
1394 /* Non-buffered flash parts require that a page be erased before it is      */
1395 /* written.                                                                 */
1396 /*                                                                          */
1397 /* Returns:                                                                 */
1398 /*   0 on success, positive value on failure.                               */
1399 /****************************************************************************/
1400 static int
1401 bce_nvram_erase_page(struct bce_softc *sc, uint32_t offset)
1402 {
1403 	uint32_t cmd;
1404 	int j;
1405 
1406 	/* Buffered flash doesn't require an erase. */
1407 	if (sc->bce_flash_info->buffered)
1408 		return 0;
1409 
1410 	DBPRINT(sc, BCE_VERBOSE, "Erasing NVRAM page.\n");
1411 
1412 	/* Build an erase command. */
1413 	cmd = BCE_NVM_COMMAND_ERASE | BCE_NVM_COMMAND_WR |
1414 	      BCE_NVM_COMMAND_DOIT;
1415 
1416 	/*
1417 	 * Clear the DONE bit separately, set the NVRAM address to erase,
1418 	 * and issue the erase command.
1419 	 */
1420 	REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
1421 	REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
1422 	REG_WR(sc, BCE_NVM_COMMAND, cmd);
1423 
1424 	/* Wait for completion. */
1425 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1426 		uint32_t val;
1427 
1428 		DELAY(5);
1429 
1430 		val = REG_RD(sc, BCE_NVM_COMMAND);
1431 		if (val & BCE_NVM_COMMAND_DONE)
1432 			break;
1433 	}
1434 
1435 	if (j >= NVRAM_TIMEOUT_COUNT) {
1436 		DBPRINT(sc, BCE_WARN, "Timeout erasing NVRAM.\n");
1437 		return EBUSY;
1438 	}
1439 	return 0;
1440 }
1441 #endif /* BCE_NVRAM_WRITE_SUPPORT */
1442 
1443 
1444 /****************************************************************************/
1445 /* Read a dword (32 bits) from NVRAM.                                       */
1446 /*                                                                          */
1447 /* Read a 32 bit word from NVRAM.  The caller is assumed to have already    */
1448 /* obtained the NVRAM lock and enabled the controller for NVRAM access.     */
1449 /*                                                                          */
1450 /* Returns:                                                                 */
1451 /*   0 on success and the 32 bit value read, positive value on failure.     */
1452 /****************************************************************************/
1453 static int
1454 bce_nvram_read_dword(struct bce_softc *sc, uint32_t offset, uint8_t *ret_val,
1455 		     uint32_t cmd_flags)
1456 {
1457 	uint32_t cmd;
1458 	int i, rc = 0;
1459 
1460 	/* Build the command word. */
1461 	cmd = BCE_NVM_COMMAND_DOIT | cmd_flags;
1462 
1463 	/* Calculate the offset for buffered flash. */
1464 	if (sc->bce_flash_info->buffered) {
1465 		offset = ((offset / sc->bce_flash_info->page_size) <<
1466 			  sc->bce_flash_info->page_bits) +
1467 			 (offset % sc->bce_flash_info->page_size);
1468 	}
1469 
1470 	/*
1471 	 * Clear the DONE bit separately, set the address to read,
1472 	 * and issue the read.
1473 	 */
1474 	REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
1475 	REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
1476 	REG_WR(sc, BCE_NVM_COMMAND, cmd);
1477 
1478 	/* Wait for completion. */
1479 	for (i = 0; i < NVRAM_TIMEOUT_COUNT; i++) {
1480 		uint32_t val;
1481 
1482 		DELAY(5);
1483 
1484 		val = REG_RD(sc, BCE_NVM_COMMAND);
1485 		if (val & BCE_NVM_COMMAND_DONE) {
1486 			val = REG_RD(sc, BCE_NVM_READ);
1487 
1488 			val = be32toh(val);
1489 			memcpy(ret_val, &val, 4);
1490 			break;
1491 		}
1492 	}
1493 
1494 	/* Check for errors. */
1495 	if (i >= NVRAM_TIMEOUT_COUNT) {
1496 		if_printf(&sc->arpcom.ac_if,
1497 			  "Timeout error reading NVRAM at offset 0x%08X!\n",
1498 			  offset);
1499 		rc = EBUSY;
1500 	}
1501 	return rc;
1502 }
1503 
1504 
1505 #ifdef BCE_NVRAM_WRITE_SUPPORT
1506 /****************************************************************************/
1507 /* Write a dword (32 bits) to NVRAM.                                        */
1508 /*                                                                          */
1509 /* Write a 32 bit word to NVRAM.  The caller is assumed to have already     */
1510 /* obtained the NVRAM lock, enabled the controller for NVRAM access, and    */
1511 /* enabled NVRAM write access.                                              */
1512 /*                                                                          */
1513 /* Returns:                                                                 */
1514 /*   0 on success, positive value on failure.                               */
1515 /****************************************************************************/
1516 static int
1517 bce_nvram_write_dword(struct bce_softc *sc, uint32_t offset, uint8_t *val,
1518 		      uint32_t cmd_flags)
1519 {
1520 	uint32_t cmd, val32;
1521 	int j;
1522 
1523 	/* Build the command word. */
1524 	cmd = BCE_NVM_COMMAND_DOIT | BCE_NVM_COMMAND_WR | cmd_flags;
1525 
1526 	/* Calculate the offset for buffered flash. */
1527 	if (sc->bce_flash_info->buffered) {
1528 		offset = ((offset / sc->bce_flash_info->page_size) <<
1529 			  sc->bce_flash_info->page_bits) +
1530 			 (offset % sc->bce_flash_info->page_size);
1531 	}
1532 
1533 	/*
1534 	 * Clear the DONE bit separately, convert NVRAM data to big-endian,
1535 	 * set the NVRAM address to write, and issue the write command.
1536 	 */
1537 	REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
1538 	memcpy(&val32, val, 4);
1539 	val32 = htobe32(val32);
1540 	REG_WR(sc, BCE_NVM_WRITE, val32);
1541 	REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
1542 	REG_WR(sc, BCE_NVM_COMMAND, cmd);
1543 
1544 	/* Wait for completion. */
1545 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1546 		DELAY(5);
1547 
1548 		if (REG_RD(sc, BCE_NVM_COMMAND) & BCE_NVM_COMMAND_DONE)
1549 			break;
1550 	}
1551 	if (j >= NVRAM_TIMEOUT_COUNT) {
1552 		if_printf(&sc->arpcom.ac_if,
1553 			  "Timeout error writing NVRAM at offset 0x%08X\n",
1554 			  offset);
1555 		return EBUSY;
1556 	}
1557 	return 0;
1558 }
1559 #endif /* BCE_NVRAM_WRITE_SUPPORT */
1560 
1561 
1562 /****************************************************************************/
1563 /* Initialize NVRAM access.                                                 */
1564 /*                                                                          */
1565 /* Identify the NVRAM device in use and prepare the NVRAM interface to      */
1566 /* access that device.                                                      */
1567 /*                                                                          */
1568 /* Returns:                                                                 */
1569 /*   0 on success, positive value on failure.                               */
1570 /****************************************************************************/
1571 static int
1572 bce_init_nvram(struct bce_softc *sc)
1573 {
1574 	uint32_t val;
1575 	int j, entry_count, rc = 0;
1576 	const struct flash_spec *flash;
1577 
1578 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __func__);
1579 
1580 	/* Determine the selected interface. */
1581 	val = REG_RD(sc, BCE_NVM_CFG1);
1582 
1583 	entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
1584 
1585 	/*
1586 	 * Flash reconfiguration is required to support additional
1587 	 * NVRAM devices not directly supported in hardware.
1588 	 * Check if the flash interface was reconfigured
1589 	 * by the bootcode.
1590 	 */
1591 
1592 	if (val & 0x40000000) {
1593 		/* Flash interface reconfigured by bootcode. */
1594 
1595 		DBPRINT(sc, BCE_INFO_LOAD,
1596 			"%s(): Flash WAS reconfigured.\n", __func__);
1597 
1598 		for (j = 0, flash = flash_table; j < entry_count;
1599 		     j++, flash++) {
1600 			if ((val & FLASH_BACKUP_STRAP_MASK) ==
1601 			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
1602 				sc->bce_flash_info = flash;
1603 				break;
1604 			}
1605 		}
1606 	} else {
1607 		/* Flash interface not yet reconfigured. */
1608 		uint32_t mask;
1609 
1610 		DBPRINT(sc, BCE_INFO_LOAD,
1611 			"%s(): Flash was NOT reconfigured.\n", __func__);
1612 
1613 		if (val & (1 << 23))
1614 			mask = FLASH_BACKUP_STRAP_MASK;
1615 		else
1616 			mask = FLASH_STRAP_MASK;
1617 
1618 		/* Look for the matching NVRAM device configuration data. */
1619 		for (j = 0, flash = flash_table; j < entry_count;
1620 		     j++, flash++) {
1621 			/* Check if the device matches any of the known devices. */
1622 			if ((val & mask) == (flash->strapping & mask)) {
1623 				/* Found a device match. */
1624 				sc->bce_flash_info = flash;
1625 
1626 				/* Request access to the flash interface. */
1627 				rc = bce_acquire_nvram_lock(sc);
1628 				if (rc != 0)
1629 					return rc;
1630 
1631 				/* Reconfigure the flash interface. */
1632 				bce_enable_nvram_access(sc);
1633 				REG_WR(sc, BCE_NVM_CFG1, flash->config1);
1634 				REG_WR(sc, BCE_NVM_CFG2, flash->config2);
1635 				REG_WR(sc, BCE_NVM_CFG3, flash->config3);
1636 				REG_WR(sc, BCE_NVM_WRITE1, flash->write1);
1637 				bce_disable_nvram_access(sc);
1638 				bce_release_nvram_lock(sc);
1639 				break;
1640 			}
1641 		}
1642 	}
1643 
1644 	/* Check if a matching device was found. */
1645 	if (j == entry_count) {
1646 		sc->bce_flash_info = NULL;
1647 		if_printf(&sc->arpcom.ac_if, "Unknown Flash NVRAM found!\n");
1648 		return ENODEV;
1649 	}
1650 
1651 	/* Write the flash config data to the shared memory interface. */
1652 	val = REG_RD_IND(sc, sc->bce_shmem_base + BCE_SHARED_HW_CFG_CONFIG2) &
1653 	      BCE_SHARED_HW_CFG2_NVM_SIZE_MASK;
1654 	if (val)
1655 		sc->bce_flash_size = val;
1656 	else
1657 		sc->bce_flash_size = sc->bce_flash_info->total_size;
1658 
1659 	DBPRINT(sc, BCE_INFO_LOAD, "%s() flash->total_size = 0x%08X\n",
1660 		__func__, sc->bce_flash_info->total_size);
1661 
1662 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __func__);
1663 
1664 	return rc;
1665 }
1666 
1667 
1668 /****************************************************************************/
1669 /* Read an arbitrary range of data from NVRAM.                              */
1670 /*                                                                          */
1671 /* Prepares the NVRAM interface for access and reads the requested data     */
1672 /* into the supplied buffer.                                                */
1673 /*                                                                          */
1674 /* Returns:                                                                 */
1675 /*   0 on success and the data read, positive value on failure.             */
1676 /****************************************************************************/
1677 static int
1678 bce_nvram_read(struct bce_softc *sc, uint32_t offset, uint8_t *ret_buf,
1679 	       int buf_size)
1680 {
1681 	uint32_t cmd_flags, offset32, len32, extra;
1682 	int rc = 0;
1683 
1684 	if (buf_size == 0)
1685 		return 0;
1686 
1687 	/* Request access to the flash interface. */
1688 	rc = bce_acquire_nvram_lock(sc);
1689 	if (rc != 0)
1690 		return rc;
1691 
1692 	/* Enable access to flash interface */
1693 	bce_enable_nvram_access(sc);
1694 
1695 	len32 = buf_size;
1696 	offset32 = offset;
1697 	extra = 0;
1698 
1699 	cmd_flags = 0;
1700 
1701 	/* XXX should we release nvram lock if read_dword() fails? */
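	/* Handle a start offset that is not dword aligned. */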
1702 	if (offset32 & 3) {
1703 		uint8_t buf[4];
1704 		uint32_t pre_len;
1705 
1706 		offset32 &= ~3;
1707 		pre_len = 4 - (offset & 3);
1708 
1709 		if (pre_len >= len32) {
1710 			pre_len = len32;
1711 			cmd_flags = BCE_NVM_COMMAND_FIRST | BCE_NVM_COMMAND_LAST;
1712 		} else {
1713 			cmd_flags = BCE_NVM_COMMAND_FIRST;
1714 		}
1715 
1716 		rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
1717 		if (rc)
1718 			return rc;
1719 
1720 		memcpy(ret_buf, buf + (offset & 3), pre_len);
1721 
1722 		offset32 += 4;
1723 		ret_buf += pre_len;
1724 		len32 -= pre_len;
1725 	}
1726 
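	/* Round a length that is not a multiple of 4 up to the next dword. */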
1727 	if (len32 & 3) {
1728 		extra = 4 - (len32 & 3);
1729 		len32 = (len32 + 4) & ~3;
1730 	}
1731 
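	/* A single remaining dword can be read with one command. */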
1732 	if (len32 == 4) {
1733 		uint8_t buf[4];
1734 
1735 		if (cmd_flags)
1736 			cmd_flags = BCE_NVM_COMMAND_LAST;
1737 		else
1738 			cmd_flags = BCE_NVM_COMMAND_FIRST |
1739 				    BCE_NVM_COMMAND_LAST;
1740 
1741 		rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
1742 
1743 		memcpy(ret_buf, buf, 4 - extra);
1744 	} else if (len32 > 0) {
1745 		uint8_t buf[4];
1746 
1747 		/* Read the first word. */
1748 		if (cmd_flags)
1749 			cmd_flags = 0;
1750 		else
1751 			cmd_flags = BCE_NVM_COMMAND_FIRST;
1752 
1753 		rc = bce_nvram_read_dword(sc, offset32, ret_buf, cmd_flags);
1754 
1755 		/* Advance to the next dword. */
1756 		offset32 += 4;
1757 		ret_buf += 4;
1758 		len32 -= 4;
1759 
1760 		while (len32 > 4 && rc == 0) {
1761 			rc = bce_nvram_read_dword(sc, offset32, ret_buf, 0);
1762 
1763 			/* Advance to the next dword. */
1764 			offset32 += 4;
1765 			ret_buf += 4;
1766 			len32 -= 4;
1767 		}
1768 
1769 		if (rc)
1770 			return rc;
1771 
1772 		cmd_flags = BCE_NVM_COMMAND_LAST;
1773 		rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
1774 
1775 		memcpy(ret_buf, buf, 4 - extra);
1776 	}
1777 
1778 	/* Disable access to flash interface and release the lock. */
1779 	bce_disable_nvram_access(sc);
1780 	bce_release_nvram_lock(sc);
1781 
1782 	return rc;
1783 }
1784 
1785 
1786 #ifdef BCE_NVRAM_WRITE_SUPPORT
1787 /****************************************************************************/
1788 /* Write an arbitrary range of data to NVRAM.                               */
1789 /*                                                                          */
1790 /* Prepares the NVRAM interface for write access and writes the requested   */
1791 /* data from the supplied buffer.  The caller is responsible for            */
1792 /* calculating any appropriate CRCs.                                        */
1793 /*                                                                          */
1794 /* Returns:                                                                 */
1795 /*   0 on success, positive value on failure.                               */
1796 /****************************************************************************/
1797 static int
1798 bce_nvram_write(struct bce_softc *sc, uint32_t offset, uint8_t *data_buf,
1799 		int buf_size)
1800 {
1801 	uint32_t written, offset32, len32;
1802 	uint8_t *buf, start[4], end[4];
1803 	int rc = 0;
1804 	int align_start, align_end;
1805 
1806 	buf = data_buf;
1807 	offset32 = offset;
1808 	len32 = buf_size;
1809 	align_end = 0;
1810 	align_start = (offset32 & 3);
1811 
1812 	if (align_start) {
1813 		offset32 &= ~3;
1814 		len32 += align_start;
1815 		rc = bce_nvram_read(sc, offset32, start, 4);
1816 		if (rc)
1817 			return rc;
1818 	}
1819 
1820 	if (len32 & 3) {
1821 		if (len32 > 4 || !align_start) {
1822 			align_end = 4 - (len32 & 3);
1823 			len32 += align_end;
1824 			rc = bce_nvram_read(sc, offset32 + len32 - 4, end, 4);
1825 			if (rc)
1826 				return rc;
1827 		}
1828 	}
1829 
1830 	if (align_start || align_end) {
1831 		buf = kmalloc(len32, M_DEVBUF, M_NOWAIT);
1832 		if (buf == NULL)
1833 			return ENOMEM;
1834 		if (align_start)
1835 			memcpy(buf, start, 4);
1836 		if (align_end)
1837 			memcpy(buf + len32 - 4, end, 4);
1838 		memcpy(buf + align_start, data_buf, buf_size);
1839 	}
1840 
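	/* Write out the (possibly padded) data one flash page at a time. */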
1841 	written = 0;
1842 	while (written < len32 && rc == 0) {
1843 		uint32_t page_start, page_end, data_start, data_end;
1844 		uint32_t addr, cmd_flags;
1845 		int i;
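		/* Holds one full page for read-modify-write of non-buffered flash. */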
1846 		uint8_t flash_buffer[264];
1847 
1848 		/* Find the page_start addr */
1849 		page_start = offset32 + written;
1850 		page_start -= (page_start % sc->bce_flash_info->page_size);
1851 		/* Find the page_end addr */
1852 		page_end = page_start + sc->bce_flash_info->page_size;
1853 		/* Find the data_start addr */
1854 		data_start = (written == 0) ? offset32 : page_start;
1855 		/* Find the data_end addr */
1856 		data_end = (page_end > offset32 + len32) ? (offset32 + len32)
1857 							 : page_end;
1858 
1859 		/* Request access to the flash interface. */
1860 		rc = bce_acquire_nvram_lock(sc);
1861 		if (rc != 0)
1862 			goto nvram_write_end;
1863 
1864 		/* Enable access to flash interface */
1865 		bce_enable_nvram_access(sc);
1866 
1867 		cmd_flags = BCE_NVM_COMMAND_FIRST;
1868 		if (sc->bce_flash_info->buffered == 0) {
1869 			int j;
1870 
1871 			/*
1872 			 * Read the whole page into the buffer
1873 			 * (non-buffer flash only)
1874 			 */
1875 			for (j = 0; j < sc->bce_flash_info->page_size; j += 4) {
1876 				if (j == (sc->bce_flash_info->page_size - 4))
1877 					cmd_flags |= BCE_NVM_COMMAND_LAST;
1878 
1879 				rc = bce_nvram_read_dword(sc, page_start + j,
1880 							  &flash_buffer[j],
1881 							  cmd_flags);
1882 				if (rc)
1883 					goto nvram_write_end;
1884 
1885 				cmd_flags = 0;
1886 			}
1887 		}
1888 
1889 		/* Enable writes to flash interface (unlock write-protect) */
1890 		rc = bce_enable_nvram_write(sc);
1891 		if (rc != 0)
1892 			goto nvram_write_end;
1893 
1894 		/* Erase the page */
1895 		rc = bce_nvram_erase_page(sc, page_start);
1896 		if (rc != 0)
1897 			goto nvram_write_end;
1898 
1899 		/* Re-enable the write again for the actual write */
1900 		bce_enable_nvram_write(sc);
1901 
1902 		/* Loop to write back the buffer data from page_start to
1903 		 * data_start */
1904 		i = 0;
1905 		if (sc->bce_flash_info->buffered == 0) {
1906 			for (addr = page_start; addr < data_start;
1907 			     addr += 4, i += 4) {
1908 				rc = bce_nvram_write_dword(sc, addr,
1909 							   &flash_buffer[i],
1910 							   cmd_flags);
1911 				if (rc != 0)
1912 					goto nvram_write_end;
1913 
1914 				cmd_flags = 0;
1915 			}
1916 		}
1917 
1918 		/* Loop to write the new data from data_start to data_end */
1919 		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
1920 			if (addr == page_end - 4 ||
1921 			    (sc->bce_flash_info->buffered &&
1922 			     addr == data_end - 4))
1923 				cmd_flags |= BCE_NVM_COMMAND_LAST;
1924 
1925 			rc = bce_nvram_write_dword(sc, addr, buf, cmd_flags);
1926 			if (rc != 0)
1927 				goto nvram_write_end;
1928 
1929 			cmd_flags = 0;
1930 			buf += 4;
1931 		}
1932 
1933 		/* Loop to write back the buffer data from data_end
1934 		 * to page_end */
1935 		if (sc->bce_flash_info->buffered == 0) {
1936 			for (addr = data_end; addr < page_end;
1937 			     addr += 4, i += 4) {
1938 				if (addr == page_end-4)
1939 					cmd_flags = BCE_NVM_COMMAND_LAST;
1940 
1941 				rc = bce_nvram_write_dword(sc, addr,
1942 					&flash_buffer[i], cmd_flags);
1943 				if (rc != 0)
1944 					goto nvram_write_end;
1945 
1946 				cmd_flags = 0;
1947 			}
1948 		}
1949 
1950 		/* Disable writes to flash interface (lock write-protect) */
1951 		bce_disable_nvram_write(sc);
1952 
1953 		/* Disable access to flash interface */
1954 		bce_disable_nvram_access(sc);
1955 		bce_release_nvram_lock(sc);
1956 
1957 		/* Increment written */
1958 		written += data_end - data_start;
1959 	}
1960 
1961 nvram_write_end:
1962 	if (align_start || align_end)
1963 		kfree(buf, M_DEVBUF);
1964 	return rc;
1965 }
1966 #endif /* BCE_NVRAM_WRITE_SUPPORT */
1967 
1968 
1969 /****************************************************************************/
1970 /* Verifies that NVRAM is accessible and contains valid data.               */
1971 /*                                                                          */
1972 /* Reads the configuration data from NVRAM and verifies that the CRC is     */
1973 /* correct.                                                                 */
1974 /*                                                                          */
1975 /* Returns:                                                                 */
1976 /*   0 on success, positive value on failure.                               */
1977 /****************************************************************************/
1978 static int
1979 bce_nvram_test(struct bce_softc *sc)
1980 {
1981 	uint32_t buf[BCE_NVRAM_SIZE / 4];
1982 	uint32_t magic, csum;
1983 	uint8_t *data = (uint8_t *)buf;
1984 	int rc = 0;
1985 
1986 	/*
1987 	 * Check that the device NVRAM is valid by reading
1988 	 * the magic value at offset 0.
1989 	 */
1990 	rc = bce_nvram_read(sc, 0, data, 4);
1991 	if (rc != 0)
1992 		return rc;
1993 
1994 	magic = be32toh(buf[0]);
1995 	if (magic != BCE_NVRAM_MAGIC) {
1996 		if_printf(&sc->arpcom.ac_if,
1997 			  "Invalid NVRAM magic value! Expected: 0x%08X, "
1998 			  "Found: 0x%08X\n", BCE_NVRAM_MAGIC, magic);
1999 		return ENODEV;
2000 	}
2001 
2002 	/*
2003 	 * Verify that the device NVRAM includes valid
2004 	 * configuration data.
2005 	 */
2006 	rc = bce_nvram_read(sc, 0x100, data, BCE_NVRAM_SIZE);
2007 	if (rc != 0)
2008 		return rc;
2009 
2010 	csum = ether_crc32_le(data, 0x100);
2011 	if (csum != BCE_CRC32_RESIDUAL) {
2012 		if_printf(&sc->arpcom.ac_if,
2013 			  "Invalid Manufacturing Information NVRAM CRC! "
2014 			  "Expected: 0x%08X, Found: 0x%08X\n",
2015 			  BCE_CRC32_RESIDUAL, csum);
2016 		return ENODEV;
2017 	}
2018 
2019 	csum = ether_crc32_le(data + 0x100, 0x100);
2020 	if (csum != BCE_CRC32_RESIDUAL) {
2021 		if_printf(&sc->arpcom.ac_if,
2022 			  "Invalid Feature Configuration Information "
2023 			  "NVRAM CRC! Expected: 0x%08X, Found: 0x%08X\n",
2024 			  BCE_CRC32_RESIDUAL, csum);
2025 		rc = ENODEV;
2026 	}
2027 	return rc;
2028 }
2029 
2030 
2031 /****************************************************************************/
2032 /* Free any DMA memory owned by the driver.                                 */
2033 /*                                                                          */
2034 /* Scans through each data structure that requires DMA memory and frees     */
2035 /* the memory if allocated.                                                 */
2036 /*                                                                          */
2037 /* Returns:                                                                 */
2038 /*   Nothing.                                                               */
2039 /****************************************************************************/
2040 static void
2041 bce_dma_free(struct bce_softc *sc)
2042 {
2043 	int i;
2044 
2045 	/* Destroy the status block. */
2046 	if (sc->status_tag != NULL) {
2047 		if (sc->status_block != NULL) {
2048 			bus_dmamap_unload(sc->status_tag, sc->status_map);
2049 			bus_dmamem_free(sc->status_tag, sc->status_block,
2050 					sc->status_map);
2051 		}
2052 		bus_dma_tag_destroy(sc->status_tag);
2053 	}
2054 
2055 
2056 	/* Destroy the statistics block. */
2057 	if (sc->stats_tag != NULL) {
2058 		if (sc->stats_block != NULL) {
2059 			bus_dmamap_unload(sc->stats_tag, sc->stats_map);
2060 			bus_dmamem_free(sc->stats_tag, sc->stats_block,
2061 					sc->stats_map);
2062 		}
2063 		bus_dma_tag_destroy(sc->stats_tag);
2064 	}
2065 
2066 	/* Destroy the TX buffer descriptor DMA stuffs. */
2067 	if (sc->tx_bd_chain_tag != NULL) {
2068 		for (i = 0; i < TX_PAGES; i++) {
2069 			if (sc->tx_bd_chain[i] != NULL) {
2070 				bus_dmamap_unload(sc->tx_bd_chain_tag,
2071 						  sc->tx_bd_chain_map[i]);
2072 				bus_dmamem_free(sc->tx_bd_chain_tag,
2073 						sc->tx_bd_chain[i],
2074 						sc->tx_bd_chain_map[i]);
2075 			}
2076 		}
2077 		bus_dma_tag_destroy(sc->tx_bd_chain_tag);
2078 	}
2079 
2080 	/* Destroy the RX buffer descriptor DMA stuffs. */
2081 	if (sc->rx_bd_chain_tag != NULL) {
2082 		for (i = 0; i < RX_PAGES; i++) {
2083 			if (sc->rx_bd_chain[i] != NULL) {
2084 				bus_dmamap_unload(sc->rx_bd_chain_tag,
2085 						  sc->rx_bd_chain_map[i]);
2086 				bus_dmamem_free(sc->rx_bd_chain_tag,
2087 						sc->rx_bd_chain[i],
2088 						sc->rx_bd_chain_map[i]);
2089 			}
2090 		}
2091 		bus_dma_tag_destroy(sc->rx_bd_chain_tag);
2092 	}
2093 
2094 	/* Destroy the TX mbuf DMA stuffs. */
2095 	if (sc->tx_mbuf_tag != NULL) {
2096 		for (i = 0; i < TOTAL_TX_BD; i++) {
2097 			/* Must have been unloaded in bce_stop() */
2098 			KKASSERT(sc->tx_mbuf_ptr[i] == NULL);
2099 			bus_dmamap_destroy(sc->tx_mbuf_tag,
2100 					   sc->tx_mbuf_map[i]);
2101 		}
2102 		bus_dma_tag_destroy(sc->tx_mbuf_tag);
2103 	}
2104 
2105 	/* Destroy the RX mbuf DMA stuffs. */
2106 	if (sc->rx_mbuf_tag != NULL) {
2107 		for (i = 0; i < TOTAL_RX_BD; i++) {
2108 			/* Must have been unloaded in bce_stop() */
2109 			KKASSERT(sc->rx_mbuf_ptr[i] == NULL);
2110 			bus_dmamap_destroy(sc->rx_mbuf_tag,
2111 					   sc->rx_mbuf_map[i]);
2112 		}
2113 		bus_dma_tag_destroy(sc->rx_mbuf_tag);
2114 	}
2115 
2116 	/* Destroy the parent tag */
2117 	if (sc->parent_tag != NULL)
2118 		bus_dma_tag_destroy(sc->parent_tag);
2119 }
2120 
2121 
2122 /****************************************************************************/
2123 /* Get DMA memory from the OS.                                              */
2124 /*                                                                          */
2125 /* Validates that the OS has provided a DMA buffer in response to a         */
2126 /* bus_dmamap_load() call and saves the physical address of that buffer.    */
2127 /* Any mapping error is reported through the callback's error argument, in  */
2128 /* which case the callback returns without updating the caller's bus        */
2129 /* address.                                                                 */
2130 /*                                                                          */
2131 /* Returns:                                                                 */
2132 /*   Nothing.                                                               */
2133 /****************************************************************************/
2134 static void
2135 bce_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2136 {
2137 	bus_addr_t *busaddr = arg;
2138 
2139 	/*
2140 	 * Simulate a mapping failure.
2141 	 * XXX not correct.
2142 	 */
2143 	DBRUNIF(DB_RANDOMTRUE(bce_debug_dma_map_addr_failure),
2144 		kprintf("bce: %s(%d): Simulating DMA mapping error.\n",
2145 			__FILE__, __LINE__);
2146 		error = ENOMEM);
2147 
2148 	/* Check for an error and signal the caller that an error occurred. */
2149 	if (error)
2150 		return;
2151 
2152 	KASSERT(nseg == 1, ("only one segment is allowed\n"));
2153 	*busaddr = segs->ds_addr;
2154 }
2155 
2156 
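/****************************************************************************/
/* Map an mbuf into DMA space.                                              */
/*                                                                          */
/* Callback used when loading an mbuf chain.  Copies the DMA segments       */
/* returned by the bus layer into the caller's bce_dmamap_arg, or clears    */
/* bce_maxsegs when more segments are returned than the caller allowed.     */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/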
2157 static void
2158 bce_dma_map_mbuf(void *arg, bus_dma_segment_t *segs, int nsegs,
2159 		 bus_size_t mapsz __unused, int error)
2160 {
2161 	struct bce_dmamap_arg *ctx = arg;
2162 	int i;
2163 
2164 	if (error)
2165 		return;
2166 
2167 	if (nsegs > ctx->bce_maxsegs) {
2168 		ctx->bce_maxsegs = 0;
2169 		return;
2170 	}
2171 
2172 	ctx->bce_maxsegs = nsegs;
2173 	for (i = 0; i < nsegs; ++i)
2174 		ctx->bce_segs[i] = segs[i];
2175 }
2176 
2177 
2178 /****************************************************************************/
2179 /* Allocate any DMA memory needed by the driver.                            */
2180 /*                                                                          */
2181 /* Allocates DMA memory needed for the various global structures needed by  */
2182 /* hardware.                                                                */
2183 /*                                                                          */
2184 /* Returns:                                                                 */
2185 /*   0 for success, positive value for failure.                             */
2186 /****************************************************************************/
2187 static int
2188 bce_dma_alloc(struct bce_softc *sc)
2189 {
2190 	struct ifnet *ifp = &sc->arpcom.ac_if;
2191 	int i, j, rc = 0;
2192 	bus_addr_t busaddr;
2193 
2194 	/*
2195 	 * Allocate the parent bus DMA tag appropriate for PCI.
2196 	 */
2197 	rc = bus_dma_tag_create(NULL, 1, BCE_DMA_BOUNDARY,
2198 				sc->max_bus_addr, BUS_SPACE_MAXADDR,
2199 				NULL, NULL,
2200 				MAXBSIZE, BUS_SPACE_UNRESTRICTED,
2201 				BUS_SPACE_MAXSIZE_32BIT,
2202 				0, &sc->parent_tag);
2203 	if (rc != 0) {
2204 		if_printf(ifp, "Could not allocate parent DMA tag!\n");
2205 		return rc;
2206 	}
2207 
2208 	/*
2209 	 * Create a DMA tag for the status block, allocate and clear the
2210 	 * memory, map the memory into DMA space, and fetch the physical
2211 	 * address of the block.
2212 	 */
2213 	rc = bus_dma_tag_create(sc->parent_tag,
2214 				BCE_DMA_ALIGN, BCE_DMA_BOUNDARY,
2215 				sc->max_bus_addr, BUS_SPACE_MAXADDR,
2216 				NULL, NULL,
2217 				BCE_STATUS_BLK_SZ, 1, BCE_STATUS_BLK_SZ,
2218 				0, &sc->status_tag);
2219 	if (rc != 0) {
2220 		if_printf(ifp, "Could not allocate status block DMA tag!\n");
2221 		return rc;
2222 	}
2223 
2224 	rc = bus_dmamem_alloc(sc->status_tag, (void **)&sc->status_block,
2225 			      BUS_DMA_WAITOK | BUS_DMA_ZERO,
2226 			      &sc->status_map);
2227 	if (rc != 0) {
2228 		if_printf(ifp, "Could not allocate status block DMA memory!\n");
2229 		return rc;
2230 	}
2231 
2232 	rc = bus_dmamap_load(sc->status_tag, sc->status_map,
2233 			     sc->status_block, BCE_STATUS_BLK_SZ,
2234 			     bce_dma_map_addr, &busaddr, BUS_DMA_WAITOK);
2235 	if (rc != 0) {
2236 		if_printf(ifp, "Could not map status block DMA memory!\n");
2237 		bus_dmamem_free(sc->status_tag, sc->status_block,
2238 				sc->status_map);
2239 		sc->status_block = NULL;
2240 		return rc;
2241 	}
2242 
2243 	sc->status_block_paddr = busaddr;
2244 	/* DRC - Fix for 64 bit addresses. */
2245 	DBPRINT(sc, BCE_INFO, "status_block_paddr = 0x%08X\n",
2246 		(uint32_t)sc->status_block_paddr);
2247 
2248 	/*
2249 	 * Create a DMA tag for the statistics block, allocate and clear the
2250 	 * memory, map the memory into DMA space, and fetch the physical
2251 	 * address of the block.
2252 	 */
2253 	rc = bus_dma_tag_create(sc->parent_tag,
2254 				BCE_DMA_ALIGN, BCE_DMA_BOUNDARY,
2255 				sc->max_bus_addr, BUS_SPACE_MAXADDR,
2256 				NULL, NULL,
2257 				BCE_STATS_BLK_SZ, 1, BCE_STATS_BLK_SZ,
2258 				0, &sc->stats_tag);
2259 	if (rc != 0) {
2260 		if_printf(ifp, "Could not allocate "
2261 			  "statistics block DMA tag!\n");
2262 		return rc;
2263 	}
2264 
2265 	rc = bus_dmamem_alloc(sc->stats_tag, (void **)&sc->stats_block,
2266 			      BUS_DMA_WAITOK | BUS_DMA_ZERO,
2267 			      &sc->stats_map);
2268 	if (rc != 0) {
2269 		if_printf(ifp, "Could not allocate "
2270 			  "statistics block DMA memory!\n");
2271 		return rc;
2272 	}
2273 
2274 	rc = bus_dmamap_load(sc->stats_tag, sc->stats_map,
2275 			     sc->stats_block, BCE_STATS_BLK_SZ,
2276 			     bce_dma_map_addr, &busaddr, BUS_DMA_WAITOK);
2277 	if (rc != 0) {
2278 		if_printf(ifp, "Could not map statistics block DMA memory!\n");
2279 		bus_dmamem_free(sc->stats_tag, sc->stats_block, sc->stats_map);
2280 		sc->stats_block = NULL;
2281 		return rc;
2282 	}
2283 
2284 	sc->stats_block_paddr = busaddr;
2285 	/* DRC - Fix for 64 bit address. */
2286 	DBPRINT(sc, BCE_INFO, "stats_block_paddr = 0x%08X\n",
2287 		(uint32_t)sc->stats_block_paddr);
2288 
2289 	/*
2290 	 * Create a DMA tag for the TX buffer descriptor chain,
2291 	 * allocate and clear the memory, and fetch the
2292 	 * physical address of the block.
2293 	 */
2294 	rc = bus_dma_tag_create(sc->parent_tag,
2295 				BCM_PAGE_SIZE, BCE_DMA_BOUNDARY,
2296 				sc->max_bus_addr, BUS_SPACE_MAXADDR,
2297 				NULL, NULL,
2298 				BCE_TX_CHAIN_PAGE_SZ, 1, BCE_TX_CHAIN_PAGE_SZ,
2299 				0, &sc->tx_bd_chain_tag);
2300 	if (rc != 0) {
2301 		if_printf(ifp, "Could not allocate "
2302 			  "TX descriptor chain DMA tag!\n");
2303 		return rc;
2304 	}
2305 
2306 	for (i = 0; i < TX_PAGES; i++) {
2307 		rc = bus_dmamem_alloc(sc->tx_bd_chain_tag,
2308 				      (void **)&sc->tx_bd_chain[i],
2309 				      BUS_DMA_WAITOK, &sc->tx_bd_chain_map[i]);
2310 		if (rc != 0) {
2311 			if_printf(ifp, "Could not allocate %dth TX descriptor "
2312 				  "chain DMA memory!\n", i);
2313 			return rc;
2314 		}
2315 
2316 		rc = bus_dmamap_load(sc->tx_bd_chain_tag,
2317 				     sc->tx_bd_chain_map[i],
2318 				     sc->tx_bd_chain[i], BCE_TX_CHAIN_PAGE_SZ,
2319 				     bce_dma_map_addr, &busaddr,
2320 				     BUS_DMA_WAITOK);
2321 		if (rc != 0) {
2322 			if_printf(ifp, "Could not map %dth TX descriptor "
2323 				  "chain DMA memory!\n", i);
2324 			bus_dmamem_free(sc->tx_bd_chain_tag,
2325 					sc->tx_bd_chain[i],
2326 					sc->tx_bd_chain_map[i]);
2327 			sc->tx_bd_chain[i] = NULL;
2328 			return rc;
2329 		}
2330 
2331 		sc->tx_bd_chain_paddr[i] = busaddr;
2332 		/* DRC - Fix for 64 bit systems. */
2333 		DBPRINT(sc, BCE_INFO, "tx_bd_chain_paddr[%d] = 0x%08X\n",
2334 			i, (uint32_t)sc->tx_bd_chain_paddr[i]);
2335 	}
2336 
2337 	/* Create a DMA tag for TX mbufs. */
2338 	rc = bus_dma_tag_create(sc->parent_tag, 1, BCE_DMA_BOUNDARY,
2339 				sc->max_bus_addr, BUS_SPACE_MAXADDR,
2340 				NULL, NULL,
2341 				MCLBYTES * BCE_MAX_SEGMENTS,
2342 				BCE_MAX_SEGMENTS, MCLBYTES,
2343 				0, &sc->tx_mbuf_tag);
2344 	if (rc != 0) {
2345 		if_printf(ifp, "Could not allocate TX mbuf DMA tag!\n");
2346 		return rc;
2347 	}
2348 
2349 	/* Create DMA maps for the TX mbufs clusters. */
2350 	for (i = 0; i < TOTAL_TX_BD; i++) {
2351 		rc = bus_dmamap_create(sc->tx_mbuf_tag, BUS_DMA_WAITOK,
2352 				       &sc->tx_mbuf_map[i]);
2353 		if (rc != 0) {
2354 			for (j = 0; j < i; ++j) {
2355 				bus_dmamap_destroy(sc->tx_mbuf_tag,
2356 						   sc->tx_mbuf_map[j]);
2357 			}
2358 			bus_dma_tag_destroy(sc->tx_mbuf_tag);
2359 			sc->tx_mbuf_tag = NULL;
2360 
2361 			if_printf(ifp, "Unable to create "
2362 				  "%dth TX mbuf DMA map!\n", i);
2363 			return rc;
2364 		}
2365 	}
2366 
2367 	/*
2368 	 * Create a DMA tag for the RX buffer descriptor chain,
2369 	 * allocate and clear the memory, and fetch the physical
2370 	 * address of the blocks.
2371 	 */
2372 	rc = bus_dma_tag_create(sc->parent_tag,
2373 				BCM_PAGE_SIZE, BCE_DMA_BOUNDARY,
2374 				sc->max_bus_addr, BUS_SPACE_MAXADDR,
2375 				NULL, NULL,
2376 				BCE_RX_CHAIN_PAGE_SZ, 1, BCE_RX_CHAIN_PAGE_SZ,
2377 				0, &sc->rx_bd_chain_tag);
2378 	if (rc != 0) {
2379 		if_printf(ifp, "Could not allocate "
2380 			  "RX descriptor chain DMA tag!\n");
2381 		return rc;
2382 	}
2383 
2384 	for (i = 0; i < RX_PAGES; i++) {
2385 		rc = bus_dmamem_alloc(sc->rx_bd_chain_tag,
2386 				      (void **)&sc->rx_bd_chain[i],
2387 				      BUS_DMA_WAITOK | BUS_DMA_ZERO,
2388 				      &sc->rx_bd_chain_map[i]);
2389 		if (rc != 0) {
2390 			if_printf(ifp, "Could not allocate %dth RX descriptor "
2391 				  "chain DMA memory!\n", i);
2392 			return rc;
2393 		}
2394 
2395 		rc = bus_dmamap_load(sc->rx_bd_chain_tag,
2396 				     sc->rx_bd_chain_map[i],
2397 				     sc->rx_bd_chain[i], BCE_RX_CHAIN_PAGE_SZ,
2398 				     bce_dma_map_addr, &busaddr,
2399 				     BUS_DMA_WAITOK);
2400 		if (rc != 0) {
2401 			if_printf(ifp, "Could not map %dth RX descriptor "
2402 				  "chain DMA memory!\n", i);
2403 			bus_dmamem_free(sc->rx_bd_chain_tag,
2404 					sc->rx_bd_chain[i],
2405 					sc->rx_bd_chain_map[i]);
2406 			sc->rx_bd_chain[i] = NULL;
2407 			return rc;
2408 		}
2409 
2410 		sc->rx_bd_chain_paddr[i] = busaddr;
2411 		/* DRC - Fix for 64 bit systems. */
2412 		DBPRINT(sc, BCE_INFO, "rx_bd_chain_paddr[%d] = 0x%08X\n",
2413 			i, (uint32_t)sc->rx_bd_chain_paddr[i]);
2414 	}
2415 
2416 	/* Create a DMA tag for RX mbufs. */
2417 	rc = bus_dma_tag_create(sc->parent_tag, 1, BCE_DMA_BOUNDARY,
2418 				sc->max_bus_addr, BUS_SPACE_MAXADDR,
2419 				NULL, NULL,
2420 				MCLBYTES, 1/* BCE_MAX_SEGMENTS */, MCLBYTES,
2421 				0, &sc->rx_mbuf_tag);
2422 	if (rc != 0) {
2423 		if_printf(ifp, "Could not allocate RX mbuf DMA tag!\n");
2424 		return rc;
2425 	}
2426 
2427 	/* Create DMA maps for the RX mbuf clusters. */
2428 	for (i = 0; i < TOTAL_RX_BD; i++) {
2429 		rc = bus_dmamap_create(sc->rx_mbuf_tag, BUS_DMA_WAITOK,
2430 				       &sc->rx_mbuf_map[i]);
2431 		if (rc != 0) {
2432 			for (j = 0; j < i; ++j) {
2433 				bus_dmamap_destroy(sc->rx_mbuf_tag,
2434 						   sc->rx_mbuf_map[j]);
2435 			}
2436 			bus_dma_tag_destroy(sc->rx_mbuf_tag);
2437 			sc->rx_mbuf_tag = NULL;
2438 
2439 			if_printf(ifp, "Unable to create "
2440 				  "%dth RX mbuf DMA map!\n", i);
2441 			return rc;
2442 		}
2443 	}
2444 	return 0;
2445 }
2446 
2447 
2448 /****************************************************************************/
2449 /* Firmware synchronization.                                                */
2450 /*                                                                          */
2451 /* Before performing certain events such as a chip reset, synchronize with  */
2452 /* the firmware first.                                                      */
2453 /*                                                                          */
2454 /* Returns:                                                                 */
2455 /*   0 for success, positive value for failure.                             */
2456 /****************************************************************************/
2457 static int
2458 bce_fw_sync(struct bce_softc *sc, uint32_t msg_data)
2459 {
2460 	int i, rc = 0;
2461 	uint32_t val;
2462 
2463 	/* Don't waste any time if we've timed out before. */
2464 	if (sc->bce_fw_timed_out)
2465 		return EBUSY;
2466 
2467 	/* Increment the message sequence number. */
2468 	sc->bce_fw_wr_seq++;
2469 	msg_data |= sc->bce_fw_wr_seq;
2470 
2471 	DBPRINT(sc, BCE_VERBOSE, "bce_fw_sync(): msg_data = 0x%08X\n", msg_data);
2472 
2473 	/* Send the message to the bootcode driver mailbox. */
2474 	REG_WR_IND(sc, sc->bce_shmem_base + BCE_DRV_MB, msg_data);
2475 
2476 	/* Wait for the bootcode to acknowledge the message. */
2477 	for (i = 0; i < FW_ACK_TIME_OUT_MS; i++) {
2478 		/* Check for a response in the bootcode firmware mailbox. */
2479 		val = REG_RD_IND(sc, sc->bce_shmem_base + BCE_FW_MB);
2480 		if ((val & BCE_FW_MSG_ACK) == (msg_data & BCE_DRV_MSG_SEQ))
2481 			break;
2482 		DELAY(1000);
2483 	}
2484 
2485 	/* If we've timed out, tell the bootcode that we've stopped waiting. */
2486 	if ((val & BCE_FW_MSG_ACK) != (msg_data & BCE_DRV_MSG_SEQ) &&
2487 	    (msg_data & BCE_DRV_MSG_DATA) != BCE_DRV_MSG_DATA_WAIT0) {
2488 		if_printf(&sc->arpcom.ac_if,
2489 			  "Firmware synchronization timeout! "
2490 			  "msg_data = 0x%08X\n", msg_data);
2491 
2492 		msg_data &= ~BCE_DRV_MSG_CODE;
2493 		msg_data |= BCE_DRV_MSG_CODE_FW_TIMEOUT;
2494 
2495 		REG_WR_IND(sc, sc->bce_shmem_base + BCE_DRV_MB, msg_data);
2496 
2497 		sc->bce_fw_timed_out = 1;
2498 		rc = EBUSY;
2499 	}
2500 	return rc;
2501 }
2502 
2503 
2504 /****************************************************************************/
2505 /* Load Receive Virtual 2 Physical (RV2P) processor firmware.               */
2506 /*                                                                          */
2507 /* Returns:                                                                 */
2508 /*   Nothing.                                                               */
2509 /****************************************************************************/
2510 static void
2511 bce_load_rv2p_fw(struct bce_softc *sc, uint32_t *rv2p_code,
2512 		 uint32_t rv2p_code_len, uint32_t rv2p_proc)
2513 {
2514 	int i;
2515 	uint32_t val;
2516 
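	/* Load the firmware one instruction (high/low word pair) at a time. */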
2517 	for (i = 0; i < rv2p_code_len; i += 8) {
2518 		REG_WR(sc, BCE_RV2P_INSTR_HIGH, *rv2p_code);
2519 		rv2p_code++;
2520 		REG_WR(sc, BCE_RV2P_INSTR_LOW, *rv2p_code);
2521 		rv2p_code++;
2522 
2523 		if (rv2p_proc == RV2P_PROC1) {
2524 			val = (i / 8) | BCE_RV2P_PROC1_ADDR_CMD_RDWR;
2525 			REG_WR(sc, BCE_RV2P_PROC1_ADDR_CMD, val);
2526 		} else {
2527 			val = (i / 8) | BCE_RV2P_PROC2_ADDR_CMD_RDWR;
2528 			REG_WR(sc, BCE_RV2P_PROC2_ADDR_CMD, val);
2529 		}
2530 	}
2531 
2532 	/* Reset the processor, un-stall is done later. */
2533 	if (rv2p_proc == RV2P_PROC1)
2534 		REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC1_RESET);
2535 	else
2536 		REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC2_RESET);
2537 }
2538 
2539 
2540 /****************************************************************************/
2541 /* Load RISC processor firmware.                                            */
2542 /*                                                                          */
2543 /* Loads firmware from the file if_bcefw.h into the scratchpad memory       */
2544 /* associated with a particular processor.                                  */
2545 /*                                                                          */
2546 /* Returns:                                                                 */
2547 /*   Nothing.                                                               */
2548 /****************************************************************************/
2549 static void
2550 bce_load_cpu_fw(struct bce_softc *sc, struct cpu_reg *cpu_reg,
2551 		struct fw_info *fw)
2552 {
2553 	uint32_t offset, val;
2554 	int j;
2555 
2556 	/* Halt the CPU. */
2557 	val = REG_RD_IND(sc, cpu_reg->mode);
2558 	val |= cpu_reg->mode_value_halt;
2559 	REG_WR_IND(sc, cpu_reg->mode, val);
2560 	REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
2561 
2562 	/* Load the Text area. */
2563 	offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2564 	if (fw->text) {
2565 		for (j = 0; j < (fw->text_len / 4); j++, offset += 4)
2566 			REG_WR_IND(sc, offset, fw->text[j]);
2567 	}
2568 
2569 	/* Load the Data area. */
2570 	offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2571 	if (fw->data) {
2572 		for (j = 0; j < (fw->data_len / 4); j++, offset += 4)
2573 			REG_WR_IND(sc, offset, fw->data[j]);
2574 	}
2575 
2576 	/* Load the SBSS area. */
2577 	offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2578 	if (fw->sbss) {
2579 		for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4)
2580 			REG_WR_IND(sc, offset, fw->sbss[j]);
2581 	}
2582 
2583 	/* Load the BSS area. */
2584 	offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2585 	if (fw->bss) {
2586 		for (j = 0; j < (fw->bss_len/4); j++, offset += 4)
2587 			REG_WR_IND(sc, offset, fw->bss[j]);
2588 	}
2589 
2590 	/* Load the Read-Only area. */
2591 	offset = cpu_reg->spad_base +
2592 		(fw->rodata_addr - cpu_reg->mips_view_base);
2593 	if (fw->rodata) {
2594 		for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4)
2595 			REG_WR_IND(sc, offset, fw->rodata[j]);
2596 	}
2597 
2598 	/* Clear the pre-fetch instruction. */
2599 	REG_WR_IND(sc, cpu_reg->inst, 0);
2600 	REG_WR_IND(sc, cpu_reg->pc, fw->start_addr);
2601 
2602 	/* Start the CPU. */
2603 	val = REG_RD_IND(sc, cpu_reg->mode);
2604 	val &= ~cpu_reg->mode_value_halt;
2605 	REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
2606 	REG_WR_IND(sc, cpu_reg->mode, val);
2607 }
2608 
2609 
2610 /****************************************************************************/
2611 /* Initialize the RV2P, RX, TX, TPAT, and COM CPUs.                         */
2612 /*                                                                          */
2613 /* Loads the firmware for each CPU and starts the CPU.                      */
2614 /*                                                                          */
2615 /* Returns:                                                                 */
2616 /*   Nothing.                                                               */
2617 /****************************************************************************/
2618 static void
2619 bce_init_cpus(struct bce_softc *sc)
2620 {
2621 	struct cpu_reg cpu_reg;
2622 	struct fw_info fw;
2623 
2624 	/* Initialize the RV2P processor. */
2625 	bce_load_rv2p_fw(sc, bce_rv2p_proc1, sizeof(bce_rv2p_proc1), RV2P_PROC1);
2626 	bce_load_rv2p_fw(sc, bce_rv2p_proc2, sizeof(bce_rv2p_proc2), RV2P_PROC2);
2627 
2628 	/* Initialize the RX Processor. */
2629 	cpu_reg.mode = BCE_RXP_CPU_MODE;
2630 	cpu_reg.mode_value_halt = BCE_RXP_CPU_MODE_SOFT_HALT;
2631 	cpu_reg.mode_value_sstep = BCE_RXP_CPU_MODE_STEP_ENA;
2632 	cpu_reg.state = BCE_RXP_CPU_STATE;
2633 	cpu_reg.state_value_clear = 0xffffff;
2634 	cpu_reg.gpr0 = BCE_RXP_CPU_REG_FILE;
2635 	cpu_reg.evmask = BCE_RXP_CPU_EVENT_MASK;
2636 	cpu_reg.pc = BCE_RXP_CPU_PROGRAM_COUNTER;
2637 	cpu_reg.inst = BCE_RXP_CPU_INSTRUCTION;
2638 	cpu_reg.bp = BCE_RXP_CPU_HW_BREAKPOINT;
2639 	cpu_reg.spad_base = BCE_RXP_SCRATCH;
2640 	cpu_reg.mips_view_base = 0x8000000;
2641 
2642 	fw.ver_major = bce_RXP_b06FwReleaseMajor;
2643 	fw.ver_minor = bce_RXP_b06FwReleaseMinor;
2644 	fw.ver_fix = bce_RXP_b06FwReleaseFix;
2645 	fw.start_addr = bce_RXP_b06FwStartAddr;
2646 
2647 	fw.text_addr = bce_RXP_b06FwTextAddr;
2648 	fw.text_len = bce_RXP_b06FwTextLen;
2649 	fw.text_index = 0;
2650 	fw.text = bce_RXP_b06FwText;
2651 
2652 	fw.data_addr = bce_RXP_b06FwDataAddr;
2653 	fw.data_len = bce_RXP_b06FwDataLen;
2654 	fw.data_index = 0;
2655 	fw.data = bce_RXP_b06FwData;
2656 
2657 	fw.sbss_addr = bce_RXP_b06FwSbssAddr;
2658 	fw.sbss_len = bce_RXP_b06FwSbssLen;
2659 	fw.sbss_index = 0;
2660 	fw.sbss = bce_RXP_b06FwSbss;
2661 
2662 	fw.bss_addr = bce_RXP_b06FwBssAddr;
2663 	fw.bss_len = bce_RXP_b06FwBssLen;
2664 	fw.bss_index = 0;
2665 	fw.bss = bce_RXP_b06FwBss;
2666 
2667 	fw.rodata_addr = bce_RXP_b06FwRodataAddr;
2668 	fw.rodata_len = bce_RXP_b06FwRodataLen;
2669 	fw.rodata_index = 0;
2670 	fw.rodata = bce_RXP_b06FwRodata;
2671 
2672 	DBPRINT(sc, BCE_INFO_RESET, "Loading RX firmware.\n");
2673 	bce_load_cpu_fw(sc, &cpu_reg, &fw);
2674 
2675 	/* Initialize the TX Processor. */
2676 	cpu_reg.mode = BCE_TXP_CPU_MODE;
2677 	cpu_reg.mode_value_halt = BCE_TXP_CPU_MODE_SOFT_HALT;
2678 	cpu_reg.mode_value_sstep = BCE_TXP_CPU_MODE_STEP_ENA;
2679 	cpu_reg.state = BCE_TXP_CPU_STATE;
2680 	cpu_reg.state_value_clear = 0xffffff;
2681 	cpu_reg.gpr0 = BCE_TXP_CPU_REG_FILE;
2682 	cpu_reg.evmask = BCE_TXP_CPU_EVENT_MASK;
2683 	cpu_reg.pc = BCE_TXP_CPU_PROGRAM_COUNTER;
2684 	cpu_reg.inst = BCE_TXP_CPU_INSTRUCTION;
2685 	cpu_reg.bp = BCE_TXP_CPU_HW_BREAKPOINT;
2686 	cpu_reg.spad_base = BCE_TXP_SCRATCH;
2687 	cpu_reg.mips_view_base = 0x8000000;
2688 
2689 	fw.ver_major = bce_TXP_b06FwReleaseMajor;
2690 	fw.ver_minor = bce_TXP_b06FwReleaseMinor;
2691 	fw.ver_fix = bce_TXP_b06FwReleaseFix;
2692 	fw.start_addr = bce_TXP_b06FwStartAddr;
2693 
2694 	fw.text_addr = bce_TXP_b06FwTextAddr;
2695 	fw.text_len = bce_TXP_b06FwTextLen;
2696 	fw.text_index = 0;
2697 	fw.text = bce_TXP_b06FwText;
2698 
2699 	fw.data_addr = bce_TXP_b06FwDataAddr;
2700 	fw.data_len = bce_TXP_b06FwDataLen;
2701 	fw.data_index = 0;
2702 	fw.data = bce_TXP_b06FwData;
2703 
2704 	fw.sbss_addr = bce_TXP_b06FwSbssAddr;
2705 	fw.sbss_len = bce_TXP_b06FwSbssLen;
2706 	fw.sbss_index = 0;
2707 	fw.sbss = bce_TXP_b06FwSbss;
2708 
2709 	fw.bss_addr = bce_TXP_b06FwBssAddr;
2710 	fw.bss_len = bce_TXP_b06FwBssLen;
2711 	fw.bss_index = 0;
2712 	fw.bss = bce_TXP_b06FwBss;
2713 
2714 	fw.rodata_addr = bce_TXP_b06FwRodataAddr;
2715 	fw.rodata_len = bce_TXP_b06FwRodataLen;
2716 	fw.rodata_index = 0;
2717 	fw.rodata = bce_TXP_b06FwRodata;
2718 
2719 	DBPRINT(sc, BCE_INFO_RESET, "Loading TX firmware.\n");
2720 	bce_load_cpu_fw(sc, &cpu_reg, &fw);
2721 
2722 	/* Initialize the TX Patch-up Processor. */
2723 	cpu_reg.mode = BCE_TPAT_CPU_MODE;
2724 	cpu_reg.mode_value_halt = BCE_TPAT_CPU_MODE_SOFT_HALT;
2725 	cpu_reg.mode_value_sstep = BCE_TPAT_CPU_MODE_STEP_ENA;
2726 	cpu_reg.state = BCE_TPAT_CPU_STATE;
2727 	cpu_reg.state_value_clear = 0xffffff;
2728 	cpu_reg.gpr0 = BCE_TPAT_CPU_REG_FILE;
2729 	cpu_reg.evmask = BCE_TPAT_CPU_EVENT_MASK;
2730 	cpu_reg.pc = BCE_TPAT_CPU_PROGRAM_COUNTER;
2731 	cpu_reg.inst = BCE_TPAT_CPU_INSTRUCTION;
2732 	cpu_reg.bp = BCE_TPAT_CPU_HW_BREAKPOINT;
2733 	cpu_reg.spad_base = BCE_TPAT_SCRATCH;
2734 	cpu_reg.mips_view_base = 0x8000000;
2735 
2736 	fw.ver_major = bce_TPAT_b06FwReleaseMajor;
2737 	fw.ver_minor = bce_TPAT_b06FwReleaseMinor;
2738 	fw.ver_fix = bce_TPAT_b06FwReleaseFix;
2739 	fw.start_addr = bce_TPAT_b06FwStartAddr;
2740 
2741 	fw.text_addr = bce_TPAT_b06FwTextAddr;
2742 	fw.text_len = bce_TPAT_b06FwTextLen;
2743 	fw.text_index = 0;
2744 	fw.text = bce_TPAT_b06FwText;
2745 
2746 	fw.data_addr = bce_TPAT_b06FwDataAddr;
2747 	fw.data_len = bce_TPAT_b06FwDataLen;
2748 	fw.data_index = 0;
2749 	fw.data = bce_TPAT_b06FwData;
2750 
2751 	fw.sbss_addr = bce_TPAT_b06FwSbssAddr;
2752 	fw.sbss_len = bce_TPAT_b06FwSbssLen;
2753 	fw.sbss_index = 0;
2754 	fw.sbss = bce_TPAT_b06FwSbss;
2755 
2756 	fw.bss_addr = bce_TPAT_b06FwBssAddr;
2757 	fw.bss_len = bce_TPAT_b06FwBssLen;
2758 	fw.bss_index = 0;
2759 	fw.bss = bce_TPAT_b06FwBss;
2760 
2761 	fw.rodata_addr = bce_TPAT_b06FwRodataAddr;
2762 	fw.rodata_len = bce_TPAT_b06FwRodataLen;
2763 	fw.rodata_index = 0;
2764 	fw.rodata = bce_TPAT_b06FwRodata;
2765 
2766 	DBPRINT(sc, BCE_INFO_RESET, "Loading TPAT firmware.\n");
2767 	bce_load_cpu_fw(sc, &cpu_reg, &fw);
2768 
2769 	/* Initialize the Completion Processor. */
2770 	cpu_reg.mode = BCE_COM_CPU_MODE;
2771 	cpu_reg.mode_value_halt = BCE_COM_CPU_MODE_SOFT_HALT;
2772 	cpu_reg.mode_value_sstep = BCE_COM_CPU_MODE_STEP_ENA;
2773 	cpu_reg.state = BCE_COM_CPU_STATE;
2774 	cpu_reg.state_value_clear = 0xffffff;
2775 	cpu_reg.gpr0 = BCE_COM_CPU_REG_FILE;
2776 	cpu_reg.evmask = BCE_COM_CPU_EVENT_MASK;
2777 	cpu_reg.pc = BCE_COM_CPU_PROGRAM_COUNTER;
2778 	cpu_reg.inst = BCE_COM_CPU_INSTRUCTION;
2779 	cpu_reg.bp = BCE_COM_CPU_HW_BREAKPOINT;
2780 	cpu_reg.spad_base = BCE_COM_SCRATCH;
2781 	cpu_reg.mips_view_base = 0x8000000;
2782 
2783 	fw.ver_major = bce_COM_b06FwReleaseMajor;
2784 	fw.ver_minor = bce_COM_b06FwReleaseMinor;
2785 	fw.ver_fix = bce_COM_b06FwReleaseFix;
2786 	fw.start_addr = bce_COM_b06FwStartAddr;
2787 
2788 	fw.text_addr = bce_COM_b06FwTextAddr;
2789 	fw.text_len = bce_COM_b06FwTextLen;
2790 	fw.text_index = 0;
2791 	fw.text = bce_COM_b06FwText;
2792 
2793 	fw.data_addr = bce_COM_b06FwDataAddr;
2794 	fw.data_len = bce_COM_b06FwDataLen;
2795 	fw.data_index = 0;
2796 	fw.data = bce_COM_b06FwData;
2797 
2798 	fw.sbss_addr = bce_COM_b06FwSbssAddr;
2799 	fw.sbss_len = bce_COM_b06FwSbssLen;
2800 	fw.sbss_index = 0;
2801 	fw.sbss = bce_COM_b06FwSbss;
2802 
2803 	fw.bss_addr = bce_COM_b06FwBssAddr;
2804 	fw.bss_len = bce_COM_b06FwBssLen;
2805 	fw.bss_index = 0;
2806 	fw.bss = bce_COM_b06FwBss;
2807 
2808 	fw.rodata_addr = bce_COM_b06FwRodataAddr;
2809 	fw.rodata_len = bce_COM_b06FwRodataLen;
2810 	fw.rodata_index = 0;
2811 	fw.rodata = bce_COM_b06FwRodata;
2812 
2813 	DBPRINT(sc, BCE_INFO_RESET, "Loading COM firmware.\n");
2814 	bce_load_cpu_fw(sc, &cpu_reg, &fw);
2815 }
2816 
2817 
2818 /****************************************************************************/
2819 /* Initialize context memory.                                               */
2820 /*                                                                          */
2821 /* Clears the memory associated with each Context ID (CID).                 */
2822 /*                                                                          */
2823 /* Returns:                                                                 */
2824 /*   Nothing.                                                               */
2825 /****************************************************************************/
2826 static void
2827 bce_init_ctx(struct bce_softc *sc)
2828 {
2829 	uint32_t vcid = 96;
2830 
2831 	while (vcid) {
2832 		uint32_t vcid_addr, pcid_addr, offset;
2833 		int i;
2834 
2835 		vcid--;
2836 
2837 		vcid_addr = GET_CID_ADDR(vcid);
2838 		pcid_addr = vcid_addr;
2839 
2840 		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2841 			vcid_addr += (i << PHY_CTX_SHIFT);
2842 			pcid_addr += (i << PHY_CTX_SHIFT);
2843 
2844 			REG_WR(sc, BCE_CTX_VIRT_ADDR, vcid_addr);
2845 			REG_WR(sc, BCE_CTX_PAGE_TBL, pcid_addr);
2846 
2847 			/* Zero out the context. */
2848 			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2849 				CTX_WR(sc, vcid_addr, offset, 0);
2850 		}
2851 	}
2852 }
2853 
2854 
2855 /****************************************************************************/
2856 /* Fetch the permanent MAC address of the controller.                       */
2857 /*                                                                          */
2858 /* Returns:                                                                 */
2859 /*   Nothing.                                                               */
2860 /****************************************************************************/
2861 static void
2862 bce_get_mac_addr(struct bce_softc *sc)
2863 {
2864 	uint32_t mac_lo = 0, mac_hi = 0;
2865 
2866 	/*
2867 	 * The NetXtreme II bootcode populates various NIC
2868 	 * power-on and runtime configuration items in a
2869 	 * shared memory area.  The factory configured MAC
2870 	 * address is available from both NVRAM and the
2871 	 * shared memory area so we'll read the value from
2872 	 * shared memory for speed.
2873 	 */
2874 
2875 	mac_hi = REG_RD_IND(sc, sc->bce_shmem_base + BCE_PORT_HW_CFG_MAC_UPPER);
2876 	mac_lo = REG_RD_IND(sc, sc->bce_shmem_base + BCE_PORT_HW_CFG_MAC_LOWER);
2877 
2878 	if (mac_lo == 0 && mac_hi == 0) {
2879 		if_printf(&sc->arpcom.ac_if, "Invalid Ethernet address!\n");
2880 	} else {
2881 		sc->eaddr[0] = (u_char)(mac_hi >> 8);
2882 		sc->eaddr[1] = (u_char)(mac_hi >> 0);
2883 		sc->eaddr[2] = (u_char)(mac_lo >> 24);
2884 		sc->eaddr[3] = (u_char)(mac_lo >> 16);
2885 		sc->eaddr[4] = (u_char)(mac_lo >> 8);
2886 		sc->eaddr[5] = (u_char)(mac_lo >> 0);
2887 	}
2888 
2889 	DBPRINT(sc, BCE_INFO, "Permanent Ethernet address = %6D\n", sc->eaddr, ":");
2890 }
2891 
2892 
2893 /****************************************************************************/
2894 /* Program the MAC address.                                                 */
2895 /*                                                                          */
2896 /* Returns:                                                                 */
2897 /*   Nothing.                                                               */
2898 /****************************************************************************/
2899 static void
2900 bce_set_mac_addr(struct bce_softc *sc)
2901 {
2902 	const uint8_t *mac_addr = sc->eaddr;
2903 	uint32_t val;
2904 
2905 	DBPRINT(sc, BCE_INFO, "Setting Ethernet address = %6D\n",
2906 		sc->eaddr, ":");
2907 
2908 	val = (mac_addr[0] << 8) | mac_addr[1];
2909 	REG_WR(sc, BCE_EMAC_MAC_MATCH0, val);
2910 
2911 	val = (mac_addr[2] << 24) |
2912 	      (mac_addr[3] << 16) |
2913 	      (mac_addr[4] << 8) |
2914 	      mac_addr[5];
2915 	REG_WR(sc, BCE_EMAC_MAC_MATCH1, val);
2916 }
2917 
2918 
2919 /****************************************************************************/
2920 /* Stop the controller.                                                     */
2921 /*                                                                          */
2922 /* Returns:                                                                 */
2923 /*   Nothing.                                                               */
2924 /****************************************************************************/
2925 static void
2926 bce_stop(struct bce_softc *sc)
2927 {
2928 	struct ifnet *ifp = &sc->arpcom.ac_if;
2929 	struct mii_data *mii = device_get_softc(sc->bce_miibus);
2930 	struct ifmedia_entry *ifm;
2931 	int mtmp, itmp;
2932 
2933 	ASSERT_SERIALIZED(ifp->if_serializer);
2934 
2935 	callout_stop(&sc->bce_stat_ch);
2936 
2937 	/* Disable the transmit/receive blocks. */
2938 	REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS, 0x5ffffff);
2939 	REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS);
2940 	DELAY(20);
2941 
2942 	bce_disable_intr(sc);
2943 
2944 	/* Tell firmware that the driver is going away. */
2945 	bce_reset(sc, BCE_DRV_MSG_CODE_SUSPEND_NO_WOL);
2946 
2947 	/* Free the RX lists. */
2948 	bce_free_rx_chain(sc);
2949 
2950 	/* Free TX buffers. */
2951 	bce_free_tx_chain(sc);
2952 
2953 	/*
2954 	 * Isolate/power down the PHY, but leave the media selection
2955 	 * unchanged so that things will be put back to normal when
2956 	 * we bring the interface back up.
2957 	 *
2958 	 * 'mii' may be NULL if bce_stop() is called by bce_detach().
2959 	 */
2960 	if (mii != NULL) {
2961 		itmp = ifp->if_flags;
2962 		ifp->if_flags |= IFF_UP;
2963 		ifm = mii->mii_media.ifm_cur;
2964 		mtmp = ifm->ifm_media;
2965 		ifm->ifm_media = IFM_ETHER | IFM_NONE;
2966 		mii_mediachg(mii);
2967 		ifm->ifm_media = mtmp;
2968 		ifp->if_flags = itmp;
2969 	}
2970 
2971 	sc->bce_link = 0;
2972 	sc->bce_coalchg_mask = 0;
2973 
2974 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2975 	ifp->if_timer = 0;
2976 
2977 	bce_mgmt_init(sc);
2978 }
2979 
2980 
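/****************************************************************************/
/* Reset the controller.                                                    */
/*                                                                          */
/* Synchronizes with the firmware, issues a core reset and waits for the    */
/* firmware to finish re-initializing.                                      */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/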
2981 static int
2982 bce_reset(struct bce_softc *sc, uint32_t reset_code)
2983 {
2984 	uint32_t val;
2985 	int i, rc = 0;
2986 
2987 	/* Wait for pending PCI transactions to complete. */
2988 	REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS,
2989 	       BCE_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
2990 	       BCE_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
2991 	       BCE_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
2992 	       BCE_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
2993 	val = REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS);
2994 	DELAY(5);
2995 
2996 	/* Assume bootcode is running. */
2997 	sc->bce_fw_timed_out = 0;
2998 
2999 	/* Give the firmware a chance to prepare for the reset. */
3000 	rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT0 | reset_code);
3001 	if (rc) {
3002 		if_printf(&sc->arpcom.ac_if,
3003 			  "Firmware is not ready for reset\n");
3004 		return rc;
3005 	}
3006 
3007 	/* Set a firmware reminder that this is a soft reset. */
3008 	REG_WR_IND(sc, sc->bce_shmem_base + BCE_DRV_RESET_SIGNATURE,
3009 		   BCE_DRV_RESET_SIGNATURE_MAGIC);
3010 
3011 	/* Dummy read to force the chip to complete all current transactions. */
3012 	val = REG_RD(sc, BCE_MISC_ID);
3013 
3014 	/* Chip reset. */
3015 	val = BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3016 	      BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3017 	      BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3018 	REG_WR(sc, BCE_PCICFG_MISC_CONFIG, val);
3019 
3020 	/* Allow up to 100us (10 x 10us) for the reset to complete. */
3021 	for (i = 0; i < 10; i++) {
3022 		val = REG_RD(sc, BCE_PCICFG_MISC_CONFIG);
3023 		if ((val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3024 			    BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) {
3025 			break;
3026 		}
3027 		DELAY(10);
3028 	}
3029 
3030 	/* Check that reset completed successfully. */
3031 	if (val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3032 		   BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
3033 		if_printf(&sc->arpcom.ac_if, "Reset failed!\n");
3034 		return EBUSY;
3035 	}
3036 
3037 	/* Make sure byte swapping is properly configured. */
3038 	val = REG_RD(sc, BCE_PCI_SWAP_DIAG0);
3039 	if (val != 0x01020304) {
3040 		if_printf(&sc->arpcom.ac_if, "Byte swap is incorrect!\n");
3041 		return ENODEV;
3042 	}
3043 
3044 	/* Just completed a reset, assume that firmware is running again. */
3045 	sc->bce_fw_timed_out = 0;
3046 
3047 	/* Wait for the firmware to finish its initialization. */
3048 	rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT1 | reset_code);
3049 	if (rc) {
3050 		if_printf(&sc->arpcom.ac_if,
3051 			  "Firmware did not complete initialization!\n");
3052 	}
3053 	return rc;
3054 }
3055 
3056 
3057 static int
3058 bce_chipinit(struct bce_softc *sc)
3059 {
3060 	uint32_t val;
3061 	int rc = 0;
3062 
3063 	/* Make sure the interrupt is not active. */
3064 	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, BCE_PCICFG_INT_ACK_CMD_MASK_INT);
3065 
3066 	/*
3067 	 * Initialize DMA byte/word swapping, configure the number of DMA
3068 	 * channels and PCI clock compensation delay.
3069 	 */
3070 	val = BCE_DMA_CONFIG_DATA_BYTE_SWAP |
3071 	      BCE_DMA_CONFIG_DATA_WORD_SWAP |
3072 #if BYTE_ORDER == BIG_ENDIAN
3073 	      BCE_DMA_CONFIG_CNTL_BYTE_SWAP |
3074 #endif
3075 	      BCE_DMA_CONFIG_CNTL_WORD_SWAP |
3076 	      DMA_READ_CHANS << 12 |
3077 	      DMA_WRITE_CHANS << 16;
3078 
3079 	val |= (0x2 << 20) | BCE_DMA_CONFIG_CNTL_PCI_COMP_DLY;
3080 
3081 	if ((sc->bce_flags & BCE_PCIX_FLAG) && sc->bus_speed_mhz == 133)
3082 		val |= BCE_DMA_CONFIG_PCI_FAST_CLK_CMP;
3083 
3084 	/*
3085 	 * This setting resolves a problem observed on certain Intel PCI
3086 	 * chipsets that cannot handle multiple outstanding DMA operations.
3087 	 * See errata E9_5706A1_65.
3088 	 */
3089 	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706 &&
3090 	    BCE_CHIP_ID(sc) != BCE_CHIP_ID_5706_A0 &&
3091 	    !(sc->bce_flags & BCE_PCIX_FLAG))
3092 		val |= BCE_DMA_CONFIG_CNTL_PING_PONG_DMA;
3093 
3094 	REG_WR(sc, BCE_DMA_CONFIG, val);
3095 
3096 	/* Clear the PCI-X relaxed ordering bit. See errata E3_5708CA0_570. */
3097 	if (sc->bce_flags & BCE_PCIX_FLAG) {
3098 		uint16_t cmd;
3099 
3100 		cmd = pci_read_config(sc->bce_dev, BCE_PCI_PCIX_CMD, 2);
3101 		pci_write_config(sc->bce_dev, BCE_PCI_PCIX_CMD, cmd & ~0x2, 2);
3102 	}
3103 
3104 	/* Enable the RX_V2P and Context state machines before access. */
3105 	REG_WR(sc, BCE_MISC_ENABLE_SET_BITS,
3106 	       BCE_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
3107 	       BCE_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
3108 	       BCE_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
3109 
3110 	/* Initialize context mapping and zero out the quick contexts. */
3111 	bce_init_ctx(sc);
3112 
3113 	/* Initialize the on-board CPUs. */
3114 	bce_init_cpus(sc);
3115 
3116 	/* Prepare NVRAM for access. */
3117 	rc = bce_init_nvram(sc);
3118 	if (rc != 0)
3119 		return rc;
3120 
3121 	/* Set the kernel bypass block size */
3122 	val = REG_RD(sc, BCE_MQ_CONFIG);
3123 	val &= ~BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE;
3124 	val |= BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
3125 	REG_WR(sc, BCE_MQ_CONFIG, val);
3126 
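	/*
	 * Presumably 0x10000 is the base of the per-context mailbox
	 * region; the kernel bypass window is placed just past the
	 * kernel (quick) contexts.
	 */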
3127 	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
3128 	REG_WR(sc, BCE_MQ_KNL_BYP_WIND_START, val);
3129 	REG_WR(sc, BCE_MQ_KNL_WIND_END, val);
3130 
3131 	/* Set the page size and clear the RV2P processor stall bits. */
3132 	val = (BCM_PAGE_BITS - 8) << 24;
3133 	REG_WR(sc, BCE_RV2P_CONFIG, val);
3134 
3135 	/* Configure page size. */
3136 	val = REG_RD(sc, BCE_TBDR_CONFIG);
3137 	val &= ~BCE_TBDR_CONFIG_PAGE_SIZE;
3138 	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
3139 	REG_WR(sc, BCE_TBDR_CONFIG, val);
3140 
3141 	return 0;
3142 }
3143 
3144 
3145 /****************************************************************************/
3146 /* Initialize the controller in preparation to send/receive traffic.        */
3147 /*                                                                          */
3148 /* Returns:                                                                 */
3149 /*   0 for success, positive value for failure.                             */
3150 /****************************************************************************/
3151 static int
3152 bce_blockinit(struct bce_softc *sc)
3153 {
3154 	uint32_t reg, val;
3155 	int rc = 0;
3156 
3157 	/* Load the hardware default MAC address. */
3158 	bce_set_mac_addr(sc);
3159 
3160 	/* Set the Ethernet backoff seed value */
3161 	val = sc->eaddr[0] + (sc->eaddr[1] << 8) + (sc->eaddr[2] << 16) +
3162 	      sc->eaddr[3] + (sc->eaddr[4] << 8) + (sc->eaddr[5] << 16);
3163 	REG_WR(sc, BCE_EMAC_BACKOFF_SEED, val);
3164 
3165 	sc->last_status_idx = 0;
3166 	sc->rx_mode = BCE_EMAC_RX_MODE_SORT_MODE;
3167 
3168 	/* Set up link change interrupt generation. */
3169 	REG_WR(sc, BCE_EMAC_ATTENTION_ENA, BCE_EMAC_ATTENTION_ENA_LINK);
3170 
3171 	/* Program the physical address of the status block. */
3172 	REG_WR(sc, BCE_HC_STATUS_ADDR_L, BCE_ADDR_LO(sc->status_block_paddr));
3173 	REG_WR(sc, BCE_HC_STATUS_ADDR_H, BCE_ADDR_HI(sc->status_block_paddr));
3174 
3175 	/* Program the physical address of the statistics block. */
3176 	REG_WR(sc, BCE_HC_STATISTICS_ADDR_L,
3177 	       BCE_ADDR_LO(sc->stats_block_paddr));
3178 	REG_WR(sc, BCE_HC_STATISTICS_ADDR_H,
3179 	       BCE_ADDR_HI(sc->stats_block_paddr));
3180 
3181 	/* Program various host coalescing parameters. */
3182 	REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
3183 	       (sc->bce_tx_quick_cons_trip_int << 16) |
3184 	       sc->bce_tx_quick_cons_trip);
3185 	REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
3186 	       (sc->bce_rx_quick_cons_trip_int << 16) |
3187 	       sc->bce_rx_quick_cons_trip);
3188 	REG_WR(sc, BCE_HC_COMP_PROD_TRIP,
3189 	       (sc->bce_comp_prod_trip_int << 16) | sc->bce_comp_prod_trip);
3190 	REG_WR(sc, BCE_HC_TX_TICKS,
3191 	       (sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks);
3192 	REG_WR(sc, BCE_HC_RX_TICKS,
3193 	       (sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks);
3194 	REG_WR(sc, BCE_HC_COM_TICKS,
3195 	       (sc->bce_com_ticks_int << 16) | sc->bce_com_ticks);
3196 	REG_WR(sc, BCE_HC_CMD_TICKS,
3197 	       (sc->bce_cmd_ticks_int << 16) | sc->bce_cmd_ticks);
3198 	REG_WR(sc, BCE_HC_STATS_TICKS, (sc->bce_stats_ticks & 0xffff00));
3199 	REG_WR(sc, BCE_HC_STAT_COLLECT_TICKS, 0xbb8);	/* 3ms */
3200 	REG_WR(sc, BCE_HC_CONFIG,
3201 	       BCE_HC_CONFIG_TX_TMR_MODE |
3202 	       BCE_HC_CONFIG_COLLECT_STATS);
3203 
3204 	/* Clear the internal statistics counters. */
3205 	REG_WR(sc, BCE_HC_COMMAND, BCE_HC_COMMAND_CLR_STAT_NOW);
3206 
3207 	/* Verify that bootcode is running. */
3208 	reg = REG_RD_IND(sc, sc->bce_shmem_base + BCE_DEV_INFO_SIGNATURE);
3209 
3210 	DBRUNIF(DB_RANDOMTRUE(bce_debug_bootcode_running_failure),
3211 		if_printf(&sc->arpcom.ac_if,
3212 			  "%s(%d): Simulating bootcode failure.\n",
3213 			  __FILE__, __LINE__);
3214 		reg = 0);
3215 
3216 	if ((reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
3217 	    BCE_DEV_INFO_SIGNATURE_MAGIC) {
3218 		if_printf(&sc->arpcom.ac_if,
3219 			  "Bootcode not running! Found: 0x%08X, "
3220 			  "Expected: 0x%08X\n",
3221 			  reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK,
3222 			  BCE_DEV_INFO_SIGNATURE_MAGIC);
3223 		return ENODEV;
3224 	}
3225 
3226 	/* Check if any management firmware is running. */
3227 	reg = REG_RD_IND(sc, sc->bce_shmem_base + BCE_PORT_FEATURE);
3228 	if (reg & (BCE_PORT_FEATURE_ASF_ENABLED |
3229 		   BCE_PORT_FEATURE_IMD_ENABLED)) {
3230 		DBPRINT(sc, BCE_INFO, "Management F/W Enabled.\n");
3231 		sc->bce_flags |= BCE_MFW_ENABLE_FLAG;
3232 	}
3233 
3234 	sc->bce_fw_ver =
3235 		REG_RD_IND(sc, sc->bce_shmem_base + BCE_DEV_INFO_BC_REV);
3236 	DBPRINT(sc, BCE_INFO, "bootcode rev = 0x%08X\n", sc->bce_fw_ver);
3237 
3238 	/* Allow bootcode to apply any additional fixes before enabling MAC. */
3239 	rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT2 | BCE_DRV_MSG_CODE_RESET);
3240 
3241 	/* Enable link state change interrupt generation. */
3242 	REG_WR(sc, BCE_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);
3243 
3244 	/* Enable all remaining blocks in the MAC. */
3245 	REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, 0x5ffffff);
3246 	REG_RD(sc, BCE_MISC_ENABLE_SET_BITS);
3247 	DELAY(20);
3248 
3249 	return 0;
3250 }
3251 
3252 
3253 /****************************************************************************/
3254 /* Encapsulate an mbuf cluster into the rx_bd chain.                        */
3255 /*                                                                          */
3256 /* The NetXtreme II can support Jumbo frames by using multiple rx_bd's.     */
3257 /* This routine will map an mbuf cluster into 1 or more rx_bd's as          */
3258 /* necessary.                                                               */
3259 /*                                                                          */
3260 /* Returns:                                                                 */
3261 /*   0 for success, positive value for failure.                             */
3262 /****************************************************************************/
3263 static int
3264 bce_newbuf_std(struct bce_softc *sc, struct mbuf *m,
3265 	       uint16_t *prod, uint16_t *chain_prod, uint32_t *prod_bseq)
3266 {
3267 	bus_dmamap_t map;
3268 	struct bce_dmamap_arg ctx;
3269 	bus_dma_segment_t seg;
3270 	struct mbuf *m_new;
3271 	struct rx_bd *rxbd;
3272 	int error;
3273 #ifdef BCE_DEBUG
3274 	uint16_t debug_chain_prod = *chain_prod;
3275 #endif
3276 
3277 	/* Make sure the inputs are valid. */
3278 	DBRUNIF((*chain_prod > MAX_RX_BD),
3279 		if_printf(&sc->arpcom.ac_if, "%s(%d): "
3280 			  "RX producer out of range: 0x%04X > 0x%04X\n",
3281 			  __FILE__, __LINE__,
3282 			  *chain_prod, (uint16_t)MAX_RX_BD));
3283 
3284 	DBPRINT(sc, BCE_VERBOSE_RECV, "%s(enter): prod = 0x%04X, chain_prod = 0x%04X, "
3285 		"prod_bseq = 0x%08X\n", __func__, *prod, *chain_prod, *prod_bseq);
3286 
3287 	if (m == NULL) {
3288 		DBRUNIF(DB_RANDOMTRUE(bce_debug_mbuf_allocation_failure),
3289 			if_printf(&sc->arpcom.ac_if, "%s(%d): "
3290 				  "Simulating mbuf allocation failure.\n",
3291 				  __FILE__, __LINE__);
3292 			sc->mbuf_alloc_failed++;
3293 			return ENOBUFS);
3294 
3295 		/* This is a new mbuf allocation. */
3296 		m_new = m_getcl(MB_DONTWAIT, MT_DATA, M_PKTHDR);
3297 		if (m_new == NULL)
3298 			return ENOBUFS;
3299 		DBRUNIF(1, sc->rx_mbuf_alloc++);
3300 	} else {
3301 		m_new = m;
3302 		m_new->m_data = m_new->m_ext.ext_buf;
3303 	}
3304 	m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
3305 
3306 	/* Map the mbuf cluster into device memory. */
3307 	map = sc->rx_mbuf_map[*chain_prod];
3308 
3309 	ctx.bce_maxsegs = 1;
3310 	ctx.bce_segs = &seg;
3311 	error = bus_dmamap_load_mbuf(sc->rx_mbuf_tag, map, m_new,
3312 				     bce_dma_map_mbuf, &ctx, BUS_DMA_NOWAIT);
3313 	if (error || ctx.bce_maxsegs == 0) {
3314 		if_printf(&sc->arpcom.ac_if,
3315 			  "Error mapping mbuf into RX chain!\n");
3316 
3317 		if (m == NULL)
3318 			m_freem(m_new);
3319 
3320 		DBRUNIF(1, sc->rx_mbuf_alloc--);
3321 		return ENOBUFS;
3322 	}
3323 
3324 	/* Watch for overflow. */
3325 	DBRUNIF((sc->free_rx_bd > USABLE_RX_BD),
3326 		if_printf(&sc->arpcom.ac_if, "%s(%d): "
3327 			  "Too many free rx_bd (0x%04X > 0x%04X)!\n",
3328 			  __FILE__, __LINE__, sc->free_rx_bd,
3329 			  (uint16_t)USABLE_RX_BD));
3330 
3331 	/* Update some debug statistics counters. */
3332 	DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark),
3333 		sc->rx_low_watermark = sc->free_rx_bd);
3334 	DBRUNIF((sc->free_rx_bd == 0), sc->rx_empty_count++);
3335 
3336 	/* Setup the rx_bd for the first segment. */
3337 	rxbd = &sc->rx_bd_chain[RX_PAGE(*chain_prod)][RX_IDX(*chain_prod)];
3338 
3339 	rxbd->rx_bd_haddr_lo = htole32(BCE_ADDR_LO(seg.ds_addr));
3340 	rxbd->rx_bd_haddr_hi = htole32(BCE_ADDR_HI(seg.ds_addr));
3341 	rxbd->rx_bd_len = htole32(seg.ds_len);
3342 	rxbd->rx_bd_flags = htole32(RX_BD_FLAGS_START);
3343 	*prod_bseq += seg.ds_len;
3344 
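	/* A standard mbuf cluster fits in a single rx_bd, so this descriptor also ends the frame. */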
3345 	rxbd->rx_bd_flags |= htole32(RX_BD_FLAGS_END);
3346 
3347 	/* Save the mbuf and update our counter. */
3348 	sc->rx_mbuf_ptr[*chain_prod] = m_new;
3349 	sc->free_rx_bd--;
3350 
3351 	DBRUN(BCE_VERBOSE_RECV,
3352 	      bce_dump_rx_mbuf_chain(sc, debug_chain_prod, 1));
3353 
3354 	DBPRINT(sc, BCE_VERBOSE_RECV, "%s(exit): prod = 0x%04X, chain_prod = 0x%04X, "
3355 		"prod_bseq = 0x%08X\n", __func__, *prod, *chain_prod, *prod_bseq);
3356 
3357 	return 0;
3358 }
3359 
3360 
3361 /****************************************************************************/
3362 /* Allocate memory and initialize the TX data structures.                   */
3363 /*                                                                          */
3364 /* Returns:                                                                 */
3365 /*   0 for success, positive value for failure.                             */
3366 /****************************************************************************/
3367 static int
3368 bce_init_tx_chain(struct bce_softc *sc)
3369 {
3370 	struct tx_bd *txbd;
3371 	uint32_t val;
3372 	int i, rc = 0;
3373 
3374 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __func__);
3375 
3376 	/* Set the initial TX producer/consumer indices. */
3377 	sc->tx_prod = 0;
3378 	sc->tx_cons = 0;
3379 	sc->tx_prod_bseq   = 0;
3380 	sc->used_tx_bd = 0;
3381 	sc->max_tx_bd = USABLE_TX_BD;
3382 	DBRUNIF(1, sc->tx_hi_watermark = USABLE_TX_BD);
3383 	DBRUNIF(1, sc->tx_full_count = 0);
3384 
3385 	/*
3386 	 * The NetXtreme II supports a linked-list structure called
3387 	 * a Buffer Descriptor Chain (or BD chain).  A BD chain
3388 	 * consists of a series of 1 or more chain pages, each of which
3389 	 * consists of a fixed number of BD entries.
3390 	 * The last BD entry on each page is a pointer to the next page
3391 	 * in the chain, and the last pointer in the BD chain
3392 	 * points back to the beginning of the chain.
3393 	 */
3394 
3395 	/* Set the TX next pointer chain entries. */
3396 	for (i = 0; i < TX_PAGES; i++) {
3397 		int j;
3398 
3399 		txbd = &sc->tx_bd_chain[i][USABLE_TX_BD_PER_PAGE];
3400 
3401 		/* Check if we've reached the last page. */
3402 		if (i == (TX_PAGES - 1))
3403 			j = 0;
3404 		else
3405 			j = i + 1;
3406 
3407 		txbd->tx_bd_haddr_hi =
3408 			htole32(BCE_ADDR_HI(sc->tx_bd_chain_paddr[j]));
3409 		txbd->tx_bd_haddr_lo =
3410 			htole32(BCE_ADDR_LO(sc->tx_bd_chain_paddr[j]));
3411 	}
3412 
3413 	for (i = 0; i < TX_PAGES; ++i) {
3414 		bus_dmamap_sync(sc->tx_bd_chain_tag, sc->tx_bd_chain_map[i],
3415 				BUS_DMASYNC_PREWRITE);
3416 	}
3417 
3418 	/* Initialize the context ID for an L2 TX chain. */
3419 	val = BCE_L2CTX_TYPE_TYPE_L2;
3420 	val |= BCE_L2CTX_TYPE_SIZE_L2;
3421 	CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TYPE, val);
3422 
3423 	val = BCE_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
3424 	CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_CMD_TYPE, val);
3425 
3426 	/* Point the hardware to the first page in the chain. */
3427 	val = BCE_ADDR_HI(sc->tx_bd_chain_paddr[0]);
3428 	CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TBDR_BHADDR_HI, val);
3429 	val = BCE_ADDR_LO(sc->tx_bd_chain_paddr[0]);
3430 	CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TBDR_BHADDR_LO, val);
3431 
3432 	DBRUN(BCE_VERBOSE_SEND, bce_dump_tx_chain(sc, 0, TOTAL_TX_BD));
3433 
3434 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __func__);
3435 
3436 	return(rc);
3437 }
3438 
3439 
3440 /****************************************************************************/
3441 /* Free memory and clear the TX data structures.                            */
3442 /*                                                                          */
3443 /* Returns:                                                                 */
3444 /*   Nothing.                                                               */
3445 /****************************************************************************/
3446 static void
3447 bce_free_tx_chain(struct bce_softc *sc)
3448 {
3449 	int i;
3450 
3451 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __func__);
3452 
3453 	/* Unmap, unload, and free any mbufs still in the TX mbuf chain. */
3454 	for (i = 0; i < TOTAL_TX_BD; i++) {
3455 		if (sc->tx_mbuf_ptr[i] != NULL) {
3456 			bus_dmamap_sync(sc->tx_mbuf_tag, sc->tx_mbuf_map[i],
3457 					BUS_DMASYNC_POSTWRITE);
3458 			bus_dmamap_unload(sc->tx_mbuf_tag, sc->tx_mbuf_map[i]);
3459 			m_freem(sc->tx_mbuf_ptr[i]);
3460 			sc->tx_mbuf_ptr[i] = NULL;
3461 			DBRUNIF(1, sc->tx_mbuf_alloc--);
3462 		}
3463 	}
3464 
3465 	/* Clear each TX chain page. */
3466 	for (i = 0; i < TX_PAGES; i++)
3467 		bzero(sc->tx_bd_chain[i], BCE_TX_CHAIN_PAGE_SZ);
3468 	sc->used_tx_bd = 0;
3469 
3470 	/* Check if we lost any mbufs in the process. */
3471 	DBRUNIF((sc->tx_mbuf_alloc),
3472 		if_printf(&sc->arpcom.ac_if,
3473 			  "%s(%d): Memory leak! "
3474 			  "Lost %d mbufs from tx chain!\n",
3475 			  __FILE__, __LINE__, sc->tx_mbuf_alloc));
3476 
3477 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __func__);
3478 }
3479 
3480 
3481 /****************************************************************************/
3482 /* Allocate memory and initialize the RX data structures.                   */
3483 /*                                                                          */
3484 /* Returns:                                                                 */
3485 /*   0 for success, positive value for failure.                             */
3486 /****************************************************************************/
3487 static int
3488 bce_init_rx_chain(struct bce_softc *sc)
3489 {
3490 	struct rx_bd *rxbd;
3491 	int i, rc = 0;
3492 	uint16_t prod, chain_prod;
3493 	uint32_t prod_bseq, val;
3494 
3495 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __func__);
3496 
3497 	/* Initialize the RX producer and consumer indices. */
3498 	sc->rx_prod = 0;
3499 	sc->rx_cons = 0;
3500 	sc->rx_prod_bseq = 0;
3501 	sc->free_rx_bd = USABLE_RX_BD;
3502 	sc->max_rx_bd = USABLE_RX_BD;
3503 	DBRUNIF(1, sc->rx_low_watermark = USABLE_RX_BD);
3504 	DBRUNIF(1, sc->rx_empty_count = 0);
3505 
3506 	/* Initialize the RX next pointer chain entries. */
3507 	for (i = 0; i < RX_PAGES; i++) {
3508 		int j;
3509 
3510 		rxbd = &sc->rx_bd_chain[i][USABLE_RX_BD_PER_PAGE];
3511 
3512 		/* Check if we've reached the last page. */
3513 		if (i == (RX_PAGES - 1))
3514 			j = 0;
3515 		else
3516 			j = i + 1;
3517 
3518 		/* Setup the chain page pointers. */
3519 		rxbd->rx_bd_haddr_hi =
3520 			htole32(BCE_ADDR_HI(sc->rx_bd_chain_paddr[j]));
3521 		rxbd->rx_bd_haddr_lo =
3522 			htole32(BCE_ADDR_LO(sc->rx_bd_chain_paddr[j]));
3523 	}
3524 
3525 	/* Initialize the context ID for an L2 RX chain. */
3526 	val = BCE_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
3527 	val |= BCE_L2CTX_CTX_TYPE_SIZE_L2;
3528 	val |= 0x02 << 8;
3529 	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_CTX_TYPE, val);
3530 
3531 	/* Point the hardware to the first page in the chain. */
3532 	/* XXX shouldn't this be done after RX descriptor initialization? */
3533 	val = BCE_ADDR_HI(sc->rx_bd_chain_paddr[0]);
3534 	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_NX_BDHADDR_HI, val);
3535 	val = BCE_ADDR_LO(sc->rx_bd_chain_paddr[0]);
3536 	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_NX_BDHADDR_LO, val);
3537 
3538 	/* Allocate mbuf clusters for the rx_bd chain. */
3539 	prod = prod_bseq = 0;
3540 	while (prod < TOTAL_RX_BD) {
3541 		chain_prod = RX_CHAIN_IDX(prod);
3542 		if (bce_newbuf_std(sc, NULL, &prod, &chain_prod, &prod_bseq)) {
3543 			if_printf(&sc->arpcom.ac_if,
3544 				  "Error filling RX chain: rx_bd[0x%04X]!\n",
3545 				  chain_prod);
3546 			rc = ENOBUFS;
3547 			break;
3548 		}
3549 		prod = NEXT_RX_BD(prod);
3550 	}
3551 
3552 	/* Save the RX chain producer index. */
3553 	sc->rx_prod = prod;
3554 	sc->rx_prod_bseq = prod_bseq;
3555 
3556 	for (i = 0; i < RX_PAGES; i++) {
3557 		bus_dmamap_sync(sc->rx_bd_chain_tag, sc->rx_bd_chain_map[i],
3558 				BUS_DMASYNC_PREWRITE);
3559 	}
3560 
3561 	/* Tell the chip about the waiting rx_bd's. */
3562 	REG_WR16(sc, MB_RX_CID_ADDR + BCE_L2CTX_HOST_BDIDX, sc->rx_prod);
3563 	REG_WR(sc, MB_RX_CID_ADDR + BCE_L2CTX_HOST_BSEQ, sc->rx_prod_bseq);
3564 
3565 	DBRUN(BCE_VERBOSE_RECV, bce_dump_rx_chain(sc, 0, TOTAL_RX_BD));
3566 
3567 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __func__);
3568 
3569 	return(rc);
3570 }
3571 
3572 
3573 /****************************************************************************/
3574 /* Free memory and clear the RX data structures.                            */
3575 /*                                                                          */
3576 /* Returns:                                                                 */
3577 /*   Nothing.                                                               */
3578 /****************************************************************************/
3579 static void
3580 bce_free_rx_chain(struct bce_softc *sc)
3581 {
3582 	int i;
3583 
3584 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __func__);
3585 
3586 	/* Free any mbufs still in the RX mbuf chain. */
3587 	for (i = 0; i < TOTAL_RX_BD; i++) {
3588 		if (sc->rx_mbuf_ptr[i] != NULL) {
3589 			bus_dmamap_sync(sc->rx_mbuf_tag, sc->rx_mbuf_map[i],
3590 					BUS_DMASYNC_POSTREAD);
3591 			bus_dmamap_unload(sc->rx_mbuf_tag, sc->rx_mbuf_map[i]);
3592 			m_freem(sc->rx_mbuf_ptr[i]);
3593 			sc->rx_mbuf_ptr[i] = NULL;
3594 			DBRUNIF(1, sc->rx_mbuf_alloc--);
3595 		}
3596 	}
3597 
3598 	/* Clear each RX chain page. */
3599 	for (i = 0; i < RX_PAGES; i++)
3600 		bzero(sc->rx_bd_chain[i], BCE_RX_CHAIN_PAGE_SZ);
3601 
3602 	/* Check if we lost any mbufs in the process. */
3603 	DBRUNIF((sc->rx_mbuf_alloc),
3604 		if_printf(&sc->arpcom.ac_if,
3605 			  "%s(%d): Memory leak! "
3606 			  "Lost %d mbufs from rx chain!\n",
3607 			  __FILE__, __LINE__, sc->rx_mbuf_alloc));
3608 
3609 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __func__);
3610 }
3611 
3612 
3613 /****************************************************************************/
3614 /* Set media options.                                                       */
3615 /*                                                                          */
3616 /* Returns:                                                                 */
3617 /*   0 for success, positive value for failure.                             */
3618 /****************************************************************************/
3619 static int
3620 bce_ifmedia_upd(struct ifnet *ifp)
3621 {
3622 	struct bce_softc *sc = ifp->if_softc;
3623 	struct mii_data *mii = device_get_softc(sc->bce_miibus);
3624 
3625 	/*
3626 	 * 'mii' will be NULL when this function is called on the following
3627 	 * code path: bce_attach() -> bce_mgmt_init().
3628 	 */
3629 	if (mii != NULL) {
3630 		/* Make sure the MII bus has been enumerated. */
3631 		sc->bce_link = 0;
3632 		if (mii->mii_instance) {
3633 			struct mii_softc *miisc;
3634 
3635 			LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
3636 				mii_phy_reset(miisc);
3637 		}
3638 		mii_mediachg(mii);
3639 	}
3640 	return 0;
3641 }
3642 
3643 
3644 /****************************************************************************/
3645 /* Reports current media status.                                            */
3646 /*                                                                          */
3647 /* Returns:                                                                 */
3648 /*   Nothing.                                                               */
3649 /****************************************************************************/
3650 static void
3651 bce_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
3652 {
3653 	struct bce_softc *sc = ifp->if_softc;
3654 	struct mii_data *mii = device_get_softc(sc->bce_miibus);
3655 
3656 	mii_pollstat(mii);
3657 	ifmr->ifm_active = mii->mii_media_active;
3658 	ifmr->ifm_status = mii->mii_media_status;
3659 }
3660 
3661 
3662 /****************************************************************************/
3663 /* Handles PHY generated interrupt events.                                  */
3664 /*                                                                          */
3665 /* Returns:                                                                 */
3666 /*   Nothing.                                                               */
3667 /****************************************************************************/
3668 static void
3669 bce_phy_intr(struct bce_softc *sc)
3670 {
3671 	uint32_t new_link_state, old_link_state;
3672 	struct ifnet *ifp = &sc->arpcom.ac_if;
3673 
3674 	ASSERT_SERIALIZED(ifp->if_serializer);
3675 
3676 	new_link_state = sc->status_block->status_attn_bits &
3677 			 STATUS_ATTN_BITS_LINK_STATE;
3678 	old_link_state = sc->status_block->status_attn_bits_ack &
3679 			 STATUS_ATTN_BITS_LINK_STATE;
3680 
3681 	/* Handle any changes if the link state has changed. */
3682 	if (new_link_state != old_link_state) {	/* XXX redundant? */
3683 		DBRUN(BCE_VERBOSE_INTR, bce_dump_status_block(sc));
3684 
3685 		sc->bce_link = 0;
3686 		callout_stop(&sc->bce_stat_ch);
3687 		bce_tick_serialized(sc);
3688 
3689 		/* Update the status_attn_bits_ack field in the status block. */
3690 		if (new_link_state) {
3691 			REG_WR(sc, BCE_PCICFG_STATUS_BIT_SET_CMD,
3692 			       STATUS_ATTN_BITS_LINK_STATE);
3693 			if (bootverbose)
3694 				if_printf(ifp, "Link is now UP.\n");
3695 		} else {
3696 			REG_WR(sc, BCE_PCICFG_STATUS_BIT_CLEAR_CMD,
3697 			       STATUS_ATTN_BITS_LINK_STATE);
3698 			if (bootverbose)
3699 				if_printf(ifp, "Link is now DOWN.\n");
3700 		}
3701 	}
3702 
3703 	/* Acknowledge the link change interrupt. */
3704 	REG_WR(sc, BCE_EMAC_STATUS, BCE_EMAC_STATUS_LINK_CHANGE);
3705 }
3706 
3707 
3708 /****************************************************************************/
3709 /* Reads the receive consumer value from the status block (skipping over    */
3710 /* chain page pointer if necessary).                                        */
3711 /*                                                                          */
3712 /* Returns:                                                                 */
3713 /*   hw_cons                                                                */
3714 /****************************************************************************/
3715 static __inline uint16_t
3716 bce_get_hw_rx_cons(struct bce_softc *sc)
3717 {
3718 	uint16_t hw_cons = sc->status_block->status_rx_quick_consumer_index0;
3719 
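	/*
	 * The last entry on each RX chain page is a pointer to the next
	 * page, not a real rx_bd; skip over it.
	 */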
3720 	if ((hw_cons & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
3721 		hw_cons++;
3722 	return hw_cons;
3723 }
3724 
3725 
3726 /****************************************************************************/
3727 /* Handles received frame interrupt events.                                 */
3728 /*                                                                          */
3729 /* Returns:                                                                 */
3730 /*   Nothing.                                                               */
3731 /****************************************************************************/
3732 static void
3733 bce_rx_intr(struct bce_softc *sc, int count)
3734 {
3735 	struct ifnet *ifp = &sc->arpcom.ac_if;
3736 	uint16_t hw_cons, sw_cons, sw_chain_cons, sw_prod, sw_chain_prod;
3737 	uint32_t sw_prod_bseq;
3738 	int i;
3739 	struct mbuf_chain chain[MAXCPU];
3740 
3741 	ASSERT_SERIALIZED(ifp->if_serializer);
3742 
3743 	ether_input_chain_init(chain);
3744 
3745 	DBRUNIF(1, sc->rx_interrupts++);
3746 
3747 	/* Prepare the RX chain pages to be accessed by the host CPU. */
3748 	for (i = 0; i < RX_PAGES; i++) {
3749 		bus_dmamap_sync(sc->rx_bd_chain_tag,
3750 				sc->rx_bd_chain_map[i], BUS_DMASYNC_POSTREAD);
3751 	}
3752 
3753 	/* Get the hardware's view of the RX consumer index. */
3754 	hw_cons = sc->hw_rx_cons = bce_get_hw_rx_cons(sc);
3755 
3756 	/* Get working copies of the driver's view of the RX indices. */
3757 	sw_cons = sc->rx_cons;
3758 	sw_prod = sc->rx_prod;
3759 	sw_prod_bseq = sc->rx_prod_bseq;
3760 
3761 	DBPRINT(sc, BCE_INFO_RECV, "%s(enter): sw_prod = 0x%04X, "
3762 		"sw_cons = 0x%04X, sw_prod_bseq = 0x%08X\n",
3763 		__func__, sw_prod, sw_cons, sw_prod_bseq);
3764 
3765 	/* Prevent speculative reads from getting ahead of the status block. */
3766 	bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
3767 			  BUS_SPACE_BARRIER_READ);
3768 
3769 	/* Update some debug statistics counters */
3770 	DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark),
3771 		sc->rx_low_watermark = sc->free_rx_bd);
3772 	DBRUNIF((sc->free_rx_bd == 0), sc->rx_empty_count++);
3773 
3774 	/* Scan through the receive chain as long as there is work to do. */
3775 	while (sw_cons != hw_cons) {
3776 		struct mbuf *m = NULL;
3777 		struct l2_fhdr *l2fhdr = NULL;
3778 		struct rx_bd *rxbd;
3779 		unsigned int len;
3780 		uint32_t status = 0;
3781 
3782 #ifdef DEVICE_POLLING
3783 		if (count >= 0 && count-- == 0) {
3784 			sc->hw_rx_cons = sw_cons;
3785 			break;
3786 		}
3787 #endif
3788 
3789 		/*
3790 		 * Convert the producer/consumer indices
3791 		 * to an actual rx_bd index.
3792 		 */
3793 		sw_chain_cons = RX_CHAIN_IDX(sw_cons);
3794 		sw_chain_prod = RX_CHAIN_IDX(sw_prod);
3795 
3796 		/* Get the used rx_bd. */
3797 		rxbd = &sc->rx_bd_chain[RX_PAGE(sw_chain_cons)]
3798 				       [RX_IDX(sw_chain_cons)];
3799 		sc->free_rx_bd++;
3800 
3801 		DBRUN(BCE_VERBOSE_RECV,
3802 		      if_printf(ifp, "%s(): ", __func__);
3803 		      bce_dump_rxbd(sc, sw_chain_cons, rxbd));
3804 
3805 		/* The mbuf is stored with the last rx_bd entry of a packet. */
3806 		if (sc->rx_mbuf_ptr[sw_chain_cons] != NULL) {
3807 			/* Validate that this is the last rx_bd. */
3808 			DBRUNIF((!(rxbd->rx_bd_flags & RX_BD_FLAGS_END)),
3809 				if_printf(ifp, "%s(%d): "
3810 				"Unexpected mbuf found in rx_bd[0x%04X]!\n",
3811 				__FILE__, __LINE__, sw_chain_cons);
3812 				bce_breakpoint(sc));
3813 
3814 			/*
3815 			 * ToDo: If the received packet is small enough
3816 			 * to fit into a single, non-M_EXT mbuf,
3817 			 * allocate a new mbuf here, copy the data to
3818 			 * that mbuf, and recycle the mapped jumbo frame.
3819 			 */
3820 
3821 			/* Unmap the mbuf from DMA space. */
3822 			bus_dmamap_sync(sc->rx_mbuf_tag,
3823 					sc->rx_mbuf_map[sw_chain_cons],
3824 					BUS_DMASYNC_POSTREAD);
3825 			bus_dmamap_unload(sc->rx_mbuf_tag,
3826 					  sc->rx_mbuf_map[sw_chain_cons]);
3827 
3828 			/* Remove the mbuf from the driver's chain. */
3829 			m = sc->rx_mbuf_ptr[sw_chain_cons];
3830 			sc->rx_mbuf_ptr[sw_chain_cons] = NULL;
3831 
3832 			/*
3833 			 * Frames received on the NetXtreme II are prepended
3834 			 * with an l2_fhdr structure which provides status
3835 			 * information about the received frame (including
3836 			 * VLAN tags and checksum info).  The frames are also
3837 			 * automatically adjusted to align the IP header
3838 			 * (i.e. two null bytes are inserted before the
3839 			 * Ethernet header).
3840 			 */
3841 			l2fhdr = mtod(m, struct l2_fhdr *);
3842 
3843 			len = l2fhdr->l2_fhdr_pkt_len;
3844 			status = l2fhdr->l2_fhdr_status;
3845 
3846 			DBRUNIF(DB_RANDOMTRUE(bce_debug_l2fhdr_status_check),
3847 				if_printf(ifp,
3848 				"Simulating l2_fhdr status error.\n");
3849 				status = status | L2_FHDR_ERRORS_PHY_DECODE);
3850 
3851 			/* Watch for unusually sized frames. */
3852 			DBRUNIF((len < BCE_MIN_MTU ||
3853 				 len > BCE_MAX_JUMBO_ETHER_MTU_VLAN),
3854 				if_printf(ifp,
3855 				"%s(%d): Unusual frame size found. "
3856 				"Min(%d), Actual(%d), Max(%d)\n",
3857 				__FILE__, __LINE__,
3858 				(int)BCE_MIN_MTU, len,
3859 				(int)BCE_MAX_JUMBO_ETHER_MTU_VLAN);
3860 				bce_dump_mbuf(sc, m);
3861 		 		bce_breakpoint(sc));
3862 
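			/*
			 * The length reported in the l2_fhdr includes the
			 * FCS; strip it before handing the frame up.
			 */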
3863 			len -= ETHER_CRC_LEN;
3864 
3865 			/* Check the received frame for errors. */
3866 			if (status & (L2_FHDR_ERRORS_BAD_CRC |
3867 				      L2_FHDR_ERRORS_PHY_DECODE |
3868 				      L2_FHDR_ERRORS_ALIGNMENT |
3869 				      L2_FHDR_ERRORS_TOO_SHORT |
3870 				      L2_FHDR_ERRORS_GIANT_FRAME)) {
3871 				ifp->if_ierrors++;
3872 				DBRUNIF(1, sc->l2fhdr_status_errors++);
3873 
3874 				/* Reuse the mbuf for a new frame. */
3875 				if (bce_newbuf_std(sc, m, &sw_prod,
3876 						   &sw_chain_prod,
3877 						   &sw_prod_bseq)) {
3878 					DBRUNIF(1, bce_breakpoint(sc));
3879 					/* XXX */
3880 					panic("%s: Can't reuse RX mbuf!\n",
3881 					      ifp->if_xname);
3882 				}
3883 				m = NULL;
3884 				goto bce_rx_int_next_rx;
3885 			}
3886 
3887 			/*
3888 			 * Get a new mbuf for the rx_bd.   If no new
3889 			 * mbufs are available then reuse the current mbuf,
3890 			 * log an ierror on the interface, and generate
3891 			 * an error in the system log.
3892 			 */
3893 			if (bce_newbuf_std(sc, NULL, &sw_prod, &sw_chain_prod,
3894 					   &sw_prod_bseq)) {
3895 				DBRUN(BCE_WARN,
3896 				      if_printf(ifp,
3897 				      "%s(%d): Failed to allocate new mbuf, "
3898 				      "incoming frame dropped!\n",
3899 				      __FILE__, __LINE__));
3900 
3901 				ifp->if_ierrors++;
3902 
3903 				/* Try to reuse the existing mbuf. */
3904 				if (bce_newbuf_std(sc, m, &sw_prod,
3905 						   &sw_chain_prod,
3906 						   &sw_prod_bseq)) {
3907 					DBRUNIF(1, bce_breakpoint(sc));
3908 					/* XXX */
3909 					panic("%s: Double mbuf allocation "
3910 					      "failure!", ifp->if_xname);
3911 				}
3912 				m = NULL;
3913 				goto bce_rx_int_next_rx;
3914 			}
3915 
3916 			/*
3917 			 * Skip over the l2_fhdr when passing
3918 			 * the data up the stack.
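			 * ETHER_ALIGN covers the two null pad bytes the
			 * controller inserts before the Ethernet header.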
3919 			 */
3920 			m_adj(m, sizeof(struct l2_fhdr) + ETHER_ALIGN);
3921 
3922 			m->m_pkthdr.len = m->m_len = len;
3923 			m->m_pkthdr.rcvif = ifp;
3924 
3925 			DBRUN(BCE_VERBOSE_RECV,
3926 			      struct ether_header *eh;
3927 			      eh = mtod(m, struct ether_header *);
3928 			      if_printf(ifp, "%s(): to: %6D, from: %6D, "
3929 			      		"type: 0x%04X\n", __func__,
3930 					eh->ether_dhost, ":",
3931 					eh->ether_shost, ":",
3932 					htons(eh->ether_type)));
3933 
3934 			/* Validate the checksum if offload enabled. */
3935 			if (ifp->if_capenable & IFCAP_RXCSUM) {
3936 				/* Check for an IP datagram. */
3937 				if (status & L2_FHDR_STATUS_IP_DATAGRAM) {
3938 					m->m_pkthdr.csum_flags |=
3939 						CSUM_IP_CHECKED;
3940 
3941 					/* Check if the IP checksum is valid. */
3942 					if ((l2fhdr->l2_fhdr_ip_xsum ^
3943 					     0xffff) == 0) {
3944 						m->m_pkthdr.csum_flags |=
3945 							CSUM_IP_VALID;
3946 					} else {
3947 						DBPRINT(sc, BCE_WARN_RECV,
3948 							"%s(): Invalid IP checksum = 0x%04X!\n",
3949 							__func__, l2fhdr->l2_fhdr_ip_xsum);
3950 					}
3951 				}
3952 
3953 				/* Check for a valid TCP/UDP frame. */
3954 				if (status & (L2_FHDR_STATUS_TCP_SEGMENT |
3955 					      L2_FHDR_STATUS_UDP_DATAGRAM)) {
3956 
3957 					/* Check for a good TCP/UDP checksum. */
3958 					if ((status &
3959 					     (L2_FHDR_ERRORS_TCP_XSUM |
3960 					      L2_FHDR_ERRORS_UDP_XSUM)) == 0) {
3961 						m->m_pkthdr.csum_data =
3962 						l2fhdr->l2_fhdr_tcp_udp_xsum;
3963 						m->m_pkthdr.csum_flags |=
3964 							CSUM_DATA_VALID |
3965 							CSUM_PSEUDO_HDR;
3966 					} else {
3967 						DBPRINT(sc, BCE_WARN_RECV,
3968 							"%s(): Invalid TCP/UDP checksum = 0x%04X!\n",
3969 							__func__, l2fhdr->l2_fhdr_tcp_udp_xsum);
3970 					}
3971 				}
3972 			}
3973 
3974 			ifp->if_ipackets++;
3975 bce_rx_int_next_rx:
3976 			sw_prod = NEXT_RX_BD(sw_prod);
3977 		}
3978 
3979 		sw_cons = NEXT_RX_BD(sw_cons);
3980 
3981 		/* If we have a packet, pass it up the stack */
3982 		if (m) {
3983 			DBPRINT(sc, BCE_VERBOSE_RECV,
3984 				"%s(): Passing received frame up.\n", __func__);
3985 
3986 			if (status & L2_FHDR_STATUS_L2_VLAN_TAG) {
3987 				m->m_flags |= M_VLANTAG;
3988 				m->m_pkthdr.ether_vlantag =
3989 					l2fhdr->l2_fhdr_vlan_tag;
3990 			}
3991 			ether_input_chain(ifp, m, chain);
3992 
3993 			DBRUNIF(1, sc->rx_mbuf_alloc--);
3994 		}
3995 
3996 		/*
3997 		 * If polling(4) is not enabled, refresh hw_cons to see
3998 		 * whether there's new work.
3999 		 *
4000 		 * If polling(4) is enabled, i.e. count >= 0, refreshing
4001 		 * should not be performed, so that we would not spend
4002 		 * too much time in RX processing.
4003 		 */
4004 		if (count < 0 && sw_cons == hw_cons)
4005 			hw_cons = sc->hw_rx_cons = bce_get_hw_rx_cons(sc);
4006 
4007 		/*
4008 		 * Prevent speculative reads from getting ahead
4009 		 * of the status block.
4010 		 */
4011 		bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
4012 				  BUS_SPACE_BARRIER_READ);
4013 	}
4014 
4015 	ether_input_dispatch(chain);
4016 
4017 	for (i = 0; i < RX_PAGES; i++) {
4018 		bus_dmamap_sync(sc->rx_bd_chain_tag,
4019 				sc->rx_bd_chain_map[i], BUS_DMASYNC_PREWRITE);
4020 	}
4021 
4022 	sc->rx_cons = sw_cons;
4023 	sc->rx_prod = sw_prod;
4024 	sc->rx_prod_bseq = sw_prod_bseq;
4025 
4026 	REG_WR16(sc, MB_RX_CID_ADDR + BCE_L2CTX_HOST_BDIDX, sc->rx_prod);
4027 	REG_WR(sc, MB_RX_CID_ADDR + BCE_L2CTX_HOST_BSEQ, sc->rx_prod_bseq);
4028 
4029 	DBPRINT(sc, BCE_INFO_RECV, "%s(exit): rx_prod = 0x%04X, "
4030 		"rx_cons = 0x%04X, rx_prod_bseq = 0x%08X\n",
4031 		__func__, sc->rx_prod, sc->rx_cons, sc->rx_prod_bseq);
4032 }
4033 
4034 
4035 /****************************************************************************/
4036 /* Reads the transmit consumer value from the status block (skipping over   */
4037 /* chain page pointer if necessary).                                        */
4038 /*                                                                          */
4039 /* Returns:                                                                 */
4040 /*   hw_cons                                                                */
4041 /****************************************************************************/
4042 static __inline uint16_t
4043 bce_get_hw_tx_cons(struct bce_softc *sc)
4044 {
4045 	uint16_t hw_cons = sc->status_block->status_tx_quick_consumer_index0;
4046 
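	/* Skip the next-page pointer entry at the end of each TX chain page. */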
4047 	if ((hw_cons & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
4048 		hw_cons++;
4049 	return hw_cons;
4050 }
4051 
4052 
4053 /****************************************************************************/
4054 /* Handles transmit completion interrupt events.                            */
4055 /*                                                                          */
4056 /* Returns:                                                                 */
4057 /*   Nothing.                                                               */
4058 /****************************************************************************/
4059 static void
4060 bce_tx_intr(struct bce_softc *sc)
4061 {
4062 	struct ifnet *ifp = &sc->arpcom.ac_if;
4063 	uint16_t hw_tx_cons, sw_tx_cons, sw_tx_chain_cons;
4064 
4065 	ASSERT_SERIALIZED(ifp->if_serializer);
4066 
4067 	DBRUNIF(1, sc->tx_interrupts++);
4068 
4069 	/* Get the hardware's view of the TX consumer index. */
4070 	hw_tx_cons = sc->hw_tx_cons = bce_get_hw_tx_cons(sc);
4071 	sw_tx_cons = sc->tx_cons;
4072 
4073 	/* Prevent speculative reads from getting ahead of the status block. */
4074 	bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
4075 			  BUS_SPACE_BARRIER_READ);
4076 
4077 	/* Cycle through any completed TX chain page entries. */
4078 	while (sw_tx_cons != hw_tx_cons) {
4079 #ifdef BCE_DEBUG
4080 		struct tx_bd *txbd = NULL;
4081 #endif
4082 		sw_tx_chain_cons = TX_CHAIN_IDX(sw_tx_cons);
4083 
4084 		DBPRINT(sc, BCE_INFO_SEND,
4085 			"%s(): hw_tx_cons = 0x%04X, sw_tx_cons = 0x%04X, "
4086 			"sw_tx_chain_cons = 0x%04X\n",
4087 			__func__, hw_tx_cons, sw_tx_cons, sw_tx_chain_cons);
4088 
4089 		DBRUNIF((sw_tx_chain_cons > MAX_TX_BD),
4090 			if_printf(ifp, "%s(%d): "
4091 				  "TX chain consumer out of range! "
4092 				  " 0x%04X > 0x%04X\n",
4093 				  __FILE__, __LINE__, sw_tx_chain_cons,
4094 				  (int)MAX_TX_BD);
4095 			bce_breakpoint(sc));
4096 
4097 		DBRUNIF(1, txbd = &sc->tx_bd_chain[TX_PAGE(sw_tx_chain_cons)]
4098 				[TX_IDX(sw_tx_chain_cons)]);
4099 
4100 		DBRUNIF((txbd == NULL),
4101 			if_printf(ifp, "%s(%d): "
4102 				  "Unexpected NULL tx_bd[0x%04X]!\n",
4103 				  __FILE__, __LINE__, sw_tx_chain_cons);
4104 			bce_breakpoint(sc));
4105 
4106 		DBRUN(BCE_INFO_SEND,
4107 		      if_printf(ifp, "%s(): ", __func__);
4108 		      bce_dump_txbd(sc, sw_tx_chain_cons, txbd));
4109 
4110 		/*
4111 		 * Free the associated mbuf. Remember
4112 		 * that only the last tx_bd of a packet
4113 		 * has an mbuf pointer and DMA map.
4114 		 */
4115 		if (sc->tx_mbuf_ptr[sw_tx_chain_cons] != NULL) {
4116 			/* Validate that this is the last tx_bd. */
4117 			DBRUNIF((!(txbd->tx_bd_flags & TX_BD_FLAGS_END)),
4118 				if_printf(ifp, "%s(%d): "
4119 				"tx_bd END flag not set but "
4120 				"txmbuf != NULL!\n", __FILE__, __LINE__);
4121 				bce_breakpoint(sc));
4122 
4123 			DBRUN(BCE_INFO_SEND,
4124 			      if_printf(ifp, "%s(): Unloading map/freeing mbuf "
4125 			      		"from tx_bd[0x%04X]\n", __func__,
4126 					sw_tx_chain_cons));
4127 
4128 			/* Unmap the mbuf. */
4129 			bus_dmamap_unload(sc->tx_mbuf_tag,
4130 					  sc->tx_mbuf_map[sw_tx_chain_cons]);
4131 
4132 			/* Free the mbuf. */
4133 			m_freem(sc->tx_mbuf_ptr[sw_tx_chain_cons]);
4134 			sc->tx_mbuf_ptr[sw_tx_chain_cons] = NULL;
4135 			DBRUNIF(1, sc->tx_mbuf_alloc--);
4136 
4137 			ifp->if_opackets++;
4138 		}
4139 
4140 		sc->used_tx_bd--;
4141 		sw_tx_cons = NEXT_TX_BD(sw_tx_cons);
4142 
4143 		if (sw_tx_cons == hw_tx_cons) {
4144 			/* Refresh hw_cons to see if there's new work. */
4145 			hw_tx_cons = sc->hw_tx_cons = bce_get_hw_tx_cons(sc);
4146 		}
4147 
4148 		/*
4149 		 * Prevent speculative reads from getting
4150 		 * ahead of the status block.
4151 		 */
4152 		bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
4153 				  BUS_SPACE_BARRIER_READ);
4154 	}
4155 
4156 	if (sc->used_tx_bd == 0) {
4157 		/* Clear the TX timeout timer. */
4158 		ifp->if_timer = 0;
4159 	}
4160 
4161 	/* Clear the tx hardware queue full flag. */
4162 	if (sc->max_tx_bd - sc->used_tx_bd >= BCE_TX_SPARE_SPACE) {
4163 		DBRUNIF((ifp->if_flags & IFF_OACTIVE),
4164 			DBPRINT(sc, BCE_WARN_SEND,
4165 				"%s(): Open TX chain! %d/%d (used/total)\n",
4166 				__func__, sc->used_tx_bd, sc->max_tx_bd));
4167 		ifp->if_flags &= ~IFF_OACTIVE;
4168 	}
4169 	sc->tx_cons = sw_tx_cons;
4170 }
4171 
4172 
4173 /****************************************************************************/
4174 /* Disables interrupt generation.                                           */
4175 /*                                                                          */
4176 /* Returns:                                                                 */
4177 /*   Nothing.                                                               */
4178 /****************************************************************************/
4179 static void
4180 bce_disable_intr(struct bce_softc *sc)
4181 {
4182 	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, BCE_PCICFG_INT_ACK_CMD_MASK_INT);
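	/* Read back to flush the posted write. */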
4183 	REG_RD(sc, BCE_PCICFG_INT_ACK_CMD);
4184 	lwkt_serialize_handler_disable(sc->arpcom.ac_if.if_serializer);
4185 }
4186 
4187 
4188 /****************************************************************************/
4189 /* Enables interrupt generation.                                            */
4190 /*                                                                          */
4191 /* Returns:                                                                 */
4192 /*   Nothing.                                                               */
4193 /****************************************************************************/
4194 static void
4195 bce_enable_intr(struct bce_softc *sc)
4196 {
4197 	uint32_t val;
4198 
4199 	lwkt_serialize_handler_enable(sc->arpcom.ac_if.if_serializer);
4200 
4201 	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
4202 	       BCE_PCICFG_INT_ACK_CMD_INDEX_VALID |
4203 	       BCE_PCICFG_INT_ACK_CMD_MASK_INT | sc->last_status_idx);
4204 
4205 	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
4206 	       BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx);
4207 
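	/*
	 * Kick host coalescing (COAL_NOW) so any work that accumulated
	 * while interrupts were disabled is picked up right away.
	 */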
4208 	val = REG_RD(sc, BCE_HC_COMMAND);
4209 	REG_WR(sc, BCE_HC_COMMAND, val | BCE_HC_COMMAND_COAL_NOW);
4210 }
4211 
4212 
4213 /****************************************************************************/
4214 /* Handles controller initialization.                                       */
4215 /*                                                                          */
4216 /* Returns:                                                                 */
4217 /*   Nothing.                                                               */
4218 /****************************************************************************/
4219 static void
4220 bce_init(void *xsc)
4221 {
4222 	struct bce_softc *sc = xsc;
4223 	struct ifnet *ifp = &sc->arpcom.ac_if;
4224 	uint32_t ether_mtu;
4225 	int error;
4226 
4227 	ASSERT_SERIALIZED(ifp->if_serializer);
4228 
4229 	/* Check if the driver is still running and bail out if it is. */
4230 	if (ifp->if_flags & IFF_RUNNING)
4231 		return;
4232 
4233 	bce_stop(sc);
4234 
4235 	error = bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
4236 	if (error) {
4237 		if_printf(ifp, "Controller reset failed!\n");
4238 		goto back;
4239 	}
4240 
4241 	error = bce_chipinit(sc);
4242 	if (error) {
4243 		if_printf(ifp, "Controller initialization failed!\n");
4244 		goto back;
4245 	}
4246 
4247 	error = bce_blockinit(sc);
4248 	if (error) {
4249 		if_printf(ifp, "Block initialization failed!\n");
4250 		goto back;
4251 	}
4252 
4253 	/* Load our MAC address. */
4254 	bcopy(IF_LLADDR(ifp), sc->eaddr, ETHER_ADDR_LEN);
4255 	bce_set_mac_addr(sc);
4256 
4257 	/* Calculate and program the Ethernet MTU size. */
4258 	ether_mtu = ETHER_HDR_LEN + EVL_ENCAPLEN + ifp->if_mtu + ETHER_CRC_LEN;
4259 
4260 	DBPRINT(sc, BCE_INFO, "%s(): setting mtu = %d\n", __func__, ether_mtu);
4261 
4262 	/*
4263 	 * Program the mtu, enabling jumbo frame
4264 	 * support if necessary.  Also set the mbuf
4265 	 * allocation count for RX frames.
4266 	 * allocation size for RX frames.
4267 	if (ether_mtu > ETHER_MAX_LEN + EVL_ENCAPLEN) {
4268 #ifdef notyet
4269 		REG_WR(sc, BCE_EMAC_RX_MTU_SIZE,
4270 		       min(ether_mtu, BCE_MAX_JUMBO_ETHER_MTU) |
4271 		       BCE_EMAC_RX_MTU_SIZE_JUMBO_ENA);
4272 		sc->mbuf_alloc_size = MJUM9BYTES;
4273 #else
4274 		panic("jumbo buffers are not supported yet");
4275 #endif
4276 	} else {
4277 		REG_WR(sc, BCE_EMAC_RX_MTU_SIZE, ether_mtu);
4278 		sc->mbuf_alloc_size = MCLBYTES;
4279 	}
4280 
4281 	/* Calculate the RX Ethernet frame size for rx_bd's. */
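	/*
	 * The extra 2 bytes cover the alignment pad the chip inserts
	 * before the Ethernet header (see bce_rx_intr()).
	 */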
4282 	sc->max_frame_size = sizeof(struct l2_fhdr) + 2 + ether_mtu + 8;
4283 
4284 	DBPRINT(sc, BCE_INFO,
4285 		"%s(): mclbytes = %d, mbuf_alloc_size = %d, "
4286 		"max_frame_size = %d\n",
4287 		__func__, (int)MCLBYTES, sc->mbuf_alloc_size,
4288 		sc->max_frame_size);
4289 
4290 	/* Program appropriate promiscuous/multicast filtering. */
4291 	bce_set_rx_mode(sc);
4292 
4293 	/* Init RX buffer descriptor chain. */
4294 	bce_init_rx_chain(sc);	/* XXX return value */
4295 
4296 	/* Init TX buffer descriptor chain. */
4297 	bce_init_tx_chain(sc);	/* XXX return value */
4298 
4299 #ifdef DEVICE_POLLING
4300 	/* Disable interrupts if we are polling. */
4301 	if (ifp->if_flags & IFF_POLLING) {
4302 		bce_disable_intr(sc);
4303 
4304 		REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
4305 		       (1 << 16) | sc->bce_rx_quick_cons_trip);
4306 		REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
4307 		       (1 << 16) | sc->bce_tx_quick_cons_trip);
4308 	} else
4309 #endif
4310 	/* Enable host interrupts. */
4311 	bce_enable_intr(sc);
4312 
4313 	bce_ifmedia_upd(ifp);
4314 
4315 	ifp->if_flags |= IFF_RUNNING;
4316 	ifp->if_flags &= ~IFF_OACTIVE;
4317 
4318 	callout_reset(&sc->bce_stat_ch, hz, bce_tick, sc);
4319 back:
4320 	if (error)
4321 		bce_stop(sc);
4322 }
4323 
4324 
4325 /****************************************************************************/
4326 /* Initialize the controller just enough so that any management firmware    */
4327 /* running on the device will continue to operate correctly.                */
4328 /*                                                                          */
4329 /* Returns:                                                                 */
4330 /*   Nothing.                                                               */
4331 /****************************************************************************/
4332 static void
4333 bce_mgmt_init(struct bce_softc *sc)
4334 {
4335 	struct ifnet *ifp = &sc->arpcom.ac_if;
4336 	uint32_t val;
4337 
4338 	/* Check if the driver is still running and bail out if it is. */
4339 	if (ifp->if_flags & IFF_RUNNING)
4340 		return;
4341 
4342 	/* Initialize the on-board CPUs. */
4343 	bce_init_cpus(sc);
4344 
4345 	/* Set the page size and clear the RV2P processor stall bits. */
4346 	val = (BCM_PAGE_BITS - 8) << 24;
4347 	REG_WR(sc, BCE_RV2P_CONFIG, val);
4348 
4349 	/* Enable all critical blocks in the MAC. */
4350 	REG_WR(sc, BCE_MISC_ENABLE_SET_BITS,
4351 	       BCE_MISC_ENABLE_SET_BITS_RX_V2P_ENABLE |
4352 	       BCE_MISC_ENABLE_SET_BITS_RX_DMA_ENABLE |
4353 	       BCE_MISC_ENABLE_SET_BITS_COMPLETION_ENABLE);
4354 	REG_RD(sc, BCE_MISC_ENABLE_SET_BITS);
4355 	DELAY(20);
4356 
4357 	bce_ifmedia_upd(ifp);
4358 }
4359 
4360 
4361 /****************************************************************************/
4362 /* Encapsulates an mbuf cluster into the tx_bd chain structure and makes   */
4363 /* the memory visible to the controller.                                    */
4364 /*                                                                          */
4365 /* Returns:                                                                 */
4366 /*   0 for success, positive value for failure.                             */
4367 /****************************************************************************/
4368 static int
4369 bce_encap(struct bce_softc *sc, struct mbuf **m_head)
4370 {
4371 	struct bce_dmamap_arg ctx;
4372 	bus_dma_segment_t segs[BCE_MAX_SEGMENTS];
4373 	bus_dmamap_t map, tmp_map;
4374 	struct mbuf *m0 = *m_head;
4375 	struct tx_bd *txbd = NULL;
4376 	uint16_t vlan_tag = 0, flags = 0;
4377 	uint16_t chain_prod, chain_prod_start, prod;
4378 	uint32_t prod_bseq;
4379 	int i, error, maxsegs;
4380 #ifdef BCE_DEBUG
4381 	uint16_t debug_prod;
4382 #endif
4383 
4384 	/* Transfer any checksum offload flags to the bd. */
4385 	if (m0->m_pkthdr.csum_flags) {
4386 		if (m0->m_pkthdr.csum_flags & CSUM_IP)
4387 			flags |= TX_BD_FLAGS_IP_CKSUM;
4388 		if (m0->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
4389 			flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
4390 	}
4391 
4392 	/* Transfer any VLAN tags to the bd. */
4393 	if (m0->m_flags & M_VLANTAG) {
4394 		flags |= TX_BD_FLAGS_VLAN_TAG;
4395 		vlan_tag = m0->m_pkthdr.ether_vlantag;
4396 	}
4397 
4398 	prod = sc->tx_prod;
4399 	chain_prod_start = chain_prod = TX_CHAIN_IDX(prod);
4400 
4401 	/* Map the mbuf into DMAable memory. */
4402 	map = sc->tx_mbuf_map[chain_prod_start];
4403 
4404 	maxsegs = sc->max_tx_bd - sc->used_tx_bd;
4405 	KASSERT(maxsegs >= BCE_TX_SPARE_SPACE,
4406 		("not enough segments %d\n", maxsegs));
4407 	if (maxsegs > BCE_MAX_SEGMENTS)
4408 		maxsegs = BCE_MAX_SEGMENTS;
4409 
4410 	/* Map the mbuf into our DMA address space. */
4411 	ctx.bce_maxsegs = maxsegs;
4412 	ctx.bce_segs = segs;
4413 	error = bus_dmamap_load_mbuf(sc->tx_mbuf_tag, map, m0,
4414 				     bce_dma_map_mbuf, &ctx, BUS_DMA_NOWAIT);
4415 	if (error == EFBIG || ctx.bce_maxsegs == 0) {
4416 		DBPRINT(sc, BCE_WARN, "%s(): fragmented mbuf\n", __func__);
4417 		DBRUNIF(1, bce_dump_mbuf(sc, m0););
4418 
4419 		m0 = m_defrag(*m_head, MB_DONTWAIT);
4420 		if (m0 == NULL) {
4421 			error = ENOBUFS;
4422 			goto back;
4423 		}
4424 		*m_head = m0;
4425 
4426 		ctx.bce_maxsegs = maxsegs;
4427 		ctx.bce_segs = segs;
4428 		error = bus_dmamap_load_mbuf(sc->tx_mbuf_tag, map, m0,
4429 					     bce_dma_map_mbuf, &ctx,
4430 					     BUS_DMA_NOWAIT);
4431 		if (error || ctx.bce_maxsegs == 0) {
4432 			if_printf(&sc->arpcom.ac_if,
4433 				  "Error mapping mbuf into TX chain\n");
4434 			if (error == 0)
4435 				error = EFBIG;
4436 			goto back;
4437 		}
4438 	} else if (error) {
4439 		if_printf(&sc->arpcom.ac_if,
4440 			  "Error mapping mbuf into TX chain\n");
4441 		goto back;
4442 	}
4443 
4444 	/* prod points to an empty tx_bd at this point. */
4445 	prod_bseq  = sc->tx_prod_bseq;
4446 
4447 #ifdef BCE_DEBUG
4448 	debug_prod = chain_prod;
4449 #endif
4450 
4451 	DBPRINT(sc, BCE_INFO_SEND,
4452 		"%s(): Start: prod = 0x%04X, chain_prod = %04X, "
4453 		"prod_bseq = 0x%08X\n",
4454 		__func__, prod, chain_prod, prod_bseq);
4455 
4456 	/*
4457 	 * Cycle through each mbuf segment that makes up
4458 	 * the outgoing frame, gathering the mapping info
4459 	 * for that segment and creating a tx_bd for
4460 	 * the mbuf.
4461 	 */
4462 	for (i = 0; i < ctx.bce_maxsegs; i++) {
4463 		chain_prod = TX_CHAIN_IDX(prod);
4464 		txbd= &sc->tx_bd_chain[TX_PAGE(chain_prod)][TX_IDX(chain_prod)];
4465 
4466 		txbd->tx_bd_haddr_lo = htole32(BCE_ADDR_LO(segs[i].ds_addr));
4467 		txbd->tx_bd_haddr_hi = htole32(BCE_ADDR_HI(segs[i].ds_addr));
4468 		txbd->tx_bd_mss_nbytes = htole16(segs[i].ds_len);
4469 		txbd->tx_bd_vlan_tag = htole16(vlan_tag);
4470 		txbd->tx_bd_flags = htole16(flags);
4471 		prod_bseq += segs[i].ds_len;
4472 		if (i == 0)
4473 			txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_START);
4474 		prod = NEXT_TX_BD(prod);
4475 	}
4476 
4477 	/* Set the END flag on the last TX buffer descriptor. */
4478 	txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_END);
4479 
4480 	DBRUN(BCE_EXCESSIVE_SEND,
4481 	      bce_dump_tx_chain(sc, debug_prod, ctx.bce_maxsegs));
4482 
4483 	DBPRINT(sc, BCE_INFO_SEND,
4484 		"%s(): End: prod = 0x%04X, chain_prod = %04X, "
4485 		"prod_bseq = 0x%08X\n",
4486 		__func__, prod, chain_prod, prod_bseq);
4487 
4488 	bus_dmamap_sync(sc->tx_mbuf_tag, map, BUS_DMASYNC_PREWRITE);
4489 
4490 	/*
4491 	 * Ensure that the mbuf pointer for this transmission
4492 	 * is placed at the array index of the last
4493 	 * descriptor in this chain.  This is done
4494 	 * because a single map is used for all
4495 	 * segments of the mbuf and we don't want to
4496 	 * unload the map before all of the segments
4497 	 * have been freed.
4498 	 */
4499 	sc->tx_mbuf_ptr[chain_prod] = m0;
4500 
4501 	tmp_map = sc->tx_mbuf_map[chain_prod];
4502 	sc->tx_mbuf_map[chain_prod] = map;
4503 	sc->tx_mbuf_map[chain_prod_start] = tmp_map;
4504 
4505 	sc->used_tx_bd += ctx.bce_maxsegs;
4506 
4507 	/* Update some debug statistics counters. */
4508 	DBRUNIF((sc->used_tx_bd > sc->tx_hi_watermark),
4509 		sc->tx_hi_watermark = sc->used_tx_bd);
4510 	DBRUNIF((sc->used_tx_bd == sc->max_tx_bd), sc->tx_full_count++);
4511 	DBRUNIF(1, sc->tx_mbuf_alloc++);
4512 
4513 	DBRUN(BCE_VERBOSE_SEND,
4514 	      bce_dump_tx_mbuf_chain(sc, chain_prod, ctx.bce_maxsegs));
4515 
4516 	/* prod points to the next free tx_bd at this point. */
4517 	sc->tx_prod = prod;
4518 	sc->tx_prod_bseq = prod_bseq;
4519 back:
4520 	if (error) {
4521 		m_freem(*m_head);
4522 		*m_head = NULL;
4523 	}
4524 	return error;
4525 }
4526 
4527 
4528 /****************************************************************************/
4529 /* Main transmit routine when called from another routine with a lock.      */
4530 /*                                                                          */
4531 /* Returns:                                                                 */
4532 /*   Nothing.                                                               */
4533 /****************************************************************************/
4534 static void
4535 bce_start(struct ifnet *ifp)
4536 {
4537 	struct bce_softc *sc = ifp->if_softc;
4538 	int count = 0;
4539 
4540 	ASSERT_SERIALIZED(ifp->if_serializer);
4541 
4542 	/* If there's no link, purge any queued frames and exit. */
4543 	if (!sc->bce_link) {
4544 		ifq_purge(&ifp->if_snd);
4545 		return;
4546 	}
4547 
4548 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
4549 		return;
4550 
4551 	DBPRINT(sc, BCE_INFO_SEND,
4552 		"%s(): Start: tx_prod = 0x%04X, tx_chain_prod = %04X, "
4553 		"tx_prod_bseq = 0x%08X\n",
4554 		__func__,
4555 		sc->tx_prod, TX_CHAIN_IDX(sc->tx_prod), sc->tx_prod_bseq);
4556 
4557 	for (;;) {
4558 		struct mbuf *m_head;
4559 
4560 		/*
4561 		 * We keep BCE_TX_SPARE_SPACE entries, so bce_encap() is
4562 		 * unlikely to fail.
4563 		 */
4564 		if (sc->max_tx_bd - sc->used_tx_bd < BCE_TX_SPARE_SPACE) {
4565 			ifp->if_flags |= IFF_OACTIVE;
4566 			break;
4567 		}
4568 
4569 		/* Check for any frames to send. */
4570 		m_head = ifq_dequeue(&ifp->if_snd, NULL);
4571 		if (m_head == NULL)
4572 			break;
4573 
4574 		/*
4575 		 * Pack the data into the transmit ring.  If this
4576 		 * fails, bce_encap() has already freed the mbuf;
4577 		 * set the OACTIVE flag and wait for the NIC to
4578 		 * drain the chain before trying again.
4579 		 */
4580 		if (bce_encap(sc, &m_head)) {
4581 			ifp->if_flags |= IFF_OACTIVE;
4582 			DBPRINT(sc, BCE_INFO_SEND,
4583 				"TX chain is closed for business! "
4584 				"Total tx_bd used = %d\n",
4585 				sc->used_tx_bd);
4586 			break;
4587 		}
4588 
4589 		count++;
4590 
4591 		/* Send a copy of the frame to any BPF listeners. */
4592 		ETHER_BPF_MTAP(ifp, m_head);
4593 	}
4594 
4595 	if (count == 0) {
4596 		/* no packets were dequeued */
4597 		DBPRINT(sc, BCE_VERBOSE_SEND,
4598 			"%s(): No packets were dequeued\n", __func__);
4599 		return;
4600 	}
4601 
4602 	DBPRINT(sc, BCE_INFO_SEND,
4603 		"%s(): End: tx_prod = 0x%04X, tx_chain_prod = 0x%04X, "
4604 		"tx_prod_bseq = 0x%08X\n",
4605 		__func__,
4606 		sc->tx_prod, TX_CHAIN_IDX(sc->tx_prod), sc->tx_prod_bseq);
4607 
4608 	/* Start the transmit. */
4609 	REG_WR16(sc, MB_TX_CID_ADDR + BCE_L2CTX_TX_HOST_BIDX, sc->tx_prod);
4610 	REG_WR(sc, MB_TX_CID_ADDR + BCE_L2CTX_TX_HOST_BSEQ, sc->tx_prod_bseq);
4611 
4612 	/* Set the tx timeout. */
4613 	ifp->if_timer = BCE_TX_TIMEOUT;
4614 }
4615 
4616 
4617 /****************************************************************************/
4618 /* Handles any IOCTL calls from the operating system.                       */
4619 /*                                                                          */
4620 /* Returns:                                                                 */
4621 /*   0 for success, positive value for failure.                             */
4622 /****************************************************************************/
4623 static int
4624 bce_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
4625 {
4626 	struct bce_softc *sc = ifp->if_softc;
4627 	struct ifreq *ifr = (struct ifreq *)data;
4628 	struct mii_data *mii;
4629 	int mask, error = 0;
4630 
4631 	ASSERT_SERIALIZED(ifp->if_serializer);
4632 
4633 	switch(command) {
4634 	case SIOCSIFMTU:
4635 		/* Check that the MTU setting is supported. */
4636 		if (ifr->ifr_mtu < BCE_MIN_MTU ||
4637 #ifdef notyet
4638 		    ifr->ifr_mtu > BCE_MAX_JUMBO_MTU
4639 #else
4640 		    ifr->ifr_mtu > ETHERMTU
4641 #endif
4642 		   ) {
4643 			error = EINVAL;
4644 			break;
4645 		}
4646 
4647 		DBPRINT(sc, BCE_INFO, "Setting new MTU of %d\n", ifr->ifr_mtu);
4648 
4649 		ifp->if_mtu = ifr->ifr_mtu;
4650 		ifp->if_flags &= ~IFF_RUNNING;	/* Force reinitialize */
4651 		bce_init(sc);
4652 		break;
4653 
4654 	case SIOCSIFFLAGS:
4655 		if (ifp->if_flags & IFF_UP) {
4656 			if (ifp->if_flags & IFF_RUNNING) {
4657 				mask = ifp->if_flags ^ sc->bce_if_flags;
4658 
4659 				if (mask & (IFF_PROMISC | IFF_ALLMULTI))
4660 					bce_set_rx_mode(sc);
4661 			} else {
4662 				bce_init(sc);
4663 			}
4664 		} else if (ifp->if_flags & IFF_RUNNING) {
4665 			bce_stop(sc);
4666 		}
4667 		sc->bce_if_flags = ifp->if_flags;
4668 		break;
4669 
4670 	case SIOCADDMULTI:
4671 	case SIOCDELMULTI:
4672 		if (ifp->if_flags & IFF_RUNNING)
4673 			bce_set_rx_mode(sc);
4674 		break;
4675 
4676 	case SIOCSIFMEDIA:
4677 	case SIOCGIFMEDIA:
4678 		DBPRINT(sc, BCE_VERBOSE, "bce_phy_flags = 0x%08X\n",
4679 			sc->bce_phy_flags);
4680 		DBPRINT(sc, BCE_VERBOSE, "Copper media set/get\n");
4681 
4682 		mii = device_get_softc(sc->bce_miibus);
4683 		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
4684 		break;
4685 
4686 	case SIOCSIFCAP:
4687 		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
4688 		DBPRINT(sc, BCE_INFO, "Received SIOCSIFCAP = 0x%08X\n",
4689 			(uint32_t) mask);
4690 
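		/*
		 * Toggle hardware checksum offload; if_hwassist
		 * advertises to the stack which TX checksums the
		 * controller will compute, so clear it whenever the
		 * offload is disabled.
		 */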
4691 		if (mask & IFCAP_HWCSUM) {
4692 			ifp->if_capenable ^= IFCAP_HWCSUM;
4693 			if (IFCAP_HWCSUM & ifp->if_capenable)
4694 				ifp->if_hwassist = BCE_IF_HWASSIST;
4695 			else
4696 				ifp->if_hwassist = 0;
4697 		}
4698 		break;
4699 
4700 	default:
4701 		error = ether_ioctl(ifp, command, data);
4702 		break;
4703 	}
4704 	return error;
4705 }
4706 
4707 
4708 /****************************************************************************/
4709 /* Transmit timeout handler.                                                */
4710 /*                                                                          */
4711 /* Returns:                                                                 */
4712 /*   Nothing.                                                               */
4713 /****************************************************************************/
4714 static void
4715 bce_watchdog(struct ifnet *ifp)
4716 {
4717 	struct bce_softc *sc = ifp->if_softc;
4718 
4719 	ASSERT_SERIALIZED(ifp->if_serializer);
4720 
4721 	DBRUN(BCE_VERBOSE_SEND,
4722 	      bce_dump_driver_state(sc);
4723 	      bce_dump_status_block(sc));
4724 
4725 	/*
4726 	 * If we are in this routine because of pause frames, then
4727 	 * don't reset the hardware.
4728 	 */
4729 	if (REG_RD(sc, BCE_EMAC_TX_STATUS) & BCE_EMAC_TX_STATUS_XOFFED)
4730 		return;
4731 
4732 	if_printf(ifp, "Watchdog timeout occurred, resetting!\n");
4733 
4734 	/* DBRUN(BCE_FATAL, bce_breakpoint(sc)); */
4735 
4736 	ifp->if_flags &= ~IFF_RUNNING;	/* Force reinitialize */
4737 	bce_init(sc);
4738 
4739 	ifp->if_oerrors++;
4740 
4741 	if (!ifq_is_empty(&ifp->if_snd))
4742 		if_devstart(ifp);
4743 }
4744 
4745 
4746 #ifdef DEVICE_POLLING
4747 
4748 static void
4749 bce_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
4750 {
4751 	struct bce_softc *sc = ifp->if_softc;
4752 	struct status_block *sblk = sc->status_block;
4753 	uint16_t hw_tx_cons, hw_rx_cons;
4754 
4755 	ASSERT_SERIALIZED(ifp->if_serializer);
4756 
4757 	switch (cmd) {
4758 	case POLL_REGISTER:
4759 		bce_disable_intr(sc);
4760 
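		/*
		 * The upper 16 bits of the QUICK_CONS_TRIP registers
		 * hold the "during interrupt" trip count (the
		 * DEREGISTER path below restores the saved *_int
		 * values there).  Forcing that half to 1 while
		 * polling keeps the status block updated promptly
		 * even though interrupts are disabled.
		 */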
4761 		REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
4762 		       (1 << 16) | sc->bce_rx_quick_cons_trip);
4763 		REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
4764 		       (1 << 16) | sc->bce_tx_quick_cons_trip);
4765 		return;
4766 	case POLL_DEREGISTER:
4767 		bce_enable_intr(sc);
4768 
4769 		REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
4770 		       (sc->bce_tx_quick_cons_trip_int << 16) |
4771 		       sc->bce_tx_quick_cons_trip);
4772 		REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
4773 		       (sc->bce_rx_quick_cons_trip_int << 16) |
4774 		       sc->bce_rx_quick_cons_trip);
4775 		return;
4776 	default:
4777 		break;
4778 	}
4779 
4780 	bus_dmamap_sync(sc->status_tag, sc->status_map, BUS_DMASYNC_POSTREAD);
4781 
4782 	if (cmd == POLL_AND_CHECK_STATUS) {
4783 		uint32_t status_attn_bits;
4784 
4785 		status_attn_bits = sblk->status_attn_bits;
4786 
4787 		DBRUNIF(DB_RANDOMTRUE(bce_debug_unexpected_attention),
4788 			if_printf(ifp,
4789 			"Simulating unexpected status attention bit set.");
4790 			status_attn_bits |= STATUS_ATTN_BITS_PARITY_ERROR);
4791 
4792 		/* Was it a link change interrupt? */
4793 		if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
4794 		    (sblk->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE))
4795 			bce_phy_intr(sc);
4796 
4797 		/*
4798 		 * If any other attention is asserted then
4799 		 * the chip is toast.
4800 		 */
4801 		if ((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) !=
4802 		     (sblk->status_attn_bits_ack &
4803 		      ~STATUS_ATTN_BITS_LINK_STATE)) {
4804 			DBRUN(1, sc->unexpected_attentions++);
4805 
4806 			if_printf(ifp, "Fatal attention detected: 0x%08X\n",
4807 				  sblk->status_attn_bits);
4808 
4809 			DBRUN(BCE_FATAL,
4810 			if (bce_debug_unexpected_attention == 0)
4811 				bce_breakpoint(sc));
4812 
4813 			bce_init(sc);
4814 			return;
4815 		}
4816 	}
4817 
4818 	hw_rx_cons = bce_get_hw_rx_cons(sc);
4819 	hw_tx_cons = bce_get_hw_tx_cons(sc);
4820 
4821 	/* Check for any completed RX frames. */
4822 	if (hw_rx_cons != sc->hw_rx_cons)
4823 		bce_rx_intr(sc, count);
4824 
4825 	/* Check for any completed TX frames. */
4826 	if (hw_tx_cons != sc->hw_tx_cons)
4827 		bce_tx_intr(sc);
4828 
4829 	bus_dmamap_sync(sc->status_tag,	sc->status_map, BUS_DMASYNC_PREWRITE);
4830 
4831 	/* Check for new frames to transmit. */
4832 	if (!ifq_is_empty(&ifp->if_snd))
4833 		if_devstart(ifp);
4834 }
4835 
4836 #endif	/* DEVICE_POLLING */
4837 
4838 
4842 /****************************************************************************/
4843 /* Main interrupt entry point.  Verifies that the controller generated the  */
4844 /* interrupt and then calls a separate routine to handle the various        */
4845 /* interrupt causes (PHY, TX, RX).                                          */
4846 /*                                                                          */
4847 /* Returns:                                                                 */
4848 /*   Nothing.                                                               */
4849 /****************************************************************************/
4850 static void
4851 bce_intr(void *xsc)
4852 {
4853 	struct bce_softc *sc = xsc;
4854 	struct ifnet *ifp = &sc->arpcom.ac_if;
4855 	struct status_block *sblk;
4856 	uint16_t hw_rx_cons, hw_tx_cons;
4857 
4858 	ASSERT_SERIALIZED(ifp->if_serializer);
4859 
4860 	DBPRINT(sc, BCE_EXCESSIVE, "Entering %s()\n", __func__);
4861 	DBRUNIF(1, sc->interrupts_generated++);
4862 
4863 	bus_dmamap_sync(sc->status_tag, sc->status_map, BUS_DMASYNC_POSTREAD);
4864 	sblk = sc->status_block;
4865 
4866 	/*
4867 	 * If the hardware status block index matches the last value
4868 	 * read by the driver and we haven't asserted our interrupt
4869 	 * then there's nothing to do.
4870 	 */
4871 	if (sblk->status_idx == sc->last_status_idx &&
4872 	    (REG_RD(sc, BCE_PCICFG_MISC_STATUS) &
4873 	     BCE_PCICFG_MISC_STATUS_INTA_VALUE))
4874 		return;
4875 
4876 	/* Ack the interrupt and stop others from occurring. */
4877 	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
4878 	       BCE_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
4879 	       BCE_PCICFG_INT_ACK_CMD_MASK_INT);
4880 
4881 	/* Check if the hardware has finished any work. */
4882 	hw_rx_cons = bce_get_hw_rx_cons(sc);
4883 	hw_tx_cons = bce_get_hw_tx_cons(sc);
4884 
4885 	/* Keep processing data as long as there is work to do. */
4886 	for (;;) {
4887 		uint32_t status_attn_bits;
4888 
4889 		status_attn_bits = sblk->status_attn_bits;
4890 
4891 		DBRUNIF(DB_RANDOMTRUE(bce_debug_unexpected_attention),
4892 			if_printf(ifp,
4893 			"Simulating unexpected status attention bit set.");
4894 			status_attn_bits |= STATUS_ATTN_BITS_PARITY_ERROR);
4895 
4896 		/* Was it a link change interrupt? */
4897 		if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
4898 		    (sblk->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE))
4899 			bce_phy_intr(sc);
4900 
4901 		/*
4902 		 * If any other attention is asserted then
4903 		 * the chip is toast.
4904 		 */
4905 		if ((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) !=
4906 		     (sblk->status_attn_bits_ack &
4907 		      ~STATUS_ATTN_BITS_LINK_STATE)) {
4908 			DBRUN(1, sc->unexpected_attentions++);
4909 
4910 			if_printf(ifp, "Fatal attention detected: 0x%08X\n",
4911 				  sblk->status_attn_bits);
4912 
4913 			DBRUN(BCE_FATAL,
4914 			if (bce_debug_unexpected_attention == 0)
4915 				bce_breakpoint(sc));
4916 
4917 			bce_init(sc);
4918 			return;
4919 		}
4920 
4921 		/* Check for any completed RX frames. */
4922 		if (hw_rx_cons != sc->hw_rx_cons)
4923 			bce_rx_intr(sc, -1);
4924 
4925 		/* Check for any completed TX frames. */
4926 		if (hw_tx_cons != sc->hw_tx_cons)
4927 			bce_tx_intr(sc);
4928 
4929 		/*
4930 		 * Save the status block index value
4931 		 * for use during the next interrupt.
4932 		 */
4933 		sc->last_status_idx = sblk->status_idx;
4934 
4935 		/*
4936 		 * Prevent speculative reads from getting
4937 		 * ahead of the status block.
4938 		 */
4939 		bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
4940 				  BUS_SPACE_BARRIER_READ);
4941 
4942 		/*
4943 		 * If there's no work left then exit the
4944 		 * interrupt service routine.
4945 		 */
4946 		hw_rx_cons = bce_get_hw_rx_cons(sc);
4947 		hw_tx_cons = bce_get_hw_tx_cons(sc);
4948 		if ((hw_rx_cons == sc->hw_rx_cons) && (hw_tx_cons == sc->hw_tx_cons))
4949 			break;
4950 	}
4951 
4952 	bus_dmamap_sync(sc->status_tag,	sc->status_map, BUS_DMASYNC_PREWRITE);
4953 
4954 	/* Re-enable interrupts. */
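	/*
	 * Two writes: the first records the latest status block index
	 * with interrupts still masked, the second repeats it with the
	 * mask cleared.  (Intent inferred from the MASK_INT flag; the
	 * hardware manual is authoritative here.)
	 */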
4955 	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
4956 	       BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx |
4957 	       BCE_PCICFG_INT_ACK_CMD_MASK_INT);
4958 	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
4959 	       BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx);
4960 
4961 	if (sc->bce_coalchg_mask)
4962 		bce_coal_change(sc);
4963 
4964 	/* Handle any frames that arrived while handling the interrupt. */
4965 	if (!ifq_is_empty(&ifp->if_snd))
4966 		if_devstart(ifp);
4967 }
4968 
4969 
4970 /****************************************************************************/
4971 /* Programs the various packet receive modes (broadcast and multicast).     */
4972 /*                                                                          */
4973 /* Returns:                                                                 */
4974 /*   Nothing.                                                               */
4975 /****************************************************************************/
4976 static void
4977 bce_set_rx_mode(struct bce_softc *sc)
4978 {
4979 	struct ifnet *ifp = &sc->arpcom.ac_if;
4980 	struct ifmultiaddr *ifma;
4981 	uint32_t hashes[NUM_MC_HASH_REGISTERS] = { 0, 0, 0, 0, 0, 0, 0, 0 };
4982 	uint32_t rx_mode, sort_mode;
4983 	int h, i;
4984 
4985 	ASSERT_SERIALIZED(ifp->if_serializer);
4986 
4987 	/* Initialize receive mode default settings. */
4988 	rx_mode = sc->rx_mode &
4989 		  ~(BCE_EMAC_RX_MODE_PROMISCUOUS |
4990 		    BCE_EMAC_RX_MODE_KEEP_VLAN_TAG);
4991 	sort_mode = 1 | BCE_RPM_SORT_USER0_BC_EN;
4992 
4993 	/*
4994 	 * ASF/IPMI/UMP firmware requires that VLAN tag stripping
4995 	 * be enabled.
4996 	 */
4997 	if (!(BCE_IF_CAPABILITIES & IFCAP_VLAN_HWTAGGING) &&
4998 	    !(sc->bce_flags & BCE_MFW_ENABLE_FLAG))
4999 		rx_mode |= BCE_EMAC_RX_MODE_KEEP_VLAN_TAG;
5000 
5001 	/*
5002 	 * Check for promiscuous, all multicast, or selected
5003 	 * multicast address filtering.
5004 	 */
5005 	if (ifp->if_flags & IFF_PROMISC) {
5006 		DBPRINT(sc, BCE_INFO, "Enabling promiscuous mode.\n");
5007 
5008 		/* Enable promiscuous mode. */
5009 		rx_mode |= BCE_EMAC_RX_MODE_PROMISCUOUS;
5010 		sort_mode |= BCE_RPM_SORT_USER0_PROM_EN;
5011 	} else if (ifp->if_flags & IFF_ALLMULTI) {
5012 		DBPRINT(sc, BCE_INFO, "Enabling all multicast mode.\n");
5013 
5014 		/* Enable all multicast addresses. */
5015 		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
5016 			REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4),
5017 			       0xffffffff);
5018 		}
5019 		sort_mode |= BCE_RPM_SORT_USER0_MC_EN;
5020 	} else {
5021 		/* Accept one or more multicast(s). */
5022 		DBPRINT(sc, BCE_INFO, "Enabling selective multicast mode.\n");
5023 
5024 		LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
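		/*
		 * Hash each link-level multicast address into one of
		 * 256 bits spread over the eight 32-bit hash
		 * registers: the low byte of the Ethernet CRC picks
		 * the register (bits 7:5) and the bit within it
		 * (bits 4:0).
		 */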
5025 			if (ifma->ifma_addr->sa_family != AF_LINK)
5026 				continue;
5027 			h = ether_crc32_le(
5028 			    LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
5029 			    ETHER_ADDR_LEN) & 0xFF;
5030 			hashes[(h & 0xE0) >> 5] |= 1 << (h & 0x1F);
5031 		}
5032 
5033 		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
5034 			REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4),
5035 			       hashes[i]);
5036 		}
5037 		sort_mode |= BCE_RPM_SORT_USER0_MC_HSH_EN;
5038 	}
5039 
5040 	/* Only make changes if the receive mode has actually changed. */
5041 	if (rx_mode != sc->rx_mode) {
5042 		DBPRINT(sc, BCE_VERBOSE, "Enabling new receive mode: 0x%08X\n",
5043 			rx_mode);
5044 
5045 		sc->rx_mode = rx_mode;
5046 		REG_WR(sc, BCE_EMAC_RX_MODE, rx_mode);
5047 	}
5048 
5049 	/* Disable and clear the existing sort before enabling a new sort. */
5050 	REG_WR(sc, BCE_RPM_SORT_USER0, 0x0);
5051 	REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode);
5052 	REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode | BCE_RPM_SORT_USER0_ENA);
5053 }
5054 
5055 
5056 /****************************************************************************/
5057 /* Called periodically to update statistics from the controller's           */
5058 /* statistics block.                                                        */
5059 /*                                                                          */
5060 /* Returns:                                                                 */
5061 /*   Nothing.                                                               */
5062 /****************************************************************************/
5063 static void
5064 bce_stats_update(struct bce_softc *sc)
5065 {
5066 	struct ifnet *ifp = &sc->arpcom.ac_if;
5067 	struct statistics_block *stats = sc->stats_block;
5068 
5069 	DBPRINT(sc, BCE_EXCESSIVE, "Entering %s()\n", __func__);
5070 
5071 	ASSERT_SERIALIZED(ifp->if_serializer);
5072 
5073 	/*
5074 	 * Update the interface statistics from the hardware statistics.
5075 	 */
5076 	ifp->if_collisions = (u_long)stats->stat_EtherStatsCollisions;
5077 
5078 	ifp->if_ierrors = (u_long)stats->stat_EtherStatsUndersizePkts +
5079 			  (u_long)stats->stat_EtherStatsOverrsizePkts +
5080 			  (u_long)stats->stat_IfInMBUFDiscards +
5081 			  (u_long)stats->stat_Dot3StatsAlignmentErrors +
5082 			  (u_long)stats->stat_Dot3StatsFCSErrors;
5083 
5084 	ifp->if_oerrors =
5085 	(u_long)stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors +
5086 	(u_long)stats->stat_Dot3StatsExcessiveCollisions +
5087 	(u_long)stats->stat_Dot3StatsLateCollisions;
5088 
5089 	/*
5090 	 * Certain controllers don't report carrier sense errors correctly.
5091 	 * See errata E11_5708CA0_1165.
5092 	 */
5093 	if (!(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) &&
5094 	    !(BCE_CHIP_ID(sc) == BCE_CHIP_ID_5708_A0)) {
5095 		ifp->if_oerrors +=
5096 			(u_long)stats->stat_Dot3StatsCarrierSenseErrors;
5097 	}
5098 
5099 	/*
5100 	 * Update the sysctl statistics from the hardware statistics.
5101 	 */
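	/*
	 * The hardware exports the 64-bit counters as separate high
	 * and low 32-bit words; stitch them back together here.
	 */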
5102 	sc->stat_IfHCInOctets =
5103 		((uint64_t)stats->stat_IfHCInOctets_hi << 32) +
5104 		 (uint64_t)stats->stat_IfHCInOctets_lo;
5105 
5106 	sc->stat_IfHCInBadOctets =
5107 		((uint64_t)stats->stat_IfHCInBadOctets_hi << 32) +
5108 		 (uint64_t)stats->stat_IfHCInBadOctets_lo;
5109 
5110 	sc->stat_IfHCOutOctets =
5111 		((uint64_t)stats->stat_IfHCOutOctets_hi << 32) +
5112 		 (uint64_t)stats->stat_IfHCOutOctets_lo;
5113 
5114 	sc->stat_IfHCOutBadOctets =
5115 		((uint64_t)stats->stat_IfHCOutBadOctets_hi << 32) +
5116 		 (uint64_t)stats->stat_IfHCOutBadOctets_lo;
5117 
5118 	sc->stat_IfHCInUcastPkts =
5119 		((uint64_t)stats->stat_IfHCInUcastPkts_hi << 32) +
5120 		 (uint64_t)stats->stat_IfHCInUcastPkts_lo;
5121 
5122 	sc->stat_IfHCInMulticastPkts =
5123 		((uint64_t)stats->stat_IfHCInMulticastPkts_hi << 32) +
5124 		 (uint64_t)stats->stat_IfHCInMulticastPkts_lo;
5125 
5126 	sc->stat_IfHCInBroadcastPkts =
5127 		((uint64_t)stats->stat_IfHCInBroadcastPkts_hi << 32) +
5128 		 (uint64_t)stats->stat_IfHCInBroadcastPkts_lo;
5129 
5130 	sc->stat_IfHCOutUcastPkts =
5131 		((uint64_t)stats->stat_IfHCOutUcastPkts_hi << 32) +
5132 		 (uint64_t)stats->stat_IfHCOutUcastPkts_lo;
5133 
5134 	sc->stat_IfHCOutMulticastPkts =
5135 		((uint64_t)stats->stat_IfHCOutMulticastPkts_hi << 32) +
5136 		 (uint64_t)stats->stat_IfHCOutMulticastPkts_lo;
5137 
5138 	sc->stat_IfHCOutBroadcastPkts =
5139 		((uint64_t)stats->stat_IfHCOutBroadcastPkts_hi << 32) +
5140 		 (uint64_t)stats->stat_IfHCOutBroadcastPkts_lo;
5141 
5142 	sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors =
5143 		stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors;
5144 
5145 	sc->stat_Dot3StatsCarrierSenseErrors =
5146 		stats->stat_Dot3StatsCarrierSenseErrors;
5147 
5148 	sc->stat_Dot3StatsFCSErrors =
5149 		stats->stat_Dot3StatsFCSErrors;
5150 
5151 	sc->stat_Dot3StatsAlignmentErrors =
5152 		stats->stat_Dot3StatsAlignmentErrors;
5153 
5154 	sc->stat_Dot3StatsSingleCollisionFrames =
5155 		stats->stat_Dot3StatsSingleCollisionFrames;
5156 
5157 	sc->stat_Dot3StatsMultipleCollisionFrames =
5158 		stats->stat_Dot3StatsMultipleCollisionFrames;
5159 
5160 	sc->stat_Dot3StatsDeferredTransmissions =
5161 		stats->stat_Dot3StatsDeferredTransmissions;
5162 
5163 	sc->stat_Dot3StatsExcessiveCollisions =
5164 		stats->stat_Dot3StatsExcessiveCollisions;
5165 
5166 	sc->stat_Dot3StatsLateCollisions =
5167 		stats->stat_Dot3StatsLateCollisions;
5168 
5169 	sc->stat_EtherStatsCollisions =
5170 		stats->stat_EtherStatsCollisions;
5171 
5172 	sc->stat_EtherStatsFragments =
5173 		stats->stat_EtherStatsFragments;
5174 
5175 	sc->stat_EtherStatsJabbers =
5176 		stats->stat_EtherStatsJabbers;
5177 
5178 	sc->stat_EtherStatsUndersizePkts =
5179 		stats->stat_EtherStatsUndersizePkts;
5180 
5181 	sc->stat_EtherStatsOverrsizePkts =
5182 		stats->stat_EtherStatsOverrsizePkts;
5183 
5184 	sc->stat_EtherStatsPktsRx64Octets =
5185 		stats->stat_EtherStatsPktsRx64Octets;
5186 
5187 	sc->stat_EtherStatsPktsRx65Octetsto127Octets =
5188 		stats->stat_EtherStatsPktsRx65Octetsto127Octets;
5189 
5190 	sc->stat_EtherStatsPktsRx128Octetsto255Octets =
5191 		stats->stat_EtherStatsPktsRx128Octetsto255Octets;
5192 
5193 	sc->stat_EtherStatsPktsRx256Octetsto511Octets =
5194 		stats->stat_EtherStatsPktsRx256Octetsto511Octets;
5195 
5196 	sc->stat_EtherStatsPktsRx512Octetsto1023Octets =
5197 		stats->stat_EtherStatsPktsRx512Octetsto1023Octets;
5198 
5199 	sc->stat_EtherStatsPktsRx1024Octetsto1522Octets =
5200 		stats->stat_EtherStatsPktsRx1024Octetsto1522Octets;
5201 
5202 	sc->stat_EtherStatsPktsRx1523Octetsto9022Octets =
5203 		stats->stat_EtherStatsPktsRx1523Octetsto9022Octets;
5204 
5205 	sc->stat_EtherStatsPktsTx64Octets =
5206 		stats->stat_EtherStatsPktsTx64Octets;
5207 
5208 	sc->stat_EtherStatsPktsTx65Octetsto127Octets =
5209 		stats->stat_EtherStatsPktsTx65Octetsto127Octets;
5210 
5211 	sc->stat_EtherStatsPktsTx128Octetsto255Octets =
5212 		stats->stat_EtherStatsPktsTx128Octetsto255Octets;
5213 
5214 	sc->stat_EtherStatsPktsTx256Octetsto511Octets =
5215 		stats->stat_EtherStatsPktsTx256Octetsto511Octets;
5216 
5217 	sc->stat_EtherStatsPktsTx512Octetsto1023Octets =
5218 		stats->stat_EtherStatsPktsTx512Octetsto1023Octets;
5219 
5220 	sc->stat_EtherStatsPktsTx1024Octetsto1522Octets =
5221 		stats->stat_EtherStatsPktsTx1024Octetsto1522Octets;
5222 
5223 	sc->stat_EtherStatsPktsTx1523Octetsto9022Octets =
5224 		stats->stat_EtherStatsPktsTx1523Octetsto9022Octets;
5225 
5226 	sc->stat_XonPauseFramesReceived =
5227 		stats->stat_XonPauseFramesReceived;
5228 
5229 	sc->stat_XoffPauseFramesReceived =
5230 		stats->stat_XoffPauseFramesReceived;
5231 
5232 	sc->stat_OutXonSent =
5233 		stats->stat_OutXonSent;
5234 
5235 	sc->stat_OutXoffSent =
5236 		stats->stat_OutXoffSent;
5237 
5238 	sc->stat_FlowControlDone =
5239 		stats->stat_FlowControlDone;
5240 
5241 	sc->stat_MacControlFramesReceived =
5242 		stats->stat_MacControlFramesReceived;
5243 
5244 	sc->stat_XoffStateEntered =
5245 		stats->stat_XoffStateEntered;
5246 
5247 	sc->stat_IfInFramesL2FilterDiscards =
5248 		stats->stat_IfInFramesL2FilterDiscards;
5249 
5250 	sc->stat_IfInRuleCheckerDiscards =
5251 		stats->stat_IfInRuleCheckerDiscards;
5252 
5253 	sc->stat_IfInFTQDiscards =
5254 		stats->stat_IfInFTQDiscards;
5255 
5256 	sc->stat_IfInMBUFDiscards =
5257 		stats->stat_IfInMBUFDiscards;
5258 
5259 	sc->stat_IfInRuleCheckerP4Hit =
5260 		stats->stat_IfInRuleCheckerP4Hit;
5261 
5262 	sc->stat_CatchupInRuleCheckerDiscards =
5263 		stats->stat_CatchupInRuleCheckerDiscards;
5264 
5265 	sc->stat_CatchupInFTQDiscards =
5266 		stats->stat_CatchupInFTQDiscards;
5267 
5268 	sc->stat_CatchupInMBUFDiscards =
5269 		stats->stat_CatchupInMBUFDiscards;
5270 
5271 	sc->stat_CatchupInRuleCheckerP4Hit =
5272 		stats->stat_CatchupInRuleCheckerP4Hit;
5273 
5274 	sc->com_no_buffers = REG_RD_IND(sc, 0x120084);
5275 
5276 	DBPRINT(sc, BCE_EXCESSIVE, "Exiting %s()\n", __func__);
5277 }
5278 
5279 
5280 /****************************************************************************/
5281 /* Periodic function to perform maintenance tasks.                          */
5282 /*                                                                          */
5283 /* Returns:                                                                 */
5284 /*   Nothing.                                                               */
5285 /****************************************************************************/
5286 static void
5287 bce_tick_serialized(struct bce_softc *sc)
5288 {
5289 	struct ifnet *ifp = &sc->arpcom.ac_if;
5290 	struct mii_data *mii;
5291 	uint32_t msg;
5292 
5293 	ASSERT_SERIALIZED(ifp->if_serializer);
5294 
5295 	/* Tell the firmware that the driver is still running. */
5296 #ifdef BCE_DEBUG
5297 	msg = (uint32_t)BCE_DRV_MSG_DATA_PULSE_CODE_ALWAYS_ALIVE;
5298 #else
5299 	msg = (uint32_t)++sc->bce_fw_drv_pulse_wr_seq;
5300 #endif
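	/*
	 * (Debug builds report the ALWAYS_ALIVE pulse code so the
	 * management firmware does not conclude the driver has hung,
	 * for example while stopped at a breakpoint; this reading is
	 * an assumption based on the pulse code name.)
	 */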
5301 	REG_WR_IND(sc, sc->bce_shmem_base + BCE_DRV_PULSE_MB, msg);
5302 
5303 	/* Update the statistics from the hardware statistics block. */
5304 	bce_stats_update(sc);
5305 
5306 	/* Schedule the next tick. */
5307 	callout_reset(&sc->bce_stat_ch, hz, bce_tick, sc);
5308 
5309 	/* If the link is already up then we're done. */
5310 	if (sc->bce_link)
5311 		return;
5312 
5313 	mii = device_get_softc(sc->bce_miibus);
5314 	mii_tick(mii);
5315 
5316 	/* Check if the link has come up. */
5317 	if (!sc->bce_link && (mii->mii_media_status & IFM_ACTIVE) &&
5318 	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
5319 		sc->bce_link++;
5320 		/* Now that link is up, handle any outstanding TX traffic. */
5321 		if (!ifq_is_empty(&ifp->if_snd))
5322 			if_devstart(ifp);
5323 	}
5324 }
5325 
5326 
5327 static void
5328 bce_tick(void *xsc)
5329 {
5330 	struct bce_softc *sc = xsc;
5331 	struct ifnet *ifp = &sc->arpcom.ac_if;
5332 
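	/*
	 * The callout fires without the interface serializer held,
	 * so acquire it before touching any driver state.
	 */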
5333 	lwkt_serialize_enter(ifp->if_serializer);
5334 	bce_tick_serialized(sc);
5335 	lwkt_serialize_exit(ifp->if_serializer);
5336 }
5337 
5338 
5339 #ifdef BCE_DEBUG
5340 /****************************************************************************/
5341 /* Allows the driver state to be dumped through the sysctl interface.       */
5342 /*                                                                          */
5343 /* Returns:                                                                 */
5344 /*   0 for success, positive value for failure.                             */
5345 /****************************************************************************/
5346 static int
5347 bce_sysctl_driver_state(SYSCTL_HANDLER_ARGS)
5348 {
5349         int error;
5350         int result;
5351         struct bce_softc *sc;
5352 
5353         result = -1;
5354         error = sysctl_handle_int(oidp, &result, 0, req);
5355 
5356         if (error || !req->newptr)
5357                 return (error);
5358 
5359         if (result == 1) {
5360                 sc = (struct bce_softc *)arg1;
5361                 bce_dump_driver_state(sc);
5362         }
5363 
5364         return error;
5365 }
5366 
5367 
5368 /****************************************************************************/
5369 /* Allows the hardware state to be dumped through the sysctl interface.     */
5370 /*                                                                          */
5371 /* Returns:                                                                 */
5372 /*   0 for success, positive value for failure.                             */
5373 /****************************************************************************/
5374 static int
5375 bce_sysctl_hw_state(SYSCTL_HANDLER_ARGS)
5376 {
5377         int error;
5378         int result;
5379         struct bce_softc *sc;
5380 
5381         result = -1;
5382         error = sysctl_handle_int(oidp, &result, 0, req);
5383 
5384         if (error || !req->newptr)
5385                 return (error);
5386 
5387         if (result == 1) {
5388                 sc = (struct bce_softc *)arg1;
5389                 bce_dump_hw_state(sc);
5390         }
5391 
5392         return error;
5393 }
5394 
5395 
5396 /****************************************************************************/
5397 /* Provides a sysctl interface to allow dumping the RX chain.               */
5398 /*                                                                          */
5399 /* Returns:                                                                 */
5400 /*   0 for success, positive value for failure.                             */
5401 /****************************************************************************/
5402 static int
5403 bce_sysctl_dump_rx_chain(SYSCTL_HANDLER_ARGS)
5404 {
5405         int error;
5406         int result;
5407         struct bce_softc *sc;
5408 
5409         result = -1;
5410         error = sysctl_handle_int(oidp, &result, 0, req);
5411 
5412         if (error || !req->newptr)
5413                 return (error);
5414 
5415         if (result == 1) {
5416                 sc = (struct bce_softc *)arg1;
5417                 bce_dump_rx_chain(sc, 0, USABLE_RX_BD);
5418         }
5419 
5420         return error;
5421 }
5422 
5423 
5424 /****************************************************************************/
5425 /* Provides a sysctl interface to allow dumping the TX chain.               */
5426 /*                                                                          */
5427 /* Returns:                                                                 */
5428 /*   0 for success, positive value for failure.                             */
5429 /****************************************************************************/
5430 static int
5431 bce_sysctl_dump_tx_chain(SYSCTL_HANDLER_ARGS)
5432 {
5433         int error;
5434         int result;
5435         struct bce_softc *sc;
5436 
5437         result = -1;
5438         error = sysctl_handle_int(oidp, &result, 0, req);
5439 
5440         if (error || !req->newptr)
5441                 return (error);
5442 
5443         if (result == 1) {
5444                 sc = (struct bce_softc *)arg1;
5445                 bce_dump_tx_chain(sc, 0, USABLE_TX_BD);
5446         }
5447 
5448         return error;
5449 }
5450 
5451 
5452 /****************************************************************************/
5453 /* Provides a sysctl interface to allow reading arbitrary registers in the  */
5454 /* device.  DO NOT ENABLE ON PRODUCTION SYSTEMS!                            */
5455 /*                                                                          */
5456 /* Returns:                                                                 */
5457 /*   0 for success, positive value for failure.                             */
5458 /****************************************************************************/
5459 static int
5460 bce_sysctl_reg_read(SYSCTL_HANDLER_ARGS)
5461 {
5462 	struct bce_softc *sc;
5463 	int error;
5464 	uint32_t val, result;
5465 
5466 	result = -1;
5467 	error = sysctl_handle_int(oidp, &result, 0, req);
5468 	if (error || (req->newptr == NULL))
5469 		return (error);
5470 
5471 	/* Make sure the register is accessible. */
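	/*
	 * Offsets below 0x8000 are read through the memory-mapped
	 * register window; larger offsets (up to 0x280000) use the
	 * indirect access registers.  (Bounds taken from the checks
	 * below.)
	 */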
5472 	if (result < 0x8000) {
5473 		sc = (struct bce_softc *)arg1;
5474 		val = REG_RD(sc, result);
5475 		if_printf(&sc->arpcom.ac_if, "reg 0x%08X = 0x%08X\n",
5476 			  result, val);
5477 	} else if (result < 0x0280000) {
5478 		sc = (struct bce_softc *)arg1;
5479 		val = REG_RD_IND(sc, result);
5480 		if_printf(&sc->arpcom.ac_if, "reg 0x%08X = 0x%08X\n",
5481 			  result, val);
5482 	}
5483 	return (error);
5484 }
5485 
5486 
5487 /****************************************************************************/
5488 /* Provides a sysctl interface to allow reading arbitrary PHY registers in  */
5489 /* the device.  DO NOT ENABLE ON PRODUCTION SYSTEMS!                        */
5490 /*                                                                          */
5491 /* Returns:                                                                 */
5492 /*   0 for success, positive value for failure.                             */
5493 /****************************************************************************/
5494 static int
5495 bce_sysctl_phy_read(SYSCTL_HANDLER_ARGS)
5496 {
5497 	struct bce_softc *sc;
5498 	device_t dev;
5499 	int error, result;
5500 	uint16_t val;
5501 
5502 	result = -1;
5503 	error = sysctl_handle_int(oidp, &result, 0, req);
5504 	if (error || (req->newptr == NULL))
5505 		return (error);
5506 
5507 	/* Make sure the register is accessible. */
5508 	if (result < 0x20) {
5509 		sc = (struct bce_softc *)arg1;
5510 		dev = sc->bce_dev;
5511 		val = bce_miibus_read_reg(dev, sc->bce_phy_addr, result);
5512 		if_printf(&sc->arpcom.ac_if,
5513 			  "phy 0x%02X = 0x%04X\n", result, val);
5514 	}
5515 	return (error);
5516 }
5517 
5518 
5519 /****************************************************************************/
5520 /* Provides a sysctl interface to force the driver to dump state and        */
5521 /* enter the debugger.  DO NOT ENABLE ON PRODUCTION SYSTEMS!                */
5522 /*                                                                          */
5523 /* Returns:                                                                 */
5524 /*   0 for success, positive value for failure.                             */
5525 /****************************************************************************/
5526 static int
5527 bce_sysctl_breakpoint(SYSCTL_HANDLER_ARGS)
5528 {
5529         int error;
5530         int result;
5531         struct bce_softc *sc;
5532 
5533         result = -1;
5534         error = sysctl_handle_int(oidp, &result, 0, req);
5535 
5536         if (error || !req->newptr)
5537                 return (error);
5538 
5539         if (result == 1) {
5540                 sc = (struct bce_softc *)arg1;
5541                 bce_breakpoint(sc);
5542         }
5543 
5544         return error;
5545 }
5546 #endif
5547 
5548 
5549 /****************************************************************************/
5550 /* Adds any sysctl parameters for tuning or debugging purposes.             */
5551 /*                                                                          */
5552 /* Returns:                                                                 */
5553 /*   0 for success, positive value for failure.                             */
5554 /****************************************************************************/
5555 static void
5556 bce_add_sysctls(struct bce_softc *sc)
5557 {
5558 	struct sysctl_ctx_list *ctx;
5559 	struct sysctl_oid_list *children;
5560 
5561 	sysctl_ctx_init(&sc->bce_sysctl_ctx);
5562 	sc->bce_sysctl_tree = SYSCTL_ADD_NODE(&sc->bce_sysctl_ctx,
5563 					      SYSCTL_STATIC_CHILDREN(_hw),
5564 					      OID_AUTO,
5565 					      device_get_nameunit(sc->bce_dev),
5566 					      CTLFLAG_RD, 0, "");
5567 	if (sc->bce_sysctl_tree == NULL) {
5568 		device_printf(sc->bce_dev, "can't add sysctl node\n");
5569 		return;
5570 	}
5571 
5572 	ctx = &sc->bce_sysctl_ctx;
5573 	children = SYSCTL_CHILDREN(sc->bce_sysctl_tree);
5574 
5575 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_bds_int",
5576 			CTLTYPE_INT | CTLFLAG_RW,
5577 			sc, 0, bce_sysctl_tx_bds_int, "I",
5578 			"Send max coalesced BD count during interrupt");
5579 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_bds",
5580 			CTLTYPE_INT | CTLFLAG_RW,
5581 			sc, 0, bce_sysctl_tx_bds, "I",
5582 			"Send max coalesced BD count");
5583 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_ticks_int",
5584 			CTLTYPE_INT | CTLFLAG_RW,
5585 			sc, 0, bce_sysctl_tx_ticks_int, "I",
5586 			"Send coalescing ticks during interrupt");
5587 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_ticks",
5588 			CTLTYPE_INT | CTLFLAG_RW,
5589 			sc, 0, bce_sysctl_tx_ticks, "I",
5590 			"Send coalescing ticks");
5591 
5592 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_bds_int",
5593 			CTLTYPE_INT | CTLFLAG_RW,
5594 			sc, 0, bce_sysctl_rx_bds_int, "I",
5595 			"Receive max coalesced BD count during interrupt");
5596 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_bds",
5597 			CTLTYPE_INT | CTLFLAG_RW,
5598 			sc, 0, bce_sysctl_rx_bds, "I",
5599 			"Receive max coalesced BD count");
5600 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_ticks_int",
5601 			CTLTYPE_INT | CTLFLAG_RW,
5602 			sc, 0, bce_sysctl_rx_ticks_int, "I",
5603 			"Receive coalescing ticks during interrupt");
5604 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_ticks",
5605 			CTLTYPE_INT | CTLFLAG_RW,
5606 			sc, 0, bce_sysctl_rx_ticks, "I",
5607 			"Receive coalescing ticks");
5608 
5609 #ifdef BCE_DEBUG
5610 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
5611 		"rx_low_watermark",
5612 		CTLFLAG_RD, &sc->rx_low_watermark,
5613 		0, "Lowest level of free rx_bd's");
5614 
5615 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
5616 		"rx_empty_count",
5617 		CTLFLAG_RD, &sc->rx_empty_count,
5618 		0, "Number of times the RX chain was empty");
5619 
5620 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
5621 		"tx_hi_watermark",
5622 		CTLFLAG_RD, &sc->tx_hi_watermark,
5623 		0, "Highest level of used tx_bd's");
5624 
5625 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
5626 		"tx_full_count",
5627 		CTLFLAG_RD, &sc->tx_full_count,
5628 		0, "Number of times the TX chain was full");
5629 
5630 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
5631 		"l2fhdr_status_errors",
5632 		CTLFLAG_RD, &sc->l2fhdr_status_errors,
5633 		0, "l2_fhdr status errors");
5634 
5635 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
5636 		"unexpected_attentions",
5637 		CTLFLAG_RD, &sc->unexpected_attentions,
5638 		0, "unexpected attentions");
5639 
5640 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
5641 		"lost_status_block_updates",
5642 		CTLFLAG_RD, &sc->lost_status_block_updates,
5643 		0, "lost status block updates");
5644 
5645 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
5646 		"mbuf_alloc_failed",
5647 		CTLFLAG_RD, &sc->mbuf_alloc_failed,
5648 		0, "mbuf cluster allocation failures");
5649 #endif
5650 
5651 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5652 		"stat_IfHCInOctets",
5653 		CTLFLAG_RD, &sc->stat_IfHCInOctets,
5654 		"Bytes received");
5655 
5656 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5657 		"stat_IfHCInBadOctets",
5658 		CTLFLAG_RD, &sc->stat_IfHCInBadOctets,
5659 		"Bad bytes received");
5660 
5661 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5662 		"stat_IfHCOutOctets",
5663 		CTLFLAG_RD, &sc->stat_IfHCOutOctets,
5664 		"Bytes sent");
5665 
5666 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5667 		"stat_IfHCOutBadOctets",
5668 		CTLFLAG_RD, &sc->stat_IfHCOutBadOctets,
5669 		"Bad bytes sent");
5670 
5671 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5672 		"stat_IfHCInUcastPkts",
5673 		CTLFLAG_RD, &sc->stat_IfHCInUcastPkts,
5674 		"Unicast packets received");
5675 
5676 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5677 		"stat_IfHCInMulticastPkts",
5678 		CTLFLAG_RD, &sc->stat_IfHCInMulticastPkts,
5679 		"Multicast packets received");
5680 
5681 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5682 		"stat_IfHCInBroadcastPkts",
5683 		CTLFLAG_RD, &sc->stat_IfHCInBroadcastPkts,
5684 		"Broadcast packets received");
5685 
5686 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5687 		"stat_IfHCOutUcastPkts",
5688 		CTLFLAG_RD, &sc->stat_IfHCOutUcastPkts,
5689 		"Unicast packets sent");
5690 
5691 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5692 		"stat_IfHCOutMulticastPkts",
5693 		CTLFLAG_RD, &sc->stat_IfHCOutMulticastPkts,
5694 		"Multicast packets sent");
5695 
5696 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5697 		"stat_IfHCOutBroadcastPkts",
5698 		CTLFLAG_RD, &sc->stat_IfHCOutBroadcastPkts,
5699 		"Broadcast packets sent");
5700 
5701 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5702 		"stat_emac_tx_stat_dot3statsinternalmactransmiterrors",
5703 		CTLFLAG_RD, &sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors,
5704 		0, "Internal MAC transmit errors");
5705 
5706 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5707 		"stat_Dot3StatsCarrierSenseErrors",
5708 		CTLFLAG_RD, &sc->stat_Dot3StatsCarrierSenseErrors,
5709 		0, "Carrier sense errors");
5710 
5711 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5712 		"stat_Dot3StatsFCSErrors",
5713 		CTLFLAG_RD, &sc->stat_Dot3StatsFCSErrors,
5714 		0, "Frame check sequence errors");
5715 
5716 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5717 		"stat_Dot3StatsAlignmentErrors",
5718 		CTLFLAG_RD, &sc->stat_Dot3StatsAlignmentErrors,
5719 		0, "Alignment errors");
5720 
5721 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5722 		"stat_Dot3StatsSingleCollisionFrames",
5723 		CTLFLAG_RD, &sc->stat_Dot3StatsSingleCollisionFrames,
5724 		0, "Single Collision Frames");
5725 
5726 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5727 		"stat_Dot3StatsMultipleCollisionFrames",
5728 		CTLFLAG_RD, &sc->stat_Dot3StatsMultipleCollisionFrames,
5729 		0, "Multiple Collision Frames");
5730 
5731 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5732 		"stat_Dot3StatsDeferredTransmissions",
5733 		CTLFLAG_RD, &sc->stat_Dot3StatsDeferredTransmissions,
5734 		0, "Deferred Transmissions");
5735 
5736 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5737 		"stat_Dot3StatsExcessiveCollisions",
5738 		CTLFLAG_RD, &sc->stat_Dot3StatsExcessiveCollisions,
5739 		0, "Excessive Collisions");
5740 
5741 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5742 		"stat_Dot3StatsLateCollisions",
5743 		CTLFLAG_RD, &sc->stat_Dot3StatsLateCollisions,
5744 		0, "Late Collisions");
5745 
5746 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5747 		"stat_EtherStatsCollisions",
5748 		CTLFLAG_RD, &sc->stat_EtherStatsCollisions,
5749 		0, "Collisions");
5750 
5751 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5752 		"stat_EtherStatsFragments",
5753 		CTLFLAG_RD, &sc->stat_EtherStatsFragments,
5754 		0, "Fragments");
5755 
5756 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5757 		"stat_EtherStatsJabbers",
5758 		CTLFLAG_RD, &sc->stat_EtherStatsJabbers,
5759 		0, "Jabbers");
5760 
5761 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5762 		"stat_EtherStatsUndersizePkts",
5763 		CTLFLAG_RD, &sc->stat_EtherStatsUndersizePkts,
5764 		0, "Undersize packets");
5765 
5766 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5767 		"stat_EtherStatsOverrsizePkts",
5768 		CTLFLAG_RD, &sc->stat_EtherStatsOverrsizePkts,
5769 		0, "Oversize packets received");
5770 
5771 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5772 		"stat_EtherStatsPktsRx64Octets",
5773 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx64Octets,
5774 		0, "Bytes received in 64 byte packets");
5775 
5776 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5777 		"stat_EtherStatsPktsRx65Octetsto127Octets",
5778 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx65Octetsto127Octets,
5779 		0, "Bytes received in 65 to 127 byte packets");
5780 
5781 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5782 		"stat_EtherStatsPktsRx128Octetsto255Octets",
5783 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx128Octetsto255Octets,
5784 		0, "Bytes received in 128 to 255 byte packets");
5785 
5786 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5787 		"stat_EtherStatsPktsRx256Octetsto511Octets",
5788 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx256Octetsto511Octets,
5789 		0, "Bytes received in 256 to 511 byte packets");
5790 
5791 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5792 		"stat_EtherStatsPktsRx512Octetsto1023Octets",
5793 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx512Octetsto1023Octets,
5794 		0, "Bytes received in 512 to 1023 byte packets");
5795 
5796 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5797 		"stat_EtherStatsPktsRx1024Octetsto1522Octets",
5798 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1024Octetsto1522Octets,
5799 		0, "Bytes received in 1024 to 1522 byte packets");
5800 
5801 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5802 		"stat_EtherStatsPktsRx1523Octetsto9022Octets",
5803 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1523Octetsto9022Octets,
5804 		0, "Bytes received in 1523 to 9022 byte packets");
5805 
5806 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5807 		"stat_EtherStatsPktsTx64Octets",
5808 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx64Octets,
5809 		0, "Bytes sent in 64 byte packets");
5810 
5811 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5812 		"stat_EtherStatsPktsTx65Octetsto127Octets",
5813 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx65Octetsto127Octets,
5814 		0, "Bytes sent in 65 to 127 byte packets");
5815 
5816 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5817 		"stat_EtherStatsPktsTx128Octetsto255Octets",
5818 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx128Octetsto255Octets,
5819 		0, "Bytes sent in 128 to 255 byte packets");
5820 
5821 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5822 		"stat_EtherStatsPktsTx256Octetsto511Octets",
5823 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx256Octetsto511Octets,
5824 		0, "Bytes sent in 256 to 511 byte packets");
5825 
5826 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5827 		"stat_EtherStatsPktsTx512Octetsto1023Octets",
5828 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx512Octetsto1023Octets,
5829 		0, "Bytes sent in 512 to 1023 byte packets");
5830 
5831 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5832 		"stat_EtherStatsPktsTx1024Octetsto1522Octets",
5833 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1024Octetsto1522Octets,
5834 		0, "Bytes sent in 1024 to 1522 byte packets");
5835 
5836 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5837 		"stat_EtherStatsPktsTx1523Octetsto9022Octets",
5838 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1523Octetsto9022Octets,
5839 		0, "Bytes sent in 1523 to 9022 byte packets");
5840 
5841 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5842 		"stat_XonPauseFramesReceived",
5843 		CTLFLAG_RD, &sc->stat_XonPauseFramesReceived,
5844 		0, "XON pause frames received");
5845 
5846 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5847 		"stat_XoffPauseFramesReceived",
5848 		CTLFLAG_RD, &sc->stat_XoffPauseFramesReceived,
5849 		0, "XOFF pause frames received");
5850 
5851 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5852 		"stat_OutXonSent",
5853 		CTLFLAG_RD, &sc->stat_OutXonSent,
5854 		0, "XON pause frames sent");
5855 
5856 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5857 		"stat_OutXoffSent",
5858 		CTLFLAG_RD, &sc->stat_OutXoffSent,
5859 		0, "XOFF pause frames sent");
5860 
5861 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5862 		"stat_FlowControlDone",
5863 		CTLFLAG_RD, &sc->stat_FlowControlDone,
5864 		0, "Flow control done");
5865 
5866 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5867 		"stat_MacControlFramesReceived",
5868 		CTLFLAG_RD, &sc->stat_MacControlFramesReceived,
5869 		0, "MAC control frames received");
5870 
5871 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5872 		"stat_XoffStateEntered",
5873 		CTLFLAG_RD, &sc->stat_XoffStateEntered,
5874 		0, "XOFF state entered");
5875 
5876 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5877 		"stat_IfInFramesL2FilterDiscards",
5878 		CTLFLAG_RD, &sc->stat_IfInFramesL2FilterDiscards,
5879 		0, "Received L2 packets discarded");
5880 
5881 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5882 		"stat_IfInRuleCheckerDiscards",
5883 		CTLFLAG_RD, &sc->stat_IfInRuleCheckerDiscards,
5884 		0, "Received packets discarded by rule");
5885 
5886 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5887 		"stat_IfInFTQDiscards",
5888 		CTLFLAG_RD, &sc->stat_IfInFTQDiscards,
5889 		0, "Received packet FTQ discards");
5890 
5891 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5892 		"stat_IfInMBUFDiscards",
5893 		CTLFLAG_RD, &sc->stat_IfInMBUFDiscards,
5894 		0, "Received packets discarded due to lack of controller buffer memory");
5895 
5896 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5897 		"stat_IfInRuleCheckerP4Hit",
5898 		CTLFLAG_RD, &sc->stat_IfInRuleCheckerP4Hit,
5899 		0, "Received packets rule checker hits");
5900 
5901 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5902 		"stat_CatchupInRuleCheckerDiscards",
5903 		CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerDiscards,
5904 		0, "Received packets discarded in Catchup path");
5905 
5906 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5907 		"stat_CatchupInFTQDiscards",
5908 		CTLFLAG_RD, &sc->stat_CatchupInFTQDiscards,
5909 		0, "Received packets discarded in FTQ in Catchup path");
5910 
5911 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5912 		"stat_CatchupInMBUFDiscards",
5913 		CTLFLAG_RD, &sc->stat_CatchupInMBUFDiscards,
5914 		0, "Received packets discarded in controller buffer memory in Catchup path");
5915 
5916 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5917 		"stat_CatchupInRuleCheckerP4Hit",
5918 		CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerP4Hit,
5919 		0, "Received packets rule checker hits in Catchup path");
5920 
5921 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5922 		"com_no_buffers",
5923 		CTLFLAG_RD, &sc->com_no_buffers,
5924 		0, "Valid packets received but no RX buffers available");
5925 
5926 #ifdef BCE_DEBUG
5927 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
5928 		"driver_state", CTLTYPE_INT | CTLFLAG_RW,
5929 		(void *)sc, 0,
5930 		bce_sysctl_driver_state, "I", "Driver state information");
5931 
5932 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
5933 		"hw_state", CTLTYPE_INT | CTLFLAG_RW,
5934 		(void *)sc, 0,
5935 		bce_sysctl_hw_state, "I", "Hardware state information");
5936 
5937 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
5938 		"dump_rx_chain", CTLTYPE_INT | CTLFLAG_RW,
5939 		(void *)sc, 0,
5940 		bce_sysctl_dump_rx_chain, "I", "Dump rx_bd chain");
5941 
5942 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
5943 		"dump_tx_chain", CTLTYPE_INT | CTLFLAG_RW,
5944 		(void *)sc, 0,
5945 		bce_sysctl_dump_tx_chain, "I", "Dump tx_bd chain");
5946 
5947 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
5948 		"breakpoint", CTLTYPE_INT | CTLFLAG_RW,
5949 		(void *)sc, 0,
5950 		bce_sysctl_breakpoint, "I", "Driver breakpoint");
5951 
5952 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
5953 		"reg_read", CTLTYPE_INT | CTLFLAG_RW,
5954 		(void *)sc, 0,
5955 		bce_sysctl_reg_read, "I", "Register read");
5956 
5957 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
5958 		"phy_read", CTLTYPE_INT | CTLFLAG_RW,
5959 		(void *)sc, 0,
5960 		bce_sysctl_phy_read, "I", "PHY register read");
5961 
5962 #endif
5963 
5964 }
5965 
5966 
5967 /****************************************************************************/
5968 /* BCE Debug Routines                                                       */
5969 /****************************************************************************/
5970 #ifdef BCE_DEBUG
5971 
5972 /****************************************************************************/
5973 /* Freezes the controller to allow for a cohesive state dump.               */
5974 /*                                                                          */
5975 /* Returns:                                                                 */
5976 /*   Nothing.                                                               */
5977 /****************************************************************************/
5978 static void
5979 bce_freeze_controller(struct bce_softc *sc)
5980 {
5981 	uint32_t val;
5982 
5983 	val = REG_RD(sc, BCE_MISC_COMMAND);
5984 	val |= BCE_MISC_COMMAND_DISABLE_ALL;
5985 	REG_WR(sc, BCE_MISC_COMMAND, val);
5986 }
5987 
5988 
5989 /****************************************************************************/
5990 /* Unfreezes the controller after a freeze operation.  This may not always  */
5991 /* work and the controller will require a reset!                            */
5992 /*                                                                          */
5993 /* Returns:                                                                 */
5994 /*   Nothing.                                                               */
5995 /****************************************************************************/
5996 static void
5997 bce_unfreeze_controller(struct bce_softc *sc)
5998 {
5999 	uint32_t val;
6000 
6001 	val = REG_RD(sc, BCE_MISC_COMMAND);
6002 	val |= BCE_MISC_COMMAND_ENABLE_ALL;
6003 	REG_WR(sc, BCE_MISC_COMMAND, val);
6004 }
6005 
6006 
6007 /****************************************************************************/
6008 /* Prints out information about an mbuf.                                    */
6009 /*                                                                          */
6010 /* Returns:                                                                 */
6011 /*   Nothing.                                                               */
6012 /****************************************************************************/
6013 static void
6014 bce_dump_mbuf(struct bce_softc *sc, struct mbuf *m)
6015 {
6016 	struct ifnet *ifp = &sc->arpcom.ac_if;
6017 	uint32_t val_hi, val_lo;
6018 	struct mbuf *mp = m;
6019 
6020 	if (m == NULL) {
6021 		/* Nothing to dump. */
6022 		if_printf(ifp, "mbuf: null pointer\n");
6023 		return;
6024 	}
6025 
6026 	while (mp) {
6027 		val_hi = BCE_ADDR_HI(mp);
6028 		val_lo = BCE_ADDR_LO(mp);
6029 		if_printf(ifp, "mbuf: vaddr = 0x%08X:%08X, m_len = %d, "
6030 			  "m_flags = ( ", val_hi, val_lo, mp->m_len);
6031 
6032 		if (mp->m_flags & M_EXT)
6033 			kprintf("M_EXT ");
6034 		if (mp->m_flags & M_PKTHDR)
6035 			kprintf("M_PKTHDR ");
6036 		if (mp->m_flags & M_EOR)
6037 			kprintf("M_EOR ");
6038 #ifdef M_RDONLY
6039 		if (mp->m_flags & M_RDONLY)
6040 			kprintf("M_RDONLY ");
6041 #endif
6042 
6043 		val_hi = BCE_ADDR_HI(mp->m_data);
6044 		val_lo = BCE_ADDR_LO(mp->m_data);
6045 		kprintf(") m_data = 0x%08X:%08X\n", val_hi, val_lo);
6046 
6047 		if (mp->m_flags & M_PKTHDR) {
6048 			if_printf(ifp, "- m_pkthdr: flags = ( ");
6049 			if (mp->m_flags & M_BCAST)
6050 				kprintf("M_BCAST ");
6051 			if (mp->m_flags & M_MCAST)
6052 				kprintf("M_MCAST ");
6053 			if (mp->m_flags & M_FRAG)
6054 				kprintf("M_FRAG ");
6055 			if (mp->m_flags & M_FIRSTFRAG)
6056 				kprintf("M_FIRSTFRAG ");
6057 			if (mp->m_flags & M_LASTFRAG)
6058 				kprintf("M_LASTFRAG ");
6059 #ifdef M_VLANTAG
6060 			if (mp->m_flags & M_VLANTAG)
6061 				kprintf("M_VLANTAG ");
6062 #endif
6063 #ifdef M_PROMISC
6064 			if (mp->m_flags & M_PROMISC)
6065 				kprintf("M_PROMISC ");
6066 #endif
6067 			kprintf(") csum_flags = ( ");
6068 			if (mp->m_pkthdr.csum_flags & CSUM_IP)
6069 				kprintf("CSUM_IP ");
6070 			if (mp->m_pkthdr.csum_flags & CSUM_TCP)
6071 				kprintf("CSUM_TCP ");
6072 			if (mp->m_pkthdr.csum_flags & CSUM_UDP)
6073 				kprintf("CSUM_UDP ");
6074 			if (mp->m_pkthdr.csum_flags & CSUM_IP_FRAGS)
6075 				kprintf("CSUM_IP_FRAGS ");
6076 			if (mp->m_pkthdr.csum_flags & CSUM_FRAGMENT)
6077 				kprintf("CSUM_FRAGMENT ");
6078 #ifdef CSUM_TSO
6079 			if (mp->m_pkthdr.csum_flags & CSUM_TSO)
6080 				kprintf("CSUM_TSO ");
6081 #endif
6082 			if (mp->m_pkthdr.csum_flags & CSUM_IP_CHECKED)
6083 				kprintf("CSUM_IP_CHECKED ");
6084 			if (mp->m_pkthdr.csum_flags & CSUM_IP_VALID)
6085 				kprintf("CSUM_IP_VALID ");
6086 			if (mp->m_pkthdr.csum_flags & CSUM_DATA_VALID)
6087 				kprintf("CSUM_DATA_VALID ");
6088 			kprintf(")\n");
6089 		}
6090 
6091 		if (mp->m_flags & M_EXT) {
6092 			val_hi = BCE_ADDR_HI(mp->m_ext.ext_buf);
6093 			val_lo = BCE_ADDR_LO(mp->m_ext.ext_buf);
6094 			if_printf(ifp, "- m_ext: vaddr = 0x%08X:%08X, "
6095 				  "ext_size = %d\n",
6096 				  val_hi, val_lo, mp->m_ext.ext_size);
6097 		}
6098 		mp = mp->m_next;
6099 	}
6100 }
6101 
6102 
6103 /****************************************************************************/
6104 /* Prints out the mbufs in the TX mbuf chain.                               */
6105 /*                                                                          */
6106 /* Returns:                                                                 */
6107 /*   Nothing.                                                               */
6108 /****************************************************************************/
6109 static void
6110 bce_dump_tx_mbuf_chain(struct bce_softc *sc, int chain_prod, int count)
6111 {
6112 	struct ifnet *ifp = &sc->arpcom.ac_if;
6113 	int i;
6114 
6115 	if_printf(ifp,
6116 	"----------------------------"
6117 	"  tx mbuf data  "
6118 	"----------------------------\n");
6119 
6120 	for (i = 0; i < count; i++) {
6121 		if_printf(ifp, "txmbuf[%d]\n", chain_prod);
6122 		bce_dump_mbuf(sc, sc->tx_mbuf_ptr[chain_prod]);
6123 		chain_prod = TX_CHAIN_IDX(NEXT_TX_BD(chain_prod));
6124 	}
6125 
6126 	if_printf(ifp,
6127 	"----------------------------"
6128 	"----------------"
6129 	"----------------------------\n");
6130 }
6131 
6132 
6133 /****************************************************************************/
6134 /* Prints out the mbufs in the RX mbuf chain.                               */
6135 /*                                                                          */
6136 /* Returns:                                                                 */
6137 /*   Nothing.                                                               */
6138 /****************************************************************************/
6139 static void
6140 bce_dump_rx_mbuf_chain(struct bce_softc *sc, int chain_prod, int count)
6141 {
6142 	struct ifnet *ifp = &sc->arpcom.ac_if;
6143 	int i;
6144 
6145 	if_printf(ifp,
6146 	"----------------------------"
6147 	"  rx mbuf data  "
6148 	"----------------------------\n");
6149 
6150 	for (i = 0; i < count; i++) {
6151 		if_printf(ifp, "rxmbuf[0x%04X]\n", chain_prod);
6152 		bce_dump_mbuf(sc, sc->rx_mbuf_ptr[chain_prod]);
6153 		chain_prod = RX_CHAIN_IDX(NEXT_RX_BD(chain_prod));
6154 	}
6155 
6156 	if_printf(ifp,
6157 	"----------------------------"
6158 	"----------------"
6159 	"----------------------------\n");
6160 }
6161 
6162 
6163 /****************************************************************************/
6164 /* Prints out a tx_bd structure.                                            */
6165 /*                                                                          */
6166 /* Returns:                                                                 */
6167 /*   Nothing.                                                               */
6168 /****************************************************************************/
6169 static void
6170 bce_dump_txbd(struct bce_softc *sc, int idx, struct tx_bd *txbd)
6171 {
6172 	struct ifnet *ifp = &sc->arpcom.ac_if;
6173 
6174 	if (idx > MAX_TX_BD) {
6175 		/* Index out of range. */
6176 		if_printf(ifp, "tx_bd[0x%04X]: Invalid tx_bd index!\n", idx);
6177 	} else if ((idx & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE) {
6178 		/* TX Chain page pointer. */
6179 		if_printf(ifp, "tx_bd[0x%04X]: haddr = 0x%08X:%08X, "
6180 			  "chain page pointer\n",
6181 			  idx, txbd->tx_bd_haddr_hi, txbd->tx_bd_haddr_lo);
6182 	} else {
6183 		/* Normal tx_bd entry. */
6184 		if_printf(ifp, "tx_bd[0x%04X]: haddr = 0x%08X:%08X, "
6185 			  "nbytes = 0x%08X, "
6186 			  "vlan tag = 0x%04X, flags = 0x%04X (",
6187 			  idx, txbd->tx_bd_haddr_hi, txbd->tx_bd_haddr_lo,
6188 			  txbd->tx_bd_mss_nbytes,
6189 			  txbd->tx_bd_vlan_tag, txbd->tx_bd_flags);
6190 
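		/* Decode the individual tx_bd_flags bits into readable names. */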
6191 		if (txbd->tx_bd_flags & TX_BD_FLAGS_CONN_FAULT)
6192 			kprintf(" CONN_FAULT");
6193 
6194 		if (txbd->tx_bd_flags & TX_BD_FLAGS_TCP_UDP_CKSUM)
6195 			kprintf(" TCP_UDP_CKSUM");
6196 
6197 		if (txbd->tx_bd_flags & TX_BD_FLAGS_IP_CKSUM)
6198 			kprintf(" IP_CKSUM");
6199 
6200 		if (txbd->tx_bd_flags & TX_BD_FLAGS_VLAN_TAG)
6201 			kprintf(" VLAN");
6202 
6203 		if (txbd->tx_bd_flags & TX_BD_FLAGS_COAL_NOW)
6204 			kprintf(" COAL_NOW");
6205 
6206 		if (txbd->tx_bd_flags & TX_BD_FLAGS_DONT_GEN_CRC)
6207 			kprintf(" DONT_GEN_CRC");
6208 
6209 		if (txbd->tx_bd_flags & TX_BD_FLAGS_START)
6210 			kprintf(" START");
6211 
6212 		if (txbd->tx_bd_flags & TX_BD_FLAGS_END)
6213 			kprintf(" END");
6214 
6215 		if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_LSO)
6216 			kprintf(" LSO");
6217 
6218 		if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_OPTION_WORD)
6219 			kprintf(" OPTION_WORD");
6220 
6221 		if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_FLAGS)
6222 			kprintf(" FLAGS");
6223 
6224 		if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_SNAP)
6225 			kprintf(" SNAP");
6226 
6227 		kprintf(" )\n");
6228 	}
6229 }
6230 
6231 
6232 /****************************************************************************/
6233 /* Prints out a rx_bd structure.                                            */
6234 /*                                                                          */
6235 /* Returns:                                                                 */
6236 /*   Nothing.                                                               */
6237 /****************************************************************************/
6238 static void
6239 bce_dump_rxbd(struct bce_softc *sc, int idx, struct rx_bd *rxbd)
6240 {
6241 	struct ifnet *ifp = &sc->arpcom.ac_if;
6242 
6243 	if (idx > MAX_RX_BD) {
6244 		/* Index out of range. */
6245 		if_printf(ifp, "rx_bd[0x%04X]: Invalid rx_bd index!\n", idx);
6246 	} else if ((idx & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE) {
6247 		/* RX chain page pointer. */
6248 		if_printf(ifp, "rx_bd[0x%04X]: haddr = 0x%08X:%08X, "
6249 			  "chain page pointer\n",
6250 			  idx, rxbd->rx_bd_haddr_hi, rxbd->rx_bd_haddr_lo);
6251 	} else {
6252 		/* Normal rx_bd entry. */
6253 		if_printf(ifp, "rx_bd[0x%04X]: haddr = 0x%08X:%08X, "
6254 			  "nbytes = 0x%08X, flags = 0x%08X\n",
6255 			  idx, rxbd->rx_bd_haddr_hi, rxbd->rx_bd_haddr_lo,
6256 			  rxbd->rx_bd_len, rxbd->rx_bd_flags);
6257 	}
6258 }
6259 
6260 
6261 /****************************************************************************/
6262 /* Prints out a l2_fhdr structure.                                          */
6263 /*                                                                          */
6264 /* Returns:                                                                 */
6265 /*   Nothing.                                                               */
6266 /****************************************************************************/
6267 static void
6268 bce_dump_l2fhdr(struct bce_softc *sc, int idx, struct l2_fhdr *l2fhdr)
6269 {
6270 	if_printf(&sc->arpcom.ac_if, "l2_fhdr[0x%04X]: status = 0x%08X, "
6271 		  "pkt_len = 0x%04X, vlan = 0x%04X, "
6272 		  "ip_xsum = 0x%04X, tcp_udp_xsum = 0x%04X\n",
6273 		  idx, l2fhdr->l2_fhdr_status,
6274 		  l2fhdr->l2_fhdr_pkt_len, l2fhdr->l2_fhdr_vlan_tag,
6275 		  l2fhdr->l2_fhdr_ip_xsum, l2fhdr->l2_fhdr_tcp_udp_xsum);
6276 }
6277 
6278 
6279 /****************************************************************************/
6280 /* Prints out the tx chain.                                                 */
6281 /*                                                                          */
6282 /* Returns:                                                                 */
6283 /*   Nothing.                                                               */
6284 /****************************************************************************/
6285 static void
6286 bce_dump_tx_chain(struct bce_softc *sc, int tx_prod, int count)
6287 {
6288 	struct ifnet *ifp = &sc->arpcom.ac_if;
6289 	int i;
6290 
6291 	/* First some info about the tx_bd chain structure. */
6292 	if_printf(ifp,
6293 	"----------------------------"
6294 	"  tx_bd  chain  "
6295 	"----------------------------\n");
6296 
6297 	if_printf(ifp, "page size      = 0x%08X, "
6298 		  "tx chain pages        = 0x%08X\n",
6299 		  (uint32_t)BCM_PAGE_SIZE, (uint32_t)TX_PAGES);
6300 
6301 	if_printf(ifp, "tx_bd per page = 0x%08X, "
6302 		  "usable tx_bd per page = 0x%08X\n",
6303 		  (uint32_t)TOTAL_TX_BD_PER_PAGE,
6304 		  (uint32_t)USABLE_TX_BD_PER_PAGE);
6305 
6306 	if_printf(ifp, "total tx_bd    = 0x%08X\n", (uint32_t)TOTAL_TX_BD);
6307 
6308 	if_printf(ifp,
6309 	"----------------------------"
6310 	"  tx_bd data    "
6311 	"----------------------------\n");
6312 
6313 	/* Now print out the tx_bd's themselves. */
6314 	for (i = 0; i < count; i++) {
6315 		struct tx_bd *txbd;
6316 
6317 		txbd = &sc->tx_bd_chain[TX_PAGE(tx_prod)][TX_IDX(tx_prod)];
6318 		bce_dump_txbd(sc, tx_prod, txbd);
6319 		tx_prod = TX_CHAIN_IDX(NEXT_TX_BD(tx_prod));
6320 	}
6321 
6322 	if_printf(ifp,
6323 	"----------------------------"
6324 	"----------------"
6325 	"----------------------------\n");
6326 }
6327 
6328 
6329 /****************************************************************************/
6330 /* Prints out the rx chain.                                                 */
6331 /*                                                                          */
6332 /* Returns:                                                                 */
6333 /*   Nothing.                                                               */
6334 /****************************************************************************/
6335 static void
6336 bce_dump_rx_chain(struct bce_softc *sc, int rx_prod, int count)
6337 {
6338 	struct ifnet *ifp = &sc->arpcom.ac_if;
6339 	int i;
6340 
6341 	/* First some info about the rx_bd chain structure. */
6342 	if_printf(ifp,
6343 	"----------------------------"
6344 	"  rx_bd  chain  "
6345 	"----------------------------\n");
6346 
6347 	if_printf(ifp, "page size      = 0x%08X, "
6348 		  "rx chain pages        = 0x%08X\n",
6349 		  (uint32_t)BCM_PAGE_SIZE, (uint32_t)RX_PAGES);
6350 
6351 	if_printf(ifp, "rx_bd per page = 0x%08X, "
6352 		  "usable rx_bd per page = 0x%08X\n",
6353 		  (uint32_t)TOTAL_RX_BD_PER_PAGE,
6354 		  (uint32_t)USABLE_RX_BD_PER_PAGE);
6355 
6356 	if_printf(ifp, "total rx_bd    = 0x%08X\n", (uint32_t)TOTAL_RX_BD);
6357 
6358 	if_printf(ifp,
6359 	"----------------------------"
6360 	"   rx_bd data   "
6361 	"----------------------------\n");
6362 
6363 	/* Now print out the rx_bd's themselves. */
6364 	for (i = 0; i < count; i++) {
6365 		struct rx_bd *rxbd;
6366 
6367 		rxbd = &sc->rx_bd_chain[RX_PAGE(rx_prod)][RX_IDX(rx_prod)];
6368 		bce_dump_rxbd(sc, rx_prod, rxbd);
6369 		rx_prod = RX_CHAIN_IDX(NEXT_RX_BD(rx_prod));
6370 	}
6371 
6372 	if_printf(ifp,
6373 	"----------------------------"
6374 	"----------------"
6375 	"----------------------------\n");
6376 }
6377 
6378 
6379 /****************************************************************************/
6380 /* Prints out the status block from host memory.                            */
6381 /*                                                                          */
6382 /* Returns:                                                                 */
6383 /*   Nothing.                                                               */
6384 /****************************************************************************/
6385 static void
6386 bce_dump_status_block(struct bce_softc *sc)
6387 {
6388 	struct status_block *sblk = sc->status_block;
6389 	struct ifnet *ifp = &sc->arpcom.ac_if;
6390 
6391 	if_printf(ifp,
6392 	"----------------------------"
6393 	"  Status Block  "
6394 	"----------------------------\n");
6395 
6396 	if_printf(ifp, "    0x%08X - attn_bits\n", sblk->status_attn_bits);
6397 
6398 	if_printf(ifp, "    0x%08X - attn_bits_ack\n",
6399 		  sblk->status_attn_bits_ack);
6400 
6401 	if_printf(ifp, "0x%04X(0x%04X) - rx_cons0\n",
6402 	    sblk->status_rx_quick_consumer_index0,
6403 	    (uint16_t)RX_CHAIN_IDX(sblk->status_rx_quick_consumer_index0));
6404 
6405 	if_printf(ifp, "0x%04X(0x%04X) - tx_cons0\n",
6406 	    sblk->status_tx_quick_consumer_index0,
6407 	    (uint16_t)TX_CHAIN_IDX(sblk->status_tx_quick_consumer_index0));
6408 
6409 	if_printf(ifp, "        0x%04X - status_idx\n", sblk->status_idx);
6410 
6411 	/* These indices are not used by normal L2 drivers. */
6412 	if (sblk->status_rx_quick_consumer_index1) {
6413 		if_printf(ifp, "0x%04X(0x%04X) - rx_cons1\n",
6414 		sblk->status_rx_quick_consumer_index1,
6415 		(uint16_t)RX_CHAIN_IDX(sblk->status_rx_quick_consumer_index1));
6416 	}
6417 
6418 	if (sblk->status_tx_quick_consumer_index1) {
6419 		if_printf(ifp, "0x%04X(0x%04X) - tx_cons1\n",
6420 		sblk->status_tx_quick_consumer_index1,
6421 		(uint16_t)TX_CHAIN_IDX(sblk->status_tx_quick_consumer_index1));
6422 	}
6423 
6424 	if (sblk->status_rx_quick_consumer_index2) {
6425 		if_printf(ifp, "0x%04X(0x%04X) - rx_cons2\n",
6426 		sblk->status_rx_quick_consumer_index2,
6427 		(uint16_t)RX_CHAIN_IDX(sblk->status_rx_quick_consumer_index2));
6428 	}
6429 
6430 	if (sblk->status_tx_quick_consumer_index2) {
6431 		if_printf(ifp, "0x%04X(0x%04X) - tx_cons2\n",
6432 		sblk->status_tx_quick_consumer_index2,
6433 		(uint16_t)TX_CHAIN_IDX(sblk->status_tx_quick_consumer_index2));
6434 	}
6435 
6436 	if (sblk->status_rx_quick_consumer_index3) {
6437 		if_printf(ifp, "0x%04X(0x%04X) - rx_cons3\n",
6438 		sblk->status_rx_quick_consumer_index3,
6439 		(uint16_t)RX_CHAIN_IDX(sblk->status_rx_quick_consumer_index3));
6440 	}
6441 
6442 	if (sblk->status_tx_quick_consumer_index3) {
6443 		if_printf(ifp, "0x%04X(0x%04X) - tx_cons3\n",
6444 		sblk->status_tx_quick_consumer_index3,
6445 		(uint16_t)TX_CHAIN_IDX(sblk->status_tx_quick_consumer_index3));
6446 	}
6447 
6448 	if (sblk->status_rx_quick_consumer_index4 ||
6449 	    sblk->status_rx_quick_consumer_index5) {
6450 		if_printf(ifp, "rx_cons4  = 0x%08X, rx_cons5      = 0x%08X\n",
6451 			  sblk->status_rx_quick_consumer_index4,
6452 			  sblk->status_rx_quick_consumer_index5);
6453 	}
6454 
6455 	if (sblk->status_rx_quick_consumer_index6 ||
6456 	    sblk->status_rx_quick_consumer_index7) {
6457 		if_printf(ifp, "rx_cons6  = 0x%08X, rx_cons7      = 0x%08X\n",
6458 			  sblk->status_rx_quick_consumer_index6,
6459 			  sblk->status_rx_quick_consumer_index7);
6460 	}
6461 
6462 	if (sblk->status_rx_quick_consumer_index8 ||
6463 	    sblk->status_rx_quick_consumer_index9) {
6464 		if_printf(ifp, "rx_cons8  = 0x%08X, rx_cons9      = 0x%08X\n",
6465 			  sblk->status_rx_quick_consumer_index8,
6466 			  sblk->status_rx_quick_consumer_index9);
6467 	}
6468 
6469 	if (sblk->status_rx_quick_consumer_index10 ||
6470 	    sblk->status_rx_quick_consumer_index11) {
6471 		if_printf(ifp, "rx_cons10 = 0x%08X, rx_cons11     = 0x%08X\n",
6472 			  sblk->status_rx_quick_consumer_index10,
6473 			  sblk->status_rx_quick_consumer_index11);
6474 	}
6475 
6476 	if (sblk->status_rx_quick_consumer_index12 ||
6477 	    sblk->status_rx_quick_consumer_index13) {
6478 		if_printf(ifp, "rx_cons12 = 0x%08X, rx_cons13     = 0x%08X\n",
6479 			  sblk->status_rx_quick_consumer_index12,
6480 			  sblk->status_rx_quick_consumer_index13);
6481 	}
6482 
6483 	if (sblk->status_rx_quick_consumer_index14 ||
6484 	    sblk->status_rx_quick_consumer_index15) {
6485 		if_printf(ifp, "rx_cons14 = 0x%08X, rx_cons15     = 0x%08X\n",
6486 			  sblk->status_rx_quick_consumer_index14,
6487 			  sblk->status_rx_quick_consumer_index15);
6488 	}
6489 
6490 	if (sblk->status_completion_producer_index ||
6491 	    sblk->status_cmd_consumer_index) {
6492 		if_printf(ifp, "com_prod  = 0x%08X, cmd_cons      = 0x%08X\n",
6493 			  sblk->status_completion_producer_index,
6494 			  sblk->status_cmd_consumer_index);
6495 	}
6496 
6497 	if_printf(ifp,
6498 	"----------------------------"
6499 	"----------------"
6500 	"----------------------------\n");
6501 }
6502 
6503 
6504 /****************************************************************************/
6505 /* Prints out the statistics block.                                         */
6506 /*                                                                          */
6507 /* Returns:                                                                 */
6508 /*   Nothing.                                                               */
6509 /****************************************************************************/
6510 static void
6511 bce_dump_stats_block(struct bce_softc *sc)
6512 {
6513 	struct statistics_block *sblk = sc->stats_block;
6514 	struct ifnet *ifp = &sc->arpcom.ac_if;
6515 
6516 	if_printf(ifp,
6517 	"---------------"
6518 	" Stats Block  (All Stats Not Shown Are 0) "
6519 	"---------------\n");
6520 
6521 	if (sblk->stat_IfHCInOctets_hi || sblk->stat_IfHCInOctets_lo) {
6522 		if_printf(ifp, "0x%08X:%08X : IfHcInOctets\n",
6523 			  sblk->stat_IfHCInOctets_hi,
6524 			  sblk->stat_IfHCInOctets_lo);
6525 	}
6526 
6527 	if (sblk->stat_IfHCInBadOctets_hi || sblk->stat_IfHCInBadOctets_lo) {
6528 		if_printf(ifp, "0x%08X:%08X : IfHcInBadOctets\n",
6529 			  sblk->stat_IfHCInBadOctets_hi,
6530 			  sblk->stat_IfHCInBadOctets_lo);
6531 	}
6532 
6533 	if (sblk->stat_IfHCOutOctets_hi || sblk->stat_IfHCOutOctets_lo) {
6534 		if_printf(ifp, "0x%08X:%08X : IfHcOutOctets\n",
6535 			  sblk->stat_IfHCOutOctets_hi,
6536 			  sblk->stat_IfHCOutOctets_lo);
6537 	}
6538 
6539 	if (sblk->stat_IfHCOutBadOctets_hi || sblk->stat_IfHCOutBadOctets_lo) {
6540 		if_printf(ifp, "0x%08X:%08X : IfHcOutBadOctets\n",
6541 			  sblk->stat_IfHCOutBadOctets_hi,
6542 			  sblk->stat_IfHCOutBadOctets_lo);
6543 	}
6544 
6545 	if (sblk->stat_IfHCInUcastPkts_hi || sblk->stat_IfHCInUcastPkts_lo) {
6546 		if_printf(ifp, "0x%08X:%08X : IfHcInUcastPkts\n",
6547 			  sblk->stat_IfHCInUcastPkts_hi,
6548 			  sblk->stat_IfHCInUcastPkts_lo);
6549 	}
6550 
6551 	if (sblk->stat_IfHCInBroadcastPkts_hi ||
6552 	    sblk->stat_IfHCInBroadcastPkts_lo) {
6553 		if_printf(ifp, "0x%08X:%08X : IfHcInBroadcastPkts\n",
6554 			  sblk->stat_IfHCInBroadcastPkts_hi,
6555 			  sblk->stat_IfHCInBroadcastPkts_lo);
6556 	}
6557 
6558 	if (sblk->stat_IfHCInMulticastPkts_hi ||
6559 	    sblk->stat_IfHCInMulticastPkts_lo) {
6560 		if_printf(ifp, "0x%08X:%08X : IfHcInMulticastPkts\n",
6561 			  sblk->stat_IfHCInMulticastPkts_hi,
6562 			  sblk->stat_IfHCInMulticastPkts_lo);
6563 	}
6564 
6565 	if (sblk->stat_IfHCOutUcastPkts_hi || sblk->stat_IfHCOutUcastPkts_lo) {
6566 		if_printf(ifp, "0x%08X:%08X : IfHcOutUcastPkts\n",
6567 			  sblk->stat_IfHCOutUcastPkts_hi,
6568 			  sblk->stat_IfHCOutUcastPkts_lo);
6569 	}
6570 
6571 	if (sblk->stat_IfHCOutBroadcastPkts_hi ||
6572 	    sblk->stat_IfHCOutBroadcastPkts_lo) {
6573 		if_printf(ifp, "0x%08X:%08X : IfHcOutBroadcastPkts\n",
6574 			  sblk->stat_IfHCOutBroadcastPkts_hi,
6575 			  sblk->stat_IfHCOutBroadcastPkts_lo);
6576 	}
6577 
6578 	if (sblk->stat_IfHCOutMulticastPkts_hi ||
6579 	    sblk->stat_IfHCOutMulticastPkts_lo) {
6580 		if_printf(ifp, "0x%08X:%08X : IfHcOutMulticastPkts\n",
6581 			  sblk->stat_IfHCOutMulticastPkts_hi,
6582 			  sblk->stat_IfHCOutMulticastPkts_lo);
6583 	}
6584 
6585 	if (sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors) {
6586 		if_printf(ifp, "         0x%08X : "
6587 		"emac_tx_stat_dot3statsinternalmactransmiterrors\n",
6588 		sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors);
6589 	}
6590 
6591 	if (sblk->stat_Dot3StatsCarrierSenseErrors) {
6592 		if_printf(ifp, "         0x%08X : "
6593 			  "Dot3StatsCarrierSenseErrors\n",
6594 			  sblk->stat_Dot3StatsCarrierSenseErrors);
6595 	}
6596 
6597 	if (sblk->stat_Dot3StatsFCSErrors) {
6598 		if_printf(ifp, "         0x%08X : Dot3StatsFCSErrors\n",
6599 			  sblk->stat_Dot3StatsFCSErrors);
6600 	}
6601 
6602 	if (sblk->stat_Dot3StatsAlignmentErrors) {
6603 		if_printf(ifp, "         0x%08X : Dot3StatsAlignmentErrors\n",
6604 			  sblk->stat_Dot3StatsAlignmentErrors);
6605 	}
6606 
6607 	if (sblk->stat_Dot3StatsSingleCollisionFrames) {
6608 		if_printf(ifp, "         0x%08X : "
6609 			  "Dot3StatsSingleCollisionFrames\n",
6610 			  sblk->stat_Dot3StatsSingleCollisionFrames);
6611 	}
6612 
6613 	if (sblk->stat_Dot3StatsMultipleCollisionFrames) {
6614 		if_printf(ifp, "         0x%08X : "
6615 			  "Dot3StatsMultipleCollisionFrames\n",
6616 			  sblk->stat_Dot3StatsMultipleCollisionFrames);
6617 	}
6618 
6619 	if (sblk->stat_Dot3StatsDeferredTransmissions) {
6620 		if_printf(ifp, "         0x%08X : "
6621 			  "Dot3StatsDeferredTransmissions\n",
6622 			  sblk->stat_Dot3StatsDeferredTransmissions);
6623 	}
6624 
6625 	if (sblk->stat_Dot3StatsExcessiveCollisions) {
6626 		if_printf(ifp, "         0x%08X : "
6627 			  "Dot3StatsExcessiveCollisions\n",
6628 			  sblk->stat_Dot3StatsExcessiveCollisions);
6629 	}
6630 
6631 	if (sblk->stat_Dot3StatsLateCollisions) {
6632 		if_printf(ifp, "         0x%08X : Dot3StatsLateCollisions\n",
6633 			  sblk->stat_Dot3StatsLateCollisions);
6634 	}
6635 
6636 	if (sblk->stat_EtherStatsCollisions) {
6637 		if_printf(ifp, "         0x%08X : EtherStatsCollisions\n",
6638 			  sblk->stat_EtherStatsCollisions);
6639 	}
6640 
6641 	if (sblk->stat_EtherStatsFragments)  {
6642 		if_printf(ifp, "         0x%08X : EtherStatsFragments\n",
6643 			  sblk->stat_EtherStatsFragments);
6644 	}
6645 
6646 	if (sblk->stat_EtherStatsJabbers) {
6647 		if_printf(ifp, "         0x%08X : EtherStatsJabbers\n",
6648 			  sblk->stat_EtherStatsJabbers);
6649 	}
6650 
6651 	if (sblk->stat_EtherStatsUndersizePkts) {
6652 		if_printf(ifp, "         0x%08X : EtherStatsUndersizePkts\n",
6653 			  sblk->stat_EtherStatsUndersizePkts);
6654 	}
6655 
6656 	if (sblk->stat_EtherStatsOverrsizePkts) {
6657 		if_printf(ifp, "         0x%08X : EtherStatsOverrsizePkts\n",
6658 			  sblk->stat_EtherStatsOverrsizePkts);
6659 	}
6660 
6661 	if (sblk->stat_EtherStatsPktsRx64Octets) {
6662 		if_printf(ifp, "         0x%08X : EtherStatsPktsRx64Octets\n",
6663 			  sblk->stat_EtherStatsPktsRx64Octets);
6664 	}
6665 
6666 	if (sblk->stat_EtherStatsPktsRx65Octetsto127Octets) {
6667 		if_printf(ifp, "         0x%08X : "
6668 			  "EtherStatsPktsRx65Octetsto127Octets\n",
6669 			  sblk->stat_EtherStatsPktsRx65Octetsto127Octets);
6670 	}
6671 
6672 	if (sblk->stat_EtherStatsPktsRx128Octetsto255Octets) {
6673 		if_printf(ifp, "         0x%08X : "
6674 			  "EtherStatsPktsRx128Octetsto255Octets\n",
6675 			  sblk->stat_EtherStatsPktsRx128Octetsto255Octets);
6676 	}
6677 
6678 	if (sblk->stat_EtherStatsPktsRx256Octetsto511Octets) {
6679 		if_printf(ifp, "         0x%08X : "
6680 			  "EtherStatsPktsRx256Octetsto511Octets\n",
6681 			  sblk->stat_EtherStatsPktsRx256Octetsto511Octets);
6682 	}
6683 
6684 	if (sblk->stat_EtherStatsPktsRx512Octetsto1023Octets) {
6685 		if_printf(ifp, "         0x%08X : "
6686 			  "EtherStatsPktsRx512Octetsto1023Octets\n",
6687 			  sblk->stat_EtherStatsPktsRx512Octetsto1023Octets);
6688 	}
6689 
6690 	if (sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets) {
6691 		if_printf(ifp, "         0x%08X : "
6692 			  "EtherStatsPktsRx1024Octetsto1522Octets\n",
6693 			  sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets);
6694 	}
6695 
6696 	if (sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets) {
6697 		if_printf(ifp, "         0x%08X : "
6698 			  "EtherStatsPktsRx1523Octetsto9022Octets\n",
6699 			  sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets);
6700 	}
6701 
6702 	if (sblk->stat_EtherStatsPktsTx64Octets) {
6703 		if_printf(ifp, "         0x%08X : EtherStatsPktsTx64Octets\n",
6704 			  sblk->stat_EtherStatsPktsTx64Octets);
6705 	}
6706 
6707 	if (sblk->stat_EtherStatsPktsTx65Octetsto127Octets) {
6708 		if_printf(ifp, "         0x%08X : "
6709 			  "EtherStatsPktsTx65Octetsto127Octets\n",
6710 			  sblk->stat_EtherStatsPktsTx65Octetsto127Octets);
6711 	}
6712 
6713 	if (sblk->stat_EtherStatsPktsTx128Octetsto255Octets) {
6714 		if_printf(ifp, "         0x%08X : "
6715 			  "EtherStatsPktsTx128Octetsto255Octets\n",
6716 			  sblk->stat_EtherStatsPktsTx128Octetsto255Octets);
6717 	}
6718 
6719 	if (sblk->stat_EtherStatsPktsTx256Octetsto511Octets) {
6720 		if_printf(ifp, "         0x%08X : "
6721 			  "EtherStatsPktsTx256Octetsto511Octets\n",
6722 			  sblk->stat_EtherStatsPktsTx256Octetsto511Octets);
6723 	}
6724 
6725 	if (sblk->stat_EtherStatsPktsTx512Octetsto1023Octets) {
6726 		if_printf(ifp, "         0x%08X : "
6727 			  "EtherStatsPktsTx512Octetsto1023Octets\n",
6728 			  sblk->stat_EtherStatsPktsTx512Octetsto1023Octets);
6729 	}
6730 
6731 	if (sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets) {
6732 		if_printf(ifp, "         0x%08X : "
6733 			  "EtherStatsPktsTx1024Octetsto1522Octets\n",
6734 			  sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets);
6735 	}
6736 
6737 	if (sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets) {
6738 		if_printf(ifp, "         0x%08X : "
6739 			  "EtherStatsPktsTx1523Octetsto9022Octets\n",
6740 			  sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets);
6741 	}
6742 
6743 	if (sblk->stat_XonPauseFramesReceived) {
6744 		if_printf(ifp, "         0x%08X : XonPauseFramesReceived\n",
6745 			  sblk->stat_XonPauseFramesReceived);
6746 	}
6747 
6748 	if (sblk->stat_XoffPauseFramesReceived) {
6749 		if_printf(ifp, "         0x%08X : XoffPauseFramesReceived\n",
6750 			  sblk->stat_XoffPauseFramesReceived);
6751 	}
6752 
6753 	if (sblk->stat_OutXonSent) {
6754 		if_printf(ifp, "         0x%08X : OutXonSent\n",
6755 			  sblk->stat_OutXonSent);
6756 	}
6757 
6758 	if (sblk->stat_OutXoffSent) {
6759 		if_printf(ifp, "         0x%08X : OutXoffSent\n",
6760 			  sblk->stat_OutXoffSent);
6761 	}
6762 
6763 	if (sblk->stat_FlowControlDone) {
6764 		if_printf(ifp, "         0x%08X : FlowControlDone\n",
6765 			  sblk->stat_FlowControlDone);
6766 	}
6767 
6768 	if (sblk->stat_MacControlFramesReceived) {
6769 		if_printf(ifp, "         0x%08X : MacControlFramesReceived\n",
6770 			  sblk->stat_MacControlFramesReceived);
6771 	}
6772 
6773 	if (sblk->stat_XoffStateEntered) {
6774 		if_printf(ifp, "         0x%08X : XoffStateEntered\n",
6775 			  sblk->stat_XoffStateEntered);
6776 	}
6777 
6778 	if (sblk->stat_IfInFramesL2FilterDiscards) {
6779 		if_printf(ifp, "         0x%08X : IfInFramesL2FilterDiscards\n",
			  sblk->stat_IfInFramesL2FilterDiscards);
6780 	}
6781 
6782 	if (sblk->stat_IfInRuleCheckerDiscards) {
6783 		if_printf(ifp, "         0x%08X : IfInRuleCheckerDiscards\n",
6784 			  sblk->stat_IfInRuleCheckerDiscards);
6785 	}
6786 
6787 	if (sblk->stat_IfInFTQDiscards) {
6788 		if_printf(ifp, "         0x%08X : IfInFTQDiscards\n",
6789 			  sblk->stat_IfInFTQDiscards);
6790 	}
6791 
6792 	if (sblk->stat_IfInMBUFDiscards) {
6793 		if_printf(ifp, "         0x%08X : IfInMBUFDiscards\n",
6794 			  sblk->stat_IfInMBUFDiscards);
6795 	}
6796 
6797 	if (sblk->stat_IfInRuleCheckerP4Hit) {
6798 		if_printf(ifp, "         0x%08X : IfInRuleCheckerP4Hit\n",
6799 			  sblk->stat_IfInRuleCheckerP4Hit);
6800 	}
6801 
6802 	if (sblk->stat_CatchupInRuleCheckerDiscards) {
6803 		if_printf(ifp, "         0x%08X : "
6804 			  "CatchupInRuleCheckerDiscards\n",
6805 			  sblk->stat_CatchupInRuleCheckerDiscards);
6806 	}
6807 
6808 	if (sblk->stat_CatchupInFTQDiscards) {
6809 		if_printf(ifp, "         0x%08X : CatchupInFTQDiscards\n",
6810 			  sblk->stat_CatchupInFTQDiscards);
6811 	}
6812 
6813 	if (sblk->stat_CatchupInMBUFDiscards) {
6814 		if_printf(ifp, "         0x%08X : CatchupInMBUFDiscards\n",
6815 			  sblk->stat_CatchupInMBUFDiscards);
6816 	}
6817 
6818 	if (sblk->stat_CatchupInRuleCheckerP4Hit) {
6819 		if_printf(ifp, "         0x%08X : CatchupInRuleCheckerP4Hit\n",
6820 			  sblk->stat_CatchupInRuleCheckerP4Hit);
6821 	}
6822 
6823 	if_printf(ifp,
6824 	"----------------------------"
6825 	"----------------"
6826 	"----------------------------\n");
6827 }
6828 
6829 
6830 /****************************************************************************/
6831 /* Prints out a summary of the driver state.                                */
6832 /*                                                                          */
6833 /* Returns:                                                                 */
6834 /*   Nothing.                                                               */
6835 /****************************************************************************/
6836 static void
6837 bce_dump_driver_state(struct bce_softc *sc)
6838 {
6839 	struct ifnet *ifp = &sc->arpcom.ac_if;
6840 	uint32_t val_hi, val_lo;
6841 
6842 	if_printf(ifp,
6843 	"-----------------------------"
6844 	" Driver State "
6845 	"-----------------------------\n");
6846 
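	/*
	 * Print the virtual addresses of the major driver data structures,
	 * followed by the ring indices and allocation counters.
	 */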
6847 	val_hi = BCE_ADDR_HI(sc);
6848 	val_lo = BCE_ADDR_LO(sc);
6849 	if_printf(ifp, "0x%08X:%08X - (sc) driver softc structure "
6850 		  "virtual address\n", val_hi, val_lo);
6851 
6852 	val_hi = BCE_ADDR_HI(sc->status_block);
6853 	val_lo = BCE_ADDR_LO(sc->status_block);
6854 	if_printf(ifp, "0x%08X:%08X - (sc->status_block) status block "
6855 		  "virtual address\n", val_hi, val_lo);
6856 
6857 	val_hi = BCE_ADDR_HI(sc->stats_block);
6858 	val_lo = BCE_ADDR_LO(sc->stats_block);
6859 	if_printf(ifp, "0x%08X:%08X - (sc->stats_block) statistics block "
6860 		  "virtual address\n", val_hi, val_lo);
6861 
6862 	val_hi = BCE_ADDR_HI(sc->tx_bd_chain);
6863 	val_lo = BCE_ADDR_LO(sc->tx_bd_chain);
6864 	if_printf(ifp, "0x%08X:%08X - (sc->tx_bd_chain) tx_bd chain "
6865 		  "virtual address\n", val_hi, val_lo);
6866 
6867 	val_hi = BCE_ADDR_HI(sc->rx_bd_chain);
6868 	val_lo = BCE_ADDR_LO(sc->rx_bd_chain);
6869 	if_printf(ifp, "0x%08X:%08X - (sc->rx_bd_chain) rx_bd chain "
6870 		  "virtual address\n", val_hi, val_lo);
6871 
6872 	val_hi = BCE_ADDR_HI(sc->tx_mbuf_ptr);
6873 	val_lo = BCE_ADDR_LO(sc->tx_mbuf_ptr);
6874 	if_printf(ifp, "0x%08X:%08X - (sc->tx_mbuf_ptr) tx mbuf chain "
6875 		  "virtual address\n", val_hi, val_lo);
6876 
6877 	val_hi = BCE_ADDR_HI(sc->rx_mbuf_ptr);
6878 	val_lo = BCE_ADDR_LO(sc->rx_mbuf_ptr);
6879 	if_printf(ifp, "0x%08X:%08X - (sc->rx_mbuf_ptr) rx mbuf chain "
6880 		  "virtual address\n", val_hi, val_lo);
6881 
6882 	if_printf(ifp, "         0x%08X - (sc->interrupts_generated) "
6883 		  "h/w intrs\n", sc->interrupts_generated);
6884 
6885 	if_printf(ifp, "         0x%08X - (sc->rx_interrupts) "
6886 		  "rx interrupts handled\n", sc->rx_interrupts);
6887 
6888 	if_printf(ifp, "         0x%08X - (sc->tx_interrupts) "
6889 		  "tx interrupts handled\n", sc->tx_interrupts);
6890 
6891 	if_printf(ifp, "         0x%08X - (sc->last_status_idx) "
6892 		  "status block index\n", sc->last_status_idx);
6893 
6894 	if_printf(ifp, "     0x%04X(0x%04X) - (sc->tx_prod) "
6895 		  "tx producer index\n",
6896 		  sc->tx_prod, (uint16_t)TX_CHAIN_IDX(sc->tx_prod));
6897 
6898 	if_printf(ifp, "     0x%04X(0x%04X) - (sc->tx_cons) "
6899 		  "tx consumer index\n",
6900 		  sc->tx_cons, (uint16_t)TX_CHAIN_IDX(sc->tx_cons));
6901 
6902 	if_printf(ifp, "         0x%08X - (sc->tx_prod_bseq) "
6903 		  "tx producer bseq index\n", sc->tx_prod_bseq);
6904 
6905 	if_printf(ifp, "     0x%04X(0x%04X) - (sc->rx_prod) "
6906 		  "rx producer index\n",
6907 		  sc->rx_prod, (uint16_t)RX_CHAIN_IDX(sc->rx_prod));
6908 
6909 	if_printf(ifp, "     0x%04X(0x%04X) - (sc->rx_cons) "
6910 		  "rx consumer index\n",
6911 		  sc->rx_cons, (uint16_t)RX_CHAIN_IDX(sc->rx_cons));
6912 
6913 	if_printf(ifp, "         0x%08X - (sc->rx_prod_bseq) "
6914 		  "rx producer bseq index\n", sc->rx_prod_bseq);
6915 
6916 	if_printf(ifp, "         0x%08X - (sc->rx_mbuf_alloc) "
6917 		  "rx mbufs allocated\n", sc->rx_mbuf_alloc);
6918 
6919 	if_printf(ifp, "         0x%08X - (sc->free_rx_bd) "
6920 		  "free rx_bd's\n", sc->free_rx_bd);
6921 
6922 	if_printf(ifp, "0x%08X/%08X - (sc->rx_low_watermark) rx "
6923 		  "low watermark\n", sc->rx_low_watermark, sc->max_rx_bd);
6924 
6925 	if_printf(ifp, "         0x%08X - (sc->tx_mbuf_alloc) "
6926 		  "tx mbufs allocated\n", sc->tx_mbuf_alloc);
6927 
6928 	if_printf(ifp, "         0x%08X - (sc->rx_mbuf_alloc) "
6929 		  "rx mbufs allocated\n", sc->rx_mbuf_alloc);
6930 
6931 	if_printf(ifp, "         0x%08X - (sc->used_tx_bd) used tx_bd's\n",
6932 		  sc->used_tx_bd);
6933 
6934 	if_printf(ifp, "0x%08X/%08X - (sc->tx_hi_watermark) tx hi watermark\n",
6935 		  sc->tx_hi_watermark, sc->max_tx_bd);
6936 
6937 	if_printf(ifp, "         0x%08X - (sc->mbuf_alloc_failed) "
6938 		  "failed mbuf alloc\n", sc->mbuf_alloc_failed);
6939 
6940 	if_printf(ifp,
6941 	"----------------------------"
6942 	"----------------"
6943 	"----------------------------\n");
6944 }
6945 
6946 
6947 /****************************************************************************/
6948 /* Prints out the hardware state through a summary of important registers,  */
6949 /* followed by a complete register dump.                                    */
6950 /*                                                                          */
6951 /* Returns:                                                                 */
6952 /*   Nothing.                                                               */
6953 /****************************************************************************/
6954 static void
6955 bce_dump_hw_state(struct bce_softc *sc)
6956 {
6957 	struct ifnet *ifp = &sc->arpcom.ac_if;
6958 	uint32_t val1;
6959 	int i;
6960 
6961 	if_printf(ifp,
6962 	"----------------------------"
6963 	" Hardware State "
6964 	"----------------------------\n");
6965 
6966 	if_printf(ifp, "0x%08X - bootcode version\n", sc->bce_fw_ver);
6967 
6968 	val1 = REG_RD(sc, BCE_MISC_ENABLE_STATUS_BITS);
6969 	if_printf(ifp, "0x%08X - (0x%06X) misc_enable_status_bits\n",
6970 		  val1, BCE_MISC_ENABLE_STATUS_BITS);
6971 
6972 	val1 = REG_RD(sc, BCE_DMA_STATUS);
6973 	if_printf(ifp, "0x%08X - (0x%04X) dma_status\n", val1, BCE_DMA_STATUS);
6974 
6975 	val1 = REG_RD(sc, BCE_CTX_STATUS);
6976 	if_printf(ifp, "0x%08X - (0x%04X) ctx_status\n", val1, BCE_CTX_STATUS);
6977 
6978 	val1 = REG_RD(sc, BCE_EMAC_STATUS);
6979 	if_printf(ifp, "0x%08X - (0x%04X) emac_status\n",
6980 		  val1, BCE_EMAC_STATUS);
6981 
6982 	val1 = REG_RD(sc, BCE_RPM_STATUS);
6983 	if_printf(ifp, "0x%08X - (0x%04X) rpm_status\n", val1, BCE_RPM_STATUS);
6984 
6985 	val1 = REG_RD(sc, BCE_TBDR_STATUS);
6986 	if_printf(ifp, "0x%08X - (0x%04X) tbdr_status\n",
6987 		  val1, BCE_TBDR_STATUS);
6988 
6989 	val1 = REG_RD(sc, BCE_TDMA_STATUS);
6990 	if_printf(ifp, "0x%08X - (0x%04X) tdma_status\n",
6991 		  val1, BCE_TDMA_STATUS);
6992 
6993 	val1 = REG_RD(sc, BCE_HC_STATUS);
6994 	if_printf(ifp, "0x%08X - (0x%06X) hc_status\n", val1, BCE_HC_STATUS);
6995 
6996 	val1 = REG_RD_IND(sc, BCE_TXP_CPU_STATE);
6997 	if_printf(ifp, "0x%08X - (0x%06X) txp_cpu_state\n",
6998 		  val1, BCE_TXP_CPU_STATE);
6999 
7000 	val1 = REG_RD_IND(sc, BCE_TPAT_CPU_STATE);
7001 	if_printf(ifp, "0x%08X - (0x%06X) tpat_cpu_state\n",
7002 		  val1, BCE_TPAT_CPU_STATE);
7003 
7004 	val1 = REG_RD_IND(sc, BCE_RXP_CPU_STATE);
7005 	if_printf(ifp, "0x%08X - (0x%06X) rxp_cpu_state\n",
7006 		  val1, BCE_RXP_CPU_STATE);
7007 
7008 	val1 = REG_RD_IND(sc, BCE_COM_CPU_STATE);
7009 	if_printf(ifp, "0x%08X - (0x%06X) com_cpu_state\n",
7010 		  val1, BCE_COM_CPU_STATE);
7011 
7012 	val1 = REG_RD_IND(sc, BCE_MCP_CPU_STATE);
7013 	if_printf(ifp, "0x%08X - (0x%06X) mcp_cpu_state\n",
7014 		  val1, BCE_MCP_CPU_STATE);
7015 
7016 	val1 = REG_RD_IND(sc, BCE_CP_CPU_STATE);
7017 	if_printf(ifp, "0x%08X - (0x%06X) cp_cpu_state\n",
7018 		  val1, BCE_CP_CPU_STATE);
7019 
7020 	if_printf(ifp,
7021 	"----------------------------"
7022 	"----------------"
7023 	"----------------------------\n");
7024 
7025 	if_printf(ifp,
7026 	"----------------------------"
7027 	" Register  Dump "
7028 	"----------------------------\n");
7029 
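	/* Dump the register space from 0x400 to 0x7ffc, four words per line. */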
7030 	for (i = 0x400; i < 0x8000; i += 0x10) {
7031 		if_printf(ifp, "0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n", i,
7032 			  REG_RD(sc, i),
7033 			  REG_RD(sc, i + 0x4),
7034 			  REG_RD(sc, i + 0x8),
7035 			  REG_RD(sc, i + 0xc));
7036 	}
7037 
7038 	if_printf(ifp,
7039 	"----------------------------"
7040 	"----------------"
7041 	"----------------------------\n");
7042 }
7043 
7044 
7045 /****************************************************************************/
7046 /* Prints out the TXP state.                                                */
7047 /*                                                                          */
7048 /* Returns:                                                                 */
7049 /*   Nothing.                                                               */
7050 /****************************************************************************/
7051 static void
7052 bce_dump_txp_state(struct bce_softc *sc)
7053 {
7054 	struct ifnet *ifp = &sc->arpcom.ac_if;
7055 	uint32_t val1;
7056 	int i;
7057 
7058 	if_printf(ifp,
7059 	"----------------------------"
7060 	"   TXP  State   "
7061 	"----------------------------\n");
7062 
7063 	val1 = REG_RD_IND(sc, BCE_TXP_CPU_MODE);
7064 	if_printf(ifp, "0x%08X - (0x%06X) txp_cpu_mode\n",
7065 		  val1, BCE_TXP_CPU_MODE);
7066 
7067 	val1 = REG_RD_IND(sc, BCE_TXP_CPU_STATE);
7068 	if_printf(ifp, "0x%08X - (0x%06X) txp_cpu_state\n",
7069 		  val1, BCE_TXP_CPU_STATE);
7070 
7071 	val1 = REG_RD_IND(sc, BCE_TXP_CPU_EVENT_MASK);
7072 	if_printf(ifp, "0x%08X - (0x%06X) txp_cpu_event_mask\n",
7073 		  val1, BCE_TXP_CPU_EVENT_MASK);
7074 
7075 	if_printf(ifp,
7076 	"----------------------------"
7077 	" Register  Dump "
7078 	"----------------------------\n");
7079 
7080 	for (i = BCE_TXP_CPU_MODE; i < 0x68000; i += 0x10) {
7081 		/* Skip the big blank spaces */
7082 		if (i < 0x45400 || i > 0x5ffff) {
7083 			if_printf(ifp, "0x%04X: "
7084 				  "0x%08X 0x%08X 0x%08X 0x%08X\n", i,
7085 				  REG_RD_IND(sc, i),
7086 				  REG_RD_IND(sc, i + 0x4),
7087 				  REG_RD_IND(sc, i + 0x8),
7088 				  REG_RD_IND(sc, i + 0xc));
7089 		}
7090 	}
7091 
7092 	if_printf(ifp,
7093 	"----------------------------"
7094 	"----------------"
7095 	"----------------------------\n");
7096 }
7097 
7098 
7099 /****************************************************************************/
7100 /* Prints out the RXP state.                                                */
7101 /*                                                                          */
7102 /* Returns:                                                                 */
7103 /*   Nothing.                                                               */
7104 /****************************************************************************/
7105 static void
7106 bce_dump_rxp_state(struct bce_softc *sc)
7107 {
7108 	struct ifnet *ifp = &sc->arpcom.ac_if;
7109 	uint32_t val1;
7110 	int i;
7111 
7112 	if_printf(ifp,
7113 	"----------------------------"
7114 	"   RXP  State   "
7115 	"----------------------------\n");
7116 
7117 	val1 = REG_RD_IND(sc, BCE_RXP_CPU_MODE);
7118 	if_printf(ifp, "0x%08X - (0x%06X) rxp_cpu_mode\n",
7119 		  val1, BCE_RXP_CPU_MODE);
7120 
7121 	val1 = REG_RD_IND(sc, BCE_RXP_CPU_STATE);
7122 	if_printf(ifp, "0x%08X - (0x%06X) rxp_cpu_state\n",
7123 		  val1, BCE_RXP_CPU_STATE);
7124 
7125 	val1 = REG_RD_IND(sc, BCE_RXP_CPU_EVENT_MASK);
7126 	if_printf(ifp, "0x%08X - (0x%06X) rxp_cpu_event_mask\n",
7127 		  val1, BCE_RXP_CPU_EVENT_MASK);
7128 
7129 	if_printf(ifp,
7130 	"----------------------------"
7131 	" Register  Dump "
7132 	"----------------------------\n");
7133 
7134 	for (i = BCE_RXP_CPU_MODE; i < 0xe8fff; i += 0x10) {
7135 		/* Skip the big blank spaces */
7136 		if (i < 0xc5400 || i > 0xdffff) {
7137 			if_printf(ifp, "0x%04X: "
7138 				  "0x%08X 0x%08X 0x%08X 0x%08X\n", i,
7139 				  REG_RD_IND(sc, i),
7140 				  REG_RD_IND(sc, i + 0x4),
7141 				  REG_RD_IND(sc, i + 0x8),
7142 				  REG_RD_IND(sc, i + 0xc));
7143 		}
7144 	}
7145 
7146 	if_printf(ifp,
7147 	"----------------------------"
7148 	"----------------"
7149 	"----------------------------\n");
7150 }
7151 
7152 
7153 /****************************************************************************/
7154 /* Prints out the TPAT state.                                               */
7155 /*                                                                          */
7156 /* Returns:                                                                 */
7157 /*   Nothing.                                                               */
7158 /****************************************************************************/
7159 static void
7160 bce_dump_tpat_state(struct bce_softc *sc)
7161 {
7162 	struct ifnet *ifp = &sc->arpcom.ac_if;
7163 	uint32_t val1;
7164 	int i;
7165 
7166 	if_printf(ifp,
7167 	"----------------------------"
7168 	"   TPAT State   "
7169 	"----------------------------\n");
7170 
7171 	val1 = REG_RD_IND(sc, BCE_TPAT_CPU_MODE);
7172 	if_printf(ifp, "0x%08X - (0x%06X) tpat_cpu_mode\n",
7173 		  val1, BCE_TPAT_CPU_MODE);
7174 
7175 	val1 = REG_RD_IND(sc, BCE_TPAT_CPU_STATE);
7176 	if_printf(ifp, "0x%08X - (0x%06X) tpat_cpu_state\n",
7177 		  val1, BCE_TPAT_CPU_STATE);
7178 
7179 	val1 = REG_RD_IND(sc, BCE_TPAT_CPU_EVENT_MASK);
7180 	if_printf(ifp, "0x%08X - (0x%06X) tpat_cpu_event_mask\n",
7181 		  val1, BCE_TPAT_CPU_EVENT_MASK);
7182 
7183 	if_printf(ifp,
7184 	"----------------------------"
7185 	" Register  Dump "
7186 	"----------------------------\n");
7187 
7188 	for (i = BCE_TPAT_CPU_MODE; i < 0xa3fff; i += 0x10) {
7189 		/* Skip the big blank spaces */
7190 		if (i < 0x85400 || i > 0x9ffff) {
7191 			if_printf(ifp, "0x%04X: "
7192 				  "0x%08X 0x%08X 0x%08X 0x%08X\n", i,
7193 				  REG_RD_IND(sc, i),
7194 				  REG_RD_IND(sc, i + 0x4),
7195 				  REG_RD_IND(sc, i + 0x8),
7196 				  REG_RD_IND(sc, i + 0xc));
7197 		}
7198 	}
7199 
7200 	if_printf(ifp,
7201 	"----------------------------"
7202 	"----------------"
7203 	"----------------------------\n");
7204 }
7205 
7206 
7207 /****************************************************************************/
7208 /* Prints out the driver state and then enters the debugger.                */
7209 /*                                                                          */
7210 /* Returns:                                                                 */
7211 /*   Nothing.                                                               */
7212 /****************************************************************************/
7213 static void
7214 bce_breakpoint(struct bce_softc *sc)
7215 {
7216 #if 0
7217 	bce_freeze_controller(sc);
7218 #endif
7219 
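	/*
	 * Dump the most useful state: driver softc, status block, tx chain,
	 * and the hardware/TXP registers.
	 */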
7220 	bce_dump_driver_state(sc);
7221 	bce_dump_status_block(sc);
7222 	bce_dump_tx_chain(sc, 0, TOTAL_TX_BD);
7223 	bce_dump_hw_state(sc);
7224 	bce_dump_txp_state(sc);
7225 
7226 #if 0
7227 	bce_unfreeze_controller(sc);
7228 #endif
7229 
7230 	/* Call the debugger. */
7231 	breakpoint();
7232 }
7233 
7234 #endif	/* BCE_DEBUG */
7235 
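/*
 * Sysctl handlers for the host coalescing parameters.  Each handler
 * forwards to bce_sysctl_coal_change() with the softc field to update
 * and a mask bit recording which parameter changed; the new values are
 * programmed into the chip by bce_coal_change().
 */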
7236 static int
7237 bce_sysctl_tx_bds_int(SYSCTL_HANDLER_ARGS)
7238 {
7239 	struct bce_softc *sc = arg1;
7240 
7241 	return bce_sysctl_coal_change(oidp, arg1, arg2, req,
7242 			&sc->bce_tx_quick_cons_trip_int,
7243 			BCE_COALMASK_TX_BDS_INT);
7244 }
7245 
7246 static int
7247 bce_sysctl_tx_bds(SYSCTL_HANDLER_ARGS)
7248 {
7249 	struct bce_softc *sc = arg1;
7250 
7251 	return bce_sysctl_coal_change(oidp, arg1, arg2, req,
7252 			&sc->bce_tx_quick_cons_trip,
7253 			BCE_COALMASK_TX_BDS);
7254 }
7255 
7256 static int
7257 bce_sysctl_tx_ticks_int(SYSCTL_HANDLER_ARGS)
7258 {
7259 	struct bce_softc *sc = arg1;
7260 
7261 	return bce_sysctl_coal_change(oidp, arg1, arg2, req,
7262 			&sc->bce_tx_ticks_int,
7263 			BCE_COALMASK_TX_TICKS_INT);
7264 }
7265 
7266 static int
7267 bce_sysctl_tx_ticks(SYSCTL_HANDLER_ARGS)
7268 {
7269 	struct bce_softc *sc = arg1;
7270 
7271 	return bce_sysctl_coal_change(oidp, arg1, arg2, req,
7272 			&sc->bce_tx_ticks,
7273 			BCE_COALMASK_TX_TICKS);
7274 }
7275 
7276 static int
7277 bce_sysctl_rx_bds_int(SYSCTL_HANDLER_ARGS)
7278 {
7279 	struct bce_softc *sc = arg1;
7280 
7281 	return bce_sysctl_coal_change(oidp, arg1, arg2, req,
7282 			&sc->bce_rx_quick_cons_trip_int,
7283 			BCE_COALMASK_RX_BDS_INT);
7284 }
7285 
7286 static int
7287 bce_sysctl_rx_bds(SYSCTL_HANDLER_ARGS)
7288 {
7289 	struct bce_softc *sc = arg1;
7290 
7291 	return bce_sysctl_coal_change(oidp, arg1, arg2, req,
7292 			&sc->bce_rx_quick_cons_trip,
7293 			BCE_COALMASK_RX_BDS);
7294 }
7295 
7296 static int
7297 bce_sysctl_rx_ticks_int(SYSCTL_HANDLER_ARGS)
7298 {
7299 	struct bce_softc *sc = arg1;
7300 
7301 	return bce_sysctl_coal_change(oidp, arg1, arg2, req,
7302 			&sc->bce_rx_ticks_int,
7303 			BCE_COALMASK_RX_TICKS_INT);
7304 }
7305 
7306 static int
7307 bce_sysctl_rx_ticks(SYSCTL_HANDLER_ARGS)
7308 {
7309 	struct bce_softc *sc = arg1;
7310 
7311 	return bce_sysctl_coal_change(oidp, arg1, arg2, req,
7312 			&sc->bce_rx_ticks,
7313 			BCE_COALMASK_RX_TICKS);
7314 }
7315 
7316 static int
7317 bce_sysctl_coal_change(SYSCTL_HANDLER_ARGS, uint32_t *coal,
7318 		       uint32_t coalchg_mask)
7319 {
7320 	struct bce_softc *sc = arg1;
7321 	struct ifnet *ifp = &sc->arpcom.ac_if;
7322 	int error = 0, v;
7323 
7324 	lwkt_serialize_enter(ifp->if_serializer);
7325 
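	/*
	 * Export the current value; if userland supplied a new one, reject
	 * negative values, otherwise record it and mark which coalescing
	 * parameter changed.
	 */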
7326 	v = *coal;
7327 	error = sysctl_handle_int(oidp, &v, 0, req);
7328 	if (!error && req->newptr != NULL) {
7329 		if (v < 0) {
7330 			error = EINVAL;
7331 		} else {
7332 			*coal = v;
7333 			sc->bce_coalchg_mask |= coalchg_mask;
7334 		}
7335 	}
7336 
7337 	lwkt_serialize_exit(ifp->if_serializer);
7338 	return error;
7339 }
7340 
7341 static void
7342 bce_coal_change(struct bce_softc *sc)
7343 {
7344 	struct ifnet *ifp = &sc->arpcom.ac_if;
7345 
7346 	ASSERT_SERIALIZED(ifp->if_serializer);
7347 
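	/* If the interface is not running, just discard the pending changes. */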
7348 	if ((ifp->if_flags & IFF_RUNNING) == 0) {
7349 		sc->bce_coalchg_mask = 0;
7350 		return;
7351 	}
7352 
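	/*
	 * Each host coalescing register packs the *_int variant in the
	 * upper 16 bits and the normal value in the lower 16 bits.
	 */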
7353 	if (sc->bce_coalchg_mask &
7354 	    (BCE_COALMASK_TX_BDS | BCE_COALMASK_TX_BDS_INT)) {
7355 		REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
7356 		       (sc->bce_tx_quick_cons_trip_int << 16) |
7357 		       sc->bce_tx_quick_cons_trip);
7358 		if (bootverbose) {
7359 			if_printf(ifp, "tx_bds %u, tx_bds_int %u\n",
7360 				  sc->bce_tx_quick_cons_trip,
7361 				  sc->bce_tx_quick_cons_trip_int);
7362 		}
7363 	}
7364 
7365 	if (sc->bce_coalchg_mask &
7366 	    (BCE_COALMASK_TX_TICKS | BCE_COALMASK_TX_TICKS_INT)) {
7367 		REG_WR(sc, BCE_HC_TX_TICKS,
7368 		       (sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks);
7369 		if (bootverbose) {
7370 			if_printf(ifp, "tx_ticks %u, tx_ticks_int %u\n",
7371 				  sc->bce_tx_ticks, sc->bce_tx_ticks_int);
7372 		}
7373 	}
7374 
7375 	if (sc->bce_coalchg_mask &
7376 	    (BCE_COALMASK_RX_BDS | BCE_COALMASK_RX_BDS_INT)) {
7377 		REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
7378 		       (sc->bce_rx_quick_cons_trip_int << 16) |
7379 		       sc->bce_rx_quick_cons_trip);
7380 		if (bootverbose) {
7381 			if_printf(ifp, "rx_bds %u, rx_bds_int %u\n",
7382 				  sc->bce_rx_quick_cons_trip,
7383 				  sc->bce_rx_quick_cons_trip_int);
7384 		}
7385 	}
7386 
7387 	if (sc->bce_coalchg_mask &
7388 	    (BCE_COALMASK_RX_TICKS | BCE_COALMASK_RX_TICKS_INT)) {
7389 		REG_WR(sc, BCE_HC_RX_TICKS,
7390 		       (sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks);
7391 		if (bootverbose) {
7392 			if_printf(ifp, "rx_ticks %u, rx_ticks_int %u\n",
7393 				  sc->bce_rx_ticks, sc->bce_rx_ticks_int);
7394 		}
7395 	}
7396 
7397 	sc->bce_coalchg_mask = 0;
7398 }
7399