xref: /dragonfly/sys/dev/netif/bce/if_bce.c (revision 8e1c6f81)
1 /*-
2  * Copyright (c) 2006-2007 Broadcom Corporation
3  *	David Christensen <davidch@broadcom.com>.  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  *
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. Neither the name of Broadcom Corporation nor the name of its contributors
15  *    may be used to endorse or promote products derived from this software
16  *    without specific prior written consent.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
22  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28  * THE POSSIBILITY OF SUCH DAMAGE.
29  *
30  * $FreeBSD: src/sys/dev/bce/if_bce.c,v 1.31 2007/05/16 23:34:11 davidch Exp $
31  * $DragonFly: src/sys/dev/netif/bce/if_bce.c,v 1.17 2008/08/17 04:32:32 sephe Exp $
32  */
33 
34 /*
35  * The following controllers are supported by this driver:
36  *   BCM5706C A2, A3
37  *   BCM5708C B1, B2
38  *
39  * The following controllers are not supported by this driver:
40  *   BCM5706C A0, A1
41  *   BCM5706S A0, A1, A2, A3
42  *   BCM5708C A0, B0
43  *   BCM5708S A0, B0, B1, B2
44  */
45 
46 #include "opt_bce.h"
47 #include "opt_polling.h"
48 #include "opt_ethernet.h"
49 
50 #include <sys/param.h>
51 #include <sys/bus.h>
52 #include <sys/endian.h>
53 #include <sys/kernel.h>
54 #include <sys/interrupt.h>
55 #include <sys/mbuf.h>
56 #include <sys/malloc.h>
57 #include <sys/queue.h>
58 #ifdef BCE_DEBUG
59 #include <sys/random.h>
60 #endif
61 #include <sys/rman.h>
62 #include <sys/serialize.h>
63 #include <sys/socket.h>
64 #include <sys/sockio.h>
65 #include <sys/sysctl.h>
66 
67 #include <net/bpf.h>
68 #include <net/ethernet.h>
69 #include <net/if.h>
70 #include <net/if_arp.h>
71 #include <net/if_dl.h>
72 #include <net/if_media.h>
73 #include <net/if_types.h>
74 #include <net/ifq_var.h>
75 #include <net/vlan/if_vlan_var.h>
76 #include <net/vlan/if_vlan_ether.h>
77 
78 #include <dev/netif/mii_layer/mii.h>
79 #include <dev/netif/mii_layer/miivar.h>
80 
81 #include <bus/pci/pcireg.h>
82 #include <bus/pci/pcivar.h>
83 
84 #include "miibus_if.h"
85 
86 #include <dev/netif/bce/if_bcereg.h>
87 #include <dev/netif/bce/if_bcefw.h>
88 
89 /****************************************************************************/
90 /* BCE Debug Options                                                        */
91 /****************************************************************************/
92 #ifdef BCE_DEBUG
93 
94 static uint32_t	bce_debug = BCE_WARN;
95 
96 /*
97  *          0 = Never
98  *          1 = 1 in 2,147,483,648
99  *        256 = 1 in     8,388,608
100  *       2048 = 1 in     1,048,576
101  *      65536 = 1 in        32,768
102  *    1048576 = 1 in         2,048
103  *  268435456 = 1 in             8
104  *  536870912 = 1 in             4
105  * 1073741824 = 1 in             2
106  *
107  * bce_debug_l2fhdr_status_check:
108  *     How often the l2_fhdr frame error check will fail.
109  *
110  * bce_debug_unexpected_attention:
111  *     How often the unexpected attention check will fail.
112  *
113  * bce_debug_mbuf_allocation_failure:
114  *     How often to simulate an mbuf allocation failure.
115  *
116  * bce_debug_dma_map_addr_failure:
117  *     How often to simulate a DMA mapping failure.
118  *
119  * bce_debug_bootcode_running_failure:
120  *     How often to simulate a bootcode failure.
121  */
122 static int	bce_debug_l2fhdr_status_check = 0;
123 static int	bce_debug_unexpected_attention = 0;
124 static int	bce_debug_mbuf_allocation_failure = 0;
125 static int	bce_debug_dma_map_addr_failure = 0;
126 static int	bce_debug_bootcode_running_failure = 0;
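/*
 * Illustrative note (not from the original source): the thresholds above
 * act as probabilities against a random value in the range [0, 2^31),
 * e.g. 1073741824 (2^30) fails roughly 1 time in 2.  A minimal sketch of
 * such a check, assuming a 32-bit karc4random()-style PRNG (names are
 * illustrative only):
 *
 *	if (bce_debug_mbuf_allocation_failure != 0 &&
 *	    (karc4random() >> 1) <
 *	    (uint32_t)bce_debug_mbuf_allocation_failure)
 *		goto simulate_mbuf_alloc_failure;
 */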
127 
128 #endif	/* BCE_DEBUG */
129 
130 
131 /****************************************************************************/
132 /* PCI Device ID Table                                                      */
133 /*                                                                          */
134 /* Used by bce_probe() to identify the devices supported by this driver.    */
135 /****************************************************************************/
136 #define BCE_DEVDESC_MAX		64
137 
138 static struct bce_type bce_devs[] = {
139 	/* BCM5706C Controllers and OEM boards. */
140 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706,  HP_VENDORID, 0x3101,
141 		"HP NC370T Multifunction Gigabit Server Adapter" },
142 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706,  HP_VENDORID, 0x3106,
143 		"HP NC370i Multifunction Gigabit Server Adapter" },
144 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706,  PCI_ANY_ID,  PCI_ANY_ID,
145 		"Broadcom NetXtreme II BCM5706 1000Base-T" },
146 
147 	/* BCM5706S controllers and OEM boards. */
148 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, HP_VENDORID, 0x3102,
149 		"HP NC370F Multifunction Gigabit Server Adapter" },
150 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, PCI_ANY_ID,  PCI_ANY_ID,
151 		"Broadcom NetXtreme II BCM5706 1000Base-SX" },
152 
153 	/* BCM5708C controllers and OEM boards. */
154 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708,  PCI_ANY_ID,  PCI_ANY_ID,
155 		"Broadcom NetXtreme II BCM5708 1000Base-T" },
156 
157 	/* BCM5708S controllers and OEM boards. */
158 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708S,  PCI_ANY_ID,  PCI_ANY_ID,
159 		"Broadcom NetXtreme II BCM5708S 1000Base-SX" },
160 	{ 0, 0, 0, 0, NULL }
161 };
162 
163 
164 /****************************************************************************/
165 /* Supported Flash NVRAM device data.                                       */
166 /****************************************************************************/
167 static const struct flash_spec flash_table[] =
168 {
169 	/* Slow EEPROM */
170 	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
171 	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
172 	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
173 	 "EEPROM - slow"},
174 	/* Expansion entry 0001 */
175 	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
176 	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
177 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
178 	 "Entry 0001"},
179 	/* Saifun SA25F010 (non-buffered flash) */
180 	/* strap, cfg1, & write1 need updates */
181 	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
182 	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
183 	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
184 	 "Non-buffered flash (128kB)"},
185 	/* Saifun SA25F020 (non-buffered flash) */
186 	/* strap, cfg1, & write1 need updates */
187 	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
188 	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
189 	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
190 	 "Non-buffered flash (256kB)"},
191 	/* Expansion entry 0100 */
192 	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
193 	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
194 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
195 	 "Entry 0100"},
196 	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
197 	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
198 	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
199 	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
200 	 "Entry 0101: ST M45PE10 (128kB non-buffered)"},
201 	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
202 	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
203 	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
204 	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
205 	 "Entry 0110: ST M45PE20 (256kB non-buffered)"},
206 	/* Saifun SA25F005 (non-buffered flash) */
207 	/* strap, cfg1, & write1 need updates */
208 	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
209 	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
210 	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
211 	 "Non-buffered flash (64kB)"},
212 	/* Fast EEPROM */
213 	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
214 	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
215 	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
216 	 "EEPROM - fast"},
217 	/* Expansion entry 1001 */
218 	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
219 	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
220 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
221 	 "Entry 1001"},
222 	/* Expansion entry 1010 */
223 	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
224 	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
225 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
226 	 "Entry 1010"},
227 	/* ATMEL AT45DB011B (buffered flash) */
228 	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
229 	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
230 	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
231 	 "Buffered flash (128kB)"},
232 	/* Expansion entry 1100 */
233 	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
234 	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
235 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
236 	 "Entry 1100"},
237 	/* Expansion entry 1101 */
238 	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
239 	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
240 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
241 	 "Entry 1101"},
242 	/* Atmel Expansion entry 1110 */
243 	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
244 	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
245 	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
246 	 "Entry 1110 (Atmel)"},
247 	/* ATMEL AT45DB021B (buffered flash) */
248 	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
249 	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
250 	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
251 	 "Buffered flash (256kB)"},
252 };
253 
254 
255 /****************************************************************************/
256 /* DragonFly device entry points.                                           */
257 /****************************************************************************/
258 static int	bce_probe(device_t);
259 static int	bce_attach(device_t);
260 static int	bce_detach(device_t);
261 static void	bce_shutdown(device_t);
262 
263 /****************************************************************************/
264 /* BCE Debug Data Structure Dump Routines                                   */
265 /****************************************************************************/
266 #ifdef BCE_DEBUG
267 static void	bce_dump_mbuf(struct bce_softc *, struct mbuf *);
268 static void	bce_dump_tx_mbuf_chain(struct bce_softc *, int, int);
269 static void	bce_dump_rx_mbuf_chain(struct bce_softc *, int, int);
270 static void	bce_dump_txbd(struct bce_softc *, int, struct tx_bd *);
271 static void	bce_dump_rxbd(struct bce_softc *, int, struct rx_bd *);
272 static void	bce_dump_l2fhdr(struct bce_softc *, int,
273 				struct l2_fhdr *) __unused;
274 static void	bce_dump_tx_chain(struct bce_softc *, int, int);
275 static void	bce_dump_rx_chain(struct bce_softc *, int, int);
276 static void	bce_dump_status_block(struct bce_softc *);
277 static void	bce_dump_driver_state(struct bce_softc *);
278 static void	bce_dump_stats_block(struct bce_softc *) __unused;
279 static void	bce_dump_hw_state(struct bce_softc *);
280 static void	bce_dump_txp_state(struct bce_softc *);
281 static void	bce_dump_rxp_state(struct bce_softc *) __unused;
282 static void	bce_dump_tpat_state(struct bce_softc *) __unused;
283 static void	bce_freeze_controller(struct bce_softc *) __unused;
284 static void	bce_unfreeze_controller(struct bce_softc *) __unused;
285 static void	bce_breakpoint(struct bce_softc *);
286 #endif	/* BCE_DEBUG */
287 
288 
289 /****************************************************************************/
290 /* BCE Register/Memory Access Routines                                      */
291 /****************************************************************************/
292 static uint32_t	bce_reg_rd_ind(struct bce_softc *, uint32_t);
293 static void	bce_reg_wr_ind(struct bce_softc *, uint32_t, uint32_t);
294 static void	bce_ctx_wr(struct bce_softc *, uint32_t, uint32_t, uint32_t);
295 static int	bce_miibus_read_reg(device_t, int, int);
296 static int	bce_miibus_write_reg(device_t, int, int, int);
297 static void	bce_miibus_statchg(device_t);
298 
299 
300 /****************************************************************************/
301 /* BCE NVRAM Access Routines                                                */
302 /****************************************************************************/
303 static int	bce_acquire_nvram_lock(struct bce_softc *);
304 static int	bce_release_nvram_lock(struct bce_softc *);
305 static void	bce_enable_nvram_access(struct bce_softc *);
306 static void	bce_disable_nvram_access(struct bce_softc *);
307 static int	bce_nvram_read_dword(struct bce_softc *, uint32_t, uint8_t *,
308 				     uint32_t);
309 static int	bce_init_nvram(struct bce_softc *);
310 static int	bce_nvram_read(struct bce_softc *, uint32_t, uint8_t *, int);
311 static int	bce_nvram_test(struct bce_softc *);
312 #ifdef BCE_NVRAM_WRITE_SUPPORT
313 static int	bce_enable_nvram_write(struct bce_softc *);
314 static void	bce_disable_nvram_write(struct bce_softc *);
315 static int	bce_nvram_erase_page(struct bce_softc *, uint32_t);
316 static int	bce_nvram_write_dword(struct bce_softc *, uint32_t, uint8_t *,
317 				      uint32_t);
318 static int	bce_nvram_write(struct bce_softc *, uint32_t, uint8_t *,
319 				int) __unused;
320 #endif
321 
322 /****************************************************************************/
323 /* BCE DMA Allocate/Free Routines                                           */
324 /****************************************************************************/
325 static int	bce_dma_alloc(struct bce_softc *);
326 static void	bce_dma_free(struct bce_softc *);
327 static void	bce_dma_map_addr(void *, bus_dma_segment_t *, int, int);
328 static void	bce_dma_map_mbuf(void *, bus_dma_segment_t *, int,
329 				 bus_size_t, int);
330 
331 /****************************************************************************/
332 /* BCE Firmware Synchronization and Load                                    */
333 /****************************************************************************/
334 static int	bce_fw_sync(struct bce_softc *, uint32_t);
335 static void	bce_load_rv2p_fw(struct bce_softc *, uint32_t *,
336 				 uint32_t, uint32_t);
337 static void	bce_load_cpu_fw(struct bce_softc *, struct cpu_reg *,
338 				struct fw_info *);
339 static void	bce_init_cpus(struct bce_softc *);
340 
341 static void	bce_stop(struct bce_softc *);
342 static int	bce_reset(struct bce_softc *, uint32_t);
343 static int	bce_chipinit(struct bce_softc *);
344 static int	bce_blockinit(struct bce_softc *);
345 static int	bce_newbuf_std(struct bce_softc *, struct mbuf *,
346 			       uint16_t *, uint16_t *, uint32_t *);
347 
348 static int	bce_init_tx_chain(struct bce_softc *);
349 static int	bce_init_rx_chain(struct bce_softc *);
350 static void	bce_free_rx_chain(struct bce_softc *);
351 static void	bce_free_tx_chain(struct bce_softc *);
352 
353 static int	bce_encap(struct bce_softc *, struct mbuf **);
354 static void	bce_start(struct ifnet *);
355 static int	bce_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
356 static void	bce_watchdog(struct ifnet *);
357 static int	bce_ifmedia_upd(struct ifnet *);
358 static void	bce_ifmedia_sts(struct ifnet *, struct ifmediareq *);
359 static void	bce_init(void *);
360 static void	bce_mgmt_init(struct bce_softc *);
361 
362 static void	bce_init_ctx(struct bce_softc *);
363 static void	bce_get_mac_addr(struct bce_softc *);
364 static void	bce_set_mac_addr(struct bce_softc *);
365 static void	bce_phy_intr(struct bce_softc *);
366 static void	bce_rx_intr(struct bce_softc *, int);
367 static void	bce_tx_intr(struct bce_softc *);
368 static void	bce_disable_intr(struct bce_softc *);
369 static void	bce_enable_intr(struct bce_softc *);
370 
371 #ifdef DEVICE_POLLING
372 static void	bce_poll(struct ifnet *, enum poll_cmd, int);
373 #endif
374 static void	bce_intr(void *);
375 static void	bce_set_rx_mode(struct bce_softc *);
376 static void	bce_stats_update(struct bce_softc *);
377 static void	bce_tick(void *);
378 static void	bce_tick_serialized(struct bce_softc *);
379 static void	bce_add_sysctls(struct bce_softc *);
380 
381 static void	bce_coal_change(struct bce_softc *);
382 static int	bce_sysctl_tx_bds_int(SYSCTL_HANDLER_ARGS);
383 static int	bce_sysctl_tx_bds(SYSCTL_HANDLER_ARGS);
384 static int	bce_sysctl_tx_ticks_int(SYSCTL_HANDLER_ARGS);
385 static int	bce_sysctl_tx_ticks(SYSCTL_HANDLER_ARGS);
386 static int	bce_sysctl_rx_bds_int(SYSCTL_HANDLER_ARGS);
387 static int	bce_sysctl_rx_bds(SYSCTL_HANDLER_ARGS);
388 static int	bce_sysctl_rx_ticks_int(SYSCTL_HANDLER_ARGS);
389 static int	bce_sysctl_rx_ticks(SYSCTL_HANDLER_ARGS);
390 static int	bce_sysctl_coal_change(SYSCTL_HANDLER_ARGS,
391 				       uint32_t *, uint32_t);
392 
393 static uint32_t	bce_tx_bds_int = 20;	/* bcm: 20 */
394 static uint32_t	bce_tx_bds = 24;	/* bcm: 20 */
395 static uint32_t	bce_tx_ticks_int = 80;	/* bcm: 80 */
396 static uint32_t	bce_tx_ticks = 1000;	/* bcm: 80 */
397 static uint32_t	bce_rx_bds_int = 6;	/* bcm: 6 */
398 static uint32_t	bce_rx_bds = 24;	/* bcm: 6 */
399 static uint32_t	bce_rx_ticks_int = 18;	/* bcm: 18 */
400 static uint32_t	bce_rx_ticks = 100;	/* bcm: 18 */
401 
402 TUNABLE_INT("hw.bce.tx_bds_int", &bce_tx_bds_int);
403 TUNABLE_INT("hw.bce.tx_bds", &bce_tx_bds);
404 TUNABLE_INT("hw.bce.tx_ticks_int", &bce_tx_ticks_int);
405 TUNABLE_INT("hw.bce.tx_ticks", &bce_tx_ticks);
406 TUNABLE_INT("hw.bce.rx_bds_int", &bce_rx_bds_int);
407 TUNABLE_INT("hw.bce.rx_bds", &bce_rx_bds);
408 TUNABLE_INT("hw.bce.rx_ticks_int", &bce_rx_ticks_int);
409 TUNABLE_INT("hw.bce.rx_ticks", &bce_rx_ticks);
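/*
 * Example (values are illustrative only): the coalescing defaults above
 * can be overridden at boot time via loader tunables, e.g. in
 * /boot/loader.conf:
 *
 *	hw.bce.tx_ticks="500"
 *	hw.bce.rx_bds="12"
 *
 * The *_int variants are the in-interrupt counterparts of the same
 * parameters (see bce_coal_change()).
 */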
410 
411 /****************************************************************************/
412 /* DragonFly device dispatch table.                                         */
413 /****************************************************************************/
414 static device_method_t bce_methods[] = {
415 	/* Device interface */
416 	DEVMETHOD(device_probe,		bce_probe),
417 	DEVMETHOD(device_attach,	bce_attach),
418 	DEVMETHOD(device_detach,	bce_detach),
419 	DEVMETHOD(device_shutdown,	bce_shutdown),
420 
421 	/* bus interface */
422 	DEVMETHOD(bus_print_child,	bus_generic_print_child),
423 	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
424 
425 	/* MII interface */
426 	DEVMETHOD(miibus_readreg,	bce_miibus_read_reg),
427 	DEVMETHOD(miibus_writereg,	bce_miibus_write_reg),
428 	DEVMETHOD(miibus_statchg,	bce_miibus_statchg),
429 
430 	{ 0, 0 }
431 };
432 
433 static driver_t bce_driver = {
434 	"bce",
435 	bce_methods,
436 	sizeof(struct bce_softc)
437 };
438 
439 static devclass_t bce_devclass;
440 
441 MODULE_DEPEND(bce, pci, 1, 1, 1);
442 MODULE_DEPEND(bce, ether, 1, 1, 1);
443 MODULE_DEPEND(bce, miibus, 1, 1, 1);
444 
445 DRIVER_MODULE(bce, pci, bce_driver, bce_devclass, 0, 0);
446 DRIVER_MODULE(miibus, bce, miibus_driver, miibus_devclass, 0, 0);
447 
448 
449 /****************************************************************************/
450 /* Device probe function.                                                   */
451 /*                                                                          */
452 /* Compares the device to the driver's list of supported devices and        */
453 /* reports back to the OS whether this is the right driver for the device.  */
454 /*                                                                          */
455 /* Returns:                                                                 */
456 /*   BUS_PROBE_DEFAULT on success, positive value on failure.               */
457 /****************************************************************************/
458 static int
459 bce_probe(device_t dev)
460 {
461 	struct bce_type *t;
462 	uint16_t vid, did, svid, sdid;
463 
464 	/* Get the data for the device to be probed. */
465 	vid  = pci_get_vendor(dev);
466 	did  = pci_get_device(dev);
467 	svid = pci_get_subvendor(dev);
468 	sdid = pci_get_subdevice(dev);
469 
470 	/* Look through the list of known devices for a match. */
471 	for (t = bce_devs; t->bce_name != NULL; ++t) {
472 		if (vid == t->bce_vid && did == t->bce_did &&
473 		    (svid == t->bce_svid || t->bce_svid == PCI_ANY_ID) &&
474 		    (sdid == t->bce_sdid || t->bce_sdid == PCI_ANY_ID)) {
475 		    	uint32_t revid = pci_read_config(dev, PCIR_REVID, 4);
476 			char *descbuf;
477 
478 			descbuf = kmalloc(BCE_DEVDESC_MAX, M_TEMP, M_WAITOK);
479 
480 			/* Print out the device identity. */
481 			ksnprintf(descbuf, BCE_DEVDESC_MAX, "%s (%c%d)",
482 				  t->bce_name,
483 				  ((revid & 0xf0) >> 4) + 'A', revid & 0xf);
484 
485 			device_set_desc_copy(dev, descbuf);
486 			kfree(descbuf, M_TEMP);
487 			return 0;
488 		}
489 	}
490 	return ENXIO;
491 }
492 
493 
494 /****************************************************************************/
495 /* Device attach function.                                                  */
496 /*                                                                          */
497 /* Allocates device resources, performs secondary chip identification,      */
498 /* resets and initializes the hardware, and initializes driver instance     */
499 /* variables.                                                               */
500 /*                                                                          */
501 /* Returns:                                                                 */
502 /*   0 on success, positive value on failure.                               */
503 /****************************************************************************/
504 static int
505 bce_attach(device_t dev)
506 {
507 	struct bce_softc *sc = device_get_softc(dev);
508 	struct ifnet *ifp = &sc->arpcom.ac_if;
509 	uint32_t val;
510 	int rid, rc = 0;
511 #ifdef notyet
512 	int count;
513 #endif
514 
515 	sc->bce_dev = dev;
516 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
517 
518 	pci_enable_busmaster(dev);
519 
520 	/* Allocate PCI memory resources. */
521 	rid = PCIR_BAR(0);
522 	sc->bce_res_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
523 						 RF_ACTIVE | PCI_RF_DENSE);
524 	if (sc->bce_res_mem == NULL) {
525 		device_printf(dev, "PCI memory allocation failed\n");
526 		return ENXIO;
527 	}
528 	sc->bce_btag = rman_get_bustag(sc->bce_res_mem);
529 	sc->bce_bhandle = rman_get_bushandle(sc->bce_res_mem);
530 
531 	/* Allocate PCI IRQ resources. */
532 #ifdef notyet
533 	count = pci_msi_count(dev);
534 	if (count == 1 && pci_alloc_msi(dev, &count) == 0) {
535 		rid = 1;
536 		sc->bce_flags |= BCE_USING_MSI_FLAG;
537 	} else
538 #endif
539 	rid = 0;
540 	sc->bce_res_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
541 						 RF_SHAREABLE | RF_ACTIVE);
542 	if (sc->bce_res_irq == NULL) {
543 		device_printf(dev, "PCI map interrupt failed\n");
544 		rc = ENXIO;
545 		goto fail;
546 	}
547 
548 	/*
549 	 * Configure byte swap and enable indirect register access.
550 	 * Rely on CPU to do target byte swapping on big endian systems.
551 	 * Accesses to registers outside of PCI configuration space are not
552 	 * valid until this is done.
553 	 */
554 	pci_write_config(dev, BCE_PCICFG_MISC_CONFIG,
555 			 BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
556 			 BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP, 4);
557 
558 	/* Save ASIC revision info. */
559 	sc->bce_chipid =  REG_RD(sc, BCE_MISC_ID);
560 
561 	/* Weed out any non-production controller revisions. */
562 	switch(BCE_CHIP_ID(sc)) {
563 	case BCE_CHIP_ID_5706_A0:
564 	case BCE_CHIP_ID_5706_A1:
565 	case BCE_CHIP_ID_5708_A0:
566 	case BCE_CHIP_ID_5708_B0:
567 		device_printf(dev, "Unsupported chip id 0x%08x!\n",
568 			      BCE_CHIP_ID(sc));
569 		rc = ENODEV;
570 		goto fail;
571 	}
572 
573 	/*
574 	 * The embedded PCIe to PCI-X bridge (EPB)
575 	 * in the 5708 cannot address memory above
576 	 * 40 bits (E7_5708CB1_23043 & E6_5708SB1_23043).
577 	 */
578 	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708)
579 		sc->max_bus_addr = BCE_BUS_SPACE_MAXADDR;
580 	else
581 		sc->max_bus_addr = BUS_SPACE_MAXADDR;
582 
583 	/*
584 	 * Find the base address for shared memory access.
585 	 * Newer versions of bootcode use a signature and offset
586 	 * while older versions use a fixed address.
587 	 */
588 	val = REG_RD_IND(sc, BCE_SHM_HDR_SIGNATURE);
589 	if ((val & BCE_SHM_HDR_SIGNATURE_SIG_MASK) == BCE_SHM_HDR_SIGNATURE_SIG)
590 		sc->bce_shmem_base = REG_RD_IND(sc, BCE_SHM_HDR_ADDR_0);
591 	else
592 		sc->bce_shmem_base = HOST_VIEW_SHMEM_BASE;
593 
594 	DBPRINT(sc, BCE_INFO, "bce_shmem_base = 0x%08X\n", sc->bce_shmem_base);
595 
596 	/* Get PCI bus information (speed and type). */
597 	val = REG_RD(sc, BCE_PCICFG_MISC_STATUS);
598 	if (val & BCE_PCICFG_MISC_STATUS_PCIX_DET) {
599 		uint32_t clkreg;
600 
601 		sc->bce_flags |= BCE_PCIX_FLAG;
602 
603 		clkreg = REG_RD(sc, BCE_PCICFG_PCI_CLOCK_CONTROL_BITS) &
604 			 BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
605 		switch (clkreg) {
606 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
607 			sc->bus_speed_mhz = 133;
608 			break;
609 
610 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
611 			sc->bus_speed_mhz = 100;
612 			break;
613 
614 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
615 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
616 			sc->bus_speed_mhz = 66;
617 			break;
618 
619 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
620 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
621 			sc->bus_speed_mhz = 50;
622 			break;
623 
624 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
625 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
626 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
627 			sc->bus_speed_mhz = 33;
628 			break;
629 		}
630 	} else {
631 		if (val & BCE_PCICFG_MISC_STATUS_M66EN)
632 			sc->bus_speed_mhz = 66;
633 		else
634 			sc->bus_speed_mhz = 33;
635 	}
636 
637 	if (val & BCE_PCICFG_MISC_STATUS_32BIT_DET)
638 		sc->bce_flags |= BCE_PCI_32BIT_FLAG;
639 
640 	device_printf(dev, "ASIC ID 0x%08X; Revision (%c%d); PCI%s %s %dMHz\n",
641 		      sc->bce_chipid,
642 		      ((BCE_CHIP_ID(sc) & 0xf000) >> 12) + 'A',
643 		      (BCE_CHIP_ID(sc) & 0x0ff0) >> 4,
644 		      (sc->bce_flags & BCE_PCIX_FLAG) ? "-X" : "",
645 		      (sc->bce_flags & BCE_PCI_32BIT_FLAG) ?
646 		      "32-bit" : "64-bit", sc->bus_speed_mhz);
647 
648 	/* Reset the controller. */
649 	rc = bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
650 	if (rc != 0)
651 		goto fail;
652 
653 	/* Initialize the controller. */
654 	rc = bce_chipinit(sc);
655 	if (rc != 0) {
656 		device_printf(dev, "Controller initialization failed!\n");
657 		goto fail;
658 	}
659 
660 	/* Perform NVRAM test. */
661 	rc = bce_nvram_test(sc);
662 	if (rc != 0) {
663 		device_printf(dev, "NVRAM test failed!\n");
664 		goto fail;
665 	}
666 
667 	/* Fetch the permanent Ethernet MAC address. */
668 	bce_get_mac_addr(sc);
669 
670 	/*
671 	 * Trip points control how many BDs
672 	 * should be ready before generating an
673 	 * interrupt, while ticks control how long
674 	 * a BD can sit in the chain before
675 	 * generating an interrupt.  Set the default
676 	 * values for the RX and TX rings.
677 	 */
678 
679 #ifdef BCE_DEBUG
680 	/* Force more frequent interrupts. */
681 	sc->bce_tx_quick_cons_trip_int = 1;
682 	sc->bce_tx_quick_cons_trip     = 1;
683 	sc->bce_tx_ticks_int           = 0;
684 	sc->bce_tx_ticks               = 0;
685 
686 	sc->bce_rx_quick_cons_trip_int = 1;
687 	sc->bce_rx_quick_cons_trip     = 1;
688 	sc->bce_rx_ticks_int           = 0;
689 	sc->bce_rx_ticks               = 0;
690 #else
691 	sc->bce_tx_quick_cons_trip_int = bce_tx_bds_int;
692 	sc->bce_tx_quick_cons_trip     = bce_tx_bds;
693 	sc->bce_tx_ticks_int           = bce_tx_ticks_int;
694 	sc->bce_tx_ticks               = bce_tx_ticks;
695 
696 	sc->bce_rx_quick_cons_trip_int = bce_rx_bds_int;
697 	sc->bce_rx_quick_cons_trip     = bce_rx_bds;
698 	sc->bce_rx_ticks_int           = bce_rx_ticks_int;
699 	sc->bce_rx_ticks               = bce_rx_ticks;
700 #endif
701 
702 	/* Update statistics once every second. */
703 	sc->bce_stats_ticks = 1000000 & 0xffff00;
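	/*
	 * Note (illustrative): 1000000 us masked with 0xffff00 is
	 * 0x0f4200 (999936 us), i.e. just under one second; the mask
	 * clears the low 8 bits, presumably because they are reserved
	 * in the statistics ticks register.
	 */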
704 
705 	/*
706 	 * The copper based NetXtreme II controllers
707 	 * use an integrated PHY at address 1 while
708 	 * the SerDes controllers use a PHY at
709 	 * address 2.
710 	 */
711 	sc->bce_phy_addr = 1;
712 
713 	if (BCE_CHIP_BOND_ID(sc) & BCE_CHIP_BOND_ID_SERDES_BIT) {
714 		sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
715 		sc->bce_flags |= BCE_NO_WOL_FLAG;
716 		if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708) {
717 			sc->bce_phy_addr = 2;
718 			val = REG_RD_IND(sc, sc->bce_shmem_base +
719 					 BCE_SHARED_HW_CFG_CONFIG);
720 			if (val & BCE_SHARED_HW_CFG_PHY_2_5G)
721 				sc->bce_phy_flags |= BCE_PHY_2_5G_CAPABLE_FLAG;
722 		}
723 	}
724 
725 	/* Allocate DMA memory resources. */
726 	rc = bce_dma_alloc(sc);
727 	if (rc != 0) {
728 		device_printf(dev, "DMA resource allocation failed!\n");
729 		goto fail;
730 	}
731 
732 	/* Initialize the ifnet interface. */
733 	ifp->if_softc = sc;
734 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
735 	ifp->if_ioctl = bce_ioctl;
736 	ifp->if_start = bce_start;
737 	ifp->if_init = bce_init;
738 	ifp->if_watchdog = bce_watchdog;
739 #ifdef DEVICE_POLLING
740 	ifp->if_poll = bce_poll;
741 #endif
742 	ifp->if_mtu = ETHERMTU;
743 	ifp->if_hwassist = BCE_IF_HWASSIST;
744 	ifp->if_capabilities = BCE_IF_CAPABILITIES;
745 	ifp->if_capenable = ifp->if_capabilities;
746 	ifq_set_maxlen(&ifp->if_snd, USABLE_TX_BD);
747 	ifq_set_ready(&ifp->if_snd);
748 
749 	if (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG)
750 		ifp->if_baudrate = IF_Gbps(2.5);
751 	else
752 		ifp->if_baudrate = IF_Gbps(1);
753 
754 	/* Assume a standard 1500 byte MTU size for mbuf allocations. */
755 	sc->mbuf_alloc_size  = MCLBYTES;
756 
757 	/* Look for our PHY. */
758 	rc = mii_phy_probe(dev, &sc->bce_miibus,
759 			   bce_ifmedia_upd, bce_ifmedia_sts);
760 	if (rc != 0) {
761 		device_printf(dev, "PHY probe failed!\n");
762 		goto fail;
763 	}
764 
765 	/* Attach to the Ethernet interface list. */
766 	ether_ifattach(ifp, sc->eaddr, NULL);
767 
768 	callout_init(&sc->bce_stat_ch);
769 
770 	/* Hookup IRQ last. */
771 	rc = bus_setup_intr(dev, sc->bce_res_irq, INTR_MPSAFE, bce_intr, sc,
772 			    &sc->bce_intrhand, ifp->if_serializer);
773 	if (rc != 0) {
774 		device_printf(dev, "Failed to setup IRQ!\n");
775 		ether_ifdetach(ifp);
776 		goto fail;
777 	}
778 
779 	ifp->if_cpuid = ithread_cpuid(rman_get_start(sc->bce_res_irq));
780 	KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus);
781 
782 	/* Print some important debugging info. */
783 	DBRUN(BCE_INFO, bce_dump_driver_state(sc));
784 
785 	/* Add the supported sysctls to the kernel. */
786 	bce_add_sysctls(sc);
787 
788 	/* Get the firmware running so IPMI still works */
789 	bce_mgmt_init(sc);
790 
791 	return 0;
792 fail:
793 	bce_detach(dev);
794 	return(rc);
795 }
796 
797 
798 /****************************************************************************/
799 /* Device detach function.                                                  */
800 /*                                                                          */
801 /* Stops the controller, resets the controller, and releases resources.     */
802 /*                                                                          */
803 /* Returns:                                                                 */
804 /*   0 on success, positive value on failure.                               */
805 /****************************************************************************/
806 static int
807 bce_detach(device_t dev)
808 {
809 	struct bce_softc *sc = device_get_softc(dev);
810 
811 	if (device_is_attached(dev)) {
812 		struct ifnet *ifp = &sc->arpcom.ac_if;
813 
814 		/* Stop and reset the controller. */
815 		lwkt_serialize_enter(ifp->if_serializer);
816 		bce_stop(sc);
817 		bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
818 		bus_teardown_intr(dev, sc->bce_res_irq, sc->bce_intrhand);
819 		lwkt_serialize_exit(ifp->if_serializer);
820 
821 		ether_ifdetach(ifp);
822 	}
823 
824 	/* If we have a child device on the MII bus remove it too. */
825 	if (sc->bce_miibus)
826 		device_delete_child(dev, sc->bce_miibus);
827 	bus_generic_detach(dev);
828 
829 	if (sc->bce_res_irq != NULL) {
830 		bus_release_resource(dev, SYS_RES_IRQ,
831 			sc->bce_flags & BCE_USING_MSI_FLAG ? 1 : 0,
832 			sc->bce_res_irq);
833 	}
834 
835 #ifdef notyet
836 	if (sc->bce_flags & BCE_USING_MSI_FLAG)
837 		pci_release_msi(dev);
838 #endif
839 
840 	if (sc->bce_res_mem != NULL) {
841 		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0),
842 				     sc->bce_res_mem);
843 	}
844 
845 	bce_dma_free(sc);
846 
847 	if (sc->bce_sysctl_tree != NULL)
848 		sysctl_ctx_free(&sc->bce_sysctl_ctx);
849 
850 	return 0;
851 }
852 
853 
854 /****************************************************************************/
855 /* Device shutdown function.                                                */
856 /*                                                                          */
857 /* Stops and resets the controller.                                         */
858 /*                                                                          */
859 /* Returns:                                                                 */
860 /*   Nothing                                                                */
861 /****************************************************************************/
862 static void
863 bce_shutdown(device_t dev)
864 {
865 	struct bce_softc *sc = device_get_softc(dev);
866 	struct ifnet *ifp = &sc->arpcom.ac_if;
867 
868 	lwkt_serialize_enter(ifp->if_serializer);
869 	bce_stop(sc);
870 	bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
871 	lwkt_serialize_exit(ifp->if_serializer);
872 }
873 
874 
875 /****************************************************************************/
876 /* Indirect register read.                                                  */
877 /*                                                                          */
878 /* Reads NetXtreme II registers using an index/data register pair in PCI    */
879 /* configuration space.  Using this mechanism avoids issues with posted     */
880 /* reads but is much slower than memory-mapped I/O.                         */
881 /*                                                                          */
882 /* Returns:                                                                 */
883 /*   The value of the register.                                             */
884 /****************************************************************************/
885 static uint32_t
886 bce_reg_rd_ind(struct bce_softc *sc, uint32_t offset)
887 {
888 	device_t dev = sc->bce_dev;
889 
890 	pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4);
891 #ifdef BCE_DEBUG
892 	{
893 		uint32_t val;
894 		val = pci_read_config(dev, BCE_PCICFG_REG_WINDOW, 4);
895 		DBPRINT(sc, BCE_EXCESSIVE,
896 			"%s(); offset = 0x%08X, val = 0x%08X\n",
897 			__func__, offset, val);
898 		return val;
899 	}
900 #else
901 	return pci_read_config(dev, BCE_PCICFG_REG_WINDOW, 4);
902 #endif
903 }
904 
905 
906 /****************************************************************************/
907 /* Indirect register write.                                                 */
908 /*                                                                          */
909 /* Writes NetXtreme II registers using an index/data register pair in PCI   */
910 /* configuration space.  Using this mechanism avoids issues with posted     */
911 /* writes but is much slower than memory-mapped I/O.                        */
912 /*                                                                          */
913 /* Returns:                                                                 */
914 /*   Nothing.                                                               */
915 /****************************************************************************/
916 static void
917 bce_reg_wr_ind(struct bce_softc *sc, uint32_t offset, uint32_t val)
918 {
919 	device_t dev = sc->bce_dev;
920 
921 	DBPRINT(sc, BCE_EXCESSIVE, "%s(); offset = 0x%08X, val = 0x%08X\n",
922 		__func__, offset, val);
923 
924 	pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4);
925 	pci_write_config(dev, BCE_PCICFG_REG_WINDOW, val, 4);
926 }
927 
928 
929 /****************************************************************************/
930 /* Context memory write.                                                    */
931 /*                                                                          */
932 /* The NetXtreme II controller uses context memory to track connection      */
933 /* information for L2 and higher network protocols.                         */
934 /*                                                                          */
935 /* Returns:                                                                 */
936 /*   Nothing.                                                               */
937 /****************************************************************************/
938 static void
939 bce_ctx_wr(struct bce_softc *sc, uint32_t cid_addr, uint32_t offset,
940 	   uint32_t val)
941 {
942 	DBPRINT(sc, BCE_EXCESSIVE, "%s(); cid_addr = 0x%08X, offset = 0x%08X, "
943 		"val = 0x%08X\n", __func__, cid_addr, offset, val);
944 
945 	offset += cid_addr;
946 	REG_WR(sc, BCE_CTX_DATA_ADR, offset);
947 	REG_WR(sc, BCE_CTX_DATA, val);
948 }
949 
950 
951 /****************************************************************************/
952 /* PHY register read.                                                       */
953 /*                                                                          */
954 /* Implements register reads on the MII bus.                                */
955 /*                                                                          */
956 /* Returns:                                                                 */
957 /*   The value of the register.                                             */
958 /****************************************************************************/
959 static int
960 bce_miibus_read_reg(device_t dev, int phy, int reg)
961 {
962 	struct bce_softc *sc = device_get_softc(dev);
963 	uint32_t val;
964 	int i;
965 
966 	/* Make sure we are accessing the correct PHY address. */
967 	if (phy != sc->bce_phy_addr) {
968 		DBPRINT(sc, BCE_VERBOSE,
969 			"Invalid PHY address %d for PHY read!\n", phy);
970 		return 0;
971 	}
972 
973 	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
974 		val = REG_RD(sc, BCE_EMAC_MDIO_MODE);
975 		val &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL;
976 
977 		REG_WR(sc, BCE_EMAC_MDIO_MODE, val);
978 		REG_RD(sc, BCE_EMAC_MDIO_MODE);
979 
980 		DELAY(40);
981 	}
982 
983 	val = BCE_MIPHY(phy) | BCE_MIREG(reg) |
984 	      BCE_EMAC_MDIO_COMM_COMMAND_READ | BCE_EMAC_MDIO_COMM_DISEXT |
985 	      BCE_EMAC_MDIO_COMM_START_BUSY;
986 	REG_WR(sc, BCE_EMAC_MDIO_COMM, val);
987 
988 	for (i = 0; i < BCE_PHY_TIMEOUT; i++) {
989 		DELAY(10);
990 
991 		val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
992 		if (!(val & BCE_EMAC_MDIO_COMM_START_BUSY)) {
993 			DELAY(5);
994 
995 			val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
996 			val &= BCE_EMAC_MDIO_COMM_DATA;
997 			break;
998 		}
999 	}
1000 
1001 	if (val & BCE_EMAC_MDIO_COMM_START_BUSY) {
1002 		if_printf(&sc->arpcom.ac_if,
1003 			  "Error: PHY read timeout! phy = %d, reg = 0x%04X\n",
1004 			  phy, reg);
1005 		val = 0x0;
1006 	} else {
1007 		val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
1008 	}
1009 
1010 	DBPRINT(sc, BCE_EXCESSIVE,
1011 		"%s(): phy = %d, reg = 0x%04X, val = 0x%04X\n",
1012 		__func__, phy, (uint16_t)reg & 0xffff, (uint16_t) val & 0xffff);
1013 
1014 	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1015 		val = REG_RD(sc, BCE_EMAC_MDIO_MODE);
1016 		val |= BCE_EMAC_MDIO_MODE_AUTO_POLL;
1017 
1018 		REG_WR(sc, BCE_EMAC_MDIO_MODE, val);
1019 		REG_RD(sc, BCE_EMAC_MDIO_MODE);
1020 
1021 		DELAY(40);
1022 	}
1023 	return (val & 0xffff);
1024 }
1025 
1026 
1027 /****************************************************************************/
1028 /* PHY register write.                                                      */
1029 /*                                                                          */
1030 /* Implements register writes on the MII bus.                               */
1031 /*                                                                          */
1032 /* Returns:                                                                 */
1033 /*   0 on success.                                                          */
1034 /****************************************************************************/
1035 static int
1036 bce_miibus_write_reg(device_t dev, int phy, int reg, int val)
1037 {
1038 	struct bce_softc *sc = device_get_softc(dev);
1039 	uint32_t val1;
1040 	int i;
1041 
1042 	/* Make sure we are accessing the correct PHY address. */
1043 	if (phy != sc->bce_phy_addr) {
1044 		DBPRINT(sc, BCE_WARN,
1045 			"Invalid PHY address %d for PHY write!\n", phy);
1046 		return(0);
1047 	}
1048 
1049 	DBPRINT(sc, BCE_EXCESSIVE,
1050 		"%s(): phy = %d, reg = 0x%04X, val = 0x%04X\n",
1051 		__func__, phy, (uint16_t)(reg & 0xffff),
1052 		(uint16_t)(val & 0xffff));
1053 
1054 	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1055 		val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE);
1056 		val1 &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL;
1057 
1058 		REG_WR(sc, BCE_EMAC_MDIO_MODE, val1);
1059 		REG_RD(sc, BCE_EMAC_MDIO_MODE);
1060 
1061 		DELAY(40);
1062 	}
1063 
1064 	val1 = BCE_MIPHY(phy) | BCE_MIREG(reg) | val |
1065 		BCE_EMAC_MDIO_COMM_COMMAND_WRITE |
1066 		BCE_EMAC_MDIO_COMM_START_BUSY | BCE_EMAC_MDIO_COMM_DISEXT;
1067 	REG_WR(sc, BCE_EMAC_MDIO_COMM, val1);
1068 
1069 	for (i = 0; i < BCE_PHY_TIMEOUT; i++) {
1070 		DELAY(10);
1071 
1072 		val1 = REG_RD(sc, BCE_EMAC_MDIO_COMM);
1073 		if (!(val1 & BCE_EMAC_MDIO_COMM_START_BUSY)) {
1074 			DELAY(5);
1075 			break;
1076 		}
1077 	}
1078 
1079 	if (val1 & BCE_EMAC_MDIO_COMM_START_BUSY)
1080 		if_printf(&sc->arpcom.ac_if, "PHY write timeout!\n");
1081 
1082 	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1083 		val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE);
1084 		val1 |= BCE_EMAC_MDIO_MODE_AUTO_POLL;
1085 
1086 		REG_WR(sc, BCE_EMAC_MDIO_MODE, val1);
1087 		REG_RD(sc, BCE_EMAC_MDIO_MODE);
1088 
1089 		DELAY(40);
1090 	}
1091 	return 0;
1092 }
1093 
1094 
1095 /****************************************************************************/
1096 /* MII bus status change.                                                   */
1097 /*                                                                          */
1098 /* Called by the MII bus driver when the PHY establishes link to set the    */
1099 /* MAC interface registers.                                                 */
1100 /*                                                                          */
1101 /* Returns:                                                                 */
1102 /*   Nothing.                                                               */
1103 /****************************************************************************/
1104 static void
1105 bce_miibus_statchg(device_t dev)
1106 {
1107 	struct bce_softc *sc = device_get_softc(dev);
1108 	struct mii_data *mii = device_get_softc(sc->bce_miibus);
1109 
1110 	DBPRINT(sc, BCE_INFO, "mii_media_active = 0x%08X\n",
1111 		mii->mii_media_active);
1112 
1113 #ifdef BCE_DEBUG
1114 	/* Decode the interface media flags. */
1115 	if_printf(&sc->arpcom.ac_if, "Media: ( ");
1116 	switch(IFM_TYPE(mii->mii_media_active)) {
1117 	case IFM_ETHER:
1118 		kprintf("Ethernet )");
1119 		break;
1120 	default:
1121 		kprintf("Unknown )");
1122 		break;
1123 	}
1124 
1125 	kprintf(" Media Options: ( ");
1126 	switch(IFM_SUBTYPE(mii->mii_media_active)) {
1127 	case IFM_AUTO:
1128 		kprintf("Autoselect )");
1129 		break;
1130 	case IFM_MANUAL:
1131 		kprintf("Manual )");
1132 		break;
1133 	case IFM_NONE:
1134 		kprintf("None )");
1135 		break;
1136 	case IFM_10_T:
1137 		kprintf("10Base-T )");
1138 		break;
1139 	case IFM_100_TX:
1140 		kprintf("100Base-TX )");
1141 		break;
1142 	case IFM_1000_SX:
1143 		kprintf("1000Base-SX )");
1144 		break;
1145 	case IFM_1000_T:
1146 		kprintf("1000Base-T )");
1147 		break;
1148 	default:
1149 		kprintf("Other )");
1150 		break;
1151 	}
1152 
1153 	kprintf(" Global Options: (");
1154 	if (mii->mii_media_active & IFM_FDX)
1155 		kprintf(" FullDuplex");
1156 	if (mii->mii_media_active & IFM_HDX)
1157 		kprintf(" HalfDuplex");
1158 	if (mii->mii_media_active & IFM_LOOP)
1159 		kprintf(" Loopback");
1160 	if (mii->mii_media_active & IFM_FLAG0)
1161 		kprintf(" Flag0");
1162 	if (mii->mii_media_active & IFM_FLAG1)
1163 		kprintf(" Flag1");
1164 	if (mii->mii_media_active & IFM_FLAG2)
1165 		kprintf(" Flag2");
1166 	kprintf(" )\n");
1167 #endif
1168 
1169 	BCE_CLRBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT);
1170 
1171 	/*
1172 	 * Set MII or GMII interface based on the speed negotiated
1173 	 * by the PHY.
1174 	 */
1175 	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
1176 	    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) {
1177 		DBPRINT(sc, BCE_INFO, "Setting GMII interface.\n");
1178 		BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT_GMII);
1179 	} else {
1180 		DBPRINT(sc, BCE_INFO, "Setting MII interface.\n");
1181 		BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT_MII);
1182 	}
1183 
1184 	/*
1185 	 * Set half or full duplex based on the duplex mode negotiated
1186 	 * by the PHY.
1187 	 */
1188 	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
1189 		DBPRINT(sc, BCE_INFO, "Setting Full-Duplex interface.\n");
1190 		BCE_CLRBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_HALF_DUPLEX);
1191 	} else {
1192 		DBPRINT(sc, BCE_INFO, "Setting Half-Duplex interface.\n");
1193 		BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_HALF_DUPLEX);
1194 	}
1195 }
1196 
1197 
1198 /****************************************************************************/
1199 /* Acquire NVRAM lock.                                                      */
1200 /*                                                                          */
1201 /* Before the NVRAM can be accessed the caller must acquire an NVRAM lock.  */
1202 /* Lock 1 is used by the firmware and lock 2 is used by the driver; the     */
1203 /* remaining locks are reserved.                                            */
1204 /*                                                                          */
1205 /* Returns:                                                                 */
1206 /*   0 on success, positive value on failure.                               */
1207 /****************************************************************************/
1208 static int
1209 bce_acquire_nvram_lock(struct bce_softc *sc)
1210 {
1211 	uint32_t val;
1212 	int j;
1213 
1214 	DBPRINT(sc, BCE_VERBOSE, "Acquiring NVRAM lock.\n");
1215 
1216 	/* Request access to the flash interface. */
1217 	REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_SET2);
1218 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1219 		val = REG_RD(sc, BCE_NVM_SW_ARB);
1220 		if (val & BCE_NVM_SW_ARB_ARB_ARB2)
1221 			break;
1222 
1223 		DELAY(5);
1224 	}
1225 
1226 	if (j >= NVRAM_TIMEOUT_COUNT) {
1227 		DBPRINT(sc, BCE_WARN, "Timeout acquiring NVRAM lock!\n");
1228 		return EBUSY;
1229 	}
1230 	return 0;
1231 }
1232 
1233 
1234 /****************************************************************************/
1235 /* Release NVRAM lock.                                                      */
1236 /*                                                                          */
1237 /* When the caller is finished accessing NVRAM the lock must be released.   */
1238 /* Lock 1 is used by the firmware and lock 2 is used by the driver; the     */
1239 /* remaining locks are reserved.                                            */
1240 /*                                                                          */
1241 /* Returns:                                                                 */
1242 /*   0 on success, positive value on failure.                               */
1243 /****************************************************************************/
1244 static int
1245 bce_release_nvram_lock(struct bce_softc *sc)
1246 {
1247 	int j;
1248 	uint32_t val;
1249 
1250 	DBPRINT(sc, BCE_VERBOSE, "Releasing NVRAM lock.\n");
1251 
1252 	/*
1253 	 * Relinquish nvram interface.
1254 	 */
1255 	REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_CLR2);
1256 
1257 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1258 		val = REG_RD(sc, BCE_NVM_SW_ARB);
1259 		if (!(val & BCE_NVM_SW_ARB_ARB_ARB2))
1260 			break;
1261 
1262 		DELAY(5);
1263 	}
1264 
1265 	if (j >= NVRAM_TIMEOUT_COUNT) {
1266 		DBPRINT(sc, BCE_WARN, "Timeout releasing NVRAM lock!\n");
1267 		return EBUSY;
1268 	}
1269 	return 0;
1270 }
1271 
1272 
1273 #ifdef BCE_NVRAM_WRITE_SUPPORT
1274 /****************************************************************************/
1275 /* Enable NVRAM write access.                                               */
1276 /*                                                                          */
1277 /* Before writing to NVRAM the caller must enable NVRAM writes.             */
1278 /*                                                                          */
1279 /* Returns:                                                                 */
1280 /*   0 on success, positive value on failure.                               */
1281 /****************************************************************************/
1282 static int
1283 bce_enable_nvram_write(struct bce_softc *sc)
1284 {
1285 	uint32_t val;
1286 
1287 	DBPRINT(sc, BCE_VERBOSE, "Enabling NVRAM write.\n");
1288 
1289 	val = REG_RD(sc, BCE_MISC_CFG);
1290 	REG_WR(sc, BCE_MISC_CFG, val | BCE_MISC_CFG_NVM_WR_EN_PCI);
1291 
1292 	if (!sc->bce_flash_info->buffered) {
1293 		int j;
1294 
1295 		REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
1296 		REG_WR(sc, BCE_NVM_COMMAND,
1297 		       BCE_NVM_COMMAND_WREN | BCE_NVM_COMMAND_DOIT);
1298 
1299 		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1300 			DELAY(5);
1301 
1302 			val = REG_RD(sc, BCE_NVM_COMMAND);
1303 			if (val & BCE_NVM_COMMAND_DONE)
1304 				break;
1305 		}
1306 
1307 		if (j >= NVRAM_TIMEOUT_COUNT) {
1308 			DBPRINT(sc, BCE_WARN, "Timeout writing NVRAM!\n");
1309 			return EBUSY;
1310 		}
1311 	}
1312 	return 0;
1313 }
1314 
1315 
1316 /****************************************************************************/
1317 /* Disable NVRAM write access.                                              */
1318 /*                                                                          */
1319 /* When the caller is finished writing to NVRAM write access must be        */
1320 /* disabled.                                                                */
1321 /*                                                                          */
1322 /* Returns:                                                                 */
1323 /*   Nothing.                                                               */
1324 /****************************************************************************/
1325 static void
1326 bce_disable_nvram_write(struct bce_softc *sc)
1327 {
1328 	uint32_t val;
1329 
1330 	DBPRINT(sc, BCE_VERBOSE, "Disabling NVRAM write.\n");
1331 
1332 	val = REG_RD(sc, BCE_MISC_CFG);
1333 	REG_WR(sc, BCE_MISC_CFG, val & ~BCE_MISC_CFG_NVM_WR_EN);
1334 }
1335 #endif	/* BCE_NVRAM_WRITE_SUPPORT */
1336 
1337 
1338 /****************************************************************************/
1339 /* Enable NVRAM access.                                                     */
1340 /*                                                                          */
1341 /* Before accessing NVRAM for read or write operations the caller must      */
1342 /* enable NVRAM access.                                                      */
1343 /*                                                                          */
1344 /* Returns:                                                                 */
1345 /*   Nothing.                                                               */
1346 /****************************************************************************/
1347 static void
1348 bce_enable_nvram_access(struct bce_softc *sc)
1349 {
1350 	uint32_t val;
1351 
1352 	DBPRINT(sc, BCE_VERBOSE, "Enabling NVRAM access.\n");
1353 
1354 	val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE);
1355 	/* Enable both bits, even on read. */
1356 	REG_WR(sc, BCE_NVM_ACCESS_ENABLE,
1357 	       val | BCE_NVM_ACCESS_ENABLE_EN | BCE_NVM_ACCESS_ENABLE_WR_EN);
1358 }
1359 
1360 
1361 /****************************************************************************/
1362 /* Disable NVRAM access.                                                    */
1363 /*                                                                          */
1364 /* When the caller is finished accessing NVRAM access must be disabled.     */
1365 /*                                                                          */
1366 /* Returns:                                                                 */
1367 /*   Nothing.                                                               */
1368 /****************************************************************************/
1369 static void
1370 bce_disable_nvram_access(struct bce_softc *sc)
1371 {
1372 	uint32_t val;
1373 
1374 	DBPRINT(sc, BCE_VERBOSE, "Disabling NVRAM access.\n");
1375 
1376 	val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE);
1377 
1378 	/* Disable both bits, even after read. */
1379 	REG_WR(sc, BCE_NVM_ACCESS_ENABLE,
1380 	       val & ~(BCE_NVM_ACCESS_ENABLE_EN | BCE_NVM_ACCESS_ENABLE_WR_EN));
1381 }
1382 
1383 
1384 #ifdef BCE_NVRAM_WRITE_SUPPORT
1385 /****************************************************************************/
1386 /* Erase NVRAM page before writing.                                         */
1387 /*                                                                          */
1388 /* Non-buffered flash parts require that a page be erased before it is      */
1389 /* written.                                                                 */
1390 /*                                                                          */
1391 /* Returns:                                                                 */
1392 /*   0 on success, positive value on failure.                               */
1393 /****************************************************************************/
1394 static int
1395 bce_nvram_erase_page(struct bce_softc *sc, uint32_t offset)
1396 {
1397 	uint32_t cmd;
1398 	int j;
1399 
1400 	/* Buffered flash doesn't require an erase. */
1401 	if (sc->bce_flash_info->buffered)
1402 		return 0;
1403 
1404 	DBPRINT(sc, BCE_VERBOSE, "Erasing NVRAM page.\n");
1405 
1406 	/* Build an erase command. */
1407 	cmd = BCE_NVM_COMMAND_ERASE | BCE_NVM_COMMAND_WR |
1408 	      BCE_NVM_COMMAND_DOIT;
1409 
1410 	/*
1411 	 * Clear the DONE bit separately, set the NVRAM address to erase,
1412 	 * and issue the erase command.
1413 	 */
1414 	REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
1415 	REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
1416 	REG_WR(sc, BCE_NVM_COMMAND, cmd);
1417 
1418 	/* Wait for completion. */
1419 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1420 		uint32_t val;
1421 
1422 		DELAY(5);
1423 
1424 		val = REG_RD(sc, BCE_NVM_COMMAND);
1425 		if (val & BCE_NVM_COMMAND_DONE)
1426 			break;
1427 	}
1428 
1429 	if (j >= NVRAM_TIMEOUT_COUNT) {
1430 		DBPRINT(sc, BCE_WARN, "Timeout erasing NVRAM.\n");
1431 		return EBUSY;
1432 	}
1433 	return 0;
1434 }
1435 #endif /* BCE_NVRAM_WRITE_SUPPORT */
1436 
1437 
1438 /****************************************************************************/
1439 /* Read a dword (32 bits) from NVRAM.                                       */
1440 /*                                                                          */
1441 /* Read a 32 bit word from NVRAM.  The caller is assumed to have already    */
1442 /* obtained the NVRAM lock and enabled the controller for NVRAM access.     */
1443 /*                                                                          */
1444 /* Returns:                                                                 */
1445 /*   0 on success and the 32 bit value read, positive value on failure.     */
1446 /****************************************************************************/
1447 static int
1448 bce_nvram_read_dword(struct bce_softc *sc, uint32_t offset, uint8_t *ret_val,
1449 		     uint32_t cmd_flags)
1450 {
1451 	uint32_t cmd;
1452 	int i, rc = 0;
1453 
1454 	/* Build the command word. */
1455 	cmd = BCE_NVM_COMMAND_DOIT | cmd_flags;
1456 
1457 	/* Calculate the offset for buffered flash. */
1458 	if (sc->bce_flash_info->buffered) {
1459 		offset = ((offset / sc->bce_flash_info->page_size) <<
1460 			  sc->bce_flash_info->page_bits) +
1461 			 (offset % sc->bce_flash_info->page_size);
1462 	}
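	/*
	 * Buffered parts are addressed as (page number << page_bits) plus
	 * the byte offset within the page rather than as a flat offset.
	 * For example, with illustrative values page_size = 264 and
	 * page_bits = 9, linear offset 600 maps to (2 << 9) + 72 = 0x448.
	 */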
1463 
1464 	/*
1465 	 * Clear the DONE bit separately, set the address to read,
1466 	 * and issue the read.
1467 	 */
1468 	REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
1469 	REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
1470 	REG_WR(sc, BCE_NVM_COMMAND, cmd);
1471 
1472 	/* Wait for completion. */
1473 	for (i = 0; i < NVRAM_TIMEOUT_COUNT; i++) {
1474 		uint32_t val;
1475 
1476 		DELAY(5);
1477 
1478 		val = REG_RD(sc, BCE_NVM_COMMAND);
1479 		if (val & BCE_NVM_COMMAND_DONE) {
1480 			val = REG_RD(sc, BCE_NVM_READ);
1481 
1482 			val = be32toh(val);
1483 			memcpy(ret_val, &val, 4);
1484 			break;
1485 		}
1486 	}
1487 
1488 	/* Check for errors. */
1489 	if (i >= NVRAM_TIMEOUT_COUNT) {
1490 		if_printf(&sc->arpcom.ac_if,
1491 			  "Timeout error reading NVRAM at offset 0x%08X!\n",
1492 			  offset);
1493 		rc = EBUSY;
1494 	}
1495 	return rc;
1496 }
1497 
1498 
1499 #ifdef BCE_NVRAM_WRITE_SUPPORT
1500 /****************************************************************************/
1501 /* Write a dword (32 bits) to NVRAM.                                        */
1502 /*                                                                          */
1503 /* Write a 32 bit word to NVRAM.  The caller is assumed to have already     */
1504 /* obtained the NVRAM lock, enabled the controller for NVRAM access, and    */
1505 /* enabled NVRAM write access.                                              */
1506 /*                                                                          */
1507 /* Returns:                                                                 */
1508 /*   0 on success, positive value on failure.                               */
1509 /****************************************************************************/
1510 static int
1511 bce_nvram_write_dword(struct bce_softc *sc, uint32_t offset, uint8_t *val,
1512 		      uint32_t cmd_flags)
1513 {
1514 	uint32_t cmd, val32;
1515 	int j;
1516 
1517 	/* Build the command word. */
1518 	cmd = BCE_NVM_COMMAND_DOIT | BCE_NVM_COMMAND_WR | cmd_flags;
1519 
1520 	/* Calculate the offset for buffered flash. */
1521 	if (sc->bce_flash_info->buffered) {
1522 		offset = ((offset / sc->bce_flash_info->page_size) <<
1523 			  sc->bce_flash_info->page_bits) +
1524 			 (offset % sc->bce_flash_info->page_size);
1525 	}
1526 
1527 	/*
1528 	 * Clear the DONE bit separately, convert NVRAM data to big-endian,
1529 	 * set the NVRAM address to write, and issue the write command
1530 	 */
1531 	REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
1532 	memcpy(&val32, val, 4);
1533 	val32 = htobe32(val32);
1534 	REG_WR(sc, BCE_NVM_WRITE, val32);
1535 	REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
1536 	REG_WR(sc, BCE_NVM_COMMAND, cmd);
1537 
1538 	/* Wait for completion. */
1539 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1540 		DELAY(5);
1541 
1542 		if (REG_RD(sc, BCE_NVM_COMMAND) & BCE_NVM_COMMAND_DONE)
1543 			break;
1544 	}
1545 	if (j >= NVRAM_TIMEOUT_COUNT) {
1546 		if_printf(&sc->arpcom.ac_if,
1547 			  "Timeout error writing NVRAM at offset 0x%08X\n",
1548 			  offset);
1549 		return EBUSY;
1550 	}
1551 	return 0;
1552 }
1553 #endif /* BCE_NVRAM_WRITE_SUPPORT */
1554 
1555 
1556 /****************************************************************************/
1557 /* Initialize NVRAM access.                                                 */
1558 /*                                                                          */
1559 /* Identify the NVRAM device in use and prepare the NVRAM interface to      */
1560 /* access that device.                                                      */
1561 /*                                                                          */
1562 /* Returns:                                                                 */
1563 /*   0 on success, positive value on failure.                               */
1564 /****************************************************************************/
1565 static int
1566 bce_init_nvram(struct bce_softc *sc)
1567 {
1568 	uint32_t val;
1569 	int j, entry_count, rc = 0;
1570 	const struct flash_spec *flash;
1571 
1572 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __func__);
1573 
1574 	/* Determine the selected interface. */
1575 	val = REG_RD(sc, BCE_NVM_CFG1);
1576 
1577 	entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
1578 
1579 	/*
1580 	 * Flash reconfiguration is required to support additional
1581 	 * NVRAM devices not directly supported in hardware.
1582 	 * Check if the flash interface was reconfigured
1583 	 * by the bootcode.
1584 	 */
1585 
1586 	if (val & 0x40000000) {
1587 		/* Flash interface reconfigured by bootcode. */
1588 
1589 		DBPRINT(sc, BCE_INFO_LOAD,
1590 			"%s(): Flash WAS reconfigured.\n", __func__);
1591 
1592 		for (j = 0, flash = flash_table; j < entry_count;
1593 		     j++, flash++) {
1594 			if ((val & FLASH_BACKUP_STRAP_MASK) ==
1595 			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
1596 				sc->bce_flash_info = flash;
1597 				break;
1598 			}
1599 		}
1600 	} else {
1601 		/* Flash interface not yet reconfigured. */
1602 		uint32_t mask;
1603 
1604 		DBPRINT(sc, BCE_INFO_LOAD,
1605 			"%s(): Flash was NOT reconfigured.\n", __func__);
1606 
1607 		if (val & (1 << 23))
1608 			mask = FLASH_BACKUP_STRAP_MASK;
1609 		else
1610 			mask = FLASH_STRAP_MASK;
1611 
1612 		/* Look for the matching NVRAM device configuration data. */
1613 		for (j = 0, flash = flash_table; j < entry_count;
1614 		     j++, flash++) {
1615 			/* Check if the device matches any of the known devices. */
1616 			if ((val & mask) == (flash->strapping & mask)) {
1617 				/* Found a device match. */
1618 				sc->bce_flash_info = flash;
1619 
1620 				/* Request access to the flash interface. */
1621 				rc = bce_acquire_nvram_lock(sc);
1622 				if (rc != 0)
1623 					return rc;
1624 
1625 				/* Reconfigure the flash interface. */
1626 				bce_enable_nvram_access(sc);
1627 				REG_WR(sc, BCE_NVM_CFG1, flash->config1);
1628 				REG_WR(sc, BCE_NVM_CFG2, flash->config2);
1629 				REG_WR(sc, BCE_NVM_CFG3, flash->config3);
1630 				REG_WR(sc, BCE_NVM_WRITE1, flash->write1);
1631 				bce_disable_nvram_access(sc);
1632 				bce_release_nvram_lock(sc);
1633 				break;
1634 			}
1635 		}
1636 	}
1637 
1638 	/* Check if a matching device was found. */
1639 	if (j == entry_count) {
1640 		sc->bce_flash_info = NULL;
1641 		if_printf(&sc->arpcom.ac_if, "Unknown Flash NVRAM found!\n");
1642 		rc = ENODEV;
1643 	}
1644 
1645 	/* Determine the flash size from the shared memory configuration. */
1646 	val = REG_RD_IND(sc, sc->bce_shmem_base + BCE_SHARED_HW_CFG_CONFIG2) &
1647 	      BCE_SHARED_HW_CFG2_NVM_SIZE_MASK;
1648 	if (val)
1649 		sc->bce_flash_size = val;
1650 	else
1651 		sc->bce_flash_size = sc->bce_flash_info->total_size;
1652 
1653 	DBPRINT(sc, BCE_INFO_LOAD, "%s() flash->total_size = 0x%08X\n",
1654 		__func__, sc->bce_flash_info->total_size);
1655 
1656 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __func__);
1657 
1658 	return rc;
1659 }
1660 
1661 
1662 /****************************************************************************/
1663 /* Read an arbitrary range of data from NVRAM.                              */
1664 /*                                                                          */
1665 /* Prepares the NVRAM interface for access and reads the requested data     */
1666 /* into the supplied buffer.                                                */
1667 /*                                                                          */
1668 /* Returns:                                                                 */
1669 /*   0 on success and the data read, positive value on failure.             */
1670 /****************************************************************************/
1671 static int
1672 bce_nvram_read(struct bce_softc *sc, uint32_t offset, uint8_t *ret_buf,
1673 	       int buf_size)
1674 {
1675 	uint32_t cmd_flags, offset32, len32, extra;
1676 	int rc = 0;
1677 
1678 	if (buf_size == 0)
1679 		return 0;
1680 
1681 	/* Request access to the flash interface. */
1682 	rc = bce_acquire_nvram_lock(sc);
1683 	if (rc != 0)
1684 		return rc;
1685 
1686 	/* Enable access to flash interface */
1687 	bce_enable_nvram_access(sc);
1688 
1689 	len32 = buf_size;
1690 	offset32 = offset;
1691 	extra = 0;
1692 
1693 	cmd_flags = 0;
1694 
1695 	/* XXX should we release nvram lock if read_dword() fails? */
1696 	if (offset32 & 3) {
1697 		uint8_t buf[4];
1698 		uint32_t pre_len;
1699 
1700 		offset32 &= ~3;
1701 		pre_len = 4 - (offset & 3);
1702 
1703 		if (pre_len >= len32) {
1704 			pre_len = len32;
1705 			cmd_flags = BCE_NVM_COMMAND_FIRST | BCE_NVM_COMMAND_LAST;
1706 		} else {
1707 			cmd_flags = BCE_NVM_COMMAND_FIRST;
1708 		}
1709 
1710 		rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
1711 		if (rc)
1712 			return rc;
1713 
1714 		memcpy(ret_buf, buf + (offset & 3), pre_len);
1715 
1716 		offset32 += 4;
1717 		ret_buf += pre_len;
1718 		len32 -= pre_len;
1719 	}
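	/*
	 * At this point offset32 is dword aligned.  For example, a 5 byte
	 * read starting at offset 6 first fetches the dword at offset 4,
	 * copies its last 2 bytes (pre_len = 2), and then continues below
	 * with offset32 = 8 and len32 = 3.
	 */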
1720 
1721 	if (len32 & 3) {
1722 		extra = 4 - (len32 & 3);
1723 		len32 = (len32 + 4) & ~3;
1724 	}
1725 
1726 	if (len32 == 4) {
1727 		uint8_t buf[4];
1728 
1729 		if (cmd_flags)
1730 			cmd_flags = BCE_NVM_COMMAND_LAST;
1731 		else
1732 			cmd_flags = BCE_NVM_COMMAND_FIRST |
1733 				    BCE_NVM_COMMAND_LAST;
1734 
1735 		rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
1736 
1737 		memcpy(ret_buf, buf, 4 - extra);
1738 	} else if (len32 > 0) {
1739 		uint8_t buf[4];
1740 
1741 		/* Read the first word. */
1742 		if (cmd_flags)
1743 			cmd_flags = 0;
1744 		else
1745 			cmd_flags = BCE_NVM_COMMAND_FIRST;
1746 
1747 		rc = bce_nvram_read_dword(sc, offset32, ret_buf, cmd_flags);
1748 
1749 		/* Advance to the next dword. */
1750 		offset32 += 4;
1751 		ret_buf += 4;
1752 		len32 -= 4;
1753 
1754 		while (len32 > 4 && rc == 0) {
1755 			rc = bce_nvram_read_dword(sc, offset32, ret_buf, 0);
1756 
1757 			/* Advance to the next dword. */
1758 			offset32 += 4;
1759 			ret_buf += 4;
1760 			len32 -= 4;
1761 		}
1762 
1763 		if (rc)
1764 			return rc;
1765 
1766 		cmd_flags = BCE_NVM_COMMAND_LAST;
1767 		rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
1768 
1769 		memcpy(ret_buf, buf, 4 - extra);
1770 	}
1771 
1772 	/* Disable access to flash interface and release the lock. */
1773 	bce_disable_nvram_access(sc);
1774 	bce_release_nvram_lock(sc);
1775 
1776 	return rc;
1777 }
1778 
1779 
1780 #ifdef BCE_NVRAM_WRITE_SUPPORT
1781 /****************************************************************************/
1782 /* Write an arbitrary range of data to NVRAM.                               */
1783 /*                                                                          */
1784 /* Prepares the NVRAM interface for write access and writes the requested   */
1785 /* data from the supplied buffer.  The caller is responsible for            */
1786 /* calculating any appropriate CRCs.                                        */
1787 /*                                                                          */
1788 /* Returns:                                                                 */
1789 /*   0 on success, positive value on failure.                               */
1790 /****************************************************************************/
1791 static int
1792 bce_nvram_write(struct bce_softc *sc, uint32_t offset, uint8_t *data_buf,
1793 		int buf_size)
1794 {
1795 	uint32_t written, offset32, len32;
1796 	uint8_t *buf, start[4], end[4];
1797 	int rc = 0;
1798 	int align_start, align_end;
1799 
1800 	buf = data_buf;
1801 	offset32 = offset;
1802 	len32 = buf_size;
1803 	align_end = 0;
1804 	align_start = (offset32 & 3);
1805 
1806 	if (align_start) {
1807 		offset32 &= ~3;
1808 		len32 += align_start;
1809 		rc = bce_nvram_read(sc, offset32, start, 4);
1810 		if (rc)
1811 			return rc;
1812 	}
1813 
1814 	if (len32 & 3) {
1815 		if (len32 > 4 || !align_start) {
1816 			align_end = 4 - (len32 & 3);
1817 			len32 += align_end;
1818 			rc = bce_nvram_read(sc, offset32 + len32 - 4, end, 4);
1819 			if (rc)
1820 				return rc;
1821 		}
1822 	}
1823 
1824 	if (align_start || align_end) {
1825 		buf = kmalloc(len32, M_DEVBUF, M_NOWAIT);
1826 		if (buf == NULL)
1827 			return ENOMEM;
1828 		if (align_start)
1829 			memcpy(buf, start, 4);
1830 		if (align_end)
1831 			memcpy(buf + len32 - 4, end, 4);
1832 		memcpy(buf + align_start, data_buf, buf_size);
1833 	}
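	/*
	 * 'buf' now points at a dword aligned image of the region to be
	 * written: the caller's data padded at the head and/or tail with
	 * the existing NVRAM contents read back above, so whole dwords can
	 * be written without clobbering the neighbouring bytes.
	 */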
1834 
1835 	written = 0;
1836 	while (written < len32 && rc == 0) {
1837 		uint32_t page_start, page_end, data_start, data_end;
1838 		uint32_t addr, cmd_flags;
1839 		int i;
1840 		uint8_t flash_buffer[264];
1841 
1842 		/* Find the page_start addr */
1843 		page_start = offset32 + written;
1844 		page_start -= (page_start % sc->bce_flash_info->page_size);
1845 		/* Find the page_end addr */
1846 		page_end = page_start + sc->bce_flash_info->page_size;
1847 		/* Find the data_start addr */
1848 		data_start = (written == 0) ? offset32 : page_start;
1849 		/* Find the data_end addr */
1850 		data_end = (page_end > offset32 + len32) ? (offset32 + len32)
1851 							 : page_end;
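		/*
		 * Each pass through this loop handles at most one flash
		 * page.  For example, with an illustrative 256 byte page, a
		 * 300 byte write starting at offset 0x110 is split into
		 * [0x110, 0x200) on the first pass and [0x200, 0x23c) on
		 * the second.
		 */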
1852 
1853 		/* Request access to the flash interface. */
1854 		rc = bce_acquire_nvram_lock(sc);
1855 		if (rc != 0)
1856 			goto nvram_write_end;
1857 
1858 		/* Enable access to flash interface */
1859 		bce_enable_nvram_access(sc);
1860 
1861 		cmd_flags = BCE_NVM_COMMAND_FIRST;
1862 		if (sc->bce_flash_info->buffered == 0) {
1863 			int j;
1864 
1865 			/*
1866 			 * Read the whole page into the buffer
1867 			 * (non-buffered flash only)
1868 			 */
1869 			for (j = 0; j < sc->bce_flash_info->page_size; j += 4) {
1870 				if (j == (sc->bce_flash_info->page_size - 4))
1871 					cmd_flags |= BCE_NVM_COMMAND_LAST;
1872 
1873 				rc = bce_nvram_read_dword(sc, page_start + j,
1874 							  &flash_buffer[j],
1875 							  cmd_flags);
1876 				if (rc)
1877 					goto nvram_write_end;
1878 
1879 				cmd_flags = 0;
1880 			}
1881 		}
1882 
1883 		/* Enable writes to flash interface (unlock write-protect) */
1884 		rc = bce_enable_nvram_write(sc);
1885 		if (rc != 0)
1886 			goto nvram_write_end;
1887 
1888 		/* Erase the page */
1889 		rc = bce_nvram_erase_page(sc, page_start);
1890 		if (rc != 0)
1891 			goto nvram_write_end;
1892 
1893 		/* Re-enable the write again for the actual write */
1894 		bce_enable_nvram_write(sc);
1895 
1896 		/* Loop to write back the buffer data from page_start to
1897 		 * data_start */
1898 		i = 0;
1899 		if (sc->bce_flash_info->buffered == 0) {
1900 			for (addr = page_start; addr < data_start;
1901 			     addr += 4, i += 4) {
1902 				rc = bce_nvram_write_dword(sc, addr,
1903 							   &flash_buffer[i],
1904 							   cmd_flags);
1905 				if (rc != 0)
1906 					goto nvram_write_end;
1907 
1908 				cmd_flags = 0;
1909 			}
1910 		}
1911 
1912 		/* Loop to write the new data from data_start to data_end */
1913 		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
1914 			if (addr == page_end - 4 ||
1915 			    (sc->bce_flash_info->buffered &&
1916 			     addr == data_end - 4))
1917 				cmd_flags |= BCE_NVM_COMMAND_LAST;
1918 
1919 			rc = bce_nvram_write_dword(sc, addr, buf, cmd_flags);
1920 			if (rc != 0)
1921 				goto nvram_write_end;
1922 
1923 			cmd_flags = 0;
1924 			buf += 4;
1925 		}
1926 
1927 		/* Loop to write back the buffer data from data_end
1928 		 * to page_end */
1929 		if (sc->bce_flash_info->buffered == 0) {
1930 			for (addr = data_end; addr < page_end;
1931 			     addr += 4, i += 4) {
1932 				if (addr == page_end - 4)
1933 					cmd_flags = BCE_NVM_COMMAND_LAST;
1934 
1935 				rc = bce_nvram_write_dword(sc, addr,
1936 					&flash_buffer[i], cmd_flags);
1937 				if (rc != 0)
1938 					goto nvram_write_end;
1939 
1940 				cmd_flags = 0;
1941 			}
1942 		}
1943 
1944 		/* Disable writes to flash interface (lock write-protect) */
1945 		bce_disable_nvram_write(sc);
1946 
1947 		/* Disable access to flash interface */
1948 		bce_disable_nvram_access(sc);
1949 		bce_release_nvram_lock(sc);
1950 
1951 		/* Increment written */
1952 		written += data_end - data_start;
1953 	}
1954 
1955 nvram_write_end:
1956 	if (align_start || align_end)
1957 		kfree(buf, M_DEVBUF);
1958 	return rc;
1959 }
1960 #endif /* BCE_NVRAM_WRITE_SUPPORT */
1961 
1962 
1963 /****************************************************************************/
1964 /* Verifies that NVRAM is accessible and contains valid data.               */
1965 /*                                                                          */
1966 /* Reads the configuration data from NVRAM and verifies that the CRC is     */
1967 /* correct.                                                                 */
1968 /*                                                                          */
1969 /* Returns:                                                                 */
1970 /*   0 on success, positive value on failure.                               */
1971 /****************************************************************************/
1972 static int
1973 bce_nvram_test(struct bce_softc *sc)
1974 {
1975 	uint32_t buf[BCE_NVRAM_SIZE / 4];
1976 	uint32_t magic, csum;
1977 	uint8_t *data = (uint8_t *)buf;
1978 	int rc = 0;
1979 
1980 	/*
1981 	 * Check that the device NVRAM is valid by reading
1982 	 * the magic value at offset 0.
1983 	 */
1984 	rc = bce_nvram_read(sc, 0, data, 4);
1985 	if (rc != 0)
1986 		return rc;
1987 
1988 	magic = be32toh(buf[0]);
1989 	if (magic != BCE_NVRAM_MAGIC) {
1990 		if_printf(&sc->arpcom.ac_if,
1991 			  "Invalid NVRAM magic value! Expected: 0x%08X, "
1992 			  "Found: 0x%08X\n", BCE_NVRAM_MAGIC, magic);
1993 		return ENODEV;
1994 	}
1995 
1996 	/*
1997 	 * Verify that the device NVRAM includes valid
1998 	 * configuration data.
1999 	 */
2000 	rc = bce_nvram_read(sc, 0x100, data, BCE_NVRAM_SIZE);
2001 	if (rc != 0)
2002 		return rc;
2003 
2004 	csum = ether_crc32_le(data, 0x100);
2005 	if (csum != BCE_CRC32_RESIDUAL) {
2006 		if_printf(&sc->arpcom.ac_if,
2007 			  "Invalid Manufacturing Information NVRAM CRC! "
2008 			  "Expected: 0x%08X, Found: 0x%08X\n",
2009 			  BCE_CRC32_RESIDUAL, csum);
2010 		return ENODEV;
2011 	}
2012 
2013 	csum = ether_crc32_le(data + 0x100, 0x100);
2014 	if (csum != BCE_CRC32_RESIDUAL) {
2015 		if_printf(&sc->arpcom.ac_if,
2016 			  "Invalid Feature Configuration Information "
2017 			  "NVRAM CRC! Expected: 0x%08X, Found: 0x%08X\n",
2018 			  BCE_CRC32_RESIDUAL, csum);
2019 		rc = ENODEV;
2020 	}
2021 	return rc;
2022 }
2023 
2024 
2025 /****************************************************************************/
2026 /* Free any DMA memory owned by the driver.                                 */
2027 /*                                                                          */
2028 /* Scans through each data structure that requires DMA memory and frees     */
2029 /* the memory if allocated.                                                 */
2030 /*                                                                          */
2031 /* Returns:                                                                 */
2032 /*   Nothing.                                                               */
2033 /****************************************************************************/
2034 static void
2035 bce_dma_free(struct bce_softc *sc)
2036 {
2037 	int i;
2038 
2039 	/* Destroy the status block. */
2040 	if (sc->status_tag != NULL) {
2041 		if (sc->status_block != NULL) {
2042 			bus_dmamap_unload(sc->status_tag, sc->status_map);
2043 			bus_dmamem_free(sc->status_tag, sc->status_block,
2044 					sc->status_map);
2045 		}
2046 		bus_dma_tag_destroy(sc->status_tag);
2047 	}
2048 
2049 
2050 	/* Destroy the statistics block. */
2051 	if (sc->stats_tag != NULL) {
2052 		if (sc->stats_block != NULL) {
2053 			bus_dmamap_unload(sc->stats_tag, sc->stats_map);
2054 			bus_dmamem_free(sc->stats_tag, sc->stats_block,
2055 					sc->stats_map);
2056 		}
2057 		bus_dma_tag_destroy(sc->stats_tag);
2058 	}
2059 
2060 	/* Destroy the TX buffer descriptor DMA resources. */
2061 	if (sc->tx_bd_chain_tag != NULL) {
2062 		for (i = 0; i < TX_PAGES; i++) {
2063 			if (sc->tx_bd_chain[i] != NULL) {
2064 				bus_dmamap_unload(sc->tx_bd_chain_tag,
2065 						  sc->tx_bd_chain_map[i]);
2066 				bus_dmamem_free(sc->tx_bd_chain_tag,
2067 						sc->tx_bd_chain[i],
2068 						sc->tx_bd_chain_map[i]);
2069 			}
2070 		}
2071 		bus_dma_tag_destroy(sc->tx_bd_chain_tag);
2072 	}
2073 
2074 	/* Destroy the RX buffer descriptor DMA resources. */
2075 	if (sc->rx_bd_chain_tag != NULL) {
2076 		for (i = 0; i < RX_PAGES; i++) {
2077 			if (sc->rx_bd_chain[i] != NULL) {
2078 				bus_dmamap_unload(sc->rx_bd_chain_tag,
2079 						  sc->rx_bd_chain_map[i]);
2080 				bus_dmamem_free(sc->rx_bd_chain_tag,
2081 						sc->rx_bd_chain[i],
2082 						sc->rx_bd_chain_map[i]);
2083 			}
2084 		}
2085 		bus_dma_tag_destroy(sc->rx_bd_chain_tag);
2086 	}
2087 
2088 	/* Destroy the TX mbuf DMA resources. */
2089 	if (sc->tx_mbuf_tag != NULL) {
2090 		for (i = 0; i < TOTAL_TX_BD; i++) {
2091 			/* Must have been unloaded in bce_stop() */
2092 			KKASSERT(sc->tx_mbuf_ptr[i] == NULL);
2093 			bus_dmamap_destroy(sc->tx_mbuf_tag,
2094 					   sc->tx_mbuf_map[i]);
2095 		}
2096 		bus_dma_tag_destroy(sc->tx_mbuf_tag);
2097 	}
2098 
2099 	/* Destroy the RX mbuf DMA resources. */
2100 	if (sc->rx_mbuf_tag != NULL) {
2101 		for (i = 0; i < TOTAL_RX_BD; i++) {
2102 			/* Must have been unloaded in bce_stop() */
2103 			KKASSERT(sc->rx_mbuf_ptr[i] == NULL);
2104 			bus_dmamap_destroy(sc->rx_mbuf_tag,
2105 					   sc->rx_mbuf_map[i]);
2106 		}
2107 		bus_dma_tag_destroy(sc->rx_mbuf_tag);
2108 	}
2109 
2110 	/* Destroy the parent tag */
2111 	if (sc->parent_tag != NULL)
2112 		bus_dma_tag_destroy(sc->parent_tag);
2113 }
2114 
2115 
2116 /****************************************************************************/
2117 /* Get DMA memory from the OS.                                              */
2118 /*                                                                          */
2119 /* Validates that the OS has provided DMA buffers in response to a          */
2120 /* bus_dmamap_load() call and saves the physical address of those buffers.  */
2121 /* This callback expects a single DMA segment: on success the segment's     */
2122 /* physical address is stored through the bus_addr_t pointer passed as the  */
2123 /* callback argument; on error the callback simply returns.                 */
2124 /*                                                                          */
2125 /* Returns:                                                                 */
2126 /*   Nothing.                                                               */
2127 /****************************************************************************/
2128 static void
2129 bce_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2130 {
2131 	bus_addr_t *busaddr = arg;
2132 
2133 	/*
2134 	 * Simulate a mapping failure.
2135 	 * XXX not correct.
2136 	 */
2137 	DBRUNIF(DB_RANDOMTRUE(bce_debug_dma_map_addr_failure),
2138 		kprintf("bce: %s(%d): Simulating DMA mapping error.\n",
2139 			__FILE__, __LINE__);
2140 		error = ENOMEM);
2141 
2142 	/* Check for an error and signal the caller that an error occurred. */
2143 	if (error)
2144 		return;
2145 
2146 	KASSERT(nseg == 1, ("only one segment is allowed\n"));
2147 	*busaddr = segs->ds_addr;
2148 }
2149 
2150 
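/****************************************************************************/
/* Map an mbuf chain for DMA.                                               */
/*                                                                          */
/* Callback used when loading an mbuf DMA map.  Copies the segment array    */
/* into the caller supplied bce_dmamap_arg; if the chain needs more         */
/* segments than the caller allows, bce_maxsegs is set to 0 to signal the   */
/* failure.                                                                 */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/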
2151 static void
2152 bce_dma_map_mbuf(void *arg, bus_dma_segment_t *segs, int nsegs,
2153 		 bus_size_t mapsz __unused, int error)
2154 {
2155 	struct bce_dmamap_arg *ctx = arg;
2156 	int i;
2157 
2158 	if (error)
2159 		return;
2160 
2161 	if (nsegs > ctx->bce_maxsegs) {
2162 		ctx->bce_maxsegs = 0;
2163 		return;
2164 	}
2165 
2166 	ctx->bce_maxsegs = nsegs;
2167 	for (i = 0; i < nsegs; ++i)
2168 		ctx->bce_segs[i] = segs[i];
2169 }
2170 
2171 
2172 /****************************************************************************/
2173 /* Allocate any DMA memory needed by the driver.                            */
2174 /*                                                                          */
2175 /* Allocates the DMA memory used by the various global structures shared    */
2176 /* with the hardware.                                                        */
2177 /*                                                                          */
2178 /* Returns:                                                                 */
2179 /*   0 for success, positive value for failure.                             */
2180 /****************************************************************************/
2181 static int
2182 bce_dma_alloc(struct bce_softc *sc)
2183 {
2184 	struct ifnet *ifp = &sc->arpcom.ac_if;
2185 	int i, j, rc = 0;
2186 	bus_addr_t busaddr;
2187 
2188 	/*
2189 	 * Allocate the parent bus DMA tag appropriate for PCI.
2190 	 */
2191 	rc = bus_dma_tag_create(NULL, 1, BCE_DMA_BOUNDARY,
2192 				sc->max_bus_addr, BUS_SPACE_MAXADDR,
2193 				NULL, NULL,
2194 				MAXBSIZE, BUS_SPACE_UNRESTRICTED,
2195 				BUS_SPACE_MAXSIZE_32BIT,
2196 				0, &sc->parent_tag);
2197 	if (rc != 0) {
2198 		if_printf(ifp, "Could not allocate parent DMA tag!\n");
2199 		return rc;
2200 	}
2201 
2202 	/*
2203 	 * Create a DMA tag for the status block, allocate and clear the
2204 	 * memory, map the memory into DMA space, and fetch the physical
2205 	 * address of the block.
2206 	 */
2207 	rc = bus_dma_tag_create(sc->parent_tag,
2208 				BCE_DMA_ALIGN, BCE_DMA_BOUNDARY,
2209 				sc->max_bus_addr, BUS_SPACE_MAXADDR,
2210 				NULL, NULL,
2211 				BCE_STATUS_BLK_SZ, 1, BCE_STATUS_BLK_SZ,
2212 				0, &sc->status_tag);
2213 	if (rc != 0) {
2214 		if_printf(ifp, "Could not allocate status block DMA tag!\n");
2215 		return rc;
2216 	}
2217 
2218 	rc = bus_dmamem_alloc(sc->status_tag, (void **)&sc->status_block,
2219 			      BUS_DMA_WAITOK | BUS_DMA_ZERO,
2220 			      &sc->status_map);
2221 	if (rc != 0) {
2222 		if_printf(ifp, "Could not allocate status block DMA memory!\n");
2223 		return rc;
2224 	}
2225 
2226 	rc = bus_dmamap_load(sc->status_tag, sc->status_map,
2227 			     sc->status_block, BCE_STATUS_BLK_SZ,
2228 			     bce_dma_map_addr, &busaddr, BUS_DMA_WAITOK);
2229 	if (rc != 0) {
2230 		if_printf(ifp, "Could not map status block DMA memory!\n");
2231 		bus_dmamem_free(sc->status_tag, sc->status_block,
2232 				sc->status_map);
2233 		sc->status_block = NULL;
2234 		return rc;
2235 	}
2236 
2237 	sc->status_block_paddr = busaddr;
2238 	/* DRC - Fix for 64 bit addresses. */
2239 	DBPRINT(sc, BCE_INFO, "status_block_paddr = 0x%08X\n",
2240 		(uint32_t)sc->status_block_paddr);
2241 
2242 	/*
2243 	 * Create a DMA tag for the statistics block, allocate and clear the
2244 	 * memory, map the memory into DMA space, and fetch the physical
2245 	 * address of the block.
2246 	 */
2247 	rc = bus_dma_tag_create(sc->parent_tag,
2248 				BCE_DMA_ALIGN, BCE_DMA_BOUNDARY,
2249 				sc->max_bus_addr, BUS_SPACE_MAXADDR,
2250 				NULL, NULL,
2251 				BCE_STATS_BLK_SZ, 1, BCE_STATS_BLK_SZ,
2252 				0, &sc->stats_tag);
2253 	if (rc != 0) {
2254 		if_printf(ifp, "Could not allocate "
2255 			  "statistics block DMA tag!\n");
2256 		return rc;
2257 	}
2258 
2259 	rc = bus_dmamem_alloc(sc->stats_tag, (void **)&sc->stats_block,
2260 			      BUS_DMA_WAITOK | BUS_DMA_ZERO,
2261 			      &sc->stats_map);
2262 	if (rc != 0) {
2263 		if_printf(ifp, "Could not allocate "
2264 			  "statistics block DMA memory!\n");
2265 		return rc;
2266 	}
2267 
2268 	rc = bus_dmamap_load(sc->stats_tag, sc->stats_map,
2269 			     sc->stats_block, BCE_STATS_BLK_SZ,
2270 			     bce_dma_map_addr, &busaddr, BUS_DMA_WAITOK);
2271 	if (rc != 0) {
2272 		if_printf(ifp, "Could not map statistics block DMA memory!\n");
2273 		bus_dmamem_free(sc->stats_tag, sc->stats_block, sc->stats_map);
2274 		sc->stats_block = NULL;
2275 		return rc;
2276 	}
2277 
2278 	sc->stats_block_paddr = busaddr;
2279 	/* DRC - Fix for 64 bit address. */
2280 	DBPRINT(sc, BCE_INFO, "stats_block_paddr = 0x%08X\n",
2281 		(uint32_t)sc->stats_block_paddr);
2282 
2283 	/*
2284 	 * Create a DMA tag for the TX buffer descriptor chain,
2285 	 * allocate and clear the  memory, and fetch the
2286 	 * physical address of the block.
2287 	 */
2288 	rc = bus_dma_tag_create(sc->parent_tag,
2289 				BCM_PAGE_SIZE, BCE_DMA_BOUNDARY,
2290 				sc->max_bus_addr, BUS_SPACE_MAXADDR,
2291 				NULL, NULL,
2292 				BCE_TX_CHAIN_PAGE_SZ, 1, BCE_TX_CHAIN_PAGE_SZ,
2293 				0, &sc->tx_bd_chain_tag);
2294 	if (rc != 0) {
2295 		if_printf(ifp, "Could not allocate "
2296 			  "TX descriptor chain DMA tag!\n");
2297 		return rc;
2298 	}
2299 
2300 	for (i = 0; i < TX_PAGES; i++) {
2301 		rc = bus_dmamem_alloc(sc->tx_bd_chain_tag,
2302 				      (void **)&sc->tx_bd_chain[i],
2303 				      BUS_DMA_WAITOK, &sc->tx_bd_chain_map[i]);
2304 		if (rc != 0) {
2305 			if_printf(ifp, "Could not allocate %dth TX descriptor "
2306 				  "chain DMA memory!\n", i);
2307 			return rc;
2308 		}
2309 
2310 		rc = bus_dmamap_load(sc->tx_bd_chain_tag,
2311 				     sc->tx_bd_chain_map[i],
2312 				     sc->tx_bd_chain[i], BCE_TX_CHAIN_PAGE_SZ,
2313 				     bce_dma_map_addr, &busaddr,
2314 				     BUS_DMA_WAITOK);
2315 		if (rc != 0) {
2316 			if_printf(ifp, "Could not map %dth TX descriptor "
2317 				  "chain DMA memory!\n", i);
2318 			bus_dmamem_free(sc->tx_bd_chain_tag,
2319 					sc->tx_bd_chain[i],
2320 					sc->tx_bd_chain_map[i]);
2321 			sc->tx_bd_chain[i] = NULL;
2322 			return rc;
2323 		}
2324 
2325 		sc->tx_bd_chain_paddr[i] = busaddr;
2326 		/* DRC - Fix for 64 bit systems. */
2327 		DBPRINT(sc, BCE_INFO, "tx_bd_chain_paddr[%d] = 0x%08X\n",
2328 			i, (uint32_t)sc->tx_bd_chain_paddr[i]);
2329 	}
2330 
2331 	/* Create a DMA tag for TX mbufs. */
2332 	rc = bus_dma_tag_create(sc->parent_tag, 1, BCE_DMA_BOUNDARY,
2333 				sc->max_bus_addr, BUS_SPACE_MAXADDR,
2334 				NULL, NULL,
2335 				MCLBYTES * BCE_MAX_SEGMENTS,
2336 				BCE_MAX_SEGMENTS, MCLBYTES,
2337 				0, &sc->tx_mbuf_tag);
2338 	if (rc != 0) {
2339 		if_printf(ifp, "Could not allocate TX mbuf DMA tag!\n");
2340 		return rc;
2341 	}
2342 
2343 	/* Create DMA maps for the TX mbufs clusters. */
2344 	for (i = 0; i < TOTAL_TX_BD; i++) {
2345 		rc = bus_dmamap_create(sc->tx_mbuf_tag, BUS_DMA_WAITOK,
2346 				       &sc->tx_mbuf_map[i]);
2347 		if (rc != 0) {
2348 			for (j = 0; j < i; ++j) {
2349 				bus_dmamap_destroy(sc->tx_mbuf_tag,
2350 						   sc->tx_mbuf_map[j]);
2351 			}
2352 			bus_dma_tag_destroy(sc->tx_mbuf_tag);
2353 			sc->tx_mbuf_tag = NULL;
2354 
2355 			if_printf(ifp, "Unable to create "
2356 				  "%dth TX mbuf DMA map!\n", i);
2357 			return rc;
2358 		}
2359 	}
2360 
2361 	/*
2362 	 * Create a DMA tag for the RX buffer descriptor chain,
2363 	 * allocate and clear the  memory, and fetch the physical
2364 	 * address of the blocks.
2365 	 */
2366 	rc = bus_dma_tag_create(sc->parent_tag,
2367 				BCM_PAGE_SIZE, BCE_DMA_BOUNDARY,
2368 				sc->max_bus_addr, BUS_SPACE_MAXADDR,
2369 				NULL, NULL,
2370 				BCE_RX_CHAIN_PAGE_SZ, 1, BCE_RX_CHAIN_PAGE_SZ,
2371 				0, &sc->rx_bd_chain_tag);
2372 	if (rc != 0) {
2373 		if_printf(ifp, "Could not allocate "
2374 			  "RX descriptor chain DMA tag!\n");
2375 		return rc;
2376 	}
2377 
2378 	for (i = 0; i < RX_PAGES; i++) {
2379 		rc = bus_dmamem_alloc(sc->rx_bd_chain_tag,
2380 				      (void **)&sc->rx_bd_chain[i],
2381 				      BUS_DMA_WAITOK | BUS_DMA_ZERO,
2382 				      &sc->rx_bd_chain_map[i]);
2383 		if (rc != 0) {
2384 			if_printf(ifp, "Could not allocate %dth RX descriptor "
2385 				  "chain DMA memory!\n", i);
2386 			return rc;
2387 		}
2388 
2389 		rc = bus_dmamap_load(sc->rx_bd_chain_tag,
2390 				     sc->rx_bd_chain_map[i],
2391 				     sc->rx_bd_chain[i], BCE_RX_CHAIN_PAGE_SZ,
2392 				     bce_dma_map_addr, &busaddr,
2393 				     BUS_DMA_WAITOK);
2394 		if (rc != 0) {
2395 			if_printf(ifp, "Could not map %dth RX descriptor "
2396 				  "chain DMA memory!\n", i);
2397 			bus_dmamem_free(sc->rx_bd_chain_tag,
2398 					sc->rx_bd_chain[i],
2399 					sc->rx_bd_chain_map[i]);
2400 			sc->rx_bd_chain[i] = NULL;
2401 			return rc;
2402 		}
2403 
2404 		sc->rx_bd_chain_paddr[i] = busaddr;
2405 		/* DRC - Fix for 64 bit systems. */
2406 		DBPRINT(sc, BCE_INFO, "rx_bd_chain_paddr[%d] = 0x%08X\n",
2407 			i, (uint32_t)sc->rx_bd_chain_paddr[i]);
2408 	}
2409 
2410 	/* Create a DMA tag for RX mbufs. */
2411 	rc = bus_dma_tag_create(sc->parent_tag, 1, BCE_DMA_BOUNDARY,
2412 				sc->max_bus_addr, BUS_SPACE_MAXADDR,
2413 				NULL, NULL,
2414 				MCLBYTES, 1/* BCE_MAX_SEGMENTS */, MCLBYTES,
2415 				0, &sc->rx_mbuf_tag);
2416 	if (rc != 0) {
2417 		if_printf(ifp, "Could not allocate RX mbuf DMA tag!\n");
2418 		return rc;
2419 	}
2420 
2421 	/* Create DMA maps for the RX mbuf clusters. */
2422 	for (i = 0; i < TOTAL_RX_BD; i++) {
2423 		rc = bus_dmamap_create(sc->rx_mbuf_tag, BUS_DMA_WAITOK,
2424 				       &sc->rx_mbuf_map[i]);
2425 		if (rc != 0) {
2426 			for (j = 0; j < i; ++j) {
2427 				bus_dmamap_destroy(sc->rx_mbuf_tag,
2428 						   sc->rx_mbuf_map[j]);
2429 			}
2430 			bus_dma_tag_destroy(sc->rx_mbuf_tag);
2431 			sc->rx_mbuf_tag = NULL;
2432 
2433 			if_printf(ifp, "Unable to create "
2434 				  "%dth RX mbuf DMA map!\n", i);
2435 			return rc;
2436 		}
2437 	}
2438 	return 0;
2439 }
2440 
2441 
2442 /****************************************************************************/
2443 /* Firmware synchronization.                                                */
2444 /*                                                                          */
2445 /* Before performing certain operations, such as a chip reset, the driver   */
2446 /* must first synchronize with the bootcode firmware.                       */
2447 /*                                                                          */
2448 /* Returns:                                                                 */
2449 /*   0 for success, positive value for failure.                             */
2450 /****************************************************************************/
2451 static int
2452 bce_fw_sync(struct bce_softc *sc, uint32_t msg_data)
2453 {
2454 	int i, rc = 0;
2455 	uint32_t val;
2456 
2457 	/* Don't waste any time if we've timed out before. */
2458 	if (sc->bce_fw_timed_out)
2459 		return EBUSY;
2460 
2461 	/* Increment the message sequence number. */
2462 	sc->bce_fw_wr_seq++;
2463 	msg_data |= sc->bce_fw_wr_seq;
2464 
2465  	DBPRINT(sc, BCE_VERBOSE, "bce_fw_sync(): msg_data = 0x%08X\n", msg_data);
2466 
2467 	/* Send the message to the bootcode driver mailbox. */
2468 	REG_WR_IND(sc, sc->bce_shmem_base + BCE_DRV_MB, msg_data);
2469 
2470 	/* Wait for the bootcode to acknowledge the message. */
2471 	for (i = 0; i < FW_ACK_TIME_OUT_MS; i++) {
2472 		/* Check for a response in the bootcode firmware mailbox. */
2473 		val = REG_RD_IND(sc, sc->bce_shmem_base + BCE_FW_MB);
2474 		if ((val & BCE_FW_MSG_ACK) == (msg_data & BCE_DRV_MSG_SEQ))
2475 			break;
2476 		DELAY(1000);
2477 	}
2478 
2479 	/* If we've timed out, tell the bootcode that we've stopped waiting. */
2480 	if ((val & BCE_FW_MSG_ACK) != (msg_data & BCE_DRV_MSG_SEQ) &&
2481 	    (msg_data & BCE_DRV_MSG_DATA) != BCE_DRV_MSG_DATA_WAIT0) {
2482 		if_printf(&sc->arpcom.ac_if,
2483 			  "Firmware synchronization timeout! "
2484 			  "msg_data = 0x%08X\n", msg_data);
2485 
2486 		msg_data &= ~BCE_DRV_MSG_CODE;
2487 		msg_data |= BCE_DRV_MSG_CODE_FW_TIMEOUT;
2488 
2489 		REG_WR_IND(sc, sc->bce_shmem_base + BCE_DRV_MB, msg_data);
2490 
2491 		sc->bce_fw_timed_out = 1;
2492 		rc = EBUSY;
2493 	}
2494 	return rc;
2495 }
2496 
2497 
2498 /****************************************************************************/
2499 /* Load Receive Virtual 2 Physical (RV2P) processor firmware.               */
2500 /*                                                                          */
2501 /* Returns:                                                                 */
2502 /*   Nothing.                                                               */
2503 /****************************************************************************/
2504 static void
2505 bce_load_rv2p_fw(struct bce_softc *sc, uint32_t *rv2p_code,
2506 		 uint32_t rv2p_code_len, uint32_t rv2p_proc)
2507 {
2508 	int i;
2509 	uint32_t val;
2510 
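	/*
	 * Each RV2P instruction is 64 bits wide: write the high and low
	 * 32 bit halves, then latch them into the selected processor at
	 * instruction index i / 8.
	 */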
2511 	for (i = 0; i < rv2p_code_len; i += 8) {
2512 		REG_WR(sc, BCE_RV2P_INSTR_HIGH, *rv2p_code);
2513 		rv2p_code++;
2514 		REG_WR(sc, BCE_RV2P_INSTR_LOW, *rv2p_code);
2515 		rv2p_code++;
2516 
2517 		if (rv2p_proc == RV2P_PROC1) {
2518 			val = (i / 8) | BCE_RV2P_PROC1_ADDR_CMD_RDWR;
2519 			REG_WR(sc, BCE_RV2P_PROC1_ADDR_CMD, val);
2520 		} else {
2521 			val = (i / 8) | BCE_RV2P_PROC2_ADDR_CMD_RDWR;
2522 			REG_WR(sc, BCE_RV2P_PROC2_ADDR_CMD, val);
2523 		}
2524 	}
2525 
2526 	/* Reset the processor, un-stall is done later. */
2527 	if (rv2p_proc == RV2P_PROC1)
2528 		REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC1_RESET);
2529 	else
2530 		REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC2_RESET);
2531 }
2532 
2533 
2534 /****************************************************************************/
2535 /* Load RISC processor firmware.                                            */
2536 /*                                                                          */
2537 /* Loads firmware from the file if_bcefw.h into the scratchpad memory       */
2538 /* associated with a particular processor.                                  */
2539 /*                                                                          */
2540 /* Returns:                                                                 */
2541 /*   Nothing.                                                               */
2542 /****************************************************************************/
2543 static void
2544 bce_load_cpu_fw(struct bce_softc *sc, struct cpu_reg *cpu_reg,
2545 		struct fw_info *fw)
2546 {
2547 	uint32_t offset, val;
2548 	int j;
2549 
2550 	/* Halt the CPU. */
2551 	val = REG_RD_IND(sc, cpu_reg->mode);
2552 	val |= cpu_reg->mode_value_halt;
2553 	REG_WR_IND(sc, cpu_reg->mode, val);
2554 	REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
2555 
2556 	/* Load the Text area. */
2557 	offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2558 	if (fw->text) {
2559 		for (j = 0; j < (fw->text_len / 4); j++, offset += 4)
2560 			REG_WR_IND(sc, offset, fw->text[j]);
2561 	}
2562 
2563 	/* Load the Data area. */
2564 	offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2565 	if (fw->data) {
2566 		for (j = 0; j < (fw->data_len / 4); j++, offset += 4)
2567 			REG_WR_IND(sc, offset, fw->data[j]);
2568 	}
2569 
2570 	/* Load the SBSS area. */
2571 	offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2572 	if (fw->sbss) {
2573 		for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4)
2574 			REG_WR_IND(sc, offset, fw->sbss[j]);
2575 	}
2576 
2577 	/* Load the BSS area. */
2578 	offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2579 	if (fw->bss) {
2580 		for (j = 0; j < (fw->bss_len/4); j++, offset += 4)
2581 			REG_WR_IND(sc, offset, fw->bss[j]);
2582 	}
2583 
2584 	/* Load the Read-Only area. */
2585 	offset = cpu_reg->spad_base +
2586 		(fw->rodata_addr - cpu_reg->mips_view_base);
2587 	if (fw->rodata) {
2588 		for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4)
2589 			REG_WR_IND(sc, offset, fw->rodata[j]);
2590 	}
2591 
2592 	/* Clear the pre-fetch instruction. */
2593 	REG_WR_IND(sc, cpu_reg->inst, 0);
2594 	REG_WR_IND(sc, cpu_reg->pc, fw->start_addr);
2595 
2596 	/* Start the CPU. */
2597 	val = REG_RD_IND(sc, cpu_reg->mode);
2598 	val &= ~cpu_reg->mode_value_halt;
2599 	REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
2600 	REG_WR_IND(sc, cpu_reg->mode, val);
2601 }
2602 
2603 
2604 /****************************************************************************/
2605 /* Initialize the RV2P, RX, TX, TPAT, and COM CPUs.                         */
2606 /*                                                                          */
2607 /* Loads the firmware for each CPU and starts the CPU.                      */
2608 /*                                                                          */
2609 /* Returns:                                                                 */
2610 /*   Nothing.                                                               */
2611 /****************************************************************************/
2612 static void
2613 bce_init_cpus(struct bce_softc *sc)
2614 {
2615 	struct cpu_reg cpu_reg;
2616 	struct fw_info fw;
2617 
2618 	/* Initialize the RV2P processor. */
2619 	bce_load_rv2p_fw(sc, bce_rv2p_proc1, sizeof(bce_rv2p_proc1), RV2P_PROC1);
2620 	bce_load_rv2p_fw(sc, bce_rv2p_proc2, sizeof(bce_rv2p_proc2), RV2P_PROC2);
2621 
2622 	/* Initialize the RX Processor. */
2623 	cpu_reg.mode = BCE_RXP_CPU_MODE;
2624 	cpu_reg.mode_value_halt = BCE_RXP_CPU_MODE_SOFT_HALT;
2625 	cpu_reg.mode_value_sstep = BCE_RXP_CPU_MODE_STEP_ENA;
2626 	cpu_reg.state = BCE_RXP_CPU_STATE;
2627 	cpu_reg.state_value_clear = 0xffffff;
2628 	cpu_reg.gpr0 = BCE_RXP_CPU_REG_FILE;
2629 	cpu_reg.evmask = BCE_RXP_CPU_EVENT_MASK;
2630 	cpu_reg.pc = BCE_RXP_CPU_PROGRAM_COUNTER;
2631 	cpu_reg.inst = BCE_RXP_CPU_INSTRUCTION;
2632 	cpu_reg.bp = BCE_RXP_CPU_HW_BREAKPOINT;
2633 	cpu_reg.spad_base = BCE_RXP_SCRATCH;
2634 	cpu_reg.mips_view_base = 0x8000000;
2635 
2636 	fw.ver_major = bce_RXP_b06FwReleaseMajor;
2637 	fw.ver_minor = bce_RXP_b06FwReleaseMinor;
2638 	fw.ver_fix = bce_RXP_b06FwReleaseFix;
2639 	fw.start_addr = bce_RXP_b06FwStartAddr;
2640 
2641 	fw.text_addr = bce_RXP_b06FwTextAddr;
2642 	fw.text_len = bce_RXP_b06FwTextLen;
2643 	fw.text_index = 0;
2644 	fw.text = bce_RXP_b06FwText;
2645 
2646 	fw.data_addr = bce_RXP_b06FwDataAddr;
2647 	fw.data_len = bce_RXP_b06FwDataLen;
2648 	fw.data_index = 0;
2649 	fw.data = bce_RXP_b06FwData;
2650 
2651 	fw.sbss_addr = bce_RXP_b06FwSbssAddr;
2652 	fw.sbss_len = bce_RXP_b06FwSbssLen;
2653 	fw.sbss_index = 0;
2654 	fw.sbss = bce_RXP_b06FwSbss;
2655 
2656 	fw.bss_addr = bce_RXP_b06FwBssAddr;
2657 	fw.bss_len = bce_RXP_b06FwBssLen;
2658 	fw.bss_index = 0;
2659 	fw.bss = bce_RXP_b06FwBss;
2660 
2661 	fw.rodata_addr = bce_RXP_b06FwRodataAddr;
2662 	fw.rodata_len = bce_RXP_b06FwRodataLen;
2663 	fw.rodata_index = 0;
2664 	fw.rodata = bce_RXP_b06FwRodata;
2665 
2666 	DBPRINT(sc, BCE_INFO_RESET, "Loading RX firmware.\n");
2667 	bce_load_cpu_fw(sc, &cpu_reg, &fw);
2668 
2669 	/* Initialize the TX Processor. */
2670 	cpu_reg.mode = BCE_TXP_CPU_MODE;
2671 	cpu_reg.mode_value_halt = BCE_TXP_CPU_MODE_SOFT_HALT;
2672 	cpu_reg.mode_value_sstep = BCE_TXP_CPU_MODE_STEP_ENA;
2673 	cpu_reg.state = BCE_TXP_CPU_STATE;
2674 	cpu_reg.state_value_clear = 0xffffff;
2675 	cpu_reg.gpr0 = BCE_TXP_CPU_REG_FILE;
2676 	cpu_reg.evmask = BCE_TXP_CPU_EVENT_MASK;
2677 	cpu_reg.pc = BCE_TXP_CPU_PROGRAM_COUNTER;
2678 	cpu_reg.inst = BCE_TXP_CPU_INSTRUCTION;
2679 	cpu_reg.bp = BCE_TXP_CPU_HW_BREAKPOINT;
2680 	cpu_reg.spad_base = BCE_TXP_SCRATCH;
2681 	cpu_reg.mips_view_base = 0x8000000;
2682 
2683 	fw.ver_major = bce_TXP_b06FwReleaseMajor;
2684 	fw.ver_minor = bce_TXP_b06FwReleaseMinor;
2685 	fw.ver_fix = bce_TXP_b06FwReleaseFix;
2686 	fw.start_addr = bce_TXP_b06FwStartAddr;
2687 
2688 	fw.text_addr = bce_TXP_b06FwTextAddr;
2689 	fw.text_len = bce_TXP_b06FwTextLen;
2690 	fw.text_index = 0;
2691 	fw.text = bce_TXP_b06FwText;
2692 
2693 	fw.data_addr = bce_TXP_b06FwDataAddr;
2694 	fw.data_len = bce_TXP_b06FwDataLen;
2695 	fw.data_index = 0;
2696 	fw.data = bce_TXP_b06FwData;
2697 
2698 	fw.sbss_addr = bce_TXP_b06FwSbssAddr;
2699 	fw.sbss_len = bce_TXP_b06FwSbssLen;
2700 	fw.sbss_index = 0;
2701 	fw.sbss = bce_TXP_b06FwSbss;
2702 
2703 	fw.bss_addr = bce_TXP_b06FwBssAddr;
2704 	fw.bss_len = bce_TXP_b06FwBssLen;
2705 	fw.bss_index = 0;
2706 	fw.bss = bce_TXP_b06FwBss;
2707 
2708 	fw.rodata_addr = bce_TXP_b06FwRodataAddr;
2709 	fw.rodata_len = bce_TXP_b06FwRodataLen;
2710 	fw.rodata_index = 0;
2711 	fw.rodata = bce_TXP_b06FwRodata;
2712 
2713 	DBPRINT(sc, BCE_INFO_RESET, "Loading TX firmware.\n");
2714 	bce_load_cpu_fw(sc, &cpu_reg, &fw);
2715 
2716 	/* Initialize the TX Patch-up Processor. */
2717 	cpu_reg.mode = BCE_TPAT_CPU_MODE;
2718 	cpu_reg.mode_value_halt = BCE_TPAT_CPU_MODE_SOFT_HALT;
2719 	cpu_reg.mode_value_sstep = BCE_TPAT_CPU_MODE_STEP_ENA;
2720 	cpu_reg.state = BCE_TPAT_CPU_STATE;
2721 	cpu_reg.state_value_clear = 0xffffff;
2722 	cpu_reg.gpr0 = BCE_TPAT_CPU_REG_FILE;
2723 	cpu_reg.evmask = BCE_TPAT_CPU_EVENT_MASK;
2724 	cpu_reg.pc = BCE_TPAT_CPU_PROGRAM_COUNTER;
2725 	cpu_reg.inst = BCE_TPAT_CPU_INSTRUCTION;
2726 	cpu_reg.bp = BCE_TPAT_CPU_HW_BREAKPOINT;
2727 	cpu_reg.spad_base = BCE_TPAT_SCRATCH;
2728 	cpu_reg.mips_view_base = 0x8000000;
2729 
2730 	fw.ver_major = bce_TPAT_b06FwReleaseMajor;
2731 	fw.ver_minor = bce_TPAT_b06FwReleaseMinor;
2732 	fw.ver_fix = bce_TPAT_b06FwReleaseFix;
2733 	fw.start_addr = bce_TPAT_b06FwStartAddr;
2734 
2735 	fw.text_addr = bce_TPAT_b06FwTextAddr;
2736 	fw.text_len = bce_TPAT_b06FwTextLen;
2737 	fw.text_index = 0;
2738 	fw.text = bce_TPAT_b06FwText;
2739 
2740 	fw.data_addr = bce_TPAT_b06FwDataAddr;
2741 	fw.data_len = bce_TPAT_b06FwDataLen;
2742 	fw.data_index = 0;
2743 	fw.data = bce_TPAT_b06FwData;
2744 
2745 	fw.sbss_addr = bce_TPAT_b06FwSbssAddr;
2746 	fw.sbss_len = bce_TPAT_b06FwSbssLen;
2747 	fw.sbss_index = 0;
2748 	fw.sbss = bce_TPAT_b06FwSbss;
2749 
2750 	fw.bss_addr = bce_TPAT_b06FwBssAddr;
2751 	fw.bss_len = bce_TPAT_b06FwBssLen;
2752 	fw.bss_index = 0;
2753 	fw.bss = bce_TPAT_b06FwBss;
2754 
2755 	fw.rodata_addr = bce_TPAT_b06FwRodataAddr;
2756 	fw.rodata_len = bce_TPAT_b06FwRodataLen;
2757 	fw.rodata_index = 0;
2758 	fw.rodata = bce_TPAT_b06FwRodata;
2759 
2760 	DBPRINT(sc, BCE_INFO_RESET, "Loading TPAT firmware.\n");
2761 	bce_load_cpu_fw(sc, &cpu_reg, &fw);
2762 
2763 	/* Initialize the Completion Processor. */
2764 	cpu_reg.mode = BCE_COM_CPU_MODE;
2765 	cpu_reg.mode_value_halt = BCE_COM_CPU_MODE_SOFT_HALT;
2766 	cpu_reg.mode_value_sstep = BCE_COM_CPU_MODE_STEP_ENA;
2767 	cpu_reg.state = BCE_COM_CPU_STATE;
2768 	cpu_reg.state_value_clear = 0xffffff;
2769 	cpu_reg.gpr0 = BCE_COM_CPU_REG_FILE;
2770 	cpu_reg.evmask = BCE_COM_CPU_EVENT_MASK;
2771 	cpu_reg.pc = BCE_COM_CPU_PROGRAM_COUNTER;
2772 	cpu_reg.inst = BCE_COM_CPU_INSTRUCTION;
2773 	cpu_reg.bp = BCE_COM_CPU_HW_BREAKPOINT;
2774 	cpu_reg.spad_base = BCE_COM_SCRATCH;
2775 	cpu_reg.mips_view_base = 0x8000000;
2776 
2777 	fw.ver_major = bce_COM_b06FwReleaseMajor;
2778 	fw.ver_minor = bce_COM_b06FwReleaseMinor;
2779 	fw.ver_fix = bce_COM_b06FwReleaseFix;
2780 	fw.start_addr = bce_COM_b06FwStartAddr;
2781 
2782 	fw.text_addr = bce_COM_b06FwTextAddr;
2783 	fw.text_len = bce_COM_b06FwTextLen;
2784 	fw.text_index = 0;
2785 	fw.text = bce_COM_b06FwText;
2786 
2787 	fw.data_addr = bce_COM_b06FwDataAddr;
2788 	fw.data_len = bce_COM_b06FwDataLen;
2789 	fw.data_index = 0;
2790 	fw.data = bce_COM_b06FwData;
2791 
2792 	fw.sbss_addr = bce_COM_b06FwSbssAddr;
2793 	fw.sbss_len = bce_COM_b06FwSbssLen;
2794 	fw.sbss_index = 0;
2795 	fw.sbss = bce_COM_b06FwSbss;
2796 
2797 	fw.bss_addr = bce_COM_b06FwBssAddr;
2798 	fw.bss_len = bce_COM_b06FwBssLen;
2799 	fw.bss_index = 0;
2800 	fw.bss = bce_COM_b06FwBss;
2801 
2802 	fw.rodata_addr = bce_COM_b06FwRodataAddr;
2803 	fw.rodata_len = bce_COM_b06FwRodataLen;
2804 	fw.rodata_index = 0;
2805 	fw.rodata = bce_COM_b06FwRodata;
2806 
2807 	DBPRINT(sc, BCE_INFO_RESET, "Loading COM firmware.\n");
2808 	bce_load_cpu_fw(sc, &cpu_reg, &fw);
2809 }
2810 
2811 
2812 /****************************************************************************/
2813 /* Initialize context memory.                                               */
2814 /*                                                                          */
2815 /* Clears the memory associated with each Context ID (CID).                 */
2816 /*                                                                          */
2817 /* Returns:                                                                 */
2818 /*   Nothing.                                                               */
2819 /****************************************************************************/
2820 static void
2821 bce_init_ctx(struct bce_softc *sc)
2822 {
2823 	uint32_t vcid = 96;
2824 
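	/*
	 * Walk all 96 context IDs from the highest CID down, map each one
	 * through the context window registers and zero its contents one
	 * PHY_CTX_SIZE page at a time.
	 */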
2825 	while (vcid) {
2826 		uint32_t vcid_addr, pcid_addr, offset;
2827 		int i;
2828 
2829 		vcid--;
2830 
2831 		vcid_addr = GET_CID_ADDR(vcid);
2832 		pcid_addr = vcid_addr;
2833 
2834 		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2835 			vcid_addr += (i << PHY_CTX_SHIFT);
2836 			pcid_addr += (i << PHY_CTX_SHIFT);
2837 
2838 			REG_WR(sc, BCE_CTX_VIRT_ADDR, vcid_addr);
2839 			REG_WR(sc, BCE_CTX_PAGE_TBL, pcid_addr);
2840 
2841 			/* Zero out the context. */
2842 			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2843 				CTX_WR(sc, vcid_addr, offset, 0);
2844 		}
2845 	}
2846 }
2847 
2848 
2849 /****************************************************************************/
2850 /* Fetch the permanent MAC address of the controller.                       */
2851 /*                                                                          */
2852 /* Returns:                                                                 */
2853 /*   Nothing.                                                               */
2854 /****************************************************************************/
2855 static void
2856 bce_get_mac_addr(struct bce_softc *sc)
2857 {
2858 	uint32_t mac_lo = 0, mac_hi = 0;
2859 
2860 	/*
2861 	 * The NetXtreme II bootcode populates various NIC
2862 	 * power-on and runtime configuration items in a
2863 	 * shared memory area.  The factory configured MAC
2864 	 * address is available from both NVRAM and the
2865 	 * shared memory area so we'll read the value from
2866 	 * shared memory for speed.
2867 	 */
2868 
2869 	mac_hi = REG_RD_IND(sc, sc->bce_shmem_base + BCE_PORT_HW_CFG_MAC_UPPER);
2870 	mac_lo = REG_RD_IND(sc, sc->bce_shmem_base + BCE_PORT_HW_CFG_MAC_LOWER);
2871 
2872 	if (mac_lo == 0 && mac_hi == 0) {
2873 		if_printf(&sc->arpcom.ac_if, "Invalid Ethernet address!\n");
2874 	} else {
2875 		sc->eaddr[0] = (u_char)(mac_hi >> 8);
2876 		sc->eaddr[1] = (u_char)(mac_hi >> 0);
2877 		sc->eaddr[2] = (u_char)(mac_lo >> 24);
2878 		sc->eaddr[3] = (u_char)(mac_lo >> 16);
2879 		sc->eaddr[4] = (u_char)(mac_lo >> 8);
2880 		sc->eaddr[5] = (u_char)(mac_lo >> 0);
2881 	}
2882 
2883 	DBPRINT(sc, BCE_INFO, "Permanent Ethernet address = %6D\n", sc->eaddr, ":");
2884 }
2885 
2886 
2887 /****************************************************************************/
2888 /* Program the MAC address.                                                 */
2889 /*                                                                          */
2890 /* Returns:                                                                 */
2891 /*   Nothing.                                                               */
2892 /****************************************************************************/
2893 static void
2894 bce_set_mac_addr(struct bce_softc *sc)
2895 {
2896 	const uint8_t *mac_addr = sc->eaddr;
2897 	uint32_t val;
2898 
2899 	DBPRINT(sc, BCE_INFO, "Setting Ethernet address = %6D\n",
2900 		sc->eaddr, ":");
2901 
2902 	val = (mac_addr[0] << 8) | mac_addr[1];
2903 	REG_WR(sc, BCE_EMAC_MAC_MATCH0, val);
2904 
2905 	val = (mac_addr[2] << 24) |
2906 	      (mac_addr[3] << 16) |
2907 	      (mac_addr[4] << 8) |
2908 	      mac_addr[5];
2909 	REG_WR(sc, BCE_EMAC_MAC_MATCH1, val);
2910 }
2911 
2912 
2913 /****************************************************************************/
2914 /* Stop the controller.                                                     */
2915 /*                                                                          */
2916 /* Returns:                                                                 */
2917 /*   Nothing.                                                               */
2918 /****************************************************************************/
2919 static void
2920 bce_stop(struct bce_softc *sc)
2921 {
2922 	struct ifnet *ifp = &sc->arpcom.ac_if;
2923 	struct mii_data *mii = device_get_softc(sc->bce_miibus);
2924 	struct ifmedia_entry *ifm;
2925 	int mtmp, itmp;
2926 
2927 	ASSERT_SERIALIZED(ifp->if_serializer);
2928 
2929 	callout_stop(&sc->bce_stat_ch);
2930 
2931 	/* Disable the transmit/receive blocks. */
2932 	REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS, 0x5ffffff);
2933 	REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS);
2934 	DELAY(20);
2935 
2936 	bce_disable_intr(sc);
2937 
2938 	/* Tell firmware that the driver is going away. */
2939 	bce_reset(sc, BCE_DRV_MSG_CODE_SUSPEND_NO_WOL);
2940 
2941 	/* Free the RX lists. */
2942 	bce_free_rx_chain(sc);
2943 
2944 	/* Free TX buffers. */
2945 	bce_free_tx_chain(sc);
2946 
2947 	/*
2948 	 * Isolate/power down the PHY, but leave the media selection
2949 	 * unchanged so that things will be put back to normal when
2950 	 * we bring the interface back up.
2951 	 *
2952 	 * 'mii' may be NULL if bce_stop() is called by bce_detach().
2953 	 */
2954 	if (mii != NULL) {
2955 		itmp = ifp->if_flags;
2956 		ifp->if_flags |= IFF_UP;
2957 		ifm = mii->mii_media.ifm_cur;
2958 		mtmp = ifm->ifm_media;
2959 		ifm->ifm_media = IFM_ETHER | IFM_NONE;
2960 		mii_mediachg(mii);
2961 		ifm->ifm_media = mtmp;
2962 		ifp->if_flags = itmp;
2963 	}
2964 
2965 	sc->bce_link = 0;
2966 	sc->bce_coalchg_mask = 0;
2967 
2968 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2969 	ifp->if_timer = 0;
2970 
2971 	bce_mgmt_init(sc);
2972 }
2973 
2974 
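/****************************************************************************/
/* Reset the controller.                                                    */
/*                                                                          */
/* Notifies the bootcode firmware of the reset, issues a core reset         */
/* through the PCI configuration registers and waits for the firmware to    */
/* finish its initialization.                                               */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/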
2975 static int
2976 bce_reset(struct bce_softc *sc, uint32_t reset_code)
2977 {
2978 	uint32_t val;
2979 	int i, rc = 0;
2980 
2981 	/* Wait for pending PCI transactions to complete. */
2982 	REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS,
2983 	       BCE_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
2984 	       BCE_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
2985 	       BCE_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
2986 	       BCE_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
2987 	val = REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS);
2988 	DELAY(5);
2989 
2990 	/* Assume bootcode is running. */
2991 	sc->bce_fw_timed_out = 0;
2992 
2993 	/* Give the firmware a chance to prepare for the reset. */
2994 	rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT0 | reset_code);
2995 	if (rc) {
2996 		if_printf(&sc->arpcom.ac_if,
2997 			  "Firmware is not ready for reset\n");
2998 		return rc;
2999 	}
3000 
3001 	/* Set a firmware reminder that this is a soft reset. */
3002 	REG_WR_IND(sc, sc->bce_shmem_base + BCE_DRV_RESET_SIGNATURE,
3003 		   BCE_DRV_RESET_SIGNATURE_MAGIC);
3004 
3005 	/* Dummy read to force the chip to complete all current transactions. */
3006 	val = REG_RD(sc, BCE_MISC_ID);
3007 
3008 	/* Chip reset. */
3009 	val = BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3010 	      BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3011 	      BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3012 	REG_WR(sc, BCE_PCICFG_MISC_CONFIG, val);
3013 
3014 	/* Allow up to 100us (10 x 10us) for the reset to complete. */
3015 	for (i = 0; i < 10; i++) {
3016 		val = REG_RD(sc, BCE_PCICFG_MISC_CONFIG);
3017 		if ((val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3018 			    BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) {
3019 			break;
3020 		}
3021 		DELAY(10);
3022 	}
3023 
3024 	/* Check that reset completed successfully. */
3025 	if (val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3026 		   BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
3027 		if_printf(&sc->arpcom.ac_if, "Reset failed!\n");
3028 		return EBUSY;
3029 	}
3030 
3031 	/* Make sure byte swapping is properly configured. */
3032 	val = REG_RD(sc, BCE_PCI_SWAP_DIAG0);
3033 	if (val != 0x01020304) {
3034 		if_printf(&sc->arpcom.ac_if, "Byte swap is incorrect!\n");
3035 		return ENODEV;
3036 	}
3037 
3038 	/* Just completed a reset, assume that firmware is running again. */
3039 	sc->bce_fw_timed_out = 0;
3040 
3041 	/* Wait for the firmware to finish its initialization. */
3042 	rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT1 | reset_code);
3043 	if (rc) {
3044 		if_printf(&sc->arpcom.ac_if,
3045 			  "Firmware did not complete initialization!\n");
3046 	}
3047 	return rc;
3048 }
3049 
3050 
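/****************************************************************************/
/* Perform the low-level chip initialization (DMA configuration, context    */
/* memory, on-chip CPU firmware and NVRAM access).                          */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/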
3051 static int
3052 bce_chipinit(struct bce_softc *sc)
3053 {
3054 	uint32_t val;
3055 	int rc = 0;
3056 
3057 	/* Make sure the interrupt is not active. */
3058 	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, BCE_PCICFG_INT_ACK_CMD_MASK_INT);
3059 
3060 	/*
3061 	 * Initialize DMA byte/word swapping, configure the number of DMA
3062 	 * channels and PCI clock compensation delay.
3063 	 */
3064 	val = BCE_DMA_CONFIG_DATA_BYTE_SWAP |
3065 	      BCE_DMA_CONFIG_DATA_WORD_SWAP |
3066 #if BYTE_ORDER == BIG_ENDIAN
3067 	      BCE_DMA_CONFIG_CNTL_BYTE_SWAP |
3068 #endif
3069 	      BCE_DMA_CONFIG_CNTL_WORD_SWAP |
3070 	      DMA_READ_CHANS << 12 |
3071 	      DMA_WRITE_CHANS << 16;
3072 
3073 	val |= (0x2 << 20) | BCE_DMA_CONFIG_CNTL_PCI_COMP_DLY;
3074 
3075 	if ((sc->bce_flags & BCE_PCIX_FLAG) && sc->bus_speed_mhz == 133)
3076 		val |= BCE_DMA_CONFIG_PCI_FAST_CLK_CMP;
3077 
3078 	/*
3079 	 * This setting resolves a problem observed on certain Intel PCI
3080 	 * chipsets that cannot handle multiple outstanding DMA operations.
3081 	 * See errata E9_5706A1_65.
3082 	 */
3083 	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706 &&
3084 	    BCE_CHIP_ID(sc) != BCE_CHIP_ID_5706_A0 &&
3085 	    !(sc->bce_flags & BCE_PCIX_FLAG))
3086 		val |= BCE_DMA_CONFIG_CNTL_PING_PONG_DMA;
3087 
3088 	REG_WR(sc, BCE_DMA_CONFIG, val);
3089 
3090 	/* Clear the PCI-X relaxed ordering bit. See errata E3_5708CA0_570. */
3091 	if (sc->bce_flags & BCE_PCIX_FLAG) {
3092 		uint16_t cmd;
3093 
3094 		cmd = pci_read_config(sc->bce_dev, BCE_PCI_PCIX_CMD, 2);
3095 		pci_write_config(sc->bce_dev, BCE_PCI_PCIX_CMD, cmd & ~0x2, 2);
3096 	}
3097 
3098 	/* Enable the RX_V2P and Context state machines before access. */
3099 	REG_WR(sc, BCE_MISC_ENABLE_SET_BITS,
3100 	       BCE_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
3101 	       BCE_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
3102 	       BCE_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
3103 
3104 	/* Initialize context mapping and zero out the quick contexts. */
3105 	bce_init_ctx(sc);
3106 
3107 	/* Initialize the on-board CPUs */
3108 	bce_init_cpus(sc);
3109 
3110 	/* Prepare NVRAM for access. */
3111 	rc = bce_init_nvram(sc);
3112 	if (rc != 0)
3113 		return rc;
3114 
3115 	/* Set the kernel bypass block size */
3116 	val = REG_RD(sc, BCE_MQ_CONFIG);
3117 	val &= ~BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE;
3118 	val |= BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
3119 	REG_WR(sc, BCE_MQ_CONFIG, val);
3120 
3121 	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
3122 	REG_WR(sc, BCE_MQ_KNL_BYP_WIND_START, val);
3123 	REG_WR(sc, BCE_MQ_KNL_WIND_END, val);
3124 
3125 	/* Set the page size and clear the RV2P processor stall bits. */
3126 	val = (BCM_PAGE_BITS - 8) << 24;
3127 	REG_WR(sc, BCE_RV2P_CONFIG, val);
3128 
3129 	/* Configure page size. */
3130 	val = REG_RD(sc, BCE_TBDR_CONFIG);
3131 	val &= ~BCE_TBDR_CONFIG_PAGE_SIZE;
3132 	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
3133 	REG_WR(sc, BCE_TBDR_CONFIG, val);
3134 
3135 	return 0;
3136 }
3137 
3138 
3139 /****************************************************************************/
3140 /* Initialize the controller in preparation to send/receive traffic.        */
3141 /*                                                                          */
3142 /* Returns:                                                                 */
3143 /*   0 for success, positive value for failure.                             */
3144 /****************************************************************************/
3145 static int
3146 bce_blockinit(struct bce_softc *sc)
3147 {
3148 	uint32_t reg, val;
3149 	int rc = 0;
3150 
3151 	/* Load the hardware default MAC address. */
3152 	bce_set_mac_addr(sc);
3153 
3154 	/* Set the Ethernet backoff seed value */
3155 	val = sc->eaddr[0] + (sc->eaddr[1] << 8) + (sc->eaddr[2] << 16) +
3156 	      sc->eaddr[3] + (sc->eaddr[4] << 8) + (sc->eaddr[5] << 16);
3157 	REG_WR(sc, BCE_EMAC_BACKOFF_SEED, val);
3158 
3159 	sc->last_status_idx = 0;
3160 	sc->rx_mode = BCE_EMAC_RX_MODE_SORT_MODE;
3161 
3162 	/* Set up link change interrupt generation. */
3163 	REG_WR(sc, BCE_EMAC_ATTENTION_ENA, BCE_EMAC_ATTENTION_ENA_LINK);
3164 
3165 	/* Program the physical address of the status block. */
3166 	REG_WR(sc, BCE_HC_STATUS_ADDR_L, BCE_ADDR_LO(sc->status_block_paddr));
3167 	REG_WR(sc, BCE_HC_STATUS_ADDR_H, BCE_ADDR_HI(sc->status_block_paddr));
3168 
3169 	/* Program the physical address of the statistics block. */
3170 	REG_WR(sc, BCE_HC_STATISTICS_ADDR_L,
3171 	       BCE_ADDR_LO(sc->stats_block_paddr));
3172 	REG_WR(sc, BCE_HC_STATISTICS_ADDR_H,
3173 	       BCE_ADDR_HI(sc->stats_block_paddr));
3174 
3175 	/* Program various host coalescing parameters. */
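	/*
	 * Each trip/ticks register below packs two 16-bit values: the
	 * "*_int" variant in the upper half and the normal value in the
	 * lower half.
	 */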
3176 	REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
3177 	       (sc->bce_tx_quick_cons_trip_int << 16) |
3178 	       sc->bce_tx_quick_cons_trip);
3179 	REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
3180 	       (sc->bce_rx_quick_cons_trip_int << 16) |
3181 	       sc->bce_rx_quick_cons_trip);
3182 	REG_WR(sc, BCE_HC_COMP_PROD_TRIP,
3183 	       (sc->bce_comp_prod_trip_int << 16) | sc->bce_comp_prod_trip);
3184 	REG_WR(sc, BCE_HC_TX_TICKS,
3185 	       (sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks);
3186 	REG_WR(sc, BCE_HC_RX_TICKS,
3187 	       (sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks);
3188 	REG_WR(sc, BCE_HC_COM_TICKS,
3189 	       (sc->bce_com_ticks_int << 16) | sc->bce_com_ticks);
3190 	REG_WR(sc, BCE_HC_CMD_TICKS,
3191 	       (sc->bce_cmd_ticks_int << 16) | sc->bce_cmd_ticks);
3192 	REG_WR(sc, BCE_HC_STATS_TICKS, (sc->bce_stats_ticks & 0xffff00));
3193 	REG_WR(sc, BCE_HC_STAT_COLLECT_TICKS, 0xbb8);	/* 3ms */
3194 	REG_WR(sc, BCE_HC_CONFIG,
3195 	       BCE_HC_CONFIG_RX_TMR_MODE |
3196 	       BCE_HC_CONFIG_TX_TMR_MODE |
3197 	       BCE_HC_CONFIG_COLLECT_STATS);
3198 
3199 	/* Clear the internal statistics counters. */
3200 	REG_WR(sc, BCE_HC_COMMAND, BCE_HC_COMMAND_CLR_STAT_NOW);
3201 
3202 	/* Verify that bootcode is running. */
3203 	reg = REG_RD_IND(sc, sc->bce_shmem_base + BCE_DEV_INFO_SIGNATURE);
3204 
3205 	DBRUNIF(DB_RANDOMTRUE(bce_debug_bootcode_running_failure),
3206 		if_printf(&sc->arpcom.ac_if,
3207 			  "%s(%d): Simulating bootcode failure.\n",
3208 			  __FILE__, __LINE__);
3209 		reg = 0);
3210 
3211 	if ((reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
3212 	    BCE_DEV_INFO_SIGNATURE_MAGIC) {
3213 		if_printf(&sc->arpcom.ac_if,
3214 			  "Bootcode not running! Found: 0x%08X, "
3215 			  "Expected: 0x%08X\n",
3216 			  reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK,
3217 			  BCE_DEV_INFO_SIGNATURE_MAGIC);
3218 		return ENODEV;
3219 	}
3220 
3221 	/* Check if any management firmware is running. */
3222 	reg = REG_RD_IND(sc, sc->bce_shmem_base + BCE_PORT_FEATURE);
3223 	if (reg & (BCE_PORT_FEATURE_ASF_ENABLED |
3224 		   BCE_PORT_FEATURE_IMD_ENABLED)) {
3225 		DBPRINT(sc, BCE_INFO, "Management F/W Enabled.\n");
3226 		sc->bce_flags |= BCE_MFW_ENABLE_FLAG;
3227 	}
3228 
3229 	sc->bce_fw_ver =
3230 		REG_RD_IND(sc, sc->bce_shmem_base + BCE_DEV_INFO_BC_REV);
3231 	DBPRINT(sc, BCE_INFO, "bootcode rev = 0x%08X\n", sc->bce_fw_ver);
3232 
3233 	/* Allow bootcode to apply any additional fixes before enabling MAC. */
3234 	rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT2 | BCE_DRV_MSG_CODE_RESET);
3235 
3236 	/* Enable link state change interrupt generation. */
3237 	REG_WR(sc, BCE_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);
3238 
3239 	/* Enable all remaining blocks in the MAC. */
3240 	REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, 0x5ffffff);
3241 	REG_RD(sc, BCE_MISC_ENABLE_SET_BITS);
3242 	DELAY(20);
3243 
3244 	return 0;
3245 }
3246 
3247 
3248 /****************************************************************************/
3249 /* Encapsulate an mbuf cluster into the rx_bd chain.                        */
3250 /*                                                                          */
3251 /* The NetXtreme II can support Jumbo frames by using multiple rx_bd's.     */
3252 /* This routine will map an mbuf cluster into 1 or more rx_bd's as          */
3253 /* necessary.                                                               */
3254 /*                                                                          */
3255 /* Returns:                                                                 */
3256 /*   0 for success, positive value for failure.                             */
3257 /****************************************************************************/
3258 static int
3259 bce_newbuf_std(struct bce_softc *sc, struct mbuf *m,
3260 	       uint16_t *prod, uint16_t *chain_prod, uint32_t *prod_bseq)
3261 {
3262 	bus_dmamap_t map;
3263 	struct bce_dmamap_arg ctx;
3264 	bus_dma_segment_t seg;
3265 	struct mbuf *m_new;
3266 	struct rx_bd *rxbd;
3267 	int error;
3268 #ifdef BCE_DEBUG
3269 	uint16_t debug_chain_prod = *chain_prod;
3270 #endif
3271 
3272 	/* Make sure the inputs are valid. */
3273 	DBRUNIF((*chain_prod > MAX_RX_BD),
3274 		if_printf(&sc->arpcom.ac_if, "%s(%d): "
3275 			  "RX producer out of range: 0x%04X > 0x%04X\n",
3276 			  __FILE__, __LINE__,
3277 			  *chain_prod, (uint16_t)MAX_RX_BD));
3278 
3279 	DBPRINT(sc, BCE_VERBOSE_RECV, "%s(enter): prod = 0x%04X, chain_prod = 0x%04X, "
3280 		"prod_bseq = 0x%08X\n", __func__, *prod, *chain_prod, *prod_bseq);
3281 
3282 	if (m == NULL) {
3283 		DBRUNIF(DB_RANDOMTRUE(bce_debug_mbuf_allocation_failure),
3284 			if_printf(&sc->arpcom.ac_if, "%s(%d): "
3285 				  "Simulating mbuf allocation failure.\n",
3286 				  __FILE__, __LINE__);
3287 			sc->mbuf_alloc_failed++;
3288 			return ENOBUFS);
3289 
3290 		/* This is a new mbuf allocation. */
3291 		m_new = m_getcl(MB_DONTWAIT, MT_DATA, M_PKTHDR);
3292 		if (m_new == NULL)
3293 			return ENOBUFS;
3294 		DBRUNIF(1, sc->rx_mbuf_alloc++);
3295 	} else {
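		/*
		 * Reuse the caller-supplied mbuf; reset its data pointer
		 * back to the start of the cluster in case it was
		 * adjusted by a previous use.
		 */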
3296 		m_new = m;
3297 		m_new->m_data = m_new->m_ext.ext_buf;
3298 	}
3299 	m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
3300 
3301 	/* Map the mbuf cluster into device memory. */
3302 	map = sc->rx_mbuf_map[*chain_prod];
3303 
3304 	ctx.bce_maxsegs = 1;
3305 	ctx.bce_segs = &seg;
3306 	error = bus_dmamap_load_mbuf(sc->rx_mbuf_tag, map, m_new,
3307 				     bce_dma_map_mbuf, &ctx, BUS_DMA_NOWAIT);
3308 	if (error || ctx.bce_maxsegs == 0) {
3309 		if_printf(&sc->arpcom.ac_if,
3310 			  "Error mapping mbuf into RX chain!\n");
3311 
3312 		if (m == NULL)
3313 			m_freem(m_new);
3314 
3315 		DBRUNIF(1, sc->rx_mbuf_alloc--);
3316 		return ENOBUFS;
3317 	}
3318 
3319 	/* Watch for overflow. */
3320 	DBRUNIF((sc->free_rx_bd > USABLE_RX_BD),
3321 		if_printf(&sc->arpcom.ac_if, "%s(%d): "
3322 			  "Too many free rx_bd (0x%04X > 0x%04X)!\n",
3323 			  __FILE__, __LINE__, sc->free_rx_bd,
3324 			  (uint16_t)USABLE_RX_BD));
3325 
3326 	/* Update some debug statistic counters */
3327 	DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark),
3328 		sc->rx_low_watermark = sc->free_rx_bd);
3329 	DBRUNIF((sc->free_rx_bd == 0), sc->rx_empty_count++);
3330 
3331 	/* Setup the rx_bd for the first segment. */
3332 	rxbd = &sc->rx_bd_chain[RX_PAGE(*chain_prod)][RX_IDX(*chain_prod)];
3333 
3334 	rxbd->rx_bd_haddr_lo = htole32(BCE_ADDR_LO(seg.ds_addr));
3335 	rxbd->rx_bd_haddr_hi = htole32(BCE_ADDR_HI(seg.ds_addr));
3336 	rxbd->rx_bd_len = htole32(seg.ds_len);
3337 	rxbd->rx_bd_flags = htole32(RX_BD_FLAGS_START);
3338 	*prod_bseq += seg.ds_len;
3339 
3340 	rxbd->rx_bd_flags |= htole32(RX_BD_FLAGS_END);
3341 
3342 	/* Save the mbuf and update our counter. */
3343 	sc->rx_mbuf_ptr[*chain_prod] = m_new;
3344 	sc->free_rx_bd--;
3345 
3346 	DBRUN(BCE_VERBOSE_RECV,
3347 	      bce_dump_rx_mbuf_chain(sc, debug_chain_prod, 1));
3348 
3349 	DBPRINT(sc, BCE_VERBOSE_RECV, "%s(exit): prod = 0x%04X, chain_prod = 0x%04X, "
3350 		"prod_bseq = 0x%08X\n", __func__, *prod, *chain_prod, *prod_bseq);
3351 
3352 	return 0;
3353 }
3354 
3355 
3356 /****************************************************************************/
3357 /* Allocate memory and initialize the TX data structures.                   */
3358 /*                                                                          */
3359 /* Returns:                                                                 */
3360 /*   0 for success, positive value for failure.                             */
3361 /****************************************************************************/
3362 static int
3363 bce_init_tx_chain(struct bce_softc *sc)
3364 {
3365 	struct tx_bd *txbd;
3366 	uint32_t val;
3367 	int i, rc = 0;
3368 
3369 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __func__);
3370 
3371 	/* Set the initial TX producer/consumer indices. */
3372 	sc->tx_prod = 0;
3373 	sc->tx_cons = 0;
3374 	sc->tx_prod_bseq   = 0;
3375 	sc->used_tx_bd = 0;
3376 	sc->max_tx_bd = USABLE_TX_BD;
3377 	DBRUNIF(1, sc->tx_hi_watermark = USABLE_TX_BD);
3378 	DBRUNIF(1, sc->tx_full_count = 0);
3379 
3380 	/*
3381 	 * The NetXtreme II supports a linked-list structure called
3382 	 * a Buffer Descriptor Chain (or BD chain).  A BD chain
3383 	 * consists of a series of 1 or more chain pages, each of which
3384 	 * consists of a fixed number of BD entries.
3385 	 * The last BD entry on each page is a pointer to the next page
3386 	 * in the chain, and the last pointer in the BD chain
3387 	 * points back to the beginning of the chain.
3388 	 */
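	/*
	 * For example, with TX_PAGES == 2 the chain looks like this:
	 *
	 *   page 0: tx_bd[0] ... tx_bd[USABLE_TX_BD_PER_PAGE - 1], next -> page 1
	 *   page 1: tx_bd[0] ... tx_bd[USABLE_TX_BD_PER_PAGE - 1], next -> page 0
	 */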
3389 
3390 	/* Set the TX next pointer chain entries. */
3391 	for (i = 0; i < TX_PAGES; i++) {
3392 		int j;
3393 
3394 		txbd = &sc->tx_bd_chain[i][USABLE_TX_BD_PER_PAGE];
3395 
3396 		/* Check if we've reached the last page. */
3397 		if (i == (TX_PAGES - 1))
3398 			j = 0;
3399 		else
3400 			j = i + 1;
3401 
3402 		txbd->tx_bd_haddr_hi =
3403 			htole32(BCE_ADDR_HI(sc->tx_bd_chain_paddr[j]));
3404 		txbd->tx_bd_haddr_lo =
3405 			htole32(BCE_ADDR_LO(sc->tx_bd_chain_paddr[j]));
3406 	}
3407 
3408 	for (i = 0; i < TX_PAGES; ++i) {
3409 		bus_dmamap_sync(sc->tx_bd_chain_tag, sc->tx_bd_chain_map[i],
3410 				BUS_DMASYNC_PREWRITE);
3411 	}
3412 
3413 	/* Initialize the context ID for an L2 TX chain. */
3414 	val = BCE_L2CTX_TYPE_TYPE_L2;
3415 	val |= BCE_L2CTX_TYPE_SIZE_L2;
3416 	CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TYPE, val);
3417 
3418 	val = BCE_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
3419 	CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_CMD_TYPE, val);
3420 
3421 	/* Point the hardware to the first page in the chain. */
3422 	val = BCE_ADDR_HI(sc->tx_bd_chain_paddr[0]);
3423 	CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TBDR_BHADDR_HI, val);
3424 	val = BCE_ADDR_LO(sc->tx_bd_chain_paddr[0]);
3425 	CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TBDR_BHADDR_LO, val);
3426 
3427 	DBRUN(BCE_VERBOSE_SEND, bce_dump_tx_chain(sc, 0, TOTAL_TX_BD));
3428 
3429 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __func__);
3430 
3431 	return(rc);
3432 }
3433 
3434 
3435 /****************************************************************************/
3436 /* Free memory and clear the TX data structures.                            */
3437 /*                                                                          */
3438 /* Returns:                                                                 */
3439 /*   Nothing.                                                               */
3440 /****************************************************************************/
3441 static void
3442 bce_free_tx_chain(struct bce_softc *sc)
3443 {
3444 	int i;
3445 
3446 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __func__);
3447 
3448 	/* Unmap, unload, and free any mbufs still in the TX mbuf chain. */
3449 	for (i = 0; i < TOTAL_TX_BD; i++) {
3450 		if (sc->tx_mbuf_ptr[i] != NULL) {
3451 			bus_dmamap_sync(sc->tx_mbuf_tag, sc->tx_mbuf_map[i],
3452 					BUS_DMASYNC_POSTWRITE);
3453 			bus_dmamap_unload(sc->tx_mbuf_tag, sc->tx_mbuf_map[i]);
3454 			m_freem(sc->tx_mbuf_ptr[i]);
3455 			sc->tx_mbuf_ptr[i] = NULL;
3456 			DBRUNIF(1, sc->tx_mbuf_alloc--);
3457 		}
3458 	}
3459 
3460 	/* Clear each TX chain page. */
3461 	for (i = 0; i < TX_PAGES; i++)
3462 		bzero(sc->tx_bd_chain[i], BCE_TX_CHAIN_PAGE_SZ);
3463 	sc->used_tx_bd = 0;
3464 
3465 	/* Check if we lost any mbufs in the process. */
3466 	DBRUNIF((sc->tx_mbuf_alloc),
3467 		if_printf(&sc->arpcom.ac_if,
3468 			  "%s(%d): Memory leak! "
3469 			  "Lost %d mbufs from tx chain!\n",
3470 			  __FILE__, __LINE__, sc->tx_mbuf_alloc));
3471 
3472 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __func__);
3473 }
3474 
3475 
3476 /****************************************************************************/
3477 /* Allocate memory and initialize the RX data structures.                   */
3478 /*                                                                          */
3479 /* Returns:                                                                 */
3480 /*   0 for success, positive value for failure.                             */
3481 /****************************************************************************/
3482 static int
3483 bce_init_rx_chain(struct bce_softc *sc)
3484 {
3485 	struct rx_bd *rxbd;
3486 	int i, rc = 0;
3487 	uint16_t prod, chain_prod;
3488 	uint32_t prod_bseq, val;
3489 
3490 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __func__);
3491 
3492 	/* Initialize the RX producer and consumer indices. */
3493 	sc->rx_prod = 0;
3494 	sc->rx_cons = 0;
3495 	sc->rx_prod_bseq = 0;
3496 	sc->free_rx_bd = USABLE_RX_BD;
3497 	sc->max_rx_bd = USABLE_RX_BD;
3498 	DBRUNIF(1, sc->rx_low_watermark = USABLE_RX_BD);
3499 	DBRUNIF(1, sc->rx_empty_count = 0);
3500 
3501 	/* Initialize the RX next pointer chain entries. */
3502 	for (i = 0; i < RX_PAGES; i++) {
3503 		int j;
3504 
3505 		rxbd = &sc->rx_bd_chain[i][USABLE_RX_BD_PER_PAGE];
3506 
3507 		/* Check if we've reached the last page. */
3508 		if (i == (RX_PAGES - 1))
3509 			j = 0;
3510 		else
3511 			j = i + 1;
3512 
3513 		/* Setup the chain page pointers. */
3514 		rxbd->rx_bd_haddr_hi =
3515 			htole32(BCE_ADDR_HI(sc->rx_bd_chain_paddr[j]));
3516 		rxbd->rx_bd_haddr_lo =
3517 			htole32(BCE_ADDR_LO(sc->rx_bd_chain_paddr[j]));
3518 	}
3519 
3520 	/* Initialize the context ID for an L2 RX chain. */
3521 	val = BCE_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
3522 	val |= BCE_L2CTX_CTX_TYPE_SIZE_L2;
3523 	val |= 0x02 << 8;
3524 	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_CTX_TYPE, val);
3525 
3526 	/* Point the hardware to the first page in the chain. */
3527 	/* XXX shouldn't this be done after RX descriptor initialization? */
3528 	val = BCE_ADDR_HI(sc->rx_bd_chain_paddr[0]);
3529 	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_NX_BDHADDR_HI, val);
3530 	val = BCE_ADDR_LO(sc->rx_bd_chain_paddr[0]);
3531 	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_NX_BDHADDR_LO, val);
3532 
3533 	/* Allocate mbuf clusters for the rx_bd chain. */
3534 	prod = prod_bseq = 0;
3535 	while (prod < TOTAL_RX_BD) {
3536 		chain_prod = RX_CHAIN_IDX(prod);
3537 		if (bce_newbuf_std(sc, NULL, &prod, &chain_prod, &prod_bseq)) {
3538 			if_printf(&sc->arpcom.ac_if,
3539 				  "Error filling RX chain: rx_bd[0x%04X]!\n",
3540 				  chain_prod);
3541 			rc = ENOBUFS;
3542 			break;
3543 		}
3544 		prod = NEXT_RX_BD(prod);
3545 	}
3546 
3547 	/* Save the RX chain producer index. */
3548 	sc->rx_prod = prod;
3549 	sc->rx_prod_bseq = prod_bseq;
3550 
3551 	for (i = 0; i < RX_PAGES; i++) {
3552 		bus_dmamap_sync(sc->rx_bd_chain_tag, sc->rx_bd_chain_map[i],
3553 				BUS_DMASYNC_PREWRITE);
3554 	}
3555 
3556 	/* Tell the chip about the waiting rx_bd's. */
3557 	REG_WR16(sc, MB_RX_CID_ADDR + BCE_L2CTX_HOST_BDIDX, sc->rx_prod);
3558 	REG_WR(sc, MB_RX_CID_ADDR + BCE_L2CTX_HOST_BSEQ, sc->rx_prod_bseq);
3559 
3560 	DBRUN(BCE_VERBOSE_RECV, bce_dump_rx_chain(sc, 0, TOTAL_RX_BD));
3561 
3562 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __func__);
3563 
3564 	return(rc);
3565 }
3566 
3567 
3568 /****************************************************************************/
3569 /* Free memory and clear the RX data structures.                            */
3570 /*                                                                          */
3571 /* Returns:                                                                 */
3572 /*   Nothing.                                                               */
3573 /****************************************************************************/
3574 static void
3575 bce_free_rx_chain(struct bce_softc *sc)
3576 {
3577 	int i;
3578 
3579 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __func__);
3580 
3581 	/* Free any mbufs still in the RX mbuf chain. */
3582 	for (i = 0; i < TOTAL_RX_BD; i++) {
3583 		if (sc->rx_mbuf_ptr[i] != NULL) {
3584 			bus_dmamap_sync(sc->rx_mbuf_tag, sc->rx_mbuf_map[i],
3585 					BUS_DMASYNC_POSTREAD);
3586 			bus_dmamap_unload(sc->rx_mbuf_tag, sc->rx_mbuf_map[i]);
3587 			m_freem(sc->rx_mbuf_ptr[i]);
3588 			sc->rx_mbuf_ptr[i] = NULL;
3589 			DBRUNIF(1, sc->rx_mbuf_alloc--);
3590 		}
3591 	}
3592 
3593 	/* Clear each RX chain page. */
3594 	for (i = 0; i < RX_PAGES; i++)
3595 		bzero(sc->rx_bd_chain[i], BCE_RX_CHAIN_PAGE_SZ);
3596 
3597 	/* Check if we lost any mbufs in the process. */
3598 	DBRUNIF((sc->rx_mbuf_alloc),
3599 		if_printf(&sc->arpcom.ac_if,
3600 			  "%s(%d): Memory leak! "
3601 			  "Lost %d mbufs from rx chain!\n",
3602 			  __FILE__, __LINE__, sc->rx_mbuf_alloc));
3603 
3604 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __func__);
3605 }
3606 
3607 
3608 /****************************************************************************/
3609 /* Set media options.                                                       */
3610 /*                                                                          */
3611 /* Returns:                                                                 */
3612 /*   0 for success, positive value for failure.                             */
3613 /****************************************************************************/
3614 static int
3615 bce_ifmedia_upd(struct ifnet *ifp)
3616 {
3617 	struct bce_softc *sc = ifp->if_softc;
3618 	struct mii_data *mii = device_get_softc(sc->bce_miibus);
3619 
3620 	/*
3621 	 * 'mii' will be NULL when this function is called on the following
3622 	 * code path: bce_attach() -> bce_mgmt_init()
3623 	 */
3624 	if (mii != NULL) {
3625 		/* Make sure the MII bus has been enumerated. */
3626 		sc->bce_link = 0;
3627 		if (mii->mii_instance) {
3628 			struct mii_softc *miisc;
3629 
3630 			LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
3631 				mii_phy_reset(miisc);
3632 		}
3633 		mii_mediachg(mii);
3634 	}
3635 	return 0;
3636 }
3637 
3638 
3639 /****************************************************************************/
3640 /* Reports current media status.                                            */
3641 /*                                                                          */
3642 /* Returns:                                                                 */
3643 /*   Nothing.                                                               */
3644 /****************************************************************************/
3645 static void
3646 bce_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
3647 {
3648 	struct bce_softc *sc = ifp->if_softc;
3649 	struct mii_data *mii = device_get_softc(sc->bce_miibus);
3650 
3651 	mii_pollstat(mii);
3652 	ifmr->ifm_active = mii->mii_media_active;
3653 	ifmr->ifm_status = mii->mii_media_status;
3654 }
3655 
3656 
3657 /****************************************************************************/
3658 /* Handles PHY generated interrupt events.                                  */
3659 /*                                                                          */
3660 /* Returns:                                                                 */
3661 /*   Nothing.                                                               */
3662 /****************************************************************************/
3663 static void
3664 bce_phy_intr(struct bce_softc *sc)
3665 {
3666 	uint32_t new_link_state, old_link_state;
3667 	struct ifnet *ifp = &sc->arpcom.ac_if;
3668 
3669 	ASSERT_SERIALIZED(ifp->if_serializer);
3670 
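	/*
	 * The attention bits reflect the current link state while the
	 * "ack" bits hold the last state acknowledged by the driver;
	 * a mismatch between the two means the link state has changed.
	 */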
3671 	new_link_state = sc->status_block->status_attn_bits &
3672 			 STATUS_ATTN_BITS_LINK_STATE;
3673 	old_link_state = sc->status_block->status_attn_bits_ack &
3674 			 STATUS_ATTN_BITS_LINK_STATE;
3675 
3676 	/* Handle any changes if the link state has changed. */
3677 	if (new_link_state != old_link_state) {	/* XXX redundant? */
3678 		DBRUN(BCE_VERBOSE_INTR, bce_dump_status_block(sc));
3679 
3680 		sc->bce_link = 0;
3681 		callout_stop(&sc->bce_stat_ch);
3682 		bce_tick_serialized(sc);
3683 
3684 		/* Update the status_attn_bits_ack field in the status block. */
3685 		if (new_link_state) {
3686 			REG_WR(sc, BCE_PCICFG_STATUS_BIT_SET_CMD,
3687 			       STATUS_ATTN_BITS_LINK_STATE);
3688 			if (bootverbose)
3689 				if_printf(ifp, "Link is now UP.\n");
3690 		} else {
3691 			REG_WR(sc, BCE_PCICFG_STATUS_BIT_CLEAR_CMD,
3692 			       STATUS_ATTN_BITS_LINK_STATE);
3693 			if (bootverbose)
3694 				if_printf(ifp, "Link is now DOWN.\n");
3695 		}
3696 	}
3697 
3698 	/* Acknowledge the link change interrupt. */
3699 	REG_WR(sc, BCE_EMAC_STATUS, BCE_EMAC_STATUS_LINK_CHANGE);
3700 }
3701 
3702 
3703 /****************************************************************************/
3704 /* Reads the receive consumer value from the status block (skipping over    */
3705 /* chain page pointer if necessary).                                        */
3706 /*                                                                          */
3707 /* Returns:                                                                 */
3708 /*   hw_cons                                                                */
3709 /****************************************************************************/
3710 static __inline uint16_t
3711 bce_get_hw_rx_cons(struct bce_softc *sc)
3712 {
3713 	uint16_t hw_cons = sc->status_block->status_rx_quick_consumer_index0;
3714 
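	/*
	 * The last entry on each chain page is a pointer to the next
	 * page rather than a real rx_bd, so skip over it when the
	 * consumer index lands on it.
	 */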
3715 	if ((hw_cons & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
3716 		hw_cons++;
3717 	return hw_cons;
3718 }
3719 
3720 
3721 /****************************************************************************/
3722 /* Handles received frame interrupt events.                                 */
3723 /*                                                                          */
3724 /* Returns:                                                                 */
3725 /*   Nothing.                                                               */
3726 /****************************************************************************/
3727 static void
3728 bce_rx_intr(struct bce_softc *sc, int count)
3729 {
3730 	struct ifnet *ifp = &sc->arpcom.ac_if;
3731 	uint16_t hw_cons, sw_cons, sw_chain_cons, sw_prod, sw_chain_prod;
3732 	uint32_t sw_prod_bseq;
3733 	int i;
3734 #ifdef ETHER_INPUT_CHAIN
3735 	struct mbuf_chain chain[MAXCPU];
3736 #endif
3737 
3738 	ASSERT_SERIALIZED(ifp->if_serializer);
3739 
3740 #ifdef ETHER_INPUT_CHAIN
3741 	ether_input_chain_init(chain);
3742 #endif
3743 
3744 	DBRUNIF(1, sc->rx_interrupts++);
3745 
3746 	/* Prepare the RX chain pages to be accessed by the host CPU. */
3747 	for (i = 0; i < RX_PAGES; i++) {
3748 		bus_dmamap_sync(sc->rx_bd_chain_tag,
3749 				sc->rx_bd_chain_map[i], BUS_DMASYNC_POSTREAD);
3750 	}
3751 
3752 	/* Get the hardware's view of the RX consumer index. */
3753 	hw_cons = sc->hw_rx_cons = bce_get_hw_rx_cons(sc);
3754 
3755 	/* Get working copies of the driver's view of the RX indices. */
3756 	sw_cons = sc->rx_cons;
3757 	sw_prod = sc->rx_prod;
3758 	sw_prod_bseq = sc->rx_prod_bseq;
3759 
3760 	DBPRINT(sc, BCE_INFO_RECV, "%s(enter): sw_prod = 0x%04X, "
3761 		"sw_cons = 0x%04X, sw_prod_bseq = 0x%08X\n",
3762 		__func__, sw_prod, sw_cons, sw_prod_bseq);
3763 
3764 	/* Prevent speculative reads from getting ahead of the status block. */
3765 	bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
3766 			  BUS_SPACE_BARRIER_READ);
3767 
3768 	/* Update some debug statistics counters */
3769 	DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark),
3770 		sc->rx_low_watermark = sc->free_rx_bd);
3771 	DBRUNIF((sc->free_rx_bd == 0), sc->rx_empty_count++);
3772 
3773 	/* Scan through the receive chain as long as there is work to do. */
3774 	while (sw_cons != hw_cons) {
3775 		struct mbuf *m = NULL;
3776 		struct l2_fhdr *l2fhdr = NULL;
3777 		struct rx_bd *rxbd;
3778 		unsigned int len;
3779 		uint32_t status = 0;
3780 
3781 #ifdef DEVICE_POLLING
3782 		if (count >= 0 && count-- == 0) {
3783 			sc->hw_rx_cons = sw_cons;
3784 			break;
3785 		}
3786 #endif
3787 
3788 		/*
3789 		 * Convert the producer/consumer indices
3790 		 * to an actual rx_bd index.
3791 		 */
3792 		sw_chain_cons = RX_CHAIN_IDX(sw_cons);
3793 		sw_chain_prod = RX_CHAIN_IDX(sw_prod);
3794 
3795 		/* Get the used rx_bd. */
3796 		rxbd = &sc->rx_bd_chain[RX_PAGE(sw_chain_cons)]
3797 				       [RX_IDX(sw_chain_cons)];
3798 		sc->free_rx_bd++;
3799 
3800 		DBRUN(BCE_VERBOSE_RECV,
3801 		      if_printf(ifp, "%s(): ", __func__);
3802 		      bce_dump_rxbd(sc, sw_chain_cons, rxbd));
3803 
3804 		/* The mbuf is stored with the last rx_bd entry of a packet. */
3805 		if (sc->rx_mbuf_ptr[sw_chain_cons] != NULL) {
3806 			/* Validate that this is the last rx_bd. */
3807 			DBRUNIF((!(rxbd->rx_bd_flags & RX_BD_FLAGS_END)),
3808 				if_printf(ifp, "%s(%d): "
3809 				"Unexpected mbuf found in rx_bd[0x%04X]!\n",
3810 				__FILE__, __LINE__, sw_chain_cons);
3811 				bce_breakpoint(sc));
3812 
3813 			/*
3814 			 * ToDo: If the received packet is small enough
3815 			 * to fit into a single, non-M_EXT mbuf,
3816 			 * allocate a new mbuf here, copy the data to
3817 			 * that mbuf, and recycle the mapped jumbo frame.
3818 			 */
3819 
3820 			/* Unmap the mbuf from DMA space. */
3821 			bus_dmamap_sync(sc->rx_mbuf_tag,
3822 					sc->rx_mbuf_map[sw_chain_cons],
3823 					BUS_DMASYNC_POSTREAD);
3824 			bus_dmamap_unload(sc->rx_mbuf_tag,
3825 					  sc->rx_mbuf_map[sw_chain_cons]);
3826 
3827 			/* Remove the mbuf from the driver's chain. */
3828 			m = sc->rx_mbuf_ptr[sw_chain_cons];
3829 			sc->rx_mbuf_ptr[sw_chain_cons] = NULL;
3830 
3831 			/*
3832 			 * Frames received on the NetXtreme II are prepended
3833 			 * with an l2_fhdr structure which provides status
3834 			 * information about the received frame (including
3835 			 * VLAN tags and checksum info).  The frames are also
3836 			 * automatically adjusted to align the IP header
3837 			 * (i.e. two null bytes are inserted before the
3838 			 * Ethernet header).
3839 			 */
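			/*
			 * The resulting buffer layout is:
			 *
			 *   [l2_fhdr][2 pad bytes][Ethernet header][payload]
			 *
			 * The l2_fhdr and pad bytes are stripped with
			 * m_adj() below before the frame is handed to
			 * the stack.
			 */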
3840 			l2fhdr = mtod(m, struct l2_fhdr *);
3841 
3842 			len = l2fhdr->l2_fhdr_pkt_len;
3843 			status = l2fhdr->l2_fhdr_status;
3844 
3845 			DBRUNIF(DB_RANDOMTRUE(bce_debug_l2fhdr_status_check),
3846 				if_printf(ifp,
3847 				"Simulating l2_fhdr status error.\n");
3848 				status = status | L2_FHDR_ERRORS_PHY_DECODE);
3849 
3850 			/* Watch for unusually sized frames. */
3851 			DBRUNIF((len < BCE_MIN_MTU ||
3852 				 len > BCE_MAX_JUMBO_ETHER_MTU_VLAN),
3853 				if_printf(ifp,
3854 				"%s(%d): Unusual frame size found. "
3855 				"Min(%d), Actual(%d), Max(%d)\n",
3856 				__FILE__, __LINE__,
3857 				(int)BCE_MIN_MTU, len,
3858 				(int)BCE_MAX_JUMBO_ETHER_MTU_VLAN);
3859 				bce_dump_mbuf(sc, m);
3860 		 		bce_breakpoint(sc));
3861 
3862 			len -= ETHER_CRC_LEN;
3863 
3864 			/* Check the received frame for errors. */
3865 			if (status & (L2_FHDR_ERRORS_BAD_CRC |
3866 				      L2_FHDR_ERRORS_PHY_DECODE |
3867 				      L2_FHDR_ERRORS_ALIGNMENT |
3868 				      L2_FHDR_ERRORS_TOO_SHORT |
3869 				      L2_FHDR_ERRORS_GIANT_FRAME)) {
3870 				ifp->if_ierrors++;
3871 				DBRUNIF(1, sc->l2fhdr_status_errors++);
3872 
3873 				/* Reuse the mbuf for a new frame. */
3874 				if (bce_newbuf_std(sc, m, &sw_prod,
3875 						   &sw_chain_prod,
3876 						   &sw_prod_bseq)) {
3877 					DBRUNIF(1, bce_breakpoint(sc));
3878 					/* XXX */
3879 					panic("%s: Can't reuse RX mbuf!\n",
3880 					      ifp->if_xname);
3881 				}
3882 				m = NULL;
3883 				goto bce_rx_int_next_rx;
3884 			}
3885 
3886 			/*
3887 			 * Get a new mbuf for the rx_bd.   If no new
3888 			 * mbufs are available then reuse the current mbuf,
3889 			 * log an ierror on the interface, and generate
3890 			 * an error in the system log.
3891 			 */
3892 			if (bce_newbuf_std(sc, NULL, &sw_prod, &sw_chain_prod,
3893 					   &sw_prod_bseq)) {
3894 				DBRUN(BCE_WARN,
3895 				      if_printf(ifp,
3896 				      "%s(%d): Failed to allocate new mbuf, "
3897 				      "incoming frame dropped!\n",
3898 				      __FILE__, __LINE__));
3899 
3900 				ifp->if_ierrors++;
3901 
3902 				/* Try to reuse the existing mbuf. */
3903 				if (bce_newbuf_std(sc, m, &sw_prod,
3904 						   &sw_chain_prod,
3905 						   &sw_prod_bseq)) {
3906 					DBRUNIF(1, bce_breakpoint(sc));
3907 					/* XXX */
3908 					panic("%s: Double mbuf allocation "
3909 					      "failure!", ifp->if_xname);
3910 				}
3911 				m = NULL;
3912 				goto bce_rx_int_next_rx;
3913 			}
3914 
3915 			/*
3916 			 * Skip over the l2_fhdr when passing
3917 			 * the data up the stack.
3918 			 */
3919 			m_adj(m, sizeof(struct l2_fhdr) + ETHER_ALIGN);
3920 
3921 			m->m_pkthdr.len = m->m_len = len;
3922 			m->m_pkthdr.rcvif = ifp;
3923 
3924 			DBRUN(BCE_VERBOSE_RECV,
3925 			      struct ether_header *eh;
3926 			      eh = mtod(m, struct ether_header *);
3927 			      if_printf(ifp, "%s(): to: %6D, from: %6D, "
3928 			      		"type: 0x%04X\n", __func__,
3929 					eh->ether_dhost, ":",
3930 					eh->ether_shost, ":",
3931 					htons(eh->ether_type)));
3932 
3933 			/* Validate the checksum if offload enabled. */
3934 			if (ifp->if_capenable & IFCAP_RXCSUM) {
3935 				/* Check for an IP datagram. */
3936 				if (status & L2_FHDR_STATUS_IP_DATAGRAM) {
3937 					m->m_pkthdr.csum_flags |=
3938 						CSUM_IP_CHECKED;
3939 
3940 					/* Check if the IP checksum is valid. */
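					/*
					 * A valid IP header checksums to
					 * 0xffff, hence the XOR test below.
					 */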
3941 					if ((l2fhdr->l2_fhdr_ip_xsum ^
3942 					     0xffff) == 0) {
3943 						m->m_pkthdr.csum_flags |=
3944 							CSUM_IP_VALID;
3945 					} else {
3946 						DBPRINT(sc, BCE_WARN_RECV,
3947 							"%s(): Invalid IP checksum = 0x%04X!\n",
3948 							__func__, l2fhdr->l2_fhdr_ip_xsum);
3949 					}
3950 				}
3951 
3952 				/* Check for a valid TCP/UDP frame. */
3953 				if (status & (L2_FHDR_STATUS_TCP_SEGMENT |
3954 					      L2_FHDR_STATUS_UDP_DATAGRAM)) {
3955 
3956 					/* Check for a good TCP/UDP checksum. */
3957 					if ((status &
3958 					     (L2_FHDR_ERRORS_TCP_XSUM |
3959 					      L2_FHDR_ERRORS_UDP_XSUM)) == 0) {
3960 						m->m_pkthdr.csum_data =
3961 						l2fhdr->l2_fhdr_tcp_udp_xsum;
3962 						m->m_pkthdr.csum_flags |=
3963 							CSUM_DATA_VALID |
3964 							CSUM_PSEUDO_HDR;
3965 					} else {
3966 						DBPRINT(sc, BCE_WARN_RECV,
3967 							"%s(): Invalid TCP/UDP checksum = 0x%04X!\n",
3968 							__func__, l2fhdr->l2_fhdr_tcp_udp_xsum);
3969 					}
3970 				}
3971 			}
3972 
3973 			ifp->if_ipackets++;
3974 bce_rx_int_next_rx:
3975 			sw_prod = NEXT_RX_BD(sw_prod);
3976 		}
3977 
3978 		sw_cons = NEXT_RX_BD(sw_cons);
3979 
3980 		/* If we have a packet, pass it up the stack */
3981 		if (m) {
3982 			DBPRINT(sc, BCE_VERBOSE_RECV,
3983 				"%s(): Passing received frame up.\n", __func__);
3984 
3985 			if (status & L2_FHDR_STATUS_L2_VLAN_TAG) {
3986 				m->m_flags |= M_VLANTAG;
3987 				m->m_pkthdr.ether_vlantag =
3988 					l2fhdr->l2_fhdr_vlan_tag;
3989 			}
3990 #ifdef ETHER_INPUT_CHAIN
3991 			ether_input_chain2(ifp, m, chain);
3992 #else
3993 			ifp->if_input(ifp, m);
3994 #endif
3995 
3996 			DBRUNIF(1, sc->rx_mbuf_alloc--);
3997 		}
3998 
3999 		/*
4000 		 * If polling(4) is not enabled, refresh hw_cons to see
4001 		 * whether there's new work.
4002 		 *
4003 		 * If polling(4) is enabled, i.e. count >= 0, refreshing
4004 		 * should not be performed, so that we would not spend
4005 		 * too much time in RX processing.
4006 		 */
4007 		if (count < 0 && sw_cons == hw_cons)
4008 			hw_cons = sc->hw_rx_cons = bce_get_hw_rx_cons(sc);
4009 
4010 		/*
4011 		 * Prevent speculative reads from getting ahead
4012 		 * of the status block.
4013 		 */
4014 		bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
4015 				  BUS_SPACE_BARRIER_READ);
4016 	}
4017 
4018 #ifdef ETHER_INPUT_CHAIN
4019 	ether_input_dispatch(chain);
4020 #endif
4021 
4022 	for (i = 0; i < RX_PAGES; i++) {
4023 		bus_dmamap_sync(sc->rx_bd_chain_tag,
4024 				sc->rx_bd_chain_map[i], BUS_DMASYNC_PREWRITE);
4025 	}
4026 
4027 	sc->rx_cons = sw_cons;
4028 	sc->rx_prod = sw_prod;
4029 	sc->rx_prod_bseq = sw_prod_bseq;
4030 
4031 	REG_WR16(sc, MB_RX_CID_ADDR + BCE_L2CTX_HOST_BDIDX, sc->rx_prod);
4032 	REG_WR(sc, MB_RX_CID_ADDR + BCE_L2CTX_HOST_BSEQ, sc->rx_prod_bseq);
4033 
4034 	DBPRINT(sc, BCE_INFO_RECV, "%s(exit): rx_prod = 0x%04X, "
4035 		"rx_cons = 0x%04X, rx_prod_bseq = 0x%08X\n",
4036 		__func__, sc->rx_prod, sc->rx_cons, sc->rx_prod_bseq);
4037 }
4038 
4039 
4040 /****************************************************************************/
4041 /* Reads the transmit consumer value from the status block (skipping over   */
4042 /* chain page pointer if necessary).                                        */
4043 /*                                                                          */
4044 /* Returns:                                                                 */
4045 /*   hw_cons                                                                */
4046 /****************************************************************************/
4047 static __inline uint16_t
4048 bce_get_hw_tx_cons(struct bce_softc *sc)
4049 {
4050 	uint16_t hw_cons = sc->status_block->status_tx_quick_consumer_index0;
4051 
4052 	if ((hw_cons & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
4053 		hw_cons++;
4054 	return hw_cons;
4055 }
4056 
4057 
4058 /****************************************************************************/
4059 /* Handles transmit completion interrupt events.                            */
4060 /*                                                                          */
4061 /* Returns:                                                                 */
4062 /*   Nothing.                                                               */
4063 /****************************************************************************/
4064 static void
4065 bce_tx_intr(struct bce_softc *sc)
4066 {
4067 	struct ifnet *ifp = &sc->arpcom.ac_if;
4068 	uint16_t hw_tx_cons, sw_tx_cons, sw_tx_chain_cons;
4069 
4070 	ASSERT_SERIALIZED(ifp->if_serializer);
4071 
4072 	DBRUNIF(1, sc->tx_interrupts++);
4073 
4074 	/* Get the hardware's view of the TX consumer index. */
4075 	hw_tx_cons = sc->hw_tx_cons = bce_get_hw_tx_cons(sc);
4076 	sw_tx_cons = sc->tx_cons;
4077 
4078 	/* Prevent speculative reads from getting ahead of the status block. */
4079 	bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
4080 			  BUS_SPACE_BARRIER_READ);
4081 
4082 	/* Cycle through any completed TX chain page entries. */
4083 	while (sw_tx_cons != hw_tx_cons) {
4084 #ifdef BCE_DEBUG
4085 		struct tx_bd *txbd = NULL;
4086 #endif
4087 		sw_tx_chain_cons = TX_CHAIN_IDX(sw_tx_cons);
4088 
4089 		DBPRINT(sc, BCE_INFO_SEND,
4090 			"%s(): hw_tx_cons = 0x%04X, sw_tx_cons = 0x%04X, "
4091 			"sw_tx_chain_cons = 0x%04X\n",
4092 			__func__, hw_tx_cons, sw_tx_cons, sw_tx_chain_cons);
4093 
4094 		DBRUNIF((sw_tx_chain_cons > MAX_TX_BD),
4095 			if_printf(ifp, "%s(%d): "
4096 				  "TX chain consumer out of range! "
4097 				  " 0x%04X > 0x%04X\n",
4098 				  __FILE__, __LINE__, sw_tx_chain_cons,
4099 				  (int)MAX_TX_BD);
4100 			bce_breakpoint(sc));
4101 
4102 		DBRUNIF(1, txbd = &sc->tx_bd_chain[TX_PAGE(sw_tx_chain_cons)]
4103 				[TX_IDX(sw_tx_chain_cons)]);
4104 
4105 		DBRUNIF((txbd == NULL),
4106 			if_printf(ifp, "%s(%d): "
4107 				  "Unexpected NULL tx_bd[0x%04X]!\n",
4108 				  __FILE__, __LINE__, sw_tx_chain_cons);
4109 			bce_breakpoint(sc));
4110 
4111 		DBRUN(BCE_INFO_SEND,
4112 		      if_printf(ifp, "%s(): ", __func__);
4113 		      bce_dump_txbd(sc, sw_tx_chain_cons, txbd));
4114 
4115 		/*
4116 		 * Free the associated mbuf. Remember
4117 		 * that only the last tx_bd of a packet
4118 		 * has an mbuf pointer and DMA map.
4119 		 */
4120 		if (sc->tx_mbuf_ptr[sw_tx_chain_cons] != NULL) {
4121 			/* Validate that this is the last tx_bd. */
4122 			DBRUNIF((!(txbd->tx_bd_flags & TX_BD_FLAGS_END)),
4123 				if_printf(ifp, "%s(%d): "
4124 				"tx_bd END flag not set but "
4125 				"txmbuf != NULL!\n", __FILE__, __LINE__);
4126 				bce_breakpoint(sc));
4127 
4128 			DBRUN(BCE_INFO_SEND,
4129 			      if_printf(ifp, "%s(): Unloading map/freeing mbuf "
4130 			      		"from tx_bd[0x%04X]\n", __func__,
4131 					sw_tx_chain_cons));
4132 
4133 			/* Unmap the mbuf. */
4134 			bus_dmamap_unload(sc->tx_mbuf_tag,
4135 					  sc->tx_mbuf_map[sw_tx_chain_cons]);
4136 
4137 			/* Free the mbuf. */
4138 			m_freem(sc->tx_mbuf_ptr[sw_tx_chain_cons]);
4139 			sc->tx_mbuf_ptr[sw_tx_chain_cons] = NULL;
4140 			DBRUNIF(1, sc->tx_mbuf_alloc--);
4141 
4142 			ifp->if_opackets++;
4143 		}
4144 
4145 		sc->used_tx_bd--;
4146 		sw_tx_cons = NEXT_TX_BD(sw_tx_cons);
4147 
4148 		if (sw_tx_cons == hw_tx_cons) {
4149 			/* Refresh hw_cons to see if there's new work. */
4150 			hw_tx_cons = sc->hw_tx_cons = bce_get_hw_tx_cons(sc);
4151 		}
4152 
4153 		/*
4154 		 * Prevent speculative reads from getting
4155 		 * ahead of the status block.
4156 		 */
4157 		bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
4158 				  BUS_SPACE_BARRIER_READ);
4159 	}
4160 
4161 	if (sc->used_tx_bd == 0) {
4162 		/* Clear the TX timeout timer. */
4163 		ifp->if_timer = 0;
4164 	}
4165 
4166 	/* Clear the tx hardware queue full flag. */
4167 	if (sc->max_tx_bd - sc->used_tx_bd >= BCE_TX_SPARE_SPACE) {
4168 		DBRUNIF((ifp->if_flags & IFF_OACTIVE),
4169 			DBPRINT(sc, BCE_WARN_SEND,
4170 				"%s(): Open TX chain! %d/%d (used/total)\n",
4171 				__func__, sc->used_tx_bd, sc->max_tx_bd));
4172 		ifp->if_flags &= ~IFF_OACTIVE;
4173 	}
4174 	sc->tx_cons = sw_tx_cons;
4175 }
4176 
4177 
4178 /****************************************************************************/
4179 /* Disables interrupt generation.                                           */
4180 /*                                                                          */
4181 /* Returns:                                                                 */
4182 /*   Nothing.                                                               */
4183 /****************************************************************************/
4184 static void
4185 bce_disable_intr(struct bce_softc *sc)
4186 {
4187 	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, BCE_PCICFG_INT_ACK_CMD_MASK_INT);
4188 	REG_RD(sc, BCE_PCICFG_INT_ACK_CMD);
4189 	lwkt_serialize_handler_disable(sc->arpcom.ac_if.if_serializer);
4190 }
4191 
4192 
4193 /****************************************************************************/
4194 /* Enables interrupt generation.                                            */
4195 /*                                                                          */
4196 /* Returns:                                                                 */
4197 /*   Nothing.                                                               */
4198 /****************************************************************************/
4199 static void
4200 bce_enable_intr(struct bce_softc *sc)
4201 {
4202 	uint32_t val;
4203 
4204 	lwkt_serialize_handler_enable(sc->arpcom.ac_if.if_serializer);
4205 
4206 	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
4207 	       BCE_PCICFG_INT_ACK_CMD_INDEX_VALID |
4208 	       BCE_PCICFG_INT_ACK_CMD_MASK_INT | sc->last_status_idx);
4209 
4210 	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
4211 	       BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx);
4212 
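	/*
	 * Force an immediate host coalescing pass so that any events
	 * which arrived while interrupts were disabled are serviced
	 * right away.
	 */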
4213 	val = REG_RD(sc, BCE_HC_COMMAND);
4214 	REG_WR(sc, BCE_HC_COMMAND, val | BCE_HC_COMMAND_COAL_NOW);
4215 }
4216 
4217 
4218 /****************************************************************************/
4219 /* Handles controller initialization.                                       */
4220 /*                                                                          */
4221 /* Returns:                                                                 */
4222 /*   Nothing.                                                               */
4223 /****************************************************************************/
4224 static void
4225 bce_init(void *xsc)
4226 {
4227 	struct bce_softc *sc = xsc;
4228 	struct ifnet *ifp = &sc->arpcom.ac_if;
4229 	uint32_t ether_mtu;
4230 	int error;
4231 
4232 	ASSERT_SERIALIZED(ifp->if_serializer);
4233 
4234 	/* Check if the driver is still running and bail out if it is. */
4235 	if (ifp->if_flags & IFF_RUNNING)
4236 		return;
4237 
4238 	bce_stop(sc);
4239 
4240 	error = bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
4241 	if (error) {
4242 		if_printf(ifp, "Controller reset failed!\n");
4243 		goto back;
4244 	}
4245 
4246 	error = bce_chipinit(sc);
4247 	if (error) {
4248 		if_printf(ifp, "Controller initialization failed!\n");
4249 		goto back;
4250 	}
4251 
4252 	error = bce_blockinit(sc);
4253 	if (error) {
4254 		if_printf(ifp, "Block initialization failed!\n");
4255 		goto back;
4256 	}
4257 
4258 	/* Load our MAC address. */
4259 	bcopy(IF_LLADDR(ifp), sc->eaddr, ETHER_ADDR_LEN);
4260 	bce_set_mac_addr(sc);
4261 
4262 	/* Calculate and program the Ethernet MTU size. */
4263 	ether_mtu = ETHER_HDR_LEN + EVL_ENCAPLEN + ifp->if_mtu + ETHER_CRC_LEN;
4264 
4265 	DBPRINT(sc, BCE_INFO, "%s(): setting mtu = %d\n", __func__, ether_mtu);
4266 
4267 	/*
4268 	 * Program the mtu, enabling jumbo frame
4269 	 * support if necessary.  Also set the mbuf
4270 	 * allocation count for RX frames.
4271 	 */
4272 	if (ether_mtu > ETHER_MAX_LEN + EVL_ENCAPLEN) {
4273 #ifdef notyet
4274 		REG_WR(sc, BCE_EMAC_RX_MTU_SIZE,
4275 		       min(ether_mtu, BCE_MAX_JUMBO_ETHER_MTU) |
4276 		       BCE_EMAC_RX_MTU_SIZE_JUMBO_ENA);
4277 		sc->mbuf_alloc_size = MJUM9BYTES;
4278 #else
4279 		panic("jumbo buffer is not supported yet\n");
4280 #endif
4281 	} else {
4282 		REG_WR(sc, BCE_EMAC_RX_MTU_SIZE, ether_mtu);
4283 		sc->mbuf_alloc_size = MCLBYTES;
4284 	}
4285 
4286 	/* Calculate the RX Ethernet frame size for rx_bd's. */
4287 	sc->max_frame_size = sizeof(struct l2_fhdr) + 2 + ether_mtu + 8;
4288 
4289 	DBPRINT(sc, BCE_INFO,
4290 		"%s(): mclbytes = %d, mbuf_alloc_size = %d, "
4291 		"max_frame_size = %d\n",
4292 		__func__, (int)MCLBYTES, sc->mbuf_alloc_size,
4293 		sc->max_frame_size);
4294 
4295 	/* Program appropriate promiscuous/multicast filtering. */
4296 	bce_set_rx_mode(sc);
4297 
4298 	/* Init RX buffer descriptor chain. */
4299 	bce_init_rx_chain(sc);	/* XXX return value */
4300 
4301 	/* Init TX buffer descriptor chain. */
4302 	bce_init_tx_chain(sc);	/* XXX return value */
4303 
4304 #ifdef DEVICE_POLLING
4305 	/* Disable interrupts if we are polling. */
4306 	if (ifp->if_flags & IFF_POLLING) {
4307 		bce_disable_intr(sc);
4308 
4309 		REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
4310 		       (1 << 16) | sc->bce_rx_quick_cons_trip);
4311 		REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
4312 		       (1 << 16) | sc->bce_tx_quick_cons_trip);
4313 	} else
4314 #endif
4315 	/* Enable host interrupts. */
4316 	bce_enable_intr(sc);
4317 
4318 	bce_ifmedia_upd(ifp);
4319 
4320 	ifp->if_flags |= IFF_RUNNING;
4321 	ifp->if_flags &= ~IFF_OACTIVE;
4322 
4323 	callout_reset(&sc->bce_stat_ch, hz, bce_tick, sc);
4324 back:
4325 	if (error)
4326 		bce_stop(sc);
4327 }
4328 
4329 
4330 /****************************************************************************/
4331 /* Initialize the controller just enough so that any management firmware    */
4332 /* running on the device will continue to operate correctly.                */
4333 /*                                                                          */
4334 /* Returns:                                                                 */
4335 /*   Nothing.                                                               */
4336 /****************************************************************************/
4337 static void
4338 bce_mgmt_init(struct bce_softc *sc)
4339 {
4340 	struct ifnet *ifp = &sc->arpcom.ac_if;
4341 	uint32_t val;
4342 
4343 	/* Check if the driver is still running and bail out if it is. */
4344 	if (ifp->if_flags & IFF_RUNNING)
4345 		return;
4346 
4347 	/* Initialize the on-board CPUs */
4348 	bce_init_cpus(sc);
4349 
4350 	/* Set the page size and clear the RV2P processor stall bits. */
4351 	val = (BCM_PAGE_BITS - 8) << 24;
4352 	REG_WR(sc, BCE_RV2P_CONFIG, val);
4353 
4354 	/* Enable all critical blocks in the MAC. */
4355 	REG_WR(sc, BCE_MISC_ENABLE_SET_BITS,
4356 	       BCE_MISC_ENABLE_SET_BITS_RX_V2P_ENABLE |
4357 	       BCE_MISC_ENABLE_SET_BITS_RX_DMA_ENABLE |
4358 	       BCE_MISC_ENABLE_SET_BITS_COMPLETION_ENABLE);
4359 	REG_RD(sc, BCE_MISC_ENABLE_SET_BITS);
4360 	DELAY(20);
4361 
4362 	bce_ifmedia_upd(ifp);
4363 }
4364 
4365 
4366 /****************************************************************************/
4367 /* Encapsulates an mbuf cluster into the tx_bd chain structure and makes   */
4368 /* the memory visible to the controller.                                    */
4369 /*                                                                          */
4370 /* Returns:                                                                 */
4371 /*   0 for success, positive value for failure.                             */
4372 /****************************************************************************/
4373 static int
4374 bce_encap(struct bce_softc *sc, struct mbuf **m_head)
4375 {
4376 	struct bce_dmamap_arg ctx;
4377 	bus_dma_segment_t segs[BCE_MAX_SEGMENTS];
4378 	bus_dmamap_t map, tmp_map;
4379 	struct mbuf *m0 = *m_head;
4380 	struct tx_bd *txbd = NULL;
4381 	uint16_t vlan_tag = 0, flags = 0;
4382 	uint16_t chain_prod, chain_prod_start, prod;
4383 	uint32_t prod_bseq;
4384 	int i, error, maxsegs;
4385 #ifdef BCE_DEBUG
4386 	uint16_t debug_prod;
4387 #endif
4388 
4389 	/* Transfer any checksum offload flags to the bd. */
4390 	if (m0->m_pkthdr.csum_flags) {
4391 		if (m0->m_pkthdr.csum_flags & CSUM_IP)
4392 			flags |= TX_BD_FLAGS_IP_CKSUM;
4393 		if (m0->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
4394 			flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
4395 	}
4396 
4397 	/* Transfer any VLAN tags to the bd. */
4398 	if (m0->m_flags & M_VLANTAG) {
4399 		flags |= TX_BD_FLAGS_VLAN_TAG;
4400 		vlan_tag = m0->m_pkthdr.ether_vlantag;
4401 	}
4402 
4403 	prod = sc->tx_prod;
4404 	chain_prod_start = chain_prod = TX_CHAIN_IDX(prod);
4405 
4406 	/* Map the mbuf into DMAable memory. */
4407 	map = sc->tx_mbuf_map[chain_prod_start];
4408 
4409 	maxsegs = sc->max_tx_bd - sc->used_tx_bd;
4410 	KASSERT(maxsegs >= BCE_TX_SPARE_SPACE,
4411 		("not enough segments %d\n", maxsegs));
4412 	if (maxsegs > BCE_MAX_SEGMENTS)
4413 		maxsegs = BCE_MAX_SEGMENTS;
4414 
4415 	/* Map the mbuf into our DMA address space. */
4416 	ctx.bce_maxsegs = maxsegs;
4417 	ctx.bce_segs = segs;
4418 	error = bus_dmamap_load_mbuf(sc->tx_mbuf_tag, map, m0,
4419 				     bce_dma_map_mbuf, &ctx, BUS_DMA_NOWAIT);
4420 	if (error == EFBIG || ctx.bce_maxsegs == 0) {
4421 		DBPRINT(sc, BCE_WARN, "%s(): fragmented mbuf\n", __func__);
4422 		DBRUNIF(1, bce_dump_mbuf(sc, m0););
4423 
4424 		m0 = m_defrag(*m_head, MB_DONTWAIT);
4425 		if (m0 == NULL) {
4426 			error = ENOBUFS;
4427 			goto back;
4428 		}
4429 		*m_head = m0;
4430 
4431 		ctx.bce_maxsegs = maxsegs;
4432 		ctx.bce_segs = segs;
4433 		error = bus_dmamap_load_mbuf(sc->tx_mbuf_tag, map, m0,
4434 					     bce_dma_map_mbuf, &ctx,
4435 					     BUS_DMA_NOWAIT);
4436 		if (error || ctx.bce_maxsegs == 0) {
4437 			if_printf(&sc->arpcom.ac_if,
4438 				  "Error mapping mbuf into TX chain\n");
4439 			if (error == 0)
4440 				error = EFBIG;
4441 			goto back;
4442 		}
4443 	} else if (error) {
4444 		if_printf(&sc->arpcom.ac_if,
4445 			  "Error mapping mbuf into TX chain\n");
4446 		goto back;
4447 	}
4448 
4449 	/* prod points to an empty tx_bd at this point. */
4450 	prod_bseq  = sc->tx_prod_bseq;
4451 
4452 #ifdef BCE_DEBUG
4453 	debug_prod = chain_prod;
4454 #endif
4455 
4456 	DBPRINT(sc, BCE_INFO_SEND,
4457 		"%s(): Start: prod = 0x%04X, chain_prod = %04X, "
4458 		"prod_bseq = 0x%08X\n",
4459 		__func__, prod, chain_prod, prod_bseq);
4460 
4461 	/*
4462 	 * Cycle through each mbuf segment that makes up
4463 	 * the outgoing frame, gathering the mapping info
4464 	 * for that segment and creating a tx_bd for
4465 	 * the mbuf.
4466 	 */
4467 	for (i = 0; i < ctx.bce_maxsegs; i++) {
4468 		chain_prod = TX_CHAIN_IDX(prod);
4469 		txbd= &sc->tx_bd_chain[TX_PAGE(chain_prod)][TX_IDX(chain_prod)];
4470 
4471 		txbd->tx_bd_haddr_lo = htole32(BCE_ADDR_LO(segs[i].ds_addr));
4472 		txbd->tx_bd_haddr_hi = htole32(BCE_ADDR_HI(segs[i].ds_addr));
4473 		txbd->tx_bd_mss_nbytes = htole16(segs[i].ds_len);
4474 		txbd->tx_bd_vlan_tag = htole16(vlan_tag);
4475 		txbd->tx_bd_flags = htole16(flags);
4476 		prod_bseq += segs[i].ds_len;
4477 		if (i == 0)
4478 			txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_START);
4479 		prod = NEXT_TX_BD(prod);
4480 	}
4481 
4482 	/* Set the END flag on the last TX buffer descriptor. */
4483 	txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_END);
4484 
4485 	DBRUN(BCE_EXCESSIVE_SEND,
4486 	      bce_dump_tx_chain(sc, debug_prod, ctx.bce_maxsegs));
4487 
4488 	DBPRINT(sc, BCE_INFO_SEND,
4489 		"%s(): End: prod = 0x%04X, chain_prod = %04X, "
4490 		"prod_bseq = 0x%08X\n",
4491 		__func__, prod, chain_prod, prod_bseq);
4492 
4493 	bus_dmamap_sync(sc->tx_mbuf_tag, map, BUS_DMASYNC_PREWRITE);
4494 
4495 	/*
4496 	 * Ensure that the mbuf pointer for this transmission
4497 	 * is placed at the array index of the last
4498 	 * descriptor in this chain.  This is done
4499 	 * because a single map is used for all
4500 	 * segments of the mbuf and we don't want to
4501 	 * unload the map before all of the segments
4502 	 * have been freed.
4503 	 */
4504 	sc->tx_mbuf_ptr[chain_prod] = m0;
4505 
4506 	tmp_map = sc->tx_mbuf_map[chain_prod];
4507 	sc->tx_mbuf_map[chain_prod] = map;
4508 	sc->tx_mbuf_map[chain_prod_start] = tmp_map;
4509 
4510 	sc->used_tx_bd += ctx.bce_maxsegs;
4511 
4512 	/* Update some debug statistic counters */
4513 	DBRUNIF((sc->used_tx_bd > sc->tx_hi_watermark),
4514 		sc->tx_hi_watermark = sc->used_tx_bd);
4515 	DBRUNIF((sc->used_tx_bd == sc->max_tx_bd), sc->tx_full_count++);
4516 	DBRUNIF(1, sc->tx_mbuf_alloc++);
4517 
4518 	DBRUN(BCE_VERBOSE_SEND,
4519 	      bce_dump_tx_mbuf_chain(sc, chain_prod, ctx.bce_maxsegs));
4520 
4521 	/* prod points to the next free tx_bd at this point. */
4522 	sc->tx_prod = prod;
4523 	sc->tx_prod_bseq = prod_bseq;
4524 back:
4525 	if (error) {
4526 		m_freem(*m_head);
4527 		*m_head = NULL;
4528 	}
4529 	return error;
4530 }
4531 
4532 
4533 /****************************************************************************/
4534 /* Main transmit routine when called from another routine with a lock.      */
4535 /*                                                                          */
4536 /* Returns:                                                                 */
4537 /*   Nothing.                                                               */
4538 /****************************************************************************/
4539 static void
4540 bce_start(struct ifnet *ifp)
4541 {
4542 	struct bce_softc *sc = ifp->if_softc;
4543 	int count = 0;
4544 
4545 	ASSERT_SERIALIZED(ifp->if_serializer);
4546 
4547 	/* If there's no link, purge the transmit queue and exit. */
4548 	if (!sc->bce_link) {
4549 		ifq_purge(&ifp->if_snd);
4550 		return;
4551 	}
4552 
4553 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
4554 		return;
4555 
4556 	DBPRINT(sc, BCE_INFO_SEND,
4557 		"%s(): Start: tx_prod = 0x%04X, tx_chain_prod = %04X, "
4558 		"tx_prod_bseq = 0x%08X\n",
4559 		__func__,
4560 		sc->tx_prod, TX_CHAIN_IDX(sc->tx_prod), sc->tx_prod_bseq);
4561 
4562 	for (;;) {
4563 		struct mbuf *m_head;
4564 
4565 		/*
4566 		 * We keep BCE_TX_SPARE_SPACE entries, so bce_encap() is
4567 		 * unlikely to fail.
4568 		 */
4569 		if (sc->max_tx_bd - sc->used_tx_bd < BCE_TX_SPARE_SPACE) {
4570 			ifp->if_flags |= IFF_OACTIVE;
4571 			break;
4572 		}
4573 
4574 		/* Check for any frames to send. */
4575 		m_head = ifq_dequeue(&ifp->if_snd, NULL);
4576 		if (m_head == NULL)
4577 			break;
4578 
4579 		/*
4580 		 * Pack the data into the transmit ring. If we
4581 		 * don't have room, place the mbuf back at the
4582 		 * head of the queue and set the OACTIVE flag
4583 		 * to wait for the NIC to drain the chain.
4584 		 */
4585 		if (bce_encap(sc, &m_head)) {
4586 			ifp->if_flags |= IFF_OACTIVE;
4587 			DBPRINT(sc, BCE_INFO_SEND,
4588 				"TX chain is closed for business! "
4589 				"Total tx_bd used = %d\n",
4590 				sc->used_tx_bd);
4591 			break;
4592 		}
4593 
4594 		count++;
4595 
4596 		/* Send a copy of the frame to any BPF listeners. */
4597 		ETHER_BPF_MTAP(ifp, m_head);
4598 	}
4599 
4600 	if (count == 0) {
4601 		/* no packets were dequeued */
4602 		DBPRINT(sc, BCE_VERBOSE_SEND,
4603 			"%s(): No packets were dequeued\n", __func__);
4604 		return;
4605 	}
4606 
4607 	DBPRINT(sc, BCE_INFO_SEND,
4608 		"%s(): End: tx_prod = 0x%04X, tx_chain_prod = 0x%04X, "
4609 		"tx_prod_bseq = 0x%08X\n",
4610 		__func__,
4611 		sc->tx_prod, TX_CHAIN_IDX(sc->tx_prod), sc->tx_prod_bseq);
4612 
4613 	/* Start the transmit. */
4614 	REG_WR16(sc, MB_TX_CID_ADDR + BCE_L2CTX_TX_HOST_BIDX, sc->tx_prod);
4615 	REG_WR(sc, MB_TX_CID_ADDR + BCE_L2CTX_TX_HOST_BSEQ, sc->tx_prod_bseq);
4616 
4617 	/* Set the tx timeout. */
4618 	ifp->if_timer = BCE_TX_TIMEOUT;
4619 }
4620 
4621 
4622 /****************************************************************************/
4623 /* Handles any IOCTL calls from the operating system.                       */
4624 /*                                                                          */
4625 /* Returns:                                                                 */
4626 /*   0 for success, positive value for failure.                             */
4627 /****************************************************************************/
4628 static int
4629 bce_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
4630 {
4631 	struct bce_softc *sc = ifp->if_softc;
4632 	struct ifreq *ifr = (struct ifreq *)data;
4633 	struct mii_data *mii;
4634 	int mask, error = 0;
4635 
4636 	ASSERT_SERIALIZED(ifp->if_serializer);
4637 
4638 	switch(command) {
4639 	case SIOCSIFMTU:
4640 		/* Check that the MTU setting is supported. */
4641 		if (ifr->ifr_mtu < BCE_MIN_MTU ||
4642 #ifdef notyet
4643 		    ifr->ifr_mtu > BCE_MAX_JUMBO_MTU
4644 #else
4645 		    ifr->ifr_mtu > ETHERMTU
4646 #endif
4647 		   ) {
4648 			error = EINVAL;
4649 			break;
4650 		}
4651 
4652 		DBPRINT(sc, BCE_INFO, "Setting new MTU of %d\n", ifr->ifr_mtu);
4653 
4654 		ifp->if_mtu = ifr->ifr_mtu;
4655 		ifp->if_flags &= ~IFF_RUNNING;	/* Force reinitialize */
4656 		bce_init(sc);
4657 		break;
4658 
4659 	case SIOCSIFFLAGS:
4660 		if (ifp->if_flags & IFF_UP) {
4661 			if (ifp->if_flags & IFF_RUNNING) {
4662 				mask = ifp->if_flags ^ sc->bce_if_flags;
4663 
4664 				if (mask & (IFF_PROMISC | IFF_ALLMULTI))
4665 					bce_set_rx_mode(sc);
4666 			} else {
4667 				bce_init(sc);
4668 			}
4669 		} else if (ifp->if_flags & IFF_RUNNING) {
4670 			bce_stop(sc);
4671 		}
4672 		sc->bce_if_flags = ifp->if_flags;
4673 		break;
4674 
4675 	case SIOCADDMULTI:
4676 	case SIOCDELMULTI:
4677 		if (ifp->if_flags & IFF_RUNNING)
4678 			bce_set_rx_mode(sc);
4679 		break;
4680 
4681 	case SIOCSIFMEDIA:
4682 	case SIOCGIFMEDIA:
4683 		DBPRINT(sc, BCE_VERBOSE, "bce_phy_flags = 0x%08X\n",
4684 			sc->bce_phy_flags);
4685 		DBPRINT(sc, BCE_VERBOSE, "Copper media set/get\n");
4686 
4687 		mii = device_get_softc(sc->bce_miibus);
4688 		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
4689 		break;
4690 
4691 	case SIOCSIFCAP:
4692 		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
4693 		DBPRINT(sc, BCE_INFO, "Received SIOCSIFCAP = 0x%08X\n",
4694 			(uint32_t) mask);
4695 
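		/*
		 * Toggle hardware checksum offload and keep if_hwassist in
		 * sync with the new setting.
		 */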
4696 		if (mask & IFCAP_HWCSUM) {
4697 			ifp->if_capenable ^= IFCAP_HWCSUM;
4698 			if (IFCAP_HWCSUM & ifp->if_capenable)
4699 				ifp->if_hwassist = BCE_IF_HWASSIST;
4700 			else
4701 				ifp->if_hwassist = 0;
4702 		}
4703 		break;
4704 
4705 	default:
4706 		error = ether_ioctl(ifp, command, data);
4707 		break;
4708 	}
4709 	return error;
4710 }
4711 
4712 
4713 /****************************************************************************/
4714 /* Transmit timeout handler.                                                */
4715 /*                                                                          */
4716 /* Returns:                                                                 */
4717 /*   Nothing.                                                               */
4718 /****************************************************************************/
4719 static void
4720 bce_watchdog(struct ifnet *ifp)
4721 {
4722 	struct bce_softc *sc = ifp->if_softc;
4723 
4724 	ASSERT_SERIALIZED(ifp->if_serializer);
4725 
4726 	DBRUN(BCE_VERBOSE_SEND,
4727 	      bce_dump_driver_state(sc);
4728 	      bce_dump_status_block(sc));
4729 
4730 	/*
4731 	 * If we are in this routine because of pause frames, then
4732 	 * don't reset the hardware.
4733 	 */
4734 	if (REG_RD(sc, BCE_EMAC_TX_STATUS) & BCE_EMAC_TX_STATUS_XOFFED)
4735 		return;
4736 
4737 	if_printf(ifp, "Watchdog timeout occurred, resetting!\n");
4738 
4739 	/* DBRUN(BCE_FATAL, bce_breakpoint(sc)); */
4740 
4741 	ifp->if_flags &= ~IFF_RUNNING;	/* Force reinitialize */
4742 	bce_init(sc);
4743 
4744 	ifp->if_oerrors++;
4745 
4746 	if (!ifq_is_empty(&ifp->if_snd))
4747 		if_devstart(ifp);
4748 }
4749 
4750 
4751 #ifdef DEVICE_POLLING
4752 
4753 static void
4754 bce_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
4755 {
4756 	struct bce_softc *sc = ifp->if_softc;
4757 	struct status_block *sblk = sc->status_block;
4758 	uint16_t hw_tx_cons, hw_rx_cons;
4759 
4760 	ASSERT_SERIALIZED(ifp->if_serializer);
4761 
4762 	switch (cmd) {
4763 	case POLL_REGISTER:
4764 		bce_disable_intr(sc);
4765 
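		/*
		 * Force the "during interrupt" quick consumer trip counts
		 * (high 16 bits) to one while polling; the interrupt-mode
		 * values are restored in the POLL_DEREGISTER case below.
		 */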
4766 		REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
4767 		       (1 << 16) | sc->bce_rx_quick_cons_trip);
4768 		REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
4769 		       (1 << 16) | sc->bce_tx_quick_cons_trip);
4770 		return;
4771 	case POLL_DEREGISTER:
4772 		bce_enable_intr(sc);
4773 
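		/* Restore the interrupt-mode coalescing trip values. */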
4774 		REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
4775 		       (sc->bce_tx_quick_cons_trip_int << 16) |
4776 		       sc->bce_tx_quick_cons_trip);
4777 		REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
4778 		       (sc->bce_rx_quick_cons_trip_int << 16) |
4779 		       sc->bce_rx_quick_cons_trip);
4780 		return;
4781 	default:
4782 		break;
4783 	}
4784 
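	/* Pull in the latest status block contents before using them. */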
4785 	bus_dmamap_sync(sc->status_tag, sc->status_map, BUS_DMASYNC_POSTREAD);
4786 
4787 	if (cmd == POLL_AND_CHECK_STATUS) {
4788 		uint32_t status_attn_bits;
4789 
4790 		status_attn_bits = sblk->status_attn_bits;
4791 
4792 		DBRUNIF(DB_RANDOMTRUE(bce_debug_unexpected_attention),
4793 			if_printf(ifp,
4794 			"Simulating unexpected status attention bit set.");
4795 			status_attn_bits |= STATUS_ATTN_BITS_PARITY_ERROR);
4796 
4797 		/* Was it a link change interrupt? */
4798 		if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
4799 		    (sblk->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE))
4800 			bce_phy_intr(sc);
4801 
4802 		/*
4803 		 * If any other attention is asserted then
4804 		 * the chip is toast.
4805 		 */
4806 		if ((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) !=
4807 		     (sblk->status_attn_bits_ack &
4808 		      ~STATUS_ATTN_BITS_LINK_STATE)) {
4809 			DBRUN(1, sc->unexpected_attentions++);
4810 
4811 			if_printf(ifp, "Fatal attention detected: 0x%08X\n",
4812 				  sblk->status_attn_bits);
4813 
4814 			DBRUN(BCE_FATAL,
4815 			if (bce_debug_unexpected_attention == 0)
4816 				bce_breakpoint(sc));
4817 
4818 			bce_init(sc);
4819 			return;
4820 		}
4821 	}
4822 
4823 	hw_rx_cons = bce_get_hw_rx_cons(sc);
4824 	hw_tx_cons = bce_get_hw_tx_cons(sc);
4825 
4826 	/* Check for any completed RX frames. */
4827 	if (hw_rx_cons != sc->hw_rx_cons)
4828 		bce_rx_intr(sc, count);
4829 
4830 	/* Check for any completed TX frames. */
4831 	if (hw_tx_cons != sc->hw_tx_cons)
4832 		bce_tx_intr(sc);
4833 
4834 	bus_dmamap_sync(sc->status_tag,	sc->status_map, BUS_DMASYNC_PREWRITE);
4835 
4836 	/* Check for new frames to transmit. */
4837 	if (!ifq_is_empty(&ifp->if_snd))
4838 		if_devstart(ifp);
4839 }
4840 
4841 #endif	/* DEVICE_POLLING */
4842 
4843 
4844 /*
4845  * Interrupt handler.
4846  */
4847 /****************************************************************************/
4848 /* Main interrupt entry point.  Verifies that the controller generated the  */
4849 /* interrupt and then calls a separate routine to handle the various        */
4850 /* interrupt causes (PHY, TX, RX).                                          */
4851 /*                                                                          */
4852 /* Returns:                                                                 */
4853 /*   Nothing.                                                               */
4854 /****************************************************************************/
4855 static void
4856 bce_intr(void *xsc)
4857 {
4858 	struct bce_softc *sc = xsc;
4859 	struct ifnet *ifp = &sc->arpcom.ac_if;
4860 	struct status_block *sblk;
4861 	uint16_t hw_rx_cons, hw_tx_cons;
4862 
4863 	ASSERT_SERIALIZED(ifp->if_serializer);
4864 
4865 	DBPRINT(sc, BCE_EXCESSIVE, "Entering %s()\n", __func__);
4866 	DBRUNIF(1, sc->interrupts_generated++);
4867 
4868 	bus_dmamap_sync(sc->status_tag, sc->status_map, BUS_DMASYNC_POSTREAD);
4869 	sblk = sc->status_block;
4870 
4871 	/*
4872 	 * If the hardware status block index matches the last value
4873 	 * read by the driver and we haven't asserted our interrupt
4874 	 * then there's nothing to do.
4875 	 */
4876 	if (sblk->status_idx == sc->last_status_idx &&
4877 	    (REG_RD(sc, BCE_PCICFG_MISC_STATUS) &
4878 	     BCE_PCICFG_MISC_STATUS_INTA_VALUE))
4879 		return;
4880 
4881 	/* Ack the interrupt and stop others from occurring. */
4882 	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
4883 	       BCE_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
4884 	       BCE_PCICFG_INT_ACK_CMD_MASK_INT);
4885 
4886 	/* Check if the hardware has finished any work. */
4887 	hw_rx_cons = bce_get_hw_rx_cons(sc);
4888 	hw_tx_cons = bce_get_hw_tx_cons(sc);
4889 
4890 	/* Keep processing data as long as there is work to do. */
4891 	for (;;) {
4892 		uint32_t status_attn_bits;
4893 
4894 		status_attn_bits = sblk->status_attn_bits;
4895 
4896 		DBRUNIF(DB_RANDOMTRUE(bce_debug_unexpected_attention),
4897 			if_printf(ifp,
4898 			"Simulating unexpected status attention bit set.");
4899 			status_attn_bits |= STATUS_ATTN_BITS_PARITY_ERROR);
4900 
4901 		/* Was it a link change interrupt? */
4902 		if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
4903 		    (sblk->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE))
4904 			bce_phy_intr(sc);
4905 
4906 		/*
4907 		 * If any other attention is asserted then
4908 		 * the chip is toast.
4909 		 */
4910 		if ((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) !=
4911 		     (sblk->status_attn_bits_ack &
4912 		      ~STATUS_ATTN_BITS_LINK_STATE)) {
4913 			DBRUN(1, sc->unexpected_attentions++);
4914 
4915 			if_printf(ifp, "Fatal attention detected: 0x%08X\n",
4916 				  sblk->status_attn_bits);
4917 
4918 			DBRUN(BCE_FATAL,
4919 			if (bce_debug_unexpected_attention == 0)
4920 				bce_breakpoint(sc));
4921 
4922 			bce_init(sc);
4923 			return;
4924 		}
4925 
4926 		/* Check for any completed RX frames. */
4927 		if (hw_rx_cons != sc->hw_rx_cons)
4928 			bce_rx_intr(sc, -1);
4929 
4930 		/* Check for any completed TX frames. */
4931 		if (hw_tx_cons != sc->hw_tx_cons)
4932 			bce_tx_intr(sc);
4933 
4934 		/*
4935 		 * Save the status block index value
4936 		 * for use during the next interrupt.
4937 		 */
4938 		sc->last_status_idx = sblk->status_idx;
4939 
4940 		/*
4941 		 * Prevent speculative reads from getting
4942 		 * ahead of the status block.
4943 		 */
4944 		bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
4945 				  BUS_SPACE_BARRIER_READ);
4946 
4947 		/*
4948 		 * If there's no work left then exit the
4949 		 * interrupt service routine.
4950 		 */
4951 		hw_rx_cons = bce_get_hw_rx_cons(sc);
4952 		hw_tx_cons = bce_get_hw_tx_cons(sc);
4953 		if ((hw_rx_cons == sc->hw_rx_cons) && (hw_tx_cons == sc->hw_tx_cons))
4954 			break;
4955 	}
4956 
4957 	bus_dmamap_sync(sc->status_tag,	sc->status_map, BUS_DMASYNC_PREWRITE);
4958 
4959 	/* Re-enable interrupts. */
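	/*
	 * The first write updates the status index with the interrupt still
	 * masked; the second write unmasks it.
	 */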
4960 	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
4961 	       BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx |
4962 	       BCE_PCICFG_INT_ACK_CMD_MASK_INT);
4963 	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
4964 	       BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx);
4965 
4966 	if (sc->bce_coalchg_mask)
4967 		bce_coal_change(sc);
4968 
4969 	/* Handle any frames that arrived while handling the interrupt. */
4970 	if (!ifq_is_empty(&ifp->if_snd))
4971 		if_devstart(ifp);
4972 }
4973 
4974 
4975 /****************************************************************************/
4976 /* Programs the various packet receive modes (broadcast and multicast).     */
4977 /*                                                                          */
4978 /* Returns:                                                                 */
4979 /*   Nothing.                                                               */
4980 /****************************************************************************/
4981 static void
4982 bce_set_rx_mode(struct bce_softc *sc)
4983 {
4984 	struct ifnet *ifp = &sc->arpcom.ac_if;
4985 	struct ifmultiaddr *ifma;
4986 	uint32_t hashes[NUM_MC_HASH_REGISTERS] = { 0, 0, 0, 0, 0, 0, 0, 0 };
4987 	uint32_t rx_mode, sort_mode;
4988 	int h, i;
4989 
4990 	ASSERT_SERIALIZED(ifp->if_serializer);
4991 
4992 	/* Initialize receive mode default settings. */
4993 	rx_mode = sc->rx_mode &
4994 		  ~(BCE_EMAC_RX_MODE_PROMISCUOUS |
4995 		    BCE_EMAC_RX_MODE_KEEP_VLAN_TAG);
4996 	sort_mode = 1 | BCE_RPM_SORT_USER0_BC_EN;
4997 
4998 	/*
4999 	 * ASF/IPMI/UMP firmware requires that VLAN tag stripping
5000 	 * be enabled.
5001 	 */
5002 	if (!(BCE_IF_CAPABILITIES & IFCAP_VLAN_HWTAGGING) &&
5003 	    !(sc->bce_flags & BCE_MFW_ENABLE_FLAG))
5004 		rx_mode |= BCE_EMAC_RX_MODE_KEEP_VLAN_TAG;
5005 
5006 	/*
5007 	 * Check for promiscuous, all multicast, or selected
5008 	 * multicast address filtering.
5009 	 */
5010 	if (ifp->if_flags & IFF_PROMISC) {
5011 		DBPRINT(sc, BCE_INFO, "Enabling promiscuous mode.\n");
5012 
5013 		/* Enable promiscuous mode. */
5014 		rx_mode |= BCE_EMAC_RX_MODE_PROMISCUOUS;
5015 		sort_mode |= BCE_RPM_SORT_USER0_PROM_EN;
5016 	} else if (ifp->if_flags & IFF_ALLMULTI) {
5017 		DBPRINT(sc, BCE_INFO, "Enabling all multicast mode.\n");
5018 
5019 		/* Enable all multicast addresses. */
5020 		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
5021 			REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4),
5022 			       0xffffffff);
5023 		}
5024 		sort_mode |= BCE_RPM_SORT_USER0_MC_EN;
5025 	} else {
5026 		/* Accept one or more multicast(s). */
5027 		DBPRINT(sc, BCE_INFO, "Enabling selective multicast mode.\n");
5028 
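		/*
		 * The low byte of each address's CRC32 selects one of 256
		 * hash bits: bits 7-5 pick the hash register and bits 4-0
		 * pick the bit within it.
		 */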
5029 		LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
5030 			if (ifma->ifma_addr->sa_family != AF_LINK)
5031 				continue;
5032 			h = ether_crc32_le(
5033 			    LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
5034 			    ETHER_ADDR_LEN) & 0xFF;
5035 			hashes[(h & 0xE0) >> 5] |= 1 << (h & 0x1F);
5036 		}
5037 
5038 		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
5039 			REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4),
5040 			       hashes[i]);
5041 		}
5042 		sort_mode |= BCE_RPM_SORT_USER0_MC_HSH_EN;
5043 	}
5044 
5045 	/* Only make changes if the receive mode has actually changed. */
5046 	if (rx_mode != sc->rx_mode) {
5047 		DBPRINT(sc, BCE_VERBOSE, "Enabling new receive mode: 0x%08X\n",
5048 			rx_mode);
5049 
5050 		sc->rx_mode = rx_mode;
5051 		REG_WR(sc, BCE_EMAC_RX_MODE, rx_mode);
5052 	}
5053 
5054 	/* Disable and clear the existing sort before enabling a new sort. */
5055 	REG_WR(sc, BCE_RPM_SORT_USER0, 0x0);
5056 	REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode);
5057 	REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode | BCE_RPM_SORT_USER0_ENA);
5058 }
5059 
5060 
5061 /****************************************************************************/
5062 /* Called periodically to update statistics from the controller's           */
5063 /* statistics block.                                                        */
5064 /*                                                                          */
5065 /* Returns:                                                                 */
5066 /*   Nothing.                                                               */
5067 /****************************************************************************/
5068 static void
5069 bce_stats_update(struct bce_softc *sc)
5070 {
5071 	struct ifnet *ifp = &sc->arpcom.ac_if;
5072 	struct statistics_block *stats = sc->stats_block;
5073 
5074 	DBPRINT(sc, BCE_EXCESSIVE, "Entering %s()\n", __func__);
5075 
5076 	ASSERT_SERIALIZED(ifp->if_serializer);
5077 
5078 	/*
5079 	 * Update the interface statistics from the hardware statistics.
5080 	 */
5081 	ifp->if_collisions = (u_long)stats->stat_EtherStatsCollisions;
5082 
5083 	ifp->if_ierrors = (u_long)stats->stat_EtherStatsUndersizePkts +
5084 			  (u_long)stats->stat_EtherStatsOverrsizePkts +
5085 			  (u_long)stats->stat_IfInMBUFDiscards +
5086 			  (u_long)stats->stat_Dot3StatsAlignmentErrors +
5087 			  (u_long)stats->stat_Dot3StatsFCSErrors;
5088 
5089 	ifp->if_oerrors =
5090 	(u_long)stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors +
5091 	(u_long)stats->stat_Dot3StatsExcessiveCollisions +
5092 	(u_long)stats->stat_Dot3StatsLateCollisions;
5093 
5094 	/*
5095 	 * Certain controllers don't report carrier sense errors correctly.
5096 	 * See errata E11_5708CA0_1165.
5097 	 */
5098 	if (!(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) &&
5099 	    !(BCE_CHIP_ID(sc) == BCE_CHIP_ID_5708_A0)) {
5100 		ifp->if_oerrors +=
5101 			(u_long)stats->stat_Dot3StatsCarrierSenseErrors;
5102 	}
5103 
5104 	/*
5105 	 * Update the sysctl statistics from the hardware statistics.
5106 	 */
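	/*
	 * The 64-bit counters are exported by the hardware as separate high
	 * and low 32-bit words and are merged here.
	 */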
5107 	sc->stat_IfHCInOctets =
5108 		((uint64_t)stats->stat_IfHCInOctets_hi << 32) +
5109 		 (uint64_t)stats->stat_IfHCInOctets_lo;
5110 
5111 	sc->stat_IfHCInBadOctets =
5112 		((uint64_t)stats->stat_IfHCInBadOctets_hi << 32) +
5113 		 (uint64_t)stats->stat_IfHCInBadOctets_lo;
5114 
5115 	sc->stat_IfHCOutOctets =
5116 		((uint64_t)stats->stat_IfHCOutOctets_hi << 32) +
5117 		 (uint64_t)stats->stat_IfHCOutOctets_lo;
5118 
5119 	sc->stat_IfHCOutBadOctets =
5120 		((uint64_t)stats->stat_IfHCOutBadOctets_hi << 32) +
5121 		 (uint64_t)stats->stat_IfHCOutBadOctets_lo;
5122 
5123 	sc->stat_IfHCInUcastPkts =
5124 		((uint64_t)stats->stat_IfHCInUcastPkts_hi << 32) +
5125 		 (uint64_t)stats->stat_IfHCInUcastPkts_lo;
5126 
5127 	sc->stat_IfHCInMulticastPkts =
5128 		((uint64_t)stats->stat_IfHCInMulticastPkts_hi << 32) +
5129 		 (uint64_t)stats->stat_IfHCInMulticastPkts_lo;
5130 
5131 	sc->stat_IfHCInBroadcastPkts =
5132 		((uint64_t)stats->stat_IfHCInBroadcastPkts_hi << 32) +
5133 		 (uint64_t)stats->stat_IfHCInBroadcastPkts_lo;
5134 
5135 	sc->stat_IfHCOutUcastPkts =
5136 		((uint64_t)stats->stat_IfHCOutUcastPkts_hi << 32) +
5137 		 (uint64_t)stats->stat_IfHCOutUcastPkts_lo;
5138 
5139 	sc->stat_IfHCOutMulticastPkts =
5140 		((uint64_t)stats->stat_IfHCOutMulticastPkts_hi << 32) +
5141 		 (uint64_t)stats->stat_IfHCOutMulticastPkts_lo;
5142 
5143 	sc->stat_IfHCOutBroadcastPkts =
5144 		((uint64_t)stats->stat_IfHCOutBroadcastPkts_hi << 32) +
5145 		 (uint64_t)stats->stat_IfHCOutBroadcastPkts_lo;
5146 
5147 	sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors =
5148 		stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors;
5149 
5150 	sc->stat_Dot3StatsCarrierSenseErrors =
5151 		stats->stat_Dot3StatsCarrierSenseErrors;
5152 
5153 	sc->stat_Dot3StatsFCSErrors =
5154 		stats->stat_Dot3StatsFCSErrors;
5155 
5156 	sc->stat_Dot3StatsAlignmentErrors =
5157 		stats->stat_Dot3StatsAlignmentErrors;
5158 
5159 	sc->stat_Dot3StatsSingleCollisionFrames =
5160 		stats->stat_Dot3StatsSingleCollisionFrames;
5161 
5162 	sc->stat_Dot3StatsMultipleCollisionFrames =
5163 		stats->stat_Dot3StatsMultipleCollisionFrames;
5164 
5165 	sc->stat_Dot3StatsDeferredTransmissions =
5166 		stats->stat_Dot3StatsDeferredTransmissions;
5167 
5168 	sc->stat_Dot3StatsExcessiveCollisions =
5169 		stats->stat_Dot3StatsExcessiveCollisions;
5170 
5171 	sc->stat_Dot3StatsLateCollisions =
5172 		stats->stat_Dot3StatsLateCollisions;
5173 
5174 	sc->stat_EtherStatsCollisions =
5175 		stats->stat_EtherStatsCollisions;
5176 
5177 	sc->stat_EtherStatsFragments =
5178 		stats->stat_EtherStatsFragments;
5179 
5180 	sc->stat_EtherStatsJabbers =
5181 		stats->stat_EtherStatsJabbers;
5182 
5183 	sc->stat_EtherStatsUndersizePkts =
5184 		stats->stat_EtherStatsUndersizePkts;
5185 
5186 	sc->stat_EtherStatsOverrsizePkts =
5187 		stats->stat_EtherStatsOverrsizePkts;
5188 
5189 	sc->stat_EtherStatsPktsRx64Octets =
5190 		stats->stat_EtherStatsPktsRx64Octets;
5191 
5192 	sc->stat_EtherStatsPktsRx65Octetsto127Octets =
5193 		stats->stat_EtherStatsPktsRx65Octetsto127Octets;
5194 
5195 	sc->stat_EtherStatsPktsRx128Octetsto255Octets =
5196 		stats->stat_EtherStatsPktsRx128Octetsto255Octets;
5197 
5198 	sc->stat_EtherStatsPktsRx256Octetsto511Octets =
5199 		stats->stat_EtherStatsPktsRx256Octetsto511Octets;
5200 
5201 	sc->stat_EtherStatsPktsRx512Octetsto1023Octets =
5202 		stats->stat_EtherStatsPktsRx512Octetsto1023Octets;
5203 
5204 	sc->stat_EtherStatsPktsRx1024Octetsto1522Octets =
5205 		stats->stat_EtherStatsPktsRx1024Octetsto1522Octets;
5206 
5207 	sc->stat_EtherStatsPktsRx1523Octetsto9022Octets =
5208 		stats->stat_EtherStatsPktsRx1523Octetsto9022Octets;
5209 
5210 	sc->stat_EtherStatsPktsTx64Octets =
5211 		stats->stat_EtherStatsPktsTx64Octets;
5212 
5213 	sc->stat_EtherStatsPktsTx65Octetsto127Octets =
5214 		stats->stat_EtherStatsPktsTx65Octetsto127Octets;
5215 
5216 	sc->stat_EtherStatsPktsTx128Octetsto255Octets =
5217 		stats->stat_EtherStatsPktsTx128Octetsto255Octets;
5218 
5219 	sc->stat_EtherStatsPktsTx256Octetsto511Octets =
5220 		stats->stat_EtherStatsPktsTx256Octetsto511Octets;
5221 
5222 	sc->stat_EtherStatsPktsTx512Octetsto1023Octets =
5223 		stats->stat_EtherStatsPktsTx512Octetsto1023Octets;
5224 
5225 	sc->stat_EtherStatsPktsTx1024Octetsto1522Octets =
5226 		stats->stat_EtherStatsPktsTx1024Octetsto1522Octets;
5227 
5228 	sc->stat_EtherStatsPktsTx1523Octetsto9022Octets =
5229 		stats->stat_EtherStatsPktsTx1523Octetsto9022Octets;
5230 
5231 	sc->stat_XonPauseFramesReceived =
5232 		stats->stat_XonPauseFramesReceived;
5233 
5234 	sc->stat_XoffPauseFramesReceived =
5235 		stats->stat_XoffPauseFramesReceived;
5236 
5237 	sc->stat_OutXonSent =
5238 		stats->stat_OutXonSent;
5239 
5240 	sc->stat_OutXoffSent =
5241 		stats->stat_OutXoffSent;
5242 
5243 	sc->stat_FlowControlDone =
5244 		stats->stat_FlowControlDone;
5245 
5246 	sc->stat_MacControlFramesReceived =
5247 		stats->stat_MacControlFramesReceived;
5248 
5249 	sc->stat_XoffStateEntered =
5250 		stats->stat_XoffStateEntered;
5251 
5252 	sc->stat_IfInFramesL2FilterDiscards =
5253 		stats->stat_IfInFramesL2FilterDiscards;
5254 
5255 	sc->stat_IfInRuleCheckerDiscards =
5256 		stats->stat_IfInRuleCheckerDiscards;
5257 
5258 	sc->stat_IfInFTQDiscards =
5259 		stats->stat_IfInFTQDiscards;
5260 
5261 	sc->stat_IfInMBUFDiscards =
5262 		stats->stat_IfInMBUFDiscards;
5263 
5264 	sc->stat_IfInRuleCheckerP4Hit =
5265 		stats->stat_IfInRuleCheckerP4Hit;
5266 
5267 	sc->stat_CatchupInRuleCheckerDiscards =
5268 		stats->stat_CatchupInRuleCheckerDiscards;
5269 
5270 	sc->stat_CatchupInFTQDiscards =
5271 		stats->stat_CatchupInFTQDiscards;
5272 
5273 	sc->stat_CatchupInMBUFDiscards =
5274 		stats->stat_CatchupInMBUFDiscards;
5275 
5276 	sc->stat_CatchupInRuleCheckerP4Hit =
5277 		stats->stat_CatchupInRuleCheckerP4Hit;
5278 
5279 	sc->com_no_buffers = REG_RD_IND(sc, 0x120084);
5280 
5281 	DBPRINT(sc, BCE_EXCESSIVE, "Exiting %s()\n", __func__);
5282 }
5283 
5284 
5285 /****************************************************************************/
5286 /* Periodic function to perform maintenance tasks.                          */
5287 /*                                                                          */
5288 /* Returns:                                                                 */
5289 /*   Nothing.                                                               */
5290 /****************************************************************************/
5291 static void
5292 bce_tick_serialized(struct bce_softc *sc)
5293 {
5294 	struct ifnet *ifp = &sc->arpcom.ac_if;
5295 	struct mii_data *mii;
5296 	uint32_t msg;
5297 
5298 	ASSERT_SERIALIZED(ifp->if_serializer);
5299 
5300 	/* Tell the firmware that the driver is still running. */
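	/*
	 * In debug builds the ALWAYS_ALIVE pulse code keeps the firmware
	 * from declaring the driver dead while it is held in a debugger.
	 */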
5301 #ifdef BCE_DEBUG
5302 	msg = (uint32_t)BCE_DRV_MSG_DATA_PULSE_CODE_ALWAYS_ALIVE;
5303 #else
5304 	msg = (uint32_t)++sc->bce_fw_drv_pulse_wr_seq;
5305 #endif
5306 	REG_WR_IND(sc, sc->bce_shmem_base + BCE_DRV_PULSE_MB, msg);
5307 
5308 	/* Update the statistics from the hardware statistics block. */
5309 	bce_stats_update(sc);
5310 
5311 	/* Schedule the next tick. */
5312 	callout_reset(&sc->bce_stat_ch, hz, bce_tick, sc);
5313 
5314 	/* If the link is already up then we're done. */
5315 	if (sc->bce_link)
5316 		return;
5317 
5318 	mii = device_get_softc(sc->bce_miibus);
5319 	mii_tick(mii);
5320 
5321 	/* Check if the link has come up. */
5322 	if (!sc->bce_link && (mii->mii_media_status & IFM_ACTIVE) &&
5323 	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
5324 		sc->bce_link++;
5325 		/* Now that link is up, handle any outstanding TX traffic. */
5326 		if (!ifq_is_empty(&ifp->if_snd))
5327 			if_devstart(ifp);
5328 	}
5329 }
5330 
5331 
5332 static void
5333 bce_tick(void *xsc)
5334 {
5335 	struct bce_softc *sc = xsc;
5336 	struct ifnet *ifp = &sc->arpcom.ac_if;
5337 
5338 	lwkt_serialize_enter(ifp->if_serializer);
5339 	bce_tick_serialized(sc);
5340 	lwkt_serialize_exit(ifp->if_serializer);
5341 }
5342 
5343 
5344 #ifdef BCE_DEBUG
5345 /****************************************************************************/
5346 /* Allows the driver state to be dumped through the sysctl interface.       */
5347 /*                                                                          */
5348 /* Returns:                                                                 */
5349 /*   0 for success, positive value for failure.                             */
5350 /****************************************************************************/
5351 static int
5352 bce_sysctl_driver_state(SYSCTL_HANDLER_ARGS)
5353 {
5354         int error;
5355         int result;
5356         struct bce_softc *sc;
5357 
5358         result = -1;
5359         error = sysctl_handle_int(oidp, &result, 0, req);
5360 
5361         if (error || !req->newptr)
5362                 return (error);
5363 
5364         if (result == 1) {
5365                 sc = (struct bce_softc *)arg1;
5366                 bce_dump_driver_state(sc);
5367         }
5368 
5369         return error;
5370 }
5371 
5372 
5373 /****************************************************************************/
5374 /* Allows the hardware state to be dumped through the sysctl interface.     */
5375 /*                                                                          */
5376 /* Returns:                                                                 */
5377 /*   0 for success, positive value for failure.                             */
5378 /****************************************************************************/
5379 static int
5380 bce_sysctl_hw_state(SYSCTL_HANDLER_ARGS)
5381 {
5382         int error;
5383         int result;
5384         struct bce_softc *sc;
5385 
5386         result = -1;
5387         error = sysctl_handle_int(oidp, &result, 0, req);
5388 
5389         if (error || !req->newptr)
5390                 return (error);
5391 
5392         if (result == 1) {
5393                 sc = (struct bce_softc *)arg1;
5394                 bce_dump_hw_state(sc);
5395         }
5396 
5397         return error;
5398 }
5399 
5400 
5401 /****************************************************************************/
5402 /* Provides a sysctl interface to allow dumping the RX chain.               */
5403 /*                                                                          */
5404 /* Returns:                                                                 */
5405 /*   0 for success, positive value for failure.                             */
5406 /****************************************************************************/
5407 static int
5408 bce_sysctl_dump_rx_chain(SYSCTL_HANDLER_ARGS)
5409 {
5410         int error;
5411         int result;
5412         struct bce_softc *sc;
5413 
5414         result = -1;
5415         error = sysctl_handle_int(oidp, &result, 0, req);
5416 
5417         if (error || !req->newptr)
5418                 return (error);
5419 
5420         if (result == 1) {
5421                 sc = (struct bce_softc *)arg1;
5422                 bce_dump_rx_chain(sc, 0, USABLE_RX_BD);
5423         }
5424 
5425         return error;
5426 }
5427 
5428 
5429 /****************************************************************************/
5430 /* Provides a sysctl interface to allow dumping the TX chain.               */
5431 /*                                                                          */
5432 /* Returns:                                                                 */
5433 /*   0 for success, positive value for failure.                             */
5434 /****************************************************************************/
5435 static int
5436 bce_sysctl_dump_tx_chain(SYSCTL_HANDLER_ARGS)
5437 {
5438         int error;
5439         int result;
5440         struct bce_softc *sc;
5441 
5442         result = -1;
5443         error = sysctl_handle_int(oidp, &result, 0, req);
5444 
5445         if (error || !req->newptr)
5446                 return (error);
5447 
5448         if (result == 1) {
5449                 sc = (struct bce_softc *)arg1;
5450                 bce_dump_tx_chain(sc, 0, USABLE_TX_BD);
5451         }
5452 
5453         return error;
5454 }
5455 
5456 
5457 /****************************************************************************/
5458 /* Provides a sysctl interface to allow reading arbitrary registers in the  */
5459 /* device.  DO NOT ENABLE ON PRODUCTION SYSTEMS!                            */
5460 /*                                                                          */
5461 /* Returns:                                                                 */
5462 /*   0 for success, positive value for failure.                             */
5463 /****************************************************************************/
5464 static int
5465 bce_sysctl_reg_read(SYSCTL_HANDLER_ARGS)
5466 {
5467 	struct bce_softc *sc;
5468 	int error;
5469 	uint32_t val, result;
5470 
5471 	result = -1;
5472 	error = sysctl_handle_int(oidp, &result, 0, req);
5473 	if (error || (req->newptr == NULL))
5474 		return (error);
5475 
5476 	/* Make sure the register is accessible. */
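	/*
	 * Registers below 0x8000 are read directly; higher offsets (up to
	 * 0x280000) go through the indirect register access path.
	 */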
5477 	if (result < 0x8000) {
5478 		sc = (struct bce_softc *)arg1;
5479 		val = REG_RD(sc, result);
5480 		if_printf(&sc->arpcom.ac_if, "reg 0x%08X = 0x%08X\n",
5481 			  result, val);
5482 	} else if (result < 0x0280000) {
5483 		sc = (struct bce_softc *)arg1;
5484 		val = REG_RD_IND(sc, result);
5485 		if_printf(&sc->arpcom.ac_if, "reg 0x%08X = 0x%08X\n",
5486 			  result, val);
5487 	}
5488 	return (error);
5489 }
5490 
5491 
5492 /****************************************************************************/
5493 /* Provides a sysctl interface to allow reading arbitrary PHY registers in  */
5494 /* the device.  DO NOT ENABLE ON PRODUCTION SYSTEMS!                        */
5495 /*                                                                          */
5496 /* Returns:                                                                 */
5497 /*   0 for success, positive value for failure.                             */
5498 /****************************************************************************/
5499 static int
5500 bce_sysctl_phy_read(SYSCTL_HANDLER_ARGS)
5501 {
5502 	struct bce_softc *sc;
5503 	device_t dev;
5504 	int error, result;
5505 	uint16_t val;
5506 
5507 	result = -1;
5508 	error = sysctl_handle_int(oidp, &result, 0, req);
5509 	if (error || (req->newptr == NULL))
5510 		return (error);
5511 
5512 	/* Make sure the register is accessible. */
5513 	if (result < 0x20) {
5514 		sc = (struct bce_softc *)arg1;
5515 		dev = sc->bce_dev;
5516 		val = bce_miibus_read_reg(dev, sc->bce_phy_addr, result);
5517 		if_printf(&sc->arpcom.ac_if,
5518 			  "phy 0x%02X = 0x%04X\n", result, val);
5519 	}
5520 	return (error);
5521 }
5522 
5523 
5524 /****************************************************************************/
5525 /* Provides a sysctl interface to force the driver to dump state and        */
5526 /* enter the debugger.  DO NOT ENABLE ON PRODUCTION SYSTEMS!                */
5527 /*                                                                          */
5528 /* Returns:                                                                 */
5529 /*   0 for success, positive value for failure.                             */
5530 /****************************************************************************/
5531 static int
5532 bce_sysctl_breakpoint(SYSCTL_HANDLER_ARGS)
5533 {
5534         int error;
5535         int result;
5536         struct bce_softc *sc;
5537 
5538         result = -1;
5539         error = sysctl_handle_int(oidp, &result, 0, req);
5540 
5541         if (error || !req->newptr)
5542                 return (error);
5543 
5544         if (result == 1) {
5545                 sc = (struct bce_softc *)arg1;
5546                 bce_breakpoint(sc);
5547         }
5548 
5549         return error;
5550 }
5551 #endif
5552 
5553 
5554 /****************************************************************************/
5555 /* Adds any sysctl parameters for tuning or debugging purposes.             */
5556 /*                                                                          */
5557 /* Returns:                                                                 */
5558 /*   Nothing.                                                               */
5559 /****************************************************************************/
5560 static void
5561 bce_add_sysctls(struct bce_softc *sc)
5562 {
5563 	struct sysctl_ctx_list *ctx;
5564 	struct sysctl_oid_list *children;
5565 
5566 	sysctl_ctx_init(&sc->bce_sysctl_ctx);
5567 	sc->bce_sysctl_tree = SYSCTL_ADD_NODE(&sc->bce_sysctl_ctx,
5568 					      SYSCTL_STATIC_CHILDREN(_hw),
5569 					      OID_AUTO,
5570 					      device_get_nameunit(sc->bce_dev),
5571 					      CTLFLAG_RD, 0, "");
5572 	if (sc->bce_sysctl_tree == NULL) {
5573 		device_printf(sc->bce_dev, "can't add sysctl node\n");
5574 		return;
5575 	}
5576 
5577 	ctx = &sc->bce_sysctl_ctx;
5578 	children = SYSCTL_CHILDREN(sc->bce_sysctl_tree);
5579 
5580 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_bds_int",
5581 			CTLTYPE_INT | CTLFLAG_RW,
5582 			sc, 0, bce_sysctl_tx_bds_int, "I",
5583 			"Send max coalesced BD count during interrupt");
5584 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_bds",
5585 			CTLTYPE_INT | CTLFLAG_RW,
5586 			sc, 0, bce_sysctl_tx_bds, "I",
5587 			"Send max coalesced BD count");
5588 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_ticks_int",
5589 			CTLTYPE_INT | CTLFLAG_RW,
5590 			sc, 0, bce_sysctl_tx_ticks_int, "I",
5591 			"Send coalescing ticks during interrupt");
5592 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_ticks",
5593 			CTLTYPE_INT | CTLFLAG_RW,
5594 			sc, 0, bce_sysctl_tx_ticks, "I",
5595 			"Send coalescing ticks");
5596 
5597 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_bds_int",
5598 			CTLTYPE_INT | CTLFLAG_RW,
5599 			sc, 0, bce_sysctl_rx_bds_int, "I",
5600 			"Receive max coalesced BD count during interrupt");
5601 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_bds",
5602 			CTLTYPE_INT | CTLFLAG_RW,
5603 			sc, 0, bce_sysctl_rx_bds, "I",
5604 			"Receive max coalesced BD count");
5605 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_ticks_int",
5606 			CTLTYPE_INT | CTLFLAG_RW,
5607 			sc, 0, bce_sysctl_rx_ticks_int, "I",
5608 			"Receive coalescing ticks during interrupt");
5609 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_ticks",
5610 			CTLTYPE_INT | CTLFLAG_RW,
5611 			sc, 0, bce_sysctl_rx_ticks, "I",
5612 			"Receive coalescing ticks");
5613 
5614 #ifdef BCE_DEBUG
5615 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
5616 		"rx_low_watermark",
5617 		CTLFLAG_RD, &sc->rx_low_watermark,
5618 		0, "Lowest level of free rx_bd's");
5619 
5620 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
5621 		"rx_empty_count",
5622 		CTLFLAG_RD, &sc->rx_empty_count,
5623 		0, "Number of times the RX chain was empty");
5624 
5625 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
5626 		"tx_hi_watermark",
5627 		CTLFLAG_RD, &sc->tx_hi_watermark,
5628 		0, "Highest level of used tx_bd's");
5629 
5630 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
5631 		"tx_full_count",
5632 		CTLFLAG_RD, &sc->tx_full_count,
5633 		0, "Number of times the TX chain was full");
5634 
5635 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
5636 		"l2fhdr_status_errors",
5637 		CTLFLAG_RD, &sc->l2fhdr_status_errors,
5638 		0, "l2_fhdr status errors");
5639 
5640 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
5641 		"unexpected_attentions",
5642 		CTLFLAG_RD, &sc->unexpected_attentions,
5643 		0, "unexpected attentions");
5644 
5645 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
5646 		"lost_status_block_updates",
5647 		CTLFLAG_RD, &sc->lost_status_block_updates,
5648 		0, "lost status block updates");
5649 
5650 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
5651 		"mbuf_alloc_failed",
5652 		CTLFLAG_RD, &sc->mbuf_alloc_failed,
5653 		0, "mbuf cluster allocation failures");
5654 #endif
5655 
5656 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5657 		"stat_IfHCInOctets",
5658 		CTLFLAG_RD, &sc->stat_IfHCInOctets,
5659 		"Bytes received");
5660 
5661 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5662 		"stat_IfHCInBadOctets",
5663 		CTLFLAG_RD, &sc->stat_IfHCInBadOctets,
5664 		"Bad bytes received");
5665 
5666 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5667 		"stat_IfHCOutOctets",
5668 		CTLFLAG_RD, &sc->stat_IfHCOutOctets,
5669 		"Bytes sent");
5670 
5671 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5672 		"stat_IfHCOutBadOctets",
5673 		CTLFLAG_RD, &sc->stat_IfHCOutBadOctets,
5674 		"Bad bytes sent");
5675 
5676 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5677 		"stat_IfHCInUcastPkts",
5678 		CTLFLAG_RD, &sc->stat_IfHCInUcastPkts,
5679 		"Unicast packets received");
5680 
5681 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5682 		"stat_IfHCInMulticastPkts",
5683 		CTLFLAG_RD, &sc->stat_IfHCInMulticastPkts,
5684 		"Multicast packets received");
5685 
5686 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5687 		"stat_IfHCInBroadcastPkts",
5688 		CTLFLAG_RD, &sc->stat_IfHCInBroadcastPkts,
5689 		"Broadcast packets received");
5690 
5691 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5692 		"stat_IfHCOutUcastPkts",
5693 		CTLFLAG_RD, &sc->stat_IfHCOutUcastPkts,
5694 		"Unicast packets sent");
5695 
5696 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5697 		"stat_IfHCOutMulticastPkts",
5698 		CTLFLAG_RD, &sc->stat_IfHCOutMulticastPkts,
5699 		"Multicast packets sent");
5700 
5701 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5702 		"stat_IfHCOutBroadcastPkts",
5703 		CTLFLAG_RD, &sc->stat_IfHCOutBroadcastPkts,
5704 		"Broadcast packets sent");
5705 
5706 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5707 		"stat_emac_tx_stat_dot3statsinternalmactransmiterrors",
5708 		CTLFLAG_RD, &sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors,
5709 		0, "Internal MAC transmit errors");
5710 
5711 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5712 		"stat_Dot3StatsCarrierSenseErrors",
5713 		CTLFLAG_RD, &sc->stat_Dot3StatsCarrierSenseErrors,
5714 		0, "Carrier sense errors");
5715 
5716 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5717 		"stat_Dot3StatsFCSErrors",
5718 		CTLFLAG_RD, &sc->stat_Dot3StatsFCSErrors,
5719 		0, "Frame check sequence errors");
5720 
5721 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5722 		"stat_Dot3StatsAlignmentErrors",
5723 		CTLFLAG_RD, &sc->stat_Dot3StatsAlignmentErrors,
5724 		0, "Alignment errors");
5725 
5726 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5727 		"stat_Dot3StatsSingleCollisionFrames",
5728 		CTLFLAG_RD, &sc->stat_Dot3StatsSingleCollisionFrames,
5729 		0, "Single Collision Frames");
5730 
5731 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5732 		"stat_Dot3StatsMultipleCollisionFrames",
5733 		CTLFLAG_RD, &sc->stat_Dot3StatsMultipleCollisionFrames,
5734 		0, "Multiple Collision Frames");
5735 
5736 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5737 		"stat_Dot3StatsDeferredTransmissions",
5738 		CTLFLAG_RD, &sc->stat_Dot3StatsDeferredTransmissions,
5739 		0, "Deferred Transmissions");
5740 
5741 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5742 		"stat_Dot3StatsExcessiveCollisions",
5743 		CTLFLAG_RD, &sc->stat_Dot3StatsExcessiveCollisions,
5744 		0, "Excessive Collisions");
5745 
5746 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5747 		"stat_Dot3StatsLateCollisions",
5748 		CTLFLAG_RD, &sc->stat_Dot3StatsLateCollisions,
5749 		0, "Late Collisions");
5750 
5751 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5752 		"stat_EtherStatsCollisions",
5753 		CTLFLAG_RD, &sc->stat_EtherStatsCollisions,
5754 		0, "Collisions");
5755 
5756 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5757 		"stat_EtherStatsFragments",
5758 		CTLFLAG_RD, &sc->stat_EtherStatsFragments,
5759 		0, "Fragments");
5760 
5761 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5762 		"stat_EtherStatsJabbers",
5763 		CTLFLAG_RD, &sc->stat_EtherStatsJabbers,
5764 		0, "Jabbers");
5765 
5766 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5767 		"stat_EtherStatsUndersizePkts",
5768 		CTLFLAG_RD, &sc->stat_EtherStatsUndersizePkts,
5769 		0, "Undersize packets");
5770 
5771 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5772 		"stat_EtherStatsOverrsizePkts",
5773 		CTLFLAG_RD, &sc->stat_EtherStatsOverrsizePkts,
5774 		0, "Oversize packets received");
5775 
5776 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5777 		"stat_EtherStatsPktsRx64Octets",
5778 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx64Octets,
5779 		0, "Bytes received in 64 byte packets");
5780 
5781 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5782 		"stat_EtherStatsPktsRx65Octetsto127Octets",
5783 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx65Octetsto127Octets,
5784 		0, "Bytes received in 65 to 127 byte packets");
5785 
5786 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5787 		"stat_EtherStatsPktsRx128Octetsto255Octets",
5788 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx128Octetsto255Octets,
5789 		0, "Bytes received in 128 to 255 byte packets");
5790 
5791 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5792 		"stat_EtherStatsPktsRx256Octetsto511Octets",
5793 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx256Octetsto511Octets,
5794 		0, "Bytes received in 256 to 511 byte packets");
5795 
5796 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5797 		"stat_EtherStatsPktsRx512Octetsto1023Octets",
5798 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx512Octetsto1023Octets,
5799 		0, "Bytes received in 512 to 1023 byte packets");
5800 
5801 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5802 		"stat_EtherStatsPktsRx1024Octetsto1522Octets",
5803 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1024Octetsto1522Octets,
5804 		0, "Bytes received in 1024 to 1522 byte packets");
5805 
5806 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5807 		"stat_EtherStatsPktsRx1523Octetsto9022Octets",
5808 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1523Octetsto9022Octets,
5809 		0, "Bytes received in 1523 to 9022 byte packets");
5810 
5811 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5812 		"stat_EtherStatsPktsTx64Octets",
5813 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx64Octets,
5814 		0, "Bytes sent in 64 byte packets");
5815 
5816 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5817 		"stat_EtherStatsPktsTx65Octetsto127Octets",
5818 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx65Octetsto127Octets,
5819 		0, "Bytes sent in 65 to 127 byte packets");
5820 
5821 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5822 		"stat_EtherStatsPktsTx128Octetsto255Octets",
5823 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx128Octetsto255Octets,
5824 		0, "Bytes sent in 128 to 255 byte packets");
5825 
5826 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5827 		"stat_EtherStatsPktsTx256Octetsto511Octets",
5828 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx256Octetsto511Octets,
5829 		0, "Bytes sent in 256 to 511 byte packets");
5830 
5831 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5832 		"stat_EtherStatsPktsTx512Octetsto1023Octets",
5833 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx512Octetsto1023Octets,
5834 		0, "Bytes sent in 512 to 1023 byte packets");
5835 
5836 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5837 		"stat_EtherStatsPktsTx1024Octetsto1522Octets",
5838 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1024Octetsto1522Octets,
5839 		0, "Bytes sent in 1024 to 1522 byte packets");
5840 
5841 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5842 		"stat_EtherStatsPktsTx1523Octetsto9022Octets",
5843 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1523Octetsto9022Octets,
5844 		0, "Bytes sent in 1523 to 9022 byte packets");
5845 
5846 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5847 		"stat_XonPauseFramesReceived",
5848 		CTLFLAG_RD, &sc->stat_XonPauseFramesReceived,
5849 		0, "XON pause frames received");
5850 
5851 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5852 		"stat_XoffPauseFramesReceived",
5853 		CTLFLAG_RD, &sc->stat_XoffPauseFramesReceived,
5854 		0, "XOFF pause frames received");
5855 
5856 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5857 		"stat_OutXonSent",
5858 		CTLFLAG_RD, &sc->stat_OutXonSent,
5859 		0, "XON pause frames sent");
5860 
5861 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5862 		"stat_OutXoffSent",
5863 		CTLFLAG_RD, &sc->stat_OutXoffSent,
5864 		0, "XOFF pause frames sent");
5865 
5866 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5867 		"stat_FlowControlDone",
5868 		CTLFLAG_RD, &sc->stat_FlowControlDone,
5869 		0, "Flow control done");
5870 
5871 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5872 		"stat_MacControlFramesReceived",
5873 		CTLFLAG_RD, &sc->stat_MacControlFramesReceived,
5874 		0, "MAC control frames received");
5875 
5876 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5877 		"stat_XoffStateEntered",
5878 		CTLFLAG_RD, &sc->stat_XoffStateEntered,
5879 		0, "XOFF state entered");
5880 
5881 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5882 		"stat_IfInFramesL2FilterDiscards",
5883 		CTLFLAG_RD, &sc->stat_IfInFramesL2FilterDiscards,
5884 		0, "Received L2 packets discarded");
5885 
5886 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5887 		"stat_IfInRuleCheckerDiscards",
5888 		CTLFLAG_RD, &sc->stat_IfInRuleCheckerDiscards,
5889 		0, "Received packets discarded by rule");
5890 
5891 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5892 		"stat_IfInFTQDiscards",
5893 		CTLFLAG_RD, &sc->stat_IfInFTQDiscards,
5894 		0, "Received packet FTQ discards");
5895 
5896 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5897 		"stat_IfInMBUFDiscards",
5898 		CTLFLAG_RD, &sc->stat_IfInMBUFDiscards,
5899 		0, "Received packets discarded due to lack of controller buffer memory");
5900 
5901 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5902 		"stat_IfInRuleCheckerP4Hit",
5903 		CTLFLAG_RD, &sc->stat_IfInRuleCheckerP4Hit,
5904 		0, "Received packets rule checker hits");
5905 
5906 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5907 		"stat_CatchupInRuleCheckerDiscards",
5908 		CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerDiscards,
5909 		0, "Received packets discarded in Catchup path");
5910 
5911 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5912 		"stat_CatchupInFTQDiscards",
5913 		CTLFLAG_RD, &sc->stat_CatchupInFTQDiscards,
5914 		0, "Received packets discarded in FTQ in Catchup path");
5915 
5916 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5917 		"stat_CatchupInMBUFDiscards",
5918 		CTLFLAG_RD, &sc->stat_CatchupInMBUFDiscards,
5919 		0, "Received packets discarded in controller buffer memory in Catchup path");
5920 
5921 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5922 		"stat_CatchupInRuleCheckerP4Hit",
5923 		CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerP4Hit,
5924 		0, "Received packets rule checker hits in Catchup path");
5925 
5926 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5927 		"com_no_buffers",
5928 		CTLFLAG_RD, &sc->com_no_buffers,
5929 		0, "Valid packets received but no RX buffers available");
5930 
5931 #ifdef BCE_DEBUG
5932 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
5933 		"driver_state", CTLTYPE_INT | CTLFLAG_RW,
5934 		(void *)sc, 0,
5935 		bce_sysctl_driver_state, "I", "Driver state information");
5936 
5937 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
5938 		"hw_state", CTLTYPE_INT | CTLFLAG_RW,
5939 		(void *)sc, 0,
5940 		bce_sysctl_hw_state, "I", "Hardware state information");
5941 
5942 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
5943 		"dump_rx_chain", CTLTYPE_INT | CTLFLAG_RW,
5944 		(void *)sc, 0,
5945 		bce_sysctl_dump_rx_chain, "I", "Dump rx_bd chain");
5946 
5947 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
5948 		"dump_tx_chain", CTLTYPE_INT | CTLFLAG_RW,
5949 		(void *)sc, 0,
5950 		bce_sysctl_dump_tx_chain, "I", "Dump tx_bd chain");
5951 
5952 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
5953 		"breakpoint", CTLTYPE_INT | CTLFLAG_RW,
5954 		(void *)sc, 0,
5955 		bce_sysctl_breakpoint, "I", "Driver breakpoint");
5956 
5957 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
5958 		"reg_read", CTLTYPE_INT | CTLFLAG_RW,
5959 		(void *)sc, 0,
5960 		bce_sysctl_reg_read, "I", "Register read");
5961 
5962 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
5963 		"phy_read", CTLTYPE_INT | CTLFLAG_RW,
5964 		(void *)sc, 0,
5965 		bce_sysctl_phy_read, "I", "PHY register read");
5966 
5967 #endif
5968 
5969 }
5970 
5971 
5972 /****************************************************************************/
5973 /* BCE Debug Routines                                                       */
5974 /****************************************************************************/
5975 #ifdef BCE_DEBUG
5976 
5977 /****************************************************************************/
5978 /* Freezes the controller to allow for a cohesive state dump.               */
5979 /*                                                                          */
5980 /* Returns:                                                                 */
5981 /*   Nothing.                                                               */
5982 /****************************************************************************/
5983 static void
5984 bce_freeze_controller(struct bce_softc *sc)
5985 {
5986 	uint32_t val;
5987 
5988 	val = REG_RD(sc, BCE_MISC_COMMAND);
5989 	val |= BCE_MISC_COMMAND_DISABLE_ALL;
5990 	REG_WR(sc, BCE_MISC_COMMAND, val);
5991 }
5992 
5993 
5994 /****************************************************************************/
5995 /* Unfreezes the controller after a freeze operation.  This may not always  */
5996 /* work, in which case the controller will require a reset!                 */
5997 /*                                                                          */
5998 /* Returns:                                                                 */
5999 /*   Nothing.                                                               */
6000 /****************************************************************************/
6001 static void
6002 bce_unfreeze_controller(struct bce_softc *sc)
6003 {
6004 	uint32_t val;
6005 
6006 	val = REG_RD(sc, BCE_MISC_COMMAND);
6007 	val |= BCE_MISC_COMMAND_ENABLE_ALL;
6008 	REG_WR(sc, BCE_MISC_COMMAND, val);
6009 }
6010 
6011 
6012 /****************************************************************************/
6013 /* Prints out information about an mbuf.                                    */
6014 /*                                                                          */
6015 /* Returns:                                                                 */
6016 /*   Nothing.                                                               */
6017 /****************************************************************************/
6018 static void
6019 bce_dump_mbuf(struct bce_softc *sc, struct mbuf *m)
6020 {
6021 	struct ifnet *ifp = &sc->arpcom.ac_if;
6022 	uint32_t val_hi, val_lo;
6023 	struct mbuf *mp = m;
6024 
6025 	if (m == NULL) {
6026 		/* NULL mbuf pointer, nothing to dump. */
6027 		if_printf(ifp, "mbuf: null pointer\n");
6028 		return;
6029 	}
6030 
6031 	while (mp) {
6032 		val_hi = BCE_ADDR_HI(mp);
6033 		val_lo = BCE_ADDR_LO(mp);
6034 		if_printf(ifp, "mbuf: vaddr = 0x%08X:%08X, m_len = %d, "
6035 			  "m_flags = ( ", val_hi, val_lo, mp->m_len);
6036 
6037 		if (mp->m_flags & M_EXT)
6038 			kprintf("M_EXT ");
6039 		if (mp->m_flags & M_PKTHDR)
6040 			kprintf("M_PKTHDR ");
6041 		if (mp->m_flags & M_EOR)
6042 			kprintf("M_EOR ");
6043 #ifdef M_RDONLY
6044 		if (mp->m_flags & M_RDONLY)
6045 			kprintf("M_RDONLY ");
6046 #endif
6047 
6048 		val_hi = BCE_ADDR_HI(mp->m_data);
6049 		val_lo = BCE_ADDR_LO(mp->m_data);
6050 		kprintf(") m_data = 0x%08X:%08X\n", val_hi, val_lo);
6051 
6052 		if (mp->m_flags & M_PKTHDR) {
6053 			if_printf(ifp, "- m_pkthdr: flags = ( ");
6054 			if (mp->m_flags & M_BCAST)
6055 				kprintf("M_BCAST ");
6056 			if (mp->m_flags & M_MCAST)
6057 				kprintf("M_MCAST ");
6058 			if (mp->m_flags & M_FRAG)
6059 				kprintf("M_FRAG ");
6060 			if (mp->m_flags & M_FIRSTFRAG)
6061 				kprintf("M_FIRSTFRAG ");
6062 			if (mp->m_flags & M_LASTFRAG)
6063 				kprintf("M_LASTFRAG ");
6064 #ifdef M_VLANTAG
6065 			if (mp->m_flags & M_VLANTAG)
6066 				kprintf("M_VLANTAG ");
6067 #endif
6068 #ifdef M_PROMISC
6069 			if (mp->m_flags & M_PROMISC)
6070 				kprintf("M_PROMISC ");
6071 #endif
6072 			kprintf(") csum_flags = ( ");
6073 			if (mp->m_pkthdr.csum_flags & CSUM_IP)
6074 				kprintf("CSUM_IP ");
6075 			if (mp->m_pkthdr.csum_flags & CSUM_TCP)
6076 				kprintf("CSUM_TCP ");
6077 			if (mp->m_pkthdr.csum_flags & CSUM_UDP)
6078 				kprintf("CSUM_UDP ");
6079 			if (mp->m_pkthdr.csum_flags & CSUM_IP_FRAGS)
6080 				kprintf("CSUM_IP_FRAGS ");
6081 			if (mp->m_pkthdr.csum_flags & CSUM_FRAGMENT)
6082 				kprintf("CSUM_FRAGMENT ");
6083 #ifdef CSUM_TSO
6084 			if (mp->m_pkthdr.csum_flags & CSUM_TSO)
6085 				kprintf("CSUM_TSO ");
6086 #endif
6087 			if (mp->m_pkthdr.csum_flags & CSUM_IP_CHECKED)
6088 				kprintf("CSUM_IP_CHECKED ");
6089 			if (mp->m_pkthdr.csum_flags & CSUM_IP_VALID)
6090 				kprintf("CSUM_IP_VALID ");
6091 			if (mp->m_pkthdr.csum_flags & CSUM_DATA_VALID)
6092 				kprintf("CSUM_DATA_VALID ");
6093 			kprintf(")\n");
6094 		}
6095 
6096 		if (mp->m_flags & M_EXT) {
6097 			val_hi = BCE_ADDR_HI(mp->m_ext.ext_buf);
6098 			val_lo = BCE_ADDR_LO(mp->m_ext.ext_buf);
6099 			if_printf(ifp, "- m_ext: vaddr = 0x%08X:%08X, "
6100 				  "ext_size = %d\n",
6101 				  val_hi, val_lo, mp->m_ext.ext_size);
6102 		}
6103 		mp = mp->m_next;
6104 	}
6105 }
6106 
6107 
6108 /****************************************************************************/
6109 /* Prints out the mbufs in the TX mbuf chain.                               */
6110 /*                                                                          */
6111 /* Returns:                                                                 */
6112 /*   Nothing.                                                               */
6113 /****************************************************************************/
6114 static void
6115 bce_dump_tx_mbuf_chain(struct bce_softc *sc, int chain_prod, int count)
6116 {
6117 	struct ifnet *ifp = &sc->arpcom.ac_if;
6118 	int i;
6119 
6120 	if_printf(ifp,
6121 	"----------------------------"
6122 	"  tx mbuf data  "
6123 	"----------------------------\n");
6124 
6125 	for (i = 0; i < count; i++) {
6126 		if_printf(ifp, "txmbuf[%d]\n", chain_prod);
6127 		bce_dump_mbuf(sc, sc->tx_mbuf_ptr[chain_prod]);
6128 		chain_prod = TX_CHAIN_IDX(NEXT_TX_BD(chain_prod));
6129 	}
6130 
6131 	if_printf(ifp,
6132 	"----------------------------"
6133 	"----------------"
6134 	"----------------------------\n");
6135 }
6136 
6137 
6138 /****************************************************************************/
6139 /* Prints out the mbufs in the RX mbuf chain.                               */
6140 /*                                                                          */
6141 /* Returns:                                                                 */
6142 /*   Nothing.                                                               */
6143 /****************************************************************************/
6144 static void
6145 bce_dump_rx_mbuf_chain(struct bce_softc *sc, int chain_prod, int count)
6146 {
6147 	struct ifnet *ifp = &sc->arpcom.ac_if;
6148 	int i;
6149 
6150 	if_printf(ifp,
6151 	"----------------------------"
6152 	"  rx mbuf data  "
6153 	"----------------------------\n");
6154 
6155 	for (i = 0; i < count; i++) {
6156 		if_printf(ifp, "rxmbuf[0x%04X]\n", chain_prod);
6157 		bce_dump_mbuf(sc, sc->rx_mbuf_ptr[chain_prod]);
6158 		chain_prod = RX_CHAIN_IDX(NEXT_RX_BD(chain_prod));
6159 	}
6160 
6161 	if_printf(ifp,
6162 	"----------------------------"
6163 	"----------------"
6164 	"----------------------------\n");
6165 }
6166 
6167 
6168 /****************************************************************************/
6169 /* Prints out a tx_bd structure.                                            */
6170 /*                                                                          */
6171 /* Returns:                                                                 */
6172 /*   Nothing.                                                               */
6173 /****************************************************************************/
6174 static void
6175 bce_dump_txbd(struct bce_softc *sc, int idx, struct tx_bd *txbd)
6176 {
6177 	struct ifnet *ifp = &sc->arpcom.ac_if;
6178 
6179 	if (idx > MAX_TX_BD) {
6180 		/* Index out of range. */
6181 		if_printf(ifp, "tx_bd[0x%04X]: Invalid tx_bd index!\n", idx);
6182 	} else if ((idx & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE) {
6183 		/* TX Chain page pointer. */
6184 		if_printf(ifp, "tx_bd[0x%04X]: haddr = 0x%08X:%08X, "
6185 			  "chain page pointer\n",
6186 			  idx, txbd->tx_bd_haddr_hi, txbd->tx_bd_haddr_lo);
6187 	} else {
6188 		/* Normal tx_bd entry. */
6189 		if_printf(ifp, "tx_bd[0x%04X]: haddr = 0x%08X:%08X, "
6190 			  "nbytes = 0x%08X, "
6191 			  "vlan tag= 0x%04X, flags = 0x%04X (",
6192 			  idx, txbd->tx_bd_haddr_hi, txbd->tx_bd_haddr_lo,
6193 			  txbd->tx_bd_mss_nbytes,
6194 			  txbd->tx_bd_vlan_tag, txbd->tx_bd_flags);
6195 
6196 		if (txbd->tx_bd_flags & TX_BD_FLAGS_CONN_FAULT)
6197 			kprintf(" CONN_FAULT");
6198 
6199 		if (txbd->tx_bd_flags & TX_BD_FLAGS_TCP_UDP_CKSUM)
6200 			kprintf(" TCP_UDP_CKSUM");
6201 
6202 		if (txbd->tx_bd_flags & TX_BD_FLAGS_IP_CKSUM)
6203 			kprintf(" IP_CKSUM");
6204 
6205 		if (txbd->tx_bd_flags & TX_BD_FLAGS_VLAN_TAG)
6206 			kprintf("  VLAN");
6207 
6208 		if (txbd->tx_bd_flags & TX_BD_FLAGS_COAL_NOW)
6209 			kprintf(" COAL_NOW");
6210 
6211 		if (txbd->tx_bd_flags & TX_BD_FLAGS_DONT_GEN_CRC)
6212 			kprintf(" DONT_GEN_CRC");
6213 
6214 		if (txbd->tx_bd_flags & TX_BD_FLAGS_START)
6215 			kprintf(" START");
6216 
6217 		if (txbd->tx_bd_flags & TX_BD_FLAGS_END)
6218 			kprintf(" END");
6219 
6220 		if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_LSO)
6221 			kprintf(" LSO");
6222 
6223 		if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_OPTION_WORD)
6224 			kprintf(" OPTION_WORD");
6225 
6226 		if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_FLAGS)
6227 			kprintf(" FLAGS");
6228 
6229 		if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_SNAP)
6230 			kprintf(" SNAP");
6231 
6232 		kprintf(" )\n");
6233 	}
6234 }
6235 
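/*
 * Note on the "chain page pointer" case above (summary of the layout as
 * used in this file): the tx_bd ring is split across TX_PAGES pages of
 * host memory, and the last usable slot in each page holds a BD whose
 * host address fields point at the start of the next page.  That is why
 * an index whose low bits equal USABLE_TX_BD_PER_PAGE is printed as a
 * pointer rather than as a normal descriptor, and why NEXT_TX_BD() skips
 * those slots when walking the chain; see if_bcereg.h for the
 * authoritative macro definitions.
 */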
6236 
6237 /****************************************************************************/
6238 /* Prints out a rx_bd structure.                                            */
6239 /*                                                                          */
6240 /* Returns:                                                                 */
6241 /*   Nothing.                                                               */
6242 /****************************************************************************/
6243 static void
6244 bce_dump_rxbd(struct bce_softc *sc, int idx, struct rx_bd *rxbd)
6245 {
6246 	struct ifnet *ifp = &sc->arpcom.ac_if;
6247 
6248 	if (idx > MAX_RX_BD) {
6249 		/* Index out of range. */
6250 		if_printf(ifp, "rx_bd[0x%04X]: Invalid rx_bd index!\n", idx);
6251 	} else if ((idx & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE) {
6252 		/* RX Chain page pointer. */
6253 		if_printf(ifp, "rx_bd[0x%04X]: haddr = 0x%08X:%08X, "
6254 			  "chain page pointer\n",
6255 			  idx, rxbd->rx_bd_haddr_hi, rxbd->rx_bd_haddr_lo);
6256 	} else {
6257 		/* Normal rx_bd entry. */
6258 		if_printf(ifp, "rx_bd[0x%04X]: haddr = 0x%08X:%08X, "
6259 			  "nbytes = 0x%08X, flags = 0x%08X\n",
6260 			  idx, rxbd->rx_bd_haddr_hi, rxbd->rx_bd_haddr_lo,
6261 			  rxbd->rx_bd_len, rxbd->rx_bd_flags);
6262 	}
6263 }
6264 
6265 
6266 /****************************************************************************/
6267 /* Prints out a l2_fhdr structure.                                          */
6268 /*                                                                          */
6269 /* Returns:                                                                 */
6270 /*   Nothing.                                                               */
6271 /****************************************************************************/
6272 static void
6273 bce_dump_l2fhdr(struct bce_softc *sc, int idx, struct l2_fhdr *l2fhdr)
6274 {
6275 	if_printf(&sc->arpcom.ac_if, "l2_fhdr[0x%04X]: status = 0x%08X, "
6276 		  "pkt_len = 0x%04X, vlan = 0x%04x, "
6277 		  "ip_xsum = 0x%04X, tcp_udp_xsum = 0x%04X\n",
6278 		  idx, l2fhdr->l2_fhdr_status,
6279 		  l2fhdr->l2_fhdr_pkt_len, l2fhdr->l2_fhdr_vlan_tag,
6280 		  l2fhdr->l2_fhdr_ip_xsum, l2fhdr->l2_fhdr_tcp_udp_xsum);
6281 }
6282 
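/*
 * Background for the dump above (description only, based on how these
 * fields are consumed elsewhere in this driver): the RX processor places
 * an l2_fhdr in host memory in front of every received frame.  The status
 * word carries error and offload indications (checksum and VLAN
 * information), pkt_len is the length of the frame that follows, and the
 * ip/tcp_udp checksum fields hold the values computed by the hardware for
 * the receive checksum offload path.
 */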
6283 
6284 /****************************************************************************/
6285 /* Prints out the tx chain.                                                 */
6286 /*                                                                          */
6287 /* Returns:                                                                 */
6288 /*   Nothing.                                                               */
6289 /****************************************************************************/
6290 static void
6291 bce_dump_tx_chain(struct bce_softc *sc, int tx_prod, int count)
6292 {
6293 	struct ifnet *ifp = &sc->arpcom.ac_if;
6294 	int i;
6295 
6296 	/* First some info about the tx_bd chain structure. */
6297 	if_printf(ifp,
6298 	"----------------------------"
6299 	"  tx_bd  chain  "
6300 	"----------------------------\n");
6301 
6302 	if_printf(ifp, "page size      = 0x%08X, "
6303 		  "tx chain pages        = 0x%08X\n",
6304 		  (uint32_t)BCM_PAGE_SIZE, (uint32_t)TX_PAGES);
6305 
6306 	if_printf(ifp, "tx_bd per page = 0x%08X, "
6307 		  "usable tx_bd per page = 0x%08X\n",
6308 		  (uint32_t)TOTAL_TX_BD_PER_PAGE,
6309 		  (uint32_t)USABLE_TX_BD_PER_PAGE);
6310 
6311 	if_printf(ifp, "total tx_bd    = 0x%08X\n", (uint32_t)TOTAL_TX_BD);
6312 
6313 	if_printf(ifp,
6314 	"----------------------------"
6315 	"  tx_bd data    "
6316 	"----------------------------\n");
6317 
6318 	/* Now print out the tx_bd's themselves. */
6319 	for (i = 0; i < count; i++) {
6320 		struct tx_bd *txbd;
6321 
6322 		txbd = &sc->tx_bd_chain[TX_PAGE(tx_prod)][TX_IDX(tx_prod)];
6323 		bce_dump_txbd(sc, tx_prod, txbd);
6324 		tx_prod = TX_CHAIN_IDX(NEXT_TX_BD(tx_prod));
6325 	}
6326 
6327 	if_printf(ifp,
6328 	"----------------------------"
6329 	"----------------"
6330 	"----------------------------\n");
6331 }
6332 
6333 
6334 /****************************************************************************/
6335 /* Prints out the rx chain.                                                 */
6336 /*                                                                          */
6337 /* Returns:                                                                 */
6338 /*   Nothing.                                                               */
6339 /****************************************************************************/
6340 static void
6341 bce_dump_rx_chain(struct bce_softc *sc, int rx_prod, int count)
6342 {
6343 	struct ifnet *ifp = &sc->arpcom.ac_if;
6344 	int i;
6345 
6346 	/* First some info about the rx_bd chain structure. */
6347 	if_printf(ifp,
6348 	"----------------------------"
6349 	"  rx_bd  chain  "
6350 	"----------------------------\n");
6351 
6352 	if_printf(ifp, "page size      = 0x%08X, "
6353 		  "rx chain pages        = 0x%08X\n",
6354 		  (uint32_t)BCM_PAGE_SIZE, (uint32_t)RX_PAGES);
6355 
6356 	if_printf(ifp, "rx_bd per page = 0x%08X, "
6357 		  "usable rx_bd per page = 0x%08X\n",
6358 		  (uint32_t)TOTAL_RX_BD_PER_PAGE,
6359 		  (uint32_t)USABLE_RX_BD_PER_PAGE);
6360 
6361 	if_printf(ifp, "total rx_bd    = 0x%08X\n", (uint32_t)TOTAL_RX_BD);
6362 
6363 	if_printf(ifp,
6364 	"----------------------------"
6365 	"   rx_bd data   "
6366 	"----------------------------\n");
6367 
6368 	/* Now print out the rx_bd's themselves. */
6369 	for (i = 0; i < count; i++) {
6370 		struct rx_bd *rxbd;
6371 
6372 		rxbd = &sc->rx_bd_chain[RX_PAGE(rx_prod)][RX_IDX(rx_prod)];
6373 		bce_dump_rxbd(sc, rx_prod, rxbd);
6374 		rx_prod = RX_CHAIN_IDX(NEXT_RX_BD(rx_prod));
6375 	}
6376 
6377 	if_printf(ifp,
6378 	"----------------------------"
6379 	"----------------"
6380 	"----------------------------\n");
6381 }
6382 
6383 
6384 /****************************************************************************/
6385 /* Prints out the status block from host memory.                            */
6386 /*                                                                          */
6387 /* Returns:                                                                 */
6388 /*   Nothing.                                                               */
6389 /****************************************************************************/
6390 static void
6391 bce_dump_status_block(struct bce_softc *sc)
6392 {
6393 	struct status_block *sblk = sc->status_block;
6394 	struct ifnet *ifp = &sc->arpcom.ac_if;
6395 
6396 	if_printf(ifp,
6397 	"----------------------------"
6398 	"  Status Block  "
6399 	"----------------------------\n");
6400 
6401 	if_printf(ifp, "    0x%08X - attn_bits\n", sblk->status_attn_bits);
6402 
6403 	if_printf(ifp, "    0x%08X - attn_bits_ack\n",
6404 		  sblk->status_attn_bits_ack);
6405 
6406 	if_printf(ifp, "0x%04X(0x%04X) - rx_cons0\n",
6407 	    sblk->status_rx_quick_consumer_index0,
6408 	    (uint16_t)RX_CHAIN_IDX(sblk->status_rx_quick_consumer_index0));
6409 
6410 	if_printf(ifp, "0x%04X(0x%04X) - tx_cons0\n",
6411 	    sblk->status_tx_quick_consumer_index0,
6412 	    (uint16_t)TX_CHAIN_IDX(sblk->status_tx_quick_consumer_index0));
6413 
6414 	if_printf(ifp, "        0x%04X - status_idx\n", sblk->status_idx);
6415 
6416 	/* These indices are not used by normal L2 drivers. */
6417 	if (sblk->status_rx_quick_consumer_index1) {
6418 		if_printf(ifp, "0x%04X(0x%04X) - rx_cons1\n",
6419 		sblk->status_rx_quick_consumer_index1,
6420 		(uint16_t)RX_CHAIN_IDX(sblk->status_rx_quick_consumer_index1));
6421 	}
6422 
6423 	if (sblk->status_tx_quick_consumer_index1) {
6424 		if_printf(ifp, "0x%04X(0x%04X) - tx_cons1\n",
6425 		sblk->status_tx_quick_consumer_index1,
6426 		(uint16_t)TX_CHAIN_IDX(sblk->status_tx_quick_consumer_index1));
6427 	}
6428 
6429 	if (sblk->status_rx_quick_consumer_index2) {
6430 		if_printf(ifp, "0x%04X(0x%04X)- rx_cons2\n",
6431 		sblk->status_rx_quick_consumer_index2,
6432 		(uint16_t)RX_CHAIN_IDX(sblk->status_rx_quick_consumer_index2));
6433 	}
6434 
6435 	if (sblk->status_tx_quick_consumer_index2) {
6436 		if_printf(ifp, "0x%04X(0x%04X) - tx_cons2\n",
6437 		sblk->status_tx_quick_consumer_index2,
6438 		(uint16_t)TX_CHAIN_IDX(sblk->status_tx_quick_consumer_index2));
6439 	}
6440 
6441 	if (sblk->status_rx_quick_consumer_index3) {
6442 		if_printf(ifp, "0x%04X(0x%04X) - rx_cons3\n",
6443 		sblk->status_rx_quick_consumer_index3,
6444 		(uint16_t)RX_CHAIN_IDX(sblk->status_rx_quick_consumer_index3));
6445 	}
6446 
6447 	if (sblk->status_tx_quick_consumer_index3) {
6448 		if_printf(ifp, "0x%04X(0x%04X) - tx_cons3\n",
6449 		sblk->status_tx_quick_consumer_index3,
6450 		(uint16_t)TX_CHAIN_IDX(sblk->status_tx_quick_consumer_index3));
6451 	}
6452 
6453 	if (sblk->status_rx_quick_consumer_index4 ||
6454 	    sblk->status_rx_quick_consumer_index5) {
6455 		if_printf(ifp, "rx_cons4  = 0x%08X, rx_cons5      = 0x%08X\n",
6456 			  sblk->status_rx_quick_consumer_index4,
6457 			  sblk->status_rx_quick_consumer_index5);
6458 	}
6459 
6460 	if (sblk->status_rx_quick_consumer_index6 ||
6461 	    sblk->status_rx_quick_consumer_index7) {
6462 		if_printf(ifp, "rx_cons6  = 0x%08X, rx_cons7      = 0x%08X\n",
6463 			  sblk->status_rx_quick_consumer_index6,
6464 			  sblk->status_rx_quick_consumer_index7);
6465 	}
6466 
6467 	if (sblk->status_rx_quick_consumer_index8 ||
6468 	    sblk->status_rx_quick_consumer_index9) {
6469 		if_printf(ifp, "rx_cons8  = 0x%08X, rx_cons9      = 0x%08X\n",
6470 			  sblk->status_rx_quick_consumer_index8,
6471 			  sblk->status_rx_quick_consumer_index9);
6472 	}
6473 
6474 	if (sblk->status_rx_quick_consumer_index10 ||
6475 	    sblk->status_rx_quick_consumer_index11) {
6476 		if_printf(ifp, "rx_cons10 = 0x%08X, rx_cons11     = 0x%08X\n",
6477 			  sblk->status_rx_quick_consumer_index10,
6478 			  sblk->status_rx_quick_consumer_index11);
6479 	}
6480 
6481 	if (sblk->status_rx_quick_consumer_index12 ||
6482 	    sblk->status_rx_quick_consumer_index13) {
6483 		if_printf(ifp, "rx_cons12 = 0x%08X, rx_cons13     = 0x%08X\n",
6484 			  sblk->status_rx_quick_consumer_index12,
6485 			  sblk->status_rx_quick_consumer_index13);
6486 	}
6487 
6488 	if (sblk->status_rx_quick_consumer_index14 ||
6489 	    sblk->status_rx_quick_consumer_index15) {
6490 		if_printf(ifp, "rx_cons14 = 0x%08X, rx_cons15     = 0x%08X\n",
6491 			  sblk->status_rx_quick_consumer_index14,
6492 			  sblk->status_rx_quick_consumer_index15);
6493 	}
6494 
6495 	if (sblk->status_completion_producer_index ||
6496 	    sblk->status_cmd_consumer_index) {
6497 		if_printf(ifp, "com_prod  = 0x%08X, cmd_cons      = 0x%08X\n",
6498 			  sblk->status_completion_producer_index,
6499 			  sblk->status_cmd_consumer_index);
6500 	}
6501 
6502 	if_printf(ifp,
6503 	"----------------------------"
6504 	"----------------"
6505 	"----------------------------\n");
6506 }
6507 
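/*
 * Note on the indices printed above (summary, no new behaviour): the
 * controller DMAs the status block into host memory, and the driver
 * compares sblk->status_idx against sc->last_status_idx (shown in
 * bce_dump_driver_state()) to decide whether there is new work.  The
 * rx/tx quick consumer indices are free-running 16-bit values, which is
 * why each one is printed both raw and masked through RX_CHAIN_IDX()/
 * TX_CHAIN_IDX() to give the actual position within the BD chains.
 */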
6508 
6509 /****************************************************************************/
6510 /* Prints out the statistics block.                                         */
6511 /*                                                                          */
6512 /* Returns:                                                                 */
6513 /*   Nothing.                                                               */
6514 /****************************************************************************/
6515 static void
6516 bce_dump_stats_block(struct bce_softc *sc)
6517 {
6518 	struct statistics_block *sblk = sc->stats_block;
6519 	struct ifnet *ifp = &sc->arpcom.ac_if;
6520 
6521 	if_printf(ifp,
6522 	"---------------"
6523 	" Stats Block  (All Stats Not Shown Are 0) "
6524 	"---------------\n");
6525 
6526 	if (sblk->stat_IfHCInOctets_hi || sblk->stat_IfHCInOctets_lo) {
6527 		if_printf(ifp, "0x%08X:%08X : IfHcInOctets\n",
6528 			  sblk->stat_IfHCInOctets_hi,
6529 			  sblk->stat_IfHCInOctets_lo);
6530 	}
6531 
6532 	if (sblk->stat_IfHCInBadOctets_hi || sblk->stat_IfHCInBadOctets_lo) {
6533 		if_printf(ifp, "0x%08X:%08X : IfHcInBadOctets\n",
6534 			  sblk->stat_IfHCInBadOctets_hi,
6535 			  sblk->stat_IfHCInBadOctets_lo);
6536 	}
6537 
6538 	if (sblk->stat_IfHCOutOctets_hi || sblk->stat_IfHCOutOctets_lo) {
6539 		if_printf(ifp, "0x%08X:%08X : IfHcOutOctets\n",
6540 			  sblk->stat_IfHCOutOctets_hi,
6541 			  sblk->stat_IfHCOutOctets_lo);
6542 	}
6543 
6544 	if (sblk->stat_IfHCOutBadOctets_hi || sblk->stat_IfHCOutBadOctets_lo) {
6545 		if_printf(ifp, "0x%08X:%08X : IfHcOutBadOctets\n",
6546 			  sblk->stat_IfHCOutBadOctets_hi,
6547 			  sblk->stat_IfHCOutBadOctets_lo);
6548 	}
6549 
6550 	if (sblk->stat_IfHCInUcastPkts_hi || sblk->stat_IfHCInUcastPkts_lo) {
6551 		if_printf(ifp, "0x%08X:%08X : IfHcInUcastPkts\n",
6552 			  sblk->stat_IfHCInUcastPkts_hi,
6553 			  sblk->stat_IfHCInUcastPkts_lo);
6554 	}
6555 
6556 	if (sblk->stat_IfHCInBroadcastPkts_hi ||
6557 	    sblk->stat_IfHCInBroadcastPkts_lo) {
6558 		if_printf(ifp, "0x%08X:%08X : IfHcInBroadcastPkts\n",
6559 			  sblk->stat_IfHCInBroadcastPkts_hi,
6560 			  sblk->stat_IfHCInBroadcastPkts_lo);
6561 	}
6562 
6563 	if (sblk->stat_IfHCInMulticastPkts_hi ||
6564 	    sblk->stat_IfHCInMulticastPkts_lo) {
6565 		if_printf(ifp, "0x%08X:%08X : IfHcInMulticastPkts\n",
6566 			  sblk->stat_IfHCInMulticastPkts_hi,
6567 			  sblk->stat_IfHCInMulticastPkts_lo);
6568 	}
6569 
6570 	if (sblk->stat_IfHCOutUcastPkts_hi || sblk->stat_IfHCOutUcastPkts_lo) {
6571 		if_printf(ifp, "0x%08X:%08X : IfHcOutUcastPkts\n",
6572 			  sblk->stat_IfHCOutUcastPkts_hi,
6573 			  sblk->stat_IfHCOutUcastPkts_lo);
6574 	}
6575 
6576 	if (sblk->stat_IfHCOutBroadcastPkts_hi ||
6577 	    sblk->stat_IfHCOutBroadcastPkts_lo) {
6578 		if_printf(ifp, "0x%08X:%08X : IfHcOutBroadcastPkts\n",
6579 			  sblk->stat_IfHCOutBroadcastPkts_hi,
6580 			  sblk->stat_IfHCOutBroadcastPkts_lo);
6581 	}
6582 
6583 	if (sblk->stat_IfHCOutMulticastPkts_hi ||
6584 	    sblk->stat_IfHCOutMulticastPkts_lo) {
6585 		if_printf(ifp, "0x%08X:%08X : IfHcOutMulticastPkts\n",
6586 			  sblk->stat_IfHCOutMulticastPkts_hi,
6587 			  sblk->stat_IfHCOutMulticastPkts_lo);
6588 	}
6589 
6590 	if (sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors) {
6591 		if_printf(ifp, "         0x%08X : "
6592 		"emac_tx_stat_dot3statsinternalmactransmiterrors\n",
6593 		sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors);
6594 	}
6595 
6596 	if (sblk->stat_Dot3StatsCarrierSenseErrors) {
6597 		if_printf(ifp, "         0x%08X : "
6598 			  "Dot3StatsCarrierSenseErrors\n",
6599 			  sblk->stat_Dot3StatsCarrierSenseErrors);
6600 	}
6601 
6602 	if (sblk->stat_Dot3StatsFCSErrors) {
6603 		if_printf(ifp, "         0x%08X : Dot3StatsFCSErrors\n",
6604 			  sblk->stat_Dot3StatsFCSErrors);
6605 	}
6606 
6607 	if (sblk->stat_Dot3StatsAlignmentErrors) {
6608 		if_printf(ifp, "         0x%08X : Dot3StatsAlignmentErrors\n",
6609 			  sblk->stat_Dot3StatsAlignmentErrors);
6610 	}
6611 
6612 	if (sblk->stat_Dot3StatsSingleCollisionFrames) {
6613 		if_printf(ifp, "         0x%08X : "
6614 			  "Dot3StatsSingleCollisionFrames\n",
6615 			  sblk->stat_Dot3StatsSingleCollisionFrames);
6616 	}
6617 
6618 	if (sblk->stat_Dot3StatsMultipleCollisionFrames) {
6619 		if_printf(ifp, "         0x%08X : "
6620 			  "Dot3StatsMultipleCollisionFrames\n",
6621 			  sblk->stat_Dot3StatsMultipleCollisionFrames);
6622 	}
6623 
6624 	if (sblk->stat_Dot3StatsDeferredTransmissions) {
6625 		if_printf(ifp, "         0x%08X : "
6626 			  "Dot3StatsDeferredTransmissions\n",
6627 			  sblk->stat_Dot3StatsDeferredTransmissions);
6628 	}
6629 
6630 	if (sblk->stat_Dot3StatsExcessiveCollisions) {
6631 		if_printf(ifp, "         0x%08X : "
6632 			  "Dot3StatsExcessiveCollisions\n",
6633 			  sblk->stat_Dot3StatsExcessiveCollisions);
6634 	}
6635 
6636 	if (sblk->stat_Dot3StatsLateCollisions) {
6637 		if_printf(ifp, "         0x%08X : Dot3StatsLateCollisions\n",
6638 			  sblk->stat_Dot3StatsLateCollisions);
6639 	}
6640 
6641 	if (sblk->stat_EtherStatsCollisions) {
6642 		if_printf(ifp, "         0x%08X : EtherStatsCollisions\n",
6643 			  sblk->stat_EtherStatsCollisions);
6644 	}
6645 
6646 	if (sblk->stat_EtherStatsFragments)  {
6647 		if_printf(ifp, "         0x%08X : EtherStatsFragments\n",
6648 			  sblk->stat_EtherStatsFragments);
6649 	}
6650 
6651 	if (sblk->stat_EtherStatsJabbers) {
6652 		if_printf(ifp, "         0x%08X : EtherStatsJabbers\n",
6653 			  sblk->stat_EtherStatsJabbers);
6654 	}
6655 
6656 	if (sblk->stat_EtherStatsUndersizePkts) {
6657 		if_printf(ifp, "         0x%08X : EtherStatsUndersizePkts\n",
6658 			  sblk->stat_EtherStatsUndersizePkts);
6659 	}
6660 
6661 	if (sblk->stat_EtherStatsOverrsizePkts) {
6662 		if_printf(ifp, "         0x%08X : EtherStatsOverrsizePkts\n",
6663 			  sblk->stat_EtherStatsOverrsizePkts);
6664 	}
6665 
6666 	if (sblk->stat_EtherStatsPktsRx64Octets) {
6667 		if_printf(ifp, "         0x%08X : EtherStatsPktsRx64Octets\n",
6668 			  sblk->stat_EtherStatsPktsRx64Octets);
6669 	}
6670 
6671 	if (sblk->stat_EtherStatsPktsRx65Octetsto127Octets) {
6672 		if_printf(ifp, "         0x%08X : "
6673 			  "EtherStatsPktsRx65Octetsto127Octets\n",
6674 			  sblk->stat_EtherStatsPktsRx65Octetsto127Octets);
6675 	}
6676 
6677 	if (sblk->stat_EtherStatsPktsRx128Octetsto255Octets) {
6678 		if_printf(ifp, "         0x%08X : "
6679 			  "EtherStatsPktsRx128Octetsto255Octets\n",
6680 			  sblk->stat_EtherStatsPktsRx128Octetsto255Octets);
6681 	}
6682 
6683 	if (sblk->stat_EtherStatsPktsRx256Octetsto511Octets) {
6684 		if_printf(ifp, "         0x%08X : "
6685 			  "EtherStatsPktsRx256Octetsto511Octets\n",
6686 			  sblk->stat_EtherStatsPktsRx256Octetsto511Octets);
6687 	}
6688 
6689 	if (sblk->stat_EtherStatsPktsRx512Octetsto1023Octets) {
6690 		if_printf(ifp, "         0x%08X : "
6691 			  "EtherStatsPktsRx512Octetsto1023Octets\n",
6692 			  sblk->stat_EtherStatsPktsRx512Octetsto1023Octets);
6693 	}
6694 
6695 	if (sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets) {
6696 		if_printf(ifp, "         0x%08X : "
6697 			  "EtherStatsPktsRx1024Octetsto1522Octets\n",
6698 			  sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets);
6699 	}
6700 
6701 	if (sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets) {
6702 		if_printf(ifp, "         0x%08X : "
6703 			  "EtherStatsPktsRx1523Octetsto9022Octets\n",
6704 			  sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets);
6705 	}
6706 
6707 	if (sblk->stat_EtherStatsPktsTx64Octets) {
6708 		if_printf(ifp, "         0x%08X : EtherStatsPktsTx64Octets\n",
6709 			  sblk->stat_EtherStatsPktsTx64Octets);
6710 	}
6711 
6712 	if (sblk->stat_EtherStatsPktsTx65Octetsto127Octets) {
6713 		if_printf(ifp, "         0x%08X : "
6714 			  "EtherStatsPktsTx65Octetsto127Octets\n",
6715 			  sblk->stat_EtherStatsPktsTx65Octetsto127Octets);
6716 	}
6717 
6718 	if (sblk->stat_EtherStatsPktsTx128Octetsto255Octets) {
6719 		if_printf(ifp, "         0x%08X : "
6720 			  "EtherStatsPktsTx128Octetsto255Octets\n",
6721 			  sblk->stat_EtherStatsPktsTx128Octetsto255Octets);
6722 	}
6723 
6724 	if (sblk->stat_EtherStatsPktsTx256Octetsto511Octets) {
6725 		if_printf(ifp, "         0x%08X : "
6726 			  "EtherStatsPktsTx256Octetsto511Octets\n",
6727 			  sblk->stat_EtherStatsPktsTx256Octetsto511Octets);
6728 	}
6729 
6730 	if (sblk->stat_EtherStatsPktsTx512Octetsto1023Octets) {
6731 		if_printf(ifp, "         0x%08X : "
6732 			  "EtherStatsPktsTx512Octetsto1023Octets\n",
6733 			  sblk->stat_EtherStatsPktsTx512Octetsto1023Octets);
6734 	}
6735 
6736 	if (sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets) {
6737 		if_printf(ifp, "         0x%08X : "
6738 			  "EtherStatsPktsTx1024Octetsto1522Octets\n",
6739 			  sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets);
6740 	}
6741 
6742 	if (sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets) {
6743 		if_printf(ifp, "         0x%08X : "
6744 			  "EtherStatsPktsTx1523Octetsto9022Octets\n",
6745 			  sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets);
6746 	}
6747 
6748 	if (sblk->stat_XonPauseFramesReceived) {
6749 		if_printf(ifp, "         0x%08X : XonPauseFramesReceived\n",
6750 			  sblk->stat_XonPauseFramesReceived);
6751 	}
6752 
6753 	if (sblk->stat_XoffPauseFramesReceived) {
6754 		if_printf(ifp, "          0x%08X : XoffPauseFramesReceived\n",
6755 			  sblk->stat_XoffPauseFramesReceived);
6756 	}
6757 
6758 	if (sblk->stat_OutXonSent) {
6759 		if_printf(ifp, "         0x%08X : OutXoffSent\n",
6760 			  sblk->stat_OutXonSent);
6761 	}
6762 
6763 	if (sblk->stat_OutXoffSent) {
6764 		if_printf(ifp, "         0x%08X : OutXoffSent\n",
6765 			  sblk->stat_OutXoffSent);
6766 	}
6767 
6768 	if (sblk->stat_FlowControlDone) {
6769 		if_printf(ifp, "         0x%08X : FlowControlDone\n",
6770 			  sblk->stat_FlowControlDone);
6771 	}
6772 
6773 	if (sblk->stat_MacControlFramesReceived) {
6774 		if_printf(ifp, "         0x%08X : MacControlFramesReceived\n",
6775 			  sblk->stat_MacControlFramesReceived);
6776 	}
6777 
6778 	if (sblk->stat_XoffStateEntered) {
6779 		if_printf(ifp, "         0x%08X : XoffStateEntered\n",
6780 			  sblk->stat_XoffStateEntered);
6781 	}
6782 
6783 	if (sblk->stat_IfInFramesL2FilterDiscards) {
6784 		if_printf(ifp, "         0x%08X : IfInFramesL2FilterDiscards\n",			  sblk->stat_IfInFramesL2FilterDiscards);
6785 	}
6786 
6787 	if (sblk->stat_IfInRuleCheckerDiscards) {
6788 		if_printf(ifp, "         0x%08X : IfInRuleCheckerDiscards\n",
6789 			  sblk->stat_IfInRuleCheckerDiscards);
6790 	}
6791 
6792 	if (sblk->stat_IfInFTQDiscards) {
6793 		if_printf(ifp, "         0x%08X : IfInFTQDiscards\n",
6794 			  sblk->stat_IfInFTQDiscards);
6795 	}
6796 
6797 	if (sblk->stat_IfInMBUFDiscards) {
6798 		if_printf(ifp, "         0x%08X : IfInMBUFDiscards\n",
6799 			  sblk->stat_IfInMBUFDiscards);
6800 	}
6801 
6802 	if (sblk->stat_IfInRuleCheckerP4Hit) {
6803 		if_printf(ifp, "         0x%08X : IfInRuleCheckerP4Hit\n",
6804 			  sblk->stat_IfInRuleCheckerP4Hit);
6805 	}
6806 
6807 	if (sblk->stat_CatchupInRuleCheckerDiscards) {
6808 		if_printf(ifp, "         0x%08X : "
6809 			  "CatchupInRuleCheckerDiscards\n",
6810 			  sblk->stat_CatchupInRuleCheckerDiscards);
6811 	}
6812 
6813 	if (sblk->stat_CatchupInFTQDiscards) {
6814 		if_printf(ifp, "         0x%08X : CatchupInFTQDiscards\n",
6815 			  sblk->stat_CatchupInFTQDiscards);
6816 	}
6817 
6818 	if (sblk->stat_CatchupInMBUFDiscards) {
6819 		if_printf(ifp, "         0x%08X : CatchupInMBUFDiscards\n",
6820 			  sblk->stat_CatchupInMBUFDiscards);
6821 	}
6822 
6823 	if (sblk->stat_CatchupInRuleCheckerP4Hit) {
6824 		if_printf(ifp, "         0x%08X : CatchupInRuleCheckerP4Hit\n",
6825 			  sblk->stat_CatchupInRuleCheckerP4Hit);
6826 	}
6827 
6828 	if_printf(ifp,
6829 	"----------------------------"
6830 	"----------------"
6831 	"----------------------------\n");
6832 }
6833 
6834 
6835 /****************************************************************************/
6836 /* Prints out a summary of the driver state.                                */
6837 /*                                                                          */
6838 /* Returns:                                                                 */
6839 /*   Nothing.                                                               */
6840 /****************************************************************************/
6841 static void
6842 bce_dump_driver_state(struct bce_softc *sc)
6843 {
6844 	struct ifnet *ifp = &sc->arpcom.ac_if;
6845 	uint32_t val_hi, val_lo;
6846 
6847 	if_printf(ifp,
6848 	"-----------------------------"
6849 	" Driver State "
6850 	"-----------------------------\n");
6851 
6852 	val_hi = BCE_ADDR_HI(sc);
6853 	val_lo = BCE_ADDR_LO(sc);
6854 	if_printf(ifp, "0x%08X:%08X - (sc) driver softc structure "
6855 		  "virtual address\n", val_hi, val_lo);
6856 
6857 	val_hi = BCE_ADDR_HI(sc->status_block);
6858 	val_lo = BCE_ADDR_LO(sc->status_block);
6859 	if_printf(ifp, "0x%08X:%08X - (sc->status_block) status block "
6860 		  "virtual address\n", val_hi, val_lo);
6861 
6862 	val_hi = BCE_ADDR_HI(sc->stats_block);
6863 	val_lo = BCE_ADDR_LO(sc->stats_block);
6864 	if_printf(ifp, "0x%08X:%08X - (sc->stats_block) statistics block "
6865 		  "virtual address\n", val_hi, val_lo);
6866 
6867 	val_hi = BCE_ADDR_HI(sc->tx_bd_chain);
6868 	val_lo = BCE_ADDR_LO(sc->tx_bd_chain);
6869 	if_printf(ifp, "0x%08X:%08X - (sc->tx_bd_chain) tx_bd chain "
6870 		  "virtual adddress\n", val_hi, val_lo);
6871 
6872 	val_hi = BCE_ADDR_HI(sc->rx_bd_chain);
6873 	val_lo = BCE_ADDR_LO(sc->rx_bd_chain);
6874 	if_printf(ifp, "0x%08X:%08X - (sc->rx_bd_chain) rx_bd chain "
6875 		  "virtual address\n", val_hi, val_lo);
6876 
6877 	val_hi = BCE_ADDR_HI(sc->tx_mbuf_ptr);
6878 	val_lo = BCE_ADDR_LO(sc->tx_mbuf_ptr);
6879 	if_printf(ifp, "0x%08X:%08X - (sc->tx_mbuf_ptr) tx mbuf chain "
6880 		  "virtual address\n", val_hi, val_lo);
6881 
6882 	val_hi = BCE_ADDR_HI(sc->rx_mbuf_ptr);
6883 	val_lo = BCE_ADDR_LO(sc->rx_mbuf_ptr);
6884 	if_printf(ifp, "0x%08X:%08X - (sc->rx_mbuf_ptr) rx mbuf chain "
6885 		  "virtual address\n", val_hi, val_lo);
6886 
6887 	if_printf(ifp, "         0x%08X - (sc->interrupts_generated) "
6888 		  "h/w intrs\n", sc->interrupts_generated);
6889 
6890 	if_printf(ifp, "         0x%08X - (sc->rx_interrupts) "
6891 		  "rx interrupts handled\n", sc->rx_interrupts);
6892 
6893 	if_printf(ifp, "         0x%08X - (sc->tx_interrupts) "
6894 		  "tx interrupts handled\n", sc->tx_interrupts);
6895 
6896 	if_printf(ifp, "         0x%08X - (sc->last_status_idx) "
6897 		  "status block index\n", sc->last_status_idx);
6898 
6899 	if_printf(ifp, "     0x%04X(0x%04X) - (sc->tx_prod) "
6900 		  "tx producer index\n",
6901 		  sc->tx_prod, (uint16_t)TX_CHAIN_IDX(sc->tx_prod));
6902 
6903 	if_printf(ifp, "     0x%04X(0x%04X) - (sc->tx_cons) "
6904 		  "tx consumer index\n",
6905 		  sc->tx_cons, (uint16_t)TX_CHAIN_IDX(sc->tx_cons));
6906 
6907 	if_printf(ifp, "         0x%08X - (sc->tx_prod_bseq) "
6908 		  "tx producer bseq index\n", sc->tx_prod_bseq);
6909 
6910 	if_printf(ifp, "     0x%04X(0x%04X) - (sc->rx_prod) "
6911 		  "rx producer index\n",
6912 		  sc->rx_prod, (uint16_t)RX_CHAIN_IDX(sc->rx_prod));
6913 
6914 	if_printf(ifp, "     0x%04X(0x%04X) - (sc->rx_cons) "
6915 		  "rx consumer index\n",
6916 		  sc->rx_cons, (uint16_t)RX_CHAIN_IDX(sc->rx_cons));
6917 
6918 	if_printf(ifp, "         0x%08X - (sc->rx_prod_bseq) "
6919 		  "rx producer bseq index\n", sc->rx_prod_bseq);
6920 
6921 	if_printf(ifp, "         0x%08X - (sc->rx_mbuf_alloc) "
6922 		  "rx mbufs allocated\n", sc->rx_mbuf_alloc);
6923 
6924 	if_printf(ifp, "         0x%08X - (sc->free_rx_bd) "
6925 		  "free rx_bd's\n", sc->free_rx_bd);
6926 
6927 	if_printf(ifp, "0x%08X/%08X - (sc->rx_low_watermark) rx "
6928 		  "low watermark\n", sc->rx_low_watermark, sc->max_rx_bd);
6929 
6930 	if_printf(ifp, "         0x%08X - (sc->txmbuf_alloc) "
6931 		  "tx mbufs allocated\n", sc->tx_mbuf_alloc);
6932 
6933 	if_printf(ifp, "         0x%08X - (sc->rx_mbuf_alloc) "
6934 		  "rx mbufs allocated\n", sc->rx_mbuf_alloc);
6935 
6936 	if_printf(ifp, "         0x%08X - (sc->used_tx_bd) used tx_bd's\n",
6937 		  sc->used_tx_bd);
6938 
6939 	if_printf(ifp, "0x%08X/%08X - (sc->tx_hi_watermark) tx hi watermark\n",
6940 		  sc->tx_hi_watermark, sc->max_tx_bd);
6941 
6942 	if_printf(ifp, "         0x%08X - (sc->mbuf_alloc_failed) "
6943 		  "failed mbuf alloc\n", sc->mbuf_alloc_failed);
6944 
6945 	if_printf(ifp,
6946 	"----------------------------"
6947 	"----------------"
6948 	"----------------------------\n");
6949 }
6950 
6951 
6952 /****************************************************************************/
6953 /* Prints out the hardware state through a summary of important registers,  */
6954 /* followed by a complete register dump.                                    */
6955 /*                                                                          */
6956 /* Returns:                                                                 */
6957 /*   Nothing.                                                               */
6958 /****************************************************************************/
6959 static void
6960 bce_dump_hw_state(struct bce_softc *sc)
6961 {
6962 	struct ifnet *ifp = &sc->arpcom.ac_if;
6963 	uint32_t val1;
6964 	int i;
6965 
6966 	if_printf(ifp,
6967 	"----------------------------"
6968 	" Hardware State "
6969 	"----------------------------\n");
6970 
6971 	if_printf(ifp, "0x%08X - bootcode version\n", sc->bce_fw_ver);
6972 
6973 	val1 = REG_RD(sc, BCE_MISC_ENABLE_STATUS_BITS);
6974 	if_printf(ifp, "0x%08X - (0x%06X) misc_enable_status_bits\n",
6975 		  val1, BCE_MISC_ENABLE_STATUS_BITS);
6976 
6977 	val1 = REG_RD(sc, BCE_DMA_STATUS);
6978 	if_printf(ifp, "0x%08X - (0x%04X) dma_status\n", val1, BCE_DMA_STATUS);
6979 
6980 	val1 = REG_RD(sc, BCE_CTX_STATUS);
6981 	if_printf(ifp, "0x%08X - (0x%04X) ctx_status\n", val1, BCE_CTX_STATUS);
6982 
6983 	val1 = REG_RD(sc, BCE_EMAC_STATUS);
6984 	if_printf(ifp, "0x%08X - (0x%04X) emac_status\n",
6985 		  val1, BCE_EMAC_STATUS);
6986 
6987 	val1 = REG_RD(sc, BCE_RPM_STATUS);
6988 	if_printf(ifp, "0x%08X - (0x%04X) rpm_status\n", val1, BCE_RPM_STATUS);
6989 
6990 	val1 = REG_RD(sc, BCE_TBDR_STATUS);
6991 	if_printf(ifp, "0x%08X - (0x%04X) tbdr_status\n",
6992 		  val1, BCE_TBDR_STATUS);
6993 
6994 	val1 = REG_RD(sc, BCE_TDMA_STATUS);
6995 	if_printf(ifp, "0x%08X - (0x%04X) tdma_status\n",
6996 		  val1, BCE_TDMA_STATUS);
6997 
6998 	val1 = REG_RD(sc, BCE_HC_STATUS);
6999 	if_printf(ifp, "0x%08X - (0x%06X) hc_status\n", val1, BCE_HC_STATUS);
7000 
7001 	val1 = REG_RD_IND(sc, BCE_TXP_CPU_STATE);
7002 	if_printf(ifp, "0x%08X - (0x%06X) txp_cpu_state\n",
7003 		  val1, BCE_TXP_CPU_STATE);
7004 
7005 	val1 = REG_RD_IND(sc, BCE_TPAT_CPU_STATE);
7006 	if_printf(ifp, "0x%08X - (0x%06X) tpat_cpu_state\n",
7007 		  val1, BCE_TPAT_CPU_STATE);
7008 
7009 	val1 = REG_RD_IND(sc, BCE_RXP_CPU_STATE);
7010 	if_printf(ifp, "0x%08X - (0x%06X) rxp_cpu_state\n",
7011 		  val1, BCE_RXP_CPU_STATE);
7012 
7013 	val1 = REG_RD_IND(sc, BCE_COM_CPU_STATE);
7014 	if_printf(ifp, "0x%08X - (0x%06X) com_cpu_state\n",
7015 		  val1, BCE_COM_CPU_STATE);
7016 
7017 	val1 = REG_RD_IND(sc, BCE_MCP_CPU_STATE);
7018 	if_printf(ifp, "0x%08X - (0x%06X) mcp_cpu_state\n",
7019 		  val1, BCE_MCP_CPU_STATE);
7020 
7021 	val1 = REG_RD_IND(sc, BCE_CP_CPU_STATE);
7022 	if_printf(ifp, "0x%08X - (0x%06X) cp_cpu_state\n",
7023 		  val1, BCE_CP_CPU_STATE);
7024 
7025 	if_printf(ifp,
7026 	"----------------------------"
7027 	"----------------"
7028 	"----------------------------\n");
7029 
7030 	if_printf(ifp,
7031 	"----------------------------"
7032 	" Register  Dump "
7033 	"----------------------------\n");
7034 
7035 	for (i = 0x400; i < 0x8000; i += 0x10) {
7036 		if_printf(ifp, "0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n", i,
7037 			  REG_RD(sc, i),
7038 			  REG_RD(sc, i + 0x4),
7039 			  REG_RD(sc, i + 0x8),
7040 			  REG_RD(sc, i + 0xc));
7041 	}
7042 
7043 	if_printf(ifp,
7044 	"----------------------------"
7045 	"----------------"
7046 	"----------------------------\n");
7047 }
7048 
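/*
 * REG_RD() vs REG_RD_IND() in the dumps above and below (summary of the
 * access methods as used in this file): REG_RD() reads a register directly
 * through the memory-mapped BAR, while REG_RD_IND() goes through the
 * indirect address/data window, which is how the per-processor register
 * and scratch areas (TXP, TPAT, RXP, COM, MCP, CP) are reached.
 */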
7049 
7050 /****************************************************************************/
7051 /* Prints out the TXP state.                                                */
7052 /*                                                                          */
7053 /* Returns:                                                                 */
7054 /*   Nothing.                                                               */
7055 /****************************************************************************/
7056 static void
7057 bce_dump_txp_state(struct bce_softc *sc)
7058 {
7059 	struct ifnet *ifp = &sc->arpcom.ac_if;
7060 	uint32_t val1;
7061 	int i;
7062 
7063 	if_printf(ifp,
7064 	"----------------------------"
7065 	"   TXP  State   "
7066 	"----------------------------\n");
7067 
7068 	val1 = REG_RD_IND(sc, BCE_TXP_CPU_MODE);
7069 	if_printf(ifp, "0x%08X - (0x%06X) txp_cpu_mode\n",
7070 		  val1, BCE_TXP_CPU_MODE);
7071 
7072 	val1 = REG_RD_IND(sc, BCE_TXP_CPU_STATE);
7073 	if_printf(ifp, "0x%08X - (0x%06X) txp_cpu_state\n",
7074 		  val1, BCE_TXP_CPU_STATE);
7075 
7076 	val1 = REG_RD_IND(sc, BCE_TXP_CPU_EVENT_MASK);
7077 	if_printf(ifp, "0x%08X - (0x%06X) txp_cpu_event_mask\n",
7078 		  val1, BCE_TXP_CPU_EVENT_MASK);
7079 
7080 	if_printf(ifp,
7081 	"----------------------------"
7082 	" Register  Dump "
7083 	"----------------------------\n");
7084 
7085 	for (i = BCE_TXP_CPU_MODE; i < 0x68000; i += 0x10) {
7086 		/* Skip the big blank spaces */
7087 		if (i < 0x45400 || i > 0x5ffff) {
7088 			if_printf(ifp, "0x%04X: "
7089 				  "0x%08X 0x%08X 0x%08X 0x%08X\n", i,
7090 				  REG_RD_IND(sc, i),
7091 				  REG_RD_IND(sc, i + 0x4),
7092 				  REG_RD_IND(sc, i + 0x8),
7093 				  REG_RD_IND(sc, i + 0xc));
7094 		}
7095 	}
7096 
7097 	if_printf(ifp,
7098 	"----------------------------"
7099 	"----------------"
7100 	"----------------------------\n");
7101 }
7102 
7103 
7104 /****************************************************************************/
7105 /* Prints out the RXP state.                                                */
7106 /*                                                                          */
7107 /* Returns:                                                                 */
7108 /*   Nothing.                                                               */
7109 /****************************************************************************/
7110 static void
7111 bce_dump_rxp_state(struct bce_softc *sc)
7112 {
7113 	struct ifnet *ifp = &sc->arpcom.ac_if;
7114 	uint32_t val1;
7115 	int i;
7116 
7117 	if_printf(ifp,
7118 	"----------------------------"
7119 	"   RXP  State   "
7120 	"----------------------------\n");
7121 
7122 	val1 = REG_RD_IND(sc, BCE_RXP_CPU_MODE);
7123 	if_printf(ifp, "0x%08X - (0x%06X) rxp_cpu_mode\n",
7124 		  val1, BCE_RXP_CPU_MODE);
7125 
7126 	val1 = REG_RD_IND(sc, BCE_RXP_CPU_STATE);
7127 	if_printf(ifp, "0x%08X - (0x%06X) rxp_cpu_state\n",
7128 		  val1, BCE_RXP_CPU_STATE);
7129 
7130 	val1 = REG_RD_IND(sc, BCE_RXP_CPU_EVENT_MASK);
7131 	if_printf(ifp, "0x%08X - (0x%06X) rxp_cpu_event_mask\n",
7132 		  val1, BCE_RXP_CPU_EVENT_MASK);
7133 
7134 	if_printf(ifp,
7135 	"----------------------------"
7136 	" Register  Dump "
7137 	"----------------------------\n");
7138 
7139 	for (i = BCE_RXP_CPU_MODE; i < 0xe8fff; i += 0x10) {
7140 		/* Skip the big blank spaces */
7141 		if (i < 0xc5400 || i > 0xdffff) {
7142 			if_printf(ifp, "0x%04X: "
7143 				  "0x%08X 0x%08X 0x%08X 0x%08X\n", i,
7144 				  REG_RD_IND(sc, i),
7145 				  REG_RD_IND(sc, i + 0x4),
7146 				  REG_RD_IND(sc, i + 0x8),
7147 				  REG_RD_IND(sc, i + 0xc));
7148 		}
7149 	}
7150 
7151 	if_printf(ifp,
7152 	"----------------------------"
7153 	"----------------"
7154 	"----------------------------\n");
7155 }
7156 
7157 
7158 /****************************************************************************/
7159 /* Prints out the TPAT state.                                               */
7160 /*                                                                          */
7161 /* Returns:                                                                 */
7162 /*   Nothing.                                                               */
7163 /****************************************************************************/
7164 static void
7165 bce_dump_tpat_state(struct bce_softc *sc)
7166 {
7167 	struct ifnet *ifp = &sc->arpcom.ac_if;
7168 	uint32_t val1;
7169 	int i;
7170 
7171 	if_printf(ifp,
7172 	"----------------------------"
7173 	"   TPAT State   "
7174 	"----------------------------\n");
7175 
7176 	val1 = REG_RD_IND(sc, BCE_TPAT_CPU_MODE);
7177 	if_printf(ifp, "0x%08X - (0x%06X) tpat_cpu_mode\n",
7178 		  val1, BCE_TPAT_CPU_MODE);
7179 
7180 	val1 = REG_RD_IND(sc, BCE_TPAT_CPU_STATE);
7181 	if_printf(ifp, "0x%08X - (0x%06X) tpat_cpu_state\n",
7182 		  val1, BCE_TPAT_CPU_STATE);
7183 
7184 	val1 = REG_RD_IND(sc, BCE_TPAT_CPU_EVENT_MASK);
7185 	if_printf(ifp, "0x%08X - (0x%06X) tpat_cpu_event_mask\n",
7186 		  val1, BCE_TPAT_CPU_EVENT_MASK);
7187 
7188 	if_printf(ifp,
7189 	"----------------------------"
7190 	" Register  Dump "
7191 	"----------------------------\n");
7192 
7193 	for (i = BCE_TPAT_CPU_MODE; i < 0xa3fff; i += 0x10) {
7194 		/* Skip the big blank spaces */
7195 		if (i < 0x85400 || i > 0x9ffff) {
7196 			if_printf(ifp, "0x%04X: "
7197 				  "0x%08X 0x%08X 0x%08X 0x%08X\n", i,
7198 				  REG_RD_IND(sc, i),
7199 				  REG_RD_IND(sc, i + 0x4),
7200 				  REG_RD_IND(sc, i + 0x8),
7201 				  REG_RD_IND(sc, i + 0xc));
7202 		}
7203 	}
7204 
7205 	if_printf(ifp,
7206 	"----------------------------"
7207 	"----------------"
7208 	"----------------------------\n");
7209 }
7210 
7211 
7212 /****************************************************************************/
7213 /* Prints out the driver state and then enters the debugger.                */
7214 /*                                                                          */
7215 /* Returns:                                                                 */
7216 /*   Nothing.                                                               */
7217 /****************************************************************************/
7218 static void
7219 bce_breakpoint(struct bce_softc *sc)
7220 {
7221 #if 0
7222 	bce_freeze_controller(sc);
7223 #endif
7224 
7225 	bce_dump_driver_state(sc);
7226 	bce_dump_status_block(sc);
7227 	bce_dump_tx_chain(sc, 0, TOTAL_TX_BD);
7228 	bce_dump_hw_state(sc);
7229 	bce_dump_txp_state(sc);
7230 
7231 #if 0
7232 	bce_unfreeze_controller(sc);
7233 #endif
7234 
7235 	/* Call the debugger. */
7236 	breakpoint();
7237 }
7238 
7239 #endif	/* BCE_DEBUG */
7240 
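/*
 * Coalescing sysctl handlers.  Each of the thin wrappers below binds one
 * sysctl node to one coalescing parameter: the wrapper passes the address
 * of the parameter and the matching BCE_COALMASK_* bit to
 * bce_sysctl_coal_change(), which validates and stores the new value and
 * records in sc->bce_coalchg_mask which parameters still need to be
 * written to the chip.  The hardware is only reprogrammed later, when
 * bce_coal_change() runs with the interface serializer held and the
 * interface up.
 */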
7241 static int
7242 bce_sysctl_tx_bds_int(SYSCTL_HANDLER_ARGS)
7243 {
7244 	struct bce_softc *sc = arg1;
7245 
7246 	return bce_sysctl_coal_change(oidp, arg1, arg2, req,
7247 			&sc->bce_tx_quick_cons_trip_int,
7248 			BCE_COALMASK_TX_BDS_INT);
7249 }
7250 
7251 static int
7252 bce_sysctl_tx_bds(SYSCTL_HANDLER_ARGS)
7253 {
7254 	struct bce_softc *sc = arg1;
7255 
7256 	return bce_sysctl_coal_change(oidp, arg1, arg2, req,
7257 			&sc->bce_tx_quick_cons_trip,
7258 			BCE_COALMASK_TX_BDS);
7259 }
7260 
7261 static int
7262 bce_sysctl_tx_ticks_int(SYSCTL_HANDLER_ARGS)
7263 {
7264 	struct bce_softc *sc = arg1;
7265 
7266 	return bce_sysctl_coal_change(oidp, arg1, arg2, req,
7267 			&sc->bce_tx_ticks_int,
7268 			BCE_COALMASK_TX_TICKS_INT);
7269 }
7270 
7271 static int
7272 bce_sysctl_tx_ticks(SYSCTL_HANDLER_ARGS)
7273 {
7274 	struct bce_softc *sc = arg1;
7275 
7276 	return bce_sysctl_coal_change(oidp, arg1, arg2, req,
7277 			&sc->bce_tx_ticks,
7278 			BCE_COALMASK_TX_TICKS);
7279 }
7280 
7281 static int
7282 bce_sysctl_rx_bds_int(SYSCTL_HANDLER_ARGS)
7283 {
7284 	struct bce_softc *sc = arg1;
7285 
7286 	return bce_sysctl_coal_change(oidp, arg1, arg2, req,
7287 			&sc->bce_rx_quick_cons_trip_int,
7288 			BCE_COALMASK_RX_BDS_INT);
7289 }
7290 
7291 static int
7292 bce_sysctl_rx_bds(SYSCTL_HANDLER_ARGS)
7293 {
7294 	struct bce_softc *sc = arg1;
7295 
7296 	return bce_sysctl_coal_change(oidp, arg1, arg2, req,
7297 			&sc->bce_rx_quick_cons_trip,
7298 			BCE_COALMASK_RX_BDS);
7299 }
7300 
7301 static int
7302 bce_sysctl_rx_ticks_int(SYSCTL_HANDLER_ARGS)
7303 {
7304 	struct bce_softc *sc = arg1;
7305 
7306 	return bce_sysctl_coal_change(oidp, arg1, arg2, req,
7307 			&sc->bce_rx_ticks_int,
7308 			BCE_COALMASK_RX_TICKS_INT);
7309 }
7310 
7311 static int
7312 bce_sysctl_rx_ticks(SYSCTL_HANDLER_ARGS)
7313 {
7314 	struct bce_softc *sc = arg1;
7315 
7316 	return bce_sysctl_coal_change(oidp, arg1, arg2, req,
7317 			&sc->bce_rx_ticks,
7318 			BCE_COALMASK_RX_TICKS);
7319 }
7320 
7321 static int
7322 bce_sysctl_coal_change(SYSCTL_HANDLER_ARGS, uint32_t *coal,
7323 		       uint32_t coalchg_mask)
7324 {
7325 	struct bce_softc *sc = arg1;
7326 	struct ifnet *ifp = &sc->arpcom.ac_if;
7327 	int error = 0, v;
7328 
7329 	lwkt_serialize_enter(ifp->if_serializer);
7330 
7331 	v = *coal;
7332 	error = sysctl_handle_int(oidp, &v, 0, req);
7333 	if (!error && req->newptr != NULL) {
7334 		if (v < 0) {
7335 			error = EINVAL;
7336 		} else {
7337 			*coal = v;
7338 			sc->bce_coalchg_mask |= coalchg_mask;
7339 		}
7340 	}
7341 
7342 	lwkt_serialize_exit(ifp->if_serializer);
7343 	return error;
7344 }
7345 
7346 static void
7347 bce_coal_change(struct bce_softc *sc)
7348 {
7349 	struct ifnet *ifp = &sc->arpcom.ac_if;
7350 
7351 	ASSERT_SERIALIZED(ifp->if_serializer);
7352 
7353 	if ((ifp->if_flags & IFF_RUNNING) == 0) {
7354 		sc->bce_coalchg_mask = 0;
7355 		return;
7356 	}
7357 
7358 	if (sc->bce_coalchg_mask &
7359 	    (BCE_COALMASK_TX_BDS | BCE_COALMASK_TX_BDS_INT)) {
7360 		REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
7361 		       (sc->bce_tx_quick_cons_trip_int << 16) |
7362 		       sc->bce_tx_quick_cons_trip);
7363 		if (bootverbose) {
7364 			if_printf(ifp, "tx_bds %u, tx_bds_int %u\n",
7365 				  sc->bce_tx_quick_cons_trip,
7366 				  sc->bce_tx_quick_cons_trip_int);
7367 		}
7368 	}
7369 
7370 	if (sc->bce_coalchg_mask &
7371 	    (BCE_COALMASK_TX_TICKS | BCE_COALMASK_TX_TICKS_INT)) {
7372 		REG_WR(sc, BCE_HC_TX_TICKS,
7373 		       (sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks);
7374 		if (bootverbose) {
7375 			if_printf(ifp, "tx_ticks %u, tx_ticks_int %u\n",
7376 				  sc->bce_tx_ticks, sc->bce_tx_ticks_int);
7377 		}
7378 	}
7379 
7380 	if (sc->bce_coalchg_mask &
7381 	    (BCE_COALMASK_RX_BDS | BCE_COALMASK_RX_BDS_INT)) {
7382 		REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
7383 		       (sc->bce_rx_quick_cons_trip_int << 16) |
7384 		       sc->bce_rx_quick_cons_trip);
7385 		if (bootverbose) {
7386 			if_printf(ifp, "rx_bds %u, rx_bds_int %u\n",
7387 				  sc->bce_rx_quick_cons_trip,
7388 				  sc->bce_rx_quick_cons_trip_int);
7389 		}
7390 	}
7391 
7392 	if (sc->bce_coalchg_mask &
7393 	    (BCE_COALMASK_RX_TICKS | BCE_COALMASK_RX_TICKS_INT)) {
7394 		REG_WR(sc, BCE_HC_RX_TICKS,
7395 		       (sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks);
7396 		if (bootverbose) {
7397 			if_printf(ifp, "rx_ticks %u, rx_ticks_int %u\n",
7398 				  sc->bce_rx_ticks, sc->bce_rx_ticks_int);
7399 		}
7400 	}
7401 
7402 	sc->bce_coalchg_mask = 0;
7403 }
7404
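/*
 * Layout of the host coalescing registers written above (illustration with
 * made-up values): each register packs the "during interrupt" value into
 * the upper 16 bits and the normal value into the lower 16 bits, so e.g.
 * bce_tx_ticks_int = 80 and bce_tx_ticks = 1023 would be written to
 * BCE_HC_TX_TICKS as
 *
 *	(80 << 16) | 1023 == 0x005003ff
 *
 * Values larger than 65535 therefore cannot be expressed; the sysctl
 * handlers above only reject negative values and leave any further range
 * checking to the hardware definition of these registers.
 */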