1 /*-
2  * Copyright (c) 2006-2009 Broadcom Corporation
3  *	David Christensen <davidch@broadcom.com>.  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  *
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. Neither the name of Broadcom Corporation nor the name of its contributors
15  *    may be used to endorse or promote products derived from this software
16  *    without specific prior written consent.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
22  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28  * THE POSSIBILITY OF SUCH DAMAGE.
29  */
30 
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
33 
34 /*
35  * The following controllers are supported by this driver:
36  *   BCM5706C A2, A3
37  *   BCM5706S A2, A3
38  *   BCM5708C B1, B2
39  *   BCM5708S B1, B2
40  *   BCM5709C A1, C0
41  *   BCM5716  C0
42  *
43  * The following controllers are not supported by this driver:
44  *   BCM5706C A0, A1 (pre-production)
45  *   BCM5706S A0, A1 (pre-production)
46  *   BCM5708C A0, B0 (pre-production)
47  *   BCM5708S A0, B0 (pre-production)
48  *   BCM5709C A0, B0, B1, B2 (pre-production)
49  *   BCM5709S A0, A1, B0, B1, B2, C0 (pre-production)
50  */
51 
52 #include "opt_bce.h"
53 
54 #include <dev/bce/if_bcereg.h>
55 #include <dev/bce/if_bcefw.h>
56 
57 /****************************************************************************/
58 /* BCE Debug Options                                                        */
59 /****************************************************************************/
60 #ifdef BCE_DEBUG
61 	u32 bce_debug = BCE_WARN;
62 
63 	/*          0 = Never              */
64 	/*          1 = 1 in 2,147,483,648 */
65 	/*        256 = 1 in     8,388,608 */
66 	/*       2048 = 1 in     1,048,576 */
67 	/*      65536 = 1 in        32,768 */
68 	/*    1048576 = 1 in         2,048 */
69 	/*  268435456 = 1 in             8 */
70 	/*  536870912 = 1 in             4 */
71 	/* 1073741824 = 1 in             2 */
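	/* The values above are the thresholds used by the simulated failure */
	/* controls below.  Each check is assumed to compare its threshold   */
	/* against a fresh pseudo-random number, so larger values make the   */
	/* simulated failure fire more often.                                 */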
72 
73 	/* Controls how often the l2_fhdr frame error check will fail. */
74 	int bce_debug_l2fhdr_status_check = 0;
75 
76 	/* Controls how often the unexpected attention check will fail. */
77 	int bce_debug_unexpected_attention = 0;
78 
79 	/* Controls how often to simulate an mbuf allocation failure. */
80 	int bce_debug_mbuf_allocation_failure = 0;
81 
82 	/* Controls how often to simulate a DMA mapping failure. */
83 	int bce_debug_dma_map_addr_failure = 0;
84 
85 	/* Controls how often to simulate a bootcode failure. */
86 	int bce_debug_bootcode_running_failure = 0;
87 #endif
88 
89 /****************************************************************************/
90 /* BCE Build Time Options                                                   */
91 /****************************************************************************/
92 /* #define BCE_NVRAM_WRITE_SUPPORT 1 */
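/*
 * Uncommenting the define above enables the NVRAM write/erase routines
 * that are guarded by #ifdef BCE_NVRAM_WRITE_SUPPORT later in this file.
 */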
93 
94 
95 /****************************************************************************/
96 /* PCI Device ID Table                                                      */
97 /*                                                                          */
98 /* Used by bce_probe() to identify the devices supported by this driver.    */
99 /****************************************************************************/
100 #define BCE_DEVDESC_MAX		64
101 
102 static struct bce_type bce_devs[] = {
103 	/* BCM5706C Controllers and OEM boards. */
104 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706,  HP_VENDORID, 0x3101,
105 		"HP NC370T Multifunction Gigabit Server Adapter" },
106 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706,  HP_VENDORID, 0x3106,
107 		"HP NC370i Multifunction Gigabit Server Adapter" },
108 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706,  HP_VENDORID, 0x3070,
109 		"HP NC380T PCIe DP Multifunc Gig Server Adapter" },
110 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706,  HP_VENDORID, 0x1709,
111 		"HP NC371i Multifunction Gigabit Server Adapter" },
112 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706,  PCI_ANY_ID,  PCI_ANY_ID,
113 		"Broadcom NetXtreme II BCM5706 1000Base-T" },
114 
115 	/* BCM5706S controllers and OEM boards. */
116 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, HP_VENDORID, 0x3102,
117 		"HP NC370F Multifunction Gigabit Server Adapter" },
118 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, PCI_ANY_ID,  PCI_ANY_ID,
119 		"Broadcom NetXtreme II BCM5706 1000Base-SX" },
120 
121 	/* BCM5708C controllers and OEM boards. */
122 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708,  HP_VENDORID, 0x7037,
123 		"HP NC373T PCIe Multifunction Gig Server Adapter" },
124 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708,  HP_VENDORID, 0x7038,
125 		"HP NC373i Multifunction Gigabit Server Adapter" },
126 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708,  HP_VENDORID, 0x7045,
127 		"HP NC374m PCIe Multifunction Adapter" },
128 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708,  PCI_ANY_ID,  PCI_ANY_ID,
129 		"Broadcom NetXtreme II BCM5708 1000Base-T" },
130 
131 	/* BCM5708S controllers and OEM boards. */
132 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708S,  HP_VENDORID, 0x1706,
133 		"HP NC373m Multifunction Gigabit Server Adapter" },
134 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708S,  HP_VENDORID, 0x703b,
135 		"HP NC373i Multifunction Gigabit Server Adapter" },
136 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708S,  HP_VENDORID, 0x703d,
137 		"HP NC373F PCIe Multifunc Giga Server Adapter" },
138 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708S,  PCI_ANY_ID,  PCI_ANY_ID,
139 		"Broadcom NetXtreme II BCM5708 1000Base-SX" },
140 
141 	/* BCM5709C controllers and OEM boards. */
142 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709,  HP_VENDORID, 0x7055,
143 		"HP NC382i DP Multifunction Gigabit Server Adapter" },
144 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709,  HP_VENDORID, 0x7059,
145 		"HP NC382T PCIe DP Multifunction Gigabit Server Adapter" },
146 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709,  PCI_ANY_ID,  PCI_ANY_ID,
147 		"Broadcom NetXtreme II BCM5709 1000Base-T" },
148 
149 	/* BCM5709S controllers and OEM boards. */
150 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709S,  HP_VENDORID, 0x171d,
151 		"HP NC382m DP 1GbE Multifunction BL-c Adapter" },
152 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709S,  HP_VENDORID, 0x7056,
153 		"HP NC382i DP Multifunction Gigabit Server Adapter" },
154 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709S,  PCI_ANY_ID,  PCI_ANY_ID,
155 		"Broadcom NetXtreme II BCM5709 1000Base-SX" },
156 
157 	/* BCM5716 controllers and OEM boards. */
158 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5716,  PCI_ANY_ID,  PCI_ANY_ID,
159 		"Broadcom NetXtreme II BCM5716 1000Base-T" },
160 
161 	{ 0, 0, 0, 0, NULL }
162 };
163 
164 
165 /****************************************************************************/
166 /* Supported Flash NVRAM device data.                                       */
167 /****************************************************************************/
168 static struct flash_spec flash_table[] =
169 {
170 #define BUFFERED_FLAGS		(BCE_NV_BUFFERED | BCE_NV_TRANSLATE)
171 #define NONBUFFERED_FLAGS	(BCE_NV_WREN)
172 
173 	/* Slow EEPROM */
174 	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
175 	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
176 	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
177 	 "EEPROM - slow"},
178 	/* Expansion entry 0001 */
179 	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
180 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
181 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
182 	 "Entry 0001"},
183 	/* Saifun SA25F010 (non-buffered flash) */
184 	/* strap, cfg1, & write1 need updates */
185 	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
186 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
187 	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
188 	 "Non-buffered flash (128kB)"},
189 	/* Saifun SA25F020 (non-buffered flash) */
190 	/* strap, cfg1, & write1 need updates */
191 	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
192 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
193 	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
194 	 "Non-buffered flash (256kB)"},
195 	/* Expansion entry 0100 */
196 	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
197 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
198 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
199 	 "Entry 0100"},
200 	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
201 	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
202 	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
203 	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
204 	 "Entry 0101: ST M45PE10 (128kB non-buffered)"},
205 	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
206 	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
207 	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
208 	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
209 	 "Entry 0110: ST M45PE20 (256kB non-buffered)"},
210 	/* Saifun SA25F005 (non-buffered flash) */
211 	/* strap, cfg1, & write1 need updates */
212 	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
213 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
214 	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
215 	 "Non-buffered flash (64kB)"},
216 	/* Fast EEPROM */
217 	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
218 	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
219 	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
220 	 "EEPROM - fast"},
221 	/* Expansion entry 1001 */
222 	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
223 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
224 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
225 	 "Entry 1001"},
226 	/* Expansion entry 1010 */
227 	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
228 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
229 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
230 	 "Entry 1010"},
231 	/* ATMEL AT45DB011B (buffered flash) */
232 	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
233 	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
234 	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
235 	 "Buffered flash (128kB)"},
236 	/* Expansion entry 1100 */
237 	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
238 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
239 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
240 	 "Entry 1100"},
241 	/* Expansion entry 1101 */
242 	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
243 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
244 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
245 	 "Entry 1101"},
246 	/* Atmel Expansion entry 1110 */
247 	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
248 	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
249 	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
250 	 "Entry 1110 (Atmel)"},
251 	/* ATMEL AT45DB021B (buffered flash) */
252 	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
253 	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
254 	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
255 	 "Buffered flash (256kB)"},
256 };
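
/*
 * NVRAM initialization is expected to read the flash strapping value from
 * the controller and match it against the first field of each entry above
 * to select the flash parameters; the "Expansion entry" records appear to
 * be placeholders for strap values with no known device behind them.
 */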
257 
258 /*
259  * The BCM5709 controllers transparently handle the
260  * differences between Atmel 264 byte pages and all
261  * flash devices which use 256 byte pages, so no
262  * logical-to-physical mapping is required in the
263  * driver.
264  */
265 static struct flash_spec flash_5709 = {
266 	.flags		= BCE_NV_BUFFERED,
267 	.page_bits	= BCM5709_FLASH_PAGE_BITS,
268 	.page_size	= BCM5709_FLASH_PAGE_SIZE,
269 	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
270 	.total_size	= BUFFERED_FLASH_TOTAL_SIZE * 2,
271 	.name		= "5709/5716 buffered flash (256kB)",
272 };
273 
274 
275 /****************************************************************************/
276 /* FreeBSD device entry points.                                             */
277 /****************************************************************************/
278 static int  bce_probe				(device_t);
279 static int  bce_attach				(device_t);
280 static int  bce_detach				(device_t);
281 static int  bce_shutdown			(device_t);
282 
283 
284 /****************************************************************************/
285 /* BCE Debug Data Structure Dump Routines                                   */
286 /****************************************************************************/
287 #ifdef BCE_DEBUG
288 static u32	bce_reg_rd				(struct bce_softc *, u32);
289 static void	bce_reg_wr				(struct bce_softc *, u32, u32);
290 static void	bce_reg_wr16			(struct bce_softc *, u32, u16);
291 static u32  bce_ctx_rd				(struct bce_softc *, u32, u32);
292 static void bce_dump_enet           (struct bce_softc *, struct mbuf *);
293 static void bce_dump_mbuf 			(struct bce_softc *, struct mbuf *);
294 static void bce_dump_tx_mbuf_chain	(struct bce_softc *, u16, int);
295 static void bce_dump_rx_mbuf_chain	(struct bce_softc *, u16, int);
296 #ifdef ZERO_COPY_SOCKETS
297 static void bce_dump_pg_mbuf_chain	(struct bce_softc *, u16, int);
298 #endif
299 static void bce_dump_txbd			(struct bce_softc *, int, struct tx_bd *);
300 static void bce_dump_rxbd			(struct bce_softc *, int, struct rx_bd *);
301 #ifdef ZERO_COPY_SOCKETS
302 static void bce_dump_pgbd			(struct bce_softc *, int, struct rx_bd *);
303 #endif
304 static void bce_dump_l2fhdr			(struct bce_softc *, int, struct l2_fhdr *);
305 static void bce_dump_ctx			(struct bce_softc *, u16);
306 static void bce_dump_ftqs			(struct bce_softc *);
307 static void bce_dump_tx_chain		(struct bce_softc *, u16, int);
308 static void bce_dump_rx_chain		(struct bce_softc *, u16, int);
309 #ifdef ZERO_COPY_SOCKETS
310 static void bce_dump_pg_chain		(struct bce_softc *, u16, int);
311 #endif
312 static void bce_dump_status_block	(struct bce_softc *);
313 static void bce_dump_stats_block	(struct bce_softc *);
314 static void bce_dump_driver_state	(struct bce_softc *);
315 static void bce_dump_hw_state		(struct bce_softc *);
316 static void bce_dump_mq_regs        (struct bce_softc *);
317 static void bce_dump_bc_state		(struct bce_softc *);
318 static void bce_dump_txp_state		(struct bce_softc *, int);
319 static void bce_dump_rxp_state		(struct bce_softc *, int);
320 static void bce_dump_tpat_state		(struct bce_softc *, int);
321 static void bce_dump_cp_state		(struct bce_softc *, int);
322 static void bce_dump_com_state		(struct bce_softc *, int);
323 static void bce_breakpoint			(struct bce_softc *);
324 #endif
325 
326 
327 /****************************************************************************/
328 /* BCE Register/Memory Access Routines                                      */
329 /****************************************************************************/
330 static u32  bce_reg_rd_ind			(struct bce_softc *, u32);
331 static void bce_reg_wr_ind			(struct bce_softc *, u32, u32);
332 static void bce_ctx_wr				(struct bce_softc *, u32, u32, u32);
333 static int  bce_miibus_read_reg		(device_t, int, int);
334 static int  bce_miibus_write_reg	(device_t, int, int, int);
335 static void bce_miibus_statchg		(device_t);
336 
337 
338 /****************************************************************************/
339 /* BCE NVRAM Access Routines                                                */
340 /****************************************************************************/
341 static int  bce_acquire_nvram_lock	(struct bce_softc *);
342 static int  bce_release_nvram_lock	(struct bce_softc *);
343 static void bce_enable_nvram_access	(struct bce_softc *);
344 static void	bce_disable_nvram_access(struct bce_softc *);
345 static int  bce_nvram_read_dword	(struct bce_softc *, u32, u8 *, u32);
346 static int  bce_init_nvram			(struct bce_softc *);
347 static int  bce_nvram_read			(struct bce_softc *, u32, u8 *, int);
348 static int  bce_nvram_test			(struct bce_softc *);
349 #ifdef BCE_NVRAM_WRITE_SUPPORT
350 static int  bce_enable_nvram_write	(struct bce_softc *);
351 static void bce_disable_nvram_write	(struct bce_softc *);
352 static int  bce_nvram_erase_page	(struct bce_softc *, u32);
353 static int  bce_nvram_write_dword	(struct bce_softc *, u32, u8 *, u32);
354 static int  bce_nvram_write			(struct bce_softc *, u32, u8 *, int);
355 #endif
356 
357 /****************************************************************************/
358 /* BCE Media, DMA, and Resource Allocation Routines                         */
359 /****************************************************************************/
360 static void bce_get_media			(struct bce_softc *);
361 static void bce_dma_map_addr		(void *, bus_dma_segment_t *, int, int);
362 static int  bce_dma_alloc			(device_t);
363 static void bce_dma_free			(struct bce_softc *);
364 static void bce_release_resources	(struct bce_softc *);
365 
366 /****************************************************************************/
367 /* BCE Firmware Synchronization and Load                                    */
368 /****************************************************************************/
369 static int  bce_fw_sync				(struct bce_softc *, u32);
370 static void bce_load_rv2p_fw		(struct bce_softc *, u32 *, u32, u32);
371 static void bce_load_cpu_fw			(struct bce_softc *, struct cpu_reg *, struct fw_info *);
372 static void bce_init_rxp_cpu		(struct bce_softc *);
373 static void bce_init_txp_cpu 		(struct bce_softc *);
374 static void bce_init_tpat_cpu		(struct bce_softc *);
375 static void bce_init_cp_cpu		  	(struct bce_softc *);
376 static void bce_init_com_cpu	  	(struct bce_softc *);
377 static void bce_init_cpus			(struct bce_softc *);
378 
379 static void	bce_print_adapter_info	(struct bce_softc *);
380 static void bce_probe_pci_caps		(device_t, struct bce_softc *);
381 static void bce_stop				(struct bce_softc *);
382 static int  bce_reset				(struct bce_softc *, u32);
383 static int  bce_chipinit 			(struct bce_softc *);
384 static int  bce_blockinit 			(struct bce_softc *);
385 
386 static int  bce_init_tx_chain		(struct bce_softc *);
387 static void bce_free_tx_chain		(struct bce_softc *);
388 
389 static int  bce_get_rx_buf			(struct bce_softc *, struct mbuf *, u16 *, u16 *, u32 *);
390 static int  bce_init_rx_chain		(struct bce_softc *);
391 static void bce_fill_rx_chain		(struct bce_softc *);
392 static void bce_free_rx_chain		(struct bce_softc *);
393 
394 #ifdef ZERO_COPY_SOCKETS
395 static int  bce_get_pg_buf			(struct bce_softc *, struct mbuf *, u16 *, u16 *);
396 static int  bce_init_pg_chain		(struct bce_softc *);
397 static void bce_fill_pg_chain		(struct bce_softc *);
398 static void bce_free_pg_chain		(struct bce_softc *);
399 #endif
400 
401 static int  bce_tx_encap			(struct bce_softc *, struct mbuf **);
402 static void bce_start_locked		(struct ifnet *);
403 static void bce_start				(struct ifnet *);
404 static int  bce_ioctl				(struct ifnet *, u_long, caddr_t);
405 static void bce_watchdog			(struct bce_softc *);
406 static int  bce_ifmedia_upd			(struct ifnet *);
407 static void bce_ifmedia_upd_locked	(struct ifnet *);
408 static void bce_ifmedia_sts			(struct ifnet *, struct ifmediareq *);
409 static void bce_init_locked			(struct bce_softc *);
410 static void bce_init				(void *);
411 static void bce_mgmt_init_locked	(struct bce_softc *sc);
412 
413 static void bce_init_ctx			(struct bce_softc *);
414 static void bce_get_mac_addr		(struct bce_softc *);
415 static void bce_set_mac_addr		(struct bce_softc *);
416 static void bce_phy_intr			(struct bce_softc *);
417 static inline u16 bce_get_hw_rx_cons(struct bce_softc *);
418 static void bce_rx_intr				(struct bce_softc *);
419 static void bce_tx_intr				(struct bce_softc *);
420 static void bce_disable_intr		(struct bce_softc *);
421 static void bce_enable_intr			(struct bce_softc *, int);
422 
423 static void bce_intr				(void *);
424 static void bce_set_rx_mode			(struct bce_softc *);
425 static void bce_stats_update		(struct bce_softc *);
426 static void bce_tick				(void *);
427 static void bce_pulse				(void *);
428 static void bce_add_sysctls			(struct bce_softc *);
429 
430 
431 /****************************************************************************/
432 /* FreeBSD device dispatch table.                                           */
433 /****************************************************************************/
434 static device_method_t bce_methods[] = {
435 	/* Device interface (device_if.h) */
436 	DEVMETHOD(device_probe,		bce_probe),
437 	DEVMETHOD(device_attach,	bce_attach),
438 	DEVMETHOD(device_detach,	bce_detach),
439 	DEVMETHOD(device_shutdown,	bce_shutdown),
440 /* Supported by device interface but not used here. */
441 /*	DEVMETHOD(device_identify,	bce_identify),      */
442 /*	DEVMETHOD(device_suspend,	bce_suspend),       */
443 /*	DEVMETHOD(device_resume,	bce_resume),        */
444 /*	DEVMETHOD(device_quiesce,	bce_quiesce),       */
445 
446 	/* Bus interface (bus_if.h) */
447 	DEVMETHOD(bus_print_child,	bus_generic_print_child),
448 	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
449 
450 	/* MII interface (miibus_if.h) */
451 	DEVMETHOD(miibus_readreg,	bce_miibus_read_reg),
452 	DEVMETHOD(miibus_writereg,	bce_miibus_write_reg),
453 	DEVMETHOD(miibus_statchg,	bce_miibus_statchg),
454 /* Supported by MII interface but not used here.       */
455 /*	DEVMETHOD(miibus_linkchg,	bce_miibus_linkchg),   */
456 /*	DEVMETHOD(miibus_mediainit,	bce_miibus_mediainit), */
457 
458 	{ 0, 0 }
459 };
460 
461 static driver_t bce_driver = {
462 	"bce",
463 	bce_methods,
464 	sizeof(struct bce_softc)
465 };
466 
467 static devclass_t bce_devclass;
468 
469 MODULE_DEPEND(bce, pci, 1, 1, 1);
470 MODULE_DEPEND(bce, ether, 1, 1, 1);
471 MODULE_DEPEND(bce, miibus, 1, 1, 1);
472 
473 DRIVER_MODULE(bce, pci, bce_driver, bce_devclass, 0, 0);
474 DRIVER_MODULE(miibus, bce, miibus_driver, miibus_devclass, 0, 0);
475 
476 
477 /****************************************************************************/
478 /* Tunable device values                                                    */
479 /****************************************************************************/
480 SYSCTL_NODE(_hw, OID_AUTO, bce, CTLFLAG_RD, 0, "bce driver parameters");
481 
482 /* Allowable values are TRUE or FALSE */
483 static int bce_tso_enable = TRUE;
484 TUNABLE_INT("hw.bce.tso_enable", &bce_tso_enable);
485 SYSCTL_UINT(_hw_bce, OID_AUTO, tso_enable, CTLFLAG_RDTUN, &bce_tso_enable, 0,
486 "TSO Enable/Disable");
487 
488 /* Allowable values are 0 (IRQ), 1 (MSI/IRQ), and 2 (MSI-X/MSI/IRQ) */
489 /* ToDo: Add MSI-X support. */
490 static int bce_msi_enable = 1;
491 TUNABLE_INT("hw.bce.msi_enable", &bce_msi_enable);
492 SYSCTL_UINT(_hw_bce, OID_AUTO, msi_enable, CTLFLAG_RDTUN, &bce_msi_enable, 0,
493 "MSI-X|MSI|INTx selector");
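
/*
 * Example: both tunables above can be set from the loader, e.g. in
 * /boot/loader.conf:
 *   hw.bce.msi_enable="0"	# force INTx interrupts
 *   hw.bce.tso_enable="0"	# disable TSO
 */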
494 
495 /* ToDo: Add tunable to enable/disable strict MTU handling. */
496 /* Currently allows "loose" RX MTU checking (i.e. sets the  */
497 /* H/W RX MTU to the size of the largest receive buffer, or */
498 /* 2048 bytes).                                             */
499 
500 
501 /****************************************************************************/
502 /* Device probe function.                                                   */
503 /*                                                                          */
504 /* Compares the device to the driver's list of supported devices and        */
505 /* reports back to the OS whether this is the right driver for the device.  */
506 /*                                                                          */
507 /* Returns:                                                                 */
508 /*   BUS_PROBE_DEFAULT on success, positive value on failure.               */
509 /****************************************************************************/
510 static int
511 bce_probe(device_t dev)
512 {
513 	struct bce_type *t;
514 	struct bce_softc *sc;
515 	char *descbuf;
516 	u16 vid = 0, did = 0, svid = 0, sdid = 0;
517 
518 	t = bce_devs;
519 
520 	sc = device_get_softc(dev);
521 	bzero(sc, sizeof(struct bce_softc));
522 	sc->bce_unit = device_get_unit(dev);
523 	sc->bce_dev = dev;
524 
525 	/* Get the data for the device to be probed. */
526 	vid  = pci_get_vendor(dev);
527 	did  = pci_get_device(dev);
528 	svid = pci_get_subvendor(dev);
529 	sdid = pci_get_subdevice(dev);
530 
531 	DBPRINT(sc, BCE_EXTREME_LOAD,
532 		"%s(); VID = 0x%04X, DID = 0x%04X, SVID = 0x%04X, "
533 		"SDID = 0x%04X\n", __FUNCTION__, vid, did, svid, sdid);
534 
535 	/* Look through the list of known devices for a match. */
536 	while(t->bce_name != NULL) {
537 
538 		if ((vid == t->bce_vid) && (did == t->bce_did) &&
539 			((svid == t->bce_svid) || (t->bce_svid == PCI_ANY_ID)) &&
540 			((sdid == t->bce_sdid) || (t->bce_sdid == PCI_ANY_ID))) {
541 
542 			descbuf = malloc(BCE_DEVDESC_MAX, M_TEMP, M_NOWAIT);
543 
544 			if (descbuf == NULL)
545 				return(ENOMEM);
546 
547 			/* Print out the device identity. */
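			/* The upper nibble of the PCI revision ID encodes the */
			/* stepping letter ('A' + n) and the lower nibble the  */
			/* revision number, e.g. revision 0x11 reads as "B1".  */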
548 			snprintf(descbuf, BCE_DEVDESC_MAX, "%s (%c%d)",
549 				t->bce_name,
550 			    (((pci_read_config(dev, PCIR_REVID, 4) & 0xf0) >> 4) + 'A'),
551 			    (pci_read_config(dev, PCIR_REVID, 4) & 0xf));
552 
553 			device_set_desc_copy(dev, descbuf);
554 			free(descbuf, M_TEMP);
555 			return(BUS_PROBE_DEFAULT);
556 		}
557 		t++;
558 	}
559 
560 	return(ENXIO);
561 }
562 
563 
564 /****************************************************************************/
565 /* Adapter Information Print Function.                                      */
566 /*                                                                          */
567 /* Prints a summary of the adapter's ASIC revision, bus type and speed,     */
568 /* firmware version, and enabled feature flags to the console.              */
569 /*                                                                          */
570 /* Returns:                                                                 */
571 /*   None.                                                                  */
572 /****************************************************************************/
573 static void
574 bce_print_adapter_info(struct bce_softc *sc)
575 {
576 	DBENTER(BCE_VERBOSE_LOAD);
577 
578 	BCE_PRINTF("ASIC (0x%08X); ", sc->bce_chipid);
579 	printf("Rev (%c%d); ", ((BCE_CHIP_ID(sc) & 0xf000) >> 12) + 'A',
580 		((BCE_CHIP_ID(sc) & 0x0ff0) >> 4));
581 
582 	/* Bus info. */
583 	if (sc->bce_flags & BCE_PCIE_FLAG) {
584 		printf("Bus (PCIe x%d, ", sc->link_width);
585 		switch (sc->link_speed) {
586 			case 1: printf("2.5Gbps); "); break;
587 			case 2:	printf("5Gbps); "); break;
588 			default: printf("Unknown link speed); ");
589 		}
590 	} else {
591 		printf("Bus (PCI%s, %s, %dMHz); ",
592 			((sc->bce_flags & BCE_PCIX_FLAG) ? "-X" : ""),
593 			((sc->bce_flags & BCE_PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
594 			sc->bus_speed_mhz);
595 	}
596 
597 	/* Firmware version and device features. */
598 	printf("F/W (0x%08X); Flags( ", sc->bce_fw_ver);
599 #ifdef ZERO_COPY_SOCKETS
600 	printf("SPLT ");
601 #endif
602 	if (sc->bce_flags & BCE_MFW_ENABLE_FLAG)
603 		printf("MFW ");
604 	if (sc->bce_flags & BCE_USING_MSI_FLAG)
605 		printf("MSI ");
606 	if (sc->bce_flags & BCE_USING_MSIX_FLAG)
607 		printf("MSI-X ");
608 	if (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG)
609 		printf("2.5G ");
610 	printf(")\n");
611 
612 	DBEXIT(BCE_VERBOSE_LOAD);
613 }
614 
615 
616 /****************************************************************************/
617 /* PCI Capabilities Probe Function.                                         */
618 /*                                                                          */
619 /* Walks the PCI capabilities list for the device to find what features are */
620 /* supported.                                                               */
621 /*                                                                          */
622 /* Returns:                                                                 */
623 /*   None.                                                                  */
624 /****************************************************************************/
625 static void
626 bce_probe_pci_caps(device_t dev, struct bce_softc *sc)
627 {
628 	u32 reg;
629 
630 	DBENTER(BCE_VERBOSE_LOAD);
631 
632 	/* Check if PCI-X capability is enabled. */
633 	if (pci_find_extcap(dev, PCIY_PCIX, &reg) == 0) {
634 		if (reg != 0)
635 			sc->bce_cap_flags |= BCE_PCIX_CAPABLE_FLAG;
636 	}
637 
638 	/* Check if PCIe capability is enabled. */
639 	if (pci_find_extcap(dev, PCIY_EXPRESS, &reg) == 0) {
640 		if (reg != 0) {
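			/* The PCIe Link Status register lives at offset 0x12  */
			/* in the PCIe capability; the negotiated link speed is */
			/* in bits 3:0 and the link width in bits 9:4.          */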
641 			u16 link_status = pci_read_config(dev, reg + 0x12, 2);
642 			DBPRINT(sc, BCE_INFO_LOAD, "PCIe link_status = 0x%08X\n",
643 				link_status);
644 			sc->link_speed = link_status & 0xf;
645 			sc->link_width = (link_status >> 4) & 0x3f;
646 			sc->bce_cap_flags |= BCE_PCIE_CAPABLE_FLAG;
647 			sc->bce_flags |= BCE_PCIE_FLAG;
648 		}
649 	}
650 
651 	/* Check if MSI capability is enabled. */
652 	if (pci_find_extcap(dev, PCIY_MSI, &reg) == 0) {
653 		if (reg != 0)
654 			sc->bce_cap_flags |= BCE_MSI_CAPABLE_FLAG;
655 	}
656 
657 	/* Check if MSI-X capability is enabled. */
658 	if (pci_find_extcap(dev, PCIY_MSIX, &reg) == 0) {
659 		if (reg != 0)
660 			sc->bce_cap_flags |= BCE_MSIX_CAPABLE_FLAG;
661 	}
662 
663 	DBEXIT(BCE_VERBOSE_LOAD);
664 }
665 
666 
667 /****************************************************************************/
668 /* Device attach function.                                                  */
669 /*                                                                          */
670 /* Allocates device resources, performs secondary chip identification,      */
671 /* resets and initializes the hardware, and initializes driver instance     */
672 /* variables.                                                               */
673 /*                                                                          */
674 /* Returns:                                                                 */
675 /*   0 on success, positive value on failure.                               */
676 /****************************************************************************/
677 static int
678 bce_attach(device_t dev)
679 {
680 	struct bce_softc *sc;
681 	struct ifnet *ifp;
682 	u32 val;
683 	int error, rid, rc = 0;
684 
685 	sc = device_get_softc(dev);
686 	sc->bce_dev = dev;
687 
688 	DBENTER(BCE_VERBOSE_LOAD | BCE_VERBOSE_RESET);
689 
690 	sc->bce_unit = device_get_unit(dev);
691 
692 	/* Set initial device and PHY flags */
693 	sc->bce_flags = 0;
694 	sc->bce_phy_flags = 0;
695 
696 	pci_enable_busmaster(dev);
697 
698 	/* Allocate PCI memory resources. */
699 	rid = PCIR_BAR(0);
700 	sc->bce_res_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
701 		&rid, RF_ACTIVE);
702 
703 	if (sc->bce_res_mem == NULL) {
704 		BCE_PRINTF("%s(%d): PCI memory allocation failed\n",
705 			__FILE__, __LINE__);
706 		rc = ENXIO;
707 		goto bce_attach_fail;
708 	}
709 
710 	/* Get various resource handles. */
711 	sc->bce_btag    = rman_get_bustag(sc->bce_res_mem);
712 	sc->bce_bhandle = rman_get_bushandle(sc->bce_res_mem);
713 	sc->bce_vhandle = (vm_offset_t) rman_get_virtual(sc->bce_res_mem);
714 
715 	bce_probe_pci_caps(dev, sc);
716 
717 	rid = 1;
718 #if 0
719 	/* Try allocating MSI-X interrupts. */
720 	if ((sc->bce_cap_flags & BCE_MSIX_CAPABLE_FLAG) &&
721 		(bce_msi_enable >= 2) &&
722 		((sc->bce_res_irq = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
723 		&rid, RF_ACTIVE)) != NULL)) {
724 
725 		msi_needed = sc->bce_msi_count = 1;
726 
727 		if (((error = pci_alloc_msix(dev, &sc->bce_msi_count)) != 0) ||
728 			(sc->bce_msi_count != msi_needed)) {
729 			BCE_PRINTF("%s(%d): MSI-X allocation failed! Requested = %d,"
730 				"Received = %d, error = %d\n", __FILE__, __LINE__,
731 				msi_needed, sc->bce_msi_count, error);
732 			sc->bce_msi_count = 0;
733 			pci_release_msi(dev);
734 			bus_release_resource(dev, SYS_RES_MEMORY, rid,
735 				sc->bce_res_irq);
736 			sc->bce_res_irq = NULL;
737 		} else {
738 			DBPRINT(sc, BCE_INFO_LOAD, "%s(): Using MSI-X interrupt.\n",
739 				__FUNCTION__);
740 			sc->bce_flags |= BCE_USING_MSIX_FLAG;
741 			sc->bce_intr = bce_intr;
742 		}
743 	}
744 #endif
745 
746 	/* Try allocating a MSI interrupt. */
747 	if ((sc->bce_cap_flags & BCE_MSI_CAPABLE_FLAG) &&
748 		(bce_msi_enable >= 1) && (sc->bce_msi_count == 0)) {
749 		sc->bce_msi_count = 1;
750 		if ((error = pci_alloc_msi(dev, &sc->bce_msi_count)) != 0) {
751 			BCE_PRINTF("%s(%d): MSI allocation failed! error = %d\n",
752 				__FILE__, __LINE__, error);
753 			sc->bce_msi_count = 0;
754 			pci_release_msi(dev);
755 		} else {
756 			DBPRINT(sc, BCE_INFO_LOAD, "%s(): Using MSI interrupt.\n",
757 				__FUNCTION__);
758 			sc->bce_flags |= BCE_USING_MSI_FLAG;
759 			if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
760 				(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716))
761 				sc->bce_flags |= BCE_ONE_SHOT_MSI_FLAG;
762 			sc->bce_irq_rid = 1;
763 			sc->bce_intr = bce_intr;
764 		}
765 	}
766 
767 	/* Try allocating a legacy interrupt. */
768 	if (sc->bce_msi_count == 0) {
769 		DBPRINT(sc, BCE_INFO_LOAD, "%s(): Using INTx interrupt.\n",
770 			__FUNCTION__);
771 		rid = 0;
772 		sc->bce_intr = bce_intr;
773 	}
774 
775 	sc->bce_res_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
776 		&rid, RF_SHAREABLE | RF_ACTIVE);
777 
778 	sc->bce_irq_rid = rid;
779 
780 	/* Report any IRQ allocation errors. */
781 	if (sc->bce_res_irq == NULL) {
782 		BCE_PRINTF("%s(%d): PCI map interrupt failed!\n",
783 			__FILE__, __LINE__);
784 		rc = ENXIO;
785 		goto bce_attach_fail;
786 	}
787 
788 	/* Initialize mutex for the current device instance. */
789 	BCE_LOCK_INIT(sc, device_get_nameunit(dev));
790 
791 	/*
792 	 * Configure byte swap and enable indirect register access.
793 	 * Rely on CPU to do target byte swapping on big endian systems.
794 	 * Access to registers outside of PCI configuration space is not
795 	 * valid until this is done.
796 	 */
797 	pci_write_config(dev, BCE_PCICFG_MISC_CONFIG,
798 			       BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
799 			       BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP, 4);
800 
801 	/* Save ASIC revision info. */
802 	sc->bce_chipid =  REG_RD(sc, BCE_MISC_ID);
803 
804 	/* Weed out any non-production controller revisions. */
805 	switch(BCE_CHIP_ID(sc)) {
806 		case BCE_CHIP_ID_5706_A0:
807 		case BCE_CHIP_ID_5706_A1:
808 		case BCE_CHIP_ID_5708_A0:
809 		case BCE_CHIP_ID_5708_B0:
810 		case BCE_CHIP_ID_5709_A0:
811 		case BCE_CHIP_ID_5709_B0:
812 		case BCE_CHIP_ID_5709_B1:
813 		case BCE_CHIP_ID_5709_B2:
814 			BCE_PRINTF("%s(%d): Unsupported controller revision (%c%d)!\n",
815 				__FILE__, __LINE__,
816 				(((pci_read_config(dev, PCIR_REVID, 4) & 0xf0) >> 4) + 'A'),
817 			    (pci_read_config(dev, PCIR_REVID, 4) & 0xf));
818 			rc = ENODEV;
819 			goto bce_attach_fail;
820 	}
821 
822 	/*
823 	 * The embedded PCIe to PCI-X bridge (EPB)
824 	 * in the 5708 cannot address memory above
825 	 * 40 bits (E7_5708CB1_23043 & E6_5708SB1_23043).
826 	 */
827 	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708)
828 		sc->max_bus_addr = BCE_BUS_SPACE_MAXADDR;
829 	else
830 		sc->max_bus_addr = BUS_SPACE_MAXADDR;
831 
832 	/*
833 	 * Find the base address for shared memory access.
834 	 * Newer versions of bootcode use a signature and offset
835 	 * while older versions use a fixed address.
836 	 */
837 	val = REG_RD_IND(sc, BCE_SHM_HDR_SIGNATURE);
838 	if ((val & BCE_SHM_HDR_SIGNATURE_SIG_MASK) == BCE_SHM_HDR_SIGNATURE_SIG)
839 		/* Multi-port devices use different offsets in shared memory. */
840 		sc->bce_shmem_base = REG_RD_IND(sc, BCE_SHM_HDR_ADDR_0 +
841 			(pci_get_function(sc->bce_dev) << 2));
842 	else
843 		sc->bce_shmem_base = HOST_VIEW_SHMEM_BASE;
844 
845 	DBPRINT(sc, BCE_VERBOSE_FIRMWARE, "%s(): bce_shmem_base = 0x%08X\n",
846 		__FUNCTION__, sc->bce_shmem_base);
847 
848 	/* Fetch the bootcode revision. */
849 	sc->bce_fw_ver = REG_RD_IND(sc, sc->bce_shmem_base +
850 		BCE_DEV_INFO_BC_REV);
851 
852 	/* Check if any management firmware is running. */
853 	val = REG_RD_IND(sc, sc->bce_shmem_base + BCE_PORT_FEATURE);
854 	if (val & (BCE_PORT_FEATURE_ASF_ENABLED | BCE_PORT_FEATURE_IMD_ENABLED))
855 		sc->bce_flags |= BCE_MFW_ENABLE_FLAG;
856 
857 	/* Get PCI bus information (speed and type). */
858 	val = REG_RD(sc, BCE_PCICFG_MISC_STATUS);
859 	if (val & BCE_PCICFG_MISC_STATUS_PCIX_DET) {
860 		u32 clkreg;
861 
862 		sc->bce_flags |= BCE_PCIX_FLAG;
863 
864 		clkreg = REG_RD(sc, BCE_PCICFG_PCI_CLOCK_CONTROL_BITS);
865 
866 		clkreg &= BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
867 		switch (clkreg) {
868 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
869 			sc->bus_speed_mhz = 133;
870 			break;
871 
872 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
873 			sc->bus_speed_mhz = 100;
874 			break;
875 
876 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
877 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
878 			sc->bus_speed_mhz = 66;
879 			break;
880 
881 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
882 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
883 			sc->bus_speed_mhz = 50;
884 			break;
885 
886 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
887 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
888 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
889 			sc->bus_speed_mhz = 33;
890 			break;
891 		}
892 	} else {
893 		if (val & BCE_PCICFG_MISC_STATUS_M66EN)
894 			sc->bus_speed_mhz = 66;
895 		else
896 			sc->bus_speed_mhz = 33;
897 	}
898 
899 	if (val & BCE_PCICFG_MISC_STATUS_32BIT_DET)
900 		sc->bce_flags |= BCE_PCI_32BIT_FLAG;
901 
902 	/* Reset the controller and announce to the bootcode that the driver is present. */
903 	if (bce_reset(sc, BCE_DRV_MSG_CODE_RESET)) {
904 		BCE_PRINTF("%s(%d): Controller reset failed!\n",
905 			__FILE__, __LINE__);
906 		rc = ENXIO;
907 		goto bce_attach_fail;
908 	}
909 
910 	/* Initialize the controller. */
911 	if (bce_chipinit(sc)) {
912 		BCE_PRINTF("%s(%d): Controller initialization failed!\n",
913 			__FILE__, __LINE__);
914 		rc = ENXIO;
915 		goto bce_attach_fail;
916 	}
917 
918 	/* Perform NVRAM test. */
919 	if (bce_nvram_test(sc)) {
920 		BCE_PRINTF("%s(%d): NVRAM test failed!\n",
921 			__FILE__, __LINE__);
922 		rc = ENXIO;
923 		goto bce_attach_fail;
924 	}
925 
926 	/* Fetch the permanent Ethernet MAC address. */
927 	bce_get_mac_addr(sc);
928 
929 	/*
930 	 * Trip points control how many BDs
931 	 * should be ready before generating an
932 	 * interrupt while ticks control how long
933 	 * a BD can sit in the chain before
934 	 * generating an interrupt.  Set the default
935 	 * values for the RX and TX chains.
936 	 */
937 
938 #ifdef BCE_DEBUG
939 	/* Force more frequent interrupts. */
940 	sc->bce_tx_quick_cons_trip_int = 1;
941 	sc->bce_tx_quick_cons_trip     = 1;
942 	sc->bce_tx_ticks_int           = 0;
943 	sc->bce_tx_ticks               = 0;
944 
945 	sc->bce_rx_quick_cons_trip_int = 1;
946 	sc->bce_rx_quick_cons_trip     = 1;
947 	sc->bce_rx_ticks_int           = 0;
948 	sc->bce_rx_ticks               = 0;
949 #else
950 	/* Improve throughput at the expense of increased latency. */
951 	sc->bce_tx_quick_cons_trip_int = 20;
952 	sc->bce_tx_quick_cons_trip     = 20;
953 	sc->bce_tx_ticks_int           = 80;
954 	sc->bce_tx_ticks               = 80;
955 
956 	sc->bce_rx_quick_cons_trip_int = 6;
957 	sc->bce_rx_quick_cons_trip     = 6;
958 	sc->bce_rx_ticks_int           = 18;
959 	sc->bce_rx_ticks               = 18;
960 #endif
961 
962 	/* Update statistics once every second. */
963 	sc->bce_stats_ticks = 1000000 & 0xffff00;
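	/* (1,000,000 usec with the low byte masked off; the low bits of the */
	/* statistics ticks value are assumed to be reserved by hardware.)   */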
964 
965 	/* Find the media type for the adapter. */
966 	bce_get_media(sc);
967 
968 	/* Store data needed by PHY driver for backplane applications */
969 	sc->bce_shared_hw_cfg = REG_RD_IND(sc, sc->bce_shmem_base +
970 		BCE_SHARED_HW_CFG_CONFIG);
971 	sc->bce_port_hw_cfg   = REG_RD_IND(sc, sc->bce_shmem_base +
972 		BCE_PORT_HW_CFG_CONFIG);
973 
974 	/* Allocate DMA memory resources. */
975 	if (bce_dma_alloc(dev)) {
976 		BCE_PRINTF("%s(%d): DMA resource allocation failed!\n",
977 		    __FILE__, __LINE__);
978 		rc = ENXIO;
979 		goto bce_attach_fail;
980 	}
981 
982 	/* Allocate an ifnet structure. */
983 	ifp = sc->bce_ifp = if_alloc(IFT_ETHER);
984 	if (ifp == NULL) {
985 		BCE_PRINTF("%s(%d): Interface allocation failed!\n",
986 			__FILE__, __LINE__);
987 		rc = ENXIO;
988 		goto bce_attach_fail;
989 	}
990 
991 	/* Initialize the ifnet interface. */
992 	ifp->if_softc        = sc;
993 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
994 	ifp->if_flags        = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
995 	ifp->if_ioctl        = bce_ioctl;
996 	ifp->if_start        = bce_start;
997 	ifp->if_init         = bce_init;
998 	ifp->if_mtu          = ETHERMTU;
999 
1000 	if (bce_tso_enable) {
1001 		ifp->if_hwassist = BCE_IF_HWASSIST | CSUM_TSO;
1002 		ifp->if_capabilities = BCE_IF_CAPABILITIES | IFCAP_TSO4;
1003 	} else {
1004 		ifp->if_hwassist = BCE_IF_HWASSIST;
1005 		ifp->if_capabilities = BCE_IF_CAPABILITIES;
1006 	}
1007 
1008 	ifp->if_capenable    = ifp->if_capabilities;
1009 
1010 	/*
1011 	 * Assume standard mbuf sizes for buffer allocation.
1012 	 * This may change later if the MTU size is set to
1013 	 * something other than 1500.
1014 	 */
1015 #ifdef ZERO_COPY_SOCKETS
1016 	sc->rx_bd_mbuf_alloc_size = MHLEN;
1017 	/* Make sure offset is 16 byte aligned for hardware. */
1018 	sc->rx_bd_mbuf_align_pad  = roundup2((MSIZE - MHLEN), 16) -
1019 		(MSIZE - MHLEN);
1020 	sc->rx_bd_mbuf_data_len   = sc->rx_bd_mbuf_alloc_size -
1021 		sc->rx_bd_mbuf_align_pad;
1022 	sc->pg_bd_mbuf_alloc_size = MCLBYTES;
1023 #else
1024 	sc->rx_bd_mbuf_alloc_size = MCLBYTES;
1025 	sc->rx_bd_mbuf_align_pad  = roundup2(MCLBYTES, 16) - MCLBYTES;
1026 	sc->rx_bd_mbuf_data_len   = sc->rx_bd_mbuf_alloc_size -
1027 		sc->rx_bd_mbuf_align_pad;
1028 #endif
1029 
1030 	ifp->if_snd.ifq_drv_maxlen = USABLE_TX_BD;
1031 	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
1032 	IFQ_SET_READY(&ifp->if_snd);
1033 
1034 	if (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG)
1035 		ifp->if_baudrate = IF_Mbps(2500ULL);
1036 	else
1037 		ifp->if_baudrate = IF_Mbps(1000);
1038 
1039 	/* Check for an MII child bus by probing the PHY. */
1040 	if (mii_phy_probe(dev, &sc->bce_miibus, bce_ifmedia_upd,
1041 		bce_ifmedia_sts)) {
1042 		BCE_PRINTF("%s(%d): No PHY found on child MII bus!\n",
1043 			__FILE__, __LINE__);
1044 		rc = ENXIO;
1045 		goto bce_attach_fail;
1046 	}
1047 
1048 	/* Attach to the Ethernet interface list. */
1049 	ether_ifattach(ifp, sc->eaddr);
1050 
1051 #if __FreeBSD_version < 500000
1052 	callout_init(&sc->bce_tick_callout);
1053 	callout_init(&sc->bce_pulse_callout);
1054 #else
1055 	callout_init_mtx(&sc->bce_tick_callout, &sc->bce_mtx, 0);
1056 	callout_init_mtx(&sc->bce_pulse_callout, &sc->bce_mtx, 0);
1057 #endif
1058 
1059 	/* Hookup IRQ last. */
1060 	rc = bus_setup_intr(dev, sc->bce_res_irq, INTR_TYPE_NET | INTR_MPSAFE,
1061 		NULL, bce_intr, sc, &sc->bce_intrhand);
1062 
1063 	if (rc) {
1064 		BCE_PRINTF("%s(%d): Failed to setup IRQ!\n",
1065 			__FILE__, __LINE__);
1066 		bce_detach(dev);
1067 		goto bce_attach_exit;
1068 	}
1069 
1070 	/*
1071 	 * At this point we've acquired all the resources
1072 	 * we need to run so there's no turning back, we're
1073 	 * cleared for launch.
1074 	 */
1075 
1076 	/* Print some important debugging info. */
1077 	DBRUNMSG(BCE_INFO, bce_dump_driver_state(sc));
1078 
1079 	/* Add the supported sysctls to the kernel. */
1080 	bce_add_sysctls(sc);
1081 
1082 	BCE_LOCK(sc);
1083 
1084 	/*
1085 	 * The chip reset earlier notified the bootcode that
1086 	 * a driver is present.  We now need to start our pulse
1087 	 * routine so that the bootcode is reminded that we're
1088 	 * still running.
1089 	 */
1090 	bce_pulse(sc);
1091 
1092 	bce_mgmt_init_locked(sc);
1093 	BCE_UNLOCK(sc);
1094 
1095 	/* Finally, print some useful adapter info */
1096 	bce_print_adapter_info(sc);
1097 	DBPRINT(sc, BCE_FATAL, "%s(): sc = %p\n",
1098 		__FUNCTION__, sc);
1099 
1100 	goto bce_attach_exit;
1101 
1102 bce_attach_fail:
1103 	bce_release_resources(sc);
1104 
1105 bce_attach_exit:
1106 
1107 	DBEXIT(BCE_VERBOSE_LOAD | BCE_VERBOSE_RESET);
1108 
1109 	return(rc);
1110 }
1111 
1112 
1113 /****************************************************************************/
1114 /* Device detach function.                                                  */
1115 /*                                                                          */
1116 /* Stops the controller, resets the controller, and releases resources.     */
1117 /*                                                                          */
1118 /* Returns:                                                                 */
1119 /*   0 on success, positive value on failure.                               */
1120 /****************************************************************************/
1121 static int
1122 bce_detach(device_t dev)
1123 {
1124 	struct bce_softc *sc = device_get_softc(dev);
1125 	struct ifnet *ifp;
1126 	u32 msg;
1127 
1128 	DBENTER(BCE_VERBOSE_UNLOAD | BCE_VERBOSE_RESET);
1129 
1130 	ifp = sc->bce_ifp;
1131 
1132 	/* Stop and reset the controller. */
1133 	BCE_LOCK(sc);
1134 
1135 	/* Stop the pulse so the bootcode can go to driver absent state. */
1136 	callout_stop(&sc->bce_pulse_callout);
1137 
1138 	bce_stop(sc);
1139 	if (sc->bce_flags & BCE_NO_WOL_FLAG)
1140 		msg = BCE_DRV_MSG_CODE_UNLOAD_LNK_DN;
1141 	else
1142 		msg = BCE_DRV_MSG_CODE_UNLOAD;
1143 	bce_reset(sc, msg);
1144 
1145 	BCE_UNLOCK(sc);
1146 
1147 	ether_ifdetach(ifp);
1148 
1149 	/* If we have a child device on the MII bus, remove it too. */
1150 	bus_generic_detach(dev);
1151 	device_delete_child(dev, sc->bce_miibus);
1152 
1153 	/* Release all remaining resources. */
1154 	bce_release_resources(sc);
1155 
1156 	DBEXIT(BCE_VERBOSE_UNLOAD | BCE_VERBOSE_RESET);
1157 
1158 	return(0);
1159 }
1160 
1161 
1162 /****************************************************************************/
1163 /* Device shutdown function.                                                */
1164 /*                                                                          */
1165 /* Stops and resets the controller.                                         */
1166 /*                                                                          */
1167 /* Returns:                                                                 */
1168 /*   0 on success, positive value on failure.                               */
1169 /****************************************************************************/
1170 static int
1171 bce_shutdown(device_t dev)
1172 {
1173 	struct bce_softc *sc = device_get_softc(dev);
1174 	u32 msg;
1175 
1176 	DBENTER(BCE_VERBOSE);
1177 
1178 	BCE_LOCK(sc);
1179 	bce_stop(sc);
1180 	if (sc->bce_flags & BCE_NO_WOL_FLAG)
1181 		msg = BCE_DRV_MSG_CODE_UNLOAD_LNK_DN;
1182 	else
1183 		msg = BCE_DRV_MSG_CODE_UNLOAD;
1184 	bce_reset(sc, msg);
1185 	BCE_UNLOCK(sc);
1186 
1187 	DBEXIT(BCE_VERBOSE);
1188 
1189 	return (0);
1190 }
1191 
1192 
1193 #ifdef BCE_DEBUG
1194 /****************************************************************************/
1195 /* Register read.                                                           */
1196 /*                                                                          */
1197 /* Returns:                                                                 */
1198 /*   The value of the register.                                             */
1199 /****************************************************************************/
1200 static u32
1201 bce_reg_rd(struct bce_softc *sc, u32 offset)
1202 {
1203 	u32 val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, offset);
1204 	DBPRINT(sc, BCE_INSANE_REG, "%s(); offset = 0x%08X, val = 0x%08X\n",
1205 		__FUNCTION__, offset, val);
1206 	return val;
1207 }
1208 
1209 
1210 /****************************************************************************/
1211 /* Register write (16 bit).                                                 */
1212 /*                                                                          */
1213 /* Returns:                                                                 */
1214 /*   Nothing.                                                               */
1215 /****************************************************************************/
1216 static void
1217 bce_reg_wr16(struct bce_softc *sc, u32 offset, u16 val)
1218 {
1219 	DBPRINT(sc, BCE_INSANE_REG, "%s(); offset = 0x%08X, val = 0x%04X\n",
1220 		__FUNCTION__, offset, val);
1221 	bus_space_write_2(sc->bce_btag, sc->bce_bhandle, offset, val);
1222 }
1223 
1224 
1225 /****************************************************************************/
1226 /* Register write.                                                          */
1227 /*                                                                          */
1228 /* Returns:                                                                 */
1229 /*   Nothing.                                                               */
1230 /****************************************************************************/
1231 static void
1232 bce_reg_wr(struct bce_softc *sc, u32 offset, u32 val)
1233 {
1234 	DBPRINT(sc, BCE_INSANE_REG, "%s(); offset = 0x%08X, val = 0x%08X\n",
1235 		__FUNCTION__, offset, val);
1236 	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, offset, val);
1237 }
1238 #endif
1239 
1240 /****************************************************************************/
1241 /* Indirect register read.                                                  */
1242 /*                                                                          */
1243 /* Reads NetXtreme II registers using an index/data register pair in PCI    */
1244 /* configuration space.  Using this mechanism avoids issues with posted     */
1245 /* reads but is much slower than memory-mapped I/O.                         */
1246 /*                                                                          */
1247 /* Returns:                                                                 */
1248 /*   The value of the register.                                             */
1249 /****************************************************************************/
1250 static u32
1251 bce_reg_rd_ind(struct bce_softc *sc, u32 offset)
1252 {
1253 	device_t dev;
1254 	dev = sc->bce_dev;
1255 
1256 	pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4);
1257 #ifdef BCE_DEBUG
1258 	{
1259 		u32 val;
1260 		val = pci_read_config(dev, BCE_PCICFG_REG_WINDOW, 4);
1261 		DBPRINT(sc, BCE_INSANE_REG, "%s(); offset = 0x%08X, val = 0x%08X\n",
1262 			__FUNCTION__, offset, val);
1263 		return val;
1264 	}
1265 #else
1266 	return pci_read_config(dev, BCE_PCICFG_REG_WINDOW, 4);
1267 #endif
1268 }
1269 
1270 
1271 /****************************************************************************/
1272 /* Indirect register write.                                                 */
1273 /*                                                                          */
1274 /* Writes NetXtreme II registers using an index/data register pair in PCI   */
1275 /* configuration space.  Using this mechanism avoids issues with posted     */
1276 /* writes but is much slower than memory-mapped I/O.                        */
1277 /*                                                                          */
1278 /* Returns:                                                                 */
1279 /*   Nothing.                                                               */
1280 /****************************************************************************/
1281 static void
1282 bce_reg_wr_ind(struct bce_softc *sc, u32 offset, u32 val)
1283 {
1284 	device_t dev;
1285 	dev = sc->bce_dev;
1286 
1287 	DBPRINT(sc, BCE_INSANE_REG, "%s(); offset = 0x%08X, val = 0x%08X\n",
1288 		__FUNCTION__, offset, val);
1289 
1290 	pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4);
1291 	pci_write_config(dev, BCE_PCICFG_REG_WINDOW, val, 4);
1292 }
1293 
1294 
1295 #ifdef BCE_DEBUG
1296 /****************************************************************************/
1297 /* Context memory read.                                                     */
1298 /*                                                                          */
1299 /* The NetXtreme II controller uses context memory to track connection      */
1300 /* information for L2 and higher network protocols.                         */
1301 /*                                                                          */
1302 /* Returns:                                                                 */
1303 /*   The requested 32 bit value of context memory.                          */
1304 /****************************************************************************/
1305 static u32
1306 bce_ctx_rd(struct bce_softc *sc, u32 cid_addr, u32 ctx_offset)
1307 {
1308 	u32 idx, offset, retry_cnt = 5, val;
1309 
1310 	DBRUNIF((cid_addr > MAX_CID_ADDR || ctx_offset & 0x3 || cid_addr & CTX_MASK),
1311 		BCE_PRINTF("%s(): Invalid CID address: 0x%08X.\n",
1312 			__FUNCTION__, cid_addr));
1313 
1314 	offset = ctx_offset + cid_addr;
1315 
1316 	if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
1317 		(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
1318 
1319 		REG_WR(sc, BCE_CTX_CTX_CTRL, (offset | BCE_CTX_CTX_CTRL_READ_REQ));
1320 
1321 		for (idx = 0; idx < retry_cnt; idx++) {
1322 			val = REG_RD(sc, BCE_CTX_CTX_CTRL);
1323 			if ((val & BCE_CTX_CTX_CTRL_READ_REQ) == 0)
1324 				break;
1325 			DELAY(5);
1326 		}
1327 
1328 		if (val & BCE_CTX_CTX_CTRL_READ_REQ)
1329 			BCE_PRINTF("%s(%d); Unable to read CTX memory: "
1330 				"cid_addr = 0x%08X, offset = 0x%08X!\n",
1331 				__FILE__, __LINE__, cid_addr, ctx_offset);
1332 
1333 		val = REG_RD(sc, BCE_CTX_CTX_DATA);
1334 	} else {
1335 		REG_WR(sc, BCE_CTX_DATA_ADR, offset);
1336 		val = REG_RD(sc, BCE_CTX_DATA);
1337 	}
1338 
1339 	DBPRINT(sc, BCE_EXTREME_CTX, "%s(); cid_addr = 0x%08X, offset = 0x%08X, "
1340 		"val = 0x%08X\n", __FUNCTION__, cid_addr, ctx_offset, val);
1341 
1342 	return(val);
1343 }
1344 #endif
1345 
1346 
1347 /****************************************************************************/
1348 /* Context memory write.                                                    */
1349 /*                                                                          */
1350 /* The NetXtreme II controller uses context memory to track connection      */
1351 /* information for L2 and higher network protocols.                         */
1352 /*                                                                          */
1353 /* Returns:                                                                 */
1354 /*   Nothing.                                                               */
1355 /****************************************************************************/
1356 static void
1357 bce_ctx_wr(struct bce_softc *sc, u32 cid_addr, u32 ctx_offset, u32 ctx_val)
1358 {
1359 	u32 idx, offset = ctx_offset + cid_addr;
1360 	u32 val, retry_cnt = 5;
1361 
1362 	DBPRINT(sc, BCE_EXTREME_CTX, "%s(); cid_addr = 0x%08X, offset = 0x%08X, "
1363 		"val = 0x%08X\n", __FUNCTION__, cid_addr, ctx_offset, ctx_val);
1364 
1365 	DBRUNIF((cid_addr > MAX_CID_ADDR || ctx_offset & 0x3 || cid_addr & CTX_MASK),
1366 		BCE_PRINTF("%s(): Invalid CID address: 0x%08X.\n",
1367 			__FUNCTION__, cid_addr));
1368 
1369 	if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
1370 		(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
1371 
1372 		REG_WR(sc, BCE_CTX_CTX_DATA, ctx_val);
1373 		REG_WR(sc, BCE_CTX_CTX_CTRL, (offset | BCE_CTX_CTX_CTRL_WRITE_REQ));
1374 
1375 		for (idx = 0; idx < retry_cnt; idx++) {
1376 			val = REG_RD(sc, BCE_CTX_CTX_CTRL);
1377 			if ((val & BCE_CTX_CTX_CTRL_WRITE_REQ) == 0)
1378 				break;
1379 			DELAY(5);
1380 		}
1381 
1382 		if (val & BCE_CTX_CTX_CTRL_WRITE_REQ)
1383 			BCE_PRINTF("%s(%d): Unable to write CTX memory: "
1384 				"cid_addr = 0x%08X, offset = 0x%08X!\n",
1385 				__FILE__, __LINE__, cid_addr, ctx_offset);
1386 
1387 	} else {
1388 		REG_WR(sc, BCE_CTX_DATA_ADR, offset);
1389 		REG_WR(sc, BCE_CTX_DATA, ctx_val);
1390 	}
1391 }
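/*
 * Illustrative sketch only: a debug-build read-back check of a context
 * memory write.  The CID value (16) and dword offset (0x00) are arbitrary
 * examples, not values used by this driver; GET_CID_ADDR() is assumed to
 * be available from if_bcereg.h, and bce_ctx_rd() is only compiled in when
 * BCE_DEBUG is defined.
 *
 *	#ifdef BCE_DEBUG
 *	bce_ctx_wr(sc, GET_CID_ADDR(16), 0x00, 0xDEADBEEF);
 *	if (bce_ctx_rd(sc, GET_CID_ADDR(16), 0x00) != 0xDEADBEEF)
 *		BCE_PRINTF("Context memory read-back mismatch!\n");
 *	#endif
 */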
1392 
1393 
1394 /****************************************************************************/
1395 /* PHY register read.                                                       */
1396 /*                                                                          */
1397 /* Implements register reads on the MII bus.                                */
1398 /*                                                                          */
1399 /* Returns:                                                                 */
1400 /*   The value of the register.                                             */
1401 /****************************************************************************/
1402 static int
1403 bce_miibus_read_reg(device_t dev, int phy, int reg)
1404 {
1405 	struct bce_softc *sc;
1406 	u32 val;
1407 	int i;
1408 
1409 	sc = device_get_softc(dev);
1410 
1411 	/* Make sure we are accessing the correct PHY address. */
1412 	if (phy != sc->bce_phy_addr) {
1413 		DBPRINT(sc, BCE_INSANE_PHY, "Invalid PHY address %d for PHY read!\n", phy);
1414 		return(0);
1415 	}
1416 
1417 	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1418 		val = REG_RD(sc, BCE_EMAC_MDIO_MODE);
1419 		val &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL;
1420 
1421 		REG_WR(sc, BCE_EMAC_MDIO_MODE, val);
1422 		REG_RD(sc, BCE_EMAC_MDIO_MODE);
1423 
1424 		DELAY(40);
1425 	}
1426 
1427 
1428 	val = BCE_MIPHY(phy) | BCE_MIREG(reg) |
1429 		BCE_EMAC_MDIO_COMM_COMMAND_READ | BCE_EMAC_MDIO_COMM_DISEXT |
1430 		BCE_EMAC_MDIO_COMM_START_BUSY;
1431 	REG_WR(sc, BCE_EMAC_MDIO_COMM, val);
1432 
1433 	for (i = 0; i < BCE_PHY_TIMEOUT; i++) {
1434 		DELAY(10);
1435 
1436 		val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
1437 		if (!(val & BCE_EMAC_MDIO_COMM_START_BUSY)) {
1438 			DELAY(5);
1439 
1440 			val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
1441 			val &= BCE_EMAC_MDIO_COMM_DATA;
1442 
1443 			break;
1444 		}
1445 	}
1446 
1447 	if (val & BCE_EMAC_MDIO_COMM_START_BUSY) {
1448 		BCE_PRINTF("%s(%d): Error: PHY read timeout! phy = %d, reg = 0x%04X\n",
1449 			__FILE__, __LINE__, phy, reg);
1450 		val = 0x0;
1451 	} else {
1452 		val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
1453 	}
1454 
1455 
1456 	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1457 		val = REG_RD(sc, BCE_EMAC_MDIO_MODE);
1458 		val |= BCE_EMAC_MDIO_MODE_AUTO_POLL;
1459 
1460 		REG_WR(sc, BCE_EMAC_MDIO_MODE, val);
1461 		REG_RD(sc, BCE_EMAC_MDIO_MODE);
1462 
1463 		DELAY(40);
1464 	}
1465 
1466 	DB_PRINT_PHY_REG(reg, val);
1467 	return (val & 0xffff);
1468 
1469 }
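/*
 * Illustrative sketch only: the miibus framework normally issues these
 * reads, but a debug hook could fetch the PHY identifier registers
 * directly.  MII_PHYIDR1/MII_PHYIDR2 are assumed to come from
 * <dev/mii/mii.h>.
 *
 *	int id1 = bce_miibus_read_reg(sc->bce_dev, sc->bce_phy_addr, MII_PHYIDR1);
 *	int id2 = bce_miibus_read_reg(sc->bce_dev, sc->bce_phy_addr, MII_PHYIDR2);
 *	BCE_PRINTF("PHY ID = 0x%04X%04X\n", id1, id2);
 */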
1470 
1471 
1472 /****************************************************************************/
1473 /* PHY register write.                                                      */
1474 /*                                                                          */
1475 /* Implements register writes on the MII bus.                               */
1476 /*                                                                          */
1477 /* Returns:                                                                 */
1478 /*   The value of the register.                                             */
1479 /****************************************************************************/
1480 static int
1481 bce_miibus_write_reg(device_t dev, int phy, int reg, int val)
1482 {
1483 	struct bce_softc *sc;
1484 	u32 val1;
1485 	int i;
1486 
1487 	sc = device_get_softc(dev);
1488 
1489 	/* Make sure we are accessing the correct PHY address. */
1490 	if (phy != sc->bce_phy_addr) {
1491 		DBPRINT(sc, BCE_INSANE_PHY, "Invalid PHY address %d for PHY write!\n", phy);
1492 		return(0);
1493 	}
1494 
1495 	DB_PRINT_PHY_REG(reg, val);
1496 
1497 	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1498 		val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE);
1499 		val1 &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL;
1500 
1501 		REG_WR(sc, BCE_EMAC_MDIO_MODE, val1);
1502 		REG_RD(sc, BCE_EMAC_MDIO_MODE);
1503 
1504 		DELAY(40);
1505 	}
1506 
1507 	val1 = BCE_MIPHY(phy) | BCE_MIREG(reg) | val |
1508 		BCE_EMAC_MDIO_COMM_COMMAND_WRITE |
1509 		BCE_EMAC_MDIO_COMM_START_BUSY | BCE_EMAC_MDIO_COMM_DISEXT;
1510 	REG_WR(sc, BCE_EMAC_MDIO_COMM, val1);
1511 
1512 	for (i = 0; i < BCE_PHY_TIMEOUT; i++) {
1513 		DELAY(10);
1514 
1515 		val1 = REG_RD(sc, BCE_EMAC_MDIO_COMM);
1516 		if (!(val1 & BCE_EMAC_MDIO_COMM_START_BUSY)) {
1517 			DELAY(5);
1518 			break;
1519 		}
1520 	}
1521 
1522 	if (val1 & BCE_EMAC_MDIO_COMM_START_BUSY)
1523 		BCE_PRINTF("%s(%d): PHY write timeout!\n",
1524 			__FILE__, __LINE__);
1525 
1526 	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1527 		val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE);
1528 		val1 |= BCE_EMAC_MDIO_MODE_AUTO_POLL;
1529 
1530 		REG_WR(sc, BCE_EMAC_MDIO_MODE, val1);
1531 		REG_RD(sc, BCE_EMAC_MDIO_MODE);
1532 
1533 		DELAY(40);
1534 	}
1535 
1536 	return 0;
1537 }
1538 
1539 
1540 /****************************************************************************/
1541 /* MII bus status change.                                                   */
1542 /*                                                                          */
1543 /* Called by the MII bus driver when the PHY establishes link to set the    */
1544 /* MAC interface registers.                                                 */
1545 /*                                                                          */
1546 /* Returns:                                                                 */
1547 /*   Nothing.                                                               */
1548 /****************************************************************************/
1549 static void
1550 bce_miibus_statchg(device_t dev)
1551 {
1552 	struct bce_softc *sc;
1553 	struct mii_data *mii;
1554 	int val;
1555 
1556 	sc = device_get_softc(dev);
1557 
1558 	DBENTER(BCE_VERBOSE_PHY);
1559 
1560 	mii = device_get_softc(sc->bce_miibus);
1561 
1562 	val = REG_RD(sc, BCE_EMAC_MODE);
1563 	val &= ~(BCE_EMAC_MODE_PORT | BCE_EMAC_MODE_HALF_DUPLEX |
1564 		BCE_EMAC_MODE_MAC_LOOP | BCE_EMAC_MODE_FORCE_LINK |
1565 		BCE_EMAC_MODE_25G);
1566 
1567 	/* Set MII or GMII interface based on the speed negotiated by the PHY. */
1568 	switch (IFM_SUBTYPE(mii->mii_media_active)) {
1569 	case IFM_10_T:
1570 		if (BCE_CHIP_NUM(sc) != BCE_CHIP_NUM_5706) {
1571 			DBPRINT(sc, BCE_INFO, "Enabling 10Mb interface.\n");
1572 			val |= BCE_EMAC_MODE_PORT_MII_10;
1573 			break;
1574 		}
1575 		/* fall-through */
1576 	case IFM_100_TX:
1577 		DBPRINT(sc, BCE_INFO, "Enabling MII interface.\n");
1578 		val |= BCE_EMAC_MODE_PORT_MII;
1579 		break;
1580 	case IFM_2500_SX:
1581 		DBPRINT(sc, BCE_INFO, "Enabling 2.5G MAC mode.\n");
1582 		val |= BCE_EMAC_MODE_25G;
1583 		/* fall-through */
1584 	case IFM_1000_T:
1585 	case IFM_1000_SX:
1586 		DBPRINT(sc, BCE_INFO, "Enabling GMII interface.\n");
1587 		val |= BCE_EMAC_MODE_PORT_GMII;
1588 		break;
1589 	default:
1590 		DBPRINT(sc, BCE_INFO, "Unknown speed, enabling default GMII "
1591 			"interface.\n");
1592 		val |= BCE_EMAC_MODE_PORT_GMII;
1593 	}
1594 
1595 	/* Set half or full duplex based on the duplex mode negotiated by the PHY. */
1596 	if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
1597 		DBPRINT(sc, BCE_INFO, "Setting Half-Duplex interface.\n");
1598 		val |= BCE_EMAC_MODE_HALF_DUPLEX;
1599 	} else
1600 		DBPRINT(sc, BCE_INFO, "Setting Full-Duplex interface.\n");
1601 
1602 	REG_WR(sc, BCE_EMAC_MODE, val);
1603 
1604 #if 0
1605 	/* ToDo: Enable flow control support in brgphy and bge. */
1606 	/* FLAG0 is set if RX is enabled and FLAG1 if TX is enabled */
1607 	if (mii->mii_media_active & IFM_FLAG0)
1608 		BCE_SETBIT(sc, BCE_EMAC_RX_MODE, BCE_EMAC_RX_MODE_FLOW_EN);
1609 	if (mii->mii_media_active & IFM_FLAG1)
1610 		BCE_SETBIT(sc, BCE_EMAC_TX_MODE, BCE_EMAC_TX_MODE_FLOW_EN);
1611 #endif
1612 
1613 	DBEXIT(BCE_VERBOSE_PHY);
1614 }
1615 
1616 
1617 /****************************************************************************/
1618 /* Acquire NVRAM lock.                                                      */
1619 /*                                                                          */
1620 /* Before the NVRAM can be accessed the caller must acquire an NVRAM lock.  */
1621 /* Lock 0 is reserved, lock 1 is used by firmware and lock 2 is             */
1622 /* for use by the driver.                                                   */
1623 /*                                                                          */
1624 /* Returns:                                                                 */
1625 /*   0 on success, positive value on failure.                               */
1626 /****************************************************************************/
1627 static int
1628 bce_acquire_nvram_lock(struct bce_softc *sc)
1629 {
1630 	u32 val;
1631 	int j, rc = 0;
1632 
1633 	DBENTER(BCE_VERBOSE_NVRAM);
1634 
1635 	/* Request access to the flash interface. */
1636 	REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_SET2);
1637 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1638 		val = REG_RD(sc, BCE_NVM_SW_ARB);
1639 		if (val & BCE_NVM_SW_ARB_ARB_ARB2)
1640 			break;
1641 
1642 		DELAY(5);
1643 	}
1644 
1645 	if (j >= NVRAM_TIMEOUT_COUNT) {
1646 		DBPRINT(sc, BCE_WARN, "Timeout acquiring NVRAM lock!\n");
1647 		rc = EBUSY;
1648 	}
1649 
1650 	DBEXIT(BCE_VERBOSE_NVRAM);
1651 	return (rc);
1652 }
1653 
1654 
1655 /****************************************************************************/
1656 /* Release NVRAM lock.                                                      */
1657 /*                                                                          */
1658 /* When the caller is finished accessing NVRAM the lock must be released.   */
1659 /* Lock 0 is reserved, lock 1 is used by firmware and lock 2 is             */
1660 /* for use by the driver.                                                   */
1661 /*                                                                          */
1662 /* Returns:                                                                 */
1663 /*   0 on success, positive value on failure.                               */
1664 /****************************************************************************/
1665 static int
1666 bce_release_nvram_lock(struct bce_softc *sc)
1667 {
1668 	u32 val;
1669 	int j, rc = 0;
1670 
1671 	DBENTER(BCE_VERBOSE_NVRAM);
1672 
1673 	/*
1674 	 * Relinquish nvram interface.
1675 	 */
1676 	REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_CLR2);
1677 
1678 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1679 		val = REG_RD(sc, BCE_NVM_SW_ARB);
1680 		if (!(val & BCE_NVM_SW_ARB_ARB_ARB2))
1681 			break;
1682 
1683 		DELAY(5);
1684 	}
1685 
1686 	if (j >= NVRAM_TIMEOUT_COUNT) {
1687 		DBPRINT(sc, BCE_WARN, "Timeout releasing NVRAM lock!\n");
1688 		rc = EBUSY;
1689 	}
1690 
1691 	DBEXIT(BCE_VERBOSE_NVRAM);
1692 	return (rc);
1693 }
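/*
 * The lock and access-enable calls above bracket every NVRAM operation.
 * A minimal sketch of the sequence, as used by bce_nvram_read() below:
 *
 *	if ((rc = bce_acquire_nvram_lock(sc)) == 0) {
 *		bce_enable_nvram_access(sc);
 *		rc = bce_nvram_read_dword(sc, offset, buf,
 *		    BCE_NVM_COMMAND_FIRST | BCE_NVM_COMMAND_LAST);
 *		bce_disable_nvram_access(sc);
 *		bce_release_nvram_lock(sc);
 *	}
 */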
1694 
1695 
1696 #ifdef BCE_NVRAM_WRITE_SUPPORT
1697 /****************************************************************************/
1698 /* Enable NVRAM write access.                                               */
1699 /*                                                                          */
1700 /* Before writing to NVRAM the caller must enable NVRAM writes.             */
1701 /*                                                                          */
1702 /* Returns:                                                                 */
1703 /*   0 on success, positive value on failure.                               */
1704 /****************************************************************************/
1705 static int
1706 bce_enable_nvram_write(struct bce_softc *sc)
1707 {
1708 	u32 val;
1709 	int rc = 0;
1710 
1711 	DBENTER(BCE_VERBOSE_NVRAM);
1712 
1713 	val = REG_RD(sc, BCE_MISC_CFG);
1714 	REG_WR(sc, BCE_MISC_CFG, val | BCE_MISC_CFG_NVM_WR_EN_PCI);
1715 
1716 	if (!(sc->bce_flash_info->flags & BCE_NV_BUFFERED)) {
1717 		int j;
1718 
1719 		REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
1720 		REG_WR(sc, BCE_NVM_COMMAND,	BCE_NVM_COMMAND_WREN | BCE_NVM_COMMAND_DOIT);
1721 
1722 		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1723 			DELAY(5);
1724 
1725 			val = REG_RD(sc, BCE_NVM_COMMAND);
1726 			if (val & BCE_NVM_COMMAND_DONE)
1727 				break;
1728 		}
1729 
1730 		if (j >= NVRAM_TIMEOUT_COUNT) {
1731 			DBPRINT(sc, BCE_WARN, "Timeout writing NVRAM!\n");
1732 			rc = EBUSY;
1733 		}
1734 	}
1735 
1736 	DBEXIT(BCE_VERBOSE_NVRAM);
1737 	return (rc);
1738 }
1739 
1740 
1741 /****************************************************************************/
1742 /* Disable NVRAM write access.                                              */
1743 /*                                                                          */
1744 /* When the caller is finished writing to NVRAM write access must be        */
1745 /* disabled.                                                                */
1746 /*                                                                          */
1747 /* Returns:                                                                 */
1748 /*   Nothing.                                                               */
1749 /****************************************************************************/
1750 static void
1751 bce_disable_nvram_write(struct bce_softc *sc)
1752 {
1753 	u32 val;
1754 
1755 	DBENTER(BCE_VERBOSE_NVRAM);
1756 
1757 	val = REG_RD(sc, BCE_MISC_CFG);
1758 	REG_WR(sc, BCE_MISC_CFG, val & ~BCE_MISC_CFG_NVM_WR_EN);
1759 
1760 	DBEXIT(BCE_VERBOSE_NVRAM);
1761 
1762 }
1763 #endif
1764 
1765 
1766 /****************************************************************************/
1767 /* Enable NVRAM access.                                                     */
1768 /*                                                                          */
1769 /* Before accessing NVRAM for read or write operations the caller must      */
1770 /* enable NVRAM access.                                                     */
1771 /*                                                                          */
1772 /* Returns:                                                                 */
1773 /*   Nothing.                                                               */
1774 /****************************************************************************/
1775 static void
1776 bce_enable_nvram_access(struct bce_softc *sc)
1777 {
1778 	u32 val;
1779 
1780 	DBENTER(BCE_VERBOSE_NVRAM);
1781 
1782 	val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE);
1783 	/* Enable both bits, even on read. */
1784 	REG_WR(sc, BCE_NVM_ACCESS_ENABLE,
1785 	       val | BCE_NVM_ACCESS_ENABLE_EN | BCE_NVM_ACCESS_ENABLE_WR_EN);
1786 
1787 	DBEXIT(BCE_VERBOSE_NVRAM);
1788 }
1789 
1790 
1791 /****************************************************************************/
1792 /* Disable NVRAM access.                                                    */
1793 /*                                                                          */
1794 /* When the caller is finished accessing NVRAM access must be disabled.     */
1795 /*                                                                          */
1796 /* Returns:                                                                 */
1797 /*   Nothing.                                                               */
1798 /****************************************************************************/
1799 static void
1800 bce_disable_nvram_access(struct bce_softc *sc)
1801 {
1802 	u32 val;
1803 
1804 	DBENTER(BCE_VERBOSE_NVRAM);
1805 
1806 	val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE);
1807 
1808 	/* Disable both bits, even after read. */
1809 	REG_WR(sc, BCE_NVM_ACCESS_ENABLE,
1810 		val & ~(BCE_NVM_ACCESS_ENABLE_EN |
1811 			BCE_NVM_ACCESS_ENABLE_WR_EN));
1812 
1813 	DBEXIT(BCE_VERBOSE_NVRAM);
1814 }
1815 
1816 
1817 #ifdef BCE_NVRAM_WRITE_SUPPORT
1818 /****************************************************************************/
1819 /* Erase NVRAM page before writing.                                         */
1820 /*                                                                          */
1821 /* Non-buffered flash parts require that a page be erased before it is      */
1822 /* written.                                                                 */
1823 /*                                                                          */
1824 /* Returns:                                                                 */
1825 /*   0 on success, positive value on failure.                               */
1826 /****************************************************************************/
1827 static int
1828 bce_nvram_erase_page(struct bce_softc *sc, u32 offset)
1829 {
1830 	u32 cmd;
1831 	int j, rc = 0;
1832 
1833 	DBENTER(BCE_VERBOSE_NVRAM);
1834 
1835 	/* Buffered flash doesn't require an erase. */
1836 	if (sc->bce_flash_info->flags & BCE_NV_BUFFERED)
1837 		goto bce_nvram_erase_page_exit;
1838 
1839 	/* Build an erase command. */
1840 	cmd = BCE_NVM_COMMAND_ERASE | BCE_NVM_COMMAND_WR |
1841 	      BCE_NVM_COMMAND_DOIT;
1842 
1843 	/*
1844 	 * Clear the DONE bit separately, set the NVRAM adress to erase,
1845 	 * and issue the erase command.
1846 	 */
1847 	REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
1848 	REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
1849 	REG_WR(sc, BCE_NVM_COMMAND, cmd);
1850 
1851 	/* Wait for completion. */
1852 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1853 		u32 val;
1854 
1855 		DELAY(5);
1856 
1857 		val = REG_RD(sc, BCE_NVM_COMMAND);
1858 		if (val & BCE_NVM_COMMAND_DONE)
1859 			break;
1860 	}
1861 
1862 	if (j >= NVRAM_TIMEOUT_COUNT) {
1863 		DBPRINT(sc, BCE_WARN, "Timeout erasing NVRAM.\n");
1864 		rc = EBUSY;
1865 	}
1866 
1867 bce_nvram_erase_page_exit:
1868 	DBEXIT(BCE_VERBOSE_NVRAM);
1869 	return (rc);
1870 }
1871 #endif /* BCE_NVRAM_WRITE_SUPPORT */
1872 
1873 
1874 /****************************************************************************/
1875 /* Read a dword (32 bits) from NVRAM.                                       */
1876 /*                                                                          */
1877 /* Read a 32 bit word from NVRAM.  The caller is assumed to have already    */
1878 /* obtained the NVRAM lock and enabled the controller for NVRAM access.     */
1879 /*                                                                          */
1880 /* Returns:                                                                 */
1881 /*   0 on success and the 32 bit value read, positive value on failure.     */
1882 /****************************************************************************/
1883 static int
1884 bce_nvram_read_dword(struct bce_softc *sc, u32 offset, u8 *ret_val,
1885 							u32 cmd_flags)
1886 {
1887 	u32 cmd;
1888 	int i, rc = 0;
1889 
1890 	DBENTER(BCE_EXTREME_NVRAM);
1891 
1892 	/* Build the command word. */
1893 	cmd = BCE_NVM_COMMAND_DOIT | cmd_flags;
1894 
1895 	/* Calculate the offset for buffered flash if translation is used. */
1896 	if (sc->bce_flash_info->flags & BCE_NV_TRANSLATE) {
1897 		offset = ((offset / sc->bce_flash_info->page_size) <<
1898 			   sc->bce_flash_info->page_bits) +
1899 			  (offset % sc->bce_flash_info->page_size);
1900 	}
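	/*
	 * Worked example (values assumed for illustration): with a 264-byte
	 * page and page_bits = 9, as the buffered-flash entries in
	 * flash_table use, a linear offset of 1000 becomes
	 * ((1000 / 264) << 9) + (1000 % 264) = (3 << 9) + 208 = 1744.
	 */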
1901 
1902 	/*
1903 	 * Clear the DONE bit separately, set the address to read,
1904 	 * and issue the read.
1905 	 */
1906 	REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
1907 	REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
1908 	REG_WR(sc, BCE_NVM_COMMAND, cmd);
1909 
1910 	/* Wait for completion. */
1911 	for (i = 0; i < NVRAM_TIMEOUT_COUNT; i++) {
1912 		u32 val;
1913 
1914 		DELAY(5);
1915 
1916 		val = REG_RD(sc, BCE_NVM_COMMAND);
1917 		if (val & BCE_NVM_COMMAND_DONE) {
1918 			val = REG_RD(sc, BCE_NVM_READ);
1919 
1920 			val = bce_be32toh(val);
1921 			memcpy(ret_val, &val, 4);
1922 			break;
1923 		}
1924 	}
1925 
1926 	/* Check for errors. */
1927 	if (i >= NVRAM_TIMEOUT_COUNT) {
1928 		BCE_PRINTF("%s(%d): Timeout error reading NVRAM at offset 0x%08X!\n",
1929 			__FILE__, __LINE__, offset);
1930 		rc = EBUSY;
1931 	}
1932 
1933 	DBEXIT(BCE_EXTREME_NVRAM);
1934 	return(rc);
1935 }
1936 
1937 
1938 #ifdef BCE_NVRAM_WRITE_SUPPORT
1939 /****************************************************************************/
1940 /* Write a dword (32 bits) to NVRAM.                                        */
1941 /*                                                                          */
1942 /* Write a 32 bit word to NVRAM.  The caller is assumed to have already     */
1943 /* obtained the NVRAM lock, enabled the controller for NVRAM access, and    */
1944 /* enabled NVRAM write access.                                              */
1945 /*                                                                          */
1946 /* Returns:                                                                 */
1947 /*   0 on success, positive value on failure.                               */
1948 /****************************************************************************/
1949 static int
1950 bce_nvram_write_dword(struct bce_softc *sc, u32 offset, u8 *val,
1951 	u32 cmd_flags)
1952 {
1953 	u32 cmd, val32;
1954 	int j, rc = 0;
1955 
1956 	DBENTER(BCE_VERBOSE_NVRAM);
1957 
1958 	/* Build the command word. */
1959 	cmd = BCE_NVM_COMMAND_DOIT | BCE_NVM_COMMAND_WR | cmd_flags;
1960 
1961 	/* Calculate the offset for buffered flash if translation is used. */
1962 	if (sc->bce_flash_info->flags & BCE_NV_TRANSLATE) {
1963 		offset = ((offset / sc->bce_flash_info->page_size) <<
1964 			  sc->bce_flash_info->page_bits) +
1965 			 (offset % sc->bce_flash_info->page_size);
1966 	}
1967 
1968 	/*
1969 	 * Clear the DONE bit separately, convert NVRAM data to big-endian,
1970 	 * set the NVRAM address to write, and issue the write command
1971 	 */
1972 	REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
1973 	memcpy(&val32, val, 4);
1974 	val32 = htobe32(val32);
1975 	REG_WR(sc, BCE_NVM_WRITE, val32);
1976 	REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
1977 	REG_WR(sc, BCE_NVM_COMMAND, cmd);
1978 
1979 	/* Wait for completion. */
1980 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1981 		DELAY(5);
1982 
1983 		if (REG_RD(sc, BCE_NVM_COMMAND) & BCE_NVM_COMMAND_DONE)
1984 			break;
1985 	}
1986 	if (j >= NVRAM_TIMEOUT_COUNT) {
1987 		BCE_PRINTF("%s(%d): Timeout error writing NVRAM at offset 0x%08X\n",
1988 			__FILE__, __LINE__, offset);
1989 		rc = EBUSY;
1990 	}
1991 
1992 	DBEXIT(BCE_VERBOSE_NVRAM);
1993 	return (rc);
1994 }
1995 #endif /* BCE_NVRAM_WRITE_SUPPORT */
1996 
1997 
1998 /****************************************************************************/
1999 /* Initialize NVRAM access.                                                 */
2000 /*                                                                          */
2001 /* Identify the NVRAM device in use and prepare the NVRAM interface to      */
2002 /* access that device.                                                      */
2003 /*                                                                          */
2004 /* Returns:                                                                 */
2005 /*   0 on success, positive value on failure.                               */
2006 /****************************************************************************/
2007 static int
2008 bce_init_nvram(struct bce_softc *sc)
2009 {
2010 	u32 val;
2011 	int j, entry_count, rc = 0;
2012 	struct flash_spec *flash;
2013 
2014 	DBENTER(BCE_VERBOSE_NVRAM);
2015 
2016 	if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
2017 		(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
2018 		sc->bce_flash_info = &flash_5709;
2019 		goto bce_init_nvram_get_flash_size;
2020 	}
2021 
2022 	/* Determine the selected interface. */
2023 	val = REG_RD(sc, BCE_NVM_CFG1);
2024 
2025 	entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
2026 
2027 	/*
2028 	 * Flash reconfiguration is required to support additional
2029 	 * NVRAM devices not directly supported in hardware.
2030 	 * Check if the flash interface was reconfigured
2031 	 * by the bootcode.
2032 	 */
2033 
2034 	if (val & 0x40000000) {
2035 		/* Flash interface reconfigured by bootcode. */
2036 
2037 		DBPRINT(sc,BCE_INFO_LOAD,
2038 			"bce_init_nvram(): Flash WAS reconfigured.\n");
2039 
2040 		for (j = 0, flash = &flash_table[0]; j < entry_count;
2041 		     j++, flash++) {
2042 			if ((val & FLASH_BACKUP_STRAP_MASK) ==
2043 			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
2044 				sc->bce_flash_info = flash;
2045 				break;
2046 			}
2047 		}
2048 	} else {
2049 		/* Flash interface not yet reconfigured. */
2050 		u32 mask;
2051 
2052 		DBPRINT(sc, BCE_INFO_LOAD, "%s(): Flash was NOT reconfigured.\n",
2053 			__FUNCTION__);
2054 
2055 		if (val & (1 << 23))
2056 			mask = FLASH_BACKUP_STRAP_MASK;
2057 		else
2058 			mask = FLASH_STRAP_MASK;
2059 
2060 		/* Look for the matching NVRAM device configuration data. */
2061 		for (j = 0, flash = &flash_table[0]; j < entry_count; j++, flash++) {
2062 
2063 			/* Check if the device matches any of the known devices. */
2064 			if ((val & mask) == (flash->strapping & mask)) {
2065 				/* Found a device match. */
2066 				sc->bce_flash_info = flash;
2067 
2068 				/* Request access to the flash interface. */
2069 				if ((rc = bce_acquire_nvram_lock(sc)) != 0)
2070 					return rc;
2071 
2072 				/* Reconfigure the flash interface. */
2073 				bce_enable_nvram_access(sc);
2074 				REG_WR(sc, BCE_NVM_CFG1, flash->config1);
2075 				REG_WR(sc, BCE_NVM_CFG2, flash->config2);
2076 				REG_WR(sc, BCE_NVM_CFG3, flash->config3);
2077 				REG_WR(sc, BCE_NVM_WRITE1, flash->write1);
2078 				bce_disable_nvram_access(sc);
2079 				bce_release_nvram_lock(sc);
2080 
2081 				break;
2082 			}
2083 		}
2084 	}
2085 
2086 	/* Check if a matching device was found. */
2087 	if (j == entry_count) {
2088 		sc->bce_flash_info = NULL;
2089 		BCE_PRINTF("%s(%d): Unknown Flash NVRAM found!\n",
2090 			__FILE__, __LINE__);
2091 		rc = ENODEV;
2092 	}
2093 
2094 bce_init_nvram_get_flash_size:
2095 	/* Write the flash config data to the shared memory interface. */
2096 	val = REG_RD_IND(sc, sc->bce_shmem_base + BCE_SHARED_HW_CFG_CONFIG2);
2097 	val &= BCE_SHARED_HW_CFG2_NVM_SIZE_MASK;
2098 	if (val)
2099 		sc->bce_flash_size = val;
2100 	else
2101 		sc->bce_flash_size = sc->bce_flash_info->total_size;
2102 
2103 	DBPRINT(sc, BCE_INFO_LOAD, "%s(): Found %s, size = 0x%08X\n",
2104 		__FUNCTION__, sc->bce_flash_info->name,
2105 		sc->bce_flash_info->total_size);
2106 
2107 	DBEXIT(BCE_VERBOSE_NVRAM);
2108 	return rc;
2109 }
2110 
2111 
2112 /****************************************************************************/
2113 /* Read an arbitrary range of data from NVRAM.                              */
2114 /*                                                                          */
2115 /* Prepares the NVRAM interface for access and reads the requested data     */
2116 /* into the supplied buffer.                                                */
2117 /*                                                                          */
2118 /* Returns:                                                                 */
2119 /*   0 on success and the data read, positive value on failure.             */
2120 /****************************************************************************/
2121 static int
2122 bce_nvram_read(struct bce_softc *sc, u32 offset, u8 *ret_buf,
2123 	int buf_size)
2124 {
2125 	int rc = 0;
2126 	u32 cmd_flags, offset32, len32, extra;
2127 
2128 	DBENTER(BCE_VERBOSE_NVRAM);
2129 
2130 	if (buf_size == 0)
2131 		goto bce_nvram_read_exit;
2132 
2133 	/* Request access to the flash interface. */
2134 	if ((rc = bce_acquire_nvram_lock(sc)) != 0)
2135 		goto bce_nvram_read_exit;
2136 
2137 	/* Enable access to flash interface */
2138 	bce_enable_nvram_access(sc);
2139 
2140 	len32 = buf_size;
2141 	offset32 = offset;
2142 	extra = 0;
2143 
2144 	cmd_flags = 0;
2145 
2146 	if (offset32 & 3) {
2147 		u8 buf[4];
2148 		u32 pre_len;
2149 
2150 		offset32 &= ~3;
2151 		pre_len = 4 - (offset & 3);
2152 
2153 		if (pre_len >= len32) {
2154 			pre_len = len32;
2155 			cmd_flags = BCE_NVM_COMMAND_FIRST | BCE_NVM_COMMAND_LAST;
2156 		}
2157 		else {
2158 			cmd_flags = BCE_NVM_COMMAND_FIRST;
2159 		}
2160 
2161 		rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
2162 
2163 		if (rc)
2164 			goto bce_nvram_read_locked_exit;
2165 
2166 		memcpy(ret_buf, buf + (offset & 3), pre_len);
2167 
2168 		offset32 += 4;
2169 		ret_buf += pre_len;
2170 		len32 -= pre_len;
2171 	}
2172 
2173 	if (len32 & 3) {
2174 		extra = 4 - (len32 & 3);
2175 		len32 = (len32 + 4) & ~3;
2176 	}
2177 
2178 	if (len32 == 4) {
2179 		u8 buf[4];
2180 
2181 		if (cmd_flags)
2182 			cmd_flags = BCE_NVM_COMMAND_LAST;
2183 		else
2184 			cmd_flags = BCE_NVM_COMMAND_FIRST |
2185 				    BCE_NVM_COMMAND_LAST;
2186 
2187 		rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
2188 
2189 		memcpy(ret_buf, buf, 4 - extra);
2190 	}
2191 	else if (len32 > 0) {
2192 		u8 buf[4];
2193 
2194 		/* Read the first word. */
2195 		if (cmd_flags)
2196 			cmd_flags = 0;
2197 		else
2198 			cmd_flags = BCE_NVM_COMMAND_FIRST;
2199 
2200 		rc = bce_nvram_read_dword(sc, offset32, ret_buf, cmd_flags);
2201 
2202 		/* Advance to the next dword. */
2203 		offset32 += 4;
2204 		ret_buf += 4;
2205 		len32 -= 4;
2206 
2207 		while (len32 > 4 && rc == 0) {
2208 			rc = bce_nvram_read_dword(sc, offset32, ret_buf, 0);
2209 
2210 			/* Advance to the next dword. */
2211 			offset32 += 4;
2212 			ret_buf += 4;
2213 			len32 -= 4;
2214 		}
2215 
2216 		if (rc)
2217 			goto bce_nvram_read_locked_exit;
2218 
2219 		cmd_flags = BCE_NVM_COMMAND_LAST;
2220 		rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
2221 
2222 		memcpy(ret_buf, buf, 4 - extra);
2223 	}
2224 
2225 bce_nvram_read_locked_exit:
2226 	/* Disable access to flash interface and release the lock. */
2227 	bce_disable_nvram_access(sc);
2228 	bce_release_nvram_lock(sc);
2229 
2230 bce_nvram_read_exit:
2231 	DBEXIT(BCE_VERBOSE_NVRAM);
2232 	return rc;
2233 }
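/*
 * Worked example of the alignment handling above (illustrative only): a
 * 6-byte read at offset 0x102 rounds offset32 down to 0x100 with
 * pre_len = 2, so the FIRST dword read at 0x100 supplies bytes
 * 0x102-0x103; len32 is then 4 with extra = 0, and a single LAST dword
 * read at 0x104 supplies the remaining four bytes.
 */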
2234 
2235 
2236 #ifdef BCE_NVRAM_WRITE_SUPPORT
2237 /****************************************************************************/
2238 /* Write an arbitrary range of data to NVRAM.                               */
2239 /*                                                                          */
2240 /* Prepares the NVRAM interface for write access and writes the requested   */
2241 /* data from the supplied buffer.  The caller is responsible for            */
2242 /* calculating any appropriate CRCs.                                        */
2243 /*                                                                          */
2244 /* Returns:                                                                 */
2245 /*   0 on success, positive value on failure.                               */
2246 /****************************************************************************/
2247 static int
2248 bce_nvram_write(struct bce_softc *sc, u32 offset, u8 *data_buf,
2249 	int buf_size)
2250 {
2251 	u32 written, offset32, len32;
2252 	u8 *buf, start[4], end[4];
2253 	int rc = 0;
2254 	int align_start, align_end;
2255 
2256 	DBENTER(BCE_VERBOSE_NVRAM);
2257 
2258 	buf = data_buf;
2259 	offset32 = offset;
2260 	len32 = buf_size;
2261 	align_start = align_end = 0;
2262 
2263 	if ((align_start = (offset32 & 3))) {
2264 		offset32 &= ~3;
2265 		len32 += align_start;
2266 		if ((rc = bce_nvram_read(sc, offset32, start, 4)))
2267 			goto bce_nvram_write_exit;
2268 	}
2269 
2270 	if (len32 & 3) {
2271 		if ((len32 > 4) || !align_start) {
2272 			align_end = 4 - (len32 & 3);
2273 			len32 += align_end;
2274 			if ((rc = bce_nvram_read(sc, offset32 + len32 - 4,
2275 				end, 4))) {
2276 				goto bce_nvram_write_exit;
2277 			}
2278 		}
2279 	}
2280 
2281 	if (align_start || align_end) {
2282 		buf = malloc(len32, M_DEVBUF, M_NOWAIT);
2283 		if (buf == NULL) {
2284 			rc = ENOMEM;
2285 			goto bce_nvram_write_exit;
2286 		}
2287 
2288 		if (align_start) {
2289 			memcpy(buf, start, 4);
2290 		}
2291 
2292 		if (align_end) {
2293 			memcpy(buf + len32 - 4, end, 4);
2294 		}
2295 		memcpy(buf + align_start, data_buf, buf_size);
2296 	}
2297 
2298 	written = 0;
2299 	while ((written < len32) && (rc == 0)) {
2300 		u32 page_start, page_end, data_start, data_end;
2301 		u32 addr, cmd_flags;
2302 		int i;
2303 		u8 flash_buffer[264];
2304 
2305 		/* Find the page_start addr */
2306 		page_start = offset32 + written;
2307 		page_start -= (page_start % sc->bce_flash_info->page_size);
2308 		/* Find the page_end addr */
2309 		page_end = page_start + sc->bce_flash_info->page_size;
2310 		/* Find the data_start addr */
2311 		data_start = (written == 0) ? offset32 : page_start;
2312 		/* Find the data_end addr */
2313 		data_end = (page_end > offset32 + len32) ?
2314 			(offset32 + len32) : page_end;
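		/*
		 * Worked example (assuming a 256-byte page size for
		 * illustration): a 0x20-byte write at offset 0xF0 takes two
		 * passes.  Pass 1: page_start = 0x000, data_start = 0x0F0,
		 * data_end = page_end = 0x100.  Pass 2: page_start =
		 * data_start = 0x100, data_end = 0x110.
		 */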
2315 
2316 		/* Request access to the flash interface. */
2317 		if ((rc = bce_acquire_nvram_lock(sc)) != 0)
2318 			goto bce_nvram_write_exit;
2319 
2320 		/* Enable access to flash interface */
2321 		bce_enable_nvram_access(sc);
2322 
2323 		cmd_flags = BCE_NVM_COMMAND_FIRST;
2324 		if (!(sc->bce_flash_info->flags & BCE_NV_BUFFERED)) {
2325 			int j;
2326 
2327 			/* Read the whole page into the buffer
2328 			 * (non-buffer flash only) */
2329 			for (j = 0; j < sc->bce_flash_info->page_size; j += 4) {
2330 				if (j == (sc->bce_flash_info->page_size - 4)) {
2331 					cmd_flags |= BCE_NVM_COMMAND_LAST;
2332 				}
2333 				rc = bce_nvram_read_dword(sc,
2334 					page_start + j,
2335 					&flash_buffer[j],
2336 					cmd_flags);
2337 
2338 				if (rc)
2339 					goto bce_nvram_write_locked_exit;
2340 
2341 				cmd_flags = 0;
2342 			}
2343 		}
2344 
2345 		/* Enable writes to flash interface (unlock write-protect) */
2346 		if ((rc = bce_enable_nvram_write(sc)) != 0)
2347 			goto bce_nvram_write_locked_exit;
2348 
2349 		/* Erase the page */
2350 		if ((rc = bce_nvram_erase_page(sc, page_start)) != 0)
2351 			goto bce_nvram_write_locked_exit;
2352 
2353 		/* Re-enable the write again for the actual write */
2354 		bce_enable_nvram_write(sc);
2355 
2356 		/* Loop to write back the buffer data from page_start to
2357 		 * data_start */
2358 		i = 0;
2359 		if (!(sc->bce_flash_info->flags & BCE_NV_BUFFERED)) {
2360 			for (addr = page_start; addr < data_start;
2361 				addr += 4, i += 4) {
2362 
2363 				rc = bce_nvram_write_dword(sc, addr,
2364 					&flash_buffer[i], cmd_flags);
2365 
2366 				if (rc != 0)
2367 					goto bce_nvram_write_locked_exit;
2368 
2369 				cmd_flags = 0;
2370 			}
2371 		}
2372 
2373 		/* Loop to write the new data from data_start to data_end */
2374 		for (addr = data_start; addr < data_end; addr += 4, i++) {
2375 			if ((addr == page_end - 4) ||
2376 				((sc->bce_flash_info->flags & BCE_NV_BUFFERED) &&
2377 				(addr == data_end - 4))) {
2378 
2379 				cmd_flags |= BCE_NVM_COMMAND_LAST;
2380 			}
2381 			rc = bce_nvram_write_dword(sc, addr, buf,
2382 				cmd_flags);
2383 
2384 			if (rc != 0)
2385 				goto bce_nvram_write_locked_exit;
2386 
2387 			cmd_flags = 0;
2388 			buf += 4;
2389 		}
2390 
2391 		/* Loop to write back the buffer data from data_end
2392 		 * to page_end */
2393 		if (!(sc->bce_flash_info->flags & BCE_NV_BUFFERED)) {
2394 			for (addr = data_end; addr < page_end;
2395 				addr += 4, i += 4) {
2396 
2397 				if (addr == page_end - 4) {
2398 					cmd_flags = BCE_NVM_COMMAND_LAST;
2399 				}
2400 				rc = bce_nvram_write_dword(sc, addr,
2401 					&flash_buffer[i], cmd_flags);
2402 
2403 				if (rc != 0)
2404 					goto bce_nvram_write_locked_exit;
2405 
2406 				cmd_flags = 0;
2407 			}
2408 		}
2409 
2410 		/* Disable writes to flash interface (lock write-protect) */
2411 		bce_disable_nvram_write(sc);
2412 
2413 		/* Disable access to flash interface */
2414 		bce_disable_nvram_access(sc);
2415 		bce_release_nvram_lock(sc);
2416 
2417 		/* Increment written */
2418 		written += data_end - data_start;
2419 	}
2420 
2421 	goto bce_nvram_write_exit;
2422 
2423 bce_nvram_write_locked_exit:
2424 		bce_disable_nvram_write(sc);
2425 		bce_disable_nvram_access(sc);
2426 		bce_release_nvram_lock(sc);
2427 
2428 bce_nvram_write_exit:
2429 	if (align_start || align_end)
2430 		free(buf, M_DEVBUF);
2431 
2432 	DBEXIT(BCE_VERBOSE_NVRAM);
2433 	return (rc);
2434 }
2435 #endif /* BCE_NVRAM_WRITE_SUPPORT */
2436 
2437 
2438 /****************************************************************************/
2439 /* Verifies that NVRAM is accessible and contains valid data.               */
2440 /*                                                                          */
2441 /* Reads the configuration data from NVRAM and verifies that the CRC is     */
2442 /* correct.                                                                 */
2443 /*                                                                          */
2444 /* Returns:                                                                 */
2445 /*   0 on success, positive value on failure.                               */
2446 /****************************************************************************/
2447 static int
2448 bce_nvram_test(struct bce_softc *sc)
2449 {
2450 	u32 buf[BCE_NVRAM_SIZE / 4];
2451 	u8 *data = (u8 *) buf;
2452 	int rc = 0;
2453 	u32 magic, csum;
2454 
2455 	DBENTER(BCE_VERBOSE_NVRAM | BCE_VERBOSE_LOAD | BCE_VERBOSE_RESET);
2456 
2457 	/*
2458 	 * Check that the device NVRAM is valid by reading
2459 	 * the magic value at offset 0.
2460 	 */
2461 	if ((rc = bce_nvram_read(sc, 0, data, 4)) != 0) {
2462 		BCE_PRINTF("%s(%d): Unable to read NVRAM!\n", __FILE__, __LINE__);
2463 		goto bce_nvram_test_exit;
2464 	}
2465 
2466 	/*
2467 	 * Verify that offset 0 of the NVRAM contains
2468 	 * a valid magic number.
2469 	 */
2470 	magic = bce_be32toh(buf[0]);
2471 	if (magic != BCE_NVRAM_MAGIC) {
2472 		rc = ENODEV;
2473 		BCE_PRINTF("%s(%d): Invalid NVRAM magic value! Expected: 0x%08X, "
2474 			"Found: 0x%08X\n",
2475 			__FILE__, __LINE__, BCE_NVRAM_MAGIC, magic);
2476 		goto bce_nvram_test_exit;
2477 	}
2478 
2479 	/*
2480 	 * Verify that the device NVRAM includes valid
2481 	 * configuration data.
2482 	 */
2483 	if ((rc = bce_nvram_read(sc, 0x100, data, BCE_NVRAM_SIZE)) != 0) {
2484 		BCE_PRINTF("%s(%d): Unable to read Manufacturing Information from "
2485 			"NVRAM!\n", __FILE__, __LINE__);
2486 		goto bce_nvram_test_exit;
2487 	}
2488 
2489 	csum = ether_crc32_le(data, 0x100);
2490 	if (csum != BCE_CRC32_RESIDUAL) {
2491 		rc = ENODEV;
2492 		BCE_PRINTF("%s(%d): Invalid Manufacturing Information NVRAM CRC! "
2493 			"Expected: 0x%08X, Found: 0x%08X\n",
2494 			__FILE__, __LINE__, BCE_CRC32_RESIDUAL, csum);
2495 		goto bce_nvram_test_exit;
2496 	}
2497 
2498 	csum = ether_crc32_le(data + 0x100, 0x100);
2499 	if (csum != BCE_CRC32_RESIDUAL) {
2500 		rc = ENODEV;
2501 		BCE_PRINTF("%s(%d): Invalid Feature Configuration Information "
2502 			"NVRAM CRC! Expected: 0x%08X, Found: 08%08X\n",
2503 			__FILE__, __LINE__, BCE_CRC32_RESIDUAL, csum);
2504 	}
2505 
2506 bce_nvram_test_exit:
2507 	DBEXIT(BCE_VERBOSE_NVRAM | BCE_VERBOSE_LOAD | BCE_VERBOSE_RESET);
2508 	return rc;
2509 }
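/*
 * Note on the CRC checks above: each 0x100-byte region is assumed to carry
 * its own CRC-32 in its final bytes, so running ether_crc32_le() over the
 * entire region (data plus stored CRC) yields the fixed constant
 * BCE_CRC32_RESIDUAL whenever the contents are intact.
 */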
2510 
2511 
2512 /****************************************************************************/
2513 /* Identifies the current media type of the controller and sets the PHY     */
2514 /* address.                                                                 */
2515 /*                                                                          */
2516 /* Returns:                                                                 */
2517 /*   Nothing.                                                               */
2518 /****************************************************************************/
2519 static void
2520 bce_get_media(struct bce_softc *sc)
2521 {
2522 	u32 val;
2523 
2524 	DBENTER(BCE_VERBOSE);
2525 
2526 	/* Assume PHY address for copper controllers. */
2527 	sc->bce_phy_addr = 1;
2528 
2529 	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) {
2530  		u32 val = REG_RD(sc, BCE_MISC_DUAL_MEDIA_CTRL);
2531 		u32 bond_id = val & BCE_MISC_DUAL_MEDIA_CTRL_BOND_ID;
2532 		u32 strap;
2533 
2534 		/*
2535 		 * The BCM5709S is software configurable
2536 		 * for Copper or SerDes operation.
2537 		 */
2538 		if (bond_id == BCE_MISC_DUAL_MEDIA_CTRL_BOND_ID_C) {
2539 			DBPRINT(sc, BCE_INFO_LOAD, "5709 bonded for copper.\n");
2540 			goto bce_get_media_exit;
2541 		} else if (bond_id == BCE_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
2542 			DBPRINT(sc, BCE_INFO_LOAD, "5709 bonded for dual media.\n");
2543 			sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
2544 			goto bce_get_media_exit;
2545 		}
2546 
2547 		if (val & BCE_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
2548 			strap = (val & BCE_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
2549 		else
2550 			strap = (val & BCE_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
2551 
2552 		if (pci_get_function(sc->bce_dev) == 0) {
2553 			switch (strap) {
2554 			case 0x4:
2555 			case 0x5:
2556 			case 0x6:
2557 				DBPRINT(sc, BCE_INFO_LOAD,
2558 					"BCM5709 s/w configured for SerDes.\n");
2559 				sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
				break;
2560 			default:
2561 				DBPRINT(sc, BCE_INFO_LOAD,
2562 					"BCM5709 s/w configured for Copper.\n");
2563 			}
2564 		} else {
2565 			switch (strap) {
2566 			case 0x1:
2567 			case 0x2:
2568 			case 0x4:
2569 				DBPRINT(sc, BCE_INFO_LOAD,
2570 					"BCM5709 s/w configured for SerDes.\n");
2571 				sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
				break;
2572 			default:
2573 				DBPRINT(sc, BCE_INFO_LOAD,
2574 					"BCM5709 s/w configured for Copper.\n");
2575 			}
2576 		}
2577 
2578 	} else if (BCE_CHIP_BOND_ID(sc) & BCE_CHIP_BOND_ID_SERDES_BIT)
2579 		sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
2580 
2581 	if (sc->bce_phy_flags & BCE_PHY_SERDES_FLAG) {
2582 		sc->bce_flags |= BCE_NO_WOL_FLAG;
2583 		if (BCE_CHIP_NUM(sc) != BCE_CHIP_NUM_5706) {
2584 			sc->bce_phy_addr = 2;
2585 			val = REG_RD_IND(sc, sc->bce_shmem_base +
2586 				 BCE_SHARED_HW_CFG_CONFIG);
2587 			if (val & BCE_SHARED_HW_CFG_PHY_2_5G) {
2588 				sc->bce_phy_flags |= BCE_PHY_2_5G_CAPABLE_FLAG;
2589 				DBPRINT(sc, BCE_INFO_LOAD, "Found 2.5Gb capable adapter\n");
2590 			}
2591 		}
2592 	} else if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) ||
2593 		   (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708))
2594 		sc->bce_phy_flags |= BCE_PHY_CRC_FIX_FLAG;
2595 
2596 bce_get_media_exit:
2597 	DBPRINT(sc, (BCE_INFO_LOAD | BCE_INFO_PHY),
2598 		"Using PHY address %d.\n", sc->bce_phy_addr);
2599 
2600 	DBEXIT(BCE_VERBOSE);
2601 }
2602 
2603 
2604 /****************************************************************************/
2605 /* Free any DMA memory owned by the driver.                                 */
2606 /*                                                                          */
2607 /* Scans through each data structure that requires DMA memory and frees     */
2608 /* the memory if allocated.                                                 */
2609 /*                                                                          */
2610 /* Returns:                                                                 */
2611 /*   Nothing.                                                               */
2612 /****************************************************************************/
2613 static void
2614 bce_dma_free(struct bce_softc *sc)
2615 {
2616 	int i;
2617 
2618 	DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_UNLOAD | BCE_VERBOSE_CTX);
2619 
2620 	/* Free, unmap, and destroy the status block. */
2621 	if (sc->status_block != NULL) {
2622 		bus_dmamem_free(
2623 			sc->status_tag,
2624 		    sc->status_block,
2625 		    sc->status_map);
2626 		sc->status_block = NULL;
2627 	}
2628 
2629 	if (sc->status_map != NULL) {
2630 		bus_dmamap_unload(
2631 			sc->status_tag,
2632 		    sc->status_map);
2633 		bus_dmamap_destroy(sc->status_tag,
2634 		    sc->status_map);
2635 		sc->status_map = NULL;
2636 	}
2637 
2638 	if (sc->status_tag != NULL) {
2639 		bus_dma_tag_destroy(sc->status_tag);
2640 		sc->status_tag = NULL;
2641 	}
2642 
2643 
2644 	/* Free, unmap, and destroy the statistics block. */
2645 	if (sc->stats_block != NULL) {
2646 		bus_dmamem_free(
2647 			sc->stats_tag,
2648 		    sc->stats_block,
2649 		    sc->stats_map);
2650 		sc->stats_block = NULL;
2651 	}
2652 
2653 	if (sc->stats_map != NULL) {
2654 		bus_dmamap_unload(
2655 			sc->stats_tag,
2656 		    sc->stats_map);
2657 		bus_dmamap_destroy(sc->stats_tag,
2658 		    sc->stats_map);
2659 		sc->stats_map = NULL;
2660 	}
2661 
2662 	if (sc->stats_tag != NULL) {
2663 		bus_dma_tag_destroy(sc->stats_tag);
2664 		sc->stats_tag = NULL;
2665 	}
2666 
2667 
2668 	/* Free, unmap and destroy all context memory pages. */
2669 	if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
2670 		(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
2671 		for (i = 0; i < sc->ctx_pages; i++ ) {
2672 			if (sc->ctx_block[i] != NULL) {
2673 				bus_dmamem_free(
2674 					sc->ctx_tag,
2675 				    sc->ctx_block[i],
2676 				    sc->ctx_map[i]);
2677 				sc->ctx_block[i] = NULL;
2678 			}
2679 
2680 			if (sc->ctx_map[i] != NULL) {
2681 				bus_dmamap_unload(
2682 					sc->ctx_tag,
2683 		    		sc->ctx_map[i]);
2684 				bus_dmamap_destroy(
2685 					sc->ctx_tag,
2686 				    sc->ctx_map[i]);
2687 				sc->ctx_map[i] = NULL;
2688 			}
2689 		}
2690 
2691 		/* Destroy the context memory tag. */
2692 		if (sc->ctx_tag != NULL) {
2693 			bus_dma_tag_destroy(sc->ctx_tag);
2694 			sc->ctx_tag = NULL;
2695 		}
2696 	}
2697 
2698 
2699 	/* Free, unmap and destroy all TX buffer descriptor chain pages. */
2700 	for (i = 0; i < TX_PAGES; i++ ) {
2701 		if (sc->tx_bd_chain[i] != NULL) {
2702 			bus_dmamem_free(
2703 				sc->tx_bd_chain_tag,
2704 			    sc->tx_bd_chain[i],
2705 			    sc->tx_bd_chain_map[i]);
2706 			sc->tx_bd_chain[i] = NULL;
2707 		}
2708 
2709 		if (sc->tx_bd_chain_map[i] != NULL) {
2710 			bus_dmamap_unload(
2711 				sc->tx_bd_chain_tag,
2712 		    	sc->tx_bd_chain_map[i]);
2713 			bus_dmamap_destroy(
2714 				sc->tx_bd_chain_tag,
2715 			    sc->tx_bd_chain_map[i]);
2716 			sc->tx_bd_chain_map[i] = NULL;
2717 		}
2718 	}
2719 
2720 	/* Destroy the TX buffer descriptor tag. */
2721 	if (sc->tx_bd_chain_tag != NULL) {
2722 		bus_dma_tag_destroy(sc->tx_bd_chain_tag);
2723 		sc->tx_bd_chain_tag = NULL;
2724 	}
2725 
2726 
2727 	/* Free, unmap and destroy all RX buffer descriptor chain pages. */
2728 	for (i = 0; i < RX_PAGES; i++ ) {
2729 		if (sc->rx_bd_chain[i] != NULL) {
2730 			bus_dmamem_free(
2731 				sc->rx_bd_chain_tag,
2732 			    sc->rx_bd_chain[i],
2733 			    sc->rx_bd_chain_map[i]);
2734 			sc->rx_bd_chain[i] = NULL;
2735 		}
2736 
2737 		if (sc->rx_bd_chain_map[i] != NULL) {
2738 			bus_dmamap_unload(
2739 				sc->rx_bd_chain_tag,
2740 		    	sc->rx_bd_chain_map[i]);
2741 			bus_dmamap_destroy(
2742 				sc->rx_bd_chain_tag,
2743 			    sc->rx_bd_chain_map[i]);
2744 			sc->rx_bd_chain_map[i] = NULL;
2745 		}
2746 	}
2747 
2748 	/* Destroy the RX buffer descriptor tag. */
2749 	if (sc->rx_bd_chain_tag != NULL) {
2750 		bus_dma_tag_destroy(sc->rx_bd_chain_tag);
2751 		sc->rx_bd_chain_tag = NULL;
2752 	}
2753 
2754 
2755 #ifdef ZERO_COPY_SOCKETS
2756 	/* Free, unmap and destroy all page buffer descriptor chain pages. */
2757 	for (i = 0; i < PG_PAGES; i++ ) {
2758 		if (sc->pg_bd_chain[i] != NULL) {
2759 			bus_dmamem_free(
2760 				sc->pg_bd_chain_tag,
2761 			    sc->pg_bd_chain[i],
2762 			    sc->pg_bd_chain_map[i]);
2763 			sc->pg_bd_chain[i] = NULL;
2764 		}
2765 
2766 		if (sc->pg_bd_chain_map[i] != NULL) {
2767 			bus_dmamap_unload(
2768 				sc->pg_bd_chain_tag,
2769 		    	sc->pg_bd_chain_map[i]);
2770 			bus_dmamap_destroy(
2771 				sc->pg_bd_chain_tag,
2772 			    sc->pg_bd_chain_map[i]);
2773 			sc->pg_bd_chain_map[i] = NULL;
2774 		}
2775 	}
2776 
2777 	/* Destroy the page buffer descriptor tag. */
2778 	if (sc->pg_bd_chain_tag != NULL) {
2779 		bus_dma_tag_destroy(sc->pg_bd_chain_tag);
2780 		sc->pg_bd_chain_tag = NULL;
2781 	}
2782 #endif
2783 
2784 
2785 	/* Unload and destroy the TX mbuf maps. */
2786 	for (i = 0; i < TOTAL_TX_BD; i++) {
2787 		if (sc->tx_mbuf_map[i] != NULL) {
2788 			bus_dmamap_unload(sc->tx_mbuf_tag,
2789 				sc->tx_mbuf_map[i]);
2790 			bus_dmamap_destroy(sc->tx_mbuf_tag,
2791 	 			sc->tx_mbuf_map[i]);
2792 			sc->tx_mbuf_map[i] = NULL;
2793 		}
2794 	}
2795 
2796 	/* Destroy the TX mbuf tag. */
2797 	if (sc->tx_mbuf_tag != NULL) {
2798 		bus_dma_tag_destroy(sc->tx_mbuf_tag);
2799 		sc->tx_mbuf_tag = NULL;
2800 	}
2801 
2802 	/* Unload and destroy the RX mbuf maps. */
2803 	for (i = 0; i < TOTAL_RX_BD; i++) {
2804 		if (sc->rx_mbuf_map[i] != NULL) {
2805 			bus_dmamap_unload(sc->rx_mbuf_tag,
2806 				sc->rx_mbuf_map[i]);
2807 			bus_dmamap_destroy(sc->rx_mbuf_tag,
2808 	 			sc->rx_mbuf_map[i]);
2809 			sc->rx_mbuf_map[i] = NULL;
2810 		}
2811 	}
2812 
2813 	/* Destroy the RX mbuf tag. */
2814 	if (sc->rx_mbuf_tag != NULL) {
2815 		bus_dma_tag_destroy(sc->rx_mbuf_tag);
2816 		sc->rx_mbuf_tag = NULL;
2817 	}
2818 
2819 #ifdef ZERO_COPY_SOCKETS
2820 	/* Unload and destroy the page mbuf maps. */
2821 	for (i = 0; i < TOTAL_PG_BD; i++) {
2822 		if (sc->pg_mbuf_map[i] != NULL) {
2823 			bus_dmamap_unload(sc->pg_mbuf_tag,
2824 				sc->pg_mbuf_map[i]);
2825 			bus_dmamap_destroy(sc->pg_mbuf_tag,
2826 	 			sc->pg_mbuf_map[i]);
2827 			sc->pg_mbuf_map[i] = NULL;
2828 		}
2829 	}
2830 
2831 	/* Destroy the page mbuf tag. */
2832 	if (sc->pg_mbuf_tag != NULL) {
2833 		bus_dma_tag_destroy(sc->pg_mbuf_tag);
2834 		sc->pg_mbuf_tag = NULL;
2835 	}
2836 #endif
2837 
2838 	/* Destroy the parent tag */
2839 	if (sc->parent_tag != NULL) {
2840 		bus_dma_tag_destroy(sc->parent_tag);
2841 		sc->parent_tag = NULL;
2842 	}
2843 
2844 	DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_UNLOAD | BCE_VERBOSE_CTX);
2845 }
2846 
2847 
2848 /****************************************************************************/
2849 /* Get DMA memory from the OS.                                              */
2850 /*                                                                          */
2851 /* Validates that the OS has provided DMA buffers in response to a          */
2852 /* bus_dmamap_load() call and saves the physical address of those buffers.  */
2853 /* When the callback is used the OS will return 0 from bus_dmamap_load(),   */
2854 /* so any mapping failure is reported back to the caller here by storing    */
2855 /* a bus address of 0 through the supplied bus_addr_t pointer.              */
2856 /*                                                                          */
2857 /* Returns:                                                                 */
2858 /*   Nothing.                                                               */
2859 /****************************************************************************/
2860 static void
2861 bce_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2862 {
2863 	bus_addr_t *busaddr = arg;
2864 
2865 	/* Simulate a mapping failure. */
2866 	DBRUNIF(DB_RANDOMTRUE(bce_debug_dma_map_addr_failure),
2867 		printf("bce: %s(%d): Simulating DMA mapping error.\n",
2868 			__FILE__, __LINE__);
2869 		error = ENOMEM);
2870 
2871 	/* Check for an error and signal the caller that an error occurred. */
2872 	if (error) {
2873 		printf("bce %s(%d): DMA mapping error! error = %d, "
2874 		    "nseg = %d\n", __FILE__, __LINE__, error, nseg);
2875 		*busaddr = 0;
2876 		return;
2877 	}
2878 
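	/*
	 * The tags loaded through this callback are created with a single
	 * segment, so only the first segment's bus address is saved.
	 */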
2879 	*busaddr = segs->ds_addr;
2880 	return;
2881 }
2882 
2883 
2884 /****************************************************************************/
2885 /* Allocate any DMA memory needed by the driver.                            */
2886 /*                                                                          */
2887 /* Allocates DMA memory needed for the various global structures needed by  */
2888 /* hardware.                                                                */
2889 /*                                                                          */
2890 /* Memory alignment requirements:                                           */
2891 /* +-----------------+----------+----------+----------+----------+          */
2892 /* |                 |   5706   |   5708   |   5709   |   5716   |          */
2893 /* +-----------------+----------+----------+----------+----------+          */
2894 /* |Status Block     | 8 bytes  | 8 bytes  | 16 bytes | 16 bytes |          */
2895 /* |Statistics Block | 8 bytes  | 8 bytes  | 16 bytes | 16 bytes |          */
2896 /* |RX Buffers       | 16 bytes | 16 bytes | 16 bytes | 16 bytes |          */
2897 /* |PG Buffers       |   none   |   none   |   none   |   none   |          */
2898 /* |TX Buffers       |   none   |   none   |   none   |   none   |          */
2899 /* |Chain Pages(1)   |   4KiB   |   4KiB   |   4KiB   |   4KiB   |          */
2900 /* +-----------------+----------+----------+----------+----------+          */
2901 /*                                                                          */
2902 /* (1) Must align with CPU page size (BCM_PAGE_SIZE).                       */
2903 /*                                                                          */
2904 /* Returns:                                                                 */
2905 /*   0 for success, positive value for failure.                             */
2906 /****************************************************************************/
2907 static int
2908 bce_dma_alloc(device_t dev)
2909 {
2910 	struct bce_softc *sc;
2911 	int i, error, rc = 0;
2912 	bus_size_t max_size, max_seg_size;
2913 	int max_segments;
2914 
2915 	sc = device_get_softc(dev);
2916 
2917 	DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_CTX);
2918 
2919 	/*
2920 	 * Allocate the parent bus DMA tag appropriate for PCI.
2921 	 */
2922 	if (bus_dma_tag_create(NULL,
2923 			1,
2924 			BCE_DMA_BOUNDARY,
2925 			sc->max_bus_addr,
2926 			BUS_SPACE_MAXADDR,
2927 			NULL, NULL,
2928 			MAXBSIZE,
2929 			BUS_SPACE_UNRESTRICTED,
2930 			BUS_SPACE_MAXSIZE_32BIT,
2931 			0,
2932 			NULL, NULL,
2933 			&sc->parent_tag)) {
2934 		BCE_PRINTF("%s(%d): Could not allocate parent DMA tag!\n",
2935 			__FILE__, __LINE__);
2936 		rc = ENOMEM;
2937 		goto bce_dma_alloc_exit;
2938 	}
2939 
2940 	/*
2941 	 * Create a DMA tag for the status block, allocate and clear the
2942 	 * memory, map the memory into DMA space, and fetch the physical
2943 	 * address of the block.
2944 	 */
2945 	if (bus_dma_tag_create(sc->parent_tag,
2946 	    	BCE_DMA_ALIGN,
2947 	    	BCE_DMA_BOUNDARY,
2948 	    	sc->max_bus_addr,
2949 	    	BUS_SPACE_MAXADDR,
2950 	    	NULL, NULL,
2951 	    	BCE_STATUS_BLK_SZ,
2952 	    	1,
2953 	    	BCE_STATUS_BLK_SZ,
2954 	    	0,
2955 	    	NULL, NULL,
2956 	    	&sc->status_tag)) {
2957 		BCE_PRINTF("%s(%d): Could not allocate status block DMA tag!\n",
2958 			__FILE__, __LINE__);
2959 		rc = ENOMEM;
2960 		goto bce_dma_alloc_exit;
2961 	}
2962 
2963 	if(bus_dmamem_alloc(sc->status_tag,
2964 	    	(void **)&sc->status_block,
2965 	    	BUS_DMA_NOWAIT,
2966 	    	&sc->status_map)) {
2967 		BCE_PRINTF("%s(%d): Could not allocate status block DMA memory!\n",
2968 			__FILE__, __LINE__);
2969 		rc = ENOMEM;
2970 		goto bce_dma_alloc_exit;
2971 	}
2972 
2973 	bzero((char *)sc->status_block, BCE_STATUS_BLK_SZ);
2974 
2975 	error = bus_dmamap_load(sc->status_tag,
2976 	    	sc->status_map,
2977 	    	sc->status_block,
2978 	    	BCE_STATUS_BLK_SZ,
2979 	    	bce_dma_map_addr,
2980 	    	&sc->status_block_paddr,
2981 	    	BUS_DMA_NOWAIT);
2982 
2983 	if (error) {
2984 		BCE_PRINTF("%s(%d): Could not map status block DMA memory!\n",
2985 			__FILE__, __LINE__);
2986 		rc = ENOMEM;
2987 		goto bce_dma_alloc_exit;
2988 	}
2989 
2990 	DBPRINT(sc, BCE_INFO, "%s(): status_block_paddr = 0x%jX\n",
2991 		__FUNCTION__, (uintmax_t) sc->status_block_paddr);
2992 
2993 	/*
2994 	 * Create a DMA tag for the statistics block, allocate and clear the
2995 	 * memory, map the memory into DMA space, and fetch the physical
2996 	 * address of the block.
2997 	 */
2998 	if (bus_dma_tag_create(sc->parent_tag,
2999 	    	BCE_DMA_ALIGN,
3000 	    	BCE_DMA_BOUNDARY,
3001 	    	sc->max_bus_addr,
3002 	    	BUS_SPACE_MAXADDR,
3003 	    	NULL, NULL,
3004 	    	BCE_STATS_BLK_SZ,
3005 	    	1,
3006 	    	BCE_STATS_BLK_SZ,
3007 	    	0,
3008 	    	NULL, NULL,
3009 	    	&sc->stats_tag)) {
3010 		BCE_PRINTF("%s(%d): Could not allocate statistics block DMA tag!\n",
3011 			__FILE__, __LINE__);
3012 		rc = ENOMEM;
3013 		goto bce_dma_alloc_exit;
3014 	}
3015 
3016 	if (bus_dmamem_alloc(sc->stats_tag,
3017 	    	(void **)&sc->stats_block,
3018 	    	BUS_DMA_NOWAIT,
3019 	    	&sc->stats_map)) {
3020 		BCE_PRINTF("%s(%d): Could not allocate statistics block DMA memory!\n",
3021 			__FILE__, __LINE__);
3022 		rc = ENOMEM;
3023 		goto bce_dma_alloc_exit;
3024 	}
3025 
3026 	bzero((char *)sc->stats_block, BCE_STATS_BLK_SZ);
3027 
3028 	error = bus_dmamap_load(sc->stats_tag,
3029 	    	sc->stats_map,
3030 	    	sc->stats_block,
3031 	    	BCE_STATS_BLK_SZ,
3032 	    	bce_dma_map_addr,
3033 	    	&sc->stats_block_paddr,
3034 	    	BUS_DMA_NOWAIT);
3035 
3036 	if(error) {
3037 		BCE_PRINTF("%s(%d): Could not map statistics block DMA memory!\n",
3038 			__FILE__, __LINE__);
3039 		rc = ENOMEM;
3040 		goto bce_dma_alloc_exit;
3041 	}
3042 
3043 	DBPRINT(sc, BCE_INFO, "%s(): stats_block_paddr = 0x%jX\n",
3044 		__FUNCTION__, (uintmax_t) sc->stats_block_paddr);
3045 
3046 	/* BCM5709 uses host memory as cache for context memory. */
3047 	if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
3048 		(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
3049 		sc->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
3050 		if (sc->ctx_pages == 0)
3051 			sc->ctx_pages = 1;
3052 
3053 		DBRUNIF((sc->ctx_pages > 512),
3054 			BCE_PRINTF("%s(%d): Too many CTX pages! %d > 512\n",
3055 				__FILE__, __LINE__, sc->ctx_pages));
3056 
3057 		/*
3058 		 * Create a DMA tag for the context pages,
3059 		 * allocate and clear the memory, map the
3060 		 * memory into DMA space, and fetch the
3061 		 * physical address of the block.
3062 		 */
3063 		if(bus_dma_tag_create(sc->parent_tag,
3064 			BCM_PAGE_SIZE,
3065 		    BCE_DMA_BOUNDARY,
3066 			sc->max_bus_addr,
3067 			BUS_SPACE_MAXADDR,
3068 			NULL, NULL,
3069 			BCM_PAGE_SIZE,
3070 			1,
3071 			BCM_PAGE_SIZE,
3072 			0,
3073 			NULL, NULL,
3074 			&sc->ctx_tag)) {
3075 			BCE_PRINTF("%s(%d): Could not allocate CTX DMA tag!\n",
3076 				__FILE__, __LINE__);
3077 			rc = ENOMEM;
3078 			goto bce_dma_alloc_exit;
3079 		}
3080 
3081 		for (i = 0; i < sc->ctx_pages; i++) {
3082 
3083 			if(bus_dmamem_alloc(sc->ctx_tag,
3084 		    		(void **)&sc->ctx_block[i],
3085 	    		BUS_DMA_NOWAIT,
3086 		    	&sc->ctx_map[i])) {
3087 				BCE_PRINTF("%s(%d): Could not allocate CTX "
3088 					"DMA memory!\n", __FILE__, __LINE__);
3089 				rc = ENOMEM;
3090 				goto bce_dma_alloc_exit;
3091 			}
3092 
3093 			bzero((char *)sc->ctx_block[i], BCM_PAGE_SIZE);
3094 
3095 			error = bus_dmamap_load(sc->ctx_tag,
3096 	    		sc->ctx_map[i],
3097 	    		sc->ctx_block[i],
3098 		    	BCM_PAGE_SIZE,
3099 		    	bce_dma_map_addr,
3100 	    		&sc->ctx_paddr[i],
3101 	    		BUS_DMA_NOWAIT);
3102 
3103 			if (error) {
3104 				BCE_PRINTF("%s(%d): Could not map CTX DMA memory!\n",
3105 					__FILE__, __LINE__);
3106 				rc = ENOMEM;
3107 				goto bce_dma_alloc_exit;
3108 			}
3109 
3110 			DBPRINT(sc, BCE_INFO, "%s(): ctx_paddr[%d] = 0x%jX\n",
3111 				__FUNCTION__, i, (uintmax_t) sc->ctx_paddr[i]);
3112 		}
3113 	}
3114 
3115 	/*
3116 	 * Create a DMA tag for the TX buffer descriptor chain,
3117 	 * allocate the memory, and fetch the
3118 	 * physical address of each block.
3119 	 */
3120 	if(bus_dma_tag_create(sc->parent_tag,
3121 			BCM_PAGE_SIZE,
3122 		    BCE_DMA_BOUNDARY,
3123 			sc->max_bus_addr,
3124 			BUS_SPACE_MAXADDR,
3125 			NULL, NULL,
3126 			BCE_TX_CHAIN_PAGE_SZ,
3127 			1,
3128 			BCE_TX_CHAIN_PAGE_SZ,
3129 			0,
3130 			NULL, NULL,
3131 			&sc->tx_bd_chain_tag)) {
3132 		BCE_PRINTF("%s(%d): Could not allocate TX descriptor chain DMA tag!\n",
3133 			__FILE__, __LINE__);
3134 		rc = ENOMEM;
3135 		goto bce_dma_alloc_exit;
3136 	}
3137 
3138 	for (i = 0; i < TX_PAGES; i++) {
3139 
3140 		if(bus_dmamem_alloc(sc->tx_bd_chain_tag,
3141 	    		(void **)&sc->tx_bd_chain[i],
3142 	    		BUS_DMA_NOWAIT,
3143 		    	&sc->tx_bd_chain_map[i])) {
3144 			BCE_PRINTF("%s(%d): Could not allocate TX descriptor "
3145 				"chain DMA memory!\n", __FILE__, __LINE__);
3146 			rc = ENOMEM;
3147 			goto bce_dma_alloc_exit;
3148 		}
3149 
3150 		error = bus_dmamap_load(sc->tx_bd_chain_tag,
3151 	    		sc->tx_bd_chain_map[i],
3152 	    		sc->tx_bd_chain[i],
3153 		    	BCE_TX_CHAIN_PAGE_SZ,
3154 		    	bce_dma_map_addr,
3155 	    		&sc->tx_bd_chain_paddr[i],
3156 	    		BUS_DMA_NOWAIT);
3157 
3158 		if (error) {
3159 			BCE_PRINTF("%s(%d): Could not map TX descriptor chain DMA memory!\n",
3160 				__FILE__, __LINE__);
3161 			rc = ENOMEM;
3162 			goto bce_dma_alloc_exit;
3163 		}
3164 
3165 		DBPRINT(sc, BCE_INFO, "%s(): tx_bd_chain_paddr[%d] = 0x%jX\n",
3166 			__FUNCTION__, i, (uintmax_t) sc->tx_bd_chain_paddr[i]);
3167 	}
3168 
3169 	/* Check the required size before mapping to conserve resources. */
3170 	if (bce_tso_enable) {
3171 		max_size     = BCE_TSO_MAX_SIZE;
3172 		max_segments = BCE_MAX_SEGMENTS;
3173 		max_seg_size = BCE_TSO_MAX_SEG_SIZE;
3174 	} else {
3175 		max_size     = MCLBYTES * BCE_MAX_SEGMENTS;
3176 		max_segments = BCE_MAX_SEGMENTS;
3177 		max_seg_size = MCLBYTES;
3178 	}
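	/*
	 * When TSO is enabled a single transmit may map an entire TSO frame,
	 * so the tag must allow the larger BCE_TSO_MAX_SIZE mapping;
	 * otherwise a frame never needs more than BCE_MAX_SEGMENTS
	 * standard mbuf clusters.
	 */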
3179 
3180 	/* Create a DMA tag for TX mbufs. */
3181 	if (bus_dma_tag_create(sc->parent_tag,
3182 			1,
3183 			BCE_DMA_BOUNDARY,
3184 			sc->max_bus_addr,
3185 			BUS_SPACE_MAXADDR,
3186 			NULL, NULL,
3187 			max_size,
3188 			max_segments,
3189 			max_seg_size,
3190 			0,
3191 			NULL, NULL,
3192 			&sc->tx_mbuf_tag)) {
3193 		BCE_PRINTF("%s(%d): Could not allocate TX mbuf DMA tag!\n",
3194 			__FILE__, __LINE__);
3195 		rc = ENOMEM;
3196 		goto bce_dma_alloc_exit;
3197 	}
3198 
3199 	/* Create DMA maps for the TX mbuf clusters. */
3200 	for (i = 0; i < TOTAL_TX_BD; i++) {
3201 		if (bus_dmamap_create(sc->tx_mbuf_tag, BUS_DMA_NOWAIT,
3202 			&sc->tx_mbuf_map[i])) {
3203 			BCE_PRINTF("%s(%d): Unable to create TX mbuf DMA map!\n",
3204 				__FILE__, __LINE__);
3205 			rc = ENOMEM;
3206 			goto bce_dma_alloc_exit;
3207 		}
3208 	}
3209 
3210 	/*
3211 	 * Create a DMA tag for the RX buffer descriptor chain,
3212 	 * allocate and clear the memory, and fetch the physical
3213 	 * address of the blocks.
3214 	 */
3215 	if (bus_dma_tag_create(sc->parent_tag,
3216 			BCM_PAGE_SIZE,
3217 			BCE_DMA_BOUNDARY,
3218 			BUS_SPACE_MAXADDR,
3219 			sc->max_bus_addr,
3220 			NULL, NULL,
3221 			BCE_RX_CHAIN_PAGE_SZ,
3222 			1,
3223 			BCE_RX_CHAIN_PAGE_SZ,
3224 			0,
3225 			NULL, NULL,
3226 			&sc->rx_bd_chain_tag)) {
3227 		BCE_PRINTF("%s(%d): Could not allocate RX descriptor chain DMA tag!\n",
3228 			__FILE__, __LINE__);
3229 		rc = ENOMEM;
3230 		goto bce_dma_alloc_exit;
3231 	}
3232 
3233 	for (i = 0; i < RX_PAGES; i++) {
3234 
3235 		if (bus_dmamem_alloc(sc->rx_bd_chain_tag,
3236 	    		(void **)&sc->rx_bd_chain[i],
3237 	    		BUS_DMA_NOWAIT,
3238 		    	&sc->rx_bd_chain_map[i])) {
3239 			BCE_PRINTF("%s(%d): Could not allocate RX descriptor chain "
3240 				"DMA memory!\n", __FILE__, __LINE__);
3241 			rc = ENOMEM;
3242 			goto bce_dma_alloc_exit;
3243 		}
3244 
3245 		bzero((char *)sc->rx_bd_chain[i], BCE_RX_CHAIN_PAGE_SZ);
3246 
3247 		error = bus_dmamap_load(sc->rx_bd_chain_tag,
3248 	    		sc->rx_bd_chain_map[i],
3249 	    		sc->rx_bd_chain[i],
3250 		    	BCE_RX_CHAIN_PAGE_SZ,
3251 		    	bce_dma_map_addr,
3252 	    		&sc->rx_bd_chain_paddr[i],
3253 	    		BUS_DMA_NOWAIT);
3254 
3255 		if (error) {
3256 			BCE_PRINTF("%s(%d): Could not map RX descriptor chain DMA memory!\n",
3257 				__FILE__, __LINE__);
3258 			rc = ENOMEM;
3259 			goto bce_dma_alloc_exit;
3260 		}
3261 
3262 		DBPRINT(sc, BCE_INFO, "%s(): rx_bd_chain_paddr[%d] = 0x%jX\n",
3263 			__FUNCTION__, i, (uintmax_t) sc->rx_bd_chain_paddr[i]);
3264 	}
3265 
3266 	/*
3267 	 * Create a DMA tag for RX mbufs.
3268 	 */
3269 #ifdef ZERO_COPY_SOCKETS
3270 	max_size = max_seg_size = ((sc->rx_bd_mbuf_alloc_size < MCLBYTES) ?
3271 		MCLBYTES : sc->rx_bd_mbuf_alloc_size);
3272 #else
3273 	max_size = max_seg_size = MJUM9BYTES;
3274 #endif
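	/*
	 * Without ZERO_COPY_SOCKETS the page chain is not used, so a
	 * received frame (including a jumbo frame) must fit entirely in
	 * one RX mbuf, hence the MJUM9BYTES limit.
	 */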
3275 
3276 	if (bus_dma_tag_create(sc->parent_tag,
3277 			1,
3278 			BCE_DMA_BOUNDARY,
3279 			sc->max_bus_addr,
3280 			BUS_SPACE_MAXADDR,
3281 			NULL, NULL,
3282 			max_size,
3283 			1,
3284 			max_seg_size,
3285 			0,
3286 			NULL, NULL,
3287 	    	&sc->rx_mbuf_tag)) {
3288 		BCE_PRINTF("%s(%d): Could not allocate RX mbuf DMA tag!\n",
3289 			__FILE__, __LINE__);
3290 		rc = ENOMEM;
3291 		goto bce_dma_alloc_exit;
3292 	}
3293 
3294 	/* Create DMA maps for the RX mbuf clusters. */
3295 	for (i = 0; i < TOTAL_RX_BD; i++) {
3296 		if (bus_dmamap_create(sc->rx_mbuf_tag, BUS_DMA_NOWAIT,
3297 				&sc->rx_mbuf_map[i])) {
3298 			BCE_PRINTF("%s(%d): Unable to create RX mbuf DMA map!\n",
3299 				__FILE__, __LINE__);
3300 			rc = ENOMEM;
3301 			goto bce_dma_alloc_exit;
3302 		}
3303 	}
3304 
3305 #ifdef ZERO_COPY_SOCKETS
3306 	/*
3307 	 * Create a DMA tag for the page buffer descriptor chain,
3308 	 * allocate and clear the memory, and fetch the physical
3309 	 * address of the blocks.
3310 	 */
3311 	if (bus_dma_tag_create(sc->parent_tag,
3312 			BCM_PAGE_SIZE,
3313 			BCE_DMA_BOUNDARY,
3314 			BUS_SPACE_MAXADDR,
3315 			sc->max_bus_addr,
3316 			NULL, NULL,
3317 			BCE_PG_CHAIN_PAGE_SZ,
3318 			1,
3319 			BCE_PG_CHAIN_PAGE_SZ,
3320 			0,
3321 			NULL, NULL,
3322 			&sc->pg_bd_chain_tag)) {
3323 		BCE_PRINTF("%s(%d): Could not allocate page descriptor chain DMA tag!\n",
3324 			__FILE__, __LINE__);
3325 		rc = ENOMEM;
3326 		goto bce_dma_alloc_exit;
3327 	}
3328 
3329 	for (i = 0; i < PG_PAGES; i++) {
3330 
3331 		if (bus_dmamem_alloc(sc->pg_bd_chain_tag,
3332 	    		(void **)&sc->pg_bd_chain[i],
3333 	    		BUS_DMA_NOWAIT,
3334 		    	&sc->pg_bd_chain_map[i])) {
3335 			BCE_PRINTF("%s(%d): Could not allocate page descriptor chain "
3336 				"DMA memory!\n", __FILE__, __LINE__);
3337 			rc = ENOMEM;
3338 			goto bce_dma_alloc_exit;
3339 		}
3340 
3341 		bzero((char *)sc->pg_bd_chain[i], BCE_PG_CHAIN_PAGE_SZ);
3342 
3343 		error = bus_dmamap_load(sc->pg_bd_chain_tag,
3344 	    		sc->pg_bd_chain_map[i],
3345 	    		sc->pg_bd_chain[i],
3346 		    	BCE_PG_CHAIN_PAGE_SZ,
3347 		    	bce_dma_map_addr,
3348 	    		&sc->pg_bd_chain_paddr[i],
3349 	    		BUS_DMA_NOWAIT);
3350 
3351 		if (error) {
3352 			BCE_PRINTF("%s(%d): Could not map page descriptor chain DMA memory!\n",
3353 				__FILE__, __LINE__);
3354 			rc = ENOMEM;
3355 			goto bce_dma_alloc_exit;
3356 		}
3357 
3358 		DBPRINT(sc, BCE_INFO, "%s(): pg_bd_chain_paddr[%d] = 0x%jX\n",
3359 			__FUNCTION__, i, (uintmax_t) sc->pg_bd_chain_paddr[i]);
3360 	}
3361 
3362 	/*
3363 	 * Create a DMA tag for page mbufs.
3364 	 */
3365 	max_size = max_seg_size = ((sc->pg_bd_mbuf_alloc_size < MCLBYTES) ?
3366 		MCLBYTES : sc->pg_bd_mbuf_alloc_size);
3367 
3368 	if (bus_dma_tag_create(sc->parent_tag,
3369 			1,
3370 			BCE_DMA_BOUNDARY,
3371 			sc->max_bus_addr,
3372 			BUS_SPACE_MAXADDR,
3373 			NULL, NULL,
3374 			max_size,
3375 			1,
3376 			max_seg_size,
3377 			0,
3378 			NULL, NULL,
3379 	    	&sc->pg_mbuf_tag)) {
3380 		BCE_PRINTF("%s(%d): Could not allocate page mbuf DMA tag!\n",
3381 			__FILE__, __LINE__);
3382 		rc = ENOMEM;
3383 		goto bce_dma_alloc_exit;
3384 	}
3385 
3386 	/* Create DMA maps for the page mbuf clusters. */
3387 	for (i = 0; i < TOTAL_PG_BD; i++) {
3388 		if (bus_dmamap_create(sc->pg_mbuf_tag, BUS_DMA_NOWAIT,
3389 				&sc->pg_mbuf_map[i])) {
3390 			BCE_PRINTF("%s(%d): Unable to create page mbuf DMA map!\n",
3391 				__FILE__, __LINE__);
3392 			rc = ENOMEM;
3393 			goto bce_dma_alloc_exit;
3394 		}
3395 	}
3396 #endif
3397 
3398 bce_dma_alloc_exit:
3399 	DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_CTX);
3400 	return(rc);
3401 }
3402 
3403 
3404 /****************************************************************************/
3405 /* Release all resources used by the driver.                                */
3406 /*                                                                          */
3407 /* Releases all resources acquired by the driver including interrupts,      */
3408 /* interrupt handler, interfaces, mutexes, and DMA memory.                  */
3409 /*                                                                          */
3410 /* Returns:                                                                 */
3411 /*   Nothing.                                                               */
3412 /****************************************************************************/
3413 static void
3414 bce_release_resources(struct bce_softc *sc)
3415 {
3416 	device_t dev;
3417 
3418 	DBENTER(BCE_VERBOSE_RESET);
3419 
3420 	dev = sc->bce_dev;
3421 
3422 	bce_dma_free(sc);
3423 
3424 	if (sc->bce_intrhand != NULL) {
3425 		DBPRINT(sc, BCE_INFO_RESET, "Removing interrupt handler.\n");
3426 		bus_teardown_intr(dev, sc->bce_res_irq, sc->bce_intrhand);
3427 	}
3428 
3429 	if (sc->bce_res_irq != NULL) {
3430 		DBPRINT(sc, BCE_INFO_RESET, "Releasing IRQ.\n");
3431 		bus_release_resource(dev, SYS_RES_IRQ, sc->bce_irq_rid,
3432 			sc->bce_res_irq);
3433 	}
3434 
3435 	if (sc->bce_flags & (BCE_USING_MSI_FLAG | BCE_USING_MSIX_FLAG)) {
3436 		DBPRINT(sc, BCE_INFO_RESET, "Releasing MSI/MSI-X vector.\n");
3437 		pci_release_msi(dev);
3438 	}
3439 
3440 	if (sc->bce_res_mem != NULL) {
3441 		DBPRINT(sc, BCE_INFO_RESET, "Releasing PCI memory.\n");
3442 		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0), sc->bce_res_mem);
3443 	}
3444 
3445 	if (sc->bce_ifp != NULL) {
3446 		DBPRINT(sc, BCE_INFO_RESET, "Releasing IF.\n");
3447 		if_free(sc->bce_ifp);
3448 	}
3449 
3450 	if (mtx_initialized(&sc->bce_mtx))
3451 		BCE_LOCK_DESTROY(sc);
3452 
3453 	DBEXIT(BCE_VERBOSE_RESET);
3454 }
3455 
3456 
3457 /****************************************************************************/
3458 /* Firmware synchronization.                                                */
3459 /*                                                                          */
3460 /* Before performing certain events such as a chip reset, synchronize with  */
3461 /* the firmware first.                                                      */
3462 /*                                                                          */
3463 /* Returns:                                                                 */
3464 /*   0 for success, positive value for failure.                             */
3465 /****************************************************************************/
3466 static int
3467 bce_fw_sync(struct bce_softc *sc, u32 msg_data)
3468 {
3469 	int i, rc = 0;
3470 	u32 val;
3471 
3472 	DBENTER(BCE_VERBOSE_RESET);
3473 
3474 	/* Don't waste any time if we've timed out before. */
3475 	if (sc->bce_fw_timed_out) {
3476 		rc = EBUSY;
3477 		goto bce_fw_sync_exit;
3478 	}
3479 
3480 	/* Increment the message sequence number. */
3481 	sc->bce_fw_wr_seq++;
3482 	msg_data |= sc->bce_fw_wr_seq;
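	/*
	 * The sequence number occupies the low-order bits of the message;
	 * the bootcode echoes those bits back through the firmware mailbox,
	 * which is how the acknowledgement is matched below.
	 */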
3483 
3484  	DBPRINT(sc, BCE_VERBOSE_FIRMWARE, "bce_fw_sync(): msg_data = 0x%08X\n",
3485  		msg_data);
3486 
3487 	/* Send the message to the bootcode driver mailbox. */
3488 	REG_WR_IND(sc, sc->bce_shmem_base + BCE_DRV_MB, msg_data);
3489 
3490 	/* Wait for the bootcode to acknowledge the message. */
3491 	for (i = 0; i < FW_ACK_TIME_OUT_MS; i++) {
3492 		/* Check for a response in the bootcode firmware mailbox. */
3493 		val = REG_RD_IND(sc, sc->bce_shmem_base + BCE_FW_MB);
3494 		if ((val & BCE_FW_MSG_ACK) == (msg_data & BCE_DRV_MSG_SEQ))
3495 			break;
3496 		DELAY(1000);
3497 	}
3498 
3499 	/* If we've timed out, tell the bootcode that we've stopped waiting. */
3500 	if (((val & BCE_FW_MSG_ACK) != (msg_data & BCE_DRV_MSG_SEQ)) &&
3501 		((msg_data & BCE_DRV_MSG_DATA) != BCE_DRV_MSG_DATA_WAIT0)) {
3502 
3503 		BCE_PRINTF("%s(%d): Firmware synchronization timeout! "
3504 			"msg_data = 0x%08X\n",
3505 			__FILE__, __LINE__, msg_data);
3506 
3507 		msg_data &= ~BCE_DRV_MSG_CODE;
3508 		msg_data |= BCE_DRV_MSG_CODE_FW_TIMEOUT;
3509 
3510 		REG_WR_IND(sc, sc->bce_shmem_base + BCE_DRV_MB, msg_data);
3511 
3512 		sc->bce_fw_timed_out = 1;
3513 		rc = EBUSY;
3514 	}
3515 
3516 bce_fw_sync_exit:
3517 	DBEXIT(BCE_VERBOSE_RESET);
3518 	return (rc);
3519 }
3520 
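/*
 * In this driver bce_fw_sync() brackets major events: bce_reset() passes
 * BCE_DRV_MSG_DATA_WAIT0 before the chip reset and WAIT1 once the reset has
 * completed, and bce_blockinit() passes WAIT2 before re-enabling the MAC.
 */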
3521 
3522 /****************************************************************************/
3523 /* Load Receive Virtual 2 Physical (RV2P) processor firmware.               */
3524 /*                                                                          */
3525 /* Returns:                                                                 */
3526 /*   Nothing.                                                               */
3527 /****************************************************************************/
3528 static void
3529 bce_load_rv2p_fw(struct bce_softc *sc, u32 *rv2p_code,
3530 	u32 rv2p_code_len, u32 rv2p_proc)
3531 {
3532 	int i;
3533 	u32 val;
3534 
3535 	DBENTER(BCE_VERBOSE_RESET);
3536 
3537 	/* Set the page size used by RV2P. */
3538 	if (rv2p_proc == RV2P_PROC2) {
3539 		BCE_RV2P_PROC2_CHG_MAX_BD_PAGE(USABLE_RX_BD_PER_PAGE);
3540 	}
3541 
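	/*
	 * Each RV2P instruction is 64 bits wide: write the high and low
	 * 32-bit words, then latch them into instruction RAM at index i/8
	 * through the processor's ADDR_CMD register.
	 */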
3542 	for (i = 0; i < rv2p_code_len; i += 8) {
3543 		REG_WR(sc, BCE_RV2P_INSTR_HIGH, *rv2p_code);
3544 		rv2p_code++;
3545 		REG_WR(sc, BCE_RV2P_INSTR_LOW, *rv2p_code);
3546 		rv2p_code++;
3547 
3548 		if (rv2p_proc == RV2P_PROC1) {
3549 			val = (i / 8) | BCE_RV2P_PROC1_ADDR_CMD_RDWR;
3550 			REG_WR(sc, BCE_RV2P_PROC1_ADDR_CMD, val);
3551 		}
3552 		else {
3553 			val = (i / 8) | BCE_RV2P_PROC2_ADDR_CMD_RDWR;
3554 			REG_WR(sc, BCE_RV2P_PROC2_ADDR_CMD, val);
3555 		}
3556 	}
3557 
3558 	/* Reset the processor, un-stall is done later. */
3559 	if (rv2p_proc == RV2P_PROC1) {
3560 		REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC1_RESET);
3561 	}
3562 	else {
3563 		REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC2_RESET);
3564 	}
3565 
3566 	DBEXIT(BCE_VERBOSE_RESET);
3567 }
3568 
3569 
3570 /****************************************************************************/
3571 /* Load RISC processor firmware.                                            */
3572 /*                                                                          */
3573 /* Loads firmware from the file if_bcefw.h into the scratchpad memory       */
3574 /* associated with a particular processor.                                  */
3575 /*                                                                          */
3576 /* Returns:                                                                 */
3577 /*   Nothing.                                                               */
3578 /****************************************************************************/
3579 static void
3580 bce_load_cpu_fw(struct bce_softc *sc, struct cpu_reg *cpu_reg,
3581 	struct fw_info *fw)
3582 {
3583 	u32 offset;
3584 	u32 val;
3585 
3586 	DBENTER(BCE_VERBOSE_RESET);
3587 
3588 	/* Halt the CPU. */
3589 	val = REG_RD_IND(sc, cpu_reg->mode);
3590 	val |= cpu_reg->mode_value_halt;
3591 	REG_WR_IND(sc, cpu_reg->mode, val);
3592 	REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
3593 
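	/*
	 * The offset calculations below translate each section's MIPS
	 * virtual address from the firmware image into an offset within
	 * the processor scratchpad (spad_base + (addr - mips_view_base)).
	 */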
3594 	/* Load the Text area. */
3595 	offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
3596 	if (fw->text) {
3597 		int j;
3598 
3599 		for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
3600 			REG_WR_IND(sc, offset, fw->text[j]);
3601 		}
3602 	}
3603 
3604 	/* Load the Data area. */
3605 	offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
3606 	if (fw->data) {
3607 		int j;
3608 
3609 		for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
3610 			REG_WR_IND(sc, offset, fw->data[j]);
3611 		}
3612 	}
3613 
3614 	/* Load the SBSS area. */
3615 	offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
3616 	if (fw->sbss) {
3617 		int j;
3618 
3619 		for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
3620 			REG_WR_IND(sc, offset, fw->sbss[j]);
3621 		}
3622 	}
3623 
3624 	/* Load the BSS area. */
3625 	offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
3626 	if (fw->bss) {
3627 		int j;
3628 
3629 		for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
3630 			REG_WR_IND(sc, offset, fw->bss[j]);
3631 		}
3632 	}
3633 
3634 	/* Load the Read-Only area. */
3635 	offset = cpu_reg->spad_base +
3636 		(fw->rodata_addr - cpu_reg->mips_view_base);
3637 	if (fw->rodata) {
3638 		int j;
3639 
3640 		for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
3641 			REG_WR_IND(sc, offset, fw->rodata[j]);
3642 		}
3643 	}
3644 
3645 	/* Clear the pre-fetch instruction. */
3646 	REG_WR_IND(sc, cpu_reg->inst, 0);
3647 	REG_WR_IND(sc, cpu_reg->pc, fw->start_addr);
3648 
3649 	/* Start the CPU. */
3650 	val = REG_RD_IND(sc, cpu_reg->mode);
3651 	val &= ~cpu_reg->mode_value_halt;
3652 	REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
3653 	REG_WR_IND(sc, cpu_reg->mode, val);
3654 
3655 	DBEXIT(BCE_VERBOSE_RESET);
3656 }
3657 
3658 
3659 /****************************************************************************/
3660 /* Initialize the RX CPU.                                                   */
3661 /*                                                                          */
3662 /* Returns:                                                                 */
3663 /*   Nothing.                                                               */
3664 /****************************************************************************/
3665 static void
3666 bce_init_rxp_cpu(struct bce_softc *sc)
3667 {
3668 	struct cpu_reg cpu_reg;
3669 	struct fw_info fw;
3670 
3671 	DBENTER(BCE_VERBOSE_RESET);
3672 
3673 	cpu_reg.mode = BCE_RXP_CPU_MODE;
3674 	cpu_reg.mode_value_halt = BCE_RXP_CPU_MODE_SOFT_HALT;
3675 	cpu_reg.mode_value_sstep = BCE_RXP_CPU_MODE_STEP_ENA;
3676 	cpu_reg.state = BCE_RXP_CPU_STATE;
3677 	cpu_reg.state_value_clear = 0xffffff;
3678 	cpu_reg.gpr0 = BCE_RXP_CPU_REG_FILE;
3679 	cpu_reg.evmask = BCE_RXP_CPU_EVENT_MASK;
3680 	cpu_reg.pc = BCE_RXP_CPU_PROGRAM_COUNTER;
3681 	cpu_reg.inst = BCE_RXP_CPU_INSTRUCTION;
3682 	cpu_reg.bp = BCE_RXP_CPU_HW_BREAKPOINT;
3683 	cpu_reg.spad_base = BCE_RXP_SCRATCH;
3684 	cpu_reg.mips_view_base = 0x8000000;
3685 
3686 	if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
3687 		(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
3688  		fw.ver_major = bce_RXP_b09FwReleaseMajor;
3689 		fw.ver_minor = bce_RXP_b09FwReleaseMinor;
3690 		fw.ver_fix = bce_RXP_b09FwReleaseFix;
3691 		fw.start_addr = bce_RXP_b09FwStartAddr;
3692 
3693 		fw.text_addr = bce_RXP_b09FwTextAddr;
3694 		fw.text_len = bce_RXP_b09FwTextLen;
3695 		fw.text_index = 0;
3696 		fw.text = bce_RXP_b09FwText;
3697 
3698 		fw.data_addr = bce_RXP_b09FwDataAddr;
3699 		fw.data_len = bce_RXP_b09FwDataLen;
3700 		fw.data_index = 0;
3701 		fw.data = bce_RXP_b09FwData;
3702 
3703 		fw.sbss_addr = bce_RXP_b09FwSbssAddr;
3704 		fw.sbss_len = bce_RXP_b09FwSbssLen;
3705 		fw.sbss_index = 0;
3706 		fw.sbss = bce_RXP_b09FwSbss;
3707 
3708 		fw.bss_addr = bce_RXP_b09FwBssAddr;
3709 		fw.bss_len = bce_RXP_b09FwBssLen;
3710 		fw.bss_index = 0;
3711 		fw.bss = bce_RXP_b09FwBss;
3712 
3713 		fw.rodata_addr = bce_RXP_b09FwRodataAddr;
3714 		fw.rodata_len = bce_RXP_b09FwRodataLen;
3715 		fw.rodata_index = 0;
3716 		fw.rodata = bce_RXP_b09FwRodata;
3717 	} else {
3718 		fw.ver_major = bce_RXP_b06FwReleaseMajor;
3719 		fw.ver_minor = bce_RXP_b06FwReleaseMinor;
3720 		fw.ver_fix = bce_RXP_b06FwReleaseFix;
3721 		fw.start_addr = bce_RXP_b06FwStartAddr;
3722 
3723 		fw.text_addr = bce_RXP_b06FwTextAddr;
3724 		fw.text_len = bce_RXP_b06FwTextLen;
3725 		fw.text_index = 0;
3726 		fw.text = bce_RXP_b06FwText;
3727 
3728 		fw.data_addr = bce_RXP_b06FwDataAddr;
3729 		fw.data_len = bce_RXP_b06FwDataLen;
3730 		fw.data_index = 0;
3731 		fw.data = bce_RXP_b06FwData;
3732 
3733 		fw.sbss_addr = bce_RXP_b06FwSbssAddr;
3734 		fw.sbss_len = bce_RXP_b06FwSbssLen;
3735 		fw.sbss_index = 0;
3736 		fw.sbss = bce_RXP_b06FwSbss;
3737 
3738 		fw.bss_addr = bce_RXP_b06FwBssAddr;
3739 		fw.bss_len = bce_RXP_b06FwBssLen;
3740 		fw.bss_index = 0;
3741 		fw.bss = bce_RXP_b06FwBss;
3742 
3743 		fw.rodata_addr = bce_RXP_b06FwRodataAddr;
3744 		fw.rodata_len = bce_RXP_b06FwRodataLen;
3745 		fw.rodata_index = 0;
3746 		fw.rodata = bce_RXP_b06FwRodata;
3747 	}
3748 
3749 	DBPRINT(sc, BCE_INFO_RESET, "Loading RX firmware.\n");
3750 	bce_load_cpu_fw(sc, &cpu_reg, &fw);
3751 
3752 	DBEXIT(BCE_VERBOSE_RESET);
3753 }
3754 
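/*
 * The fw_info setup in bce_init_rxp_cpu() above is repeated nearly verbatim
 * for the TXP, TPAT, CP, and COM processors below.  A hypothetical
 * token-pasting helper (sketch only, not part of this driver) could collapse
 * each section assignment, for example:
 *
 *	#define BCE_FW_SECTION(fw, pfx, sec, SEC)		\
 *	do {							\
 *		(fw)->sec##_addr  = pfx##Fw##SEC##Addr;		\
 *		(fw)->sec##_len   = pfx##Fw##SEC##Len;		\
 *		(fw)->sec##_index = 0;				\
 *		(fw)->sec         = pfx##Fw##SEC;		\
 *	} while (0)
 *
 *	BCE_FW_SECTION(&fw, bce_RXP_b09, text, Text);
 */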
3755 
3756 /****************************************************************************/
3757 /* Initialize the TX CPU.                                                   */
3758 /*                                                                          */
3759 /* Returns:                                                                 */
3760 /*   Nothing.                                                               */
3761 /****************************************************************************/
3762 static void
3763 bce_init_txp_cpu(struct bce_softc *sc)
3764 {
3765 	struct cpu_reg cpu_reg;
3766 	struct fw_info fw;
3767 
3768 	DBENTER(BCE_VERBOSE_RESET);
3769 
3770 	cpu_reg.mode = BCE_TXP_CPU_MODE;
3771 	cpu_reg.mode_value_halt = BCE_TXP_CPU_MODE_SOFT_HALT;
3772 	cpu_reg.mode_value_sstep = BCE_TXP_CPU_MODE_STEP_ENA;
3773 	cpu_reg.state = BCE_TXP_CPU_STATE;
3774 	cpu_reg.state_value_clear = 0xffffff;
3775 	cpu_reg.gpr0 = BCE_TXP_CPU_REG_FILE;
3776 	cpu_reg.evmask = BCE_TXP_CPU_EVENT_MASK;
3777 	cpu_reg.pc = BCE_TXP_CPU_PROGRAM_COUNTER;
3778 	cpu_reg.inst = BCE_TXP_CPU_INSTRUCTION;
3779 	cpu_reg.bp = BCE_TXP_CPU_HW_BREAKPOINT;
3780 	cpu_reg.spad_base = BCE_TXP_SCRATCH;
3781 	cpu_reg.mips_view_base = 0x8000000;
3782 
3783 	if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
3784 		(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
3785 		fw.ver_major = bce_TXP_b09FwReleaseMajor;
3786 		fw.ver_minor = bce_TXP_b09FwReleaseMinor;
3787 		fw.ver_fix = bce_TXP_b09FwReleaseFix;
3788 		fw.start_addr = bce_TXP_b09FwStartAddr;
3789 
3790 		fw.text_addr = bce_TXP_b09FwTextAddr;
3791 		fw.text_len = bce_TXP_b09FwTextLen;
3792 		fw.text_index = 0;
3793 		fw.text = bce_TXP_b09FwText;
3794 
3795 		fw.data_addr = bce_TXP_b09FwDataAddr;
3796 		fw.data_len = bce_TXP_b09FwDataLen;
3797 		fw.data_index = 0;
3798 		fw.data = bce_TXP_b09FwData;
3799 
3800 		fw.sbss_addr = bce_TXP_b09FwSbssAddr;
3801 		fw.sbss_len = bce_TXP_b09FwSbssLen;
3802 		fw.sbss_index = 0;
3803 		fw.sbss = bce_TXP_b09FwSbss;
3804 
3805 		fw.bss_addr = bce_TXP_b09FwBssAddr;
3806 		fw.bss_len = bce_TXP_b09FwBssLen;
3807 		fw.bss_index = 0;
3808 		fw.bss = bce_TXP_b09FwBss;
3809 
3810 		fw.rodata_addr = bce_TXP_b09FwRodataAddr;
3811 		fw.rodata_len = bce_TXP_b09FwRodataLen;
3812 		fw.rodata_index = 0;
3813 		fw.rodata = bce_TXP_b09FwRodata;
3814 	} else {
3815 		fw.ver_major = bce_TXP_b06FwReleaseMajor;
3816 		fw.ver_minor = bce_TXP_b06FwReleaseMinor;
3817 		fw.ver_fix = bce_TXP_b06FwReleaseFix;
3818 		fw.start_addr = bce_TXP_b06FwStartAddr;
3819 
3820 		fw.text_addr = bce_TXP_b06FwTextAddr;
3821 		fw.text_len = bce_TXP_b06FwTextLen;
3822 		fw.text_index = 0;
3823 		fw.text = bce_TXP_b06FwText;
3824 
3825 		fw.data_addr = bce_TXP_b06FwDataAddr;
3826 		fw.data_len = bce_TXP_b06FwDataLen;
3827 		fw.data_index = 0;
3828 		fw.data = bce_TXP_b06FwData;
3829 
3830 		fw.sbss_addr = bce_TXP_b06FwSbssAddr;
3831 		fw.sbss_len = bce_TXP_b06FwSbssLen;
3832 		fw.sbss_index = 0;
3833 		fw.sbss = bce_TXP_b06FwSbss;
3834 
3835 		fw.bss_addr = bce_TXP_b06FwBssAddr;
3836 		fw.bss_len = bce_TXP_b06FwBssLen;
3837 		fw.bss_index = 0;
3838 		fw.bss = bce_TXP_b06FwBss;
3839 
3840 		fw.rodata_addr = bce_TXP_b06FwRodataAddr;
3841 		fw.rodata_len = bce_TXP_b06FwRodataLen;
3842 		fw.rodata_index = 0;
3843 		fw.rodata = bce_TXP_b06FwRodata;
3844 	}
3845 
3846 	DBPRINT(sc, BCE_INFO_RESET, "Loading TX firmware.\n");
3847 	bce_load_cpu_fw(sc, &cpu_reg, &fw);
3848 
3849 	DBEXIT(BCE_VERBOSE_RESET);
3850 }
3851 
3852 
3853 /****************************************************************************/
3854 /* Initialize the TPAT CPU.                                                 */
3855 /*                                                                          */
3856 /* Returns:                                                                 */
3857 /*   Nothing.                                                               */
3858 /****************************************************************************/
3859 static void
3860 bce_init_tpat_cpu(struct bce_softc *sc)
3861 {
3862 	struct cpu_reg cpu_reg;
3863 	struct fw_info fw;
3864 
3865 	DBENTER(BCE_VERBOSE_RESET);
3866 
3867 	cpu_reg.mode = BCE_TPAT_CPU_MODE;
3868 	cpu_reg.mode_value_halt = BCE_TPAT_CPU_MODE_SOFT_HALT;
3869 	cpu_reg.mode_value_sstep = BCE_TPAT_CPU_MODE_STEP_ENA;
3870 	cpu_reg.state = BCE_TPAT_CPU_STATE;
3871 	cpu_reg.state_value_clear = 0xffffff;
3872 	cpu_reg.gpr0 = BCE_TPAT_CPU_REG_FILE;
3873 	cpu_reg.evmask = BCE_TPAT_CPU_EVENT_MASK;
3874 	cpu_reg.pc = BCE_TPAT_CPU_PROGRAM_COUNTER;
3875 	cpu_reg.inst = BCE_TPAT_CPU_INSTRUCTION;
3876 	cpu_reg.bp = BCE_TPAT_CPU_HW_BREAKPOINT;
3877 	cpu_reg.spad_base = BCE_TPAT_SCRATCH;
3878 	cpu_reg.mips_view_base = 0x8000000;
3879 
3880 	if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
3881 		(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
3882 		fw.ver_major = bce_TPAT_b09FwReleaseMajor;
3883 		fw.ver_minor = bce_TPAT_b09FwReleaseMinor;
3884 		fw.ver_fix = bce_TPAT_b09FwReleaseFix;
3885 		fw.start_addr = bce_TPAT_b09FwStartAddr;
3886 
3887 		fw.text_addr = bce_TPAT_b09FwTextAddr;
3888 		fw.text_len = bce_TPAT_b09FwTextLen;
3889 		fw.text_index = 0;
3890 		fw.text = bce_TPAT_b09FwText;
3891 
3892 		fw.data_addr = bce_TPAT_b09FwDataAddr;
3893 		fw.data_len = bce_TPAT_b09FwDataLen;
3894 		fw.data_index = 0;
3895 		fw.data = bce_TPAT_b09FwData;
3896 
3897 		fw.sbss_addr = bce_TPAT_b09FwSbssAddr;
3898 		fw.sbss_len = bce_TPAT_b09FwSbssLen;
3899 		fw.sbss_index = 0;
3900 		fw.sbss = bce_TPAT_b09FwSbss;
3901 
3902 		fw.bss_addr = bce_TPAT_b09FwBssAddr;
3903 		fw.bss_len = bce_TPAT_b09FwBssLen;
3904 		fw.bss_index = 0;
3905 		fw.bss = bce_TPAT_b09FwBss;
3906 
3907 		fw.rodata_addr = bce_TPAT_b09FwRodataAddr;
3908 		fw.rodata_len = bce_TPAT_b09FwRodataLen;
3909 		fw.rodata_index = 0;
3910 		fw.rodata = bce_TPAT_b09FwRodata;
3911 	} else {
3912 		fw.ver_major = bce_TPAT_b06FwReleaseMajor;
3913 		fw.ver_minor = bce_TPAT_b06FwReleaseMinor;
3914 		fw.ver_fix = bce_TPAT_b06FwReleaseFix;
3915 		fw.start_addr = bce_TPAT_b06FwStartAddr;
3916 
3917 		fw.text_addr = bce_TPAT_b06FwTextAddr;
3918 		fw.text_len = bce_TPAT_b06FwTextLen;
3919 		fw.text_index = 0;
3920 		fw.text = bce_TPAT_b06FwText;
3921 
3922 		fw.data_addr = bce_TPAT_b06FwDataAddr;
3923 		fw.data_len = bce_TPAT_b06FwDataLen;
3924 		fw.data_index = 0;
3925 		fw.data = bce_TPAT_b06FwData;
3926 
3927 		fw.sbss_addr = bce_TPAT_b06FwSbssAddr;
3928 		fw.sbss_len = bce_TPAT_b06FwSbssLen;
3929 		fw.sbss_index = 0;
3930 		fw.sbss = bce_TPAT_b06FwSbss;
3931 
3932 		fw.bss_addr = bce_TPAT_b06FwBssAddr;
3933 		fw.bss_len = bce_TPAT_b06FwBssLen;
3934 		fw.bss_index = 0;
3935 		fw.bss = bce_TPAT_b06FwBss;
3936 
3937 		fw.rodata_addr = bce_TPAT_b06FwRodataAddr;
3938 		fw.rodata_len = bce_TPAT_b06FwRodataLen;
3939 		fw.rodata_index = 0;
3940 		fw.rodata = bce_TPAT_b06FwRodata;
3941 	}
3942 
3943 	DBPRINT(sc, BCE_INFO_RESET, "Loading TPAT firmware.\n");
3944 	bce_load_cpu_fw(sc, &cpu_reg, &fw);
3945 
3946 	DBEXIT(BCE_VERBOSE_RESET);
3947 }
3948 
3949 
3950 /****************************************************************************/
3951 /* Initialize the CP CPU.                                                   */
3952 /*                                                                          */
3953 /* Returns:                                                                 */
3954 /*   Nothing.                                                               */
3955 /****************************************************************************/
3956 static void
3957 bce_init_cp_cpu(struct bce_softc *sc)
3958 {
3959 	struct cpu_reg cpu_reg;
3960 	struct fw_info fw;
3961 
3962 	DBENTER(BCE_VERBOSE_RESET);
3963 
3964 	cpu_reg.mode = BCE_CP_CPU_MODE;
3965 	cpu_reg.mode_value_halt = BCE_CP_CPU_MODE_SOFT_HALT;
3966 	cpu_reg.mode_value_sstep = BCE_CP_CPU_MODE_STEP_ENA;
3967 	cpu_reg.state = BCE_CP_CPU_STATE;
3968 	cpu_reg.state_value_clear = 0xffffff;
3969 	cpu_reg.gpr0 = BCE_CP_CPU_REG_FILE;
3970 	cpu_reg.evmask = BCE_CP_CPU_EVENT_MASK;
3971 	cpu_reg.pc = BCE_CP_CPU_PROGRAM_COUNTER;
3972 	cpu_reg.inst = BCE_CP_CPU_INSTRUCTION;
3973 	cpu_reg.bp = BCE_CP_CPU_HW_BREAKPOINT;
3974 	cpu_reg.spad_base = BCE_CP_SCRATCH;
3975 	cpu_reg.mips_view_base = 0x8000000;
3976 
3977 	if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
3978 		(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
3979 		fw.ver_major = bce_CP_b09FwReleaseMajor;
3980 		fw.ver_minor = bce_CP_b09FwReleaseMinor;
3981 		fw.ver_fix = bce_CP_b09FwReleaseFix;
3982 		fw.start_addr = bce_CP_b09FwStartAddr;
3983 
3984 		fw.text_addr = bce_CP_b09FwTextAddr;
3985 		fw.text_len = bce_CP_b09FwTextLen;
3986 		fw.text_index = 0;
3987 		fw.text = bce_CP_b09FwText;
3988 
3989 		fw.data_addr = bce_CP_b09FwDataAddr;
3990 		fw.data_len = bce_CP_b09FwDataLen;
3991 		fw.data_index = 0;
3992 		fw.data = bce_CP_b09FwData;
3993 
3994 		fw.sbss_addr = bce_CP_b09FwSbssAddr;
3995 		fw.sbss_len = bce_CP_b09FwSbssLen;
3996 		fw.sbss_index = 0;
3997 		fw.sbss = bce_CP_b09FwSbss;
3998 
3999 		fw.bss_addr = bce_CP_b09FwBssAddr;
4000 		fw.bss_len = bce_CP_b09FwBssLen;
4001 		fw.bss_index = 0;
4002 		fw.bss = bce_CP_b09FwBss;
4003 
4004 		fw.rodata_addr = bce_CP_b09FwRodataAddr;
4005 		fw.rodata_len = bce_CP_b09FwRodataLen;
4006 		fw.rodata_index = 0;
4007 		fw.rodata = bce_CP_b09FwRodata;
4008 	} else {
4009 		fw.ver_major = bce_CP_b06FwReleaseMajor;
4010 		fw.ver_minor = bce_CP_b06FwReleaseMinor;
4011 		fw.ver_fix = bce_CP_b06FwReleaseFix;
4012 		fw.start_addr = bce_CP_b06FwStartAddr;
4013 
4014 		fw.text_addr = bce_CP_b06FwTextAddr;
4015 		fw.text_len = bce_CP_b06FwTextLen;
4016 		fw.text_index = 0;
4017 		fw.text = bce_CP_b06FwText;
4018 
4019 		fw.data_addr = bce_CP_b06FwDataAddr;
4020 		fw.data_len = bce_CP_b06FwDataLen;
4021 		fw.data_index = 0;
4022 		fw.data = bce_CP_b06FwData;
4023 
4024 		fw.sbss_addr = bce_CP_b06FwSbssAddr;
4025 		fw.sbss_len = bce_CP_b06FwSbssLen;
4026 		fw.sbss_index = 0;
4027 		fw.sbss = bce_CP_b06FwSbss;
4028 
4029 		fw.bss_addr = bce_CP_b06FwBssAddr;
4030 		fw.bss_len = bce_CP_b06FwBssLen;
4031 		fw.bss_index = 0;
4032 		fw.bss = bce_CP_b06FwBss;
4033 
4034 		fw.rodata_addr = bce_CP_b06FwRodataAddr;
4035 		fw.rodata_len = bce_CP_b06FwRodataLen;
4036 		fw.rodata_index = 0;
4037 		fw.rodata = bce_CP_b06FwRodata;
4038 	}
4039 
4040 	DBPRINT(sc, BCE_INFO_RESET, "Loading CP firmware.\n");
4041 	bce_load_cpu_fw(sc, &cpu_reg, &fw);
4042 
4043 	DBEXIT(BCE_VERBOSE_RESET);
4044 }
4045 
4046 
4047 /****************************************************************************/
4048 /* Initialize the COM CPU.                                                  */
4049 /*                                                                          */
4050 /* Returns:                                                                 */
4051 /*   Nothing.                                                               */
4052 /****************************************************************************/
4053 static void
4054 bce_init_com_cpu(struct bce_softc *sc)
4055 {
4056 	struct cpu_reg cpu_reg;
4057 	struct fw_info fw;
4058 
4059 	DBENTER(BCE_VERBOSE_RESET);
4060 
4061 	cpu_reg.mode = BCE_COM_CPU_MODE;
4062 	cpu_reg.mode_value_halt = BCE_COM_CPU_MODE_SOFT_HALT;
4063 	cpu_reg.mode_value_sstep = BCE_COM_CPU_MODE_STEP_ENA;
4064 	cpu_reg.state = BCE_COM_CPU_STATE;
4065 	cpu_reg.state_value_clear = 0xffffff;
4066 	cpu_reg.gpr0 = BCE_COM_CPU_REG_FILE;
4067 	cpu_reg.evmask = BCE_COM_CPU_EVENT_MASK;
4068 	cpu_reg.pc = BCE_COM_CPU_PROGRAM_COUNTER;
4069 	cpu_reg.inst = BCE_COM_CPU_INSTRUCTION;
4070 	cpu_reg.bp = BCE_COM_CPU_HW_BREAKPOINT;
4071 	cpu_reg.spad_base = BCE_COM_SCRATCH;
4072 	cpu_reg.mips_view_base = 0x8000000;
4073 
4074 	if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
4075 		(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
4076 		fw.ver_major = bce_COM_b09FwReleaseMajor;
4077 		fw.ver_minor = bce_COM_b09FwReleaseMinor;
4078 		fw.ver_fix = bce_COM_b09FwReleaseFix;
4079 		fw.start_addr = bce_COM_b09FwStartAddr;
4080 
4081 		fw.text_addr = bce_COM_b09FwTextAddr;
4082 		fw.text_len = bce_COM_b09FwTextLen;
4083 		fw.text_index = 0;
4084 		fw.text = bce_COM_b09FwText;
4085 
4086 		fw.data_addr = bce_COM_b09FwDataAddr;
4087 		fw.data_len = bce_COM_b09FwDataLen;
4088 		fw.data_index = 0;
4089 		fw.data = bce_COM_b09FwData;
4090 
4091 		fw.sbss_addr = bce_COM_b09FwSbssAddr;
4092 		fw.sbss_len = bce_COM_b09FwSbssLen;
4093 		fw.sbss_index = 0;
4094 		fw.sbss = bce_COM_b09FwSbss;
4095 
4096 		fw.bss_addr = bce_COM_b09FwBssAddr;
4097 		fw.bss_len = bce_COM_b09FwBssLen;
4098 		fw.bss_index = 0;
4099 		fw.bss = bce_COM_b09FwBss;
4100 
4101 		fw.rodata_addr = bce_COM_b09FwRodataAddr;
4102 		fw.rodata_len = bce_COM_b09FwRodataLen;
4103 		fw.rodata_index = 0;
4104 		fw.rodata = bce_COM_b09FwRodata;
4105 	} else {
4106 		fw.ver_major = bce_COM_b06FwReleaseMajor;
4107 		fw.ver_minor = bce_COM_b06FwReleaseMinor;
4108 		fw.ver_fix = bce_COM_b06FwReleaseFix;
4109 		fw.start_addr = bce_COM_b06FwStartAddr;
4110 
4111 		fw.text_addr = bce_COM_b06FwTextAddr;
4112 		fw.text_len = bce_COM_b06FwTextLen;
4113 		fw.text_index = 0;
4114 		fw.text = bce_COM_b06FwText;
4115 
4116 		fw.data_addr = bce_COM_b06FwDataAddr;
4117 		fw.data_len = bce_COM_b06FwDataLen;
4118 		fw.data_index = 0;
4119 		fw.data = bce_COM_b06FwData;
4120 
4121 		fw.sbss_addr = bce_COM_b06FwSbssAddr;
4122 		fw.sbss_len = bce_COM_b06FwSbssLen;
4123 		fw.sbss_index = 0;
4124 		fw.sbss = bce_COM_b06FwSbss;
4125 
4126 		fw.bss_addr = bce_COM_b06FwBssAddr;
4127 		fw.bss_len = bce_COM_b06FwBssLen;
4128 		fw.bss_index = 0;
4129 		fw.bss = bce_COM_b06FwBss;
4130 
4131 		fw.rodata_addr = bce_COM_b06FwRodataAddr;
4132 		fw.rodata_len = bce_COM_b06FwRodataLen;
4133 		fw.rodata_index = 0;
4134 		fw.rodata = bce_COM_b06FwRodata;
4135 	}
4136 
4137 	DBPRINT(sc, BCE_INFO_RESET, "Loading COM firmware.\n");
4138 	bce_load_cpu_fw(sc, &cpu_reg, &fw);
4139 
4140 	DBEXIT(BCE_VERBOSE_RESET);
4141 }
4142 
4143 
4144 /****************************************************************************/
4145 /* Initialize the RV2P, RX, TX, TPAT, COM, and CP CPUs.                     */
4146 /*                                                                          */
4147 /* Loads the firmware for each CPU and starts the CPU.                      */
4148 /*                                                                          */
4149 /* Returns:                                                                 */
4150 /*   Nothing.                                                               */
4151 /****************************************************************************/
4152 static void
4153 bce_init_cpus(struct bce_softc *sc)
4154 {
4155 	DBENTER(BCE_VERBOSE_RESET);
4156 
4157 	if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
4158 		(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
4159 		bce_load_rv2p_fw(sc, bce_xi_rv2p_proc1, sizeof(bce_xi_rv2p_proc1),
4160 			RV2P_PROC1);
4161 		bce_load_rv2p_fw(sc, bce_xi_rv2p_proc2, sizeof(bce_xi_rv2p_proc2),
4162 			RV2P_PROC2);
4163 	} else {
4164 		bce_load_rv2p_fw(sc, bce_rv2p_proc1, sizeof(bce_rv2p_proc1),
4165 			RV2P_PROC1);
4166 		bce_load_rv2p_fw(sc, bce_rv2p_proc2, sizeof(bce_rv2p_proc2),
4167 			RV2P_PROC2);
4168 	}
4169 
4170 	bce_init_rxp_cpu(sc);
4171 	bce_init_txp_cpu(sc);
4172 	bce_init_tpat_cpu(sc);
4173 	bce_init_com_cpu(sc);
4174 	bce_init_cp_cpu(sc);
4175 
4176 	DBEXIT(BCE_VERBOSE_RESET);
4177 }
4178 
4179 
4180 /****************************************************************************/
4181 /* Initialize context memory.                                               */
4182 /*                                                                          */
4183 /* Clears the memory associated with each Context ID (CID).                 */
4184 /*                                                                          */
4185 /* Returns:                                                                 */
4186 /*   Nothing.                                                               */
4187 /****************************************************************************/
4188 static void
4189 bce_init_ctx(struct bce_softc *sc)
4190 {
4191 
4192 	DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_CTX);
4193 
4194 	if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
4195 		(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
4196 		/* DRC: Replace this constant value with a #define. */
4197 		int i, retry_cnt = 10;
4198 		u32 val;
4199 
4200 		DBPRINT(sc, BCE_INFO_CTX, "Initializing 5709 context.\n");
4201 
4202 		/*
4203 		 * BCM5709 context memory may be cached
4204 		 * in host memory so prepare the host memory
4205 		 * for access.
4206 		 */
4207 		val = BCE_CTX_COMMAND_ENABLED | BCE_CTX_COMMAND_MEM_INIT | (1 << 12);
4208 		val |= (BCM_PAGE_BITS - 8) << 16;
4209 		REG_WR(sc, BCE_CTX_COMMAND, val);
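		/*
		 * The (BCM_PAGE_BITS - 8) value written above encodes the
		 * host page size for the chip, matching the encoding used
		 * for BCE_RV2P_CONFIG and BCE_TBDR_CONFIG in bce_chipinit().
		 */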
4210 
4211 		/* Wait for mem init command to complete. */
4212 		for (i = 0; i < retry_cnt; i++) {
4213 			val = REG_RD(sc, BCE_CTX_COMMAND);
4214 			if (!(val & BCE_CTX_COMMAND_MEM_INIT))
4215 				break;
4216 			DELAY(2);
4217 		}
4218 
4219 		/* ToDo: Consider returning an error here. */
4220 		DBRUNIF((val & BCE_CTX_COMMAND_MEM_INIT),
4221 			BCE_PRINTF("%s(): Context memory initialization failed!\n",
4222 			__FUNCTION__));
4223 
4224 		for (i = 0; i < sc->ctx_pages; i++) {
4225 			int j;
4226 
4227 			/* Set the physical address of the context memory cache. */
4228 			REG_WR(sc, BCE_CTX_HOST_PAGE_TBL_DATA0,
4229 				BCE_ADDR_LO(sc->ctx_paddr[i] & 0xfffffff0) |
4230 				BCE_CTX_HOST_PAGE_TBL_DATA0_VALID);
4231 			REG_WR(sc, BCE_CTX_HOST_PAGE_TBL_DATA1,
4232 				BCE_ADDR_HI(sc->ctx_paddr[i]));
4233 			REG_WR(sc, BCE_CTX_HOST_PAGE_TBL_CTRL, i |
4234 				BCE_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
4235 
4236 			/* Verify that the context memory write was successful. */
4237 			for (j = 0; j < retry_cnt; j++) {
4238 				val = REG_RD(sc, BCE_CTX_HOST_PAGE_TBL_CTRL);
4239 				if ((val & BCE_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) == 0)
4240 					break;
4241 				DELAY(5);
4242 			}
4243 
4244 			/* ToDo: Consider returning an error here. */
4245 			DBRUNIF((val & BCE_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ),
4246 				BCE_PRINTF("%s(): Failed to initialize context page %d!\n",
4247 				__FUNCTION__, i));
4248 		}
4249 	} else {
4250 		u32 vcid_addr, offset;
4251 
4252 		DBPRINT(sc, BCE_INFO, "Initializing 5706/5708 context.\n");
4253 
4254 		/*
4255 		 * For the 5706/5708, context memory is local to
4256 		 * the controller, so initialize the controller
4257 		 * context memory.
4258 		 */
4259 
4260 		vcid_addr = GET_CID_ADDR(96);
4261 		while (vcid_addr) {
4262 
4263 			vcid_addr -= PHY_CTX_SIZE;
4264 
4265 			REG_WR(sc, BCE_CTX_VIRT_ADDR, 0);
4266 			REG_WR(sc, BCE_CTX_PAGE_TBL, vcid_addr);
4267 
4268 			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
4269 				CTX_WR(sc, 0x00, offset, 0);
4270 			}
4271 
4272 			REG_WR(sc, BCE_CTX_VIRT_ADDR, vcid_addr);
4273 			REG_WR(sc, BCE_CTX_PAGE_TBL, vcid_addr);
4274 		}
4275 
4276 	}
4277 	DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_CTX);
4278 }
4279 
4280 
4281 /****************************************************************************/
4282 /* Fetch the permanent MAC address of the controller.                       */
4283 /*                                                                          */
4284 /* Returns:                                                                 */
4285 /*   Nothing.                                                               */
4286 /****************************************************************************/
4287 static void
4288 bce_get_mac_addr(struct bce_softc *sc)
4289 {
4290 	u32 mac_lo = 0, mac_hi = 0;
4291 
4292 	DBENTER(BCE_VERBOSE_RESET);
4293 	/*
4294 	 * The NetXtreme II bootcode populates various NIC
4295 	 * power-on and runtime configuration items in a
4296 	 * shared memory area.  The factory configured MAC
4297 	 * address is available from both NVRAM and the
4298 	 * shared memory area so we'll read the value from
4299 	 * shared memory for speed.
4300 	 */
4301 
4302 	mac_hi = REG_RD_IND(sc, sc->bce_shmem_base +
4303 		BCE_PORT_HW_CFG_MAC_UPPER);
4304 	mac_lo = REG_RD_IND(sc, sc->bce_shmem_base +
4305 		BCE_PORT_HW_CFG_MAC_LOWER);
4306 
4307 	if ((mac_lo == 0) && (mac_hi == 0)) {
4308 		BCE_PRINTF("%s(%d): Invalid Ethernet address!\n",
4309 			__FILE__, __LINE__);
4310 	} else {
4311 		sc->eaddr[0] = (u_char)(mac_hi >> 8);
4312 		sc->eaddr[1] = (u_char)(mac_hi >> 0);
4313 		sc->eaddr[2] = (u_char)(mac_lo >> 24);
4314 		sc->eaddr[3] = (u_char)(mac_lo >> 16);
4315 		sc->eaddr[4] = (u_char)(mac_lo >> 8);
4316 		sc->eaddr[5] = (u_char)(mac_lo >> 0);
4317 	}
4318 
4319 	DBPRINT(sc, BCE_INFO_MISC, "Permanent Ethernet address = %6D\n", sc->eaddr, ":");
4320 	DBEXIT(BCE_VERBOSE_RESET);
4321 }
4322 
4323 
4324 /****************************************************************************/
4325 /* Program the MAC address.                                                 */
4326 /*                                                                          */
4327 /* Returns:                                                                 */
4328 /*   Nothing.                                                               */
4329 /****************************************************************************/
4330 static void
4331 bce_set_mac_addr(struct bce_softc *sc)
4332 {
4333 	u32 val;
4334 	u8 *mac_addr = sc->eaddr;
4335 
4336 	/* ToDo: Add support for setting multiple MAC addresses. */
4337 
4338 	DBENTER(BCE_VERBOSE_RESET);
4339 	DBPRINT(sc, BCE_INFO_MISC, "Setting Ethernet address = %6D\n", sc->eaddr, ":");
4340 
4341 	val = (mac_addr[0] << 8) | mac_addr[1];
4342 
4343 	REG_WR(sc, BCE_EMAC_MAC_MATCH0, val);
4344 
4345 	val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
4346 		(mac_addr[4] << 8) | mac_addr[5];
4347 
4348 	REG_WR(sc, BCE_EMAC_MAC_MATCH1, val);
4349 
4350 	DBEXIT(BCE_VERBOSE_RESET);
4351 }
4352 
4353 
4354 /****************************************************************************/
4355 /* Stop the controller.                                                     */
4356 /*                                                                          */
4357 /* Returns:                                                                 */
4358 /*   Nothing.                                                               */
4359 /****************************************************************************/
4360 static void
4361 bce_stop(struct bce_softc *sc)
4362 {
4363 	struct ifnet *ifp;
4364 	struct ifmedia_entry *ifm;
4365 	struct mii_data *mii = NULL;
4366 	int mtmp, itmp;
4367 
4368 	DBENTER(BCE_VERBOSE_RESET);
4369 
4370 	BCE_LOCK_ASSERT(sc);
4371 
4372 	ifp = sc->bce_ifp;
4373 
4374 	mii = device_get_softc(sc->bce_miibus);
4375 
4376 	callout_stop(&sc->bce_tick_callout);
4377 
4378 	/* Disable the transmit/receive blocks. */
4379 	REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS, BCE_MISC_ENABLE_CLR_DEFAULT);
4380 	REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS);
4381 	DELAY(20);
4382 
4383 	bce_disable_intr(sc);
4384 
4385 	/* Free RX buffers. */
4386 #ifdef ZERO_COPY_SOCKETS
4387 	bce_free_pg_chain(sc);
4388 #endif
4389 	bce_free_rx_chain(sc);
4390 
4391 	/* Free TX buffers. */
4392 	bce_free_tx_chain(sc);
4393 
4394 	/*
4395 	 * Isolate/power down the PHY, but leave the media selection
4396 	 * unchanged so that things will be put back to normal when
4397 	 * we bring the interface back up.
4398 	 */
4399 
4400 	itmp = ifp->if_flags;
4401 	ifp->if_flags |= IFF_UP;
4402 
4403 	/* If we are called from bce_detach(), mii is already NULL. */
4404 	if (mii != NULL) {
4405 		ifm = mii->mii_media.ifm_cur;
4406 		mtmp = ifm->ifm_media;
4407 		ifm->ifm_media = IFM_ETHER | IFM_NONE;
4408 		mii_mediachg(mii);
4409 		ifm->ifm_media = mtmp;
4410 	}
4411 
4412 	ifp->if_flags = itmp;
4413 	sc->watchdog_timer = 0;
4414 
4415 	sc->bce_link = 0;
4416 
4417 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
4418 
4419 	DBEXIT(BCE_VERBOSE_RESET);
4420 }
4421 
4422 
4423 static int
4424 bce_reset(struct bce_softc *sc, u32 reset_code)
4425 {
4426 	u32 val;
4427 	int i, rc = 0;
4428 
4429 	DBENTER(BCE_VERBOSE_RESET);
4430 
4431 	DBPRINT(sc, BCE_VERBOSE_RESET, "%s(): reset_code = 0x%08X\n",
4432 		__FUNCTION__, reset_code);
4433 
4434 	/* Wait for pending PCI transactions to complete. */
4435 	REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS,
4436 	       BCE_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
4437 	       BCE_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
4438 	       BCE_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
4439 	       BCE_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
4440 	val = REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS);
4441 	DELAY(5);
4442 
4443 	/* Disable DMA */
4444 	if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
4445 		(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
4446 		val = REG_RD(sc, BCE_MISC_NEW_CORE_CTL);
4447 		val &= ~BCE_MISC_NEW_CORE_CTL_DMA_ENABLE;
4448 		REG_WR(sc, BCE_MISC_NEW_CORE_CTL, val);
4449 	}
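	/*
	 * On the 5709/5716 the DMA engine is quiesced here before the core
	 * reset; it is re-enabled in bce_blockinit() once the chip has been
	 * brought back up.
	 */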
4450 
4451 	/* Assume bootcode is running. */
4452 	sc->bce_fw_timed_out = 0;
4453 
4454 	/* Give the firmware a chance to prepare for the reset. */
4455 	rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT0 | reset_code);
4456 	if (rc)
4457 		goto bce_reset_exit;
4458 
4459 	/* Set a firmware reminder that this is a soft reset. */
4460 	REG_WR_IND(sc, sc->bce_shmem_base + BCE_DRV_RESET_SIGNATURE,
4461 		   BCE_DRV_RESET_SIGNATURE_MAGIC);
4462 
4463 	/* Dummy read to force the chip to complete all current transactions. */
4464 	val = REG_RD(sc, BCE_MISC_ID);
4465 
4466 	/* Chip reset. */
4467 	if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
4468 		(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
4469 		REG_WR(sc, BCE_MISC_COMMAND, BCE_MISC_COMMAND_SW_RESET);
4470 		REG_RD(sc, BCE_MISC_COMMAND);
4471 		DELAY(5);
4472 
4473 		val = BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4474 		      BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4475 
4476 		pci_write_config(sc->bce_dev, BCE_PCICFG_MISC_CONFIG, val, 4);
4477 	} else {
4478 		val = BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4479 			BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4480 			BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4481 		REG_WR(sc, BCE_PCICFG_MISC_CONFIG, val);
4482 
4483 		/* Allow up to 30us for reset to complete. */
4484 		for (i = 0; i < 10; i++) {
4485 			val = REG_RD(sc, BCE_PCICFG_MISC_CONFIG);
4486 			if ((val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4487 				BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) {
4488 				break;
4489 			}
4490 			DELAY(10);
4491 		}
4492 
4493 		/* Check that reset completed successfully. */
4494 		if (val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4495 			BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
4496 			BCE_PRINTF("%s(%d): Reset failed!\n",
4497 				__FILE__, __LINE__);
4498 			rc = EBUSY;
4499 			goto bce_reset_exit;
4500 		}
4501 	}
4502 
4503 	/* Make sure byte swapping is properly configured. */
4504 	val = REG_RD(sc, BCE_PCI_SWAP_DIAG0);
4505 	if (val != 0x01020304) {
4506 		BCE_PRINTF("%s(%d): Byte swap is incorrect!\n",
4507 			__FILE__, __LINE__);
4508 		rc = ENODEV;
4509 		goto bce_reset_exit;
4510 	}
4511 
4512 	/* Just completed a reset, assume that firmware is running again. */
4513 	sc->bce_fw_timed_out = 0;
4514 
4515 	/* Wait for the firmware to finish its initialization. */
4516 	rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT1 | reset_code);
4517 	if (rc)
4518 		BCE_PRINTF("%s(%d): Firmware did not complete initialization!\n",
4519 			__FILE__, __LINE__);
4520 
4521 bce_reset_exit:
4522 	DBEXIT(BCE_VERBOSE_RESET);
4523 	return (rc);
4524 }
4525 
4526 
4527 static int
4528 bce_chipinit(struct bce_softc *sc)
4529 {
4530 	u32 val;
4531 	int rc = 0;
4532 
4533 	DBENTER(BCE_VERBOSE_RESET);
4534 
4535 	bce_disable_intr(sc);
4536 
4537 	/*
4538 	 * Initialize DMA byte/word swapping, configure the number of DMA
4539 	 * channels and PCI clock compensation delay.
4540 	 */
4541 	val = BCE_DMA_CONFIG_DATA_BYTE_SWAP |
4542 	      BCE_DMA_CONFIG_DATA_WORD_SWAP |
4543 #if BYTE_ORDER == BIG_ENDIAN
4544 	      BCE_DMA_CONFIG_CNTL_BYTE_SWAP |
4545 #endif
4546 	      BCE_DMA_CONFIG_CNTL_WORD_SWAP |
4547 	      DMA_READ_CHANS << 12 |
4548 	      DMA_WRITE_CHANS << 16;
4549 
4550 	val |= (0x2 << 20) | BCE_DMA_CONFIG_CNTL_PCI_COMP_DLY;
4551 
4552 	if ((sc->bce_flags & BCE_PCIX_FLAG) && (sc->bus_speed_mhz == 133))
4553 		val |= BCE_DMA_CONFIG_PCI_FAST_CLK_CMP;
4554 
4555 	/*
4556 	 * This setting resolves a problem observed on certain Intel PCI
4557 	 * chipsets that cannot handle multiple outstanding DMA operations.
4558 	 * See errata E9_5706A1_65.
4559 	 */
4560 	if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) &&
4561 	    (BCE_CHIP_ID(sc) != BCE_CHIP_ID_5706_A0) &&
4562 	    !(sc->bce_flags & BCE_PCIX_FLAG))
4563 		val |= BCE_DMA_CONFIG_CNTL_PING_PONG_DMA;
4564 
4565 	REG_WR(sc, BCE_DMA_CONFIG, val);
4566 
4567 	/* Enable the RX_V2P and Context state machines before access. */
4568 	REG_WR(sc, BCE_MISC_ENABLE_SET_BITS,
4569 	       BCE_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4570 	       BCE_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4571 	       BCE_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4572 
4573 	/* Initialize context mapping and zero out the quick contexts. */
4574 	bce_init_ctx(sc);
4575 
4576 	/* Initialize the on-board CPUs. */
4577 	bce_init_cpus(sc);
4578 
4579 	/* Prepare NVRAM for access. */
4580 	if (bce_init_nvram(sc)) {
4581 		rc = ENODEV;
4582 		goto bce_chipinit_exit;
4583 	}
4584 
4585 	/* Set the kernel bypass block size */
4586 	val = REG_RD(sc, BCE_MQ_CONFIG);
4587 	val &= ~BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4588 	val |= BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4589 
4590 	/* Enable bins used on the 5709. */
4591 	if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
4592 		(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
4593 		val |= BCE_MQ_CONFIG_BIN_MQ_MODE;
4594 		if (BCE_CHIP_ID(sc) == BCE_CHIP_ID_5709_A1)
4595 			val |= BCE_MQ_CONFIG_HALT_DIS;
4596 	}
4597 
4598 	REG_WR(sc, BCE_MQ_CONFIG, val);
4599 
4600 	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4601 	REG_WR(sc, BCE_MQ_KNL_BYP_WIND_START, val);
4602 	REG_WR(sc, BCE_MQ_KNL_WIND_END, val);
4603 
4604 	/* Set the page size and clear the RV2P processor stall bits. */
4605 	val = (BCM_PAGE_BITS - 8) << 24;
4606 	REG_WR(sc, BCE_RV2P_CONFIG, val);
4607 
4608 	/* Configure page size. */
4609 	val = REG_RD(sc, BCE_TBDR_CONFIG);
4610 	val &= ~BCE_TBDR_CONFIG_PAGE_SIZE;
4611 	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
4612 	REG_WR(sc, BCE_TBDR_CONFIG, val);
4613 
4614 	/* Set the perfect match control register to default. */
4615 	REG_WR_IND(sc, BCE_RXP_PM_CTRL, 0);
4616 
4617 bce_chipinit_exit:
4618 	DBEXIT(BCE_VERBOSE_RESET);
4619 
4620 	return(rc);
4621 }
4622 
4623 
4624 /****************************************************************************/
4625 /* Initialize the controller in preparation to send/receive traffic.        */
4626 /*                                                                          */
4627 /* Returns:                                                                 */
4628 /*   0 for success, positive value for failure.                             */
4629 /****************************************************************************/
4630 static int
4631 bce_blockinit(struct bce_softc *sc)
4632 {
4633 	u32 reg, val;
4634 	int rc = 0;
4635 
4636 	DBENTER(BCE_VERBOSE_RESET);
4637 
4638 	/* Load the hardware default MAC address. */
4639 	bce_set_mac_addr(sc);
4640 
4641 	/* Set the Ethernet backoff seed value */
4642 	val = sc->eaddr[0]         + (sc->eaddr[1] << 8) +
4643 	      (sc->eaddr[2] << 16) + (sc->eaddr[3]     ) +
4644 	      (sc->eaddr[4] << 8)  + (sc->eaddr[5] << 16);
4645 	REG_WR(sc, BCE_EMAC_BACKOFF_SEED, val);
4646 
4647 	sc->last_status_idx = 0;
4648 	sc->rx_mode = BCE_EMAC_RX_MODE_SORT_MODE;
4649 
4650 	/* Set up link change interrupt generation. */
4651 	REG_WR(sc, BCE_EMAC_ATTENTION_ENA, BCE_EMAC_ATTENTION_ENA_LINK);
4652 
4653 	/* Program the physical address of the status block. */
4654 	REG_WR(sc, BCE_HC_STATUS_ADDR_L,
4655 		BCE_ADDR_LO(sc->status_block_paddr));
4656 	REG_WR(sc, BCE_HC_STATUS_ADDR_H,
4657 		BCE_ADDR_HI(sc->status_block_paddr));
4658 
4659 	/* Program the physical address of the statistics block. */
4660 	REG_WR(sc, BCE_HC_STATISTICS_ADDR_L,
4661 		BCE_ADDR_LO(sc->stats_block_paddr));
4662 	REG_WR(sc, BCE_HC_STATISTICS_ADDR_H,
4663 		BCE_ADDR_HI(sc->stats_block_paddr));
4664 
4665 	/* Program various host coalescing parameters. */
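	/*
	 * Each trip/tick register below packs two values: the setting used
	 * during normal operation in the lower 16 bits and the corresponding
	 * *_int setting (applied while an interrupt is being serviced) in
	 * the upper 16 bits.
	 */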
4666 	REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
4667 		(sc->bce_tx_quick_cons_trip_int << 16) | sc->bce_tx_quick_cons_trip);
4668 	REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
4669 		(sc->bce_rx_quick_cons_trip_int << 16) | sc->bce_rx_quick_cons_trip);
4670 	REG_WR(sc, BCE_HC_COMP_PROD_TRIP,
4671 		(sc->bce_comp_prod_trip_int << 16) | sc->bce_comp_prod_trip);
4672 	REG_WR(sc, BCE_HC_TX_TICKS,
4673 		(sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks);
4674 	REG_WR(sc, BCE_HC_RX_TICKS,
4675 		(sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks);
4676 	REG_WR(sc, BCE_HC_COM_TICKS,
4677 		(sc->bce_com_ticks_int << 16) | sc->bce_com_ticks);
4678 	REG_WR(sc, BCE_HC_CMD_TICKS,
4679 		(sc->bce_cmd_ticks_int << 16) | sc->bce_cmd_ticks);
4680 	REG_WR(sc, BCE_HC_STATS_TICKS,
4681 		(sc->bce_stats_ticks & 0xffff00));
4682 	REG_WR(sc, BCE_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */
4683 
4684 	/* Configure the Host Coalescing block. */
4685 	val = BCE_HC_CONFIG_RX_TMR_MODE | BCE_HC_CONFIG_TX_TMR_MODE |
4686 		      BCE_HC_CONFIG_COLLECT_STATS;
4687 
4688 #if 0
4689 	/* ToDo: Add MSI-X support. */
4690 	if (sc->bce_flags & BCE_USING_MSIX_FLAG) {
4691 		u32 base = ((BCE_TX_VEC - 1) * BCE_HC_SB_CONFIG_SIZE) +
4692 			   BCE_HC_SB_CONFIG_1;
4693 
4694 		REG_WR(sc, BCE_HC_MSIX_BIT_VECTOR, BCE_HC_MSIX_BIT_VECTOR_VAL);
4695 
4696 		REG_WR(sc, base, BCE_HC_SB_CONFIG_1_TX_TMR_MODE |
4697 			BCE_HC_SB_CONFIG_1_ONE_SHOT);
4698 
4699 		REG_WR(sc, base + BCE_HC_TX_QUICK_CONS_TRIP_OFF,
4700 			(sc->tx_quick_cons_trip_int << 16) |
4701 			 sc->tx_quick_cons_trip);
4702 
4703 		REG_WR(sc, base + BCE_HC_TX_TICKS_OFF,
4704 			(sc->tx_ticks_int << 16) | sc->tx_ticks);
4705 
4706 		val |= BCE_HC_CONFIG_SB_ADDR_INC_128B;
4707 	}
4708 
4709 	/*
4710 	 * Tell the HC block to automatically set the
4711 	 * INT_MASK bit after an MSI/MSI-X interrupt
4712 	 * is generated so the driver doesn't have to.
4713 	 */
4714 	if (sc->bce_flags & BCE_ONE_SHOT_MSI_FLAG)
4715 		val |= BCE_HC_CONFIG_ONE_SHOT;
4716 
4717 	/* Set the MSI-X status blocks to 128 byte boundaries. */
4718 	if (sc->bce_flags & BCE_USING_MSIX_FLAG)
4719 		val |= BCE_HC_CONFIG_SB_ADDR_INC_128B;
4720 #endif
4721 
4722 	REG_WR(sc, BCE_HC_CONFIG, val);
4723 
4724 	/* Clear the internal statistics counters. */
4725 	REG_WR(sc, BCE_HC_COMMAND, BCE_HC_COMMAND_CLR_STAT_NOW);
4726 
4727 	/* Verify that bootcode is running. */
4728 	reg = REG_RD_IND(sc, sc->bce_shmem_base + BCE_DEV_INFO_SIGNATURE);
4729 
4730 	DBRUNIF(DB_RANDOMTRUE(bce_debug_bootcode_running_failure),
4731 		BCE_PRINTF("%s(%d): Simulating bootcode failure.\n",
4732 			__FILE__, __LINE__);
4733 		reg = 0);
4734 
4735 	if ((reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
4736 	    BCE_DEV_INFO_SIGNATURE_MAGIC) {
4737 		BCE_PRINTF("%s(%d): Bootcode not running! Found: 0x%08X, "
4738 			"Expected: 0x%08X\n", __FILE__, __LINE__,
4739 			(reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK),
4740 			BCE_DEV_INFO_SIGNATURE_MAGIC);
4741 		rc = ENODEV;
4742 		goto bce_blockinit_exit;
4743 	}
4744 
4745 	/* Enable DMA */
4746 	if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
4747 		(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
4748 		val = REG_RD(sc, BCE_MISC_NEW_CORE_CTL);
4749 		val |= BCE_MISC_NEW_CORE_CTL_DMA_ENABLE;
4750 		REG_WR(sc, BCE_MISC_NEW_CORE_CTL, val);
4751 	}
4752 
4753 	/* Allow bootcode to apply any additional fixes before enabling MAC. */
4754 	rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT2 | BCE_DRV_MSG_CODE_RESET);
4755 
4756 	/* Enable link state change interrupt generation. */
4757 	REG_WR(sc, BCE_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);
4758 
4759 	/* Enable all remaining blocks in the MAC. */
4760 	if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709)	||
4761 		(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716))
4762 		REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, BCE_MISC_ENABLE_DEFAULT_XI);
4763 	else
4764 		REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, BCE_MISC_ENABLE_DEFAULT);
4765 
4766 	REG_RD(sc, BCE_MISC_ENABLE_SET_BITS);
4767 	DELAY(20);
4768 
4769 	/* Save the current host coalescing block settings. */
4770 	sc->hc_command = REG_RD(sc, BCE_HC_COMMAND);
4771 
4772 bce_blockinit_exit:
4773 	DBEXIT(BCE_VERBOSE_RESET);
4774 
4775 	return (rc);
4776 }
4777 
4778 
4779 /****************************************************************************/
4780 /* Encapsulate an mbuf into the rx_bd chain.                                */
4781 /*                                                                          */
4782 /* Returns:                                                                 */
4783 /*   0 for success, positive value for failure.                             */
4784 /****************************************************************************/
4785 static int
4786 bce_get_rx_buf(struct bce_softc *sc, struct mbuf *m, u16 *prod,
4787 	u16 *chain_prod, u32 *prod_bseq)
4788 {
4789 	bus_dmamap_t map;
4790 	bus_dma_segment_t segs[BCE_MAX_SEGMENTS];
4791 	struct mbuf *m_new = NULL;
4792 	struct rx_bd *rxbd;
4793 	int nsegs, error, rc = 0;
4794 #ifdef BCE_DEBUG
4795 	u16 debug_chain_prod = *chain_prod;
4796 #endif
4797 
4798 	DBENTER(BCE_EXTREME_RESET | BCE_EXTREME_RECV | BCE_EXTREME_LOAD);
4799 
4800 	/* Make sure the inputs are valid. */
4801 	DBRUNIF((*chain_prod > MAX_RX_BD),
4802 		BCE_PRINTF("%s(%d): RX producer out of range: 0x%04X > 0x%04X\n",
4803 		__FILE__, __LINE__, *chain_prod, (u16) MAX_RX_BD));
4804 
4805 	DBPRINT(sc, BCE_EXTREME_RECV, "%s(enter): prod = 0x%04X, chain_prod = 0x%04X, "
4806 		"prod_bseq = 0x%08X\n", __FUNCTION__, *prod, *chain_prod, *prod_bseq);
4807 
4808 	/* Update some debug statistic counters */
4809 	DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark),
4810 		sc->rx_low_watermark = sc->free_rx_bd);
4811 	DBRUNIF((sc->free_rx_bd == sc->max_rx_bd), sc->rx_empty_count++);
4812 
4813 	/* Check whether this is a new mbuf allocation. */
4814 	if (m == NULL) {
4815 
4816 		/* Simulate an mbuf allocation failure. */
4817 		DBRUNIF(DB_RANDOMTRUE(bce_debug_mbuf_allocation_failure),
4818 			sc->mbuf_alloc_failed++;
4819 			sc->debug_mbuf_sim_alloc_failed++;
4820 			rc = ENOBUFS;
4821 			goto bce_get_rx_buf_exit);
4822 
4823 		/* This is a new mbuf allocation. */
4824 #ifdef ZERO_COPY_SOCKETS
4825 		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
4826 #else
4827 		if (sc->rx_bd_mbuf_alloc_size <= MCLBYTES)
4828 			m_new = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
4829 		else
4830 			m_new = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, sc->rx_bd_mbuf_alloc_size);
4831 #endif
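		/*
		 * Note: MCLBYTES-sized (2KB) receive buffers come from the
		 * standard cluster zone via m_getcl(), while larger buffers
		 * used for jumbo frames are drawn from the jumbo cluster
		 * zones via m_getjcl().
		 */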
4832 
4833 		if (m_new == NULL) {
4834 			sc->mbuf_alloc_failed++;
4835 			rc = ENOBUFS;
4836 			goto bce_get_rx_buf_exit;
4837 		}
4838 
4839 		DBRUN(sc->debug_rx_mbuf_alloc++);
4840 	} else {
4841 		/* Reuse an existing mbuf. */
4842 		m_new = m;
4843 	}
4844 
4845 	/* Make sure we have a valid packet header. */
4846 	M_ASSERTPKTHDR(m_new);
4847 
4848 	/* Initialize the mbuf size and pad if necessary for alignment. */
4849 	m_new->m_pkthdr.len = m_new->m_len = sc->rx_bd_mbuf_alloc_size;
4850 	m_adj(m_new, sc->rx_bd_mbuf_align_pad);
4851 
4852 	/* ToDo: Consider calling m_fragment() to test error handling. */
4853 
4854 	/* Map the mbuf cluster into device memory. */
4855 	map = sc->rx_mbuf_map[*chain_prod];
4856 	error = bus_dmamap_load_mbuf_sg(sc->rx_mbuf_tag, map, m_new,
4857 	    segs, &nsegs, BUS_DMA_NOWAIT);
4858 
4859 	/* Handle any mapping errors. */
4860 	if (error) {
4861 		BCE_PRINTF("%s(%d): Error mapping mbuf into RX chain (%d)!\n",
4862 			__FILE__, __LINE__, error);
4863 
4864 		m_freem(m_new);
4865 		DBRUN(sc->debug_rx_mbuf_alloc--);
4866 
4867 		rc = ENOBUFS;
4868 		goto bce_get_rx_buf_exit;
4869 	}
4870 
4871 	/* All mbufs must map to a single segment. */
4872 	KASSERT(nsegs == 1, ("%s(): Too many segments returned (%d)!",
4873 		 __FUNCTION__, nsegs));
4874 
4875 	/* ToDo: Do we need bus_dmamap_sync(,,BUS_DMASYNC_PREWRITE) here? */
4876 
4877 	/* Setup the rx_bd for the segment. */
4878 	rxbd = &sc->rx_bd_chain[RX_PAGE(*chain_prod)][RX_IDX(*chain_prod)];
4879 
4880 	rxbd->rx_bd_haddr_lo  = htole32(BCE_ADDR_LO(segs[0].ds_addr));
4881 	rxbd->rx_bd_haddr_hi  = htole32(BCE_ADDR_HI(segs[0].ds_addr));
4882 	rxbd->rx_bd_len       = htole32(segs[0].ds_len);
4883 	rxbd->rx_bd_flags     = htole32(RX_BD_FLAGS_START | RX_BD_FLAGS_END);
4884 	*prod_bseq += segs[0].ds_len;
4885 
4886 	/* Save the mbuf and update our counter. */
4887 	sc->rx_mbuf_ptr[*chain_prod] = m_new;
4888 	sc->free_rx_bd -= nsegs;
4889 
4890 	DBRUNMSG(BCE_INSANE_RECV, bce_dump_rx_mbuf_chain(sc, debug_chain_prod,
4891 		nsegs));
4892 
4893 	DBPRINT(sc, BCE_EXTREME_RECV, "%s(exit): prod = 0x%04X, chain_prod = 0x%04X, "
4894 		"prod_bseq = 0x%08X\n", __FUNCTION__, *prod, *chain_prod, *prod_bseq);
4895 
4896 bce_get_rx_buf_exit:
4897 	DBEXIT(BCE_EXTREME_RESET | BCE_EXTREME_RECV | BCE_EXTREME_LOAD);
4898 
4899 	return(rc);
4900 }
4901 
4902 
4903 #ifdef ZERO_COPY_SOCKETS
4904 /****************************************************************************/
4905 /* Encapsulate an mbuf cluster into the page chain.                        */
4906 /*                                                                          */
4907 /* Returns:                                                                 */
4908 /*   0 for success, positive value for failure.                             */
4909 /****************************************************************************/
4910 static int
4911 bce_get_pg_buf(struct bce_softc *sc, struct mbuf *m, u16 *prod,
4912 	u16 *prod_idx)
4913 {
4914 	bus_dmamap_t map;
4915 	bus_addr_t busaddr;
4916 	struct mbuf *m_new = NULL;
4917 	struct rx_bd *pgbd;
4918 	int error, rc = 0;
4919 #ifdef BCE_DEBUG
4920 	u16 debug_prod_idx = *prod_idx;
4921 #endif
4922 
4923 	DBENTER(BCE_EXTREME_RESET | BCE_EXTREME_RECV | BCE_EXTREME_LOAD);
4924 
4925 	/* Make sure the inputs are valid. */
4926 	DBRUNIF((*prod_idx > MAX_PG_BD),
4927 		BCE_PRINTF("%s(%d): page producer out of range: 0x%04X > 0x%04X\n",
4928 		__FILE__, __LINE__, *prod_idx, (u16) MAX_PG_BD));
4929 
4930 	DBPRINT(sc, BCE_EXTREME_RECV, "%s(enter): prod = 0x%04X, "
4931 		"chain_prod = 0x%04X\n", __FUNCTION__, *prod, *prod_idx);
4932 
4933 	/* Update counters if we've hit a new low or run out of pages. */
4934 	DBRUNIF((sc->free_pg_bd < sc->pg_low_watermark),
4935 		sc->pg_low_watermark = sc->free_pg_bd);
4936 	DBRUNIF((sc->free_pg_bd == sc->max_pg_bd), sc->pg_empty_count++);
4937 
4938 	/* Check whether this is a new mbuf allocation. */
4939 	if (m == NULL) {
4940 
4941 		/* Simulate an mbuf allocation failure. */
4942 		DBRUNIF(DB_RANDOMTRUE(bce_debug_mbuf_allocation_failure),
4943 			sc->mbuf_alloc_failed++;
4944 			sc->debug_mbuf_sim_alloc_failed++;
4945 			rc = ENOBUFS;
4946 			goto bce_get_pg_buf_exit);
4947 
4948 		/* This is a new mbuf allocation. */
4949 		m_new = m_getcl(M_DONTWAIT, MT_DATA, 0);
4950 		if (m_new == NULL) {
4951 			sc->mbuf_alloc_failed++;
4952 			rc = ENOBUFS;
4953 			goto bce_get_pg_buf_exit;
4954 		}
4955 
4956 		DBRUN(sc->debug_pg_mbuf_alloc++);
4957 	} else {
4958 		/* Reuse an existing mbuf. */
4959 		m_new = m;
4960 		m_new->m_data = m_new->m_ext.ext_buf;
4961 	}
4962 
4963 	m_new->m_len = sc->pg_bd_mbuf_alloc_size;
4964 
4965 	/* ToDo: Consider calling m_fragment() to test error handling. */
4966 
4967 	/* Map the mbuf cluster into device memory. */
4968 	map = sc->pg_mbuf_map[*prod_idx];
4969 	error = bus_dmamap_load(sc->pg_mbuf_tag, map, mtod(m_new, void *),
4970 	    sc->pg_bd_mbuf_alloc_size, bce_dma_map_addr, &busaddr, BUS_DMA_NOWAIT);
4971 
4972 	/* Handle any mapping errors. */
4973 	if (error) {
4974 		BCE_PRINTF("%s(%d): Error mapping mbuf into page chain!\n",
4975 			__FILE__, __LINE__);
4976 
4977 		m_freem(m_new);
4978 		DBRUN(sc->debug_pg_mbuf_alloc--);
4979 
4980 		rc = ENOBUFS;
4981 		goto bce_get_pg_buf_exit;
4982 	}
4983 
4984 	/* ToDo: Do we need bus_dmamap_sync(,,BUS_DMASYNC_PREWRITE) here? */
4985 
4986 	/*
4987 	 * The page chain uses the same rx_bd data structure
4988 	 * as the receive chain but doesn't require a byte sequence (bseq).
4989 	 */
4990 	pgbd = &sc->pg_bd_chain[PG_PAGE(*prod_idx)][PG_IDX(*prod_idx)];
4991 
4992 	pgbd->rx_bd_haddr_lo  = htole32(BCE_ADDR_LO(busaddr));
4993 	pgbd->rx_bd_haddr_hi  = htole32(BCE_ADDR_HI(busaddr));
4994 	pgbd->rx_bd_len       = htole32(sc->pg_bd_mbuf_alloc_size);
4995 	pgbd->rx_bd_flags     = htole32(RX_BD_FLAGS_START | RX_BD_FLAGS_END);
4996 
4997 	/* Save the mbuf and update our counter. */
4998 	sc->pg_mbuf_ptr[*prod_idx] = m_new;
4999 	sc->free_pg_bd--;
5000 
5001 	DBRUNMSG(BCE_INSANE_RECV, bce_dump_pg_mbuf_chain(sc, debug_prod_idx,
5002 		1));
5003 
5004 	DBPRINT(sc, BCE_EXTREME_RECV, "%s(exit): prod = 0x%04X, "
5005 		"prod_idx = 0x%04X\n", __FUNCTION__, *prod, *prod_idx);
5006 
5007 bce_get_pg_buf_exit:
5008 	DBEXIT(BCE_EXTREME_RESET | BCE_EXTREME_RECV | BCE_EXTREME_LOAD);
5009 
5010 	return(rc);
5011 }
5012 #endif /* ZERO_COPY_SOCKETS */
5013 
5014 /****************************************************************************/
5015 /* Initialize the TX context memory.                                        */
5016 /*                                                                          */
5017 /* Returns:                                                                 */
5018 /*   Nothing                                                                */
5019 /****************************************************************************/
5020 static void
5021 bce_init_tx_context(struct bce_softc *sc)
5022 {
5023 	u32 val;
5024 
5025 	DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_SEND | BCE_VERBOSE_CTX);
5026 
5027 	/* Initialize the context ID for an L2 TX chain. */
5028 	if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
5029 		(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
5030 		/* Set the CID type to support an L2 connection. */
5031 		val = BCE_L2CTX_TX_TYPE_TYPE_L2_XI | BCE_L2CTX_TX_TYPE_SIZE_L2_XI;
5032 		CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TX_TYPE_XI, val);
5033 		val = BCE_L2CTX_TX_CMD_TYPE_TYPE_L2_XI | (8 << 16);
5034 		CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TX_CMD_TYPE_XI, val);
5035 
5036 		/* Point the hardware to the first page in the chain. */
5037 		val = BCE_ADDR_HI(sc->tx_bd_chain_paddr[0]);
5038 		CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TX_TBDR_BHADDR_HI_XI, val);
5039 		val = BCE_ADDR_LO(sc->tx_bd_chain_paddr[0]);
5040 		CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TX_TBDR_BHADDR_LO_XI, val);
5041 	} else {
5042 		/* Set the CID type to support an L2 connection. */
5043 		val = BCE_L2CTX_TX_TYPE_TYPE_L2 | BCE_L2CTX_TX_TYPE_SIZE_L2;
5044 		CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TX_TYPE, val);
5045 		val = BCE_L2CTX_TX_CMD_TYPE_TYPE_L2 | (8 << 16);
5046 		CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TX_CMD_TYPE, val);
5047 
5048 		/* Point the hardware to the first page in the chain. */
5049 		val = BCE_ADDR_HI(sc->tx_bd_chain_paddr[0]);
5050 		CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TX_TBDR_BHADDR_HI, val);
5051 		val = BCE_ADDR_LO(sc->tx_bd_chain_paddr[0]);
5052 		CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TX_TBDR_BHADDR_LO, val);
5053 	}
5054 
5055 	DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_SEND | BCE_VERBOSE_CTX);
5056 }
5057 
5058 
5059 /****************************************************************************/
5060 /* Allocate memory and initialize the TX data structures.                   */
5061 /*                                                                          */
5062 /* Returns:                                                                 */
5063 /*   0 for success, positive value for failure.                             */
5064 /****************************************************************************/
5065 static int
5066 bce_init_tx_chain(struct bce_softc *sc)
5067 {
5068 	struct tx_bd *txbd;
5069 	int i, rc = 0;
5070 
5071 	DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_SEND | BCE_VERBOSE_LOAD);
5072 
5073 	/* Set the initial TX producer/consumer indices. */
5074 	sc->tx_prod        = 0;
5075 	sc->tx_cons        = 0;
5076 	sc->tx_prod_bseq   = 0;
5077 	sc->used_tx_bd     = 0;
5078 	sc->max_tx_bd      = USABLE_TX_BD;
5079 	DBRUN(sc->tx_hi_watermark = USABLE_TX_BD);
5080 	DBRUN(sc->tx_full_count = 0);
5081 
5082 	/*
5083 	 * The NetXtreme II supports a linked-list structure called
5084 	 * a Buffer Descriptor Chain (or BD chain).  A BD chain
5085 	 * consists of a series of 1 or more chain pages, each of which
5086 	 * consists of a fixed number of BD entries.
5087 	 * The last BD entry on each page is a pointer to the next page
5088 	 * in the chain, and the last pointer in the BD chain
5089 	 * points back to the beginning of the chain.
5090 	 */
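	/*
	 * For example, with two chain pages the last tx_bd of page 0 holds
	 * the physical address of page 1 and the last tx_bd of page 1
	 * points back to page 0, which is exactly what the loop below
	 * programs.
	 */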
5091 
5092 	/* Set the TX next pointer chain entries. */
5093 	for (i = 0; i < TX_PAGES; i++) {
5094 		int j;
5095 
5096 		txbd = &sc->tx_bd_chain[i][USABLE_TX_BD_PER_PAGE];
5097 
5098 		/* Check if we've reached the last page. */
5099 		if (i == (TX_PAGES - 1))
5100 			j = 0;
5101 		else
5102 			j = i + 1;
5103 
5104 		txbd->tx_bd_haddr_hi = htole32(BCE_ADDR_HI(sc->tx_bd_chain_paddr[j]));
5105 		txbd->tx_bd_haddr_lo = htole32(BCE_ADDR_LO(sc->tx_bd_chain_paddr[j]));
5106 	}
5107 
5108 	bce_init_tx_context(sc);
5109 
5110 	DBRUNMSG(BCE_INSANE_SEND, bce_dump_tx_chain(sc, 0, TOTAL_TX_BD));
5111 	DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_SEND | BCE_VERBOSE_LOAD);
5112 
5113 	return(rc);
5114 }
5115 
5116 
5117 /****************************************************************************/
5118 /* Free memory and clear the TX data structures.                            */
5119 /*                                                                          */
5120 /* Returns:                                                                 */
5121 /*   Nothing.                                                               */
5122 /****************************************************************************/
5123 static void
5124 bce_free_tx_chain(struct bce_softc *sc)
5125 {
5126 	int i;
5127 
5128 	DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_SEND | BCE_VERBOSE_UNLOAD);
5129 
5130 	/* Unmap, unload, and free any mbufs still in the TX mbuf chain. */
5131 	for (i = 0; i < TOTAL_TX_BD; i++) {
5132 		if (sc->tx_mbuf_ptr[i] != NULL) {
5133 			if (sc->tx_mbuf_map[i] != NULL)
5134 				bus_dmamap_sync(sc->tx_mbuf_tag, sc->tx_mbuf_map[i],
5135 					BUS_DMASYNC_POSTWRITE);
5136 			m_freem(sc->tx_mbuf_ptr[i]);
5137 			sc->tx_mbuf_ptr[i] = NULL;
5138 			DBRUN(sc->debug_tx_mbuf_alloc--);
5139 		}
5140 	}
5141 
5142 	/* Clear each TX chain page. */
5143 	for (i = 0; i < TX_PAGES; i++)
5144 		bzero((char *)sc->tx_bd_chain[i], BCE_TX_CHAIN_PAGE_SZ);
5145 
5146 	sc->used_tx_bd     = 0;
5147 
5148 	/* Check if we lost any mbufs in the process. */
5149 	DBRUNIF((sc->debug_tx_mbuf_alloc),
5150 		BCE_PRINTF("%s(%d): Memory leak! Lost %d mbufs "
5151 			"from tx chain!\n",
5152 			__FILE__, __LINE__, sc->debug_tx_mbuf_alloc));
5153 
5154 	DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_SEND | BCE_VERBOSE_UNLOAD);
5155 }
5156 
5157 
5158 /****************************************************************************/
5159 /* Initialize the RX context memory.                                        */
5160 /*                                                                          */
5161 /* Returns:                                                                 */
5162 /*   Nothing                                                                */
5163 /****************************************************************************/
5164 static void
5165 bce_init_rx_context(struct bce_softc *sc)
5166 {
5167 	u32 val;
5168 
5169 	DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_CTX);
5170 
5171 	/* Initialize the type, size, and BD cache levels for the RX context. */
5172 	val = BCE_L2CTX_RX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE |
5173 		BCE_L2CTX_RX_CTX_TYPE_SIZE_L2 |
5174 		(0x02 << BCE_L2CTX_RX_BD_PRE_READ_SHIFT);
5175 
5176 	/*
5177 	 * Set the level for generating pause frames
5178 	 * when the number of available rx_bd's gets
5179 	 * too low (the low watermark) and the level
5180 	 * when pause frames can be stopped (the high
5181 	 * watermark).
5182 	 */
5183 	if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
5184 		(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
5185 		u32 lo_water, hi_water;
5186 
5187 		lo_water = BCE_L2CTX_RX_LO_WATER_MARK_DEFAULT;
5188 		hi_water = USABLE_RX_BD / 4;
5189 
5190 		lo_water /= BCE_L2CTX_RX_LO_WATER_MARK_SCALE;
5191 		hi_water /= BCE_L2CTX_RX_HI_WATER_MARK_SCALE;
5192 
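		/*
		 * The high watermark is clamped to fit its 4-bit field.  If
		 * scaling rounds it down to zero, pause frame generation is
		 * effectively disabled by zeroing the low watermark as well.
		 */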
5193 		if (hi_water > 0xf)
5194 			hi_water = 0xf;
5195 		else if (hi_water == 0)
5196 			lo_water = 0;
5197 		val |= (lo_water << BCE_L2CTX_RX_LO_WATER_MARK_SHIFT) |
5198 			(hi_water << BCE_L2CTX_RX_HI_WATER_MARK_SHIFT);
5199 	}
5200 
5201  	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_CTX_TYPE, val);
5202 
5203 	/* Setup the MQ BIN mapping for l2_ctx_host_bseq. */
5204 	if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
5205 		(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
5206 		val = REG_RD(sc, BCE_MQ_MAP_L2_5);
5207 		REG_WR(sc, BCE_MQ_MAP_L2_5, val | BCE_MQ_MAP_L2_5_ARM);
5208 	}
5209 
5210 	/* Point the hardware to the first page in the chain. */
5211 	val = BCE_ADDR_HI(sc->rx_bd_chain_paddr[0]);
5212 	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_NX_BDHADDR_HI, val);
5213 	val = BCE_ADDR_LO(sc->rx_bd_chain_paddr[0]);
5214 	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_NX_BDHADDR_LO, val);
5215 
5216 	DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_CTX);
5217 }
5218 
5219 
5220 /****************************************************************************/
5221 /* Allocate memory and initialize the RX data structures.                   */
5222 /*                                                                          */
5223 /* Returns:                                                                 */
5224 /*   0 for success, positive value for failure.                             */
5225 /****************************************************************************/
5226 static int
5227 bce_init_rx_chain(struct bce_softc *sc)
5228 {
5229 	struct rx_bd *rxbd;
5230 	int i, rc = 0;
5231 
5232 	DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_LOAD |
5233 		BCE_VERBOSE_CTX);
5234 
5235 	/* Initialize the RX producer and consumer indices. */
5236 	sc->rx_prod        = 0;
5237 	sc->rx_cons        = 0;
5238 	sc->rx_prod_bseq   = 0;
5239 	sc->free_rx_bd     = USABLE_RX_BD;
5240 	sc->max_rx_bd      = USABLE_RX_BD;
5241 	DBRUN(sc->rx_low_watermark = sc->max_rx_bd);
5242 	DBRUN(sc->rx_empty_count = 0);
5243 
5244 	/* Initialize the RX next pointer chain entries. */
5245 	for (i = 0; i < RX_PAGES; i++) {
5246 		int j;
5247 
5248 		rxbd = &sc->rx_bd_chain[i][USABLE_RX_BD_PER_PAGE];
5249 
5250 		/* Check if we've reached the last page. */
5251 		if (i == (RX_PAGES - 1))
5252 			j = 0;
5253 		else
5254 			j = i + 1;
5255 
5256 		/* Setup the chain page pointers. */
5257 		rxbd->rx_bd_haddr_hi = htole32(BCE_ADDR_HI(sc->rx_bd_chain_paddr[j]));
5258 		rxbd->rx_bd_haddr_lo = htole32(BCE_ADDR_LO(sc->rx_bd_chain_paddr[j]));
5259 	}
5260 
5261 	/* Fill up the RX chain. */
5262 	bce_fill_rx_chain(sc);
5263 
5264 	for (i = 0; i < RX_PAGES; i++) {
5265 		bus_dmamap_sync(
5266 		    sc->rx_bd_chain_tag,
5267 		    sc->rx_bd_chain_map[i],
5268 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
5269 	}
5270 
5271 	bce_init_rx_context(sc);
5272 
5273 	DBRUNMSG(BCE_EXTREME_RECV, bce_dump_rx_chain(sc, 0, TOTAL_RX_BD));
5274 	DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_LOAD |
5275 		BCE_VERBOSE_CTX);
5276 	/* ToDo: Are there possible failure modes here? */
5277 	return(rc);
5278 }
5279 
5280 
5281 /****************************************************************************/
5282 /* Add mbufs to the RX chain until it's full or an mbuf allocation error    */
5283 /* occurs.                                                                  */
5284 /*                                                                          */
5285 /* Returns:                                                                 */
5286 /*   Nothing                                                                */
5287 /****************************************************************************/
5288 static void
5289 bce_fill_rx_chain(struct bce_softc *sc)
5290 {
5291 	u16 prod, prod_idx;
5292 	u32 prod_bseq;
5293 
5294 	DBENTER(BCE_VERBOSE_RESET | BCE_EXTREME_RECV | BCE_VERBOSE_LOAD |
5295 		BCE_VERBOSE_CTX);
5296 
5297 	/* Get the RX chain producer indices. */
5298 	prod      = sc->rx_prod;
5299 	prod_bseq = sc->rx_prod_bseq;
5300 
5301 	/* Keep filling the RX chain until it's full. */
5302 	while (sc->free_rx_bd > 0) {
5303 		prod_idx = RX_CHAIN_IDX(prod);
5304 		if (bce_get_rx_buf(sc, NULL, &prod, &prod_idx, &prod_bseq)) {
5305 			/* Bail out if we can't add an mbuf to the chain. */
5306 			break;
5307 		}
5308 		prod = NEXT_RX_BD(prod);
5309 	}
5310 
5311 	/* Save the RX chain producer indices. */
5312 	sc->rx_prod      = prod;
5313 	sc->rx_prod_bseq = prod_bseq;
5314 
5315 	DBRUNIF(((prod & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE),
5316 		BCE_PRINTF("%s(): Invalid rx_prod value: 0x%04X\n",
5317 		__FUNCTION__, sc->rx_prod));
5318 
5319 	/* Write the mailbox and tell the chip about the waiting rx_bd's. */
5320 	REG_WR16(sc, MB_GET_CID_ADDR(RX_CID) + BCE_L2MQ_RX_HOST_BDIDX,
5321 		sc->rx_prod);
5322 	REG_WR(sc, MB_GET_CID_ADDR(RX_CID) + BCE_L2MQ_RX_HOST_BSEQ,
5323 		sc->rx_prod_bseq);
5324 
5325 	DBEXIT(BCE_VERBOSE_RESET | BCE_EXTREME_RECV | BCE_VERBOSE_LOAD |
5326 		BCE_VERBOSE_CTX);
5327 }
5328 
5329 
5330 /****************************************************************************/
5331 /* Free memory and clear the RX data structures.                            */
5332 /*                                                                          */
5333 /* Returns:                                                                 */
5334 /*   Nothing.                                                               */
5335 /****************************************************************************/
5336 static void
5337 bce_free_rx_chain(struct bce_softc *sc)
5338 {
5339 	int i;
5340 
5341 	DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_UNLOAD);
5342 
5343 	/* Free any mbufs still in the RX mbuf chain. */
5344 	for (i = 0; i < TOTAL_RX_BD; i++) {
5345 		if (sc->rx_mbuf_ptr[i] != NULL) {
5346 			if (sc->rx_mbuf_map[i] != NULL)
5347 				bus_dmamap_sync(sc->rx_mbuf_tag, sc->rx_mbuf_map[i],
5348 					BUS_DMASYNC_POSTREAD);
5349 			m_freem(sc->rx_mbuf_ptr[i]);
5350 			sc->rx_mbuf_ptr[i] = NULL;
5351 			DBRUN(sc->debug_rx_mbuf_alloc--);
5352 		}
5353 	}
5354 
5355 	/* Clear each RX chain page. */
5356 	for (i = 0; i < RX_PAGES; i++)
5357 		bzero((char *)sc->rx_bd_chain[i], BCE_RX_CHAIN_PAGE_SZ);
5358 
5359 	sc->free_rx_bd = sc->max_rx_bd;
5360 
5361 	/* Check if we lost any mbufs in the process. */
5362 	DBRUNIF((sc->debug_rx_mbuf_alloc),
5363 		BCE_PRINTF("%s(): Memory leak! Lost %d mbufs from rx chain!\n",
5364 			__FUNCTION__, sc->debug_rx_mbuf_alloc));
5365 
5366 	DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_UNLOAD);
5367 }
5368 
5369 
5370 #ifdef ZERO_COPY_SOCKETS
5371 /****************************************************************************/
5372 /* Allocate memory and initialize the page data structures.                 */
5373 /* Assumes that bce_init_rx_chain() has not already been called.            */
5374 /*                                                                          */
5375 /* Returns:                                                                 */
5376 /*   0 for success, positive value for failure.                             */
5377 /****************************************************************************/
5378 static int
5379 bce_init_pg_chain(struct bce_softc *sc)
5380 {
5381 	struct rx_bd *pgbd;
5382 	int i, rc = 0;
5383 	u32 val;
5384 
5385 	DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_LOAD |
5386 		BCE_VERBOSE_CTX);
5387 
5388 	/* Initialize the page producer and consumer indices. */
5389 	sc->pg_prod        = 0;
5390 	sc->pg_cons        = 0;
5391 	sc->free_pg_bd     = USABLE_PG_BD;
5392 	sc->max_pg_bd      = USABLE_PG_BD;
5393 	DBRUN(sc->pg_low_watermark = sc->max_pg_bd);
5394 	DBRUN(sc->pg_empty_count = 0);
5395 
5396 	/* Initialize the page next pointer chain entries. */
5397 	for (i = 0; i < PG_PAGES; i++) {
5398 		int j;
5399 
5400 		pgbd = &sc->pg_bd_chain[i][USABLE_PG_BD_PER_PAGE];
5401 
5402 		/* Check if we've reached the last page. */
5403 		if (i == (PG_PAGES - 1))
5404 			j = 0;
5405 		else
5406 			j = i + 1;
5407 
5408 		/* Setup the chain page pointers. */
5409 		pgbd->rx_bd_haddr_hi = htole32(BCE_ADDR_HI(sc->pg_bd_chain_paddr[j]));
5410 		pgbd->rx_bd_haddr_lo = htole32(BCE_ADDR_LO(sc->pg_bd_chain_paddr[j]));
5411 	}
5412 
5413 	/* Setup the MQ BIN mapping for host_pg_bidx. */
5414 	if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709)	||
5415 		(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716))
5416 		REG_WR(sc, BCE_MQ_MAP_L2_3, BCE_MQ_MAP_L2_3_DEFAULT);
5417 
5418 	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_PG_BUF_SIZE, 0);
5419 
5420 	/* Configure the rx_bd and page chain mbuf cluster size. */
5421 	val = (sc->rx_bd_mbuf_data_len << 16) | sc->pg_bd_mbuf_alloc_size;
5422 	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_PG_BUF_SIZE, val);
5423 
5424 	/* Configure the context reserved for jumbo support. */
5425 	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_RBDC_KEY,
5426 		BCE_L2CTX_RX_RBDC_JUMBO_KEY);
5427 
5428 	/* Point the hardware to the first page in the page chain. */
5429 	val = BCE_ADDR_HI(sc->pg_bd_chain_paddr[0]);
5430 	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_NX_PG_BDHADDR_HI, val);
5431 	val = BCE_ADDR_LO(sc->pg_bd_chain_paddr[0]);
5432 	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_NX_PG_BDHADDR_LO, val);
5433 
5434 	/* Fill up the page chain. */
5435 	bce_fill_pg_chain(sc);
5436 
5437 	for (i = 0; i < PG_PAGES; i++) {
5438 		bus_dmamap_sync(
5439 		    sc->pg_bd_chain_tag,
5440 		    sc->pg_bd_chain_map[i],
5441 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
5442 	}
5443 
5444 	DBRUNMSG(BCE_EXTREME_RECV, bce_dump_pg_chain(sc, 0, TOTAL_PG_BD));
5445 	DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_LOAD |
5446 		BCE_VERBOSE_CTX);
5447 	return(rc);
5448 }
5449 
5450 
5451 /****************************************************************************/
5452 /* Add mbufs to the page chain until it's full or an mbuf allocation error  */
5453 /* occurs.                                                                  */
5454 /*                                                                          */
5455 /* Returns:                                                                 */
5456 /*   Nothing                                                                */
5457 /****************************************************************************/
5458 static void
5459 bce_fill_pg_chain(struct bce_softc *sc)
5460 {
5461 	u16 prod, prod_idx;
5462 
5463 	DBENTER(BCE_VERBOSE_RESET | BCE_EXTREME_RECV | BCE_VERBOSE_LOAD |
5464 		BCE_VERBOSE_CTX);
5465 
5466 	/* Get the page chain producer index. */
5467 	prod = sc->pg_prod;
5468 
5469 	/* Keep filling the page chain until it's full. */
5470 	while (sc->free_pg_bd > 0) {
5471 		prod_idx = PG_CHAIN_IDX(prod);
5472 		if (bce_get_pg_buf(sc, NULL, &prod, &prod_idx)) {
5473 			/* Bail out if we can't add an mbuf to the chain. */
5474 			break;
5475 		}
5476 		prod = NEXT_PG_BD(prod);
5477 	}
5478 
5479 	/* Save the page chain producer index. */
5480 	sc->pg_prod = prod;
5481 
5482 	DBRUNIF(((prod & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE),
5483 		BCE_PRINTF("%s(): Invalid pg_prod value: 0x%04X\n",
5484 		__FUNCTION__, sc->pg_prod));
5485 
5486 	/*
5487 	 * Write the mailbox and tell the chip about
5488 	 * the new rx_bd's in the page chain.
5489 	 */
5490 	REG_WR16(sc, MB_GET_CID_ADDR(RX_CID) + BCE_L2MQ_RX_HOST_PG_BDIDX,
5491 		sc->pg_prod);
5492 
5493 	DBEXIT(BCE_VERBOSE_RESET | BCE_EXTREME_RECV | BCE_VERBOSE_LOAD |
5494 		BCE_VERBOSE_CTX);
5495 }
5496 
5497 
5498 /****************************************************************************/
5499 /* Free memory and clear the page data structures.                          */
5500 /*                                                                          */
5501 /* Returns:                                                                 */
5502 /*   Nothing.                                                               */
5503 /****************************************************************************/
5504 static void
5505 bce_free_pg_chain(struct bce_softc *sc)
5506 {
5507 	int i;
5508 
5509 	DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_UNLOAD);
5510 
5511 	/* Free any mbufs still in the mbuf page chain. */
5512 	for (i = 0; i < TOTAL_PG_BD; i++) {
5513 		if (sc->pg_mbuf_ptr[i] != NULL) {
5514 			if (sc->pg_mbuf_map[i] != NULL)
5515 				bus_dmamap_sync(sc->pg_mbuf_tag, sc->pg_mbuf_map[i],
5516 					BUS_DMASYNC_POSTREAD);
5517 			m_freem(sc->pg_mbuf_ptr[i]);
5518 			sc->pg_mbuf_ptr[i] = NULL;
5519 			DBRUN(sc->debug_pg_mbuf_alloc--);
5520 		}
5521 	}
5522 
5523 	/* Clear each page chain page. */
5524 	for (i = 0; i < PG_PAGES; i++)
5525 		bzero((char *)sc->pg_bd_chain[i], BCE_PG_CHAIN_PAGE_SZ);
5526 
5527 	sc->free_pg_bd = sc->max_pg_bd;
5528 
5529 	/* Check if we lost any mbufs in the process. */
5530 	DBRUNIF((sc->debug_pg_mbuf_alloc),
5531 		BCE_PRINTF("%s(): Memory leak! Lost %d mbufs from page chain!\n",
5532 			__FUNCTION__, sc->debug_pg_mbuf_alloc));
5533 
5534 	DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_UNLOAD);
5535 }
5536 #endif /* ZERO_COPY_SOCKETS */
5537 
5538 
5539 /****************************************************************************/
5540 /* Set media options.                                                       */
5541 /*                                                                          */
5542 /* Returns:                                                                 */
5543 /*   0 for success, positive value for failure.                             */
5544 /****************************************************************************/
5545 static int
5546 bce_ifmedia_upd(struct ifnet *ifp)
5547 {
5548 	struct bce_softc *sc = ifp->if_softc;
5549 
5550 	DBENTER(BCE_VERBOSE);
5551 
5552 	BCE_LOCK(sc);
5553 	bce_ifmedia_upd_locked(ifp);
5554 	BCE_UNLOCK(sc);
5555 
5556 	DBEXIT(BCE_VERBOSE);
5557 	return (0);
5558 }
5559 
5560 
5561 /****************************************************************************/
5562 /* Set media options.                                                       */
5563 /*                                                                          */
5564 /* Returns:                                                                 */
5565 /*   Nothing.                                                               */
5566 /****************************************************************************/
5567 static void
5568 bce_ifmedia_upd_locked(struct ifnet *ifp)
5569 {
5570 	struct bce_softc *sc = ifp->if_softc;
5571 	struct mii_data *mii;
5572 
5573 	DBENTER(BCE_VERBOSE);
5574 
5575 	BCE_LOCK_ASSERT(sc);
5576 
5577 	mii = device_get_softc(sc->bce_miibus);
5578 
5579 	/* Make sure the MII bus has been enumerated. */
5580 	if (mii) {
5581 		sc->bce_link = 0;
5582 		if (mii->mii_instance) {
5583 			struct mii_softc *miisc;
5584 
5585 			LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
5586 				mii_phy_reset(miisc);
5587 		}
5588 		mii_mediachg(mii);
5589 	}
5590 
5591 	DBEXIT(BCE_VERBOSE);
5592 }
5593 
5594 
5595 /****************************************************************************/
5596 /* Reports current media status.                                            */
5597 /*                                                                          */
5598 /* Returns:                                                                 */
5599 /*   Nothing.                                                               */
5600 /****************************************************************************/
5601 static void
5602 bce_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
5603 {
5604 	struct bce_softc *sc = ifp->if_softc;
5605 	struct mii_data *mii;
5606 
5607 	DBENTER(BCE_VERBOSE);
5608 
5609 	BCE_LOCK(sc);
5610 
5611 	mii = device_get_softc(sc->bce_miibus);
5612 
5613 	mii_pollstat(mii);
5614 	ifmr->ifm_active = mii->mii_media_active;
5615 	ifmr->ifm_status = mii->mii_media_status;
5616 
5617 	BCE_UNLOCK(sc);
5618 
5619 	DBEXIT(BCE_VERBOSE);
5620 }
5621 
5622 
5623 /****************************************************************************/
5624 /* Handles PHY generated interrupt events.                                  */
5625 /*                                                                          */
5626 /* Returns:                                                                 */
5627 /*   Nothing.                                                               */
5628 /****************************************************************************/
5629 static void
5630 bce_phy_intr(struct bce_softc *sc)
5631 {
5632 	u32 new_link_state, old_link_state;
5633 
5634 	DBENTER(BCE_VERBOSE_PHY | BCE_VERBOSE_INTR);
5635 
5636 	new_link_state = sc->status_block->status_attn_bits &
5637 		STATUS_ATTN_BITS_LINK_STATE;
5638 	old_link_state = sc->status_block->status_attn_bits_ack &
5639 		STATUS_ATTN_BITS_LINK_STATE;
5640 
5641 	/* Handle any changes if the link state has changed. */
5642 	if (new_link_state != old_link_state) {
5643 
5644 		/* Update the status_attn_bits_ack field in the status block. */
5645 		if (new_link_state) {
5646 			REG_WR(sc, BCE_PCICFG_STATUS_BIT_SET_CMD,
5647 				STATUS_ATTN_BITS_LINK_STATE);
5648 			DBPRINT(sc, BCE_INFO_PHY, "%s(): Link is now UP.\n",
5649 				__FUNCTION__);
5650 		}
5651 		else {
5652 			REG_WR(sc, BCE_PCICFG_STATUS_BIT_CLEAR_CMD,
5653 				STATUS_ATTN_BITS_LINK_STATE);
5654 			DBPRINT(sc, BCE_INFO_PHY, "%s(): Link is now DOWN.\n",
5655 				__FUNCTION__);
5656 		}
5657 
5658 		/*
5659 		 * Assume link is down and allow
5660 		 * tick routine to update the state
5661 		 * based on the actual media state.
5662 		 */
5663 		sc->bce_link = 0;
5664 		callout_stop(&sc->bce_tick_callout);
5665 		bce_tick(sc);
5666 	}
5667 
5668 	/* Acknowledge the link change interrupt. */
5669 	REG_WR(sc, BCE_EMAC_STATUS, BCE_EMAC_STATUS_LINK_CHANGE);
5670 
5671 	DBEXIT(BCE_VERBOSE_PHY | BCE_VERBOSE_INTR);
5672 }
5673 
5674 
5675 /****************************************************************************/
5676 /* Reads the receive consumer value from the status block (skipping over    */
5677 /* chain page pointer if necessary).                                        */
5678 /*                                                                          */
5679 /* Returns:                                                                 */
5680 /*   hw_cons                                                                */
5681 /****************************************************************************/
5682 static inline u16
5683 bce_get_hw_rx_cons(struct bce_softc *sc)
5684 {
5685 	u16 hw_cons;
5686 
5687 	rmb();
5688 	hw_cons = sc->status_block->status_rx_quick_consumer_index0;
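	/*
	 * The last rx_bd on each chain page is a next-page pointer rather
	 * than a real buffer descriptor, so step past it whenever the
	 * hardware consumer index lands on that slot.
	 */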
5689 	if ((hw_cons & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
5690 		hw_cons++;
5691 
5692 	return hw_cons;
5693 }
5694 
5695 /****************************************************************************/
5696 /* Handles received frame interrupt events.                                 */
5697 /*                                                                          */
5698 /* Returns:                                                                 */
5699 /*   Nothing.                                                               */
5700 /****************************************************************************/
5701 static void
5702 bce_rx_intr(struct bce_softc *sc)
5703 {
5704 	struct ifnet *ifp = sc->bce_ifp;
5705 	struct l2_fhdr *l2fhdr;
5706 	unsigned int pkt_len;
5707 	u16 sw_rx_cons, sw_rx_cons_idx, hw_rx_cons;
5708 	u32 status;
5709 #ifdef ZERO_COPY_SOCKETS
5710 	unsigned int rem_len;
5711 	u16 sw_pg_cons, sw_pg_cons_idx;
5712 #endif
5713 
5714 	DBENTER(BCE_VERBOSE_RECV | BCE_VERBOSE_INTR);
5715 	DBRUN(sc->rx_interrupts++);
5716 	DBPRINT(sc, BCE_EXTREME_RECV, "%s(enter): rx_prod = 0x%04X, "
5717 		"rx_cons = 0x%04X, rx_prod_bseq = 0x%08X\n",
5718 		__FUNCTION__, sc->rx_prod, sc->rx_cons, sc->rx_prod_bseq);
5719 
5720 	/* Prepare the RX chain pages to be accessed by the host CPU. */
5721 	for (int i = 0; i < RX_PAGES; i++)
5722 		bus_dmamap_sync(sc->rx_bd_chain_tag,
5723 		    sc->rx_bd_chain_map[i], BUS_DMASYNC_POSTWRITE);
5724 
5725 #ifdef ZERO_COPY_SOCKETS
5726 	/* Prepare the page chain pages to be accessed by the host CPU. */
5727 	for (int i = 0; i < PG_PAGES; i++)
5728 		bus_dmamap_sync(sc->pg_bd_chain_tag,
5729 		    sc->pg_bd_chain_map[i], BUS_DMASYNC_POSTWRITE);
5730 #endif
5731 
5732 	/* Get the hardware's view of the RX consumer index. */
5733 	hw_rx_cons = sc->hw_rx_cons = bce_get_hw_rx_cons(sc);
5734 
5735 	/* Get working copies of the driver's view of the consumer indices. */
5736 	sw_rx_cons = sc->rx_cons;
5737 #ifdef ZERO_COPY_SOCKETS
5738 	sw_pg_cons = sc->pg_cons;
5739 #endif
5740 
5741 	/* Update some debug statistics counters */
5742 	DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark),
5743 		sc->rx_low_watermark = sc->free_rx_bd);
5744 	DBRUNIF((sc->free_rx_bd == sc->max_rx_bd), sc->rx_empty_count++);
5745 
5746 	/* Scan through the receive chain as long as there is work to do */
5747 	/* ToDo: Consider setting a limit on the number of packets processed. */
5748 	rmb();
5749 	while (sw_rx_cons != hw_rx_cons) {
5750 		struct mbuf *m0;
5751 
5752 		/* Convert the producer/consumer indices to an actual rx_bd index. */
5753 		sw_rx_cons_idx = RX_CHAIN_IDX(sw_rx_cons);
5754 
5755 		/* Unmap the mbuf from DMA space. */
5756 		bus_dmamap_sync(sc->rx_mbuf_tag,
5757 		    sc->rx_mbuf_map[sw_rx_cons_idx],
5758 		    BUS_DMASYNC_POSTREAD);
5759 		bus_dmamap_unload(sc->rx_mbuf_tag,
5760 		    sc->rx_mbuf_map[sw_rx_cons_idx]);
5761 
5762 		/* Remove the mbuf from the RX chain. */
5763 		m0 = sc->rx_mbuf_ptr[sw_rx_cons_idx];
5764 		sc->rx_mbuf_ptr[sw_rx_cons_idx] = NULL;
5765 		DBRUN(sc->debug_rx_mbuf_alloc--);
5766 		sc->free_rx_bd++;
5767 
5768 		/*
5769 		 * Frames received on the NetXtreme II are prepended
5770 		 * with an l2_fhdr structure which provides status
5771 		 * information about the received frame (including
5772 		 * VLAN tags and checksum info).  The frames are also
5773 		 * automatically adjusted to align the IP header
5774 		 * (i.e. two null bytes are inserted before the
5775 		 * Ethernet header).  As a result the data DMA'd by
5776 		 * the controller into the mbuf is as follows:
5777 		 * +---------+-----+---------------------+-----+
5778 		 * | l2_fhdr | pad | packet data         | FCS |
5779 		 * +---------+-----+---------------------+-----+
5780 		 * The l2_fhdr needs to be checked and skipped and
5781 		 * the FCS needs to be stripped before sending the
5782 		 * packet up the stack.
5783 		 */
5784 		l2fhdr  = mtod(m0, struct l2_fhdr *);
5785 
5786 		/* Get the packet data + FCS length and the status. */
5787 		pkt_len = l2fhdr->l2_fhdr_pkt_len;
5788 		status  = l2fhdr->l2_fhdr_status;
5789 
5790 		/*
5791 		 * Skip over the l2_fhdr and pad, resulting in the
5792 		 * following data in the mbuf:
5793 		 * +---------------------+-----+
5794 		 * | packet data         | FCS |
5795 		 * +---------------------+-----+
5796 		 */
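		/*
		 * ETHER_ALIGN (2 bytes) matches the pad the controller
		 * inserted ahead of the Ethernet header to keep the IP
		 * header 32-bit aligned.
		 */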
5797 		m_adj(m0, sizeof(struct l2_fhdr) + ETHER_ALIGN);
5798 
5799 #ifdef ZERO_COPY_SOCKETS
5800 		/*
5801 		 * Check whether the received frame fits in a single
5802 		 * mbuf or not (i.e. packet data + FCS <=
5803 		 * sc->rx_bd_mbuf_data_len bytes).
5804 		 */
5805 		if (pkt_len > m0->m_len) {
5806 			/*
5807 			 * The received frame is larger than a single mbuf.
5808 			 * If the frame was a TCP frame then only the TCP
5809 			 * header is placed in the mbuf, the remaining
5810 			 * payload (including FCS) is placed in the page
5811 			 * chain, the SPLIT flag is set, and the header
5812 			 * length is placed in the IP checksum field.
5813 			 * If the frame is not a TCP frame then the mbuf
5814 			 * is filled and the remaining bytes are placed
5815 			 * in the page chain.
5816 			 */
5817 
5818 			DBPRINT(sc, BCE_INFO_RECV, "%s(): Found a large packet.\n",
5819 				__FUNCTION__);
5820 
5821 			/*
5822 			 * When the page chain is enabled and the TCP
5823 			 * header has been split from the TCP payload,
5824 			 * the ip_xsum field will reflect the length
5825 			 * of the TCP header, not the IP checksum.  Set
5826 			 * the packet length of the mbuf accordingly.
5827 			 */
5828 		 	if (status & L2_FHDR_STATUS_SPLIT)
5829 				m0->m_len = l2fhdr->l2_fhdr_ip_xsum;
5830 
5831 			rem_len = pkt_len - m0->m_len;
5832 
5833 			/* Pull mbufs off the page chain for the remaining data. */
5834 			while (rem_len > 0) {
5835 				struct mbuf *m_pg;
5836 
5837 				sw_pg_cons_idx = PG_CHAIN_IDX(sw_pg_cons);
5838 
5839 				/* Remove the mbuf from the page chain. */
5840 				m_pg = sc->pg_mbuf_ptr[sw_pg_cons_idx];
5841 				sc->pg_mbuf_ptr[sw_pg_cons_idx] = NULL;
5842 				DBRUN(sc->debug_pg_mbuf_alloc--);
5843 				sc->free_pg_bd++;
5844 
5845 				/* Unmap the page chain mbuf from DMA space. */
5846 				bus_dmamap_sync(sc->pg_mbuf_tag,
5847 					sc->pg_mbuf_map[sw_pg_cons_idx],
5848 					BUS_DMASYNC_POSTREAD);
5849 				bus_dmamap_unload(sc->pg_mbuf_tag,
5850 					sc->pg_mbuf_map[sw_pg_cons_idx]);
5851 
5852 				/* Adjust the mbuf length. */
5853 				if (rem_len < m_pg->m_len) {
5854 					/* The mbuf chain is complete. */
5855 					m_pg->m_len = rem_len;
5856 					rem_len = 0;
5857 				} else {
5858 					/* More packet data is waiting. */
5859 					rem_len -= m_pg->m_len;
5860 				}
5861 
5862 				/* Concatenate the mbuf cluster to the mbuf. */
5863 				m_cat(m0, m_pg);
5864 
5865 				sw_pg_cons = NEXT_PG_BD(sw_pg_cons);
5866 			}
5867 
5868 			/* Set the total packet length. */
5869 			m0->m_pkthdr.len = pkt_len;
5870 
5871 		} else {
5872 			/*
5873 			 * The received packet is small and fits in a
5874 			 * single mbuf (i.e. the l2_fhdr + pad + packet +
5875 			 * FCS <= MHLEN).  In other words, the packet is
5876 			 * 154 bytes or less in size.
5877 			 */
5878 
5879 			DBPRINT(sc, BCE_INFO_RECV, "%s(): Found a small packet.\n",
5880 				__FUNCTION__);
5881 
5882 			/* Set the total packet length. */
5883 			m0->m_pkthdr.len = m0->m_len = pkt_len;
5884 		}
5885 #endif
5886 
5887 		/* Remove the trailing Ethernet FCS. */
5888 		m_adj(m0, -ETHER_CRC_LEN);
5889 
5890 		/* Check that the resulting mbuf chain is valid. */
5891 		DBRUN(m_sanity(m0, FALSE));
5892 		DBRUNIF(((m0->m_len < ETHER_HDR_LEN) |
5893 			(m0->m_pkthdr.len > BCE_MAX_JUMBO_ETHER_MTU_VLAN)),
5894 			BCE_PRINTF("Invalid Ethernet frame size!\n");
5895 			m_print(m0, 128));
5896 
5897 		DBRUNIF(DB_RANDOMTRUE(bce_debug_l2fhdr_status_check),
5898 			BCE_PRINTF("Simulating l2_fhdr status error.\n");
5899 			status = status | L2_FHDR_ERRORS_PHY_DECODE);
5900 
5901 		/* Check the received frame for errors. */
5902 		if (status & (L2_FHDR_ERRORS_BAD_CRC |
5903 			L2_FHDR_ERRORS_PHY_DECODE | L2_FHDR_ERRORS_ALIGNMENT |
5904 			L2_FHDR_ERRORS_TOO_SHORT  | L2_FHDR_ERRORS_GIANT_FRAME)) {
5905 
5906 			/* Log the error and release the mbuf. */
5907 			ifp->if_ierrors++;
5908 			DBRUN(sc->l2fhdr_status_errors++);
5909 
5910 			m_freem(m0);
5911 			m0 = NULL;
5912 			goto bce_rx_int_next_rx;
5913 		}
5914 
5915 		/* Send the packet to the appropriate interface. */
5916 		m0->m_pkthdr.rcvif = ifp;
5917 
5918 		/* Assume no hardware checksum. */
5919 		m0->m_pkthdr.csum_flags = 0;
5920 
5921 		/* Validate the checksum if offload enabled. */
5922 		if (ifp->if_capenable & IFCAP_RXCSUM) {
5923 
5924 			/* Check for an IP datagram. */
5925 		 	if (!(status & L2_FHDR_STATUS_SPLIT) &&
5926 				(status & L2_FHDR_STATUS_IP_DATAGRAM)) {
5927 				m0->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
5928 
5929 				/* Check if the IP checksum is valid. */
5930 				if ((l2fhdr->l2_fhdr_ip_xsum ^ 0xffff) == 0)
5931 					m0->m_pkthdr.csum_flags |= CSUM_IP_VALID;
5932 			}
5933 
5934 			/* Check for a valid TCP/UDP frame. */
5935 			if (status & (L2_FHDR_STATUS_TCP_SEGMENT |
5936 				L2_FHDR_STATUS_UDP_DATAGRAM)) {
5937 
5938 				/* Check for a good TCP/UDP checksum. */
5939 				if ((status & (L2_FHDR_ERRORS_TCP_XSUM |
5940 					      L2_FHDR_ERRORS_UDP_XSUM)) == 0) {
5941 					m0->m_pkthdr.csum_data =
5942 					    l2fhdr->l2_fhdr_tcp_udp_xsum;
5943 					m0->m_pkthdr.csum_flags |= (CSUM_DATA_VALID
5944 						| CSUM_PSEUDO_HDR);
5945 				}
5946 			}
5947 		}
5948 
5949 		/*
5950 		 * If we received a packet with a vlan tag,
5951 		 * attach that information to the packet.
5952 		 */
5953 		if (status & L2_FHDR_STATUS_L2_VLAN_TAG) {
5954 #if __FreeBSD_version < 700000
5955 			VLAN_INPUT_TAG(ifp, m0, l2fhdr->l2_fhdr_vlan_tag, continue);
5956 #else
5957 			m0->m_pkthdr.ether_vtag = l2fhdr->l2_fhdr_vlan_tag;
5958 			m0->m_flags |= M_VLANTAG;
5959 #endif
5960 		}
5961 
5962 		/* Pass the mbuf off to the upper layers. */
5963 		ifp->if_ipackets++;
5964 
5965 bce_rx_int_next_rx:
5966 		sw_rx_cons = NEXT_RX_BD(sw_rx_cons);
5967 
5968 		/* If we have a packet, pass it up the stack */
5969 		if (m0) {
5970 			/* Make sure we don't lose our place when we release the lock. */
5971 			sc->rx_cons = sw_rx_cons;
5972 #ifdef ZERO_COPY_SOCKETS
5973 			sc->pg_cons = sw_pg_cons;
5974 #endif
5975 
5976 			BCE_UNLOCK(sc);
5977 			(*ifp->if_input)(ifp, m0);
5978 			BCE_LOCK(sc);
5979 
5980 			/* Recover our place. */
5981 			sw_rx_cons = sc->rx_cons;
5982 #ifdef ZERO_COPY_SOCKETS
5983 			sw_pg_cons = sc->pg_cons;
5984 #endif
5985 		}
5986 
5987 		/* Refresh hw_cons to see if there's new work */
5988 		if (sw_rx_cons == hw_rx_cons)
5989 			hw_rx_cons = sc->hw_rx_cons = bce_get_hw_rx_cons(sc);
5990 	}
5991 
5992 	/* No new packets to process.  Refill the RX and page chains and exit. */
5993 #ifdef ZERO_COPY_SOCKETS
5994 	sc->pg_cons = sw_pg_cons;
5995 	bce_fill_pg_chain(sc);
5996 #endif
5997 
5998 	sc->rx_cons = sw_rx_cons;
5999 	bce_fill_rx_chain(sc);
6000 
6001 	for (int i = 0; i < RX_PAGES; i++)
6002 		bus_dmamap_sync(sc->rx_bd_chain_tag,
6003 		    sc->rx_bd_chain_map[i], BUS_DMASYNC_PREWRITE);
6004 
6005 #ifdef ZERO_COPY_SOCKETS
6006 	for (int i = 0; i < PG_PAGES; i++)
6007 		bus_dmamap_sync(sc->pg_bd_chain_tag,
6008 		    sc->pg_bd_chain_map[i], BUS_DMASYNC_PREWRITE);
6009 #endif
6010 
6011 	DBPRINT(sc, BCE_EXTREME_RECV, "%s(exit): rx_prod = 0x%04X, "
6012 		"rx_cons = 0x%04X, rx_prod_bseq = 0x%08X\n",
6013 		__FUNCTION__, sc->rx_prod, sc->rx_cons, sc->rx_prod_bseq);
6014 	DBEXIT(BCE_VERBOSE_RECV | BCE_VERBOSE_INTR);
6015 }
6016 
6017 
6018 /****************************************************************************/
6019 /* Reads the transmit consumer value from the status block (skipping over   */
6020 /* chain page pointer if necessary).                                        */
6021 /*                                                                          */
6022 /* Returns:                                                                 */
6023 /*   hw_cons                                                                */
6024 /****************************************************************************/
6025 static inline u16
6026 bce_get_hw_tx_cons(struct bce_softc *sc)
6027 {
6028 	u16 hw_cons;
6029 
6030 	mb();
6031 	hw_cons = sc->status_block->status_tx_quick_consumer_index0;
6032 	if ((hw_cons & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
6033 		hw_cons++;
6034 
6035 	return hw_cons;
6036 }
6037 
6038 
6039 /****************************************************************************/
6040 /* Handles transmit completion interrupt events.                            */
6041 /*                                                                          */
6042 /* Returns:                                                                 */
6043 /*   Nothing.                                                               */
6044 /****************************************************************************/
6045 static void
6046 bce_tx_intr(struct bce_softc *sc)
6047 {
6048 	struct ifnet *ifp = sc->bce_ifp;
6049 	u16 hw_tx_cons, sw_tx_cons, sw_tx_chain_cons;
6050 
6051 	DBENTER(BCE_VERBOSE_SEND | BCE_VERBOSE_INTR);
6052 	DBRUN(sc->tx_interrupts++);
6053 	DBPRINT(sc, BCE_EXTREME_SEND, "%s(enter): tx_prod = 0x%04X, "
6054 		"tx_cons = 0x%04X, tx_prod_bseq = 0x%08X\n",
6055 		__FUNCTION__, sc->tx_prod, sc->tx_cons, sc->tx_prod_bseq);
6056 
6057 	BCE_LOCK_ASSERT(sc);
6058 
6059 	/* Get the hardware's view of the TX consumer index. */
6060 	hw_tx_cons = sc->hw_tx_cons = bce_get_hw_tx_cons(sc);
6061 	sw_tx_cons = sc->tx_cons;
6062 
6063 	/* Prevent speculative reads from getting ahead of the status block. */
6064 	bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
6065 		BUS_SPACE_BARRIER_READ);
6066 
6067 	/* Cycle through any completed TX chain page entries. */
6068 	while (sw_tx_cons != hw_tx_cons) {
6069 #ifdef BCE_DEBUG
6070 		struct tx_bd *txbd = NULL;
6071 #endif
6072 		sw_tx_chain_cons = TX_CHAIN_IDX(sw_tx_cons);
6073 
6074 		DBPRINT(sc, BCE_INFO_SEND,
6075 			"%s(): hw_tx_cons = 0x%04X, sw_tx_cons = 0x%04X, "
6076 			"sw_tx_chain_cons = 0x%04X\n",
6077 			__FUNCTION__, hw_tx_cons, sw_tx_cons, sw_tx_chain_cons);
6078 
6079 		DBRUNIF((sw_tx_chain_cons > MAX_TX_BD),
6080 			BCE_PRINTF("%s(%d): TX chain consumer out of range! "
6081 				" 0x%04X > 0x%04X\n", __FILE__, __LINE__, sw_tx_chain_cons,
6082 				(int) MAX_TX_BD);
6083 			bce_breakpoint(sc));
6084 
6085 		DBRUN(txbd = &sc->tx_bd_chain[TX_PAGE(sw_tx_chain_cons)]
6086 				[TX_IDX(sw_tx_chain_cons)]);
6087 
6088 		DBRUNIF((txbd == NULL),
6089 			BCE_PRINTF("%s(%d): Unexpected NULL tx_bd[0x%04X]!\n",
6090 				__FILE__, __LINE__, sw_tx_chain_cons);
6091 			bce_breakpoint(sc));
6092 
6093 		DBRUNMSG(BCE_INFO_SEND, BCE_PRINTF("%s(): ", __FUNCTION__);
6094 			bce_dump_txbd(sc, sw_tx_chain_cons, txbd));
6095 
6096 		/*
6097 		 * Free the associated mbuf. Remember
6098 		 * that only the last tx_bd of a packet
6099 		 * has an mbuf pointer and DMA map.
6100 		 */
6101 		if (sc->tx_mbuf_ptr[sw_tx_chain_cons] != NULL) {
6102 
6103 			/* Validate that this is the last tx_bd. */
6104 			DBRUNIF((!(txbd->tx_bd_flags & TX_BD_FLAGS_END)),
6105 				BCE_PRINTF("%s(%d): tx_bd END flag not set but "
6106 				"txmbuf != NULL!\n", __FILE__, __LINE__);
6107 				bce_breakpoint(sc));
6108 
6109 			DBRUNMSG(BCE_INFO_SEND,
6110 				BCE_PRINTF("%s(): Unloading map/freeing mbuf "
6111 					"from tx_bd[0x%04X]\n", __FUNCTION__, sw_tx_chain_cons));
6112 
6113 			/* Unmap the mbuf. */
6114 			bus_dmamap_unload(sc->tx_mbuf_tag,
6115 			    sc->tx_mbuf_map[sw_tx_chain_cons]);
6116 
6117 			/* Free the mbuf. */
6118 			m_freem(sc->tx_mbuf_ptr[sw_tx_chain_cons]);
6119 			sc->tx_mbuf_ptr[sw_tx_chain_cons] = NULL;
6120 			DBRUN(sc->debug_tx_mbuf_alloc--);
6121 
6122 			ifp->if_opackets++;
6123 		}
6124 
6125 		sc->used_tx_bd--;
6126 		sw_tx_cons = NEXT_TX_BD(sw_tx_cons);
6127 
6128 		/* Refresh hw_cons to see if there's new work. */
6129 		hw_tx_cons = sc->hw_tx_cons = bce_get_hw_tx_cons(sc);
6130 
6131 		/* Prevent speculative reads from getting ahead of the status block. */
6132 		bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
6133 			BUS_SPACE_BARRIER_READ);
6134 	}
6135 
6136 	/* Clear the TX timeout timer. */
6137 	sc->watchdog_timer = 0;
6138 
6139 	/* Clear the tx hardware queue full flag. */
6140 	if (sc->used_tx_bd < sc->max_tx_bd) {
6141 		DBRUNIF((ifp->if_drv_flags & IFF_DRV_OACTIVE),
6142 			DBPRINT(sc, BCE_INFO_SEND,
6143 				"%s(): Open TX chain! %d/%d (used/total)\n",
6144 				__FUNCTION__, sc->used_tx_bd, sc->max_tx_bd));
6145 		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
6146 	}
6147 
6148 	sc->tx_cons = sw_tx_cons;
6149 
6150 	DBPRINT(sc, BCE_EXTREME_SEND, "%s(exit): tx_prod = 0x%04X, "
6151 		"tx_cons = 0x%04X, tx_prod_bseq = 0x%08X\n",
6152 		__FUNCTION__, sc->tx_prod, sc->tx_cons, sc->tx_prod_bseq);
6153 	DBEXIT(BCE_VERBOSE_SEND | BCE_VERBOSE_INTR);
6154 }
6155 
6156 
6157 /****************************************************************************/
6158 /* Disables interrupt generation.                                           */
6159 /*                                                                          */
6160 /* Returns:                                                                 */
6161 /*   Nothing.                                                               */
6162 /****************************************************************************/
6163 static void
6164 bce_disable_intr(struct bce_softc *sc)
6165 {
6166 	DBENTER(BCE_VERBOSE_INTR);
6167 
6168 	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, BCE_PCICFG_INT_ACK_CMD_MASK_INT);
6169 	REG_RD(sc, BCE_PCICFG_INT_ACK_CMD);
6170 
6171 	DBEXIT(BCE_VERBOSE_INTR);
6172 }
6173 
6174 
6175 /****************************************************************************/
6176 /* Enables interrupt generation.                                            */
6177 /*                                                                          */
6178 /* Returns:                                                                 */
6179 /*   Nothing.                                                               */
6180 /****************************************************************************/
6181 static void
6182 bce_enable_intr(struct bce_softc *sc, int coal_now)
6183 {
6184 	DBENTER(BCE_VERBOSE_INTR);
6185 
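	/*
	 * The interrupt ack register is written twice below: the first write
	 * updates last_status_idx while leaving the interrupt masked, and the
	 * second write (without the mask bit) re-arms interrupt generation.
	 * The exact hardware semantics are inferred from the register names.
	 */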
6186 	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
6187 	       BCE_PCICFG_INT_ACK_CMD_INDEX_VALID |
6188 	       BCE_PCICFG_INT_ACK_CMD_MASK_INT | sc->last_status_idx);
6189 
6190 	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
6191 	       BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx);
6192 
6193 	/* Force an immediate interrupt (whether there is new data or not). */
6194 	if (coal_now)
6195 		REG_WR(sc, BCE_HC_COMMAND, sc->hc_command | BCE_HC_COMMAND_COAL_NOW);
6196 
6197 	DBEXIT(BCE_VERBOSE_INTR);
6198 }
6199 
6200 
6201 /****************************************************************************/
6202 /* Handles controller initialization.                                       */
6203 /*                                                                          */
6204 /* Returns:                                                                 */
6205 /*   Nothing.                                                               */
6206 /****************************************************************************/
6207 static void
6208 bce_init_locked(struct bce_softc *sc)
6209 {
6210 	struct ifnet *ifp;
6211 	u32 ether_mtu = 0;
6212 
6213 	DBENTER(BCE_VERBOSE_RESET);
6214 
6215 	BCE_LOCK_ASSERT(sc);
6216 
6217 	ifp = sc->bce_ifp;
6218 
6219 	/* Check if the driver is still running and bail out if it is. */
6220 	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
6221 		goto bce_init_locked_exit;
6222 
6223 	bce_stop(sc);
6224 
6225 	if (bce_reset(sc, BCE_DRV_MSG_CODE_RESET)) {
6226 		BCE_PRINTF("%s(%d): Controller reset failed!\n",
6227 			__FILE__, __LINE__);
6228 		goto bce_init_locked_exit;
6229 	}
6230 
6231 	if (bce_chipinit(sc)) {
6232 		BCE_PRINTF("%s(%d): Controller initialization failed!\n",
6233 			__FILE__, __LINE__);
6234 		goto bce_init_locked_exit;
6235 	}
6236 
6237 	if (bce_blockinit(sc)) {
6238 		BCE_PRINTF("%s(%d): Block initialization failed!\n",
6239 			__FILE__, __LINE__);
6240 		goto bce_init_locked_exit;
6241 	}
6242 
6243 	/* Load our MAC address. */
6244 	bcopy(IF_LLADDR(sc->bce_ifp), sc->eaddr, ETHER_ADDR_LEN);
6245 	bce_set_mac_addr(sc);
6246 
6247 	/*
6248 	 * Calculate and program the hardware Ethernet MTU
6249 	 * size. Be generous on the receive if we have room.
6250 	 */
6251 #ifdef ZERO_COPY_SOCKETS
6252 	if (ifp->if_mtu <= (sc->rx_bd_mbuf_data_len + sc->pg_bd_mbuf_alloc_size))
6253 		ether_mtu = sc->rx_bd_mbuf_data_len + sc->pg_bd_mbuf_alloc_size;
6254 #else
6255 	if (ifp->if_mtu <= sc->rx_bd_mbuf_data_len)
6256 		ether_mtu = sc->rx_bd_mbuf_data_len;
6257 #endif
6258 	else
6259 		ether_mtu = ifp->if_mtu;
6260 
6261 	ether_mtu += ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + ETHER_CRC_LEN;
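	/*
	 * For example, the default 1500 byte MTU becomes
	 * 1500 + 14 (Ethernet header) + 4 (VLAN tag) + 4 (CRC) = 1522 bytes,
	 * the largest VLAN-tagged frame the MAC must accept.
	 */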
6262 
6263 	DBPRINT(sc, BCE_INFO_MISC, "%s(): setting h/w mtu = %d\n", __FUNCTION__,
6264 		ether_mtu);
6265 
6266 	/* Program the mtu, enabling jumbo frame support if necessary. */
6267 	if (ether_mtu > (ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN))
6268 		REG_WR(sc, BCE_EMAC_RX_MTU_SIZE,
6269 			min(ether_mtu, BCE_MAX_JUMBO_ETHER_MTU) |
6270 			BCE_EMAC_RX_MTU_SIZE_JUMBO_ENA);
6271 	else
6272 		REG_WR(sc, BCE_EMAC_RX_MTU_SIZE, ether_mtu);
6273 
6274 	DBPRINT(sc, BCE_INFO_LOAD,
6275 		"%s(): rx_bd_mbuf_alloc_size = %d, rx_bd_mbuf_data_len = %d, "
6276 		"rx_bd_mbuf_align_pad = %d, pg_bd_mbuf_alloc_size = %d\n",
6277 		__FUNCTION__, sc->rx_bd_mbuf_alloc_size, sc->rx_bd_mbuf_data_len,
6278 		sc->rx_bd_mbuf_align_pad, sc->pg_bd_mbuf_alloc_size);
6279 
6280 	/* Program appropriate promiscuous/multicast filtering. */
6281 	bce_set_rx_mode(sc);
6282 
6283 #ifdef ZERO_COPY_SOCKETS
6284 	/* Init page buffer descriptor chain. */
6285 	bce_init_pg_chain(sc);
6286 #endif
6287 
6288 	/* Init RX buffer descriptor chain. */
6289 	bce_init_rx_chain(sc);
6290 
6291 	/* Init TX buffer descriptor chain. */
6292 	bce_init_tx_chain(sc);
6293 
6294 	/* Enable host interrupts. */
6295 	bce_enable_intr(sc, 1);
6296 
6297 	bce_ifmedia_upd_locked(ifp);
6298 
6299 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
6300 	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
6301 
6302 	callout_reset(&sc->bce_tick_callout, hz, bce_tick, sc);
6303 
6304 bce_init_locked_exit:
6305 	DBEXIT(BCE_VERBOSE_RESET);
6306 }
6307 
6308 
6309 /****************************************************************************/
6310 /* Initialize the controller just enough so that any management firmware    */
6311 /* running on the device will continue to operate correctly.                */
6312 /*                                                                          */
6313 /* Returns:                                                                 */
6314 /*   Nothing.                                                               */
6315 /****************************************************************************/
6316 static void
6317 bce_mgmt_init_locked(struct bce_softc *sc)
6318 {
6319 	struct ifnet *ifp;
6320 
6321 	DBENTER(BCE_VERBOSE_RESET);
6322 
6323 	BCE_LOCK_ASSERT(sc);
6324 
6325 	/* Bail out if management firmware is not running. */
6326 	if (!(sc->bce_flags & BCE_MFW_ENABLE_FLAG)) {
6327 		DBPRINT(sc, BCE_VERBOSE_SPECIAL,
6328 			"No management firmware running...\n");
6329 		goto bce_mgmt_init_locked_exit;
6330 	}
6331 
6332 	ifp = sc->bce_ifp;
6333 
6334 	/* Enable all critical blocks in the MAC. */
6335 	REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, BCE_MISC_ENABLE_DEFAULT);
6336 	REG_RD(sc, BCE_MISC_ENABLE_SET_BITS);
6337 	DELAY(20);
6338 
6339 	bce_ifmedia_upd_locked(ifp);
6340 
6341 bce_mgmt_init_locked_exit:
6342 	DBEXIT(BCE_VERBOSE_RESET);
6343 }
6344 
6345 
6346 /****************************************************************************/
6347 /* Handles controller initialization when called from an unlocked routine.  */
6348 /*                                                                          */
6349 /* Returns:                                                                 */
6350 /*   Nothing.                                                               */
6351 /****************************************************************************/
6352 static void
6353 bce_init(void *xsc)
6354 {
6355 	struct bce_softc *sc = xsc;
6356 
6357 	DBENTER(BCE_VERBOSE_RESET);
6358 
6359 	BCE_LOCK(sc);
6360 	bce_init_locked(sc);
6361 	BCE_UNLOCK(sc);
6362 
6363 	DBEXIT(BCE_VERBOSE_RESET);
6364 }
6365 
6366 
6367 /****************************************************************************/
6368 /* Encapsulates an mbuf cluster into the tx_bd chain structure and makes the*/
6369 /* memory visible to the controller.                                        */
6370 /*                                                                          */
6371 /* Returns:                                                                 */
6372 /*   0 for success, positive value for failure.                             */
6373 /* Modified:                                                                */
6374 /*   m_head: May be set to NULL if MBUF is excessively fragmented.          */
6375 /****************************************************************************/
6376 static int
6377 bce_tx_encap(struct bce_softc *sc, struct mbuf **m_head)
6378 {
6379 	bus_dma_segment_t segs[BCE_MAX_SEGMENTS];
6380 	bus_dmamap_t map;
6381 	struct tx_bd *txbd = NULL;
6382 	struct mbuf *m0;
6383 	struct ether_vlan_header *eh;
6384 	struct ip *ip;
6385 	struct tcphdr *th;
6386 	u16 prod, chain_prod, etype, mss = 0, vlan_tag = 0, flags = 0;
6387 	u32 prod_bseq;
6388 	int hdr_len = 0, e_hlen = 0, ip_hlen = 0, tcp_hlen = 0, ip_len = 0;
6389 
6390 #ifdef BCE_DEBUG
6391 	u16 debug_prod;
6392 #endif
6393 	int i, error, nsegs, rc = 0;
6394 
6395 	DBENTER(BCE_VERBOSE_SEND);
6396 	DBPRINT(sc, BCE_INFO_SEND,
6397 		"%s(enter): tx_prod = 0x%04X, tx_chain_prod = %04X, "
6398 		"tx_prod_bseq = 0x%08X\n",
6399 		__FUNCTION__, sc->tx_prod, (u16) TX_CHAIN_IDX(sc->tx_prod),
6400 		sc->tx_prod_bseq);
6401 
6402 	/* Transfer any checksum offload flags to the bd. */
6403 	m0 = *m_head;
6404 	if (m0->m_pkthdr.csum_flags) {
6405 		if (m0->m_pkthdr.csum_flags & CSUM_IP)
6406 			flags |= TX_BD_FLAGS_IP_CKSUM;
6407 		if (m0->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
6408 			flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
6409 		if (m0->m_pkthdr.csum_flags & CSUM_TSO) {
6410 			/* For TSO the controller needs two pieces of info, */
6411 			/* the MSS and the IP+TCP options length.           */
6412 			mss = htole16(m0->m_pkthdr.tso_segsz);
6413 
6414 			/* Map the header and find the Ethernet type & header length */
6415 			eh = mtod(m0, struct ether_vlan_header *);
6416 			if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
6417 				etype = ntohs(eh->evl_proto);
6418 				e_hlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
6419 			} else {
6420 				etype = ntohs(eh->evl_encap_proto);
6421 				e_hlen = ETHER_HDR_LEN;
6422 			}
6423 
6424 			/* Check for supported TSO Ethernet types (only IPv4 for now) */
6425 			switch (etype) {
6426 				case ETHERTYPE_IP:
6427 					ip = (struct ip *)(m0->m_data + e_hlen);
6428 
6429 					/* TSO only supported for TCP protocol */
6430 					if (ip->ip_p != IPPROTO_TCP) {
6431 						BCE_PRINTF("%s(%d): TSO enabled for non-TCP frame!\n",
6432 							__FILE__, __LINE__);
6433 						goto bce_tx_encap_skip_tso;
6434 					}
6435 
6436 					/* Get IP header length in bytes (min 20) */
6437 					ip_hlen = ip->ip_hl << 2;
6438 
6439 					/* Get the TCP header length in bytes (min 20) */
6440 					th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
6441 					tcp_hlen = (th->th_off << 2);
6442 
6443 					/* IP header length and checksum will be calc'd by hardware */
6444 					ip_len = ip->ip_len;
6445 					ip->ip_len = 0;
6446 					ip->ip_sum = 0;
6447 					break;
6448 				case ETHERTYPE_IPV6:
6449 					BCE_PRINTF("%s(%d): TSO over IPv6 not supported!\n",
6450 						__FILE__, __LINE__);
6451 					goto bce_tx_encap_skip_tso;
6452 				default:
6453 					BCE_PRINTF("%s(%d): TSO enabled for unsupported protocol!\n",
6454 						__FILE__, __LINE__);
6455 					goto bce_tx_encap_skip_tso;
6456 			}
6457 
6458 			hdr_len = e_hlen + ip_hlen + tcp_hlen;
6459 
6460 			DBPRINT(sc, BCE_EXTREME_SEND,
6461 				"%s(): hdr_len = %d, e_hlen = %d, ip_hlen = %d, tcp_hlen = %d, ip_len = %d\n",
6462 				 __FUNCTION__, hdr_len, e_hlen, ip_hlen, tcp_hlen, ip_len);
6463 
6464 			/* Set the LSO flag in the TX BD */
6465 			flags |= TX_BD_FLAGS_SW_LSO;
6466 			/* Set the length of IP + TCP options (in 32 bit words) */
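			/*
			 * 40 bytes is a minimal IPv4 + TCP header (20 + 20), so the
			 * expression below encodes only the option bytes.  For example,
			 * ip_hlen = 20 and tcp_hlen = 32 gives (52 - 40) >> 2 = 3
			 * 32-bit option words, shifted into the upper byte of the
			 * tx_bd flags word.
			 */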
6467 			flags |= (((ip_hlen + tcp_hlen - 40) >> 2) << 8);
6468 
6469 bce_tx_encap_skip_tso:
6470 			DBRUN(sc->requested_tso_frames++);
6471 		}
6472 	}
6473 
6474 	/* Transfer any VLAN tags to the bd. */
6475 	if (m0->m_flags & M_VLANTAG) {
6476 		flags |= TX_BD_FLAGS_VLAN_TAG;
6477 		vlan_tag = m0->m_pkthdr.ether_vtag;
6478 	}
6479 
6480 	/* Map the mbuf into DMAable memory. */
6481 	prod = sc->tx_prod;
6482 	chain_prod = TX_CHAIN_IDX(prod);
6483 	map = sc->tx_mbuf_map[chain_prod];
6484 
6485 	/* Map the mbuf into our DMA address space. */
6486 	error = bus_dmamap_load_mbuf_sg(sc->tx_mbuf_tag, map, m0,
6487 	    segs, &nsegs, BUS_DMA_NOWAIT);
6488 
6489 	/* Check if the DMA mapping was successful */
6490 	if (error == EFBIG) {
6491 
6492 		/* The mbuf is too fragmented for our DMA mapping. */
6493 		DBPRINT(sc, BCE_WARN, "%s(): fragmented mbuf (%d pieces)\n",
6494 			__FUNCTION__, nsegs);
6495 		DBRUN(bce_dump_mbuf(sc, m0););
6496 
6497 		/* Try to defrag the mbuf. */
6498 		m0 = m_defrag(*m_head, M_DONTWAIT);
6499 		if (m0 == NULL) {
6500 			/* Defrag was unsuccessful */
6501 			m_freem(*m_head);
6502 			*m_head = NULL;
6503 			sc->mbuf_alloc_failed++;
6504 			rc = ENOBUFS;
6505 			goto bce_tx_encap_exit;
6506 		}
6507 
6508 		/* Defrag was successful, try mapping again */
6509 		*m_head = m0;
6510 		error = bus_dmamap_load_mbuf_sg(sc->tx_mbuf_tag, map, m0,
6511 		    segs, &nsegs, BUS_DMA_NOWAIT);
6512 
6513 		/* Still getting an error after a defrag. */
6514 		if (error == ENOMEM) {
6515 			/* Insufficient DMA buffers available. */
6516 			sc->tx_dma_map_failures++;
6517 			rc = error;
6518 			goto bce_tx_encap_exit;
6519 		} else if (error != 0) {
6520 			/* Still can't map the mbuf, release it and return an error. */
6521 			BCE_PRINTF(
6522 			    "%s(%d): Unknown error mapping mbuf into TX chain!\n",
6523 			    __FILE__, __LINE__);
6524 			m_freem(m0);
6525 			*m_head = NULL;
6526 			sc->tx_dma_map_failures++;
6527 			rc = ENOBUFS;
6528 			goto bce_tx_encap_exit;
6529 		}
6530 	} else if (error == ENOMEM) {
6531 		/* Insufficient DMA buffers available. */
6532 		sc->tx_dma_map_failures++;
6533 		rc = error;
6534 		goto bce_tx_encap_exit;
6535 	} else if (error != 0) {
6536 		m_freem(m0);
6537 		*m_head = NULL;
6538 		sc->tx_dma_map_failures++;
6539 		rc = error;
6540 		goto bce_tx_encap_exit;
6541 	}
6542 
6543 	/* Make sure there's room in the chain */
6544 	if (nsegs > (sc->max_tx_bd - sc->used_tx_bd)) {
6545 		bus_dmamap_unload(sc->tx_mbuf_tag, map);
6546 		rc = ENOBUFS;
6547 		goto bce_tx_encap_exit;
6548 	}
6549 
6550 	/* prod points to an empty tx_bd at this point. */
6551 	prod_bseq  = sc->tx_prod_bseq;
6552 
6553 #ifdef BCE_DEBUG
6554 	debug_prod = chain_prod;
6555 #endif
6556 
6557 	DBPRINT(sc, BCE_INFO_SEND,
6558 		"%s(start): prod = 0x%04X, chain_prod = 0x%04X, "
6559 		"prod_bseq = 0x%08X\n",
6560 		__FUNCTION__, prod, chain_prod, prod_bseq);
6561 
6562 	/*
6563 	 * Cycle through each mbuf segment that makes up
6564 	 * the outgoing frame, gathering the mapping info
6565 	 * for that segment and creating a tx_bd for
6566 	 * the mbuf.
6567 	 */
6568 	for (i = 0; i < nsegs ; i++) {
6569 
6570 		chain_prod = TX_CHAIN_IDX(prod);
6571 		txbd= &sc->tx_bd_chain[TX_PAGE(chain_prod)][TX_IDX(chain_prod)];
6572 
6573 		txbd->tx_bd_haddr_lo = htole32(BCE_ADDR_LO(segs[i].ds_addr));
6574 		txbd->tx_bd_haddr_hi = htole32(BCE_ADDR_HI(segs[i].ds_addr));
6575 		txbd->tx_bd_mss_nbytes = htole32(mss << 16) | htole16(segs[i].ds_len);
6576 		txbd->tx_bd_vlan_tag = htole16(vlan_tag);
6577 		txbd->tx_bd_flags = htole16(flags);
6578 		prod_bseq += segs[i].ds_len;
6579 		if (i == 0)
6580 			txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_START);
6581 		prod = NEXT_TX_BD(prod);
6582 	}
6583 
6584 	/* Set the END flag on the last TX buffer descriptor. */
6585 	txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_END);
6586 
6587 	DBRUNMSG(BCE_EXTREME_SEND, bce_dump_tx_chain(sc, debug_prod, nsegs));
6588 
6589 	DBPRINT(sc, BCE_INFO_SEND,
6590 		"%s( end ): prod = 0x%04X, chain_prod = 0x%04X, "
6591 		"prod_bseq = 0x%08X\n",
6592 		__FUNCTION__, prod, chain_prod, prod_bseq);
6593 
6594 	/*
6595 	 * Ensure that the mbuf pointer for this transmission
6596 	 * is placed at the array index of the last
6597 	 * descriptor in this chain.  This is done
6598 	 * because a single map is used for all
6599 	 * segments of the mbuf and we don't want to
6600 	 * unload the map before all of the segments
6601 	 * have been freed.
6602 	 */
6603 	sc->tx_mbuf_ptr[chain_prod] = m0;
6604 	sc->used_tx_bd += nsegs;
6605 
6606 	/* Update some debug statistic counters */
6607 	DBRUNIF((sc->used_tx_bd > sc->tx_hi_watermark),
6608 		sc->tx_hi_watermark = sc->used_tx_bd);
6609 	DBRUNIF((sc->used_tx_bd == sc->max_tx_bd), sc->tx_full_count++);
6610 	DBRUNIF(sc->debug_tx_mbuf_alloc++);
6611 
6612 	DBRUNMSG(BCE_EXTREME_SEND, bce_dump_tx_mbuf_chain(sc, chain_prod, 1));
6613 
6614 	/* prod points to the next free tx_bd at this point. */
6615 	sc->tx_prod = prod;
6616 	sc->tx_prod_bseq = prod_bseq;
6617 
6618 	DBPRINT(sc, BCE_INFO_SEND,
6619 		"%s(exit): prod = 0x%04X, chain_prod = %04X, "
6620 		"prod_bseq = 0x%08X\n",
6621 		__FUNCTION__, sc->tx_prod, (u16) TX_CHAIN_IDX(sc->tx_prod),
6622 		sc->tx_prod_bseq);
6623 
6624 bce_tx_encap_exit:
6625 	DBEXIT(BCE_VERBOSE_SEND);
6626 	return(rc);
6627 }
6628 
6629 
6630 /****************************************************************************/
6631 /* Main transmit routine when called from another routine with a lock.      */
6632 /*                                                                          */
6633 /* Returns:                                                                 */
6634 /*   Nothing.                                                               */
6635 /****************************************************************************/
6636 static void
6637 bce_start_locked(struct ifnet *ifp)
6638 {
6639 	struct bce_softc *sc = ifp->if_softc;
6640 	struct mbuf *m_head = NULL;
6641 	int count = 0;
6642 	u16 tx_prod, tx_chain_prod;
6643 
6644 	DBENTER(BCE_VERBOSE_SEND | BCE_VERBOSE_CTX);
6645 
6646 	BCE_LOCK_ASSERT(sc);
6647 
6648 	/* prod points to the next free tx_bd. */
6649 	tx_prod = sc->tx_prod;
6650 	tx_chain_prod = TX_CHAIN_IDX(tx_prod);
6651 
6652 	DBPRINT(sc, BCE_INFO_SEND,
6653 		"%s(enter): tx_prod = 0x%04X, tx_chain_prod = 0x%04X, "
6654 		"tx_prod_bseq = 0x%08X\n",
6655 		__FUNCTION__, tx_prod, tx_chain_prod, sc->tx_prod_bseq);
6656 
6657 	/* If there's no link or the transmit queue is empty then just exit. */
6658 	if (!sc->bce_link) {
6659 		DBPRINT(sc, BCE_INFO_SEND, "%s(): No link.\n",
6660 			__FUNCTION__);
6661 		goto bce_start_locked_exit;
6662 	}
6663 
6664 	if (IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
6665 		DBPRINT(sc, BCE_INFO_SEND, "%s(): Transmit queue empty.\n",
6666 			__FUNCTION__);
6667 		goto bce_start_locked_exit;
6668 	}
6669 
6670 	/*
6671 	 * Keep adding entries while there is space in the ring.
6672 	 */
6673 	while (sc->used_tx_bd < sc->max_tx_bd) {
6674 
6675 		/* Check for any frames to send. */
6676 		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
6677 
6678 		/* Stop when the transmit queue is empty. */
6679 		if (m_head == NULL)
6680 			break;
6681 
6682 		/*
6683 		 * Pack the data into the transmit ring. If we
6684 		 * don't have room, place the mbuf back at the
6685 		 * head of the queue and set the OACTIVE flag
6686 		 * to wait for the NIC to drain the chain.
6687 		 */
6688 		if (bce_tx_encap(sc, &m_head)) {
6689 			/* No room, put the frame back on the transmit queue. */
6690 			if (m_head != NULL)
6691 				IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
6692 			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
6693 			DBPRINT(sc, BCE_INFO_SEND,
6694 				"TX chain is closed for business! Total tx_bd used = %d\n",
6695 				sc->used_tx_bd);
6696 			break;
6697 		}
6698 
6699 		count++;
6700 
6701 		/* Send a copy of the frame to any BPF listeners. */
6702 		ETHER_BPF_MTAP(ifp, m_head);
6703 	}
6704 
6705 	/* Exit if no packets were dequeued. */
6706 	if (count == 0) {
6707 		DBPRINT(sc, BCE_VERBOSE_SEND, "%s(): No packets were dequeued\n",
6708 			__FUNCTION__);
6709 		goto bce_start_locked_exit;
6710 	}
6711 
6712 	DBPRINT(sc, BCE_VERBOSE_SEND, "%s(): Inserted %d frames into send queue.\n",
6713 		__FUNCTION__, count);
6714 
6715 	REG_WR(sc, BCE_MQ_COMMAND, REG_RD(sc, BCE_MQ_COMMAND) | BCE_MQ_COMMAND_NO_MAP_ERROR);
6716 
6717 	/* Write the mailbox and tell the chip about the waiting tx_bd's. */
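	/*
	 * BIDX is written with the new tx_bd producer index and BSEQ with the
	 * running byte sequence count (tx_prod_bseq); together they tell the
	 * chip how much new transmit work has been posted (as the variable
	 * names suggest).
	 */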
6718 	DBPRINT(sc, BCE_VERBOSE_SEND, "%s(): MB_GET_CID_ADDR(TX_CID) = 0x%08X; "
6719 		"BCE_L2MQ_TX_HOST_BIDX = 0x%08X, sc->tx_prod = 0x%04X\n",
6720 		__FUNCTION__,
6721 		MB_GET_CID_ADDR(TX_CID), BCE_L2MQ_TX_HOST_BIDX, sc->tx_prod);
6722 	REG_WR16(sc, MB_GET_CID_ADDR(TX_CID) + BCE_L2MQ_TX_HOST_BIDX, sc->tx_prod);
6723 	DBPRINT(sc, BCE_VERBOSE_SEND, "%s(): MB_GET_CID_ADDR(TX_CID) = 0x%08X; "
6724 		"BCE_L2MQ_TX_HOST_BSEQ = 0x%08X, sc->tx_prod_bseq = 0x%04X\n",
6725 		__FUNCTION__,
6726 		MB_GET_CID_ADDR(TX_CID), BCE_L2MQ_TX_HOST_BSEQ, sc->tx_prod_bseq);
6727 	REG_WR(sc, MB_GET_CID_ADDR(TX_CID) + BCE_L2MQ_TX_HOST_BSEQ, sc->tx_prod_bseq);
6728 
6729 	/* Set the tx timeout. */
6730 	sc->watchdog_timer = BCE_TX_TIMEOUT;
6731 
6732 	DBRUNMSG(BCE_VERBOSE_SEND, bce_dump_ctx(sc, TX_CID));
6733 	DBRUNMSG(BCE_VERBOSE_SEND, bce_dump_mq_regs(sc));
6734 
6735 bce_start_locked_exit:
6736 	DBEXIT(BCE_VERBOSE_SEND | BCE_VERBOSE_CTX);
6737 	return;
6738 }
6739 
6740 
6741 /****************************************************************************/
6742 /* Main transmit routine when called from another routine without a lock.   */
6743 /*                                                                          */
6744 /* Returns:                                                                 */
6745 /*   Nothing.                                                               */
6746 /****************************************************************************/
6747 static void
6748 bce_start(struct ifnet *ifp)
6749 {
6750 	struct bce_softc *sc = ifp->if_softc;
6751 
6752 	DBENTER(BCE_VERBOSE_SEND);
6753 
6754 	BCE_LOCK(sc);
6755 	bce_start_locked(ifp);
6756 	BCE_UNLOCK(sc);
6757 
6758 	DBEXIT(BCE_VERBOSE_SEND);
6759 }
6760 
6761 
6762 /****************************************************************************/
6763 /* Handles any IOCTL calls from the operating system.                       */
6764 /*                                                                          */
6765 /* Returns:                                                                 */
6766 /*   0 for success, positive value for failure.                             */
6767 /****************************************************************************/
6768 static int
6769 bce_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
6770 {
6771 	struct bce_softc *sc = ifp->if_softc;
6772 	struct ifreq *ifr = (struct ifreq *) data;
6773 	struct mii_data *mii;
6774 	int mask, error = 0;
6775 
6776 	DBENTER(BCE_VERBOSE_MISC);
6777 
6778 	switch(command) {
6779 
6780 		/* Set the interface MTU. */
6781 		case SIOCSIFMTU:
6782 			/* Check that the MTU setting is supported. */
6783 			if ((ifr->ifr_mtu < BCE_MIN_MTU) ||
6784 				(ifr->ifr_mtu > BCE_MAX_JUMBO_MTU)) {
6785 				error = EINVAL;
6786 				break;
6787 			}
6788 
6789 			DBPRINT(sc, BCE_INFO_MISC,
6790 				"SIOCSIFMTU: Changing MTU from %d to %d\n",
6791 				(int) ifp->if_mtu, (int) ifr->ifr_mtu);
6792 
6793 			BCE_LOCK(sc);
6794 			ifp->if_mtu = ifr->ifr_mtu;
6795 			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
6796 #ifdef ZERO_COPY_SOCKETS
6797 			/* No buffer allocation size changes are necessary. */
6798 #else
6799 			/* Recalculate our buffer allocation sizes. */
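			/*
			 * If the new MTU plus Ethernet/VLAN/CRC overhead no longer fits
			 * in a standard 2KB cluster (MCLBYTES), receive buffers are
			 * switched to 9KB jumbo clusters (MJUM9BYTES) instead.
			 */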
6800 			if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + ETHER_CRC_LEN) > MCLBYTES) {
6801 				sc->rx_bd_mbuf_alloc_size = MJUM9BYTES;
6802 				sc->rx_bd_mbuf_align_pad  = roundup2(MJUM9BYTES, 16) - MJUM9BYTES;
6803 				sc->rx_bd_mbuf_data_len   = sc->rx_bd_mbuf_alloc_size -
6804 					sc->rx_bd_mbuf_align_pad;
6805 			} else {
6806 				sc->rx_bd_mbuf_alloc_size = MCLBYTES;
6807 				sc->rx_bd_mbuf_align_pad  = roundup2(MCLBYTES, 16) - MCLBYTES;
6808 				sc->rx_bd_mbuf_data_len   = sc->rx_bd_mbuf_alloc_size -
6809 					sc->rx_bd_mbuf_align_pad;
6810 			}
6811 #endif
6812 
6813 			bce_init_locked(sc);
6814 			BCE_UNLOCK(sc);
6815 			break;
6816 
6817 		/* Set interface flags. */
6818 		case SIOCSIFFLAGS:
6819 			DBPRINT(sc, BCE_VERBOSE_SPECIAL, "Received SIOCSIFFLAGS\n");
6820 
6821 			BCE_LOCK(sc);
6822 
6823 			/* Check if the interface is up. */
6824 			if (ifp->if_flags & IFF_UP) {
6825 				if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
6826 					/* Change promiscuous/multicast flags as necessary. */
6827 					bce_set_rx_mode(sc);
6828 				} else {
6829 					/* Start the HW */
6830 					bce_init_locked(sc);
6831 				}
6832 			} else {
6833 				/* The interface is down, check if driver is running. */
6834 				if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
6835 					bce_stop(sc);
6836 
6837 					/* If MFW is running, partially restart the controller so it keeps running. */
6838 					if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) {
6839 						bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
6840 						bce_chipinit(sc);
6841 						bce_mgmt_init_locked(sc);
6842 					}
6843 				}
6844 			}
6845 
6846 			BCE_UNLOCK(sc);
6847 			error = 0;
6848 
6849 			break;
6850 
6851 		/* Add/Delete multicast address */
6852 		case SIOCADDMULTI:
6853 		case SIOCDELMULTI:
6854 			DBPRINT(sc, BCE_VERBOSE_MISC, "Received SIOCADDMULTI/SIOCDELMULTI\n");
6855 
6856 			BCE_LOCK(sc);
6857 			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
6858 				bce_set_rx_mode(sc);
6859 				error = 0;
6860 			}
6861 			BCE_UNLOCK(sc);
6862 
6863 			break;
6864 
6865 		/* Set/Get Interface media */
6866 		case SIOCSIFMEDIA:
6867 		case SIOCGIFMEDIA:
6868 			DBPRINT(sc, BCE_VERBOSE_MISC, "Received SIOCSIFMEDIA/SIOCGIFMEDIA\n");
6869 
6870 			mii = device_get_softc(sc->bce_miibus);
6871 			error = ifmedia_ioctl(ifp, ifr,
6872 			    &mii->mii_media, command);
6873 			break;
6874 
6875 		/* Set interface capability */
6876 		case SIOCSIFCAP:
6877 			mask = ifr->ifr_reqcap ^ ifp->if_capenable;
6878 			DBPRINT(sc, BCE_INFO_MISC, "Received SIOCSIFCAP = 0x%08X\n", (u32) mask);
6879 
6880 			/* Toggle the TX checksum capabilities enable flag. */
6881 			if (mask & IFCAP_TXCSUM) {
6882 				ifp->if_capenable ^= IFCAP_TXCSUM;
6883 				if (IFCAP_TXCSUM & ifp->if_capenable)
6884 					ifp->if_hwassist = BCE_IF_HWASSIST;
6885 				else
6886 					ifp->if_hwassist = 0;
6887 			}
6888 
6889 			/* Toggle the RX checksum capabilities enable flag. */
6890 			if (mask & IFCAP_RXCSUM) {
6891 				ifp->if_capenable ^= IFCAP_RXCSUM;
6892 				if (IFCAP_RXCSUM & ifp->if_capenable)
6893 					ifp->if_hwassist = BCE_IF_HWASSIST;
6894 				else
6895 					ifp->if_hwassist = 0;
6896 			}
6897 
6898 			/* Toggle the TSO capabilities enable flag. */
6899 			if (bce_tso_enable && (mask & IFCAP_TSO4)) {
6900 				ifp->if_capenable ^= IFCAP_TSO4;
6901 				if (IFCAP_TSO4 & ifp->if_capenable)
6902 					ifp->if_hwassist = BCE_IF_HWASSIST;
6903 				else
6904 					ifp->if_hwassist = 0;
6905 			}
6906 
6907 			/* Toggle VLAN_MTU capabilities enable flag. */
6908 			if (mask & IFCAP_VLAN_MTU) {
6909 				BCE_PRINTF("%s(%d): Changing VLAN_MTU not supported.\n",
6910 					__FILE__, __LINE__);
6911 			}
6912 
6913 			/* Toggle the VLAN_HWTAGGING capabilities enable flag. */
6914 			if (mask & IFCAP_VLAN_HWTAGGING) {
6915 				if (sc->bce_flags & BCE_MFW_ENABLE_FLAG)
6916 					BCE_PRINTF("%s(%d): Cannot change VLAN_HWTAGGING while "
6917 						"management firmware (ASF/IPMI/UMP) is running!\n",
6918 						__FILE__, __LINE__);
6919 				else
6920 					BCE_PRINTF("%s(%d): Changing VLAN_HWTAGGING not supported!\n",
6921 						__FILE__, __LINE__);
6922 			}
6923 
6924 			break;
6925 		default:
6926 			/* We don't know how to handle the IOCTL, pass it on. */
6927 			error = ether_ioctl(ifp, command, data);
6928 			break;
6929 	}
6930 
6931 	DBEXIT(BCE_VERBOSE_MISC);
6932 	return(error);
6933 }
6934 
6935 
6936 /****************************************************************************/
6937 /* Transmit timeout handler.                                                */
6938 /*                                                                          */
6939 /* Returns:                                                                 */
6940 /*   Nothing.                                                               */
6941 /****************************************************************************/
6942 static void
6943 bce_watchdog(struct bce_softc *sc)
6944 {
6945 	DBENTER(BCE_EXTREME_SEND);
6946 
6947 	BCE_LOCK_ASSERT(sc);
6948 
6949 	/* If the watchdog timer hasn't expired then just exit. */
6950 	if (sc->watchdog_timer == 0 || --sc->watchdog_timer)
6951 		goto bce_watchdog_exit;
6952 
6953 	/* If pause frames are active then don't reset the hardware. */
6954 	/* ToDo: Should we reset the timer here? */
6955 	if (REG_RD(sc, BCE_EMAC_TX_STATUS) & BCE_EMAC_TX_STATUS_XOFFED)
6956 		goto bce_watchdog_exit;
6957 
6958 	BCE_PRINTF("%s(%d): Watchdog timeout occurred, resetting!\n",
6959 		__FILE__, __LINE__);
6960 
6961 	DBRUNMSG(BCE_INFO,
6962 		bce_dump_driver_state(sc);
6963 		bce_dump_status_block(sc);
6964 		bce_dump_stats_block(sc);
6965 		bce_dump_ftqs(sc);
6966 		bce_dump_txp_state(sc, 0);
6967 		bce_dump_rxp_state(sc, 0);
6968 		bce_dump_tpat_state(sc, 0);
6969 		bce_dump_cp_state(sc, 0);
6970 		bce_dump_com_state(sc, 0));
6971 
6972 	DBRUN(bce_breakpoint(sc));
6973 
6974 	sc->bce_ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
6975 
6976 	bce_init_locked(sc);
6977 	sc->bce_ifp->if_oerrors++;
6978 
6979 bce_watchdog_exit:
6980 	DBEXIT(BCE_EXTREME_SEND);
6981 }
6982 
6983 
6984 /*
6985  * Interrupt handler.
6986  */
6987 /****************************************************************************/
6988 /* Main interrupt entry point.  Verifies that the controller generated the  */
6989 /* interrupt and then calls a separate routine for handle the various       */
6990 /* interrupt and then calls a separate routine to handle the various        */
6991 /*                                                                          */
6992 /* Returns:                                                                 */
6993 /*   Nothing.                                                               */
6994 /****************************************************************************/
6995 static void
6996 bce_intr(void *xsc)
6997 {
6998 	struct bce_softc *sc;
6999 	struct ifnet *ifp;
7000 	u32 status_attn_bits;
7001 	u16 hw_rx_cons, hw_tx_cons;
7002 
7003 	sc = xsc;
7004 	ifp = sc->bce_ifp;
7005 
7006 	DBENTER(BCE_VERBOSE_SEND | BCE_VERBOSE_RECV | BCE_VERBOSE_INTR);
7007 	DBRUNMSG(BCE_VERBOSE_INTR, bce_dump_status_block(sc));
7008 
7009 	BCE_LOCK(sc);
7010 
7011 	DBRUN(sc->interrupts_generated++);
7012 
7013 	bus_dmamap_sync(sc->status_tag, sc->status_map,
7014 	    BUS_DMASYNC_POSTWRITE);
7015 
7016 	/*
7017 	 * If the hardware status block index
7018 	 * matches the last value read by the
7019 	 * driver and we haven't asserted our
7020 	 * interrupt then there's nothing to do.
7021 	 */
7022 	if ((sc->status_block->status_idx == sc->last_status_idx) &&
7023 		(REG_RD(sc, BCE_PCICFG_MISC_STATUS) & BCE_PCICFG_MISC_STATUS_INTA_VALUE)) {
7024 			DBPRINT(sc, BCE_VERBOSE_INTR, "%s(): Spurious interrupt.\n",
7025 				__FUNCTION__);
7026 			goto bce_intr_exit;
7027 	}
7028 
7029 	/* Ack the interrupt and stop others from occurring. */
7030 	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
7031 		BCE_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
7032 		BCE_PCICFG_INT_ACK_CMD_MASK_INT);
7033 
7034 	/* Check if the hardware has finished any work. */
7035 	hw_rx_cons = bce_get_hw_rx_cons(sc);
7036 	hw_tx_cons = bce_get_hw_tx_cons(sc);
7037 
7038 	/* Keep processing data as long as there is work to do. */
7039 	for (;;) {
7040 
7041 		status_attn_bits = sc->status_block->status_attn_bits;
7042 
7043 		DBRUNIF(DB_RANDOMTRUE(bce_debug_unexpected_attention),
7044 			BCE_PRINTF("Simulating unexpected status attention bit set.");
7045 			status_attn_bits = status_attn_bits | STATUS_ATTN_BITS_PARITY_ERROR);
7046 
7047 		/* Was it a link change interrupt? */
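		/*
		 * An attention event is pending whenever a bit in status_attn_bits
		 * differs from the corresponding bit in status_attn_bits_ack.
		 */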
7048 		if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
7049 			(sc->status_block->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE)) {
7050 			bce_phy_intr(sc);
7051 
7052 			/* Clear any transient status updates during link state change. */
7053 			REG_WR(sc, BCE_HC_COMMAND,
7054 				sc->hc_command | BCE_HC_COMMAND_COAL_NOW_WO_INT);
7055 			REG_RD(sc, BCE_HC_COMMAND);
7056 		}
7057 
7058 		/* If any other attention is asserted then the chip is toast. */
7059 		if (((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) !=
7060 			(sc->status_block->status_attn_bits_ack &
7061 			~STATUS_ATTN_BITS_LINK_STATE))) {
7062 
7063 			DBRUN(sc->unexpected_attentions++);
7064 
7065 			BCE_PRINTF("%s(%d): Fatal attention detected: 0x%08X\n",
7066 				__FILE__, __LINE__, sc->status_block->status_attn_bits);
7067 
7068 			DBRUNMSG(BCE_FATAL,
7069 				if (bce_debug_unexpected_attention == 0)
7070 					bce_breakpoint(sc));
7071 
7072 			bce_init_locked(sc);
7073 			goto bce_intr_exit;
7074 		}
7075 
7076 		/* Check for any completed RX frames. */
7077 		if (hw_rx_cons != sc->hw_rx_cons)
7078 			bce_rx_intr(sc);
7079 
7080 		/* Check for any completed TX frames. */
7081 		if (hw_tx_cons != sc->hw_tx_cons)
7082 			bce_tx_intr(sc);
7083 
7084 		/* Save the status block index value for use during the next interrupt. */
7085 		sc->last_status_idx = sc->status_block->status_idx;
7086 
7087 		/* Prevent speculative reads from getting ahead of the status block. */
7088 		bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
7089 			BUS_SPACE_BARRIER_READ);
7090 
7091 		/* If there's no work left then exit the interrupt service routine. */
7092 		hw_rx_cons = bce_get_hw_rx_cons(sc);
7093 		hw_tx_cons = bce_get_hw_tx_cons(sc);
7094 
7095 		if ((hw_rx_cons == sc->hw_rx_cons) && (hw_tx_cons == sc->hw_tx_cons))
7096 			break;
7097 
7098 	}
7099 
7100 	bus_dmamap_sync(sc->status_tag,	sc->status_map,
7101 	    BUS_DMASYNC_PREWRITE);
7102 
7103 	/* Re-enable interrupts. */
7104 	bce_enable_intr(sc, 0);
7105 
7106 	/* Handle any frames that arrived while handling the interrupt. */
7107 	if (ifp->if_drv_flags & IFF_DRV_RUNNING && !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
7108 		bce_start_locked(ifp);
7109 
7110 bce_intr_exit:
7111 	BCE_UNLOCK(sc);
7112 
7113 	DBEXIT(BCE_VERBOSE_SEND | BCE_VERBOSE_RECV | BCE_VERBOSE_INTR);
7114 }
7115 
7116 
7117 /****************************************************************************/
7118 /* Programs the various packet receive modes (broadcast and multicast).     */
7119 /*                                                                          */
7120 /* Returns:                                                                 */
7121 /*   Nothing.                                                               */
7122 /****************************************************************************/
7123 static void
7124 bce_set_rx_mode(struct bce_softc *sc)
7125 {
7126 	struct ifnet *ifp;
7127 	struct ifmultiaddr *ifma;
7128 	u32 hashes[NUM_MC_HASH_REGISTERS] = { 0, 0, 0, 0, 0, 0, 0, 0 };
7129 	u32 rx_mode, sort_mode;
7130 	int h, i;
7131 
7132 	DBENTER(BCE_VERBOSE_MISC);
7133 
7134 	BCE_LOCK_ASSERT(sc);
7135 
7136 	ifp = sc->bce_ifp;
7137 
7138 	/* Initialize receive mode default settings. */
7139 	rx_mode   = sc->rx_mode & ~(BCE_EMAC_RX_MODE_PROMISCUOUS |
7140 			    BCE_EMAC_RX_MODE_KEEP_VLAN_TAG);
7141 	sort_mode = 1 | BCE_RPM_SORT_USER0_BC_EN;
7142 
7143 	/*
7144 	 * ASF/IPMI/UMP firmware requires that VLAN tag stripping
7145 	 * be enabled.
7146 	 */
7147 	if (!(BCE_IF_CAPABILITIES & IFCAP_VLAN_HWTAGGING) &&
7148 		(!(sc->bce_flags & BCE_MFW_ENABLE_FLAG)))
7149 		rx_mode |= BCE_EMAC_RX_MODE_KEEP_VLAN_TAG;
7150 
7151 	/*
7152 	 * Check for promiscuous, all multicast, or selected
7153 	 * multicast address filtering.
7154 	 */
7155 	if (ifp->if_flags & IFF_PROMISC) {
7156 		DBPRINT(sc, BCE_INFO_MISC, "Enabling promiscuous mode.\n");
7157 
7158 		/* Enable promiscuous mode. */
7159 		rx_mode |= BCE_EMAC_RX_MODE_PROMISCUOUS;
7160 		sort_mode |= BCE_RPM_SORT_USER0_PROM_EN;
7161 	} else if (ifp->if_flags & IFF_ALLMULTI) {
7162 		DBPRINT(sc, BCE_INFO_MISC, "Enabling all multicast mode.\n");
7163 
7164 		/* Enable all multicast addresses. */
7165 		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
7166 			REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4), 0xffffffff);
7167 		}
7168 		sort_mode |= BCE_RPM_SORT_USER0_MC_EN;
7169 	} else {
7170 		/* Accept one or more multicast(s). */
7171 		DBPRINT(sc, BCE_INFO_MISC, "Enabling selective multicast mode.\n");
7172 
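		/*
		 * Build a 256-bit multicast hash filter: the low byte of the CRC32
		 * of each address selects one bit, with bits 7-5 choosing one of
		 * the eight 32-bit hash registers and bits 4-0 the bit within it.
		 */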
7173 		IF_ADDR_LOCK(ifp);
7174 		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
7175 			if (ifma->ifma_addr->sa_family != AF_LINK)
7176 				continue;
7177 			h = ether_crc32_le(LLADDR((struct sockaddr_dl *)
7178 			    ifma->ifma_addr), ETHER_ADDR_LEN) & 0xFF;
7179 			    hashes[(h & 0xE0) >> 5] |= 1 << (h & 0x1F);
7180 		}
7181 		IF_ADDR_UNLOCK(ifp);
7182 
7183 		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++)
7184 			REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4), hashes[i]);
7185 
7186 		sort_mode |= BCE_RPM_SORT_USER0_MC_HSH_EN;
7187 	}
7188 
7189 	/* Only make changes if the receive mode has actually changed. */
7190 	if (rx_mode != sc->rx_mode) {
7191 		DBPRINT(sc, BCE_VERBOSE_MISC, "Enabling new receive mode: 0x%08X\n",
7192 			rx_mode);
7193 
7194 		sc->rx_mode = rx_mode;
7195 		REG_WR(sc, BCE_EMAC_RX_MODE, rx_mode);
7196 	}
7197 
7198 	/* Disable and clear the existing sort before enabling a new sort. */
7199 	REG_WR(sc, BCE_RPM_SORT_USER0, 0x0);
7200 	REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode);
7201 	REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode | BCE_RPM_SORT_USER0_ENA);
7202 
7203 	DBEXIT(BCE_VERBOSE_MISC);
7204 }
7205 
7206 
7207 /****************************************************************************/
7208 /* Called periodically to update statistics from the controller's           */
7209 /* statistics block.                                                        */
7210 /*                                                                          */
7211 /* Returns:                                                                 */
7212 /*   Nothing.                                                               */
7213 /****************************************************************************/
7214 static void
7215 bce_stats_update(struct bce_softc *sc)
7216 {
7217 	struct ifnet *ifp;
7218 	struct statistics_block *stats;
7219 
7220 	DBENTER(BCE_EXTREME_MISC);
7221 
7222 	ifp = sc->bce_ifp;
7223 
7224 	stats = (struct statistics_block *) sc->stats_block;
7225 
7226 	/*
7227 	 * Certain controllers don't report
7228 	 * carrier sense errors correctly.
7229 	 * See errata E11_5708CA0_1165.
7230 	 */
7231 	if (!(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) &&
7232 	    !(BCE_CHIP_ID(sc) == BCE_CHIP_ID_5708_A0))
7233 		ifp->if_oerrors += (u_long) stats->stat_Dot3StatsCarrierSenseErrors;
7234 
7235 	/*
7236 	 * Update the sysctl statistics from the
7237 	 * hardware statistics.
7238 	 */
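	/*
	 * The 64-bit hardware counters are exported as separate high and low
	 * 32-bit words, so each one is reassembled below before being copied
	 * into the softc.
	 */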
7239 	sc->stat_IfHCInOctets =
7240 		((u64) stats->stat_IfHCInOctets_hi << 32) +
7241 		 (u64) stats->stat_IfHCInOctets_lo;
7242 
7243 	sc->stat_IfHCInBadOctets =
7244 		((u64) stats->stat_IfHCInBadOctets_hi << 32) +
7245 		 (u64) stats->stat_IfHCInBadOctets_lo;
7246 
7247 	sc->stat_IfHCOutOctets =
7248 		((u64) stats->stat_IfHCOutOctets_hi << 32) +
7249 		 (u64) stats->stat_IfHCOutOctets_lo;
7250 
7251 	sc->stat_IfHCOutBadOctets =
7252 		((u64) stats->stat_IfHCOutBadOctets_hi << 32) +
7253 		 (u64) stats->stat_IfHCOutBadOctets_lo;
7254 
7255 	sc->stat_IfHCInUcastPkts =
7256 		((u64) stats->stat_IfHCInUcastPkts_hi << 32) +
7257 		 (u64) stats->stat_IfHCInUcastPkts_lo;
7258 
7259 	sc->stat_IfHCInMulticastPkts =
7260 		((u64) stats->stat_IfHCInMulticastPkts_hi << 32) +
7261 		 (u64) stats->stat_IfHCInMulticastPkts_lo;
7262 
7263 	sc->stat_IfHCInBroadcastPkts =
7264 		((u64) stats->stat_IfHCInBroadcastPkts_hi << 32) +
7265 		 (u64) stats->stat_IfHCInBroadcastPkts_lo;
7266 
7267 	sc->stat_IfHCOutUcastPkts =
7268 		((u64) stats->stat_IfHCOutUcastPkts_hi << 32) +
7269 		 (u64) stats->stat_IfHCOutUcastPkts_lo;
7270 
7271 	sc->stat_IfHCOutMulticastPkts =
7272 		((u64) stats->stat_IfHCOutMulticastPkts_hi << 32) +
7273 		 (u64) stats->stat_IfHCOutMulticastPkts_lo;
7274 
7275 	sc->stat_IfHCOutBroadcastPkts =
7276 		((u64) stats->stat_IfHCOutBroadcastPkts_hi << 32) +
7277 		 (u64) stats->stat_IfHCOutBroadcastPkts_lo;
7278 
7279 	sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors =
7280 		stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors;
7281 
7282 	sc->stat_Dot3StatsCarrierSenseErrors =
7283 		stats->stat_Dot3StatsCarrierSenseErrors;
7284 
7285 	sc->stat_Dot3StatsFCSErrors =
7286 		stats->stat_Dot3StatsFCSErrors;
7287 
7288 	sc->stat_Dot3StatsAlignmentErrors =
7289 		stats->stat_Dot3StatsAlignmentErrors;
7290 
7291 	sc->stat_Dot3StatsSingleCollisionFrames =
7292 		stats->stat_Dot3StatsSingleCollisionFrames;
7293 
7294 	sc->stat_Dot3StatsMultipleCollisionFrames =
7295 		stats->stat_Dot3StatsMultipleCollisionFrames;
7296 
7297 	sc->stat_Dot3StatsDeferredTransmissions =
7298 		stats->stat_Dot3StatsDeferredTransmissions;
7299 
7300 	sc->stat_Dot3StatsExcessiveCollisions =
7301 		stats->stat_Dot3StatsExcessiveCollisions;
7302 
7303 	sc->stat_Dot3StatsLateCollisions =
7304 		stats->stat_Dot3StatsLateCollisions;
7305 
7306 	sc->stat_EtherStatsCollisions =
7307 		stats->stat_EtherStatsCollisions;
7308 
7309 	sc->stat_EtherStatsFragments =
7310 		stats->stat_EtherStatsFragments;
7311 
7312 	sc->stat_EtherStatsJabbers =
7313 		stats->stat_EtherStatsJabbers;
7314 
7315 	sc->stat_EtherStatsUndersizePkts =
7316 		stats->stat_EtherStatsUndersizePkts;
7317 
7318 	sc->stat_EtherStatsOverrsizePkts =
7319 		stats->stat_EtherStatsOverrsizePkts;
7320 
7321 	sc->stat_EtherStatsPktsRx64Octets =
7322 		stats->stat_EtherStatsPktsRx64Octets;
7323 
7324 	sc->stat_EtherStatsPktsRx65Octetsto127Octets =
7325 		stats->stat_EtherStatsPktsRx65Octetsto127Octets;
7326 
7327 	sc->stat_EtherStatsPktsRx128Octetsto255Octets =
7328 		stats->stat_EtherStatsPktsRx128Octetsto255Octets;
7329 
7330 	sc->stat_EtherStatsPktsRx256Octetsto511Octets =
7331 		stats->stat_EtherStatsPktsRx256Octetsto511Octets;
7332 
7333 	sc->stat_EtherStatsPktsRx512Octetsto1023Octets =
7334 		stats->stat_EtherStatsPktsRx512Octetsto1023Octets;
7335 
7336 	sc->stat_EtherStatsPktsRx1024Octetsto1522Octets =
7337 		stats->stat_EtherStatsPktsRx1024Octetsto1522Octets;
7338 
7339 	sc->stat_EtherStatsPktsRx1523Octetsto9022Octets =
7340 		stats->stat_EtherStatsPktsRx1523Octetsto9022Octets;
7341 
7342 	sc->stat_EtherStatsPktsTx64Octets =
7343 		stats->stat_EtherStatsPktsTx64Octets;
7344 
7345 	sc->stat_EtherStatsPktsTx65Octetsto127Octets =
7346 		stats->stat_EtherStatsPktsTx65Octetsto127Octets;
7347 
7348 	sc->stat_EtherStatsPktsTx128Octetsto255Octets =
7349 		stats->stat_EtherStatsPktsTx128Octetsto255Octets;
7350 
7351 	sc->stat_EtherStatsPktsTx256Octetsto511Octets =
7352 		stats->stat_EtherStatsPktsTx256Octetsto511Octets;
7353 
7354 	sc->stat_EtherStatsPktsTx512Octetsto1023Octets =
7355 		stats->stat_EtherStatsPktsTx512Octetsto1023Octets;
7356 
7357 	sc->stat_EtherStatsPktsTx1024Octetsto1522Octets =
7358 		stats->stat_EtherStatsPktsTx1024Octetsto1522Octets;
7359 
7360 	sc->stat_EtherStatsPktsTx1523Octetsto9022Octets =
7361 		stats->stat_EtherStatsPktsTx1523Octetsto9022Octets;
7362 
7363 	sc->stat_XonPauseFramesReceived =
7364 		stats->stat_XonPauseFramesReceived;
7365 
7366 	sc->stat_XoffPauseFramesReceived =
7367 		stats->stat_XoffPauseFramesReceived;
7368 
7369 	sc->stat_OutXonSent =
7370 		stats->stat_OutXonSent;
7371 
7372 	sc->stat_OutXoffSent =
7373 		stats->stat_OutXoffSent;
7374 
7375 	sc->stat_FlowControlDone =
7376 		stats->stat_FlowControlDone;
7377 
7378 	sc->stat_MacControlFramesReceived =
7379 		stats->stat_MacControlFramesReceived;
7380 
7381 	sc->stat_XoffStateEntered =
7382 		stats->stat_XoffStateEntered;
7383 
7384 	sc->stat_IfInFramesL2FilterDiscards =
7385 		stats->stat_IfInFramesL2FilterDiscards;
7386 
7387 	sc->stat_IfInRuleCheckerDiscards =
7388 		stats->stat_IfInRuleCheckerDiscards;
7389 
7390 	sc->stat_IfInFTQDiscards =
7391 		stats->stat_IfInFTQDiscards;
7392 
7393 	sc->stat_IfInMBUFDiscards =
7394 		stats->stat_IfInMBUFDiscards;
7395 
7396 	sc->stat_IfInRuleCheckerP4Hit =
7397 		stats->stat_IfInRuleCheckerP4Hit;
7398 
7399 	sc->stat_CatchupInRuleCheckerDiscards =
7400 		stats->stat_CatchupInRuleCheckerDiscards;
7401 
7402 	sc->stat_CatchupInFTQDiscards =
7403 		stats->stat_CatchupInFTQDiscards;
7404 
7405 	sc->stat_CatchupInMBUFDiscards =
7406 		stats->stat_CatchupInMBUFDiscards;
7407 
7408 	sc->stat_CatchupInRuleCheckerP4Hit =
7409 		stats->stat_CatchupInRuleCheckerP4Hit;
7410 
7411 	sc->com_no_buffers = REG_RD_IND(sc, 0x120084);
7412 
7413 	/*
7414 	 * Update the interface statistics from the
7415 	 * hardware statistics.
7416 	 */
7417 	ifp->if_collisions =
7418 		(u_long) sc->stat_EtherStatsCollisions;
7419 
7420 	/* ToDo: This method loses soft errors. */
7421 	ifp->if_ierrors =
7422 		(u_long) sc->stat_EtherStatsUndersizePkts +
7423 		(u_long) sc->stat_EtherStatsOverrsizePkts +
7424 		(u_long) sc->stat_IfInMBUFDiscards +
7425 		(u_long) sc->stat_Dot3StatsAlignmentErrors +
7426 		(u_long) sc->stat_Dot3StatsFCSErrors +
7427 		(u_long) sc->stat_IfInRuleCheckerDiscards +
7428 		(u_long) sc->stat_IfInFTQDiscards +
7429 		(u_long) sc->com_no_buffers;
7430 
7431 	/* ToDo: This method loses soft errors. */
7432 	ifp->if_oerrors =
7433 		(u_long) sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors +
7434 		(u_long) sc->stat_Dot3StatsExcessiveCollisions +
7435 		(u_long) sc->stat_Dot3StatsLateCollisions;
7436 
7437 	/* ToDo: Add additional statistics. */
7438 
7439 	DBEXIT(BCE_EXTREME_MISC);
7440 }
7441 
7442 
7443 /****************************************************************************/
7444 /* Periodic function to notify the bootcode that the driver is still        */
7445 /* present.                                                                 */
7446 /*                                                                          */
7447 /* Returns:                                                                 */
7448 /*   Nothing.                                                               */
7449 /****************************************************************************/
7450 static void
7451 bce_pulse(void *xsc)
7452 {
7453 	struct bce_softc *sc = xsc;
7454 	u32 msg;
7455 
7456 	DBENTER(BCE_EXTREME_MISC);
7457 
7458 	BCE_LOCK_ASSERT(sc);
7459 
7460 	/* Tell the firmware that the driver is still running. */
7461 	msg = (u32) ++sc->bce_fw_drv_pulse_wr_seq;
7462 	REG_WR_IND(sc, sc->bce_shmem_base + BCE_DRV_PULSE_MB, msg);
7463 
7464 	/* Schedule the next pulse. */
7465 	callout_reset(&sc->bce_pulse_callout, hz, bce_pulse, sc);
7466 
7467 	DBEXIT(BCE_EXTREME_MISC);
7468 }
7469 
7470 
7471 /****************************************************************************/
7472 /* Periodic function to perform maintenance tasks.                          */
7473 /*                                                                          */
7474 /* Returns:                                                                 */
7475 /*   Nothing.                                                               */
7476 /****************************************************************************/
7477 static void
7478 bce_tick(void *xsc)
7479 {
7480 	struct bce_softc *sc = xsc;
7481 	struct mii_data *mii;
7482 	struct ifnet *ifp;
7483 
7484 	ifp = sc->bce_ifp;
7485 
7486 	DBENTER(BCE_EXTREME_MISC);
7487 
7488 	BCE_LOCK_ASSERT(sc);
7489 
7490 	/* Schedule the next tick. */
7491 	callout_reset(&sc->bce_tick_callout, hz, bce_tick, sc);
7492 
7493 	/* Update the statistics from the hardware statistics block. */
7494 	bce_stats_update(sc);
7495 
7496 	/* Top off the receive and page chains. */
7497 #ifdef ZERO_COPY_SOCKETS
7498 	bce_fill_pg_chain(sc);
7499 #endif
7500 	bce_fill_rx_chain(sc);
7501 
7502 	/* Check that the chip hasn't hung. */
7503 	bce_watchdog(sc);
7504 
7505 	/* If link is already up then we're done. */
7506 	if (sc->bce_link)
7507 		goto bce_tick_exit;
7508 
7509 	/* Link is down.  Check what the PHY's doing. */
7510 	mii = device_get_softc(sc->bce_miibus);
7511 	mii_tick(mii);
7512 
7513 	/* Check if the link has come up. */
7514 	if ((mii->mii_media_status & IFM_ACTIVE) &&
7515 	    (IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)) {
7516 		DBPRINT(sc, BCE_VERBOSE_MISC, "%s(): Link up!\n", __FUNCTION__);
7517 		sc->bce_link++;
7518 		if ((IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
7519 		    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) &&
7520 		    bootverbose)
7521 			BCE_PRINTF("Gigabit link up!\n");
7522 		/* Now that link is up, handle any outstanding TX traffic. */
7523 		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
7524 			DBPRINT(sc, BCE_VERBOSE_MISC, "%s(): Found pending TX traffic.\n",
7525 				 __FUNCTION__);
7526 			bce_start_locked(ifp);
7527 		}
7528 	}
7529 
7530 bce_tick_exit:
7531 	DBEXIT(BCE_EXTREME_MISC);
7532 	return;
7533 }
7534 
7535 
7536 #ifdef BCE_DEBUG
7537 /****************************************************************************/
7538 /* Allows the driver state to be dumped through the sysctl interface.       */
7539 /*                                                                          */
7540 /* Returns:                                                                 */
7541 /*   0 for success, positive value for failure.                             */
7542 /****************************************************************************/
7543 static int
7544 bce_sysctl_driver_state(SYSCTL_HANDLER_ARGS)
7545 {
7546         int error;
7547         int result;
7548         struct bce_softc *sc;
7549 
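        /*
         * Reads of this sysctl simply return -1; writing a value of 1
         * triggers the state dump below.
         */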
7550         result = -1;
7551         error = sysctl_handle_int(oidp, &result, 0, req);
7552 
7553         if (error || !req->newptr)
7554                 return (error);
7555 
7556         if (result == 1) {
7557                 sc = (struct bce_softc *)arg1;
7558                 bce_dump_driver_state(sc);
7559         }
7560 
7561         return error;
7562 }
7563 
7564 
7565 /****************************************************************************/
7566 /* Allows the hardware state to be dumped through the sysctl interface.     */
7567 /*                                                                          */
7568 /* Returns:                                                                 */
7569 /*   0 for success, positive value for failure.                             */
7570 /****************************************************************************/
7571 static int
7572 bce_sysctl_hw_state(SYSCTL_HANDLER_ARGS)
7573 {
7574         int error;
7575         int result;
7576         struct bce_softc *sc;
7577 
7578         result = -1;
7579         error = sysctl_handle_int(oidp, &result, 0, req);
7580 
7581         if (error || !req->newptr)
7582                 return (error);
7583 
7584         if (result == 1) {
7585                 sc = (struct bce_softc *)arg1;
7586                 bce_dump_hw_state(sc);
7587         }
7588 
7589         return error;
7590 }
7591 
7592 
7593 /****************************************************************************/
7594 /* Allows the bootcode state to be dumped through the sysctl interface.     */
7595 /*                                                                          */
7596 /* Returns:                                                                 */
7597 /*   0 for success, positive value for failure.                             */
7598 /****************************************************************************/
7599 static int
7600 bce_sysctl_bc_state(SYSCTL_HANDLER_ARGS)
7601 {
7602         int error;
7603         int result;
7604         struct bce_softc *sc;
7605 
7606         result = -1;
7607         error = sysctl_handle_int(oidp, &result, 0, req);
7608 
7609         if (error || !req->newptr)
7610                 return (error);
7611 
7612         if (result == 1) {
7613                 sc = (struct bce_softc *)arg1;
7614                 bce_dump_bc_state(sc);
7615         }
7616 
7617         return error;
7618 }
7619 
7620 
7621 /****************************************************************************/
7622 /* Provides a sysctl interface to allow dumping the RX chain.               */
7623 /*                                                                          */
7624 /* Returns:                                                                 */
7625 /*   0 for success, positive value for failure.                             */
7626 /****************************************************************************/
7627 static int
7628 bce_sysctl_dump_rx_chain(SYSCTL_HANDLER_ARGS)
7629 {
7630         int error;
7631         int result;
7632         struct bce_softc *sc;
7633 
7634         result = -1;
7635         error = sysctl_handle_int(oidp, &result, 0, req);
7636 
7637         if (error || !req->newptr)
7638                 return (error);
7639 
7640         if (result == 1) {
7641                 sc = (struct bce_softc *)arg1;
7642                 bce_dump_rx_chain(sc, 0, TOTAL_RX_BD);
7643         }
7644 
7645         return error;
7646 }
7647 
7648 
7649 /****************************************************************************/
7650 /* Provides a sysctl interface to allow dumping the TX chain.               */
7651 /*                                                                          */
7652 /* Returns:                                                                 */
7653 /*   0 for success, positive value for failure.                             */
7654 /****************************************************************************/
7655 static int
7656 bce_sysctl_dump_tx_chain(SYSCTL_HANDLER_ARGS)
7657 {
7658         int error;
7659         int result;
7660         struct bce_softc *sc;
7661 
7662         result = -1;
7663         error = sysctl_handle_int(oidp, &result, 0, req);
7664 
7665         if (error || !req->newptr)
7666                 return (error);
7667 
7668         if (result == 1) {
7669                 sc = (struct bce_softc *)arg1;
7670                 bce_dump_tx_chain(sc, 0, USABLE_TX_BD);
7671         }
7672 
7673         return error;
7674 }
7675 
7676 
7677 #ifdef ZERO_COPY_SOCKETS
7678 /****************************************************************************/
7679 /* Provides a sysctl interface to allow dumping the page chain.             */
7680 /*                                                                          */
7681 /* Returns:                                                                 */
7682 /*   0 for success, positive value for failure.                             */
7683 /****************************************************************************/
7684 static int
7685 bce_sysctl_dump_pg_chain(SYSCTL_HANDLER_ARGS)
7686 {
7687         int error;
7688         int result;
7689         struct bce_softc *sc;
7690 
7691         result = -1;
7692         error = sysctl_handle_int(oidp, &result, 0, req);
7693 
7694         if (error || !req->newptr)
7695                 return (error);
7696 
7697         if (result == 1) {
7698                 sc = (struct bce_softc *)arg1;
7699                 bce_dump_pg_chain(sc, 0, TOTAL_PG_BD);
7700         }
7701 
7702         return error;
7703 }
7704 #endif
7705 
7706 /****************************************************************************/
7707 /* Provides a sysctl interface to allow reading arbitrary NVRAM offsets in  */
7708 /* the device.  DO NOT ENABLE ON PRODUCTION SYSTEMS!                        */
7709 /*                                                                          */
7710 /* Returns:                                                                 */
7711 /*   0 for success, positive value for failure.                             */
7712 /****************************************************************************/
7713 static int
7714 bce_sysctl_nvram_read(SYSCTL_HANDLER_ARGS)
7715 {
7716 	struct bce_softc *sc = (struct bce_softc *)arg1;
7717 	int error;
7718 	u32 result;
7719 	u32 val[1];
7720 	u8 *data = (u8 *) val;
7721 
7722 	result = -1;
7723 	error = sysctl_handle_int(oidp, &result, 0, req);
7724 	if (error || (req->newptr == NULL))
7725 		return (error);
7726 
7727 	bce_nvram_read(sc, result, data, 4);
7728 	BCE_PRINTF("offset 0x%08X = 0x%08X\n", result, bce_be32toh(val[0]));
7729 
7730 	return (error);
7731 }
7732 
7733 
7734 /****************************************************************************/
7735 /* Provides a sysctl interface to allow reading arbitrary registers in the  */
7736 /* device.  DO NOT ENABLE ON PRODUCTION SYSTEMS!                            */
7737 /*                                                                          */
7738 /* Returns:                                                                 */
7739 /*   0 for success, positive value for failure.                             */
7740 /****************************************************************************/
7741 static int
7742 bce_sysctl_reg_read(SYSCTL_HANDLER_ARGS)
7743 {
7744 	struct bce_softc *sc = (struct bce_softc *)arg1;
7745 	int error;
7746 	u32 val, result;
7747 
7748 	result = -1;
7749 	error = sysctl_handle_int(oidp, &result, 0, req);
7750 	if (error || (req->newptr == NULL))
7751 		return (error);
7752 
7753 	/* Make sure the register is accessible. */
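	/*
	 * Offsets below 0x8000 appear to fall within the directly mapped
	 * register window; higher offsets (up to 0x280000) are read through
	 * the indirect access registers instead.
	 */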
7754 	if (result < 0x8000) {
7755 		val = REG_RD(sc, result);
7756 		BCE_PRINTF("reg 0x%08X = 0x%08X\n", result, val);
7757 	} else if (result < 0x0280000) {
7758 		val = REG_RD_IND(sc, result);
7759 		BCE_PRINTF("reg 0x%08X = 0x%08X\n", result, val);
7760 	}
7761 
7762 	return (error);
7763 }
7764 
7765 
7766 /****************************************************************************/
7767 /* Provides a sysctl interface to allow reading arbitrary PHY registers in  */
7768 /* the device.  DO NOT ENABLE ON PRODUCTION SYSTEMS!                        */
7769 /*                                                                          */
7770 /* Returns:                                                                 */
7771 /*   0 for success, positive value for failure.                             */
7772 /****************************************************************************/
7773 static int
7774 bce_sysctl_phy_read(SYSCTL_HANDLER_ARGS)
7775 {
7776 	struct bce_softc *sc;
7777 	device_t dev;
7778 	int error, result;
7779 	u16 val;
7780 
7781 	result = -1;
7782 	error = sysctl_handle_int(oidp, &result, 0, req);
7783 	if (error || (req->newptr == NULL))
7784 		return (error);
7785 
7786 	/* Make sure the register is accessible. */
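	/* Only the standard MII register space (0x00 - 0x1F) is allowed. */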
7787 	if (result < 0x20) {
7788 		sc = (struct bce_softc *)arg1;
7789 		dev = sc->bce_dev;
7790 		val = bce_miibus_read_reg(dev, sc->bce_phy_addr, result);
7791 		BCE_PRINTF("phy 0x%02X = 0x%04X\n", result, val);
7792 	}
7793 	return (error);
7794 }
7795 
7796 
7797 /****************************************************************************/
7798 /* Provides a sysctl interface to allow dumping context memory for a CID.   */
7799 /*                                                                          */
7800 /* Returns:                                                                 */
7801 /*   0 for success, positive value for failure.                             */
7802 /****************************************************************************/
7803 static int
7804 bce_sysctl_dump_ctx(SYSCTL_HANDLER_ARGS)
7805 {
7806 	struct bce_softc *sc;
7807 	int error;
7808 	u16 result;
7809 
7810 	result = -1;
7811 	error = sysctl_handle_int(oidp, &result, 0, req);
7812 	if (error || (req->newptr == NULL))
7813 		return (error);
7814 
7815 	/* Make sure the register is accessible. */
7816 	if (result <= TX_CID) {
7817 		sc = (struct bce_softc *)arg1;
7818 		bce_dump_ctx(sc, result);
7819 	}
7820 
7821 	return (error);
7822 }
7823 
7824 
7825 /****************************************************************************/
7826 /* Provides a sysctl interface to force the driver to dump state and        */
7827 /* enter the debugger.  DO NOT ENABLE ON PRODUCTION SYSTEMS!                */
7828 /*                                                                          */
7829 /* Returns:                                                                 */
7830 /*   0 for success, positive value for failure.                             */
7831 /****************************************************************************/
7832 static int
7833 bce_sysctl_breakpoint(SYSCTL_HANDLER_ARGS)
7834 {
7835         int error;
7836         int result;
7837         struct bce_softc *sc;
7838 
7839         result = -1;
7840         error = sysctl_handle_int(oidp, &result, 0, req);
7841 
7842         if (error || !req->newptr)
7843                 return (error);
7844 
7845         if (result == 1) {
7846                 sc = (struct bce_softc *)arg1;
7847                 bce_breakpoint(sc);
7848         }
7849 
7850         return error;
7851 }
7852 #endif
7853 
7854 
7855 /****************************************************************************/
7856 /* Adds any sysctl parameters for tuning or debugging purposes.             */
7857 /*                                                                          */
7858 /* Returns:                                                                 */
7859 /*   Nothing.                                                               */
7860 /****************************************************************************/
7861 static void
7862 bce_add_sysctls(struct bce_softc *sc)
7863 {
7864 	struct sysctl_ctx_list *ctx;
7865 	struct sysctl_oid_list *children;
7866 
7867 	DBENTER(BCE_VERBOSE_MISC);
7868 
7869 	ctx = device_get_sysctl_ctx(sc->bce_dev);
7870 	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->bce_dev));
7871 
7872 #ifdef BCE_DEBUG
7873 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
7874 		"rx_low_watermark",
7875 		CTLFLAG_RD, &sc->rx_low_watermark,
7876 		0, "Lowest level of free rx_bd's");
7877 
7878 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
7879 		"rx_empty_count",
7880 		CTLFLAG_RD, &sc->rx_empty_count,
7881 		0, "Number of times the RX chain was empty");
7882 
7883 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
7884 		"tx_hi_watermark",
7885 		CTLFLAG_RD, &sc->tx_hi_watermark,
7886 		0, "Highest level of used tx_bd's");
7887 
7888 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
7889 		"tx_full_count",
7890 		CTLFLAG_RD, &sc->tx_full_count,
7891 		0, "Number of times the TX chain was full");
7892 
7893 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
7894 		"l2fhdr_status_errors",
7895 		CTLFLAG_RD, &sc->l2fhdr_status_errors,
7896 		0, "l2_fhdr status errors");
7897 
7898 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
7899 		"unexpected_attentions",
7900 		CTLFLAG_RD, &sc->unexpected_attentions,
7901 		0, "Unexpected attentions");
7902 
7903 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
7904 		"lost_status_block_updates",
7905 		CTLFLAG_RD, &sc->lost_status_block_updates,
7906 		0, "Lost status block updates");
7907 
7908 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
7909 		"debug_mbuf_sim_alloc_failed",
7910 		CTLFLAG_RD, &sc->debug_mbuf_sim_alloc_failed,
7911 		0, "Simulated mbuf cluster allocation failures");
7912 
7913 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
7914 		"requested_tso_frames",
7915 		CTLFLAG_RD, &sc->requested_tso_frames,
7916 		0, "Number of TSO frames requested");
7917 
7918 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
7919 		"rx_interrupts",
7920 		CTLFLAG_RD, &sc->rx_interrupts,
7921 		0, "Number of RX interrupts");
7922 
7923 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
7924 		"tx_interrupts",
7925 		CTLFLAG_RD, &sc->tx_interrupts,
7926 		0, "Number of TX interrupts");
7927 
7928 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
7929 		"rx_intr_time",
7930 		CTLFLAG_RD, &sc->rx_intr_time,
7931 		"RX interrupt time");
7932 
7933 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
7934 		"tx_intr_time",
7935 		CTLFLAG_RD, &sc->tx_intr_time,
7936 		"TX interrupt time");
7937 #endif
7938 
7939 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
7940 		"mbuf_alloc_failed",
7941 		CTLFLAG_RD, &sc->mbuf_alloc_failed,
7942 		0, "mbuf cluster allocation failures");
7943 
7944 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
7945 		"tx_dma_map_failures",
7946 		CTLFLAG_RD, &sc->tx_dma_map_failures,
7947 		0, "tx dma mapping failures");
7948 
7949 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
7950 		"stat_IfHcInOctets",
7951 		CTLFLAG_RD, &sc->stat_IfHCInOctets,
7952 		"Bytes received");
7953 
7954 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
7955 		"stat_IfHCInBadOctets",
7956 		CTLFLAG_RD, &sc->stat_IfHCInBadOctets,
7957 		"Bad bytes received");
7958 
7959 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
7960 		"stat_IfHCOutOctets",
7961 		CTLFLAG_RD, &sc->stat_IfHCOutOctets,
7962 		"Bytes sent");
7963 
7964 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
7965 		"stat_IfHCOutBadOctets",
7966 		CTLFLAG_RD, &sc->stat_IfHCOutBadOctets,
7967 		"Bad bytes sent");
7968 
7969 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
7970 		"stat_IfHCInUcastPkts",
7971 		CTLFLAG_RD, &sc->stat_IfHCInUcastPkts,
7972 		"Unicast packets received");
7973 
7974 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
7975 		"stat_IfHCInMulticastPkts",
7976 		CTLFLAG_RD, &sc->stat_IfHCInMulticastPkts,
7977 		"Multicast packets received");
7978 
7979 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
7980 		"stat_IfHCInBroadcastPkts",
7981 		CTLFLAG_RD, &sc->stat_IfHCInBroadcastPkts,
7982 		"Broadcast packets received");
7983 
7984 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
7985 		"stat_IfHCOutUcastPkts",
7986 		CTLFLAG_RD, &sc->stat_IfHCOutUcastPkts,
7987 		"Unicast packets sent");
7988 
7989 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
7990 		"stat_IfHCOutMulticastPkts",
7991 		CTLFLAG_RD, &sc->stat_IfHCOutMulticastPkts,
7992 		"Multicast packets sent");
7993 
7994 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
7995 		"stat_IfHCOutBroadcastPkts",
7996 		CTLFLAG_RD, &sc->stat_IfHCOutBroadcastPkts,
7997 		"Broadcast packets sent");
7998 
7999 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8000 		"stat_emac_tx_stat_dot3statsinternalmactransmiterrors",
8001 		CTLFLAG_RD, &sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors,
8002 		0, "Internal MAC transmit errors");
8003 
8004 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8005 		"stat_Dot3StatsCarrierSenseErrors",
8006 		CTLFLAG_RD, &sc->stat_Dot3StatsCarrierSenseErrors,
8007 		0, "Carrier sense errors");
8008 
8009 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8010 		"stat_Dot3StatsFCSErrors",
8011 		CTLFLAG_RD, &sc->stat_Dot3StatsFCSErrors,
8012 		0, "Frame check sequence errors");
8013 
8014 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8015 		"stat_Dot3StatsAlignmentErrors",
8016 		CTLFLAG_RD, &sc->stat_Dot3StatsAlignmentErrors,
8017 		0, "Alignment errors");
8018 
8019 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8020 		"stat_Dot3StatsSingleCollisionFrames",
8021 		CTLFLAG_RD, &sc->stat_Dot3StatsSingleCollisionFrames,
8022 		0, "Single Collision Frames");
8023 
8024 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8025 		"stat_Dot3StatsMultipleCollisionFrames",
8026 		CTLFLAG_RD, &sc->stat_Dot3StatsMultipleCollisionFrames,
8027 		0, "Multiple Collision Frames");
8028 
8029 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8030 		"stat_Dot3StatsDeferredTransmissions",
8031 		CTLFLAG_RD, &sc->stat_Dot3StatsDeferredTransmissions,
8032 		0, "Deferred Transmissions");
8033 
8034 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8035 		"stat_Dot3StatsExcessiveCollisions",
8036 		CTLFLAG_RD, &sc->stat_Dot3StatsExcessiveCollisions,
8037 		0, "Excessive Collisions");
8038 
8039 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8040 		"stat_Dot3StatsLateCollisions",
8041 		CTLFLAG_RD, &sc->stat_Dot3StatsLateCollisions,
8042 		0, "Late Collisions");
8043 
8044 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8045 		"stat_EtherStatsCollisions",
8046 		CTLFLAG_RD, &sc->stat_EtherStatsCollisions,
8047 		0, "Collisions");
8048 
8049 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8050 		"stat_EtherStatsFragments",
8051 		CTLFLAG_RD, &sc->stat_EtherStatsFragments,
8052 		0, "Fragments");
8053 
8054 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8055 		"stat_EtherStatsJabbers",
8056 		CTLFLAG_RD, &sc->stat_EtherStatsJabbers,
8057 		0, "Jabbers");
8058 
8059 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8060 		"stat_EtherStatsUndersizePkts",
8061 		CTLFLAG_RD, &sc->stat_EtherStatsUndersizePkts,
8062 		0, "Undersize packets");
8063 
8064 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8065 		"stat_EtherStatsOverrsizePkts",
8066 		CTLFLAG_RD, &sc->stat_EtherStatsOverrsizePkts,
8067 		0, "Oversize packets");
8068 
8069 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8070 		"stat_EtherStatsPktsRx64Octets",
8071 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx64Octets,
8072 		0, "Bytes received in 64 byte packets");
8073 
8074 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8075 		"stat_EtherStatsPktsRx65Octetsto127Octets",
8076 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx65Octetsto127Octets,
8077 		0, "Bytes received in 65 to 127 byte packets");
8078 
8079 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8080 		"stat_EtherStatsPktsRx128Octetsto255Octets",
8081 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx128Octetsto255Octets,
8082 		0, "Bytes received in 128 to 255 byte packets");
8083 
8084 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8085 		"stat_EtherStatsPktsRx256Octetsto511Octets",
8086 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx256Octetsto511Octets,
8087 		0, "Bytes received in 256 to 511 byte packets");
8088 
8089 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8090 		"stat_EtherStatsPktsRx512Octetsto1023Octets",
8091 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx512Octetsto1023Octets,
8092 		0, "Bytes received in 512 to 1023 byte packets");
8093 
8094 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8095 		"stat_EtherStatsPktsRx1024Octetsto1522Octets",
8096 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1024Octetsto1522Octets,
8097 		0, "Bytes received in 1024 to 1522 byte packets");
8098 
8099 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8100 		"stat_EtherStatsPktsRx1523Octetsto9022Octets",
8101 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1523Octetsto9022Octets,
8102 		0, "Bytes received in 1523 to 9022 byte packets");
8103 
8104 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8105 		"stat_EtherStatsPktsTx64Octets",
8106 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx64Octets,
8107 		0, "Bytes sent in 64 byte packets");
8108 
8109 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8110 		"stat_EtherStatsPktsTx65Octetsto127Octets",
8111 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx65Octetsto127Octets,
8112 		0, "Bytes sent in 65 to 127 byte packets");
8113 
8114 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8115 		"stat_EtherStatsPktsTx128Octetsto255Octets",
8116 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx128Octetsto255Octets,
8117 		0, "Bytes sent in 128 to 255 byte packets");
8118 
8119 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8120 		"stat_EtherStatsPktsTx256Octetsto511Octets",
8121 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx256Octetsto511Octets,
8122 		0, "Bytes sent in 256 to 511 byte packets");
8123 
8124 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8125 		"stat_EtherStatsPktsTx512Octetsto1023Octets",
8126 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx512Octetsto1023Octets,
8127 		0, "Bytes sent in 512 to 1023 byte packets");
8128 
8129 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8130 		"stat_EtherStatsPktsTx1024Octetsto1522Octets",
8131 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1024Octetsto1522Octets,
8132 		0, "Bytes sent in 1024 to 1522 byte packets");
8133 
8134 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8135 		"stat_EtherStatsPktsTx1523Octetsto9022Octets",
8136 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1523Octetsto9022Octets,
8137 		0, "Bytes sent in 1523 to 9022 byte packets");
8138 
8139 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8140 		"stat_XonPauseFramesReceived",
8141 		CTLFLAG_RD, &sc->stat_XonPauseFramesReceived,
8142 		0, "XON pause frames received");
8143 
8144 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8145 		"stat_XoffPauseFramesReceived",
8146 		CTLFLAG_RD, &sc->stat_XoffPauseFramesReceived,
8147 		0, "XOFF pause frames received");
8148 
8149 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8150 		"stat_OutXonSent",
8151 		CTLFLAG_RD, &sc->stat_OutXonSent,
8152 		0, "XON pause frames sent");
8153 
8154 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8155 		"stat_OutXoffSent",
8156 		CTLFLAG_RD, &sc->stat_OutXoffSent,
8157 		0, "XOFF pause frames sent");
8158 
8159 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8160 		"stat_FlowControlDone",
8161 		CTLFLAG_RD, &sc->stat_FlowControlDone,
8162 		0, "Flow control done");
8163 
8164 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8165 		"stat_MacControlFramesReceived",
8166 		CTLFLAG_RD, &sc->stat_MacControlFramesReceived,
8167 		0, "MAC control frames received");
8168 
8169 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8170 		"stat_XoffStateEntered",
8171 		CTLFLAG_RD, &sc->stat_XoffStateEntered,
8172 		0, "XOFF state entered");
8173 
8174 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8175 		"stat_IfInFramesL2FilterDiscards",
8176 		CTLFLAG_RD, &sc->stat_IfInFramesL2FilterDiscards,
8177 		0, "Received L2 packets discarded");
8178 
8179 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8180 		"stat_IfInRuleCheckerDiscards",
8181 		CTLFLAG_RD, &sc->stat_IfInRuleCheckerDiscards,
8182 		0, "Received packets discarded by rule");
8183 
8184 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8185 		"stat_IfInFTQDiscards",
8186 		CTLFLAG_RD, &sc->stat_IfInFTQDiscards,
8187 		0, "Received packet FTQ discards");
8188 
8189 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8190 		"stat_IfInMBUFDiscards",
8191 		CTLFLAG_RD, &sc->stat_IfInMBUFDiscards,
8192 		0, "Received packets discarded due to lack of controller buffer memory");
8193 
8194 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8195 		"stat_IfInRuleCheckerP4Hit",
8196 		CTLFLAG_RD, &sc->stat_IfInRuleCheckerP4Hit,
8197 		0, "Received packets rule checker hits");
8198 
8199 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8200 		"stat_CatchupInRuleCheckerDiscards",
8201 		CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerDiscards,
8202 		0, "Received packets discarded in Catchup path");
8203 
8204 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8205 		"stat_CatchupInFTQDiscards",
8206 		CTLFLAG_RD, &sc->stat_CatchupInFTQDiscards,
8207 		0, "Received packets discarded in FTQ in Catchup path");
8208 
8209 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8210 		"stat_CatchupInMBUFDiscards",
8211 		CTLFLAG_RD, &sc->stat_CatchupInMBUFDiscards,
8212 		0, "Received packets discarded in controller buffer memory in Catchup path");
8213 
8214 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8215 		"stat_CatchupInRuleCheckerP4Hit",
8216 		CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerP4Hit,
8217 		0, "Received packets rule checker hits in Catchup path");
8218 
8219 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8220 		"com_no_buffers",
8221 		CTLFLAG_RD, &sc->com_no_buffers,
8222 		0, "Valid packets received but no RX buffers available");
8223 
8224 #ifdef BCE_DEBUG
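	/*
	 * These debug controls appear under the device's sysctl tree; for
	 * example, "sysctl dev.bce.0.dump_tx_chain=1" (unit 0 assumed) dumps
	 * the tx_bd chain, while the *_read and dump_ctx handlers take a
	 * register, PHY, NVRAM, or CID value as the integer written.
	 */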
8225 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
8226 		"driver_state", CTLTYPE_INT | CTLFLAG_RW,
8227 		(void *)sc, 0,
8228 		bce_sysctl_driver_state, "I", "Driver state information");
8229 
8230 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
8231 		"hw_state", CTLTYPE_INT | CTLFLAG_RW,
8232 		(void *)sc, 0,
8233 		bce_sysctl_hw_state, "I", "Hardware state information");
8234 
8235 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
8236 		"bc_state", CTLTYPE_INT | CTLFLAG_RW,
8237 		(void *)sc, 0,
8238 		bce_sysctl_bc_state, "I", "Bootcode state information");
8239 
8240 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
8241 		"dump_rx_chain", CTLTYPE_INT | CTLFLAG_RW,
8242 		(void *)sc, 0,
8243 		bce_sysctl_dump_rx_chain, "I", "Dump rx_bd chain");
8244 
8245 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
8246 		"dump_tx_chain", CTLTYPE_INT | CTLFLAG_RW,
8247 		(void *)sc, 0,
8248 		bce_sysctl_dump_tx_chain, "I", "Dump tx_bd chain");
8249 
8250 #ifdef ZERO_COPY_SOCKETS
8251 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
8252 		"dump_pg_chain", CTLTYPE_INT | CTLFLAG_RW,
8253 		(void *)sc, 0,
8254 		bce_sysctl_dump_pg_chain, "I", "Dump page chain");
8255 #endif
8256 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
8257 		"dump_ctx", CTLTYPE_INT | CTLFLAG_RW,
8258 		(void *)sc, 0,
8259 		bce_sysctl_dump_ctx, "I", "Dump context memory");
8260 
8261 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
8262 		"breakpoint", CTLTYPE_INT | CTLFLAG_RW,
8263 		(void *)sc, 0,
8264 		bce_sysctl_breakpoint, "I", "Driver breakpoint");
8265 
8266 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
8267 		"reg_read", CTLTYPE_INT | CTLFLAG_RW,
8268 		(void *)sc, 0,
8269 		bce_sysctl_reg_read, "I", "Register read");
8270 
8271 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
8272 		"nvram_read", CTLTYPE_INT | CTLFLAG_RW,
8273 		(void *)sc, 0,
8274 		bce_sysctl_nvram_read, "I", "NVRAM read");
8275 
8276 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
8277 		"phy_read", CTLTYPE_INT | CTLFLAG_RW,
8278 		(void *)sc, 0,
8279 		bce_sysctl_phy_read, "I", "PHY register read");
8280 
8281 #endif
8282 
8283 	DBEXIT(BCE_VERBOSE_MISC);
8284 }
8285 
8286 
8287 /****************************************************************************/
8288 /* BCE Debug Routines                                                       */
8289 /****************************************************************************/
8290 #ifdef BCE_DEBUG
8291 
8292 /****************************************************************************/
8293 /* Freezes the controller to allow for a cohesive state dump.               */
8294 /*                                                                          */
8295 /* Returns:                                                                 */
8296 /*   Nothing.                                                               */
8297 /****************************************************************************/
8298 static void
8299 bce_freeze_controller(struct bce_softc *sc)
8300 {
8301 	u32 val;
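	/* DISABLE_ALL halts all of the controller's internal state machines. */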
8302 	val = REG_RD(sc, BCE_MISC_COMMAND);
8303 	val |= BCE_MISC_COMMAND_DISABLE_ALL;
8304 	REG_WR(sc, BCE_MISC_COMMAND, val);
8305 }
8306 
8307 
8308 /****************************************************************************/
8309 /* Unfreezes the controller after a freeze operation.  This may not always  */
8310 /* work and the controller will require a reset!                            */
8311 /*                                                                          */
8312 /* Returns:                                                                 */
8313 /*   Nothing.                                                               */
8314 /****************************************************************************/
8315 static void
8316 bce_unfreeze_controller(struct bce_softc *sc)
8317 {
8318 	u32 val;
8319 	val = REG_RD(sc, BCE_MISC_COMMAND);
8320 	val |= BCE_MISC_COMMAND_ENABLE_ALL;
8321 	REG_WR(sc, BCE_MISC_COMMAND, val);
8322 }
8323 
8324 
8325 /****************************************************************************/
8326 /* Prints out Ethernet frame information from an mbuf.                      */
8327 /*                                                                          */
8328 /* Partially decode an Ethernet frame to look at some important headers.    */
8329 /*                                                                          */
8330 /* Returns:                                                                 */
8331 /*   Nothing.                                                               */
8332 /****************************************************************************/
8333 static void
8334 bce_dump_enet(struct bce_softc *sc, struct mbuf *m)
8335 {
8336 	struct ether_vlan_header *eh;
8337 	u16 etype;
8338 	int ehlen;
8339 	struct ip *ip;
8340 	struct tcphdr *th;
8341 	struct udphdr *uh;
8342 	struct arphdr *ah;
8343 
8344 	BCE_PRINTF(
8345 		"-----------------------------"
8346 		" Frame Decode "
8347 		"-----------------------------\n");
8348 
8349 	eh = mtod(m, struct ether_vlan_header *);
8350 
8351 	/* Handle VLAN encapsulation if present. */
8352 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
8353 		etype = ntohs(eh->evl_proto);
8354 		ehlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
8355 	} else {
8356 		etype = ntohs(eh->evl_encap_proto);
8357 		ehlen = ETHER_HDR_LEN;
8358 	}
8359 
8360 	/* ToDo: Add VLAN output. */
8361 	BCE_PRINTF("enet: dest = %6D, src = %6D, type = 0x%04X, hlen = %d\n",
8362 		eh->evl_dhost, ":", eh->evl_shost, ":", etype, ehlen);
8363 
8364 	switch (etype) {
8365 		case ETHERTYPE_IP:
8366 			ip = (struct ip *)(m->m_data + ehlen);
8367 			BCE_PRINTF("--ip: dest = 0x%08X , src = 0x%08X, len = %d bytes, "
8368 				"protocol = 0x%02X, xsum = 0x%04X\n",
8369 				ntohl(ip->ip_dst.s_addr), ntohl(ip->ip_src.s_addr),
8370 				ntohs(ip->ip_len), ip->ip_p, ntohs(ip->ip_sum));
8371 
8372 			switch (ip->ip_p) {
8373 				case IPPROTO_TCP:
8374 					th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
8375 					BCE_PRINTF("-tcp: dest = %d, src = %d, hlen = %d bytes, "
8376 						"flags = 0x%b, csum = 0x%04X\n",
8377 						ntohs(th->th_dport), ntohs(th->th_sport), (th->th_off << 2),
8378 						th->th_flags, "\20\10CWR\07ECE\06URG\05ACK\04PSH\03RST\02SYN\01FIN",
8379 						ntohs(th->th_sum));
8380 					break;
8381 				case IPPROTO_UDP:
8382 					uh = (struct udphdr *)((caddr_t)ip + (ip->ip_hl << 2));
8383 					BCE_PRINTF("-udp: dest = %d, src = %d, len = %d bytes, "
8384 						"csum = 0x%04X\n", ntohs(uh->uh_dport), ntohs(uh->uh_sport),
8385 						ntohs(uh->uh_ulen), ntohs(uh->uh_sum));
8386 					break;
8387 				case IPPROTO_ICMP:
8388 					BCE_PRINTF("icmp:\n");
8389 					break;
8390 				default:
8391 					BCE_PRINTF("----: Other IP protocol.\n");
8392 			}
8393 			break;
8394 		case ETHERTYPE_IPV6:
8395 			BCE_PRINTF("ipv6: No decode supported.\n");
8396 			break;
8397 		case ETHERTYPE_ARP:
8398 			BCE_PRINTF("-arp: ");
8399 			ah = (struct arphdr *) (m->m_data + ehlen);
8400 			switch (ntohs(ah->ar_op)) {
8401 				case ARPOP_REVREQUEST:
8402 					printf("reverse ARP request\n");
8403 					break;
8404 				case ARPOP_REVREPLY:
8405 					printf("reverse ARP reply\n");
8406 					break;
8407 				case ARPOP_REQUEST:
8408 					printf("ARP request\n");
8409 					break;
8410 				case ARPOP_REPLY:
8411 					printf("ARP reply\n");
8412 					break;
8413 				default:
8414 					printf("other ARP operation\n");
8415 			}
8416 			break;
8417 		default:
8418 			BCE_PRINTF("----: Other protocol.\n");
8419 	}
8420 
8421 	BCE_PRINTF(
8422 		"-----------------------------"
8423 		"--------------"
8424 		"-----------------------------\n");
8425 }
8426 
8427 
8428 /****************************************************************************/
8429 /* Prints out information about an mbuf.                                    */
8430 /*                                                                          */
8431 /* Returns:                                                                 */
8432 /*   Nothing.                                                               */
8433 /****************************************************************************/
8434 static __attribute__ ((noinline)) void
8435 bce_dump_mbuf(struct bce_softc *sc, struct mbuf *m)
8436 {
8437 	struct mbuf *mp = m;
8438 
8439 	if (m == NULL) {
8440 		BCE_PRINTF("mbuf: null pointer\n");
8441 		return;
8442 	}
8443 
8444 	while (mp) {
8445 		BCE_PRINTF("mbuf: %p, m_len = %d, m_flags = 0x%b, m_data = %p\n",
8446 			mp, mp->m_len, mp->m_flags,
8447 			"\20\1M_EXT\2M_PKTHDR\3M_EOR\4M_RDONLY",
8448 			mp->m_data);
8449 
8450 		if (mp->m_flags & M_PKTHDR) {
8451 			BCE_PRINTF("- m_pkthdr: len = %d, flags = 0x%b, csum_flags = %b\n",
8452 				mp->m_pkthdr.len, mp->m_flags,
8453 				"\20\12M_BCAST\13M_MCAST\14M_FRAG\15M_FIRSTFRAG"
8454 				"\16M_LASTFRAG\21M_VLANTAG\22M_PROMISC\23M_NOFREE",
8455 				mp->m_pkthdr.csum_flags,
8456 				"\20\1CSUM_IP\2CSUM_TCP\3CSUM_UDP\4CSUM_IP_FRAGS"
8457 				"\5CSUM_FRAGMENT\6CSUM_TSO\11CSUM_IP_CHECKED"
8458 				"\12CSUM_IP_VALID\13CSUM_DATA_VALID\14CSUM_PSEUDO_HDR");
8459 		}
8460 
8461 		if (mp->m_flags & M_EXT) {
8462 			BCE_PRINTF("- m_ext: %p, ext_size = %d, type = ",
8463 				mp->m_ext.ext_buf, mp->m_ext.ext_size);
8464 			switch (mp->m_ext.ext_type) {
8465 				case EXT_CLUSTER:    printf("EXT_CLUSTER\n"); break;
8466 				case EXT_SFBUF:      printf("EXT_SFBUF\n"); break;
8467 				case EXT_JUMBO9:     printf("EXT_JUMBO9\n"); break;
8468 				case EXT_JUMBO16:    printf("EXT_JUMBO16\n"); break;
8469 				case EXT_PACKET:     printf("EXT_PACKET\n"); break;
8470 				case EXT_MBUF:       printf("EXT_MBUF\n"); break;
8471 				case EXT_NET_DRV:    printf("EXT_NET_DRV\n"); break;
8472 				case EXT_MOD_TYPE:   printf("EXT_MOD_TYPE\n"); break;
8473 				case EXT_DISPOSABLE: printf("EXT_DISPOSABLE\n"); break;
8474 				case EXT_EXTREF:     printf("EXT_EXTREF\n"); break;
8475 				default:             printf("UNKNOWN\n");
8476 			}
8477 		}
8478 
8479 		mp = mp->m_next;
8480 	}
8481 }
8482 
8483 
8484 /****************************************************************************/
8485 /* Prints out the mbufs in the TX mbuf chain.                               */
8486 /*                                                                          */
8487 /* Returns:                                                                 */
8488 /*   Nothing.                                                               */
8489 /****************************************************************************/
8490 static __attribute__ ((noinline)) void
8491 bce_dump_tx_mbuf_chain(struct bce_softc *sc, u16 chain_prod, int count)
8492 {
8493 	struct mbuf *m;
8494 
8495 	BCE_PRINTF(
8496 		"----------------------------"
8497 		"  tx mbuf data  "
8498 		"----------------------------\n");
8499 
8500 	for (int i = 0; i < count; i++) {
8501 	 	m = sc->tx_mbuf_ptr[chain_prod];
8502 		BCE_PRINTF("txmbuf[0x%04X]\n", chain_prod);
8503 		bce_dump_mbuf(sc, m);
8504 		chain_prod = TX_CHAIN_IDX(NEXT_TX_BD(chain_prod));
8505 	}
8506 
8507 	BCE_PRINTF(
8508 		"----------------------------"
8509 		"----------------"
8510 		"----------------------------\n");
8511 }
8512 
8513 
8514 /****************************************************************************/
8515 /* Prints out the mbufs in the RX mbuf chain.                               */
8516 /*                                                                          */
8517 /* Returns:                                                                 */
8518 /*   Nothing.                                                               */
8519 /****************************************************************************/
8520 static __attribute__ ((noinline)) void
8521 bce_dump_rx_mbuf_chain(struct bce_softc *sc, u16 chain_prod, int count)
8522 {
8523 	struct mbuf *m;
8524 
8525 	BCE_PRINTF(
8526 		"----------------------------"
8527 		"  rx mbuf data  "
8528 		"----------------------------\n");
8529 
8530 	for (int i = 0; i < count; i++) {
8531 	 	m = sc->rx_mbuf_ptr[chain_prod];
8532 		BCE_PRINTF("rxmbuf[0x%04X]\n", chain_prod);
8533 		bce_dump_mbuf(sc, m);
8534 		chain_prod = RX_CHAIN_IDX(NEXT_RX_BD(chain_prod));
8535 	}
8536 
8537 
8538 	BCE_PRINTF(
8539 		"----------------------------"
8540 		"----------------"
8541 		"----------------------------\n");
8542 }
8543 
8544 
8545 #ifdef ZERO_COPY_SOCKETS
8546 /****************************************************************************/
8547 /* Prints out the mbufs in the mbuf page chain.                             */
8548 /*                                                                          */
8549 /* Returns:                                                                 */
8550 /*   Nothing.                                                               */
8551 /****************************************************************************/
8552 static __attribute__ ((noinline)) void
8553 bce_dump_pg_mbuf_chain(struct bce_softc *sc, u16 chain_prod, int count)
8554 {
8555 	struct mbuf *m;
8556 
8557 	BCE_PRINTF(
8558 		"----------------------------"
8559 		"  pg mbuf data  "
8560 		"----------------------------\n");
8561 
8562 	for (int i = 0; i < count; i++) {
8563 	 	m = sc->pg_mbuf_ptr[chain_prod];
8564 		BCE_PRINTF("pgmbuf[0x%04X]\n", chain_prod);
8565 		bce_dump_mbuf(sc, m);
8566 		chain_prod = PG_CHAIN_IDX(NEXT_PG_BD(chain_prod));
8567 	}
8568 
8569 
8570 	BCE_PRINTF(
8571 		"----------------------------"
8572 		"----------------"
8573 		"----------------------------\n");
8574 }
8575 #endif
8576 
8577 
8578 /****************************************************************************/
8579 /* Prints out a tx_bd structure.                                            */
8580 /*                                                                          */
8581 /* Returns:                                                                 */
8582 /*   Nothing.                                                               */
8583 /****************************************************************************/
8584 static __attribute__ ((noinline)) void
8585 bce_dump_txbd(struct bce_softc *sc, int idx, struct tx_bd *txbd)
8586 {
8587 	if (idx > MAX_TX_BD)
8588 		/* Index out of range. */
8589 		BCE_PRINTF("tx_bd[0x%04X]: Invalid tx_bd index!\n", idx);
8590 	else if ((idx & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
8591 		/* TX Chain page pointer. */
8592 		BCE_PRINTF("tx_bd[0x%04X]: haddr = 0x%08X:%08X, chain page pointer\n",
8593 			idx, txbd->tx_bd_haddr_hi, txbd->tx_bd_haddr_lo);
8594 	else {
8595 			/* Normal tx_bd entry. */
8596 			BCE_PRINTF("tx_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = 0x%08X, "
8597 				"vlan tag= 0x%04X, flags = 0x%04X (", idx,
8598 				txbd->tx_bd_haddr_hi, txbd->tx_bd_haddr_lo,
8599 				txbd->tx_bd_mss_nbytes, txbd->tx_bd_vlan_tag,
8600 				txbd->tx_bd_flags);
8601 
8602 			if (txbd->tx_bd_flags & TX_BD_FLAGS_CONN_FAULT)
8603 				printf(" CONN_FAULT");
8604 
8605 			if (txbd->tx_bd_flags & TX_BD_FLAGS_TCP_UDP_CKSUM)
8606 				printf(" TCP_UDP_CKSUM");
8607 
8608 			if (txbd->tx_bd_flags & TX_BD_FLAGS_IP_CKSUM)
8609 				printf(" IP_CKSUM");
8610 
8611 			if (txbd->tx_bd_flags & TX_BD_FLAGS_VLAN_TAG)
8612 				printf("  VLAN");
8613 
8614 			if (txbd->tx_bd_flags & TX_BD_FLAGS_COAL_NOW)
8615 				printf(" COAL_NOW");
8616 
8617 			if (txbd->tx_bd_flags & TX_BD_FLAGS_DONT_GEN_CRC)
8618 				printf(" DONT_GEN_CRC");
8619 
8620 			if (txbd->tx_bd_flags & TX_BD_FLAGS_START)
8621 				printf(" START");
8622 
8623 			if (txbd->tx_bd_flags & TX_BD_FLAGS_END)
8624 				printf(" END");
8625 
8626 			if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_LSO)
8627 				printf(" LSO");
8628 
8629 			if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_OPTION_WORD)
8630 				printf(" OPTION_WORD");
8631 
8632 			if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_FLAGS)
8633 				printf(" FLAGS");
8634 
8635 			if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_SNAP)
8636 				printf(" SNAP");
8637 
8638 			printf(" )\n");
8639 		}
8640 
8641 }
8642 
8643 
8644 /****************************************************************************/
8645 /* Prints out a rx_bd structure.                                            */
8646 /*                                                                          */
8647 /* Returns:                                                                 */
8648 /*   Nothing.                                                               */
8649 /****************************************************************************/
8650 static __attribute__ ((noinline)) void
8651 bce_dump_rxbd(struct bce_softc *sc, int idx, struct rx_bd *rxbd)
8652 {
8653 	if (idx > MAX_RX_BD)
8654 		/* Index out of range. */
8655 		BCE_PRINTF("rx_bd[0x%04X]: Invalid rx_bd index!\n", idx);
8656 	else if ((idx & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
8657 		/* RX Chain page pointer. */
8658 		BCE_PRINTF("rx_bd[0x%04X]: haddr = 0x%08X:%08X, chain page pointer\n",
8659 			idx, rxbd->rx_bd_haddr_hi, rxbd->rx_bd_haddr_lo);
8660 	else
8661 		/* Normal rx_bd entry. */
8662 		BCE_PRINTF("rx_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = 0x%08X, "
8663 			"flags = 0x%08X\n", idx,
8664 			rxbd->rx_bd_haddr_hi, rxbd->rx_bd_haddr_lo,
8665 			rxbd->rx_bd_len, rxbd->rx_bd_flags);
8666 }
8667 
8668 
8669 #ifdef ZERO_COPY_SOCKETS
8670 /****************************************************************************/
8671 /* Prints out a rx_bd structure in the page chain.                          */
8672 /*                                                                          */
8673 /* Returns:                                                                 */
8674 /*   Nothing.                                                               */
8675 /****************************************************************************/
8676 static __attribute__ ((noinline)) void
8677 bce_dump_pgbd(struct bce_softc *sc, int idx, struct rx_bd *pgbd)
8678 {
8679 	if (idx > MAX_PG_BD)
8680 		/* Index out of range. */
8681 		BCE_PRINTF("pg_bd[0x%04X]: Invalid pg_bd index!\n", idx);
8682 	else if ((idx & USABLE_PG_BD_PER_PAGE) == USABLE_PG_BD_PER_PAGE)
8683 		/* Page Chain page pointer. */
8684 		BCE_PRINTF("pg_bd[0x%04X]: haddr = 0x%08X:%08X, chain page pointer\n",
8685 			idx, pgbd->rx_bd_haddr_hi, pgbd->rx_bd_haddr_lo);
8686 	else
8687 		/* Normal pg_bd entry. */
8688 		BCE_PRINTF("pg_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = 0x%08X, "
8689 			"flags = 0x%08X\n", idx,
8690 			pgbd->rx_bd_haddr_hi, pgbd->rx_bd_haddr_lo,
8691 			pgbd->rx_bd_len, pgbd->rx_bd_flags);
8692 }
8693 #endif
8694 
8695 
8696 /****************************************************************************/
8697 /* Prints out a l2_fhdr structure.                                          */
8698 /*                                                                          */
8699 /* Returns:                                                                 */
8700 /*   Nothing.                                                               */
8701 /****************************************************************************/
8702 static __attribute__ ((noinline)) void
8703 bce_dump_l2fhdr(struct bce_softc *sc, int idx, struct l2_fhdr *l2fhdr)
8704 {
8705 	BCE_PRINTF("l2_fhdr[0x%04X]: status = 0x%b, "
8706 		"pkt_len = %d, vlan = 0x%04x, ip_xsum/hdr_len = 0x%04X, "
8707 		"tcp_udp_xsum = 0x%04X\n", idx,
8708 		l2fhdr->l2_fhdr_status, BCE_L2FHDR_PRINTFB,
8709 		l2fhdr->l2_fhdr_pkt_len, l2fhdr->l2_fhdr_vlan_tag,
8710 		l2fhdr->l2_fhdr_ip_xsum, l2fhdr->l2_fhdr_tcp_udp_xsum);
8711 }
8712 
8713 
8714 /****************************************************************************/
8715 /* Prints out context memory info.  (Only useful for CID 0 to 16.)          */
8716 /*                                                                          */
8717 /* Returns:                                                                 */
8718 /*   Nothing.                                                               */
8719 /****************************************************************************/
8720 static __attribute__ ((noinline)) void
8721 bce_dump_ctx(struct bce_softc *sc, u16 cid)
8722 {
8723 	if (cid <= TX_CID) {
8724 		BCE_PRINTF(
8725 			"----------------------------"
8726 			"    CTX Data    "
8727 			"----------------------------\n");
8728 
8729 		BCE_PRINTF("     0x%04X - (CID) Context ID\n", cid);
8730 
8731 		if (cid == RX_CID) {
8732 			BCE_PRINTF(" 0x%08X - (L2CTX_RX_HOST_BDIDX) host rx "
8733 				"producer index\n",
8734 				CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_RX_HOST_BDIDX));
8735 			BCE_PRINTF(" 0x%08X - (L2CTX_RX_HOST_BSEQ) host byte sequence\n",
8736 				CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_RX_HOST_BSEQ));
8737 			BCE_PRINTF(" 0x%08X - (L2CTX_RX_NX_BSEQ) h/w byte sequence\n",
8738 				CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_RX_NX_BSEQ));
8739 			BCE_PRINTF(" 0x%08X - (L2CTX_RX_NX_BDHADDR_HI) h/w buffer "
8740 				"descriptor address\n",
8741  				CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_RX_NX_BDHADDR_HI));
8742 			BCE_PRINTF(" 0x%08X - (L2CTX_RX_NX_BDHADDR_LO) h/w buffer "
8743 				"descriptor address\n",
8744 				CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_RX_NX_BDHADDR_LO));
8745 			BCE_PRINTF(" 0x%08X - (L2CTX_RX_NX_BDIDX) h/w rx consumer index\n",
8746 				CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_RX_NX_BDIDX));
8747 			BCE_PRINTF(" 0x%08X - (L2CTX_RX_HOST_PG_BDIDX) host page "
8748 				"producer index\n",
8749 				CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_RX_HOST_PG_BDIDX));
8750 			BCE_PRINTF(" 0x%08X - (L2CTX_RX_PG_BUF_SIZE) host rx_bd/page "
8751 				"buffer size\n",
8752 				CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_RX_PG_BUF_SIZE));
8753 			BCE_PRINTF(" 0x%08X - (L2CTX_RX_NX_PG_BDHADDR_HI) h/w page "
8754 				"chain address\n",
8755 				CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_RX_NX_PG_BDHADDR_HI));
8756 			BCE_PRINTF(" 0x%08X - (L2CTX_RX_NX_PG_BDHADDR_LO) h/w page "
8757 				"chain address\n",
8758 				CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_RX_NX_PG_BDHADDR_LO));
8759 			BCE_PRINTF(" 0x%08X - (L2CTX_RX_NX_PG_BDIDX) h/w page "
8760 				"consumer index\n",
8761 				CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_RX_NX_PG_BDIDX));
8762 		} else if (cid == TX_CID) {
8763 			if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
8764 				(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
8765 				BCE_PRINTF(" 0x%08X - (L2CTX_TX_TYPE_XI) ctx type\n",
8766 					CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_TX_TYPE_XI));
8767 				BCE_PRINTF(" 0x%08X - (L2CTX_CMD_TX_TYPE_XI) ctx cmd\n",
8768 					CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_TX_CMD_TYPE_XI));
8769 				BCE_PRINTF(" 0x%08X - (L2CTX_TX_TBDR_BDHADDR_HI_XI) h/w buffer "
8770 					"descriptor address\n",	CTX_RD(sc,
8771 					GET_CID_ADDR(cid), BCE_L2CTX_TX_TBDR_BHADDR_HI_XI));
8772 				BCE_PRINTF(" 0x%08X - (L2CTX_TX_TBDR_BHADDR_LO_XI) h/w buffer "
8773 					"descriptor address\n", CTX_RD(sc,
8774 					GET_CID_ADDR(cid), BCE_L2CTX_TX_TBDR_BHADDR_LO_XI));
8775 				BCE_PRINTF(" 0x%08X - (L2CTX_TX_HOST_BIDX_XI) host producer "
8776 					"index\n", CTX_RD(sc, GET_CID_ADDR(cid),
8777 					BCE_L2CTX_TX_HOST_BIDX_XI));
8778 				BCE_PRINTF(" 0x%08X - (L2CTX_TX_HOST_BSEQ_XI) host byte "
8779 					"sequence\n", CTX_RD(sc, GET_CID_ADDR(cid),
8780 					BCE_L2CTX_TX_HOST_BSEQ_XI));
8781 			} else {
8782 				BCE_PRINTF(" 0x%08X - (L2CTX_TX_TYPE) ctx type\n",
8783 					CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_TX_TYPE));
8784 				BCE_PRINTF(" 0x%08X - (L2CTX_TX_CMD_TYPE) ctx cmd\n",
8785 					CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_TX_CMD_TYPE));
8786 				BCE_PRINTF(" 0x%08X - (L2CTX_TX_TBDR_BHADDR_HI) h/w buffer "
8787 					"descriptor address\n", CTX_RD(sc, GET_CID_ADDR(cid),
8788 					BCE_L2CTX_TX_TBDR_BHADDR_HI));
8789 				BCE_PRINTF(" 0x%08X - (L2CTX_TX_TBDR_BHADDR_LO) h/w buffer "
8790 					"descriptor address\n", CTX_RD(sc, GET_CID_ADDR(cid),
8791 					BCE_L2CTX_TX_TBDR_BHADDR_LO));
8792 				BCE_PRINTF(" 0x%08X - (L2CTX_TX_HOST_BIDX) host producer "
8793 					"index\n", CTX_RD(sc, GET_CID_ADDR(cid),
8794 					BCE_L2CTX_TX_HOST_BIDX));
8795 				BCE_PRINTF(" 0x%08X - (L2CTX_TX_HOST_BSEQ) host byte "
8796 					"sequence\n", CTX_RD(sc, GET_CID_ADDR(cid),
8797 					BCE_L2CTX_TX_HOST_BSEQ));
8798 			}
8799 		} else
8800 			BCE_PRINTF(" Unknown CID\n");
8801 
8802 		BCE_PRINTF(
8803 			"----------------------------"
8804 			"    Raw CTX     "
8805 			"----------------------------\n");
8806 
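		/* Dump the first 0x300 bytes of raw context memory for this CID. */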
8807 		for (int i = 0x0; i < 0x300; i += 0x10) {
8808 			BCE_PRINTF("0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n", i,
8809 				CTX_RD(sc, GET_CID_ADDR(cid), i),
8810 				CTX_RD(sc, GET_CID_ADDR(cid), i + 0x4),
8811 				CTX_RD(sc, GET_CID_ADDR(cid), i + 0x8),
8812 				CTX_RD(sc, GET_CID_ADDR(cid), i + 0xc));
8813 		}
8814 
8815 
8816 		BCE_PRINTF(
8817 			"----------------------------"
8818 			"----------------"
8819 			"----------------------------\n");
8820 	}
8821 }
8822 
8823 
8824 /****************************************************************************/
8825 /* Prints out the FTQ data.                                                 */
8826 /*                                                                          */
8827 /* Returns:                                                                 */
8828 /*   Nothing.                                                               */
8829 /****************************************************************************/
8830 static __attribute__ ((noinline)) void
8831 bce_dump_ftqs(struct bce_softc *sc)
8832 {
8833 	u32 cmd, ctl, cur_depth, max_depth, valid_cnt, val;
8834 
8835 	BCE_PRINTF(
8836 		"----------------------------"
8837 		"    FTQ Data    "
8838 		"----------------------------\n");
8839 
8840 	BCE_PRINTF("   FTQ    Command    Control   Depth_Now  Max_Depth  Valid_Cnt \n");
8841 	BCE_PRINTF(" ------- ---------- ---------- ---------- ---------- ----------\n");
8842 
8843 	/* Setup the generic statistic counters for the FTQ valid count. */
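	/*
	 * Each HC_STAT_GEN_SEL_* register selects four FTQ valid counts,
	 * which are then read back through HC_STAT_GEN_STAT0..15 below.
	 */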
8844 	val = (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PPQ_VALID_CNT << 24) |
8845 		(BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXPCQ_VALID_CNT  << 16) |
8846 		(BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXPQ_VALID_CNT   <<  8) |
8847 		(BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RLUPQ_VALID_CNT);
8848 	REG_WR(sc, BCE_HC_STAT_GEN_SEL_0, val);
8849 
8850 	val = (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TSCHQ_VALID_CNT  << 24) |
8851 		(BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RDMAQ_VALID_CNT  << 16) |
8852 		(BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PTQ_VALID_CNT <<  8) |
8853 		(BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PMQ_VALID_CNT);
8854 	REG_WR(sc, BCE_HC_STAT_GEN_SEL_1, val);
8855 
8856 	val = (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TPATQ_VALID_CNT  << 24) |
8857 		(BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TDMAQ_VALID_CNT  << 16) |
8858 		(BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TXPQ_VALID_CNT   <<  8) |
8859 		(BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TBDRQ_VALID_CNT);
8860 	REG_WR(sc, BCE_HC_STAT_GEN_SEL_2, val);
8861 
8862 	val = (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_COMQ_VALID_CNT   << 24) |
8863 		(BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_COMTQ_VALID_CNT  << 16) |
8864 		(BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_COMXQ_VALID_CNT  <<  8) |
8865 		(BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TASQ_VALID_CNT);
8866 	REG_WR(sc, BCE_HC_STAT_GEN_SEL_3, val);
8867 
8868 	/* Input queue to the Receive Lookup state machine */
8869 	cmd = REG_RD(sc, BCE_RLUP_FTQ_CMD);
8870 	ctl = REG_RD(sc, BCE_RLUP_FTQ_CTL);
8871 	cur_depth = (ctl & BCE_RLUP_FTQ_CTL_CUR_DEPTH) >> 22;
8872 	max_depth = (ctl & BCE_RLUP_FTQ_CTL_MAX_DEPTH) >> 12;
8873 	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT0);
8874 	BCE_PRINTF(" RLUP    0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
8875 		cmd, ctl, cur_depth, max_depth, valid_cnt);
8876 
8877 	/* Input queue to the Receive Processor */
8878 	cmd = REG_RD_IND(sc, BCE_RXP_FTQ_CMD);
8879 	ctl = REG_RD_IND(sc, BCE_RXP_FTQ_CTL);
8880 	cur_depth = (ctl & BCE_RXP_FTQ_CTL_CUR_DEPTH) >> 22;
8881 	max_depth = (ctl & BCE_RXP_FTQ_CTL_MAX_DEPTH) >> 12;
8882 	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT1);
8883 	BCE_PRINTF(" RXP     0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
8884 		cmd, ctl, cur_depth, max_depth, valid_cnt);
8885 
8886 	/* Input queue to the Receive Processor (catchup) */
8887 	cmd = REG_RD_IND(sc, BCE_RXP_CFTQ_CMD);
8888 	ctl = REG_RD_IND(sc, BCE_RXP_CFTQ_CTL);
8889 	cur_depth = (ctl & BCE_RXP_CFTQ_CTL_CUR_DEPTH) >> 22;
8890 	max_depth = (ctl & BCE_RXP_CFTQ_CTL_MAX_DEPTH) >> 12;
8891 	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT2);
8892 	BCE_PRINTF(" RXPC    0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
8893 		cmd, ctl, cur_depth, max_depth, valid_cnt);
8894 
8895 	/* Input queue to the Receive Virtual to Physical state machine */
8896 	cmd = REG_RD(sc, BCE_RV2P_PFTQ_CMD);
8897 	ctl = REG_RD(sc, BCE_RV2P_PFTQ_CTL);
8898 	cur_depth = (ctl & BCE_RV2P_PFTQ_CTL_CUR_DEPTH) >> 22;
8899 	max_depth = (ctl & BCE_RV2P_PFTQ_CTL_MAX_DEPTH) >> 12;
8900 	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT3);
8901 	BCE_PRINTF(" RV2PP   0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
8902 		cmd, ctl, cur_depth, max_depth, valid_cnt);
8903 
8904 	/* Input queue to the Receive Virtual to Physical state machine */
8905 	cmd = REG_RD(sc, BCE_RV2P_MFTQ_CMD);
8906 	ctl = REG_RD(sc, BCE_RV2P_MFTQ_CTL);
8907 	cur_depth = (ctl & BCE_RV2P_MFTQ_CTL_CUR_DEPTH) >> 22;
8908 	max_depth = (ctl & BCE_RV2P_MFTQ_CTL_MAX_DEPTH) >> 12;
8909 	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT4);
8910 	BCE_PRINTF(" RV2PM   0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
8911 		cmd, ctl, cur_depth, max_depth, valid_cnt);
8912 
8913 	/* Input queue to the Receive Virtual to Physical state machine */
8914 	cmd = REG_RD(sc, BCE_RV2P_TFTQ_CMD);
8915 	ctl = REG_RD(sc, BCE_RV2P_TFTQ_CTL);
8916 	cur_depth = (ctl & BCE_RV2P_TFTQ_CTL_CUR_DEPTH) >> 22;
8917 	max_depth = (ctl & BCE_RV2P_TFTQ_CTL_MAX_DEPTH) >> 12;
8918 	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT5);
8919 	BCE_PRINTF(" RV2PT   0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
8920 		cmd, ctl, cur_depth, max_depth, valid_cnt);
8921 
8922 	/* Input queue to the Receive DMA state machine */
8923 	cmd = REG_RD(sc, BCE_RDMA_FTQ_CMD);
8924 	ctl = REG_RD(sc, BCE_RDMA_FTQ_CTL);
8925 	cur_depth = (ctl & BCE_RDMA_FTQ_CTL_CUR_DEPTH) >> 22;
8926 	max_depth = (ctl & BCE_RDMA_FTQ_CTL_MAX_DEPTH) >> 12;
8927 	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT6);
8928 	BCE_PRINTF(" RDMA    0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
8929 		cmd, ctl, cur_depth, max_depth, valid_cnt);
8930 
8931 	/* Input queue to the Transmit Scheduler state machine */
8932 	cmd = REG_RD(sc, BCE_TSCH_FTQ_CMD);
8933 	ctl = REG_RD(sc, BCE_TSCH_FTQ_CTL);
8934 	cur_depth = (ctl & BCE_TSCH_FTQ_CTL_CUR_DEPTH) >> 22;
8935 	max_depth = (ctl & BCE_TSCH_FTQ_CTL_MAX_DEPTH) >> 12;
8936 	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT7);
8937 	BCE_PRINTF(" TSCH    0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
8938 		cmd, ctl, cur_depth, max_depth, valid_cnt);
8939 
8940 	/* Input queue to the Transmit Buffer Descriptor state machine */
8941 	cmd = REG_RD(sc, BCE_TBDR_FTQ_CMD);
8942 	ctl = REG_RD(sc, BCE_TBDR_FTQ_CTL);
8943 	cur_depth = (ctl & BCE_TBDR_FTQ_CTL_CUR_DEPTH) >> 22;
8944 	max_depth = (ctl & BCE_TBDR_FTQ_CTL_MAX_DEPTH) >> 12;
8945 	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT8);
8946 	BCE_PRINTF(" TBDR    0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
8947 		cmd, ctl, cur_depth, max_depth, valid_cnt);
8948 
8949 	/* Input queue to the Transmit Processor */
8950 	cmd = REG_RD_IND(sc, BCE_TXP_FTQ_CMD);
8951 	ctl = REG_RD_IND(sc, BCE_TXP_FTQ_CTL);
8952 	cur_depth = (ctl & BCE_TXP_FTQ_CTL_CUR_DEPTH) >> 22;
8953 	max_depth = (ctl & BCE_TXP_FTQ_CTL_MAX_DEPTH) >> 12;
8954 	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT9);
8955 	BCE_PRINTF(" TXP     0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
8956 		cmd, ctl, cur_depth, max_depth, valid_cnt);
8957 
8958 	/* Input queue to the Transmit DMA state machine */
8959 	cmd = REG_RD(sc, BCE_TDMA_FTQ_CMD);
8960 	ctl = REG_RD(sc, BCE_TDMA_FTQ_CTL);
8961 	cur_depth = (ctl & BCE_TDMA_FTQ_CTL_CUR_DEPTH) >> 22;
8962 	max_depth = (ctl & BCE_TDMA_FTQ_CTL_MAX_DEPTH) >> 12;
8963 	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT10);
8964 	BCE_PRINTF(" TDMA    0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
8965 		cmd, ctl, cur_depth, max_depth, valid_cnt);
8966 
8967 	/* Input queue to the Transmit Patch-Up Processor */
8968 	cmd = REG_RD_IND(sc, BCE_TPAT_FTQ_CMD);
8969 	ctl = REG_RD_IND(sc, BCE_TPAT_FTQ_CTL);
8970 	cur_depth = (ctl & BCE_TPAT_FTQ_CTL_CUR_DEPTH) >> 22;
8971 	max_depth = (ctl & BCE_TPAT_FTQ_CTL_MAX_DEPTH) >> 12;
8972 	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT11);
8973 	BCE_PRINTF(" TPAT    0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
8974 		cmd, ctl, cur_depth, max_depth, valid_cnt);
8975 
8976 	/* Input queue to the Transmit Assembler state machine */
8977 	cmd = REG_RD_IND(sc, BCE_TAS_FTQ_CMD);
8978 	ctl = REG_RD_IND(sc, BCE_TAS_FTQ_CTL);
8979 	cur_depth = (ctl & BCE_TAS_FTQ_CTL_CUR_DEPTH) >> 22;
8980 	max_depth = (ctl & BCE_TAS_FTQ_CTL_MAX_DEPTH) >> 12;
8981 	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT12);
8982 	BCE_PRINTF(" TAS     0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
8983 		cmd, ctl, cur_depth, max_depth, valid_cnt);
8984 
8985 	/* Input queue to the Completion Processor */
8986 	cmd = REG_RD_IND(sc, BCE_COM_COMXQ_FTQ_CMD);
8987 	ctl = REG_RD_IND(sc, BCE_COM_COMXQ_FTQ_CTL);
8988 	cur_depth = (ctl & BCE_COM_COMXQ_FTQ_CTL_CUR_DEPTH) >> 22;
8989 	max_depth = (ctl & BCE_COM_COMXQ_FTQ_CTL_MAX_DEPTH) >> 12;
8990 	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT13);
8991 	BCE_PRINTF(" COMX    0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
8992 		cmd, ctl, cur_depth, max_depth, valid_cnt);
8993 
8994 	/* Input queue to the Completion Processor */
8995 	cmd = REG_RD_IND(sc, BCE_COM_COMTQ_FTQ_CMD);
8996 	ctl = REG_RD_IND(sc, BCE_COM_COMTQ_FTQ_CTL);
8997 	cur_depth = (ctl & BCE_COM_COMTQ_FTQ_CTL_CUR_DEPTH) >> 22;
8998 	max_depth = (ctl & BCE_COM_COMTQ_FTQ_CTL_MAX_DEPTH) >> 12;
8999 	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT14);
9000 	BCE_PRINTF(" COMT    0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
9001 		cmd, ctl, cur_depth, max_depth, valid_cnt);
9002 
9003 	/* Input queue to the Completion Processor */
9004 	cmd = REG_RD_IND(sc, BCE_COM_COMQ_FTQ_CMD);
9005 	ctl = REG_RD_IND(sc, BCE_COM_COMQ_FTQ_CTL);
9006 	cur_depth = (ctl & BCE_COM_COMQ_FTQ_CTL_CUR_DEPTH) >> 22;
9007 	max_depth = (ctl & BCE_COM_COMQ_FTQ_CTL_MAX_DEPTH) >> 12;
9008 	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT15);
9009 	BCE_PRINTF(" COM     0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
9010 		cmd, ctl, cur_depth, max_depth, valid_cnt);
9011 
9012 	/* Setup the generic statistic counters for the FTQ valid count. */
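	/*
	 * Only 16 generic counters are available, so GEN_SEL_0 is
	 * reprogrammed here and STAT0..3 are reused for the remaining
	 * queues (MCP, CP, CS, and RV2PCSR on the 5709/5716).
	 */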
9013 	val = (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_CSQ_VALID_CNT  << 16) |
9014 		(BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_CPQ_VALID_CNT  <<  8) |
9015 		(BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_MGMQ_VALID_CNT);
9016 
9017 	if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709)	||
9018 		(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716))
9019 		val = val | (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PCSQ_VALID_CNT_XI << 24);
9020 	REG_WR(sc, BCE_HC_STAT_GEN_SEL_0, val);
9021 
9022 	/* Input queue to the Management Control Processor */
9023 	cmd = REG_RD_IND(sc, BCE_MCP_MCPQ_FTQ_CMD);
9024 	ctl = REG_RD_IND(sc, BCE_MCP_MCPQ_FTQ_CTL);
9025 	cur_depth = (ctl & BCE_MCP_MCPQ_FTQ_CTL_CUR_DEPTH) >> 22;
9026 	max_depth = (ctl & BCE_MCP_MCPQ_FTQ_CTL_MAX_DEPTH) >> 12;
9027 	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT0);
9028 	BCE_PRINTF(" MCP     0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
9029 		cmd, ctl, cur_depth, max_depth, valid_cnt);
9030 
9031 	/* Input queue to the Command Processor */
9032 	cmd = REG_RD_IND(sc, BCE_CP_CPQ_FTQ_CMD);
9033 	ctl = REG_RD_IND(sc, BCE_CP_CPQ_FTQ_CTL);
9034 	cur_depth = (ctl & BCE_CP_CPQ_FTQ_CTL_CUR_DEPTH) >> 22;
9035 	max_depth = (ctl & BCE_CP_CPQ_FTQ_CTL_MAX_DEPTH) >> 12;
9036 	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT1);
9037 	BCE_PRINTF(" CP      0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
9038 		cmd, ctl, cur_depth, max_depth, valid_cnt);
9039 
9040 	/* Input queue to the Completion Scheduler state machine */
9041 	cmd = REG_RD(sc, BCE_CSCH_CH_FTQ_CMD);
9042 	ctl = REG_RD(sc, BCE_CSCH_CH_FTQ_CTL);
9043 	cur_depth = (ctl & BCE_CSCH_CH_FTQ_CTL_CUR_DEPTH) >> 22;
9044 	max_depth = (ctl & BCE_CSCH_CH_FTQ_CTL_MAX_DEPTH) >> 12;
9045 	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT2);
9046 	BCE_PRINTF(" CS      0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
9047 		cmd, ctl, cur_depth, max_depth, valid_cnt);
9048 
9049 	if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
9050 		(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
9051 		/* Input queue to the Receive Virtual to Physical Command Scheduler */
9052 		cmd = REG_RD(sc, BCE_RV2PCSR_FTQ_CMD);
9053 		ctl = REG_RD(sc, BCE_RV2PCSR_FTQ_CTL);
9054 		cur_depth = (ctl & 0xFFC00000) >> 22;
9055 		max_depth = (ctl & 0x003FF000) >> 12;
9056 		valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT3);
9057 		BCE_PRINTF(" RV2PCSR 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
9058 			cmd, ctl, cur_depth, max_depth, valid_cnt);
9059 	}
9060 
9061 	BCE_PRINTF(
9062 		"----------------------------"
9063 		"----------------"
9064 		"----------------------------\n");
9065 }
9066 
9067 
9068 /****************************************************************************/
9069 /* Prints out the TX chain.                                                 */
9070 /*                                                                          */
9071 /* Returns:                                                                 */
9072 /*   Nothing.                                                               */
9073 /****************************************************************************/
9074 static __attribute__ ((noinline)) void
9075 bce_dump_tx_chain(struct bce_softc *sc, u16 tx_prod, int count)
9076 {
9077 	struct tx_bd *txbd;
9078 
9079 	/* First some info about the tx_bd chain structure. */
9080 	BCE_PRINTF(
9081 		"----------------------------"
9082 		"  tx_bd  chain  "
9083 		"----------------------------\n");
9084 
9085 	BCE_PRINTF("page size      = 0x%08X, tx chain pages        = 0x%08X\n",
9086 		(u32) BCM_PAGE_SIZE, (u32) TX_PAGES);
9087 
9088 	BCE_PRINTF("tx_bd per page = 0x%08X, usable tx_bd per page = 0x%08X\n",
9089 		(u32) TOTAL_TX_BD_PER_PAGE, (u32) USABLE_TX_BD_PER_PAGE);
9090 
9091 	BCE_PRINTF("total tx_bd    = 0x%08X\n", (u32) TOTAL_TX_BD);
9092 
9093 	BCE_PRINTF(
9094 		"----------------------------"
9095 		"   tx_bd data   "
9096 		"----------------------------\n");
9097 
9098 	/* Now print out the tx_bd's themselves. */
9099 	for (int i = 0; i < count; i++) {
9100 	 	txbd = &sc->tx_bd_chain[TX_PAGE(tx_prod)][TX_IDX(tx_prod)];
9101 		bce_dump_txbd(sc, tx_prod, txbd);
9102 		tx_prod = NEXT_TX_BD(tx_prod);
9103 	}
9104 
9105 	BCE_PRINTF(
9106 		"----------------------------"
9107 		"----------------"
9108 		"----------------------------\n");
9109 }
9110 
9111 
9112 /****************************************************************************/
9113 /* Prints out the RX chain.                                                 */
9114 /*                                                                          */
9115 /* Returns:                                                                 */
9116 /*   Nothing.                                                               */
9117 /****************************************************************************/
9118 static __attribute__ ((noinline)) void
9119 bce_dump_rx_chain(struct bce_softc *sc, u16 rx_prod, int count)
9120 {
9121 	struct rx_bd *rxbd;
9122 
9123 	/* First some info about the rx_bd chain structure. */
9124 	BCE_PRINTF(
9125 		"----------------------------"
9126 		"  rx_bd  chain  "
9127 		"----------------------------\n");
9128 
9129 	BCE_PRINTF("page size      = 0x%08X, rx chain pages        = 0x%08X\n",
9130 		(u32) BCM_PAGE_SIZE, (u32) RX_PAGES);
9131 
9132 	BCE_PRINTF("rx_bd per page = 0x%08X, usable rx_bd per page = 0x%08X\n",
9133 		(u32) TOTAL_RX_BD_PER_PAGE, (u32) USABLE_RX_BD_PER_PAGE);
9134 
9135 	BCE_PRINTF("total rx_bd    = 0x%08X\n", (u32) TOTAL_RX_BD);
9136 
9137 	BCE_PRINTF(
9138 		"----------------------------"
9139 		"   rx_bd data   "
9140 		"----------------------------\n");
9141 
9142 	/* Now print out the rx_bd's themselves. */
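	/*
	 * The rx chain is walked by masking the incremented index with
	 * RX_CHAIN_IDX(), so the dump wraps cleanly when rx_prod + count
	 * runs past the end of the chain.
	 */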
9143 	for (int i = 0; i < count; i++) {
9144 		rxbd = &sc->rx_bd_chain[RX_PAGE(rx_prod)][RX_IDX(rx_prod)];
9145 		bce_dump_rxbd(sc, rx_prod, rxbd);
9146 		rx_prod = RX_CHAIN_IDX(rx_prod + 1);
9147 	}
9148 
9149 	BCE_PRINTF(
9150 		"----------------------------"
9151 		"----------------"
9152 		"----------------------------\n");
9153 }
9154 
9155 
9156 #ifdef ZERO_COPY_SOCKETS
9157 /****************************************************************************/
9158 /* Prints out the page chain.                                               */
9159 /*                                                                          */
9160 /* Returns:                                                                 */
9161 /*   Nothing.                                                               */
9162 /****************************************************************************/
9163 static __attribute__ ((noinline)) void
9164 bce_dump_pg_chain(struct bce_softc *sc, u16 pg_prod, int count)
9165 {
9166 	struct rx_bd *pgbd;
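	/* Page chain descriptors reuse the rx_bd descriptor layout. */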
9167 
9168 	/* First some info about the page chain structure. */
9169 	BCE_PRINTF(
9170 		"----------------------------"
9171 		"   page chain   "
9172 		"----------------------------\n");
9173 
9174 	BCE_PRINTF("page size      = 0x%08X, pg chain pages        = 0x%08X\n",
9175 		(u32) BCM_PAGE_SIZE, (u32) PG_PAGES);
9176 
9177 	BCE_PRINTF("rx_bd per page = 0x%08X, usable rx_bd per page = 0x%08X\n",
9178 		(u32) TOTAL_PG_BD_PER_PAGE, (u32) USABLE_PG_BD_PER_PAGE);
9179 
9180 	BCE_PRINTF("total rx_bd    = 0x%08X, max_pg_bd             = 0x%08X\n",
9181 		(u32) TOTAL_PG_BD, (u32) MAX_PG_BD);
9182 
9183 	BCE_PRINTF(
9184 		"----------------------------"
9185 		"   page data    "
9186 		"----------------------------\n");
9187 
9188 	/* Now print out the rx_bd's themselves. */
9189 	for (int i = 0; i < count; i++) {
9190 		pgbd = &sc->pg_bd_chain[PG_PAGE(pg_prod)][PG_IDX(pg_prod)];
9191 		bce_dump_pgbd(sc, pg_prod, pgbd);
9192 		pg_prod = PG_CHAIN_IDX(pg_prod + 1);
9193 	}
9194 
9195 	BCE_PRINTF(
9196 		"----------------------------"
9197 		"----------------"
9198 		"----------------------------\n");
9199 }
9200 #endif
9201 
9202 
9203 /****************************************************************************/
9204 /* Prints out the status block from host memory.                            */
9205 /*                                                                          */
9206 /* Returns:                                                                 */
9207 /*   Nothing.                                                               */
9208 /****************************************************************************/
9209 static __attribute__ ((noinline)) void
9210 bce_dump_status_block(struct bce_softc *sc)
9211 {
9212 	struct status_block *sblk;
9213 
9214 	sblk = sc->status_block;
9215 
9216 	BCE_PRINTF(
9217 		"----------------------------"
9218 		"  Status Block  "
9219 		"----------------------------\n");
9220 
9221 	BCE_PRINTF("    0x%08X - attn_bits\n",
9222 		sblk->status_attn_bits);
9223 
9224 	BCE_PRINTF("    0x%08X - attn_bits_ack\n",
9225 		sblk->status_attn_bits_ack);
9226 
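	/*
	 * Consumer indices are printed twice: the raw value from the
	 * status block and, in parentheses, the value masked through
	 * RX/TX_CHAIN_IDX() to a position within the bd chain.
	 */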
9227 	BCE_PRINTF("0x%04X(0x%04X) - rx_cons0\n",
9228 		sblk->status_rx_quick_consumer_index0,
9229 		(u16) RX_CHAIN_IDX(sblk->status_rx_quick_consumer_index0));
9230 
9231 	BCE_PRINTF("0x%04X(0x%04X) - tx_cons0\n",
9232 		sblk->status_tx_quick_consumer_index0,
9233 		(u16) TX_CHAIN_IDX(sblk->status_tx_quick_consumer_index0));
9234 
9235 	BCE_PRINTF("        0x%04X - status_idx\n", sblk->status_idx);
9236 
9237 	/* These indices are not used for normal L2 drivers. */
9238 	if (sblk->status_rx_quick_consumer_index1)
9239 		BCE_PRINTF("0x%04X(0x%04X) - rx_cons1\n",
9240 			sblk->status_rx_quick_consumer_index1,
9241 			(u16) RX_CHAIN_IDX(sblk->status_rx_quick_consumer_index1));
9242 
9243 	if (sblk->status_tx_quick_consumer_index1)
9244 		BCE_PRINTF("0x%04X(0x%04X) - tx_cons1\n",
9245 			sblk->status_tx_quick_consumer_index1,
9246 			(u16) TX_CHAIN_IDX(sblk->status_tx_quick_consumer_index1));
9247 
9248 	if (sblk->status_rx_quick_consumer_index2)
9249 		BCE_PRINTF("0x%04X(0x%04X) - rx_cons2\n",
9250 			sblk->status_rx_quick_consumer_index2,
9251 			(u16) RX_CHAIN_IDX(sblk->status_rx_quick_consumer_index2));
9252 
9253 	if (sblk->status_tx_quick_consumer_index2)
9254 		BCE_PRINTF("0x%04X(0x%04X) - tx_cons2\n",
9255 			sblk->status_tx_quick_consumer_index2,
9256 			(u16) TX_CHAIN_IDX(sblk->status_tx_quick_consumer_index2));
9257 
9258 	if (sblk->status_rx_quick_consumer_index3)
9259 		BCE_PRINTF("0x%04X(0x%04X) - rx_cons3\n",
9260 			sblk->status_rx_quick_consumer_index3,
9261 			(u16) RX_CHAIN_IDX(sblk->status_rx_quick_consumer_index3));
9262 
9263 	if (sblk->status_tx_quick_consumer_index3)
9264 		BCE_PRINTF("0x%04X(0x%04X) - tx_cons3\n",
9265 			sblk->status_tx_quick_consumer_index3,
9266 			(u16) TX_CHAIN_IDX(sblk->status_tx_quick_consumer_index3));
9267 
9268 	if (sblk->status_rx_quick_consumer_index4 ||
9269 		sblk->status_rx_quick_consumer_index5)
9270 		BCE_PRINTF("rx_cons4  = 0x%08X, rx_cons5      = 0x%08X\n",
9271 			sblk->status_rx_quick_consumer_index4,
9272 			sblk->status_rx_quick_consumer_index5);
9273 
9274 	if (sblk->status_rx_quick_consumer_index6 ||
9275 		sblk->status_rx_quick_consumer_index7)
9276 		BCE_PRINTF("rx_cons6  = 0x%08X, rx_cons7      = 0x%08X\n",
9277 			sblk->status_rx_quick_consumer_index6,
9278 			sblk->status_rx_quick_consumer_index7);
9279 
9280 	if (sblk->status_rx_quick_consumer_index8 ||
9281 		sblk->status_rx_quick_consumer_index9)
9282 		BCE_PRINTF("rx_cons8  = 0x%08X, rx_cons9      = 0x%08X\n",
9283 			sblk->status_rx_quick_consumer_index8,
9284 			sblk->status_rx_quick_consumer_index9);
9285 
9286 	if (sblk->status_rx_quick_consumer_index10 ||
9287 		sblk->status_rx_quick_consumer_index11)
9288 		BCE_PRINTF("rx_cons10 = 0x%08X, rx_cons11     = 0x%08X\n",
9289 			sblk->status_rx_quick_consumer_index10,
9290 			sblk->status_rx_quick_consumer_index11);
9291 
9292 	if (sblk->status_rx_quick_consumer_index12 ||
9293 		sblk->status_rx_quick_consumer_index13)
9294 		BCE_PRINTF("rx_cons12 = 0x%08X, rx_cons13     = 0x%08X\n",
9295 			sblk->status_rx_quick_consumer_index12,
9296 			sblk->status_rx_quick_consumer_index13);
9297 
9298 	if (sblk->status_rx_quick_consumer_index14 ||
9299 		sblk->status_rx_quick_consumer_index15)
9300 		BCE_PRINTF("rx_cons14 = 0x%08X, rx_cons15     = 0x%08X\n",
9301 			sblk->status_rx_quick_consumer_index14,
9302 			sblk->status_rx_quick_consumer_index15);
9303 
9304 	if (sblk->status_completion_producer_index ||
9305 		sblk->status_cmd_consumer_index)
9306 		BCE_PRINTF("com_prod  = 0x%08X, cmd_cons      = 0x%08X\n",
9307 			sblk->status_completion_producer_index,
9308 			sblk->status_cmd_consumer_index);
9309 
9310 	BCE_PRINTF(
9311 		"----------------------------"
9312 		"----------------"
9313 		"----------------------------\n");
9314 }
9315 
9316 
9317 /****************************************************************************/
9318 /* Prints out the statistics block from host memory.                        */
9319 /*                                                                          */
9320 /* Returns:                                                                 */
9321 /*   Nothing.                                                               */
9322 /****************************************************************************/
9323 static __attribute__ ((noinline)) void
9324 bce_dump_stats_block(struct bce_softc *sc)
9325 {
9326 	struct statistics_block *sblk;
9327 
9328 	sblk = sc->stats_block;
9329 
9330 	BCE_PRINTF(
9331 		"---------------"
9332 		" Stats Block  (All Stats Not Shown Are 0) "
9333 		"---------------\n");
9334 
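	/*
	 * The octet and packet counters are 64-bit values kept as
	 * _hi/_lo 32-bit halves; they are printed as "hi:lo" in hex.
	 */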
9335 	if (sblk->stat_IfHCInOctets_hi
9336 		|| sblk->stat_IfHCInOctets_lo)
9337 		BCE_PRINTF("0x%08X:%08X : "
9338 			"IfHcInOctets\n",
9339 			sblk->stat_IfHCInOctets_hi,
9340 			sblk->stat_IfHCInOctets_lo);
9341 
9342 	if (sblk->stat_IfHCInBadOctets_hi
9343 		|| sblk->stat_IfHCInBadOctets_lo)
9344 		BCE_PRINTF("0x%08X:%08X : "
9345 			"IfHcInBadOctets\n",
9346 			sblk->stat_IfHCInBadOctets_hi,
9347 			sblk->stat_IfHCInBadOctets_lo);
9348 
9349 	if (sblk->stat_IfHCOutOctets_hi
9350 		|| sblk->stat_IfHCOutOctets_lo)
9351 		BCE_PRINTF("0x%08X:%08X : "
9352 			"IfHcOutOctets\n",
9353 			sblk->stat_IfHCOutOctets_hi,
9354 			sblk->stat_IfHCOutOctets_lo);
9355 
9356 	if (sblk->stat_IfHCOutBadOctets_hi
9357 		|| sblk->stat_IfHCOutBadOctets_lo)
9358 		BCE_PRINTF("0x%08X:%08X : "
9359 			"IfHcOutBadOctets\n",
9360 			sblk->stat_IfHCOutBadOctets_hi,
9361 			sblk->stat_IfHCOutBadOctets_lo);
9362 
9363 	if (sblk->stat_IfHCInUcastPkts_hi
9364 		|| sblk->stat_IfHCInUcastPkts_lo)
9365 		BCE_PRINTF("0x%08X:%08X : "
9366 			"IfHcInUcastPkts\n",
9367 			sblk->stat_IfHCInUcastPkts_hi,
9368 			sblk->stat_IfHCInUcastPkts_lo);
9369 
9370 	if (sblk->stat_IfHCInBroadcastPkts_hi
9371 		|| sblk->stat_IfHCInBroadcastPkts_lo)
9372 		BCE_PRINTF("0x%08X:%08X : "
9373 			"IfHcInBroadcastPkts\n",
9374 			sblk->stat_IfHCInBroadcastPkts_hi,
9375 			sblk->stat_IfHCInBroadcastPkts_lo);
9376 
9377 	if (sblk->stat_IfHCInMulticastPkts_hi
9378 		|| sblk->stat_IfHCInMulticastPkts_lo)
9379 		BCE_PRINTF("0x%08X:%08X : "
9380 			"IfHcInMulticastPkts\n",
9381 			sblk->stat_IfHCInMulticastPkts_hi,
9382 			sblk->stat_IfHCInMulticastPkts_lo);
9383 
9384 	if (sblk->stat_IfHCOutUcastPkts_hi
9385 		|| sblk->stat_IfHCOutUcastPkts_lo)
9386 		BCE_PRINTF("0x%08X:%08X : "
9387 			"IfHcOutUcastPkts\n",
9388 			sblk->stat_IfHCOutUcastPkts_hi,
9389 			sblk->stat_IfHCOutUcastPkts_lo);
9390 
9391 	if (sblk->stat_IfHCOutBroadcastPkts_hi
9392 		|| sblk->stat_IfHCOutBroadcastPkts_lo)
9393 		BCE_PRINTF("0x%08X:%08X : "
9394 			"IfHcOutBroadcastPkts\n",
9395 			sblk->stat_IfHCOutBroadcastPkts_hi,
9396 			sblk->stat_IfHCOutBroadcastPkts_lo);
9397 
9398 	if (sblk->stat_IfHCOutMulticastPkts_hi
9399 		|| sblk->stat_IfHCOutMulticastPkts_lo)
9400 		BCE_PRINTF("0x%08X:%08X : "
9401 			"IfHcOutMulticastPkts\n",
9402 			sblk->stat_IfHCOutMulticastPkts_hi,
9403 			sblk->stat_IfHCOutMulticastPkts_lo);
9404 
9405 	if (sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors)
9406 		BCE_PRINTF("         0x%08X : "
9407 			"emac_tx_stat_dot3statsinternalmactransmiterrors\n",
9408 			sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors);
9409 
9410 	if (sblk->stat_Dot3StatsCarrierSenseErrors)
9411 		BCE_PRINTF("         0x%08X : Dot3StatsCarrierSenseErrors\n",
9412 			sblk->stat_Dot3StatsCarrierSenseErrors);
9413 
9414 	if (sblk->stat_Dot3StatsFCSErrors)
9415 		BCE_PRINTF("         0x%08X : Dot3StatsFCSErrors\n",
9416 			sblk->stat_Dot3StatsFCSErrors);
9417 
9418 	if (sblk->stat_Dot3StatsAlignmentErrors)
9419 		BCE_PRINTF("         0x%08X : Dot3StatsAlignmentErrors\n",
9420 			sblk->stat_Dot3StatsAlignmentErrors);
9421 
9422 	if (sblk->stat_Dot3StatsSingleCollisionFrames)
9423 		BCE_PRINTF("         0x%08X : Dot3StatsSingleCollisionFrames\n",
9424 			sblk->stat_Dot3StatsSingleCollisionFrames);
9425 
9426 	if (sblk->stat_Dot3StatsMultipleCollisionFrames)
9427 		BCE_PRINTF("         0x%08X : Dot3StatsMultipleCollisionFrames\n",
9428 			sblk->stat_Dot3StatsMultipleCollisionFrames);
9429 
9430 	if (sblk->stat_Dot3StatsDeferredTransmissions)
9431 		BCE_PRINTF("         0x%08X : Dot3StatsDeferredTransmissions\n",
9432 			sblk->stat_Dot3StatsDeferredTransmissions);
9433 
9434 	if (sblk->stat_Dot3StatsExcessiveCollisions)
9435 		BCE_PRINTF("         0x%08X : Dot3StatsExcessiveCollisions\n",
9436 			sblk->stat_Dot3StatsExcessiveCollisions);
9437 
9438 	if (sblk->stat_Dot3StatsLateCollisions)
9439 		BCE_PRINTF("         0x%08X : Dot3StatsLateCollisions\n",
9440 			sblk->stat_Dot3StatsLateCollisions);
9441 
9442 	if (sblk->stat_EtherStatsCollisions)
9443 		BCE_PRINTF("         0x%08X : EtherStatsCollisions\n",
9444 			sblk->stat_EtherStatsCollisions);
9445 
9446 	if (sblk->stat_EtherStatsFragments)
9447 		BCE_PRINTF("         0x%08X : EtherStatsFragments\n",
9448 			sblk->stat_EtherStatsFragments);
9449 
9450 	if (sblk->stat_EtherStatsJabbers)
9451 		BCE_PRINTF("         0x%08X : EtherStatsJabbers\n",
9452 			sblk->stat_EtherStatsJabbers);
9453 
9454 	if (sblk->stat_EtherStatsUndersizePkts)
9455 		BCE_PRINTF("         0x%08X : EtherStatsUndersizePkts\n",
9456 			sblk->stat_EtherStatsUndersizePkts);
9457 
9458 	if (sblk->stat_EtherStatsOverrsizePkts)
9459 		BCE_PRINTF("         0x%08X : EtherStatsOverrsizePkts\n",
9460 			sblk->stat_EtherStatsOverrsizePkts);
9461 
9462 	if (sblk->stat_EtherStatsPktsRx64Octets)
9463 		BCE_PRINTF("         0x%08X : EtherStatsPktsRx64Octets\n",
9464 			sblk->stat_EtherStatsPktsRx64Octets);
9465 
9466 	if (sblk->stat_EtherStatsPktsRx65Octetsto127Octets)
9467 		BCE_PRINTF("         0x%08X : EtherStatsPktsRx65Octetsto127Octets\n",
9468 			sblk->stat_EtherStatsPktsRx65Octetsto127Octets);
9469 
9470 	if (sblk->stat_EtherStatsPktsRx128Octetsto255Octets)
9471 		BCE_PRINTF("         0x%08X : EtherStatsPktsRx128Octetsto255Octets\n",
9472 			sblk->stat_EtherStatsPktsRx128Octetsto255Octets);
9473 
9474 	if (sblk->stat_EtherStatsPktsRx256Octetsto511Octets)
9475 		BCE_PRINTF("         0x%08X : EtherStatsPktsRx256Octetsto511Octets\n",
9476 			sblk->stat_EtherStatsPktsRx256Octetsto511Octets);
9477 
9478 	if (sblk->stat_EtherStatsPktsRx512Octetsto1023Octets)
9479 		BCE_PRINTF("         0x%08X : EtherStatsPktsRx512Octetsto1023Octets\n",
9480 			sblk->stat_EtherStatsPktsRx512Octetsto1023Octets);
9481 
9482 	if (sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets)
9483 		BCE_PRINTF("         0x%08X : EtherStatsPktsRx1024Octetsto1522Octets\n",
9484 			sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets);
9485 
9486 	if (sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets)
9487 		BCE_PRINTF("         0x%08X : EtherStatsPktsRx1523Octetsto9022Octets\n",
9488 			sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets);
9489 
9490 	if (sblk->stat_EtherStatsPktsTx64Octets)
9491 		BCE_PRINTF("         0x%08X : EtherStatsPktsTx64Octets\n",
9492 			sblk->stat_EtherStatsPktsTx64Octets);
9493 
9494 	if (sblk->stat_EtherStatsPktsTx65Octetsto127Octets)
9495 		BCE_PRINTF("         0x%08X : EtherStatsPktsTx65Octetsto127Octets\n",
9496 			sblk->stat_EtherStatsPktsTx65Octetsto127Octets);
9497 
9498 	if (sblk->stat_EtherStatsPktsTx128Octetsto255Octets)
9499 		BCE_PRINTF("         0x%08X : EtherStatsPktsTx128Octetsto255Octets\n",
9500 			sblk->stat_EtherStatsPktsTx128Octetsto255Octets);
9501 
9502 	if (sblk->stat_EtherStatsPktsTx256Octetsto511Octets)
9503 		BCE_PRINTF("         0x%08X : EtherStatsPktsTx256Octetsto511Octets\n",
9504 			sblk->stat_EtherStatsPktsTx256Octetsto511Octets);
9505 
9506 	if (sblk->stat_EtherStatsPktsTx512Octetsto1023Octets)
9507 		BCE_PRINTF("         0x%08X : EtherStatsPktsTx512Octetsto1023Octets\n",
9508 			sblk->stat_EtherStatsPktsTx512Octetsto1023Octets);
9509 
9510 	if (sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets)
9511 		BCE_PRINTF("         0x%08X : EtherStatsPktsTx1024Octetsto1522Octets\n",
9512 			sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets);
9513 
9514 	if (sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets)
9515 		BCE_PRINTF("         0x%08X : EtherStatsPktsTx1523Octetsto9022Octets\n",
9516 			sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets);
9517 
9518 	if (sblk->stat_XonPauseFramesReceived)
9519 		BCE_PRINTF("         0x%08X : XonPauseFramesReceived\n",
9520 			sblk->stat_XonPauseFramesReceived);
9521 
9522 	if (sblk->stat_XoffPauseFramesReceived)
9523 		BCE_PRINTF("         0x%08X : XoffPauseFramesReceived\n",
9524 			sblk->stat_XoffPauseFramesReceived);
9525 
9526 	if (sblk->stat_OutXonSent)
9527 		BCE_PRINTF("         0x%08X : OutXonSent\n",
9528 			sblk->stat_OutXonSent);
9529 
9530 	if (sblk->stat_OutXoffSent)
9531 		BCE_PRINTF("         0x%08X : OutXoffSent\n",
9532 			sblk->stat_OutXoffSent);
9533 
9534 	if (sblk->stat_FlowControlDone)
9535 		BCE_PRINTF("         0x%08X : FlowControlDone\n",
9536 			sblk->stat_FlowControlDone);
9537 
9538 	if (sblk->stat_MacControlFramesReceived)
9539 		BCE_PRINTF("         0x%08X : MacControlFramesReceived\n",
9540 			sblk->stat_MacControlFramesReceived);
9541 
9542 	if (sblk->stat_XoffStateEntered)
9543 		BCE_PRINTF("         0x%08X : XoffStateEntered\n",
9544 			sblk->stat_XoffStateEntered);
9545 
9546 	if (sblk->stat_IfInFramesL2FilterDiscards)
9547 		BCE_PRINTF("         0x%08X : IfInFramesL2FilterDiscards\n",
9548 			sblk->stat_IfInFramesL2FilterDiscards);
9549 
9550 	if (sblk->stat_IfInRuleCheckerDiscards)
9551 		BCE_PRINTF("         0x%08X : IfInRuleCheckerDiscards\n",
9552 			sblk->stat_IfInRuleCheckerDiscards);
9553 
9554 	if (sblk->stat_IfInFTQDiscards)
9555 		BCE_PRINTF("         0x%08X : IfInFTQDiscards\n",
9556 			sblk->stat_IfInFTQDiscards);
9557 
9558 	if (sblk->stat_IfInMBUFDiscards)
9559 		BCE_PRINTF("         0x%08X : IfInMBUFDiscards\n",
9560 			sblk->stat_IfInMBUFDiscards);
9561 
9562 	if (sblk->stat_IfInRuleCheckerP4Hit)
9563 		BCE_PRINTF("         0x%08X : IfInRuleCheckerP4Hit\n",
9564 			sblk->stat_IfInRuleCheckerP4Hit);
9565 
9566 	if (sblk->stat_CatchupInRuleCheckerDiscards)
9567 		BCE_PRINTF("         0x%08X : CatchupInRuleCheckerDiscards\n",
9568 			sblk->stat_CatchupInRuleCheckerDiscards);
9569 
9570 	if (sblk->stat_CatchupInFTQDiscards)
9571 		BCE_PRINTF("         0x%08X : CatchupInFTQDiscards\n",
9572 			sblk->stat_CatchupInFTQDiscards);
9573 
9574 	if (sblk->stat_CatchupInMBUFDiscards)
9575 		BCE_PRINTF("         0x%08X : CatchupInMBUFDiscards\n",
9576 			sblk->stat_CatchupInMBUFDiscards);
9577 
9578 	if (sblk->stat_CatchupInRuleCheckerP4Hit)
9579 		BCE_PRINTF("         0x%08X : CatchupInRuleCheckerP4Hit\n",
9580 			sblk->stat_CatchupInRuleCheckerP4Hit);
9581 
9582 	BCE_PRINTF(
9583 		"----------------------------"
9584 		"----------------"
9585 		"----------------------------\n");
9586 }
9587 
9588 
9589 /****************************************************************************/
9590 /* Prints out a summary of the driver state.                                */
9591 /*                                                                          */
9592 /* Returns:                                                                 */
9593 /*   Nothing.                                                               */
9594 /****************************************************************************/
9595 static __attribute__ ((noinline)) void
9596 bce_dump_driver_state(struct bce_softc *sc)
9597 {
9598 	u32 val_hi, val_lo;
9599 
9600 	BCE_PRINTF(
9601 		"-----------------------------"
9602 		" Driver State "
9603 		"-----------------------------\n");
9604 
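	/*
	 * Virtual addresses are split with BCE_ADDR_HI()/BCE_ADDR_LO()
	 * so each pointer prints as two 32-bit halves in the same
	 * "hi:lo" format regardless of pointer size.
	 */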
9605 	val_hi = BCE_ADDR_HI(sc);
9606 	val_lo = BCE_ADDR_LO(sc);
9607 	BCE_PRINTF("0x%08X:%08X - (sc) driver softc structure virtual address\n",
9608 		val_hi, val_lo);
9609 
9610 	val_hi = BCE_ADDR_HI(sc->bce_vhandle);
9611 	val_lo = BCE_ADDR_LO(sc->bce_vhandle);
9612 	BCE_PRINTF("0x%08X:%08X - (sc->bce_vhandle) PCI BAR virtual address\n",
9613 		val_hi, val_lo);
9614 
9615 	val_hi = BCE_ADDR_HI(sc->status_block);
9616 	val_lo = BCE_ADDR_LO(sc->status_block);
9617 	BCE_PRINTF("0x%08X:%08X - (sc->status_block) status block virtual address\n",
9618 		val_hi, val_lo);
9619 
9620 	val_hi = BCE_ADDR_HI(sc->stats_block);
9621 	val_lo = BCE_ADDR_LO(sc->stats_block);
9622 	BCE_PRINTF("0x%08X:%08X - (sc->stats_block) statistics block virtual address\n",
9623 		val_hi, val_lo);
9624 
9625 	val_hi = BCE_ADDR_HI(sc->tx_bd_chain);
9626 	val_lo = BCE_ADDR_LO(sc->tx_bd_chain);
9627 	BCE_PRINTF(
9628 		"0x%08X:%08X - (sc->tx_bd_chain) tx_bd chain virtual address\n",
9629 		val_hi, val_lo);
9630 
9631 	val_hi = BCE_ADDR_HI(sc->rx_bd_chain);
9632 	val_lo = BCE_ADDR_LO(sc->rx_bd_chain);
9633 	BCE_PRINTF(
9634 		"0x%08X:%08X - (sc->rx_bd_chain) rx_bd chain virtual address\n",
9635 		val_hi, val_lo);
9636 
9637 #ifdef ZERO_COPY_SOCKETS
9638 	val_hi = BCE_ADDR_HI(sc->pg_bd_chain);
9639 	val_lo = BCE_ADDR_LO(sc->pg_bd_chain);
9640 	BCE_PRINTF(
9641 		"0x%08X:%08X - (sc->pg_bd_chain) page chain virtual address\n",
9642 		val_hi, val_lo);
9643 #endif
9644 
9645 	val_hi = BCE_ADDR_HI(sc->tx_mbuf_ptr);
9646 	val_lo = BCE_ADDR_LO(sc->tx_mbuf_ptr);
9647 	BCE_PRINTF(
9648 		"0x%08X:%08X - (sc->tx_mbuf_ptr) tx mbuf chain virtual address\n",
9649 		val_hi, val_lo);
9650 
9651 	val_hi = BCE_ADDR_HI(sc->rx_mbuf_ptr);
9652 	val_lo = BCE_ADDR_LO(sc->rx_mbuf_ptr);
9653 	BCE_PRINTF(
9654 		"0x%08X:%08X - (sc->rx_mbuf_ptr) rx mbuf chain virtual address\n",
9655 		val_hi, val_lo);
9656 
9657 #ifdef ZERO_COPY_SOCKETS
9658 	val_hi = BCE_ADDR_HI(sc->pg_mbuf_ptr);
9659 	val_lo = BCE_ADDR_LO(sc->pg_mbuf_ptr);
9660 	BCE_PRINTF(
9661 		"0x%08X:%08X - (sc->pg_mbuf_ptr) page mbuf chain virtual address\n",
9662 		val_hi, val_lo);
9663 #endif
9664 
9665 	BCE_PRINTF("         0x%08X - (sc->interrupts_generated) h/w intrs\n",
9666 		sc->interrupts_generated);
9667 
9668 	BCE_PRINTF("         0x%08X - (sc->rx_interrupts) rx interrupts handled\n",
9669 		sc->rx_interrupts);
9670 
9671 	BCE_PRINTF("         0x%08X - (sc->tx_interrupts) tx interrupts handled\n",
9672 		sc->tx_interrupts);
9673 
9674 	BCE_PRINTF("         0x%08X - (sc->last_status_idx) status block index\n",
9675 		sc->last_status_idx);
9676 
9677 	BCE_PRINTF("     0x%04X(0x%04X) - (sc->tx_prod) tx producer index\n",
9678 		sc->tx_prod, (u16) TX_CHAIN_IDX(sc->tx_prod));
9679 
9680 	BCE_PRINTF("     0x%04X(0x%04X) - (sc->tx_cons) tx consumer index\n",
9681 		sc->tx_cons, (u16) TX_CHAIN_IDX(sc->tx_cons));
9682 
9683 	BCE_PRINTF("         0x%08X - (sc->tx_prod_bseq) tx producer bseq index\n",
9684 		sc->tx_prod_bseq);
9685 
9686 	BCE_PRINTF("         0x%08X - (sc->debug_tx_mbuf_alloc) tx mbufs allocated\n",
9687 		sc->debug_tx_mbuf_alloc);
9688 
9689 	BCE_PRINTF("         0x%08X - (sc->used_tx_bd) used tx_bd's\n",
9690 		sc->used_tx_bd);
9691 
9692 	BCE_PRINTF("0x%08X/%08X - (sc->tx_hi_watermark) tx hi watermark\n",
9693 		sc->tx_hi_watermark, sc->max_tx_bd);
9694 
9695 	BCE_PRINTF("     0x%04X(0x%04X) - (sc->rx_prod) rx producer index\n",
9696 		sc->rx_prod, (u16) RX_CHAIN_IDX(sc->rx_prod));
9697 
9698 	BCE_PRINTF("     0x%04X(0x%04X) - (sc->rx_cons) rx consumer index\n",
9699 		sc->rx_cons, (u16) RX_CHAIN_IDX(sc->rx_cons));
9700 
9701 	BCE_PRINTF("         0x%08X - (sc->rx_prod_bseq) rx producer bseq index\n",
9702 		sc->rx_prod_bseq);
9703 
9704 	BCE_PRINTF("         0x%08X - (sc->debug_rx_mbuf_alloc) rx mbufs allocated\n",
9705 		sc->debug_rx_mbuf_alloc);
9706 
9707 	BCE_PRINTF("         0x%08X - (sc->free_rx_bd) free rx_bd's\n",
9708 		sc->free_rx_bd);
9709 
9710 #ifdef ZERO_COPY_SOCKETS
9711 	BCE_PRINTF("     0x%04X(0x%04X) - (sc->pg_prod) page producer index\n",
9712 		sc->pg_prod, (u16) PG_CHAIN_IDX(sc->pg_prod));
9713 
9714 	BCE_PRINTF("     0x%04X(0x%04X) - (sc->pg_cons) page consumer index\n",
9715 		sc->pg_cons, (u16) PG_CHAIN_IDX(sc->pg_cons));
9716 
9717 	BCE_PRINTF("         0x%08X - (sc->debug_pg_mbuf_alloc) page mbufs allocated\n",
9718 		sc->debug_pg_mbuf_alloc);
9719 
9720 	BCE_PRINTF("         0x%08X - (sc->free_pg_bd) free page rx_bd's\n",
9721 		sc->free_pg_bd);
9722 
9723 	BCE_PRINTF("0x%08X/%08X - (sc->pg_low_watermark) page low watermark\n",
9724 		sc->pg_low_watermark, sc->max_pg_bd);
9725 #endif
9726 
9727 	BCE_PRINTF("         0x%08X - (sc->mbuf_alloc_failed) "
9728 		"mbuf alloc failures\n",
9729 		sc->mbuf_alloc_failed);
9730 
9731 	BCE_PRINTF("         0x%08X - (sc->debug_mbuf_sim_alloc_failed) "
9732 		"simulated mbuf alloc failures\n",
9733 		sc->debug_mbuf_sim_alloc_failed);
9734 
9735 	BCE_PRINTF("         0x%08X - (sc->bce_flags) bce mac flags\n",
9736 		sc->bce_flags);
9737 
9738 	BCE_PRINTF("         0x%08X - (sc->bce_phy_flags) bce phy flags\n",
9739 		sc->bce_phy_flags);
9740 
9741 	BCE_PRINTF(
9742 		"----------------------------"
9743 		"----------------"
9744 		"----------------------------\n");
9745 }
9746 
9747 
9748 /****************************************************************************/
9749 /* Prints out the hardware state through a summary of important registers,  */
9750 /* followed by a complete register dump.                                    */
9751 /*                                                                          */
9752 /* Returns:                                                                 */
9753 /*   Nothing.                                                               */
9754 /****************************************************************************/
9755 static __attribute__ ((noinline)) void
9756 bce_dump_hw_state(struct bce_softc *sc)
9757 {
9758 	u32 val;
9759 
9760 	BCE_PRINTF(
9761 		"----------------------------"
9762 		" Hardware State "
9763 		"----------------------------\n");
9764 
9765 	BCE_PRINTF("0x%08X - bootcode version\n", sc->bce_fw_ver);
9766 
9767 	val = REG_RD(sc, BCE_MISC_ENABLE_STATUS_BITS);
9768 	BCE_PRINTF("0x%08X - (0x%06X) misc_enable_status_bits\n",
9769 		val, BCE_MISC_ENABLE_STATUS_BITS);
9770 
9771 	val = REG_RD(sc, BCE_DMA_STATUS);
9772 	BCE_PRINTF("0x%08X - (0x%06X) dma_status\n", val, BCE_DMA_STATUS);
9773 
9774 	val = REG_RD(sc, BCE_CTX_STATUS);
9775 	BCE_PRINTF("0x%08X - (0x%06X) ctx_status\n", val, BCE_CTX_STATUS);
9776 
9777 	val = REG_RD(sc, BCE_EMAC_STATUS);
9778 	BCE_PRINTF("0x%08X - (0x%06X) emac_status\n", val, BCE_EMAC_STATUS);
9779 
9780 	val = REG_RD(sc, BCE_RPM_STATUS);
9781 	BCE_PRINTF("0x%08X - (0x%06X) rpm_status\n", val, BCE_RPM_STATUS);
9782 
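	/*
	 * The RLUP (0x2004) and RDMA (0x2c04) status registers are read
	 * by raw offset below; the printed labels identify them.
	 */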
9783 	val = REG_RD(sc, 0x2004);
9784 	BCE_PRINTF("0x%08X - (0x%06X) rlup_status\n", val, 0x2004);
9785 
9786 	val = REG_RD(sc, BCE_RV2P_STATUS);
9787 	BCE_PRINTF("0x%08X - (0x%06X) rv2p_status\n", val, BCE_RV2P_STATUS);
9788 
9789 	val = REG_RD(sc, 0x2c04);
9790 	BCE_PRINTF("0x%08X - (0x%06X) rdma_status\n", val, 0x2c04);
9791 
9792 	val = REG_RD(sc, BCE_TBDR_STATUS);
9793 	BCE_PRINTF("0x%08X - (0x%06X) tbdr_status\n", val, BCE_TBDR_STATUS);
9794 
9795 	val = REG_RD(sc, BCE_TDMA_STATUS);
9796 	BCE_PRINTF("0x%08X - (0x%06X) tdma_status\n", val, BCE_TDMA_STATUS);
9797 
9798 	val = REG_RD(sc, BCE_HC_STATUS);
9799 	BCE_PRINTF("0x%08X - (0x%06X) hc_status\n", val, BCE_HC_STATUS);
9800 
9801 	val = REG_RD_IND(sc, BCE_TXP_CPU_STATE);
9802 	BCE_PRINTF("0x%08X - (0x%06X) txp_cpu_state\n", val, BCE_TXP_CPU_STATE);
9803 
9804 	val = REG_RD_IND(sc, BCE_TPAT_CPU_STATE);
9805 	BCE_PRINTF("0x%08X - (0x%06X) tpat_cpu_state\n", val, BCE_TPAT_CPU_STATE);
9806 
9807 	val = REG_RD_IND(sc, BCE_RXP_CPU_STATE);
9808 	BCE_PRINTF("0x%08X - (0x%06X) rxp_cpu_state\n", val, BCE_RXP_CPU_STATE);
9809 
9810 	val = REG_RD_IND(sc, BCE_COM_CPU_STATE);
9811 	BCE_PRINTF("0x%08X - (0x%06X) com_cpu_state\n", val, BCE_COM_CPU_STATE);
9812 
9813 	val = REG_RD_IND(sc, BCE_MCP_CPU_STATE);
9814 	BCE_PRINTF("0x%08X - (0x%06X) mcp_cpu_state\n", val, BCE_MCP_CPU_STATE);
9815 
9816 	val = REG_RD_IND(sc, BCE_CP_CPU_STATE);
9817 	BCE_PRINTF("0x%08X - (0x%06X) cp_cpu_state\n", val, BCE_CP_CPU_STATE);
9818 
9819 	BCE_PRINTF(
9820 		"----------------------------"
9821 		"----------------"
9822 		"----------------------------\n");
9823 
9824 	BCE_PRINTF(
9825 		"----------------------------"
9826 		" Register  Dump "
9827 		"----------------------------\n");
9828 
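	/* Walk the register space from 0x400 to 0x8000, four registers per line. */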
9829 	for (int i = 0x400; i < 0x8000; i += 0x10) {
9830 		BCE_PRINTF("0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n",
9831 			i, REG_RD(sc, i), REG_RD(sc, i + 0x4),
9832 			REG_RD(sc, i + 0x8), REG_RD(sc, i + 0xC));
9833 	}
9834 
9835 	BCE_PRINTF(
9836 		"----------------------------"
9837 		"----------------"
9838 		"----------------------------\n");
9839 }
9840 
9841 
9842 /****************************************************************************/
9843 /* Prints out the mailbox queue registers.                                  */
9844 /*                                                                          */
9845 /* Returns:                                                                 */
9846 /*   Nothing.                                                               */
9847 /****************************************************************************/
9848 static __attribute__ ((noinline)) void
9849 bce_dump_mq_regs(struct bce_softc *sc)
9850 {
9851 	BCE_PRINTF(
9852 		"----------------------------"
9853 		"    MQ Regs     "
9854 		"----------------------------\n");
9855 
9856 	BCE_PRINTF(
9857 		"----------------------------"
9858 		"----------------"
9859 		"----------------------------\n");
9860 
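	/* The mailbox queue registers live at 0x3c00-0x3fff; dump four per line. */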
9861 	for (int i = 0x3c00; i < 0x4000; i += 0x10) {
9862 		BCE_PRINTF("0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n",
9863 			i, REG_RD(sc, i), REG_RD(sc, i + 0x4),
9864 			REG_RD(sc, i + 0x8), REG_RD(sc, i + 0xC));
9865 	}
9866 
9867 	BCE_PRINTF(
9868 		"----------------------------"
9869 		"----------------"
9870 		"----------------------------\n");
9871 }
9872 
9873 
9874 /****************************************************************************/
9875 /* Prints out the bootcode state.                                           */
9876 /*                                                                          */
9877 /* Returns:                                                                 */
9878 /*   Nothing.                                                               */
9879 /****************************************************************************/
9880 static __attribute__ ((noinline)) void
9881 bce_dump_bc_state(struct bce_softc *sc)
9882 {
9883 	u32 val;
9884 
9885 	BCE_PRINTF(
9886 		"----------------------------"
9887 		" Bootcode State "
9888 		"----------------------------\n");
9889 
9890 	BCE_PRINTF("0x%08X - bootcode version\n", sc->bce_fw_ver);
9891 
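	/*
	 * The bootcode state words live in the shared memory window; each
	 * is read indirectly at bce_shmem_base plus its field offset.
	 */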
9892 	val = REG_RD_IND(sc, sc->bce_shmem_base + BCE_BC_RESET_TYPE);
9893 	BCE_PRINTF("0x%08X - (0x%06X) reset_type\n",
9894 		val, BCE_BC_RESET_TYPE);
9895 
9896 	val = REG_RD_IND(sc, sc->bce_shmem_base + BCE_BC_STATE);
9897 	BCE_PRINTF("0x%08X - (0x%06X) state\n",
9898 		val, BCE_BC_STATE);
9899 
9900 	val = REG_RD_IND(sc, sc->bce_shmem_base + BCE_BC_CONDITION);
9901 	BCE_PRINTF("0x%08X - (0x%06X) condition\n",
9902 		val, BCE_BC_CONDITION);
9903 
9904 	val = REG_RD_IND(sc, sc->bce_shmem_base + BCE_BC_STATE_DEBUG_CMD);
9905 	BCE_PRINTF("0x%08X - (0x%06X) debug_cmd\n",
9906 		val, BCE_BC_STATE_DEBUG_CMD);
9907 
9908 	BCE_PRINTF(
9909 		"----------------------------"
9910 		"----------------"
9911 		"----------------------------\n");
9912 }
9913 
9914 
9915 /****************************************************************************/
9916 /* Prints out the TXP processor state.                                      */
9917 /*                                                                          */
9918 /* Returns:                                                                 */
9919 /*   Nothing.                                                               */
9920 /****************************************************************************/
9921 static __attribute__ ((noinline)) void
9922 bce_dump_txp_state(struct bce_softc *sc, int regs)
9923 {
9924 	u32 val;
9925 	u32 fw_version[3];
9926 
9927 	BCE_PRINTF(
9928 		"----------------------------"
9929 		"   TXP  State   "
9930 		"----------------------------\n");
9931 
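	/*
	 * The firmware version is stored as an ASCII string in the TXP
	 * scratchpad; htonl() puts the bytes of each word back into
	 * string order on little-endian hosts (the string is assumed to
	 * be NUL terminated within these 12 bytes).
	 */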
9932 	for (int i = 0; i < 3; i++)
9933 		fw_version[i] = htonl(REG_RD_IND(sc,
9934 			(BCE_TXP_SCRATCH + 0x10 + i * 4)));
9935 	BCE_PRINTF("Firmware version - %s\n", (char *) fw_version);
9936 
9937 	val = REG_RD_IND(sc, BCE_TXP_CPU_MODE);
9938 	BCE_PRINTF("0x%08X - (0x%06X) txp_cpu_mode\n", val, BCE_TXP_CPU_MODE);
9939 
9940 	val = REG_RD_IND(sc, BCE_TXP_CPU_STATE);
9941 	BCE_PRINTF("0x%08X - (0x%06X) txp_cpu_state\n", val, BCE_TXP_CPU_STATE);
9942 
9943 	val = REG_RD_IND(sc, BCE_TXP_CPU_EVENT_MASK);
9944 	BCE_PRINTF("0x%08X - (0x%06X) txp_cpu_event_mask\n", val,
9945 		BCE_TXP_CPU_EVENT_MASK);
9946 
9947 	if (regs) {
9948 		BCE_PRINTF(
9949 			"----------------------------"
9950 			" Register  Dump "
9951 			"----------------------------\n");
9952 
9953 		for (int i = BCE_TXP_CPU_MODE; i < 0x68000; i += 0x10) {
9954 			/* Skip the big blank spaces */
9955 			if (i < 0x45400 || i > 0x5ffff)
9956 				BCE_PRINTF("0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n",
9957 					i, REG_RD_IND(sc, i), REG_RD_IND(sc, i + 0x4),
9958 					REG_RD_IND(sc, i + 0x8), REG_RD_IND(sc, i + 0xC));
9959 		}
9960 	}
9961 
9962 	BCE_PRINTF(
9963 		"----------------------------"
9964 		"----------------"
9965 		"----------------------------\n");
9966 }
9967 
9968 
9969 /****************************************************************************/
9970 /* Prints out the RXP processor state.                                      */
9971 /*                                                                          */
9972 /* Returns:                                                                 */
9973 /*   Nothing.                                                               */
9974 /****************************************************************************/
9975 static __attribute__ ((noinline)) void
9976 bce_dump_rxp_state(struct bce_softc *sc, int regs)
9977 {
9978 	u32 val;
9979 	u32 fw_version[3];
9980 
9981 	BCE_PRINTF(
9982 		"----------------------------"
9983 		"   RXP  State   "
9984 		"----------------------------\n");
9985 
9986 	for (int i = 0; i < 3; i++)
9987 		fw_version[i] = htonl(REG_RD_IND(sc,
9988 			(BCE_RXP_SCRATCH + 0x10 + i * 4)));
9989 	BCE_PRINTF("Firmware version - %s\n", (char *) fw_version);
9990 
9991 	val = REG_RD_IND(sc, BCE_RXP_CPU_MODE);
9992 	BCE_PRINTF("0x%08X - (0x%06X) rxp_cpu_mode\n", val, BCE_RXP_CPU_MODE);
9993 
9994 	val = REG_RD_IND(sc, BCE_RXP_CPU_STATE);
9995 	BCE_PRINTF("0x%08X - (0x%06X) rxp_cpu_state\n", val, BCE_RXP_CPU_STATE);
9996 
9997 	val = REG_RD_IND(sc, BCE_RXP_CPU_EVENT_MASK);
9998 	BCE_PRINTF("0x%08X - (0x%06X) rxp_cpu_event_mask\n", val,
9999 		BCE_RXP_CPU_EVENT_MASK);
10000 
10001 	if (regs) {
10002 		BCE_PRINTF(
10003 			"----------------------------"
10004 			" Register  Dump "
10005 			"----------------------------\n");
10006 
10007 		for (int i = BCE_RXP_CPU_MODE; i < 0xe8fff; i += 0x10) {
10008 			/* Skip the big blank spaces */
10009 			if (i < 0xc5400 || i > 0xdffff)
10010 				BCE_PRINTF("0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n",
10011 					i, REG_RD_IND(sc, i), REG_RD_IND(sc, i + 0x4),
10012 					REG_RD_IND(sc, i + 0x8), REG_RD_IND(sc, i + 0xC));
10013 		}
10014 	}
10015 
10016 	BCE_PRINTF(
10017 		"----------------------------"
10018 		"----------------"
10019 		"----------------------------\n");
10020 }
10021 
10022 
10023 /****************************************************************************/
10024 /* Prints out the TPAT processor state.                                     */
10025 /*                                                                          */
10026 /* Returns:                                                                 */
10027 /*   Nothing.                                                               */
10028 /****************************************************************************/
10029 static __attribute__ ((noinline)) void
10030 bce_dump_tpat_state(struct bce_softc *sc, int regs)
10031 {
10032 	u32 val;
10033 	u32 fw_version[3];
10034 
10035 	BCE_PRINTF(
10036 		"----------------------------"
10037 		"   TPAT State   "
10038 		"----------------------------\n");
10039 
10040 	for (int i = 0; i < 3; i++)
10041 		fw_version[i] = htonl(REG_RD_IND(sc,
10042 			(BCE_TPAT_SCRATCH + 0x410 + i * 4)));
10043 	BCE_PRINTF("Firmware version - %s\n", (char *) fw_version);
10044 
10045 	val = REG_RD_IND(sc, BCE_TPAT_CPU_MODE);
10046 	BCE_PRINTF("0x%08X - (0x%06X) tpat_cpu_mode\n", val, BCE_TPAT_CPU_MODE);
10047 
10048 	val = REG_RD_IND(sc, BCE_TPAT_CPU_STATE);
10049 	BCE_PRINTF("0x%08X - (0x%06X) tpat_cpu_state\n", val, BCE_TPAT_CPU_STATE);
10050 
10051 	val = REG_RD_IND(sc, BCE_TPAT_CPU_EVENT_MASK);
10052 	BCE_PRINTF("0x%08X - (0x%06X) tpat_cpu_event_mask\n", val,
10053 		BCE_TPAT_CPU_EVENT_MASK);
10054 
10055 	if (regs) {
10056 		BCE_PRINTF(
10057 			"----------------------------"
10058 			" Register  Dump "
10059 			"----------------------------\n");
10060 
10061 		for (int i = BCE_TPAT_CPU_MODE; i < 0xa3fff; i += 0x10) {
10062 			/* Skip the big blank spaces */
10063 			if (i < 0x85400 || i > 0x9ffff)
10064 				BCE_PRINTF("0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n",
10065 					i, REG_RD_IND(sc, i), REG_RD_IND(sc, i + 0x4),
10066 					REG_RD_IND(sc, i + 0x8), REG_RD_IND(sc, i + 0xC));
10067 		}
10068 	}
10069 
10070 	BCE_PRINTF(
10071 		"----------------------------"
10072 		"----------------"
10073 		"----------------------------\n");
10074 }
10075 
10076 
10077 /****************************************************************************/
10078 /* Prints out the Command Procesor (CP) state.                              */
10079 /* Prints out the Command Processor (CP) state.                             */
10080 /* Returns:                                                                 */
10081 /*   Nothing.                                                               */
10082 /****************************************************************************/
10083 static __attribute__ ((noinline)) void
10084 bce_dump_cp_state(struct bce_softc *sc, int regs)
10085 {
10086 	u32 val;
10087 	u32 fw_version[3];
10088 
10089 	BCE_PRINTF(
10090 		"----------------------------"
10091 		"    CP State    "
10092 		"----------------------------\n");
10093 
10094 	for (int i = 0; i < 3; i++)
10095 		fw_version[i] = htonl(REG_RD_IND(sc,
10096 			(BCE_CP_SCRATCH + 0x10 + i * 4)));
10097 	BCE_PRINTF("Firmware version - %s\n", (char *) fw_version);
10098 
10099 	val = REG_RD_IND(sc, BCE_CP_CPU_MODE);
10100 	BCE_PRINTF("0x%08X - (0x%06X) cp_cpu_mode\n", val, BCE_CP_CPU_MODE);
10101 
10102 	val = REG_RD_IND(sc, BCE_CP_CPU_STATE);
10103 	BCE_PRINTF("0x%08X - (0x%06X) cp_cpu_state\n", val, BCE_CP_CPU_STATE);
10104 
10105 	val = REG_RD_IND(sc, BCE_CP_CPU_EVENT_MASK);
10106 	BCE_PRINTF("0x%08X - (0x%06X) cp_cpu_event_mask\n", val,
10107 		BCE_CP_CPU_EVENT_MASK);
10108 
10109 	if (regs) {
10110 		BCE_PRINTF(
10111 			"----------------------------"
10112 			" Register  Dump "
10113 			"----------------------------\n");
10114 
10115 		for (int i = BCE_CP_CPU_MODE; i < 0x1aa000; i += 0x10) {
10116 			/* Skip the big blank spaces */
10117 			if (i < 0x185400 || i > 0x19ffff)
10118 				BCE_PRINTF("0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n",
10119 					i, REG_RD_IND(sc, i), REG_RD_IND(sc, i + 0x4),
10120 					REG_RD_IND(sc, i + 0x8), REG_RD_IND(sc, i + 0xC));
10121 		}
10122 	}
10123 
10124 	BCE_PRINTF(
10125 		"----------------------------"
10126 		"----------------"
10127 		"----------------------------\n");
10128 }
10129 
10130 
10131 /****************************************************************************/
10132 /* Prints out the Completion Procesor (COM) state.                          */
10133 /* Prints out the Completion Processor (COM) state.                         */
10134 /* Returns:                                                                 */
10135 /*   Nothing.                                                               */
10136 /****************************************************************************/
10137 static __attribute__ ((noinline)) void
10138 bce_dump_com_state(struct bce_softc *sc, int regs)
10139 {
10140 	u32 val;
10141 	u32 fw_version[3];
10142 
10143 	BCE_PRINTF(
10144 		"----------------------------"
10145 		"   COM State    "
10146 		"----------------------------\n");
10147 
10148 	for (int i = 0; i < 3; i++)
10149 		fw_version[i] = htonl(REG_RD_IND(sc,
10150 			(BCE_COM_SCRATCH + 0x10 + i * 4)));
10151 	BCE_PRINTF("Firmware version - %s\n", (char *) fw_version);
10152 
10153 	val = REG_RD_IND(sc, BCE_COM_CPU_MODE);
10154 	BCE_PRINTF("0x%08X - (0x%06X) com_cpu_mode\n", val, BCE_COM_CPU_MODE);
10155 
10156 	val = REG_RD_IND(sc, BCE_COM_CPU_STATE);
10157 	BCE_PRINTF("0x%08X - (0x%06X) com_cpu_state\n", val, BCE_COM_CPU_STATE);
10158 
10159 	val = REG_RD_IND(sc, BCE_COM_CPU_EVENT_MASK);
10160 	BCE_PRINTF("0x%08X - (0x%06X) com_cpu_event_mask\n", val,
10161 		BCE_COM_CPU_EVENT_MASK);
10162 
10163 	if (regs) {
10164 		BCE_PRINTF(
10165 			"----------------------------"
10166 			" Register  Dump "
10167 			"----------------------------\n");
10168 
10169 		for (int i = BCE_COM_CPU_MODE; i < 0x1053e8; i += 0x10) {
10170 			BCE_PRINTF("0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n",
10171 				i, REG_RD_IND(sc, i), REG_RD_IND(sc, i + 0x4),
10172 				REG_RD_IND(sc, i + 0x8), REG_RD_IND(sc, i + 0xC));
10173 		}
10174 	}
10175 
10176 	BCE_PRINTF(
10177 		"----------------------------"
10178 		"----------------"
10179 		"----------------------------\n");
10180 }
10181 
10182 
10183 /****************************************************************************/
10184 /* Prints out the driver state and then enters the debugger.                */
10185 /*                                                                          */
10186 /* Returns:                                                                 */
10187 /*   Nothing.                                                               */
10188 /****************************************************************************/
10189 static void
10190 bce_breakpoint(struct bce_softc *sc)
10191 {
10192 
10193 	/*
10194 	 * Unreachable code to silence compiler warnings
10195 	 * about unused functions.
10196 	 */
10197 	if (0) {
10198 		bce_freeze_controller(sc);
10199 		bce_unfreeze_controller(sc);
10200 		bce_dump_enet(sc, NULL);
10201 		bce_dump_txbd(sc, 0, NULL);
10202 		bce_dump_rxbd(sc, 0, NULL);
10203 		bce_dump_tx_mbuf_chain(sc, 0, USABLE_TX_BD);
10204 		bce_dump_rx_mbuf_chain(sc, 0, USABLE_RX_BD);
10205 		bce_dump_l2fhdr(sc, 0, NULL);
10206 		bce_dump_ctx(sc, RX_CID);
10207 		bce_dump_ftqs(sc);
10208 		bce_dump_tx_chain(sc, 0, USABLE_TX_BD);
10209 		bce_dump_rx_chain(sc, 0, USABLE_RX_BD);
10210 		bce_dump_status_block(sc);
10211 		bce_dump_stats_block(sc);
10212 		bce_dump_driver_state(sc);
10213 		bce_dump_hw_state(sc);
10214 		bce_dump_bc_state(sc);
10215 		bce_dump_txp_state(sc, 0);
10216 		bce_dump_rxp_state(sc, 0);
10217 		bce_dump_tpat_state(sc, 0);
10218 		bce_dump_cp_state(sc, 0);
10219 		bce_dump_com_state(sc, 0);
10220 #ifdef ZERO_COPY_SOCKETS
10221 		bce_dump_pgbd(sc, 0, NULL);
10222 		bce_dump_pg_mbuf_chain(sc, 0, USABLE_PG_BD);
10223 		bce_dump_pg_chain(sc, 0, USABLE_PG_BD);
10224 #endif
10225 	}
10226 
10227 	bce_dump_status_block(sc);
10228 	bce_dump_driver_state(sc);
10229 
10230 	/* Call the debugger. */
10231 	breakpoint();
10232 
10233 	return;
10234 }
10235 #endif
10236 
10237