xref: /dragonfly/sys/dev/netif/bce/if_bce.c (revision c9c5aa9e)
1 /*-
2  * Copyright (c) 2006-2007 Broadcom Corporation
3  *	David Christensen <davidch@broadcom.com>.  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  *
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. Neither the name of Broadcom Corporation nor the name of its contributors
15  *    may be used to endorse or promote products derived from this software
16  *    without specific prior written consent.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS'
19  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
22  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28  * THE POSSIBILITY OF SUCH DAMAGE.
29  *
30  * $FreeBSD: src/sys/dev/bce/if_bce.c,v 1.31 2007/05/16 23:34:11 davidch Exp $
31  */
32 
33 /*
34  * The following controllers are supported by this driver:
35  *   BCM5706C A2, A3
36  *   BCM5706S A2, A3
37  *   BCM5708C B1, B2
38  *   BCM5708S B1, B2
39  *   BCM5709C A1, B2, C0
40  *   BCM5716  C0
41  *
42  * The following controllers are not supported by this driver:
43  *   BCM5706C A0, A1
44  *   BCM5706S A0, A1
45  *   BCM5708C A0, B0
46  *   BCM5708S A0, B0
47  *   BCM5709C A0, B0, B1
48  *   BCM5709S A0, A1, B0, B1, B2, C0
49  *
50  *
51  * Note about MSI-X on 5709/5716:
52  * - 9 MSI-X vectors are supported.
53  * - MSI-X vectors, RX/TX rings and status blocks' association
54  *   are fixed:
55  *   o  The first RX ring and the first TX ring use the first
56  *      status block.
57  *   o  The first MSI-X vector is associated with the first
58  *      status block.
59  *   o  The second RX ring and the second TX ring use the second
60  *      status block.
61  *   o  The second MSI-X vector is associated with the second
62  *      status block.
63  *   ...
64  *   and so on so forth.
65  * - Status blocks must reside in physically contiguous memory
66  *   and each status block consumes 128bytes.  In addition to
67  *   this, the memory for the status blocks is aligned on 128bytes
68  *   in this driver.  (see bce_dma_alloc() and HC_CONFIG)
69  * - Each status block has its own coalesce parameters, which also
70  *   serve as the related MSI-X vector's interrupt moderation
71  *   parameters.  (see bce_coal_change())
72  */
73 
74 #include "opt_bce.h"
75 #include "opt_ifpoll.h"
76 
77 #include <sys/param.h>
78 #include <sys/bus.h>
79 #include <sys/endian.h>
80 #include <sys/kernel.h>
81 #include <sys/interrupt.h>
82 #include <sys/mbuf.h>
83 #include <sys/malloc.h>
84 #include <sys/queue.h>
85 #include <sys/rman.h>
86 #include <sys/serialize.h>
87 #include <sys/socket.h>
88 #include <sys/sockio.h>
89 #include <sys/sysctl.h>
90 
91 #include <netinet/ip.h>
92 #include <netinet/tcp.h>
93 
94 #include <net/bpf.h>
95 #include <net/ethernet.h>
96 #include <net/if.h>
97 #include <net/if_arp.h>
98 #include <net/if_dl.h>
99 #include <net/if_media.h>
100 #include <net/if_poll.h>
101 #include <net/if_types.h>
102 #include <net/ifq_var.h>
103 #include <net/if_ringmap.h>
104 #include <net/toeplitz.h>
105 #include <net/toeplitz2.h>
106 #include <net/vlan/if_vlan_var.h>
107 #include <net/vlan/if_vlan_ether.h>
108 
109 #include <dev/netif/mii_layer/mii.h>
110 #include <dev/netif/mii_layer/miivar.h>
111 #include <dev/netif/mii_layer/brgphyreg.h>
112 
113 #include <bus/pci/pcireg.h>
114 #include <bus/pci/pcivar.h>
115 
116 #include "miibus_if.h"
117 
118 #include <dev/netif/bce/if_bcereg.h>
119 #include <dev/netif/bce/if_bcefw.h>
120 
121 #define BCE_MSI_CKINTVL		((10 * hz) / 1000)	/* 10ms */
122 
123 #ifdef BCE_RSS_DEBUG
124 #define BCE_RSS_DPRINTF(sc, lvl, fmt, ...) \
125 do { \
126 	if (sc->rss_debug >= lvl) \
127 		if_printf(&sc->arpcom.ac_if, fmt, __VA_ARGS__); \
128 } while (0)
129 #else	/* !BCE_RSS_DEBUG */
130 #define BCE_RSS_DPRINTF(sc, lvl, fmt, ...)	((void)0)
131 #endif	/* BCE_RSS_DEBUG */
132 
133 /****************************************************************************/
134 /* PCI Device ID Table                                                      */
135 /*                                                                          */
136 /* Used by bce_probe() to identify the devices supported by this driver.    */
137 /****************************************************************************/
138 #define BCE_DEVDESC_MAX		64
139 
140 static struct bce_type bce_devs[] = {
141 	/* BCM5706C Controllers and OEM boards. */
142 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706,  HP_VENDORID, 0x3101,
143 		"HP NC370T Multifunction Gigabit Server Adapter" },
144 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706,  HP_VENDORID, 0x3106,
145 		"HP NC370i Multifunction Gigabit Server Adapter" },
146 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706,  HP_VENDORID, 0x3070,
147 		"HP NC380T PCIe DP Multifunc Gig Server Adapter" },
148 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706,  HP_VENDORID, 0x1709,
149 		"HP NC371i Multifunction Gigabit Server Adapter" },
150 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706,  PCI_ANY_ID,  PCI_ANY_ID,
151 		"Broadcom NetXtreme II BCM5706 1000Base-T" },
152 
153 	/* BCM5706S controllers and OEM boards. */
154 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, HP_VENDORID, 0x3102,
155 		"HP NC370F Multifunction Gigabit Server Adapter" },
156 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, PCI_ANY_ID,  PCI_ANY_ID,
157 		"Broadcom NetXtreme II BCM5706 1000Base-SX" },
158 
159 	/* BCM5708C controllers and OEM boards. */
160 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708,  HP_VENDORID, 0x7037,
161 		"HP NC373T PCIe Multifunction Gig Server Adapter" },
162 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708,  HP_VENDORID, 0x7038,
163 		"HP NC373i Multifunction Gigabit Server Adapter" },
164 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708,  HP_VENDORID, 0x7045,
165 		"HP NC374m PCIe Multifunction Adapter" },
166 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708,  PCI_ANY_ID,  PCI_ANY_ID,
167 		"Broadcom NetXtreme II BCM5708 1000Base-T" },
168 
169 	/* BCM5708S controllers and OEM boards. */
170 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708S,  HP_VENDORID, 0x1706,
171 		"HP NC373m Multifunction Gigabit Server Adapter" },
172 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708S,  HP_VENDORID, 0x703b,
173 		"HP NC373i Multifunction Gigabit Server Adapter" },
174 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708S,  HP_VENDORID, 0x703d,
175 		"HP NC373F PCIe Multifunc Giga Server Adapter" },
176 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708S,  PCI_ANY_ID,  PCI_ANY_ID,
177 		"Broadcom NetXtreme II BCM5708S 1000Base-T" },
178 
179 	/* BCM5709C controllers and OEM boards. */
180 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709,  HP_VENDORID, 0x7055,
181 		"HP NC382i DP Multifunction Gigabit Server Adapter" },
182 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709,  HP_VENDORID, 0x7059,
183 		"HP NC382T PCIe DP Multifunction Gigabit Server Adapter" },
184 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709,  PCI_ANY_ID,  PCI_ANY_ID,
185 		"Broadcom NetXtreme II BCM5709 1000Base-T" },
186 
187 	/* BCM5709S controllers and OEM boards. */
188 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709S,  HP_VENDORID, 0x171d,
189 		"HP NC382m DP 1GbE Multifunction BL-c Adapter" },
190 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709S,  HP_VENDORID, 0x7056,
191 		"HP NC382i DP Multifunction Gigabit Server Adapter" },
192 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709S,  PCI_ANY_ID,  PCI_ANY_ID,
193 		"Broadcom NetXtreme II BCM5709 1000Base-SX" },
194 
195 	/* BCM5716 controllers and OEM boards. */
196 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5716,   PCI_ANY_ID,  PCI_ANY_ID,
197 		"Broadcom NetXtreme II BCM5716 1000Base-T" },
198 
199 	{ 0, 0, 0, 0, NULL }
200 };
201 
202 /****************************************************************************/
203 /* Supported Flash NVRAM device data.                                       */
204 /****************************************************************************/
205 static const struct flash_spec flash_table[] =
206 {
207 #define BUFFERED_FLAGS		(BCE_NV_BUFFERED | BCE_NV_TRANSLATE)
208 #define NONBUFFERED_FLAGS	(BCE_NV_WREN)
209 
210 	/* Slow EEPROM */
211 	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
212 	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
213 	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
214 	 "EEPROM - slow"},
215 	/* Expansion entry 0001 */
216 	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
217 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
218 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
219 	 "Entry 0001"},
220 	/* Saifun SA25F010 (non-buffered flash) */
221 	/* strap, cfg1, & write1 need updates */
222 	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
223 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
224 	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
225 	 "Non-buffered flash (128kB)"},
226 	/* Saifun SA25F020 (non-buffered flash) */
227 	/* strap, cfg1, & write1 need updates */
228 	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
229 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
230 	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
231 	 "Non-buffered flash (256kB)"},
232 	/* Expansion entry 0100 */
233 	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
234 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
235 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
236 	 "Entry 0100"},
237 	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
238 	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
239 	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
240 	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
241 	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
242 	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
243 	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
244 	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
245 	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
246 	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
247 	/* Saifun SA25F005 (non-buffered flash) */
248 	/* strap, cfg1, & write1 need updates */
249 	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
250 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
251 	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
252 	 "Non-buffered flash (64kB)"},
253 	/* Fast EEPROM */
254 	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
255 	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
256 	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
257 	 "EEPROM - fast"},
258 	/* Expansion entry 1001 */
259 	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
260 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
261 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
262 	 "Entry 1001"},
263 	/* Expansion entry 1010 */
264 	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
265 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
266 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
267 	 "Entry 1010"},
268 	/* ATMEL AT45DB011B (buffered flash) */
269 	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
270 	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
271 	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
272 	 "Buffered flash (128kB)"},
273 	/* Expansion entry 1100 */
274 	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
275 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
276 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
277 	 "Entry 1100"},
278 	/* Expansion entry 1101 */
279 	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
280 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
281 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
282 	 "Entry 1101"},
283 	/* Ateml Expansion entry 1110 */
284 	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
285 	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
286 	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
287 	 "Entry 1110 (Atmel)"},
288 	/* ATMEL AT45DB021B (buffered flash) */
289 	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
290 	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
291 	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
292 	 "Buffered flash (256kB)"},
293 };
294 
295 /*
296  * The BCM5709 controllers transparently handle the
297  * differences between Atmel 264 byte pages and all
298  * flash devices which use 256 byte pages, so no
299  * logical-to-physical mapping is required in the
300  * driver.
301  */
302 static struct flash_spec flash_5709 = {
303 	.flags		= BCE_NV_BUFFERED,
304 	.page_bits	= BCM5709_FLASH_PAGE_BITS,
305 	.page_size	= BCM5709_FLASH_PAGE_SIZE,
306 	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
307 	.total_size	= BUFFERED_FLASH_TOTAL_SIZE * 2,
308 	.name		= "5709/5716 buffered flash (256kB)",
309 };
310 
311 /****************************************************************************/
312 /* DragonFly device entry points.                                           */
313 /****************************************************************************/
314 static int	bce_probe(device_t);
315 static int	bce_attach(device_t);
316 static int	bce_detach(device_t);
317 static void	bce_shutdown(device_t);
318 static int	bce_miibus_read_reg(device_t, int, int);
319 static int	bce_miibus_write_reg(device_t, int, int, int);
320 static void	bce_miibus_statchg(device_t);
321 
322 /****************************************************************************/
323 /* BCE Register/Memory Access Routines                                      */
324 /****************************************************************************/
325 static uint32_t	bce_reg_rd_ind(struct bce_softc *, uint32_t);
326 static void	bce_reg_wr_ind(struct bce_softc *, uint32_t, uint32_t);
327 static void	bce_shmem_wr(struct bce_softc *, uint32_t, uint32_t);
328 static uint32_t	bce_shmem_rd(struct bce_softc *, u32);
329 static void	bce_ctx_wr(struct bce_softc *, uint32_t, uint32_t, uint32_t);
330 
331 /****************************************************************************/
332 /* BCE NVRAM Access Routines                                                */
333 /****************************************************************************/
334 static int	bce_acquire_nvram_lock(struct bce_softc *);
335 static int	bce_release_nvram_lock(struct bce_softc *);
336 static void	bce_enable_nvram_access(struct bce_softc *);
337 static void	bce_disable_nvram_access(struct bce_softc *);
338 static int	bce_nvram_read_dword(struct bce_softc *, uint32_t, uint8_t *,
339 		    uint32_t);
340 static int	bce_init_nvram(struct bce_softc *);
341 static int	bce_nvram_read(struct bce_softc *, uint32_t, uint8_t *, int);
342 static int	bce_nvram_test(struct bce_softc *);
343 
344 /****************************************************************************/
345 /* BCE DMA Allocate/Free Routines                                           */
346 /****************************************************************************/
347 static int	bce_dma_alloc(struct bce_softc *);
348 static void	bce_dma_free(struct bce_softc *);
349 static void	bce_dma_map_addr(void *, bus_dma_segment_t *, int, int);
350 
351 /****************************************************************************/
352 /* BCE Firmware Synchronization and Load                                    */
353 /****************************************************************************/
354 static int	bce_fw_sync(struct bce_softc *, uint32_t);
355 static void	bce_load_rv2p_fw(struct bce_softc *, uint32_t *,
356 		    uint32_t, uint32_t);
357 static void	bce_load_cpu_fw(struct bce_softc *, struct cpu_reg *,
358 		    struct fw_info *);
359 static void	bce_start_cpu(struct bce_softc *, struct cpu_reg *);
360 static void	bce_halt_cpu(struct bce_softc *, struct cpu_reg *);
361 static void	bce_start_rxp_cpu(struct bce_softc *);
362 static void	bce_init_rxp_cpu(struct bce_softc *);
363 static void	bce_init_txp_cpu(struct bce_softc *);
364 static void	bce_init_tpat_cpu(struct bce_softc *);
365 static void	bce_init_cp_cpu(struct bce_softc *);
366 static void	bce_init_com_cpu(struct bce_softc *);
367 static void	bce_init_cpus(struct bce_softc *);
368 static void	bce_setup_msix_table(struct bce_softc *);
369 static void	bce_init_rss(struct bce_softc *);
370 
371 static void	bce_stop(struct bce_softc *);
372 static int	bce_reset(struct bce_softc *, uint32_t);
373 static int	bce_chipinit(struct bce_softc *);
374 static int	bce_blockinit(struct bce_softc *);
375 static void	bce_probe_pci_caps(struct bce_softc *);
376 static void	bce_print_adapter_info(struct bce_softc *);
377 static void	bce_get_media(struct bce_softc *);
378 static void	bce_mgmt_init(struct bce_softc *);
379 static int	bce_init_ctx(struct bce_softc *);
380 static void	bce_get_mac_addr(struct bce_softc *);
381 static void	bce_set_mac_addr(struct bce_softc *);
382 static void	bce_set_rx_mode(struct bce_softc *);
383 static void	bce_coal_change(struct bce_softc *);
384 static void	bce_npoll_coal_change(struct bce_softc *);
385 static void	bce_setup_serialize(struct bce_softc *);
386 static void	bce_serialize_skipmain(struct bce_softc *);
387 static void	bce_deserialize_skipmain(struct bce_softc *);
388 static void	bce_set_timer_cpuid(struct bce_softc *, boolean_t);
389 static int	bce_alloc_intr(struct bce_softc *);
390 static void	bce_free_intr(struct bce_softc *);
391 static void	bce_try_alloc_msix(struct bce_softc *);
392 static void	bce_free_msix(struct bce_softc *, boolean_t);
393 static void	bce_setup_ring_cnt(struct bce_softc *);
394 static int	bce_setup_intr(struct bce_softc *);
395 static void	bce_teardown_intr(struct bce_softc *);
396 static int	bce_setup_msix(struct bce_softc *);
397 static void	bce_teardown_msix(struct bce_softc *, int);
398 
399 static int	bce_create_tx_ring(struct bce_tx_ring *);
400 static void	bce_destroy_tx_ring(struct bce_tx_ring *);
401 static void	bce_init_tx_context(struct bce_tx_ring *);
402 static int	bce_init_tx_chain(struct bce_tx_ring *);
403 static void	bce_free_tx_chain(struct bce_tx_ring *);
404 static void	bce_xmit(struct bce_tx_ring *);
405 static int	bce_encap(struct bce_tx_ring *, struct mbuf **, int *);
406 static int	bce_tso_setup(struct bce_tx_ring *, struct mbuf **,
407 		    uint16_t *, uint16_t *);
408 
409 static int	bce_create_rx_ring(struct bce_rx_ring *);
410 static void	bce_destroy_rx_ring(struct bce_rx_ring *);
411 static void	bce_init_rx_context(struct bce_rx_ring *);
412 static int	bce_init_rx_chain(struct bce_rx_ring *);
413 static void	bce_free_rx_chain(struct bce_rx_ring *);
414 static int	bce_newbuf_std(struct bce_rx_ring *, uint16_t *, uint16_t,
415 		    uint32_t *, int);
416 static void	bce_setup_rxdesc_std(struct bce_rx_ring *, uint16_t,
417 		    uint32_t *);
418 static struct pktinfo *bce_rss_pktinfo(struct pktinfo *, uint32_t,
419 		    const struct l2_fhdr *);
420 
421 static void	bce_start(struct ifnet *, struct ifaltq_subque *);
422 static int	bce_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
423 static void	bce_watchdog(struct ifaltq_subque *);
424 static int	bce_ifmedia_upd(struct ifnet *);
425 static void	bce_ifmedia_sts(struct ifnet *, struct ifmediareq *);
426 static void	bce_init(void *);
427 #ifdef IFPOLL_ENABLE
428 static void	bce_npoll(struct ifnet *, struct ifpoll_info *);
429 static void	bce_npoll_rx(struct ifnet *, void *, int);
430 static void	bce_npoll_tx(struct ifnet *, void *, int);
431 static void	bce_npoll_status(struct ifnet *);
432 static void	bce_npoll_rx_pack(struct ifnet *, void *, int);
433 #endif
434 static void	bce_serialize(struct ifnet *, enum ifnet_serialize);
435 static void	bce_deserialize(struct ifnet *, enum ifnet_serialize);
436 static int	bce_tryserialize(struct ifnet *, enum ifnet_serialize);
437 #ifdef INVARIANTS
438 static void	bce_serialize_assert(struct ifnet *, enum ifnet_serialize,
439 		    boolean_t);
440 #endif
441 
442 static void	bce_intr(struct bce_softc *);
443 static void	bce_intr_legacy(void *);
444 static void	bce_intr_msi(void *);
445 static void	bce_intr_msi_oneshot(void *);
446 static void	bce_intr_msix_rxtx(void *);
447 static void	bce_intr_msix_rx(void *);
448 static void	bce_tx_intr(struct bce_tx_ring *, uint16_t);
449 static void	bce_rx_intr(struct bce_rx_ring *, int, uint16_t);
450 static void	bce_phy_intr(struct bce_softc *);
451 static void	bce_disable_intr(struct bce_softc *);
452 static void	bce_enable_intr(struct bce_softc *);
453 static void	bce_reenable_intr(struct bce_rx_ring *);
454 static void	bce_check_msi(void *);
455 
456 static void	bce_stats_update(struct bce_softc *);
457 static void	bce_tick(void *);
458 static void	bce_tick_serialized(struct bce_softc *);
459 static void	bce_pulse(void *);
460 
461 static void	bce_add_sysctls(struct bce_softc *);
462 static int	bce_sysctl_tx_bds_int(SYSCTL_HANDLER_ARGS);
463 static int	bce_sysctl_tx_bds(SYSCTL_HANDLER_ARGS);
464 static int	bce_sysctl_tx_ticks_int(SYSCTL_HANDLER_ARGS);
465 static int	bce_sysctl_tx_ticks(SYSCTL_HANDLER_ARGS);
466 static int	bce_sysctl_rx_bds_int(SYSCTL_HANDLER_ARGS);
467 static int	bce_sysctl_rx_bds(SYSCTL_HANDLER_ARGS);
468 static int	bce_sysctl_rx_ticks_int(SYSCTL_HANDLER_ARGS);
469 static int	bce_sysctl_rx_ticks(SYSCTL_HANDLER_ARGS);
470 static int	bce_sysctl_coal_change(SYSCTL_HANDLER_ARGS,
471 		    uint32_t *, uint32_t);
472 
473 /*
474  * NOTE:
475  * Don't set bce_tx_ticks_int/bce_tx_ticks to 1023.  Linux's bnx2
476  * takes 1023 as the TX ticks limit.  However, using 1023 will
477  * cause 5708(B2) to generate extra interrupts (~2000/s) even when
478  * there is _no_ network activity on the NIC.
479  */
480 static uint32_t	bce_tx_bds_int = 255;		/* bcm: 20 */
481 static uint32_t	bce_tx_bds = 255;		/* bcm: 20 */
482 static uint32_t	bce_tx_ticks_int = 1022;	/* bcm: 80 */
483 static uint32_t	bce_tx_ticks = 1022;		/* bcm: 80 */
484 static uint32_t	bce_rx_bds_int = 128;		/* bcm: 6 */
485 static uint32_t	bce_rx_bds = 0;			/* bcm: 6 */
486 static uint32_t	bce_rx_ticks_int = 150;		/* bcm: 18 */
487 static uint32_t	bce_rx_ticks = 150;		/* bcm: 18 */
488 
489 static int	bce_tx_wreg = 8;
490 
491 static int	bce_msi_enable = 1;
492 static int	bce_msix_enable = 1;
493 
494 static int	bce_rx_pages = RX_PAGES_DEFAULT;
495 static int	bce_tx_pages = TX_PAGES_DEFAULT;
496 
497 static int	bce_rx_rings = 0;	/* auto */
498 static int	bce_tx_rings = 0;	/* auto */
499 
500 TUNABLE_INT("hw.bce.tx_bds_int", &bce_tx_bds_int);
501 TUNABLE_INT("hw.bce.tx_bds", &bce_tx_bds);
502 TUNABLE_INT("hw.bce.tx_ticks_int", &bce_tx_ticks_int);
503 TUNABLE_INT("hw.bce.tx_ticks", &bce_tx_ticks);
504 TUNABLE_INT("hw.bce.rx_bds_int", &bce_rx_bds_int);
505 TUNABLE_INT("hw.bce.rx_bds", &bce_rx_bds);
506 TUNABLE_INT("hw.bce.rx_ticks_int", &bce_rx_ticks_int);
507 TUNABLE_INT("hw.bce.rx_ticks", &bce_rx_ticks);
508 TUNABLE_INT("hw.bce.msi.enable", &bce_msi_enable);
509 TUNABLE_INT("hw.bce.msix.enable", &bce_msix_enable);
510 TUNABLE_INT("hw.bce.rx_pages", &bce_rx_pages);
511 TUNABLE_INT("hw.bce.tx_pages", &bce_tx_pages);
512 TUNABLE_INT("hw.bce.tx_wreg", &bce_tx_wreg);
513 TUNABLE_INT("hw.bce.tx_rings", &bce_tx_rings);
514 TUNABLE_INT("hw.bce.rx_rings", &bce_rx_rings);
515 
516 /****************************************************************************/
517 /* DragonFly device dispatch table.                                         */
518 /****************************************************************************/
519 static device_method_t bce_methods[] = {
520 	/* Device interface */
521 	DEVMETHOD(device_probe,		bce_probe),
522 	DEVMETHOD(device_attach,	bce_attach),
523 	DEVMETHOD(device_detach,	bce_detach),
524 	DEVMETHOD(device_shutdown,	bce_shutdown),
525 
526 	/* bus interface */
527 	DEVMETHOD(bus_print_child,	bus_generic_print_child),
528 	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
529 
530 	/* MII interface */
531 	DEVMETHOD(miibus_readreg,	bce_miibus_read_reg),
532 	DEVMETHOD(miibus_writereg,	bce_miibus_write_reg),
533 	DEVMETHOD(miibus_statchg,	bce_miibus_statchg),
534 
535 	DEVMETHOD_END
536 };
537 
538 static driver_t bce_driver = {
539 	"bce",
540 	bce_methods,
541 	sizeof(struct bce_softc)
542 };
543 
544 static devclass_t bce_devclass;
545 
546 DECLARE_DUMMY_MODULE(if_bce);
547 MODULE_DEPEND(bce, miibus, 1, 1, 1);
548 DRIVER_MODULE(if_bce, pci, bce_driver, bce_devclass, NULL, NULL);
549 DRIVER_MODULE(miibus, bce, miibus_driver, miibus_devclass, NULL, NULL);
550 
551 /****************************************************************************/
552 /* Device probe function.                                                   */
553 /*                                                                          */
554 /* Compares the device to the driver's list of supported devices and        */
555 /* reports back to the OS whether this is the right driver for the device.  */
556 /*                                                                          */
557 /* Returns:                                                                 */
558 /*   BUS_PROBE_DEFAULT on success, positive value on failure.               */
559 /****************************************************************************/
560 static int
561 bce_probe(device_t dev)
562 {
563 	struct bce_type *t;
564 	uint16_t vid, did, svid, sdid;
565 
566 	/* Get the data for the device to be probed. */
567 	vid  = pci_get_vendor(dev);
568 	did  = pci_get_device(dev);
569 	svid = pci_get_subvendor(dev);
570 	sdid = pci_get_subdevice(dev);
571 
572 	/* Look through the list of known devices for a match. */
573 	for (t = bce_devs; t->bce_name != NULL; ++t) {
574 		if (vid == t->bce_vid && did == t->bce_did &&
575 		    (svid == t->bce_svid || t->bce_svid == PCI_ANY_ID) &&
576 		    (sdid == t->bce_sdid || t->bce_sdid == PCI_ANY_ID)) {
577 		    	uint32_t revid = pci_read_config(dev, PCIR_REVID, 4);
578 			char *descbuf;
579 
580 			descbuf = kmalloc(BCE_DEVDESC_MAX, M_TEMP, M_WAITOK);
581 
582 			/* Print out the device identity. */
583 			ksnprintf(descbuf, BCE_DEVDESC_MAX, "%s (%c%d)",
584 				  t->bce_name,
585 				  ((revid & 0xf0) >> 4) + 'A', revid & 0xf);
586 
587 			device_set_desc_copy(dev, descbuf);
588 			kfree(descbuf, M_TEMP);
589 			return 0;
590 		}
591 	}
592 	return ENXIO;
593 }
594 
595 /****************************************************************************/
596 /* PCI Capabilities Probe Function.                                         */
597 /*                                                                          */
598 /* Walks the PCI capabiites list for the device to find what features are   */
599 /* supported.                                                               */
600 /*                                                                          */
601 /* Returns:                                                                 */
602 /*   None.                                                                  */
603 /****************************************************************************/
604 static void
605 bce_print_adapter_info(struct bce_softc *sc)
606 {
607 	device_printf(sc->bce_dev, "ASIC (0x%08X); ", sc->bce_chipid);
608 
609 	kprintf("Rev (%c%d); ", ((BCE_CHIP_ID(sc) & 0xf000) >> 12) + 'A',
610 		((BCE_CHIP_ID(sc) & 0x0ff0) >> 4));
611 
612 	/* Bus info. */
613 	if (sc->bce_flags & BCE_PCIE_FLAG) {
614 		kprintf("Bus (PCIe x%d, ", sc->link_width);
615 		switch (sc->link_speed) {
616 		case 1:
617 			kprintf("2.5Gbps); ");
618 			break;
619 		case 2:
620 			kprintf("5Gbps); ");
621 			break;
622 		default:
623 			kprintf("Unknown link speed); ");
624 			break;
625 		}
626 	} else {
627 		kprintf("Bus (PCI%s, %s, %dMHz); ",
628 		    ((sc->bce_flags & BCE_PCIX_FLAG) ? "-X" : ""),
629 		    ((sc->bce_flags & BCE_PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
630 		    sc->bus_speed_mhz);
631 	}
632 
633 	/* Firmware version and device features. */
634 	kprintf("B/C (%s)", sc->bce_bc_ver);
635 
636 	if ((sc->bce_flags & BCE_MFW_ENABLE_FLAG) ||
637 	    (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG)) {
638 		kprintf("; Flags(");
639 		if (sc->bce_flags & BCE_MFW_ENABLE_FLAG)
640 			kprintf("MFW[%s]", sc->bce_mfw_ver);
641 		if (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG)
642 			kprintf(" 2.5G");
643 		kprintf(")");
644 	}
645 	kprintf("\n");
646 }
647 
648 /****************************************************************************/
649 /* PCI Capabilities Probe Function.                                         */
650 /*                                                                          */
651 /* Walks the PCI capabiites list for the device to find what features are   */
652 /* supported.                                                               */
653 /*                                                                          */
654 /* Returns:                                                                 */
655 /*   None.                                                                  */
656 /****************************************************************************/
657 static void
658 bce_probe_pci_caps(struct bce_softc *sc)
659 {
660 	device_t dev = sc->bce_dev;
661 	uint8_t ptr;
662 
663 	if (pci_is_pcix(dev))
664 		sc->bce_cap_flags |= BCE_PCIX_CAPABLE_FLAG;
665 
666 	ptr = pci_get_pciecap_ptr(dev);
667 	if (ptr) {
668 		uint16_t link_status = pci_read_config(dev, ptr + 0x12, 2);
669 
670 		sc->link_speed = link_status & 0xf;
671 		sc->link_width = (link_status >> 4) & 0x3f;
672 		sc->bce_cap_flags |= BCE_PCIE_CAPABLE_FLAG;
673 		sc->bce_flags |= BCE_PCIE_FLAG;
674 	}
675 }
676 
677 /****************************************************************************/
678 /* Device attach function.                                                  */
679 /*                                                                          */
680 /* Allocates device resources, performs secondary chip identification,      */
681 /* resets and initializes the hardware, and initializes driver instance     */
682 /* variables.                                                               */
683 /*                                                                          */
684 /* Returns:                                                                 */
685 /*   0 on success, positive value on failure.                               */
686 /****************************************************************************/
687 static int
688 bce_attach(device_t dev)
689 {
690 	struct bce_softc *sc = device_get_softc(dev);
691 	struct ifnet *ifp = &sc->arpcom.ac_if;
692 	uint32_t val;
693 	int rid, rc = 0;
694 	int i, j;
695 	struct mii_probe_args mii_args;
696 	uintptr_t mii_priv = 0;
697 
698 	sc->bce_dev = dev;
699 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
700 
701 	lwkt_serialize_init(&sc->main_serialize);
702 	for (i = 0; i < BCE_MSIX_MAX; ++i) {
703 		struct bce_msix_data *msix = &sc->bce_msix[i];
704 
705 		msix->msix_cpuid = -1;
706 		msix->msix_rid = -1;
707 	}
708 
709 	pci_enable_busmaster(dev);
710 
711 	bce_probe_pci_caps(sc);
712 
713 	/* Allocate PCI memory resources. */
714 	rid = PCIR_BAR(0);
715 	sc->bce_res_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
716 						 RF_ACTIVE | PCI_RF_DENSE);
717 	if (sc->bce_res_mem == NULL) {
718 		device_printf(dev, "PCI memory allocation failed\n");
719 		return ENXIO;
720 	}
721 	sc->bce_btag = rman_get_bustag(sc->bce_res_mem);
722 	sc->bce_bhandle = rman_get_bushandle(sc->bce_res_mem);
723 
724 	/*
725 	 * Configure byte swap and enable indirect register access.
726 	 * Rely on CPU to do target byte swapping on big endian systems.
727 	 * Access to registers outside of PCI configurtion space are not
728 	 * valid until this is done.
729 	 */
730 	pci_write_config(dev, BCE_PCICFG_MISC_CONFIG,
731 			 BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
732 			 BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP, 4);
733 
734 	/* Save ASIC revsion info. */
735 	sc->bce_chipid =  REG_RD(sc, BCE_MISC_ID);
736 
737 	/* Weed out any non-production controller revisions. */
738 	switch (BCE_CHIP_ID(sc)) {
739 	case BCE_CHIP_ID_5706_A0:
740 	case BCE_CHIP_ID_5706_A1:
741 	case BCE_CHIP_ID_5708_A0:
742 	case BCE_CHIP_ID_5708_B0:
743 	case BCE_CHIP_ID_5709_A0:
744 	case BCE_CHIP_ID_5709_B0:
745 	case BCE_CHIP_ID_5709_B1:
746 #ifdef foo
747 	/* 5709C B2 seems to work fine */
748 	case BCE_CHIP_ID_5709_B2:
749 #endif
750 		device_printf(dev, "Unsupported chip id 0x%08x!\n",
751 			      BCE_CHIP_ID(sc));
752 		rc = ENODEV;
753 		goto fail;
754 	}
755 
756 	mii_priv |= BRGPHY_FLAG_WIRESPEED;
757 	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) {
758 		if (BCE_CHIP_REV(sc) == BCE_CHIP_REV_Ax ||
759 		    BCE_CHIP_REV(sc) == BCE_CHIP_REV_Bx)
760 			mii_priv |= BRGPHY_FLAG_NO_EARLYDAC;
761 	} else {
762 		mii_priv |= BRGPHY_FLAG_BER_BUG;
763 	}
764 
765 	/*
766 	 * Find the base address for shared memory access.
767 	 * Newer versions of bootcode use a signature and offset
768 	 * while older versions use a fixed address.
769 	 */
770 	val = REG_RD_IND(sc, BCE_SHM_HDR_SIGNATURE);
771 	if ((val & BCE_SHM_HDR_SIGNATURE_SIG_MASK) ==
772 	    BCE_SHM_HDR_SIGNATURE_SIG) {
773 		/* Multi-port devices use different offsets in shared memory. */
774 		sc->bce_shmem_base = REG_RD_IND(sc,
775 		    BCE_SHM_HDR_ADDR_0 + (pci_get_function(sc->bce_dev) << 2));
776 	} else {
777 		sc->bce_shmem_base = HOST_VIEW_SHMEM_BASE;
778 	}
779 
780 	/* Fetch the bootcode revision. */
781 	val = bce_shmem_rd(sc, BCE_DEV_INFO_BC_REV);
782 	for (i = 0, j = 0; i < 3; i++) {
783 		uint8_t num;
784 		int k, skip0;
785 
786 		num = (uint8_t)(val >> (24 - (i * 8)));
787 		for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
788 			if (num >= k || !skip0 || k == 1) {
789 				sc->bce_bc_ver[j++] = (num / k) + '0';
790 				skip0 = 0;
791 			}
792 		}
793 		if (i != 2)
794 			sc->bce_bc_ver[j++] = '.';
795 	}
796 
797 	/* Check if any management firwmare is running. */
798 	val = bce_shmem_rd(sc, BCE_PORT_FEATURE);
799 	if (val & BCE_PORT_FEATURE_ASF_ENABLED) {
800 		sc->bce_flags |= BCE_MFW_ENABLE_FLAG;
801 
802 		/* Allow time for firmware to enter the running state. */
803 		for (i = 0; i < 30; i++) {
804 			val = bce_shmem_rd(sc, BCE_BC_STATE_CONDITION);
805 			if (val & BCE_CONDITION_MFW_RUN_MASK)
806 				break;
807 			DELAY(10000);
808 		}
809 	}
810 
811 	/* Check the current bootcode state. */
812 	val = bce_shmem_rd(sc, BCE_BC_STATE_CONDITION) &
813 	    BCE_CONDITION_MFW_RUN_MASK;
814 	if (val != BCE_CONDITION_MFW_RUN_UNKNOWN &&
815 	    val != BCE_CONDITION_MFW_RUN_NONE) {
816 		uint32_t addr = bce_shmem_rd(sc, BCE_MFW_VER_PTR);
817 
818 		for (i = 0, j = 0; j < 3; j++) {
819 			val = bce_reg_rd_ind(sc, addr + j * 4);
820 			val = bswap32(val);
821 			memcpy(&sc->bce_mfw_ver[i], &val, 4);
822 			i += 4;
823 		}
824 	}
825 
826 	/* Get PCI bus information (speed and type). */
827 	val = REG_RD(sc, BCE_PCICFG_MISC_STATUS);
828 	if (val & BCE_PCICFG_MISC_STATUS_PCIX_DET) {
829 		uint32_t clkreg;
830 
831 		sc->bce_flags |= BCE_PCIX_FLAG;
832 
833 		clkreg = REG_RD(sc, BCE_PCICFG_PCI_CLOCK_CONTROL_BITS) &
834 			 BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
835 		switch (clkreg) {
836 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
837 			sc->bus_speed_mhz = 133;
838 			break;
839 
840 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
841 			sc->bus_speed_mhz = 100;
842 			break;
843 
844 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
845 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
846 			sc->bus_speed_mhz = 66;
847 			break;
848 
849 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
850 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
851 			sc->bus_speed_mhz = 50;
852 			break;
853 
854 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
855 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
856 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
857 			sc->bus_speed_mhz = 33;
858 			break;
859 		}
860 	} else {
861 		if (val & BCE_PCICFG_MISC_STATUS_M66EN)
862 			sc->bus_speed_mhz = 66;
863 		else
864 			sc->bus_speed_mhz = 33;
865 	}
866 
867 	if (val & BCE_PCICFG_MISC_STATUS_32BIT_DET)
868 		sc->bce_flags |= BCE_PCI_32BIT_FLAG;
869 
870 	/* Reset the controller. */
871 	rc = bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
872 	if (rc != 0)
873 		goto fail;
874 
875 	/* Initialize the controller. */
876 	rc = bce_chipinit(sc);
877 	if (rc != 0) {
878 		device_printf(dev, "Controller initialization failed!\n");
879 		goto fail;
880 	}
881 
882 	/* Perform NVRAM test. */
883 	rc = bce_nvram_test(sc);
884 	if (rc != 0) {
885 		device_printf(dev, "NVRAM test failed!\n");
886 		goto fail;
887 	}
888 
889 	/* Fetch the permanent Ethernet MAC address. */
890 	bce_get_mac_addr(sc);
891 
892 	/*
893 	 * Trip points control how many BDs
894 	 * should be ready before generating an
895 	 * interrupt while ticks control how long
896 	 * a BD can sit in the chain before
897 	 * generating an interrupt.  Set the default
898 	 * values for the RX and TX rings.
899 	 */
900 
901 #ifdef BCE_DRBUG
902 	/* Force more frequent interrupts. */
903 	sc->bce_tx_quick_cons_trip_int = 1;
904 	sc->bce_tx_quick_cons_trip     = 1;
905 	sc->bce_tx_ticks_int           = 0;
906 	sc->bce_tx_ticks               = 0;
907 
908 	sc->bce_rx_quick_cons_trip_int = 1;
909 	sc->bce_rx_quick_cons_trip     = 1;
910 	sc->bce_rx_ticks_int           = 0;
911 	sc->bce_rx_ticks               = 0;
912 #else
913 	sc->bce_tx_quick_cons_trip_int = bce_tx_bds_int;
914 	sc->bce_tx_quick_cons_trip     = bce_tx_bds;
915 	sc->bce_tx_ticks_int           = bce_tx_ticks_int;
916 	sc->bce_tx_ticks               = bce_tx_ticks;
917 
918 	sc->bce_rx_quick_cons_trip_int = bce_rx_bds_int;
919 	sc->bce_rx_quick_cons_trip     = bce_rx_bds;
920 	sc->bce_rx_ticks_int           = bce_rx_ticks_int;
921 	sc->bce_rx_ticks               = bce_rx_ticks;
922 #endif
923 
924 	/* Update statistics once every second. */
925 	sc->bce_stats_ticks = 1000000 & 0xffff00;
926 
927 	/* Find the media type for the adapter. */
928 	bce_get_media(sc);
929 
930 	/* Find out RX/TX ring count */
931 	bce_setup_ring_cnt(sc);
932 
933 	/* Allocate DMA memory resources. */
934 	rc = bce_dma_alloc(sc);
935 	if (rc != 0) {
936 		device_printf(dev, "DMA resource allocation failed!\n");
937 		goto fail;
938 	}
939 
940 	/* Allocate PCI IRQ resources. */
941 	rc = bce_alloc_intr(sc);
942 	if (rc != 0)
943 		goto fail;
944 
945 	/* Setup serializer */
946 	bce_setup_serialize(sc);
947 
948 	/* Initialize the ifnet interface. */
949 	ifp->if_softc = sc;
950 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
951 	ifp->if_ioctl = bce_ioctl;
952 	ifp->if_start = bce_start;
953 	ifp->if_init = bce_init;
954 	ifp->if_serialize = bce_serialize;
955 	ifp->if_deserialize = bce_deserialize;
956 	ifp->if_tryserialize = bce_tryserialize;
957 #ifdef INVARIANTS
958 	ifp->if_serialize_assert = bce_serialize_assert;
959 #endif
960 #ifdef IFPOLL_ENABLE
961 	ifp->if_npoll = bce_npoll;
962 #endif
963 
964 	ifp->if_mtu = ETHERMTU;
965 	ifp->if_hwassist = BCE_CSUM_FEATURES | CSUM_TSO;
966 	ifp->if_capabilities = BCE_IF_CAPABILITIES;
967 	if (sc->rx_ring_cnt > 1)
968 		ifp->if_capabilities |= IFCAP_RSS;
969 	ifp->if_capenable = ifp->if_capabilities;
970 
971 	if (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG)
972 		ifp->if_baudrate = IF_Mbps(2500ULL);
973 	else
974 		ifp->if_baudrate = IF_Mbps(1000ULL);
975 
976 	ifp->if_nmbclusters = sc->rx_ring_cnt * USABLE_RX_BD(&sc->rx_rings[0]);
977 
978 	ifq_set_maxlen(&ifp->if_snd, USABLE_TX_BD(&sc->tx_rings[0]));
979 	ifq_set_ready(&ifp->if_snd);
980 	ifq_set_subq_cnt(&ifp->if_snd, sc->tx_ring_cnt);
981 
982 	if (sc->tx_ring_cnt > 1) {
983 		ifp->if_mapsubq = ifq_mapsubq_modulo;
984 		ifq_set_subq_divisor(&ifp->if_snd, sc->tx_ring_cnt);
985 	}
986 
987 	/*
988 	 * Look for our PHY.
989 	 */
990 	mii_probe_args_init(&mii_args, bce_ifmedia_upd, bce_ifmedia_sts);
991 	mii_args.mii_probemask = 1 << sc->bce_phy_addr;
992 	mii_args.mii_privtag = MII_PRIVTAG_BRGPHY;
993 	mii_args.mii_priv = mii_priv;
994 
995 	rc = mii_probe(dev, &sc->bce_miibus, &mii_args);
996 	if (rc != 0) {
997 		device_printf(dev, "PHY probe failed!\n");
998 		goto fail;
999 	}
1000 
1001 	/* Attach to the Ethernet interface list. */
1002 	ether_ifattach(ifp, sc->eaddr, NULL);
1003 
1004 	/* Setup TX rings and subqueues */
1005 	for (i = 0; i < sc->tx_ring_cnt; ++i) {
1006 		struct ifaltq_subque *ifsq = ifq_get_subq(&ifp->if_snd, i);
1007 		struct bce_tx_ring *txr = &sc->tx_rings[i];
1008 
1009 		ifsq_set_cpuid(ifsq, sc->bce_msix[i].msix_cpuid);
1010 		ifsq_set_priv(ifsq, txr);
1011 		ifsq_set_hw_serialize(ifsq, &txr->tx_serialize);
1012 		txr->ifsq = ifsq;
1013 
1014 		ifsq_watchdog_init(&txr->tx_watchdog, ifsq, bce_watchdog, 0);
1015 	}
1016 
1017 	callout_init_mp(&sc->bce_tick_callout);
1018 	callout_init_mp(&sc->bce_pulse_callout);
1019 	callout_init_mp(&sc->bce_ckmsi_callout);
1020 
1021 	rc = bce_setup_intr(sc);
1022 	if (rc != 0) {
1023 		device_printf(dev, "Failed to setup IRQ!\n");
1024 		ether_ifdetach(ifp);
1025 		goto fail;
1026 	}
1027 
1028 	/* Set timer CPUID */
1029 	bce_set_timer_cpuid(sc, FALSE);
1030 
1031 	/* Add the supported sysctls to the kernel. */
1032 	bce_add_sysctls(sc);
1033 
1034 	/*
1035 	 * The chip reset earlier notified the bootcode that
1036 	 * a driver is present.  We now need to start our pulse
1037 	 * routine so that the bootcode is reminded that we're
1038 	 * still running.
1039 	 */
1040 	bce_pulse(sc);
1041 
1042 	/* Get the firmware running so IPMI still works */
1043 	bce_mgmt_init(sc);
1044 
1045 	if (bootverbose)
1046 		bce_print_adapter_info(sc);
1047 
1048 	return 0;
1049 fail:
1050 	bce_detach(dev);
1051 	return(rc);
1052 }
1053 
1054 /****************************************************************************/
1055 /* Device detach function.                                                  */
1056 /*                                                                          */
1057 /* Stops the controller, resets the controller, and releases resources.     */
1058 /*                                                                          */
1059 /* Returns:                                                                 */
1060 /*   0 on success, positive value on failure.                               */
1061 /****************************************************************************/
1062 static int
1063 bce_detach(device_t dev)
1064 {
1065 	struct bce_softc *sc = device_get_softc(dev);
1066 
1067 	if (device_is_attached(dev)) {
1068 		struct ifnet *ifp = &sc->arpcom.ac_if;
1069 		uint32_t msg;
1070 
1071 		ifnet_serialize_all(ifp);
1072 
1073 		/* Stop and reset the controller. */
1074 		callout_stop(&sc->bce_pulse_callout);
1075 		bce_stop(sc);
1076 		if (sc->bce_flags & BCE_NO_WOL_FLAG)
1077 			msg = BCE_DRV_MSG_CODE_UNLOAD_LNK_DN;
1078 		else
1079 			msg = BCE_DRV_MSG_CODE_UNLOAD;
1080 		bce_reset(sc, msg);
1081 
1082 		bce_teardown_intr(sc);
1083 
1084 		ifnet_deserialize_all(ifp);
1085 
1086 		ether_ifdetach(ifp);
1087 	}
1088 
1089 	/* If we have a child device on the MII bus remove it too. */
1090 	if (sc->bce_miibus)
1091 		device_delete_child(dev, sc->bce_miibus);
1092 	bus_generic_detach(dev);
1093 
1094 	bce_free_intr(sc);
1095 
1096 	if (sc->bce_res_mem != NULL) {
1097 		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0),
1098 				     sc->bce_res_mem);
1099 	}
1100 
1101 	bce_dma_free(sc);
1102 
1103 	if (sc->serializes != NULL)
1104 		kfree(sc->serializes, M_DEVBUF);
1105 
1106 	if (sc->tx_rmap != NULL)
1107 		if_ringmap_free(sc->tx_rmap);
1108 	if (sc->rx_rmap != NULL)
1109 		if_ringmap_free(sc->rx_rmap);
1110 
1111 	return 0;
1112 }
1113 
1114 /****************************************************************************/
1115 /* Device shutdown function.                                                */
1116 /*                                                                          */
1117 /* Stops and resets the controller.                                         */
1118 /*                                                                          */
1119 /* Returns:                                                                 */
1120 /*   Nothing                                                                */
1121 /****************************************************************************/
1122 static void
1123 bce_shutdown(device_t dev)
1124 {
1125 	struct bce_softc *sc = device_get_softc(dev);
1126 	struct ifnet *ifp = &sc->arpcom.ac_if;
1127 	uint32_t msg;
1128 
1129 	ifnet_serialize_all(ifp);
1130 
1131 	bce_stop(sc);
1132 	if (sc->bce_flags & BCE_NO_WOL_FLAG)
1133 		msg = BCE_DRV_MSG_CODE_UNLOAD_LNK_DN;
1134 	else
1135 		msg = BCE_DRV_MSG_CODE_UNLOAD;
1136 	bce_reset(sc, msg);
1137 
1138 	ifnet_deserialize_all(ifp);
1139 }
1140 
1141 /****************************************************************************/
1142 /* Indirect register read.                                                  */
1143 /*                                                                          */
1144 /* Reads NetXtreme II registers using an index/data register pair in PCI    */
1145 /* configuration space.  Using this mechanism avoids issues with posted     */
1146 /* reads but is much slower than memory-mapped I/O.                         */
1147 /*                                                                          */
1148 /* Returns:                                                                 */
1149 /*   The value of the register.                                             */
1150 /****************************************************************************/
1151 static uint32_t
1152 bce_reg_rd_ind(struct bce_softc *sc, uint32_t offset)
1153 {
1154 	device_t dev = sc->bce_dev;
1155 
1156 	pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4);
1157 	return pci_read_config(dev, BCE_PCICFG_REG_WINDOW, 4);
1158 }
1159 
1160 /****************************************************************************/
1161 /* Indirect register write.                                                 */
1162 /*                                                                          */
1163 /* Writes NetXtreme II registers using an index/data register pair in PCI   */
1164 /* configuration space.  Using this mechanism avoids issues with posted     */
1165 /* writes but is muchh slower than memory-mapped I/O.                       */
1166 /*                                                                          */
1167 /* Returns:                                                                 */
1168 /*   Nothing.                                                               */
1169 /****************************************************************************/
1170 static void
1171 bce_reg_wr_ind(struct bce_softc *sc, uint32_t offset, uint32_t val)
1172 {
1173 	device_t dev = sc->bce_dev;
1174 
1175 	pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4);
1176 	pci_write_config(dev, BCE_PCICFG_REG_WINDOW, val, 4);
1177 }
1178 
1179 /****************************************************************************/
1180 /* Shared memory write.                                                     */
1181 /*                                                                          */
1182 /* Writes NetXtreme II shared memory region.                                */
1183 /*                                                                          */
1184 /* Returns:                                                                 */
1185 /*   Nothing.                                                               */
1186 /****************************************************************************/
1187 static void
1188 bce_shmem_wr(struct bce_softc *sc, uint32_t offset, uint32_t val)
1189 {
1190 	bce_reg_wr_ind(sc, sc->bce_shmem_base + offset, val);
1191 }
1192 
1193 /****************************************************************************/
1194 /* Shared memory read.                                                      */
1195 /*                                                                          */
1196 /* Reads NetXtreme II shared memory region.                                 */
1197 /*                                                                          */
1198 /* Returns:                                                                 */
1199 /*   The 32 bit value read.                                                 */
1200 /****************************************************************************/
1201 static u32
1202 bce_shmem_rd(struct bce_softc *sc, uint32_t offset)
1203 {
1204 	return bce_reg_rd_ind(sc, sc->bce_shmem_base + offset);
1205 }
1206 
1207 /****************************************************************************/
1208 /* Context memory write.                                                    */
1209 /*                                                                          */
1210 /* The NetXtreme II controller uses context memory to track connection      */
1211 /* information for L2 and higher network protocols.                         */
1212 /*                                                                          */
1213 /* Returns:                                                                 */
1214 /*   Nothing.                                                               */
1215 /****************************************************************************/
1216 static void
1217 bce_ctx_wr(struct bce_softc *sc, uint32_t cid_addr, uint32_t ctx_offset,
1218     uint32_t ctx_val)
1219 {
1220 	uint32_t idx, offset = ctx_offset + cid_addr;
1221 	uint32_t val, retry_cnt = 5;
1222 
1223 	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
1224 	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
1225 		REG_WR(sc, BCE_CTX_CTX_DATA, ctx_val);
1226 		REG_WR(sc, BCE_CTX_CTX_CTRL, (offset | BCE_CTX_CTX_CTRL_WRITE_REQ));
1227 
1228 		for (idx = 0; idx < retry_cnt; idx++) {
1229 			val = REG_RD(sc, BCE_CTX_CTX_CTRL);
1230 			if ((val & BCE_CTX_CTX_CTRL_WRITE_REQ) == 0)
1231 				break;
1232 			DELAY(5);
1233 		}
1234 
1235 		if (val & BCE_CTX_CTX_CTRL_WRITE_REQ) {
1236 			device_printf(sc->bce_dev,
1237 			    "Unable to write CTX memory: "
1238 			    "cid_addr = 0x%08X, offset = 0x%08X!\n",
1239 			    cid_addr, ctx_offset);
1240 		}
1241 	} else {
1242 		REG_WR(sc, BCE_CTX_DATA_ADR, offset);
1243 		REG_WR(sc, BCE_CTX_DATA, ctx_val);
1244 	}
1245 }
1246 
1247 /****************************************************************************/
1248 /* PHY register read.                                                       */
1249 /*                                                                          */
1250 /* Implements register reads on the MII bus.                                */
1251 /*                                                                          */
1252 /* Returns:                                                                 */
1253 /*   The value of the register.                                             */
1254 /****************************************************************************/
1255 static int
1256 bce_miibus_read_reg(device_t dev, int phy, int reg)
1257 {
1258 	struct bce_softc *sc = device_get_softc(dev);
1259 	uint32_t val;
1260 	int i;
1261 
1262 	/* Make sure we are accessing the correct PHY address. */
1263 	KASSERT(phy == sc->bce_phy_addr,
1264 	    ("invalid phyno %d, should be %d\n", phy, sc->bce_phy_addr));
1265 
1266 	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1267 		val = REG_RD(sc, BCE_EMAC_MDIO_MODE);
1268 		val &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL;
1269 
1270 		REG_WR(sc, BCE_EMAC_MDIO_MODE, val);
1271 		REG_RD(sc, BCE_EMAC_MDIO_MODE);
1272 
1273 		DELAY(40);
1274 	}
1275 
1276 	val = BCE_MIPHY(phy) | BCE_MIREG(reg) |
1277 	      BCE_EMAC_MDIO_COMM_COMMAND_READ | BCE_EMAC_MDIO_COMM_DISEXT |
1278 	      BCE_EMAC_MDIO_COMM_START_BUSY;
1279 	REG_WR(sc, BCE_EMAC_MDIO_COMM, val);
1280 
1281 	for (i = 0; i < BCE_PHY_TIMEOUT; i++) {
1282 		DELAY(10);
1283 
1284 		val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
1285 		if (!(val & BCE_EMAC_MDIO_COMM_START_BUSY)) {
1286 			DELAY(5);
1287 
1288 			val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
1289 			val &= BCE_EMAC_MDIO_COMM_DATA;
1290 			break;
1291 		}
1292 	}
1293 
1294 	if (val & BCE_EMAC_MDIO_COMM_START_BUSY) {
1295 		if_printf(&sc->arpcom.ac_if,
1296 			  "Error: PHY read timeout! phy = %d, reg = 0x%04X\n",
1297 			  phy, reg);
1298 		val = 0x0;
1299 	} else {
1300 		val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
1301 	}
1302 
1303 	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1304 		val = REG_RD(sc, BCE_EMAC_MDIO_MODE);
1305 		val |= BCE_EMAC_MDIO_MODE_AUTO_POLL;
1306 
1307 		REG_WR(sc, BCE_EMAC_MDIO_MODE, val);
1308 		REG_RD(sc, BCE_EMAC_MDIO_MODE);
1309 
1310 		DELAY(40);
1311 	}
1312 	return (val & 0xffff);
1313 }
1314 
1315 /****************************************************************************/
1316 /* PHY register write.                                                      */
1317 /*                                                                          */
1318 /* Implements register writes on the MII bus.                               */
1319 /*                                                                          */
1320 /* Returns:                                                                 */
1321 /*   The value of the register.                                             */
1322 /****************************************************************************/
1323 static int
1324 bce_miibus_write_reg(device_t dev, int phy, int reg, int val)
1325 {
1326 	struct bce_softc *sc = device_get_softc(dev);
1327 	uint32_t val1;
1328 	int i;
1329 
1330 	/* Make sure we are accessing the correct PHY address. */
1331 	KASSERT(phy == sc->bce_phy_addr,
1332 	    ("invalid phyno %d, should be %d\n", phy, sc->bce_phy_addr));
1333 
1334 	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1335 		val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE);
1336 		val1 &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL;
1337 
1338 		REG_WR(sc, BCE_EMAC_MDIO_MODE, val1);
1339 		REG_RD(sc, BCE_EMAC_MDIO_MODE);
1340 
1341 		DELAY(40);
1342 	}
1343 
1344 	val1 = BCE_MIPHY(phy) | BCE_MIREG(reg) | val |
1345 		BCE_EMAC_MDIO_COMM_COMMAND_WRITE |
1346 		BCE_EMAC_MDIO_COMM_START_BUSY | BCE_EMAC_MDIO_COMM_DISEXT;
1347 	REG_WR(sc, BCE_EMAC_MDIO_COMM, val1);
1348 
1349 	for (i = 0; i < BCE_PHY_TIMEOUT; i++) {
1350 		DELAY(10);
1351 
1352 		val1 = REG_RD(sc, BCE_EMAC_MDIO_COMM);
1353 		if (!(val1 & BCE_EMAC_MDIO_COMM_START_BUSY)) {
1354 			DELAY(5);
1355 			break;
1356 		}
1357 	}
1358 
1359 	if (val1 & BCE_EMAC_MDIO_COMM_START_BUSY)
1360 		if_printf(&sc->arpcom.ac_if, "PHY write timeout!\n");
1361 
1362 	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1363 		val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE);
1364 		val1 |= BCE_EMAC_MDIO_MODE_AUTO_POLL;
1365 
1366 		REG_WR(sc, BCE_EMAC_MDIO_MODE, val1);
1367 		REG_RD(sc, BCE_EMAC_MDIO_MODE);
1368 
1369 		DELAY(40);
1370 	}
1371 	return 0;
1372 }
1373 
1374 /****************************************************************************/
1375 /* MII bus status change.                                                   */
1376 /*                                                                          */
1377 /* Called by the MII bus driver when the PHY establishes link to set the    */
1378 /* MAC interface registers.                                                 */
1379 /*                                                                          */
1380 /* Returns:                                                                 */
1381 /*   Nothing.                                                               */
1382 /****************************************************************************/
1383 static void
1384 bce_miibus_statchg(device_t dev)
1385 {
1386 	struct bce_softc *sc = device_get_softc(dev);
1387 	struct mii_data *mii = device_get_softc(sc->bce_miibus);
1388 
1389 	BCE_CLRBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT);
1390 
1391 	/*
1392 	 * Set MII or GMII interface based on the speed negotiated
1393 	 * by the PHY.
1394 	 */
1395 	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
1396 	    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) {
1397 		BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT_GMII);
1398 	} else {
1399 		BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT_MII);
1400 	}
1401 
1402 	/*
1403 	 * Set half or full duplex based on the duplicity negotiated
1404 	 * by the PHY.
1405 	 */
1406 	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
1407 		BCE_CLRBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_HALF_DUPLEX);
1408 	} else {
1409 		BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_HALF_DUPLEX);
1410 	}
1411 }
1412 
1413 /****************************************************************************/
1414 /* Acquire NVRAM lock.                                                      */
1415 /*                                                                          */
1416 /* Before the NVRAM can be accessed the caller must acquire an NVRAM lock.  */
1417 /* Locks 0 and 2 are reserved, lock 1 is used by firmware and lock 2 is     */
1418 /* for use by the driver.                                                   */
1419 /*                                                                          */
1420 /* Returns:                                                                 */
1421 /*   0 on success, positive value on failure.                               */
1422 /****************************************************************************/
1423 static int
1424 bce_acquire_nvram_lock(struct bce_softc *sc)
1425 {
1426 	uint32_t val;
1427 	int j;
1428 
1429 	/* Request access to the flash interface. */
1430 	REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_SET2);
1431 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1432 		val = REG_RD(sc, BCE_NVM_SW_ARB);
1433 		if (val & BCE_NVM_SW_ARB_ARB_ARB2)
1434 			break;
1435 
1436 		DELAY(5);
1437 	}
1438 
1439 	if (j >= NVRAM_TIMEOUT_COUNT) {
1440 		return EBUSY;
1441 	}
1442 	return 0;
1443 }
1444 
1445 /****************************************************************************/
1446 /* Release NVRAM lock.                                                      */
1447 /*                                                                          */
1448 /* When the caller is finished accessing NVRAM the lock must be released.   */
1449 /* Locks 0 and 2 are reserved, lock 1 is used by firmware and lock 2 is     */
1450 /* for use by the driver.                                                   */
1451 /*                                                                          */
1452 /* Returns:                                                                 */
1453 /*   0 on success, positive value on failure.                               */
1454 /****************************************************************************/
1455 static int
1456 bce_release_nvram_lock(struct bce_softc *sc)
1457 {
1458 	int j;
1459 	uint32_t val;
1460 
1461 	/*
1462 	 * Relinquish nvram interface.
1463 	 */
1464 	REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_CLR2);
1465 
1466 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1467 		val = REG_RD(sc, BCE_NVM_SW_ARB);
1468 		if (!(val & BCE_NVM_SW_ARB_ARB_ARB2))
1469 			break;
1470 
1471 		DELAY(5);
1472 	}
1473 
1474 	if (j >= NVRAM_TIMEOUT_COUNT) {
1475 		return EBUSY;
1476 	}
1477 	return 0;
1478 }
1479 
1480 /****************************************************************************/
1481 /* Enable NVRAM access.                                                     */
1482 /*                                                                          */
1483 /* Before accessing NVRAM for read or write operations the caller must      */
1484 /* enabled NVRAM access.                                                    */
1485 /*                                                                          */
1486 /* Returns:                                                                 */
1487 /*   Nothing.                                                               */
1488 /****************************************************************************/
1489 static void
1490 bce_enable_nvram_access(struct bce_softc *sc)
1491 {
1492 	uint32_t val;
1493 
1494 	val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE);
1495 	/* Enable both bits, even on read. */
1496 	REG_WR(sc, BCE_NVM_ACCESS_ENABLE,
1497 	       val | BCE_NVM_ACCESS_ENABLE_EN | BCE_NVM_ACCESS_ENABLE_WR_EN);
1498 }
1499 
1500 /****************************************************************************/
1501 /* Disable NVRAM access.                                                    */
1502 /*                                                                          */
1503 /* When the caller is finished accessing NVRAM access must be disabled.     */
1504 /*                                                                          */
1505 /* Returns:                                                                 */
1506 /*   Nothing.                                                               */
1507 /****************************************************************************/
1508 static void
1509 bce_disable_nvram_access(struct bce_softc *sc)
1510 {
1511 	uint32_t val;
1512 
1513 	val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE);
1514 
1515 	/* Disable both bits, even after read. */
1516 	REG_WR(sc, BCE_NVM_ACCESS_ENABLE,
1517 	       val & ~(BCE_NVM_ACCESS_ENABLE_EN | BCE_NVM_ACCESS_ENABLE_WR_EN));
1518 }
1519 
1520 /****************************************************************************/
1521 /* Read a dword (32 bits) from NVRAM.                                       */
1522 /*                                                                          */
1523 /* Read a 32 bit word from NVRAM.  The caller is assumed to have already    */
1524 /* obtained the NVRAM lock and enabled the controller for NVRAM access.     */
1525 /*                                                                          */
1526 /* Returns:                                                                 */
1527 /*   0 on success and the 32 bit value read, positive value on failure.     */
1528 /****************************************************************************/
1529 static int
1530 bce_nvram_read_dword(struct bce_softc *sc, uint32_t offset, uint8_t *ret_val,
1531 		     uint32_t cmd_flags)
1532 {
1533 	uint32_t cmd;
1534 	int i, rc = 0;
1535 
1536 	/* Build the command word. */
1537 	cmd = BCE_NVM_COMMAND_DOIT | cmd_flags;
1538 
1539 	/* Calculate the offset for buffered flash. */
1540 	if (sc->bce_flash_info->flags & BCE_NV_TRANSLATE) {
1541 		offset = ((offset / sc->bce_flash_info->page_size) <<
1542 			  sc->bce_flash_info->page_bits) +
1543 			 (offset % sc->bce_flash_info->page_size);
1544 	}
1545 
1546 	/*
1547 	 * Clear the DONE bit separately, set the address to read,
1548 	 * and issue the read.
1549 	 */
1550 	REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
1551 	REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
1552 	REG_WR(sc, BCE_NVM_COMMAND, cmd);
1553 
1554 	/* Wait for completion. */
1555 	for (i = 0; i < NVRAM_TIMEOUT_COUNT; i++) {
1556 		uint32_t val;
1557 
1558 		DELAY(5);
1559 
1560 		val = REG_RD(sc, BCE_NVM_COMMAND);
1561 		if (val & BCE_NVM_COMMAND_DONE) {
1562 			val = REG_RD(sc, BCE_NVM_READ);
1563 
1564 			val = be32toh(val);
1565 			memcpy(ret_val, &val, 4);
1566 			break;
1567 		}
1568 	}
1569 
1570 	/* Check for errors. */
1571 	if (i >= NVRAM_TIMEOUT_COUNT) {
1572 		if_printf(&sc->arpcom.ac_if,
1573 			  "Timeout error reading NVRAM at offset 0x%08X!\n",
1574 			  offset);
1575 		rc = EBUSY;
1576 	}
1577 	return rc;
1578 }
1579 
1580 /****************************************************************************/
1581 /* Initialize NVRAM access.                                                 */
1582 /*                                                                          */
1583 /* Identify the NVRAM device in use and prepare the NVRAM interface to      */
1584 /* access that device.                                                      */
1585 /*                                                                          */
1586 /* Returns:                                                                 */
1587 /*   0 on success, positive value on failure.                               */
1588 /****************************************************************************/
1589 static int
1590 bce_init_nvram(struct bce_softc *sc)
1591 {
1592 	uint32_t val;
1593 	int j, entry_count, rc = 0;
1594 	const struct flash_spec *flash;
1595 
1596 	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
1597 	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
1598 		sc->bce_flash_info = &flash_5709;
1599 		goto bce_init_nvram_get_flash_size;
1600 	}
1601 
1602 	/* Determine the selected interface. */
1603 	val = REG_RD(sc, BCE_NVM_CFG1);
1604 
1605 	entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
1606 
1607 	/*
1608 	 * Flash reconfiguration is required to support additional
1609 	 * NVRAM devices not directly supported in hardware.
1610 	 * Check if the flash interface was reconfigured
1611 	 * by the bootcode.
1612 	 */
1613 
1614 	if (val & 0x40000000) {
1615 		/* Flash interface reconfigured by bootcode. */
1616 		for (j = 0, flash = flash_table; j < entry_count;
1617 		     j++, flash++) {
1618 			if ((val & FLASH_BACKUP_STRAP_MASK) ==
1619 			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
1620 				sc->bce_flash_info = flash;
1621 				break;
1622 			}
1623 		}
1624 	} else {
1625 		/* Flash interface not yet reconfigured. */
1626 		uint32_t mask;
1627 
1628 		if (val & (1 << 23))
1629 			mask = FLASH_BACKUP_STRAP_MASK;
1630 		else
1631 			mask = FLASH_STRAP_MASK;
1632 
1633 		/* Look for the matching NVRAM device configuration data. */
1634 		for (j = 0, flash = flash_table; j < entry_count;
1635 		     j++, flash++) {
1636 			/* Check if the device matches any of the known devices. */
1637 			if ((val & mask) == (flash->strapping & mask)) {
1638 				/* Found a device match. */
1639 				sc->bce_flash_info = flash;
1640 
1641 				/* Request access to the flash interface. */
1642 				rc = bce_acquire_nvram_lock(sc);
1643 				if (rc != 0)
1644 					return rc;
1645 
1646 				/* Reconfigure the flash interface. */
1647 				bce_enable_nvram_access(sc);
1648 				REG_WR(sc, BCE_NVM_CFG1, flash->config1);
1649 				REG_WR(sc, BCE_NVM_CFG2, flash->config2);
1650 				REG_WR(sc, BCE_NVM_CFG3, flash->config3);
1651 				REG_WR(sc, BCE_NVM_WRITE1, flash->write1);
1652 				bce_disable_nvram_access(sc);
1653 				bce_release_nvram_lock(sc);
1654 				break;
1655 			}
1656 		}
1657 	}
1658 
1659 	/* Check if a matching device was found. */
1660 	if (j == entry_count) {
1661 		sc->bce_flash_info = NULL;
1662 		if_printf(&sc->arpcom.ac_if, "Unknown Flash NVRAM found!\n");
1663 		return ENODEV;
1664 	}
1665 
1666 bce_init_nvram_get_flash_size:
1667 	/* Write the flash config data to the shared memory interface. */
1668 	val = bce_shmem_rd(sc, BCE_SHARED_HW_CFG_CONFIG2) &
1669 	    BCE_SHARED_HW_CFG2_NVM_SIZE_MASK;
1670 	if (val)
1671 		sc->bce_flash_size = val;
1672 	else
1673 		sc->bce_flash_size = sc->bce_flash_info->total_size;
1674 
1675 	return rc;
1676 }
1677 
1678 /****************************************************************************/
1679 /* Read an arbitrary range of data from NVRAM.                              */
1680 /*                                                                          */
1681 /* Prepares the NVRAM interface for access and reads the requested data     */
1682 /* into the supplied buffer.                                                */
1683 /*                                                                          */
1684 /* Returns:                                                                 */
1685 /*   0 on success and the data read, positive value on failure.             */
1686 /****************************************************************************/
1687 static int
1688 bce_nvram_read(struct bce_softc *sc, uint32_t offset, uint8_t *ret_buf,
1689 	       int buf_size)
1690 {
1691 	uint32_t cmd_flags, offset32, len32, extra;
1692 	int rc = 0;
1693 
1694 	if (buf_size == 0)
1695 		return 0;
1696 
1697 	/* Request access to the flash interface. */
1698 	rc = bce_acquire_nvram_lock(sc);
1699 	if (rc != 0)
1700 		return rc;
1701 
1702 	/* Enable access to flash interface */
1703 	bce_enable_nvram_access(sc);
1704 
1705 	len32 = buf_size;
1706 	offset32 = offset;
1707 	extra = 0;
1708 
1709 	cmd_flags = 0;
1710 
1711 	/* XXX should we release nvram lock if read_dword() fails? */
1712 	if (offset32 & 3) {
1713 		uint8_t buf[4];
1714 		uint32_t pre_len;
1715 
1716 		offset32 &= ~3;
1717 		pre_len = 4 - (offset & 3);
1718 
1719 		if (pre_len >= len32) {
1720 			pre_len = len32;
1721 			cmd_flags = BCE_NVM_COMMAND_FIRST | BCE_NVM_COMMAND_LAST;
1722 		} else {
1723 			cmd_flags = BCE_NVM_COMMAND_FIRST;
1724 		}
1725 
1726 		rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
1727 		if (rc)
1728 			return rc;
1729 
1730 		memcpy(ret_buf, buf + (offset & 3), pre_len);
1731 
1732 		offset32 += 4;
1733 		ret_buf += pre_len;
1734 		len32 -= pre_len;
1735 	}
1736 
1737 	if (len32 & 3) {
1738 		extra = 4 - (len32 & 3);
1739 		len32 = (len32 + 4) & ~3;
1740 	}
1741 
1742 	if (len32 == 4) {
1743 		uint8_t buf[4];
1744 
1745 		if (cmd_flags)
1746 			cmd_flags = BCE_NVM_COMMAND_LAST;
1747 		else
1748 			cmd_flags = BCE_NVM_COMMAND_FIRST |
1749 				    BCE_NVM_COMMAND_LAST;
1750 
1751 		rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
1752 
1753 		memcpy(ret_buf, buf, 4 - extra);
1754 	} else if (len32 > 0) {
1755 		uint8_t buf[4];
1756 
1757 		/* Read the first word. */
1758 		if (cmd_flags)
1759 			cmd_flags = 0;
1760 		else
1761 			cmd_flags = BCE_NVM_COMMAND_FIRST;
1762 
1763 		rc = bce_nvram_read_dword(sc, offset32, ret_buf, cmd_flags);
1764 
1765 		/* Advance to the next dword. */
1766 		offset32 += 4;
1767 		ret_buf += 4;
1768 		len32 -= 4;
1769 
1770 		while (len32 > 4 && rc == 0) {
1771 			rc = bce_nvram_read_dword(sc, offset32, ret_buf, 0);
1772 
1773 			/* Advance to the next dword. */
1774 			offset32 += 4;
1775 			ret_buf += 4;
1776 			len32 -= 4;
1777 		}
1778 
1779 		if (rc)
1780 			goto bce_nvram_read_locked_exit;
1781 
1782 		cmd_flags = BCE_NVM_COMMAND_LAST;
1783 		rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
1784 
1785 		memcpy(ret_buf, buf, 4 - extra);
1786 	}
1787 
1788 bce_nvram_read_locked_exit:
1789 	/* Disable access to flash interface and release the lock. */
1790 	bce_disable_nvram_access(sc);
1791 	bce_release_nvram_lock(sc);
1792 
1793 	return rc;
1794 }
1795 
1796 /****************************************************************************/
1797 /* Verifies that NVRAM is accessible and contains valid data.               */
1798 /*                                                                          */
1799 /* Reads the configuration data from NVRAM and verifies that the CRC is     */
1800 /* correct.                                                                 */
1801 /*                                                                          */
1802 /* Returns:                                                                 */
1803 /*   0 on success, positive value on failure.                               */
1804 /****************************************************************************/
1805 static int
1806 bce_nvram_test(struct bce_softc *sc)
1807 {
1808 	uint32_t buf[BCE_NVRAM_SIZE / 4];
1809 	uint32_t magic, csum;
1810 	uint8_t *data = (uint8_t *)buf;
1811 	int rc = 0;
1812 
1813 	/*
1814 	 * Check that the device NVRAM is valid by reading
1815 	 * the magic value at offset 0.
1816 	 */
1817 	rc = bce_nvram_read(sc, 0, data, 4);
1818 	if (rc != 0)
1819 		return rc;
1820 
1821 	magic = be32toh(buf[0]);
1822 	if (magic != BCE_NVRAM_MAGIC) {
1823 		if_printf(&sc->arpcom.ac_if,
1824 			  "Invalid NVRAM magic value! Expected: 0x%08X, "
1825 			  "Found: 0x%08X\n", BCE_NVRAM_MAGIC, magic);
1826 		return ENODEV;
1827 	}
1828 
1829 	/*
1830 	 * Verify that the device NVRAM includes valid
1831 	 * configuration data.
1832 	 */
1833 	rc = bce_nvram_read(sc, 0x100, data, BCE_NVRAM_SIZE);
1834 	if (rc != 0)
1835 		return rc;
1836 
1837 	csum = ether_crc32_le(data, 0x100);
1838 	if (csum != BCE_CRC32_RESIDUAL) {
1839 		if_printf(&sc->arpcom.ac_if,
1840 			  "Invalid Manufacturing Information NVRAM CRC! "
1841 			  "Expected: 0x%08X, Found: 0x%08X\n",
1842 			  BCE_CRC32_RESIDUAL, csum);
1843 		return ENODEV;
1844 	}
1845 
1846 	csum = ether_crc32_le(data + 0x100, 0x100);
1847 	if (csum != BCE_CRC32_RESIDUAL) {
1848 		if_printf(&sc->arpcom.ac_if,
1849 			  "Invalid Feature Configuration Information "
1850 			  "NVRAM CRC! Expected: 0x%08X, Found: 08%08X\n",
1851 			  BCE_CRC32_RESIDUAL, csum);
1852 		rc = ENODEV;
1853 	}
1854 	return rc;
1855 }
1856 
1857 /****************************************************************************/
1858 /* Identifies the current media type of the controller and sets the PHY     */
1859 /* address.                                                                 */
1860 /*                                                                          */
1861 /* Returns:                                                                 */
1862 /*   Nothing.                                                               */
1863 /****************************************************************************/
1864 static void
1865 bce_get_media(struct bce_softc *sc)
1866 {
1867 	uint32_t val;
1868 
1869 	sc->bce_phy_addr = 1;
1870 
1871 	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
1872 	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
1873  		uint32_t val = REG_RD(sc, BCE_MISC_DUAL_MEDIA_CTRL);
1874 		uint32_t bond_id = val & BCE_MISC_DUAL_MEDIA_CTRL_BOND_ID;
1875 		uint32_t strap;
1876 
1877 		/*
1878 		 * The BCM5709S is software configurable
1879 		 * for Copper or SerDes operation.
1880 		 */
1881 		if (bond_id == BCE_MISC_DUAL_MEDIA_CTRL_BOND_ID_C) {
1882 			return;
1883 		} else if (bond_id == BCE_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
1884 			sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
1885 			return;
1886 		}
1887 
1888 		if (val & BCE_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE) {
1889 			strap = (val & BCE_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
1890 		} else {
1891 			strap =
1892 			(val & BCE_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
1893 		}
1894 
1895 		if (pci_get_function(sc->bce_dev) == 0) {
1896 			switch (strap) {
1897 			case 0x4:
1898 			case 0x5:
1899 			case 0x6:
1900 				sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
1901 				break;
1902 			}
1903 		} else {
1904 			switch (strap) {
1905 			case 0x1:
1906 			case 0x2:
1907 			case 0x4:
1908 				sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
1909 				break;
1910 			}
1911 		}
1912 	} else if (BCE_CHIP_BOND_ID(sc) & BCE_CHIP_BOND_ID_SERDES_BIT) {
1913 		sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
1914 	}
1915 
1916 	if (sc->bce_phy_flags & BCE_PHY_SERDES_FLAG) {
1917 		sc->bce_flags |= BCE_NO_WOL_FLAG;
1918 		if (BCE_CHIP_NUM(sc) != BCE_CHIP_NUM_5706) {
1919 			sc->bce_phy_addr = 2;
1920 			val = bce_shmem_rd(sc, BCE_SHARED_HW_CFG_CONFIG);
1921 			if (val & BCE_SHARED_HW_CFG_PHY_2_5G)
1922 				sc->bce_phy_flags |= BCE_PHY_2_5G_CAPABLE_FLAG;
1923 		}
1924 	} else if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) ||
1925 	    (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708)) {
1926 		sc->bce_phy_flags |= BCE_PHY_CRC_FIX_FLAG;
1927 	}
1928 }
1929 
1930 static void
1931 bce_destroy_tx_ring(struct bce_tx_ring *txr)
1932 {
1933 	int i;
1934 
1935 	/* Destroy the TX buffer descriptor DMA stuffs. */
1936 	if (txr->tx_bd_chain_tag != NULL) {
1937 		for (i = 0; i < txr->tx_pages; i++) {
1938 			if (txr->tx_bd_chain[i] != NULL) {
1939 				bus_dmamap_unload(txr->tx_bd_chain_tag,
1940 				    txr->tx_bd_chain_map[i]);
1941 				bus_dmamem_free(txr->tx_bd_chain_tag,
1942 				    txr->tx_bd_chain[i],
1943 				    txr->tx_bd_chain_map[i]);
1944 			}
1945 		}
1946 		bus_dma_tag_destroy(txr->tx_bd_chain_tag);
1947 	}
1948 
1949 	/* Destroy the TX mbuf DMA stuffs. */
1950 	if (txr->tx_mbuf_tag != NULL) {
1951 		for (i = 0; i < TOTAL_TX_BD(txr); i++) {
1952 			/* Must have been unloaded in bce_stop() */
1953 			KKASSERT(txr->tx_bufs[i].tx_mbuf_ptr == NULL);
1954 			bus_dmamap_destroy(txr->tx_mbuf_tag,
1955 			    txr->tx_bufs[i].tx_mbuf_map);
1956 		}
1957 		bus_dma_tag_destroy(txr->tx_mbuf_tag);
1958 	}
1959 
1960 	if (txr->tx_bd_chain_map != NULL)
1961 		kfree(txr->tx_bd_chain_map, M_DEVBUF);
1962 	if (txr->tx_bd_chain != NULL)
1963 		kfree(txr->tx_bd_chain, M_DEVBUF);
1964 	if (txr->tx_bd_chain_paddr != NULL)
1965 		kfree(txr->tx_bd_chain_paddr, M_DEVBUF);
1966 
1967 	if (txr->tx_bufs != NULL)
1968 		kfree(txr->tx_bufs, M_DEVBUF);
1969 }
1970 
1971 static void
1972 bce_destroy_rx_ring(struct bce_rx_ring *rxr)
1973 {
1974 	int i;
1975 
1976 	/* Destroy the RX buffer descriptor DMA stuffs. */
1977 	if (rxr->rx_bd_chain_tag != NULL) {
1978 		for (i = 0; i < rxr->rx_pages; i++) {
1979 			if (rxr->rx_bd_chain[i] != NULL) {
1980 				bus_dmamap_unload(rxr->rx_bd_chain_tag,
1981 				    rxr->rx_bd_chain_map[i]);
1982 				bus_dmamem_free(rxr->rx_bd_chain_tag,
1983 				    rxr->rx_bd_chain[i],
1984 				    rxr->rx_bd_chain_map[i]);
1985 			}
1986 		}
1987 		bus_dma_tag_destroy(rxr->rx_bd_chain_tag);
1988 	}
1989 
1990 	/* Destroy the RX mbuf DMA stuffs. */
1991 	if (rxr->rx_mbuf_tag != NULL) {
1992 		for (i = 0; i < TOTAL_RX_BD(rxr); i++) {
1993 			/* Must have been unloaded in bce_stop() */
1994 			KKASSERT(rxr->rx_bufs[i].rx_mbuf_ptr == NULL);
1995 			bus_dmamap_destroy(rxr->rx_mbuf_tag,
1996 			    rxr->rx_bufs[i].rx_mbuf_map);
1997 		}
1998 		bus_dmamap_destroy(rxr->rx_mbuf_tag, rxr->rx_mbuf_tmpmap);
1999 		bus_dma_tag_destroy(rxr->rx_mbuf_tag);
2000 	}
2001 
2002 	if (rxr->rx_bd_chain_map != NULL)
2003 		kfree(rxr->rx_bd_chain_map, M_DEVBUF);
2004 	if (rxr->rx_bd_chain != NULL)
2005 		kfree(rxr->rx_bd_chain, M_DEVBUF);
2006 	if (rxr->rx_bd_chain_paddr != NULL)
2007 		kfree(rxr->rx_bd_chain_paddr, M_DEVBUF);
2008 
2009 	if (rxr->rx_bufs != NULL)
2010 		kfree(rxr->rx_bufs, M_DEVBUF);
2011 }
2012 
2013 /****************************************************************************/
2014 /* Free any DMA memory owned by the driver.                                 */
2015 /*                                                                          */
2016 /* Scans through each data structre that requires DMA memory and frees      */
2017 /* the memory if allocated.                                                 */
2018 /*                                                                          */
2019 /* Returns:                                                                 */
2020 /*   Nothing.                                                               */
2021 /****************************************************************************/
2022 static void
2023 bce_dma_free(struct bce_softc *sc)
2024 {
2025 	int i;
2026 
2027 	/* Destroy the status block. */
2028 	if (sc->status_tag != NULL) {
2029 		if (sc->status_block != NULL) {
2030 			bus_dmamap_unload(sc->status_tag, sc->status_map);
2031 			bus_dmamem_free(sc->status_tag, sc->status_block,
2032 					sc->status_map);
2033 		}
2034 		bus_dma_tag_destroy(sc->status_tag);
2035 	}
2036 
2037 	/* Destroy the statistics block. */
2038 	if (sc->stats_tag != NULL) {
2039 		if (sc->stats_block != NULL) {
2040 			bus_dmamap_unload(sc->stats_tag, sc->stats_map);
2041 			bus_dmamem_free(sc->stats_tag, sc->stats_block,
2042 					sc->stats_map);
2043 		}
2044 		bus_dma_tag_destroy(sc->stats_tag);
2045 	}
2046 
2047 	/* Destroy the CTX DMA stuffs. */
2048 	if (sc->ctx_tag != NULL) {
2049 		for (i = 0; i < sc->ctx_pages; i++) {
2050 			if (sc->ctx_block[i] != NULL) {
2051 				bus_dmamap_unload(sc->ctx_tag, sc->ctx_map[i]);
2052 				bus_dmamem_free(sc->ctx_tag, sc->ctx_block[i],
2053 						sc->ctx_map[i]);
2054 			}
2055 		}
2056 		bus_dma_tag_destroy(sc->ctx_tag);
2057 	}
2058 
2059 	/* Free TX rings */
2060 	if (sc->tx_rings != NULL) {
2061 		for (i = 0; i < sc->tx_ring_cnt; ++i)
2062 			bce_destroy_tx_ring(&sc->tx_rings[i]);
2063 		kfree(sc->tx_rings, M_DEVBUF);
2064 	}
2065 
2066 	/* Free RX rings */
2067 	if (sc->rx_rings != NULL) {
2068 		for (i = 0; i < sc->rx_ring_cnt; ++i)
2069 			bce_destroy_rx_ring(&sc->rx_rings[i]);
2070 		kfree(sc->rx_rings, M_DEVBUF);
2071 	}
2072 
2073 	/* Destroy the parent tag */
2074 	if (sc->parent_tag != NULL)
2075 		bus_dma_tag_destroy(sc->parent_tag);
2076 }
2077 
2078 /****************************************************************************/
2079 /* Get DMA memory from the OS.                                              */
2080 /*                                                                          */
2081 /* Validates that the OS has provided DMA buffers in response to a          */
2082 /* bus_dmamap_load() call and saves the physical address of those buffers.  */
2083 /* When the callback is used the OS will return 0 for the mapping function  */
2084 /* (bus_dmamap_load()) so we use the value of map_arg->maxsegs to pass any  */
2085 /* failures back to the caller.                                             */
2086 /*                                                                          */
2087 /* Returns:                                                                 */
2088 /*   Nothing.                                                               */
2089 /****************************************************************************/
2090 static void
2091 bce_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2092 {
2093 	bus_addr_t *busaddr = arg;
2094 
2095 	/* Check for an error and signal the caller that an error occurred. */
2096 	if (error)
2097 		return;
2098 
2099 	KASSERT(nseg == 1, ("only one segment is allowed"));
2100 	*busaddr = segs->ds_addr;
2101 }
2102 
2103 static int
2104 bce_create_tx_ring(struct bce_tx_ring *txr)
2105 {
2106 	int pages, rc, i;
2107 
2108 	lwkt_serialize_init(&txr->tx_serialize);
2109 	txr->tx_wreg = bce_tx_wreg;
2110 
2111 	pages = device_getenv_int(txr->sc->bce_dev, "tx_pages", bce_tx_pages);
2112 	if (pages <= 0 || pages > TX_PAGES_MAX || !powerof2(pages)) {
2113 		device_printf(txr->sc->bce_dev, "invalid # of TX pages\n");
2114 		pages = TX_PAGES_DEFAULT;
2115 	}
2116 	txr->tx_pages = pages;
2117 
2118 	txr->tx_bd_chain_map = kmalloc(sizeof(bus_dmamap_t) * txr->tx_pages,
2119 	    M_DEVBUF, M_WAITOK | M_ZERO);
2120 	txr->tx_bd_chain = kmalloc(sizeof(struct tx_bd *) * txr->tx_pages,
2121 	    M_DEVBUF, M_WAITOK | M_ZERO);
2122 	txr->tx_bd_chain_paddr = kmalloc(sizeof(bus_addr_t) * txr->tx_pages,
2123 	    M_DEVBUF, M_WAITOK | M_ZERO);
2124 
2125 	txr->tx_bufs = kmalloc(sizeof(struct bce_tx_buf) * TOTAL_TX_BD(txr),
2126 			       M_DEVBUF,
2127 			       M_WAITOK | M_ZERO | M_CACHEALIGN);
2128 
2129 	/*
2130 	 * Create a DMA tag for the TX buffer descriptor chain,
2131 	 * allocate and clear the  memory, and fetch the
2132 	 * physical address of the block.
2133 	 */
2134 	rc = bus_dma_tag_create(txr->sc->parent_tag, BCM_PAGE_SIZE, 0,
2135 	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
2136 	    BCE_TX_CHAIN_PAGE_SZ, 1, BCE_TX_CHAIN_PAGE_SZ,
2137 	    0, &txr->tx_bd_chain_tag);
2138 	if (rc != 0) {
2139 		device_printf(txr->sc->bce_dev, "Could not allocate "
2140 		    "TX descriptor chain DMA tag!\n");
2141 		return rc;
2142 	}
2143 
2144 	for (i = 0; i < txr->tx_pages; i++) {
2145 		bus_addr_t busaddr;
2146 
2147 		rc = bus_dmamem_alloc(txr->tx_bd_chain_tag,
2148 		    (void **)&txr->tx_bd_chain[i],
2149 		    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
2150 		    &txr->tx_bd_chain_map[i]);
2151 		if (rc != 0) {
2152 			device_printf(txr->sc->bce_dev,
2153 			    "Could not allocate %dth TX descriptor "
2154 			    "chain DMA memory!\n", i);
2155 			return rc;
2156 		}
2157 
2158 		rc = bus_dmamap_load(txr->tx_bd_chain_tag,
2159 		    txr->tx_bd_chain_map[i],
2160 		    txr->tx_bd_chain[i],
2161 		    BCE_TX_CHAIN_PAGE_SZ,
2162 		    bce_dma_map_addr, &busaddr,
2163 		    BUS_DMA_WAITOK);
2164 		if (rc != 0) {
2165 			if (rc == EINPROGRESS) {
2166 				panic("%s coherent memory loading "
2167 				    "is still in progress!",
2168 				    txr->sc->arpcom.ac_if.if_xname);
2169 			}
2170 			device_printf(txr->sc->bce_dev, "Could not map %dth "
2171 			    "TX descriptor chain DMA memory!\n", i);
2172 			bus_dmamem_free(txr->tx_bd_chain_tag,
2173 			    txr->tx_bd_chain[i],
2174 			    txr->tx_bd_chain_map[i]);
2175 			txr->tx_bd_chain[i] = NULL;
2176 			return rc;
2177 		}
2178 
2179 		txr->tx_bd_chain_paddr[i] = busaddr;
2180 	}
2181 
2182 	/* Create a DMA tag for TX mbufs. */
2183 	rc = bus_dma_tag_create(txr->sc->parent_tag, 1, 0,
2184 	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
2185 	    IP_MAXPACKET + sizeof(struct ether_vlan_header),
2186 	    BCE_MAX_SEGMENTS, PAGE_SIZE,
2187 	    BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
2188 	    &txr->tx_mbuf_tag);
2189 	if (rc != 0) {
2190 		device_printf(txr->sc->bce_dev,
2191 		    "Could not allocate TX mbuf DMA tag!\n");
2192 		return rc;
2193 	}
2194 
2195 	/* Create DMA maps for the TX mbufs clusters. */
2196 	for (i = 0; i < TOTAL_TX_BD(txr); i++) {
2197 		rc = bus_dmamap_create(txr->tx_mbuf_tag,
2198 		    BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
2199 		    &txr->tx_bufs[i].tx_mbuf_map);
2200 		if (rc != 0) {
2201 			int j;
2202 
2203 			for (j = 0; j < i; ++j) {
2204 				bus_dmamap_destroy(txr->tx_mbuf_tag,
2205 				    txr->tx_bufs[j].tx_mbuf_map);
2206 			}
2207 			bus_dma_tag_destroy(txr->tx_mbuf_tag);
2208 			txr->tx_mbuf_tag = NULL;
2209 
2210 			device_printf(txr->sc->bce_dev, "Unable to create "
2211 			    "%dth TX mbuf DMA map!\n", i);
2212 			return rc;
2213 		}
2214 	}
2215 	return 0;
2216 }
2217 
2218 static int
2219 bce_create_rx_ring(struct bce_rx_ring *rxr)
2220 {
2221 	int pages, rc, i;
2222 
2223 	lwkt_serialize_init(&rxr->rx_serialize);
2224 
2225 	pages = device_getenv_int(rxr->sc->bce_dev, "rx_pages", bce_rx_pages);
2226 	if (pages <= 0 || pages > RX_PAGES_MAX || !powerof2(pages)) {
2227 		device_printf(rxr->sc->bce_dev, "invalid # of RX pages\n");
2228 		pages = RX_PAGES_DEFAULT;
2229 	}
2230 	rxr->rx_pages = pages;
2231 
2232 	rxr->rx_bd_chain_map = kmalloc(sizeof(bus_dmamap_t) * rxr->rx_pages,
2233 	    M_DEVBUF, M_WAITOK | M_ZERO);
2234 	rxr->rx_bd_chain = kmalloc(sizeof(struct rx_bd *) * rxr->rx_pages,
2235 	    M_DEVBUF, M_WAITOK | M_ZERO);
2236 	rxr->rx_bd_chain_paddr = kmalloc(sizeof(bus_addr_t) * rxr->rx_pages,
2237 	    M_DEVBUF, M_WAITOK | M_ZERO);
2238 
2239 	rxr->rx_bufs = kmalloc(sizeof(struct bce_rx_buf) * TOTAL_RX_BD(rxr),
2240 			       M_DEVBUF,
2241 			       M_WAITOK | M_ZERO | M_CACHEALIGN);
2242 
2243 	/*
2244 	 * Create a DMA tag for the RX buffer descriptor chain,
2245 	 * allocate and clear the  memory, and fetch the physical
2246 	 * address of the blocks.
2247 	 */
2248 	rc = bus_dma_tag_create(rxr->sc->parent_tag, BCM_PAGE_SIZE, 0,
2249 	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
2250 	    BCE_RX_CHAIN_PAGE_SZ, 1, BCE_RX_CHAIN_PAGE_SZ,
2251 	    0, &rxr->rx_bd_chain_tag);
2252 	if (rc != 0) {
2253 		device_printf(rxr->sc->bce_dev, "Could not allocate "
2254 		    "RX descriptor chain DMA tag!\n");
2255 		return rc;
2256 	}
2257 
2258 	for (i = 0; i < rxr->rx_pages; i++) {
2259 		bus_addr_t busaddr;
2260 
2261 		rc = bus_dmamem_alloc(rxr->rx_bd_chain_tag,
2262 		    (void **)&rxr->rx_bd_chain[i],
2263 		    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
2264 		    &rxr->rx_bd_chain_map[i]);
2265 		if (rc != 0) {
2266 			device_printf(rxr->sc->bce_dev,
2267 			    "Could not allocate %dth RX descriptor "
2268 			    "chain DMA memory!\n", i);
2269 			return rc;
2270 		}
2271 
2272 		rc = bus_dmamap_load(rxr->rx_bd_chain_tag,
2273 		    rxr->rx_bd_chain_map[i],
2274 		    rxr->rx_bd_chain[i],
2275 		    BCE_RX_CHAIN_PAGE_SZ,
2276 		    bce_dma_map_addr, &busaddr,
2277 		    BUS_DMA_WAITOK);
2278 		if (rc != 0) {
2279 			if (rc == EINPROGRESS) {
2280 				panic("%s coherent memory loading "
2281 				    "is still in progress!",
2282 				    rxr->sc->arpcom.ac_if.if_xname);
2283 			}
2284 			device_printf(rxr->sc->bce_dev,
2285 			    "Could not map %dth RX descriptor "
2286 			    "chain DMA memory!\n", i);
2287 			bus_dmamem_free(rxr->rx_bd_chain_tag,
2288 			    rxr->rx_bd_chain[i],
2289 			    rxr->rx_bd_chain_map[i]);
2290 			rxr->rx_bd_chain[i] = NULL;
2291 			return rc;
2292 		}
2293 
2294 		rxr->rx_bd_chain_paddr[i] = busaddr;
2295 	}
2296 
2297 	/* Create a DMA tag for RX mbufs. */
2298 	rc = bus_dma_tag_create(rxr->sc->parent_tag, BCE_DMA_RX_ALIGN, 0,
2299 	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
2300 	    MCLBYTES, 1, MCLBYTES,
2301 	    BUS_DMA_ALLOCNOW | BUS_DMA_ALIGNED | BUS_DMA_WAITOK,
2302 	    &rxr->rx_mbuf_tag);
2303 	if (rc != 0) {
2304 		device_printf(rxr->sc->bce_dev,
2305 		    "Could not allocate RX mbuf DMA tag!\n");
2306 		return rc;
2307 	}
2308 
2309 	/* Create tmp DMA map for RX mbuf clusters. */
2310 	rc = bus_dmamap_create(rxr->rx_mbuf_tag, BUS_DMA_WAITOK,
2311 	    &rxr->rx_mbuf_tmpmap);
2312 	if (rc != 0) {
2313 		bus_dma_tag_destroy(rxr->rx_mbuf_tag);
2314 		rxr->rx_mbuf_tag = NULL;
2315 
2316 		device_printf(rxr->sc->bce_dev,
2317 		    "Could not create RX mbuf tmp DMA map!\n");
2318 		return rc;
2319 	}
2320 
2321 	/* Create DMA maps for the RX mbuf clusters. */
2322 	for (i = 0; i < TOTAL_RX_BD(rxr); i++) {
2323 		rc = bus_dmamap_create(rxr->rx_mbuf_tag, BUS_DMA_WAITOK,
2324 		    &rxr->rx_bufs[i].rx_mbuf_map);
2325 		if (rc != 0) {
2326 			int j;
2327 
2328 			for (j = 0; j < i; ++j) {
2329 				bus_dmamap_destroy(rxr->rx_mbuf_tag,
2330 				    rxr->rx_bufs[j].rx_mbuf_map);
2331 			}
2332 			bus_dma_tag_destroy(rxr->rx_mbuf_tag);
2333 			rxr->rx_mbuf_tag = NULL;
2334 
2335 			device_printf(rxr->sc->bce_dev, "Unable to create "
2336 			    "%dth RX mbuf DMA map!\n", i);
2337 			return rc;
2338 		}
2339 	}
2340 	return 0;
2341 }
2342 
2343 /****************************************************************************/
2344 /* Allocate any DMA memory needed by the driver.                            */
2345 /*                                                                          */
2346 /* Allocates DMA memory needed for the various global structures needed by  */
2347 /* hardware.                                                                */
2348 /*                                                                          */
2349 /* Memory alignment requirements:                                           */
2350 /* -----------------+----------+----------+----------+----------+           */
2351 /*  Data Structure  |   5706   |   5708   |   5709   |   5716   |           */
2352 /* -----------------+----------+----------+----------+----------+           */
2353 /* Status Block     | 8 bytes  | 8 bytes  | 16 bytes | 16 bytes |           */
2354 /* Statistics Block | 8 bytes  | 8 bytes  | 16 bytes | 16 bytes |           */
2355 /* RX Buffers       | 16 bytes | 16 bytes | 16 bytes | 16 bytes |           */
2356 /* PG Buffers       |   none   |   none   |   none   |   none   |           */
2357 /* TX Buffers       |   none   |   none   |   none   |   none   |           */
2358 /* Chain Pages(1)   |   4KiB   |   4KiB   |   4KiB   |   4KiB   |           */
2359 /* Context Pages(1) |   N/A    |   N/A    |   4KiB   |   4KiB   |           */
2360 /* -----------------+----------+----------+----------+----------+           */
2361 /*                                                                          */
2362 /* (1) Must align with CPU page size (BCM_PAGE_SZIE).                       */
2363 /*                                                                          */
2364 /* Returns:                                                                 */
2365 /*   0 for success, positive value for failure.                             */
2366 /****************************************************************************/
2367 static int
2368 bce_dma_alloc(struct bce_softc *sc)
2369 {
2370 	struct ifnet *ifp = &sc->arpcom.ac_if;
2371 	int i, rc = 0;
2372 	bus_addr_t busaddr, max_busaddr;
2373 	bus_size_t status_align, stats_align, status_size;
2374 
2375 	/*
2376 	 * The embedded PCIe to PCI-X bridge (EPB)
2377 	 * in the 5708 cannot address memory above
2378 	 * 40 bits (E7_5708CB1_23043 & E6_5708SB1_23043).
2379 	 */
2380 	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708)
2381 		max_busaddr = BCE_BUS_SPACE_MAXADDR;
2382 	else
2383 		max_busaddr = BUS_SPACE_MAXADDR;
2384 
2385 	/*
2386 	 * BCM5709 and BCM5716 uses host memory as cache for context memory.
2387 	 */
2388 	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
2389 	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
2390 		sc->ctx_pages = BCE_CTX_BLK_SZ / BCM_PAGE_SIZE;
2391 		if (sc->ctx_pages == 0)
2392 			sc->ctx_pages = 1;
2393 		if (sc->ctx_pages > BCE_CTX_PAGES) {
2394 			device_printf(sc->bce_dev, "excessive ctx pages %d\n",
2395 			    sc->ctx_pages);
2396 			return ENOMEM;
2397 		}
2398 		status_align = 16;
2399 		stats_align = 16;
2400 	} else {
2401 		status_align = 8;
2402 		stats_align = 8;
2403 	}
2404 
2405 	/*
2406 	 * Each MSI-X vector needs a status block; each status block
2407 	 * consumes 128bytes and is 128bytes aligned.
2408 	 */
2409 	if (sc->rx_ring_cnt > 1) {
2410 		status_size = BCE_MSIX_MAX * BCE_STATUS_BLK_MSIX_ALIGN;
2411 		status_align = BCE_STATUS_BLK_MSIX_ALIGN;
2412 	} else {
2413 		status_size = BCE_STATUS_BLK_SZ;
2414 	}
2415 
2416 	/*
2417 	 * Allocate the parent bus DMA tag appropriate for PCI.
2418 	 */
2419 	rc = bus_dma_tag_create(NULL, 1, BCE_DMA_BOUNDARY,
2420 				max_busaddr, BUS_SPACE_MAXADDR,
2421 				NULL, NULL,
2422 				BUS_SPACE_MAXSIZE_32BIT, 0,
2423 				BUS_SPACE_MAXSIZE_32BIT,
2424 				0, &sc->parent_tag);
2425 	if (rc != 0) {
2426 		if_printf(ifp, "Could not allocate parent DMA tag!\n");
2427 		return rc;
2428 	}
2429 
2430 	/*
2431 	 * Allocate status block.
2432 	 */
2433 	sc->status_block = bus_dmamem_coherent_any(sc->parent_tag,
2434 				status_align, status_size,
2435 				BUS_DMA_WAITOK | BUS_DMA_ZERO,
2436 				&sc->status_tag, &sc->status_map,
2437 				&sc->status_block_paddr);
2438 	if (sc->status_block == NULL) {
2439 		if_printf(ifp, "Could not allocate status block!\n");
2440 		return ENOMEM;
2441 	}
2442 
2443 	/*
2444 	 * Allocate statistics block.
2445 	 */
2446 	sc->stats_block = bus_dmamem_coherent_any(sc->parent_tag,
2447 				stats_align, BCE_STATS_BLK_SZ,
2448 				BUS_DMA_WAITOK | BUS_DMA_ZERO,
2449 				&sc->stats_tag, &sc->stats_map,
2450 				&sc->stats_block_paddr);
2451 	if (sc->stats_block == NULL) {
2452 		if_printf(ifp, "Could not allocate statistics block!\n");
2453 		return ENOMEM;
2454 	}
2455 
2456 	/*
2457 	 * Allocate context block, if needed
2458 	 */
2459 	if (sc->ctx_pages != 0) {
2460 		rc = bus_dma_tag_create(sc->parent_tag, BCM_PAGE_SIZE, 0,
2461 					BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
2462 					NULL, NULL,
2463 					BCM_PAGE_SIZE, 1, BCM_PAGE_SIZE,
2464 					0, &sc->ctx_tag);
2465 		if (rc != 0) {
2466 			if_printf(ifp, "Could not allocate "
2467 				  "context block DMA tag!\n");
2468 			return rc;
2469 		}
2470 
2471 		for (i = 0; i < sc->ctx_pages; i++) {
2472 			rc = bus_dmamem_alloc(sc->ctx_tag,
2473 					      (void **)&sc->ctx_block[i],
2474 					      BUS_DMA_WAITOK | BUS_DMA_ZERO |
2475 					      BUS_DMA_COHERENT,
2476 					      &sc->ctx_map[i]);
2477 			if (rc != 0) {
2478 				if_printf(ifp, "Could not allocate %dth context "
2479 					  "DMA memory!\n", i);
2480 				return rc;
2481 			}
2482 
2483 			rc = bus_dmamap_load(sc->ctx_tag, sc->ctx_map[i],
2484 					     sc->ctx_block[i], BCM_PAGE_SIZE,
2485 					     bce_dma_map_addr, &busaddr,
2486 					     BUS_DMA_WAITOK);
2487 			if (rc != 0) {
2488 				if (rc == EINPROGRESS) {
2489 					panic("%s coherent memory loading "
2490 					      "is still in progress!", ifp->if_xname);
2491 				}
2492 				if_printf(ifp, "Could not map %dth context "
2493 					  "DMA memory!\n", i);
2494 				bus_dmamem_free(sc->ctx_tag, sc->ctx_block[i],
2495 						sc->ctx_map[i]);
2496 				sc->ctx_block[i] = NULL;
2497 				return rc;
2498 			}
2499 			sc->ctx_paddr[i] = busaddr;
2500 		}
2501 	}
2502 
2503 	sc->tx_rings = kmalloc(sizeof(struct bce_tx_ring) * sc->tx_ring_cnt,
2504 			       M_DEVBUF,
2505 			       M_WAITOK | M_ZERO | M_CACHEALIGN);
2506 	for (i = 0; i < sc->tx_ring_cnt; ++i) {
2507 		sc->tx_rings[i].sc = sc;
2508 		if (i == 0) {
2509 			sc->tx_rings[i].tx_cid = TX_CID;
2510 			sc->tx_rings[i].tx_hw_cons =
2511 			    &sc->status_block->status_tx_quick_consumer_index0;
2512 		} else {
2513 			struct status_block_msix *sblk =
2514 			    (struct status_block_msix *)
2515 			    (((uint8_t *)(sc->status_block)) +
2516 			     (i * BCE_STATUS_BLK_MSIX_ALIGN));
2517 
2518 			sc->tx_rings[i].tx_cid = TX_TSS_CID + i - 1;
2519 			sc->tx_rings[i].tx_hw_cons =
2520 			    &sblk->status_tx_quick_consumer_index;
2521 		}
2522 
2523 		rc = bce_create_tx_ring(&sc->tx_rings[i]);
2524 		if (rc != 0) {
2525 			device_printf(sc->bce_dev,
2526 			    "can't create %dth tx ring\n", i);
2527 			return rc;
2528 		}
2529 	}
2530 
2531 	sc->rx_rings = kmalloc(sizeof(struct bce_rx_ring) * sc->rx_ring_cnt,
2532 			       M_DEVBUF,
2533 			       M_WAITOK | M_ZERO | M_CACHEALIGN);
2534 	for (i = 0; i < sc->rx_ring_cnt; ++i) {
2535 		sc->rx_rings[i].sc = sc;
2536 		sc->rx_rings[i].idx = i;
2537 		if (i == 0) {
2538 			sc->rx_rings[i].rx_cid = RX_CID;
2539 			sc->rx_rings[i].rx_hw_cons =
2540 			    &sc->status_block->status_rx_quick_consumer_index0;
2541 			sc->rx_rings[i].hw_status_idx =
2542 			    &sc->status_block->status_idx;
2543 		} else {
2544 			struct status_block_msix *sblk =
2545 			    (struct status_block_msix *)
2546 			    (((uint8_t *)(sc->status_block)) +
2547 			     (i * BCE_STATUS_BLK_MSIX_ALIGN));
2548 
2549 			sc->rx_rings[i].rx_cid = RX_RSS_CID + i - 1;
2550 			sc->rx_rings[i].rx_hw_cons =
2551 			    &sblk->status_rx_quick_consumer_index;
2552 			sc->rx_rings[i].hw_status_idx = &sblk->status_idx;
2553 		}
2554 
2555 		rc = bce_create_rx_ring(&sc->rx_rings[i]);
2556 		if (rc != 0) {
2557 			device_printf(sc->bce_dev,
2558 			    "can't create %dth rx ring\n", i);
2559 			return rc;
2560 		}
2561 	}
2562 
2563 	return 0;
2564 }
2565 
2566 /****************************************************************************/
2567 /* Firmware synchronization.                                                */
2568 /*                                                                          */
2569 /* Before performing certain events such as a chip reset, synchronize with  */
2570 /* the firmware first.                                                      */
2571 /*                                                                          */
2572 /* Returns:                                                                 */
2573 /*   0 for success, positive value for failure.                             */
2574 /****************************************************************************/
2575 static int
2576 bce_fw_sync(struct bce_softc *sc, uint32_t msg_data)
2577 {
2578 	int i, rc = 0;
2579 	uint32_t val;
2580 
2581 	/* Don't waste any time if we've timed out before. */
2582 	if (sc->bce_fw_timed_out)
2583 		return EBUSY;
2584 
2585 	/* Increment the message sequence number. */
2586 	sc->bce_fw_wr_seq++;
2587 	msg_data |= sc->bce_fw_wr_seq;
2588 
2589 	/* Send the message to the bootcode driver mailbox. */
2590 	bce_shmem_wr(sc, BCE_DRV_MB, msg_data);
2591 
2592 	/* Wait for the bootcode to acknowledge the message. */
2593 	for (i = 0; i < FW_ACK_TIME_OUT_MS; i++) {
2594 		/* Check for a response in the bootcode firmware mailbox. */
2595 		val = bce_shmem_rd(sc, BCE_FW_MB);
2596 		if ((val & BCE_FW_MSG_ACK) == (msg_data & BCE_DRV_MSG_SEQ))
2597 			break;
2598 		DELAY(1000);
2599 	}
2600 
2601 	/* If we've timed out, tell the bootcode that we've stopped waiting. */
2602 	if ((val & BCE_FW_MSG_ACK) != (msg_data & BCE_DRV_MSG_SEQ) &&
2603 	    (msg_data & BCE_DRV_MSG_DATA) != BCE_DRV_MSG_DATA_WAIT0) {
2604 		if_printf(&sc->arpcom.ac_if,
2605 			  "Firmware synchronization timeout! "
2606 			  "msg_data = 0x%08X\n", msg_data);
2607 
2608 		msg_data &= ~BCE_DRV_MSG_CODE;
2609 		msg_data |= BCE_DRV_MSG_CODE_FW_TIMEOUT;
2610 
2611 		bce_shmem_wr(sc, BCE_DRV_MB, msg_data);
2612 
2613 		sc->bce_fw_timed_out = 1;
2614 		rc = EBUSY;
2615 	}
2616 	return rc;
2617 }
2618 
2619 /****************************************************************************/
2620 /* Load Receive Virtual 2 Physical (RV2P) processor firmware.               */
2621 /*                                                                          */
2622 /* Returns:                                                                 */
2623 /*   Nothing.                                                               */
2624 /****************************************************************************/
2625 static void
2626 bce_load_rv2p_fw(struct bce_softc *sc, uint32_t *rv2p_code,
2627 		 uint32_t rv2p_code_len, uint32_t rv2p_proc)
2628 {
2629 	int i;
2630 	uint32_t val;
2631 
2632 	for (i = 0; i < rv2p_code_len; i += 8) {
2633 		REG_WR(sc, BCE_RV2P_INSTR_HIGH, *rv2p_code);
2634 		rv2p_code++;
2635 		REG_WR(sc, BCE_RV2P_INSTR_LOW, *rv2p_code);
2636 		rv2p_code++;
2637 
2638 		if (rv2p_proc == RV2P_PROC1) {
2639 			val = (i / 8) | BCE_RV2P_PROC1_ADDR_CMD_RDWR;
2640 			REG_WR(sc, BCE_RV2P_PROC1_ADDR_CMD, val);
2641 		} else {
2642 			val = (i / 8) | BCE_RV2P_PROC2_ADDR_CMD_RDWR;
2643 			REG_WR(sc, BCE_RV2P_PROC2_ADDR_CMD, val);
2644 		}
2645 	}
2646 
2647 	/* Reset the processor, un-stall is done later. */
2648 	if (rv2p_proc == RV2P_PROC1)
2649 		REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC1_RESET);
2650 	else
2651 		REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC2_RESET);
2652 }
2653 
2654 /****************************************************************************/
2655 /* Load RISC processor firmware.                                            */
2656 /*                                                                          */
2657 /* Loads firmware from the file if_bcefw.h into the scratchpad memory       */
2658 /* associated with a particular processor.                                  */
2659 /*                                                                          */
2660 /* Returns:                                                                 */
2661 /*   Nothing.                                                               */
2662 /****************************************************************************/
2663 static void
2664 bce_load_cpu_fw(struct bce_softc *sc, struct cpu_reg *cpu_reg,
2665 		struct fw_info *fw)
2666 {
2667 	uint32_t offset;
2668 	int j;
2669 
2670 	bce_halt_cpu(sc, cpu_reg);
2671 
2672 	/* Load the Text area. */
2673 	offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2674 	if (fw->text) {
2675 		for (j = 0; j < (fw->text_len / 4); j++, offset += 4)
2676 			REG_WR_IND(sc, offset, fw->text[j]);
2677 	}
2678 
2679 	/* Load the Data area. */
2680 	offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2681 	if (fw->data) {
2682 		for (j = 0; j < (fw->data_len / 4); j++, offset += 4)
2683 			REG_WR_IND(sc, offset, fw->data[j]);
2684 	}
2685 
2686 	/* Load the SBSS area. */
2687 	offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2688 	if (fw->sbss) {
2689 		for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4)
2690 			REG_WR_IND(sc, offset, fw->sbss[j]);
2691 	}
2692 
2693 	/* Load the BSS area. */
2694 	offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2695 	if (fw->bss) {
2696 		for (j = 0; j < (fw->bss_len/4); j++, offset += 4)
2697 			REG_WR_IND(sc, offset, fw->bss[j]);
2698 	}
2699 
2700 	/* Load the Read-Only area. */
2701 	offset = cpu_reg->spad_base +
2702 		(fw->rodata_addr - cpu_reg->mips_view_base);
2703 	if (fw->rodata) {
2704 		for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4)
2705 			REG_WR_IND(sc, offset, fw->rodata[j]);
2706 	}
2707 
2708 	/* Clear the pre-fetch instruction and set the FW start address. */
2709 	REG_WR_IND(sc, cpu_reg->inst, 0);
2710 	REG_WR_IND(sc, cpu_reg->pc, fw->start_addr);
2711 }
2712 
2713 /****************************************************************************/
2714 /* Starts the RISC processor.                                               */
2715 /*                                                                          */
2716 /* Assumes the CPU starting address has already been set.                   */
2717 /*                                                                          */
2718 /* Returns:                                                                 */
2719 /*   Nothing.                                                               */
2720 /****************************************************************************/
2721 static void
2722 bce_start_cpu(struct bce_softc *sc, struct cpu_reg *cpu_reg)
2723 {
2724 	uint32_t val;
2725 
2726 	/* Start the CPU. */
2727 	val = REG_RD_IND(sc, cpu_reg->mode);
2728 	val &= ~cpu_reg->mode_value_halt;
2729 	REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
2730 	REG_WR_IND(sc, cpu_reg->mode, val);
2731 }
2732 
2733 /****************************************************************************/
2734 /* Halts the RISC processor.                                                */
2735 /*                                                                          */
2736 /* Returns:                                                                 */
2737 /*   Nothing.                                                               */
2738 /****************************************************************************/
2739 static void
2740 bce_halt_cpu(struct bce_softc *sc, struct cpu_reg *cpu_reg)
2741 {
2742 	uint32_t val;
2743 
2744 	/* Halt the CPU. */
2745 	val = REG_RD_IND(sc, cpu_reg->mode);
2746 	val |= cpu_reg->mode_value_halt;
2747 	REG_WR_IND(sc, cpu_reg->mode, val);
2748 	REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
2749 }
2750 
2751 /****************************************************************************/
2752 /* Start the RX CPU.                                                        */
2753 /*                                                                          */
2754 /* Returns:                                                                 */
2755 /*   Nothing.                                                               */
2756 /****************************************************************************/
2757 static void
2758 bce_start_rxp_cpu(struct bce_softc *sc)
2759 {
2760 	struct cpu_reg cpu_reg;
2761 
2762 	cpu_reg.mode = BCE_RXP_CPU_MODE;
2763 	cpu_reg.mode_value_halt = BCE_RXP_CPU_MODE_SOFT_HALT;
2764 	cpu_reg.mode_value_sstep = BCE_RXP_CPU_MODE_STEP_ENA;
2765 	cpu_reg.state = BCE_RXP_CPU_STATE;
2766 	cpu_reg.state_value_clear = 0xffffff;
2767 	cpu_reg.gpr0 = BCE_RXP_CPU_REG_FILE;
2768 	cpu_reg.evmask = BCE_RXP_CPU_EVENT_MASK;
2769 	cpu_reg.pc = BCE_RXP_CPU_PROGRAM_COUNTER;
2770 	cpu_reg.inst = BCE_RXP_CPU_INSTRUCTION;
2771 	cpu_reg.bp = BCE_RXP_CPU_HW_BREAKPOINT;
2772 	cpu_reg.spad_base = BCE_RXP_SCRATCH;
2773 	cpu_reg.mips_view_base = 0x8000000;
2774 
2775 	bce_start_cpu(sc, &cpu_reg);
2776 }
2777 
2778 /****************************************************************************/
2779 /* Initialize the RX CPU.                                                   */
2780 /*                                                                          */
2781 /* Returns:                                                                 */
2782 /*   Nothing.                                                               */
2783 /****************************************************************************/
2784 static void
2785 bce_init_rxp_cpu(struct bce_softc *sc)
2786 {
2787 	struct cpu_reg cpu_reg;
2788 	struct fw_info fw;
2789 
2790 	cpu_reg.mode = BCE_RXP_CPU_MODE;
2791 	cpu_reg.mode_value_halt = BCE_RXP_CPU_MODE_SOFT_HALT;
2792 	cpu_reg.mode_value_sstep = BCE_RXP_CPU_MODE_STEP_ENA;
2793 	cpu_reg.state = BCE_RXP_CPU_STATE;
2794 	cpu_reg.state_value_clear = 0xffffff;
2795 	cpu_reg.gpr0 = BCE_RXP_CPU_REG_FILE;
2796 	cpu_reg.evmask = BCE_RXP_CPU_EVENT_MASK;
2797 	cpu_reg.pc = BCE_RXP_CPU_PROGRAM_COUNTER;
2798 	cpu_reg.inst = BCE_RXP_CPU_INSTRUCTION;
2799 	cpu_reg.bp = BCE_RXP_CPU_HW_BREAKPOINT;
2800 	cpu_reg.spad_base = BCE_RXP_SCRATCH;
2801 	cpu_reg.mips_view_base = 0x8000000;
2802 
2803 	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
2804 	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
2805  		fw.ver_major = bce_RXP_b09FwReleaseMajor;
2806 		fw.ver_minor = bce_RXP_b09FwReleaseMinor;
2807 		fw.ver_fix = bce_RXP_b09FwReleaseFix;
2808 		fw.start_addr = bce_RXP_b09FwStartAddr;
2809 
2810 		fw.text_addr = bce_RXP_b09FwTextAddr;
2811 		fw.text_len = bce_RXP_b09FwTextLen;
2812 		fw.text_index = 0;
2813 		fw.text = bce_RXP_b09FwText;
2814 
2815 		fw.data_addr = bce_RXP_b09FwDataAddr;
2816 		fw.data_len = bce_RXP_b09FwDataLen;
2817 		fw.data_index = 0;
2818 		fw.data = bce_RXP_b09FwData;
2819 
2820 		fw.sbss_addr = bce_RXP_b09FwSbssAddr;
2821 		fw.sbss_len = bce_RXP_b09FwSbssLen;
2822 		fw.sbss_index = 0;
2823 		fw.sbss = bce_RXP_b09FwSbss;
2824 
2825 		fw.bss_addr = bce_RXP_b09FwBssAddr;
2826 		fw.bss_len = bce_RXP_b09FwBssLen;
2827 		fw.bss_index = 0;
2828 		fw.bss = bce_RXP_b09FwBss;
2829 
2830 		fw.rodata_addr = bce_RXP_b09FwRodataAddr;
2831 		fw.rodata_len = bce_RXP_b09FwRodataLen;
2832 		fw.rodata_index = 0;
2833 		fw.rodata = bce_RXP_b09FwRodata;
2834 	} else {
2835 		fw.ver_major = bce_RXP_b06FwReleaseMajor;
2836 		fw.ver_minor = bce_RXP_b06FwReleaseMinor;
2837 		fw.ver_fix = bce_RXP_b06FwReleaseFix;
2838 		fw.start_addr = bce_RXP_b06FwStartAddr;
2839 
2840 		fw.text_addr = bce_RXP_b06FwTextAddr;
2841 		fw.text_len = bce_RXP_b06FwTextLen;
2842 		fw.text_index = 0;
2843 		fw.text = bce_RXP_b06FwText;
2844 
2845 		fw.data_addr = bce_RXP_b06FwDataAddr;
2846 		fw.data_len = bce_RXP_b06FwDataLen;
2847 		fw.data_index = 0;
2848 		fw.data = bce_RXP_b06FwData;
2849 
2850 		fw.sbss_addr = bce_RXP_b06FwSbssAddr;
2851 		fw.sbss_len = bce_RXP_b06FwSbssLen;
2852 		fw.sbss_index = 0;
2853 		fw.sbss = bce_RXP_b06FwSbss;
2854 
2855 		fw.bss_addr = bce_RXP_b06FwBssAddr;
2856 		fw.bss_len = bce_RXP_b06FwBssLen;
2857 		fw.bss_index = 0;
2858 		fw.bss = bce_RXP_b06FwBss;
2859 
2860 		fw.rodata_addr = bce_RXP_b06FwRodataAddr;
2861 		fw.rodata_len = bce_RXP_b06FwRodataLen;
2862 		fw.rodata_index = 0;
2863 		fw.rodata = bce_RXP_b06FwRodata;
2864 	}
2865 
2866 	bce_load_cpu_fw(sc, &cpu_reg, &fw);
2867 	/* Delay RXP start until initialization is complete. */
2868 }
2869 
2870 /****************************************************************************/
2871 /* Initialize the TX CPU.                                                   */
2872 /*                                                                          */
2873 /* Returns:                                                                 */
2874 /*   Nothing.                                                               */
2875 /****************************************************************************/
2876 static void
2877 bce_init_txp_cpu(struct bce_softc *sc)
2878 {
2879 	struct cpu_reg cpu_reg;
2880 	struct fw_info fw;
2881 
2882 	cpu_reg.mode = BCE_TXP_CPU_MODE;
2883 	cpu_reg.mode_value_halt = BCE_TXP_CPU_MODE_SOFT_HALT;
2884 	cpu_reg.mode_value_sstep = BCE_TXP_CPU_MODE_STEP_ENA;
2885 	cpu_reg.state = BCE_TXP_CPU_STATE;
2886 	cpu_reg.state_value_clear = 0xffffff;
2887 	cpu_reg.gpr0 = BCE_TXP_CPU_REG_FILE;
2888 	cpu_reg.evmask = BCE_TXP_CPU_EVENT_MASK;
2889 	cpu_reg.pc = BCE_TXP_CPU_PROGRAM_COUNTER;
2890 	cpu_reg.inst = BCE_TXP_CPU_INSTRUCTION;
2891 	cpu_reg.bp = BCE_TXP_CPU_HW_BREAKPOINT;
2892 	cpu_reg.spad_base = BCE_TXP_SCRATCH;
2893 	cpu_reg.mips_view_base = 0x8000000;
2894 
2895 	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
2896 	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
2897 		fw.ver_major = bce_TXP_b09FwReleaseMajor;
2898 		fw.ver_minor = bce_TXP_b09FwReleaseMinor;
2899 		fw.ver_fix = bce_TXP_b09FwReleaseFix;
2900 		fw.start_addr = bce_TXP_b09FwStartAddr;
2901 
2902 		fw.text_addr = bce_TXP_b09FwTextAddr;
2903 		fw.text_len = bce_TXP_b09FwTextLen;
2904 		fw.text_index = 0;
2905 		fw.text = bce_TXP_b09FwText;
2906 
2907 		fw.data_addr = bce_TXP_b09FwDataAddr;
2908 		fw.data_len = bce_TXP_b09FwDataLen;
2909 		fw.data_index = 0;
2910 		fw.data = bce_TXP_b09FwData;
2911 
2912 		fw.sbss_addr = bce_TXP_b09FwSbssAddr;
2913 		fw.sbss_len = bce_TXP_b09FwSbssLen;
2914 		fw.sbss_index = 0;
2915 		fw.sbss = bce_TXP_b09FwSbss;
2916 
2917 		fw.bss_addr = bce_TXP_b09FwBssAddr;
2918 		fw.bss_len = bce_TXP_b09FwBssLen;
2919 		fw.bss_index = 0;
2920 		fw.bss = bce_TXP_b09FwBss;
2921 
2922 		fw.rodata_addr = bce_TXP_b09FwRodataAddr;
2923 		fw.rodata_len = bce_TXP_b09FwRodataLen;
2924 		fw.rodata_index = 0;
2925 		fw.rodata = bce_TXP_b09FwRodata;
2926 	} else {
2927 		fw.ver_major = bce_TXP_b06FwReleaseMajor;
2928 		fw.ver_minor = bce_TXP_b06FwReleaseMinor;
2929 		fw.ver_fix = bce_TXP_b06FwReleaseFix;
2930 		fw.start_addr = bce_TXP_b06FwStartAddr;
2931 
2932 		fw.text_addr = bce_TXP_b06FwTextAddr;
2933 		fw.text_len = bce_TXP_b06FwTextLen;
2934 		fw.text_index = 0;
2935 		fw.text = bce_TXP_b06FwText;
2936 
2937 		fw.data_addr = bce_TXP_b06FwDataAddr;
2938 		fw.data_len = bce_TXP_b06FwDataLen;
2939 		fw.data_index = 0;
2940 		fw.data = bce_TXP_b06FwData;
2941 
2942 		fw.sbss_addr = bce_TXP_b06FwSbssAddr;
2943 		fw.sbss_len = bce_TXP_b06FwSbssLen;
2944 		fw.sbss_index = 0;
2945 		fw.sbss = bce_TXP_b06FwSbss;
2946 
2947 		fw.bss_addr = bce_TXP_b06FwBssAddr;
2948 		fw.bss_len = bce_TXP_b06FwBssLen;
2949 		fw.bss_index = 0;
2950 		fw.bss = bce_TXP_b06FwBss;
2951 
2952 		fw.rodata_addr = bce_TXP_b06FwRodataAddr;
2953 		fw.rodata_len = bce_TXP_b06FwRodataLen;
2954 		fw.rodata_index = 0;
2955 		fw.rodata = bce_TXP_b06FwRodata;
2956 	}
2957 
2958 	bce_load_cpu_fw(sc, &cpu_reg, &fw);
2959 	bce_start_cpu(sc, &cpu_reg);
2960 }
2961 
2962 /****************************************************************************/
2963 /* Initialize the TPAT CPU.                                                 */
2964 /*                                                                          */
2965 /* Returns:                                                                 */
2966 /*   Nothing.                                                               */
2967 /****************************************************************************/
2968 static void
2969 bce_init_tpat_cpu(struct bce_softc *sc)
2970 {
2971 	struct cpu_reg cpu_reg;
2972 	struct fw_info fw;
2973 
2974 	cpu_reg.mode = BCE_TPAT_CPU_MODE;
2975 	cpu_reg.mode_value_halt = BCE_TPAT_CPU_MODE_SOFT_HALT;
2976 	cpu_reg.mode_value_sstep = BCE_TPAT_CPU_MODE_STEP_ENA;
2977 	cpu_reg.state = BCE_TPAT_CPU_STATE;
2978 	cpu_reg.state_value_clear = 0xffffff;
2979 	cpu_reg.gpr0 = BCE_TPAT_CPU_REG_FILE;
2980 	cpu_reg.evmask = BCE_TPAT_CPU_EVENT_MASK;
2981 	cpu_reg.pc = BCE_TPAT_CPU_PROGRAM_COUNTER;
2982 	cpu_reg.inst = BCE_TPAT_CPU_INSTRUCTION;
2983 	cpu_reg.bp = BCE_TPAT_CPU_HW_BREAKPOINT;
2984 	cpu_reg.spad_base = BCE_TPAT_SCRATCH;
2985 	cpu_reg.mips_view_base = 0x8000000;
2986 
2987 	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
2988 	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
2989 		fw.ver_major = bce_TPAT_b09FwReleaseMajor;
2990 		fw.ver_minor = bce_TPAT_b09FwReleaseMinor;
2991 		fw.ver_fix = bce_TPAT_b09FwReleaseFix;
2992 		fw.start_addr = bce_TPAT_b09FwStartAddr;
2993 
2994 		fw.text_addr = bce_TPAT_b09FwTextAddr;
2995 		fw.text_len = bce_TPAT_b09FwTextLen;
2996 		fw.text_index = 0;
2997 		fw.text = bce_TPAT_b09FwText;
2998 
2999 		fw.data_addr = bce_TPAT_b09FwDataAddr;
3000 		fw.data_len = bce_TPAT_b09FwDataLen;
3001 		fw.data_index = 0;
3002 		fw.data = bce_TPAT_b09FwData;
3003 
3004 		fw.sbss_addr = bce_TPAT_b09FwSbssAddr;
3005 		fw.sbss_len = bce_TPAT_b09FwSbssLen;
3006 		fw.sbss_index = 0;
3007 		fw.sbss = bce_TPAT_b09FwSbss;
3008 
3009 		fw.bss_addr = bce_TPAT_b09FwBssAddr;
3010 		fw.bss_len = bce_TPAT_b09FwBssLen;
3011 		fw.bss_index = 0;
3012 		fw.bss = bce_TPAT_b09FwBss;
3013 
3014 		fw.rodata_addr = bce_TPAT_b09FwRodataAddr;
3015 		fw.rodata_len = bce_TPAT_b09FwRodataLen;
3016 		fw.rodata_index = 0;
3017 		fw.rodata = bce_TPAT_b09FwRodata;
3018 	} else {
3019 		fw.ver_major = bce_TPAT_b06FwReleaseMajor;
3020 		fw.ver_minor = bce_TPAT_b06FwReleaseMinor;
3021 		fw.ver_fix = bce_TPAT_b06FwReleaseFix;
3022 		fw.start_addr = bce_TPAT_b06FwStartAddr;
3023 
3024 		fw.text_addr = bce_TPAT_b06FwTextAddr;
3025 		fw.text_len = bce_TPAT_b06FwTextLen;
3026 		fw.text_index = 0;
3027 		fw.text = bce_TPAT_b06FwText;
3028 
3029 		fw.data_addr = bce_TPAT_b06FwDataAddr;
3030 		fw.data_len = bce_TPAT_b06FwDataLen;
3031 		fw.data_index = 0;
3032 		fw.data = bce_TPAT_b06FwData;
3033 
3034 		fw.sbss_addr = bce_TPAT_b06FwSbssAddr;
3035 		fw.sbss_len = bce_TPAT_b06FwSbssLen;
3036 		fw.sbss_index = 0;
3037 		fw.sbss = bce_TPAT_b06FwSbss;
3038 
3039 		fw.bss_addr = bce_TPAT_b06FwBssAddr;
3040 		fw.bss_len = bce_TPAT_b06FwBssLen;
3041 		fw.bss_index = 0;
3042 		fw.bss = bce_TPAT_b06FwBss;
3043 
3044 		fw.rodata_addr = bce_TPAT_b06FwRodataAddr;
3045 		fw.rodata_len = bce_TPAT_b06FwRodataLen;
3046 		fw.rodata_index = 0;
3047 		fw.rodata = bce_TPAT_b06FwRodata;
3048 	}
3049 
3050 	bce_load_cpu_fw(sc, &cpu_reg, &fw);
3051 	bce_start_cpu(sc, &cpu_reg);
3052 }
3053 
3054 /****************************************************************************/
3055 /* Initialize the CP CPU.                                                   */
3056 /*                                                                          */
3057 /* Returns:                                                                 */
3058 /*   Nothing.                                                               */
3059 /****************************************************************************/
3060 static void
3061 bce_init_cp_cpu(struct bce_softc *sc)
3062 {
3063 	struct cpu_reg cpu_reg;
3064 	struct fw_info fw;
3065 
3066 	cpu_reg.mode = BCE_CP_CPU_MODE;
3067 	cpu_reg.mode_value_halt = BCE_CP_CPU_MODE_SOFT_HALT;
3068 	cpu_reg.mode_value_sstep = BCE_CP_CPU_MODE_STEP_ENA;
3069 	cpu_reg.state = BCE_CP_CPU_STATE;
3070 	cpu_reg.state_value_clear = 0xffffff;
3071 	cpu_reg.gpr0 = BCE_CP_CPU_REG_FILE;
3072 	cpu_reg.evmask = BCE_CP_CPU_EVENT_MASK;
3073 	cpu_reg.pc = BCE_CP_CPU_PROGRAM_COUNTER;
3074 	cpu_reg.inst = BCE_CP_CPU_INSTRUCTION;
3075 	cpu_reg.bp = BCE_CP_CPU_HW_BREAKPOINT;
3076 	cpu_reg.spad_base = BCE_CP_SCRATCH;
3077 	cpu_reg.mips_view_base = 0x8000000;
3078 
3079 	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
3080 	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
3081 		fw.ver_major = bce_CP_b09FwReleaseMajor;
3082 		fw.ver_minor = bce_CP_b09FwReleaseMinor;
3083 		fw.ver_fix = bce_CP_b09FwReleaseFix;
3084 		fw.start_addr = bce_CP_b09FwStartAddr;
3085 
3086 		fw.text_addr = bce_CP_b09FwTextAddr;
3087 		fw.text_len = bce_CP_b09FwTextLen;
3088 		fw.text_index = 0;
3089 		fw.text = bce_CP_b09FwText;
3090 
3091 		fw.data_addr = bce_CP_b09FwDataAddr;
3092 		fw.data_len = bce_CP_b09FwDataLen;
3093 		fw.data_index = 0;
3094 		fw.data = bce_CP_b09FwData;
3095 
3096 		fw.sbss_addr = bce_CP_b09FwSbssAddr;
3097 		fw.sbss_len = bce_CP_b09FwSbssLen;
3098 		fw.sbss_index = 0;
3099 		fw.sbss = bce_CP_b09FwSbss;
3100 
3101 		fw.bss_addr = bce_CP_b09FwBssAddr;
3102 		fw.bss_len = bce_CP_b09FwBssLen;
3103 		fw.bss_index = 0;
3104 		fw.bss = bce_CP_b09FwBss;
3105 
3106 		fw.rodata_addr = bce_CP_b09FwRodataAddr;
3107 		fw.rodata_len = bce_CP_b09FwRodataLen;
3108 		fw.rodata_index = 0;
3109 		fw.rodata = bce_CP_b09FwRodata;
3110 	} else {
3111 		fw.ver_major = bce_CP_b06FwReleaseMajor;
3112 		fw.ver_minor = bce_CP_b06FwReleaseMinor;
3113 		fw.ver_fix = bce_CP_b06FwReleaseFix;
3114 		fw.start_addr = bce_CP_b06FwStartAddr;
3115 
3116 		fw.text_addr = bce_CP_b06FwTextAddr;
3117 		fw.text_len = bce_CP_b06FwTextLen;
3118 		fw.text_index = 0;
3119 		fw.text = bce_CP_b06FwText;
3120 
3121 		fw.data_addr = bce_CP_b06FwDataAddr;
3122 		fw.data_len = bce_CP_b06FwDataLen;
3123 		fw.data_index = 0;
3124 		fw.data = bce_CP_b06FwData;
3125 
3126 		fw.sbss_addr = bce_CP_b06FwSbssAddr;
3127 		fw.sbss_len = bce_CP_b06FwSbssLen;
3128 		fw.sbss_index = 0;
3129 		fw.sbss = bce_CP_b06FwSbss;
3130 
3131 		fw.bss_addr = bce_CP_b06FwBssAddr;
3132 		fw.bss_len = bce_CP_b06FwBssLen;
3133 		fw.bss_index = 0;
3134 		fw.bss = bce_CP_b06FwBss;
3135 
3136 		fw.rodata_addr = bce_CP_b06FwRodataAddr;
3137 		fw.rodata_len = bce_CP_b06FwRodataLen;
3138 		fw.rodata_index = 0;
3139 		fw.rodata = bce_CP_b06FwRodata;
3140 	}
3141 
3142 	bce_load_cpu_fw(sc, &cpu_reg, &fw);
3143 	bce_start_cpu(sc, &cpu_reg);
3144 }
3145 
3146 /****************************************************************************/
3147 /* Initialize the COM CPU.                                                 */
3148 /*                                                                          */
3149 /* Returns:                                                                 */
3150 /*   Nothing.                                                               */
3151 /****************************************************************************/
3152 static void
3153 bce_init_com_cpu(struct bce_softc *sc)
3154 {
3155 	struct cpu_reg cpu_reg;
3156 	struct fw_info fw;
3157 
3158 	cpu_reg.mode = BCE_COM_CPU_MODE;
3159 	cpu_reg.mode_value_halt = BCE_COM_CPU_MODE_SOFT_HALT;
3160 	cpu_reg.mode_value_sstep = BCE_COM_CPU_MODE_STEP_ENA;
3161 	cpu_reg.state = BCE_COM_CPU_STATE;
3162 	cpu_reg.state_value_clear = 0xffffff;
3163 	cpu_reg.gpr0 = BCE_COM_CPU_REG_FILE;
3164 	cpu_reg.evmask = BCE_COM_CPU_EVENT_MASK;
3165 	cpu_reg.pc = BCE_COM_CPU_PROGRAM_COUNTER;
3166 	cpu_reg.inst = BCE_COM_CPU_INSTRUCTION;
3167 	cpu_reg.bp = BCE_COM_CPU_HW_BREAKPOINT;
3168 	cpu_reg.spad_base = BCE_COM_SCRATCH;
3169 	cpu_reg.mips_view_base = 0x8000000;
3170 
3171 	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
3172 	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
3173 		fw.ver_major = bce_COM_b09FwReleaseMajor;
3174 		fw.ver_minor = bce_COM_b09FwReleaseMinor;
3175 		fw.ver_fix = bce_COM_b09FwReleaseFix;
3176 		fw.start_addr = bce_COM_b09FwStartAddr;
3177 
3178 		fw.text_addr = bce_COM_b09FwTextAddr;
3179 		fw.text_len = bce_COM_b09FwTextLen;
3180 		fw.text_index = 0;
3181 		fw.text = bce_COM_b09FwText;
3182 
3183 		fw.data_addr = bce_COM_b09FwDataAddr;
3184 		fw.data_len = bce_COM_b09FwDataLen;
3185 		fw.data_index = 0;
3186 		fw.data = bce_COM_b09FwData;
3187 
3188 		fw.sbss_addr = bce_COM_b09FwSbssAddr;
3189 		fw.sbss_len = bce_COM_b09FwSbssLen;
3190 		fw.sbss_index = 0;
3191 		fw.sbss = bce_COM_b09FwSbss;
3192 
3193 		fw.bss_addr = bce_COM_b09FwBssAddr;
3194 		fw.bss_len = bce_COM_b09FwBssLen;
3195 		fw.bss_index = 0;
3196 		fw.bss = bce_COM_b09FwBss;
3197 
3198 		fw.rodata_addr = bce_COM_b09FwRodataAddr;
3199 		fw.rodata_len = bce_COM_b09FwRodataLen;
3200 		fw.rodata_index = 0;
3201 		fw.rodata = bce_COM_b09FwRodata;
3202 	} else {
3203 		fw.ver_major = bce_COM_b06FwReleaseMajor;
3204 		fw.ver_minor = bce_COM_b06FwReleaseMinor;
3205 		fw.ver_fix = bce_COM_b06FwReleaseFix;
3206 		fw.start_addr = bce_COM_b06FwStartAddr;
3207 
3208 		fw.text_addr = bce_COM_b06FwTextAddr;
3209 		fw.text_len = bce_COM_b06FwTextLen;
3210 		fw.text_index = 0;
3211 		fw.text = bce_COM_b06FwText;
3212 
3213 		fw.data_addr = bce_COM_b06FwDataAddr;
3214 		fw.data_len = bce_COM_b06FwDataLen;
3215 		fw.data_index = 0;
3216 		fw.data = bce_COM_b06FwData;
3217 
3218 		fw.sbss_addr = bce_COM_b06FwSbssAddr;
3219 		fw.sbss_len = bce_COM_b06FwSbssLen;
3220 		fw.sbss_index = 0;
3221 		fw.sbss = bce_COM_b06FwSbss;
3222 
3223 		fw.bss_addr = bce_COM_b06FwBssAddr;
3224 		fw.bss_len = bce_COM_b06FwBssLen;
3225 		fw.bss_index = 0;
3226 		fw.bss = bce_COM_b06FwBss;
3227 
3228 		fw.rodata_addr = bce_COM_b06FwRodataAddr;
3229 		fw.rodata_len = bce_COM_b06FwRodataLen;
3230 		fw.rodata_index = 0;
3231 		fw.rodata = bce_COM_b06FwRodata;
3232 	}
3233 
3234 	bce_load_cpu_fw(sc, &cpu_reg, &fw);
3235 	bce_start_cpu(sc, &cpu_reg);
3236 }
3237 
3238 /****************************************************************************/
3239 /* Initialize the RV2P, RX, TX, TPAT, COM, and CP CPUs.                     */
3240 /*                                                                          */
3241 /* Loads the firmware for each CPU and starts the CPU.                      */
3242 /*                                                                          */
3243 /* Returns:                                                                 */
3244 /*   Nothing.                                                               */
3245 /****************************************************************************/
3246 static void
3247 bce_init_cpus(struct bce_softc *sc)
3248 {
3249 	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
3250 	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
3251 		if (BCE_CHIP_REV(sc) == BCE_CHIP_REV_Ax) {
3252 			bce_load_rv2p_fw(sc, bce_xi90_rv2p_proc1,
3253 			    sizeof(bce_xi90_rv2p_proc1), RV2P_PROC1);
3254 			bce_load_rv2p_fw(sc, bce_xi90_rv2p_proc2,
3255 			    sizeof(bce_xi90_rv2p_proc2), RV2P_PROC2);
3256 		} else {
3257 			bce_load_rv2p_fw(sc, bce_xi_rv2p_proc1,
3258 			    sizeof(bce_xi_rv2p_proc1), RV2P_PROC1);
3259 			bce_load_rv2p_fw(sc, bce_xi_rv2p_proc2,
3260 			    sizeof(bce_xi_rv2p_proc2), RV2P_PROC2);
3261 		}
3262 	} else {
3263 		bce_load_rv2p_fw(sc, bce_rv2p_proc1,
3264 		    sizeof(bce_rv2p_proc1), RV2P_PROC1);
3265 		bce_load_rv2p_fw(sc, bce_rv2p_proc2,
3266 		    sizeof(bce_rv2p_proc2), RV2P_PROC2);
3267 	}
3268 
3269 	bce_init_rxp_cpu(sc);
3270 	bce_init_txp_cpu(sc);
3271 	bce_init_tpat_cpu(sc);
3272 	bce_init_com_cpu(sc);
3273 	bce_init_cp_cpu(sc);
3274 }
3275 
3276 /****************************************************************************/
3277 /* Initialize context memory.                                               */
3278 /*                                                                          */
3279 /* Clears the memory associated with each Context ID (CID).                 */
3280 /*                                                                          */
3281 /* Returns:                                                                 */
3282 /*   Nothing.                                                               */
3283 /****************************************************************************/
3284 static int
3285 bce_init_ctx(struct bce_softc *sc)
3286 {
3287 	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
3288 	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
3289 		/* DRC: Replace this constant value with a #define. */
3290 		int i, retry_cnt = 10;
3291 		uint32_t val;
3292 
3293 		/*
3294 		 * BCM5709 context memory may be cached
3295 		 * in host memory so prepare the host memory
3296 		 * for access.
3297 		 */
3298 		val = BCE_CTX_COMMAND_ENABLED | BCE_CTX_COMMAND_MEM_INIT |
3299 		    (1 << 12);
3300 		val |= (BCM_PAGE_BITS - 8) << 16;
3301 		REG_WR(sc, BCE_CTX_COMMAND, val);
3302 
3303 		/* Wait for mem init command to complete. */
3304 		for (i = 0; i < retry_cnt; i++) {
3305 			val = REG_RD(sc, BCE_CTX_COMMAND);
3306 			if (!(val & BCE_CTX_COMMAND_MEM_INIT))
3307 				break;
3308 			DELAY(2);
3309 		}
3310 		if (i == retry_cnt) {
3311 			device_printf(sc->bce_dev,
3312 			    "Context memory initialization failed!\n");
3313 			return ETIMEDOUT;
3314 		}
3315 
3316 		for (i = 0; i < sc->ctx_pages; i++) {
3317 			int j;
3318 
3319 			/*
3320 			 * Set the physical address of the context
3321 			 * memory cache.
3322 			 */
3323 			REG_WR(sc, BCE_CTX_HOST_PAGE_TBL_DATA0,
3324 			    BCE_ADDR_LO(sc->ctx_paddr[i] & 0xfffffff0) |
3325 			    BCE_CTX_HOST_PAGE_TBL_DATA0_VALID);
3326 			REG_WR(sc, BCE_CTX_HOST_PAGE_TBL_DATA1,
3327 			    BCE_ADDR_HI(sc->ctx_paddr[i]));
3328 			REG_WR(sc, BCE_CTX_HOST_PAGE_TBL_CTRL,
3329 			    i | BCE_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
3330 
3331 			/*
3332 			 * Verify that the context memory write was successful.
3333 			 */
3334 			for (j = 0; j < retry_cnt; j++) {
3335 				val = REG_RD(sc, BCE_CTX_HOST_PAGE_TBL_CTRL);
3336 				if ((val &
3337 				    BCE_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) == 0)
3338 					break;
3339 				DELAY(5);
3340 			}
3341 			if (j == retry_cnt) {
3342 				device_printf(sc->bce_dev,
3343 				    "Failed to initialize context page!\n");
3344 				return ETIMEDOUT;
3345 			}
3346 		}
3347 	} else {
3348 		uint32_t vcid_addr, offset;
3349 
3350 		/*
3351 		 * For the 5706/5708, context memory is local to
3352 		 * the controller, so initialize the controller
3353 		 * context memory.
3354 		 */
3355 
3356 		vcid_addr = GET_CID_ADDR(96);
3357 		while (vcid_addr) {
3358 			vcid_addr -= PHY_CTX_SIZE;
3359 
3360 			REG_WR(sc, BCE_CTX_VIRT_ADDR, 0);
3361 			REG_WR(sc, BCE_CTX_PAGE_TBL, vcid_addr);
3362 
3363 			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
3364 				CTX_WR(sc, 0x00, offset, 0);
3365 
3366 			REG_WR(sc, BCE_CTX_VIRT_ADDR, vcid_addr);
3367 			REG_WR(sc, BCE_CTX_PAGE_TBL, vcid_addr);
3368 		}
3369 	}
3370 	return 0;
3371 }
3372 
3373 /****************************************************************************/
3374 /* Fetch the permanent MAC address of the controller.                       */
3375 /*                                                                          */
3376 /* Returns:                                                                 */
3377 /*   Nothing.                                                               */
3378 /****************************************************************************/
3379 static void
3380 bce_get_mac_addr(struct bce_softc *sc)
3381 {
3382 	uint32_t mac_lo = 0, mac_hi = 0;
3383 
3384 	/*
3385 	 * The NetXtreme II bootcode populates various NIC
3386 	 * power-on and runtime configuration items in a
3387 	 * shared memory area.  The factory configured MAC
3388 	 * address is available from both NVRAM and the
3389 	 * shared memory area so we'll read the value from
3390 	 * shared memory for speed.
3391 	 */
3392 
3393 	mac_hi = bce_shmem_rd(sc,  BCE_PORT_HW_CFG_MAC_UPPER);
3394 	mac_lo = bce_shmem_rd(sc, BCE_PORT_HW_CFG_MAC_LOWER);
3395 
3396 	if (mac_lo == 0 && mac_hi == 0) {
3397 		if_printf(&sc->arpcom.ac_if, "Invalid Ethernet address!\n");
3398 	} else {
3399 		sc->eaddr[0] = (u_char)(mac_hi >> 8);
3400 		sc->eaddr[1] = (u_char)(mac_hi >> 0);
3401 		sc->eaddr[2] = (u_char)(mac_lo >> 24);
3402 		sc->eaddr[3] = (u_char)(mac_lo >> 16);
3403 		sc->eaddr[4] = (u_char)(mac_lo >> 8);
3404 		sc->eaddr[5] = (u_char)(mac_lo >> 0);
3405 	}
3406 }
3407 
3408 /****************************************************************************/
3409 /* Program the MAC address.                                                 */
3410 /*                                                                          */
3411 /* Returns:                                                                 */
3412 /*   Nothing.                                                               */
3413 /****************************************************************************/
3414 static void
3415 bce_set_mac_addr(struct bce_softc *sc)
3416 {
3417 	const uint8_t *mac_addr = sc->eaddr;
3418 	uint32_t val;
3419 
3420 	val = (mac_addr[0] << 8) | mac_addr[1];
3421 	REG_WR(sc, BCE_EMAC_MAC_MATCH0, val);
3422 
3423 	val = (mac_addr[2] << 24) |
3424 	      (mac_addr[3] << 16) |
3425 	      (mac_addr[4] << 8) |
3426 	      mac_addr[5];
3427 	REG_WR(sc, BCE_EMAC_MAC_MATCH1, val);
3428 }
3429 
3430 /****************************************************************************/
3431 /* Stop the controller.                                                     */
3432 /*                                                                          */
3433 /* Returns:                                                                 */
3434 /*   Nothing.                                                               */
3435 /****************************************************************************/
3436 static void
3437 bce_stop(struct bce_softc *sc)
3438 {
3439 	struct ifnet *ifp = &sc->arpcom.ac_if;
3440 	int i;
3441 
3442 	ASSERT_IFNET_SERIALIZED_ALL(ifp);
3443 
3444 	callout_stop(&sc->bce_tick_callout);
3445 
3446 	/* Disable the transmit/receive blocks. */
3447 	REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS, BCE_MISC_ENABLE_CLR_DEFAULT);
3448 	REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS);
3449 	DELAY(20);
3450 
3451 	bce_disable_intr(sc);
3452 
3453 	ifp->if_flags &= ~IFF_RUNNING;
3454 	for (i = 0; i < sc->tx_ring_cnt; ++i) {
3455 		ifsq_clr_oactive(sc->tx_rings[i].ifsq);
3456 		ifsq_watchdog_stop(&sc->tx_rings[i].tx_watchdog);
3457 	}
3458 
3459 	/* Free the RX lists. */
3460 	for (i = 0; i < sc->rx_ring_cnt; ++i)
3461 		bce_free_rx_chain(&sc->rx_rings[i]);
3462 
3463 	/* Free TX buffers. */
3464 	for (i = 0; i < sc->tx_ring_cnt; ++i)
3465 		bce_free_tx_chain(&sc->tx_rings[i]);
3466 
3467 	sc->bce_link = 0;
3468 	sc->bce_coalchg_mask = 0;
3469 }
3470 
3471 static int
3472 bce_reset(struct bce_softc *sc, uint32_t reset_code)
3473 {
3474 	uint32_t val;
3475 	int i, rc = 0;
3476 
3477 	/* Wait for pending PCI transactions to complete. */
3478 	REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS,
3479 	       BCE_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
3480 	       BCE_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
3481 	       BCE_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
3482 	       BCE_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
3483 	val = REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS);
3484 	DELAY(5);
3485 
3486 	/* Disable DMA */
3487 	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
3488 	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
3489 		val = REG_RD(sc, BCE_MISC_NEW_CORE_CTL);
3490 		val &= ~BCE_MISC_NEW_CORE_CTL_DMA_ENABLE;
3491 		REG_WR(sc, BCE_MISC_NEW_CORE_CTL, val);
3492 	}
3493 
3494 	/* Assume bootcode is running. */
3495 	sc->bce_fw_timed_out = 0;
3496 	sc->bce_drv_cardiac_arrest = 0;
3497 
3498 	/* Give the firmware a chance to prepare for the reset. */
3499 	rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT0 | reset_code);
3500 	if (rc) {
3501 		if_printf(&sc->arpcom.ac_if,
3502 			  "Firmware is not ready for reset\n");
3503 		return rc;
3504 	}
3505 
3506 	/* Set a firmware reminder that this is a soft reset. */
3507 	bce_shmem_wr(sc, BCE_DRV_RESET_SIGNATURE,
3508 	    BCE_DRV_RESET_SIGNATURE_MAGIC);
3509 
3510 	/* Dummy read to force the chip to complete all current transactions. */
3511 	val = REG_RD(sc, BCE_MISC_ID);
3512 
3513 	/* Chip reset. */
3514 	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
3515 	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
3516 		REG_WR(sc, BCE_MISC_COMMAND, BCE_MISC_COMMAND_SW_RESET);
3517 		REG_RD(sc, BCE_MISC_COMMAND);
3518 		DELAY(5);
3519 
3520 		val = BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3521 		    BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3522 
3523 		pci_write_config(sc->bce_dev, BCE_PCICFG_MISC_CONFIG, val, 4);
3524 	} else {
3525 		val = BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3526 		    BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3527 		    BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3528 		REG_WR(sc, BCE_PCICFG_MISC_CONFIG, val);
3529 
3530 		/* Allow up to 30us for reset to complete. */
3531 		for (i = 0; i < 10; i++) {
3532 			val = REG_RD(sc, BCE_PCICFG_MISC_CONFIG);
3533 			if ((val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3534 			    BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
3535 				break;
3536 			DELAY(10);
3537 		}
3538 
3539 		/* Check that reset completed successfully. */
3540 		if (val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3541 		    BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
3542 			if_printf(&sc->arpcom.ac_if, "Reset failed!\n");
3543 			return EBUSY;
3544 		}
3545 	}
3546 
3547 	/* Make sure byte swapping is properly configured. */
3548 	val = REG_RD(sc, BCE_PCI_SWAP_DIAG0);
3549 	if (val != 0x01020304) {
3550 		if_printf(&sc->arpcom.ac_if, "Byte swap is incorrect!\n");
3551 		return ENODEV;
3552 	}
3553 
3554 	/* Just completed a reset, assume that firmware is running again. */
3555 	sc->bce_fw_timed_out = 0;
3556 	sc->bce_drv_cardiac_arrest = 0;
3557 
3558 	/* Wait for the firmware to finish its initialization. */
3559 	rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT1 | reset_code);
3560 	if (rc) {
3561 		if_printf(&sc->arpcom.ac_if,
3562 			  "Firmware did not complete initialization!\n");
3563 	}
3564 
3565 	if (sc->bce_irq_type == PCI_INTR_TYPE_MSIX) {
3566 		bce_setup_msix_table(sc);
3567 		/* Prevent MSIX table reads and write from timing out */
3568 		REG_WR(sc, BCE_MISC_ECO_HW_CTL,
3569 		    BCE_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
3570 
3571 	}
3572 	return rc;
3573 }
3574 
3575 static int
3576 bce_chipinit(struct bce_softc *sc)
3577 {
3578 	uint32_t val;
3579 	int rc = 0;
3580 
3581 	/* Make sure the interrupt is not active. */
3582 	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, BCE_PCICFG_INT_ACK_CMD_MASK_INT);
3583 	REG_RD(sc, BCE_PCICFG_INT_ACK_CMD);
3584 
3585 	/*
3586 	 * Initialize DMA byte/word swapping, configure the number of DMA
3587 	 * channels and PCI clock compensation delay.
3588 	 */
3589 	val = BCE_DMA_CONFIG_DATA_BYTE_SWAP |
3590 	      BCE_DMA_CONFIG_DATA_WORD_SWAP |
3591 #if BYTE_ORDER == BIG_ENDIAN
3592 	      BCE_DMA_CONFIG_CNTL_BYTE_SWAP |
3593 #endif
3594 	      BCE_DMA_CONFIG_CNTL_WORD_SWAP |
3595 	      DMA_READ_CHANS << 12 |
3596 	      DMA_WRITE_CHANS << 16;
3597 
3598 	val |= (0x2 << 20) | BCE_DMA_CONFIG_CNTL_PCI_COMP_DLY;
3599 
3600 	if ((sc->bce_flags & BCE_PCIX_FLAG) && sc->bus_speed_mhz == 133)
3601 		val |= BCE_DMA_CONFIG_PCI_FAST_CLK_CMP;
3602 
3603 	/*
3604 	 * This setting resolves a problem observed on certain Intel PCI
3605 	 * chipsets that cannot handle multiple outstanding DMA operations.
3606 	 * See errata E9_5706A1_65.
3607 	 */
3608 	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706 &&
3609 	    BCE_CHIP_ID(sc) != BCE_CHIP_ID_5706_A0 &&
3610 	    !(sc->bce_flags & BCE_PCIX_FLAG))
3611 		val |= BCE_DMA_CONFIG_CNTL_PING_PONG_DMA;
3612 
3613 	REG_WR(sc, BCE_DMA_CONFIG, val);
3614 
3615 	/* Enable the RX_V2P and Context state machines before access. */
3616 	REG_WR(sc, BCE_MISC_ENABLE_SET_BITS,
3617 	       BCE_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
3618 	       BCE_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
3619 	       BCE_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
3620 
3621 	/* Initialize context mapping and zero out the quick contexts. */
3622 	rc = bce_init_ctx(sc);
3623 	if (rc != 0)
3624 		return rc;
3625 
3626 	/* Initialize the on-boards CPUs */
3627 	bce_init_cpus(sc);
3628 
3629 	/* Enable management frames (NC-SI) to flow to the MCP. */
3630 	if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) {
3631 		val = REG_RD(sc, BCE_RPM_MGMT_PKT_CTRL) |
3632 		    BCE_RPM_MGMT_PKT_CTRL_MGMT_EN;
3633 		REG_WR(sc, BCE_RPM_MGMT_PKT_CTRL, val);
3634 	}
3635 
3636 	/* Prepare NVRAM for access. */
3637 	rc = bce_init_nvram(sc);
3638 	if (rc != 0)
3639 		return rc;
3640 
3641 	/* Set the kernel bypass block size */
3642 	val = REG_RD(sc, BCE_MQ_CONFIG);
3643 	val &= ~BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE;
3644 	val |= BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
3645 
3646 	/* Enable bins used on the 5709/5716. */
3647 	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
3648 	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
3649 		val |= BCE_MQ_CONFIG_BIN_MQ_MODE;
3650 		if (BCE_CHIP_ID(sc) == BCE_CHIP_ID_5709_A1)
3651 			val |= BCE_MQ_CONFIG_HALT_DIS;
3652 	}
3653 
3654 	REG_WR(sc, BCE_MQ_CONFIG, val);
3655 
3656 	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
3657 	REG_WR(sc, BCE_MQ_KNL_BYP_WIND_START, val);
3658 	REG_WR(sc, BCE_MQ_KNL_WIND_END, val);
3659 
3660 	/* Set the page size and clear the RV2P processor stall bits. */
3661 	val = (BCM_PAGE_BITS - 8) << 24;
3662 	REG_WR(sc, BCE_RV2P_CONFIG, val);
3663 
3664 	/* Configure page size. */
3665 	val = REG_RD(sc, BCE_TBDR_CONFIG);
3666 	val &= ~BCE_TBDR_CONFIG_PAGE_SIZE;
3667 	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
3668 	REG_WR(sc, BCE_TBDR_CONFIG, val);
3669 
3670 	/* Set the perfect match control register to default. */
3671 	REG_WR_IND(sc, BCE_RXP_PM_CTRL, 0);
3672 
3673 	return 0;
3674 }
3675 
3676 /****************************************************************************/
3677 /* Initialize the controller in preparation to send/receive traffic.        */
3678 /*                                                                          */
3679 /* Returns:                                                                 */
3680 /*   0 for success, positive value for failure.                             */
3681 /****************************************************************************/
3682 static int
3683 bce_blockinit(struct bce_softc *sc)
3684 {
3685 	uint32_t reg, val;
3686 	int i;
3687 
3688 	/* Load the hardware default MAC address. */
3689 	bce_set_mac_addr(sc);
3690 
3691 	/* Set the Ethernet backoff seed value */
3692 	val = sc->eaddr[0] + (sc->eaddr[1] << 8) + (sc->eaddr[2] << 16) +
3693 	      sc->eaddr[3] + (sc->eaddr[4] << 8) + (sc->eaddr[5] << 16);
3694 	REG_WR(sc, BCE_EMAC_BACKOFF_SEED, val);
3695 
3696 	sc->rx_mode = BCE_EMAC_RX_MODE_SORT_MODE;
3697 
3698 	/* Set up link change interrupt generation. */
3699 	REG_WR(sc, BCE_EMAC_ATTENTION_ENA, BCE_EMAC_ATTENTION_ENA_LINK);
3700 
3701 	/* Program the physical address of the status block. */
3702 	REG_WR(sc, BCE_HC_STATUS_ADDR_L, BCE_ADDR_LO(sc->status_block_paddr));
3703 	REG_WR(sc, BCE_HC_STATUS_ADDR_H, BCE_ADDR_HI(sc->status_block_paddr));
3704 
3705 	/* Program the physical address of the statistics block. */
3706 	REG_WR(sc, BCE_HC_STATISTICS_ADDR_L,
3707 	       BCE_ADDR_LO(sc->stats_block_paddr));
3708 	REG_WR(sc, BCE_HC_STATISTICS_ADDR_H,
3709 	       BCE_ADDR_HI(sc->stats_block_paddr));
3710 
3711 	/* Program various host coalescing parameters. */
3712 	REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
3713 	       (sc->bce_tx_quick_cons_trip_int << 16) |
3714 	       sc->bce_tx_quick_cons_trip);
3715 	REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
3716 	       (sc->bce_rx_quick_cons_trip_int << 16) |
3717 	       sc->bce_rx_quick_cons_trip);
3718 	REG_WR(sc, BCE_HC_COMP_PROD_TRIP,
3719 	       (sc->bce_comp_prod_trip_int << 16) | sc->bce_comp_prod_trip);
3720 	REG_WR(sc, BCE_HC_TX_TICKS,
3721 	       (sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks);
3722 	REG_WR(sc, BCE_HC_RX_TICKS,
3723 	       (sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks);
3724 	REG_WR(sc, BCE_HC_COM_TICKS,
3725 	       (sc->bce_com_ticks_int << 16) | sc->bce_com_ticks);
3726 	REG_WR(sc, BCE_HC_CMD_TICKS,
3727 	       (sc->bce_cmd_ticks_int << 16) | sc->bce_cmd_ticks);
3728 	REG_WR(sc, BCE_HC_STATS_TICKS, (sc->bce_stats_ticks & 0xffff00));
3729 	REG_WR(sc, BCE_HC_STAT_COLLECT_TICKS, 0xbb8);	/* 3ms */
3730 
3731 	if (sc->bce_irq_type == PCI_INTR_TYPE_MSIX)
3732 		REG_WR(sc, BCE_HC_MSIX_BIT_VECTOR, BCE_HC_MSIX_BIT_VECTOR_VAL);
3733 
3734 	val = BCE_HC_CONFIG_TX_TMR_MODE | BCE_HC_CONFIG_COLLECT_STATS;
3735 	if ((sc->bce_flags & BCE_ONESHOT_MSI_FLAG) ||
3736 	    sc->bce_irq_type == PCI_INTR_TYPE_MSIX) {
3737 		if (bootverbose) {
3738 			if (sc->bce_irq_type == PCI_INTR_TYPE_MSIX) {
3739 				if_printf(&sc->arpcom.ac_if,
3740 				    "using MSI-X\n");
3741 			} else {
3742 				if_printf(&sc->arpcom.ac_if,
3743 				    "using oneshot MSI\n");
3744 			}
3745 		}
3746 		val |= BCE_HC_CONFIG_ONE_SHOT | BCE_HC_CONFIG_USE_INT_PARAM;
3747 		if (sc->bce_irq_type == PCI_INTR_TYPE_MSIX)
3748 			val |= BCE_HC_CONFIG_SB_ADDR_INC_128B;
3749 	}
3750 	REG_WR(sc, BCE_HC_CONFIG, val);
3751 
3752 	for (i = 1; i < sc->rx_ring_cnt; ++i) {
3753 		uint32_t base;
3754 
3755 		base = ((i - 1) * BCE_HC_SB_CONFIG_SIZE) + BCE_HC_SB_CONFIG_1;
3756 		KKASSERT(base <= BCE_HC_SB_CONFIG_8);
3757 
3758 		REG_WR(sc, base,
3759 		    BCE_HC_SB_CONFIG_1_TX_TMR_MODE |
3760 		    /* BCE_HC_SB_CONFIG_1_RX_TMR_MODE | */
3761 		    BCE_HC_SB_CONFIG_1_ONE_SHOT);
3762 
3763 		REG_WR(sc, base + BCE_HC_TX_QUICK_CONS_TRIP_OFF,
3764 		    (sc->bce_tx_quick_cons_trip_int << 16) |
3765 		    sc->bce_tx_quick_cons_trip);
3766 		REG_WR(sc, base + BCE_HC_RX_QUICK_CONS_TRIP_OFF,
3767 		    (sc->bce_rx_quick_cons_trip_int << 16) |
3768 		    sc->bce_rx_quick_cons_trip);
3769 		REG_WR(sc, base + BCE_HC_TX_TICKS_OFF,
3770 		    (sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks);
3771 		REG_WR(sc, base + BCE_HC_RX_TICKS_OFF,
3772 		    (sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks);
3773 	}
3774 
3775 	/* Clear the internal statistics counters. */
3776 	REG_WR(sc, BCE_HC_COMMAND, BCE_HC_COMMAND_CLR_STAT_NOW);
3777 
3778 	/* Verify that bootcode is running. */
3779 	reg = bce_shmem_rd(sc, BCE_DEV_INFO_SIGNATURE);
3780 
3781 	if ((reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
3782 	    BCE_DEV_INFO_SIGNATURE_MAGIC) {
3783 		if_printf(&sc->arpcom.ac_if,
3784 			  "Bootcode not running! Found: 0x%08X, "
3785 			  "Expected: 08%08X\n",
3786 			  reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK,
3787 			  BCE_DEV_INFO_SIGNATURE_MAGIC);
3788 		return ENODEV;
3789 	}
3790 
3791 	/* Enable DMA */
3792 	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
3793 	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
3794 		val = REG_RD(sc, BCE_MISC_NEW_CORE_CTL);
3795 		val |= BCE_MISC_NEW_CORE_CTL_DMA_ENABLE;
3796 		REG_WR(sc, BCE_MISC_NEW_CORE_CTL, val);
3797 	}
3798 
3799 	/* Allow bootcode to apply any additional fixes before enabling MAC. */
3800 	bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT2 | BCE_DRV_MSG_CODE_RESET);
3801 
3802 	/* Enable link state change interrupt generation. */
3803 	REG_WR(sc, BCE_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);
3804 
3805 	/* Enable the RXP. */
3806 	bce_start_rxp_cpu(sc);
3807 
3808 	/* Disable management frames (NC-SI) from flowing to the MCP. */
3809 	if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) {
3810 		val = REG_RD(sc, BCE_RPM_MGMT_PKT_CTRL) &
3811 		    ~BCE_RPM_MGMT_PKT_CTRL_MGMT_EN;
3812 		REG_WR(sc, BCE_RPM_MGMT_PKT_CTRL, val);
3813 	}
3814 
3815 	/* Enable all remaining blocks in the MAC. */
3816 	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
3817 	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
3818 		REG_WR(sc, BCE_MISC_ENABLE_SET_BITS,
3819 		    BCE_MISC_ENABLE_DEFAULT_XI);
3820 	} else {
3821 		REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, BCE_MISC_ENABLE_DEFAULT);
3822 	}
3823 	REG_RD(sc, BCE_MISC_ENABLE_SET_BITS);
3824 	DELAY(20);
3825 
3826 	/* Save the current host coalescing block settings. */
3827 	sc->hc_command = REG_RD(sc, BCE_HC_COMMAND);
3828 
3829 	return 0;
3830 }
3831 
3832 /****************************************************************************/
3833 /* Encapsulate an mbuf cluster into the rx_bd chain.                        */
3834 /*                                                                          */
3835 /* The NetXtreme II can support Jumbo frames by using multiple rx_bd's.     */
3836 /* This routine will map an mbuf cluster into 1 or more rx_bd's as          */
3837 /* necessary.                                                               */
3838 /*                                                                          */
3839 /* Returns:                                                                 */
3840 /*   0 for success, positive value for failure.                             */
3841 /****************************************************************************/
3842 static int
3843 bce_newbuf_std(struct bce_rx_ring *rxr, uint16_t *prod, uint16_t chain_prod,
3844     uint32_t *prod_bseq, int init)
3845 {
3846 	struct bce_rx_buf *rx_buf;
3847 	bus_dmamap_t map;
3848 	bus_dma_segment_t seg;
3849 	struct mbuf *m_new;
3850 	int error, nseg;
3851 
3852 	/* This is a new mbuf allocation. */
3853 	m_new = m_getcl(init ? M_WAITOK : M_NOWAIT, MT_DATA, M_PKTHDR);
3854 	if (m_new == NULL)
3855 		return ENOBUFS;
3856 
3857 	m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
3858 
3859 	/* Map the mbuf cluster into device memory. */
3860 	error = bus_dmamap_load_mbuf_segment(rxr->rx_mbuf_tag,
3861 	    rxr->rx_mbuf_tmpmap, m_new, &seg, 1, &nseg, BUS_DMA_NOWAIT);
3862 	if (error) {
3863 		m_freem(m_new);
3864 		if (init) {
3865 			if_printf(&rxr->sc->arpcom.ac_if,
3866 			    "Error mapping mbuf into RX chain!\n");
3867 		}
3868 		return error;
3869 	}
3870 
3871 	rx_buf = &rxr->rx_bufs[chain_prod];
3872 	if (rx_buf->rx_mbuf_ptr != NULL)
3873 		bus_dmamap_unload(rxr->rx_mbuf_tag, rx_buf->rx_mbuf_map);
3874 
3875 	map = rx_buf->rx_mbuf_map;
3876 	rx_buf->rx_mbuf_map = rxr->rx_mbuf_tmpmap;
3877 	rxr->rx_mbuf_tmpmap = map;
3878 
3879 	/* Save the mbuf and update our counter. */
3880 	rx_buf->rx_mbuf_ptr = m_new;
3881 	rx_buf->rx_mbuf_paddr = seg.ds_addr;
3882 	rxr->free_rx_bd--;
3883 
3884 	bce_setup_rxdesc_std(rxr, chain_prod, prod_bseq);
3885 
3886 	return 0;
3887 }
3888 
3889 static void
3890 bce_setup_rxdesc_std(struct bce_rx_ring *rxr, uint16_t chain_prod,
3891     uint32_t *prod_bseq)
3892 {
3893 	const struct bce_rx_buf *rx_buf;
3894 	struct rx_bd *rxbd;
3895 	bus_addr_t paddr;
3896 	int len;
3897 
3898 	rx_buf = &rxr->rx_bufs[chain_prod];
3899 	paddr = rx_buf->rx_mbuf_paddr;
3900 	len = rx_buf->rx_mbuf_ptr->m_len;
3901 
3902 	/* Setup the rx_bd for the first segment. */
3903 	rxbd = &rxr->rx_bd_chain[RX_PAGE(chain_prod)][RX_IDX(chain_prod)];
3904 
3905 	rxbd->rx_bd_haddr_lo = htole32(BCE_ADDR_LO(paddr));
3906 	rxbd->rx_bd_haddr_hi = htole32(BCE_ADDR_HI(paddr));
3907 	rxbd->rx_bd_len = htole32(len);
3908 	rxbd->rx_bd_flags = htole32(RX_BD_FLAGS_START);
3909 	*prod_bseq += len;
3910 
3911 	rxbd->rx_bd_flags |= htole32(RX_BD_FLAGS_END);
3912 }
3913 
3914 /****************************************************************************/
3915 /* Initialize the TX context memory.                                        */
3916 /*                                                                          */
3917 /* Returns:                                                                 */
3918 /*   Nothing                                                                */
3919 /****************************************************************************/
3920 static void
3921 bce_init_tx_context(struct bce_tx_ring *txr)
3922 {
3923 	uint32_t val;
3924 
3925 	/* Initialize the context ID for an L2 TX chain. */
3926 	if (BCE_CHIP_NUM(txr->sc) == BCE_CHIP_NUM_5709 ||
3927 	    BCE_CHIP_NUM(txr->sc) == BCE_CHIP_NUM_5716) {
3928 		/* Set the CID type to support an L2 connection. */
3929 		val = BCE_L2CTX_TX_TYPE_TYPE_L2 | BCE_L2CTX_TX_TYPE_SIZE_L2;
3930 		CTX_WR(txr->sc, GET_CID_ADDR(txr->tx_cid),
3931 		    BCE_L2CTX_TX_TYPE_XI, val);
3932 		val = BCE_L2CTX_TX_CMD_TYPE_TYPE_L2 | (8 << 16);
3933 		CTX_WR(txr->sc, GET_CID_ADDR(txr->tx_cid),
3934 		    BCE_L2CTX_TX_CMD_TYPE_XI, val);
3935 
3936 		/* Point the hardware to the first page in the chain. */
3937 		val = BCE_ADDR_HI(txr->tx_bd_chain_paddr[0]);
3938 		CTX_WR(txr->sc, GET_CID_ADDR(txr->tx_cid),
3939 		    BCE_L2CTX_TX_TBDR_BHADDR_HI_XI, val);
3940 		val = BCE_ADDR_LO(txr->tx_bd_chain_paddr[0]);
3941 		CTX_WR(txr->sc, GET_CID_ADDR(txr->tx_cid),
3942 		    BCE_L2CTX_TX_TBDR_BHADDR_LO_XI, val);
3943 	} else {
3944 		/* Set the CID type to support an L2 connection. */
3945 		val = BCE_L2CTX_TX_TYPE_TYPE_L2 | BCE_L2CTX_TX_TYPE_SIZE_L2;
3946 		CTX_WR(txr->sc, GET_CID_ADDR(txr->tx_cid),
3947 		    BCE_L2CTX_TX_TYPE, val);
3948 		val = BCE_L2CTX_TX_CMD_TYPE_TYPE_L2 | (8 << 16);
3949 		CTX_WR(txr->sc, GET_CID_ADDR(txr->tx_cid),
3950 		    BCE_L2CTX_TX_CMD_TYPE, val);
3951 
3952 		/* Point the hardware to the first page in the chain. */
3953 		val = BCE_ADDR_HI(txr->tx_bd_chain_paddr[0]);
3954 		CTX_WR(txr->sc, GET_CID_ADDR(txr->tx_cid),
3955 		    BCE_L2CTX_TX_TBDR_BHADDR_HI, val);
3956 		val = BCE_ADDR_LO(txr->tx_bd_chain_paddr[0]);
3957 		CTX_WR(txr->sc, GET_CID_ADDR(txr->tx_cid),
3958 		    BCE_L2CTX_TX_TBDR_BHADDR_LO, val);
3959 	}
3960 }
3961 
3962 /****************************************************************************/
3963 /* Allocate memory and initialize the TX data structures.                   */
3964 /*                                                                          */
3965 /* Returns:                                                                 */
3966 /*   0 for success, positive value for failure.                             */
3967 /****************************************************************************/
3968 static int
3969 bce_init_tx_chain(struct bce_tx_ring *txr)
3970 {
3971 	struct tx_bd *txbd;
3972 	int i, rc = 0;
3973 
3974 	/* Set the initial TX producer/consumer indices. */
3975 	txr->tx_prod = 0;
3976 	txr->tx_cons = 0;
3977 	txr->tx_prod_bseq = 0;
3978 	txr->used_tx_bd = 0;
3979 	txr->max_tx_bd = USABLE_TX_BD(txr);
3980 
3981 	/*
3982 	 * The NetXtreme II supports a linked-list structre called
3983 	 * a Buffer Descriptor Chain (or BD chain).  A BD chain
3984 	 * consists of a series of 1 or more chain pages, each of which
3985 	 * consists of a fixed number of BD entries.
3986 	 * The last BD entry on each page is a pointer to the next page
3987 	 * in the chain, and the last pointer in the BD chain
3988 	 * points back to the beginning of the chain.
3989 	 */
3990 
3991 	/* Set the TX next pointer chain entries. */
3992 	for (i = 0; i < txr->tx_pages; i++) {
3993 		int j;
3994 
3995 		txbd = &txr->tx_bd_chain[i][USABLE_TX_BD_PER_PAGE];
3996 
3997 		/* Check if we've reached the last page. */
3998 		if (i == (txr->tx_pages - 1))
3999 			j = 0;
4000 		else
4001 			j = i + 1;
4002 
4003 		txbd->tx_bd_haddr_hi =
4004 		    htole32(BCE_ADDR_HI(txr->tx_bd_chain_paddr[j]));
4005 		txbd->tx_bd_haddr_lo =
4006 		    htole32(BCE_ADDR_LO(txr->tx_bd_chain_paddr[j]));
4007 	}
4008 	bce_init_tx_context(txr);
4009 
4010 	return(rc);
4011 }
4012 
4013 /****************************************************************************/
4014 /* Free memory and clear the TX data structures.                            */
4015 /*                                                                          */
4016 /* Returns:                                                                 */
4017 /*   Nothing.                                                               */
4018 /****************************************************************************/
4019 static void
4020 bce_free_tx_chain(struct bce_tx_ring *txr)
4021 {
4022 	int i;
4023 
4024 	/* Unmap, unload, and free any mbufs still in the TX mbuf chain. */
4025 	for (i = 0; i < TOTAL_TX_BD(txr); i++) {
4026 		struct bce_tx_buf *tx_buf = &txr->tx_bufs[i];
4027 
4028 		if (tx_buf->tx_mbuf_ptr != NULL) {
4029 			bus_dmamap_unload(txr->tx_mbuf_tag,
4030 			    tx_buf->tx_mbuf_map);
4031 			m_freem(tx_buf->tx_mbuf_ptr);
4032 			tx_buf->tx_mbuf_ptr = NULL;
4033 		}
4034 	}
4035 
4036 	/* Clear each TX chain page. */
4037 	for (i = 0; i < txr->tx_pages; i++)
4038 		bzero(txr->tx_bd_chain[i], BCE_TX_CHAIN_PAGE_SZ);
4039 	txr->used_tx_bd = 0;
4040 }
4041 
4042 /****************************************************************************/
4043 /* Initialize the RX context memory.                                        */
4044 /*                                                                          */
4045 /* Returns:                                                                 */
4046 /*   Nothing                                                                */
4047 /****************************************************************************/
4048 static void
4049 bce_init_rx_context(struct bce_rx_ring *rxr)
4050 {
4051 	uint32_t val;
4052 
4053 	/* Initialize the context ID for an L2 RX chain. */
4054 	val = BCE_L2CTX_RX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE |
4055 	    BCE_L2CTX_RX_CTX_TYPE_SIZE_L2 | (0x02 << 8);
4056 
4057 	/*
4058 	 * Set the level for generating pause frames
4059 	 * when the number of available rx_bd's gets
4060 	 * too low (the low watermark) and the level
4061 	 * when pause frames can be stopped (the high
4062 	 * watermark).
4063 	 */
4064 	if (BCE_CHIP_NUM(rxr->sc) == BCE_CHIP_NUM_5709 ||
4065 	    BCE_CHIP_NUM(rxr->sc) == BCE_CHIP_NUM_5716) {
4066 		uint32_t lo_water, hi_water;
4067 
4068 		lo_water = BCE_L2CTX_RX_LO_WATER_MARK_DEFAULT;
4069 		hi_water = USABLE_RX_BD(rxr) / 4;
4070 
4071 		lo_water /= BCE_L2CTX_RX_LO_WATER_MARK_SCALE;
4072 		hi_water /= BCE_L2CTX_RX_HI_WATER_MARK_SCALE;
4073 
4074 		if (hi_water > 0xf)
4075 			hi_water = 0xf;
4076 		else if (hi_water == 0)
4077 			lo_water = 0;
4078 		val |= lo_water |
4079 		    (hi_water << BCE_L2CTX_RX_HI_WATER_MARK_SHIFT);
4080 	}
4081 
4082  	CTX_WR(rxr->sc, GET_CID_ADDR(rxr->rx_cid),
4083 	    BCE_L2CTX_RX_CTX_TYPE, val);
4084 
4085 	/* Setup the MQ BIN mapping for l2_ctx_host_bseq. */
4086 	if (BCE_CHIP_NUM(rxr->sc) == BCE_CHIP_NUM_5709 ||
4087 	    BCE_CHIP_NUM(rxr->sc) == BCE_CHIP_NUM_5716) {
4088 		val = REG_RD(rxr->sc, BCE_MQ_MAP_L2_5);
4089 		REG_WR(rxr->sc, BCE_MQ_MAP_L2_5, val | BCE_MQ_MAP_L2_5_ARM);
4090 	}
4091 
4092 	/* Point the hardware to the first page in the chain. */
4093 	val = BCE_ADDR_HI(rxr->rx_bd_chain_paddr[0]);
4094 	CTX_WR(rxr->sc, GET_CID_ADDR(rxr->rx_cid),
4095 	    BCE_L2CTX_RX_NX_BDHADDR_HI, val);
4096 	val = BCE_ADDR_LO(rxr->rx_bd_chain_paddr[0]);
4097 	CTX_WR(rxr->sc, GET_CID_ADDR(rxr->rx_cid),
4098 	    BCE_L2CTX_RX_NX_BDHADDR_LO, val);
4099 }
4100 
4101 /****************************************************************************/
4102 /* Allocate memory and initialize the RX data structures.                   */
4103 /*                                                                          */
4104 /* Returns:                                                                 */
4105 /*   0 for success, positive value for failure.                             */
4106 /****************************************************************************/
4107 static int
4108 bce_init_rx_chain(struct bce_rx_ring *rxr)
4109 {
4110 	struct rx_bd *rxbd;
4111 	int i, rc = 0;
4112 	uint16_t prod, chain_prod;
4113 	uint32_t prod_bseq;
4114 
4115 	/* Initialize the RX producer and consumer indices. */
4116 	rxr->rx_prod = 0;
4117 	rxr->rx_cons = 0;
4118 	rxr->rx_prod_bseq = 0;
4119 	rxr->free_rx_bd = USABLE_RX_BD(rxr);
4120 	rxr->max_rx_bd = USABLE_RX_BD(rxr);
4121 
4122 	/* Clear cache status index */
4123 	rxr->last_status_idx = 0;
4124 
4125 	/* Initialize the RX next pointer chain entries. */
4126 	for (i = 0; i < rxr->rx_pages; i++) {
4127 		int j;
4128 
4129 		rxbd = &rxr->rx_bd_chain[i][USABLE_RX_BD_PER_PAGE];
4130 
4131 		/* Check if we've reached the last page. */
4132 		if (i == (rxr->rx_pages - 1))
4133 			j = 0;
4134 		else
4135 			j = i + 1;
4136 
4137 		/* Setup the chain page pointers. */
4138 		rxbd->rx_bd_haddr_hi =
4139 		    htole32(BCE_ADDR_HI(rxr->rx_bd_chain_paddr[j]));
4140 		rxbd->rx_bd_haddr_lo =
4141 		    htole32(BCE_ADDR_LO(rxr->rx_bd_chain_paddr[j]));
4142 	}
4143 
4144 	/* Allocate mbuf clusters for the rx_bd chain. */
4145 	prod = prod_bseq = 0;
4146 	while (prod < TOTAL_RX_BD(rxr)) {
4147 		chain_prod = RX_CHAIN_IDX(rxr, prod);
4148 		if (bce_newbuf_std(rxr, &prod, chain_prod, &prod_bseq, 1)) {
4149 			if_printf(&rxr->sc->arpcom.ac_if,
4150 			    "Error filling RX chain: rx_bd[0x%04X]!\n",
4151 			    chain_prod);
4152 			rc = ENOBUFS;
4153 			break;
4154 		}
4155 		prod = NEXT_RX_BD(prod);
4156 	}
4157 
4158 	/* Save the RX chain producer index. */
4159 	rxr->rx_prod = prod;
4160 	rxr->rx_prod_bseq = prod_bseq;
4161 
4162 	/* Tell the chip about the waiting rx_bd's. */
4163 	REG_WR16(rxr->sc, MB_GET_CID_ADDR(rxr->rx_cid) + BCE_L2MQ_RX_HOST_BDIDX,
4164 	    rxr->rx_prod);
4165 	REG_WR(rxr->sc, MB_GET_CID_ADDR(rxr->rx_cid) + BCE_L2MQ_RX_HOST_BSEQ,
4166 	    rxr->rx_prod_bseq);
4167 
4168 	bce_init_rx_context(rxr);
4169 
4170 	return(rc);
4171 }
4172 
4173 /****************************************************************************/
4174 /* Free memory and clear the RX data structures.                            */
4175 /*                                                                          */
4176 /* Returns:                                                                 */
4177 /*   Nothing.                                                               */
4178 /****************************************************************************/
4179 static void
4180 bce_free_rx_chain(struct bce_rx_ring *rxr)
4181 {
4182 	int i;
4183 
4184 	/* Free any mbufs still in the RX mbuf chain. */
4185 	for (i = 0; i < TOTAL_RX_BD(rxr); i++) {
4186 		struct bce_rx_buf *rx_buf = &rxr->rx_bufs[i];
4187 
4188 		if (rx_buf->rx_mbuf_ptr != NULL) {
4189 			bus_dmamap_unload(rxr->rx_mbuf_tag,
4190 			    rx_buf->rx_mbuf_map);
4191 			m_freem(rx_buf->rx_mbuf_ptr);
4192 			rx_buf->rx_mbuf_ptr = NULL;
4193 		}
4194 	}
4195 
4196 	/* Clear each RX chain page. */
4197 	for (i = 0; i < rxr->rx_pages; i++)
4198 		bzero(rxr->rx_bd_chain[i], BCE_RX_CHAIN_PAGE_SZ);
4199 }
4200 
4201 /****************************************************************************/
4202 /* Set media options.                                                       */
4203 /*                                                                          */
4204 /* Returns:                                                                 */
4205 /*   0 for success, positive value for failure.                             */
4206 /****************************************************************************/
4207 static int
4208 bce_ifmedia_upd(struct ifnet *ifp)
4209 {
4210 	struct bce_softc *sc = ifp->if_softc;
4211 	struct mii_data *mii = device_get_softc(sc->bce_miibus);
4212 	int error = 0;
4213 
4214 	/*
4215 	 * 'mii' will be NULL, when this function is called on following
4216 	 * code path: bce_attach() -> bce_mgmt_init()
4217 	 */
4218 	if (mii != NULL) {
4219 		/* Make sure the MII bus has been enumerated. */
4220 		sc->bce_link = 0;
4221 		if (mii->mii_instance) {
4222 			struct mii_softc *miisc;
4223 
4224 			LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
4225 				mii_phy_reset(miisc);
4226 		}
4227 		error = mii_mediachg(mii);
4228 	}
4229 	return error;
4230 }
4231 
4232 /****************************************************************************/
4233 /* Reports current media status.                                            */
4234 /*                                                                          */
4235 /* Returns:                                                                 */
4236 /*   Nothing.                                                               */
4237 /****************************************************************************/
4238 static void
4239 bce_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
4240 {
4241 	struct bce_softc *sc = ifp->if_softc;
4242 	struct mii_data *mii = device_get_softc(sc->bce_miibus);
4243 
4244 	mii_pollstat(mii);
4245 	ifmr->ifm_active = mii->mii_media_active;
4246 	ifmr->ifm_status = mii->mii_media_status;
4247 }
4248 
4249 /****************************************************************************/
4250 /* Handles PHY generated interrupt events.                                  */
4251 /*                                                                          */
4252 /* Returns:                                                                 */
4253 /*   Nothing.                                                               */
4254 /****************************************************************************/
4255 static void
4256 bce_phy_intr(struct bce_softc *sc)
4257 {
4258 	uint32_t new_link_state, old_link_state;
4259 	struct ifnet *ifp = &sc->arpcom.ac_if;
4260 
4261 	ASSERT_SERIALIZED(&sc->main_serialize);
4262 
4263 	new_link_state = sc->status_block->status_attn_bits &
4264 			 STATUS_ATTN_BITS_LINK_STATE;
4265 	old_link_state = sc->status_block->status_attn_bits_ack &
4266 			 STATUS_ATTN_BITS_LINK_STATE;
4267 
4268 	/* Handle any changes if the link state has changed. */
4269 	if (new_link_state != old_link_state) {	/* XXX redundant? */
4270 		/* Update the status_attn_bits_ack field in the status block. */
4271 		if (new_link_state) {
4272 			REG_WR(sc, BCE_PCICFG_STATUS_BIT_SET_CMD,
4273 			       STATUS_ATTN_BITS_LINK_STATE);
4274 			if (bootverbose)
4275 				if_printf(ifp, "Link is now UP.\n");
4276 		} else {
4277 			REG_WR(sc, BCE_PCICFG_STATUS_BIT_CLEAR_CMD,
4278 			       STATUS_ATTN_BITS_LINK_STATE);
4279 			if (bootverbose)
4280 				if_printf(ifp, "Link is now DOWN.\n");
4281 		}
4282 
4283 		/*
4284 		 * Assume link is down and allow tick routine to
4285 		 * update the state based on the actual media state.
4286 		 */
4287 		sc->bce_link = 0;
4288 		callout_stop(&sc->bce_tick_callout);
4289 		bce_tick_serialized(sc);
4290 	}
4291 
4292 	/* Acknowledge the link change interrupt. */
4293 	REG_WR(sc, BCE_EMAC_STATUS, BCE_EMAC_STATUS_LINK_CHANGE);
4294 }
4295 
4296 /****************************************************************************/
4297 /* Reads the receive consumer value from the status block (skipping over    */
4298 /* chain page pointer if necessary).                                        */
4299 /*                                                                          */
4300 /* Returns:                                                                 */
4301 /*   hw_cons                                                                */
4302 /****************************************************************************/
4303 static __inline uint16_t
4304 bce_get_hw_rx_cons(struct bce_rx_ring *rxr)
4305 {
4306 	uint16_t hw_cons = *rxr->rx_hw_cons;
4307 
4308 	if ((hw_cons & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
4309 		hw_cons++;
4310 	return hw_cons;
4311 }
4312 
4313 /****************************************************************************/
4314 /* Handles received frame interrupt events.                                 */
4315 /*                                                                          */
4316 /* Returns:                                                                 */
4317 /*   Nothing.                                                               */
4318 /****************************************************************************/
4319 static void
4320 bce_rx_intr(struct bce_rx_ring *rxr, int count, uint16_t hw_cons)
4321 {
4322 	struct ifnet *ifp = &rxr->sc->arpcom.ac_if;
4323 	uint16_t sw_cons, sw_chain_cons, sw_prod, sw_chain_prod;
4324 	uint32_t sw_prod_bseq;
4325 	int cpuid = mycpuid;
4326 
4327 	ASSERT_SERIALIZED(&rxr->rx_serialize);
4328 
4329 	/* Get working copies of the driver's view of the RX indices. */
4330 	sw_cons = rxr->rx_cons;
4331 	sw_prod = rxr->rx_prod;
4332 	sw_prod_bseq = rxr->rx_prod_bseq;
4333 
4334 	/* Scan through the receive chain as long as there is work to do. */
4335 	while (sw_cons != hw_cons) {
4336 		struct pktinfo pi0, *pi = NULL;
4337 		struct bce_rx_buf *rx_buf;
4338 		struct mbuf *m = NULL;
4339 		struct l2_fhdr *l2fhdr = NULL;
4340 		unsigned int len;
4341 		uint32_t status = 0;
4342 
4343 #ifdef IFPOLL_ENABLE
4344 		if (count >= 0 && count-- == 0)
4345 			break;
4346 #endif
4347 
4348 		/*
4349 		 * Convert the producer/consumer indices
4350 		 * to an actual rx_bd index.
4351 		 */
4352 		sw_chain_cons = RX_CHAIN_IDX(rxr, sw_cons);
4353 		sw_chain_prod = RX_CHAIN_IDX(rxr, sw_prod);
4354 		rx_buf = &rxr->rx_bufs[sw_chain_cons];
4355 
4356 		rxr->free_rx_bd++;
4357 
4358 		/* The mbuf is stored with the last rx_bd entry of a packet. */
4359 		if (rx_buf->rx_mbuf_ptr != NULL) {
4360 			if (sw_chain_cons != sw_chain_prod) {
4361 				if_printf(ifp, "RX cons(%d) != prod(%d), "
4362 				    "drop!\n", sw_chain_cons, sw_chain_prod);
4363 				IFNET_STAT_INC(ifp, ierrors, 1);
4364 
4365 				bce_setup_rxdesc_std(rxr, sw_chain_cons,
4366 				    &sw_prod_bseq);
4367 				m = NULL;
4368 				goto bce_rx_int_next_rx;
4369 			}
4370 
4371 			/* Unmap the mbuf from DMA space. */
4372 			bus_dmamap_sync(rxr->rx_mbuf_tag, rx_buf->rx_mbuf_map,
4373 			    BUS_DMASYNC_POSTREAD);
4374 
4375 			/* Save the mbuf from the driver's chain. */
4376 			m = rx_buf->rx_mbuf_ptr;
4377 
4378 			/*
4379 			 * Frames received on the NetXteme II are prepended
4380 			 * with an l2_fhdr structure which provides status
4381 			 * information about the received frame (including
4382 			 * VLAN tags and checksum info).  The frames are also
4383 			 * automatically adjusted to align the IP header
4384 			 * (i.e. two null bytes are inserted before the
4385 			 * Ethernet header).  As a result the data DMA'd by
4386 			 * the controller into the mbuf is as follows:
4387 			 *
4388 			 * +---------+-----+---------------------+-----+
4389 			 * | l2_fhdr | pad | packet data         | FCS |
4390 			 * +---------+-----+---------------------+-----+
4391 			 *
4392 			 * The l2_fhdr needs to be checked and skipped and the
4393 			 * FCS needs to be stripped before sending the packet
4394 			 * up the stack.
4395 			 */
4396 			l2fhdr = mtod(m, struct l2_fhdr *);
4397 
4398 			len = l2fhdr->l2_fhdr_pkt_len;
4399 			status = l2fhdr->l2_fhdr_status;
4400 
4401 			len -= ETHER_CRC_LEN;
4402 
4403 			/* Check the received frame for errors. */
4404 			if (status & (L2_FHDR_ERRORS_BAD_CRC |
4405 				      L2_FHDR_ERRORS_PHY_DECODE |
4406 				      L2_FHDR_ERRORS_ALIGNMENT |
4407 				      L2_FHDR_ERRORS_TOO_SHORT |
4408 				      L2_FHDR_ERRORS_GIANT_FRAME)) {
4409 				IFNET_STAT_INC(ifp, ierrors, 1);
4410 
4411 				/* Reuse the mbuf for a new frame. */
4412 				bce_setup_rxdesc_std(rxr, sw_chain_prod,
4413 				    &sw_prod_bseq);
4414 				m = NULL;
4415 				goto bce_rx_int_next_rx;
4416 			}
4417 
4418 			/*
4419 			 * Get a new mbuf for the rx_bd.   If no new
4420 			 * mbufs are available then reuse the current mbuf,
4421 			 * log an ierror on the interface, and generate
4422 			 * an error in the system log.
4423 			 */
4424 			if (bce_newbuf_std(rxr, &sw_prod, sw_chain_prod,
4425 			    &sw_prod_bseq, 0)) {
4426 				IFNET_STAT_INC(ifp, ierrors, 1);
4427 
4428 				/* Try and reuse the exisitng mbuf. */
4429 				bce_setup_rxdesc_std(rxr, sw_chain_prod,
4430 				    &sw_prod_bseq);
4431 				m = NULL;
4432 				goto bce_rx_int_next_rx;
4433 			}
4434 
4435 			/*
4436 			 * Skip over the l2_fhdr when passing
4437 			 * the data up the stack.
4438 			 */
4439 			m_adj(m, sizeof(struct l2_fhdr) + ETHER_ALIGN);
4440 
4441 			m->m_pkthdr.len = m->m_len = len;
4442 			m->m_pkthdr.rcvif = ifp;
4443 
4444 			/* Validate the checksum if offload enabled. */
4445 			if (ifp->if_capenable & IFCAP_RXCSUM) {
4446 				/* Check for an IP datagram. */
4447 				if (status & L2_FHDR_STATUS_IP_DATAGRAM) {
4448 					m->m_pkthdr.csum_flags |=
4449 						CSUM_IP_CHECKED;
4450 
4451 					/* Check if the IP checksum is valid. */
4452 					if ((l2fhdr->l2_fhdr_ip_xsum ^
4453 					     0xffff) == 0) {
4454 						m->m_pkthdr.csum_flags |=
4455 							CSUM_IP_VALID;
4456 					}
4457 				}
4458 
4459 				/* Check for a valid TCP/UDP frame. */
4460 				if (status & (L2_FHDR_STATUS_TCP_SEGMENT |
4461 					      L2_FHDR_STATUS_UDP_DATAGRAM)) {
4462 
4463 					/* Check for a good TCP/UDP checksum. */
4464 					if ((status &
4465 					     (L2_FHDR_ERRORS_TCP_XSUM |
4466 					      L2_FHDR_ERRORS_UDP_XSUM)) == 0) {
4467 						m->m_pkthdr.csum_data =
4468 						l2fhdr->l2_fhdr_tcp_udp_xsum;
4469 						m->m_pkthdr.csum_flags |=
4470 							CSUM_DATA_VALID |
4471 							CSUM_PSEUDO_HDR;
4472 					}
4473 				}
4474 			}
4475 			if (ifp->if_capenable & IFCAP_RSS) {
4476 				pi = bce_rss_pktinfo(&pi0, status, l2fhdr);
4477 				if (pi != NULL &&
4478 				    (status & L2_FHDR_STATUS_RSS_HASH)) {
4479 					m_sethash(m,
4480 					    toeplitz_hash(l2fhdr->l2_fhdr_hash));
4481 				}
4482 			}
4483 
4484 			IFNET_STAT_INC(ifp, ipackets, 1);
4485 bce_rx_int_next_rx:
4486 			sw_prod = NEXT_RX_BD(sw_prod);
4487 		}
4488 
4489 		sw_cons = NEXT_RX_BD(sw_cons);
4490 
4491 		/* If we have a packet, pass it up the stack */
4492 		if (m) {
4493 			if (status & L2_FHDR_STATUS_L2_VLAN_TAG) {
4494 				m->m_flags |= M_VLANTAG;
4495 				m->m_pkthdr.ether_vlantag =
4496 					l2fhdr->l2_fhdr_vlan_tag;
4497 			}
4498 			ifp->if_input(ifp, m, pi, cpuid);
4499 #ifdef BCE_RSS_DEBUG
4500 			rxr->rx_pkts++;
4501 #endif
4502 		}
4503 	}
4504 
4505 	rxr->rx_cons = sw_cons;
4506 	rxr->rx_prod = sw_prod;
4507 	rxr->rx_prod_bseq = sw_prod_bseq;
4508 
4509 	REG_WR16(rxr->sc, MB_GET_CID_ADDR(rxr->rx_cid) + BCE_L2MQ_RX_HOST_BDIDX,
4510 	    rxr->rx_prod);
4511 	REG_WR(rxr->sc, MB_GET_CID_ADDR(rxr->rx_cid) + BCE_L2MQ_RX_HOST_BSEQ,
4512 	    rxr->rx_prod_bseq);
4513 }
4514 
4515 /****************************************************************************/
4516 /* Reads the transmit consumer value from the status block (skipping over   */
4517 /* chain page pointer if necessary).                                        */
4518 /*                                                                          */
4519 /* Returns:                                                                 */
4520 /*   hw_cons                                                                */
4521 /****************************************************************************/
4522 static __inline uint16_t
4523 bce_get_hw_tx_cons(struct bce_tx_ring *txr)
4524 {
4525 	uint16_t hw_cons = *txr->tx_hw_cons;
4526 
4527 	if ((hw_cons & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
4528 		hw_cons++;
4529 	return hw_cons;
4530 }
4531 
4532 /****************************************************************************/
4533 /* Handles transmit completion interrupt events.                            */
4534 /*                                                                          */
4535 /* Returns:                                                                 */
4536 /*   Nothing.                                                               */
4537 /****************************************************************************/
4538 static void
4539 bce_tx_intr(struct bce_tx_ring *txr, uint16_t hw_tx_cons)
4540 {
4541 	struct ifnet *ifp = &txr->sc->arpcom.ac_if;
4542 	uint16_t sw_tx_cons, sw_tx_chain_cons;
4543 
4544 	ASSERT_SERIALIZED(&txr->tx_serialize);
4545 
4546 	/* Get the hardware's view of the TX consumer index. */
4547 	sw_tx_cons = txr->tx_cons;
4548 
4549 	/* Cycle through any completed TX chain page entries. */
4550 	while (sw_tx_cons != hw_tx_cons) {
4551 		struct bce_tx_buf *tx_buf;
4552 
4553 		sw_tx_chain_cons = TX_CHAIN_IDX(txr, sw_tx_cons);
4554 		tx_buf = &txr->tx_bufs[sw_tx_chain_cons];
4555 
4556 		/*
4557 		 * Free the associated mbuf. Remember
4558 		 * that only the last tx_bd of a packet
4559 		 * has an mbuf pointer and DMA map.
4560 		 */
4561 		if (tx_buf->tx_mbuf_ptr != NULL) {
4562 			/* Unmap the mbuf. */
4563 			bus_dmamap_unload(txr->tx_mbuf_tag,
4564 			    tx_buf->tx_mbuf_map);
4565 
4566 			/* Free the mbuf. */
4567 			m_freem(tx_buf->tx_mbuf_ptr);
4568 			tx_buf->tx_mbuf_ptr = NULL;
4569 
4570 			IFNET_STAT_INC(ifp, opackets, 1);
4571 #ifdef BCE_TSS_DEBUG
4572 			txr->tx_pkts++;
4573 #endif
4574 		}
4575 
4576 		txr->used_tx_bd--;
4577 		sw_tx_cons = NEXT_TX_BD(sw_tx_cons);
4578 	}
4579 
4580 	if (txr->used_tx_bd == 0) {
4581 		/* Clear the TX timeout timer. */
4582 		ifsq_watchdog_set_count(&txr->tx_watchdog, 0);
4583 	}
4584 
4585 	/* Clear the tx hardware queue full flag. */
4586 	if (txr->max_tx_bd - txr->used_tx_bd >= BCE_TX_SPARE_SPACE)
4587 		ifsq_clr_oactive(txr->ifsq);
4588 	txr->tx_cons = sw_tx_cons;
4589 }
4590 
4591 /****************************************************************************/
4592 /* Disables interrupt generation.                                           */
4593 /*                                                                          */
4594 /* Returns:                                                                 */
4595 /*   Nothing.                                                               */
4596 /****************************************************************************/
4597 static void
4598 bce_disable_intr(struct bce_softc *sc)
4599 {
4600 	int i;
4601 
4602 	for (i = 0; i < sc->rx_ring_cnt; ++i) {
4603 		REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
4604 		    (sc->rx_rings[i].idx << 24) |
4605 		    BCE_PCICFG_INT_ACK_CMD_MASK_INT);
4606 	}
4607 	REG_RD(sc, BCE_PCICFG_INT_ACK_CMD);
4608 
4609 	callout_stop(&sc->bce_ckmsi_callout);
4610 	sc->bce_msi_maylose = FALSE;
4611 	sc->bce_check_rx_cons = 0;
4612 	sc->bce_check_tx_cons = 0;
4613 	sc->bce_check_status_idx = 0xffff;
4614 
4615 	for (i = 0; i < sc->rx_ring_cnt; ++i)
4616 		lwkt_serialize_handler_disable(sc->bce_msix[i].msix_serialize);
4617 }
4618 
4619 /****************************************************************************/
4620 /* Enables interrupt generation.                                            */
4621 /*                                                                          */
4622 /* Returns:                                                                 */
4623 /*   Nothing.                                                               */
4624 /****************************************************************************/
4625 static void
4626 bce_enable_intr(struct bce_softc *sc)
4627 {
4628 	int i;
4629 
4630 	for (i = 0; i < sc->rx_ring_cnt; ++i)
4631 		lwkt_serialize_handler_enable(sc->bce_msix[i].msix_serialize);
4632 
4633 	for (i = 0; i < sc->rx_ring_cnt; ++i) {
4634 		struct bce_rx_ring *rxr = &sc->rx_rings[i];
4635 
4636 		REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, (rxr->idx << 24) |
4637 		       BCE_PCICFG_INT_ACK_CMD_INDEX_VALID |
4638 		       BCE_PCICFG_INT_ACK_CMD_MASK_INT |
4639 		       rxr->last_status_idx);
4640 		REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, (rxr->idx << 24) |
4641 		       BCE_PCICFG_INT_ACK_CMD_INDEX_VALID |
4642 		       rxr->last_status_idx);
4643 	}
4644 	REG_WR(sc, BCE_HC_COMMAND, sc->hc_command | BCE_HC_COMMAND_COAL_NOW);
4645 
4646 	if (sc->bce_flags & BCE_CHECK_MSI_FLAG) {
4647 		sc->bce_msi_maylose = FALSE;
4648 		sc->bce_check_rx_cons = 0;
4649 		sc->bce_check_tx_cons = 0;
4650 		sc->bce_check_status_idx = 0xffff;
4651 
4652 		if (bootverbose)
4653 			if_printf(&sc->arpcom.ac_if, "check msi\n");
4654 
4655 		callout_reset_bycpu(&sc->bce_ckmsi_callout, BCE_MSI_CKINTVL,
4656 		    bce_check_msi, sc, sc->bce_msix[0].msix_cpuid);
4657 	}
4658 }
4659 
4660 /****************************************************************************/
4661 /* Reenables interrupt generation during interrupt handling.                */
4662 /*                                                                          */
4663 /* Returns:                                                                 */
4664 /*   Nothing.                                                               */
4665 /****************************************************************************/
4666 static void
4667 bce_reenable_intr(struct bce_rx_ring *rxr)
4668 {
4669 	REG_WR(rxr->sc, BCE_PCICFG_INT_ACK_CMD, (rxr->idx << 24) |
4670 	       BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | rxr->last_status_idx);
4671 }
4672 
4673 /****************************************************************************/
4674 /* Handles controller initialization.                                       */
4675 /*                                                                          */
4676 /* Returns:                                                                 */
4677 /*   Nothing.                                                               */
4678 /****************************************************************************/
4679 static void
4680 bce_init(void *xsc)
4681 {
4682 	struct bce_softc *sc = xsc;
4683 	struct ifnet *ifp = &sc->arpcom.ac_if;
4684 	uint32_t ether_mtu;
4685 	int error, i;
4686 	boolean_t polling;
4687 
4688 	ASSERT_IFNET_SERIALIZED_ALL(ifp);
4689 
4690 	/* Check if the driver is still running and bail out if it is. */
4691 	if (ifp->if_flags & IFF_RUNNING)
4692 		return;
4693 
4694 	bce_stop(sc);
4695 
4696 	error = bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
4697 	if (error) {
4698 		if_printf(ifp, "Controller reset failed!\n");
4699 		goto back;
4700 	}
4701 
4702 	error = bce_chipinit(sc);
4703 	if (error) {
4704 		if_printf(ifp, "Controller initialization failed!\n");
4705 		goto back;
4706 	}
4707 
4708 	error = bce_blockinit(sc);
4709 	if (error) {
4710 		if_printf(ifp, "Block initialization failed!\n");
4711 		goto back;
4712 	}
4713 
4714 	/* Load our MAC address. */
4715 	bcopy(IF_LLADDR(ifp), sc->eaddr, ETHER_ADDR_LEN);
4716 	bce_set_mac_addr(sc);
4717 
4718 	/* Calculate and program the Ethernet MTU size. */
4719 	ether_mtu = ETHER_HDR_LEN + EVL_ENCAPLEN + ifp->if_mtu + ETHER_CRC_LEN;
4720 
4721 	/*
4722 	 * Program the mtu, enabling jumbo frame
4723 	 * support if necessary.  Also set the mbuf
4724 	 * allocation count for RX frames.
4725 	 */
4726 	if (ether_mtu > ETHER_MAX_LEN + EVL_ENCAPLEN) {
4727 #ifdef notyet
4728 		REG_WR(sc, BCE_EMAC_RX_MTU_SIZE,
4729 		       min(ether_mtu, BCE_MAX_JUMBO_ETHER_MTU) |
4730 		       BCE_EMAC_RX_MTU_SIZE_JUMBO_ENA);
4731 #else
4732 		panic("jumbo buffer is not supported yet");
4733 #endif
4734 	} else {
4735 		REG_WR(sc, BCE_EMAC_RX_MTU_SIZE, ether_mtu);
4736 	}
4737 
4738 	/* Program appropriate promiscuous/multicast filtering. */
4739 	bce_set_rx_mode(sc);
4740 
4741 	/*
4742 	 * Init RX buffer descriptor chain.
4743 	 */
4744 	REG_WR(sc, BCE_RLUP_RSS_CONFIG, 0);
4745 	bce_reg_wr_ind(sc, BCE_RXP_SCRATCH_RSS_TBL_SZ, 0);
4746 
4747 	for (i = 0; i < sc->rx_ring_cnt; ++i)
4748 		bce_init_rx_chain(&sc->rx_rings[i]);	/* XXX return value */
4749 
4750 	if (sc->rx_ring_cnt > 1)
4751 		bce_init_rss(sc);
4752 
4753 	/*
4754 	 * Init TX buffer descriptor chain.
4755 	 */
4756 	REG_WR(sc, BCE_TSCH_TSS_CFG, 0);
4757 
4758 	for (i = 0; i < sc->tx_ring_cnt; ++i)
4759 		bce_init_tx_chain(&sc->tx_rings[i]);
4760 
4761 	if (sc->tx_ring_cnt > 1) {
4762 		REG_WR(sc, BCE_TSCH_TSS_CFG,
4763 		    ((sc->tx_ring_cnt - 1) << 24) | (TX_TSS_CID << 7));
4764 	}
4765 
4766 	polling = FALSE;
4767 #ifdef IFPOLL_ENABLE
4768 	if (ifp->if_flags & IFF_NPOLLING)
4769 		polling = TRUE;
4770 #endif
4771 
4772 	if (polling) {
4773 		/* Disable interrupts if we are polling. */
4774 		bce_disable_intr(sc);
4775 
4776 		/* Change coalesce parameters */
4777 		bce_npoll_coal_change(sc);
4778 	} else {
4779 		/* Enable host interrupts. */
4780 		bce_enable_intr(sc);
4781 	}
4782 	bce_set_timer_cpuid(sc, polling);
4783 
4784 	bce_ifmedia_upd(ifp);
4785 
4786 	ifp->if_flags |= IFF_RUNNING;
4787 	for (i = 0; i < sc->tx_ring_cnt; ++i) {
4788 		ifsq_clr_oactive(sc->tx_rings[i].ifsq);
4789 		ifsq_watchdog_start(&sc->tx_rings[i].tx_watchdog);
4790 	}
4791 
4792 	callout_reset_bycpu(&sc->bce_tick_callout, hz, bce_tick, sc,
4793 	    sc->bce_timer_cpuid);
4794 back:
4795 	if (error)
4796 		bce_stop(sc);
4797 }
4798 
4799 /****************************************************************************/
4800 /* Initialize the controller just enough so that any management firmware    */
4801 /* running on the device will continue to operate corectly.                 */
4802 /*                                                                          */
4803 /* Returns:                                                                 */
4804 /*   Nothing.                                                               */
4805 /****************************************************************************/
4806 static void
4807 bce_mgmt_init(struct bce_softc *sc)
4808 {
4809 	struct ifnet *ifp = &sc->arpcom.ac_if;
4810 
4811 	/* Bail out if management firmware is not running. */
4812 	if (!(sc->bce_flags & BCE_MFW_ENABLE_FLAG))
4813 		return;
4814 
4815 	/* Enable all critical blocks in the MAC. */
4816 	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
4817 	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
4818 		REG_WR(sc, BCE_MISC_ENABLE_SET_BITS,
4819 		    BCE_MISC_ENABLE_DEFAULT_XI);
4820 	} else {
4821 		REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, BCE_MISC_ENABLE_DEFAULT);
4822 	}
4823 	REG_RD(sc, BCE_MISC_ENABLE_SET_BITS);
4824 	DELAY(20);
4825 
4826 	bce_ifmedia_upd(ifp);
4827 }
4828 
4829 /****************************************************************************/
4830 /* Encapsultes an mbuf cluster into the tx_bd chain structure and makes the */
4831 /* memory visible to the controller.                                        */
4832 /*                                                                          */
4833 /* Returns:                                                                 */
4834 /*   0 for success, positive value for failure.                             */
4835 /****************************************************************************/
4836 static int
4837 bce_encap(struct bce_tx_ring *txr, struct mbuf **m_head, int *nsegs_used)
4838 {
4839 	bus_dma_segment_t segs[BCE_MAX_SEGMENTS];
4840 	bus_dmamap_t map, tmp_map;
4841 	struct mbuf *m0 = *m_head;
4842 	struct tx_bd *txbd = NULL;
4843 	uint16_t vlan_tag = 0, flags = 0, mss = 0;
4844 	uint16_t chain_prod, chain_prod_start, prod;
4845 	uint32_t prod_bseq;
4846 	int i, error, maxsegs, nsegs;
4847 
4848 	/* Transfer any checksum offload flags to the bd. */
4849 	if (m0->m_pkthdr.csum_flags & CSUM_TSO) {
4850 		error = bce_tso_setup(txr, m_head, &flags, &mss);
4851 		if (error)
4852 			return ENOBUFS;
4853 		m0 = *m_head;
4854 	} else if (m0->m_pkthdr.csum_flags & BCE_CSUM_FEATURES) {
4855 		if (m0->m_pkthdr.csum_flags & CSUM_IP)
4856 			flags |= TX_BD_FLAGS_IP_CKSUM;
4857 		if (m0->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
4858 			flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
4859 	}
4860 
4861 	/* Transfer any VLAN tags to the bd. */
4862 	if (m0->m_flags & M_VLANTAG) {
4863 		flags |= TX_BD_FLAGS_VLAN_TAG;
4864 		vlan_tag = m0->m_pkthdr.ether_vlantag;
4865 	}
4866 
4867 	prod = txr->tx_prod;
4868 	chain_prod_start = chain_prod = TX_CHAIN_IDX(txr, prod);
4869 
4870 	/* Map the mbuf into DMAable memory. */
4871 	map = txr->tx_bufs[chain_prod_start].tx_mbuf_map;
4872 
4873 	maxsegs = txr->max_tx_bd - txr->used_tx_bd;
4874 	KASSERT(maxsegs >= BCE_TX_SPARE_SPACE,
4875 		("not enough segments %d", maxsegs));
4876 	if (maxsegs > BCE_MAX_SEGMENTS)
4877 		maxsegs = BCE_MAX_SEGMENTS;
4878 
4879 	/* Map the mbuf into our DMA address space. */
4880 	error = bus_dmamap_load_mbuf_defrag(txr->tx_mbuf_tag, map, m_head,
4881 			segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
4882 	if (error)
4883 		goto back;
4884 	bus_dmamap_sync(txr->tx_mbuf_tag, map, BUS_DMASYNC_PREWRITE);
4885 
4886 	*nsegs_used += nsegs;
4887 
4888 	/* Reset m0 */
4889 	m0 = *m_head;
4890 
4891 	/* prod points to an empty tx_bd at this point. */
4892 	prod_bseq  = txr->tx_prod_bseq;
4893 
4894 	/*
4895 	 * Cycle through each mbuf segment that makes up
4896 	 * the outgoing frame, gathering the mapping info
4897 	 * for that segment and creating a tx_bd to for
4898 	 * the mbuf.
4899 	 */
4900 	for (i = 0; i < nsegs; i++) {
4901 		chain_prod = TX_CHAIN_IDX(txr, prod);
4902 		txbd =
4903 		&txr->tx_bd_chain[TX_PAGE(chain_prod)][TX_IDX(chain_prod)];
4904 
4905 		txbd->tx_bd_haddr_lo = htole32(BCE_ADDR_LO(segs[i].ds_addr));
4906 		txbd->tx_bd_haddr_hi = htole32(BCE_ADDR_HI(segs[i].ds_addr));
4907 		txbd->tx_bd_mss_nbytes = htole32(mss << 16) |
4908 		    htole16(segs[i].ds_len);
4909 		txbd->tx_bd_vlan_tag = htole16(vlan_tag);
4910 		txbd->tx_bd_flags = htole16(flags);
4911 
4912 		prod_bseq += segs[i].ds_len;
4913 		if (i == 0)
4914 			txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_START);
4915 		prod = NEXT_TX_BD(prod);
4916 	}
4917 
4918 	/* Set the END flag on the last TX buffer descriptor. */
4919 	txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_END);
4920 
4921 	/*
4922 	 * Ensure that the mbuf pointer for this transmission
4923 	 * is placed at the array index of the last
4924 	 * descriptor in this chain.  This is done
4925 	 * because a single map is used for all
4926 	 * segments of the mbuf and we don't want to
4927 	 * unload the map before all of the segments
4928 	 * have been freed.
4929 	 */
4930 	txr->tx_bufs[chain_prod].tx_mbuf_ptr = m0;
4931 
4932 	tmp_map = txr->tx_bufs[chain_prod].tx_mbuf_map;
4933 	txr->tx_bufs[chain_prod].tx_mbuf_map = map;
4934 	txr->tx_bufs[chain_prod_start].tx_mbuf_map = tmp_map;
4935 
4936 	txr->used_tx_bd += nsegs;
4937 
4938 	/* prod points to the next free tx_bd at this point. */
4939 	txr->tx_prod = prod;
4940 	txr->tx_prod_bseq = prod_bseq;
4941 back:
4942 	if (error) {
4943 		m_freem(*m_head);
4944 		*m_head = NULL;
4945 	}
4946 	return error;
4947 }
4948 
4949 static void
4950 bce_xmit(struct bce_tx_ring *txr)
4951 {
4952 	/* Start the transmit. */
4953 	REG_WR16(txr->sc, MB_GET_CID_ADDR(txr->tx_cid) + BCE_L2CTX_TX_HOST_BIDX,
4954 	    txr->tx_prod);
4955 	REG_WR(txr->sc, MB_GET_CID_ADDR(txr->tx_cid) + BCE_L2CTX_TX_HOST_BSEQ,
4956 	    txr->tx_prod_bseq);
4957 }
4958 
4959 /****************************************************************************/
4960 /* Main transmit routine when called from another routine with a lock.      */
4961 /*                                                                          */
4962 /* Returns:                                                                 */
4963 /*   Nothing.                                                               */
4964 /****************************************************************************/
4965 static void
4966 bce_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
4967 {
4968 	struct bce_softc *sc = ifp->if_softc;
4969 	struct bce_tx_ring *txr = ifsq_get_priv(ifsq);
4970 	int count = 0;
4971 
4972 	KKASSERT(txr->ifsq == ifsq);
4973 	ASSERT_SERIALIZED(&txr->tx_serialize);
4974 
4975 	/* If there's no link or the transmit queue is empty then just exit. */
4976 	if (!sc->bce_link) {
4977 		ifsq_purge(ifsq);
4978 		return;
4979 	}
4980 
4981 	if ((ifp->if_flags & IFF_RUNNING) == 0 || ifsq_is_oactive(ifsq))
4982 		return;
4983 
4984 	for (;;) {
4985 		struct mbuf *m_head;
4986 
4987 		/*
4988 		 * We keep BCE_TX_SPARE_SPACE entries, so bce_encap() is
4989 		 * unlikely to fail.
4990 		 */
4991 		if (txr->max_tx_bd - txr->used_tx_bd < BCE_TX_SPARE_SPACE) {
4992 			ifsq_set_oactive(ifsq);
4993 			break;
4994 		}
4995 
4996 		/* Check for any frames to send. */
4997 		m_head = ifsq_dequeue(ifsq);
4998 		if (m_head == NULL)
4999 			break;
5000 
5001 		/*
5002 		 * Pack the data into the transmit ring. If we
5003 		 * don't have room, place the mbuf back at the
5004 		 * head of the queue and set the OACTIVE flag
5005 		 * to wait for the NIC to drain the chain.
5006 		 */
5007 		if (bce_encap(txr, &m_head, &count)) {
5008 			IFNET_STAT_INC(ifp, oerrors, 1);
5009 			if (txr->used_tx_bd == 0) {
5010 				continue;
5011 			} else {
5012 				ifsq_set_oactive(ifsq);
5013 				break;
5014 			}
5015 		}
5016 
5017 		if (count >= txr->tx_wreg) {
5018 			bce_xmit(txr);
5019 			count = 0;
5020 		}
5021 
5022 		/* Send a copy of the frame to any BPF listeners. */
5023 		ETHER_BPF_MTAP(ifp, m_head);
5024 
5025 		/* Set the tx timeout. */
5026 		ifsq_watchdog_set_count(&txr->tx_watchdog, BCE_TX_TIMEOUT);
5027 	}
5028 	if (count > 0)
5029 		bce_xmit(txr);
5030 }
5031 
5032 /****************************************************************************/
5033 /* Handles any IOCTL calls from the operating system.                       */
5034 /*                                                                          */
5035 /* Returns:                                                                 */
5036 /*   0 for success, positive value for failure.                             */
5037 /****************************************************************************/
5038 static int
5039 bce_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
5040 {
5041 	struct bce_softc *sc = ifp->if_softc;
5042 	struct ifreq *ifr = (struct ifreq *)data;
5043 	struct mii_data *mii;
5044 	int mask, error = 0;
5045 
5046 	ASSERT_IFNET_SERIALIZED_ALL(ifp);
5047 
5048 	switch(command) {
5049 	case SIOCSIFMTU:
5050 		/* Check that the MTU setting is supported. */
5051 		if (ifr->ifr_mtu < BCE_MIN_MTU ||
5052 #ifdef notyet
5053 		    ifr->ifr_mtu > BCE_MAX_JUMBO_MTU
5054 #else
5055 		    ifr->ifr_mtu > ETHERMTU
5056 #endif
5057 		   ) {
5058 			error = EINVAL;
5059 			break;
5060 		}
5061 
5062 		ifp->if_mtu = ifr->ifr_mtu;
5063 		ifp->if_flags &= ~IFF_RUNNING;	/* Force reinitialize */
5064 		bce_init(sc);
5065 		break;
5066 
5067 	case SIOCSIFFLAGS:
5068 		if (ifp->if_flags & IFF_UP) {
5069 			if (ifp->if_flags & IFF_RUNNING) {
5070 				mask = ifp->if_flags ^ sc->bce_if_flags;
5071 
5072 				if (mask & (IFF_PROMISC | IFF_ALLMULTI))
5073 					bce_set_rx_mode(sc);
5074 			} else {
5075 				bce_init(sc);
5076 			}
5077 		} else if (ifp->if_flags & IFF_RUNNING) {
5078 			bce_stop(sc);
5079 
5080 			/* If MFW is running, restart the controller a bit. */
5081 			if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) {
5082 				bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
5083 				bce_chipinit(sc);
5084 				bce_mgmt_init(sc);
5085 			}
5086 		}
5087 		sc->bce_if_flags = ifp->if_flags;
5088 		break;
5089 
5090 	case SIOCADDMULTI:
5091 	case SIOCDELMULTI:
5092 		if (ifp->if_flags & IFF_RUNNING)
5093 			bce_set_rx_mode(sc);
5094 		break;
5095 
5096 	case SIOCSIFMEDIA:
5097 	case SIOCGIFMEDIA:
5098 		mii = device_get_softc(sc->bce_miibus);
5099 		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
5100 		break;
5101 
5102 	case SIOCSIFCAP:
5103 		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
5104 		if (mask & IFCAP_HWCSUM) {
5105 			ifp->if_capenable ^= (mask & IFCAP_HWCSUM);
5106 			if (ifp->if_capenable & IFCAP_TXCSUM)
5107 				ifp->if_hwassist |= BCE_CSUM_FEATURES;
5108 			else
5109 				ifp->if_hwassist &= ~BCE_CSUM_FEATURES;
5110 		}
5111 		if (mask & IFCAP_TSO) {
5112 			ifp->if_capenable ^= IFCAP_TSO;
5113 			if (ifp->if_capenable & IFCAP_TSO)
5114 				ifp->if_hwassist |= CSUM_TSO;
5115 			else
5116 				ifp->if_hwassist &= ~CSUM_TSO;
5117 		}
5118 		if (mask & IFCAP_RSS)
5119 			ifp->if_capenable ^= IFCAP_RSS;
5120 		break;
5121 
5122 	default:
5123 		error = ether_ioctl(ifp, command, data);
5124 		break;
5125 	}
5126 	return error;
5127 }
5128 
5129 /****************************************************************************/
5130 /* Transmit timeout handler.                                                */
5131 /*                                                                          */
5132 /* Returns:                                                                 */
5133 /*   Nothing.                                                               */
5134 /****************************************************************************/
5135 static void
5136 bce_watchdog(struct ifaltq_subque *ifsq)
5137 {
5138 	struct ifnet *ifp = ifsq_get_ifp(ifsq);
5139 	struct bce_softc *sc = ifp->if_softc;
5140 	int i;
5141 
5142 	ASSERT_IFNET_SERIALIZED_ALL(ifp);
5143 
5144 	/*
5145 	 * If we are in this routine because of pause frames, then
5146 	 * don't reset the hardware.
5147 	 */
5148 	if (REG_RD(sc, BCE_EMAC_TX_STATUS) & BCE_EMAC_TX_STATUS_XOFFED)
5149 		return;
5150 
5151 	if_printf(ifp, "Watchdog timeout occurred, resetting!\n");
5152 
5153 	ifp->if_flags &= ~IFF_RUNNING;	/* Force reinitialize */
5154 	bce_init(sc);
5155 
5156 	IFNET_STAT_INC(ifp, oerrors, 1);
5157 
5158 	for (i = 0; i < sc->tx_ring_cnt; ++i)
5159 		ifsq_devstart_sched(sc->tx_rings[i].ifsq);
5160 }
5161 
5162 #ifdef IFPOLL_ENABLE
5163 
5164 static void
5165 bce_npoll_status(struct ifnet *ifp)
5166 {
5167 	struct bce_softc *sc = ifp->if_softc;
5168 	struct status_block *sblk = sc->status_block;
5169 	uint32_t status_attn_bits;
5170 
5171 	ASSERT_SERIALIZED(&sc->main_serialize);
5172 
5173 	status_attn_bits = sblk->status_attn_bits;
5174 
5175 	/* Was it a link change interrupt? */
5176 	if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
5177 	    (sblk->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE)) {
5178 		bce_phy_intr(sc);
5179 
5180 		/*
5181 		 * Clear any transient status updates during link state change.
5182 		 */
5183 		REG_WR(sc, BCE_HC_COMMAND,
5184 		    sc->hc_command | BCE_HC_COMMAND_COAL_NOW_WO_INT);
5185 		REG_RD(sc, BCE_HC_COMMAND);
5186 	}
5187 
5188 	/*
5189 	 * If any other attention is asserted then the chip is toast.
5190 	 */
5191 	if ((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) !=
5192 	     (sblk->status_attn_bits_ack & ~STATUS_ATTN_BITS_LINK_STATE)) {
5193 		if_printf(ifp, "Fatal attention detected: 0x%08X\n",
5194 		    sblk->status_attn_bits);
5195 		bce_serialize_skipmain(sc);
5196 		bce_init(sc);
5197 		bce_deserialize_skipmain(sc);
5198 	}
5199 }
5200 
5201 static void
5202 bce_npoll_rx(struct ifnet *ifp, void *arg, int count)
5203 {
5204 	struct bce_rx_ring *rxr = arg;
5205 	uint16_t hw_rx_cons;
5206 
5207 	ASSERT_SERIALIZED(&rxr->rx_serialize);
5208 
5209 	/*
5210 	 * Save the status block index value for use when enabling
5211 	 * the interrupt.
5212 	 */
5213 	rxr->last_status_idx = *rxr->hw_status_idx;
5214 
5215 	/* Make sure status index is extracted before RX/TX cons */
5216 	cpu_lfence();
5217 
5218 	hw_rx_cons = bce_get_hw_rx_cons(rxr);
5219 
5220 	/* Check for any completed RX frames. */
5221 	if (hw_rx_cons != rxr->rx_cons)
5222 		bce_rx_intr(rxr, count, hw_rx_cons);
5223 }
5224 
5225 static void
5226 bce_npoll_rx_pack(struct ifnet *ifp, void *arg, int count)
5227 {
5228 	struct bce_rx_ring *rxr = arg;
5229 
5230 	KASSERT(rxr->idx == 0, ("not the first RX ring, but %d", rxr->idx));
5231 	bce_npoll_rx(ifp, rxr, count);
5232 
5233 	KASSERT(rxr->sc->rx_ring_cnt != rxr->sc->rx_ring_cnt2,
5234 	    ("RX ring count %d, count2 %d", rxr->sc->rx_ring_cnt,
5235 	     rxr->sc->rx_ring_cnt2));
5236 
5237 	/* Last ring carries packets whose masked hash is 0 */
5238 	rxr = &rxr->sc->rx_rings[rxr->sc->rx_ring_cnt - 1];
5239 
5240 	lwkt_serialize_enter(&rxr->rx_serialize);
5241 	bce_npoll_rx(ifp, rxr, count);
5242 	lwkt_serialize_exit(&rxr->rx_serialize);
5243 }
5244 
5245 static void
5246 bce_npoll_tx(struct ifnet *ifp, void *arg, int count __unused)
5247 {
5248 	struct bce_tx_ring *txr = arg;
5249 	uint16_t hw_tx_cons;
5250 
5251 	ASSERT_SERIALIZED(&txr->tx_serialize);
5252 
5253 	hw_tx_cons = bce_get_hw_tx_cons(txr);
5254 
5255 	/* Check for any completed TX frames. */
5256 	if (hw_tx_cons != txr->tx_cons) {
5257 		bce_tx_intr(txr, hw_tx_cons);
5258 		if (!ifsq_is_empty(txr->ifsq))
5259 			ifsq_devstart(txr->ifsq);
5260 	}
5261 }
5262 
5263 static void
5264 bce_npoll(struct ifnet *ifp, struct ifpoll_info *info)
5265 {
5266 	struct bce_softc *sc = ifp->if_softc;
5267 	int i;
5268 
5269 	ASSERT_IFNET_SERIALIZED_ALL(ifp);
5270 
5271 	if (info != NULL) {
5272 		int cpu;
5273 
5274 		info->ifpi_status.status_func = bce_npoll_status;
5275 		info->ifpi_status.serializer = &sc->main_serialize;
5276 
5277 		for (i = 0; i < sc->tx_ring_cnt; ++i) {
5278 			struct bce_tx_ring *txr = &sc->tx_rings[i];
5279 
5280 			cpu = if_ringmap_cpumap(sc->tx_rmap, i);
5281 			KKASSERT(cpu < netisr_ncpus);
5282 			info->ifpi_tx[cpu].poll_func = bce_npoll_tx;
5283 			info->ifpi_tx[cpu].arg = txr;
5284 			info->ifpi_tx[cpu].serializer = &txr->tx_serialize;
5285 			ifsq_set_cpuid(txr->ifsq, cpu);
5286 		}
5287 
5288 		for (i = 0; i < sc->rx_ring_cnt2; ++i) {
5289 			struct bce_rx_ring *rxr = &sc->rx_rings[i];
5290 
5291 			cpu = if_ringmap_cpumap(sc->rx_rmap, i);
5292 			KKASSERT(cpu < netisr_ncpus);
5293 			if (i == 0 && sc->rx_ring_cnt2 != sc->rx_ring_cnt) {
5294 				/*
5295 				 * If RSS is enabled, the packets whose
5296 				 * masked hash are 0 are queued to the
5297 				 * last RX ring; piggyback the last RX
5298 				 * ring's processing in the first RX
5299 				 * polling handler. (see also: comment
5300 				 * in bce_setup_ring_cnt())
5301 				 */
5302 				if (bootverbose) {
5303 					if_printf(ifp, "npoll pack last "
5304 					    "RX ring on cpu%d\n", cpu);
5305 				}
5306 				info->ifpi_rx[cpu].poll_func =
5307 				    bce_npoll_rx_pack;
5308 			} else {
5309 				info->ifpi_rx[cpu].poll_func = bce_npoll_rx;
5310 			}
5311 			info->ifpi_rx[cpu].arg = rxr;
5312 			info->ifpi_rx[cpu].serializer = &rxr->rx_serialize;
5313 		}
5314 
5315 		if (ifp->if_flags & IFF_RUNNING) {
5316 			bce_set_timer_cpuid(sc, TRUE);
5317 			bce_disable_intr(sc);
5318 			bce_npoll_coal_change(sc);
5319 		}
5320 	} else {
5321 		for (i = 0; i < sc->tx_ring_cnt; ++i) {
5322 			ifsq_set_cpuid(sc->tx_rings[i].ifsq,
5323 			    sc->bce_msix[i].msix_cpuid);
5324 		}
5325 
5326 		if (ifp->if_flags & IFF_RUNNING) {
5327 			bce_set_timer_cpuid(sc, FALSE);
5328 			bce_enable_intr(sc);
5329 
5330 			sc->bce_coalchg_mask |= BCE_COALMASK_TX_BDS_INT |
5331 			    BCE_COALMASK_RX_BDS_INT;
5332 			bce_coal_change(sc);
5333 		}
5334 	}
5335 }
5336 
5337 #endif	/* IFPOLL_ENABLE */
5338 
5339 /*
5340  * Interrupt handler.
5341  */
5342 /****************************************************************************/
5343 /* Main interrupt entry point.  Verifies that the controller generated the  */
5344 /* interrupt and then calls a separate routine for handle the various       */
5345 /* interrupt causes (PHY, TX, RX).                                          */
5346 /*                                                                          */
5347 /* Returns:                                                                 */
5348 /*   0 for success, positive value for failure.                             */
5349 /****************************************************************************/
5350 static void
5351 bce_intr(struct bce_softc *sc)
5352 {
5353 	struct ifnet *ifp = &sc->arpcom.ac_if;
5354 	struct status_block *sblk;
5355 	uint16_t hw_rx_cons, hw_tx_cons;
5356 	uint32_t status_attn_bits;
5357 	struct bce_tx_ring *txr = &sc->tx_rings[0];
5358 	struct bce_rx_ring *rxr = &sc->rx_rings[0];
5359 
5360 	ASSERT_SERIALIZED(&sc->main_serialize);
5361 
5362 	sblk = sc->status_block;
5363 
5364 	/*
5365 	 * Save the status block index value for use during
5366 	 * the next interrupt.
5367 	 */
5368 	rxr->last_status_idx = *rxr->hw_status_idx;
5369 
5370 	/* Make sure status index is extracted before RX/TX cons */
5371 	cpu_lfence();
5372 
5373 	/* Check if the hardware has finished any work. */
5374 	hw_rx_cons = bce_get_hw_rx_cons(rxr);
5375 	hw_tx_cons = bce_get_hw_tx_cons(txr);
5376 
5377 	status_attn_bits = sblk->status_attn_bits;
5378 
5379 	/* Was it a link change interrupt? */
5380 	if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
5381 	    (sblk->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE)) {
5382 		bce_phy_intr(sc);
5383 
5384 		/*
5385 		 * Clear any transient status updates during link state
5386 		 * change.
5387 		 */
5388 		REG_WR(sc, BCE_HC_COMMAND,
5389 		    sc->hc_command | BCE_HC_COMMAND_COAL_NOW_WO_INT);
5390 		REG_RD(sc, BCE_HC_COMMAND);
5391 	}
5392 
5393 	/*
5394 	 * If any other attention is asserted then
5395 	 * the chip is toast.
5396 	 */
5397 	if ((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) !=
5398 	    (sblk->status_attn_bits_ack & ~STATUS_ATTN_BITS_LINK_STATE)) {
5399 		if_printf(ifp, "Fatal attention detected: 0x%08X\n",
5400 			  sblk->status_attn_bits);
5401 		bce_serialize_skipmain(sc);
5402 		bce_init(sc);
5403 		bce_deserialize_skipmain(sc);
5404 		return;
5405 	}
5406 
5407 	/* Check for any completed RX frames. */
5408 	lwkt_serialize_enter(&rxr->rx_serialize);
5409 	if (hw_rx_cons != rxr->rx_cons)
5410 		bce_rx_intr(rxr, -1, hw_rx_cons);
5411 	lwkt_serialize_exit(&rxr->rx_serialize);
5412 
5413 	/* Check for any completed TX frames. */
5414 	lwkt_serialize_enter(&txr->tx_serialize);
5415 	if (hw_tx_cons != txr->tx_cons) {
5416 		bce_tx_intr(txr, hw_tx_cons);
5417 		if (!ifsq_is_empty(txr->ifsq))
5418 			ifsq_devstart(txr->ifsq);
5419 	}
5420 	lwkt_serialize_exit(&txr->tx_serialize);
5421 }
5422 
5423 static void
5424 bce_intr_legacy(void *xsc)
5425 {
5426 	struct bce_softc *sc = xsc;
5427 	struct bce_rx_ring *rxr = &sc->rx_rings[0];
5428 	struct status_block *sblk;
5429 
5430 	sblk = sc->status_block;
5431 
5432 	/*
5433 	 * If the hardware status block index matches the last value
5434 	 * read by the driver and we haven't asserted our interrupt
5435 	 * then there's nothing to do.
5436 	 */
5437 	if (sblk->status_idx == rxr->last_status_idx &&
5438 	    (REG_RD(sc, BCE_PCICFG_MISC_STATUS) &
5439 	     BCE_PCICFG_MISC_STATUS_INTA_VALUE))
5440 		return;
5441 
5442 	/* Ack the interrupt and stop others from occuring. */
5443 	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
5444 	       BCE_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
5445 	       BCE_PCICFG_INT_ACK_CMD_MASK_INT);
5446 
5447 	/*
5448 	 * Read back to deassert IRQ immediately to avoid too
5449 	 * many spurious interrupts.
5450 	 */
5451 	REG_RD(sc, BCE_PCICFG_INT_ACK_CMD);
5452 
5453 	bce_intr(sc);
5454 
5455 	/* Re-enable interrupts. */
5456 	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
5457 	       BCE_PCICFG_INT_ACK_CMD_INDEX_VALID |
5458 	       BCE_PCICFG_INT_ACK_CMD_MASK_INT | rxr->last_status_idx);
5459 	bce_reenable_intr(rxr);
5460 }
5461 
5462 static void
5463 bce_intr_msi(void *xsc)
5464 {
5465 	struct bce_softc *sc = xsc;
5466 
5467 	/* Ack the interrupt and stop others from occuring. */
5468 	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
5469 	       BCE_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
5470 	       BCE_PCICFG_INT_ACK_CMD_MASK_INT);
5471 
5472 	bce_intr(sc);
5473 
5474 	/* Re-enable interrupts */
5475 	bce_reenable_intr(&sc->rx_rings[0]);
5476 }
5477 
5478 static void
5479 bce_intr_msi_oneshot(void *xsc)
5480 {
5481 	struct bce_softc *sc = xsc;
5482 
5483 	bce_intr(sc);
5484 
5485 	/* Re-enable interrupts */
5486 	bce_reenable_intr(&sc->rx_rings[0]);
5487 }
5488 
5489 static void
5490 bce_intr_msix_rxtx(void *xrxr)
5491 {
5492 	struct bce_rx_ring *rxr = xrxr;
5493 	struct bce_tx_ring *txr;
5494 	uint16_t hw_rx_cons, hw_tx_cons;
5495 
5496 	ASSERT_SERIALIZED(&rxr->rx_serialize);
5497 
5498 	KKASSERT(rxr->idx < rxr->sc->tx_ring_cnt);
5499 	txr = &rxr->sc->tx_rings[rxr->idx];
5500 
5501 	/*
5502 	 * Save the status block index value for use during
5503 	 * the next interrupt.
5504 	 */
5505 	rxr->last_status_idx = *rxr->hw_status_idx;
5506 
5507 	/* Make sure status index is extracted before RX/TX cons */
5508 	cpu_lfence();
5509 
5510 	/* Check if the hardware has finished any work. */
5511 	hw_rx_cons = bce_get_hw_rx_cons(rxr);
5512 	if (hw_rx_cons != rxr->rx_cons)
5513 		bce_rx_intr(rxr, -1, hw_rx_cons);
5514 
5515 	/* Check for any completed TX frames. */
5516 	hw_tx_cons = bce_get_hw_tx_cons(txr);
5517 	lwkt_serialize_enter(&txr->tx_serialize);
5518 	if (hw_tx_cons != txr->tx_cons) {
5519 		bce_tx_intr(txr, hw_tx_cons);
5520 		if (!ifsq_is_empty(txr->ifsq))
5521 			ifsq_devstart(txr->ifsq);
5522 	}
5523 	lwkt_serialize_exit(&txr->tx_serialize);
5524 
5525 	/* Re-enable interrupts */
5526 	bce_reenable_intr(rxr);
5527 }
5528 
5529 static void
5530 bce_intr_msix_rx(void *xrxr)
5531 {
5532 	struct bce_rx_ring *rxr = xrxr;
5533 	uint16_t hw_rx_cons;
5534 
5535 	ASSERT_SERIALIZED(&rxr->rx_serialize);
5536 
5537 	/*
5538 	 * Save the status block index value for use during
5539 	 * the next interrupt.
5540 	 */
5541 	rxr->last_status_idx = *rxr->hw_status_idx;
5542 
5543 	/* Make sure status index is extracted before RX cons */
5544 	cpu_lfence();
5545 
5546 	/* Check if the hardware has finished any work. */
5547 	hw_rx_cons = bce_get_hw_rx_cons(rxr);
5548 	if (hw_rx_cons != rxr->rx_cons)
5549 		bce_rx_intr(rxr, -1, hw_rx_cons);
5550 
5551 	/* Re-enable interrupts */
5552 	bce_reenable_intr(rxr);
5553 }
5554 
5555 /****************************************************************************/
5556 /* Programs the various packet receive modes (broadcast and multicast).     */
5557 /*                                                                          */
5558 /* Returns:                                                                 */
5559 /*   Nothing.                                                               */
5560 /****************************************************************************/
5561 static void
5562 bce_set_rx_mode(struct bce_softc *sc)
5563 {
5564 	struct ifnet *ifp = &sc->arpcom.ac_if;
5565 	struct ifmultiaddr *ifma;
5566 	uint32_t hashes[NUM_MC_HASH_REGISTERS] = { 0, 0, 0, 0, 0, 0, 0, 0 };
5567 	uint32_t rx_mode, sort_mode;
5568 	int h, i;
5569 
5570 	ASSERT_IFNET_SERIALIZED_ALL(ifp);
5571 
5572 	/* Initialize receive mode default settings. */
5573 	rx_mode = sc->rx_mode &
5574 		  ~(BCE_EMAC_RX_MODE_PROMISCUOUS |
5575 		    BCE_EMAC_RX_MODE_KEEP_VLAN_TAG);
5576 	sort_mode = 1 | BCE_RPM_SORT_USER0_BC_EN;
5577 
5578 	/*
5579 	 * ASF/IPMI/UMP firmware requires that VLAN tag stripping
5580 	 * be enbled.
5581 	 */
5582 	if (!(BCE_IF_CAPABILITIES & IFCAP_VLAN_HWTAGGING) &&
5583 	    !(sc->bce_flags & BCE_MFW_ENABLE_FLAG))
5584 		rx_mode |= BCE_EMAC_RX_MODE_KEEP_VLAN_TAG;
5585 
5586 	/*
5587 	 * Check for promiscuous, all multicast, or selected
5588 	 * multicast address filtering.
5589 	 */
5590 	if (ifp->if_flags & IFF_PROMISC) {
5591 		/* Enable promiscuous mode. */
5592 		rx_mode |= BCE_EMAC_RX_MODE_PROMISCUOUS;
5593 		sort_mode |= BCE_RPM_SORT_USER0_PROM_EN;
5594 	} else if (ifp->if_flags & IFF_ALLMULTI) {
5595 		/* Enable all multicast addresses. */
5596 		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
5597 			REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4),
5598 			       0xffffffff);
5599 		}
5600 		sort_mode |= BCE_RPM_SORT_USER0_MC_EN;
5601 	} else {
5602 		/* Accept one or more multicast(s). */
5603 		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
5604 			if (ifma->ifma_addr->sa_family != AF_LINK)
5605 				continue;
5606 			h = ether_crc32_le(
5607 			    LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
5608 			    ETHER_ADDR_LEN) & 0xFF;
5609 			hashes[(h & 0xE0) >> 5] |= 1 << (h & 0x1F);
5610 		}
5611 
5612 		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
5613 			REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4),
5614 			       hashes[i]);
5615 		}
5616 		sort_mode |= BCE_RPM_SORT_USER0_MC_HSH_EN;
5617 	}
5618 
5619 	/* Only make changes if the recive mode has actually changed. */
5620 	if (rx_mode != sc->rx_mode) {
5621 		sc->rx_mode = rx_mode;
5622 		REG_WR(sc, BCE_EMAC_RX_MODE, rx_mode);
5623 	}
5624 
5625 	/* Disable and clear the exisitng sort before enabling a new sort. */
5626 	REG_WR(sc, BCE_RPM_SORT_USER0, 0x0);
5627 	REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode);
5628 	REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode | BCE_RPM_SORT_USER0_ENA);
5629 }
5630 
5631 /****************************************************************************/
5632 /* Called periodically to updates statistics from the controllers           */
5633 /* statistics block.                                                        */
5634 /*                                                                          */
5635 /* Returns:                                                                 */
5636 /*   Nothing.                                                               */
5637 /****************************************************************************/
5638 static void
5639 bce_stats_update(struct bce_softc *sc)
5640 {
5641 	struct ifnet *ifp = &sc->arpcom.ac_if;
5642 	struct statistics_block *stats = sc->stats_block;
5643 
5644 	ASSERT_SERIALIZED(&sc->main_serialize);
5645 
5646 	/*
5647 	 * Certain controllers don't report carrier sense errors correctly.
5648 	 * See errata E11_5708CA0_1165.
5649 	 */
5650 	if (!(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) &&
5651 	    !(BCE_CHIP_ID(sc) == BCE_CHIP_ID_5708_A0)) {
5652 		IFNET_STAT_INC(ifp, oerrors,
5653 			(u_long)stats->stat_Dot3StatsCarrierSenseErrors);
5654 	}
5655 
5656 	/*
5657 	 * Update the sysctl statistics from the hardware statistics.
5658 	 */
5659 	sc->stat_IfHCInOctets =
5660 		((uint64_t)stats->stat_IfHCInOctets_hi << 32) +
5661 		 (uint64_t)stats->stat_IfHCInOctets_lo;
5662 
5663 	sc->stat_IfHCInBadOctets =
5664 		((uint64_t)stats->stat_IfHCInBadOctets_hi << 32) +
5665 		 (uint64_t)stats->stat_IfHCInBadOctets_lo;
5666 
5667 	sc->stat_IfHCOutOctets =
5668 		((uint64_t)stats->stat_IfHCOutOctets_hi << 32) +
5669 		 (uint64_t)stats->stat_IfHCOutOctets_lo;
5670 
5671 	sc->stat_IfHCOutBadOctets =
5672 		((uint64_t)stats->stat_IfHCOutBadOctets_hi << 32) +
5673 		 (uint64_t)stats->stat_IfHCOutBadOctets_lo;
5674 
5675 	sc->stat_IfHCInUcastPkts =
5676 		((uint64_t)stats->stat_IfHCInUcastPkts_hi << 32) +
5677 		 (uint64_t)stats->stat_IfHCInUcastPkts_lo;
5678 
5679 	sc->stat_IfHCInMulticastPkts =
5680 		((uint64_t)stats->stat_IfHCInMulticastPkts_hi << 32) +
5681 		 (uint64_t)stats->stat_IfHCInMulticastPkts_lo;
5682 
5683 	sc->stat_IfHCInBroadcastPkts =
5684 		((uint64_t)stats->stat_IfHCInBroadcastPkts_hi << 32) +
5685 		 (uint64_t)stats->stat_IfHCInBroadcastPkts_lo;
5686 
5687 	sc->stat_IfHCOutUcastPkts =
5688 		((uint64_t)stats->stat_IfHCOutUcastPkts_hi << 32) +
5689 		 (uint64_t)stats->stat_IfHCOutUcastPkts_lo;
5690 
5691 	sc->stat_IfHCOutMulticastPkts =
5692 		((uint64_t)stats->stat_IfHCOutMulticastPkts_hi << 32) +
5693 		 (uint64_t)stats->stat_IfHCOutMulticastPkts_lo;
5694 
5695 	sc->stat_IfHCOutBroadcastPkts =
5696 		((uint64_t)stats->stat_IfHCOutBroadcastPkts_hi << 32) +
5697 		 (uint64_t)stats->stat_IfHCOutBroadcastPkts_lo;
5698 
5699 	sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors =
5700 		stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors;
5701 
5702 	sc->stat_Dot3StatsCarrierSenseErrors =
5703 		stats->stat_Dot3StatsCarrierSenseErrors;
5704 
5705 	sc->stat_Dot3StatsFCSErrors =
5706 		stats->stat_Dot3StatsFCSErrors;
5707 
5708 	sc->stat_Dot3StatsAlignmentErrors =
5709 		stats->stat_Dot3StatsAlignmentErrors;
5710 
5711 	sc->stat_Dot3StatsSingleCollisionFrames =
5712 		stats->stat_Dot3StatsSingleCollisionFrames;
5713 
5714 	sc->stat_Dot3StatsMultipleCollisionFrames =
5715 		stats->stat_Dot3StatsMultipleCollisionFrames;
5716 
5717 	sc->stat_Dot3StatsDeferredTransmissions =
5718 		stats->stat_Dot3StatsDeferredTransmissions;
5719 
5720 	sc->stat_Dot3StatsExcessiveCollisions =
5721 		stats->stat_Dot3StatsExcessiveCollisions;
5722 
5723 	sc->stat_Dot3StatsLateCollisions =
5724 		stats->stat_Dot3StatsLateCollisions;
5725 
5726 	sc->stat_EtherStatsCollisions =
5727 		stats->stat_EtherStatsCollisions;
5728 
5729 	sc->stat_EtherStatsFragments =
5730 		stats->stat_EtherStatsFragments;
5731 
5732 	sc->stat_EtherStatsJabbers =
5733 		stats->stat_EtherStatsJabbers;
5734 
5735 	sc->stat_EtherStatsUndersizePkts =
5736 		stats->stat_EtherStatsUndersizePkts;
5737 
5738 	sc->stat_EtherStatsOverrsizePkts =
5739 		stats->stat_EtherStatsOverrsizePkts;
5740 
5741 	sc->stat_EtherStatsPktsRx64Octets =
5742 		stats->stat_EtherStatsPktsRx64Octets;
5743 
5744 	sc->stat_EtherStatsPktsRx65Octetsto127Octets =
5745 		stats->stat_EtherStatsPktsRx65Octetsto127Octets;
5746 
5747 	sc->stat_EtherStatsPktsRx128Octetsto255Octets =
5748 		stats->stat_EtherStatsPktsRx128Octetsto255Octets;
5749 
5750 	sc->stat_EtherStatsPktsRx256Octetsto511Octets =
5751 		stats->stat_EtherStatsPktsRx256Octetsto511Octets;
5752 
5753 	sc->stat_EtherStatsPktsRx512Octetsto1023Octets =
5754 		stats->stat_EtherStatsPktsRx512Octetsto1023Octets;
5755 
5756 	sc->stat_EtherStatsPktsRx1024Octetsto1522Octets =
5757 		stats->stat_EtherStatsPktsRx1024Octetsto1522Octets;
5758 
5759 	sc->stat_EtherStatsPktsRx1523Octetsto9022Octets =
5760 		stats->stat_EtherStatsPktsRx1523Octetsto9022Octets;
5761 
5762 	sc->stat_EtherStatsPktsTx64Octets =
5763 		stats->stat_EtherStatsPktsTx64Octets;
5764 
5765 	sc->stat_EtherStatsPktsTx65Octetsto127Octets =
5766 		stats->stat_EtherStatsPktsTx65Octetsto127Octets;
5767 
5768 	sc->stat_EtherStatsPktsTx128Octetsto255Octets =
5769 		stats->stat_EtherStatsPktsTx128Octetsto255Octets;
5770 
5771 	sc->stat_EtherStatsPktsTx256Octetsto511Octets =
5772 		stats->stat_EtherStatsPktsTx256Octetsto511Octets;
5773 
5774 	sc->stat_EtherStatsPktsTx512Octetsto1023Octets =
5775 		stats->stat_EtherStatsPktsTx512Octetsto1023Octets;
5776 
5777 	sc->stat_EtherStatsPktsTx1024Octetsto1522Octets =
5778 		stats->stat_EtherStatsPktsTx1024Octetsto1522Octets;
5779 
5780 	sc->stat_EtherStatsPktsTx1523Octetsto9022Octets =
5781 		stats->stat_EtherStatsPktsTx1523Octetsto9022Octets;
5782 
5783 	sc->stat_XonPauseFramesReceived =
5784 		stats->stat_XonPauseFramesReceived;
5785 
5786 	sc->stat_XoffPauseFramesReceived =
5787 		stats->stat_XoffPauseFramesReceived;
5788 
5789 	sc->stat_OutXonSent =
5790 		stats->stat_OutXonSent;
5791 
5792 	sc->stat_OutXoffSent =
5793 		stats->stat_OutXoffSent;
5794 
5795 	sc->stat_FlowControlDone =
5796 		stats->stat_FlowControlDone;
5797 
5798 	sc->stat_MacControlFramesReceived =
5799 		stats->stat_MacControlFramesReceived;
5800 
5801 	sc->stat_XoffStateEntered =
5802 		stats->stat_XoffStateEntered;
5803 
5804 	sc->stat_IfInFramesL2FilterDiscards =
5805 		stats->stat_IfInFramesL2FilterDiscards;
5806 
5807 	sc->stat_IfInRuleCheckerDiscards =
5808 		stats->stat_IfInRuleCheckerDiscards;
5809 
5810 	sc->stat_IfInFTQDiscards =
5811 		stats->stat_IfInFTQDiscards;
5812 
5813 	sc->stat_IfInMBUFDiscards =
5814 		stats->stat_IfInMBUFDiscards;
5815 
5816 	sc->stat_IfInRuleCheckerP4Hit =
5817 		stats->stat_IfInRuleCheckerP4Hit;
5818 
5819 	sc->stat_CatchupInRuleCheckerDiscards =
5820 		stats->stat_CatchupInRuleCheckerDiscards;
5821 
5822 	sc->stat_CatchupInFTQDiscards =
5823 		stats->stat_CatchupInFTQDiscards;
5824 
5825 	sc->stat_CatchupInMBUFDiscards =
5826 		stats->stat_CatchupInMBUFDiscards;
5827 
5828 	sc->stat_CatchupInRuleCheckerP4Hit =
5829 		stats->stat_CatchupInRuleCheckerP4Hit;
5830 
5831 	sc->com_no_buffers = REG_RD_IND(sc, 0x120084);
5832 
5833 	/*
5834 	 * Update the interface statistics from the
5835 	 * hardware statistics.
5836 	 */
5837 	IFNET_STAT_SET(ifp, collisions, (u_long)sc->stat_EtherStatsCollisions);
5838 
5839 	IFNET_STAT_SET(ifp, ierrors, (u_long)sc->stat_EtherStatsUndersizePkts +
5840 	    (u_long)sc->stat_EtherStatsOverrsizePkts +
5841 	    (u_long)sc->stat_IfInMBUFDiscards +
5842 	    (u_long)sc->stat_Dot3StatsAlignmentErrors +
5843 	    (u_long)sc->stat_Dot3StatsFCSErrors +
5844 	    (u_long)sc->stat_IfInRuleCheckerDiscards +
5845 	    (u_long)sc->stat_IfInFTQDiscards +
5846 	    (u_long)sc->com_no_buffers);
5847 
5848 	IFNET_STAT_SET(ifp, oerrors,
5849 	    (u_long)sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors +
5850 	    (u_long)sc->stat_Dot3StatsExcessiveCollisions +
5851 	    (u_long)sc->stat_Dot3StatsLateCollisions);
5852 }
5853 
5854 /****************************************************************************/
5855 /* Periodic function to notify the bootcode that the driver is still        */
5856 /* present.                                                                 */
5857 /*                                                                          */
5858 /* Returns:                                                                 */
5859 /*   Nothing.                                                               */
5860 /****************************************************************************/
5861 static void
5862 bce_pulse(void *xsc)
5863 {
5864 	struct bce_softc *sc = xsc;
5865 	struct ifnet *ifp = &sc->arpcom.ac_if;
5866 	uint32_t msg;
5867 
5868 	lwkt_serialize_enter(&sc->main_serialize);
5869 
5870 	/* Tell the firmware that the driver is still running. */
5871 	msg = (uint32_t)++sc->bce_fw_drv_pulse_wr_seq;
5872 	bce_shmem_wr(sc, BCE_DRV_PULSE_MB, msg);
5873 
5874 	/* Update the bootcode condition. */
5875 	sc->bc_state = bce_shmem_rd(sc, BCE_BC_STATE_CONDITION);
5876 
5877 	/* Report whether the bootcode still knows the driver is running. */
5878 	if (!sc->bce_drv_cardiac_arrest) {
5879 		if (!(sc->bc_state & BCE_CONDITION_DRV_PRESENT)) {
5880 			sc->bce_drv_cardiac_arrest = 1;
5881 			if_printf(ifp, "Bootcode lost the driver pulse! "
5882 			    "(bc_state = 0x%08X)\n", sc->bc_state);
5883 		}
5884 	} else {
5885  		/*
5886  		 * Not supported by all bootcode versions.
5887  		 * (v5.0.11+ and v5.2.1+)  Older bootcode
5888  		 * will require the driver to reset the
5889  		 * controller to clear this condition.
5890 		 */
5891 		if (sc->bc_state & BCE_CONDITION_DRV_PRESENT) {
5892 			sc->bce_drv_cardiac_arrest = 0;
5893 			if_printf(ifp, "Bootcode found the driver pulse! "
5894 			    "(bc_state = 0x%08X)\n", sc->bc_state);
5895 		}
5896 	}
5897 
5898 	/* Schedule the next pulse. */
5899 	callout_reset_bycpu(&sc->bce_pulse_callout, hz, bce_pulse, sc,
5900 	    sc->bce_timer_cpuid);
5901 
5902 	lwkt_serialize_exit(&sc->main_serialize);
5903 }
5904 
5905 /****************************************************************************/
5906 /* Periodic function to check whether MSI is lost                           */
5907 /*                                                                          */
5908 /* Returns:                                                                 */
5909 /*   Nothing.                                                               */
5910 /****************************************************************************/
5911 static void
5912 bce_check_msi(void *xsc)
5913 {
5914 	struct bce_softc *sc = xsc;
5915 	struct ifnet *ifp = &sc->arpcom.ac_if;
5916 	struct status_block *sblk = sc->status_block;
5917 	struct bce_tx_ring *txr = &sc->tx_rings[0];
5918 	struct bce_rx_ring *rxr = &sc->rx_rings[0];
5919 
5920 	lwkt_serialize_enter(&sc->main_serialize);
5921 
5922 	KKASSERT(mycpuid == sc->bce_msix[0].msix_cpuid);
5923 
5924 	if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) != IFF_RUNNING) {
5925 		lwkt_serialize_exit(&sc->main_serialize);
5926 		return;
5927 	}
5928 
5929 	if (bce_get_hw_rx_cons(rxr) != rxr->rx_cons ||
5930 	    bce_get_hw_tx_cons(txr) != txr->tx_cons ||
5931 	    (sblk->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
5932 	    (sblk->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE)) {
5933 		if (sc->bce_check_rx_cons == rxr->rx_cons &&
5934 		    sc->bce_check_tx_cons == txr->tx_cons &&
5935 		    sc->bce_check_status_idx == rxr->last_status_idx) {
5936 			uint32_t msi_ctrl;
5937 
5938 			if (!sc->bce_msi_maylose) {
5939 				sc->bce_msi_maylose = TRUE;
5940 				goto done;
5941 			}
5942 
5943 			msi_ctrl = REG_RD(sc, BCE_PCICFG_MSI_CONTROL);
5944 			if (msi_ctrl & BCE_PCICFG_MSI_CONTROL_ENABLE) {
5945 				if (bootverbose)
5946 					if_printf(ifp, "lost MSI\n");
5947 
5948 				REG_WR(sc, BCE_PCICFG_MSI_CONTROL,
5949 				    msi_ctrl & ~BCE_PCICFG_MSI_CONTROL_ENABLE);
5950 				REG_WR(sc, BCE_PCICFG_MSI_CONTROL, msi_ctrl);
5951 
5952 				bce_intr_msi(sc);
5953 			} else if (bootverbose) {
5954 				if_printf(ifp, "MSI may be lost\n");
5955 			}
5956 		}
5957 	}
5958 	sc->bce_msi_maylose = FALSE;
5959 	sc->bce_check_rx_cons = rxr->rx_cons;
5960 	sc->bce_check_tx_cons = txr->tx_cons;
5961 	sc->bce_check_status_idx = rxr->last_status_idx;
5962 
5963 done:
5964 	callout_reset(&sc->bce_ckmsi_callout, BCE_MSI_CKINTVL,
5965 	    bce_check_msi, sc);
5966 	lwkt_serialize_exit(&sc->main_serialize);
5967 }
5968 
5969 /****************************************************************************/
5970 /* Periodic function to perform maintenance tasks.                          */
5971 /*                                                                          */
5972 /* Returns:                                                                 */
5973 /*   Nothing.                                                               */
5974 /****************************************************************************/
5975 static void
5976 bce_tick_serialized(struct bce_softc *sc)
5977 {
5978 	struct mii_data *mii;
5979 
5980 	ASSERT_SERIALIZED(&sc->main_serialize);
5981 
5982 	/* Update the statistics from the hardware statistics block. */
5983 	bce_stats_update(sc);
5984 
5985 	/* Schedule the next tick. */
5986 	callout_reset_bycpu(&sc->bce_tick_callout, hz, bce_tick, sc,
5987 	    sc->bce_timer_cpuid);
5988 
5989 	/* If link is up already up then we're done. */
5990 	if (sc->bce_link)
5991 		return;
5992 
5993 	mii = device_get_softc(sc->bce_miibus);
5994 	mii_tick(mii);
5995 
5996 	/* Check if the link has come up. */
5997 	if ((mii->mii_media_status & IFM_ACTIVE) &&
5998 	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
5999 		int i;
6000 
6001 		sc->bce_link++;
6002 		/* Now that link is up, handle any outstanding TX traffic. */
6003 		for (i = 0; i < sc->tx_ring_cnt; ++i)
6004 			ifsq_devstart_sched(sc->tx_rings[i].ifsq);
6005 	}
6006 }
6007 
6008 static void
6009 bce_tick(void *xsc)
6010 {
6011 	struct bce_softc *sc = xsc;
6012 
6013 	lwkt_serialize_enter(&sc->main_serialize);
6014 	bce_tick_serialized(sc);
6015 	lwkt_serialize_exit(&sc->main_serialize);
6016 }
6017 
6018 /****************************************************************************/
6019 /* Adds any sysctl parameters for tuning or debugging purposes.             */
6020 /*                                                                          */
6021 /* Returns:                                                                 */
6022 /*   0 for success, positive value for failure.                             */
6023 /****************************************************************************/
6024 static void
6025 bce_add_sysctls(struct bce_softc *sc)
6026 {
6027 	struct sysctl_ctx_list *ctx;
6028 	struct sysctl_oid_list *children;
6029 #if defined(BCE_TSS_DEBUG) || defined(BCE_RSS_DEBUG)
6030 	char node[32];
6031 	int i;
6032 #endif
6033 
6034 	ctx = device_get_sysctl_ctx(sc->bce_dev);
6035 	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->bce_dev));
6036 
6037 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_bds_int",
6038 			CTLTYPE_INT | CTLFLAG_RW,
6039 			sc, 0, bce_sysctl_tx_bds_int, "I",
6040 			"Send max coalesced BD count during interrupt");
6041 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_bds",
6042 			CTLTYPE_INT | CTLFLAG_RW,
6043 			sc, 0, bce_sysctl_tx_bds, "I",
6044 			"Send max coalesced BD count");
6045 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_ticks_int",
6046 			CTLTYPE_INT | CTLFLAG_RW,
6047 			sc, 0, bce_sysctl_tx_ticks_int, "I",
6048 			"Send coalescing ticks during interrupt");
6049 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_ticks",
6050 			CTLTYPE_INT | CTLFLAG_RW,
6051 			sc, 0, bce_sysctl_tx_ticks, "I",
6052 			"Send coalescing ticks");
6053 
6054 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_bds_int",
6055 			CTLTYPE_INT | CTLFLAG_RW,
6056 			sc, 0, bce_sysctl_rx_bds_int, "I",
6057 			"Receive max coalesced BD count during interrupt");
6058 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_bds",
6059 			CTLTYPE_INT | CTLFLAG_RW,
6060 			sc, 0, bce_sysctl_rx_bds, "I",
6061 			"Receive max coalesced BD count");
6062 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_ticks_int",
6063 			CTLTYPE_INT | CTLFLAG_RW,
6064 			sc, 0, bce_sysctl_rx_ticks_int, "I",
6065 			"Receive coalescing ticks during interrupt");
6066 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_ticks",
6067 			CTLTYPE_INT | CTLFLAG_RW,
6068 			sc, 0, bce_sysctl_rx_ticks, "I",
6069 			"Receive coalescing ticks");
6070 
6071 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_rings",
6072 		CTLFLAG_RD, &sc->rx_ring_cnt, 0, "# of RX rings");
6073 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_pages",
6074 		CTLFLAG_RD, &sc->rx_rings[0].rx_pages, 0, "# of RX pages");
6075 
6076 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_rings",
6077 		CTLFLAG_RD, &sc->tx_ring_cnt, 0, "# of TX rings");
6078 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_pages",
6079 		CTLFLAG_RD, &sc->tx_rings[0].tx_pages, 0, "# of TX pages");
6080 
6081 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_wreg",
6082 	    	CTLFLAG_RW, &sc->tx_rings[0].tx_wreg, 0,
6083 		"# segments before write to hardware registers");
6084 
6085 	if (sc->bce_irq_type == PCI_INTR_TYPE_MSIX) {
6086 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_cpumap",
6087 		    CTLTYPE_OPAQUE | CTLFLAG_RD, sc->tx_rmap, 0,
6088 		    if_ringmap_cpumap_sysctl, "I", "TX ring CPU map");
6089 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_cpumap",
6090 		    CTLTYPE_OPAQUE | CTLFLAG_RD, sc->rx_rmap, 0,
6091 		    if_ringmap_cpumap_sysctl, "I", "RX ring CPU map");
6092 	} else {
6093 #ifdef IFPOLL_ENABLE
6094 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_poll_cpumap",
6095 		    CTLTYPE_OPAQUE | CTLFLAG_RD, sc->tx_rmap, 0,
6096 		    if_ringmap_cpumap_sysctl, "I", "TX poll CPU map");
6097 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_poll_cpumap",
6098 		    CTLTYPE_OPAQUE | CTLFLAG_RD, sc->rx_rmap, 0,
6099 		    if_ringmap_cpumap_sysctl, "I", "RX poll CPU map");
6100 #endif
6101 	}
6102 
6103 #ifdef BCE_RSS_DEBUG
6104 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rss_debug",
6105 	    CTLFLAG_RW, &sc->rss_debug, 0, "RSS debug level");
6106 	for (i = 0; i < sc->rx_ring_cnt; ++i) {
6107 		ksnprintf(node, sizeof(node), "rx%d_pkt", i);
6108 		SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, node,
6109 		    CTLFLAG_RW, &sc->rx_rings[i].rx_pkts,
6110 		    "RXed packets");
6111 	}
6112 #endif
6113 
6114 #ifdef BCE_TSS_DEBUG
6115 	for (i = 0; i < sc->tx_ring_cnt; ++i) {
6116 		ksnprintf(node, sizeof(node), "tx%d_pkt", i);
6117 		SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, node,
6118 		    CTLFLAG_RW, &sc->tx_rings[i].tx_pkts,
6119 		    "TXed packets");
6120 	}
6121 #endif
6122 
6123 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6124 		"stat_IfHCInOctets",
6125 		CTLFLAG_RD, &sc->stat_IfHCInOctets,
6126 		"Bytes received");
6127 
6128 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6129 		"stat_IfHCInBadOctets",
6130 		CTLFLAG_RD, &sc->stat_IfHCInBadOctets,
6131 		"Bad bytes received");
6132 
6133 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6134 		"stat_IfHCOutOctets",
6135 		CTLFLAG_RD, &sc->stat_IfHCOutOctets,
6136 		"Bytes sent");
6137 
6138 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6139 		"stat_IfHCOutBadOctets",
6140 		CTLFLAG_RD, &sc->stat_IfHCOutBadOctets,
6141 		"Bad bytes sent");
6142 
6143 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6144 		"stat_IfHCInUcastPkts",
6145 		CTLFLAG_RD, &sc->stat_IfHCInUcastPkts,
6146 		"Unicast packets received");
6147 
6148 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6149 		"stat_IfHCInMulticastPkts",
6150 		CTLFLAG_RD, &sc->stat_IfHCInMulticastPkts,
6151 		"Multicast packets received");
6152 
6153 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6154 		"stat_IfHCInBroadcastPkts",
6155 		CTLFLAG_RD, &sc->stat_IfHCInBroadcastPkts,
6156 		"Broadcast packets received");
6157 
6158 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6159 		"stat_IfHCOutUcastPkts",
6160 		CTLFLAG_RD, &sc->stat_IfHCOutUcastPkts,
6161 		"Unicast packets sent");
6162 
6163 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6164 		"stat_IfHCOutMulticastPkts",
6165 		CTLFLAG_RD, &sc->stat_IfHCOutMulticastPkts,
6166 		"Multicast packets sent");
6167 
6168 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6169 		"stat_IfHCOutBroadcastPkts",
6170 		CTLFLAG_RD, &sc->stat_IfHCOutBroadcastPkts,
6171 		"Broadcast packets sent");
6172 
6173 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6174 		"stat_emac_tx_stat_dot3statsinternalmactransmiterrors",
6175 		CTLFLAG_RD, &sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors,
6176 		0, "Internal MAC transmit errors");
6177 
6178 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6179 		"stat_Dot3StatsCarrierSenseErrors",
6180 		CTLFLAG_RD, &sc->stat_Dot3StatsCarrierSenseErrors,
6181 		0, "Carrier sense errors");
6182 
6183 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6184 		"stat_Dot3StatsFCSErrors",
6185 		CTLFLAG_RD, &sc->stat_Dot3StatsFCSErrors,
6186 		0, "Frame check sequence errors");
6187 
6188 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6189 		"stat_Dot3StatsAlignmentErrors",
6190 		CTLFLAG_RD, &sc->stat_Dot3StatsAlignmentErrors,
6191 		0, "Alignment errors");
6192 
6193 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6194 		"stat_Dot3StatsSingleCollisionFrames",
6195 		CTLFLAG_RD, &sc->stat_Dot3StatsSingleCollisionFrames,
6196 		0, "Single Collision Frames");
6197 
6198 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6199 		"stat_Dot3StatsMultipleCollisionFrames",
6200 		CTLFLAG_RD, &sc->stat_Dot3StatsMultipleCollisionFrames,
6201 		0, "Multiple Collision Frames");
6202 
6203 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6204 		"stat_Dot3StatsDeferredTransmissions",
6205 		CTLFLAG_RD, &sc->stat_Dot3StatsDeferredTransmissions,
6206 		0, "Deferred Transmissions");
6207 
6208 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6209 		"stat_Dot3StatsExcessiveCollisions",
6210 		CTLFLAG_RD, &sc->stat_Dot3StatsExcessiveCollisions,
6211 		0, "Excessive Collisions");
6212 
6213 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6214 		"stat_Dot3StatsLateCollisions",
6215 		CTLFLAG_RD, &sc->stat_Dot3StatsLateCollisions,
6216 		0, "Late Collisions");
6217 
6218 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6219 		"stat_EtherStatsCollisions",
6220 		CTLFLAG_RD, &sc->stat_EtherStatsCollisions,
6221 		0, "Collisions");
6222 
6223 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6224 		"stat_EtherStatsFragments",
6225 		CTLFLAG_RD, &sc->stat_EtherStatsFragments,
6226 		0, "Fragments");
6227 
6228 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6229 		"stat_EtherStatsJabbers",
6230 		CTLFLAG_RD, &sc->stat_EtherStatsJabbers,
6231 		0, "Jabbers");
6232 
6233 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6234 		"stat_EtherStatsUndersizePkts",
6235 		CTLFLAG_RD, &sc->stat_EtherStatsUndersizePkts,
6236 		0, "Undersize packets");
6237 
6238 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6239 		"stat_EtherStatsOverrsizePkts",
6240 		CTLFLAG_RD, &sc->stat_EtherStatsOverrsizePkts,
6241 		0, "stat_EtherStatsOverrsizePkts");
6242 
6243 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6244 		"stat_EtherStatsPktsRx64Octets",
6245 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx64Octets,
6246 		0, "Bytes received in 64 byte packets");
6247 
6248 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6249 		"stat_EtherStatsPktsRx65Octetsto127Octets",
6250 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx65Octetsto127Octets,
6251 		0, "Bytes received in 65 to 127 byte packets");
6252 
6253 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6254 		"stat_EtherStatsPktsRx128Octetsto255Octets",
6255 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx128Octetsto255Octets,
6256 		0, "Bytes received in 128 to 255 byte packets");
6257 
6258 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6259 		"stat_EtherStatsPktsRx256Octetsto511Octets",
6260 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx256Octetsto511Octets,
6261 		0, "Bytes received in 256 to 511 byte packets");
6262 
6263 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6264 		"stat_EtherStatsPktsRx512Octetsto1023Octets",
6265 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx512Octetsto1023Octets,
6266 		0, "Bytes received in 512 to 1023 byte packets");
6267 
6268 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6269 		"stat_EtherStatsPktsRx1024Octetsto1522Octets",
6270 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1024Octetsto1522Octets,
6271 		0, "Bytes received in 1024 t0 1522 byte packets");
6272 
6273 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6274 		"stat_EtherStatsPktsRx1523Octetsto9022Octets",
6275 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1523Octetsto9022Octets,
6276 		0, "Bytes received in 1523 to 9022 byte packets");
6277 
6278 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6279 		"stat_EtherStatsPktsTx64Octets",
6280 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx64Octets,
6281 		0, "Bytes sent in 64 byte packets");
6282 
6283 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6284 		"stat_EtherStatsPktsTx65Octetsto127Octets",
6285 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx65Octetsto127Octets,
6286 		0, "Bytes sent in 65 to 127 byte packets");
6287 
6288 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6289 		"stat_EtherStatsPktsTx128Octetsto255Octets",
6290 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx128Octetsto255Octets,
6291 		0, "Bytes sent in 128 to 255 byte packets");
6292 
6293 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6294 		"stat_EtherStatsPktsTx256Octetsto511Octets",
6295 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx256Octetsto511Octets,
6296 		0, "Bytes sent in 256 to 511 byte packets");
6297 
6298 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6299 		"stat_EtherStatsPktsTx512Octetsto1023Octets",
6300 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx512Octetsto1023Octets,
6301 		0, "Bytes sent in 512 to 1023 byte packets");
6302 
6303 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6304 		"stat_EtherStatsPktsTx1024Octetsto1522Octets",
6305 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1024Octetsto1522Octets,
6306 		0, "Bytes sent in 1024 to 1522 byte packets");
6307 
6308 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6309 		"stat_EtherStatsPktsTx1523Octetsto9022Octets",
6310 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1523Octetsto9022Octets,
6311 		0, "Bytes sent in 1523 to 9022 byte packets");
6312 
6313 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6314 		"stat_XonPauseFramesReceived",
6315 		CTLFLAG_RD, &sc->stat_XonPauseFramesReceived,
6316 		0, "XON pause frames receved");
6317 
6318 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6319 		"stat_XoffPauseFramesReceived",
6320 		CTLFLAG_RD, &sc->stat_XoffPauseFramesReceived,
6321 		0, "XOFF pause frames received");
6322 
6323 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6324 		"stat_OutXonSent",
6325 		CTLFLAG_RD, &sc->stat_OutXonSent,
6326 		0, "XON pause frames sent");
6327 
6328 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6329 		"stat_OutXoffSent",
6330 		CTLFLAG_RD, &sc->stat_OutXoffSent,
6331 		0, "XOFF pause frames sent");
6332 
6333 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6334 		"stat_FlowControlDone",
6335 		CTLFLAG_RD, &sc->stat_FlowControlDone,
6336 		0, "Flow control done");
6337 
6338 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6339 		"stat_MacControlFramesReceived",
6340 		CTLFLAG_RD, &sc->stat_MacControlFramesReceived,
6341 		0, "MAC control frames received");
6342 
6343 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6344 		"stat_XoffStateEntered",
6345 		CTLFLAG_RD, &sc->stat_XoffStateEntered,
6346 		0, "XOFF state entered");
6347 
6348 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6349 		"stat_IfInFramesL2FilterDiscards",
6350 		CTLFLAG_RD, &sc->stat_IfInFramesL2FilterDiscards,
6351 		0, "Received L2 packets discarded");
6352 
6353 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6354 		"stat_IfInRuleCheckerDiscards",
6355 		CTLFLAG_RD, &sc->stat_IfInRuleCheckerDiscards,
6356 		0, "Received packets discarded by rule");
6357 
6358 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6359 		"stat_IfInFTQDiscards",
6360 		CTLFLAG_RD, &sc->stat_IfInFTQDiscards,
6361 		0, "Received packet FTQ discards");
6362 
6363 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6364 		"stat_IfInMBUFDiscards",
6365 		CTLFLAG_RD, &sc->stat_IfInMBUFDiscards,
6366 		0, "Received packets discarded due to lack of controller buffer memory");
6367 
6368 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6369 		"stat_IfInRuleCheckerP4Hit",
6370 		CTLFLAG_RD, &sc->stat_IfInRuleCheckerP4Hit,
6371 		0, "Received packets rule checker hits");
6372 
6373 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6374 		"stat_CatchupInRuleCheckerDiscards",
6375 		CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerDiscards,
6376 		0, "Received packets discarded in Catchup path");
6377 
6378 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6379 		"stat_CatchupInFTQDiscards",
6380 		CTLFLAG_RD, &sc->stat_CatchupInFTQDiscards,
6381 		0, "Received packets discarded in FTQ in Catchup path");
6382 
6383 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6384 		"stat_CatchupInMBUFDiscards",
6385 		CTLFLAG_RD, &sc->stat_CatchupInMBUFDiscards,
6386 		0, "Received packets discarded in controller buffer memory in Catchup path");
6387 
6388 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6389 		"stat_CatchupInRuleCheckerP4Hit",
6390 		CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerP4Hit,
6391 		0, "Received packets rule checker hits in Catchup path");
6392 
6393 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6394 		"com_no_buffers",
6395 		CTLFLAG_RD, &sc->com_no_buffers,
6396 		0, "Valid packets received but no RX buffers available");
6397 }
6398 
6399 static int
6400 bce_sysctl_tx_bds_int(SYSCTL_HANDLER_ARGS)
6401 {
6402 	struct bce_softc *sc = arg1;
6403 
6404 	return bce_sysctl_coal_change(oidp, arg1, arg2, req,
6405 			&sc->bce_tx_quick_cons_trip_int,
6406 			BCE_COALMASK_TX_BDS_INT);
6407 }
6408 
6409 static int
6410 bce_sysctl_tx_bds(SYSCTL_HANDLER_ARGS)
6411 {
6412 	struct bce_softc *sc = arg1;
6413 
6414 	return bce_sysctl_coal_change(oidp, arg1, arg2, req,
6415 			&sc->bce_tx_quick_cons_trip,
6416 			BCE_COALMASK_TX_BDS);
6417 }
6418 
6419 static int
6420 bce_sysctl_tx_ticks_int(SYSCTL_HANDLER_ARGS)
6421 {
6422 	struct bce_softc *sc = arg1;
6423 
6424 	return bce_sysctl_coal_change(oidp, arg1, arg2, req,
6425 			&sc->bce_tx_ticks_int,
6426 			BCE_COALMASK_TX_TICKS_INT);
6427 }
6428 
6429 static int
6430 bce_sysctl_tx_ticks(SYSCTL_HANDLER_ARGS)
6431 {
6432 	struct bce_softc *sc = arg1;
6433 
6434 	return bce_sysctl_coal_change(oidp, arg1, arg2, req,
6435 			&sc->bce_tx_ticks,
6436 			BCE_COALMASK_TX_TICKS);
6437 }
6438 
6439 static int
6440 bce_sysctl_rx_bds_int(SYSCTL_HANDLER_ARGS)
6441 {
6442 	struct bce_softc *sc = arg1;
6443 
6444 	return bce_sysctl_coal_change(oidp, arg1, arg2, req,
6445 			&sc->bce_rx_quick_cons_trip_int,
6446 			BCE_COALMASK_RX_BDS_INT);
6447 }
6448 
6449 static int
6450 bce_sysctl_rx_bds(SYSCTL_HANDLER_ARGS)
6451 {
6452 	struct bce_softc *sc = arg1;
6453 
6454 	return bce_sysctl_coal_change(oidp, arg1, arg2, req,
6455 			&sc->bce_rx_quick_cons_trip,
6456 			BCE_COALMASK_RX_BDS);
6457 }
6458 
6459 static int
6460 bce_sysctl_rx_ticks_int(SYSCTL_HANDLER_ARGS)
6461 {
6462 	struct bce_softc *sc = arg1;
6463 
6464 	return bce_sysctl_coal_change(oidp, arg1, arg2, req,
6465 			&sc->bce_rx_ticks_int,
6466 			BCE_COALMASK_RX_TICKS_INT);
6467 }
6468 
6469 static int
6470 bce_sysctl_rx_ticks(SYSCTL_HANDLER_ARGS)
6471 {
6472 	struct bce_softc *sc = arg1;
6473 
6474 	return bce_sysctl_coal_change(oidp, arg1, arg2, req,
6475 			&sc->bce_rx_ticks,
6476 			BCE_COALMASK_RX_TICKS);
6477 }
6478 
6479 static int
6480 bce_sysctl_coal_change(SYSCTL_HANDLER_ARGS, uint32_t *coal,
6481     uint32_t coalchg_mask)
6482 {
6483 	struct bce_softc *sc = arg1;
6484 	struct ifnet *ifp = &sc->arpcom.ac_if;
6485 	int error = 0, v;
6486 
6487 	ifnet_serialize_all(ifp);
6488 
6489 	v = *coal;
6490 	error = sysctl_handle_int(oidp, &v, 0, req);
6491 	if (!error && req->newptr != NULL) {
6492 		if (v < 0) {
6493 			error = EINVAL;
6494 		} else {
6495 			*coal = v;
6496 			sc->bce_coalchg_mask |= coalchg_mask;
6497 
6498 			/* Commit changes */
6499 			bce_coal_change(sc);
6500 		}
6501 	}
6502 
6503 	ifnet_deserialize_all(ifp);
6504 	return error;
6505 }
6506 
6507 static void
6508 bce_coal_change(struct bce_softc *sc)
6509 {
6510 	struct ifnet *ifp = &sc->arpcom.ac_if;
6511 	int i;
6512 
6513 	ASSERT_SERIALIZED(&sc->main_serialize);
6514 
6515 	if ((ifp->if_flags & IFF_RUNNING) == 0) {
6516 		sc->bce_coalchg_mask = 0;
6517 		return;
6518 	}
6519 
6520 	if (sc->bce_coalchg_mask &
6521 	    (BCE_COALMASK_TX_BDS | BCE_COALMASK_TX_BDS_INT)) {
6522 		REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
6523 		       (sc->bce_tx_quick_cons_trip_int << 16) |
6524 		       sc->bce_tx_quick_cons_trip);
6525 		for (i = 1; i < sc->rx_ring_cnt; ++i) {
6526 			uint32_t base;
6527 
6528 			base = ((i - 1) * BCE_HC_SB_CONFIG_SIZE) +
6529 			    BCE_HC_SB_CONFIG_1;
6530 			REG_WR(sc, base + BCE_HC_TX_QUICK_CONS_TRIP_OFF,
6531 			    (sc->bce_tx_quick_cons_trip_int << 16) |
6532 			    sc->bce_tx_quick_cons_trip);
6533 		}
6534 		if (bootverbose) {
6535 			if_printf(ifp, "tx_bds %u, tx_bds_int %u\n",
6536 				  sc->bce_tx_quick_cons_trip,
6537 				  sc->bce_tx_quick_cons_trip_int);
6538 		}
6539 	}
6540 
6541 	if (sc->bce_coalchg_mask &
6542 	    (BCE_COALMASK_TX_TICKS | BCE_COALMASK_TX_TICKS_INT)) {
6543 		REG_WR(sc, BCE_HC_TX_TICKS,
6544 		       (sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks);
6545 		for (i = 1; i < sc->rx_ring_cnt; ++i) {
6546 			uint32_t base;
6547 
6548 			base = ((i - 1) * BCE_HC_SB_CONFIG_SIZE) +
6549 			    BCE_HC_SB_CONFIG_1;
6550 			REG_WR(sc, base + BCE_HC_TX_TICKS_OFF,
6551 			    (sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks);
6552 		}
6553 		if (bootverbose) {
6554 			if_printf(ifp, "tx_ticks %u, tx_ticks_int %u\n",
6555 				  sc->bce_tx_ticks, sc->bce_tx_ticks_int);
6556 		}
6557 	}
6558 
6559 	if (sc->bce_coalchg_mask &
6560 	    (BCE_COALMASK_RX_BDS | BCE_COALMASK_RX_BDS_INT)) {
6561 		REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
6562 		       (sc->bce_rx_quick_cons_trip_int << 16) |
6563 		       sc->bce_rx_quick_cons_trip);
6564 		for (i = 1; i < sc->rx_ring_cnt; ++i) {
6565 			uint32_t base;
6566 
6567 			base = ((i - 1) * BCE_HC_SB_CONFIG_SIZE) +
6568 			    BCE_HC_SB_CONFIG_1;
6569 			REG_WR(sc, base + BCE_HC_RX_QUICK_CONS_TRIP_OFF,
6570 			    (sc->bce_rx_quick_cons_trip_int << 16) |
6571 			    sc->bce_rx_quick_cons_trip);
6572 		}
6573 		if (bootverbose) {
6574 			if_printf(ifp, "rx_bds %u, rx_bds_int %u\n",
6575 				  sc->bce_rx_quick_cons_trip,
6576 				  sc->bce_rx_quick_cons_trip_int);
6577 		}
6578 	}
6579 
6580 	if (sc->bce_coalchg_mask &
6581 	    (BCE_COALMASK_RX_TICKS | BCE_COALMASK_RX_TICKS_INT)) {
6582 		REG_WR(sc, BCE_HC_RX_TICKS,
6583 		       (sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks);
6584 		for (i = 1; i < sc->rx_ring_cnt; ++i) {
6585 			uint32_t base;
6586 
6587 			base = ((i - 1) * BCE_HC_SB_CONFIG_SIZE) +
6588 			    BCE_HC_SB_CONFIG_1;
6589 			REG_WR(sc, base + BCE_HC_RX_TICKS_OFF,
6590 			    (sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks);
6591 		}
6592 		if (bootverbose) {
6593 			if_printf(ifp, "rx_ticks %u, rx_ticks_int %u\n",
6594 				  sc->bce_rx_ticks, sc->bce_rx_ticks_int);
6595 		}
6596 	}
6597 
6598 	sc->bce_coalchg_mask = 0;
6599 }
6600 
6601 static int
6602 bce_tso_setup(struct bce_tx_ring *txr, struct mbuf **mp,
6603     uint16_t *flags0, uint16_t *mss0)
6604 {
6605 	struct mbuf *m;
6606 	uint16_t flags;
6607 	int thoff, iphlen, hoff;
6608 
6609 	m = *mp;
6610 	KASSERT(M_WRITABLE(m), ("TSO mbuf not writable"));
6611 
6612 	hoff = m->m_pkthdr.csum_lhlen;
6613 	iphlen = m->m_pkthdr.csum_iphlen;
6614 	thoff = m->m_pkthdr.csum_thlen;
6615 
6616 	KASSERT(hoff >= sizeof(struct ether_header),
6617 	    ("invalid ether header len %d", hoff));
6618 	KASSERT(iphlen >= sizeof(struct ip),
6619 	    ("invalid ip header len %d", iphlen));
6620 	KASSERT(thoff >= sizeof(struct tcphdr),
6621 	    ("invalid tcp header len %d", thoff));
6622 
6623 	if (__predict_false(m->m_len < hoff + iphlen + thoff)) {
6624 		m = m_pullup(m, hoff + iphlen + thoff);
6625 		if (m == NULL) {
6626 			*mp = NULL;
6627 			return ENOBUFS;
6628 		}
6629 		*mp = m;
6630 	}
6631 
6632 	/* Set the LSO flag in the TX BD */
6633 	flags = TX_BD_FLAGS_SW_LSO;
6634 
6635 	/* Set the length of IP + TCP options (in 32 bit words) */
6636 	flags |= (((iphlen + thoff -
6637 	    sizeof(struct ip) - sizeof(struct tcphdr)) >> 2) << 8);
6638 
6639 	*mss0 = htole16(m->m_pkthdr.tso_segsz);
6640 	*flags0 = flags;
6641 
6642 	return 0;
6643 }
6644 
6645 static void
6646 bce_setup_serialize(struct bce_softc *sc)
6647 {
6648 	int i, j;
6649 
6650 	/*
6651 	 * Allocate serializer array
6652 	 */
6653 
6654 	/* Main + TX + RX */
6655 	sc->serialize_cnt = 1 + sc->tx_ring_cnt + sc->rx_ring_cnt;
6656 
6657 	sc->serializes =
6658 	    kmalloc(sc->serialize_cnt * sizeof(struct lwkt_serialize *),
6659 	        M_DEVBUF, M_WAITOK | M_ZERO);
6660 
6661 	/*
6662 	 * Setup serializers
6663 	 *
6664 	 * NOTE: Order is critical
6665 	 */
6666 
6667 	i = 0;
6668 
6669 	KKASSERT(i < sc->serialize_cnt);
6670 	sc->serializes[i++] = &sc->main_serialize;
6671 
6672 	for (j = 0; j < sc->rx_ring_cnt; ++j) {
6673 		KKASSERT(i < sc->serialize_cnt);
6674 		sc->serializes[i++] = &sc->rx_rings[j].rx_serialize;
6675 	}
6676 
6677 	for (j = 0; j < sc->tx_ring_cnt; ++j) {
6678 		KKASSERT(i < sc->serialize_cnt);
6679 		sc->serializes[i++] = &sc->tx_rings[j].tx_serialize;
6680 	}
6681 
6682 	KKASSERT(i == sc->serialize_cnt);
6683 }
6684 
6685 static void
6686 bce_serialize(struct ifnet *ifp, enum ifnet_serialize slz)
6687 {
6688 	struct bce_softc *sc = ifp->if_softc;
6689 
6690 	ifnet_serialize_array_enter(sc->serializes, sc->serialize_cnt, slz);
6691 }
6692 
6693 static void
6694 bce_deserialize(struct ifnet *ifp, enum ifnet_serialize slz)
6695 {
6696 	struct bce_softc *sc = ifp->if_softc;
6697 
6698 	ifnet_serialize_array_exit(sc->serializes, sc->serialize_cnt, slz);
6699 }
6700 
6701 static int
6702 bce_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz)
6703 {
6704 	struct bce_softc *sc = ifp->if_softc;
6705 
6706 	return ifnet_serialize_array_try(sc->serializes, sc->serialize_cnt,
6707 	    slz);
6708 }
6709 
6710 #ifdef INVARIANTS
6711 
6712 static void
6713 bce_serialize_assert(struct ifnet *ifp, enum ifnet_serialize slz,
6714     boolean_t serialized)
6715 {
6716 	struct bce_softc *sc = ifp->if_softc;
6717 
6718 	ifnet_serialize_array_assert(sc->serializes, sc->serialize_cnt,
6719 	    slz, serialized);
6720 }
6721 
6722 #endif	/* INVARIANTS */
6723 
6724 static void
6725 bce_serialize_skipmain(struct bce_softc *sc)
6726 {
6727 	lwkt_serialize_array_enter(sc->serializes, sc->serialize_cnt, 1);
6728 }
6729 
6730 static void
6731 bce_deserialize_skipmain(struct bce_softc *sc)
6732 {
6733 	lwkt_serialize_array_exit(sc->serializes, sc->serialize_cnt, 1);
6734 }
6735 
6736 static void
6737 bce_set_timer_cpuid(struct bce_softc *sc, boolean_t polling)
6738 {
6739 	if (polling)
6740 		sc->bce_timer_cpuid = 0; /* XXX */
6741 	else
6742 		sc->bce_timer_cpuid = sc->bce_msix[0].msix_cpuid;
6743 }
6744 
6745 static int
6746 bce_alloc_intr(struct bce_softc *sc)
6747 {
6748 	u_int irq_flags;
6749 
6750 	bce_try_alloc_msix(sc);
6751 	if (sc->bce_irq_type == PCI_INTR_TYPE_MSIX)
6752 		return 0;
6753 
6754 	sc->bce_irq_type = pci_alloc_1intr(sc->bce_dev, bce_msi_enable,
6755 	    &sc->bce_irq_rid, &irq_flags);
6756 
6757 	sc->bce_res_irq = bus_alloc_resource_any(sc->bce_dev, SYS_RES_IRQ,
6758 	    &sc->bce_irq_rid, irq_flags);
6759 	if (sc->bce_res_irq == NULL) {
6760 		device_printf(sc->bce_dev, "PCI map interrupt failed\n");
6761 		return ENXIO;
6762 	}
6763 	sc->bce_msix[0].msix_cpuid = rman_get_cpuid(sc->bce_res_irq);
6764 	sc->bce_msix[0].msix_serialize = &sc->main_serialize;
6765 
6766 	return 0;
6767 }
6768 
6769 static void
6770 bce_try_alloc_msix(struct bce_softc *sc)
6771 {
6772 	struct bce_msix_data *msix;
6773 	int i, error;
6774 	boolean_t setup = FALSE;
6775 
6776 	if (sc->rx_ring_cnt == 1)
6777 		return;
6778 
6779 	msix = &sc->bce_msix[0];
6780 	msix->msix_serialize = &sc->main_serialize;
6781 	msix->msix_func = bce_intr_msi_oneshot;
6782 	msix->msix_arg = sc;
6783 	msix->msix_cpuid = if_ringmap_cpumap(sc->rx_rmap, 0);
6784 	KKASSERT(msix->msix_cpuid < netisr_ncpus);
6785 	ksnprintf(msix->msix_desc, sizeof(msix->msix_desc), "%s combo",
6786 	    device_get_nameunit(sc->bce_dev));
6787 
6788 	for (i = 1; i < sc->rx_ring_cnt; ++i) {
6789 		struct bce_rx_ring *rxr = &sc->rx_rings[i];
6790 
6791 		msix = &sc->bce_msix[i];
6792 
6793 		msix->msix_serialize = &rxr->rx_serialize;
6794 		msix->msix_arg = rxr;
6795 		msix->msix_cpuid = if_ringmap_cpumap(sc->rx_rmap,
6796 		    i % sc->rx_ring_cnt2);
6797 		KKASSERT(msix->msix_cpuid < netisr_ncpus);
6798 
6799 		if (i < sc->tx_ring_cnt) {
6800 			msix->msix_func = bce_intr_msix_rxtx;
6801 			ksnprintf(msix->msix_desc, sizeof(msix->msix_desc),
6802 			    "%s rxtx%d", device_get_nameunit(sc->bce_dev), i);
6803 		} else {
6804 			msix->msix_func = bce_intr_msix_rx;
6805 			ksnprintf(msix->msix_desc, sizeof(msix->msix_desc),
6806 			    "%s rx%d", device_get_nameunit(sc->bce_dev), i);
6807 		}
6808 	}
6809 
6810 	/*
6811 	 * Setup MSI-X table
6812 	 */
6813 	bce_setup_msix_table(sc);
6814 	REG_WR(sc, BCE_PCI_MSIX_CONTROL, BCE_MSIX_MAX - 1);
6815 	REG_WR(sc, BCE_PCI_MSIX_TBL_OFF_BIR, BCE_PCI_GRC_WINDOW2_BASE);
6816 	REG_WR(sc, BCE_PCI_MSIX_PBA_OFF_BIT, BCE_PCI_GRC_WINDOW3_BASE);
6817 	/* Flush */
6818 	REG_RD(sc, BCE_PCI_MSIX_CONTROL);
6819 
6820 	error = pci_setup_msix(sc->bce_dev);
6821 	if (error) {
6822 		device_printf(sc->bce_dev, "Setup MSI-X failed\n");
6823 		goto back;
6824 	}
6825 	setup = TRUE;
6826 
6827 	for (i = 0; i < sc->rx_ring_cnt; ++i) {
6828 		msix = &sc->bce_msix[i];
6829 
6830 		error = pci_alloc_msix_vector(sc->bce_dev, i, &msix->msix_rid,
6831 		    msix->msix_cpuid);
6832 		if (error) {
6833 			device_printf(sc->bce_dev,
6834 			    "Unable to allocate MSI-X %d on cpu%d\n",
6835 			    i, msix->msix_cpuid);
6836 			goto back;
6837 		}
6838 
6839 		msix->msix_res = bus_alloc_resource_any(sc->bce_dev,
6840 		    SYS_RES_IRQ, &msix->msix_rid, RF_ACTIVE);
6841 		if (msix->msix_res == NULL) {
6842 			device_printf(sc->bce_dev,
6843 			    "Unable to allocate MSI-X %d resource\n", i);
6844 			error = ENOMEM;
6845 			goto back;
6846 		}
6847 	}
6848 
6849 	pci_enable_msix(sc->bce_dev);
6850 	sc->bce_irq_type = PCI_INTR_TYPE_MSIX;
6851 back:
6852 	if (error)
6853 		bce_free_msix(sc, setup);
6854 }
6855 
6856 static void
6857 bce_setup_ring_cnt(struct bce_softc *sc)
6858 {
6859 	int msix_enable, msix_cnt, msix_ring;
6860 	int ring_max, ring_cnt;
6861 
6862 	sc->rx_rmap = if_ringmap_alloc(sc->bce_dev, 1, 1);
6863 
6864 	if (BCE_CHIP_NUM(sc) != BCE_CHIP_NUM_5709 &&
6865 	    BCE_CHIP_NUM(sc) != BCE_CHIP_NUM_5716)
6866 		goto skip_rx;
6867 
6868 	msix_enable = device_getenv_int(sc->bce_dev, "msix.enable",
6869 	    bce_msix_enable);
6870 	if (!msix_enable)
6871 		goto skip_rx;
6872 
6873 	if (netisr_ncpus == 1)
6874 		goto skip_rx;
6875 
6876 	/*
6877 	 * One extra RX ring will be needed (see below), so make sure
6878 	 * that there are enough MSI-X vectors.
6879 	 */
6880 	msix_cnt = pci_msix_count(sc->bce_dev);
6881 	if (msix_cnt <= 2)
6882 		goto skip_rx;
6883 	msix_ring = msix_cnt - 1;
6884 
6885 	/*
6886 	 * Setup RX ring count
6887 	 */
6888 	ring_max = BCE_RX_RING_MAX;
6889 	if (ring_max > msix_ring)
6890 		ring_max = msix_ring;
6891 	ring_cnt = device_getenv_int(sc->bce_dev, "rx_rings", bce_rx_rings);
6892 
6893 	if_ringmap_free(sc->rx_rmap);
6894 	sc->rx_rmap = if_ringmap_alloc(sc->bce_dev, ring_cnt, ring_max);
6895 
6896 skip_rx:
6897 	sc->rx_ring_cnt2 = if_ringmap_count(sc->rx_rmap);
6898 
6899 	/*
6900 	 * Setup TX ring count
6901 	 *
6902 	 * NOTE:
6903 	 * TX ring count must be less than the effective RSS RX ring
6904 	 * count, since we use RX ring software data struct to save
6905 	 * status index and various other MSI-X related stuffs.
6906 	 */
6907 	ring_max = BCE_TX_RING_MAX;
6908 	if (ring_max > sc->rx_ring_cnt2)
6909 		ring_max = sc->rx_ring_cnt2;
6910 	ring_cnt = device_getenv_int(sc->bce_dev, "tx_rings", bce_tx_rings);
6911 
6912 	sc->tx_rmap = if_ringmap_alloc(sc->bce_dev, ring_cnt, ring_max);
6913 	if_ringmap_align(sc->bce_dev, sc->rx_rmap, sc->tx_rmap);
6914 
6915 	sc->tx_ring_cnt = if_ringmap_count(sc->tx_rmap);
6916 
6917 	if (sc->rx_ring_cnt2 == 1) {
6918 		/*
6919 		 * Don't use MSI-X, if the effective RX ring count is 1.
6920 		 * Since if the effective RX ring count is 1, the TX ring
6921 		 * count will be 1.  This RX ring and the TX ring must be
6922 		 * bundled into one MSI-X vector, so the hot path will be
6923 		 * exact same as using MSI.  Besides, the first RX ring
6924 		 * must be fully populated, which only accepts packets whose
6925 		 * RSS hash can't calculated, e.g. ARP packets; waste of
6926 		 * resource at least.
6927 		 */
6928 		sc->rx_ring_cnt = 1;
6929 	} else {
6930 		/*
6931 		 * One extra RX ring is allocated, since the first RX ring
6932 		 * could not be used for RSS hashed packets whose masked
6933 		 * hash is 0.  The first RX ring is only used for packets
6934 		 * whose RSS hash could not be calculated, e.g. ARP packets.
6935 		 * This extra RX ring will be used for packets whose masked
6936 		 * hash is 0.  The effective RX ring count involved in RSS
6937 		 * is still sc->rx_ring_cnt2.
6938 		 */
6939 		sc->rx_ring_cnt = sc->rx_ring_cnt2 + 1;
6940 	}
6941 }
6942 
6943 static void
6944 bce_free_msix(struct bce_softc *sc, boolean_t setup)
6945 {
6946 	int i;
6947 
6948 	KKASSERT(sc->rx_ring_cnt > 1);
6949 
6950 	for (i = 0; i < sc->rx_ring_cnt; ++i) {
6951 		struct bce_msix_data *msix = &sc->bce_msix[i];
6952 
6953 		if (msix->msix_res != NULL) {
6954 			bus_release_resource(sc->bce_dev, SYS_RES_IRQ,
6955 			    msix->msix_rid, msix->msix_res);
6956 		}
6957 		if (msix->msix_rid >= 0)
6958 			pci_release_msix_vector(sc->bce_dev, msix->msix_rid);
6959 	}
6960 	if (setup)
6961 		pci_teardown_msix(sc->bce_dev);
6962 }
6963 
6964 static void
6965 bce_free_intr(struct bce_softc *sc)
6966 {
6967 	if (sc->bce_irq_type != PCI_INTR_TYPE_MSIX) {
6968 		if (sc->bce_res_irq != NULL) {
6969 			bus_release_resource(sc->bce_dev, SYS_RES_IRQ,
6970 			    sc->bce_irq_rid, sc->bce_res_irq);
6971 		}
6972 		if (sc->bce_irq_type == PCI_INTR_TYPE_MSI)
6973 			pci_release_msi(sc->bce_dev);
6974 	} else {
6975 		bce_free_msix(sc, TRUE);
6976 	}
6977 }
6978 
6979 static void
6980 bce_setup_msix_table(struct bce_softc *sc)
6981 {
6982 	REG_WR(sc, BCE_PCI_GRC_WINDOW_ADDR, BCE_PCI_GRC_WINDOW_ADDR_SEP_WIN);
6983 	REG_WR(sc, BCE_PCI_GRC_WINDOW2_ADDR, BCE_MSIX_TABLE_ADDR);
6984 	REG_WR(sc, BCE_PCI_GRC_WINDOW3_ADDR, BCE_MSIX_PBA_ADDR);
6985 }
6986 
6987 static int
6988 bce_setup_intr(struct bce_softc *sc)
6989 {
6990 	void (*irq_handle)(void *);
6991 	int error;
6992 
6993 	if (sc->bce_irq_type == PCI_INTR_TYPE_MSIX)
6994 		return bce_setup_msix(sc);
6995 
6996 	if (sc->bce_irq_type == PCI_INTR_TYPE_LEGACY) {
6997 		irq_handle = bce_intr_legacy;
6998 	} else if (sc->bce_irq_type == PCI_INTR_TYPE_MSI) {
6999 		if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
7000 		    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
7001 			irq_handle = bce_intr_msi_oneshot;
7002 			sc->bce_flags |= BCE_ONESHOT_MSI_FLAG;
7003 		} else {
7004 			irq_handle = bce_intr_msi;
7005 			sc->bce_flags |= BCE_CHECK_MSI_FLAG;
7006 		}
7007 	} else {
7008 		panic("%s: unsupported intr type %d",
7009 		    device_get_nameunit(sc->bce_dev), sc->bce_irq_type);
7010 	}
7011 
7012 	error = bus_setup_intr(sc->bce_dev, sc->bce_res_irq, INTR_MPSAFE,
7013 	    irq_handle, sc, &sc->bce_intrhand, &sc->main_serialize);
7014 	if (error != 0) {
7015 		device_printf(sc->bce_dev, "Failed to setup IRQ!\n");
7016 		return error;
7017 	}
7018 
7019 	return 0;
7020 }
7021 
7022 static void
7023 bce_teardown_intr(struct bce_softc *sc)
7024 {
7025 	if (sc->bce_irq_type != PCI_INTR_TYPE_MSIX)
7026 		bus_teardown_intr(sc->bce_dev, sc->bce_res_irq, sc->bce_intrhand);
7027 	else
7028 		bce_teardown_msix(sc, sc->rx_ring_cnt);
7029 }
7030 
7031 static int
7032 bce_setup_msix(struct bce_softc *sc)
7033 {
7034 	int i;
7035 
7036 	for (i = 0; i < sc->rx_ring_cnt; ++i) {
7037 		struct bce_msix_data *msix = &sc->bce_msix[i];
7038 		int error;
7039 
7040 		error = bus_setup_intr_descr(sc->bce_dev, msix->msix_res,
7041 		    INTR_MPSAFE, msix->msix_func, msix->msix_arg,
7042 		    &msix->msix_handle, msix->msix_serialize, msix->msix_desc);
7043 		if (error) {
7044 			device_printf(sc->bce_dev, "could not set up %s "
7045 			    "interrupt handler.\n", msix->msix_desc);
7046 			bce_teardown_msix(sc, i);
7047 			return error;
7048 		}
7049 	}
7050 	return 0;
7051 }
7052 
7053 static void
7054 bce_teardown_msix(struct bce_softc *sc, int msix_cnt)
7055 {
7056 	int i;
7057 
7058 	for (i = 0; i < msix_cnt; ++i) {
7059 		struct bce_msix_data *msix = &sc->bce_msix[i];
7060 
7061 		bus_teardown_intr(sc->bce_dev, msix->msix_res,
7062 		    msix->msix_handle);
7063 	}
7064 }
7065 
7066 static void
7067 bce_init_rss(struct bce_softc *sc)
7068 {
7069 	uint8_t key[BCE_RLUP_RSS_KEY_CNT * BCE_RLUP_RSS_KEY_SIZE];
7070 	uint32_t tbl = 0;
7071 	int i;
7072 
7073 	KKASSERT(sc->rx_ring_cnt > 2);
7074 
7075 	/*
7076 	 * Configure RSS keys
7077 	 */
7078 	toeplitz_get_key(key, sizeof(key));
7079 	for (i = 0; i < BCE_RLUP_RSS_KEY_CNT; ++i) {
7080 		uint32_t rss_key;
7081 
7082 		rss_key = BCE_RLUP_RSS_KEYVAL(key, i);
7083 		BCE_RSS_DPRINTF(sc, 1, "rss_key%d 0x%08x\n", i, rss_key);
7084 
7085 		REG_WR(sc, BCE_RLUP_RSS_KEY(i), rss_key);
7086 	}
7087 
7088 	/*
7089 	 * Configure the redirect table
7090 	 *
7091 	 * NOTE:
7092 	 * - The "queue ID" in redirect table is the software RX ring's
7093 	 *   index _minus_ one.
7094 	 * - The last RX ring, whose "queue ID" is (sc->rx_ring_cnt - 2)
7095 	 *   will be used for packets whose masked hash is 0.
7096 	 *   (see also: comment in bce_setup_ring_cnt())
7097 	 */
7098 	if_ringmap_rdrtable(sc->rx_rmap, sc->rdr_table,
7099 	    BCE_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES);
7100 	for (i = 0; i < BCE_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
7101 		int shift = (i % 8) << 2, qid;
7102 
7103 		qid = sc->rdr_table[i];
7104 		KKASSERT(qid >= 0 && qid < sc->rx_ring_cnt2);
7105 		if (qid > 0)
7106 			--qid;
7107 		else
7108 			qid = sc->rx_ring_cnt - 2;
7109 		KKASSERT(qid < (sc->rx_ring_cnt - 1));
7110 
7111 		tbl |= qid << shift;
7112 		if (i % 8 == 7) {
7113 			BCE_RSS_DPRINTF(sc, 1, "tbl 0x%08x\n", tbl);
7114 			REG_WR(sc, BCE_RLUP_RSS_DATA, tbl);
7115 			REG_WR(sc, BCE_RLUP_RSS_COMMAND, (i >> 3) |
7116 			    BCE_RLUP_RSS_COMMAND_RSS_WRITE_MASK |
7117 			    BCE_RLUP_RSS_COMMAND_WRITE |
7118 			    BCE_RLUP_RSS_COMMAND_HASH_MASK);
7119 			tbl = 0;
7120 		}
7121 	}
7122 	REG_WR(sc, BCE_RLUP_RSS_CONFIG,
7123 	    BCE_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI);
7124 }
7125 
7126 static void
7127 bce_npoll_coal_change(struct bce_softc *sc)
7128 {
7129 	uint32_t old_rx_cons, old_tx_cons;
7130 
7131 	old_rx_cons = sc->bce_rx_quick_cons_trip_int;
7132 	old_tx_cons = sc->bce_tx_quick_cons_trip_int;
7133 	sc->bce_rx_quick_cons_trip_int = 1;
7134 	sc->bce_tx_quick_cons_trip_int = 1;
7135 
7136 	sc->bce_coalchg_mask |= BCE_COALMASK_TX_BDS_INT |
7137 	    BCE_COALMASK_RX_BDS_INT;
7138 	bce_coal_change(sc);
7139 
7140 	sc->bce_rx_quick_cons_trip_int = old_rx_cons;
7141 	sc->bce_tx_quick_cons_trip_int = old_tx_cons;
7142 }
7143 
7144 static struct pktinfo *
7145 bce_rss_pktinfo(struct pktinfo *pi, uint32_t status,
7146     const struct l2_fhdr *l2fhdr)
7147 {
7148 	/* Check for an IP datagram. */
7149 	if ((status & L2_FHDR_STATUS_IP_DATAGRAM) == 0)
7150 		return NULL;
7151 
7152 	/* Check if the IP checksum is valid. */
7153 	if (l2fhdr->l2_fhdr_ip_xsum != 0xffff)
7154 		return NULL;
7155 
7156 	/* Check for a valid TCP/UDP frame. */
7157 	if (status & L2_FHDR_STATUS_TCP_SEGMENT) {
7158 		if (status & L2_FHDR_ERRORS_TCP_XSUM)
7159 			return NULL;
7160 		if (l2fhdr->l2_fhdr_tcp_udp_xsum != 0xffff)
7161 			return NULL;
7162 		pi->pi_l3proto = IPPROTO_TCP;
7163 	} else if (status & L2_FHDR_STATUS_UDP_DATAGRAM) {
7164 		if (status & L2_FHDR_ERRORS_UDP_XSUM)
7165 			return NULL;
7166 		if (l2fhdr->l2_fhdr_tcp_udp_xsum != 0xffff)
7167 			return NULL;
7168 		pi->pi_l3proto = IPPROTO_UDP;
7169 	} else {
7170 		return NULL;
7171 	}
7172 	pi->pi_netisr = NETISR_IP;
7173 	pi->pi_flags = 0;
7174 
7175 	return pi;
7176 }
7177