xref: /dragonfly/sys/dev/netif/bce/if_bce.c (revision 548a3528)
1 /*-
2  * Copyright (c) 2006-2007 Broadcom Corporation
3  *	David Christensen <davidch@broadcom.com>.  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  *
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. Neither the name of Broadcom Corporation nor the name of its contributors
15  *    may be used to endorse or promote products derived from this software
16  *    without specific prior written consent.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
22  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28  * THE POSSIBILITY OF SUCH DAMAGE.
29  *
30  * $FreeBSD: src/sys/dev/bce/if_bce.c,v 1.31 2007/05/16 23:34:11 davidch Exp $
31  */
32 
33 /*
34  * The following controllers are supported by this driver:
35  *   BCM5706C A2, A3
36  *   BCM5706S A2, A3
37  *   BCM5708C B1, B2
38  *   BCM5708S B1, B2
39  *   BCM5709C A1, B2, C0
40  *   BCM5716  C0
41  *
42  * The following controllers are not supported by this driver:
43  *   BCM5706C A0, A1
44  *   BCM5706S A0, A1
45  *   BCM5708C A0, B0
46  *   BCM5708S A0, B0
47  *   BCM5709C A0, B0, B1
48  *   BCM5709S A0, A1, B0, B1, B2, C0
49  *
50  *
51  * Note about MSI-X on 5709/5716:
52  * - 9 MSI-X vectors are supported.
53  * - MSI-X vectors, RX/TX rings and status blocks' association
54  *   are fixed:
55  *   o  The first RX ring and the first TX ring use the first
56  *      status block.
57  *   o  The first MSI-X vector is associated with the first
58  *      status block.
59  *   o  The second RX ring and the second TX ring use the second
60  *      status block.
61  *   o  The second MSI-X vector is associated with the second
62  *      status block.
63  *   ...
64  *   and so on so forth.
65  * - Status blocks must reside in physically contiguous memory
66  *   and each status block consumes 128bytes.  In addition to
67  *   this, the memory for the status blocks is aligned on 128bytes
68  *   in this driver.  (see bce_dma_alloc() and HC_CONFIG)
69  * - Each status block has its own coalesce parameters, which also
70  *   serve as the related MSI-X vector's interrupt moderation
71  *   parameters.  (see bce_coal_change())
72  */
73 
74 #include "opt_bce.h"
75 #include "opt_ifpoll.h"
76 
77 #include <sys/param.h>
78 #include <sys/bus.h>
79 #include <sys/endian.h>
80 #include <sys/kernel.h>
81 #include <sys/interrupt.h>
82 #include <sys/mbuf.h>
83 #include <sys/malloc.h>
84 #include <sys/queue.h>
85 #include <sys/rman.h>
86 #include <sys/serialize.h>
87 #include <sys/socket.h>
88 #include <sys/sockio.h>
89 #include <sys/sysctl.h>
90 
91 #include <netinet/ip.h>
92 #include <netinet/tcp.h>
93 
94 #include <net/bpf.h>
95 #include <net/ethernet.h>
96 #include <net/if.h>
97 #include <net/if_arp.h>
98 #include <net/if_dl.h>
99 #include <net/if_media.h>
100 #include <net/if_poll.h>
101 #include <net/if_types.h>
102 #include <net/ifq_var.h>
103 #include <net/toeplitz.h>
104 #include <net/toeplitz2.h>
105 #include <net/vlan/if_vlan_var.h>
106 #include <net/vlan/if_vlan_ether.h>
107 
108 #include <dev/netif/mii_layer/mii.h>
109 #include <dev/netif/mii_layer/miivar.h>
110 #include <dev/netif/mii_layer/brgphyreg.h>
111 
112 #include <bus/pci/pcireg.h>
113 #include <bus/pci/pcivar.h>
114 
115 #include "miibus_if.h"
116 
117 #include <dev/netif/bce/if_bcereg.h>
118 #include <dev/netif/bce/if_bcefw.h>
119 
/* 10ms expressed in system ticks; period of the MSI status check callout
 * (see bce_check_msi()). */
#define BCE_MSI_CKINTVL		((10 * hz) / 1000)	/* 10ms */

#ifdef BCE_RSS_DEBUG
/*
 * RSS debug printf: emitted only when the softc's rss_debug level is
 * >= lvl.  Compiles to a no-op when BCE_RSS_DEBUG is not defined.
 */
#define BCE_RSS_DPRINTF(sc, lvl, fmt, ...) \
do { \
	if (sc->rss_debug >= lvl) \
		if_printf(&sc->arpcom.ac_if, fmt, __VA_ARGS__); \
} while (0)
#else	/* !BCE_RSS_DEBUG */
#define BCE_RSS_DPRINTF(sc, lvl, fmt, ...)	((void)0)
#endif	/* BCE_RSS_DEBUG */
131 
132 /****************************************************************************/
133 /* PCI Device ID Table                                                      */
134 /*                                                                          */
135 /* Used by bce_probe() to identify the devices supported by this driver.    */
136 /****************************************************************************/
/* Maximum length of the device description string built by bce_probe(). */
#define BCE_DEVDESC_MAX		64

/*
 * Scanned linearly by bce_probe(); matching stops at the first hit, so
 * OEM-specific entries (exact subsystem IDs) must precede the generic
 * PCI_ANY_ID wildcard entry for the same chip.
 */
static struct bce_type bce_devs[] = {
	/* BCM5706C Controllers and OEM boards. */
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706,  HP_VENDORID, 0x3101,
		"HP NC370T Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706,  HP_VENDORID, 0x3106,
		"HP NC370i Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706,  HP_VENDORID, 0x3070,
		"HP NC380T PCIe DP Multifunc Gig Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706,  HP_VENDORID, 0x1709,
		"HP NC371i Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706,  PCI_ANY_ID,  PCI_ANY_ID,
		"Broadcom NetXtreme II BCM5706 1000Base-T" },

	/* BCM5706S controllers and OEM boards. */
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, HP_VENDORID, 0x3102,
		"HP NC370F Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, PCI_ANY_ID,  PCI_ANY_ID,
		"Broadcom NetXtreme II BCM5706 1000Base-SX" },

	/* BCM5708C controllers and OEM boards. */
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708,  HP_VENDORID, 0x7037,
		"HP NC373T PCIe Multifunction Gig Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708,  HP_VENDORID, 0x7038,
		"HP NC373i Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708,  HP_VENDORID, 0x7045,
		"HP NC374m PCIe Multifunction Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708,  PCI_ANY_ID,  PCI_ANY_ID,
		"Broadcom NetXtreme II BCM5708 1000Base-T" },

	/* BCM5708S controllers and OEM boards. */
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708S,  HP_VENDORID, 0x1706,
		"HP NC373m Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708S,  HP_VENDORID, 0x703b,
		"HP NC373i Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708S,  HP_VENDORID, 0x703d,
		"HP NC373F PCIe Multifunc Giga Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708S,  PCI_ANY_ID,  PCI_ANY_ID,
		"Broadcom NetXtreme II BCM5708S 1000Base-T" },

	/* BCM5709C controllers and OEM boards. */
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709,  HP_VENDORID, 0x7055,
		"HP NC382i DP Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709,  HP_VENDORID, 0x7059,
		"HP NC382T PCIe DP Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709,  PCI_ANY_ID,  PCI_ANY_ID,
		"Broadcom NetXtreme II BCM5709 1000Base-T" },

	/* BCM5709S controllers and OEM boards. */
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709S,  HP_VENDORID, 0x171d,
		"HP NC382m DP 1GbE Multifunction BL-c Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709S,  HP_VENDORID, 0x7056,
		"HP NC382i DP Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709S,  PCI_ANY_ID,  PCI_ANY_ID,
		"Broadcom NetXtreme II BCM5709 1000Base-SX" },

	/* BCM5716 controllers and OEM boards. */
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5716,   PCI_ANY_ID,  PCI_ANY_ID,
		"Broadcom NetXtreme II BCM5716 1000Base-T" },

	/* Terminator entry; bce_probe() stops at bce_name == NULL. */
	{ 0, 0, 0, 0, NULL }
};
200 
201 /****************************************************************************/
202 /* Supported Flash NVRAM device data.                                       */
203 /****************************************************************************/
/*
 * Each entry holds a strapping/ID word, four NVRAM controller
 * configuration words (field names are in the flash_spec definition in
 * if_bcereg.h — TODO confirm exact field order there), then the access
 * flags, page geometry, byte-address mask, total size in bytes and a
 * printable part name.
 */
static const struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS		(BCE_NV_BUFFERED | BCE_NV_TRANSLATE)
#define NONBUFFERED_FLAGS	(BCE_NV_WREN)

	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Atmel Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
293 
294 /*
295  * The BCM5709 controllers transparently handle the
296  * differences between Atmel 264 byte pages and all
297  * flash devices which use 256 byte pages, so no
298  * logical-to-physical mapping is required in the
299  * driver.
300  */
/* Single flash description used for all 5709/5716 parts (see note above). */
static struct flash_spec flash_5709 = {
	.flags		= BCE_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE * 2,	/* 256kB */
	.name		= "5709/5716 buffered flash (256kB)",
};
309 
310 /****************************************************************************/
311 /* DragonFly device entry points.                                           */
312 /****************************************************************************/
313 static int	bce_probe(device_t);
314 static int	bce_attach(device_t);
315 static int	bce_detach(device_t);
316 static void	bce_shutdown(device_t);
317 static int	bce_miibus_read_reg(device_t, int, int);
318 static int	bce_miibus_write_reg(device_t, int, int, int);
319 static void	bce_miibus_statchg(device_t);
320 
321 /****************************************************************************/
322 /* BCE Register/Memory Access Routines                                      */
323 /****************************************************************************/
324 static uint32_t	bce_reg_rd_ind(struct bce_softc *, uint32_t);
325 static void	bce_reg_wr_ind(struct bce_softc *, uint32_t, uint32_t);
326 static void	bce_shmem_wr(struct bce_softc *, uint32_t, uint32_t);
327 static uint32_t	bce_shmem_rd(struct bce_softc *, u32);
328 static void	bce_ctx_wr(struct bce_softc *, uint32_t, uint32_t, uint32_t);
329 
330 /****************************************************************************/
331 /* BCE NVRAM Access Routines                                                */
332 /****************************************************************************/
333 static int	bce_acquire_nvram_lock(struct bce_softc *);
334 static int	bce_release_nvram_lock(struct bce_softc *);
335 static void	bce_enable_nvram_access(struct bce_softc *);
336 static void	bce_disable_nvram_access(struct bce_softc *);
337 static int	bce_nvram_read_dword(struct bce_softc *, uint32_t, uint8_t *,
338 		    uint32_t);
339 static int	bce_init_nvram(struct bce_softc *);
340 static int	bce_nvram_read(struct bce_softc *, uint32_t, uint8_t *, int);
341 static int	bce_nvram_test(struct bce_softc *);
342 
343 /****************************************************************************/
344 /* BCE DMA Allocate/Free Routines                                           */
345 /****************************************************************************/
346 static int	bce_dma_alloc(struct bce_softc *);
347 static void	bce_dma_free(struct bce_softc *);
348 static void	bce_dma_map_addr(void *, bus_dma_segment_t *, int, int);
349 
350 /****************************************************************************/
351 /* BCE Firmware Synchronization and Load                                    */
352 /****************************************************************************/
353 static int	bce_fw_sync(struct bce_softc *, uint32_t);
354 static void	bce_load_rv2p_fw(struct bce_softc *, uint32_t *,
355 		    uint32_t, uint32_t);
356 static void	bce_load_cpu_fw(struct bce_softc *, struct cpu_reg *,
357 		    struct fw_info *);
358 static void	bce_start_cpu(struct bce_softc *, struct cpu_reg *);
359 static void	bce_halt_cpu(struct bce_softc *, struct cpu_reg *);
360 static void	bce_start_rxp_cpu(struct bce_softc *);
361 static void	bce_init_rxp_cpu(struct bce_softc *);
362 static void	bce_init_txp_cpu(struct bce_softc *);
363 static void	bce_init_tpat_cpu(struct bce_softc *);
364 static void	bce_init_cp_cpu(struct bce_softc *);
365 static void	bce_init_com_cpu(struct bce_softc *);
366 static void	bce_init_cpus(struct bce_softc *);
367 static void	bce_setup_msix_table(struct bce_softc *);
368 static void	bce_init_rss(struct bce_softc *);
369 
370 static void	bce_stop(struct bce_softc *);
371 static int	bce_reset(struct bce_softc *, uint32_t);
372 static int	bce_chipinit(struct bce_softc *);
373 static int	bce_blockinit(struct bce_softc *);
374 static void	bce_probe_pci_caps(struct bce_softc *);
375 static void	bce_print_adapter_info(struct bce_softc *);
376 static void	bce_get_media(struct bce_softc *);
377 static void	bce_mgmt_init(struct bce_softc *);
378 static int	bce_init_ctx(struct bce_softc *);
379 static void	bce_get_mac_addr(struct bce_softc *);
380 static void	bce_set_mac_addr(struct bce_softc *);
381 static void	bce_set_rx_mode(struct bce_softc *);
382 static void	bce_coal_change(struct bce_softc *);
383 static void	bce_npoll_coal_change(struct bce_softc *);
384 static void	bce_setup_serialize(struct bce_softc *);
385 static void	bce_serialize_skipmain(struct bce_softc *);
386 static void	bce_deserialize_skipmain(struct bce_softc *);
387 static void	bce_set_timer_cpuid(struct bce_softc *, boolean_t);
388 static int	bce_alloc_intr(struct bce_softc *);
389 static void	bce_free_intr(struct bce_softc *);
390 static void	bce_try_alloc_msix(struct bce_softc *);
391 static void	bce_free_msix(struct bce_softc *, boolean_t);
392 static void	bce_setup_ring_cnt(struct bce_softc *);
393 static int	bce_setup_intr(struct bce_softc *);
394 static void	bce_teardown_intr(struct bce_softc *);
395 static int	bce_setup_msix(struct bce_softc *);
396 static void	bce_teardown_msix(struct bce_softc *, int);
397 
398 static int	bce_create_tx_ring(struct bce_tx_ring *);
399 static void	bce_destroy_tx_ring(struct bce_tx_ring *);
400 static void	bce_init_tx_context(struct bce_tx_ring *);
401 static int	bce_init_tx_chain(struct bce_tx_ring *);
402 static void	bce_free_tx_chain(struct bce_tx_ring *);
403 static void	bce_xmit(struct bce_tx_ring *);
404 static int	bce_encap(struct bce_tx_ring *, struct mbuf **, int *);
405 static int	bce_tso_setup(struct bce_tx_ring *, struct mbuf **,
406 		    uint16_t *, uint16_t *);
407 
408 static int	bce_create_rx_ring(struct bce_rx_ring *);
409 static void	bce_destroy_rx_ring(struct bce_rx_ring *);
410 static void	bce_init_rx_context(struct bce_rx_ring *);
411 static int	bce_init_rx_chain(struct bce_rx_ring *);
412 static void	bce_free_rx_chain(struct bce_rx_ring *);
413 static int	bce_newbuf_std(struct bce_rx_ring *, uint16_t *, uint16_t,
414 		    uint32_t *, int);
415 static void	bce_setup_rxdesc_std(struct bce_rx_ring *, uint16_t,
416 		    uint32_t *);
417 static struct pktinfo *bce_rss_pktinfo(struct pktinfo *, uint32_t,
418 		    const struct l2_fhdr *);
419 
420 static void	bce_start(struct ifnet *, struct ifaltq_subque *);
421 static int	bce_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
422 static void	bce_watchdog(struct ifaltq_subque *);
423 static int	bce_ifmedia_upd(struct ifnet *);
424 static void	bce_ifmedia_sts(struct ifnet *, struct ifmediareq *);
425 static void	bce_init(void *);
426 #ifdef IFPOLL_ENABLE
427 static void	bce_npoll(struct ifnet *, struct ifpoll_info *);
428 static void	bce_npoll_rx(struct ifnet *, void *, int);
429 static void	bce_npoll_tx(struct ifnet *, void *, int);
430 static void	bce_npoll_status(struct ifnet *);
431 static void	bce_npoll_rx_pack(struct ifnet *, void *, int);
432 #endif
433 static void	bce_serialize(struct ifnet *, enum ifnet_serialize);
434 static void	bce_deserialize(struct ifnet *, enum ifnet_serialize);
435 static int	bce_tryserialize(struct ifnet *, enum ifnet_serialize);
436 #ifdef INVARIANTS
437 static void	bce_serialize_assert(struct ifnet *, enum ifnet_serialize,
438 		    boolean_t);
439 #endif
440 
441 static void	bce_intr(struct bce_softc *);
442 static void	bce_intr_legacy(void *);
443 static void	bce_intr_msi(void *);
444 static void	bce_intr_msi_oneshot(void *);
445 static void	bce_intr_msix_rxtx(void *);
446 static void	bce_intr_msix_rx(void *);
447 static void	bce_tx_intr(struct bce_tx_ring *, uint16_t);
448 static void	bce_rx_intr(struct bce_rx_ring *, int, uint16_t);
449 static void	bce_phy_intr(struct bce_softc *);
450 static void	bce_disable_intr(struct bce_softc *);
451 static void	bce_enable_intr(struct bce_softc *);
452 static void	bce_reenable_intr(struct bce_rx_ring *);
453 static void	bce_check_msi(void *);
454 
455 static void	bce_stats_update(struct bce_softc *);
456 static void	bce_tick(void *);
457 static void	bce_tick_serialized(struct bce_softc *);
458 static void	bce_pulse(void *);
459 
460 static void	bce_add_sysctls(struct bce_softc *);
461 static int	bce_sysctl_tx_bds_int(SYSCTL_HANDLER_ARGS);
462 static int	bce_sysctl_tx_bds(SYSCTL_HANDLER_ARGS);
463 static int	bce_sysctl_tx_ticks_int(SYSCTL_HANDLER_ARGS);
464 static int	bce_sysctl_tx_ticks(SYSCTL_HANDLER_ARGS);
465 static int	bce_sysctl_rx_bds_int(SYSCTL_HANDLER_ARGS);
466 static int	bce_sysctl_rx_bds(SYSCTL_HANDLER_ARGS);
467 static int	bce_sysctl_rx_ticks_int(SYSCTL_HANDLER_ARGS);
468 static int	bce_sysctl_rx_ticks(SYSCTL_HANDLER_ARGS);
469 #ifdef IFPOLL_ENABLE
470 static int	bce_sysctl_npoll_offset(SYSCTL_HANDLER_ARGS);
471 #endif
472 static int	bce_sysctl_coal_change(SYSCTL_HANDLER_ARGS,
473 		    uint32_t *, uint32_t);
474 
475 /*
476  * NOTE:
477  * Don't set bce_tx_ticks_int/bce_tx_ticks to 1023.  Linux's bnx2
478  * takes 1023 as the TX ticks limit.  However, using 1023 will
479  * cause 5708(B2) to generate extra interrupts (~2000/s) even when
480  * there is _no_ network activity on the NIC.
481  */
/*
 * Default interrupt coalescing parameters (see bce_coal_change()).
 * The bracketed "bcm:" values appear to be the vendor driver's
 * defaults — TODO confirm against Linux bnx2.
 */
static uint32_t	bce_tx_bds_int = 255;		/* bcm: 20 */
static uint32_t	bce_tx_bds = 255;		/* bcm: 20 */
static uint32_t	bce_tx_ticks_int = 1022;	/* bcm: 80 */
static uint32_t	bce_tx_ticks = 1022;		/* bcm: 80 */
static uint32_t	bce_rx_bds_int = 128;		/* bcm: 6 */
static uint32_t	bce_rx_bds = 0;			/* bcm: 6 */
static uint32_t	bce_rx_ticks_int = 150;		/* bcm: 18 */
static uint32_t	bce_rx_ticks = 150;		/* bcm: 18 */

/* TX descriptor write batching; presumably used by the TX path — see bce_encap()/bce_xmit(). */
static int	bce_tx_wreg = 8;

/* MSI/MSI-X enable knobs. */
static int	bce_msi_enable = 1;
static int	bce_msix_enable = 1;

/* RX/TX descriptor ring page counts. */
static int	bce_rx_pages = RX_PAGES_DEFAULT;
static int	bce_tx_pages = TX_PAGES_DEFAULT;

/* RX/TX ring counts; 0 selects an automatic value. */
static int	bce_rx_rings = 0;	/* auto */
static int	bce_tx_rings = 0;	/* auto */

/* Loader tunables (hw.bce.*) overriding the defaults above. */
TUNABLE_INT("hw.bce.tx_bds_int", &bce_tx_bds_int);
TUNABLE_INT("hw.bce.tx_bds", &bce_tx_bds);
TUNABLE_INT("hw.bce.tx_ticks_int", &bce_tx_ticks_int);
TUNABLE_INT("hw.bce.tx_ticks", &bce_tx_ticks);
TUNABLE_INT("hw.bce.rx_bds_int", &bce_rx_bds_int);
TUNABLE_INT("hw.bce.rx_bds", &bce_rx_bds);
TUNABLE_INT("hw.bce.rx_ticks_int", &bce_rx_ticks_int);
TUNABLE_INT("hw.bce.rx_ticks", &bce_rx_ticks);
TUNABLE_INT("hw.bce.msi.enable", &bce_msi_enable);
TUNABLE_INT("hw.bce.msix.enable", &bce_msix_enable);
TUNABLE_INT("hw.bce.rx_pages", &bce_rx_pages);
TUNABLE_INT("hw.bce.tx_pages", &bce_tx_pages);
TUNABLE_INT("hw.bce.tx_wreg", &bce_tx_wreg);
TUNABLE_INT("hw.bce.tx_rings", &bce_tx_rings);
TUNABLE_INT("hw.bce.rx_rings", &bce_rx_rings);
517 
518 /****************************************************************************/
519 /* DragonFly device dispatch table.                                         */
520 /****************************************************************************/
/*
 * Device method table: probe/attach/detach/shutdown entry points,
 * generic bus pass-throughs, and the MII register-access/statchg
 * callbacks invoked by miibus.
 */
static device_method_t bce_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		bce_probe),
	DEVMETHOD(device_attach,	bce_attach),
	DEVMETHOD(device_detach,	bce_detach),
	DEVMETHOD(device_shutdown,	bce_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	bce_miibus_read_reg),
	DEVMETHOD(miibus_writereg,	bce_miibus_write_reg),
	DEVMETHOD(miibus_statchg,	bce_miibus_statchg),

	DEVMETHOD_END
};
539 
/* Driver declaration: "bce" instances attach under pci; miibus under bce. */
static driver_t bce_driver = {
	"bce",
	bce_methods,
	sizeof(struct bce_softc)
};

static devclass_t bce_devclass;

DECLARE_DUMMY_MODULE(if_bce);
MODULE_DEPEND(bce, miibus, 1, 1, 1);
DRIVER_MODULE(if_bce, pci, bce_driver, bce_devclass, NULL, NULL);
DRIVER_MODULE(miibus, bce, miibus_driver, miibus_devclass, NULL, NULL);
552 
553 /****************************************************************************/
554 /* Device probe function.                                                   */
555 /*                                                                          */
556 /* Compares the device to the driver's list of supported devices and        */
557 /* reports back to the OS whether this is the right driver for the device.  */
558 /*                                                                          */
559 /* Returns:                                                                 */
560 /*   BUS_PROBE_DEFAULT on success, positive value on failure.               */
561 /****************************************************************************/
562 static int
563 bce_probe(device_t dev)
564 {
565 	struct bce_type *t;
566 	uint16_t vid, did, svid, sdid;
567 
568 	/* Get the data for the device to be probed. */
569 	vid  = pci_get_vendor(dev);
570 	did  = pci_get_device(dev);
571 	svid = pci_get_subvendor(dev);
572 	sdid = pci_get_subdevice(dev);
573 
574 	/* Look through the list of known devices for a match. */
575 	for (t = bce_devs; t->bce_name != NULL; ++t) {
576 		if (vid == t->bce_vid && did == t->bce_did &&
577 		    (svid == t->bce_svid || t->bce_svid == PCI_ANY_ID) &&
578 		    (sdid == t->bce_sdid || t->bce_sdid == PCI_ANY_ID)) {
579 		    	uint32_t revid = pci_read_config(dev, PCIR_REVID, 4);
580 			char *descbuf;
581 
582 			descbuf = kmalloc(BCE_DEVDESC_MAX, M_TEMP, M_WAITOK);
583 
584 			/* Print out the device identity. */
585 			ksnprintf(descbuf, BCE_DEVDESC_MAX, "%s (%c%d)",
586 				  t->bce_name,
587 				  ((revid & 0xf0) >> 4) + 'A', revid & 0xf);
588 
589 			device_set_desc_copy(dev, descbuf);
590 			kfree(descbuf, M_TEMP);
591 			return 0;
592 		}
593 	}
594 	return ENXIO;
595 }
596 
597 /****************************************************************************/
598 /* Adapter Info Print Function.                                             */
599 /*                                                                          */
600 /* Prints the adapter's ASIC id/revision, bus type and speed, bootcode      */
601 /* version and enabled features to the console.                             */
602 /*                                                                          */
603 /* Returns:                                                                 */
604 /*   None.                                                                  */
605 /****************************************************************************/
606 static void
607 bce_print_adapter_info(struct bce_softc *sc)
608 {
609 	device_printf(sc->bce_dev, "ASIC (0x%08X); ", sc->bce_chipid);
610 
611 	kprintf("Rev (%c%d); ", ((BCE_CHIP_ID(sc) & 0xf000) >> 12) + 'A',
612 		((BCE_CHIP_ID(sc) & 0x0ff0) >> 4));
613 
614 	/* Bus info. */
615 	if (sc->bce_flags & BCE_PCIE_FLAG) {
616 		kprintf("Bus (PCIe x%d, ", sc->link_width);
617 		switch (sc->link_speed) {
618 		case 1:
619 			kprintf("2.5Gbps); ");
620 			break;
621 		case 2:
622 			kprintf("5Gbps); ");
623 			break;
624 		default:
625 			kprintf("Unknown link speed); ");
626 			break;
627 		}
628 	} else {
629 		kprintf("Bus (PCI%s, %s, %dMHz); ",
630 		    ((sc->bce_flags & BCE_PCIX_FLAG) ? "-X" : ""),
631 		    ((sc->bce_flags & BCE_PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
632 		    sc->bus_speed_mhz);
633 	}
634 
635 	/* Firmware version and device features. */
636 	kprintf("B/C (%s)", sc->bce_bc_ver);
637 
638 	if ((sc->bce_flags & BCE_MFW_ENABLE_FLAG) ||
639 	    (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG)) {
640 		kprintf("; Flags(");
641 		if (sc->bce_flags & BCE_MFW_ENABLE_FLAG)
642 			kprintf("MFW[%s]", sc->bce_mfw_ver);
643 		if (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG)
644 			kprintf(" 2.5G");
645 		kprintf(")");
646 	}
647 	kprintf("\n");
648 }
649 
650 /****************************************************************************/
651 /* PCI Capabilities Probe Function.                                         */
652 /*                                                                          */
653 /* Walks the PCI capabilities list for the device to find what features are */
654 /* supported.                                                               */
655 /*                                                                          */
656 /* Returns:                                                                 */
657 /*   None.                                                                  */
658 /****************************************************************************/
659 static void
660 bce_probe_pci_caps(struct bce_softc *sc)
661 {
662 	device_t dev = sc->bce_dev;
663 	uint8_t ptr;
664 
665 	if (pci_is_pcix(dev))
666 		sc->bce_cap_flags |= BCE_PCIX_CAPABLE_FLAG;
667 
668 	ptr = pci_get_pciecap_ptr(dev);
669 	if (ptr) {
670 		uint16_t link_status = pci_read_config(dev, ptr + 0x12, 2);
671 
672 		sc->link_speed = link_status & 0xf;
673 		sc->link_width = (link_status >> 4) & 0x3f;
674 		sc->bce_cap_flags |= BCE_PCIE_CAPABLE_FLAG;
675 		sc->bce_flags |= BCE_PCIE_FLAG;
676 	}
677 }
678 
679 /****************************************************************************/
680 /* Device attach function.                                                  */
681 /*                                                                          */
682 /* Allocates device resources, performs secondary chip identification,      */
683 /* resets and initializes the hardware, and initializes driver instance     */
684 /* variables.                                                               */
685 /*                                                                          */
686 /* Returns:                                                                 */
687 /*   0 on success, positive value on failure.                               */
688 /****************************************************************************/
static int
bce_attach(device_t dev)
{
	struct bce_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t val;
	int rid, rc = 0;
	int i, j;
	struct mii_probe_args mii_args;
	uintptr_t mii_priv = 0;
#ifdef IFPOLL_ENABLE
	int offset, offset_def;
#endif

	sc->bce_dev = dev;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	/* MSI-X slots start out unassigned: no target CPU, no resource id. */
	lwkt_serialize_init(&sc->main_serialize);
	for (i = 0; i < BCE_MSIX_MAX; ++i) {
		struct bce_msix_data *msix = &sc->bce_msix[i];

		msix->msix_cpuid = -1;
		msix->msix_rid = -1;
	}

	pci_enable_busmaster(dev);

	/* Detect PCI-X/PCIe capabilities before touching the chip. */
	bce_probe_pci_caps(sc);

	/* Allocate PCI memory resources. */
	rid = PCIR_BAR(0);
	sc->bce_res_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
						 RF_ACTIVE | PCI_RF_DENSE);
	if (sc->bce_res_mem == NULL) {
		device_printf(dev, "PCI memory allocation failed\n");
		return ENXIO;
	}
	sc->bce_btag = rman_get_bustag(sc->bce_res_mem);
	sc->bce_bhandle = rman_get_bushandle(sc->bce_res_mem);

	/*
	 * Configure byte swap and enable indirect register access.
	 * Rely on CPU to do target byte swapping on big endian systems.
	 * Access to registers outside of PCI configuration space are not
	 * valid until this is done.
	 */
	pci_write_config(dev, BCE_PCICFG_MISC_CONFIG,
			 BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
			 BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP, 4);

	/* Save ASIC revision info. */
	sc->bce_chipid =  REG_RD(sc, BCE_MISC_ID);

	/* Weed out any non-production controller revisions. */
	switch (BCE_CHIP_ID(sc)) {
	case BCE_CHIP_ID_5706_A0:
	case BCE_CHIP_ID_5706_A1:
	case BCE_CHIP_ID_5708_A0:
	case BCE_CHIP_ID_5708_B0:
	case BCE_CHIP_ID_5709_A0:
	case BCE_CHIP_ID_5709_B0:
	case BCE_CHIP_ID_5709_B1:
#ifdef foo
	/* 5709C B2 seems to work fine */
	case BCE_CHIP_ID_5709_B2:
#endif
		device_printf(dev, "Unsupported chip id 0x%08x!\n",
			      BCE_CHIP_ID(sc));
		rc = ENODEV;
		goto fail;
	}

	/* Select PHY driver quirks based on the controller generation. */
	mii_priv |= BRGPHY_FLAG_WIRESPEED;
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) {
		if (BCE_CHIP_REV(sc) == BCE_CHIP_REV_Ax ||
		    BCE_CHIP_REV(sc) == BCE_CHIP_REV_Bx)
			mii_priv |= BRGPHY_FLAG_NO_EARLYDAC;
	} else {
		mii_priv |= BRGPHY_FLAG_BER_BUG;
	}

	/*
	 * Find the base address for shared memory access.
	 * Newer versions of bootcode use a signature and offset
	 * while older versions use a fixed address.
	 */
	val = REG_RD_IND(sc, BCE_SHM_HDR_SIGNATURE);
	if ((val & BCE_SHM_HDR_SIGNATURE_SIG_MASK) ==
	    BCE_SHM_HDR_SIGNATURE_SIG) {
		/* Multi-port devices use different offsets in shared memory. */
		sc->bce_shmem_base = REG_RD_IND(sc,
		    BCE_SHM_HDR_ADDR_0 + (pci_get_function(sc->bce_dev) << 2));
	} else {
		sc->bce_shmem_base = HOST_VIEW_SHMEM_BASE;
	}

	/*
	 * Fetch the bootcode revision: decode the three packed version
	 * bytes into a dotted-decimal ASCII string ("X.Y.Z").
	 */
	val = bce_shmem_rd(sc, BCE_DEV_INFO_BC_REV);
	for (i = 0, j = 0; i < 3; i++) {
		uint8_t num;
		int k, skip0;

		num = (uint8_t)(val >> (24 - (i * 8)));
		/* Emit decimal digits, suppressing leading zeros. */
		for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
			if (num >= k || !skip0 || k == 1) {
				sc->bce_bc_ver[j++] = (num / k) + '0';
				skip0 = 0;
			}
		}
		if (i != 2)
			sc->bce_bc_ver[j++] = '.';
	}

	/* Check if any management firmware is running. */
	val = bce_shmem_rd(sc, BCE_PORT_FEATURE);
	if (val & BCE_PORT_FEATURE_ASF_ENABLED) {
		sc->bce_flags |= BCE_MFW_ENABLE_FLAG;

		/* Allow time for firmware to enter the running state. */
		for (i = 0; i < 30; i++) {
			val = bce_shmem_rd(sc, BCE_BC_STATE_CONDITION);
			if (val & BCE_CONDITION_MFW_RUN_MASK)
				break;
			DELAY(10000);
		}
	}

	/* Check the current bootcode state. */
	val = bce_shmem_rd(sc, BCE_BC_STATE_CONDITION) &
	    BCE_CONDITION_MFW_RUN_MASK;
	if (val != BCE_CONDITION_MFW_RUN_UNKNOWN &&
	    val != BCE_CONDITION_MFW_RUN_NONE) {
		uint32_t addr = bce_shmem_rd(sc, BCE_MFW_VER_PTR);

		/* Copy out the management firmware version string (12 bytes). */
		for (i = 0, j = 0; j < 3; j++) {
			val = bce_reg_rd_ind(sc, addr + j * 4);
			val = bswap32(val);
			memcpy(&sc->bce_mfw_ver[i], &val, 4);
			i += 4;
		}
	}

	/* Get PCI bus information (speed and type). */
	val = REG_RD(sc, BCE_PCICFG_MISC_STATUS);
	if (val & BCE_PCICFG_MISC_STATUS_PCIX_DET) {
		uint32_t clkreg;

		sc->bce_flags |= BCE_PCIX_FLAG;

		/* Derive the PCI-X bus speed from the detected clock. */
		clkreg = REG_RD(sc, BCE_PCICFG_PCI_CLOCK_CONTROL_BITS) &
			 BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			sc->bus_speed_mhz = 133;
			break;

		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			sc->bus_speed_mhz = 100;
			break;

		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			sc->bus_speed_mhz = 66;
			break;

		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			sc->bus_speed_mhz = 50;
			break;

		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			sc->bus_speed_mhz = 33;
			break;
		}
	} else {
		if (val & BCE_PCICFG_MISC_STATUS_M66EN)
			sc->bus_speed_mhz = 66;
		else
			sc->bus_speed_mhz = 33;
	}

	if (val & BCE_PCICFG_MISC_STATUS_32BIT_DET)
		sc->bce_flags |= BCE_PCI_32BIT_FLAG;

	/* Reset the controller. */
	rc = bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
	if (rc != 0)
		goto fail;

	/* Initialize the controller. */
	rc = bce_chipinit(sc);
	if (rc != 0) {
		device_printf(dev, "Controller initialization failed!\n");
		goto fail;
	}

	/* Perform NVRAM test. */
	rc = bce_nvram_test(sc);
	if (rc != 0) {
		device_printf(dev, "NVRAM test failed!\n");
		goto fail;
	}

	/* Fetch the permanent Ethernet MAC address. */
	bce_get_mac_addr(sc);

	/*
	 * Trip points control how many BDs
	 * should be ready before generating an
	 * interrupt while ticks control how long
	 * a BD can sit in the chain before
	 * generating an interrupt.  Set the default
	 * values for the RX and TX rings.
	 */

	/*
	 * NOTE(review): "BCE_DRBUG" looks like a typo for "BCE_DEBUG";
	 * as spelled, this debug block can never be enabled.  Confirm
	 * whether the misspelling is intentional before changing it.
	 */
#ifdef BCE_DRBUG
	/* Force more frequent interrupts. */
	sc->bce_tx_quick_cons_trip_int = 1;
	sc->bce_tx_quick_cons_trip     = 1;
	sc->bce_tx_ticks_int           = 0;
	sc->bce_tx_ticks               = 0;

	sc->bce_rx_quick_cons_trip_int = 1;
	sc->bce_rx_quick_cons_trip     = 1;
	sc->bce_rx_ticks_int           = 0;
	sc->bce_rx_ticks               = 0;
#else
	sc->bce_tx_quick_cons_trip_int = bce_tx_bds_int;
	sc->bce_tx_quick_cons_trip     = bce_tx_bds;
	sc->bce_tx_ticks_int           = bce_tx_ticks_int;
	sc->bce_tx_ticks               = bce_tx_ticks;

	sc->bce_rx_quick_cons_trip_int = bce_rx_bds_int;
	sc->bce_rx_quick_cons_trip     = bce_rx_bds;
	sc->bce_rx_ticks_int           = bce_rx_ticks_int;
	sc->bce_rx_ticks               = bce_rx_ticks;
#endif

	/* Update statistics once every second. */
	sc->bce_stats_ticks = 1000000 & 0xffff00;

	/* Find the media type for the adapter. */
	bce_get_media(sc);

	/* Find out RX/TX ring count */
	bce_setup_ring_cnt(sc);

	/* Allocate DMA memory resources. */
	rc = bce_dma_alloc(sc);
	if (rc != 0) {
		device_printf(dev, "DMA resource allocation failed!\n");
		goto fail;
	}

#ifdef IFPOLL_ENABLE
	/*
	 * NPOLLING RX/TX CPU offset
	 */
	if (sc->rx_ring_cnt2 == ncpus2) {
		offset = 0;
	} else {
		offset_def = (sc->rx_ring_cnt2 * device_get_unit(dev)) % ncpus2;
		offset = device_getenv_int(dev, "npoll.offset", offset_def);
		if (offset >= ncpus2 ||
		    offset % sc->rx_ring_cnt2 != 0) {
			device_printf(dev, "invalid npoll.offset %d, use %d\n",
			    offset, offset_def);
			offset = offset_def;
		}
	}
	sc->npoll_ofs = offset;
#endif

	/* Allocate PCI IRQ resources. */
	rc = bce_alloc_intr(sc);
	if (rc != 0)
		goto fail;

	/* Setup serializer */
	bce_setup_serialize(sc);

	/* Initialize the ifnet interface. */
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = bce_ioctl;
	ifp->if_start = bce_start;
	ifp->if_init = bce_init;
	ifp->if_serialize = bce_serialize;
	ifp->if_deserialize = bce_deserialize;
	ifp->if_tryserialize = bce_tryserialize;
#ifdef INVARIANTS
	ifp->if_serialize_assert = bce_serialize_assert;
#endif
#ifdef IFPOLL_ENABLE
	ifp->if_npoll = bce_npoll;
#endif

	ifp->if_mtu = ETHERMTU;
	ifp->if_hwassist = BCE_CSUM_FEATURES | CSUM_TSO;
	ifp->if_capabilities = BCE_IF_CAPABILITIES;
	if (sc->rx_ring_cnt > 1)
		ifp->if_capabilities |= IFCAP_RSS;
	ifp->if_capenable = ifp->if_capabilities;

	if (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG)
		ifp->if_baudrate = IF_Gbps(2.5);
	else
		ifp->if_baudrate = IF_Gbps(1);

	ifq_set_maxlen(&ifp->if_snd, USABLE_TX_BD(&sc->tx_rings[0]));
	ifq_set_ready(&ifp->if_snd);
	ifq_set_subq_cnt(&ifp->if_snd, sc->tx_ring_cnt);

	if (sc->tx_ring_cnt > 1) {
		ifp->if_mapsubq = ifq_mapsubq_mask;
		ifq_set_subq_mask(&ifp->if_snd, sc->tx_ring_cnt - 1);
	}

	/*
	 * Look for our PHY.
	 */
	mii_probe_args_init(&mii_args, bce_ifmedia_upd, bce_ifmedia_sts);
	mii_args.mii_probemask = 1 << sc->bce_phy_addr;
	mii_args.mii_privtag = MII_PRIVTAG_BRGPHY;
	mii_args.mii_priv = mii_priv;

	rc = mii_probe(dev, &sc->bce_miibus, &mii_args);
	if (rc != 0) {
		device_printf(dev, "PHY probe failed!\n");
		goto fail;
	}

	/* Attach to the Ethernet interface list. */
	ether_ifattach(ifp, sc->eaddr, NULL);

	/* Setup TX rings and subqueues */
	for (i = 0; i < sc->tx_ring_cnt; ++i) {
		struct ifaltq_subque *ifsq = ifq_get_subq(&ifp->if_snd, i);
		struct bce_tx_ring *txr = &sc->tx_rings[i];

		ifsq_set_cpuid(ifsq, sc->bce_msix[i].msix_cpuid);
		ifsq_set_priv(ifsq, txr);
		ifsq_set_hw_serialize(ifsq, &txr->tx_serialize);
		txr->ifsq = ifsq;

		ifsq_watchdog_init(&txr->tx_watchdog, ifsq, bce_watchdog);
	}

	callout_init_mp(&sc->bce_tick_callout);
	callout_init_mp(&sc->bce_pulse_callout);
	callout_init_mp(&sc->bce_ckmsi_callout);

	rc = bce_setup_intr(sc);
	if (rc != 0) {
		device_printf(dev, "Failed to setup IRQ!\n");
		ether_ifdetach(ifp);
		goto fail;
	}

	/* Set timer CPUID */
	bce_set_timer_cpuid(sc, FALSE);

	/* Add the supported sysctls to the kernel. */
	bce_add_sysctls(sc);

	/*
	 * The chip reset earlier notified the bootcode that
	 * a driver is present.  We now need to start our pulse
	 * routine so that the bootcode is reminded that we're
	 * still running.
	 */
	bce_pulse(sc);

	/* Get the firmware running so IPMI still works */
	bce_mgmt_init(sc);

	if (bootverbose)
		bce_print_adapter_info(sc);

	return 0;
fail:
	/* bce_detach() tolerates a partially-initialized softc. */
	bce_detach(dev);
	return(rc);
}
1075 
1076 /****************************************************************************/
1077 /* Device detach function.                                                  */
1078 /*                                                                          */
1079 /* Stops the controller, resets the controller, and releases resources.     */
1080 /*                                                                          */
1081 /* Returns:                                                                 */
1082 /*   0 on success, positive value on failure.                               */
1083 /****************************************************************************/
static int
bce_detach(device_t dev)
{
	struct bce_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = &sc->arpcom.ac_if;
		uint32_t msg;

		ifnet_serialize_all(ifp);

		/* Stop and reset the controller. */
		callout_stop(&sc->bce_pulse_callout);
		bce_stop(sc);
		/* Tell the bootcode whether the link may be brought down. */
		if (sc->bce_flags & BCE_NO_WOL_FLAG)
			msg = BCE_DRV_MSG_CODE_UNLOAD_LNK_DN;
		else
			msg = BCE_DRV_MSG_CODE_UNLOAD;
		bce_reset(sc, msg);

		bce_teardown_intr(sc);

		ifnet_deserialize_all(ifp);

		ether_ifdetach(ifp);
	}

	/* If we have a child device on the MII bus remove it too. */
	if (sc->bce_miibus)
		device_delete_child(dev, sc->bce_miibus);
	bus_generic_detach(dev);

	bce_free_intr(sc);

	if (sc->bce_res_mem != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0),
				     sc->bce_res_mem);
	}

	bce_dma_free(sc);

	if (sc->bce_sysctl_tree != NULL)
		sysctl_ctx_free(&sc->bce_sysctl_ctx);

	if (sc->serializes != NULL)
		kfree(sc->serializes, M_DEVBUF);

	return 0;
}
1133 
1134 /****************************************************************************/
1135 /* Device shutdown function.                                                */
1136 /*                                                                          */
1137 /* Stops and resets the controller.                                         */
1138 /*                                                                          */
1139 /* Returns:                                                                 */
1140 /*   Nothing                                                                */
1141 /****************************************************************************/
1142 static void
1143 bce_shutdown(device_t dev)
1144 {
1145 	struct bce_softc *sc = device_get_softc(dev);
1146 	struct ifnet *ifp = &sc->arpcom.ac_if;
1147 	uint32_t msg;
1148 
1149 	ifnet_serialize_all(ifp);
1150 
1151 	bce_stop(sc);
1152 	if (sc->bce_flags & BCE_NO_WOL_FLAG)
1153 		msg = BCE_DRV_MSG_CODE_UNLOAD_LNK_DN;
1154 	else
1155 		msg = BCE_DRV_MSG_CODE_UNLOAD;
1156 	bce_reset(sc, msg);
1157 
1158 	ifnet_deserialize_all(ifp);
1159 }
1160 
1161 /****************************************************************************/
1162 /* Indirect register read.                                                  */
1163 /*                                                                          */
1164 /* Reads NetXtreme II registers using an index/data register pair in PCI    */
1165 /* configuration space.  Using this mechanism avoids issues with posted     */
1166 /* reads but is much slower than memory-mapped I/O.                         */
1167 /*                                                                          */
1168 /* Returns:                                                                 */
1169 /*   The value of the register.                                             */
1170 /****************************************************************************/
1171 static uint32_t
1172 bce_reg_rd_ind(struct bce_softc *sc, uint32_t offset)
1173 {
1174 	device_t dev = sc->bce_dev;
1175 
1176 	pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4);
1177 	return pci_read_config(dev, BCE_PCICFG_REG_WINDOW, 4);
1178 }
1179 
1180 /****************************************************************************/
1181 /* Indirect register write.                                                 */
1182 /*                                                                          */
1183 /* Writes NetXtreme II registers using an index/data register pair in PCI   */
1184 /* configuration space.  Using this mechanism avoids issues with posted     */
/* writes but is much slower than memory-mapped I/O.                        */
1186 /*                                                                          */
1187 /* Returns:                                                                 */
1188 /*   Nothing.                                                               */
1189 /****************************************************************************/
1190 static void
1191 bce_reg_wr_ind(struct bce_softc *sc, uint32_t offset, uint32_t val)
1192 {
1193 	device_t dev = sc->bce_dev;
1194 
1195 	pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4);
1196 	pci_write_config(dev, BCE_PCICFG_REG_WINDOW, val, 4);
1197 }
1198 
1199 /****************************************************************************/
1200 /* Shared memory write.                                                     */
1201 /*                                                                          */
1202 /* Writes NetXtreme II shared memory region.                                */
1203 /*                                                                          */
1204 /* Returns:                                                                 */
1205 /*   Nothing.                                                               */
1206 /****************************************************************************/
1207 static void
1208 bce_shmem_wr(struct bce_softc *sc, uint32_t offset, uint32_t val)
1209 {
1210 	bce_reg_wr_ind(sc, sc->bce_shmem_base + offset, val);
1211 }
1212 
1213 /****************************************************************************/
1214 /* Shared memory read.                                                      */
1215 /*                                                                          */
1216 /* Reads NetXtreme II shared memory region.                                 */
1217 /*                                                                          */
1218 /* Returns:                                                                 */
1219 /*   The 32 bit value read.                                                 */
1220 /****************************************************************************/
1221 static u32
1222 bce_shmem_rd(struct bce_softc *sc, uint32_t offset)
1223 {
1224 	return bce_reg_rd_ind(sc, sc->bce_shmem_base + offset);
1225 }
1226 
1227 /****************************************************************************/
1228 /* Context memory write.                                                    */
1229 /*                                                                          */
1230 /* The NetXtreme II controller uses context memory to track connection      */
1231 /* information for L2 and higher network protocols.                         */
1232 /*                                                                          */
1233 /* Returns:                                                                 */
1234 /*   Nothing.                                                               */
1235 /****************************************************************************/
static void
bce_ctx_wr(struct bce_softc *sc, uint32_t cid_addr, uint32_t ctx_offset,
    uint32_t ctx_val)
{
	uint32_t idx, offset = ctx_offset + cid_addr;
	uint32_t val, retry_cnt = 5;

	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
		/* 5709/5716: post the write request and poll for completion. */
		REG_WR(sc, BCE_CTX_CTX_DATA, ctx_val);
		REG_WR(sc, BCE_CTX_CTX_CTRL, (offset | BCE_CTX_CTX_CTRL_WRITE_REQ));

		for (idx = 0; idx < retry_cnt; idx++) {
			val = REG_RD(sc, BCE_CTX_CTX_CTRL);
			if ((val & BCE_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			DELAY(5);
		}

		/* WRITE_REQ still set after all retries => the write hung. */
		if (val & BCE_CTX_CTX_CTRL_WRITE_REQ) {
			device_printf(sc->bce_dev,
			    "Unable to write CTX memory: "
			    "cid_addr = 0x%08X, offset = 0x%08X!\n",
			    cid_addr, ctx_offset);
		}
	} else {
		/* Older chips use a simple address/data register pair. */
		REG_WR(sc, BCE_CTX_DATA_ADR, offset);
		REG_WR(sc, BCE_CTX_DATA, ctx_val);
	}
}
1266 
1267 /****************************************************************************/
1268 /* PHY register read.                                                       */
1269 /*                                                                          */
1270 /* Implements register reads on the MII bus.                                */
1271 /*                                                                          */
1272 /* Returns:                                                                 */
1273 /*   The value of the register.                                             */
1274 /****************************************************************************/
1275 static int
1276 bce_miibus_read_reg(device_t dev, int phy, int reg)
1277 {
1278 	struct bce_softc *sc = device_get_softc(dev);
1279 	uint32_t val;
1280 	int i;
1281 
1282 	/* Make sure we are accessing the correct PHY address. */
1283 	KASSERT(phy == sc->bce_phy_addr,
1284 	    ("invalid phyno %d, should be %d\n", phy, sc->bce_phy_addr));
1285 
1286 	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1287 		val = REG_RD(sc, BCE_EMAC_MDIO_MODE);
1288 		val &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL;
1289 
1290 		REG_WR(sc, BCE_EMAC_MDIO_MODE, val);
1291 		REG_RD(sc, BCE_EMAC_MDIO_MODE);
1292 
1293 		DELAY(40);
1294 	}
1295 
1296 	val = BCE_MIPHY(phy) | BCE_MIREG(reg) |
1297 	      BCE_EMAC_MDIO_COMM_COMMAND_READ | BCE_EMAC_MDIO_COMM_DISEXT |
1298 	      BCE_EMAC_MDIO_COMM_START_BUSY;
1299 	REG_WR(sc, BCE_EMAC_MDIO_COMM, val);
1300 
1301 	for (i = 0; i < BCE_PHY_TIMEOUT; i++) {
1302 		DELAY(10);
1303 
1304 		val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
1305 		if (!(val & BCE_EMAC_MDIO_COMM_START_BUSY)) {
1306 			DELAY(5);
1307 
1308 			val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
1309 			val &= BCE_EMAC_MDIO_COMM_DATA;
1310 			break;
1311 		}
1312 	}
1313 
1314 	if (val & BCE_EMAC_MDIO_COMM_START_BUSY) {
1315 		if_printf(&sc->arpcom.ac_if,
1316 			  "Error: PHY read timeout! phy = %d, reg = 0x%04X\n",
1317 			  phy, reg);
1318 		val = 0x0;
1319 	} else {
1320 		val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
1321 	}
1322 
1323 	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1324 		val = REG_RD(sc, BCE_EMAC_MDIO_MODE);
1325 		val |= BCE_EMAC_MDIO_MODE_AUTO_POLL;
1326 
1327 		REG_WR(sc, BCE_EMAC_MDIO_MODE, val);
1328 		REG_RD(sc, BCE_EMAC_MDIO_MODE);
1329 
1330 		DELAY(40);
1331 	}
1332 	return (val & 0xffff);
1333 }
1334 
1335 /****************************************************************************/
1336 /* PHY register write.                                                      */
1337 /*                                                                          */
1338 /* Implements register writes on the MII bus.                               */
1339 /*                                                                          */
1340 /* Returns:                                                                 */
1341 /*   The value of the register.                                             */
1342 /****************************************************************************/
static int
bce_miibus_write_reg(device_t dev, int phy, int reg, int val)
{
	struct bce_softc *sc = device_get_softc(dev);
	uint32_t val1;
	int i;

	/* Make sure we are accessing the correct PHY address. */
	KASSERT(phy == sc->bce_phy_addr,
	    ("invalid phyno %d, should be %d\n", phy, sc->bce_phy_addr));

	/* Pause hardware auto-polling so it cannot race our MDIO access. */
	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE);
		val1 &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BCE_EMAC_MDIO_MODE, val1);
		REG_RD(sc, BCE_EMAC_MDIO_MODE);

		DELAY(40);
	}

	/* Issue the MDIO write command. */
	val1 = BCE_MIPHY(phy) | BCE_MIREG(reg) | val |
		BCE_EMAC_MDIO_COMM_COMMAND_WRITE |
		BCE_EMAC_MDIO_COMM_START_BUSY | BCE_EMAC_MDIO_COMM_DISEXT;
	REG_WR(sc, BCE_EMAC_MDIO_COMM, val1);

	/* Poll for the transaction to complete. */
	for (i = 0; i < BCE_PHY_TIMEOUT; i++) {
		DELAY(10);

		val1 = REG_RD(sc, BCE_EMAC_MDIO_COMM);
		if (!(val1 & BCE_EMAC_MDIO_COMM_START_BUSY)) {
			DELAY(5);
			break;
		}
	}

	if (val1 & BCE_EMAC_MDIO_COMM_START_BUSY)
		if_printf(&sc->arpcom.ac_if, "PHY write timeout!\n");

	/* Restore hardware auto-polling if it was enabled. */
	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE);
		val1 |= BCE_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BCE_EMAC_MDIO_MODE, val1);
		REG_RD(sc, BCE_EMAC_MDIO_MODE);

		DELAY(40);
	}
	return 0;
}
1393 
1394 /****************************************************************************/
1395 /* MII bus status change.                                                   */
1396 /*                                                                          */
1397 /* Called by the MII bus driver when the PHY establishes link to set the    */
1398 /* MAC interface registers.                                                 */
1399 /*                                                                          */
1400 /* Returns:                                                                 */
1401 /*   Nothing.                                                               */
1402 /****************************************************************************/
1403 static void
1404 bce_miibus_statchg(device_t dev)
1405 {
1406 	struct bce_softc *sc = device_get_softc(dev);
1407 	struct mii_data *mii = device_get_softc(sc->bce_miibus);
1408 
1409 	BCE_CLRBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT);
1410 
1411 	/*
1412 	 * Set MII or GMII interface based on the speed negotiated
1413 	 * by the PHY.
1414 	 */
1415 	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
1416 	    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) {
1417 		BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT_GMII);
1418 	} else {
1419 		BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT_MII);
1420 	}
1421 
1422 	/*
1423 	 * Set half or full duplex based on the duplicity negotiated
1424 	 * by the PHY.
1425 	 */
1426 	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
1427 		BCE_CLRBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_HALF_DUPLEX);
1428 	} else {
1429 		BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_HALF_DUPLEX);
1430 	}
1431 }
1432 
1433 /****************************************************************************/
1434 /* Acquire NVRAM lock.                                                      */
1435 /*                                                                          */
1436 /* Before the NVRAM can be accessed the caller must acquire an NVRAM lock.  */
/* Lock 0 is reserved, lock 1 is used by firmware and lock 2 is for use     */
/* by the driver.                                                           */
1439 /*                                                                          */
1440 /* Returns:                                                                 */
1441 /*   0 on success, positive value on failure.                               */
1442 /****************************************************************************/
1443 static int
1444 bce_acquire_nvram_lock(struct bce_softc *sc)
1445 {
1446 	uint32_t val;
1447 	int j;
1448 
1449 	/* Request access to the flash interface. */
1450 	REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_SET2);
1451 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1452 		val = REG_RD(sc, BCE_NVM_SW_ARB);
1453 		if (val & BCE_NVM_SW_ARB_ARB_ARB2)
1454 			break;
1455 
1456 		DELAY(5);
1457 	}
1458 
1459 	if (j >= NVRAM_TIMEOUT_COUNT) {
1460 		return EBUSY;
1461 	}
1462 	return 0;
1463 }
1464 
1465 /****************************************************************************/
1466 /* Release NVRAM lock.                                                      */
1467 /*                                                                          */
1468 /* When the caller is finished accessing NVRAM the lock must be released.   */
/* Lock 0 is reserved, lock 1 is used by firmware and lock 2 is for use     */
/* by the driver.                                                           */
1471 /*                                                                          */
1472 /* Returns:                                                                 */
1473 /*   0 on success, positive value on failure.                               */
1474 /****************************************************************************/
1475 static int
1476 bce_release_nvram_lock(struct bce_softc *sc)
1477 {
1478 	int j;
1479 	uint32_t val;
1480 
1481 	/*
1482 	 * Relinquish nvram interface.
1483 	 */
1484 	REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_CLR2);
1485 
1486 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1487 		val = REG_RD(sc, BCE_NVM_SW_ARB);
1488 		if (!(val & BCE_NVM_SW_ARB_ARB_ARB2))
1489 			break;
1490 
1491 		DELAY(5);
1492 	}
1493 
1494 	if (j >= NVRAM_TIMEOUT_COUNT) {
1495 		return EBUSY;
1496 	}
1497 	return 0;
1498 }
1499 
1500 /****************************************************************************/
1501 /* Enable NVRAM access.                                                     */
1502 /*                                                                          */
1503 /* Before accessing NVRAM for read or write operations the caller must      */
/* enable NVRAM access.                                                     */
1505 /*                                                                          */
1506 /* Returns:                                                                 */
1507 /*   Nothing.                                                               */
1508 /****************************************************************************/
1509 static void
1510 bce_enable_nvram_access(struct bce_softc *sc)
1511 {
1512 	uint32_t val;
1513 
1514 	val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE);
1515 	/* Enable both bits, even on read. */
1516 	REG_WR(sc, BCE_NVM_ACCESS_ENABLE,
1517 	       val | BCE_NVM_ACCESS_ENABLE_EN | BCE_NVM_ACCESS_ENABLE_WR_EN);
1518 }
1519 
1520 /****************************************************************************/
1521 /* Disable NVRAM access.                                                    */
1522 /*                                                                          */
1523 /* When the caller is finished accessing NVRAM access must be disabled.     */
1524 /*                                                                          */
1525 /* Returns:                                                                 */
1526 /*   Nothing.                                                               */
1527 /****************************************************************************/
1528 static void
1529 bce_disable_nvram_access(struct bce_softc *sc)
1530 {
1531 	uint32_t val;
1532 
1533 	val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE);
1534 
1535 	/* Disable both bits, even after read. */
1536 	REG_WR(sc, BCE_NVM_ACCESS_ENABLE,
1537 	       val & ~(BCE_NVM_ACCESS_ENABLE_EN | BCE_NVM_ACCESS_ENABLE_WR_EN));
1538 }
1539 
1540 /****************************************************************************/
1541 /* Read a dword (32 bits) from NVRAM.                                       */
1542 /*                                                                          */
1543 /* Read a 32 bit word from NVRAM.  The caller is assumed to have already    */
1544 /* obtained the NVRAM lock and enabled the controller for NVRAM access.     */
1545 /*                                                                          */
1546 /* Returns:                                                                 */
1547 /*   0 on success and the 32 bit value read, positive value on failure.     */
1548 /****************************************************************************/
static int
bce_nvram_read_dword(struct bce_softc *sc, uint32_t offset, uint8_t *ret_val,
		     uint32_t cmd_flags)
{
	uint32_t cmd;
	int i, rc = 0;

	/* Build the command word. */
	cmd = BCE_NVM_COMMAND_DOIT | cmd_flags;

	/*
	 * Calculate the offset for buffered flash.  Parts flagged
	 * BCE_NV_TRANSLATE address pages by shifting the page number
	 * into the high bits instead of using a flat byte offset.
	 */
	if (sc->bce_flash_info->flags & BCE_NV_TRANSLATE) {
		offset = ((offset / sc->bce_flash_info->page_size) <<
			  sc->bce_flash_info->page_bits) +
			 (offset % sc->bce_flash_info->page_size);
	}

	/*
	 * Clear the DONE bit separately, set the address to read,
	 * and issue the read.
	 */
	REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
	REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
	REG_WR(sc, BCE_NVM_COMMAND, cmd);

	/* Wait for completion: poll DONE up to NVRAM_TIMEOUT_COUNT times, 5us apart. */
	for (i = 0; i < NVRAM_TIMEOUT_COUNT; i++) {
		uint32_t val;

		DELAY(5);

		val = REG_RD(sc, BCE_NVM_COMMAND);
		if (val & BCE_NVM_COMMAND_DONE) {
			val = REG_RD(sc, BCE_NVM_READ);

			/* NVRAM data is big-endian on the wire; copy out bytewise. */
			val = be32toh(val);
			memcpy(ret_val, &val, 4);
			break;
		}
	}

	/* Check for errors (loop ran to completion without DONE). */
	if (i >= NVRAM_TIMEOUT_COUNT) {
		if_printf(&sc->arpcom.ac_if,
			  "Timeout error reading NVRAM at offset 0x%08X!\n",
			  offset);
		rc = EBUSY;
	}
	return rc;
}
1599 
1600 /****************************************************************************/
1601 /* Initialize NVRAM access.                                                 */
1602 /*                                                                          */
1603 /* Identify the NVRAM device in use and prepare the NVRAM interface to      */
1604 /* access that device.                                                      */
1605 /*                                                                          */
1606 /* Returns:                                                                 */
1607 /*   0 on success, positive value on failure.                               */
1608 /****************************************************************************/
static int
bce_init_nvram(struct bce_softc *sc)
{
	uint32_t val;
	int j, entry_count, rc = 0;
	const struct flash_spec *flash;

	/*
	 * 5709/5716 use a fixed flash layout; skip the strap probing
	 * below and go straight to the size lookup.
	 */
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
		sc->bce_flash_info = &flash_5709;
		goto bce_init_nvram_get_flash_size;
	}

	/* Determine the selected interface. */
	val = REG_RD(sc, BCE_NVM_CFG1);

	entry_count = sizeof(flash_table) / sizeof(struct flash_spec);

	/*
	 * Flash reconfiguration is required to support additional
	 * NVRAM devices not directly supported in hardware.
	 * Check if the flash interface was reconfigured
	 * by the bootcode.
	 */

	if (val & 0x40000000) {
		/* Flash interface reconfigured by bootcode. */
		for (j = 0, flash = flash_table; j < entry_count;
		     j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				sc->bce_flash_info = flash;
				break;
			}
		}
	} else {
		/* Flash interface not yet reconfigured. */
		uint32_t mask;

		/* Bit 23 selects the backup strap decoding. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		/* Look for the matching NVRAM device configuration data. */
		for (j = 0, flash = flash_table; j < entry_count;
		     j++, flash++) {
			/* Check if the device matches any of the known devices. */
			if ((val & mask) == (flash->strapping & mask)) {
				/* Found a device match. */
				sc->bce_flash_info = flash;

				/* Request access to the flash interface. */
				/* NOTE(review): on lock failure we return with
				 * bce_flash_info already set to this entry. */
				rc = bce_acquire_nvram_lock(sc);
				if (rc != 0)
					return rc;

				/* Reconfigure the flash interface. */
				bce_enable_nvram_access(sc);
				REG_WR(sc, BCE_NVM_CFG1, flash->config1);
				REG_WR(sc, BCE_NVM_CFG2, flash->config2);
				REG_WR(sc, BCE_NVM_CFG3, flash->config3);
				REG_WR(sc, BCE_NVM_WRITE1, flash->write1);
				bce_disable_nvram_access(sc);
				bce_release_nvram_lock(sc);
				break;
			}
		}
	}

	/* Check if a matching device was found (covers both loops above). */
	if (j == entry_count) {
		sc->bce_flash_info = NULL;
		if_printf(&sc->arpcom.ac_if, "Unknown Flash NVRAM found!\n");
		return ENODEV;
	}

bce_init_nvram_get_flash_size:
	/* Prefer the NVM size advertised via shared memory, else table total. */
	val = bce_shmem_rd(sc, BCE_SHARED_HW_CFG_CONFIG2) &
	    BCE_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		sc->bce_flash_size = val;
	else
		sc->bce_flash_size = sc->bce_flash_info->total_size;

	return rc;
}
1697 
1698 /****************************************************************************/
1699 /* Read an arbitrary range of data from NVRAM.                              */
1700 /*                                                                          */
1701 /* Prepares the NVRAM interface for access and reads the requested data     */
1702 /* into the supplied buffer.                                                */
1703 /*                                                                          */
1704 /* Returns:                                                                 */
1705 /*   0 on success and the data read, positive value on failure.             */
1706 /****************************************************************************/
1707 static int
1708 bce_nvram_read(struct bce_softc *sc, uint32_t offset, uint8_t *ret_buf,
1709 	       int buf_size)
1710 {
1711 	uint32_t cmd_flags, offset32, len32, extra;
1712 	int rc = 0;
1713 
1714 	if (buf_size == 0)
1715 		return 0;
1716 
1717 	/* Request access to the flash interface. */
1718 	rc = bce_acquire_nvram_lock(sc);
1719 	if (rc != 0)
1720 		return rc;
1721 
1722 	/* Enable access to flash interface */
1723 	bce_enable_nvram_access(sc);
1724 
1725 	len32 = buf_size;
1726 	offset32 = offset;
1727 	extra = 0;
1728 
1729 	cmd_flags = 0;
1730 
1731 	/* XXX should we release nvram lock if read_dword() fails? */
1732 	if (offset32 & 3) {
1733 		uint8_t buf[4];
1734 		uint32_t pre_len;
1735 
1736 		offset32 &= ~3;
1737 		pre_len = 4 - (offset & 3);
1738 
1739 		if (pre_len >= len32) {
1740 			pre_len = len32;
1741 			cmd_flags = BCE_NVM_COMMAND_FIRST | BCE_NVM_COMMAND_LAST;
1742 		} else {
1743 			cmd_flags = BCE_NVM_COMMAND_FIRST;
1744 		}
1745 
1746 		rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
1747 		if (rc)
1748 			return rc;
1749 
1750 		memcpy(ret_buf, buf + (offset & 3), pre_len);
1751 
1752 		offset32 += 4;
1753 		ret_buf += pre_len;
1754 		len32 -= pre_len;
1755 	}
1756 
1757 	if (len32 & 3) {
1758 		extra = 4 - (len32 & 3);
1759 		len32 = (len32 + 4) & ~3;
1760 	}
1761 
1762 	if (len32 == 4) {
1763 		uint8_t buf[4];
1764 
1765 		if (cmd_flags)
1766 			cmd_flags = BCE_NVM_COMMAND_LAST;
1767 		else
1768 			cmd_flags = BCE_NVM_COMMAND_FIRST |
1769 				    BCE_NVM_COMMAND_LAST;
1770 
1771 		rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
1772 
1773 		memcpy(ret_buf, buf, 4 - extra);
1774 	} else if (len32 > 0) {
1775 		uint8_t buf[4];
1776 
1777 		/* Read the first word. */
1778 		if (cmd_flags)
1779 			cmd_flags = 0;
1780 		else
1781 			cmd_flags = BCE_NVM_COMMAND_FIRST;
1782 
1783 		rc = bce_nvram_read_dword(sc, offset32, ret_buf, cmd_flags);
1784 
1785 		/* Advance to the next dword. */
1786 		offset32 += 4;
1787 		ret_buf += 4;
1788 		len32 -= 4;
1789 
1790 		while (len32 > 4 && rc == 0) {
1791 			rc = bce_nvram_read_dword(sc, offset32, ret_buf, 0);
1792 
1793 			/* Advance to the next dword. */
1794 			offset32 += 4;
1795 			ret_buf += 4;
1796 			len32 -= 4;
1797 		}
1798 
1799 		if (rc)
1800 			goto bce_nvram_read_locked_exit;
1801 
1802 		cmd_flags = BCE_NVM_COMMAND_LAST;
1803 		rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
1804 
1805 		memcpy(ret_buf, buf, 4 - extra);
1806 	}
1807 
1808 bce_nvram_read_locked_exit:
1809 	/* Disable access to flash interface and release the lock. */
1810 	bce_disable_nvram_access(sc);
1811 	bce_release_nvram_lock(sc);
1812 
1813 	return rc;
1814 }
1815 
1816 /****************************************************************************/
1817 /* Verifies that NVRAM is accessible and contains valid data.               */
1818 /*                                                                          */
1819 /* Reads the configuration data from NVRAM and verifies that the CRC is     */
1820 /* correct.                                                                 */
1821 /*                                                                          */
1822 /* Returns:                                                                 */
1823 /*   0 on success, positive value on failure.                               */
1824 /****************************************************************************/
1825 static int
1826 bce_nvram_test(struct bce_softc *sc)
1827 {
1828 	uint32_t buf[BCE_NVRAM_SIZE / 4];
1829 	uint32_t magic, csum;
1830 	uint8_t *data = (uint8_t *)buf;
1831 	int rc = 0;
1832 
1833 	/*
1834 	 * Check that the device NVRAM is valid by reading
1835 	 * the magic value at offset 0.
1836 	 */
1837 	rc = bce_nvram_read(sc, 0, data, 4);
1838 	if (rc != 0)
1839 		return rc;
1840 
1841 	magic = be32toh(buf[0]);
1842 	if (magic != BCE_NVRAM_MAGIC) {
1843 		if_printf(&sc->arpcom.ac_if,
1844 			  "Invalid NVRAM magic value! Expected: 0x%08X, "
1845 			  "Found: 0x%08X\n", BCE_NVRAM_MAGIC, magic);
1846 		return ENODEV;
1847 	}
1848 
1849 	/*
1850 	 * Verify that the device NVRAM includes valid
1851 	 * configuration data.
1852 	 */
1853 	rc = bce_nvram_read(sc, 0x100, data, BCE_NVRAM_SIZE);
1854 	if (rc != 0)
1855 		return rc;
1856 
1857 	csum = ether_crc32_le(data, 0x100);
1858 	if (csum != BCE_CRC32_RESIDUAL) {
1859 		if_printf(&sc->arpcom.ac_if,
1860 			  "Invalid Manufacturing Information NVRAM CRC! "
1861 			  "Expected: 0x%08X, Found: 0x%08X\n",
1862 			  BCE_CRC32_RESIDUAL, csum);
1863 		return ENODEV;
1864 	}
1865 
1866 	csum = ether_crc32_le(data + 0x100, 0x100);
1867 	if (csum != BCE_CRC32_RESIDUAL) {
1868 		if_printf(&sc->arpcom.ac_if,
1869 			  "Invalid Feature Configuration Information "
1870 			  "NVRAM CRC! Expected: 0x%08X, Found: 08%08X\n",
1871 			  BCE_CRC32_RESIDUAL, csum);
1872 		rc = ENODEV;
1873 	}
1874 	return rc;
1875 }
1876 
1877 /****************************************************************************/
1878 /* Identifies the current media type of the controller and sets the PHY     */
1879 /* address.                                                                 */
1880 /*                                                                          */
1881 /* Returns:                                                                 */
1882 /*   Nothing.                                                               */
1883 /****************************************************************************/
1884 static void
1885 bce_get_media(struct bce_softc *sc)
1886 {
1887 	uint32_t val;
1888 
1889 	sc->bce_phy_addr = 1;
1890 
1891 	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
1892 	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
1893  		uint32_t val = REG_RD(sc, BCE_MISC_DUAL_MEDIA_CTRL);
1894 		uint32_t bond_id = val & BCE_MISC_DUAL_MEDIA_CTRL_BOND_ID;
1895 		uint32_t strap;
1896 
1897 		/*
1898 		 * The BCM5709S is software configurable
1899 		 * for Copper or SerDes operation.
1900 		 */
1901 		if (bond_id == BCE_MISC_DUAL_MEDIA_CTRL_BOND_ID_C) {
1902 			return;
1903 		} else if (bond_id == BCE_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
1904 			sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
1905 			return;
1906 		}
1907 
1908 		if (val & BCE_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE) {
1909 			strap = (val & BCE_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
1910 		} else {
1911 			strap =
1912 			(val & BCE_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
1913 		}
1914 
1915 		if (pci_get_function(sc->bce_dev) == 0) {
1916 			switch (strap) {
1917 			case 0x4:
1918 			case 0x5:
1919 			case 0x6:
1920 				sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
1921 				break;
1922 			}
1923 		} else {
1924 			switch (strap) {
1925 			case 0x1:
1926 			case 0x2:
1927 			case 0x4:
1928 				sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
1929 				break;
1930 			}
1931 		}
1932 	} else if (BCE_CHIP_BOND_ID(sc) & BCE_CHIP_BOND_ID_SERDES_BIT) {
1933 		sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
1934 	}
1935 
1936 	if (sc->bce_phy_flags & BCE_PHY_SERDES_FLAG) {
1937 		sc->bce_flags |= BCE_NO_WOL_FLAG;
1938 		if (BCE_CHIP_NUM(sc) != BCE_CHIP_NUM_5706) {
1939 			sc->bce_phy_addr = 2;
1940 			val = bce_shmem_rd(sc, BCE_SHARED_HW_CFG_CONFIG);
1941 			if (val & BCE_SHARED_HW_CFG_PHY_2_5G)
1942 				sc->bce_phy_flags |= BCE_PHY_2_5G_CAPABLE_FLAG;
1943 		}
1944 	} else if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) ||
1945 	    (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708)) {
1946 		sc->bce_phy_flags |= BCE_PHY_CRC_FIX_FLAG;
1947 	}
1948 }
1949 
1950 static void
1951 bce_destroy_tx_ring(struct bce_tx_ring *txr)
1952 {
1953 	int i;
1954 
1955 	/* Destroy the TX buffer descriptor DMA stuffs. */
1956 	if (txr->tx_bd_chain_tag != NULL) {
1957 		for (i = 0; i < txr->tx_pages; i++) {
1958 			if (txr->tx_bd_chain[i] != NULL) {
1959 				bus_dmamap_unload(txr->tx_bd_chain_tag,
1960 				    txr->tx_bd_chain_map[i]);
1961 				bus_dmamem_free(txr->tx_bd_chain_tag,
1962 				    txr->tx_bd_chain[i],
1963 				    txr->tx_bd_chain_map[i]);
1964 			}
1965 		}
1966 		bus_dma_tag_destroy(txr->tx_bd_chain_tag);
1967 	}
1968 
1969 	/* Destroy the TX mbuf DMA stuffs. */
1970 	if (txr->tx_mbuf_tag != NULL) {
1971 		for (i = 0; i < TOTAL_TX_BD(txr); i++) {
1972 			/* Must have been unloaded in bce_stop() */
1973 			KKASSERT(txr->tx_bufs[i].tx_mbuf_ptr == NULL);
1974 			bus_dmamap_destroy(txr->tx_mbuf_tag,
1975 			    txr->tx_bufs[i].tx_mbuf_map);
1976 		}
1977 		bus_dma_tag_destroy(txr->tx_mbuf_tag);
1978 	}
1979 
1980 	if (txr->tx_bd_chain_map != NULL)
1981 		kfree(txr->tx_bd_chain_map, M_DEVBUF);
1982 	if (txr->tx_bd_chain != NULL)
1983 		kfree(txr->tx_bd_chain, M_DEVBUF);
1984 	if (txr->tx_bd_chain_paddr != NULL)
1985 		kfree(txr->tx_bd_chain_paddr, M_DEVBUF);
1986 
1987 	if (txr->tx_bufs != NULL)
1988 		kfree(txr->tx_bufs, M_DEVBUF);
1989 }
1990 
1991 static void
1992 bce_destroy_rx_ring(struct bce_rx_ring *rxr)
1993 {
1994 	int i;
1995 
1996 	/* Destroy the RX buffer descriptor DMA stuffs. */
1997 	if (rxr->rx_bd_chain_tag != NULL) {
1998 		for (i = 0; i < rxr->rx_pages; i++) {
1999 			if (rxr->rx_bd_chain[i] != NULL) {
2000 				bus_dmamap_unload(rxr->rx_bd_chain_tag,
2001 				    rxr->rx_bd_chain_map[i]);
2002 				bus_dmamem_free(rxr->rx_bd_chain_tag,
2003 				    rxr->rx_bd_chain[i],
2004 				    rxr->rx_bd_chain_map[i]);
2005 			}
2006 		}
2007 		bus_dma_tag_destroy(rxr->rx_bd_chain_tag);
2008 	}
2009 
2010 	/* Destroy the RX mbuf DMA stuffs. */
2011 	if (rxr->rx_mbuf_tag != NULL) {
2012 		for (i = 0; i < TOTAL_RX_BD(rxr); i++) {
2013 			/* Must have been unloaded in bce_stop() */
2014 			KKASSERT(rxr->rx_bufs[i].rx_mbuf_ptr == NULL);
2015 			bus_dmamap_destroy(rxr->rx_mbuf_tag,
2016 			    rxr->rx_bufs[i].rx_mbuf_map);
2017 		}
2018 		bus_dmamap_destroy(rxr->rx_mbuf_tag, rxr->rx_mbuf_tmpmap);
2019 		bus_dma_tag_destroy(rxr->rx_mbuf_tag);
2020 	}
2021 
2022 	if (rxr->rx_bd_chain_map != NULL)
2023 		kfree(rxr->rx_bd_chain_map, M_DEVBUF);
2024 	if (rxr->rx_bd_chain != NULL)
2025 		kfree(rxr->rx_bd_chain, M_DEVBUF);
2026 	if (rxr->rx_bd_chain_paddr != NULL)
2027 		kfree(rxr->rx_bd_chain_paddr, M_DEVBUF);
2028 
2029 	if (rxr->rx_bufs != NULL)
2030 		kfree(rxr->rx_bufs, M_DEVBUF);
2031 }
2032 
2033 /****************************************************************************/
2034 /* Free any DMA memory owned by the driver.                                 */
2035 /*                                                                          */
/* Scans through each data structure that requires DMA memory and frees     */
2037 /* the memory if allocated.                                                 */
2038 /*                                                                          */
2039 /* Returns:                                                                 */
2040 /*   Nothing.                                                               */
2041 /****************************************************************************/
2042 static void
2043 bce_dma_free(struct bce_softc *sc)
2044 {
2045 	int i;
2046 
2047 	/* Destroy the status block. */
2048 	if (sc->status_tag != NULL) {
2049 		if (sc->status_block != NULL) {
2050 			bus_dmamap_unload(sc->status_tag, sc->status_map);
2051 			bus_dmamem_free(sc->status_tag, sc->status_block,
2052 					sc->status_map);
2053 		}
2054 		bus_dma_tag_destroy(sc->status_tag);
2055 	}
2056 
2057 	/* Destroy the statistics block. */
2058 	if (sc->stats_tag != NULL) {
2059 		if (sc->stats_block != NULL) {
2060 			bus_dmamap_unload(sc->stats_tag, sc->stats_map);
2061 			bus_dmamem_free(sc->stats_tag, sc->stats_block,
2062 					sc->stats_map);
2063 		}
2064 		bus_dma_tag_destroy(sc->stats_tag);
2065 	}
2066 
2067 	/* Destroy the CTX DMA stuffs. */
2068 	if (sc->ctx_tag != NULL) {
2069 		for (i = 0; i < sc->ctx_pages; i++) {
2070 			if (sc->ctx_block[i] != NULL) {
2071 				bus_dmamap_unload(sc->ctx_tag, sc->ctx_map[i]);
2072 				bus_dmamem_free(sc->ctx_tag, sc->ctx_block[i],
2073 						sc->ctx_map[i]);
2074 			}
2075 		}
2076 		bus_dma_tag_destroy(sc->ctx_tag);
2077 	}
2078 
2079 	/* Free TX rings */
2080 	if (sc->tx_rings != NULL) {
2081 		for (i = 0; i < sc->tx_ring_cnt; ++i)
2082 			bce_destroy_tx_ring(&sc->tx_rings[i]);
2083 		kfree(sc->tx_rings, M_DEVBUF);
2084 	}
2085 
2086 	/* Free RX rings */
2087 	if (sc->rx_rings != NULL) {
2088 		for (i = 0; i < sc->rx_ring_cnt; ++i)
2089 			bce_destroy_rx_ring(&sc->rx_rings[i]);
2090 		kfree(sc->rx_rings, M_DEVBUF);
2091 	}
2092 
2093 	/* Destroy the parent tag */
2094 	if (sc->parent_tag != NULL)
2095 		bus_dma_tag_destroy(sc->parent_tag);
2096 }
2097 
2098 /****************************************************************************/
2099 /* Get DMA memory from the OS.                                              */
2100 /*                                                                          */
/* Validates that the OS has provided a single DMA segment in response to   */
/* a bus_dmamap_load() call and saves that segment's physical address in    */
/* the bus_addr_t pointed to by the callback argument.  On error the        */
/* address is left untouched; the caller detects the failure from the       */
/* bus_dmamap_load() return value.                                          */
2106 /*                                                                          */
2107 /* Returns:                                                                 */
2108 /*   Nothing.                                                               */
2109 /****************************************************************************/
2110 static void
2111 bce_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2112 {
2113 	bus_addr_t *busaddr = arg;
2114 
2115 	/* Check for an error and signal the caller that an error occurred. */
2116 	if (error)
2117 		return;
2118 
2119 	KASSERT(nseg == 1, ("only one segment is allowed"));
2120 	*busaddr = segs->ds_addr;
2121 }
2122 
/****************************************************************************/
/* Allocate and initialize the DMA resources for one TX ring.               */
/*                                                                          */
/* Creates the TX buffer descriptor chain pages and a DMA map for every     */
/* TX mbuf slot.  On failure the partially created resources are left in    */
/* place; presumably the caller tears them down via bce_destroy_tx_ring()   */
/* (TODO confirm against the attach error path).                            */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
static int
bce_create_tx_ring(struct bce_tx_ring *txr)
{
	int pages, rc, i;

	lwkt_serialize_init(&txr->tx_serialize);
	txr->tx_wreg = bce_tx_wreg;

	/* Pages must be a power of two within (0, TX_PAGES_MAX]. */
	pages = device_getenv_int(txr->sc->bce_dev, "tx_pages", bce_tx_pages);
	if (pages <= 0 || pages > TX_PAGES_MAX || !powerof2(pages)) {
		device_printf(txr->sc->bce_dev, "invalid # of TX pages\n");
		pages = TX_PAGES_DEFAULT;
	}
	txr->tx_pages = pages;

	/* Bookkeeping arrays: one entry per chain page. */
	txr->tx_bd_chain_map = kmalloc(sizeof(bus_dmamap_t) * txr->tx_pages,
	    M_DEVBUF, M_WAITOK | M_ZERO);
	txr->tx_bd_chain = kmalloc(sizeof(struct tx_bd *) * txr->tx_pages,
	    M_DEVBUF, M_WAITOK | M_ZERO);
	txr->tx_bd_chain_paddr = kmalloc(sizeof(bus_addr_t) * txr->tx_pages,
	    M_DEVBUF, M_WAITOK | M_ZERO);

	/* One buffer bookkeeping entry per TX buffer descriptor. */
	txr->tx_bufs = kmalloc_cachealign(
	    sizeof(struct bce_tx_buf) * TOTAL_TX_BD(txr),
	    M_DEVBUF, M_WAITOK | M_ZERO);

	/*
	 * Create a DMA tag for the TX buffer descriptor chain,
	 * allocate and clear the  memory, and fetch the
	 * physical address of the block.
	 */
	rc = bus_dma_tag_create(txr->sc->parent_tag, BCM_PAGE_SIZE, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    BCE_TX_CHAIN_PAGE_SZ, 1, BCE_TX_CHAIN_PAGE_SZ,
	    0, &txr->tx_bd_chain_tag);
	if (rc != 0) {
		device_printf(txr->sc->bce_dev, "Could not allocate "
		    "TX descriptor chain DMA tag!\n");
		return rc;
	}

	/* Allocate, load and record the physical address of each page. */
	for (i = 0; i < txr->tx_pages; i++) {
		bus_addr_t busaddr;

		rc = bus_dmamem_alloc(txr->tx_bd_chain_tag,
		    (void **)&txr->tx_bd_chain[i],
		    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
		    &txr->tx_bd_chain_map[i]);
		if (rc != 0) {
			device_printf(txr->sc->bce_dev,
			    "Could not allocate %dth TX descriptor "
			    "chain DMA memory!\n", i);
			return rc;
		}

		rc = bus_dmamap_load(txr->tx_bd_chain_tag,
		    txr->tx_bd_chain_map[i],
		    txr->tx_bd_chain[i],
		    BCE_TX_CHAIN_PAGE_SZ,
		    bce_dma_map_addr, &busaddr,
		    BUS_DMA_WAITOK);
		if (rc != 0) {
			/* Coherent memory must load synchronously. */
			if (rc == EINPROGRESS) {
				panic("%s coherent memory loading "
				    "is still in progress!",
				    txr->sc->arpcom.ac_if.if_xname);
			}
			device_printf(txr->sc->bce_dev, "Could not map %dth "
			    "TX descriptor chain DMA memory!\n", i);
			bus_dmamem_free(txr->tx_bd_chain_tag,
			    txr->tx_bd_chain[i],
			    txr->tx_bd_chain_map[i]);
			/* NULL so a later teardown skips this page. */
			txr->tx_bd_chain[i] = NULL;
			return rc;
		}

		txr->tx_bd_chain_paddr[i] = busaddr;
	}

	/* Create a DMA tag for TX mbufs. */
	rc = bus_dma_tag_create(txr->sc->parent_tag, 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    IP_MAXPACKET + sizeof(struct ether_vlan_header),
	    BCE_MAX_SEGMENTS, PAGE_SIZE,
	    BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
	    &txr->tx_mbuf_tag);
	if (rc != 0) {
		device_printf(txr->sc->bce_dev,
		    "Could not allocate TX mbuf DMA tag!\n");
		return rc;
	}

	/* Create DMA maps for the TX mbufs clusters. */
	for (i = 0; i < TOTAL_TX_BD(txr); i++) {
		rc = bus_dmamap_create(txr->tx_mbuf_tag,
		    BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
		    &txr->tx_bufs[i].tx_mbuf_map);
		if (rc != 0) {
			int j;

			/* Undo the maps created so far, then drop the tag. */
			for (j = 0; j < i; ++j) {
				bus_dmamap_destroy(txr->tx_mbuf_tag,
				    txr->tx_bufs[j].tx_mbuf_map);
			}
			bus_dma_tag_destroy(txr->tx_mbuf_tag);
			txr->tx_mbuf_tag = NULL;

			device_printf(txr->sc->bce_dev, "Unable to create "
			    "%dth TX mbuf DMA map!\n", i);
			return rc;
		}
	}
	return 0;
}
2237 
/****************************************************************************/
/* Allocate and initialize the DMA resources for one RX ring.               */
/*                                                                          */
/* Creates the RX buffer descriptor chain pages, a temporary DMA map used   */
/* for loading new RX mbufs, and a DMA map for every RX mbuf slot.  On      */
/* failure the partially created resources are left in place; presumably    */
/* the caller tears them down via bce_destroy_rx_ring() (TODO confirm       */
/* against the attach error path).                                          */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
static int
bce_create_rx_ring(struct bce_rx_ring *rxr)
{
	int pages, rc, i;

	lwkt_serialize_init(&rxr->rx_serialize);

	/* Pages must be a power of two within (0, RX_PAGES_MAX]. */
	pages = device_getenv_int(rxr->sc->bce_dev, "rx_pages", bce_rx_pages);
	if (pages <= 0 || pages > RX_PAGES_MAX || !powerof2(pages)) {
		device_printf(rxr->sc->bce_dev, "invalid # of RX pages\n");
		pages = RX_PAGES_DEFAULT;
	}
	rxr->rx_pages = pages;

	/* Bookkeeping arrays: one entry per chain page. */
	rxr->rx_bd_chain_map = kmalloc(sizeof(bus_dmamap_t) * rxr->rx_pages,
	    M_DEVBUF, M_WAITOK | M_ZERO);
	rxr->rx_bd_chain = kmalloc(sizeof(struct rx_bd *) * rxr->rx_pages,
	    M_DEVBUF, M_WAITOK | M_ZERO);
	rxr->rx_bd_chain_paddr = kmalloc(sizeof(bus_addr_t) * rxr->rx_pages,
	    M_DEVBUF, M_WAITOK | M_ZERO);

	/* One buffer bookkeeping entry per RX buffer descriptor. */
	rxr->rx_bufs = kmalloc_cachealign(
	    sizeof(struct bce_rx_buf) * TOTAL_RX_BD(rxr),
	    M_DEVBUF, M_WAITOK | M_ZERO);

	/*
	 * Create a DMA tag for the RX buffer descriptor chain,
	 * allocate and clear the  memory, and fetch the physical
	 * address of the blocks.
	 */
	rc = bus_dma_tag_create(rxr->sc->parent_tag, BCM_PAGE_SIZE, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    BCE_RX_CHAIN_PAGE_SZ, 1, BCE_RX_CHAIN_PAGE_SZ,
	    0, &rxr->rx_bd_chain_tag);
	if (rc != 0) {
		device_printf(rxr->sc->bce_dev, "Could not allocate "
		    "RX descriptor chain DMA tag!\n");
		return rc;
	}

	/* Allocate, load and record the physical address of each page. */
	for (i = 0; i < rxr->rx_pages; i++) {
		bus_addr_t busaddr;

		rc = bus_dmamem_alloc(rxr->rx_bd_chain_tag,
		    (void **)&rxr->rx_bd_chain[i],
		    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
		    &rxr->rx_bd_chain_map[i]);
		if (rc != 0) {
			device_printf(rxr->sc->bce_dev,
			    "Could not allocate %dth RX descriptor "
			    "chain DMA memory!\n", i);
			return rc;
		}

		rc = bus_dmamap_load(rxr->rx_bd_chain_tag,
		    rxr->rx_bd_chain_map[i],
		    rxr->rx_bd_chain[i],
		    BCE_RX_CHAIN_PAGE_SZ,
		    bce_dma_map_addr, &busaddr,
		    BUS_DMA_WAITOK);
		if (rc != 0) {
			/* Coherent memory must load synchronously. */
			if (rc == EINPROGRESS) {
				panic("%s coherent memory loading "
				    "is still in progress!",
				    rxr->sc->arpcom.ac_if.if_xname);
			}
			device_printf(rxr->sc->bce_dev,
			    "Could not map %dth RX descriptor "
			    "chain DMA memory!\n", i);
			bus_dmamem_free(rxr->rx_bd_chain_tag,
			    rxr->rx_bd_chain[i],
			    rxr->rx_bd_chain_map[i]);
			/* NULL so a later teardown skips this page. */
			rxr->rx_bd_chain[i] = NULL;
			return rc;
		}

		rxr->rx_bd_chain_paddr[i] = busaddr;
	}

	/* Create a DMA tag for RX mbufs. */
	rc = bus_dma_tag_create(rxr->sc->parent_tag, BCE_DMA_RX_ALIGN, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    MCLBYTES, 1, MCLBYTES,
	    BUS_DMA_ALLOCNOW | BUS_DMA_ALIGNED | BUS_DMA_WAITOK,
	    &rxr->rx_mbuf_tag);
	if (rc != 0) {
		device_printf(rxr->sc->bce_dev,
		    "Could not allocate RX mbuf DMA tag!\n");
		return rc;
	}

	/* Create tmp DMA map for RX mbuf clusters. */
	rc = bus_dmamap_create(rxr->rx_mbuf_tag, BUS_DMA_WAITOK,
	    &rxr->rx_mbuf_tmpmap);
	if (rc != 0) {
		bus_dma_tag_destroy(rxr->rx_mbuf_tag);
		rxr->rx_mbuf_tag = NULL;

		device_printf(rxr->sc->bce_dev,
		    "Could not create RX mbuf tmp DMA map!\n");
		return rc;
	}

	/* Create DMA maps for the RX mbuf clusters. */
	for (i = 0; i < TOTAL_RX_BD(rxr); i++) {
		rc = bus_dmamap_create(rxr->rx_mbuf_tag, BUS_DMA_WAITOK,
		    &rxr->rx_bufs[i].rx_mbuf_map);
		if (rc != 0) {
			int j;

			/* Undo the maps created so far, then drop the tag. */
			for (j = 0; j < i; ++j) {
				bus_dmamap_destroy(rxr->rx_mbuf_tag,
				    rxr->rx_bufs[j].rx_mbuf_map);
			}
			bus_dma_tag_destroy(rxr->rx_mbuf_tag);
			rxr->rx_mbuf_tag = NULL;

			device_printf(rxr->sc->bce_dev, "Unable to create "
			    "%dth RX mbuf DMA map!\n", i);
			return rc;
		}
	}
	return 0;
}
2362 
2363 /****************************************************************************/
2364 /* Allocate any DMA memory needed by the driver.                            */
2365 /*                                                                          */
2366 /* Allocates DMA memory needed for the various global structures needed by  */
2367 /* hardware.                                                                */
2368 /*                                                                          */
2369 /* Memory alignment requirements:                                           */
2370 /* -----------------+----------+----------+----------+----------+           */
2371 /*  Data Structure  |   5706   |   5708   |   5709   |   5716   |           */
2372 /* -----------------+----------+----------+----------+----------+           */
2373 /* Status Block     | 8 bytes  | 8 bytes  | 16 bytes | 16 bytes |           */
2374 /* Statistics Block | 8 bytes  | 8 bytes  | 16 bytes | 16 bytes |           */
2375 /* RX Buffers       | 16 bytes | 16 bytes | 16 bytes | 16 bytes |           */
2376 /* PG Buffers       |   none   |   none   |   none   |   none   |           */
2377 /* TX Buffers       |   none   |   none   |   none   |   none   |           */
2378 /* Chain Pages(1)   |   4KiB   |   4KiB   |   4KiB   |   4KiB   |           */
2379 /* Context Pages(1) |   N/A    |   N/A    |   4KiB   |   4KiB   |           */
2380 /* -----------------+----------+----------+----------+----------+           */
2381 /*                                                                          */
/* (1) Must align with CPU page size (BCM_PAGE_SIZE).                       */
2383 /*                                                                          */
2384 /* Returns:                                                                 */
2385 /*   0 for success, positive value for failure.                             */
2386 /****************************************************************************/
2387 static int
2388 bce_dma_alloc(struct bce_softc *sc)
2389 {
2390 	struct ifnet *ifp = &sc->arpcom.ac_if;
2391 	int i, rc = 0;
2392 	bus_addr_t busaddr, max_busaddr;
2393 	bus_size_t status_align, stats_align, status_size;
2394 
2395 	/*
2396 	 * The embedded PCIe to PCI-X bridge (EPB)
2397 	 * in the 5708 cannot address memory above
2398 	 * 40 bits (E7_5708CB1_23043 & E6_5708SB1_23043).
2399 	 */
2400 	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708)
2401 		max_busaddr = BCE_BUS_SPACE_MAXADDR;
2402 	else
2403 		max_busaddr = BUS_SPACE_MAXADDR;
2404 
2405 	/*
2406 	 * BCM5709 and BCM5716 uses host memory as cache for context memory.
2407 	 */
2408 	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
2409 	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
2410 		sc->ctx_pages = BCE_CTX_BLK_SZ / BCM_PAGE_SIZE;
2411 		if (sc->ctx_pages == 0)
2412 			sc->ctx_pages = 1;
2413 		if (sc->ctx_pages > BCE_CTX_PAGES) {
2414 			device_printf(sc->bce_dev, "excessive ctx pages %d\n",
2415 			    sc->ctx_pages);
2416 			return ENOMEM;
2417 		}
2418 		status_align = 16;
2419 		stats_align = 16;
2420 	} else {
2421 		status_align = 8;
2422 		stats_align = 8;
2423 	}
2424 
2425 	/*
2426 	 * Each MSI-X vector needs a status block; each status block
2427 	 * consumes 128bytes and is 128bytes aligned.
2428 	 */
2429 	if (sc->rx_ring_cnt > 1) {
2430 		status_size = BCE_MSIX_MAX * BCE_STATUS_BLK_MSIX_ALIGN;
2431 		status_align = BCE_STATUS_BLK_MSIX_ALIGN;
2432 	} else {
2433 		status_size = BCE_STATUS_BLK_SZ;
2434 	}
2435 
2436 	/*
2437 	 * Allocate the parent bus DMA tag appropriate for PCI.
2438 	 */
2439 	rc = bus_dma_tag_create(NULL, 1, BCE_DMA_BOUNDARY,
2440 				max_busaddr, BUS_SPACE_MAXADDR,
2441 				NULL, NULL,
2442 				BUS_SPACE_MAXSIZE_32BIT, 0,
2443 				BUS_SPACE_MAXSIZE_32BIT,
2444 				0, &sc->parent_tag);
2445 	if (rc != 0) {
2446 		if_printf(ifp, "Could not allocate parent DMA tag!\n");
2447 		return rc;
2448 	}
2449 
2450 	/*
2451 	 * Allocate status block.
2452 	 */
2453 	sc->status_block = bus_dmamem_coherent_any(sc->parent_tag,
2454 				status_align, status_size,
2455 				BUS_DMA_WAITOK | BUS_DMA_ZERO,
2456 				&sc->status_tag, &sc->status_map,
2457 				&sc->status_block_paddr);
2458 	if (sc->status_block == NULL) {
2459 		if_printf(ifp, "Could not allocate status block!\n");
2460 		return ENOMEM;
2461 	}
2462 
2463 	/*
2464 	 * Allocate statistics block.
2465 	 */
2466 	sc->stats_block = bus_dmamem_coherent_any(sc->parent_tag,
2467 				stats_align, BCE_STATS_BLK_SZ,
2468 				BUS_DMA_WAITOK | BUS_DMA_ZERO,
2469 				&sc->stats_tag, &sc->stats_map,
2470 				&sc->stats_block_paddr);
2471 	if (sc->stats_block == NULL) {
2472 		if_printf(ifp, "Could not allocate statistics block!\n");
2473 		return ENOMEM;
2474 	}
2475 
2476 	/*
2477 	 * Allocate context block, if needed
2478 	 */
2479 	if (sc->ctx_pages != 0) {
2480 		rc = bus_dma_tag_create(sc->parent_tag, BCM_PAGE_SIZE, 0,
2481 					BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
2482 					NULL, NULL,
2483 					BCM_PAGE_SIZE, 1, BCM_PAGE_SIZE,
2484 					0, &sc->ctx_tag);
2485 		if (rc != 0) {
2486 			if_printf(ifp, "Could not allocate "
2487 				  "context block DMA tag!\n");
2488 			return rc;
2489 		}
2490 
2491 		for (i = 0; i < sc->ctx_pages; i++) {
2492 			rc = bus_dmamem_alloc(sc->ctx_tag,
2493 					      (void **)&sc->ctx_block[i],
2494 					      BUS_DMA_WAITOK | BUS_DMA_ZERO |
2495 					      BUS_DMA_COHERENT,
2496 					      &sc->ctx_map[i]);
2497 			if (rc != 0) {
2498 				if_printf(ifp, "Could not allocate %dth context "
2499 					  "DMA memory!\n", i);
2500 				return rc;
2501 			}
2502 
2503 			rc = bus_dmamap_load(sc->ctx_tag, sc->ctx_map[i],
2504 					     sc->ctx_block[i], BCM_PAGE_SIZE,
2505 					     bce_dma_map_addr, &busaddr,
2506 					     BUS_DMA_WAITOK);
2507 			if (rc != 0) {
2508 				if (rc == EINPROGRESS) {
2509 					panic("%s coherent memory loading "
2510 					      "is still in progress!", ifp->if_xname);
2511 				}
2512 				if_printf(ifp, "Could not map %dth context "
2513 					  "DMA memory!\n", i);
2514 				bus_dmamem_free(sc->ctx_tag, sc->ctx_block[i],
2515 						sc->ctx_map[i]);
2516 				sc->ctx_block[i] = NULL;
2517 				return rc;
2518 			}
2519 			sc->ctx_paddr[i] = busaddr;
2520 		}
2521 	}
2522 
2523 	sc->tx_rings = kmalloc_cachealign(
2524 	    sizeof(struct bce_tx_ring) * sc->tx_ring_cnt, M_DEVBUF,
2525 	    M_WAITOK | M_ZERO);
2526 	for (i = 0; i < sc->tx_ring_cnt; ++i) {
2527 		sc->tx_rings[i].sc = sc;
2528 		if (i == 0) {
2529 			sc->tx_rings[i].tx_cid = TX_CID;
2530 			sc->tx_rings[i].tx_hw_cons =
2531 			    &sc->status_block->status_tx_quick_consumer_index0;
2532 		} else {
2533 			struct status_block_msix *sblk =
2534 			    (struct status_block_msix *)
2535 			    (((uint8_t *)(sc->status_block)) +
2536 			     (i * BCE_STATUS_BLK_MSIX_ALIGN));
2537 
2538 			sc->tx_rings[i].tx_cid = TX_TSS_CID + i - 1;
2539 			sc->tx_rings[i].tx_hw_cons =
2540 			    &sblk->status_tx_quick_consumer_index;
2541 		}
2542 
2543 		rc = bce_create_tx_ring(&sc->tx_rings[i]);
2544 		if (rc != 0) {
2545 			device_printf(sc->bce_dev,
2546 			    "can't create %dth tx ring\n", i);
2547 			return rc;
2548 		}
2549 	}
2550 
2551 	sc->rx_rings = kmalloc_cachealign(
2552 	    sizeof(struct bce_rx_ring) * sc->rx_ring_cnt, M_DEVBUF,
2553 	    M_WAITOK | M_ZERO);
2554 	for (i = 0; i < sc->rx_ring_cnt; ++i) {
2555 		sc->rx_rings[i].sc = sc;
2556 		sc->rx_rings[i].idx = i;
2557 		if (i == 0) {
2558 			sc->rx_rings[i].rx_cid = RX_CID;
2559 			sc->rx_rings[i].rx_hw_cons =
2560 			    &sc->status_block->status_rx_quick_consumer_index0;
2561 			sc->rx_rings[i].hw_status_idx =
2562 			    &sc->status_block->status_idx;
2563 		} else {
2564 			struct status_block_msix *sblk =
2565 			    (struct status_block_msix *)
2566 			    (((uint8_t *)(sc->status_block)) +
2567 			     (i * BCE_STATUS_BLK_MSIX_ALIGN));
2568 
2569 			sc->rx_rings[i].rx_cid = RX_RSS_CID + i - 1;
2570 			sc->rx_rings[i].rx_hw_cons =
2571 			    &sblk->status_rx_quick_consumer_index;
2572 			sc->rx_rings[i].hw_status_idx = &sblk->status_idx;
2573 		}
2574 
2575 		rc = bce_create_rx_ring(&sc->rx_rings[i]);
2576 		if (rc != 0) {
2577 			device_printf(sc->bce_dev,
2578 			    "can't create %dth rx ring\n", i);
2579 			return rc;
2580 		}
2581 	}
2582 
2583 	return 0;
2584 }
2585 
2586 /****************************************************************************/
2587 /* Firmware synchronization.                                                */
2588 /*                                                                          */
2589 /* Before performing certain events such as a chip reset, synchronize with  */
2590 /* the firmware first.                                                      */
2591 /*                                                                          */
2592 /* Returns:                                                                 */
2593 /*   0 for success, positive value for failure.                             */
2594 /****************************************************************************/
2595 static int
2596 bce_fw_sync(struct bce_softc *sc, uint32_t msg_data)
2597 {
2598 	int i, rc = 0;
2599 	uint32_t val;
2600 
2601 	/* Don't waste any time if we've timed out before. */
2602 	if (sc->bce_fw_timed_out)
2603 		return EBUSY;
2604 
2605 	/* Increment the message sequence number. */
2606 	sc->bce_fw_wr_seq++;
2607 	msg_data |= sc->bce_fw_wr_seq;
2608 
2609 	/* Send the message to the bootcode driver mailbox. */
2610 	bce_shmem_wr(sc, BCE_DRV_MB, msg_data);
2611 
2612 	/* Wait for the bootcode to acknowledge the message. */
2613 	for (i = 0; i < FW_ACK_TIME_OUT_MS; i++) {
2614 		/* Check for a response in the bootcode firmware mailbox. */
2615 		val = bce_shmem_rd(sc, BCE_FW_MB);
2616 		if ((val & BCE_FW_MSG_ACK) == (msg_data & BCE_DRV_MSG_SEQ))
2617 			break;
2618 		DELAY(1000);
2619 	}
2620 
2621 	/* If we've timed out, tell the bootcode that we've stopped waiting. */
2622 	if ((val & BCE_FW_MSG_ACK) != (msg_data & BCE_DRV_MSG_SEQ) &&
2623 	    (msg_data & BCE_DRV_MSG_DATA) != BCE_DRV_MSG_DATA_WAIT0) {
2624 		if_printf(&sc->arpcom.ac_if,
2625 			  "Firmware synchronization timeout! "
2626 			  "msg_data = 0x%08X\n", msg_data);
2627 
2628 		msg_data &= ~BCE_DRV_MSG_CODE;
2629 		msg_data |= BCE_DRV_MSG_CODE_FW_TIMEOUT;
2630 
2631 		bce_shmem_wr(sc, BCE_DRV_MB, msg_data);
2632 
2633 		sc->bce_fw_timed_out = 1;
2634 		rc = EBUSY;
2635 	}
2636 	return rc;
2637 }
2638 
2639 /****************************************************************************/
2640 /* Load Receive Virtual 2 Physical (RV2P) processor firmware.               */
2641 /*                                                                          */
2642 /* Returns:                                                                 */
2643 /*   Nothing.                                                               */
2644 /****************************************************************************/
2645 static void
2646 bce_load_rv2p_fw(struct bce_softc *sc, uint32_t *rv2p_code,
2647 		 uint32_t rv2p_code_len, uint32_t rv2p_proc)
2648 {
2649 	int i;
2650 	uint32_t val;
2651 
2652 	for (i = 0; i < rv2p_code_len; i += 8) {
2653 		REG_WR(sc, BCE_RV2P_INSTR_HIGH, *rv2p_code);
2654 		rv2p_code++;
2655 		REG_WR(sc, BCE_RV2P_INSTR_LOW, *rv2p_code);
2656 		rv2p_code++;
2657 
2658 		if (rv2p_proc == RV2P_PROC1) {
2659 			val = (i / 8) | BCE_RV2P_PROC1_ADDR_CMD_RDWR;
2660 			REG_WR(sc, BCE_RV2P_PROC1_ADDR_CMD, val);
2661 		} else {
2662 			val = (i / 8) | BCE_RV2P_PROC2_ADDR_CMD_RDWR;
2663 			REG_WR(sc, BCE_RV2P_PROC2_ADDR_CMD, val);
2664 		}
2665 	}
2666 
2667 	/* Reset the processor, un-stall is done later. */
2668 	if (rv2p_proc == RV2P_PROC1)
2669 		REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC1_RESET);
2670 	else
2671 		REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC2_RESET);
2672 }
2673 
2674 /****************************************************************************/
2675 /* Load RISC processor firmware.                                            */
2676 /*                                                                          */
2677 /* Loads firmware from the file if_bcefw.h into the scratchpad memory       */
2678 /* associated with a particular processor.                                  */
2679 /*                                                                          */
2680 /* Returns:                                                                 */
2681 /*   Nothing.                                                               */
2682 /****************************************************************************/
2683 static void
2684 bce_load_cpu_fw(struct bce_softc *sc, struct cpu_reg *cpu_reg,
2685 		struct fw_info *fw)
2686 {
2687 	uint32_t offset;
2688 	int j;
2689 
2690 	bce_halt_cpu(sc, cpu_reg);
2691 
2692 	/* Load the Text area. */
2693 	offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2694 	if (fw->text) {
2695 		for (j = 0; j < (fw->text_len / 4); j++, offset += 4)
2696 			REG_WR_IND(sc, offset, fw->text[j]);
2697 	}
2698 
2699 	/* Load the Data area. */
2700 	offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2701 	if (fw->data) {
2702 		for (j = 0; j < (fw->data_len / 4); j++, offset += 4)
2703 			REG_WR_IND(sc, offset, fw->data[j]);
2704 	}
2705 
2706 	/* Load the SBSS area. */
2707 	offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2708 	if (fw->sbss) {
2709 		for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4)
2710 			REG_WR_IND(sc, offset, fw->sbss[j]);
2711 	}
2712 
2713 	/* Load the BSS area. */
2714 	offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2715 	if (fw->bss) {
2716 		for (j = 0; j < (fw->bss_len/4); j++, offset += 4)
2717 			REG_WR_IND(sc, offset, fw->bss[j]);
2718 	}
2719 
2720 	/* Load the Read-Only area. */
2721 	offset = cpu_reg->spad_base +
2722 		(fw->rodata_addr - cpu_reg->mips_view_base);
2723 	if (fw->rodata) {
2724 		for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4)
2725 			REG_WR_IND(sc, offset, fw->rodata[j]);
2726 	}
2727 
2728 	/* Clear the pre-fetch instruction and set the FW start address. */
2729 	REG_WR_IND(sc, cpu_reg->inst, 0);
2730 	REG_WR_IND(sc, cpu_reg->pc, fw->start_addr);
2731 }
2732 
2733 /****************************************************************************/
2734 /* Starts the RISC processor.                                               */
2735 /*                                                                          */
2736 /* Assumes the CPU starting address has already been set.                   */
2737 /*                                                                          */
2738 /* Returns:                                                                 */
2739 /*   Nothing.                                                               */
2740 /****************************************************************************/
2741 static void
2742 bce_start_cpu(struct bce_softc *sc, struct cpu_reg *cpu_reg)
2743 {
2744 	uint32_t val;
2745 
2746 	/* Start the CPU. */
2747 	val = REG_RD_IND(sc, cpu_reg->mode);
2748 	val &= ~cpu_reg->mode_value_halt;
2749 	REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
2750 	REG_WR_IND(sc, cpu_reg->mode, val);
2751 }
2752 
2753 /****************************************************************************/
2754 /* Halts the RISC processor.                                                */
2755 /*                                                                          */
2756 /* Returns:                                                                 */
2757 /*   Nothing.                                                               */
2758 /****************************************************************************/
2759 static void
2760 bce_halt_cpu(struct bce_softc *sc, struct cpu_reg *cpu_reg)
2761 {
2762 	uint32_t val;
2763 
2764 	/* Halt the CPU. */
2765 	val = REG_RD_IND(sc, cpu_reg->mode);
2766 	val |= cpu_reg->mode_value_halt;
2767 	REG_WR_IND(sc, cpu_reg->mode, val);
2768 	REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
2769 }
2770 
2771 /****************************************************************************/
2772 /* Start the RX CPU.                                                        */
2773 /*                                                                          */
2774 /* Returns:                                                                 */
2775 /*   Nothing.                                                               */
2776 /****************************************************************************/
2777 static void
2778 bce_start_rxp_cpu(struct bce_softc *sc)
2779 {
2780 	struct cpu_reg cpu_reg;
2781 
2782 	cpu_reg.mode = BCE_RXP_CPU_MODE;
2783 	cpu_reg.mode_value_halt = BCE_RXP_CPU_MODE_SOFT_HALT;
2784 	cpu_reg.mode_value_sstep = BCE_RXP_CPU_MODE_STEP_ENA;
2785 	cpu_reg.state = BCE_RXP_CPU_STATE;
2786 	cpu_reg.state_value_clear = 0xffffff;
2787 	cpu_reg.gpr0 = BCE_RXP_CPU_REG_FILE;
2788 	cpu_reg.evmask = BCE_RXP_CPU_EVENT_MASK;
2789 	cpu_reg.pc = BCE_RXP_CPU_PROGRAM_COUNTER;
2790 	cpu_reg.inst = BCE_RXP_CPU_INSTRUCTION;
2791 	cpu_reg.bp = BCE_RXP_CPU_HW_BREAKPOINT;
2792 	cpu_reg.spad_base = BCE_RXP_SCRATCH;
2793 	cpu_reg.mips_view_base = 0x8000000;
2794 
2795 	bce_start_cpu(sc, &cpu_reg);
2796 }
2797 
2798 /****************************************************************************/
2799 /* Initialize the RX CPU.                                                   */
2800 /*                                                                          */
2801 /* Returns:                                                                 */
2802 /*   Nothing.                                                               */
2803 /****************************************************************************/
2804 static void
2805 bce_init_rxp_cpu(struct bce_softc *sc)
2806 {
2807 	struct cpu_reg cpu_reg;
2808 	struct fw_info fw;
2809 
2810 	cpu_reg.mode = BCE_RXP_CPU_MODE;
2811 	cpu_reg.mode_value_halt = BCE_RXP_CPU_MODE_SOFT_HALT;
2812 	cpu_reg.mode_value_sstep = BCE_RXP_CPU_MODE_STEP_ENA;
2813 	cpu_reg.state = BCE_RXP_CPU_STATE;
2814 	cpu_reg.state_value_clear = 0xffffff;
2815 	cpu_reg.gpr0 = BCE_RXP_CPU_REG_FILE;
2816 	cpu_reg.evmask = BCE_RXP_CPU_EVENT_MASK;
2817 	cpu_reg.pc = BCE_RXP_CPU_PROGRAM_COUNTER;
2818 	cpu_reg.inst = BCE_RXP_CPU_INSTRUCTION;
2819 	cpu_reg.bp = BCE_RXP_CPU_HW_BREAKPOINT;
2820 	cpu_reg.spad_base = BCE_RXP_SCRATCH;
2821 	cpu_reg.mips_view_base = 0x8000000;
2822 
2823 	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
2824 	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
2825  		fw.ver_major = bce_RXP_b09FwReleaseMajor;
2826 		fw.ver_minor = bce_RXP_b09FwReleaseMinor;
2827 		fw.ver_fix = bce_RXP_b09FwReleaseFix;
2828 		fw.start_addr = bce_RXP_b09FwStartAddr;
2829 
2830 		fw.text_addr = bce_RXP_b09FwTextAddr;
2831 		fw.text_len = bce_RXP_b09FwTextLen;
2832 		fw.text_index = 0;
2833 		fw.text = bce_RXP_b09FwText;
2834 
2835 		fw.data_addr = bce_RXP_b09FwDataAddr;
2836 		fw.data_len = bce_RXP_b09FwDataLen;
2837 		fw.data_index = 0;
2838 		fw.data = bce_RXP_b09FwData;
2839 
2840 		fw.sbss_addr = bce_RXP_b09FwSbssAddr;
2841 		fw.sbss_len = bce_RXP_b09FwSbssLen;
2842 		fw.sbss_index = 0;
2843 		fw.sbss = bce_RXP_b09FwSbss;
2844 
2845 		fw.bss_addr = bce_RXP_b09FwBssAddr;
2846 		fw.bss_len = bce_RXP_b09FwBssLen;
2847 		fw.bss_index = 0;
2848 		fw.bss = bce_RXP_b09FwBss;
2849 
2850 		fw.rodata_addr = bce_RXP_b09FwRodataAddr;
2851 		fw.rodata_len = bce_RXP_b09FwRodataLen;
2852 		fw.rodata_index = 0;
2853 		fw.rodata = bce_RXP_b09FwRodata;
2854 	} else {
2855 		fw.ver_major = bce_RXP_b06FwReleaseMajor;
2856 		fw.ver_minor = bce_RXP_b06FwReleaseMinor;
2857 		fw.ver_fix = bce_RXP_b06FwReleaseFix;
2858 		fw.start_addr = bce_RXP_b06FwStartAddr;
2859 
2860 		fw.text_addr = bce_RXP_b06FwTextAddr;
2861 		fw.text_len = bce_RXP_b06FwTextLen;
2862 		fw.text_index = 0;
2863 		fw.text = bce_RXP_b06FwText;
2864 
2865 		fw.data_addr = bce_RXP_b06FwDataAddr;
2866 		fw.data_len = bce_RXP_b06FwDataLen;
2867 		fw.data_index = 0;
2868 		fw.data = bce_RXP_b06FwData;
2869 
2870 		fw.sbss_addr = bce_RXP_b06FwSbssAddr;
2871 		fw.sbss_len = bce_RXP_b06FwSbssLen;
2872 		fw.sbss_index = 0;
2873 		fw.sbss = bce_RXP_b06FwSbss;
2874 
2875 		fw.bss_addr = bce_RXP_b06FwBssAddr;
2876 		fw.bss_len = bce_RXP_b06FwBssLen;
2877 		fw.bss_index = 0;
2878 		fw.bss = bce_RXP_b06FwBss;
2879 
2880 		fw.rodata_addr = bce_RXP_b06FwRodataAddr;
2881 		fw.rodata_len = bce_RXP_b06FwRodataLen;
2882 		fw.rodata_index = 0;
2883 		fw.rodata = bce_RXP_b06FwRodata;
2884 	}
2885 
2886 	bce_load_cpu_fw(sc, &cpu_reg, &fw);
2887 	/* Delay RXP start until initialization is complete. */
2888 }
2889 
2890 /****************************************************************************/
2891 /* Initialize the TX CPU.                                                   */
2892 /*                                                                          */
2893 /* Returns:                                                                 */
2894 /*   Nothing.                                                               */
2895 /****************************************************************************/
2896 static void
2897 bce_init_txp_cpu(struct bce_softc *sc)
2898 {
2899 	struct cpu_reg cpu_reg;
2900 	struct fw_info fw;
2901 
2902 	cpu_reg.mode = BCE_TXP_CPU_MODE;
2903 	cpu_reg.mode_value_halt = BCE_TXP_CPU_MODE_SOFT_HALT;
2904 	cpu_reg.mode_value_sstep = BCE_TXP_CPU_MODE_STEP_ENA;
2905 	cpu_reg.state = BCE_TXP_CPU_STATE;
2906 	cpu_reg.state_value_clear = 0xffffff;
2907 	cpu_reg.gpr0 = BCE_TXP_CPU_REG_FILE;
2908 	cpu_reg.evmask = BCE_TXP_CPU_EVENT_MASK;
2909 	cpu_reg.pc = BCE_TXP_CPU_PROGRAM_COUNTER;
2910 	cpu_reg.inst = BCE_TXP_CPU_INSTRUCTION;
2911 	cpu_reg.bp = BCE_TXP_CPU_HW_BREAKPOINT;
2912 	cpu_reg.spad_base = BCE_TXP_SCRATCH;
2913 	cpu_reg.mips_view_base = 0x8000000;
2914 
2915 	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
2916 	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
2917 		fw.ver_major = bce_TXP_b09FwReleaseMajor;
2918 		fw.ver_minor = bce_TXP_b09FwReleaseMinor;
2919 		fw.ver_fix = bce_TXP_b09FwReleaseFix;
2920 		fw.start_addr = bce_TXP_b09FwStartAddr;
2921 
2922 		fw.text_addr = bce_TXP_b09FwTextAddr;
2923 		fw.text_len = bce_TXP_b09FwTextLen;
2924 		fw.text_index = 0;
2925 		fw.text = bce_TXP_b09FwText;
2926 
2927 		fw.data_addr = bce_TXP_b09FwDataAddr;
2928 		fw.data_len = bce_TXP_b09FwDataLen;
2929 		fw.data_index = 0;
2930 		fw.data = bce_TXP_b09FwData;
2931 
2932 		fw.sbss_addr = bce_TXP_b09FwSbssAddr;
2933 		fw.sbss_len = bce_TXP_b09FwSbssLen;
2934 		fw.sbss_index = 0;
2935 		fw.sbss = bce_TXP_b09FwSbss;
2936 
2937 		fw.bss_addr = bce_TXP_b09FwBssAddr;
2938 		fw.bss_len = bce_TXP_b09FwBssLen;
2939 		fw.bss_index = 0;
2940 		fw.bss = bce_TXP_b09FwBss;
2941 
2942 		fw.rodata_addr = bce_TXP_b09FwRodataAddr;
2943 		fw.rodata_len = bce_TXP_b09FwRodataLen;
2944 		fw.rodata_index = 0;
2945 		fw.rodata = bce_TXP_b09FwRodata;
2946 	} else {
2947 		fw.ver_major = bce_TXP_b06FwReleaseMajor;
2948 		fw.ver_minor = bce_TXP_b06FwReleaseMinor;
2949 		fw.ver_fix = bce_TXP_b06FwReleaseFix;
2950 		fw.start_addr = bce_TXP_b06FwStartAddr;
2951 
2952 		fw.text_addr = bce_TXP_b06FwTextAddr;
2953 		fw.text_len = bce_TXP_b06FwTextLen;
2954 		fw.text_index = 0;
2955 		fw.text = bce_TXP_b06FwText;
2956 
2957 		fw.data_addr = bce_TXP_b06FwDataAddr;
2958 		fw.data_len = bce_TXP_b06FwDataLen;
2959 		fw.data_index = 0;
2960 		fw.data = bce_TXP_b06FwData;
2961 
2962 		fw.sbss_addr = bce_TXP_b06FwSbssAddr;
2963 		fw.sbss_len = bce_TXP_b06FwSbssLen;
2964 		fw.sbss_index = 0;
2965 		fw.sbss = bce_TXP_b06FwSbss;
2966 
2967 		fw.bss_addr = bce_TXP_b06FwBssAddr;
2968 		fw.bss_len = bce_TXP_b06FwBssLen;
2969 		fw.bss_index = 0;
2970 		fw.bss = bce_TXP_b06FwBss;
2971 
2972 		fw.rodata_addr = bce_TXP_b06FwRodataAddr;
2973 		fw.rodata_len = bce_TXP_b06FwRodataLen;
2974 		fw.rodata_index = 0;
2975 		fw.rodata = bce_TXP_b06FwRodata;
2976 	}
2977 
2978 	bce_load_cpu_fw(sc, &cpu_reg, &fw);
2979 	bce_start_cpu(sc, &cpu_reg);
2980 }
2981 
2982 /****************************************************************************/
2983 /* Initialize the TPAT CPU.                                                 */
2984 /*                                                                          */
2985 /* Returns:                                                                 */
2986 /*   Nothing.                                                               */
2987 /****************************************************************************/
2988 static void
2989 bce_init_tpat_cpu(struct bce_softc *sc)
2990 {
2991 	struct cpu_reg cpu_reg;
2992 	struct fw_info fw;
2993 
2994 	cpu_reg.mode = BCE_TPAT_CPU_MODE;
2995 	cpu_reg.mode_value_halt = BCE_TPAT_CPU_MODE_SOFT_HALT;
2996 	cpu_reg.mode_value_sstep = BCE_TPAT_CPU_MODE_STEP_ENA;
2997 	cpu_reg.state = BCE_TPAT_CPU_STATE;
2998 	cpu_reg.state_value_clear = 0xffffff;
2999 	cpu_reg.gpr0 = BCE_TPAT_CPU_REG_FILE;
3000 	cpu_reg.evmask = BCE_TPAT_CPU_EVENT_MASK;
3001 	cpu_reg.pc = BCE_TPAT_CPU_PROGRAM_COUNTER;
3002 	cpu_reg.inst = BCE_TPAT_CPU_INSTRUCTION;
3003 	cpu_reg.bp = BCE_TPAT_CPU_HW_BREAKPOINT;
3004 	cpu_reg.spad_base = BCE_TPAT_SCRATCH;
3005 	cpu_reg.mips_view_base = 0x8000000;
3006 
3007 	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
3008 	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
3009 		fw.ver_major = bce_TPAT_b09FwReleaseMajor;
3010 		fw.ver_minor = bce_TPAT_b09FwReleaseMinor;
3011 		fw.ver_fix = bce_TPAT_b09FwReleaseFix;
3012 		fw.start_addr = bce_TPAT_b09FwStartAddr;
3013 
3014 		fw.text_addr = bce_TPAT_b09FwTextAddr;
3015 		fw.text_len = bce_TPAT_b09FwTextLen;
3016 		fw.text_index = 0;
3017 		fw.text = bce_TPAT_b09FwText;
3018 
3019 		fw.data_addr = bce_TPAT_b09FwDataAddr;
3020 		fw.data_len = bce_TPAT_b09FwDataLen;
3021 		fw.data_index = 0;
3022 		fw.data = bce_TPAT_b09FwData;
3023 
3024 		fw.sbss_addr = bce_TPAT_b09FwSbssAddr;
3025 		fw.sbss_len = bce_TPAT_b09FwSbssLen;
3026 		fw.sbss_index = 0;
3027 		fw.sbss = bce_TPAT_b09FwSbss;
3028 
3029 		fw.bss_addr = bce_TPAT_b09FwBssAddr;
3030 		fw.bss_len = bce_TPAT_b09FwBssLen;
3031 		fw.bss_index = 0;
3032 		fw.bss = bce_TPAT_b09FwBss;
3033 
3034 		fw.rodata_addr = bce_TPAT_b09FwRodataAddr;
3035 		fw.rodata_len = bce_TPAT_b09FwRodataLen;
3036 		fw.rodata_index = 0;
3037 		fw.rodata = bce_TPAT_b09FwRodata;
3038 	} else {
3039 		fw.ver_major = bce_TPAT_b06FwReleaseMajor;
3040 		fw.ver_minor = bce_TPAT_b06FwReleaseMinor;
3041 		fw.ver_fix = bce_TPAT_b06FwReleaseFix;
3042 		fw.start_addr = bce_TPAT_b06FwStartAddr;
3043 
3044 		fw.text_addr = bce_TPAT_b06FwTextAddr;
3045 		fw.text_len = bce_TPAT_b06FwTextLen;
3046 		fw.text_index = 0;
3047 		fw.text = bce_TPAT_b06FwText;
3048 
3049 		fw.data_addr = bce_TPAT_b06FwDataAddr;
3050 		fw.data_len = bce_TPAT_b06FwDataLen;
3051 		fw.data_index = 0;
3052 		fw.data = bce_TPAT_b06FwData;
3053 
3054 		fw.sbss_addr = bce_TPAT_b06FwSbssAddr;
3055 		fw.sbss_len = bce_TPAT_b06FwSbssLen;
3056 		fw.sbss_index = 0;
3057 		fw.sbss = bce_TPAT_b06FwSbss;
3058 
3059 		fw.bss_addr = bce_TPAT_b06FwBssAddr;
3060 		fw.bss_len = bce_TPAT_b06FwBssLen;
3061 		fw.bss_index = 0;
3062 		fw.bss = bce_TPAT_b06FwBss;
3063 
3064 		fw.rodata_addr = bce_TPAT_b06FwRodataAddr;
3065 		fw.rodata_len = bce_TPAT_b06FwRodataLen;
3066 		fw.rodata_index = 0;
3067 		fw.rodata = bce_TPAT_b06FwRodata;
3068 	}
3069 
3070 	bce_load_cpu_fw(sc, &cpu_reg, &fw);
3071 	bce_start_cpu(sc, &cpu_reg);
3072 }
3073 
3074 /****************************************************************************/
3075 /* Initialize the CP CPU.                                                   */
3076 /*                                                                          */
3077 /* Returns:                                                                 */
3078 /*   Nothing.                                                               */
3079 /****************************************************************************/
3080 static void
3081 bce_init_cp_cpu(struct bce_softc *sc)
3082 {
3083 	struct cpu_reg cpu_reg;
3084 	struct fw_info fw;
3085 
3086 	cpu_reg.mode = BCE_CP_CPU_MODE;
3087 	cpu_reg.mode_value_halt = BCE_CP_CPU_MODE_SOFT_HALT;
3088 	cpu_reg.mode_value_sstep = BCE_CP_CPU_MODE_STEP_ENA;
3089 	cpu_reg.state = BCE_CP_CPU_STATE;
3090 	cpu_reg.state_value_clear = 0xffffff;
3091 	cpu_reg.gpr0 = BCE_CP_CPU_REG_FILE;
3092 	cpu_reg.evmask = BCE_CP_CPU_EVENT_MASK;
3093 	cpu_reg.pc = BCE_CP_CPU_PROGRAM_COUNTER;
3094 	cpu_reg.inst = BCE_CP_CPU_INSTRUCTION;
3095 	cpu_reg.bp = BCE_CP_CPU_HW_BREAKPOINT;
3096 	cpu_reg.spad_base = BCE_CP_SCRATCH;
3097 	cpu_reg.mips_view_base = 0x8000000;
3098 
3099 	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
3100 	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
3101 		fw.ver_major = bce_CP_b09FwReleaseMajor;
3102 		fw.ver_minor = bce_CP_b09FwReleaseMinor;
3103 		fw.ver_fix = bce_CP_b09FwReleaseFix;
3104 		fw.start_addr = bce_CP_b09FwStartAddr;
3105 
3106 		fw.text_addr = bce_CP_b09FwTextAddr;
3107 		fw.text_len = bce_CP_b09FwTextLen;
3108 		fw.text_index = 0;
3109 		fw.text = bce_CP_b09FwText;
3110 
3111 		fw.data_addr = bce_CP_b09FwDataAddr;
3112 		fw.data_len = bce_CP_b09FwDataLen;
3113 		fw.data_index = 0;
3114 		fw.data = bce_CP_b09FwData;
3115 
3116 		fw.sbss_addr = bce_CP_b09FwSbssAddr;
3117 		fw.sbss_len = bce_CP_b09FwSbssLen;
3118 		fw.sbss_index = 0;
3119 		fw.sbss = bce_CP_b09FwSbss;
3120 
3121 		fw.bss_addr = bce_CP_b09FwBssAddr;
3122 		fw.bss_len = bce_CP_b09FwBssLen;
3123 		fw.bss_index = 0;
3124 		fw.bss = bce_CP_b09FwBss;
3125 
3126 		fw.rodata_addr = bce_CP_b09FwRodataAddr;
3127 		fw.rodata_len = bce_CP_b09FwRodataLen;
3128 		fw.rodata_index = 0;
3129 		fw.rodata = bce_CP_b09FwRodata;
3130 	} else {
3131 		fw.ver_major = bce_CP_b06FwReleaseMajor;
3132 		fw.ver_minor = bce_CP_b06FwReleaseMinor;
3133 		fw.ver_fix = bce_CP_b06FwReleaseFix;
3134 		fw.start_addr = bce_CP_b06FwStartAddr;
3135 
3136 		fw.text_addr = bce_CP_b06FwTextAddr;
3137 		fw.text_len = bce_CP_b06FwTextLen;
3138 		fw.text_index = 0;
3139 		fw.text = bce_CP_b06FwText;
3140 
3141 		fw.data_addr = bce_CP_b06FwDataAddr;
3142 		fw.data_len = bce_CP_b06FwDataLen;
3143 		fw.data_index = 0;
3144 		fw.data = bce_CP_b06FwData;
3145 
3146 		fw.sbss_addr = bce_CP_b06FwSbssAddr;
3147 		fw.sbss_len = bce_CP_b06FwSbssLen;
3148 		fw.sbss_index = 0;
3149 		fw.sbss = bce_CP_b06FwSbss;
3150 
3151 		fw.bss_addr = bce_CP_b06FwBssAddr;
3152 		fw.bss_len = bce_CP_b06FwBssLen;
3153 		fw.bss_index = 0;
3154 		fw.bss = bce_CP_b06FwBss;
3155 
3156 		fw.rodata_addr = bce_CP_b06FwRodataAddr;
3157 		fw.rodata_len = bce_CP_b06FwRodataLen;
3158 		fw.rodata_index = 0;
3159 		fw.rodata = bce_CP_b06FwRodata;
3160 	}
3161 
3162 	bce_load_cpu_fw(sc, &cpu_reg, &fw);
3163 	bce_start_cpu(sc, &cpu_reg);
3164 }
3165 
3166 /****************************************************************************/
/* Initialize the COM CPU.                                                  */
3168 /*                                                                          */
3169 /* Returns:                                                                 */
3170 /*   Nothing.                                                               */
3171 /****************************************************************************/
3172 static void
3173 bce_init_com_cpu(struct bce_softc *sc)
3174 {
3175 	struct cpu_reg cpu_reg;
3176 	struct fw_info fw;
3177 
3178 	cpu_reg.mode = BCE_COM_CPU_MODE;
3179 	cpu_reg.mode_value_halt = BCE_COM_CPU_MODE_SOFT_HALT;
3180 	cpu_reg.mode_value_sstep = BCE_COM_CPU_MODE_STEP_ENA;
3181 	cpu_reg.state = BCE_COM_CPU_STATE;
3182 	cpu_reg.state_value_clear = 0xffffff;
3183 	cpu_reg.gpr0 = BCE_COM_CPU_REG_FILE;
3184 	cpu_reg.evmask = BCE_COM_CPU_EVENT_MASK;
3185 	cpu_reg.pc = BCE_COM_CPU_PROGRAM_COUNTER;
3186 	cpu_reg.inst = BCE_COM_CPU_INSTRUCTION;
3187 	cpu_reg.bp = BCE_COM_CPU_HW_BREAKPOINT;
3188 	cpu_reg.spad_base = BCE_COM_SCRATCH;
3189 	cpu_reg.mips_view_base = 0x8000000;
3190 
3191 	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
3192 	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
3193 		fw.ver_major = bce_COM_b09FwReleaseMajor;
3194 		fw.ver_minor = bce_COM_b09FwReleaseMinor;
3195 		fw.ver_fix = bce_COM_b09FwReleaseFix;
3196 		fw.start_addr = bce_COM_b09FwStartAddr;
3197 
3198 		fw.text_addr = bce_COM_b09FwTextAddr;
3199 		fw.text_len = bce_COM_b09FwTextLen;
3200 		fw.text_index = 0;
3201 		fw.text = bce_COM_b09FwText;
3202 
3203 		fw.data_addr = bce_COM_b09FwDataAddr;
3204 		fw.data_len = bce_COM_b09FwDataLen;
3205 		fw.data_index = 0;
3206 		fw.data = bce_COM_b09FwData;
3207 
3208 		fw.sbss_addr = bce_COM_b09FwSbssAddr;
3209 		fw.sbss_len = bce_COM_b09FwSbssLen;
3210 		fw.sbss_index = 0;
3211 		fw.sbss = bce_COM_b09FwSbss;
3212 
3213 		fw.bss_addr = bce_COM_b09FwBssAddr;
3214 		fw.bss_len = bce_COM_b09FwBssLen;
3215 		fw.bss_index = 0;
3216 		fw.bss = bce_COM_b09FwBss;
3217 
3218 		fw.rodata_addr = bce_COM_b09FwRodataAddr;
3219 		fw.rodata_len = bce_COM_b09FwRodataLen;
3220 		fw.rodata_index = 0;
3221 		fw.rodata = bce_COM_b09FwRodata;
3222 	} else {
3223 		fw.ver_major = bce_COM_b06FwReleaseMajor;
3224 		fw.ver_minor = bce_COM_b06FwReleaseMinor;
3225 		fw.ver_fix = bce_COM_b06FwReleaseFix;
3226 		fw.start_addr = bce_COM_b06FwStartAddr;
3227 
3228 		fw.text_addr = bce_COM_b06FwTextAddr;
3229 		fw.text_len = bce_COM_b06FwTextLen;
3230 		fw.text_index = 0;
3231 		fw.text = bce_COM_b06FwText;
3232 
3233 		fw.data_addr = bce_COM_b06FwDataAddr;
3234 		fw.data_len = bce_COM_b06FwDataLen;
3235 		fw.data_index = 0;
3236 		fw.data = bce_COM_b06FwData;
3237 
3238 		fw.sbss_addr = bce_COM_b06FwSbssAddr;
3239 		fw.sbss_len = bce_COM_b06FwSbssLen;
3240 		fw.sbss_index = 0;
3241 		fw.sbss = bce_COM_b06FwSbss;
3242 
3243 		fw.bss_addr = bce_COM_b06FwBssAddr;
3244 		fw.bss_len = bce_COM_b06FwBssLen;
3245 		fw.bss_index = 0;
3246 		fw.bss = bce_COM_b06FwBss;
3247 
3248 		fw.rodata_addr = bce_COM_b06FwRodataAddr;
3249 		fw.rodata_len = bce_COM_b06FwRodataLen;
3250 		fw.rodata_index = 0;
3251 		fw.rodata = bce_COM_b06FwRodata;
3252 	}
3253 
3254 	bce_load_cpu_fw(sc, &cpu_reg, &fw);
3255 	bce_start_cpu(sc, &cpu_reg);
3256 }
3257 
3258 /****************************************************************************/
3259 /* Initialize the RV2P, RX, TX, TPAT, COM, and CP CPUs.                     */
3260 /*                                                                          */
3261 /* Loads the firmware for each CPU and starts the CPU.                      */
3262 /*                                                                          */
3263 /* Returns:                                                                 */
3264 /*   Nothing.                                                               */
3265 /****************************************************************************/
3266 static void
3267 bce_init_cpus(struct bce_softc *sc)
3268 {
3269 	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
3270 	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
3271 		if (BCE_CHIP_REV(sc) == BCE_CHIP_REV_Ax) {
3272 			bce_load_rv2p_fw(sc, bce_xi90_rv2p_proc1,
3273 			    sizeof(bce_xi90_rv2p_proc1), RV2P_PROC1);
3274 			bce_load_rv2p_fw(sc, bce_xi90_rv2p_proc2,
3275 			    sizeof(bce_xi90_rv2p_proc2), RV2P_PROC2);
3276 		} else {
3277 			bce_load_rv2p_fw(sc, bce_xi_rv2p_proc1,
3278 			    sizeof(bce_xi_rv2p_proc1), RV2P_PROC1);
3279 			bce_load_rv2p_fw(sc, bce_xi_rv2p_proc2,
3280 			    sizeof(bce_xi_rv2p_proc2), RV2P_PROC2);
3281 		}
3282 	} else {
3283 		bce_load_rv2p_fw(sc, bce_rv2p_proc1,
3284 		    sizeof(bce_rv2p_proc1), RV2P_PROC1);
3285 		bce_load_rv2p_fw(sc, bce_rv2p_proc2,
3286 		    sizeof(bce_rv2p_proc2), RV2P_PROC2);
3287 	}
3288 
3289 	bce_init_rxp_cpu(sc);
3290 	bce_init_txp_cpu(sc);
3291 	bce_init_tpat_cpu(sc);
3292 	bce_init_com_cpu(sc);
3293 	bce_init_cp_cpu(sc);
3294 }
3295 
3296 /****************************************************************************/
3297 /* Initialize context memory.                                               */
3298 /*                                                                          */
3299 /* Clears the memory associated with each Context ID (CID).                 */
3300 /*                                                                          */
3301 /* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
3303 /****************************************************************************/
static int
bce_init_ctx(struct bce_softc *sc)
{
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
		/* DRC: Replace this constant value with a #define. */
		int i, retry_cnt = 10;
		uint32_t val;

		/*
		 * BCM5709 context memory may be cached
		 * in host memory so prepare the host memory
		 * for access.
		 */
		val = BCE_CTX_COMMAND_ENABLED | BCE_CTX_COMMAND_MEM_INIT |
		    (1 << 12);
		/* Encode the host page size for the chip. */
		val |= (BCM_PAGE_BITS - 8) << 16;
		REG_WR(sc, BCE_CTX_COMMAND, val);

		/* Wait for mem init command to complete. */
		for (i = 0; i < retry_cnt; i++) {
			val = REG_RD(sc, BCE_CTX_COMMAND);
			if (!(val & BCE_CTX_COMMAND_MEM_INIT))
				break;
			DELAY(2);
		}
		if (i == retry_cnt) {
			device_printf(sc->bce_dev,
			    "Context memory initialization failed!\n");
			return ETIMEDOUT;
		}

		/* Hand each host page backing the context cache to the chip. */
		for (i = 0; i < sc->ctx_pages; i++) {
			int j;

			/*
			 * Set the physical address of the context
			 * memory cache.
			 */
			REG_WR(sc, BCE_CTX_HOST_PAGE_TBL_DATA0,
			    BCE_ADDR_LO(sc->ctx_paddr[i] & 0xfffffff0) |
			    BCE_CTX_HOST_PAGE_TBL_DATA0_VALID);
			REG_WR(sc, BCE_CTX_HOST_PAGE_TBL_DATA1,
			    BCE_ADDR_HI(sc->ctx_paddr[i]));
			REG_WR(sc, BCE_CTX_HOST_PAGE_TBL_CTRL,
			    i | BCE_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);

			/*
			 * Verify that the context memory write was successful
			 * by polling until the chip clears the WRITE_REQ bit.
			 */
			for (j = 0; j < retry_cnt; j++) {
				val = REG_RD(sc, BCE_CTX_HOST_PAGE_TBL_CTRL);
				if ((val &
				    BCE_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) == 0)
					break;
				DELAY(5);
			}
			if (j == retry_cnt) {
				device_printf(sc->bce_dev,
				    "Failed to initialize context page!\n");
				return ETIMEDOUT;
			}
		}
	} else {
		uint32_t vcid_addr, offset;

		/*
		 * For the 5706/5708, context memory is local to
		 * the controller, so initialize the controller
		 * context memory.
		 */

		/* Walk the CID address space top-down, zeroing each block. */
		vcid_addr = GET_CID_ADDR(96);
		while (vcid_addr) {
			vcid_addr -= PHY_CTX_SIZE;

			REG_WR(sc, BCE_CTX_VIRT_ADDR, 0);
			REG_WR(sc, BCE_CTX_PAGE_TBL, vcid_addr);

			/* Clear the context block one 32-bit word at a time. */
			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
				CTX_WR(sc, 0x00, offset, 0);

			/* Map the virtual CID range onto the physical block. */
			REG_WR(sc, BCE_CTX_VIRT_ADDR, vcid_addr);
			REG_WR(sc, BCE_CTX_PAGE_TBL, vcid_addr);
		}
	}
	return 0;
}
3392 
3393 /****************************************************************************/
3394 /* Fetch the permanent MAC address of the controller.                       */
3395 /*                                                                          */
3396 /* Returns:                                                                 */
3397 /*   Nothing.                                                               */
3398 /****************************************************************************/
3399 static void
3400 bce_get_mac_addr(struct bce_softc *sc)
3401 {
3402 	uint32_t mac_lo = 0, mac_hi = 0;
3403 
3404 	/*
3405 	 * The NetXtreme II bootcode populates various NIC
3406 	 * power-on and runtime configuration items in a
3407 	 * shared memory area.  The factory configured MAC
3408 	 * address is available from both NVRAM and the
3409 	 * shared memory area so we'll read the value from
3410 	 * shared memory for speed.
3411 	 */
3412 
3413 	mac_hi = bce_shmem_rd(sc,  BCE_PORT_HW_CFG_MAC_UPPER);
3414 	mac_lo = bce_shmem_rd(sc, BCE_PORT_HW_CFG_MAC_LOWER);
3415 
3416 	if (mac_lo == 0 && mac_hi == 0) {
3417 		if_printf(&sc->arpcom.ac_if, "Invalid Ethernet address!\n");
3418 	} else {
3419 		sc->eaddr[0] = (u_char)(mac_hi >> 8);
3420 		sc->eaddr[1] = (u_char)(mac_hi >> 0);
3421 		sc->eaddr[2] = (u_char)(mac_lo >> 24);
3422 		sc->eaddr[3] = (u_char)(mac_lo >> 16);
3423 		sc->eaddr[4] = (u_char)(mac_lo >> 8);
3424 		sc->eaddr[5] = (u_char)(mac_lo >> 0);
3425 	}
3426 }
3427 
3428 /****************************************************************************/
3429 /* Program the MAC address.                                                 */
3430 /*                                                                          */
3431 /* Returns:                                                                 */
3432 /*   Nothing.                                                               */
3433 /****************************************************************************/
3434 static void
3435 bce_set_mac_addr(struct bce_softc *sc)
3436 {
3437 	const uint8_t *mac_addr = sc->eaddr;
3438 	uint32_t val;
3439 
3440 	val = (mac_addr[0] << 8) | mac_addr[1];
3441 	REG_WR(sc, BCE_EMAC_MAC_MATCH0, val);
3442 
3443 	val = (mac_addr[2] << 24) |
3444 	      (mac_addr[3] << 16) |
3445 	      (mac_addr[4] << 8) |
3446 	      mac_addr[5];
3447 	REG_WR(sc, BCE_EMAC_MAC_MATCH1, val);
3448 }
3449 
3450 /****************************************************************************/
3451 /* Stop the controller.                                                     */
3452 /*                                                                          */
3453 /* Returns:                                                                 */
3454 /*   Nothing.                                                               */
3455 /****************************************************************************/
static void
bce_stop(struct bce_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int i;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	/* Stop the periodic tick timer before tearing anything down. */
	callout_stop(&sc->bce_tick_callout);

	/* Disable the transmit/receive blocks. */
	REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS, BCE_MISC_ENABLE_CLR_DEFAULT);
	REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS);	/* read back to flush */
	DELAY(20);

	bce_disable_intr(sc);

	/* Mark the interface down and stop every TX queue's watchdog. */
	ifp->if_flags &= ~IFF_RUNNING;
	for (i = 0; i < sc->tx_ring_cnt; ++i) {
		ifsq_clr_oactive(sc->tx_rings[i].ifsq);
		ifsq_watchdog_stop(&sc->tx_rings[i].tx_watchdog);
	}

	/* Free the RX lists. */
	for (i = 0; i < sc->rx_ring_cnt; ++i)
		bce_free_rx_chain(&sc->rx_rings[i]);

	/* Free TX buffers. */
	for (i = 0; i < sc->tx_ring_cnt; ++i)
		bce_free_tx_chain(&sc->tx_rings[i]);

	/* Forget link state and any pending coalescing parameter change. */
	sc->bce_link = 0;
	sc->bce_coalchg_mask = 0;
}
3490 
/****************************************************************************/
/* Reset the controller.                                                    */
/*                                                                          */
/* Notifies the bootcode of the pending reset (reset_code), performs a      */
/* chip-specific soft reset, and waits for the firmware to finish its       */
/* re-initialization.                                                       */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
static int
bce_reset(struct bce_softc *sc, uint32_t reset_code)
{
	uint32_t val;
	int i, rc = 0;

	/* Wait for pending PCI transactions to complete. */
	REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS,
	       BCE_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	       BCE_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	       BCE_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	       BCE_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	val = REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS);
	DELAY(5);

	/* Disable DMA */
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
		val = REG_RD(sc, BCE_MISC_NEW_CORE_CTL);
		val &= ~BCE_MISC_NEW_CORE_CTL_DMA_ENABLE;
		REG_WR(sc, BCE_MISC_NEW_CORE_CTL, val);
	}

	/* Assume bootcode is running. */
	sc->bce_fw_timed_out = 0;
	sc->bce_drv_cardiac_arrest = 0;

	/* Give the firmware a chance to prepare for the reset. */
	rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT0 | reset_code);
	if (rc) {
		if_printf(&sc->arpcom.ac_if,
			  "Firmware is not ready for reset\n");
		return rc;
	}

	/* Set a firmware reminder that this is a soft reset. */
	bce_shmem_wr(sc, BCE_DRV_RESET_SIGNATURE,
	    BCE_DRV_RESET_SIGNATURE_MAGIC);

	/* Dummy read to force the chip to complete all current transactions. */
	val = REG_RD(sc, BCE_MISC_ID);

	/* Chip reset. */
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
		/*
		 * 5709/5716: reset through the MISC command register,
		 * then restore the register window configuration via
		 * PCI config space.
		 */
		REG_WR(sc, BCE_MISC_COMMAND, BCE_MISC_COMMAND_SW_RESET);
		REG_RD(sc, BCE_MISC_COMMAND);
		DELAY(5);

		val = BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		    BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		pci_write_config(sc->bce_dev, BCE_PCICFG_MISC_CONFIG, val, 4);
	} else {
		/* 5706/5708: request a core reset and poll for completion. */
		val = BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		    BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		    BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
		REG_WR(sc, BCE_PCICFG_MISC_CONFIG, val);

		/* Allow up to 30us for reset to complete. */
		for (i = 0; i < 10; i++) {
			val = REG_RD(sc, BCE_PCICFG_MISC_CONFIG);
			if ((val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			    BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
				break;
			DELAY(10);
		}

		/* Check that reset completed successfully. */
		if (val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		    BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
			if_printf(&sc->arpcom.ac_if, "Reset failed!\n");
			return EBUSY;
		}
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(sc, BCE_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		if_printf(&sc->arpcom.ac_if, "Byte swap is incorrect!\n");
		return ENODEV;
	}

	/* Just completed a reset, assume that firmware is running again. */
	sc->bce_fw_timed_out = 0;
	sc->bce_drv_cardiac_arrest = 0;

	/* Wait for the firmware to finish its initialization. */
	rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT1 | reset_code);
	if (rc) {
		if_printf(&sc->arpcom.ac_if,
			  "Firmware did not complete initialization!\n");
	}

	if (sc->bce_irq_type == PCI_INTR_TYPE_MSIX) {
		/* Reprogram the MSI-X table after the reset. */
		bce_setup_msix_table(sc);
		/* Prevent MSIX table reads and write from timing out */
		REG_WR(sc, BCE_MISC_ECO_HW_CTL,
		    BCE_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);

	}
	return rc;
}
3594 
/****************************************************************************/
/* Perform basic chip initialization after a reset.                         */
/*                                                                          */
/* Configures DMA swapping/channels, context memory, the on-chip CPUs,      */
/* NVRAM access and the management queue window.                            */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
static int
bce_chipinit(struct bce_softc *sc)
{
	uint32_t val;
	int rc = 0;

	/* Make sure the interrupt is not active. */
	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, BCE_PCICFG_INT_ACK_CMD_MASK_INT);
	REG_RD(sc, BCE_PCICFG_INT_ACK_CMD);

	/*
	 * Initialize DMA byte/word swapping, configure the number of DMA
	 * channels and PCI clock compensation delay.
	 */
	val = BCE_DMA_CONFIG_DATA_BYTE_SWAP |
	      BCE_DMA_CONFIG_DATA_WORD_SWAP |
#if BYTE_ORDER == BIG_ENDIAN
	      BCE_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BCE_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	val |= (0x2 << 20) | BCE_DMA_CONFIG_CNTL_PCI_COMP_DLY;

	/* Use the faster clock compensation on 133MHz PCI-X buses. */
	if ((sc->bce_flags & BCE_PCIX_FLAG) && sc->bus_speed_mhz == 133)
		val |= BCE_DMA_CONFIG_PCI_FAST_CLK_CMP;

	/*
	 * This setting resolves a problem observed on certain Intel PCI
	 * chipsets that cannot handle multiple outstanding DMA operations.
	 * See errata E9_5706A1_65.
	 */
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706 &&
	    BCE_CHIP_ID(sc) != BCE_CHIP_ID_5706_A0 &&
	    !(sc->bce_flags & BCE_PCIX_FLAG))
		val |= BCE_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(sc, BCE_DMA_CONFIG, val);

	/* Enable the RX_V2P and Context state machines before access. */
	REG_WR(sc, BCE_MISC_ENABLE_SET_BITS,
	       BCE_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	       BCE_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	       BCE_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts. */
	rc = bce_init_ctx(sc);
	if (rc != 0)
		return rc;

	/* Initialize the on-boards CPUs */
	bce_init_cpus(sc);

	/* Enable management frames (NC-SI) to flow to the MCP. */
	if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) {
		val = REG_RD(sc, BCE_RPM_MGMT_PKT_CTRL) |
		    BCE_RPM_MGMT_PKT_CTRL_MGMT_EN;
		REG_WR(sc, BCE_RPM_MGMT_PKT_CTRL, val);
	}

	/* Prepare NVRAM for access. */
	rc = bce_init_nvram(sc);
	if (rc != 0)
		return rc;

	/* Set the kernel bypass block size */
	val = REG_RD(sc, BCE_MQ_CONFIG);
	val &= ~BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;

	/* Enable bins used on the 5709/5716. */
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
		val |= BCE_MQ_CONFIG_BIN_MQ_MODE;
		/* 5709 A1 additionally needs the halt-disable workaround. */
		if (BCE_CHIP_ID(sc) == BCE_CHIP_ID_5709_A1)
			val |= BCE_MQ_CONFIG_HALT_DIS;
	}

	REG_WR(sc, BCE_MQ_CONFIG, val);

	/* Program the start/end of the kernel bypass mailbox window. */
	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(sc, BCE_MQ_KNL_BYP_WIND_START, val);
	REG_WR(sc, BCE_MQ_KNL_WIND_END, val);

	/* Set the page size and clear the RV2P processor stall bits. */
	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(sc, BCE_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(sc, BCE_TBDR_CONFIG);
	val &= ~BCE_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(sc, BCE_TBDR_CONFIG, val);

	/* Set the perfect match control register to default. */
	REG_WR_IND(sc, BCE_RXP_PM_CTRL, 0);

	return 0;
}
3695 
3696 /****************************************************************************/
3697 /* Initialize the controller in preparation to send/receive traffic.        */
3698 /*                                                                          */
3699 /* Returns:                                                                 */
3700 /*   0 for success, positive value for failure.                             */
3701 /****************************************************************************/
3702 static int
3703 bce_blockinit(struct bce_softc *sc)
3704 {
3705 	uint32_t reg, val;
3706 	int i;
3707 
3708 	/* Load the hardware default MAC address. */
3709 	bce_set_mac_addr(sc);
3710 
3711 	/* Set the Ethernet backoff seed value */
3712 	val = sc->eaddr[0] + (sc->eaddr[1] << 8) + (sc->eaddr[2] << 16) +
3713 	      sc->eaddr[3] + (sc->eaddr[4] << 8) + (sc->eaddr[5] << 16);
3714 	REG_WR(sc, BCE_EMAC_BACKOFF_SEED, val);
3715 
3716 	sc->rx_mode = BCE_EMAC_RX_MODE_SORT_MODE;
3717 
3718 	/* Set up link change interrupt generation. */
3719 	REG_WR(sc, BCE_EMAC_ATTENTION_ENA, BCE_EMAC_ATTENTION_ENA_LINK);
3720 
3721 	/* Program the physical address of the status block. */
3722 	REG_WR(sc, BCE_HC_STATUS_ADDR_L, BCE_ADDR_LO(sc->status_block_paddr));
3723 	REG_WR(sc, BCE_HC_STATUS_ADDR_H, BCE_ADDR_HI(sc->status_block_paddr));
3724 
3725 	/* Program the physical address of the statistics block. */
3726 	REG_WR(sc, BCE_HC_STATISTICS_ADDR_L,
3727 	       BCE_ADDR_LO(sc->stats_block_paddr));
3728 	REG_WR(sc, BCE_HC_STATISTICS_ADDR_H,
3729 	       BCE_ADDR_HI(sc->stats_block_paddr));
3730 
3731 	/* Program various host coalescing parameters. */
3732 	REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
3733 	       (sc->bce_tx_quick_cons_trip_int << 16) |
3734 	       sc->bce_tx_quick_cons_trip);
3735 	REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
3736 	       (sc->bce_rx_quick_cons_trip_int << 16) |
3737 	       sc->bce_rx_quick_cons_trip);
3738 	REG_WR(sc, BCE_HC_COMP_PROD_TRIP,
3739 	       (sc->bce_comp_prod_trip_int << 16) | sc->bce_comp_prod_trip);
3740 	REG_WR(sc, BCE_HC_TX_TICKS,
3741 	       (sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks);
3742 	REG_WR(sc, BCE_HC_RX_TICKS,
3743 	       (sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks);
3744 	REG_WR(sc, BCE_HC_COM_TICKS,
3745 	       (sc->bce_com_ticks_int << 16) | sc->bce_com_ticks);
3746 	REG_WR(sc, BCE_HC_CMD_TICKS,
3747 	       (sc->bce_cmd_ticks_int << 16) | sc->bce_cmd_ticks);
3748 	REG_WR(sc, BCE_HC_STATS_TICKS, (sc->bce_stats_ticks & 0xffff00));
3749 	REG_WR(sc, BCE_HC_STAT_COLLECT_TICKS, 0xbb8);	/* 3ms */
3750 
3751 	if (sc->bce_irq_type == PCI_INTR_TYPE_MSIX)
3752 		REG_WR(sc, BCE_HC_MSIX_BIT_VECTOR, BCE_HC_MSIX_BIT_VECTOR_VAL);
3753 
3754 	val = BCE_HC_CONFIG_TX_TMR_MODE | BCE_HC_CONFIG_COLLECT_STATS;
3755 	if ((sc->bce_flags & BCE_ONESHOT_MSI_FLAG) ||
3756 	    sc->bce_irq_type == PCI_INTR_TYPE_MSIX) {
3757 		if (bootverbose) {
3758 			if (sc->bce_irq_type == PCI_INTR_TYPE_MSIX) {
3759 				if_printf(&sc->arpcom.ac_if,
3760 				    "using MSI-X\n");
3761 			} else {
3762 				if_printf(&sc->arpcom.ac_if,
3763 				    "using oneshot MSI\n");
3764 			}
3765 		}
3766 		val |= BCE_HC_CONFIG_ONE_SHOT | BCE_HC_CONFIG_USE_INT_PARAM;
3767 		if (sc->bce_irq_type == PCI_INTR_TYPE_MSIX)
3768 			val |= BCE_HC_CONFIG_SB_ADDR_INC_128B;
3769 	}
3770 	REG_WR(sc, BCE_HC_CONFIG, val);
3771 
3772 	for (i = 1; i < sc->rx_ring_cnt; ++i) {
3773 		uint32_t base;
3774 
3775 		base = ((i - 1) * BCE_HC_SB_CONFIG_SIZE) + BCE_HC_SB_CONFIG_1;
3776 		KKASSERT(base <= BCE_HC_SB_CONFIG_8);
3777 
3778 		REG_WR(sc, base,
3779 		    BCE_HC_SB_CONFIG_1_TX_TMR_MODE |
3780 		    /* BCE_HC_SB_CONFIG_1_RX_TMR_MODE | */
3781 		    BCE_HC_SB_CONFIG_1_ONE_SHOT);
3782 
3783 		REG_WR(sc, base + BCE_HC_TX_QUICK_CONS_TRIP_OFF,
3784 		    (sc->bce_tx_quick_cons_trip_int << 16) |
3785 		    sc->bce_tx_quick_cons_trip);
3786 		REG_WR(sc, base + BCE_HC_RX_QUICK_CONS_TRIP_OFF,
3787 		    (sc->bce_rx_quick_cons_trip_int << 16) |
3788 		    sc->bce_rx_quick_cons_trip);
3789 		REG_WR(sc, base + BCE_HC_TX_TICKS_OFF,
3790 		    (sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks);
3791 		REG_WR(sc, base + BCE_HC_RX_TICKS_OFF,
3792 		    (sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks);
3793 	}
3794 
3795 	/* Clear the internal statistics counters. */
3796 	REG_WR(sc, BCE_HC_COMMAND, BCE_HC_COMMAND_CLR_STAT_NOW);
3797 
3798 	/* Verify that bootcode is running. */
3799 	reg = bce_shmem_rd(sc, BCE_DEV_INFO_SIGNATURE);
3800 
3801 	if ((reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
3802 	    BCE_DEV_INFO_SIGNATURE_MAGIC) {
3803 		if_printf(&sc->arpcom.ac_if,
3804 			  "Bootcode not running! Found: 0x%08X, "
3805 			  "Expected: 08%08X\n",
3806 			  reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK,
3807 			  BCE_DEV_INFO_SIGNATURE_MAGIC);
3808 		return ENODEV;
3809 	}
3810 
3811 	/* Enable DMA */
3812 	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
3813 	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
3814 		val = REG_RD(sc, BCE_MISC_NEW_CORE_CTL);
3815 		val |= BCE_MISC_NEW_CORE_CTL_DMA_ENABLE;
3816 		REG_WR(sc, BCE_MISC_NEW_CORE_CTL, val);
3817 	}
3818 
3819 	/* Allow bootcode to apply any additional fixes before enabling MAC. */
3820 	bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT2 | BCE_DRV_MSG_CODE_RESET);
3821 
3822 	/* Enable link state change interrupt generation. */
3823 	REG_WR(sc, BCE_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);
3824 
3825 	/* Enable the RXP. */
3826 	bce_start_rxp_cpu(sc);
3827 
3828 	/* Disable management frames (NC-SI) from flowing to the MCP. */
3829 	if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) {
3830 		val = REG_RD(sc, BCE_RPM_MGMT_PKT_CTRL) &
3831 		    ~BCE_RPM_MGMT_PKT_CTRL_MGMT_EN;
3832 		REG_WR(sc, BCE_RPM_MGMT_PKT_CTRL, val);
3833 	}
3834 
3835 	/* Enable all remaining blocks in the MAC. */
3836 	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
3837 	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
3838 		REG_WR(sc, BCE_MISC_ENABLE_SET_BITS,
3839 		    BCE_MISC_ENABLE_DEFAULT_XI);
3840 	} else {
3841 		REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, BCE_MISC_ENABLE_DEFAULT);
3842 	}
3843 	REG_RD(sc, BCE_MISC_ENABLE_SET_BITS);
3844 	DELAY(20);
3845 
3846 	/* Save the current host coalescing block settings. */
3847 	sc->hc_command = REG_RD(sc, BCE_HC_COMMAND);
3848 
3849 	return 0;
3850 }
3851 
3852 /****************************************************************************/
3853 /* Encapsulate an mbuf cluster into the rx_bd chain.                        */
3854 /*                                                                          */
3855 /* The NetXtreme II can support Jumbo frames by using multiple rx_bd's.     */
3856 /* This routine will map an mbuf cluster into 1 or more rx_bd's as          */
3857 /* necessary.                                                               */
3858 /*                                                                          */
3859 /* Returns:                                                                 */
3860 /*   0 for success, positive value for failure.                             */
3861 /****************************************************************************/
static int
bce_newbuf_std(struct bce_rx_ring *rxr, uint16_t *prod, uint16_t chain_prod,
    uint32_t *prod_bseq, int init)
{
	struct bce_rx_buf *rx_buf;
	bus_dmamap_t map;
	bus_dma_segment_t seg;
	struct mbuf *m_new;
	int error, nseg;

	/*
	 * This is a new mbuf allocation.  Only block waiting for an mbuf
	 * during the initial ring fill; at runtime a failure just leaves
	 * the previous buffer in place.
	 */
	m_new = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m_new == NULL)
		return ENOBUFS;

	m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;

	/* Map the mbuf cluster into device memory using the spare map. */
	error = bus_dmamap_load_mbuf_segment(rxr->rx_mbuf_tag,
	    rxr->rx_mbuf_tmpmap, m_new, &seg, 1, &nseg, BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m_new);
		if (init) {
			if_printf(&rxr->sc->arpcom.ac_if,
			    "Error mapping mbuf into RX chain!\n");
		}
		return error;
	}

	/* Unload the slot's old mapping, if any. */
	rx_buf = &rxr->rx_bufs[chain_prod];
	if (rx_buf->rx_mbuf_ptr != NULL)
		bus_dmamap_unload(rxr->rx_mbuf_tag, rx_buf->rx_mbuf_map);

	/*
	 * Swap the slot's DMA map with the spare map the new mbuf was
	 * loaded into, so the spare is free for the next allocation.
	 */
	map = rx_buf->rx_mbuf_map;
	rx_buf->rx_mbuf_map = rxr->rx_mbuf_tmpmap;
	rxr->rx_mbuf_tmpmap = map;

	/* Save the mbuf and update our counter. */
	rx_buf->rx_mbuf_ptr = m_new;
	rx_buf->rx_mbuf_paddr = seg.ds_addr;
	rxr->free_rx_bd--;

	/* Publish the new buffer's address/length in the rx_bd chain. */
	bce_setup_rxdesc_std(rxr, chain_prod, prod_bseq);

	/*
	 * NOTE(review): 'prod' is accepted but never dereferenced in this
	 * function -- confirm callers expect it to remain untouched.
	 */
	return 0;
}
3908 
3909 static void
3910 bce_setup_rxdesc_std(struct bce_rx_ring *rxr, uint16_t chain_prod,
3911     uint32_t *prod_bseq)
3912 {
3913 	const struct bce_rx_buf *rx_buf;
3914 	struct rx_bd *rxbd;
3915 	bus_addr_t paddr;
3916 	int len;
3917 
3918 	rx_buf = &rxr->rx_bufs[chain_prod];
3919 	paddr = rx_buf->rx_mbuf_paddr;
3920 	len = rx_buf->rx_mbuf_ptr->m_len;
3921 
3922 	/* Setup the rx_bd for the first segment. */
3923 	rxbd = &rxr->rx_bd_chain[RX_PAGE(chain_prod)][RX_IDX(chain_prod)];
3924 
3925 	rxbd->rx_bd_haddr_lo = htole32(BCE_ADDR_LO(paddr));
3926 	rxbd->rx_bd_haddr_hi = htole32(BCE_ADDR_HI(paddr));
3927 	rxbd->rx_bd_len = htole32(len);
3928 	rxbd->rx_bd_flags = htole32(RX_BD_FLAGS_START);
3929 	*prod_bseq += len;
3930 
3931 	rxbd->rx_bd_flags |= htole32(RX_BD_FLAGS_END);
3932 }
3933 
3934 /****************************************************************************/
3935 /* Initialize the TX context memory.                                        */
3936 /*                                                                          */
3937 /* Returns:                                                                 */
3938 /*   Nothing                                                                */
3939 /****************************************************************************/
3940 static void
3941 bce_init_tx_context(struct bce_tx_ring *txr)
3942 {
3943 	uint32_t val;
3944 
3945 	/* Initialize the context ID for an L2 TX chain. */
3946 	if (BCE_CHIP_NUM(txr->sc) == BCE_CHIP_NUM_5709 ||
3947 	    BCE_CHIP_NUM(txr->sc) == BCE_CHIP_NUM_5716) {
3948 		/* Set the CID type to support an L2 connection. */
3949 		val = BCE_L2CTX_TX_TYPE_TYPE_L2 | BCE_L2CTX_TX_TYPE_SIZE_L2;
3950 		CTX_WR(txr->sc, GET_CID_ADDR(txr->tx_cid),
3951 		    BCE_L2CTX_TX_TYPE_XI, val);
3952 		val = BCE_L2CTX_TX_CMD_TYPE_TYPE_L2 | (8 << 16);
3953 		CTX_WR(txr->sc, GET_CID_ADDR(txr->tx_cid),
3954 		    BCE_L2CTX_TX_CMD_TYPE_XI, val);
3955 
3956 		/* Point the hardware to the first page in the chain. */
3957 		val = BCE_ADDR_HI(txr->tx_bd_chain_paddr[0]);
3958 		CTX_WR(txr->sc, GET_CID_ADDR(txr->tx_cid),
3959 		    BCE_L2CTX_TX_TBDR_BHADDR_HI_XI, val);
3960 		val = BCE_ADDR_LO(txr->tx_bd_chain_paddr[0]);
3961 		CTX_WR(txr->sc, GET_CID_ADDR(txr->tx_cid),
3962 		    BCE_L2CTX_TX_TBDR_BHADDR_LO_XI, val);
3963 	} else {
3964 		/* Set the CID type to support an L2 connection. */
3965 		val = BCE_L2CTX_TX_TYPE_TYPE_L2 | BCE_L2CTX_TX_TYPE_SIZE_L2;
3966 		CTX_WR(txr->sc, GET_CID_ADDR(txr->tx_cid),
3967 		    BCE_L2CTX_TX_TYPE, val);
3968 		val = BCE_L2CTX_TX_CMD_TYPE_TYPE_L2 | (8 << 16);
3969 		CTX_WR(txr->sc, GET_CID_ADDR(txr->tx_cid),
3970 		    BCE_L2CTX_TX_CMD_TYPE, val);
3971 
3972 		/* Point the hardware to the first page in the chain. */
3973 		val = BCE_ADDR_HI(txr->tx_bd_chain_paddr[0]);
3974 		CTX_WR(txr->sc, GET_CID_ADDR(txr->tx_cid),
3975 		    BCE_L2CTX_TX_TBDR_BHADDR_HI, val);
3976 		val = BCE_ADDR_LO(txr->tx_bd_chain_paddr[0]);
3977 		CTX_WR(txr->sc, GET_CID_ADDR(txr->tx_cid),
3978 		    BCE_L2CTX_TX_TBDR_BHADDR_LO, val);
3979 	}
3980 }
3981 
3982 /****************************************************************************/
3983 /* Allocate memory and initialize the TX data structures.                   */
3984 /*                                                                          */
3985 /* Returns:                                                                 */
3986 /*   0 for success, positive value for failure.                             */
3987 /****************************************************************************/
3988 static int
3989 bce_init_tx_chain(struct bce_tx_ring *txr)
3990 {
3991 	struct tx_bd *txbd;
3992 	int i, rc = 0;
3993 
3994 	/* Set the initial TX producer/consumer indices. */
3995 	txr->tx_prod = 0;
3996 	txr->tx_cons = 0;
3997 	txr->tx_prod_bseq = 0;
3998 	txr->used_tx_bd = 0;
3999 	txr->max_tx_bd = USABLE_TX_BD(txr);
4000 
4001 	/*
4002 	 * The NetXtreme II supports a linked-list structre called
4003 	 * a Buffer Descriptor Chain (or BD chain).  A BD chain
4004 	 * consists of a series of 1 or more chain pages, each of which
4005 	 * consists of a fixed number of BD entries.
4006 	 * The last BD entry on each page is a pointer to the next page
4007 	 * in the chain, and the last pointer in the BD chain
4008 	 * points back to the beginning of the chain.
4009 	 */
4010 
4011 	/* Set the TX next pointer chain entries. */
4012 	for (i = 0; i < txr->tx_pages; i++) {
4013 		int j;
4014 
4015 		txbd = &txr->tx_bd_chain[i][USABLE_TX_BD_PER_PAGE];
4016 
4017 		/* Check if we've reached the last page. */
4018 		if (i == (txr->tx_pages - 1))
4019 			j = 0;
4020 		else
4021 			j = i + 1;
4022 
4023 		txbd->tx_bd_haddr_hi =
4024 		    htole32(BCE_ADDR_HI(txr->tx_bd_chain_paddr[j]));
4025 		txbd->tx_bd_haddr_lo =
4026 		    htole32(BCE_ADDR_LO(txr->tx_bd_chain_paddr[j]));
4027 	}
4028 	bce_init_tx_context(txr);
4029 
4030 	return(rc);
4031 }
4032 
4033 /****************************************************************************/
4034 /* Free memory and clear the TX data structures.                            */
4035 /*                                                                          */
4036 /* Returns:                                                                 */
4037 /*   Nothing.                                                               */
4038 /****************************************************************************/
4039 static void
4040 bce_free_tx_chain(struct bce_tx_ring *txr)
4041 {
4042 	int i;
4043 
4044 	/* Unmap, unload, and free any mbufs still in the TX mbuf chain. */
4045 	for (i = 0; i < TOTAL_TX_BD(txr); i++) {
4046 		struct bce_tx_buf *tx_buf = &txr->tx_bufs[i];
4047 
4048 		if (tx_buf->tx_mbuf_ptr != NULL) {
4049 			bus_dmamap_unload(txr->tx_mbuf_tag,
4050 			    tx_buf->tx_mbuf_map);
4051 			m_freem(tx_buf->tx_mbuf_ptr);
4052 			tx_buf->tx_mbuf_ptr = NULL;
4053 		}
4054 	}
4055 
4056 	/* Clear each TX chain page. */
4057 	for (i = 0; i < txr->tx_pages; i++)
4058 		bzero(txr->tx_bd_chain[i], BCE_TX_CHAIN_PAGE_SZ);
4059 	txr->used_tx_bd = 0;
4060 }
4061 
4062 /****************************************************************************/
4063 /* Initialize the RX context memory.                                        */
4064 /*                                                                          */
4065 /* Returns:                                                                 */
4066 /*   Nothing                                                                */
4067 /****************************************************************************/
4068 static void
4069 bce_init_rx_context(struct bce_rx_ring *rxr)
4070 {
4071 	uint32_t val;
4072 
4073 	/* Initialize the context ID for an L2 RX chain. */
4074 	val = BCE_L2CTX_RX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE |
4075 	    BCE_L2CTX_RX_CTX_TYPE_SIZE_L2 | (0x02 << 8);
4076 
4077 	/*
4078 	 * Set the level for generating pause frames
4079 	 * when the number of available rx_bd's gets
4080 	 * too low (the low watermark) and the level
4081 	 * when pause frames can be stopped (the high
4082 	 * watermark).
4083 	 */
4084 	if (BCE_CHIP_NUM(rxr->sc) == BCE_CHIP_NUM_5709 ||
4085 	    BCE_CHIP_NUM(rxr->sc) == BCE_CHIP_NUM_5716) {
4086 		uint32_t lo_water, hi_water;
4087 
4088 		lo_water = BCE_L2CTX_RX_LO_WATER_MARK_DEFAULT;
4089 		hi_water = USABLE_RX_BD(rxr) / 4;
4090 
4091 		lo_water /= BCE_L2CTX_RX_LO_WATER_MARK_SCALE;
4092 		hi_water /= BCE_L2CTX_RX_HI_WATER_MARK_SCALE;
4093 
4094 		if (hi_water > 0xf)
4095 			hi_water = 0xf;
4096 		else if (hi_water == 0)
4097 			lo_water = 0;
4098 		val |= lo_water |
4099 		    (hi_water << BCE_L2CTX_RX_HI_WATER_MARK_SHIFT);
4100 	}
4101 
4102  	CTX_WR(rxr->sc, GET_CID_ADDR(rxr->rx_cid),
4103 	    BCE_L2CTX_RX_CTX_TYPE, val);
4104 
4105 	/* Setup the MQ BIN mapping for l2_ctx_host_bseq. */
4106 	if (BCE_CHIP_NUM(rxr->sc) == BCE_CHIP_NUM_5709 ||
4107 	    BCE_CHIP_NUM(rxr->sc) == BCE_CHIP_NUM_5716) {
4108 		val = REG_RD(rxr->sc, BCE_MQ_MAP_L2_5);
4109 		REG_WR(rxr->sc, BCE_MQ_MAP_L2_5, val | BCE_MQ_MAP_L2_5_ARM);
4110 	}
4111 
4112 	/* Point the hardware to the first page in the chain. */
4113 	val = BCE_ADDR_HI(rxr->rx_bd_chain_paddr[0]);
4114 	CTX_WR(rxr->sc, GET_CID_ADDR(rxr->rx_cid),
4115 	    BCE_L2CTX_RX_NX_BDHADDR_HI, val);
4116 	val = BCE_ADDR_LO(rxr->rx_bd_chain_paddr[0]);
4117 	CTX_WR(rxr->sc, GET_CID_ADDR(rxr->rx_cid),
4118 	    BCE_L2CTX_RX_NX_BDHADDR_LO, val);
4119 }
4120 
4121 /****************************************************************************/
4122 /* Allocate memory and initialize the RX data structures.                   */
4123 /*                                                                          */
4124 /* Returns:                                                                 */
4125 /*   0 for success, positive value for failure.                             */
4126 /****************************************************************************/
static int
bce_init_rx_chain(struct bce_rx_ring *rxr)
{
	struct rx_bd *rxbd;
	int i, rc = 0;
	uint16_t prod, chain_prod;
	uint32_t prod_bseq;

	/* Initialize the RX producer and consumer indices. */
	rxr->rx_prod = 0;
	rxr->rx_cons = 0;
	rxr->rx_prod_bseq = 0;
	rxr->free_rx_bd = USABLE_RX_BD(rxr);
	rxr->max_rx_bd = USABLE_RX_BD(rxr);

	/* Clear cache status index */
	rxr->last_status_idx = 0;

	/*
	 * Initialize the RX next pointer chain entries: the last BD of
	 * each page points at the next page, and the last page points
	 * back to the first, forming a ring.
	 */
	for (i = 0; i < rxr->rx_pages; i++) {
		int j;

		rxbd = &rxr->rx_bd_chain[i][USABLE_RX_BD_PER_PAGE];

		/* Check if we've reached the last page. */
		if (i == (rxr->rx_pages - 1))
			j = 0;
		else
			j = i + 1;

		/* Setup the chain page pointers. */
		rxbd->rx_bd_haddr_hi =
		    htole32(BCE_ADDR_HI(rxr->rx_bd_chain_paddr[j]));
		rxbd->rx_bd_haddr_lo =
		    htole32(BCE_ADDR_LO(rxr->rx_bd_chain_paddr[j]));
	}

	/* Allocate mbuf clusters for the rx_bd chain. */
	prod = prod_bseq = 0;
	while (prod < TOTAL_RX_BD(rxr)) {
		chain_prod = RX_CHAIN_IDX(rxr, prod);
		if (bce_newbuf_std(rxr, &prod, chain_prod, &prod_bseq, 1)) {
			/* Partial fill: hand the chip what we managed. */
			if_printf(&rxr->sc->arpcom.ac_if,
			    "Error filling RX chain: rx_bd[0x%04X]!\n",
			    chain_prod);
			rc = ENOBUFS;
			break;
		}
		prod = NEXT_RX_BD(prod);
	}

	/* Save the RX chain producer index. */
	rxr->rx_prod = prod;
	rxr->rx_prod_bseq = prod_bseq;

	/* Tell the chip about the waiting rx_bd's. */
	REG_WR16(rxr->sc, MB_GET_CID_ADDR(rxr->rx_cid) + BCE_L2MQ_RX_HOST_BDIDX,
	    rxr->rx_prod);
	REG_WR(rxr->sc, MB_GET_CID_ADDR(rxr->rx_cid) + BCE_L2MQ_RX_HOST_BSEQ,
	    rxr->rx_prod_bseq);

	/* Program the RX context for this ring. */
	bce_init_rx_context(rxr);

	return(rc);
}
4192 
4193 /****************************************************************************/
4194 /* Free memory and clear the RX data structures.                            */
4195 /*                                                                          */
4196 /* Returns:                                                                 */
4197 /*   Nothing.                                                               */
4198 /****************************************************************************/
4199 static void
4200 bce_free_rx_chain(struct bce_rx_ring *rxr)
4201 {
4202 	int i;
4203 
4204 	/* Free any mbufs still in the RX mbuf chain. */
4205 	for (i = 0; i < TOTAL_RX_BD(rxr); i++) {
4206 		struct bce_rx_buf *rx_buf = &rxr->rx_bufs[i];
4207 
4208 		if (rx_buf->rx_mbuf_ptr != NULL) {
4209 			bus_dmamap_unload(rxr->rx_mbuf_tag,
4210 			    rx_buf->rx_mbuf_map);
4211 			m_freem(rx_buf->rx_mbuf_ptr);
4212 			rx_buf->rx_mbuf_ptr = NULL;
4213 		}
4214 	}
4215 
4216 	/* Clear each RX chain page. */
4217 	for (i = 0; i < rxr->rx_pages; i++)
4218 		bzero(rxr->rx_bd_chain[i], BCE_RX_CHAIN_PAGE_SZ);
4219 }
4220 
4221 /****************************************************************************/
4222 /* Set media options.                                                       */
4223 /*                                                                          */
4224 /* Returns:                                                                 */
4225 /*   0 for success, positive value for failure.                             */
4226 /****************************************************************************/
4227 static int
4228 bce_ifmedia_upd(struct ifnet *ifp)
4229 {
4230 	struct bce_softc *sc = ifp->if_softc;
4231 	struct mii_data *mii = device_get_softc(sc->bce_miibus);
4232 	int error = 0;
4233 
4234 	/*
4235 	 * 'mii' will be NULL, when this function is called on following
4236 	 * code path: bce_attach() -> bce_mgmt_init()
4237 	 */
4238 	if (mii != NULL) {
4239 		/* Make sure the MII bus has been enumerated. */
4240 		sc->bce_link = 0;
4241 		if (mii->mii_instance) {
4242 			struct mii_softc *miisc;
4243 
4244 			LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
4245 				mii_phy_reset(miisc);
4246 		}
4247 		error = mii_mediachg(mii);
4248 	}
4249 	return error;
4250 }
4251 
4252 /****************************************************************************/
4253 /* Reports current media status.                                            */
4254 /*                                                                          */
4255 /* Returns:                                                                 */
4256 /*   Nothing.                                                               */
4257 /****************************************************************************/
4258 static void
4259 bce_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
4260 {
4261 	struct bce_softc *sc = ifp->if_softc;
4262 	struct mii_data *mii = device_get_softc(sc->bce_miibus);
4263 
4264 	mii_pollstat(mii);
4265 	ifmr->ifm_active = mii->mii_media_active;
4266 	ifmr->ifm_status = mii->mii_media_status;
4267 }
4268 
4269 /****************************************************************************/
4270 /* Handles PHY generated interrupt events.                                  */
4271 /*                                                                          */
4272 /* Returns:                                                                 */
4273 /*   Nothing.                                                               */
4274 /****************************************************************************/
static void
bce_phy_intr(struct bce_softc *sc)
{
	uint32_t new_link_state, old_link_state;
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ASSERT_SERIALIZED(&sc->main_serialize);

	/*
	 * Compare the current link attention bit against the
	 * previously acknowledged copy in the status block.
	 */
	new_link_state = sc->status_block->status_attn_bits &
			 STATUS_ATTN_BITS_LINK_STATE;
	old_link_state = sc->status_block->status_attn_bits_ack &
			 STATUS_ATTN_BITS_LINK_STATE;

	/* Handle any changes if the link state has changed. */
	if (new_link_state != old_link_state) {	/* XXX redundant? */
		/* Update the status_attn_bits_ack field in the status block. */
		if (new_link_state) {
			/* Acknowledge link-up by setting the attention ack bit. */
			REG_WR(sc, BCE_PCICFG_STATUS_BIT_SET_CMD,
			       STATUS_ATTN_BITS_LINK_STATE);
			if (bootverbose)
				if_printf(ifp, "Link is now UP.\n");
		} else {
			/* Acknowledge link-down by clearing the attention ack bit. */
			REG_WR(sc, BCE_PCICFG_STATUS_BIT_CLEAR_CMD,
			       STATUS_ATTN_BITS_LINK_STATE);
			if (bootverbose)
				if_printf(ifp, "Link is now DOWN.\n");
		}

		/*
		 * Assume link is down and allow tick routine to
		 * update the state based on the actual media state.
		 */
		sc->bce_link = 0;
		callout_stop(&sc->bce_tick_callout);
		bce_tick_serialized(sc);
	}

	/* Acknowledge the link change interrupt. */
	REG_WR(sc, BCE_EMAC_STATUS, BCE_EMAC_STATUS_LINK_CHANGE);
}
4315 
4316 /****************************************************************************/
4317 /* Reads the receive consumer value from the status block (skipping over    */
4318 /* chain page pointer if necessary).                                        */
4319 /*                                                                          */
4320 /* Returns:                                                                 */
4321 /*   hw_cons                                                                */
4322 /****************************************************************************/
4323 static __inline uint16_t
4324 bce_get_hw_rx_cons(struct bce_rx_ring *rxr)
4325 {
4326 	uint16_t hw_cons = *rxr->rx_hw_cons;
4327 
4328 	if ((hw_cons & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
4329 		hw_cons++;
4330 	return hw_cons;
4331 }
4332 
4333 /****************************************************************************/
4334 /* Handles received frame interrupt events.                                 */
4335 /*                                                                          */
4336 /* Returns:                                                                 */
4337 /*   Nothing.                                                               */
4338 /****************************************************************************/
static void
bce_rx_intr(struct bce_rx_ring *rxr, int count, uint16_t hw_cons)
{
	struct ifnet *ifp = &rxr->sc->arpcom.ac_if;
	uint16_t sw_cons, sw_chain_cons, sw_prod, sw_chain_prod;
	uint32_t sw_prod_bseq;
	int cpuid = mycpuid;

	ASSERT_SERIALIZED(&rxr->rx_serialize);

	/* Get working copies of the driver's view of the RX indices. */
	sw_cons = rxr->rx_cons;
	sw_prod = rxr->rx_prod;
	sw_prod_bseq = rxr->rx_prod_bseq;

	/* Scan through the receive chain as long as there is work to do. */
	while (sw_cons != hw_cons) {
		struct pktinfo pi0, *pi = NULL;
		struct bce_rx_buf *rx_buf;
		struct mbuf *m = NULL;
		struct l2_fhdr *l2fhdr = NULL;
		unsigned int len;
		uint32_t status = 0;

#ifdef IFPOLL_ENABLE
		/* In polling mode, honor the caller-supplied burst limit. */
		if (count >= 0 && count-- == 0)
			break;
#endif

		/*
		 * Convert the producer/consumer indices
		 * to an actual rx_bd index.
		 */
		sw_chain_cons = RX_CHAIN_IDX(rxr, sw_cons);
		sw_chain_prod = RX_CHAIN_IDX(rxr, sw_prod);
		rx_buf = &rxr->rx_bufs[sw_chain_cons];

		rxr->free_rx_bd++;

		/* The mbuf is stored with the last rx_bd entry of a packet. */
		if (rx_buf->rx_mbuf_ptr != NULL) {
			if (sw_chain_cons != sw_chain_prod) {
				/*
				 * Producer/consumer mismatch indicates a
				 * driver accounting error; drop the frame
				 * and recycle the descriptor.
				 */
				if_printf(ifp, "RX cons(%d) != prod(%d), "
				    "drop!\n", sw_chain_cons, sw_chain_prod);
				IFNET_STAT_INC(ifp, ierrors, 1);

				bce_setup_rxdesc_std(rxr, sw_chain_cons,
				    &sw_prod_bseq);
				m = NULL;
				goto bce_rx_int_next_rx;
			}

			/* Unmap the mbuf from DMA space. */
			bus_dmamap_sync(rxr->rx_mbuf_tag, rx_buf->rx_mbuf_map,
			    BUS_DMASYNC_POSTREAD);

			/* Save the mbuf from the driver's chain. */
			m = rx_buf->rx_mbuf_ptr;

			/*
			 * Frames received on the NetXtreme II are prepended
			 * with an l2_fhdr structure which provides status
			 * information about the received frame (including
			 * VLAN tags and checksum info).  The frames are also
			 * automatically adjusted to align the IP header
			 * (i.e. two null bytes are inserted before the
			 * Ethernet header).  As a result the data DMA'd by
			 * the controller into the mbuf is as follows:
			 *
			 * +---------+-----+---------------------+-----+
			 * | l2_fhdr | pad | packet data         | FCS |
			 * +---------+-----+---------------------+-----+
			 *
			 * The l2_fhdr needs to be checked and skipped and the
			 * FCS needs to be stripped before sending the packet
			 * up the stack.
			 */
			l2fhdr = mtod(m, struct l2_fhdr *);

			len = l2fhdr->l2_fhdr_pkt_len;
			status = l2fhdr->l2_fhdr_status;

			/* Strip the trailing FCS from the reported length. */
			len -= ETHER_CRC_LEN;

			/* Check the received frame for errors. */
			if (status & (L2_FHDR_ERRORS_BAD_CRC |
				      L2_FHDR_ERRORS_PHY_DECODE |
				      L2_FHDR_ERRORS_ALIGNMENT |
				      L2_FHDR_ERRORS_TOO_SHORT |
				      L2_FHDR_ERRORS_GIANT_FRAME)) {
				IFNET_STAT_INC(ifp, ierrors, 1);

				/* Reuse the mbuf for a new frame. */
				bce_setup_rxdesc_std(rxr, sw_chain_prod,
				    &sw_prod_bseq);
				m = NULL;
				goto bce_rx_int_next_rx;
			}

			/*
			 * Get a new mbuf for the rx_bd.   If no new
			 * mbufs are available then reuse the current mbuf,
			 * log an ierror on the interface, and generate
			 * an error in the system log.
			 */
			if (bce_newbuf_std(rxr, &sw_prod, sw_chain_prod,
			    &sw_prod_bseq, 0)) {
				IFNET_STAT_INC(ifp, ierrors, 1);

				/* Try and reuse the existing mbuf. */
				bce_setup_rxdesc_std(rxr, sw_chain_prod,
				    &sw_prod_bseq);
				m = NULL;
				goto bce_rx_int_next_rx;
			}

			/*
			 * Skip over the l2_fhdr when passing
			 * the data up the stack.
			 */
			m_adj(m, sizeof(struct l2_fhdr) + ETHER_ALIGN);

			m->m_pkthdr.len = m->m_len = len;
			m->m_pkthdr.rcvif = ifp;

			/* Validate the checksum if offload enabled. */
			if (ifp->if_capenable & IFCAP_RXCSUM) {
				/* Check for an IP datagram. */
				if (status & L2_FHDR_STATUS_IP_DATAGRAM) {
					m->m_pkthdr.csum_flags |=
						CSUM_IP_CHECKED;

					/* Check if the IP checksum is valid. */
					if ((l2fhdr->l2_fhdr_ip_xsum ^
					     0xffff) == 0) {
						m->m_pkthdr.csum_flags |=
							CSUM_IP_VALID;
					}
				}

				/* Check for a valid TCP/UDP frame. */
				if (status & (L2_FHDR_STATUS_TCP_SEGMENT |
					      L2_FHDR_STATUS_UDP_DATAGRAM)) {

					/* Check for a good TCP/UDP checksum. */
					if ((status &
					     (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0) {
						m->m_pkthdr.csum_data =
						l2fhdr->l2_fhdr_tcp_udp_xsum;
						m->m_pkthdr.csum_flags |=
							CSUM_DATA_VALID |
							CSUM_PSEUDO_HDR;
					}
				}
			}
			/* Attach the hardware RSS hash when available. */
			if (ifp->if_capenable & IFCAP_RSS) {
				pi = bce_rss_pktinfo(&pi0, status, l2fhdr);
				if (pi != NULL &&
				    (status & L2_FHDR_STATUS_RSS_HASH)) {
					m->m_flags |= M_HASH;
					m->m_pkthdr.hash =
					    toeplitz_hash(l2fhdr->l2_fhdr_hash);
				}
			}

			IFNET_STAT_INC(ifp, ipackets, 1);
bce_rx_int_next_rx:
			sw_prod = NEXT_RX_BD(sw_prod);
		}

		sw_cons = NEXT_RX_BD(sw_cons);

		/* If we have a packet, pass it up the stack */
		if (m) {
			if (status & L2_FHDR_STATUS_L2_VLAN_TAG) {
				m->m_flags |= M_VLANTAG;
				m->m_pkthdr.ether_vlantag =
					l2fhdr->l2_fhdr_vlan_tag;
			}
			ether_input_pkt(ifp, m, pi, cpuid);
#ifdef BCE_RSS_DEBUG
			rxr->rx_pkts++;
#endif
		}
	}

	/* Publish the updated indices back to the ring state. */
	rxr->rx_cons = sw_cons;
	rxr->rx_prod = sw_prod;
	rxr->rx_prod_bseq = sw_prod_bseq;

	/* Notify the chip of the new producer index and byte sequence. */
	REG_WR16(rxr->sc, MB_GET_CID_ADDR(rxr->rx_cid) + BCE_L2MQ_RX_HOST_BDIDX,
	    rxr->rx_prod);
	REG_WR(rxr->sc, MB_GET_CID_ADDR(rxr->rx_cid) + BCE_L2MQ_RX_HOST_BSEQ,
	    rxr->rx_prod_bseq);
}
4535 
4536 /****************************************************************************/
4537 /* Reads the transmit consumer value from the status block (skipping over   */
4538 /* chain page pointer if necessary).                                        */
4539 /*                                                                          */
4540 /* Returns:                                                                 */
4541 /*   hw_cons                                                                */
4542 /****************************************************************************/
4543 static __inline uint16_t
4544 bce_get_hw_tx_cons(struct bce_tx_ring *txr)
4545 {
4546 	uint16_t hw_cons = *txr->tx_hw_cons;
4547 
4548 	if ((hw_cons & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
4549 		hw_cons++;
4550 	return hw_cons;
4551 }
4552 
4553 /****************************************************************************/
4554 /* Handles transmit completion interrupt events.                            */
4555 /*                                                                          */
4556 /* Returns:                                                                 */
4557 /*   Nothing.                                                               */
4558 /****************************************************************************/
static void
bce_tx_intr(struct bce_tx_ring *txr, uint16_t hw_tx_cons)
{
	struct ifnet *ifp = &txr->sc->arpcom.ac_if;
	uint16_t sw_tx_cons, sw_tx_chain_cons;

	ASSERT_SERIALIZED(&txr->tx_serialize);

	/* Get the hardware's view of the TX consumer index. */
	sw_tx_cons = txr->tx_cons;

	/* Cycle through any completed TX chain page entries. */
	while (sw_tx_cons != hw_tx_cons) {
		struct bce_tx_buf *tx_buf;

		sw_tx_chain_cons = TX_CHAIN_IDX(txr, sw_tx_cons);
		tx_buf = &txr->tx_bufs[sw_tx_chain_cons];

		/*
		 * Free the associated mbuf. Remember
		 * that only the last tx_bd of a packet
		 * has an mbuf pointer and DMA map.
		 */
		if (tx_buf->tx_mbuf_ptr != NULL) {
			/* Unmap the mbuf. */
			bus_dmamap_unload(txr->tx_mbuf_tag,
			    tx_buf->tx_mbuf_map);

			/* Free the mbuf. */
			m_freem(tx_buf->tx_mbuf_ptr);
			tx_buf->tx_mbuf_ptr = NULL;

			IFNET_STAT_INC(ifp, opackets, 1);
#ifdef BCE_TSS_DEBUG
			txr->tx_pkts++;
#endif
		}

		/* One descriptor reclaimed per iteration. */
		txr->used_tx_bd--;
		sw_tx_cons = NEXT_TX_BD(sw_tx_cons);
	}

	if (txr->used_tx_bd == 0) {
		/* Clear the TX timeout timer. */
		txr->tx_watchdog.wd_timer = 0;
	}

	/* Clear the tx hardware queue full flag. */
	if (txr->max_tx_bd - txr->used_tx_bd >= BCE_TX_SPARE_SPACE)
		ifsq_clr_oactive(txr->ifsq);
	txr->tx_cons = sw_tx_cons;
}
4611 
4612 /****************************************************************************/
4613 /* Disables interrupt generation.                                           */
4614 /*                                                                          */
4615 /* Returns:                                                                 */
4616 /*   Nothing.                                                               */
4617 /****************************************************************************/
static void
bce_disable_intr(struct bce_softc *sc)
{
	int i;

	/* Mask interrupts on every RX ring. */
	for (i = 0; i < sc->rx_ring_cnt; ++i) {
		REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
		    (sc->rx_rings[i].idx << 24) |
		    BCE_PCICFG_INT_ACK_CMD_MASK_INT);
	}
	/* Read back to flush the preceding writes to the device. */
	REG_RD(sc, BCE_PCICFG_INT_ACK_CMD);

	/* Stop the MSI-loss check callout and reset its bookkeeping. */
	callout_stop(&sc->bce_ckmsi_callout);
	sc->bce_msi_maylose = FALSE;
	sc->bce_check_rx_cons = 0;
	sc->bce_check_tx_cons = 0;
	sc->bce_check_status_idx = 0xffff;

	/* Block the interrupt handlers from running. */
	for (i = 0; i < sc->rx_ring_cnt; ++i)
		lwkt_serialize_handler_disable(sc->bce_msix[i].msix_serialize);
}
4639 
4640 /****************************************************************************/
4641 /* Enables interrupt generation.                                            */
4642 /*                                                                          */
4643 /* Returns:                                                                 */
4644 /*   Nothing.                                                               */
4645 /****************************************************************************/
static void
bce_enable_intr(struct bce_softc *sc)
{
	int i;

	/* Allow the interrupt handlers to run again. */
	for (i = 0; i < sc->rx_ring_cnt; ++i)
		lwkt_serialize_handler_enable(sc->bce_msix[i].msix_serialize);

	for (i = 0; i < sc->rx_ring_cnt; ++i) {
		struct bce_rx_ring *rxr = &sc->rx_rings[i];

		/*
		 * Two-step unmask sequence: first ack the last seen
		 * status index with interrupts still masked, then
		 * repeat the ack with the mask bit cleared.
		 */
		REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, (rxr->idx << 24) |
		       BCE_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       BCE_PCICFG_INT_ACK_CMD_MASK_INT |
		       rxr->last_status_idx);
		REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, (rxr->idx << 24) |
		       BCE_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       rxr->last_status_idx);
	}
	/* Force an immediate coalescing pass so pending events fire. */
	REG_WR(sc, BCE_HC_COMMAND, sc->hc_command | BCE_HC_COMMAND_COAL_NOW);

	/* Restart the MSI-loss workaround check if it is required. */
	if (sc->bce_flags & BCE_CHECK_MSI_FLAG) {
		sc->bce_msi_maylose = FALSE;
		sc->bce_check_rx_cons = 0;
		sc->bce_check_tx_cons = 0;
		sc->bce_check_status_idx = 0xffff;

		if (bootverbose)
			if_printf(&sc->arpcom.ac_if, "check msi\n");

		callout_reset_bycpu(&sc->bce_ckmsi_callout, BCE_MSI_CKINTVL,
		    bce_check_msi, sc, sc->bce_msix[0].msix_cpuid);
	}
}
4680 
4681 /****************************************************************************/
4682 /* Reenables interrupt generation during interrupt handling.                */
4683 /*                                                                          */
4684 /* Returns:                                                                 */
4685 /*   Nothing.                                                               */
4686 /****************************************************************************/
static void
bce_reenable_intr(struct bce_rx_ring *rxr)
{
	/* Ack the last seen status index for this ring with the mask cleared. */
	REG_WR(rxr->sc, BCE_PCICFG_INT_ACK_CMD, (rxr->idx << 24) |
	       BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | rxr->last_status_idx);
}
4693 
4694 /****************************************************************************/
4695 /* Handles controller initialization.                                       */
4696 /*                                                                          */
4697 /* Returns:                                                                 */
4698 /*   Nothing.                                                               */
4699 /****************************************************************************/
4700 static void
4701 bce_init(void *xsc)
4702 {
4703 	struct bce_softc *sc = xsc;
4704 	struct ifnet *ifp = &sc->arpcom.ac_if;
4705 	uint32_t ether_mtu;
4706 	int error, i;
4707 	boolean_t polling;
4708 
4709 	ASSERT_IFNET_SERIALIZED_ALL(ifp);
4710 
4711 	/* Check if the driver is still running and bail out if it is. */
4712 	if (ifp->if_flags & IFF_RUNNING)
4713 		return;
4714 
4715 	bce_stop(sc);
4716 
4717 	error = bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
4718 	if (error) {
4719 		if_printf(ifp, "Controller reset failed!\n");
4720 		goto back;
4721 	}
4722 
4723 	error = bce_chipinit(sc);
4724 	if (error) {
4725 		if_printf(ifp, "Controller initialization failed!\n");
4726 		goto back;
4727 	}
4728 
4729 	error = bce_blockinit(sc);
4730 	if (error) {
4731 		if_printf(ifp, "Block initialization failed!\n");
4732 		goto back;
4733 	}
4734 
4735 	/* Load our MAC address. */
4736 	bcopy(IF_LLADDR(ifp), sc->eaddr, ETHER_ADDR_LEN);
4737 	bce_set_mac_addr(sc);
4738 
4739 	/* Calculate and program the Ethernet MTU size. */
4740 	ether_mtu = ETHER_HDR_LEN + EVL_ENCAPLEN + ifp->if_mtu + ETHER_CRC_LEN;
4741 
4742 	/*
4743 	 * Program the mtu, enabling jumbo frame
4744 	 * support if necessary.  Also set the mbuf
4745 	 * allocation count for RX frames.
4746 	 */
4747 	if (ether_mtu > ETHER_MAX_LEN + EVL_ENCAPLEN) {
4748 #ifdef notyet
4749 		REG_WR(sc, BCE_EMAC_RX_MTU_SIZE,
4750 		       min(ether_mtu, BCE_MAX_JUMBO_ETHER_MTU) |
4751 		       BCE_EMAC_RX_MTU_SIZE_JUMBO_ENA);
4752 #else
4753 		panic("jumbo buffer is not supported yet");
4754 #endif
4755 	} else {
4756 		REG_WR(sc, BCE_EMAC_RX_MTU_SIZE, ether_mtu);
4757 	}
4758 
4759 	/* Program appropriate promiscuous/multicast filtering. */
4760 	bce_set_rx_mode(sc);
4761 
4762 	/*
4763 	 * Init RX buffer descriptor chain.
4764 	 */
4765 	REG_WR(sc, BCE_RLUP_RSS_CONFIG, 0);
4766 	bce_reg_wr_ind(sc, BCE_RXP_SCRATCH_RSS_TBL_SZ, 0);
4767 
4768 	for (i = 0; i < sc->rx_ring_cnt; ++i)
4769 		bce_init_rx_chain(&sc->rx_rings[i]);	/* XXX return value */
4770 
4771 	if (sc->rx_ring_cnt > 1)
4772 		bce_init_rss(sc);
4773 
4774 	/*
4775 	 * Init TX buffer descriptor chain.
4776 	 */
4777 	REG_WR(sc, BCE_TSCH_TSS_CFG, 0);
4778 
4779 	for (i = 0; i < sc->tx_ring_cnt; ++i)
4780 		bce_init_tx_chain(&sc->tx_rings[i]);
4781 
4782 	if (sc->tx_ring_cnt > 1) {
4783 		REG_WR(sc, BCE_TSCH_TSS_CFG,
4784 		    ((sc->tx_ring_cnt - 1) << 24) | (TX_TSS_CID << 7));
4785 	}
4786 
4787 	polling = FALSE;
4788 #ifdef IFPOLL_ENABLE
4789 	if (ifp->if_flags & IFF_NPOLLING)
4790 		polling = TRUE;
4791 #endif
4792 
4793 	if (polling) {
4794 		/* Disable interrupts if we are polling. */
4795 		bce_disable_intr(sc);
4796 
4797 		/* Change coalesce parameters */
4798 		bce_npoll_coal_change(sc);
4799 	} else {
4800 		/* Enable host interrupts. */
4801 		bce_enable_intr(sc);
4802 	}
4803 	bce_set_timer_cpuid(sc, polling);
4804 
4805 	bce_ifmedia_upd(ifp);
4806 
4807 	ifp->if_flags |= IFF_RUNNING;
4808 	for (i = 0; i < sc->tx_ring_cnt; ++i) {
4809 		ifsq_clr_oactive(sc->tx_rings[i].ifsq);
4810 		ifsq_watchdog_start(&sc->tx_rings[i].tx_watchdog);
4811 	}
4812 
4813 	callout_reset_bycpu(&sc->bce_tick_callout, hz, bce_tick, sc,
4814 	    sc->bce_timer_cpuid);
4815 back:
4816 	if (error)
4817 		bce_stop(sc);
4818 }
4819 
4820 /****************************************************************************/
4821 /* Initialize the controller just enough so that any management firmware    */
/* running on the device will continue to operate correctly.                */
4823 /*                                                                          */
4824 /* Returns:                                                                 */
4825 /*   Nothing.                                                               */
4826 /****************************************************************************/
4827 static void
4828 bce_mgmt_init(struct bce_softc *sc)
4829 {
4830 	struct ifnet *ifp = &sc->arpcom.ac_if;
4831 
4832 	/* Bail out if management firmware is not running. */
4833 	if (!(sc->bce_flags & BCE_MFW_ENABLE_FLAG))
4834 		return;
4835 
4836 	/* Enable all critical blocks in the MAC. */
4837 	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
4838 	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
4839 		REG_WR(sc, BCE_MISC_ENABLE_SET_BITS,
4840 		    BCE_MISC_ENABLE_DEFAULT_XI);
4841 	} else {
4842 		REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, BCE_MISC_ENABLE_DEFAULT);
4843 	}
4844 	REG_RD(sc, BCE_MISC_ENABLE_SET_BITS);
4845 	DELAY(20);
4846 
4847 	bce_ifmedia_upd(ifp);
4848 }
4849 
4850 /****************************************************************************/
/* Encapsulates an mbuf cluster into the tx_bd chain structure and makes    */
4852 /* memory visible to the controller.                                        */
4853 /*                                                                          */
4854 /* Returns:                                                                 */
4855 /*   0 for success, positive value for failure.                             */
4856 /****************************************************************************/
static int
bce_encap(struct bce_tx_ring *txr, struct mbuf **m_head, int *nsegs_used)
{
	bus_dma_segment_t segs[BCE_MAX_SEGMENTS];
	bus_dmamap_t map, tmp_map;
	struct mbuf *m0 = *m_head;
	struct tx_bd *txbd = NULL;
	uint16_t vlan_tag = 0, flags = 0, mss = 0;
	uint16_t chain_prod, chain_prod_start, prod;
	uint32_t prod_bseq;
	int i, error, maxsegs, nsegs;

	/* Transfer any checksum offload flags to the bd. */
	if (m0->m_pkthdr.csum_flags & CSUM_TSO) {
		/* bce_tso_setup() may replace the mbuf chain; reload m0. */
		error = bce_tso_setup(txr, m_head, &flags, &mss);
		if (error)
			return ENOBUFS;
		m0 = *m_head;
	} else if (m0->m_pkthdr.csum_flags & BCE_CSUM_FEATURES) {
		if (m0->m_pkthdr.csum_flags & CSUM_IP)
			flags |= TX_BD_FLAGS_IP_CKSUM;
		if (m0->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
			flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}

	/* Transfer any VLAN tags to the bd. */
	if (m0->m_flags & M_VLANTAG) {
		flags |= TX_BD_FLAGS_VLAN_TAG;
		vlan_tag = m0->m_pkthdr.ether_vlantag;
	}

	prod = txr->tx_prod;
	chain_prod_start = chain_prod = TX_CHAIN_IDX(txr, prod);

	/* Map the mbuf into DMAable memory. */
	map = txr->tx_bufs[chain_prod_start].tx_mbuf_map;

	/* Cap the segment count at the free descriptors, then hardware max. */
	maxsegs = txr->max_tx_bd - txr->used_tx_bd;
	KASSERT(maxsegs >= BCE_TX_SPARE_SPACE,
		("not enough segments %d", maxsegs));
	if (maxsegs > BCE_MAX_SEGMENTS)
		maxsegs = BCE_MAX_SEGMENTS;

	/* Map the mbuf into our DMA address space. */
	error = bus_dmamap_load_mbuf_defrag(txr->tx_mbuf_tag, map, m_head,
			segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
	if (error)
		goto back;
	bus_dmamap_sync(txr->tx_mbuf_tag, map, BUS_DMASYNC_PREWRITE);

	*nsegs_used += nsegs;

	/* Reset m0; the defrag above may have replaced the chain. */
	m0 = *m_head;

	/* prod points to an empty tx_bd at this point. */
	prod_bseq  = txr->tx_prod_bseq;

	/*
	 * Cycle through each mbuf segment that makes up
	 * the outgoing frame, gathering the mapping info
	 * for that segment and creating a tx_bd to for
	 * the mbuf.
	 */
	for (i = 0; i < nsegs; i++) {
		chain_prod = TX_CHAIN_IDX(txr, prod);
		txbd =
		&txr->tx_bd_chain[TX_PAGE(chain_prod)][TX_IDX(chain_prod)];

		txbd->tx_bd_haddr_lo = htole32(BCE_ADDR_LO(segs[i].ds_addr));
		txbd->tx_bd_haddr_hi = htole32(BCE_ADDR_HI(segs[i].ds_addr));
		/*
		 * NOTE(review): mixing htole32() on the MSS half with
		 * htole16() on the length half only produces the right
		 * layout on little-endian hosts — confirm for big-endian.
		 */
		txbd->tx_bd_mss_nbytes = htole32(mss << 16) |
		    htole16(segs[i].ds_len);
		txbd->tx_bd_vlan_tag = htole16(vlan_tag);
		txbd->tx_bd_flags = htole16(flags);

		prod_bseq += segs[i].ds_len;
		if (i == 0)
			txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_START);
		prod = NEXT_TX_BD(prod);
	}

	/* Set the END flag on the last TX buffer descriptor. */
	txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_END);

	/*
	 * Ensure that the mbuf pointer for this transmission
	 * is placed at the array index of the last
	 * descriptor in this chain.  This is done
	 * because a single map is used for all
	 * segments of the mbuf and we don't want to
	 * unload the map before all of the segments
	 * have been freed.
	 */
	txr->tx_bufs[chain_prod].tx_mbuf_ptr = m0;

	/* Swap the maps so the loaded one lives with the mbuf pointer. */
	tmp_map = txr->tx_bufs[chain_prod].tx_mbuf_map;
	txr->tx_bufs[chain_prod].tx_mbuf_map = map;
	txr->tx_bufs[chain_prod_start].tx_mbuf_map = tmp_map;

	txr->used_tx_bd += nsegs;

	/* prod points to the next free tx_bd at this point. */
	txr->tx_prod = prod;
	txr->tx_prod_bseq = prod_bseq;
back:
	if (error) {
		m_freem(*m_head);
		*m_head = NULL;
	}
	return error;
}
4969 
/*
 * Hand the queued tx_bd's to the chip by publishing the producer
 * index and byte sequence through the ring's mailbox registers.
 */
static void
bce_xmit(struct bce_tx_ring *txr)
{
	/* Start the transmit. */
	REG_WR16(txr->sc, MB_GET_CID_ADDR(txr->tx_cid) + BCE_L2CTX_TX_HOST_BIDX,
	    txr->tx_prod);
	REG_WR(txr->sc, MB_GET_CID_ADDR(txr->tx_cid) + BCE_L2CTX_TX_HOST_BSEQ,
	    txr->tx_prod_bseq);
}
4979 
4980 /****************************************************************************/
4981 /* Main transmit routine when called from another routine with a lock.      */
4982 /*                                                                          */
4983 /* Returns:                                                                 */
4984 /*   Nothing.                                                               */
4985 /****************************************************************************/
static void
bce_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
{
	struct bce_softc *sc = ifp->if_softc;
	struct bce_tx_ring *txr = ifsq_get_priv(ifsq);
	int count = 0;

	KKASSERT(txr->ifsq == ifsq);
	ASSERT_SERIALIZED(&txr->tx_serialize);

	/* If there's no link, purge any queued frames and exit. */
	if (!sc->bce_link) {
		ifsq_purge(ifsq);
		return;
	}

	/* Bail if the interface is down or the queue is already stalled. */
	if ((ifp->if_flags & IFF_RUNNING) == 0 || ifsq_is_oactive(ifsq))
		return;

	for (;;) {
		struct mbuf *m_head;

		/*
		 * We keep BCE_TX_SPARE_SPACE entries, so bce_encap() is
		 * unlikely to fail.
		 */
		if (txr->max_tx_bd - txr->used_tx_bd < BCE_TX_SPARE_SPACE) {
			ifsq_set_oactive(ifsq);
			break;
		}

		/* Check for any frames to send. */
		m_head = ifsq_dequeue(ifsq);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, place the mbuf back at the
		 * head of the queue and set the OACTIVE flag
		 * to wait for the NIC to drain the chain.
		 */
		if (bce_encap(txr, &m_head, &count)) {
			IFNET_STAT_INC(ifp, oerrors, 1);
			if (txr->used_tx_bd == 0) {
				/* Ring is empty; just try the next frame. */
				continue;
			} else {
				ifsq_set_oactive(ifsq);
				break;
			}
		}

		/* Batch doorbell writes: kick the chip every tx_wreg frames. */
		if (count >= txr->tx_wreg) {
			bce_xmit(txr);
			count = 0;
		}

		/* Send a copy of the frame to any BPF listeners. */
		ETHER_BPF_MTAP(ifp, m_head);

		/* Set the tx timeout. */
		txr->tx_watchdog.wd_timer = BCE_TX_TIMEOUT;
	}
	/* Flush any frames queued since the last doorbell write. */
	if (count > 0)
		bce_xmit(txr);
}
5052 
5053 /****************************************************************************/
5054 /* Handles any IOCTL calls from the operating system.                       */
5055 /*                                                                          */
5056 /* Returns:                                                                 */
5057 /*   0 for success, positive value for failure.                             */
5058 /****************************************************************************/
5059 static int
5060 bce_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
5061 {
5062 	struct bce_softc *sc = ifp->if_softc;
5063 	struct ifreq *ifr = (struct ifreq *)data;
5064 	struct mii_data *mii;
5065 	int mask, error = 0;
5066 
5067 	ASSERT_IFNET_SERIALIZED_ALL(ifp);
5068 
5069 	switch(command) {
5070 	case SIOCSIFMTU:
5071 		/* Check that the MTU setting is supported. */
5072 		if (ifr->ifr_mtu < BCE_MIN_MTU ||
5073 #ifdef notyet
5074 		    ifr->ifr_mtu > BCE_MAX_JUMBO_MTU
5075 #else
5076 		    ifr->ifr_mtu > ETHERMTU
5077 #endif
5078 		   ) {
5079 			error = EINVAL;
5080 			break;
5081 		}
5082 
5083 		ifp->if_mtu = ifr->ifr_mtu;
5084 		ifp->if_flags &= ~IFF_RUNNING;	/* Force reinitialize */
5085 		bce_init(sc);
5086 		break;
5087 
5088 	case SIOCSIFFLAGS:
5089 		if (ifp->if_flags & IFF_UP) {
5090 			if (ifp->if_flags & IFF_RUNNING) {
5091 				mask = ifp->if_flags ^ sc->bce_if_flags;
5092 
5093 				if (mask & (IFF_PROMISC | IFF_ALLMULTI))
5094 					bce_set_rx_mode(sc);
5095 			} else {
5096 				bce_init(sc);
5097 			}
5098 		} else if (ifp->if_flags & IFF_RUNNING) {
5099 			bce_stop(sc);
5100 
5101 			/* If MFW is running, restart the controller a bit. */
5102 			if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) {
5103 				bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
5104 				bce_chipinit(sc);
5105 				bce_mgmt_init(sc);
5106 			}
5107 		}
5108 		sc->bce_if_flags = ifp->if_flags;
5109 		break;
5110 
5111 	case SIOCADDMULTI:
5112 	case SIOCDELMULTI:
5113 		if (ifp->if_flags & IFF_RUNNING)
5114 			bce_set_rx_mode(sc);
5115 		break;
5116 
5117 	case SIOCSIFMEDIA:
5118 	case SIOCGIFMEDIA:
5119 		mii = device_get_softc(sc->bce_miibus);
5120 		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
5121 		break;
5122 
5123 	case SIOCSIFCAP:
5124 		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
5125 		if (mask & IFCAP_HWCSUM) {
5126 			ifp->if_capenable ^= (mask & IFCAP_HWCSUM);
5127 			if (ifp->if_capenable & IFCAP_TXCSUM)
5128 				ifp->if_hwassist |= BCE_CSUM_FEATURES;
5129 			else
5130 				ifp->if_hwassist &= ~BCE_CSUM_FEATURES;
5131 		}
5132 		if (mask & IFCAP_TSO) {
5133 			ifp->if_capenable ^= IFCAP_TSO;
5134 			if (ifp->if_capenable & IFCAP_TSO)
5135 				ifp->if_hwassist |= CSUM_TSO;
5136 			else
5137 				ifp->if_hwassist &= ~CSUM_TSO;
5138 		}
5139 		if (mask & IFCAP_RSS)
5140 			ifp->if_capenable ^= IFCAP_RSS;
5141 		break;
5142 
5143 	default:
5144 		error = ether_ioctl(ifp, command, data);
5145 		break;
5146 	}
5147 	return error;
5148 }
5149 
5150 /****************************************************************************/
5151 /* Transmit timeout handler.                                                */
5152 /*                                                                          */
5153 /* Returns:                                                                 */
5154 /*   Nothing.                                                               */
5155 /****************************************************************************/
5156 static void
5157 bce_watchdog(struct ifaltq_subque *ifsq)
5158 {
5159 	struct ifnet *ifp = ifsq_get_ifp(ifsq);
5160 	struct bce_softc *sc = ifp->if_softc;
5161 	int i;
5162 
5163 	ASSERT_IFNET_SERIALIZED_ALL(ifp);
5164 
5165 	/*
5166 	 * If we are in this routine because of pause frames, then
5167 	 * don't reset the hardware.
5168 	 */
5169 	if (REG_RD(sc, BCE_EMAC_TX_STATUS) & BCE_EMAC_TX_STATUS_XOFFED)
5170 		return;
5171 
5172 	if_printf(ifp, "Watchdog timeout occurred, resetting!\n");
5173 
5174 	ifp->if_flags &= ~IFF_RUNNING;	/* Force reinitialize */
5175 	bce_init(sc);
5176 
5177 	IFNET_STAT_INC(ifp, oerrors, 1);
5178 
5179 	for (i = 0; i < sc->tx_ring_cnt; ++i)
5180 		ifsq_devstart_sched(sc->tx_rings[i].ifsq);
5181 }
5182 
5183 #ifdef IFPOLL_ENABLE
5184 
/*
 * Polling-mode status handler: services link state changes and fatal
 * attention bits that an interrupt would normally report.
 */
static void
bce_npoll_status(struct ifnet *ifp)
{
	struct bce_softc *sc = ifp->if_softc;
	struct status_block *sblk = sc->status_block;
	uint32_t status_attn_bits;

	ASSERT_SERIALIZED(&sc->main_serialize);

	status_attn_bits = sblk->status_attn_bits;

	/* Was it a link change interrupt? */
	if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
	    (sblk->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE)) {
		bce_phy_intr(sc);

		/*
		 * Clear any transient status updates during link state change.
		 */
		REG_WR(sc, BCE_HC_COMMAND,
		    sc->hc_command | BCE_HC_COMMAND_COAL_NOW_WO_INT);
		REG_RD(sc, BCE_HC_COMMAND);
	}

	/*
	 * If any other attention is asserted then the chip is toast;
	 * reinitialize it (skipping the already-held main serializer).
	 */
	if ((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) !=
	     (sblk->status_attn_bits_ack & ~STATUS_ATTN_BITS_LINK_STATE)) {
		if_printf(ifp, "Fatal attention detected: 0x%08X\n",
		    sblk->status_attn_bits);
		bce_serialize_skipmain(sc);
		bce_init(sc);
		bce_deserialize_skipmain(sc);
	}
}
5221 
5222 static void
5223 bce_npoll_rx(struct ifnet *ifp, void *arg, int count)
5224 {
5225 	struct bce_rx_ring *rxr = arg;
5226 	uint16_t hw_rx_cons;
5227 
5228 	ASSERT_SERIALIZED(&rxr->rx_serialize);
5229 
5230 	/*
5231 	 * Save the status block index value for use when enabling
5232 	 * the interrupt.
5233 	 */
5234 	rxr->last_status_idx = *rxr->hw_status_idx;
5235 
5236 	/* Make sure status index is extracted before RX/TX cons */
5237 	cpu_lfence();
5238 
5239 	hw_rx_cons = bce_get_hw_rx_cons(rxr);
5240 
5241 	/* Check for any completed RX frames. */
5242 	if (hw_rx_cons != rxr->rx_cons)
5243 		bce_rx_intr(rxr, count, hw_rx_cons);
5244 }
5245 
/*
 * RX polling handler for the first RX ring when RSS packing is active:
 * services the first ring, then piggybacks the last ring (which holds
 * packets whose masked hash is 0) on the same poll.
 */
static void
bce_npoll_rx_pack(struct ifnet *ifp, void *arg, int count)
{
	struct bce_rx_ring *rxr = arg;

	/* This handler is only installed on the first RX ring. */
	KASSERT(rxr->idx == 0, ("not the first RX ring, but %d", rxr->idx));
	bce_npoll_rx(ifp, rxr, count);

	/* ...and only when the ring counts differ (RSS packing mode). */
	KASSERT(rxr->sc->rx_ring_cnt != rxr->sc->rx_ring_cnt2,
	    ("RX ring count %d, count2 %d", rxr->sc->rx_ring_cnt,
	     rxr->sc->rx_ring_cnt2));

	/* Last ring carries packets whose masked hash is 0 */
	rxr = &rxr->sc->rx_rings[rxr->sc->rx_ring_cnt - 1];

	/* Process the last ring under its own serializer. */
	lwkt_serialize_enter(&rxr->rx_serialize);
	bce_npoll_rx(ifp, rxr, count);
	lwkt_serialize_exit(&rxr->rx_serialize);
}
5265 
5266 static void
5267 bce_npoll_tx(struct ifnet *ifp, void *arg, int count __unused)
5268 {
5269 	struct bce_tx_ring *txr = arg;
5270 	uint16_t hw_tx_cons;
5271 
5272 	ASSERT_SERIALIZED(&txr->tx_serialize);
5273 
5274 	hw_tx_cons = bce_get_hw_tx_cons(txr);
5275 
5276 	/* Check for any completed TX frames. */
5277 	if (hw_tx_cons != txr->tx_cons) {
5278 		bce_tx_intr(txr, hw_tx_cons);
5279 		if (!ifsq_is_empty(txr->ifsq))
5280 			ifsq_devstart(txr->ifsq);
5281 	}
5282 }
5283 
/*
 * Register (info != NULL) or deregister (info == NULL) this device with
 * the kernel's polling(4)/ifpoll framework, binding each TX/RX ring's
 * handler to a CPU and switching interrupt coalescing accordingly.
 */
static void
bce_npoll(struct ifnet *ifp, struct ifpoll_info *info)
{
	struct bce_softc *sc = ifp->if_softc;
	int i;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	if (info != NULL) {
		/* Entering polling mode. */
		info->ifpi_status.status_func = bce_npoll_status;
		info->ifpi_status.serializer = &sc->main_serialize;

		for (i = 0; i < sc->tx_ring_cnt; ++i) {
			struct bce_tx_ring *txr = &sc->tx_rings[i];
			int idx = i + sc->npoll_ofs;

			KKASSERT(idx < ncpus2);
			info->ifpi_tx[idx].poll_func = bce_npoll_tx;
			info->ifpi_tx[idx].arg = txr;
			info->ifpi_tx[idx].serializer = &txr->tx_serialize;
			ifsq_set_cpuid(txr->ifsq, idx);
		}

		for (i = 0; i < sc->rx_ring_cnt2; ++i) {
			struct bce_rx_ring *rxr = &sc->rx_rings[i];
			int idx = i + sc->npoll_ofs;

			KKASSERT(idx < ncpus2);
			if (i == 0 && sc->rx_ring_cnt2 != sc->rx_ring_cnt) {
				/*
				 * If RSS is enabled, the packets whose
				 * masked hash are 0 are queued to the
				 * last RX ring; piggyback the last RX
				 * ring's processing in the first RX
				 * polling handler. (see also: comment
				 * in bce_setup_ring_cnt())
				 */
				if (bootverbose) {
					if_printf(ifp, "npoll pack last "
					    "RX ring on cpu%d\n", idx);
				}
				info->ifpi_rx[idx].poll_func =
				    bce_npoll_rx_pack;
			} else {
				info->ifpi_rx[idx].poll_func = bce_npoll_rx;
			}
			info->ifpi_rx[idx].arg = rxr;
			info->ifpi_rx[idx].serializer = &rxr->rx_serialize;
		}

		if (ifp->if_flags & IFF_RUNNING) {
			/* Disable interrupts; polling takes over. */
			bce_set_timer_cpuid(sc, TRUE);
			bce_disable_intr(sc);
			bce_npoll_coal_change(sc);
		}
	} else {
		/* Leaving polling mode: rebind TX queues to MSI-X CPUs. */
		for (i = 0; i < sc->tx_ring_cnt; ++i) {
			ifsq_set_cpuid(sc->tx_rings[i].ifsq,
			    sc->bce_msix[i].msix_cpuid);
		}

		if (ifp->if_flags & IFF_RUNNING) {
			bce_set_timer_cpuid(sc, FALSE);
			bce_enable_intr(sc);

			/* Restore interrupt-mode coalescing parameters. */
			sc->bce_coalchg_mask |= BCE_COALMASK_TX_BDS_INT |
			    BCE_COALMASK_RX_BDS_INT;
			bce_coal_change(sc);
		}
	}
}
5355 
5356 #endif	/* IFPOLL_ENABLE */
5357 
5358 /*
5359  * Interrupt handler.
5360  */
5361 /****************************************************************************/
5362 /* Main interrupt entry point.  Verifies that the controller generated the  */
5363 /* interrupt and then calls a separate routine for handle the various       */
5364 /* interrupt causes (PHY, TX, RX).                                          */
5365 /*                                                                          */
5366 /* Returns:                                                                 */
5367 /*   0 for success, positive value for failure.                             */
5368 /****************************************************************************/
5369 static void
5370 bce_intr(struct bce_softc *sc)
5371 {
5372 	struct ifnet *ifp = &sc->arpcom.ac_if;
5373 	struct status_block *sblk;
5374 	uint16_t hw_rx_cons, hw_tx_cons;
5375 	uint32_t status_attn_bits;
5376 	struct bce_tx_ring *txr = &sc->tx_rings[0];
5377 	struct bce_rx_ring *rxr = &sc->rx_rings[0];
5378 
5379 	ASSERT_SERIALIZED(&sc->main_serialize);
5380 
5381 	sblk = sc->status_block;
5382 
5383 	/*
5384 	 * Save the status block index value for use during
5385 	 * the next interrupt.
5386 	 */
5387 	rxr->last_status_idx = *rxr->hw_status_idx;
5388 
5389 	/* Make sure status index is extracted before RX/TX cons */
5390 	cpu_lfence();
5391 
5392 	/* Check if the hardware has finished any work. */
5393 	hw_rx_cons = bce_get_hw_rx_cons(rxr);
5394 	hw_tx_cons = bce_get_hw_tx_cons(txr);
5395 
5396 	status_attn_bits = sblk->status_attn_bits;
5397 
5398 	/* Was it a link change interrupt? */
5399 	if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
5400 	    (sblk->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE)) {
5401 		bce_phy_intr(sc);
5402 
5403 		/*
5404 		 * Clear any transient status updates during link state
5405 		 * change.
5406 		 */
5407 		REG_WR(sc, BCE_HC_COMMAND,
5408 		    sc->hc_command | BCE_HC_COMMAND_COAL_NOW_WO_INT);
5409 		REG_RD(sc, BCE_HC_COMMAND);
5410 	}
5411 
5412 	/*
5413 	 * If any other attention is asserted then
5414 	 * the chip is toast.
5415 	 */
5416 	if ((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) !=
5417 	    (sblk->status_attn_bits_ack & ~STATUS_ATTN_BITS_LINK_STATE)) {
5418 		if_printf(ifp, "Fatal attention detected: 0x%08X\n",
5419 			  sblk->status_attn_bits);
5420 		bce_serialize_skipmain(sc);
5421 		bce_init(sc);
5422 		bce_deserialize_skipmain(sc);
5423 		return;
5424 	}
5425 
5426 	/* Check for any completed RX frames. */
5427 	lwkt_serialize_enter(&rxr->rx_serialize);
5428 	if (hw_rx_cons != rxr->rx_cons)
5429 		bce_rx_intr(rxr, -1, hw_rx_cons);
5430 	lwkt_serialize_exit(&rxr->rx_serialize);
5431 
5432 	/* Check for any completed TX frames. */
5433 	lwkt_serialize_enter(&txr->tx_serialize);
5434 	if (hw_tx_cons != txr->tx_cons) {
5435 		bce_tx_intr(txr, hw_tx_cons);
5436 		if (!ifsq_is_empty(txr->ifsq))
5437 			ifsq_devstart(txr->ifsq);
5438 	}
5439 	lwkt_serialize_exit(&txr->tx_serialize);
5440 }
5441 
/*
 * Legacy (INTx) interrupt handler.  Because the line may be shared,
 * first verify this device actually raised the interrupt.
 */
static void
bce_intr_legacy(void *xsc)
{
	struct bce_softc *sc = xsc;
	struct bce_rx_ring *rxr = &sc->rx_rings[0];
	struct status_block *sblk;

	sblk = sc->status_block;

	/*
	 * If the hardware status block index matches the last value
	 * read by the driver and we haven't asserted our interrupt
	 * then there's nothing to do.
	 */
	if (sblk->status_idx == rxr->last_status_idx &&
	    (REG_RD(sc, BCE_PCICFG_MISC_STATUS) &
	     BCE_PCICFG_MISC_STATUS_INTA_VALUE))
		return;

	/* Ack the interrupt and stop others from occurring. */
	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
	       BCE_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
	       BCE_PCICFG_INT_ACK_CMD_MASK_INT);

	/*
	 * Read back to deassert IRQ immediately to avoid too
	 * many spurious interrupts.
	 */
	REG_RD(sc, BCE_PCICFG_INT_ACK_CMD);

	bce_intr(sc);

	/* Re-enable interrupts. */
	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
	       BCE_PCICFG_INT_ACK_CMD_INDEX_VALID |
	       BCE_PCICFG_INT_ACK_CMD_MASK_INT | rxr->last_status_idx);
	bce_reenable_intr(rxr);
}
5480 
/*
 * MSI interrupt handler; no shared-line check is needed since the
 * message is exclusively ours.
 */
static void
bce_intr_msi(void *xsc)
{
	struct bce_softc *sc = xsc;

	/* Ack the interrupt and stop others from occurring. */
	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
	       BCE_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
	       BCE_PCICFG_INT_ACK_CMD_MASK_INT);

	bce_intr(sc);

	/* Re-enable interrupts */
	bce_reenable_intr(&sc->rx_rings[0]);
}
5496 
/*
 * MSI one-shot interrupt handler: the controller masks further
 * interrupts itself, so no explicit ack write is required here.
 */
static void
bce_intr_msi_oneshot(void *xsc)
{
	struct bce_softc *sc = xsc;

	bce_intr(sc);

	/* Re-enable interrupts */
	bce_reenable_intr(&sc->rx_rings[0]);
}
5507 
/*
 * MSI-X interrupt handler for a combined RX/TX ring pair; runs under
 * the RX ring's serializer and takes the TX serializer as needed.
 */
static void
bce_intr_msix_rxtx(void *xrxr)
{
	struct bce_rx_ring *rxr = xrxr;
	struct bce_tx_ring *txr;
	uint16_t hw_rx_cons, hw_tx_cons;

	ASSERT_SERIALIZED(&rxr->rx_serialize);

	/* RX ring index doubles as the paired TX ring index. */
	KKASSERT(rxr->idx < rxr->sc->tx_ring_cnt);
	txr = &rxr->sc->tx_rings[rxr->idx];

	/*
	 * Save the status block index value for use during
	 * the next interrupt.
	 */
	rxr->last_status_idx = *rxr->hw_status_idx;

	/* Make sure status index is extracted before RX/TX cons */
	cpu_lfence();

	/* Check if the hardware has finished any work. */
	hw_rx_cons = bce_get_hw_rx_cons(rxr);
	if (hw_rx_cons != rxr->rx_cons)
		bce_rx_intr(rxr, -1, hw_rx_cons);

	/* Check for any completed TX frames. */
	hw_tx_cons = bce_get_hw_tx_cons(txr);
	lwkt_serialize_enter(&txr->tx_serialize);
	if (hw_tx_cons != txr->tx_cons) {
		bce_tx_intr(txr, hw_tx_cons);
		if (!ifsq_is_empty(txr->ifsq))
			ifsq_devstart(txr->ifsq);
	}
	lwkt_serialize_exit(&txr->tx_serialize);

	/* Re-enable interrupts */
	bce_reenable_intr(rxr);
}
5547 
/*
 * MSI-X interrupt handler for an RX-only ring.
 */
static void
bce_intr_msix_rx(void *xrxr)
{
	struct bce_rx_ring *rxr = xrxr;
	uint16_t hw_rx_cons;

	ASSERT_SERIALIZED(&rxr->rx_serialize);

	/*
	 * Save the status block index value for use during
	 * the next interrupt.
	 */
	rxr->last_status_idx = *rxr->hw_status_idx;

	/* Make sure status index is extracted before RX cons */
	cpu_lfence();

	/* Check if the hardware has finished any work. */
	hw_rx_cons = bce_get_hw_rx_cons(rxr);
	if (hw_rx_cons != rxr->rx_cons)
		bce_rx_intr(rxr, -1, hw_rx_cons);

	/* Re-enable interrupts */
	bce_reenable_intr(rxr);
}
5573 
5574 /****************************************************************************/
5575 /* Programs the various packet receive modes (broadcast and multicast).     */
5576 /*                                                                          */
5577 /* Returns:                                                                 */
5578 /*   Nothing.                                                               */
5579 /****************************************************************************/
5580 static void
5581 bce_set_rx_mode(struct bce_softc *sc)
5582 {
5583 	struct ifnet *ifp = &sc->arpcom.ac_if;
5584 	struct ifmultiaddr *ifma;
5585 	uint32_t hashes[NUM_MC_HASH_REGISTERS] = { 0, 0, 0, 0, 0, 0, 0, 0 };
5586 	uint32_t rx_mode, sort_mode;
5587 	int h, i;
5588 
5589 	ASSERT_IFNET_SERIALIZED_ALL(ifp);
5590 
5591 	/* Initialize receive mode default settings. */
5592 	rx_mode = sc->rx_mode &
5593 		  ~(BCE_EMAC_RX_MODE_PROMISCUOUS |
5594 		    BCE_EMAC_RX_MODE_KEEP_VLAN_TAG);
5595 	sort_mode = 1 | BCE_RPM_SORT_USER0_BC_EN;
5596 
5597 	/*
5598 	 * ASF/IPMI/UMP firmware requires that VLAN tag stripping
5599 	 * be enbled.
5600 	 */
5601 	if (!(BCE_IF_CAPABILITIES & IFCAP_VLAN_HWTAGGING) &&
5602 	    !(sc->bce_flags & BCE_MFW_ENABLE_FLAG))
5603 		rx_mode |= BCE_EMAC_RX_MODE_KEEP_VLAN_TAG;
5604 
5605 	/*
5606 	 * Check for promiscuous, all multicast, or selected
5607 	 * multicast address filtering.
5608 	 */
5609 	if (ifp->if_flags & IFF_PROMISC) {
5610 		/* Enable promiscuous mode. */
5611 		rx_mode |= BCE_EMAC_RX_MODE_PROMISCUOUS;
5612 		sort_mode |= BCE_RPM_SORT_USER0_PROM_EN;
5613 	} else if (ifp->if_flags & IFF_ALLMULTI) {
5614 		/* Enable all multicast addresses. */
5615 		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
5616 			REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4),
5617 			       0xffffffff);
5618 		}
5619 		sort_mode |= BCE_RPM_SORT_USER0_MC_EN;
5620 	} else {
5621 		/* Accept one or more multicast(s). */
5622 		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
5623 			if (ifma->ifma_addr->sa_family != AF_LINK)
5624 				continue;
5625 			h = ether_crc32_le(
5626 			    LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
5627 			    ETHER_ADDR_LEN) & 0xFF;
5628 			hashes[(h & 0xE0) >> 5] |= 1 << (h & 0x1F);
5629 		}
5630 
5631 		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
5632 			REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4),
5633 			       hashes[i]);
5634 		}
5635 		sort_mode |= BCE_RPM_SORT_USER0_MC_HSH_EN;
5636 	}
5637 
5638 	/* Only make changes if the recive mode has actually changed. */
5639 	if (rx_mode != sc->rx_mode) {
5640 		sc->rx_mode = rx_mode;
5641 		REG_WR(sc, BCE_EMAC_RX_MODE, rx_mode);
5642 	}
5643 
5644 	/* Disable and clear the exisitng sort before enabling a new sort. */
5645 	REG_WR(sc, BCE_RPM_SORT_USER0, 0x0);
5646 	REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode);
5647 	REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode | BCE_RPM_SORT_USER0_ENA);
5648 }
5649 
5650 /****************************************************************************/
5651 /* Called periodically to updates statistics from the controllers           */
5652 /* statistics block.                                                        */
5653 /*                                                                          */
5654 /* Returns:                                                                 */
5655 /*   Nothing.                                                               */
5656 /****************************************************************************/
5657 static void
5658 bce_stats_update(struct bce_softc *sc)
5659 {
5660 	struct ifnet *ifp = &sc->arpcom.ac_if;
5661 	struct statistics_block *stats = sc->stats_block;
5662 
5663 	ASSERT_SERIALIZED(&sc->main_serialize);
5664 
5665 	/*
5666 	 * Certain controllers don't report carrier sense errors correctly.
5667 	 * See errata E11_5708CA0_1165.
5668 	 */
5669 	if (!(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) &&
5670 	    !(BCE_CHIP_ID(sc) == BCE_CHIP_ID_5708_A0)) {
5671 		IFNET_STAT_INC(ifp, oerrors,
5672 			(u_long)stats->stat_Dot3StatsCarrierSenseErrors);
5673 	}
5674 
5675 	/*
5676 	 * Update the sysctl statistics from the hardware statistics.
5677 	 */
5678 	sc->stat_IfHCInOctets =
5679 		((uint64_t)stats->stat_IfHCInOctets_hi << 32) +
5680 		 (uint64_t)stats->stat_IfHCInOctets_lo;
5681 
5682 	sc->stat_IfHCInBadOctets =
5683 		((uint64_t)stats->stat_IfHCInBadOctets_hi << 32) +
5684 		 (uint64_t)stats->stat_IfHCInBadOctets_lo;
5685 
5686 	sc->stat_IfHCOutOctets =
5687 		((uint64_t)stats->stat_IfHCOutOctets_hi << 32) +
5688 		 (uint64_t)stats->stat_IfHCOutOctets_lo;
5689 
5690 	sc->stat_IfHCOutBadOctets =
5691 		((uint64_t)stats->stat_IfHCOutBadOctets_hi << 32) +
5692 		 (uint64_t)stats->stat_IfHCOutBadOctets_lo;
5693 
5694 	sc->stat_IfHCInUcastPkts =
5695 		((uint64_t)stats->stat_IfHCInUcastPkts_hi << 32) +
5696 		 (uint64_t)stats->stat_IfHCInUcastPkts_lo;
5697 
5698 	sc->stat_IfHCInMulticastPkts =
5699 		((uint64_t)stats->stat_IfHCInMulticastPkts_hi << 32) +
5700 		 (uint64_t)stats->stat_IfHCInMulticastPkts_lo;
5701 
5702 	sc->stat_IfHCInBroadcastPkts =
5703 		((uint64_t)stats->stat_IfHCInBroadcastPkts_hi << 32) +
5704 		 (uint64_t)stats->stat_IfHCInBroadcastPkts_lo;
5705 
5706 	sc->stat_IfHCOutUcastPkts =
5707 		((uint64_t)stats->stat_IfHCOutUcastPkts_hi << 32) +
5708 		 (uint64_t)stats->stat_IfHCOutUcastPkts_lo;
5709 
5710 	sc->stat_IfHCOutMulticastPkts =
5711 		((uint64_t)stats->stat_IfHCOutMulticastPkts_hi << 32) +
5712 		 (uint64_t)stats->stat_IfHCOutMulticastPkts_lo;
5713 
5714 	sc->stat_IfHCOutBroadcastPkts =
5715 		((uint64_t)stats->stat_IfHCOutBroadcastPkts_hi << 32) +
5716 		 (uint64_t)stats->stat_IfHCOutBroadcastPkts_lo;
5717 
5718 	sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors =
5719 		stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors;
5720 
5721 	sc->stat_Dot3StatsCarrierSenseErrors =
5722 		stats->stat_Dot3StatsCarrierSenseErrors;
5723 
5724 	sc->stat_Dot3StatsFCSErrors =
5725 		stats->stat_Dot3StatsFCSErrors;
5726 
5727 	sc->stat_Dot3StatsAlignmentErrors =
5728 		stats->stat_Dot3StatsAlignmentErrors;
5729 
5730 	sc->stat_Dot3StatsSingleCollisionFrames =
5731 		stats->stat_Dot3StatsSingleCollisionFrames;
5732 
5733 	sc->stat_Dot3StatsMultipleCollisionFrames =
5734 		stats->stat_Dot3StatsMultipleCollisionFrames;
5735 
5736 	sc->stat_Dot3StatsDeferredTransmissions =
5737 		stats->stat_Dot3StatsDeferredTransmissions;
5738 
5739 	sc->stat_Dot3StatsExcessiveCollisions =
5740 		stats->stat_Dot3StatsExcessiveCollisions;
5741 
5742 	sc->stat_Dot3StatsLateCollisions =
5743 		stats->stat_Dot3StatsLateCollisions;
5744 
5745 	sc->stat_EtherStatsCollisions =
5746 		stats->stat_EtherStatsCollisions;
5747 
5748 	sc->stat_EtherStatsFragments =
5749 		stats->stat_EtherStatsFragments;
5750 
5751 	sc->stat_EtherStatsJabbers =
5752 		stats->stat_EtherStatsJabbers;
5753 
5754 	sc->stat_EtherStatsUndersizePkts =
5755 		stats->stat_EtherStatsUndersizePkts;
5756 
5757 	sc->stat_EtherStatsOverrsizePkts =
5758 		stats->stat_EtherStatsOverrsizePkts;
5759 
5760 	sc->stat_EtherStatsPktsRx64Octets =
5761 		stats->stat_EtherStatsPktsRx64Octets;
5762 
5763 	sc->stat_EtherStatsPktsRx65Octetsto127Octets =
5764 		stats->stat_EtherStatsPktsRx65Octetsto127Octets;
5765 
5766 	sc->stat_EtherStatsPktsRx128Octetsto255Octets =
5767 		stats->stat_EtherStatsPktsRx128Octetsto255Octets;
5768 
5769 	sc->stat_EtherStatsPktsRx256Octetsto511Octets =
5770 		stats->stat_EtherStatsPktsRx256Octetsto511Octets;
5771 
5772 	sc->stat_EtherStatsPktsRx512Octetsto1023Octets =
5773 		stats->stat_EtherStatsPktsRx512Octetsto1023Octets;
5774 
5775 	sc->stat_EtherStatsPktsRx1024Octetsto1522Octets =
5776 		stats->stat_EtherStatsPktsRx1024Octetsto1522Octets;
5777 
5778 	sc->stat_EtherStatsPktsRx1523Octetsto9022Octets =
5779 		stats->stat_EtherStatsPktsRx1523Octetsto9022Octets;
5780 
5781 	sc->stat_EtherStatsPktsTx64Octets =
5782 		stats->stat_EtherStatsPktsTx64Octets;
5783 
5784 	sc->stat_EtherStatsPktsTx65Octetsto127Octets =
5785 		stats->stat_EtherStatsPktsTx65Octetsto127Octets;
5786 
5787 	sc->stat_EtherStatsPktsTx128Octetsto255Octets =
5788 		stats->stat_EtherStatsPktsTx128Octetsto255Octets;
5789 
5790 	sc->stat_EtherStatsPktsTx256Octetsto511Octets =
5791 		stats->stat_EtherStatsPktsTx256Octetsto511Octets;
5792 
5793 	sc->stat_EtherStatsPktsTx512Octetsto1023Octets =
5794 		stats->stat_EtherStatsPktsTx512Octetsto1023Octets;
5795 
5796 	sc->stat_EtherStatsPktsTx1024Octetsto1522Octets =
5797 		stats->stat_EtherStatsPktsTx1024Octetsto1522Octets;
5798 
5799 	sc->stat_EtherStatsPktsTx1523Octetsto9022Octets =
5800 		stats->stat_EtherStatsPktsTx1523Octetsto9022Octets;
5801 
5802 	sc->stat_XonPauseFramesReceived =
5803 		stats->stat_XonPauseFramesReceived;
5804 
5805 	sc->stat_XoffPauseFramesReceived =
5806 		stats->stat_XoffPauseFramesReceived;
5807 
5808 	sc->stat_OutXonSent =
5809 		stats->stat_OutXonSent;
5810 
5811 	sc->stat_OutXoffSent =
5812 		stats->stat_OutXoffSent;
5813 
5814 	sc->stat_FlowControlDone =
5815 		stats->stat_FlowControlDone;
5816 
5817 	sc->stat_MacControlFramesReceived =
5818 		stats->stat_MacControlFramesReceived;
5819 
5820 	sc->stat_XoffStateEntered =
5821 		stats->stat_XoffStateEntered;
5822 
5823 	sc->stat_IfInFramesL2FilterDiscards =
5824 		stats->stat_IfInFramesL2FilterDiscards;
5825 
5826 	sc->stat_IfInRuleCheckerDiscards =
5827 		stats->stat_IfInRuleCheckerDiscards;
5828 
5829 	sc->stat_IfInFTQDiscards =
5830 		stats->stat_IfInFTQDiscards;
5831 
5832 	sc->stat_IfInMBUFDiscards =
5833 		stats->stat_IfInMBUFDiscards;
5834 
5835 	sc->stat_IfInRuleCheckerP4Hit =
5836 		stats->stat_IfInRuleCheckerP4Hit;
5837 
5838 	sc->stat_CatchupInRuleCheckerDiscards =
5839 		stats->stat_CatchupInRuleCheckerDiscards;
5840 
5841 	sc->stat_CatchupInFTQDiscards =
5842 		stats->stat_CatchupInFTQDiscards;
5843 
5844 	sc->stat_CatchupInMBUFDiscards =
5845 		stats->stat_CatchupInMBUFDiscards;
5846 
5847 	sc->stat_CatchupInRuleCheckerP4Hit =
5848 		stats->stat_CatchupInRuleCheckerP4Hit;
5849 
5850 	sc->com_no_buffers = REG_RD_IND(sc, 0x120084);
5851 
5852 	/*
5853 	 * Update the interface statistics from the
5854 	 * hardware statistics.
5855 	 */
5856 	IFNET_STAT_SET(ifp, collisions, (u_long)sc->stat_EtherStatsCollisions);
5857 
5858 	IFNET_STAT_SET(ifp, ierrors, (u_long)sc->stat_EtherStatsUndersizePkts +
5859 	    (u_long)sc->stat_EtherStatsOverrsizePkts +
5860 	    (u_long)sc->stat_IfInMBUFDiscards +
5861 	    (u_long)sc->stat_Dot3StatsAlignmentErrors +
5862 	    (u_long)sc->stat_Dot3StatsFCSErrors +
5863 	    (u_long)sc->stat_IfInRuleCheckerDiscards +
5864 	    (u_long)sc->stat_IfInFTQDiscards +
5865 	    (u_long)sc->com_no_buffers);
5866 
5867 	IFNET_STAT_SET(ifp, oerrors,
5868 	    (u_long)sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors +
5869 	    (u_long)sc->stat_Dot3StatsExcessiveCollisions +
5870 	    (u_long)sc->stat_Dot3StatsLateCollisions);
5871 }
5872 
5873 /****************************************************************************/
5874 /* Periodic function to notify the bootcode that the driver is still        */
5875 /* present.                                                                 */
5876 /*                                                                          */
5877 /* Returns:                                                                 */
5878 /*   Nothing.                                                               */
5879 /****************************************************************************/
5880 static void
5881 bce_pulse(void *xsc)
5882 {
5883 	struct bce_softc *sc = xsc;
5884 	struct ifnet *ifp = &sc->arpcom.ac_if;
5885 	uint32_t msg;
5886 
5887 	lwkt_serialize_enter(&sc->main_serialize);
5888 
5889 	/* Tell the firmware that the driver is still running. */
5890 	msg = (uint32_t)++sc->bce_fw_drv_pulse_wr_seq;
5891 	bce_shmem_wr(sc, BCE_DRV_PULSE_MB, msg);
5892 
5893 	/* Update the bootcode condition. */
5894 	sc->bc_state = bce_shmem_rd(sc, BCE_BC_STATE_CONDITION);
5895 
5896 	/* Report whether the bootcode still knows the driver is running. */
5897 	if (!sc->bce_drv_cardiac_arrest) {
5898 		if (!(sc->bc_state & BCE_CONDITION_DRV_PRESENT)) {
5899 			sc->bce_drv_cardiac_arrest = 1;
5900 			if_printf(ifp, "Bootcode lost the driver pulse! "
5901 			    "(bc_state = 0x%08X)\n", sc->bc_state);
5902 		}
5903 	} else {
5904  		/*
5905  		 * Not supported by all bootcode versions.
5906  		 * (v5.0.11+ and v5.2.1+)  Older bootcode
5907  		 * will require the driver to reset the
5908  		 * controller to clear this condition.
5909 		 */
5910 		if (sc->bc_state & BCE_CONDITION_DRV_PRESENT) {
5911 			sc->bce_drv_cardiac_arrest = 0;
5912 			if_printf(ifp, "Bootcode found the driver pulse! "
5913 			    "(bc_state = 0x%08X)\n", sc->bc_state);
5914 		}
5915 	}
5916 
5917 	/* Schedule the next pulse. */
5918 	callout_reset_bycpu(&sc->bce_pulse_callout, hz, bce_pulse, sc,
5919 	    sc->bce_timer_cpuid);
5920 
5921 	lwkt_serialize_exit(&sc->main_serialize);
5922 }
5923 
5924 /****************************************************************************/
5925 /* Periodic function to check whether MSI is lost                           */
5926 /*                                                                          */
5927 /* Returns:                                                                 */
5928 /*   Nothing.                                                               */
5929 /****************************************************************************/
5930 static void
5931 bce_check_msi(void *xsc)
5932 {
5933 	struct bce_softc *sc = xsc;
5934 	struct ifnet *ifp = &sc->arpcom.ac_if;
5935 	struct status_block *sblk = sc->status_block;
5936 	struct bce_tx_ring *txr = &sc->tx_rings[0];
5937 	struct bce_rx_ring *rxr = &sc->rx_rings[0];
5938 
5939 	lwkt_serialize_enter(&sc->main_serialize);
5940 
5941 	KKASSERT(mycpuid == sc->bce_msix[0].msix_cpuid);
5942 
5943 	if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) != IFF_RUNNING) {
5944 		lwkt_serialize_exit(&sc->main_serialize);
5945 		return;
5946 	}
5947 
5948 	if (bce_get_hw_rx_cons(rxr) != rxr->rx_cons ||
5949 	    bce_get_hw_tx_cons(txr) != txr->tx_cons ||
5950 	    (sblk->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
5951 	    (sblk->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE)) {
5952 		if (sc->bce_check_rx_cons == rxr->rx_cons &&
5953 		    sc->bce_check_tx_cons == txr->tx_cons &&
5954 		    sc->bce_check_status_idx == rxr->last_status_idx) {
5955 			uint32_t msi_ctrl;
5956 
5957 			if (!sc->bce_msi_maylose) {
5958 				sc->bce_msi_maylose = TRUE;
5959 				goto done;
5960 			}
5961 
5962 			msi_ctrl = REG_RD(sc, BCE_PCICFG_MSI_CONTROL);
5963 			if (msi_ctrl & BCE_PCICFG_MSI_CONTROL_ENABLE) {
5964 				if (bootverbose)
5965 					if_printf(ifp, "lost MSI\n");
5966 
5967 				REG_WR(sc, BCE_PCICFG_MSI_CONTROL,
5968 				    msi_ctrl & ~BCE_PCICFG_MSI_CONTROL_ENABLE);
5969 				REG_WR(sc, BCE_PCICFG_MSI_CONTROL, msi_ctrl);
5970 
5971 				bce_intr_msi(sc);
5972 			} else if (bootverbose) {
5973 				if_printf(ifp, "MSI may be lost\n");
5974 			}
5975 		}
5976 	}
5977 	sc->bce_msi_maylose = FALSE;
5978 	sc->bce_check_rx_cons = rxr->rx_cons;
5979 	sc->bce_check_tx_cons = txr->tx_cons;
5980 	sc->bce_check_status_idx = rxr->last_status_idx;
5981 
5982 done:
5983 	callout_reset(&sc->bce_ckmsi_callout, BCE_MSI_CKINTVL,
5984 	    bce_check_msi, sc);
5985 	lwkt_serialize_exit(&sc->main_serialize);
5986 }
5987 
5988 /****************************************************************************/
5989 /* Periodic function to perform maintenance tasks.                          */
5990 /*                                                                          */
5991 /* Returns:                                                                 */
5992 /*   Nothing.                                                               */
5993 /****************************************************************************/
5994 static void
5995 bce_tick_serialized(struct bce_softc *sc)
5996 {
5997 	struct mii_data *mii;
5998 
5999 	ASSERT_SERIALIZED(&sc->main_serialize);
6000 
6001 	/* Update the statistics from the hardware statistics block. */
6002 	bce_stats_update(sc);
6003 
6004 	/* Schedule the next tick. */
6005 	callout_reset_bycpu(&sc->bce_tick_callout, hz, bce_tick, sc,
6006 	    sc->bce_timer_cpuid);
6007 
6008 	/* If link is up already up then we're done. */
6009 	if (sc->bce_link)
6010 		return;
6011 
6012 	mii = device_get_softc(sc->bce_miibus);
6013 	mii_tick(mii);
6014 
6015 	/* Check if the link has come up. */
6016 	if ((mii->mii_media_status & IFM_ACTIVE) &&
6017 	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
6018 		int i;
6019 
6020 		sc->bce_link++;
6021 		/* Now that link is up, handle any outstanding TX traffic. */
6022 		for (i = 0; i < sc->tx_ring_cnt; ++i)
6023 			ifsq_devstart_sched(sc->tx_rings[i].ifsq);
6024 	}
6025 }
6026 
static void
bce_tick(void *xsc)
{
	struct bce_softc *sc = xsc;

	/* Callout entry point: do the periodic work under the main serializer. */
	lwkt_serialize_enter(&sc->main_serialize);
	bce_tick_serialized(sc);
	lwkt_serialize_exit(&sc->main_serialize);
}
6036 
6037 /****************************************************************************/
6038 /* Adds any sysctl parameters for tuning or debugging purposes.             */
6039 /*                                                                          */
6040 /* Returns:                                                                 */
6041 /*   0 for success, positive value for failure.                             */
6042 /****************************************************************************/
6043 static void
6044 bce_add_sysctls(struct bce_softc *sc)
6045 {
6046 	struct sysctl_ctx_list *ctx;
6047 	struct sysctl_oid_list *children;
6048 #if defined(BCE_TSS_DEBUG) || defined(BCE_RSS_DEBUG)
6049 	char node[32];
6050 	int i;
6051 #endif
6052 
6053 	sysctl_ctx_init(&sc->bce_sysctl_ctx);
6054 	sc->bce_sysctl_tree = SYSCTL_ADD_NODE(&sc->bce_sysctl_ctx,
6055 					      SYSCTL_STATIC_CHILDREN(_hw),
6056 					      OID_AUTO,
6057 					      device_get_nameunit(sc->bce_dev),
6058 					      CTLFLAG_RD, 0, "");
6059 	if (sc->bce_sysctl_tree == NULL) {
6060 		device_printf(sc->bce_dev, "can't add sysctl node\n");
6061 		return;
6062 	}
6063 
6064 	ctx = &sc->bce_sysctl_ctx;
6065 	children = SYSCTL_CHILDREN(sc->bce_sysctl_tree);
6066 
6067 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_bds_int",
6068 			CTLTYPE_INT | CTLFLAG_RW,
6069 			sc, 0, bce_sysctl_tx_bds_int, "I",
6070 			"Send max coalesced BD count during interrupt");
6071 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_bds",
6072 			CTLTYPE_INT | CTLFLAG_RW,
6073 			sc, 0, bce_sysctl_tx_bds, "I",
6074 			"Send max coalesced BD count");
6075 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_ticks_int",
6076 			CTLTYPE_INT | CTLFLAG_RW,
6077 			sc, 0, bce_sysctl_tx_ticks_int, "I",
6078 			"Send coalescing ticks during interrupt");
6079 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_ticks",
6080 			CTLTYPE_INT | CTLFLAG_RW,
6081 			sc, 0, bce_sysctl_tx_ticks, "I",
6082 			"Send coalescing ticks");
6083 
6084 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_bds_int",
6085 			CTLTYPE_INT | CTLFLAG_RW,
6086 			sc, 0, bce_sysctl_rx_bds_int, "I",
6087 			"Receive max coalesced BD count during interrupt");
6088 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_bds",
6089 			CTLTYPE_INT | CTLFLAG_RW,
6090 			sc, 0, bce_sysctl_rx_bds, "I",
6091 			"Receive max coalesced BD count");
6092 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_ticks_int",
6093 			CTLTYPE_INT | CTLFLAG_RW,
6094 			sc, 0, bce_sysctl_rx_ticks_int, "I",
6095 			"Receive coalescing ticks during interrupt");
6096 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_ticks",
6097 			CTLTYPE_INT | CTLFLAG_RW,
6098 			sc, 0, bce_sysctl_rx_ticks, "I",
6099 			"Receive coalescing ticks");
6100 
6101 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_rings",
6102 		CTLFLAG_RD, &sc->rx_ring_cnt, 0, "# of RX rings");
6103 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_pages",
6104 		CTLFLAG_RD, &sc->rx_rings[0].rx_pages, 0, "# of RX pages");
6105 
6106 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_rings",
6107 		CTLFLAG_RD, &sc->tx_ring_cnt, 0, "# of TX rings");
6108 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_pages",
6109 		CTLFLAG_RD, &sc->tx_rings[0].tx_pages, 0, "# of TX pages");
6110 
6111 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_wreg",
6112 	    	CTLFLAG_RW, &sc->tx_rings[0].tx_wreg, 0,
6113 		"# segments before write to hardware registers");
6114 
6115 #ifdef IFPOLL_ENABLE
6116 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "npoll_offset",
6117 	    CTLTYPE_INT|CTLFLAG_RW, sc, 0, bce_sysctl_npoll_offset,
6118 	    "I", "NPOLLING cpu offset");
6119 #endif
6120 
6121 #ifdef BCE_RSS_DEBUG
6122 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rss_debug",
6123 	    CTLFLAG_RW, &sc->rss_debug, 0, "RSS debug level");
6124 	for (i = 0; i < sc->rx_ring_cnt; ++i) {
6125 		ksnprintf(node, sizeof(node), "rx%d_pkt", i);
6126 		SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, node,
6127 		    CTLFLAG_RW, &sc->rx_rings[i].rx_pkts,
6128 		    "RXed packets");
6129 	}
6130 #endif
6131 
6132 #ifdef BCE_TSS_DEBUG
6133 	for (i = 0; i < sc->tx_ring_cnt; ++i) {
6134 		ksnprintf(node, sizeof(node), "tx%d_pkt", i);
6135 		SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, node,
6136 		    CTLFLAG_RW, &sc->tx_rings[i].tx_pkts,
6137 		    "TXed packets");
6138 	}
6139 #endif
6140 
6141 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6142 		"stat_IfHCInOctets",
6143 		CTLFLAG_RD, &sc->stat_IfHCInOctets,
6144 		"Bytes received");
6145 
6146 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6147 		"stat_IfHCInBadOctets",
6148 		CTLFLAG_RD, &sc->stat_IfHCInBadOctets,
6149 		"Bad bytes received");
6150 
6151 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6152 		"stat_IfHCOutOctets",
6153 		CTLFLAG_RD, &sc->stat_IfHCOutOctets,
6154 		"Bytes sent");
6155 
6156 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6157 		"stat_IfHCOutBadOctets",
6158 		CTLFLAG_RD, &sc->stat_IfHCOutBadOctets,
6159 		"Bad bytes sent");
6160 
6161 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6162 		"stat_IfHCInUcastPkts",
6163 		CTLFLAG_RD, &sc->stat_IfHCInUcastPkts,
6164 		"Unicast packets received");
6165 
6166 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6167 		"stat_IfHCInMulticastPkts",
6168 		CTLFLAG_RD, &sc->stat_IfHCInMulticastPkts,
6169 		"Multicast packets received");
6170 
6171 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6172 		"stat_IfHCInBroadcastPkts",
6173 		CTLFLAG_RD, &sc->stat_IfHCInBroadcastPkts,
6174 		"Broadcast packets received");
6175 
6176 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6177 		"stat_IfHCOutUcastPkts",
6178 		CTLFLAG_RD, &sc->stat_IfHCOutUcastPkts,
6179 		"Unicast packets sent");
6180 
6181 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6182 		"stat_IfHCOutMulticastPkts",
6183 		CTLFLAG_RD, &sc->stat_IfHCOutMulticastPkts,
6184 		"Multicast packets sent");
6185 
6186 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6187 		"stat_IfHCOutBroadcastPkts",
6188 		CTLFLAG_RD, &sc->stat_IfHCOutBroadcastPkts,
6189 		"Broadcast packets sent");
6190 
6191 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6192 		"stat_emac_tx_stat_dot3statsinternalmactransmiterrors",
6193 		CTLFLAG_RD, &sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors,
6194 		0, "Internal MAC transmit errors");
6195 
6196 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6197 		"stat_Dot3StatsCarrierSenseErrors",
6198 		CTLFLAG_RD, &sc->stat_Dot3StatsCarrierSenseErrors,
6199 		0, "Carrier sense errors");
6200 
6201 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6202 		"stat_Dot3StatsFCSErrors",
6203 		CTLFLAG_RD, &sc->stat_Dot3StatsFCSErrors,
6204 		0, "Frame check sequence errors");
6205 
6206 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6207 		"stat_Dot3StatsAlignmentErrors",
6208 		CTLFLAG_RD, &sc->stat_Dot3StatsAlignmentErrors,
6209 		0, "Alignment errors");
6210 
6211 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6212 		"stat_Dot3StatsSingleCollisionFrames",
6213 		CTLFLAG_RD, &sc->stat_Dot3StatsSingleCollisionFrames,
6214 		0, "Single Collision Frames");
6215 
6216 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6217 		"stat_Dot3StatsMultipleCollisionFrames",
6218 		CTLFLAG_RD, &sc->stat_Dot3StatsMultipleCollisionFrames,
6219 		0, "Multiple Collision Frames");
6220 
6221 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6222 		"stat_Dot3StatsDeferredTransmissions",
6223 		CTLFLAG_RD, &sc->stat_Dot3StatsDeferredTransmissions,
6224 		0, "Deferred Transmissions");
6225 
6226 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6227 		"stat_Dot3StatsExcessiveCollisions",
6228 		CTLFLAG_RD, &sc->stat_Dot3StatsExcessiveCollisions,
6229 		0, "Excessive Collisions");
6230 
6231 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6232 		"stat_Dot3StatsLateCollisions",
6233 		CTLFLAG_RD, &sc->stat_Dot3StatsLateCollisions,
6234 		0, "Late Collisions");
6235 
6236 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6237 		"stat_EtherStatsCollisions",
6238 		CTLFLAG_RD, &sc->stat_EtherStatsCollisions,
6239 		0, "Collisions");
6240 
6241 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6242 		"stat_EtherStatsFragments",
6243 		CTLFLAG_RD, &sc->stat_EtherStatsFragments,
6244 		0, "Fragments");
6245 
6246 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6247 		"stat_EtherStatsJabbers",
6248 		CTLFLAG_RD, &sc->stat_EtherStatsJabbers,
6249 		0, "Jabbers");
6250 
6251 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6252 		"stat_EtherStatsUndersizePkts",
6253 		CTLFLAG_RD, &sc->stat_EtherStatsUndersizePkts,
6254 		0, "Undersize packets");
6255 
6256 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6257 		"stat_EtherStatsOverrsizePkts",
6258 		CTLFLAG_RD, &sc->stat_EtherStatsOverrsizePkts,
6259 		0, "stat_EtherStatsOverrsizePkts");
6260 
6261 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6262 		"stat_EtherStatsPktsRx64Octets",
6263 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx64Octets,
6264 		0, "Bytes received in 64 byte packets");
6265 
6266 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6267 		"stat_EtherStatsPktsRx65Octetsto127Octets",
6268 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx65Octetsto127Octets,
6269 		0, "Bytes received in 65 to 127 byte packets");
6270 
6271 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6272 		"stat_EtherStatsPktsRx128Octetsto255Octets",
6273 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx128Octetsto255Octets,
6274 		0, "Bytes received in 128 to 255 byte packets");
6275 
6276 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6277 		"stat_EtherStatsPktsRx256Octetsto511Octets",
6278 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx256Octetsto511Octets,
6279 		0, "Bytes received in 256 to 511 byte packets");
6280 
6281 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6282 		"stat_EtherStatsPktsRx512Octetsto1023Octets",
6283 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx512Octetsto1023Octets,
6284 		0, "Bytes received in 512 to 1023 byte packets");
6285 
6286 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6287 		"stat_EtherStatsPktsRx1024Octetsto1522Octets",
6288 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1024Octetsto1522Octets,
6289 		0, "Bytes received in 1024 t0 1522 byte packets");
6290 
6291 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6292 		"stat_EtherStatsPktsRx1523Octetsto9022Octets",
6293 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1523Octetsto9022Octets,
6294 		0, "Bytes received in 1523 to 9022 byte packets");
6295 
6296 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6297 		"stat_EtherStatsPktsTx64Octets",
6298 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx64Octets,
6299 		0, "Bytes sent in 64 byte packets");
6300 
6301 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6302 		"stat_EtherStatsPktsTx65Octetsto127Octets",
6303 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx65Octetsto127Octets,
6304 		0, "Bytes sent in 65 to 127 byte packets");
6305 
6306 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6307 		"stat_EtherStatsPktsTx128Octetsto255Octets",
6308 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx128Octetsto255Octets,
6309 		0, "Bytes sent in 128 to 255 byte packets");
6310 
6311 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6312 		"stat_EtherStatsPktsTx256Octetsto511Octets",
6313 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx256Octetsto511Octets,
6314 		0, "Bytes sent in 256 to 511 byte packets");
6315 
6316 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6317 		"stat_EtherStatsPktsTx512Octetsto1023Octets",
6318 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx512Octetsto1023Octets,
6319 		0, "Bytes sent in 512 to 1023 byte packets");
6320 
6321 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6322 		"stat_EtherStatsPktsTx1024Octetsto1522Octets",
6323 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1024Octetsto1522Octets,
6324 		0, "Bytes sent in 1024 to 1522 byte packets");
6325 
6326 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6327 		"stat_EtherStatsPktsTx1523Octetsto9022Octets",
6328 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1523Octetsto9022Octets,
6329 		0, "Bytes sent in 1523 to 9022 byte packets");
6330 
6331 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6332 		"stat_XonPauseFramesReceived",
6333 		CTLFLAG_RD, &sc->stat_XonPauseFramesReceived,
6334 		0, "XON pause frames receved");
6335 
6336 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6337 		"stat_XoffPauseFramesReceived",
6338 		CTLFLAG_RD, &sc->stat_XoffPauseFramesReceived,
6339 		0, "XOFF pause frames received");
6340 
6341 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6342 		"stat_OutXonSent",
6343 		CTLFLAG_RD, &sc->stat_OutXonSent,
6344 		0, "XON pause frames sent");
6345 
6346 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6347 		"stat_OutXoffSent",
6348 		CTLFLAG_RD, &sc->stat_OutXoffSent,
6349 		0, "XOFF pause frames sent");
6350 
6351 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6352 		"stat_FlowControlDone",
6353 		CTLFLAG_RD, &sc->stat_FlowControlDone,
6354 		0, "Flow control done");
6355 
6356 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6357 		"stat_MacControlFramesReceived",
6358 		CTLFLAG_RD, &sc->stat_MacControlFramesReceived,
6359 		0, "MAC control frames received");
6360 
6361 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6362 		"stat_XoffStateEntered",
6363 		CTLFLAG_RD, &sc->stat_XoffStateEntered,
6364 		0, "XOFF state entered");
6365 
6366 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6367 		"stat_IfInFramesL2FilterDiscards",
6368 		CTLFLAG_RD, &sc->stat_IfInFramesL2FilterDiscards,
6369 		0, "Received L2 packets discarded");
6370 
6371 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6372 		"stat_IfInRuleCheckerDiscards",
6373 		CTLFLAG_RD, &sc->stat_IfInRuleCheckerDiscards,
6374 		0, "Received packets discarded by rule");
6375 
6376 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6377 		"stat_IfInFTQDiscards",
6378 		CTLFLAG_RD, &sc->stat_IfInFTQDiscards,
6379 		0, "Received packet FTQ discards");
6380 
6381 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6382 		"stat_IfInMBUFDiscards",
6383 		CTLFLAG_RD, &sc->stat_IfInMBUFDiscards,
6384 		0, "Received packets discarded due to lack of controller buffer memory");
6385 
6386 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6387 		"stat_IfInRuleCheckerP4Hit",
6388 		CTLFLAG_RD, &sc->stat_IfInRuleCheckerP4Hit,
6389 		0, "Received packets rule checker hits");
6390 
6391 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6392 		"stat_CatchupInRuleCheckerDiscards",
6393 		CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerDiscards,
6394 		0, "Received packets discarded in Catchup path");
6395 
6396 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6397 		"stat_CatchupInFTQDiscards",
6398 		CTLFLAG_RD, &sc->stat_CatchupInFTQDiscards,
6399 		0, "Received packets discarded in FTQ in Catchup path");
6400 
6401 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6402 		"stat_CatchupInMBUFDiscards",
6403 		CTLFLAG_RD, &sc->stat_CatchupInMBUFDiscards,
6404 		0, "Received packets discarded in controller buffer memory in Catchup path");
6405 
6406 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6407 		"stat_CatchupInRuleCheckerP4Hit",
6408 		CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerP4Hit,
6409 		0, "Received packets rule checker hits in Catchup path");
6410 
6411 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6412 		"com_no_buffers",
6413 		CTLFLAG_RD, &sc->com_no_buffers,
6414 		0, "Valid packets received but no RX buffers available");
6415 }
6416 
/* Sysctl handler: TX max coalesced BD count during interrupt. */
static int
bce_sysctl_tx_bds_int(SYSCTL_HANDLER_ARGS)
{
	struct bce_softc *sc = arg1;

	return bce_sysctl_coal_change(oidp, arg1, arg2, req,
			&sc->bce_tx_quick_cons_trip_int,
			BCE_COALMASK_TX_BDS_INT);
}
6426 
/* Sysctl handler: TX max coalesced BD count. */
static int
bce_sysctl_tx_bds(SYSCTL_HANDLER_ARGS)
{
	struct bce_softc *sc = arg1;

	return bce_sysctl_coal_change(oidp, arg1, arg2, req,
			&sc->bce_tx_quick_cons_trip,
			BCE_COALMASK_TX_BDS);
}
6436 
/* Sysctl handler: TX coalescing ticks during interrupt. */
static int
bce_sysctl_tx_ticks_int(SYSCTL_HANDLER_ARGS)
{
	struct bce_softc *sc = arg1;

	return bce_sysctl_coal_change(oidp, arg1, arg2, req,
			&sc->bce_tx_ticks_int,
			BCE_COALMASK_TX_TICKS_INT);
}
6446 
/* Sysctl handler: TX coalescing ticks. */
static int
bce_sysctl_tx_ticks(SYSCTL_HANDLER_ARGS)
{
	struct bce_softc *sc = arg1;

	return bce_sysctl_coal_change(oidp, arg1, arg2, req,
			&sc->bce_tx_ticks,
			BCE_COALMASK_TX_TICKS);
}
6456 
/* Sysctl handler: RX max coalesced BD count during interrupt. */
static int
bce_sysctl_rx_bds_int(SYSCTL_HANDLER_ARGS)
{
	struct bce_softc *sc = arg1;

	return bce_sysctl_coal_change(oidp, arg1, arg2, req,
			&sc->bce_rx_quick_cons_trip_int,
			BCE_COALMASK_RX_BDS_INT);
}
6466 
/* Sysctl handler: RX max coalesced BD count. */
static int
bce_sysctl_rx_bds(SYSCTL_HANDLER_ARGS)
{
	struct bce_softc *sc = arg1;

	return bce_sysctl_coal_change(oidp, arg1, arg2, req,
			&sc->bce_rx_quick_cons_trip,
			BCE_COALMASK_RX_BDS);
}
6476 
/* Sysctl handler: RX coalescing ticks during interrupt. */
static int
bce_sysctl_rx_ticks_int(SYSCTL_HANDLER_ARGS)
{
	struct bce_softc *sc = arg1;

	return bce_sysctl_coal_change(oidp, arg1, arg2, req,
			&sc->bce_rx_ticks_int,
			BCE_COALMASK_RX_TICKS_INT);
}
6486 
/* Sysctl handler: RX coalescing ticks. */
static int
bce_sysctl_rx_ticks(SYSCTL_HANDLER_ARGS)
{
	struct bce_softc *sc = arg1;

	return bce_sysctl_coal_change(oidp, arg1, arg2, req,
			&sc->bce_rx_ticks,
			BCE_COALMASK_RX_TICKS);
}
6496 
6497 static int
6498 bce_sysctl_coal_change(SYSCTL_HANDLER_ARGS, uint32_t *coal,
6499     uint32_t coalchg_mask)
6500 {
6501 	struct bce_softc *sc = arg1;
6502 	struct ifnet *ifp = &sc->arpcom.ac_if;
6503 	int error = 0, v;
6504 
6505 	ifnet_serialize_all(ifp);
6506 
6507 	v = *coal;
6508 	error = sysctl_handle_int(oidp, &v, 0, req);
6509 	if (!error && req->newptr != NULL) {
6510 		if (v < 0) {
6511 			error = EINVAL;
6512 		} else {
6513 			*coal = v;
6514 			sc->bce_coalchg_mask |= coalchg_mask;
6515 
6516 			/* Commit changes */
6517 			bce_coal_change(sc);
6518 		}
6519 	}
6520 
6521 	ifnet_deserialize_all(ifp);
6522 	return error;
6523 }
6524 
6525 static void
6526 bce_coal_change(struct bce_softc *sc)
6527 {
6528 	struct ifnet *ifp = &sc->arpcom.ac_if;
6529 	int i;
6530 
6531 	ASSERT_SERIALIZED(&sc->main_serialize);
6532 
6533 	if ((ifp->if_flags & IFF_RUNNING) == 0) {
6534 		sc->bce_coalchg_mask = 0;
6535 		return;
6536 	}
6537 
6538 	if (sc->bce_coalchg_mask &
6539 	    (BCE_COALMASK_TX_BDS | BCE_COALMASK_TX_BDS_INT)) {
6540 		REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
6541 		       (sc->bce_tx_quick_cons_trip_int << 16) |
6542 		       sc->bce_tx_quick_cons_trip);
6543 		for (i = 1; i < sc->rx_ring_cnt; ++i) {
6544 			uint32_t base;
6545 
6546 			base = ((i - 1) * BCE_HC_SB_CONFIG_SIZE) +
6547 			    BCE_HC_SB_CONFIG_1;
6548 			REG_WR(sc, base + BCE_HC_TX_QUICK_CONS_TRIP_OFF,
6549 			    (sc->bce_tx_quick_cons_trip_int << 16) |
6550 			    sc->bce_tx_quick_cons_trip);
6551 		}
6552 		if (bootverbose) {
6553 			if_printf(ifp, "tx_bds %u, tx_bds_int %u\n",
6554 				  sc->bce_tx_quick_cons_trip,
6555 				  sc->bce_tx_quick_cons_trip_int);
6556 		}
6557 	}
6558 
6559 	if (sc->bce_coalchg_mask &
6560 	    (BCE_COALMASK_TX_TICKS | BCE_COALMASK_TX_TICKS_INT)) {
6561 		REG_WR(sc, BCE_HC_TX_TICKS,
6562 		       (sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks);
6563 		for (i = 1; i < sc->rx_ring_cnt; ++i) {
6564 			uint32_t base;
6565 
6566 			base = ((i - 1) * BCE_HC_SB_CONFIG_SIZE) +
6567 			    BCE_HC_SB_CONFIG_1;
6568 			REG_WR(sc, base + BCE_HC_TX_TICKS_OFF,
6569 			    (sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks);
6570 		}
6571 		if (bootverbose) {
6572 			if_printf(ifp, "tx_ticks %u, tx_ticks_int %u\n",
6573 				  sc->bce_tx_ticks, sc->bce_tx_ticks_int);
6574 		}
6575 	}
6576 
6577 	if (sc->bce_coalchg_mask &
6578 	    (BCE_COALMASK_RX_BDS | BCE_COALMASK_RX_BDS_INT)) {
6579 		REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
6580 		       (sc->bce_rx_quick_cons_trip_int << 16) |
6581 		       sc->bce_rx_quick_cons_trip);
6582 		for (i = 1; i < sc->rx_ring_cnt; ++i) {
6583 			uint32_t base;
6584 
6585 			base = ((i - 1) * BCE_HC_SB_CONFIG_SIZE) +
6586 			    BCE_HC_SB_CONFIG_1;
6587 			REG_WR(sc, base + BCE_HC_RX_QUICK_CONS_TRIP_OFF,
6588 			    (sc->bce_rx_quick_cons_trip_int << 16) |
6589 			    sc->bce_rx_quick_cons_trip);
6590 		}
6591 		if (bootverbose) {
6592 			if_printf(ifp, "rx_bds %u, rx_bds_int %u\n",
6593 				  sc->bce_rx_quick_cons_trip,
6594 				  sc->bce_rx_quick_cons_trip_int);
6595 		}
6596 	}
6597 
6598 	if (sc->bce_coalchg_mask &
6599 	    (BCE_COALMASK_RX_TICKS | BCE_COALMASK_RX_TICKS_INT)) {
6600 		REG_WR(sc, BCE_HC_RX_TICKS,
6601 		       (sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks);
6602 		for (i = 1; i < sc->rx_ring_cnt; ++i) {
6603 			uint32_t base;
6604 
6605 			base = ((i - 1) * BCE_HC_SB_CONFIG_SIZE) +
6606 			    BCE_HC_SB_CONFIG_1;
6607 			REG_WR(sc, base + BCE_HC_RX_TICKS_OFF,
6608 			    (sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks);
6609 		}
6610 		if (bootverbose) {
6611 			if_printf(ifp, "rx_ticks %u, rx_ticks_int %u\n",
6612 				  sc->bce_rx_ticks, sc->bce_rx_ticks_int);
6613 		}
6614 	}
6615 
6616 	sc->bce_coalchg_mask = 0;
6617 }
6618 
/*
 * Prepare a TSO frame: make sure all protocol headers are contiguous in
 * the first mbuf and compute the TX BD flags and MSS for the chip.
 * Returns 0 on success or ENOBUFS if m_pullup() fails (in which case
 * *mp is set to NULL; the mbuf chain has been freed).
 */
static int
bce_tso_setup(struct bce_tx_ring *txr, struct mbuf **mp,
    uint16_t *flags0, uint16_t *mss0)
{
	struct mbuf *m;
	uint16_t flags;
	int thoff, iphlen, hoff;

	m = *mp;
	KASSERT(M_WRITABLE(m), ("TSO mbuf not writable"));

	/* Header lengths stashed in the packet header by the stack. */
	hoff = m->m_pkthdr.csum_lhlen;		/* link-layer header */
	iphlen = m->m_pkthdr.csum_iphlen;	/* IP header */
	thoff = m->m_pkthdr.csum_thlen;		/* TCP header */

	KASSERT(hoff >= sizeof(struct ether_header),
	    ("invalid ether header len %d", hoff));
	KASSERT(iphlen >= sizeof(struct ip),
	    ("invalid ip header len %d", iphlen));
	KASSERT(thoff >= sizeof(struct tcphdr),
	    ("invalid tcp header len %d", thoff));

	/* Pull all headers into the first mbuf if they aren't already. */
	if (__predict_false(m->m_len < hoff + iphlen + thoff)) {
		m = m_pullup(m, hoff + iphlen + thoff);
		if (m == NULL) {
			/* m_pullup() freed the chain on failure. */
			*mp = NULL;
			return ENOBUFS;
		}
		*mp = m;
	}

	/* Set the LSO flag in the TX BD */
	flags = TX_BD_FLAGS_SW_LSO;

	/* Set the length of IP + TCP options (in 32 bit words) */
	flags |= (((iphlen + thoff -
	    sizeof(struct ip) - sizeof(struct tcphdr)) >> 2) << 8);

	/* MSS is handed to the chip in little-endian byte order. */
	*mss0 = htole16(m->m_pkthdr.tso_segsz);
	*flags0 = flags;

	return 0;
}
6662 
6663 static void
6664 bce_setup_serialize(struct bce_softc *sc)
6665 {
6666 	int i, j;
6667 
6668 	/*
6669 	 * Allocate serializer array
6670 	 */
6671 
6672 	/* Main + TX + RX */
6673 	sc->serialize_cnt = 1 + sc->tx_ring_cnt + sc->rx_ring_cnt;
6674 
6675 	sc->serializes =
6676 	    kmalloc(sc->serialize_cnt * sizeof(struct lwkt_serialize *),
6677 	        M_DEVBUF, M_WAITOK | M_ZERO);
6678 
6679 	/*
6680 	 * Setup serializers
6681 	 *
6682 	 * NOTE: Order is critical
6683 	 */
6684 
6685 	i = 0;
6686 
6687 	KKASSERT(i < sc->serialize_cnt);
6688 	sc->serializes[i++] = &sc->main_serialize;
6689 
6690 	for (j = 0; j < sc->rx_ring_cnt; ++j) {
6691 		KKASSERT(i < sc->serialize_cnt);
6692 		sc->serializes[i++] = &sc->rx_rings[j].rx_serialize;
6693 	}
6694 
6695 	for (j = 0; j < sc->tx_ring_cnt; ++j) {
6696 		KKASSERT(i < sc->serialize_cnt);
6697 		sc->serializes[i++] = &sc->tx_rings[j].tx_serialize;
6698 	}
6699 
6700 	KKASSERT(i == sc->serialize_cnt);
6701 }
6702 
/* ifnet method: enter the driver's serializers for the given scope. */
static void
bce_serialize(struct ifnet *ifp, enum ifnet_serialize slz)
{
	struct bce_softc *sc = ifp->if_softc;

	ifnet_serialize_array_enter(sc->serializes, sc->serialize_cnt, slz);
}
6710 
/* ifnet method: exit the driver's serializers for the given scope. */
static void
bce_deserialize(struct ifnet *ifp, enum ifnet_serialize slz)
{
	struct bce_softc *sc = ifp->if_softc;

	ifnet_serialize_array_exit(sc->serializes, sc->serialize_cnt, slz);
}
6718 
/* ifnet method: non-blocking attempt to enter the serializers. */
static int
bce_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz)
{
	struct bce_softc *sc = ifp->if_softc;

	return ifnet_serialize_array_try(sc->serializes, sc->serialize_cnt,
	    slz);
}
6727 
6728 #ifdef INVARIANTS
6729 
/* ifnet method (INVARIANTS only): assert serializer hold state. */
static void
bce_serialize_assert(struct ifnet *ifp, enum ifnet_serialize slz,
    boolean_t serialized)
{
	struct bce_softc *sc = ifp->if_softc;

	ifnet_serialize_array_assert(sc->serializes, sc->serialize_cnt,
	    slz, serialized);
}
6739 
6740 #endif	/* INVARIANTS */
6741 
/*
 * Enter the serializer array starting at index 1, i.e. skipping the
 * main serializer (presumably already held by the caller -- verify
 * against lwkt_serialize_array_enter()'s start-index semantics).
 */
static void
bce_serialize_skipmain(struct bce_softc *sc)
{
	lwkt_serialize_array_enter(sc->serializes, sc->serialize_cnt, 1);
}
6747 
/* Counterpart of bce_serialize_skipmain(): release all but the first. */
static void
bce_deserialize_skipmain(struct bce_softc *sc)
{
	lwkt_serialize_array_exit(sc->serializes, sc->serialize_cnt, 1);
}
6753 
6754 #ifdef IFPOLL_ENABLE
6755 
6756 static int
6757 bce_sysctl_npoll_offset(SYSCTL_HANDLER_ARGS)
6758 {
6759 	struct bce_softc *sc = (void *)arg1;
6760 	struct ifnet *ifp = &sc->arpcom.ac_if;
6761 	int error, off;
6762 
6763 	off = sc->npoll_ofs;
6764 	error = sysctl_handle_int(oidp, &off, 0, req);
6765 	if (error || req->newptr == NULL)
6766 		return error;
6767 	if (off < 0)
6768 		return EINVAL;
6769 
6770 	ifnet_serialize_all(ifp);
6771 	if (off >= ncpus2 || off % sc->rx_ring_cnt2 != 0) {
6772 		error = EINVAL;
6773 	} else {
6774 		error = 0;
6775 		sc->npoll_ofs = off;
6776 	}
6777 	ifnet_deserialize_all(ifp);
6778 
6779 	return error;
6780 }
6781 
6782 #endif	/* IFPOLL_ENABLE */
6783 
6784 static void
6785 bce_set_timer_cpuid(struct bce_softc *sc, boolean_t polling)
6786 {
6787 	if (polling)
6788 		sc->bce_timer_cpuid = 0; /* XXX */
6789 	else
6790 		sc->bce_timer_cpuid = sc->bce_msix[0].msix_cpuid;
6791 }
6792 
/*
 * Allocate the device interrupt: try MSI-X first, then fall back to a
 * single MSI or legacy INTx.  Returns 0 on success or ENXIO if no
 * interrupt resource could be mapped.
 */
static int
bce_alloc_intr(struct bce_softc *sc)
{
	u_int irq_flags;

	/* bce_try_alloc_msix() sets bce_irq_type on success. */
	bce_try_alloc_msix(sc);
	if (sc->bce_irq_type == PCI_INTR_TYPE_MSIX)
		return 0;

	/* Fall back to one MSI or legacy interrupt. */
	sc->bce_irq_type = pci_alloc_1intr(sc->bce_dev, bce_msi_enable,
	    &sc->bce_irq_rid, &irq_flags);

	sc->bce_res_irq = bus_alloc_resource_any(sc->bce_dev, SYS_RES_IRQ,
	    &sc->bce_irq_rid, irq_flags);
	if (sc->bce_res_irq == NULL) {
		device_printf(sc->bce_dev, "PCI map interrupt failed\n");
		return ENXIO;
	}
	/* Record the single interrupt's cpu and serializer in slot 0. */
	sc->bce_msix[0].msix_cpuid = rman_get_cpuid(sc->bce_res_irq);
	sc->bce_msix[0].msix_serialize = &sc->main_serialize;

	return 0;
}
6816 
/*
 * Attempt to bring up MSI-X: one "combo" vector (vector 0) plus one
 * vector per additional RX ring.  On success sc->bce_irq_type is set
 * to PCI_INTR_TYPE_MSIX; on any failure all partial allocations are
 * released and the caller falls back to MSI/INTx.
 */
static void
bce_try_alloc_msix(struct bce_softc *sc)
{
	struct bce_msix_data *msix;
	int offset, i, error;
	boolean_t setup = FALSE;

	/* A single RX ring gains nothing from MSI-X. */
	if (sc->rx_ring_cnt == 1)
		return;

	/*
	 * Choose the base cpu for the vectors; overridable through the
	 * "msix.offset" tunable, which must be below ncpus2 and a
	 * multiple of the effective RSS RX ring count.
	 */
	if (sc->rx_ring_cnt2 == ncpus2) {
		offset = 0;
	} else {
		int offset_def =
		    (sc->rx_ring_cnt2 * device_get_unit(sc->bce_dev)) % ncpus2;

		offset = device_getenv_int(sc->bce_dev,
		    "msix.offset", offset_def);
		if (offset >= ncpus2 || offset % sc->rx_ring_cnt2 != 0) {
			device_printf(sc->bce_dev,
			    "invalid msix.offset %d, use %d\n",
			    offset, offset_def);
			offset = offset_def;
		}
	}

	/*
	 * Vector 0 is the "combo" vector, handled like the single-MSI
	 * case under the main serializer.
	 */
	msix = &sc->bce_msix[0];
	msix->msix_serialize = &sc->main_serialize;
	msix->msix_func = bce_intr_msi_oneshot;
	msix->msix_arg = sc;
	KKASSERT(offset < ncpus2);
	msix->msix_cpuid = offset;
	ksnprintf(msix->msix_desc, sizeof(msix->msix_desc), "%s combo",
	    device_get_nameunit(sc->bce_dev));

	/* One vector per extra RX ring, spread round-robin from 'offset'. */
	for (i = 1; i < sc->rx_ring_cnt; ++i) {
		struct bce_rx_ring *rxr = &sc->rx_rings[i];

		msix = &sc->bce_msix[i];

		msix->msix_serialize = &rxr->rx_serialize;
		msix->msix_arg = rxr;
		msix->msix_cpuid = offset + (i % sc->rx_ring_cnt2);
		KKASSERT(msix->msix_cpuid < ncpus2);

		if (i < sc->tx_ring_cnt) {
			/* This vector services both a TX and an RX ring. */
			msix->msix_func = bce_intr_msix_rxtx;
			ksnprintf(msix->msix_desc, sizeof(msix->msix_desc),
			    "%s rxtx%d", device_get_nameunit(sc->bce_dev), i);
		} else {
			/* RX-only vector. */
			msix->msix_func = bce_intr_msix_rx;
			ksnprintf(msix->msix_desc, sizeof(msix->msix_desc),
			    "%s rx%d", device_get_nameunit(sc->bce_dev), i);
		}
	}

	/*
	 * Setup MSI-X table
	 */
	bce_setup_msix_table(sc);
	REG_WR(sc, BCE_PCI_MSIX_CONTROL, BCE_MSIX_MAX - 1);
	REG_WR(sc, BCE_PCI_MSIX_TBL_OFF_BIR, BCE_PCI_GRC_WINDOW2_BASE);
	REG_WR(sc, BCE_PCI_MSIX_PBA_OFF_BIT, BCE_PCI_GRC_WINDOW3_BASE);
	/* Flush */
	REG_RD(sc, BCE_PCI_MSIX_CONTROL);

	error = pci_setup_msix(sc->bce_dev);
	if (error) {
		device_printf(sc->bce_dev, "Setup MSI-X failed\n");
		goto back;
	}
	setup = TRUE;

	/* Allocate each vector and its IRQ resource on the target cpu. */
	for (i = 0; i < sc->rx_ring_cnt; ++i) {
		msix = &sc->bce_msix[i];

		error = pci_alloc_msix_vector(sc->bce_dev, i, &msix->msix_rid,
		    msix->msix_cpuid);
		if (error) {
			device_printf(sc->bce_dev,
			    "Unable to allocate MSI-X %d on cpu%d\n",
			    i, msix->msix_cpuid);
			goto back;
		}

		msix->msix_res = bus_alloc_resource_any(sc->bce_dev,
		    SYS_RES_IRQ, &msix->msix_rid, RF_ACTIVE);
		if (msix->msix_res == NULL) {
			device_printf(sc->bce_dev,
			    "Unable to allocate MSI-X %d resource\n", i);
			error = ENOMEM;
			goto back;
		}
	}

	pci_enable_msix(sc->bce_dev);
	sc->bce_irq_type = PCI_INTR_TYPE_MSIX;
back:
	/* On failure, release whatever was allocated ('setup' tells how far). */
	if (error)
		bce_free_msix(sc, setup);
}
6918 
6919 static void
6920 bce_setup_ring_cnt(struct bce_softc *sc)
6921 {
6922 	int msix_enable, ring_max, msix_cnt2, msix_cnt, i;
6923 
6924 	sc->rx_ring_cnt = 1;
6925 	sc->rx_ring_cnt2 = 1;
6926 	sc->tx_ring_cnt = 1;
6927 
6928 	if (BCE_CHIP_NUM(sc) != BCE_CHIP_NUM_5709 &&
6929 	    BCE_CHIP_NUM(sc) != BCE_CHIP_NUM_5716)
6930 		return;
6931 
6932 	msix_enable = device_getenv_int(sc->bce_dev, "msix.enable",
6933 	    bce_msix_enable);
6934 	if (!msix_enable)
6935 		return;
6936 
6937 	if (ncpus2 == 1)
6938 		return;
6939 
6940 	msix_cnt = pci_msix_count(sc->bce_dev);
6941 	if (msix_cnt <= 1)
6942 		return;
6943 
6944 	i = 0;
6945 	while ((1 << (i + 1)) <= msix_cnt)
6946 		++i;
6947 	msix_cnt2 = 1 << i;
6948 
6949 	/*
6950 	 * One extra RX ring will be needed (see below), so make sure
6951 	 * that there are enough MSI-X vectors.
6952 	 */
6953 	if (msix_cnt == msix_cnt2) {
6954 		/*
6955 		 * XXX
6956 		 * This probably will not happen; 5709/5716
6957 		 * come with 9 MSI-X vectors.
6958 		 */
6959 		msix_cnt2 >>= 1;
6960 		if (msix_cnt2 <= 1) {
6961 			device_printf(sc->bce_dev,
6962 			    "MSI-X count %d could not be used\n", msix_cnt);
6963 			return;
6964 		}
6965 		device_printf(sc->bce_dev, "MSI-X count %d is power of 2\n",
6966 		    msix_cnt);
6967 	}
6968 
6969 	/*
6970 	 * Setup RX ring count
6971 	 */
6972 	ring_max = BCE_RX_RING_MAX;
6973 	if (ring_max > msix_cnt2)
6974 		ring_max = msix_cnt2;
6975 	sc->rx_ring_cnt2 = device_getenv_int(sc->bce_dev, "rx_rings",
6976 	    bce_rx_rings);
6977 	sc->rx_ring_cnt2 = if_ring_count2(sc->rx_ring_cnt2, ring_max);
6978 
6979 	/*
6980 	 * Don't use MSI-X, if the effective RX ring count is 1.
6981 	 * Since if the effective RX ring count is 1, the TX ring
6982 	 * count will be 1.  This RX ring and the TX ring must be
6983 	 * bundled into one MSI-X vector, so the hot path will be
6984 	 * exact same as using MSI.  Besides, the first RX ring
6985 	 * must be fully populated, which only accepts packets whose
6986 	 * RSS hash can't calculated, e.g. ARP packets; waste of
6987 	 * resource at least.
6988 	 */
6989 	if (sc->rx_ring_cnt2 == 1)
6990 		return;
6991 
6992 	/*
6993 	 * One extra RX ring is allocated, since the first RX ring
6994 	 * could not be used for RSS hashed packets whose masked
6995 	 * hash is 0.  The first RX ring is only used for packets
6996 	 * whose RSS hash could not be calculated, e.g. ARP packets.
6997 	 * This extra RX ring will be used for packets whose masked
6998 	 * hash is 0.  The effective RX ring count involved in RSS
6999 	 * is still sc->rx_ring_cnt2.
7000 	 */
7001 	KKASSERT(sc->rx_ring_cnt2 + 1 <= msix_cnt);
7002 	sc->rx_ring_cnt = sc->rx_ring_cnt2 + 1;
7003 
7004 	/*
7005 	 * Setup TX ring count
7006 	 *
7007 	 * NOTE:
7008 	 * TX ring count must be less than the effective RSS RX ring
7009 	 * count, since we use RX ring software data struct to save
7010 	 * status index and various other MSI-X related stuffs.
7011 	 */
7012 	ring_max = BCE_TX_RING_MAX;
7013 	if (ring_max > msix_cnt2)
7014 		ring_max = msix_cnt2;
7015 	if (ring_max > sc->rx_ring_cnt2)
7016 		ring_max = sc->rx_ring_cnt2;
7017 	sc->tx_ring_cnt = device_getenv_int(sc->bce_dev, "tx_rings",
7018 	    bce_tx_rings);
7019 	sc->tx_ring_cnt = if_ring_count2(sc->tx_ring_cnt, ring_max);
7020 }
7021 
7022 static void
7023 bce_free_msix(struct bce_softc *sc, boolean_t setup)
7024 {
7025 	int i;
7026 
7027 	KKASSERT(sc->rx_ring_cnt > 1);
7028 
7029 	for (i = 0; i < sc->rx_ring_cnt; ++i) {
7030 		struct bce_msix_data *msix = &sc->bce_msix[i];
7031 
7032 		if (msix->msix_res != NULL) {
7033 			bus_release_resource(sc->bce_dev, SYS_RES_IRQ,
7034 			    msix->msix_rid, msix->msix_res);
7035 		}
7036 		if (msix->msix_rid >= 0)
7037 			pci_release_msix_vector(sc->bce_dev, msix->msix_rid);
7038 	}
7039 	if (setup)
7040 		pci_teardown_msix(sc->bce_dev);
7041 }
7042 
7043 static void
7044 bce_free_intr(struct bce_softc *sc)
7045 {
7046 	if (sc->bce_irq_type != PCI_INTR_TYPE_MSIX) {
7047 		if (sc->bce_res_irq != NULL) {
7048 			bus_release_resource(sc->bce_dev, SYS_RES_IRQ,
7049 			    sc->bce_irq_rid, sc->bce_res_irq);
7050 		}
7051 		if (sc->bce_irq_type == PCI_INTR_TYPE_MSI)
7052 			pci_release_msi(sc->bce_dev);
7053 	} else {
7054 		bce_free_msix(sc, TRUE);
7055 	}
7056 }
7057 
/*
 * Point the chip's GRC windows at the MSI-X table and PBA, so that
 * the MSI-X structures become accessible through BAR windows 2 and 3.
 * Write order matters: the separate-window mode is enabled first,
 * then the two window base addresses are programmed.
 */
static void
bce_setup_msix_table(struct bce_softc *sc)
{
	/* Use separate GRC windows rather than one large window. */
	REG_WR(sc, BCE_PCI_GRC_WINDOW_ADDR, BCE_PCI_GRC_WINDOW_ADDR_SEP_WIN);
	/* Window 2 -> MSI-X table, window 3 -> MSI-X PBA. */
	REG_WR(sc, BCE_PCI_GRC_WINDOW2_ADDR, BCE_MSIX_TABLE_ADDR);
	REG_WR(sc, BCE_PCI_GRC_WINDOW3_ADDR, BCE_MSIX_PBA_ADDR);
}
7065 
7066 static int
7067 bce_setup_intr(struct bce_softc *sc)
7068 {
7069 	void (*irq_handle)(void *);
7070 	int error;
7071 
7072 	if (sc->bce_irq_type == PCI_INTR_TYPE_MSIX)
7073 		return bce_setup_msix(sc);
7074 
7075 	if (sc->bce_irq_type == PCI_INTR_TYPE_LEGACY) {
7076 		irq_handle = bce_intr_legacy;
7077 	} else if (sc->bce_irq_type == PCI_INTR_TYPE_MSI) {
7078 		if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
7079 		    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
7080 			irq_handle = bce_intr_msi_oneshot;
7081 			sc->bce_flags |= BCE_ONESHOT_MSI_FLAG;
7082 		} else {
7083 			irq_handle = bce_intr_msi;
7084 			sc->bce_flags |= BCE_CHECK_MSI_FLAG;
7085 		}
7086 	} else {
7087 		panic("%s: unsupported intr type %d",
7088 		    device_get_nameunit(sc->bce_dev), sc->bce_irq_type);
7089 	}
7090 
7091 	error = bus_setup_intr(sc->bce_dev, sc->bce_res_irq, INTR_MPSAFE,
7092 	    irq_handle, sc, &sc->bce_intrhand, &sc->main_serialize);
7093 	if (error != 0) {
7094 		device_printf(sc->bce_dev, "Failed to setup IRQ!\n");
7095 		return error;
7096 	}
7097 
7098 	return 0;
7099 }
7100 
7101 static void
7102 bce_teardown_intr(struct bce_softc *sc)
7103 {
7104 	if (sc->bce_irq_type != PCI_INTR_TYPE_MSIX)
7105 		bus_teardown_intr(sc->bce_dev, sc->bce_res_irq, sc->bce_intrhand);
7106 	else
7107 		bce_teardown_msix(sc, sc->rx_ring_cnt);
7108 }
7109 
7110 static int
7111 bce_setup_msix(struct bce_softc *sc)
7112 {
7113 	int i;
7114 
7115 	for (i = 0; i < sc->rx_ring_cnt; ++i) {
7116 		struct bce_msix_data *msix = &sc->bce_msix[i];
7117 		int error;
7118 
7119 		error = bus_setup_intr_descr(sc->bce_dev, msix->msix_res,
7120 		    INTR_MPSAFE, msix->msix_func, msix->msix_arg,
7121 		    &msix->msix_handle, msix->msix_serialize, msix->msix_desc);
7122 		if (error) {
7123 			device_printf(sc->bce_dev, "could not set up %s "
7124 			    "interrupt handler.\n", msix->msix_desc);
7125 			bce_teardown_msix(sc, i);
7126 			return error;
7127 		}
7128 	}
7129 	return 0;
7130 }
7131 
7132 static void
7133 bce_teardown_msix(struct bce_softc *sc, int msix_cnt)
7134 {
7135 	int i;
7136 
7137 	for (i = 0; i < msix_cnt; ++i) {
7138 		struct bce_msix_data *msix = &sc->bce_msix[i];
7139 
7140 		bus_teardown_intr(sc->bce_dev, msix->msix_res,
7141 		    msix->msix_handle);
7142 	}
7143 }
7144 
/*
 * Program the chip's RSS machinery: the Toeplitz hash key and the
 * indirection (redirect) table, then enable RSS for all IPv4 types.
 * Requires more than two RX rings (ring 0 is the non-RSS ring and
 * one ring is reserved for masked-hash-0 traffic; see
 * bce_setup_ring_cnt()).
 */
static void
bce_init_rss(struct bce_softc *sc)
{
	uint8_t key[BCE_RLUP_RSS_KEY_CNT * BCE_RLUP_RSS_KEY_SIZE];
	uint32_t tbl = 0;
	int i;

	KKASSERT(sc->rx_ring_cnt > 2);

	/*
	 * Configure RSS keys
	 */
	toeplitz_get_key(key, sizeof(key));
	for (i = 0; i < BCE_RLUP_RSS_KEY_CNT; ++i) {
		uint32_t rss_key;

		/* Pack the next chunk of the key into a 32-bit register. */
		rss_key = BCE_RLUP_RSS_KEYVAL(key, i);
		BCE_RSS_DPRINTF(sc, 1, "rss_key%d 0x%08x\n", i, rss_key);

		REG_WR(sc, BCE_RLUP_RSS_KEY(i), rss_key);
	}

	/*
	 * Configure the redirect table
	 *
	 * NOTE:
	 * - The "queue ID" in redirect table is the software RX ring's
	 *   index _minus_ one.
	 * - The last RX ring, whose "queue ID" is (sc->rx_ring_cnt - 2)
	 *   will be used for packets whose masked hash is 0.
	 *   (see also: comment in bce_setup_ring_cnt())
	 *
	 * The redirect table is configured in following fashion, except
	 * for the masked hash 0, which is noted above:
	 * (hash & ring_cnt_mask) == rdr_table[(hash & rdr_table_mask)]
	 */
	for (i = 0; i < BCE_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
		/* Each table entry is a 4-bit nibble; 8 entries fit in
		 * one 32-bit data word. */
		int shift = (i % 8) << 2, qid;

		qid = i % sc->rx_ring_cnt2;
		if (qid > 0)
			--qid;		/* ring index -> queue ID */
		else
			qid = sc->rx_ring_cnt - 2;	/* masked hash 0 */
		KKASSERT(qid < (sc->rx_ring_cnt - 1));

		tbl |= qid << shift;
		if (i % 8 == 7) {
			/* Word full: write it out at word index (i >> 3). */
			BCE_RSS_DPRINTF(sc, 1, "tbl 0x%08x\n", tbl);
			REG_WR(sc, BCE_RLUP_RSS_DATA, tbl);
			REG_WR(sc, BCE_RLUP_RSS_COMMAND, (i >> 3) |
			    BCE_RLUP_RSS_COMMAND_RSS_WRITE_MASK |
			    BCE_RLUP_RSS_COMMAND_WRITE |
			    BCE_RLUP_RSS_COMMAND_HASH_MASK);
			tbl = 0;
		}
	}
	/* Finally, enable RSS for all supported IPv4 packet types. */
	REG_WR(sc, BCE_RLUP_RSS_CONFIG,
	    BCE_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI);
}
7205 
7206 static void
7207 bce_npoll_coal_change(struct bce_softc *sc)
7208 {
7209 	uint32_t old_rx_cons, old_tx_cons;
7210 
7211 	old_rx_cons = sc->bce_rx_quick_cons_trip_int;
7212 	old_tx_cons = sc->bce_tx_quick_cons_trip_int;
7213 	sc->bce_rx_quick_cons_trip_int = 1;
7214 	sc->bce_tx_quick_cons_trip_int = 1;
7215 
7216 	sc->bce_coalchg_mask |= BCE_COALMASK_TX_BDS_INT |
7217 	    BCE_COALMASK_RX_BDS_INT;
7218 	bce_coal_change(sc);
7219 
7220 	sc->bce_rx_quick_cons_trip_int = old_rx_cons;
7221 	sc->bce_tx_quick_cons_trip_int = old_tx_cons;
7222 }
7223 
7224 static struct pktinfo *
7225 bce_rss_pktinfo(struct pktinfo *pi, uint32_t status,
7226     const struct l2_fhdr *l2fhdr)
7227 {
7228 	/* Check for an IP datagram. */
7229 	if ((status & L2_FHDR_STATUS_IP_DATAGRAM) == 0)
7230 		return NULL;
7231 
7232 	/* Check if the IP checksum is valid. */
7233 	if (l2fhdr->l2_fhdr_ip_xsum != 0xffff)
7234 		return NULL;
7235 
7236 	/* Check for a valid TCP/UDP frame. */
7237 	if (status & L2_FHDR_STATUS_TCP_SEGMENT) {
7238 		if (status & L2_FHDR_ERRORS_TCP_XSUM)
7239 			return NULL;
7240 		if (l2fhdr->l2_fhdr_tcp_udp_xsum != 0xffff)
7241 			return NULL;
7242 		pi->pi_l3proto = IPPROTO_TCP;
7243 	} else if (status & L2_FHDR_STATUS_UDP_DATAGRAM) {
7244 		if (status & L2_FHDR_ERRORS_UDP_XSUM)
7245 			return NULL;
7246 		if (l2fhdr->l2_fhdr_tcp_udp_xsum != 0xffff)
7247 			return NULL;
7248 		pi->pi_l3proto = IPPROTO_UDP;
7249 	} else {
7250 		return NULL;
7251 	}
7252 	pi->pi_netisr = NETISR_IP;
7253 	pi->pi_flags = 0;
7254 
7255 	return pi;
7256 }
7257