xref: /dragonfly/sys/dev/netif/bce/if_bce.c (revision 030b0c8c)
1 /*-
2  * Copyright (c) 2006-2007 Broadcom Corporation
3  *	David Christensen <davidch@broadcom.com>.  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  *
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. Neither the name of Broadcom Corporation nor the name of its contributors
15  *    may be used to endorse or promote products derived from this software
16  *    without specific prior written consent.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
22  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28  * THE POSSIBILITY OF SUCH DAMAGE.
29  *
30  * $FreeBSD: src/sys/dev/bce/if_bce.c,v 1.31 2007/05/16 23:34:11 davidch Exp $
31  */
32 
33 /*
34  * The following controllers are supported by this driver:
35  *   BCM5706C A2, A3
36  *   BCM5706S A2, A3
37  *   BCM5708C B1, B2
38  *   BCM5708S B1, B2
39  *   BCM5709C A1, B2, C0
40  *   BCM5716  C0
41  *
42  * The following controllers are not supported by this driver:
43  *   BCM5706C A0, A1
44  *   BCM5706S A0, A1
45  *   BCM5708C A0, B0
46  *   BCM5708S A0, B0
47  *   BCM5709C A0, B0, B1
48  *   BCM5709S A0, A1, B0, B1, B2, C0
49  *
50  *
51  * Note about MSI-X on 5709/5716:
52  * - 9 MSI-X vectors are supported.
53  * - MSI-X vectors, RX/TX rings and status blocks' association
54  *   are fixed:
55  *   o  The first RX ring and the first TX ring use the first
56  *      status block.
57  *   o  The first MSI-X vector is associated with the first
58  *      status block.
59  *   o  The second RX ring and the second TX ring use the second
60  *      status block.
61  *   o  The second MSI-X vector is associated with the second
62  *      status block.
63  *   ...
64  *   and so on.
65  * - Status blocks must reside in physically contiguous memory
66  *   and each status block consumes 128 bytes.  In addition,
67  *   the memory for the status blocks is aligned on a 128 byte
68  *   boundary in this driver.  (see bce_dma_alloc() and HC_CONFIG)
69  * - Each status block has its own coalesce parameters, which also
70  *   serve as the related MSI-X vector's interrupt moderation
71  *   parameters.  (see bce_coal_change())
72  */
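/*
 * Illustrative sketch (not part of the driver): given the fixed
 * 128 byte status block size/alignment and the 1:1 MSI-X vector/
 * ring/status block association described above, the status block
 * serving vector 'msix_idx' sits at a simple stride from the base
 * of the contiguous status block memory.  All names below are
 * hypothetical.
 */
#if 0
#define HYP_STATUS_BLK_SIZE	128	/* bytes, per the note above */

static __inline void *
hyp_status_block(void *status_blk_base, int msix_idx)
{
	/* vector 0 -> first block, vector 1 -> second block, ... */
	return ((char *)status_blk_base + msix_idx * HYP_STATUS_BLK_SIZE);
}
#endif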
73 
74 #include "opt_bce.h"
75 #include "opt_ifpoll.h"
76 
77 #include <sys/param.h>
78 #include <sys/bus.h>
79 #include <sys/endian.h>
80 #include <sys/kernel.h>
81 #include <sys/interrupt.h>
82 #include <sys/mbuf.h>
83 #include <sys/malloc.h>
84 #include <sys/queue.h>
85 #include <sys/rman.h>
86 #include <sys/serialize.h>
87 #include <sys/socket.h>
88 #include <sys/sockio.h>
89 #include <sys/sysctl.h>
90 
91 #include <netinet/ip.h>
92 #include <netinet/tcp.h>
93 
94 #include <net/bpf.h>
95 #include <net/ethernet.h>
96 #include <net/if.h>
97 #include <net/if_arp.h>
98 #include <net/if_dl.h>
99 #include <net/if_media.h>
100 #include <net/if_poll.h>
101 #include <net/if_types.h>
102 #include <net/ifq_var.h>
103 #include <net/if_ringmap.h>
104 #include <net/toeplitz.h>
105 #include <net/toeplitz2.h>
106 #include <net/vlan/if_vlan_var.h>
107 #include <net/vlan/if_vlan_ether.h>
108 
109 #include <dev/netif/mii_layer/mii.h>
110 #include <dev/netif/mii_layer/miivar.h>
111 #include <dev/netif/mii_layer/brgphyreg.h>
112 
113 #include <bus/pci/pcireg.h>
114 #include <bus/pci/pcivar.h>
115 
116 #include "miibus_if.h"
117 
118 #include <dev/netif/bce/if_bcereg.h>
119 #include <dev/netif/bce/if_bcefw.h>
120 
121 #define BCE_MSI_CKINTVL		((10 * hz) / 1000)	/* 10ms */
122 
123 #ifdef BCE_RSS_DEBUG
124 #define BCE_RSS_DPRINTF(sc, lvl, fmt, ...) \
125 do { \
126 	if (sc->rss_debug >= lvl) \
127 		if_printf(&sc->arpcom.ac_if, fmt, __VA_ARGS__); \
128 } while (0)
129 #else	/* !BCE_RSS_DEBUG */
130 #define BCE_RSS_DPRINTF(sc, lvl, fmt, ...)	((void)0)
131 #endif	/* BCE_RSS_DEBUG */
132 
133 /****************************************************************************/
134 /* PCI Device ID Table                                                      */
135 /*                                                                          */
136 /* Used by bce_probe() to identify the devices supported by this driver.    */
137 /****************************************************************************/
138 #define BCE_DEVDESC_MAX		64
139 
140 static struct bce_type bce_devs[] = {
141 	/* BCM5706C Controllers and OEM boards. */
142 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706,  HP_VENDORID, 0x3101,
143 		"HP NC370T Multifunction Gigabit Server Adapter" },
144 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706,  HP_VENDORID, 0x3106,
145 		"HP NC370i Multifunction Gigabit Server Adapter" },
146 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706,  HP_VENDORID, 0x3070,
147 		"HP NC380T PCIe DP Multifunc Gig Server Adapter" },
148 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706,  HP_VENDORID, 0x1709,
149 		"HP NC371i Multifunction Gigabit Server Adapter" },
150 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706,  PCI_ANY_ID,  PCI_ANY_ID,
151 		"Broadcom NetXtreme II BCM5706 1000Base-T" },
152 
153 	/* BCM5706S controllers and OEM boards. */
154 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, HP_VENDORID, 0x3102,
155 		"HP NC370F Multifunction Gigabit Server Adapter" },
156 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, PCI_ANY_ID,  PCI_ANY_ID,
157 		"Broadcom NetXtreme II BCM5706 1000Base-SX" },
158 
159 	/* BCM5708C controllers and OEM boards. */
160 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708,  HP_VENDORID, 0x7037,
161 		"HP NC373T PCIe Multifunction Gig Server Adapter" },
162 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708,  HP_VENDORID, 0x7038,
163 		"HP NC373i Multifunction Gigabit Server Adapter" },
164 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708,  HP_VENDORID, 0x7045,
165 		"HP NC374m PCIe Multifunction Adapter" },
166 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708,  PCI_ANY_ID,  PCI_ANY_ID,
167 		"Broadcom NetXtreme II BCM5708 1000Base-T" },
168 
169 	/* BCM5708S controllers and OEM boards. */
170 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708S,  HP_VENDORID, 0x1706,
171 		"HP NC373m Multifunction Gigabit Server Adapter" },
172 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708S,  HP_VENDORID, 0x703b,
173 		"HP NC373i Multifunction Gigabit Server Adapter" },
174 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708S,  HP_VENDORID, 0x703d,
175 		"HP NC373F PCIe Multifunc Giga Server Adapter" },
176 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708S,  PCI_ANY_ID,  PCI_ANY_ID,
177 		"Broadcom NetXtreme II BCM5708S 1000Base-T" },
178 
179 	/* BCM5709C controllers and OEM boards. */
180 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709,  HP_VENDORID, 0x7055,
181 		"HP NC382i DP Multifunction Gigabit Server Adapter" },
182 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709,  HP_VENDORID, 0x7059,
183 		"HP NC382T PCIe DP Multifunction Gigabit Server Adapter" },
184 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709,  PCI_ANY_ID,  PCI_ANY_ID,
185 		"Broadcom NetXtreme II BCM5709 1000Base-T" },
186 
187 	/* BCM5709S controllers and OEM boards. */
188 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709S,  HP_VENDORID, 0x171d,
189 		"HP NC382m DP 1GbE Multifunction BL-c Adapter" },
190 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709S,  HP_VENDORID, 0x7056,
191 		"HP NC382i DP Multifunction Gigabit Server Adapter" },
192 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709S,  PCI_ANY_ID,  PCI_ANY_ID,
193 		"Broadcom NetXtreme II BCM5709 1000Base-SX" },
194 
195 	/* BCM5716 controllers and OEM boards. */
196 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5716,   PCI_ANY_ID,  PCI_ANY_ID,
197 		"Broadcom NetXtreme II BCM5716 1000Base-T" },
198 
199 	{ 0, 0, 0, 0, NULL }
200 };
201 
202 /****************************************************************************/
203 /* Supported Flash NVRAM device data.                                       */
204 /****************************************************************************/
205 static const struct flash_spec flash_table[] =
206 {
207 #define BUFFERED_FLAGS		(BCE_NV_BUFFERED | BCE_NV_TRANSLATE)
208 #define NONBUFFERED_FLAGS	(BCE_NV_WREN)
209 
210 	/* Slow EEPROM */
211 	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
212 	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
213 	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
214 	 "EEPROM - slow"},
215 	/* Expansion entry 0001 */
216 	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
217 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
218 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
219 	 "Entry 0001"},
220 	/* Saifun SA25F010 (non-buffered flash) */
221 	/* strap, cfg1, & write1 need updates */
222 	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
223 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
224 	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
225 	 "Non-buffered flash (128kB)"},
226 	/* Saifun SA25F020 (non-buffered flash) */
227 	/* strap, cfg1, & write1 need updates */
228 	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
229 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
230 	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
231 	 "Non-buffered flash (256kB)"},
232 	/* Expansion entry 0100 */
233 	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
234 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
235 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
236 	 "Entry 0100"},
237 	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
238 	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
239 	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
240 	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
241 	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
242 	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
243 	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
244 	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
245 	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
246 	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
247 	/* Saifun SA25F005 (non-buffered flash) */
248 	/* strap, cfg1, & write1 need updates */
249 	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
250 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
251 	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
252 	 "Non-buffered flash (64kB)"},
253 	/* Fast EEPROM */
254 	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
255 	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
256 	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
257 	 "EEPROM - fast"},
258 	/* Expansion entry 1001 */
259 	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
260 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
261 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
262 	 "Entry 1001"},
263 	/* Expansion entry 1010 */
264 	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
265 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
266 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
267 	 "Entry 1010"},
268 	/* ATMEL AT45DB011B (buffered flash) */
269 	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
270 	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
271 	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
272 	 "Buffered flash (128kB)"},
273 	/* Expansion entry 1100 */
274 	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
275 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
276 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
277 	 "Entry 1100"},
278 	/* Expansion entry 1101 */
279 	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
280 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
281 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
282 	 "Entry 1101"},
283 	/* Atmel Expansion entry 1110 */
284 	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
285 	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
286 	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
287 	 "Entry 1110 (Atmel)"},
288 	/* ATMEL AT45DB021B (buffered flash) */
289 	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
290 	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
291 	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
292 	 "Buffered flash (256kB)"},
293 };
294 
295 /*
296  * The BCM5709 controllers transparently handle the
297  * differences between Atmel 264 byte pages and all
298  * flash devices which use 256 byte pages, so no
299  * logical-to-physical mapping is required in the
300  * driver.
301  */
302 static struct flash_spec flash_5709 = {
303 	.flags		= BCE_NV_BUFFERED,
304 	.page_bits	= BCM5709_FLASH_PAGE_BITS,
305 	.page_size	= BCM5709_FLASH_PAGE_SIZE,
306 	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
307 	.total_size	= BUFFERED_FLASH_TOTAL_SIZE * 2,
308 	.name		= "5709/5716 buffered flash (256kB)",
309 };
310 
311 /****************************************************************************/
312 /* DragonFly device entry points.                                           */
313 /****************************************************************************/
314 static int	bce_probe(device_t);
315 static int	bce_attach(device_t);
316 static int	bce_detach(device_t);
317 static void	bce_shutdown(device_t);
318 static int	bce_miibus_read_reg(device_t, int, int);
319 static int	bce_miibus_write_reg(device_t, int, int, int);
320 static void	bce_miibus_statchg(device_t);
321 
322 /****************************************************************************/
323 /* BCE Register/Memory Access Routines                                      */
324 /****************************************************************************/
325 static uint32_t	bce_reg_rd_ind(struct bce_softc *, uint32_t);
326 static void	bce_reg_wr_ind(struct bce_softc *, uint32_t, uint32_t);
327 static void	bce_shmem_wr(struct bce_softc *, uint32_t, uint32_t);
328 static uint32_t	bce_shmem_rd(struct bce_softc *, uint32_t);
329 static void	bce_ctx_wr(struct bce_softc *, uint32_t, uint32_t, uint32_t);
330 
331 /****************************************************************************/
332 /* BCE NVRAM Access Routines                                                */
333 /****************************************************************************/
334 static int	bce_acquire_nvram_lock(struct bce_softc *);
335 static int	bce_release_nvram_lock(struct bce_softc *);
336 static void	bce_enable_nvram_access(struct bce_softc *);
337 static void	bce_disable_nvram_access(struct bce_softc *);
338 static int	bce_nvram_read_dword(struct bce_softc *, uint32_t, uint8_t *,
339 		    uint32_t);
340 static int	bce_init_nvram(struct bce_softc *);
341 static int	bce_nvram_read(struct bce_softc *, uint32_t, uint8_t *, int);
342 static int	bce_nvram_test(struct bce_softc *);
343 
344 /****************************************************************************/
345 /* BCE DMA Allocate/Free Routines                                           */
346 /****************************************************************************/
347 static int	bce_dma_alloc(struct bce_softc *);
348 static void	bce_dma_free(struct bce_softc *);
349 static void	bce_dma_map_addr(void *, bus_dma_segment_t *, int, int);
350 
351 /****************************************************************************/
352 /* BCE Firmware Synchronization and Load                                    */
353 /****************************************************************************/
354 static int	bce_fw_sync(struct bce_softc *, uint32_t);
355 static void	bce_load_rv2p_fw(struct bce_softc *, uint32_t *,
356 		    uint32_t, uint32_t);
357 static void	bce_load_cpu_fw(struct bce_softc *, struct cpu_reg *,
358 		    struct fw_info *);
359 static void	bce_start_cpu(struct bce_softc *, struct cpu_reg *);
360 static void	bce_halt_cpu(struct bce_softc *, struct cpu_reg *);
361 static void	bce_start_rxp_cpu(struct bce_softc *);
362 static void	bce_init_rxp_cpu(struct bce_softc *);
363 static void	bce_init_txp_cpu(struct bce_softc *);
364 static void	bce_init_tpat_cpu(struct bce_softc *);
365 static void	bce_init_cp_cpu(struct bce_softc *);
366 static void	bce_init_com_cpu(struct bce_softc *);
367 static void	bce_init_cpus(struct bce_softc *);
368 static void	bce_setup_msix_table(struct bce_softc *);
369 static void	bce_init_rss(struct bce_softc *);
370 
371 static void	bce_stop(struct bce_softc *);
372 static int	bce_reset(struct bce_softc *, uint32_t);
373 static int	bce_chipinit(struct bce_softc *);
374 static int	bce_blockinit(struct bce_softc *);
375 static void	bce_probe_pci_caps(struct bce_softc *);
376 static void	bce_print_adapter_info(struct bce_softc *);
377 static void	bce_get_media(struct bce_softc *);
378 static void	bce_mgmt_init(struct bce_softc *);
379 static int	bce_init_ctx(struct bce_softc *);
380 static void	bce_get_mac_addr(struct bce_softc *);
381 static void	bce_set_mac_addr(struct bce_softc *);
382 static void	bce_set_rx_mode(struct bce_softc *);
383 static void	bce_coal_change(struct bce_softc *);
384 static void	bce_npoll_coal_change(struct bce_softc *);
385 static void	bce_setup_serialize(struct bce_softc *);
386 static void	bce_serialize_skipmain(struct bce_softc *);
387 static void	bce_deserialize_skipmain(struct bce_softc *);
388 static void	bce_set_timer_cpuid(struct bce_softc *, boolean_t);
389 static int	bce_alloc_intr(struct bce_softc *);
390 static void	bce_free_intr(struct bce_softc *);
391 static void	bce_try_alloc_msix(struct bce_softc *);
392 static void	bce_free_msix(struct bce_softc *, boolean_t);
393 static void	bce_setup_ring_cnt(struct bce_softc *);
394 static int	bce_setup_intr(struct bce_softc *);
395 static void	bce_teardown_intr(struct bce_softc *);
396 static int	bce_setup_msix(struct bce_softc *);
397 static void	bce_teardown_msix(struct bce_softc *, int);
398 
399 static int	bce_create_tx_ring(struct bce_tx_ring *);
400 static void	bce_destroy_tx_ring(struct bce_tx_ring *);
401 static void	bce_init_tx_context(struct bce_tx_ring *);
402 static int	bce_init_tx_chain(struct bce_tx_ring *);
403 static void	bce_free_tx_chain(struct bce_tx_ring *);
404 static void	bce_xmit(struct bce_tx_ring *);
405 static int	bce_encap(struct bce_tx_ring *, struct mbuf **, int *);
406 static int	bce_tso_setup(struct bce_tx_ring *, struct mbuf **,
407 		    uint16_t *, uint16_t *);
408 
409 static int	bce_create_rx_ring(struct bce_rx_ring *);
410 static void	bce_destroy_rx_ring(struct bce_rx_ring *);
411 static void	bce_init_rx_context(struct bce_rx_ring *);
412 static int	bce_init_rx_chain(struct bce_rx_ring *);
413 static void	bce_free_rx_chain(struct bce_rx_ring *);
414 static int	bce_newbuf_std(struct bce_rx_ring *, uint16_t *, uint16_t,
415 		    uint32_t *, int);
416 static void	bce_setup_rxdesc_std(struct bce_rx_ring *, uint16_t,
417 		    uint32_t *);
418 static struct pktinfo *bce_rss_pktinfo(struct pktinfo *, uint32_t,
419 		    const struct l2_fhdr *);
420 
421 static void	bce_start(struct ifnet *, struct ifaltq_subque *);
422 static int	bce_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
423 static void	bce_watchdog(struct ifaltq_subque *);
424 static int	bce_ifmedia_upd(struct ifnet *);
425 static void	bce_ifmedia_sts(struct ifnet *, struct ifmediareq *);
426 static void	bce_init(void *);
427 #ifdef IFPOLL_ENABLE
428 static void	bce_npoll(struct ifnet *, struct ifpoll_info *);
429 static void	bce_npoll_rx(struct ifnet *, void *, int);
430 static void	bce_npoll_tx(struct ifnet *, void *, int);
431 static void	bce_npoll_status(struct ifnet *);
432 static void	bce_npoll_rx_pack(struct ifnet *, void *, int);
433 #endif
434 static void	bce_serialize(struct ifnet *, enum ifnet_serialize);
435 static void	bce_deserialize(struct ifnet *, enum ifnet_serialize);
436 static int	bce_tryserialize(struct ifnet *, enum ifnet_serialize);
437 #ifdef INVARIANTS
438 static void	bce_serialize_assert(struct ifnet *, enum ifnet_serialize,
439 		    boolean_t);
440 #endif
441 
442 static void	bce_intr(struct bce_softc *);
443 static void	bce_intr_legacy(void *);
444 static void	bce_intr_msi(void *);
445 static void	bce_intr_msi_oneshot(void *);
446 static void	bce_intr_msix_rxtx(void *);
447 static void	bce_intr_msix_rx(void *);
448 static void	bce_tx_intr(struct bce_tx_ring *, uint16_t);
449 static void	bce_rx_intr(struct bce_rx_ring *, int, uint16_t);
450 static void	bce_phy_intr(struct bce_softc *);
451 static void	bce_disable_intr(struct bce_softc *);
452 static void	bce_enable_intr(struct bce_softc *);
453 static void	bce_reenable_intr(struct bce_rx_ring *);
454 static void	bce_check_msi(void *);
455 
456 static void	bce_stats_update(struct bce_softc *);
457 static void	bce_tick(void *);
458 static void	bce_tick_serialized(struct bce_softc *);
459 static void	bce_pulse(void *);
460 
461 static void	bce_add_sysctls(struct bce_softc *);
462 static int	bce_sysctl_tx_bds_int(SYSCTL_HANDLER_ARGS);
463 static int	bce_sysctl_tx_bds(SYSCTL_HANDLER_ARGS);
464 static int	bce_sysctl_tx_ticks_int(SYSCTL_HANDLER_ARGS);
465 static int	bce_sysctl_tx_ticks(SYSCTL_HANDLER_ARGS);
466 static int	bce_sysctl_rx_bds_int(SYSCTL_HANDLER_ARGS);
467 static int	bce_sysctl_rx_bds(SYSCTL_HANDLER_ARGS);
468 static int	bce_sysctl_rx_ticks_int(SYSCTL_HANDLER_ARGS);
469 static int	bce_sysctl_rx_ticks(SYSCTL_HANDLER_ARGS);
470 static int	bce_sysctl_coal_change(SYSCTL_HANDLER_ARGS,
471 		    uint32_t *, uint32_t);
472 
473 /*
474  * NOTE:
475  * Don't set bce_tx_ticks_int/bce_tx_ticks to 1023.  Linux's bnx2
476  * takes 1023 as the TX ticks limit.  However, using 1023 will
477  * cause 5708(B2) to generate extra interrupts (~2000/s) even when
478  * there is _no_ network activity on the NIC.
479  */
480 static uint32_t	bce_tx_bds_int = 255;		/* bcm: 20 */
481 static uint32_t	bce_tx_bds = 255;		/* bcm: 20 */
482 static uint32_t	bce_tx_ticks_int = 1022;	/* bcm: 80 */
483 static uint32_t	bce_tx_ticks = 1022;		/* bcm: 80 */
484 static uint32_t	bce_rx_bds_int = 128;		/* bcm: 6 */
485 static uint32_t	bce_rx_bds = 0;			/* bcm: 6 */
486 static uint32_t	bce_rx_ticks_int = 150;		/* bcm: 18 */
487 static uint32_t	bce_rx_ticks = 150;		/* bcm: 18 */
488 
489 static int	bce_tx_wreg = 8;
490 
491 static int	bce_msi_enable = 1;
492 static int	bce_msix_enable = 1;
493 
494 static int	bce_rx_pages = RX_PAGES_DEFAULT;
495 static int	bce_tx_pages = TX_PAGES_DEFAULT;
496 
497 static int	bce_rx_rings = 0;	/* auto */
498 static int	bce_tx_rings = 0;	/* auto */
499 
500 TUNABLE_INT("hw.bce.tx_bds_int", &bce_tx_bds_int);
501 TUNABLE_INT("hw.bce.tx_bds", &bce_tx_bds);
502 TUNABLE_INT("hw.bce.tx_ticks_int", &bce_tx_ticks_int);
503 TUNABLE_INT("hw.bce.tx_ticks", &bce_tx_ticks);
504 TUNABLE_INT("hw.bce.rx_bds_int", &bce_rx_bds_int);
505 TUNABLE_INT("hw.bce.rx_bds", &bce_rx_bds);
506 TUNABLE_INT("hw.bce.rx_ticks_int", &bce_rx_ticks_int);
507 TUNABLE_INT("hw.bce.rx_ticks", &bce_rx_ticks);
508 TUNABLE_INT("hw.bce.msi.enable", &bce_msi_enable);
509 TUNABLE_INT("hw.bce.msix.enable", &bce_msix_enable);
510 TUNABLE_INT("hw.bce.rx_pages", &bce_rx_pages);
511 TUNABLE_INT("hw.bce.tx_pages", &bce_tx_pages);
512 TUNABLE_INT("hw.bce.tx_wreg", &bce_tx_wreg);
513 TUNABLE_INT("hw.bce.tx_rings", &bce_tx_rings);
514 TUNABLE_INT("hw.bce.rx_rings", &bce_rx_rings);
515 
516 /****************************************************************************/
517 /* DragonFly device dispatch table.                                         */
518 /****************************************************************************/
519 static device_method_t bce_methods[] = {
520 	/* Device interface */
521 	DEVMETHOD(device_probe,		bce_probe),
522 	DEVMETHOD(device_attach,	bce_attach),
523 	DEVMETHOD(device_detach,	bce_detach),
524 	DEVMETHOD(device_shutdown,	bce_shutdown),
525 
526 	/* bus interface */
527 	DEVMETHOD(bus_print_child,	bus_generic_print_child),
528 	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
529 
530 	/* MII interface */
531 	DEVMETHOD(miibus_readreg,	bce_miibus_read_reg),
532 	DEVMETHOD(miibus_writereg,	bce_miibus_write_reg),
533 	DEVMETHOD(miibus_statchg,	bce_miibus_statchg),
534 
535 	DEVMETHOD_END
536 };
537 
538 static driver_t bce_driver = {
539 	"bce",
540 	bce_methods,
541 	sizeof(struct bce_softc)
542 };
543 
544 static devclass_t bce_devclass;
545 
546 DECLARE_DUMMY_MODULE(if_bce);
547 MODULE_DEPEND(bce, miibus, 1, 1, 1);
548 DRIVER_MODULE(if_bce, pci, bce_driver, bce_devclass, NULL, NULL);
549 DRIVER_MODULE(miibus, bce, miibus_driver, miibus_devclass, NULL, NULL);
550 
551 /****************************************************************************/
552 /* Device probe function.                                                   */
553 /*                                                                          */
554 /* Compares the device to the driver's list of supported devices and        */
555 /* reports back to the OS whether this is the right driver for the device.  */
556 /*                                                                          */
557 /* Returns:                                                                 */
558 /*   0 on success, positive value on failure.                               */
559 /****************************************************************************/
560 static int
561 bce_probe(device_t dev)
562 {
563 	struct bce_type *t;
564 	uint16_t vid, did, svid, sdid;
565 
566 	/* Get the data for the device to be probed. */
567 	vid  = pci_get_vendor(dev);
568 	did  = pci_get_device(dev);
569 	svid = pci_get_subvendor(dev);
570 	sdid = pci_get_subdevice(dev);
571 
572 	/* Look through the list of known devices for a match. */
573 	for (t = bce_devs; t->bce_name != NULL; ++t) {
574 		if (vid == t->bce_vid && did == t->bce_did &&
575 		    (svid == t->bce_svid || t->bce_svid == PCI_ANY_ID) &&
576 		    (sdid == t->bce_sdid || t->bce_sdid == PCI_ANY_ID)) {
577 		    	uint32_t revid = pci_read_config(dev, PCIR_REVID, 4);
578 			char *descbuf;
579 
580 			descbuf = kmalloc(BCE_DEVDESC_MAX, M_TEMP, M_WAITOK);
581 
582 			/* Print out the device identity. */
583 			ksnprintf(descbuf, BCE_DEVDESC_MAX, "%s (%c%d)",
584 				  t->bce_name,
585 				  ((revid & 0xf0) >> 4) + 'A', revid & 0xf);
586 
587 			device_set_desc_copy(dev, descbuf);
588 			kfree(descbuf, M_TEMP);
589 			return 0;
590 		}
591 	}
592 	return ENXIO;
593 }
594 
595 /****************************************************************************/
596 /* Adapter Information Print Function.                                      */
597 /*                                                                          */
598 /* Prints useful adapter information such as the ASIC ID and revision, bus  */
599 /* type and speed, bootcode version and device features.                    */
600 /*                                                                          */
601 /* Returns:                                                                 */
602 /*   None.                                                                  */
603 /****************************************************************************/
604 static void
605 bce_print_adapter_info(struct bce_softc *sc)
606 {
607 	device_printf(sc->bce_dev, "ASIC (0x%08X); ", sc->bce_chipid);
608 
609 	kprintf("Rev (%c%d); ", ((BCE_CHIP_ID(sc) & 0xf000) >> 12) + 'A',
610 		((BCE_CHIP_ID(sc) & 0x0ff0) >> 4));
611 
612 	/* Bus info. */
613 	if (sc->bce_flags & BCE_PCIE_FLAG) {
614 		kprintf("Bus (PCIe x%d, ", sc->link_width);
615 		switch (sc->link_speed) {
616 		case 1:
617 			kprintf("2.5Gbps); ");
618 			break;
619 		case 2:
620 			kprintf("5Gbps); ");
621 			break;
622 		default:
623 			kprintf("Unknown link speed); ");
624 			break;
625 		}
626 	} else {
627 		kprintf("Bus (PCI%s, %s, %dMHz); ",
628 		    ((sc->bce_flags & BCE_PCIX_FLAG) ? "-X" : ""),
629 		    ((sc->bce_flags & BCE_PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
630 		    sc->bus_speed_mhz);
631 	}
632 
633 	/* Firmware version and device features. */
634 	kprintf("B/C (%s)", sc->bce_bc_ver);
635 
636 	if ((sc->bce_flags & BCE_MFW_ENABLE_FLAG) ||
637 	    (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG)) {
638 		kprintf("; Flags(");
639 		if (sc->bce_flags & BCE_MFW_ENABLE_FLAG)
640 			kprintf("MFW[%s]", sc->bce_mfw_ver);
641 		if (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG)
642 			kprintf(" 2.5G");
643 		kprintf(")");
644 	}
645 	kprintf("\n");
646 }
647 
648 /****************************************************************************/
649 /* PCI Capabilities Probe Function.                                         */
650 /*                                                                          */
651 /* Walks the PCI capabilities list for the device to find what features are */
652 /* supported.                                                               */
653 /*                                                                          */
654 /* Returns:                                                                 */
655 /*   None.                                                                  */
656 /****************************************************************************/
657 static void
658 bce_probe_pci_caps(struct bce_softc *sc)
659 {
660 	device_t dev = sc->bce_dev;
661 	uint8_t ptr;
662 
663 	if (pci_is_pcix(dev))
664 		sc->bce_cap_flags |= BCE_PCIX_CAPABLE_FLAG;
665 
666 	ptr = pci_get_pciecap_ptr(dev);
667 	if (ptr) {
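		/* Offset 0x12 in the PCIe capability is the Link Status register. */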
668 		uint16_t link_status = pci_read_config(dev, ptr + 0x12, 2);
669 
670 		sc->link_speed = link_status & 0xf;
671 		sc->link_width = (link_status >> 4) & 0x3f;
672 		sc->bce_cap_flags |= BCE_PCIE_CAPABLE_FLAG;
673 		sc->bce_flags |= BCE_PCIE_FLAG;
674 	}
675 }
676 
677 /****************************************************************************/
678 /* Device attach function.                                                  */
679 /*                                                                          */
680 /* Allocates device resources, performs secondary chip identification,      */
681 /* resets and initializes the hardware, and initializes driver instance     */
682 /* variables.                                                               */
683 /*                                                                          */
684 /* Returns:                                                                 */
685 /*   0 on success, positive value on failure.                               */
686 /****************************************************************************/
687 static int
688 bce_attach(device_t dev)
689 {
690 	struct bce_softc *sc = device_get_softc(dev);
691 	struct ifnet *ifp = &sc->arpcom.ac_if;
692 	uint32_t val;
693 	int rid, rc = 0;
694 	int i, j;
695 	struct mii_probe_args mii_args;
696 	uintptr_t mii_priv = 0;
697 
698 	sc->bce_dev = dev;
699 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
700 
701 	lwkt_serialize_init(&sc->main_serialize);
702 	for (i = 0; i < BCE_MSIX_MAX; ++i) {
703 		struct bce_msix_data *msix = &sc->bce_msix[i];
704 
705 		msix->msix_cpuid = -1;
706 		msix->msix_rid = -1;
707 	}
708 
709 	pci_enable_busmaster(dev);
710 
711 	bce_probe_pci_caps(sc);
712 
713 	/* Allocate PCI memory resources. */
714 	rid = PCIR_BAR(0);
715 	sc->bce_res_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
716 						 RF_ACTIVE | PCI_RF_DENSE);
717 	if (sc->bce_res_mem == NULL) {
718 		device_printf(dev, "PCI memory allocation failed\n");
719 		return ENXIO;
720 	}
721 	sc->bce_btag = rman_get_bustag(sc->bce_res_mem);
722 	sc->bce_bhandle = rman_get_bushandle(sc->bce_res_mem);
723 
724 	/*
725 	 * Configure byte swap and enable indirect register access.
726 	 * Rely on CPU to do target byte swapping on big endian systems.
727  * Access to registers outside of PCI configuration space is not
728  * valid until this is done.
729 	 */
730 	pci_write_config(dev, BCE_PCICFG_MISC_CONFIG,
731 			 BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
732 			 BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP, 4);
733 
734 	/* Save ASIC revision info. */
735 	sc->bce_chipid =  REG_RD(sc, BCE_MISC_ID);
736 
737 	/* Weed out any non-production controller revisions. */
738 	switch (BCE_CHIP_ID(sc)) {
739 	case BCE_CHIP_ID_5706_A0:
740 	case BCE_CHIP_ID_5706_A1:
741 	case BCE_CHIP_ID_5708_A0:
742 	case BCE_CHIP_ID_5708_B0:
743 	case BCE_CHIP_ID_5709_A0:
744 	case BCE_CHIP_ID_5709_B0:
745 	case BCE_CHIP_ID_5709_B1:
746 #ifdef foo
747 	/* 5709C B2 seems to work fine */
748 	case BCE_CHIP_ID_5709_B2:
749 #endif
750 		device_printf(dev, "Unsupported chip id 0x%08x!\n",
751 			      BCE_CHIP_ID(sc));
752 		rc = ENODEV;
753 		goto fail;
754 	}
755 
756 	mii_priv |= BRGPHY_FLAG_WIRESPEED;
757 	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) {
758 		if (BCE_CHIP_REV(sc) == BCE_CHIP_REV_Ax ||
759 		    BCE_CHIP_REV(sc) == BCE_CHIP_REV_Bx)
760 			mii_priv |= BRGPHY_FLAG_NO_EARLYDAC;
761 	} else {
762 		mii_priv |= BRGPHY_FLAG_BER_BUG;
763 	}
764 
765 	/*
766 	 * Find the base address for shared memory access.
767 	 * Newer versions of bootcode use a signature and offset
768 	 * while older versions use a fixed address.
769 	 */
770 	val = REG_RD_IND(sc, BCE_SHM_HDR_SIGNATURE);
771 	if ((val & BCE_SHM_HDR_SIGNATURE_SIG_MASK) ==
772 	    BCE_SHM_HDR_SIGNATURE_SIG) {
773 		/* Multi-port devices use different offsets in shared memory. */
774 		sc->bce_shmem_base = REG_RD_IND(sc,
775 		    BCE_SHM_HDR_ADDR_0 + (pci_get_function(sc->bce_dev) << 2));
776 	} else {
777 		sc->bce_shmem_base = HOST_VIEW_SHMEM_BASE;
778 	}
779 
780 	/* Fetch the bootcode revision. */
781 	val = bce_shmem_rd(sc, BCE_DEV_INFO_BC_REV);
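	/*
	 * The three version components live in the top three bytes of
	 * 'val'; the loop below renders each one in decimal while
	 * suppressing leading zeros.
	 */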
782 	for (i = 0, j = 0; i < 3; i++) {
783 		uint8_t num;
784 		int k, skip0;
785 
786 		num = (uint8_t)(val >> (24 - (i * 8)));
787 		for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
788 			if (num >= k || !skip0 || k == 1) {
789 				sc->bce_bc_ver[j++] = (num / k) + '0';
790 				skip0 = 0;
791 			}
792 		}
793 		if (i != 2)
794 			sc->bce_bc_ver[j++] = '.';
795 	}
796 
797 	/* Check if any management firmware is running. */
798 	val = bce_shmem_rd(sc, BCE_PORT_FEATURE);
799 	if (val & BCE_PORT_FEATURE_ASF_ENABLED) {
800 		sc->bce_flags |= BCE_MFW_ENABLE_FLAG;
801 
802 		/* Allow time for firmware to enter the running state. */
803 		for (i = 0; i < 30; i++) {
804 			val = bce_shmem_rd(sc, BCE_BC_STATE_CONDITION);
805 			if (val & BCE_CONDITION_MFW_RUN_MASK)
806 				break;
807 			DELAY(10000);
808 		}
809 	}
810 
811 	/* Check the current bootcode state. */
812 	val = bce_shmem_rd(sc, BCE_BC_STATE_CONDITION) &
813 	    BCE_CONDITION_MFW_RUN_MASK;
814 	if (val != BCE_CONDITION_MFW_RUN_UNKNOWN &&
815 	    val != BCE_CONDITION_MFW_RUN_NONE) {
816 		uint32_t addr = bce_shmem_rd(sc, BCE_MFW_VER_PTR);
817 
818 		for (i = 0, j = 0; j < 3; j++) {
819 			val = bce_reg_rd_ind(sc, addr + j * 4);
820 			val = bswap32(val);
821 			memcpy(&sc->bce_mfw_ver[i], &val, 4);
822 			i += 4;
823 		}
824 	}
825 
826 	/* Get PCI bus information (speed and type). */
827 	val = REG_RD(sc, BCE_PCICFG_MISC_STATUS);
828 	if (val & BCE_PCICFG_MISC_STATUS_PCIX_DET) {
829 		uint32_t clkreg;
830 
831 		sc->bce_flags |= BCE_PCIX_FLAG;
832 
833 		clkreg = REG_RD(sc, BCE_PCICFG_PCI_CLOCK_CONTROL_BITS) &
834 			 BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
835 		switch (clkreg) {
836 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
837 			sc->bus_speed_mhz = 133;
838 			break;
839 
840 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
841 			sc->bus_speed_mhz = 100;
842 			break;
843 
844 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
845 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
846 			sc->bus_speed_mhz = 66;
847 			break;
848 
849 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
850 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
851 			sc->bus_speed_mhz = 50;
852 			break;
853 
854 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
855 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
856 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
857 			sc->bus_speed_mhz = 33;
858 			break;
859 		}
860 	} else {
861 		if (val & BCE_PCICFG_MISC_STATUS_M66EN)
862 			sc->bus_speed_mhz = 66;
863 		else
864 			sc->bus_speed_mhz = 33;
865 	}
866 
867 	if (val & BCE_PCICFG_MISC_STATUS_32BIT_DET)
868 		sc->bce_flags |= BCE_PCI_32BIT_FLAG;
869 
870 	/* Reset the controller. */
871 	rc = bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
872 	if (rc != 0)
873 		goto fail;
874 
875 	/* Initialize the controller. */
876 	rc = bce_chipinit(sc);
877 	if (rc != 0) {
878 		device_printf(dev, "Controller initialization failed!\n");
879 		goto fail;
880 	}
881 
882 	/* Perform NVRAM test. */
883 	rc = bce_nvram_test(sc);
884 	if (rc != 0) {
885 		device_printf(dev, "NVRAM test failed!\n");
886 		goto fail;
887 	}
888 
889 	/* Fetch the permanent Ethernet MAC address. */
890 	bce_get_mac_addr(sc);
891 
892 	/*
893 	 * Trip points control how many BDs
894 	 * should be ready before generating an
895 	 * interrupt while ticks control how long
896 	 * a BD can sit in the chain before
897 	 * generating an interrupt.  Set the default
898 	 * values for the RX and TX rings.
899 	 */
900 
901 #ifdef BCE_DEBUG
902 	/* Force more frequent interrupts. */
903 	sc->bce_tx_quick_cons_trip_int = 1;
904 	sc->bce_tx_quick_cons_trip     = 1;
905 	sc->bce_tx_ticks_int           = 0;
906 	sc->bce_tx_ticks               = 0;
907 
908 	sc->bce_rx_quick_cons_trip_int = 1;
909 	sc->bce_rx_quick_cons_trip     = 1;
910 	sc->bce_rx_ticks_int           = 0;
911 	sc->bce_rx_ticks               = 0;
912 #else
913 	sc->bce_tx_quick_cons_trip_int = bce_tx_bds_int;
914 	sc->bce_tx_quick_cons_trip     = bce_tx_bds;
915 	sc->bce_tx_ticks_int           = bce_tx_ticks_int;
916 	sc->bce_tx_ticks               = bce_tx_ticks;
917 
918 	sc->bce_rx_quick_cons_trip_int = bce_rx_bds_int;
919 	sc->bce_rx_quick_cons_trip     = bce_rx_bds;
920 	sc->bce_rx_ticks_int           = bce_rx_ticks_int;
921 	sc->bce_rx_ticks               = bce_rx_ticks;
922 #endif
923 
924 	/* Update statistics once every second. */
925 	sc->bce_stats_ticks = 1000000 & 0xffff00;
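	/* (the chip requires the low 8 bits of this value to be zero) */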
926 
927 	/* Find the media type for the adapter. */
928 	bce_get_media(sc);
929 
930 	/* Find out RX/TX ring count */
931 	bce_setup_ring_cnt(sc);
932 
933 	/* Allocate DMA memory resources. */
934 	rc = bce_dma_alloc(sc);
935 	if (rc != 0) {
936 		device_printf(dev, "DMA resource allocation failed!\n");
937 		goto fail;
938 	}
939 
940 	/* Allocate PCI IRQ resources. */
941 	rc = bce_alloc_intr(sc);
942 	if (rc != 0)
943 		goto fail;
944 
945 	/* Setup serializer */
946 	bce_setup_serialize(sc);
947 
948 	/* Initialize the ifnet interface. */
949 	ifp->if_softc = sc;
950 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
951 	ifp->if_ioctl = bce_ioctl;
952 	ifp->if_start = bce_start;
953 	ifp->if_init = bce_init;
954 	ifp->if_serialize = bce_serialize;
955 	ifp->if_deserialize = bce_deserialize;
956 	ifp->if_tryserialize = bce_tryserialize;
957 #ifdef INVARIANTS
958 	ifp->if_serialize_assert = bce_serialize_assert;
959 #endif
960 #ifdef IFPOLL_ENABLE
961 	ifp->if_npoll = bce_npoll;
962 #endif
963 
964 	ifp->if_mtu = ETHERMTU;
965 	ifp->if_hwassist = BCE_CSUM_FEATURES | CSUM_TSO;
966 	ifp->if_capabilities = BCE_IF_CAPABILITIES;
967 	if (sc->rx_ring_cnt > 1)
968 		ifp->if_capabilities |= IFCAP_RSS;
969 	ifp->if_capenable = ifp->if_capabilities;
970 
971 	if (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG)
972 		ifp->if_baudrate = IF_Mbps(2500ULL);
973 	else
974 		ifp->if_baudrate = IF_Mbps(1000ULL);
975 
976 	ifp->if_nmbclusters = sc->rx_ring_cnt * USABLE_RX_BD(&sc->rx_rings[0]);
977 
978 	ifq_set_maxlen(&ifp->if_snd, USABLE_TX_BD(&sc->tx_rings[0]));
979 	ifq_set_ready(&ifp->if_snd);
980 	ifq_set_subq_cnt(&ifp->if_snd, sc->tx_ring_cnt);
981 
982 	if (sc->tx_ring_cnt > 1) {
983 		ifp->if_mapsubq = ifq_mapsubq_modulo;
984 		ifq_set_subq_divisor(&ifp->if_snd, sc->tx_ring_cnt);
985 	}
986 
987 	/*
988 	 * Look for our PHY.
989 	 */
990 	mii_probe_args_init(&mii_args, bce_ifmedia_upd, bce_ifmedia_sts);
991 	mii_args.mii_probemask = 1 << sc->bce_phy_addr;
992 	mii_args.mii_privtag = MII_PRIVTAG_BRGPHY;
993 	mii_args.mii_priv = mii_priv;
994 
995 	rc = mii_probe(dev, &sc->bce_miibus, &mii_args);
996 	if (rc != 0) {
997 		device_printf(dev, "PHY probe failed!\n");
998 		goto fail;
999 	}
1000 
1001 	/* Attach to the Ethernet interface list. */
1002 	ether_ifattach(ifp, sc->eaddr, NULL);
1003 
1004 	/* Setup TX rings and subqueues */
1005 	for (i = 0; i < sc->tx_ring_cnt; ++i) {
1006 		struct ifaltq_subque *ifsq = ifq_get_subq(&ifp->if_snd, i);
1007 		struct bce_tx_ring *txr = &sc->tx_rings[i];
1008 
1009 		ifsq_set_cpuid(ifsq, sc->bce_msix[i].msix_cpuid);
1010 		ifsq_set_priv(ifsq, txr);
1011 		ifsq_set_hw_serialize(ifsq, &txr->tx_serialize);
1012 		txr->ifsq = ifsq;
1013 
1014 		ifsq_watchdog_init(&txr->tx_watchdog, ifsq, bce_watchdog, 0);
1015 	}
1016 
1017 	callout_init_mp(&sc->bce_tick_callout);
1018 	callout_init_mp(&sc->bce_pulse_callout);
1019 	callout_init_mp(&sc->bce_ckmsi_callout);
1020 
1021 	rc = bce_setup_intr(sc);
1022 	if (rc != 0) {
1023 		device_printf(dev, "Failed to setup IRQ!\n");
1024 		ether_ifdetach(ifp);
1025 		goto fail;
1026 	}
1027 
1028 	/* Set timer CPUID */
1029 	bce_set_timer_cpuid(sc, FALSE);
1030 
1031 	/* Add the supported sysctls to the kernel. */
1032 	bce_add_sysctls(sc);
1033 
1034 	/*
1035 	 * The chip reset earlier notified the bootcode that
1036 	 * a driver is present.  We now need to start our pulse
1037 	 * routine so that the bootcode is reminded that we're
1038 	 * still running.
1039 	 */
1040 	bce_pulse(sc);
1041 
1042 	/* Get the firmware running so IPMI still works */
1043 	bce_mgmt_init(sc);
1044 
1045 	if (bootverbose)
1046 		bce_print_adapter_info(sc);
1047 
1048 	return 0;
1049 fail:
1050 	bce_detach(dev);
1051 	return rc;
1052 }
1053 
1054 /****************************************************************************/
1055 /* Device detach function.                                                  */
1056 /*                                                                          */
1057 /* Stops the controller, resets the controller, and releases resources.     */
1058 /*                                                                          */
1059 /* Returns:                                                                 */
1060 /*   0 on success, positive value on failure.                               */
1061 /****************************************************************************/
1062 static int
1063 bce_detach(device_t dev)
1064 {
1065 	struct bce_softc *sc = device_get_softc(dev);
1066 
1067 	if (device_is_attached(dev)) {
1068 		struct ifnet *ifp = &sc->arpcom.ac_if;
1069 		uint32_t msg;
1070 
1071 		ifnet_serialize_all(ifp);
1072 
1073 		/* Stop and reset the controller. */
1074 		callout_stop(&sc->bce_pulse_callout);
1075 		bce_stop(sc);
1076 		if (sc->bce_flags & BCE_NO_WOL_FLAG)
1077 			msg = BCE_DRV_MSG_CODE_UNLOAD_LNK_DN;
1078 		else
1079 			msg = BCE_DRV_MSG_CODE_UNLOAD;
1080 		bce_reset(sc, msg);
1081 
1082 		bce_teardown_intr(sc);
1083 
1084 		ifnet_deserialize_all(ifp);
1085 
1086 		ether_ifdetach(ifp);
1087 	}
1088 
1089 	/* If we have a child device on the MII bus remove it too. */
1090 	if (sc->bce_miibus)
1091 		device_delete_child(dev, sc->bce_miibus);
1092 	bus_generic_detach(dev);
1093 
1094 	bce_free_intr(sc);
1095 
1096 	if (sc->bce_res_mem != NULL) {
1097 		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0),
1098 				     sc->bce_res_mem);
1099 	}
1100 
1101 	bce_dma_free(sc);
1102 
1103 	if (sc->serializes != NULL)
1104 		kfree(sc->serializes, M_DEVBUF);
1105 
1106 	if (sc->tx_rmap != NULL)
1107 		if_ringmap_free(sc->tx_rmap);
1108 	if (sc->rx_rmap != NULL)
1109 		if_ringmap_free(sc->rx_rmap);
1110 
1111 	return 0;
1112 }
1113 
1114 /****************************************************************************/
1115 /* Device shutdown function.                                                */
1116 /*                                                                          */
1117 /* Stops and resets the controller.                                         */
1118 /*                                                                          */
1119 /* Returns:                                                                 */
1120 /*   Nothing                                                                */
1121 /****************************************************************************/
1122 static void
1123 bce_shutdown(device_t dev)
1124 {
1125 	struct bce_softc *sc = device_get_softc(dev);
1126 	struct ifnet *ifp = &sc->arpcom.ac_if;
1127 	uint32_t msg;
1128 
1129 	ifnet_serialize_all(ifp);
1130 
1131 	bce_stop(sc);
1132 	if (sc->bce_flags & BCE_NO_WOL_FLAG)
1133 		msg = BCE_DRV_MSG_CODE_UNLOAD_LNK_DN;
1134 	else
1135 		msg = BCE_DRV_MSG_CODE_UNLOAD;
1136 	bce_reset(sc, msg);
1137 
1138 	ifnet_deserialize_all(ifp);
1139 }
1140 
1141 /****************************************************************************/
1142 /* Indirect register read.                                                  */
1143 /*                                                                          */
1144 /* Reads NetXtreme II registers using an index/data register pair in PCI    */
1145 /* configuration space.  Using this mechanism avoids issues with posted     */
1146 /* reads but is much slower than memory-mapped I/O.                         */
1147 /*                                                                          */
1148 /* Returns:                                                                 */
1149 /*   The value of the register.                                             */
1150 /****************************************************************************/
1151 static uint32_t
1152 bce_reg_rd_ind(struct bce_softc *sc, uint32_t offset)
1153 {
1154 	device_t dev = sc->bce_dev;
1155 
1156 	pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4);
1157 	return pci_read_config(dev, BCE_PCICFG_REG_WINDOW, 4);
1158 }
1159 
1160 /****************************************************************************/
1161 /* Indirect register write.                                                 */
1162 /*                                                                          */
1163 /* Writes NetXtreme II registers using an index/data register pair in PCI   */
1164 /* configuration space.  Using this mechanism avoids issues with posted     */
1165 /* writes but is much slower than memory-mapped I/O.                        */
1166 /*                                                                          */
1167 /* Returns:                                                                 */
1168 /*   Nothing.                                                               */
1169 /****************************************************************************/
1170 static void
1171 bce_reg_wr_ind(struct bce_softc *sc, uint32_t offset, uint32_t val)
1172 {
1173 	device_t dev = sc->bce_dev;
1174 
1175 	pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4);
1176 	pci_write_config(dev, BCE_PCICFG_REG_WINDOW, val, 4);
1177 }
1178 
1179 /****************************************************************************/
1180 /* Shared memory write.                                                     */
1181 /*                                                                          */
1182 /* Writes NetXtreme II shared memory region.                                */
1183 /*                                                                          */
1184 /* Returns:                                                                 */
1185 /*   Nothing.                                                               */
1186 /****************************************************************************/
1187 static void
1188 bce_shmem_wr(struct bce_softc *sc, uint32_t offset, uint32_t val)
1189 {
1190 	bce_reg_wr_ind(sc, sc->bce_shmem_base + offset, val);
1191 }
1192 
1193 /****************************************************************************/
1194 /* Shared memory read.                                                      */
1195 /*                                                                          */
1196 /* Reads NetXtreme II shared memory region.                                 */
1197 /*                                                                          */
1198 /* Returns:                                                                 */
1199 /*   The 32 bit value read.                                                 */
1200 /****************************************************************************/
1201 static uint32_t
1202 bce_shmem_rd(struct bce_softc *sc, uint32_t offset)
1203 {
1204 	return bce_reg_rd_ind(sc, sc->bce_shmem_base + offset);
1205 }
1206 
1207 /****************************************************************************/
1208 /* Context memory write.                                                    */
1209 /*                                                                          */
1210 /* The NetXtreme II controller uses context memory to track connection      */
1211 /* information for L2 and higher network protocols.                         */
1212 /*                                                                          */
1213 /* Returns:                                                                 */
1214 /*   Nothing.                                                               */
1215 /****************************************************************************/
1216 static void
1217 bce_ctx_wr(struct bce_softc *sc, uint32_t cid_addr, uint32_t ctx_offset,
1218     uint32_t ctx_val)
1219 {
1220 	uint32_t idx, offset = ctx_offset + cid_addr;
1221 	uint32_t val, retry_cnt = 5;
1222 
1223 	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
1224 	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
1225 		REG_WR(sc, BCE_CTX_CTX_DATA, ctx_val);
1226 		REG_WR(sc, BCE_CTX_CTX_CTRL, (offset | BCE_CTX_CTX_CTRL_WRITE_REQ));
1227 
1228 		for (idx = 0; idx < retry_cnt; idx++) {
1229 			val = REG_RD(sc, BCE_CTX_CTX_CTRL);
1230 			if ((val & BCE_CTX_CTX_CTRL_WRITE_REQ) == 0)
1231 				break;
1232 			DELAY(5);
1233 		}
1234 
1235 		if (val & BCE_CTX_CTX_CTRL_WRITE_REQ) {
1236 			device_printf(sc->bce_dev,
1237 			    "Unable to write CTX memory: "
1238 			    "cid_addr = 0x%08X, offset = 0x%08X!\n",
1239 			    cid_addr, ctx_offset);
1240 		}
1241 	} else {
1242 		REG_WR(sc, BCE_CTX_DATA_ADR, offset);
1243 		REG_WR(sc, BCE_CTX_DATA, ctx_val);
1244 	}
1245 }
1246 
1247 /****************************************************************************/
1248 /* PHY register read.                                                       */
1249 /*                                                                          */
1250 /* Implements register reads on the MII bus.                                */
1251 /*                                                                          */
1252 /* Returns:                                                                 */
1253 /*   The value of the register.                                             */
1254 /****************************************************************************/
1255 static int
1256 bce_miibus_read_reg(device_t dev, int phy, int reg)
1257 {
1258 	struct bce_softc *sc = device_get_softc(dev);
1259 	uint32_t val;
1260 	int i;
1261 
1262 	/* Make sure we are accessing the correct PHY address. */
1263 	KASSERT(phy == sc->bce_phy_addr,
1264 	    ("invalid phyno %d, should be %d\n", phy, sc->bce_phy_addr));
1265 
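	/* Temporarily disable PHY auto-polling so the MDIO interface is free. */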
1266 	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1267 		val = REG_RD(sc, BCE_EMAC_MDIO_MODE);
1268 		val &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL;
1269 
1270 		REG_WR(sc, BCE_EMAC_MDIO_MODE, val);
1271 		REG_RD(sc, BCE_EMAC_MDIO_MODE);
1272 
1273 		DELAY(40);
1274 	}
1275 
1276 	val = BCE_MIPHY(phy) | BCE_MIREG(reg) |
1277 	      BCE_EMAC_MDIO_COMM_COMMAND_READ | BCE_EMAC_MDIO_COMM_DISEXT |
1278 	      BCE_EMAC_MDIO_COMM_START_BUSY;
1279 	REG_WR(sc, BCE_EMAC_MDIO_COMM, val);
1280 
1281 	for (i = 0; i < BCE_PHY_TIMEOUT; i++) {
1282 		DELAY(10);
1283 
1284 		val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
1285 		if (!(val & BCE_EMAC_MDIO_COMM_START_BUSY)) {
1286 			DELAY(5);
1287 
1288 			val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
1289 			val &= BCE_EMAC_MDIO_COMM_DATA;
1290 			break;
1291 		}
1292 	}
1293 
1294 	if (val & BCE_EMAC_MDIO_COMM_START_BUSY) {
1295 		if_printf(&sc->arpcom.ac_if,
1296 			  "Error: PHY read timeout! phy = %d, reg = 0x%04X\n",
1297 			  phy, reg);
1298 		val = 0x0;
1299 	} else {
1300 		val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
1301 	}
1302 
1303 	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1304 		val = REG_RD(sc, BCE_EMAC_MDIO_MODE);
1305 		val |= BCE_EMAC_MDIO_MODE_AUTO_POLL;
1306 
1307 		REG_WR(sc, BCE_EMAC_MDIO_MODE, val);
1308 		REG_RD(sc, BCE_EMAC_MDIO_MODE);
1309 
1310 		DELAY(40);
1311 	}
1312 	return (val & 0xffff);
1313 }
1314 
1315 /****************************************************************************/
1316 /* PHY register write.                                                      */
1317 /*                                                                          */
1318 /* Implements register writes on the MII bus.                               */
1319 /*                                                                          */
1320 /* Returns:                                                                 */
1321 /*   0 on success.                                                          */
1322 /****************************************************************************/
1323 static int
1324 bce_miibus_write_reg(device_t dev, int phy, int reg, int val)
1325 {
1326 	struct bce_softc *sc = device_get_softc(dev);
1327 	uint32_t val1;
1328 	int i;
1329 
1330 	/* Make sure we are accessing the correct PHY address. */
1331 	KASSERT(phy == sc->bce_phy_addr,
1332 	    ("invalid phyno %d, should be %d\n", phy, sc->bce_phy_addr));
1333 
1334 	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1335 		val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE);
1336 		val1 &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL;
1337 
1338 		REG_WR(sc, BCE_EMAC_MDIO_MODE, val1);
1339 		REG_RD(sc, BCE_EMAC_MDIO_MODE);
1340 
1341 		DELAY(40);
1342 	}
1343 
1344 	val1 = BCE_MIPHY(phy) | BCE_MIREG(reg) | val |
1345 		BCE_EMAC_MDIO_COMM_COMMAND_WRITE |
1346 		BCE_EMAC_MDIO_COMM_START_BUSY | BCE_EMAC_MDIO_COMM_DISEXT;
1347 	REG_WR(sc, BCE_EMAC_MDIO_COMM, val1);
1348 
1349 	for (i = 0; i < BCE_PHY_TIMEOUT; i++) {
1350 		DELAY(10);
1351 
1352 		val1 = REG_RD(sc, BCE_EMAC_MDIO_COMM);
1353 		if (!(val1 & BCE_EMAC_MDIO_COMM_START_BUSY)) {
1354 			DELAY(5);
1355 			break;
1356 		}
1357 	}
1358 
1359 	if (val1 & BCE_EMAC_MDIO_COMM_START_BUSY)
1360 		if_printf(&sc->arpcom.ac_if, "PHY write timeout!\n");
1361 
1362 	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1363 		val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE);
1364 		val1 |= BCE_EMAC_MDIO_MODE_AUTO_POLL;
1365 
1366 		REG_WR(sc, BCE_EMAC_MDIO_MODE, val1);
1367 		REG_RD(sc, BCE_EMAC_MDIO_MODE);
1368 
1369 		DELAY(40);
1370 	}
1371 	return 0;
1372 }
1373 
1374 /****************************************************************************/
1375 /* MII bus status change.                                                   */
1376 /*                                                                          */
1377 /* Called by the MII bus driver when the PHY establishes link to set the    */
1378 /* MAC interface registers.                                                 */
1379 /*                                                                          */
1380 /* Returns:                                                                 */
1381 /*   Nothing.                                                               */
1382 /****************************************************************************/
1383 static void
1384 bce_miibus_statchg(device_t dev)
1385 {
1386 	struct bce_softc *sc = device_get_softc(dev);
1387 	struct mii_data *mii = device_get_softc(sc->bce_miibus);
1388 
1389 	BCE_CLRBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT);
1390 
1391 	/*
1392 	 * Set MII or GMII interface based on the speed negotiated
1393 	 * by the PHY.
1394 	 */
1395 	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
1396 	    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) {
1397 		BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT_GMII);
1398 	} else {
1399 		BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT_MII);
1400 	}
1401 
1402 	/*
1403 	 * Set half or full duplex based on the duplex mode negotiated
1404 	 * by the PHY.
1405 	 */
1406 	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
1407 		BCE_CLRBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_HALF_DUPLEX);
1408 	} else {
1409 		BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_HALF_DUPLEX);
1410 	}
1411 }
1412 
1413 /****************************************************************************/
1414 /* Acquire NVRAM lock.                                                      */
1415 /*                                                                          */
1416 /* Before the NVRAM can be accessed the caller must acquire an NVRAM lock.  */
1417 /* Lock 1 is used by the firmware, lock 2 is used by the driver, and the   */
1418 /* remaining locks are reserved.                                            */
1419 /*                                                                          */
1420 /* Returns:                                                                 */
1421 /*   0 on success, positive value on failure.                               */
1422 /****************************************************************************/
1423 static int
1424 bce_acquire_nvram_lock(struct bce_softc *sc)
1425 {
1426 	uint32_t val;
1427 	int j;
1428 
1429 	/* Request access to the flash interface. */
1430 	REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_SET2);
1431 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1432 		val = REG_RD(sc, BCE_NVM_SW_ARB);
1433 		if (val & BCE_NVM_SW_ARB_ARB_ARB2)
1434 			break;
1435 
1436 		DELAY(5);
1437 	}
1438 
1439 	if (j >= NVRAM_TIMEOUT_COUNT) {
1440 		return EBUSY;
1441 	}
1442 	return 0;
1443 }
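
/*
 * Callers are expected to bracket NVRAM access the way bce_nvram_read()
 * does below; a sketch of the pattern:
 *
 *	if (bce_acquire_nvram_lock(sc) == 0) {
 *		bce_enable_nvram_access(sc);
 *		... issue NVRAM commands ...
 *		bce_disable_nvram_access(sc);
 *		bce_release_nvram_lock(sc);
 *	}
 */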
1444 
1445 /****************************************************************************/
1446 /* Release NVRAM lock.                                                      */
1447 /*                                                                          */
1448 /* When the caller is finished accessing NVRAM the lock must be released.   */
1449 /* Lock 1 is used by the firmware, lock 2 is used by the driver, and the   */
1450 /* remaining locks are reserved.                                            */
1451 /*                                                                          */
1452 /* Returns:                                                                 */
1453 /*   0 on success, positive value on failure.                               */
1454 /****************************************************************************/
1455 static int
1456 bce_release_nvram_lock(struct bce_softc *sc)
1457 {
1458 	int j;
1459 	uint32_t val;
1460 
1461 	/*
1462 	 * Relinquish nvram interface.
1463 	 */
1464 	REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_CLR2);
1465 
1466 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1467 		val = REG_RD(sc, BCE_NVM_SW_ARB);
1468 		if (!(val & BCE_NVM_SW_ARB_ARB_ARB2))
1469 			break;
1470 
1471 		DELAY(5);
1472 	}
1473 
1474 	if (j >= NVRAM_TIMEOUT_COUNT) {
1475 		return EBUSY;
1476 	}
1477 	return 0;
1478 }
1479 
1480 /****************************************************************************/
1481 /* Enable NVRAM access.                                                     */
1482 /*                                                                          */
1483 /* Before accessing NVRAM for read or write operations the caller must      */
1484 /* enable NVRAM access.                                                     */
1485 /*                                                                          */
1486 /* Returns:                                                                 */
1487 /*   Nothing.                                                               */
1488 /****************************************************************************/
1489 static void
1490 bce_enable_nvram_access(struct bce_softc *sc)
1491 {
1492 	uint32_t val;
1493 
1494 	val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE);
1495 	/* Enable both bits, even on read. */
1496 	REG_WR(sc, BCE_NVM_ACCESS_ENABLE,
1497 	       val | BCE_NVM_ACCESS_ENABLE_EN | BCE_NVM_ACCESS_ENABLE_WR_EN);
1498 }
1499 
1500 /****************************************************************************/
1501 /* Disable NVRAM access.                                                    */
1502 /*                                                                          */
1503 /* When the caller is finished accessing NVRAM access must be disabled.     */
1504 /*                                                                          */
1505 /* Returns:                                                                 */
1506 /*   Nothing.                                                               */
1507 /****************************************************************************/
1508 static void
1509 bce_disable_nvram_access(struct bce_softc *sc)
1510 {
1511 	uint32_t val;
1512 
1513 	val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE);
1514 
1515 	/* Disable both bits, even after read. */
1516 	REG_WR(sc, BCE_NVM_ACCESS_ENABLE,
1517 	       val & ~(BCE_NVM_ACCESS_ENABLE_EN | BCE_NVM_ACCESS_ENABLE_WR_EN));
1518 }
1519 
1520 /****************************************************************************/
1521 /* Read a dword (32 bits) from NVRAM.                                       */
1522 /*                                                                          */
1523 /* Read a 32 bit word from NVRAM.  The caller is assumed to have already    */
1524 /* obtained the NVRAM lock and enabled the controller for NVRAM access.     */
1525 /*                                                                          */
1526 /* Returns:                                                                 */
1527 /*   0 on success and the 32 bit value read, positive value on failure.     */
1528 /****************************************************************************/
1529 static int
1530 bce_nvram_read_dword(struct bce_softc *sc, uint32_t offset, uint8_t *ret_val,
1531 		     uint32_t cmd_flags)
1532 {
1533 	uint32_t cmd;
1534 	int i, rc = 0;
1535 
1536 	/* Build the command word. */
1537 	cmd = BCE_NVM_COMMAND_DOIT | cmd_flags;
1538 
1539 	/* Calculate the offset for buffered flash. */
1540 	if (sc->bce_flash_info->flags & BCE_NV_TRANSLATE) {
1541 		offset = ((offset / sc->bce_flash_info->page_size) <<
1542 			  sc->bce_flash_info->page_bits) +
1543 			 (offset % sc->bce_flash_info->page_size);
1544 	}
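	/*
	 * Worked example (illustrative values only): for a part with
	 * page_size 264 and page_bits 9, linear offset 530 falls in
	 * page 2 at byte 2, so the translated offset is
	 * (2 << 9) + 2 = 1026.
	 */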
1545 
1546 	/*
1547 	 * Clear the DONE bit separately, set the address to read,
1548 	 * and issue the read.
1549 	 */
1550 	REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
1551 	REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
1552 	REG_WR(sc, BCE_NVM_COMMAND, cmd);
1553 
1554 	/* Wait for completion. */
1555 	for (i = 0; i < NVRAM_TIMEOUT_COUNT; i++) {
1556 		uint32_t val;
1557 
1558 		DELAY(5);
1559 
1560 		val = REG_RD(sc, BCE_NVM_COMMAND);
1561 		if (val & BCE_NVM_COMMAND_DONE) {
1562 			val = REG_RD(sc, BCE_NVM_READ);
1563 
1564 			val = be32toh(val);
1565 			memcpy(ret_val, &val, 4);
1566 			break;
1567 		}
1568 	}
1569 
1570 	/* Check for errors. */
1571 	if (i >= NVRAM_TIMEOUT_COUNT) {
1572 		if_printf(&sc->arpcom.ac_if,
1573 			  "Timeout error reading NVRAM at offset 0x%08X!\n",
1574 			  offset);
1575 		rc = EBUSY;
1576 	}
1577 	return rc;
1578 }
1579 
1580 /****************************************************************************/
1581 /* Initialize NVRAM access.                                                 */
1582 /*                                                                          */
1583 /* Identify the NVRAM device in use and prepare the NVRAM interface to      */
1584 /* access that device.                                                      */
1585 /*                                                                          */
1586 /* Returns:                                                                 */
1587 /*   0 on success, positive value on failure.                               */
1588 /****************************************************************************/
1589 static int
1590 bce_init_nvram(struct bce_softc *sc)
1591 {
1592 	uint32_t val;
1593 	int j, entry_count, rc = 0;
1594 	const struct flash_spec *flash;
1595 
1596 	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
1597 	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
1598 		sc->bce_flash_info = &flash_5709;
1599 		goto bce_init_nvram_get_flash_size;
1600 	}
1601 
1602 	/* Determine the selected interface. */
1603 	val = REG_RD(sc, BCE_NVM_CFG1);
1604 
1605 	entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
1606 
1607 	/*
1608 	 * Flash reconfiguration is required to support additional
1609 	 * NVRAM devices not directly supported in hardware.
1610 	 * Check if the flash interface was reconfigured
1611 	 * by the bootcode.
1612 	 */
1613 
1614 	if (val & 0x40000000) {
1615 		/* Flash interface reconfigured by bootcode. */
1616 		for (j = 0, flash = flash_table; j < entry_count;
1617 		     j++, flash++) {
1618 			if ((val & FLASH_BACKUP_STRAP_MASK) ==
1619 			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
1620 				sc->bce_flash_info = flash;
1621 				break;
1622 			}
1623 		}
1624 	} else {
1625 		/* Flash interface not yet reconfigured. */
1626 		uint32_t mask;
1627 
1628 		if (val & (1 << 23))
1629 			mask = FLASH_BACKUP_STRAP_MASK;
1630 		else
1631 			mask = FLASH_STRAP_MASK;
1632 
1633 		/* Look for the matching NVRAM device configuration data. */
1634 		for (j = 0, flash = flash_table; j < entry_count;
1635 		     j++, flash++) {
1636 			/* Check if the device matches any of the known devices. */
1637 			if ((val & mask) == (flash->strapping & mask)) {
1638 				/* Found a device match. */
1639 				sc->bce_flash_info = flash;
1640 
1641 				/* Request access to the flash interface. */
1642 				rc = bce_acquire_nvram_lock(sc);
1643 				if (rc != 0)
1644 					return rc;
1645 
1646 				/* Reconfigure the flash interface. */
1647 				bce_enable_nvram_access(sc);
1648 				REG_WR(sc, BCE_NVM_CFG1, flash->config1);
1649 				REG_WR(sc, BCE_NVM_CFG2, flash->config2);
1650 				REG_WR(sc, BCE_NVM_CFG3, flash->config3);
1651 				REG_WR(sc, BCE_NVM_WRITE1, flash->write1);
1652 				bce_disable_nvram_access(sc);
1653 				bce_release_nvram_lock(sc);
1654 				break;
1655 			}
1656 		}
1657 	}
1658 
1659 	/* Check if a matching device was found. */
1660 	if (j == entry_count) {
1661 		sc->bce_flash_info = NULL;
1662 		if_printf(&sc->arpcom.ac_if, "Unknown Flash NVRAM found!\n");
1663 		return ENODEV;
1664 	}
1665 
1666 bce_init_nvram_get_flash_size:
1667 	/* Get the configured NVRAM size from the shared memory interface. */
1668 	val = bce_shmem_rd(sc, BCE_SHARED_HW_CFG_CONFIG2) &
1669 	    BCE_SHARED_HW_CFG2_NVM_SIZE_MASK;
1670 	if (val)
1671 		sc->bce_flash_size = val;
1672 	else
1673 		sc->bce_flash_size = sc->bce_flash_info->total_size;
1674 
1675 	return rc;
1676 }
1677 
1678 /****************************************************************************/
1679 /* Read an arbitrary range of data from NVRAM.                              */
1680 /*                                                                          */
1681 /* Prepares the NVRAM interface for access and reads the requested data     */
1682 /* into the supplied buffer.                                                */
1683 /*                                                                          */
1684 /* Returns:                                                                 */
1685 /*   0 on success and the data read, positive value on failure.             */
1686 /****************************************************************************/
1687 static int
1688 bce_nvram_read(struct bce_softc *sc, uint32_t offset, uint8_t *ret_buf,
1689 	       int buf_size)
1690 {
1691 	uint32_t cmd_flags, offset32, len32, extra;
1692 	int rc = 0;
1693 
1694 	if (buf_size == 0)
1695 		return 0;
1696 
1697 	/* Request access to the flash interface. */
1698 	rc = bce_acquire_nvram_lock(sc);
1699 	if (rc != 0)
1700 		return rc;
1701 
1702 	/* Enable access to flash interface */
1703 	bce_enable_nvram_access(sc);
1704 
1705 	len32 = buf_size;
1706 	offset32 = offset;
1707 	extra = 0;
1708 
1709 	cmd_flags = 0;
1710 
1711 	/* Release the NVRAM lock and access on any read_dword() failure below. */
1712 	if (offset32 & 3) {
1713 		uint8_t buf[4];
1714 		uint32_t pre_len;
1715 
1716 		offset32 &= ~3;
1717 		pre_len = 4 - (offset & 3);
1718 
1719 		if (pre_len >= len32) {
1720 			pre_len = len32;
1721 			cmd_flags = BCE_NVM_COMMAND_FIRST | BCE_NVM_COMMAND_LAST;
1722 		} else {
1723 			cmd_flags = BCE_NVM_COMMAND_FIRST;
1724 		}
1725 
1726 		rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
1727 		if (rc)
1728 			goto bce_nvram_read_locked_exit;
1729 
1730 		memcpy(ret_buf, buf + (offset & 3), pre_len);
1731 
1732 		offset32 += 4;
1733 		ret_buf += pre_len;
1734 		len32 -= pre_len;
1735 	}
1736 
1737 	if (len32 & 3) {
1738 		extra = 4 - (len32 & 3);
1739 		len32 = (len32 + 4) & ~3;
1740 	}
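	/*
	 * Example: with the head handled, a remaining 10 byte request
	 * becomes len32 = 12 with extra = 2; the final dword is read in
	 * full, but only its first 4 - extra = 2 bytes are copied out
	 * to the caller.
	 */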
1741 
1742 	if (len32 == 4) {
1743 		uint8_t buf[4];
1744 
1745 		if (cmd_flags)
1746 			cmd_flags = BCE_NVM_COMMAND_LAST;
1747 		else
1748 			cmd_flags = BCE_NVM_COMMAND_FIRST |
1749 				    BCE_NVM_COMMAND_LAST;
1750 
1751 		rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
1752 
1753 		memcpy(ret_buf, buf, 4 - extra);
1754 	} else if (len32 > 0) {
1755 		uint8_t buf[4];
1756 
1757 		/* Read the first word. */
1758 		if (cmd_flags)
1759 			cmd_flags = 0;
1760 		else
1761 			cmd_flags = BCE_NVM_COMMAND_FIRST;
1762 
1763 		rc = bce_nvram_read_dword(sc, offset32, ret_buf, cmd_flags);
1764 
1765 		/* Advance to the next dword. */
1766 		offset32 += 4;
1767 		ret_buf += 4;
1768 		len32 -= 4;
1769 
1770 		while (len32 > 4 && rc == 0) {
1771 			rc = bce_nvram_read_dword(sc, offset32, ret_buf, 0);
1772 
1773 			/* Advance to the next dword. */
1774 			offset32 += 4;
1775 			ret_buf += 4;
1776 			len32 -= 4;
1777 		}
1778 
1779 		if (rc)
1780 			goto bce_nvram_read_locked_exit;
1781 
1782 		cmd_flags = BCE_NVM_COMMAND_LAST;
1783 		rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
1784 
1785 		memcpy(ret_buf, buf, 4 - extra);
1786 	}
1787 
1788 bce_nvram_read_locked_exit:
1789 	/* Disable access to flash interface and release the lock. */
1790 	bce_disable_nvram_access(sc);
1791 	bce_release_nvram_lock(sc);
1792 
1793 	return rc;
1794 }
1795 
1796 /****************************************************************************/
1797 /* Verifies that NVRAM is accessible and contains valid data.               */
1798 /*                                                                          */
1799 /* Reads the configuration data from NVRAM and verifies that the CRC is     */
1800 /* correct.                                                                 */
1801 /*                                                                          */
1802 /* Returns:                                                                 */
1803 /*   0 on success, positive value on failure.                               */
1804 /****************************************************************************/
1805 static int
1806 bce_nvram_test(struct bce_softc *sc)
1807 {
1808 	uint32_t buf[BCE_NVRAM_SIZE / 4];
1809 	uint32_t magic, csum;
1810 	uint8_t *data = (uint8_t *)buf;
1811 	int rc = 0;
1812 
1813 	/*
1814 	 * Check that the device NVRAM is valid by reading
1815 	 * the magic value at offset 0.
1816 	 */
1817 	rc = bce_nvram_read(sc, 0, data, 4);
1818 	if (rc != 0)
1819 		return rc;
1820 
1821 	magic = be32toh(buf[0]);
1822 	if (magic != BCE_NVRAM_MAGIC) {
1823 		if_printf(&sc->arpcom.ac_if,
1824 			  "Invalid NVRAM magic value! Expected: 0x%08X, "
1825 			  "Found: 0x%08X\n", BCE_NVRAM_MAGIC, magic);
1826 		return ENODEV;
1827 	}
1828 
1829 	/*
1830 	 * Verify that the device NVRAM includes valid
1831 	 * configuration data.
1832 	 */
1833 	rc = bce_nvram_read(sc, 0x100, data, BCE_NVRAM_SIZE);
1834 	if (rc != 0)
1835 		return rc;
1836 
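	/*
	 * Each 0x100 byte region below stores its own CRC32 in its
	 * trailing bytes, so a CRC computed over the entire region
	 * (data plus stored CRC) yields the fixed residual
	 * BCE_CRC32_RESIDUAL whenever the data is intact.
	 */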
1837 	csum = ether_crc32_le(data, 0x100);
1838 	if (csum != BCE_CRC32_RESIDUAL) {
1839 		if_printf(&sc->arpcom.ac_if,
1840 			  "Invalid Manufacturing Information NVRAM CRC! "
1841 			  "Expected: 0x%08X, Found: 0x%08X\n",
1842 			  BCE_CRC32_RESIDUAL, csum);
1843 		return ENODEV;
1844 	}
1845 
1846 	csum = ether_crc32_le(data + 0x100, 0x100);
1847 	if (csum != BCE_CRC32_RESIDUAL) {
1848 		if_printf(&sc->arpcom.ac_if,
1849 			  "Invalid Feature Configuration Information "
1850 			  "NVRAM CRC! Expected: 0x%08X, Found: 0x%08X\n",
1851 			  BCE_CRC32_RESIDUAL, csum);
1852 		rc = ENODEV;
1853 	}
1854 	return rc;
1855 }
1856 
1857 /****************************************************************************/
1858 /* Identifies the current media type of the controller and sets the PHY     */
1859 /* address.                                                                 */
1860 /*                                                                          */
1861 /* Returns:                                                                 */
1862 /*   Nothing.                                                               */
1863 /****************************************************************************/
1864 static void
1865 bce_get_media(struct bce_softc *sc)
1866 {
1867 	uint32_t val;
1868 
1869 	sc->bce_phy_addr = 1;
1870 
1871 	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
1872 	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
1873 		uint32_t val = REG_RD(sc, BCE_MISC_DUAL_MEDIA_CTRL);
1874 		uint32_t bond_id = val & BCE_MISC_DUAL_MEDIA_CTRL_BOND_ID;
1875 		uint32_t strap;
1876 
1877 		/*
1878 		 * The BCM5709S is software configurable
1879 		 * for Copper or SerDes operation.
1880 		 */
1881 		if (bond_id == BCE_MISC_DUAL_MEDIA_CTRL_BOND_ID_C) {
1882 			return;
1883 		} else if (bond_id == BCE_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
1884 			sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
1885 			return;
1886 		}
1887 
1888 		if (val & BCE_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE) {
1889 			strap = (val & BCE_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
1890 		} else {
1891 			strap =
1892 			(val & BCE_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
1893 		}
1894 
1895 		if (pci_get_function(sc->bce_dev) == 0) {
1896 			switch (strap) {
1897 			case 0x4:
1898 			case 0x5:
1899 			case 0x6:
1900 				sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
1901 				break;
1902 			}
1903 		} else {
1904 			switch (strap) {
1905 			case 0x1:
1906 			case 0x2:
1907 			case 0x4:
1908 				sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
1909 				break;
1910 			}
1911 		}
1912 	} else if (BCE_CHIP_BOND_ID(sc) & BCE_CHIP_BOND_ID_SERDES_BIT) {
1913 		sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
1914 	}
1915 
1916 	if (sc->bce_phy_flags & BCE_PHY_SERDES_FLAG) {
1917 		sc->bce_flags |= BCE_NO_WOL_FLAG;
1918 		if (BCE_CHIP_NUM(sc) != BCE_CHIP_NUM_5706) {
1919 			sc->bce_phy_addr = 2;
1920 			val = bce_shmem_rd(sc, BCE_SHARED_HW_CFG_CONFIG);
1921 			if (val & BCE_SHARED_HW_CFG_PHY_2_5G)
1922 				sc->bce_phy_flags |= BCE_PHY_2_5G_CAPABLE_FLAG;
1923 		}
1924 	} else if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) ||
1925 	    (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708)) {
1926 		sc->bce_phy_flags |= BCE_PHY_CRC_FIX_FLAG;
1927 	}
1928 }
1929 
1930 static void
1931 bce_destroy_tx_ring(struct bce_tx_ring *txr)
1932 {
1933 	int i;
1934 
1935 	/* Destroy the TX buffer descriptor DMA stuffs. */
1936 	if (txr->tx_bd_chain_tag != NULL) {
1937 		for (i = 0; i < txr->tx_pages; i++) {
1938 			if (txr->tx_bd_chain[i] != NULL) {
1939 				bus_dmamap_unload(txr->tx_bd_chain_tag,
1940 				    txr->tx_bd_chain_map[i]);
1941 				bus_dmamem_free(txr->tx_bd_chain_tag,
1942 				    txr->tx_bd_chain[i],
1943 				    txr->tx_bd_chain_map[i]);
1944 			}
1945 		}
1946 		bus_dma_tag_destroy(txr->tx_bd_chain_tag);
1947 	}
1948 
1949 	/* Destroy the TX mbuf DMA stuffs. */
1950 	if (txr->tx_mbuf_tag != NULL) {
1951 		for (i = 0; i < TOTAL_TX_BD(txr); i++) {
1952 			/* Must have been unloaded in bce_stop() */
1953 			KKASSERT(txr->tx_bufs[i].tx_mbuf_ptr == NULL);
1954 			bus_dmamap_destroy(txr->tx_mbuf_tag,
1955 			    txr->tx_bufs[i].tx_mbuf_map);
1956 		}
1957 		bus_dma_tag_destroy(txr->tx_mbuf_tag);
1958 	}
1959 
1960 	if (txr->tx_bd_chain_map != NULL)
1961 		kfree(txr->tx_bd_chain_map, M_DEVBUF);
1962 	if (txr->tx_bd_chain != NULL)
1963 		kfree(txr->tx_bd_chain, M_DEVBUF);
1964 	if (txr->tx_bd_chain_paddr != NULL)
1965 		kfree(txr->tx_bd_chain_paddr, M_DEVBUF);
1966 
1967 	if (txr->tx_bufs != NULL)
1968 		kfree(txr->tx_bufs, M_DEVBUF);
1969 }
1970 
1971 static void
1972 bce_destroy_rx_ring(struct bce_rx_ring *rxr)
1973 {
1974 	int i;
1975 
1976 	/* Destroy the RX buffer descriptor DMA stuffs. */
1977 	if (rxr->rx_bd_chain_tag != NULL) {
1978 		for (i = 0; i < rxr->rx_pages; i++) {
1979 			if (rxr->rx_bd_chain[i] != NULL) {
1980 				bus_dmamap_unload(rxr->rx_bd_chain_tag,
1981 				    rxr->rx_bd_chain_map[i]);
1982 				bus_dmamem_free(rxr->rx_bd_chain_tag,
1983 				    rxr->rx_bd_chain[i],
1984 				    rxr->rx_bd_chain_map[i]);
1985 			}
1986 		}
1987 		bus_dma_tag_destroy(rxr->rx_bd_chain_tag);
1988 	}
1989 
1990 	/* Destroy the RX mbuf DMA stuffs. */
1991 	if (rxr->rx_mbuf_tag != NULL) {
1992 		for (i = 0; i < TOTAL_RX_BD(rxr); i++) {
1993 			/* Must have been unloaded in bce_stop() */
1994 			KKASSERT(rxr->rx_bufs[i].rx_mbuf_ptr == NULL);
1995 			bus_dmamap_destroy(rxr->rx_mbuf_tag,
1996 			    rxr->rx_bufs[i].rx_mbuf_map);
1997 		}
1998 		bus_dmamap_destroy(rxr->rx_mbuf_tag, rxr->rx_mbuf_tmpmap);
1999 		bus_dma_tag_destroy(rxr->rx_mbuf_tag);
2000 	}
2001 
2002 	if (rxr->rx_bd_chain_map != NULL)
2003 		kfree(rxr->rx_bd_chain_map, M_DEVBUF);
2004 	if (rxr->rx_bd_chain != NULL)
2005 		kfree(rxr->rx_bd_chain, M_DEVBUF);
2006 	if (rxr->rx_bd_chain_paddr != NULL)
2007 		kfree(rxr->rx_bd_chain_paddr, M_DEVBUF);
2008 
2009 	if (rxr->rx_bufs != NULL)
2010 		kfree(rxr->rx_bufs, M_DEVBUF);
2011 }
2012 
2013 /****************************************************************************/
2014 /* Free any DMA memory owned by the driver.                                 */
2015 /*                                                                          */
2016 /* Scans through each data structure that requires DMA memory and frees    */
2017 /* the memory if allocated.                                                 */
2018 /*                                                                          */
2019 /* Returns:                                                                 */
2020 /*   Nothing.                                                               */
2021 /****************************************************************************/
2022 static void
2023 bce_dma_free(struct bce_softc *sc)
2024 {
2025 	int i;
2026 
2027 	/* Destroy the status block. */
2028 	if (sc->status_tag != NULL) {
2029 		if (sc->status_block != NULL) {
2030 			bus_dmamap_unload(sc->status_tag, sc->status_map);
2031 			bus_dmamem_free(sc->status_tag, sc->status_block,
2032 					sc->status_map);
2033 		}
2034 		bus_dma_tag_destroy(sc->status_tag);
2035 	}
2036 
2037 	/* Destroy the statistics block. */
2038 	if (sc->stats_tag != NULL) {
2039 		if (sc->stats_block != NULL) {
2040 			bus_dmamap_unload(sc->stats_tag, sc->stats_map);
2041 			bus_dmamem_free(sc->stats_tag, sc->stats_block,
2042 					sc->stats_map);
2043 		}
2044 		bus_dma_tag_destroy(sc->stats_tag);
2045 	}
2046 
2047 	/* Destroy the CTX DMA stuffs. */
2048 	if (sc->ctx_tag != NULL) {
2049 		for (i = 0; i < sc->ctx_pages; i++) {
2050 			if (sc->ctx_block[i] != NULL) {
2051 				bus_dmamap_unload(sc->ctx_tag, sc->ctx_map[i]);
2052 				bus_dmamem_free(sc->ctx_tag, sc->ctx_block[i],
2053 						sc->ctx_map[i]);
2054 			}
2055 		}
2056 		bus_dma_tag_destroy(sc->ctx_tag);
2057 	}
2058 
2059 	/* Free TX rings */
2060 	if (sc->tx_rings != NULL) {
2061 		for (i = 0; i < sc->tx_ring_cnt; ++i)
2062 			bce_destroy_tx_ring(&sc->tx_rings[i]);
2063 		kfree(sc->tx_rings, M_DEVBUF);
2064 	}
2065 
2066 	/* Free RX rings */
2067 	if (sc->rx_rings != NULL) {
2068 		for (i = 0; i < sc->rx_ring_cnt; ++i)
2069 			bce_destroy_rx_ring(&sc->rx_rings[i]);
2070 		kfree(sc->rx_rings, M_DEVBUF);
2071 	}
2072 
2073 	/* Destroy the parent tag */
2074 	if (sc->parent_tag != NULL)
2075 		bus_dma_tag_destroy(sc->parent_tag);
2076 }
2077 
2078 /****************************************************************************/
2079 /* Get DMA memory from the OS.                                              */
2080 /*                                                                          */
2081 /* Validates that the OS has provided DMA buffers in response to a          */
2082 /* bus_dmamap_load() call and saves the physical address of those buffers.  */
2083 /* When the callback is used the OS will return 0 for the mapping function  */
2084 /* (bus_dmamap_load()) so we use the value of map_arg->maxsegs to pass any  */
2085 /* failures back to the caller.                                             */
2086 /*                                                                          */
2087 /* Returns:                                                                 */
2088 /*   Nothing.                                                               */
2089 /****************************************************************************/
2090 static void
2091 bce_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2092 {
2093 	bus_addr_t *busaddr = arg;
2094 
2095 	/* Check for an error and signal the caller that an error occurred. */
2096 	if (error)
2097 		return;
2098 
2099 	KASSERT(nseg == 1, ("only one segment is allowed"));
2100 	*busaddr = segs->ds_addr;
2101 }
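
/*
 * The physical address is returned through the opaque callback argument,
 * so a load looks like the following sketch (the real calls appear in
 * the ring setup below):
 *
 *	bus_addr_t busaddr;
 *	int error;
 *
 *	error = bus_dmamap_load(tag, map, vaddr, size,
 *	    bce_dma_map_addr, &busaddr, BUS_DMA_WAITOK);
 */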
2102 
2103 static int
2104 bce_create_tx_ring(struct bce_tx_ring *txr)
2105 {
2106 	int pages, rc, i;
2107 
2108 	lwkt_serialize_init(&txr->tx_serialize);
2109 	txr->tx_wreg = bce_tx_wreg;
2110 
2111 	pages = device_getenv_int(txr->sc->bce_dev, "tx_pages", bce_tx_pages);
2112 	if (pages <= 0 || pages > TX_PAGES_MAX || !powerof2(pages)) {
2113 		device_printf(txr->sc->bce_dev, "invalid # of TX pages\n");
2114 		pages = TX_PAGES_DEFAULT;
2115 	}
2116 	txr->tx_pages = pages;
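	/*
	 * tx_pages is a per-device tunable read via device_getenv_int()
	 * (the exact kenv knob name depends on the environment); it must
	 * be a power of 2 so that ring indices can wrap with a simple
	 * mask.
	 */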
2117 
2118 	txr->tx_bd_chain_map = kmalloc(sizeof(bus_dmamap_t) * txr->tx_pages,
2119 	    M_DEVBUF, M_WAITOK | M_ZERO);
2120 	txr->tx_bd_chain = kmalloc(sizeof(struct tx_bd *) * txr->tx_pages,
2121 	    M_DEVBUF, M_WAITOK | M_ZERO);
2122 	txr->tx_bd_chain_paddr = kmalloc(sizeof(bus_addr_t) * txr->tx_pages,
2123 	    M_DEVBUF, M_WAITOK | M_ZERO);
2124 
2125 	txr->tx_bufs = kmalloc(sizeof(struct bce_tx_buf) * TOTAL_TX_BD(txr),
2126 			       M_DEVBUF,
2127 			       M_WAITOK | M_ZERO | M_CACHEALIGN);
2128 
2129 	/*
2130 	 * Create a DMA tag for the TX buffer descriptor chain,
2131 	 * allocate and clear the  memory, and fetch the
2132 	 * allocate and clear the memory, and fetch the
2133 	 */
2134 	rc = bus_dma_tag_create(txr->sc->parent_tag, BCM_PAGE_SIZE, 0,
2135 	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
2136 	    BCE_TX_CHAIN_PAGE_SZ, 1, BCE_TX_CHAIN_PAGE_SZ,
2137 	    0, &txr->tx_bd_chain_tag);
2138 	if (rc != 0) {
2139 		device_printf(txr->sc->bce_dev, "Could not allocate "
2140 		    "TX descriptor chain DMA tag!\n");
2141 		return rc;
2142 	}
2143 
2144 	for (i = 0; i < txr->tx_pages; i++) {
2145 		bus_addr_t busaddr;
2146 
2147 		rc = bus_dmamem_alloc(txr->tx_bd_chain_tag,
2148 		    (void **)&txr->tx_bd_chain[i],
2149 		    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
2150 		    &txr->tx_bd_chain_map[i]);
2151 		if (rc != 0) {
2152 			device_printf(txr->sc->bce_dev,
2153 			    "Could not allocate %dth TX descriptor "
2154 			    "chain DMA memory!\n", i);
2155 			return rc;
2156 		}
2157 
2158 		rc = bus_dmamap_load(txr->tx_bd_chain_tag,
2159 		    txr->tx_bd_chain_map[i],
2160 		    txr->tx_bd_chain[i],
2161 		    BCE_TX_CHAIN_PAGE_SZ,
2162 		    bce_dma_map_addr, &busaddr,
2163 		    BUS_DMA_WAITOK);
2164 		if (rc != 0) {
2165 			if (rc == EINPROGRESS) {
2166 				panic("%s coherent memory loading "
2167 				    "is still in progress!",
2168 				    txr->sc->arpcom.ac_if.if_xname);
2169 			}
2170 			device_printf(txr->sc->bce_dev, "Could not map %dth "
2171 			    "TX descriptor chain DMA memory!\n", i);
2172 			bus_dmamem_free(txr->tx_bd_chain_tag,
2173 			    txr->tx_bd_chain[i],
2174 			    txr->tx_bd_chain_map[i]);
2175 			txr->tx_bd_chain[i] = NULL;
2176 			return rc;
2177 		}
2178 
2179 		txr->tx_bd_chain_paddr[i] = busaddr;
2180 	}
2181 
2182 	/* Create a DMA tag for TX mbufs. */
2183 	rc = bus_dma_tag_create(txr->sc->parent_tag, 1, 0,
2184 	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
2185 	    IP_MAXPACKET + sizeof(struct ether_vlan_header),
2186 	    BCE_MAX_SEGMENTS, PAGE_SIZE,
2187 	    BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
2188 	    &txr->tx_mbuf_tag);
2189 	if (rc != 0) {
2190 		device_printf(txr->sc->bce_dev,
2191 		    "Could not allocate TX mbuf DMA tag!\n");
2192 		return rc;
2193 	}
2194 
2195 	/* Create DMA maps for the TX mbufs clusters. */
2196 	for (i = 0; i < TOTAL_TX_BD(txr); i++) {
2197 		rc = bus_dmamap_create(txr->tx_mbuf_tag,
2198 		    BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
2199 		    &txr->tx_bufs[i].tx_mbuf_map);
2200 		if (rc != 0) {
2201 			int j;
2202 
2203 			for (j = 0; j < i; ++j) {
2204 				bus_dmamap_destroy(txr->tx_mbuf_tag,
2205 				    txr->tx_bufs[j].tx_mbuf_map);
2206 			}
2207 			bus_dma_tag_destroy(txr->tx_mbuf_tag);
2208 			txr->tx_mbuf_tag = NULL;
2209 
2210 			device_printf(txr->sc->bce_dev, "Unable to create "
2211 			    "%dth TX mbuf DMA map!\n", i);
2212 			return rc;
2213 		}
2214 	}
2215 	return 0;
2216 }
2217 
2218 static int
2219 bce_create_rx_ring(struct bce_rx_ring *rxr)
2220 {
2221 	int pages, rc, i;
2222 
2223 	lwkt_serialize_init(&rxr->rx_serialize);
2224 
2225 	pages = device_getenv_int(rxr->sc->bce_dev, "rx_pages", bce_rx_pages);
2226 	if (pages <= 0 || pages > RX_PAGES_MAX || !powerof2(pages)) {
2227 		device_printf(rxr->sc->bce_dev, "invalid # of RX pages\n");
2228 		pages = RX_PAGES_DEFAULT;
2229 	}
2230 	rxr->rx_pages = pages;
2231 
2232 	rxr->rx_bd_chain_map = kmalloc(sizeof(bus_dmamap_t) * rxr->rx_pages,
2233 	    M_DEVBUF, M_WAITOK | M_ZERO);
2234 	rxr->rx_bd_chain = kmalloc(sizeof(struct rx_bd *) * rxr->rx_pages,
2235 	    M_DEVBUF, M_WAITOK | M_ZERO);
2236 	rxr->rx_bd_chain_paddr = kmalloc(sizeof(bus_addr_t) * rxr->rx_pages,
2237 	    M_DEVBUF, M_WAITOK | M_ZERO);
2238 
2239 	rxr->rx_bufs = kmalloc(sizeof(struct bce_rx_buf) * TOTAL_RX_BD(rxr),
2240 			       M_DEVBUF,
2241 			       M_WAITOK | M_ZERO | M_CACHEALIGN);
2242 
2243 	/*
2244 	 * Create a DMA tag for the RX buffer descriptor chain,
2245 	 * allocate and clear the memory, and fetch the physical
2246 	 * address of the blocks.
2247 	 */
2248 	rc = bus_dma_tag_create(rxr->sc->parent_tag, BCM_PAGE_SIZE, 0,
2249 	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
2250 	    BCE_RX_CHAIN_PAGE_SZ, 1, BCE_RX_CHAIN_PAGE_SZ,
2251 	    0, &rxr->rx_bd_chain_tag);
2252 	if (rc != 0) {
2253 		device_printf(rxr->sc->bce_dev, "Could not allocate "
2254 		    "RX descriptor chain DMA tag!\n");
2255 		return rc;
2256 	}
2257 
2258 	for (i = 0; i < rxr->rx_pages; i++) {
2259 		bus_addr_t busaddr;
2260 
2261 		rc = bus_dmamem_alloc(rxr->rx_bd_chain_tag,
2262 		    (void **)&rxr->rx_bd_chain[i],
2263 		    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
2264 		    &rxr->rx_bd_chain_map[i]);
2265 		if (rc != 0) {
2266 			device_printf(rxr->sc->bce_dev,
2267 			    "Could not allocate %dth RX descriptor "
2268 			    "chain DMA memory!\n", i);
2269 			return rc;
2270 		}
2271 
2272 		rc = bus_dmamap_load(rxr->rx_bd_chain_tag,
2273 		    rxr->rx_bd_chain_map[i],
2274 		    rxr->rx_bd_chain[i],
2275 		    BCE_RX_CHAIN_PAGE_SZ,
2276 		    bce_dma_map_addr, &busaddr,
2277 		    BUS_DMA_WAITOK);
2278 		if (rc != 0) {
2279 			if (rc == EINPROGRESS) {
2280 				panic("%s coherent memory loading "
2281 				    "is still in progress!",
2282 				    rxr->sc->arpcom.ac_if.if_xname);
2283 			}
2284 			device_printf(rxr->sc->bce_dev,
2285 			    "Could not map %dth RX descriptor "
2286 			    "chain DMA memory!\n", i);
2287 			bus_dmamem_free(rxr->rx_bd_chain_tag,
2288 			    rxr->rx_bd_chain[i],
2289 			    rxr->rx_bd_chain_map[i]);
2290 			rxr->rx_bd_chain[i] = NULL;
2291 			return rc;
2292 		}
2293 
2294 		rxr->rx_bd_chain_paddr[i] = busaddr;
2295 	}
2296 
2297 	/* Create a DMA tag for RX mbufs. */
2298 	rc = bus_dma_tag_create(rxr->sc->parent_tag, BCE_DMA_RX_ALIGN, 0,
2299 	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
2300 	    MCLBYTES, 1, MCLBYTES,
2301 	    BUS_DMA_ALLOCNOW | BUS_DMA_ALIGNED | BUS_DMA_WAITOK,
2302 	    &rxr->rx_mbuf_tag);
2303 	if (rc != 0) {
2304 		device_printf(rxr->sc->bce_dev,
2305 		    "Could not allocate RX mbuf DMA tag!\n");
2306 		return rc;
2307 	}
2308 
2309 	/* Create tmp DMA map for RX mbuf clusters. */
2310 	rc = bus_dmamap_create(rxr->rx_mbuf_tag, BUS_DMA_WAITOK,
2311 	    &rxr->rx_mbuf_tmpmap);
2312 	if (rc != 0) {
2313 		bus_dma_tag_destroy(rxr->rx_mbuf_tag);
2314 		rxr->rx_mbuf_tag = NULL;
2315 
2316 		device_printf(rxr->sc->bce_dev,
2317 		    "Could not create RX mbuf tmp DMA map!\n");
2318 		return rc;
2319 	}
2320 
2321 	/* Create DMA maps for the RX mbuf clusters. */
2322 	for (i = 0; i < TOTAL_RX_BD(rxr); i++) {
2323 		rc = bus_dmamap_create(rxr->rx_mbuf_tag, BUS_DMA_WAITOK,
2324 		    &rxr->rx_bufs[i].rx_mbuf_map);
2325 		if (rc != 0) {
2326 			int j;
2327 
2328 			for (j = 0; j < i; ++j) {
2329 				bus_dmamap_destroy(rxr->rx_mbuf_tag,
2330 				    rxr->rx_bufs[j].rx_mbuf_map);
2331 			}
2332 			bus_dma_tag_destroy(rxr->rx_mbuf_tag);
2333 			rxr->rx_mbuf_tag = NULL;
2334 
2335 			device_printf(rxr->sc->bce_dev, "Unable to create "
2336 			    "%dth RX mbuf DMA map!\n", i);
2337 			return rc;
2338 		}
2339 	}
2340 	return 0;
2341 }
2342 
2343 /****************************************************************************/
2344 /* Allocate any DMA memory needed by the driver.                            */
2345 /*                                                                          */
2346 /* Allocates DMA memory needed for the various global structures needed by  */
2347 /* hardware.                                                                */
2348 /*                                                                          */
2349 /* Memory alignment requirements:                                           */
2350 /* -----------------+----------+----------+----------+----------+           */
2351 /*  Data Structure  |   5706   |   5708   |   5709   |   5716   |           */
2352 /* -----------------+----------+----------+----------+----------+           */
2353 /* Status Block     | 8 bytes  | 8 bytes  | 16 bytes | 16 bytes |           */
2354 /* Statistics Block | 8 bytes  | 8 bytes  | 16 bytes | 16 bytes |           */
2355 /* RX Buffers       | 16 bytes | 16 bytes | 16 bytes | 16 bytes |           */
2356 /* PG Buffers       |   none   |   none   |   none   |   none   |           */
2357 /* TX Buffers       |   none   |   none   |   none   |   none   |           */
2358 /* Chain Pages(1)   |   4KiB   |   4KiB   |   4KiB   |   4KiB   |           */
2359 /* Context Pages(1) |   N/A    |   N/A    |   4KiB   |   4KiB   |           */
2360 /* -----------------+----------+----------+----------+----------+           */
2361 /*                                                                          */
2362 /* (1) Must align with CPU page size (BCM_PAGE_SIZE).                       */
2363 /*                                                                          */
2364 /* Returns:                                                                 */
2365 /*   0 for success, positive value for failure.                             */
2366 /****************************************************************************/
2367 static int
2368 bce_dma_alloc(struct bce_softc *sc)
2369 {
2370 	struct ifnet *ifp = &sc->arpcom.ac_if;
2371 	int i, rc = 0;
2372 	bus_addr_t busaddr, max_busaddr;
2373 	bus_size_t status_align, stats_align, status_size;
2374 
2375 	/*
2376 	 * The embedded PCIe to PCI-X bridge (EPB)
2377 	 * in the 5708 cannot address memory above
2378 	 * 40 bits (E7_5708CB1_23043 & E6_5708SB1_23043).
2379 	 */
2380 	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708)
2381 		max_busaddr = BCE_BUS_SPACE_MAXADDR;
2382 	else
2383 		max_busaddr = BUS_SPACE_MAXADDR;
2384 
2385 	/*
2386 	 * The BCM5709 and BCM5716 use host memory as a cache for context memory.
2387 	 */
2388 	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
2389 	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
2390 		sc->ctx_pages = BCE_CTX_BLK_SZ / BCM_PAGE_SIZE;
2391 		if (sc->ctx_pages == 0)
2392 			sc->ctx_pages = 1;
2393 		if (sc->ctx_pages > BCE_CTX_PAGES) {
2394 			device_printf(sc->bce_dev, "excessive ctx pages %d\n",
2395 			    sc->ctx_pages);
2396 			return ENOMEM;
2397 		}
2398 		status_align = 16;
2399 		stats_align = 16;
2400 	} else {
2401 		status_align = 8;
2402 		stats_align = 8;
2403 	}
2404 
2405 	/*
2406 	 * Each MSI-X vector needs a status block; each status block
2407 	 * consumes 128 bytes and is 128 byte aligned.
2408 	 */
2409 	if (sc->rx_ring_cnt > 1) {
2410 		status_size = BCE_MSIX_MAX * BCE_STATUS_BLK_MSIX_ALIGN;
2411 		status_align = BCE_STATUS_BLK_MSIX_ALIGN;
2412 	} else {
2413 		status_size = BCE_STATUS_BLK_SZ;
2414 	}
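	/*
	 * The per-vector status blocks are carved out of this single
	 * allocation at offsets of i * BCE_STATUS_BLK_MSIX_ALIGN; with
	 * the 128 byte alignment above, e.g. vector 2's block starts
	 * 256 bytes into the region.  See the ring setup further below.
	 */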
2415 
2416 	/*
2417 	 * Allocate the parent bus DMA tag appropriate for PCI.
2418 	 */
2419 	rc = bus_dma_tag_create(NULL, 1, BCE_DMA_BOUNDARY,
2420 				max_busaddr, BUS_SPACE_MAXADDR,
2421 				BUS_SPACE_MAXSIZE_32BIT, 0,
2422 				BUS_SPACE_MAXSIZE_32BIT,
2423 				0, &sc->parent_tag);
2424 	if (rc != 0) {
2425 		if_printf(ifp, "Could not allocate parent DMA tag!\n");
2426 		return rc;
2427 	}
2428 
2429 	/*
2430 	 * Allocate status block.
2431 	 */
2432 	sc->status_block = bus_dmamem_coherent_any(sc->parent_tag,
2433 				status_align, status_size,
2434 				BUS_DMA_WAITOK | BUS_DMA_ZERO,
2435 				&sc->status_tag, &sc->status_map,
2436 				&sc->status_block_paddr);
2437 	if (sc->status_block == NULL) {
2438 		if_printf(ifp, "Could not allocate status block!\n");
2439 		return ENOMEM;
2440 	}
2441 
2442 	/*
2443 	 * Allocate statistics block.
2444 	 */
2445 	sc->stats_block = bus_dmamem_coherent_any(sc->parent_tag,
2446 				stats_align, BCE_STATS_BLK_SZ,
2447 				BUS_DMA_WAITOK | BUS_DMA_ZERO,
2448 				&sc->stats_tag, &sc->stats_map,
2449 				&sc->stats_block_paddr);
2450 	if (sc->stats_block == NULL) {
2451 		if_printf(ifp, "Could not allocate statistics block!\n");
2452 		return ENOMEM;
2453 	}
2454 
2455 	/*
2456 	 * Allocate context block, if needed
2457 	 */
2458 	if (sc->ctx_pages != 0) {
2459 		rc = bus_dma_tag_create(sc->parent_tag, BCM_PAGE_SIZE, 0,
2460 					BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
2461 					BCM_PAGE_SIZE, 1, BCM_PAGE_SIZE,
2462 					0, &sc->ctx_tag);
2463 		if (rc != 0) {
2464 			if_printf(ifp, "Could not allocate "
2465 				  "context block DMA tag!\n");
2466 			return rc;
2467 		}
2468 
2469 		for (i = 0; i < sc->ctx_pages; i++) {
2470 			rc = bus_dmamem_alloc(sc->ctx_tag,
2471 					      (void **)&sc->ctx_block[i],
2472 					      BUS_DMA_WAITOK | BUS_DMA_ZERO |
2473 					      BUS_DMA_COHERENT,
2474 					      &sc->ctx_map[i]);
2475 			if (rc != 0) {
2476 				if_printf(ifp, "Could not allocate %dth context "
2477 					  "DMA memory!\n", i);
2478 				return rc;
2479 			}
2480 
2481 			rc = bus_dmamap_load(sc->ctx_tag, sc->ctx_map[i],
2482 					     sc->ctx_block[i], BCM_PAGE_SIZE,
2483 					     bce_dma_map_addr, &busaddr,
2484 					     BUS_DMA_WAITOK);
2485 			if (rc != 0) {
2486 				if (rc == EINPROGRESS) {
2487 					panic("%s coherent memory loading "
2488 					      "is still in progress!", ifp->if_xname);
2489 				}
2490 				if_printf(ifp, "Could not map %dth context "
2491 					  "DMA memory!\n", i);
2492 				bus_dmamem_free(sc->ctx_tag, sc->ctx_block[i],
2493 						sc->ctx_map[i]);
2494 				sc->ctx_block[i] = NULL;
2495 				return rc;
2496 			}
2497 			sc->ctx_paddr[i] = busaddr;
2498 		}
2499 	}
2500 
2501 	sc->tx_rings = kmalloc(sizeof(struct bce_tx_ring) * sc->tx_ring_cnt,
2502 			       M_DEVBUF,
2503 			       M_WAITOK | M_ZERO | M_CACHEALIGN);
2504 	for (i = 0; i < sc->tx_ring_cnt; ++i) {
2505 		sc->tx_rings[i].sc = sc;
2506 		if (i == 0) {
2507 			sc->tx_rings[i].tx_cid = TX_CID;
2508 			sc->tx_rings[i].tx_hw_cons =
2509 			    &sc->status_block->status_tx_quick_consumer_index0;
2510 		} else {
2511 			struct status_block_msix *sblk =
2512 			    (struct status_block_msix *)
2513 			    (((uint8_t *)(sc->status_block)) +
2514 			     (i * BCE_STATUS_BLK_MSIX_ALIGN));
2515 
2516 			sc->tx_rings[i].tx_cid = TX_TSS_CID + i - 1;
2517 			sc->tx_rings[i].tx_hw_cons =
2518 			    &sblk->status_tx_quick_consumer_index;
2519 		}
2520 
2521 		rc = bce_create_tx_ring(&sc->tx_rings[i]);
2522 		if (rc != 0) {
2523 			device_printf(sc->bce_dev,
2524 			    "can't create %dth tx ring\n", i);
2525 			return rc;
2526 		}
2527 	}
2528 
2529 	sc->rx_rings = kmalloc(sizeof(struct bce_rx_ring) * sc->rx_ring_cnt,
2530 			       M_DEVBUF,
2531 			       M_WAITOK | M_ZERO | M_CACHEALIGN);
2532 	for (i = 0; i < sc->rx_ring_cnt; ++i) {
2533 		sc->rx_rings[i].sc = sc;
2534 		sc->rx_rings[i].idx = i;
2535 		if (i == 0) {
2536 			sc->rx_rings[i].rx_cid = RX_CID;
2537 			sc->rx_rings[i].rx_hw_cons =
2538 			    &sc->status_block->status_rx_quick_consumer_index0;
2539 			sc->rx_rings[i].hw_status_idx =
2540 			    &sc->status_block->status_idx;
2541 		} else {
2542 			struct status_block_msix *sblk =
2543 			    (struct status_block_msix *)
2544 			    (((uint8_t *)(sc->status_block)) +
2545 			     (i * BCE_STATUS_BLK_MSIX_ALIGN));
2546 
2547 			sc->rx_rings[i].rx_cid = RX_RSS_CID + i - 1;
2548 			sc->rx_rings[i].rx_hw_cons =
2549 			    &sblk->status_rx_quick_consumer_index;
2550 			sc->rx_rings[i].hw_status_idx = &sblk->status_idx;
2551 		}
2552 
2553 		rc = bce_create_rx_ring(&sc->rx_rings[i]);
2554 		if (rc != 0) {
2555 			device_printf(sc->bce_dev,
2556 			    "can't create %dth rx ring\n", i);
2557 			return rc;
2558 		}
2559 	}
2560 
2561 	return 0;
2562 }
2563 
2564 /****************************************************************************/
2565 /* Firmware synchronization.                                                */
2566 /*                                                                          */
2567 /* Before performing certain events such as a chip reset, synchronize with  */
2568 /* the firmware first.                                                      */
2569 /*                                                                          */
2570 /* Returns:                                                                 */
2571 /*   0 for success, positive value for failure.                             */
2572 /****************************************************************************/
2573 static int
2574 bce_fw_sync(struct bce_softc *sc, uint32_t msg_data)
2575 {
2576 	int i, rc = 0;
2577 	uint32_t val;
2578 
2579 	/* Don't waste any time if we've timed out before. */
2580 	if (sc->bce_fw_timed_out)
2581 		return EBUSY;
2582 
2583 	/* Increment the message sequence number. */
2584 	sc->bce_fw_wr_seq++;
2585 	msg_data |= sc->bce_fw_wr_seq;
2586 
2587 	/* Send the message to the bootcode driver mailbox. */
2588 	bce_shmem_wr(sc, BCE_DRV_MB, msg_data);
2589 
2590 	/* Wait for the bootcode to acknowledge the message. */
2591 	for (i = 0; i < FW_ACK_TIME_OUT_MS; i++) {
2592 		/* Check for a response in the bootcode firmware mailbox. */
2593 		val = bce_shmem_rd(sc, BCE_FW_MB);
2594 		if ((val & BCE_FW_MSG_ACK) == (msg_data & BCE_DRV_MSG_SEQ))
2595 			break;
2596 		DELAY(1000);
2597 	}
2598 
2599 	/* If we've timed out, tell the bootcode that we've stopped waiting. */
2600 	if ((val & BCE_FW_MSG_ACK) != (msg_data & BCE_DRV_MSG_SEQ) &&
2601 	    (msg_data & BCE_DRV_MSG_DATA) != BCE_DRV_MSG_DATA_WAIT0) {
2602 		if_printf(&sc->arpcom.ac_if,
2603 			  "Firmware synchronization timeout! "
2604 			  "msg_data = 0x%08X\n", msg_data);
2605 
2606 		msg_data &= ~BCE_DRV_MSG_CODE;
2607 		msg_data |= BCE_DRV_MSG_CODE_FW_TIMEOUT;
2608 
2609 		bce_shmem_wr(sc, BCE_DRV_MB, msg_data);
2610 
2611 		sc->bce_fw_timed_out = 1;
2612 		rc = EBUSY;
2613 	}
2614 	return rc;
2615 }
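
/*
 * Typical usage (a sketch; the actual reset path lives elsewhere in this
 * file): synchronize with the bootcode before a chip reset, e.g.
 *
 *	bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT0 | BCE_DRV_MSG_CODE_RESET);
 */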
2616 
2617 /****************************************************************************/
2618 /* Load Receive Virtual 2 Physical (RV2P) processor firmware.               */
2619 /*                                                                          */
2620 /* Returns:                                                                 */
2621 /*   Nothing.                                                               */
2622 /****************************************************************************/
2623 static void
2624 bce_load_rv2p_fw(struct bce_softc *sc, uint32_t *rv2p_code,
2625 		 uint32_t rv2p_code_len, uint32_t rv2p_proc)
2626 {
2627 	int i;
2628 	uint32_t val;
2629 
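	/*
	 * Each RV2P instruction is 64 bits wide and is loaded as a
	 * high/low dword pair, so the byte offset advances by 8 per
	 * instruction and i / 8 below is the instruction index latched
	 * by the ADDR_CMD write.
	 */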
2630 	for (i = 0; i < rv2p_code_len; i += 8) {
2631 		REG_WR(sc, BCE_RV2P_INSTR_HIGH, *rv2p_code);
2632 		rv2p_code++;
2633 		REG_WR(sc, BCE_RV2P_INSTR_LOW, *rv2p_code);
2634 		rv2p_code++;
2635 
2636 		if (rv2p_proc == RV2P_PROC1) {
2637 			val = (i / 8) | BCE_RV2P_PROC1_ADDR_CMD_RDWR;
2638 			REG_WR(sc, BCE_RV2P_PROC1_ADDR_CMD, val);
2639 		} else {
2640 			val = (i / 8) | BCE_RV2P_PROC2_ADDR_CMD_RDWR;
2641 			REG_WR(sc, BCE_RV2P_PROC2_ADDR_CMD, val);
2642 		}
2643 	}
2644 
2645 	/* Reset the processor, un-stall is done later. */
2646 	if (rv2p_proc == RV2P_PROC1)
2647 		REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC1_RESET);
2648 	else
2649 		REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC2_RESET);
2650 }
2651 
2652 /****************************************************************************/
2653 /* Load RISC processor firmware.                                            */
2654 /*                                                                          */
2655 /* Loads firmware from the file if_bcefw.h into the scratchpad memory       */
2656 /* associated with a particular processor.                                  */
2657 /*                                                                          */
2658 /* Returns:                                                                 */
2659 /*   Nothing.                                                               */
2660 /****************************************************************************/
2661 static void
2662 bce_load_cpu_fw(struct bce_softc *sc, struct cpu_reg *cpu_reg,
2663 		struct fw_info *fw)
2664 {
2665 	uint32_t offset;
2666 	int j;
2667 
2668 	bce_halt_cpu(sc, cpu_reg);
2669 
2670 	/* Load the Text area. */
2671 	offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2672 	if (fw->text) {
2673 		for (j = 0; j < (fw->text_len / 4); j++, offset += 4)
2674 			REG_WR_IND(sc, offset, fw->text[j]);
2675 	}
2676 
2677 	/* Load the Data area. */
2678 	offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2679 	if (fw->data) {
2680 		for (j = 0; j < (fw->data_len / 4); j++, offset += 4)
2681 			REG_WR_IND(sc, offset, fw->data[j]);
2682 	}
2683 
2684 	/* Load the SBSS area. */
2685 	offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2686 	if (fw->sbss) {
2687 		for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4)
2688 			REG_WR_IND(sc, offset, fw->sbss[j]);
2689 	}
2690 
2691 	/* Load the BSS area. */
2692 	offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2693 	if (fw->bss) {
2694 		for (j = 0; j < (fw->bss_len/4); j++, offset += 4)
2695 			REG_WR_IND(sc, offset, fw->bss[j]);
2696 	}
2697 
2698 	/* Load the Read-Only area. */
2699 	offset = cpu_reg->spad_base +
2700 		(fw->rodata_addr - cpu_reg->mips_view_base);
2701 	if (fw->rodata) {
2702 		for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4)
2703 			REG_WR_IND(sc, offset, fw->rodata[j]);
2704 	}
2705 
2706 	/* Clear the pre-fetch instruction and set the FW start address. */
2707 	REG_WR_IND(sc, cpu_reg->inst, 0);
2708 	REG_WR_IND(sc, cpu_reg->pc, fw->start_addr);
2709 }
2710 
2711 /****************************************************************************/
2712 /* Starts the RISC processor.                                               */
2713 /*                                                                          */
2714 /* Assumes the CPU starting address has already been set.                   */
2715 /*                                                                          */
2716 /* Returns:                                                                 */
2717 /*   Nothing.                                                               */
2718 /****************************************************************************/
2719 static void
2720 bce_start_cpu(struct bce_softc *sc, struct cpu_reg *cpu_reg)
2721 {
2722 	uint32_t val;
2723 
2724 	/* Start the CPU. */
2725 	val = REG_RD_IND(sc, cpu_reg->mode);
2726 	val &= ~cpu_reg->mode_value_halt;
2727 	REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
2728 	REG_WR_IND(sc, cpu_reg->mode, val);
2729 }
2730 
2731 /****************************************************************************/
2732 /* Halts the RISC processor.                                                */
2733 /*                                                                          */
2734 /* Returns:                                                                 */
2735 /*   Nothing.                                                               */
2736 /****************************************************************************/
2737 static void
2738 bce_halt_cpu(struct bce_softc *sc, struct cpu_reg *cpu_reg)
2739 {
2740 	uint32_t val;
2741 
2742 	/* Halt the CPU. */
2743 	val = REG_RD_IND(sc, cpu_reg->mode);
2744 	val |= cpu_reg->mode_value_halt;
2745 	REG_WR_IND(sc, cpu_reg->mode, val);
2746 	REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
2747 }
2748 
2749 /****************************************************************************/
2750 /* Start the RX CPU.                                                        */
2751 /*                                                                          */
2752 /* Returns:                                                                 */
2753 /*   Nothing.                                                               */
2754 /****************************************************************************/
2755 static void
2756 bce_start_rxp_cpu(struct bce_softc *sc)
2757 {
2758 	struct cpu_reg cpu_reg;
2759 
2760 	cpu_reg.mode = BCE_RXP_CPU_MODE;
2761 	cpu_reg.mode_value_halt = BCE_RXP_CPU_MODE_SOFT_HALT;
2762 	cpu_reg.mode_value_sstep = BCE_RXP_CPU_MODE_STEP_ENA;
2763 	cpu_reg.state = BCE_RXP_CPU_STATE;
2764 	cpu_reg.state_value_clear = 0xffffff;
2765 	cpu_reg.gpr0 = BCE_RXP_CPU_REG_FILE;
2766 	cpu_reg.evmask = BCE_RXP_CPU_EVENT_MASK;
2767 	cpu_reg.pc = BCE_RXP_CPU_PROGRAM_COUNTER;
2768 	cpu_reg.inst = BCE_RXP_CPU_INSTRUCTION;
2769 	cpu_reg.bp = BCE_RXP_CPU_HW_BREAKPOINT;
2770 	cpu_reg.spad_base = BCE_RXP_SCRATCH;
2771 	cpu_reg.mips_view_base = 0x8000000;
2772 
2773 	bce_start_cpu(sc, &cpu_reg);
2774 }
2775 
2776 /****************************************************************************/
2777 /* Initialize the RX CPU.                                                   */
2778 /*                                                                          */
2779 /* Returns:                                                                 */
2780 /*   Nothing.                                                               */
2781 /****************************************************************************/
2782 static void
2783 bce_init_rxp_cpu(struct bce_softc *sc)
2784 {
2785 	struct cpu_reg cpu_reg;
2786 	struct fw_info fw;
2787 
2788 	cpu_reg.mode = BCE_RXP_CPU_MODE;
2789 	cpu_reg.mode_value_halt = BCE_RXP_CPU_MODE_SOFT_HALT;
2790 	cpu_reg.mode_value_sstep = BCE_RXP_CPU_MODE_STEP_ENA;
2791 	cpu_reg.state = BCE_RXP_CPU_STATE;
2792 	cpu_reg.state_value_clear = 0xffffff;
2793 	cpu_reg.gpr0 = BCE_RXP_CPU_REG_FILE;
2794 	cpu_reg.evmask = BCE_RXP_CPU_EVENT_MASK;
2795 	cpu_reg.pc = BCE_RXP_CPU_PROGRAM_COUNTER;
2796 	cpu_reg.inst = BCE_RXP_CPU_INSTRUCTION;
2797 	cpu_reg.bp = BCE_RXP_CPU_HW_BREAKPOINT;
2798 	cpu_reg.spad_base = BCE_RXP_SCRATCH;
2799 	cpu_reg.mips_view_base = 0x8000000;
2800 
2801 	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
2802 	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
2803 		fw.ver_major = bce_RXP_b09FwReleaseMajor;
2804 		fw.ver_minor = bce_RXP_b09FwReleaseMinor;
2805 		fw.ver_fix = bce_RXP_b09FwReleaseFix;
2806 		fw.start_addr = bce_RXP_b09FwStartAddr;
2807 
2808 		fw.text_addr = bce_RXP_b09FwTextAddr;
2809 		fw.text_len = bce_RXP_b09FwTextLen;
2810 		fw.text_index = 0;
2811 		fw.text = bce_RXP_b09FwText;
2812 
2813 		fw.data_addr = bce_RXP_b09FwDataAddr;
2814 		fw.data_len = bce_RXP_b09FwDataLen;
2815 		fw.data_index = 0;
2816 		fw.data = bce_RXP_b09FwData;
2817 
2818 		fw.sbss_addr = bce_RXP_b09FwSbssAddr;
2819 		fw.sbss_len = bce_RXP_b09FwSbssLen;
2820 		fw.sbss_index = 0;
2821 		fw.sbss = bce_RXP_b09FwSbss;
2822 
2823 		fw.bss_addr = bce_RXP_b09FwBssAddr;
2824 		fw.bss_len = bce_RXP_b09FwBssLen;
2825 		fw.bss_index = 0;
2826 		fw.bss = bce_RXP_b09FwBss;
2827 
2828 		fw.rodata_addr = bce_RXP_b09FwRodataAddr;
2829 		fw.rodata_len = bce_RXP_b09FwRodataLen;
2830 		fw.rodata_index = 0;
2831 		fw.rodata = bce_RXP_b09FwRodata;
2832 	} else {
2833 		fw.ver_major = bce_RXP_b06FwReleaseMajor;
2834 		fw.ver_minor = bce_RXP_b06FwReleaseMinor;
2835 		fw.ver_fix = bce_RXP_b06FwReleaseFix;
2836 		fw.start_addr = bce_RXP_b06FwStartAddr;
2837 
2838 		fw.text_addr = bce_RXP_b06FwTextAddr;
2839 		fw.text_len = bce_RXP_b06FwTextLen;
2840 		fw.text_index = 0;
2841 		fw.text = bce_RXP_b06FwText;
2842 
2843 		fw.data_addr = bce_RXP_b06FwDataAddr;
2844 		fw.data_len = bce_RXP_b06FwDataLen;
2845 		fw.data_index = 0;
2846 		fw.data = bce_RXP_b06FwData;
2847 
2848 		fw.sbss_addr = bce_RXP_b06FwSbssAddr;
2849 		fw.sbss_len = bce_RXP_b06FwSbssLen;
2850 		fw.sbss_index = 0;
2851 		fw.sbss = bce_RXP_b06FwSbss;
2852 
2853 		fw.bss_addr = bce_RXP_b06FwBssAddr;
2854 		fw.bss_len = bce_RXP_b06FwBssLen;
2855 		fw.bss_index = 0;
2856 		fw.bss = bce_RXP_b06FwBss;
2857 
2858 		fw.rodata_addr = bce_RXP_b06FwRodataAddr;
2859 		fw.rodata_len = bce_RXP_b06FwRodataLen;
2860 		fw.rodata_index = 0;
2861 		fw.rodata = bce_RXP_b06FwRodata;
2862 	}
2863 
2864 	bce_load_cpu_fw(sc, &cpu_reg, &fw);
2865 	/* Delay RXP start until initialization is complete. */
2866 }
2867 
2868 /****************************************************************************/
2869 /* Initialize the TX CPU.                                                   */
2870 /*                                                                          */
2871 /* Returns:                                                                 */
2872 /*   Nothing.                                                               */
2873 /****************************************************************************/
2874 static void
2875 bce_init_txp_cpu(struct bce_softc *sc)
2876 {
2877 	struct cpu_reg cpu_reg;
2878 	struct fw_info fw;
2879 
2880 	cpu_reg.mode = BCE_TXP_CPU_MODE;
2881 	cpu_reg.mode_value_halt = BCE_TXP_CPU_MODE_SOFT_HALT;
2882 	cpu_reg.mode_value_sstep = BCE_TXP_CPU_MODE_STEP_ENA;
2883 	cpu_reg.state = BCE_TXP_CPU_STATE;
2884 	cpu_reg.state_value_clear = 0xffffff;
2885 	cpu_reg.gpr0 = BCE_TXP_CPU_REG_FILE;
2886 	cpu_reg.evmask = BCE_TXP_CPU_EVENT_MASK;
2887 	cpu_reg.pc = BCE_TXP_CPU_PROGRAM_COUNTER;
2888 	cpu_reg.inst = BCE_TXP_CPU_INSTRUCTION;
2889 	cpu_reg.bp = BCE_TXP_CPU_HW_BREAKPOINT;
2890 	cpu_reg.spad_base = BCE_TXP_SCRATCH;
2891 	cpu_reg.mips_view_base = 0x8000000;
2892 
2893 	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
2894 	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
2895 		fw.ver_major = bce_TXP_b09FwReleaseMajor;
2896 		fw.ver_minor = bce_TXP_b09FwReleaseMinor;
2897 		fw.ver_fix = bce_TXP_b09FwReleaseFix;
2898 		fw.start_addr = bce_TXP_b09FwStartAddr;
2899 
2900 		fw.text_addr = bce_TXP_b09FwTextAddr;
2901 		fw.text_len = bce_TXP_b09FwTextLen;
2902 		fw.text_index = 0;
2903 		fw.text = bce_TXP_b09FwText;
2904 
2905 		fw.data_addr = bce_TXP_b09FwDataAddr;
2906 		fw.data_len = bce_TXP_b09FwDataLen;
2907 		fw.data_index = 0;
2908 		fw.data = bce_TXP_b09FwData;
2909 
2910 		fw.sbss_addr = bce_TXP_b09FwSbssAddr;
2911 		fw.sbss_len = bce_TXP_b09FwSbssLen;
2912 		fw.sbss_index = 0;
2913 		fw.sbss = bce_TXP_b09FwSbss;
2914 
2915 		fw.bss_addr = bce_TXP_b09FwBssAddr;
2916 		fw.bss_len = bce_TXP_b09FwBssLen;
2917 		fw.bss_index = 0;
2918 		fw.bss = bce_TXP_b09FwBss;
2919 
2920 		fw.rodata_addr = bce_TXP_b09FwRodataAddr;
2921 		fw.rodata_len = bce_TXP_b09FwRodataLen;
2922 		fw.rodata_index = 0;
2923 		fw.rodata = bce_TXP_b09FwRodata;
2924 	} else {
2925 		fw.ver_major = bce_TXP_b06FwReleaseMajor;
2926 		fw.ver_minor = bce_TXP_b06FwReleaseMinor;
2927 		fw.ver_fix = bce_TXP_b06FwReleaseFix;
2928 		fw.start_addr = bce_TXP_b06FwStartAddr;
2929 
2930 		fw.text_addr = bce_TXP_b06FwTextAddr;
2931 		fw.text_len = bce_TXP_b06FwTextLen;
2932 		fw.text_index = 0;
2933 		fw.text = bce_TXP_b06FwText;
2934 
2935 		fw.data_addr = bce_TXP_b06FwDataAddr;
2936 		fw.data_len = bce_TXP_b06FwDataLen;
2937 		fw.data_index = 0;
2938 		fw.data = bce_TXP_b06FwData;
2939 
2940 		fw.sbss_addr = bce_TXP_b06FwSbssAddr;
2941 		fw.sbss_len = bce_TXP_b06FwSbssLen;
2942 		fw.sbss_index = 0;
2943 		fw.sbss = bce_TXP_b06FwSbss;
2944 
2945 		fw.bss_addr = bce_TXP_b06FwBssAddr;
2946 		fw.bss_len = bce_TXP_b06FwBssLen;
2947 		fw.bss_index = 0;
2948 		fw.bss = bce_TXP_b06FwBss;
2949 
2950 		fw.rodata_addr = bce_TXP_b06FwRodataAddr;
2951 		fw.rodata_len = bce_TXP_b06FwRodataLen;
2952 		fw.rodata_index = 0;
2953 		fw.rodata = bce_TXP_b06FwRodata;
2954 	}
2955 
2956 	bce_load_cpu_fw(sc, &cpu_reg, &fw);
2957 	bce_start_cpu(sc, &cpu_reg);
2958 }
2959 
2960 /****************************************************************************/
2961 /* Initialize the TPAT CPU.                                                 */
2962 /*                                                                          */
2963 /* Returns:                                                                 */
2964 /*   Nothing.                                                               */
2965 /****************************************************************************/
2966 static void
2967 bce_init_tpat_cpu(struct bce_softc *sc)
2968 {
2969 	struct cpu_reg cpu_reg;
2970 	struct fw_info fw;
2971 
2972 	cpu_reg.mode = BCE_TPAT_CPU_MODE;
2973 	cpu_reg.mode_value_halt = BCE_TPAT_CPU_MODE_SOFT_HALT;
2974 	cpu_reg.mode_value_sstep = BCE_TPAT_CPU_MODE_STEP_ENA;
2975 	cpu_reg.state = BCE_TPAT_CPU_STATE;
2976 	cpu_reg.state_value_clear = 0xffffff;
2977 	cpu_reg.gpr0 = BCE_TPAT_CPU_REG_FILE;
2978 	cpu_reg.evmask = BCE_TPAT_CPU_EVENT_MASK;
2979 	cpu_reg.pc = BCE_TPAT_CPU_PROGRAM_COUNTER;
2980 	cpu_reg.inst = BCE_TPAT_CPU_INSTRUCTION;
2981 	cpu_reg.bp = BCE_TPAT_CPU_HW_BREAKPOINT;
2982 	cpu_reg.spad_base = BCE_TPAT_SCRATCH;
2983 	cpu_reg.mips_view_base = 0x8000000;
2984 
2985 	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
2986 	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
2987 		fw.ver_major = bce_TPAT_b09FwReleaseMajor;
2988 		fw.ver_minor = bce_TPAT_b09FwReleaseMinor;
2989 		fw.ver_fix = bce_TPAT_b09FwReleaseFix;
2990 		fw.start_addr = bce_TPAT_b09FwStartAddr;
2991 
2992 		fw.text_addr = bce_TPAT_b09FwTextAddr;
2993 		fw.text_len = bce_TPAT_b09FwTextLen;
2994 		fw.text_index = 0;
2995 		fw.text = bce_TPAT_b09FwText;
2996 
2997 		fw.data_addr = bce_TPAT_b09FwDataAddr;
2998 		fw.data_len = bce_TPAT_b09FwDataLen;
2999 		fw.data_index = 0;
3000 		fw.data = bce_TPAT_b09FwData;
3001 
3002 		fw.sbss_addr = bce_TPAT_b09FwSbssAddr;
3003 		fw.sbss_len = bce_TPAT_b09FwSbssLen;
3004 		fw.sbss_index = 0;
3005 		fw.sbss = bce_TPAT_b09FwSbss;
3006 
3007 		fw.bss_addr = bce_TPAT_b09FwBssAddr;
3008 		fw.bss_len = bce_TPAT_b09FwBssLen;
3009 		fw.bss_index = 0;
3010 		fw.bss = bce_TPAT_b09FwBss;
3011 
3012 		fw.rodata_addr = bce_TPAT_b09FwRodataAddr;
3013 		fw.rodata_len = bce_TPAT_b09FwRodataLen;
3014 		fw.rodata_index = 0;
3015 		fw.rodata = bce_TPAT_b09FwRodata;
3016 	} else {
3017 		fw.ver_major = bce_TPAT_b06FwReleaseMajor;
3018 		fw.ver_minor = bce_TPAT_b06FwReleaseMinor;
3019 		fw.ver_fix = bce_TPAT_b06FwReleaseFix;
3020 		fw.start_addr = bce_TPAT_b06FwStartAddr;
3021 
3022 		fw.text_addr = bce_TPAT_b06FwTextAddr;
3023 		fw.text_len = bce_TPAT_b06FwTextLen;
3024 		fw.text_index = 0;
3025 		fw.text = bce_TPAT_b06FwText;
3026 
3027 		fw.data_addr = bce_TPAT_b06FwDataAddr;
3028 		fw.data_len = bce_TPAT_b06FwDataLen;
3029 		fw.data_index = 0;
3030 		fw.data = bce_TPAT_b06FwData;
3031 
3032 		fw.sbss_addr = bce_TPAT_b06FwSbssAddr;
3033 		fw.sbss_len = bce_TPAT_b06FwSbssLen;
3034 		fw.sbss_index = 0;
3035 		fw.sbss = bce_TPAT_b06FwSbss;
3036 
3037 		fw.bss_addr = bce_TPAT_b06FwBssAddr;
3038 		fw.bss_len = bce_TPAT_b06FwBssLen;
3039 		fw.bss_index = 0;
3040 		fw.bss = bce_TPAT_b06FwBss;
3041 
3042 		fw.rodata_addr = bce_TPAT_b06FwRodataAddr;
3043 		fw.rodata_len = bce_TPAT_b06FwRodataLen;
3044 		fw.rodata_index = 0;
3045 		fw.rodata = bce_TPAT_b06FwRodata;
3046 	}
3047 
3048 	bce_load_cpu_fw(sc, &cpu_reg, &fw);
3049 	bce_start_cpu(sc, &cpu_reg);
3050 }
3051 
3052 /****************************************************************************/
3053 /* Initialize the CP CPU.                                                   */
3054 /*                                                                          */
3055 /* Returns:                                                                 */
3056 /*   Nothing.                                                               */
3057 /****************************************************************************/
3058 static void
3059 bce_init_cp_cpu(struct bce_softc *sc)
3060 {
3061 	struct cpu_reg cpu_reg;
3062 	struct fw_info fw;
3063 
3064 	cpu_reg.mode = BCE_CP_CPU_MODE;
3065 	cpu_reg.mode_value_halt = BCE_CP_CPU_MODE_SOFT_HALT;
3066 	cpu_reg.mode_value_sstep = BCE_CP_CPU_MODE_STEP_ENA;
3067 	cpu_reg.state = BCE_CP_CPU_STATE;
3068 	cpu_reg.state_value_clear = 0xffffff;
3069 	cpu_reg.gpr0 = BCE_CP_CPU_REG_FILE;
3070 	cpu_reg.evmask = BCE_CP_CPU_EVENT_MASK;
3071 	cpu_reg.pc = BCE_CP_CPU_PROGRAM_COUNTER;
3072 	cpu_reg.inst = BCE_CP_CPU_INSTRUCTION;
3073 	cpu_reg.bp = BCE_CP_CPU_HW_BREAKPOINT;
3074 	cpu_reg.spad_base = BCE_CP_SCRATCH;
3075 	cpu_reg.mips_view_base = 0x8000000;
3076 
3077 	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
3078 	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
3079 		fw.ver_major = bce_CP_b09FwReleaseMajor;
3080 		fw.ver_minor = bce_CP_b09FwReleaseMinor;
3081 		fw.ver_fix = bce_CP_b09FwReleaseFix;
3082 		fw.start_addr = bce_CP_b09FwStartAddr;
3083 
3084 		fw.text_addr = bce_CP_b09FwTextAddr;
3085 		fw.text_len = bce_CP_b09FwTextLen;
3086 		fw.text_index = 0;
3087 		fw.text = bce_CP_b09FwText;
3088 
3089 		fw.data_addr = bce_CP_b09FwDataAddr;
3090 		fw.data_len = bce_CP_b09FwDataLen;
3091 		fw.data_index = 0;
3092 		fw.data = bce_CP_b09FwData;
3093 
3094 		fw.sbss_addr = bce_CP_b09FwSbssAddr;
3095 		fw.sbss_len = bce_CP_b09FwSbssLen;
3096 		fw.sbss_index = 0;
3097 		fw.sbss = bce_CP_b09FwSbss;
3098 
3099 		fw.bss_addr = bce_CP_b09FwBssAddr;
3100 		fw.bss_len = bce_CP_b09FwBssLen;
3101 		fw.bss_index = 0;
3102 		fw.bss = bce_CP_b09FwBss;
3103 
3104 		fw.rodata_addr = bce_CP_b09FwRodataAddr;
3105 		fw.rodata_len = bce_CP_b09FwRodataLen;
3106 		fw.rodata_index = 0;
3107 		fw.rodata = bce_CP_b09FwRodata;
3108 	} else {
3109 		fw.ver_major = bce_CP_b06FwReleaseMajor;
3110 		fw.ver_minor = bce_CP_b06FwReleaseMinor;
3111 		fw.ver_fix = bce_CP_b06FwReleaseFix;
3112 		fw.start_addr = bce_CP_b06FwStartAddr;
3113 
3114 		fw.text_addr = bce_CP_b06FwTextAddr;
3115 		fw.text_len = bce_CP_b06FwTextLen;
3116 		fw.text_index = 0;
3117 		fw.text = bce_CP_b06FwText;
3118 
3119 		fw.data_addr = bce_CP_b06FwDataAddr;
3120 		fw.data_len = bce_CP_b06FwDataLen;
3121 		fw.data_index = 0;
3122 		fw.data = bce_CP_b06FwData;
3123 
3124 		fw.sbss_addr = bce_CP_b06FwSbssAddr;
3125 		fw.sbss_len = bce_CP_b06FwSbssLen;
3126 		fw.sbss_index = 0;
3127 		fw.sbss = bce_CP_b06FwSbss;
3128 
3129 		fw.bss_addr = bce_CP_b06FwBssAddr;
3130 		fw.bss_len = bce_CP_b06FwBssLen;
3131 		fw.bss_index = 0;
3132 		fw.bss = bce_CP_b06FwBss;
3133 
3134 		fw.rodata_addr = bce_CP_b06FwRodataAddr;
3135 		fw.rodata_len = bce_CP_b06FwRodataLen;
3136 		fw.rodata_index = 0;
3137 		fw.rodata = bce_CP_b06FwRodata;
3138 	}
3139 
3140 	bce_load_cpu_fw(sc, &cpu_reg, &fw);
3141 	bce_start_cpu(sc, &cpu_reg);
3142 }
3143 
3144 /****************************************************************************/
3145 /* Initialize the COM CPU.                                                  */
3146 /*                                                                          */
3147 /* Returns:                                                                 */
3148 /*   Nothing.                                                               */
3149 /****************************************************************************/
3150 static void
3151 bce_init_com_cpu(struct bce_softc *sc)
3152 {
3153 	struct cpu_reg cpu_reg;
3154 	struct fw_info fw;
3155 
3156 	cpu_reg.mode = BCE_COM_CPU_MODE;
3157 	cpu_reg.mode_value_halt = BCE_COM_CPU_MODE_SOFT_HALT;
3158 	cpu_reg.mode_value_sstep = BCE_COM_CPU_MODE_STEP_ENA;
3159 	cpu_reg.state = BCE_COM_CPU_STATE;
3160 	cpu_reg.state_value_clear = 0xffffff;
3161 	cpu_reg.gpr0 = BCE_COM_CPU_REG_FILE;
3162 	cpu_reg.evmask = BCE_COM_CPU_EVENT_MASK;
3163 	cpu_reg.pc = BCE_COM_CPU_PROGRAM_COUNTER;
3164 	cpu_reg.inst = BCE_COM_CPU_INSTRUCTION;
3165 	cpu_reg.bp = BCE_COM_CPU_HW_BREAKPOINT;
3166 	cpu_reg.spad_base = BCE_COM_SCRATCH;
3167 	cpu_reg.mips_view_base = 0x8000000;
3168 
3169 	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
3170 	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
3171 		fw.ver_major = bce_COM_b09FwReleaseMajor;
3172 		fw.ver_minor = bce_COM_b09FwReleaseMinor;
3173 		fw.ver_fix = bce_COM_b09FwReleaseFix;
3174 		fw.start_addr = bce_COM_b09FwStartAddr;
3175 
3176 		fw.text_addr = bce_COM_b09FwTextAddr;
3177 		fw.text_len = bce_COM_b09FwTextLen;
3178 		fw.text_index = 0;
3179 		fw.text = bce_COM_b09FwText;
3180 
3181 		fw.data_addr = bce_COM_b09FwDataAddr;
3182 		fw.data_len = bce_COM_b09FwDataLen;
3183 		fw.data_index = 0;
3184 		fw.data = bce_COM_b09FwData;
3185 
3186 		fw.sbss_addr = bce_COM_b09FwSbssAddr;
3187 		fw.sbss_len = bce_COM_b09FwSbssLen;
3188 		fw.sbss_index = 0;
3189 		fw.sbss = bce_COM_b09FwSbss;
3190 
3191 		fw.bss_addr = bce_COM_b09FwBssAddr;
3192 		fw.bss_len = bce_COM_b09FwBssLen;
3193 		fw.bss_index = 0;
3194 		fw.bss = bce_COM_b09FwBss;
3195 
3196 		fw.rodata_addr = bce_COM_b09FwRodataAddr;
3197 		fw.rodata_len = bce_COM_b09FwRodataLen;
3198 		fw.rodata_index = 0;
3199 		fw.rodata = bce_COM_b09FwRodata;
3200 	} else {
3201 		fw.ver_major = bce_COM_b06FwReleaseMajor;
3202 		fw.ver_minor = bce_COM_b06FwReleaseMinor;
3203 		fw.ver_fix = bce_COM_b06FwReleaseFix;
3204 		fw.start_addr = bce_COM_b06FwStartAddr;
3205 
3206 		fw.text_addr = bce_COM_b06FwTextAddr;
3207 		fw.text_len = bce_COM_b06FwTextLen;
3208 		fw.text_index = 0;
3209 		fw.text = bce_COM_b06FwText;
3210 
3211 		fw.data_addr = bce_COM_b06FwDataAddr;
3212 		fw.data_len = bce_COM_b06FwDataLen;
3213 		fw.data_index = 0;
3214 		fw.data = bce_COM_b06FwData;
3215 
3216 		fw.sbss_addr = bce_COM_b06FwSbssAddr;
3217 		fw.sbss_len = bce_COM_b06FwSbssLen;
3218 		fw.sbss_index = 0;
3219 		fw.sbss = bce_COM_b06FwSbss;
3220 
3221 		fw.bss_addr = bce_COM_b06FwBssAddr;
3222 		fw.bss_len = bce_COM_b06FwBssLen;
3223 		fw.bss_index = 0;
3224 		fw.bss = bce_COM_b06FwBss;
3225 
3226 		fw.rodata_addr = bce_COM_b06FwRodataAddr;
3227 		fw.rodata_len = bce_COM_b06FwRodataLen;
3228 		fw.rodata_index = 0;
3229 		fw.rodata = bce_COM_b06FwRodata;
3230 	}
3231 
3232 	bce_load_cpu_fw(sc, &cpu_reg, &fw);
3233 	bce_start_cpu(sc, &cpu_reg);
3234 }
3235 
3236 /****************************************************************************/
3237 /* Initialize the RV2P, RX, TX, TPAT, COM, and CP CPUs.                     */
3238 /*                                                                          */
3239 /* Loads the firmware for each CPU and starts the CPU.                      */
3240 /*                                                                          */
3241 /* Returns:                                                                 */
3242 /*   Nothing.                                                               */
3243 /****************************************************************************/
3244 static void
3245 bce_init_cpus(struct bce_softc *sc)
3246 {
3247 	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
3248 	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
3249 		if (BCE_CHIP_REV(sc) == BCE_CHIP_REV_Ax) {
3250 			bce_load_rv2p_fw(sc, bce_xi90_rv2p_proc1,
3251 			    sizeof(bce_xi90_rv2p_proc1), RV2P_PROC1);
3252 			bce_load_rv2p_fw(sc, bce_xi90_rv2p_proc2,
3253 			    sizeof(bce_xi90_rv2p_proc2), RV2P_PROC2);
3254 		} else {
3255 			bce_load_rv2p_fw(sc, bce_xi_rv2p_proc1,
3256 			    sizeof(bce_xi_rv2p_proc1), RV2P_PROC1);
3257 			bce_load_rv2p_fw(sc, bce_xi_rv2p_proc2,
3258 			    sizeof(bce_xi_rv2p_proc2), RV2P_PROC2);
3259 		}
3260 	} else {
3261 		bce_load_rv2p_fw(sc, bce_rv2p_proc1,
3262 		    sizeof(bce_rv2p_proc1), RV2P_PROC1);
3263 		bce_load_rv2p_fw(sc, bce_rv2p_proc2,
3264 		    sizeof(bce_rv2p_proc2), RV2P_PROC2);
3265 	}
3266 
3267 	bce_init_rxp_cpu(sc);
3268 	bce_init_txp_cpu(sc);
3269 	bce_init_tpat_cpu(sc);
3270 	bce_init_com_cpu(sc);
3271 	bce_init_cp_cpu(sc);
3272 }
3273 
3274 /****************************************************************************/
3275 /* Initialize context memory.                                               */
3276 /*                                                                          */
3277 /* Clears the memory associated with each Context ID (CID).                 */
3278 /*                                                                          */
3279 /* Returns:                                                                 */
3280 /*   0 for success, positive value for failure.                             */
3281 /****************************************************************************/
3282 static int
3283 bce_init_ctx(struct bce_softc *sc)
3284 {
3285 	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
3286 	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
3287 		/* DRC: Replace this constant value with a #define. */
3288 		int i, retry_cnt = 10;
3289 		uint32_t val;
3290 
3291 		/*
3292 		 * BCM5709 context memory may be cached
3293 		 * in host memory so prepare the host memory
3294 		 * for access.
3295 		 */
3296 		val = BCE_CTX_COMMAND_ENABLED | BCE_CTX_COMMAND_MEM_INIT |
3297 		    (1 << 12);
3298 		val |= (BCM_PAGE_BITS - 8) << 16;
3299 		REG_WR(sc, BCE_CTX_COMMAND, val);
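		/*
		 * Illustrative decoding, assuming 4KiB host pages
		 * (BCM_PAGE_BITS == 12): the page-size field above is
		 * (12 - 8) << 16, i.e. a page code of 4 (2^(8+4) == 4096).
		 */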
3300 
3301 		/* Wait for mem init command to complete. */
3302 		for (i = 0; i < retry_cnt; i++) {
3303 			val = REG_RD(sc, BCE_CTX_COMMAND);
3304 			if (!(val & BCE_CTX_COMMAND_MEM_INIT))
3305 				break;
3306 			DELAY(2);
3307 		}
3308 		if (i == retry_cnt) {
3309 			device_printf(sc->bce_dev,
3310 			    "Context memory initialization failed!\n");
3311 			return ETIMEDOUT;
3312 		}
3313 
3314 		for (i = 0; i < sc->ctx_pages; i++) {
3315 			int j;
3316 
3317 			/*
3318 			 * Set the physical address of the context
3319 			 * memory cache.
3320 			 */
3321 			REG_WR(sc, BCE_CTX_HOST_PAGE_TBL_DATA0,
3322 			    BCE_ADDR_LO(sc->ctx_paddr[i] & 0xfffffff0) |
3323 			    BCE_CTX_HOST_PAGE_TBL_DATA0_VALID);
3324 			REG_WR(sc, BCE_CTX_HOST_PAGE_TBL_DATA1,
3325 			    BCE_ADDR_HI(sc->ctx_paddr[i]));
3326 			REG_WR(sc, BCE_CTX_HOST_PAGE_TBL_CTRL,
3327 			    i | BCE_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
3328 
3329 			/*
3330 			 * Verify that the context memory write was successful.
3331 			 */
3332 			for (j = 0; j < retry_cnt; j++) {
3333 				val = REG_RD(sc, BCE_CTX_HOST_PAGE_TBL_CTRL);
3334 				if ((val &
3335 				    BCE_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) == 0)
3336 					break;
3337 				DELAY(5);
3338 			}
3339 			if (j == retry_cnt) {
3340 				device_printf(sc->bce_dev,
3341 				    "Failed to initialize context page!\n");
3342 				return ETIMEDOUT;
3343 			}
3344 		}
3345 	} else {
3346 		uint32_t vcid_addr, offset;
3347 
3348 		/*
3349 		 * For the 5706/5708, context memory is local to
3350 		 * the controller, so initialize the controller
3351 		 * context memory.
3352 		 */
3353 
3354 		vcid_addr = GET_CID_ADDR(96);
3355 		while (vcid_addr) {
3356 			vcid_addr -= PHY_CTX_SIZE;
3357 
3358 			REG_WR(sc, BCE_CTX_VIRT_ADDR, 0);
3359 			REG_WR(sc, BCE_CTX_PAGE_TBL, vcid_addr);
3360 
3361 			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
3362 				CTX_WR(sc, 0x00, offset, 0);
3363 
3364 			REG_WR(sc, BCE_CTX_VIRT_ADDR, vcid_addr);
3365 			REG_WR(sc, BCE_CTX_PAGE_TBL, vcid_addr);
3366 		}
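		/*
		 * Worked example, assuming the usual layout macros
		 * (GET_CID_ADDR(cid) == cid << 7, PHY_CTX_SIZE == 64):
		 * the loop starts at vcid_addr == 0x3000 and steps down
		 * 64 bytes at a time, zeroing every physical context
		 * backing CIDs 0 through 95.
		 */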
3367 	}
3368 	return 0;
3369 }
3370 
3371 /****************************************************************************/
3372 /* Fetch the permanent MAC address of the controller.                       */
3373 /*                                                                          */
3374 /* Returns:                                                                 */
3375 /*   Nothing.                                                               */
3376 /****************************************************************************/
3377 static void
3378 bce_get_mac_addr(struct bce_softc *sc)
3379 {
3380 	uint32_t mac_lo = 0, mac_hi = 0;
3381 
3382 	/*
3383 	 * The NetXtreme II bootcode populates various NIC
3384 	 * power-on and runtime configuration items in a
3385 	 * shared memory area.  The factory configured MAC
3386 	 * address is available from both NVRAM and the
3387 	 * shared memory area so we'll read the value from
3388 	 * shared memory for speed.
3389 	 */
3390 
3391 	mac_hi = bce_shmem_rd(sc,  BCE_PORT_HW_CFG_MAC_UPPER);
3392 	mac_lo = bce_shmem_rd(sc, BCE_PORT_HW_CFG_MAC_LOWER);
3393 
3394 	if (mac_lo == 0 && mac_hi == 0) {
3395 		if_printf(&sc->arpcom.ac_if, "Invalid Ethernet address!\n");
3396 	} else {
3397 		sc->eaddr[0] = (u_char)(mac_hi >> 8);
3398 		sc->eaddr[1] = (u_char)(mac_hi >> 0);
3399 		sc->eaddr[2] = (u_char)(mac_lo >> 24);
3400 		sc->eaddr[3] = (u_char)(mac_lo >> 16);
3401 		sc->eaddr[4] = (u_char)(mac_lo >> 8);
3402 		sc->eaddr[5] = (u_char)(mac_lo >> 0);
3403 	}
3404 }
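/*
 * Example of the shared memory layout consumed above: if the bootcode
 * stored MAC_UPPER == 0x00000010 and MAC_LOWER == 0x18aabbcc, the byte
 * extraction yields the station address 00:10:18:aa:bb:cc.
 */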
3405 
3406 /****************************************************************************/
3407 /* Program the MAC address.                                                 */
3408 /*                                                                          */
3409 /* Returns:                                                                 */
3410 /*   Nothing.                                                               */
3411 /****************************************************************************/
3412 static void
3413 bce_set_mac_addr(struct bce_softc *sc)
3414 {
3415 	const uint8_t *mac_addr = sc->eaddr;
3416 	uint32_t val;
3417 
3418 	val = (mac_addr[0] << 8) | mac_addr[1];
3419 	REG_WR(sc, BCE_EMAC_MAC_MATCH0, val);
3420 
3421 	val = (mac_addr[2] << 24) |
3422 	      (mac_addr[3] << 16) |
3423 	      (mac_addr[4] << 8) |
3424 	      mac_addr[5];
3425 	REG_WR(sc, BCE_EMAC_MAC_MATCH1, val);
3426 }
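/*
 * Example: programming 00:10:18:aa:bb:cc writes 0x00000010 to
 * BCE_EMAC_MAC_MATCH0 and 0x18aabbcc to BCE_EMAC_MAC_MATCH1, the exact
 * inverse of the unpacking done in bce_get_mac_addr().
 */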
3427 
3428 /****************************************************************************/
3429 /* Stop the controller.                                                     */
3430 /*                                                                          */
3431 /* Returns:                                                                 */
3432 /*   Nothing.                                                               */
3433 /****************************************************************************/
3434 static void
3435 bce_stop(struct bce_softc *sc)
3436 {
3437 	struct ifnet *ifp = &sc->arpcom.ac_if;
3438 	int i;
3439 
3440 	ASSERT_IFNET_SERIALIZED_ALL(ifp);
3441 
3442 	callout_stop(&sc->bce_tick_callout);
3443 
3444 	/* Disable the transmit/receive blocks. */
3445 	REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS, BCE_MISC_ENABLE_CLR_DEFAULT);
3446 	REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS);
3447 	DELAY(20);
3448 
3449 	bce_disable_intr(sc);
3450 
3451 	ifp->if_flags &= ~IFF_RUNNING;
3452 	for (i = 0; i < sc->tx_ring_cnt; ++i) {
3453 		ifsq_clr_oactive(sc->tx_rings[i].ifsq);
3454 		ifsq_watchdog_stop(&sc->tx_rings[i].tx_watchdog);
3455 	}
3456 
3457 	/* Free the RX lists. */
3458 	for (i = 0; i < sc->rx_ring_cnt; ++i)
3459 		bce_free_rx_chain(&sc->rx_rings[i]);
3460 
3461 	/* Free TX buffers. */
3462 	for (i = 0; i < sc->tx_ring_cnt; ++i)
3463 		bce_free_tx_chain(&sc->tx_rings[i]);
3464 
3465 	sc->bce_link = 0;
3466 	sc->bce_coalchg_mask = 0;
3467 }
3468 
3469 static int
3470 bce_reset(struct bce_softc *sc, uint32_t reset_code)
3471 {
3472 	uint32_t val;
3473 	int i, rc = 0;
3474 
3475 	/* Wait for pending PCI transactions to complete. */
3476 	REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS,
3477 	       BCE_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
3478 	       BCE_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
3479 	       BCE_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
3480 	       BCE_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
3481 	val = REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS);
3482 	DELAY(5);
3483 
3484 	/* Disable DMA */
3485 	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
3486 	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
3487 		val = REG_RD(sc, BCE_MISC_NEW_CORE_CTL);
3488 		val &= ~BCE_MISC_NEW_CORE_CTL_DMA_ENABLE;
3489 		REG_WR(sc, BCE_MISC_NEW_CORE_CTL, val);
3490 	}
3491 
3492 	/* Assume bootcode is running. */
3493 	sc->bce_fw_timed_out = 0;
3494 	sc->bce_drv_cardiac_arrest = 0;
3495 
3496 	/* Give the firmware a chance to prepare for the reset. */
3497 	rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT0 | reset_code);
3498 	if (rc) {
3499 		if_printf(&sc->arpcom.ac_if,
3500 			  "Firmware is not ready for reset\n");
3501 		return rc;
3502 	}
3503 
3504 	/* Set a firmware reminder that this is a soft reset. */
3505 	bce_shmem_wr(sc, BCE_DRV_RESET_SIGNATURE,
3506 	    BCE_DRV_RESET_SIGNATURE_MAGIC);
3507 
3508 	/* Dummy read to force the chip to complete all current transactions. */
3509 	val = REG_RD(sc, BCE_MISC_ID);
3510 
3511 	/* Chip reset. */
3512 	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
3513 	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
3514 		REG_WR(sc, BCE_MISC_COMMAND, BCE_MISC_COMMAND_SW_RESET);
3515 		REG_RD(sc, BCE_MISC_COMMAND);
3516 		DELAY(5);
3517 
3518 		val = BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3519 		    BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3520 
3521 		pci_write_config(sc->bce_dev, BCE_PCICFG_MISC_CONFIG, val, 4);
3522 	} else {
3523 		val = BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3524 		    BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3525 		    BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3526 		REG_WR(sc, BCE_PCICFG_MISC_CONFIG, val);
3527 
3528 		/* Allow up to 100us (10 x 10us) for the reset to complete. */
3529 		for (i = 0; i < 10; i++) {
3530 			val = REG_RD(sc, BCE_PCICFG_MISC_CONFIG);
3531 			if ((val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3532 			    BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
3533 				break;
3534 			DELAY(10);
3535 		}
3536 
3537 		/* Check that reset completed successfully. */
3538 		if (val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3539 		    BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
3540 			if_printf(&sc->arpcom.ac_if, "Reset failed!\n");
3541 			return EBUSY;
3542 		}
3543 	}
3544 
3545 	/* Make sure byte swapping is properly configured. */
3546 	val = REG_RD(sc, BCE_PCI_SWAP_DIAG0);
3547 	if (val != 0x01020304) {
3548 		if_printf(&sc->arpcom.ac_if, "Byte swap is incorrect!\n");
3549 		return ENODEV;
3550 	}
3551 
3552 	/* Just completed a reset, assume that firmware is running again. */
3553 	sc->bce_fw_timed_out = 0;
3554 	sc->bce_drv_cardiac_arrest = 0;
3555 
3556 	/* Wait for the firmware to finish its initialization. */
3557 	rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT1 | reset_code);
3558 	if (rc) {
3559 		if_printf(&sc->arpcom.ac_if,
3560 			  "Firmware did not complete initialization!\n");
3561 	}
3562 
3563 	if (sc->bce_irq_type == PCI_INTR_TYPE_MSIX) {
3564 		bce_setup_msix_table(sc);
3565 		/* Prevent MSI-X table reads and writes from timing out. */
3566 		REG_WR(sc, BCE_MISC_ECO_HW_CTL,
3567 		    BCE_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
3568 
3569 	}
3570 	return rc;
3571 }
3572 
3573 static int
3574 bce_chipinit(struct bce_softc *sc)
3575 {
3576 	uint32_t val;
3577 	int rc = 0;
3578 
3579 	/* Make sure the interrupt is not active. */
3580 	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, BCE_PCICFG_INT_ACK_CMD_MASK_INT);
3581 	REG_RD(sc, BCE_PCICFG_INT_ACK_CMD);
3582 
3583 	/*
3584 	 * Initialize DMA byte/word swapping, configure the number of DMA
3585 	 * channels and PCI clock compensation delay.
3586 	 */
3587 	val = BCE_DMA_CONFIG_DATA_BYTE_SWAP |
3588 	      BCE_DMA_CONFIG_DATA_WORD_SWAP |
3589 #if BYTE_ORDER == BIG_ENDIAN
3590 	      BCE_DMA_CONFIG_CNTL_BYTE_SWAP |
3591 #endif
3592 	      BCE_DMA_CONFIG_CNTL_WORD_SWAP |
3593 	      DMA_READ_CHANS << 12 |
3594 	      DMA_WRITE_CHANS << 16;
3595 
3596 	val |= (0x2 << 20) | BCE_DMA_CONFIG_CNTL_PCI_COMP_DLY;
3597 
3598 	if ((sc->bce_flags & BCE_PCIX_FLAG) && sc->bus_speed_mhz == 133)
3599 		val |= BCE_DMA_CONFIG_PCI_FAST_CLK_CMP;
3600 
3601 	/*
3602 	 * This setting resolves a problem observed on certain Intel PCI
3603 	 * chipsets that cannot handle multiple outstanding DMA operations.
3604 	 * See errata E9_5706A1_65.
3605 	 */
3606 	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706 &&
3607 	    BCE_CHIP_ID(sc) != BCE_CHIP_ID_5706_A0 &&
3608 	    !(sc->bce_flags & BCE_PCIX_FLAG))
3609 		val |= BCE_DMA_CONFIG_CNTL_PING_PONG_DMA;
3610 
3611 	REG_WR(sc, BCE_DMA_CONFIG, val);
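	/*
	 * Illustrative layout of the value just written: bits 12-15
	 * carry DMA_READ_CHANS and bits 16-19 DMA_WRITE_CHANS (assuming
	 * the driver defaults of 5 and 3), with the byte/word swap
	 * controls in the low bits.
	 */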
3612 
3613 	/* Enable the RX_V2P and Context state machines before access. */
3614 	REG_WR(sc, BCE_MISC_ENABLE_SET_BITS,
3615 	       BCE_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
3616 	       BCE_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
3617 	       BCE_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
3618 
3619 	/* Initialize context mapping and zero out the quick contexts. */
3620 	rc = bce_init_ctx(sc);
3621 	if (rc != 0)
3622 		return rc;
3623 
3624 	/* Initialize the on-board CPUs. */
3625 	bce_init_cpus(sc);
3626 
3627 	/* Enable management frames (NC-SI) to flow to the MCP. */
3628 	if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) {
3629 		val = REG_RD(sc, BCE_RPM_MGMT_PKT_CTRL) |
3630 		    BCE_RPM_MGMT_PKT_CTRL_MGMT_EN;
3631 		REG_WR(sc, BCE_RPM_MGMT_PKT_CTRL, val);
3632 	}
3633 
3634 	/* Prepare NVRAM for access. */
3635 	rc = bce_init_nvram(sc);
3636 	if (rc != 0)
3637 		return rc;
3638 
3639 	/* Set the kernel bypass block size */
3640 	val = REG_RD(sc, BCE_MQ_CONFIG);
3641 	val &= ~BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE;
3642 	val |= BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
3643 
3644 	/* Enable bins used on the 5709/5716. */
3645 	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
3646 	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
3647 		val |= BCE_MQ_CONFIG_BIN_MQ_MODE;
3648 		if (BCE_CHIP_ID(sc) == BCE_CHIP_ID_5709_A1)
3649 			val |= BCE_MQ_CONFIG_HALT_DIS;
3650 	}
3651 
3652 	REG_WR(sc, BCE_MQ_CONFIG, val);
3653 
3654 	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
3655 	REG_WR(sc, BCE_MQ_KNL_BYP_WIND_START, val);
3656 	REG_WR(sc, BCE_MQ_KNL_WIND_END, val);
3657 
3658 	/* Set the page size and clear the RV2P processor stall bits. */
3659 	val = (BCM_PAGE_BITS - 8) << 24;
3660 	REG_WR(sc, BCE_RV2P_CONFIG, val);
3661 
3662 	/* Configure page size. */
3663 	val = REG_RD(sc, BCE_TBDR_CONFIG);
3664 	val &= ~BCE_TBDR_CONFIG_PAGE_SIZE;
3665 	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
3666 	REG_WR(sc, BCE_TBDR_CONFIG, val);
3667 
3668 	/* Set the perfect match control register to default. */
3669 	REG_WR_IND(sc, BCE_RXP_PM_CTRL, 0);
3670 
3671 	return 0;
3672 }
3673 
3674 /****************************************************************************/
3675 /* Initialize the controller in preparation to send/receive traffic.        */
3676 /*                                                                          */
3677 /* Returns:                                                                 */
3678 /*   0 for success, positive value for failure.                             */
3679 /****************************************************************************/
3680 static int
3681 bce_blockinit(struct bce_softc *sc)
3682 {
3683 	uint32_t reg, val;
3684 	int i;
3685 
3686 	/* Load the hardware default MAC address. */
3687 	bce_set_mac_addr(sc);
3688 
3689 	/* Set the Ethernet backoff seed value */
3690 	val = sc->eaddr[0] + (sc->eaddr[1] << 8) + (sc->eaddr[2] << 16) +
3691 	      sc->eaddr[3] + (sc->eaddr[4] << 8) + (sc->eaddr[5] << 16);
3692 	REG_WR(sc, BCE_EMAC_BACKOFF_SEED, val);
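	/*
	 * Worked example: for MAC 00:10:18:aa:bb:cc the seed is
	 * 0x00 + 0x1000 + 0x180000 + 0xaa + 0xbb00 + 0xcc0000 == 0xe4cbaa.
	 */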
3693 
3694 	sc->rx_mode = BCE_EMAC_RX_MODE_SORT_MODE;
3695 
3696 	/* Set up link change interrupt generation. */
3697 	REG_WR(sc, BCE_EMAC_ATTENTION_ENA, BCE_EMAC_ATTENTION_ENA_LINK);
3698 
3699 	/* Program the physical address of the status block. */
3700 	REG_WR(sc, BCE_HC_STATUS_ADDR_L, BCE_ADDR_LO(sc->status_block_paddr));
3701 	REG_WR(sc, BCE_HC_STATUS_ADDR_H, BCE_ADDR_HI(sc->status_block_paddr));
3702 
3703 	/* Program the physical address of the statistics block. */
3704 	REG_WR(sc, BCE_HC_STATISTICS_ADDR_L,
3705 	       BCE_ADDR_LO(sc->stats_block_paddr));
3706 	REG_WR(sc, BCE_HC_STATISTICS_ADDR_H,
3707 	       BCE_ADDR_HI(sc->stats_block_paddr));
3708 
3709 	/* Program various host coalescing parameters. */
3710 	REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
3711 	       (sc->bce_tx_quick_cons_trip_int << 16) |
3712 	       sc->bce_tx_quick_cons_trip);
3713 	REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
3714 	       (sc->bce_rx_quick_cons_trip_int << 16) |
3715 	       sc->bce_rx_quick_cons_trip);
3716 	REG_WR(sc, BCE_HC_COMP_PROD_TRIP,
3717 	       (sc->bce_comp_prod_trip_int << 16) | sc->bce_comp_prod_trip);
3718 	REG_WR(sc, BCE_HC_TX_TICKS,
3719 	       (sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks);
3720 	REG_WR(sc, BCE_HC_RX_TICKS,
3721 	       (sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks);
3722 	REG_WR(sc, BCE_HC_COM_TICKS,
3723 	       (sc->bce_com_ticks_int << 16) | sc->bce_com_ticks);
3724 	REG_WR(sc, BCE_HC_CMD_TICKS,
3725 	       (sc->bce_cmd_ticks_int << 16) | sc->bce_cmd_ticks);
3726 	REG_WR(sc, BCE_HC_STATS_TICKS, (sc->bce_stats_ticks & 0xffff00));
3727 	REG_WR(sc, BCE_HC_STAT_COLLECT_TICKS, 0xbb8);	/* 3ms */
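	/*
	 * Each coalescing write above packs two 16-bit parameters: the
	 * "during interrupt" value in the upper half and the normal value
	 * in the lower half.  E.g. tx_quick_cons_trip_int == 20 and
	 * tx_quick_cons_trip == 20 would yield 0x00140014.
	 */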
3728 
3729 	if (sc->bce_irq_type == PCI_INTR_TYPE_MSIX)
3730 		REG_WR(sc, BCE_HC_MSIX_BIT_VECTOR, BCE_HC_MSIX_BIT_VECTOR_VAL);
3731 
3732 	val = BCE_HC_CONFIG_TX_TMR_MODE | BCE_HC_CONFIG_COLLECT_STATS;
3733 	if ((sc->bce_flags & BCE_ONESHOT_MSI_FLAG) ||
3734 	    sc->bce_irq_type == PCI_INTR_TYPE_MSIX) {
3735 		if (bootverbose) {
3736 			if (sc->bce_irq_type == PCI_INTR_TYPE_MSIX) {
3737 				if_printf(&sc->arpcom.ac_if,
3738 				    "using MSI-X\n");
3739 			} else {
3740 				if_printf(&sc->arpcom.ac_if,
3741 				    "using oneshot MSI\n");
3742 			}
3743 		}
3744 		val |= BCE_HC_CONFIG_ONE_SHOT | BCE_HC_CONFIG_USE_INT_PARAM;
3745 		if (sc->bce_irq_type == PCI_INTR_TYPE_MSIX)
3746 			val |= BCE_HC_CONFIG_SB_ADDR_INC_128B;
3747 	}
3748 	REG_WR(sc, BCE_HC_CONFIG, val);
3749 
3750 	for (i = 1; i < sc->rx_ring_cnt; ++i) {
3751 		uint32_t base;
3752 
3753 		base = ((i - 1) * BCE_HC_SB_CONFIG_SIZE) + BCE_HC_SB_CONFIG_1;
3754 		KKASSERT(base <= BCE_HC_SB_CONFIG_8);
3755 
3756 		REG_WR(sc, base,
3757 		    BCE_HC_SB_CONFIG_1_TX_TMR_MODE |
3758 		    /* BCE_HC_SB_CONFIG_1_RX_TMR_MODE | */
3759 		    BCE_HC_SB_CONFIG_1_ONE_SHOT);
3760 
3761 		REG_WR(sc, base + BCE_HC_TX_QUICK_CONS_TRIP_OFF,
3762 		    (sc->bce_tx_quick_cons_trip_int << 16) |
3763 		    sc->bce_tx_quick_cons_trip);
3764 		REG_WR(sc, base + BCE_HC_RX_QUICK_CONS_TRIP_OFF,
3765 		    (sc->bce_rx_quick_cons_trip_int << 16) |
3766 		    sc->bce_rx_quick_cons_trip);
3767 		REG_WR(sc, base + BCE_HC_TX_TICKS_OFF,
3768 		    (sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks);
3769 		REG_WR(sc, base + BCE_HC_RX_TICKS_OFF,
3770 		    (sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks);
3771 	}
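	/*
	 * The loop above programs one coalescing block per additional
	 * MSI-X vector: i == 1 writes BCE_HC_SB_CONFIG_1 and each later
	 * vector's block sits BCE_HC_SB_CONFIG_SIZE bytes higher, up to
	 * BCE_HC_SB_CONFIG_8.
	 */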
3772 
3773 	/* Clear the internal statistics counters. */
3774 	REG_WR(sc, BCE_HC_COMMAND, BCE_HC_COMMAND_CLR_STAT_NOW);
3775 
3776 	/* Verify that bootcode is running. */
3777 	reg = bce_shmem_rd(sc, BCE_DEV_INFO_SIGNATURE);
3778 
3779 	if ((reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
3780 	    BCE_DEV_INFO_SIGNATURE_MAGIC) {
3781 		if_printf(&sc->arpcom.ac_if,
3782 			  "Bootcode not running! Found: 0x%08X, "
3783 			  "Expected: 0x%08X\n",
3784 			  reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK,
3785 			  BCE_DEV_INFO_SIGNATURE_MAGIC);
3786 		return ENODEV;
3787 	}
3788 
3789 	/* Enable DMA */
3790 	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
3791 	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
3792 		val = REG_RD(sc, BCE_MISC_NEW_CORE_CTL);
3793 		val |= BCE_MISC_NEW_CORE_CTL_DMA_ENABLE;
3794 		REG_WR(sc, BCE_MISC_NEW_CORE_CTL, val);
3795 	}
3796 
3797 	/* Allow bootcode to apply any additional fixes before enabling MAC. */
3798 	bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT2 | BCE_DRV_MSG_CODE_RESET);
3799 
3800 	/* Enable link state change interrupt generation. */
3801 	REG_WR(sc, BCE_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);
3802 
3803 	/* Enable the RXP. */
3804 	bce_start_rxp_cpu(sc);
3805 
3806 	/* Disable management frames (NC-SI) from flowing to the MCP. */
3807 	if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) {
3808 		val = REG_RD(sc, BCE_RPM_MGMT_PKT_CTRL) &
3809 		    ~BCE_RPM_MGMT_PKT_CTRL_MGMT_EN;
3810 		REG_WR(sc, BCE_RPM_MGMT_PKT_CTRL, val);
3811 	}
3812 
3813 	/* Enable all remaining blocks in the MAC. */
3814 	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
3815 	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
3816 		REG_WR(sc, BCE_MISC_ENABLE_SET_BITS,
3817 		    BCE_MISC_ENABLE_DEFAULT_XI);
3818 	} else {
3819 		REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, BCE_MISC_ENABLE_DEFAULT);
3820 	}
3821 	REG_RD(sc, BCE_MISC_ENABLE_SET_BITS);
3822 	DELAY(20);
3823 
3824 	/* Save the current host coalescing block settings. */
3825 	sc->hc_command = REG_RD(sc, BCE_HC_COMMAND);
3826 
3827 	return 0;
3828 }
3829 
3830 /****************************************************************************/
3831 /* Encapsulate an mbuf cluster into the rx_bd chain.                        */
3832 /*                                                                          */
3833 /* The NetXtreme II can support Jumbo frames by using multiple rx_bd's.     */
3834 /* This routine will map an mbuf cluster into 1 or more rx_bd's as          */
3835 /* necessary.                                                               */
3836 /*                                                                          */
3837 /* Returns:                                                                 */
3838 /*   0 for success, positive value for failure.                             */
3839 /****************************************************************************/
3840 static int
3841 bce_newbuf_std(struct bce_rx_ring *rxr, uint16_t *prod, uint16_t chain_prod,
3842     uint32_t *prod_bseq, int init)
3843 {
3844 	struct bce_rx_buf *rx_buf;
3845 	bus_dmamap_t map;
3846 	bus_dma_segment_t seg;
3847 	struct mbuf *m_new;
3848 	int error, nseg;
3849 
3850 	/* This is a new mbuf allocation. */
3851 	m_new = m_getcl(init ? M_WAITOK : M_NOWAIT, MT_DATA, M_PKTHDR);
3852 	if (m_new == NULL)
3853 		return ENOBUFS;
3854 
3855 	m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
3856 
3857 	/* Map the mbuf cluster into device memory. */
3858 	error = bus_dmamap_load_mbuf_segment(rxr->rx_mbuf_tag,
3859 	    rxr->rx_mbuf_tmpmap, m_new, &seg, 1, &nseg, BUS_DMA_NOWAIT);
3860 	if (error) {
3861 		m_freem(m_new);
3862 		if (init) {
3863 			if_printf(&rxr->sc->arpcom.ac_if,
3864 			    "Error mapping mbuf into RX chain!\n");
3865 		}
3866 		return error;
3867 	}
3868 
3869 	rx_buf = &rxr->rx_bufs[chain_prod];
3870 	if (rx_buf->rx_mbuf_ptr != NULL)
3871 		bus_dmamap_unload(rxr->rx_mbuf_tag, rx_buf->rx_mbuf_map);
3872 
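	/*
	 * Swap the spare DMA map into the ring slot: the freshly loaded
	 * tmpmap becomes the slot's map and the slot's old map becomes
	 * the new spare, so a failed load never leaves the ring slot
	 * without a valid map.
	 */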
3873 	map = rx_buf->rx_mbuf_map;
3874 	rx_buf->rx_mbuf_map = rxr->rx_mbuf_tmpmap;
3875 	rxr->rx_mbuf_tmpmap = map;
3876 
3877 	/* Save the mbuf and update our counter. */
3878 	rx_buf->rx_mbuf_ptr = m_new;
3879 	rx_buf->rx_mbuf_paddr = seg.ds_addr;
3880 	rxr->free_rx_bd--;
3881 
3882 	bce_setup_rxdesc_std(rxr, chain_prod, prod_bseq);
3883 
3884 	return 0;
3885 }
3886 
3887 static void
3888 bce_setup_rxdesc_std(struct bce_rx_ring *rxr, uint16_t chain_prod,
3889     uint32_t *prod_bseq)
3890 {
3891 	const struct bce_rx_buf *rx_buf;
3892 	struct rx_bd *rxbd;
3893 	bus_addr_t paddr;
3894 	int len;
3895 
3896 	rx_buf = &rxr->rx_bufs[chain_prod];
3897 	paddr = rx_buf->rx_mbuf_paddr;
3898 	len = rx_buf->rx_mbuf_ptr->m_len;
3899 
3900 	/* Setup the rx_bd for the first segment. */
3901 	rxbd = &rxr->rx_bd_chain[RX_PAGE(chain_prod)][RX_IDX(chain_prod)];
3902 
3903 	rxbd->rx_bd_haddr_lo = htole32(BCE_ADDR_LO(paddr));
3904 	rxbd->rx_bd_haddr_hi = htole32(BCE_ADDR_HI(paddr));
3905 	rxbd->rx_bd_len = htole32(len);
3906 	rxbd->rx_bd_flags = htole32(RX_BD_FLAGS_START);
3907 	*prod_bseq += len;
3908 
3909 	rxbd->rx_bd_flags |= htole32(RX_BD_FLAGS_END);
3910 }
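/*
 * With standard 2KiB clusters each received frame occupies exactly one
 * rx_bd, so START and END land on the same descriptor above; the
 * separate END assignment mirrors the multi-BD layout a jumbo
 * configuration would use.
 */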
3911 
3912 /****************************************************************************/
3913 /* Initialize the TX context memory.                                        */
3914 /*                                                                          */
3915 /* Returns:                                                                 */
3916 /*   Nothing                                                                */
3917 /****************************************************************************/
3918 static void
3919 bce_init_tx_context(struct bce_tx_ring *txr)
3920 {
3921 	uint32_t val;
3922 
3923 	/* Initialize the context ID for an L2 TX chain. */
3924 	if (BCE_CHIP_NUM(txr->sc) == BCE_CHIP_NUM_5709 ||
3925 	    BCE_CHIP_NUM(txr->sc) == BCE_CHIP_NUM_5716) {
3926 		/* Set the CID type to support an L2 connection. */
3927 		val = BCE_L2CTX_TX_TYPE_TYPE_L2 | BCE_L2CTX_TX_TYPE_SIZE_L2;
3928 		CTX_WR(txr->sc, GET_CID_ADDR(txr->tx_cid),
3929 		    BCE_L2CTX_TX_TYPE_XI, val);
3930 		val = BCE_L2CTX_TX_CMD_TYPE_TYPE_L2 | (8 << 16);
3931 		CTX_WR(txr->sc, GET_CID_ADDR(txr->tx_cid),
3932 		    BCE_L2CTX_TX_CMD_TYPE_XI, val);
3933 
3934 		/* Point the hardware to the first page in the chain. */
3935 		val = BCE_ADDR_HI(txr->tx_bd_chain_paddr[0]);
3936 		CTX_WR(txr->sc, GET_CID_ADDR(txr->tx_cid),
3937 		    BCE_L2CTX_TX_TBDR_BHADDR_HI_XI, val);
3938 		val = BCE_ADDR_LO(txr->tx_bd_chain_paddr[0]);
3939 		CTX_WR(txr->sc, GET_CID_ADDR(txr->tx_cid),
3940 		    BCE_L2CTX_TX_TBDR_BHADDR_LO_XI, val);
3941 	} else {
3942 		/* Set the CID type to support an L2 connection. */
3943 		val = BCE_L2CTX_TX_TYPE_TYPE_L2 | BCE_L2CTX_TX_TYPE_SIZE_L2;
3944 		CTX_WR(txr->sc, GET_CID_ADDR(txr->tx_cid),
3945 		    BCE_L2CTX_TX_TYPE, val);
3946 		val = BCE_L2CTX_TX_CMD_TYPE_TYPE_L2 | (8 << 16);
3947 		CTX_WR(txr->sc, GET_CID_ADDR(txr->tx_cid),
3948 		    BCE_L2CTX_TX_CMD_TYPE, val);
3949 
3950 		/* Point the hardware to the first page in the chain. */
3951 		val = BCE_ADDR_HI(txr->tx_bd_chain_paddr[0]);
3952 		CTX_WR(txr->sc, GET_CID_ADDR(txr->tx_cid),
3953 		    BCE_L2CTX_TX_TBDR_BHADDR_HI, val);
3954 		val = BCE_ADDR_LO(txr->tx_bd_chain_paddr[0]);
3955 		CTX_WR(txr->sc, GET_CID_ADDR(txr->tx_cid),
3956 		    BCE_L2CTX_TX_TBDR_BHADDR_LO, val);
3957 	}
3958 }
3959 
3960 /****************************************************************************/
3961 /* Allocate memory and initialize the TX data structures.                   */
3962 /*                                                                          */
3963 /* Returns:                                                                 */
3964 /*   0 for success, positive value for failure.                             */
3965 /****************************************************************************/
3966 static int
3967 bce_init_tx_chain(struct bce_tx_ring *txr)
3968 {
3969 	struct tx_bd *txbd;
3970 	int i, rc = 0;
3971 
3972 	/* Set the initial TX producer/consumer indices. */
3973 	txr->tx_prod = 0;
3974 	txr->tx_cons = 0;
3975 	txr->tx_prod_bseq = 0;
3976 	txr->used_tx_bd = 0;
3977 	txr->max_tx_bd = USABLE_TX_BD(txr);
3978 
3979 	/*
3980 	 * The NetXtreme II supports a linked-list structure called
3981 	 * a Buffer Descriptor Chain (or BD chain).  A BD chain
3982 	 * consists of a series of 1 or more chain pages, each of which
3983 	 * consists of a fixed number of BD entries.
3984 	 * The last BD entry on each page is a pointer to the next page
3985 	 * in the chain, and the last pointer in the BD chain
3986 	 * points back to the beginning of the chain.
3987 	 */
3988 
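	/*
	 * Worked example, assuming 4KiB chain pages of 256 16-byte tx_bd
	 * entries (255 usable): with tx_pages == 2 the loop below makes
	 * tx_bd_chain[0][255] point at page 1 and tx_bd_chain[1][255]
	 * point back at page 0, closing the ring.
	 */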
3989 	/* Set the TX next pointer chain entries. */
3990 	for (i = 0; i < txr->tx_pages; i++) {
3991 		int j;
3992 
3993 		txbd = &txr->tx_bd_chain[i][USABLE_TX_BD_PER_PAGE];
3994 
3995 		/* Check if we've reached the last page. */
3996 		if (i == (txr->tx_pages - 1))
3997 			j = 0;
3998 		else
3999 			j = i + 1;
4000 
4001 		txbd->tx_bd_haddr_hi =
4002 		    htole32(BCE_ADDR_HI(txr->tx_bd_chain_paddr[j]));
4003 		txbd->tx_bd_haddr_lo =
4004 		    htole32(BCE_ADDR_LO(txr->tx_bd_chain_paddr[j]));
4005 	}
4006 	bce_init_tx_context(txr);
4007 
4008 	return(rc);
4009 }
4010 
4011 /****************************************************************************/
4012 /* Free memory and clear the TX data structures.                            */
4013 /*                                                                          */
4014 /* Returns:                                                                 */
4015 /*   Nothing.                                                               */
4016 /****************************************************************************/
4017 static void
4018 bce_free_tx_chain(struct bce_tx_ring *txr)
4019 {
4020 	int i;
4021 
4022 	/* Unmap, unload, and free any mbufs still in the TX mbuf chain. */
4023 	for (i = 0; i < TOTAL_TX_BD(txr); i++) {
4024 		struct bce_tx_buf *tx_buf = &txr->tx_bufs[i];
4025 
4026 		if (tx_buf->tx_mbuf_ptr != NULL) {
4027 			bus_dmamap_unload(txr->tx_mbuf_tag,
4028 			    tx_buf->tx_mbuf_map);
4029 			m_freem(tx_buf->tx_mbuf_ptr);
4030 			tx_buf->tx_mbuf_ptr = NULL;
4031 		}
4032 	}
4033 
4034 	/* Clear each TX chain page. */
4035 	for (i = 0; i < txr->tx_pages; i++)
4036 		bzero(txr->tx_bd_chain[i], BCE_TX_CHAIN_PAGE_SZ);
4037 	txr->used_tx_bd = 0;
4038 }
4039 
4040 /****************************************************************************/
4041 /* Initialize the RX context memory.                                        */
4042 /*                                                                          */
4043 /* Returns:                                                                 */
4044 /*   Nothing                                                                */
4045 /****************************************************************************/
4046 static void
4047 bce_init_rx_context(struct bce_rx_ring *rxr)
4048 {
4049 	uint32_t val;
4050 
4051 	/* Initialize the context ID for an L2 RX chain. */
4052 	val = BCE_L2CTX_RX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE |
4053 	    BCE_L2CTX_RX_CTX_TYPE_SIZE_L2 | (0x02 << 8);
4054 
4055 	/*
4056 	 * Set the level for generating pause frames
4057 	 * when the number of available rx_bd's gets
4058 	 * too low (the low watermark) and the level
4059 	 * when pause frames can be stopped (the high
4060 	 * watermark).
4061 	 */
4062 	if (BCE_CHIP_NUM(rxr->sc) == BCE_CHIP_NUM_5709 ||
4063 	    BCE_CHIP_NUM(rxr->sc) == BCE_CHIP_NUM_5716) {
4064 		uint32_t lo_water, hi_water;
4065 
4066 		lo_water = BCE_L2CTX_RX_LO_WATER_MARK_DEFAULT;
4067 		hi_water = USABLE_RX_BD(rxr) / 4;
4068 
4069 		lo_water /= BCE_L2CTX_RX_LO_WATER_MARK_SCALE;
4070 		hi_water /= BCE_L2CTX_RX_HI_WATER_MARK_SCALE;
4071 
4072 		if (hi_water > 0xf)
4073 			hi_water = 0xf;
4074 		else if (hi_water == 0)
4075 			lo_water = 0;
4076 		val |= lo_water |
4077 		    (hi_water << BCE_L2CTX_RX_HI_WATER_MARK_SHIFT);
4078 	}
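	/*
	 * Note that hi_water starts at a quarter of the usable rx_bd
	 * count, is scaled down, and is clamped to its 4-bit field (0xf);
	 * if it scales to zero the low watermark is zeroed as well,
	 * effectively disabling pause generation.
	 */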
4079 
4080 	CTX_WR(rxr->sc, GET_CID_ADDR(rxr->rx_cid),
4081 	    BCE_L2CTX_RX_CTX_TYPE, val);
4082 
4083 	/* Setup the MQ BIN mapping for l2_ctx_host_bseq. */
4084 	if (BCE_CHIP_NUM(rxr->sc) == BCE_CHIP_NUM_5709 ||
4085 	    BCE_CHIP_NUM(rxr->sc) == BCE_CHIP_NUM_5716) {
4086 		val = REG_RD(rxr->sc, BCE_MQ_MAP_L2_5);
4087 		REG_WR(rxr->sc, BCE_MQ_MAP_L2_5, val | BCE_MQ_MAP_L2_5_ARM);
4088 	}
4089 
4090 	/* Point the hardware to the first page in the chain. */
4091 	val = BCE_ADDR_HI(rxr->rx_bd_chain_paddr[0]);
4092 	CTX_WR(rxr->sc, GET_CID_ADDR(rxr->rx_cid),
4093 	    BCE_L2CTX_RX_NX_BDHADDR_HI, val);
4094 	val = BCE_ADDR_LO(rxr->rx_bd_chain_paddr[0]);
4095 	CTX_WR(rxr->sc, GET_CID_ADDR(rxr->rx_cid),
4096 	    BCE_L2CTX_RX_NX_BDHADDR_LO, val);
4097 }
4098 
4099 /****************************************************************************/
4100 /* Allocate memory and initialize the RX data structures.                   */
4101 /*                                                                          */
4102 /* Returns:                                                                 */
4103 /*   0 for success, positive value for failure.                             */
4104 /****************************************************************************/
4105 static int
4106 bce_init_rx_chain(struct bce_rx_ring *rxr)
4107 {
4108 	struct rx_bd *rxbd;
4109 	int i, rc = 0;
4110 	uint16_t prod, chain_prod;
4111 	uint32_t prod_bseq;
4112 
4113 	/* Initialize the RX producer and consumer indices. */
4114 	rxr->rx_prod = 0;
4115 	rxr->rx_cons = 0;
4116 	rxr->rx_prod_bseq = 0;
4117 	rxr->free_rx_bd = USABLE_RX_BD(rxr);
4118 	rxr->max_rx_bd = USABLE_RX_BD(rxr);
4119 
4120 	/* Clear the cached status block index. */
4121 	rxr->last_status_idx = 0;
4122 
4123 	/* Initialize the RX next pointer chain entries. */
4124 	for (i = 0; i < rxr->rx_pages; i++) {
4125 		int j;
4126 
4127 		rxbd = &rxr->rx_bd_chain[i][USABLE_RX_BD_PER_PAGE];
4128 
4129 		/* Check if we've reached the last page. */
4130 		if (i == (rxr->rx_pages - 1))
4131 			j = 0;
4132 		else
4133 			j = i + 1;
4134 
4135 		/* Setup the chain page pointers. */
4136 		rxbd->rx_bd_haddr_hi =
4137 		    htole32(BCE_ADDR_HI(rxr->rx_bd_chain_paddr[j]));
4138 		rxbd->rx_bd_haddr_lo =
4139 		    htole32(BCE_ADDR_LO(rxr->rx_bd_chain_paddr[j]));
4140 	}
4141 
4142 	/* Allocate mbuf clusters for the rx_bd chain. */
4143 	prod = prod_bseq = 0;
4144 	while (prod < TOTAL_RX_BD(rxr)) {
4145 		chain_prod = RX_CHAIN_IDX(rxr, prod);
4146 		if (bce_newbuf_std(rxr, &prod, chain_prod, &prod_bseq, 1)) {
4147 			if_printf(&rxr->sc->arpcom.ac_if,
4148 			    "Error filling RX chain: rx_bd[0x%04X]!\n",
4149 			    chain_prod);
4150 			rc = ENOBUFS;
4151 			break;
4152 		}
4153 		prod = NEXT_RX_BD(prod);
4154 	}
4155 
4156 	/* Save the RX chain producer index. */
4157 	rxr->rx_prod = prod;
4158 	rxr->rx_prod_bseq = prod_bseq;
4159 
4160 	/* Tell the chip about the waiting rx_bd's. */
4161 	REG_WR16(rxr->sc, MB_GET_CID_ADDR(rxr->rx_cid) + BCE_L2MQ_RX_HOST_BDIDX,
4162 	    rxr->rx_prod);
4163 	REG_WR(rxr->sc, MB_GET_CID_ADDR(rxr->rx_cid) + BCE_L2MQ_RX_HOST_BSEQ,
4164 	    rxr->rx_prod_bseq);
4165 
4166 	bce_init_rx_context(rxr);
4167 
4168 	return(rc);
4169 }
4170 
4171 /****************************************************************************/
4172 /* Free memory and clear the RX data structures.                            */
4173 /*                                                                          */
4174 /* Returns:                                                                 */
4175 /*   Nothing.                                                               */
4176 /****************************************************************************/
4177 static void
4178 bce_free_rx_chain(struct bce_rx_ring *rxr)
4179 {
4180 	int i;
4181 
4182 	/* Free any mbufs still in the RX mbuf chain. */
4183 	for (i = 0; i < TOTAL_RX_BD(rxr); i++) {
4184 		struct bce_rx_buf *rx_buf = &rxr->rx_bufs[i];
4185 
4186 		if (rx_buf->rx_mbuf_ptr != NULL) {
4187 			bus_dmamap_unload(rxr->rx_mbuf_tag,
4188 			    rx_buf->rx_mbuf_map);
4189 			m_freem(rx_buf->rx_mbuf_ptr);
4190 			rx_buf->rx_mbuf_ptr = NULL;
4191 		}
4192 	}
4193 
4194 	/* Clear each RX chain page. */
4195 	for (i = 0; i < rxr->rx_pages; i++)
4196 		bzero(rxr->rx_bd_chain[i], BCE_RX_CHAIN_PAGE_SZ);
4197 }
4198 
4199 /****************************************************************************/
4200 /* Set media options.                                                       */
4201 /*                                                                          */
4202 /* Returns:                                                                 */
4203 /*   0 for success, positive value for failure.                             */
4204 /****************************************************************************/
4205 static int
4206 bce_ifmedia_upd(struct ifnet *ifp)
4207 {
4208 	struct bce_softc *sc = ifp->if_softc;
4209 	struct mii_data *mii = device_get_softc(sc->bce_miibus);
4210 	int error = 0;
4211 
4212 	/*
4213 	 * 'mii' will be NULL when this function is called on the
4214 	 * following code path: bce_attach() -> bce_mgmt_init().
4215 	 */
4216 	if (mii != NULL) {
4217 		/* Make sure the MII bus has been enumerated. */
4218 		sc->bce_link = 0;
4219 		if (mii->mii_instance) {
4220 			struct mii_softc *miisc;
4221 
4222 			LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
4223 				mii_phy_reset(miisc);
4224 		}
4225 		error = mii_mediachg(mii);
4226 	}
4227 	return error;
4228 }
4229 
4230 /****************************************************************************/
4231 /* Reports current media status.                                            */
4232 /*                                                                          */
4233 /* Returns:                                                                 */
4234 /*   Nothing.                                                               */
4235 /****************************************************************************/
4236 static void
4237 bce_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
4238 {
4239 	struct bce_softc *sc = ifp->if_softc;
4240 	struct mii_data *mii = device_get_softc(sc->bce_miibus);
4241 
4242 	mii_pollstat(mii);
4243 	ifmr->ifm_active = mii->mii_media_active;
4244 	ifmr->ifm_status = mii->mii_media_status;
4245 }
4246 
4247 /****************************************************************************/
4248 /* Handles PHY generated interrupt events.                                  */
4249 /*                                                                          */
4250 /* Returns:                                                                 */
4251 /*   Nothing.                                                               */
4252 /****************************************************************************/
4253 static void
4254 bce_phy_intr(struct bce_softc *sc)
4255 {
4256 	uint32_t new_link_state, old_link_state;
4257 	struct ifnet *ifp = &sc->arpcom.ac_if;
4258 
4259 	ASSERT_SERIALIZED(&sc->main_serialize);
4260 
4261 	new_link_state = sc->status_block->status_attn_bits &
4262 			 STATUS_ATTN_BITS_LINK_STATE;
4263 	old_link_state = sc->status_block->status_attn_bits_ack &
4264 			 STATUS_ATTN_BITS_LINK_STATE;
4265 
4266 	/* Handle any changes if the link state has changed. */
4267 	if (new_link_state != old_link_state) {	/* XXX redundant? */
4268 		/* Update the status_attn_bits_ack field in the status block. */
4269 		if (new_link_state) {
4270 			REG_WR(sc, BCE_PCICFG_STATUS_BIT_SET_CMD,
4271 			       STATUS_ATTN_BITS_LINK_STATE);
4272 			if (bootverbose)
4273 				if_printf(ifp, "Link is now UP.\n");
4274 		} else {
4275 			REG_WR(sc, BCE_PCICFG_STATUS_BIT_CLEAR_CMD,
4276 			       STATUS_ATTN_BITS_LINK_STATE);
4277 			if (bootverbose)
4278 				if_printf(ifp, "Link is now DOWN.\n");
4279 		}
4280 
4281 		/*
4282 		 * Assume link is down and allow tick routine to
4283 		 * update the state based on the actual media state.
4284 		 */
4285 		sc->bce_link = 0;
4286 		callout_stop(&sc->bce_tick_callout);
4287 		bce_tick_serialized(sc);
4288 	}
4289 
4290 	/* Acknowledge the link change interrupt. */
4291 	REG_WR(sc, BCE_EMAC_STATUS, BCE_EMAC_STATUS_LINK_CHANGE);
4292 }
4293 
4294 /****************************************************************************/
4295 /* Reads the receive consumer value from the status block (skipping over    */
4296 /* chain page pointer if necessary).                                        */
4297 /*                                                                          */
4298 /* Returns:                                                                 */
4299 /*   hw_cons                                                                */
4300 /****************************************************************************/
4301 static __inline uint16_t
4302 bce_get_hw_rx_cons(struct bce_rx_ring *rxr)
4303 {
4304 	uint16_t hw_cons = *rxr->rx_hw_cons;
4305 
4306 	if ((hw_cons & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
4307 		hw_cons++;
4308 	return hw_cons;
4309 }
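/*
 * Example of the skip above, assuming 255 usable rx_bd entries per page
 * (USABLE_RX_BD_PER_PAGE == 0xff): raw consumer values 0xff, 0x1ff,
 * 0x2ff, ... land on next-page pointer slots and are bumped past the
 * pointer entry before use.
 */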
4310 
4311 /****************************************************************************/
4312 /* Handles received frame interrupt events.                                 */
4313 /*                                                                          */
4314 /* Returns:                                                                 */
4315 /*   Nothing.                                                               */
4316 /****************************************************************************/
4317 static void
4318 bce_rx_intr(struct bce_rx_ring *rxr, int count, uint16_t hw_cons)
4319 {
4320 	struct ifnet *ifp = &rxr->sc->arpcom.ac_if;
4321 	uint16_t sw_cons, sw_chain_cons, sw_prod, sw_chain_prod;
4322 	uint32_t sw_prod_bseq;
4323 	int cpuid = mycpuid;
4324 
4325 	ASSERT_SERIALIZED(&rxr->rx_serialize);
4326 
4327 	/* Get working copies of the driver's view of the RX indices. */
4328 	sw_cons = rxr->rx_cons;
4329 	sw_prod = rxr->rx_prod;
4330 	sw_prod_bseq = rxr->rx_prod_bseq;
4331 
4332 	/* Scan through the receive chain as long as there is work to do. */
4333 	while (sw_cons != hw_cons) {
4334 		struct pktinfo pi0, *pi = NULL;
4335 		struct bce_rx_buf *rx_buf;
4336 		struct mbuf *m = NULL;
4337 		struct l2_fhdr *l2fhdr = NULL;
4338 		unsigned int len;
4339 		uint32_t status = 0;
4340 
4341 #ifdef IFPOLL_ENABLE
4342 		if (count >= 0 && count-- == 0)
4343 			break;
4344 #endif
4345 
4346 		/*
4347 		 * Convert the producer/consumer indices
4348 		 * to an actual rx_bd index.
4349 		 */
4350 		sw_chain_cons = RX_CHAIN_IDX(rxr, sw_cons);
4351 		sw_chain_prod = RX_CHAIN_IDX(rxr, sw_prod);
4352 		rx_buf = &rxr->rx_bufs[sw_chain_cons];
4353 
4354 		rxr->free_rx_bd++;
4355 
4356 		/* The mbuf is stored with the last rx_bd entry of a packet. */
4357 		if (rx_buf->rx_mbuf_ptr != NULL) {
4358 			if (sw_chain_cons != sw_chain_prod) {
4359 				if_printf(ifp, "RX cons(%d) != prod(%d), "
4360 				    "drop!\n", sw_chain_cons, sw_chain_prod);
4361 				IFNET_STAT_INC(ifp, ierrors, 1);
4362 
4363 				bce_setup_rxdesc_std(rxr, sw_chain_cons,
4364 				    &sw_prod_bseq);
4365 				m = NULL;
4366 				goto bce_rx_int_next_rx;
4367 			}
4368 
4369 			/* Unmap the mbuf from DMA space. */
4370 			bus_dmamap_sync(rxr->rx_mbuf_tag, rx_buf->rx_mbuf_map,
4371 			    BUS_DMASYNC_POSTREAD);
4372 
4373 			/* Save the mbuf from the driver's chain. */
4374 			m = rx_buf->rx_mbuf_ptr;
4375 
4376 			/*
4377 			 * Frames received on the NetXtreme II are prepended
4378 			 * with an l2_fhdr structure which provides status
4379 			 * information about the received frame (including
4380 			 * VLAN tags and checksum info).  The frames are also
4381 			 * automatically adjusted to align the IP header
4382 			 * (i.e. two null bytes are inserted before the
4383 			 * Ethernet header).  As a result the data DMA'd by
4384 			 * the controller into the mbuf is as follows:
4385 			 *
4386 			 * +---------+-----+---------------------+-----+
4387 			 * | l2_fhdr | pad | packet data         | FCS |
4388 			 * +---------+-----+---------------------+-----+
4389 			 *
4390 			 * The l2_fhdr needs to be checked and skipped and the
4391 			 * FCS needs to be stripped before sending the packet
4392 			 * up the stack.
4393 			 */
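			/*
			 * Illustrative offsets (assuming the usual 16 byte
			 * l2_fhdr and a 2 byte ETHER_ALIGN pad): the packet
			 * data then starts 18 bytes into the mbuf, which is
			 * exactly what the m_adj() below skips, while the
			 * trailing 4 byte FCS is dropped by the
			 * "len -= ETHER_CRC_LEN" adjustment.
			 */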
4394 			l2fhdr = mtod(m, struct l2_fhdr *);
4395 
4396 			len = l2fhdr->l2_fhdr_pkt_len;
4397 			status = l2fhdr->l2_fhdr_status;
4398 
4399 			len -= ETHER_CRC_LEN;
4400 
4401 			/* Check the received frame for errors. */
4402 			if (status & (L2_FHDR_ERRORS_BAD_CRC |
4403 				      L2_FHDR_ERRORS_PHY_DECODE |
4404 				      L2_FHDR_ERRORS_ALIGNMENT |
4405 				      L2_FHDR_ERRORS_TOO_SHORT |
4406 				      L2_FHDR_ERRORS_GIANT_FRAME)) {
4407 				IFNET_STAT_INC(ifp, ierrors, 1);
4408 
4409 				/* Reuse the mbuf for a new frame. */
4410 				bce_setup_rxdesc_std(rxr, sw_chain_prod,
4411 				    &sw_prod_bseq);
4412 				m = NULL;
4413 				goto bce_rx_int_next_rx;
4414 			}
4415 
4416 			/*
4417 			 * Get a new mbuf for the rx_bd.   If no new
4418 			 * mbufs are available then reuse the current mbuf,
4419 			 * log an ierror on the interface, and generate
4420 			 * an error in the system log.
4421 			 */
4422 			if (bce_newbuf_std(rxr, &sw_prod, sw_chain_prod,
4423 			    &sw_prod_bseq, 0)) {
4424 				IFNET_STAT_INC(ifp, ierrors, 1);
4425 
4426 			/* Try and reuse the existing mbuf. */
4427 				bce_setup_rxdesc_std(rxr, sw_chain_prod,
4428 				    &sw_prod_bseq);
4429 				m = NULL;
4430 				goto bce_rx_int_next_rx;
4431 			}
4432 
4433 			/*
4434 			 * Skip over the l2_fhdr when passing
4435 			 * the data up the stack.
4436 			 */
4437 			m_adj(m, sizeof(struct l2_fhdr) + ETHER_ALIGN);
4438 
4439 			m->m_pkthdr.len = m->m_len = len;
4440 			m->m_pkthdr.rcvif = ifp;
4441 
4442 			/* Validate the checksum if offload enabled. */
4443 			if (ifp->if_capenable & IFCAP_RXCSUM) {
4444 				/* Check for an IP datagram. */
4445 				if (status & L2_FHDR_STATUS_IP_DATAGRAM) {
4446 					m->m_pkthdr.csum_flags |=
4447 						CSUM_IP_CHECKED;
4448 
4449 					/* Check if the IP checksum is valid. */
4450 					if ((l2fhdr->l2_fhdr_ip_xsum ^
4451 					     0xffff) == 0) {
4452 						m->m_pkthdr.csum_flags |=
4453 							CSUM_IP_VALID;
4454 					}
4455 				}
4456 
4457 				/* Check for a valid TCP/UDP frame. */
4458 				if (status & (L2_FHDR_STATUS_TCP_SEGMENT |
4459 					      L2_FHDR_STATUS_UDP_DATAGRAM)) {
4460 
4461 					/* Check for a good TCP/UDP checksum. */
4462 					if ((status &
4463 					     (L2_FHDR_ERRORS_TCP_XSUM |
4464 					      L2_FHDR_ERRORS_UDP_XSUM)) == 0) {
4465 						m->m_pkthdr.csum_data =
4466 						l2fhdr->l2_fhdr_tcp_udp_xsum;
4467 						m->m_pkthdr.csum_flags |=
4468 							CSUM_DATA_VALID |
4469 							CSUM_PSEUDO_HDR;
4470 					}
4471 				}
4472 			}
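			/*
			 * Note on the tests above: the controller verifies
			 * checksums in hardware; a correct IP header sums
			 * to 0xffff in ones' complement, hence the
			 * (xsum ^ 0xffff) == 0 test, and for TCP/UDP the
			 * hardware checksum is handed up unmodified with
			 * CSUM_DATA_VALID | CSUM_PSEUDO_HDR so the stack
			 * skips its own recomputation.
			 */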
4473 			if (ifp->if_capenable & IFCAP_RSS) {
4474 				pi = bce_rss_pktinfo(&pi0, status, l2fhdr);
4475 				if (pi != NULL &&
4476 				    (status & L2_FHDR_STATUS_RSS_HASH)) {
4477 					m_sethash(m,
4478 					    toeplitz_hash(l2fhdr->l2_fhdr_hash));
4479 				}
4480 			}
4481 
4482 			IFNET_STAT_INC(ifp, ipackets, 1);
4483 bce_rx_int_next_rx:
4484 			sw_prod = NEXT_RX_BD(sw_prod);
4485 		}
4486 
4487 		sw_cons = NEXT_RX_BD(sw_cons);
4488 
4489 		/* If we have a packet, pass it up the stack */
4490 		if (m) {
4491 			if (status & L2_FHDR_STATUS_L2_VLAN_TAG) {
4492 				m->m_flags |= M_VLANTAG;
4493 				m->m_pkthdr.ether_vlantag =
4494 					l2fhdr->l2_fhdr_vlan_tag;
4495 			}
4496 			ifp->if_input(ifp, m, pi, cpuid);
4497 #ifdef BCE_RSS_DEBUG
4498 			rxr->rx_pkts++;
4499 #endif
4500 		}
4501 	}
4502 
4503 	rxr->rx_cons = sw_cons;
4504 	rxr->rx_prod = sw_prod;
4505 	rxr->rx_prod_bseq = sw_prod_bseq;
4506 
4507 	REG_WR16(rxr->sc, MB_GET_CID_ADDR(rxr->rx_cid) + BCE_L2MQ_RX_HOST_BDIDX,
4508 	    rxr->rx_prod);
4509 	REG_WR(rxr->sc, MB_GET_CID_ADDR(rxr->rx_cid) + BCE_L2MQ_RX_HOST_BSEQ,
4510 	    rxr->rx_prod_bseq);
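	/*
	 * The two mailbox writes above hand the new producer index and
	 * the running byte sequence back to the chip once per call
	 * rather than once per frame, keeping doorbell traffic off the
	 * fast path of the loop.
	 */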
4511 }
4512 
4513 /****************************************************************************/
4514 /* Reads the transmit consumer value from the status block (skipping over   */
4515 /* chain page pointer if necessary).                                        */
4516 /*                                                                          */
4517 /* Returns:                                                                 */
4518 /*   hw_cons                                                                */
4519 /****************************************************************************/
4520 static __inline uint16_t
4521 bce_get_hw_tx_cons(struct bce_tx_ring *txr)
4522 {
4523 	uint16_t hw_cons = *txr->tx_hw_cons;
4524 
4525 	if ((hw_cons & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
4526 		hw_cons++;
4527 	return hw_cons;
4528 }
4529 
4530 /****************************************************************************/
4531 /* Handles transmit completion interrupt events.                            */
4532 /*                                                                          */
4533 /* Returns:                                                                 */
4534 /*   Nothing.                                                               */
4535 /****************************************************************************/
4536 static void
4537 bce_tx_intr(struct bce_tx_ring *txr, uint16_t hw_tx_cons)
4538 {
4539 	struct ifnet *ifp = &txr->sc->arpcom.ac_if;
4540 	uint16_t sw_tx_cons, sw_tx_chain_cons;
4541 
4542 	ASSERT_SERIALIZED(&txr->tx_serialize);
4543 
4544 	/* Get the hardware's view of the TX consumer index. */
4545 	sw_tx_cons = txr->tx_cons;
4546 
4547 	/* Cycle through any completed TX chain page entries. */
4548 	while (sw_tx_cons != hw_tx_cons) {
4549 		struct bce_tx_buf *tx_buf;
4550 
4551 		sw_tx_chain_cons = TX_CHAIN_IDX(txr, sw_tx_cons);
4552 		tx_buf = &txr->tx_bufs[sw_tx_chain_cons];
4553 
4554 		/*
4555 		 * Free the associated mbuf. Remember
4556 		 * that only the last tx_bd of a packet
4557 		 * has an mbuf pointer and DMA map.
4558 		 */
4559 		if (tx_buf->tx_mbuf_ptr != NULL) {
4560 			/* Unmap the mbuf. */
4561 			bus_dmamap_unload(txr->tx_mbuf_tag,
4562 			    tx_buf->tx_mbuf_map);
4563 
4564 			/* Free the mbuf. */
4565 			m_freem(tx_buf->tx_mbuf_ptr);
4566 			tx_buf->tx_mbuf_ptr = NULL;
4567 
4568 			IFNET_STAT_INC(ifp, opackets, 1);
4569 #ifdef BCE_TSS_DEBUG
4570 			txr->tx_pkts++;
4571 #endif
4572 		}
4573 
4574 		txr->used_tx_bd--;
4575 		sw_tx_cons = NEXT_TX_BD(sw_tx_cons);
4576 	}
4577 
4578 	if (txr->used_tx_bd == 0) {
4579 		/* Clear the TX timeout timer. */
4580 		ifsq_watchdog_set_count(&txr->tx_watchdog, 0);
4581 	}
4582 
4583 	/* Clear the tx hardware queue full flag. */
4584 	if (txr->max_tx_bd - txr->used_tx_bd >= BCE_TX_SPARE_SPACE)
4585 		ifsq_clr_oactive(txr->ifsq);
4586 	txr->tx_cons = sw_tx_cons;
4587 }
4588 
4589 /****************************************************************************/
4590 /* Disables interrupt generation.                                           */
4591 /*                                                                          */
4592 /* Returns:                                                                 */
4593 /*   Nothing.                                                               */
4594 /****************************************************************************/
4595 static void
4596 bce_disable_intr(struct bce_softc *sc)
4597 {
4598 	int i;
4599 
4600 	for (i = 0; i < sc->rx_ring_cnt; ++i) {
4601 		REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
4602 		    (sc->rx_rings[i].idx << 24) |
4603 		    BCE_PCICFG_INT_ACK_CMD_MASK_INT);
4604 	}
4605 	REG_RD(sc, BCE_PCICFG_INT_ACK_CMD);
4606 
4607 	callout_stop(&sc->bce_ckmsi_callout);
4608 	sc->bce_msi_maylose = FALSE;
4609 	sc->bce_check_rx_cons = 0;
4610 	sc->bce_check_tx_cons = 0;
4611 	sc->bce_check_status_idx = 0xffff;
4612 
4613 	for (i = 0; i < sc->rx_ring_cnt; ++i)
4614 		lwkt_serialize_handler_disable(sc->bce_msix[i].msix_serialize);
4615 }
4616 
4617 /****************************************************************************/
4618 /* Enables interrupt generation.                                            */
4619 /*                                                                          */
4620 /* Returns:                                                                 */
4621 /*   Nothing.                                                               */
4622 /****************************************************************************/
4623 static void
4624 bce_enable_intr(struct bce_softc *sc)
4625 {
4626 	int i;
4627 
4628 	for (i = 0; i < sc->rx_ring_cnt; ++i)
4629 		lwkt_serialize_handler_enable(sc->bce_msix[i].msix_serialize);
4630 
4631 	for (i = 0; i < sc->rx_ring_cnt; ++i) {
4632 		struct bce_rx_ring *rxr = &sc->rx_rings[i];
4633 
4634 		REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, (rxr->idx << 24) |
4635 		       BCE_PCICFG_INT_ACK_CMD_INDEX_VALID |
4636 		       BCE_PCICFG_INT_ACK_CMD_MASK_INT |
4637 		       rxr->last_status_idx);
4638 		REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, (rxr->idx << 24) |
4639 		       BCE_PCICFG_INT_ACK_CMD_INDEX_VALID |
4640 		       rxr->last_status_idx);
4641 	}
4642 	REG_WR(sc, BCE_HC_COMMAND, sc->hc_command | BCE_HC_COMMAND_COAL_NOW);
4643 
4644 	if (sc->bce_flags & BCE_CHECK_MSI_FLAG) {
4645 		sc->bce_msi_maylose = FALSE;
4646 		sc->bce_check_rx_cons = 0;
4647 		sc->bce_check_tx_cons = 0;
4648 		sc->bce_check_status_idx = 0xffff;
4649 
4650 		if (bootverbose)
4651 			if_printf(&sc->arpcom.ac_if, "check msi\n");
4652 
4653 		callout_reset_bycpu(&sc->bce_ckmsi_callout, BCE_MSI_CKINTVL,
4654 		    bce_check_msi, sc, sc->bce_msix[0].msix_cpuid);
4655 	}
4656 }
4657 
4658 /****************************************************************************/
4659 /* Reenables interrupt generation during interrupt handling.                */
4660 /*                                                                          */
4661 /* Returns:                                                                 */
4662 /*   Nothing.                                                               */
4663 /****************************************************************************/
4664 static void
4665 bce_reenable_intr(struct bce_rx_ring *rxr)
4666 {
4667 	REG_WR(rxr->sc, BCE_PCICFG_INT_ACK_CMD, (rxr->idx << 24) |
4668 	       BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | rxr->last_status_idx);
4669 }
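/*
 * A note on the INT_ACK_CMD writes used here and in
 * bce_{en,dis}able_intr() above: judging from the code, bits 31-24
 * carry the ring/vector index (rxr->idx), INDEX_VALID qualifies the
 * status block index in the low bits, and MASK_INT keeps the vector
 * masked.  Writing a valid status index without MASK_INT, as done
 * here, is what re-arms the interrupt.
 */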
4670 
4671 /****************************************************************************/
4672 /* Handles controller initialization.                                       */
4673 /*                                                                          */
4674 /* Returns:                                                                 */
4675 /*   Nothing.                                                               */
4676 /****************************************************************************/
4677 static void
4678 bce_init(void *xsc)
4679 {
4680 	struct bce_softc *sc = xsc;
4681 	struct ifnet *ifp = &sc->arpcom.ac_if;
4682 	uint32_t ether_mtu;
4683 	int error, i;
4684 	boolean_t polling;
4685 
4686 	ASSERT_IFNET_SERIALIZED_ALL(ifp);
4687 
4688 	/* Check if the driver is still running and bail out if it is. */
4689 	if (ifp->if_flags & IFF_RUNNING)
4690 		return;
4691 
4692 	bce_stop(sc);
4693 
4694 	error = bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
4695 	if (error) {
4696 		if_printf(ifp, "Controller reset failed!\n");
4697 		goto back;
4698 	}
4699 
4700 	error = bce_chipinit(sc);
4701 	if (error) {
4702 		if_printf(ifp, "Controller initialization failed!\n");
4703 		goto back;
4704 	}
4705 
4706 	error = bce_blockinit(sc);
4707 	if (error) {
4708 		if_printf(ifp, "Block initialization failed!\n");
4709 		goto back;
4710 	}
4711 
4712 	/* Load our MAC address. */
4713 	bcopy(IF_LLADDR(ifp), sc->eaddr, ETHER_ADDR_LEN);
4714 	bce_set_mac_addr(sc);
4715 
4716 	/* Calculate and program the Ethernet MTU size. */
4717 	ether_mtu = ETHER_HDR_LEN + EVL_ENCAPLEN + ifp->if_mtu + ETHER_CRC_LEN;
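	/*
	 * E.g. for the default MTU of 1500 this works out to
	 * 14 (header) + 4 (VLAN) + 1500 + 4 (CRC) = 1522 bytes, which
	 * does not exceed ETHER_MAX_LEN + EVL_ENCAPLEN (1518 + 4), so
	 * the jumbo path below is not taken.
	 */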
4718 
4719 	/*
4720 	 * Program the mtu, enabling jumbo frame
4721 	 * support if necessary.  Also set the mbuf
4722 	 * allocation count for RX frames.
4723 	 */
4724 	if (ether_mtu > ETHER_MAX_LEN + EVL_ENCAPLEN) {
4725 #ifdef notyet
4726 		REG_WR(sc, BCE_EMAC_RX_MTU_SIZE,
4727 		       min(ether_mtu, BCE_MAX_JUMBO_ETHER_MTU) |
4728 		       BCE_EMAC_RX_MTU_SIZE_JUMBO_ENA);
4729 #else
4730 		panic("jumbo buffer is not supported yet");
4731 #endif
4732 	} else {
4733 		REG_WR(sc, BCE_EMAC_RX_MTU_SIZE, ether_mtu);
4734 	}
4735 
4736 	/* Program appropriate promiscuous/multicast filtering. */
4737 	bce_set_rx_mode(sc);
4738 
4739 	/*
4740 	 * Init RX buffer descriptor chain.
4741 	 */
4742 	REG_WR(sc, BCE_RLUP_RSS_CONFIG, 0);
4743 	bce_reg_wr_ind(sc, BCE_RXP_SCRATCH_RSS_TBL_SZ, 0);
4744 
4745 	for (i = 0; i < sc->rx_ring_cnt; ++i)
4746 		bce_init_rx_chain(&sc->rx_rings[i]);	/* XXX return value */
4747 
4748 	if (sc->rx_ring_cnt > 1)
4749 		bce_init_rss(sc);
4750 
4751 	/*
4752 	 * Init TX buffer descriptor chain.
4753 	 */
4754 	REG_WR(sc, BCE_TSCH_TSS_CFG, 0);
4755 
4756 	for (i = 0; i < sc->tx_ring_cnt; ++i)
4757 		bce_init_tx_chain(&sc->tx_rings[i]);
4758 
4759 	if (sc->tx_ring_cnt > 1) {
4760 		REG_WR(sc, BCE_TSCH_TSS_CFG,
4761 		    ((sc->tx_ring_cnt - 1) << 24) | (TX_TSS_CID << 7));
4762 	}
4763 
4764 	polling = FALSE;
4765 #ifdef IFPOLL_ENABLE
4766 	if (ifp->if_flags & IFF_NPOLLING)
4767 		polling = TRUE;
4768 #endif
4769 
4770 	if (polling) {
4771 		/* Disable interrupts if we are polling. */
4772 		bce_disable_intr(sc);
4773 
4774 		/* Change coalesce parameters */
4775 		bce_npoll_coal_change(sc);
4776 	} else {
4777 		/* Enable host interrupts. */
4778 		bce_enable_intr(sc);
4779 	}
4780 	bce_set_timer_cpuid(sc, polling);
4781 
4782 	bce_ifmedia_upd(ifp);
4783 
4784 	ifp->if_flags |= IFF_RUNNING;
4785 	for (i = 0; i < sc->tx_ring_cnt; ++i) {
4786 		ifsq_clr_oactive(sc->tx_rings[i].ifsq);
4787 		ifsq_watchdog_start(&sc->tx_rings[i].tx_watchdog);
4788 	}
4789 
4790 	callout_reset_bycpu(&sc->bce_tick_callout, hz, bce_tick, sc,
4791 	    sc->bce_timer_cpuid);
4792 back:
4793 	if (error)
4794 		bce_stop(sc);
4795 }
4796 
4797 /****************************************************************************/
4798 /* Initialize the controller just enough so that any management firmware    */
4799 /* running on the device will continue to operate correctly.                */
4800 /*                                                                          */
4801 /* Returns:                                                                 */
4802 /*   Nothing.                                                               */
4803 /****************************************************************************/
4804 static void
4805 bce_mgmt_init(struct bce_softc *sc)
4806 {
4807 	struct ifnet *ifp = &sc->arpcom.ac_if;
4808 
4809 	/* Bail out if management firmware is not running. */
4810 	if (!(sc->bce_flags & BCE_MFW_ENABLE_FLAG))
4811 		return;
4812 
4813 	/* Enable all critical blocks in the MAC. */
4814 	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
4815 	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
4816 		REG_WR(sc, BCE_MISC_ENABLE_SET_BITS,
4817 		    BCE_MISC_ENABLE_DEFAULT_XI);
4818 	} else {
4819 		REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, BCE_MISC_ENABLE_DEFAULT);
4820 	}
4821 	REG_RD(sc, BCE_MISC_ENABLE_SET_BITS);
4822 	DELAY(20);
4823 
4824 	bce_ifmedia_upd(ifp);
4825 }
4826 
4827 /****************************************************************************/
4828 /* Encapsulates an mbuf cluster into the tx_bd chain structure and makes    */
4829 /* the memory visible to the controller.                                    */
4830 /*                                                                          */
4831 /* Returns:                                                                 */
4832 /*   0 for success, positive value for failure.                             */
4833 /****************************************************************************/
4834 static int
4835 bce_encap(struct bce_tx_ring *txr, struct mbuf **m_head, int *nsegs_used)
4836 {
4837 	bus_dma_segment_t segs[BCE_MAX_SEGMENTS];
4838 	bus_dmamap_t map, tmp_map;
4839 	struct mbuf *m0 = *m_head;
4840 	struct tx_bd *txbd = NULL;
4841 	uint16_t vlan_tag = 0, flags = 0, mss = 0;
4842 	uint16_t chain_prod, chain_prod_start, prod;
4843 	uint32_t prod_bseq;
4844 	int i, error, maxsegs, nsegs;
4845 
4846 	/* Transfer any checksum offload flags to the bd. */
4847 	if (m0->m_pkthdr.csum_flags & CSUM_TSO) {
4848 		error = bce_tso_setup(txr, m_head, &flags, &mss);
4849 		if (error)
4850 			return ENOBUFS;
4851 		m0 = *m_head;
4852 	} else if (m0->m_pkthdr.csum_flags & BCE_CSUM_FEATURES) {
4853 		if (m0->m_pkthdr.csum_flags & CSUM_IP)
4854 			flags |= TX_BD_FLAGS_IP_CKSUM;
4855 		if (m0->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
4856 			flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
4857 	}
4858 
4859 	/* Transfer any VLAN tags to the bd. */
4860 	if (m0->m_flags & M_VLANTAG) {
4861 		flags |= TX_BD_FLAGS_VLAN_TAG;
4862 		vlan_tag = m0->m_pkthdr.ether_vlantag;
4863 	}
4864 
4865 	prod = txr->tx_prod;
4866 	chain_prod_start = chain_prod = TX_CHAIN_IDX(txr, prod);
4867 
4868 	/* Map the mbuf into DMAable memory. */
4869 	map = txr->tx_bufs[chain_prod_start].tx_mbuf_map;
4870 
4871 	maxsegs = txr->max_tx_bd - txr->used_tx_bd;
4872 	KASSERT(maxsegs >= BCE_TX_SPARE_SPACE,
4873 		("not enough segments %d", maxsegs));
4874 	if (maxsegs > BCE_MAX_SEGMENTS)
4875 		maxsegs = BCE_MAX_SEGMENTS;
4876 
4877 	/* Map the mbuf into our DMA address space. */
4878 	error = bus_dmamap_load_mbuf_defrag(txr->tx_mbuf_tag, map, m_head,
4879 			segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
4880 	if (error)
4881 		goto back;
4882 	bus_dmamap_sync(txr->tx_mbuf_tag, map, BUS_DMASYNC_PREWRITE);
4883 
4884 	*nsegs_used += nsegs;
4885 
4886 	/* Reset m0 */
4887 	m0 = *m_head;
4888 
4889 	/* prod points to an empty tx_bd at this point. */
4890 	prod_bseq  = txr->tx_prod_bseq;
4891 
4892 	/*
4893 	 * Cycle through each mbuf segment that makes up
4894 	 * the outgoing frame, gathering the mapping info
4895 	 * for that segment and creating a tx_bd for
4896 	 * the mbuf.
4897 	 */
4898 	for (i = 0; i < nsegs; i++) {
4899 		chain_prod = TX_CHAIN_IDX(txr, prod);
4900 		txbd =
4901 		&txr->tx_bd_chain[TX_PAGE(chain_prod)][TX_IDX(chain_prod)];
4902 
4903 		txbd->tx_bd_haddr_lo = htole32(BCE_ADDR_LO(segs[i].ds_addr));
4904 		txbd->tx_bd_haddr_hi = htole32(BCE_ADDR_HI(segs[i].ds_addr));
4905 		txbd->tx_bd_mss_nbytes = htole32(mss << 16) |
4906 		    htole16(segs[i].ds_len);
4907 		txbd->tx_bd_vlan_tag = htole16(vlan_tag);
4908 		txbd->tx_bd_flags = htole16(flags);
4909 
4910 		prod_bseq += segs[i].ds_len;
4911 		if (i == 0)
4912 			txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_START);
4913 		prod = NEXT_TX_BD(prod);
4914 	}
4915 
4916 	/* Set the END flag on the last TX buffer descriptor. */
4917 	txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_END);
4918 
4919 	/*
4920 	 * Ensure that the mbuf pointer for this transmission
4921 	 * is placed at the array index of the last
4922 	 * descriptor in this chain.  This is done
4923 	 * because a single map is used for all
4924 	 * segments of the mbuf and we don't want to
4925 	 * unload the map before all of the segments
4926 	 * have been freed.
4927 	 */
4928 	txr->tx_bufs[chain_prod].tx_mbuf_ptr = m0;
4929 
4930 	tmp_map = txr->tx_bufs[chain_prod].tx_mbuf_map;
4931 	txr->tx_bufs[chain_prod].tx_mbuf_map = map;
4932 	txr->tx_bufs[chain_prod_start].tx_mbuf_map = tmp_map;
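	/*
	 * E.g. a 3 segment frame starting at chain_prod_start == 10
	 * occupies BDs 10..12 (ignoring chain page wrap): the map that
	 * was loaded lived in slot 10, so the swap above parks it in
	 * slot 12 next to the mbuf pointer, letting bce_tx_intr()
	 * unload the map and free the mbuf together at the packet's
	 * last BD.
	 */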
4933 
4934 	txr->used_tx_bd += nsegs;
4935 
4936 	/* prod points to the next free tx_bd at this point. */
4937 	txr->tx_prod = prod;
4938 	txr->tx_prod_bseq = prod_bseq;
4939 back:
4940 	if (error) {
4941 		m_freem(*m_head);
4942 		*m_head = NULL;
4943 	}
4944 	return error;
4945 }
4946 
4947 static void
4948 bce_xmit(struct bce_tx_ring *txr)
4949 {
4950 	/* Start the transmit. */
4951 	REG_WR16(txr->sc, MB_GET_CID_ADDR(txr->tx_cid) + BCE_L2CTX_TX_HOST_BIDX,
4952 	    txr->tx_prod);
4953 	REG_WR(txr->sc, MB_GET_CID_ADDR(txr->tx_cid) + BCE_L2CTX_TX_HOST_BSEQ,
4954 	    txr->tx_prod_bseq);
4955 }
4956 
4957 /****************************************************************************/
4958 /* Main transmit routine when called from another routine with a lock.      */
4959 /*                                                                          */
4960 /* Returns:                                                                 */
4961 /*   Nothing.                                                               */
4962 /****************************************************************************/
4963 static void
4964 bce_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
4965 {
4966 	struct bce_softc *sc = ifp->if_softc;
4967 	struct bce_tx_ring *txr = ifsq_get_priv(ifsq);
4968 	int count = 0;
4969 
4970 	KKASSERT(txr->ifsq == ifsq);
4971 	ASSERT_SERIALIZED(&txr->tx_serialize);
4972 
4973 	/* If there's no link or the transmit queue is empty then just exit. */
4974 	if (!sc->bce_link) {
4975 		ifsq_purge(ifsq);
4976 		return;
4977 	}
4978 
4979 	if ((ifp->if_flags & IFF_RUNNING) == 0 || ifsq_is_oactive(ifsq))
4980 		return;
4981 
4982 	for (;;) {
4983 		struct mbuf *m_head;
4984 
4985 		/*
4986 		 * We keep BCE_TX_SPARE_SPACE entries, so bce_encap() is
4987 		 * unlikely to fail.
4988 		 */
4989 		if (txr->max_tx_bd - txr->used_tx_bd < BCE_TX_SPARE_SPACE) {
4990 			ifsq_set_oactive(ifsq);
4991 			break;
4992 		}
4993 
4994 		/* Check for any frames to send. */
4995 		m_head = ifsq_dequeue(ifsq);
4996 		if (m_head == NULL)
4997 			break;
4998 
4999 		/*
5000 		 * Pack the data into the transmit ring. If we
5001 		 * don't have room, place the mbuf back at the
5002 		 * head of the queue and set the OACTIVE flag
5003 		 * to wait for the NIC to drain the chain.
5004 		 */
5005 		if (bce_encap(txr, &m_head, &count)) {
5006 			IFNET_STAT_INC(ifp, oerrors, 1);
5007 			if (txr->used_tx_bd == 0) {
5008 				continue;
5009 			} else {
5010 				ifsq_set_oactive(ifsq);
5011 				break;
5012 			}
5013 		}
5014 
5015 		if (count >= txr->tx_wreg) {
5016 			bce_xmit(txr);
5017 			count = 0;
5018 		}
5019 
5020 		/* Send a copy of the frame to any BPF listeners. */
5021 		ETHER_BPF_MTAP(ifp, m_head);
5022 
5023 		/* Set the tx timeout. */
5024 		ifsq_watchdog_set_count(&txr->tx_watchdog, BCE_TX_TIMEOUT);
5025 	}
5026 	if (count > 0)
5027 		bce_xmit(txr);
5028 }
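/*
 * The tx_wreg test above batches doorbell writes: bce_xmit() runs
 * once every txr->tx_wreg frames, plus once more for any tail after
 * the loop, trading a little latency for far fewer register writes
 * under load.
 */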
5029 
5030 /****************************************************************************/
5031 /* Handles any IOCTL calls from the operating system.                       */
5032 /*                                                                          */
5033 /* Returns:                                                                 */
5034 /*   0 for success, positive value for failure.                             */
5035 /****************************************************************************/
5036 static int
5037 bce_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
5038 {
5039 	struct bce_softc *sc = ifp->if_softc;
5040 	struct ifreq *ifr = (struct ifreq *)data;
5041 	struct mii_data *mii;
5042 	int mask, error = 0;
5043 
5044 	ASSERT_IFNET_SERIALIZED_ALL(ifp);
5045 
5046 	switch(command) {
5047 	case SIOCSIFMTU:
5048 		/* Check that the MTU setting is supported. */
5049 		if (ifr->ifr_mtu < BCE_MIN_MTU ||
5050 #ifdef notyet
5051 		    ifr->ifr_mtu > BCE_MAX_JUMBO_MTU
5052 #else
5053 		    ifr->ifr_mtu > ETHERMTU
5054 #endif
5055 		   ) {
5056 			error = EINVAL;
5057 			break;
5058 		}
5059 
5060 		ifp->if_mtu = ifr->ifr_mtu;
5061 		ifp->if_flags &= ~IFF_RUNNING;	/* Force reinitialize */
5062 		bce_init(sc);
5063 		break;
5064 
5065 	case SIOCSIFFLAGS:
5066 		if (ifp->if_flags & IFF_UP) {
5067 			if (ifp->if_flags & IFF_RUNNING) {
5068 				mask = ifp->if_flags ^ sc->bce_if_flags;
5069 
5070 				if (mask & (IFF_PROMISC | IFF_ALLMULTI))
5071 					bce_set_rx_mode(sc);
5072 			} else {
5073 				bce_init(sc);
5074 			}
5075 		} else if (ifp->if_flags & IFF_RUNNING) {
5076 			bce_stop(sc);
5077 
5078 			/* If MFW is running, partially re-init the controller. */
5079 			if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) {
5080 				bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
5081 				bce_chipinit(sc);
5082 				bce_mgmt_init(sc);
5083 			}
5084 		}
5085 		sc->bce_if_flags = ifp->if_flags;
5086 		break;
5087 
5088 	case SIOCADDMULTI:
5089 	case SIOCDELMULTI:
5090 		if (ifp->if_flags & IFF_RUNNING)
5091 			bce_set_rx_mode(sc);
5092 		break;
5093 
5094 	case SIOCSIFMEDIA:
5095 	case SIOCGIFMEDIA:
5096 		mii = device_get_softc(sc->bce_miibus);
5097 		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
5098 		break;
5099 
5100 	case SIOCSIFCAP:
5101 		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
5102 		if (mask & IFCAP_HWCSUM) {
5103 			ifp->if_capenable ^= (mask & IFCAP_HWCSUM);
5104 			if (ifp->if_capenable & IFCAP_TXCSUM)
5105 				ifp->if_hwassist |= BCE_CSUM_FEATURES;
5106 			else
5107 				ifp->if_hwassist &= ~BCE_CSUM_FEATURES;
5108 		}
5109 		if (mask & IFCAP_TSO) {
5110 			ifp->if_capenable ^= IFCAP_TSO;
5111 			if (ifp->if_capenable & IFCAP_TSO)
5112 				ifp->if_hwassist |= CSUM_TSO;
5113 			else
5114 				ifp->if_hwassist &= ~CSUM_TSO;
5115 		}
5116 		if (mask & IFCAP_RSS)
5117 			ifp->if_capenable ^= IFCAP_RSS;
5118 		break;
5119 
5120 	default:
5121 		error = ether_ioctl(ifp, command, data);
5122 		break;
5123 	}
5124 	return error;
5125 }
5126 
5127 /****************************************************************************/
5128 /* Transmit timeout handler.                                                */
5129 /*                                                                          */
5130 /* Returns:                                                                 */
5131 /*   Nothing.                                                               */
5132 /****************************************************************************/
5133 static void
5134 bce_watchdog(struct ifaltq_subque *ifsq)
5135 {
5136 	struct ifnet *ifp = ifsq_get_ifp(ifsq);
5137 	struct bce_softc *sc = ifp->if_softc;
5138 	int i;
5139 
5140 	ASSERT_IFNET_SERIALIZED_ALL(ifp);
5141 
5142 	/*
5143 	 * If we are in this routine because of pause frames, then
5144 	 * don't reset the hardware.
5145 	 */
5146 	if (REG_RD(sc, BCE_EMAC_TX_STATUS) & BCE_EMAC_TX_STATUS_XOFFED)
5147 		return;
5148 
5149 	if_printf(ifp, "Watchdog timeout occurred, resetting!\n");
5150 
5151 	ifp->if_flags &= ~IFF_RUNNING;	/* Force reinitialize */
5152 	bce_init(sc);
5153 
5154 	IFNET_STAT_INC(ifp, oerrors, 1);
5155 
5156 	for (i = 0; i < sc->tx_ring_cnt; ++i)
5157 		ifsq_devstart_sched(sc->tx_rings[i].ifsq);
5158 }
5159 
5160 #ifdef IFPOLL_ENABLE
5161 
5162 static void
5163 bce_npoll_status(struct ifnet *ifp)
5164 {
5165 	struct bce_softc *sc = ifp->if_softc;
5166 	struct status_block *sblk = sc->status_block;
5167 	uint32_t status_attn_bits;
5168 
5169 	ASSERT_SERIALIZED(&sc->main_serialize);
5170 
5171 	status_attn_bits = sblk->status_attn_bits;
5172 
5173 	/* Was it a link change interrupt? */
5174 	if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
5175 	    (sblk->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE)) {
5176 		bce_phy_intr(sc);
5177 
5178 		/*
5179 		 * Clear any transient status updates during link state change.
5180 		 */
5181 		REG_WR(sc, BCE_HC_COMMAND,
5182 		    sc->hc_command | BCE_HC_COMMAND_COAL_NOW_WO_INT);
5183 		REG_RD(sc, BCE_HC_COMMAND);
5184 	}
5185 
5186 	/*
5187 	 * If any other attention is asserted then the chip is toast.
5188 	 */
5189 	if ((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) !=
5190 	     (sblk->status_attn_bits_ack & ~STATUS_ATTN_BITS_LINK_STATE)) {
5191 		if_printf(ifp, "Fatal attention detected: 0x%08X\n",
5192 		    sblk->status_attn_bits);
5193 		bce_serialize_skipmain(sc);
5194 		bce_init(sc);
5195 		bce_deserialize_skipmain(sc);
5196 	}
5197 }
5198 
5199 static void
5200 bce_npoll_rx(struct ifnet *ifp, void *arg, int count)
5201 {
5202 	struct bce_rx_ring *rxr = arg;
5203 	uint16_t hw_rx_cons;
5204 
5205 	ASSERT_SERIALIZED(&rxr->rx_serialize);
5206 
5207 	/*
5208 	 * Save the status block index value for use when enabling
5209 	 * the interrupt.
5210 	 */
5211 	rxr->last_status_idx = *rxr->hw_status_idx;
5212 
5213 	/* Make sure status index is extracted before RX/TX cons */
5214 	cpu_lfence();
5215 
5216 	hw_rx_cons = bce_get_hw_rx_cons(rxr);
5217 
5218 	/* Check for any completed RX frames. */
5219 	if (hw_rx_cons != rxr->rx_cons)
5220 		bce_rx_intr(rxr, count, hw_rx_cons);
5221 }
5222 
5223 static void
5224 bce_npoll_rx_pack(struct ifnet *ifp, void *arg, int count)
5225 {
5226 	struct bce_rx_ring *rxr = arg;
5227 
5228 	KASSERT(rxr->idx == 0, ("not the first RX ring, but %d", rxr->idx));
5229 	bce_npoll_rx(ifp, rxr, count);
5230 
5231 	KASSERT(rxr->sc->rx_ring_cnt != rxr->sc->rx_ring_cnt2,
5232 	    ("RX ring count %d, count2 %d", rxr->sc->rx_ring_cnt,
5233 	     rxr->sc->rx_ring_cnt2));
5234 
5235 	/* Last ring carries packets whose masked hash is 0 */
5236 	rxr = &rxr->sc->rx_rings[rxr->sc->rx_ring_cnt - 1];
5237 
5238 	lwkt_serialize_enter(&rxr->rx_serialize);
5239 	bce_npoll_rx(ifp, rxr, count);
5240 	lwkt_serialize_exit(&rxr->rx_serialize);
5241 }
5242 
5243 static void
5244 bce_npoll_tx(struct ifnet *ifp, void *arg, int count __unused)
5245 {
5246 	struct bce_tx_ring *txr = arg;
5247 	uint16_t hw_tx_cons;
5248 
5249 	ASSERT_SERIALIZED(&txr->tx_serialize);
5250 
5251 	hw_tx_cons = bce_get_hw_tx_cons(txr);
5252 
5253 	/* Check for any completed TX frames. */
5254 	if (hw_tx_cons != txr->tx_cons) {
5255 		bce_tx_intr(txr, hw_tx_cons);
5256 		if (!ifsq_is_empty(txr->ifsq))
5257 			ifsq_devstart(txr->ifsq);
5258 	}
5259 }
5260 
5261 static void
5262 bce_npoll(struct ifnet *ifp, struct ifpoll_info *info)
5263 {
5264 	struct bce_softc *sc = ifp->if_softc;
5265 	int i;
5266 
5267 	ASSERT_IFNET_SERIALIZED_ALL(ifp);
5268 
5269 	if (info != NULL) {
5270 		int cpu;
5271 
5272 		info->ifpi_status.status_func = bce_npoll_status;
5273 		info->ifpi_status.serializer = &sc->main_serialize;
5274 
5275 		for (i = 0; i < sc->tx_ring_cnt; ++i) {
5276 			struct bce_tx_ring *txr = &sc->tx_rings[i];
5277 
5278 			cpu = if_ringmap_cpumap(sc->tx_rmap, i);
5279 			KKASSERT(cpu < netisr_ncpus);
5280 			info->ifpi_tx[cpu].poll_func = bce_npoll_tx;
5281 			info->ifpi_tx[cpu].arg = txr;
5282 			info->ifpi_tx[cpu].serializer = &txr->tx_serialize;
5283 			ifsq_set_cpuid(txr->ifsq, cpu);
5284 		}
5285 
5286 		for (i = 0; i < sc->rx_ring_cnt2; ++i) {
5287 			struct bce_rx_ring *rxr = &sc->rx_rings[i];
5288 
5289 			cpu = if_ringmap_cpumap(sc->rx_rmap, i);
5290 			KKASSERT(cpu < netisr_ncpus);
5291 			if (i == 0 && sc->rx_ring_cnt2 != sc->rx_ring_cnt) {
5292 				/*
5293 				 * If RSS is enabled, the packets whose
5294 				 * masked hash are 0 are queued to the
5295 				 * last RX ring; piggyback the last RX
5296 				 * ring's processing in the first RX
5297 				 * polling handler. (see also: comment
5298 				 * in bce_setup_ring_cnt())
5299 				 */
5300 				if (bootverbose) {
5301 					if_printf(ifp, "npoll pack last "
5302 					    "RX ring on cpu%d\n", cpu);
5303 				}
5304 				info->ifpi_rx[cpu].poll_func =
5305 				    bce_npoll_rx_pack;
5306 			} else {
5307 				info->ifpi_rx[cpu].poll_func = bce_npoll_rx;
5308 			}
5309 			info->ifpi_rx[cpu].arg = rxr;
5310 			info->ifpi_rx[cpu].serializer = &rxr->rx_serialize;
5311 		}
5312 
5313 		if (ifp->if_flags & IFF_RUNNING) {
5314 			bce_set_timer_cpuid(sc, TRUE);
5315 			bce_disable_intr(sc);
5316 			bce_npoll_coal_change(sc);
5317 		}
5318 	} else {
5319 		for (i = 0; i < sc->tx_ring_cnt; ++i) {
5320 			ifsq_set_cpuid(sc->tx_rings[i].ifsq,
5321 			    sc->bce_msix[i].msix_cpuid);
5322 		}
5323 
5324 		if (ifp->if_flags & IFF_RUNNING) {
5325 			bce_set_timer_cpuid(sc, FALSE);
5326 			bce_enable_intr(sc);
5327 
5328 			sc->bce_coalchg_mask |= BCE_COALMASK_TX_BDS_INT |
5329 			    BCE_COALMASK_RX_BDS_INT;
5330 			bce_coal_change(sc);
5331 		}
5332 	}
5333 }
5334 
5335 #endif	/* IFPOLL_ENABLE */
5336 
5337 /*
5338  * Interrupt handler.
5339  */
5340 /****************************************************************************/
5341 /* Main interrupt entry point.  Verifies that the controller generated the  */
5342 /* interrupt and then calls a separate routine to handle the various        */
5343 /* interrupt causes (PHY, TX, RX).                                          */
5344 /*                                                                          */
5345 /* Returns:                                                                 */
5346 /*   Nothing.                                                               */
5347 /****************************************************************************/
5348 static void
5349 bce_intr(struct bce_softc *sc)
5350 {
5351 	struct ifnet *ifp = &sc->arpcom.ac_if;
5352 	struct status_block *sblk;
5353 	uint16_t hw_rx_cons, hw_tx_cons;
5354 	uint32_t status_attn_bits;
5355 	struct bce_tx_ring *txr = &sc->tx_rings[0];
5356 	struct bce_rx_ring *rxr = &sc->rx_rings[0];
5357 
5358 	ASSERT_SERIALIZED(&sc->main_serialize);
5359 
5360 	sblk = sc->status_block;
5361 
5362 	/*
5363 	 * Save the status block index value for use during
5364 	 * the next interrupt.
5365 	 */
5366 	rxr->last_status_idx = *rxr->hw_status_idx;
5367 
5368 	/* Make sure status index is extracted before RX/TX cons */
5369 	cpu_lfence();
5370 
5371 	/* Check if the hardware has finished any work. */
5372 	hw_rx_cons = bce_get_hw_rx_cons(rxr);
5373 	hw_tx_cons = bce_get_hw_tx_cons(txr);
5374 
5375 	status_attn_bits = sblk->status_attn_bits;
5376 
5377 	/* Was it a link change interrupt? */
5378 	if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
5379 	    (sblk->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE)) {
5380 		bce_phy_intr(sc);
5381 
5382 		/*
5383 		 * Clear any transient status updates during link state
5384 		 * change.
5385 		 */
5386 		REG_WR(sc, BCE_HC_COMMAND,
5387 		    sc->hc_command | BCE_HC_COMMAND_COAL_NOW_WO_INT);
5388 		REG_RD(sc, BCE_HC_COMMAND);
5389 	}
5390 
5391 	/*
5392 	 * If any other attention is asserted then
5393 	 * the chip is toast.
5394 	 */
5395 	if ((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) !=
5396 	    (sblk->status_attn_bits_ack & ~STATUS_ATTN_BITS_LINK_STATE)) {
5397 		if_printf(ifp, "Fatal attention detected: 0x%08X\n",
5398 			  sblk->status_attn_bits);
5399 		bce_serialize_skipmain(sc);
5400 		bce_init(sc);
5401 		bce_deserialize_skipmain(sc);
5402 		return;
5403 	}
5404 
5405 	/* Check for any completed RX frames. */
5406 	lwkt_serialize_enter(&rxr->rx_serialize);
5407 	if (hw_rx_cons != rxr->rx_cons)
5408 		bce_rx_intr(rxr, -1, hw_rx_cons);
5409 	lwkt_serialize_exit(&rxr->rx_serialize);
5410 
5411 	/* Check for any completed TX frames. */
5412 	lwkt_serialize_enter(&txr->tx_serialize);
5413 	if (hw_tx_cons != txr->tx_cons) {
5414 		bce_tx_intr(txr, hw_tx_cons);
5415 		if (!ifsq_is_empty(txr->ifsq))
5416 			ifsq_devstart(txr->ifsq);
5417 	}
5418 	lwkt_serialize_exit(&txr->tx_serialize);
5419 }
5420 
5421 static void
5422 bce_intr_legacy(void *xsc)
5423 {
5424 	struct bce_softc *sc = xsc;
5425 	struct bce_rx_ring *rxr = &sc->rx_rings[0];
5426 	struct status_block *sblk;
5427 
5428 	sblk = sc->status_block;
5429 
5430 	/*
5431 	 * If the hardware status block index matches the last value
5432 	 * read by the driver and we haven't asserted our interrupt
5433 	 * then there's nothing to do.
5434 	 */
5435 	if (sblk->status_idx == rxr->last_status_idx &&
5436 	    (REG_RD(sc, BCE_PCICFG_MISC_STATUS) &
5437 	     BCE_PCICFG_MISC_STATUS_INTA_VALUE))
5438 		return;
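	/*
	 * Reaching this point means the status index moved or INTA is
	 * sampled asserted (the INTA_VALUE bit presumably reflects the
	 * active-low pin, so a set bit means deasserted); the
	 * interrupt is ours to service.
	 */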
5439 
5440 	/* Ack the interrupt and stop others from occurring. */
5441 	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
5442 	       BCE_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
5443 	       BCE_PCICFG_INT_ACK_CMD_MASK_INT);
5444 
5445 	/*
5446 	 * Read back to deassert IRQ immediately to avoid too
5447 	 * many spurious interrupts.
5448 	 */
5449 	REG_RD(sc, BCE_PCICFG_INT_ACK_CMD);
5450 
5451 	bce_intr(sc);
5452 
5453 	/* Re-enable interrupts. */
5454 	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
5455 	       BCE_PCICFG_INT_ACK_CMD_INDEX_VALID |
5456 	       BCE_PCICFG_INT_ACK_CMD_MASK_INT | rxr->last_status_idx);
5457 	bce_reenable_intr(rxr);
5458 }
5459 
5460 static void
5461 bce_intr_msi(void *xsc)
5462 {
5463 	struct bce_softc *sc = xsc;
5464 
5465 	/* Ack the interrupt and stop others from occurring. */
5466 	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
5467 	       BCE_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
5468 	       BCE_PCICFG_INT_ACK_CMD_MASK_INT);
5469 
5470 	bce_intr(sc);
5471 
5472 	/* Re-enable interrupts */
5473 	bce_reenable_intr(&sc->rx_rings[0]);
5474 }
5475 
5476 static void
5477 bce_intr_msi_oneshot(void *xsc)
5478 {
5479 	struct bce_softc *sc = xsc;
5480 
5481 	bce_intr(sc);
5482 
5483 	/* Re-enable interrupts */
5484 	bce_reenable_intr(&sc->rx_rings[0]);
5485 }
5486 
5487 static void
5488 bce_intr_msix_rxtx(void *xrxr)
5489 {
5490 	struct bce_rx_ring *rxr = xrxr;
5491 	struct bce_tx_ring *txr;
5492 	uint16_t hw_rx_cons, hw_tx_cons;
5493 
5494 	ASSERT_SERIALIZED(&rxr->rx_serialize);
5495 
5496 	KKASSERT(rxr->idx < rxr->sc->tx_ring_cnt);
5497 	txr = &rxr->sc->tx_rings[rxr->idx];
5498 
5499 	/*
5500 	 * Save the status block index value for use during
5501 	 * the next interrupt.
5502 	 */
5503 	rxr->last_status_idx = *rxr->hw_status_idx;
5504 
5505 	/* Make sure status index is extracted before RX/TX cons */
5506 	cpu_lfence();
5507 
5508 	/* Check if the hardware has finished any work. */
5509 	hw_rx_cons = bce_get_hw_rx_cons(rxr);
5510 	if (hw_rx_cons != rxr->rx_cons)
5511 		bce_rx_intr(rxr, -1, hw_rx_cons);
5512 
5513 	/* Check for any completed TX frames. */
5514 	hw_tx_cons = bce_get_hw_tx_cons(txr);
5515 	lwkt_serialize_enter(&txr->tx_serialize);
5516 	if (hw_tx_cons != txr->tx_cons) {
5517 		bce_tx_intr(txr, hw_tx_cons);
5518 		if (!ifsq_is_empty(txr->ifsq))
5519 			ifsq_devstart(txr->ifsq);
5520 	}
5521 	lwkt_serialize_exit(&txr->tx_serialize);
5522 
5523 	/* Re-enable interrupts */
5524 	bce_reenable_intr(rxr);
5525 }
5526 
5527 static void
5528 bce_intr_msix_rx(void *xrxr)
5529 {
5530 	struct bce_rx_ring *rxr = xrxr;
5531 	uint16_t hw_rx_cons;
5532 
5533 	ASSERT_SERIALIZED(&rxr->rx_serialize);
5534 
5535 	/*
5536 	 * Save the status block index value for use during
5537 	 * the next interrupt.
5538 	 */
5539 	rxr->last_status_idx = *rxr->hw_status_idx;
5540 
5541 	/* Make sure status index is extracted before RX cons */
5542 	cpu_lfence();
5543 
5544 	/* Check if the hardware has finished any work. */
5545 	hw_rx_cons = bce_get_hw_rx_cons(rxr);
5546 	if (hw_rx_cons != rxr->rx_cons)
5547 		bce_rx_intr(rxr, -1, hw_rx_cons);
5548 
5549 	/* Re-enable interrupts */
5550 	bce_reenable_intr(rxr);
5551 }
5552 
5553 /****************************************************************************/
5554 /* Programs the various packet receive modes (broadcast and multicast).     */
5555 /*                                                                          */
5556 /* Returns:                                                                 */
5557 /*   Nothing.                                                               */
5558 /****************************************************************************/
5559 static void
5560 bce_set_rx_mode(struct bce_softc *sc)
5561 {
5562 	struct ifnet *ifp = &sc->arpcom.ac_if;
5563 	struct ifmultiaddr *ifma;
5564 	uint32_t hashes[NUM_MC_HASH_REGISTERS] = { 0, 0, 0, 0, 0, 0, 0, 0 };
5565 	uint32_t rx_mode, sort_mode;
5566 	int h, i;
5567 
5568 	ASSERT_IFNET_SERIALIZED_ALL(ifp);
5569 
5570 	/* Initialize receive mode default settings. */
5571 	rx_mode = sc->rx_mode &
5572 		  ~(BCE_EMAC_RX_MODE_PROMISCUOUS |
5573 		    BCE_EMAC_RX_MODE_KEEP_VLAN_TAG);
5574 	sort_mode = 1 | BCE_RPM_SORT_USER0_BC_EN;
5575 
5576 	/*
5577 	 * ASF/IPMI/UMP firmware requires that VLAN tag stripping
5578 	 * be enabled.
5579 	 */
5580 	if (!(BCE_IF_CAPABILITIES & IFCAP_VLAN_HWTAGGING) &&
5581 	    !(sc->bce_flags & BCE_MFW_ENABLE_FLAG))
5582 		rx_mode |= BCE_EMAC_RX_MODE_KEEP_VLAN_TAG;
5583 
5584 	/*
5585 	 * Check for promiscuous, all multicast, or selected
5586 	 * multicast address filtering.
5587 	 */
5588 	if (ifp->if_flags & IFF_PROMISC) {
5589 		/* Enable promiscuous mode. */
5590 		rx_mode |= BCE_EMAC_RX_MODE_PROMISCUOUS;
5591 		sort_mode |= BCE_RPM_SORT_USER0_PROM_EN;
5592 	} else if (ifp->if_flags & IFF_ALLMULTI) {
5593 		/* Enable all multicast addresses. */
5594 		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
5595 			REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4),
5596 			       0xffffffff);
5597 		}
5598 		sort_mode |= BCE_RPM_SORT_USER0_MC_EN;
5599 	} else {
5600 		/* Accept one or more multicast(s). */
5601 		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
5602 			if (ifma->ifma_addr->sa_family != AF_LINK)
5603 				continue;
5604 			h = ether_crc32_le(
5605 			    LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
5606 			    ETHER_ADDR_LEN) & 0xFF;
5607 			hashes[(h & 0xE0) >> 5] |= 1 << (h & 0x1F);
5608 		}
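		/*
		 * Worked example with a hypothetical address: if the CRC32
		 * yields h == 0x4b, then (h & 0xE0) >> 5 == 2 selects
		 * hashes[2] and h & 0x1F == 11 selects bit 11 within it,
		 * so matching frames are accepted once hashes[2] is
		 * written with bit 11 set below.
		 */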
5609 
5610 		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
5611 			REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4),
5612 			       hashes[i]);
5613 		}
5614 		sort_mode |= BCE_RPM_SORT_USER0_MC_HSH_EN;
5615 	}
5616 
5617 	/* Only make changes if the receive mode has actually changed. */
5618 	if (rx_mode != sc->rx_mode) {
5619 		sc->rx_mode = rx_mode;
5620 		REG_WR(sc, BCE_EMAC_RX_MODE, rx_mode);
5621 	}
5622 
5623 	/* Disable and clear the existing sort before enabling a new sort. */
5624 	REG_WR(sc, BCE_RPM_SORT_USER0, 0x0);
5625 	REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode);
5626 	REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode | BCE_RPM_SORT_USER0_ENA);
5627 }
5628 
5629 /****************************************************************************/
5630 /* Called periodically to update statistics from the controller's           */
5631 /* statistics block.                                                        */
5632 /*                                                                          */
5633 /* Returns:                                                                 */
5634 /*   Nothing.                                                               */
5635 /****************************************************************************/
5636 static void
5637 bce_stats_update(struct bce_softc *sc)
5638 {
5639 	struct ifnet *ifp = &sc->arpcom.ac_if;
5640 	struct statistics_block *stats = sc->stats_block;
5641 
5642 	ASSERT_SERIALIZED(&sc->main_serialize);
5643 
5644 	/*
5645 	 * Certain controllers don't report carrier sense errors correctly.
5646 	 * See errata E11_5708CA0_1165.
5647 	 */
5648 	if (!(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) &&
5649 	    !(BCE_CHIP_ID(sc) == BCE_CHIP_ID_5708_A0)) {
5650 		IFNET_STAT_INC(ifp, oerrors,
5651 			(u_long)stats->stat_Dot3StatsCarrierSenseErrors);
5652 	}
5653 
5654 	/*
5655 	 * Update the sysctl statistics from the hardware statistics.
5656 	 */
5657 	sc->stat_IfHCInOctets =
5658 		((uint64_t)stats->stat_IfHCInOctets_hi << 32) +
5659 		 (uint64_t)stats->stat_IfHCInOctets_lo;
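	/*
	 * The _hi/_lo pairs are the two halves of 64 bit hardware
	 * counters; (hi << 32) + lo reassembles the full width, and
	 * the same pattern repeats for each IfHC counter below.
	 */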
5660 
5661 	sc->stat_IfHCInBadOctets =
5662 		((uint64_t)stats->stat_IfHCInBadOctets_hi << 32) +
5663 		 (uint64_t)stats->stat_IfHCInBadOctets_lo;
5664 
5665 	sc->stat_IfHCOutOctets =
5666 		((uint64_t)stats->stat_IfHCOutOctets_hi << 32) +
5667 		 (uint64_t)stats->stat_IfHCOutOctets_lo;
5668 
5669 	sc->stat_IfHCOutBadOctets =
5670 		((uint64_t)stats->stat_IfHCOutBadOctets_hi << 32) +
5671 		 (uint64_t)stats->stat_IfHCOutBadOctets_lo;
5672 
5673 	sc->stat_IfHCInUcastPkts =
5674 		((uint64_t)stats->stat_IfHCInUcastPkts_hi << 32) +
5675 		 (uint64_t)stats->stat_IfHCInUcastPkts_lo;
5676 
5677 	sc->stat_IfHCInMulticastPkts =
5678 		((uint64_t)stats->stat_IfHCInMulticastPkts_hi << 32) +
5679 		 (uint64_t)stats->stat_IfHCInMulticastPkts_lo;
5680 
5681 	sc->stat_IfHCInBroadcastPkts =
5682 		((uint64_t)stats->stat_IfHCInBroadcastPkts_hi << 32) +
5683 		 (uint64_t)stats->stat_IfHCInBroadcastPkts_lo;
5684 
5685 	sc->stat_IfHCOutUcastPkts =
5686 		((uint64_t)stats->stat_IfHCOutUcastPkts_hi << 32) +
5687 		 (uint64_t)stats->stat_IfHCOutUcastPkts_lo;
5688 
5689 	sc->stat_IfHCOutMulticastPkts =
5690 		((uint64_t)stats->stat_IfHCOutMulticastPkts_hi << 32) +
5691 		 (uint64_t)stats->stat_IfHCOutMulticastPkts_lo;
5692 
5693 	sc->stat_IfHCOutBroadcastPkts =
5694 		((uint64_t)stats->stat_IfHCOutBroadcastPkts_hi << 32) +
5695 		 (uint64_t)stats->stat_IfHCOutBroadcastPkts_lo;
5696 
5697 	sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors =
5698 		stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors;
5699 
5700 	sc->stat_Dot3StatsCarrierSenseErrors =
5701 		stats->stat_Dot3StatsCarrierSenseErrors;
5702 
5703 	sc->stat_Dot3StatsFCSErrors =
5704 		stats->stat_Dot3StatsFCSErrors;
5705 
5706 	sc->stat_Dot3StatsAlignmentErrors =
5707 		stats->stat_Dot3StatsAlignmentErrors;
5708 
5709 	sc->stat_Dot3StatsSingleCollisionFrames =
5710 		stats->stat_Dot3StatsSingleCollisionFrames;
5711 
5712 	sc->stat_Dot3StatsMultipleCollisionFrames =
5713 		stats->stat_Dot3StatsMultipleCollisionFrames;
5714 
5715 	sc->stat_Dot3StatsDeferredTransmissions =
5716 		stats->stat_Dot3StatsDeferredTransmissions;
5717 
5718 	sc->stat_Dot3StatsExcessiveCollisions =
5719 		stats->stat_Dot3StatsExcessiveCollisions;
5720 
5721 	sc->stat_Dot3StatsLateCollisions =
5722 		stats->stat_Dot3StatsLateCollisions;
5723 
5724 	sc->stat_EtherStatsCollisions =
5725 		stats->stat_EtherStatsCollisions;
5726 
5727 	sc->stat_EtherStatsFragments =
5728 		stats->stat_EtherStatsFragments;
5729 
5730 	sc->stat_EtherStatsJabbers =
5731 		stats->stat_EtherStatsJabbers;
5732 
5733 	sc->stat_EtherStatsUndersizePkts =
5734 		stats->stat_EtherStatsUndersizePkts;
5735 
5736 	sc->stat_EtherStatsOverrsizePkts =
5737 		stats->stat_EtherStatsOverrsizePkts;
5738 
5739 	sc->stat_EtherStatsPktsRx64Octets =
5740 		stats->stat_EtherStatsPktsRx64Octets;
5741 
5742 	sc->stat_EtherStatsPktsRx65Octetsto127Octets =
5743 		stats->stat_EtherStatsPktsRx65Octetsto127Octets;
5744 
5745 	sc->stat_EtherStatsPktsRx128Octetsto255Octets =
5746 		stats->stat_EtherStatsPktsRx128Octetsto255Octets;
5747 
5748 	sc->stat_EtherStatsPktsRx256Octetsto511Octets =
5749 		stats->stat_EtherStatsPktsRx256Octetsto511Octets;
5750 
5751 	sc->stat_EtherStatsPktsRx512Octetsto1023Octets =
5752 		stats->stat_EtherStatsPktsRx512Octetsto1023Octets;
5753 
5754 	sc->stat_EtherStatsPktsRx1024Octetsto1522Octets =
5755 		stats->stat_EtherStatsPktsRx1024Octetsto1522Octets;
5756 
5757 	sc->stat_EtherStatsPktsRx1523Octetsto9022Octets =
5758 		stats->stat_EtherStatsPktsRx1523Octetsto9022Octets;
5759 
5760 	sc->stat_EtherStatsPktsTx64Octets =
5761 		stats->stat_EtherStatsPktsTx64Octets;
5762 
5763 	sc->stat_EtherStatsPktsTx65Octetsto127Octets =
5764 		stats->stat_EtherStatsPktsTx65Octetsto127Octets;
5765 
5766 	sc->stat_EtherStatsPktsTx128Octetsto255Octets =
5767 		stats->stat_EtherStatsPktsTx128Octetsto255Octets;
5768 
5769 	sc->stat_EtherStatsPktsTx256Octetsto511Octets =
5770 		stats->stat_EtherStatsPktsTx256Octetsto511Octets;
5771 
5772 	sc->stat_EtherStatsPktsTx512Octetsto1023Octets =
5773 		stats->stat_EtherStatsPktsTx512Octetsto1023Octets;
5774 
5775 	sc->stat_EtherStatsPktsTx1024Octetsto1522Octets =
5776 		stats->stat_EtherStatsPktsTx1024Octetsto1522Octets;
5777 
5778 	sc->stat_EtherStatsPktsTx1523Octetsto9022Octets =
5779 		stats->stat_EtherStatsPktsTx1523Octetsto9022Octets;
5780 
5781 	sc->stat_XonPauseFramesReceived =
5782 		stats->stat_XonPauseFramesReceived;
5783 
5784 	sc->stat_XoffPauseFramesReceived =
5785 		stats->stat_XoffPauseFramesReceived;
5786 
5787 	sc->stat_OutXonSent =
5788 		stats->stat_OutXonSent;
5789 
5790 	sc->stat_OutXoffSent =
5791 		stats->stat_OutXoffSent;
5792 
5793 	sc->stat_FlowControlDone =
5794 		stats->stat_FlowControlDone;
5795 
5796 	sc->stat_MacControlFramesReceived =
5797 		stats->stat_MacControlFramesReceived;
5798 
5799 	sc->stat_XoffStateEntered =
5800 		stats->stat_XoffStateEntered;
5801 
5802 	sc->stat_IfInFramesL2FilterDiscards =
5803 		stats->stat_IfInFramesL2FilterDiscards;
5804 
5805 	sc->stat_IfInRuleCheckerDiscards =
5806 		stats->stat_IfInRuleCheckerDiscards;
5807 
5808 	sc->stat_IfInFTQDiscards =
5809 		stats->stat_IfInFTQDiscards;
5810 
5811 	sc->stat_IfInMBUFDiscards =
5812 		stats->stat_IfInMBUFDiscards;
5813 
5814 	sc->stat_IfInRuleCheckerP4Hit =
5815 		stats->stat_IfInRuleCheckerP4Hit;
5816 
5817 	sc->stat_CatchupInRuleCheckerDiscards =
5818 		stats->stat_CatchupInRuleCheckerDiscards;
5819 
5820 	sc->stat_CatchupInFTQDiscards =
5821 		stats->stat_CatchupInFTQDiscards;
5822 
5823 	sc->stat_CatchupInMBUFDiscards =
5824 		stats->stat_CatchupInMBUFDiscards;
5825 
5826 	sc->stat_CatchupInRuleCheckerP4Hit =
5827 		stats->stat_CatchupInRuleCheckerP4Hit;
5828 
5829 	sc->com_no_buffers = REG_RD_IND(sc, 0x120084);
5830 
5831 	/*
5832 	 * Update the interface statistics from the
5833 	 * hardware statistics.
5834 	 */
5835 	IFNET_STAT_SET(ifp, collisions, (u_long)sc->stat_EtherStatsCollisions);
5836 
5837 	IFNET_STAT_SET(ifp, ierrors, (u_long)sc->stat_EtherStatsUndersizePkts +
5838 	    (u_long)sc->stat_EtherStatsOverrsizePkts +
5839 	    (u_long)sc->stat_IfInMBUFDiscards +
5840 	    (u_long)sc->stat_Dot3StatsAlignmentErrors +
5841 	    (u_long)sc->stat_Dot3StatsFCSErrors +
5842 	    (u_long)sc->stat_IfInRuleCheckerDiscards +
5843 	    (u_long)sc->stat_IfInFTQDiscards +
5844 	    (u_long)sc->com_no_buffers);
5845 
5846 	IFNET_STAT_SET(ifp, oerrors,
5847 	    (u_long)sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors +
5848 	    (u_long)sc->stat_Dot3StatsExcessiveCollisions +
5849 	    (u_long)sc->stat_Dot3StatsLateCollisions);
5850 }
5851 
5852 /****************************************************************************/
5853 /* Periodic function to notify the bootcode that the driver is still        */
5854 /* present.                                                                 */
5855 /*                                                                          */
5856 /* Returns:                                                                 */
5857 /*   Nothing.                                                               */
5858 /****************************************************************************/
5859 static void
5860 bce_pulse(void *xsc)
5861 {
5862 	struct bce_softc *sc = xsc;
5863 	struct ifnet *ifp = &sc->arpcom.ac_if;
5864 	uint32_t msg;
5865 
5866 	lwkt_serialize_enter(&sc->main_serialize);
5867 
5868 	/* Tell the firmware that the driver is still running. */
5869 	msg = (uint32_t)++sc->bce_fw_drv_pulse_wr_seq;
5870 	bce_shmem_wr(sc, BCE_DRV_PULSE_MB, msg);
5871 
5872 	/* Update the bootcode condition. */
5873 	sc->bc_state = bce_shmem_rd(sc, BCE_BC_STATE_CONDITION);
5874 
5875 	/* Report whether the bootcode still knows the driver is running. */
5876 	if (!sc->bce_drv_cardiac_arrest) {
5877 		if (!(sc->bc_state & BCE_CONDITION_DRV_PRESENT)) {
5878 			sc->bce_drv_cardiac_arrest = 1;
5879 			if_printf(ifp, "Bootcode lost the driver pulse! "
5880 			    "(bc_state = 0x%08X)\n", sc->bc_state);
5881 		}
5882 	} else {
5883 		/*
5884 		 * Not supported by all bootcode versions.
5885 		 * (v5.0.11+ and v5.2.1+)  Older bootcode
5886 		 * will require the driver to reset the
5887 		 * controller to clear this condition.
5888 		 */
5889 		if (sc->bc_state & BCE_CONDITION_DRV_PRESENT) {
5890 			sc->bce_drv_cardiac_arrest = 0;
5891 			if_printf(ifp, "Bootcode found the driver pulse! "
5892 			    "(bc_state = 0x%08X)\n", sc->bc_state);
5893 		}
5894 	}
5895 
5896 	/* Schedule the next pulse. */
5897 	callout_reset_bycpu(&sc->bce_pulse_callout, hz, bce_pulse, sc,
5898 	    sc->bce_timer_cpuid);
5899 
5900 	lwkt_serialize_exit(&sc->main_serialize);
5901 }
5902 
5903 /****************************************************************************/
5904 /* Periodic function to check whether MSI is lost                           */
5905 /*                                                                          */
5906 /* Returns:                                                                 */
5907 /*   Nothing.                                                               */
5908 /****************************************************************************/
5909 static void
5910 bce_check_msi(void *xsc)
5911 {
5912 	struct bce_softc *sc = xsc;
5913 	struct ifnet *ifp = &sc->arpcom.ac_if;
5914 	struct status_block *sblk = sc->status_block;
5915 	struct bce_tx_ring *txr = &sc->tx_rings[0];
5916 	struct bce_rx_ring *rxr = &sc->rx_rings[0];
5917 
5918 	lwkt_serialize_enter(&sc->main_serialize);
5919 
5920 	KKASSERT(mycpuid == sc->bce_msix[0].msix_cpuid);
5921 
5922 	if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) != IFF_RUNNING) {
5923 		lwkt_serialize_exit(&sc->main_serialize);
5924 		return;
5925 	}
5926 
5927 	if (bce_get_hw_rx_cons(rxr) != rxr->rx_cons ||
5928 	    bce_get_hw_tx_cons(txr) != txr->tx_cons ||
5929 	    (sblk->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
5930 	    (sblk->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE)) {
5931 		if (sc->bce_check_rx_cons == rxr->rx_cons &&
5932 		    sc->bce_check_tx_cons == txr->tx_cons &&
5933 		    sc->bce_check_status_idx == rxr->last_status_idx) {
5934 			uint32_t msi_ctrl;
5935 
5936 			if (!sc->bce_msi_maylose) {
5937 				sc->bce_msi_maylose = TRUE;
5938 				goto done;
5939 			}
5940 
5941 			msi_ctrl = REG_RD(sc, BCE_PCICFG_MSI_CONTROL);
5942 			if (msi_ctrl & BCE_PCICFG_MSI_CONTROL_ENABLE) {
5943 				if (bootverbose)
5944 					if_printf(ifp, "lost MSI\n");
5945 
5946 				REG_WR(sc, BCE_PCICFG_MSI_CONTROL,
5947 				    msi_ctrl & ~BCE_PCICFG_MSI_CONTROL_ENABLE);
5948 				REG_WR(sc, BCE_PCICFG_MSI_CONTROL, msi_ctrl);
5949 
5950 				bce_intr_msi(sc);
5951 			} else if (bootverbose) {
5952 				if_printf(ifp, "MSI may be lost\n");
5953 			}
5954 		}
5955 	}
5956 	sc->bce_msi_maylose = FALSE;
5957 	sc->bce_check_rx_cons = rxr->rx_cons;
5958 	sc->bce_check_tx_cons = txr->tx_cons;
5959 	sc->bce_check_status_idx = rxr->last_status_idx;
5960 
5961 done:
5962 	callout_reset(&sc->bce_ckmsi_callout, BCE_MSI_CKINTVL,
5963 	    bce_check_msi, sc);
5964 	lwkt_serialize_exit(&sc->main_serialize);
5965 }
5966 
5967 /****************************************************************************/
5968 /* Periodic function to perform maintenance tasks.                          */
5969 /*                                                                          */
5970 /* Returns:                                                                 */
5971 /*   Nothing.                                                               */
5972 /****************************************************************************/
5973 static void
5974 bce_tick_serialized(struct bce_softc *sc)
5975 {
5976 	struct mii_data *mii;
5977 
5978 	ASSERT_SERIALIZED(&sc->main_serialize);
5979 
5980 	/* Update the statistics from the hardware statistics block. */
5981 	bce_stats_update(sc);
5982 
5983 	/* Schedule the next tick. */
5984 	callout_reset_bycpu(&sc->bce_tick_callout, hz, bce_tick, sc,
5985 	    sc->bce_timer_cpuid);
5986 
5987 	/* If the link is already up then we're done. */
5988 	if (sc->bce_link)
5989 		return;
5990 
5991 	mii = device_get_softc(sc->bce_miibus);
5992 	mii_tick(mii);
5993 
5994 	/* Check if the link has come up. */
5995 	if ((mii->mii_media_status & IFM_ACTIVE) &&
5996 	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
5997 		int i;
5998 
5999 		sc->bce_link++;
6000 		/* Now that link is up, handle any outstanding TX traffic. */
6001 		for (i = 0; i < sc->tx_ring_cnt; ++i)
6002 			ifsq_devstart_sched(sc->tx_rings[i].ifsq);
6003 	}
6004 }
6005 
6006 static void
6007 bce_tick(void *xsc)
6008 {
6009 	struct bce_softc *sc = xsc;
6010 
6011 	lwkt_serialize_enter(&sc->main_serialize);
6012 	bce_tick_serialized(sc);
6013 	lwkt_serialize_exit(&sc->main_serialize);
6014 }
6015 
6016 /****************************************************************************/
6017 /* Adds any sysctl parameters for tuning or debugging purposes.             */
6018 /*                                                                          */
6019 /* Returns:                                                                 */
6020 /*   Nothing.                                                               */
6021 /****************************************************************************/
6022 static void
6023 bce_add_sysctls(struct bce_softc *sc)
6024 {
6025 	struct sysctl_ctx_list *ctx;
6026 	struct sysctl_oid_list *children;
6027 #if defined(BCE_TSS_DEBUG) || defined(BCE_RSS_DEBUG)
6028 	char node[32];
6029 	int i;
6030 #endif
6031 
6032 	ctx = device_get_sysctl_ctx(sc->bce_dev);
6033 	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->bce_dev));
6034 
6035 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_bds_int",
6036 			CTLTYPE_INT | CTLFLAG_RW,
6037 			sc, 0, bce_sysctl_tx_bds_int, "I",
6038 			"Send max coalesced BD count during interrupt");
6039 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_bds",
6040 			CTLTYPE_INT | CTLFLAG_RW,
6041 			sc, 0, bce_sysctl_tx_bds, "I",
6042 			"Send max coalesced BD count");
6043 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_ticks_int",
6044 			CTLTYPE_INT | CTLFLAG_RW,
6045 			sc, 0, bce_sysctl_tx_ticks_int, "I",
6046 			"Send coalescing ticks during interrupt");
6047 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_ticks",
6048 			CTLTYPE_INT | CTLFLAG_RW,
6049 			sc, 0, bce_sysctl_tx_ticks, "I",
6050 			"Send coalescing ticks");
6051 
6052 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_bds_int",
6053 			CTLTYPE_INT | CTLFLAG_RW,
6054 			sc, 0, bce_sysctl_rx_bds_int, "I",
6055 			"Receive max coalesced BD count during interrupt");
6056 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_bds",
6057 			CTLTYPE_INT | CTLFLAG_RW,
6058 			sc, 0, bce_sysctl_rx_bds, "I",
6059 			"Receive max coalesced BD count");
6060 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_ticks_int",
6061 			CTLTYPE_INT | CTLFLAG_RW,
6062 			sc, 0, bce_sysctl_rx_ticks_int, "I",
6063 			"Receive coalescing ticks during interrupt");
6064 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_ticks",
6065 			CTLTYPE_INT | CTLFLAG_RW,
6066 			sc, 0, bce_sysctl_rx_ticks, "I",
6067 			"Receive coalescing ticks");
6068 
6069 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_rings",
6070 		CTLFLAG_RD, &sc->rx_ring_cnt, 0, "# of RX rings");
6071 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_pages",
6072 		CTLFLAG_RD, &sc->rx_rings[0].rx_pages, 0, "# of RX pages");
6073 
6074 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_rings",
6075 		CTLFLAG_RD, &sc->tx_ring_cnt, 0, "# of TX rings");
6076 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_pages",
6077 		CTLFLAG_RD, &sc->tx_rings[0].tx_pages, 0, "# of TX pages");
6078 
6079 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_wreg",
6080 		CTLFLAG_RW, &sc->tx_rings[0].tx_wreg, 0,
6081 		"# segments before write to hardware registers");
6082 
6083 	if (sc->bce_irq_type == PCI_INTR_TYPE_MSIX) {
6084 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_cpumap",
6085 		    CTLTYPE_OPAQUE | CTLFLAG_RD, sc->tx_rmap, 0,
6086 		    if_ringmap_cpumap_sysctl, "I", "TX ring CPU map");
6087 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_cpumap",
6088 		    CTLTYPE_OPAQUE | CTLFLAG_RD, sc->rx_rmap, 0,
6089 		    if_ringmap_cpumap_sysctl, "I", "RX ring CPU map");
6090 	} else {
6091 #ifdef IFPOLL_ENABLE
6092 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_poll_cpumap",
6093 		    CTLTYPE_OPAQUE | CTLFLAG_RD, sc->tx_rmap, 0,
6094 		    if_ringmap_cpumap_sysctl, "I", "TX poll CPU map");
6095 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_poll_cpumap",
6096 		    CTLTYPE_OPAQUE | CTLFLAG_RD, sc->rx_rmap, 0,
6097 		    if_ringmap_cpumap_sysctl, "I", "RX poll CPU map");
6098 #endif
6099 	}
6100 
6101 #ifdef BCE_RSS_DEBUG
6102 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rss_debug",
6103 	    CTLFLAG_RW, &sc->rss_debug, 0, "RSS debug level");
6104 	for (i = 0; i < sc->rx_ring_cnt; ++i) {
6105 		ksnprintf(node, sizeof(node), "rx%d_pkt", i);
6106 		SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, node,
6107 		    CTLFLAG_RW, &sc->rx_rings[i].rx_pkts,
6108 		    "RXed packets");
6109 	}
6110 #endif
6111 
6112 #ifdef BCE_TSS_DEBUG
6113 	for (i = 0; i < sc->tx_ring_cnt; ++i) {
6114 		ksnprintf(node, sizeof(node), "tx%d_pkt", i);
6115 		SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, node,
6116 		    CTLFLAG_RW, &sc->tx_rings[i].tx_pkts,
6117 		    "TXed packets");
6118 	}
6119 #endif
6120 
6121 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6122 		"stat_IfHCInOctets",
6123 		CTLFLAG_RD, &sc->stat_IfHCInOctets,
6124 		"Bytes received");
6125 
6126 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6127 		"stat_IfHCInBadOctets",
6128 		CTLFLAG_RD, &sc->stat_IfHCInBadOctets,
6129 		"Bad bytes received");
6130 
6131 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6132 		"stat_IfHCOutOctets",
6133 		CTLFLAG_RD, &sc->stat_IfHCOutOctets,
6134 		"Bytes sent");
6135 
6136 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6137 		"stat_IfHCOutBadOctets",
6138 		CTLFLAG_RD, &sc->stat_IfHCOutBadOctets,
6139 		"Bad bytes sent");
6140 
6141 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6142 		"stat_IfHCInUcastPkts",
6143 		CTLFLAG_RD, &sc->stat_IfHCInUcastPkts,
6144 		"Unicast packets received");
6145 
6146 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6147 		"stat_IfHCInMulticastPkts",
6148 		CTLFLAG_RD, &sc->stat_IfHCInMulticastPkts,
6149 		"Multicast packets received");
6150 
6151 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6152 		"stat_IfHCInBroadcastPkts",
6153 		CTLFLAG_RD, &sc->stat_IfHCInBroadcastPkts,
6154 		"Broadcast packets received");
6155 
6156 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6157 		"stat_IfHCOutUcastPkts",
6158 		CTLFLAG_RD, &sc->stat_IfHCOutUcastPkts,
6159 		"Unicast packets sent");
6160 
6161 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6162 		"stat_IfHCOutMulticastPkts",
6163 		CTLFLAG_RD, &sc->stat_IfHCOutMulticastPkts,
6164 		"Multicast packets sent");
6165 
6166 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6167 		"stat_IfHCOutBroadcastPkts",
6168 		CTLFLAG_RD, &sc->stat_IfHCOutBroadcastPkts,
6169 		"Broadcast packets sent");
6170 
6171 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6172 		"stat_emac_tx_stat_dot3statsinternalmactransmiterrors",
6173 		CTLFLAG_RD, &sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors,
6174 		0, "Internal MAC transmit errors");
6175 
6176 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6177 		"stat_Dot3StatsCarrierSenseErrors",
6178 		CTLFLAG_RD, &sc->stat_Dot3StatsCarrierSenseErrors,
6179 		0, "Carrier sense errors");
6180 
6181 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6182 		"stat_Dot3StatsFCSErrors",
6183 		CTLFLAG_RD, &sc->stat_Dot3StatsFCSErrors,
6184 		0, "Frame check sequence errors");
6185 
6186 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6187 		"stat_Dot3StatsAlignmentErrors",
6188 		CTLFLAG_RD, &sc->stat_Dot3StatsAlignmentErrors,
6189 		0, "Alignment errors");
6190 
6191 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6192 		"stat_Dot3StatsSingleCollisionFrames",
6193 		CTLFLAG_RD, &sc->stat_Dot3StatsSingleCollisionFrames,
6194 		0, "Single Collision Frames");
6195 
6196 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6197 		"stat_Dot3StatsMultipleCollisionFrames",
6198 		CTLFLAG_RD, &sc->stat_Dot3StatsMultipleCollisionFrames,
6199 		0, "Multiple Collision Frames");
6200 
6201 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6202 		"stat_Dot3StatsDeferredTransmissions",
6203 		CTLFLAG_RD, &sc->stat_Dot3StatsDeferredTransmissions,
6204 		0, "Deferred Transmissions");
6205 
6206 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6207 		"stat_Dot3StatsExcessiveCollisions",
6208 		CTLFLAG_RD, &sc->stat_Dot3StatsExcessiveCollisions,
6209 		0, "Excessive Collisions");
6210 
6211 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6212 		"stat_Dot3StatsLateCollisions",
6213 		CTLFLAG_RD, &sc->stat_Dot3StatsLateCollisions,
6214 		0, "Late Collisions");
6215 
6216 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6217 		"stat_EtherStatsCollisions",
6218 		CTLFLAG_RD, &sc->stat_EtherStatsCollisions,
6219 		0, "Collisions");
6220 
6221 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6222 		"stat_EtherStatsFragments",
6223 		CTLFLAG_RD, &sc->stat_EtherStatsFragments,
6224 		0, "Fragments");
6225 
6226 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6227 		"stat_EtherStatsJabbers",
6228 		CTLFLAG_RD, &sc->stat_EtherStatsJabbers,
6229 		0, "Jabbers");
6230 
6231 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6232 		"stat_EtherStatsUndersizePkts",
6233 		CTLFLAG_RD, &sc->stat_EtherStatsUndersizePkts,
6234 		0, "Undersize packets");
6235 
6236 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6237 		"stat_EtherStatsOverrsizePkts",
6238 		CTLFLAG_RD, &sc->stat_EtherStatsOverrsizePkts,
6239 		0, "Oversize packets");
6240 
6241 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6242 		"stat_EtherStatsPktsRx64Octets",
6243 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx64Octets,
6244 		0, "Bytes received in 64 byte packets");
6245 
6246 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6247 		"stat_EtherStatsPktsRx65Octetsto127Octets",
6248 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx65Octetsto127Octets,
6249 		0, "Bytes received in 65 to 127 byte packets");
6250 
6251 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6252 		"stat_EtherStatsPktsRx128Octetsto255Octets",
6253 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx128Octetsto255Octets,
6254 		0, "Bytes received in 128 to 255 byte packets");
6255 
6256 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6257 		"stat_EtherStatsPktsRx256Octetsto511Octets",
6258 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx256Octetsto511Octets,
6259 		0, "Bytes received in 256 to 511 byte packets");
6260 
6261 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6262 		"stat_EtherStatsPktsRx512Octetsto1023Octets",
6263 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx512Octetsto1023Octets,
6264 		0, "Bytes received in 512 to 1023 byte packets");
6265 
6266 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6267 		"stat_EtherStatsPktsRx1024Octetsto1522Octets",
6268 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1024Octetsto1522Octets,
6269 		0, "Bytes received in 1024 to 1522 byte packets");
6270 
6271 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6272 		"stat_EtherStatsPktsRx1523Octetsto9022Octets",
6273 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1523Octetsto9022Octets,
6274 		0, "Bytes received in 1523 to 9022 byte packets");
6275 
6276 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6277 		"stat_EtherStatsPktsTx64Octets",
6278 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx64Octets,
6279 		0, "Bytes sent in 64 byte packets");
6280 
6281 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6282 		"stat_EtherStatsPktsTx65Octetsto127Octets",
6283 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx65Octetsto127Octets,
6284 		0, "Bytes sent in 65 to 127 byte packets");
6285 
6286 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6287 		"stat_EtherStatsPktsTx128Octetsto255Octets",
6288 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx128Octetsto255Octets,
6289 		0, "Bytes sent in 128 to 255 byte packets");
6290 
6291 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6292 		"stat_EtherStatsPktsTx256Octetsto511Octets",
6293 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx256Octetsto511Octets,
6294 		0, "Bytes sent in 256 to 511 byte packets");
6295 
6296 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6297 		"stat_EtherStatsPktsTx512Octetsto1023Octets",
6298 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx512Octetsto1023Octets,
6299 		0, "Bytes sent in 512 to 1023 byte packets");
6300 
6301 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6302 		"stat_EtherStatsPktsTx1024Octetsto1522Octets",
6303 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1024Octetsto1522Octets,
6304 		0, "Bytes sent in 1024 to 1522 byte packets");
6305 
6306 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6307 		"stat_EtherStatsPktsTx1523Octetsto9022Octets",
6308 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1523Octetsto9022Octets,
6309 		0, "Bytes sent in 1523 to 9022 byte packets");
6310 
6311 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6312 		"stat_XonPauseFramesReceived",
6313 		CTLFLAG_RD, &sc->stat_XonPauseFramesReceived,
6314 		0, "XON pause frames received");
6315 
6316 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6317 		"stat_XoffPauseFramesReceived",
6318 		CTLFLAG_RD, &sc->stat_XoffPauseFramesReceived,
6319 		0, "XOFF pause frames received");
6320 
6321 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6322 		"stat_OutXonSent",
6323 		CTLFLAG_RD, &sc->stat_OutXonSent,
6324 		0, "XON pause frames sent");
6325 
6326 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6327 		"stat_OutXoffSent",
6328 		CTLFLAG_RD, &sc->stat_OutXoffSent,
6329 		0, "XOFF pause frames sent");
6330 
6331 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6332 		"stat_FlowControlDone",
6333 		CTLFLAG_RD, &sc->stat_FlowControlDone,
6334 		0, "Flow control done");
6335 
6336 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6337 		"stat_MacControlFramesReceived",
6338 		CTLFLAG_RD, &sc->stat_MacControlFramesReceived,
6339 		0, "MAC control frames received");
6340 
6341 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6342 		"stat_XoffStateEntered",
6343 		CTLFLAG_RD, &sc->stat_XoffStateEntered,
6344 		0, "XOFF state entered");
6345 
6346 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6347 		"stat_IfInFramesL2FilterDiscards",
6348 		CTLFLAG_RD, &sc->stat_IfInFramesL2FilterDiscards,
6349 		0, "Received L2 packets discarded");
6350 
6351 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6352 		"stat_IfInRuleCheckerDiscards",
6353 		CTLFLAG_RD, &sc->stat_IfInRuleCheckerDiscards,
6354 		0, "Received packets discarded by rule");
6355 
6356 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6357 		"stat_IfInFTQDiscards",
6358 		CTLFLAG_RD, &sc->stat_IfInFTQDiscards,
6359 		0, "Received packet FTQ discards");
6360 
6361 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6362 		"stat_IfInMBUFDiscards",
6363 		CTLFLAG_RD, &sc->stat_IfInMBUFDiscards,
6364 		0, "Received packets discarded due to lack of controller buffer memory");
6365 
6366 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6367 		"stat_IfInRuleCheckerP4Hit",
6368 		CTLFLAG_RD, &sc->stat_IfInRuleCheckerP4Hit,
6369 		0, "Rule checker hits for received packets");
6370 
6371 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6372 		"stat_CatchupInRuleCheckerDiscards",
6373 		CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerDiscards,
6374 		0, "Received packets discarded in Catchup path");
6375 
6376 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6377 		"stat_CatchupInFTQDiscards",
6378 		CTLFLAG_RD, &sc->stat_CatchupInFTQDiscards,
6379 		0, "Received packets discarded in FTQ in Catchup path");
6380 
6381 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6382 		"stat_CatchupInMBUFDiscards",
6383 		CTLFLAG_RD, &sc->stat_CatchupInMBUFDiscards,
6384 		0, "Received packets discarded due to lack of controller buffer memory in Catchup path");
6385 
6386 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6387 		"stat_CatchupInRuleCheckerP4Hit",
6388 		CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerP4Hit,
6389 		0, "Rule checker hits for received packets in Catchup path");
6390 
6391 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6392 		"com_no_buffers",
6393 		CTLFLAG_RD, &sc->com_no_buffers,
6394 		0, "Valid packets received but no RX buffers available");
6395 }
6396 
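/*
 * The coalescing tunables above live under the device sysctl tree and
 * funnel through the handlers below, e.g. (hypothetical unit number
 * and values):
 *
 *   sysctl dev.bce.0.tx_bds_int=24
 *   sysctl dev.bce.0.rx_ticks=36
 *
 * bce_sysctl_coal_change() rejects negative values and commits the
 * rest to the hardware via bce_coal_change().
 */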
6397 static int
6398 bce_sysctl_tx_bds_int(SYSCTL_HANDLER_ARGS)
6399 {
6400 	struct bce_softc *sc = arg1;
6401 
6402 	return bce_sysctl_coal_change(oidp, arg1, arg2, req,
6403 			&sc->bce_tx_quick_cons_trip_int,
6404 			BCE_COALMASK_TX_BDS_INT);
6405 }
6406 
6407 static int
6408 bce_sysctl_tx_bds(SYSCTL_HANDLER_ARGS)
6409 {
6410 	struct bce_softc *sc = arg1;
6411 
6412 	return bce_sysctl_coal_change(oidp, arg1, arg2, req,
6413 			&sc->bce_tx_quick_cons_trip,
6414 			BCE_COALMASK_TX_BDS);
6415 }
6416 
6417 static int
6418 bce_sysctl_tx_ticks_int(SYSCTL_HANDLER_ARGS)
6419 {
6420 	struct bce_softc *sc = arg1;
6421 
6422 	return bce_sysctl_coal_change(oidp, arg1, arg2, req,
6423 			&sc->bce_tx_ticks_int,
6424 			BCE_COALMASK_TX_TICKS_INT);
6425 }
6426 
6427 static int
6428 bce_sysctl_tx_ticks(SYSCTL_HANDLER_ARGS)
6429 {
6430 	struct bce_softc *sc = arg1;
6431 
6432 	return bce_sysctl_coal_change(oidp, arg1, arg2, req,
6433 			&sc->bce_tx_ticks,
6434 			BCE_COALMASK_TX_TICKS);
6435 }
6436 
6437 static int
6438 bce_sysctl_rx_bds_int(SYSCTL_HANDLER_ARGS)
6439 {
6440 	struct bce_softc *sc = arg1;
6441 
6442 	return bce_sysctl_coal_change(oidp, arg1, arg2, req,
6443 			&sc->bce_rx_quick_cons_trip_int,
6444 			BCE_COALMASK_RX_BDS_INT);
6445 }
6446 
6447 static int
6448 bce_sysctl_rx_bds(SYSCTL_HANDLER_ARGS)
6449 {
6450 	struct bce_softc *sc = arg1;
6451 
6452 	return bce_sysctl_coal_change(oidp, arg1, arg2, req,
6453 			&sc->bce_rx_quick_cons_trip,
6454 			BCE_COALMASK_RX_BDS);
6455 }
6456 
6457 static int
6458 bce_sysctl_rx_ticks_int(SYSCTL_HANDLER_ARGS)
6459 {
6460 	struct bce_softc *sc = arg1;
6461 
6462 	return bce_sysctl_coal_change(oidp, arg1, arg2, req,
6463 			&sc->bce_rx_ticks_int,
6464 			BCE_COALMASK_RX_TICKS_INT);
6465 }
6466 
6467 static int
6468 bce_sysctl_rx_ticks(SYSCTL_HANDLER_ARGS)
6469 {
6470 	struct bce_softc *sc = arg1;
6471 
6472 	return bce_sysctl_coal_change(oidp, arg1, arg2, req,
6473 			&sc->bce_rx_ticks,
6474 			BCE_COALMASK_RX_TICKS);
6475 }
6476 
6477 static int
6478 bce_sysctl_coal_change(SYSCTL_HANDLER_ARGS, uint32_t *coal,
6479     uint32_t coalchg_mask)
6480 {
6481 	struct bce_softc *sc = arg1;
6482 	struct ifnet *ifp = &sc->arpcom.ac_if;
6483 	int error = 0, v;
6484 
6485 	ifnet_serialize_all(ifp);
6486 
6487 	v = *coal;
6488 	error = sysctl_handle_int(oidp, &v, 0, req);
6489 	if (!error && req->newptr != NULL) {
6490 		if (v < 0) {
6491 			error = EINVAL;
6492 		} else {
6493 			*coal = v;
6494 			sc->bce_coalchg_mask |= coalchg_mask;
6495 
6496 			/* Commit changes */
6497 			bce_coal_change(sc);
6498 		}
6499 	}
6500 
6501 	ifnet_deserialize_all(ifp);
6502 	return error;
6503 }
6504 
6505 static void
6506 bce_coal_change(struct bce_softc *sc)
6507 {
6508 	struct ifnet *ifp = &sc->arpcom.ac_if;
6509 	int i;
6510 
6511 	ASSERT_SERIALIZED(&sc->main_serialize);
6512 
6513 	if ((ifp->if_flags & IFF_RUNNING) == 0) {
6514 		sc->bce_coalchg_mask = 0;
6515 		return;
6516 	}
6517 
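	/*
	 * Each coalescing register packs two 16-bit values: the
	 * during-interrupt parameter in bits 31-16 and the normal
	 * parameter in bits 15-0.  Ring 0 uses the default HC
	 * registers; each additional ring programs the same offsets
	 * within its own status block config area at
	 * BCE_HC_SB_CONFIG_1 + (ring - 1) * BCE_HC_SB_CONFIG_SIZE.
	 */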
6518 	if (sc->bce_coalchg_mask &
6519 	    (BCE_COALMASK_TX_BDS | BCE_COALMASK_TX_BDS_INT)) {
6520 		REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
6521 		       (sc->bce_tx_quick_cons_trip_int << 16) |
6522 		       sc->bce_tx_quick_cons_trip);
6523 		for (i = 1; i < sc->rx_ring_cnt; ++i) {
6524 			uint32_t base;
6525 
6526 			base = ((i - 1) * BCE_HC_SB_CONFIG_SIZE) +
6527 			    BCE_HC_SB_CONFIG_1;
6528 			REG_WR(sc, base + BCE_HC_TX_QUICK_CONS_TRIP_OFF,
6529 			    (sc->bce_tx_quick_cons_trip_int << 16) |
6530 			    sc->bce_tx_quick_cons_trip);
6531 		}
6532 		if (bootverbose) {
6533 			if_printf(ifp, "tx_bds %u, tx_bds_int %u\n",
6534 				  sc->bce_tx_quick_cons_trip,
6535 				  sc->bce_tx_quick_cons_trip_int);
6536 		}
6537 	}
6538 
6539 	if (sc->bce_coalchg_mask &
6540 	    (BCE_COALMASK_TX_TICKS | BCE_COALMASK_TX_TICKS_INT)) {
6541 		REG_WR(sc, BCE_HC_TX_TICKS,
6542 		       (sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks);
6543 		for (i = 1; i < sc->rx_ring_cnt; ++i) {
6544 			uint32_t base;
6545 
6546 			base = ((i - 1) * BCE_HC_SB_CONFIG_SIZE) +
6547 			    BCE_HC_SB_CONFIG_1;
6548 			REG_WR(sc, base + BCE_HC_TX_TICKS_OFF,
6549 			    (sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks);
6550 		}
6551 		if (bootverbose) {
6552 			if_printf(ifp, "tx_ticks %u, tx_ticks_int %u\n",
6553 				  sc->bce_tx_ticks, sc->bce_tx_ticks_int);
6554 		}
6555 	}
6556 
6557 	if (sc->bce_coalchg_mask &
6558 	    (BCE_COALMASK_RX_BDS | BCE_COALMASK_RX_BDS_INT)) {
6559 		REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
6560 		       (sc->bce_rx_quick_cons_trip_int << 16) |
6561 		       sc->bce_rx_quick_cons_trip);
6562 		for (i = 1; i < sc->rx_ring_cnt; ++i) {
6563 			uint32_t base;
6564 
6565 			base = ((i - 1) * BCE_HC_SB_CONFIG_SIZE) +
6566 			    BCE_HC_SB_CONFIG_1;
6567 			REG_WR(sc, base + BCE_HC_RX_QUICK_CONS_TRIP_OFF,
6568 			    (sc->bce_rx_quick_cons_trip_int << 16) |
6569 			    sc->bce_rx_quick_cons_trip);
6570 		}
6571 		if (bootverbose) {
6572 			if_printf(ifp, "rx_bds %u, rx_bds_int %u\n",
6573 				  sc->bce_rx_quick_cons_trip,
6574 				  sc->bce_rx_quick_cons_trip_int);
6575 		}
6576 	}
6577 
6578 	if (sc->bce_coalchg_mask &
6579 	    (BCE_COALMASK_RX_TICKS | BCE_COALMASK_RX_TICKS_INT)) {
6580 		REG_WR(sc, BCE_HC_RX_TICKS,
6581 		       (sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks);
6582 		for (i = 1; i < sc->rx_ring_cnt; ++i) {
6583 			uint32_t base;
6584 
6585 			base = ((i - 1) * BCE_HC_SB_CONFIG_SIZE) +
6586 			    BCE_HC_SB_CONFIG_1;
6587 			REG_WR(sc, base + BCE_HC_RX_TICKS_OFF,
6588 			    (sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks);
6589 		}
6590 		if (bootverbose) {
6591 			if_printf(ifp, "rx_ticks %u, rx_ticks_int %u\n",
6592 				  sc->bce_rx_ticks, sc->bce_rx_ticks_int);
6593 		}
6594 	}
6595 
6596 	sc->bce_coalchg_mask = 0;
6597 }
6598 
6599 static int
6600 bce_tso_setup(struct bce_tx_ring *txr, struct mbuf **mp,
6601     uint16_t *flags0, uint16_t *mss0)
6602 {
6603 	struct mbuf *m;
6604 	uint16_t flags;
6605 	int thoff, iphlen, hoff;
6606 
6607 	m = *mp;
6608 	KASSERT(M_WRITABLE(m), ("TSO mbuf not writable"));
6609 
6610 	hoff = m->m_pkthdr.csum_lhlen;
6611 	iphlen = m->m_pkthdr.csum_iphlen;
6612 	thoff = m->m_pkthdr.csum_thlen;
6613 
6614 	KASSERT(hoff >= sizeof(struct ether_header),
6615 	    ("invalid ether header len %d", hoff));
6616 	KASSERT(iphlen >= sizeof(struct ip),
6617 	    ("invalid ip header len %d", iphlen));
6618 	KASSERT(thoff >= sizeof(struct tcphdr),
6619 	    ("invalid tcp header len %d", thoff));
6620 
6621 	if (__predict_false(m->m_len < hoff + iphlen + thoff)) {
6622 		m = m_pullup(m, hoff + iphlen + thoff);
6623 		if (m == NULL) {
6624 			*mp = NULL;
6625 			return ENOBUFS;
6626 		}
6627 		*mp = m;
6628 	}
6629 
6630 	/* Set the LSO flag in the TX BD */
6631 	flags = TX_BD_FLAGS_SW_LSO;
6632 
6633 	/* Set the length of IP + TCP options (in 32 bit words) */
6634 	flags |= (((iphlen + thoff -
6635 	    sizeof(struct ip) - sizeof(struct tcphdr)) >> 2) << 8);
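	/*
	 * E.g. a 24 byte IP header (one option word) plus a 32 byte
	 * TCP header (three option words) yield
	 * (24 + 32 - 20 - 20) >> 2 == 4 option words, shifted left
	 * by 8 into the BD flags.
	 */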
6636 
6637 	*mss0 = htole16(m->m_pkthdr.tso_segsz);
6638 	*flags0 = flags;
6639 
6640 	return 0;
6641 }
6642 
6643 static void
6644 bce_setup_serialize(struct bce_softc *sc)
6645 {
6646 	int i, j;
6647 
6648 	/*
6649 	 * Allocate serializer array
6650 	 */
6651 
6652 	/* Main + TX + RX */
6653 	sc->serialize_cnt = 1 + sc->tx_ring_cnt + sc->rx_ring_cnt;
6654 
6655 	sc->serializes =
6656 	    kmalloc(sc->serialize_cnt * sizeof(struct lwkt_serialize *),
6657 	        M_DEVBUF, M_WAITOK | M_ZERO);
6658 
6659 	/*
6660 	 * Setup serializers
6661 	 *
6662 	 * NOTE: Order is critical
6663 	 */
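	/*
	 * Resulting layout: serializes[0] is the main serializer,
	 * followed by all RX ring serializers, then all TX ring
	 * serializers.  bce_serialize_skipmain() and
	 * bce_deserialize_skipmain() rely on the main serializer
	 * being the first entry.
	 */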
6664 
6665 	i = 0;
6666 
6667 	KKASSERT(i < sc->serialize_cnt);
6668 	sc->serializes[i++] = &sc->main_serialize;
6669 
6670 	for (j = 0; j < sc->rx_ring_cnt; ++j) {
6671 		KKASSERT(i < sc->serialize_cnt);
6672 		sc->serializes[i++] = &sc->rx_rings[j].rx_serialize;
6673 	}
6674 
6675 	for (j = 0; j < sc->tx_ring_cnt; ++j) {
6676 		KKASSERT(i < sc->serialize_cnt);
6677 		sc->serializes[i++] = &sc->tx_rings[j].tx_serialize;
6678 	}
6679 
6680 	KKASSERT(i == sc->serialize_cnt);
6681 }
6682 
6683 static void
6684 bce_serialize(struct ifnet *ifp, enum ifnet_serialize slz)
6685 {
6686 	struct bce_softc *sc = ifp->if_softc;
6687 
6688 	ifnet_serialize_array_enter(sc->serializes, sc->serialize_cnt, slz);
6689 }
6690 
6691 static void
6692 bce_deserialize(struct ifnet *ifp, enum ifnet_serialize slz)
6693 {
6694 	struct bce_softc *sc = ifp->if_softc;
6695 
6696 	ifnet_serialize_array_exit(sc->serializes, sc->serialize_cnt, slz);
6697 }
6698 
6699 static int
6700 bce_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz)
6701 {
6702 	struct bce_softc *sc = ifp->if_softc;
6703 
6704 	return ifnet_serialize_array_try(sc->serializes, sc->serialize_cnt,
6705 	    slz);
6706 }
6707 
6708 #ifdef INVARIANTS
6709 
6710 static void
6711 bce_serialize_assert(struct ifnet *ifp, enum ifnet_serialize slz,
6712     boolean_t serialized)
6713 {
6714 	struct bce_softc *sc = ifp->if_softc;
6715 
6716 	ifnet_serialize_array_assert(sc->serializes, sc->serialize_cnt,
6717 	    slz, serialized);
6718 }
6719 
6720 #endif	/* INVARIANTS */
6721 
6722 static void
6723 bce_serialize_skipmain(struct bce_softc *sc)
6724 {
6725 	lwkt_serialize_array_enter(sc->serializes, sc->serialize_cnt, 1);
6726 }
6727 
6728 static void
6729 bce_deserialize_skipmain(struct bce_softc *sc)
6730 {
6731 	lwkt_serialize_array_exit(sc->serializes, sc->serialize_cnt, 1);
6732 }
6733 
6734 static void
6735 bce_set_timer_cpuid(struct bce_softc *sc, boolean_t polling)
6736 {
6737 	if (polling)
6738 		sc->bce_timer_cpuid = 0; /* XXX */
6739 	else
6740 		sc->bce_timer_cpuid = sc->bce_msix[0].msix_cpuid;
6741 }
6742 
6743 static int
6744 bce_alloc_intr(struct bce_softc *sc)
6745 {
6746 	u_int irq_flags;
6747 
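	/*
	 * Prefer MSI-X.  If bce_try_alloc_msix() could not switch
	 * bce_irq_type to PCI_INTR_TYPE_MSIX, fall back to a single
	 * MSI or legacy INTx vector through pci_alloc_1intr().
	 */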
6748 	bce_try_alloc_msix(sc);
6749 	if (sc->bce_irq_type == PCI_INTR_TYPE_MSIX)
6750 		return 0;
6751 
6752 	sc->bce_irq_type = pci_alloc_1intr(sc->bce_dev, bce_msi_enable,
6753 	    &sc->bce_irq_rid, &irq_flags);
6754 
6755 	sc->bce_res_irq = bus_alloc_resource_any(sc->bce_dev, SYS_RES_IRQ,
6756 	    &sc->bce_irq_rid, irq_flags);
6757 	if (sc->bce_res_irq == NULL) {
6758 		device_printf(sc->bce_dev, "PCI map interrupt failed\n");
6759 		return ENXIO;
6760 	}
6761 	sc->bce_msix[0].msix_cpuid = rman_get_cpuid(sc->bce_res_irq);
6762 	sc->bce_msix[0].msix_serialize = &sc->main_serialize;
6763 
6764 	return 0;
6765 }
6766 
6767 static void
6768 bce_try_alloc_msix(struct bce_softc *sc)
6769 {
6770 	struct bce_msix_data *msix;
6771 	int i, error;
6772 	boolean_t setup = FALSE;
6773 
6774 	if (sc->rx_ring_cnt == 1)
6775 		return;
6776 
6777 	msix = &sc->bce_msix[0];
6778 	msix->msix_serialize = &sc->main_serialize;
6779 	msix->msix_func = bce_intr_msi_oneshot;
6780 	msix->msix_arg = sc;
6781 	msix->msix_cpuid = if_ringmap_cpumap(sc->rx_rmap, 0);
6782 	KKASSERT(msix->msix_cpuid < netisr_ncpus);
6783 	ksnprintf(msix->msix_desc, sizeof(msix->msix_desc), "%s combo",
6784 	    device_get_nameunit(sc->bce_dev));
6785 
6786 	for (i = 1; i < sc->rx_ring_cnt; ++i) {
6787 		struct bce_rx_ring *rxr = &sc->rx_rings[i];
6788 
6789 		msix = &sc->bce_msix[i];
6790 
6791 		msix->msix_serialize = &rxr->rx_serialize;
6792 		msix->msix_arg = rxr;
6793 		msix->msix_cpuid = if_ringmap_cpumap(sc->rx_rmap,
6794 		    i % sc->rx_ring_cnt2);
6795 		KKASSERT(msix->msix_cpuid < netisr_ncpus);
6796 
6797 		if (i < sc->tx_ring_cnt) {
6798 			msix->msix_func = bce_intr_msix_rxtx;
6799 			ksnprintf(msix->msix_desc, sizeof(msix->msix_desc),
6800 			    "%s rxtx%d", device_get_nameunit(sc->bce_dev), i);
6801 		} else {
6802 			msix->msix_func = bce_intr_msix_rx;
6803 			ksnprintf(msix->msix_desc, sizeof(msix->msix_desc),
6804 			    "%s rx%d", device_get_nameunit(sc->bce_dev), i);
6805 		}
6806 	}
6807 
6808 	/*
6809 	 * Setup MSI-X table
6810 	 */
6811 	bce_setup_msix_table(sc);
6812 	REG_WR(sc, BCE_PCI_MSIX_CONTROL, BCE_MSIX_MAX - 1);
6813 	REG_WR(sc, BCE_PCI_MSIX_TBL_OFF_BIR, BCE_PCI_GRC_WINDOW2_BASE);
6814 	REG_WR(sc, BCE_PCI_MSIX_PBA_OFF_BIT, BCE_PCI_GRC_WINDOW3_BASE);
6815 	/* Flush */
6816 	REG_RD(sc, BCE_PCI_MSIX_CONTROL);
6817 
6818 	error = pci_setup_msix(sc->bce_dev);
6819 	if (error) {
6820 		device_printf(sc->bce_dev, "Setup MSI-X failed\n");
6821 		goto back;
6822 	}
6823 	setup = TRUE;
6824 
6825 	for (i = 0; i < sc->rx_ring_cnt; ++i) {
6826 		msix = &sc->bce_msix[i];
6827 
6828 		error = pci_alloc_msix_vector(sc->bce_dev, i, &msix->msix_rid,
6829 		    msix->msix_cpuid);
6830 		if (error) {
6831 			device_printf(sc->bce_dev,
6832 			    "Unable to allocate MSI-X %d on cpu%d\n",
6833 			    i, msix->msix_cpuid);
6834 			goto back;
6835 		}
6836 
6837 		msix->msix_res = bus_alloc_resource_any(sc->bce_dev,
6838 		    SYS_RES_IRQ, &msix->msix_rid, RF_ACTIVE);
6839 		if (msix->msix_res == NULL) {
6840 			device_printf(sc->bce_dev,
6841 			    "Unable to allocate MSI-X %d resource\n", i);
6842 			error = ENOMEM;
6843 			goto back;
6844 		}
6845 	}
6846 
6847 	pci_enable_msix(sc->bce_dev);
6848 	sc->bce_irq_type = PCI_INTR_TYPE_MSIX;
6849 back:
6850 	if (error)
6851 		bce_free_msix(sc, setup);
6852 }
6853 
6854 static void
6855 bce_setup_ring_cnt(struct bce_softc *sc)
6856 {
6857 	int msix_enable, msix_cnt, msix_ring;
6858 	int ring_max, ring_cnt;
6859 
6860 	sc->rx_rmap = if_ringmap_alloc(sc->bce_dev, 1, 1);
6861 
6862 	if (BCE_CHIP_NUM(sc) != BCE_CHIP_NUM_5709 &&
6863 	    BCE_CHIP_NUM(sc) != BCE_CHIP_NUM_5716)
6864 		goto skip_rx;
6865 
6866 	msix_enable = device_getenv_int(sc->bce_dev, "msix.enable",
6867 	    bce_msix_enable);
6868 	if (!msix_enable)
6869 		goto skip_rx;
6870 
6871 	if (netisr_ncpus == 1)
6872 		goto skip_rx;
6873 
6874 	/*
6875 	 * One extra RX ring will be needed (see below), so make sure
6876 	 * that there are enough MSI-X vectors.
6877 	 */
6878 	msix_cnt = pci_msix_count(sc->bce_dev);
6879 	if (msix_cnt <= 2)
6880 		goto skip_rx;
6881 	msix_ring = msix_cnt - 1;
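	/*
	 * E.g. with msix_cnt == 9, msix_ring == 8 caps the effective
	 * RSS RX ring count at 8; including the extra RX ring (see
	 * below), rx_ring_cnt may then grow to 9, one MSI-X vector
	 * per RX ring.
	 */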
6882 
6883 	/*
6884 	 * Setup RX ring count
6885 	 */
6886 	ring_max = BCE_RX_RING_MAX;
6887 	if (ring_max > msix_ring)
6888 		ring_max = msix_ring;
6889 	ring_cnt = device_getenv_int(sc->bce_dev, "rx_rings", bce_rx_rings);
6890 
6891 	if_ringmap_free(sc->rx_rmap);
6892 	sc->rx_rmap = if_ringmap_alloc(sc->bce_dev, ring_cnt, ring_max);
6893 
6894 skip_rx:
6895 	sc->rx_ring_cnt2 = if_ringmap_count(sc->rx_rmap);
6896 
6897 	/*
6898 	 * Setup TX ring count
6899 	 *
6900 	 * NOTE:
6901 	 * TX ring count must be less than the effective RSS RX ring
6902 	 * count, since we use RX ring software data struct to save
6903 	 * status index and various other MSI-X related stuffs.
6904 	 */
6905 	ring_max = BCE_TX_RING_MAX;
6906 	if (ring_max > sc->rx_ring_cnt2)
6907 		ring_max = sc->rx_ring_cnt2;
6908 	ring_cnt = device_getenv_int(sc->bce_dev, "tx_rings", bce_tx_rings);
6909 
6910 	sc->tx_rmap = if_ringmap_alloc(sc->bce_dev, ring_cnt, ring_max);
6911 	if_ringmap_align(sc->bce_dev, sc->rx_rmap, sc->tx_rmap);
6912 
6913 	sc->tx_ring_cnt = if_ringmap_count(sc->tx_rmap);
6914 
6915 	if (sc->rx_ring_cnt2 == 1) {
6916 		/*
6917 		 * Don't use MSI-X if the effective RX ring count is 1:
6918 		 * the TX ring count will then also be 1, and this RX
6919 		 * ring and the TX ring must be bundled into one MSI-X
6920 		 * vector, so the hot path would be exactly the same as
6921 		 * with MSI.  Besides, the first RX ring would have to be
6922 		 * fully populated while only accepting packets whose RSS
6923 		 * hash can't be calculated, e.g. ARP packets, which
6924 		 * wastes resources.
6925 		 */
6926 		sc->rx_ring_cnt = 1;
6927 	} else {
6928 		/*
6929 		 * One extra RX ring is allocated, since the first RX ring
6930 		 * could not be used for RSS hashed packets whose masked
6931 		 * hash is 0.  The first RX ring is only used for packets
6932 		 * whose RSS hash could not be calculated, e.g. ARP packets.
6933 		 * This extra RX ring will be used for packets whose masked
6934 		 * hash is 0.  The effective RX ring count involved in RSS
6935 		 * is still sc->rx_ring_cnt2.
6936 		 */
6937 		sc->rx_ring_cnt = sc->rx_ring_cnt2 + 1;
6938 	}
6939 }
6940 
6941 static void
6942 bce_free_msix(struct bce_softc *sc, boolean_t setup)
6943 {
6944 	int i;
6945 
6946 	KKASSERT(sc->rx_ring_cnt > 1);
6947 
6948 	for (i = 0; i < sc->rx_ring_cnt; ++i) {
6949 		struct bce_msix_data *msix = &sc->bce_msix[i];
6950 
6951 		if (msix->msix_res != NULL) {
6952 			bus_release_resource(sc->bce_dev, SYS_RES_IRQ,
6953 			    msix->msix_rid, msix->msix_res);
6954 		}
6955 		if (msix->msix_rid >= 0)
6956 			pci_release_msix_vector(sc->bce_dev, msix->msix_rid);
6957 	}
6958 	if (setup)
6959 		pci_teardown_msix(sc->bce_dev);
6960 }
6961 
6962 static void
6963 bce_free_intr(struct bce_softc *sc)
6964 {
6965 	if (sc->bce_irq_type != PCI_INTR_TYPE_MSIX) {
6966 		if (sc->bce_res_irq != NULL) {
6967 			bus_release_resource(sc->bce_dev, SYS_RES_IRQ,
6968 			    sc->bce_irq_rid, sc->bce_res_irq);
6969 		}
6970 		if (sc->bce_irq_type == PCI_INTR_TYPE_MSI)
6971 			pci_release_msi(sc->bce_dev);
6972 	} else {
6973 		bce_free_msix(sc, TRUE);
6974 	}
6975 }
6976 
6977 static void
6978 bce_setup_msix_table(struct bce_softc *sc)
6979 {
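	/*
	 * Use separate GRC windows: point window 2 at the MSI-X
	 * table and window 3 at the MSI-X PBA, so both can be
	 * programmed through the device's register window.
	 */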
6980 	REG_WR(sc, BCE_PCI_GRC_WINDOW_ADDR, BCE_PCI_GRC_WINDOW_ADDR_SEP_WIN);
6981 	REG_WR(sc, BCE_PCI_GRC_WINDOW2_ADDR, BCE_MSIX_TABLE_ADDR);
6982 	REG_WR(sc, BCE_PCI_GRC_WINDOW3_ADDR, BCE_MSIX_PBA_ADDR);
6983 }
6984 
6985 static int
6986 bce_setup_intr(struct bce_softc *sc)
6987 {
6988 	void (*irq_handle)(void *);
6989 	int error;
6990 
6991 	if (sc->bce_irq_type == PCI_INTR_TYPE_MSIX)
6992 		return bce_setup_msix(sc);
6993 
6994 	if (sc->bce_irq_type == PCI_INTR_TYPE_LEGACY) {
6995 		irq_handle = bce_intr_legacy;
6996 	} else if (sc->bce_irq_type == PCI_INTR_TYPE_MSI) {
6997 		if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
6998 		    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
6999 			irq_handle = bce_intr_msi_oneshot;
7000 			sc->bce_flags |= BCE_ONESHOT_MSI_FLAG;
7001 		} else {
7002 			irq_handle = bce_intr_msi;
7003 			sc->bce_flags |= BCE_CHECK_MSI_FLAG;
7004 		}
7005 	} else {
7006 		panic("%s: unsupported intr type %d",
7007 		    device_get_nameunit(sc->bce_dev), sc->bce_irq_type);
7008 	}
7009 
7010 	error = bus_setup_intr(sc->bce_dev, sc->bce_res_irq, INTR_MPSAFE,
7011 	    irq_handle, sc, &sc->bce_intrhand, &sc->main_serialize);
7012 	if (error != 0) {
7013 		device_printf(sc->bce_dev, "Failed to setup IRQ!\n");
7014 		return error;
7015 	}
7016 
7017 	return 0;
7018 }
7019 
7020 static void
7021 bce_teardown_intr(struct bce_softc *sc)
7022 {
7023 	if (sc->bce_irq_type != PCI_INTR_TYPE_MSIX)
7024 		bus_teardown_intr(sc->bce_dev, sc->bce_res_irq, sc->bce_intrhand);
7025 	else
7026 		bce_teardown_msix(sc, sc->rx_ring_cnt);
7027 }
7028 
7029 static int
7030 bce_setup_msix(struct bce_softc *sc)
7031 {
7032 	int i;
7033 
7034 	for (i = 0; i < sc->rx_ring_cnt; ++i) {
7035 		struct bce_msix_data *msix = &sc->bce_msix[i];
7036 		int error;
7037 
7038 		error = bus_setup_intr_descr(sc->bce_dev, msix->msix_res,
7039 		    INTR_MPSAFE, msix->msix_func, msix->msix_arg,
7040 		    &msix->msix_handle, msix->msix_serialize, msix->msix_desc);
7041 		if (error) {
7042 			device_printf(sc->bce_dev, "could not set up %s "
7043 			    "interrupt handler.\n", msix->msix_desc);
7044 			bce_teardown_msix(sc, i);
7045 			return error;
7046 		}
7047 	}
7048 	return 0;
7049 }
7050 
7051 static void
7052 bce_teardown_msix(struct bce_softc *sc, int msix_cnt)
7053 {
7054 	int i;
7055 
7056 	for (i = 0; i < msix_cnt; ++i) {
7057 		struct bce_msix_data *msix = &sc->bce_msix[i];
7058 
7059 		bus_teardown_intr(sc->bce_dev, msix->msix_res,
7060 		    msix->msix_handle);
7061 	}
7062 }
7063 
7064 static void
7065 bce_init_rss(struct bce_softc *sc)
7066 {
7067 	uint8_t key[BCE_RLUP_RSS_KEY_CNT * BCE_RLUP_RSS_KEY_SIZE];
7068 	uint32_t tbl = 0;
7069 	int i;
7070 
7071 	KKASSERT(sc->rx_ring_cnt > 2);
7072 
7073 	/*
7074 	 * Configure RSS keys
7075 	 */
7076 	toeplitz_get_key(key, sizeof(key));
7077 	for (i = 0; i < BCE_RLUP_RSS_KEY_CNT; ++i) {
7078 		uint32_t rss_key;
7079 
7080 		rss_key = BCE_RLUP_RSS_KEYVAL(key, i);
7081 		BCE_RSS_DPRINTF(sc, 1, "rss_key%d 0x%08x\n", i, rss_key);
7082 
7083 		REG_WR(sc, BCE_RLUP_RSS_KEY(i), rss_key);
7084 	}
7085 
7086 	/*
7087 	 * Configure the redirect table
7088 	 *
7089 	 * NOTE:
7090 	 * - The "queue ID" in redirect table is the software RX ring's
7091 	 *   index _minus_ one.
7092 	 * - The last RX ring, whose "queue ID" is (sc->rx_ring_cnt - 2)
7093 	 *   will be used for packets whose masked hash is 0.
7094 	 *   (see also: comment in bce_setup_ring_cnt())
7095 	 */
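	/*
	 * For example, with rx_ring_cnt2 == 4 (rx_ring_cnt == 5):
	 * redirect entries 1..3 become queue IDs 0..2, while entry 0
	 * is remapped to queue ID 3, i.e. the extra RX ring.  Each
	 * BCE_RLUP_RSS_DATA word packs eight such 4-bit queue IDs
	 * before being committed through BCE_RLUP_RSS_COMMAND.
	 */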
7096 	if_ringmap_rdrtable(sc->rx_rmap, sc->rdr_table,
7097 	    BCE_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES);
7098 	for (i = 0; i < BCE_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
7099 		int shift = (i % 8) << 2, qid;
7100 
7101 		qid = sc->rdr_table[i];
7102 		KKASSERT(qid >= 0 && qid < sc->rx_ring_cnt2);
7103 		if (qid > 0)
7104 			--qid;
7105 		else
7106 			qid = sc->rx_ring_cnt - 2;
7107 		KKASSERT(qid < (sc->rx_ring_cnt - 1));
7108 
7109 		tbl |= qid << shift;
7110 		if (i % 8 == 7) {
7111 			BCE_RSS_DPRINTF(sc, 1, "tbl 0x%08x\n", tbl);
7112 			REG_WR(sc, BCE_RLUP_RSS_DATA, tbl);
7113 			REG_WR(sc, BCE_RLUP_RSS_COMMAND, (i >> 3) |
7114 			    BCE_RLUP_RSS_COMMAND_RSS_WRITE_MASK |
7115 			    BCE_RLUP_RSS_COMMAND_WRITE |
7116 			    BCE_RLUP_RSS_COMMAND_HASH_MASK);
7117 			tbl = 0;
7118 		}
7119 	}
7120 	REG_WR(sc, BCE_RLUP_RSS_CONFIG,
7121 	    BCE_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI);
7122 }
7123 
7124 static void
7125 bce_npoll_coal_change(struct bce_softc *sc)
7126 {
7127 	uint32_t old_rx_cons, old_tx_cons;
7128 
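	/*
	 * Temporarily force the during-interrupt quick-consumer
	 * trips to 1 BD and commit them, then restore the saved
	 * software settings; the hardware keeps using 1 until
	 * bce_coal_change() runs again.
	 */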
7129 	old_rx_cons = sc->bce_rx_quick_cons_trip_int;
7130 	old_tx_cons = sc->bce_tx_quick_cons_trip_int;
7131 	sc->bce_rx_quick_cons_trip_int = 1;
7132 	sc->bce_tx_quick_cons_trip_int = 1;
7133 
7134 	sc->bce_coalchg_mask |= BCE_COALMASK_TX_BDS_INT |
7135 	    BCE_COALMASK_RX_BDS_INT;
7136 	bce_coal_change(sc);
7137 
7138 	sc->bce_rx_quick_cons_trip_int = old_rx_cons;
7139 	sc->bce_tx_quick_cons_trip_int = old_tx_cons;
7140 }
7141 
7142 static struct pktinfo *
7143 bce_rss_pktinfo(struct pktinfo *pi, uint32_t status,
7144     const struct l2_fhdr *l2fhdr)
7145 {
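	/*
	 * Build packet info only for IP packets whose IP and TCP/UDP
	 * checksums were verified by the hardware; a NULL return
	 * tells the caller that no usable TCP/UDP packet info could
	 * be extracted from this frame.
	 */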
7146 	/* Check for an IP datagram. */
7147 	if ((status & L2_FHDR_STATUS_IP_DATAGRAM) == 0)
7148 		return NULL;
7149 
7150 	/* Check if the IP checksum is valid. */
7151 	if (l2fhdr->l2_fhdr_ip_xsum != 0xffff)
7152 		return NULL;
7153 
7154 	/* Check for a valid TCP/UDP frame. */
7155 	if (status & L2_FHDR_STATUS_TCP_SEGMENT) {
7156 		if (status & L2_FHDR_ERRORS_TCP_XSUM)
7157 			return NULL;
7158 		if (l2fhdr->l2_fhdr_tcp_udp_xsum != 0xffff)
7159 			return NULL;
7160 		pi->pi_l3proto = IPPROTO_TCP;
7161 	} else if (status & L2_FHDR_STATUS_UDP_DATAGRAM) {
7162 		if (status & L2_FHDR_ERRORS_UDP_XSUM)
7163 			return NULL;
7164 		if (l2fhdr->l2_fhdr_tcp_udp_xsum != 0xffff)
7165 			return NULL;
7166 		pi->pi_l3proto = IPPROTO_UDP;
7167 	} else {
7168 		return NULL;
7169 	}
7170 	pi->pi_netisr = NETISR_IP;
7171 	pi->pi_flags = 0;
7172 
7173 	return pi;
7174 }
7175