xref: /freebsd/sys/dev/bnxt/if_bnxt.c (revision 2a58b312)
1 /*-
2  * Broadcom NetXtreme-C/E network driver.
3  *
4  * Copyright (c) 2016 Broadcom, All Rights Reserved.
5  * The term Broadcom refers to Broadcom Limited and/or its subsidiaries
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
20  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
26  * THE POSSIBILITY OF SUCH DAMAGE.
27  */
28 
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
31 
32 #include <sys/param.h>
33 #include <sys/socket.h>
34 #include <sys/kernel.h>
35 #include <sys/bus.h>
36 #include <sys/module.h>
37 #include <sys/rman.h>
38 #include <sys/endian.h>
39 #include <sys/sockio.h>
40 #include <sys/priv.h>
41 
42 #include <machine/bus.h>
43 #include <machine/resource.h>
44 
45 #include <dev/pci/pcireg.h>
46 #include <dev/pci/pcivar.h>
47 
48 #include <net/if.h>
49 #include <net/if_dl.h>
50 #include <net/if_media.h>
51 #include <net/if_var.h>
52 #include <net/ethernet.h>
53 #include <net/iflib.h>
54 
55 #include "opt_inet.h"
56 #include "opt_inet6.h"
57 #include "opt_rss.h"
58 
59 #include "ifdi_if.h"
60 
61 #include "bnxt.h"
62 #include "bnxt_hwrm.h"
63 #include "bnxt_ioctl.h"
64 #include "bnxt_sysctl.h"
65 #include "hsi_struct_def.h"
66 #include "bnxt_mgmt.h"
67 
68 /*
69  * PCI Device ID Table
70  */
71 
/*
 * Table of supported PCI device IDs, matched by iflib during probe.
 * Each PVID() entry pairs a (vendor, device) tuple with the human-readable
 * description reported for the attached device.  PVID_END terminates the
 * table and must remain the last entry.
 */
static pci_vendor_info_t bnxt_vendor_info_array[] =
{
    PVID(BROADCOM_VENDOR_ID, BCM57301,
	"Broadcom BCM57301 NetXtreme-C 10Gb Ethernet Controller"),
    PVID(BROADCOM_VENDOR_ID, BCM57302,
	"Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet Controller"),
    PVID(BROADCOM_VENDOR_ID, BCM57304,
	"Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet Controller"),
    PVID(BROADCOM_VENDOR_ID, BCM57311,
	"Broadcom BCM57311 NetXtreme-C 10Gb Ethernet"),
    PVID(BROADCOM_VENDOR_ID, BCM57312,
	"Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet"),
    PVID(BROADCOM_VENDOR_ID, BCM57314,
	"Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet"),
    PVID(BROADCOM_VENDOR_ID, BCM57402,
	"Broadcom BCM57402 NetXtreme-E 10Gb Ethernet Controller"),
    PVID(BROADCOM_VENDOR_ID, BCM57402_NPAR,
	"Broadcom BCM57402 NetXtreme-E Partition"),
    PVID(BROADCOM_VENDOR_ID, BCM57404,
	"Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet Controller"),
    PVID(BROADCOM_VENDOR_ID, BCM57404_NPAR,
	"Broadcom BCM57404 NetXtreme-E Partition"),
    PVID(BROADCOM_VENDOR_ID, BCM57406,
	"Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet Controller"),
    PVID(BROADCOM_VENDOR_ID, BCM57406_NPAR,
	"Broadcom BCM57406 NetXtreme-E Partition"),
    PVID(BROADCOM_VENDOR_ID, BCM57407,
	"Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet Controller"),
    PVID(BROADCOM_VENDOR_ID, BCM57407_NPAR,
	"Broadcom BCM57407 NetXtreme-E Ethernet Partition"),
    PVID(BROADCOM_VENDOR_ID, BCM57407_SFP,
	"Broadcom BCM57407 NetXtreme-E 25Gb Ethernet Controller"),
    PVID(BROADCOM_VENDOR_ID, BCM57412,
	"Broadcom BCM57412 NetXtreme-E 10Gb Ethernet"),
    PVID(BROADCOM_VENDOR_ID, BCM57412_NPAR1,
	"Broadcom BCM57412 NetXtreme-E Ethernet Partition"),
    PVID(BROADCOM_VENDOR_ID, BCM57412_NPAR2,
	"Broadcom BCM57412 NetXtreme-E Ethernet Partition"),
    PVID(BROADCOM_VENDOR_ID, BCM57414,
	"Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet"),
    PVID(BROADCOM_VENDOR_ID, BCM57414_NPAR1,
	"Broadcom BCM57414 NetXtreme-E Ethernet Partition"),
    PVID(BROADCOM_VENDOR_ID, BCM57414_NPAR2,
	"Broadcom BCM57414 NetXtreme-E Ethernet Partition"),
    PVID(BROADCOM_VENDOR_ID, BCM57416,
	"Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet"),
    PVID(BROADCOM_VENDOR_ID, BCM57416_NPAR1,
	"Broadcom BCM57416 NetXtreme-E Ethernet Partition"),
    PVID(BROADCOM_VENDOR_ID, BCM57416_NPAR2,
	"Broadcom BCM57416 NetXtreme-E Ethernet Partition"),
    PVID(BROADCOM_VENDOR_ID, BCM57416_SFP,
	"Broadcom BCM57416 NetXtreme-E 10Gb Ethernet"),
    PVID(BROADCOM_VENDOR_ID, BCM57417,
	"Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet"),
    PVID(BROADCOM_VENDOR_ID, BCM57417_NPAR1,
	"Broadcom BCM57417 NetXtreme-E Ethernet Partition"),
    PVID(BROADCOM_VENDOR_ID, BCM57417_NPAR2,
	"Broadcom BCM57417 NetXtreme-E Ethernet Partition"),
    PVID(BROADCOM_VENDOR_ID, BCM57417_SFP,
	"Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet"),
    PVID(BROADCOM_VENDOR_ID, BCM57454,
	"Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet"),
    PVID(BROADCOM_VENDOR_ID, BCM58700,
	"Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet"),
    PVID(BROADCOM_VENDOR_ID, BCM57508,
	"Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet"),
    PVID(BROADCOM_VENDOR_ID, BCM57504,
	"Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet"),
    PVID(BROADCOM_VENDOR_ID, BCM57502,
	"Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet"),
    PVID(BROADCOM_VENDOR_ID, NETXTREME_C_VF1,
	"Broadcom NetXtreme-C Ethernet Virtual Function"),
    PVID(BROADCOM_VENDOR_ID, NETXTREME_C_VF2,
	"Broadcom NetXtreme-C Ethernet Virtual Function"),
    PVID(BROADCOM_VENDOR_ID, NETXTREME_C_VF3,
	"Broadcom NetXtreme-C Ethernet Virtual Function"),
    PVID(BROADCOM_VENDOR_ID, NETXTREME_E_VF1,
	"Broadcom NetXtreme-E Ethernet Virtual Function"),
    PVID(BROADCOM_VENDOR_ID, NETXTREME_E_VF2,
	"Broadcom NetXtreme-E Ethernet Virtual Function"),
    PVID(BROADCOM_VENDOR_ID, NETXTREME_E_VF3,
	"Broadcom NetXtreme-E Ethernet Virtual Function"),
    /* required last entry */

    PVID_END
};
158 
159 /*
160  * Function prototypes
161  */
162 
163 SLIST_HEAD(softc_list, bnxt_softc_list) pf_list;
164 int bnxt_num_pfs = 0;
165 
166 static void *bnxt_register(device_t dev);
167 
168 /* Soft queue setup and teardown */
169 static int bnxt_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
170     uint64_t *paddrs, int ntxqs, int ntxqsets);
171 static int bnxt_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
172     uint64_t *paddrs, int nrxqs, int nrxqsets);
173 static void bnxt_queues_free(if_ctx_t ctx);
174 
175 /* Device setup and teardown */
176 static int bnxt_attach_pre(if_ctx_t ctx);
177 static int bnxt_attach_post(if_ctx_t ctx);
178 static int bnxt_detach(if_ctx_t ctx);
179 
180 /* Device configuration */
181 static void bnxt_init(if_ctx_t ctx);
182 static void bnxt_stop(if_ctx_t ctx);
183 static void bnxt_multi_set(if_ctx_t ctx);
184 static int bnxt_mtu_set(if_ctx_t ctx, uint32_t mtu);
185 static void bnxt_media_status(if_ctx_t ctx, struct ifmediareq * ifmr);
186 static int bnxt_media_change(if_ctx_t ctx);
187 static int bnxt_promisc_set(if_ctx_t ctx, int flags);
188 static uint64_t	bnxt_get_counter(if_ctx_t, ift_counter);
189 static void bnxt_update_admin_status(if_ctx_t ctx);
190 static void bnxt_if_timer(if_ctx_t ctx, uint16_t qid);
191 
192 /* Interrupt enable / disable */
193 static void bnxt_intr_enable(if_ctx_t ctx);
194 static int bnxt_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid);
195 static int bnxt_tx_queue_intr_enable(if_ctx_t ctx, uint16_t qid);
196 static void bnxt_disable_intr(if_ctx_t ctx);
197 static int bnxt_msix_intr_assign(if_ctx_t ctx, int msix);
198 
199 /* vlan support */
200 static void bnxt_vlan_register(if_ctx_t ctx, uint16_t vtag);
201 static void bnxt_vlan_unregister(if_ctx_t ctx, uint16_t vtag);
202 
203 /* ioctl */
204 static int bnxt_priv_ioctl(if_ctx_t ctx, u_long command, caddr_t data);
205 
206 static int bnxt_shutdown(if_ctx_t ctx);
207 static int bnxt_suspend(if_ctx_t ctx);
208 static int bnxt_resume(if_ctx_t ctx);
209 
210 /* Internal support functions */
211 static int bnxt_probe_phy(struct bnxt_softc *softc);
212 static void bnxt_add_media_types(struct bnxt_softc *softc);
213 static int bnxt_pci_mapping(struct bnxt_softc *softc);
214 static void bnxt_pci_mapping_free(struct bnxt_softc *softc);
215 static int bnxt_update_link(struct bnxt_softc *softc, bool chng_link_state);
216 static int bnxt_handle_def_cp(void *arg);
217 static int bnxt_handle_isr(void *arg);
218 static void bnxt_clear_ids(struct bnxt_softc *softc);
219 static void inline bnxt_do_enable_intr(struct bnxt_cp_ring *cpr);
220 static void inline bnxt_do_disable_intr(struct bnxt_cp_ring *cpr);
221 static void bnxt_mark_cpr_invalid(struct bnxt_cp_ring *cpr);
222 static void bnxt_def_cp_task(void *context);
223 static void bnxt_handle_async_event(struct bnxt_softc *softc,
224     struct cmpl_base *cmpl);
225 static uint8_t get_phy_type(struct bnxt_softc *softc);
226 static uint64_t bnxt_get_baudrate(struct bnxt_link_info *link);
227 static void bnxt_get_wol_settings(struct bnxt_softc *softc);
228 static int bnxt_wol_config(if_ctx_t ctx);
229 
230 /*
231  * Device Interface Declaration
232  */
233 
/*
 * newbus device method table.  All entry points are delegated to the
 * generic iflib implementations except device_register, which hands
 * iflib this driver's shared-context template.
 */
static device_method_t bnxt_methods[] = {
	/* Device interface */
	DEVMETHOD(device_register, bnxt_register),
	DEVMETHOD(device_probe, iflib_device_probe),
	DEVMETHOD(device_attach, iflib_device_attach),
	DEVMETHOD(device_detach, iflib_device_detach),
	DEVMETHOD(device_shutdown, iflib_device_shutdown),
	DEVMETHOD(device_suspend, iflib_device_suspend),
	DEVMETHOD(device_resume, iflib_device_resume),
	DEVMETHOD_END
};
245 
/* newbus driver glue: name, method table, and per-device softc size. */
static driver_t bnxt_driver = {
	"bnxt", bnxt_methods, sizeof(struct bnxt_softc),
};
249 
/* Register the driver on the pci bus. */
DRIVER_MODULE(bnxt, pci, bnxt_driver, 0, 0);

/* Module load-order dependencies (min/preferred/max versions). */
MODULE_DEPEND(bnxt, pci, 1, 1, 1);
MODULE_DEPEND(bnxt, ether, 1, 1, 1);
MODULE_DEPEND(bnxt, iflib, 1, 1, 1);

/* Export the PCI ID table for devmatch(8) autoloading. */
IFLIB_PNP_INFO(pci, bnxt, bnxt_vendor_info_array);
257 
/*
 * iflib device-interface (ifdi) method table mapping the iflib callbacks
 * onto this driver's implementations.
 */
static device_method_t bnxt_iflib_methods[] = {
	/* Soft queue allocation and teardown */
	DEVMETHOD(ifdi_tx_queues_alloc, bnxt_tx_queues_alloc),
	DEVMETHOD(ifdi_rx_queues_alloc, bnxt_rx_queues_alloc),
	DEVMETHOD(ifdi_queues_free, bnxt_queues_free),

	/* Attach/detach lifecycle */
	DEVMETHOD(ifdi_attach_pre, bnxt_attach_pre),
	DEVMETHOD(ifdi_attach_post, bnxt_attach_post),
	DEVMETHOD(ifdi_detach, bnxt_detach),

	/* Runtime configuration */
	DEVMETHOD(ifdi_init, bnxt_init),
	DEVMETHOD(ifdi_stop, bnxt_stop),
	DEVMETHOD(ifdi_multi_set, bnxt_multi_set),
	DEVMETHOD(ifdi_mtu_set, bnxt_mtu_set),
	DEVMETHOD(ifdi_media_status, bnxt_media_status),
	DEVMETHOD(ifdi_media_change, bnxt_media_change),
	DEVMETHOD(ifdi_promisc_set, bnxt_promisc_set),
	DEVMETHOD(ifdi_get_counter, bnxt_get_counter),
	DEVMETHOD(ifdi_update_admin_status, bnxt_update_admin_status),
	DEVMETHOD(ifdi_timer, bnxt_if_timer),

	/* Interrupt management */
	DEVMETHOD(ifdi_intr_enable, bnxt_intr_enable),
	DEVMETHOD(ifdi_tx_queue_intr_enable, bnxt_tx_queue_intr_enable),
	DEVMETHOD(ifdi_rx_queue_intr_enable, bnxt_rx_queue_intr_enable),
	DEVMETHOD(ifdi_intr_disable, bnxt_disable_intr),
	DEVMETHOD(ifdi_msix_intr_assign, bnxt_msix_intr_assign),

	/* VLAN filter maintenance */
	DEVMETHOD(ifdi_vlan_register, bnxt_vlan_register),
	DEVMETHOD(ifdi_vlan_unregister, bnxt_vlan_unregister),

	/* Driver-private ioctls */
	DEVMETHOD(ifdi_priv_ioctl, bnxt_priv_ioctl),

	/* Power management */
	DEVMETHOD(ifdi_suspend, bnxt_suspend),
	DEVMETHOD(ifdi_shutdown, bnxt_shutdown),
	DEVMETHOD(ifdi_resume, bnxt_resume),

	DEVMETHOD_END
};
295 
/* iflib driver glue referenced by the shared context below. */
static driver_t bnxt_iflib_driver = {
	"bnxt", bnxt_iflib_methods, sizeof(struct bnxt_softc)
};
299 
300 /*
301  * iflib shared context
302  */
303 
#define BNXT_DRIVER_VERSION	"2.20.0.1"
char bnxt_driver_version[] = BNXT_DRIVER_VERSION;
extern struct if_txrx bnxt_txrx;
/*
 * Shared-context template returned from bnxt_register().  iflib copies
 * this to size DMA buffers and descriptor rings before attach_pre runs.
 */
static struct if_shared_ctx bnxt_sctx_init = {
	.isc_magic = IFLIB_MAGIC,
	.isc_driver = &bnxt_iflib_driver,
	.isc_nfl = 2,				// Number of Free Lists
	.isc_flags = IFLIB_HAS_RXCQ | IFLIB_HAS_TXCQ | IFLIB_NEED_ETHER_PAD,
	.isc_q_align = PAGE_SIZE,
	// All max sizes allow for a TSO payload plus a VLAN-tagged header
	.isc_tx_maxsize = BNXT_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tx_maxsegsize = BNXT_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tso_maxsize = BNXT_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tso_maxsegsize = BNXT_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_rx_maxsize = BNXT_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_rx_maxsegsize = BNXT_TSO_SIZE + sizeof(struct ether_vlan_header),

	// Only use a single segment to avoid page size constraints
	.isc_rx_nsegments = 1,
	// Three rings per qset: completion ring plus two work rings
	// (TX: cp/tx/NQ, RX: cp/rx/aggregation)
	.isc_ntxqs = 3,
	.isc_nrxqs = 3,
	.isc_nrxd_min = {16, 16, 16},
	.isc_nrxd_default = {PAGE_SIZE / sizeof(struct cmpl_base) * 8,
	    PAGE_SIZE / sizeof(struct rx_prod_pkt_bd),
	    PAGE_SIZE / sizeof(struct rx_prod_pkt_bd)},
	.isc_nrxd_max = {BNXT_MAX_RXD, BNXT_MAX_RXD, BNXT_MAX_RXD},
	.isc_ntxd_min = {16, 16, 16},
	.isc_ntxd_default = {PAGE_SIZE / sizeof(struct cmpl_base) * 2,
	    PAGE_SIZE / sizeof(struct tx_bd_short),
	    PAGE_SIZE / sizeof(struct cmpl_base) * 2},
	.isc_ntxd_max = {BNXT_MAX_TXD, BNXT_MAX_TXD, BNXT_MAX_TXD},

	// One vector reserved for the default/async completion ring
	.isc_admin_intrcnt = 1,
	.isc_vendor_info = bnxt_vendor_info_array,
	.isc_driver_version = bnxt_driver_version,
};
339 
340 /*
341  * Device Methods
342  */
343 
344 static void *
345 bnxt_register(device_t dev)
346 {
347 	return (&bnxt_sctx_init);
348 }
349 
350 static void
351 bnxt_nq_alloc(struct bnxt_softc *softc, int nqsets)
352 {
353 
354 	if (softc->nq_rings)
355 		return;
356 
357 	softc->nq_rings = malloc(sizeof(struct bnxt_cp_ring) * nqsets,
358 	    M_DEVBUF, M_NOWAIT | M_ZERO);
359 }
360 
361 static void
362 bnxt_nq_free(struct bnxt_softc *softc)
363 {
364 
365 	if (softc->nq_rings)
366 		free(softc->nq_rings, M_DEVBUF);
367 	softc->nq_rings = NULL;
368 }
369 
370 /*
371  * Device Dependent Configuration Functions
372 */
373 
374 /* Soft queue setup and teardown */
375 static int
376 bnxt_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
377     uint64_t *paddrs, int ntxqs, int ntxqsets)
378 {
379 	struct bnxt_softc *softc;
380 	int i;
381 	int rc;
382 
383 	softc = iflib_get_softc(ctx);
384 
385 	if (BNXT_CHIP_P5(softc)) {
386 		bnxt_nq_alloc(softc, ntxqsets);
387 		if (!softc->nq_rings) {
388 			device_printf(iflib_get_dev(ctx),
389 					"unable to allocate NQ rings\n");
390 			rc = ENOMEM;
391 			goto nq_alloc_fail;
392 		}
393 	}
394 
395 	softc->tx_cp_rings = malloc(sizeof(struct bnxt_cp_ring) * ntxqsets,
396 	    M_DEVBUF, M_NOWAIT | M_ZERO);
397 	if (!softc->tx_cp_rings) {
398 		device_printf(iflib_get_dev(ctx),
399 		    "unable to allocate TX completion rings\n");
400 		rc = ENOMEM;
401 		goto cp_alloc_fail;
402 	}
403 	softc->tx_rings = malloc(sizeof(struct bnxt_ring) * ntxqsets,
404 	    M_DEVBUF, M_NOWAIT | M_ZERO);
405 	if (!softc->tx_rings) {
406 		device_printf(iflib_get_dev(ctx),
407 		    "unable to allocate TX rings\n");
408 		rc = ENOMEM;
409 		goto ring_alloc_fail;
410 	}
411 
412 	for (i=0; i < ntxqsets; i++) {
413 		rc = iflib_dma_alloc(ctx, sizeof(struct ctx_hw_stats),
414 				&softc->tx_stats[i], 0);
415 		if (rc)
416 			goto dma_alloc_fail;
417 		bus_dmamap_sync(softc->tx_stats[i].idi_tag, softc->tx_stats[i].idi_map,
418 				BUS_DMASYNC_PREREAD);
419 	}
420 
421 	for (i = 0; i < ntxqsets; i++) {
422 		/* Set up the completion ring */
423 		softc->tx_cp_rings[i].stats_ctx_id = HWRM_NA_SIGNATURE;
424 		softc->tx_cp_rings[i].ring.phys_id =
425 		    (uint16_t)HWRM_NA_SIGNATURE;
426 		softc->tx_cp_rings[i].ring.softc = softc;
427 		softc->tx_cp_rings[i].ring.idx = i;
428 		softc->tx_cp_rings[i].ring.id =
429 		    (softc->scctx->isc_nrxqsets * 2) + 1 + i;
430 		softc->tx_cp_rings[i].ring.doorbell = (BNXT_CHIP_P5(softc)) ?
431 			DB_PF_OFFSET_P5: softc->tx_cp_rings[i].ring.id * 0x80;
432 		softc->tx_cp_rings[i].ring.ring_size =
433 		    softc->scctx->isc_ntxd[0];
434 		softc->tx_cp_rings[i].ring.vaddr = vaddrs[i * ntxqs];
435 		softc->tx_cp_rings[i].ring.paddr = paddrs[i * ntxqs];
436 
437 		/* Set up the TX ring */
438 		softc->tx_rings[i].phys_id = (uint16_t)HWRM_NA_SIGNATURE;
439 		softc->tx_rings[i].softc = softc;
440 		softc->tx_rings[i].idx = i;
441 		softc->tx_rings[i].id =
442 		    (softc->scctx->isc_nrxqsets * 2) + 1 + i;
443 		softc->tx_rings[i].doorbell = (BNXT_CHIP_P5(softc)) ?
444 			DB_PF_OFFSET_P5 : softc->tx_rings[i].id * 0x80;
445 		softc->tx_rings[i].ring_size = softc->scctx->isc_ntxd[1];
446 		softc->tx_rings[i].vaddr = vaddrs[i * ntxqs + 1];
447 		softc->tx_rings[i].paddr = paddrs[i * ntxqs + 1];
448 
449 		bnxt_create_tx_sysctls(softc, i);
450 
451 		if (BNXT_CHIP_P5(softc)) {
452 			/* Set up the Notification ring (NQ) */
453 			softc->nq_rings[i].stats_ctx_id = HWRM_NA_SIGNATURE;
454 			softc->nq_rings[i].ring.phys_id =
455 				(uint16_t)HWRM_NA_SIGNATURE;
456 			softc->nq_rings[i].ring.softc = softc;
457 			softc->nq_rings[i].ring.idx = i;
458 			softc->nq_rings[i].ring.id = i;
459 			softc->nq_rings[i].ring.doorbell = (BNXT_CHIP_P5(softc)) ?
460 				DB_PF_OFFSET_P5 : softc->nq_rings[i].ring.id * 0x80;
461 			softc->nq_rings[i].ring.ring_size = softc->scctx->isc_ntxd[2];
462 			softc->nq_rings[i].ring.vaddr = vaddrs[i * ntxqs + 2];
463 			softc->nq_rings[i].ring.paddr = paddrs[i * ntxqs + 2];
464 		}
465 	}
466 
467 	softc->ntxqsets = ntxqsets;
468 	return rc;
469 
470 dma_alloc_fail:
471 	for (i = i - 1; i >= 0; i--)
472 		iflib_dma_free(&softc->tx_stats[i]);
473 	free(softc->tx_rings, M_DEVBUF);
474 ring_alloc_fail:
475 	free(softc->tx_cp_rings, M_DEVBUF);
476 cp_alloc_fail:
477 	bnxt_nq_free(softc);
478 nq_alloc_fail:
479 	return rc;
480 }
481 
/*
 * ifdi_queues_free: release everything allocated by
 * bnxt_tx_queues_alloc() and bnxt_rx_queues_alloc().
 *
 * NOTE(review): the per-RX-ring tpa_start buffers allocated in
 * bnxt_rx_queues_alloc() are not freed here — presumably they are
 * released elsewhere (e.g. detach); verify to rule out a leak.
 */
static void
bnxt_queues_free(if_ctx_t ctx)
{
	struct bnxt_softc *softc = iflib_get_softc(ctx);
	int i;

	// Free TX queues
	for (i=0; i<softc->ntxqsets; i++)
		iflib_dma_free(&softc->tx_stats[i]);
	free(softc->tx_rings, M_DEVBUF);
	softc->tx_rings = NULL;
	free(softc->tx_cp_rings, M_DEVBUF);
	softc->tx_cp_rings = NULL;
	softc->ntxqsets = 0;

	// Free RX queues
	for (i=0; i<softc->nrxqsets; i++)
		iflib_dma_free(&softc->rx_stats[i]);
	iflib_dma_free(&softc->hw_tx_port_stats);
	iflib_dma_free(&softc->hw_rx_port_stats);
	free(softc->grp_info, M_DEVBUF);
	free(softc->ag_rings, M_DEVBUF);
	free(softc->rx_rings, M_DEVBUF);
	free(softc->rx_cp_rings, M_DEVBUF);
	bnxt_nq_free(softc);
}
508 
509 static int
510 bnxt_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
511     uint64_t *paddrs, int nrxqs, int nrxqsets)
512 {
513 	struct bnxt_softc *softc;
514 	int i;
515 	int rc;
516 
517 	softc = iflib_get_softc(ctx);
518 
519 	softc->rx_cp_rings = malloc(sizeof(struct bnxt_cp_ring) * nrxqsets,
520 	    M_DEVBUF, M_NOWAIT | M_ZERO);
521 	if (!softc->rx_cp_rings) {
522 		device_printf(iflib_get_dev(ctx),
523 		    "unable to allocate RX completion rings\n");
524 		rc = ENOMEM;
525 		goto cp_alloc_fail;
526 	}
527 	softc->rx_rings = malloc(sizeof(struct bnxt_ring) * nrxqsets,
528 	    M_DEVBUF, M_NOWAIT | M_ZERO);
529 	if (!softc->rx_rings) {
530 		device_printf(iflib_get_dev(ctx),
531 		    "unable to allocate RX rings\n");
532 		rc = ENOMEM;
533 		goto ring_alloc_fail;
534 	}
535 	softc->ag_rings = malloc(sizeof(struct bnxt_ring) * nrxqsets,
536 	    M_DEVBUF, M_NOWAIT | M_ZERO);
537 	if (!softc->ag_rings) {
538 		device_printf(iflib_get_dev(ctx),
539 		    "unable to allocate aggregation rings\n");
540 		rc = ENOMEM;
541 		goto ag_alloc_fail;
542 	}
543 	softc->grp_info = malloc(sizeof(struct bnxt_grp_info) * nrxqsets,
544 	    M_DEVBUF, M_NOWAIT | M_ZERO);
545 	if (!softc->grp_info) {
546 		device_printf(iflib_get_dev(ctx),
547 		    "unable to allocate ring groups\n");
548 		rc = ENOMEM;
549 		goto grp_alloc_fail;
550 	}
551 
552 	for (i=0; i < nrxqsets; i++) {
553 		rc = iflib_dma_alloc(ctx, sizeof(struct ctx_hw_stats),
554 				&softc->rx_stats[i], 0);
555 		if (rc)
556 			goto hw_stats_alloc_fail;
557 		bus_dmamap_sync(softc->rx_stats[i].idi_tag, softc->rx_stats[i].idi_map,
558 				BUS_DMASYNC_PREREAD);
559 	}
560 
561 /*
562  * Additional 512 bytes for future expansion.
563  * To prevent corruption when loaded with newer firmwares with added counters.
564  * This can be deleted when there will be no further additions of counters.
565  */
566 #define BNXT_PORT_STAT_PADDING  512
567 
568 	rc = iflib_dma_alloc(ctx, sizeof(struct rx_port_stats) + BNXT_PORT_STAT_PADDING,
569 	    &softc->hw_rx_port_stats, 0);
570 	if (rc)
571 		goto hw_port_rx_stats_alloc_fail;
572 
573 	bus_dmamap_sync(softc->hw_rx_port_stats.idi_tag,
574             softc->hw_rx_port_stats.idi_map, BUS_DMASYNC_PREREAD);
575 
576 	rc = iflib_dma_alloc(ctx, sizeof(struct tx_port_stats) + BNXT_PORT_STAT_PADDING,
577 	    &softc->hw_tx_port_stats, 0);
578 
579 	if (rc)
580 		goto hw_port_tx_stats_alloc_fail;
581 
582 	bus_dmamap_sync(softc->hw_tx_port_stats.idi_tag,
583             softc->hw_tx_port_stats.idi_map, BUS_DMASYNC_PREREAD);
584 
585 	softc->rx_port_stats = (void *) softc->hw_rx_port_stats.idi_vaddr;
586 	softc->tx_port_stats = (void *) softc->hw_tx_port_stats.idi_vaddr;
587 
588 	for (i = 0; i < nrxqsets; i++) {
589 		/* Allocation the completion ring */
590 		softc->rx_cp_rings[i].stats_ctx_id = HWRM_NA_SIGNATURE;
591 		softc->rx_cp_rings[i].ring.phys_id =
592 		    (uint16_t)HWRM_NA_SIGNATURE;
593 		softc->rx_cp_rings[i].ring.softc = softc;
594 		softc->rx_cp_rings[i].ring.idx = i;
595 		softc->rx_cp_rings[i].ring.id = i + 1;
596 		softc->rx_cp_rings[i].ring.doorbell = (BNXT_CHIP_P5(softc)) ?
597 			DB_PF_OFFSET_P5 : softc->rx_cp_rings[i].ring.id * 0x80;
598 		/*
599 		 * If this ring overflows, RX stops working.
600 		 */
601 		softc->rx_cp_rings[i].ring.ring_size =
602 		    softc->scctx->isc_nrxd[0];
603 		softc->rx_cp_rings[i].ring.vaddr = vaddrs[i * nrxqs];
604 		softc->rx_cp_rings[i].ring.paddr = paddrs[i * nrxqs];
605 
606 		/* Allocate the RX ring */
607 		softc->rx_rings[i].phys_id = (uint16_t)HWRM_NA_SIGNATURE;
608 		softc->rx_rings[i].softc = softc;
609 		softc->rx_rings[i].idx = i;
610 		softc->rx_rings[i].id = i + 1;
611 		softc->rx_rings[i].doorbell = (BNXT_CHIP_P5(softc)) ?
612 			DB_PF_OFFSET_P5 : softc->rx_rings[i].id * 0x80;
613 		softc->rx_rings[i].ring_size = softc->scctx->isc_nrxd[1];
614 		softc->rx_rings[i].vaddr = vaddrs[i * nrxqs + 1];
615 		softc->rx_rings[i].paddr = paddrs[i * nrxqs + 1];
616 
617 		/* Allocate the TPA start buffer */
618 		softc->rx_rings[i].tpa_start = malloc(sizeof(struct bnxt_full_tpa_start) *
619 	    		(RX_TPA_START_CMPL_AGG_ID_MASK >> RX_TPA_START_CMPL_AGG_ID_SFT),
620 	    		M_DEVBUF, M_NOWAIT | M_ZERO);
621 		if (softc->rx_rings[i].tpa_start == NULL) {
622 			rc = -ENOMEM;
623 			device_printf(softc->dev,
624 					"Unable to allocate space for TPA\n");
625 			goto tpa_alloc_fail;
626 		}
627 
628 		/* Allocate the AG ring */
629 		softc->ag_rings[i].phys_id = (uint16_t)HWRM_NA_SIGNATURE;
630 		softc->ag_rings[i].softc = softc;
631 		softc->ag_rings[i].idx = i;
632 		softc->ag_rings[i].id = nrxqsets + i + 1;
633 		softc->ag_rings[i].doorbell = (BNXT_CHIP_P5(softc)) ?
634 			DB_PF_OFFSET_P5 : softc->ag_rings[i].id * 0x80;
635 		softc->ag_rings[i].ring_size = softc->scctx->isc_nrxd[2];
636 		softc->ag_rings[i].vaddr = vaddrs[i * nrxqs + 2];
637 		softc->ag_rings[i].paddr = paddrs[i * nrxqs + 2];
638 
639 		/* Allocate the ring group */
640 		softc->grp_info[i].grp_id = (uint16_t)HWRM_NA_SIGNATURE;
641 		softc->grp_info[i].stats_ctx =
642 		    softc->rx_cp_rings[i].stats_ctx_id;
643 		softc->grp_info[i].rx_ring_id = softc->rx_rings[i].phys_id;
644 		softc->grp_info[i].ag_ring_id = softc->ag_rings[i].phys_id;
645 		softc->grp_info[i].cp_ring_id =
646 		    softc->rx_cp_rings[i].ring.phys_id;
647 
648 		bnxt_create_rx_sysctls(softc, i);
649 	}
650 
651 	/*
652 	 * When SR-IOV is enabled, avoid each VF sending PORT_QSTATS
653          * HWRM every sec with which firmware timeouts can happen
654          */
655 	if (BNXT_PF(softc))
656         	bnxt_create_port_stats_sysctls(softc);
657 
658 	/* And finally, the VNIC */
659 	softc->vnic_info.id = (uint16_t)HWRM_NA_SIGNATURE;
660 	softc->vnic_info.filter_id = -1;
661 	softc->vnic_info.def_ring_grp = (uint16_t)HWRM_NA_SIGNATURE;
662 	softc->vnic_info.cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
663 	softc->vnic_info.lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
664 	softc->vnic_info.rx_mask = HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST |
665 		HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ANYVLAN_NONVLAN;
666 	softc->vnic_info.mc_list_count = 0;
667 	softc->vnic_info.flags = BNXT_VNIC_FLAG_DEFAULT;
668 	rc = iflib_dma_alloc(ctx, BNXT_MAX_MC_ADDRS * ETHER_ADDR_LEN,
669 	    &softc->vnic_info.mc_list, 0);
670 	if (rc)
671 		goto mc_list_alloc_fail;
672 
673 	/* The VNIC RSS Hash Key */
674 	rc = iflib_dma_alloc(ctx, HW_HASH_KEY_SIZE,
675 	    &softc->vnic_info.rss_hash_key_tbl, 0);
676 	if (rc)
677 		goto rss_hash_alloc_fail;
678 	bus_dmamap_sync(softc->vnic_info.rss_hash_key_tbl.idi_tag,
679 	    softc->vnic_info.rss_hash_key_tbl.idi_map,
680 	    BUS_DMASYNC_PREWRITE);
681 	memcpy(softc->vnic_info.rss_hash_key_tbl.idi_vaddr,
682 	    softc->vnic_info.rss_hash_key, HW_HASH_KEY_SIZE);
683 
684 	/* Allocate the RSS tables */
685 	rc = iflib_dma_alloc(ctx, HW_HASH_INDEX_SIZE * sizeof(uint16_t),
686 	    &softc->vnic_info.rss_grp_tbl, 0);
687 	if (rc)
688 		goto rss_grp_alloc_fail;
689 	bus_dmamap_sync(softc->vnic_info.rss_grp_tbl.idi_tag,
690 	    softc->vnic_info.rss_grp_tbl.idi_map,
691 	    BUS_DMASYNC_PREWRITE);
692 	memset(softc->vnic_info.rss_grp_tbl.idi_vaddr, 0xff,
693 	    softc->vnic_info.rss_grp_tbl.idi_size);
694 
695 	softc->nrxqsets = nrxqsets;
696 	return rc;
697 
698 rss_grp_alloc_fail:
699 	iflib_dma_free(&softc->vnic_info.rss_hash_key_tbl);
700 rss_hash_alloc_fail:
701 	iflib_dma_free(&softc->vnic_info.mc_list);
702 tpa_alloc_fail:
703 mc_list_alloc_fail:
704 	for (i = i - 1; i >= 0; i--)
705 		free(softc->rx_rings[i].tpa_start, M_DEVBUF);
706 	iflib_dma_free(&softc->hw_tx_port_stats);
707 hw_port_tx_stats_alloc_fail:
708 	iflib_dma_free(&softc->hw_rx_port_stats);
709 hw_port_rx_stats_alloc_fail:
710 	for (i = i - 1; i >= 0; i--)
711 		iflib_dma_free(&softc->rx_stats[i]);
712 hw_stats_alloc_fail:
713 	free(softc->grp_info, M_DEVBUF);
714 grp_alloc_fail:
715 	free(softc->ag_rings, M_DEVBUF);
716 ag_alloc_fail:
717 	free(softc->rx_rings, M_DEVBUF);
718 ring_alloc_fail:
719 	free(softc->rx_cp_rings, M_DEVBUF);
720 cp_alloc_fail:
721 	return rc;
722 }
723 
724 static void bnxt_free_hwrm_short_cmd_req(struct bnxt_softc *softc)
725 {
726 	if (softc->hwrm_short_cmd_req_addr.idi_vaddr)
727 		iflib_dma_free(&softc->hwrm_short_cmd_req_addr);
728 	softc->hwrm_short_cmd_req_addr.idi_vaddr = NULL;
729 }
730 
731 static int bnxt_alloc_hwrm_short_cmd_req(struct bnxt_softc *softc)
732 {
733 	int rc;
734 
735 	rc = iflib_dma_alloc(softc->ctx, softc->hwrm_max_req_len,
736 	    &softc->hwrm_short_cmd_req_addr, BUS_DMA_NOWAIT);
737 
738 	return rc;
739 }
740 
741 static void bnxt_free_ring(struct bnxt_softc *bp, struct bnxt_ring_mem_info *rmem)
742 {
743         int i;
744 
745         for (i = 0; i < rmem->nr_pages; i++) {
746                 if (!rmem->pg_arr[i].idi_vaddr)
747                         continue;
748 
749 		iflib_dma_free(&rmem->pg_arr[i]);
750                 rmem->pg_arr[i].idi_vaddr = NULL;
751         }
752         if (rmem->pg_tbl.idi_vaddr) {
753 		iflib_dma_free(&rmem->pg_tbl);
754                 rmem->pg_tbl.idi_vaddr = NULL;
755 
756         }
757         if (rmem->vmem_size && *rmem->vmem) {
758                 free(*rmem->vmem, M_DEVBUF);
759                 *rmem->vmem = NULL;
760         }
761 }
762 
/*
 * Allocate a ring's backing memory as described by rmem: a page-table
 * (directory) when more than one page or an extra indirection level is
 * needed, the data pages themselves, and an optional software vmem area.
 *
 * When a page table is in use, each entry is the little-endian physical
 * address of a data page OR'd with PTU_PTE_VALID (if requested) and,
 * for ring-style tables, NEXT_TO_LAST/LAST marker bits on the final two
 * entries.  Returns 0 on success or -ENOMEM (Linux-style, matching the
 * ctx-mem call chain this feeds).  Partially-allocated state is left for
 * bnxt_free_ring() to reclaim.
 */
static int bnxt_alloc_ring(struct bnxt_softc *softc, struct bnxt_ring_mem_info *rmem)
{
	uint64_t valid_bit = 0;
	int i;
	int rc;

	if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG))
		valid_bit = PTU_PTE_VALID;

	/* Multi-page or multi-level rings need a page-directory table. */
	if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl.idi_vaddr) {
		size_t pg_tbl_size = rmem->nr_pages * 8;

		if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
			pg_tbl_size = rmem->page_size;

		rc = iflib_dma_alloc(softc->ctx, pg_tbl_size, &rmem->pg_tbl, 0);
		if (rc)
			return -ENOMEM;
	}

	for (i = 0; i < rmem->nr_pages; i++) {
		uint64_t extra_bits = valid_bit;
		uint64_t *ptr;

		rc = iflib_dma_alloc(softc->ctx, rmem->page_size, &rmem->pg_arr[i], 0);
		if (rc)
			return -ENOMEM;

		/* Some context types require a non-zero fill pattern. */
		if (rmem->init_val)
                        memset(rmem->pg_arr[i].idi_vaddr, rmem->init_val, rmem->page_size);

		if (rmem->nr_pages > 1 || rmem->depth > 0) {
			/* Ring-style tables mark the tail entries for HW. */
			if (i == rmem->nr_pages - 2 &&
					(rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
				extra_bits |= PTU_PTE_NEXT_TO_LAST;
			else if (i == rmem->nr_pages - 1 &&
					(rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
				extra_bits |= PTU_PTE_LAST;

			ptr = (void *) rmem->pg_tbl.idi_vaddr;
			ptr[i]  = htole64(rmem->pg_arr[i].idi_paddr | extra_bits);
		}
	}

	/* Optional host-side bookkeeping area tied to this ring. */
	if (rmem->vmem_size) {
		*rmem->vmem = malloc(rmem->vmem_size, M_DEVBUF, M_NOWAIT | M_ZERO);
		if (!(*rmem->vmem))
			return -ENOMEM;
	}
	return 0;
}
814 
/*
 * Default set of context types enabled in a FUNC_BACKING_STORE_CFG
 * request: QP, SRQ, CQ, VNIC, and statistics contexts.
 */
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_DFLT_ENABLES			\
	(HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_QP |		\
	 HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_SRQ |		\
	 HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_CQ |		\
	 HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_VNIC |		\
	 HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_STAT)
821 
822 static int bnxt_alloc_ctx_mem_blk(struct bnxt_softc *softc,
823 				  struct bnxt_ctx_pg_info *ctx_pg)
824 {
825 	struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
826 
827 	rmem->page_size = BNXT_PAGE_SIZE;
828 	rmem->pg_arr = ctx_pg->ctx_arr;
829 	rmem->flags = BNXT_RMEM_VALID_PTE_FLAG;
830 	if (rmem->depth >= 1)
831 		rmem->flags |= BNXT_RMEM_USE_FULL_PAGE_FLAG;
832 
833 	return bnxt_alloc_ring(softc, rmem);
834 }
835 
/*
 * Allocate firmware context memory of mem_size bytes as one- or
 * two-level page tables.
 *
 * Small regions (<= MAX_CTX_PAGES pages and depth <= 1) are allocated
 * as a single block.  Larger regions get a two-level layout: a top
 * directory of up to MAX_CTX_PAGES table pages, each described by its
 * own bnxt_ctx_pg_info hanging off ctx_pg->ctx_pg_tbl[].
 *
 * use_init_val fills pages with the firmware-specified pattern
 * (ctx_kind_initializer).  Returns 0, -EINVAL if the region exceeds
 * MAX_CTX_TOTAL_PAGES, or -ENOMEM.  On failure, partially-built state
 * is left for bnxt_free_ctx_pg_tbls() to reclaim.
 */
static int bnxt_alloc_ctx_pg_tbls(struct bnxt_softc *softc,
				  struct bnxt_ctx_pg_info *ctx_pg, uint32_t mem_size,
				  uint8_t depth, bool use_init_val)
{
	struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
	int rc;

	if (!mem_size)
		return 0;

	ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
	if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) {
		ctx_pg->nr_pages = 0;
		return -EINVAL;
	}
	if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) {
		int nr_tbls, i;

		/* Two-level layout: top directory plus leaf tables. */
		rmem->depth = 2;
		ctx_pg->ctx_pg_tbl = malloc(MAX_CTX_PAGES * sizeof(ctx_pg),
				M_DEVBUF, M_NOWAIT | M_ZERO);
		if (!ctx_pg->ctx_pg_tbl)
			return -ENOMEM;
		nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES);
		rmem->nr_pages = nr_tbls;
		/* Allocate the top-level directory pages first. */
		rc = bnxt_alloc_ctx_mem_blk(softc, ctx_pg);
		if (rc)
			return rc;
		for (i = 0; i < nr_tbls; i++) {
			struct bnxt_ctx_pg_info *pg_tbl;

			pg_tbl = malloc(sizeof(*pg_tbl), M_DEVBUF, M_NOWAIT | M_ZERO);
			if (!pg_tbl)
				return -ENOMEM;
			ctx_pg->ctx_pg_tbl[i] = pg_tbl;
			rmem = &pg_tbl->ring_mem;
			/* Leaf table i is backed by directory page i. */
			memcpy(&rmem->pg_tbl, &ctx_pg->ctx_arr[i], sizeof(struct iflib_dma_info));
			rmem->depth = 1;
			rmem->nr_pages = MAX_CTX_PAGES;
			if (use_init_val)
                                rmem->init_val = softc->ctx_mem->ctx_kind_initializer;
			/* The last leaf table may be partially filled. */
			if (i == (nr_tbls - 1)) {
				int rem = ctx_pg->nr_pages % MAX_CTX_PAGES;

				if (rem)
					rmem->nr_pages = rem;
			}
			rc = bnxt_alloc_ctx_mem_blk(softc, pg_tbl);
			if (rc)
				break;
		}
	} else {
		/* Single-level layout. */
		rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
		if (rmem->nr_pages > 1 || depth)
			rmem->depth = 1;
		if (use_init_val)
			rmem->init_val = softc->ctx_mem->ctx_kind_initializer;
		rc = bnxt_alloc_ctx_mem_blk(softc, ctx_pg);
	}
	return rc;
}
897 
898 static void bnxt_free_ctx_pg_tbls(struct bnxt_softc *softc,
899 				  struct bnxt_ctx_pg_info *ctx_pg)
900 {
901 	struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
902 
903 	if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES ||
904 	    ctx_pg->ctx_pg_tbl) {
905 		int i, nr_tbls = rmem->nr_pages;
906 
907 		for (i = 0; i < nr_tbls; i++) {
908 			struct bnxt_ctx_pg_info *pg_tbl;
909 			struct bnxt_ring_mem_info *rmem2;
910 
911 			pg_tbl = ctx_pg->ctx_pg_tbl[i];
912 			if (!pg_tbl)
913 				continue;
914 			rmem2 = &pg_tbl->ring_mem;
915 			bnxt_free_ring(softc, rmem2);
916 			ctx_pg->ctx_arr[i].idi_vaddr = NULL;
917 			free(pg_tbl , M_DEVBUF);
918 			ctx_pg->ctx_pg_tbl[i] = NULL;
919 		}
920 		free(ctx_pg->ctx_pg_tbl , M_DEVBUF);
921 		ctx_pg->ctx_pg_tbl = NULL;
922 	}
923 	bnxt_free_ring(softc, rmem);
924 	ctx_pg->nr_pages = 0;
925 }
926 
927 static void bnxt_free_ctx_mem(struct bnxt_softc *softc)
928 {
929 	struct bnxt_ctx_mem_info *ctx = softc->ctx_mem;
930 	int i;
931 
932 	if (!ctx)
933 		return;
934 
935 	if (ctx->tqm_mem[0]) {
936 		for (i = 0; i < softc->max_q + 1; i++) {
937 			if (!ctx->tqm_mem[i])
938 				continue;
939 			bnxt_free_ctx_pg_tbls(softc, ctx->tqm_mem[i]);
940 		}
941 		free(ctx->tqm_mem[0] , M_DEVBUF);
942 		ctx->tqm_mem[0] = NULL;
943 	}
944 
945 	bnxt_free_ctx_pg_tbls(softc, &ctx->tim_mem);
946 	bnxt_free_ctx_pg_tbls(softc, &ctx->mrav_mem);
947 	bnxt_free_ctx_pg_tbls(softc, &ctx->stat_mem);
948 	bnxt_free_ctx_pg_tbls(softc, &ctx->vnic_mem);
949 	bnxt_free_ctx_pg_tbls(softc, &ctx->cq_mem);
950 	bnxt_free_ctx_pg_tbls(softc, &ctx->srq_mem);
951 	bnxt_free_ctx_pg_tbls(softc, &ctx->qp_mem);
952 	ctx->flags &= ~BNXT_CTX_FLAG_INITED;
953 	free(softc->ctx_mem, M_DEVBUF);
954 	softc->ctx_mem = NULL;
955 }
956 
/*
 * Allocate all firmware backing-store context memory for P5 (Thor)
 * chips; earlier chips keep this context on-die, so this is a no-op
 * for them.  Queries the required sizes from firmware, allocates host
 * memory for each context type (QP, SRQ, CQ, VNIC, stats, MR/AV, TIM
 * and the per-queue TQM rings), then points firmware at it via
 * HWRM_FUNC_BACKING_STORE_CFG.
 */
static int bnxt_alloc_ctx_mem(struct bnxt_softc *softc)
{
	struct bnxt_ctx_pg_info *ctx_pg;
	struct bnxt_ctx_mem_info *ctx;
	uint32_t mem_size, ena, entries;
	int i, rc;

	/* Host backing store is only used by P5 (Thor) chips. */
	if (!BNXT_CHIP_P5(softc))
		return 0;

	rc = bnxt_hwrm_func_backing_store_qcaps(softc);
	if (rc) {
		device_printf(softc->dev, "Failed querying context mem capability, rc = %d.\n",
			   rc);
		return rc;
	}
	ctx = softc->ctx_mem;
	/* Nothing to do if firmware needs no context or it's already set up. */
	if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
		return 0;

	ctx_pg = &ctx->qp_mem;
	ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries +
			  (1024 * 64); /* FIXME: Enable 64K QPs */
	mem_size = ctx->qp_entry_size * ctx_pg->entries;
	rc = bnxt_alloc_ctx_pg_tbls(softc, ctx_pg, mem_size, 2, true);
	if (rc)
		return rc;

	ctx_pg = &ctx->srq_mem;
	/* FIXME: Temporarily enable 8K RoCE SRQs */
	ctx_pg->entries = ctx->srq_max_l2_entries + (1024 * 8);
	mem_size = ctx->srq_entry_size * ctx_pg->entries;
	rc = bnxt_alloc_ctx_pg_tbls(softc, ctx_pg, mem_size, 2, true);
	if (rc)
		return rc;

	ctx_pg = &ctx->cq_mem;
	/* FIXME: Temporarily enable 64K RoCE CQ */
	ctx_pg->entries = ctx->cq_max_l2_entries + (1024 * 64 * 2);
	mem_size = ctx->cq_entry_size * ctx_pg->entries;
	rc = bnxt_alloc_ctx_pg_tbls(softc, ctx_pg, mem_size, 2, true);
	if (rc)
		return rc;

	ctx_pg = &ctx->vnic_mem;
	ctx_pg->entries = ctx->vnic_max_vnic_entries +
			  ctx->vnic_max_ring_table_entries;
	mem_size = ctx->vnic_entry_size * ctx_pg->entries;
	rc = bnxt_alloc_ctx_pg_tbls(softc, ctx_pg, mem_size, 1, true);
	if (rc)
		return rc;

	ctx_pg = &ctx->stat_mem;
	ctx_pg->entries = ctx->stat_max_entries;
	mem_size = ctx->stat_entry_size * ctx_pg->entries;
	rc = bnxt_alloc_ctx_pg_tbls(softc, ctx_pg, mem_size, 1, true);
	if (rc)
		return rc;

	ctx_pg = &ctx->mrav_mem;
	/* FIXME: Temporarily enable 256K RoCE MRs */
	ctx_pg->entries = 1024 * 256;
	mem_size = ctx->mrav_entry_size * ctx_pg->entries;
	rc = bnxt_alloc_ctx_pg_tbls(softc, ctx_pg, mem_size, 2, true);
	if (rc)
		return rc;
	/* Start accumulating the enable bits for the CFG call below. */
	ena = HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_MRAV;

	ctx_pg = &ctx->tim_mem;
	/* Firmware needs number of TIM entries equal to
	 * number of Total QP contexts enabled, including
	 * L2 QPs.
	 */
	ctx_pg->entries = ctx->qp_min_qp1_entries +
			  ctx->qp_max_l2_entries + 1024 * 64;
	/* FIXME: L2 driver is not able to create queue depth
	 *  worth of 1M 32bit timers. Need a fix when l2-roce
	 *  interface is well designed.
	 */
	mem_size = ctx->tim_entry_size * ctx_pg->entries;
	rc = bnxt_alloc_ctx_pg_tbls(softc, ctx_pg, mem_size, 2, false);
	if (rc)
		return rc;
	ena |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TIM;

	/* FIXME: Temporarily increase the TQM queue depth
	 * by 1K for 1K RoCE QPs.
	 */
	entries = ctx->qp_max_l2_entries + 1024 * 64;
	entries = roundup(entries, ctx->tqm_entries_multiple);
	entries = clamp_t(uint32_t, entries, ctx->tqm_min_entries_per_ring,
			  ctx->tqm_max_entries_per_ring);
	/* One TQM ring per queue plus the slow path (max_q + 1). */
	for (i = 0; i < softc->max_q + 1; i++) {
		ctx_pg = ctx->tqm_mem[i];
		ctx_pg->entries = entries;
		mem_size = ctx->tqm_entry_size * entries;
		rc = bnxt_alloc_ctx_pg_tbls(softc, ctx_pg, mem_size, 2, false);
		if (rc)
			return rc;
		ena |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_SP << i;
	}
	ena |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_DFLT_ENABLES;
	rc = bnxt_hwrm_func_backing_store_cfg(softc, ena);
	if (rc)
		device_printf(softc->dev, "Failed configuring context mem, rc = %d.\n",
			   rc);
	else
		ctx->flags |= BNXT_CTX_FLAG_INITED;

	/*
	 * NOTE(review): a backing_store_cfg failure is only logged and 0
	 * is still returned here -- confirm this is intentional.
	 */
	return 0;
}
1068 /*
1069  * If we update the index, a write barrier is needed after the write to ensure
1070  * the completion ring has space before the RX/TX ring does.  Since we can't
1071  * make the RX and AG doorbells covered by the same barrier without remapping
 * MSI-X vectors, we create the barrier over the entire doorbell bar.
1073  * TODO: Remap the MSI-X vectors to allow a barrier to only cover the doorbells
1074  *       for a single ring group.
1075  *
1076  * A barrier of just the size of the write is used to ensure the ordering
1077  * remains correct and no writes are lost.
1078  */
1079 
1080 static void bnxt_cuw_db_rx(void *db_ptr, uint16_t idx)
1081 {
1082 	struct bnxt_ring *ring = (struct bnxt_ring *) db_ptr;
1083 	struct bnxt_bar_info *db_bar = &ring->softc->doorbell_bar;
1084 
1085 	bus_space_barrier(db_bar->tag, db_bar->handle, ring->doorbell, 4,
1086 			BUS_SPACE_BARRIER_WRITE);
1087 	bus_space_write_4(db_bar->tag, db_bar->handle, ring->doorbell,
1088 			htole32(RX_DOORBELL_KEY_RX | idx));
1089 }
1090 
1091 static void bnxt_cuw_db_tx(void *db_ptr, uint16_t idx)
1092 {
1093 	struct bnxt_ring *ring = (struct bnxt_ring *) db_ptr;
1094 	struct bnxt_bar_info *db_bar = &ring->softc->doorbell_bar;
1095 
1096 	bus_space_barrier(db_bar->tag, db_bar->handle, ring->doorbell, 4,
1097 			BUS_SPACE_BARRIER_WRITE);
1098 	bus_space_write_4(db_bar->tag, db_bar->handle, ring->doorbell,
1099 			htole32(TX_DOORBELL_KEY_TX | idx));
1100 }
1101 
/*
 * Legacy (Cu+Wh) completion-ring doorbell: report the last consumed
 * index (when one exists) and mask or unmask the ring's interrupt.
 */
static void bnxt_cuw_db_cq(void *db_ptr, bool enable_irq)
{
	struct bnxt_cp_ring *cpr = (struct bnxt_cp_ring *) db_ptr;
	struct bnxt_bar_info *db_bar = &cpr->ring.softc->doorbell_bar;

	bus_space_barrier(db_bar->tag, db_bar->handle, cpr->ring.doorbell, 4,
			BUS_SPACE_BARRIER_WRITE);
	bus_space_write_4(db_bar->tag, db_bar->handle, cpr->ring.doorbell,
			htole32(CMPL_DOORBELL_KEY_CMPL |
				/* cons == UINT32_MAX: nothing consumed yet */
				((cpr->cons == UINT32_MAX) ? 0 :
				 (cpr->cons | CMPL_DOORBELL_IDX_VALID)) |
				((enable_irq) ? 0 : CMPL_DOORBELL_MASK)));
	/* Trailing barrier spans the whole bar; see comment above. */
	bus_space_barrier(db_bar->tag, db_bar->handle, 0, db_bar->size,
			BUS_SPACE_BARRIER_WRITE);
}
1117 
1118 static void bnxt_thor_db_rx(void *db_ptr, uint16_t idx)
1119 {
1120 	struct bnxt_ring *ring = (struct bnxt_ring *) db_ptr;
1121 	struct bnxt_bar_info *db_bar = &ring->softc->doorbell_bar;
1122 
1123 	bus_space_barrier(db_bar->tag, db_bar->handle, ring->doorbell, 8,
1124 			BUS_SPACE_BARRIER_WRITE);
1125 	bus_space_write_8(db_bar->tag, db_bar->handle, ring->doorbell,
1126 			htole64((DBR_PATH_L2 | DBR_TYPE_SRQ | idx) |
1127 				((uint64_t)ring->phys_id << DBR_XID_SFT)));
1128 }
1129 
1130 static void bnxt_thor_db_tx(void *db_ptr, uint16_t idx)
1131 {
1132 	struct bnxt_ring *ring = (struct bnxt_ring *) db_ptr;
1133 	struct bnxt_bar_info *db_bar = &ring->softc->doorbell_bar;
1134 
1135 	bus_space_barrier(db_bar->tag, db_bar->handle, ring->doorbell, 8,
1136 			BUS_SPACE_BARRIER_WRITE);
1137 	bus_space_write_8(db_bar->tag, db_bar->handle, ring->doorbell,
1138 			htole64((DBR_PATH_L2 | DBR_TYPE_SQ | idx) |
1139 				((uint64_t)ring->phys_id << DBR_XID_SFT)));
1140 }
1141 
/*
 * Thor (P5) RX completion-queue doorbell: report the consumer index
 * and arm (ARMALL) or disarm the CQ interrupt.
 */
static void bnxt_thor_db_rx_cq(void *db_ptr, bool enable_irq)
{
	struct bnxt_cp_ring *cpr = (struct bnxt_cp_ring *) db_ptr;
	struct bnxt_bar_info *db_bar = &cpr->ring.softc->doorbell_bar;
	dbc_dbc_t db_msg = { 0 };
	uint32_t cons = cpr->cons;

	/* UINT32_MAX marks a ring on which nothing has been consumed yet. */
	if (cons == UINT32_MAX)
		cons = 0;
	else
		cons = RING_NEXT(&cpr->ring, cons);

	db_msg.index = ((cons << DBC_DBC_INDEX_SFT) & DBC_DBC_INDEX_MASK);

	db_msg.type_path_xid = ((cpr->ring.phys_id << DBC_DBC_XID_SFT) &
			DBC_DBC_XID_MASK) | DBC_DBC_PATH_L2 |
		((enable_irq) ? DBC_DBC_TYPE_CQ_ARMALL: DBC_DBC_TYPE_CQ);

	/* Order prior descriptor writes before the 64-bit doorbell write. */
	bus_space_barrier(db_bar->tag, db_bar->handle, cpr->ring.doorbell, 8,
			BUS_SPACE_BARRIER_WRITE);
	bus_space_write_8(db_bar->tag, db_bar->handle, cpr->ring.doorbell,
			htole64(*(uint64_t *)&db_msg));
	/* Trailing barrier spans the whole bar; see comment above. */
	bus_space_barrier(db_bar->tag, db_bar->handle, 0, db_bar->size,
			BUS_SPACE_BARRIER_WRITE);
}
1167 
/*
 * Thor (P5) TX completion-queue doorbell: report the current consumer
 * index and arm (ARMALL) or disarm the CQ interrupt.
 */
static void bnxt_thor_db_tx_cq(void *db_ptr, bool enable_irq)
{
	struct bnxt_cp_ring *cpr = (struct bnxt_cp_ring *) db_ptr;
	struct bnxt_bar_info *db_bar = &cpr->ring.softc->doorbell_bar;
	dbc_dbc_t db_msg = { 0 };
	uint32_t cons = cpr->cons;

	db_msg.index = ((cons << DBC_DBC_INDEX_SFT) & DBC_DBC_INDEX_MASK);

	db_msg.type_path_xid = ((cpr->ring.phys_id << DBC_DBC_XID_SFT) &
			DBC_DBC_XID_MASK) | DBC_DBC_PATH_L2 |
		((enable_irq) ? DBC_DBC_TYPE_CQ_ARMALL: DBC_DBC_TYPE_CQ);

	/* Order prior descriptor writes before the 64-bit doorbell write. */
	bus_space_barrier(db_bar->tag, db_bar->handle, cpr->ring.doorbell, 8,
			BUS_SPACE_BARRIER_WRITE);
	bus_space_write_8(db_bar->tag, db_bar->handle, cpr->ring.doorbell,
			htole64(*(uint64_t *)&db_msg));
	/* Trailing barrier spans the whole bar; see comment above. */
	bus_space_barrier(db_bar->tag, db_bar->handle, 0, db_bar->size,
			BUS_SPACE_BARRIER_WRITE);
}
1188 
/*
 * Thor (P5) notification-queue doorbell: report the current consumer
 * index and arm or disarm the NQ interrupt.
 */
static void bnxt_thor_db_nq(void *db_ptr, bool enable_irq)
{
	struct bnxt_cp_ring *cpr = (struct bnxt_cp_ring *) db_ptr;
	struct bnxt_bar_info *db_bar = &cpr->ring.softc->doorbell_bar;
	dbc_dbc_t db_msg = { 0 };
	uint32_t cons = cpr->cons;

	db_msg.index = ((cons << DBC_DBC_INDEX_SFT) & DBC_DBC_INDEX_MASK);

	db_msg.type_path_xid = ((cpr->ring.phys_id << DBC_DBC_XID_SFT) &
			DBC_DBC_XID_MASK) | DBC_DBC_PATH_L2 |
		((enable_irq) ? DBC_DBC_TYPE_NQ_ARM: DBC_DBC_TYPE_NQ);

	/* Order prior descriptor writes before the 64-bit doorbell write. */
	bus_space_barrier(db_bar->tag, db_bar->handle, cpr->ring.doorbell, 8,
			BUS_SPACE_BARRIER_WRITE);
	bus_space_write_8(db_bar->tag, db_bar->handle, cpr->ring.doorbell,
			htole64(*(uint64_t *)&db_msg));
	/* Trailing barrier spans the whole bar; see comment above. */
	bus_space_barrier(db_bar->tag, db_bar->handle, 0, db_bar->size,
			BUS_SPACE_BARRIER_WRITE);
}
1209 
1210 struct bnxt_softc *bnxt_find_dev(uint32_t domain, uint32_t bus, uint32_t dev_fn, char *dev_name)
1211 {
1212 	struct bnxt_softc_list *sc = NULL;
1213 
1214 	SLIST_FOREACH(sc, &pf_list, next) {
1215 		/* get the softc reference based on device name */
1216 		if (dev_name && !strncmp(dev_name, if_name(iflib_get_ifp(sc->softc->ctx)), BNXT_MAX_STR)) {
1217 			return sc->softc;
1218 		}
1219 		/* get the softc reference based on domain,bus,device,function */
1220 		if (!dev_name &&
1221 		    (domain == sc->softc->domain) &&
1222 		    (bus == sc->softc->bus) &&
1223 		    (dev_fn == sc->softc->dev_fn)) {
1224 			return sc->softc;
1225 
1226 		}
1227 	}
1228 
1229 	return NULL;
1230 }
1231 
1232 /* Device setup and teardown */
1233 static int
1234 bnxt_attach_pre(if_ctx_t ctx)
1235 {
1236 	struct bnxt_softc *softc = iflib_get_softc(ctx);
1237 	if_softc_ctx_t scctx;
1238 	int rc = 0;
1239 
1240 	softc->ctx = ctx;
1241 	softc->dev = iflib_get_dev(ctx);
1242 	softc->media = iflib_get_media(ctx);
1243 	softc->scctx = iflib_get_softc_ctx(ctx);
1244 	softc->sctx = iflib_get_sctx(ctx);
1245 	scctx = softc->scctx;
1246 
1247 	/* TODO: Better way of detecting NPAR/VF is needed */
1248 	switch (pci_get_device(softc->dev)) {
1249 	case BCM57402_NPAR:
1250 	case BCM57404_NPAR:
1251 	case BCM57406_NPAR:
1252 	case BCM57407_NPAR:
1253 	case BCM57412_NPAR1:
1254 	case BCM57412_NPAR2:
1255 	case BCM57414_NPAR1:
1256 	case BCM57414_NPAR2:
1257 	case BCM57416_NPAR1:
1258 	case BCM57416_NPAR2:
1259 		softc->flags |= BNXT_FLAG_NPAR;
1260 		break;
1261 	case NETXTREME_C_VF1:
1262 	case NETXTREME_C_VF2:
1263 	case NETXTREME_C_VF3:
1264 	case NETXTREME_E_VF1:
1265 	case NETXTREME_E_VF2:
1266 	case NETXTREME_E_VF3:
1267 		softc->flags |= BNXT_FLAG_VF;
1268 		break;
1269 	}
1270 
1271 #define PCI_DEVFN(device, func) ((((device) & 0x1f) << 3) | ((func) & 0x07))
1272 	softc->domain = pci_get_domain(softc->dev);
1273 	softc->bus = pci_get_bus(softc->dev);
1274 	softc->slot = pci_get_slot(softc->dev);
1275 	softc->function = pci_get_function(softc->dev);
1276 	softc->dev_fn = PCI_DEVFN(softc->slot, softc->function);
1277 
1278 	if (bnxt_num_pfs == 0)
1279 		  SLIST_INIT(&pf_list);
1280 	bnxt_num_pfs++;
1281 	softc->list.softc = softc;
1282 	SLIST_INSERT_HEAD(&pf_list, &softc->list, next);
1283 
1284 	pci_enable_busmaster(softc->dev);
1285 
1286 	if (bnxt_pci_mapping(softc))
1287 		return (ENXIO);
1288 
1289 	/* HWRM setup/init */
1290 	BNXT_HWRM_LOCK_INIT(softc, device_get_nameunit(softc->dev));
1291 	rc = bnxt_alloc_hwrm_dma_mem(softc);
1292 	if (rc)
1293 		goto dma_fail;
1294 
1295 	/* Get firmware version and compare with driver */
1296 	softc->ver_info = malloc(sizeof(struct bnxt_ver_info),
1297 	    M_DEVBUF, M_NOWAIT | M_ZERO);
1298 	if (softc->ver_info == NULL) {
1299 		rc = ENOMEM;
1300 		device_printf(softc->dev,
1301 		    "Unable to allocate space for version info\n");
1302 		goto ver_alloc_fail;
1303 	}
1304 	/* Default minimum required HWRM version */
1305 	softc->ver_info->hwrm_min_major = HWRM_VERSION_MAJOR;
1306 	softc->ver_info->hwrm_min_minor = HWRM_VERSION_MINOR;
1307 	softc->ver_info->hwrm_min_update = HWRM_VERSION_UPDATE;
1308 
1309 	rc = bnxt_hwrm_ver_get(softc);
1310 	if (rc) {
1311 		device_printf(softc->dev, "attach: hwrm ver get failed\n");
1312 		goto ver_fail;
1313 	}
1314 
1315 	/* Now perform a function reset */
1316 	rc = bnxt_hwrm_func_reset(softc);
1317 
1318 	if ((softc->flags & BNXT_FLAG_SHORT_CMD) ||
1319 	    softc->hwrm_max_ext_req_len > BNXT_HWRM_MAX_REQ_LEN) {
1320 		rc = bnxt_alloc_hwrm_short_cmd_req(softc);
1321 		if (rc)
1322 			goto hwrm_short_cmd_alloc_fail;
1323 	}
1324 
1325 	if ((softc->ver_info->chip_num == BCM57508) ||
1326 	    (softc->ver_info->chip_num == BCM57504) ||
1327 	    (softc->ver_info->chip_num == BCM57502))
1328 		softc->flags |= BNXT_FLAG_CHIP_P5;
1329 
1330 	softc->flags |= BNXT_FLAG_TPA;
1331 
1332 	/* No TPA for Thor A0 */
1333 	if (BNXT_CHIP_P5(softc) && (!softc->ver_info->chip_rev) &&
1334 			(!softc->ver_info->chip_metal))
1335 		softc->flags &= ~BNXT_FLAG_TPA;
1336 
1337 	/* TBD ++ Add TPA support from Thor B1 */
1338 	if (BNXT_CHIP_P5(softc))
1339 		softc->flags &= ~BNXT_FLAG_TPA;
1340 
1341 	/* Get NVRAM info */
1342 	if (BNXT_PF(softc)) {
1343 		softc->nvm_info = malloc(sizeof(struct bnxt_nvram_info),
1344 		    M_DEVBUF, M_NOWAIT | M_ZERO);
1345 		if (softc->nvm_info == NULL) {
1346 			rc = ENOMEM;
1347 			device_printf(softc->dev,
1348 			    "Unable to allocate space for NVRAM info\n");
1349 			goto nvm_alloc_fail;
1350 		}
1351 
1352 		rc = bnxt_hwrm_nvm_get_dev_info(softc, &softc->nvm_info->mfg_id,
1353 		    &softc->nvm_info->device_id, &softc->nvm_info->sector_size,
1354 		    &softc->nvm_info->size, &softc->nvm_info->reserved_size,
1355 		    &softc->nvm_info->available_size);
1356 	}
1357 
1358 	if (BNXT_CHIP_P5(softc)) {
1359 		softc->db_ops.bnxt_db_tx = bnxt_thor_db_tx;
1360 		softc->db_ops.bnxt_db_rx = bnxt_thor_db_rx;
1361 		softc->db_ops.bnxt_db_rx_cq = bnxt_thor_db_rx_cq;
1362 		softc->db_ops.bnxt_db_tx_cq = bnxt_thor_db_tx_cq;
1363 		softc->db_ops.bnxt_db_nq = bnxt_thor_db_nq;
1364 	} else {
1365 		softc->db_ops.bnxt_db_tx = bnxt_cuw_db_tx;
1366 		softc->db_ops.bnxt_db_rx = bnxt_cuw_db_rx;
1367 		softc->db_ops.bnxt_db_rx_cq = bnxt_cuw_db_cq;
1368 		softc->db_ops.bnxt_db_tx_cq = bnxt_cuw_db_cq;
1369 	}
1370 
1371 	/* Register the driver with the FW */
1372 	rc = bnxt_hwrm_func_drv_rgtr(softc);
1373 	if (rc) {
1374 		device_printf(softc->dev, "attach: hwrm drv rgtr failed\n");
1375 		goto drv_rgtr_fail;
1376 	}
1377 
1378         rc = bnxt_hwrm_func_rgtr_async_events(softc, NULL, 0);
1379 	if (rc) {
1380 		device_printf(softc->dev, "attach: hwrm rgtr async evts failed\n");
1381 		goto drv_rgtr_fail;
1382 	}
1383 
1384 	/* Get the queue config */
1385 	rc = bnxt_hwrm_queue_qportcfg(softc);
1386 	if (rc) {
1387 		device_printf(softc->dev, "attach: hwrm qportcfg failed\n");
1388 		goto failed;
1389 	}
1390 
1391 	if (softc->hwrm_spec_code >= 0x10803) {
1392 		rc = bnxt_alloc_ctx_mem(softc);
1393 		if (rc) {
1394 			device_printf(softc->dev, "attach: alloc_ctx_mem failed\n");
1395 			return rc;
1396 		}
1397 		rc = bnxt_hwrm_func_resc_qcaps(softc, true);
1398 		if (!rc)
1399 			softc->flags |= BNXT_FLAG_FW_CAP_NEW_RM;
1400 	}
1401 
1402 	/* Get the HW capabilities */
1403 	rc = bnxt_hwrm_func_qcaps(softc);
1404 	if (rc)
1405 		goto failed;
1406 
1407 	/* Get the current configuration of this function */
1408 	rc = bnxt_hwrm_func_qcfg(softc);
1409 	if (rc) {
1410 		device_printf(softc->dev, "attach: hwrm func qcfg failed\n");
1411 		goto failed;
1412 	}
1413 
1414 	iflib_set_mac(ctx, softc->func.mac_addr);
1415 
1416 	scctx->isc_txrx = &bnxt_txrx;
1417 	scctx->isc_tx_csum_flags = (CSUM_IP | CSUM_TCP | CSUM_UDP |
1418 	    CSUM_TCP_IPV6 | CSUM_UDP_IPV6 | CSUM_TSO);
1419 	scctx->isc_capabilities = scctx->isc_capenable =
1420 	    /* These are translated to hwassit bits */
1421 	    IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6 | IFCAP_TSO4 | IFCAP_TSO6 |
1422 	    /* These are checked by iflib */
1423 	    IFCAP_LRO | IFCAP_VLAN_HWFILTER |
1424 	    /* These are part of the iflib mask */
1425 	    IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | IFCAP_VLAN_MTU |
1426 	    IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWTSO |
1427 	    /* These likely get lost... */
1428 	    IFCAP_VLAN_HWCSUM | IFCAP_JUMBO_MTU;
1429 
1430 	if (bnxt_wol_supported(softc))
1431 		scctx->isc_capabilities |= IFCAP_WOL_MAGIC;
1432 	bnxt_get_wol_settings(softc);
1433 	if (softc->wol)
1434 		scctx->isc_capenable |= IFCAP_WOL_MAGIC;
1435 
1436 	/* Get the queue config */
1437 	bnxt_get_wol_settings(softc);
1438 	if (BNXT_CHIP_P5(softc))
1439 		bnxt_hwrm_reserve_pf_rings(softc);
1440 	rc = bnxt_hwrm_func_qcfg(softc);
1441 	if (rc) {
1442 		device_printf(softc->dev, "attach: hwrm func qcfg failed\n");
1443 		goto failed;
1444 	}
1445 
1446 	bnxt_clear_ids(softc);
1447 	if (rc)
1448 		goto failed;
1449 
1450 	/* Now set up iflib sc */
1451 	scctx->isc_tx_nsegments = 31,
1452 	scctx->isc_tx_tso_segments_max = 31;
1453 	scctx->isc_tx_tso_size_max = BNXT_TSO_SIZE;
1454 	scctx->isc_tx_tso_segsize_max = BNXT_TSO_SIZE;
1455 	scctx->isc_vectors = softc->func.max_cp_rings;
1456 	scctx->isc_min_frame_size = BNXT_MIN_FRAME_SIZE;
1457 	scctx->isc_txrx = &bnxt_txrx;
1458 
1459 	if (scctx->isc_nrxd[0] <
1460 	    ((scctx->isc_nrxd[1] * 4) + scctx->isc_nrxd[2]))
1461 		device_printf(softc->dev,
1462 		    "WARNING: nrxd0 (%d) should be at least 4 * nrxd1 (%d) + nrxd2 (%d).  Driver may be unstable\n",
1463 		    scctx->isc_nrxd[0], scctx->isc_nrxd[1], scctx->isc_nrxd[2]);
1464 	if (scctx->isc_ntxd[0] < scctx->isc_ntxd[1] * 2)
1465 		device_printf(softc->dev,
1466 		    "WARNING: ntxd0 (%d) should be at least 2 * ntxd1 (%d).  Driver may be unstable\n",
1467 		    scctx->isc_ntxd[0], scctx->isc_ntxd[1]);
1468 	scctx->isc_txqsizes[0] = sizeof(struct cmpl_base) * scctx->isc_ntxd[0];
1469 	scctx->isc_txqsizes[1] = sizeof(struct tx_bd_short) *
1470 	    scctx->isc_ntxd[1];
1471 	scctx->isc_txqsizes[2] = sizeof(struct cmpl_base) * scctx->isc_ntxd[2];
1472 	scctx->isc_rxqsizes[0] = sizeof(struct cmpl_base) * scctx->isc_nrxd[0];
1473 	scctx->isc_rxqsizes[1] = sizeof(struct rx_prod_pkt_bd) *
1474 	    scctx->isc_nrxd[1];
1475 	scctx->isc_rxqsizes[2] = sizeof(struct rx_prod_pkt_bd) *
1476 	    scctx->isc_nrxd[2];
1477 
1478 	scctx->isc_nrxqsets_max = min(pci_msix_count(softc->dev)-1,
1479 	    softc->fn_qcfg.alloc_completion_rings - 1);
1480 	scctx->isc_nrxqsets_max = min(scctx->isc_nrxqsets_max,
1481 	    softc->fn_qcfg.alloc_rx_rings);
1482 	scctx->isc_nrxqsets_max = min(scctx->isc_nrxqsets_max,
1483 	    softc->fn_qcfg.alloc_vnics);
1484 	scctx->isc_ntxqsets_max = min(softc->fn_qcfg.alloc_tx_rings,
1485 	    softc->fn_qcfg.alloc_completion_rings - scctx->isc_nrxqsets_max - 1);
1486 
1487 	scctx->isc_rss_table_size = HW_HASH_INDEX_SIZE;
1488 	scctx->isc_rss_table_mask = scctx->isc_rss_table_size - 1;
1489 
1490 	/* iflib will map and release this bar */
1491 	scctx->isc_msix_bar = pci_msix_table_bar(softc->dev);
1492 
1493         /*
1494          * Default settings for HW LRO (TPA):
1495          *  Disable HW LRO by default
1496          *  Can be enabled after taking care of 'packet forwarding'
1497          */
1498 	if (softc->flags & BNXT_FLAG_TPA) {
1499 		softc->hw_lro.enable = 0;
1500 		softc->hw_lro.is_mode_gro = 0;
1501 		softc->hw_lro.max_agg_segs = 5; /* 2^5 = 32 segs */
1502 		softc->hw_lro.max_aggs = HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX;
1503 		softc->hw_lro.min_agg_len = 512;
1504 	}
1505 
1506 	/* Allocate the default completion ring */
1507 	softc->def_cp_ring.stats_ctx_id = HWRM_NA_SIGNATURE;
1508 	softc->def_cp_ring.ring.phys_id = (uint16_t)HWRM_NA_SIGNATURE;
1509 	softc->def_cp_ring.ring.softc = softc;
1510 	softc->def_cp_ring.ring.id = 0;
1511 	softc->def_cp_ring.ring.doorbell = (BNXT_CHIP_P5(softc)) ?
1512 		DB_PF_OFFSET_P5 : softc->def_cp_ring.ring.id * 0x80;
1513 	softc->def_cp_ring.ring.ring_size = PAGE_SIZE /
1514 	    sizeof(struct cmpl_base);
1515 	rc = iflib_dma_alloc(ctx,
1516 	    sizeof(struct cmpl_base) * softc->def_cp_ring.ring.ring_size,
1517 	    &softc->def_cp_ring_mem, 0);
1518 	softc->def_cp_ring.ring.vaddr = softc->def_cp_ring_mem.idi_vaddr;
1519 	softc->def_cp_ring.ring.paddr = softc->def_cp_ring_mem.idi_paddr;
1520 	iflib_config_gtask_init(ctx, &softc->def_cp_task, bnxt_def_cp_task,
1521 	    "dflt_cp");
1522 
1523 	rc = bnxt_init_sysctl_ctx(softc);
1524 	if (rc)
1525 		goto init_sysctl_failed;
1526 	if (BNXT_PF(softc)) {
1527 		rc = bnxt_create_nvram_sysctls(softc->nvm_info);
1528 		if (rc)
1529 			goto failed;
1530 	}
1531 
1532 	arc4rand(softc->vnic_info.rss_hash_key, HW_HASH_KEY_SIZE, 0);
1533 	softc->vnic_info.rss_hash_type =
1534 	    HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4 |
1535 	    HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4 |
1536 	    HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4 |
1537 	    HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6 |
1538 	    HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6 |
1539 	    HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
1540 	rc = bnxt_create_config_sysctls_pre(softc);
1541 	if (rc)
1542 		goto failed;
1543 
1544 	rc = bnxt_create_hw_lro_sysctls(softc);
1545 	if (rc)
1546 		goto failed;
1547 
1548 	rc = bnxt_create_pause_fc_sysctls(softc);
1549 	if (rc)
1550 		goto failed;
1551 
1552 	/* Initialize the vlan list */
1553 	SLIST_INIT(&softc->vnic_info.vlan_tags);
1554 	softc->vnic_info.vlan_tag_list.idi_vaddr = NULL;
1555 	softc->state_bv = bit_alloc(BNXT_STATE_MAX, M_DEVBUF,
1556 			M_WAITOK|M_ZERO);
1557 
1558 	return (rc);
1559 
1560 failed:
1561 	bnxt_free_sysctl_ctx(softc);
1562 init_sysctl_failed:
1563 	bnxt_hwrm_func_drv_unrgtr(softc, false);
1564 drv_rgtr_fail:
1565 	if (BNXT_PF(softc))
1566 		free(softc->nvm_info, M_DEVBUF);
1567 nvm_alloc_fail:
1568 	bnxt_free_hwrm_short_cmd_req(softc);
1569 hwrm_short_cmd_alloc_fail:
1570 ver_fail:
1571 	free(softc->ver_info, M_DEVBUF);
1572 ver_alloc_fail:
1573 	bnxt_free_hwrm_dma_mem(softc);
1574 dma_fail:
1575 	BNXT_HWRM_LOCK_DESTROY(softc);
1576 	bnxt_pci_mapping_free(softc);
1577 	pci_disable_busmaster(softc->dev);
1578 	return (rc);
1579 }
1580 
1581 static int
1582 bnxt_attach_post(if_ctx_t ctx)
1583 {
1584 	struct bnxt_softc *softc = iflib_get_softc(ctx);
1585 	if_t ifp = iflib_get_ifp(ctx);
1586 	int rc;
1587 
1588 	bnxt_create_config_sysctls_post(softc);
1589 
1590 	/* Update link state etc... */
1591 	rc = bnxt_probe_phy(softc);
1592 	if (rc)
1593 		goto failed;
1594 
1595 	/* Needs to be done after probing the phy */
1596 	bnxt_create_ver_sysctls(softc);
1597 	bnxt_add_media_types(softc);
1598 	ifmedia_set(softc->media, IFM_ETHER | IFM_AUTO);
1599 
1600 	softc->scctx->isc_max_frame_size = if_getmtu(ifp) + ETHER_HDR_LEN +
1601 	    ETHER_CRC_LEN;
1602 
1603 	softc->rx_buf_size = min(softc->scctx->isc_max_frame_size, BNXT_PAGE_SIZE);
1604 
1605 failed:
1606 	return rc;
1607 }
1608 
/*
 * Tear down the device: unregister from the PF list, quiesce the
 * hardware, free all interrupt, DMA and softc-owned allocations in
 * the reverse order of bnxt_attach_pre()/queue setup.
 */
static int
bnxt_detach(if_ctx_t ctx)
{
	struct bnxt_softc *softc = iflib_get_softc(ctx);
	struct bnxt_vlan_tag *tag;
	struct bnxt_vlan_tag *tmp;
	int i;

	SLIST_REMOVE(&pf_list, &softc->list, bnxt_softc_list, next);
	bnxt_num_pfs--;
	/* Program WOL filters (if enabled) before quiescing the NIC. */
	bnxt_wol_config(ctx);
	bnxt_do_disable_intr(&softc->def_cp_ring);
	bnxt_free_sysctl_ctx(softc);
	bnxt_hwrm_func_reset(softc);
	bnxt_free_ctx_mem(softc);
	bnxt_clear_ids(softc);
	iflib_irq_free(ctx, &softc->def_cp_ring.irq);
	iflib_config_gtask_deinit(&softc->def_cp_task);
	/* We need to free() these here... */
	for (i = softc->nrxqsets-1; i>=0; i--) {
		/* P5 chips take interrupts on NQs, older chips on RX CPs. */
		if (BNXT_CHIP_P5(softc))
			iflib_irq_free(ctx, &softc->nq_rings[i].irq);
		else
			iflib_irq_free(ctx, &softc->rx_cp_rings[i].irq);

	}
	iflib_dma_free(&softc->vnic_info.mc_list);
	iflib_dma_free(&softc->vnic_info.rss_hash_key_tbl);
	iflib_dma_free(&softc->vnic_info.rss_grp_tbl);
	if (softc->vnic_info.vlan_tag_list.idi_vaddr)
		iflib_dma_free(&softc->vnic_info.vlan_tag_list);
	SLIST_FOREACH_SAFE(tag, &softc->vnic_info.vlan_tags, next, tmp)
		free(tag, M_DEVBUF);
	iflib_dma_free(&softc->def_cp_ring_mem);
	for (i = 0; i < softc->nrxqsets; i++)
		free(softc->rx_rings[i].tpa_start, M_DEVBUF);
	free(softc->ver_info, M_DEVBUF);
	/* nvm_info is only allocated for PFs in bnxt_attach_pre(). */
	if (BNXT_PF(softc))
		free(softc->nvm_info, M_DEVBUF);

	bnxt_hwrm_func_drv_unrgtr(softc, false);
	bnxt_free_hwrm_dma_mem(softc);
	bnxt_free_hwrm_short_cmd_req(softc);
	BNXT_HWRM_LOCK_DESTROY(softc);

	free(softc->state_bv, M_DEVBUF);
	pci_disable_busmaster(softc->dev);
	bnxt_pci_mapping_free(softc);

	return 0;
}
1660 
/*
 * Release all firmware-side resources (rings, ring groups, filters,
 * VNICs, statistics contexts) via individual HWRM free commands.
 * Order matters: TX/RX rings must be freed before their completion
 * rings, and filters/VNICs before ring groups.  On the first failure
 * the remaining frees are skipped.
 */
static void
bnxt_hwrm_resource_free(struct bnxt_softc *softc)
{
	int i, rc = 0;

	/* Default completion ring first. */
	rc = bnxt_hwrm_ring_free(softc,
			HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
			&softc->def_cp_ring.ring,
			(uint16_t)HWRM_NA_SIGNATURE);
	if (rc)
		goto fail;

	/* Per-TX-queue: TX ring, its completion ring, its stats context. */
	for (i = 0; i < softc->ntxqsets; i++) {
		rc = bnxt_hwrm_ring_free(softc,
				HWRM_RING_ALLOC_INPUT_RING_TYPE_TX,
				&softc->tx_rings[i],
				softc->tx_cp_rings[i].ring.phys_id);
		if (rc)
			goto fail;

		rc = bnxt_hwrm_ring_free(softc,
				HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
				&softc->tx_cp_rings[i].ring,
				(uint16_t)HWRM_NA_SIGNATURE);
		if (rc)
			goto fail;

		rc = bnxt_hwrm_stat_ctx_free(softc, &softc->tx_cp_rings[i]);
		if (rc)
			goto fail;
	}
	/* L2 filter, VNIC and its RSS context. */
	rc = bnxt_hwrm_free_filter(softc);
	if (rc)
		goto fail;

	rc = bnxt_hwrm_vnic_free(softc, &softc->vnic_info);
	if (rc)
		goto fail;

	rc = bnxt_hwrm_vnic_ctx_free(softc, softc->vnic_info.rss_id);
	if (rc)
		goto fail;

	/* Per-RX-queue: group, AG ring, RX ring, completion ring, NQ, stats. */
	for (i = 0; i < softc->nrxqsets; i++) {
		rc = bnxt_hwrm_ring_grp_free(softc, &softc->grp_info[i]);
		if (rc)
			goto fail;

		rc = bnxt_hwrm_ring_free(softc,
				HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG,
				&softc->ag_rings[i],
				(uint16_t)HWRM_NA_SIGNATURE);
		if (rc)
			goto fail;

		rc = bnxt_hwrm_ring_free(softc,
				HWRM_RING_ALLOC_INPUT_RING_TYPE_RX,
				&softc->rx_rings[i],
				softc->rx_cp_rings[i].ring.phys_id);
		if (rc)
			goto fail;

		rc = bnxt_hwrm_ring_free(softc,
				HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
				&softc->rx_cp_rings[i].ring,
				(uint16_t)HWRM_NA_SIGNATURE);
		if (rc)
			goto fail;

		/* Notification queues only exist on P5 chips. */
		if (BNXT_CHIP_P5(softc)) {
			rc = bnxt_hwrm_ring_free(softc,
					HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ,
					&softc->nq_rings[i].ring,
					(uint16_t)HWRM_NA_SIGNATURE);
			if (rc)
				goto fail;
		}

		rc = bnxt_hwrm_stat_ctx_free(softc, &softc->rx_cp_rings[i]);
		if (rc)
			goto fail;
	}

fail:
	return;
}
1747 
1748 
/*
 * Reset the function's firmware state.  Pre-P5 chips support a direct
 * HWRM function reset; P5 (Thor) chips instead free each firmware
 * resource individually.
 */
static void
bnxt_func_reset(struct bnxt_softc *softc)
{

	if (BNXT_CHIP_P5(softc))
		bnxt_hwrm_resource_free(softc);
	else
		bnxt_hwrm_func_reset(softc);
}
1761 
/*
 * Fill the RSS indirection table, striping the RX queues across all
 * HW_HASH_INDEX_SIZE slots round-robin.  On P5 chips each queue
 * consumes TWO consecutive slots (RX ring phys id followed by the RX
 * completion ring phys id) -- hence the extra i++ inside the loop;
 * older chips store one ring-group id per slot.
 */
static void
bnxt_rss_grp_tbl_init(struct bnxt_softc *softc)
{
	uint16_t *rgt = (uint16_t *) softc->vnic_info.rss_grp_tbl.idi_vaddr;
	int i, j;

	for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++) {
		if (BNXT_CHIP_P5(softc)) {
			/* Pair of entries per queue: rx ring, then rx cp ring. */
			rgt[i++] = htole16(softc->rx_rings[j].phys_id);
			rgt[i] = htole16(softc->rx_cp_rings[j].ring.phys_id);
		} else {
			rgt[i] = htole16(softc->grp_info[j].grp_id);
		}
		/* Wrap back to queue 0 once all RX queues are used. */
		if (++j == softc->nrxqsets)
			j = 0;
	}
}
1779 
1780 /* Device configuration */
/*
 * Bring the interface up.  Resets (or, on P5, stops) the function and
 * then allocates every firmware object the data path needs, in order:
 * stats contexts, NQ/completion/RX/AG rings and ring groups per RX
 * queue, the async notification ring, the VNIC with its RSS context and
 * filters, and finally the TX completion/TX rings.  Any failure tears
 * the partially-built state back down via bnxt_func_reset().
 */
static void
bnxt_init(if_ctx_t ctx)
{
	struct bnxt_softc *softc = iflib_get_softc(ctx);
	struct ifmediareq ifmr;
	int i;
	int rc;

	if (!BNXT_CHIP_P5(softc)) {
		rc = bnxt_hwrm_func_reset(softc);
		if (rc)
			return;
	} else if (softc->is_dev_init) {
		/* P5 re-init: release the previous resources first. */
		bnxt_stop(ctx);
	}

	softc->is_dev_init = true;
	bnxt_clear_ids(softc);

	// TBD -- Check if it is needed for Thor as well
	if (BNXT_CHIP_P5(softc))
		goto skip_def_cp_ring;
	/* Allocate the default completion ring */
	softc->def_cp_ring.cons = UINT32_MAX;
	softc->def_cp_ring.v_bit = 1;
	bnxt_mark_cpr_invalid(&softc->def_cp_ring);
	rc = bnxt_hwrm_ring_alloc(softc,
			HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
			&softc->def_cp_ring.ring);
	if (rc)
		goto fail;
skip_def_cp_ring:
	for (i = 0; i < softc->nrxqsets; i++) {
		/* Allocate the statistics context */
		rc = bnxt_hwrm_stat_ctx_alloc(softc, &softc->rx_cp_rings[i],
		    softc->rx_stats[i].idi_paddr);
		if (rc)
			goto fail;

		if (BNXT_CHIP_P5(softc)) {
			/* Allocate the NQ */
			softc->nq_rings[i].cons = 0;
			softc->nq_rings[i].v_bit = 1;
			softc->nq_rings[i].last_idx = UINT32_MAX;
			bnxt_mark_cpr_invalid(&softc->nq_rings[i]);
			rc = bnxt_hwrm_ring_alloc(softc,
					HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ,
					&softc->nq_rings[i].ring);
			if (rc)
				goto fail;

			/* Arm the NQ doorbell. */
			softc->db_ops.bnxt_db_nq(&softc->nq_rings[i], 1);
		}
		/* Allocate the completion ring */
		softc->rx_cp_rings[i].cons = UINT32_MAX;
		softc->rx_cp_rings[i].v_bit = 1;
		softc->rx_cp_rings[i].last_idx = UINT32_MAX;
		bnxt_mark_cpr_invalid(&softc->rx_cp_rings[i]);
		rc = bnxt_hwrm_ring_alloc(softc,
				HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
				&softc->rx_cp_rings[i].ring);
		if (rc)
			goto fail;

		if (BNXT_CHIP_P5(softc))
			softc->db_ops.bnxt_db_rx_cq(&softc->rx_cp_rings[i], 1);

		/* Allocate the RX ring */
		rc = bnxt_hwrm_ring_alloc(softc,
		    HWRM_RING_ALLOC_INPUT_RING_TYPE_RX, &softc->rx_rings[i]);
		if (rc)
			goto fail;
		softc->db_ops.bnxt_db_rx(&softc->rx_rings[i], 0);

		/* Allocate the AG ring */
		rc = bnxt_hwrm_ring_alloc(softc,
				HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG,
				&softc->ag_rings[i]);
		if (rc)
			goto fail;
		softc->db_ops.bnxt_db_rx(&softc->ag_rings[i], 0);

		/* Allocate the ring group */
		softc->grp_info[i].stats_ctx =
		    softc->rx_cp_rings[i].stats_ctx_id;
		softc->grp_info[i].rx_ring_id = softc->rx_rings[i].phys_id;
		softc->grp_info[i].ag_ring_id = softc->ag_rings[i].phys_id;
		softc->grp_info[i].cp_ring_id =
		    softc->rx_cp_rings[i].ring.phys_id;
		rc = bnxt_hwrm_ring_grp_alloc(softc, &softc->grp_info[i]);
		if (rc)
			goto fail;
	}

	/* And now set the default CP / NQ ring for the async */
	rc = bnxt_cfg_async_cr(softc);
	if (rc)
		goto fail;

	/* Allocate the VNIC RSS context */
	rc = bnxt_hwrm_vnic_ctx_alloc(softc, &softc->vnic_info.rss_id);
	if (rc)
		goto fail;

	/* Allocate the vnic */
	softc->vnic_info.def_ring_grp = softc->grp_info[0].grp_id;
	softc->vnic_info.mru = softc->scctx->isc_max_frame_size;
	rc = bnxt_hwrm_vnic_alloc(softc, &softc->vnic_info);
	if (rc)
		goto fail;
	rc = bnxt_hwrm_vnic_cfg(softc, &softc->vnic_info);
	if (rc)
		goto fail;
	rc = bnxt_hwrm_vnic_set_hds(softc, &softc->vnic_info);
	if (rc)
		goto fail;
	/* Program the unicast MAC filter. */
	rc = bnxt_hwrm_set_filter(softc);
	if (rc)
		goto fail;

	/* Build and push the RSS indirection table. */
	bnxt_rss_grp_tbl_init(softc);

	rc = bnxt_hwrm_rss_cfg(softc, &softc->vnic_info,
	    softc->vnic_info.rss_hash_type);
	if (rc)
		goto fail;

	/* Configure TPA (HW LRO / aggregation). */
	rc = bnxt_hwrm_vnic_tpa_cfg(softc);
	if (rc)
		goto fail;

	for (i = 0; i < softc->ntxqsets; i++) {
		/* Allocate the statistics context */
		rc = bnxt_hwrm_stat_ctx_alloc(softc, &softc->tx_cp_rings[i],
		    softc->tx_stats[i].idi_paddr);
		if (rc)
			goto fail;

		/* Allocate the completion ring */
		softc->tx_cp_rings[i].cons = UINT32_MAX;
		softc->tx_cp_rings[i].v_bit = 1;
		bnxt_mark_cpr_invalid(&softc->tx_cp_rings[i]);
		rc = bnxt_hwrm_ring_alloc(softc,
				HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
				&softc->tx_cp_rings[i].ring);
		if (rc)
			goto fail;

		if (BNXT_CHIP_P5(softc))
			softc->db_ops.bnxt_db_tx_cq(&softc->tx_cp_rings[i], 1);

		/* Allocate the TX ring */
		rc = bnxt_hwrm_ring_alloc(softc,
				HWRM_RING_ALLOC_INPUT_RING_TYPE_TX,
				&softc->tx_rings[i]);
		if (rc)
			goto fail;
		softc->db_ops.bnxt_db_tx(&softc->tx_rings[i], 0);
	}

	/* Everything allocated: enable interrupts and report link state. */
	bnxt_do_enable_intr(&softc->def_cp_ring);
	bnxt_media_status(softc->ctx, &ifmr);
	bnxt_hwrm_cfa_l2_set_rx_mask(softc, &softc->vnic_info);
	return;

fail:
	/* Tear down whatever was allocated before the failure. */
	bnxt_func_reset(softc);
	bnxt_clear_ids(softc);
	return;
}
1951 
1952 static void
1953 bnxt_stop(if_ctx_t ctx)
1954 {
1955 	struct bnxt_softc *softc = iflib_get_softc(ctx);
1956 
1957 	softc->is_dev_init = false;
1958 	bnxt_do_disable_intr(&softc->def_cp_ring);
1959 	bnxt_func_reset(softc);
1960 	bnxt_clear_ids(softc);
1961 	return;
1962 }
1963 
1964 static u_int
1965 bnxt_copy_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
1966 {
1967 	uint8_t *mta = arg;
1968 
1969 	if (cnt == BNXT_MAX_MC_ADDRS)
1970 		return (1);
1971 
1972 	bcopy(LLADDR(sdl), &mta[cnt * ETHER_ADDR_LEN], ETHER_ADDR_LEN);
1973 
1974 	return (1);
1975 }
1976 
1977 static void
1978 bnxt_multi_set(if_ctx_t ctx)
1979 {
1980 	struct bnxt_softc *softc = iflib_get_softc(ctx);
1981 	if_t ifp = iflib_get_ifp(ctx);
1982 	uint8_t *mta;
1983 	int mcnt;
1984 
1985 	mta = softc->vnic_info.mc_list.idi_vaddr;
1986 	bzero(mta, softc->vnic_info.mc_list.idi_size);
1987 	mcnt = if_foreach_llmaddr(ifp, bnxt_copy_maddr, mta);
1988 
1989 	if (mcnt > BNXT_MAX_MC_ADDRS) {
1990 		softc->vnic_info.rx_mask |=
1991 		    HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
1992 		bnxt_hwrm_cfa_l2_set_rx_mask(softc, &softc->vnic_info);
1993 	} else {
1994 		softc->vnic_info.rx_mask &=
1995 		    ~HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
1996 		bus_dmamap_sync(softc->vnic_info.mc_list.idi_tag,
1997 		    softc->vnic_info.mc_list.idi_map, BUS_DMASYNC_PREWRITE);
1998 		softc->vnic_info.mc_list_count = mcnt;
1999 		softc->vnic_info.rx_mask |=
2000 		    HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
2001 		if (bnxt_hwrm_cfa_l2_set_rx_mask(softc, &softc->vnic_info))
2002 			device_printf(softc->dev,
2003 			    "set_multi: rx_mask set failed\n");
2004 	}
2005 }
2006 
2007 static int
2008 bnxt_mtu_set(if_ctx_t ctx, uint32_t mtu)
2009 {
2010 	struct bnxt_softc *softc = iflib_get_softc(ctx);
2011 
2012 	if (mtu > BNXT_MAX_MTU)
2013 		return EINVAL;
2014 
2015 	softc->scctx->isc_max_frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
2016 	softc->rx_buf_size = min(softc->scctx->isc_max_frame_size, BNXT_PAGE_SIZE);
2017 	return 0;
2018 }
2019 
2020 static void
2021 bnxt_media_status(if_ctx_t ctx, struct ifmediareq * ifmr)
2022 {
2023 	struct bnxt_softc *softc = iflib_get_softc(ctx);
2024 	struct bnxt_link_info *link_info = &softc->link_info;
2025 	struct ifmedia_entry *next;
2026 	uint64_t target_baudrate = bnxt_get_baudrate(link_info);
2027 	int active_media = IFM_UNKNOWN;
2028 
2029 	bnxt_update_link(softc, true);
2030 
2031 	ifmr->ifm_status = IFM_AVALID;
2032 	ifmr->ifm_active = IFM_ETHER;
2033 
2034 	if (link_info->link_up)
2035 		ifmr->ifm_status |= IFM_ACTIVE;
2036 	else
2037 		ifmr->ifm_status &= ~IFM_ACTIVE;
2038 
2039 	if (link_info->duplex == HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_CFG_FULL)
2040 		ifmr->ifm_active |= IFM_FDX;
2041 	else
2042 		ifmr->ifm_active |= IFM_HDX;
2043 
2044         /*
2045          * Go through the list of supported media which got prepared
2046          * as part of bnxt_add_media_types() using api ifmedia_add().
2047          */
2048 	LIST_FOREACH(next, &(iflib_get_media(ctx)->ifm_list), ifm_list) {
2049 		if (ifmedia_baudrate(next->ifm_media) == target_baudrate) {
2050 			active_media = next->ifm_media;
2051 			break;
2052 		}
2053 	}
2054 	ifmr->ifm_active |= active_media;
2055 
2056 	if (link_info->flow_ctrl.rx)
2057 		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
2058 	if (link_info->flow_ctrl.tx)
2059 		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
2060 
2061 	bnxt_report_link(softc);
2062 	return;
2063 }
2064 
2065 static int
2066 bnxt_media_change(if_ctx_t ctx)
2067 {
2068 	struct bnxt_softc *softc = iflib_get_softc(ctx);
2069 	struct ifmedia *ifm = iflib_get_media(ctx);
2070 	struct ifmediareq ifmr;
2071 	int rc;
2072 
2073 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2074 		return EINVAL;
2075 
2076 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
2077 	case IFM_100_T:
2078 		softc->link_info.autoneg &= ~BNXT_AUTONEG_SPEED;
2079 		softc->link_info.req_link_speed =
2080 		    HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100MB;
2081 		break;
2082 	case IFM_1000_KX:
2083 	case IFM_1000_T:
2084 	case IFM_1000_SGMII:
2085 		softc->link_info.autoneg &= ~BNXT_AUTONEG_SPEED;
2086 		softc->link_info.req_link_speed =
2087 		    HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_1GB;
2088 		break;
2089 	case IFM_2500_KX:
2090 	case IFM_2500_T:
2091 		softc->link_info.autoneg &= ~BNXT_AUTONEG_SPEED;
2092 		softc->link_info.req_link_speed =
2093 		    HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_2_5GB;
2094 		break;
2095 	case IFM_10G_CR1:
2096 	case IFM_10G_KR:
2097 	case IFM_10G_LR:
2098 	case IFM_10G_SR:
2099 	case IFM_10G_T:
2100 		softc->link_info.autoneg &= ~BNXT_AUTONEG_SPEED;
2101 		softc->link_info.req_link_speed =
2102 		    HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
2103 		break;
2104 	case IFM_20G_KR2:
2105 		softc->link_info.autoneg &= ~BNXT_AUTONEG_SPEED;
2106 		softc->link_info.req_link_speed =
2107 		    HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_20GB;
2108 		break;
2109 	case IFM_25G_CR:
2110 	case IFM_25G_KR:
2111 	case IFM_25G_SR:
2112 		softc->link_info.autoneg &= ~BNXT_AUTONEG_SPEED;
2113 		softc->link_info.req_link_speed =
2114 		    HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_25GB;
2115 		break;
2116 	case IFM_40G_CR4:
2117 	case IFM_40G_KR4:
2118 	case IFM_40G_LR4:
2119 	case IFM_40G_SR4:
2120 		softc->link_info.autoneg &= ~BNXT_AUTONEG_SPEED;
2121 		softc->link_info.req_link_speed =
2122 		    HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
2123 		break;
2124 	case IFM_50G_CR2:
2125 	case IFM_50G_KR2:
2126 		softc->link_info.autoneg &= ~BNXT_AUTONEG_SPEED;
2127 		softc->link_info.req_link_speed =
2128 		    HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
2129 		break;
2130 	case IFM_100G_CR4:
2131 	case IFM_100G_KR4:
2132 	case IFM_100G_LR4:
2133 	case IFM_100G_SR4:
2134 		softc->link_info.autoneg &= ~BNXT_AUTONEG_SPEED;
2135 		softc->link_info.req_link_speed =
2136 			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB;
2137 		break;
2138 	default:
2139 		device_printf(softc->dev,
2140 		    "Unsupported media type!  Using auto\n");
2141 		/* Fall-through */
2142 	case IFM_AUTO:
2143 		// Auto
2144 		softc->link_info.autoneg |= BNXT_AUTONEG_SPEED;
2145 		break;
2146 	}
2147 	rc = bnxt_hwrm_set_link_setting(softc, true, true, true);
2148 	bnxt_media_status(softc->ctx, &ifmr);
2149 	return rc;
2150 }
2151 
2152 static int
2153 bnxt_promisc_set(if_ctx_t ctx, int flags)
2154 {
2155 	struct bnxt_softc *softc = iflib_get_softc(ctx);
2156 	if_t ifp = iflib_get_ifp(ctx);
2157 	int rc;
2158 
2159 	if (if_getflags(ifp) & IFF_ALLMULTI ||
2160 	    if_llmaddr_count(ifp) > BNXT_MAX_MC_ADDRS)
2161 		softc->vnic_info.rx_mask |=
2162 		    HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
2163 	else
2164 		softc->vnic_info.rx_mask &=
2165 		    ~HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
2166 
2167 	if (if_getflags(ifp) & IFF_PROMISC)
2168 		softc->vnic_info.rx_mask |=
2169 		    HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS |
2170 		    HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ANYVLAN_NONVLAN;
2171 	else
2172 		softc->vnic_info.rx_mask &=
2173 		    ~(HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS);
2174 
2175 	rc = bnxt_hwrm_cfa_l2_set_rx_mask(softc, &softc->vnic_info);
2176 
2177 	return rc;
2178 }
2179 
2180 static uint64_t
2181 bnxt_get_counter(if_ctx_t ctx, ift_counter cnt)
2182 {
2183 	if_t ifp = iflib_get_ifp(ctx);
2184 
2185 	if (cnt < IFCOUNTERS)
2186 		return if_get_counter_default(ifp, cnt);
2187 
2188 	return 0;
2189 }
2190 
2191 static void
2192 bnxt_update_admin_status(if_ctx_t ctx)
2193 {
2194 	struct bnxt_softc *softc = iflib_get_softc(ctx);
2195 
2196 	/*
2197 	 * When SR-IOV is enabled, avoid each VF sending this HWRM
2198 	 * request every sec with which firmware timeouts can happen
2199 	 */
2200 	if (!BNXT_PF(softc))
2201 		return;
2202 
2203 	bnxt_hwrm_port_qstats(softc);
2204 
2205 	if (BNXT_CHIP_P5(softc)) {
2206 		struct ifmediareq ifmr;
2207 
2208 		if (bit_test(softc->state_bv, BNXT_STATE_LINK_CHANGE)) {
2209 			bit_clear(softc->state_bv, BNXT_STATE_LINK_CHANGE);
2210 			bnxt_media_status(softc->ctx, &ifmr);
2211 		}
2212 	}
2213 
2214 	return;
2215 }
2216 
2217 static void
2218 bnxt_if_timer(if_ctx_t ctx, uint16_t qid)
2219 {
2220 
2221 	struct bnxt_softc *softc = iflib_get_softc(ctx);
2222 	uint64_t ticks_now = ticks;
2223 
2224         /* Schedule bnxt_update_admin_status() once per sec */
2225 	if (ticks_now - softc->admin_ticks >= hz) {
2226 		softc->admin_ticks = ticks_now;
2227 		iflib_admin_intr_deferred(ctx);
2228 	}
2229 
2230 	return;
2231 }
2232 
2233 static void inline
2234 bnxt_do_enable_intr(struct bnxt_cp_ring *cpr)
2235 {
2236 	struct bnxt_softc *softc = cpr->ring.softc;
2237 
2238 	if (cpr->ring.phys_id == (uint16_t)HWRM_NA_SIGNATURE)
2239 		return;
2240 
2241 	if (BNXT_CHIP_P5(softc))
2242 		softc->db_ops.bnxt_db_nq(cpr, 1);
2243 	else
2244 		softc->db_ops.bnxt_db_rx_cq(cpr, 1);
2245 }
2246 
2247 static void inline
2248 bnxt_do_disable_intr(struct bnxt_cp_ring *cpr)
2249 {
2250 	struct bnxt_softc *softc = cpr->ring.softc;
2251 
2252 	if (cpr->ring.phys_id == (uint16_t)HWRM_NA_SIGNATURE)
2253 		return;
2254 
2255 	if (BNXT_CHIP_P5(softc))
2256 		softc->db_ops.bnxt_db_nq(cpr, 0);
2257 	else
2258 		softc->db_ops.bnxt_db_rx_cq(cpr, 0);
2259 }
2260 
2261 /* Enable all interrupts */
2262 static void
2263 bnxt_intr_enable(if_ctx_t ctx)
2264 {
2265 	struct bnxt_softc *softc = iflib_get_softc(ctx);
2266 	int i;
2267 
2268 	bnxt_do_enable_intr(&softc->def_cp_ring);
2269 	for (i = 0; i < softc->nrxqsets; i++)
2270 		if (BNXT_CHIP_P5(softc))
2271 			softc->db_ops.bnxt_db_nq(&softc->nq_rings[i], 1);
2272 		else
2273 			softc->db_ops.bnxt_db_rx_cq(&softc->rx_cp_rings[i], 1);
2274 
2275 	return;
2276 }
2277 
2278 /* Enable interrupt for a single queue */
2279 static int
2280 bnxt_tx_queue_intr_enable(if_ctx_t ctx, uint16_t qid)
2281 {
2282 	struct bnxt_softc *softc = iflib_get_softc(ctx);
2283 
2284 	if (BNXT_CHIP_P5(softc))
2285 		softc->db_ops.bnxt_db_nq(&softc->nq_rings[qid], 1);
2286 	else
2287 		softc->db_ops.bnxt_db_rx_cq(&softc->tx_cp_rings[qid], 1);
2288 
2289 	return 0;
2290 }
2291 
2292 static void
2293 bnxt_process_cmd_cmpl(struct bnxt_softc *softc, hwrm_cmpl_t *cmd_cmpl)
2294 {
2295 	device_printf(softc->dev, "cmd sequence number %d\n",
2296 			cmd_cmpl->sequence_id);
2297 	return;
2298 }
2299 
2300 static void
2301 bnxt_process_async_msg(struct bnxt_cp_ring *cpr, tx_cmpl_t *cmpl)
2302 {
2303 	struct bnxt_softc *softc = cpr->ring.softc;
2304 	uint16_t type = cmpl->flags_type & TX_CMPL_TYPE_MASK;
2305 
2306 	switch (type) {
2307 	case HWRM_CMPL_TYPE_HWRM_DONE:
2308 		bnxt_process_cmd_cmpl(softc, (hwrm_cmpl_t *)cmpl);
2309 		break;
2310 	case HWRM_ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT:
2311 		bnxt_handle_async_event(softc, (cmpl_base_t *) cmpl);
2312 		break;
2313 	default:
2314 		device_printf(softc->dev, "%s:%d Unhandled async message %x\n",
2315 				__FUNCTION__, __LINE__, type);
2316 		break;
2317 	}
2318 }
2319 
/*
 * Drain the notification queue for one RX queue set (P5 chips).
 * Non-CQ-notification entries are firmware/async messages and are
 * dispatched here; CQ notifications are left for the normal RX path.
 * The consumer index and valid bit are only published back to the ring
 * if at least one entry was consumed.
 */
static void
process_nq(struct bnxt_softc *softc, uint16_t nqid)
{
	struct bnxt_cp_ring *cpr = &softc->nq_rings[nqid];
	nq_cn_t *cmp = (nq_cn_t *) cpr->ring.vaddr;
	bool v_bit = cpr->v_bit;
	uint32_t cons = cpr->cons;
	uint16_t nq_type, nqe_cnt = 0;

	while (1) {
		/* Stop at the first entry whose valid bit doesn't match. */
		if (!NQ_VALID(&cmp[cons], v_bit))
			goto done;

		nq_type = NQ_CN_TYPE_MASK & cmp[cons].type;

		if (nq_type != NQ_CN_TYPE_CQ_NOTIFICATION)
			 bnxt_process_async_msg(cpr, (tx_cmpl_t *)&cmp[cons]);

		/* Advance the local cursor, flipping v_bit on ring wrap. */
		NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
		nqe_cnt++;
	}
done:
	if (nqe_cnt) {
		cpr->cons = cons;
		cpr->v_bit = v_bit;
	}
}
2347 
2348 static int
2349 bnxt_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid)
2350 {
2351 	struct bnxt_softc *softc = iflib_get_softc(ctx);
2352 
2353 	if (BNXT_CHIP_P5(softc)) {
2354 		process_nq(softc, qid);
2355 		softc->db_ops.bnxt_db_nq(&softc->nq_rings[qid], 1);
2356 	}
2357 	softc->db_ops.bnxt_db_rx_cq(&softc->rx_cp_rings[qid], 1);
2358         return 0;
2359 }
2360 
2361 /* Disable all interrupts */
2362 static void
2363 bnxt_disable_intr(if_ctx_t ctx)
2364 {
2365 	struct bnxt_softc *softc = iflib_get_softc(ctx);
2366 	int i;
2367 
2368 	/*
2369 	 * NOTE: These TX interrupts should never get enabled, so don't
2370 	 * update the index
2371 	 */
2372 	for (i = 0; i < softc->nrxqsets; i++)
2373 		if (BNXT_CHIP_P5(softc))
2374 			softc->db_ops.bnxt_db_nq(&softc->nq_rings[i], 0);
2375 		else
2376 			softc->db_ops.bnxt_db_rx_cq(&softc->rx_cp_rings[i], 0);
2377 
2378 
2379 	return;
2380 }
2381 
2382 static int
2383 bnxt_msix_intr_assign(if_ctx_t ctx, int msix)
2384 {
2385 	struct bnxt_softc *softc = iflib_get_softc(ctx);
2386 	struct bnxt_cp_ring *ring;
2387 	struct if_irq *irq;
2388 	uint16_t id;
2389 	int rc;
2390 	int i;
2391 	char irq_name[16];
2392 
2393 	if (BNXT_CHIP_P5(softc))
2394 		goto skip_default_cp;
2395 
2396 	rc = iflib_irq_alloc_generic(ctx, &softc->def_cp_ring.irq,
2397 	    softc->def_cp_ring.ring.id + 1, IFLIB_INTR_ADMIN,
2398 	    bnxt_handle_def_cp, softc, 0, "def_cp");
2399 	if (rc) {
2400 		device_printf(iflib_get_dev(ctx),
2401 		    "Failed to register default completion ring handler\n");
2402 		return rc;
2403 	}
2404 
2405 skip_default_cp:
2406 	for (i=0; i<softc->scctx->isc_nrxqsets; i++) {
2407 		if (BNXT_CHIP_P5(softc)) {
2408 			irq = &softc->nq_rings[i].irq;
2409 			id = softc->nq_rings[i].ring.id;
2410 			ring = &softc->nq_rings[i];
2411 		} else {
2412 			irq = &softc->rx_cp_rings[i].irq;
2413 			id = softc->rx_cp_rings[i].ring.id ;
2414 			ring = &softc->rx_cp_rings[i];
2415 		}
2416 		snprintf(irq_name, sizeof(irq_name), "rxq%d", i);
2417 		rc = iflib_irq_alloc_generic(ctx, irq, id + 1, IFLIB_INTR_RX,
2418 				bnxt_handle_isr, ring, i, irq_name);
2419 		if (rc) {
2420 			device_printf(iflib_get_dev(ctx),
2421 			    "Failed to register RX completion ring handler\n");
2422 			i--;
2423 			goto fail;
2424 		}
2425 	}
2426 
2427 	for (i=0; i<softc->scctx->isc_ntxqsets; i++)
2428 		iflib_softirq_alloc_generic(ctx, NULL, IFLIB_INTR_TX, NULL, i, "tx_cp");
2429 
2430 	return rc;
2431 
2432 fail:
2433 	for (; i>=0; i--)
2434 		iflib_irq_free(ctx, &softc->rx_cp_rings[i].irq);
2435 	iflib_irq_free(ctx, &softc->def_cp_ring.irq);
2436 	return rc;
2437 }
2438 
2439 /*
2440  * We're explicitly allowing duplicates here.  They will need to be
2441  * removed as many times as they are added.
2442  */
2443 static void
2444 bnxt_vlan_register(if_ctx_t ctx, uint16_t vtag)
2445 {
2446 	struct bnxt_softc *softc = iflib_get_softc(ctx);
2447 	struct bnxt_vlan_tag *new_tag;
2448 
2449 	new_tag = malloc(sizeof(struct bnxt_vlan_tag), M_DEVBUF, M_NOWAIT);
2450 	if (new_tag == NULL)
2451 		return;
2452 	new_tag->tag = vtag;
2453 	new_tag->filter_id = -1;
2454 	SLIST_INSERT_HEAD(&softc->vnic_info.vlan_tags, new_tag, next);
2455 };
2456 
2457 static void
2458 bnxt_vlan_unregister(if_ctx_t ctx, uint16_t vtag)
2459 {
2460 	struct bnxt_softc *softc = iflib_get_softc(ctx);
2461 	struct bnxt_vlan_tag *vlan_tag;
2462 
2463 	SLIST_FOREACH(vlan_tag, &softc->vnic_info.vlan_tags, next) {
2464 		if (vlan_tag->tag == vtag) {
2465 			SLIST_REMOVE(&softc->vnic_info.vlan_tags, vlan_tag,
2466 			    bnxt_vlan_tag, next);
2467 			free(vlan_tag, M_DEVBUF);
2468 			break;
2469 		}
2470 	}
2471 }
2472 
2473 static int
2474 bnxt_wol_config(if_ctx_t ctx)
2475 {
2476 	struct bnxt_softc *softc = iflib_get_softc(ctx);
2477 	if_t ifp = iflib_get_ifp(ctx);
2478 
2479 	if (!softc)
2480 		return -EBUSY;
2481 
2482 	if (!bnxt_wol_supported(softc))
2483 		return -ENOTSUP;
2484 
2485 	if (if_getcapenable(ifp) & IFCAP_WOL_MAGIC) {
2486 		if (!softc->wol) {
2487 			if (bnxt_hwrm_alloc_wol_fltr(softc))
2488 				return -EBUSY;
2489 			softc->wol = 1;
2490 		}
2491 	} else {
2492 		if (softc->wol) {
2493 			if (bnxt_hwrm_free_wol_fltr(softc))
2494 				return -EBUSY;
2495 			softc->wol = 0;
2496 		}
2497 	}
2498 
2499 	return 0;
2500 }
2501 
2502 static int
2503 bnxt_shutdown(if_ctx_t ctx)
2504 {
2505 	bnxt_wol_config(ctx);
2506 	return 0;
2507 }
2508 
2509 static int
2510 bnxt_suspend(if_ctx_t ctx)
2511 {
2512 	bnxt_wol_config(ctx);
2513 	return 0;
2514 }
2515 
2516 static int
2517 bnxt_resume(if_ctx_t ctx)
2518 {
2519 	struct bnxt_softc *softc = iflib_get_softc(ctx);
2520 
2521 	bnxt_get_wol_settings(softc);
2522 	return 0;
2523 }
2524 
2525 static int
2526 bnxt_priv_ioctl(if_ctx_t ctx, u_long command, caddr_t data)
2527 {
2528 	struct bnxt_softc *softc = iflib_get_softc(ctx);
2529 	struct ifreq *ifr = (struct ifreq *)data;
2530 	struct bnxt_ioctl_header *ioh;
2531 	size_t iol;
2532 	int rc = ENOTSUP;
2533 	struct bnxt_ioctl_data iod_storage, *iod = &iod_storage;
2534 
2535 	switch (command) {
2536 	case SIOCGPRIVATE_0:
2537 		if ((rc = priv_check(curthread, PRIV_DRIVER)) != 0)
2538 			goto exit;
2539 
2540 		ioh = ifr_buffer_get_buffer(ifr);
2541 		iol = ifr_buffer_get_length(ifr);
2542 		if (iol > sizeof(iod_storage))
2543 			return (EINVAL);
2544 
2545 		if ((rc = copyin(ioh, iod, iol)) != 0)
2546 			goto exit;
2547 
2548 		switch (iod->hdr.type) {
2549 		case BNXT_HWRM_NVM_FIND_DIR_ENTRY:
2550 		{
2551 			struct bnxt_ioctl_hwrm_nvm_find_dir_entry *find =
2552 			    &iod->find;
2553 
2554 			rc = bnxt_hwrm_nvm_find_dir_entry(softc, find->type,
2555 			    &find->ordinal, find->ext, &find->index,
2556 			    find->use_index, find->search_opt,
2557 			    &find->data_length, &find->item_length,
2558 			    &find->fw_ver);
2559 			if (rc) {
2560 				iod->hdr.rc = rc;
2561 				copyout(&iod->hdr.rc, &ioh->rc,
2562 				    sizeof(ioh->rc));
2563 			}
2564 			else {
2565 				iod->hdr.rc = 0;
2566 				copyout(iod, ioh, iol);
2567 			}
2568 
2569 			rc = 0;
2570 			goto exit;
2571 		}
2572 		case BNXT_HWRM_NVM_READ:
2573 		{
2574 			struct bnxt_ioctl_hwrm_nvm_read *rd = &iod->read;
2575 			struct iflib_dma_info dma_data;
2576 			size_t offset;
2577 			size_t remain;
2578 			size_t csize;
2579 
2580 			/*
2581 			 * Some HWRM versions can't read more than 0x8000 bytes
2582 			 */
2583 			rc = iflib_dma_alloc(softc->ctx,
2584 			    min(rd->length, 0x8000), &dma_data, BUS_DMA_NOWAIT);
2585 			if (rc)
2586 				break;
2587 			for (remain = rd->length, offset = 0;
2588 			    remain && offset < rd->length; offset += 0x8000) {
2589 				csize = min(remain, 0x8000);
2590 				rc = bnxt_hwrm_nvm_read(softc, rd->index,
2591 				    rd->offset + offset, csize, &dma_data);
2592 				if (rc) {
2593 					iod->hdr.rc = rc;
2594 					copyout(&iod->hdr.rc, &ioh->rc,
2595 					    sizeof(ioh->rc));
2596 					break;
2597 				}
2598 				else {
2599 					copyout(dma_data.idi_vaddr,
2600 					    rd->data + offset, csize);
2601 					iod->hdr.rc = 0;
2602 				}
2603 				remain -= csize;
2604 			}
2605 			if (iod->hdr.rc == 0)
2606 				copyout(iod, ioh, iol);
2607 
2608 			iflib_dma_free(&dma_data);
2609 			rc = 0;
2610 			goto exit;
2611 		}
2612 		case BNXT_HWRM_FW_RESET:
2613 		{
2614 			struct bnxt_ioctl_hwrm_fw_reset *rst =
2615 			    &iod->reset;
2616 
2617 			rc = bnxt_hwrm_fw_reset(softc, rst->processor,
2618 			    &rst->selfreset);
2619 			if (rc) {
2620 				iod->hdr.rc = rc;
2621 				copyout(&iod->hdr.rc, &ioh->rc,
2622 				    sizeof(ioh->rc));
2623 			}
2624 			else {
2625 				iod->hdr.rc = 0;
2626 				copyout(iod, ioh, iol);
2627 			}
2628 
2629 			rc = 0;
2630 			goto exit;
2631 		}
2632 		case BNXT_HWRM_FW_QSTATUS:
2633 		{
2634 			struct bnxt_ioctl_hwrm_fw_qstatus *qstat =
2635 			    &iod->status;
2636 
2637 			rc = bnxt_hwrm_fw_qstatus(softc, qstat->processor,
2638 			    &qstat->selfreset);
2639 			if (rc) {
2640 				iod->hdr.rc = rc;
2641 				copyout(&iod->hdr.rc, &ioh->rc,
2642 				    sizeof(ioh->rc));
2643 			}
2644 			else {
2645 				iod->hdr.rc = 0;
2646 				copyout(iod, ioh, iol);
2647 			}
2648 
2649 			rc = 0;
2650 			goto exit;
2651 		}
2652 		case BNXT_HWRM_NVM_WRITE:
2653 		{
2654 			struct bnxt_ioctl_hwrm_nvm_write *wr =
2655 			    &iod->write;
2656 
2657 			rc = bnxt_hwrm_nvm_write(softc, wr->data, true,
2658 			    wr->type, wr->ordinal, wr->ext, wr->attr,
2659 			    wr->option, wr->data_length, wr->keep,
2660 			    &wr->item_length, &wr->index);
2661 			if (rc) {
2662 				iod->hdr.rc = rc;
2663 				copyout(&iod->hdr.rc, &ioh->rc,
2664 				    sizeof(ioh->rc));
2665 			}
2666 			else {
2667 				iod->hdr.rc = 0;
2668 				copyout(iod, ioh, iol);
2669 			}
2670 
2671 			rc = 0;
2672 			goto exit;
2673 		}
2674 		case BNXT_HWRM_NVM_ERASE_DIR_ENTRY:
2675 		{
2676 			struct bnxt_ioctl_hwrm_nvm_erase_dir_entry *erase =
2677 			    &iod->erase;
2678 
2679 			rc = bnxt_hwrm_nvm_erase_dir_entry(softc, erase->index);
2680 			if (rc) {
2681 				iod->hdr.rc = rc;
2682 				copyout(&iod->hdr.rc, &ioh->rc,
2683 				    sizeof(ioh->rc));
2684 			}
2685 			else {
2686 				iod->hdr.rc = 0;
2687 				copyout(iod, ioh, iol);
2688 			}
2689 
2690 			rc = 0;
2691 			goto exit;
2692 		}
2693 		case BNXT_HWRM_NVM_GET_DIR_INFO:
2694 		{
2695 			struct bnxt_ioctl_hwrm_nvm_get_dir_info *info =
2696 			    &iod->dir_info;
2697 
2698 			rc = bnxt_hwrm_nvm_get_dir_info(softc, &info->entries,
2699 			    &info->entry_length);
2700 			if (rc) {
2701 				iod->hdr.rc = rc;
2702 				copyout(&iod->hdr.rc, &ioh->rc,
2703 				    sizeof(ioh->rc));
2704 			}
2705 			else {
2706 				iod->hdr.rc = 0;
2707 				copyout(iod, ioh, iol);
2708 			}
2709 
2710 			rc = 0;
2711 			goto exit;
2712 		}
2713 		case BNXT_HWRM_NVM_GET_DIR_ENTRIES:
2714 		{
2715 			struct bnxt_ioctl_hwrm_nvm_get_dir_entries *get =
2716 			    &iod->dir_entries;
2717 			struct iflib_dma_info dma_data;
2718 
2719 			rc = iflib_dma_alloc(softc->ctx, get->max_size,
2720 			    &dma_data, BUS_DMA_NOWAIT);
2721 			if (rc)
2722 				break;
2723 			rc = bnxt_hwrm_nvm_get_dir_entries(softc, &get->entries,
2724 			    &get->entry_length, &dma_data);
2725 			if (rc) {
2726 				iod->hdr.rc = rc;
2727 				copyout(&iod->hdr.rc, &ioh->rc,
2728 				    sizeof(ioh->rc));
2729 			}
2730 			else {
2731 				copyout(dma_data.idi_vaddr, get->data,
2732 				    get->entry_length * get->entries);
2733 				iod->hdr.rc = 0;
2734 				copyout(iod, ioh, iol);
2735 			}
2736 			iflib_dma_free(&dma_data);
2737 
2738 			rc = 0;
2739 			goto exit;
2740 		}
2741 		case BNXT_HWRM_NVM_VERIFY_UPDATE:
2742 		{
2743 			struct bnxt_ioctl_hwrm_nvm_verify_update *vrfy =
2744 			    &iod->verify;
2745 
2746 			rc = bnxt_hwrm_nvm_verify_update(softc, vrfy->type,
2747 			    vrfy->ordinal, vrfy->ext);
2748 			if (rc) {
2749 				iod->hdr.rc = rc;
2750 				copyout(&iod->hdr.rc, &ioh->rc,
2751 				    sizeof(ioh->rc));
2752 			}
2753 			else {
2754 				iod->hdr.rc = 0;
2755 				copyout(iod, ioh, iol);
2756 			}
2757 
2758 			rc = 0;
2759 			goto exit;
2760 		}
2761 		case BNXT_HWRM_NVM_INSTALL_UPDATE:
2762 		{
2763 			struct bnxt_ioctl_hwrm_nvm_install_update *inst =
2764 			    &iod->install;
2765 
2766 			rc = bnxt_hwrm_nvm_install_update(softc,
2767 			    inst->install_type, &inst->installed_items,
2768 			    &inst->result, &inst->problem_item,
2769 			    &inst->reset_required);
2770 			if (rc) {
2771 				iod->hdr.rc = rc;
2772 				copyout(&iod->hdr.rc, &ioh->rc,
2773 				    sizeof(ioh->rc));
2774 			}
2775 			else {
2776 				iod->hdr.rc = 0;
2777 				copyout(iod, ioh, iol);
2778 			}
2779 
2780 			rc = 0;
2781 			goto exit;
2782 		}
2783 		case BNXT_HWRM_NVM_MODIFY:
2784 		{
2785 			struct bnxt_ioctl_hwrm_nvm_modify *mod = &iod->modify;
2786 
2787 			rc = bnxt_hwrm_nvm_modify(softc, mod->index,
2788 			    mod->offset, mod->data, true, mod->length);
2789 			if (rc) {
2790 				iod->hdr.rc = rc;
2791 				copyout(&iod->hdr.rc, &ioh->rc,
2792 				    sizeof(ioh->rc));
2793 			}
2794 			else {
2795 				iod->hdr.rc = 0;
2796 				copyout(iod, ioh, iol);
2797 			}
2798 
2799 			rc = 0;
2800 			goto exit;
2801 		}
2802 		case BNXT_HWRM_FW_GET_TIME:
2803 		{
2804 			struct bnxt_ioctl_hwrm_fw_get_time *gtm =
2805 			    &iod->get_time;
2806 
2807 			rc = bnxt_hwrm_fw_get_time(softc, &gtm->year,
2808 			    &gtm->month, &gtm->day, &gtm->hour, &gtm->minute,
2809 			    &gtm->second, &gtm->millisecond, &gtm->zone);
2810 			if (rc) {
2811 				iod->hdr.rc = rc;
2812 				copyout(&iod->hdr.rc, &ioh->rc,
2813 				    sizeof(ioh->rc));
2814 			}
2815 			else {
2816 				iod->hdr.rc = 0;
2817 				copyout(iod, ioh, iol);
2818 			}
2819 
2820 			rc = 0;
2821 			goto exit;
2822 		}
2823 		case BNXT_HWRM_FW_SET_TIME:
2824 		{
2825 			struct bnxt_ioctl_hwrm_fw_set_time *stm =
2826 			    &iod->set_time;
2827 
2828 			rc = bnxt_hwrm_fw_set_time(softc, stm->year,
2829 			    stm->month, stm->day, stm->hour, stm->minute,
2830 			    stm->second, stm->millisecond, stm->zone);
2831 			if (rc) {
2832 				iod->hdr.rc = rc;
2833 				copyout(&iod->hdr.rc, &ioh->rc,
2834 				    sizeof(ioh->rc));
2835 			}
2836 			else {
2837 				iod->hdr.rc = 0;
2838 				copyout(iod, ioh, iol);
2839 			}
2840 
2841 			rc = 0;
2842 			goto exit;
2843 		}
2844 		}
2845 		break;
2846 	}
2847 
2848 exit:
2849 	return rc;
2850 }
2851 
2852 /*
2853  * Support functions
2854  */
2855 static int
2856 bnxt_probe_phy(struct bnxt_softc *softc)
2857 {
2858 	struct bnxt_link_info *link_info = &softc->link_info;
2859 	int rc = 0;
2860 
2861 	rc = bnxt_update_link(softc, false);
2862 	if (rc) {
2863 		device_printf(softc->dev,
2864 		    "Probe phy can't update link (rc: %x)\n", rc);
2865 		return (rc);
2866 	}
2867 
2868 	/*initialize the ethool setting copy with NVM settings */
2869 	if (link_info->auto_mode != HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE)
2870 		link_info->autoneg |= BNXT_AUTONEG_SPEED;
2871 
2872 	link_info->req_duplex = link_info->duplex_setting;
2873 	if (link_info->autoneg & BNXT_AUTONEG_SPEED)
2874 		link_info->req_link_speed = link_info->auto_link_speed;
2875 	else
2876 		link_info->req_link_speed = link_info->force_link_speed;
2877 	return (rc);
2878 }
2879 
2880 static void
2881 bnxt_add_media_types(struct bnxt_softc *softc)
2882 {
2883 	struct bnxt_link_info *link_info = &softc->link_info;
2884 	uint16_t supported;
2885 	uint8_t phy_type = get_phy_type(softc);
2886 
2887 	supported = link_info->support_speeds;
2888 
2889 	/* Auto is always supported */
2890 	ifmedia_add(softc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2891 
2892 	if (softc->flags & BNXT_FLAG_NPAR)
2893 		return;
2894 
2895 	switch (phy_type) {
2896 	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASECR4:
2897 	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASECR4:
2898 	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASECR_CA_L:
2899 	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASECR_CA_S:
2900 	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASECR_CA_N:
2901 	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASECR:
2902 		BNXT_IFMEDIA_ADD(supported, SPEEDS_100GB, IFM_100G_CR4);
2903 		BNXT_IFMEDIA_ADD(supported, SPEEDS_50GB, IFM_50G_CR2);
2904 		BNXT_IFMEDIA_ADD(supported, SPEEDS_40GB, IFM_40G_CR4);
2905 		BNXT_IFMEDIA_ADD(supported, SPEEDS_25GB, IFM_25G_CR);
2906 		BNXT_IFMEDIA_ADD(supported, SPEEDS_10GB, IFM_10G_CR1);
2907 		BNXT_IFMEDIA_ADD(supported, SPEEDS_1GB, IFM_1000_T);
2908 		break;
2909 
2910 	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASELR4:
2911 	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASELR4:
2912 	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASELR:
2913 		BNXT_IFMEDIA_ADD(supported, SPEEDS_100GB, IFM_100G_LR4);
2914 		BNXT_IFMEDIA_ADD(supported, SPEEDS_40GB, IFM_40G_LR4);
2915 		BNXT_IFMEDIA_ADD(supported, SPEEDS_25GB, IFM_25G_LR);
2916 		BNXT_IFMEDIA_ADD(supported, SPEEDS_10GB, IFM_10G_LR);
2917 		BNXT_IFMEDIA_ADD(supported, SPEEDS_1GB, IFM_1000_LX);
2918 		break;
2919 
2920 	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASESR10:
2921 	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASESR4:
2922 	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASESR4:
2923 	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASESR:
2924 	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASEER4:
2925 	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASEER4:
2926 	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASESR:
2927 	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_1G_BASESX:
2928 		BNXT_IFMEDIA_ADD(supported, SPEEDS_100GB, IFM_100G_SR4);
2929 		BNXT_IFMEDIA_ADD(supported, SPEEDS_40GB, IFM_40G_SR4);
2930 		BNXT_IFMEDIA_ADD(supported, SPEEDS_25GB, IFM_25G_SR);
2931 		BNXT_IFMEDIA_ADD(supported, SPEEDS_10GB, IFM_10G_SR);
2932 		BNXT_IFMEDIA_ADD(supported, SPEEDS_1GB, IFM_1000_SX);
2933 		break;
2934 
2935 	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR4:
2936 	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR2:
2937 	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR:
2938 		BNXT_IFMEDIA_ADD(supported, SPEEDS_100GB, IFM_100G_KR4);
2939 		BNXT_IFMEDIA_ADD(supported, SPEEDS_50GB, IFM_50G_KR2);
2940 		BNXT_IFMEDIA_ADD(supported, SPEEDS_40GB, IFM_40G_KR4);
2941 		BNXT_IFMEDIA_ADD(supported, SPEEDS_25GB, IFM_25G_KR);
2942 		BNXT_IFMEDIA_ADD(supported, SPEEDS_20GB, IFM_20G_KR2);
2943 		BNXT_IFMEDIA_ADD(supported, SPEEDS_10GB, IFM_10G_KR);
2944 		BNXT_IFMEDIA_ADD(supported, SPEEDS_1GB, IFM_1000_KX);
2945 		break;
2946 
2947 	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_ACTIVE_CABLE:
2948 		BNXT_IFMEDIA_ADD(supported, SPEEDS_25GB, IFM_25G_ACC);
2949 		BNXT_IFMEDIA_ADD(supported, SPEEDS_10GB, IFM_10G_AOC);
2950 		break;
2951 
2952 	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_1G_BASECX:
2953 		BNXT_IFMEDIA_ADD(supported, SPEEDS_1GBHD, IFM_1000_CX);
2954 		break;
2955 
2956 	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_1G_BASET:
2957 	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET:
2958 	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE:
2959 		BNXT_IFMEDIA_ADD(supported, SPEEDS_10GB, IFM_10G_T);
2960 		BNXT_IFMEDIA_ADD(supported, SPEEDS_2_5GB, IFM_2500_T);
2961 		BNXT_IFMEDIA_ADD(supported, SPEEDS_1GB, IFM_1000_T);
2962 		BNXT_IFMEDIA_ADD(supported, SPEEDS_100MB, IFM_100_T);
2963 		BNXT_IFMEDIA_ADD(supported, SPEEDS_10MB, IFM_10_T);
2964 		break;
2965 
2966 	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKX:
2967 		BNXT_IFMEDIA_ADD(supported, SPEEDS_10GB, IFM_10G_KR);
2968 		BNXT_IFMEDIA_ADD(supported, SPEEDS_2_5GB, IFM_2500_KX);
2969 		BNXT_IFMEDIA_ADD(supported, SPEEDS_1GB, IFM_1000_KX);
2970 		break;
2971 
2972 	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_SGMIIEXTPHY:
2973 		BNXT_IFMEDIA_ADD(supported, SPEEDS_1GB, IFM_1000_SGMII);
2974 		break;
2975 
2976 	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_UNKNOWN:
2977 		/* Only Autoneg is supported for TYPE_UNKNOWN */
2978 		device_printf(softc->dev, "Unknown phy type\n");
2979 		break;
2980 
2981         default:
2982 		/* Only Autoneg is supported for new phy type values */
2983 		device_printf(softc->dev, "phy type %d not supported by driver\n", phy_type);
2984 		break;
2985 	}
2986 
2987 	return;
2988 }
2989 
2990 static int
2991 bnxt_map_bar(struct bnxt_softc *softc, struct bnxt_bar_info *bar, int bar_num, bool shareable)
2992 {
2993 	uint32_t	flag;
2994 
2995 	if (bar->res != NULL) {
2996 		device_printf(softc->dev, "Bar %d already mapped\n", bar_num);
2997 		return EDOOFUS;
2998 	}
2999 
3000 	bar->rid = PCIR_BAR(bar_num);
3001 	flag = RF_ACTIVE;
3002 	if (shareable)
3003 		flag |= RF_SHAREABLE;
3004 
3005 	if ((bar->res =
3006 		bus_alloc_resource_any(softc->dev,
3007 			   SYS_RES_MEMORY,
3008 			   &bar->rid,
3009 			   flag)) == NULL) {
3010 		device_printf(softc->dev,
3011 		    "PCI BAR%d mapping failure\n", bar_num);
3012 		return (ENXIO);
3013 	}
3014 	bar->tag = rman_get_bustag(bar->res);
3015 	bar->handle = rman_get_bushandle(bar->res);
3016 	bar->size = rman_get_size(bar->res);
3017 
3018 	return 0;
3019 }
3020 
3021 static int
3022 bnxt_pci_mapping(struct bnxt_softc *softc)
3023 {
3024 	int rc;
3025 
3026 	rc = bnxt_map_bar(softc, &softc->hwrm_bar, 0, true);
3027 	if (rc)
3028 		return rc;
3029 
3030 	rc = bnxt_map_bar(softc, &softc->doorbell_bar, 2, false);
3031 
3032 	return rc;
3033 }
3034 
3035 static void
3036 bnxt_pci_mapping_free(struct bnxt_softc *softc)
3037 {
3038 	if (softc->hwrm_bar.res != NULL)
3039 		bus_release_resource(softc->dev, SYS_RES_MEMORY,
3040 		    softc->hwrm_bar.rid, softc->hwrm_bar.res);
3041 	softc->hwrm_bar.res = NULL;
3042 
3043 	if (softc->doorbell_bar.res != NULL)
3044 		bus_release_resource(softc->dev, SYS_RES_MEMORY,
3045 		    softc->doorbell_bar.rid, softc->doorbell_bar.res);
3046 	softc->doorbell_bar.res = NULL;
3047 }
3048 
3049 static int
3050 bnxt_update_link(struct bnxt_softc *softc, bool chng_link_state)
3051 {
3052 	struct bnxt_link_info *link_info = &softc->link_info;
3053 	uint8_t link_up = link_info->link_up;
3054 	int rc = 0;
3055 
3056 	rc = bnxt_hwrm_port_phy_qcfg(softc);
3057 	if (rc)
3058 		goto exit;
3059 
3060 	/* TODO: need to add more logic to report VF link */
3061 	if (chng_link_state) {
3062 		if (link_info->phy_link_status ==
3063 		    HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK)
3064 			link_info->link_up = 1;
3065 		else
3066 			link_info->link_up = 0;
3067 		if (link_up != link_info->link_up)
3068 			bnxt_report_link(softc);
3069 	} else {
3070 		/* always link down if not require to update link state */
3071 		link_info->link_up = 0;
3072 	}
3073 
3074 exit:
3075 	return rc;
3076 }
3077 
3078 void
3079 bnxt_report_link(struct bnxt_softc *softc)
3080 {
3081 	struct bnxt_link_info *link_info = &softc->link_info;
3082 	const char *duplex = NULL, *flow_ctrl = NULL;
3083 
3084 	if (link_info->link_up == link_info->last_link_up) {
3085 		if (!link_info->link_up)
3086 			return;
3087 		if ((link_info->duplex == link_info->last_duplex) &&
3088                     (!(BNXT_IS_FLOW_CTRL_CHANGED(link_info))))
3089 			return;
3090 	}
3091 
3092 	if (link_info->link_up) {
3093 		if (link_info->duplex ==
3094 		    HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_CFG_FULL)
3095 			duplex = "full duplex";
3096 		else
3097 			duplex = "half duplex";
3098 		if (link_info->flow_ctrl.tx & link_info->flow_ctrl.rx)
3099 			flow_ctrl = "FC - receive & transmit";
3100 		else if (link_info->flow_ctrl.tx)
3101 			flow_ctrl = "FC - transmit";
3102 		else if (link_info->flow_ctrl.rx)
3103 			flow_ctrl = "FC - receive";
3104 		else
3105 			flow_ctrl = "FC - none";
3106 		iflib_link_state_change(softc->ctx, LINK_STATE_UP,
3107 		    IF_Gbps(100));
3108 		device_printf(softc->dev, "Link is UP %s, %s - %d Mbps \n", duplex,
3109 		    flow_ctrl, (link_info->link_speed * 100));
3110 	} else {
3111 		iflib_link_state_change(softc->ctx, LINK_STATE_DOWN,
3112 		    bnxt_get_baudrate(&softc->link_info));
3113 		device_printf(softc->dev, "Link is Down\n");
3114 	}
3115 
3116 	link_info->last_link_up = link_info->link_up;
3117 	link_info->last_duplex = link_info->duplex;
3118 	link_info->last_flow_ctrl.tx = link_info->flow_ctrl.tx;
3119 	link_info->last_flow_ctrl.rx = link_info->flow_ctrl.rx;
3120 	link_info->last_flow_ctrl.autoneg = link_info->flow_ctrl.autoneg;
3121 	/* update media types */
3122 	ifmedia_removeall(softc->media);
3123 	bnxt_add_media_types(softc);
3124 	ifmedia_set(softc->media, IFM_ETHER | IFM_AUTO);
3125 }
3126 
3127 static int
3128 bnxt_handle_isr(void *arg)
3129 {
3130 	struct bnxt_cp_ring *cpr = arg;
3131 	struct bnxt_softc *softc = cpr->ring.softc;
3132 
3133 	cpr->int_count++;
3134 	/* Disable further interrupts for this queue */
3135 	if (!BNXT_CHIP_P5(softc))
3136 		softc->db_ops.bnxt_db_rx_cq(cpr, 0);
3137 
3138 	return FILTER_SCHEDULE_THREAD;
3139 }
3140 
3141 static int
3142 bnxt_handle_def_cp(void *arg)
3143 {
3144 	struct bnxt_softc *softc = arg;
3145 
3146 	softc->db_ops.bnxt_db_rx_cq(&softc->def_cp_ring, 0);
3147 	GROUPTASK_ENQUEUE(&softc->def_cp_task);
3148 	return FILTER_HANDLED;
3149 }
3150 
/*
 * Reset all firmware-assigned identifiers (ring IDs, stats contexts,
 * ring group IDs, VNIC/RSS IDs and the L2 filter ID) to the
 * HWRM_NA_SIGNATURE "not allocated" sentinel so that subsequent HWRM
 * allocation requests start from a clean slate.
 */
static void
bnxt_clear_ids(struct bnxt_softc *softc)
{
	int i;

	/* Default (async) completion ring and notification queue ring. */
	softc->def_cp_ring.stats_ctx_id = HWRM_NA_SIGNATURE;
	softc->def_cp_ring.ring.phys_id = (uint16_t)HWRM_NA_SIGNATURE;
	softc->def_nq_ring.stats_ctx_id = HWRM_NA_SIGNATURE;
	softc->def_nq_ring.ring.phys_id = (uint16_t)HWRM_NA_SIGNATURE;
	/* Per-TX-queue completion, TX and (optional) NQ rings. */
	for (i = 0; i < softc->ntxqsets; i++) {
		softc->tx_cp_rings[i].stats_ctx_id = HWRM_NA_SIGNATURE;
		softc->tx_cp_rings[i].ring.phys_id =
		    (uint16_t)HWRM_NA_SIGNATURE;
		softc->tx_rings[i].phys_id = (uint16_t)HWRM_NA_SIGNATURE;

		/* NQ rings are not allocated in every configuration. */
		if (!softc->nq_rings)
			continue;
		softc->nq_rings[i].stats_ctx_id = HWRM_NA_SIGNATURE;
		softc->nq_rings[i].ring.phys_id = (uint16_t)HWRM_NA_SIGNATURE;
	}
	/* Per-RX-queue completion, RX, aggregation and ring-group IDs. */
	for (i = 0; i < softc->nrxqsets; i++) {
		softc->rx_cp_rings[i].stats_ctx_id = HWRM_NA_SIGNATURE;
		softc->rx_cp_rings[i].ring.phys_id =
		    (uint16_t)HWRM_NA_SIGNATURE;
		softc->rx_rings[i].phys_id = (uint16_t)HWRM_NA_SIGNATURE;
		softc->ag_rings[i].phys_id = (uint16_t)HWRM_NA_SIGNATURE;
		softc->grp_info[i].grp_id = (uint16_t)HWRM_NA_SIGNATURE;
	}
	/* VNIC state: -1 filter ID means "no L2 filter allocated". */
	softc->vnic_info.filter_id = -1;
	softc->vnic_info.id = (uint16_t)HWRM_NA_SIGNATURE;
	softc->vnic_info.rss_id = (uint16_t)HWRM_NA_SIGNATURE;
	/* 0xff-fill the RSS indirection table to mark entries invalid. */
	memset(softc->vnic_info.rss_grp_tbl.idi_vaddr, 0xff,
	    softc->vnic_info.rss_grp_tbl.idi_size);
}
3185 
3186 static void
3187 bnxt_mark_cpr_invalid(struct bnxt_cp_ring *cpr)
3188 {
3189 	struct cmpl_base *cmp = (void *)cpr->ring.vaddr;
3190 	int i;
3191 
3192 	for (i = 0; i < cpr->ring.ring_size; i++)
3193 		cmp[i].info3_v = !cpr->v_bit;
3194 }
3195 
/*
 * Process one asynchronous event completion from firmware.
 *
 * Link-related events trigger a link refresh: on P5 chips the work is
 * deferred by setting BNXT_STATE_LINK_CHANGE, while older chips call
 * bnxt_media_status() directly.  All other known event IDs are
 * currently logged but not acted on.
 */
static void
bnxt_handle_async_event(struct bnxt_softc *softc, struct cmpl_base *cmpl)
{
	struct hwrm_async_event_cmpl *ae = (void *)cmpl;
	uint16_t async_id = le16toh(ae->event_id);
	struct ifmediareq ifmr;

	switch (async_id) {
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE:
		if (BNXT_CHIP_P5(softc))
			bit_set(softc->state_bv, BNXT_STATE_LINK_CHANGE);
		else
			bnxt_media_status(softc->ctx, &ifmr);
		break;
	/* Known but currently unimplemented event types. */
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_MTU_CHANGE:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_LOAD:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR:
		device_printf(softc->dev,
		    "Unhandled async completion type %u\n", async_id);
		break;
	default:
		device_printf(softc->dev,
		    "Unknown async completion type %u\n", async_id);
		break;
	}
}
3234 
/*
 * Deferred task that drains the default completion ring.
 *
 * Entries are walked from the last consumed position until the first
 * entry whose valid (phase) bit does not match the expected value,
 * i.e. one firmware has not written yet.  Only async events are
 * handled; all other completion types are merely logged.  The consumer
 * index and phase bit are then rolled back to the last entry actually
 * consumed before the doorbell is re-armed.
 */
static void
bnxt_def_cp_task(void *context)
{
	if_ctx_t ctx = context;
	struct bnxt_softc *softc = iflib_get_softc(ctx);
	struct bnxt_cp_ring *cpr = &softc->def_cp_ring;

	/* Handle completions on the default completion ring */
	struct cmpl_base *cmpl;
	uint32_t cons = cpr->cons;
	bool v_bit = cpr->v_bit;
	bool last_v_bit;
	uint32_t last_cons;
	uint16_t type;

	for (;;) {
		/* Remember the previous position so we can rewind to it. */
		last_cons = cons;
		last_v_bit = v_bit;
		NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
		cmpl = &((struct cmpl_base *)cpr->ring.vaddr)[cons];

		/* Stop at the first entry firmware has not completed yet. */
		if (!CMP_VALID(cmpl, v_bit))
			break;

		type = le16toh(cmpl->type) & CMPL_BASE_TYPE_MASK;
		switch (type) {
		case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
			bnxt_handle_async_event(softc, cmpl);
			break;
		/* Other completion types are not expected on this ring. */
		case CMPL_BASE_TYPE_TX_L2:
		case CMPL_BASE_TYPE_RX_L2:
		case CMPL_BASE_TYPE_RX_AGG:
		case CMPL_BASE_TYPE_RX_TPA_START:
		case CMPL_BASE_TYPE_RX_TPA_END:
		case CMPL_BASE_TYPE_STAT_EJECT:
		case CMPL_BASE_TYPE_HWRM_DONE:
		case CMPL_BASE_TYPE_HWRM_FWD_REQ:
		case CMPL_BASE_TYPE_HWRM_FWD_RESP:
		case CMPL_BASE_TYPE_CQ_NOTIFICATION:
		case CMPL_BASE_TYPE_SRQ_EVENT:
		case CMPL_BASE_TYPE_DBQ_EVENT:
		case CMPL_BASE_TYPE_QP_EVENT:
		case CMPL_BASE_TYPE_FUNC_EVENT:
			device_printf(softc->dev,
			    "Unhandled completion type %u\n", type);
			break;
		default:
			device_printf(softc->dev,
			    "Unknown completion type %u\n", type);
			break;
		}
	}

	/* Rewind to the last valid entry and re-arm the doorbell
	 * (the '1' re-enables interrupts, cf. the '0' in bnxt_handle_isr). */
	cpr->cons = last_cons;
	cpr->v_bit = last_v_bit;
	softc->db_ops.bnxt_db_rx_cq(cpr, 1);
}
3292 
3293 static uint8_t
3294 get_phy_type(struct bnxt_softc *softc)
3295 {
3296 	struct bnxt_link_info *link_info = &softc->link_info;
3297 	uint8_t phy_type = link_info->phy_type;
3298 	uint16_t supported;
3299 
3300 	if (phy_type != HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_UNKNOWN)
3301 		return phy_type;
3302 
3303 	/* Deduce the phy type from the media type and supported speeds */
3304 	supported = link_info->support_speeds;
3305 
3306 	if (link_info->media_type ==
3307 	    HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP)
3308 		return HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET;
3309 	if (link_info->media_type ==
3310 	    HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_DAC) {
3311 		if (supported & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2_5GB)
3312 			return HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKX;
3313 		if (supported & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_20GB)
3314 			return HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR;
3315 		return HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASECR;
3316 	}
3317 	if (link_info->media_type ==
3318 	    HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_FIBRE)
3319 		return HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASESR;
3320 
3321 	return phy_type;
3322 }
3323 
3324 bool
3325 bnxt_check_hwrm_version(struct bnxt_softc *softc)
3326 {
3327 	char buf[16];
3328 
3329 	sprintf(buf, "%hhu.%hhu.%hhu", softc->ver_info->hwrm_min_major,
3330 	    softc->ver_info->hwrm_min_minor, softc->ver_info->hwrm_min_update);
3331 	if (softc->ver_info->hwrm_min_major > softc->ver_info->hwrm_if_major) {
3332 		device_printf(softc->dev,
3333 		    "WARNING: HWRM version %s is too old (older than %s)\n",
3334 		    softc->ver_info->hwrm_if_ver, buf);
3335 		return false;
3336 	}
3337 	else if(softc->ver_info->hwrm_min_major ==
3338 	    softc->ver_info->hwrm_if_major) {
3339 		if (softc->ver_info->hwrm_min_minor >
3340 		    softc->ver_info->hwrm_if_minor) {
3341 			device_printf(softc->dev,
3342 			    "WARNING: HWRM version %s is too old (older than %s)\n",
3343 			    softc->ver_info->hwrm_if_ver, buf);
3344 			return false;
3345 		}
3346 		else if (softc->ver_info->hwrm_min_minor ==
3347 		    softc->ver_info->hwrm_if_minor) {
3348 			if (softc->ver_info->hwrm_min_update >
3349 			    softc->ver_info->hwrm_if_update) {
3350 				device_printf(softc->dev,
3351 				    "WARNING: HWRM version %s is too old (older than %s)\n",
3352 				    softc->ver_info->hwrm_if_ver, buf);
3353 				return false;
3354 			}
3355 		}
3356 	}
3357 	return true;
3358 }
3359 
3360 static uint64_t
3361 bnxt_get_baudrate(struct bnxt_link_info *link)
3362 {
3363 	switch (link->link_speed) {
3364 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
3365 		return IF_Mbps(100);
3366 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
3367 		return IF_Gbps(1);
3368 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
3369 		return IF_Gbps(2);
3370 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
3371 		return IF_Mbps(2500);
3372 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
3373 		return IF_Gbps(10);
3374 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
3375 		return IF_Gbps(20);
3376 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
3377 		return IF_Gbps(25);
3378 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
3379 		return IF_Gbps(40);
3380 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
3381 		return IF_Gbps(50);
3382 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB:
3383 		return IF_Gbps(100);
3384 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10MB:
3385 		return IF_Mbps(10);
3386 	}
3387 	return IF_Gbps(100);
3388 }
3389 
3390 static void
3391 bnxt_get_wol_settings(struct bnxt_softc *softc)
3392 {
3393 	uint16_t wol_handle = 0;
3394 
3395 	if (!bnxt_wol_supported(softc))
3396 		return;
3397 
3398 	do {
3399 		wol_handle = bnxt_hwrm_get_wol_fltrs(softc, wol_handle);
3400 	} while (wol_handle && wol_handle != BNXT_NO_MORE_WOL_FILTERS);
3401 }
3402