xref: /freebsd/sys/dev/bnxt/bnxt_en/if_bnxt.c (revision 862af86f)
1 /*-
2  * Broadcom NetXtreme-C/E network driver.
3  *
4  * Copyright (c) 2016 Broadcom, All Rights Reserved.
5  * The term Broadcom refers to Broadcom Limited and/or its subsidiaries
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
20  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
26  * THE POSSIBILITY OF SUCH DAMAGE.
27  */
28 
29 #include <sys/param.h>
30 #include <sys/socket.h>
31 #include <sys/kernel.h>
32 #include <sys/bus.h>
33 #include <sys/module.h>
34 #include <sys/rman.h>
35 #include <sys/endian.h>
36 #include <sys/sockio.h>
37 #include <sys/priv.h>
38 
39 #include <machine/bus.h>
40 #include <machine/resource.h>
41 
42 #include <dev/pci/pcireg.h>
43 
44 #include <net/if.h>
45 #include <net/if_dl.h>
46 #include <net/if_media.h>
47 #include <net/if_var.h>
48 #include <net/ethernet.h>
49 #include <net/iflib.h>
50 
51 #include <linux/pci.h>
52 #include <linux/kmod.h>
53 #include <linux/module.h>
54 #include <linux/delay.h>
55 #include <linux/idr.h>
56 #include <linux/netdevice.h>
57 #include <linux/etherdevice.h>
58 #include <linux/rcupdate.h>
59 #include "opt_inet.h"
60 #include "opt_inet6.h"
61 #include "opt_rss.h"
62 
63 #include "ifdi_if.h"
64 
65 #include "bnxt.h"
66 #include "bnxt_hwrm.h"
67 #include "bnxt_ioctl.h"
68 #include "bnxt_sysctl.h"
69 #include "hsi_struct_def.h"
70 #include "bnxt_mgmt.h"
71 #include "bnxt_ulp.h"
72 #include "bnxt_auxbus_compat.h"
73 
74 /*
75  * PCI Device ID Table
76  */
77 
/*
 * Each PVID() entry pairs a PCI device ID with the probe string reported
 * for that part.  iflib matches attaching devices against this table
 * (it is also exported for PNP via IFLIB_PNP_INFO below).  The table
 * must be terminated by PVID_END.
 */
static const pci_vendor_info_t bnxt_vendor_info_array[] =
{
    PVID(BROADCOM_VENDOR_ID, BCM57301,
	"Broadcom BCM57301 NetXtreme-C 10Gb Ethernet Controller"),
    PVID(BROADCOM_VENDOR_ID, BCM57302,
	"Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet Controller"),
    PVID(BROADCOM_VENDOR_ID, BCM57304,
	"Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet Controller"),
    PVID(BROADCOM_VENDOR_ID, BCM57311,
	"Broadcom BCM57311 NetXtreme-C 10Gb Ethernet"),
    PVID(BROADCOM_VENDOR_ID, BCM57312,
	"Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet"),
    PVID(BROADCOM_VENDOR_ID, BCM57314,
	"Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet"),
    PVID(BROADCOM_VENDOR_ID, BCM57402,
	"Broadcom BCM57402 NetXtreme-E 10Gb Ethernet Controller"),
    PVID(BROADCOM_VENDOR_ID, BCM57402_NPAR,
	"Broadcom BCM57402 NetXtreme-E Partition"),
    PVID(BROADCOM_VENDOR_ID, BCM57404,
	"Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet Controller"),
    PVID(BROADCOM_VENDOR_ID, BCM57404_NPAR,
	"Broadcom BCM57404 NetXtreme-E Partition"),
    PVID(BROADCOM_VENDOR_ID, BCM57406,
	"Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet Controller"),
    PVID(BROADCOM_VENDOR_ID, BCM57406_NPAR,
	"Broadcom BCM57406 NetXtreme-E Partition"),
    PVID(BROADCOM_VENDOR_ID, BCM57407,
	"Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet Controller"),
    PVID(BROADCOM_VENDOR_ID, BCM57407_NPAR,
	"Broadcom BCM57407 NetXtreme-E Ethernet Partition"),
    PVID(BROADCOM_VENDOR_ID, BCM57407_SFP,
	"Broadcom BCM57407 NetXtreme-E 25Gb Ethernet Controller"),
    PVID(BROADCOM_VENDOR_ID, BCM57412,
	"Broadcom BCM57412 NetXtreme-E 10Gb Ethernet"),
    PVID(BROADCOM_VENDOR_ID, BCM57412_NPAR1,
	"Broadcom BCM57412 NetXtreme-E Ethernet Partition"),
    PVID(BROADCOM_VENDOR_ID, BCM57412_NPAR2,
	"Broadcom BCM57412 NetXtreme-E Ethernet Partition"),
    PVID(BROADCOM_VENDOR_ID, BCM57414,
	"Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet"),
    PVID(BROADCOM_VENDOR_ID, BCM57414_NPAR1,
	"Broadcom BCM57414 NetXtreme-E Ethernet Partition"),
    PVID(BROADCOM_VENDOR_ID, BCM57414_NPAR2,
	"Broadcom BCM57414 NetXtreme-E Ethernet Partition"),
    PVID(BROADCOM_VENDOR_ID, BCM57416,
	"Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet"),
    PVID(BROADCOM_VENDOR_ID, BCM57416_NPAR1,
	"Broadcom BCM57416 NetXtreme-E Ethernet Partition"),
    PVID(BROADCOM_VENDOR_ID, BCM57416_NPAR2,
	"Broadcom BCM57416 NetXtreme-E Ethernet Partition"),
    PVID(BROADCOM_VENDOR_ID, BCM57416_SFP,
	"Broadcom BCM57416 NetXtreme-E 10Gb Ethernet"),
    PVID(BROADCOM_VENDOR_ID, BCM57417,
	"Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet"),
    PVID(BROADCOM_VENDOR_ID, BCM57417_NPAR1,
	"Broadcom BCM57417 NetXtreme-E Ethernet Partition"),
    PVID(BROADCOM_VENDOR_ID, BCM57417_NPAR2,
	"Broadcom BCM57417 NetXtreme-E Ethernet Partition"),
    PVID(BROADCOM_VENDOR_ID, BCM57417_SFP,
	"Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet"),
    PVID(BROADCOM_VENDOR_ID, BCM57454,
	"Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet"),
    PVID(BROADCOM_VENDOR_ID, BCM58700,
	"Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet"),
    PVID(BROADCOM_VENDOR_ID, BCM57508,
	"Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet"),
    PVID(BROADCOM_VENDOR_ID, BCM57504,
	"Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet"),
    PVID(BROADCOM_VENDOR_ID, BCM57502,
	"Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet"),
    PVID(BROADCOM_VENDOR_ID, NETXTREME_C_VF1,
	"Broadcom NetXtreme-C Ethernet Virtual Function"),
    PVID(BROADCOM_VENDOR_ID, NETXTREME_C_VF2,
	"Broadcom NetXtreme-C Ethernet Virtual Function"),
    PVID(BROADCOM_VENDOR_ID, NETXTREME_C_VF3,
	"Broadcom NetXtreme-C Ethernet Virtual Function"),
    PVID(BROADCOM_VENDOR_ID, NETXTREME_E_VF1,
	"Broadcom NetXtreme-E Ethernet Virtual Function"),
    PVID(BROADCOM_VENDOR_ID, NETXTREME_E_VF2,
	"Broadcom NetXtreme-E Ethernet Virtual Function"),
    PVID(BROADCOM_VENDOR_ID, NETXTREME_E_VF3,
	"Broadcom NetXtreme-E Ethernet Virtual Function"),
    /* required last entry */

    PVID_END
};
164 
165 /*
166  * Function prototypes
167  */
168 
169 SLIST_HEAD(softc_list, bnxt_softc_list) pf_list;
170 int bnxt_num_pfs = 0;
171 
172 void
173 process_nq(struct bnxt_softc *softc, uint16_t nqid);
174 static void *bnxt_register(device_t dev);
175 
176 /* Soft queue setup and teardown */
177 static int bnxt_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
178     uint64_t *paddrs, int ntxqs, int ntxqsets);
179 static int bnxt_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
180     uint64_t *paddrs, int nrxqs, int nrxqsets);
181 static void bnxt_queues_free(if_ctx_t ctx);
182 
183 /* Device setup and teardown */
184 static int bnxt_attach_pre(if_ctx_t ctx);
185 static int bnxt_attach_post(if_ctx_t ctx);
186 static int bnxt_detach(if_ctx_t ctx);
187 
188 /* Device configuration */
189 static void bnxt_init(if_ctx_t ctx);
190 static void bnxt_stop(if_ctx_t ctx);
191 static void bnxt_multi_set(if_ctx_t ctx);
192 static int bnxt_mtu_set(if_ctx_t ctx, uint32_t mtu);
193 static void bnxt_media_status(if_ctx_t ctx, struct ifmediareq * ifmr);
194 static int bnxt_media_change(if_ctx_t ctx);
195 static int bnxt_promisc_set(if_ctx_t ctx, int flags);
196 static uint64_t	bnxt_get_counter(if_ctx_t, ift_counter);
197 static void bnxt_update_admin_status(if_ctx_t ctx);
198 static void bnxt_if_timer(if_ctx_t ctx, uint16_t qid);
199 
200 /* Interrupt enable / disable */
201 static void bnxt_intr_enable(if_ctx_t ctx);
202 static int bnxt_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid);
203 static int bnxt_tx_queue_intr_enable(if_ctx_t ctx, uint16_t qid);
204 static void bnxt_disable_intr(if_ctx_t ctx);
205 static int bnxt_msix_intr_assign(if_ctx_t ctx, int msix);
206 
207 /* vlan support */
208 static void bnxt_vlan_register(if_ctx_t ctx, uint16_t vtag);
209 static void bnxt_vlan_unregister(if_ctx_t ctx, uint16_t vtag);
210 
211 /* ioctl */
212 static int bnxt_priv_ioctl(if_ctx_t ctx, u_long command, caddr_t data);
213 
214 static int bnxt_shutdown(if_ctx_t ctx);
215 static int bnxt_suspend(if_ctx_t ctx);
216 static int bnxt_resume(if_ctx_t ctx);
217 
218 /* Internal support functions */
219 static int bnxt_probe_phy(struct bnxt_softc *softc);
220 static void bnxt_add_media_types(struct bnxt_softc *softc);
221 static int bnxt_pci_mapping(struct bnxt_softc *softc);
222 static void bnxt_pci_mapping_free(struct bnxt_softc *softc);
223 static int bnxt_update_link(struct bnxt_softc *softc, bool chng_link_state);
224 static int bnxt_handle_def_cp(void *arg);
225 static int bnxt_handle_isr(void *arg);
226 static void bnxt_clear_ids(struct bnxt_softc *softc);
227 static void inline bnxt_do_enable_intr(struct bnxt_cp_ring *cpr);
228 static void inline bnxt_do_disable_intr(struct bnxt_cp_ring *cpr);
229 static void bnxt_mark_cpr_invalid(struct bnxt_cp_ring *cpr);
230 static void bnxt_def_cp_task(void *context);
231 static void bnxt_handle_async_event(struct bnxt_softc *softc,
232     struct cmpl_base *cmpl);
233 static uint64_t bnxt_get_baudrate(struct bnxt_link_info *link);
234 static void bnxt_get_wol_settings(struct bnxt_softc *softc);
235 static int bnxt_wol_config(if_ctx_t ctx);
236 static bool bnxt_if_needs_restart(if_ctx_t, enum iflib_restart_event);
237 static int bnxt_i2c_req(if_ctx_t ctx, struct ifi2creq *i2c);
238 static void bnxt_get_port_module_status(struct bnxt_softc *softc);
239 static void bnxt_rdma_aux_device_init(struct bnxt_softc *softc);
240 static void bnxt_rdma_aux_device_uninit(struct bnxt_softc *softc);
241 static void bnxt_queue_fw_reset_work(struct bnxt_softc *bp, unsigned long delay);
242 void bnxt_queue_sp_work(struct bnxt_softc *bp);
243 
244 void bnxt_fw_reset(struct bnxt_softc *bp);
245 /*
246  * Device Interface Declaration
247  */
248 
/*
 * Newbus device method table.  Everything except device_register is
 * delegated to iflib, which calls back into the IFDI method table
 * (bnxt_iflib_methods) defined further down.  bnxt_register() hands
 * iflib this driver's shared-context template.
 */
static device_method_t bnxt_methods[] = {
	/* Device interface */
	DEVMETHOD(device_register, bnxt_register),
	DEVMETHOD(device_probe, iflib_device_probe),
	DEVMETHOD(device_attach, iflib_device_attach),
	DEVMETHOD(device_detach, iflib_device_detach),
	DEVMETHOD(device_shutdown, iflib_device_shutdown),
	DEVMETHOD(device_suspend, iflib_device_suspend),
	DEVMETHOD(device_resume, iflib_device_resume),
	DEVMETHOD_END
};

/* Newbus driver glue: name, methods, and softc size for this driver. */
static driver_t bnxt_driver = {
	"bnxt", bnxt_methods, sizeof(struct bnxt_softc),
};
264 
/* Attach the driver to the PCI bus and declare its module dependencies. */
DRIVER_MODULE(bnxt, pci, bnxt_driver, 0, 0);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DEPEND(if_bnxt, pci, 1, 1, 1);
MODULE_DEPEND(if_bnxt, ether, 1, 1, 1);
MODULE_DEPEND(if_bnxt, iflib, 1, 1, 1);
MODULE_DEPEND(if_bnxt, linuxkpi, 1, 1, 1);
MODULE_VERSION(if_bnxt, 1);

/* Export the device-ID table for plug-and-play module loading. */
IFLIB_PNP_INFO(pci, bnxt, bnxt_vendor_info_array);
275 
276 void writel_fbsd(struct bnxt_softc *bp, u32, u8, u32);
277 u32 readl_fbsd(struct bnxt_softc *bp, u32, u8);
278 
/*
 * Linux-compat 32-bit register read.  bar_idx 0 selects the doorbell
 * BAR; any non-zero index selects the HWRM BAR.
 */
u32 readl_fbsd(struct bnxt_softc *bp, u32 reg_off, u8 bar_idx)
{

	if (bar_idx)
		return bus_space_read_4(bp->hwrm_bar.tag, bp->hwrm_bar.handle, reg_off);
	return bus_space_read_4(bp->doorbell_bar.tag, bp->doorbell_bar.handle, reg_off);
}
287 
/*
 * Linux-compat 32-bit register write.  The value is byte-swapped to
 * little-endian once up front; bar_idx 0 targets the doorbell BAR,
 * any non-zero index targets the HWRM BAR.
 */
void writel_fbsd(struct bnxt_softc *bp, u32 reg_off, u8 bar_idx, u32 val)
{
	const u32 le_val = htole32(val);

	if (bar_idx)
		bus_space_write_4(bp->hwrm_bar.tag, bp->hwrm_bar.handle, reg_off, le_val);
	else
		bus_space_write_4(bp->doorbell_bar.tag, bp->doorbell_bar.handle, reg_off, le_val);
}
296 
297 static DEFINE_IDA(bnxt_aux_dev_ids);
298 
/*
 * IFDI (iflib driver interface) method table.  iflib dispatches queue
 * allocation, device configuration, interrupt management, VLAN events
 * and ioctls to these handlers.
 */
static device_method_t bnxt_iflib_methods[] = {
	/* Soft queue setup and teardown */
	DEVMETHOD(ifdi_tx_queues_alloc, bnxt_tx_queues_alloc),
	DEVMETHOD(ifdi_rx_queues_alloc, bnxt_rx_queues_alloc),
	DEVMETHOD(ifdi_queues_free, bnxt_queues_free),

	/* Device attach / detach */
	DEVMETHOD(ifdi_attach_pre, bnxt_attach_pre),
	DEVMETHOD(ifdi_attach_post, bnxt_attach_post),
	DEVMETHOD(ifdi_detach, bnxt_detach),

	/* Runtime configuration */
	DEVMETHOD(ifdi_init, bnxt_init),
	DEVMETHOD(ifdi_stop, bnxt_stop),
	DEVMETHOD(ifdi_multi_set, bnxt_multi_set),
	DEVMETHOD(ifdi_mtu_set, bnxt_mtu_set),
	DEVMETHOD(ifdi_media_status, bnxt_media_status),
	DEVMETHOD(ifdi_media_change, bnxt_media_change),
	DEVMETHOD(ifdi_promisc_set, bnxt_promisc_set),
	DEVMETHOD(ifdi_get_counter, bnxt_get_counter),
	DEVMETHOD(ifdi_update_admin_status, bnxt_update_admin_status),
	DEVMETHOD(ifdi_timer, bnxt_if_timer),

	/* Interrupt enable / disable */
	DEVMETHOD(ifdi_intr_enable, bnxt_intr_enable),
	DEVMETHOD(ifdi_tx_queue_intr_enable, bnxt_tx_queue_intr_enable),
	DEVMETHOD(ifdi_rx_queue_intr_enable, bnxt_rx_queue_intr_enable),
	DEVMETHOD(ifdi_intr_disable, bnxt_disable_intr),
	DEVMETHOD(ifdi_msix_intr_assign, bnxt_msix_intr_assign),

	/* VLAN registration */
	DEVMETHOD(ifdi_vlan_register, bnxt_vlan_register),
	DEVMETHOD(ifdi_vlan_unregister, bnxt_vlan_unregister),

	DEVMETHOD(ifdi_priv_ioctl, bnxt_priv_ioctl),

	/* Power management and module (SFP) I2C access */
	DEVMETHOD(ifdi_suspend, bnxt_suspend),
	DEVMETHOD(ifdi_shutdown, bnxt_shutdown),
	DEVMETHOD(ifdi_resume, bnxt_resume),
	DEVMETHOD(ifdi_i2c_req, bnxt_i2c_req),

	DEVMETHOD(ifdi_needs_restart, bnxt_if_needs_restart),

	DEVMETHOD_END
};
339 
/* iflib driver glue: shares the "bnxt" name and softc with the newbus driver. */
static driver_t bnxt_iflib_driver = {
	"bnxt", bnxt_iflib_methods, sizeof(struct bnxt_softc)
};
343 
344 /*
345  * iflib shared context
346  */
347 
#define BNXT_DRIVER_VERSION	"230.0.133.0"
const char bnxt_driver_version[] = BNXT_DRIVER_VERSION;
extern struct if_txrx bnxt_txrx;
/*
 * Shared-context template handed to iflib by bnxt_register().  The
 * sizes here bound what iflib allocates per queue set.  Three rings
 * per TX qset (completion, TX, NQ) and per RX qset (completion, RX,
 * AG) match the index layout used by the queue-alloc routines below.
 */
static struct if_shared_ctx bnxt_sctx_init = {
	.isc_magic = IFLIB_MAGIC,
	.isc_driver = &bnxt_iflib_driver,
	.isc_nfl = 2,				// Number of Free Lists
	.isc_flags = IFLIB_HAS_RXCQ | IFLIB_HAS_TXCQ | IFLIB_NEED_ETHER_PAD,
	.isc_q_align = PAGE_SIZE,
	.isc_tx_maxsize = BNXT_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tx_maxsegsize = BNXT_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tso_maxsize = BNXT_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tso_maxsegsize = BNXT_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_rx_maxsize = BNXT_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_rx_maxsegsize = BNXT_TSO_SIZE + sizeof(struct ether_vlan_header),

	// Only use a single segment to avoid page size constraints
	.isc_rx_nsegments = 1,
	.isc_ntxqs = 3,
	.isc_nrxqs = 3,
	.isc_nrxd_min = {16, 16, 16},
	/* Defaults sized so each descriptor array fills whole pages */
	.isc_nrxd_default = {PAGE_SIZE / sizeof(struct cmpl_base) * 8,
	    PAGE_SIZE / sizeof(struct rx_prod_pkt_bd),
	    PAGE_SIZE / sizeof(struct rx_prod_pkt_bd)},
	.isc_nrxd_max = {BNXT_MAX_RXD, BNXT_MAX_RXD, BNXT_MAX_RXD},
	.isc_ntxd_min = {16, 16, 16},
	.isc_ntxd_default = {PAGE_SIZE / sizeof(struct cmpl_base) * 2,
	    PAGE_SIZE / sizeof(struct tx_bd_short),
	    /* NQ depth 4096 */
	    PAGE_SIZE / sizeof(struct cmpl_base) * 16},
	.isc_ntxd_max = {BNXT_MAX_TXD, BNXT_MAX_TXD, BNXT_MAX_TXD},

	.isc_admin_intrcnt = BNXT_ROCE_IRQ_COUNT,
	.isc_vendor_info = bnxt_vendor_info_array,
	.isc_driver_version = bnxt_driver_version,
};
384 
385 #define PCI_SUBSYSTEM_ID	0x2e
386 static struct workqueue_struct *bnxt_pf_wq;
387 
388 extern void bnxt_destroy_irq(struct bnxt_softc *softc);
389 
390 /*
391  * Device Methods
392  */
393 
394 static void *
bnxt_register(device_t dev)395 bnxt_register(device_t dev)
396 {
397 	return (&bnxt_sctx_init);
398 }
399 
400 static void
bnxt_nq_alloc(struct bnxt_softc * softc,int nqsets)401 bnxt_nq_alloc(struct bnxt_softc *softc, int nqsets)
402 {
403 
404 	if (softc->nq_rings)
405 		return;
406 
407 	softc->nq_rings = malloc(sizeof(struct bnxt_cp_ring) * nqsets,
408 	    M_DEVBUF, M_NOWAIT | M_ZERO);
409 }
410 
411 static void
bnxt_nq_free(struct bnxt_softc * softc)412 bnxt_nq_free(struct bnxt_softc *softc)
413 {
414 
415 	if (softc->nq_rings)
416 		free(softc->nq_rings, M_DEVBUF);
417 	softc->nq_rings = NULL;
418 }
419 
420 /*
421  * Device Dependent Configuration Functions
422 */
423 
424 /* Soft queue setup and teardown */
425 static int
bnxt_tx_queues_alloc(if_ctx_t ctx,caddr_t * vaddrs,uint64_t * paddrs,int ntxqs,int ntxqsets)426 bnxt_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
427     uint64_t *paddrs, int ntxqs, int ntxqsets)
428 {
429 	struct bnxt_softc *softc;
430 	int i;
431 	int rc;
432 
433 	softc = iflib_get_softc(ctx);
434 
435 	if (BNXT_CHIP_P5(softc)) {
436 		bnxt_nq_alloc(softc, ntxqsets);
437 		if (!softc->nq_rings) {
438 			device_printf(iflib_get_dev(ctx),
439 					"unable to allocate NQ rings\n");
440 			rc = ENOMEM;
441 			goto nq_alloc_fail;
442 		}
443 	}
444 
445 	softc->tx_cp_rings = malloc(sizeof(struct bnxt_cp_ring) * ntxqsets,
446 	    M_DEVBUF, M_NOWAIT | M_ZERO);
447 	if (!softc->tx_cp_rings) {
448 		device_printf(iflib_get_dev(ctx),
449 		    "unable to allocate TX completion rings\n");
450 		rc = ENOMEM;
451 		goto cp_alloc_fail;
452 	}
453 	softc->tx_rings = malloc(sizeof(struct bnxt_ring) * ntxqsets,
454 	    M_DEVBUF, M_NOWAIT | M_ZERO);
455 	if (!softc->tx_rings) {
456 		device_printf(iflib_get_dev(ctx),
457 		    "unable to allocate TX rings\n");
458 		rc = ENOMEM;
459 		goto ring_alloc_fail;
460 	}
461 
462 	for (i=0; i < ntxqsets; i++) {
463 		rc = iflib_dma_alloc(ctx, sizeof(struct ctx_hw_stats),
464 				&softc->tx_stats[i], 0);
465 		if (rc)
466 			goto dma_alloc_fail;
467 		bus_dmamap_sync(softc->tx_stats[i].idi_tag, softc->tx_stats[i].idi_map,
468 				BUS_DMASYNC_PREREAD);
469 	}
470 
471 	for (i = 0; i < ntxqsets; i++) {
472 		/* Set up the completion ring */
473 		softc->tx_cp_rings[i].stats_ctx_id = HWRM_NA_SIGNATURE;
474 		softc->tx_cp_rings[i].ring.phys_id =
475 		    (uint16_t)HWRM_NA_SIGNATURE;
476 		softc->tx_cp_rings[i].ring.softc = softc;
477 		softc->tx_cp_rings[i].ring.idx = i;
478 		softc->tx_cp_rings[i].ring.id =
479 		    (softc->scctx->isc_nrxqsets * 2) + 1 + i;
480 		softc->tx_cp_rings[i].ring.doorbell = (BNXT_CHIP_P5(softc)) ?
481 			DB_PF_OFFSET_P5: softc->tx_cp_rings[i].ring.id * 0x80;
482 		softc->tx_cp_rings[i].ring.ring_size =
483 		    softc->scctx->isc_ntxd[0];
484 		softc->tx_cp_rings[i].ring.vaddr = vaddrs[i * ntxqs];
485 		softc->tx_cp_rings[i].ring.paddr = paddrs[i * ntxqs];
486 
487 		/* Set up the TX ring */
488 		softc->tx_rings[i].phys_id = (uint16_t)HWRM_NA_SIGNATURE;
489 		softc->tx_rings[i].softc = softc;
490 		softc->tx_rings[i].idx = i;
491 		softc->tx_rings[i].id =
492 		    (softc->scctx->isc_nrxqsets * 2) + 1 + i;
493 		softc->tx_rings[i].doorbell = (BNXT_CHIP_P5(softc)) ?
494 			DB_PF_OFFSET_P5 : softc->tx_rings[i].id * 0x80;
495 		softc->tx_rings[i].ring_size = softc->scctx->isc_ntxd[1];
496 		softc->tx_rings[i].vaddr = vaddrs[i * ntxqs + 1];
497 		softc->tx_rings[i].paddr = paddrs[i * ntxqs + 1];
498 
499 		bnxt_create_tx_sysctls(softc, i);
500 
501 		if (BNXT_CHIP_P5(softc)) {
502 			/* Set up the Notification ring (NQ) */
503 			softc->nq_rings[i].stats_ctx_id = HWRM_NA_SIGNATURE;
504 			softc->nq_rings[i].ring.phys_id =
505 				(uint16_t)HWRM_NA_SIGNATURE;
506 			softc->nq_rings[i].ring.softc = softc;
507 			softc->nq_rings[i].ring.idx = i;
508 			softc->nq_rings[i].ring.id = i;
509 			softc->nq_rings[i].ring.doorbell = (BNXT_CHIP_P5(softc)) ?
510 				DB_PF_OFFSET_P5 : softc->nq_rings[i].ring.id * 0x80;
511 			softc->nq_rings[i].ring.ring_size = softc->scctx->isc_ntxd[2];
512 			softc->nq_rings[i].ring.vaddr = vaddrs[i * ntxqs + 2];
513 			softc->nq_rings[i].ring.paddr = paddrs[i * ntxqs + 2];
514 		}
515 	}
516 
517 	softc->ntxqsets = ntxqsets;
518 	return rc;
519 
520 dma_alloc_fail:
521 	for (i = i - 1; i >= 0; i--)
522 		iflib_dma_free(&softc->tx_stats[i]);
523 	free(softc->tx_rings, M_DEVBUF);
524 ring_alloc_fail:
525 	free(softc->tx_cp_rings, M_DEVBUF);
526 cp_alloc_fail:
527 	bnxt_nq_free(softc);
528 nq_alloc_fail:
529 	return rc;
530 }
531 
532 static void
bnxt_queues_free(if_ctx_t ctx)533 bnxt_queues_free(if_ctx_t ctx)
534 {
535 	struct bnxt_softc *softc = iflib_get_softc(ctx);
536 	int i;
537 
538 	// Free TX queues
539 	for (i=0; i<softc->ntxqsets; i++)
540 		iflib_dma_free(&softc->tx_stats[i]);
541 	free(softc->tx_rings, M_DEVBUF);
542 	softc->tx_rings = NULL;
543 	free(softc->tx_cp_rings, M_DEVBUF);
544 	softc->tx_cp_rings = NULL;
545 	softc->ntxqsets = 0;
546 
547 	// Free RX queues
548 	for (i=0; i<softc->nrxqsets; i++)
549 		iflib_dma_free(&softc->rx_stats[i]);
550 	iflib_dma_free(&softc->hw_tx_port_stats);
551 	iflib_dma_free(&softc->hw_rx_port_stats);
552 	iflib_dma_free(&softc->hw_tx_port_stats_ext);
553 	iflib_dma_free(&softc->hw_rx_port_stats_ext);
554 	free(softc->grp_info, M_DEVBUF);
555 	free(softc->ag_rings, M_DEVBUF);
556 	free(softc->rx_rings, M_DEVBUF);
557 	free(softc->rx_cp_rings, M_DEVBUF);
558 	bnxt_nq_free(softc);
559 }
560 
561 static int
bnxt_rx_queues_alloc(if_ctx_t ctx,caddr_t * vaddrs,uint64_t * paddrs,int nrxqs,int nrxqsets)562 bnxt_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
563     uint64_t *paddrs, int nrxqs, int nrxqsets)
564 {
565 	struct bnxt_softc *softc;
566 	int i;
567 	int rc;
568 
569 	softc = iflib_get_softc(ctx);
570 
571 	softc->rx_cp_rings = malloc(sizeof(struct bnxt_cp_ring) * nrxqsets,
572 	    M_DEVBUF, M_NOWAIT | M_ZERO);
573 	if (!softc->rx_cp_rings) {
574 		device_printf(iflib_get_dev(ctx),
575 		    "unable to allocate RX completion rings\n");
576 		rc = ENOMEM;
577 		goto cp_alloc_fail;
578 	}
579 	softc->rx_rings = malloc(sizeof(struct bnxt_ring) * nrxqsets,
580 	    M_DEVBUF, M_NOWAIT | M_ZERO);
581 	if (!softc->rx_rings) {
582 		device_printf(iflib_get_dev(ctx),
583 		    "unable to allocate RX rings\n");
584 		rc = ENOMEM;
585 		goto ring_alloc_fail;
586 	}
587 	softc->ag_rings = malloc(sizeof(struct bnxt_ring) * nrxqsets,
588 	    M_DEVBUF, M_NOWAIT | M_ZERO);
589 	if (!softc->ag_rings) {
590 		device_printf(iflib_get_dev(ctx),
591 		    "unable to allocate aggregation rings\n");
592 		rc = ENOMEM;
593 		goto ag_alloc_fail;
594 	}
595 	softc->grp_info = malloc(sizeof(struct bnxt_grp_info) * nrxqsets,
596 	    M_DEVBUF, M_NOWAIT | M_ZERO);
597 	if (!softc->grp_info) {
598 		device_printf(iflib_get_dev(ctx),
599 		    "unable to allocate ring groups\n");
600 		rc = ENOMEM;
601 		goto grp_alloc_fail;
602 	}
603 
604 	for (i=0; i < nrxqsets; i++) {
605 		rc = iflib_dma_alloc(ctx, sizeof(struct ctx_hw_stats),
606 				&softc->rx_stats[i], 0);
607 		if (rc)
608 			goto hw_stats_alloc_fail;
609 		bus_dmamap_sync(softc->rx_stats[i].idi_tag, softc->rx_stats[i].idi_map,
610 				BUS_DMASYNC_PREREAD);
611 	}
612 
613 /*
614  * Additional 512 bytes for future expansion.
615  * To prevent corruption when loaded with newer firmwares with added counters.
616  * This can be deleted when there will be no further additions of counters.
617  */
618 #define BNXT_PORT_STAT_PADDING  512
619 
620 	rc = iflib_dma_alloc(ctx, sizeof(struct rx_port_stats) + BNXT_PORT_STAT_PADDING,
621 	    &softc->hw_rx_port_stats, 0);
622 	if (rc)
623 		goto hw_port_rx_stats_alloc_fail;
624 
625 	bus_dmamap_sync(softc->hw_rx_port_stats.idi_tag,
626             softc->hw_rx_port_stats.idi_map, BUS_DMASYNC_PREREAD);
627 
628 
629 	rc = iflib_dma_alloc(ctx, sizeof(struct tx_port_stats) + BNXT_PORT_STAT_PADDING,
630 	    &softc->hw_tx_port_stats, 0);
631 	if (rc)
632 		goto hw_port_tx_stats_alloc_fail;
633 
634 	bus_dmamap_sync(softc->hw_tx_port_stats.idi_tag,
635             softc->hw_tx_port_stats.idi_map, BUS_DMASYNC_PREREAD);
636 
637 	softc->rx_port_stats = (void *) softc->hw_rx_port_stats.idi_vaddr;
638 	softc->tx_port_stats = (void *) softc->hw_tx_port_stats.idi_vaddr;
639 
640 
641 	rc = iflib_dma_alloc(ctx, sizeof(struct rx_port_stats_ext),
642 		&softc->hw_rx_port_stats_ext, 0);
643 	if (rc)
644 		goto hw_port_rx_stats_ext_alloc_fail;
645 
646 	bus_dmamap_sync(softc->hw_rx_port_stats_ext.idi_tag,
647 	    softc->hw_rx_port_stats_ext.idi_map, BUS_DMASYNC_PREREAD);
648 
649 	rc = iflib_dma_alloc(ctx, sizeof(struct tx_port_stats_ext),
650 		&softc->hw_tx_port_stats_ext, 0);
651 	if (rc)
652 		goto hw_port_tx_stats_ext_alloc_fail;
653 
654 	bus_dmamap_sync(softc->hw_tx_port_stats_ext.idi_tag,
655 	    softc->hw_tx_port_stats_ext.idi_map, BUS_DMASYNC_PREREAD);
656 
657 	softc->rx_port_stats_ext = (void *) softc->hw_rx_port_stats_ext.idi_vaddr;
658 	softc->tx_port_stats_ext = (void *) softc->hw_tx_port_stats_ext.idi_vaddr;
659 
660 	for (i = 0; i < nrxqsets; i++) {
661 		/* Allocation the completion ring */
662 		softc->rx_cp_rings[i].stats_ctx_id = HWRM_NA_SIGNATURE;
663 		softc->rx_cp_rings[i].ring.phys_id =
664 		    (uint16_t)HWRM_NA_SIGNATURE;
665 		softc->rx_cp_rings[i].ring.softc = softc;
666 		softc->rx_cp_rings[i].ring.idx = i;
667 		softc->rx_cp_rings[i].ring.id = i + 1;
668 		softc->rx_cp_rings[i].ring.doorbell = (BNXT_CHIP_P5(softc)) ?
669 			DB_PF_OFFSET_P5 : softc->rx_cp_rings[i].ring.id * 0x80;
670 		/*
671 		 * If this ring overflows, RX stops working.
672 		 */
673 		softc->rx_cp_rings[i].ring.ring_size =
674 		    softc->scctx->isc_nrxd[0];
675 		softc->rx_cp_rings[i].ring.vaddr = vaddrs[i * nrxqs];
676 		softc->rx_cp_rings[i].ring.paddr = paddrs[i * nrxqs];
677 
678 		/* Allocate the RX ring */
679 		softc->rx_rings[i].phys_id = (uint16_t)HWRM_NA_SIGNATURE;
680 		softc->rx_rings[i].softc = softc;
681 		softc->rx_rings[i].idx = i;
682 		softc->rx_rings[i].id = i + 1;
683 		softc->rx_rings[i].doorbell = (BNXT_CHIP_P5(softc)) ?
684 			DB_PF_OFFSET_P5 : softc->rx_rings[i].id * 0x80;
685 		softc->rx_rings[i].ring_size = softc->scctx->isc_nrxd[1];
686 		softc->rx_rings[i].vaddr = vaddrs[i * nrxqs + 1];
687 		softc->rx_rings[i].paddr = paddrs[i * nrxqs + 1];
688 
689 		/* Allocate the TPA start buffer */
690 		softc->rx_rings[i].tpa_start = malloc(sizeof(struct bnxt_full_tpa_start) *
691 	    		(RX_TPA_START_CMPL_AGG_ID_MASK >> RX_TPA_START_CMPL_AGG_ID_SFT),
692 	    		M_DEVBUF, M_NOWAIT | M_ZERO);
693 		if (softc->rx_rings[i].tpa_start == NULL) {
694 			rc = -ENOMEM;
695 			device_printf(softc->dev,
696 					"Unable to allocate space for TPA\n");
697 			goto tpa_alloc_fail;
698 		}
699 		/* Allocate the AG ring */
700 		softc->ag_rings[i].phys_id = (uint16_t)HWRM_NA_SIGNATURE;
701 		softc->ag_rings[i].softc = softc;
702 		softc->ag_rings[i].idx = i;
703 		softc->ag_rings[i].id = nrxqsets + i + 1;
704 		softc->ag_rings[i].doorbell = (BNXT_CHIP_P5(softc)) ?
705 			DB_PF_OFFSET_P5 : softc->ag_rings[i].id * 0x80;
706 		softc->ag_rings[i].ring_size = softc->scctx->isc_nrxd[2];
707 		softc->ag_rings[i].vaddr = vaddrs[i * nrxqs + 2];
708 		softc->ag_rings[i].paddr = paddrs[i * nrxqs + 2];
709 
710 		/* Allocate the ring group */
711 		softc->grp_info[i].grp_id = (uint16_t)HWRM_NA_SIGNATURE;
712 		softc->grp_info[i].stats_ctx =
713 		    softc->rx_cp_rings[i].stats_ctx_id;
714 		softc->grp_info[i].rx_ring_id = softc->rx_rings[i].phys_id;
715 		softc->grp_info[i].ag_ring_id = softc->ag_rings[i].phys_id;
716 		softc->grp_info[i].cp_ring_id =
717 		    softc->rx_cp_rings[i].ring.phys_id;
718 
719 		bnxt_create_rx_sysctls(softc, i);
720 	}
721 
722 	/*
723 	 * When SR-IOV is enabled, avoid each VF sending PORT_QSTATS
724          * HWRM every sec with which firmware timeouts can happen
725          */
726 	if (BNXT_PF(softc))
727 		bnxt_create_port_stats_sysctls(softc);
728 
729 	/* And finally, the VNIC */
730 	softc->vnic_info.id = (uint16_t)HWRM_NA_SIGNATURE;
731 	softc->vnic_info.filter_id = -1;
732 	softc->vnic_info.def_ring_grp = (uint16_t)HWRM_NA_SIGNATURE;
733 	softc->vnic_info.cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
734 	softc->vnic_info.lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
735 	softc->vnic_info.rx_mask = HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST |
736 		HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ANYVLAN_NONVLAN;
737 	softc->vnic_info.mc_list_count = 0;
738 	softc->vnic_info.flags = BNXT_VNIC_FLAG_DEFAULT;
739 	rc = iflib_dma_alloc(ctx, BNXT_MAX_MC_ADDRS * ETHER_ADDR_LEN,
740 	    &softc->vnic_info.mc_list, 0);
741 	if (rc)
742 		goto mc_list_alloc_fail;
743 
744 	/* The VNIC RSS Hash Key */
745 	rc = iflib_dma_alloc(ctx, HW_HASH_KEY_SIZE,
746 	    &softc->vnic_info.rss_hash_key_tbl, 0);
747 	if (rc)
748 		goto rss_hash_alloc_fail;
749 	bus_dmamap_sync(softc->vnic_info.rss_hash_key_tbl.idi_tag,
750 	    softc->vnic_info.rss_hash_key_tbl.idi_map,
751 	    BUS_DMASYNC_PREWRITE);
752 	memcpy(softc->vnic_info.rss_hash_key_tbl.idi_vaddr,
753 	    softc->vnic_info.rss_hash_key, HW_HASH_KEY_SIZE);
754 
755 	/* Allocate the RSS tables */
756 	rc = iflib_dma_alloc(ctx, HW_HASH_INDEX_SIZE * sizeof(uint16_t),
757 	    &softc->vnic_info.rss_grp_tbl, 0);
758 	if (rc)
759 		goto rss_grp_alloc_fail;
760 	bus_dmamap_sync(softc->vnic_info.rss_grp_tbl.idi_tag,
761 	    softc->vnic_info.rss_grp_tbl.idi_map,
762 	    BUS_DMASYNC_PREWRITE);
763 	memset(softc->vnic_info.rss_grp_tbl.idi_vaddr, 0xff,
764 	    softc->vnic_info.rss_grp_tbl.idi_size);
765 
766 	softc->nrxqsets = nrxqsets;
767 	return rc;
768 
769 rss_grp_alloc_fail:
770 	iflib_dma_free(&softc->vnic_info.rss_hash_key_tbl);
771 rss_hash_alloc_fail:
772 	iflib_dma_free(&softc->vnic_info.mc_list);
773 mc_list_alloc_fail:
774 	for (i = i - 1; i >= 0; i--) {
775 		if (softc->rx_rings[i].tpa_start)
776 			free(softc->rx_rings[i].tpa_start, M_DEVBUF);
777 	}
778 tpa_alloc_fail:
779 	iflib_dma_free(&softc->hw_tx_port_stats_ext);
780 hw_port_tx_stats_ext_alloc_fail:
781 	iflib_dma_free(&softc->hw_rx_port_stats_ext);
782 hw_port_rx_stats_ext_alloc_fail:
783 	iflib_dma_free(&softc->hw_tx_port_stats);
784 hw_port_tx_stats_alloc_fail:
785 	iflib_dma_free(&softc->hw_rx_port_stats);
786 hw_port_rx_stats_alloc_fail:
787 	for (i=0; i < nrxqsets; i++) {
788 		if (softc->rx_stats[i].idi_vaddr)
789 			iflib_dma_free(&softc->rx_stats[i]);
790 	}
791 hw_stats_alloc_fail:
792 	free(softc->grp_info, M_DEVBUF);
793 grp_alloc_fail:
794 	free(softc->ag_rings, M_DEVBUF);
795 ag_alloc_fail:
796 	free(softc->rx_rings, M_DEVBUF);
797 ring_alloc_fail:
798 	free(softc->rx_cp_rings, M_DEVBUF);
799 cp_alloc_fail:
800 	return rc;
801 }
802 
bnxt_free_hwrm_short_cmd_req(struct bnxt_softc * softc)803 static void bnxt_free_hwrm_short_cmd_req(struct bnxt_softc *softc)
804 {
805 	if (softc->hwrm_short_cmd_req_addr.idi_vaddr)
806 		iflib_dma_free(&softc->hwrm_short_cmd_req_addr);
807 	softc->hwrm_short_cmd_req_addr.idi_vaddr = NULL;
808 }
809 
bnxt_alloc_hwrm_short_cmd_req(struct bnxt_softc * softc)810 static int bnxt_alloc_hwrm_short_cmd_req(struct bnxt_softc *softc)
811 {
812 	int rc;
813 
814 	rc = iflib_dma_alloc(softc->ctx, softc->hwrm_max_req_len,
815 	    &softc->hwrm_short_cmd_req_addr, BUS_DMA_NOWAIT);
816 
817 	return rc;
818 }
819 
/*
 * Free all DMA pages backing a ring memory descriptor, the optional
 * page-directory table, and any attached software vmem area.  Safe on
 * partially-allocated rings: NULL entries are skipped, and freed
 * pointers are cleared so repeated calls are harmless.
 * The softc argument is currently unused.
 */
static void bnxt_free_ring(struct bnxt_softc *softc, struct bnxt_ring_mem_info *rmem)
{
	int i;

	for (i = 0; i < rmem->nr_pages; i++) {
		if (!rmem->pg_arr[i].idi_vaddr)
			continue;

		iflib_dma_free(&rmem->pg_arr[i]);
		rmem->pg_arr[i].idi_vaddr = NULL;
	}
	/* Page table is only present for multi-page / indirect rings */
	if (rmem->pg_tbl.idi_vaddr) {
		iflib_dma_free(&rmem->pg_tbl);
		rmem->pg_tbl.idi_vaddr = NULL;

	}
	if (rmem->vmem_size && *rmem->vmem) {
		free(*rmem->vmem, M_DEVBUF);
		*rmem->vmem = NULL;
	}
}
841 
bnxt_init_ctx_mem(struct bnxt_ctx_mem_type * ctxm,void * p,int len)842 static void bnxt_init_ctx_mem(struct bnxt_ctx_mem_type *ctxm, void *p, int len)
843 {
844 	u8 init_val = ctxm->init_value;
845 	u16 offset = ctxm->init_offset;
846 	u8 *p2 = p;
847 	int i;
848 
849 	if (!init_val)
850 		return;
851 	if (offset == BNXT_CTX_INIT_INVALID_OFFSET) {
852 		memset(p, init_val, len);
853 		return;
854 	}
855 	for (i = 0; i < len; i += ctxm->entry_size)
856 		*(p2 + i + offset) = init_val;
857 }
858 
/*
 * Allocate the DMA pages backing a ring (plus a page-directory table for
 * multi-page or indirect rings), fill the PTEs with the page physical
 * addresses, and allocate the optional software descriptor array.
 * Returns 0 on success or -ENOMEM; pages allocated before a failure are
 * left for the caller to reclaim via bnxt_free_ring().
 */
static int bnxt_alloc_ring(struct bnxt_softc *softc, struct bnxt_ring_mem_info *rmem)
{
	uint64_t valid_bit = 0;
	int i;
	int rc;

	/* PTE-style entries carry a hardware "valid" bit. */
	if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG))
		valid_bit = PTU_PTE_VALID;

	/* Indirect ring: allocate the table holding one 8-byte PTE per page. */
	if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl.idi_vaddr) {
		size_t pg_tbl_size = rmem->nr_pages * 8;

		if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
			pg_tbl_size = rmem->page_size;

		rc = iflib_dma_alloc(softc->ctx, pg_tbl_size, &rmem->pg_tbl, 0);
		if (rc)
			return -ENOMEM;
	}

	for (i = 0; i < rmem->nr_pages; i++) {
		uint64_t extra_bits = valid_bit;
		uint64_t *ptr;

		rc = iflib_dma_alloc(softc->ctx, rmem->page_size, &rmem->pg_arr[i], 0);
		if (rc)
			return -ENOMEM;

		/* Context memory must be pre-initialized per firmware rules. */
		if (rmem->ctx_mem)
			bnxt_init_ctx_mem(rmem->ctx_mem, rmem->pg_arr[i].idi_vaddr,
					rmem->page_size);

		if (rmem->nr_pages > 1 || rmem->depth > 0) {
			/* Ring-style PTEs tag the last two entries specially. */
			if (i == rmem->nr_pages - 2 &&
					(rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
				extra_bits |= PTU_PTE_NEXT_TO_LAST;
			else if (i == rmem->nr_pages - 1 &&
					(rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
				extra_bits |= PTU_PTE_LAST;

			/* Publish the page's bus address (little-endian). */
			ptr = (void *) rmem->pg_tbl.idi_vaddr;
			ptr[i]  = htole64(rmem->pg_arr[i].idi_paddr | extra_bits);
		}
	}

	/* Optional host-only (non-DMA) shadow array. */
	if (rmem->vmem_size) {
		*rmem->vmem = malloc(rmem->vmem_size, M_DEVBUF, M_NOWAIT | M_ZERO);
		if (!(*rmem->vmem))
			return -ENOMEM;
	}
	return 0;
}
911 
912 
/* Backing-store regions always enabled for the L2 path; RoCE-specific
 * regions (MRAV/TIM) and TQM rings are OR'ed in separately by
 * bnxt_alloc_ctx_mem(). */
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_DFLT_ENABLES		\
	(HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_QP |		\
	 HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_SRQ |	\
	 HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_CQ |		\
	 HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_VNIC |	\
	 HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_STAT)
919 
bnxt_alloc_ctx_mem_blk(struct bnxt_softc * softc,struct bnxt_ctx_pg_info * ctx_pg)920 static int bnxt_alloc_ctx_mem_blk(struct bnxt_softc *softc,
921 				  struct bnxt_ctx_pg_info *ctx_pg)
922 {
923 	struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
924 
925 	rmem->page_size = BNXT_PAGE_SIZE;
926 	rmem->pg_arr = ctx_pg->ctx_arr;
927 	rmem->flags = BNXT_RMEM_VALID_PTE_FLAG;
928 	if (rmem->depth >= 1)
929 		rmem->flags |= BNXT_RMEM_USE_FULL_PAGE_FLAG;
930 
931 	return bnxt_alloc_ring(softc, rmem);
932 }
933 
/*
 * Size and allocate the backing store for one context block.  Blocks that
 * fit in MAX_CTX_PAGES pages (and depth <= 1) use a single-level page
 * structure; larger blocks get a 2-level structure in which each
 * first-level page serves as the page table of a second-level
 * bnxt_ctx_pg_info.  Returns 0 on success, -EINVAL for a bad size,
 * -ENOMEM on allocation failure (partial allocations are reclaimed later
 * by bnxt_free_ctx_pg_tbls()).
 */
static int bnxt_alloc_ctx_pg_tbls(struct bnxt_softc *softc,
				  struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size,
				  u8 depth, struct bnxt_ctx_mem_type *ctxm)
{
	struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
	int rc;

	if (!mem_size)
		return -EINVAL;

	ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
	if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) {
		ctx_pg->nr_pages = 0;
		return -EINVAL;
	}
	if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) {
		int nr_tbls, i;

		rmem->depth = 2;
		/* NOTE(review): sizeof(ctx_pg) is a pointer size, which
		 * matches sizeof(*ctx_pg->ctx_pg_tbl) only by coincidence;
		 * the latter would state the intent better. */
		ctx_pg->ctx_pg_tbl = kzalloc(MAX_CTX_PAGES * sizeof(ctx_pg),
					      GFP_KERNEL);
		if (!ctx_pg->ctx_pg_tbl)
			return -ENOMEM;
		nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES);
		rmem->nr_pages = nr_tbls;
		/* Level 1: one page per second-level table. */
		rc = bnxt_alloc_ctx_mem_blk(softc, ctx_pg);
		if (rc)
			return rc;
		for (i = 0; i < nr_tbls; i++) {
			struct bnxt_ctx_pg_info *pg_tbl;

			pg_tbl = kzalloc(sizeof(*pg_tbl), GFP_KERNEL);
			if (!pg_tbl)
				return -ENOMEM;
			ctx_pg->ctx_pg_tbl[i] = pg_tbl;
			rmem = &pg_tbl->ring_mem;
			/* Reuse the level-1 page as this block's page table. */
			memcpy(&rmem->pg_tbl, &ctx_pg->ctx_arr[i], sizeof(struct iflib_dma_info));
			rmem->depth = 1;
			rmem->nr_pages = MAX_CTX_PAGES;
			rmem->ctx_mem = ctxm;
			if (i == (nr_tbls - 1)) {
				/* The last table may be partially filled. */
				int rem = ctx_pg->nr_pages % MAX_CTX_PAGES;

				if (rem)
					rmem->nr_pages = rem;
			}
			rc = bnxt_alloc_ctx_mem_blk(softc, pg_tbl);
			if (rc)
				break;
		}
	} else {
		rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
		if (rmem->nr_pages > 1 || depth)
			rmem->depth = 1;
		rmem->ctx_mem = ctxm;
		rc = bnxt_alloc_ctx_mem_blk(softc, ctx_pg);
	}
	return rc;
}
993 
/*
 * Free the context memory behind one bnxt_ctx_pg_info, including the
 * second-level blocks when a 2-level page structure was built by
 * bnxt_alloc_ctx_pg_tbls().
 */
static void bnxt_free_ctx_pg_tbls(struct bnxt_softc *softc,
				  struct bnxt_ctx_pg_info *ctx_pg)
{
	struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;

	if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES ||
	    ctx_pg->ctx_pg_tbl) {
		int i, nr_tbls = rmem->nr_pages;

		/* Free each second-level block first. */
		for (i = 0; i < nr_tbls; i++) {
			struct bnxt_ctx_pg_info *pg_tbl;
			struct bnxt_ring_mem_info *rmem2;

			pg_tbl = ctx_pg->ctx_pg_tbl[i];
			if (!pg_tbl)
				continue;
			rmem2 = &pg_tbl->ring_mem;
			bnxt_free_ring(softc, rmem2);
			/* The level-1 entry aliased this block's pg_tbl
			 * (memcpy at alloc time); clear it so the final
			 * bnxt_free_ring() below does not free it twice. */
			ctx_pg->ctx_arr[i].idi_vaddr = NULL;
			free(pg_tbl , M_DEVBUF);
			ctx_pg->ctx_pg_tbl[i] = NULL;
		}
		kfree(ctx_pg->ctx_pg_tbl);
		ctx_pg->ctx_pg_tbl = NULL;
	}
	/* Then the first (or only) level. */
	bnxt_free_ring(softc, rmem);
	ctx_pg->nr_pages = 0;
}
1022 
/*
 * Allocate the page tables for one context-memory type: round and clamp
 * the entry count to firmware limits, then allocate one backing store
 * per instance of the type.
 */
static int bnxt_setup_ctxm_pg_tbls(struct bnxt_softc *softc,
				   struct bnxt_ctx_mem_type *ctxm, u32 entries,
				   u8 pg_lvl)
{
	struct bnxt_ctx_pg_info *pg = ctxm->pg_info;
	struct bnxt_ctx_mem_type *init = NULL;
	int inst, ninst, rc = 0;
	u32 bytes;

	if (ctxm->entry_size == 0 || pg == NULL)
		return -EINVAL;

	ninst = ctxm->instance_bmap ? hweight32(ctxm->instance_bmap) : 1;

	if (ctxm->entry_multiple != 0)
		entries = roundup(entries, ctxm->entry_multiple);
	entries = clamp_t(u32, entries, ctxm->min_entries, ctxm->max_entries);
	bytes = entries * ctxm->entry_size;

	/* Only pass the type through when it requires pre-initialization. */
	if (ctxm->init_value)
		init = ctxm;

	for (inst = 0; inst < ninst && rc == 0; inst++) {
		pg[inst].entries = entries;
		rc = bnxt_alloc_ctx_pg_tbls(softc, &pg[inst], bytes, pg_lvl,
		    init);
	}
	return rc;
}
1046 
bnxt_free_ctx_mem(struct bnxt_softc * softc)1047 static void bnxt_free_ctx_mem(struct bnxt_softc *softc)
1048 {
1049 	struct bnxt_ctx_mem_info *ctx = softc->ctx_mem;
1050 	u16 type;
1051 
1052 	if (!ctx)
1053 		return;
1054 
1055 	for (type = 0; type < BNXT_CTX_MAX; type++) {
1056 		struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
1057 		struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info;
1058 		int i, n = 1;
1059 
1060 		if (!ctx_pg)
1061 			continue;
1062 		if (ctxm->instance_bmap)
1063 			n = hweight32(ctxm->instance_bmap);
1064 		for (i = 0; i < n; i++)
1065 			bnxt_free_ctx_pg_tbls(softc, &ctx_pg[i]);
1066 
1067 		kfree(ctx_pg);
1068 		ctxm->pg_info = NULL;
1069 	}
1070 
1071 	ctx->flags &= ~BNXT_CTX_FLAG_INITED;
1072 	kfree(ctx);
1073 	softc->ctx_mem = NULL;
1074 }
1075 
bnxt_alloc_ctx_mem(struct bnxt_softc * softc)1076 static int bnxt_alloc_ctx_mem(struct bnxt_softc *softc)
1077 {
1078 	struct bnxt_ctx_pg_info *ctx_pg;
1079 	struct bnxt_ctx_mem_type *ctxm;
1080 	struct bnxt_ctx_mem_info *ctx;
1081 	u32 l2_qps, qp1_qps, max_qps;
1082 	u32 ena, entries_sp, entries;
1083 	u32 srqs, max_srqs, min;
1084 	u32 num_mr, num_ah;
1085 	u32 extra_srqs = 0;
1086 	u32 extra_qps = 0;
1087 	u8 pg_lvl = 1;
1088 	int i, rc;
1089 
1090 	if (!BNXT_CHIP_P5(softc))
1091 		return 0;
1092 
1093 	rc = bnxt_hwrm_func_backing_store_qcaps(softc);
1094 	if (rc) {
1095 		device_printf(softc->dev, "Failed querying context mem capability, rc = %d.\n",
1096 			   rc);
1097 		return rc;
1098 	}
1099 	ctx = softc->ctx_mem;
1100 	if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
1101 		return 0;
1102 
1103 	ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
1104 	l2_qps = ctxm->qp_l2_entries;
1105 	qp1_qps = ctxm->qp_qp1_entries;
1106 	max_qps = ctxm->max_entries;
1107 	ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
1108 	srqs = ctxm->srq_l2_entries;
1109 	max_srqs = ctxm->max_entries;
1110 	if (softc->flags & BNXT_FLAG_ROCE_CAP) {
1111 		pg_lvl = 2;
1112 		extra_qps = min_t(u32, 65536, max_qps - l2_qps - qp1_qps);
1113 		extra_srqs = min_t(u32, 8192, max_srqs - srqs);
1114 	}
1115 
1116 	ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
1117 	rc = bnxt_setup_ctxm_pg_tbls(softc, ctxm, l2_qps + qp1_qps + extra_qps,
1118 				     pg_lvl);
1119 	if (rc)
1120 		return rc;
1121 
1122 	ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
1123 	rc = bnxt_setup_ctxm_pg_tbls(softc, ctxm, srqs + extra_srqs, pg_lvl);
1124 	if (rc)
1125 		return rc;
1126 
1127 	ctxm = &ctx->ctx_arr[BNXT_CTX_CQ];
1128 	rc = bnxt_setup_ctxm_pg_tbls(softc, ctxm, ctxm->cq_l2_entries +
1129 				     extra_qps * 2, pg_lvl);
1130 	if (rc)
1131 		return rc;
1132 
1133 	ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC];
1134 	rc = bnxt_setup_ctxm_pg_tbls(softc, ctxm, ctxm->max_entries, 1);
1135 	if (rc)
1136 		return rc;
1137 
1138 	ctxm = &ctx->ctx_arr[BNXT_CTX_STAT];
1139 	rc = bnxt_setup_ctxm_pg_tbls(softc, ctxm, ctxm->max_entries, 1);
1140 	if (rc)
1141 		return rc;
1142 
1143 	ena = 0;
1144 	if (!(softc->flags & BNXT_FLAG_ROCE_CAP))
1145 		goto skip_rdma;
1146 
1147 	ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV];
1148 	ctx_pg = ctxm->pg_info;
1149 	/* 128K extra is needed to accomodate static AH context
1150 	 * allocation by f/w.
1151 	 */
1152 	num_mr = min_t(u32, ctxm->max_entries / 2, 1024 * 256);
1153 	num_ah = min_t(u32, num_mr, 1024 * 128);
1154 	rc = bnxt_setup_ctxm_pg_tbls(softc, ctxm, num_mr + num_ah, 2);
1155 	if (rc)
1156 		return rc;
1157 	ctx_pg->entries = num_mr + num_ah;
1158 	ena = HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_MRAV;
1159 	if (ctxm->mrav_num_entries_units)
1160 		ctx_pg->entries =
1161 			((num_mr / ctxm->mrav_num_entries_units) << 16) |
1162 			 (num_ah / ctxm->mrav_num_entries_units);
1163 
1164 	ctxm = &ctx->ctx_arr[BNXT_CTX_TIM];
1165 	rc = bnxt_setup_ctxm_pg_tbls(softc, ctxm, l2_qps + qp1_qps + extra_qps, 1);
1166 	if (rc)
1167 		return rc;
1168 	ena |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TIM;
1169 
1170 skip_rdma:
1171 	ctxm = &ctx->ctx_arr[BNXT_CTX_STQM];
1172 	min = ctxm->min_entries;
1173 	entries_sp = ctx->ctx_arr[BNXT_CTX_VNIC].vnic_entries + l2_qps +
1174 		     2 * (extra_qps + qp1_qps) + min;
1175 	rc = bnxt_setup_ctxm_pg_tbls(softc, ctxm, entries_sp, 2);
1176 		if (rc)
1177 			return rc;
1178 
1179 	ctxm = &ctx->ctx_arr[BNXT_CTX_FTQM];
1180 	entries = l2_qps + 2 * (extra_qps + qp1_qps);
1181 	rc = bnxt_setup_ctxm_pg_tbls(softc, ctxm, entries, 2);
1182 	if (rc)
1183 		return rc;
1184 	for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++) {
1185 		if (i < BNXT_MAX_TQM_LEGACY_RINGS)
1186 			ena |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_SP << i;
1187 		else
1188 			ena |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_RING8;
1189 	}
1190 	ena |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_DFLT_ENABLES;
1191 
1192 	rc = bnxt_hwrm_func_backing_store_cfg(softc, ena);
1193 	if (rc) {
1194 		device_printf(softc->dev, "Failed configuring context mem, rc = %d.\n",
1195 			   rc);
1196 		return rc;
1197 	}
1198 	ctx->flags |= BNXT_CTX_FLAG_INITED;
1199 
1200 	return 0;
1201 }
1202 
1203 /*
1204  * If we update the index, a write barrier is needed after the write to ensure
1205  * the completion ring has space before the RX/TX ring does.  Since we can't
1206  * make the RX and AG doorbells covered by the same barrier without remapping
 * MSI-X vectors, we create the barrier over the entire doorbell bar.
1208  * TODO: Remap the MSI-X vectors to allow a barrier to only cover the doorbells
1209  *       for a single ring group.
1210  *
1211  * A barrier of just the size of the write is used to ensure the ordering
1212  * remains correct and no writes are lost.
1213  */
1214 
/*
 * Ring an RX doorbell on legacy (Cu+Wh) chips: publish the new producer
 * index after a write barrier on the doorbell register.
 */
static void bnxt_cuw_db_rx(void *db_ptr, uint16_t idx)
{
	struct bnxt_ring *rxr = db_ptr;
	struct bnxt_bar_info *bar = &rxr->softc->doorbell_bar;
	uint32_t db_val = htole32(RX_DOORBELL_KEY_RX | idx);

	bus_space_barrier(bar->tag, bar->handle, rxr->doorbell, 4,
	    BUS_SPACE_BARRIER_WRITE);
	bus_space_write_4(bar->tag, bar->handle, rxr->doorbell, db_val);
}
1225 
/*
 * Ring a TX doorbell on legacy (Cu+Wh) chips: publish the new producer
 * index after a write barrier on the doorbell register.
 */
static void bnxt_cuw_db_tx(void *db_ptr, uint16_t idx)
{
	struct bnxt_ring *txr = db_ptr;
	struct bnxt_bar_info *bar = &txr->softc->doorbell_bar;
	uint32_t db_val = htole32(TX_DOORBELL_KEY_TX | idx);

	bus_space_barrier(bar->tag, bar->handle, txr->doorbell, 4,
	    BUS_SPACE_BARRIER_WRITE);
	bus_space_write_4(bar->tag, bar->handle, txr->doorbell, db_val);
}
1236 
/*
 * Ring a completion-ring doorbell on legacy (Cu+Wh) chips: report the
 * current consumer index (omitted while cons == UINT32_MAX, i.e. the ring
 * has never been polled) and arm or mask the IRQ.
 */
static void bnxt_cuw_db_cq(void *db_ptr, bool enable_irq)
{
	struct bnxt_cp_ring *cpr = (struct bnxt_cp_ring *) db_ptr;
	struct bnxt_bar_info *db_bar = &cpr->ring.softc->doorbell_bar;

	/* Order prior ring updates before the doorbell write. */
	bus_space_barrier(db_bar->tag, db_bar->handle, cpr->ring.doorbell, 4,
			BUS_SPACE_BARRIER_WRITE);
	bus_space_write_4(db_bar->tag, db_bar->handle, cpr->ring.doorbell,
			htole32(CMPL_DOORBELL_KEY_CMPL |
				((cpr->cons == UINT32_MAX) ? 0 :
				 (cpr->cons | CMPL_DOORBELL_IDX_VALID)) |
				((enable_irq) ? 0 : CMPL_DOORBELL_MASK)));
	/* Trailing barrier over the whole doorbell BAR; see the block
	 * comment above these doorbell routines. */
	bus_space_barrier(db_bar->tag, db_bar->handle, 0, db_bar->size,
			BUS_SPACE_BARRIER_WRITE);
}
1252 
/*
 * Ring an RX doorbell on P5 (Thor) chips: a single 8-byte write carrying
 * the ring's XID, the SRQ type, and the new producer index.
 */
static void bnxt_thor_db_rx(void *db_ptr, uint16_t idx)
{
	struct bnxt_ring *rxr = db_ptr;
	struct bnxt_bar_info *bar = &rxr->softc->doorbell_bar;
	uint64_t db_val;

	db_val = (DBR_PATH_L2 | DBR_TYPE_SRQ | idx) |
	    ((uint64_t)rxr->phys_id << DBR_XID_SFT);

	bus_space_barrier(bar->tag, bar->handle, rxr->doorbell, 8,
	    BUS_SPACE_BARRIER_WRITE);
	bus_space_write_8(bar->tag, bar->handle, rxr->doorbell,
	    htole64(db_val));
}
1264 
/*
 * Ring a TX doorbell on P5 (Thor) chips: a single 8-byte write carrying
 * the ring's XID, the SQ type, and the new producer index.
 */
static void bnxt_thor_db_tx(void *db_ptr, uint16_t idx)
{
	struct bnxt_ring *txr = db_ptr;
	struct bnxt_bar_info *bar = &txr->softc->doorbell_bar;
	uint64_t db_val;

	db_val = (DBR_PATH_L2 | DBR_TYPE_SQ | idx) |
	    ((uint64_t)txr->phys_id << DBR_XID_SFT);

	bus_space_barrier(bar->tag, bar->handle, txr->doorbell, 8,
	    BUS_SPACE_BARRIER_WRITE);
	bus_space_write_8(bar->tag, bar->handle, txr->doorbell,
	    htole64(db_val));
}
1276 
/*
 * Ring a P5 (Thor) RX completion-queue doorbell: report the next consumer
 * index and either re-arm all events (enable_irq) or just update the CQ.
 */
static void bnxt_thor_db_rx_cq(void *db_ptr, bool enable_irq)
{
	struct bnxt_cp_ring *cpr = (struct bnxt_cp_ring *) db_ptr;
	struct bnxt_bar_info *db_bar = &cpr->ring.softc->doorbell_bar;
	dbc_dbc_t db_msg = { 0 };
	uint32_t cons = cpr->cons;

	/* UINT32_MAX marks a never-polled ring; start from index 0. */
	if (cons == UINT32_MAX)
		cons = 0;
	else
		cons = RING_NEXT(&cpr->ring, cons);

	db_msg.index = ((cons << DBC_DBC_INDEX_SFT) & DBC_DBC_INDEX_MASK);

	db_msg.type_path_xid = ((cpr->ring.phys_id << DBC_DBC_XID_SFT) &
			DBC_DBC_XID_MASK) | DBC_DBC_PATH_L2 |
		((enable_irq) ? DBC_DBC_TYPE_CQ_ARMALL: DBC_DBC_TYPE_CQ);

	/* Order prior ring updates before the 8-byte doorbell write. */
	bus_space_barrier(db_bar->tag, db_bar->handle, cpr->ring.doorbell, 8,
			BUS_SPACE_BARRIER_WRITE);
	bus_space_write_8(db_bar->tag, db_bar->handle, cpr->ring.doorbell,
			htole64(*(uint64_t *)&db_msg));
	/* Trailing barrier over the whole doorbell BAR; see the block
	 * comment above these doorbell routines. */
	bus_space_barrier(db_bar->tag, db_bar->handle, 0, db_bar->size,
			BUS_SPACE_BARRIER_WRITE);
}
1302 
/*
 * Ring a P5 (Thor) TX completion-queue doorbell.  Unlike the RX variant,
 * the raw consumer index is used without advancing it — presumably the TX
 * path keeps cpr->cons current before ringing; confirm against callers.
 */
static void bnxt_thor_db_tx_cq(void *db_ptr, bool enable_irq)
{
	struct bnxt_cp_ring *cpr = (struct bnxt_cp_ring *) db_ptr;
	struct bnxt_bar_info *db_bar = &cpr->ring.softc->doorbell_bar;
	dbc_dbc_t db_msg = { 0 };
	uint32_t cons = cpr->cons;

	db_msg.index = ((cons << DBC_DBC_INDEX_SFT) & DBC_DBC_INDEX_MASK);

	db_msg.type_path_xid = ((cpr->ring.phys_id << DBC_DBC_XID_SFT) &
			DBC_DBC_XID_MASK) | DBC_DBC_PATH_L2 |
		((enable_irq) ? DBC_DBC_TYPE_CQ_ARMALL: DBC_DBC_TYPE_CQ);

	/* Order prior ring updates before the 8-byte doorbell write. */
	bus_space_barrier(db_bar->tag, db_bar->handle, cpr->ring.doorbell, 8,
			BUS_SPACE_BARRIER_WRITE);
	bus_space_write_8(db_bar->tag, db_bar->handle, cpr->ring.doorbell,
			htole64(*(uint64_t *)&db_msg));
	/* Trailing barrier over the whole doorbell BAR; see the block
	 * comment above these doorbell routines. */
	bus_space_barrier(db_bar->tag, db_bar->handle, 0, db_bar->size,
			BUS_SPACE_BARRIER_WRITE);
}
1323 
/*
 * Ring a P5 (Thor) notification-queue doorbell: report the consumer index
 * and arm (enable_irq) or just acknowledge the NQ.
 */
static void bnxt_thor_db_nq(void *db_ptr, bool enable_irq)
{
	struct bnxt_cp_ring *cpr = (struct bnxt_cp_ring *) db_ptr;
	struct bnxt_bar_info *db_bar = &cpr->ring.softc->doorbell_bar;
	dbc_dbc_t db_msg = { 0 };
	uint32_t cons = cpr->cons;

	db_msg.index = ((cons << DBC_DBC_INDEX_SFT) & DBC_DBC_INDEX_MASK);

	db_msg.type_path_xid = ((cpr->ring.phys_id << DBC_DBC_XID_SFT) &
			DBC_DBC_XID_MASK) | DBC_DBC_PATH_L2 |
		((enable_irq) ? DBC_DBC_TYPE_NQ_ARM: DBC_DBC_TYPE_NQ);

	/* Order prior ring updates before the 8-byte doorbell write. */
	bus_space_barrier(db_bar->tag, db_bar->handle, cpr->ring.doorbell, 8,
			BUS_SPACE_BARRIER_WRITE);
	bus_space_write_8(db_bar->tag, db_bar->handle, cpr->ring.doorbell,
			htole64(*(uint64_t *)&db_msg));
	/* Trailing barrier over the whole doorbell BAR; see the block
	 * comment above these doorbell routines. */
	bus_space_barrier(db_bar->tag, db_bar->handle, 0, db_bar->size,
			BUS_SPACE_BARRIER_WRITE);
}
1344 
/*
 * Look up a registered PF softc, either by interface name (when dev_name
 * is non-NULL) or by PCI domain/bus/device-function.  Returns NULL when
 * no match is found.
 */
struct bnxt_softc *bnxt_find_dev(uint32_t domain, uint32_t bus, uint32_t dev_fn, char *dev_name)
{
	struct bnxt_softc_list *entry;

	SLIST_FOREACH(entry, &pf_list, next) {
		struct bnxt_softc *candidate = entry->softc;

		if (dev_name != NULL) {
			/* Match on the interface name. */
			if (strncmp(dev_name,
			    if_name(iflib_get_ifp(candidate->ctx)),
			    BNXT_MAX_STR) == 0)
				return candidate;
			continue;
		}
		/* Match on PCI domain/bus/device-function. */
		if (domain == candidate->domain && bus == candidate->bus &&
		    dev_fn == candidate->dev_fn)
			return candidate;
	}

	return NULL;
}
1366 
1367 
bnxt_verify_asym_queues(struct bnxt_softc * softc)1368 static void bnxt_verify_asym_queues(struct bnxt_softc *softc)
1369 {
1370 	uint8_t i, lltc = 0;
1371 
1372 	if (!softc->max_lltc)
1373 		return;
1374 
1375 	/* Verify that lossless TX and RX queues are in the same index */
1376 	for (i = 0; i < softc->max_tc; i++) {
1377 		if (BNXT_LLQ(softc->tx_q_info[i].queue_profile) &&
1378 		    BNXT_LLQ(softc->rx_q_info[i].queue_profile))
1379 			lltc++;
1380 	}
1381 	softc->max_lltc = min(softc->max_lltc, lltc);
1382 }
1383 
bnxt_hwrm_poll(struct bnxt_softc * bp)1384 static int bnxt_hwrm_poll(struct bnxt_softc *bp)
1385 {
1386 	struct hwrm_ver_get_output	*resp =
1387 	    (void *)bp->hwrm_cmd_resp.idi_vaddr;
1388 	struct hwrm_ver_get_input req = {0};
1389 	int rc;
1390 
1391 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET);
1392 
1393 	req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
1394 	req.hwrm_intf_min = HWRM_VERSION_MINOR;
1395 	req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
1396 
1397 	rc = _hwrm_send_message(bp, &req, sizeof(req));
1398 	if (rc)
1399 		return rc;
1400 
1401 	if (resp->flags & HWRM_VER_GET_OUTPUT_FLAGS_DEV_NOT_RDY)
1402 		rc = -EAGAIN;
1403 
1404 	return rc;
1405 }
1406 
/* Take rtnl from sp_task context without deadlocking against close. */
static void bnxt_rtnl_lock_sp(struct bnxt_softc *bp)
{
	/* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
	 * set.  If the device is being closed, bnxt_close() may be holding
	 * rtnl() and waiting for BNXT_STATE_IN_SP_TASK to clear.  So we
	 * must clear BNXT_STATE_IN_SP_TASK before holding rtnl().
	 */
	clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
	rtnl_lock();
}
1417 
/* Counterpart of bnxt_rtnl_lock_sp(): re-assert BNXT_STATE_IN_SP_TASK
 * while still under rtnl, then release rtnl. */
static void bnxt_rtnl_unlock_sp(struct bnxt_softc *bp)
{
	set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
	rtnl_unlock();
}
1423 
/* Quiesce the device after a fatal firmware error: mask interrupts and
 * disable the PCI device to stop any further DMA. */
static void bnxt_fw_fatal_close(struct bnxt_softc *softc)
{
	bnxt_disable_intr(softc->ctx);
	if (pci_is_enabled(softc->pdev))
		pci_disable_device(softc->pdev);
}
1430 
/*
 * Read one of the firmware health/recovery registers, dispatching on the
 * register type encoded in the descriptor: PCI config space, a pre-mapped
 * GRC window, or BAR0/BAR1 directly.  The reset-in-progress register is
 * additionally masked with the firmware-provided mask.
 */
static u32 bnxt_fw_health_readl(struct bnxt_softc *bp, int reg_idx)
{
	struct bnxt_fw_health *fw_health = bp->fw_health;
	u32 reg = fw_health->regs[reg_idx];
	u32 reg_type, reg_off, val = 0;

	reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
	reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
	switch (reg_type) {
	case BNXT_FW_HEALTH_REG_TYPE_CFG:
		pci_read_config_dword(bp->pdev, reg_off, &val);
		break;
	case BNXT_FW_HEALTH_REG_TYPE_GRC:
		/* GRC registers were pre-mapped into a BAR0 window by
		 * bnxt_map_fw_health_regs(); use the mapped offset. */
		reg_off = fw_health->mapped_regs[reg_idx];
		fallthrough;
	case BNXT_FW_HEALTH_REG_TYPE_BAR0:
		val = readl_fbsd(bp, reg_off, 0);
		break;
	case BNXT_FW_HEALTH_REG_TYPE_BAR1:
		val = readl_fbsd(bp, reg_off, 2);
		break;
	}
	if (reg_idx == BNXT_FW_RESET_INPROG_REG)
		val &= fw_health->fw_reset_inprog_reg_mask;
	return val;
}
1457 
/*
 * Tear the interface down in preparation for a firmware reset: stop ULPs,
 * quiesce a fatally-wedged device, stop traffic, unregister from
 * firmware, release per-queue interrupts, disable the PCI device, and
 * free context memory.
 */
static void bnxt_fw_reset_close(struct bnxt_softc *bp)
{
	int i;
	bnxt_ulp_stop(bp);
	/* When firmware is in fatal state, quiesce device and disable
	 * bus master to prevent any potential bad DMAs before freeing
	 * kernel memory.
	 */
	if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) {
		u16 val = 0;

		val = pci_read_config(bp->dev, PCI_SUBSYSTEM_ID, 2);
		/* All-ones config read means the device fell off the bus;
		 * skip the minimum reset delay in that case. */
		if (val == 0xffff) {
			bp->fw_reset_min_dsecs = 0;
		}
		bnxt_fw_fatal_close(bp);
	}

	iflib_request_reset(bp->ctx);
	bnxt_stop(bp->ctx);
	bnxt_hwrm_func_drv_unrgtr(bp, false);

	/* Free RX-side IRQs: NQ rings on P5, RX completion rings otherwise. */
	for (i = bp->nrxqsets-1; i>=0; i--) {
		if (BNXT_CHIP_P5(bp))
			iflib_irq_free(bp->ctx, &bp->nq_rings[i].irq);
		else
			iflib_irq_free(bp->ctx, &bp->rx_cp_rings[i].irq);

	}
	if (pci_is_enabled(bp->pdev))
		pci_disable_device(bp->pdev);
	pci_disable_busmaster(bp->dev);
	bnxt_free_ctx_mem(bp);
}
1492 
is_bnxt_fw_ok(struct bnxt_softc * bp)1493 static bool is_bnxt_fw_ok(struct bnxt_softc *bp)
1494 {
1495 	struct bnxt_fw_health *fw_health = bp->fw_health;
1496 	bool no_heartbeat = false, has_reset = false;
1497 	u32 val;
1498 
1499 	val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
1500 	if (val == fw_health->last_fw_heartbeat)
1501 		no_heartbeat = true;
1502 
1503 	val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
1504 	if (val != fw_health->last_fw_reset_cnt)
1505 		has_reset = true;
1506 
1507 	if (!no_heartbeat && has_reset)
1508 		return true;
1509 
1510 	return false;
1511 }
1512 
/*
 * Initiate a firmware reset: close the device and kick off the reset
 * state machine (fw_reset_task).  No-op if the device is not open or a
 * reset is already in progress.
 */
void bnxt_fw_reset(struct bnxt_softc *bp)
{
	bnxt_rtnl_lock_sp(bp);
	if (test_bit(BNXT_STATE_OPEN, &bp->state) &&
	    !test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
		int tmo;
		set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
		bnxt_fw_reset_close(bp);

		if ((bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD)) {
			/* Firmware reloads itself; poll for it to go down. */
			bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
			tmo = HZ / 10;
		} else {
			/* Wait the firmware-mandated minimum (deciseconds). */
			bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
			tmo = bp->fw_reset_min_dsecs * HZ /10;
		}
		bnxt_queue_fw_reset_work(bp, tmo);
	}
	bnxt_rtnl_unlock_sp(bp);
}
1533 
bnxt_queue_fw_reset_work(struct bnxt_softc * bp,unsigned long delay)1534 static void bnxt_queue_fw_reset_work(struct bnxt_softc *bp, unsigned long delay)
1535 {
1536 	if (!(test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)))
1537 		return;
1538 
1539 	if (BNXT_PF(bp))
1540 		queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay);
1541 	else
1542 		schedule_delayed_work(&bp->fw_reset_task, delay);
1543 }
1544 
bnxt_queue_sp_work(struct bnxt_softc * bp)1545 void bnxt_queue_sp_work(struct bnxt_softc *bp)
1546 {
1547 	if (BNXT_PF(bp))
1548 		queue_work(bnxt_pf_wq, &bp->sp_task);
1549 	else
1550 		schedule_work(&bp->sp_task);
1551 }
1552 
/*
 * Execute one step of the firmware-provided host reset sequence: write
 * the step's value to its register (PCI config space, GRC via a window,
 * or a BAR), then optionally flush and delay as firmware instructed.
 */
static void bnxt_fw_reset_writel(struct bnxt_softc *bp, int reg_idx)
{
	struct bnxt_fw_health *fw_health = bp->fw_health;
	u32 reg = fw_health->fw_reset_seq_regs[reg_idx];
	u32 val = fw_health->fw_reset_seq_vals[reg_idx];
	u32 reg_type, reg_off, delay_msecs;

	delay_msecs = fw_health->fw_reset_seq_delay_msec[reg_idx];
	reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
	reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
	switch (reg_type) {
	case BNXT_FW_HEALTH_REG_TYPE_CFG:
		pci_write_config_dword(bp->pdev, reg_off, val);
		break;
	case BNXT_FW_HEALTH_REG_TYPE_GRC:
		/* Point the GRC window at the register, then fall through
		 * to write it via BAR0 at the window offset. */
		writel_fbsd(bp, BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4, 0, reg_off & BNXT_GRC_BASE_MASK);
		reg_off = (reg_off & BNXT_GRC_OFFSET_MASK) + 0x2000;
		fallthrough;
	case BNXT_FW_HEALTH_REG_TYPE_BAR0:
		writel_fbsd(bp, reg_off, 0, val);
		break;
	case BNXT_FW_HEALTH_REG_TYPE_BAR1:
		writel_fbsd(bp, reg_off, 2, val);
		break;
	}
	if (delay_msecs) {
		/* Config-space read acts as a flush before the delay. */
		pci_read_config_dword(bp->pdev, 0, &val);
		msleep(delay_msecs);
	}
}
1583 
/*
 * Reset the whole NIC.  When firmware handles the reset itself
 * (ERR_RECOVER_RELOAD), just record the timestamp.  Otherwise either
 * replay the firmware-supplied host register write sequence or request a
 * chip reset from the co-processor over HWRM.
 */
static void bnxt_reset_all(struct bnxt_softc *bp)
{
	struct bnxt_fw_health *fw_health = bp->fw_health;
	int i, rc;

	if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
		bp->fw_reset_timestamp = jiffies;
		return;
	}

	if (fw_health->flags & HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FLAGS_HOST) {
		/* Host-driven reset: replay the register write sequence. */
		for (i = 0; i < fw_health->fw_reset_seq_cnt; i++)
			bnxt_fw_reset_writel(bp, i);
	} else if (fw_health->flags & HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FLAGS_CO_CPU) {
		struct hwrm_fw_reset_input req = {0};

		bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_RESET);
		req.target_id = htole16(HWRM_TARGET_ID_KONG);
		req.embedded_proc_type = HWRM_FW_RESET_INPUT_EMBEDDED_PROC_TYPE_CHIP;
		req.selfrst_status = HWRM_FW_RESET_INPUT_SELFRST_STATUS_SELFRSTASAP;
		req.flags = HWRM_FW_RESET_INPUT_FLAGS_RESET_GRACEFUL;
		rc = hwrm_send_message(bp, &req, sizeof(req));

		/* Only -ENODEV is treated as an expected outcome here. */
		if (rc != -ENODEV)
			device_printf(bp->dev, "Unable to reset FW rc=%d\n", rc);
	}
	bp->fw_reset_timestamp = jiffies;
}
1612 
__bnxt_alloc_fw_health(struct bnxt_softc * bp)1613 static int __bnxt_alloc_fw_health(struct bnxt_softc *bp)
1614 {
1615 	if (bp->fw_health)
1616 		return 0;
1617 
1618 	bp->fw_health = kzalloc(sizeof(*bp->fw_health), GFP_KERNEL);
1619 	if (!bp->fw_health)
1620 		return -ENOMEM;
1621 
1622 	mutex_init(&bp->fw_health->lock);
1623 	return 0;
1624 }
1625 
bnxt_alloc_fw_health(struct bnxt_softc * bp)1626 static int bnxt_alloc_fw_health(struct bnxt_softc *bp)
1627 {
1628 	int rc;
1629 
1630 	if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) &&
1631 	    !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
1632 		return 0;
1633 
1634 	rc = __bnxt_alloc_fw_health(bp);
1635 	if (rc) {
1636 		bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET;
1637 		bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
1638 		return rc;
1639 	}
1640 
1641 	return 0;
1642 }
1643 
/* Aim the health-monitoring GRC window at the given GRC register base. */
static inline void __bnxt_map_fw_health_reg(struct bnxt_softc *bp, u32 reg)
{
	writel_fbsd(bp, BNXT_GRCPF_REG_WINDOW_BASE_OUT + BNXT_FW_HEALTH_WIN_MAP_OFF, 0, reg & BNXT_GRC_BASE_MASK);
}
1648 
bnxt_map_fw_health_regs(struct bnxt_softc * bp)1649 static int bnxt_map_fw_health_regs(struct bnxt_softc *bp)
1650 {
1651 	struct bnxt_fw_health *fw_health = bp->fw_health;
1652 	u32 reg_base = 0xffffffff;
1653 	int i;
1654 
1655 	bp->fw_health->status_reliable = false;
1656 	bp->fw_health->resets_reliable = false;
1657 	/* Only pre-map the monitoring GRC registers using window 3 */
1658 	for (i = 0; i < 4; i++) {
1659 		u32 reg = fw_health->regs[i];
1660 
1661 		if (BNXT_FW_HEALTH_REG_TYPE(reg) != BNXT_FW_HEALTH_REG_TYPE_GRC)
1662 			continue;
1663 		if (reg_base == 0xffffffff)
1664 			reg_base = reg & BNXT_GRC_BASE_MASK;
1665 		if ((reg & BNXT_GRC_BASE_MASK) != reg_base)
1666 			return -ERANGE;
1667 		fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_OFF(reg);
1668 	}
1669 	bp->fw_health->status_reliable = true;
1670 	bp->fw_health->resets_reliable = true;
1671 	if (reg_base == 0xffffffff)
1672 		return 0;
1673 
1674 	__bnxt_map_fw_health_reg(bp, reg_base);
1675 	return 0;
1676 }
1677 
bnxt_inv_fw_health_reg(struct bnxt_softc * bp)1678 static void bnxt_inv_fw_health_reg(struct bnxt_softc *bp)
1679 {
1680 	struct bnxt_fw_health *fw_health = bp->fw_health;
1681 	u32 reg_type;
1682 
1683 	if (!fw_health)
1684 		return;
1685 
1686 	reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_HEALTH_REG]);
1687 	if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
1688 		fw_health->status_reliable = false;
1689 
1690 	reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_RESET_CNT_REG]);
1691 	if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
1692 		fw_health->resets_reliable = false;
1693 }
1694 
/*
 * Query the firmware's error-recovery configuration (health, heartbeat,
 * reset-count and reset-in-progress registers, wait times, and the host
 * register-write reset sequence), cache it in bp->fw_health, and pre-map
 * any GRC registers.  On failure the error-recovery capability flag is
 * cleared so recovery is not attempted with stale/absent data.
 */
static int bnxt_hwrm_error_recovery_qcfg(struct bnxt_softc *bp)
{
	struct bnxt_fw_health *fw_health = bp->fw_health;
	struct hwrm_error_recovery_qcfg_output *resp =
	    (void *)bp->hwrm_cmd_resp.idi_vaddr;
	struct hwrm_error_recovery_qcfg_input req = {0};
	int rc, i;

	if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_ERROR_RECOVERY_QCFG);
	rc = _hwrm_send_message(bp, &req, sizeof(req));

	if (rc)
		goto err_recovery_out;
	fw_health->flags = le32toh(resp->flags);
	/* CO_CPU-driven recovery requires the Kong mailbox channel. */
	if ((fw_health->flags & HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FLAGS_CO_CPU) &&
	    !(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) {
		rc = -EINVAL;
		goto err_recovery_out;
	}
	fw_health->polling_dsecs = le32toh(resp->driver_polling_freq);
	fw_health->master_func_wait_dsecs =
		le32toh(resp->master_func_wait_period);
	fw_health->normal_func_wait_dsecs =
		le32toh(resp->normal_func_wait_period);
	fw_health->post_reset_wait_dsecs =
		le32toh(resp->master_func_wait_period_after_reset);
	fw_health->post_reset_max_wait_dsecs =
		le32toh(resp->max_bailout_time_after_reset);
	fw_health->regs[BNXT_FW_HEALTH_REG] =
		le32toh(resp->fw_health_status_reg);
	fw_health->regs[BNXT_FW_HEARTBEAT_REG] =
		le32toh(resp->fw_heartbeat_reg);
	fw_health->regs[BNXT_FW_RESET_CNT_REG] =
		le32toh(resp->fw_reset_cnt_reg);
	fw_health->regs[BNXT_FW_RESET_INPROG_REG] =
		le32toh(resp->reset_inprogress_reg);
	fw_health->fw_reset_inprog_reg_mask =
		le32toh(resp->reset_inprogress_reg_mask);
	/* Reject sequences longer than the fixed-size arrays. */
	fw_health->fw_reset_seq_cnt = resp->reg_array_cnt;
	if (fw_health->fw_reset_seq_cnt >= 16) {
		rc = -EINVAL;
		goto err_recovery_out;
	}
	for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) {
		fw_health->fw_reset_seq_regs[i] =
			le32toh(resp->reset_reg[i]);
		fw_health->fw_reset_seq_vals[i] =
			le32toh(resp->reset_reg_val[i]);
		fw_health->fw_reset_seq_delay_msec[i] =
			le32toh(resp->delay_after_reset[i]);
	}
err_recovery_out:
	if (!rc)
		rc = bnxt_map_fw_health_regs(bp);
	if (rc)
		bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
	return rc;
}
1756 
/*
 * Register the driver with the firmware.
 *
 * Error-recovery support is probed first (bnxt_alloc_fw_health() +
 * bnxt_hwrm_error_recovery_qcfg()) because the capability must be known
 * before registering.  Failures in that probe are logged but are not
 * fatal; registration proceeds regardless.
 *
 * Returns 0 on success or -ENODEV if FW registration fails.
 */
static int bnxt_drv_rgtr(struct bnxt_softc *bp)
{
	int rc;

	/* determine whether we can support error recovery before
	 * registering with FW
	 */
	if (bnxt_alloc_fw_health(bp)) {
		device_printf(bp->dev, "no memory for firmware error recovery\n");
	} else {
		rc = bnxt_hwrm_error_recovery_qcfg(bp);
		if (rc)
			device_printf(bp->dev, "hwrm query error recovery failure rc: %d\n",
				    rc);
	}
	rc = bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false);
	if (rc)
		return -ENODEV;
	return 0;
}
1777 
bnxt_fw_reset_timeout(struct bnxt_softc * bp)1778 static bool bnxt_fw_reset_timeout(struct bnxt_softc *bp)
1779 {
1780 	return time_after(jiffies, bp->fw_reset_timestamp +
1781 			  (bp->fw_reset_max_dsecs * HZ / 10));
1782 }
1783 
bnxt_open(struct bnxt_softc * bp)1784 static int bnxt_open(struct bnxt_softc *bp)
1785 {
1786 	int rc = 0;
1787 	if (BNXT_PF(bp))
1788 		rc = bnxt_hwrm_nvm_get_dev_info(bp, &bp->nvm_info->mfg_id,
1789 			&bp->nvm_info->device_id, &bp->nvm_info->sector_size,
1790 			&bp->nvm_info->size, &bp->nvm_info->reserved_size,
1791 			&bp->nvm_info->available_size);
1792 
1793 	/* Get the queue config */
1794 	rc = bnxt_hwrm_queue_qportcfg(bp, HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX);
1795 	if (rc) {
1796 		device_printf(bp->dev, "reinit: hwrm qportcfg (tx) failed\n");
1797 		return rc;
1798 	}
1799 	if (bp->is_asym_q) {
1800 		rc = bnxt_hwrm_queue_qportcfg(bp,
1801 					      HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX);
1802 		if (rc) {
1803 			device_printf(bp->dev, "re-init: hwrm qportcfg (rx)  failed\n");
1804 			return rc;
1805 		}
1806 		bnxt_verify_asym_queues(bp);
1807 	} else {
1808 		bp->rx_max_q = bp->tx_max_q;
1809 		memcpy(bp->rx_q_info, bp->tx_q_info, sizeof(bp->rx_q_info));
1810 		memcpy(bp->rx_q_ids, bp->tx_q_ids, sizeof(bp->rx_q_ids));
1811 	}
1812 	/* Get the HW capabilities */
1813 	rc = bnxt_hwrm_func_qcaps(bp);
1814 	if (rc)
1815 		return rc;
1816 
1817 	/* Register the driver with the FW */
1818 	rc = bnxt_drv_rgtr(bp);
1819 	if (rc)
1820 		return rc;
1821 	if (bp->hwrm_spec_code >= 0x10803) {
1822 		rc = bnxt_alloc_ctx_mem(bp);
1823 		if (rc) {
1824 			device_printf(bp->dev, "attach: alloc_ctx_mem failed\n");
1825 			return rc;
1826 		}
1827 		rc = bnxt_hwrm_func_resc_qcaps(bp, true);
1828 		if (!rc)
1829 			bp->flags |= BNXT_FLAG_FW_CAP_NEW_RM;
1830 	}
1831 
1832 	if (BNXT_CHIP_P5(bp))
1833 		bnxt_hwrm_reserve_pf_rings(bp);
1834 	/* Get the current configuration of this function */
1835 	rc = bnxt_hwrm_func_qcfg(bp);
1836 	if (rc) {
1837 		device_printf(bp->dev, "re-init: hwrm func qcfg failed\n");
1838 		return rc;
1839 	}
1840 
1841 	bnxt_msix_intr_assign(bp->ctx, 0);
1842 	bnxt_init(bp->ctx);
1843 	bnxt_intr_enable(bp->ctx);
1844 
1845 	if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) {
1846 		if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
1847 			bnxt_ulp_start(bp, 0);
1848 		}
1849 	}
1850 
1851 	device_printf(bp->dev, "Network interface is UP and operational\n");
1852 
1853 	return rc;
1854 }
/*
 * Abort an in-progress firmware reset: leave the IN_FW_RESET state,
 * restart the ULPs with the failure code (unless we were merely polling
 * as a VF), and clear the reset state machine.
 *
 * @rc is the error propagated to bnxt_ulp_start().
 */
static void bnxt_fw_reset_abort(struct bnxt_softc *bp, int rc)
{
	/* Drop the reset flag first so other paths stop deferring to us. */
	clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
	if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF) {
		bnxt_ulp_start(bp, rc);
	}
	bp->fw_reset_state = 0;
}
1863 
/*
 * Firmware reset state machine, run from the fw_reset_task delayed
 * work.  Each invocation handles the current bp->fw_reset_state and
 * either re-queues itself (bnxt_queue_fw_reset_work) to poll again or
 * falls through to the next state:
 *
 *   POLL_FW_DOWN -> RESET_FW -> ENABLE_DEV -> POLL_FW -> OPENING
 *
 * Any unrecoverable failure jumps to the abort labels at the bottom,
 * which tear down the reset state via bnxt_fw_reset_abort().
 */
static void bnxt_fw_reset_task(struct work_struct *work)
{
	struct bnxt_softc *bp = container_of(work, struct bnxt_softc, fw_reset_task.work);
	int rc = 0;

	if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
		device_printf(bp->dev, "bnxt_fw_reset_task() called when not in fw reset mode!\n");
		return;
	}

	switch (bp->fw_reset_state) {
	case BNXT_FW_RESET_STATE_POLL_FW_DOWN: {
		u32 val;

		/* Wait for FW to report shutdown, or give up on timeout. */
		val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
		if (!(val & BNXT_FW_STATUS_SHUTDOWN) &&
		    !bnxt_fw_reset_timeout(bp)) {
			bnxt_queue_fw_reset_work(bp, HZ / 5);
			return;
		}

		/* Only the primary function performs the actual reset;
		 * others just wait and then re-enable the device. */
		if (!bp->fw_health->primary) {
			u32 wait_dsecs = bp->fw_health->normal_func_wait_dsecs;

			bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
			bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
			return;
		}
		bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
	}
		fallthrough;
	case BNXT_FW_RESET_STATE_RESET_FW:
		/* Trigger the reset write sequence, then wait the minimum
		 * post-reset period before touching the device again. */
		bnxt_reset_all(bp);
		bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
		bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10);
		return;
	case BNXT_FW_RESET_STATE_ENABLE_DEV:
		bnxt_inv_fw_health_reg(bp);
		/* After a fatal reset with no mandated wait, poll PCI
		 * config space until the device reappears (0xffff means
		 * config reads are still failing). */
		if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) &&
		    !bp->fw_reset_min_dsecs) {
			u16 val;

			val = pci_read_config(bp->dev, PCI_SUBSYSTEM_ID, 2);
			if (val == 0xffff) {
				if (bnxt_fw_reset_timeout(bp)) {
					device_printf(bp->dev, "Firmware reset aborted, PCI config space invalid\n");
					rc = -ETIMEDOUT;
					goto fw_reset_abort;
				}
				bnxt_queue_fw_reset_work(bp, HZ / 1000);
				return;
			}
		}
		clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
		clear_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state);
		if (!pci_is_enabled(bp->pdev)) {
			if (pci_enable_device(bp->pdev)) {
				device_printf(bp->dev, "Cannot re-enable PCI device\n");
				rc = -ENODEV;
				goto fw_reset_abort;
			}
		}
		pci_set_master(bp->pdev);
		bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW;
		fallthrough;
	case BNXT_FW_RESET_STATE_POLL_FW:
		/* Use a short HWRM timeout while probing for FW liveness. */
		bp->hwrm_cmd_timeo = SHORT_HWRM_CMD_TIMEOUT;
		rc = bnxt_hwrm_poll(bp);
		if (rc) {
			if (bnxt_fw_reset_timeout(bp)) {
				device_printf(bp->dev, "Firmware reset aborted\n");
				goto fw_reset_abort_status;
			}
			bnxt_queue_fw_reset_work(bp, HZ / 5);
			return;
		}
		bp->hwrm_cmd_timeo = DFLT_HWRM_CMD_TIMEOUT;
		bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING;
		fallthrough;
	case BNXT_FW_RESET_STATE_OPENING:
		rc = bnxt_open(bp);
		if (rc) {
			device_printf(bp->dev, "bnxt_open() failed during FW reset\n");
			/*
			 * NOTE(review): rtnl_unlock() here (and at the end of
			 * this case) has no matching rtnl_lock() visible in
			 * this function — presumably taken in an earlier
			 * state or by the work scheduler; confirm pairing.
			 */
			bnxt_fw_reset_abort(bp, rc);
			rtnl_unlock();
			return;
		}

		/* Re-baseline the reset counter so the next FW reset is
		 * detected relative to the new firmware instance. */
		if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) &&
		    bp->fw_health->enabled) {
			bp->fw_health->last_fw_reset_cnt =
				bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
		}
		bp->fw_reset_state = 0;
		/* Order the state-machine clear before the bit flips below. */
		smp_mb__before_atomic();
		clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
		bnxt_ulp_start(bp, 0);
		clear_bit(BNXT_STATE_FW_ACTIVATE, &bp->state);
		set_bit(BNXT_STATE_OPEN, &bp->state);
		rtnl_unlock();
	}
	return;

fw_reset_abort_status:
	/* Log the FW health register when it is trustworthy. */
	if (bp->fw_health->status_reliable ||
	    (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) {
		u32 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);

		device_printf(bp->dev, "fw_health_status 0x%x\n", sts);
	}
fw_reset_abort:
	rtnl_lock();
	bnxt_fw_reset_abort(bp, rc);
	rtnl_unlock();
}
1979 
/*
 * Kick off an immediate firmware reset (fatal-error path).
 *
 * Closes the data path and primes the fw_reset state machine: the
 * primary function proceeds to RESET_FW (immediately if a CO_CPU is
 * driving recovery), while non-primary functions wait for the primary
 * to reset FW and then go straight to ENABLE_DEV.  No-op if the device
 * is not open or a reset is already in progress.
 */
static void bnxt_force_fw_reset(struct bnxt_softc *bp)
{
	struct bnxt_fw_health *fw_health = bp->fw_health;
	u32 wait_dsecs;

	if (!test_bit(BNXT_STATE_OPEN, &bp->state) ||
	    test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
		return;
	bnxt_fw_reset_close(bp);
	wait_dsecs = fw_health->master_func_wait_dsecs;
	if (fw_health->primary) {
		/* CO_CPU performs the reset itself; no extra wait needed. */
		if (fw_health->flags & HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FLAGS_CO_CPU)
			wait_dsecs = 0;
		bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
	} else {
		/* Non-primary: timeout budget includes the primary's wait. */
		bp->fw_reset_timestamp = jiffies + wait_dsecs * HZ / 10;
		wait_dsecs = fw_health->normal_func_wait_dsecs;
		bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
	}

	bp->fw_reset_min_dsecs = fw_health->post_reset_wait_dsecs;
	bp->fw_reset_max_dsecs = fw_health->post_reset_max_wait_dsecs;
	bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
}
2004 
/*
 * Handle a firmware fatal condition: mark the state fatal and force a
 * firmware reset under the rtnl/serialization lock.
 */
static void bnxt_fw_exception(struct bnxt_softc *bp)
{
	device_printf(bp->dev, "Detected firmware fatal condition, initiating reset\n");
	/* Must be set before the reset so ENABLE_DEV knows to poll PCI. */
	set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
	bnxt_rtnl_lock_sp(bp);
	bnxt_force_fw_reset(bp);
	bnxt_rtnl_unlock_sp(bp);
}
2013 
__bnxt_fw_recover(struct bnxt_softc * bp)2014 static void __bnxt_fw_recover(struct bnxt_softc *bp)
2015 {
2016 	if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) ||
2017 	    test_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state))
2018 		bnxt_fw_reset(bp);
2019 	else
2020 		bnxt_fw_exception(bp);
2021 }
2022 
bnxt_devlink_health_fw_report(struct bnxt_softc * bp)2023 static void bnxt_devlink_health_fw_report(struct bnxt_softc *bp)
2024 {
2025 	struct bnxt_fw_health *fw_health = bp->fw_health;
2026 
2027 	if (!fw_health)
2028 		return;
2029 
2030 	if (!fw_health->fw_reporter) {
2031 		__bnxt_fw_recover(bp);
2032 		return;
2033 	}
2034 }
2035 
/*
 * Slow-path work handler.  Processes deferred events posted in
 * bp->sp_event: firmware reset notifications and firmware exceptions.
 * BNXT_STATE_IN_SP_TASK brackets the work (with memory barriers) so
 * other paths can tell when the slow path is active; the task bails
 * out early if the device is no longer open.
 */
static void bnxt_sp_task(struct work_struct *work)
{
	struct bnxt_softc *bp = container_of(work, struct bnxt_softc, sp_task);

	set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
	/* Publish IN_SP_TASK before reading the OPEN state. */
	smp_mb__after_atomic();
	if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
		clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
		return;
	}

	/* FW asked for (or announced) a reset. */
	if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event)) {
		if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) ||
		    test_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state))
			bnxt_devlink_health_fw_report(bp);
		else
			bnxt_fw_reset(bp);
	}

	/* FW exception: only report if FW really looks unhealthy. */
	if (test_and_clear_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event)) {
		if (!is_bnxt_fw_ok(bp))
			bnxt_devlink_health_fw_report(bp);
	}
	/* Order the event handling above before dropping IN_SP_TASK. */
	smp_mb__before_atomic();
	clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
}
2062 
2063 /* Device setup and teardown */
2064 static int
bnxt_attach_pre(if_ctx_t ctx)2065 bnxt_attach_pre(if_ctx_t ctx)
2066 {
2067 	struct bnxt_softc *softc = iflib_get_softc(ctx);
2068 	if_softc_ctx_t scctx;
2069 	int rc = 0;
2070 
2071 	softc->ctx = ctx;
2072 	softc->dev = iflib_get_dev(ctx);
2073 	softc->media = iflib_get_media(ctx);
2074 	softc->scctx = iflib_get_softc_ctx(ctx);
2075 	softc->sctx = iflib_get_sctx(ctx);
2076 	scctx = softc->scctx;
2077 
2078 	/* TODO: Better way of detecting NPAR/VF is needed */
2079 	switch (pci_get_device(softc->dev)) {
2080 	case BCM57402_NPAR:
2081 	case BCM57404_NPAR:
2082 	case BCM57406_NPAR:
2083 	case BCM57407_NPAR:
2084 	case BCM57412_NPAR1:
2085 	case BCM57412_NPAR2:
2086 	case BCM57414_NPAR1:
2087 	case BCM57414_NPAR2:
2088 	case BCM57416_NPAR1:
2089 	case BCM57416_NPAR2:
2090 		softc->flags |= BNXT_FLAG_NPAR;
2091 		break;
2092 	case NETXTREME_C_VF1:
2093 	case NETXTREME_C_VF2:
2094 	case NETXTREME_C_VF3:
2095 	case NETXTREME_E_VF1:
2096 	case NETXTREME_E_VF2:
2097 	case NETXTREME_E_VF3:
2098 		softc->flags |= BNXT_FLAG_VF;
2099 		break;
2100 	}
2101 
2102 	softc->domain = pci_get_domain(softc->dev);
2103 	softc->bus = pci_get_bus(softc->dev);
2104 	softc->slot = pci_get_slot(softc->dev);
2105 	softc->function = pci_get_function(softc->dev);
2106 	softc->dev_fn = PCI_DEVFN(softc->slot, softc->function);
2107 
2108 	if (bnxt_num_pfs == 0)
2109 		  SLIST_INIT(&pf_list);
2110 	bnxt_num_pfs++;
2111 	softc->list.softc = softc;
2112 	SLIST_INSERT_HEAD(&pf_list, &softc->list, next);
2113 
2114 	pci_enable_busmaster(softc->dev);
2115 
2116 	if (bnxt_pci_mapping(softc)) {
2117 		device_printf(softc->dev, "PCI mapping failed\n");
2118 		rc = ENXIO;
2119 		goto pci_map_fail;
2120 	}
2121 
2122 	softc->pdev = kzalloc(sizeof(*softc->pdev), GFP_KERNEL);
2123 	if (!softc->pdev) {
2124 		device_printf(softc->dev, "pdev alloc failed\n");
2125 		rc = -ENOMEM;
2126 		goto free_pci_map;
2127 	}
2128 
2129 	rc = linux_pci_attach_device(softc->dev, NULL, NULL, softc->pdev);
2130 	if (rc) {
2131 		device_printf(softc->dev, "Failed to attach Linux PCI device 0x%x\n", rc);
2132 		goto pci_attach_fail;
2133 	}
2134 
2135 	/* HWRM setup/init */
2136 	BNXT_HWRM_LOCK_INIT(softc, device_get_nameunit(softc->dev));
2137 	rc = bnxt_alloc_hwrm_dma_mem(softc);
2138 	if (rc)
2139 		goto dma_fail;
2140 
2141 	/* Get firmware version and compare with driver */
2142 	softc->ver_info = malloc(sizeof(struct bnxt_ver_info),
2143 	    M_DEVBUF, M_NOWAIT | M_ZERO);
2144 	if (softc->ver_info == NULL) {
2145 		rc = ENOMEM;
2146 		device_printf(softc->dev,
2147 		    "Unable to allocate space for version info\n");
2148 		goto ver_alloc_fail;
2149 	}
2150 	/* Default minimum required HWRM version */
2151 	softc->ver_info->hwrm_min_major = HWRM_VERSION_MAJOR;
2152 	softc->ver_info->hwrm_min_minor = HWRM_VERSION_MINOR;
2153 	softc->ver_info->hwrm_min_update = HWRM_VERSION_UPDATE;
2154 
2155 	rc = bnxt_hwrm_ver_get(softc);
2156 	if (rc) {
2157 		device_printf(softc->dev, "attach: hwrm ver get failed\n");
2158 		goto ver_fail;
2159 	}
2160 
2161 	/* Now perform a function reset */
2162 	rc = bnxt_hwrm_func_reset(softc);
2163 
2164 	if ((softc->flags & BNXT_FLAG_SHORT_CMD) ||
2165 	    softc->hwrm_max_ext_req_len > BNXT_HWRM_MAX_REQ_LEN) {
2166 		rc = bnxt_alloc_hwrm_short_cmd_req(softc);
2167 		if (rc)
2168 			goto hwrm_short_cmd_alloc_fail;
2169 	}
2170 
2171 	if ((softc->ver_info->chip_num == BCM57508) ||
2172 	    (softc->ver_info->chip_num == BCM57504) ||
2173 	    (softc->ver_info->chip_num == BCM57502))
2174 		softc->flags |= BNXT_FLAG_CHIP_P5;
2175 
2176 	softc->flags |= BNXT_FLAG_TPA;
2177 
2178 	if (BNXT_CHIP_P5(softc) && (!softc->ver_info->chip_rev) &&
2179 			(!softc->ver_info->chip_metal))
2180 		softc->flags &= ~BNXT_FLAG_TPA;
2181 
2182 	if (BNXT_CHIP_P5(softc))
2183 		softc->flags &= ~BNXT_FLAG_TPA;
2184 
2185 	/* Get NVRAM info */
2186 	if (BNXT_PF(softc)) {
2187 		if (!bnxt_pf_wq) {
2188 			bnxt_pf_wq =
2189 				create_singlethread_workqueue("bnxt_pf_wq");
2190 			if (!bnxt_pf_wq) {
2191 				device_printf(softc->dev, "Unable to create workqueue.\n");
2192 				rc = -ENOMEM;
2193 				goto nvm_alloc_fail;
2194 			}
2195 		}
2196 
2197 		softc->nvm_info = malloc(sizeof(struct bnxt_nvram_info),
2198 		    M_DEVBUF, M_NOWAIT | M_ZERO);
2199 		if (softc->nvm_info == NULL) {
2200 			rc = ENOMEM;
2201 			device_printf(softc->dev,
2202 			    "Unable to allocate space for NVRAM info\n");
2203 			goto nvm_alloc_fail;
2204 		}
2205 
2206 		rc = bnxt_hwrm_nvm_get_dev_info(softc, &softc->nvm_info->mfg_id,
2207 		    &softc->nvm_info->device_id, &softc->nvm_info->sector_size,
2208 		    &softc->nvm_info->size, &softc->nvm_info->reserved_size,
2209 		    &softc->nvm_info->available_size);
2210 	}
2211 
2212 	if (BNXT_CHIP_P5(softc)) {
2213 		softc->db_ops.bnxt_db_tx = bnxt_thor_db_tx;
2214 		softc->db_ops.bnxt_db_rx = bnxt_thor_db_rx;
2215 		softc->db_ops.bnxt_db_rx_cq = bnxt_thor_db_rx_cq;
2216 		softc->db_ops.bnxt_db_tx_cq = bnxt_thor_db_tx_cq;
2217 		softc->db_ops.bnxt_db_nq = bnxt_thor_db_nq;
2218 	} else {
2219 		softc->db_ops.bnxt_db_tx = bnxt_cuw_db_tx;
2220 		softc->db_ops.bnxt_db_rx = bnxt_cuw_db_rx;
2221 		softc->db_ops.bnxt_db_rx_cq = bnxt_cuw_db_cq;
2222 		softc->db_ops.bnxt_db_tx_cq = bnxt_cuw_db_cq;
2223 	}
2224 
2225 
2226 	/* Get the queue config */
2227 	rc = bnxt_hwrm_queue_qportcfg(softc, HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX);
2228 	if (rc) {
2229 		device_printf(softc->dev, "attach: hwrm qportcfg (tx) failed\n");
2230 		goto failed;
2231 	}
2232 	if (softc->is_asym_q) {
2233 		rc = bnxt_hwrm_queue_qportcfg(softc,
2234 					      HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX);
2235 		if (rc) {
2236 			device_printf(softc->dev, "attach: hwrm qportcfg (rx)  failed\n");
2237 			return rc;
2238 		}
2239 		bnxt_verify_asym_queues(softc);
2240 	} else {
2241 		softc->rx_max_q = softc->tx_max_q;
2242 		memcpy(softc->rx_q_info, softc->tx_q_info, sizeof(softc->rx_q_info));
2243 		memcpy(softc->rx_q_ids, softc->tx_q_ids, sizeof(softc->rx_q_ids));
2244 	}
2245 
2246 	/* Get the HW capabilities */
2247 	rc = bnxt_hwrm_func_qcaps(softc);
2248 	if (rc)
2249 		goto failed;
2250 
2251 	/*
2252 	 * Register the driver with the FW
2253 	 * Register the async events with the FW
2254 	 */
2255 	rc = bnxt_drv_rgtr(softc);
2256 	if (rc)
2257 		goto failed;
2258 
2259 	if (softc->hwrm_spec_code >= 0x10803) {
2260 		rc = bnxt_alloc_ctx_mem(softc);
2261 		if (rc) {
2262 			device_printf(softc->dev, "attach: alloc_ctx_mem failed\n");
2263 			return rc;
2264 		}
2265 		rc = bnxt_hwrm_func_resc_qcaps(softc, true);
2266 		if (!rc)
2267 			softc->flags |= BNXT_FLAG_FW_CAP_NEW_RM;
2268 	}
2269 
2270 	/* Get the current configuration of this function */
2271 	rc = bnxt_hwrm_func_qcfg(softc);
2272 	if (rc) {
2273 		device_printf(softc->dev, "attach: hwrm func qcfg failed\n");
2274 		goto failed;
2275 	}
2276 
2277 	iflib_set_mac(ctx, softc->func.mac_addr);
2278 
2279 	scctx->isc_txrx = &bnxt_txrx;
2280 	scctx->isc_tx_csum_flags = (CSUM_IP | CSUM_TCP | CSUM_UDP |
2281 	    CSUM_TCP_IPV6 | CSUM_UDP_IPV6 | CSUM_TSO);
2282 	scctx->isc_capabilities = scctx->isc_capenable =
2283 	    /* These are translated to hwassit bits */
2284 	    IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6 | IFCAP_TSO4 | IFCAP_TSO6 |
2285 	    /* These are checked by iflib */
2286 	    IFCAP_LRO | IFCAP_VLAN_HWFILTER |
2287 	    /* These are part of the iflib mask */
2288 	    IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | IFCAP_VLAN_MTU |
2289 	    IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWTSO |
2290 	    /* These likely get lost... */
2291 	    IFCAP_VLAN_HWCSUM | IFCAP_JUMBO_MTU;
2292 
2293 	if (bnxt_wol_supported(softc))
2294 		scctx->isc_capabilities |= IFCAP_WOL_MAGIC;
2295 	bnxt_get_wol_settings(softc);
2296 	if (softc->wol)
2297 		scctx->isc_capenable |= IFCAP_WOL_MAGIC;
2298 
2299 	/* Get the queue config */
2300 	bnxt_get_wol_settings(softc);
2301 	if (BNXT_CHIP_P5(softc))
2302 		bnxt_hwrm_reserve_pf_rings(softc);
2303 	rc = bnxt_hwrm_func_qcfg(softc);
2304 	if (rc) {
2305 		device_printf(softc->dev, "attach: hwrm func qcfg failed\n");
2306 		goto failed;
2307 	}
2308 
2309 	bnxt_clear_ids(softc);
2310 	if (rc)
2311 		goto failed;
2312 
2313 	/* Now set up iflib sc */
2314 	scctx->isc_tx_nsegments = 31,
2315 	scctx->isc_tx_tso_segments_max = 31;
2316 	scctx->isc_tx_tso_size_max = BNXT_TSO_SIZE;
2317 	scctx->isc_tx_tso_segsize_max = BNXT_TSO_SIZE;
2318 	scctx->isc_vectors = softc->func.max_cp_rings;
2319 	scctx->isc_min_frame_size = BNXT_MIN_FRAME_SIZE;
2320 	scctx->isc_txrx = &bnxt_txrx;
2321 
2322 	if (scctx->isc_nrxd[0] <
2323 	    ((scctx->isc_nrxd[1] * 4) + scctx->isc_nrxd[2]))
2324 		device_printf(softc->dev,
2325 		    "WARNING: nrxd0 (%d) should be at least 4 * nrxd1 (%d) + nrxd2 (%d).  Driver may be unstable\n",
2326 		    scctx->isc_nrxd[0], scctx->isc_nrxd[1], scctx->isc_nrxd[2]);
2327 	if (scctx->isc_ntxd[0] < scctx->isc_ntxd[1] * 2)
2328 		device_printf(softc->dev,
2329 		    "WARNING: ntxd0 (%d) should be at least 2 * ntxd1 (%d).  Driver may be unstable\n",
2330 		    scctx->isc_ntxd[0], scctx->isc_ntxd[1]);
2331 	scctx->isc_txqsizes[0] = sizeof(struct cmpl_base) * scctx->isc_ntxd[0];
2332 	scctx->isc_txqsizes[1] = sizeof(struct tx_bd_short) *
2333 	    scctx->isc_ntxd[1];
2334 	scctx->isc_txqsizes[2] = sizeof(struct cmpl_base) * scctx->isc_ntxd[2];
2335 	scctx->isc_rxqsizes[0] = sizeof(struct cmpl_base) * scctx->isc_nrxd[0];
2336 	scctx->isc_rxqsizes[1] = sizeof(struct rx_prod_pkt_bd) *
2337 	    scctx->isc_nrxd[1];
2338 	scctx->isc_rxqsizes[2] = sizeof(struct rx_prod_pkt_bd) *
2339 	    scctx->isc_nrxd[2];
2340 
2341 	scctx->isc_nrxqsets_max = min(pci_msix_count(softc->dev)-1,
2342 	    softc->fn_qcfg.alloc_completion_rings - 1);
2343 	scctx->isc_nrxqsets_max = min(scctx->isc_nrxqsets_max,
2344 	    softc->fn_qcfg.alloc_rx_rings);
2345 	scctx->isc_nrxqsets_max = min(scctx->isc_nrxqsets_max,
2346 	    softc->fn_qcfg.alloc_vnics);
2347 	scctx->isc_ntxqsets_max = min(softc->fn_qcfg.alloc_tx_rings,
2348 	    softc->fn_qcfg.alloc_completion_rings - scctx->isc_nrxqsets_max - 1);
2349 
2350 	scctx->isc_rss_table_size = HW_HASH_INDEX_SIZE;
2351 	scctx->isc_rss_table_mask = scctx->isc_rss_table_size - 1;
2352 
2353 	/* iflib will map and release this bar */
2354 	scctx->isc_msix_bar = pci_msix_table_bar(softc->dev);
2355 
2356         /*
2357          * Default settings for HW LRO (TPA):
2358          *  Disable HW LRO by default
2359          *  Can be enabled after taking care of 'packet forwarding'
2360          */
2361 	if (softc->flags & BNXT_FLAG_TPA) {
2362 		softc->hw_lro.enable = 0;
2363 		softc->hw_lro.is_mode_gro = 0;
2364 		softc->hw_lro.max_agg_segs = 5; /* 2^5 = 32 segs */
2365 		softc->hw_lro.max_aggs = HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX;
2366 		softc->hw_lro.min_agg_len = 512;
2367 	}
2368 
2369 	/* Allocate the default completion ring */
2370 	softc->def_cp_ring.stats_ctx_id = HWRM_NA_SIGNATURE;
2371 	softc->def_cp_ring.ring.phys_id = (uint16_t)HWRM_NA_SIGNATURE;
2372 	softc->def_cp_ring.ring.softc = softc;
2373 	softc->def_cp_ring.ring.id = 0;
2374 	softc->def_cp_ring.ring.doorbell = (BNXT_CHIP_P5(softc)) ?
2375 		DB_PF_OFFSET_P5 : softc->def_cp_ring.ring.id * 0x80;
2376 	softc->def_cp_ring.ring.ring_size = PAGE_SIZE /
2377 	    sizeof(struct cmpl_base);
2378 	rc = iflib_dma_alloc(ctx,
2379 	    sizeof(struct cmpl_base) * softc->def_cp_ring.ring.ring_size,
2380 	    &softc->def_cp_ring_mem, 0);
2381 	softc->def_cp_ring.ring.vaddr = softc->def_cp_ring_mem.idi_vaddr;
2382 	softc->def_cp_ring.ring.paddr = softc->def_cp_ring_mem.idi_paddr;
2383 	iflib_config_gtask_init(ctx, &softc->def_cp_task, bnxt_def_cp_task,
2384 	    "dflt_cp");
2385 
2386 	rc = bnxt_init_sysctl_ctx(softc);
2387 	if (rc)
2388 		goto init_sysctl_failed;
2389 	if (BNXT_PF(softc)) {
2390 		rc = bnxt_create_nvram_sysctls(softc->nvm_info);
2391 		if (rc)
2392 			goto failed;
2393 	}
2394 
2395 	arc4rand(softc->vnic_info.rss_hash_key, HW_HASH_KEY_SIZE, 0);
2396 	softc->vnic_info.rss_hash_type =
2397 	    HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4 |
2398 	    HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4 |
2399 	    HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4 |
2400 	    HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6 |
2401 	    HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6 |
2402 	    HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
2403 	rc = bnxt_create_config_sysctls_pre(softc);
2404 	if (rc)
2405 		goto failed;
2406 
2407 	rc = bnxt_create_hw_lro_sysctls(softc);
2408 	if (rc)
2409 		goto failed;
2410 
2411 	rc = bnxt_create_pause_fc_sysctls(softc);
2412 	if (rc)
2413 		goto failed;
2414 
2415 	rc = bnxt_create_dcb_sysctls(softc);
2416 	if (rc)
2417 		goto failed;
2418 
2419 	set_bit(BNXT_STATE_OPEN, &softc->state);
2420 	INIT_WORK(&softc->sp_task, bnxt_sp_task);
2421 	INIT_DELAYED_WORK(&softc->fw_reset_task, bnxt_fw_reset_task);
2422 
2423 	/* Initialize the vlan list */
2424 	SLIST_INIT(&softc->vnic_info.vlan_tags);
2425 	softc->vnic_info.vlan_tag_list.idi_vaddr = NULL;
2426 	softc->state_bv = bit_alloc(BNXT_STATE_MAX, M_DEVBUF,
2427 			M_WAITOK|M_ZERO);
2428 
2429 	return (rc);
2430 
2431 failed:
2432 	bnxt_free_sysctl_ctx(softc);
2433 init_sysctl_failed:
2434 	bnxt_hwrm_func_drv_unrgtr(softc, false);
2435 	if (BNXT_PF(softc))
2436 		free(softc->nvm_info, M_DEVBUF);
2437 nvm_alloc_fail:
2438 	bnxt_free_hwrm_short_cmd_req(softc);
2439 hwrm_short_cmd_alloc_fail:
2440 ver_fail:
2441 	free(softc->ver_info, M_DEVBUF);
2442 ver_alloc_fail:
2443 	bnxt_free_hwrm_dma_mem(softc);
2444 dma_fail:
2445 	BNXT_HWRM_LOCK_DESTROY(softc);
2446 	if (softc->pdev)
2447 		linux_pci_detach_device(softc->pdev);
2448 pci_attach_fail:
2449 	kfree(softc->pdev);
2450 	softc->pdev = NULL;
2451 free_pci_map:
2452 	bnxt_pci_mapping_free(softc);
2453 pci_map_fail:
2454 	pci_disable_busmaster(softc->dev);
2455 	return (rc);
2456 }
2457 
2458 static int
bnxt_attach_post(if_ctx_t ctx)2459 bnxt_attach_post(if_ctx_t ctx)
2460 {
2461 	struct bnxt_softc *softc = iflib_get_softc(ctx);
2462 	if_t ifp = iflib_get_ifp(ctx);
2463 	int rc;
2464 
2465 	softc->ifp = ifp;
2466 	bnxt_create_config_sysctls_post(softc);
2467 
2468 	/* Update link state etc... */
2469 	rc = bnxt_probe_phy(softc);
2470 	if (rc)
2471 		goto failed;
2472 
2473 	/* Needs to be done after probing the phy */
2474 	bnxt_create_ver_sysctls(softc);
2475 	ifmedia_removeall(softc->media);
2476 	bnxt_add_media_types(softc);
2477 	ifmedia_set(softc->media, IFM_ETHER | IFM_AUTO);
2478 
2479 	softc->scctx->isc_max_frame_size = if_getmtu(ifp) + ETHER_HDR_LEN +
2480 	    ETHER_CRC_LEN;
2481 
2482 	softc->rx_buf_size = min(softc->scctx->isc_max_frame_size, BNXT_PAGE_SIZE);
2483 	bnxt_dcb_init(softc);
2484 	bnxt_rdma_aux_device_init(softc);
2485 
2486 failed:
2487 	return rc;
2488 }
2489 
/*
 * Device detach (iflib): tear everything down in reverse order of
 * attach — auxiliary RoCE device, deferred work, DCB, global PF list,
 * WoL, interrupts, HWRM state, per-queue resources, DMA memory, and
 * finally PCI mappings.  The ordering below is significant.
 */
static int
bnxt_detach(if_ctx_t ctx)
{
	struct bnxt_softc *softc = iflib_get_softc(ctx);
	struct bnxt_vlan_tag *tag;
	struct bnxt_vlan_tag *tmp;
	int i;

	/* Stop async work before freeing anything it might touch. */
	bnxt_rdma_aux_device_uninit(softc);
	cancel_delayed_work_sync(&softc->fw_reset_task);
	cancel_work_sync(&softc->sp_task);
	bnxt_dcb_free(softc);
	SLIST_REMOVE(&pf_list, &softc->list, bnxt_softc_list, next);
	bnxt_num_pfs--;
	bnxt_wol_config(ctx);
	bnxt_do_disable_intr(&softc->def_cp_ring);
	bnxt_free_sysctl_ctx(softc);
	bnxt_hwrm_func_reset(softc);
	bnxt_free_ctx_mem(softc);
	bnxt_clear_ids(softc);
	iflib_irq_free(ctx, &softc->def_cp_ring.irq);
	iflib_config_gtask_deinit(&softc->def_cp_task);
	/* We need to free() these here... */
	/* P5 chips interrupt on NQ rings, older chips on RX CP rings. */
	for (i = softc->nrxqsets-1; i>=0; i--) {
		if (BNXT_CHIP_P5(softc))
			iflib_irq_free(ctx, &softc->nq_rings[i].irq);
		else
			iflib_irq_free(ctx, &softc->rx_cp_rings[i].irq);

	}
	iflib_dma_free(&softc->vnic_info.mc_list);
	iflib_dma_free(&softc->vnic_info.rss_hash_key_tbl);
	iflib_dma_free(&softc->vnic_info.rss_grp_tbl);
	if (softc->vnic_info.vlan_tag_list.idi_vaddr)
		iflib_dma_free(&softc->vnic_info.vlan_tag_list);
	SLIST_FOREACH_SAFE(tag, &softc->vnic_info.vlan_tags, next, tmp)
		free(tag, M_DEVBUF);
	iflib_dma_free(&softc->def_cp_ring_mem);
	for (i = 0; i < softc->nrxqsets; i++)
		free(softc->rx_rings[i].tpa_start, M_DEVBUF);
	free(softc->ver_info, M_DEVBUF);
	if (BNXT_PF(softc))
		free(softc->nvm_info, M_DEVBUF);

	bnxt_hwrm_func_drv_unrgtr(softc, false);
	bnxt_free_hwrm_dma_mem(softc);
	bnxt_free_hwrm_short_cmd_req(softc);
	BNXT_HWRM_LOCK_DESTROY(softc);

	/* Last PF gone: the shared workqueue can be destroyed. */
	if (!bnxt_num_pfs && bnxt_pf_wq)
		destroy_workqueue(bnxt_pf_wq);

	if (softc->pdev)
		linux_pci_detach_device(softc->pdev);
	free(softc->state_bv, M_DEVBUF);
	pci_disable_busmaster(softc->dev);
	bnxt_pci_mapping_free(softc);

	return 0;
}
2550 
/*
 * Free all HWRM-allocated resources for this function: the default
 * completion ring, per-TX-queue rings/stat contexts, filters, VNIC and
 * RSS context, and per-RX-queue ring groups/rings/stat contexts.
 *
 * The ordering mirrors the reverse of allocation and matters to the
 * firmware.  Any failure aborts the remaining frees (goto fail) —
 * errors are not reported to the caller.
 */
static void
bnxt_hwrm_resource_free(struct bnxt_softc *softc)
{
	int i, rc = 0;

	rc = bnxt_hwrm_ring_free(softc,
			HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
			&softc->def_cp_ring.ring,
			(uint16_t)HWRM_NA_SIGNATURE);
	if (rc)
		goto fail;

	/* TX side: ring, then its completion ring, then its stat ctx. */
	for (i = 0; i < softc->ntxqsets; i++) {
		rc = bnxt_hwrm_ring_free(softc,
				HWRM_RING_ALLOC_INPUT_RING_TYPE_TX,
				&softc->tx_rings[i],
				softc->tx_cp_rings[i].ring.phys_id);
		if (rc)
			goto fail;

		rc = bnxt_hwrm_ring_free(softc,
				HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
				&softc->tx_cp_rings[i].ring,
				(uint16_t)HWRM_NA_SIGNATURE);
		if (rc)
			goto fail;

		rc = bnxt_hwrm_stat_ctx_free(softc, &softc->tx_cp_rings[i]);
		if (rc)
			goto fail;
	}
	/* Filters and VNIC must go before the RX rings they reference. */
	rc = bnxt_hwrm_free_filter(softc);
	if (rc)
		goto fail;

	rc = bnxt_hwrm_vnic_free(softc, &softc->vnic_info);
	if (rc)
		goto fail;

	rc = bnxt_hwrm_vnic_ctx_free(softc, softc->vnic_info.rss_id);
	if (rc)
		goto fail;

	/* RX side: group, AGG ring, RX ring, completion ring, NQ (P5),
	 * and finally the stat context. */
	for (i = 0; i < softc->nrxqsets; i++) {
		rc = bnxt_hwrm_ring_grp_free(softc, &softc->grp_info[i]);
		if (rc)
			goto fail;

		rc = bnxt_hwrm_ring_free(softc,
				HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG,
				&softc->ag_rings[i],
				(uint16_t)HWRM_NA_SIGNATURE);
		if (rc)
			goto fail;

		rc = bnxt_hwrm_ring_free(softc,
				HWRM_RING_ALLOC_INPUT_RING_TYPE_RX,
				&softc->rx_rings[i],
				softc->rx_cp_rings[i].ring.phys_id);
		if (rc)
			goto fail;

		rc = bnxt_hwrm_ring_free(softc,
				HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
				&softc->rx_cp_rings[i].ring,
				(uint16_t)HWRM_NA_SIGNATURE);
		if (rc)
			goto fail;

		if (BNXT_CHIP_P5(softc)) {
			rc = bnxt_hwrm_ring_free(softc,
					HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ,
					&softc->nq_rings[i].ring,
					(uint16_t)HWRM_NA_SIGNATURE);
			if (rc)
				goto fail;
		}

		rc = bnxt_hwrm_stat_ctx_free(softc, &softc->rx_cp_rings[i]);
		if (rc)
			goto fail;
	}

fail:
	return;
}
2637 
2638 
/*
 * Reset the function's firmware state.  P5 chips do not use the
 * monolithic HWRM function reset; instead each HWRM resource is freed
 * individually.  Older chips use HWRM_FUNC_RESET directly.
 */
static void
bnxt_func_reset(struct bnxt_softc *softc)
{

	if (BNXT_CHIP_P5(softc))
		bnxt_hwrm_resource_free(softc);
	else
		bnxt_hwrm_func_reset(softc);
}
2651 
/*
 * Fill the RSS indirection table, spreading the HW_HASH_INDEX_SIZE
 * slots round-robin across the nrxqsets RX queues.
 *
 * On P5 chips each queue consumes TWO consecutive table entries (the
 * RX ring id followed by its completion ring id) — hence the extra
 * `i++` inside the loop; older chips store one ring-group id per slot.
 */
static void
bnxt_rss_grp_tbl_init(struct bnxt_softc *softc)
{
	uint16_t *rgt = (uint16_t *) softc->vnic_info.rss_grp_tbl.idi_vaddr;
	int i, j;

	for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++) {
		if (BNXT_CHIP_P5(softc)) {
			/* Pair: RX ring id, then its CP ring id. */
			rgt[i++] = htole16(softc->rx_rings[j].phys_id);
			rgt[i] = htole16(softc->rx_cp_rings[j].ring.phys_id);
		} else {
			rgt[i] = htole16(softc->grp_info[j].grp_id);
		}
		/* Wrap back to queue 0 after the last RX queue. */
		if (++j == softc->nrxqsets)
			j = 0;
	}
}
2669 
/*
 * Query the link state from firmware and warn about SFP+ modules the
 * firmware has flagged (unqualified, TX disabled, or powered down).
 */
static void bnxt_get_port_module_status(struct bnxt_softc *softc)
{
	struct bnxt_link_info *link_info = &softc->link_info;
	struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
	uint8_t module_status;

	/* Refresh link_info from firmware; bail out silently on failure. */
	if (bnxt_update_link(softc, false))
		return;

	module_status = link_info->module_status;
	switch (module_status) {
	case HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_DISABLETX:
	case HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_PWRDOWN:
	case HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_WARNINGMSG:
		/* All three states share the generic warning ... */
		device_printf(softc->dev, "Unqualified SFP+ module detected on port %d\n",
			    softc->pf.port_id);
		/* Part number is only reported by HWRM spec >= 1.2.1. */
		if (softc->hwrm_spec_code >= 0x10201) {
			device_printf(softc->dev, "Module part number %s\n",
				    resp->phy_vendor_partnumber);
		}
		/* ... plus a state-specific detail message. */
		if (module_status == HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_DISABLETX)
			device_printf(softc->dev, "TX is disabled\n");
		if (module_status == HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_PWRDOWN)
			device_printf(softc->dev, "SFP+ module is shutdown\n");
	}
}
2696 
/* Free the RoCE auxiliary device structure and clear the stale pointer. */
static void bnxt_aux_dev_free(struct bnxt_softc *softc)
{
	kfree(softc->aux_dev);
	softc->aux_dev = NULL;
}
2702 
/*
 * Allocate the auxiliary device structure used to attach the RoCE
 * driver.  Returns ERR_PTR(-ENOMEM) on allocation failure, never NULL.
 */
static struct bnxt_aux_dev *bnxt_aux_dev_init(struct bnxt_softc *softc)
{
	struct bnxt_aux_dev *bnxt_adev;

	/*
	 * NOTE(review): unexplained 2 second delay before the allocation;
	 * looks like a settle/debug leftover -- confirm whether firmware
	 * or the aux bus actually requires it.
	 */
	msleep(1000 * 2);
	bnxt_adev = kzalloc(sizeof(*bnxt_adev), GFP_KERNEL);
	if (!bnxt_adev)
		return ERR_PTR(-ENOMEM);

	return bnxt_adev;
}
2714 
/*
 * Tear down the RoCE auxiliary device in reverse order of
 * bnxt_rdma_aux_device_init(): detach from the aux bus, release the
 * ida-allocated id, then free the device structure.
 */
static void bnxt_rdma_aux_device_uninit(struct bnxt_softc *softc)
{
	struct bnxt_aux_dev *bnxt_adev = softc->aux_dev;

	/* Skip if no auxiliary device init was done. */
	if (!(softc->flags & BNXT_FLAG_ROCE_CAP))
		return;

	if (IS_ERR_OR_NULL(bnxt_adev))
		return;

	bnxt_rdma_aux_device_del(softc);

	/* Only return an id to the ida if one was actually allocated. */
	if (bnxt_adev->id >= 0)
		ida_free(&bnxt_aux_dev_ids, bnxt_adev->id);

	bnxt_aux_dev_free(softc);
}
2733 
bnxt_rdma_aux_device_init(struct bnxt_softc * softc)2734 static void bnxt_rdma_aux_device_init(struct bnxt_softc *softc)
2735 {
2736 	int rc;
2737 
2738 	if (!(softc->flags & BNXT_FLAG_ROCE_CAP))
2739 		return;
2740 
2741 	softc->aux_dev = bnxt_aux_dev_init(softc);
2742 	if (IS_ERR_OR_NULL(softc->aux_dev)) {
2743 		device_printf(softc->dev, "Failed to init auxiliary device for ROCE\n");
2744 		goto skip_aux_init;
2745 	}
2746 
2747 	softc->aux_dev->id = ida_alloc(&bnxt_aux_dev_ids, GFP_KERNEL);
2748 	if (softc->aux_dev->id < 0) {
2749 		device_printf(softc->dev, "ida alloc failed for ROCE auxiliary device\n");
2750 		bnxt_aux_dev_free(softc);
2751 		goto skip_aux_init;
2752 	}
2753 
2754 	msleep(1000 * 2);
2755 	/* If aux bus init fails, continue with netdev init. */
2756 	rc = bnxt_rdma_aux_device_add(softc);
2757 	if (rc) {
2758 		device_printf(softc->dev, "Failed to add auxiliary device for ROCE\n");
2759 		msleep(1000 * 2);
2760 		ida_free(&bnxt_aux_dev_ids, softc->aux_dev->id);
2761 	}
2762 	device_printf(softc->dev, "%s:%d Added auxiliary device (id %d) for ROCE \n",
2763 		      __func__, __LINE__, softc->aux_dev->id);
2764 skip_aux_init:
2765 	return;
2766 }
2767 
/* Device configuration */
/*
 * iflib init hook: bring the device up.
 *
 * After resetting the function (legacy chips) or stopping a running
 * device (P5), this allocates every firmware object the data path
 * needs, strictly in dependency order: default completion ring (legacy
 * only), then per-RX-queue stats context, NQ (P5 only), completion
 * ring, RX ring, AG ring and ring group, then the async completion
 * ring, the VNIC with its RSS context / filters / TPA configuration,
 * and finally the per-TX-queue stats contexts and rings.  Any failure
 * unwinds by resetting the function and clearing all cached ids.
 */
static void
bnxt_init(if_ctx_t ctx)
{
	struct bnxt_softc *softc = iflib_get_softc(ctx);
	struct ifmediareq ifmr;
	int i;
	int rc;

	if (!BNXT_CHIP_P5(softc)) {
		rc = bnxt_hwrm_func_reset(softc);
		if (rc)
			return;
	} else if (softc->is_dev_init) {
		bnxt_stop(ctx);
	}

	softc->is_dev_init = true;
	bnxt_clear_ids(softc);

	/* P5 chips have no dedicated default completion ring to allocate. */
	if (BNXT_CHIP_P5(softc))
		goto skip_def_cp_ring;
	/* Allocate the default completion ring */
	softc->def_cp_ring.cons = UINT32_MAX;
	softc->def_cp_ring.v_bit = 1;
	bnxt_mark_cpr_invalid(&softc->def_cp_ring);
	rc = bnxt_hwrm_ring_alloc(softc,
			HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
			&softc->def_cp_ring.ring);
	if (rc)
		goto fail;
skip_def_cp_ring:
	for (i = 0; i < softc->nrxqsets; i++) {
		/* Allocate the statistics context */
		rc = bnxt_hwrm_stat_ctx_alloc(softc, &softc->rx_cp_rings[i],
		    softc->rx_stats[i].idi_paddr);
		if (rc)
			goto fail;

		if (BNXT_CHIP_P5(softc)) {
			/* Allocate the NQ */
			softc->nq_rings[i].cons = 0;
			softc->nq_rings[i].v_bit = 1;
			softc->nq_rings[i].last_idx = UINT32_MAX;
			bnxt_mark_cpr_invalid(&softc->nq_rings[i]);
			rc = bnxt_hwrm_ring_alloc(softc,
					HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ,
					&softc->nq_rings[i].ring);
			if (rc)
				goto fail;

			/* Arm the NQ doorbell right away. */
			softc->db_ops.bnxt_db_nq(&softc->nq_rings[i], 1);
		}
		/* Allocate the completion ring */
		softc->rx_cp_rings[i].cons = UINT32_MAX;
		softc->rx_cp_rings[i].v_bit = 1;
		softc->rx_cp_rings[i].last_idx = UINT32_MAX;
		bnxt_mark_cpr_invalid(&softc->rx_cp_rings[i]);
		rc = bnxt_hwrm_ring_alloc(softc,
				HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
				&softc->rx_cp_rings[i].ring);
		if (rc)
			goto fail;

		if (BNXT_CHIP_P5(softc))
			softc->db_ops.bnxt_db_rx_cq(&softc->rx_cp_rings[i], 1);

		/* Allocate the RX ring */
		rc = bnxt_hwrm_ring_alloc(softc,
		    HWRM_RING_ALLOC_INPUT_RING_TYPE_RX, &softc->rx_rings[i]);
		if (rc)
			goto fail;
		softc->db_ops.bnxt_db_rx(&softc->rx_rings[i], 0);

		/* Allocate the AG ring */
		rc = bnxt_hwrm_ring_alloc(softc,
				HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG,
				&softc->ag_rings[i]);
		if (rc)
			goto fail;
		softc->db_ops.bnxt_db_rx(&softc->ag_rings[i], 0);

		/* Allocate the ring group, tying the above ids together. */
		softc->grp_info[i].stats_ctx =
		    softc->rx_cp_rings[i].stats_ctx_id;
		softc->grp_info[i].rx_ring_id = softc->rx_rings[i].phys_id;
		softc->grp_info[i].ag_ring_id = softc->ag_rings[i].phys_id;
		softc->grp_info[i].cp_ring_id =
		    softc->rx_cp_rings[i].ring.phys_id;
		rc = bnxt_hwrm_ring_grp_alloc(softc, &softc->grp_info[i]);
		if (rc)
			goto fail;
	}

	/* And now set the default CP / NQ ring for the async */
	rc = bnxt_cfg_async_cr(softc);
	if (rc)
		goto fail;

	/* Allocate the VNIC RSS context */
	rc = bnxt_hwrm_vnic_ctx_alloc(softc, &softc->vnic_info.rss_id);
	if (rc)
		goto fail;

	/* Allocate the vnic */
	softc->vnic_info.def_ring_grp = softc->grp_info[0].grp_id;
	softc->vnic_info.mru = softc->scctx->isc_max_frame_size;
	rc = bnxt_hwrm_vnic_alloc(softc, &softc->vnic_info);
	if (rc)
		goto fail;
	rc = bnxt_hwrm_vnic_cfg(softc, &softc->vnic_info);
	if (rc)
		goto fail;
	rc = bnxt_hwrm_vnic_set_hds(softc, &softc->vnic_info);
	if (rc)
		goto fail;
	rc = bnxt_hwrm_set_filter(softc);
	if (rc)
		goto fail;

	/* Program the RSS indirection table, then hand it to firmware. */
	bnxt_rss_grp_tbl_init(softc);

	rc = bnxt_hwrm_rss_cfg(softc, &softc->vnic_info,
	    softc->vnic_info.rss_hash_type);
	if (rc)
		goto fail;

	rc = bnxt_hwrm_vnic_tpa_cfg(softc);
	if (rc)
		goto fail;

	for (i = 0; i < softc->ntxqsets; i++) {
		/* Allocate the statistics context */
		rc = bnxt_hwrm_stat_ctx_alloc(softc, &softc->tx_cp_rings[i],
		    softc->tx_stats[i].idi_paddr);
		if (rc)
			goto fail;

		/* Allocate the completion ring */
		softc->tx_cp_rings[i].cons = UINT32_MAX;
		softc->tx_cp_rings[i].v_bit = 1;
		bnxt_mark_cpr_invalid(&softc->tx_cp_rings[i]);
		rc = bnxt_hwrm_ring_alloc(softc,
				HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
				&softc->tx_cp_rings[i].ring);
		if (rc)
			goto fail;

		if (BNXT_CHIP_P5(softc))
			softc->db_ops.bnxt_db_tx_cq(&softc->tx_cp_rings[i], 1);

		/* Allocate the TX ring */
		rc = bnxt_hwrm_ring_alloc(softc,
				HWRM_RING_ALLOC_INPUT_RING_TYPE_TX,
				&softc->tx_rings[i]);
		if (rc)
			goto fail;
		softc->db_ops.bnxt_db_tx(&softc->tx_rings[i], 0);
	}

	/* Everything is up: enable interrupts and refresh link/RX state. */
	bnxt_do_enable_intr(&softc->def_cp_ring);
	bnxt_get_port_module_status(softc);
	bnxt_media_status(softc->ctx, &ifmr);
	bnxt_hwrm_cfa_l2_set_rx_mask(softc, &softc->vnic_info);
	return;

fail:
	bnxt_func_reset(softc);
	bnxt_clear_ids(softc);
	return;
}
2939 
2940 static void
bnxt_stop(if_ctx_t ctx)2941 bnxt_stop(if_ctx_t ctx)
2942 {
2943 	struct bnxt_softc *softc = iflib_get_softc(ctx);
2944 
2945 	softc->is_dev_init = false;
2946 	bnxt_do_disable_intr(&softc->def_cp_ring);
2947 	bnxt_func_reset(softc);
2948 	bnxt_clear_ids(softc);
2949 	return;
2950 }
2951 
2952 static u_int
bnxt_copy_maddr(void * arg,struct sockaddr_dl * sdl,u_int cnt)2953 bnxt_copy_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
2954 {
2955 	uint8_t *mta = arg;
2956 
2957 	if (cnt == BNXT_MAX_MC_ADDRS)
2958 		return (1);
2959 
2960 	bcopy(LLADDR(sdl), &mta[cnt * ETHER_ADDR_LEN], ETHER_ADDR_LEN);
2961 
2962 	return (1);
2963 }
2964 
/*
 * iflib multicast hook: rebuild the firmware multicast filter list from
 * the interface's link-level multicast addresses, switching to the
 * all-multicast filter when the list overflows BNXT_MAX_MC_ADDRS.
 */
static void
bnxt_multi_set(if_ctx_t ctx)
{
	struct bnxt_softc *softc = iflib_get_softc(ctx);
	if_t ifp = iflib_get_ifp(ctx);
	uint8_t *mta;
	int mcnt;

	/* Repopulate the DMA'd multicast table from scratch. */
	mta = softc->vnic_info.mc_list.idi_vaddr;
	bzero(mta, softc->vnic_info.mc_list.idi_size);
	mcnt = if_foreach_llmaddr(ifp, bnxt_copy_maddr, mta);

	if (mcnt > BNXT_MAX_MC_ADDRS) {
		/* Too many addresses for the table; accept all multicast. */
		softc->vnic_info.rx_mask |=
		    HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
		bnxt_hwrm_cfa_l2_set_rx_mask(softc, &softc->vnic_info);
	} else {
		softc->vnic_info.rx_mask &=
		    ~HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
		/* Flush the table to the device before the HWRM call. */
		bus_dmamap_sync(softc->vnic_info.mc_list.idi_tag,
		    softc->vnic_info.mc_list.idi_map, BUS_DMASYNC_PREWRITE);
		softc->vnic_info.mc_list_count = mcnt;
		softc->vnic_info.rx_mask |=
		    HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
		if (bnxt_hwrm_cfa_l2_set_rx_mask(softc, &softc->vnic_info))
			device_printf(softc->dev,
			    "set_multi: rx_mask set failed\n");
	}
}
2994 
2995 static int
bnxt_mtu_set(if_ctx_t ctx,uint32_t mtu)2996 bnxt_mtu_set(if_ctx_t ctx, uint32_t mtu)
2997 {
2998 	struct bnxt_softc *softc = iflib_get_softc(ctx);
2999 
3000 	if (mtu > BNXT_MAX_MTU)
3001 		return EINVAL;
3002 
3003 	softc->scctx->isc_max_frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
3004 	softc->rx_buf_size = min(softc->scctx->isc_max_frame_size, BNXT_PAGE_SIZE);
3005 	return 0;
3006 }
3007 
/*
 * iflib media-status hook: refresh the link state from firmware and
 * translate it into the ifmediareq status/active fields.
 */
static void
bnxt_media_status(if_ctx_t ctx, struct ifmediareq * ifmr)
{
	struct bnxt_softc *softc = iflib_get_softc(ctx);
	struct bnxt_link_info *link_info = &softc->link_info;
	struct ifmedia_entry *next;
	uint64_t target_baudrate = bnxt_get_baudrate(link_info);
	int active_media = IFM_UNKNOWN;

	/* Refresh link_info from firmware before reporting. */
	bnxt_update_link(softc, true);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (link_info->link_up)
		ifmr->ifm_status |= IFM_ACTIVE;
	else
		ifmr->ifm_status &= ~IFM_ACTIVE;

	if (link_info->duplex == HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_CFG_FULL)
		ifmr->ifm_active |= IFM_FDX;
	else
		ifmr->ifm_active |= IFM_HDX;

        /*
         * Go through the list of supported media which got prepared
         * as part of bnxt_add_media_types() using api ifmedia_add().
         * Report the first entry whose baudrate matches the link's;
         * IFM_UNKNOWN is reported if none matches.
         */
	LIST_FOREACH(next, &(iflib_get_media(ctx)->ifm_list), ifm_list) {
		if (ifmedia_baudrate(next->ifm_media) == target_baudrate) {
			active_media = next->ifm_media;
			break;
		}
	}
	ifmr->ifm_active |= active_media;

	if (link_info->flow_ctrl.rx)
		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
	if (link_info->flow_ctrl.tx)
		ifmr->ifm_active |= IFM_ETH_TXPAUSE;

	bnxt_report_link(softc);
	return;
}
3052 
/*
 * iflib media-change hook: translate the requested ifmedia subtype into
 * the firmware's forced-speed or autonegotiation settings, then push the
 * new link configuration and re-report media status.
 *
 * Forced NRZ speeds clear BNXT_AUTONEG_SPEED and set req_link_speed;
 * PAM4 speeds additionally set the PAM4 signal mode and the
 * force_pam4_speed_set_by_user flag; 1000/10G-T and unknown/auto
 * subtypes enable speed autonegotiation instead.
 */
static int
bnxt_media_change(if_ctx_t ctx)
{
	struct bnxt_softc *softc = iflib_get_softc(ctx);
	struct ifmedia *ifm = iflib_get_media(ctx);
	struct ifmediareq ifmr;
	int rc;

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return EINVAL;

	/*
	 * NOTE(review): the default signal mode is initialized to PAM4
	 * even for NRZ speeds below -- verify this should not be
	 * ..._SIGNAL_MODE_NRZ, with only the PAM4 cases overriding it.
	 */
	softc->link_info.req_signal_mode =
			HWRM_PORT_PHY_QCFG_OUTPUT_SIGNAL_MODE_PAM4;

	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_100_T:
		softc->link_info.autoneg &= ~BNXT_AUTONEG_SPEED;
		softc->link_info.req_link_speed =
		    HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100MB;
		break;
	case IFM_1000_KX:
	case IFM_1000_SGMII:
	case IFM_1000_CX:
	case IFM_1000_SX:
	case IFM_1000_LX:
		softc->link_info.autoneg &= ~BNXT_AUTONEG_SPEED;
		softc->link_info.req_link_speed =
		    HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_1GB;
		break;
	case IFM_2500_KX:
	case IFM_2500_T:
		softc->link_info.autoneg &= ~BNXT_AUTONEG_SPEED;
		softc->link_info.req_link_speed =
		    HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_2_5GB;
		break;
	case IFM_10G_CR1:
	case IFM_10G_KR:
	case IFM_10G_LR:
	case IFM_10G_SR:
		softc->link_info.autoneg &= ~BNXT_AUTONEG_SPEED;
		softc->link_info.req_link_speed =
		    HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
		break;
	case IFM_20G_KR2:
		softc->link_info.autoneg &= ~BNXT_AUTONEG_SPEED;
		softc->link_info.req_link_speed =
		    HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_20GB;
		break;
	case IFM_25G_CR:
	case IFM_25G_KR:
	case IFM_25G_SR:
		softc->link_info.autoneg &= ~BNXT_AUTONEG_SPEED;
		softc->link_info.req_link_speed =
		    HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_25GB;
		break;
	case IFM_40G_CR4:
	case IFM_40G_KR4:
	case IFM_40G_LR4:
	case IFM_40G_SR4:
	case IFM_40G_XLAUI:
	case IFM_40G_XLAUI_AC:
		softc->link_info.autoneg &= ~BNXT_AUTONEG_SPEED;
		softc->link_info.req_link_speed =
		    HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
		break;
	case IFM_50G_CR2:
	case IFM_50G_KR2:
	case IFM_50G_SR2:
		softc->link_info.autoneg &= ~BNXT_AUTONEG_SPEED;
		softc->link_info.req_link_speed =
		    HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
		break;
	/* 50G PAM4 variants: force PAM4 speed and signal mode. */
	case IFM_50G_CP:
	case IFM_50G_LR:
	case IFM_50G_SR:
	case IFM_50G_KR_PAM4:
		softc->link_info.autoneg &= ~BNXT_AUTONEG_SPEED;
		softc->link_info.req_link_speed =
		    HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_50GB;
		softc->link_info.req_signal_mode =
			HWRM_PORT_PHY_QCFG_OUTPUT_SIGNAL_MODE_PAM4;
		softc->link_info.force_pam4_speed_set_by_user = true;
		break;
	case IFM_100G_CR4:
	case IFM_100G_KR4:
	case IFM_100G_LR4:
	case IFM_100G_SR4:
		softc->link_info.autoneg &= ~BNXT_AUTONEG_SPEED;
		softc->link_info.req_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB;
		break;
	/* 100G PAM4 variants. */
	case IFM_100G_CP2:
	case IFM_100G_SR2:
	case IFM_100G_KR_PAM4:
	case IFM_100G_KR2_PAM4:
		softc->link_info.autoneg &= ~BNXT_AUTONEG_SPEED;
		softc->link_info.req_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_100GB;
		softc->link_info.req_signal_mode =
			HWRM_PORT_PHY_QCFG_OUTPUT_SIGNAL_MODE_PAM4;
		softc->link_info.force_pam4_speed_set_by_user = true;
		break;
	/* 200G is PAM4-only. */
	case IFM_200G_SR4:
	case IFM_200G_FR4:
	case IFM_200G_LR4:
	case IFM_200G_DR4:
	case IFM_200G_CR4_PAM4:
	case IFM_200G_KR4_PAM4:
		softc->link_info.autoneg &= ~BNXT_AUTONEG_SPEED;
		softc->link_info.req_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_200GB;
		softc->link_info.force_pam4_speed_set_by_user = true;
		softc->link_info.req_signal_mode =
			HWRM_PORT_PHY_QCFG_OUTPUT_SIGNAL_MODE_PAM4;
		break;
	/* BASE-T subtypes autonegotiate, advertising a single speed. */
	case IFM_1000_T:
		softc->link_info.advertising = HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
		softc->link_info.autoneg |= BNXT_AUTONEG_SPEED;
		break;
	case IFM_10G_T:
		softc->link_info.advertising = HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
		softc->link_info.autoneg |= BNXT_AUTONEG_SPEED;
		break;
	default:
		device_printf(softc->dev,
		    "Unsupported media type!  Using auto\n");
		/* Fall-through */
	case IFM_AUTO:
		// Auto
		softc->link_info.autoneg |= BNXT_AUTONEG_SPEED;
		break;
	}
	rc = bnxt_hwrm_set_link_setting(softc, true, true, true);
	bnxt_media_status(softc->ctx, &ifmr);
	return rc;
}
3189 
3190 static int
bnxt_promisc_set(if_ctx_t ctx,int flags)3191 bnxt_promisc_set(if_ctx_t ctx, int flags)
3192 {
3193 	struct bnxt_softc *softc = iflib_get_softc(ctx);
3194 	if_t ifp = iflib_get_ifp(ctx);
3195 	int rc;
3196 
3197 	if (if_getflags(ifp) & IFF_ALLMULTI ||
3198 	    if_llmaddr_count(ifp) > BNXT_MAX_MC_ADDRS)
3199 		softc->vnic_info.rx_mask |=
3200 		    HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
3201 	else
3202 		softc->vnic_info.rx_mask &=
3203 		    ~HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
3204 
3205 	if (if_getflags(ifp) & IFF_PROMISC)
3206 		softc->vnic_info.rx_mask |=
3207 		    HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS |
3208 		    HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ANYVLAN_NONVLAN;
3209 	else
3210 		softc->vnic_info.rx_mask &=
3211 		    ~(HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS);
3212 
3213 	rc = bnxt_hwrm_cfa_l2_set_rx_mask(softc, &softc->vnic_info);
3214 
3215 	return rc;
3216 }
3217 
3218 static uint64_t
bnxt_get_counter(if_ctx_t ctx,ift_counter cnt)3219 bnxt_get_counter(if_ctx_t ctx, ift_counter cnt)
3220 {
3221 	if_t ifp = iflib_get_ifp(ctx);
3222 
3223 	if (cnt < IFCOUNTERS)
3224 		return if_get_counter_default(ifp, cnt);
3225 
3226 	return 0;
3227 }
3228 
3229 static void
bnxt_update_admin_status(if_ctx_t ctx)3230 bnxt_update_admin_status(if_ctx_t ctx)
3231 {
3232 	struct bnxt_softc *softc = iflib_get_softc(ctx);
3233 
3234 	/*
3235 	 * When SR-IOV is enabled, avoid each VF sending this HWRM
3236 	 * request every sec with which firmware timeouts can happen
3237 	 */
3238 	if (!BNXT_PF(softc))
3239 		return;
3240 
3241 	bnxt_hwrm_port_qstats(softc);
3242 
3243 	if (BNXT_CHIP_P5(softc) &&
3244 	    (softc->flags & BNXT_FLAG_FW_CAP_EXT_STATS))
3245 		bnxt_hwrm_port_qstats_ext(softc);
3246 
3247 	if (BNXT_CHIP_P5(softc)) {
3248 		struct ifmediareq ifmr;
3249 
3250 		if (bit_test(softc->state_bv, BNXT_STATE_LINK_CHANGE)) {
3251 			bit_clear(softc->state_bv, BNXT_STATE_LINK_CHANGE);
3252 			bnxt_media_status(softc->ctx, &ifmr);
3253 		}
3254 	}
3255 
3256 	return;
3257 }
3258 
3259 static void
bnxt_if_timer(if_ctx_t ctx,uint16_t qid)3260 bnxt_if_timer(if_ctx_t ctx, uint16_t qid)
3261 {
3262 
3263 	struct bnxt_softc *softc = iflib_get_softc(ctx);
3264 	uint64_t ticks_now = ticks;
3265 
3266         /* Schedule bnxt_update_admin_status() once per sec */
3267 	if (ticks_now - softc->admin_ticks >= hz) {
3268 		softc->admin_ticks = ticks_now;
3269 		iflib_admin_intr_deferred(ctx);
3270 	}
3271 
3272 	return;
3273 }
3274 
/*
 * Arm the interrupt doorbell for one completion/NQ ring: the NQ
 * doorbell on P5 chips, the completion-queue doorbell otherwise.
 */
static void inline
bnxt_do_enable_intr(struct bnxt_cp_ring *cpr)
{
	struct bnxt_softc *softc = cpr->ring.softc;

	/* Ring was never allocated in firmware; nothing to arm. */
	if (cpr->ring.phys_id == (uint16_t)HWRM_NA_SIGNATURE)
		return;

	if (BNXT_CHIP_P5(softc))
		softc->db_ops.bnxt_db_nq(cpr, 1);
	else
		softc->db_ops.bnxt_db_rx_cq(cpr, 1);
}
3288 
/*
 * Mask the interrupt doorbell for one completion/NQ ring; counterpart
 * of bnxt_do_enable_intr().
 */
static void inline
bnxt_do_disable_intr(struct bnxt_cp_ring *cpr)
{
	struct bnxt_softc *softc = cpr->ring.softc;

	/* Ring was never allocated in firmware; nothing to mask. */
	if (cpr->ring.phys_id == (uint16_t)HWRM_NA_SIGNATURE)
		return;

	if (BNXT_CHIP_P5(softc))
		softc->db_ops.bnxt_db_nq(cpr, 0);
	else
		softc->db_ops.bnxt_db_rx_cq(cpr, 0);
}
3302 
3303 /* Enable all interrupts */
3304 static void
bnxt_intr_enable(if_ctx_t ctx)3305 bnxt_intr_enable(if_ctx_t ctx)
3306 {
3307 	struct bnxt_softc *softc = iflib_get_softc(ctx);
3308 	int i;
3309 
3310 	bnxt_do_enable_intr(&softc->def_cp_ring);
3311 	for (i = 0; i < softc->nrxqsets; i++)
3312 		if (BNXT_CHIP_P5(softc))
3313 			softc->db_ops.bnxt_db_nq(&softc->nq_rings[i], 1);
3314 		else
3315 			softc->db_ops.bnxt_db_rx_cq(&softc->rx_cp_rings[i], 1);
3316 
3317 	return;
3318 }
3319 
/* Enable interrupt for a single queue */
static int
bnxt_tx_queue_intr_enable(if_ctx_t ctx, uint16_t qid)
{
	struct bnxt_softc *softc = iflib_get_softc(ctx);

	/*
	 * On P5 the per-qset NQ doorbell is armed (shared with the RX
	 * path, see bnxt_rx_queue_intr_enable); legacy chips arm the TX
	 * completion ring doorbell directly.
	 */
	if (BNXT_CHIP_P5(softc))
		softc->db_ops.bnxt_db_nq(&softc->nq_rings[qid], 1);
	else
		softc->db_ops.bnxt_db_rx_cq(&softc->tx_cp_rings[qid], 1);

	return 0;
}
3333 
3334 static void
bnxt_process_cmd_cmpl(struct bnxt_softc * softc,hwrm_cmpl_t * cmd_cmpl)3335 bnxt_process_cmd_cmpl(struct bnxt_softc *softc, hwrm_cmpl_t *cmd_cmpl)
3336 {
3337 	device_printf(softc->dev, "cmd sequence number %d\n",
3338 			cmd_cmpl->sequence_id);
3339 	return;
3340 }
3341 
3342 static void
bnxt_process_async_msg(struct bnxt_cp_ring * cpr,tx_cmpl_t * cmpl)3343 bnxt_process_async_msg(struct bnxt_cp_ring *cpr, tx_cmpl_t *cmpl)
3344 {
3345 	struct bnxt_softc *softc = cpr->ring.softc;
3346 	uint16_t type = cmpl->flags_type & TX_CMPL_TYPE_MASK;
3347 
3348 	switch (type) {
3349 	case HWRM_CMPL_TYPE_HWRM_DONE:
3350 		bnxt_process_cmd_cmpl(softc, (hwrm_cmpl_t *)cmpl);
3351 		break;
3352 	case HWRM_ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT:
3353 		bnxt_handle_async_event(softc, (cmpl_base_t *) cmpl);
3354 		break;
3355 	default:
3356 		device_printf(softc->dev, "%s:%d Unhandled async message %x\n",
3357 				__FUNCTION__, __LINE__, type);
3358 		break;
3359 	}
3360 }
3361 
3362 void
process_nq(struct bnxt_softc * softc,uint16_t nqid)3363 process_nq(struct bnxt_softc *softc, uint16_t nqid)
3364 {
3365 	struct bnxt_cp_ring *cpr = &softc->nq_rings[nqid];
3366 	nq_cn_t *cmp = (nq_cn_t *) cpr->ring.vaddr;
3367 	bool v_bit = cpr->v_bit;
3368 	uint32_t cons = cpr->cons;
3369 	uint16_t nq_type, nqe_cnt = 0;
3370 
3371 	while (1) {
3372 		if (!NQ_VALID(&cmp[cons], v_bit))
3373 			goto done;
3374 
3375 		nq_type = NQ_CN_TYPE_MASK & cmp[cons].type;
3376 
3377 		if (nq_type != NQ_CN_TYPE_CQ_NOTIFICATION)
3378 			 bnxt_process_async_msg(cpr, (tx_cmpl_t *)&cmp[cons]);
3379 
3380 		NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
3381 		nqe_cnt++;
3382 	}
3383 done:
3384 	if (nqe_cnt) {
3385 		cpr->cons = cons;
3386 		cpr->v_bit = v_bit;
3387 	}
3388 }
3389 
3390 static int
bnxt_rx_queue_intr_enable(if_ctx_t ctx,uint16_t qid)3391 bnxt_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid)
3392 {
3393 	struct bnxt_softc *softc = iflib_get_softc(ctx);
3394 
3395 	if (BNXT_CHIP_P5(softc)) {
3396 		process_nq(softc, qid);
3397 		softc->db_ops.bnxt_db_nq(&softc->nq_rings[qid], 1);
3398 	}
3399 	softc->db_ops.bnxt_db_rx_cq(&softc->rx_cp_rings[qid], 1);
3400         return 0;
3401 }
3402 
3403 /* Disable all interrupts */
3404 static void
bnxt_disable_intr(if_ctx_t ctx)3405 bnxt_disable_intr(if_ctx_t ctx)
3406 {
3407 	struct bnxt_softc *softc = iflib_get_softc(ctx);
3408 	int i;
3409 
3410 	/*
3411 	 * NOTE: These TX interrupts should never get enabled, so don't
3412 	 * update the index
3413 	 */
3414 	for (i = 0; i < softc->nrxqsets; i++)
3415 		if (BNXT_CHIP_P5(softc))
3416 			softc->db_ops.bnxt_db_nq(&softc->nq_rings[i], 0);
3417 		else
3418 			softc->db_ops.bnxt_db_rx_cq(&softc->rx_cp_rings[i], 0);
3419 
3420 
3421 	return;
3422 }
3423 
3424 static int
bnxt_msix_intr_assign(if_ctx_t ctx,int msix)3425 bnxt_msix_intr_assign(if_ctx_t ctx, int msix)
3426 {
3427 	struct bnxt_softc *softc = iflib_get_softc(ctx);
3428 	struct bnxt_cp_ring *ring;
3429 	struct if_irq *irq;
3430 	uint16_t id;
3431 	int rc;
3432 	int i;
3433 	char irq_name[16];
3434 
3435 	if (BNXT_CHIP_P5(softc))
3436 		goto skip_default_cp;
3437 
3438 	rc = iflib_irq_alloc_generic(ctx, &softc->def_cp_ring.irq,
3439 	    softc->def_cp_ring.ring.id + 1, IFLIB_INTR_ADMIN,
3440 	    bnxt_handle_def_cp, softc, 0, "def_cp");
3441 	if (rc) {
3442 		device_printf(iflib_get_dev(ctx),
3443 		    "Failed to register default completion ring handler\n");
3444 		return rc;
3445 	}
3446 
3447 skip_default_cp:
3448 	for (i=0; i<softc->scctx->isc_nrxqsets; i++) {
3449 		if (BNXT_CHIP_P5(softc)) {
3450 			irq = &softc->nq_rings[i].irq;
3451 			id = softc->nq_rings[i].ring.id;
3452 			ring = &softc->nq_rings[i];
3453 		} else {
3454 			irq = &softc->rx_cp_rings[i].irq;
3455 			id = softc->rx_cp_rings[i].ring.id ;
3456 			ring = &softc->rx_cp_rings[i];
3457 		}
3458 		snprintf(irq_name, sizeof(irq_name), "rxq%d", i);
3459 		rc = iflib_irq_alloc_generic(ctx, irq, id + 1, IFLIB_INTR_RX,
3460 				bnxt_handle_isr, ring, i, irq_name);
3461 		if (rc) {
3462 			device_printf(iflib_get_dev(ctx),
3463 			    "Failed to register RX completion ring handler\n");
3464 			i--;
3465 			goto fail;
3466 		}
3467 	}
3468 
3469 	for (i=0; i<softc->scctx->isc_ntxqsets; i++)
3470 		iflib_softirq_alloc_generic(ctx, NULL, IFLIB_INTR_TX, NULL, i, "tx_cp");
3471 
3472 	return rc;
3473 
3474 fail:
3475 	for (; i>=0; i--)
3476 		iflib_irq_free(ctx, &softc->rx_cp_rings[i].irq);
3477 	iflib_irq_free(ctx, &softc->def_cp_ring.irq);
3478 	return rc;
3479 }
3480 
3481 /*
3482  * We're explicitly allowing duplicates here.  They will need to be
3483  * removed as many times as they are added.
3484  */
3485 static void
bnxt_vlan_register(if_ctx_t ctx,uint16_t vtag)3486 bnxt_vlan_register(if_ctx_t ctx, uint16_t vtag)
3487 {
3488 	struct bnxt_softc *softc = iflib_get_softc(ctx);
3489 	struct bnxt_vlan_tag *new_tag;
3490 
3491 	new_tag = malloc(sizeof(struct bnxt_vlan_tag), M_DEVBUF, M_NOWAIT);
3492 	if (new_tag == NULL)
3493 		return;
3494 	new_tag->tag = vtag;
3495 	new_tag->filter_id = -1;
3496 	SLIST_INSERT_HEAD(&softc->vnic_info.vlan_tags, new_tag, next);
3497 };
3498 
3499 static void
bnxt_vlan_unregister(if_ctx_t ctx,uint16_t vtag)3500 bnxt_vlan_unregister(if_ctx_t ctx, uint16_t vtag)
3501 {
3502 	struct bnxt_softc *softc = iflib_get_softc(ctx);
3503 	struct bnxt_vlan_tag *vlan_tag;
3504 
3505 	SLIST_FOREACH(vlan_tag, &softc->vnic_info.vlan_tags, next) {
3506 		if (vlan_tag->tag == vtag) {
3507 			SLIST_REMOVE(&softc->vnic_info.vlan_tags, vlan_tag,
3508 			    bnxt_vlan_tag, next);
3509 			free(vlan_tag, M_DEVBUF);
3510 			break;
3511 		}
3512 	}
3513 }
3514 
3515 static int
bnxt_wol_config(if_ctx_t ctx)3516 bnxt_wol_config(if_ctx_t ctx)
3517 {
3518 	struct bnxt_softc *softc = iflib_get_softc(ctx);
3519 	if_t ifp = iflib_get_ifp(ctx);
3520 
3521 	if (!softc)
3522 		return -EBUSY;
3523 
3524 	if (!bnxt_wol_supported(softc))
3525 		return -ENOTSUP;
3526 
3527 	if (if_getcapenable(ifp) & IFCAP_WOL_MAGIC) {
3528 		if (!softc->wol) {
3529 			if (bnxt_hwrm_alloc_wol_fltr(softc))
3530 				return -EBUSY;
3531 			softc->wol = 1;
3532 		}
3533 	} else {
3534 		if (softc->wol) {
3535 			if (bnxt_hwrm_free_wol_fltr(softc))
3536 				return -EBUSY;
3537 			softc->wol = 0;
3538 		}
3539 	}
3540 
3541 	return 0;
3542 }
3543 
/*
 * iflib restart hook: no event currently requires this driver to be
 * stopped and re-initialized, so always report false.
 */
static bool
bnxt_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event)
{
	switch (event) {
	case IFLIB_RESTART_VLAN_CONFIG:
	default:
		return (false);
	}
}
3553 
/*
 * iflib shutdown hook: sync the WoL filter state; any WoL failure is
 * ignored and success is always reported.
 */
static int
bnxt_shutdown(if_ctx_t ctx)
{
	bnxt_wol_config(ctx);
	return 0;
}
3560 
/*
 * iflib suspend hook: sync the WoL filter state; any WoL failure is
 * ignored and success is always reported.
 */
static int
bnxt_suspend(if_ctx_t ctx)
{
	bnxt_wol_config(ctx);
	return 0;
}
3567 
/* iflib resume hook: re-read the WoL settings from firmware. */
static int
bnxt_resume(if_ctx_t ctx)
{
	struct bnxt_softc *softc = iflib_get_softc(ctx);

	bnxt_get_wol_settings(softc);
	return 0;
}
3576 
3577 static int
bnxt_priv_ioctl(if_ctx_t ctx,u_long command,caddr_t data)3578 bnxt_priv_ioctl(if_ctx_t ctx, u_long command, caddr_t data)
3579 {
3580 	struct bnxt_softc *softc = iflib_get_softc(ctx);
3581 	struct ifreq *ifr = (struct ifreq *)data;
3582 	struct bnxt_ioctl_header *ioh;
3583 	size_t iol;
3584 	int rc = ENOTSUP;
3585 	struct bnxt_ioctl_data iod_storage, *iod = &iod_storage;
3586 
3587 	switch (command) {
3588 	case SIOCGPRIVATE_0:
3589 		if ((rc = priv_check(curthread, PRIV_DRIVER)) != 0)
3590 			goto exit;
3591 
3592 		ioh = ifr_buffer_get_buffer(ifr);
3593 		iol = ifr_buffer_get_length(ifr);
3594 		if (iol > sizeof(iod_storage))
3595 			return (EINVAL);
3596 
3597 		if ((rc = copyin(ioh, iod, iol)) != 0)
3598 			goto exit;
3599 
3600 		switch (iod->hdr.type) {
3601 		case BNXT_HWRM_NVM_FIND_DIR_ENTRY:
3602 		{
3603 			struct bnxt_ioctl_hwrm_nvm_find_dir_entry *find =
3604 			    &iod->find;
3605 
3606 			rc = bnxt_hwrm_nvm_find_dir_entry(softc, find->type,
3607 			    &find->ordinal, find->ext, &find->index,
3608 			    find->use_index, find->search_opt,
3609 			    &find->data_length, &find->item_length,
3610 			    &find->fw_ver);
3611 			if (rc) {
3612 				iod->hdr.rc = rc;
3613 				rc = copyout(&iod->hdr.rc, &ioh->rc,
3614 				    sizeof(ioh->rc));
3615 			} else {
3616 				iod->hdr.rc = 0;
3617 				rc = copyout(iod, ioh, iol);
3618 			}
3619 
3620 			goto exit;
3621 		}
3622 		case BNXT_HWRM_NVM_READ:
3623 		{
3624 			struct bnxt_ioctl_hwrm_nvm_read *rd = &iod->read;
3625 			struct iflib_dma_info dma_data;
3626 			size_t offset;
3627 			size_t remain;
3628 			size_t csize;
3629 
3630 			/*
3631 			 * Some HWRM versions can't read more than 0x8000 bytes
3632 			 */
3633 			rc = iflib_dma_alloc(softc->ctx,
3634 			    min(rd->length, 0x8000), &dma_data, BUS_DMA_NOWAIT);
3635 			if (rc)
3636 				break;
3637 			for (remain = rd->length, offset = 0;
3638 			    remain && offset < rd->length; offset += 0x8000) {
3639 				csize = min(remain, 0x8000);
3640 				rc = bnxt_hwrm_nvm_read(softc, rd->index,
3641 				    rd->offset + offset, csize, &dma_data);
3642 				if (rc) {
3643 					iod->hdr.rc = rc;
3644 					rc = copyout(&iod->hdr.rc, &ioh->rc,
3645 					    sizeof(ioh->rc));
3646 					break;
3647 				} else {
3648 					rc = copyout(dma_data.idi_vaddr,
3649 					    rd->data + offset, csize);
3650 					iod->hdr.rc = rc;
3651 				}
3652 				remain -= csize;
3653 			}
3654 			if (rc == 0)
3655 				rc = copyout(iod, ioh, iol);
3656 
3657 			iflib_dma_free(&dma_data);
3658 			goto exit;
3659 		}
3660 		case BNXT_HWRM_FW_RESET:
3661 		{
3662 			struct bnxt_ioctl_hwrm_fw_reset *rst =
3663 			    &iod->reset;
3664 
3665 			rc = bnxt_hwrm_fw_reset(softc, rst->processor,
3666 			    &rst->selfreset);
3667 			if (rc) {
3668 				iod->hdr.rc = rc;
3669 				rc = copyout(&iod->hdr.rc, &ioh->rc,
3670 				    sizeof(ioh->rc));
3671 			} else {
3672 				iod->hdr.rc = 0;
3673 				rc = copyout(iod, ioh, iol);
3674 			}
3675 
3676 			goto exit;
3677 		}
3678 		case BNXT_HWRM_FW_QSTATUS:
3679 		{
3680 			struct bnxt_ioctl_hwrm_fw_qstatus *qstat =
3681 			    &iod->status;
3682 
3683 			rc = bnxt_hwrm_fw_qstatus(softc, qstat->processor,
3684 			    &qstat->selfreset);
3685 			if (rc) {
3686 				iod->hdr.rc = rc;
3687 				rc = copyout(&iod->hdr.rc, &ioh->rc,
3688 				    sizeof(ioh->rc));
3689 			} else {
3690 				iod->hdr.rc = 0;
3691 				rc = copyout(iod, ioh, iol);
3692 			}
3693 
3694 			goto exit;
3695 		}
3696 		case BNXT_HWRM_NVM_WRITE:
3697 		{
3698 			struct bnxt_ioctl_hwrm_nvm_write *wr =
3699 			    &iod->write;
3700 
3701 			rc = bnxt_hwrm_nvm_write(softc, wr->data, true,
3702 			    wr->type, wr->ordinal, wr->ext, wr->attr,
3703 			    wr->option, wr->data_length, wr->keep,
3704 			    &wr->item_length, &wr->index);
3705 			if (rc) {
3706 				iod->hdr.rc = rc;
3707 				rc = copyout(&iod->hdr.rc, &ioh->rc,
3708 				    sizeof(ioh->rc));
3709 			}
3710 			else {
3711 				iod->hdr.rc = 0;
3712 				rc = copyout(iod, ioh, iol);
3713 			}
3714 
3715 			goto exit;
3716 		}
3717 		case BNXT_HWRM_NVM_ERASE_DIR_ENTRY:
3718 		{
3719 			struct bnxt_ioctl_hwrm_nvm_erase_dir_entry *erase =
3720 			    &iod->erase;
3721 
3722 			rc = bnxt_hwrm_nvm_erase_dir_entry(softc, erase->index);
3723 			if (rc) {
3724 				iod->hdr.rc = rc;
3725 				rc = copyout(&iod->hdr.rc, &ioh->rc,
3726 				    sizeof(ioh->rc));
3727 			} else {
3728 				iod->hdr.rc = 0;
3729 				rc = copyout(iod, ioh, iol);
3730 			}
3731 
3732 			goto exit;
3733 		}
3734 		case BNXT_HWRM_NVM_GET_DIR_INFO:
3735 		{
3736 			struct bnxt_ioctl_hwrm_nvm_get_dir_info *info =
3737 			    &iod->dir_info;
3738 
3739 			rc = bnxt_hwrm_nvm_get_dir_info(softc, &info->entries,
3740 			    &info->entry_length);
3741 			if (rc) {
3742 				iod->hdr.rc = rc;
3743 				rc = copyout(&iod->hdr.rc, &ioh->rc,
3744 				    sizeof(ioh->rc));
3745 			} else {
3746 				iod->hdr.rc = 0;
3747 				rc = copyout(iod, ioh, iol);
3748 			}
3749 
3750 			goto exit;
3751 		}
3752 		case BNXT_HWRM_NVM_GET_DIR_ENTRIES:
3753 		{
3754 			struct bnxt_ioctl_hwrm_nvm_get_dir_entries *get =
3755 			    &iod->dir_entries;
3756 			struct iflib_dma_info dma_data;
3757 
3758 			rc = iflib_dma_alloc(softc->ctx, get->max_size,
3759 			    &dma_data, BUS_DMA_NOWAIT);
3760 			if (rc)
3761 				break;
3762 			rc = bnxt_hwrm_nvm_get_dir_entries(softc, &get->entries,
3763 			    &get->entry_length, &dma_data);
3764 			if (rc) {
3765 				iod->hdr.rc = rc;
3766 				rc = copyout(&iod->hdr.rc, &ioh->rc,
3767 				    sizeof(ioh->rc));
3768 			} else {
3769 				rc = copyout(dma_data.idi_vaddr, get->data,
3770 				    get->entry_length * get->entries);
3771 				iod->hdr.rc = rc;
3772 				if (rc == 0)
3773 					rc = copyout(iod, ioh, iol);
3774 			}
3775 			iflib_dma_free(&dma_data);
3776 
3777 			goto exit;
3778 		}
3779 		case BNXT_HWRM_NVM_VERIFY_UPDATE:
3780 		{
3781 			struct bnxt_ioctl_hwrm_nvm_verify_update *vrfy =
3782 			    &iod->verify;
3783 
3784 			rc = bnxt_hwrm_nvm_verify_update(softc, vrfy->type,
3785 			    vrfy->ordinal, vrfy->ext);
3786 			if (rc) {
3787 				iod->hdr.rc = rc;
3788 				rc = copyout(&iod->hdr.rc, &ioh->rc,
3789 				    sizeof(ioh->rc));
3790 			} else {
3791 				iod->hdr.rc = 0;
3792 				rc = copyout(iod, ioh, iol);
3793 			}
3794 
3795 			goto exit;
3796 		}
3797 		case BNXT_HWRM_NVM_INSTALL_UPDATE:
3798 		{
3799 			struct bnxt_ioctl_hwrm_nvm_install_update *inst =
3800 			    &iod->install;
3801 
3802 			rc = bnxt_hwrm_nvm_install_update(softc,
3803 			    inst->install_type, &inst->installed_items,
3804 			    &inst->result, &inst->problem_item,
3805 			    &inst->reset_required);
3806 			if (rc) {
3807 				iod->hdr.rc = rc;
3808 				rc = copyout(&iod->hdr.rc, &ioh->rc,
3809 				    sizeof(ioh->rc));
3810 			} else {
3811 				iod->hdr.rc = 0;
3812 				rc = copyout(iod, ioh, iol);
3813 			}
3814 
3815 			goto exit;
3816 		}
3817 		case BNXT_HWRM_NVM_MODIFY:
3818 		{
3819 			struct bnxt_ioctl_hwrm_nvm_modify *mod = &iod->modify;
3820 
3821 			rc = bnxt_hwrm_nvm_modify(softc, mod->index,
3822 			    mod->offset, mod->data, true, mod->length);
3823 			if (rc) {
3824 				iod->hdr.rc = rc;
3825 				rc = copyout(&iod->hdr.rc, &ioh->rc,
3826 				    sizeof(ioh->rc));
3827 			} else {
3828 				iod->hdr.rc = 0;
3829 				rc = copyout(iod, ioh, iol);
3830 			}
3831 
3832 			goto exit;
3833 		}
3834 		case BNXT_HWRM_FW_GET_TIME:
3835 		{
3836 			struct bnxt_ioctl_hwrm_fw_get_time *gtm =
3837 			    &iod->get_time;
3838 
3839 			rc = bnxt_hwrm_fw_get_time(softc, &gtm->year,
3840 			    &gtm->month, &gtm->day, &gtm->hour, &gtm->minute,
3841 			    &gtm->second, &gtm->millisecond, &gtm->zone);
3842 			if (rc) {
3843 				iod->hdr.rc = rc;
3844 				rc = copyout(&iod->hdr.rc, &ioh->rc,
3845 				    sizeof(ioh->rc));
3846 			} else {
3847 				iod->hdr.rc = 0;
3848 				rc = copyout(iod, ioh, iol);
3849 			}
3850 
3851 			goto exit;
3852 		}
3853 		case BNXT_HWRM_FW_SET_TIME:
3854 		{
3855 			struct bnxt_ioctl_hwrm_fw_set_time *stm =
3856 			    &iod->set_time;
3857 
3858 			rc = bnxt_hwrm_fw_set_time(softc, stm->year,
3859 			    stm->month, stm->day, stm->hour, stm->minute,
3860 			    stm->second, stm->millisecond, stm->zone);
3861 			if (rc) {
3862 				iod->hdr.rc = rc;
3863 				rc = copyout(&iod->hdr.rc, &ioh->rc,
3864 				    sizeof(ioh->rc));
3865 			} else {
3866 				iod->hdr.rc = 0;
3867 				rc = copyout(iod, ioh, iol);
3868 			}
3869 
3870 			goto exit;
3871 		}
3872 		}
3873 		break;
3874 	}
3875 
3876 exit:
3877 	return rc;
3878 }
3879 
3880 static int
bnxt_i2c_req(if_ctx_t ctx,struct ifi2creq * i2c)3881 bnxt_i2c_req(if_ctx_t ctx, struct ifi2creq *i2c)
3882 {
3883 	struct bnxt_softc *softc = iflib_get_softc(ctx);
3884 	uint8_t *data = i2c->data;
3885 	int rc;
3886 
3887 	/* No point in going further if phy status indicates
3888 	 * module is not inserted or if it is powered down or
3889 	 * if it is of type 10GBase-T
3890 	 */
3891 	if (softc->link_info.module_status >
3892 		HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_WARNINGMSG)
3893 		return -EOPNOTSUPP;
3894 
3895 	/* This feature is not supported in older firmware versions */
3896 	if (!BNXT_CHIP_P5(softc) ||
3897 	    (softc->hwrm_spec_code < 0x10202))
3898 		return -EOPNOTSUPP;
3899 
3900 
3901 	rc = bnxt_read_sfp_module_eeprom_info(softc, I2C_DEV_ADDR_A0, 0, 0, 0,
3902 		i2c->offset, i2c->len, data);
3903 
3904 	return rc;
3905 }
3906 
3907 /*
3908  * Support functions
3909  */
3910 static int
bnxt_probe_phy(struct bnxt_softc * softc)3911 bnxt_probe_phy(struct bnxt_softc *softc)
3912 {
3913 	struct bnxt_link_info *link_info = &softc->link_info;
3914 	int rc = 0;
3915 
3916 	softc->phy_flags = 0;
3917 	rc = bnxt_hwrm_phy_qcaps(softc);
3918 	if (rc) {
3919 		device_printf(softc->dev,
3920 			      "Probe phy can't get phy capabilities (rc: %x)\n", rc);
3921 		return rc;
3922 	}
3923 
3924 	rc = bnxt_update_link(softc, false);
3925 	if (rc) {
3926 		device_printf(softc->dev,
3927 		    "Probe phy can't update link (rc: %x)\n", rc);
3928 		return (rc);
3929 	}
3930 
3931 	bnxt_get_port_module_status(softc);
3932 
3933 	/*initialize the ethool setting copy with NVM settings */
3934 	if (link_info->auto_mode != HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE)
3935 		link_info->autoneg |= BNXT_AUTONEG_SPEED;
3936 
3937 	link_info->req_duplex = link_info->duplex_setting;
3938 
3939 	/* NRZ link speed */
3940 	if (link_info->autoneg & BNXT_AUTONEG_SPEED)
3941 		link_info->req_link_speed = link_info->auto_link_speeds;
3942 	else
3943 		link_info->req_link_speed = link_info->force_link_speed;
3944 
3945 	/* PAM4 link speed */
3946 	if (link_info->auto_pam4_link_speeds)
3947 		link_info->req_link_speed = link_info->auto_pam4_link_speeds;
3948 	if (link_info->force_pam4_link_speed)
3949 		link_info->req_link_speed = link_info->force_pam4_link_speed;
3950 
3951 	return (rc);
3952 }
3953 
/*
 * Register the ifmedia entries for one cable/connector family.
 *
 * For each speed bit set in the NRZ mask ('supported') or the PAM4
 * mask ('supported_pam4'), BNXT_IFMEDIA_ADD() adds the corresponding
 * IFM_* media word to the interface's media list.  Callers pass 0 for
 * one of the two masks to restrict the additions to a single
 * signalling mode (see bnxt_add_media_types()).
 */
static void
add_media(struct bnxt_softc *softc, uint8_t media_type, uint16_t supported,
	  uint16_t supported_pam4)
{
	switch (media_type) {
		/* Copper direct-attach media */
		case BNXT_MEDIA_CR:
			BNXT_IFMEDIA_ADD(supported_pam4, PAM4_SPEEDS_50G, IFM_50G_CP);
			BNXT_IFMEDIA_ADD(supported_pam4, PAM4_SPEEDS_100G, IFM_100G_CP2);
			BNXT_IFMEDIA_ADD(supported_pam4, PAM4_SPEEDS_200G, IFM_200G_CR4_PAM4);
			BNXT_IFMEDIA_ADD(supported, SPEEDS_100GB, IFM_100G_CR4);
			BNXT_IFMEDIA_ADD(supported, SPEEDS_50GB, IFM_50G_CR2);
			BNXT_IFMEDIA_ADD(supported, SPEEDS_40GB, IFM_40G_CR4);
			BNXT_IFMEDIA_ADD(supported, SPEEDS_25GB, IFM_25G_CR);
			BNXT_IFMEDIA_ADD(supported, SPEEDS_10GB, IFM_10G_CR1);
			BNXT_IFMEDIA_ADD(supported, SPEEDS_1GB, IFM_1000_CX);
			break;

		/* Long-reach optics */
		case BNXT_MEDIA_LR:
			BNXT_IFMEDIA_ADD(supported_pam4, PAM4_SPEEDS_50G, IFM_50G_LR);
			BNXT_IFMEDIA_ADD(supported_pam4, PAM4_SPEEDS_200G, IFM_200G_LR4);
			BNXT_IFMEDIA_ADD(supported, SPEEDS_100GB, IFM_100G_LR4);
			BNXT_IFMEDIA_ADD(supported, SPEEDS_50GB, IFM_50G_LR2);
			BNXT_IFMEDIA_ADD(supported, SPEEDS_40GB, IFM_40G_LR4);
			BNXT_IFMEDIA_ADD(supported, SPEEDS_25GB, IFM_25G_LR);
			BNXT_IFMEDIA_ADD(supported, SPEEDS_10GB, IFM_10G_LR);
			BNXT_IFMEDIA_ADD(supported, SPEEDS_1GB, IFM_1000_LX);
			break;

		/* Short-reach optics */
		case BNXT_MEDIA_SR:
			BNXT_IFMEDIA_ADD(supported_pam4, PAM4_SPEEDS_50G, IFM_50G_SR);
			BNXT_IFMEDIA_ADD(supported_pam4, PAM4_SPEEDS_100G, IFM_100G_SR2);
			BNXT_IFMEDIA_ADD(supported_pam4, PAM4_SPEEDS_200G, IFM_200G_SR4);
			BNXT_IFMEDIA_ADD(supported, SPEEDS_100GB, IFM_100G_SR4);
			BNXT_IFMEDIA_ADD(supported, SPEEDS_50GB, IFM_50G_SR2);
			BNXT_IFMEDIA_ADD(supported, SPEEDS_40GB, IFM_40G_SR4);
			BNXT_IFMEDIA_ADD(supported, SPEEDS_25GB, IFM_25G_SR);
			BNXT_IFMEDIA_ADD(supported, SPEEDS_10GB, IFM_10G_SR);
			BNXT_IFMEDIA_ADD(supported, SPEEDS_1GB, IFM_1000_SX);
			break;

		/* Backplane media.  Note: both the KR2 and KR4 variants
		 * are advertised for the 50GB capability bit. */
		case BNXT_MEDIA_KR:
			BNXT_IFMEDIA_ADD(supported_pam4, PAM4_SPEEDS_50G, IFM_50G_KR_PAM4);
			BNXT_IFMEDIA_ADD(supported_pam4, PAM4_SPEEDS_100G, IFM_100G_KR2_PAM4);
			BNXT_IFMEDIA_ADD(supported_pam4, PAM4_SPEEDS_200G, IFM_200G_KR4_PAM4);
			BNXT_IFMEDIA_ADD(supported, SPEEDS_100GB, IFM_100G_KR4);
			BNXT_IFMEDIA_ADD(supported, SPEEDS_50GB, IFM_50G_KR2);
			BNXT_IFMEDIA_ADD(supported, SPEEDS_50GB, IFM_50G_KR4);
			BNXT_IFMEDIA_ADD(supported, SPEEDS_40GB, IFM_40G_KR4);
			BNXT_IFMEDIA_ADD(supported, SPEEDS_25GB, IFM_25G_KR);
			BNXT_IFMEDIA_ADD(supported, SPEEDS_20GB, IFM_20G_KR2);
			BNXT_IFMEDIA_ADD(supported, SPEEDS_10GB, IFM_10G_KR);
			BNXT_IFMEDIA_ADD(supported, SPEEDS_1GB, IFM_1000_KX);
			break;

		default:
			/* Unknown family: add nothing. */
			break;

	}
	return;

}
4015 
/*
 * Populate the interface's ifmedia list based on the PHY type and the
 * NRZ/PAM4 speed capability masks reported by firmware.
 *
 * Called from bnxt_report_link() after ifmedia_removeall(), so the
 * list always reflects the currently attached PHY/module.
 */
static void
bnxt_add_media_types(struct bnxt_softc *softc)
{
	struct bnxt_link_info *link_info = &softc->link_info;
	uint16_t supported = 0, supported_pam4 = 0;
	uint8_t phy_type = get_phy_type(softc), media_type;

	supported = link_info->support_speeds;
	supported_pam4 = link_info->support_pam4_speeds;

	/* Auto is always supported */
	ifmedia_add(softc->media, IFM_ETHER | IFM_AUTO, 0, NULL);

	/* NPAR functions do not control the port; expose autoselect only. */
	if (softc->flags & BNXT_FLAG_NPAR)
		return;

	/* Map the firmware PHY type onto one of the generic media families
	 * handled by add_media(); the remaining cases are added inline. */
	switch (phy_type) {
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_200G_BASECR4:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASECR4:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASECR2:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_50G_BASECR:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASECR4:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASECR_CA_L:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASECR_CA_S:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASECR_CA_N:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASECR:
		media_type = BNXT_MEDIA_CR;
		break;

	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_200G_BASELR4:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASELR4:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_50G_BASELR:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASELR4:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASELR:
		media_type = BNXT_MEDIA_LR;
		break;

	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_200G_BASESR4:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASESR10:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASESR4:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_50G_BASESR:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASESR4:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASESR:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASEER4:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASEER4:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_200G_BASEER4:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASESR:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_1G_BASESX:
		media_type = BNXT_MEDIA_SR;
		break;

	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR4:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR2:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR:
		media_type = BNXT_MEDIA_KR;
		break;

	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_ACTIVE_CABLE:
		BNXT_IFMEDIA_ADD(supported, SPEEDS_25GB, IFM_25G_ACC);
		BNXT_IFMEDIA_ADD(supported, SPEEDS_10GB, IFM_10G_AOC);
		BNXT_IFMEDIA_ADD(supported, SPEEDS_40GB, IFM_40G_XLAUI);
		BNXT_IFMEDIA_ADD(supported, SPEEDS_40GB, IFM_40G_XLAUI_AC);
		return;

	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_1G_BASECX:
		BNXT_IFMEDIA_ADD(supported, SPEEDS_1GBHD, IFM_1000_CX);
		return;

	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_1G_BASET:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE:
		BNXT_IFMEDIA_ADD(supported, SPEEDS_10GB, IFM_10G_T);
		BNXT_IFMEDIA_ADD(supported, SPEEDS_2_5GB, IFM_2500_T);
		BNXT_IFMEDIA_ADD(supported, SPEEDS_1GB, IFM_1000_T);
		BNXT_IFMEDIA_ADD(supported, SPEEDS_100MB, IFM_100_T);
		BNXT_IFMEDIA_ADD(supported, SPEEDS_10MB, IFM_10_T);
		return;

	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKX:
		BNXT_IFMEDIA_ADD(supported, SPEEDS_10GB, IFM_10G_KR);
		BNXT_IFMEDIA_ADD(supported, SPEEDS_2_5GB, IFM_2500_KX);
		BNXT_IFMEDIA_ADD(supported, SPEEDS_1GB, IFM_1000_KX);
		return;

	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_SGMIIEXTPHY:
		BNXT_IFMEDIA_ADD(supported, SPEEDS_1GB, IFM_1000_SGMII);
		return;

	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_UNKNOWN:
		/* Only Autoneg is supported for TYPE_UNKNOWN */
		return;

	default:
		/* Only Autoneg is supported for new phy type values */
		device_printf(softc->dev, "phy type %d not supported by driver\n", phy_type);
		return;
	}

	/* add_media is invoked twice, once with a firmware speed mask of 0 and a valid
	 * value for both NRZ and PAM4 sig mode. This ensures accurate display of all
	 * supported medias and currently configured media in the "ifconfig -m" output
	 */

	if (link_info->sig_mode == BNXT_SIG_MODE_PAM4) {
		add_media(softc, media_type, supported, 0);
		add_media(softc, media_type, 0, supported_pam4);
	} else {
		add_media(softc, media_type, 0, supported_pam4);
		add_media(softc, media_type, supported, 0);
	}

	return;
}
4129 
4130 static int
bnxt_map_bar(struct bnxt_softc * softc,struct bnxt_bar_info * bar,int bar_num,bool shareable)4131 bnxt_map_bar(struct bnxt_softc *softc, struct bnxt_bar_info *bar, int bar_num, bool shareable)
4132 {
4133 	uint32_t	flag;
4134 
4135 	if (bar->res != NULL) {
4136 		device_printf(softc->dev, "Bar %d already mapped\n", bar_num);
4137 		return EDOOFUS;
4138 	}
4139 
4140 	bar->rid = PCIR_BAR(bar_num);
4141 	flag = RF_ACTIVE;
4142 	if (shareable)
4143 		flag |= RF_SHAREABLE;
4144 
4145 	if ((bar->res =
4146 		bus_alloc_resource_any(softc->dev,
4147 			   SYS_RES_MEMORY,
4148 			   &bar->rid,
4149 			   flag)) == NULL) {
4150 		device_printf(softc->dev,
4151 		    "PCI BAR%d mapping failure\n", bar_num);
4152 		return (ENXIO);
4153 	}
4154 	bar->tag = rman_get_bustag(bar->res);
4155 	bar->handle = rman_get_bushandle(bar->res);
4156 	bar->size = rman_get_size(bar->res);
4157 
4158 	return 0;
4159 }
4160 
4161 static int
bnxt_pci_mapping(struct bnxt_softc * softc)4162 bnxt_pci_mapping(struct bnxt_softc *softc)
4163 {
4164 	int rc;
4165 
4166 	rc = bnxt_map_bar(softc, &softc->hwrm_bar, 0, true);
4167 	if (rc)
4168 		return rc;
4169 
4170 	rc = bnxt_map_bar(softc, &softc->doorbell_bar, 2, false);
4171 
4172 	return rc;
4173 }
4174 
4175 static void
bnxt_pci_mapping_free(struct bnxt_softc * softc)4176 bnxt_pci_mapping_free(struct bnxt_softc *softc)
4177 {
4178 	if (softc->hwrm_bar.res != NULL)
4179 		bus_release_resource(softc->dev, SYS_RES_MEMORY,
4180 		    softc->hwrm_bar.rid, softc->hwrm_bar.res);
4181 	softc->hwrm_bar.res = NULL;
4182 
4183 	if (softc->doorbell_bar.res != NULL)
4184 		bus_release_resource(softc->dev, SYS_RES_MEMORY,
4185 		    softc->doorbell_bar.rid, softc->doorbell_bar.res);
4186 	softc->doorbell_bar.res = NULL;
4187 }
4188 
4189 static int
bnxt_update_link(struct bnxt_softc * softc,bool chng_link_state)4190 bnxt_update_link(struct bnxt_softc *softc, bool chng_link_state)
4191 {
4192 	struct bnxt_link_info *link_info = &softc->link_info;
4193 	uint8_t link_up = link_info->link_up;
4194 	int rc = 0;
4195 
4196 	rc = bnxt_hwrm_port_phy_qcfg(softc);
4197 	if (rc)
4198 		goto exit;
4199 
4200 	/* TODO: need to add more logic to report VF link */
4201 	if (chng_link_state) {
4202 		if (link_info->phy_link_status ==
4203 		    HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK)
4204 			link_info->link_up = 1;
4205 		else
4206 			link_info->link_up = 0;
4207 		if (link_up != link_info->link_up)
4208 			bnxt_report_link(softc);
4209 	} else {
4210 		/* always link down if not require to update link state */
4211 		link_info->link_up = 0;
4212 	}
4213 
4214 exit:
4215 	return rc;
4216 }
4217 
4218 #define ETHTOOL_SPEED_1000		1000
4219 #define ETHTOOL_SPEED_10000		10000
4220 #define ETHTOOL_SPEED_20000		20000
4221 #define ETHTOOL_SPEED_25000		25000
4222 #define ETHTOOL_SPEED_40000		40000
4223 #define ETHTOOL_SPEED_50000		50000
4224 #define ETHTOOL_SPEED_100000		100000
4225 #define ETHTOOL_SPEED_200000		200000
4226 #define ETHTOOL_SPEED_UNKNOWN		-1
4227 
4228 static u32
bnxt_fw_to_ethtool_speed(u16 fw_link_speed)4229 bnxt_fw_to_ethtool_speed(u16 fw_link_speed)
4230 {
4231 	switch (fw_link_speed) {
4232 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
4233 		return ETHTOOL_SPEED_1000;
4234 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
4235 		return ETHTOOL_SPEED_10000;
4236 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
4237 		return ETHTOOL_SPEED_20000;
4238 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
4239 		return ETHTOOL_SPEED_25000;
4240 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
4241 		return ETHTOOL_SPEED_40000;
4242 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
4243 		return ETHTOOL_SPEED_50000;
4244 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB:
4245 		return ETHTOOL_SPEED_100000;
4246 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_200GB:
4247 		return ETHTOOL_SPEED_200000;
4248 	default:
4249 		return ETHTOOL_SPEED_UNKNOWN;
4250 	}
4251 }
4252 
4253 void
bnxt_report_link(struct bnxt_softc * softc)4254 bnxt_report_link(struct bnxt_softc *softc)
4255 {
4256 	struct bnxt_link_info *link_info = &softc->link_info;
4257 	const char *duplex = NULL, *flow_ctrl = NULL;
4258 	const char *signal_mode = "";
4259 
4260 	if(softc->edev)
4261 		softc->edev->espeed =
4262 		    bnxt_fw_to_ethtool_speed(link_info->link_speed);
4263 
4264 	if (link_info->link_up == link_info->last_link_up) {
4265 		if (!link_info->link_up)
4266 			return;
4267 		if ((link_info->duplex == link_info->last_duplex) &&
4268 		    (link_info->phy_type == link_info->last_phy_type) &&
4269                     (!(BNXT_IS_FLOW_CTRL_CHANGED(link_info))))
4270 			return;
4271 	}
4272 
4273 	if (link_info->link_up) {
4274 		if (link_info->duplex ==
4275 		    HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_CFG_FULL)
4276 			duplex = "full duplex";
4277 		else
4278 			duplex = "half duplex";
4279 		if (link_info->flow_ctrl.tx & link_info->flow_ctrl.rx)
4280 			flow_ctrl = "FC - receive & transmit";
4281 		else if (link_info->flow_ctrl.tx)
4282 			flow_ctrl = "FC - transmit";
4283 		else if (link_info->flow_ctrl.rx)
4284 			flow_ctrl = "FC - receive";
4285 		else
4286 			flow_ctrl = "FC - none";
4287 
4288 		if (softc->link_info.phy_qcfg_resp.option_flags &
4289 		    HWRM_PORT_PHY_QCFG_OUTPUT_OPTION_FLAGS_SIGNAL_MODE_KNOWN) {
4290 			uint8_t sig_mode = softc->link_info.active_fec_sig_mode &
4291 				      HWRM_PORT_PHY_QCFG_OUTPUT_SIGNAL_MODE_MASK;
4292 			switch (sig_mode) {
4293 			case BNXT_SIG_MODE_NRZ:
4294 				signal_mode = "(NRZ) ";
4295 				break;
4296 			case BNXT_SIG_MODE_PAM4:
4297 				signal_mode = "(PAM4) ";
4298 				break;
4299 			default:
4300 				break;
4301 			}
4302 		link_info->sig_mode = sig_mode;
4303 		}
4304 
4305 		iflib_link_state_change(softc->ctx, LINK_STATE_UP,
4306 		    IF_Gbps(100));
4307 		device_printf(softc->dev, "Link is UP %s %s, %s - %d Mbps \n", duplex, signal_mode,
4308 		    flow_ctrl, (link_info->link_speed * 100));
4309 	} else {
4310 		iflib_link_state_change(softc->ctx, LINK_STATE_DOWN,
4311 		    bnxt_get_baudrate(&softc->link_info));
4312 		device_printf(softc->dev, "Link is Down\n");
4313 	}
4314 
4315 	link_info->last_link_up = link_info->link_up;
4316 	link_info->last_duplex = link_info->duplex;
4317 	link_info->last_phy_type = link_info->phy_type;
4318 	link_info->last_flow_ctrl.tx = link_info->flow_ctrl.tx;
4319 	link_info->last_flow_ctrl.rx = link_info->flow_ctrl.rx;
4320 	link_info->last_flow_ctrl.autoneg = link_info->flow_ctrl.autoneg;
4321 	/* update media types */
4322 	ifmedia_removeall(softc->media);
4323 	bnxt_add_media_types(softc);
4324 	ifmedia_set(softc->media, IFM_ETHER | IFM_AUTO);
4325 }
4326 
4327 static int
bnxt_handle_isr(void * arg)4328 bnxt_handle_isr(void *arg)
4329 {
4330 	struct bnxt_cp_ring *cpr = arg;
4331 	struct bnxt_softc *softc = cpr->ring.softc;
4332 
4333 	cpr->int_count++;
4334 	/* Disable further interrupts for this queue */
4335 	if (!BNXT_CHIP_P5(softc))
4336 		softc->db_ops.bnxt_db_rx_cq(cpr, 0);
4337 
4338 	return FILTER_SCHEDULE_THREAD;
4339 }
4340 
4341 static int
bnxt_handle_def_cp(void * arg)4342 bnxt_handle_def_cp(void *arg)
4343 {
4344 	struct bnxt_softc *softc = arg;
4345 
4346 	softc->db_ops.bnxt_db_rx_cq(&softc->def_cp_ring, 0);
4347 	GROUPTASK_ENQUEUE(&softc->def_cp_task);
4348 	return FILTER_HANDLED;
4349 }
4350 
4351 static void
bnxt_clear_ids(struct bnxt_softc * softc)4352 bnxt_clear_ids(struct bnxt_softc *softc)
4353 {
4354 	int i;
4355 
4356 	softc->def_cp_ring.stats_ctx_id = HWRM_NA_SIGNATURE;
4357 	softc->def_cp_ring.ring.phys_id = (uint16_t)HWRM_NA_SIGNATURE;
4358 	softc->def_nq_ring.stats_ctx_id = HWRM_NA_SIGNATURE;
4359 	softc->def_nq_ring.ring.phys_id = (uint16_t)HWRM_NA_SIGNATURE;
4360 	for (i = 0; i < softc->ntxqsets; i++) {
4361 		softc->tx_cp_rings[i].stats_ctx_id = HWRM_NA_SIGNATURE;
4362 		softc->tx_cp_rings[i].ring.phys_id =
4363 		    (uint16_t)HWRM_NA_SIGNATURE;
4364 		softc->tx_rings[i].phys_id = (uint16_t)HWRM_NA_SIGNATURE;
4365 
4366 		if (!softc->nq_rings)
4367 			continue;
4368 		softc->nq_rings[i].stats_ctx_id = HWRM_NA_SIGNATURE;
4369 		softc->nq_rings[i].ring.phys_id = (uint16_t)HWRM_NA_SIGNATURE;
4370 	}
4371 	for (i = 0; i < softc->nrxqsets; i++) {
4372 		softc->rx_cp_rings[i].stats_ctx_id = HWRM_NA_SIGNATURE;
4373 		softc->rx_cp_rings[i].ring.phys_id =
4374 		    (uint16_t)HWRM_NA_SIGNATURE;
4375 		softc->rx_rings[i].phys_id = (uint16_t)HWRM_NA_SIGNATURE;
4376 		softc->ag_rings[i].phys_id = (uint16_t)HWRM_NA_SIGNATURE;
4377 		softc->grp_info[i].grp_id = (uint16_t)HWRM_NA_SIGNATURE;
4378 	}
4379 	softc->vnic_info.filter_id = -1;
4380 	softc->vnic_info.id = (uint16_t)HWRM_NA_SIGNATURE;
4381 	softc->vnic_info.rss_id = (uint16_t)HWRM_NA_SIGNATURE;
4382 	memset(softc->vnic_info.rss_grp_tbl.idi_vaddr, 0xff,
4383 	    softc->vnic_info.rss_grp_tbl.idi_size);
4384 }
4385 
4386 static void
bnxt_mark_cpr_invalid(struct bnxt_cp_ring * cpr)4387 bnxt_mark_cpr_invalid(struct bnxt_cp_ring *cpr)
4388 {
4389 	struct cmpl_base *cmp = (void *)cpr->ring.vaddr;
4390 	int i;
4391 
4392 	for (i = 0; i < cpr->ring.ring_size; i++)
4393 		cmp[i].info3_v = !cpr->v_bit;
4394 }
4395 
/*
 * Decode and log a firmware ERROR_REPORT async event.
 *
 * data1 carries the error type (and type-specific fields); data2
 * carries additional type-specific detail.  This routine only prints
 * diagnostics -- no recovery action is taken here.
 */
static void bnxt_event_error_report(struct bnxt_softc *softc, u32 data1, u32 data2)
{
	u32 err_type = BNXT_EVENT_ERROR_REPORT_TYPE(data1);

	switch (err_type) {
	case HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL:
		device_printf(softc->dev,
			      "1PPS: Received invalid signal on pin%u from the external source. Please fix the signal and reconfigure the pin\n",
			      BNXT_EVENT_INVALID_SIGNAL_DATA(data2));
		break;
	case HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM:
		device_printf(softc->dev,
			      "Pause Storm detected!\n");
		break;
	case HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD:
		device_printf(softc->dev,
			      "One or more MMIO doorbells dropped by the device! epoch: 0x%x\n",
			      BNXT_EVENT_DBR_EPOCH(data1));
		break;
	case HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_NVM: {
		/* NVM errors: classify write vs. erase from data1, address from data2. */
		const char *nvm_err_str;

		if (EVENT_DATA1_NVM_ERR_TYPE_WRITE(data1))
			nvm_err_str = "nvm write error";
		else if (EVENT_DATA1_NVM_ERR_TYPE_ERASE(data1))
			nvm_err_str = "nvm erase error";
		else
			nvm_err_str = "unrecognized nvm error";

		device_printf(softc->dev,
			      "%s reported at address 0x%x\n", nvm_err_str,
			      (u32)EVENT_DATA2_NVM_ERR_ADDR(data2));
		break;
	}
	case HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_THERMAL_THRESHOLD: {
		/* Thermal events: severity and direction from data1,
		 * current/threshold temperatures from data2. */
		char *threshold_type;
		char *dir_str;

		switch (EVENT_DATA1_THERMAL_THRESHOLD_TYPE(data1)) {
		case HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_WARN:
			threshold_type = "warning";
			break;
		case HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_CRITICAL:
			threshold_type = "critical";
			break;
		case HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_FATAL:
			threshold_type = "fatal";
			break;
		case HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_SHUTDOWN:
			threshold_type = "shutdown";
			break;
		default:
			device_printf(softc->dev,
				      "Unknown Thermal threshold type event\n");
			return;
		}
		if (EVENT_DATA1_THERMAL_THRESHOLD_DIR_INCREASING(data1))
			dir_str = "above";
		else
			dir_str = "below";
		device_printf(softc->dev,
			      "Chip temperature has gone %s the %s thermal threshold!\n",
			      dir_str, threshold_type);
		device_printf(softc->dev,
			      "Temperature (In Celsius), Current: %u, threshold: %u\n",
			      BNXT_EVENT_THERMAL_CURRENT_TEMP(data2),
			      BNXT_EVENT_THERMAL_THRESHOLD_TEMP(data2));
		break;
	}
	case HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED:
		device_printf(softc->dev,
			      "Speed change is not supported with dual rate transceivers on this board\n");
		break;

	default:
		device_printf(softc->dev,
			      "FW reported unknown error type: %u, data1: 0x%x data2: 0x%x\n",
			      err_type, data1, data2);
		break;
	}
}
4477 
/*
 * Dispatch a firmware async-event completion from the default ring.
 *
 * Handles link changes, error reports, firmware reset notifications
 * and error-recovery (health watchdog) configuration; most other
 * event IDs are only logged.  Events that 'break' out of the switch
 * also queue the slow-path work task; events that jump to
 * async_event_process_exit skip it.  In all cases the event is
 * forwarded to the ULP (RoCE) driver at the end.
 */
static void
bnxt_handle_async_event(struct bnxt_softc *softc, struct cmpl_base *cmpl)
{
	struct hwrm_async_event_cmpl *ae = (void *)cmpl;
	uint16_t async_id = le16toh(ae->event_id);
	struct ifmediareq ifmr;
	char *type_str;
	char *status_desc;
	struct bnxt_fw_health *fw_health;
	u32 data1 = le32toh(ae->event_data1);
	u32 data2 = le32toh(ae->event_data2);

	switch (async_id) {
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE:
		/* P5 defers the link update to the slow-path task via a
		 * state bit; older chips refresh the media status inline. */
		if (BNXT_CHIP_P5(softc))
			bit_set(softc->state_bv, BNXT_STATE_LINK_CHANGE);
		else
			bnxt_media_status(softc->ctx, &ifmr);
		break;
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT: {
		bnxt_event_error_report(softc, data1, data2);
		goto async_event_process_exit;
	}
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DOORBELL_PACING_THRESHOLD:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DOORBELL_PACING_NQ_UPDATE:
		break;
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: {
		/* Firmware is about to reset: record the allowed wait
		 * window and classify the reset from data1/data2. */
		type_str = "Solicited";

		if (!softc->fw_health)
			goto async_event_process_exit;

		softc->fw_reset_timestamp = jiffies;
		/* timestamp_lo/hi carry min/max wait in deciseconds;
		 * fall back to driver defaults when zero. */
		softc->fw_reset_min_dsecs = ae->timestamp_lo;
		if (!softc->fw_reset_min_dsecs)
			softc->fw_reset_min_dsecs = BNXT_DFLT_FW_RST_MIN_DSECS;
		softc->fw_reset_max_dsecs = le16toh(ae->timestamp_hi);
		if (!softc->fw_reset_max_dsecs)
			softc->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS;
		if (EVENT_DATA1_RESET_NOTIFY_FW_ACTIVATION(data1)) {
			set_bit(BNXT_STATE_FW_ACTIVATE_RESET, &softc->state);
		} else if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) {
			type_str = "Fatal";
			softc->fw_health->fatalities++;
			set_bit(BNXT_STATE_FW_FATAL_COND, &softc->state);
		} else if (data2 && BNXT_FW_STATUS_HEALTHY !=
			   EVENT_DATA2_RESET_NOTIFY_FW_STATUS_CODE(data2)) {
			type_str = "Non-fatal";
			softc->fw_health->survivals++;
			set_bit(BNXT_STATE_FW_NON_FATAL_COND, &softc->state);
		}
		device_printf(softc->dev,
			   "%s firmware reset event, data1: 0x%x, data2: 0x%x, min wait %u ms, max wait %u ms\n",
			   type_str, data1, data2,
			   softc->fw_reset_min_dsecs * 100,
			   softc->fw_reset_max_dsecs * 100);
		set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &softc->sp_event);
		break;
	}
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: {
		/* Firmware (re)configured the error-recovery watchdog:
		 * update the health-monitor state accordingly. */
		fw_health = softc->fw_health;
		status_desc = "healthy";
		u32 status;

		if (!fw_health)
			goto async_event_process_exit;

		if (!EVENT_DATA1_RECOVERY_ENABLED(data1)) {
			fw_health->enabled = false;
			device_printf(softc->dev, "Driver recovery watchdog is disabled\n");
			break;
		}
		fw_health->primary = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1);
		fw_health->tmr_multiplier =
			DIV_ROUND_UP(fw_health->polling_dsecs * HZ,
				     HZ * 10);
		fw_health->tmr_counter = fw_health->tmr_multiplier;
		if (!fw_health->enabled)
			fw_health->last_fw_heartbeat =
				bnxt_fw_health_readl(softc, BNXT_FW_HEARTBEAT_REG);
		fw_health->last_fw_reset_cnt =
			bnxt_fw_health_readl(softc, BNXT_FW_RESET_CNT_REG);
		status = bnxt_fw_health_readl(softc, BNXT_FW_HEALTH_REG);
		if (status != BNXT_FW_STATUS_HEALTHY)
			status_desc = "unhealthy";
		device_printf(softc->dev,
			   "Driver recovery watchdog, role: %s, firmware status: 0x%x (%s), resets: %u\n",
			   fw_health->primary ? "primary" : "backup", status,
			   status_desc, fw_health->last_fw_reset_cnt);
		if (!fw_health->enabled) {
			/* Make sure tmr_counter is set and seen by
			 * bnxt_health_check() before setting enabled
			 */
			smp_mb();
			fw_health->enabled = true;
		}
		goto async_event_process_exit;
	}

	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_MTU_CHANGE:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_LOAD:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR:
		device_printf(softc->dev,
		    "Unhandled async completion type %u\n", async_id);
		break;
	default:
		device_printf(softc->dev,
		    "Unknown async completion type %u\n", async_id);
		break;
	}
	bnxt_queue_sp_work(softc);

async_event_process_exit:
	bnxt_ulp_async_events(softc, ae);
}
4605 
static void
bnxt_def_cp_task(void *context)
{
	if_ctx_t ctx = context;
	struct bnxt_softc *softc = iflib_get_softc(ctx);
	struct bnxt_cp_ring *cpr = &softc->def_cp_ring;

	/* Handle completions on the default completion ring */
	struct cmpl_base *cmpl;
	uint32_t cons = cpr->cons;
	bool v_bit = cpr->v_bit;
	bool last_v_bit;
	uint32_t last_cons;
	uint16_t type;

	/*
	 * Drain the default completion ring.  NEXT_CP_CONS_V advances the
	 * consumer index (and flips the valid bit on ring wrap) before we
	 * know whether the next entry is valid, so remember the position of
	 * the last entry actually consumed in last_cons/last_v_bit and roll
	 * back to it once an invalid (not yet written) entry is seen.
	 */
	for (;;) {
		last_cons = cons;
		last_v_bit = v_bit;
		NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
		cmpl = &((struct cmpl_base *)cpr->ring.vaddr)[cons];

		/* Invalid entry: hardware has not produced this far yet. */
		if (!CMP_VALID(cmpl, v_bit))
			break;

		/* Completion records are little-endian on the wire. */
		type = le16toh(cmpl->type) & CMPL_BASE_TYPE_MASK;
		switch (type) {
		case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
			/* Firmware async notification (link change, etc.) */
			bnxt_handle_async_event(softc, cmpl);
			break;
		/*
		 * The types below are not expected on the default ring;
		 * log them so unexpected traffic is visible.
		 */
		case CMPL_BASE_TYPE_TX_L2:
		case CMPL_BASE_TYPE_RX_L2:
		case CMPL_BASE_TYPE_RX_AGG:
		case CMPL_BASE_TYPE_RX_TPA_START:
		case CMPL_BASE_TYPE_RX_TPA_END:
		case CMPL_BASE_TYPE_STAT_EJECT:
		case CMPL_BASE_TYPE_HWRM_DONE:
		case CMPL_BASE_TYPE_HWRM_FWD_REQ:
		case CMPL_BASE_TYPE_HWRM_FWD_RESP:
		case CMPL_BASE_TYPE_CQ_NOTIFICATION:
		case CMPL_BASE_TYPE_SRQ_EVENT:
		case CMPL_BASE_TYPE_DBQ_EVENT:
		case CMPL_BASE_TYPE_QP_EVENT:
		case CMPL_BASE_TYPE_FUNC_EVENT:
			device_printf(softc->dev,
			    "Unhandled completion type %u\n", type);
			break;
		default:
			device_printf(softc->dev,
			    "Unknown completion type %u\n", type);
			break;
		}
	}

	/*
	 * Publish the position of the last consumed entry, then write the
	 * doorbell (second argument presumably re-arms interrupts for this
	 * ring — matches the other doorbell call sites in this driver).
	 */
	cpr->cons = last_cons;
	cpr->v_bit = last_v_bit;
	softc->db_ops.bnxt_db_rx_cq(cpr, 1);
}
4663 
4664 uint8_t
get_phy_type(struct bnxt_softc * softc)4665 get_phy_type(struct bnxt_softc *softc)
4666 {
4667 	struct bnxt_link_info *link_info = &softc->link_info;
4668 	uint8_t phy_type = link_info->phy_type;
4669 	uint16_t supported;
4670 
4671 	if (phy_type != HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_UNKNOWN)
4672 		return phy_type;
4673 
4674 	/* Deduce the phy type from the media type and supported speeds */
4675 	supported = link_info->support_speeds;
4676 
4677 	if (link_info->media_type ==
4678 	    HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP)
4679 		return HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET;
4680 	if (link_info->media_type ==
4681 	    HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_DAC) {
4682 		if (supported & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2_5GB)
4683 			return HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKX;
4684 		if (supported & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_20GB)
4685 			return HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR;
4686 		return HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASECR;
4687 	}
4688 	if (link_info->media_type ==
4689 	    HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_FIBRE)
4690 		return HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASESR;
4691 
4692 	return phy_type;
4693 }
4694 
4695 bool
bnxt_check_hwrm_version(struct bnxt_softc * softc)4696 bnxt_check_hwrm_version(struct bnxt_softc *softc)
4697 {
4698 	char buf[16];
4699 
4700 	sprintf(buf, "%hhu.%hhu.%hhu", softc->ver_info->hwrm_min_major,
4701 	    softc->ver_info->hwrm_min_minor, softc->ver_info->hwrm_min_update);
4702 	if (softc->ver_info->hwrm_min_major > softc->ver_info->hwrm_if_major) {
4703 		device_printf(softc->dev,
4704 		    "WARNING: HWRM version %s is too old (older than %s)\n",
4705 		    softc->ver_info->hwrm_if_ver, buf);
4706 		return false;
4707 	}
4708 	else if(softc->ver_info->hwrm_min_major ==
4709 	    softc->ver_info->hwrm_if_major) {
4710 		if (softc->ver_info->hwrm_min_minor >
4711 		    softc->ver_info->hwrm_if_minor) {
4712 			device_printf(softc->dev,
4713 			    "WARNING: HWRM version %s is too old (older than %s)\n",
4714 			    softc->ver_info->hwrm_if_ver, buf);
4715 			return false;
4716 		}
4717 		else if (softc->ver_info->hwrm_min_minor ==
4718 		    softc->ver_info->hwrm_if_minor) {
4719 			if (softc->ver_info->hwrm_min_update >
4720 			    softc->ver_info->hwrm_if_update) {
4721 				device_printf(softc->dev,
4722 				    "WARNING: HWRM version %s is too old (older than %s)\n",
4723 				    softc->ver_info->hwrm_if_ver, buf);
4724 				return false;
4725 			}
4726 		}
4727 	}
4728 	return true;
4729 }
4730 
4731 static uint64_t
bnxt_get_baudrate(struct bnxt_link_info * link)4732 bnxt_get_baudrate(struct bnxt_link_info *link)
4733 {
4734 	switch (link->link_speed) {
4735 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
4736 		return IF_Mbps(100);
4737 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
4738 		return IF_Gbps(1);
4739 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
4740 		return IF_Gbps(2);
4741 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
4742 		return IF_Mbps(2500);
4743 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
4744 		return IF_Gbps(10);
4745 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
4746 		return IF_Gbps(20);
4747 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
4748 		return IF_Gbps(25);
4749 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
4750 		return IF_Gbps(40);
4751 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
4752 		return IF_Gbps(50);
4753 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB:
4754 		return IF_Gbps(100);
4755 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10MB:
4756 		return IF_Mbps(10);
4757 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_200GB:
4758 		return IF_Gbps(200);
4759 	}
4760 	return IF_Gbps(100);
4761 }
4762 
4763 static void
bnxt_get_wol_settings(struct bnxt_softc * softc)4764 bnxt_get_wol_settings(struct bnxt_softc *softc)
4765 {
4766 	uint16_t wol_handle = 0;
4767 
4768 	if (!bnxt_wol_supported(softc))
4769 		return;
4770 
4771 	do {
4772 		wol_handle = bnxt_hwrm_get_wol_fltrs(softc, wol_handle);
4773 	} while (wol_handle && wol_handle != BNXT_NO_MORE_WOL_FILTERS);
4774 }
4775