1 /*	$OpenBSD: if_bnxt.c,v 1.39 2023/11/10 15:51:20 bluhm Exp $	*/
2 /*-
3  * Broadcom NetXtreme-C/E network driver.
4  *
5  * Copyright (c) 2016 Broadcom, All Rights Reserved.
6  * The term Broadcom refers to Broadcom Limited and/or its subsidiaries
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
18  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
21  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
25  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
27  * THE POSSIBILITY OF SUCH DAMAGE.
28  */
29 
30 /*
31  * Copyright (c) 2018 Jonathan Matthew <jmatthew@openbsd.org>
32  *
33  * Permission to use, copy, modify, and distribute this software for any
34  * purpose with or without fee is hereby granted, provided that the above
35  * copyright notice and this permission notice appear in all copies.
36  *
37  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
38  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
39  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
40  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
41  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
42  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
43  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
44  */
45 
46 
47 #include "bpfilter.h"
48 #include "vlan.h"
49 
50 #include <sys/param.h>
51 #include <sys/systm.h>
52 #include <sys/mbuf.h>
53 #include <sys/kernel.h>
54 #include <sys/malloc.h>
55 #include <sys/device.h>
56 #include <sys/stdint.h>
57 #include <sys/sockio.h>
58 #include <sys/atomic.h>
59 #include <sys/intrmap.h>
60 
61 #include <machine/bus.h>
62 
63 #include <dev/pci/pcireg.h>
64 #include <dev/pci/pcivar.h>
65 #include <dev/pci/pcidevs.h>
66 
67 #include <dev/pci/if_bnxtreg.h>
68 
69 #include <net/if.h>
70 #include <net/if_media.h>
71 #include <net/toeplitz.h>
72 
73 #if NBPFILTER > 0
74 #include <net/bpf.h>
75 #endif
76 
77 #include <netinet/in.h>
78 #include <netinet/if_ether.h>
79 
80 #define BNXT_HWRM_BAR		0x10
81 #define BNXT_DOORBELL_BAR	0x18
82 
83 #define BNXT_MAX_QUEUES		8
84 
85 #define BNXT_CP_RING_ID_BASE	0
86 #define BNXT_RX_RING_ID_BASE	(BNXT_MAX_QUEUES + 1)
87 #define BNXT_AG_RING_ID_BASE	((BNXT_MAX_QUEUES * 2) + 1)
88 #define BNXT_TX_RING_ID_BASE	((BNXT_MAX_QUEUES * 3) + 1)
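/*
 * illustrative mapping (not in the original source): with
 * BNXT_MAX_QUEUES = 8, queue i uses cp ring id i + 1 (cp ring 0 is
 * reserved for async/admin events), rx ring id 9 + i, ag ring id
 * 17 + i and tx ring id 25 + i; each ring's doorbell register lives
 * at id * 0x80 in the doorbell BAR (see the ring setup code below).
 */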
89 
90 #define BNXT_MAX_MTU		9500
91 #define BNXT_AG_BUFFER_SIZE	8192
92 
93 #define BNXT_CP_PAGES		4
94 
95 #define BNXT_MAX_TX_SEGS	32	/* a bit much? */
96 #define BNXT_TX_SLOTS(bs)	(bs->bs_map->dm_nsegs + 1)
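/*
 * each packet takes one tx_bd_short per DMA segment plus one
 * tx_bd_long_hi carrying the offload flags, hence dm_nsegs + 1 ring
 * slots (see bnxt_start() below).
 */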
97 
98 #define BNXT_HWRM_SHORT_REQ_LEN	sizeof(struct hwrm_short_input)
99 
100 #define BNXT_HWRM_LOCK_INIT(_sc, _name)	\
101 	mtx_init_flags(&sc->sc_lock, IPL_NET, _name, 0)
102 #define BNXT_HWRM_LOCK(_sc) 		mtx_enter(&_sc->sc_lock)
103 #define BNXT_HWRM_UNLOCK(_sc) 		mtx_leave(&_sc->sc_lock)
104 #define BNXT_HWRM_LOCK_DESTROY(_sc)	/* nothing */
105 #define BNXT_HWRM_LOCK_ASSERT(_sc)	MUTEX_ASSERT_LOCKED(&_sc->sc_lock)
106 
107 #define BNXT_FLAG_VF            0x0001
108 #define BNXT_FLAG_NPAR          0x0002
109 #define BNXT_FLAG_WOL_CAP       0x0004
110 #define BNXT_FLAG_SHORT_CMD     0x0008
111 #define BNXT_FLAG_MSIX          0x0010
112 
113 /* NVRam stuff has a five minute timeout */
114 #define BNXT_NVM_TIMEO	(5 * 60 * 1000)
115 
116 #define NEXT_CP_CONS_V(_ring, _cons, _v_bit)		\
117 do {							\
118 	if (++(_cons) == (_ring)->ring_size)		\
119 		((_cons) = 0, (_v_bit) = !(_v_bit));	\
120 } while (0)
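/*
 * sketch of the valid-bit scheme: on a 4-entry ring, entries are
 * consumed as (cons 0..3, v_bit 1), then the wrap flips the bit and
 * consumption continues as (cons 0..3, v_bit 0).  an entry is ready
 * once the truth value of its CMPL_BASE_V bit matches v_bit; see
 * bnxt_cpr_next_cmpl().
 */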
121 
122 struct bnxt_ring {
123 	uint64_t		paddr;
124 	uint64_t		doorbell;
125 	caddr_t			vaddr;
126 	uint32_t		ring_size;
127 	uint16_t		id;
128 	uint16_t		phys_id;
129 };
130 
131 struct bnxt_cp_ring {
132 	struct bnxt_ring	ring;
133 	void			*irq;
134 	struct bnxt_softc	*softc;
135 	uint32_t		cons;
136 	int			v_bit;
137 	uint32_t		commit_cons;
138 	int			commit_v_bit;
139 	struct ctx_hw_stats	*stats;
140 	uint32_t		stats_ctx_id;
141 	struct bnxt_dmamem	*ring_mem;
142 };
143 
144 struct bnxt_grp_info {
145 	uint32_t		grp_id;
146 	uint16_t		stats_ctx;
147 	uint16_t		rx_ring_id;
148 	uint16_t		cp_ring_id;
149 	uint16_t		ag_ring_id;
150 };
151 
152 struct bnxt_vnic_info {
153 	uint16_t		id;
154 	uint16_t		def_ring_grp;
155 	uint16_t		cos_rule;
156 	uint16_t		lb_rule;
157 	uint16_t		mru;
158 
159 	uint32_t		flags;
160 #define BNXT_VNIC_FLAG_DEFAULT		0x01
161 #define BNXT_VNIC_FLAG_BD_STALL		0x02
162 #define BNXT_VNIC_FLAG_VLAN_STRIP	0x04
163 
164 	uint64_t		filter_id;
165 	uint32_t		flow_id;
166 
167 	uint16_t		rss_id;
168 };
169 
170 struct bnxt_slot {
171 	bus_dmamap_t		bs_map;
172 	struct mbuf		*bs_m;
173 };
174 
175 struct bnxt_dmamem {
176 	bus_dmamap_t		bdm_map;
177 	bus_dma_segment_t	bdm_seg;
178 	size_t			bdm_size;
179 	caddr_t			bdm_kva;
180 };
181 #define BNXT_DMA_MAP(_bdm)	((_bdm)->bdm_map)
182 #define BNXT_DMA_LEN(_bdm)	((_bdm)->bdm_size)
183 #define BNXT_DMA_DVA(_bdm)	((u_int64_t)(_bdm)->bdm_map->dm_segs[0].ds_addr)
184 #define BNXT_DMA_KVA(_bdm)	((void *)(_bdm)->bdm_kva)
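/*
 * typical use: a ring takes both views of one allocation, e.g.
 * ring.vaddr = BNXT_DMA_KVA(mem) for the driver side and
 * ring.paddr = BNXT_DMA_DVA(mem) for the device side.
 */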
185 
186 struct bnxt_rx_queue {
187 	struct bnxt_softc	*rx_softc;
188 	struct ifiqueue		*rx_ifiq;
189 	struct bnxt_dmamem	*rx_ring_mem;	/* rx and ag */
190 	struct bnxt_ring	rx_ring;
191 	struct bnxt_ring	rx_ag_ring;
192 	struct if_rxring	rxr[2];
193 	struct bnxt_slot	*rx_slots;
194 	struct bnxt_slot	*rx_ag_slots;
195 	int			rx_prod;
196 	int			rx_cons;
197 	int			rx_ag_prod;
198 	int			rx_ag_cons;
199 	struct timeout		rx_refill;
200 };
201 
202 struct bnxt_tx_queue {
203 	struct bnxt_softc	*tx_softc;
204 	struct ifqueue		*tx_ifq;
205 	struct bnxt_dmamem	*tx_ring_mem;
206 	struct bnxt_ring	tx_ring;
207 	struct bnxt_slot	*tx_slots;
208 	int			tx_prod;
209 	int			tx_cons;
210 	int			tx_ring_prod;
211 	int			tx_ring_cons;
212 };
213 
214 struct bnxt_queue {
215 	char			q_name[8];
216 	int			q_index;
217 	void			*q_ihc;
218 	struct bnxt_softc	*q_sc;
219 	struct bnxt_cp_ring	q_cp;
220 	struct bnxt_rx_queue	q_rx;
221 	struct bnxt_tx_queue	q_tx;
222 	struct bnxt_grp_info	q_rg;
223 };
224 
225 struct bnxt_softc {
226 	struct device		sc_dev;
227 	struct arpcom		sc_ac;
228 	struct ifmedia		sc_media;
229 
230 	struct mutex		sc_lock;
231 
232 	pci_chipset_tag_t	sc_pc;
233 	pcitag_t		sc_tag;
234 	bus_dma_tag_t		sc_dmat;
235 
236 	bus_space_tag_t		sc_hwrm_t;
237 	bus_space_handle_t	sc_hwrm_h;
238 	bus_size_t		sc_hwrm_s;
239 
240 	struct bnxt_dmamem	*sc_cmd_resp;
241 	uint16_t		sc_cmd_seq;
242 	uint16_t		sc_max_req_len;
243 	uint32_t		sc_cmd_timeo;
244 	uint32_t		sc_flags;
245 
246 	bus_space_tag_t		sc_db_t;
247 	bus_space_handle_t	sc_db_h;
248 	bus_size_t		sc_db_s;
249 
250 	void			*sc_ih;
251 
252 	int			sc_hwrm_ver;
253 	int			sc_tx_queue_id;
254 
255 	struct bnxt_vnic_info	sc_vnic;
256 	struct bnxt_dmamem	*sc_stats_ctx_mem;
257 	struct bnxt_dmamem	*sc_rx_cfg;
258 
259 	struct bnxt_cp_ring	sc_cp_ring;
260 
261 	int			sc_nqueues;
262 	struct intrmap		*sc_intrmap;
263 	struct bnxt_queue	sc_queues[BNXT_MAX_QUEUES];
264 };
265 #define DEVNAME(_sc)	((_sc)->sc_dev.dv_xname)
266 
267 const struct pci_matchid bnxt_devices[] = {
268 	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_BCM57301 },
269 	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_BCM57302 },
270 	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_BCM57304 },
271 	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_BCM57311 },
272 	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_BCM57312 },
273 	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_BCM57314 },
274 	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_BCM57402 },
275 	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_BCM57404 },
276 	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_BCM57406 },
277 	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_BCM57407 },
278 	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_BCM57412 },
279 	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_BCM57414 },
280 	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_BCM57416 },
281 	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_BCM57416_SFP },
282 	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_BCM57417 },
283 	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_BCM57417_SFP }
284 };
285 
286 int		bnxt_match(struct device *, void *, void *);
287 void		bnxt_attach(struct device *, struct device *, void *);
288 
289 void		bnxt_up(struct bnxt_softc *);
290 void		bnxt_down(struct bnxt_softc *);
291 void		bnxt_iff(struct bnxt_softc *);
292 int		bnxt_ioctl(struct ifnet *, u_long, caddr_t);
293 int		bnxt_rxrinfo(struct bnxt_softc *, struct if_rxrinfo *);
294 void		bnxt_start(struct ifqueue *);
295 int		bnxt_admin_intr(void *);
296 int		bnxt_intr(void *);
297 void		bnxt_watchdog(struct ifnet *);
298 void		bnxt_media_status(struct ifnet *, struct ifmediareq *);
299 int		bnxt_media_change(struct ifnet *);
300 int		bnxt_media_autonegotiate(struct bnxt_softc *);
301 
302 struct cmpl_base *bnxt_cpr_next_cmpl(struct bnxt_softc *, struct bnxt_cp_ring *);
303 void		bnxt_cpr_commit(struct bnxt_softc *, struct bnxt_cp_ring *);
304 void		bnxt_cpr_rollback(struct bnxt_softc *, struct bnxt_cp_ring *);
305 
306 void		bnxt_mark_cpr_invalid(struct bnxt_cp_ring *);
307 void		bnxt_write_cp_doorbell(struct bnxt_softc *, struct bnxt_ring *,
308 		    int);
309 void		bnxt_write_cp_doorbell_index(struct bnxt_softc *,
310 		    struct bnxt_ring *, uint32_t, int);
311 void		bnxt_write_rx_doorbell(struct bnxt_softc *, struct bnxt_ring *,
312 		    int);
313 void		bnxt_write_tx_doorbell(struct bnxt_softc *, struct bnxt_ring *,
314 		    int);
315 
316 int		bnxt_rx_fill(struct bnxt_queue *);
317 u_int		bnxt_rx_fill_slots(struct bnxt_softc *, struct bnxt_ring *, void *,
318 		    struct bnxt_slot *, uint *, int, uint16_t, u_int);
319 void		bnxt_refill(void *);
320 int		bnxt_rx(struct bnxt_softc *, struct bnxt_rx_queue *,
321 		    struct bnxt_cp_ring *, struct mbuf_list *, int *, int *,
322 		    struct cmpl_base *);
323 
324 void		bnxt_txeof(struct bnxt_softc *, struct bnxt_tx_queue *, int *,
325 		    struct cmpl_base *);
326 
327 int		bnxt_set_cp_ring_aggint(struct bnxt_softc *, struct bnxt_cp_ring *);
328 
329 int		_hwrm_send_message(struct bnxt_softc *, void *, uint32_t);
330 int		hwrm_send_message(struct bnxt_softc *, void *, uint32_t);
331 void		bnxt_hwrm_cmd_hdr_init(struct bnxt_softc *, void *, uint16_t);
332 int		bnxt_hwrm_err_map(uint16_t);
333 
334 /* HWRM Function Prototypes */
335 int		bnxt_hwrm_ring_alloc(struct bnxt_softc *, uint8_t,
336 		    struct bnxt_ring *, uint16_t, uint32_t, int);
337 int		bnxt_hwrm_ring_free(struct bnxt_softc *, uint8_t,
338 		    struct bnxt_ring *);
339 int		bnxt_hwrm_ver_get(struct bnxt_softc *);
340 int		bnxt_hwrm_queue_qportcfg(struct bnxt_softc *);
341 int		bnxt_hwrm_func_drv_rgtr(struct bnxt_softc *);
342 int		bnxt_hwrm_func_qcaps(struct bnxt_softc *);
343 int		bnxt_hwrm_func_qcfg(struct bnxt_softc *);
344 int		bnxt_hwrm_func_reset(struct bnxt_softc *);
345 int		bnxt_hwrm_vnic_ctx_alloc(struct bnxt_softc *, uint16_t *);
346 int		bnxt_hwrm_vnic_ctx_free(struct bnxt_softc *, uint16_t *);
347 int		bnxt_hwrm_vnic_cfg(struct bnxt_softc *,
348 		    struct bnxt_vnic_info *);
349 int		bnxt_hwrm_vnic_cfg_placement(struct bnxt_softc *,
350 		    struct bnxt_vnic_info *vnic);
351 int		bnxt_hwrm_stat_ctx_alloc(struct bnxt_softc *,
352 		    struct bnxt_cp_ring *, uint64_t);
353 int		bnxt_hwrm_stat_ctx_free(struct bnxt_softc *,
354 		    struct bnxt_cp_ring *);
355 int		bnxt_hwrm_ring_grp_alloc(struct bnxt_softc *,
356 		    struct bnxt_grp_info *);
357 int		bnxt_hwrm_ring_grp_free(struct bnxt_softc *,
358 		    struct bnxt_grp_info *);
359 int		bnxt_hwrm_vnic_alloc(struct bnxt_softc *,
360 		    struct bnxt_vnic_info *);
361 int		bnxt_hwrm_vnic_free(struct bnxt_softc *,
362 		    struct bnxt_vnic_info *);
363 int		bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt_softc *,
364 		    uint32_t, uint32_t, uint64_t, uint32_t);
365 int		bnxt_hwrm_set_filter(struct bnxt_softc *,
366 		    struct bnxt_vnic_info *);
367 int		bnxt_hwrm_free_filter(struct bnxt_softc *,
368 		    struct bnxt_vnic_info *);
369 int		bnxt_hwrm_vnic_rss_cfg(struct bnxt_softc *,
370 		    struct bnxt_vnic_info *, uint32_t, daddr_t, daddr_t);
371 int		bnxt_cfg_async_cr(struct bnxt_softc *, struct bnxt_cp_ring *);
372 int		bnxt_hwrm_nvm_get_dev_info(struct bnxt_softc *, uint16_t *,
373 		    uint16_t *, uint32_t *, uint32_t *, uint32_t *, uint32_t *);
374 int		bnxt_hwrm_port_phy_qcfg(struct bnxt_softc *,
375 		    struct ifmediareq *);
376 int		bnxt_hwrm_func_rgtr_async_events(struct bnxt_softc *);
377 int		bnxt_get_sffpage(struct bnxt_softc *, struct if_sffpage *);
378 
379 /* not used yet: */
380 #if 0
381 int bnxt_hwrm_func_drv_unrgtr(struct bnxt_softc *softc, bool shutdown);
382 
383 int bnxt_hwrm_port_qstats(struct bnxt_softc *softc);
384 
385 
386 int bnxt_hwrm_vnic_tpa_cfg(struct bnxt_softc *softc);
387 void bnxt_validate_hw_lro_settings(struct bnxt_softc *softc);
388 int bnxt_hwrm_fw_reset(struct bnxt_softc *softc, uint8_t processor,
389     uint8_t *selfreset);
390 int bnxt_hwrm_fw_qstatus(struct bnxt_softc *softc, uint8_t type,
391     uint8_t *selfreset);
392 int bnxt_hwrm_fw_get_time(struct bnxt_softc *softc, uint16_t *year,
393     uint8_t *month, uint8_t *day, uint8_t *hour, uint8_t *minute,
394     uint8_t *second, uint16_t *millisecond, uint16_t *zone);
395 int bnxt_hwrm_fw_set_time(struct bnxt_softc *softc, uint16_t year,
396     uint8_t month, uint8_t day, uint8_t hour, uint8_t minute, uint8_t second,
397     uint16_t millisecond, uint16_t zone);
398 
399 #endif
400 
401 
402 const struct cfattach bnxt_ca = {
403 	sizeof(struct bnxt_softc), bnxt_match, bnxt_attach
404 };
405 
406 struct cfdriver bnxt_cd = {
407 	NULL, "bnxt", DV_IFNET
408 };
409 
410 struct bnxt_dmamem *
411 bnxt_dmamem_alloc(struct bnxt_softc *sc, size_t size)
412 {
413 	struct bnxt_dmamem *m;
414 	int nsegs;
415 
416 	m = malloc(sizeof(*m), M_DEVBUF, M_NOWAIT | M_ZERO);
417 	if (m == NULL)
418 		return (NULL);
419 
420 	m->bdm_size = size;
421 
422 	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
423 	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &m->bdm_map) != 0)
424 		goto bdmfree;
425 
426 	if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &m->bdm_seg, 1,
427 	    &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0)
428 		goto destroy;
429 
430 	if (bus_dmamem_map(sc->sc_dmat, &m->bdm_seg, nsegs, size, &m->bdm_kva,
431 	    BUS_DMA_NOWAIT) != 0)
432 		goto free;
433 
434 	if (bus_dmamap_load(sc->sc_dmat, m->bdm_map, m->bdm_kva, size, NULL,
435 	    BUS_DMA_NOWAIT) != 0)
436 		goto unmap;
437 
438 	return (m);
439 
440 unmap:
441 	bus_dmamem_unmap(sc->sc_dmat, m->bdm_kva, m->bdm_size);
442 free:
443 	bus_dmamem_free(sc->sc_dmat, &m->bdm_seg, 1);
444 destroy:
445 	bus_dmamap_destroy(sc->sc_dmat, m->bdm_map);
446 bdmfree:
447 	free(m, M_DEVBUF, sizeof *m);
448 
449 	return (NULL);
450 }
451 
452 void
453 bnxt_dmamem_free(struct bnxt_softc *sc, struct bnxt_dmamem *m)
454 {
455 	bus_dmamap_unload(sc->sc_dmat, m->bdm_map);
456 	bus_dmamem_unmap(sc->sc_dmat, m->bdm_kva, m->bdm_size);
457 	bus_dmamem_free(sc->sc_dmat, &m->bdm_seg, 1);
458 	bus_dmamap_destroy(sc->sc_dmat, m->bdm_map);
459 	free(m, M_DEVBUF, sizeof *m);
460 }
461 
462 int
463 bnxt_match(struct device *parent, void *match, void *aux)
464 {
465 	return (pci_matchbyid(aux, bnxt_devices, nitems(bnxt_devices)));
466 }
467 
468 void
469 bnxt_attach(struct device *parent, struct device *self, void *aux)
470 {
471 	struct bnxt_softc *sc = (struct bnxt_softc *)self;
472 	struct ifnet *ifp = &sc->sc_ac.ac_if;
473 	struct pci_attach_args *pa = aux;
474 	struct bnxt_cp_ring *cpr;
475 	pci_intr_handle_t ih;
476 	const char *intrstr;
477 	u_int memtype;
478 	int i;
479 
480 	sc->sc_pc = pa->pa_pc;
481 	sc->sc_tag = pa->pa_tag;
482 	sc->sc_dmat = pa->pa_dmat;
483 
484 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BNXT_HWRM_BAR);
485 	if (pci_mapreg_map(pa, BNXT_HWRM_BAR, memtype, 0, &sc->sc_hwrm_t,
486 	    &sc->sc_hwrm_h, NULL, &sc->sc_hwrm_s, 0)) {
487 		printf(": failed to map hwrm\n");
488 		return;
489 	}
490 
491 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BNXT_DOORBELL_BAR);
492 	if (pci_mapreg_map(pa, BNXT_DOORBELL_BAR, memtype, 0, &sc->sc_db_t,
493 	    &sc->sc_db_h, NULL, &sc->sc_db_s, 0)) {
494 		printf(": failed to map doorbell\n");
495 		goto unmap_1;
496 	}
497 
498 	BNXT_HWRM_LOCK_INIT(sc, DEVNAME(sc));
499 	sc->sc_cmd_resp = bnxt_dmamem_alloc(sc, PAGE_SIZE);
500 	if (sc->sc_cmd_resp == NULL) {
501 		printf(": failed to allocate command response buffer\n");
502 		goto unmap_2;
503 	}
504 
505 	if (bnxt_hwrm_ver_get(sc) != 0) {
506 		printf(": failed to query version info\n");
507 		goto free_resp;
508 	}
509 
510 	if (bnxt_hwrm_nvm_get_dev_info(sc, NULL, NULL, NULL, NULL, NULL, NULL)
511 	    != 0) {
512 		printf(": failed to get nvram info\n");
513 		goto free_resp;
514 	}
515 
516 	if (bnxt_hwrm_func_drv_rgtr(sc) != 0) {
517 		printf(": failed to register driver with firmware\n");
518 		goto free_resp;
519 	}
520 
521 	if (bnxt_hwrm_func_rgtr_async_events(sc) != 0) {
522 		printf(": failed to register async events\n");
523 		goto free_resp;
524 	}
525 
526 	if (bnxt_hwrm_func_qcaps(sc) != 0) {
527 		printf(": failed to get queue capabilities\n");
528 		goto free_resp;
529 	}
530 
531 	/*
532 	 * devices advertise msi support, but there's no way to tell a
533 	 * completion queue to use msi mode, only legacy or msi-x.
534 	 */
535 	if (pci_intr_map_msix(pa, 0, &ih) == 0) {
536 		int nmsix;
537 
538 		sc->sc_flags |= BNXT_FLAG_MSIX;
539 		intrstr = pci_intr_string(sc->sc_pc, ih);
540 
541 		nmsix = pci_intr_msix_count(pa);
542 		if (nmsix > 1) {
543 			sc->sc_ih = pci_intr_establish(sc->sc_pc, ih,
544 			    IPL_NET | IPL_MPSAFE, bnxt_admin_intr, sc, DEVNAME(sc));
545 			sc->sc_intrmap = intrmap_create(&sc->sc_dev,
546 			    nmsix - 1, BNXT_MAX_QUEUES, INTRMAP_POWEROF2);
547 			sc->sc_nqueues = intrmap_count(sc->sc_intrmap);
548 			KASSERT(sc->sc_nqueues > 0);
549 			KASSERT(powerof2(sc->sc_nqueues));
550 		} else {
551 			sc->sc_ih = pci_intr_establish(sc->sc_pc, ih,
552 			    IPL_NET | IPL_MPSAFE, bnxt_intr, &sc->sc_queues[0],
553 			    DEVNAME(sc));
554 			sc->sc_nqueues = 1;
555 		}
556 	} else if (pci_intr_map(pa, &ih) == 0) {
557 		intrstr = pci_intr_string(sc->sc_pc, ih);
558 		sc->sc_ih = pci_intr_establish(sc->sc_pc, ih, IPL_NET | IPL_MPSAFE,
559 		    bnxt_intr, &sc->sc_queues[0], DEVNAME(sc));
560 		sc->sc_nqueues = 1;
561 	} else {
562 		printf(": unable to map interrupt\n");
563 		goto free_resp;
564 	}
565 	if (sc->sc_ih == NULL) {
566 		printf(": unable to establish interrupt");
567 		if (intrstr != NULL)
568 			printf(" at %s", intrstr);
569 		printf("\n");
570 		goto deintr;
571 	}
572 	printf("%s, %d queues, address %s\n", intrstr, sc->sc_nqueues,
573 	    ether_sprintf(sc->sc_ac.ac_enaddr));
574 
575 	if (bnxt_hwrm_func_qcfg(sc) != 0) {
576 		printf("%s: failed to query function config\n", DEVNAME(sc));
577 		goto deintr;
578 	}
579 
580 	if (bnxt_hwrm_queue_qportcfg(sc) != 0) {
581 		printf("%s: failed to query port config\n", DEVNAME(sc));
582 		goto deintr;
583 	}
584 
585 	if (bnxt_hwrm_func_reset(sc) != 0) {
586 		printf("%s: reset failed\n", DEVNAME(sc));
587 		goto deintr;
588 	}
589 
590 	if (sc->sc_intrmap == NULL)
591 		cpr = &sc->sc_queues[0].q_cp;
592 	else
593 		cpr = &sc->sc_cp_ring;
594 
595 	cpr->stats_ctx_id = HWRM_NA_SIGNATURE;
596 	cpr->ring.phys_id = (uint16_t)HWRM_NA_SIGNATURE;
597 	cpr->softc = sc;
598 	cpr->ring.id = 0;
599 	cpr->ring.doorbell = cpr->ring.id * 0x80;
600 	cpr->ring.ring_size = (PAGE_SIZE * BNXT_CP_PAGES) /
601 	    sizeof(struct cmpl_base);
602 	cpr->ring_mem = bnxt_dmamem_alloc(sc, PAGE_SIZE *
603 	    BNXT_CP_PAGES);
604 	if (cpr->ring_mem == NULL) {
605 		printf("%s: failed to allocate completion queue memory\n",
606 		    DEVNAME(sc));
607 		goto deintr;
608 	}
609 	cpr->ring.vaddr = BNXT_DMA_KVA(cpr->ring_mem);
610 	cpr->ring.paddr = BNXT_DMA_DVA(cpr->ring_mem);
611 	cpr->cons = UINT32_MAX;
612 	cpr->v_bit = 1;
613 	bnxt_mark_cpr_invalid(cpr);
614 	if (bnxt_hwrm_ring_alloc(sc, HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
615 	    &cpr->ring, (uint16_t)HWRM_NA_SIGNATURE,
616 	    HWRM_NA_SIGNATURE, 1) != 0) {
617 		printf("%s: failed to allocate completion queue\n",
618 		    DEVNAME(sc));
619 		goto free_cp_mem;
620 	}
621 	if (bnxt_cfg_async_cr(sc, cpr) != 0) {
622 		printf("%s: failed to set async completion ring\n",
623 		    DEVNAME(sc));
624 		goto free_cp_mem;
625 	}
626 	bnxt_write_cp_doorbell(sc, &cpr->ring, 1);
627 
628 	if (bnxt_set_cp_ring_aggint(sc, cpr) != 0) {
629 		printf("%s: failed to set interrupt aggregation\n",
630 		    DEVNAME(sc));
631 		goto free_cp_mem;
632 	}
633 
634 	strlcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
635 	ifp->if_softc = sc;
636 	ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST | IFF_SIMPLEX;
637 	ifp->if_xflags = IFXF_MPSAFE;
638 	ifp->if_ioctl = bnxt_ioctl;
639 	ifp->if_qstart = bnxt_start;
640 	ifp->if_watchdog = bnxt_watchdog;
641 	ifp->if_hardmtu = BNXT_MAX_MTU;
642 	ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_CSUM_IPv4 |
643 	    IFCAP_CSUM_UDPv4 | IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv6 |
644 	    IFCAP_CSUM_TCPv6;
645 #if NVLAN > 0
646 	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
647 #endif
648 	ifq_init_maxlen(&ifp->if_snd, 1024);	/* ? */
649 
650 	ifmedia_init(&sc->sc_media, IFM_IMASK, bnxt_media_change,
651 	    bnxt_media_status);
652 
653 	if_attach(ifp);
654 	ether_ifattach(ifp);
655 
656 	if_attach_iqueues(ifp, sc->sc_nqueues);
657 	if_attach_queues(ifp, sc->sc_nqueues);
658 	for (i = 0; i < sc->sc_nqueues; i++) {
659 		struct ifiqueue *ifiq = ifp->if_iqs[i];
660 		struct ifqueue *ifq = ifp->if_ifqs[i];
661 		struct bnxt_queue *bq = &sc->sc_queues[i];
662 		struct bnxt_cp_ring *cp = &bq->q_cp;
663 		struct bnxt_rx_queue *rx = &bq->q_rx;
664 		struct bnxt_tx_queue *tx = &bq->q_tx;
665 
666 		bq->q_index = i;
667 		bq->q_sc = sc;
668 
669 		rx->rx_softc = sc;
670 		rx->rx_ifiq = ifiq;
671 		timeout_set(&rx->rx_refill, bnxt_refill, bq);
672 		ifiq->ifiq_softc = rx;
673 
674 		tx->tx_softc = sc;
675 		tx->tx_ifq = ifq;
676 		ifq->ifq_softc = tx;
677 
678 		if (sc->sc_intrmap != NULL) {
679 			cp->stats_ctx_id = HWRM_NA_SIGNATURE;
680 			cp->ring.phys_id = (uint16_t)HWRM_NA_SIGNATURE;
681 			cp->ring.id = i + 1;	/* first cp ring is async only */
682 			cp->softc = sc;
683 			cp->ring.doorbell = bq->q_cp.ring.id * 0x80;
684 			cp->ring.ring_size = (PAGE_SIZE * BNXT_CP_PAGES) /
685 			    sizeof(struct cmpl_base);
686 			if (pci_intr_map_msix(pa, i + 1, &ih) != 0) {
687 				printf("%s: unable to map queue interrupt %d\n",
688 				    DEVNAME(sc), i);
689 				goto intrdisestablish;
690 			}
691 			snprintf(bq->q_name, sizeof(bq->q_name), "%s:%d",
692 			    DEVNAME(sc), i);
693 			bq->q_ihc = pci_intr_establish_cpu(sc->sc_pc, ih,
694 			    IPL_NET | IPL_MPSAFE, intrmap_cpu(sc->sc_intrmap, i),
695 			    bnxt_intr, bq, bq->q_name);
696 			if (bq->q_ihc == NULL) {
697 				printf("%s: unable to establish interrupt %d\n",
698 				    DEVNAME(sc), i);
699 				goto intrdisestablish;
700 			}
701 		}
702 	}
703 
704 	bnxt_media_autonegotiate(sc);
705 	bnxt_hwrm_port_phy_qcfg(sc, NULL);
706 	return;
707 
708 intrdisestablish:
709 	for (i = 0; i < sc->sc_nqueues; i++) {
710 		struct bnxt_queue *bq = &sc->sc_queues[i];
711 		if (bq->q_ihc == NULL)
712 			continue;
713 		pci_intr_disestablish(sc->sc_pc, bq->q_ihc);
714 		bq->q_ihc = NULL;
715 	}
716 free_cp_mem:
717 	bnxt_dmamem_free(sc, cpr->ring_mem);
718 deintr:
719 	if (sc->sc_intrmap != NULL) {
720 		intrmap_destroy(sc->sc_intrmap);
721 		sc->sc_intrmap = NULL;
722 	}
723 	pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
724 	sc->sc_ih = NULL;
725 free_resp:
726 	bnxt_dmamem_free(sc, sc->sc_cmd_resp);
727 unmap_2:
728 	bus_space_unmap(sc->sc_db_t, sc->sc_db_h, sc->sc_db_s);
729 	sc->sc_db_s = 0;
730 unmap_1:
731 	bus_space_unmap(sc->sc_hwrm_t, sc->sc_hwrm_h, sc->sc_hwrm_s);
732 	sc->sc_hwrm_s = 0;
733 }
734 
735 void
736 bnxt_free_slots(struct bnxt_softc *sc, struct bnxt_slot *slots, int allocated,
737     int total)
738 {
739 	struct bnxt_slot *bs;
740 
741 	int i = allocated;
742 	while (i-- > 0) {
743 		bs = &slots[i];
744 		bus_dmamap_destroy(sc->sc_dmat, bs->bs_map);
745 	}
746 	free(slots, M_DEVBUF, total * sizeof(*bs));
747 }
748 
749 int
750 bnxt_set_cp_ring_aggint(struct bnxt_softc *sc, struct bnxt_cp_ring *cpr)
751 {
752 	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input aggint;
753 
754 	/*
755 	 * set interrupt aggregation parameters for around 10k interrupts
756 	 * per second.  the timers are in units of 80ns, and the counters
757 	 * are based on the minimum rx ring size of 32.
758 	 */
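	/*
	 * worked example, assuming 80ns timer ticks: int_lat_tmr_max =
	 * (10^9 / 10000) / 80 = 1250 ticks = 100us, i.e. at most ~10k
	 * interrupts per second; int_lat_tmr_min and cmpl_aggr_dma_tmr
	 * use (10^9 / 20000) / 80 = 625 ticks = 50us.
	 */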
759 	memset(&aggint, 0, sizeof(aggint));
760 	bnxt_hwrm_cmd_hdr_init(sc, &aggint,
761 	    HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
762 	aggint.ring_id = htole16(cpr->ring.phys_id);
763 	aggint.num_cmpl_dma_aggr = htole16(32);
764 	aggint.num_cmpl_dma_aggr_during_int = aggint.num_cmpl_dma_aggr;
765 	aggint.cmpl_aggr_dma_tmr = htole16((1000000000 / 20000) / 80);
766 	aggint.cmpl_aggr_dma_tmr_during_int = aggint.cmpl_aggr_dma_tmr;
767 	aggint.int_lat_tmr_min = htole16((1000000000 / 20000) / 80);
768 	aggint.int_lat_tmr_max = htole16((1000000000 / 10000) / 80);
769 	aggint.num_cmpl_aggr_int = htole16(16);
770 	return (hwrm_send_message(sc, &aggint, sizeof(aggint)));
771 }
772 
773 int
774 bnxt_queue_up(struct bnxt_softc *sc, struct bnxt_queue *bq)
775 {
776 	struct ifnet *ifp = &sc->sc_ac.ac_if;
777 	struct bnxt_cp_ring *cp = &bq->q_cp;
778 	struct bnxt_rx_queue *rx = &bq->q_rx;
779 	struct bnxt_tx_queue *tx = &bq->q_tx;
780 	struct bnxt_grp_info *rg = &bq->q_rg;
781 	struct bnxt_slot *bs;
782 	int i;
783 
784 	tx->tx_ring_mem = bnxt_dmamem_alloc(sc, PAGE_SIZE);
785 	if (tx->tx_ring_mem == NULL) {
786 		printf("%s: failed to allocate tx ring %d\n", DEVNAME(sc), bq->q_index);
787 		return ENOMEM;
788 	}
789 
790 	rx->rx_ring_mem = bnxt_dmamem_alloc(sc, PAGE_SIZE * 2);
791 	if (rx->rx_ring_mem == NULL) {
792 		printf("%s: failed to allocate rx ring %d\n", DEVNAME(sc), bq->q_index);
793 		goto free_tx;
794 	}
795 
796 	/* completion ring is already allocated if we're not using an intrmap */
797 	if (sc->sc_intrmap != NULL) {
798 		cp->ring_mem = bnxt_dmamem_alloc(sc, PAGE_SIZE * BNXT_CP_PAGES);
799 		if (cp->ring_mem == NULL) {
800 			printf("%s: failed to allocate completion ring %d mem\n",
801 			    DEVNAME(sc), bq->q_index);
802 			goto free_rx;
803 		}
804 		cp->ring.vaddr = BNXT_DMA_KVA(cp->ring_mem);
805 		cp->ring.paddr = BNXT_DMA_DVA(cp->ring_mem);
806 		cp->cons = UINT32_MAX;
807 		cp->v_bit = 1;
808 		bnxt_mark_cpr_invalid(cp);
809 
810 		if (bnxt_hwrm_ring_alloc(sc, HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
811 		    &cp->ring, (uint16_t)HWRM_NA_SIGNATURE,
812 		    HWRM_NA_SIGNATURE, 1) != 0) {
813 			printf("%s: failed to allocate completion queue %d\n",
814 			    DEVNAME(sc), bq->q_index);
815 			goto free_rx;
816 		}
817 
818 		if (bnxt_set_cp_ring_aggint(sc, cp) != 0) {
819 			printf("%s: failed to set interrupt %d aggregation\n",
820 			    DEVNAME(sc), bq->q_index);
821 			goto free_rx;
822 		}
823 		bnxt_write_cp_doorbell(sc, &cp->ring, 1);
824 	}
825 
826 	if (bnxt_hwrm_stat_ctx_alloc(sc, &bq->q_cp,
827 	    BNXT_DMA_DVA(sc->sc_stats_ctx_mem) +
828 	    (bq->q_index * sizeof(struct ctx_hw_stats))) != 0) {
829 		printf("%s: failed to set up stats context\n", DEVNAME(sc));
830 		goto free_rx;
831 	}
832 
833 	tx->tx_ring.phys_id = (uint16_t)HWRM_NA_SIGNATURE;
834 	tx->tx_ring.id = BNXT_TX_RING_ID_BASE + bq->q_index;
835 	tx->tx_ring.doorbell = tx->tx_ring.id * 0x80;
836 	tx->tx_ring.ring_size = PAGE_SIZE / sizeof(struct tx_bd_short);
837 	tx->tx_ring.vaddr = BNXT_DMA_KVA(tx->tx_ring_mem);
838 	tx->tx_ring.paddr = BNXT_DMA_DVA(tx->tx_ring_mem);
839 	if (bnxt_hwrm_ring_alloc(sc, HWRM_RING_ALLOC_INPUT_RING_TYPE_TX,
840 	    &tx->tx_ring, cp->ring.phys_id, HWRM_NA_SIGNATURE, 1) != 0) {
841 		printf("%s: failed to set up tx ring\n",
842 		    DEVNAME(sc));
843 		goto dealloc_stats;
844 	}
845 	bnxt_write_tx_doorbell(sc, &tx->tx_ring, 0);
846 
847 	rx->rx_ring.phys_id = (uint16_t)HWRM_NA_SIGNATURE;
848 	rx->rx_ring.id = BNXT_RX_RING_ID_BASE + bq->q_index;
849 	rx->rx_ring.doorbell = rx->rx_ring.id * 0x80;
850 	rx->rx_ring.ring_size = PAGE_SIZE / sizeof(struct rx_prod_pkt_bd);
851 	rx->rx_ring.vaddr = BNXT_DMA_KVA(rx->rx_ring_mem);
852 	rx->rx_ring.paddr = BNXT_DMA_DVA(rx->rx_ring_mem);
853 	if (bnxt_hwrm_ring_alloc(sc, HWRM_RING_ALLOC_INPUT_RING_TYPE_RX,
854 	    &rx->rx_ring, cp->ring.phys_id, HWRM_NA_SIGNATURE, 1) != 0) {
855 		printf("%s: failed to set up rx ring\n",
856 		    DEVNAME(sc));
857 		goto dealloc_tx;
858 	}
859 	bnxt_write_rx_doorbell(sc, &rx->rx_ring, 0);
860 
861 	rx->rx_ag_ring.phys_id = (uint16_t)HWRM_NA_SIGNATURE;
862 	rx->rx_ag_ring.id = BNXT_AG_RING_ID_BASE + bq->q_index;
863 	rx->rx_ag_ring.doorbell = rx->rx_ag_ring.id * 0x80;
864 	rx->rx_ag_ring.ring_size = PAGE_SIZE / sizeof(struct rx_prod_pkt_bd);
865 	rx->rx_ag_ring.vaddr = BNXT_DMA_KVA(rx->rx_ring_mem) + PAGE_SIZE;
866 	rx->rx_ag_ring.paddr = BNXT_DMA_DVA(rx->rx_ring_mem) + PAGE_SIZE;
867 	if (bnxt_hwrm_ring_alloc(sc, HWRM_RING_ALLOC_INPUT_RING_TYPE_RX,
868 	    &rx->rx_ag_ring, cp->ring.phys_id, HWRM_NA_SIGNATURE, 1) != 0) {
869 		printf("%s: failed to set up rx ag ring\n",
870 		    DEVNAME(sc));
871 		goto dealloc_rx;
872 	}
873 	bnxt_write_rx_doorbell(sc, &rx->rx_ag_ring, 0);
874 
875 	rg->grp_id = HWRM_NA_SIGNATURE;
876 	rg->stats_ctx = cp->stats_ctx_id;
877 	rg->rx_ring_id = rx->rx_ring.phys_id;
878 	rg->ag_ring_id = rx->rx_ag_ring.phys_id;
879 	rg->cp_ring_id = cp->ring.phys_id;
880 	if (bnxt_hwrm_ring_grp_alloc(sc, rg) != 0) {
881 		printf("%s: failed to allocate ring group\n",
882 		    DEVNAME(sc));
883 		goto dealloc_ag;
884 	}
885 
886 	rx->rx_slots = mallocarray(rx->rx_ring.ring_size, sizeof(*bs),
887 	    M_DEVBUF, M_WAITOK | M_ZERO);
888 	if (rx->rx_slots == NULL) {
889 		printf("%s: failed to allocate rx slots\n", DEVNAME(sc));
890 		goto dealloc_ring_group;
891 	}
892 
893 	for (i = 0; i < rx->rx_ring.ring_size; i++) {
894 		bs = &rx->rx_slots[i];
895 		if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
896 		    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &bs->bs_map) != 0) {
897 			printf("%s: failed to allocate rx dma maps\n",
898 			    DEVNAME(sc));
899 			goto destroy_rx_slots;
900 		}
901 	}
902 
903 	rx->rx_ag_slots = mallocarray(rx->rx_ag_ring.ring_size, sizeof(*bs),
904 	    M_DEVBUF, M_WAITOK | M_ZERO);
905 	if (rx->rx_ag_slots == NULL) {
906 		printf("%s: failed to allocate rx ag slots\n", DEVNAME(sc));
907 		goto destroy_rx_slots;
908 	}
909 
910 	for (i = 0; i < rx->rx_ag_ring.ring_size; i++) {
911 		bs = &rx->rx_ag_slots[i];
912 		if (bus_dmamap_create(sc->sc_dmat, BNXT_AG_BUFFER_SIZE, 1,
913 		    BNXT_AG_BUFFER_SIZE, 0, BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
914 		    &bs->bs_map) != 0) {
915 			printf("%s: failed to allocate rx ag dma maps\n",
916 			    DEVNAME(sc));
917 			goto destroy_rx_ag_slots;
918 		}
919 	}
920 
921 	tx->tx_slots = mallocarray(tx->tx_ring.ring_size, sizeof(*bs),
922 	    M_DEVBUF, M_WAITOK | M_ZERO);
923 	if (tx->tx_slots == NULL) {
924 		printf("%s: failed to allocate tx slots\n", DEVNAME(sc));
925 		goto destroy_rx_ag_slots;
926 	}
927 
928 	for (i = 0; i < tx->tx_ring.ring_size; i++) {
929 		bs = &tx->tx_slots[i];
930 		if (bus_dmamap_create(sc->sc_dmat, BNXT_MAX_MTU, BNXT_MAX_TX_SEGS,
931 		    BNXT_MAX_MTU, 0, BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
932 		    &bs->bs_map) != 0) {
933 			printf("%s: failed to allocate tx dma maps\n",
934 			    DEVNAME(sc));
935 			goto destroy_tx_slots;
936 		}
937 	}
938 
939 	/*
940 	 * initially, the rx ring must be filled at least some distance beyond
941 	 * the current consumer index, as it looks like the firmware assumes the
942 	 * ring is full on creation, but doesn't prefetch the whole thing.
943 	 * once the whole ring has been used once, we should be able to back off
944 	 * to 2 or so slots, but we currently don't have a way of doing that.
945 	 */
946 	if_rxr_init(&rx->rxr[0], 32, rx->rx_ring.ring_size - 1);
947 	if_rxr_init(&rx->rxr[1], 32, rx->rx_ag_ring.ring_size - 1);
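	/*
	 * if_rxr_init(rxr, lwm, hwm): the low watermark of 32 matches
	 * the minimum rx ring size mentioned above; the high watermark
	 * of ring_size - 1 presumably keeps the ring from ever being
	 * completely full, so prod == cons always means empty.
	 */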
948 	rx->rx_prod = 0;
949 	rx->rx_cons = 0;
950 	rx->rx_ag_prod = 0;
951 	rx->rx_ag_cons = 0;
952 	bnxt_rx_fill(bq);
953 
954 	tx->tx_cons = 0;
955 	tx->tx_prod = 0;
956 	tx->tx_ring_cons = 0;
957 	tx->tx_ring_prod = 0;
958 	ifq_clr_oactive(ifp->if_ifqs[bq->q_index]);
959 	ifq_restart(ifp->if_ifqs[bq->q_index]);
960 	return 0;
961 
962 destroy_tx_slots:
963 	bnxt_free_slots(sc, tx->tx_slots, i, tx->tx_ring.ring_size);
964 	tx->tx_slots = NULL;
965 
966 	i = rx->rx_ag_ring.ring_size;
967 destroy_rx_ag_slots:
968 	bnxt_free_slots(sc, rx->rx_ag_slots, i, rx->rx_ag_ring.ring_size);
969 	rx->rx_ag_slots = NULL;
970 
971 	i = rx->rx_ring.ring_size;
972 destroy_rx_slots:
973 	bnxt_free_slots(sc, rx->rx_slots, i, rx->rx_ring.ring_size);
974 	rx->rx_slots = NULL;
975 dealloc_ring_group:
976 	bnxt_hwrm_ring_grp_free(sc, &bq->q_rg);
977 dealloc_ag:
978 	bnxt_hwrm_ring_free(sc, HWRM_RING_ALLOC_INPUT_RING_TYPE_RX,
979 	    &rx->rx_ag_ring);
980 dealloc_tx:
981 	bnxt_hwrm_ring_free(sc, HWRM_RING_ALLOC_INPUT_RING_TYPE_TX,
982 	    &tx->tx_ring);
983 dealloc_rx:
984 	bnxt_hwrm_ring_free(sc, HWRM_RING_ALLOC_INPUT_RING_TYPE_RX,
985 	    &rx->rx_ring);
986 dealloc_stats:
987 	bnxt_hwrm_stat_ctx_free(sc, cp);
988 free_rx:
989 	bnxt_dmamem_free(sc, rx->rx_ring_mem);
990 	rx->rx_ring_mem = NULL;
991 free_tx:
992 	bnxt_dmamem_free(sc, tx->tx_ring_mem);
993 	tx->tx_ring_mem = NULL;
994 	return ENOMEM;
995 }
996 
997 void
998 bnxt_queue_down(struct bnxt_softc *sc, struct bnxt_queue *bq)
999 {
1000 	struct bnxt_cp_ring *cp = &bq->q_cp;
1001 	struct bnxt_rx_queue *rx = &bq->q_rx;
1002 	struct bnxt_tx_queue *tx = &bq->q_tx;
1003 
1004 	/* empty rx ring first i guess */
1005 
1006 	bnxt_free_slots(sc, tx->tx_slots, tx->tx_ring.ring_size,
1007 	    tx->tx_ring.ring_size);
1008 	tx->tx_slots = NULL;
1009 
1010 	bnxt_free_slots(sc, rx->rx_ag_slots, rx->rx_ag_ring.ring_size,
1011 	    rx->rx_ag_ring.ring_size);
1012 	rx->rx_ag_slots = NULL;
1013 
1014 	bnxt_free_slots(sc, rx->rx_slots, rx->rx_ring.ring_size,
1015 	    rx->rx_ring.ring_size);
1016 	rx->rx_slots = NULL;
1017 
1018 	bnxt_hwrm_ring_grp_free(sc, &bq->q_rg);
1019 	bnxt_hwrm_stat_ctx_free(sc, &bq->q_cp);
1020 
1021 	/* may need to wait for 500ms here before we can free the rings */
1022 
1023 	bnxt_hwrm_ring_free(sc, HWRM_RING_ALLOC_INPUT_RING_TYPE_TX,
1024 	    &tx->tx_ring);
1025 	bnxt_hwrm_ring_free(sc, HWRM_RING_ALLOC_INPUT_RING_TYPE_RX,
1026 	    &rx->rx_ag_ring);
1027 	bnxt_hwrm_ring_free(sc, HWRM_RING_ALLOC_INPUT_RING_TYPE_RX,
1028 	    &rx->rx_ring);
1029 
1030 	/* if no intrmap, leave cp ring in place for async events */
1031 	if (sc->sc_intrmap != NULL) {
1032 		bnxt_hwrm_ring_free(sc, HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
1033 		    &cp->ring);
1034 
1035 		bnxt_dmamem_free(sc, cp->ring_mem);
1036 		cp->ring_mem = NULL;
1037 	}
1038 
1039 	bnxt_dmamem_free(sc, rx->rx_ring_mem);
1040 	rx->rx_ring_mem = NULL;
1041 
1042 	bnxt_dmamem_free(sc, tx->tx_ring_mem);
1043 	tx->tx_ring_mem = NULL;
1044 }
1045 
1046 void
1047 bnxt_up(struct bnxt_softc *sc)
1048 {
1049 	struct ifnet *ifp = &sc->sc_ac.ac_if;
1050 	int i;
1051 
1052 	sc->sc_stats_ctx_mem = bnxt_dmamem_alloc(sc,
1053 	    sizeof(struct ctx_hw_stats) * sc->sc_nqueues);
1054 	if (sc->sc_stats_ctx_mem == NULL) {
1055 		printf("%s: failed to allocate stats contexts\n", DEVNAME(sc));
1056 		return;
1057 	}
1058 
1059 	sc->sc_rx_cfg = bnxt_dmamem_alloc(sc, PAGE_SIZE * 2);
1060 	if (sc->sc_rx_cfg == NULL) {
1061 		printf("%s: failed to allocate rx config buffer\n",
1062 		    DEVNAME(sc));
1063 		goto free_stats;
1064 	}
1065 
1066 	for (i = 0; i < sc->sc_nqueues; i++) {
1067 		if (bnxt_queue_up(sc, &sc->sc_queues[i]) != 0) {
1068 			goto down_queues;
1069 		}
1070 	}
1071 
1072 	sc->sc_vnic.rss_id = (uint16_t)HWRM_NA_SIGNATURE;
1073 	if (bnxt_hwrm_vnic_ctx_alloc(sc, &sc->sc_vnic.rss_id) != 0) {
1074 		printf("%s: failed to allocate vnic rss context\n",
1075 		    DEVNAME(sc));
1076 		goto down_queues;
1077 	}
1078 
1079 	sc->sc_vnic.id = (uint16_t)HWRM_NA_SIGNATURE;
1080 	sc->sc_vnic.def_ring_grp = sc->sc_queues[0].q_rg.grp_id;
1081 	sc->sc_vnic.mru = BNXT_MAX_MTU;
1082 	sc->sc_vnic.cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
1083 	sc->sc_vnic.lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
1084 	sc->sc_vnic.flags = BNXT_VNIC_FLAG_DEFAULT |
1085 	    BNXT_VNIC_FLAG_VLAN_STRIP;
1086 	if (bnxt_hwrm_vnic_alloc(sc, &sc->sc_vnic) != 0) {
1087 		printf("%s: failed to allocate vnic\n", DEVNAME(sc));
1088 		goto dealloc_vnic_ctx;
1089 	}
1090 
1091 	if (bnxt_hwrm_vnic_cfg(sc, &sc->sc_vnic) != 0) {
1092 		printf("%s: failed to configure vnic\n", DEVNAME(sc));
1093 		goto dealloc_vnic;
1094 	}
1095 
1096 	if (bnxt_hwrm_vnic_cfg_placement(sc, &sc->sc_vnic) != 0) {
1097 		printf("%s: failed to configure vnic placement mode\n",
1098 		    DEVNAME(sc));
1099 		goto dealloc_vnic;
1100 	}
1101 
1102 	sc->sc_vnic.filter_id = -1;
1103 	if (bnxt_hwrm_set_filter(sc, &sc->sc_vnic) != 0) {
1104 		printf("%s: failed to set vnic filter\n", DEVNAME(sc));
1105 		goto dealloc_vnic;
1106 	}
1107 
1108 	if (sc->sc_nqueues > 1) {
1109 		uint16_t *rss_table = (BNXT_DMA_KVA(sc->sc_rx_cfg) + PAGE_SIZE);
1110 		uint8_t *hash_key = (uint8_t *)(rss_table + HW_HASH_INDEX_SIZE);
1111 
1112 		for (i = 0; i < HW_HASH_INDEX_SIZE; i++) {
1113 			struct bnxt_queue *bq;
1114 
1115 			bq = &sc->sc_queues[i % sc->sc_nqueues];
1116 			rss_table[i] = htole16(bq->q_rg.grp_id);
1117 		}
1118 		stoeplitz_to_key(hash_key, HW_HASH_KEY_SIZE);
1119 
1120 		if (bnxt_hwrm_vnic_rss_cfg(sc, &sc->sc_vnic,
1121 		    HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4 |
1122 		    HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4 |
1123 		    HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6 |
1124 		    HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6,
1125 		    BNXT_DMA_DVA(sc->sc_rx_cfg) + PAGE_SIZE,
1126 		    BNXT_DMA_DVA(sc->sc_rx_cfg) + PAGE_SIZE +
1127 		    (HW_HASH_INDEX_SIZE * sizeof(uint16_t))) != 0) {
1128 			printf("%s: failed to set RSS config\n", DEVNAME(sc));
1129 			goto dealloc_vnic;
1130 		}
1131 	}
1132 
1133 	bnxt_iff(sc);
1134 	SET(ifp->if_flags, IFF_RUNNING);
1135 
1136 	return;
1137 
1138 dealloc_vnic:
1139 	bnxt_hwrm_vnic_free(sc, &sc->sc_vnic);
1140 dealloc_vnic_ctx:
1141 	bnxt_hwrm_vnic_ctx_free(sc, &sc->sc_vnic.rss_id);
1142 down_queues:
1143 	for (i = 0; i < sc->sc_nqueues; i++)
1144 		bnxt_queue_down(sc, &sc->sc_queues[i]);
1145 
1146 	bnxt_dmamem_free(sc, sc->sc_rx_cfg);
1147 	sc->sc_rx_cfg = NULL;
1148 free_stats:
1149 	bnxt_dmamem_free(sc, sc->sc_stats_ctx_mem);
1150 	sc->sc_stats_ctx_mem = NULL;
1151 }
1152 
1153 void
1154 bnxt_down(struct bnxt_softc *sc)
1155 {
1156 	struct ifnet *ifp = &sc->sc_ac.ac_if;
1157 	int i;
1158 
1159 	CLR(ifp->if_flags, IFF_RUNNING);
1160 
1161 	for (i = 0; i < sc->sc_nqueues; i++) {
1162 		ifq_clr_oactive(ifp->if_ifqs[i]);
1163 		ifq_barrier(ifp->if_ifqs[i]);
1164 		/* intr barrier? */
1165 
1166 		timeout_del(&sc->sc_queues[i].q_rx.rx_refill);
1167 	}
1168 
1169 	bnxt_hwrm_free_filter(sc, &sc->sc_vnic);
1170 	bnxt_hwrm_vnic_free(sc, &sc->sc_vnic);
1171 	bnxt_hwrm_vnic_ctx_free(sc, &sc->sc_vnic.rss_id);
1172 
1173 	for (i = 0; i < sc->sc_nqueues; i++)
1174 		bnxt_queue_down(sc, &sc->sc_queues[i]);
1175 
1176 	bnxt_dmamem_free(sc, sc->sc_rx_cfg);
1177 	sc->sc_rx_cfg = NULL;
1178 
1179 	bnxt_dmamem_free(sc, sc->sc_stats_ctx_mem);
1180 	sc->sc_stats_ctx_mem = NULL;
1181 }
1182 
1183 void
1184 bnxt_iff(struct bnxt_softc *sc)
1185 {
1186 	struct ifnet *ifp = &sc->sc_ac.ac_if;
1187 	struct ether_multi *enm;
1188 	struct ether_multistep step;
1189 	char *mc_list;
1190 	uint32_t rx_mask, mc_count;
1191 
1192 	rx_mask = HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST
1193 	    | HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST
1194 	    | HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ANYVLAN_NONVLAN;
1195 
1196 	mc_list = BNXT_DMA_KVA(sc->sc_rx_cfg);
1197 	mc_count = 0;
1198 
1199 	if (ifp->if_flags & IFF_PROMISC) {
1200 		SET(ifp->if_flags, IFF_ALLMULTI);
1201 		rx_mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
1202 	} else if ((sc->sc_ac.ac_multirangecnt > 0) ||
1203 	    (sc->sc_ac.ac_multicnt > (PAGE_SIZE / ETHER_ADDR_LEN))) {
1204 		SET(ifp->if_flags, IFF_ALLMULTI);
1205 		rx_mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
1206 	} else {
1207 		CLR(ifp->if_flags, IFF_ALLMULTI);
1208 		ETHER_FIRST_MULTI(step, &sc->sc_ac, enm);
1209 		while (enm != NULL) {
1210 			memcpy(mc_list, enm->enm_addrlo, ETHER_ADDR_LEN);
1211 			mc_list += ETHER_ADDR_LEN;
1212 			mc_count++;
1213 
1214 			ETHER_NEXT_MULTI(step, enm);
1215 		}
1216 	}
1217 
1218 	bnxt_hwrm_cfa_l2_set_rx_mask(sc, sc->sc_vnic.id, rx_mask,
1219 	    BNXT_DMA_DVA(sc->sc_rx_cfg), mc_count);
1220 }
1221 
1222 int
1223 bnxt_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1224 {
1225 	struct bnxt_softc	*sc = (struct bnxt_softc *)ifp->if_softc;
1226 	struct ifreq		*ifr = (struct ifreq *)data;
1227 	int			s, error = 0;
1228 
1229 	s = splnet();
1230 	switch (cmd) {
1231 	case SIOCSIFADDR:
1232 		ifp->if_flags |= IFF_UP;
1233 		/* FALLTHROUGH */
1234 
1235 	case SIOCSIFFLAGS:
1236 		if (ISSET(ifp->if_flags, IFF_UP)) {
1237 			if (ISSET(ifp->if_flags, IFF_RUNNING))
1238 				error = ENETRESET;
1239 			else
1240 				bnxt_up(sc);
1241 		} else {
1242 			if (ISSET(ifp->if_flags, IFF_RUNNING))
1243 				bnxt_down(sc);
1244 		}
1245 		break;
1246 
1247 	case SIOCGIFMEDIA:
1248 	case SIOCSIFMEDIA:
1249 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
1250 		break;
1251 
1252 	case SIOCGIFRXR:
1253 		error = bnxt_rxrinfo(sc, (struct if_rxrinfo *)ifr->ifr_data);
1254 		break;
1255 
1256 	case SIOCGIFSFFPAGE:
1257 		error = bnxt_get_sffpage(sc, (struct if_sffpage *)data);
1258 		break;
1259 
1260 	default:
1261 		error = ether_ioctl(ifp, &sc->sc_ac, cmd, data);
1262 	}
1263 
1264 	if (error == ENETRESET) {
1265 		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
1266 		    (IFF_UP | IFF_RUNNING))
1267 			bnxt_iff(sc);
1268 		error = 0;
1269 	}
1270 
1271 	splx(s);
1272 
1273 	return (error);
1274 }
1275 
1276 int
1277 bnxt_rxrinfo(struct bnxt_softc *sc, struct if_rxrinfo *ifri)
1278 {
1279 	struct if_rxring_info *ifr;
1280 	int i;
1281 	int error;
1282 
1283 	ifr = mallocarray(sc->sc_nqueues * 2, sizeof(*ifr), M_TEMP,
1284 	    M_WAITOK | M_ZERO | M_CANFAIL);
1285 	if (ifr == NULL)
1286 		return (ENOMEM);
1287 
1288 	for (i = 0; i < sc->sc_nqueues; i++) {
1289 		ifr[(i * 2)].ifr_size = MCLBYTES;
1290 		ifr[(i * 2)].ifr_info = sc->sc_queues[i].q_rx.rxr[0];
1291 
1292 		ifr[(i * 2) + 1].ifr_size = BNXT_AG_BUFFER_SIZE;
1293 		ifr[(i * 2) + 1].ifr_info = sc->sc_queues[i].q_rx.rxr[1];
1294 	}
1295 
1296 	error = if_rxr_info_ioctl(ifri, sc->sc_nqueues * 2, ifr);
1297 	free(ifr, M_TEMP, sc->sc_nqueues * 2 * sizeof(*ifr));
1298 
1299 	return (error);
1300 }
1301 
1302 int
1303 bnxt_load_mbuf(struct bnxt_softc *sc, struct bnxt_slot *bs, struct mbuf *m)
1304 {
1305 	switch (bus_dmamap_load_mbuf(sc->sc_dmat, bs->bs_map, m,
1306 	    BUS_DMA_STREAMING | BUS_DMA_NOWAIT)) {
1307 	case 0:
1308 		break;
1309 
1310 	case EFBIG:
1311 		if (m_defrag(m, M_DONTWAIT) == 0 &&
1312 		    bus_dmamap_load_mbuf(sc->sc_dmat, bs->bs_map, m,
1313 		    BUS_DMA_STREAMING | BUS_DMA_NOWAIT) == 0)
1314 			break;
1315 
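		/* FALLTHROUGH: m_defrag() or the reload failed */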
1316 	default:
1317 		return (1);
1318 	}
1319 
1320 	bs->bs_m = m;
1321 	return (0);
1322 }
1323 
1324 void
1325 bnxt_start(struct ifqueue *ifq)
1326 {
1327 	struct ifnet *ifp = ifq->ifq_if;
1328 	struct tx_bd_short *txring;
1329 	struct tx_bd_long_hi *txhi;
1330 	struct bnxt_tx_queue *tx = ifq->ifq_softc;
1331 	struct bnxt_softc *sc = tx->tx_softc;
1332 	struct bnxt_slot *bs;
1333 	bus_dmamap_t map;
1334 	struct mbuf *m;
1335 	u_int idx, free, used, laststart;
1336 	uint16_t txflags;
1337 	int i;
1338 
1339 	txring = (struct tx_bd_short *)BNXT_DMA_KVA(tx->tx_ring_mem);
1340 
1341 	idx = tx->tx_ring_prod;
1342 	free = tx->tx_ring_cons;
1343 	if (free <= idx)
1344 		free += tx->tx_ring.ring_size;
1345 	free -= idx;
1346 
1347 	used = 0;
1348 
1349 	for (;;) {
1350 		/* +1 for tx_bd_long_hi */
1351 		if (used + BNXT_MAX_TX_SEGS + 1 > free) {
1352 			ifq_set_oactive(ifq);
1353 			break;
1354 		}
1355 
1356 		m = ifq_dequeue(ifq);
1357 		if (m == NULL)
1358 			break;
1359 
1360 		bs = &tx->tx_slots[tx->tx_prod];
1361 		if (bnxt_load_mbuf(sc, bs, m) != 0) {
1362 			m_freem(m);
1363 			ifp->if_oerrors++;
1364 			continue;
1365 		}
1366 
1367 #if NBPFILTER > 0
1368 		if (ifp->if_bpf)
1369 			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
1370 #endif
1371 		map = bs->bs_map;
1372 		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1373 		    BUS_DMASYNC_PREWRITE);
1374 		used += BNXT_TX_SLOTS(bs);
1375 
1376 		/* first segment */
1377 		laststart = idx;
1378 		txring[idx].len = htole16(map->dm_segs[0].ds_len);
1379 		txring[idx].opaque = tx->tx_prod;
1380 		txring[idx].addr = htole64(map->dm_segs[0].ds_addr);
1381 
1382 		if (map->dm_mapsize < 512)
1383 			txflags = TX_BD_LONG_FLAGS_LHINT_LT512;
1384 		else if (map->dm_mapsize < 1024)
1385 			txflags = TX_BD_LONG_FLAGS_LHINT_LT1K;
1386 		else if (map->dm_mapsize < 2048)
1387 			txflags = TX_BD_LONG_FLAGS_LHINT_LT2K;
1388 		else
1389 			txflags = TX_BD_LONG_FLAGS_LHINT_GTE2K;
1390 		txflags |= TX_BD_LONG_TYPE_TX_BD_LONG |
1391 		    TX_BD_LONG_FLAGS_NO_CMPL |
1392 		    (BNXT_TX_SLOTS(bs) << TX_BD_LONG_FLAGS_BD_CNT_SFT);
1393 		if (map->dm_nsegs == 1)
1394 			txflags |= TX_BD_SHORT_FLAGS_PACKET_END;
1395 		txring[idx].flags_type = htole16(txflags);
1396 
1397 		idx++;
1398 		if (idx == tx->tx_ring.ring_size)
1399 			idx = 0;
1400 
1401 		/* long tx descriptor */
1402 		txhi = (struct tx_bd_long_hi *)&txring[idx];
1403 		memset(txhi, 0, sizeof(*txhi));
1404 		txflags = 0;
1405 		if (m->m_pkthdr.csum_flags & (M_UDP_CSUM_OUT | M_TCP_CSUM_OUT))
1406 			txflags |= TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM;
1407 		if (m->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
1408 			txflags |= TX_BD_LONG_LFLAGS_IP_CHKSUM;
1409 		txhi->lflags = htole16(txflags);
1410 
1411 #if NVLAN > 0
1412 		if (m->m_flags & M_VLANTAG) {
1413 			txhi->cfa_meta = htole32(m->m_pkthdr.ether_vtag |
1414 			    TX_BD_LONG_CFA_META_VLAN_TPID_TPID8100 |
1415 			    TX_BD_LONG_CFA_META_KEY_VLAN_TAG);
1416 		}
1417 #endif
1418 
1419 		idx++;
1420 		if (idx == tx->tx_ring.ring_size)
1421 			idx = 0;
1422 
1423 		/* remaining segments */
1424 		txflags = TX_BD_SHORT_TYPE_TX_BD_SHORT;
1425 		for (i = 1; i < map->dm_nsegs; i++) {
1426 			if (i == map->dm_nsegs - 1)
1427 				txflags |= TX_BD_SHORT_FLAGS_PACKET_END;
1428 			txring[idx].flags_type = htole16(txflags);
1429 
1430 			txring[idx].len =
1431 			    htole16(bs->bs_map->dm_segs[i].ds_len);
1432 			txring[idx].opaque = tx->tx_prod;
1433 			txring[idx].addr =
1434 			    htole64(bs->bs_map->dm_segs[i].ds_addr);
1435 
1436 			idx++;
1437 			if (idx == tx->tx_ring.ring_size)
1438 				idx = 0;
1439 		}
1440 
1441 		if (++tx->tx_prod >= tx->tx_ring.ring_size)
1442 			tx->tx_prod = 0;
1443 	}
1444 
1445 	/* unset NO_CMPL on the first bd of the last packet */
1446 	if (used != 0) {
1447 		txring[laststart].flags_type &=
1448 		    ~htole16(TX_BD_SHORT_FLAGS_NO_CMPL);
1449 	}
1450 
1451 	bnxt_write_tx_doorbell(sc, &tx->tx_ring, idx);
1452 	tx->tx_ring_prod = idx;
1453 }
1454 
1455 void
1456 bnxt_handle_async_event(struct bnxt_softc *sc, struct cmpl_base *cmpl)
1457 {
1458 	struct hwrm_async_event_cmpl *ae = (struct hwrm_async_event_cmpl *)cmpl;
1459 	uint16_t type = le16toh(ae->event_id);
1460 
1461 	switch (type) {
1462 	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
1463 	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE:
1464 	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE:
1465 		bnxt_hwrm_port_phy_qcfg(sc, NULL);
1466 		break;
1467 
1468 	default:
1469 		printf("%s: unexpected async event %x\n", DEVNAME(sc), type);
1470 		break;
1471 	}
1472 }
1473 
1474 struct cmpl_base *
1475 bnxt_cpr_next_cmpl(struct bnxt_softc *sc, struct bnxt_cp_ring *cpr)
1476 {
1477 	struct cmpl_base *cmpl;
1478 	uint32_t cons;
1479 	int v_bit;
1480 
1481 	cons = cpr->cons + 1;
1482 	v_bit = cpr->v_bit;
1483 	if (cons == cpr->ring.ring_size) {
1484 		cons = 0;
1485 		v_bit = !v_bit;
1486 	}
1487 	cmpl = &((struct cmpl_base *)cpr->ring.vaddr)[cons];
1488 
1489 	if ((!!(cmpl->info3_v & htole32(CMPL_BASE_V))) != (!!v_bit))
1490 		return (NULL);
1491 
1492 	cpr->cons = cons;
1493 	cpr->v_bit = v_bit;
1494 	return (cmpl);
1495 }
1496 
1497 void
1498 bnxt_cpr_commit(struct bnxt_softc *sc, struct bnxt_cp_ring *cpr)
1499 {
1500 	cpr->commit_cons = cpr->cons;
1501 	cpr->commit_v_bit = cpr->v_bit;
1502 }
1503 
1504 void
1505 bnxt_cpr_rollback(struct bnxt_softc *sc, struct bnxt_cp_ring *cpr)
1506 {
1507 	cpr->cons = cpr->commit_cons;
1508 	cpr->v_bit = cpr->commit_v_bit;
1509 }
1510 
1511 int
1512 bnxt_admin_intr(void *xsc)
1513 {
1514 	struct bnxt_softc *sc = (struct bnxt_softc *)xsc;
1515 	struct bnxt_cp_ring *cpr = &sc->sc_cp_ring;
1516 	struct cmpl_base *cmpl;
1517 	uint16_t type;
1518 
1519 	bnxt_write_cp_doorbell(sc, &cpr->ring, 0);
1520 	cmpl = bnxt_cpr_next_cmpl(sc, cpr);
1521 	while (cmpl != NULL) {
1522 		type = le16toh(cmpl->type) & CMPL_BASE_TYPE_MASK;
1523 		switch (type) {
1524 		case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
1525 			bnxt_handle_async_event(sc, cmpl);
1526 			break;
1527 		default:
1528 			printf("%s: unexpected completion type %u\n",
1529 			    DEVNAME(sc), type);
1530 		}
1531 
1532 		bnxt_cpr_commit(sc, cpr);
1533 		cmpl = bnxt_cpr_next_cmpl(sc, cpr);
1534 	}
1535 
1536 	bnxt_write_cp_doorbell_index(sc, &cpr->ring,
1537 	    (cpr->commit_cons + 1) % cpr->ring.ring_size, 1);
1538 	return (1);
1539 }
1540 
1541 int
1542 bnxt_intr(void *xq)
1543 {
1544 	struct bnxt_queue *q = (struct bnxt_queue *)xq;
1545 	struct bnxt_softc *sc = q->q_sc;
1546 	struct ifnet *ifp = &sc->sc_ac.ac_if;
1547 	struct bnxt_cp_ring *cpr = &q->q_cp;
1548 	struct bnxt_rx_queue *rx = &q->q_rx;
1549 	struct bnxt_tx_queue *tx = &q->q_tx;
1550 	struct cmpl_base *cmpl;
1551 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
1552 	uint16_t type;
1553 	int rxfree, txfree, agfree, rv, rollback;
1554 
1555 	bnxt_write_cp_doorbell(sc, &cpr->ring, 0);
1556 	rxfree = 0;
1557 	txfree = 0;
1558 	agfree = 0;
1559 	rv = -1;
1560 	cmpl = bnxt_cpr_next_cmpl(sc, cpr);
1561 	while (cmpl != NULL) {
1562 		type = le16toh(cmpl->type) & CMPL_BASE_TYPE_MASK;
1563 		rollback = 0;
1564 		switch (type) {
1565 		case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
1566 			bnxt_handle_async_event(sc, cmpl);
1567 			break;
1568 		case CMPL_BASE_TYPE_RX_L2:
1569 			if (ISSET(ifp->if_flags, IFF_RUNNING))
1570 				rollback = bnxt_rx(sc, rx, cpr, &ml, &rxfree,
1571 				    &agfree, cmpl);
1572 			break;
1573 		case CMPL_BASE_TYPE_TX_L2:
1574 			if (ISSET(ifp->if_flags, IFF_RUNNING))
1575 				bnxt_txeof(sc, tx, &txfree, cmpl);
1576 			break;
1577 		default:
1578 			printf("%s: unexpected completion type %u\n",
1579 			    DEVNAME(sc), type);
1580 		}
1581 
1582 		if (rollback) {
1583 			bnxt_cpr_rollback(sc, cpr);
1584 			break;
1585 		}
1586 		rv = 1;
1587 		bnxt_cpr_commit(sc, cpr);
1588 		cmpl = bnxt_cpr_next_cmpl(sc, cpr);
1589 	}
1590 
1591 	/*
1592 	 * comments in bnxtreg.h suggest we should be writing cpr->cons here,
1593 	 * but writing cpr->cons + 1 makes it stop interrupting.
1594 	 */
1595 	bnxt_write_cp_doorbell_index(sc, &cpr->ring,
1596 	    (cpr->commit_cons + 1) % cpr->ring.ring_size, 1);
1597 
1598 	if (rxfree != 0) {
1599 		rx->rx_cons += rxfree;
1600 		if (rx->rx_cons >= rx->rx_ring.ring_size)
1601 			rx->rx_cons -= rx->rx_ring.ring_size;
1602 
1603 		rx->rx_ag_cons += agfree;
1604 		if (rx->rx_ag_cons >= rx->rx_ag_ring.ring_size)
1605 			rx->rx_ag_cons -= rx->rx_ag_ring.ring_size;
1606 
1607 		if_rxr_put(&rx->rxr[0], rxfree);
1608 		if_rxr_put(&rx->rxr[1], agfree);
1609 
1610 		if (ifiq_input(rx->rx_ifiq, &ml)) {
1611 			if_rxr_livelocked(&rx->rxr[0]);
1612 			if_rxr_livelocked(&rx->rxr[1]);
1613 		}
1614 
1615 		bnxt_rx_fill(q);
1616 		if ((rx->rx_cons == rx->rx_prod) ||
1617 		    (rx->rx_ag_cons == rx->rx_ag_prod))
1618 			timeout_add(&rx->rx_refill, 0);
1619 	}
1620 	if (txfree != 0) {
1621 		if (ifq_is_oactive(tx->tx_ifq))
1622 			ifq_restart(tx->tx_ifq);
1623 	}
1624 	return (rv);
1625 }
1626 
1627 void
1628 bnxt_watchdog(struct ifnet *ifp)
1629 {
1630 }
1631 
1632 void
1633 bnxt_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1634 {
1635 	struct bnxt_softc *sc = (struct bnxt_softc *)ifp->if_softc;
1636 	bnxt_hwrm_port_phy_qcfg(sc, ifmr);
1637 }
1638 
1639 uint64_t
1640 bnxt_get_media_type(uint64_t speed, int phy_type)
1641 {
1642 	switch (phy_type) {
1643 	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_UNKNOWN:
1644 	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASECR:
1645 	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASECR_CA_L:
1646 	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASECR_CA_S:
1647 	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASECR_CA_N:
1648 	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASECR4:
1649 	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASECR4:
1650 		switch (speed) {
1651 		case IF_Gbps(1):
1652 			return IFM_1000_T;
1653 		case IF_Gbps(10):
1654 			return IFM_10G_SFP_CU;
1655 		case IF_Gbps(25):
1656 			return IFM_25G_CR;
1657 		case IF_Gbps(40):
1658 			return IFM_40G_CR4;
1659 		case IF_Gbps(50):
1660 			return IFM_50G_CR2;
1661 		case IF_Gbps(100):
1662 			return IFM_100G_CR4;
1663 		}
1664 		break;
1665 
1666 	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASELR:
1667 	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASELR4:
1668 	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASELR4:
1669 		switch (speed) {
1670 		case IF_Gbps(1):
1671 			return IFM_1000_LX;
1672 		case IF_Gbps(10):
1673 			return IFM_10G_LR;
1674 		case IF_Gbps(25):
1675 			return IFM_25G_LR;
1676 		case IF_Gbps(40):
1677 			return IFM_40G_LR4;
1678 		case IF_Gbps(100):
1679 			return IFM_100G_LR4;
1680 		}
1681 		break;
1682 
1683 	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASESR:
1684 	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASESR:
1685 	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASESR4:
1686 	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASESR10:
1687 	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_1G_BASESX:
1688 		switch (speed) {
1689 		case IF_Gbps(1):
1690 			return IFM_1000_SX;
1691 		case IF_Gbps(10):
1692 			return IFM_10G_SR;
1693 		case IF_Gbps(25):
1694 			return IFM_25G_SR;
1695 		case IF_Gbps(40):
1696 			return IFM_40G_SR4;
1697 		case IF_Gbps(100):
1698 			return IFM_100G_SR4;
1699 		}
1700 		break;
1701 
1702 	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASEER4:
1703 	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASEER4:
1704 		switch (speed) {
1705 		case IF_Gbps(10):
1706 			return IFM_10G_ER;
1707 		case IF_Gbps(25):
1708 			return IFM_25G_ER;
1709 		}
1710 		/* missing IFM_40G_ER4, IFM_100G_ER4 */
1711 		break;
1712 
1713 	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR4:
1714 	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR2:
1715 	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR:
1716 		switch (speed) {
1717 		case IF_Gbps(10):
1718 			return IFM_10G_KR;
1719 		case IF_Gbps(20):
1720 			return IFM_20G_KR2;
1721 		case IF_Gbps(25):
1722 			return IFM_25G_KR;
1723 		case IF_Gbps(40):
1724 			return IFM_40G_KR4;
1725 		case IF_Gbps(50):
1726 			return IFM_50G_KR2;
1727 		case IF_Gbps(100):
1728 			return IFM_100G_KR4;
1729 		}
1730 		break;
1731 
1732 	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKX:
1733 		switch (speed) {
1734 		case IF_Gbps(1):
1735 			return IFM_1000_KX;
1736 		case IF_Mbps(2500):
1737 			return IFM_2500_KX;
1738 		case IF_Gbps(10):
1739 			return IFM_10G_KX4;
1740 		}
1741 		break;
1742 
1743 	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET:
1744 	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE:
1745 	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_1G_BASET:
1746 		switch (speed) {
1747 		case IF_Mbps(10):
1748 			return IFM_10_T;
1749 		case IF_Mbps(100):
1750 			return IFM_100_TX;
1751 		case IF_Gbps(1):
1752 			return IFM_1000_T;
1753 		case IF_Mbps(2500):
1754 			return IFM_2500_T;
1755 		case IF_Gbps(10):
1756 			return IFM_10G_T;
1757 		}
1758 		break;
1759 
1760 	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_SGMIIEXTPHY:
1761 		switch (speed) {
1762 		case IF_Gbps(1):
1763 			return IFM_1000_SGMII;
1764 		}
1765 		break;
1766 
1767 	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_ACTIVE_CABLE:
1768 		switch (speed) {
1769 		case IF_Gbps(10):
1770 			return IFM_10G_AOC;
1771 		case IF_Gbps(25):
1772 			return IFM_25G_AOC;
1773 		case IF_Gbps(40):
1774 			return IFM_40G_AOC;
1775 		case IF_Gbps(100):
1776 			return IFM_100G_AOC;
1777 		}
1778 		break;
1779 	}
1780 
1781 	return 0;
1782 }
1783 
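/*
 * Add an ifmedia entry for the given speed, but only if the firmware
 * reports the corresponding bit in support_speeds.
 */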
1784 void
bnxt_add_media_type(struct bnxt_softc *sc, int supported_speeds,
    uint64_t speed, uint64_t ifmt)
1786 {
1787 	int speed_bit = 0;
1788 	switch (speed) {
1789 	case IF_Gbps(1):
1790 		speed_bit = HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_1GB;
1791 		break;
1792 	case IF_Gbps(2):
1793 		speed_bit = HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2GB;
1794 		break;
1795 	case IF_Mbps(2500):
1796 		speed_bit = HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2_5GB;
1797 		break;
1798 	case IF_Gbps(10):
1799 		speed_bit = HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10GB;
1800 		break;
1801 	case IF_Gbps(20):
1802 		speed_bit = HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_20GB;
1803 		break;
1804 	case IF_Gbps(25):
1805 		speed_bit = HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_25GB;
1806 		break;
1807 	case IF_Gbps(40):
1808 		speed_bit = HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_40GB;
1809 		break;
1810 	case IF_Gbps(50):
1811 		speed_bit = HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_50GB;
1812 		break;
1813 	case IF_Gbps(100):
1814 		speed_bit = HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100GB;
1815 		break;
1816 	}
1817 	if (supported_speeds & speed_bit)
1818 		ifmedia_add(&sc->sc_media, IFM_ETHER | ifmt, 0, NULL);
1819 }
1820 
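/*
 * Query the PHY configuration from the firmware, rebuild the media
 * list from the supported speeds, fill in ifmr (if given) and update
 * the interface link state.
 */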
1821 int
1822 bnxt_hwrm_port_phy_qcfg(struct bnxt_softc *softc, struct ifmediareq *ifmr)
1823 {
1824 	struct ifnet *ifp = &softc->sc_ac.ac_if;
1825 	struct hwrm_port_phy_qcfg_input req = {0};
1826 	struct hwrm_port_phy_qcfg_output *resp =
1827 	    BNXT_DMA_KVA(softc->sc_cmd_resp);
1828 	int link_state = LINK_STATE_DOWN;
1829 	uint64_t speeds[] = {
1830 		IF_Gbps(1), IF_Gbps(2), IF_Mbps(2500), IF_Gbps(10), IF_Gbps(20),
1831 		IF_Gbps(25), IF_Gbps(40), IF_Gbps(50), IF_Gbps(100)
1832 	};
1833 	uint64_t media_type;
1834 	int duplex;
1835 	int rc = 0;
1836 	int i;
1837 
1838 	BNXT_HWRM_LOCK(softc);
1839 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_PORT_PHY_QCFG);
1840 
1841 	rc = _hwrm_send_message(softc, &req, sizeof(req));
1842 	if (rc) {
1843 		printf("%s: failed to query port phy config\n", DEVNAME(softc));
1844 		goto exit;
1845 	}
1846 
1847 	if (softc->sc_hwrm_ver > 0x10800)
1848 		duplex = resp->duplex_state;
1849 	else
1850 		duplex = resp->duplex_cfg;
1851 
1852 	if (resp->link == HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) {
1853 		if (duplex == HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_STATE_HALF)
1854 			link_state = LINK_STATE_HALF_DUPLEX;
1855 		else
1856 			link_state = LINK_STATE_FULL_DUPLEX;
1857 
1858 		switch (resp->link_speed) {
1859 		case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10MB:
1860 			ifp->if_baudrate = IF_Mbps(10);
1861 			break;
1862 		case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
1863 			ifp->if_baudrate = IF_Mbps(100);
1864 			break;
1865 		case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
1866 			ifp->if_baudrate = IF_Gbps(1);
1867 			break;
1868 		case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
1869 			ifp->if_baudrate = IF_Gbps(2);
1870 			break;
1871 		case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
1872 			ifp->if_baudrate = IF_Mbps(2500);
1873 			break;
1874 		case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
1875 			ifp->if_baudrate = IF_Gbps(10);
1876 			break;
1877 		case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
1878 			ifp->if_baudrate = IF_Gbps(20);
1879 			break;
1880 		case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
1881 			ifp->if_baudrate = IF_Gbps(25);
1882 			break;
1883 		case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
1884 			ifp->if_baudrate = IF_Gbps(40);
1885 			break;
1886 		case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
1887 			ifp->if_baudrate = IF_Gbps(50);
1888 			break;
1889 		case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB:
1890 			ifp->if_baudrate = IF_Gbps(100);
1891 			break;
1892 		}
1893 	}
1894 
1895 	ifmedia_delete_instance(&softc->sc_media, IFM_INST_ANY);
1896 	for (i = 0; i < nitems(speeds); i++) {
1897 		media_type = bnxt_get_media_type(speeds[i], resp->phy_type);
1898 		if (media_type != 0)
1899 			bnxt_add_media_type(softc, resp->support_speeds,
1900 			    speeds[i], media_type);
1901 	}
1902 	ifmedia_add(&softc->sc_media, IFM_ETHER|IFM_AUTO, 0, NULL);
1903 	ifmedia_set(&softc->sc_media, IFM_ETHER|IFM_AUTO);
1904 
1905 	if (ifmr != NULL) {
1906 		ifmr->ifm_status = IFM_AVALID;
1907 		if (LINK_STATE_IS_UP(ifp->if_link_state)) {
1908 			ifmr->ifm_status |= IFM_ACTIVE;
1909 			ifmr->ifm_active = IFM_ETHER | IFM_AUTO;
1910 			if (resp->pause & HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX)
1911 				ifmr->ifm_active |= IFM_ETH_TXPAUSE;
1912 			if (resp->pause & HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX)
1913 				ifmr->ifm_active |= IFM_ETH_RXPAUSE;
1914 			if (duplex == HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_STATE_HALF)
1915 				ifmr->ifm_active |= IFM_HDX;
1916 			else
1917 				ifmr->ifm_active |= IFM_FDX;
1918 
1919 			media_type = bnxt_get_media_type(ifp->if_baudrate, resp->phy_type);
1920 			if (media_type != 0)
1921 				ifmr->ifm_active |= media_type;
1922 		}
1923 	}
1924 
1925 exit:
1926 	BNXT_HWRM_UNLOCK(softc);
1927 
1928 	if (rc == 0 && (link_state != ifp->if_link_state)) {
1929 		ifp->if_link_state = link_state;
1930 		if_link_state_change(ifp);
1931 	}
1932 
1933 	return rc;
1934 }
1935 
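/*
 * Apply a media selection: force the link speed matching the chosen
 * subtype, or restart autonegotiation if no fixed speed was selected.
 */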
1936 int
1937 bnxt_media_change(struct ifnet *ifp)
1938 {
1939 	struct bnxt_softc *sc = (struct bnxt_softc *)ifp->if_softc;
1940 	struct hwrm_port_phy_cfg_input req = {0};
1941 	uint64_t link_speed;
1942 
1943 	if (IFM_TYPE(sc->sc_media.ifm_media) != IFM_ETHER)
1944 		return EINVAL;
1945 
1946 	if (sc->sc_flags & BNXT_FLAG_NPAR)
1947 		return ENODEV;
1948 
1949 	bnxt_hwrm_cmd_hdr_init(sc, &req, HWRM_PORT_PHY_CFG);
1950 
1951 	switch (IFM_SUBTYPE(sc->sc_media.ifm_media)) {
1952 	case IFM_100G_CR4:
1953 	case IFM_100G_SR4:
1954 	case IFM_100G_KR4:
1955 	case IFM_100G_LR4:
1956 	case IFM_100G_AOC:
1957 		link_speed = HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_100GB;
1958 		break;
1959 
1960 	case IFM_50G_CR2:
1961 	case IFM_50G_KR2:
1962 		link_speed = HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_50GB;
1963 		break;
1964 
1965 	case IFM_40G_CR4:
1966 	case IFM_40G_SR4:
1967 	case IFM_40G_LR4:
1968 	case IFM_40G_KR4:
1969 	case IFM_40G_AOC:
1970 		link_speed = HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_40GB;
1971 		break;
1972 
1973 	case IFM_25G_CR:
1974 	case IFM_25G_KR:
1975 	case IFM_25G_SR:
1976 	case IFM_25G_LR:
1977 	case IFM_25G_ER:
1978 	case IFM_25G_AOC:
1979 		link_speed = HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_25GB;
1980 		break;
1981 
1982 	case IFM_10G_LR:
1983 	case IFM_10G_SR:
1984 	case IFM_10G_CX4:
1985 	case IFM_10G_T:
1986 	case IFM_10G_SFP_CU:
1987 	case IFM_10G_LRM:
1988 	case IFM_10G_KX4:
1989 	case IFM_10G_KR:
1990 	case IFM_10G_CR1:
1991 	case IFM_10G_ER:
1992 	case IFM_10G_AOC:
1993 		link_speed = HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_10GB;
1994 		break;
1995 
1996 	case IFM_2500_SX:
1997 	case IFM_2500_KX:
1998 	case IFM_2500_T:
1999 		link_speed = HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_2_5GB;
2000 		break;
2001 
2002 	case IFM_1000_T:
2003 	case IFM_1000_LX:
2004 	case IFM_1000_SX:
2005 	case IFM_1000_CX:
2006 	case IFM_1000_KX:
2007 		link_speed = HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_1GB;
2008 		break;
2009 
2010 	case IFM_100_TX:
2011 		link_speed = HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_100MB;
2012 		break;
2013 
2014 	default:
2015 		link_speed = 0;
2016 	}
2017 
2018 	req.enables |= htole32(HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX);
2019 	req.auto_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
2020 	if (link_speed == 0) {
2021 		req.auto_mode |=
2022 		    HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS;
2023 		req.flags |=
2024 		    htole32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG);
2025 		req.enables |=
2026 		    htole32(HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE);
2027 	} else {
2028 		req.force_link_speed = htole16(link_speed);
2029 		req.flags |= htole32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE);
2030 	}
2031 	req.flags |= htole32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY);
2032 
2033 	return hwrm_send_message(sc, &req, sizeof(req));
2034 }
2035 
2036 int
2037 bnxt_media_autonegotiate(struct bnxt_softc *sc)
2038 {
2039 	struct hwrm_port_phy_cfg_input req = {0};
2040 
2041 	if (sc->sc_flags & BNXT_FLAG_NPAR)
2042 		return ENODEV;
2043 
2044 	bnxt_hwrm_cmd_hdr_init(sc, &req, HWRM_PORT_PHY_CFG);
2045 	req.auto_mode |= HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS;
2046 	req.auto_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
2047 	req.enables |= htole32(HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE |
2048 	    HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX);
2049 	req.flags |= htole32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG);
2050 	req.flags |= htole32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY);
2051 
2052 	return hwrm_send_message(sc, &req, sizeof(req));
2053 }
2054 
2055 
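/*
 * The firmware toggles the valid bit in completion entries each time
 * it cycles through the ring.  Setting every entry to the inverse of
 * the current v_bit marks the whole ring as not yet written.
 */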
2056 void
2057 bnxt_mark_cpr_invalid(struct bnxt_cp_ring *cpr)
2058 {
2059 	struct cmpl_base *cmp = (void *)cpr->ring.vaddr;
2060 	int i;
2061 
2062 	for (i = 0; i < cpr->ring.ring_size; i++)
2063 		cmp[i].info3_v = !cpr->v_bit;
2064 }
2065 
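/*
 * Doorbell writes: a write barrier followed by a 32-bit little-endian
 * store at the ring's offset in the doorbell BAR.  CMPL_DOORBELL_MASK
 * leaves the completion ring's interrupt masked.
 */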
2066 void
2067 bnxt_write_cp_doorbell(struct bnxt_softc *sc, struct bnxt_ring *ring,
2068     int enable)
2069 {
2070 	uint32_t val = CMPL_DOORBELL_KEY_CMPL;
2071 	if (enable == 0)
2072 		val |= CMPL_DOORBELL_MASK;
2073 
2074 	bus_space_barrier(sc->sc_db_t, sc->sc_db_h, ring->doorbell, 4,
2075 	    BUS_SPACE_BARRIER_WRITE);
2076 	bus_space_barrier(sc->sc_db_t, sc->sc_db_h, 0, sc->sc_db_s,
2077 	    BUS_SPACE_BARRIER_WRITE);
2078 	bus_space_write_4(sc->sc_db_t, sc->sc_db_h, ring->doorbell,
2079 	    htole32(val));
2080 }
2081 
2082 void
2083 bnxt_write_cp_doorbell_index(struct bnxt_softc *sc, struct bnxt_ring *ring,
2084     uint32_t index, int enable)
2085 {
2086 	uint32_t val = CMPL_DOORBELL_KEY_CMPL | CMPL_DOORBELL_IDX_VALID |
2087 	    (index & CMPL_DOORBELL_IDX_MASK);
2088 	if (enable == 0)
2089 		val |= CMPL_DOORBELL_MASK;
2090 	bus_space_barrier(sc->sc_db_t, sc->sc_db_h, ring->doorbell, 4,
2091 	    BUS_SPACE_BARRIER_WRITE);
2092 	bus_space_write_4(sc->sc_db_t, sc->sc_db_h, ring->doorbell,
2093 	    htole32(val));
2094 	bus_space_barrier(sc->sc_db_t, sc->sc_db_h, 0, sc->sc_db_s,
2095 	    BUS_SPACE_BARRIER_WRITE);
2096 }
2097 
2098 void
2099 bnxt_write_rx_doorbell(struct bnxt_softc *sc, struct bnxt_ring *ring, int index)
2100 {
2101 	uint32_t val = RX_DOORBELL_KEY_RX | index;
2102 	bus_space_barrier(sc->sc_db_t, sc->sc_db_h, ring->doorbell, 4,
2103 	    BUS_SPACE_BARRIER_WRITE);
2104 	bus_space_write_4(sc->sc_db_t, sc->sc_db_h, ring->doorbell,
2105 	    htole32(val));
2106 
2107 	/* second write isn't necessary on all hardware */
2108 	bus_space_barrier(sc->sc_db_t, sc->sc_db_h, ring->doorbell, 4,
2109 	    BUS_SPACE_BARRIER_WRITE);
2110 	bus_space_write_4(sc->sc_db_t, sc->sc_db_h, ring->doorbell,
2111 	    htole32(val));
2112 }
2113 
2114 void
2115 bnxt_write_tx_doorbell(struct bnxt_softc *sc, struct bnxt_ring *ring, int index)
2116 {
2117 	uint32_t val = TX_DOORBELL_KEY_TX | index;
2118 	bus_space_barrier(sc->sc_db_t, sc->sc_db_h, ring->doorbell, 4,
2119 	    BUS_SPACE_BARRIER_WRITE);
2120 	bus_space_write_4(sc->sc_db_t, sc->sc_db_h, ring->doorbell,
2121 	    htole32(val));
2122 
2123 	/* second write isn't necessary on all hardware */
2124 	bus_space_barrier(sc->sc_db_t, sc->sc_db_h, ring->doorbell, 4,
2125 	    BUS_SPACE_BARRIER_WRITE);
2126 	bus_space_write_4(sc->sc_db_t, sc->sc_db_h, ring->doorbell,
2127 	    htole32(val));
2128 }
2129 
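/*
 * Fill up to nslots rx descriptors with freshly allocated mbuf
 * clusters, advance the producer index and ring the rx doorbell.
 * Returns the number of slots that could not be filled.
 */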
2130 u_int
2131 bnxt_rx_fill_slots(struct bnxt_softc *sc, struct bnxt_ring *ring, void *ring_mem,
2132     struct bnxt_slot *slots, uint *prod, int bufsize, uint16_t bdtype,
2133     u_int nslots)
2134 {
2135 	struct rx_prod_pkt_bd *rxring;
2136 	struct bnxt_slot *bs;
2137 	struct mbuf *m;
2138 	uint p, fills;
2139 
2140 	rxring = (struct rx_prod_pkt_bd *)ring_mem;
2141 	p = *prod;
2142 	for (fills = 0; fills < nslots; fills++) {
2143 		bs = &slots[p];
2144 		m = MCLGETL(NULL, M_DONTWAIT, bufsize);
2145 		if (m == NULL)
2146 			break;
2147 
2148 		m->m_len = m->m_pkthdr.len = bufsize;
2149 		if (bus_dmamap_load_mbuf(sc->sc_dmat, bs->bs_map, m,
2150 		    BUS_DMA_NOWAIT) != 0) {
2151 			m_freem(m);
2152 			break;
2153 		}
2154 		bs->bs_m = m;
2155 
2156 		rxring[p].flags_type = htole16(bdtype);
2157 		rxring[p].len = htole16(bufsize);
2158 		rxring[p].opaque = p;
2159 		rxring[p].addr = htole64(bs->bs_map->dm_segs[0].ds_addr);
2160 
2161 		if (++p >= ring->ring_size)
2162 			p = 0;
2163 	}
2164 
2165 	if (fills != 0)
2166 		bnxt_write_rx_doorbell(sc, ring, p);
2167 	*prod = p;
2168 
2169 	return (nslots - fills);
2170 }
2171 
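/*
 * Replenish the rx ring and the aggregation ring, using if_rxr to
 * account for the available slots.  Returns nonzero if either ring
 * had no slots to fill.
 */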
2172 int
2173 bnxt_rx_fill(struct bnxt_queue *q)
2174 {
2175 	struct bnxt_rx_queue *rx = &q->q_rx;
2176 	struct bnxt_softc *sc = q->q_sc;
2177 	u_int slots;
2178 	int rv = 0;
2179 
2180 	slots = if_rxr_get(&rx->rxr[0], rx->rx_ring.ring_size);
2181 	if (slots > 0) {
2182 		slots = bnxt_rx_fill_slots(sc, &rx->rx_ring,
2183 		    BNXT_DMA_KVA(rx->rx_ring_mem), rx->rx_slots,
2184 		    &rx->rx_prod, MCLBYTES,
2185 		    RX_PROD_PKT_BD_TYPE_RX_PROD_PKT, slots);
2186 		if_rxr_put(&rx->rxr[0], slots);
2187 	} else
2188 		rv = 1;
2189 
	slots = if_rxr_get(&rx->rxr[1], rx->rx_ag_ring.ring_size);
2191 	if (slots > 0) {
2192 		slots = bnxt_rx_fill_slots(sc, &rx->rx_ag_ring,
2193 		    BNXT_DMA_KVA(rx->rx_ring_mem) + PAGE_SIZE,
2194 		    rx->rx_ag_slots, &rx->rx_ag_prod,
2195 		    BNXT_AG_BUFFER_SIZE,
2196 		    RX_PROD_AGG_BD_TYPE_RX_PROD_AGG, slots);
2197 		if_rxr_put(&rx->rxr[1], slots);
2198 	} else
2199 		rv = 1;
2200 
2201 	return (rv);
2202 }
2203 
2204 void
2205 bnxt_refill(void *xq)
2206 {
2207 	struct bnxt_queue *q = xq;
2208 	struct bnxt_rx_queue *rx = &q->q_rx;
2209 
2210 	bnxt_rx_fill(q);
2211 
2212 	if (rx->rx_cons == rx->rx_prod)
2213 		timeout_add(&rx->rx_refill, 1);
2214 }
2215 
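/*
 * Consume one rx completion: the second half of the two-entry
 * completion, plus an optional aggregation buffer entry, must already
 * be visible in the completion ring.  Builds the mbuf (chain), sets
 * checksum, VLAN and RSS flags and queues it on ml.  Returns nonzero
 * if the remaining entries have not been written yet.
 */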
2216 int
2217 bnxt_rx(struct bnxt_softc *sc, struct bnxt_rx_queue *rx,
2218     struct bnxt_cp_ring *cpr, struct mbuf_list *ml, int *slots, int *agslots,
2219     struct cmpl_base *cmpl)
2220 {
2221 	struct mbuf *m, *am;
2222 	struct bnxt_slot *bs;
2223 	struct rx_pkt_cmpl *rxlo = (struct rx_pkt_cmpl *)cmpl;
2224 	struct rx_pkt_cmpl_hi *rxhi;
2225 	struct rx_abuf_cmpl *ag;
2226 	uint32_t flags;
2227 	uint16_t errors;
2228 
2229 	/* second part of the rx completion */
2230 	rxhi = (struct rx_pkt_cmpl_hi *)bnxt_cpr_next_cmpl(sc, cpr);
2231 	if (rxhi == NULL) {
2232 		return (1);
2233 	}
2234 
2235 	/* packets over 2k in size use an aggregation buffer completion too */
2236 	ag = NULL;
2237 	if ((rxlo->agg_bufs_v1 >> RX_PKT_CMPL_AGG_BUFS_SFT) != 0) {
2238 		ag = (struct rx_abuf_cmpl *)bnxt_cpr_next_cmpl(sc, cpr);
2239 		if (ag == NULL) {
2240 			return (1);
2241 		}
2242 	}
2243 
2244 	bs = &rx->rx_slots[rxlo->opaque];
2245 	bus_dmamap_sync(sc->sc_dmat, bs->bs_map, 0, bs->bs_map->dm_mapsize,
2246 	    BUS_DMASYNC_POSTREAD);
2247 	bus_dmamap_unload(sc->sc_dmat, bs->bs_map);
2248 
2249 	m = bs->bs_m;
2250 	bs->bs_m = NULL;
2251 	m->m_pkthdr.len = m->m_len = letoh16(rxlo->len);
2252 	(*slots)++;
2253 
2254 	/* checksum flags */
2255 	flags = lemtoh32(&rxhi->flags2);
2256 	errors = lemtoh16(&rxhi->errors_v2);
2257 	if ((flags & RX_PKT_CMPL_FLAGS2_IP_CS_CALC) != 0 &&
2258 	    (errors & RX_PKT_CMPL_ERRORS_IP_CS_ERROR) == 0)
2259 		m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
2260 
2261 	if ((flags & RX_PKT_CMPL_FLAGS2_L4_CS_CALC) != 0 &&
2262 	    (errors & RX_PKT_CMPL_ERRORS_L4_CS_ERROR) == 0)
2263 		m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK |
2264 		    M_UDP_CSUM_IN_OK;
2265 
2266 #if NVLAN > 0
2267 	if ((flags & RX_PKT_CMPL_FLAGS2_META_FORMAT_MASK) ==
2268 	    RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN) {
2269 		m->m_pkthdr.ether_vtag = lemtoh16(&rxhi->metadata);
2270 		m->m_flags |= M_VLANTAG;
2271 	}
2272 #endif
2273 
2274 	if (lemtoh16(&rxlo->flags_type) & RX_PKT_CMPL_FLAGS_RSS_VALID) {
2275 		m->m_pkthdr.ph_flowid = lemtoh32(&rxlo->rss_hash);
2276 		m->m_pkthdr.csum_flags |= M_FLOWID;
2277 	}
2278 
2279 	if (ag != NULL) {
2280 		bs = &rx->rx_ag_slots[ag->opaque];
2281 		bus_dmamap_sync(sc->sc_dmat, bs->bs_map, 0,
2282 		    bs->bs_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
2283 		bus_dmamap_unload(sc->sc_dmat, bs->bs_map);
2284 
2285 		am = bs->bs_m;
2286 		bs->bs_m = NULL;
2287 		am->m_len = letoh16(ag->len);
2288 		m->m_next = am;
2289 		m->m_pkthdr.len += am->m_len;
2290 		(*agslots)++;
2291 	}
2292 
2293 	ml_enqueue(ml, m);
2294 	return (0);
2295 }
2296 
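/*
 * Reclaim transmitted slots up to and including the one named by the
 * completion's opaque field, unloading DMA maps and freeing mbufs.
 * txfree accumulates the number of descriptors made available.
 */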
2297 void
2298 bnxt_txeof(struct bnxt_softc *sc, struct bnxt_tx_queue *tx, int *txfree,
2299     struct cmpl_base *cmpl)
2300 {
2301 	struct tx_cmpl *txcmpl = (struct tx_cmpl *)cmpl;
2302 	struct bnxt_slot *bs;
2303 	bus_dmamap_t map;
2304 	u_int idx, segs, last;
2305 
2306 	idx = tx->tx_ring_cons;
2307 	last = tx->tx_cons;
2308 	do {
2309 		bs = &tx->tx_slots[tx->tx_cons];
2310 		map = bs->bs_map;
2311 
2312 		segs = BNXT_TX_SLOTS(bs);
2313 		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2314 		    BUS_DMASYNC_POSTWRITE);
2315 		bus_dmamap_unload(sc->sc_dmat, map);
2316 		m_freem(bs->bs_m);
2317 		bs->bs_m = NULL;
2318 
2319 		idx += segs;
2320 		(*txfree) += segs;
2321 		if (idx >= tx->tx_ring.ring_size)
2322 			idx -= tx->tx_ring.ring_size;
2323 
2324 		last = tx->tx_cons;
2325 		if (++tx->tx_cons >= tx->tx_ring.ring_size)
2326 			tx->tx_cons = 0;
2327 
2328 	} while (last != txcmpl->opaque);
2329 	tx->tx_ring_cons = idx;
2330 }
2331 
2332 /* bnxt_hwrm.c */
2333 
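/* Map an HWRM firmware error code to an errno value. */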
2334 int
2335 bnxt_hwrm_err_map(uint16_t err)
2336 {
2339 	switch (err) {
2340 	case HWRM_ERR_CODE_SUCCESS:
2341 		return 0;
2342 	case HWRM_ERR_CODE_INVALID_PARAMS:
2343 	case HWRM_ERR_CODE_INVALID_FLAGS:
2344 	case HWRM_ERR_CODE_INVALID_ENABLES:
2345 		return EINVAL;
2346 	case HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED:
2347 		return EACCES;
2348 	case HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR:
2349 		return ENOMEM;
2350 	case HWRM_ERR_CODE_CMD_NOT_SUPPORTED:
2351 		return ENOSYS;
2352 	case HWRM_ERR_CODE_FAIL:
2353 		return EIO;
2354 	case HWRM_ERR_CODE_HWRM_ERROR:
2355 	case HWRM_ERR_CODE_UNKNOWN_ERR:
2356 	default:
2357 		return EIO;
2358 	}
2361 }
2362 
2363 void
2364 bnxt_hwrm_cmd_hdr_init(struct bnxt_softc *softc, void *request,
2365     uint16_t req_type)
2366 {
2367 	struct input *req = request;
2368 
2369 	req->req_type = htole16(req_type);
2370 	req->cmpl_ring = 0xffff;
2371 	req->target_id = 0xffff;
2372 	req->resp_addr = htole64(BNXT_DMA_DVA(softc->sc_cmd_resp));
2373 }
2374 
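/*
 * Send an HWRM request and wait for the response.  The request words
 * are written into the HWRM window (using the short command format
 * when the firmware requires it), the rest of the window is zeroed,
 * and the doorbell at offset 0x100 is rung.  The firmware DMAs the
 * response into sc_cmd_resp; completion is detected by polling for a
 * nonzero response length and then for the valid key in the last
 * byte.  Must be called with the HWRM lock held.
 */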
2375 int
2376 _hwrm_send_message(struct bnxt_softc *softc, void *msg, uint32_t msg_len)
2377 {
2378 	struct input *req = msg;
2379 	struct hwrm_err_output *resp = BNXT_DMA_KVA(softc->sc_cmd_resp);
2380 	uint32_t *data = msg;
2381 	int i;
2382 	uint8_t *valid;
2383 	uint16_t err;
2384 	uint16_t max_req_len = HWRM_MAX_REQ_LEN;
2385 	struct hwrm_short_input short_input = {0};
2386 
2387 	/* TODO: DMASYNC in here. */
2388 	req->seq_id = htole16(softc->sc_cmd_seq++);
2389 	memset(resp, 0, PAGE_SIZE);
2390 
2391 	if (softc->sc_flags & BNXT_FLAG_SHORT_CMD) {
2392 		void *short_cmd_req = BNXT_DMA_KVA(softc->sc_cmd_resp);
2393 
2394 		memcpy(short_cmd_req, req, msg_len);
2395 		memset((uint8_t *) short_cmd_req + msg_len, 0,
2396 		    softc->sc_max_req_len - msg_len);
2397 
2398 		short_input.req_type = req->req_type;
2399 		short_input.signature =
2400 		    htole16(HWRM_SHORT_INPUT_SIGNATURE_SHORT_CMD);
2401 		short_input.size = htole16(msg_len);
2402 		short_input.req_addr =
2403 		    htole64(BNXT_DMA_DVA(softc->sc_cmd_resp));
2404 
2405 		data = (uint32_t *)&short_input;
2406 		msg_len = sizeof(short_input);
2407 
2408 		/* Sync memory write before updating doorbell */
2409 		membar_sync();
2410 
2411 		max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
2412 	}
2413 
2414 	/* Write request msg to hwrm channel */
	/* Write request msg to hwrm channel */
	for (i = 0; i < msg_len; i += 4) {
		bus_space_write_4(softc->sc_hwrm_t, softc->sc_hwrm_h,
		    i, *data);
		data++;
	}
2421 
2422 	/* Clear to the end of the request buffer */
2423 	for (i = msg_len; i < max_req_len; i += 4)
2424 		bus_space_write_4(softc->sc_hwrm_t, softc->sc_hwrm_h,
2425 		    i, 0);
2426 
2427 	/* Ring channel doorbell */
2428 	bus_space_write_4(softc->sc_hwrm_t, softc->sc_hwrm_h, 0x100,
2429 	    htole32(1));
2430 
2431 	/* Check if response len is updated */
2432 	for (i = 0; i < softc->sc_cmd_timeo; i++) {
		if (le16toh(resp->resp_len) && le16toh(resp->resp_len) <= 4096)
2434 			break;
2435 		DELAY(1000);
2436 	}
2437 	if (i >= softc->sc_cmd_timeo) {
2438 		printf("%s: timeout sending %s: (timeout: %u) seq: %d\n",
2439 		    DEVNAME(softc), GET_HWRM_REQ_TYPE(req->req_type),
2440 		    softc->sc_cmd_timeo,
2441 		    le16toh(req->seq_id));
2442 		return ETIMEDOUT;
2443 	}
2444 	/* Last byte of resp contains the valid key */
	valid = (uint8_t *)resp + le16toh(resp->resp_len) - 1;
2446 	for (i = 0; i < softc->sc_cmd_timeo; i++) {
2447 		if (*valid == HWRM_RESP_VALID_KEY)
2448 			break;
2449 		DELAY(1000);
2450 	}
2451 	if (i >= softc->sc_cmd_timeo) {
2452 		printf("%s: timeout sending %s: "
2453 		    "(timeout: %u) msg {0x%x 0x%x} len:%d v: %d\n",
2454 		    DEVNAME(softc), GET_HWRM_REQ_TYPE(req->req_type),
2455 		    softc->sc_cmd_timeo, le16toh(req->req_type),
2456 		    le16toh(req->seq_id), msg_len,
2457 		    *valid);
2458 		return ETIMEDOUT;
2459 	}
2460 
2461 	err = le16toh(resp->error_code);
2462 	if (err) {
2463 		/* HWRM_ERR_CODE_FAIL is a "normal" error, don't log */
2464 		if (err != HWRM_ERR_CODE_FAIL) {
2465 			printf("%s: %s command returned %s error.\n",
2466 			    DEVNAME(softc),
2467 			    GET_HWRM_REQ_TYPE(req->req_type),
2468 			    GET_HWRM_ERROR_CODE(err));
2469 		}
2470 		return bnxt_hwrm_err_map(err);
2471 	}
2472 
2473 	return 0;
2474 }
2475 
2476 
2477 int
2478 hwrm_send_message(struct bnxt_softc *softc, void *msg, uint32_t msg_len)
2479 {
2480 	int rc;
2481 
2482 	BNXT_HWRM_LOCK(softc);
2483 	rc = _hwrm_send_message(softc, msg, msg_len);
2484 	BNXT_HWRM_UNLOCK(softc);
2485 	return rc;
2486 }
2487 
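/*
 * A minimal caller, mirroring bnxt_hwrm_func_reset() below: zero the
 * request, fill in the common header, set any request fields (in
 * little-endian form) and send:
 *
 *	struct hwrm_func_reset_input req = {0};
 *
 *	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_RESET);
 *	req.enables = 0;
 *	return hwrm_send_message(softc, &req, sizeof(req));
 */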
2488 
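/*
 * Fetch the port queue configuration; only the first queue id is kept,
 * for use when allocating tx rings.
 */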
2489 int
2490 bnxt_hwrm_queue_qportcfg(struct bnxt_softc *softc)
2491 {
2492 	struct hwrm_queue_qportcfg_input req = {0};
2493 	struct hwrm_queue_qportcfg_output *resp =
2494 	    BNXT_DMA_KVA(softc->sc_cmd_resp);
2495 	int	rc = 0;
2496 
2497 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_QUEUE_QPORTCFG);
2498 
2499 	BNXT_HWRM_LOCK(softc);
2500 	rc = _hwrm_send_message(softc, &req, sizeof(req));
2501 	if (rc)
2502 		goto qportcfg_exit;
2503 
2504 	if (!resp->max_configurable_queues) {
		rc = EINVAL;
2506 		goto qportcfg_exit;
2507 	}
2508 
2509 	softc->sc_tx_queue_id = resp->queue_id0;
2510 
2511 qportcfg_exit:
2512 	BNXT_HWRM_UNLOCK(softc);
2513 	return rc;
2514 }
2515 
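/*
 * Negotiate with the firmware: report the driver's HWRM interface
 * version, read back the firmware version, maximum request length and
 * default command timeout, and detect whether the short command
 * format is required.
 */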
2516 int
2517 bnxt_hwrm_ver_get(struct bnxt_softc *softc)
2518 {
2519 	struct hwrm_ver_get_input	req = {0};
2520 	struct hwrm_ver_get_output	*resp =
2521 	    BNXT_DMA_KVA(softc->sc_cmd_resp);
2522 	int				rc;
2523 #if 0
2524 	const char nastr[] = "<not installed>";
2525 	const char naver[] = "<N/A>";
2526 #endif
2527 	uint32_t dev_caps_cfg;
2528 
2529 	softc->sc_max_req_len = HWRM_MAX_REQ_LEN;
2530 	softc->sc_cmd_timeo = 1000;
2531 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VER_GET);
2532 
2533 	req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
2534 	req.hwrm_intf_min = HWRM_VERSION_MINOR;
2535 	req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
2536 
2537 	BNXT_HWRM_LOCK(softc);
2538 	rc = _hwrm_send_message(softc, &req, sizeof(req));
2539 	if (rc)
2540 		goto fail;
2541 
2542 	printf(": fw ver %d.%d.%d, ", resp->hwrm_fw_maj, resp->hwrm_fw_min,
2543 	    resp->hwrm_fw_bld);
2544 
2545 	softc->sc_hwrm_ver = (resp->hwrm_intf_maj << 16) |
2546 	    (resp->hwrm_intf_min << 8) | resp->hwrm_intf_upd;
2547 #if 0
2548 	snprintf(softc->ver_info->hwrm_if_ver, BNXT_VERSTR_SIZE, "%d.%d.%d",
2549 	    resp->hwrm_intf_maj, resp->hwrm_intf_min, resp->hwrm_intf_upd);
2550 	softc->ver_info->hwrm_if_major = resp->hwrm_intf_maj;
2551 	softc->ver_info->hwrm_if_minor = resp->hwrm_intf_min;
2552 	softc->ver_info->hwrm_if_update = resp->hwrm_intf_upd;
2553 	snprintf(softc->ver_info->hwrm_fw_ver, BNXT_VERSTR_SIZE, "%d.%d.%d",
2554 	    resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld);
2555 	strlcpy(softc->ver_info->driver_hwrm_if_ver, HWRM_VERSION_STR,
2556 	    BNXT_VERSTR_SIZE);
2557 	strlcpy(softc->ver_info->hwrm_fw_name, resp->hwrm_fw_name,
2558 	    BNXT_NAME_SIZE);
2559 
2560 	if (resp->mgmt_fw_maj == 0 && resp->mgmt_fw_min == 0 &&
2561 	    resp->mgmt_fw_bld == 0) {
2562 		strlcpy(softc->ver_info->mgmt_fw_ver, naver, BNXT_VERSTR_SIZE);
2563 		strlcpy(softc->ver_info->mgmt_fw_name, nastr, BNXT_NAME_SIZE);
2564 	}
2565 	else {
2566 		snprintf(softc->ver_info->mgmt_fw_ver, BNXT_VERSTR_SIZE,
2567 		    "%d.%d.%d", resp->mgmt_fw_maj, resp->mgmt_fw_min,
2568 		    resp->mgmt_fw_bld);
2569 		strlcpy(softc->ver_info->mgmt_fw_name, resp->mgmt_fw_name,
2570 		    BNXT_NAME_SIZE);
2571 	}
2572 	if (resp->netctrl_fw_maj == 0 && resp->netctrl_fw_min == 0 &&
2573 	    resp->netctrl_fw_bld == 0) {
2574 		strlcpy(softc->ver_info->netctrl_fw_ver, naver,
2575 		    BNXT_VERSTR_SIZE);
2576 		strlcpy(softc->ver_info->netctrl_fw_name, nastr,
2577 		    BNXT_NAME_SIZE);
2578 	}
2579 	else {
2580 		snprintf(softc->ver_info->netctrl_fw_ver, BNXT_VERSTR_SIZE,
2581 		    "%d.%d.%d", resp->netctrl_fw_maj, resp->netctrl_fw_min,
2582 		    resp->netctrl_fw_bld);
2583 		strlcpy(softc->ver_info->netctrl_fw_name, resp->netctrl_fw_name,
2584 		    BNXT_NAME_SIZE);
2585 	}
2586 	if (resp->roce_fw_maj == 0 && resp->roce_fw_min == 0 &&
2587 	    resp->roce_fw_bld == 0) {
2588 		strlcpy(softc->ver_info->roce_fw_ver, naver, BNXT_VERSTR_SIZE);
2589 		strlcpy(softc->ver_info->roce_fw_name, nastr, BNXT_NAME_SIZE);
2590 	}
2591 	else {
2592 		snprintf(softc->ver_info->roce_fw_ver, BNXT_VERSTR_SIZE,
2593 		    "%d.%d.%d", resp->roce_fw_maj, resp->roce_fw_min,
2594 		    resp->roce_fw_bld);
2595 		strlcpy(softc->ver_info->roce_fw_name, resp->roce_fw_name,
2596 		    BNXT_NAME_SIZE);
2597 	}
2598 	softc->ver_info->chip_num = le16toh(resp->chip_num);
2599 	softc->ver_info->chip_rev = resp->chip_rev;
2600 	softc->ver_info->chip_metal = resp->chip_metal;
2601 	softc->ver_info->chip_bond_id = resp->chip_bond_id;
2602 	softc->ver_info->chip_type = resp->chip_platform_type;
2603 #endif
2604 
2605 	if (resp->max_req_win_len)
2606 		softc->sc_max_req_len = le16toh(resp->max_req_win_len);
2607 	if (resp->def_req_timeout)
2608 		softc->sc_cmd_timeo = le16toh(resp->def_req_timeout);
2609 
2610 	dev_caps_cfg = le32toh(resp->dev_caps_cfg);
2611 	if ((dev_caps_cfg & HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
2612 	    (dev_caps_cfg & HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
2613 		softc->sc_flags |= BNXT_FLAG_SHORT_CMD;
2614 
2615 fail:
2616 	BNXT_HWRM_UNLOCK(softc);
2617 	return rc;
2618 }
2619 
2620 
2621 int
2622 bnxt_hwrm_func_drv_rgtr(struct bnxt_softc *softc)
2623 {
2624 	struct hwrm_func_drv_rgtr_input req = {0};
2625 
2626 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_DRV_RGTR);
2627 
2628 	req.enables = htole32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
2629 	    HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_OS_TYPE);
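	/* the HSI apparently defines no OpenBSD os_type; report FreeBSD */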
2630 	req.os_type = htole16(HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_FREEBSD);
2631 
2632 	req.ver_maj = 6;
2633 	req.ver_min = 4;
2634 	req.ver_upd = 0;
2635 
2636 	return hwrm_send_message(softc, &req, sizeof(req));
2637 }
2638 
2639 #if 0
2640 
2641 int
2642 bnxt_hwrm_func_drv_unrgtr(struct bnxt_softc *softc, bool shutdown)
2643 {
2644 	struct hwrm_func_drv_unrgtr_input req = {0};
2645 
2646 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_DRV_UNRGTR);
2647 	if (shutdown == true)
2648 		req.flags |=
2649 		    HWRM_FUNC_DRV_UNRGTR_INPUT_FLAGS_PREPARE_FOR_SHUTDOWN;
2650 	return hwrm_send_message(softc, &req, sizeof(req));
2651 }
2652 
2653 #endif
2654 
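/*
 * Read the function's capabilities.  Only the WOL capability flag and
 * the MAC address are consumed; the remaining fields are parsed only
 * in the commented-out block kept below for reference.
 */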
2655 int
2656 bnxt_hwrm_func_qcaps(struct bnxt_softc *softc)
2657 {
2658 	int rc = 0;
2659 	struct hwrm_func_qcaps_input req = {0};
2660 	struct hwrm_func_qcaps_output *resp =
2661 	    BNXT_DMA_KVA(softc->sc_cmd_resp);
2662 	/* struct bnxt_func_info *func = &softc->func; */
2663 
2664 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_QCAPS);
2665 	req.fid = htole16(0xffff);
2666 
2667 	BNXT_HWRM_LOCK(softc);
2668 	rc = _hwrm_send_message(softc, &req, sizeof(req));
2669 	if (rc)
2670 		goto fail;
2671 
2672 	if (resp->flags &
2673 	    htole32(HWRM_FUNC_QCAPS_OUTPUT_FLAGS_WOL_MAGICPKT_SUPPORTED))
2674 		softc->sc_flags |= BNXT_FLAG_WOL_CAP;
2675 
2676 	memcpy(softc->sc_ac.ac_enaddr, resp->mac_address, 6);
2677 	/*
2678 	func->fw_fid = le16toh(resp->fid);
2679 	memcpy(func->mac_addr, resp->mac_address, ETHER_ADDR_LEN);
2680 	func->max_rsscos_ctxs = le16toh(resp->max_rsscos_ctx);
2681 	func->max_cp_rings = le16toh(resp->max_cmpl_rings);
2682 	func->max_tx_rings = le16toh(resp->max_tx_rings);
2683 	func->max_rx_rings = le16toh(resp->max_rx_rings);
2684 	func->max_hw_ring_grps = le32toh(resp->max_hw_ring_grps);
2685 	if (!func->max_hw_ring_grps)
2686 		func->max_hw_ring_grps = func->max_tx_rings;
2687 	func->max_l2_ctxs = le16toh(resp->max_l2_ctxs);
2688 	func->max_vnics = le16toh(resp->max_vnics);
2689 	func->max_stat_ctxs = le16toh(resp->max_stat_ctx);
2690 	if (BNXT_PF(softc)) {
2691 		struct bnxt_pf_info *pf = &softc->pf;
2692 
2693 		pf->port_id = le16toh(resp->port_id);
2694 		pf->first_vf_id = le16toh(resp->first_vf_id);
2695 		pf->max_vfs = le16toh(resp->max_vfs);
2696 		pf->max_encap_records = le32toh(resp->max_encap_records);
2697 		pf->max_decap_records = le32toh(resp->max_decap_records);
2698 		pf->max_tx_em_flows = le32toh(resp->max_tx_em_flows);
2699 		pf->max_tx_wm_flows = le32toh(resp->max_tx_wm_flows);
2700 		pf->max_rx_em_flows = le32toh(resp->max_rx_em_flows);
2701 		pf->max_rx_wm_flows = le32toh(resp->max_rx_wm_flows);
2702 	}
2703 	if (!_is_valid_ether_addr(func->mac_addr)) {
2704 		device_printf(softc->dev, "Invalid ethernet address, generating random locally administered address\n");
2705 		get_random_ether_addr(func->mac_addr);
2706 	}
2707 	*/
2708 
2709 fail:
2710 	BNXT_HWRM_UNLOCK(softc);
2711 	return rc;
2712 }
2713 
2714 
2715 int
2716 bnxt_hwrm_func_qcfg(struct bnxt_softc *softc)
2717 {
	struct hwrm_func_qcfg_input req = {0};
	/* struct hwrm_func_qcfg_output *resp =
	    BNXT_DMA_KVA(softc->sc_cmd_resp);
	struct bnxt_func_qcfg *fn_qcfg = &softc->fn_qcfg; */
	int rc;

	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_QCFG);
	req.fid = htole16(0xffff);
	BNXT_HWRM_LOCK(softc);
	rc = _hwrm_send_message(softc, &req, sizeof(req));
	if (rc)
		goto fail;

	/*
	fn_qcfg->alloc_completion_rings = le16toh(resp->alloc_cmpl_rings);
	fn_qcfg->alloc_tx_rings = le16toh(resp->alloc_tx_rings);
	fn_qcfg->alloc_rx_rings = le16toh(resp->alloc_rx_rings);
	fn_qcfg->alloc_vnics = le16toh(resp->alloc_vnics);
	*/
fail:
	BNXT_HWRM_UNLOCK(softc);
	return rc;
2740 }
2741 
2742 
2743 int
2744 bnxt_hwrm_func_reset(struct bnxt_softc *softc)
2745 {
2746 	struct hwrm_func_reset_input req = {0};
2747 
2748 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_RESET);
2749 	req.enables = 0;
2750 
2751 	return hwrm_send_message(softc, &req, sizeof(req));
2752 }
2753 
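/*
 * Configure buffer placement: with the jumbo threshold set to
 * MCLBYTES, frames that do not fit in a single cluster spill into the
 * aggregation ring.
 */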
2754 int
2755 bnxt_hwrm_vnic_cfg_placement(struct bnxt_softc *softc,
2756     struct bnxt_vnic_info *vnic)
2757 {
2758 	struct hwrm_vnic_plcmodes_cfg_input req = {0};
2759 
2760 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_PLCMODES_CFG);
2761 
2762 	req.flags = htole32(
2763 	    HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);
2764 	req.enables = htole32(
2765 	    HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID);
2766 	req.vnic_id = htole16(vnic->id);
2767 	req.jumbo_thresh = htole16(MCLBYTES);
2768 
2769 	return hwrm_send_message(softc, &req, sizeof(req));
2770 }
2771 
2772 int
2773 bnxt_hwrm_vnic_cfg(struct bnxt_softc *softc, struct bnxt_vnic_info *vnic)
2774 {
2775 	struct hwrm_vnic_cfg_input req = {0};
2776 
2777 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_CFG);
2778 
2779 	if (vnic->flags & BNXT_VNIC_FLAG_DEFAULT)
2780 		req.flags |= htole32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
2781 	if (vnic->flags & BNXT_VNIC_FLAG_BD_STALL)
2782 		req.flags |= htole32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE);
2783 	if (vnic->flags & BNXT_VNIC_FLAG_VLAN_STRIP)
2784 		req.flags |= htole32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
2785 	req.enables = htole32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP |
2786 	    HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE |
2787 	    HWRM_VNIC_CFG_INPUT_ENABLES_MRU);
2788 	req.vnic_id = htole16(vnic->id);
2789 	req.dflt_ring_grp = htole16(vnic->def_ring_grp);
2790 	req.rss_rule = htole16(vnic->rss_id);
2791 	req.cos_rule = htole16(vnic->cos_rule);
2792 	req.lb_rule = htole16(vnic->lb_rule);
2793 	req.mru = htole16(vnic->mru);
2794 
2795 	return hwrm_send_message(softc, &req, sizeof(req));
2796 }
2797 
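/*
 * The alloc/free pairs below share a pattern: HWRM_NA_SIGNATURE (or
 * -1 for filter ids) marks an object as unallocated, so double
 * allocation and double free are caught before talking to the
 * firmware.
 */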
2798 int
2799 bnxt_hwrm_vnic_alloc(struct bnxt_softc *softc, struct bnxt_vnic_info *vnic)
2800 {
2801 	struct hwrm_vnic_alloc_input req = {0};
2802 	struct hwrm_vnic_alloc_output *resp =
2803 	    BNXT_DMA_KVA(softc->sc_cmd_resp);
2804 	int rc;
2805 
2806 	if (vnic->id != (uint16_t)HWRM_NA_SIGNATURE) {
2807 		printf("%s: attempt to re-allocate vnic %04x\n",
2808 		    DEVNAME(softc), vnic->id);
2809 		return EINVAL;
2810 	}
2811 
2812 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_ALLOC);
2813 
2814 	if (vnic->flags & BNXT_VNIC_FLAG_DEFAULT)
2815 		req.flags = htole32(HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT);
2816 
2817 	BNXT_HWRM_LOCK(softc);
2818 	rc = _hwrm_send_message(softc, &req, sizeof(req));
2819 	if (rc)
2820 		goto fail;
2821 
2822 	vnic->id = le32toh(resp->vnic_id);
2823 
2824 fail:
2825 	BNXT_HWRM_UNLOCK(softc);
2826 	return rc;
2827 }
2828 
2829 int
2830 bnxt_hwrm_vnic_free(struct bnxt_softc *softc, struct bnxt_vnic_info *vnic)
2831 {
2832 	struct hwrm_vnic_free_input req = {0};
2833 	int rc;
2834 
2835 	if (vnic->id == (uint16_t)HWRM_NA_SIGNATURE) {
2836 		printf("%s: attempt to deallocate vnic %04x\n",
2837 		    DEVNAME(softc), vnic->id);
2838 		return (EINVAL);
2839 	}
2840 
2841 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_FREE);
2842 	req.vnic_id = htole16(vnic->id);
2843 
2844 	BNXT_HWRM_LOCK(softc);
2845 	rc = _hwrm_send_message(softc, &req, sizeof(req));
2846 	if (rc == 0)
2847 		vnic->id = (uint16_t)HWRM_NA_SIGNATURE;
2848 	BNXT_HWRM_UNLOCK(softc);
2849 
2850 	return (rc);
2851 }
2852 
2853 int
2854 bnxt_hwrm_vnic_ctx_alloc(struct bnxt_softc *softc, uint16_t *ctx_id)
2855 {
2856 	struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0};
2857 	struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
2858 	    BNXT_DMA_KVA(softc->sc_cmd_resp);
2859 	int rc;
2860 
2861 	if (*ctx_id != (uint16_t)HWRM_NA_SIGNATURE) {
2862 		printf("%s: attempt to re-allocate vnic ctx %04x\n",
2863 		    DEVNAME(softc), *ctx_id);
2864 		return EINVAL;
2865 	}
2866 
2867 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC);
2868 
2869 	BNXT_HWRM_LOCK(softc);
2870 	rc = _hwrm_send_message(softc, &req, sizeof(req));
2871 	if (rc)
2872 		goto fail;
2873 
2874 	*ctx_id = letoh16(resp->rss_cos_lb_ctx_id);
2875 
2876 fail:
2877 	BNXT_HWRM_UNLOCK(softc);
2878 	return (rc);
2879 }
2880 
2881 int
2882 bnxt_hwrm_vnic_ctx_free(struct bnxt_softc *softc, uint16_t *ctx_id)
2883 {
2884 	struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0};
2885 	int rc;
2886 
2887 	if (*ctx_id == (uint16_t)HWRM_NA_SIGNATURE) {
2888 		printf("%s: attempt to deallocate vnic ctx %04x\n",
2889 		    DEVNAME(softc), *ctx_id);
2890 		return (EINVAL);
2891 	}
2892 
2893 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE);
2894 	req.rss_cos_lb_ctx_id = htole32(*ctx_id);
2895 
2896 	BNXT_HWRM_LOCK(softc);
2897 	rc = _hwrm_send_message(softc, &req, sizeof(req));
2898 	if (rc == 0)
2899 		*ctx_id = (uint16_t)HWRM_NA_SIGNATURE;
2900 	BNXT_HWRM_UNLOCK(softc);
2901 	return (rc);
2902 }
2903 
2904 int
2905 bnxt_hwrm_ring_grp_alloc(struct bnxt_softc *softc, struct bnxt_grp_info *grp)
2906 {
2907 	struct hwrm_ring_grp_alloc_input req = {0};
2908 	struct hwrm_ring_grp_alloc_output *resp;
2909 	int rc = 0;
2910 
2911 	if (grp->grp_id != HWRM_NA_SIGNATURE) {
2912 		printf("%s: attempt to re-allocate ring group %04x\n",
2913 		    DEVNAME(softc), grp->grp_id);
2914 		return EINVAL;
2915 	}
2916 
2917 	resp = BNXT_DMA_KVA(softc->sc_cmd_resp);
2918 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_RING_GRP_ALLOC);
2919 	req.cr = htole16(grp->cp_ring_id);
2920 	req.rr = htole16(grp->rx_ring_id);
2921 	req.ar = htole16(grp->ag_ring_id);
2922 	req.sc = htole16(grp->stats_ctx);
2923 
2924 	BNXT_HWRM_LOCK(softc);
2925 	rc = _hwrm_send_message(softc, &req, sizeof(req));
2926 	if (rc)
2927 		goto fail;
2928 
2929 	grp->grp_id = letoh32(resp->ring_group_id);
2930 
2931 fail:
2932 	BNXT_HWRM_UNLOCK(softc);
2933 	return rc;
2934 }
2935 
2936 int
2937 bnxt_hwrm_ring_grp_free(struct bnxt_softc *softc, struct bnxt_grp_info *grp)
2938 {
2939 	struct hwrm_ring_grp_free_input req = {0};
2940 	int rc = 0;
2941 
2942 	if (grp->grp_id == HWRM_NA_SIGNATURE) {
2943 		printf("%s: attempt to free ring group %04x\n",
2944 		    DEVNAME(softc), grp->grp_id);
2945 		return EINVAL;
2946 	}
2947 
2948 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_RING_GRP_FREE);
2949 	req.ring_group_id = htole32(grp->grp_id);
2950 
2951 	BNXT_HWRM_LOCK(softc);
2952 	rc = _hwrm_send_message(softc, &req, sizeof(req));
2953 	if (rc == 0)
2954 		grp->grp_id = HWRM_NA_SIGNATURE;
2955 
2956 	BNXT_HWRM_UNLOCK(softc);
2957 	return (rc);
2958 }
2959 
2960 /*
2961  * Ring allocation message to the firmware
2962  */
2963 int
2964 bnxt_hwrm_ring_alloc(struct bnxt_softc *softc, uint8_t type,
2965     struct bnxt_ring *ring, uint16_t cmpl_ring_id, uint32_t stat_ctx_id,
2966     int irq)
2967 {
2968 	struct hwrm_ring_alloc_input req = {0};
2969 	struct hwrm_ring_alloc_output *resp;
2970 	int rc;
2971 
2972 	if (ring->phys_id != (uint16_t)HWRM_NA_SIGNATURE) {
2973 		printf("%s: attempt to re-allocate ring %04x\n",
2974 		    DEVNAME(softc), ring->phys_id);
2975 		return EINVAL;
2976 	}
2977 
2978 	resp = BNXT_DMA_KVA(softc->sc_cmd_resp);
2979 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_RING_ALLOC);
2980 	req.enables = htole32(0);
2981 	req.fbo = htole32(0);
2982 
2983 	if (stat_ctx_id != HWRM_NA_SIGNATURE) {
2984 		req.enables |= htole32(
2985 		    HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID);
2986 		req.stat_ctx_id = htole32(stat_ctx_id);
2987 	}
2988 	req.ring_type = type;
2989 	req.page_tbl_addr = htole64(ring->paddr);
2990 	req.length = htole32(ring->ring_size);
2991 	req.logical_id = htole16(ring->id);
2992 	req.cmpl_ring_id = htole16(cmpl_ring_id);
2993 	req.queue_id = htole16(softc->sc_tx_queue_id);
2994 	req.int_mode = (softc->sc_flags & BNXT_FLAG_MSIX) ?
2995 	    HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX :
2996 	    HWRM_RING_ALLOC_INPUT_INT_MODE_LEGACY;
2997 	BNXT_HWRM_LOCK(softc);
2998 	rc = _hwrm_send_message(softc, &req, sizeof(req));
2999 	if (rc)
3000 		goto fail;
3001 
3002 	ring->phys_id = le16toh(resp->ring_id);
3003 
3004 fail:
3005 	BNXT_HWRM_UNLOCK(softc);
3006 	return rc;
3007 }
3008 
3009 int
3010 bnxt_hwrm_ring_free(struct bnxt_softc *softc, uint8_t type, struct bnxt_ring *ring)
3011 {
3012 	struct hwrm_ring_free_input req = {0};
3013 	int rc;
3014 
3015 	if (ring->phys_id == (uint16_t)HWRM_NA_SIGNATURE) {
3016 		printf("%s: attempt to deallocate ring %04x\n",
3017 		    DEVNAME(softc), ring->phys_id);
3018 		return (EINVAL);
3019 	}
3020 
3021 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_RING_FREE);
3022 	req.ring_type = type;
3023 	req.ring_id = htole16(ring->phys_id);
3024 	BNXT_HWRM_LOCK(softc);
3025 	rc = _hwrm_send_message(softc, &req, sizeof(req));
3026 	if (rc)
3027 		goto fail;
3028 
3029 	ring->phys_id = (uint16_t)HWRM_NA_SIGNATURE;
3030 fail:
3031 	BNXT_HWRM_UNLOCK(softc);
3032 	return (rc);
3033 }
3034 
3035 
3036 int
3037 bnxt_hwrm_stat_ctx_alloc(struct bnxt_softc *softc, struct bnxt_cp_ring *cpr,
3038     uint64_t paddr)
3039 {
3040 	struct hwrm_stat_ctx_alloc_input req = {0};
3041 	struct hwrm_stat_ctx_alloc_output *resp;
3042 	int rc = 0;
3043 
3044 	if (cpr->stats_ctx_id != HWRM_NA_SIGNATURE) {
3045 		printf("%s: attempt to re-allocate stats ctx %08x\n",
3046 		    DEVNAME(softc), cpr->stats_ctx_id);
3047 		return EINVAL;
3048 	}
3049 
3050 	resp = BNXT_DMA_KVA(softc->sc_cmd_resp);
3051 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_STAT_CTX_ALLOC);
3052 
3053 	req.update_period_ms = htole32(1000);
3054 	req.stats_dma_addr = htole64(paddr);
3055 
3056 	BNXT_HWRM_LOCK(softc);
3057 	rc = _hwrm_send_message(softc, &req, sizeof(req));
3058 	if (rc)
3059 		goto fail;
3060 
3061 	cpr->stats_ctx_id = le32toh(resp->stat_ctx_id);
3062 
3063 fail:
3064 	BNXT_HWRM_UNLOCK(softc);
3065 
3066 	return rc;
3067 }
3068 
3069 int
3070 bnxt_hwrm_stat_ctx_free(struct bnxt_softc *softc, struct bnxt_cp_ring *cpr)
3071 {
3072 	struct hwrm_stat_ctx_free_input req = {0};
3073 	int rc = 0;
3074 
3075 	if (cpr->stats_ctx_id == HWRM_NA_SIGNATURE) {
3076 		printf("%s: attempt to free stats ctx %08x\n",
3077 		    DEVNAME(softc), cpr->stats_ctx_id);
3078 		return EINVAL;
3079 	}
3080 
3081 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_STAT_CTX_FREE);
3082 	req.stat_ctx_id = htole32(cpr->stats_ctx_id);
3083 
3084 	BNXT_HWRM_LOCK(softc);
3085 	rc = _hwrm_send_message(softc, &req, sizeof(req));
3086 	BNXT_HWRM_UNLOCK(softc);
3087 
3088 	if (rc == 0)
3089 		cpr->stats_ctx_id = HWRM_NA_SIGNATURE;
3090 
3091 	return (rc);
3092 }
3093 
3094 #if 0
3095 
3096 int
3097 bnxt_hwrm_port_qstats(struct bnxt_softc *softc)
3098 {
3099 	struct hwrm_port_qstats_input req = {0};
3100 	int rc = 0;
3101 
3102 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_PORT_QSTATS);
3103 
3104 	req.port_id = htole16(softc->pf.port_id);
3105 	req.rx_stat_host_addr = htole64(softc->hw_rx_port_stats.idi_paddr);
3106 	req.tx_stat_host_addr = htole64(softc->hw_tx_port_stats.idi_paddr);
3107 
3108 	BNXT_HWRM_LOCK(softc);
3109 	rc = _hwrm_send_message(softc, &req, sizeof(req));
3110 	BNXT_HWRM_UNLOCK(softc);
3111 
3112 	return rc;
3113 }
3114 
3115 #endif
3116 
3117 int
3118 bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt_softc *softc,
3119     uint32_t vnic_id, uint32_t rx_mask, uint64_t mc_addr, uint32_t mc_count)
3120 {
3121 	struct hwrm_cfa_l2_set_rx_mask_input req = {0};
3122 
3123 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_CFA_L2_SET_RX_MASK);
3124 
3125 	req.vnic_id = htole32(vnic_id);
3126 	req.mask = htole32(rx_mask);
3127 	req.mc_tbl_addr = htole64(mc_addr);
3128 	req.num_mc_entries = htole32(mc_count);
3129 	return hwrm_send_message(softc, &req, sizeof(req));
3130 }
3131 
3132 int
3133 bnxt_hwrm_set_filter(struct bnxt_softc *softc, struct bnxt_vnic_info *vnic)
3134 {
3135 	struct hwrm_cfa_l2_filter_alloc_input	req = {0};
3136 	struct hwrm_cfa_l2_filter_alloc_output	*resp;
3137 	uint32_t enables = 0;
3138 	int rc = 0;
3139 
3140 	if (vnic->filter_id != -1) {
3141 		printf("%s: attempt to re-allocate l2 ctx filter\n",
3142 		    DEVNAME(softc));
3143 		return EINVAL;
3144 	}
3145 
3146 	resp = BNXT_DMA_KVA(softc->sc_cmd_resp);
3147 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_CFA_L2_FILTER_ALLOC);
3148 
3149 	req.flags = htole32(HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX
3150 	    | HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST);
3151 	enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR
3152 	    | HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK
3153 	    | HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
3154 	req.enables = htole32(enables);
3155 	req.dst_id = htole16(vnic->id);
3156 	memcpy(req.l2_addr, softc->sc_ac.ac_enaddr, ETHER_ADDR_LEN);
3157 	memset(&req.l2_addr_mask, 0xff, sizeof(req.l2_addr_mask));
3158 
3159 	BNXT_HWRM_LOCK(softc);
3160 	rc = _hwrm_send_message(softc, &req, sizeof(req));
3161 	if (rc)
3162 		goto fail;
3163 
3164 	vnic->filter_id = le64toh(resp->l2_filter_id);
3165 	vnic->flow_id = le64toh(resp->flow_id);
3166 
3167 fail:
3168 	BNXT_HWRM_UNLOCK(softc);
3169 	return (rc);
3170 }
3171 
3172 int
3173 bnxt_hwrm_free_filter(struct bnxt_softc *softc, struct bnxt_vnic_info *vnic)
3174 {
3175 	struct hwrm_cfa_l2_filter_free_input req = {0};
3176 	int rc = 0;
3177 
3178 	if (vnic->filter_id == -1) {
3179 		printf("%s: attempt to deallocate filter %llx\n",
		    DEVNAME(softc), vnic->filter_id);
3181 		return (EINVAL);
3182 	}
3183 
3184 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_CFA_L2_FILTER_FREE);
3185 	req.l2_filter_id = htole64(vnic->filter_id);
3186 
3187 	BNXT_HWRM_LOCK(softc);
3188 	rc = _hwrm_send_message(softc, &req, sizeof(req));
3189 	if (rc == 0)
3190 		vnic->filter_id = -1;
3191 	BNXT_HWRM_UNLOCK(softc);
3192 
3193 	return (rc);
3194 }
3195 
3196 
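/*
 * Program RSS for the vnic's context: the hash type bits select which
 * header fields are hashed; the indirection table and hash key are
 * passed by DMA address.
 */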
3197 int
3198 bnxt_hwrm_vnic_rss_cfg(struct bnxt_softc *softc, struct bnxt_vnic_info *vnic,
3199     uint32_t hash_type, daddr_t rss_table, daddr_t rss_key)
3200 {
3201 	struct hwrm_vnic_rss_cfg_input	req = {0};
3202 
3203 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_RSS_CFG);
3204 
3205 	req.hash_type = htole32(hash_type);
3206 	req.ring_grp_tbl_addr = htole64(rss_table);
3207 	req.hash_key_tbl_addr = htole64(rss_key);
3208 	req.rss_ctx_idx = htole16(vnic->rss_id);
3209 
3210 	return hwrm_send_message(softc, &req, sizeof(req));
3211 }
3212 
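/*
 * Tell the firmware which completion ring should receive async event
 * notifications.  Only the PF path is used; the VF variant is kept
 * for reference.
 */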
3213 int
3214 bnxt_cfg_async_cr(struct bnxt_softc *softc, struct bnxt_cp_ring *cpr)
3215 {
3216 	int rc = 0;
3217 
3218 	if (1 /* BNXT_PF(softc) */) {
3219 		struct hwrm_func_cfg_input req = {0};
3220 
3221 		bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_CFG);
3222 
3223 		req.fid = htole16(0xffff);
3224 		req.enables = htole32(HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
3225 		req.async_event_cr = htole16(cpr->ring.phys_id);
3226 
3227 		rc = hwrm_send_message(softc, &req, sizeof(req));
3228 	} else {
3229 		struct hwrm_func_vf_cfg_input req = {0};
3230 
3231 		bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_VF_CFG);
3232 
3233 		req.enables = htole32(HWRM_FUNC_VF_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
3234 		req.async_event_cr = htole16(cpr->ring.phys_id);
3235 
3236 		rc = hwrm_send_message(softc, &req, sizeof(req));
3237 	}
3238 	return rc;
3239 }
3240 
3241 #if 0
3242 
3243 void
3244 bnxt_validate_hw_lro_settings(struct bnxt_softc *softc)
3245 {
3246 	softc->hw_lro.enable = min(softc->hw_lro.enable, 1);
3247 
	softc->hw_lro.is_mode_gro = min(softc->hw_lro.is_mode_gro, 1);
3249 
3250 	softc->hw_lro.max_agg_segs = min(softc->hw_lro.max_agg_segs,
3251 		HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_MAX);
3252 
3253 	softc->hw_lro.max_aggs = min(softc->hw_lro.max_aggs,
3254 		HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX);
3255 
3256 	softc->hw_lro.min_agg_len = min(softc->hw_lro.min_agg_len, BNXT_MAX_MTU);
3257 }
3258 
3259 int
3260 bnxt_hwrm_vnic_tpa_cfg(struct bnxt_softc *softc)
3261 {
3262 	struct hwrm_vnic_tpa_cfg_input req = {0};
3263 	uint32_t flags;
3264 
3265 	if (softc->vnic_info.id == (uint16_t) HWRM_NA_SIGNATURE) {
3266 		return 0;
3267 	}
3268 
3269 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_TPA_CFG);
3270 
3271 	if (softc->hw_lro.enable) {
3272 		flags = HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
3273 			HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA |
3274 			HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
3275 			HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ;
3276 
		if (softc->hw_lro.is_mode_gro)
3278 			flags |= HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO;
3279 		else
3280 			flags |= HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE;
3281 
3282 		req.flags = htole32(flags);
3283 
3284 		req.enables = htole32(HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
3285 				HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS |
3286 				HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN);
3287 
3288 		req.max_agg_segs = htole16(softc->hw_lro.max_agg_segs);
3289 		req.max_aggs = htole16(softc->hw_lro.max_aggs);
3290 		req.min_agg_len = htole32(softc->hw_lro.min_agg_len);
3291 	}
3292 
3293 	req.vnic_id = htole16(softc->vnic_info.id);
3294 
3295 	return hwrm_send_message(softc, &req, sizeof(req));
3296 }
3297 
3298 
3299 int
3300 bnxt_hwrm_fw_reset(struct bnxt_softc *softc, uint8_t processor,
3301     uint8_t *selfreset)
3302 {
3303 	struct hwrm_fw_reset_input req = {0};
3304 	struct hwrm_fw_reset_output *resp =
3305 	    (void *)softc->hwrm_cmd_resp.idi_vaddr;
3306 	int rc;
3307 
3308 	MPASS(selfreset);
3309 
3310 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FW_RESET);
3311 	req.embedded_proc_type = processor;
3312 	req.selfrst_status = *selfreset;
3313 
3314 	BNXT_HWRM_LOCK(softc);
3315 	rc = _hwrm_send_message(softc, &req, sizeof(req));
3316 	if (rc)
3317 		goto exit;
3318 	*selfreset = resp->selfrst_status;
3319 
3320 exit:
3321 	BNXT_HWRM_UNLOCK(softc);
3322 	return rc;
3323 }
3324 
3325 int
3326 bnxt_hwrm_fw_qstatus(struct bnxt_softc *softc, uint8_t type, uint8_t *selfreset)
3327 {
3328 	struct hwrm_fw_qstatus_input req = {0};
3329 	struct hwrm_fw_qstatus_output *resp =
3330 	    (void *)softc->hwrm_cmd_resp.idi_vaddr;
3331 	int rc;
3332 
3333 	MPASS(selfreset);
3334 
3335 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FW_QSTATUS);
3336 	req.embedded_proc_type = type;
3337 
3338 	BNXT_HWRM_LOCK(softc);
3339 	rc = _hwrm_send_message(softc, &req, sizeof(req));
3340 	if (rc)
3341 		goto exit;
3342 	*selfreset = resp->selfrst_status;
3343 
3344 exit:
3345 	BNXT_HWRM_UNLOCK(softc);
3346 	return rc;
3347 }
3348 
3349 #endif
3350 
3351 int
3352 bnxt_hwrm_nvm_get_dev_info(struct bnxt_softc *softc, uint16_t *mfg_id,
3353     uint16_t *device_id, uint32_t *sector_size, uint32_t *nvram_size,
3354     uint32_t *reserved_size, uint32_t *available_size)
3355 {
3356 	struct hwrm_nvm_get_dev_info_input req = {0};
3357 	struct hwrm_nvm_get_dev_info_output *resp =
3358 	    BNXT_DMA_KVA(softc->sc_cmd_resp);
3359 	int rc;
3360 	uint32_t old_timeo;
3361 
3362 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_NVM_GET_DEV_INFO);
3363 
3364 	BNXT_HWRM_LOCK(softc);
3365 	old_timeo = softc->sc_cmd_timeo;
3366 	softc->sc_cmd_timeo = BNXT_NVM_TIMEO;
3367 	rc = _hwrm_send_message(softc, &req, sizeof(req));
3368 	softc->sc_cmd_timeo = old_timeo;
3369 	if (rc)
3370 		goto exit;
3371 
3372 	if (mfg_id)
3373 		*mfg_id = le16toh(resp->manufacturer_id);
3374 	if (device_id)
3375 		*device_id = le16toh(resp->device_id);
3376 	if (sector_size)
3377 		*sector_size = le32toh(resp->sector_size);
3378 	if (nvram_size)
3379 		*nvram_size = le32toh(resp->nvram_size);
3380 	if (reserved_size)
3381 		*reserved_size = le32toh(resp->reserved_size);
3382 	if (available_size)
3383 		*available_size = le32toh(resp->available_size);
3384 
3385 exit:
3386 	BNXT_HWRM_UNLOCK(softc);
3387 	return rc;
3388 }
3389 
3390 #if 0
3391 
3392 int
3393 bnxt_hwrm_fw_get_time(struct bnxt_softc *softc, uint16_t *year, uint8_t *month,
3394     uint8_t *day, uint8_t *hour, uint8_t *minute, uint8_t *second,
3395     uint16_t *millisecond, uint16_t *zone)
3396 {
3397 	struct hwrm_fw_get_time_input req = {0};
3398 	struct hwrm_fw_get_time_output *resp =
3399 	    (void *)softc->hwrm_cmd_resp.idi_vaddr;
3400 	int rc;
3401 
3402 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FW_GET_TIME);
3403 
3404 	BNXT_HWRM_LOCK(softc);
3405 	rc = _hwrm_send_message(softc, &req, sizeof(req));
3406 	if (rc)
3407 		goto exit;
3408 
3409 	if (year)
3410 		*year = le16toh(resp->year);
3411 	if (month)
3412 		*month = resp->month;
3413 	if (day)
3414 		*day = resp->day;
3415 	if (hour)
3416 		*hour = resp->hour;
3417 	if (minute)
3418 		*minute = resp->minute;
3419 	if (second)
3420 		*second = resp->second;
3421 	if (millisecond)
3422 		*millisecond = le16toh(resp->millisecond);
3423 	if (zone)
3424 		*zone = le16toh(resp->zone);
3425 
3426 exit:
3427 	BNXT_HWRM_UNLOCK(softc);
3428 	return rc;
3429 }
3430 
3431 int
3432 bnxt_hwrm_fw_set_time(struct bnxt_softc *softc, uint16_t year, uint8_t month,
3433     uint8_t day, uint8_t hour, uint8_t minute, uint8_t second,
3434     uint16_t millisecond, uint16_t zone)
3435 {
3436 	struct hwrm_fw_set_time_input req = {0};
3437 
3438 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FW_SET_TIME);
3439 
3440 	req.year = htole16(year);
3441 	req.month = month;
3442 	req.day = day;
3443 	req.hour = hour;
3444 	req.minute = minute;
3445 	req.second = second;
3446 	req.millisecond = htole16(millisecond);
3447 	req.zone = htole16(zone);
3448 	return hwrm_send_message(softc, &req, sizeof(req));
3449 }
3450 
3451 #endif
3452 
3453 void
3454 _bnxt_hwrm_set_async_event_bit(struct hwrm_func_drv_rgtr_input *req, int bit)
3455 {
3456 	req->async_event_fwd[bit/32] |= (1 << (bit % 32));
3457 }
3458 
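/*
 * Register for the async events the driver handles; each event id
 * sets one bit in the forwarding bitmap.
 */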
int
bnxt_hwrm_func_rgtr_async_events(struct bnxt_softc *softc)
3460 {
3461 	struct hwrm_func_drv_rgtr_input req = {0};
3462 	int events[] = {
3463 		HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
3464 		HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
3465 		HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
3466 		HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
3467 		HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE
3468 	};
3469 	int i;
3470 
3471 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_DRV_RGTR);
3472 
3473 	req.enables =
3474 		htole32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
3475 
3476 	for (i = 0; i < nitems(events); i++)
3477 		_bnxt_hwrm_set_async_event_bit(&req, events[i]);
3478 
3479 	return hwrm_send_message(softc, &req, sizeof(req));
3480 }
3481 
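/*
 * Read a 256 byte SFP/QSFP EEPROM page through the firmware's i2c
 * interface, one response buffer's worth of data at a time.
 */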
3482 int
3483 bnxt_get_sffpage(struct bnxt_softc *softc, struct if_sffpage *sff)
3484 {
	struct hwrm_port_phy_i2c_read_input req = {0};
3486 	struct hwrm_port_phy_i2c_read_output *out;
3487 	int offset;
3488 
3489 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_PORT_PHY_I2C_READ);
3490 	req.i2c_slave_addr = sff->sff_addr;
3491 	req.page_number = htole16(sff->sff_page);
3492 
3493 	for (offset = 0; offset < 256; offset += sizeof(out->data)) {
3494 		req.page_offset = htole16(offset);
3495 		req.data_length = sizeof(out->data);
3496 		req.enables = htole32(HWRM_PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET);
3497 
3498 		if (hwrm_send_message(softc, &req, sizeof(req))) {
3499 			printf("%s: failed to read i2c data\n", DEVNAME(softc));
3500 			return 1;
3501 		}
3502 
3503 		out = (struct hwrm_port_phy_i2c_read_output *)
3504 		    BNXT_DMA_KVA(softc->sc_cmd_resp);
3505 		memcpy(sff->sff_data + offset, out->data, sizeof(out->data));
3506 	}
3507 
3508 	return 0;
3509 }
3510