/*	$OpenBSD: if_bnxt.c,v 1.52 2024/10/06 23:43:18 jmatthew Exp $	*/
/*-
 * Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2016 Broadcom, All Rights Reserved.
 * The term Broadcom refers to Broadcom Limited and/or its subsidiaries
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2018 Jonathan Matthew <jmatthew@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */


#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/device.h>
#include <sys/stdint.h>
#include <sys/sockio.h>
#include <sys/atomic.h>
#include <sys/intrmap.h>

#include <machine/bus.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_bnxtreg.h>

#include <net/if.h>
#include <net/if_media.h>
#include <net/route.h>
#include <net/toeplitz.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <netinet/tcp.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>

#define BNXT_HWRM_BAR		0x10
#define BNXT_DOORBELL_BAR	0x18

#define BNXT_MAX_QUEUES		8

#define BNXT_CP_RING_ID_BASE	0
#define BNXT_RX_RING_ID_BASE	(BNXT_MAX_QUEUES + 1)
#define BNXT_AG_RING_ID_BASE	((BNXT_MAX_QUEUES * 2) + 1)
#define BNXT_TX_RING_ID_BASE	((BNXT_MAX_QUEUES * 3) + 1)
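
/*
 * ring ids are partitioned by ring type: with BNXT_MAX_QUEUES == 8 the
 * completion rings use ids 0-8 (id 0 being the async/admin ring), rx
 * rings 9-16, rx ag rings 17-24 and tx rings 25-32.  each ring's
 * doorbell register lives at (id * 0x80) in the doorbell BAR.
 */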

#define BNXT_MAX_MTU		9500
#define BNXT_AG_BUFFER_SIZE	8192

#define BNXT_CP_PAGES		4

#define BNXT_MAX_TX_SEGS	31
#define BNXT_TX_SLOTS(bs)	(bs->bs_map->dm_nsegs + 1)
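/*
 * each packet placed on the tx ring takes one tx_bd_short per DMA
 * segment plus one tx_bd_long_hi for offload metadata, hence the
 * extra slot counted here (see bnxt_start()).
 */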

#define BNXT_HWRM_SHORT_REQ_LEN	sizeof(struct hwrm_short_input)

#define BNXT_HWRM_LOCK_INIT(_sc, _name)	\
	mtx_init_flags(&_sc->sc_lock, IPL_NET, _name, 0)
#define BNXT_HWRM_LOCK(_sc)		mtx_enter(&_sc->sc_lock)
#define BNXT_HWRM_UNLOCK(_sc)		mtx_leave(&_sc->sc_lock)
#define BNXT_HWRM_LOCK_DESTROY(_sc)	/* nothing */
#define BNXT_HWRM_LOCK_ASSERT(_sc)	MUTEX_ASSERT_LOCKED(&_sc->sc_lock)

#define BNXT_FLAG_VF		0x0001
#define BNXT_FLAG_NPAR		0x0002
#define BNXT_FLAG_WOL_CAP	0x0004
#define BNXT_FLAG_SHORT_CMD	0x0008
#define BNXT_FLAG_MSIX		0x0010

/* NVRam stuff has a five minute timeout */
#define BNXT_NVM_TIMEO		(5 * 60 * 1000)

#define NEXT_CP_CONS_V(_ring, _cons, _v_bit)		\
do {							\
	if (++(_cons) == (_ring)->ring_size)		\
		((_cons) = 0, (_v_bit) = !_v_bit);	\
} while (0);
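
/*
 * completion ring entries carry a phase ("valid") bit that the
 * firmware inverts on each pass around the ring, so stale entries can
 * be told from fresh ones without a producer index: an entry is new
 * only while its V bit matches the phase the consumer expects, and
 * the expected phase flips whenever the consumer index wraps.
 */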

struct bnxt_ring {
	uint64_t		paddr;
	uint64_t		doorbell;
	caddr_t			vaddr;
	uint32_t		ring_size;
	uint16_t		id;
	uint16_t		phys_id;
};

struct bnxt_cp_ring {
	struct bnxt_ring	ring;
	void			*irq;
	struct bnxt_softc	*softc;
	uint32_t		cons;
	int			v_bit;
	uint32_t		commit_cons;
	int			commit_v_bit;
	struct ctx_hw_stats	*stats;
	uint32_t		stats_ctx_id;
	struct bnxt_dmamem	*ring_mem;
};

struct bnxt_grp_info {
	uint32_t		grp_id;
	uint16_t		stats_ctx;
	uint16_t		rx_ring_id;
	uint16_t		cp_ring_id;
	uint16_t		ag_ring_id;
};

struct bnxt_vnic_info {
	uint16_t		id;
	uint16_t		def_ring_grp;
	uint16_t		cos_rule;
	uint16_t		lb_rule;
	uint16_t		mru;

	uint32_t		flags;
#define BNXT_VNIC_FLAG_DEFAULT		0x01
#define BNXT_VNIC_FLAG_BD_STALL		0x02
#define BNXT_VNIC_FLAG_VLAN_STRIP	0x04

	uint64_t		filter_id;
	uint32_t		flow_id;

	uint16_t		rss_id;
};

struct bnxt_slot {
	bus_dmamap_t		bs_map;
	struct mbuf		*bs_m;
};

struct bnxt_dmamem {
	bus_dmamap_t		bdm_map;
	bus_dma_segment_t	bdm_seg;
	size_t			bdm_size;
	caddr_t			bdm_kva;
};
#define BNXT_DMA_MAP(_bdm)	((_bdm)->bdm_map)
#define BNXT_DMA_LEN(_bdm)	((_bdm)->bdm_size)
#define BNXT_DMA_DVA(_bdm)	((u_int64_t)(_bdm)->bdm_map->dm_segs[0].ds_addr)
#define BNXT_DMA_KVA(_bdm)	((void *)(_bdm)->bdm_kva)
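
/*
 * BNXT_DMA_DVA() is the device-visible bus address of an allocation
 * and BNXT_DMA_KVA() its kernel mapping; both refer to one contiguous
 * segment, as bnxt_dmamem_alloc() creates its maps with nsegs 1.
 */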

struct bnxt_rx_queue {
	struct bnxt_softc	*rx_softc;
	struct ifiqueue		*rx_ifiq;
	struct bnxt_dmamem	*rx_ring_mem;	/* rx and ag */
	struct bnxt_ring	rx_ring;
	struct bnxt_ring	rx_ag_ring;
	struct if_rxring	rxr[2];
	struct bnxt_slot	*rx_slots;
	struct bnxt_slot	*rx_ag_slots;
	int			rx_prod;
	int			rx_cons;
	int			rx_ag_prod;
	int			rx_ag_cons;
	struct timeout		rx_refill;
};

struct bnxt_tx_queue {
	struct bnxt_softc	*tx_softc;
	struct ifqueue		*tx_ifq;
	struct bnxt_dmamem	*tx_ring_mem;
	struct bnxt_ring	tx_ring;
	struct bnxt_slot	*tx_slots;
	int			tx_prod;
	int			tx_cons;
	int			tx_ring_prod;
	int			tx_ring_cons;
};

struct bnxt_queue {
	char			q_name[8];
	int			q_index;
	void			*q_ihc;
	struct bnxt_softc	*q_sc;
	struct bnxt_cp_ring	q_cp;
	struct bnxt_rx_queue	q_rx;
	struct bnxt_tx_queue	q_tx;
	struct bnxt_grp_info	q_rg;
};

struct bnxt_softc {
	struct device		sc_dev;
	struct arpcom		sc_ac;
	struct ifmedia		sc_media;

	struct mutex		sc_lock;

	pci_chipset_tag_t	sc_pc;
	pcitag_t		sc_tag;
	bus_dma_tag_t		sc_dmat;

	bus_space_tag_t		sc_hwrm_t;
	bus_space_handle_t	sc_hwrm_h;
	bus_size_t		sc_hwrm_s;

	struct bnxt_dmamem	*sc_cmd_resp;
	uint16_t		sc_cmd_seq;
	uint16_t		sc_max_req_len;
	uint32_t		sc_cmd_timeo;
	uint32_t		sc_flags;

	bus_space_tag_t		sc_db_t;
	bus_space_handle_t	sc_db_h;
	bus_size_t		sc_db_s;

	void			*sc_ih;

	int			sc_hwrm_ver;
	int			sc_tx_queue_id;

	struct bnxt_vnic_info	sc_vnic;
	struct bnxt_dmamem	*sc_stats_ctx_mem;
	struct bnxt_dmamem	*sc_rx_cfg;

	struct bnxt_cp_ring	sc_cp_ring;

	int			sc_nqueues;
	struct intrmap		*sc_intrmap;
	struct bnxt_queue	sc_queues[BNXT_MAX_QUEUES];
};
#define DEVNAME(_sc)	((_sc)->sc_dev.dv_xname)

const struct pci_matchid bnxt_devices[] = {
	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_BCM57301 },
	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_BCM57302 },
	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_BCM57304 },
	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_BCM57311 },
	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_BCM57312 },
	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_BCM57314 },
	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_BCM57402 },
	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_BCM57404 },
	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_BCM57406 },
	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_BCM57407 },
	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_BCM57412 },
	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_BCM57414 },
	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_BCM57416 },
	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_BCM57416_SFP },
	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_BCM57417 },
	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_BCM57417_SFP }
};

int		bnxt_match(struct device *, void *, void *);
void		bnxt_attach(struct device *, struct device *, void *);

void		bnxt_up(struct bnxt_softc *);
void		bnxt_down(struct bnxt_softc *);
void		bnxt_iff(struct bnxt_softc *);
int		bnxt_ioctl(struct ifnet *, u_long, caddr_t);
int		bnxt_rxrinfo(struct bnxt_softc *, struct if_rxrinfo *);
void		bnxt_start(struct ifqueue *);
int		bnxt_admin_intr(void *);
int		bnxt_intr(void *);
void		bnxt_watchdog(struct ifnet *);
void		bnxt_media_status(struct ifnet *, struct ifmediareq *);
int		bnxt_media_change(struct ifnet *);
int		bnxt_media_autonegotiate(struct bnxt_softc *);

struct cmpl_base *bnxt_cpr_next_cmpl(struct bnxt_softc *, struct bnxt_cp_ring *);
void		bnxt_cpr_commit(struct bnxt_softc *, struct bnxt_cp_ring *);
void		bnxt_cpr_rollback(struct bnxt_softc *, struct bnxt_cp_ring *);

void		bnxt_mark_cpr_invalid(struct bnxt_cp_ring *);
void		bnxt_write_cp_doorbell(struct bnxt_softc *, struct bnxt_ring *,
		    int);
void		bnxt_write_cp_doorbell_index(struct bnxt_softc *,
		    struct bnxt_ring *, uint32_t, int);
void		bnxt_write_rx_doorbell(struct bnxt_softc *, struct bnxt_ring *,
		    int);
void		bnxt_write_tx_doorbell(struct bnxt_softc *, struct bnxt_ring *,
		    int);

int		bnxt_rx_fill(struct bnxt_queue *);
int		bnxt_rx_fill_ag(struct bnxt_queue *);
u_int		bnxt_rx_fill_slots(struct bnxt_softc *, struct bnxt_ring *, void *,
		    struct bnxt_slot *, uint *, int, uint16_t, u_int);
void		bnxt_refill(void *);
int		bnxt_rx(struct bnxt_softc *, struct bnxt_rx_queue *,
		    struct bnxt_cp_ring *, struct mbuf_list *, int *, int *,
		    struct cmpl_base *);

void		bnxt_txeof(struct bnxt_softc *, struct bnxt_tx_queue *, int *,
		    struct cmpl_base *);

int		bnxt_set_cp_ring_aggint(struct bnxt_softc *, struct bnxt_cp_ring *);

int		_hwrm_send_message(struct bnxt_softc *, void *, uint32_t);
int		hwrm_send_message(struct bnxt_softc *, void *, uint32_t);
void		bnxt_hwrm_cmd_hdr_init(struct bnxt_softc *, void *, uint16_t);
int		bnxt_hwrm_err_map(uint16_t err);

/* HWRM Function Prototypes */
int		bnxt_hwrm_ring_alloc(struct bnxt_softc *, uint8_t,
		    struct bnxt_ring *, uint16_t, uint32_t, int);
int		bnxt_hwrm_ring_free(struct bnxt_softc *, uint8_t,
		    struct bnxt_ring *);
int		bnxt_hwrm_ver_get(struct bnxt_softc *);
int		bnxt_hwrm_queue_qportcfg(struct bnxt_softc *);
int		bnxt_hwrm_func_drv_rgtr(struct bnxt_softc *);
int		bnxt_hwrm_func_qcaps(struct bnxt_softc *);
int		bnxt_hwrm_func_qcfg(struct bnxt_softc *);
int		bnxt_hwrm_func_reset(struct bnxt_softc *);
int		bnxt_hwrm_vnic_ctx_alloc(struct bnxt_softc *, uint16_t *);
int		bnxt_hwrm_vnic_ctx_free(struct bnxt_softc *, uint16_t *);
int		bnxt_hwrm_vnic_cfg(struct bnxt_softc *,
		    struct bnxt_vnic_info *);
int		bnxt_hwrm_vnic_cfg_placement(struct bnxt_softc *,
		    struct bnxt_vnic_info *vnic);
int		bnxt_hwrm_stat_ctx_alloc(struct bnxt_softc *,
		    struct bnxt_cp_ring *, uint64_t);
int		bnxt_hwrm_stat_ctx_free(struct bnxt_softc *,
		    struct bnxt_cp_ring *);
int		bnxt_hwrm_ring_grp_alloc(struct bnxt_softc *,
		    struct bnxt_grp_info *);
int		bnxt_hwrm_ring_grp_free(struct bnxt_softc *,
		    struct bnxt_grp_info *);
int		bnxt_hwrm_vnic_alloc(struct bnxt_softc *,
		    struct bnxt_vnic_info *);
int		bnxt_hwrm_vnic_free(struct bnxt_softc *,
		    struct bnxt_vnic_info *);
int		bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt_softc *,
		    uint32_t, uint32_t, uint64_t, uint32_t);
int		bnxt_hwrm_set_filter(struct bnxt_softc *,
		    struct bnxt_vnic_info *);
int		bnxt_hwrm_free_filter(struct bnxt_softc *,
		    struct bnxt_vnic_info *);
int		bnxt_hwrm_vnic_rss_cfg(struct bnxt_softc *,
		    struct bnxt_vnic_info *, uint32_t, daddr_t, daddr_t);
int		bnxt_cfg_async_cr(struct bnxt_softc *, struct bnxt_cp_ring *);
int		bnxt_hwrm_nvm_get_dev_info(struct bnxt_softc *, uint16_t *,
		    uint16_t *, uint32_t *, uint32_t *, uint32_t *, uint32_t *);
int		bnxt_hwrm_port_phy_qcfg(struct bnxt_softc *,
		    struct ifmediareq *);
int		bnxt_hwrm_func_rgtr_async_events(struct bnxt_softc *);
int		bnxt_get_sffpage(struct bnxt_softc *, struct if_sffpage *);

/* not used yet: */
#if 0
int bnxt_hwrm_func_drv_unrgtr(struct bnxt_softc *softc, bool shutdown);

int bnxt_hwrm_port_qstats(struct bnxt_softc *softc);


int bnxt_hwrm_vnic_tpa_cfg(struct bnxt_softc *softc);
void bnxt_validate_hw_lro_settings(struct bnxt_softc *softc);
int bnxt_hwrm_fw_reset(struct bnxt_softc *softc, uint8_t processor,
    uint8_t *selfreset);
int bnxt_hwrm_fw_qstatus(struct bnxt_softc *softc, uint8_t type,
    uint8_t *selfreset);
int bnxt_hwrm_fw_get_time(struct bnxt_softc *softc, uint16_t *year,
    uint8_t *month, uint8_t *day, uint8_t *hour, uint8_t *minute,
    uint8_t *second, uint16_t *millisecond, uint16_t *zone);
int bnxt_hwrm_fw_set_time(struct bnxt_softc *softc, uint16_t year,
    uint8_t month, uint8_t day, uint8_t hour, uint8_t minute, uint8_t second,
    uint16_t millisecond, uint16_t zone);

#endif


const struct cfattach bnxt_ca = {
	sizeof(struct bnxt_softc), bnxt_match, bnxt_attach
};

struct cfdriver bnxt_cd = {
	NULL, "bnxt", DV_IFNET
};

struct bnxt_dmamem *
bnxt_dmamem_alloc(struct bnxt_softc *sc, size_t size)
{
	struct bnxt_dmamem *m;
	int nsegs;

	m = malloc(sizeof(*m), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (m == NULL)
		return (NULL);

	m->bdm_size = size;

	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &m->bdm_map) != 0)
		goto bdmfree;

	if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &m->bdm_seg, 1,
	    &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &m->bdm_seg, nsegs, size, &m->bdm_kva,
	    BUS_DMA_NOWAIT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, m->bdm_map, m->bdm_kva, size, NULL,
	    BUS_DMA_NOWAIT) != 0)
		goto unmap;

	return (m);

unmap:
	bus_dmamem_unmap(sc->sc_dmat, m->bdm_kva, m->bdm_size);
free:
	bus_dmamem_free(sc->sc_dmat, &m->bdm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, m->bdm_map);
bdmfree:
	free(m, M_DEVBUF, sizeof *m);

	return (NULL);
}

void
bnxt_dmamem_free(struct bnxt_softc *sc, struct bnxt_dmamem *m)
{
	bus_dmamap_unload(sc->sc_dmat, m->bdm_map);
	bus_dmamem_unmap(sc->sc_dmat, m->bdm_kva, m->bdm_size);
	bus_dmamem_free(sc->sc_dmat, &m->bdm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, m->bdm_map);
	free(m, M_DEVBUF, sizeof *m);
}

int
bnxt_match(struct device *parent, void *match, void *aux)
{
	return (pci_matchbyid(aux, bnxt_devices, nitems(bnxt_devices)));
}

void
bnxt_attach(struct device *parent, struct device *self, void *aux)
{
	struct bnxt_softc *sc = (struct bnxt_softc *)self;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct pci_attach_args *pa = aux;
	struct bnxt_cp_ring *cpr;
	pci_intr_handle_t ih;
	const char *intrstr;
	u_int memtype;
	int i;

	sc->sc_pc = pa->pa_pc;
	sc->sc_tag = pa->pa_tag;
	sc->sc_dmat = pa->pa_dmat;

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BNXT_HWRM_BAR);
	if (pci_mapreg_map(pa, BNXT_HWRM_BAR, memtype, 0, &sc->sc_hwrm_t,
	    &sc->sc_hwrm_h, NULL, &sc->sc_hwrm_s, 0)) {
		printf(": failed to map hwrm\n");
		return;
	}

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BNXT_DOORBELL_BAR);
	if (pci_mapreg_map(pa, BNXT_DOORBELL_BAR, memtype, 0, &sc->sc_db_t,
	    &sc->sc_db_h, NULL, &sc->sc_db_s, 0)) {
		printf(": failed to map doorbell\n");
		goto unmap_1;
	}

	BNXT_HWRM_LOCK_INIT(sc, DEVNAME(sc));
	sc->sc_cmd_resp = bnxt_dmamem_alloc(sc, PAGE_SIZE);
	if (sc->sc_cmd_resp == NULL) {
		printf(": failed to allocate command response buffer\n");
		goto unmap_2;
	}

	if (bnxt_hwrm_ver_get(sc) != 0) {
		printf(": failed to query version info\n");
		goto free_resp;
	}

	if (bnxt_hwrm_nvm_get_dev_info(sc, NULL, NULL, NULL, NULL, NULL, NULL)
	    != 0) {
		printf(": failed to get nvram info\n");
		goto free_resp;
	}

	if (bnxt_hwrm_func_drv_rgtr(sc) != 0) {
		printf(": failed to register driver with firmware\n");
		goto free_resp;
	}

	if (bnxt_hwrm_func_rgtr_async_events(sc) != 0) {
		printf(": failed to register async events\n");
		goto free_resp;
	}

	if (bnxt_hwrm_func_qcaps(sc) != 0) {
		printf(": failed to get queue capabilities\n");
		goto free_resp;
	}

	/*
	 * devices advertise msi support, but there's no way to tell a
	 * completion queue to use msi mode, only legacy or msi-x.
	 */
	if (pci_intr_map_msix(pa, 0, &ih) == 0) {
		int nmsix;

		sc->sc_flags |= BNXT_FLAG_MSIX;
		intrstr = pci_intr_string(sc->sc_pc, ih);

		nmsix = pci_intr_msix_count(pa);
		if (nmsix > 1) {
			sc->sc_ih = pci_intr_establish(sc->sc_pc, ih,
			    IPL_NET | IPL_MPSAFE, bnxt_admin_intr, sc, DEVNAME(sc));
			sc->sc_intrmap = intrmap_create(&sc->sc_dev,
			    nmsix - 1, BNXT_MAX_QUEUES, INTRMAP_POWEROF2);
			sc->sc_nqueues = intrmap_count(sc->sc_intrmap);
			KASSERT(sc->sc_nqueues > 0);
			KASSERT(powerof2(sc->sc_nqueues));
		} else {
			sc->sc_ih = pci_intr_establish(sc->sc_pc, ih,
			    IPL_NET | IPL_MPSAFE, bnxt_intr, &sc->sc_queues[0],
			    DEVNAME(sc));
			sc->sc_nqueues = 1;
		}
	} else if (pci_intr_map(pa, &ih) == 0) {
		intrstr = pci_intr_string(sc->sc_pc, ih);
		sc->sc_ih = pci_intr_establish(sc->sc_pc, ih, IPL_NET | IPL_MPSAFE,
		    bnxt_intr, &sc->sc_queues[0], DEVNAME(sc));
		sc->sc_nqueues = 1;
	} else {
		printf(": unable to map interrupt\n");
		goto free_resp;
	}
	if (sc->sc_ih == NULL) {
		printf(": unable to establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		goto deintr;
	}
	printf("%s, %d queues, address %s\n", intrstr, sc->sc_nqueues,
	    ether_sprintf(sc->sc_ac.ac_enaddr));

	if (bnxt_hwrm_func_qcfg(sc) != 0) {
		printf("%s: failed to query function config\n", DEVNAME(sc));
		goto deintr;
	}

	if (bnxt_hwrm_queue_qportcfg(sc) != 0) {
		printf("%s: failed to query port config\n", DEVNAME(sc));
		goto deintr;
	}

	if (bnxt_hwrm_func_reset(sc) != 0) {
		printf("%s: reset failed\n", DEVNAME(sc));
		goto deintr;
	}

	if (sc->sc_intrmap == NULL)
		cpr = &sc->sc_queues[0].q_cp;
	else
		cpr = &sc->sc_cp_ring;

	cpr->stats_ctx_id = HWRM_NA_SIGNATURE;
	cpr->ring.phys_id = (uint16_t)HWRM_NA_SIGNATURE;
	cpr->softc = sc;
	cpr->ring.id = 0;
	cpr->ring.doorbell = cpr->ring.id * 0x80;
	cpr->ring.ring_size = (PAGE_SIZE * BNXT_CP_PAGES) /
	    sizeof(struct cmpl_base);
	cpr->ring_mem = bnxt_dmamem_alloc(sc, PAGE_SIZE *
	    BNXT_CP_PAGES);
	if (cpr->ring_mem == NULL) {
		printf("%s: failed to allocate completion queue memory\n",
		    DEVNAME(sc));
		goto deintr;
	}
	cpr->ring.vaddr = BNXT_DMA_KVA(cpr->ring_mem);
	cpr->ring.paddr = BNXT_DMA_DVA(cpr->ring_mem);
	cpr->cons = UINT32_MAX;
	cpr->v_bit = 1;
	bnxt_mark_cpr_invalid(cpr);
	if (bnxt_hwrm_ring_alloc(sc, HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
	    &cpr->ring, (uint16_t)HWRM_NA_SIGNATURE,
	    HWRM_NA_SIGNATURE, 1) != 0) {
		printf("%s: failed to allocate completion queue\n",
		    DEVNAME(sc));
		goto free_cp_mem;
	}
	if (bnxt_cfg_async_cr(sc, cpr) != 0) {
		printf("%s: failed to set async completion ring\n",
		    DEVNAME(sc));
		goto free_cp_mem;
	}
	bnxt_write_cp_doorbell(sc, &cpr->ring, 1);

	if (bnxt_set_cp_ring_aggint(sc, cpr) != 0) {
		printf("%s: failed to set interrupt aggregation\n",
		    DEVNAME(sc));
		goto free_cp_mem;
	}

	strlcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST | IFF_SIMPLEX;
	ifp->if_xflags = IFXF_MPSAFE;
	ifp->if_ioctl = bnxt_ioctl;
	ifp->if_qstart = bnxt_start;
	ifp->if_watchdog = bnxt_watchdog;
	ifp->if_hardmtu = BNXT_MAX_MTU;
	ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_CSUM_IPv4 |
	    IFCAP_CSUM_UDPv4 | IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv6 |
	    IFCAP_CSUM_TCPv6;
	ifp->if_capabilities |= IFCAP_TSOv4 | IFCAP_TSOv6;
#if NVLAN > 0
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif
	ifq_init_maxlen(&ifp->if_snd, 1024);	/* ? */

	ifmedia_init(&sc->sc_media, IFM_IMASK, bnxt_media_change,
	    bnxt_media_status);

	if_attach(ifp);
	ether_ifattach(ifp);

	if_attach_iqueues(ifp, sc->sc_nqueues);
	if_attach_queues(ifp, sc->sc_nqueues);
	for (i = 0; i < sc->sc_nqueues; i++) {
		struct ifiqueue *ifiq = ifp->if_iqs[i];
		struct ifqueue *ifq = ifp->if_ifqs[i];
		struct bnxt_queue *bq = &sc->sc_queues[i];
		struct bnxt_cp_ring *cp = &bq->q_cp;
		struct bnxt_rx_queue *rx = &bq->q_rx;
		struct bnxt_tx_queue *tx = &bq->q_tx;

		bq->q_index = i;
		bq->q_sc = sc;

		rx->rx_softc = sc;
		rx->rx_ifiq = ifiq;
		timeout_set(&rx->rx_refill, bnxt_refill, bq);
		ifiq->ifiq_softc = rx;

		tx->tx_softc = sc;
		tx->tx_ifq = ifq;
		ifq->ifq_softc = tx;

		if (sc->sc_intrmap != NULL) {
			cp->stats_ctx_id = HWRM_NA_SIGNATURE;
			cp->ring.phys_id = (uint16_t)HWRM_NA_SIGNATURE;
			cp->ring.id = i + 1;	/* first cp ring is async only */
			cp->softc = sc;
			cp->ring.doorbell = bq->q_cp.ring.id * 0x80;
			cp->ring.ring_size = (PAGE_SIZE * BNXT_CP_PAGES) /
			    sizeof(struct cmpl_base);
			if (pci_intr_map_msix(pa, i + 1, &ih) != 0) {
				printf("%s: unable to map queue interrupt %d\n",
				    DEVNAME(sc), i);
				goto intrdisestablish;
			}
			snprintf(bq->q_name, sizeof(bq->q_name), "%s:%d",
			    DEVNAME(sc), i);
			bq->q_ihc = pci_intr_establish_cpu(sc->sc_pc, ih,
			    IPL_NET | IPL_MPSAFE, intrmap_cpu(sc->sc_intrmap, i),
			    bnxt_intr, bq, bq->q_name);
			if (bq->q_ihc == NULL) {
				printf("%s: unable to establish interrupt %d\n",
				    DEVNAME(sc), i);
				goto intrdisestablish;
			}
		}
	}

	bnxt_media_autonegotiate(sc);
	bnxt_hwrm_port_phy_qcfg(sc, NULL);
	return;

intrdisestablish:
	for (i = 0; i < sc->sc_nqueues; i++) {
		struct bnxt_queue *bq = &sc->sc_queues[i];
		if (bq->q_ihc == NULL)
			continue;
		pci_intr_disestablish(sc->sc_pc, bq->q_ihc);
		bq->q_ihc = NULL;
	}
free_cp_mem:
	bnxt_dmamem_free(sc, cpr->ring_mem);
deintr:
	if (sc->sc_intrmap != NULL) {
		intrmap_destroy(sc->sc_intrmap);
		sc->sc_intrmap = NULL;
	}
	pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
	sc->sc_ih = NULL;
free_resp:
	bnxt_dmamem_free(sc, sc->sc_cmd_resp);
unmap_2:
	bus_space_unmap(sc->sc_db_t, sc->sc_db_h, sc->sc_db_s);
	sc->sc_db_s = 0;
unmap_1:
	bus_space_unmap(sc->sc_hwrm_t, sc->sc_hwrm_h, sc->sc_hwrm_s);
	sc->sc_hwrm_s = 0;
}

void
bnxt_free_slots(struct bnxt_softc *sc, struct bnxt_slot *slots, int allocated,
    int total)
{
	struct bnxt_slot *bs;

	int i = allocated;
	while (i-- > 0) {
		bs = &slots[i];
		bus_dmamap_destroy(sc->sc_dmat, bs->bs_map);
		if (bs->bs_m != NULL)
			m_freem(bs->bs_m);
	}
	free(slots, M_DEVBUF, total * sizeof(*bs));
}

int
bnxt_set_cp_ring_aggint(struct bnxt_softc *sc, struct bnxt_cp_ring *cpr)
{
	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input aggint;

	/*
	 * set interrupt aggregation parameters for around 10k interrupts
	 * per second.  the timers are in units of 80usec, and the counters
	 * are based on the minimum rx ring size of 32.
	 */
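	/*
	 * worked example, assuming 1000000000 below is a second in
	 * nanoseconds and a timer tick is 80ns: (1000000000 / 20000) /
	 * 80 = 625 ticks (50usec) for the DMA aggregation timers, and
	 * int_lat_tmr_max = (1000000000 / 10000) / 80 = 1250 ticks
	 * (100usec), which matches the ~10k interrupts/sec target.
	 */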
	memset(&aggint, 0, sizeof(aggint));
	bnxt_hwrm_cmd_hdr_init(sc, &aggint,
	    HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
	aggint.ring_id = htole16(cpr->ring.phys_id);
	aggint.num_cmpl_dma_aggr = htole16(32);
	aggint.num_cmpl_dma_aggr_during_int = aggint.num_cmpl_dma_aggr;
	aggint.cmpl_aggr_dma_tmr = htole16((1000000000 / 20000) / 80);
	aggint.cmpl_aggr_dma_tmr_during_int = aggint.cmpl_aggr_dma_tmr;
	aggint.int_lat_tmr_min = htole16((1000000000 / 20000) / 80);
	aggint.int_lat_tmr_max = htole16((1000000000 / 10000) / 80);
	aggint.num_cmpl_aggr_int = htole16(16);
	return (hwrm_send_message(sc, &aggint, sizeof(aggint)));
}

int
bnxt_queue_up(struct bnxt_softc *sc, struct bnxt_queue *bq)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct bnxt_cp_ring *cp = &bq->q_cp;
	struct bnxt_rx_queue *rx = &bq->q_rx;
	struct bnxt_tx_queue *tx = &bq->q_tx;
	struct bnxt_grp_info *rg = &bq->q_rg;
	struct bnxt_slot *bs;
	int i;

	tx->tx_ring_mem = bnxt_dmamem_alloc(sc, PAGE_SIZE);
	if (tx->tx_ring_mem == NULL) {
		printf("%s: failed to allocate tx ring %d\n", DEVNAME(sc), bq->q_index);
		return ENOMEM;
	}

	rx->rx_ring_mem = bnxt_dmamem_alloc(sc, PAGE_SIZE * 2);
	if (rx->rx_ring_mem == NULL) {
		printf("%s: failed to allocate rx ring %d\n", DEVNAME(sc), bq->q_index);
		goto free_tx;
	}

	/* completion ring is already allocated if we're not using an intrmap */
	if (sc->sc_intrmap != NULL) {
		cp->ring_mem = bnxt_dmamem_alloc(sc, PAGE_SIZE * BNXT_CP_PAGES);
		if (cp->ring_mem == NULL) {
			printf("%s: failed to allocate completion ring %d mem\n",
			    DEVNAME(sc), bq->q_index);
			goto free_rx;
		}
		cp->ring.vaddr = BNXT_DMA_KVA(cp->ring_mem);
		cp->ring.paddr = BNXT_DMA_DVA(cp->ring_mem);
		cp->cons = UINT32_MAX;
		cp->v_bit = 1;
		bnxt_mark_cpr_invalid(cp);

		if (bnxt_hwrm_ring_alloc(sc, HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
		    &cp->ring, (uint16_t)HWRM_NA_SIGNATURE,
		    HWRM_NA_SIGNATURE, 1) != 0) {
			printf("%s: failed to allocate completion queue %d\n",
			    DEVNAME(sc), bq->q_index);
			goto free_rx;
		}

		if (bnxt_set_cp_ring_aggint(sc, cp) != 0) {
			printf("%s: failed to set interrupt %d aggregation\n",
			    DEVNAME(sc), bq->q_index);
			goto free_rx;
		}
		bnxt_write_cp_doorbell(sc, &cp->ring, 1);
	}

	if (bnxt_hwrm_stat_ctx_alloc(sc, &bq->q_cp,
	    BNXT_DMA_DVA(sc->sc_stats_ctx_mem) +
	    (bq->q_index * sizeof(struct ctx_hw_stats))) != 0) {
		printf("%s: failed to set up stats context\n", DEVNAME(sc));
		goto free_rx;
	}

	tx->tx_ring.phys_id = (uint16_t)HWRM_NA_SIGNATURE;
	tx->tx_ring.id = BNXT_TX_RING_ID_BASE + bq->q_index;
	tx->tx_ring.doorbell = tx->tx_ring.id * 0x80;
	tx->tx_ring.ring_size = PAGE_SIZE / sizeof(struct tx_bd_short);
	tx->tx_ring.vaddr = BNXT_DMA_KVA(tx->tx_ring_mem);
	tx->tx_ring.paddr = BNXT_DMA_DVA(tx->tx_ring_mem);
	if (bnxt_hwrm_ring_alloc(sc, HWRM_RING_ALLOC_INPUT_RING_TYPE_TX,
	    &tx->tx_ring, cp->ring.phys_id, HWRM_NA_SIGNATURE, 1) != 0) {
		printf("%s: failed to set up tx ring\n",
		    DEVNAME(sc));
		goto dealloc_stats;
	}
	bnxt_write_tx_doorbell(sc, &tx->tx_ring, 0);

	rx->rx_ring.phys_id = (uint16_t)HWRM_NA_SIGNATURE;
	rx->rx_ring.id = BNXT_RX_RING_ID_BASE + bq->q_index;
	rx->rx_ring.doorbell = rx->rx_ring.id * 0x80;
	rx->rx_ring.ring_size = PAGE_SIZE / sizeof(struct rx_prod_pkt_bd);
	rx->rx_ring.vaddr = BNXT_DMA_KVA(rx->rx_ring_mem);
	rx->rx_ring.paddr = BNXT_DMA_DVA(rx->rx_ring_mem);
	if (bnxt_hwrm_ring_alloc(sc, HWRM_RING_ALLOC_INPUT_RING_TYPE_RX,
	    &rx->rx_ring, cp->ring.phys_id, HWRM_NA_SIGNATURE, 1) != 0) {
		printf("%s: failed to set up rx ring\n",
		    DEVNAME(sc));
		goto dealloc_tx;
	}
	bnxt_write_rx_doorbell(sc, &rx->rx_ring, 0);

	rx->rx_ag_ring.phys_id = (uint16_t)HWRM_NA_SIGNATURE;
	rx->rx_ag_ring.id = BNXT_AG_RING_ID_BASE + bq->q_index;
	rx->rx_ag_ring.doorbell = rx->rx_ag_ring.id * 0x80;
	rx->rx_ag_ring.ring_size = PAGE_SIZE / sizeof(struct rx_prod_pkt_bd);
	rx->rx_ag_ring.vaddr = BNXT_DMA_KVA(rx->rx_ring_mem) + PAGE_SIZE;
	rx->rx_ag_ring.paddr = BNXT_DMA_DVA(rx->rx_ring_mem) + PAGE_SIZE;
	if (bnxt_hwrm_ring_alloc(sc, HWRM_RING_ALLOC_INPUT_RING_TYPE_RX,
	    &rx->rx_ag_ring, cp->ring.phys_id, HWRM_NA_SIGNATURE, 1) != 0) {
		printf("%s: failed to set up rx ag ring\n",
		    DEVNAME(sc));
		goto dealloc_rx;
	}
	bnxt_write_rx_doorbell(sc, &rx->rx_ag_ring, 0);

	rg->grp_id = HWRM_NA_SIGNATURE;
	rg->stats_ctx = cp->stats_ctx_id;
	rg->rx_ring_id = rx->rx_ring.phys_id;
	rg->ag_ring_id = rx->rx_ag_ring.phys_id;
	rg->cp_ring_id = cp->ring.phys_id;
	if (bnxt_hwrm_ring_grp_alloc(sc, rg) != 0) {
		printf("%s: failed to allocate ring group\n",
		    DEVNAME(sc));
		goto dealloc_ag;
	}

	rx->rx_slots = mallocarray(sizeof(*bs), rx->rx_ring.ring_size,
	    M_DEVBUF, M_WAITOK | M_ZERO);
	if (rx->rx_slots == NULL) {
		printf("%s: failed to allocate rx slots\n", DEVNAME(sc));
		goto dealloc_ring_group;
	}

	for (i = 0; i < rx->rx_ring.ring_size; i++) {
		bs = &rx->rx_slots[i];
		if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
		    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &bs->bs_map) != 0) {
			printf("%s: failed to allocate rx dma maps\n",
			    DEVNAME(sc));
			goto destroy_rx_slots;
		}
	}

	rx->rx_ag_slots = mallocarray(sizeof(*bs), rx->rx_ag_ring.ring_size,
	    M_DEVBUF, M_WAITOK | M_ZERO);
	if (rx->rx_ag_slots == NULL) {
		printf("%s: failed to allocate rx ag slots\n", DEVNAME(sc));
		goto destroy_rx_slots;
	}

	for (i = 0; i < rx->rx_ag_ring.ring_size; i++) {
		bs = &rx->rx_ag_slots[i];
		if (bus_dmamap_create(sc->sc_dmat, BNXT_AG_BUFFER_SIZE, 1,
		    BNXT_AG_BUFFER_SIZE, 0, BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
		    &bs->bs_map) != 0) {
			printf("%s: failed to allocate rx ag dma maps\n",
			    DEVNAME(sc));
			goto destroy_rx_ag_slots;
		}
	}

	tx->tx_slots = mallocarray(sizeof(*bs), tx->tx_ring.ring_size,
	    M_DEVBUF, M_WAITOK | M_ZERO);
	if (tx->tx_slots == NULL) {
		printf("%s: failed to allocate tx slots\n", DEVNAME(sc));
		goto destroy_rx_ag_slots;
	}

	for (i = 0; i < tx->tx_ring.ring_size; i++) {
		bs = &tx->tx_slots[i];
		if (bus_dmamap_create(sc->sc_dmat, MAXMCLBYTES, BNXT_MAX_TX_SEGS,
		    BNXT_MAX_MTU, 0, BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
		    &bs->bs_map) != 0) {
			printf("%s: failed to allocate tx dma maps\n",
			    DEVNAME(sc));
			goto destroy_tx_slots;
		}
	}

	/*
	 * initially, the rx ring must be filled at least some distance beyond
	 * the current consumer index, as it looks like the firmware assumes the
	 * ring is full on creation, but doesn't prefetch the whole thing.
	 * once the whole ring has been used once, we should be able to back off
	 * to 2 or so slots, but we currently don't have a way of doing that.
	 */
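	/* rxr[0] tracks the rx ring, rxr[1] the rx ag ring */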
	if_rxr_init(&rx->rxr[0], 32, rx->rx_ring.ring_size - 1);
	if_rxr_init(&rx->rxr[1], 32, rx->rx_ag_ring.ring_size - 1);
	rx->rx_prod = 0;
	rx->rx_cons = 0;
	rx->rx_ag_prod = 0;
	rx->rx_ag_cons = 0;
	bnxt_rx_fill(bq);
	bnxt_rx_fill_ag(bq);

	tx->tx_cons = 0;
	tx->tx_prod = 0;
	tx->tx_ring_cons = 0;
	tx->tx_ring_prod = 0;
	ifq_clr_oactive(ifp->if_ifqs[bq->q_index]);
	ifq_restart(ifp->if_ifqs[bq->q_index]);
	return 0;

destroy_tx_slots:
	bnxt_free_slots(sc, tx->tx_slots, i, tx->tx_ring.ring_size);
	tx->tx_slots = NULL;

	i = rx->rx_ag_ring.ring_size;
destroy_rx_ag_slots:
	bnxt_free_slots(sc, rx->rx_ag_slots, i, rx->rx_ag_ring.ring_size);
	rx->rx_ag_slots = NULL;

	i = rx->rx_ring.ring_size;
destroy_rx_slots:
	bnxt_free_slots(sc, rx->rx_slots, i, rx->rx_ring.ring_size);
	rx->rx_slots = NULL;
dealloc_ring_group:
	bnxt_hwrm_ring_grp_free(sc, &bq->q_rg);
dealloc_ag:
	bnxt_hwrm_ring_free(sc, HWRM_RING_ALLOC_INPUT_RING_TYPE_RX,
	    &rx->rx_ag_ring);
dealloc_rx:
	bnxt_hwrm_ring_free(sc, HWRM_RING_ALLOC_INPUT_RING_TYPE_RX,
	    &rx->rx_ring);
dealloc_tx:
	bnxt_hwrm_ring_free(sc, HWRM_RING_ALLOC_INPUT_RING_TYPE_TX,
	    &tx->tx_ring);
dealloc_stats:
	bnxt_hwrm_stat_ctx_free(sc, cp);
free_rx:
	bnxt_dmamem_free(sc, rx->rx_ring_mem);
	rx->rx_ring_mem = NULL;
free_tx:
	bnxt_dmamem_free(sc, tx->tx_ring_mem);
	tx->tx_ring_mem = NULL;
	return ENOMEM;
}

void
bnxt_queue_down(struct bnxt_softc *sc, struct bnxt_queue *bq)
{
	struct bnxt_cp_ring *cp = &bq->q_cp;
	struct bnxt_rx_queue *rx = &bq->q_rx;
	struct bnxt_tx_queue *tx = &bq->q_tx;

	bnxt_free_slots(sc, tx->tx_slots, tx->tx_ring.ring_size,
	    tx->tx_ring.ring_size);
	tx->tx_slots = NULL;

	bnxt_free_slots(sc, rx->rx_ag_slots, rx->rx_ag_ring.ring_size,
	    rx->rx_ag_ring.ring_size);
	rx->rx_ag_slots = NULL;

	bnxt_free_slots(sc, rx->rx_slots, rx->rx_ring.ring_size,
	    rx->rx_ring.ring_size);
	rx->rx_slots = NULL;

	bnxt_hwrm_ring_grp_free(sc, &bq->q_rg);
	bnxt_hwrm_stat_ctx_free(sc, &bq->q_cp);

	/* may need to wait for 500ms here before we can free the rings */

	bnxt_hwrm_ring_free(sc, HWRM_RING_ALLOC_INPUT_RING_TYPE_TX,
	    &tx->tx_ring);
	bnxt_hwrm_ring_free(sc, HWRM_RING_ALLOC_INPUT_RING_TYPE_RX,
	    &rx->rx_ag_ring);
	bnxt_hwrm_ring_free(sc, HWRM_RING_ALLOC_INPUT_RING_TYPE_RX,
	    &rx->rx_ring);

	/* if no intrmap, leave cp ring in place for async events */
	if (sc->sc_intrmap != NULL) {
		bnxt_hwrm_ring_free(sc, HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
		    &cp->ring);

		bnxt_dmamem_free(sc, cp->ring_mem);
		cp->ring_mem = NULL;
	}

	bnxt_dmamem_free(sc, rx->rx_ring_mem);
	rx->rx_ring_mem = NULL;

	bnxt_dmamem_free(sc, tx->tx_ring_mem);
	tx->tx_ring_mem = NULL;
}

void
bnxt_up(struct bnxt_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	int i;

	sc->sc_stats_ctx_mem = bnxt_dmamem_alloc(sc,
	    sizeof(struct ctx_hw_stats) * sc->sc_nqueues);
	if (sc->sc_stats_ctx_mem == NULL) {
		printf("%s: failed to allocate stats contexts\n", DEVNAME(sc));
		return;
	}

	sc->sc_rx_cfg = bnxt_dmamem_alloc(sc, PAGE_SIZE * 2);
	if (sc->sc_rx_cfg == NULL) {
		printf("%s: failed to allocate rx config buffer\n",
		    DEVNAME(sc));
		goto free_stats;
	}

	for (i = 0; i < sc->sc_nqueues; i++) {
		if (bnxt_queue_up(sc, &sc->sc_queues[i]) != 0) {
			goto down_queues;
		}
	}

	sc->sc_vnic.rss_id = (uint16_t)HWRM_NA_SIGNATURE;
	if (bnxt_hwrm_vnic_ctx_alloc(sc, &sc->sc_vnic.rss_id) != 0) {
		printf("%s: failed to allocate vnic rss context\n",
		    DEVNAME(sc));
		goto down_all_queues;
	}

	sc->sc_vnic.id = (uint16_t)HWRM_NA_SIGNATURE;
	sc->sc_vnic.def_ring_grp = sc->sc_queues[0].q_rg.grp_id;
	sc->sc_vnic.mru = BNXT_MAX_MTU;
	sc->sc_vnic.cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
	sc->sc_vnic.lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
	sc->sc_vnic.flags = BNXT_VNIC_FLAG_DEFAULT |
	    BNXT_VNIC_FLAG_VLAN_STRIP;
	if (bnxt_hwrm_vnic_alloc(sc, &sc->sc_vnic) != 0) {
		printf("%s: failed to allocate vnic\n", DEVNAME(sc));
		goto dealloc_vnic_ctx;
	}

	if (bnxt_hwrm_vnic_cfg(sc, &sc->sc_vnic) != 0) {
		printf("%s: failed to configure vnic\n", DEVNAME(sc));
		goto dealloc_vnic;
	}

	if (bnxt_hwrm_vnic_cfg_placement(sc, &sc->sc_vnic) != 0) {
		printf("%s: failed to configure vnic placement mode\n",
		    DEVNAME(sc));
		goto dealloc_vnic;
	}

	sc->sc_vnic.filter_id = -1;
	if (bnxt_hwrm_set_filter(sc, &sc->sc_vnic) != 0) {
		printf("%s: failed to set vnic filter\n", DEVNAME(sc));
		goto dealloc_vnic;
	}

	if (sc->sc_nqueues > 1) {
		uint16_t *rss_table = (BNXT_DMA_KVA(sc->sc_rx_cfg) + PAGE_SIZE);
		uint8_t *hash_key = (uint8_t *)(rss_table + HW_HASH_INDEX_SIZE);

		for (i = 0; i < HW_HASH_INDEX_SIZE; i++) {
			struct bnxt_queue *bq;

			bq = &sc->sc_queues[i % sc->sc_nqueues];
			rss_table[i] = htole16(bq->q_rg.grp_id);
		}
		stoeplitz_to_key(hash_key, HW_HASH_KEY_SIZE);

		if (bnxt_hwrm_vnic_rss_cfg(sc, &sc->sc_vnic,
		    HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4 |
		    HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4 |
		    HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6 |
		    HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6,
		    BNXT_DMA_DVA(sc->sc_rx_cfg) + PAGE_SIZE,
		    BNXT_DMA_DVA(sc->sc_rx_cfg) + PAGE_SIZE +
		    (HW_HASH_INDEX_SIZE * sizeof(uint16_t))) != 0) {
			printf("%s: failed to set RSS config\n", DEVNAME(sc));
			goto dealloc_vnic;
		}
	}

	bnxt_iff(sc);
	SET(ifp->if_flags, IFF_RUNNING);

	return;

dealloc_vnic:
	bnxt_hwrm_vnic_free(sc, &sc->sc_vnic);
dealloc_vnic_ctx:
	bnxt_hwrm_vnic_ctx_free(sc, &sc->sc_vnic.rss_id);

down_all_queues:
	i = sc->sc_nqueues;
down_queues:
	while (i-- > 0)
		bnxt_queue_down(sc, &sc->sc_queues[i]);

	bnxt_dmamem_free(sc, sc->sc_rx_cfg);
	sc->sc_rx_cfg = NULL;
free_stats:
	bnxt_dmamem_free(sc, sc->sc_stats_ctx_mem);
	sc->sc_stats_ctx_mem = NULL;
}

void
bnxt_down(struct bnxt_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	int i;

	CLR(ifp->if_flags, IFF_RUNNING);

	intr_barrier(sc->sc_ih);

	for (i = 0; i < sc->sc_nqueues; i++) {
		ifq_clr_oactive(ifp->if_ifqs[i]);
		ifq_barrier(ifp->if_ifqs[i]);

		timeout_del_barrier(&sc->sc_queues[i].q_rx.rx_refill);

		if (sc->sc_intrmap != NULL)
			intr_barrier(sc->sc_queues[i].q_ihc);
	}

	bnxt_hwrm_free_filter(sc, &sc->sc_vnic);
	bnxt_hwrm_vnic_free(sc, &sc->sc_vnic);
	bnxt_hwrm_vnic_ctx_free(sc, &sc->sc_vnic.rss_id);

	for (i = 0; i < sc->sc_nqueues; i++)
		bnxt_queue_down(sc, &sc->sc_queues[i]);

	bnxt_dmamem_free(sc, sc->sc_rx_cfg);
	sc->sc_rx_cfg = NULL;

	bnxt_dmamem_free(sc, sc->sc_stats_ctx_mem);
	sc->sc_stats_ctx_mem = NULL;
}

void
bnxt_iff(struct bnxt_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	char *mc_list;
	uint32_t rx_mask, mc_count;

	rx_mask = HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST
	    | HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST
	    | HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ANYVLAN_NONVLAN;

	mc_list = BNXT_DMA_KVA(sc->sc_rx_cfg);
	mc_count = 0;

	if (ifp->if_flags & IFF_PROMISC) {
		SET(ifp->if_flags, IFF_ALLMULTI);
		rx_mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
	} else if ((sc->sc_ac.ac_multirangecnt > 0) ||
	    (sc->sc_ac.ac_multicnt > (PAGE_SIZE / ETHER_ADDR_LEN))) {
		SET(ifp->if_flags, IFF_ALLMULTI);
		rx_mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
	} else {
		CLR(ifp->if_flags, IFF_ALLMULTI);
		ETHER_FIRST_MULTI(step, &sc->sc_ac, enm);
		while (enm != NULL) {
			memcpy(mc_list, enm->enm_addrlo, ETHER_ADDR_LEN);
			mc_list += ETHER_ADDR_LEN;
			mc_count++;

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	bnxt_hwrm_cfa_l2_set_rx_mask(sc, sc->sc_vnic.id, rx_mask,
	    BNXT_DMA_DVA(sc->sc_rx_cfg), mc_count);
}

int
bnxt_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct bnxt_softc *sc = (struct bnxt_softc *)ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();
	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		/* FALLTHROUGH */

	case SIOCSIFFLAGS:
		if (ISSET(ifp->if_flags, IFF_UP)) {
			if (ISSET(ifp->if_flags, IFF_RUNNING))
				error = ENETRESET;
			else
				bnxt_up(sc);
		} else {
			if (ISSET(ifp->if_flags, IFF_RUNNING))
				bnxt_down(sc);
		}
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;

	case SIOCGIFRXR:
		error = bnxt_rxrinfo(sc, (struct if_rxrinfo *)ifr->ifr_data);
		break;

	case SIOCGIFSFFPAGE:
		error = bnxt_get_sffpage(sc, (struct if_sffpage *)data);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_ac, cmd, data);
	}

	if (error == ENETRESET) {
		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
		    (IFF_UP | IFF_RUNNING))
			bnxt_iff(sc);
		error = 0;
	}

	splx(s);

	return (error);
}

int
bnxt_rxrinfo(struct bnxt_softc *sc, struct if_rxrinfo *ifri)
{
	struct if_rxring_info *ifr;
	int i;
	int error;

	ifr = mallocarray(sc->sc_nqueues * 2, sizeof(*ifr), M_TEMP,
	    M_WAITOK | M_ZERO | M_CANFAIL);
	if (ifr == NULL)
		return (ENOMEM);

	for (i = 0; i < sc->sc_nqueues; i++) {
		ifr[(i * 2)].ifr_size = MCLBYTES;
		ifr[(i * 2)].ifr_info = sc->sc_queues[i].q_rx.rxr[0];

		ifr[(i * 2) + 1].ifr_size = BNXT_AG_BUFFER_SIZE;
		ifr[(i * 2) + 1].ifr_info = sc->sc_queues[i].q_rx.rxr[1];
	}

	error = if_rxr_info_ioctl(ifri, sc->sc_nqueues * 2, ifr);
	free(ifr, M_TEMP, sc->sc_nqueues * 2 * sizeof(*ifr));

	return (error);
}

int
bnxt_load_mbuf(struct bnxt_softc *sc, struct bnxt_slot *bs, struct mbuf *m)
{
	switch (bus_dmamap_load_mbuf(sc->sc_dmat, bs->bs_map, m,
	    BUS_DMA_STREAMING | BUS_DMA_NOWAIT)) {
	case 0:
		break;

	case EFBIG:
		if (m_defrag(m, M_DONTWAIT) == 0 &&
		    bus_dmamap_load_mbuf(sc->sc_dmat, bs->bs_map, m,
		    BUS_DMA_STREAMING | BUS_DMA_NOWAIT) == 0)
			break;

	default:
		return (1);
	}

	bs->bs_m = m;
	return (0);
}

void
bnxt_start(struct ifqueue *ifq)
{
	struct ifnet *ifp = ifq->ifq_if;
	struct tx_bd_short *txring;
	struct tx_bd_long_hi *txhi;
	struct bnxt_tx_queue *tx = ifq->ifq_softc;
	struct bnxt_softc *sc = tx->tx_softc;
	struct bnxt_slot *bs;
	struct ether_extracted ext;
	bus_dmamap_t map;
	struct mbuf *m;
	u_int idx, free, used, laststart;
	uint16_t txflags, lflags;
	int i, slen;

	txring = (struct tx_bd_short *)BNXT_DMA_KVA(tx->tx_ring_mem);

	idx = tx->tx_ring_prod;
	free = tx->tx_ring_cons;
	if (free <= idx)
		free += tx->tx_ring.ring_size;
	free -= idx;

	used = 0;

	for (;;) {
		/* +1 for tx_bd_long_hi */
		if (used + BNXT_MAX_TX_SEGS + 1 > free) {
			ifq_set_oactive(ifq);
			break;
		}

		m = ifq_dequeue(ifq);
		if (m == NULL)
			break;

		bs = &tx->tx_slots[tx->tx_prod];
		if (bnxt_load_mbuf(sc, bs, m) != 0) {
			m_freem(m);
			ifp->if_oerrors++;
			continue;
		}

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif
		map = bs->bs_map;
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);
		used += BNXT_TX_SLOTS(bs);

		/* first segment */
		laststart = idx;
		txring[idx].len = htole16(map->dm_segs[0].ds_len);
		txring[idx].opaque = tx->tx_prod;
		txring[idx].addr = htole64(map->dm_segs[0].ds_addr);
		if (m->m_pkthdr.csum_flags & M_TCP_TSO)
			slen = m->m_pkthdr.ph_mss;
		else
			slen = map->dm_mapsize;

		if (slen < 512)
			txflags = TX_BD_LONG_FLAGS_LHINT_LT512;
		else if (slen < 1024)
			txflags = TX_BD_LONG_FLAGS_LHINT_LT1K;
		else if (slen < 2048)
			txflags = TX_BD_LONG_FLAGS_LHINT_LT2K;
		else
			txflags = TX_BD_LONG_FLAGS_LHINT_GTE2K;
		txflags |= TX_BD_LONG_TYPE_TX_BD_LONG |
		    TX_BD_LONG_FLAGS_NO_CMPL;
		txflags |= (BNXT_TX_SLOTS(bs) << TX_BD_LONG_FLAGS_BD_CNT_SFT) &
		    TX_BD_LONG_FLAGS_BD_CNT_MASK;
		if (map->dm_nsegs == 1)
			txflags |= TX_BD_SHORT_FLAGS_PACKET_END;
		txring[idx].flags_type = htole16(txflags);

		idx++;
		if (idx == tx->tx_ring.ring_size)
			idx = 0;

		/* long tx descriptor */
		txhi = (struct tx_bd_long_hi *)&txring[idx];
		memset(txhi, 0, sizeof(*txhi));

		lflags = 0;
		if (m->m_pkthdr.csum_flags & M_TCP_TSO) {
			uint16_t hdrsize;
			uint32_t outlen;
			uint32_t paylen;

			ether_extract_headers(m, &ext);
			if (ext.tcp && m->m_pkthdr.ph_mss > 0) {
				lflags |= TX_BD_LONG_LFLAGS_LSO;
				hdrsize = sizeof(*ext.eh);
				if (ext.ip4 || ext.ip6)
					hdrsize += ext.iphlen;
				else
					tcpstat_inc(tcps_outbadtso);

				hdrsize += ext.tcphlen;
				txhi->hdr_size = htole16(hdrsize / 2);

				outlen = m->m_pkthdr.ph_mss;
				txhi->mss = htole32(outlen);

				paylen = m->m_pkthdr.len - hdrsize;
				tcpstat_add(tcps_outpkttso,
				    (paylen + outlen + 1) / outlen);
			} else {
				tcpstat_inc(tcps_outbadtso);
			}
		} else {
			if (m->m_pkthdr.csum_flags & (M_UDP_CSUM_OUT |
			    M_TCP_CSUM_OUT))
				lflags |= TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM;
			if (m->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
				lflags |= TX_BD_LONG_LFLAGS_IP_CHKSUM;
		}
		txhi->lflags = htole16(lflags);

#if NVLAN > 0
		if (m->m_flags & M_VLANTAG) {
			txhi->cfa_meta = htole32(m->m_pkthdr.ether_vtag |
			    TX_BD_LONG_CFA_META_VLAN_TPID_TPID8100 |
			    TX_BD_LONG_CFA_META_KEY_VLAN_TAG);
		}
#endif

		idx++;
		if (idx == tx->tx_ring.ring_size)
			idx = 0;

		/* remaining segments */
		txflags = TX_BD_SHORT_TYPE_TX_BD_SHORT;
		for (i = 1; i < map->dm_nsegs; i++) {
			if (i == map->dm_nsegs - 1)
				txflags |= TX_BD_SHORT_FLAGS_PACKET_END;
			txring[idx].flags_type = htole16(txflags);

			txring[idx].len =
			    htole16(bs->bs_map->dm_segs[i].ds_len);
			txring[idx].opaque = tx->tx_prod;
			txring[idx].addr =
			    htole64(bs->bs_map->dm_segs[i].ds_addr);

			idx++;
			if (idx == tx->tx_ring.ring_size)
				idx = 0;
		}

		if (++tx->tx_prod >= tx->tx_ring.ring_size)
			tx->tx_prod = 0;
	}

	/* unset NO_CMPL on the first bd of the last packet */
	if (used != 0) {
		txring[laststart].flags_type &=
		    ~htole16(TX_BD_SHORT_FLAGS_NO_CMPL);
	}

	bnxt_write_tx_doorbell(sc, &tx->tx_ring, idx);
	tx->tx_ring_prod = idx;
}

void
bnxt_handle_async_event(struct bnxt_softc *sc, struct cmpl_base *cmpl)
{
	struct hwrm_async_event_cmpl *ae = (struct hwrm_async_event_cmpl *)cmpl;
	uint16_t type = le16toh(ae->event_id);

	switch (type) {
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE:
		bnxt_hwrm_port_phy_qcfg(sc, NULL);
		break;

	default:
		printf("%s: unexpected async event %x\n", DEVNAME(sc), type);
		break;
	}
}

struct cmpl_base *
bnxt_cpr_next_cmpl(struct bnxt_softc *sc, struct bnxt_cp_ring *cpr)
{
	struct cmpl_base *cmpl;
	uint32_t cons;
	int v_bit;

	cons = cpr->cons + 1;
	v_bit = cpr->v_bit;
	if (cons == cpr->ring.ring_size) {
		cons = 0;
		v_bit = !v_bit;
	}
	cmpl = &((struct cmpl_base *)cpr->ring.vaddr)[cons];

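	/*
	 * the entry is only fresh if its V bit matches the phase we
	 * expect on this lap of the ring; otherwise it is left over
	 * from the previous pass and the producer hasn't reached it.
	 */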
	if ((!!(cmpl->info3_v & htole32(CMPL_BASE_V))) != (!!v_bit))
		return (NULL);

	cpr->cons = cons;
	cpr->v_bit = v_bit;
	return (cmpl);
}

void
bnxt_cpr_commit(struct bnxt_softc *sc, struct bnxt_cp_ring *cpr)
{
	cpr->commit_cons = cpr->cons;
	cpr->commit_v_bit = cpr->v_bit;
}

void
bnxt_cpr_rollback(struct bnxt_softc *sc, struct bnxt_cp_ring *cpr)
{
	cpr->cons = cpr->commit_cons;
	cpr->v_bit = cpr->commit_v_bit;
}

int
bnxt_admin_intr(void *xsc)
{
	struct bnxt_softc *sc = (struct bnxt_softc *)xsc;
	struct bnxt_cp_ring *cpr = &sc->sc_cp_ring;
	struct cmpl_base *cmpl;
	uint16_t type;

	bnxt_write_cp_doorbell(sc, &cpr->ring, 0);
	cmpl = bnxt_cpr_next_cmpl(sc, cpr);
	while (cmpl != NULL) {
		type = le16toh(cmpl->type) & CMPL_BASE_TYPE_MASK;
		switch (type) {
		case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
			bnxt_handle_async_event(sc, cmpl);
			break;
		default:
			printf("%s: unexpected completion type %u\n",
			    DEVNAME(sc), type);
		}

		bnxt_cpr_commit(sc, cpr);
		cmpl = bnxt_cpr_next_cmpl(sc, cpr);
	}

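	/*
	 * write back the consumer index one past the last committed
	 * completion and re-enable the interrupt; see the comment in
	 * bnxt_intr() about the +1.
	 */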
1584 bnxt_write_cp_doorbell_index(sc, &cpr->ring,
1585 (cpr->commit_cons+1) % cpr->ring.ring_size, 1);
1586 return (1);
1587 }
1588
1589 int
bnxt_intr(void * xq)1590 bnxt_intr(void *xq)
1591 {
1592 struct bnxt_queue *q = (struct bnxt_queue *)xq;
1593 struct bnxt_softc *sc = q->q_sc;
1594 struct ifnet *ifp = &sc->sc_ac.ac_if;
1595 struct bnxt_cp_ring *cpr = &q->q_cp;
1596 struct bnxt_rx_queue *rx = &q->q_rx;
1597 struct bnxt_tx_queue *tx = &q->q_tx;
1598 struct cmpl_base *cmpl;
1599 struct mbuf_list ml = MBUF_LIST_INITIALIZER();
1600 uint16_t type;
1601 int rxfree, txfree, agfree, rv, rollback;
1602
1603 bnxt_write_cp_doorbell(sc, &cpr->ring, 0);
1604 rxfree = 0;
1605 txfree = 0;
1606 agfree = 0;
1607 rv = -1;
1608 cmpl = bnxt_cpr_next_cmpl(sc, cpr);
1609 while (cmpl != NULL) {
1610 type = le16toh(cmpl->type) & CMPL_BASE_TYPE_MASK;
1611 rollback = 0;
1612 switch (type) {
1613 case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
1614 bnxt_handle_async_event(sc, cmpl);
1615 break;
1616 case CMPL_BASE_TYPE_RX_L2:
1617 if (ISSET(ifp->if_flags, IFF_RUNNING))
1618 rollback = bnxt_rx(sc, rx, cpr, &ml, &rxfree,
1619 &agfree, cmpl);
1620 break;
1621 case CMPL_BASE_TYPE_TX_L2:
1622 if (ISSET(ifp->if_flags, IFF_RUNNING))
1623 bnxt_txeof(sc, tx, &txfree, cmpl);
1624 break;
1625 default:
1626 printf("%s: unexpected completion type %u\n",
1627 DEVNAME(sc), type);
1628 }
1629
1630 if (rollback) {
1631 bnxt_cpr_rollback(sc, cpr);
1632 break;
1633 }
1634 rv = 1;
1635 bnxt_cpr_commit(sc, cpr);
1636 cmpl = bnxt_cpr_next_cmpl(sc, cpr);
1637 }
1638
1639 /*
1640 * comments in bnxtreg.h suggest we should be writing cpr->cons here,
1641 * but writing cpr->cons + 1 makes it stop interrupting.
1642 */
1643 bnxt_write_cp_doorbell_index(sc, &cpr->ring,
1644 (cpr->commit_cons+1) % cpr->ring.ring_size, 1);
1645
1646 if (rxfree != 0) {
1647 rx->rx_cons += rxfree;
1648 if (rx->rx_cons >= rx->rx_ring.ring_size)
1649 rx->rx_cons -= rx->rx_ring.ring_size;
1650
1651 rx->rx_ag_cons += agfree;
1652 if (rx->rx_ag_cons >= rx->rx_ag_ring.ring_size)
1653 rx->rx_ag_cons -= rx->rx_ag_ring.ring_size;
1654
1655 if_rxr_put(&rx->rxr[0], rxfree);
1656 if_rxr_put(&rx->rxr[1], agfree);
1657
1658 if (ifiq_input(rx->rx_ifiq, &ml)) {
1659 if_rxr_livelocked(&rx->rxr[0]);
1660 if_rxr_livelocked(&rx->rxr[1]);
1661 }
1662
1663 bnxt_rx_fill(q);
1664 bnxt_rx_fill_ag(q);
1665 if ((rx->rx_cons == rx->rx_prod) ||
1666 (rx->rx_ag_cons == rx->rx_ag_prod))
1667 timeout_add(&rx->rx_refill, 0);
1668 }
1669 if (txfree != 0) {
1670 if (ifq_is_oactive(tx->tx_ifq))
1671 ifq_restart(tx->tx_ifq);
1672 }
1673 return (rv);
1674 }
1675
1676 void
bnxt_watchdog(struct ifnet * ifp)1677 bnxt_watchdog(struct ifnet *ifp)
1678 {
1679 }
1680
1681 void
bnxt_media_status(struct ifnet * ifp,struct ifmediareq * ifmr)1682 bnxt_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1683 {
1684 struct bnxt_softc *sc = (struct bnxt_softc *)ifp->if_softc;
1685 bnxt_hwrm_port_phy_qcfg(sc, ifmr);
1686 }
1687
1688 uint64_t
bnxt_get_media_type(uint64_t speed,int phy_type)1689 bnxt_get_media_type(uint64_t speed, int phy_type)
1690 {
1691 switch (phy_type) {
1692 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_UNKNOWN:
1693 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASECR:
1694 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASECR_CA_L:
1695 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASECR_CA_S:
1696 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASECR_CA_N:
1697 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASECR4:
1698 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASECR4:
1699 switch (speed) {
1700 case IF_Gbps(1):
1701 return IFM_1000_T;
1702 case IF_Gbps(10):
1703 return IFM_10G_SFP_CU;
1704 case IF_Gbps(25):
1705 return IFM_25G_CR;
1706 case IF_Gbps(40):
1707 return IFM_40G_CR4;
1708 case IF_Gbps(50):
1709 return IFM_50G_CR2;
1710 case IF_Gbps(100):
1711 return IFM_100G_CR4;
1712 }
1713 break;
1714
1715 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASELR:
1716 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASELR4:
1717 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASELR4:
1718 switch (speed) {
1719 case IF_Gbps(1):
1720 return IFM_1000_LX;
1721 case IF_Gbps(10):
1722 return IFM_10G_LR;
1723 case IF_Gbps(25):
1724 return IFM_25G_LR;
1725 case IF_Gbps(40):
1726 return IFM_40G_LR4;
1727 case IF_Gbps(100):
1728 return IFM_100G_LR4;
1729 }
1730 break;
1731
1732 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASESR:
1733 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASESR:
1734 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASESR4:
1735 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASESR10:
1736 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_1G_BASESX:
1737 switch (speed) {
1738 case IF_Gbps(1):
1739 return IFM_1000_SX;
1740 case IF_Gbps(10):
1741 return IFM_10G_SR;
1742 case IF_Gbps(25):
1743 return IFM_25G_SR;
1744 case IF_Gbps(40):
1745 return IFM_40G_SR4;
1746 case IF_Gbps(100):
1747 return IFM_100G_SR4;
1748 }
1749 break;
1750
1751 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASEER4:
1752 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASEER4:
1753 switch (speed) {
1754 case IF_Gbps(10):
1755 return IFM_10G_ER;
1756 case IF_Gbps(25):
1757 return IFM_25G_ER;
1758 }
1759 /* missing IFM_40G_ER4, IFM_100G_ER4 */
1760 break;
1761
1762 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR4:
1763 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR2:
1764 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR:
1765 switch (speed) {
1766 case IF_Gbps(10):
1767 return IFM_10G_KR;
1768 case IF_Gbps(20):
1769 return IFM_20G_KR2;
1770 case IF_Gbps(25):
1771 return IFM_25G_KR;
1772 case IF_Gbps(40):
1773 return IFM_40G_KR4;
1774 case IF_Gbps(50):
1775 return IFM_50G_KR2;
1776 case IF_Gbps(100):
1777 return IFM_100G_KR4;
1778 }
1779 break;
1780
1781 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKX:
1782 switch (speed) {
1783 case IF_Gbps(1):
1784 return IFM_1000_KX;
1785 case IF_Mbps(2500):
1786 return IFM_2500_KX;
1787 case IF_Gbps(10):
1788 return IFM_10G_KX4;
1789 }
1790 break;
1791
1792 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET:
1793 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE:
1794 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_1G_BASET:
1795 switch (speed) {
1796 case IF_Mbps(10):
1797 return IFM_10_T;
1798 case IF_Mbps(100):
1799 return IFM_100_TX;
1800 case IF_Gbps(1):
1801 return IFM_1000_T;
1802 case IF_Mbps(2500):
1803 return IFM_2500_T;
1804 case IF_Gbps(10):
1805 return IFM_10G_T;
1806 }
1807 break;
1808
1809 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_SGMIIEXTPHY:
1810 switch (speed) {
1811 case IF_Gbps(1):
1812 return IFM_1000_SGMII;
1813 }
1814 break;
1815
1816 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_ACTIVE_CABLE:
1817 switch (speed) {
1818 case IF_Gbps(10):
1819 return IFM_10G_AOC;
1820 case IF_Gbps(25):
1821 return IFM_25G_AOC;
1822 case IF_Gbps(40):
1823 return IFM_40G_AOC;
1824 case IF_Gbps(100):
1825 return IFM_100G_AOC;
1826 }
1827 break;
1828 }
1829
1830 return 0;
1831 }
1832
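/*
 * Translate a link speed back to its HWRM support_speeds bit and add
 * the given media type to the interface's media list if the PHY
 * advertises support for that speed.
 */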
1833 void
1834 bnxt_add_media_type(struct bnxt_softc *sc, int supported_speeds, uint64_t speed, uint64_t ifmt)
1835 {
1836 int speed_bit = 0;
1837 switch (speed) {
1838 case IF_Gbps(1):
1839 speed_bit = HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_1GB;
1840 break;
1841 case IF_Gbps(2):
1842 speed_bit = HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2GB;
1843 break;
1844 case IF_Mbps(2500):
1845 speed_bit = HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2_5GB;
1846 break;
1847 case IF_Gbps(10):
1848 speed_bit = HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10GB;
1849 break;
1850 case IF_Gbps(20):
1851 speed_bit = HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_20GB;
1852 break;
1853 case IF_Gbps(25):
1854 speed_bit = HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_25GB;
1855 break;
1856 case IF_Gbps(40):
1857 speed_bit = HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_40GB;
1858 break;
1859 case IF_Gbps(50):
1860 speed_bit = HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_50GB;
1861 break;
1862 case IF_Gbps(100):
1863 speed_bit = HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100GB;
1864 break;
1865 }
1866 if (supported_speeds & speed_bit)
1867 ifmedia_add(&sc->sc_media, IFM_ETHER | ifmt, 0, NULL);
1868 }
1869
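/*
 * Query the port PHY configuration, rebuild the supported media list
 * from it, and update the interface's link state and baudrate.  When
 * ifmr is non-NULL (the SIOCGIFMEDIA path), also report the active
 * media and pause settings.
 */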
1870 int
1871 bnxt_hwrm_port_phy_qcfg(struct bnxt_softc *softc, struct ifmediareq *ifmr)
1872 {
1873 struct ifnet *ifp = &softc->sc_ac.ac_if;
1874 struct hwrm_port_phy_qcfg_input req = {0};
1875 struct hwrm_port_phy_qcfg_output *resp =
1876 BNXT_DMA_KVA(softc->sc_cmd_resp);
1877 int link_state = LINK_STATE_DOWN;
1878 uint64_t speeds[] = {
1879 IF_Gbps(1), IF_Gbps(2), IF_Mbps(2500), IF_Gbps(10), IF_Gbps(20),
1880 IF_Gbps(25), IF_Gbps(40), IF_Gbps(50), IF_Gbps(100)
1881 };
1882 uint64_t media_type;
1883 int duplex;
1884 int rc = 0;
1885 int i;
1886
1887 BNXT_HWRM_LOCK(softc);
1888 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_PORT_PHY_QCFG);
1889
1890 rc = _hwrm_send_message(softc, &req, sizeof(req));
1891 if (rc) {
1892 printf("%s: failed to query port phy config\n", DEVNAME(softc));
1893 goto exit;
1894 }
1895
1896 if (softc->sc_hwrm_ver > 0x10800)
1897 duplex = resp->duplex_state;
1898 else
1899 duplex = resp->duplex_cfg;
1900
1901 if (resp->link == HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) {
1902 if (duplex == HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_STATE_HALF)
1903 link_state = LINK_STATE_HALF_DUPLEX;
1904 else
1905 link_state = LINK_STATE_FULL_DUPLEX;
1906
1907 switch (resp->link_speed) {
1908 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10MB:
1909 ifp->if_baudrate = IF_Mbps(10);
1910 break;
1911 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
1912 ifp->if_baudrate = IF_Mbps(100);
1913 break;
1914 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
1915 ifp->if_baudrate = IF_Gbps(1);
1916 break;
1917 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
1918 ifp->if_baudrate = IF_Gbps(2);
1919 break;
1920 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
1921 ifp->if_baudrate = IF_Mbps(2500);
1922 break;
1923 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
1924 ifp->if_baudrate = IF_Gbps(10);
1925 break;
1926 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
1927 ifp->if_baudrate = IF_Gbps(20);
1928 break;
1929 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
1930 ifp->if_baudrate = IF_Gbps(25);
1931 break;
1932 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
1933 ifp->if_baudrate = IF_Gbps(40);
1934 break;
1935 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
1936 ifp->if_baudrate = IF_Gbps(50);
1937 break;
1938 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB:
1939 ifp->if_baudrate = IF_Gbps(100);
1940 break;
1941 }
1942 }
1943
1944 ifmedia_delete_instance(&softc->sc_media, IFM_INST_ANY);
1945 for (i = 0; i < nitems(speeds); i++) {
1946 media_type = bnxt_get_media_type(speeds[i], resp->phy_type);
1947 if (media_type != 0)
1948 bnxt_add_media_type(softc, resp->support_speeds,
1949 speeds[i], media_type);
1950 }
1951 ifmedia_add(&softc->sc_media, IFM_ETHER|IFM_AUTO, 0, NULL);
1952 ifmedia_set(&softc->sc_media, IFM_ETHER|IFM_AUTO);
1953
1954 if (ifmr != NULL) {
1955 ifmr->ifm_status = IFM_AVALID;
1956 if (LINK_STATE_IS_UP(ifp->if_link_state)) {
1957 ifmr->ifm_status |= IFM_ACTIVE;
1958 ifmr->ifm_active = IFM_ETHER | IFM_AUTO;
1959 if (resp->pause & HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX)
1960 ifmr->ifm_active |= IFM_ETH_TXPAUSE;
1961 if (resp->pause & HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX)
1962 ifmr->ifm_active |= IFM_ETH_RXPAUSE;
1963 if (duplex == HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_STATE_HALF)
1964 ifmr->ifm_active |= IFM_HDX;
1965 else
1966 ifmr->ifm_active |= IFM_FDX;
1967
1968 media_type = bnxt_get_media_type(ifp->if_baudrate, resp->phy_type);
1969 if (media_type != 0)
1970 ifmr->ifm_active |= media_type;
1971 }
1972 }
1973
1974 exit:
1975 BNXT_HWRM_UNLOCK(softc);
1976
1977 if (rc == 0 && (link_state != ifp->if_link_state)) {
1978 ifp->if_link_state = link_state;
1979 if_link_state_change(ifp);
1980 }
1981
1982 return rc;
1983 }
1984
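/*
 * Apply a media change: a specific media subtype is translated into a
 * forced link speed, while anything unrecognised (including
 * autoselect) restarts autonegotiation across all speeds.
 */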
1985 int
1986 bnxt_media_change(struct ifnet *ifp)
1987 {
1988 struct bnxt_softc *sc = (struct bnxt_softc *)ifp->if_softc;
1989 struct hwrm_port_phy_cfg_input req = {0};
1990 uint64_t link_speed;
1991
1992 if (IFM_TYPE(sc->sc_media.ifm_media) != IFM_ETHER)
1993 return EINVAL;
1994
1995 if (sc->sc_flags & BNXT_FLAG_NPAR)
1996 return ENODEV;
1997
1998 bnxt_hwrm_cmd_hdr_init(sc, &req, HWRM_PORT_PHY_CFG);
1999
2000 switch (IFM_SUBTYPE(sc->sc_media.ifm_media)) {
2001 case IFM_100G_CR4:
2002 case IFM_100G_SR4:
2003 case IFM_100G_KR4:
2004 case IFM_100G_LR4:
2005 case IFM_100G_AOC:
2006 link_speed = HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_100GB;
2007 break;
2008
2009 case IFM_50G_CR2:
2010 case IFM_50G_KR2:
2011 link_speed = HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_50GB;
2012 break;
2013
2014 case IFM_40G_CR4:
2015 case IFM_40G_SR4:
2016 case IFM_40G_LR4:
2017 case IFM_40G_KR4:
2018 case IFM_40G_AOC:
2019 link_speed = HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_40GB;
2020 break;
2021
2022 case IFM_25G_CR:
2023 case IFM_25G_KR:
2024 case IFM_25G_SR:
2025 case IFM_25G_LR:
2026 case IFM_25G_ER:
2027 case IFM_25G_AOC:
2028 link_speed = HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_25GB;
2029 break;
2030
2031 case IFM_10G_LR:
2032 case IFM_10G_SR:
2033 case IFM_10G_CX4:
2034 case IFM_10G_T:
2035 case IFM_10G_SFP_CU:
2036 case IFM_10G_LRM:
2037 case IFM_10G_KX4:
2038 case IFM_10G_KR:
2039 case IFM_10G_CR1:
2040 case IFM_10G_ER:
2041 case IFM_10G_AOC:
2042 link_speed = HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_10GB;
2043 break;
2044
2045 case IFM_2500_SX:
2046 case IFM_2500_KX:
2047 case IFM_2500_T:
2048 link_speed = HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_2_5GB;
2049 break;
2050
2051 case IFM_1000_T:
2052 case IFM_1000_LX:
2053 case IFM_1000_SX:
2054 case IFM_1000_CX:
2055 case IFM_1000_KX:
2056 link_speed = HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_1GB;
2057 break;
2058
2059 case IFM_100_TX:
2060 link_speed = HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_100MB;
2061 break;
2062
2063 default:
2064 link_speed = 0;
2065 }
2066
2067 req.enables |= htole32(HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX);
2068 req.auto_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
2069 if (link_speed == 0) {
2070 req.auto_mode |=
2071 HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS;
2072 req.flags |=
2073 htole32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG);
2074 req.enables |=
2075 htole32(HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE);
2076 } else {
2077 req.force_link_speed = htole16(link_speed);
2078 req.flags |= htole32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE);
2079 }
2080 req.flags |= htole32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY);
2081
2082 return hwrm_send_message(sc, &req, sizeof(req));
2083 }
2084
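/*
 * Restart autonegotiation across all supported speeds and reset the
 * PHY, leaving duplex selection to the negotiation.
 */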
2085 int
2086 bnxt_media_autonegotiate(struct bnxt_softc *sc)
2087 {
2088 struct hwrm_port_phy_cfg_input req = {0};
2089
2090 if (sc->sc_flags & BNXT_FLAG_NPAR)
2091 return ENODEV;
2092
2093 bnxt_hwrm_cmd_hdr_init(sc, &req, HWRM_PORT_PHY_CFG);
2094 req.auto_mode |= HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS;
2095 req.auto_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
2096 req.enables |= htole32(HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE |
2097 HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX);
2098 req.flags |= htole32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG);
2099 req.flags |= htole32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY);
2100
2101 return hwrm_send_message(sc, &req, sizeof(req));
2102 }
2103
2104
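/*
 * Completion ring entries carry a valid bit whose expected polarity
 * (cpr->v_bit) flips each time the consumer wraps around the ring.
 * Writing the opposite polarity into every entry marks the whole ring
 * as empty until the hardware posts new completions.
 */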
2105 void
2106 bnxt_mark_cpr_invalid(struct bnxt_cp_ring *cpr)
2107 {
2108 struct cmpl_base *cmp = (void *)cpr->ring.vaddr;
2109 int i;
2110
2111 for (i = 0; i < cpr->ring.ring_size; i++)
2112 cmp[i].info3_v = !cpr->v_bit;
2113 }
2114
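/*
 * Completion ring doorbells.  Setting CMPL_DOORBELL_MASK when
 * enable == 0 acknowledges completions while leaving the ring's
 * interrupt masked.
 */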
2115 void
2116 bnxt_write_cp_doorbell(struct bnxt_softc *sc, struct bnxt_ring *ring,
2117 int enable)
2118 {
2119 uint32_t val = CMPL_DOORBELL_KEY_CMPL;
2120 if (enable == 0)
2121 val |= CMPL_DOORBELL_MASK;
2122
2123 bus_space_barrier(sc->sc_db_t, sc->sc_db_h, ring->doorbell, 4,
2124 BUS_SPACE_BARRIER_WRITE);
2125 bus_space_barrier(sc->sc_db_t, sc->sc_db_h, 0, sc->sc_db_s,
2126 BUS_SPACE_BARRIER_WRITE);
2127 bus_space_write_4(sc->sc_db_t, sc->sc_db_h, ring->doorbell,
2128 htole32(val));
2129 }
2130
2131 void
2132 bnxt_write_cp_doorbell_index(struct bnxt_softc *sc, struct bnxt_ring *ring,
2133 uint32_t index, int enable)
2134 {
2135 uint32_t val = CMPL_DOORBELL_KEY_CMPL | CMPL_DOORBELL_IDX_VALID |
2136 (index & CMPL_DOORBELL_IDX_MASK);
2137 if (enable == 0)
2138 val |= CMPL_DOORBELL_MASK;
2139 bus_space_barrier(sc->sc_db_t, sc->sc_db_h, ring->doorbell, 4,
2140 BUS_SPACE_BARRIER_WRITE);
2141 bus_space_write_4(sc->sc_db_t, sc->sc_db_h, ring->doorbell,
2142 htole32(val));
2143 bus_space_barrier(sc->sc_db_t, sc->sc_db_h, 0, sc->sc_db_s,
2144 BUS_SPACE_BARRIER_WRITE);
2145 }
2146
2147 void
2148 bnxt_write_rx_doorbell(struct bnxt_softc *sc, struct bnxt_ring *ring, int index)
2149 {
2150 uint32_t val = RX_DOORBELL_KEY_RX | index;
2151 bus_space_barrier(sc->sc_db_t, sc->sc_db_h, ring->doorbell, 4,
2152 BUS_SPACE_BARRIER_WRITE);
2153 bus_space_write_4(sc->sc_db_t, sc->sc_db_h, ring->doorbell,
2154 htole32(val));
2155
2156 /* second write isn't necessary on all hardware */
2157 bus_space_barrier(sc->sc_db_t, sc->sc_db_h, ring->doorbell, 4,
2158 BUS_SPACE_BARRIER_WRITE);
2159 bus_space_write_4(sc->sc_db_t, sc->sc_db_h, ring->doorbell,
2160 htole32(val));
2161 }
2162
2163 void
2164 bnxt_write_tx_doorbell(struct bnxt_softc *sc, struct bnxt_ring *ring, int index)
2165 {
2166 uint32_t val = TX_DOORBELL_KEY_TX | index;
2167 bus_space_barrier(sc->sc_db_t, sc->sc_db_h, ring->doorbell, 4,
2168 BUS_SPACE_BARRIER_WRITE);
2169 bus_space_write_4(sc->sc_db_t, sc->sc_db_h, ring->doorbell,
2170 htole32(val));
2171
2172 /* second write isn't necessary on all hardware */
2173 bus_space_barrier(sc->sc_db_t, sc->sc_db_h, ring->doorbell, 4,
2174 BUS_SPACE_BARRIER_WRITE);
2175 bus_space_write_4(sc->sc_db_t, sc->sc_db_h, ring->doorbell,
2176 htole32(val));
2177 }
2178
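/*
 * Post up to nslots fresh mbuf clusters to an rx (or aggregation)
 * ring starting at *prod, ringing the doorbell only if at least one
 * buffer was posted.  Returns the number of slots left unfilled, e.g.
 * after an mbuf allocation or DMA load failure.
 */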
2179 u_int
2180 bnxt_rx_fill_slots(struct bnxt_softc *sc, struct bnxt_ring *ring, void *ring_mem,
2181 struct bnxt_slot *slots, uint *prod, int bufsize, uint16_t bdtype,
2182 u_int nslots)
2183 {
2184 struct rx_prod_pkt_bd *rxring;
2185 struct bnxt_slot *bs;
2186 struct mbuf *m;
2187 uint p, fills;
2188
2189 rxring = (struct rx_prod_pkt_bd *)ring_mem;
2190 p = *prod;
2191 for (fills = 0; fills < nslots; fills++) {
2192 bs = &slots[p];
2193 m = MCLGETL(NULL, M_DONTWAIT, bufsize);
2194 if (m == NULL)
2195 break;
2196
2197 m->m_len = m->m_pkthdr.len = bufsize;
2198 if (bus_dmamap_load_mbuf(sc->sc_dmat, bs->bs_map, m,
2199 BUS_DMA_NOWAIT) != 0) {
2200 m_freem(m);
2201 break;
2202 }
2203 bs->bs_m = m;
2204
2205 rxring[p].flags_type = htole16(bdtype);
2206 rxring[p].len = htole16(bufsize);
2207 rxring[p].opaque = p;
2208 rxring[p].addr = htole64(bs->bs_map->dm_segs[0].ds_addr);
2209
2210 if (++p >= ring->ring_size)
2211 p = 0;
2212 }
2213
2214 if (fills != 0)
2215 bnxt_write_rx_doorbell(sc, ring, p);
2216 *prod = p;
2217
2218 return (nslots - fills);
2219 }
2220
2221 int
2222 bnxt_rx_fill(struct bnxt_queue *q)
2223 {
2224 struct bnxt_rx_queue *rx = &q->q_rx;
2225 struct bnxt_softc *sc = q->q_sc;
2226 u_int slots;
2227 int rv = 0;
2228
2229 slots = if_rxr_get(&rx->rxr[0], rx->rx_ring.ring_size);
2230 if (slots > 0) {
2231 slots = bnxt_rx_fill_slots(sc, &rx->rx_ring,
2232 BNXT_DMA_KVA(rx->rx_ring_mem), rx->rx_slots,
2233 &rx->rx_prod, MCLBYTES,
2234 RX_PROD_PKT_BD_TYPE_RX_PROD_PKT, slots);
2235 if_rxr_put(&rx->rxr[0], slots);
2236 } else
2237 rv = 1;
2238
2239 return (rv);
2240 }
2241
2242 int
2243 bnxt_rx_fill_ag(struct bnxt_queue *q)
2244 {
2245 struct bnxt_rx_queue *rx = &q->q_rx;
2246 struct bnxt_softc *sc = q->q_sc;
2247 u_int slots;
2248 int rv = 0;
2249
2250 slots = if_rxr_get(&rx->rxr[1], rx->rx_ag_ring.ring_size);
2251 if (slots > 0) {
2252 slots = bnxt_rx_fill_slots(sc, &rx->rx_ag_ring,
2253 BNXT_DMA_KVA(rx->rx_ring_mem) + PAGE_SIZE,
2254 rx->rx_ag_slots, &rx->rx_ag_prod,
2255 BNXT_AG_BUFFER_SIZE,
2256 RX_PROD_AGG_BD_TYPE_RX_PROD_AGG, slots);
2257 if_rxr_put(&rx->rxr[1], slots);
2258 } else
2259 rv = 1;
2260
2261 return (rv);
2262 }
2263
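/*
 * Timeout-driven refill, scheduled when a ring was found completely
 * empty (cons == prod).  Keeps rescheduling itself every tick until
 * buffers could be posted to each empty ring again.
 */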
2264 void
2265 bnxt_refill(void *xq)
2266 {
2267 struct bnxt_queue *q = xq;
2268 struct bnxt_rx_queue *rx = &q->q_rx;
2269
2270 if (rx->rx_cons == rx->rx_prod)
2271 bnxt_rx_fill(q);
2272
2273 if (rx->rx_ag_cons == rx->rx_ag_prod)
2274 bnxt_rx_fill_ag(q);
2275
2276 if ((rx->rx_cons == rx->rx_prod) ||
2277 (rx->rx_ag_cons == rx->rx_ag_prod))
2278 timeout_add(&rx->rx_refill, 1);
2279 }
2280
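/*
 * Consume one rx completion.  Each packet is described by a low/high
 * completion pair, plus an aggregation buffer completion when the
 * payload spills past the first cluster; the opaque fields echo back
 * the producer indexes set up in bnxt_rx_fill_slots().
 */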
2281 int
2282 bnxt_rx(struct bnxt_softc *sc, struct bnxt_rx_queue *rx,
2283 struct bnxt_cp_ring *cpr, struct mbuf_list *ml, int *slots, int *agslots,
2284 struct cmpl_base *cmpl)
2285 {
2286 struct mbuf *m, *am;
2287 struct bnxt_slot *bs;
2288 struct rx_pkt_cmpl *rxlo = (struct rx_pkt_cmpl *)cmpl;
2289 struct rx_pkt_cmpl_hi *rxhi;
2290 struct rx_abuf_cmpl *ag;
2291 uint32_t flags;
2292 uint16_t errors;
2293
2294 /* second part of the rx completion */
2295 rxhi = (struct rx_pkt_cmpl_hi *)bnxt_cpr_next_cmpl(sc, cpr);
2296 if (rxhi == NULL) {
2297 return (1);
2298 }
2299
2300 /* packets over 2k in size use an aggregation buffer completion too */
2301 ag = NULL;
2302 if ((rxlo->agg_bufs_v1 >> RX_PKT_CMPL_AGG_BUFS_SFT) != 0) {
2303 ag = (struct rx_abuf_cmpl *)bnxt_cpr_next_cmpl(sc, cpr);
2304 if (ag == NULL) {
2305 return (1);
2306 }
2307 }
2308
2309 bs = &rx->rx_slots[rxlo->opaque];
2310 bus_dmamap_sync(sc->sc_dmat, bs->bs_map, 0, bs->bs_map->dm_mapsize,
2311 BUS_DMASYNC_POSTREAD);
2312 bus_dmamap_unload(sc->sc_dmat, bs->bs_map);
2313
2314 m = bs->bs_m;
2315 bs->bs_m = NULL;
2316 m->m_pkthdr.len = m->m_len = letoh16(rxlo->len);
2317 (*slots)++;
2318
2319 /* checksum flags */
2320 flags = lemtoh32(&rxhi->flags2);
2321 errors = lemtoh16(&rxhi->errors_v2);
2322 if ((flags & RX_PKT_CMPL_FLAGS2_IP_CS_CALC) != 0 &&
2323 (errors & RX_PKT_CMPL_ERRORS_IP_CS_ERROR) == 0)
2324 m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
2325
2326 if ((flags & RX_PKT_CMPL_FLAGS2_L4_CS_CALC) != 0 &&
2327 (errors & RX_PKT_CMPL_ERRORS_L4_CS_ERROR) == 0)
2328 m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK |
2329 M_UDP_CSUM_IN_OK;
2330
2331 #if NVLAN > 0
2332 if ((flags & RX_PKT_CMPL_FLAGS2_META_FORMAT_MASK) ==
2333 RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN) {
2334 m->m_pkthdr.ether_vtag = lemtoh16(&rxhi->metadata);
2335 m->m_flags |= M_VLANTAG;
2336 }
2337 #endif
2338
2339 if (lemtoh16(&rxlo->flags_type) & RX_PKT_CMPL_FLAGS_RSS_VALID) {
2340 m->m_pkthdr.ph_flowid = lemtoh32(&rxlo->rss_hash);
2341 m->m_pkthdr.csum_flags |= M_FLOWID;
2342 }
2343
2344 if (ag != NULL) {
2345 bs = &rx->rx_ag_slots[ag->opaque];
2346 bus_dmamap_sync(sc->sc_dmat, bs->bs_map, 0,
2347 bs->bs_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
2348 bus_dmamap_unload(sc->sc_dmat, bs->bs_map);
2349
2350 am = bs->bs_m;
2351 bs->bs_m = NULL;
2352 am->m_len = letoh16(ag->len);
2353 m->m_next = am;
2354 m->m_pkthdr.len += am->m_len;
2355 (*agslots)++;
2356 }
2357
2358 ml_enqueue(ml, m);
2359 return (0);
2360 }
2361
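/*
 * Reclaim transmitted mbufs.  The completion's opaque field names the
 * tx slot of the last packet covered by this completion, so free
 * packets and their DMA maps until that slot has been processed.
 */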
2362 void
2363 bnxt_txeof(struct bnxt_softc *sc, struct bnxt_tx_queue *tx, int *txfree,
2364 struct cmpl_base *cmpl)
2365 {
2366 struct tx_cmpl *txcmpl = (struct tx_cmpl *)cmpl;
2367 struct bnxt_slot *bs;
2368 bus_dmamap_t map;
2369 u_int idx, segs, last;
2370
2371 idx = tx->tx_ring_cons;
2372 last = tx->tx_cons;
2373 do {
2374 bs = &tx->tx_slots[tx->tx_cons];
2375 map = bs->bs_map;
2376
2377 segs = BNXT_TX_SLOTS(bs);
2378 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2379 BUS_DMASYNC_POSTWRITE);
2380 bus_dmamap_unload(sc->sc_dmat, map);
2381 m_freem(bs->bs_m);
2382 bs->bs_m = NULL;
2383
2384 idx += segs;
2385 (*txfree) += segs;
2386 if (idx >= tx->tx_ring.ring_size)
2387 idx -= tx->tx_ring.ring_size;
2388
2389 last = tx->tx_cons;
2390 if (++tx->tx_cons >= tx->tx_ring.ring_size)
2391 tx->tx_cons = 0;
2392
2393 } while (last != txcmpl->opaque);
2394 tx->tx_ring_cons = idx;
2395 }
2396
2397 /* bnxt_hwrm.c */
2398
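/*
 * HWRM (Hardware Resource Manager) command support: requests are
 * written into the BAR-mapped command channel and answered through
 * the sc_cmd_resp DMA buffer.  bnxt_hwrm_err_map() converts firmware
 * error codes into errnos for the rest of the driver.
 */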
2399 int
2400 bnxt_hwrm_err_map(uint16_t err)
2401 {
2404 switch (err) {
2405 case HWRM_ERR_CODE_SUCCESS:
2406 return 0;
2407 case HWRM_ERR_CODE_INVALID_PARAMS:
2408 case HWRM_ERR_CODE_INVALID_FLAGS:
2409 case HWRM_ERR_CODE_INVALID_ENABLES:
2410 return EINVAL;
2411 case HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED:
2412 return EACCES;
2413 case HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR:
2414 return ENOMEM;
2415 case HWRM_ERR_CODE_CMD_NOT_SUPPORTED:
2416 return ENOSYS;
2417 case HWRM_ERR_CODE_FAIL:
2418 return EIO;
2419 case HWRM_ERR_CODE_HWRM_ERROR:
2420 case HWRM_ERR_CODE_UNKNOWN_ERR:
2421 default:
2422 return EIO;
2423 }
2426 }
2427
2428 void
2429 bnxt_hwrm_cmd_hdr_init(struct bnxt_softc *softc, void *request,
2430 uint16_t req_type)
2431 {
2432 struct input *req = request;
2433
2434 req->req_type = htole16(req_type);
2435 req->cmpl_ring = 0xffff;
2436 req->target_id = 0xffff;
2437 req->resp_addr = htole64(BNXT_DMA_DVA(softc->sc_cmd_resp));
2438 }
2439
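/*
 * Send one HWRM request with the HWRM lock held: write the request
 * (or, with BNXT_FLAG_SHORT_CMD, a short descriptor pointing at it)
 * into the command channel, ring the channel doorbell at offset
 * 0x100, then poll the response buffer, first for a plausible
 * resp_len and then for the valid key in the response's last byte.
 */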
2440 int
2441 _hwrm_send_message(struct bnxt_softc *softc, void *msg, uint32_t msg_len)
2442 {
2443 struct input *req = msg;
2444 struct hwrm_err_output *resp = BNXT_DMA_KVA(softc->sc_cmd_resp);
2445 uint32_t *data = msg;
2446 int i;
2447 uint8_t *valid;
2448 uint16_t err;
2449 uint16_t max_req_len = HWRM_MAX_REQ_LEN;
2450 struct hwrm_short_input short_input = {0};
2451
2452 /* TODO: DMASYNC in here. */
2453 req->seq_id = htole16(softc->sc_cmd_seq++);
2454 memset(resp, 0, PAGE_SIZE);
2455
2456 if (softc->sc_flags & BNXT_FLAG_SHORT_CMD) {
2457 void *short_cmd_req = BNXT_DMA_KVA(softc->sc_cmd_resp);
2458
2459 memcpy(short_cmd_req, req, msg_len);
2460 memset((uint8_t *) short_cmd_req + msg_len, 0,
2461 softc->sc_max_req_len - msg_len);
2462
2463 short_input.req_type = req->req_type;
2464 short_input.signature =
2465 htole16(HWRM_SHORT_INPUT_SIGNATURE_SHORT_CMD);
2466 short_input.size = htole16(msg_len);
2467 short_input.req_addr =
2468 htole64(BNXT_DMA_DVA(softc->sc_cmd_resp));
2469
2470 data = (uint32_t *)&short_input;
2471 msg_len = sizeof(short_input);
2472
2473 /* Sync memory write before updating doorbell */
2474 membar_sync();
2475
2476 max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
2477 }
2478
2479 /* Write request msg to hwrm channel */
2480 for (i = 0; i < msg_len; i += 4) {
2481 bus_space_write_4(softc->sc_hwrm_t,
2482 softc->sc_hwrm_h,
2483 i, *data);
2484 data++;
2485 }
2486
2487 /* Clear to the end of the request buffer */
2488 for (i = msg_len; i < max_req_len; i += 4)
2489 bus_space_write_4(softc->sc_hwrm_t, softc->sc_hwrm_h,
2490 i, 0);
2491
2492 /* Ring channel doorbell */
2493 bus_space_write_4(softc->sc_hwrm_t, softc->sc_hwrm_h, 0x100,
2494 htole32(1));
2495
2496 /* Check if response len is updated */
2497 for (i = 0; i < softc->sc_cmd_timeo; i++) {
2498 if (resp->resp_len && resp->resp_len <= 4096)
2499 break;
2500 DELAY(1000);
2501 }
2502 if (i >= softc->sc_cmd_timeo) {
2503 printf("%s: timeout sending %s: (timeout: %u) seq: %d\n",
2504 DEVNAME(softc), GET_HWRM_REQ_TYPE(req->req_type),
2505 softc->sc_cmd_timeo,
2506 le16toh(req->seq_id));
2507 return ETIMEDOUT;
2508 }
2509 /* Last byte of resp contains the valid key */
2510 valid = (uint8_t *)resp + resp->resp_len - 1;
2511 for (i = 0; i < softc->sc_cmd_timeo; i++) {
2512 if (*valid == HWRM_RESP_VALID_KEY)
2513 break;
2514 DELAY(1000);
2515 }
2516 if (i >= softc->sc_cmd_timeo) {
2517 printf("%s: timeout sending %s: "
2518 "(timeout: %u) msg {0x%x 0x%x} len:%d v: %d\n",
2519 DEVNAME(softc), GET_HWRM_REQ_TYPE(req->req_type),
2520 softc->sc_cmd_timeo, le16toh(req->req_type),
2521 le16toh(req->seq_id), msg_len,
2522 *valid);
2523 return ETIMEDOUT;
2524 }
2525
2526 err = le16toh(resp->error_code);
2527 if (err) {
2528 /* HWRM_ERR_CODE_FAIL is a "normal" error, don't log */
2529 if (err != HWRM_ERR_CODE_FAIL) {
2530 printf("%s: %s command returned %s error.\n",
2531 DEVNAME(softc),
2532 GET_HWRM_REQ_TYPE(req->req_type),
2533 GET_HWRM_ERROR_CODE(err));
2534 }
2535 return bnxt_hwrm_err_map(err);
2536 }
2537
2538 return 0;
2539 }
2540
2541
2542 int
2543 hwrm_send_message(struct bnxt_softc *softc, void *msg, uint32_t msg_len)
2544 {
2545 int rc;
2546
2547 BNXT_HWRM_LOCK(softc);
2548 rc = _hwrm_send_message(softc, msg, msg_len);
2549 BNXT_HWRM_UNLOCK(softc);
2550 return rc;
2551 }
2552
2553
2554 int
2555 bnxt_hwrm_queue_qportcfg(struct bnxt_softc *softc)
2556 {
2557 struct hwrm_queue_qportcfg_input req = {0};
2558 struct hwrm_queue_qportcfg_output *resp =
2559 BNXT_DMA_KVA(softc->sc_cmd_resp);
2560 int rc = 0;
2561
2562 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_QUEUE_QPORTCFG);
2563
2564 BNXT_HWRM_LOCK(softc);
2565 rc = _hwrm_send_message(softc, &req, sizeof(req));
2566 if (rc)
2567 goto qportcfg_exit;
2568
2569 if (!resp->max_configurable_queues) {
2570 rc = EINVAL;
2571 goto qportcfg_exit;
2572 }
2573
2574 softc->sc_tx_queue_id = resp->queue_id0;
2575
2576 qportcfg_exit:
2577 BNXT_HWRM_UNLOCK(softc);
2578 return rc;
2579 }
2580
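/*
 * HWRM_VER_GET: negotiate the HWRM interface version with the
 * firmware, pick up its request-length and timeout limits, and detect
 * whether the short command format must be used.
 */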
2581 int
2582 bnxt_hwrm_ver_get(struct bnxt_softc *softc)
2583 {
2584 struct hwrm_ver_get_input req = {0};
2585 struct hwrm_ver_get_output *resp =
2586 BNXT_DMA_KVA(softc->sc_cmd_resp);
2587 int rc;
2588 #if 0
2589 const char nastr[] = "<not installed>";
2590 const char naver[] = "<N/A>";
2591 #endif
2592 uint32_t dev_caps_cfg;
2593
2594 softc->sc_max_req_len = HWRM_MAX_REQ_LEN;
2595 softc->sc_cmd_timeo = 1000;
2596 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VER_GET);
2597
2598 req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
2599 req.hwrm_intf_min = HWRM_VERSION_MINOR;
2600 req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
2601
2602 BNXT_HWRM_LOCK(softc);
2603 rc = _hwrm_send_message(softc, &req, sizeof(req));
2604 if (rc)
2605 goto fail;
2606
2607 printf(": fw ver %d.%d.%d, ", resp->hwrm_fw_maj, resp->hwrm_fw_min,
2608 resp->hwrm_fw_bld);
2609
2610 softc->sc_hwrm_ver = (resp->hwrm_intf_maj << 16) |
2611 (resp->hwrm_intf_min << 8) | resp->hwrm_intf_upd;
2612 #if 0
2613 snprintf(softc->ver_info->hwrm_if_ver, BNXT_VERSTR_SIZE, "%d.%d.%d",
2614 resp->hwrm_intf_maj, resp->hwrm_intf_min, resp->hwrm_intf_upd);
2615 softc->ver_info->hwrm_if_major = resp->hwrm_intf_maj;
2616 softc->ver_info->hwrm_if_minor = resp->hwrm_intf_min;
2617 softc->ver_info->hwrm_if_update = resp->hwrm_intf_upd;
2618 snprintf(softc->ver_info->hwrm_fw_ver, BNXT_VERSTR_SIZE, "%d.%d.%d",
2619 resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld);
2620 strlcpy(softc->ver_info->driver_hwrm_if_ver, HWRM_VERSION_STR,
2621 BNXT_VERSTR_SIZE);
2622 strlcpy(softc->ver_info->hwrm_fw_name, resp->hwrm_fw_name,
2623 BNXT_NAME_SIZE);
2624
2625 if (resp->mgmt_fw_maj == 0 && resp->mgmt_fw_min == 0 &&
2626 resp->mgmt_fw_bld == 0) {
2627 strlcpy(softc->ver_info->mgmt_fw_ver, naver, BNXT_VERSTR_SIZE);
2628 strlcpy(softc->ver_info->mgmt_fw_name, nastr, BNXT_NAME_SIZE);
2629 }
2630 else {
2631 snprintf(softc->ver_info->mgmt_fw_ver, BNXT_VERSTR_SIZE,
2632 "%d.%d.%d", resp->mgmt_fw_maj, resp->mgmt_fw_min,
2633 resp->mgmt_fw_bld);
2634 strlcpy(softc->ver_info->mgmt_fw_name, resp->mgmt_fw_name,
2635 BNXT_NAME_SIZE);
2636 }
2637 if (resp->netctrl_fw_maj == 0 && resp->netctrl_fw_min == 0 &&
2638 resp->netctrl_fw_bld == 0) {
2639 strlcpy(softc->ver_info->netctrl_fw_ver, naver,
2640 BNXT_VERSTR_SIZE);
2641 strlcpy(softc->ver_info->netctrl_fw_name, nastr,
2642 BNXT_NAME_SIZE);
2643 }
2644 else {
2645 snprintf(softc->ver_info->netctrl_fw_ver, BNXT_VERSTR_SIZE,
2646 "%d.%d.%d", resp->netctrl_fw_maj, resp->netctrl_fw_min,
2647 resp->netctrl_fw_bld);
2648 strlcpy(softc->ver_info->netctrl_fw_name, resp->netctrl_fw_name,
2649 BNXT_NAME_SIZE);
2650 }
2651 if (resp->roce_fw_maj == 0 && resp->roce_fw_min == 0 &&
2652 resp->roce_fw_bld == 0) {
2653 strlcpy(softc->ver_info->roce_fw_ver, naver, BNXT_VERSTR_SIZE);
2654 strlcpy(softc->ver_info->roce_fw_name, nastr, BNXT_NAME_SIZE);
2655 }
2656 else {
2657 snprintf(softc->ver_info->roce_fw_ver, BNXT_VERSTR_SIZE,
2658 "%d.%d.%d", resp->roce_fw_maj, resp->roce_fw_min,
2659 resp->roce_fw_bld);
2660 strlcpy(softc->ver_info->roce_fw_name, resp->roce_fw_name,
2661 BNXT_NAME_SIZE);
2662 }
2663 softc->ver_info->chip_num = le16toh(resp->chip_num);
2664 softc->ver_info->chip_rev = resp->chip_rev;
2665 softc->ver_info->chip_metal = resp->chip_metal;
2666 softc->ver_info->chip_bond_id = resp->chip_bond_id;
2667 softc->ver_info->chip_type = resp->chip_platform_type;
2668 #endif
2669
2670 if (resp->max_req_win_len)
2671 softc->sc_max_req_len = le16toh(resp->max_req_win_len);
2672 if (resp->def_req_timeout)
2673 softc->sc_cmd_timeo = le16toh(resp->def_req_timeout);
2674
2675 dev_caps_cfg = le32toh(resp->dev_caps_cfg);
2676 if ((dev_caps_cfg & HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
2677 (dev_caps_cfg & HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
2678 softc->sc_flags |= BNXT_FLAG_SHORT_CMD;
2679
2680 fail:
2681 BNXT_HWRM_UNLOCK(softc);
2682 return rc;
2683 }
2684
2685
2686 int
2687 bnxt_hwrm_func_drv_rgtr(struct bnxt_softc *softc)
2688 {
2689 struct hwrm_func_drv_rgtr_input req = {0};
2690
2691 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_DRV_RGTR);
2692
2693 req.enables = htole32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
2694 HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_OS_TYPE);
2695 req.os_type = htole16(HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_FREEBSD);
2696
2697 req.ver_maj = 6;
2698 req.ver_min = 4;
2699 req.ver_upd = 0;
2700
2701 return hwrm_send_message(softc, &req, sizeof(req));
2702 }
2703
2704 #if 0
2705
2706 int
2707 bnxt_hwrm_func_drv_unrgtr(struct bnxt_softc *softc, bool shutdown)
2708 {
2709 struct hwrm_func_drv_unrgtr_input req = {0};
2710
2711 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_DRV_UNRGTR);
2712 if (shutdown == true)
2713 req.flags |=
2714 HWRM_FUNC_DRV_UNRGTR_INPUT_FLAGS_PREPARE_FOR_SHUTDOWN;
2715 return hwrm_send_message(softc, &req, sizeof(req));
2716 }
2717
2718 #endif
2719
2720 int
2721 bnxt_hwrm_func_qcaps(struct bnxt_softc *softc)
2722 {
2723 int rc = 0;
2724 struct hwrm_func_qcaps_input req = {0};
2725 struct hwrm_func_qcaps_output *resp =
2726 BNXT_DMA_KVA(softc->sc_cmd_resp);
2727 /* struct bnxt_func_info *func = &softc->func; */
2728
2729 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_QCAPS);
2730 req.fid = htole16(0xffff);
2731
2732 BNXT_HWRM_LOCK(softc);
2733 rc = _hwrm_send_message(softc, &req, sizeof(req));
2734 if (rc)
2735 goto fail;
2736
2737 if (resp->flags &
2738 htole32(HWRM_FUNC_QCAPS_OUTPUT_FLAGS_WOL_MAGICPKT_SUPPORTED))
2739 softc->sc_flags |= BNXT_FLAG_WOL_CAP;
2740
2741 memcpy(softc->sc_ac.ac_enaddr, resp->mac_address, 6);
2742 /*
2743 func->fw_fid = le16toh(resp->fid);
2744 memcpy(func->mac_addr, resp->mac_address, ETHER_ADDR_LEN);
2745 func->max_rsscos_ctxs = le16toh(resp->max_rsscos_ctx);
2746 func->max_cp_rings = le16toh(resp->max_cmpl_rings);
2747 func->max_tx_rings = le16toh(resp->max_tx_rings);
2748 func->max_rx_rings = le16toh(resp->max_rx_rings);
2749 func->max_hw_ring_grps = le32toh(resp->max_hw_ring_grps);
2750 if (!func->max_hw_ring_grps)
2751 func->max_hw_ring_grps = func->max_tx_rings;
2752 func->max_l2_ctxs = le16toh(resp->max_l2_ctxs);
2753 func->max_vnics = le16toh(resp->max_vnics);
2754 func->max_stat_ctxs = le16toh(resp->max_stat_ctx);
2755 if (BNXT_PF(softc)) {
2756 struct bnxt_pf_info *pf = &softc->pf;
2757
2758 pf->port_id = le16toh(resp->port_id);
2759 pf->first_vf_id = le16toh(resp->first_vf_id);
2760 pf->max_vfs = le16toh(resp->max_vfs);
2761 pf->max_encap_records = le32toh(resp->max_encap_records);
2762 pf->max_decap_records = le32toh(resp->max_decap_records);
2763 pf->max_tx_em_flows = le32toh(resp->max_tx_em_flows);
2764 pf->max_tx_wm_flows = le32toh(resp->max_tx_wm_flows);
2765 pf->max_rx_em_flows = le32toh(resp->max_rx_em_flows);
2766 pf->max_rx_wm_flows = le32toh(resp->max_rx_wm_flows);
2767 }
2768 if (!_is_valid_ether_addr(func->mac_addr)) {
2769 device_printf(softc->dev, "Invalid ethernet address, generating random locally administered address\n");
2770 get_random_ether_addr(func->mac_addr);
2771 }
2772 */
2773
2774 fail:
2775 BNXT_HWRM_UNLOCK(softc);
2776 return rc;
2777 }
2778
2779
2780 int
2781 bnxt_hwrm_func_qcfg(struct bnxt_softc *softc)
2782 {
2783 struct hwrm_func_qcfg_input req = {0};
2784 /* struct hwrm_func_qcfg_output *resp =
2785 BNXT_DMA_KVA(softc->sc_cmd_resp);
2786 struct bnxt_func_qcfg *fn_qcfg = &softc->fn_qcfg; */
2787 int rc;
2788
2789 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_QCFG);
2790 req.fid = htole16(0xffff);
2791 BNXT_HWRM_LOCK(softc);
2792 rc = _hwrm_send_message(softc, &req, sizeof(req));
2793 if (rc)
2794 goto fail;
2795
2796 /*
2797 fn_qcfg->alloc_completion_rings = le16toh(resp->alloc_cmpl_rings);
2798 fn_qcfg->alloc_tx_rings = le16toh(resp->alloc_tx_rings);
2799 fn_qcfg->alloc_rx_rings = le16toh(resp->alloc_rx_rings);
2800 fn_qcfg->alloc_vnics = le16toh(resp->alloc_vnics);
2801 */
2802 fail:
2803 BNXT_HWRM_UNLOCK(softc);
2804 return rc;
2805 }
2806
2807
2808 int
2809 bnxt_hwrm_func_reset(struct bnxt_softc *softc)
2810 {
2811 struct hwrm_func_reset_input req = {0};
2812
2813 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_RESET);
2814 req.enables = 0;
2815
2816 return hwrm_send_message(softc, &req, sizeof(req));
2817 }
2818
2819 int
2820 bnxt_hwrm_vnic_cfg_placement(struct bnxt_softc *softc,
2821 struct bnxt_vnic_info *vnic)
2822 {
2823 struct hwrm_vnic_plcmodes_cfg_input req = {0};
2824
2825 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_PLCMODES_CFG);
2826
2827 req.flags = htole32(
2828 HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);
2829 req.enables = htole32(
2830 HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID);
2831 req.vnic_id = htole16(vnic->id);
2832 req.jumbo_thresh = htole16(MCLBYTES);
2833
2834 return hwrm_send_message(softc, &req, sizeof(req));
2835 }
2836
2837 int
2838 bnxt_hwrm_vnic_cfg(struct bnxt_softc *softc, struct bnxt_vnic_info *vnic)
2839 {
2840 struct hwrm_vnic_cfg_input req = {0};
2841
2842 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_CFG);
2843
2844 if (vnic->flags & BNXT_VNIC_FLAG_DEFAULT)
2845 req.flags |= htole32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
2846 if (vnic->flags & BNXT_VNIC_FLAG_BD_STALL)
2847 req.flags |= htole32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE);
2848 if (vnic->flags & BNXT_VNIC_FLAG_VLAN_STRIP)
2849 req.flags |= htole32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
2850 req.enables = htole32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP |
2851 HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE |
2852 HWRM_VNIC_CFG_INPUT_ENABLES_MRU);
2853 req.vnic_id = htole16(vnic->id);
2854 req.dflt_ring_grp = htole16(vnic->def_ring_grp);
2855 req.rss_rule = htole16(vnic->rss_id);
2856 req.cos_rule = htole16(vnic->cos_rule);
2857 req.lb_rule = htole16(vnic->lb_rule);
2858 req.mru = htole16(vnic->mru);
2859
2860 return hwrm_send_message(softc, &req, sizeof(req));
2861 }
2862
2863 int
2864 bnxt_hwrm_vnic_alloc(struct bnxt_softc *softc, struct bnxt_vnic_info *vnic)
2865 {
2866 struct hwrm_vnic_alloc_input req = {0};
2867 struct hwrm_vnic_alloc_output *resp =
2868 BNXT_DMA_KVA(softc->sc_cmd_resp);
2869 int rc;
2870
2871 if (vnic->id != (uint16_t)HWRM_NA_SIGNATURE) {
2872 printf("%s: attempt to re-allocate vnic %04x\n",
2873 DEVNAME(softc), vnic->id);
2874 return EINVAL;
2875 }
2876
2877 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_ALLOC);
2878
2879 if (vnic->flags & BNXT_VNIC_FLAG_DEFAULT)
2880 req.flags = htole32(HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT);
2881
2882 BNXT_HWRM_LOCK(softc);
2883 rc = _hwrm_send_message(softc, &req, sizeof(req));
2884 if (rc)
2885 goto fail;
2886
2887 vnic->id = le32toh(resp->vnic_id);
2888
2889 fail:
2890 BNXT_HWRM_UNLOCK(softc);
2891 return rc;
2892 }
2893
2894 int
2895 bnxt_hwrm_vnic_free(struct bnxt_softc *softc, struct bnxt_vnic_info *vnic)
2896 {
2897 struct hwrm_vnic_free_input req = {0};
2898 int rc;
2899
2900 if (vnic->id == (uint16_t)HWRM_NA_SIGNATURE) {
2901 printf("%s: attempt to deallocate vnic %04x\n",
2902 DEVNAME(softc), vnic->id);
2903 return (EINVAL);
2904 }
2905
2906 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_FREE);
2907 req.vnic_id = htole16(vnic->id);
2908
2909 BNXT_HWRM_LOCK(softc);
2910 rc = _hwrm_send_message(softc, &req, sizeof(req));
2911 if (rc == 0)
2912 vnic->id = (uint16_t)HWRM_NA_SIGNATURE;
2913 BNXT_HWRM_UNLOCK(softc);
2914
2915 return (rc);
2916 }
2917
2918 int
2919 bnxt_hwrm_vnic_ctx_alloc(struct bnxt_softc *softc, uint16_t *ctx_id)
2920 {
2921 struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0};
2922 struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
2923 BNXT_DMA_KVA(softc->sc_cmd_resp);
2924 int rc;
2925
2926 if (*ctx_id != (uint16_t)HWRM_NA_SIGNATURE) {
2927 printf("%s: attempt to re-allocate vnic ctx %04x\n",
2928 DEVNAME(softc), *ctx_id);
2929 return EINVAL;
2930 }
2931
2932 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC);
2933
2934 BNXT_HWRM_LOCK(softc);
2935 rc = _hwrm_send_message(softc, &req, sizeof(req));
2936 if (rc)
2937 goto fail;
2938
2939 *ctx_id = letoh16(resp->rss_cos_lb_ctx_id);
2940
2941 fail:
2942 BNXT_HWRM_UNLOCK(softc);
2943 return (rc);
2944 }
2945
2946 int
2947 bnxt_hwrm_vnic_ctx_free(struct bnxt_softc *softc, uint16_t *ctx_id)
2948 {
2949 struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0};
2950 int rc;
2951
2952 if (*ctx_id == (uint16_t)HWRM_NA_SIGNATURE) {
2953 printf("%s: attempt to deallocate vnic ctx %04x\n",
2954 DEVNAME(softc), *ctx_id);
2955 return (EINVAL);
2956 }
2957
2958 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE);
2959 req.rss_cos_lb_ctx_id = htole32(*ctx_id);
2960
2961 BNXT_HWRM_LOCK(softc);
2962 rc = _hwrm_send_message(softc, &req, sizeof(req));
2963 if (rc == 0)
2964 *ctx_id = (uint16_t)HWRM_NA_SIGNATURE;
2965 BNXT_HWRM_UNLOCK(softc);
2966 return (rc);
2967 }
2968
2969 int
2970 bnxt_hwrm_ring_grp_alloc(struct bnxt_softc *softc, struct bnxt_grp_info *grp)
2971 {
2972 struct hwrm_ring_grp_alloc_input req = {0};
2973 struct hwrm_ring_grp_alloc_output *resp;
2974 int rc = 0;
2975
2976 if (grp->grp_id != HWRM_NA_SIGNATURE) {
2977 printf("%s: attempt to re-allocate ring group %04x\n",
2978 DEVNAME(softc), grp->grp_id);
2979 return EINVAL;
2980 }
2981
2982 resp = BNXT_DMA_KVA(softc->sc_cmd_resp);
2983 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_RING_GRP_ALLOC);
2984 req.cr = htole16(grp->cp_ring_id);
2985 req.rr = htole16(grp->rx_ring_id);
2986 req.ar = htole16(grp->ag_ring_id);
2987 req.sc = htole16(grp->stats_ctx);
2988
2989 BNXT_HWRM_LOCK(softc);
2990 rc = _hwrm_send_message(softc, &req, sizeof(req));
2991 if (rc)
2992 goto fail;
2993
2994 grp->grp_id = letoh32(resp->ring_group_id);
2995
2996 fail:
2997 BNXT_HWRM_UNLOCK(softc);
2998 return rc;
2999 }
3000
3001 int
3002 bnxt_hwrm_ring_grp_free(struct bnxt_softc *softc, struct bnxt_grp_info *grp)
3003 {
3004 struct hwrm_ring_grp_free_input req = {0};
3005 int rc = 0;
3006
3007 if (grp->grp_id == HWRM_NA_SIGNATURE) {
3008 printf("%s: attempt to free ring group %04x\n",
3009 DEVNAME(softc), grp->grp_id);
3010 return EINVAL;
3011 }
3012
3013 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_RING_GRP_FREE);
3014 req.ring_group_id = htole32(grp->grp_id);
3015
3016 BNXT_HWRM_LOCK(softc);
3017 rc = _hwrm_send_message(softc, &req, sizeof(req));
3018 if (rc == 0)
3019 grp->grp_id = HWRM_NA_SIGNATURE;
3020
3021 BNXT_HWRM_UNLOCK(softc);
3022 return (rc);
3023 }
3024
3025 /*
3026 * Ring allocation message to the firmware
3027 */
3028 int
3029 bnxt_hwrm_ring_alloc(struct bnxt_softc *softc, uint8_t type,
3030 struct bnxt_ring *ring, uint16_t cmpl_ring_id, uint32_t stat_ctx_id,
3031 int irq)
3032 {
3033 struct hwrm_ring_alloc_input req = {0};
3034 struct hwrm_ring_alloc_output *resp;
3035 int rc;
3036
3037 if (ring->phys_id != (uint16_t)HWRM_NA_SIGNATURE) {
3038 printf("%s: attempt to re-allocate ring %04x\n",
3039 DEVNAME(softc), ring->phys_id);
3040 return EINVAL;
3041 }
3042
3043 resp = BNXT_DMA_KVA(softc->sc_cmd_resp);
3044 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_RING_ALLOC);
3045 req.enables = htole32(0);
3046 req.fbo = htole32(0);
3047
3048 if (stat_ctx_id != HWRM_NA_SIGNATURE) {
3049 req.enables |= htole32(
3050 HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID);
3051 req.stat_ctx_id = htole32(stat_ctx_id);
3052 }
3053 req.ring_type = type;
3054 req.page_tbl_addr = htole64(ring->paddr);
3055 req.length = htole32(ring->ring_size);
3056 req.logical_id = htole16(ring->id);
3057 req.cmpl_ring_id = htole16(cmpl_ring_id);
3058 req.queue_id = htole16(softc->sc_tx_queue_id);
3059 req.int_mode = (softc->sc_flags & BNXT_FLAG_MSIX) ?
3060 HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX :
3061 HWRM_RING_ALLOC_INPUT_INT_MODE_LEGACY;
3062 BNXT_HWRM_LOCK(softc);
3063 rc = _hwrm_send_message(softc, &req, sizeof(req));
3064 if (rc)
3065 goto fail;
3066
3067 ring->phys_id = le16toh(resp->ring_id);
3068
3069 fail:
3070 BNXT_HWRM_UNLOCK(softc);
3071 return rc;
3072 }
3073
3074 int
3075 bnxt_hwrm_ring_free(struct bnxt_softc *softc, uint8_t type, struct bnxt_ring *ring)
3076 {
3077 struct hwrm_ring_free_input req = {0};
3078 int rc;
3079
3080 if (ring->phys_id == (uint16_t)HWRM_NA_SIGNATURE) {
3081 printf("%s: attempt to deallocate ring %04x\n",
3082 DEVNAME(softc), ring->phys_id);
3083 return (EINVAL);
3084 }
3085
3086 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_RING_FREE);
3087 req.ring_type = type;
3088 req.ring_id = htole16(ring->phys_id);
3089 BNXT_HWRM_LOCK(softc);
3090 rc = _hwrm_send_message(softc, &req, sizeof(req));
3091 if (rc)
3092 goto fail;
3093
3094 ring->phys_id = (uint16_t)HWRM_NA_SIGNATURE;
3095 fail:
3096 BNXT_HWRM_UNLOCK(softc);
3097 return (rc);
3098 }
3099
3100
3101 int
3102 bnxt_hwrm_stat_ctx_alloc(struct bnxt_softc *softc, struct bnxt_cp_ring *cpr,
3103 uint64_t paddr)
3104 {
3105 struct hwrm_stat_ctx_alloc_input req = {0};
3106 struct hwrm_stat_ctx_alloc_output *resp;
3107 int rc = 0;
3108
3109 if (cpr->stats_ctx_id != HWRM_NA_SIGNATURE) {
3110 printf("%s: attempt to re-allocate stats ctx %08x\n",
3111 DEVNAME(softc), cpr->stats_ctx_id);
3112 return EINVAL;
3113 }
3114
3115 resp = BNXT_DMA_KVA(softc->sc_cmd_resp);
3116 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_STAT_CTX_ALLOC);
3117
3118 req.update_period_ms = htole32(1000);
3119 req.stats_dma_addr = htole64(paddr);
3120
3121 BNXT_HWRM_LOCK(softc);
3122 rc = _hwrm_send_message(softc, &req, sizeof(req));
3123 if (rc)
3124 goto fail;
3125
3126 cpr->stats_ctx_id = le32toh(resp->stat_ctx_id);
3127
3128 fail:
3129 BNXT_HWRM_UNLOCK(softc);
3130
3131 return rc;
3132 }
3133
3134 int
3135 bnxt_hwrm_stat_ctx_free(struct bnxt_softc *softc, struct bnxt_cp_ring *cpr)
3136 {
3137 struct hwrm_stat_ctx_free_input req = {0};
3138 int rc = 0;
3139
3140 if (cpr->stats_ctx_id == HWRM_NA_SIGNATURE) {
3141 printf("%s: attempt to free stats ctx %08x\n",
3142 DEVNAME(softc), cpr->stats_ctx_id);
3143 return EINVAL;
3144 }
3145
3146 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_STAT_CTX_FREE);
3147 req.stat_ctx_id = htole32(cpr->stats_ctx_id);
3148
3149 BNXT_HWRM_LOCK(softc);
3150 rc = _hwrm_send_message(softc, &req, sizeof(req));
3151 BNXT_HWRM_UNLOCK(softc);
3152
3153 if (rc == 0)
3154 cpr->stats_ctx_id = HWRM_NA_SIGNATURE;
3155
3156 return (rc);
3157 }
3158
3159 #if 0
3160
3161 int
3162 bnxt_hwrm_port_qstats(struct bnxt_softc *softc)
3163 {
3164 struct hwrm_port_qstats_input req = {0};
3165 int rc = 0;
3166
3167 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_PORT_QSTATS);
3168
3169 req.port_id = htole16(softc->pf.port_id);
3170 req.rx_stat_host_addr = htole64(softc->hw_rx_port_stats.idi_paddr);
3171 req.tx_stat_host_addr = htole64(softc->hw_tx_port_stats.idi_paddr);
3172
3173 BNXT_HWRM_LOCK(softc);
3174 rc = _hwrm_send_message(softc, &req, sizeof(req));
3175 BNXT_HWRM_UNLOCK(softc);
3176
3177 return rc;
3178 }
3179
3180 #endif
3181
3182 int
3183 bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt_softc *softc,
3184 uint32_t vnic_id, uint32_t rx_mask, uint64_t mc_addr, uint32_t mc_count)
3185 {
3186 struct hwrm_cfa_l2_set_rx_mask_input req = {0};
3187
3188 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_CFA_L2_SET_RX_MASK);
3189
3190 req.vnic_id = htole32(vnic_id);
3191 req.mask = htole32(rx_mask);
3192 req.mc_tbl_addr = htole64(mc_addr);
3193 req.num_mc_entries = htole32(mc_count);
3194 return hwrm_send_message(softc, &req, sizeof(req));
3195 }
3196
3197 int
3198 bnxt_hwrm_set_filter(struct bnxt_softc *softc, struct bnxt_vnic_info *vnic)
3199 {
3200 struct hwrm_cfa_l2_filter_alloc_input req = {0};
3201 struct hwrm_cfa_l2_filter_alloc_output *resp;
3202 uint32_t enables = 0;
3203 int rc = 0;
3204
3205 if (vnic->filter_id != -1) {
3206 printf("%s: attempt to re-allocate l2 ctx filter\n",
3207 DEVNAME(softc));
3208 return EINVAL;
3209 }
3210
3211 resp = BNXT_DMA_KVA(softc->sc_cmd_resp);
3212 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_CFA_L2_FILTER_ALLOC);
3213
3214 req.flags = htole32(HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX
3215 | HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST);
3216 enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR
3217 | HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK
3218 | HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
3219 req.enables = htole32(enables);
3220 req.dst_id = htole16(vnic->id);
3221 memcpy(req.l2_addr, softc->sc_ac.ac_enaddr, ETHER_ADDR_LEN);
3222 memset(&req.l2_addr_mask, 0xff, sizeof(req.l2_addr_mask));
3223
3224 BNXT_HWRM_LOCK(softc);
3225 rc = _hwrm_send_message(softc, &req, sizeof(req));
3226 if (rc)
3227 goto fail;
3228
3229 vnic->filter_id = le64toh(resp->l2_filter_id);
3230 vnic->flow_id = le64toh(resp->flow_id);
3231
3232 fail:
3233 BNXT_HWRM_UNLOCK(softc);
3234 return (rc);
3235 }
3236
3237 int
3238 bnxt_hwrm_free_filter(struct bnxt_softc *softc, struct bnxt_vnic_info *vnic)
3239 {
3240 struct hwrm_cfa_l2_filter_free_input req = {0};
3241 int rc = 0;
3242
3243 if (vnic->filter_id == -1) {
3244 printf("%s: attempt to deallocate filter %llx\n",
3245 DEVNAME(softc), vnic->filter_id);
3246 return (EINVAL);
3247 }
3248
3249 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_CFA_L2_FILTER_FREE);
3250 req.l2_filter_id = htole64(vnic->filter_id);
3251
3252 BNXT_HWRM_LOCK(softc);
3253 rc = _hwrm_send_message(softc, &req, sizeof(req));
3254 if (rc == 0)
3255 vnic->filter_id = -1;
3256 BNXT_HWRM_UNLOCK(softc);
3257
3258 return (rc);
3259 }
3260
3261
3262 int
3263 bnxt_hwrm_vnic_rss_cfg(struct bnxt_softc *softc, struct bnxt_vnic_info *vnic,
3264 uint32_t hash_type, daddr_t rss_table, daddr_t rss_key)
3265 {
3266 struct hwrm_vnic_rss_cfg_input req = {0};
3267
3268 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_RSS_CFG);
3269
3270 req.hash_type = htole32(hash_type);
3271 req.ring_grp_tbl_addr = htole64(rss_table);
3272 req.hash_key_tbl_addr = htole64(rss_key);
3273 req.rss_ctx_idx = htole16(vnic->rss_id);
3274
3275 return hwrm_send_message(softc, &req, sizeof(req));
3276 }
3277
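/*
 * Direct firmware async event notifications at the given completion
 * ring.  Only the PF branch is currently taken; see the hardcoded
 * condition below.
 */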
3278 int
3279 bnxt_cfg_async_cr(struct bnxt_softc *softc, struct bnxt_cp_ring *cpr)
3280 {
3281 int rc = 0;
3282
3283 if (1 /* BNXT_PF(softc) */) {
3284 struct hwrm_func_cfg_input req = {0};
3285
3286 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_CFG);
3287
3288 req.fid = htole16(0xffff);
3289 req.enables = htole32(HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
3290 req.async_event_cr = htole16(cpr->ring.phys_id);
3291
3292 rc = hwrm_send_message(softc, &req, sizeof(req));
3293 } else {
3294 struct hwrm_func_vf_cfg_input req = {0};
3295
3296 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_VF_CFG);
3297
3298 req.enables = htole32(HWRM_FUNC_VF_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
3299 req.async_event_cr = htole16(cpr->ring.phys_id);
3300
3301 rc = hwrm_send_message(softc, &req, sizeof(req));
3302 }
3303 return rc;
3304 }
3305
3306 #if 0
3307
3308 void
3309 bnxt_validate_hw_lro_settings(struct bnxt_softc *softc)
3310 {
3311 softc->hw_lro.enable = min(softc->hw_lro.enable, 1);
3312
3313 softc->hw_lro.is_mode_gro = min(softc->hw_lro.is_mode_gro, 1);
3314
3315 softc->hw_lro.max_agg_segs = min(softc->hw_lro.max_agg_segs,
3316 HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_MAX);
3317
3318 softc->hw_lro.max_aggs = min(softc->hw_lro.max_aggs,
3319 HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX);
3320
3321 softc->hw_lro.min_agg_len = min(softc->hw_lro.min_agg_len, BNXT_MAX_MTU);
3322 }
3323
3324 int
3325 bnxt_hwrm_vnic_tpa_cfg(struct bnxt_softc *softc)
3326 {
3327 struct hwrm_vnic_tpa_cfg_input req = {0};
3328 uint32_t flags;
3329
3330 if (softc->vnic_info.id == (uint16_t) HWRM_NA_SIGNATURE) {
3331 return 0;
3332 }
3333
3334 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_TPA_CFG);
3335
3336 if (softc->hw_lro.enable) {
3337 flags = HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
3338 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA |
3339 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
3340 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ;
3341
3342 if (softc->hw_lro.is_mode_gro)
3343 flags |= HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO;
3344 else
3345 flags |= HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE;
3346
3347 req.flags = htole32(flags);
3348
3349 req.enables = htole32(HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
3350 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS |
3351 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN);
3352
3353 req.max_agg_segs = htole16(softc->hw_lro.max_agg_segs);
3354 req.max_aggs = htole16(softc->hw_lro.max_aggs);
3355 req.min_agg_len = htole32(softc->hw_lro.min_agg_len);
3356 }
3357
3358 req.vnic_id = htole16(softc->vnic_info.id);
3359
3360 return hwrm_send_message(softc, &req, sizeof(req));
3361 }
3362
3363
3364 int
3365 bnxt_hwrm_fw_reset(struct bnxt_softc *softc, uint8_t processor,
3366 uint8_t *selfreset)
3367 {
3368 struct hwrm_fw_reset_input req = {0};
3369 struct hwrm_fw_reset_output *resp =
3370 (void *)softc->hwrm_cmd_resp.idi_vaddr;
3371 int rc;
3372
3373 MPASS(selfreset);
3374
3375 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FW_RESET);
3376 req.embedded_proc_type = processor;
3377 req.selfrst_status = *selfreset;
3378
3379 BNXT_HWRM_LOCK(softc);
3380 rc = _hwrm_send_message(softc, &req, sizeof(req));
3381 if (rc)
3382 goto exit;
3383 *selfreset = resp->selfrst_status;
3384
3385 exit:
3386 BNXT_HWRM_UNLOCK(softc);
3387 return rc;
3388 }
3389
3390 int
3391 bnxt_hwrm_fw_qstatus(struct bnxt_softc *softc, uint8_t type, uint8_t *selfreset)
3392 {
3393 struct hwrm_fw_qstatus_input req = {0};
3394 struct hwrm_fw_qstatus_output *resp =
3395 (void *)softc->hwrm_cmd_resp.idi_vaddr;
3396 int rc;
3397
3398 MPASS(selfreset);
3399
3400 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FW_QSTATUS);
3401 req.embedded_proc_type = type;
3402
3403 BNXT_HWRM_LOCK(softc);
3404 rc = _hwrm_send_message(softc, &req, sizeof(req));
3405 if (rc)
3406 goto exit;
3407 *selfreset = resp->selfrst_status;
3408
3409 exit:
3410 BNXT_HWRM_UNLOCK(softc);
3411 return rc;
3412 }
3413
3414 #endif
3415
3416 int
3417 bnxt_hwrm_nvm_get_dev_info(struct bnxt_softc *softc, uint16_t *mfg_id,
3418 uint16_t *device_id, uint32_t *sector_size, uint32_t *nvram_size,
3419 uint32_t *reserved_size, uint32_t *available_size)
3420 {
3421 struct hwrm_nvm_get_dev_info_input req = {0};
3422 struct hwrm_nvm_get_dev_info_output *resp =
3423 BNXT_DMA_KVA(softc->sc_cmd_resp);
3424 int rc;
3425 uint32_t old_timeo;
3426
3427 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_NVM_GET_DEV_INFO);
3428
3429 BNXT_HWRM_LOCK(softc);
3430 old_timeo = softc->sc_cmd_timeo;
3431 softc->sc_cmd_timeo = BNXT_NVM_TIMEO;
3432 rc = _hwrm_send_message(softc, &req, sizeof(req));
3433 softc->sc_cmd_timeo = old_timeo;
3434 if (rc)
3435 goto exit;
3436
3437 if (mfg_id)
3438 *mfg_id = le16toh(resp->manufacturer_id);
3439 if (device_id)
3440 *device_id = le16toh(resp->device_id);
3441 if (sector_size)
3442 *sector_size = le32toh(resp->sector_size);
3443 if (nvram_size)
3444 *nvram_size = le32toh(resp->nvram_size);
3445 if (reserved_size)
3446 *reserved_size = le32toh(resp->reserved_size);
3447 if (available_size)
3448 *available_size = le32toh(resp->available_size);
3449
3450 exit:
3451 BNXT_HWRM_UNLOCK(softc);
3452 return rc;
3453 }
3454
3455 #if 0
3456
3457 int
3458 bnxt_hwrm_fw_get_time(struct bnxt_softc *softc, uint16_t *year, uint8_t *month,
3459 uint8_t *day, uint8_t *hour, uint8_t *minute, uint8_t *second,
3460 uint16_t *millisecond, uint16_t *zone)
3461 {
3462 struct hwrm_fw_get_time_input req = {0};
3463 struct hwrm_fw_get_time_output *resp =
3464 (void *)softc->hwrm_cmd_resp.idi_vaddr;
3465 int rc;
3466
3467 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FW_GET_TIME);
3468
3469 BNXT_HWRM_LOCK(softc);
3470 rc = _hwrm_send_message(softc, &req, sizeof(req));
3471 if (rc)
3472 goto exit;
3473
3474 if (year)
3475 *year = le16toh(resp->year);
3476 if (month)
3477 *month = resp->month;
3478 if (day)
3479 *day = resp->day;
3480 if (hour)
3481 *hour = resp->hour;
3482 if (minute)
3483 *minute = resp->minute;
3484 if (second)
3485 *second = resp->second;
3486 if (millisecond)
3487 *millisecond = le16toh(resp->millisecond);
3488 if (zone)
3489 *zone = le16toh(resp->zone);
3490
3491 exit:
3492 BNXT_HWRM_UNLOCK(softc);
3493 return rc;
3494 }
3495
3496 int
3497 bnxt_hwrm_fw_set_time(struct bnxt_softc *softc, uint16_t year, uint8_t month,
3498 uint8_t day, uint8_t hour, uint8_t minute, uint8_t second,
3499 uint16_t millisecond, uint16_t zone)
3500 {
3501 struct hwrm_fw_set_time_input req = {0};
3502
3503 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FW_SET_TIME);
3504
3505 req.year = htole16(year);
3506 req.month = month;
3507 req.day = day;
3508 req.hour = hour;
3509 req.minute = minute;
3510 req.second = second;
3511 req.millisecond = htole16(millisecond);
3512 req.zone = htole16(zone);
3513 return hwrm_send_message(softc, &req, sizeof(req));
3514 }
3515
3516 #endif
3517
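/*
 * async_event_fwd[] is an array of 32-bit masks: event id N lives in
 * word N / 32, bit N % 32, so e.g. event id 33 sets bit 1 of
 * async_event_fwd[1].
 */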
3518 void
3519 _bnxt_hwrm_set_async_event_bit(struct hwrm_func_drv_rgtr_input *req, int bit)
3520 {
3521 req->async_event_fwd[bit/32] |= (1 << (bit % 32));
3522 }
3523
3524 int
3525 bnxt_hwrm_func_rgtr_async_events(struct bnxt_softc *softc)
3526 {
3527 struct hwrm_func_drv_rgtr_input req = {0};
3528 int events[] = {
3529 HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
3530 HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
3531 HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
3532 HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
3533 HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE
3534 };
3535 int i;
3536
3537 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_DRV_RGTR);
3538
3539 req.enables =
3540 htole32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
3541
3542 for (i = 0; i < nitems(events); i++)
3543 _bnxt_hwrm_set_async_event_bit(&req, events[i]);
3544
3545 return hwrm_send_message(softc, &req, sizeof(req));
3546 }
3547
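/*
 * Read a full 256-byte SFP/QSFP EEPROM page through the firmware's
 * i2c proxy, one response buffer (sizeof(out->data) bytes) at a time.
 */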
3548 int
3549 bnxt_get_sffpage(struct bnxt_softc *softc, struct if_sffpage *sff)
3550 {
3551 struct hwrm_port_phy_i2c_read_input req = {0};
3552 struct hwrm_port_phy_i2c_read_output *out;
3553 int offset;
3554
3555 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_PORT_PHY_I2C_READ);
3556 req.i2c_slave_addr = sff->sff_addr;
3557 req.page_number = htole16(sff->sff_page);
3558
3559 for (offset = 0; offset < 256; offset += sizeof(out->data)) {
3560 req.page_offset = htole16(offset);
3561 req.data_length = sizeof(out->data);
3562 req.enables = htole32(HWRM_PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET);
3563
3564 if (hwrm_send_message(softc, &req, sizeof(req))) {
3565 printf("%s: failed to read i2c data\n", DEVNAME(softc));
3566 return 1;
3567 }
3568
3569 out = (struct hwrm_port_phy_i2c_read_output *)
3570 BNXT_DMA_KVA(softc->sc_cmd_resp);
3571 memcpy(sff->sff_data + offset, out->data, sizeof(out->data));
3572 }
3573
3574 return 0;
3575 }
3576