xref: /freebsd/sys/dev/bnxt/bnxt_txrx.c (revision 49a3df78)
1 /*-
2  * Broadcom NetXtreme-C/E network driver.
3  *
4  * Copyright (c) 2016 Broadcom, All Rights Reserved.
5  * The term Broadcom refers to Broadcom Limited and/or its subsidiaries
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
20  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
26  * THE POSSIBILITY OF SUCH DAMAGE.
27  */
28 
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
31 
32 #include <sys/types.h>
33 #include <sys/socket.h>
34 #include <sys/endian.h>
35 #include <net/if.h>
36 #include <net/if_var.h>
37 #include <net/ethernet.h>
38 #include <net/iflib.h>
39 
40 #include "opt_inet.h"
41 #include "opt_inet6.h"
42 #include "opt_rss.h"
43 
44 #include "bnxt.h"
45 
46 /*
47  * Function prototypes
48  */
49 
50 static int bnxt_isc_txd_encap(void *sc, if_pkt_info_t pi);
51 static void bnxt_isc_txd_flush(void *sc, uint16_t txqid, qidx_t pidx);
52 static int bnxt_isc_txd_credits_update(void *sc, uint16_t txqid, bool clear);
53 
54 static void bnxt_isc_rxd_refill(void *sc, if_rxd_update_t iru);
55 
56 /*				uint16_t rxqid, uint8_t flid,
57     uint32_t pidx, uint64_t *paddrs, caddr_t *vaddrs, uint16_t count,
58     uint16_t buf_size);
59 */
60 static void bnxt_isc_rxd_flush(void *sc, uint16_t rxqid, uint8_t flid,
61     qidx_t pidx);
62 static int bnxt_isc_rxd_available(void *sc, uint16_t rxqid, qidx_t idx,
63     qidx_t budget);
64 static int bnxt_isc_rxd_pkt_get(void *sc, if_rxd_info_t ri);
65 
66 static int bnxt_intr(void *sc);
67 
/*
 * iflib dispatch table: the device-dependent TX/RX entry points this
 * driver registers with iflib (descriptor encap/flush, TX credit
 * accounting, RX refill/available/receive, and the legacy interrupt).
 */
struct if_txrx bnxt_txrx  = {
	.ift_txd_encap = bnxt_isc_txd_encap,
	.ift_txd_flush = bnxt_isc_txd_flush,
	.ift_txd_credits_update = bnxt_isc_txd_credits_update,
	.ift_rxd_available = bnxt_isc_rxd_available,
	.ift_rxd_pkt_get = bnxt_isc_rxd_pkt_get,
	.ift_rxd_refill = bnxt_isc_rxd_refill,
	.ift_rxd_flush = bnxt_isc_rxd_flush,
	.ift_legacy_intr = bnxt_intr
};
78 
79 /*
80  * Device Dependent Packet Transmit and Receive Functions
81  */
82 
/*
 * Packet-length hint for the first TX BD, indexed by packet length
 * divided by 512 (see bnxt_isc_txd_encap); lengths of 2KB or more
 * bypass this table and use the GTE2K flag directly.
 */
static const uint16_t bnxt_tx_lhint[] = {
	TX_BD_SHORT_FLAGS_LHINT_LT512,
	TX_BD_SHORT_FLAGS_LHINT_LT1K,
	TX_BD_SHORT_FLAGS_LHINT_LT2K,
	TX_BD_SHORT_FLAGS_LHINT_LT2K,
	TX_BD_SHORT_FLAGS_LHINT_GTE2K,
};
90 
/*
 * iflib ift_txd_encap hook: write the packet described by pi into the
 * TX BD ring as one long BD (plus a "hi" BD when offloads are in use),
 * followed by one BD per remaining DMA segment.
 *
 * On return pi->ipi_new_pidx has been advanced to the slot after the
 * last BD written, ready for bnxt_isc_txd_flush.  Always returns 0.
 */
static int
bnxt_isc_txd_encap(void *sc, if_pkt_info_t pi)
{
	struct bnxt_softc *softc = (struct bnxt_softc *)sc;
	struct bnxt_ring *txr = &softc->tx_rings[pi->ipi_qsidx];
	struct tx_bd_long *tbd;
	struct tx_bd_long_hi *tbdh;
	bool need_hi = false;
	uint16_t flags_type;
	uint16_t lflags;
	uint32_t cfa_meta;
	int seg = 0;

	/* If we have offloads enabled, we need to use two BDs. */
	if ((pi->ipi_csum_flags & (CSUM_OFFLOAD | CSUM_TSO | CSUM_IP)) ||
	    pi->ipi_mflags & M_VLANTAG)
		need_hi = true;

	/* TODO: Devices before Cu+B1 need to not mix long and short BDs */
	need_hi = true;	/* force long BDs for now; see TODO above */

	pi->ipi_new_pidx = pi->ipi_pidx;
	tbd = &((struct tx_bd_long *)txr->vaddr)[pi->ipi_new_pidx];
	pi->ipi_ndescs = 0;
	/* No need to byte-swap the opaque value */
	/* opaque: total BD count in the top byte, starting pidx below.
	 * The count is read back in bnxt_isc_txd_credits_update. */
	tbd->opaque = ((pi->ipi_nsegs + need_hi) << 24) | pi->ipi_new_pidx;
	tbd->len = htole16(pi->ipi_segs[seg].ds_len);
	tbd->addr = htole64(pi->ipi_segs[seg++].ds_addr);
	/* Total BD count (segments plus the hi BD) goes in the flags. */
	flags_type = ((pi->ipi_nsegs + need_hi) <<
	    TX_BD_SHORT_FLAGS_BD_CNT_SFT) & TX_BD_SHORT_FLAGS_BD_CNT_MASK;
	/* Length hint: table covers < 2KB in 512-byte steps. */
	if (pi->ipi_len >= 2048)
		flags_type |= TX_BD_SHORT_FLAGS_LHINT_GTE2K;
	else
		flags_type |= bnxt_tx_lhint[pi->ipi_len >> 9];

	if (need_hi) {
		flags_type |= TX_BD_LONG_TYPE_TX_BD_LONG;

		/* The hi BD occupies the slot right after the first BD. */
		pi->ipi_new_pidx = RING_NEXT(txr, pi->ipi_new_pidx);
		tbdh = &((struct tx_bd_long_hi *)txr->vaddr)[pi->ipi_new_pidx];
		tbdh->kid_or_ts_high_mss = htole16(pi->ipi_tso_segsz);
		/* Header length is expressed in 16-bit words (hence >> 1). */
		tbdh->kid_or_ts_low_hdr_size = htole16((pi->ipi_ehdrlen + pi->ipi_ip_hlen +
		    pi->ipi_tcp_hlen) >> 1);
		tbdh->cfa_action = 0;
		lflags = 0;
		cfa_meta = 0;
		if (pi->ipi_mflags & M_VLANTAG) {
			/* TODO: Do we need to byte-swap the vtag here? */
			cfa_meta = TX_BD_LONG_CFA_META_KEY_VLAN_TAG |
			    pi->ipi_vtag;
			cfa_meta |= TX_BD_LONG_CFA_META_VLAN_TPID_TPID8100;
		}
		tbdh->cfa_meta = htole32(cfa_meta);
		/* TSO implies checksum offload, so the checks are exclusive. */
		if (pi->ipi_csum_flags & CSUM_TSO) {
			lflags |= TX_BD_LONG_LFLAGS_LSO |
			    TX_BD_LONG_LFLAGS_T_IPID;
		}
		else if(pi->ipi_csum_flags & CSUM_OFFLOAD) {
			lflags |= TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM |
			    TX_BD_LONG_LFLAGS_IP_CHKSUM;
		}
		else if(pi->ipi_csum_flags & CSUM_IP) {
			lflags |= TX_BD_LONG_LFLAGS_IP_CHKSUM;
		}
		tbdh->lflags = htole16(lflags);
	}
	else {
		flags_type |= TX_BD_SHORT_TYPE_TX_BD_SHORT;
	}

	/* Emit one BD for each remaining DMA segment.  flags_type is
	 * always written to the previous BD at the top of the loop so
	 * the PACKET_END flag below lands on the true last BD. */
	for (; seg < pi->ipi_nsegs; seg++) {
		tbd->flags_type = htole16(flags_type);
		pi->ipi_new_pidx = RING_NEXT(txr, pi->ipi_new_pidx);
		tbd = &((struct tx_bd_long *)txr->vaddr)[pi->ipi_new_pidx];
		tbd->len = htole16(pi->ipi_segs[seg].ds_len);
		tbd->addr = htole64(pi->ipi_segs[seg].ds_addr);
		flags_type = TX_BD_SHORT_TYPE_TX_BD_SHORT;
	}
	/* Mark the last BD as the end of this packet. */
	flags_type |= TX_BD_SHORT_FLAGS_PACKET_END;
	tbd->flags_type = htole16(flags_type);
	pi->ipi_new_pidx = RING_NEXT(txr, pi->ipi_new_pidx);

	return 0;
}
175 
176 static void
177 bnxt_isc_txd_flush(void *sc, uint16_t txqid, qidx_t pidx)
178 {
179 	struct bnxt_softc *softc = (struct bnxt_softc *)sc;
180 	struct bnxt_ring *tx_ring = &softc->tx_rings[txqid];
181 
182 	/* pidx is what we last set ipi_new_pidx to */
183 	softc->db_ops.bnxt_db_tx(tx_ring, pidx);
184 	return;
185 }
186 
/*
 * iflib ift_txd_credits_update hook: walk the TX completion ring and
 * return how many TX BDs the hardware has finished with.
 *
 * Each TX_L2 completion carries its packet's BD count in the top byte
 * of the opaque value (written by bnxt_isc_txd_encap).  The walk uses
 * local copies of cons/v_bit; when 'clear' is set and credits were
 * found, the consumed position is committed back to the ring state and
 * the completion doorbell is rung.  When 'clear' is false the scan
 * stops at the first completion, since iflib only needs to know that
 * at least one credit exists.
 */
static int
bnxt_isc_txd_credits_update(void *sc, uint16_t txqid, bool clear)
{
	struct bnxt_softc *softc = (struct bnxt_softc *)sc;
	struct bnxt_cp_ring *cpr = &softc->tx_cp_rings[txqid];
	struct tx_cmpl *cmpl = (struct tx_cmpl *)cpr->ring.vaddr;
	int avail = 0;
	uint32_t cons = cpr->cons;
	bool v_bit = cpr->v_bit;
	bool last_v_bit;
	uint32_t last_cons;
	uint16_t type;
	uint16_t err;

	for (;;) {
		/* Remember the last fully-consumed position so a partial
		 * (invalid) entry is never committed below. */
		last_cons = cons;
		last_v_bit = v_bit;
		NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
		CMPL_PREFETCH_NEXT(cpr, cons);

		/* Stop at the first entry the hardware hasn't written. */
		if (!CMP_VALID(&cmpl[cons], v_bit))
			goto done;

		/* NOTE(review): flags_type is read without le16toh here,
		 * unlike the RX path -- harmless on little-endian; confirm
		 * for big-endian targets. */
		type = cmpl[cons].flags_type & TX_CMPL_TYPE_MASK;
		switch (type) {
		case TX_CMPL_TYPE_TX_L2:
			err = (le16toh(cmpl[cons].errors_v) &
			    TX_CMPL_ERRORS_BUFFER_ERROR_MASK) >>
			    TX_CMPL_ERRORS_BUFFER_ERROR_SFT;
			if (err)
				device_printf(softc->dev,
				    "TX completion error %u\n", err);
			/* No need to byte-swap the opaque value */
			/* Top byte of opaque is the packet's BD count. */
			avail += cmpl[cons].opaque >> 24;
			/*
			 * If we're not clearing, iflib only cares if there's
			 * at least one buffer.  Don't scan the whole ring in
			 * this case.
			 */
			if (!clear)
				goto done;
			break;
		default:
			/* Odd completion types occupy two ring entries. */
			if (type & 1) {
				NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
				if (!CMP_VALID(&cmpl[cons], v_bit))
					goto done;
			}
			device_printf(softc->dev,
			    "Unhandled TX completion type %u\n", type);
			break;
		}
	}
done:

	/* Commit the consumed position and ack via the CQ doorbell. */
	if (clear && avail) {
		cpr->cons = last_cons;
		cpr->v_bit = last_v_bit;
		softc->db_ops.bnxt_db_tx_cq(cpr, 0);
	}

	return avail;
}
250 
251 static void
252 bnxt_isc_rxd_refill(void *sc, if_rxd_update_t iru)
253 {
254 	struct bnxt_softc *softc = (struct bnxt_softc *)sc;
255 	struct bnxt_ring *rx_ring;
256 	struct rx_prod_pkt_bd *rxbd;
257 	uint16_t type;
258 	uint16_t i;
259 	uint16_t rxqid;
260 	uint16_t count, len;
261 	uint32_t pidx;
262 	uint8_t flid;
263 	uint64_t *paddrs;
264 	qidx_t	*frag_idxs;
265 
266 	rxqid = iru->iru_qsidx;
267 	count = iru->iru_count;
268 	len = iru->iru_buf_size;
269 	pidx = iru->iru_pidx;
270 	flid = iru->iru_flidx;
271 	paddrs = iru->iru_paddrs;
272 	frag_idxs = iru->iru_idxs;
273 
274 	if (flid == 0) {
275 		rx_ring = &softc->rx_rings[rxqid];
276 		type = RX_PROD_PKT_BD_TYPE_RX_PROD_PKT;
277 	}
278 	else {
279 		rx_ring = &softc->ag_rings[rxqid];
280 		type = RX_PROD_AGG_BD_TYPE_RX_PROD_AGG;
281 	}
282 	rxbd = (void *)rx_ring->vaddr;
283 
284 	for (i=0; i<count; i++) {
285 		rxbd[pidx].flags_type = htole16(type);
286 		rxbd[pidx].len = htole16(len);
287 		/* No need to byte-swap the opaque value */
288 		rxbd[pidx].opaque = (((rxqid & 0xff) << 24) | (flid << 16)
289 		    | (frag_idxs[i]));
290 		rxbd[pidx].addr = htole64(paddrs[i]);
291 		if (++pidx == rx_ring->ring_size)
292 			pidx = 0;
293 	}
294 	return;
295 }
296 
297 static void
298 bnxt_isc_rxd_flush(void *sc, uint16_t rxqid, uint8_t flid,
299     qidx_t pidx)
300 {
301 	struct bnxt_softc *softc = (struct bnxt_softc *)sc;
302 	struct bnxt_ring *rx_ring;
303 
304 	if (flid == 0)
305 		rx_ring = &softc->rx_rings[rxqid];
306 	else
307 		rx_ring = &softc->ag_rings[rxqid];
308 
309 	/*
310 	 * We *must* update the completion ring before updating the RX ring
311 	 * or we will overrun the completion ring and the device will wedge for
312 	 * RX.
313 	 */
314 	softc->db_ops.bnxt_db_rx_cq(&softc->rx_cp_rings[rxqid], 0);
315 	softc->db_ops.bnxt_db_rx(rx_ring, pidx);
316 	return;
317 }
318 
/*
 * iflib ift_rxd_available hook: report how many complete packets are
 * ready on the RX completion ring.  The scan uses local copies of
 * cons/v_bit, so nothing is consumed here.
 *
 * One packet may span several completion entries: RX_L2 and TPA_END
 * are followed by a second 16-byte half plus one entry per aggregation
 * buffer, and TPA_START occupies two entries but yields no packet yet.
 * The scan stops at the first entry the hardware hasn't finished, or
 * once the count exceeds 'budget'.
 */
static int
bnxt_isc_rxd_available(void *sc, uint16_t rxqid, qidx_t idx, qidx_t budget)
{
	struct bnxt_softc *softc = (struct bnxt_softc *)sc;
	struct bnxt_cp_ring *cpr = &softc->rx_cp_rings[rxqid];
	struct rx_pkt_cmpl *rcp;
	struct rx_tpa_end_cmpl *rtpae;
	struct cmpl_base *cmp = (struct cmpl_base *)cpr->ring.vaddr;
	int avail = 0;
	uint32_t cons = cpr->cons;
	bool v_bit = cpr->v_bit;
	uint8_t ags;
	int i;
	uint16_t type;

	for (;;) {
		NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
		CMPL_PREFETCH_NEXT(cpr, cons);

		if (!CMP_VALID(&cmp[cons], v_bit))
			goto cmpl_invalid;

		type = le16toh(cmp[cons].type) & CMPL_BASE_TYPE_MASK;
		switch (type) {
		case CMPL_BASE_TYPE_RX_L2:
			rcp = (void *)&cmp[cons];
			/* Number of aggregation buffers in this packet. */
			ags = (rcp->agg_bufs_v1 & RX_PKT_CMPL_AGG_BUFS_MASK) >>
			    RX_PKT_CMPL_AGG_BUFS_SFT;
			/* Step over the second 16-byte half. */
			NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
			CMPL_PREFETCH_NEXT(cpr, cons);

			if (!CMP_VALID(&cmp[cons], v_bit))
				goto cmpl_invalid;

			/* Now account for all the AG completions */
			for (i=0; i<ags; i++) {
				NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
				CMPL_PREFETCH_NEXT(cpr, cons);
				if (!CMP_VALID(&cmp[cons], v_bit))
					goto cmpl_invalid;
			}
			avail++;
			break;
		case CMPL_BASE_TYPE_RX_TPA_END:
			rtpae = (void *)&cmp[cons];
			ags = (rtpae->agg_bufs_v1 &
			    RX_TPA_END_CMPL_AGG_BUFS_MASK) >>
			    RX_TPA_END_CMPL_AGG_BUFS_SFT;
			/* Step over the second 16-byte half. */
			NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
			CMPL_PREFETCH_NEXT(cpr, cons);

			if (!CMP_VALID(&cmp[cons], v_bit))
				goto cmpl_invalid;
			/* Now account for all the AG completions */
			for (i=0; i<ags; i++) {
				NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
				CMPL_PREFETCH_NEXT(cpr, cons);
				if (!CMP_VALID(&cmp[cons], v_bit))
					goto cmpl_invalid;
			}
			avail++;
			break;
		case CMPL_BASE_TYPE_RX_TPA_START:
			/* Two entries, but no packet until TPA_END. */
			NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
			CMPL_PREFETCH_NEXT(cpr, cons);

			if (!CMP_VALID(&cmp[cons], v_bit))
				goto cmpl_invalid;
			break;
		case CMPL_BASE_TYPE_RX_AGG:
			/* Counted above as part of its parent completion. */
			break;
		default:
			device_printf(softc->dev,
			    "Unhandled completion type %d on RXQ %d\n",
			    type, rxqid);

			/* Odd completion types use two completions */
			if (type & 1) {
				NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
				CMPL_PREFETCH_NEXT(cpr, cons);

				if (!CMP_VALID(&cmp[cons], v_bit))
					goto cmpl_invalid;
			}
			break;
		}
		/* Enough packets counted for this budget. */
		if (avail > budget)
			break;
	}
cmpl_invalid:

	return avail;
}
412 
413 static void
414 bnxt_set_rsstype(if_rxd_info_t ri, uint8_t rss_hash_type)
415 {
416 	uint8_t rss_profile_id;
417 
418 	rss_profile_id = BNXT_GET_RSS_PROFILE_ID(rss_hash_type);
419 	switch (rss_profile_id) {
420 	case BNXT_RSS_HASH_TYPE_TCPV4:
421 		ri->iri_rsstype = M_HASHTYPE_RSS_TCP_IPV4;
422 		break;
423 	case BNXT_RSS_HASH_TYPE_UDPV4:
424 		ri->iri_rsstype = M_HASHTYPE_RSS_UDP_IPV4;
425 		break;
426 	case BNXT_RSS_HASH_TYPE_IPV4:
427 		ri->iri_rsstype = M_HASHTYPE_RSS_IPV4;
428 		break;
429 	case BNXT_RSS_HASH_TYPE_TCPV6:
430 		ri->iri_rsstype = M_HASHTYPE_RSS_TCP_IPV6;
431 		break;
432 	case BNXT_RSS_HASH_TYPE_UDPV6:
433 		ri->iri_rsstype = M_HASHTYPE_RSS_UDP_IPV6;
434 		break;
435 	case BNXT_RSS_HASH_TYPE_IPV6:
436 		ri->iri_rsstype = M_HASHTYPE_RSS_IPV6;
437 		break;
438 	default:
439 		ri->iri_rsstype = M_HASHTYPE_OPAQUE_HASH;
440 		break;
441 	}
442 }
443 
/*
 * Decode an RX_L2 completion (starting at cpr->cons) into ri.
 *
 * The first 16-byte half supplies the RSS hash/type, packet length,
 * and the buffer locator packed in opaque at refill time (free-list id
 * in bits 16-23, fragment index in bits 0-15).  The second half
 * supplies VLAN metadata and checksum results.  One rx_abuf_cmpl entry
 * then follows per aggregation buffer.  Advances cpr->cons/v_bit and
 * ri->iri_cidx past everything consumed.  Always returns 0.
 */
static int
bnxt_pkt_get_l2(struct bnxt_softc *softc, if_rxd_info_t ri,
    struct bnxt_cp_ring *cpr, uint16_t flags_type)
{
	struct rx_pkt_cmpl *rcp;
	struct rx_pkt_cmpl_hi *rcph;
	struct rx_abuf_cmpl *acp;
	uint32_t flags2;
	uint32_t errors;
	uint8_t	ags;
	int i;

	rcp = &((struct rx_pkt_cmpl *)cpr->ring.vaddr)[cpr->cons];

	/* Extract from the first 16-byte BD */
	if (flags_type & RX_PKT_CMPL_FLAGS_RSS_VALID) {
		ri->iri_flowid = le32toh(rcp->rss_hash);
		bnxt_set_rsstype(ri, rcp->rss_hash_type);
	}
	else {
		ri->iri_rsstype = M_HASHTYPE_NONE;
	}
	ags = (rcp->agg_bufs_v1 & RX_PKT_CMPL_AGG_BUFS_MASK) >>
	    RX_PKT_CMPL_AGG_BUFS_SFT;
	/* First buffer plus one fragment per aggregation buffer. */
	ri->iri_nfrags = ags + 1;
	/* No need to byte-swap the opaque value */
	ri->iri_frags[0].irf_flid = (rcp->opaque >> 16) & 0xff;
	ri->iri_frags[0].irf_idx = rcp->opaque & 0xffff;
	ri->iri_frags[0].irf_len = le16toh(rcp->len);
	ri->iri_len = le16toh(rcp->len);

	/* Now the second 16-byte BD */
	NEXT_CP_CONS_V(&cpr->ring, cpr->cons, cpr->v_bit);
	ri->iri_cidx = RING_NEXT(&cpr->ring, ri->iri_cidx);
	rcph = &((struct rx_pkt_cmpl_hi *)cpr->ring.vaddr)[cpr->cons];

	flags2 = le32toh(rcph->flags2);
	errors = le16toh(rcph->errors_v2);
	if ((flags2 & RX_PKT_CMPL_FLAGS2_META_FORMAT_MASK) ==
	    RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN) {
		ri->iri_flags |= M_VLANTAG;
		/* TODO: Should this be the entire 16-bits? */
		ri->iri_vtag = le32toh(rcph->metadata) &
		    (RX_PKT_CMPL_METADATA_VID_MASK | RX_PKT_CMPL_METADATA_DE |
		    RX_PKT_CMPL_METADATA_PRI_MASK);
	}
	/* IP checksum: checked whenever the chip calculated one, valid
	 * only if no error bit is set. */
	if (flags2 & RX_PKT_CMPL_FLAGS2_IP_CS_CALC) {
		ri->iri_csum_flags |= CSUM_IP_CHECKED;
		if (!(errors & RX_PKT_CMPL_ERRORS_IP_CS_ERROR))
			ri->iri_csum_flags |= CSUM_IP_VALID;
	}
	/* L4 checksum: either calc flag counts; both error bits checked. */
	if (flags2 & (RX_PKT_CMPL_FLAGS2_L4_CS_CALC |
		      RX_PKT_CMPL_FLAGS2_T_L4_CS_CALC)) {
		ri->iri_csum_flags |= CSUM_L4_CALC;
		if (!(errors & (RX_PKT_CMPL_ERRORS_L4_CS_ERROR |
				RX_PKT_CMPL_ERRORS_T_L4_CS_ERROR))) {
			ri->iri_csum_flags |= CSUM_L4_VALID;
			ri->iri_csum_data = 0xffff;
		}
	}

	/* And finally the ag ring stuff. */
	for (i=1; i < ri->iri_nfrags; i++) {
		NEXT_CP_CONS_V(&cpr->ring, cpr->cons, cpr->v_bit);
		ri->iri_cidx = RING_NEXT(&cpr->ring, ri->iri_cidx);
		acp = &((struct rx_abuf_cmpl *)cpr->ring.vaddr)[cpr->cons];

		/* No need to byte-swap the opaque value */
		ri->iri_frags[i].irf_flid = (acp->opaque >> 16 & 0xff);
		ri->iri_frags[i].irf_idx = acp->opaque & 0xffff;
		ri->iri_frags[i].irf_len = le16toh(acp->len);
		ri->iri_len += le16toh(acp->len);
	}

	return 0;
}
520 
/*
 * Decode a TPA (aggregation) end completion into ri.
 *
 * The matching TPA start completion was stashed in
 * rx_rings[qsidx].tpa_start[agg_id] by bnxt_isc_rxd_pkt_get; the first
 * fragment's RSS/VLAN/checksum data and buffer locator come from that
 * stashed copy.  The remaining fragments come from the AG completions
 * that follow, plus one final fragment for the buffer referenced by
 * the end completion itself.  Always returns 0.
 */
static int
bnxt_pkt_get_tpa(struct bnxt_softc *softc, if_rxd_info_t ri,
    struct bnxt_cp_ring *cpr, uint16_t flags_type)
{
	struct rx_tpa_end_cmpl *agend =
	    &((struct rx_tpa_end_cmpl *)cpr->ring.vaddr)[cpr->cons];
	struct rx_abuf_cmpl *acp;
	struct bnxt_full_tpa_start *tpas;
	uint32_t flags2;
	uint8_t	ags;
	uint8_t agg_id;
	int i;

	/* Get the agg_id */
	agg_id = (agend->agg_id & RX_TPA_END_CMPL_AGG_ID_MASK) >>
	    RX_TPA_END_CMPL_AGG_ID_SFT;
	tpas = &(softc->rx_rings[ri->iri_qsidx].tpa_start[agg_id]);

	/* Extract from the first 16-byte BD */
	if (le16toh(tpas->low.flags_type) & RX_TPA_START_CMPL_FLAGS_RSS_VALID) {
		ri->iri_flowid = le32toh(tpas->low.rss_hash);
		bnxt_set_rsstype(ri, tpas->low.rss_hash_type);
	}
	else {
		ri->iri_rsstype = M_HASHTYPE_NONE;
	}
	ags = (agend->agg_bufs_v1 & RX_TPA_END_CMPL_AGG_BUFS_MASK) >>
	    RX_TPA_END_CMPL_AGG_BUFS_SFT;
	/* Start buffer plus the AG buffers; the end buffer is added below. */
	ri->iri_nfrags = ags + 1;
	/* No need to byte-swap the opaque value */
	ri->iri_frags[0].irf_flid = ((tpas->low.opaque >> 16) & 0xff);
	ri->iri_frags[0].irf_idx = (tpas->low.opaque & 0xffff);
	ri->iri_frags[0].irf_len = le16toh(tpas->low.len);
	ri->iri_len = le16toh(tpas->low.len);

	/* Now the second 16-byte BD */
	NEXT_CP_CONS_V(&cpr->ring, cpr->cons, cpr->v_bit);
	ri->iri_cidx = RING_NEXT(&cpr->ring, ri->iri_cidx);

	flags2 = le32toh(tpas->high.flags2);
	if ((flags2 & RX_TPA_START_CMPL_FLAGS2_META_FORMAT_MASK) ==
	    RX_TPA_START_CMPL_FLAGS2_META_FORMAT_VLAN) {
		ri->iri_flags |= M_VLANTAG;
		/* TODO: Should this be the entire 16-bits? */
		ri->iri_vtag = le32toh(tpas->high.metadata) &
		    (RX_TPA_START_CMPL_METADATA_VID_MASK |
		    RX_TPA_START_CMPL_METADATA_DE |
		    RX_TPA_START_CMPL_METADATA_PRI_MASK);
	}
	/* NOTE(review): unlike the L2 path, checksums are marked valid
	 * unconditionally here (no error bits consulted) -- presumably
	 * aggregated flows are only formed from good checksums; verify. */
	if (flags2 & RX_TPA_START_CMPL_FLAGS2_IP_CS_CALC) {
		ri->iri_csum_flags |= CSUM_IP_CHECKED;
		ri->iri_csum_flags |= CSUM_IP_VALID;
	}
	if (flags2 & RX_TPA_START_CMPL_FLAGS2_L4_CS_CALC) {
		ri->iri_csum_flags |= CSUM_L4_CALC;
		ri->iri_csum_flags |= CSUM_L4_VALID;
		ri->iri_csum_data = 0xffff;
	}

	/* Now the ag ring stuff. */
	for (i=1; i < ri->iri_nfrags; i++) {
		NEXT_CP_CONS_V(&cpr->ring, cpr->cons, cpr->v_bit);
		ri->iri_cidx = RING_NEXT(&cpr->ring, ri->iri_cidx);
		acp = &((struct rx_abuf_cmpl *)cpr->ring.vaddr)[cpr->cons];

		/* No need to byte-swap the opaque value */
		ri->iri_frags[i].irf_flid = ((acp->opaque >> 16) & 0xff);
		ri->iri_frags[i].irf_idx = (acp->opaque & 0xffff);
		ri->iri_frags[i].irf_len = le16toh(acp->len);
		ri->iri_len += le16toh(acp->len);
	}

	/* And finally, the empty BD at the end... */
	/* The loop above left i at the old iri_nfrags, which is exactly
	 * the slot for this extra fragment. */
	ri->iri_nfrags++;
	/* No need to byte-swap the opaque value */
	ri->iri_frags[i].irf_flid = ((agend->opaque >> 16) & 0xff);
	ri->iri_frags[i].irf_idx = (agend->opaque & 0xffff);
	ri->iri_frags[i].irf_len = le16toh(agend->len);
	ri->iri_len += le16toh(agend->len);

	return 0;
}
603 
/*
 * iflib ift_rxd_pkt_get hook: consume completion-ring entries until a
 * full packet can be described in ri.
 *
 * RX_L2 and TPA_END completions yield a packet via the helpers above.
 * A TPA_START completion yields nothing yet: both of its 16-byte
 * halves are copied into tpa_start[agg_id] for bnxt_pkt_get_tpa to use
 * when the matching TPA_END arrives, and scanning continues.
 */
/* If we return anything but zero, iflib will assert... */
static int
bnxt_isc_rxd_pkt_get(void *sc, if_rxd_info_t ri)
{
	struct bnxt_softc *softc = (struct bnxt_softc *)sc;
	struct bnxt_cp_ring *cpr = &softc->rx_cp_rings[ri->iri_qsidx];
	struct cmpl_base *cmp_q = (struct cmpl_base *)cpr->ring.vaddr;
	struct cmpl_base *cmp;
	struct rx_tpa_start_cmpl *rtpa;
	uint16_t flags_type;
	uint16_t type;
	uint8_t agg_id;

	for (;;) {
		NEXT_CP_CONS_V(&cpr->ring, cpr->cons, cpr->v_bit);
		ri->iri_cidx = RING_NEXT(&cpr->ring, ri->iri_cidx);
		CMPL_PREFETCH_NEXT(cpr, cpr->cons);
		cmp = &((struct cmpl_base *)cpr->ring.vaddr)[cpr->cons];

		flags_type = le16toh(cmp->type);
		type = flags_type & CMPL_BASE_TYPE_MASK;

		switch (type) {
		case CMPL_BASE_TYPE_RX_L2:
			return bnxt_pkt_get_l2(softc, ri, cpr, flags_type);
		case CMPL_BASE_TYPE_RX_TPA_END:
			return bnxt_pkt_get_tpa(softc, ri, cpr, flags_type);
		case CMPL_BASE_TYPE_RX_TPA_START:
			/* Stash the low half, then advance to the high half
			 * and stash that too; no packet is produced yet. */
			rtpa = (void *)&cmp_q[cpr->cons];
			agg_id = (rtpa->agg_id &
			    RX_TPA_START_CMPL_AGG_ID_MASK) >>
			    RX_TPA_START_CMPL_AGG_ID_SFT;
			softc->rx_rings[ri->iri_qsidx].tpa_start[agg_id].low = *rtpa;

			NEXT_CP_CONS_V(&cpr->ring, cpr->cons, cpr->v_bit);
			ri->iri_cidx = RING_NEXT(&cpr->ring, ri->iri_cidx);
			CMPL_PREFETCH_NEXT(cpr, cpr->cons);

			softc->rx_rings[ri->iri_qsidx].tpa_start[agg_id].high =
			    ((struct rx_tpa_start_cmpl_hi *)cmp_q)[cpr->cons];
			break;
		default:
			device_printf(softc->dev,
			    "Unhandled completion type %d on RXQ %d get\n",
			    type, ri->iri_qsidx);
			/* Odd completion types occupy two ring entries. */
			if (type & 1) {
				NEXT_CP_CONS_V(&cpr->ring, cpr->cons,
				    cpr->v_bit);
				ri->iri_cidx = RING_NEXT(&cpr->ring,
				    ri->iri_cidx);
				CMPL_PREFETCH_NEXT(cpr, cpr->cons);
			}
			break;
		}
	}

	return 0;
}
662 
663 static int
664 bnxt_intr(void *sc)
665 {
666 	struct bnxt_softc *softc = (struct bnxt_softc *)sc;
667 
668 	device_printf(softc->dev, "STUB: %s @ %s:%d\n", __func__, __FILE__, __LINE__);
669 	return ENOSYS;
670 }
671