xref: /freebsd/sys/net/iflib.c (revision 716fd348)
1 /*-
2  * Copyright (c) 2014-2018, Matthew Macy <mmacy@mattmacy.io>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions are met:
7  *
8  *  1. Redistributions of source code must retain the above copyright notice,
9  *     this list of conditions and the following disclaimer.
10  *
11  *  2. Neither the name of Matthew Macy nor the names of its
12  *     contributors may be used to endorse or promote products derived from
13  *     this software without specific prior written permission.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
19  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25  * POSSIBILITY OF SUCH DAMAGE.
26  */
27 
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
30 
31 #include "opt_inet.h"
32 #include "opt_inet6.h"
33 #include "opt_acpi.h"
34 #include "opt_sched.h"
35 
36 #include <sys/param.h>
37 #include <sys/types.h>
38 #include <sys/bus.h>
39 #include <sys/eventhandler.h>
40 #include <sys/kernel.h>
41 #include <sys/lock.h>
42 #include <sys/mutex.h>
43 #include <sys/module.h>
44 #include <sys/kobj.h>
45 #include <sys/rman.h>
46 #include <sys/sbuf.h>
47 #include <sys/smp.h>
48 #include <sys/socket.h>
49 #include <sys/sockio.h>
50 #include <sys/sysctl.h>
51 #include <sys/syslog.h>
52 #include <sys/taskqueue.h>
53 #include <sys/limits.h>
54 
55 #include <net/if.h>
56 #include <net/if_var.h>
57 #include <net/if_types.h>
58 #include <net/if_media.h>
59 #include <net/bpf.h>
60 #include <net/ethernet.h>
61 #include <net/mp_ring.h>
62 #include <net/debugnet.h>
63 #include <net/pfil.h>
64 #include <net/vnet.h>
65 
66 #include <netinet/in.h>
67 #include <netinet/in_pcb.h>
68 #include <netinet/tcp_lro.h>
69 #include <netinet/in_systm.h>
70 #include <netinet/if_ether.h>
71 #include <netinet/ip.h>
72 #include <netinet/ip6.h>
73 #include <netinet/tcp.h>
74 #include <netinet/ip_var.h>
75 #include <netinet6/ip6_var.h>
76 
77 #include <machine/bus.h>
78 #include <machine/in_cksum.h>
79 
80 #include <vm/vm.h>
81 #include <vm/pmap.h>
82 
83 #include <dev/led/led.h>
84 #include <dev/pci/pcireg.h>
85 #include <dev/pci/pcivar.h>
86 #include <dev/pci/pci_private.h>
87 
88 #include <net/iflib.h>
89 #include <net/iflib_private.h>
90 
91 #include "ifdi_if.h"
92 
93 #ifdef PCI_IOV
94 #include <dev/pci/pci_iov.h>
95 #endif
96 
97 #include <sys/bitstring.h>
98 /*
99  * Enable accounting of every mbuf as it comes into and goes out of
100  * iflib's software descriptor references
101  */
102 #define MEMORY_LOGGING 0
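/*
 * For example, building with
 *
 *	#define MEMORY_LOGGING 1
 *
 * compiles in the per-queue ift_enqueued/ift_dequeued and
 * ifl_m_enqueued/ifl_m_dequeued/ifl_cl_enqueued/ifl_cl_dequeued counters
 * guarded by the #if MEMORY_LOGGING blocks below.
 */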
103 /*
104  * Enable mbuf vectors for compressing long mbuf chains
105  */
106 
107 /*
108  * NB:
109  * - Prefetching in tx cleaning should perhaps be a tunable. The distance ahead
110  *   we prefetch needs to be determined by the time spent in m_free vis-à-vis
111  *   the cost of a prefetch. This will of course vary based on the workload:
112  *      - NFLX's m_free path is dominated by vm-based M_EXT manipulation which
113  *        is quite expensive, thus suggesting very little prefetch.
114  *      - small packet forwarding which is just returning a single mbuf to
115  *        UMA will typically be very fast vis-à-vis the cost of a memory
116  *        access.
117  */
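/*
 * A minimal sketch of what such a tunable might look like in a tx clean
 * loop ("tx_prefetch_dist" and "mask" are hypothetical, not existing
 * iflib names):
 *
 *	prefetch(txq->ift_sds.ifsd_m[(cidx + tx_prefetch_dist) & mask]);
 *	m_free(txq->ift_sds.ifsd_m[cidx]);
 *
 * With a cheap m_free path a small distance suffices; with an expensive
 * M_EXT teardown most of the benefit of the prefetch is lost.
 */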
118 
119 /*
120  * File organization:
121  *  - private structures
122  *  - iflib private utility functions
123  *  - ifnet functions
124  *  - vlan registry and other exported functions
125  *  - iflib public core functions
126  *
127  *
128  */
129 MALLOC_DEFINE(M_IFLIB, "iflib", "ifnet library");
130 
131 #define	IFLIB_RXEOF_MORE (1U << 0)
132 #define	IFLIB_RXEOF_EMPTY (1U << 1)
133 
134 struct iflib_txq;
135 typedef struct iflib_txq *iflib_txq_t;
136 struct iflib_rxq;
137 typedef struct iflib_rxq *iflib_rxq_t;
138 struct iflib_fl;
139 typedef struct iflib_fl *iflib_fl_t;
140 
141 struct iflib_ctx;
142 
143 static void iru_init(if_rxd_update_t iru, iflib_rxq_t rxq, uint8_t flid);
144 static void iflib_timer(void *arg);
145 static void iflib_tqg_detach(if_ctx_t ctx);
146 
147 typedef struct iflib_filter_info {
148 	driver_filter_t *ifi_filter;
149 	void *ifi_filter_arg;
150 	struct grouptask *ifi_task;
151 	void *ifi_ctx;
152 } *iflib_filter_info_t;
153 
154 struct iflib_ctx {
155 	KOBJ_FIELDS;
156 	/*
157 	 * Pointer to hardware driver's softc
158 	 */
159 	void *ifc_softc;
160 	device_t ifc_dev;
161 	if_t ifc_ifp;
162 
163 	cpuset_t ifc_cpus;
164 	if_shared_ctx_t ifc_sctx;
165 	struct if_softc_ctx ifc_softc_ctx;
166 
167 	struct sx ifc_ctx_sx;
168 	struct mtx ifc_state_mtx;
169 
170 	iflib_txq_t ifc_txqs;
171 	iflib_rxq_t ifc_rxqs;
172 	uint32_t ifc_if_flags;
173 	uint32_t ifc_flags;
174 	uint32_t ifc_max_fl_buf_size;
175 	uint32_t ifc_rx_mbuf_sz;
176 
177 	int ifc_link_state;
178 	int ifc_watchdog_events;
179 	struct cdev *ifc_led_dev;
180 	struct resource *ifc_msix_mem;
181 
182 	struct if_irq ifc_legacy_irq;
183 	struct grouptask ifc_admin_task;
184 	struct grouptask ifc_vflr_task;
185 	struct iflib_filter_info ifc_filter_info;
186 	struct ifmedia	ifc_media;
187 	struct ifmedia	*ifc_mediap;
188 
189 	struct sysctl_oid *ifc_sysctl_node;
190 	uint16_t ifc_sysctl_ntxqs;
191 	uint16_t ifc_sysctl_nrxqs;
192 	uint16_t ifc_sysctl_qs_eq_override;
193 	uint16_t ifc_sysctl_rx_budget;
194 	uint16_t ifc_sysctl_tx_abdicate;
195 	uint16_t ifc_sysctl_core_offset;
196 #define	CORE_OFFSET_UNSPECIFIED	0xffff
197 	uint8_t  ifc_sysctl_separate_txrx;
198 	uint8_t  ifc_sysctl_use_logical_cores;
199 	bool	 ifc_cpus_are_physical_cores;
200 
201 	qidx_t ifc_sysctl_ntxds[8];
202 	qidx_t ifc_sysctl_nrxds[8];
203 	struct if_txrx ifc_txrx;
204 #define isc_txd_encap  ifc_txrx.ift_txd_encap
205 #define isc_txd_flush  ifc_txrx.ift_txd_flush
206 #define isc_txd_credits_update  ifc_txrx.ift_txd_credits_update
207 #define isc_rxd_available ifc_txrx.ift_rxd_available
208 #define isc_rxd_pkt_get ifc_txrx.ift_rxd_pkt_get
209 #define isc_rxd_refill ifc_txrx.ift_rxd_refill
210 #define isc_rxd_flush ifc_txrx.ift_rxd_flush
211 #define isc_legacy_intr ifc_txrx.ift_legacy_intr
212 #define isc_txq_select ifc_txrx.ift_txq_select
213 	eventhandler_tag ifc_vlan_attach_event;
214 	eventhandler_tag ifc_vlan_detach_event;
215 	struct ether_addr ifc_mac;
216 };
217 
218 void *
219 iflib_get_softc(if_ctx_t ctx)
220 {
221 
222 	return (ctx->ifc_softc);
223 }
224 
225 device_t
226 iflib_get_dev(if_ctx_t ctx)
227 {
228 
229 	return (ctx->ifc_dev);
230 }
231 
232 if_t
233 iflib_get_ifp(if_ctx_t ctx)
234 {
235 
236 	return (ctx->ifc_ifp);
237 }
238 
239 struct ifmedia *
240 iflib_get_media(if_ctx_t ctx)
241 {
242 
243 	return (ctx->ifc_mediap);
244 }
245 
246 uint32_t
247 iflib_get_flags(if_ctx_t ctx)
248 {
249 	return (ctx->ifc_flags);
250 }
251 
252 void
253 iflib_set_mac(if_ctx_t ctx, uint8_t mac[ETHER_ADDR_LEN])
254 {
255 
256 	bcopy(mac, ctx->ifc_mac.octet, ETHER_ADDR_LEN);
257 }
258 
259 if_softc_ctx_t
260 iflib_get_softc_ctx(if_ctx_t ctx)
261 {
262 
263 	return (&ctx->ifc_softc_ctx);
264 }
265 
266 if_shared_ctx_t
267 iflib_get_sctx(if_ctx_t ctx)
268 {
269 
270 	return (ctx->ifc_sctx);
271 }
272 
273 #define IP_ALIGNED(m) ((((uintptr_t)(m)->m_data) & 0x3) == 0x2)
274 #define CACHE_PTR_INCREMENT (CACHE_LINE_SIZE/sizeof(void*))
275 #define CACHE_PTR_NEXT(ptr) ((void *)(((uintptr_t)(ptr)+CACHE_LINE_SIZE-1) & ~((uintptr_t)(CACHE_LINE_SIZE-1))))
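/*
 * For example, with CACHE_LINE_SIZE 64, CACHE_PTR_NEXT maps 0x1041 to
 * 0x1080 (the start of the next cache line) and leaves the already
 * aligned 0x1040 unchanged.
 */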
276 
277 #define LINK_ACTIVE(ctx) ((ctx)->ifc_link_state == LINK_STATE_UP)
278 #define CTX_IS_VF(ctx) ((ctx)->ifc_sctx->isc_flags & IFLIB_IS_VF)
279 
280 typedef struct iflib_sw_rx_desc_array {
281 	bus_dmamap_t	*ifsd_map;         /* bus_dma maps for packet */
282 	struct mbuf	**ifsd_m;           /* pkthdr mbufs */
283 	caddr_t		*ifsd_cl;          /* direct cluster pointer for rx */
284 	bus_addr_t	*ifsd_ba;          /* bus addr of cluster for rx */
285 } iflib_rxsd_array_t;
286 
287 typedef struct iflib_sw_tx_desc_array {
288 	bus_dmamap_t    *ifsd_map;         /* bus_dma maps for packet */
289 	bus_dmamap_t	*ifsd_tso_map;     /* bus_dma maps for TSO packet */
290 	struct mbuf    **ifsd_m;           /* pkthdr mbufs */
291 } if_txsd_vec_t;
292 
293 /* magic number that should be high enough for any hardware */
294 #define IFLIB_MAX_TX_SEGS		128
295 #define IFLIB_RX_COPY_THRESH		128
296 #define IFLIB_MAX_RX_REFRESH		32
297 /* The minimum number of descriptors per second before we start coalescing */
298 #define IFLIB_MIN_DESC_SEC		16384
299 #define IFLIB_DEFAULT_TX_UPDATE_FREQ	16
300 #define IFLIB_QUEUE_IDLE		0
301 #define IFLIB_QUEUE_HUNG		1
302 #define IFLIB_QUEUE_WORKING		2
303 /* maximum number of txqs that can share an rx interrupt */
304 #define IFLIB_MAX_TX_SHARED_INTR	4
305 
306 /* this should really scale with ring size - this is a fairly arbitrary value */
307 #define TX_BATCH_SIZE			32
308 
309 #define IFLIB_RESTART_BUDGET		8
310 
311 #define CSUM_OFFLOAD		(CSUM_IP_TSO|CSUM_IP6_TSO|CSUM_IP| \
312 				 CSUM_IP_UDP|CSUM_IP_TCP|CSUM_IP_SCTP| \
313 				 CSUM_IP6_UDP|CSUM_IP6_TCP|CSUM_IP6_SCTP)
314 
315 struct iflib_txq {
316 	qidx_t		ift_in_use;
317 	qidx_t		ift_cidx;
318 	qidx_t		ift_cidx_processed;
319 	qidx_t		ift_pidx;
320 	uint8_t		ift_gen;
321 	uint8_t		ift_br_offset;
322 	uint16_t	ift_npending;
323 	uint16_t	ift_db_pending;
324 	uint16_t	ift_rs_pending;
325 	/* implicit pad */
326 	uint8_t		ift_txd_size[8];
327 	uint64_t	ift_processed;
328 	uint64_t	ift_cleaned;
329 	uint64_t	ift_cleaned_prev;
330 #if MEMORY_LOGGING
331 	uint64_t	ift_enqueued;
332 	uint64_t	ift_dequeued;
333 #endif
334 	uint64_t	ift_no_tx_dma_setup;
335 	uint64_t	ift_no_desc_avail;
336 	uint64_t	ift_mbuf_defrag_failed;
337 	uint64_t	ift_mbuf_defrag;
338 	uint64_t	ift_map_failed;
339 	uint64_t	ift_txd_encap_efbig;
340 	uint64_t	ift_pullups;
341 	uint64_t	ift_last_timer_tick;
342 
343 	struct mtx	ift_mtx;
344 	struct mtx	ift_db_mtx;
345 
346 	/* constant values */
347 	if_ctx_t	ift_ctx;
348 	struct ifmp_ring        *ift_br;
349 	struct grouptask	ift_task;
350 	qidx_t		ift_size;
351 	uint16_t	ift_id;
352 	struct callout	ift_timer;
353 #ifdef DEV_NETMAP
354 	struct callout	ift_netmap_timer;
355 #endif /* DEV_NETMAP */
356 
357 	if_txsd_vec_t	ift_sds;
358 	uint8_t		ift_qstatus;
359 	uint8_t		ift_closed;
360 	uint8_t		ift_update_freq;
361 	struct iflib_filter_info ift_filter_info;
362 	bus_dma_tag_t	ift_buf_tag;
363 	bus_dma_tag_t	ift_tso_buf_tag;
364 	iflib_dma_info_t	ift_ifdi;
365 #define	MTX_NAME_LEN	32
366 	char                    ift_mtx_name[MTX_NAME_LEN];
367 	bus_dma_segment_t	ift_segs[IFLIB_MAX_TX_SEGS]  __aligned(CACHE_LINE_SIZE);
368 #ifdef IFLIB_DIAGNOSTICS
369 	uint64_t ift_cpu_exec_count[256];
370 #endif
371 } __aligned(CACHE_LINE_SIZE);
372 
373 struct iflib_fl {
374 	qidx_t		ifl_cidx;
375 	qidx_t		ifl_pidx;
376 	qidx_t		ifl_credits;
377 	uint8_t		ifl_gen;
378 	uint8_t		ifl_rxd_size;
379 #if MEMORY_LOGGING
380 	uint64_t	ifl_m_enqueued;
381 	uint64_t	ifl_m_dequeued;
382 	uint64_t	ifl_cl_enqueued;
383 	uint64_t	ifl_cl_dequeued;
384 #endif
385 	/* implicit pad */
386 	bitstr_t 	*ifl_rx_bitmap;
387 	qidx_t		ifl_fragidx;
388 	/* constant */
389 	qidx_t		ifl_size;
390 	uint16_t	ifl_buf_size;
391 	uint16_t	ifl_cltype;
392 	uma_zone_t	ifl_zone;
393 	iflib_rxsd_array_t	ifl_sds;
394 	iflib_rxq_t	ifl_rxq;
395 	uint8_t		ifl_id;
396 	bus_dma_tag_t	ifl_buf_tag;
397 	iflib_dma_info_t	ifl_ifdi;
398 	uint64_t	ifl_bus_addrs[IFLIB_MAX_RX_REFRESH] __aligned(CACHE_LINE_SIZE);
399 	qidx_t		ifl_rxd_idxs[IFLIB_MAX_RX_REFRESH];
400 }  __aligned(CACHE_LINE_SIZE);
401 
402 static inline qidx_t
403 get_inuse(int size, qidx_t cidx, qidx_t pidx, uint8_t gen)
404 {
405 	qidx_t used;
406 
407 	if (pidx > cidx)
408 		used = pidx - cidx;
409 	else if (pidx < cidx)
410 		used = size - cidx + pidx;
411 	else if (gen == 0 && pidx == cidx)
412 		used = 0;
413 	else if (gen == 1 && pidx == cidx)
414 		used = size;
415 	else
416 		panic("bad state");
417 
418 	return (used);
419 }
420 
421 #define TXQ_AVAIL(txq) (txq->ift_size - get_inuse(txq->ift_size, txq->ift_cidx, txq->ift_pidx, txq->ift_gen))
422 
423 #define IDXDIFF(head, tail, wrap) \
424 	((head) >= (tail) ? (head) - (tail) : (wrap) - (tail) + (head))
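/*
 * Worked example: with size = 4, cidx = 3 and pidx = 1 the ring has
 * wrapped, so get_inuse() returns 4 - 3 + 1 = 2.  When pidx == cidx the
 * generation bit disambiguates an empty ring (gen == 0, 0 in use) from a
 * full one (gen == 1, all 4 in use).  IDXDIFF(1, 3, 4) computes the same
 * wrapped distance: 4 - 3 + 1 = 2.
 */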
425 
426 struct iflib_rxq {
427 	if_ctx_t	ifr_ctx;
428 	iflib_fl_t	ifr_fl;
429 	uint64_t	ifr_rx_irq;
430 	struct pfil_head	*pfil;
431 	/*
432 	 * If there is a separate completion queue (IFLIB_HAS_RXCQ), this is
433 	 * the completion queue consumer index.  Otherwise it's unused.
434 	 */
435 	qidx_t		ifr_cq_cidx;
436 	uint16_t	ifr_id;
437 	uint8_t		ifr_nfl;
438 	uint8_t		ifr_ntxqirq;
439 	uint8_t		ifr_txqid[IFLIB_MAX_TX_SHARED_INTR];
440 	uint8_t		ifr_fl_offset;
441 	struct lro_ctrl			ifr_lc;
442 	struct grouptask        ifr_task;
443 	struct callout		ifr_watchdog;
444 	struct iflib_filter_info ifr_filter_info;
445 	iflib_dma_info_t		ifr_ifdi;
446 
447 	/* dynamically allocate if any drivers need a value substantially larger than this */
448 	struct if_rxd_frag	ifr_frags[IFLIB_MAX_RX_SEGS] __aligned(CACHE_LINE_SIZE);
449 #ifdef IFLIB_DIAGNOSTICS
450 	uint64_t ifr_cpu_exec_count[256];
451 #endif
452 }  __aligned(CACHE_LINE_SIZE);
453 
454 typedef struct if_rxsd {
455 	caddr_t *ifsd_cl;
456 	iflib_fl_t ifsd_fl;
457 } *if_rxsd_t;
458 
459 /* multiple of word size */
460 #ifdef __LP64__
461 #define PKT_INFO_SIZE	6
462 #define RXD_INFO_SIZE	5
463 #define PKT_TYPE uint64_t
464 #else
465 #define PKT_INFO_SIZE	11
466 #define RXD_INFO_SIZE	8
467 #define PKT_TYPE uint32_t
468 #endif
469 #define PKT_LOOP_BOUND  ((PKT_INFO_SIZE/3)*3)
470 #define RXD_LOOP_BOUND  ((RXD_INFO_SIZE/4)*4)
471 
472 typedef struct if_pkt_info_pad {
473 	PKT_TYPE pkt_val[PKT_INFO_SIZE];
474 } *if_pkt_info_pad_t;
475 typedef struct if_rxd_info_pad {
476 	PKT_TYPE rxd_val[RXD_INFO_SIZE];
477 } *if_rxd_info_pad_t;
478 
479 CTASSERT(sizeof(struct if_pkt_info_pad) == sizeof(struct if_pkt_info));
480 CTASSERT(sizeof(struct if_rxd_info_pad) == sizeof(struct if_rxd_info));
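/*
 * The CTASSERTs above pin the pad structures to the real ones: on LP64,
 * PKT_INFO_SIZE * sizeof(PKT_TYPE) = 6 * 8 = 48 bytes must equal
 * sizeof(struct if_pkt_info), which lets pkt_info_zero() below clear the
 * whole structure with six word-sized stores instead of a bzero() call.
 */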
481 
482 static inline void
483 pkt_info_zero(if_pkt_info_t pi)
484 {
485 	if_pkt_info_pad_t pi_pad;
486 
487 	pi_pad = (if_pkt_info_pad_t)pi;
488 	pi_pad->pkt_val[0] = 0; pi_pad->pkt_val[1] = 0; pi_pad->pkt_val[2] = 0;
489 	pi_pad->pkt_val[3] = 0; pi_pad->pkt_val[4] = 0; pi_pad->pkt_val[5] = 0;
490 #ifndef __LP64__
491 	pi_pad->pkt_val[6] = 0; pi_pad->pkt_val[7] = 0; pi_pad->pkt_val[8] = 0;
492 	pi_pad->pkt_val[9] = 0; pi_pad->pkt_val[10] = 0;
493 #endif
494 }
495 
496 static device_method_t iflib_pseudo_methods[] = {
497 	DEVMETHOD(device_attach, noop_attach),
498 	DEVMETHOD(device_detach, iflib_pseudo_detach),
499 	DEVMETHOD_END
500 };
501 
502 driver_t iflib_pseudodriver = {
503 	"iflib_pseudo", iflib_pseudo_methods, sizeof(struct iflib_ctx),
504 };
505 
506 static inline void
507 rxd_info_zero(if_rxd_info_t ri)
508 {
509 	if_rxd_info_pad_t ri_pad;
510 	int i;
511 
512 	ri_pad = (if_rxd_info_pad_t)ri;
513 	for (i = 0; i < RXD_LOOP_BOUND; i += 4) {
514 		ri_pad->rxd_val[i] = 0;
515 		ri_pad->rxd_val[i+1] = 0;
516 		ri_pad->rxd_val[i+2] = 0;
517 		ri_pad->rxd_val[i+3] = 0;
518 	}
519 #ifdef __LP64__
520 	ri_pad->rxd_val[RXD_INFO_SIZE-1] = 0;
521 #endif
522 }
523 
524 /*
525  * Only allow a single packet to take up at most 1/nth of the tx ring
526  */
527 #define MAX_SINGLE_PACKET_FRACTION 12
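/*
 * E.g. with a 1024-descriptor tx ring and MAX_SINGLE_PACKET_FRACTION 12,
 * a single packet may consume at most 1024 / 12 = 85 descriptors.
 */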
528 #define IF_BAD_DMA (bus_addr_t)-1
529 
530 #define CTX_ACTIVE(ctx) ((if_getdrvflags((ctx)->ifc_ifp) & IFF_DRV_RUNNING))
531 
532 #define CTX_LOCK_INIT(_sc)  sx_init(&(_sc)->ifc_ctx_sx, "iflib ctx lock")
533 #define CTX_LOCK(ctx) sx_xlock(&(ctx)->ifc_ctx_sx)
534 #define CTX_UNLOCK(ctx) sx_xunlock(&(ctx)->ifc_ctx_sx)
535 #define CTX_LOCK_DESTROY(ctx) sx_destroy(&(ctx)->ifc_ctx_sx)
536 
537 #define STATE_LOCK_INIT(_sc, _name)  mtx_init(&(_sc)->ifc_state_mtx, _name, "iflib state lock", MTX_DEF)
538 #define STATE_LOCK(ctx) mtx_lock(&(ctx)->ifc_state_mtx)
539 #define STATE_UNLOCK(ctx) mtx_unlock(&(ctx)->ifc_state_mtx)
540 #define STATE_LOCK_DESTROY(ctx) mtx_destroy(&(ctx)->ifc_state_mtx)
541 
542 #define CALLOUT_LOCK(txq)	mtx_lock(&txq->ift_mtx)
543 #define CALLOUT_UNLOCK(txq) 	mtx_unlock(&txq->ift_mtx)
544 
545 void
546 iflib_set_detach(if_ctx_t ctx)
547 {
548 	STATE_LOCK(ctx);
549 	ctx->ifc_flags |= IFC_IN_DETACH;
550 	STATE_UNLOCK(ctx);
551 }
552 
553 /* Our boot-time initialization hook */
554 static int	iflib_module_event_handler(module_t, int, void *);
555 
556 static moduledata_t iflib_moduledata = {
557 	"iflib",
558 	iflib_module_event_handler,
559 	NULL
560 };
561 
562 DECLARE_MODULE(iflib, iflib_moduledata, SI_SUB_INIT_IF, SI_ORDER_ANY);
563 MODULE_VERSION(iflib, 1);
564 
565 MODULE_DEPEND(iflib, pci, 1, 1, 1);
566 MODULE_DEPEND(iflib, ether, 1, 1, 1);
567 
568 TASKQGROUP_DEFINE(if_io_tqg, mp_ncpus, 1);
569 TASKQGROUP_DEFINE(if_config_tqg, 1, 1);
570 
571 #ifndef IFLIB_DEBUG_COUNTERS
572 #ifdef INVARIANTS
573 #define IFLIB_DEBUG_COUNTERS 1
574 #else
575 #define IFLIB_DEBUG_COUNTERS 0
576 #endif /* INVARIANTS */
577 #endif
578 
579 static SYSCTL_NODE(_net, OID_AUTO, iflib, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
580     "iflib driver parameters");
581 
582 /*
583  * XXX need to ensure that this can't accidentally cause the head to be moved backwards
584  */
585 static int iflib_min_tx_latency = 0;
586 SYSCTL_INT(_net_iflib, OID_AUTO, min_tx_latency, CTLFLAG_RW,
587 		   &iflib_min_tx_latency, 0, "minimize transmit latency at the possible expense of throughput");
588 static int iflib_no_tx_batch = 0;
589 SYSCTL_INT(_net_iflib, OID_AUTO, no_tx_batch, CTLFLAG_RW,
590 		   &iflib_no_tx_batch, 0, "avoid transmit batching at the possible expense of throughput");
591 static int iflib_timer_default = 1000;
592 SYSCTL_INT(_net_iflib, OID_AUTO, timer_default, CTLFLAG_RW,
593 		   &iflib_timer_default, 0, "number of ticks between iflib_timer calls");
594 
595 
596 #if IFLIB_DEBUG_COUNTERS
597 
598 static int iflib_tx_seen;
599 static int iflib_tx_sent;
600 static int iflib_tx_encap;
601 static int iflib_rx_allocs;
602 static int iflib_fl_refills;
603 static int iflib_fl_refills_large;
604 static int iflib_tx_frees;
605 
606 SYSCTL_INT(_net_iflib, OID_AUTO, tx_seen, CTLFLAG_RD,
607 		   &iflib_tx_seen, 0, "# TX mbufs seen");
608 SYSCTL_INT(_net_iflib, OID_AUTO, tx_sent, CTLFLAG_RD,
609 		   &iflib_tx_sent, 0, "# TX mbufs sent");
610 SYSCTL_INT(_net_iflib, OID_AUTO, tx_encap, CTLFLAG_RD,
611 		   &iflib_tx_encap, 0, "# TX mbufs encapped");
612 SYSCTL_INT(_net_iflib, OID_AUTO, tx_frees, CTLFLAG_RD,
613 		   &iflib_tx_frees, 0, "# TX frees");
614 SYSCTL_INT(_net_iflib, OID_AUTO, rx_allocs, CTLFLAG_RD,
615 		   &iflib_rx_allocs, 0, "# RX allocations");
616 SYSCTL_INT(_net_iflib, OID_AUTO, fl_refills, CTLFLAG_RD,
617 		   &iflib_fl_refills, 0, "# refills");
618 SYSCTL_INT(_net_iflib, OID_AUTO, fl_refills_large, CTLFLAG_RD,
619 		   &iflib_fl_refills_large, 0, "# large refills");
620 
621 static int iflib_txq_drain_flushing;
622 static int iflib_txq_drain_oactive;
623 static int iflib_txq_drain_notready;
624 
625 SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_flushing, CTLFLAG_RD,
626 		   &iflib_txq_drain_flushing, 0, "# drain flushes");
627 SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_oactive, CTLFLAG_RD,
628 		   &iflib_txq_drain_oactive, 0, "# drain oactives");
629 SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_notready, CTLFLAG_RD,
630 		   &iflib_txq_drain_notready, 0, "# drain notready");
631 
632 static int iflib_encap_load_mbuf_fail;
633 static int iflib_encap_pad_mbuf_fail;
634 static int iflib_encap_txq_avail_fail;
635 static int iflib_encap_txd_encap_fail;
636 
637 SYSCTL_INT(_net_iflib, OID_AUTO, encap_load_mbuf_fail, CTLFLAG_RD,
638 		   &iflib_encap_load_mbuf_fail, 0, "# busdma load failures");
639 SYSCTL_INT(_net_iflib, OID_AUTO, encap_pad_mbuf_fail, CTLFLAG_RD,
640 		   &iflib_encap_pad_mbuf_fail, 0, "# runt frame pad failures");
641 SYSCTL_INT(_net_iflib, OID_AUTO, encap_txq_avail_fail, CTLFLAG_RD,
642 		   &iflib_encap_txq_avail_fail, 0, "# txq avail failures");
643 SYSCTL_INT(_net_iflib, OID_AUTO, encap_txd_encap_fail, CTLFLAG_RD,
644 		   &iflib_encap_txd_encap_fail, 0, "# driver encap failures");
645 
646 static int iflib_task_fn_rxs;
647 static int iflib_rx_intr_enables;
648 static int iflib_fast_intrs;
649 static int iflib_rx_unavail;
650 static int iflib_rx_ctx_inactive;
651 static int iflib_rx_if_input;
652 static int iflib_rxd_flush;
653 
654 static int iflib_verbose_debug;
655 
656 SYSCTL_INT(_net_iflib, OID_AUTO, task_fn_rx, CTLFLAG_RD,
657 		   &iflib_task_fn_rxs, 0, "# task_fn_rx calls");
658 SYSCTL_INT(_net_iflib, OID_AUTO, rx_intr_enables, CTLFLAG_RD,
659 		   &iflib_rx_intr_enables, 0, "# RX intr enables");
660 SYSCTL_INT(_net_iflib, OID_AUTO, fast_intrs, CTLFLAG_RD,
661 		   &iflib_fast_intrs, 0, "# fast_intr calls");
662 SYSCTL_INT(_net_iflib, OID_AUTO, rx_unavail, CTLFLAG_RD,
663 		   &iflib_rx_unavail, 0, "# times rxeof called with no available data");
664 SYSCTL_INT(_net_iflib, OID_AUTO, rx_ctx_inactive, CTLFLAG_RD,
665 		   &iflib_rx_ctx_inactive, 0, "# times rxeof called with inactive context");
666 SYSCTL_INT(_net_iflib, OID_AUTO, rx_if_input, CTLFLAG_RD,
667 		   &iflib_rx_if_input, 0, "# times rxeof called if_input");
668 SYSCTL_INT(_net_iflib, OID_AUTO, rxd_flush, CTLFLAG_RD,
669 		   &iflib_rxd_flush, 0, "# times rxd_flush called");
670 SYSCTL_INT(_net_iflib, OID_AUTO, verbose_debug, CTLFLAG_RW,
671 		   &iflib_verbose_debug, 0, "enable verbose debugging");
672 
673 #define DBG_COUNTER_INC(name) atomic_add_int(&(iflib_ ## name), 1)
674 static void
675 iflib_debug_reset(void)
676 {
677 	iflib_tx_seen = iflib_tx_sent = iflib_tx_encap = iflib_rx_allocs =
678 		iflib_fl_refills = iflib_fl_refills_large = iflib_tx_frees =
679 		iflib_txq_drain_flushing = iflib_txq_drain_oactive =
680 		iflib_txq_drain_notready =
681 		iflib_encap_load_mbuf_fail = iflib_encap_pad_mbuf_fail =
682 		iflib_encap_txq_avail_fail = iflib_encap_txd_encap_fail =
683 		iflib_task_fn_rxs = iflib_rx_intr_enables = iflib_fast_intrs =
684 		iflib_rx_unavail =
685 		iflib_rx_ctx_inactive = iflib_rx_if_input =
686 		iflib_rxd_flush = 0;
687 }
688 
689 #else
690 #define DBG_COUNTER_INC(name)
691 static void iflib_debug_reset(void) {}
692 #endif
693 
694 #define IFLIB_DEBUG 0
695 
696 static void iflib_tx_structures_free(if_ctx_t ctx);
697 static void iflib_rx_structures_free(if_ctx_t ctx);
698 static int iflib_queues_alloc(if_ctx_t ctx);
699 static int iflib_tx_credits_update(if_ctx_t ctx, iflib_txq_t txq);
700 static int iflib_rxd_avail(if_ctx_t ctx, iflib_rxq_t rxq, qidx_t cidx, qidx_t budget);
701 static int iflib_qset_structures_setup(if_ctx_t ctx);
702 static int iflib_msix_init(if_ctx_t ctx);
703 static int iflib_legacy_setup(if_ctx_t ctx, driver_filter_t filter, void *filterarg, int *rid, const char *str);
704 static void iflib_txq_check_drain(iflib_txq_t txq, int budget);
705 static uint32_t iflib_txq_can_drain(struct ifmp_ring *);
706 #ifdef ALTQ
707 static void iflib_altq_if_start(if_t ifp);
708 static int iflib_altq_if_transmit(if_t ifp, struct mbuf *m);
709 #endif
710 static int iflib_register(if_ctx_t);
711 static void iflib_deregister(if_ctx_t);
712 static void iflib_unregister_vlan_handlers(if_ctx_t ctx);
713 static uint16_t iflib_get_mbuf_size_for(unsigned int size);
714 static void iflib_init_locked(if_ctx_t ctx);
715 static void iflib_add_device_sysctl_pre(if_ctx_t ctx);
716 static void iflib_add_device_sysctl_post(if_ctx_t ctx);
717 static void iflib_ifmp_purge(iflib_txq_t txq);
718 static void _iflib_pre_assert(if_softc_ctx_t scctx);
719 static void iflib_if_init_locked(if_ctx_t ctx);
720 static void iflib_free_intr_mem(if_ctx_t ctx);
721 #ifndef __NO_STRICT_ALIGNMENT
722 static struct mbuf * iflib_fixup_rx(struct mbuf *m);
723 #endif
724 
725 static SLIST_HEAD(cpu_offset_list, cpu_offset) cpu_offsets =
726     SLIST_HEAD_INITIALIZER(cpu_offsets);
727 struct cpu_offset {
728 	SLIST_ENTRY(cpu_offset) entries;
729 	cpuset_t	set;
730 	unsigned int	refcount;
731 	uint16_t	next_cpuid;
732 };
733 static struct mtx cpu_offset_mtx;
734 MTX_SYSINIT(iflib_cpu_offset, &cpu_offset_mtx, "iflib_cpu_offset lock",
735     MTX_DEF);
736 
737 DEBUGNET_DEFINE(iflib);
738 
739 static int
740 iflib_num_rx_descs(if_ctx_t ctx)
741 {
742 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
743 	if_shared_ctx_t sctx = ctx->ifc_sctx;
744 	uint16_t first_rxq = (sctx->isc_flags & IFLIB_HAS_RXCQ) ? 1 : 0;
745 
746 	return (scctx->isc_nrxd[first_rxq]);
747 }
748 
749 static int
750 iflib_num_tx_descs(if_ctx_t ctx)
751 {
752 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
753 	if_shared_ctx_t sctx = ctx->ifc_sctx;
754 	uint16_t first_txq = (sctx->isc_flags & IFLIB_HAS_TXCQ) ? 1 : 0;
755 
756 	return (scctx->isc_ntxd[first_txq]);
757 }
758 
759 #ifdef DEV_NETMAP
760 #include <sys/selinfo.h>
761 #include <net/netmap.h>
762 #include <dev/netmap/netmap_kern.h>
763 
764 MODULE_DEPEND(iflib, netmap, 1, 1, 1);
765 
766 static int netmap_fl_refill(iflib_rxq_t rxq, struct netmap_kring *kring, bool init);
767 static void iflib_netmap_timer(void *arg);
768 
769 /*
770  * device-specific sysctl variables:
771  *
772  * iflib_crcstrip: 0: keep CRC in rx frames, 1: strip it (default).
773  *	During regular operations the CRC is stripped, but on some
774  *	hardware reception of frames whose length is not a multiple of 64
775  *	is slower, so using crcstrip=0 helps in benchmarks.
776  *
777  * iflib_rx_miss, iflib_rx_miss_bufs:
778  *	count packets that might be missed due to lost interrupts.
779  */
780 SYSCTL_DECL(_dev_netmap);
781 /*
782  * The xl driver by default strips CRCs and we do not override it.
783  */
784 
785 int iflib_crcstrip = 1;
786 SYSCTL_INT(_dev_netmap, OID_AUTO, iflib_crcstrip,
787     CTLFLAG_RW, &iflib_crcstrip, 1, "strip CRC on RX frames");
788 
789 int iflib_rx_miss, iflib_rx_miss_bufs;
790 SYSCTL_INT(_dev_netmap, OID_AUTO, iflib_rx_miss,
791     CTLFLAG_RW, &iflib_rx_miss, 0, "potentially missed RX intr");
792 SYSCTL_INT(_dev_netmap, OID_AUTO, iflib_rx_miss_bufs,
793     CTLFLAG_RW, &iflib_rx_miss_bufs, 0, "potentially missed RX intr bufs");
794 
795 /*
796  * Register/unregister. We are already under netmap lock.
797  * Only called on the first register or the last unregister.
798  */
799 static int
800 iflib_netmap_register(struct netmap_adapter *na, int onoff)
801 {
802 	if_t ifp = na->ifp;
803 	if_ctx_t ctx = ifp->if_softc;
804 	int status;
805 
806 	CTX_LOCK(ctx);
807 	if (!CTX_IS_VF(ctx))
808 		IFDI_CRCSTRIP_SET(ctx, onoff, iflib_crcstrip);
809 
810 	iflib_stop(ctx);
811 
812 	/*
813 	 * Enable (or disable) netmap flags, and intercept (or restore)
814 	 * ifp->if_transmit. This is done once the device has been stopped
815 	 * to prevent race conditions. Also, this must be done after
816 	 * calling netmap_disable_all_rings() and before calling
817 	 * netmap_enable_all_rings(), so that these two functions see the
818 	 * updated state of the NAF_NETMAP_ON bit.
819 	 */
820 	if (onoff) {
821 		nm_set_native_flags(na);
822 	} else {
823 		nm_clear_native_flags(na);
824 	}
825 
826 	iflib_init_locked(ctx);
827 	IFDI_CRCSTRIP_SET(ctx, onoff, iflib_crcstrip); // XXX why twice ?
828 	status = ifp->if_drv_flags & IFF_DRV_RUNNING ? 0 : 1;
829 	if (status)
830 		nm_clear_native_flags(na);
831 	CTX_UNLOCK(ctx);
832 	return (status);
833 }
834 
835 static int
836 iflib_netmap_config(struct netmap_adapter *na, struct nm_config_info *info)
837 {
838 	if_t ifp = na->ifp;
839 	if_ctx_t ctx = ifp->if_softc;
840 	iflib_rxq_t rxq = &ctx->ifc_rxqs[0];
841 	iflib_fl_t fl = &rxq->ifr_fl[0];
842 
843 	info->num_tx_rings = ctx->ifc_softc_ctx.isc_ntxqsets;
844 	info->num_rx_rings = ctx->ifc_softc_ctx.isc_nrxqsets;
845 	info->num_tx_descs = iflib_num_tx_descs(ctx);
846 	info->num_rx_descs = iflib_num_rx_descs(ctx);
847 	info->rx_buf_maxsize = fl->ifl_buf_size;
848 	nm_prinf("txr %u rxr %u txd %u rxd %u rbufsz %u",
849 		info->num_tx_rings, info->num_rx_rings, info->num_tx_descs,
850 		info->num_rx_descs, info->rx_buf_maxsize);
851 
852 	return (0);
853 }
854 
855 static int
856 netmap_fl_refill(iflib_rxq_t rxq, struct netmap_kring *kring, bool init)
857 {
858 	struct netmap_adapter *na = kring->na;
859 	u_int const lim = kring->nkr_num_slots - 1;
860 	struct netmap_ring *ring = kring->ring;
861 	bus_dmamap_t *map;
862 	struct if_rxd_update iru;
863 	if_ctx_t ctx = rxq->ifr_ctx;
864 	iflib_fl_t fl = &rxq->ifr_fl[0];
865 	u_int nic_i_first, nic_i;
866 	u_int nm_i;
867 	int i, n;
868 #if IFLIB_DEBUG_COUNTERS
869 	int rf_count = 0;
870 #endif
871 
872 	/*
873 	 * This function is used both at initialization and in rxsync.
874 	 * At initialization we need to prepare (with isc_rxd_refill())
875 	 * all the netmap buffers currently owned by the kernel, in
876 	 * such a way to keep fl->ifl_pidx and kring->nr_hwcur in sync
877 	 * (except for kring->nkr_hwofs). These may be less than
878 	 * kring->nkr_num_slots if netmap_reset() was called while
879 	 * an application using the kring still owned some
880 	 * buffers.
881 	 * At rxsync time, both indexes point to the next buffer to be
882 	 * refilled.
883 	 * In any case we publish (with isc_rxd_flush()) up to
884 	 * (fl->ifl_pidx - 1) % N (included), to avoid the NIC tail/prod
885 	 * pointer to overrun the head/cons pointer, although this is
886 	 * not necessary for some NICs (e.g. vmx).
887 	 */
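	/*
	 * E.g. with 512 slots and fl->ifl_pidx == 0 after a wrap, the
	 * isc_rxd_flush() at the bottom publishes up to slot 511, one
	 * short of the head/cons pointer.
	 */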
888 	if (__predict_false(init)) {
889 		n = kring->nkr_num_slots - nm_kr_rxspace(kring);
890 	} else {
891 		n = kring->rhead - kring->nr_hwcur;
892 		if (n == 0)
893 			return (0); /* Nothing to do. */
894 		if (n < 0)
895 			n += kring->nkr_num_slots;
896 	}
897 
898 	iru_init(&iru, rxq, 0 /* flid */);
899 	map = fl->ifl_sds.ifsd_map;
900 	nic_i = fl->ifl_pidx;
901 	nm_i = netmap_idx_n2k(kring, nic_i);
902 	if (__predict_false(init)) {
903 		/*
904 		 * On init/reset, nic_i must be 0, and we must
905 		 * start to refill from hwtail (see netmap_reset()).
906 		 */
907 		MPASS(nic_i == 0);
908 		MPASS(nm_i == kring->nr_hwtail);
909 	} else
910 		MPASS(nm_i == kring->nr_hwcur);
911 	DBG_COUNTER_INC(fl_refills);
912 	while (n > 0) {
913 #if IFLIB_DEBUG_COUNTERS
914 		if (++rf_count == 9)
915 			DBG_COUNTER_INC(fl_refills_large);
916 #endif
917 		nic_i_first = nic_i;
918 		for (i = 0; n > 0 && i < IFLIB_MAX_RX_REFRESH; n--, i++) {
919 			struct netmap_slot *slot = &ring->slot[nm_i];
920 			uint64_t paddr;
921 			void *addr = PNMB(na, slot, &paddr);
922 
923 			MPASS(i < IFLIB_MAX_RX_REFRESH);
924 
925 			if (addr == NETMAP_BUF_BASE(na)) /* bad buf */
926 				return (netmap_ring_reinit(kring));
927 
928 			fl->ifl_bus_addrs[i] = paddr +
929 			    nm_get_offset(kring, slot);
930 			fl->ifl_rxd_idxs[i] = nic_i;
931 
932 			if (__predict_false(init)) {
933 				netmap_load_map(na, fl->ifl_buf_tag,
934 				    map[nic_i], addr);
935 			} else if (slot->flags & NS_BUF_CHANGED) {
936 				/* buffer has changed, reload map */
937 				netmap_reload_map(na, fl->ifl_buf_tag,
938 				    map[nic_i], addr);
939 			}
940 			bus_dmamap_sync(fl->ifl_buf_tag, map[nic_i],
941 			    BUS_DMASYNC_PREREAD);
942 			slot->flags &= ~NS_BUF_CHANGED;
943 
944 			nm_i = nm_next(nm_i, lim);
945 			nic_i = nm_next(nic_i, lim);
946 		}
947 
948 		iru.iru_pidx = nic_i_first;
949 		iru.iru_count = i;
950 		ctx->isc_rxd_refill(ctx->ifc_softc, &iru);
951 	}
952 	fl->ifl_pidx = nic_i;
953 	/*
954 	 * At the end of the loop we must have refilled everything
955 	 * we could possibly refill.
956 	 */
957 	MPASS(nm_i == kring->rhead);
958 	kring->nr_hwcur = nm_i;
959 
960 	bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map,
961 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
962 	ctx->isc_rxd_flush(ctx->ifc_softc, rxq->ifr_id, fl->ifl_id,
963 	    nm_prev(nic_i, lim));
964 	DBG_COUNTER_INC(rxd_flush);
965 
966 	return (0);
967 }
968 
969 #define NETMAP_TX_TIMER_US	90
970 
971 /*
972  * Reconcile kernel and user view of the transmit ring.
973  *
974  * All information is in the kring.
975  * Userspace wants to send packets up to the one before kring->rhead,
976  * kernel knows kring->nr_hwcur is the first unsent packet.
977  *
978  * Here we push packets out (as many as possible), and possibly
979  * reclaim buffers from previously completed transmission.
980  *
981  * The caller (netmap) guarantees that there is only one instance
982  * running at any time. Any interference with other driver
983  * methods should be handled by the individual drivers.
984  */
985 static int
986 iflib_netmap_txsync(struct netmap_kring *kring, int flags)
987 {
988 	struct netmap_adapter *na = kring->na;
989 	if_t ifp = na->ifp;
990 	struct netmap_ring *ring = kring->ring;
991 	u_int nm_i;	/* index into the netmap kring */
992 	u_int nic_i;	/* index into the NIC ring */
993 	u_int n;
994 	u_int const lim = kring->nkr_num_slots - 1;
995 	u_int const head = kring->rhead;
996 	struct if_pkt_info pi;
997 	int tx_pkts = 0, tx_bytes = 0;
998 
999 	/*
1000 	 * interrupts on every tx packet are expensive, so request
1001 	 * them every half ring, or where NS_REPORT is set.
1002 	 */
1003 	u_int report_frequency = kring->nkr_num_slots >> 1;
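	/*
	 * E.g. with 1024 slots, report_frequency is 512: the encap loop
	 * below sets IPI_TX_INTR at nic_i == 0 and nic_i == 512, roughly
	 * twice per traversal of the ring, plus wherever NS_REPORT is set.
	 */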
1004 	/* device-specific */
1005 	if_ctx_t ctx = ifp->if_softc;
1006 	iflib_txq_t txq = &ctx->ifc_txqs[kring->ring_id];
1007 
1008 	bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map,
1009 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1010 
1011 	/*
1012 	 * First part: process new packets to send.
1013 	 * nm_i is the current index in the netmap kring,
1014 	 * nic_i is the corresponding index in the NIC ring.
1015 	 *
1016 	 * If we have packets to send (nm_i != head)
1017 	 * iterate over the netmap ring, fetch length and update
1018 	 * the corresponding slot in the NIC ring. Some drivers also
1019 	 * need to update the buffer's physical address in the NIC slot
1020 	 * even when NS_BUF_CHANGED is not set (PNMB computes the addresses).
1021 	 *
1022 	 * The netmap_reload_map() call is especially expensive,
1023 	 * even when (as in this case) the tag is 0, so do it only
1024 	 * when the buffer has actually changed.
1025 	 *
1026 	 * If possible do not set the report/intr bit on all slots,
1027 	 * but only a few times per ring or when NS_REPORT is set.
1028 	 *
1029 	 * Finally, on 10G and faster drivers, it might be useful
1030 	 * to prefetch the next slot and txr entry.
1031 	 */
1032 
1033 	nm_i = kring->nr_hwcur;
1034 	if (nm_i != head) {	/* we have new packets to send */
1035 		uint32_t pkt_len = 0, seg_idx = 0;
1036 		int nic_i_start = -1, flags = 0;
1037 		pkt_info_zero(&pi);
1038 		pi.ipi_segs = txq->ift_segs;
1039 		pi.ipi_qsidx = kring->ring_id;
1040 		nic_i = netmap_idx_k2n(kring, nm_i);
1041 
1042 		__builtin_prefetch(&ring->slot[nm_i]);
1043 		__builtin_prefetch(&txq->ift_sds.ifsd_m[nic_i]);
1044 		__builtin_prefetch(&txq->ift_sds.ifsd_map[nic_i]);
1045 
1046 		for (n = 0; nm_i != head; n++) {
1047 			struct netmap_slot *slot = &ring->slot[nm_i];
1048 			uint64_t offset = nm_get_offset(kring, slot);
1049 			u_int len = slot->len;
1050 			uint64_t paddr;
1051 			void *addr = PNMB(na, slot, &paddr);
1052 
1053 			flags |= (slot->flags & NS_REPORT ||
1054 				nic_i == 0 || nic_i == report_frequency) ?
1055 				IPI_TX_INTR : 0;
1056 
1057 			/*
1058 			 * If this is the first packet fragment, save the
1059 			 * index of the first NIC slot for later.
1060 			 */
1061 			if (nic_i_start < 0)
1062 				nic_i_start = nic_i;
1063 
1064 			pi.ipi_segs[seg_idx].ds_addr = paddr + offset;
1065 			pi.ipi_segs[seg_idx].ds_len = len;
1066 			if (len) {
1067 				pkt_len += len;
1068 				seg_idx++;
1069 			}
1070 
1071 			if (!(slot->flags & NS_MOREFRAG)) {
1072 				pi.ipi_len = pkt_len;
1073 				pi.ipi_nsegs = seg_idx;
1074 				pi.ipi_pidx = nic_i_start;
1075 				pi.ipi_ndescs = 0;
1076 				pi.ipi_flags = flags;
1077 
1078 				/* Prepare the NIC TX ring. */
1079 				ctx->isc_txd_encap(ctx->ifc_softc, &pi);
1080 				DBG_COUNTER_INC(tx_encap);
1081 
1082 				/* Update transmit counters */
1083 				tx_bytes += pi.ipi_len;
1084 				tx_pkts++;
1085 
1086 				/* Reinit per-packet info for the next one. */
1087 				flags = seg_idx = pkt_len = 0;
1088 				nic_i_start = -1;
1089 			}
1090 
1091 			/* prefetch for next round */
1092 			__builtin_prefetch(&ring->slot[nm_i + 1]);
1093 			__builtin_prefetch(&txq->ift_sds.ifsd_m[nic_i + 1]);
1094 			__builtin_prefetch(&txq->ift_sds.ifsd_map[nic_i + 1]);
1095 
1096 			NM_CHECK_ADDR_LEN_OFF(na, len, offset);
1097 
1098 			if (slot->flags & NS_BUF_CHANGED) {
1099 				/* buffer has changed, reload map */
1100 				netmap_reload_map(na, txq->ift_buf_tag,
1101 				    txq->ift_sds.ifsd_map[nic_i], addr);
1102 			}
1103 			/* make sure changes to the buffer are synced */
1104 			bus_dmamap_sync(txq->ift_buf_tag,
1105 			    txq->ift_sds.ifsd_map[nic_i],
1106 			    BUS_DMASYNC_PREWRITE);
1107 
1108 			slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED | NS_MOREFRAG);
1109 			nm_i = nm_next(nm_i, lim);
1110 			nic_i = nm_next(nic_i, lim);
1111 		}
1112 		kring->nr_hwcur = nm_i;
1113 
1114 		/* synchronize the NIC ring */
1115 		bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map,
1116 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1117 
1118 		/* (re)start the tx unit up to slot nic_i (excluded) */
1119 		ctx->isc_txd_flush(ctx->ifc_softc, txq->ift_id, nic_i);
1120 	}
1121 
1122 	/*
1123 	 * Second part: reclaim buffers for completed transmissions.
1124 	 *
1125 	 * If there are unclaimed buffers, attempt to reclaim them.
1126 	 * If we don't manage to reclaim them all, and TX IRQs are not in use,
1127 	 * trigger a per-tx-queue timer to try again later.
1128 	 */
1129 	if (kring->nr_hwtail != nm_prev(kring->nr_hwcur, lim)) {
1130 		if (iflib_tx_credits_update(ctx, txq)) {
1131 			/* some tx completed, increment avail */
1132 			nic_i = txq->ift_cidx_processed;
1133 			kring->nr_hwtail = nm_prev(netmap_idx_n2k(kring, nic_i), lim);
1134 		}
1135 	}
1136 
1137 	if (!(ctx->ifc_flags & IFC_NETMAP_TX_IRQ))
1138 		if (kring->nr_hwtail != nm_prev(kring->nr_hwcur, lim)) {
1139 			callout_reset_sbt_on(&txq->ift_netmap_timer,
1140 			    NETMAP_TX_TIMER_US * SBT_1US, SBT_1US,
1141 			    iflib_netmap_timer, txq,
1142 			    txq->ift_netmap_timer.c_cpu, 0);
1143 		}
1144 
1145 	if_inc_counter(ifp, IFCOUNTER_OBYTES, tx_bytes);
1146 	if_inc_counter(ifp, IFCOUNTER_OPACKETS, tx_pkts);
1147 
1148 	return (0);
1149 }
1150 
1151 /*
1152  * Reconcile kernel and user view of the receive ring.
1153  * Same as for the txsync, this routine must be efficient.
1154  * The caller guarantees a single invocation, but races against
1155  * the rest of the driver should be handled here.
1156  *
1157  * On call, kring->rhead is the first packet that userspace wants
1158  * to keep, and kring->rcur is the wakeup point.
1159  * The kernel has previously reported packets up to kring->rtail.
1160  *
1161  * If (flags & NAF_FORCE_READ) also check for incoming packets irrespective
1162  * of whether or not we received an interrupt.
1163  */
1164 static int
1165 iflib_netmap_rxsync(struct netmap_kring *kring, int flags)
1166 {
1167 	struct netmap_adapter *na = kring->na;
1168 	struct netmap_ring *ring = kring->ring;
1169 	if_t ifp = na->ifp;
1170 	uint32_t nm_i;	/* index into the netmap ring */
1171 	uint32_t nic_i;	/* index into the NIC ring */
1172 	u_int n;
1173 	u_int const lim = kring->nkr_num_slots - 1;
1174 	int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR;
1175 	int i = 0, rx_bytes = 0, rx_pkts = 0;
1176 
1177 	if_ctx_t ctx = ifp->if_softc;
1178 	if_shared_ctx_t sctx = ctx->ifc_sctx;
1179 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
1180 	iflib_rxq_t rxq = &ctx->ifc_rxqs[kring->ring_id];
1181 	iflib_fl_t fl = &rxq->ifr_fl[0];
1182 	struct if_rxd_info ri;
1183 	qidx_t *cidxp;
1184 
1185 	/*
1186 	 * netmap only uses free list 0, to avoid out-of-order consumption
1187 	 * of receive buffers.
1188 	 */
1189 
1190 	bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map,
1191 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1192 
1193 	/*
1194 	 * First part: import newly received packets.
1195 	 *
1196 	 * nm_i is the index of the next free slot in the netmap ring,
1197 	 * nic_i is the index of the next received packet in the NIC ring
1198 	 * (or in free list 0 if IFLIB_HAS_RXCQ is set), and they may
1199 	 * differ if if_init() has been called while
1200 	 * in netmap mode. For the receive ring we have
1201 	 *
1202 	 *	nic_i = fl->ifl_cidx;
1203 	 *	nm_i = kring->nr_hwtail (previous)
1204 	 * and
1205 	 *	nm_i == (nic_i + kring->nkr_hwofs) % ring_size
1206 	 *
1207 	 * fl->ifl_cidx is set to 0 on a ring reinit
1208 	 */
1209 	if (netmap_no_pendintr || force_update) {
1210 		uint32_t hwtail_lim = nm_prev(kring->nr_hwcur, lim);
1211 		bool have_rxcq = sctx->isc_flags & IFLIB_HAS_RXCQ;
1212 		int crclen = iflib_crcstrip ? 0 : 4;
1213 		int error, avail;
1214 
1215 		/*
1216 		 * For the free list consumer index, we use the same
1217 		 * logic as in iflib_rxeof().
1218 		 */
1219 		if (have_rxcq)
1220 			cidxp = &rxq->ifr_cq_cidx;
1221 		else
1222 			cidxp = &fl->ifl_cidx;
1223 		avail = ctx->isc_rxd_available(ctx->ifc_softc,
1224 		    rxq->ifr_id, *cidxp, USHRT_MAX);
1225 
1226 		nic_i = fl->ifl_cidx;
1227 		nm_i = netmap_idx_n2k(kring, nic_i);
1228 		MPASS(nm_i == kring->nr_hwtail);
1229 		for (n = 0; avail > 0 && nm_i != hwtail_lim; n++, avail--) {
1230 			rxd_info_zero(&ri);
1231 			ri.iri_frags = rxq->ifr_frags;
1232 			ri.iri_qsidx = kring->ring_id;
1233 			ri.iri_ifp = ctx->ifc_ifp;
1234 			ri.iri_cidx = *cidxp;
1235 
1236 			error = ctx->isc_rxd_pkt_get(ctx->ifc_softc, &ri);
1237 			for (i = 0; i < ri.iri_nfrags; i++) {
1238 				if (error) {
1239 					ring->slot[nm_i].len = 0;
1240 					ring->slot[nm_i].flags = 0;
1241 				} else {
1242 					ring->slot[nm_i].len = ri.iri_frags[i].irf_len;
1243 					if (i == (ri.iri_nfrags - 1)) {
1244 						ring->slot[nm_i].len -= crclen;
1245 						ring->slot[nm_i].flags = 0;
1246 
1247 						/* Update receive counters */
1248 						rx_bytes += ri.iri_len;
1249 						rx_pkts++;
1250 					} else
1251 						ring->slot[nm_i].flags = NS_MOREFRAG;
1252 				}
1253 
1254 				bus_dmamap_sync(fl->ifl_buf_tag,
1255 				    fl->ifl_sds.ifsd_map[nic_i], BUS_DMASYNC_POSTREAD);
1256 				nm_i = nm_next(nm_i, lim);
1257 				fl->ifl_cidx = nic_i = nm_next(nic_i, lim);
1258 			}
1259 
1260 			if (have_rxcq) {
1261 				*cidxp = ri.iri_cidx;
1262 				while (*cidxp >= scctx->isc_nrxd[0])
1263 					*cidxp -= scctx->isc_nrxd[0];
1264 			}
1265 
1266 		}
1267 		if (n) { /* update the state variables */
1268 			if (netmap_no_pendintr && !force_update) {
1269 				/* diagnostics */
1270 				iflib_rx_miss++;
1271 				iflib_rx_miss_bufs += n;
1272 			}
1273 			kring->nr_hwtail = nm_i;
1274 		}
1275 		kring->nr_kflags &= ~NKR_PENDINTR;
1276 	}
1277 	/*
1278 	 * Second part: skip past packets that userspace has released.
1279 	 * (kring->nr_hwcur to head excluded),
1280 	 * and make the buffers available for reception.
1281 	 * As usual nm_i is the index in the netmap ring,
1282 	 * nic_i is the index in the NIC ring, and
1283 	 * nm_i == (nic_i + kring->nkr_hwofs) % ring_size
1284 	 */
1285 	netmap_fl_refill(rxq, kring, false);
1286 
1287 	if_inc_counter(ifp, IFCOUNTER_IBYTES, rx_bytes);
1288 	if_inc_counter(ifp, IFCOUNTER_IPACKETS, rx_pkts);
1289 
1290 	return (0);
1291 }
1292 
1293 static void
1294 iflib_netmap_intr(struct netmap_adapter *na, int onoff)
1295 {
1296 	if_ctx_t ctx = na->ifp->if_softc;
1297 
1298 	CTX_LOCK(ctx);
1299 	if (onoff) {
1300 		IFDI_INTR_ENABLE(ctx);
1301 	} else {
1302 		IFDI_INTR_DISABLE(ctx);
1303 	}
1304 	CTX_UNLOCK(ctx);
1305 }
1306 
1307 static int
1308 iflib_netmap_attach(if_ctx_t ctx)
1309 {
1310 	struct netmap_adapter na;
1311 
1312 	bzero(&na, sizeof(na));
1313 
1314 	na.ifp = ctx->ifc_ifp;
1315 	na.na_flags = NAF_BDG_MAYSLEEP | NAF_MOREFRAG | NAF_OFFSETS;
1316 	MPASS(ctx->ifc_softc_ctx.isc_ntxqsets);
1317 	MPASS(ctx->ifc_softc_ctx.isc_nrxqsets);
1318 
1319 	na.num_tx_desc = iflib_num_tx_descs(ctx);
1320 	na.num_rx_desc = iflib_num_rx_descs(ctx);
1321 	na.nm_txsync = iflib_netmap_txsync;
1322 	na.nm_rxsync = iflib_netmap_rxsync;
1323 	na.nm_register = iflib_netmap_register;
1324 	na.nm_intr = iflib_netmap_intr;
1325 	na.nm_config = iflib_netmap_config;
1326 	na.num_tx_rings = ctx->ifc_softc_ctx.isc_ntxqsets;
1327 	na.num_rx_rings = ctx->ifc_softc_ctx.isc_nrxqsets;
1328 	return (netmap_attach(&na));
1329 }
1330 
1331 static int
1332 iflib_netmap_txq_init(if_ctx_t ctx, iflib_txq_t txq)
1333 {
1334 	struct netmap_adapter *na = NA(ctx->ifc_ifp);
1335 	struct netmap_slot *slot;
1336 
1337 	slot = netmap_reset(na, NR_TX, txq->ift_id, 0);
1338 	if (slot == NULL)
1339 		return (0);
1340 	for (int i = 0; i < ctx->ifc_softc_ctx.isc_ntxd[0]; i++) {
1341 		/*
1342 		 * In netmap mode, set the map for the packet buffer.
1343 		 * NOTE: Some drivers (not this one) also need to set
1344 		 * the physical buffer address in the NIC ring.
1345 		 * netmap_idx_n2k() maps a nic index, i, into the corresponding
1346 		 * netmap slot index, si
1347 		 */
1348 		int si = netmap_idx_n2k(na->tx_rings[txq->ift_id], i);
1349 		netmap_load_map(na, txq->ift_buf_tag, txq->ift_sds.ifsd_map[i],
1350 		    NMB(na, slot + si));
1351 	}
1352 	return (1);
1353 }
1354 
1355 static int
1356 iflib_netmap_rxq_init(if_ctx_t ctx, iflib_rxq_t rxq)
1357 {
1358 	struct netmap_adapter *na = NA(ctx->ifc_ifp);
1359 	struct netmap_kring *kring;
1360 	struct netmap_slot *slot;
1361 
1362 	slot = netmap_reset(na, NR_RX, rxq->ifr_id, 0);
1363 	if (slot == NULL)
1364 		return (0);
1365 	kring = na->rx_rings[rxq->ifr_id];
1366 	netmap_fl_refill(rxq, kring, true);
1367 	return (1);
1368 }
1369 
1370 static void
1371 iflib_netmap_timer(void *arg)
1372 {
1373 	iflib_txq_t txq = arg;
1374 	if_ctx_t ctx = txq->ift_ctx;
1375 
1376 	/*
1377 	 * Wake up the netmap application, to give it a chance to
1378 	 * call txsync and reclaim more completed TX buffers.
1379 	 */
1380 	netmap_tx_irq(ctx->ifc_ifp, txq->ift_id);
1381 }
1382 
1383 #define iflib_netmap_detach(ifp) netmap_detach(ifp)
1384 
1385 #else
1386 #define iflib_netmap_txq_init(ctx, txq) (0)
1387 #define iflib_netmap_rxq_init(ctx, rxq) (0)
1388 #define iflib_netmap_detach(ifp)
1389 #define netmap_enable_all_rings(ifp)
1390 #define netmap_disable_all_rings(ifp)
1391 
1392 #define iflib_netmap_attach(ctx) (0)
1393 #define netmap_rx_irq(ifp, qid, budget) (0)
1394 #endif
1395 
1396 #if defined(__i386__) || defined(__amd64__)
1397 static __inline void
1398 prefetch(void *x)
1399 {
1400 	__asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x));
1401 }
1402 
1403 static __inline void
1404 prefetch2cachelines(void *x)
1405 {
1406 	__asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x));
1407 #if (CACHE_LINE_SIZE < 128)
1408 	__asm volatile("prefetcht0 %0" :: "m" (*(((unsigned long *)x)+CACHE_LINE_SIZE/(sizeof(unsigned long)))));
1409 #endif
1410 }
1411 #else
1412 static __inline void
1413 prefetch(void *x)
1414 {
1415 }
1416 
1417 static __inline void
1418 prefetch2cachelines(void *x)
1419 {
1420 }
1421 #endif
1422 
1423 static void
1424 iru_init(if_rxd_update_t iru, iflib_rxq_t rxq, uint8_t flid)
1425 {
1426 	iflib_fl_t fl;
1427 
1428 	fl = &rxq->ifr_fl[flid];
1429 	iru->iru_paddrs = fl->ifl_bus_addrs;
1430 	iru->iru_idxs = fl->ifl_rxd_idxs;
1431 	iru->iru_qsidx = rxq->ifr_id;
1432 	iru->iru_buf_size = fl->ifl_buf_size;
1433 	iru->iru_flidx = fl->ifl_id;
1434 }
1435 
1436 static void
1437 _iflib_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int err)
1438 {
1439 	if (err)
1440 		return;
1441 	*(bus_addr_t *) arg = segs[0].ds_addr;
1442 }
1443 
1444 #define	DMA_WIDTH_TO_BUS_LOWADDR(width)				\
1445 	(((width) == 0) || (width) == flsll(BUS_SPACE_MAXADDR) ?	\
1446 	    BUS_SPACE_MAXADDR : (1ULL << (width)) - 1ULL)
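/*
 * E.g. a driver reporting isc_dma_width == 32 gets a lowaddr of
 * (1ULL << 32) - 1 = 0xffffffff, while a width of 0 (unset) or one equal
 * to flsll(BUS_SPACE_MAXADDR) (e.g. 64 on a 64-bit bus) yields the
 * unrestricted BUS_SPACE_MAXADDR.
 */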
1447 
1448 int
1449 iflib_dma_alloc_align(if_ctx_t ctx, int size, int align, iflib_dma_info_t dma, int mapflags)
1450 {
1451 	int err;
1452 	device_t dev = ctx->ifc_dev;
1453 	bus_addr_t lowaddr;
1454 
1455 	lowaddr = DMA_WIDTH_TO_BUS_LOWADDR(ctx->ifc_softc_ctx.isc_dma_width);
1456 
1457 	err = bus_dma_tag_create(bus_get_dma_tag(dev),	/* parent */
1458 				align, 0,		/* alignment, bounds */
1459 				lowaddr,		/* lowaddr */
1460 				BUS_SPACE_MAXADDR,	/* highaddr */
1461 				NULL, NULL,		/* filter, filterarg */
1462 				size,			/* maxsize */
1463 				1,			/* nsegments */
1464 				size,			/* maxsegsize */
1465 				BUS_DMA_ALLOCNOW,	/* flags */
1466 				NULL,			/* lockfunc */
1467 				NULL,			/* lockarg */
1468 				&dma->idi_tag);
1469 	if (err) {
1470 		device_printf(dev,
1471 		    "%s: bus_dma_tag_create failed: %d\n",
1472 		    __func__, err);
1473 		goto fail_0;
1474 	}
1475 
1476 	err = bus_dmamem_alloc(dma->idi_tag, (void**) &dma->idi_vaddr,
1477 	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT | BUS_DMA_ZERO, &dma->idi_map);
1478 	if (err) {
1479 		device_printf(dev,
1480 		    "%s: bus_dmamem_alloc(%ju) failed: %d\n",
1481 		    __func__, (uintmax_t)size, err);
1482 		goto fail_1;
1483 	}
1484 
1485 	dma->idi_paddr = IF_BAD_DMA;
1486 	err = bus_dmamap_load(dma->idi_tag, dma->idi_map, dma->idi_vaddr,
1487 	    size, _iflib_dmamap_cb, &dma->idi_paddr, mapflags | BUS_DMA_NOWAIT);
1488 	if (err || dma->idi_paddr == IF_BAD_DMA) {
1489 		device_printf(dev,
1490 		    "%s: bus_dmamap_load failed: %d\n",
1491 		    __func__, err);
1492 		goto fail_2;
1493 	}
1494 
1495 	dma->idi_size = size;
1496 	return (0);
1497 
1498 fail_2:
1499 	bus_dmamem_free(dma->idi_tag, dma->idi_vaddr, dma->idi_map);
1500 fail_1:
1501 	bus_dma_tag_destroy(dma->idi_tag);
1502 fail_0:
1503 	dma->idi_tag = NULL;
1504 
1505 	return (err);
1506 }
1507 
1508 int
1509 iflib_dma_alloc(if_ctx_t ctx, int size, iflib_dma_info_t dma, int mapflags)
1510 {
1511 	if_shared_ctx_t sctx = ctx->ifc_sctx;
1512 
1513 	KASSERT(sctx->isc_q_align != 0, ("alignment value not initialized"));
1514 
1515 	return (iflib_dma_alloc_align(ctx, size, sctx->isc_q_align, dma, mapflags));
1516 }
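/*
 * A minimal usage sketch, assuming a hypothetical driver softc "sc" and
 * descriptor type "struct my_desc" (neither is an iflib name):
 *
 *	struct iflib_dma_info dma;
 *
 *	if (iflib_dma_alloc(ctx, ndesc * sizeof(struct my_desc), &dma,
 *	    BUS_DMA_NOWAIT) != 0)
 *		return (ENOMEM);
 *	sc->ring = (struct my_desc *)dma.idi_vaddr;
 *	sc->ring_paddr = dma.idi_paddr;
 *
 * idi_vaddr is the CPU view and idi_paddr the device view of the same
 * memory; iflib_dma_free(&dma) releases both, typically at detach.
 */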
1517 
1518 int
1519 iflib_dma_alloc_multi(if_ctx_t ctx, int *sizes, iflib_dma_info_t *dmalist, int mapflags, int count)
1520 {
1521 	int i, err = 0;
1522 	iflib_dma_info_t *dmaiter;
1523 
1524 	dmaiter = dmalist;
1525 	for (i = 0; i < count; i++, dmaiter++) {
1526 		if ((err = iflib_dma_alloc(ctx, sizes[i], *dmaiter, mapflags)) != 0)
1527 			break;
1528 	}
1529 	if (err)
1530 		iflib_dma_free_multi(dmalist, i);
1531 	return (err);
1532 }
1533 
1534 void
1535 iflib_dma_free(iflib_dma_info_t dma)
1536 {
1537 	if (dma->idi_tag == NULL)
1538 		return;
1539 	if (dma->idi_paddr != IF_BAD_DMA) {
1540 		bus_dmamap_sync(dma->idi_tag, dma->idi_map,
1541 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1542 		bus_dmamap_unload(dma->idi_tag, dma->idi_map);
1543 		dma->idi_paddr = IF_BAD_DMA;
1544 	}
1545 	if (dma->idi_vaddr != NULL) {
1546 		bus_dmamem_free(dma->idi_tag, dma->idi_vaddr, dma->idi_map);
1547 		dma->idi_vaddr = NULL;
1548 	}
1549 	bus_dma_tag_destroy(dma->idi_tag);
1550 	dma->idi_tag = NULL;
1551 }
1552 
1553 void
1554 iflib_dma_free_multi(iflib_dma_info_t *dmalist, int count)
1555 {
1556 	int i;
1557 	iflib_dma_info_t *dmaiter = dmalist;
1558 
1559 	for (i = 0; i < count; i++, dmaiter++)
1560 		iflib_dma_free(*dmaiter);
1561 }
1562 
1563 static int
1564 iflib_fast_intr(void *arg)
1565 {
1566 	iflib_filter_info_t info = arg;
1567 	struct grouptask *gtask = info->ifi_task;
1568 	int result;
1569 
1570 	DBG_COUNTER_INC(fast_intrs);
1571 	if (info->ifi_filter != NULL) {
1572 		result = info->ifi_filter(info->ifi_filter_arg);
1573 		if ((result & FILTER_SCHEDULE_THREAD) == 0)
1574 			return (result);
1575 	}
1576 
1577 	GROUPTASK_ENQUEUE(gtask);
1578 	return (FILTER_HANDLED);
1579 }
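/*
 * A sketch of the ifi_filter contract assumed above (hypothetical driver
 * code): the filter runs in interrupt context and returns FILTER_HANDLED
 * (or FILTER_STRAY) to suppress the taskqueue, or a value including
 * FILTER_SCHEDULE_THREAD to have the grouptask enqueued:
 *
 *	static int
 *	mydrv_filter(void *arg)
 *	{
 *		struct mydrv_softc *sc = arg;
 *
 *		if (!mydrv_intr_pending(sc))
 *			return (FILTER_STRAY);
 *		return (FILTER_SCHEDULE_THREAD);
 *	}
 */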
1580 
1581 static int
1582 iflib_fast_intr_rxtx(void *arg)
1583 {
1584 	iflib_filter_info_t info = arg;
1585 	struct grouptask *gtask = info->ifi_task;
1586 	if_ctx_t ctx;
1587 	iflib_rxq_t rxq = (iflib_rxq_t)info->ifi_ctx;
1588 	iflib_txq_t txq;
1589 	void *sc;
1590 	int i, cidx, result;
1591 	qidx_t txqid;
1592 	bool intr_enable, intr_legacy;
1593 
1594 	DBG_COUNTER_INC(fast_intrs);
1595 	if (info->ifi_filter != NULL) {
1596 		result = info->ifi_filter(info->ifi_filter_arg);
1597 		if ((result & FILTER_SCHEDULE_THREAD) == 0)
1598 			return (result);
1599 	}
1600 
1601 	ctx = rxq->ifr_ctx;
1602 	sc = ctx->ifc_softc;
1603 	intr_enable = false;
1604 	intr_legacy = !!(ctx->ifc_flags & IFC_LEGACY);
1605 	MPASS(rxq->ifr_ntxqirq);
1606 	for (i = 0; i < rxq->ifr_ntxqirq; i++) {
1607 		txqid = rxq->ifr_txqid[i];
1608 		txq = &ctx->ifc_txqs[txqid];
1609 		bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map,
1610 		    BUS_DMASYNC_POSTREAD);
1611 		if (!ctx->isc_txd_credits_update(sc, txqid, false)) {
1612 			if (intr_legacy)
1613 				intr_enable = true;
1614 			else
1615 				IFDI_TX_QUEUE_INTR_ENABLE(ctx, txqid);
1616 			continue;
1617 		}
1618 		GROUPTASK_ENQUEUE(&txq->ift_task);
1619 	}
1620 	if (ctx->ifc_sctx->isc_flags & IFLIB_HAS_RXCQ)
1621 		cidx = rxq->ifr_cq_cidx;
1622 	else
1623 		cidx = rxq->ifr_fl[0].ifl_cidx;
1624 	if (iflib_rxd_avail(ctx, rxq, cidx, 1))
1625 		GROUPTASK_ENQUEUE(gtask);
1626 	else {
1627 		if (intr_legacy)
1628 			intr_enable = true;
1629 		else
1630 			IFDI_RX_QUEUE_INTR_ENABLE(ctx, rxq->ifr_id);
1631 		DBG_COUNTER_INC(rx_intr_enables);
1632 	}
1633 	if (intr_enable)
1634 		IFDI_INTR_ENABLE(ctx);
1635 	return (FILTER_HANDLED);
1636 }
1637 
1638 static int
1639 iflib_fast_intr_ctx(void *arg)
1640 {
1641 	iflib_filter_info_t info = arg;
1642 	struct grouptask *gtask = info->ifi_task;
1643 	int result;
1644 
1645 	DBG_COUNTER_INC(fast_intrs);
1646 	if (info->ifi_filter != NULL) {
1647 		result = info->ifi_filter(info->ifi_filter_arg);
1648 		if ((result & FILTER_SCHEDULE_THREAD) == 0)
1649 			return (result);
1650 	}
1651 
1652 	GROUPTASK_ENQUEUE(gtask);
1653 	return (FILTER_HANDLED);
1654 }
1655 
1656 static int
1657 _iflib_irq_alloc(if_ctx_t ctx, if_irq_t irq, int rid,
1658 		 driver_filter_t filter, driver_intr_t handler, void *arg,
1659 		 const char *name)
1660 {
1661 	struct resource *res;
1662 	void *tag = NULL;
1663 	device_t dev = ctx->ifc_dev;
1664 	int flags, i, rc;
1665 
1666 	flags = RF_ACTIVE;
1667 	if (ctx->ifc_flags & IFC_LEGACY)
1668 		flags |= RF_SHAREABLE;
1669 	MPASS(rid < 512);
1670 	i = rid;
1671 	res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &i, flags);
1672 	if (res == NULL) {
1673 		device_printf(dev,
1674 		    "failed to allocate IRQ for rid %d, name %s.\n", rid, name);
1675 		return (ENOMEM);
1676 	}
1677 	irq->ii_res = res;
1678 	KASSERT(filter == NULL || handler == NULL, ("filter and handler can't both be non-NULL"));
1679 	rc = bus_setup_intr(dev, res, INTR_MPSAFE | INTR_TYPE_NET,
1680 						filter, handler, arg, &tag);
1681 	if (rc != 0) {
1682 		device_printf(dev,
1683 		    "failed to setup interrupt for rid %d, name %s: %d\n",
1684 					  rid, name ? name : "unknown", rc);
1685 		return (rc);
1686 	} else if (name)
1687 		bus_describe_intr(dev, res, tag, "%s", name);
1688 
1689 	irq->ii_tag = tag;
1690 	return (0);
1691 }
1692 
1693 /*********************************************************************
1694  *
1695  *  Allocate DMA resources for TX buffers as well as memory for the TX
1696  *  mbuf map.  TX DMA maps (non-TSO/TSO) and TX mbuf map are kept in an
1697  *  iflib_sw_tx_desc_array structure, storing all the information that
1698  *  is needed to transmit a packet on the wire.  This is called only
1699  *  once at attach; setup is done on every reset.
1700  *
1701  **********************************************************************/
1702 static int
1703 iflib_txsd_alloc(iflib_txq_t txq)
1704 {
1705 	if_ctx_t ctx = txq->ift_ctx;
1706 	if_shared_ctx_t sctx = ctx->ifc_sctx;
1707 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
1708 	device_t dev = ctx->ifc_dev;
1709 	bus_size_t tsomaxsize;
1710 	bus_addr_t lowaddr;
1711 	int err, nsegments, ntsosegments;
1712 	bool tso;
1713 
1714 	nsegments = scctx->isc_tx_nsegments;
1715 	ntsosegments = scctx->isc_tx_tso_segments_max;
1716 	tsomaxsize = scctx->isc_tx_tso_size_max;
1717 	if (if_getcapabilities(ctx->ifc_ifp) & IFCAP_VLAN_MTU)
1718 		tsomaxsize += sizeof(struct ether_vlan_header);
1719 	MPASS(scctx->isc_ntxd[0] > 0);
1720 	MPASS(scctx->isc_ntxd[txq->ift_br_offset] > 0);
1721 	MPASS(nsegments > 0);
1722 	if (if_getcapabilities(ctx->ifc_ifp) & IFCAP_TSO) {
1723 		MPASS(ntsosegments > 0);
1724 		MPASS(sctx->isc_tso_maxsize >= tsomaxsize);
1725 	}
1726 
1727 	lowaddr = DMA_WIDTH_TO_BUS_LOWADDR(scctx->isc_dma_width);
1728 
1729 	/*
1730 	 * Set up DMA tags for TX buffers.
1731 	 */
1732 	if ((err = bus_dma_tag_create(bus_get_dma_tag(dev),
1733 			       1, 0,			/* alignment, bounds */
1734 			       lowaddr,			/* lowaddr */
1735 			       BUS_SPACE_MAXADDR,	/* highaddr */
1736 			       NULL, NULL,		/* filter, filterarg */
1737 			       sctx->isc_tx_maxsize,		/* maxsize */
1738 			       nsegments,	/* nsegments */
1739 			       sctx->isc_tx_maxsegsize,	/* maxsegsize */
1740 			       0,			/* flags */
1741 			       NULL,			/* lockfunc */
1742 			       NULL,			/* lockfuncarg */
1743 			       &txq->ift_buf_tag))) {
1744 		device_printf(dev, "Unable to allocate TX DMA tag: %d\n", err);
1745 		device_printf(dev, "maxsize: %ju nsegments: %d maxsegsize: %ju\n",
1746 		    (uintmax_t)sctx->isc_tx_maxsize, nsegments, (uintmax_t)sctx->isc_tx_maxsegsize);
1747 		goto fail;
1748 	}
1749 	tso = (if_getcapabilities(ctx->ifc_ifp) & IFCAP_TSO) != 0;
1750 	if (tso && (err = bus_dma_tag_create(bus_get_dma_tag(dev),
1751 			       1, 0,			/* alignment, bounds */
1752 			       lowaddr,			/* lowaddr */
1753 			       BUS_SPACE_MAXADDR,	/* highaddr */
1754 			       NULL, NULL,		/* filter, filterarg */
1755 			       tsomaxsize,		/* maxsize */
1756 			       ntsosegments,	/* nsegments */
1757 			       sctx->isc_tso_maxsegsize,/* maxsegsize */
1758 			       0,			/* flags */
1759 			       NULL,			/* lockfunc */
1760 			       NULL,			/* lockfuncarg */
1761 			       &txq->ift_tso_buf_tag))) {
1762 		device_printf(dev, "Unable to allocate TSO TX DMA tag: %d\n",
1763 		    err);
1764 		goto fail;
1765 	}
1766 
1767 	/* Allocate memory for the TX mbuf map. */
1768 	if (!(txq->ift_sds.ifsd_m =
1769 	    (struct mbuf **) malloc(sizeof(struct mbuf *) *
1770 	    scctx->isc_ntxd[txq->ift_br_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
1771 		device_printf(dev, "Unable to allocate TX mbuf map memory\n");
1772 		err = ENOMEM;
1773 		goto fail;
1774 	}
1775 
1776 	/*
1777 	 * Create the DMA maps for TX buffers.
1778 	 */
1779 	if ((txq->ift_sds.ifsd_map = (bus_dmamap_t *)malloc(
1780 	    sizeof(bus_dmamap_t) * scctx->isc_ntxd[txq->ift_br_offset],
1781 	    M_IFLIB, M_NOWAIT | M_ZERO)) == NULL) {
1782 		device_printf(dev,
1783 		    "Unable to allocate TX buffer DMA map memory\n");
1784 		err = ENOMEM;
1785 		goto fail;
1786 	}
1787 	if (tso && (txq->ift_sds.ifsd_tso_map = (bus_dmamap_t *)malloc(
1788 	    sizeof(bus_dmamap_t) * scctx->isc_ntxd[txq->ift_br_offset],
1789 	    M_IFLIB, M_NOWAIT | M_ZERO)) == NULL) {
1790 		device_printf(dev,
1791 		    "Unable to allocate TSO TX buffer map memory\n");
1792 		err = ENOMEM;
1793 		goto fail;
1794 	}
1795 	for (int i = 0; i < scctx->isc_ntxd[txq->ift_br_offset]; i++) {
1796 		err = bus_dmamap_create(txq->ift_buf_tag, 0,
1797 		    &txq->ift_sds.ifsd_map[i]);
1798 		if (err != 0) {
1799 			device_printf(dev, "Unable to create TX DMA map\n");
1800 			goto fail;
1801 		}
1802 		if (!tso)
1803 			continue;
1804 		err = bus_dmamap_create(txq->ift_tso_buf_tag, 0,
1805 		    &txq->ift_sds.ifsd_tso_map[i]);
1806 		if (err != 0) {
1807 			device_printf(dev, "Unable to create TSO TX DMA map\n");
1808 			goto fail;
1809 		}
1810 	}
1811 	return (0);
1812 fail:
1813 	/* Free everything; this also handles the case where we failed partway through. */
1814 	iflib_tx_structures_free(ctx);
1815 	return (err);
1816 }
1817 
1818 static void
1819 iflib_txsd_destroy(if_ctx_t ctx, iflib_txq_t txq, int i)
1820 {
1821 	bus_dmamap_t map;
1822 
1823 	if (txq->ift_sds.ifsd_map != NULL) {
1824 		map = txq->ift_sds.ifsd_map[i];
1825 		bus_dmamap_sync(txq->ift_buf_tag, map, BUS_DMASYNC_POSTWRITE);
1826 		bus_dmamap_unload(txq->ift_buf_tag, map);
1827 		bus_dmamap_destroy(txq->ift_buf_tag, map);
1828 		txq->ift_sds.ifsd_map[i] = NULL;
1829 	}
1830 
1831 	if (txq->ift_sds.ifsd_tso_map != NULL) {
1832 		map = txq->ift_sds.ifsd_tso_map[i];
1833 		bus_dmamap_sync(txq->ift_tso_buf_tag, map,
1834 		    BUS_DMASYNC_POSTWRITE);
1835 		bus_dmamap_unload(txq->ift_tso_buf_tag, map);
1836 		bus_dmamap_destroy(txq->ift_tso_buf_tag, map);
1837 		txq->ift_sds.ifsd_tso_map[i] = NULL;
1838 	}
1839 }
1840 
1841 static void
1842 iflib_txq_destroy(iflib_txq_t txq)
1843 {
1844 	if_ctx_t ctx = txq->ift_ctx;
1845 
1846 	for (int i = 0; i < txq->ift_size; i++)
1847 		iflib_txsd_destroy(ctx, txq, i);
1848 
1849 	if (txq->ift_br != NULL) {
1850 		ifmp_ring_free(txq->ift_br);
1851 		txq->ift_br = NULL;
1852 	}
1853 
1854 	mtx_destroy(&txq->ift_mtx);
1855 
1856 	if (txq->ift_sds.ifsd_map != NULL) {
1857 		free(txq->ift_sds.ifsd_map, M_IFLIB);
1858 		txq->ift_sds.ifsd_map = NULL;
1859 	}
1860 	if (txq->ift_sds.ifsd_tso_map != NULL) {
1861 		free(txq->ift_sds.ifsd_tso_map, M_IFLIB);
1862 		txq->ift_sds.ifsd_tso_map = NULL;
1863 	}
1864 	if (txq->ift_sds.ifsd_m != NULL) {
1865 		free(txq->ift_sds.ifsd_m, M_IFLIB);
1866 		txq->ift_sds.ifsd_m = NULL;
1867 	}
1868 	if (txq->ift_buf_tag != NULL) {
1869 		bus_dma_tag_destroy(txq->ift_buf_tag);
1870 		txq->ift_buf_tag = NULL;
1871 	}
1872 	if (txq->ift_tso_buf_tag != NULL) {
1873 		bus_dma_tag_destroy(txq->ift_tso_buf_tag);
1874 		txq->ift_tso_buf_tag = NULL;
1875 	}
1876 	if (txq->ift_ifdi != NULL) {
1877 		free(txq->ift_ifdi, M_IFLIB);
1878 	}
1879 }
1880 
1881 static void
1882 iflib_txsd_free(if_ctx_t ctx, iflib_txq_t txq, int i)
1883 {
1884 	struct mbuf **mp;
1885 
1886 	mp = &txq->ift_sds.ifsd_m[i];
1887 	if (*mp == NULL)
1888 		return;
1889 
1890 	if (txq->ift_sds.ifsd_map != NULL) {
1891 		bus_dmamap_sync(txq->ift_buf_tag,
1892 		    txq->ift_sds.ifsd_map[i], BUS_DMASYNC_POSTWRITE);
1893 		bus_dmamap_unload(txq->ift_buf_tag, txq->ift_sds.ifsd_map[i]);
1894 	}
1895 	if (txq->ift_sds.ifsd_tso_map != NULL) {
1896 		bus_dmamap_sync(txq->ift_tso_buf_tag,
1897 		    txq->ift_sds.ifsd_tso_map[i], BUS_DMASYNC_POSTWRITE);
1898 		bus_dmamap_unload(txq->ift_tso_buf_tag,
1899 		    txq->ift_sds.ifsd_tso_map[i]);
1900 	}
1901 	m_freem(*mp);
1902 	DBG_COUNTER_INC(tx_frees);
1903 	*mp = NULL;
1904 }
1905 
1906 static int
1907 iflib_txq_setup(iflib_txq_t txq)
1908 {
1909 	if_ctx_t ctx = txq->ift_ctx;
1910 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
1911 	if_shared_ctx_t sctx = ctx->ifc_sctx;
1912 	iflib_dma_info_t di;
1913 	int i;
1914 
1915 	/* Set number of descriptors available */
1916 	txq->ift_qstatus = IFLIB_QUEUE_IDLE;
1917 	/* XXX make configurable */
1918 	txq->ift_update_freq = IFLIB_DEFAULT_TX_UPDATE_FREQ;
1919 
1920 	/* Reset indices */
1921 	txq->ift_cidx_processed = 0;
1922 	txq->ift_pidx = txq->ift_cidx = txq->ift_npending = 0;
1923 	txq->ift_size = scctx->isc_ntxd[txq->ift_br_offset];
1924 
1925 	for (i = 0, di = txq->ift_ifdi; i < sctx->isc_ntxqs; i++, di++)
1926 		bzero((void *)di->idi_vaddr, di->idi_size);
1927 
1928 	IFDI_TXQ_SETUP(ctx, txq->ift_id);
1929 	for (i = 0, di = txq->ift_ifdi; i < sctx->isc_ntxqs; i++, di++)
1930 		bus_dmamap_sync(di->idi_tag, di->idi_map,
1931 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1932 	return (0);
1933 }
1934 
1935 /*********************************************************************
1936  *
1937  *  Allocate DMA resources for RX buffers as well as memory for the RX
1938  *  mbuf map, direct RX cluster pointer map and RX cluster bus address
1939  *  map.  RX DMA map, RX mbuf map, direct RX cluster pointer map and
1940  *  RX cluster map are kept in a iflib_sw_rx_desc_array structure.
1941  *  RX cluster map are kept in an iflib_sw_rx_desc_array structure.
1942  *  Since we use one entry in iflib_sw_rx_desc_array per received
1943  *  number of hardware receive descriptors that we've allocated.
1944  *
1945  **********************************************************************/
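/*
 * As a rough sketch, each free list ends up with four parallel
 * per-descriptor arrays (names as used in this file):
 *
 *	struct mbuf  *ifsd_m[nrxd];    mbuf for descriptor i
 *	caddr_t       ifsd_cl[nrxd];   receive cluster for descriptor i
 *	bus_addr_t    ifsd_ba[nrxd];   bus address of that cluster
 *	bus_dmamap_t  ifsd_map[nrxd];  DMA map for descriptor i
 *
 * (nrxd here stands for scctx->isc_nrxd[rxq->ifr_fl_offset].)
 */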
1946 static int
1947 iflib_rxsd_alloc(iflib_rxq_t rxq)
1948 {
1949 	if_ctx_t ctx = rxq->ifr_ctx;
1950 	if_shared_ctx_t sctx = ctx->ifc_sctx;
1951 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
1952 	device_t dev = ctx->ifc_dev;
1953 	iflib_fl_t fl;
1954 	bus_addr_t lowaddr;
1955 	int			err;
1956 
1957 	MPASS(scctx->isc_nrxd[0] > 0);
1958 	MPASS(scctx->isc_nrxd[rxq->ifr_fl_offset] > 0);
1959 
1960 	lowaddr = DMA_WIDTH_TO_BUS_LOWADDR(scctx->isc_dma_width);
1961 
1962 	fl = rxq->ifr_fl;
1963 	for (int i = 0; i < rxq->ifr_nfl; i++, fl++) {
1964 		fl->ifl_size = scctx->isc_nrxd[rxq->ifr_fl_offset]; /* this isn't necessarily the same */
1965 		/* Set up DMA tag for RX buffers. */
1966 		err = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
1967 					 1, 0,			/* alignment, bounds */
1968 					 lowaddr,		/* lowaddr */
1969 					 BUS_SPACE_MAXADDR,	/* highaddr */
1970 					 NULL, NULL,		/* filter, filterarg */
1971 					 sctx->isc_rx_maxsize,	/* maxsize */
1972 					 sctx->isc_rx_nsegments,	/* nsegments */
1973 					 sctx->isc_rx_maxsegsize,	/* maxsegsize */
1974 					 0,			/* flags */
1975 					 NULL,			/* lockfunc */
1976 					 NULL,			/* lockarg */
1977 					 &fl->ifl_buf_tag);
1978 		if (err) {
1979 			device_printf(dev,
1980 			    "Unable to allocate RX DMA tag: %d\n", err);
1981 			goto fail;
1982 		}
1983 
1984 		/* Allocate memory for the RX mbuf map. */
1985 		if (!(fl->ifl_sds.ifsd_m =
1986 		      (struct mbuf **) malloc(sizeof(struct mbuf *) *
1987 					      scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
1988 			device_printf(dev,
1989 			    "Unable to allocate RX mbuf map memory\n");
1990 			err = ENOMEM;
1991 			goto fail;
1992 		}
1993 
1994 		/* Allocate memory for the direct RX cluster pointer map. */
1995 		if (!(fl->ifl_sds.ifsd_cl =
1996 		      (caddr_t *) malloc(sizeof(caddr_t) *
1997 					      scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
1998 			device_printf(dev,
1999 			    "Unable to allocate RX cluster map memory\n");
2000 			err = ENOMEM;
2001 			goto fail;
2002 		}
2003 
2004 		/* Allocate memory for the RX cluster bus address map. */
2005 		if (!(fl->ifl_sds.ifsd_ba =
2006 		      (bus_addr_t *) malloc(sizeof(bus_addr_t) *
2007 					      scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
2008 			device_printf(dev,
2009 			    "Unable to allocate RX bus address map memory\n");
2010 			err = ENOMEM;
2011 			goto fail;
2012 		}
2013 
2014 		/*
2015 		 * Create the DMA maps for RX buffers.
2016 		 */
2017 		if (!(fl->ifl_sds.ifsd_map =
2018 		      (bus_dmamap_t *) malloc(sizeof(bus_dmamap_t) * scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
2019 			device_printf(dev,
2020 			    "Unable to allocate RX buffer DMA map memory\n");
2021 			err = ENOMEM;
2022 			goto fail;
2023 		}
2024 		for (int i = 0; i < scctx->isc_nrxd[rxq->ifr_fl_offset]; i++) {
2025 			err = bus_dmamap_create(fl->ifl_buf_tag, 0,
2026 			    &fl->ifl_sds.ifsd_map[i]);
2027 			if (err != 0) {
2028 				device_printf(dev, "Unable to create RX buffer DMA map\n");
2029 				goto fail;
2030 			}
2031 		}
2032 	}
2033 	return (0);
2034 
2035 fail:
2036 	iflib_rx_structures_free(ctx);
2037 	return (err);
2038 }
2039 
2040 /*
2041  * Internal service routines
2042  */
2043 
2044 struct rxq_refill_cb_arg {
2045 	int               error;
2046 	bus_dma_segment_t seg;
2047 	int               nseg;
2048 };
2049 
2050 static void
2051 _rxq_refill_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2052 {
2053 	struct rxq_refill_cb_arg *cb_arg = arg;
2054 
2055 	cb_arg->error = error;
2056 	cb_arg->seg = segs[0];
2057 	cb_arg->nseg = nseg;
2058 }
2059 
2060 /**
2061  * iflib_fl_refill - refill an rxq free-buffer list
2062  * @ctx: the iflib context
2063  * @fl: the free list to refill
2064  * @count: the number of new buffers to allocate
2065  *
2066  * (Re)populate an rxq free-buffer list with up to @count new packet buffers.
2067  * The caller must ensure that @count does not exceed the queue's capacity
2068  * minus one (since we always leave a descriptor unavailable).
2069  */
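/*
 * A minimal caller sketch (compare iflib_fl_refill_all() below),
 * honoring the one-descriptor gap:
 *
 *	int32_t avail = fl->ifl_size - fl->ifl_credits - 1;
 *	if (avail > 0)
 *		(void)iflib_fl_refill(ctx, fl, avail);
 */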
2070 static uint8_t
2071 iflib_fl_refill(if_ctx_t ctx, iflib_fl_t fl, int count)
2072 {
2073 	struct if_rxd_update iru;
2074 	struct rxq_refill_cb_arg cb_arg;
2075 	struct mbuf *m;
2076 	caddr_t cl, *sd_cl;
2077 	struct mbuf **sd_m;
2078 	bus_dmamap_t *sd_map;
2079 	bus_addr_t bus_addr, *sd_ba;
2080 	int err, frag_idx, i, idx, n, pidx;
2081 	qidx_t credits;
2082 
2083 	MPASS(count <= fl->ifl_size - fl->ifl_credits - 1);
2084 
2085 	sd_m = fl->ifl_sds.ifsd_m;
2086 	sd_map = fl->ifl_sds.ifsd_map;
2087 	sd_cl = fl->ifl_sds.ifsd_cl;
2088 	sd_ba = fl->ifl_sds.ifsd_ba;
2089 	pidx = fl->ifl_pidx;
2090 	idx = pidx;
2091 	frag_idx = fl->ifl_fragidx;
2092 	credits = fl->ifl_credits;
2093 
2094 	i = 0;
2095 	n = count;
2096 	MPASS(n > 0);
2097 	MPASS(credits + n <= fl->ifl_size);
2098 
2099 	if (pidx < fl->ifl_cidx)
2100 		MPASS(pidx + n <= fl->ifl_cidx);
2101 	if (pidx == fl->ifl_cidx && (credits < fl->ifl_size))
2102 		MPASS(fl->ifl_gen == 0);
2103 	if (pidx > fl->ifl_cidx)
2104 		MPASS(n <= fl->ifl_size - pidx + fl->ifl_cidx);
2105 
2106 	DBG_COUNTER_INC(fl_refills);
2107 	if (n > 8)
2108 		DBG_COUNTER_INC(fl_refills_large);
2109 	iru_init(&iru, fl->ifl_rxq, fl->ifl_id);
2110 	while (n-- > 0) {
2111 		/*
2112 		 * We allocate an uninitialized mbuf + cluster; the mbuf is
2113 		 * initialized after RX.
2114 		 *
2115 		 * If the cluster is still set, then we know a minimum-sized
2116 		 * packet was received.
2117 		 */
2118 		bit_ffc_at(fl->ifl_rx_bitmap, frag_idx, fl->ifl_size,
2119 		    &frag_idx);
2120 		if (frag_idx < 0)
2121 			bit_ffc(fl->ifl_rx_bitmap, fl->ifl_size, &frag_idx);
2122 		MPASS(frag_idx >= 0);
2123 		if ((cl = sd_cl[frag_idx]) == NULL) {
2124 			cl = uma_zalloc(fl->ifl_zone, M_NOWAIT);
2125 			if (__predict_false(cl == NULL))
2126 				break;
2127 
2128 			cb_arg.error = 0;
2129 			MPASS(sd_map != NULL);
2130 			err = bus_dmamap_load(fl->ifl_buf_tag, sd_map[frag_idx],
2131 			    cl, fl->ifl_buf_size, _rxq_refill_cb, &cb_arg,
2132 			    BUS_DMA_NOWAIT);
2133 			if (__predict_false(err != 0 || cb_arg.error)) {
2134 				uma_zfree(fl->ifl_zone, cl);
2135 				break;
2136 			}
2137 
2138 			sd_ba[frag_idx] = bus_addr = cb_arg.seg.ds_addr;
2139 			sd_cl[frag_idx] = cl;
2140 #if MEMORY_LOGGING
2141 			fl->ifl_cl_enqueued++;
2142 #endif
2143 		} else {
2144 			bus_addr = sd_ba[frag_idx];
2145 		}
2146 		bus_dmamap_sync(fl->ifl_buf_tag, sd_map[frag_idx],
2147 		    BUS_DMASYNC_PREREAD);
2148 
2149 		if (sd_m[frag_idx] == NULL) {
2150 			m = m_gethdr_raw(M_NOWAIT, 0);
2151 			if (__predict_false(m == NULL))
2152 				break;
2153 			sd_m[frag_idx] = m;
2154 		}
2155 		bit_set(fl->ifl_rx_bitmap, frag_idx);
2156 #if MEMORY_LOGGING
2157 		fl->ifl_m_enqueued++;
2158 #endif
2159 
2160 		DBG_COUNTER_INC(rx_allocs);
2161 		fl->ifl_rxd_idxs[i] = frag_idx;
2162 		fl->ifl_bus_addrs[i] = bus_addr;
2163 		credits++;
2164 		i++;
2165 		MPASS(credits <= fl->ifl_size);
2166 		if (++idx == fl->ifl_size) {
2167 #ifdef INVARIANTS
2168 			fl->ifl_gen = 1;
2169 #endif
2170 			idx = 0;
2171 		}
2172 		if (n == 0 || i == IFLIB_MAX_RX_REFRESH) {
2173 			iru.iru_pidx = pidx;
2174 			iru.iru_count = i;
2175 			ctx->isc_rxd_refill(ctx->ifc_softc, &iru);
2176 			fl->ifl_pidx = idx;
2177 			fl->ifl_credits = credits;
2178 			pidx = idx;
2179 			i = 0;
2180 		}
2181 	}
2182 
2183 	if (n < count - 1) {
2184 		if (i != 0) {
2185 			iru.iru_pidx = pidx;
2186 			iru.iru_count = i;
2187 			ctx->isc_rxd_refill(ctx->ifc_softc, &iru);
2188 			fl->ifl_pidx = idx;
2189 			fl->ifl_credits = credits;
2190 		}
2191 		DBG_COUNTER_INC(rxd_flush);
2192 		bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map,
2193 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2194 		ctx->isc_rxd_flush(ctx->ifc_softc, fl->ifl_rxq->ifr_id,
2195 		    fl->ifl_id, fl->ifl_pidx);
2196 		if (__predict_true(bit_test(fl->ifl_rx_bitmap, frag_idx))) {
2197 			fl->ifl_fragidx = frag_idx + 1;
2198 			if (fl->ifl_fragidx == fl->ifl_size)
2199 				fl->ifl_fragidx = 0;
2200 		} else {
2201 			fl->ifl_fragidx = frag_idx;
2202 		}
2203 	}
2204 
2205 	return (n == -1 ? 0 : IFLIB_RXEOF_EMPTY);
2206 }
2207 
2208 static inline uint8_t
2209 iflib_fl_refill_all(if_ctx_t ctx, iflib_fl_t fl)
2210 {
2211 	/*
2212 	 * We leave one descriptor unused to keep pidx from catching up
2213 	 * with cidx, which would confuse most NICs. For instance,
2214 	 * Intel NICs have (per receive ring) RDH and RDT registers, where
2215 	 * RDH points to the next receive descriptor to be used by the NIC,
2216 	 * and RDT for the next receive descriptor to be published by the
2217 	 * driver to the NIC (RDT - 1 is thus the last valid one).
2218 	 * The condition RDH == RDT means no descriptors are available to
2219 	 * the NIC, and thus it would be ambiguous if it also meant that
2220 	 * all the descriptors are available to the NIC.
2221 	 */
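	/*
	 * A small worked example: with a 4-descriptor ring and
	 * cidx == pidx == 0, posting all four buffers would make
	 * "empty" and "full" read identically as RDH == RDT; posting
	 * at most three keeps the two states distinguishable.
	 */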
2222 	int32_t reclaimable = fl->ifl_size - fl->ifl_credits - 1;
2223 #ifdef INVARIANTS
2224 	int32_t delta = fl->ifl_size - get_inuse(fl->ifl_size, fl->ifl_cidx, fl->ifl_pidx, fl->ifl_gen) - 1;
2225 #endif
2226 
2227 	MPASS(fl->ifl_credits <= fl->ifl_size);
2228 	MPASS(reclaimable == delta);
2229 
2230 	if (reclaimable > 0)
2231 		return (iflib_fl_refill(ctx, fl, reclaimable));
2232 	return (0);
2233 }
2234 
2235 uint8_t
2236 iflib_in_detach(if_ctx_t ctx)
2237 {
2238 	bool in_detach;
2239 
2240 	STATE_LOCK(ctx);
2241 	in_detach = !!(ctx->ifc_flags & IFC_IN_DETACH);
2242 	STATE_UNLOCK(ctx);
2243 	return (in_detach);
2244 }
2245 
2246 static void
2247 iflib_fl_bufs_free(iflib_fl_t fl)
2248 {
2249 	iflib_dma_info_t idi = fl->ifl_ifdi;
2250 	bus_dmamap_t sd_map;
2251 	uint32_t i;
2252 
2253 	for (i = 0; i < fl->ifl_size; i++) {
2254 		struct mbuf **sd_m = &fl->ifl_sds.ifsd_m[i];
2255 		caddr_t *sd_cl = &fl->ifl_sds.ifsd_cl[i];
2256 
2257 		if (*sd_cl != NULL) {
2258 			sd_map = fl->ifl_sds.ifsd_map[i];
2259 			bus_dmamap_sync(fl->ifl_buf_tag, sd_map,
2260 			    BUS_DMASYNC_POSTREAD);
2261 			bus_dmamap_unload(fl->ifl_buf_tag, sd_map);
2262 			uma_zfree(fl->ifl_zone, *sd_cl);
2263 			*sd_cl = NULL;
2264 			if (*sd_m != NULL) {
2265 				m_init(*sd_m, M_NOWAIT, MT_DATA, 0);
2266 				m_free_raw(*sd_m);
2267 				*sd_m = NULL;
2268 			}
2269 		} else {
2270 			MPASS(*sd_m == NULL);
2271 		}
2272 #if MEMORY_LOGGING
2273 		fl->ifl_m_dequeued++;
2274 		fl->ifl_cl_dequeued++;
2275 #endif
2276 	}
2277 #ifdef INVARIANTS
2278 	for (i = 0; i < fl->ifl_size; i++) {
2279 		MPASS(fl->ifl_sds.ifsd_cl[i] == NULL);
2280 		MPASS(fl->ifl_sds.ifsd_m[i] == NULL);
2281 	}
2282 #endif
2283 	/*
2284 	 * Reset free list values
2285 	 */
2286 	fl->ifl_credits = fl->ifl_cidx = fl->ifl_pidx = fl->ifl_gen = fl->ifl_fragidx = 0;
2287 	bzero(idi->idi_vaddr, idi->idi_size);
2288 }
2289 
2290 /*********************************************************************
2291  *
2292  *  Initialize a free list and its buffers.
2293  *
2294  **********************************************************************/
2295 static int
2296 iflib_fl_setup(iflib_fl_t fl)
2297 {
2298 	iflib_rxq_t rxq = fl->ifl_rxq;
2299 	if_ctx_t ctx = rxq->ifr_ctx;
2300 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
2301 	int qidx;
2302 
2303 	bit_nclear(fl->ifl_rx_bitmap, 0, fl->ifl_size - 1);
2304 	/*
2305 	** Free current RX buffer structs and their mbufs
2306 	*/
2307 	iflib_fl_bufs_free(fl);
2308 	/* Now replenish the mbufs */
2309 	MPASS(fl->ifl_credits == 0);
2310 	qidx = rxq->ifr_fl_offset + fl->ifl_id;
2311 	if (scctx->isc_rxd_buf_size[qidx] != 0)
2312 		fl->ifl_buf_size = scctx->isc_rxd_buf_size[qidx];
2313 	else
2314 		fl->ifl_buf_size = ctx->ifc_rx_mbuf_sz;
2315 	/*
2316 	 * ifl_buf_size may be a driver-supplied value, so pull it up
2317 	 * to the selected mbuf size.
2318 	 */
2319 	fl->ifl_buf_size = iflib_get_mbuf_size_for(fl->ifl_buf_size);
2320 	if (fl->ifl_buf_size > ctx->ifc_max_fl_buf_size)
2321 		ctx->ifc_max_fl_buf_size = fl->ifl_buf_size;
2322 	fl->ifl_cltype = m_gettype(fl->ifl_buf_size);
2323 	fl->ifl_zone = m_getzone(fl->ifl_buf_size);
2324 
2325 	/*
2326 	 * Avoid pre-allocating zillions of clusters to an idle card,
2327 	 * which potentially speeds up attach. In any case, make sure
2328 	 * to leave a descriptor unavailable. See the comment in
2329 	 * iflib_fl_refill_all().
2330 	 */
2331 	MPASS(fl->ifl_size > 0);
2332 	(void)iflib_fl_refill(ctx, fl, min(128, fl->ifl_size - 1));
2333 	if (min(128, fl->ifl_size - 1) != fl->ifl_credits)
2334 		return (ENOBUFS);
2335 	/*
2336 	 * Flush the refilled descriptors out to the hardware.
2337 	 */
2338 	MPASS(rxq != NULL);
2339 	MPASS(fl->ifl_ifdi != NULL);
2340 	bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map,
2341 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2342 	return (0);
2343 }
2344 
2345 /*********************************************************************
2346  *
2347  *  Free receive ring data structures
2348  *
2349  **********************************************************************/
2350 static void
2351 iflib_rx_sds_free(iflib_rxq_t rxq)
2352 {
2353 	iflib_fl_t fl;
2354 	int i, j;
2355 
2356 	if (rxq->ifr_fl != NULL) {
2357 		for (i = 0; i < rxq->ifr_nfl; i++) {
2358 			fl = &rxq->ifr_fl[i];
2359 			if (fl->ifl_buf_tag != NULL) {
2360 				if (fl->ifl_sds.ifsd_map != NULL) {
2361 					for (j = 0; j < fl->ifl_size; j++) {
2362 						bus_dmamap_sync(
2363 						    fl->ifl_buf_tag,
2364 						    fl->ifl_sds.ifsd_map[j],
2365 						    BUS_DMASYNC_POSTREAD);
2366 						bus_dmamap_unload(
2367 						    fl->ifl_buf_tag,
2368 						    fl->ifl_sds.ifsd_map[j]);
2369 						bus_dmamap_destroy(
2370 						    fl->ifl_buf_tag,
2371 						    fl->ifl_sds.ifsd_map[j]);
2372 					}
2373 				}
2374 				bus_dma_tag_destroy(fl->ifl_buf_tag);
2375 				fl->ifl_buf_tag = NULL;
2376 			}
2377 			free(fl->ifl_sds.ifsd_m, M_IFLIB);
2378 			free(fl->ifl_sds.ifsd_cl, M_IFLIB);
2379 			free(fl->ifl_sds.ifsd_ba, M_IFLIB);
2380 			free(fl->ifl_sds.ifsd_map, M_IFLIB);
2381 			free(fl->ifl_rx_bitmap, M_IFLIB);
2382 			fl->ifl_sds.ifsd_m = NULL;
2383 			fl->ifl_sds.ifsd_cl = NULL;
2384 			fl->ifl_sds.ifsd_ba = NULL;
2385 			fl->ifl_sds.ifsd_map = NULL;
2386 			fl->ifl_rx_bitmap = NULL;
2387 		}
2388 		free(rxq->ifr_fl, M_IFLIB);
2389 		rxq->ifr_fl = NULL;
2390 		free(rxq->ifr_ifdi, M_IFLIB);
2391 		rxq->ifr_ifdi = NULL;
2392 		rxq->ifr_cq_cidx = 0;
2393 	}
2394 }
2395 
2396 /*
2397  * Timer routine
2398  */
2399 static void
2400 iflib_timer(void *arg)
2401 {
2402 	iflib_txq_t txq = arg;
2403 	if_ctx_t ctx = txq->ift_ctx;
2404 	if_softc_ctx_t sctx = &ctx->ifc_softc_ctx;
2405 	uint64_t this_tick = ticks;
2406 
2407 	if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING))
2408 		return;
2409 
2410 	/*
2411 	** Check on the state of the TX queue(s); this
2412 	** can be done without the lock because it's RO
2413 	** and the HUNG state will be static if set.
2414 	*/
2415 	if (this_tick - txq->ift_last_timer_tick >= iflib_timer_default) {
2416 		txq->ift_last_timer_tick = this_tick;
2417 		IFDI_TIMER(ctx, txq->ift_id);
2418 		if ((txq->ift_qstatus == IFLIB_QUEUE_HUNG) &&
2419 		    ((txq->ift_cleaned_prev == txq->ift_cleaned) ||
2420 		     (sctx->isc_pause_frames == 0)))
2421 			goto hung;
2422 
2423 		if (txq->ift_qstatus != IFLIB_QUEUE_IDLE &&
2424 		    ifmp_ring_is_stalled(txq->ift_br)) {
2425 			KASSERT(ctx->ifc_link_state == LINK_STATE_UP,
2426 			    ("queue can't be marked as hung if interface is down"));
2427 			txq->ift_qstatus = IFLIB_QUEUE_HUNG;
2428 		}
2429 		txq->ift_cleaned_prev = txq->ift_cleaned;
2430 	}
2431 	/* handle any laggards */
2432 	if (txq->ift_db_pending)
2433 		GROUPTASK_ENQUEUE(&txq->ift_task);
2434 
2435 	sctx->isc_pause_frames = 0;
2436 	if (if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)
2437 		callout_reset_on(&txq->ift_timer, iflib_timer_default, iflib_timer,
2438 		    txq, txq->ift_timer.c_cpu);
2439 	return;
2440 
2441  hung:
2442 	device_printf(ctx->ifc_dev,
2443 	    "Watchdog timeout (TX: %d desc avail: %d pidx: %d) -- resetting\n",
2444 	    txq->ift_id, TXQ_AVAIL(txq), txq->ift_pidx);
2445 	STATE_LOCK(ctx);
2446 	if_setdrvflagbits(ctx->ifc_ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
2447 	ctx->ifc_flags |= (IFC_DO_WATCHDOG|IFC_DO_RESET);
2448 	iflib_admin_intr_deferred(ctx);
2449 	STATE_UNLOCK(ctx);
2450 }
2451 
2452 static uint16_t
2453 iflib_get_mbuf_size_for(unsigned int size)
2454 {
2455 
2456 	if (size <= MCLBYTES)
2457 		return (MCLBYTES);
2458 	else
2459 		return (MJUMPAGESIZE);
2460 }
2461 
2462 static void
2463 iflib_calc_rx_mbuf_sz(if_ctx_t ctx)
2464 {
2465 	if_softc_ctx_t sctx = &ctx->ifc_softc_ctx;
2466 
2467 	/*
2468 	 * XXX don't set the max_frame_size to larger
2469 	 * than the hardware can handle
2470 	 */
2471 	ctx->ifc_rx_mbuf_sz =
2472 	    iflib_get_mbuf_size_for(sctx->isc_max_frame_size);
2473 }
2474 
2475 uint32_t
2476 iflib_get_rx_mbuf_sz(if_ctx_t ctx)
2477 {
2478 
2479 	return (ctx->ifc_rx_mbuf_sz);
2480 }
2481 
2482 static void
2483 iflib_init_locked(if_ctx_t ctx)
2484 {
2485 	if_softc_ctx_t sctx = &ctx->ifc_softc_ctx;
2486 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
2487 	if_t ifp = ctx->ifc_ifp;
2488 	iflib_fl_t fl;
2489 	iflib_txq_t txq;
2490 	iflib_rxq_t rxq;
2491 	int i, j, tx_ip_csum_flags, tx_ip6_csum_flags;
2492 
2493 	if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
2494 	IFDI_INTR_DISABLE(ctx);
2495 
2496 	/*
2497 	 * See iflib_stop(). Useful in case iflib_init_locked() is
2498 	 * called without first calling iflib_stop().
2499 	 */
2500 	netmap_disable_all_rings(ifp);
2501 
2502 	tx_ip_csum_flags = scctx->isc_tx_csum_flags & (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_SCTP);
2503 	tx_ip6_csum_flags = scctx->isc_tx_csum_flags & (CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_SCTP);
2504 	/* Set hardware offload abilities */
2505 	if_clearhwassist(ifp);
2506 	if (if_getcapenable(ifp) & IFCAP_TXCSUM)
2507 		if_sethwassistbits(ifp, tx_ip_csum_flags, 0);
2508 	if (if_getcapenable(ifp) & IFCAP_TXCSUM_IPV6)
2509 		if_sethwassistbits(ifp,  tx_ip6_csum_flags, 0);
2510 	if (if_getcapenable(ifp) & IFCAP_TSO4)
2511 		if_sethwassistbits(ifp, CSUM_IP_TSO, 0);
2512 	if (if_getcapenable(ifp) & IFCAP_TSO6)
2513 		if_sethwassistbits(ifp, CSUM_IP6_TSO, 0);
2514 
2515 	for (i = 0, txq = ctx->ifc_txqs; i < sctx->isc_ntxqsets; i++, txq++) {
2516 		CALLOUT_LOCK(txq);
2517 		callout_stop(&txq->ift_timer);
2518 #ifdef DEV_NETMAP
2519 		callout_stop(&txq->ift_netmap_timer);
2520 #endif /* DEV_NETMAP */
2521 		CALLOUT_UNLOCK(txq);
2522 		(void)iflib_netmap_txq_init(ctx, txq);
2523 	}
2524 
2525 	/*
2526 	 * Calculate a suitable Rx mbuf size prior to calling IFDI_INIT, so
2527 	 * that drivers can use the value when setting up the hardware receive
2528 	 * buffers.
2529 	 */
2530 	iflib_calc_rx_mbuf_sz(ctx);
2531 
2532 #ifdef INVARIANTS
2533 	i = if_getdrvflags(ifp);
2534 #endif
2535 	IFDI_INIT(ctx);
2536 	MPASS(if_getdrvflags(ifp) == i);
2537 	for (i = 0, rxq = ctx->ifc_rxqs; i < sctx->isc_nrxqsets; i++, rxq++) {
2538 		if (iflib_netmap_rxq_init(ctx, rxq) > 0) {
2539 			/* This rxq is in netmap mode. Skip normal init. */
2540 			continue;
2541 		}
2542 		for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) {
2543 			if (iflib_fl_setup(fl)) {
2544 				device_printf(ctx->ifc_dev,
2545 				    "setting up free list %d failed - "
2546 				    "check cluster settings\n", j);
2547 				goto done;
2548 			}
2549 		}
2550 	}
2551 done:
2552 	if_setdrvflagbits(ctx->ifc_ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
2553 	IFDI_INTR_ENABLE(ctx);
2554 	txq = ctx->ifc_txqs;
2555 	for (i = 0; i < sctx->isc_ntxqsets; i++, txq++)
2556 		callout_reset_on(&txq->ift_timer, iflib_timer_default, iflib_timer, txq,
2557 			txq->ift_timer.c_cpu);
2558 
2559 	/* Re-enable txsync/rxsync. */
2560 	netmap_enable_all_rings(ifp);
2561 }
2562 
2563 static int
2564 iflib_media_change(if_t ifp)
2565 {
2566 	if_ctx_t ctx = if_getsoftc(ifp);
2567 	int err;
2568 
2569 	CTX_LOCK(ctx);
2570 	if ((err = IFDI_MEDIA_CHANGE(ctx)) == 0)
2571 		iflib_if_init_locked(ctx);
2572 	CTX_UNLOCK(ctx);
2573 	return (err);
2574 }
2575 
2576 static void
2577 iflib_media_status(if_t ifp, struct ifmediareq *ifmr)
2578 {
2579 	if_ctx_t ctx = if_getsoftc(ifp);
2580 
2581 	CTX_LOCK(ctx);
2582 	IFDI_UPDATE_ADMIN_STATUS(ctx);
2583 	IFDI_MEDIA_STATUS(ctx, ifmr);
2584 	CTX_UNLOCK(ctx);
2585 }
2586 
2587 void
2588 iflib_stop(if_ctx_t ctx)
2589 {
2590 	iflib_txq_t txq = ctx->ifc_txqs;
2591 	iflib_rxq_t rxq = ctx->ifc_rxqs;
2592 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
2593 	if_shared_ctx_t sctx = ctx->ifc_sctx;
2594 	iflib_dma_info_t di;
2595 	iflib_fl_t fl;
2596 	int i, j;
2597 
2598 	/* Tell the stack that the interface is no longer active */
2599 	if_setdrvflagbits(ctx->ifc_ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
2600 
2601 	IFDI_INTR_DISABLE(ctx);
2602 	DELAY(1000);
2603 	IFDI_STOP(ctx);
2604 	DELAY(1000);
2605 
2606 	/*
2607 	 * Stop any pending txsync/rxsync and prevent new ones
2608 	 * from starting. Processes blocked in poll() will get
2609 	 * POLLERR.
2610 	 */
2611 	netmap_disable_all_rings(ctx->ifc_ifp);
2612 
2613 	iflib_debug_reset();
2614 	/* Wait for current tx queue users to exit to disarm watchdog timer. */
2615 	for (i = 0; i < scctx->isc_ntxqsets; i++, txq++) {
2616 		/* make sure all transmitters have completed before proceeding XXX */
2617 
2618 		CALLOUT_LOCK(txq);
2619 		callout_stop(&txq->ift_timer);
2620 #ifdef DEV_NETMAP
2621 		callout_stop(&txq->ift_netmap_timer);
2622 #endif /* DEV_NETMAP */
2623 		CALLOUT_UNLOCK(txq);
2624 
2625 		/* clean any enqueued buffers */
2626 		iflib_ifmp_purge(txq);
2627 		/* Free any existing tx buffers. */
2628 		for (j = 0; j < txq->ift_size; j++) {
2629 			iflib_txsd_free(ctx, txq, j);
2630 		}
2631 		txq->ift_processed = txq->ift_cleaned = txq->ift_cidx_processed = 0;
2632 		txq->ift_in_use = txq->ift_gen = txq->ift_no_desc_avail = 0;
2633 		if (sctx->isc_flags & IFLIB_PRESERVE_TX_INDICES)
2634 			txq->ift_cidx = txq->ift_pidx;
2635 		else
2636 			txq->ift_cidx = txq->ift_pidx = 0;
2637 
2638 		txq->ift_closed = txq->ift_mbuf_defrag = txq->ift_mbuf_defrag_failed = 0;
2639 		txq->ift_no_tx_dma_setup = txq->ift_txd_encap_efbig = txq->ift_map_failed = 0;
2640 		txq->ift_pullups = 0;
2641 		ifmp_ring_reset_stats(txq->ift_br);
2642 		for (j = 0, di = txq->ift_ifdi; j < sctx->isc_ntxqs; j++, di++)
2643 			bzero((void *)di->idi_vaddr, di->idi_size);
2644 	}
2645 	for (i = 0; i < scctx->isc_nrxqsets; i++, rxq++) {
2646 		gtaskqueue_drain(rxq->ifr_task.gt_taskqueue,
2647 		    &rxq->ifr_task.gt_task);
2648 
2649 		rxq->ifr_cq_cidx = 0;
2650 		for (j = 0, di = rxq->ifr_ifdi; j < sctx->isc_nrxqs; j++, di++)
2651 			bzero((void *)di->idi_vaddr, di->idi_size);
2652 		/* also resets the free lists pidx/cidx */
2653 		for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++)
2654 			iflib_fl_bufs_free(fl);
2655 	}
2656 }
2657 
2658 static inline caddr_t
2659 calc_next_rxd(iflib_fl_t fl, int cidx)
2660 {
2661 	qidx_t size;
2662 	int nrxd;
2663 	caddr_t start, end, cur, next;
2664 
2665 	nrxd = fl->ifl_size;
2666 	size = fl->ifl_rxd_size;
2667 	start = fl->ifl_ifdi->idi_vaddr;
2668 
2669 	if (__predict_false(size == 0))
2670 		return (start);
2671 	cur = start + size*cidx;
2672 	end = start + size*nrxd;
2673 	next = CACHE_PTR_NEXT(cur);
2674 	return (next < end ? next : start);
2675 }
2676 
2677 static inline void
2678 prefetch_pkts(iflib_fl_t fl, int cidx)
2679 {
2680 	int nextptr;
2681 	int nrxd = fl->ifl_size;
2682 	caddr_t next_rxd;
2683 
2684 	nextptr = (cidx + CACHE_PTR_INCREMENT) & (nrxd-1);
2685 	prefetch(&fl->ifl_sds.ifsd_m[nextptr]);
2686 	prefetch(&fl->ifl_sds.ifsd_cl[nextptr]);
2687 	next_rxd = calc_next_rxd(fl, cidx);
2688 	prefetch(next_rxd);
2689 	prefetch(fl->ifl_sds.ifsd_m[(cidx + 1) & (nrxd-1)]);
2690 	prefetch(fl->ifl_sds.ifsd_m[(cidx + 2) & (nrxd-1)]);
2691 	prefetch(fl->ifl_sds.ifsd_m[(cidx + 3) & (nrxd-1)]);
2692 	prefetch(fl->ifl_sds.ifsd_m[(cidx + 4) & (nrxd-1)]);
2693 	prefetch(fl->ifl_sds.ifsd_cl[(cidx + 1) & (nrxd-1)]);
2694 	prefetch(fl->ifl_sds.ifsd_cl[(cidx + 2) & (nrxd-1)]);
2695 	prefetch(fl->ifl_sds.ifsd_cl[(cidx + 3) & (nrxd-1)]);
2696 	prefetch(fl->ifl_sds.ifsd_cl[(cidx + 4) & (nrxd-1)]);
2697 }
2698 
2699 static struct mbuf *
2700 rxd_frag_to_sd(iflib_rxq_t rxq, if_rxd_frag_t irf, bool unload, if_rxsd_t sd,
2701     int *pf_rv, if_rxd_info_t ri)
2702 {
2703 	bus_dmamap_t map;
2704 	iflib_fl_t fl;
2705 	caddr_t payload;
2706 	struct mbuf *m;
2707 	int flid, cidx, len, next;
2708 
2709 	map = NULL;
2710 	flid = irf->irf_flid;
2711 	cidx = irf->irf_idx;
2712 	fl = &rxq->ifr_fl[flid];
2713 	sd->ifsd_fl = fl;
2714 	m = fl->ifl_sds.ifsd_m[cidx];
2715 	sd->ifsd_cl = &fl->ifl_sds.ifsd_cl[cidx];
2716 	fl->ifl_credits--;
2717 #if MEMORY_LOGGING
2718 	fl->ifl_m_dequeued++;
2719 #endif
2720 	if (rxq->ifr_ctx->ifc_flags & IFC_PREFETCH)
2721 		prefetch_pkts(fl, cidx);
2722 	next = (cidx + CACHE_PTR_INCREMENT) & (fl->ifl_size-1);
2723 	prefetch(&fl->ifl_sds.ifsd_map[next]);
2724 	map = fl->ifl_sds.ifsd_map[cidx];
2725 
2726 	bus_dmamap_sync(fl->ifl_buf_tag, map, BUS_DMASYNC_POSTREAD);
2727 
2728 	if (rxq->pfil != NULL && PFIL_HOOKED_IN(rxq->pfil) && pf_rv != NULL &&
2729 	    irf->irf_len != 0) {
2730 		payload  = *sd->ifsd_cl;
2731 		payload +=  ri->iri_pad;
2732 		len = ri->iri_len - ri->iri_pad;
2733 		*pf_rv = pfil_run_hooks(rxq->pfil, payload, ri->iri_ifp,
2734 		    len | PFIL_MEMPTR | PFIL_IN, NULL);
2735 		switch (*pf_rv) {
2736 		case PFIL_DROPPED:
2737 		case PFIL_CONSUMED:
2738 			/*
2739 			 * The filter ate it.  Everything is recycled.
2740 			 */
2741 			m = NULL;
2742 			unload = 0;
2743 			break;
2744 		case PFIL_REALLOCED:
2745 			/*
2746 			 * The filter copied it.  Everything is recycled.
2747 			 */
2748 			m = pfil_mem2mbuf(payload);
2749 			unload = 0;
2750 			break;
2751 		case PFIL_PASS:
2752 			/*
2753 			 * Filter said it was OK, so receive like
2754 			 * normal
2755 			 */
2756 			fl->ifl_sds.ifsd_m[cidx] = NULL;
2757 			break;
2758 		default:
2759 			MPASS(0);
2760 		}
2761 	} else {
2762 		fl->ifl_sds.ifsd_m[cidx] = NULL;
2763 		if (pf_rv != NULL)
2764 			*pf_rv = PFIL_PASS;
2765 	}
2766 
2767 	if (unload && irf->irf_len != 0)
2768 		bus_dmamap_unload(fl->ifl_buf_tag, map);
2769 	fl->ifl_cidx = (fl->ifl_cidx + 1) & (fl->ifl_size-1);
2770 	if (__predict_false(fl->ifl_cidx == 0))
2771 		fl->ifl_gen = 0;
2772 	bit_clear(fl->ifl_rx_bitmap, cidx);
2773 	return (m);
2774 }
2775 
2776 static struct mbuf *
2777 assemble_segments(iflib_rxq_t rxq, if_rxd_info_t ri, if_rxsd_t sd, int *pf_rv)
2778 {
2779 	struct mbuf *m, *mh, *mt;
2780 	caddr_t cl;
2781 	int  *pf_rv_ptr, flags, i, padlen;
2782 	bool consumed;
2783 
2784 	i = 0;
2785 	mh = NULL;
2786 	consumed = false;
2787 	*pf_rv = PFIL_PASS;
2788 	pf_rv_ptr = pf_rv;
2789 	do {
2790 		m = rxd_frag_to_sd(rxq, &ri->iri_frags[i], !consumed, sd,
2791 		    pf_rv_ptr, ri);
2792 
2793 		MPASS(*sd->ifsd_cl != NULL);
2794 
2795 		/*
2796 		 * Exclude zero-length frags & frags from
2797 		 * packets the filter has consumed or dropped
2798 		 */
2799 		if (ri->iri_frags[i].irf_len == 0 || consumed ||
2800 		    *pf_rv == PFIL_CONSUMED || *pf_rv == PFIL_DROPPED) {
2801 			if (mh == NULL) {
2802 				/* everything saved here */
2803 				consumed = true;
2804 				pf_rv_ptr = NULL;
2805 				continue;
2806 			}
2807 			/* XXX we can save the cluster here, but not the mbuf */
2808 			m_init(m, M_NOWAIT, MT_DATA, 0);
2809 			m_free(m);
2810 			continue;
2811 		}
2812 		if (mh == NULL) {
2813 			flags = M_PKTHDR|M_EXT;
2814 			mh = mt = m;
2815 			padlen = ri->iri_pad;
2816 		} else {
2817 			flags = M_EXT;
2818 			mt->m_next = m;
2819 			mt = m;
2820 			/* assuming padding is only on the first fragment */
2821 			padlen = 0;
2822 		}
2823 		cl = *sd->ifsd_cl;
2824 		*sd->ifsd_cl = NULL;
2825 
2826 		/* Can these two be made one? */
2827 		m_init(m, M_NOWAIT, MT_DATA, flags);
2828 		m_cljset(m, cl, sd->ifsd_fl->ifl_cltype);
2829 		/*
2830 		 * These must follow m_init and m_cljset
2831 		 */
2832 		m->m_data += padlen;
2833 		ri->iri_len -= padlen;
2834 		m->m_len = ri->iri_frags[i].irf_len;
2835 	} while (++i < ri->iri_nfrags);
2836 
2837 	return (mh);
2838 }
2839 
2840 /*
2841  * Process one software descriptor
2842  */
2843 static struct mbuf *
2844 iflib_rxd_pkt_get(iflib_rxq_t rxq, if_rxd_info_t ri)
2845 {
2846 	struct if_rxsd sd;
2847 	struct mbuf *m;
2848 	int pf_rv;
2849 
2850 	/* should I merge this back in now that the two paths are basically duplicated? */
2851 	if (ri->iri_nfrags == 1 &&
2852 	    ri->iri_frags[0].irf_len != 0 &&
2853 	    ri->iri_frags[0].irf_len <= MIN(IFLIB_RX_COPY_THRESH, MHLEN)) {
2854 		m = rxd_frag_to_sd(rxq, &ri->iri_frags[0], false, &sd,
2855 		    &pf_rv, ri);
2856 		if (pf_rv != PFIL_PASS && pf_rv != PFIL_REALLOCED)
2857 			return (m);
2858 		if (pf_rv == PFIL_PASS) {
2859 			m_init(m, M_NOWAIT, MT_DATA, M_PKTHDR);
2860 #ifndef __NO_STRICT_ALIGNMENT
2861 			if (!IP_ALIGNED(m) && ri->iri_pad == 0)
2862 				m->m_data += 2;
2863 #endif
2864 			memcpy(m->m_data, *sd.ifsd_cl, ri->iri_len);
2865 			m->m_len = ri->iri_frags[0].irf_len;
2866 			m->m_data += ri->iri_pad;
2867 			ri->iri_len -= ri->iri_pad;
2868 		}
2869 	} else {
2870 		m = assemble_segments(rxq, ri, &sd, &pf_rv);
2871 		if (m == NULL)
2872 			return (NULL);
2873 		if (pf_rv != PFIL_PASS && pf_rv != PFIL_REALLOCED)
2874 			return (m);
2875 	}
2876 	m->m_pkthdr.len = ri->iri_len;
2877 	m->m_pkthdr.rcvif = ri->iri_ifp;
2878 	m->m_flags |= ri->iri_flags;
2879 	m->m_pkthdr.ether_vtag = ri->iri_vtag;
2880 	m->m_pkthdr.flowid = ri->iri_flowid;
2881 	M_HASHTYPE_SET(m, ri->iri_rsstype);
2882 	m->m_pkthdr.csum_flags = ri->iri_csum_flags;
2883 	m->m_pkthdr.csum_data = ri->iri_csum_data;
2884 	return (m);
2885 }
2886 
2887 #if defined(INET6) || defined(INET)
2888 static void
2889 iflib_get_ip_forwarding(struct lro_ctrl *lc, bool *v4, bool *v6)
2890 {
2891 	CURVNET_SET(lc->ifp->if_vnet);
2892 #if defined(INET6)
2893 	*v6 = V_ip6_forwarding;
2894 #endif
2895 #if defined(INET)
2896 	*v4 = V_ipforwarding;
2897 #endif
2898 	CURVNET_RESTORE();
2899 }
2900 
2901 /*
2902  * Returns true if it's possible this packet could be LROed.
2903  * If it returns false, it is guaranteed that tcp_lro_rx()
2904  * would not return zero.
2905  */
2906 static bool
2907 iflib_check_lro_possible(struct mbuf *m, bool v4_forwarding, bool v6_forwarding)
2908 {
2909 	struct ether_header *eh;
2910 
2911 	eh = mtod(m, struct ether_header *);
2912 	switch (eh->ether_type) {
2913 #if defined(INET6)
2914 		case htons(ETHERTYPE_IPV6):
2915 			return (!v6_forwarding);
2916 #endif
2917 #if defined (INET)
2918 		case htons(ETHERTYPE_IP):
2919 			return (!v4_forwarding);
2920 #endif
2921 	}
2922 
2923 	return (false);
2924 }
2925 #else
2926 static void
2927 iflib_get_ip_forwarding(struct lro_ctrl *lc __unused, bool *v4 __unused, bool *v6 __unused)
2928 {
2929 }
2930 #endif
2931 
2932 static void
2933 _task_fn_rx_watchdog(void *context)
2934 {
2935 	iflib_rxq_t rxq = context;
2936 
2937 	GROUPTASK_ENQUEUE(&rxq->ifr_task);
2938 }
2939 
2940 static uint8_t
2941 iflib_rxeof(iflib_rxq_t rxq, qidx_t budget)
2942 {
2943 	if_t ifp;
2944 	if_ctx_t ctx = rxq->ifr_ctx;
2945 	if_shared_ctx_t sctx = ctx->ifc_sctx;
2946 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
2947 	int avail, i;
2948 	qidx_t *cidxp;
2949 	struct if_rxd_info ri;
2950 	int err, budget_left, rx_bytes, rx_pkts;
2951 	iflib_fl_t fl;
2952 	int lro_enabled;
2953 	bool v4_forwarding, v6_forwarding, lro_possible;
2954 	uint8_t retval = 0;
2955 
2956 	/*
2957 	 * XXX early demux data packets so that if_input processing only handles
2958 	 * acks in interrupt context
2959 	 */
2960 	struct mbuf *m, *mh, *mt, *mf;
2961 
2962 	NET_EPOCH_ASSERT();
2963 
2964 	lro_possible = v4_forwarding = v6_forwarding = false;
2965 	ifp = ctx->ifc_ifp;
2966 	mh = mt = NULL;
2967 	MPASS(budget > 0);
2968 	rx_pkts	= rx_bytes = 0;
2969 	if (sctx->isc_flags & IFLIB_HAS_RXCQ)
2970 		cidxp = &rxq->ifr_cq_cidx;
2971 	else
2972 		cidxp = &rxq->ifr_fl[0].ifl_cidx;
2973 	if ((avail = iflib_rxd_avail(ctx, rxq, *cidxp, budget)) == 0) {
2974 		for (i = 0, fl = &rxq->ifr_fl[0]; i < sctx->isc_nfl; i++, fl++)
2975 			retval |= iflib_fl_refill_all(ctx, fl);
2976 		DBG_COUNTER_INC(rx_unavail);
2977 		return (retval);
2978 	}
2979 
2980 	/* pfil needs the vnet to be set */
2981 	CURVNET_SET_QUIET(ifp->if_vnet);
2982 	for (budget_left = budget; budget_left > 0 && avail > 0;) {
2983 		if (__predict_false(!CTX_ACTIVE(ctx))) {
2984 			DBG_COUNTER_INC(rx_ctx_inactive);
2985 			break;
2986 		}
2987 		/*
2988 		 * Reset client set fields to their default values
2989 		 */
2990 		rxd_info_zero(&ri);
2991 		ri.iri_qsidx = rxq->ifr_id;
2992 		ri.iri_cidx = *cidxp;
2993 		ri.iri_ifp = ifp;
2994 		ri.iri_frags = rxq->ifr_frags;
2995 		err = ctx->isc_rxd_pkt_get(ctx->ifc_softc, &ri);
2996 
2997 		if (err)
2998 			goto err;
2999 		rx_pkts += 1;
3000 		rx_bytes += ri.iri_len;
3001 		if (sctx->isc_flags & IFLIB_HAS_RXCQ) {
3002 			*cidxp = ri.iri_cidx;
3003 			/* Update our consumer index */
3004 			/* XXX NB: shurd - check if this is still safe */
3005 			while (rxq->ifr_cq_cidx >= scctx->isc_nrxd[0])
3006 				rxq->ifr_cq_cidx -= scctx->isc_nrxd[0];
3007 			/* was this only a completion queue message? */
3008 			if (__predict_false(ri.iri_nfrags == 0))
3009 				continue;
3010 		}
3011 		MPASS(ri.iri_nfrags != 0);
3012 		MPASS(ri.iri_len != 0);
3013 
3014 		/* will advance the cidx on the corresponding free lists */
3015 		m = iflib_rxd_pkt_get(rxq, &ri);
3016 		avail--;
3017 		budget_left--;
3018 		if (avail == 0 && budget_left)
3019 			avail = iflib_rxd_avail(ctx, rxq, *cidxp, budget_left);
3020 
3021 		if (__predict_false(m == NULL))
3022 			continue;
3023 
3024 		/* imm_pkt: -- cxgb */
3025 		if (mh == NULL)
3026 			mh = mt = m;
3027 		else {
3028 			mt->m_nextpkt = m;
3029 			mt = m;
3030 		}
3031 	}
3032 	CURVNET_RESTORE();
3033 	/* make sure that we can refill faster than drain */
3034 	for (i = 0, fl = &rxq->ifr_fl[0]; i < sctx->isc_nfl; i++, fl++)
3035 		retval |= iflib_fl_refill_all(ctx, fl);
3036 
3037 	lro_enabled = (if_getcapenable(ifp) & IFCAP_LRO);
3038 	if (lro_enabled)
3039 		iflib_get_ip_forwarding(&rxq->ifr_lc, &v4_forwarding, &v6_forwarding);
3040 	mt = mf = NULL;
3041 	while (mh != NULL) {
3042 		m = mh;
3043 		mh = mh->m_nextpkt;
3044 		m->m_nextpkt = NULL;
3045 #ifndef __NO_STRICT_ALIGNMENT
3046 		if (!IP_ALIGNED(m) && (m = iflib_fixup_rx(m)) == NULL)
3047 			continue;
3048 #endif
3049 #if defined(INET6) || defined(INET)
3050 		if (lro_enabled) {
3051 			if (!lro_possible) {
3052 				lro_possible = iflib_check_lro_possible(m, v4_forwarding, v6_forwarding);
3053 				if (lro_possible && mf != NULL) {
3054 					ifp->if_input(ifp, mf);
3055 					DBG_COUNTER_INC(rx_if_input);
3056 					mt = mf = NULL;
3057 				}
3058 			}
3059 			if ((m->m_pkthdr.csum_flags & (CSUM_L4_CALC|CSUM_L4_VALID)) ==
3060 			    (CSUM_L4_CALC|CSUM_L4_VALID)) {
3061 				if (lro_possible && tcp_lro_rx(&rxq->ifr_lc, m, 0) == 0)
3062 					continue;
3063 			}
3064 		}
3065 #endif
3066 		if (lro_possible) {
3067 			ifp->if_input(ifp, m);
3068 			DBG_COUNTER_INC(rx_if_input);
3069 			continue;
3070 		}
3071 
3072 		if (mf == NULL)
3073 			mf = m;
3074 		if (mt != NULL)
3075 			mt->m_nextpkt = m;
3076 		mt = m;
3077 	}
3078 	if (mf != NULL) {
3079 		ifp->if_input(ifp, mf);
3080 		DBG_COUNTER_INC(rx_if_input);
3081 	}
3082 
3083 	if_inc_counter(ifp, IFCOUNTER_IBYTES, rx_bytes);
3084 	if_inc_counter(ifp, IFCOUNTER_IPACKETS, rx_pkts);
3085 
3086 	/*
3087 	 * Flush any outstanding LRO work
3088 	 */
3089 #if defined(INET6) || defined(INET)
3090 	tcp_lro_flush_all(&rxq->ifr_lc);
3091 #endif
3092 	if (avail != 0 || iflib_rxd_avail(ctx, rxq, *cidxp, 1) != 0)
3093 		retval |= IFLIB_RXEOF_MORE;
3094 	return (retval);
3095 err:
3096 	STATE_LOCK(ctx);
3097 	ctx->ifc_flags |= IFC_DO_RESET;
3098 	iflib_admin_intr_deferred(ctx);
3099 	STATE_UNLOCK(ctx);
3100 	return (0);
3101 }
3102 
3103 #define TXD_NOTIFY_COUNT(txq) (((txq)->ift_size / (txq)->ift_update_freq)-1)
3104 static inline qidx_t
3105 txq_max_db_deferred(iflib_txq_t txq, qidx_t in_use)
3106 {
3107 	qidx_t notify_count = TXD_NOTIFY_COUNT(txq);
3108 	qidx_t minthresh = txq->ift_size / 8;
3109 	if (in_use > 4*minthresh)
3110 		return (notify_count);
3111 	if (in_use > 2*minthresh)
3112 		return (notify_count >> 1);
3113 	if (in_use > minthresh)
3114 		return (notify_count >> 3);
3115 	return (0);
3116 }
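
/*
 * A worked example with hypothetical sizes: for ift_size = 1024 and
 * ift_update_freq = 16, TXD_NOTIFY_COUNT() is (1024 / 16) - 1 = 63 and
 * minthresh is 1024 / 8 = 128.  A busy queue with 600 descriptors in
 * use (> 4 * minthresh) may defer up to 63 descriptors before ringing
 * the doorbell, while a mostly idle queue (in_use <= 128) defers none.
 */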
3117 
3118 static inline qidx_t
3119 txq_max_rs_deferred(iflib_txq_t txq)
3120 {
3121 	qidx_t notify_count = TXD_NOTIFY_COUNT(txq);
3122 	qidx_t minthresh = txq->ift_size / 8;
3123 	if (txq->ift_in_use > 4*minthresh)
3124 		return (notify_count);
3125 	if (txq->ift_in_use > 2*minthresh)
3126 		return (notify_count >> 1);
3127 	if (txq->ift_in_use > minthresh)
3128 		return (notify_count >> 2);
3129 	return (2);
3130 }
3131 
3132 #define M_CSUM_FLAGS(m) ((m)->m_pkthdr.csum_flags)
3133 #define M_HAS_VLANTAG(m) ((m)->m_flags & M_VLANTAG)
3134 
3135 #define TXQ_MAX_DB_DEFERRED(txq, in_use) txq_max_db_deferred((txq), (in_use))
3136 #define TXQ_MAX_RS_DEFERRED(txq) txq_max_rs_deferred(txq)
3137 #define TXQ_MAX_DB_CONSUMED(size) ((size) >> 4)
3138 
3139 /* forward compatibility for cxgb */
3140 #define FIRST_QSET(ctx) 0
3141 #define NTXQSETS(ctx) ((ctx)->ifc_softc_ctx.isc_ntxqsets)
3142 #define NRXQSETS(ctx) ((ctx)->ifc_softc_ctx.isc_nrxqsets)
3143 #define QIDX(ctx, m) ((((m)->m_pkthdr.flowid & ctx->ifc_softc_ctx.isc_rss_table_mask) % NTXQSETS(ctx)) + FIRST_QSET(ctx))
3144 #define DESC_RECLAIMABLE(q) ((int)((q)->ift_processed - (q)->ift_cleaned - (q)->ift_ctx->ifc_softc_ctx.isc_tx_nsegments))
3145 
3146 /* XXX we should be setting this to something other than zero */
3147 #define RECLAIM_THRESH(ctx) ((ctx)->ifc_sctx->isc_tx_reclaim_thresh)
3148 #define	MAX_TX_DESC(ctx) MAX((ctx)->ifc_softc_ctx.isc_tx_tso_segments_max, \
3149     (ctx)->ifc_softc_ctx.isc_tx_nsegments)
3150 
3151 static inline bool
3152 iflib_txd_db_check(iflib_txq_t txq, int ring)
3153 {
3154 	if_ctx_t ctx = txq->ift_ctx;
3155 	qidx_t dbval, max;
3156 
3157 	max = TXQ_MAX_DB_DEFERRED(txq, txq->ift_in_use);
3158 
3159 	/* force || threshold exceeded || at the edge of the ring */
3160 	if (ring || (txq->ift_db_pending >= max) || (TXQ_AVAIL(txq) <= MAX_TX_DESC(ctx) + 2)) {
3161 
3162 		/*
3163 		 * 'npending' is used if the card's doorbell is expressed as the number of descriptors
3164 		 * pending flush (BRCM). 'pidx' is used in cases where the card's doorbell uses the
3165 		 * producer index explicitly (INTC).
3166 		 */
3167 		dbval = txq->ift_npending ? txq->ift_npending : txq->ift_pidx;
3168 		bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map,
3169 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3170 		ctx->isc_txd_flush(ctx->ifc_softc, txq->ift_id, dbval);
3171 
3172 		/*
3173 		 * Absent bugs, there are zero packets pending, so reset the pending counts to zero.
3174 		 */
3175 		txq->ift_db_pending = txq->ift_npending = 0;
3176 		return (true);
3177 	}
3178 	return (false);
3179 }
3180 
3181 #ifdef PKT_DEBUG
3182 static void
3183 print_pkt(if_pkt_info_t pi)
3184 {
3185 	printf("pi len:  %d qsidx: %d nsegs: %d ndescs: %d flags: %x pidx: %d\n",
3186 	       pi->ipi_len, pi->ipi_qsidx, pi->ipi_nsegs, pi->ipi_ndescs, pi->ipi_flags, pi->ipi_pidx);
3187 	printf("pi new_pidx: %d csum_flags: %lx tso_segsz: %d mflags: %x vtag: %d\n",
3188 	       pi->ipi_new_pidx, pi->ipi_csum_flags, pi->ipi_tso_segsz, pi->ipi_mflags, pi->ipi_vtag);
3189 	printf("pi etype: %d ehdrlen: %d ip_hlen: %d ipproto: %d\n",
3190 	       pi->ipi_etype, pi->ipi_ehdrlen, pi->ipi_ip_hlen, pi->ipi_ipproto);
3191 }
3192 #endif
3193 
3194 #define IS_TSO4(pi) ((pi)->ipi_csum_flags & CSUM_IP_TSO)
3195 #define IS_TX_OFFLOAD4(pi) ((pi)->ipi_csum_flags & (CSUM_IP_TCP | CSUM_IP_TSO))
3196 #define IS_TSO6(pi) ((pi)->ipi_csum_flags & CSUM_IP6_TSO)
3197 #define IS_TX_OFFLOAD6(pi) ((pi)->ipi_csum_flags & (CSUM_IP6_TCP | CSUM_IP6_TSO))
3198 
3199 static int
3200 iflib_parse_header(iflib_txq_t txq, if_pkt_info_t pi, struct mbuf **mp)
3201 {
3202 	if_shared_ctx_t sctx = txq->ift_ctx->ifc_sctx;
3203 	struct ether_vlan_header *eh;
3204 	struct mbuf *m;
3205 
3206 	m = *mp;
3207 	if ((sctx->isc_flags & IFLIB_NEED_SCRATCH) &&
3208 	    M_WRITABLE(m) == 0) {
3209 		if ((m = m_dup(m, M_NOWAIT)) == NULL) {
3210 			return (ENOMEM);
3211 		} else {
3212 			m_freem(*mp);
3213 			DBG_COUNTER_INC(tx_frees);
3214 			*mp = m;
3215 		}
3216 	}
3217 
3218 	/*
3219 	 * Determine where frame payload starts.
3220 	 * Jump over vlan headers if already present,
3221 	 * helpful for QinQ too.
3222 	 */
3223 	if (__predict_false(m->m_len < sizeof(*eh))) {
3224 		txq->ift_pullups++;
3225 		if (__predict_false((m = m_pullup(m, sizeof(*eh))) == NULL))
3226 			return (ENOMEM);
3227 	}
3228 	eh = mtod(m, struct ether_vlan_header *);
3229 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
3230 		pi->ipi_etype = ntohs(eh->evl_proto);
3231 		pi->ipi_ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
3232 	} else {
3233 		pi->ipi_etype = ntohs(eh->evl_encap_proto);
3234 		pi->ipi_ehdrlen = ETHER_HDR_LEN;
3235 	}
3236 
3237 	switch (pi->ipi_etype) {
3238 #ifdef INET
3239 	case ETHERTYPE_IP:
3240 	{
3241 		struct mbuf *n;
3242 		struct ip *ip = NULL;
3243 		struct tcphdr *th = NULL;
3244 		int minthlen;
3245 
3246 		minthlen = min(m->m_pkthdr.len, pi->ipi_ehdrlen + sizeof(*ip) + sizeof(*th));
3247 		if (__predict_false(m->m_len < minthlen)) {
3248 			/*
3249 			 * if this code bloat is causing too much of a hit
3250 			 * move it to a separate function and mark it noinline
3251 			 */
3252 			if (m->m_len == pi->ipi_ehdrlen) {
3253 				n = m->m_next;
3254 				MPASS(n);
3255 				if (n->m_len >= sizeof(*ip))  {
3256 					ip = (struct ip *)n->m_data;
3257 					if (n->m_len >= (ip->ip_hl << 2) + sizeof(*th))
3258 						th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
3259 				} else {
3260 					txq->ift_pullups++;
3261 					if (__predict_false((m = m_pullup(m, minthlen)) == NULL))
3262 						return (ENOMEM);
3263 					ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen);
3264 				}
3265 			} else {
3266 				txq->ift_pullups++;
3267 				if (__predict_false((m = m_pullup(m, minthlen)) == NULL))
3268 					return (ENOMEM);
3269 				ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen);
3270 				if (m->m_len >= (ip->ip_hl << 2) + sizeof(*th))
3271 					th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
3272 			}
3273 		} else {
3274 			ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen);
3275 			if (m->m_len >= (ip->ip_hl << 2) + sizeof(*th))
3276 				th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
3277 		}
3278 		pi->ipi_ip_hlen = ip->ip_hl << 2;
3279 		pi->ipi_ipproto = ip->ip_p;
3280 		pi->ipi_flags |= IPI_TX_IPV4;
3281 
3282 		/* TCP checksum offload may require TCP header length */
3283 		if (IS_TX_OFFLOAD4(pi)) {
3284 			if (__predict_true(pi->ipi_ipproto == IPPROTO_TCP)) {
3285 				if (__predict_false(th == NULL)) {
3286 					txq->ift_pullups++;
3287 					if (__predict_false((m = m_pullup(m, (ip->ip_hl << 2) + sizeof(*th))) == NULL))
3288 						return (ENOMEM);
3289 					th = (struct tcphdr *)((caddr_t)ip + pi->ipi_ip_hlen);
3290 				}
3291 				pi->ipi_tcp_hflags = th->th_flags;
3292 				pi->ipi_tcp_hlen = th->th_off << 2;
3293 				pi->ipi_tcp_seq = th->th_seq;
3294 			}
3295 			if (IS_TSO4(pi)) {
3296 				if (__predict_false(ip->ip_p != IPPROTO_TCP))
3297 					return (ENXIO);
3298 				/*
3299 				 * TSO always requires hardware checksum offload.
3300 				 */
3301 				pi->ipi_csum_flags |= (CSUM_IP_TCP | CSUM_IP);
3302 				th->th_sum = in_pseudo(ip->ip_src.s_addr,
3303 						       ip->ip_dst.s_addr, htons(IPPROTO_TCP));
3304 				pi->ipi_tso_segsz = m->m_pkthdr.tso_segsz;
3305 				if (sctx->isc_flags & IFLIB_TSO_INIT_IP) {
3306 					ip->ip_sum = 0;
3307 					ip->ip_len = htons(pi->ipi_ip_hlen + pi->ipi_tcp_hlen + pi->ipi_tso_segsz);
3308 				}
3309 			}
3310 		}
3311 		if ((sctx->isc_flags & IFLIB_NEED_ZERO_CSUM) && (pi->ipi_csum_flags & CSUM_IP))
3312 			ip->ip_sum = 0;
3313 
3314 		break;
3315 	}
3316 #endif
3317 #ifdef INET6
3318 	case ETHERTYPE_IPV6:
3319 	{
3320 		struct ip6_hdr *ip6 = (struct ip6_hdr *)(m->m_data + pi->ipi_ehdrlen);
3321 		struct tcphdr *th;
3322 		pi->ipi_ip_hlen = sizeof(struct ip6_hdr);
3323 
3324 		if (__predict_false(m->m_len < pi->ipi_ehdrlen + sizeof(struct ip6_hdr))) {
3325 			txq->ift_pullups++;
3326 			if (__predict_false((m = m_pullup(m, pi->ipi_ehdrlen + sizeof(struct ip6_hdr))) == NULL))
3327 				return (ENOMEM);
3328 		}
3329 		th = (struct tcphdr *)((caddr_t)ip6 + pi->ipi_ip_hlen);
3330 
3331 		/* XXX-BZ this will go badly in case of ext hdrs. */
3332 		pi->ipi_ipproto = ip6->ip6_nxt;
3333 		pi->ipi_flags |= IPI_TX_IPV6;
3334 
3335 		/* TCP checksum offload may require TCP header length */
3336 		if (IS_TX_OFFLOAD6(pi)) {
3337 			if (pi->ipi_ipproto == IPPROTO_TCP) {
3338 				if (__predict_false(m->m_len < pi->ipi_ehdrlen + sizeof(struct ip6_hdr) + sizeof(struct tcphdr))) {
3339 					txq->ift_pullups++;
3340 					if (__predict_false((m = m_pullup(m, pi->ipi_ehdrlen + sizeof(struct ip6_hdr) + sizeof(struct tcphdr))) == NULL))
3341 						return (ENOMEM);
3342 				}
3343 				pi->ipi_tcp_hflags = th->th_flags;
3344 				pi->ipi_tcp_hlen = th->th_off << 2;
3345 				pi->ipi_tcp_seq = th->th_seq;
3346 			}
3347 			if (IS_TSO6(pi)) {
3348 				if (__predict_false(ip6->ip6_nxt != IPPROTO_TCP))
3349 					return (ENXIO);
3350 				/*
3351 				 * TSO always requires hardware checksum offload.
3352 				 */
3353 				pi->ipi_csum_flags |= CSUM_IP6_TCP;
3354 				th->th_sum = in6_cksum_pseudo(ip6, 0, IPPROTO_TCP, 0);
3355 				pi->ipi_tso_segsz = m->m_pkthdr.tso_segsz;
3356 			}
3357 		}
3358 		break;
3359 	}
3360 #endif
3361 	default:
3362 		pi->ipi_csum_flags &= ~CSUM_OFFLOAD;
3363 		pi->ipi_ip_hlen = 0;
3364 		break;
3365 	}
3366 	*mp = m;
3367 
3368 	return (0);
3369 }
3370 
3371 /*
3372  * If dodgy hardware rejects the scatter-gather chain we've handed it,
3373  * we'll need to remove the mbuf chain from ifsd_m[] before we can add the
3374  * m_defrag'd mbufs.
3375  */
3376 static __noinline struct mbuf *
3377 iflib_remove_mbuf(iflib_txq_t txq)
3378 {
3379 	int ntxd, pidx;
3380 	struct mbuf *m, **ifsd_m;
3381 
3382 	ifsd_m = txq->ift_sds.ifsd_m;
3383 	ntxd = txq->ift_size;
3384 	pidx = txq->ift_pidx & (ntxd - 1);
3386 	m = ifsd_m[pidx];
3387 	ifsd_m[pidx] = NULL;
3388 	bus_dmamap_unload(txq->ift_buf_tag, txq->ift_sds.ifsd_map[pidx]);
3389 	if (txq->ift_sds.ifsd_tso_map != NULL)
3390 		bus_dmamap_unload(txq->ift_tso_buf_tag,
3391 		    txq->ift_sds.ifsd_tso_map[pidx]);
3392 #if MEMORY_LOGGING
3393 	txq->ift_dequeued++;
3394 #endif
3395 	return (m);
3396 }
3397 
3398 static inline caddr_t
3399 calc_next_txd(iflib_txq_t txq, int cidx, uint8_t qid)
3400 {
3401 	qidx_t size;
3402 	int ntxd;
3403 	caddr_t start, end, cur, next;
3404 
3405 	ntxd = txq->ift_size;
3406 	size = txq->ift_txd_size[qid];
3407 	start = txq->ift_ifdi[qid].idi_vaddr;
3408 
3409 	if (__predict_false(size == 0))
3410 		return (start);
3411 	cur = start + size*cidx;
3412 	end = start + size*ntxd;
3413 	next = CACHE_PTR_NEXT(cur);
3414 	return (next < end ? next : start);
3415 }
3416 
3417 /*
3418  * Pad an mbuf to ensure a minimum ethernet frame size.
3419  * min_frame_size is the frame size (less CRC) to pad the mbuf to
3420  */
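/*
 * For example, a 42-byte ARP request sent on hardware requiring the
 * standard 60-byte minimum (ETHER_MIN_LEN less the 4-byte CRC) gets
 * 18 zero bytes appended, which is exactly what one pass over the
 * pad[] buffer below provides.
 */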
3421 static __noinline int
3422 iflib_ether_pad(device_t dev, struct mbuf **m_head, uint16_t min_frame_size)
3423 {
3424 	/*
3425 	 * 18 is enough bytes to pad an ARP packet to 46 bytes, and
3426 	 * an ARP message is the smallest common payload I can think of.
3427 	 */
3428 	static char pad[18];	/* just zeros */
3429 	int n;
3430 	struct mbuf *new_head;
3431 
3432 	if (!M_WRITABLE(*m_head)) {
3433 		new_head = m_dup(*m_head, M_NOWAIT);
3434 		if (new_head == NULL) {
3435 			m_freem(*m_head);
3436 			device_printf(dev, "cannot pad short frame, m_dup() failed\n");
3437 			DBG_COUNTER_INC(encap_pad_mbuf_fail);
3438 			DBG_COUNTER_INC(tx_frees);
3439 			return (ENOMEM);
3440 		}
3441 		m_freem(*m_head);
3442 		*m_head = new_head;
3443 	}
3444 
3445 	for (n = min_frame_size - (*m_head)->m_pkthdr.len;
3446 	     n > 0; n -= sizeof(pad))
3447 		if (!m_append(*m_head, min(n, sizeof(pad)), pad))
3448 			break;
3449 
3450 	if (n > 0) {
3451 		m_freem(*m_head);
3452 		device_printf(dev, "cannot pad short frame\n");
3453 		DBG_COUNTER_INC(encap_pad_mbuf_fail);
3454 		DBG_COUNTER_INC(tx_frees);
3455 		return (ENOBUFS);
3456 	}
3457 
3458 	return (0);
3459 }
3460 
3461 static int
3462 iflib_encap(iflib_txq_t txq, struct mbuf **m_headp)
3463 {
3464 	if_ctx_t		ctx;
3465 	if_shared_ctx_t		sctx;
3466 	if_softc_ctx_t		scctx;
3467 	bus_dma_tag_t		buf_tag;
3468 	bus_dma_segment_t	*segs;
3469 	struct mbuf		*m_head, **ifsd_m;
3470 	void			*next_txd;
3471 	bus_dmamap_t		map;
3472 	struct if_pkt_info	pi;
3473 	int remap = 0;
3474 	int err, nsegs, ndesc, max_segs, pidx, cidx, next, ntxd;
3475 
3476 	ctx = txq->ift_ctx;
3477 	sctx = ctx->ifc_sctx;
3478 	scctx = &ctx->ifc_softc_ctx;
3479 	segs = txq->ift_segs;
3480 	ntxd = txq->ift_size;
3481 	m_head = *m_headp;
3482 	map = NULL;
3483 
3484 	/*
3485 	 * If we're doing TSO, the next descriptor to clean may be quite far ahead.
3486 	 */
3487 	cidx = txq->ift_cidx;
3488 	pidx = txq->ift_pidx;
3489 	if (ctx->ifc_flags & IFC_PREFETCH) {
3490 		next = (cidx + CACHE_PTR_INCREMENT) & (ntxd-1);
3491 		if (!(ctx->ifc_flags & IFLIB_HAS_TXCQ)) {
3492 			next_txd = calc_next_txd(txq, cidx, 0);
3493 			prefetch(next_txd);
3494 		}
3495 
3496 		/* prefetch the next cache line of mbuf pointers and flags */
3497 		prefetch(&txq->ift_sds.ifsd_m[next]);
3498 		prefetch(&txq->ift_sds.ifsd_map[next]);
3499 		next = (cidx + CACHE_LINE_SIZE) & (ntxd-1);
3500 	}
3501 	map = txq->ift_sds.ifsd_map[pidx];
3502 	ifsd_m = txq->ift_sds.ifsd_m;
3503 
3504 	if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
3505 		buf_tag = txq->ift_tso_buf_tag;
3506 		max_segs = scctx->isc_tx_tso_segments_max;
3507 		map = txq->ift_sds.ifsd_tso_map[pidx];
3508 		MPASS(buf_tag != NULL);
3509 		MPASS(max_segs > 0);
3510 	} else {
3511 		buf_tag = txq->ift_buf_tag;
3512 		max_segs = scctx->isc_tx_nsegments;
3513 		map = txq->ift_sds.ifsd_map[pidx];
3514 	}
3515 	if ((sctx->isc_flags & IFLIB_NEED_ETHER_PAD) &&
3516 	    __predict_false(m_head->m_pkthdr.len < scctx->isc_min_frame_size)) {
3517 		err = iflib_ether_pad(ctx->ifc_dev, m_headp, scctx->isc_min_frame_size);
3518 		if (err) {
3519 			DBG_COUNTER_INC(encap_txd_encap_fail);
3520 			return (err);
3521 		}
3522 	}
3523 	m_head = *m_headp;
3524 
3525 	pkt_info_zero(&pi);
3526 	pi.ipi_mflags = (m_head->m_flags & (M_VLANTAG|M_BCAST|M_MCAST));
3527 	pi.ipi_pidx = pidx;
3528 	pi.ipi_qsidx = txq->ift_id;
3529 	pi.ipi_len = m_head->m_pkthdr.len;
3530 	pi.ipi_csum_flags = m_head->m_pkthdr.csum_flags;
3531 	pi.ipi_vtag = M_HAS_VLANTAG(m_head) ? m_head->m_pkthdr.ether_vtag : 0;
3532 
3533 	/* deliberate bitwise OR to make one condition */
3534 	if (__predict_true((pi.ipi_csum_flags | pi.ipi_vtag))) {
3535 		if (__predict_false((err = iflib_parse_header(txq, &pi, m_headp)) != 0)) {
3536 			DBG_COUNTER_INC(encap_txd_encap_fail);
3537 			return (err);
3538 		}
3539 		m_head = *m_headp;
3540 	}
3541 
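	/*
	 * Map the mbuf chain for DMA.  On EFBIG, first try m_collapse()
	 * (remap == 0), then m_defrag() (remap == 1); if even the
	 * defragmented chain cannot be mapped, give up via defrag_failed.
	 */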
3542 retry:
3543 	err = bus_dmamap_load_mbuf_sg(buf_tag, map, m_head, segs, &nsegs,
3544 	    BUS_DMA_NOWAIT);
3545 defrag:
3546 	if (__predict_false(err)) {
3547 		switch (err) {
3548 		case EFBIG:
3549 			/* try collapse once and defrag once */
3550 			if (remap == 0) {
3551 				m_head = m_collapse(*m_headp, M_NOWAIT, max_segs);
3552 				/* try defrag if collapsing fails */
3553 				if (m_head == NULL)
3554 					remap++;
3555 			}
3556 			if (remap == 1) {
3557 				txq->ift_mbuf_defrag++;
3558 				m_head = m_defrag(*m_headp, M_NOWAIT);
3559 			}
3560 			/*
3561 			 * remap should never be >1 unless bus_dmamap_load_mbuf_sg
3562 			 * failed to map an mbuf that was run through m_defrag
3563 			 */
3564 			MPASS(remap <= 1);
3565 			if (__predict_false(m_head == NULL || remap > 1))
3566 				goto defrag_failed;
3567 			remap++;
3568 			*m_headp = m_head;
3569 			goto retry;
3571 		case ENOMEM:
3572 			txq->ift_no_tx_dma_setup++;
3573 			break;
3574 		default:
3575 			txq->ift_no_tx_dma_setup++;
3576 			m_freem(*m_headp);
3577 			DBG_COUNTER_INC(tx_frees);
3578 			*m_headp = NULL;
3579 			break;
3580 		}
3581 		txq->ift_map_failed++;
3582 		DBG_COUNTER_INC(encap_load_mbuf_fail);
3583 		DBG_COUNTER_INC(encap_txd_encap_fail);
3584 		return (err);
3585 	}
3586 	ifsd_m[pidx] = m_head;
3587 	/*
3588 	 * XXX assumes a 1 to 1 relationship between segments and
3589 	 *        descriptors - this does not hold true on all drivers, e.g.
3590 	 *        cxgb
3591 	 */
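	/* The +2 leaves room for up to two sentinel descriptors (see below). */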
3592 	if (__predict_false(nsegs + 2 > TXQ_AVAIL(txq))) {
3593 		txq->ift_no_desc_avail++;
3594 		bus_dmamap_unload(buf_tag, map);
3595 		DBG_COUNTER_INC(encap_txq_avail_fail);
3596 		DBG_COUNTER_INC(encap_txd_encap_fail);
3597 		if ((txq->ift_task.gt_task.ta_flags & TASK_ENQUEUED) == 0)
3598 			GROUPTASK_ENQUEUE(&txq->ift_task);
3599 		return (ENOBUFS);
3600 	}
3601 	/*
3602 	 * On Intel cards we can greatly reduce the number of TX interrupts
3603 	 * we see by only setting report status on every Nth descriptor.
3604 	 * However, this also means that the driver will need to keep track
3605 	 * of the descriptors that RS was set on to check them for the DD bit.
3606 	 */
3607 	txq->ift_rs_pending += nsegs + 1;
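	/*
	 * Request a completion interrupt once enough descriptors have
	 * accumulated since the last one, when TX batching is disabled,
	 * or when the ring is nearly full.
	 */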
3608 	if (txq->ift_rs_pending > TXQ_MAX_RS_DEFERRED(txq) ||
3609 	     iflib_no_tx_batch || (TXQ_AVAIL(txq) - nsegs) <= MAX_TX_DESC(ctx) + 2) {
3610 		pi.ipi_flags |= IPI_TX_INTR;
3611 		txq->ift_rs_pending = 0;
3612 	}
3613 
3614 	pi.ipi_segs = segs;
3615 	pi.ipi_nsegs = nsegs;
3616 
3617 	MPASS(pidx >= 0 && pidx < txq->ift_size);
3618 #ifdef PKT_DEBUG
3619 	print_pkt(&pi);
3620 #endif
3621 	if ((err = ctx->isc_txd_encap(ctx->ifc_softc, &pi)) == 0) {
3622 		bus_dmamap_sync(buf_tag, map, BUS_DMASYNC_PREWRITE);
3623 		DBG_COUNTER_INC(tx_encap);
3624 		MPASS(pi.ipi_new_pidx < txq->ift_size);
3625 
3626 		ndesc = pi.ipi_new_pidx - pi.ipi_pidx;
3627 		if (pi.ipi_new_pidx < pi.ipi_pidx) {
3628 			ndesc += txq->ift_size;
3629 			txq->ift_gen = 1;
3630 		}
3631 		/*
3632 		 * Drivers may need as many as two sentinel descriptors.
3634 		 */
3635 		MPASS(ndesc <= pi.ipi_nsegs + 2);
3636 		MPASS(pi.ipi_new_pidx != pidx);
3637 		MPASS(ndesc > 0);
3638 		txq->ift_in_use += ndesc;
3639 		txq->ift_db_pending += ndesc;
3640 
3641 		/*
3642 		 * We update the last software descriptor again here because there may
3643 		 * be a sentinel and/or there may be more mbufs than segments
3644 		 */
3645 		txq->ift_pidx = pi.ipi_new_pidx;
3646 		txq->ift_npending += pi.ipi_ndescs;
3647 	} else {
3648 		*m_headp = m_head = iflib_remove_mbuf(txq);
3649 		if (err == EFBIG) {
3650 			txq->ift_txd_encap_efbig++;
3651 			if (remap < 2) {
3652 				remap = 1;
3653 				goto defrag;
3654 			}
3655 		}
3656 		goto defrag_failed;
3657 	}
3658 	/*
3659 	 * err can't possibly be non-zero here, so we don't need to test it
3660 	 * to see if we need to DBG_COUNTER_INC(encap_txd_encap_fail).
3661 	 */
3662 	return (err);
3663 
3664 defrag_failed:
3665 	txq->ift_mbuf_defrag_failed++;
3666 	txq->ift_map_failed++;
3667 	m_freem(*m_headp);
3668 	DBG_COUNTER_INC(tx_frees);
3669 	*m_headp = NULL;
3670 	DBG_COUNTER_INC(encap_txd_encap_fail);
3671 	return (ENOMEM);
3672 }
3673 
3674 static void
3675 iflib_tx_desc_free(iflib_txq_t txq, int n)
3676 {
3677 	uint32_t qsize, cidx, mask, gen;
3678 	struct mbuf *m, **ifsd_m;
3679 	bool do_prefetch;
3680 
3681 	cidx = txq->ift_cidx;
3682 	gen = txq->ift_gen;
3683 	qsize = txq->ift_size;
3684 	mask = qsize-1;
3685 	ifsd_m = txq->ift_sds.ifsd_m;
3686 	do_prefetch = (txq->ift_ctx->ifc_flags & IFC_PREFETCH);
3687 
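	/*
	 * Walk n descriptors forward from the consumer index, unloading
	 * each DMA map and freeing any mbuf still referenced there.
	 */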
3688 	while (n-- > 0) {
3689 		if (do_prefetch) {
3690 			prefetch(ifsd_m[(cidx + 3) & mask]);
3691 			prefetch(ifsd_m[(cidx + 4) & mask]);
3692 		}
3693 		if ((m = ifsd_m[cidx]) != NULL) {
3694 			prefetch(&ifsd_m[(cidx + CACHE_PTR_INCREMENT) & mask]);
3695 			if (m->m_pkthdr.csum_flags & CSUM_TSO) {
3696 				bus_dmamap_sync(txq->ift_tso_buf_tag,
3697 				    txq->ift_sds.ifsd_tso_map[cidx],
3698 				    BUS_DMASYNC_POSTWRITE);
3699 				bus_dmamap_unload(txq->ift_tso_buf_tag,
3700 				    txq->ift_sds.ifsd_tso_map[cidx]);
3701 			} else {
3702 				bus_dmamap_sync(txq->ift_buf_tag,
3703 				    txq->ift_sds.ifsd_map[cidx],
3704 				    BUS_DMASYNC_POSTWRITE);
3705 				bus_dmamap_unload(txq->ift_buf_tag,
3706 				    txq->ift_sds.ifsd_map[cidx]);
3707 			}
3708 			/* XXX we don't support any drivers that batch packets yet */
3709 			MPASS(m->m_nextpkt == NULL);
3710 			m_freem(m);
3711 			ifsd_m[cidx] = NULL;
3712 #if MEMORY_LOGGING
3713 			txq->ift_dequeued++;
3714 #endif
3715 			DBG_COUNTER_INC(tx_frees);
3716 		}
3717 		if (__predict_false(++cidx == qsize)) {
3718 			cidx = 0;
3719 			gen = 0;
3720 		}
3721 	}
3722 	txq->ift_cidx = cidx;
3723 	txq->ift_gen = gen;
3724 }
3725 
3726 static __inline int
3727 iflib_completed_tx_reclaim(iflib_txq_t txq, int thresh)
3728 {
3729 	int reclaim;
3730 	if_ctx_t ctx = txq->ift_ctx;
3731 
3732 	KASSERT(thresh >= 0, ("invalid threshold to reclaim"));
3733 	MPASS(thresh /*+ MAX_TX_DESC(txq->ift_ctx) */ < txq->ift_size);
3734 
3735 	/*
3736 	 * Need a rate-limiting check so that this isn't called every time
3737 	 */
3738 	iflib_tx_credits_update(ctx, txq);
3739 	reclaim = DESC_RECLAIMABLE(txq);
3740 
3741 	if (reclaim <= thresh /* + MAX_TX_DESC(txq->ift_ctx) */) {
3742 #ifdef INVARIANTS
3743 		if (iflib_verbose_debug) {
3744 			printf("%s processed=%ju cleaned=%ju tx_nsegments=%d reclaim=%d thresh=%d\n", __FUNCTION__,
3745 			       txq->ift_processed, txq->ift_cleaned, txq->ift_ctx->ifc_softc_ctx.isc_tx_nsegments,
3746 			       reclaim, thresh);
3747 		}
3748 #endif
3749 		return (0);
3750 	}
3751 	iflib_tx_desc_free(txq, reclaim);
3752 	txq->ift_cleaned += reclaim;
3753 	txq->ift_in_use -= reclaim;
3754 
3755 	return (reclaim);
3756 }
3757 
3758 static struct mbuf **
3759 _ring_peek_one(struct ifmp_ring *r, int cidx, int offset, int remaining)
3760 {
3761 	int next, size;
3762 	struct mbuf **items;
3763 
3764 	size = r->size;
3765 	next = (cidx + CACHE_PTR_INCREMENT) & (size-1);
3766 	items = __DEVOLATILE(struct mbuf **, &r->items[0]);
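	/*
	 * r->size is a power of two, so '& (size - 1)' wraps the index:
	 * e.g., with size 1024, cidx 1022 and offset 3 reference slot 1.
	 */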
3767 
3768 	prefetch(items[(cidx + offset) & (size-1)]);
3769 	if (remaining > 1) {
3770 		prefetch2cachelines(&items[next]);
3771 		prefetch2cachelines(items[(cidx + offset + 1) & (size-1)]);
3772 		prefetch2cachelines(items[(cidx + offset + 2) & (size-1)]);
3773 		prefetch2cachelines(items[(cidx + offset + 3) & (size-1)]);
3774 	}
3775 	return (__DEVOLATILE(struct mbuf **, &r->items[(cidx + offset) & (size-1)]));
3776 }
3777 
3778 static void
3779 iflib_txq_check_drain(iflib_txq_t txq, int budget)
3780 {
3781 
3782 	ifmp_ring_check_drainage(txq->ift_br, budget);
3783 }
3784 
3785 static uint32_t
3786 iflib_txq_can_drain(struct ifmp_ring *r)
3787 {
3788 	iflib_txq_t txq = r->cookie;
3789 	if_ctx_t ctx = txq->ift_ctx;
3790 
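	/*
	 * The queue is drainable immediately if enough descriptors are
	 * free; otherwise ask the driver whether completions have
	 * released any.
	 */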
3791 	if (TXQ_AVAIL(txq) > MAX_TX_DESC(ctx) + 2)
3792 		return (1);
3793 	bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map,
3794 	    BUS_DMASYNC_POSTREAD);
3795 	return (ctx->isc_txd_credits_update(ctx->ifc_softc, txq->ift_id,
3796 	    false));
3797 }
3798 
3799 static uint32_t
3800 iflib_txq_drain(struct ifmp_ring *r, uint32_t cidx, uint32_t pidx)
3801 {
3802 	iflib_txq_t txq = r->cookie;
3803 	if_ctx_t ctx = txq->ift_ctx;
3804 	if_t ifp = ctx->ifc_ifp;
3805 	struct mbuf *m, **mp;
3806 	int avail, bytes_sent, skipped, count, err, i;
3807 	int mcast_sent, pkt_sent, reclaimed;
3808 	bool do_prefetch, rang, ring;
3809 
3810 	if (__predict_false(!(if_getdrvflags(ifp) & IFF_DRV_RUNNING) ||
3811 			    !LINK_ACTIVE(ctx))) {
3812 		DBG_COUNTER_INC(txq_drain_notready);
3813 		return (0);
3814 	}
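	/*
	 * Reclaim completed descriptors first, ring the doorbell if the
	 * reclaim exposed still-pending work, then compute how many
	 * packets the mp_ring holds for us.
	 */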
3815 	reclaimed = iflib_completed_tx_reclaim(txq, RECLAIM_THRESH(ctx));
3816 	rang = iflib_txd_db_check(txq, reclaimed && txq->ift_db_pending);
3817 	avail = IDXDIFF(pidx, cidx, r->size);
3818 
3819 	if (__predict_false(ctx->ifc_flags & IFC_QFLUSH)) {
3820 		/*
3821 		 * The driver is unloading, so we need to free all pending packets.
3822 		 */
3823 		DBG_COUNTER_INC(txq_drain_flushing);
3824 		for (i = 0; i < avail; i++) {
3825 			if (__predict_true(r->items[(cidx + i) & (r->size-1)] != (void *)txq))
3826 				m_freem(r->items[(cidx + i) & (r->size-1)]);
3827 			r->items[(cidx + i) & (r->size-1)] = NULL;
3828 		}
3829 		return (avail);
3830 	}
3831 
3832 	if (__predict_false(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_OACTIVE)) {
3833 		txq->ift_qstatus = IFLIB_QUEUE_IDLE;
3834 		CALLOUT_LOCK(txq);
3835 		callout_stop(&txq->ift_timer);
3836 		CALLOUT_UNLOCK(txq);
3837 		DBG_COUNTER_INC(txq_drain_oactive);
3838 		return (0);
3839 	}
3840 
3841 	/*
3842 	 * If we've reclaimed any packets this queue cannot be hung.
3843 	 */
3844 	if (reclaimed)
3845 		txq->ift_qstatus = IFLIB_QUEUE_IDLE;
3846 	skipped = mcast_sent = bytes_sent = pkt_sent = 0;
3847 	count = MIN(avail, TX_BATCH_SIZE);
3848 #ifdef INVARIANTS
3849 	if (iflib_verbose_debug)
3850 		printf("%s avail=%d ifc_flags=%x txq_avail=%d ", __FUNCTION__,
3851 		       avail, ctx->ifc_flags, TXQ_AVAIL(txq));
3852 #endif
3853 	do_prefetch = (ctx->ifc_flags & IFC_PREFETCH);
3854 	err = 0;
3855 	for (i = 0; i < count && TXQ_AVAIL(txq) >= MAX_TX_DESC(ctx) + 2; i++) {
3856 		int rem = do_prefetch ? count - i : 0;
3857 
3858 		mp = _ring_peek_one(r, cidx, i, rem);
3859 		MPASS(mp != NULL && *mp != NULL);
3860 
3861 		/*
3862 		 * Completion interrupts will use the address of the txq
3863 		 * as a sentinel to enqueue _something_ in order to acquire
3864 		 * the lock on the mp_ring (there's no direct lock call).
3865 		 * We obviously have to check for these sentinel cases
3866 		 * and skip them.
3867 		 */
3868 		if (__predict_false(*mp == (struct mbuf *)txq)) {
3869 			skipped++;
3870 			continue;
3871 		}
3872 		err = iflib_encap(txq, mp);
3873 		if (__predict_false(err)) {
3874 			/* no room - bail out */
3875 			if (err == ENOBUFS)
3876 				break;
3877 			skipped++;
3878 			/* we can't send this packet - skip it */
3879 			continue;
3880 		}
3881 		pkt_sent++;
3882 		m = *mp;
3883 		DBG_COUNTER_INC(tx_sent);
3884 		bytes_sent += m->m_pkthdr.len;
3885 		mcast_sent += !!(m->m_flags & M_MCAST);
3886 
3887 		if (__predict_false(!(ifp->if_drv_flags & IFF_DRV_RUNNING)))
3888 			break;
3889 		ETHER_BPF_MTAP(ifp, m);
3890 		rang = iflib_txd_db_check(txq, false);
3891 	}
3892 
3893 	/* deliberate use of bitwise or to avoid gratuitous short-circuit */
3894 	ring = rang ? false : (iflib_min_tx_latency | err);
3895 	iflib_txd_db_check(txq, ring);
3896 	if_inc_counter(ifp, IFCOUNTER_OBYTES, bytes_sent);
3897 	if_inc_counter(ifp, IFCOUNTER_OPACKETS, pkt_sent);
3898 	if (mcast_sent)
3899 		if_inc_counter(ifp, IFCOUNTER_OMCASTS, mcast_sent);
3900 #ifdef INVARIANTS
3901 	if (iflib_verbose_debug)
3902 		printf("consumed=%d\n", skipped + pkt_sent);
3903 #endif
3904 	return (skipped + pkt_sent);
3905 }
3906 
3907 static uint32_t
3908 iflib_txq_drain_always(struct ifmp_ring *r)
3909 {
3910 	return (1);
3911 }
3912 
3913 static uint32_t
3914 iflib_txq_drain_free(struct ifmp_ring *r, uint32_t cidx, uint32_t pidx)
3915 {
3916 	int i, avail;
3917 	struct mbuf **mp;
3918 	iflib_txq_t txq;
3919 
3920 	txq = r->cookie;
3921 
3922 	txq->ift_qstatus = IFLIB_QUEUE_IDLE;
3923 	CALLOUT_LOCK(txq);
3924 	callout_stop(&txq->ift_timer);
3925 	CALLOUT_UNLOCK(txq);
3926 
3927 	avail = IDXDIFF(pidx, cidx, r->size);
3928 	for (i = 0; i < avail; i++) {
3929 		mp = _ring_peek_one(r, cidx, i, avail - i);
3930 		if (__predict_false(*mp == (struct mbuf *)txq))
3931 			continue;
3932 		m_freem(*mp);
3933 		DBG_COUNTER_INC(tx_frees);
3934 	}
3935 	MPASS(ifmp_ring_is_stalled(r) == 0);
3936 	return (avail);
3937 }
3938 
3939 static void
3940 iflib_ifmp_purge(iflib_txq_t txq)
3941 {
3942 	struct ifmp_ring *r;
3943 
3944 	r = txq->ift_br;
3945 	r->drain = iflib_txq_drain_free;
3946 	r->can_drain = iflib_txq_drain_always;
3947 
3948 	ifmp_ring_check_drainage(r, r->size);
3949 
3950 	r->drain = iflib_txq_drain;
3951 	r->can_drain = iflib_txq_can_drain;
3952 }
3953 
3954 static void
3955 _task_fn_tx(void *context)
3956 {
3957 	iflib_txq_t txq = context;
3958 	if_ctx_t ctx = txq->ift_ctx;
3959 	if_t ifp = ctx->ifc_ifp;
3960 	int abdicate = ctx->ifc_sysctl_tx_abdicate;
3961 
3962 #ifdef IFLIB_DIAGNOSTICS
3963 	txq->ift_cpu_exec_count[curcpu]++;
3964 #endif
3965 	if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))
3966 		return;
3967 #ifdef DEV_NETMAP
3968 	if ((if_getcapenable(ifp) & IFCAP_NETMAP) &&
3969 	    netmap_tx_irq(ifp, txq->ift_id))
3970 		goto skip_ifmp;
3971 #endif
3972 #ifdef ALTQ
3973 	if (ALTQ_IS_ENABLED(&ifp->if_snd))
3974 		iflib_altq_if_start(ifp);
3975 #endif
3976 	if (txq->ift_db_pending)
3977 		ifmp_ring_enqueue(txq->ift_br, (void **)&txq, 1, TX_BATCH_SIZE, abdicate);
3978 	else if (!abdicate)
3979 		ifmp_ring_check_drainage(txq->ift_br, TX_BATCH_SIZE);
3980 	/*
3981 	 * When abdicating, we always need to check drainage, not just when we don't enqueue
3982 	 */
3983 	if (abdicate)
3984 		ifmp_ring_check_drainage(txq->ift_br, TX_BATCH_SIZE);
3985 #ifdef DEV_NETMAP
3986 skip_ifmp:
3987 #endif
3988 	if (ctx->ifc_flags & IFC_LEGACY)
3989 		IFDI_INTR_ENABLE(ctx);
3990 	else
3991 		IFDI_TX_QUEUE_INTR_ENABLE(ctx, txq->ift_id);
3992 }
3993 
3994 static void
3995 _task_fn_rx(void *context)
3996 {
3997 	iflib_rxq_t rxq = context;
3998 	if_ctx_t ctx = rxq->ifr_ctx;
3999 	uint8_t more;
4000 	uint16_t budget;
4001 #ifdef DEV_NETMAP
4002 	u_int work = 0;
4003 	int nmirq;
4004 #endif
4005 
4006 #ifdef IFLIB_DIAGNOSTICS
4007 	rxq->ifr_cpu_exec_count[curcpu]++;
4008 #endif
4009 	DBG_COUNTER_INC(task_fn_rxs);
4010 	if (__predict_false(!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)))
4011 		return;
4012 #ifdef DEV_NETMAP
4013 	nmirq = netmap_rx_irq(ctx->ifc_ifp, rxq->ifr_id, &work);
4014 	if (nmirq != NM_IRQ_PASS) {
4015 		more = (nmirq == NM_IRQ_RESCHED) ? IFLIB_RXEOF_MORE : 0;
4016 		goto skip_rxeof;
4017 	}
4018 #endif
4019 	budget = ctx->ifc_sysctl_rx_budget;
4020 	if (budget == 0)
4021 		budget = 16;	/* XXX */
4022 	more = iflib_rxeof(rxq, budget);
4023 #ifdef DEV_NETMAP
4024 skip_rxeof:
4025 #endif
4026 	if ((more & IFLIB_RXEOF_MORE) == 0) {
4027 		if (ctx->ifc_flags & IFC_LEGACY)
4028 			IFDI_INTR_ENABLE(ctx);
4029 		else
4030 			IFDI_RX_QUEUE_INTR_ENABLE(ctx, rxq->ifr_id);
4031 		DBG_COUNTER_INC(rx_intr_enables);
4032 	}
4033 	if (__predict_false(!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)))
4034 		return;
4035 
4036 	if (more & IFLIB_RXEOF_MORE)
4037 		GROUPTASK_ENQUEUE(&rxq->ifr_task);
4038 	else if (more & IFLIB_RXEOF_EMPTY)
4039 		callout_reset_curcpu(&rxq->ifr_watchdog, 1, &_task_fn_rx_watchdog, rxq);
4040 }
4041 
4042 static void
4043 _task_fn_admin(void *context)
4044 {
4045 	if_ctx_t ctx = context;
4046 	if_softc_ctx_t sctx = &ctx->ifc_softc_ctx;
4047 	iflib_txq_t txq;
4048 	int i;
4049 	bool oactive, running, do_reset, do_watchdog, in_detach;
4050 
4051 	STATE_LOCK(ctx);
4052 	running = (if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING);
4053 	oactive = (if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_OACTIVE);
4054 	do_reset = (ctx->ifc_flags & IFC_DO_RESET);
4055 	do_watchdog = (ctx->ifc_flags & IFC_DO_WATCHDOG);
4056 	in_detach = (ctx->ifc_flags & IFC_IN_DETACH);
4057 	ctx->ifc_flags &= ~(IFC_DO_RESET|IFC_DO_WATCHDOG);
4058 	STATE_UNLOCK(ctx);
4059 
4060 	if ((!running && !oactive) && !(ctx->ifc_sctx->isc_flags & IFLIB_ADMIN_ALWAYS_RUN))
4061 		return;
4062 	if (in_detach)
4063 		return;
4064 
4065 	CTX_LOCK(ctx);
4066 	for (txq = ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++) {
4067 		CALLOUT_LOCK(txq);
4068 		callout_stop(&txq->ift_timer);
4069 		CALLOUT_UNLOCK(txq);
4070 	}
4071 	if (ctx->ifc_sctx->isc_flags & IFLIB_HAS_ADMINCQ)
4072 		IFDI_ADMIN_COMPLETION_HANDLE(ctx);
4073 	if (do_watchdog) {
4074 		ctx->ifc_watchdog_events++;
4075 		IFDI_WATCHDOG_RESET(ctx);
4076 	}
4077 	IFDI_UPDATE_ADMIN_STATUS(ctx);
4078 	for (txq = ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++) {
4079 		callout_reset_on(&txq->ift_timer, iflib_timer_default, iflib_timer, txq,
4080 		    txq->ift_timer.c_cpu);
4081 	}
4082 	IFDI_LINK_INTR_ENABLE(ctx);
4083 	if (do_reset)
4084 		iflib_if_init_locked(ctx);
4085 	CTX_UNLOCK(ctx);
4086 
4087 	if (LINK_ACTIVE(ctx) == 0)
4088 		return;
4089 	for (txq = ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++)
4090 		iflib_txq_check_drain(txq, IFLIB_RESTART_BUDGET);
4091 }
4092 
4093 static void
4094 _task_fn_iov(void *context)
4095 {
4096 	if_ctx_t ctx = context;
4097 
4098 	if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING) &&
4099 	    !(ctx->ifc_sctx->isc_flags & IFLIB_ADMIN_ALWAYS_RUN))
4100 		return;
4101 
4102 	CTX_LOCK(ctx);
4103 	IFDI_VFLR_HANDLE(ctx);
4104 	CTX_UNLOCK(ctx);
4105 }
4106 
4107 static int
4108 iflib_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
4109 {
4110 	int err;
4111 	if_int_delay_info_t info;
4112 	if_ctx_t ctx;
4113 
4114 	info = (if_int_delay_info_t)arg1;
4115 	ctx = info->iidi_ctx;
4116 	info->iidi_req = req;
4117 	info->iidi_oidp = oidp;
4118 	CTX_LOCK(ctx);
4119 	err = IFDI_SYSCTL_INT_DELAY(ctx, info);
4120 	CTX_UNLOCK(ctx);
4121 	return (err);
4122 }
4123 
4124 /*********************************************************************
4125  *
4126  *  IFNET FUNCTIONS
4127  *
4128  **********************************************************************/
4129 
4130 static void
4131 iflib_if_init_locked(if_ctx_t ctx)
4132 {
4133 	iflib_stop(ctx);
4134 	iflib_init_locked(ctx);
4135 }
4136 
4137 static void
4138 iflib_if_init(void *arg)
4139 {
4140 	if_ctx_t ctx = arg;
4141 
4142 	CTX_LOCK(ctx);
4143 	iflib_if_init_locked(ctx);
4144 	CTX_UNLOCK(ctx);
4145 }
4146 
4147 static int
4148 iflib_if_transmit(if_t ifp, struct mbuf *m)
4149 {
4150 	if_ctx_t	ctx = if_getsoftc(ifp);
4151 
4152 	iflib_txq_t txq;
4153 	int err, qidx;
4154 	int abdicate = ctx->ifc_sysctl_tx_abdicate;
4155 
4156 	if (__predict_false((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || !LINK_ACTIVE(ctx))) {
4157 		DBG_COUNTER_INC(tx_frees);
4158 		m_freem(m);
4159 		return (ENETDOWN);
4160 	}
4161 
4162 	MPASS(m->m_nextpkt == NULL);
4163 	/* ALTQ-enabled interfaces always use queue 0. */
4164 	qidx = 0;
4165 	/* Use driver-supplied queue selection method if it exists */
4166 	if (ctx->isc_txq_select)
4167 		qidx = ctx->isc_txq_select(ctx->ifc_softc, m);
4168 	/* If not, use iflib's standard method */
4169 	else if ((NTXQSETS(ctx) > 1) && M_HASHTYPE_GET(m) && !ALTQ_IS_ENABLED(&ifp->if_snd))
4170 		qidx = QIDX(ctx, m);
4171 
4172 	/* Set TX queue */
4173 	txq = &ctx->ifc_txqs[qidx];
4174 
4175 #ifdef DRIVER_BACKPRESSURE
4176 	if (txq->ift_closed) {
4177 		while (m != NULL) {
4178 			next = m->m_nextpkt;
4179 			m->m_nextpkt = NULL;
4180 			m_freem(m);
4181 			DBG_COUNTER_INC(tx_frees);
4182 			m = next;
4183 		}
4184 		return (ENOBUFS);
4185 	}
4186 #endif
4187 #ifdef notyet
4188 	qidx = count = 0;
4189 	mp = marr;
4190 	next = m;
4191 	do {
4192 		count++;
4193 		next = next->m_nextpkt;
4194 	} while (next != NULL);
4195 
4196 	if (count > nitems(marr))
4197 		if ((mp = malloc(count*sizeof(struct mbuf *), M_IFLIB, M_NOWAIT)) == NULL) {
4198 			/* XXX check nextpkt */
4199 			m_freem(m);
4200 			/* XXX simplify for now */
4201 			DBG_COUNTER_INC(tx_frees);
4202 			return (ENOBUFS);
4203 		}
4204 	for (next = m, i = 0; next != NULL; i++) {
4205 		mp[i] = next;
4206 		next = next->m_nextpkt;
4207 		mp[i]->m_nextpkt = NULL;
4208 	}
4209 #endif
4210 	DBG_COUNTER_INC(tx_seen);
4211 	err = ifmp_ring_enqueue(txq->ift_br, (void **)&m, 1, TX_BATCH_SIZE, abdicate);
4212 
4213 	if (abdicate)
4214 		GROUPTASK_ENQUEUE(&txq->ift_task);
4215 	if (err) {
4216 		if (!abdicate)
4217 			GROUPTASK_ENQUEUE(&txq->ift_task);
4218 		/* support forthcoming later */
4219 #ifdef DRIVER_BACKPRESSURE
4220 		txq->ift_closed = TRUE;
4221 #endif
4222 		ifmp_ring_check_drainage(txq->ift_br, TX_BATCH_SIZE);
4223 		m_freem(m);
4224 		DBG_COUNTER_INC(tx_frees);
4225 	}
4226 
4227 	return (err);
4228 }
4229 
4230 #ifdef ALTQ
4231 /*
4232  * The overall approach to integrating iflib with ALTQ is to continue to use
4233  * the iflib mp_ring machinery between the ALTQ queue(s) and the hardware
4234  * ring.  Technically, when using ALTQ, queueing to an intermediate mp_ring
4235  * is redundant/unnecessary, but doing so minimizes the amount of
4236  * ALTQ-specific code required in iflib.  It is assumed that the overhead of
4237  * redundantly queueing to an intermediate mp_ring is swamped by the
4238  * performance limitations inherent in using ALTQ.
4239  *
4240  * When ALTQ support is compiled in, all iflib drivers will use a transmit
4241  * routine, iflib_altq_if_transmit(), that checks if ALTQ is enabled for the
4242  * given interface.  If ALTQ is enabled for an interface, then all
4243  * transmitted packets for that interface will be submitted to the ALTQ
4244  * subsystem via IFQ_ENQUEUE().  We don't use the legacy if_transmit()
4245  * implementation because it uses IFQ_HANDOFF(), which will duplicatively
4246  * update stats that the iflib machinery handles, and which is sensitive to
4247  * the disused IFF_DRV_OACTIVE flag.  Additionally, iflib_altq_if_start()
4248  * will be installed as the start routine for use by ALTQ facilities that
4249  * need to trigger queue drains on a scheduled basis.
4250  *
4251  */
4252 static void
4253 iflib_altq_if_start(if_t ifp)
4254 {
4255 	struct ifaltq *ifq = &ifp->if_snd;
4256 	struct mbuf *m;
4257 
4258 	IFQ_LOCK(ifq);
4259 	IFQ_DEQUEUE_NOLOCK(ifq, m);
4260 	while (m != NULL) {
4261 		iflib_if_transmit(ifp, m);
4262 		IFQ_DEQUEUE_NOLOCK(ifq, m);
4263 	}
4264 	IFQ_UNLOCK(ifq);
4265 }
4266 
4267 static int
4268 iflib_altq_if_transmit(if_t ifp, struct mbuf *m)
4269 {
4270 	int err;
4271 
4272 	if (ALTQ_IS_ENABLED(&ifp->if_snd)) {
4273 		IFQ_ENQUEUE(&ifp->if_snd, m, err);
4274 		if (err == 0)
4275 			iflib_altq_if_start(ifp);
4276 	} else
4277 		err = iflib_if_transmit(ifp, m);
4278 
4279 	return (err);
4280 }
4281 #endif /* ALTQ */
4282 
4283 static void
4284 iflib_if_qflush(if_t ifp)
4285 {
4286 	if_ctx_t ctx = if_getsoftc(ifp);
4287 	iflib_txq_t txq = ctx->ifc_txqs;
4288 	int i;
4289 
4290 	STATE_LOCK(ctx);
4291 	ctx->ifc_flags |= IFC_QFLUSH;
4292 	STATE_UNLOCK(ctx);
4293 	for (i = 0; i < NTXQSETS(ctx); i++, txq++)
4294 		while (!(ifmp_ring_is_idle(txq->ift_br) || ifmp_ring_is_stalled(txq->ift_br)))
4295 			iflib_txq_check_drain(txq, 0);
4296 	STATE_LOCK(ctx);
4297 	ctx->ifc_flags &= ~IFC_QFLUSH;
4298 	STATE_UNLOCK(ctx);
4299 
4300 	/*
4301 	 * When ALTQ is enabled, this will also take care of purging the
4302 	 * ALTQ queue(s).
4303 	 */
4304 	if_qflush(ifp);
4305 }
4306 
4307 #define IFCAP_FLAGS (IFCAP_HWCSUM_IPV6 | IFCAP_HWCSUM | IFCAP_LRO | \
4308 		     IFCAP_TSO | IFCAP_VLAN_HWTAGGING | IFCAP_HWSTATS | \
4309 		     IFCAP_VLAN_MTU | IFCAP_VLAN_HWFILTER | \
4310 		     IFCAP_VLAN_HWTSO | IFCAP_VLAN_HWCSUM | IFCAP_MEXTPG)
4311 
4312 static int
4313 iflib_if_ioctl(if_t ifp, u_long command, caddr_t data)
4314 {
4315 	if_ctx_t ctx = if_getsoftc(ifp);
4316 	struct ifreq	*ifr = (struct ifreq *)data;
4317 #if defined(INET) || defined(INET6)
4318 	struct ifaddr	*ifa = (struct ifaddr *)data;
4319 #endif
4320 	bool		avoid_reset = false;
4321 	int		err = 0, reinit = 0, bits;
4322 
4323 	switch (command) {
4324 	case SIOCSIFADDR:
4325 #ifdef INET
4326 		if (ifa->ifa_addr->sa_family == AF_INET)
4327 			avoid_reset = true;
4328 #endif
4329 #ifdef INET6
4330 		if (ifa->ifa_addr->sa_family == AF_INET6)
4331 			avoid_reset = true;
4332 #endif
4333 		/*
4334 		** Calling init results in link renegotiation,
4335 		** so we avoid doing it when possible.
4336 		*/
4337 		if (avoid_reset) {
4338 			if_setflagbits(ifp, IFF_UP,0);
4339 			if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))
4340 				reinit = 1;
4341 #ifdef INET
4342 			if (!(if_getflags(ifp) & IFF_NOARP))
4343 				arp_ifinit(ifp, ifa);
4344 #endif
4345 		} else
4346 			err = ether_ioctl(ifp, command, data);
4347 		break;
4348 	case SIOCSIFMTU:
4349 		CTX_LOCK(ctx);
4350 		if (ifr->ifr_mtu == if_getmtu(ifp)) {
4351 			CTX_UNLOCK(ctx);
4352 			break;
4353 		}
4354 		bits = if_getdrvflags(ifp);
4355 		/* stop the driver and free any clusters before proceeding */
4356 		iflib_stop(ctx);
4357 
4358 		if ((err = IFDI_MTU_SET(ctx, ifr->ifr_mtu)) == 0) {
4359 			STATE_LOCK(ctx);
4360 			if (ifr->ifr_mtu > ctx->ifc_max_fl_buf_size)
4361 				ctx->ifc_flags |= IFC_MULTISEG;
4362 			else
4363 				ctx->ifc_flags &= ~IFC_MULTISEG;
4364 			STATE_UNLOCK(ctx);
4365 			err = if_setmtu(ifp, ifr->ifr_mtu);
4366 		}
4367 		iflib_init_locked(ctx);
4368 		STATE_LOCK(ctx);
4369 		if_setdrvflags(ifp, bits);
4370 		STATE_UNLOCK(ctx);
4371 		CTX_UNLOCK(ctx);
4372 		break;
4373 	case SIOCSIFFLAGS:
4374 		CTX_LOCK(ctx);
4375 		if (if_getflags(ifp) & IFF_UP) {
4376 			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
4377 				if ((if_getflags(ifp) ^ ctx->ifc_if_flags) &
4378 				    (IFF_PROMISC | IFF_ALLMULTI)) {
4379 					CTX_UNLOCK(ctx);
4380 					err = IFDI_PROMISC_SET(ctx, if_getflags(ifp));
4381 					CTX_LOCK(ctx);
4382 				}
4383 			} else
4384 				reinit = 1;
4385 		} else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
4386 			iflib_stop(ctx);
4387 		}
4388 		ctx->ifc_if_flags = if_getflags(ifp);
4389 		CTX_UNLOCK(ctx);
4390 		break;
4391 	case SIOCADDMULTI:
4392 	case SIOCDELMULTI:
4393 		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
4394 			CTX_LOCK(ctx);
4395 			IFDI_INTR_DISABLE(ctx);
4396 			IFDI_MULTI_SET(ctx);
4397 			IFDI_INTR_ENABLE(ctx);
4398 			CTX_UNLOCK(ctx);
4399 		}
4400 		break;
4401 	case SIOCSIFMEDIA:
4402 		CTX_LOCK(ctx);
4403 		IFDI_MEDIA_SET(ctx);
4404 		CTX_UNLOCK(ctx);
4405 		/* FALLTHROUGH */
4406 	case SIOCGIFMEDIA:
4407 	case SIOCGIFXMEDIA:
4408 		err = ifmedia_ioctl(ifp, ifr, ctx->ifc_mediap, command);
4409 		break;
4410 	case SIOCGI2C:
4411 	{
4412 		struct ifi2creq i2c;
4413 
4414 		err = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c));
4415 		if (err != 0)
4416 			break;
4417 		if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
4418 			err = EINVAL;
4419 			break;
4420 		}
4421 		if (i2c.len > sizeof(i2c.data)) {
4422 			err = EINVAL;
4423 			break;
4424 		}
4425 
4426 		if ((err = IFDI_I2C_REQ(ctx, &i2c)) == 0)
4427 			err = copyout(&i2c, ifr_data_get_ptr(ifr),
4428 			    sizeof(i2c));
4429 		break;
4430 	}
4431 	case SIOCSIFCAP:
4432 	{
4433 		int mask, setmask, oldmask;
4434 
4435 		oldmask = if_getcapenable(ifp);
4436 		mask = ifr->ifr_reqcap ^ oldmask;
4437 		mask &= ctx->ifc_softc_ctx.isc_capabilities | IFCAP_MEXTPG;
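		/* Limit changes to capabilities the driver actually supports. */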
4438 		setmask = 0;
4439 #ifdef TCP_OFFLOAD
4440 		setmask |= mask & (IFCAP_TOE4|IFCAP_TOE6);
4441 #endif
4442 		setmask |= (mask & IFCAP_FLAGS);
4443 		setmask |= (mask & IFCAP_WOL);
4444 
4445 		/*
4446 		 * If any RX csum has changed, change all the ones that
4447 		 * are supported by the driver.
4448 		 */
4449 		if (setmask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) {
4450 			setmask |= ctx->ifc_softc_ctx.isc_capabilities &
4451 			    (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6);
4452 		}
4453 
4454 		/*
4455 		 * We want to ensure that traffic has stopped before we change any of the flags.
4456 		 */
4457 		if (setmask) {
4458 			CTX_LOCK(ctx);
4459 			bits = if_getdrvflags(ifp);
4460 			if (bits & IFF_DRV_RUNNING && setmask & ~IFCAP_WOL)
4461 				iflib_stop(ctx);
4462 			STATE_LOCK(ctx);
4463 			if_togglecapenable(ifp, setmask);
4464 			ctx->ifc_softc_ctx.isc_capenable ^= setmask;
4465 			STATE_UNLOCK(ctx);
4466 			if (bits & IFF_DRV_RUNNING && setmask & ~IFCAP_WOL)
4467 				iflib_init_locked(ctx);
4468 			STATE_LOCK(ctx);
4469 			if_setdrvflags(ifp, bits);
4470 			STATE_UNLOCK(ctx);
4471 			CTX_UNLOCK(ctx);
4472 		}
4473 		if_vlancap(ifp);
4474 		break;
4475 	}
4476 	case SIOCGPRIVATE_0:
4477 	case SIOCSDRVSPEC:
4478 	case SIOCGDRVSPEC:
4479 		CTX_LOCK(ctx);
4480 		err = IFDI_PRIV_IOCTL(ctx, command, data);
4481 		CTX_UNLOCK(ctx);
4482 		break;
4483 	default:
4484 		err = ether_ioctl(ifp, command, data);
4485 		break;
4486 	}
4487 	if (reinit)
4488 		iflib_if_init(ctx);
4489 	return (err);
4490 }
4491 
4492 static uint64_t
4493 iflib_if_get_counter(if_t ifp, ift_counter cnt)
4494 {
4495 	if_ctx_t ctx = if_getsoftc(ifp);
4496 
4497 	return (IFDI_GET_COUNTER(ctx, cnt));
4498 }
4499 
4500 /*********************************************************************
4501  *
4502  *  OTHER FUNCTIONS EXPORTED TO THE STACK
4503  *
4504  **********************************************************************/
4505 
4506 static void
4507 iflib_vlan_register(void *arg, if_t ifp, uint16_t vtag)
4508 {
4509 	if_ctx_t ctx = if_getsoftc(ifp);
4510 
4511 	if ((void *)ctx != arg)
4512 		return;
4513 
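	/* A valid VLAN tag is non-zero and fits in 12 bits. */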
4514 	if ((vtag == 0) || (vtag > 4095))
4515 		return;
4516 
4517 	if (iflib_in_detach(ctx))
4518 		return;
4519 
4520 	CTX_LOCK(ctx);
4521 	/* Driver may need all untagged packets to be flushed */
4522 	if (IFDI_NEEDS_RESTART(ctx, IFLIB_RESTART_VLAN_CONFIG))
4523 		iflib_stop(ctx);
4524 	IFDI_VLAN_REGISTER(ctx, vtag);
4525 	/* Re-init to load the changes, if required */
4526 	if (IFDI_NEEDS_RESTART(ctx, IFLIB_RESTART_VLAN_CONFIG))
4527 		iflib_init_locked(ctx);
4528 	CTX_UNLOCK(ctx);
4529 }
4530 
4531 static void
4532 iflib_vlan_unregister(void *arg, if_t ifp, uint16_t vtag)
4533 {
4534 	if_ctx_t ctx = if_getsoftc(ifp);
4535 
4536 	if ((void *)ctx != arg)
4537 		return;
4538 
4539 	if ((vtag == 0) || (vtag > 4095))
4540 		return;
4541 
4542 	CTX_LOCK(ctx);
4543 	/* Driver may need all tagged packets to be flushed */
4544 	if (IFDI_NEEDS_RESTART(ctx, IFLIB_RESTART_VLAN_CONFIG))
4545 		iflib_stop(ctx);
4546 	IFDI_VLAN_UNREGISTER(ctx, vtag);
4547 	/* Re-init to load the changes, if required */
4548 	if (IFDI_NEEDS_RESTART(ctx, IFLIB_RESTART_VLAN_CONFIG))
4549 		iflib_init_locked(ctx);
4550 	CTX_UNLOCK(ctx);
4551 }
4552 
4553 static void
4554 iflib_led_func(void *arg, int onoff)
4555 {
4556 	if_ctx_t ctx = arg;
4557 
4558 	CTX_LOCK(ctx);
4559 	IFDI_LED_FUNC(ctx, onoff);
4560 	CTX_UNLOCK(ctx);
4561 }
4562 
4563 /*********************************************************************
4564  *
4565  *  BUS FUNCTION DEFINITIONS
4566  *
4567  **********************************************************************/
4568 
4569 int
4570 iflib_device_probe(device_t dev)
4571 {
4572 	const pci_vendor_info_t *ent;
4573 	if_shared_ctx_t sctx;
4574 	uint16_t pci_device_id, pci_rev_id, pci_subdevice_id, pci_subvendor_id;
4575 	uint16_t pci_vendor_id;
4576 
4577 	if ((sctx = DEVICE_REGISTER(dev)) == NULL || sctx->isc_magic != IFLIB_MAGIC)
4578 		return (ENOTSUP);
4579 
4580 	pci_vendor_id = pci_get_vendor(dev);
4581 	pci_device_id = pci_get_device(dev);
4582 	pci_subvendor_id = pci_get_subvendor(dev);
4583 	pci_subdevice_id = pci_get_subdevice(dev);
4584 	pci_rev_id = pci_get_revid(dev);
4585 	if (sctx->isc_parse_devinfo != NULL)
4586 		sctx->isc_parse_devinfo(&pci_device_id, &pci_subvendor_id, &pci_subdevice_id, &pci_rev_id);
4587 
4588 	ent = sctx->isc_vendor_info;
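	/*
	 * Scan the driver's vendor-info table; a zero subvendor, subdevice,
	 * or revision in an entry acts as a wildcard matching any value.
	 */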
4589 	while (ent->pvi_vendor_id != 0) {
4590 		if (pci_vendor_id != ent->pvi_vendor_id) {
4591 			ent++;
4592 			continue;
4593 		}
4594 		if ((pci_device_id == ent->pvi_device_id) &&
4595 		    ((pci_subvendor_id == ent->pvi_subvendor_id) ||
4596 		     (ent->pvi_subvendor_id == 0)) &&
4597 		    ((pci_subdevice_id == ent->pvi_subdevice_id) ||
4598 		     (ent->pvi_subdevice_id == 0)) &&
4599 		    ((pci_rev_id == ent->pvi_rev_id) ||
4600 		     (ent->pvi_rev_id == 0))) {
4601 			device_set_desc_copy(dev, ent->pvi_name);
4602 			/* This needs to be changed to zero if the bus probing
4603 			 * code ever stops re-probing on best match, because the
4604 			 * sctx may have its values overwritten by register calls
4605 			 * in subsequent probes.
4606 			 */
4607 			return (BUS_PROBE_DEFAULT);
4608 		}
4609 		ent++;
4610 	}
4611 	return (ENXIO);
4612 }
4613 
4614 int
4615 iflib_device_probe_vendor(device_t dev)
4616 {
4617 	int probe;
4618 
4619 	probe = iflib_device_probe(dev);
4620 	if (probe == BUS_PROBE_DEFAULT)
4621 		return (BUS_PROBE_VENDOR);
4622 	else
4623 		return (probe);
4624 }
4625 
4626 static void
4627 iflib_reset_qvalues(if_ctx_t ctx)
4628 {
4629 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
4630 	if_shared_ctx_t sctx = ctx->ifc_sctx;
4631 	device_t dev = ctx->ifc_dev;
4632 	int i;
4633 
4634 	if (ctx->ifc_sysctl_ntxqs != 0)
4635 		scctx->isc_ntxqsets = ctx->ifc_sysctl_ntxqs;
4636 	if (ctx->ifc_sysctl_nrxqs != 0)
4637 		scctx->isc_nrxqsets = ctx->ifc_sysctl_nrxqs;
4638 
4639 	for (i = 0; i < sctx->isc_ntxqs; i++) {
4640 		if (ctx->ifc_sysctl_ntxds[i] != 0)
4641 			scctx->isc_ntxd[i] = ctx->ifc_sysctl_ntxds[i];
4642 		else
4643 			scctx->isc_ntxd[i] = sctx->isc_ntxd_default[i];
4644 	}
4645 
4646 	for (i = 0; i < sctx->isc_nrxqs; i++) {
4647 		if (ctx->ifc_sysctl_nrxds[i] != 0)
4648 			scctx->isc_nrxd[i] = ctx->ifc_sysctl_nrxds[i];
4649 		else
4650 			scctx->isc_nrxd[i] = sctx->isc_nrxd_default[i];
4651 	}
4652 
4653 	for (i = 0; i < sctx->isc_nrxqs; i++) {
4654 		if (scctx->isc_nrxd[i] < sctx->isc_nrxd_min[i]) {
4655 			device_printf(dev, "nrxd%d: %d less than nrxd_min %d - resetting to min\n",
4656 				      i, scctx->isc_nrxd[i], sctx->isc_nrxd_min[i]);
4657 			scctx->isc_nrxd[i] = sctx->isc_nrxd_min[i];
4658 		}
4659 		if (scctx->isc_nrxd[i] > sctx->isc_nrxd_max[i]) {
4660 			device_printf(dev, "nrxd%d: %d greater than nrxd_max %d - resetting to max\n",
4661 				      i, scctx->isc_nrxd[i], sctx->isc_nrxd_max[i]);
4662 			scctx->isc_nrxd[i] = sctx->isc_nrxd_max[i];
4663 		}
4664 		if (!powerof2(scctx->isc_nrxd[i])) {
4665 			device_printf(dev, "nrxd%d: %d is not a power of 2 - using default value of %d\n",
4666 				      i, scctx->isc_nrxd[i], sctx->isc_nrxd_default[i]);
4667 			scctx->isc_nrxd[i] = sctx->isc_nrxd_default[i];
4668 		}
4669 	}
4670 
4671 	for (i = 0; i < sctx->isc_ntxqs; i++) {
4672 		if (scctx->isc_ntxd[i] < sctx->isc_ntxd_min[i]) {
4673 			device_printf(dev, "ntxd%d: %d less than ntxd_min %d - resetting to min\n",
4674 				      i, scctx->isc_ntxd[i], sctx->isc_ntxd_min[i]);
4675 			scctx->isc_ntxd[i] = sctx->isc_ntxd_min[i];
4676 		}
4677 		if (scctx->isc_ntxd[i] > sctx->isc_ntxd_max[i]) {
4678 			device_printf(dev, "ntxd%d: %d greater than ntxd_max %d - resetting to max\n",
4679 				      i, scctx->isc_ntxd[i], sctx->isc_ntxd_max[i]);
4680 			scctx->isc_ntxd[i] = sctx->isc_ntxd_max[i];
4681 		}
4682 		if (!powerof2(scctx->isc_ntxd[i])) {
4683 			device_printf(dev, "ntxd%d: %d is not a power of 2 - using default value of %d\n",
4684 				      i, scctx->isc_ntxd[i], sctx->isc_ntxd_default[i]);
4685 			scctx->isc_ntxd[i] = sctx->isc_ntxd_default[i];
4686 		}
4687 	}
4688 }
4689 
4690 static void
4691 iflib_add_pfil(if_ctx_t ctx)
4692 {
4693 	struct pfil_head *pfil;
4694 	struct pfil_head_args pa;
4695 	iflib_rxq_t rxq;
4696 	int i;
4697 
4698 	pa.pa_version = PFIL_VERSION;
4699 	pa.pa_flags = PFIL_IN;
4700 	pa.pa_type = PFIL_TYPE_ETHERNET;
4701 	pa.pa_headname = ctx->ifc_ifp->if_xname;
4702 	pfil = pfil_head_register(&pa);
4703 
4704 	for (i = 0, rxq = ctx->ifc_rxqs; i < NRXQSETS(ctx); i++, rxq++) {
4705 		rxq->pfil = pfil;
4706 	}
4707 }
4708 
4709 static void
4710 iflib_rem_pfil(if_ctx_t ctx)
4711 {
4712 	struct pfil_head *pfil;
4713 	iflib_rxq_t rxq;
4714 	int i;
4715 
4716 	rxq = ctx->ifc_rxqs;
4717 	pfil = rxq->pfil;
4718 	for (i = 0; i < NRXQSETS(ctx); i++, rxq++) {
4719 		rxq->pfil = NULL;
4720 	}
4721 	pfil_head_unregister(pfil);
4722 }
4723 
4725 /*
4726  * Advance forward by n members of the cpuset ctx->ifc_cpus starting from
4727  * cpuid and wrapping as necessary.
4728  */
4729 static unsigned int
4730 cpuid_advance(if_ctx_t ctx, unsigned int cpuid, unsigned int n)
4731 {
4732 	unsigned int first_valid;
4733 	unsigned int last_valid;
4734 
4735 	/* cpuid should always be in the valid set */
4736 	MPASS(CPU_ISSET(cpuid, &ctx->ifc_cpus));
4737 
4738 	/* valid set should never be empty */
4739 	MPASS(!CPU_EMPTY(&ctx->ifc_cpus));
4740 
4741 	first_valid = CPU_FFS(&ctx->ifc_cpus) - 1;
4742 	last_valid = CPU_FLS(&ctx->ifc_cpus) - 1;
4743 	n = n % CPU_COUNT(&ctx->ifc_cpus);
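	/*
	 * Example: with valid CPUs {0, 2, 4}, advancing from CPU 4 by one
	 * wraps to CPU 0; advancing by two lands on CPU 2.
	 */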
4744 	while (n > 0) {
4745 		do {
4746 			cpuid++;
4747 			if (cpuid > last_valid)
4748 				cpuid = first_valid;
4749 		} while (!CPU_ISSET(cpuid, &ctx->ifc_cpus));
4750 		n--;
4751 	}
4752 
4753 	return (cpuid);
4754 }
4755 
4756 #if defined(SMP) && defined(SCHED_ULE)
4757 extern struct cpu_group *cpu_top;              /* CPU topology */
4758 
4759 static int
4760 find_child_with_core(int cpu, struct cpu_group *grp)
4761 {
4762 	int i;
4763 
4764 	if (grp->cg_children == 0)
4765 		return (-1);
4766 
4767 	MPASS(grp->cg_child);
4768 	for (i = 0; i < grp->cg_children; i++) {
4769 		if (CPU_ISSET(cpu, &grp->cg_child[i].cg_mask))
4770 			return i;
4771 	}
4772 
4773 	return (-1);
4774 }
4775 
4777 /*
4778  * Find an L2 neighbor of the given CPU or return -1 if none found.  This
4779  * does not distinguish among multiple L2 neighbors if the given CPU has
4780  * more than one (it will always return the same result in that case).
4781  */
4782 static int
4783 find_l2_neighbor(int cpu)
4784 {
4785 	struct cpu_group *grp;
4786 	int i;
4787 
4788 	grp = cpu_top;
4789 	if (grp == NULL)
4790 		return (-1);
4791 
4792 	/*
4793 	 * Find the smallest CPU group that contains the given core.
4794 	 */
4795 	i = 0;
4796 	while ((i = find_child_with_core(cpu, grp)) != -1) {
4797 		/*
4798 		 * If the smallest group containing the given CPU has less
4799 		 * than two members, we conclude the given CPU has no
4800 		 * L2 neighbor.
4801 		 */
4802 		if (grp->cg_child[i].cg_count <= 1)
4803 			return (-1);
4804 		grp = &grp->cg_child[i];
4805 	}
4806 
4807 	/* Must share L2. */
4808 	if (grp->cg_level > CG_SHARE_L2 || grp->cg_level == CG_SHARE_NONE)
4809 		return (-1);
4810 
4811 	/*
4812 	 * Select the first member of the set that isn't the reference
4813 	 * CPU, which at this point is guaranteed to exist.
4814 	 */
4815 	for (i = 0; i < CPU_SETSIZE; i++) {
4816 		if (CPU_ISSET(i, &grp->cg_mask) && i != cpu)
4817 			return (i);
4818 	}
4819 
4820 	/* Should never be reached */
4821 	return (-1);
4822 }
4823 
4824 #else
4825 static int
4826 find_l2_neighbor(int cpu)
4827 {
4828 
4829 	return (-1);
4830 }
4831 #endif
4832 
4833 /*
4834  * CPU mapping behaviors
4835  * ---------------------
4836  * 'separate txrx' refers to the separate_txrx sysctl
4837  * 'use logical' refers to the use_logical_cores sysctl
4838  * 'INTR CPUS' indicates whether bus_get_cpus(INTR_CPUS) succeeded
4839  *
4840  *  separate     use     INTR
4841  *    txrx     logical   CPUS   result
4842  * ---------- --------- ------ ------------------------------------------------
4843  *     -          -       X     RX and TX queues mapped to consecutive physical
4844  *                              cores with RX/TX pairs on same core and excess
4845  *                              of either following
4846  *     -          X       X     RX and TX queues mapped to consecutive cores
4847  *                              of any type with RX/TX pairs on same core and
4848  *                              excess of either following
4849  *     X          -       X     RX and TX queues mapped to consecutive physical
4850  *                              cores; all RX then all TX
4851  *     X          X       X     RX queues mapped to consecutive physical cores
4852  *                              first, then TX queues mapped to L2 neighbor of
4853  *                              the corresponding RX queue if one exists,
4854  *                              otherwise to consecutive physical cores
4855  *     -         n/a      -     RX and TX queues mapped to consecutive cores of
4856  *                              any type with RX/TX pairs on same core and excess
4857  *                              of either following
4858  *     X         n/a      -     RX and TX queues mapped to consecutive cores of
4859  *                              any type; all RX then all TX
4860  */
4861 static unsigned int
4862 get_cpuid_for_queue(if_ctx_t ctx, unsigned int base_cpuid, unsigned int qid,
4863     bool is_tx)
4864 {
4865 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
4866 	unsigned int core_index;
4867 
4868 	if (ctx->ifc_sysctl_separate_txrx) {
4869 		/*
4870 		 * When using separate CPUs for TX and RX, the assignment
4871 		 * will always be of a consecutive CPU out of the set of
4872 		 * context CPUs, except for the specific case where the
4873 		 * context CPUs are physical cores, the use of logical cores
4874 		 * has been enabled, the assignment is for TX, the TX qid
4875 		 * corresponds to an RX qid, and the CPU assigned to the
4876 		 * corresponding RX queue has an L2 neighbor.
4877 		 */
4878 		if (ctx->ifc_sysctl_use_logical_cores &&
4879 		    ctx->ifc_cpus_are_physical_cores &&
4880 		    is_tx && qid < scctx->isc_nrxqsets) {
4881 			int l2_neighbor;
4882 			unsigned int rx_cpuid;
4883 
4884 			rx_cpuid = cpuid_advance(ctx, base_cpuid, qid);
4885 			l2_neighbor = find_l2_neighbor(rx_cpuid);
4886 			if (l2_neighbor != -1) {
4887 				return (l2_neighbor);
4888 			}
4889 			/*
4890 			 * ... else fall through to the normal
4891 			 * consecutive-after-RX assignment scheme.
4892 			 *
4893 			 * Note that we are assuming that all RX queue CPUs
4894 			 * have an L2 neighbor, or all do not.  If a mixed
4895 			 * scenario is possible, we will have to keep track
4896 			 * separately of how many queues prior to this one
4897 			 * were not able to be assigned to an L2 neighbor.
4898 			 */
4899 		}
4900 		if (is_tx)
4901 			core_index = scctx->isc_nrxqsets + qid;
4902 		else
4903 			core_index = qid;
4904 	} else {
4905 		core_index = qid;
4906 	}
4907 
4908 	return (cpuid_advance(ctx, base_cpuid, core_index));
4909 }
4910 
4911 static uint16_t
4912 get_ctx_core_offset(if_ctx_t ctx)
4913 {
4914 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
4915 	struct cpu_offset *op;
4916 	cpuset_t assigned_cpus;
4917 	unsigned int cores_consumed;
4918 	unsigned int base_cpuid = ctx->ifc_sysctl_core_offset;
4919 	unsigned int first_valid;
4920 	unsigned int last_valid;
4921 	unsigned int i;
4922 
4923 	first_valid = CPU_FFS(&ctx->ifc_cpus) - 1;
4924 	last_valid = CPU_FLS(&ctx->ifc_cpus) - 1;
4925 
4926 	if (base_cpuid != CORE_OFFSET_UNSPECIFIED) {
4927 		/*
4928 		 * Align the user-chosen base CPU ID to the next valid CPU
4929 		 * for this device.  If the chosen base CPU ID is smaller
4930 		 * than the first valid CPU or larger than the last valid
4931 		 * CPU, we assume the user does not know what the valid
4932 		 * range is for this device and is thinking in terms of a
4933 		 * zero-based reference frame, and so we shift the given
4934 		 * value into the valid range (and wrap accordingly) so the
4935 		 * intent is translated to the proper frame of reference.
4936 		 * If the base CPU ID is within the valid first/last, but
4937 		 * does not correspond to a valid CPU, it is advanced to the
4938 		 * next valid CPU (wrapping if necessary).
4939 		 */
4940 		if (base_cpuid < first_valid || base_cpuid > last_valid) {
4941 			/* shift from zero-based to first_valid-based */
4942 			base_cpuid += first_valid;
4943 			/* wrap to range [first_valid, last_valid] */
4944 			base_cpuid = (base_cpuid - first_valid) %
4945 			    (last_valid - first_valid + 1);
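			/*
			 * Example (valid CPUs 0..31): a requested base of 34
			 * is shifted and wrapped to CPU 2.
			 */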
4946 		}
4947 		if (!CPU_ISSET(base_cpuid, &ctx->ifc_cpus)) {
4948 			/*
4949 			 * base_cpuid is in [first_valid, last_valid], but
4950 			 * not a member of the valid set.  In this case,
4951 			 * there will always be a member of the valid set
4952 			 * with a CPU ID that is greater than base_cpuid,
4953 			 * and we simply advance to it.
4954 			 */
4955 			while (!CPU_ISSET(base_cpuid, &ctx->ifc_cpus))
4956 				base_cpuid++;
4957 		}
4958 		return (base_cpuid);
4959 	}
4960 
4961 	/*
4962 	 * Determine how many cores will be consumed by performing the CPU
4963 	 * assignments and counting how many of the assigned CPUs correspond
4964 	 * to CPUs in the set of context CPUs.  This is done using the CPU
4965 	 * ID first_valid as the base CPU ID, as the base CPU must be within
4966 	 * the set of context CPUs.
4967 	 *
4968 	 * Note not all assigned CPUs will be in the set of context CPUs
4969 	 * when separate CPUs are being allocated to TX and RX queues,
4970 	 * assignment to logical cores has been enabled, the set of context
4971 	 * CPUs contains only physical CPUs, and TX queues are mapped to L2
4972 	 * neighbors of CPUs that RX queues have been mapped to - in this
4973 	 * case we do only want to count how many CPUs in the set of context
4974 	 * CPUs have been consumed, as that determines the next CPU in that
4975 	 * set to start allocating at for the next device for which
4976 	 * core_offset is not set.
4977 	 */
4978 	CPU_ZERO(&assigned_cpus);
4979 	for (i = 0; i < scctx->isc_ntxqsets; i++)
4980 		CPU_SET(get_cpuid_for_queue(ctx, first_valid, i, true),
4981 		    &assigned_cpus);
4982 	for (i = 0; i < scctx->isc_nrxqsets; i++)
4983 		CPU_SET(get_cpuid_for_queue(ctx, first_valid, i, false),
4984 		    &assigned_cpus);
4985 	CPU_AND(&assigned_cpus, &assigned_cpus, &ctx->ifc_cpus);
4986 	cores_consumed = CPU_COUNT(&assigned_cpus);
4987 
4988 	mtx_lock(&cpu_offset_mtx);
4989 	SLIST_FOREACH(op, &cpu_offsets, entries) {
4990 		if (CPU_CMP(&ctx->ifc_cpus, &op->set) == 0) {
4991 			base_cpuid = op->next_cpuid;
4992 			op->next_cpuid = cpuid_advance(ctx, op->next_cpuid,
4993 			    cores_consumed);
4994 			MPASS(op->refcount < UINT_MAX);
4995 			op->refcount++;
4996 			break;
4997 		}
4998 	}
4999 	if (base_cpuid == CORE_OFFSET_UNSPECIFIED) {
5000 		base_cpuid = first_valid;
5001 		op = malloc(sizeof(struct cpu_offset), M_IFLIB,
5002 		    M_NOWAIT | M_ZERO);
5003 		if (op == NULL) {
5004 			device_printf(ctx->ifc_dev,
5005 			    "allocation for cpu offset failed.\n");
5006 		} else {
5007 			op->next_cpuid = cpuid_advance(ctx, base_cpuid,
5008 			    cores_consumed);
5009 			op->refcount = 1;
5010 			CPU_COPY(&ctx->ifc_cpus, &op->set);
5011 			SLIST_INSERT_HEAD(&cpu_offsets, op, entries);
5012 		}
5013 	}
5014 	mtx_unlock(&cpu_offset_mtx);
5015 
5016 	return (base_cpuid);
5017 }
5018 
5019 static void
5020 unref_ctx_core_offset(if_ctx_t ctx)
5021 {
5022 	struct cpu_offset *op, *top;
5023 
5024 	mtx_lock(&cpu_offset_mtx);
5025 	SLIST_FOREACH_SAFE(op, &cpu_offsets, entries, top) {
5026 		if (CPU_CMP(&ctx->ifc_cpus, &op->set) == 0) {
5027 			MPASS(op->refcount > 0);
5028 			op->refcount--;
5029 			if (op->refcount == 0) {
5030 				SLIST_REMOVE(&cpu_offsets, op, cpu_offset, entries);
5031 				free(op, M_IFLIB);
5032 			}
5033 			break;
5034 		}
5035 	}
5036 	mtx_unlock(&cpu_offset_mtx);
5037 }
5038 
5039 int
5040 iflib_device_register(device_t dev, void *sc, if_shared_ctx_t sctx, if_ctx_t *ctxp)
5041 {
5042 	if_ctx_t ctx;
5043 	if_t ifp;
5044 	if_softc_ctx_t scctx;
5045 	kobjop_desc_t kobj_desc;
5046 	kobj_method_t *kobj_method;
5047 	int err, msix, rid;
5048 	int num_txd, num_rxd;
5049 
5050 	ctx = malloc(sizeof(*ctx), M_IFLIB, M_WAITOK|M_ZERO);
5051 
5052 	if (sc == NULL) {
5053 		sc = malloc(sctx->isc_driver->size, M_IFLIB, M_WAITOK|M_ZERO);
5054 		device_set_softc(dev, ctx);
5055 		ctx->ifc_flags |= IFC_SC_ALLOCATED;
5056 	}
5057 
5058 	ctx->ifc_sctx = sctx;
5059 	ctx->ifc_dev = dev;
5060 	ctx->ifc_softc = sc;
5061 
5062 	if ((err = iflib_register(ctx)) != 0) {
5063 		device_printf(dev, "iflib_register failed %d\n", err);
5064 		goto fail_ctx_free;
5065 	}
5066 	iflib_add_device_sysctl_pre(ctx);
5067 
5068 	scctx = &ctx->ifc_softc_ctx;
5069 	ifp = ctx->ifc_ifp;
5070 
5071 	iflib_reset_qvalues(ctx);
5072 	IFNET_WLOCK();
5073 	CTX_LOCK(ctx);
5074 	if ((err = IFDI_ATTACH_PRE(ctx)) != 0) {
5075 		device_printf(dev, "IFDI_ATTACH_PRE failed %d\n", err);
5076 		goto fail_unlock;
5077 	}
5078 	_iflib_pre_assert(scctx);
5079 	ctx->ifc_txrx = *scctx->isc_txrx;
5080 
5081 	MPASS(scctx->isc_dma_width <= flsll(BUS_SPACE_MAXADDR));
5082 
5083 	if (sctx->isc_flags & IFLIB_DRIVER_MEDIA)
5084 		ctx->ifc_mediap = scctx->isc_media;
5085 
5086 #ifdef INVARIANTS
5087 	if (scctx->isc_capabilities & IFCAP_TXCSUM)
5088 		MPASS(scctx->isc_tx_csum_flags);
5089 #endif
5090 
5091 	if_setcapabilities(ifp,
5092 	    scctx->isc_capabilities | IFCAP_HWSTATS | IFCAP_MEXTPG);
5093 	if_setcapenable(ifp,
5094 	    scctx->isc_capenable | IFCAP_HWSTATS | IFCAP_MEXTPG);
5095 
5096 	if (scctx->isc_ntxqsets == 0 || (scctx->isc_ntxqsets_max && scctx->isc_ntxqsets_max < scctx->isc_ntxqsets))
5097 		scctx->isc_ntxqsets = scctx->isc_ntxqsets_max;
5098 	if (scctx->isc_nrxqsets == 0 || (scctx->isc_nrxqsets_max && scctx->isc_nrxqsets_max < scctx->isc_nrxqsets))
5099 		scctx->isc_nrxqsets = scctx->isc_nrxqsets_max;
5100 
5101 	num_txd = iflib_num_tx_descs(ctx);
5102 	num_rxd = iflib_num_rx_descs(ctx);
5103 
5104 	/* XXX change for per-queue sizes */
5105 	device_printf(dev, "Using %d TX descriptors and %d RX descriptors\n",
5106 	    num_txd, num_rxd);
5107 
5108 	if (scctx->isc_tx_nsegments > num_txd / MAX_SINGLE_PACKET_FRACTION)
5109 		scctx->isc_tx_nsegments = max(1, num_txd /
5110 		    MAX_SINGLE_PACKET_FRACTION);
5111 	if (scctx->isc_tx_tso_segments_max > num_txd /
5112 	    MAX_SINGLE_PACKET_FRACTION)
5113 		scctx->isc_tx_tso_segments_max = max(1,
5114 		    num_txd / MAX_SINGLE_PACKET_FRACTION);
5115 
5116 	/* TSO parameters - dig these out of the data sheet - simply correspond to tag setup */
5117 	if (if_getcapabilities(ifp) & IFCAP_TSO) {
5118 		/*
5119 		 * The stack can't handle a TSO size larger than IP_MAXPACKET,
5120 		 * but some MACs do.
5121 		 */
5122 		if_sethwtsomax(ifp, min(scctx->isc_tx_tso_size_max,
5123 		    IP_MAXPACKET));
5124 		/*
5125 		 * Take maximum number of m_pullup(9)'s in iflib_parse_header()
5126 		 * into account.  In the worst case, each of these calls will
5127 		 * add another mbuf and, thus, the requirement for another DMA
5128 		 * segment.  So for best performance, it doesn't make sense to
5129 		 * advertise a maximum of TSO segments that typically will
5130 		 * require defragmentation in iflib_encap().
5131 		 */
5132 		if_sethwtsomaxsegcount(ifp, scctx->isc_tx_tso_segments_max - 3);
5133 		if_sethwtsomaxsegsize(ifp, scctx->isc_tx_tso_segsize_max);
5134 	}
5135 	if (scctx->isc_rss_table_size == 0)
5136 		scctx->isc_rss_table_size = 64;
5137 	scctx->isc_rss_table_mask = scctx->isc_rss_table_size-1;
5138 
5139 	GROUPTASK_INIT(&ctx->ifc_admin_task, 0, _task_fn_admin, ctx);
5140 	/* XXX format name */
5141 	taskqgroup_attach(qgroup_if_config_tqg, &ctx->ifc_admin_task, ctx,
5142 	    NULL, NULL, "admin");
5143 
5144 	/* Set up cpu set.  If it fails, use the set of all CPUs. */
5145 	if (bus_get_cpus(dev, INTR_CPUS, sizeof(ctx->ifc_cpus), &ctx->ifc_cpus) != 0) {
5146 		device_printf(dev, "Unable to fetch CPU list\n");
5147 		CPU_COPY(&all_cpus, &ctx->ifc_cpus);
5148 		ctx->ifc_cpus_are_physical_cores = false;
5149 	} else
5150 		ctx->ifc_cpus_are_physical_cores = true;
5151 	MPASS(CPU_COUNT(&ctx->ifc_cpus) > 0);
5152 
5153 	/*
5154 	** Now set up MSI or MSI-X, should return us the number of supported
5155 	** vectors (will be 1 for a legacy interrupt and MSI).
5156 	*/
5157 	if (sctx->isc_flags & IFLIB_SKIP_MSIX) {
5158 		msix = scctx->isc_vectors;
5159 	} else if (scctx->isc_msix_bar != 0)
5160 	       /*
5161 		* The simple fact that isc_msix_bar is not 0 does not mean
5162 		* we have a good value there that is known to work.
5163 		*/
5164 		msix = iflib_msix_init(ctx);
5165 	else {
5166 		scctx->isc_vectors = 1;
5167 		scctx->isc_ntxqsets = 1;
5168 		scctx->isc_nrxqsets = 1;
5169 		scctx->isc_intr = IFLIB_INTR_LEGACY;
5170 		msix = 0;
5171 	}
5172 	/* Get memory for the station queues */
5173 	if ((err = iflib_queues_alloc(ctx))) {
5174 		device_printf(dev, "Unable to allocate queue memory\n");
5175 		goto fail_intr_free;
5176 	}
5177 
5178 	if ((err = iflib_qset_structures_setup(ctx)))
5179 		goto fail_queues;
5180 
5181 	/*
5182 	 * Now that we know how many queues there are, get the core offset.
5183 	 */
5184 	ctx->ifc_sysctl_core_offset = get_ctx_core_offset(ctx);
5185 
5186 	if (msix > 1) {
5187 		/*
5188 		 * When using MSI-X, ensure that ifdi_{r,t}x_queue_intr_enable
5189 		 * aren't the default NULL implementation.
5190 		 */
5191 		kobj_desc = &ifdi_rx_queue_intr_enable_desc;
5192 		kobj_method = kobj_lookup_method(((kobj_t)ctx)->ops->cls, NULL,
5193 		    kobj_desc);
5194 		if (kobj_method == &kobj_desc->deflt) {
5195 			device_printf(dev,
5196 			    "MSI-X requires ifdi_rx_queue_intr_enable method");
5197 			err = EOPNOTSUPP;
5198 			goto fail_queues;
5199 		}
5200 		kobj_desc = &ifdi_tx_queue_intr_enable_desc;
5201 		kobj_method = kobj_lookup_method(((kobj_t)ctx)->ops->cls, NULL,
5202 		    kobj_desc);
5203 		if (kobj_method == &kobj_desc->deflt) {
5204 			device_printf(dev,
5205 			    "MSI-X requires ifdi_tx_queue_intr_enable method");
5206 			err = EOPNOTSUPP;
5207 			goto fail_queues;
5208 		}
5209 
5210 		/*
5211 		 * Assign the MSI-X vectors.
5212 		 * Note that the default NULL ifdi_msix_intr_assign method will
5213 		 * fail here, too.
5214 		 */
5215 		err = IFDI_MSIX_INTR_ASSIGN(ctx, msix);
5216 		if (err != 0) {
5217 			device_printf(dev, "IFDI_MSIX_INTR_ASSIGN failed %d\n",
5218 			    err);
5219 			goto fail_queues;
5220 		}
5221 	} else if (scctx->isc_intr != IFLIB_INTR_MSIX) {
5222 		rid = 0;
5223 		if (scctx->isc_intr == IFLIB_INTR_MSI) {
5224 			MPASS(msix == 1);
5225 			rid = 1;
5226 		}
5227 		if ((err = iflib_legacy_setup(ctx, ctx->isc_legacy_intr, ctx->ifc_softc, &rid, "irq0")) != 0) {
5228 			device_printf(dev, "iflib_legacy_setup failed %d\n", err);
5229 			goto fail_queues;
5230 		}
5231 	} else {
5232 		device_printf(dev,
5233 		    "Cannot use iflib with only 1 MSI-X interrupt!\n");
5234 		err = ENODEV;
5235 		goto fail_queues;
5236 	}
5237 
5238 	ether_ifattach(ctx->ifc_ifp, ctx->ifc_mac.octet);
5239 
5240 	if ((err = IFDI_ATTACH_POST(ctx)) != 0) {
5241 		device_printf(dev, "IFDI_ATTACH_POST failed %d\n", err);
5242 		goto fail_detach;
5243 	}
5244 
5245 	/*
5246 	 * Tell the upper layer(s) if IFCAP_VLAN_MTU is supported.
5247 	 * This must appear after the call to ether_ifattach() because
5248 	 * ether_ifattach() sets if_hdrlen to the default value.
5249 	 */
5250 	if (if_getcapabilities(ifp) & IFCAP_VLAN_MTU)
5251 		if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
5252 
5253 	if ((err = iflib_netmap_attach(ctx))) {
5254 		device_printf(ctx->ifc_dev, "netmap attach failed: %d\n", err);
5255 		goto fail_detach;
5256 	}
5257 	*ctxp = ctx;
5258 
5259 	DEBUGNET_SET(ctx->ifc_ifp, iflib);
5260 
5261 	if_setgetcounterfn(ctx->ifc_ifp, iflib_if_get_counter);
5262 	iflib_add_device_sysctl_post(ctx);
5263 	iflib_add_pfil(ctx);
5264 	ctx->ifc_flags |= IFC_INIT_DONE;
5265 	CTX_UNLOCK(ctx);
5266 	IFNET_WUNLOCK();
5267 
5268 	return (0);
5269 
5270 fail_detach:
5271 	ether_ifdetach(ctx->ifc_ifp);
5272 fail_queues:
5273 	iflib_tqg_detach(ctx);
5274 	iflib_tx_structures_free(ctx);
5275 	iflib_rx_structures_free(ctx);
5276 	IFDI_DETACH(ctx);
5277 	IFDI_QUEUES_FREE(ctx);
5278 fail_intr_free:
5279 	iflib_free_intr_mem(ctx);
5280 fail_unlock:
5281 	CTX_UNLOCK(ctx);
5282 	IFNET_WUNLOCK();
5283 	iflib_deregister(ctx);
5284 fail_ctx_free:
5285 	device_set_softc(ctx->ifc_dev, NULL);
	if (ctx->ifc_flags & IFC_SC_ALLOCATED)
		free(ctx->ifc_softc, M_IFLIB);
	free(ctx, M_IFLIB);
5289 	return (err);
5290 }
5291 
5292 int
5293 iflib_pseudo_register(device_t dev, if_shared_ctx_t sctx, if_ctx_t *ctxp,
5294 					  struct iflib_cloneattach_ctx *clctx)
5295 {
5296 	int num_txd, num_rxd;
5297 	int err;
5298 	if_ctx_t ctx;
5299 	if_t ifp;
5300 	if_softc_ctx_t scctx;
5301 	int i;
5302 	void *sc;
5303 
5304 	ctx = malloc(sizeof(*ctx), M_IFLIB, M_WAITOK|M_ZERO);
5305 	sc = malloc(sctx->isc_driver->size, M_IFLIB, M_WAITOK|M_ZERO);
5306 	ctx->ifc_flags |= IFC_SC_ALLOCATED;
5307 	if (sctx->isc_flags & (IFLIB_PSEUDO|IFLIB_VIRTUAL))
5308 		ctx->ifc_flags |= IFC_PSEUDO;
5309 
5310 	ctx->ifc_sctx = sctx;
5311 	ctx->ifc_softc = sc;
5312 	ctx->ifc_dev = dev;
5313 
5314 	if ((err = iflib_register(ctx)) != 0) {
5315 		device_printf(dev, "%s: iflib_register failed %d\n", __func__, err);
5316 		goto fail_ctx_free;
5317 	}
5318 	iflib_add_device_sysctl_pre(ctx);
5319 
5320 	scctx = &ctx->ifc_softc_ctx;
5321 	ifp = ctx->ifc_ifp;
5322 
5323 	iflib_reset_qvalues(ctx);
5324 	CTX_LOCK(ctx);
5325 	if ((err = IFDI_ATTACH_PRE(ctx)) != 0) {
5326 		device_printf(dev, "IFDI_ATTACH_PRE failed %d\n", err);
5327 		goto fail_unlock;
5328 	}
5329 	if (sctx->isc_flags & IFLIB_GEN_MAC)
5330 		ether_gen_addr(ifp, &ctx->ifc_mac);
5331 	if ((err = IFDI_CLONEATTACH(ctx, clctx->cc_ifc, clctx->cc_name,
5332 								clctx->cc_params)) != 0) {
5333 		device_printf(dev, "IFDI_CLONEATTACH failed %d\n", err);
5334 		goto fail_unlock;
5335 	}
5336 #ifdef INVARIANTS
5337 	if (scctx->isc_capabilities & IFCAP_TXCSUM)
5338 		MPASS(scctx->isc_tx_csum_flags);
5339 #endif
5340 
5341 	if_setcapabilities(ifp, scctx->isc_capabilities | IFCAP_HWSTATS | IFCAP_LINKSTATE);
5342 	if_setcapenable(ifp, scctx->isc_capenable | IFCAP_HWSTATS | IFCAP_LINKSTATE);
5343 
5344 	ifp->if_flags |= IFF_NOGROUP;
5345 	if (sctx->isc_flags & IFLIB_PSEUDO) {
5346 		ifmedia_add(ctx->ifc_mediap, IFM_ETHER | IFM_AUTO, 0, NULL);
5347 		ifmedia_set(ctx->ifc_mediap, IFM_ETHER | IFM_AUTO);
5348 		if (sctx->isc_flags & IFLIB_PSEUDO_ETHER) {
5349 			ether_ifattach(ctx->ifc_ifp, ctx->ifc_mac.octet);
5350 		} else {
5351 			if_attach(ctx->ifc_ifp);
5352 			bpfattach(ctx->ifc_ifp, DLT_NULL, sizeof(u_int32_t));
5353 		}
5354 
5355 		if ((err = IFDI_ATTACH_POST(ctx)) != 0) {
5356 			device_printf(dev, "IFDI_ATTACH_POST failed %d\n", err);
5357 			goto fail_detach;
5358 		}
5359 		*ctxp = ctx;
5360 
5361 		/*
5362 		 * Tell the upper layer(s) if IFCAP_VLAN_MTU is supported.
5363 		 * This must appear after the call to ether_ifattach() because
5364 		 * ether_ifattach() sets if_hdrlen to the default value.
5365 		 */
5366 		if (if_getcapabilities(ifp) & IFCAP_VLAN_MTU)
5367 			if_setifheaderlen(ifp,
5368 			    sizeof(struct ether_vlan_header));
5369 
5370 		if_setgetcounterfn(ctx->ifc_ifp, iflib_if_get_counter);
5371 		iflib_add_device_sysctl_post(ctx);
5372 		ctx->ifc_flags |= IFC_INIT_DONE;
5373 		CTX_UNLOCK(ctx);
5374 		return (0);
5375 	}
5376 	ifmedia_add(ctx->ifc_mediap, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
5377 	ifmedia_add(ctx->ifc_mediap, IFM_ETHER | IFM_AUTO, 0, NULL);
5378 	ifmedia_set(ctx->ifc_mediap, IFM_ETHER | IFM_AUTO);
5379 
5380 	_iflib_pre_assert(scctx);
5381 	ctx->ifc_txrx = *scctx->isc_txrx;
5382 
5383 	if (scctx->isc_ntxqsets == 0 || (scctx->isc_ntxqsets_max && scctx->isc_ntxqsets_max < scctx->isc_ntxqsets))
5384 		scctx->isc_ntxqsets = scctx->isc_ntxqsets_max;
5385 	if (scctx->isc_nrxqsets == 0 || (scctx->isc_nrxqsets_max && scctx->isc_nrxqsets_max < scctx->isc_nrxqsets))
5386 		scctx->isc_nrxqsets = scctx->isc_nrxqsets_max;
5387 
5388 	num_txd = iflib_num_tx_descs(ctx);
5389 	num_rxd = iflib_num_rx_descs(ctx);
5390 
5391 	/* XXX change for per-queue sizes */
5392 	device_printf(dev, "Using %d TX descriptors and %d RX descriptors\n",
5393 	    num_txd, num_rxd);
5394 
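	/*
	 * Cap how many DMA segments a single packet may consume to a
	 * fraction of the ring so that one packet cannot monopolize it;
	 * e.g., assuming MAX_SINGLE_PACKET_FRACTION were 12, a
	 * 1024-descriptor ring would cap a packet at 1024 / 12 = 85
	 * segments.
	 */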
5395 	if (scctx->isc_tx_nsegments > num_txd / MAX_SINGLE_PACKET_FRACTION)
5396 		scctx->isc_tx_nsegments = max(1, num_txd /
5397 		    MAX_SINGLE_PACKET_FRACTION);
5398 	if (scctx->isc_tx_tso_segments_max > num_txd /
5399 	    MAX_SINGLE_PACKET_FRACTION)
5400 		scctx->isc_tx_tso_segments_max = max(1,
5401 		    num_txd / MAX_SINGLE_PACKET_FRACTION);
5402 
	/*
	 * TSO parameters: dig these out of the data sheet; they simply
	 * correspond to tag setup.
	 */
5404 	if (if_getcapabilities(ifp) & IFCAP_TSO) {
5405 		/*
5406 		 * The stack can't handle a TSO size larger than IP_MAXPACKET,
5407 		 * but some MACs do.
5408 		 */
5409 		if_sethwtsomax(ifp, min(scctx->isc_tx_tso_size_max,
5410 		    IP_MAXPACKET));
5411 		/*
5412 		 * Take maximum number of m_pullup(9)'s in iflib_parse_header()
5413 		 * into account.  In the worst case, each of these calls will
5414 		 * add another mbuf and, thus, the requirement for another DMA
5415 		 * segment.  So for best performance, it doesn't make sense to
		 * advertise a maximum of TSO segments that typically will
5417 		 * require defragmentation in iflib_encap().
5418 		 */
5419 		if_sethwtsomaxsegcount(ifp, scctx->isc_tx_tso_segments_max - 3);
5420 		if_sethwtsomaxsegsize(ifp, scctx->isc_tx_tso_segsize_max);
5421 	}
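	/*
	 * The mask below stands in for a modulo when hashing into the RSS
	 * indirection table; this assumes isc_rss_table_size is a power of
	 * two (the default of 64 yields a mask of 0x3f).
	 */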
5422 	if (scctx->isc_rss_table_size == 0)
5423 		scctx->isc_rss_table_size = 64;
5424 	scctx->isc_rss_table_mask = scctx->isc_rss_table_size-1;
5425 
5426 	GROUPTASK_INIT(&ctx->ifc_admin_task, 0, _task_fn_admin, ctx);
5427 	/* XXX format name */
5428 	taskqgroup_attach(qgroup_if_config_tqg, &ctx->ifc_admin_task, ctx,
5429 	    NULL, NULL, "admin");
5430 
5431 	/* XXX --- can support > 1 -- but keep it simple for now */
5432 	scctx->isc_intr = IFLIB_INTR_LEGACY;
5433 
5434 	/* Get memory for the station queues */
5435 	if ((err = iflib_queues_alloc(ctx))) {
5436 		device_printf(dev, "Unable to allocate queue memory\n");
5437 		goto fail_iflib_detach;
5438 	}
5439 
5440 	if ((err = iflib_qset_structures_setup(ctx))) {
5441 		device_printf(dev, "qset structure setup failed %d\n", err);
5442 		goto fail_queues;
5443 	}
5444 
5445 	/*
5446 	 * XXX What if anything do we want to do about interrupts?
5447 	 */
5448 	ether_ifattach(ctx->ifc_ifp, ctx->ifc_mac.octet);
5449 	if ((err = IFDI_ATTACH_POST(ctx)) != 0) {
5450 		device_printf(dev, "IFDI_ATTACH_POST failed %d\n", err);
5451 		goto fail_detach;
5452 	}
5453 
5454 	/*
5455 	 * Tell the upper layer(s) if IFCAP_VLAN_MTU is supported.
5456 	 * This must appear after the call to ether_ifattach() because
5457 	 * ether_ifattach() sets if_hdrlen to the default value.
5458 	 */
5459 	if (if_getcapabilities(ifp) & IFCAP_VLAN_MTU)
5460 		if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
5461 
5462 	/* XXX handle more than one queue */
5463 	for (i = 0; i < scctx->isc_nrxqsets; i++)
5464 		IFDI_RX_CLSET(ctx, 0, i, ctx->ifc_rxqs[i].ifr_fl[0].ifl_sds.ifsd_cl);
5465 
5466 	*ctxp = ctx;
5467 
5468 	if_setgetcounterfn(ctx->ifc_ifp, iflib_if_get_counter);
5469 	iflib_add_device_sysctl_post(ctx);
5470 	ctx->ifc_flags |= IFC_INIT_DONE;
5471 	CTX_UNLOCK(ctx);
5472 
5473 	return (0);
5474 fail_detach:
5475 	ether_ifdetach(ctx->ifc_ifp);
5476 fail_queues:
5477 	iflib_tqg_detach(ctx);
5478 	iflib_tx_structures_free(ctx);
5479 	iflib_rx_structures_free(ctx);
5480 fail_iflib_detach:
5481 	IFDI_DETACH(ctx);
5482 	IFDI_QUEUES_FREE(ctx);
5483 fail_unlock:
5484 	CTX_UNLOCK(ctx);
5485 	iflib_deregister(ctx);
5486 fail_ctx_free:
5487 	free(ctx->ifc_softc, M_IFLIB);
5488 	free(ctx, M_IFLIB);
5489 	return (err);
5490 }
5491 
5492 int
5493 iflib_pseudo_deregister(if_ctx_t ctx)
5494 {
5495 	if_t ifp = ctx->ifc_ifp;
5496 	if_shared_ctx_t sctx = ctx->ifc_sctx;
5497 
5498 	/* Unregister VLAN event handlers early */
5499 	iflib_unregister_vlan_handlers(ctx);
5500 
	if ((sctx->isc_flags & IFLIB_PSEUDO) &&
	    (sctx->isc_flags & IFLIB_PSEUDO_ETHER) == 0) {
5503 		bpfdetach(ifp);
5504 		if_detach(ifp);
5505 	} else {
5506 		ether_ifdetach(ifp);
5507 	}
5508 
5509 	iflib_tqg_detach(ctx);
5510 	iflib_tx_structures_free(ctx);
5511 	iflib_rx_structures_free(ctx);
5512 	IFDI_DETACH(ctx);
5513 	IFDI_QUEUES_FREE(ctx);
5514 
5515 	iflib_deregister(ctx);
5516 
5517 	if (ctx->ifc_flags & IFC_SC_ALLOCATED)
5518 		free(ctx->ifc_softc, M_IFLIB);
5519 	free(ctx, M_IFLIB);
5520 	return (0);
5521 }
5522 
5523 int
5524 iflib_device_attach(device_t dev)
5525 {
5526 	if_ctx_t ctx;
5527 	if_shared_ctx_t sctx;
5528 
5529 	if ((sctx = DEVICE_REGISTER(dev)) == NULL || sctx->isc_magic != IFLIB_MAGIC)
5530 		return (ENOTSUP);
5531 
5532 	pci_enable_busmaster(dev);
5533 
5534 	return (iflib_device_register(dev, NULL, sctx, &ctx));
5535 }
5536 
5537 int
5538 iflib_device_deregister(if_ctx_t ctx)
5539 {
5540 	if_t ifp = ctx->ifc_ifp;
5541 	device_t dev = ctx->ifc_dev;
5542 
5543 	/* Make sure VLANS are not using driver */
5544 	if (if_vlantrunkinuse(ifp)) {
5545 		device_printf(dev, "Vlan in use, detach first\n");
5546 		return (EBUSY);
5547 	}
5548 #ifdef PCI_IOV
5549 	if (!CTX_IS_VF(ctx) && pci_iov_detach(dev) != 0) {
5550 		device_printf(dev, "SR-IOV in use; detach first.\n");
5551 		return (EBUSY);
5552 	}
5553 #endif
5554 
5555 	STATE_LOCK(ctx);
5556 	ctx->ifc_flags |= IFC_IN_DETACH;
5557 	STATE_UNLOCK(ctx);
5558 
5559 	/* Unregister VLAN handlers before calling iflib_stop() */
5560 	iflib_unregister_vlan_handlers(ctx);
5561 
5562 	iflib_netmap_detach(ifp);
5563 	ether_ifdetach(ifp);
5564 
5565 	CTX_LOCK(ctx);
5566 	iflib_stop(ctx);
5567 	CTX_UNLOCK(ctx);
5568 
5569 	iflib_rem_pfil(ctx);
5570 	if (ctx->ifc_led_dev != NULL)
5571 		led_destroy(ctx->ifc_led_dev);
5572 
5573 	iflib_tqg_detach(ctx);
5574 	iflib_tx_structures_free(ctx);
5575 	iflib_rx_structures_free(ctx);
5576 
5577 	CTX_LOCK(ctx);
5578 	IFDI_DETACH(ctx);
5579 	IFDI_QUEUES_FREE(ctx);
5580 	CTX_UNLOCK(ctx);
5581 
	/* ether_ifdetach calls if_qflush; destroy the lock only afterwards. */
5583 	iflib_free_intr_mem(ctx);
5584 
5585 	bus_generic_detach(dev);
5586 
5587 	iflib_deregister(ctx);
5588 
5589 	device_set_softc(ctx->ifc_dev, NULL);
5590 	if (ctx->ifc_flags & IFC_SC_ALLOCATED)
5591 		free(ctx->ifc_softc, M_IFLIB);
5592 	unref_ctx_core_offset(ctx);
5593 	free(ctx, M_IFLIB);
5594 	return (0);
5595 }
5596 
5597 static void
5598 iflib_tqg_detach(if_ctx_t ctx)
5599 {
5600 	iflib_txq_t txq;
5601 	iflib_rxq_t rxq;
5602 	int i;
5603 	struct taskqgroup *tqg;
5604 
5605 	/* XXX drain any dependent tasks */
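	/* Only tasks that were attached (non-NULL gt_uniq) need detaching. */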
5606 	tqg = qgroup_if_io_tqg;
5607 	for (txq = ctx->ifc_txqs, i = 0; i < NTXQSETS(ctx); i++, txq++) {
5608 		callout_drain(&txq->ift_timer);
5609 #ifdef DEV_NETMAP
5610 		callout_drain(&txq->ift_netmap_timer);
5611 #endif /* DEV_NETMAP */
5612 		if (txq->ift_task.gt_uniq != NULL)
5613 			taskqgroup_detach(tqg, &txq->ift_task);
5614 	}
5615 	for (i = 0, rxq = ctx->ifc_rxqs; i < NRXQSETS(ctx); i++, rxq++) {
5616 		if (rxq->ifr_task.gt_uniq != NULL)
5617 			taskqgroup_detach(tqg, &rxq->ifr_task);
5618 	}
5619 	tqg = qgroup_if_config_tqg;
5620 	if (ctx->ifc_admin_task.gt_uniq != NULL)
5621 		taskqgroup_detach(tqg, &ctx->ifc_admin_task);
5622 	if (ctx->ifc_vflr_task.gt_uniq != NULL)
5623 		taskqgroup_detach(tqg, &ctx->ifc_vflr_task);
5624 }
5625 
5626 static void
5627 iflib_free_intr_mem(if_ctx_t ctx)
5628 {
5629 
5630 	if (ctx->ifc_softc_ctx.isc_intr != IFLIB_INTR_MSIX) {
5631 		iflib_irq_free(ctx, &ctx->ifc_legacy_irq);
5632 	}
5633 	if (ctx->ifc_softc_ctx.isc_intr != IFLIB_INTR_LEGACY) {
5634 		pci_release_msi(ctx->ifc_dev);
5635 	}
5636 	if (ctx->ifc_msix_mem != NULL) {
5637 		bus_release_resource(ctx->ifc_dev, SYS_RES_MEMORY,
5638 		    rman_get_rid(ctx->ifc_msix_mem), ctx->ifc_msix_mem);
5639 		ctx->ifc_msix_mem = NULL;
5640 	}
5641 }
5642 
5643 int
5644 iflib_device_detach(device_t dev)
5645 {
5646 	if_ctx_t ctx = device_get_softc(dev);
5647 
5648 	return (iflib_device_deregister(ctx));
5649 }
5650 
5651 int
5652 iflib_device_suspend(device_t dev)
5653 {
5654 	if_ctx_t ctx = device_get_softc(dev);
5655 
5656 	CTX_LOCK(ctx);
5657 	IFDI_SUSPEND(ctx);
5658 	CTX_UNLOCK(ctx);
5659 
	return (bus_generic_suspend(dev));
5661 }

int
5663 iflib_device_shutdown(device_t dev)
5664 {
5665 	if_ctx_t ctx = device_get_softc(dev);
5666 
5667 	CTX_LOCK(ctx);
5668 	IFDI_SHUTDOWN(ctx);
5669 	CTX_UNLOCK(ctx);
5670 
	return (bus_generic_suspend(dev));
5672 }
5673 
5674 int
5675 iflib_device_resume(device_t dev)
5676 {
5677 	if_ctx_t ctx = device_get_softc(dev);
5678 	iflib_txq_t txq = ctx->ifc_txqs;
5679 
5680 	CTX_LOCK(ctx);
5681 	IFDI_RESUME(ctx);
5682 	iflib_if_init_locked(ctx);
5683 	CTX_UNLOCK(ctx);
5684 	for (int i = 0; i < NTXQSETS(ctx); i++, txq++)
5685 		iflib_txq_check_drain(txq, IFLIB_RESTART_BUDGET);
5686 
5687 	return (bus_generic_resume(dev));
5688 }
5689 
5690 int
5691 iflib_device_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *params)
5692 {
5693 	int error;
5694 	if_ctx_t ctx = device_get_softc(dev);
5695 
5696 	CTX_LOCK(ctx);
5697 	error = IFDI_IOV_INIT(ctx, num_vfs, params);
5698 	CTX_UNLOCK(ctx);
5699 
5700 	return (error);
5701 }
5702 
5703 void
5704 iflib_device_iov_uninit(device_t dev)
5705 {
5706 	if_ctx_t ctx = device_get_softc(dev);
5707 
5708 	CTX_LOCK(ctx);
5709 	IFDI_IOV_UNINIT(ctx);
5710 	CTX_UNLOCK(ctx);
5711 }
5712 
5713 int
5714 iflib_device_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params)
5715 {
5716 	int error;
5717 	if_ctx_t ctx = device_get_softc(dev);
5718 
5719 	CTX_LOCK(ctx);
5720 	error = IFDI_IOV_VF_ADD(ctx, vfnum, params);
5721 	CTX_UNLOCK(ctx);
5722 
5723 	return (error);
5724 }
5725 
5726 /*********************************************************************
5727  *
5728  *  MODULE FUNCTION DEFINITIONS
5729  *
5730  **********************************************************************/
5731 
/*
 * The fast per-core I/O taskqueue threads and the control-operation
 * taskqueue are created by their TASKQGROUP_DEFINE declarations; module
 * init only establishes module-wide defaults.
 */
5736 static int
5737 iflib_module_init(void)
5738 {
5739 	iflib_timer_default = hz / 2;
5740 	return (0);
5741 }
5742 
5743 static int
5744 iflib_module_event_handler(module_t mod, int what, void *arg)
5745 {
5746 	int err;
5747 
5748 	switch (what) {
5749 	case MOD_LOAD:
5750 		if ((err = iflib_module_init()) != 0)
5751 			return (err);
5752 		break;
5753 	case MOD_UNLOAD:
5754 		return (EBUSY);
5755 	default:
5756 		return (EOPNOTSUPP);
5757 	}
5758 
5759 	return (0);
5760 }
5761 
5762 /*********************************************************************
5763  *
5764  *  PUBLIC FUNCTION DEFINITIONS
5765  *     ordered as in iflib.h
5766  *
5767  **********************************************************************/
5768 
5769 static void
5770 _iflib_assert(if_shared_ctx_t sctx)
5771 {
5772 	int i;
5773 
5774 	MPASS(sctx->isc_tx_maxsize);
5775 	MPASS(sctx->isc_tx_maxsegsize);
5776 
5777 	MPASS(sctx->isc_rx_maxsize);
5778 	MPASS(sctx->isc_rx_nsegments);
5779 	MPASS(sctx->isc_rx_maxsegsize);
5780 
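	/*
	 * Descriptor ring sizes must be powers of two so that ring indices
	 * can wrap with a cheap mask instead of a modulo.
	 */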
5781 	MPASS(sctx->isc_nrxqs >= 1 && sctx->isc_nrxqs <= 8);
5782 	for (i = 0; i < sctx->isc_nrxqs; i++) {
5783 		MPASS(sctx->isc_nrxd_min[i]);
5784 		MPASS(powerof2(sctx->isc_nrxd_min[i]));
5785 		MPASS(sctx->isc_nrxd_max[i]);
5786 		MPASS(powerof2(sctx->isc_nrxd_max[i]));
5787 		MPASS(sctx->isc_nrxd_default[i]);
5788 		MPASS(powerof2(sctx->isc_nrxd_default[i]));
5789 	}
5790 
5791 	MPASS(sctx->isc_ntxqs >= 1 && sctx->isc_ntxqs <= 8);
5792 	for (i = 0; i < sctx->isc_ntxqs; i++) {
5793 		MPASS(sctx->isc_ntxd_min[i]);
5794 		MPASS(powerof2(sctx->isc_ntxd_min[i]));
5795 		MPASS(sctx->isc_ntxd_max[i]);
5796 		MPASS(powerof2(sctx->isc_ntxd_max[i]));
5797 		MPASS(sctx->isc_ntxd_default[i]);
5798 		MPASS(powerof2(sctx->isc_ntxd_default[i]));
5799 	}
5800 }
5801 
5802 static void
5803 _iflib_pre_assert(if_softc_ctx_t scctx)
5804 {
5805 
5806 	MPASS(scctx->isc_txrx->ift_txd_encap);
5807 	MPASS(scctx->isc_txrx->ift_txd_flush);
5808 	MPASS(scctx->isc_txrx->ift_txd_credits_update);
5809 	MPASS(scctx->isc_txrx->ift_rxd_available);
5810 	MPASS(scctx->isc_txrx->ift_rxd_pkt_get);
5811 	MPASS(scctx->isc_txrx->ift_rxd_refill);
5812 	MPASS(scctx->isc_txrx->ift_rxd_flush);
5813 }
5814 
5815 static int
5816 iflib_register(if_ctx_t ctx)
5817 {
5818 	if_shared_ctx_t sctx = ctx->ifc_sctx;
5819 	driver_t *driver = sctx->isc_driver;
5820 	device_t dev = ctx->ifc_dev;
5821 	if_t ifp;
5822 	u_char type;
5823 	int iflags;
5824 
5825 	if ((sctx->isc_flags & IFLIB_PSEUDO) == 0)
5826 		_iflib_assert(sctx);
5827 
5828 	CTX_LOCK_INIT(ctx);
5829 	STATE_LOCK_INIT(ctx, device_get_nameunit(ctx->ifc_dev));
5830 	if (sctx->isc_flags & IFLIB_PSEUDO) {
5831 		if (sctx->isc_flags & IFLIB_PSEUDO_ETHER)
5832 			type = IFT_ETHER;
5833 		else
5834 			type = IFT_PPP;
5835 	} else
5836 		type = IFT_ETHER;
5837 	ifp = ctx->ifc_ifp = if_alloc(type);
5838 	if (ifp == NULL) {
5839 		device_printf(dev, "can not allocate ifnet structure\n");
5840 		return (ENOMEM);
5841 	}
5842 
5843 	/*
5844 	 * Initialize our context's device specific methods
5845 	 */
5846 	kobj_init((kobj_t) ctx, (kobj_class_t) driver);
5847 	kobj_class_compile((kobj_class_t) driver);
5848 
5849 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
5850 	if_setsoftc(ifp, ctx);
5851 	if_setdev(ifp, dev);
5852 	if_setinitfn(ifp, iflib_if_init);
5853 	if_setioctlfn(ifp, iflib_if_ioctl);
5854 #ifdef ALTQ
5855 	if_setstartfn(ifp, iflib_altq_if_start);
5856 	if_settransmitfn(ifp, iflib_altq_if_transmit);
5857 	if_setsendqready(ifp);
5858 #else
5859 	if_settransmitfn(ifp, iflib_if_transmit);
5860 #endif
5861 	if_setqflushfn(ifp, iflib_if_qflush);
5862 	iflags = IFF_MULTICAST | IFF_KNOWSEPOCH;
5863 
5864 	if ((sctx->isc_flags & IFLIB_PSEUDO) &&
5865 		(sctx->isc_flags & IFLIB_PSEUDO_ETHER) == 0)
5866 		iflags |= IFF_POINTOPOINT;
5867 	else
5868 		iflags |= IFF_BROADCAST | IFF_SIMPLEX;
5869 	if_setflags(ifp, iflags);
5870 	ctx->ifc_vlan_attach_event =
5871 		EVENTHANDLER_REGISTER(vlan_config, iflib_vlan_register, ctx,
5872 							  EVENTHANDLER_PRI_FIRST);
5873 	ctx->ifc_vlan_detach_event =
5874 		EVENTHANDLER_REGISTER(vlan_unconfig, iflib_vlan_unregister, ctx,
5875 							  EVENTHANDLER_PRI_FIRST);
5876 
5877 	if ((sctx->isc_flags & IFLIB_DRIVER_MEDIA) == 0) {
5878 		ctx->ifc_mediap = &ctx->ifc_media;
5879 		ifmedia_init(ctx->ifc_mediap, IFM_IMASK,
5880 		    iflib_media_change, iflib_media_status);
5881 	}
5882 	return (0);
5883 }
5884 
5885 static void
5886 iflib_unregister_vlan_handlers(if_ctx_t ctx)
5887 {
5888 	/* Unregister VLAN events */
5889 	if (ctx->ifc_vlan_attach_event != NULL) {
5890 		EVENTHANDLER_DEREGISTER(vlan_config, ctx->ifc_vlan_attach_event);
5891 		ctx->ifc_vlan_attach_event = NULL;
5892 	}
5893 	if (ctx->ifc_vlan_detach_event != NULL) {
5894 		EVENTHANDLER_DEREGISTER(vlan_unconfig, ctx->ifc_vlan_detach_event);
5895 		ctx->ifc_vlan_detach_event = NULL;
	}
}
5899 
5900 static void
5901 iflib_deregister(if_ctx_t ctx)
5902 {
5903 	if_t ifp = ctx->ifc_ifp;
5904 
5905 	/* Remove all media */
5906 	ifmedia_removeall(&ctx->ifc_media);
5907 
5908 	/* Ensure that VLAN event handlers are unregistered */
5909 	iflib_unregister_vlan_handlers(ctx);
5910 
5911 	/* Release kobject reference */
5912 	kobj_delete((kobj_t) ctx, NULL);
5913 
5914 	/* Free the ifnet structure */
5915 	if_free(ifp);
5916 
5917 	STATE_LOCK_DESTROY(ctx);
5918 
	/* ether_ifdetach calls if_qflush; destroy the lock only afterwards. */
5920 	CTX_LOCK_DESTROY(ctx);
5921 }
5922 
5923 static int
5924 iflib_queues_alloc(if_ctx_t ctx)
5925 {
5926 	if_shared_ctx_t sctx = ctx->ifc_sctx;
5927 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
5928 	device_t dev = ctx->ifc_dev;
5929 	int nrxqsets = scctx->isc_nrxqsets;
5930 	int ntxqsets = scctx->isc_ntxqsets;
5931 	iflib_txq_t txq;
5932 	iflib_rxq_t rxq;
5933 	iflib_fl_t fl = NULL;
5934 	int i, j, cpu, err, txconf, rxconf;
5935 	iflib_dma_info_t ifdip;
5936 	uint32_t *rxqsizes = scctx->isc_rxqsizes;
5937 	uint32_t *txqsizes = scctx->isc_txqsizes;
5938 	uint8_t nrxqs = sctx->isc_nrxqs;
5939 	uint8_t ntxqs = sctx->isc_ntxqs;
5940 	int nfree_lists = sctx->isc_nfl ? sctx->isc_nfl : 1;
5941 	int fl_offset = (sctx->isc_flags & IFLIB_HAS_RXCQ ? 1 : 0);
5942 	caddr_t *vaddrs;
5943 	uint64_t *paddrs;
5944 
5945 	KASSERT(ntxqs > 0, ("number of queues per qset must be at least 1"));
5946 	KASSERT(nrxqs > 0, ("number of queues per qset must be at least 1"));
	KASSERT(nrxqs >= fl_offset + nfree_lists,
	    ("there must be at least a rxq for each free list"));
5949 
5950 	/* Allocate the TX ring struct memory */
5951 	if (!(ctx->ifc_txqs =
5952 	    (iflib_txq_t) malloc(sizeof(struct iflib_txq) *
5953 	    ntxqsets, M_IFLIB, M_NOWAIT | M_ZERO))) {
5954 		device_printf(dev, "Unable to allocate TX ring memory\n");
5955 		err = ENOMEM;
5956 		goto fail;
5957 	}
5958 
5959 	/* Now allocate the RX */
5960 	if (!(ctx->ifc_rxqs =
5961 	    (iflib_rxq_t) malloc(sizeof(struct iflib_rxq) *
5962 	    nrxqsets, M_IFLIB, M_NOWAIT | M_ZERO))) {
5963 		device_printf(dev, "Unable to allocate RX ring memory\n");
5964 		err = ENOMEM;
5965 		goto rx_fail;
5966 	}
5967 
5968 	txq = ctx->ifc_txqs;
5969 	rxq = ctx->ifc_rxqs;
5970 
5971 	/*
5972 	 * XXX handle allocation failure
5973 	 */
5974 	for (txconf = i = 0, cpu = CPU_FIRST(); i < ntxqsets; i++, txconf++, txq++, cpu = CPU_NEXT(cpu)) {
5975 		/* Set up some basics */
5976 
5977 		if ((ifdip = malloc(sizeof(struct iflib_dma_info) * ntxqs,
5978 		    M_IFLIB, M_NOWAIT | M_ZERO)) == NULL) {
5979 			device_printf(dev,
5980 			    "Unable to allocate TX DMA info memory\n");
5981 			err = ENOMEM;
5982 			goto err_tx_desc;
5983 		}
5984 		txq->ift_ifdi = ifdip;
5985 		for (j = 0; j < ntxqs; j++, ifdip++) {
5986 			if (iflib_dma_alloc(ctx, txqsizes[j], ifdip, 0)) {
5987 				device_printf(dev,
5988 				    "Unable to allocate TX descriptors\n");
5989 				err = ENOMEM;
5990 				goto err_tx_desc;
5991 			}
5992 			txq->ift_txd_size[j] = scctx->isc_txd_size[j];
5993 			bzero((void *)ifdip->idi_vaddr, txqsizes[j]);
5994 		}
5995 		txq->ift_ctx = ctx;
5996 		txq->ift_id = i;
5997 		if (sctx->isc_flags & IFLIB_HAS_TXCQ) {
5998 			txq->ift_br_offset = 1;
5999 		} else {
6000 			txq->ift_br_offset = 0;
6001 		}
6002 
6003 		if (iflib_txsd_alloc(txq)) {
6004 			device_printf(dev, "Critical Failure setting up TX buffers\n");
6005 			err = ENOMEM;
6006 			goto err_tx_desc;
6007 		}
6008 
6009 		/* Initialize the TX lock */
6010 		snprintf(txq->ift_mtx_name, MTX_NAME_LEN, "%s:TX(%d):callout",
6011 		    device_get_nameunit(dev), txq->ift_id);
6012 		mtx_init(&txq->ift_mtx, txq->ift_mtx_name, NULL, MTX_DEF);
6013 		callout_init_mtx(&txq->ift_timer, &txq->ift_mtx, 0);
6014 		txq->ift_timer.c_cpu = cpu;
6015 #ifdef DEV_NETMAP
6016 		callout_init_mtx(&txq->ift_netmap_timer, &txq->ift_mtx, 0);
6017 		txq->ift_netmap_timer.c_cpu = cpu;
6018 #endif /* DEV_NETMAP */
6019 
6020 		err = ifmp_ring_alloc(&txq->ift_br, 2048, txq, iflib_txq_drain,
6021 				      iflib_txq_can_drain, M_IFLIB, M_WAITOK);
6022 		if (err) {
6023 			/* XXX free any allocated rings */
6024 			device_printf(dev, "Unable to allocate buf_ring\n");
6025 			goto err_tx_desc;
6026 		}
6027 	}
6028 
6029 	for (rxconf = i = 0; i < nrxqsets; i++, rxconf++, rxq++) {
6030 		/* Set up some basics */
6031 		callout_init(&rxq->ifr_watchdog, 1);
6032 
6033 		if ((ifdip = malloc(sizeof(struct iflib_dma_info) * nrxqs,
6034 		   M_IFLIB, M_NOWAIT | M_ZERO)) == NULL) {
6035 			device_printf(dev,
6036 			    "Unable to allocate RX DMA info memory\n");
6037 			err = ENOMEM;
6038 			goto err_tx_desc;
6039 		}
6040 
6041 		rxq->ifr_ifdi = ifdip;
6042 		/* XXX this needs to be changed if #rx queues != #tx queues */
6043 		rxq->ifr_ntxqirq = 1;
6044 		rxq->ifr_txqid[0] = i;
6045 		for (j = 0; j < nrxqs; j++, ifdip++) {
6046 			if (iflib_dma_alloc(ctx, rxqsizes[j], ifdip, 0)) {
6047 				device_printf(dev,
6048 				    "Unable to allocate RX descriptors\n");
6049 				err = ENOMEM;
6050 				goto err_tx_desc;
6051 			}
6052 			bzero((void *)ifdip->idi_vaddr, rxqsizes[j]);
6053 		}
6054 		rxq->ifr_ctx = ctx;
6055 		rxq->ifr_id = i;
6056 		rxq->ifr_fl_offset = fl_offset;
6057 		rxq->ifr_nfl = nfree_lists;
		if (!(fl = (iflib_fl_t) malloc(sizeof(struct iflib_fl) *
		    nfree_lists, M_IFLIB, M_NOWAIT | M_ZERO))) {
6060 			device_printf(dev, "Unable to allocate free list memory\n");
6061 			err = ENOMEM;
6062 			goto err_tx_desc;
6063 		}
6064 		rxq->ifr_fl = fl;
6065 		for (j = 0; j < nfree_lists; j++) {
6066 			fl[j].ifl_rxq = rxq;
6067 			fl[j].ifl_id = j;
6068 			fl[j].ifl_ifdi = &rxq->ifr_ifdi[j + rxq->ifr_fl_offset];
6069 			fl[j].ifl_rxd_size = scctx->isc_rxd_size[j];
6070 		}
6071 		/* Allocate receive buffers for the ring */
6072 		if (iflib_rxsd_alloc(rxq)) {
6073 			device_printf(dev,
6074 			    "Critical Failure setting up receive buffers\n");
6075 			err = ENOMEM;
6076 			goto err_rx_desc;
6077 		}
6078 
6079 		for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++)
6080 			fl->ifl_rx_bitmap = bit_alloc(fl->ifl_size, M_IFLIB,
6081 			    M_WAITOK);
6082 	}
6083 
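	/*
	 * Marshal the descriptor ring addresses into flat arrays laid out
	 * qset-major, so queue j of qset i lands at index (i * ntxqs + j)
	 * (likewise for RX below).
	 */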
6084 	/* TXQs */
6085 	vaddrs = malloc(sizeof(caddr_t)*ntxqsets*ntxqs, M_IFLIB, M_WAITOK);
6086 	paddrs = malloc(sizeof(uint64_t)*ntxqsets*ntxqs, M_IFLIB, M_WAITOK);
6087 	for (i = 0; i < ntxqsets; i++) {
6088 		iflib_dma_info_t di = ctx->ifc_txqs[i].ift_ifdi;
6089 
6090 		for (j = 0; j < ntxqs; j++, di++) {
6091 			vaddrs[i*ntxqs + j] = di->idi_vaddr;
6092 			paddrs[i*ntxqs + j] = di->idi_paddr;
6093 		}
6094 	}
6095 	if ((err = IFDI_TX_QUEUES_ALLOC(ctx, vaddrs, paddrs, ntxqs, ntxqsets)) != 0) {
6096 		device_printf(ctx->ifc_dev,
6097 		    "Unable to allocate device TX queue\n");
6098 		iflib_tx_structures_free(ctx);
6099 		free(vaddrs, M_IFLIB);
6100 		free(paddrs, M_IFLIB);
6101 		goto err_rx_desc;
6102 	}
6103 	free(vaddrs, M_IFLIB);
6104 	free(paddrs, M_IFLIB);
6105 
6106 	/* RXQs */
6107 	vaddrs = malloc(sizeof(caddr_t)*nrxqsets*nrxqs, M_IFLIB, M_WAITOK);
6108 	paddrs = malloc(sizeof(uint64_t)*nrxqsets*nrxqs, M_IFLIB, M_WAITOK);
6109 	for (i = 0; i < nrxqsets; i++) {
6110 		iflib_dma_info_t di = ctx->ifc_rxqs[i].ifr_ifdi;
6111 
6112 		for (j = 0; j < nrxqs; j++, di++) {
6113 			vaddrs[i*nrxqs + j] = di->idi_vaddr;
6114 			paddrs[i*nrxqs + j] = di->idi_paddr;
6115 		}
6116 	}
6117 	if ((err = IFDI_RX_QUEUES_ALLOC(ctx, vaddrs, paddrs, nrxqs, nrxqsets)) != 0) {
6118 		device_printf(ctx->ifc_dev,
6119 		    "Unable to allocate device RX queue\n");
6120 		iflib_tx_structures_free(ctx);
6121 		free(vaddrs, M_IFLIB);
6122 		free(paddrs, M_IFLIB);
6123 		goto err_rx_desc;
6124 	}
6125 	free(vaddrs, M_IFLIB);
6126 	free(paddrs, M_IFLIB);
6127 
6128 	return (0);
6129 
6130 /* XXX handle allocation failure changes */
6131 err_rx_desc:
6132 err_tx_desc:
6133 rx_fail:
6134 	if (ctx->ifc_rxqs != NULL)
6135 		free(ctx->ifc_rxqs, M_IFLIB);
6136 	ctx->ifc_rxqs = NULL;
6137 	if (ctx->ifc_txqs != NULL)
6138 		free(ctx->ifc_txqs, M_IFLIB);
6139 	ctx->ifc_txqs = NULL;
6140 fail:
6141 	return (err);
6142 }
6143 
6144 static int
6145 iflib_tx_structures_setup(if_ctx_t ctx)
6146 {
6147 	iflib_txq_t txq = ctx->ifc_txqs;
6148 	int i;
6149 
6150 	for (i = 0; i < NTXQSETS(ctx); i++, txq++)
6151 		iflib_txq_setup(txq);
6152 
6153 	return (0);
6154 }
6155 
6156 static void
6157 iflib_tx_structures_free(if_ctx_t ctx)
6158 {
6159 	iflib_txq_t txq = ctx->ifc_txqs;
6160 	if_shared_ctx_t sctx = ctx->ifc_sctx;
6161 	int i, j;
6162 
6163 	for (i = 0; i < NTXQSETS(ctx); i++, txq++) {
6164 		for (j = 0; j < sctx->isc_ntxqs; j++)
6165 			iflib_dma_free(&txq->ift_ifdi[j]);
6166 		iflib_txq_destroy(txq);
6167 	}
6168 	free(ctx->ifc_txqs, M_IFLIB);
6169 	ctx->ifc_txqs = NULL;
6170 }
6171 
6172 /*********************************************************************
6173  *
6174  *  Initialize all receive rings.
6175  *
6176  **********************************************************************/
6177 static int
6178 iflib_rx_structures_setup(if_ctx_t ctx)
6179 {
6180 	iflib_rxq_t rxq = ctx->ifc_rxqs;
6181 	int q;
6182 #if defined(INET6) || defined(INET)
6183 	int err, i;
6184 #endif
6185 
6186 	for (q = 0; q < ctx->ifc_softc_ctx.isc_nrxqsets; q++, rxq++) {
6187 #if defined(INET6) || defined(INET)
6188 		err = tcp_lro_init_args(&rxq->ifr_lc, ctx->ifc_ifp,
6189 		    TCP_LRO_ENTRIES, min(1024,
6190 		    ctx->ifc_softc_ctx.isc_nrxd[rxq->ifr_fl_offset]));
6191 		if (err != 0) {
6192 			device_printf(ctx->ifc_dev,
6193 			    "LRO Initialization failed!\n");
6194 			goto fail;
6195 		}
6196 #endif
6197 		IFDI_RXQ_SETUP(ctx, rxq->ifr_id);
6198 	}
6199 	return (0);
6200 #if defined(INET6) || defined(INET)
6201 fail:
	/*
	 * Free the LRO resources allocated so far; we only handle the
	 * rings that completed, as the failing case will have cleaned
	 * up after itself.  'q' failed, so it's the terminus.
	 */
6207 	rxq = ctx->ifc_rxqs;
6208 	for (i = 0; i < q; ++i, rxq++) {
6209 		tcp_lro_free(&rxq->ifr_lc);
6210 	}
6211 	return (err);
6212 #endif
6213 }
6214 
6215 /*********************************************************************
6216  *
6217  *  Free all receive rings.
6218  *
6219  **********************************************************************/
6220 static void
6221 iflib_rx_structures_free(if_ctx_t ctx)
6222 {
6223 	iflib_rxq_t rxq = ctx->ifc_rxqs;
6224 	if_shared_ctx_t sctx = ctx->ifc_sctx;
6225 	int i, j;
6226 
6227 	for (i = 0; i < ctx->ifc_softc_ctx.isc_nrxqsets; i++, rxq++) {
6228 		for (j = 0; j < sctx->isc_nrxqs; j++)
6229 			iflib_dma_free(&rxq->ifr_ifdi[j]);
6230 		iflib_rx_sds_free(rxq);
6231 #if defined(INET6) || defined(INET)
6232 		tcp_lro_free(&rxq->ifr_lc);
6233 #endif
6234 	}
6235 	free(ctx->ifc_rxqs, M_IFLIB);
6236 	ctx->ifc_rxqs = NULL;
6237 }
6238 
6239 static int
6240 iflib_qset_structures_setup(if_ctx_t ctx)
6241 {
6242 	int err;
6243 
6244 	/*
6245 	 * It is expected that the caller takes care of freeing queues if this
6246 	 * fails.
6247 	 */
6248 	if ((err = iflib_tx_structures_setup(ctx)) != 0) {
6249 		device_printf(ctx->ifc_dev, "iflib_tx_structures_setup failed: %d\n", err);
6250 		return (err);
6251 	}
6252 
6253 	if ((err = iflib_rx_structures_setup(ctx)) != 0)
6254 		device_printf(ctx->ifc_dev, "iflib_rx_structures_setup failed: %d\n", err);
6255 
6256 	return (err);
6257 }
6258 
6259 int
6260 iflib_irq_alloc(if_ctx_t ctx, if_irq_t irq, int rid,
6261 		driver_filter_t filter, void *filter_arg, driver_intr_t handler, void *arg, const char *name)
6262 {
6263 
6264 	return (_iflib_irq_alloc(ctx, irq, rid, filter, handler, arg, name));
6265 }
6266 
6267 /* Just to avoid copy/paste */
6268 static inline int
6269 iflib_irq_set_affinity(if_ctx_t ctx, if_irq_t irq, iflib_intr_type_t type,
6270     int qid, struct grouptask *gtask, struct taskqgroup *tqg, void *uniq,
6271     const char *name)
6272 {
6273 	device_t dev;
6274 	unsigned int base_cpuid, cpuid;
6275 	int err;
6276 
6277 	dev = ctx->ifc_dev;
6278 	base_cpuid = ctx->ifc_sysctl_core_offset;
6279 	cpuid = get_cpuid_for_queue(ctx, base_cpuid, qid, type == IFLIB_INTR_TX);
6280 	err = taskqgroup_attach_cpu(tqg, gtask, uniq, cpuid, dev,
6281 	    irq ? irq->ii_res : NULL, name);
6282 	if (err) {
6283 		device_printf(dev, "taskqgroup_attach_cpu failed %d\n", err);
6284 		return (err);
6285 	}
6286 #ifdef notyet
6287 	if (cpuid > ctx->ifc_cpuid_highest)
6288 		ctx->ifc_cpuid_highest = cpuid;
6289 #endif
6290 	return (0);
6291 }
6292 
6293 int
6294 iflib_irq_alloc_generic(if_ctx_t ctx, if_irq_t irq, int rid,
6295 			iflib_intr_type_t type, driver_filter_t *filter,
6296 			void *filter_arg, int qid, const char *name)
6297 {
6298 	device_t dev;
6299 	struct grouptask *gtask;
6300 	struct taskqgroup *tqg;
6301 	iflib_filter_info_t info;
6302 	gtask_fn_t *fn;
6303 	int tqrid, err;
6304 	driver_filter_t *intr_fast;
6305 	void *q;
6306 
6307 	info = &ctx->ifc_filter_info;
6308 	tqrid = rid;
6309 
6310 	switch (type) {
6311 	/* XXX merge tx/rx for netmap? */
6312 	case IFLIB_INTR_TX:
6313 		q = &ctx->ifc_txqs[qid];
6314 		info = &ctx->ifc_txqs[qid].ift_filter_info;
6315 		gtask = &ctx->ifc_txqs[qid].ift_task;
6316 		tqg = qgroup_if_io_tqg;
6317 		fn = _task_fn_tx;
6318 		intr_fast = iflib_fast_intr;
6319 		GROUPTASK_INIT(gtask, 0, fn, q);
6320 		ctx->ifc_flags |= IFC_NETMAP_TX_IRQ;
6321 		break;
6322 	case IFLIB_INTR_RX:
6323 		q = &ctx->ifc_rxqs[qid];
6324 		info = &ctx->ifc_rxqs[qid].ifr_filter_info;
6325 		gtask = &ctx->ifc_rxqs[qid].ifr_task;
6326 		tqg = qgroup_if_io_tqg;
6327 		fn = _task_fn_rx;
6328 		intr_fast = iflib_fast_intr;
6329 		NET_GROUPTASK_INIT(gtask, 0, fn, q);
6330 		break;
6331 	case IFLIB_INTR_RXTX:
6332 		q = &ctx->ifc_rxqs[qid];
6333 		info = &ctx->ifc_rxqs[qid].ifr_filter_info;
6334 		gtask = &ctx->ifc_rxqs[qid].ifr_task;
6335 		tqg = qgroup_if_io_tqg;
6336 		fn = _task_fn_rx;
6337 		intr_fast = iflib_fast_intr_rxtx;
6338 		NET_GROUPTASK_INIT(gtask, 0, fn, q);
6339 		break;
6340 	case IFLIB_INTR_ADMIN:
6341 		q = ctx;
6342 		tqrid = -1;
6343 		info = &ctx->ifc_filter_info;
6344 		gtask = &ctx->ifc_admin_task;
6345 		tqg = qgroup_if_config_tqg;
6346 		fn = _task_fn_admin;
6347 		intr_fast = iflib_fast_intr_ctx;
6348 		break;
6349 	default:
6350 		device_printf(ctx->ifc_dev, "%s: unknown net intr type\n",
6351 		    __func__);
6352 		return (EINVAL);
6353 	}
6354 
6355 	info->ifi_filter = filter;
6356 	info->ifi_filter_arg = filter_arg;
6357 	info->ifi_task = gtask;
6358 	info->ifi_ctx = q;
6359 
6360 	dev = ctx->ifc_dev;
6361 	err = _iflib_irq_alloc(ctx, irq, rid, intr_fast, NULL, info,  name);
6362 	if (err != 0) {
6363 		device_printf(dev, "_iflib_irq_alloc failed %d\n", err);
6364 		return (err);
6365 	}
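	/*
	 * The admin task was already attached to the config taskqgroup at
	 * register time, so it needs no queue/CPU placement here.
	 */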
6366 	if (type == IFLIB_INTR_ADMIN)
6367 		return (0);
6368 
6369 	if (tqrid != -1) {
6370 		err = iflib_irq_set_affinity(ctx, irq, type, qid, gtask, tqg, q,
6371 		    name);
6372 		if (err)
6373 			return (err);
6374 	} else {
6375 		taskqgroup_attach(tqg, gtask, q, dev, irq->ii_res, name);
6376 	}
6377 
6378 	return (0);
6379 }
6380 
6381 void
6382 iflib_softirq_alloc_generic(if_ctx_t ctx, if_irq_t irq, iflib_intr_type_t type, void *arg, int qid, const char *name)
6383 {
6384 	device_t dev;
6385 	struct grouptask *gtask;
6386 	struct taskqgroup *tqg;
6387 	gtask_fn_t *fn;
6388 	void *q;
6389 	int err;
6390 
6391 	switch (type) {
6392 	case IFLIB_INTR_TX:
6393 		q = &ctx->ifc_txqs[qid];
6394 		gtask = &ctx->ifc_txqs[qid].ift_task;
6395 		tqg = qgroup_if_io_tqg;
6396 		fn = _task_fn_tx;
6397 		GROUPTASK_INIT(gtask, 0, fn, q);
6398 		break;
6399 	case IFLIB_INTR_RX:
6400 		q = &ctx->ifc_rxqs[qid];
6401 		gtask = &ctx->ifc_rxqs[qid].ifr_task;
6402 		tqg = qgroup_if_io_tqg;
6403 		fn = _task_fn_rx;
6404 		NET_GROUPTASK_INIT(gtask, 0, fn, q);
6405 		break;
6406 	case IFLIB_INTR_IOV:
6407 		q = ctx;
6408 		gtask = &ctx->ifc_vflr_task;
6409 		tqg = qgroup_if_config_tqg;
6410 		fn = _task_fn_iov;
6411 		GROUPTASK_INIT(gtask, 0, fn, q);
6412 		break;
6413 	default:
6414 		panic("unknown net intr type");
6415 	}
6416 	err = iflib_irq_set_affinity(ctx, irq, type, qid, gtask, tqg, q, name);
6417 	if (err) {
6418 		dev = ctx->ifc_dev;
6419 		taskqgroup_attach(tqg, gtask, q, dev, irq ? irq->ii_res : NULL,
6420 		    name);
6421 	}
6422 }
6423 
6424 void
6425 iflib_irq_free(if_ctx_t ctx, if_irq_t irq)
6426 {
6427 
6428 	if (irq->ii_tag)
6429 		bus_teardown_intr(ctx->ifc_dev, irq->ii_res, irq->ii_tag);
6430 
6431 	if (irq->ii_res)
6432 		bus_release_resource(ctx->ifc_dev, SYS_RES_IRQ,
6433 		    rman_get_rid(irq->ii_res), irq->ii_res);
6434 }
6435 
6436 static int
6437 iflib_legacy_setup(if_ctx_t ctx, driver_filter_t filter, void *filter_arg, int *rid, const char *name)
6438 {
6439 	iflib_txq_t txq = ctx->ifc_txqs;
6440 	iflib_rxq_t rxq = ctx->ifc_rxqs;
6441 	if_irq_t irq = &ctx->ifc_legacy_irq;
6442 	iflib_filter_info_t info;
6443 	device_t dev;
6444 	struct grouptask *gtask;
6445 	struct resource *res;
6446 	struct taskqgroup *tqg;
6447 	void *q;
6448 	int err, tqrid;
6449 	bool rx_only;
6450 
6451 	q = &ctx->ifc_rxqs[0];
6452 	info = &rxq[0].ifr_filter_info;
6453 	gtask = &rxq[0].ifr_task;
6454 	tqg = qgroup_if_io_tqg;
6455 	tqrid = *rid;
6456 	rx_only = (ctx->ifc_sctx->isc_flags & IFLIB_SINGLE_IRQ_RX_ONLY) != 0;
6457 
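	/*
	 * With IFLIB_SINGLE_IRQ_RX_ONLY the lone interrupt is RX-only, so
	 * use the plain ctx filter (iflib_fast_intr_ctx) rather than
	 * iflib_fast_intr_rxtx, which would also scan the TX queues.
	 */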
6458 	ctx->ifc_flags |= IFC_LEGACY;
6459 	info->ifi_filter = filter;
6460 	info->ifi_filter_arg = filter_arg;
6461 	info->ifi_task = gtask;
6462 	info->ifi_ctx = rx_only ? ctx : q;
6463 
6464 	dev = ctx->ifc_dev;
6465 	/* We allocate a single interrupt resource */
6466 	err = _iflib_irq_alloc(ctx, irq, tqrid, rx_only ? iflib_fast_intr_ctx :
6467 	    iflib_fast_intr_rxtx, NULL, info, name);
6468 	if (err != 0)
6469 		return (err);
6470 	NET_GROUPTASK_INIT(gtask, 0, _task_fn_rx, q);
6471 	res = irq->ii_res;
6472 	taskqgroup_attach(tqg, gtask, q, dev, res, name);
6473 
6474 	GROUPTASK_INIT(&txq->ift_task, 0, _task_fn_tx, txq);
6475 	taskqgroup_attach(qgroup_if_io_tqg, &txq->ift_task, txq, dev, res,
6476 	    "tx");
6477 	return (0);
6478 }
6479 
6480 void
6481 iflib_led_create(if_ctx_t ctx)
6482 {
6483 
6484 	ctx->ifc_led_dev = led_create(iflib_led_func, ctx,
6485 	    device_get_nameunit(ctx->ifc_dev));
6486 }
6487 
6488 void
6489 iflib_tx_intr_deferred(if_ctx_t ctx, int txqid)
6490 {
6491 
6492 	GROUPTASK_ENQUEUE(&ctx->ifc_txqs[txqid].ift_task);
6493 }
6494 
6495 void
6496 iflib_rx_intr_deferred(if_ctx_t ctx, int rxqid)
6497 {
6498 
6499 	GROUPTASK_ENQUEUE(&ctx->ifc_rxqs[rxqid].ifr_task);
6500 }
6501 
6502 void
6503 iflib_admin_intr_deferred(if_ctx_t ctx)
6504 {
6505 
6506 	MPASS(ctx->ifc_admin_task.gt_taskqueue != NULL);
6507 	GROUPTASK_ENQUEUE(&ctx->ifc_admin_task);
6508 }
6509 
6510 void
6511 iflib_iov_intr_deferred(if_ctx_t ctx)
6512 {
6513 
6514 	GROUPTASK_ENQUEUE(&ctx->ifc_vflr_task);
6515 }
6516 
6517 void
6518 iflib_io_tqg_attach(struct grouptask *gt, void *uniq, int cpu, const char *name)
6519 {
6520 
6521 	taskqgroup_attach_cpu(qgroup_if_io_tqg, gt, uniq, cpu, NULL, NULL,
6522 	    name);
6523 }
6524 
6525 void
6526 iflib_config_gtask_init(void *ctx, struct grouptask *gtask, gtask_fn_t *fn,
6527 	const char *name)
6528 {
6529 
6530 	GROUPTASK_INIT(gtask, 0, fn, ctx);
6531 	taskqgroup_attach(qgroup_if_config_tqg, gtask, gtask, NULL, NULL,
6532 	    name);
6533 }
6534 
6535 void
6536 iflib_config_gtask_deinit(struct grouptask *gtask)
6537 {
6538 
6539 	taskqgroup_detach(qgroup_if_config_tqg, gtask);
6540 }
6541 
6542 void
6543 iflib_link_state_change(if_ctx_t ctx, int link_state, uint64_t baudrate)
6544 {
6545 	if_t ifp = ctx->ifc_ifp;
6546 	iflib_txq_t txq = ctx->ifc_txqs;
6547 
6548 	if_setbaudrate(ifp, baudrate);
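	/* At 10 Gb/s and above, descriptor prefetching is assumed to pay off. */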
6549 	if (baudrate >= IF_Gbps(10)) {
6550 		STATE_LOCK(ctx);
6551 		ctx->ifc_flags |= IFC_PREFETCH;
6552 		STATE_UNLOCK(ctx);
6553 	}
6554 	/* If link down, disable watchdog */
6555 	if ((ctx->ifc_link_state == LINK_STATE_UP) && (link_state == LINK_STATE_DOWN)) {
6556 		for (int i = 0; i < ctx->ifc_softc_ctx.isc_ntxqsets; i++, txq++)
6557 			txq->ift_qstatus = IFLIB_QUEUE_IDLE;
6558 	}
6559 	ctx->ifc_link_state = link_state;
6560 	if_link_state_change(ifp, link_state);
6561 }
6562 
6563 static int
6564 iflib_tx_credits_update(if_ctx_t ctx, iflib_txq_t txq)
6565 {
6566 	int credits;
6567 #ifdef INVARIANTS
6568 	int credits_pre = txq->ift_cidx_processed;
6569 #endif
6570 
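	/*
	 * Ask the driver how many TX descriptors completed since the last
	 * call; the wrap below relies on a single update never exceeding
	 * the ring size.
	 */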
6571 	bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map,
6572 	    BUS_DMASYNC_POSTREAD);
6573 	if ((credits = ctx->isc_txd_credits_update(ctx->ifc_softc, txq->ift_id, true)) == 0)
6574 		return (0);
6575 
6576 	txq->ift_processed += credits;
6577 	txq->ift_cidx_processed += credits;
6578 
6579 	MPASS(credits_pre + credits == txq->ift_cidx_processed);
6580 	if (txq->ift_cidx_processed >= txq->ift_size)
6581 		txq->ift_cidx_processed -= txq->ift_size;
6582 	return (credits);
6583 }
6584 
6585 static int
6586 iflib_rxd_avail(if_ctx_t ctx, iflib_rxq_t rxq, qidx_t cidx, qidx_t budget)
6587 {
6588 	iflib_fl_t fl;
6589 	u_int i;
6590 
6591 	for (i = 0, fl = &rxq->ifr_fl[0]; i < rxq->ifr_nfl; i++, fl++)
6592 		bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map,
6593 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
6594 	return (ctx->isc_rxd_available(ctx->ifc_softc, rxq->ifr_id, cidx,
6595 	    budget));
6596 }
6597 
6598 void
6599 iflib_add_int_delay_sysctl(if_ctx_t ctx, const char *name,
6600 	const char *description, if_int_delay_info_t info,
6601 	int offset, int value)
6602 {
6603 	info->iidi_ctx = ctx;
6604 	info->iidi_offset = offset;
6605 	info->iidi_value = value;
6606 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(ctx->ifc_dev),
6607 	    SYSCTL_CHILDREN(device_get_sysctl_tree(ctx->ifc_dev)),
6608 	    OID_AUTO, name, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
6609 	    info, 0, iflib_sysctl_int_delay, "I", description);
6610 }
6611 
6612 struct sx *
6613 iflib_ctx_lock_get(if_ctx_t ctx)
6614 {
6615 
6616 	return (&ctx->ifc_ctx_sx);
6617 }
6618 
6619 static int
6620 iflib_msix_init(if_ctx_t ctx)
6621 {
6622 	device_t dev = ctx->ifc_dev;
6623 	if_shared_ctx_t sctx = ctx->ifc_sctx;
6624 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
6625 	int admincnt, bar, err, iflib_num_rx_queues, iflib_num_tx_queues;
6626 	int msgs, queuemsgs, queues, rx_queues, tx_queues, vectors;
6627 
6628 	iflib_num_tx_queues = ctx->ifc_sysctl_ntxqs;
6629 	iflib_num_rx_queues = ctx->ifc_sysctl_nrxqs;
6630 
6631 	if (bootverbose)
6632 		device_printf(dev, "msix_init qsets capped at %d\n",
6633 		    imax(scctx->isc_ntxqsets, scctx->isc_nrxqsets));
6634 
6635 	/* Override by tuneable */
6636 	if (scctx->isc_disable_msix)
6637 		goto msi;
6638 
6639 	/* First try MSI-X */
6640 	if ((msgs = pci_msix_count(dev)) == 0) {
6641 		if (bootverbose)
6642 			device_printf(dev, "MSI-X not supported or disabled\n");
6643 		goto msi;
6644 	}
6645 
6646 	bar = ctx->ifc_softc_ctx.isc_msix_bar;
6647 	/*
6648 	 * bar == -1 => "trust me I know what I'm doing"
6649 	 * Some drivers are for hardware that is so shoddily
6650 	 * documented that no one knows which bars are which
6651 	 * so the developer has to map all bars. This hack
6652 	 * allows shoddy garbage to use MSI-X in this framework.
6653 	 */
6654 	if (bar != -1) {
6655 		ctx->ifc_msix_mem = bus_alloc_resource_any(dev,
6656 	            SYS_RES_MEMORY, &bar, RF_ACTIVE);
6657 		if (ctx->ifc_msix_mem == NULL) {
6658 			device_printf(dev, "Unable to map MSI-X table\n");
6659 			goto msi;
6660 		}
6661 	}
6662 
6663 	admincnt = sctx->isc_admin_intrcnt;
6664 #if IFLIB_DEBUG
6665 	/* use only 1 qset in debug mode */
6666 	queuemsgs = min(msgs - admincnt, 1);
6667 #else
6668 	queuemsgs = msgs - admincnt;
6669 #endif
6670 #ifdef RSS
6671 	queues = imin(queuemsgs, rss_getnumbuckets());
6672 #else
6673 	queues = queuemsgs;
6674 #endif
6675 	queues = imin(CPU_COUNT(&ctx->ifc_cpus), queues);
6676 	if (bootverbose)
6677 		device_printf(dev,
6678 		    "intr CPUs: %d queue msgs: %d admincnt: %d\n",
6679 		    CPU_COUNT(&ctx->ifc_cpus), queuemsgs, admincnt);
6680 #ifdef  RSS
6681 	/* If we're doing RSS, clamp at the number of RSS buckets */
6682 	if (queues > rss_getnumbuckets())
6683 		queues = rss_getnumbuckets();
6684 #endif
6685 	if (iflib_num_rx_queues > 0 && iflib_num_rx_queues < queuemsgs - admincnt)
6686 		rx_queues = iflib_num_rx_queues;
6687 	else
6688 		rx_queues = queues;
6689 
6690 	if (rx_queues > scctx->isc_nrxqsets)
6691 		rx_queues = scctx->isc_nrxqsets;
6692 
6693 	/*
6694 	 * We want this to be all logical CPUs by default
6695 	 */
6696 	if (iflib_num_tx_queues > 0 && iflib_num_tx_queues < queues)
6697 		tx_queues = iflib_num_tx_queues;
6698 	else
6699 		tx_queues = mp_ncpus;
6700 
6701 	if (tx_queues > scctx->isc_ntxqsets)
6702 		tx_queues = scctx->isc_ntxqsets;
6703 
6704 	if (ctx->ifc_sysctl_qs_eq_override == 0) {
6705 #ifdef INVARIANTS
6706 		if (tx_queues != rx_queues)
6707 			device_printf(dev,
6708 			    "queue equality override not set, capping rx_queues at %d and tx_queues at %d\n",
6709 			    min(rx_queues, tx_queues), min(rx_queues, tx_queues));
6710 #endif
6711 		tx_queues = min(rx_queues, tx_queues);
6712 		rx_queues = min(rx_queues, tx_queues);
6713 	}
6714 
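	/*
	 * Only the RX queues plus the admin/link interrupt need dedicated
	 * MSI-X vectors; TX completions are handled from the RX interrupt
	 * or a deferred task, so tx_queues does not enter the count.
	 */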
6715 	vectors = rx_queues + admincnt;
6716 	if (msgs < vectors) {
6717 		device_printf(dev,
6718 		    "insufficient number of MSI-X vectors "
6719 		    "(supported %d, need %d)\n", msgs, vectors);
6720 		goto msi;
6721 	}
6722 
6723 	device_printf(dev, "Using %d RX queues %d TX queues\n", rx_queues,
6724 	    tx_queues);
6725 	msgs = vectors;
6726 	if ((err = pci_alloc_msix(dev, &vectors)) == 0) {
6727 		if (vectors != msgs) {
6728 			device_printf(dev,
6729 			    "Unable to allocate sufficient MSI-X vectors "
6730 			    "(got %d, need %d)\n", vectors, msgs);
6731 			pci_release_msi(dev);
6732 			if (bar != -1) {
6733 				bus_release_resource(dev, SYS_RES_MEMORY, bar,
6734 				    ctx->ifc_msix_mem);
6735 				ctx->ifc_msix_mem = NULL;
6736 			}
6737 			goto msi;
6738 		}
6739 		device_printf(dev, "Using MSI-X interrupts with %d vectors\n",
6740 		    vectors);
6741 		scctx->isc_vectors = vectors;
6742 		scctx->isc_nrxqsets = rx_queues;
6743 		scctx->isc_ntxqsets = tx_queues;
6744 		scctx->isc_intr = IFLIB_INTR_MSIX;
6745 
6746 		return (vectors);
6747 	} else {
6748 		device_printf(dev,
6749 		    "failed to allocate %d MSI-X vectors, err: %d\n", vectors,
6750 		    err);
6751 		if (bar != -1) {
6752 			bus_release_resource(dev, SYS_RES_MEMORY, bar,
6753 			    ctx->ifc_msix_mem);
6754 			ctx->ifc_msix_mem = NULL;
6755 		}
6756 	}
6757 
6758 msi:
6759 	vectors = pci_msi_count(dev);
6760 	scctx->isc_nrxqsets = 1;
6761 	scctx->isc_ntxqsets = 1;
6762 	scctx->isc_vectors = vectors;
6763 	if (vectors == 1 && pci_alloc_msi(dev, &vectors) == 0) {
		device_printf(dev, "Using an MSI interrupt\n");
6765 		scctx->isc_intr = IFLIB_INTR_MSI;
6766 	} else {
6767 		scctx->isc_vectors = 1;
		device_printf(dev, "Using a Legacy interrupt\n");
6769 		scctx->isc_intr = IFLIB_INTR_LEGACY;
6770 	}
6771 
6772 	return (vectors);
6773 }
6774 
6775 static const char *ring_states[] = { "IDLE", "BUSY", "STALLED", "ABDICATED" };
6776 
6777 static int
6778 mp_ring_state_handler(SYSCTL_HANDLER_ARGS)
6779 {
6780 	int rc;
6781 	uint16_t *state = ((uint16_t *)oidp->oid_arg1);
6782 	struct sbuf *sb;
6783 	const char *ring_state = "UNKNOWN";
6784 
6785 	/* XXX needed ? */
6786 	rc = sysctl_wire_old_buffer(req, 0);
6787 	MPASS(rc == 0);
6788 	if (rc != 0)
6789 		return (rc);
6790 	sb = sbuf_new_for_sysctl(NULL, NULL, 80, req);
6791 	MPASS(sb != NULL);
6792 	if (sb == NULL)
6793 		return (ENOMEM);
6794 	if (state[3] <= 3)
6795 		ring_state = ring_states[state[3]];
6796 
6797 	sbuf_printf(sb, "pidx_head: %04hd pidx_tail: %04hd cidx: %04hd state: %s",
6798 		    state[0], state[1], state[2], ring_state);
6799 	rc = sbuf_finish(sb);
6800 	sbuf_delete(sb);
	return (rc);
6802 }
6803 
6804 enum iflib_ndesc_handler {
6805 	IFLIB_NTXD_HANDLER,
6806 	IFLIB_NRXD_HANDLER,
6807 };
6808 
6809 static int
6810 mp_ndesc_handler(SYSCTL_HANDLER_ARGS)
6811 {
6812 	if_ctx_t ctx = (void *)arg1;
6813 	enum iflib_ndesc_handler type = arg2;
6814 	char buf[256] = {0};
6815 	qidx_t *ndesc;
6816 	char *p, *next;
6817 	int nqs, rc, i;
6818 
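	/*
	 * The string form is a comma- or space-separated list with one
	 * descriptor count per hardware queue of the qset (e.g. the
	 * hypothetical setting "override_ntxds=512,1024" for a two-queue
	 * qset); a count of 0 keeps the driver default.
	 */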
6819 	nqs = 8;
	switch (type) {
6821 	case IFLIB_NTXD_HANDLER:
6822 		ndesc = ctx->ifc_sysctl_ntxds;
6823 		if (ctx->ifc_sctx)
6824 			nqs = ctx->ifc_sctx->isc_ntxqs;
6825 		break;
6826 	case IFLIB_NRXD_HANDLER:
6827 		ndesc = ctx->ifc_sysctl_nrxds;
6828 		if (ctx->ifc_sctx)
6829 			nqs = ctx->ifc_sctx->isc_nrxqs;
6830 		break;
6831 	default:
6832 		printf("%s: unhandled type\n", __func__);
6833 		return (EINVAL);
6834 	}
6835 	if (nqs == 0)
6836 		nqs = 8;
6837 
	for (i = 0; i < 8; i++) {
6839 		if (i >= nqs)
6840 			break;
6841 		if (i)
6842 			strcat(buf, ",");
6843 		sprintf(strchr(buf, 0), "%d", ndesc[i]);
6844 	}
6845 
6846 	rc = sysctl_handle_string(oidp, buf, sizeof(buf), req);
6847 	if (rc || req->newptr == NULL)
		return (rc);
6849 
6850 	for (i = 0, next = buf, p = strsep(&next, " ,"); i < 8 && p;
6851 	    i++, p = strsep(&next, " ,")) {
6852 		ndesc[i] = strtoul(p, NULL, 10);
6853 	}
6854 
	return (rc);
6856 }
6857 
6858 #define NAME_BUFLEN 32
6859 static void
6860 iflib_add_device_sysctl_pre(if_ctx_t ctx)
6861 {
	device_t dev = iflib_get_dev(ctx);
6863 	struct sysctl_oid_list *child, *oid_list;
6864 	struct sysctl_ctx_list *ctx_list;
6865 	struct sysctl_oid *node;
6866 
6867 	ctx_list = device_get_sysctl_ctx(dev);
6868 	child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
6869 	ctx->ifc_sysctl_node = node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, "iflib",
6870 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "IFLIB fields");
6871 	oid_list = SYSCTL_CHILDREN(node);
6872 
6873 	SYSCTL_ADD_CONST_STRING(ctx_list, oid_list, OID_AUTO, "driver_version",
6874 		       CTLFLAG_RD, ctx->ifc_sctx->isc_driver_version,
6875 		       "driver version");
6876 
6877 	SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_ntxqs",
6878 		       CTLFLAG_RWTUN, &ctx->ifc_sysctl_ntxqs, 0,
6879 			"# of txqs to use, 0 => use default #");
6880 	SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_nrxqs",
6881 		       CTLFLAG_RWTUN, &ctx->ifc_sysctl_nrxqs, 0,
6882 			"# of rxqs to use, 0 => use default #");
6883 	SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_qs_enable",
6884 		       CTLFLAG_RWTUN, &ctx->ifc_sysctl_qs_eq_override, 0,
		       "permit #txq != #rxq");
	SYSCTL_ADD_INT(ctx_list, oid_list, OID_AUTO, "disable_msix",
		       CTLFLAG_RWTUN, &ctx->ifc_softc_ctx.isc_disable_msix, 0,
		       "disable MSI-X (default 0)");
6889 	SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "rx_budget",
6890 		       CTLFLAG_RWTUN, &ctx->ifc_sysctl_rx_budget, 0,
6891 		       "set the RX budget");
6892 	SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "tx_abdicate",
6893 		       CTLFLAG_RWTUN, &ctx->ifc_sysctl_tx_abdicate, 0,
6894 		       "cause TX to abdicate instead of running to completion");
6895 	ctx->ifc_sysctl_core_offset = CORE_OFFSET_UNSPECIFIED;
6896 	SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "core_offset",
6897 		       CTLFLAG_RDTUN, &ctx->ifc_sysctl_core_offset, 0,
6898 		       "offset to start using cores at");
6899 	SYSCTL_ADD_U8(ctx_list, oid_list, OID_AUTO, "separate_txrx",
6900 		       CTLFLAG_RDTUN, &ctx->ifc_sysctl_separate_txrx, 0,
6901 		       "use separate cores for TX and RX");
6902 	SYSCTL_ADD_U8(ctx_list, oid_list, OID_AUTO, "use_logical_cores",
6903 		      CTLFLAG_RDTUN, &ctx->ifc_sysctl_use_logical_cores, 0,
6904 		      "try to make use of logical cores for TX and RX");
6905 
6906 	/* XXX change for per-queue sizes */
6907 	SYSCTL_ADD_PROC(ctx_list, oid_list, OID_AUTO, "override_ntxds",
6908 	    CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_NEEDGIANT, ctx,
6909 	    IFLIB_NTXD_HANDLER, mp_ndesc_handler, "A",
6910 	    "list of # of TX descriptors to use, 0 = use default #");
6911 	SYSCTL_ADD_PROC(ctx_list, oid_list, OID_AUTO, "override_nrxds",
6912 	    CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_NEEDGIANT, ctx,
6913 	    IFLIB_NRXD_HANDLER, mp_ndesc_handler, "A",
6914 	    "list of # of RX descriptors to use, 0 = use default #");
6915 }
6916 
6917 static void
6918 iflib_add_device_sysctl_post(if_ctx_t ctx)
6919 {
6920 	if_shared_ctx_t sctx = ctx->ifc_sctx;
6921 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
	device_t dev = iflib_get_dev(ctx);
6923 	struct sysctl_oid_list *child;
6924 	struct sysctl_ctx_list *ctx_list;
6925 	iflib_fl_t fl;
6926 	iflib_txq_t txq;
6927 	iflib_rxq_t rxq;
6928 	int i, j;
6929 	char namebuf[NAME_BUFLEN];
6930 	char *qfmt;
6931 	struct sysctl_oid *queue_node, *fl_node, *node;
6932 	struct sysctl_oid_list *queue_list, *fl_list;

	ctx_list = device_get_sysctl_ctx(dev);
6934 
6935 	node = ctx->ifc_sysctl_node;
6936 	child = SYSCTL_CHILDREN(node);
6937 
6938 	if (scctx->isc_ntxqsets > 100)
6939 		qfmt = "txq%03d";
6940 	else if (scctx->isc_ntxqsets > 10)
6941 		qfmt = "txq%02d";
6942 	else
6943 		qfmt = "txq%d";
6944 	for (i = 0, txq = ctx->ifc_txqs; i < scctx->isc_ntxqsets; i++, txq++) {
6945 		snprintf(namebuf, NAME_BUFLEN, qfmt, i);
6946 		queue_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, namebuf,
6947 		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name");
6948 		queue_list = SYSCTL_CHILDREN(queue_node);
6949 		SYSCTL_ADD_INT(ctx_list, queue_list, OID_AUTO, "cpu",
6950 			       CTLFLAG_RD,
6951 			       &txq->ift_task.gt_cpu, 0, "cpu this queue is bound to");
6952 #if MEMORY_LOGGING
6953 		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_dequeued",
6954 				CTLFLAG_RD,
6955 				&txq->ift_dequeued, "total mbufs freed");
6956 		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_enqueued",
6957 				CTLFLAG_RD,
6958 				&txq->ift_enqueued, "total mbufs enqueued");
6959 #endif
6960 		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "mbuf_defrag",
6961 				   CTLFLAG_RD,
6962 				   &txq->ift_mbuf_defrag, "# of times m_defrag was called");
6963 		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "m_pullups",
6964 				   CTLFLAG_RD,
6965 				   &txq->ift_pullups, "# of times m_pullup was called");
6966 		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "mbuf_defrag_failed",
6967 				   CTLFLAG_RD,
6968 				   &txq->ift_mbuf_defrag_failed, "# of times m_defrag failed");
6969 		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "no_desc_avail",
6970 				   CTLFLAG_RD,
6971 				   &txq->ift_no_desc_avail, "# of times no descriptors were available");
6972 		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "tx_map_failed",
6973 				   CTLFLAG_RD,
6974 				   &txq->ift_map_failed, "# of times DMA map failed");
6975 		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txd_encap_efbig",
6976 				   CTLFLAG_RD,
6977 				   &txq->ift_txd_encap_efbig, "# of times txd_encap returned EFBIG");
6978 		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "no_tx_dma_setup",
6979 				   CTLFLAG_RD,
6980 				   &txq->ift_no_tx_dma_setup, "# of times map failed for other than EFBIG");
6981 		SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_pidx",
6982 				   CTLFLAG_RD,
6983 				   &txq->ift_pidx, 1, "Producer Index");
6984 		SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_cidx",
6985 				   CTLFLAG_RD,
6986 				   &txq->ift_cidx, 1, "Consumer Index");
6987 		SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_cidx_processed",
6988 				   CTLFLAG_RD,
6989 				   &txq->ift_cidx_processed, 1, "Consumer Index seen by credit update");
6990 		SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_in_use",
6991 				   CTLFLAG_RD,
6992 				   &txq->ift_in_use, 1, "descriptors in use");
6993 		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_processed",
6994 				   CTLFLAG_RD,
				   &txq->ift_processed, "descriptors processed for clean");
6996 		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_cleaned",
6997 				   CTLFLAG_RD,
6998 				   &txq->ift_cleaned, "total cleaned");
		SYSCTL_ADD_PROC(ctx_list, queue_list, OID_AUTO, "ring_state",
		    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
		    __DEVOLATILE(uint64_t *, &txq->ift_br->state), 0,
		    mp_ring_state_handler, "A", "soft ring state");
		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_enqueues",
				       CTLFLAG_RD, &txq->ift_br->enqueues,
				       "# of enqueues to the mp_ring for this queue");
		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_drops",
				       CTLFLAG_RD, &txq->ift_br->drops,
				       "# of drops in the mp_ring for this queue");
		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_starts",
				       CTLFLAG_RD, &txq->ift_br->starts,
				       "# of normal consumer starts in the mp_ring for this queue");
		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_stalls",
				       CTLFLAG_RD, &txq->ift_br->stalls,
				       "# of consumer stalls in the mp_ring for this queue");
		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_restarts",
				       CTLFLAG_RD, &txq->ift_br->restarts,
				       "# of consumer restarts in the mp_ring for this queue");
		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_abdications",
				       CTLFLAG_RD, &txq->ift_br->abdications,
				       "# of consumer abdications in the mp_ring for this queue");
	}

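	/*
	 * Pick a queue-name format wide enough to keep the per-queue node
	 * names aligned and sortable.  These nodes appear under the
	 * device's sysctl tree, e.g. (driver and unit are hypothetical):
	 *
	 *	dev.<driver>.<unit>.iflib.rxq0.rxq_fl0.credits
	 */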
	if (scctx->isc_nrxqsets > 100)
		qfmt = "rxq%03d";
	else if (scctx->isc_nrxqsets > 10)
		qfmt = "rxq%02d";
	else
		qfmt = "rxq%d";
	for (i = 0, rxq = ctx->ifc_rxqs; i < scctx->isc_nrxqsets; i++, rxq++) {
		snprintf(namebuf, NAME_BUFLEN, qfmt, i);
		queue_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, namebuf,
		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name");
		queue_list = SYSCTL_CHILDREN(queue_node);
		SYSCTL_ADD_INT(ctx_list, queue_list, OID_AUTO, "cpu",
			       CTLFLAG_RD,
			       &rxq->ifr_task.gt_cpu, 0, "cpu this queue is bound to");
		if (sctx->isc_flags & IFLIB_HAS_RXCQ) {
			SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "rxq_cq_cidx",
				       CTLFLAG_RD,
				       &rxq->ifr_cq_cidx, 1, "Consumer Index");
		}

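		/* Describe each free list backing this rx queue. */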
		for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) {
			snprintf(namebuf, NAME_BUFLEN, "rxq_fl%d", j);
			fl_node = SYSCTL_ADD_NODE(ctx_list, queue_list, OID_AUTO, namebuf,
			    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "freelist Name");
			fl_list = SYSCTL_CHILDREN(fl_node);
			SYSCTL_ADD_U16(ctx_list, fl_list, OID_AUTO, "pidx",
				       CTLFLAG_RD,
				       &fl->ifl_pidx, 1, "Producer Index");
			SYSCTL_ADD_U16(ctx_list, fl_list, OID_AUTO, "cidx",
				       CTLFLAG_RD,
				       &fl->ifl_cidx, 1, "Consumer Index");
			SYSCTL_ADD_U16(ctx_list, fl_list, OID_AUTO, "credits",
				       CTLFLAG_RD,
				       &fl->ifl_credits, 1, "credits available");
			SYSCTL_ADD_U16(ctx_list, fl_list, OID_AUTO, "buf_size",
				       CTLFLAG_RD,
				       &fl->ifl_buf_size, 1, "buffer size");
#if MEMORY_LOGGING
			SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_m_enqueued",
					CTLFLAG_RD,
					&fl->ifl_m_enqueued, "mbufs allocated");
			SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_m_dequeued",
					CTLFLAG_RD,
					&fl->ifl_m_dequeued, "mbufs freed");
			SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_cl_enqueued",
					CTLFLAG_RD,
					&fl->ifl_cl_enqueued, "clusters allocated");
			SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_cl_dequeued",
					CTLFLAG_RD,
					&fl->ifl_cl_dequeued, "clusters freed");
#endif
		}
	}
}

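/*
 * Request that the interface be reinitialized.  The reset is deferred: this
 * only raises IFC_DO_RESET, which the admin task acts on at its next pass.
 */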
void
iflib_request_reset(if_ctx_t ctx)
{

	STATE_LOCK(ctx);
	ctx->ifc_flags |= IFC_DO_RESET;
	STATE_UNLOCK(ctx);
}

#ifndef __NO_STRICT_ALIGNMENT
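/*
 * On strict-alignment architectures the 14-byte Ethernet header leaves the
 * IP header misaligned.  Re-align by copying the frame forward within the
 * cluster when there is room, or by prepending a separate mbuf that carries
 * just the Ethernet header.
 */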
static struct mbuf *
iflib_fixup_rx(struct mbuf *m)
{
	struct mbuf *n;

	if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) {
		bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len);
		m->m_data += ETHER_HDR_LEN;
		n = m;
	} else {
		MGETHDR(n, M_NOWAIT, MT_DATA);
		if (n == NULL) {
			m_freem(m);
			return (NULL);
		}
		bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
		m->m_data += ETHER_HDR_LEN;
		m->m_len -= ETHER_HDR_LEN;
		n->m_len = ETHER_HDR_LEN;
		M_MOVE_PKTHDR(n, m);
		n->m_next = m;
	}
	return (n);
}
#endif

#ifdef DEBUGNET
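/*
 * Report the rx ring geometry to debugnet so that it can preallocate a
 * matching pool of buffers: the number of rx queues, the number of clusters
 * per free list, and the cluster size in use.
 */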
static void
iflib_debugnet_init(if_t ifp, int *nrxr, int *ncl, int *clsize)
{
	if_ctx_t ctx;

	ctx = if_getsoftc(ifp);
	CTX_LOCK(ctx);
	*nrxr = NRXQSETS(ctx);
	*ncl = ctx->ifc_rxqs[0].ifr_fl->ifl_size;
	*clsize = ctx->ifc_rxqs[0].ifr_fl->ifl_buf_size;
	CTX_UNLOCK(ctx);
}

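/*
 * At DEBUGNET_START, cache each free list's cluster zone so that buffers
 * can be replenished from the debugnet (kernel dump) context, and disable
 * tx batching so that frames are pushed to the hardware immediately.
 */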
static void
iflib_debugnet_event(if_t ifp, enum debugnet_ev event)
{
	if_ctx_t ctx;
	if_softc_ctx_t scctx;
	iflib_fl_t fl;
	iflib_rxq_t rxq;
	int i, j;

	ctx = if_getsoftc(ifp);
	scctx = &ctx->ifc_softc_ctx;

	switch (event) {
	case DEBUGNET_START:
		for (i = 0; i < scctx->isc_nrxqsets; i++) {
			rxq = &ctx->ifc_rxqs[i];
			for (j = 0; j < rxq->ifr_nfl; j++) {
				fl = &rxq->ifr_fl[j];
				fl->ifl_zone = m_getzone(fl->ifl_buf_size);
			}
		}
		iflib_no_tx_batch = 1;
		break;
	default:
		break;
	}
}

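/*
 * Transmit a single debugnet frame on the first tx queue, ringing the
 * doorbell directly rather than going through the mp_ring.
 */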
static int
iflib_debugnet_transmit(if_t ifp, struct mbuf *m)
{
	if_ctx_t ctx;
	iflib_txq_t txq;
	int error;

	ctx = if_getsoftc(ifp);
	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return (EBUSY);

	txq = &ctx->ifc_txqs[0];
	error = iflib_encap(txq, &m);
	if (error == 0)
		(void)iflib_txd_db_check(txq, true);
	return (error);
}

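/*
 * Debugnet poll: reclaim completed tx descriptors on the first tx queue,
 * then service every rx queue for a small budget of frames.
 */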
static int
iflib_debugnet_poll(if_t ifp, int count)
{
	struct epoch_tracker et;
	if_ctx_t ctx;
	if_softc_ctx_t scctx;
	iflib_txq_t txq;
	int i;

	ctx = if_getsoftc(ifp);
	scctx = &ctx->ifc_softc_ctx;

	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return (EBUSY);

	txq = &ctx->ifc_txqs[0];
	(void)iflib_completed_tx_reclaim(txq, RECLAIM_THRESH(ctx));

	NET_EPOCH_ENTER(et);
	for (i = 0; i < scctx->isc_nrxqsets; i++)
		(void)iflib_rxeof(&ctx->ifc_rxqs[i], 16 /* XXX */);
	NET_EPOCH_EXIT(et);
	return (0);
}
#endif /* DEBUGNET */