/*-
 * Copyright (c) 2014-2018, Matthew Macy <mmacy@mattmacy.io>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *  1. Redistributions of source code must retain the above copyright notice,
 *     this list of conditions and the following disclaimer.
 *
 *  2. Neither the name of Matthew Macy nor the names of its
 *     contributors may be used to endorse or promote products derived from
 *     this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_acpi.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/types.h>
#include <sys/bus.h>
#include <sys/eventhandler.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/md5.h>
#include <sys/mutex.h>
#include <sys/module.h>
#include <sys/kobj.h>
#include <sys/rman.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/taskqueue.h>
#include <sys/limits.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_types.h>
#include <net/if_media.h>
#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/mp_ring.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/tcp_lro.h>
#include <netinet/in_systm.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>
#include <netinet/ip_var.h>
#include <netinet/netdump/netdump.h>
#include <netinet6/ip6_var.h>

#include <machine/bus.h>
#include <machine/in_cksum.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <dev/led/led.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pci_private.h>

#include <net/iflib.h>
#include <net/iflib_private.h>

#include "ifdi_if.h"

#if defined(__i386__) || defined(__amd64__)
#include <sys/memdesc.h>
#include <machine/bus.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>
#include <x86/include/busdma_impl.h>
#include <x86/iommu/busdma_dmar.h>
#endif

#include <sys/bitstring.h>
/*
 * enable accounting of every mbuf as it comes in to and goes out of
 * iflib's software descriptor references
 */
#define MEMORY_LOGGING 0
/*
 * Enable mbuf vectors for compressing long mbuf chains
 */
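
/*
 * NB (descriptive note): MEMORY_LOGGING above is a compile-time knob.
 * When set to 1 it enables the #if MEMORY_LOGGING counters further down
 * (e.g. the ift_enqueued/ift_dequeued and ifl_m_enqueued/ifl_cl_enqueued
 * fields), at the cost of bumping a counter for every mbuf and cluster.
 */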

/*
 * NB:
 * - Prefetching in tx cleaning should perhaps be a tunable. The distance ahead
 *   we prefetch needs to be determined by the time spent in m_free vis a vis
 *   the cost of a prefetch. This will of course vary based on the workload:
 *      - NFLX's m_free path is dominated by vm-based M_EXT manipulation which
 *        is quite expensive, thus suggesting very little prefetch.
 *      - small packet forwarding which is just returning a single mbuf to
 *        UMA will typically be very fast vis a vis the cost of a memory
 *        access.
 */


/*
 * File organization:
 *  - private structures
 *  - iflib private utility functions
 *  - ifnet functions
 *  - vlan registry and other exported functions
 *  - iflib public core functions
 *
 *
 */
MALLOC_DEFINE(M_IFLIB, "iflib", "ifnet library");

struct iflib_txq;
typedef struct iflib_txq *iflib_txq_t;
struct iflib_rxq;
typedef struct iflib_rxq *iflib_rxq_t;
struct iflib_fl;
typedef struct iflib_fl *iflib_fl_t;

struct iflib_ctx;

static void iru_init(if_rxd_update_t iru, iflib_rxq_t rxq, uint8_t flid);
static void iflib_timer(void *arg);

typedef struct iflib_filter_info {
	driver_filter_t *ifi_filter;
	void *ifi_filter_arg;
	struct grouptask *ifi_task;
	void *ifi_ctx;
} *iflib_filter_info_t;

struct iflib_ctx {
	KOBJ_FIELDS;
	/*
	 * Pointer to hardware driver's softc
	 */
	void *ifc_softc;
	device_t ifc_dev;
	if_t ifc_ifp;

	cpuset_t ifc_cpus;
	if_shared_ctx_t ifc_sctx;
	struct if_softc_ctx ifc_softc_ctx;

	struct sx ifc_ctx_sx;
	struct mtx ifc_state_mtx;

	uint16_t ifc_nhwtxqs;

	iflib_txq_t ifc_txqs;
	iflib_rxq_t ifc_rxqs;
	uint32_t ifc_if_flags;
	uint32_t ifc_flags;
	uint32_t ifc_max_fl_buf_size;
	int ifc_in_detach;

	int ifc_link_state;
	int ifc_link_irq;
	int ifc_watchdog_events;
	struct cdev *ifc_led_dev;
	struct resource *ifc_msix_mem;

	struct if_irq ifc_legacy_irq;
	struct grouptask ifc_admin_task;
	struct grouptask ifc_vflr_task;
	struct iflib_filter_info ifc_filter_info;
	struct ifmedia ifc_media;

	struct sysctl_oid *ifc_sysctl_node;
	uint16_t ifc_sysctl_ntxqs;
	uint16_t ifc_sysctl_nrxqs;
	uint16_t ifc_sysctl_qs_eq_override;
	uint16_t ifc_sysctl_rx_budget;
	uint16_t ifc_sysctl_tx_abdicate;

	qidx_t ifc_sysctl_ntxds[8];
	qidx_t ifc_sysctl_nrxds[8];
	struct if_txrx ifc_txrx;
#define isc_txd_encap ifc_txrx.ift_txd_encap
#define isc_txd_flush ifc_txrx.ift_txd_flush
#define isc_txd_credits_update ifc_txrx.ift_txd_credits_update
#define isc_rxd_available ifc_txrx.ift_rxd_available
#define isc_rxd_pkt_get ifc_txrx.ift_rxd_pkt_get
#define isc_rxd_refill ifc_txrx.ift_rxd_refill
#define isc_rxd_flush ifc_txrx.ift_rxd_flush
#define isc_legacy_intr ifc_txrx.ift_legacy_intr
	eventhandler_tag ifc_vlan_attach_event;
	eventhandler_tag ifc_vlan_detach_event;
	uint8_t ifc_mac[ETHER_ADDR_LEN];
	char ifc_mtx_name[16];
};


void *
iflib_get_softc(if_ctx_t ctx)
{

	return (ctx->ifc_softc);
}

device_t
iflib_get_dev(if_ctx_t ctx)
{

	return (ctx->ifc_dev);
}

if_t
iflib_get_ifp(if_ctx_t ctx)
{

	return (ctx->ifc_ifp);
}

struct ifmedia *
iflib_get_media(if_ctx_t ctx)
{

	return (&ctx->ifc_media);
}

uint32_t
iflib_get_flags(if_ctx_t ctx)
{
	return (ctx->ifc_flags);
}

void
iflib_set_detach(if_ctx_t ctx)
{
	ctx->ifc_in_detach = 1;
}

void
iflib_set_mac(if_ctx_t ctx, uint8_t mac[ETHER_ADDR_LEN])
{

	bcopy(mac, ctx->ifc_mac, ETHER_ADDR_LEN);
}

if_softc_ctx_t
iflib_get_softc_ctx(if_ctx_t ctx)
{

	return (&ctx->ifc_softc_ctx);
}

if_shared_ctx_t
iflib_get_sctx(if_ctx_t ctx)
{

	return (ctx->ifc_sctx);
}
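
/*
 * Example (illustrative only; the "mydrv" names are hypothetical): a
 * driver's IFDI methods typically recover their own state and the
 * shared/softc contexts through the accessors above, e.g.:
 *
 *	static int
 *	mydrv_if_attach_pre(if_ctx_t ctx)
 *	{
 *		struct mydrv_softc *sc = iflib_get_softc(ctx);
 *
 *		sc->dev = iflib_get_dev(ctx);
 *		sc->scctx = iflib_get_softc_ctx(ctx);
 *		...
 *	}
 */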

#define IP_ALIGNED(m) ((((uintptr_t)(m)->m_data) & 0x3) == 0x2)
#define CACHE_PTR_INCREMENT (CACHE_LINE_SIZE/sizeof(void*))
#define CACHE_PTR_NEXT(ptr) ((void *)(((uintptr_t)(ptr)+CACHE_LINE_SIZE-1) & ~(CACHE_LINE_SIZE-1)))

#define LINK_ACTIVE(ctx) ((ctx)->ifc_link_state == LINK_STATE_UP)
#define CTX_IS_VF(ctx) ((ctx)->ifc_sctx->isc_flags & IFLIB_IS_VF)

#define RX_SW_DESC_MAP_CREATED	(1 << 0)
#define TX_SW_DESC_MAP_CREATED	(1 << 1)
#define RX_SW_DESC_INUSE	(1 << 3)
#define TX_SW_DESC_MAPPED	(1 << 4)

#define	M_TOOBIG	M_PROTO1

typedef struct iflib_sw_rx_desc_array {
	bus_dmamap_t	*ifsd_map;	/* bus_dma maps for packet */
	struct mbuf	**ifsd_m;	/* pkthdr mbufs */
	caddr_t		*ifsd_cl;	/* direct cluster pointer for rx */
	uint8_t		*ifsd_flags;
} iflib_rxsd_array_t;

typedef struct iflib_sw_tx_desc_array {
	bus_dmamap_t	*ifsd_map;	/* bus_dma maps for packet */
	struct mbuf	**ifsd_m;	/* pkthdr mbufs */
	uint8_t		*ifsd_flags;
} if_txsd_vec_t;


/* magic number that should be high enough for any hardware */
#define IFLIB_MAX_TX_SEGS		128
/* bnxt supports 64 with hardware LRO enabled */
#define IFLIB_MAX_RX_SEGS		64
#define IFLIB_RX_COPY_THRESH		128
#define IFLIB_MAX_RX_REFRESH		32
/* The minimum descriptors per second before we start coalescing */
#define IFLIB_MIN_DESC_SEC		16384
#define IFLIB_DEFAULT_TX_UPDATE_FREQ	16
#define IFLIB_QUEUE_IDLE		0
#define IFLIB_QUEUE_HUNG		1
#define IFLIB_QUEUE_WORKING		2
/* maximum number of txqs that can share an rx interrupt */
#define IFLIB_MAX_TX_SHARED_INTR	4

/* this should really scale with ring size - this is a fairly arbitrary value */
#define TX_BATCH_SIZE			32

#define IFLIB_RESTART_BUDGET		8


#define CSUM_OFFLOAD		(CSUM_IP_TSO|CSUM_IP6_TSO|CSUM_IP| \
				 CSUM_IP_UDP|CSUM_IP_TCP|CSUM_IP_SCTP| \
				 CSUM_IP6_UDP|CSUM_IP6_TCP|CSUM_IP6_SCTP)
struct iflib_txq {
	qidx_t		ift_in_use;
	qidx_t		ift_cidx;
	qidx_t		ift_cidx_processed;
	qidx_t		ift_pidx;
	uint8_t		ift_gen;
	uint8_t		ift_br_offset;
	uint16_t	ift_npending;
	uint16_t	ift_db_pending;
	uint16_t	ift_rs_pending;
	/* implicit pad */
	uint8_t		ift_txd_size[8];
	uint64_t	ift_processed;
	uint64_t	ift_cleaned;
	uint64_t	ift_cleaned_prev;
#if MEMORY_LOGGING
	uint64_t	ift_enqueued;
	uint64_t	ift_dequeued;
#endif
	uint64_t	ift_no_tx_dma_setup;
	uint64_t	ift_no_desc_avail;
	uint64_t	ift_mbuf_defrag_failed;
	uint64_t	ift_mbuf_defrag;
	uint64_t	ift_map_failed;
	uint64_t	ift_txd_encap_efbig;
	uint64_t	ift_pullups;
	uint64_t	ift_last_timer_tick;

	struct mtx	ift_mtx;
	struct mtx	ift_db_mtx;

	/* constant values */
	if_ctx_t	ift_ctx;
	struct ifmp_ring	*ift_br;
	struct grouptask	ift_task;
	qidx_t		ift_size;
	uint16_t	ift_id;
	struct callout	ift_timer;

	if_txsd_vec_t	ift_sds;
	uint8_t		ift_qstatus;
	uint8_t		ift_closed;
	uint8_t		ift_update_freq;
	struct iflib_filter_info ift_filter_info;
	bus_dma_tag_t	ift_desc_tag;
	bus_dma_tag_t	ift_tso_desc_tag;
	iflib_dma_info_t	ift_ifdi;
#define MTX_NAME_LEN 16
	char	ift_mtx_name[MTX_NAME_LEN];
	char	ift_db_mtx_name[MTX_NAME_LEN];
	bus_dma_segment_t	ift_segs[IFLIB_MAX_TX_SEGS] __aligned(CACHE_LINE_SIZE);
#ifdef IFLIB_DIAGNOSTICS
	uint64_t ift_cpu_exec_count[256];
#endif
} __aligned(CACHE_LINE_SIZE);

struct iflib_fl {
	qidx_t		ifl_cidx;
	qidx_t		ifl_pidx;
	qidx_t		ifl_credits;
	uint8_t		ifl_gen;
	uint8_t		ifl_rxd_size;
#if MEMORY_LOGGING
	uint64_t	ifl_m_enqueued;
	uint64_t	ifl_m_dequeued;
	uint64_t	ifl_cl_enqueued;
	uint64_t	ifl_cl_dequeued;
#endif
	/* implicit pad */

	bitstr_t	*ifl_rx_bitmap;
	qidx_t		ifl_fragidx;
	/* constant */
	qidx_t		ifl_size;
	uint16_t	ifl_buf_size;
	uint16_t	ifl_cltype;
	uma_zone_t	ifl_zone;
	iflib_rxsd_array_t	ifl_sds;
	iflib_rxq_t	ifl_rxq;
	uint8_t		ifl_id;
	bus_dma_tag_t	ifl_desc_tag;
	iflib_dma_info_t	ifl_ifdi;
	uint64_t	ifl_bus_addrs[IFLIB_MAX_RX_REFRESH] __aligned(CACHE_LINE_SIZE);
	caddr_t		ifl_vm_addrs[IFLIB_MAX_RX_REFRESH];
	qidx_t		ifl_rxd_idxs[IFLIB_MAX_RX_REFRESH];
} __aligned(CACHE_LINE_SIZE);

static inline qidx_t
get_inuse(int size, qidx_t cidx, qidx_t pidx, uint8_t gen)
{
	qidx_t used;

	if (pidx > cidx)
		used = pidx - cidx;
	else if (pidx < cidx)
		used = size - cidx + pidx;
	else if (gen == 0 && pidx == cidx)
		used = 0;
	else if (gen == 1 && pidx == cidx)
		used = size;
	else
		panic("bad state");

	return (used);
}

#define TXQ_AVAIL(txq) (txq->ift_size - get_inuse(txq->ift_size, txq->ift_cidx, txq->ift_pidx, txq->ift_gen))

#define IDXDIFF(head, tail, wrap) \
	((head) >= (tail) ? (head) - (tail) : (wrap) - (tail) + (head))
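
/*
 * Worked example of the ring arithmetic above (illustrative): with
 * size = 1024, cidx = 1000 and pidx = 8, the producer has wrapped, so
 * get_inuse() returns 1024 - 1000 + 8 = 32 in-flight descriptors and
 * TXQ_AVAIL() reports 992 free slots.  The gen bit only matters when
 * pidx == cidx, distinguishing "empty" (gen == 0) from "full"
 * (gen == 1).  IDXDIFF(8, 1000, 1024) computes the same wrapped
 * distance of 32.
 */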

struct iflib_rxq {
	/* If there is a separate completion queue -
	 * these are the cq cidx and pidx. Otherwise
	 * these are unused.
	 */
	qidx_t		ifr_size;
	qidx_t		ifr_cq_cidx;
	qidx_t		ifr_cq_pidx;
	uint8_t		ifr_cq_gen;
	uint8_t		ifr_fl_offset;

	if_ctx_t	ifr_ctx;
	iflib_fl_t	ifr_fl;
	uint64_t	ifr_rx_irq;
	uint16_t	ifr_id;
	uint8_t		ifr_lro_enabled;
	uint8_t		ifr_nfl;
	uint8_t		ifr_ntxqirq;
	uint8_t		ifr_txqid[IFLIB_MAX_TX_SHARED_INTR];
	struct lro_ctrl		ifr_lc;
	struct grouptask	ifr_task;
	struct iflib_filter_info ifr_filter_info;
	iflib_dma_info_t	ifr_ifdi;

	/* dynamically allocate if any drivers need a value substantially larger than this */
	struct if_rxd_frag	ifr_frags[IFLIB_MAX_RX_SEGS] __aligned(CACHE_LINE_SIZE);
#ifdef IFLIB_DIAGNOSTICS
	uint64_t ifr_cpu_exec_count[256];
#endif
} __aligned(CACHE_LINE_SIZE);

typedef struct if_rxsd {
	caddr_t *ifsd_cl;
	struct mbuf **ifsd_m;
	iflib_fl_t ifsd_fl;
	qidx_t ifsd_cidx;
} *if_rxsd_t;

/* multiple of word size */
#ifdef __LP64__
#define PKT_INFO_SIZE	6
#define RXD_INFO_SIZE	5
#define PKT_TYPE uint64_t
#else
#define PKT_INFO_SIZE	11
#define RXD_INFO_SIZE	8
#define PKT_TYPE uint32_t
#endif
#define PKT_LOOP_BOUND  ((PKT_INFO_SIZE/3)*3)
#define RXD_LOOP_BOUND  ((RXD_INFO_SIZE/4)*4)

typedef struct if_pkt_info_pad {
	PKT_TYPE pkt_val[PKT_INFO_SIZE];
} *if_pkt_info_pad_t;
typedef struct if_rxd_info_pad {
	PKT_TYPE rxd_val[RXD_INFO_SIZE];
} *if_rxd_info_pad_t;

CTASSERT(sizeof(struct if_pkt_info_pad) == sizeof(struct if_pkt_info));
CTASSERT(sizeof(struct if_rxd_info_pad) == sizeof(struct if_rxd_info));


static inline void
pkt_info_zero(if_pkt_info_t pi)
{
	if_pkt_info_pad_t pi_pad;

	pi_pad = (if_pkt_info_pad_t)pi;
	pi_pad->pkt_val[0] = 0; pi_pad->pkt_val[1] = 0; pi_pad->pkt_val[2] = 0;
	pi_pad->pkt_val[3] = 0; pi_pad->pkt_val[4] = 0; pi_pad->pkt_val[5] = 0;
#ifndef __LP64__
	pi_pad->pkt_val[6] = 0; pi_pad->pkt_val[7] = 0; pi_pad->pkt_val[8] = 0;
	pi_pad->pkt_val[9] = 0; pi_pad->pkt_val[10] = 0;
#endif
}

static device_method_t iflib_pseudo_methods[] = {
	DEVMETHOD(device_attach, noop_attach),
	DEVMETHOD(device_detach, iflib_pseudo_detach),
	DEVMETHOD_END
};

driver_t iflib_pseudodriver = {
	"iflib_pseudo", iflib_pseudo_methods, sizeof(struct iflib_ctx),
};

static inline void
rxd_info_zero(if_rxd_info_t ri)
{
	if_rxd_info_pad_t ri_pad;
	int i;

	ri_pad = (if_rxd_info_pad_t)ri;
	for (i = 0; i < RXD_LOOP_BOUND; i += 4) {
		ri_pad->rxd_val[i] = 0;
		ri_pad->rxd_val[i+1] = 0;
		ri_pad->rxd_val[i+2] = 0;
		ri_pad->rxd_val[i+3] = 0;
	}
#ifdef __LP64__
	ri_pad->rxd_val[RXD_INFO_SIZE-1] = 0;
#endif
}
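
/*
 * Note on the two helpers above: rather than calling bzero(), they zero
 * the structures through same-sized "pad" overlays using word-wide
 * stores, with the CTASSERTs above guaranteeing that the overlay and
 * real structure sizes stay in sync.  On LP64 the RXD loop covers
 * RXD_LOOP_BOUND (4) entries and the trailing store clears the fifth.
 */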

/*
 * Only allow a single packet to take up at most 1/nth of the tx ring
 */
#define MAX_SINGLE_PACKET_FRACTION 12
#define IF_BAD_DMA (bus_addr_t)-1

#define CTX_ACTIVE(ctx) ((if_getdrvflags((ctx)->ifc_ifp) & IFF_DRV_RUNNING))

#define CTX_LOCK_INIT(_sc)  sx_init(&(_sc)->ifc_ctx_sx, "iflib ctx lock")
#define CTX_LOCK(ctx) sx_xlock(&(ctx)->ifc_ctx_sx)
#define CTX_UNLOCK(ctx) sx_xunlock(&(ctx)->ifc_ctx_sx)
#define CTX_LOCK_DESTROY(ctx) sx_destroy(&(ctx)->ifc_ctx_sx)


#define STATE_LOCK_INIT(_sc, _name)  mtx_init(&(_sc)->ifc_state_mtx, _name, "iflib state lock", MTX_DEF)
#define STATE_LOCK(ctx) mtx_lock(&(ctx)->ifc_state_mtx)
#define STATE_UNLOCK(ctx) mtx_unlock(&(ctx)->ifc_state_mtx)
#define STATE_LOCK_DESTROY(ctx) mtx_destroy(&(ctx)->ifc_state_mtx)



#define CALLOUT_LOCK(txq)	mtx_lock(&txq->ift_mtx)
#define CALLOUT_UNLOCK(txq)	mtx_unlock(&txq->ift_mtx)


/* Our boot-time initialization hook */
static int	iflib_module_event_handler(module_t, int, void *);

static moduledata_t iflib_moduledata = {
	"iflib",
	iflib_module_event_handler,
	NULL
};

DECLARE_MODULE(iflib, iflib_moduledata, SI_SUB_INIT_IF, SI_ORDER_ANY);
MODULE_VERSION(iflib, 1);

MODULE_DEPEND(iflib, pci, 1, 1, 1);
MODULE_DEPEND(iflib, ether, 1, 1, 1);

TASKQGROUP_DEFINE(if_io_tqg, mp_ncpus, 1);
TASKQGROUP_DEFINE(if_config_tqg, 1, 1);

#ifndef IFLIB_DEBUG_COUNTERS
#ifdef INVARIANTS
#define IFLIB_DEBUG_COUNTERS 1
#else
#define IFLIB_DEBUG_COUNTERS 0
#endif /* !INVARIANTS */
#endif

static SYSCTL_NODE(_net, OID_AUTO, iflib, CTLFLAG_RD, 0,
                   "iflib driver parameters");

/*
 * XXX need to ensure that this can't accidentally cause the head to be moved backwards
 */
static int iflib_min_tx_latency = 0;
SYSCTL_INT(_net_iflib, OID_AUTO, min_tx_latency, CTLFLAG_RW,
	   &iflib_min_tx_latency, 0, "minimize transmit latency at the possible expense of throughput");
static int iflib_no_tx_batch = 0;
SYSCTL_INT(_net_iflib, OID_AUTO, no_tx_batch, CTLFLAG_RW,
	   &iflib_no_tx_batch, 0, "disable transmit batching, sending each packet immediately");


#if IFLIB_DEBUG_COUNTERS

static int iflib_tx_seen;
static int iflib_tx_sent;
static int iflib_tx_encap;
static int iflib_rx_allocs;
static int iflib_fl_refills;
static int iflib_fl_refills_large;
static int iflib_tx_frees;

SYSCTL_INT(_net_iflib, OID_AUTO, tx_seen, CTLFLAG_RD,
	   &iflib_tx_seen, 0, "# tx mbufs seen");
SYSCTL_INT(_net_iflib, OID_AUTO, tx_sent, CTLFLAG_RD,
	   &iflib_tx_sent, 0, "# tx mbufs sent");
SYSCTL_INT(_net_iflib, OID_AUTO, tx_encap, CTLFLAG_RD,
	   &iflib_tx_encap, 0, "# tx mbufs encapped");
SYSCTL_INT(_net_iflib, OID_AUTO, tx_frees, CTLFLAG_RD,
	   &iflib_tx_frees, 0, "# tx frees");
SYSCTL_INT(_net_iflib, OID_AUTO, rx_allocs, CTLFLAG_RD,
	   &iflib_rx_allocs, 0, "# rx allocations");
SYSCTL_INT(_net_iflib, OID_AUTO, fl_refills, CTLFLAG_RD,
	   &iflib_fl_refills, 0, "# refills");
SYSCTL_INT(_net_iflib, OID_AUTO, fl_refills_large, CTLFLAG_RD,
	   &iflib_fl_refills_large, 0, "# large refills");


static int iflib_txq_drain_flushing;
static int iflib_txq_drain_oactive;
static int iflib_txq_drain_notready;

SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_flushing, CTLFLAG_RD,
	   &iflib_txq_drain_flushing, 0, "# drain flushes");
SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_oactive, CTLFLAG_RD,
	   &iflib_txq_drain_oactive, 0, "# drain oactives");
SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_notready, CTLFLAG_RD,
	   &iflib_txq_drain_notready, 0, "# drain notready");


static int iflib_encap_load_mbuf_fail;
static int iflib_encap_pad_mbuf_fail;
static int iflib_encap_txq_avail_fail;
static int iflib_encap_txd_encap_fail;

SYSCTL_INT(_net_iflib, OID_AUTO, encap_load_mbuf_fail, CTLFLAG_RD,
	   &iflib_encap_load_mbuf_fail, 0, "# busdma load failures");
SYSCTL_INT(_net_iflib, OID_AUTO, encap_pad_mbuf_fail, CTLFLAG_RD,
	   &iflib_encap_pad_mbuf_fail, 0, "# runt frame pad failures");
SYSCTL_INT(_net_iflib, OID_AUTO, encap_txq_avail_fail, CTLFLAG_RD,
	   &iflib_encap_txq_avail_fail, 0, "# txq avail failures");
SYSCTL_INT(_net_iflib, OID_AUTO, encap_txd_encap_fail, CTLFLAG_RD,
	   &iflib_encap_txd_encap_fail, 0, "# driver encap failures");

static int iflib_task_fn_rxs;
static int iflib_rx_intr_enables;
static int iflib_fast_intrs;
static int iflib_rx_unavail;
static int iflib_rx_ctx_inactive;
static int iflib_rx_if_input;
static int iflib_rx_mbuf_null;
static int iflib_rxd_flush;

static int iflib_verbose_debug;

SYSCTL_INT(_net_iflib, OID_AUTO, task_fn_rx, CTLFLAG_RD,
	   &iflib_task_fn_rxs, 0, "# task_fn_rx calls");
SYSCTL_INT(_net_iflib, OID_AUTO, rx_intr_enables, CTLFLAG_RD,
	   &iflib_rx_intr_enables, 0, "# rx intr enables");
SYSCTL_INT(_net_iflib, OID_AUTO, fast_intrs, CTLFLAG_RD,
	   &iflib_fast_intrs, 0, "# fast_intr calls");
SYSCTL_INT(_net_iflib, OID_AUTO, rx_unavail, CTLFLAG_RD,
	   &iflib_rx_unavail, 0, "# times rxeof called with no available data");
SYSCTL_INT(_net_iflib, OID_AUTO, rx_ctx_inactive, CTLFLAG_RD,
	   &iflib_rx_ctx_inactive, 0, "# times rxeof called with inactive context");
SYSCTL_INT(_net_iflib, OID_AUTO, rx_if_input, CTLFLAG_RD,
	   &iflib_rx_if_input, 0, "# times rxeof called if_input");
SYSCTL_INT(_net_iflib, OID_AUTO, rx_mbuf_null, CTLFLAG_RD,
	   &iflib_rx_mbuf_null, 0, "# times rxeof got null mbuf");
SYSCTL_INT(_net_iflib, OID_AUTO, rxd_flush, CTLFLAG_RD,
	   &iflib_rxd_flush, 0, "# times rxd_flush called");
SYSCTL_INT(_net_iflib, OID_AUTO, verbose_debug, CTLFLAG_RW,
	   &iflib_verbose_debug, 0, "enable verbose debugging");

#define DBG_COUNTER_INC(name) atomic_add_int(&(iflib_ ## name), 1)
static void
iflib_debug_reset(void)
{
	iflib_tx_seen = iflib_tx_sent = iflib_tx_encap = iflib_rx_allocs =
		iflib_fl_refills = iflib_fl_refills_large = iflib_tx_frees =
		iflib_txq_drain_flushing = iflib_txq_drain_oactive =
		iflib_txq_drain_notready =
		iflib_encap_load_mbuf_fail = iflib_encap_pad_mbuf_fail =
		iflib_encap_txq_avail_fail = iflib_encap_txd_encap_fail =
		iflib_task_fn_rxs = iflib_rx_intr_enables = iflib_fast_intrs =
		iflib_rx_unavail =
		iflib_rx_ctx_inactive = iflib_rx_if_input =
		iflib_rx_mbuf_null = iflib_rxd_flush = 0;
}

#else
#define DBG_COUNTER_INC(name)
static void iflib_debug_reset(void) {}
#endif
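
/*
 * Usage note (descriptive): DBG_COUNTER_INC(tx_encap) token-pastes to
 * atomic_add_int(&iflib_tx_encap, 1) when IFLIB_DEBUG_COUNTERS is
 * non-zero, and compiles away entirely otherwise, so the counters cost
 * nothing in production builds.
 */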

#define IFLIB_DEBUG 0

static void iflib_tx_structures_free(if_ctx_t ctx);
static void iflib_rx_structures_free(if_ctx_t ctx);
static int iflib_queues_alloc(if_ctx_t ctx);
static int iflib_tx_credits_update(if_ctx_t ctx, iflib_txq_t txq);
static int iflib_rxd_avail(if_ctx_t ctx, iflib_rxq_t rxq, qidx_t cidx, qidx_t budget);
static int iflib_qset_structures_setup(if_ctx_t ctx);
static int iflib_msix_init(if_ctx_t ctx);
static int iflib_legacy_setup(if_ctx_t ctx, driver_filter_t filter, void *filterarg, int *rid, const char *str);
static void iflib_txq_check_drain(iflib_txq_t txq, int budget);
static uint32_t iflib_txq_can_drain(struct ifmp_ring *);
#ifdef ALTQ
static void iflib_altq_if_start(if_t ifp);
static int iflib_altq_if_transmit(if_t ifp, struct mbuf *m);
#endif
static int iflib_register(if_ctx_t);
static void iflib_init_locked(if_ctx_t ctx);
static void iflib_add_device_sysctl_pre(if_ctx_t ctx);
static void iflib_add_device_sysctl_post(if_ctx_t ctx);
static void iflib_ifmp_purge(iflib_txq_t txq);
static void _iflib_pre_assert(if_softc_ctx_t scctx);
static void iflib_if_init_locked(if_ctx_t ctx);
#ifndef __NO_STRICT_ALIGNMENT
static struct mbuf * iflib_fixup_rx(struct mbuf *m);
#endif

NETDUMP_DEFINE(iflib);

#ifdef DEV_NETMAP
#include <sys/selinfo.h>
#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>

MODULE_DEPEND(iflib, netmap, 1, 1, 1);

static int netmap_fl_refill(iflib_rxq_t rxq, struct netmap_kring *kring, uint32_t nm_i, bool init);

/*
 * device-specific sysctl variables:
 *
 * iflib_crcstrip: 0: keep CRC in rx frames (default), 1: strip it.
 *	During regular operations the CRC is stripped, but on some
 *	hardware reception of frames not multiple of 64 is slower,
 *	so using crcstrip=0 helps in benchmarks.
 *
 * iflib_rx_miss, iflib_rx_miss_bufs:
 *	count packets that might be missed due to lost interrupts.
 */
SYSCTL_DECL(_dev_netmap);
/*
 * The xl driver by default strips CRCs and we do not override it.
 */

int iflib_crcstrip = 1;
SYSCTL_INT(_dev_netmap, OID_AUTO, iflib_crcstrip,
    CTLFLAG_RW, &iflib_crcstrip, 1, "strip CRC on rx frames");

int iflib_rx_miss, iflib_rx_miss_bufs;
SYSCTL_INT(_dev_netmap, OID_AUTO, iflib_rx_miss,
    CTLFLAG_RW, &iflib_rx_miss, 0, "potentially missed rx intr");
SYSCTL_INT(_dev_netmap, OID_AUTO, iflib_rx_miss_bufs,
    CTLFLAG_RW, &iflib_rx_miss_bufs, 0, "potentially missed rx intr bufs");

/*
 * Register/unregister. We are already under netmap lock.
 * Only called on the first register or the last unregister.
 */
static int
iflib_netmap_register(struct netmap_adapter *na, int onoff)
{
	struct ifnet *ifp = na->ifp;
	if_ctx_t ctx = ifp->if_softc;
	int status;

	CTX_LOCK(ctx);
	IFDI_INTR_DISABLE(ctx);

	/* Tell the stack that the interface is no longer active */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	if (!CTX_IS_VF(ctx))
		IFDI_CRCSTRIP_SET(ctx, onoff, iflib_crcstrip);

	/* enable or disable flags and callbacks in na and ifp */
	if (onoff) {
		nm_set_native_flags(na);
	} else {
		nm_clear_native_flags(na);
	}
	iflib_stop(ctx);
	iflib_init_locked(ctx);
	IFDI_CRCSTRIP_SET(ctx, onoff, iflib_crcstrip); // XXX why twice ?
	status = ifp->if_drv_flags & IFF_DRV_RUNNING ? 0 : 1;
	if (status)
		nm_clear_native_flags(na);
	CTX_UNLOCK(ctx);
	return (status);
}

static int
netmap_fl_refill(iflib_rxq_t rxq, struct netmap_kring *kring, uint32_t nm_i, bool init)
{
	struct netmap_adapter *na = kring->na;
	u_int const lim = kring->nkr_num_slots - 1;
	u_int head = kring->rhead;
	struct netmap_ring *ring = kring->ring;
	bus_dmamap_t *map;
	struct if_rxd_update iru;
	if_ctx_t ctx = rxq->ifr_ctx;
	iflib_fl_t fl = &rxq->ifr_fl[0];
	uint32_t refill_pidx, nic_i;
#if IFLIB_DEBUG_COUNTERS
	int rf_count = 0;
#endif

	if (nm_i == head && __predict_true(!init))
		return 0;
	iru_init(&iru, rxq, 0 /* flid */);
	map = fl->ifl_sds.ifsd_map;
	refill_pidx = netmap_idx_k2n(kring, nm_i);
	/*
	 * IMPORTANT: we must leave one free slot in the ring,
	 * so move head back by one unit
	 */
	head = nm_prev(head, lim);
	nic_i = UINT_MAX;
	DBG_COUNTER_INC(fl_refills);
	while (nm_i != head) {
#if IFLIB_DEBUG_COUNTERS
		if (++rf_count == 9)
			DBG_COUNTER_INC(fl_refills_large);
#endif
		for (int tmp_pidx = 0; tmp_pidx < IFLIB_MAX_RX_REFRESH && nm_i != head; tmp_pidx++) {
			struct netmap_slot *slot = &ring->slot[nm_i];
			void *addr = PNMB(na, slot, &fl->ifl_bus_addrs[tmp_pidx]);
			uint32_t nic_i_dma = refill_pidx;
			nic_i = netmap_idx_k2n(kring, nm_i);

			MPASS(tmp_pidx < IFLIB_MAX_RX_REFRESH);

			if (addr == NETMAP_BUF_BASE(na)) /* bad buf */
				return netmap_ring_reinit(kring);

			fl->ifl_vm_addrs[tmp_pidx] = addr;
			if (__predict_false(init) && map) {
				netmap_load_map(na, fl->ifl_ifdi->idi_tag, map[nic_i], addr);
			} else if (map && (slot->flags & NS_BUF_CHANGED)) {
				/* buffer has changed, reload map */
				netmap_reload_map(na, fl->ifl_ifdi->idi_tag, map[nic_i], addr);
			}
			slot->flags &= ~NS_BUF_CHANGED;

			nm_i = nm_next(nm_i, lim);
			fl->ifl_rxd_idxs[tmp_pidx] = nic_i = nm_next(nic_i, lim);
			if (nm_i != head && tmp_pidx < IFLIB_MAX_RX_REFRESH-1)
				continue;

			iru.iru_pidx = refill_pidx;
			iru.iru_count = tmp_pidx+1;
			ctx->isc_rxd_refill(ctx->ifc_softc, &iru);

			refill_pidx = nic_i;
			if (map == NULL)
				continue;

			for (int n = 0; n < iru.iru_count; n++) {
				bus_dmamap_sync(fl->ifl_ifdi->idi_tag, map[nic_i_dma],
						BUS_DMASYNC_PREREAD);
				/* XXX - change this to not use the netmap func*/
				nic_i_dma = nm_next(nic_i_dma, lim);
			}
		}
	}
	kring->nr_hwcur = head;

	if (map)
		bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map,
				BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	if (__predict_true(nic_i != UINT_MAX)) {
		ctx->isc_rxd_flush(ctx->ifc_softc, rxq->ifr_id, fl->ifl_id, nic_i);
		DBG_COUNTER_INC(rxd_flush);
	}
	return (0);
}
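
/*
 * Note (descriptive): netmap_fl_refill() stops one slot short of
 * kring->rhead (head = nm_prev(head, lim) above) because a ring whose
 * producer index catches up with its consumer index would be
 * indistinguishable from an empty one; keeping one slot vacant is the
 * usual way to disambiguate full from empty without a generation bit.
 */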

/*
 * Reconcile kernel and user view of the transmit ring.
 *
 * All information is in the kring.
 * Userspace wants to send packets up to the one before kring->rhead,
 * kernel knows kring->nr_hwcur is the first unsent packet.
 *
 * Here we push packets out (as many as possible), and possibly
 * reclaim buffers from previously completed transmission.
 *
 * The caller (netmap) guarantees that there is only one instance
 * running at any time. Any interference with other driver
 * methods should be handled by the individual drivers.
 */
static int
iflib_netmap_txsync(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	struct ifnet *ifp = na->ifp;
	struct netmap_ring *ring = kring->ring;
	u_int nm_i;	/* index into the netmap kring */
	u_int nic_i;	/* index into the NIC ring */
	u_int n;
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = kring->rhead;
	struct if_pkt_info pi;

	/*
	 * interrupts on every tx packet are expensive so request
	 * them every half ring, or where NS_REPORT is set
	 */
	u_int report_frequency = kring->nkr_num_slots >> 1;
	/* device-specific */
	if_ctx_t ctx = ifp->if_softc;
	iflib_txq_t txq = &ctx->ifc_txqs[kring->ring_id];

	if (txq->ift_sds.ifsd_map)
		bus_dmamap_sync(txq->ift_desc_tag, txq->ift_ifdi->idi_map,
				BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);


	/*
	 * First part: process new packets to send.
	 * nm_i is the current index in the netmap kring,
	 * nic_i is the corresponding index in the NIC ring.
	 *
	 * If we have packets to send (nm_i != head)
	 * iterate over the netmap ring, fetch length and update
	 * the corresponding slot in the NIC ring. Some drivers also
	 * need to update the buffer's physical address in the NIC slot
	 * even if NS_BUF_CHANGED is not set (PNMB computes the addresses).
	 *
	 * The netmap_reload_map() call is especially expensive,
	 * even when (as in this case) the tag is 0, so only do it
	 * when the buffer has actually changed.
	 *
	 * If possible do not set the report/intr bit on all slots,
	 * but only a few times per ring or when NS_REPORT is set.
	 *
	 * Finally, on 10G and faster drivers, it might be useful
	 * to prefetch the next slot and txr entry.
	 */

	nm_i = kring->nr_hwcur;
	if (nm_i != head) {	/* we have new packets to send */
		pkt_info_zero(&pi);
		pi.ipi_segs = txq->ift_segs;
		pi.ipi_qsidx = kring->ring_id;
		nic_i = netmap_idx_k2n(kring, nm_i);

		__builtin_prefetch(&ring->slot[nm_i]);
		__builtin_prefetch(&txq->ift_sds.ifsd_m[nic_i]);
		if (txq->ift_sds.ifsd_map)
			__builtin_prefetch(&txq->ift_sds.ifsd_map[nic_i]);

		for (n = 0; nm_i != head; n++) {
			struct netmap_slot *slot = &ring->slot[nm_i];
			u_int len = slot->len;
			uint64_t paddr;
			void *addr = PNMB(na, slot, &paddr);
			int flags = (slot->flags & NS_REPORT ||
				nic_i == 0 || nic_i == report_frequency) ?
				IPI_TX_INTR : 0;

			/* device-specific */
			pi.ipi_len = len;
			pi.ipi_segs[0].ds_addr = paddr;
			pi.ipi_segs[0].ds_len = len;
			pi.ipi_nsegs = 1;
			pi.ipi_ndescs = 0;
			pi.ipi_pidx = nic_i;
			pi.ipi_flags = flags;

			/* Fill the slot in the NIC ring. */
			ctx->isc_txd_encap(ctx->ifc_softc, &pi);
			DBG_COUNTER_INC(tx_encap);

			/* prefetch for next round */
			__builtin_prefetch(&ring->slot[nm_i + 1]);
			__builtin_prefetch(&txq->ift_sds.ifsd_m[nic_i + 1]);
			if (txq->ift_sds.ifsd_map) {
				__builtin_prefetch(&txq->ift_sds.ifsd_map[nic_i + 1]);

				NM_CHECK_ADDR_LEN(na, addr, len);

				if (slot->flags & NS_BUF_CHANGED) {
					/* buffer has changed, reload map */
					netmap_reload_map(na, txq->ift_desc_tag, txq->ift_sds.ifsd_map[nic_i], addr);
				}
				/* make sure changes to the buffer are synced */
				bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_sds.ifsd_map[nic_i],
						BUS_DMASYNC_PREWRITE);
			}
			slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED);
			nm_i = nm_next(nm_i, lim);
			nic_i = nm_next(nic_i, lim);
		}
		kring->nr_hwcur = nm_i;

		/* synchronize the NIC ring */
		if (txq->ift_sds.ifsd_map)
			bus_dmamap_sync(txq->ift_desc_tag, txq->ift_ifdi->idi_map,
					BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* (re)start the tx unit up to slot nic_i (excluded) */
		ctx->isc_txd_flush(ctx->ifc_softc, txq->ift_id, nic_i);
	}

	/*
	 * Second part: reclaim buffers for completed transmissions.
	 *
	 * If there are unclaimed buffers, attempt to reclaim them.
	 * If none are reclaimed, and TX IRQs are not in use, do an initial
	 * minimal delay, then trigger the tx handler which will spin in the
	 * group task queue.
	 */
	if (kring->nr_hwtail != nm_prev(kring->nr_hwcur, lim)) {
		if (iflib_tx_credits_update(ctx, txq)) {
			/* some tx completed, increment avail */
			nic_i = txq->ift_cidx_processed;
			kring->nr_hwtail = nm_prev(netmap_idx_n2k(kring, nic_i), lim);
		}
	}
	if (!(ctx->ifc_flags & IFC_NETMAP_TX_IRQ))
		if (kring->nr_hwtail != nm_prev(kring->nr_hwcur, lim)) {
			callout_reset_on(&txq->ift_timer, hz < 2000 ? 1 : hz / 1000,
			    iflib_timer, txq, txq->ift_timer.c_cpu);
		}
	return (0);
}

/*
 * Reconcile kernel and user view of the receive ring.
 * Same as for the txsync, this routine must be efficient.
 * The caller guarantees a single invocation, but races against
 * the rest of the driver should be handled here.
 *
 * On call, kring->rhead is the first packet that userspace wants
 * to keep, and kring->rcur is the wakeup point.
 * The kernel has previously reported packets up to kring->rtail.
 *
 * If (flags & NAF_FORCE_READ) also check for incoming packets irrespective
 * of whether or not we received an interrupt.
 */
static int
iflib_netmap_rxsync(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	struct netmap_ring *ring = kring->ring;
	uint32_t nm_i;	/* index into the netmap ring */
	uint32_t nic_i;	/* index into the NIC ring */
	u_int i, n;
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = kring->rhead;
	int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR;
	struct if_rxd_info ri;

	struct ifnet *ifp = na->ifp;
	if_ctx_t ctx = ifp->if_softc;
	iflib_rxq_t rxq = &ctx->ifc_rxqs[kring->ring_id];
	iflib_fl_t fl = rxq->ifr_fl;
	if (head > lim)
		return netmap_ring_reinit(kring);

	/* XXX check sync modes */
	for (i = 0, fl = rxq->ifr_fl; i < rxq->ifr_nfl; i++, fl++) {
		if (fl->ifl_sds.ifsd_map == NULL)
			continue;
		bus_dmamap_sync(rxq->ifr_fl[i].ifl_desc_tag, fl->ifl_ifdi->idi_map,
				BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	}
	/*
	 * First part: import newly received packets.
	 *
	 * nm_i is the index of the next free slot in the netmap ring,
	 * nic_i is the index of the next received packet in the NIC ring,
	 * and they may differ in case if_init() has been called while
	 * in netmap mode. For the receive ring we have
	 *
	 *	nic_i = rxr->next_check;
	 *	nm_i = kring->nr_hwtail (previous)
	 * and
	 *	nm_i == (nic_i + kring->nkr_hwofs) % ring_size
	 *
	 * rxr->next_check is set to 0 on a ring reinit
	 */
	if (netmap_no_pendintr || force_update) {
		int crclen = iflib_crcstrip ? 0 : 4;
		int error, avail;

		for (i = 0; i < rxq->ifr_nfl; i++) {
			fl = &rxq->ifr_fl[i];
			nic_i = fl->ifl_cidx;
			nm_i = netmap_idx_n2k(kring, nic_i);
			avail = iflib_rxd_avail(ctx, rxq, nic_i, USHRT_MAX);
			for (n = 0; avail > 0; n++, avail--) {
				rxd_info_zero(&ri);
				ri.iri_frags = rxq->ifr_frags;
				ri.iri_qsidx = kring->ring_id;
				ri.iri_ifp = ctx->ifc_ifp;
				ri.iri_cidx = nic_i;

				error = ctx->isc_rxd_pkt_get(ctx->ifc_softc, &ri);
				ring->slot[nm_i].len = error ? 0 : ri.iri_len - crclen;
				ring->slot[nm_i].flags = 0;
				if (fl->ifl_sds.ifsd_map)
					bus_dmamap_sync(fl->ifl_ifdi->idi_tag,
							fl->ifl_sds.ifsd_map[nic_i], BUS_DMASYNC_POSTREAD);
				nm_i = nm_next(nm_i, lim);
				nic_i = nm_next(nic_i, lim);
			}
			if (n) { /* update the state variables */
				if (netmap_no_pendintr && !force_update) {
					/* diagnostics */
					iflib_rx_miss++;
					iflib_rx_miss_bufs += n;
				}
				fl->ifl_cidx = nic_i;
				kring->nr_hwtail = nm_i;
			}
			kring->nr_kflags &= ~NKR_PENDINTR;
		}
	}
	/*
	 * Second part: skip past packets that userspace has released.
	 * (kring->nr_hwcur to head excluded),
	 * and make the buffers available for reception.
	 * As usual nm_i is the index in the netmap ring,
	 * nic_i is the index in the NIC ring, and
	 * nm_i == (nic_i + kring->nkr_hwofs) % ring_size
	 */
	/* XXX not sure how this will work with multiple free lists */
	nm_i = kring->nr_hwcur;

	return (netmap_fl_refill(rxq, kring, nm_i, false));
}

static void
iflib_netmap_intr(struct netmap_adapter *na, int onoff)
{
	struct ifnet *ifp = na->ifp;
	if_ctx_t ctx = ifp->if_softc;

	CTX_LOCK(ctx);
	if (onoff) {
		IFDI_INTR_ENABLE(ctx);
	} else {
		IFDI_INTR_DISABLE(ctx);
	}
	CTX_UNLOCK(ctx);
}


static int
iflib_netmap_attach(if_ctx_t ctx)
{
	struct netmap_adapter na;
	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;

	bzero(&na, sizeof(na));

	na.ifp = ctx->ifc_ifp;
	na.na_flags = NAF_BDG_MAYSLEEP;
	MPASS(ctx->ifc_softc_ctx.isc_ntxqsets);
	MPASS(ctx->ifc_softc_ctx.isc_nrxqsets);

	na.num_tx_desc = scctx->isc_ntxd[0];
	na.num_rx_desc = scctx->isc_nrxd[0];
	na.nm_txsync = iflib_netmap_txsync;
	na.nm_rxsync = iflib_netmap_rxsync;
	na.nm_register = iflib_netmap_register;
	na.nm_intr = iflib_netmap_intr;
	na.num_tx_rings = ctx->ifc_softc_ctx.isc_ntxqsets;
	na.num_rx_rings = ctx->ifc_softc_ctx.isc_nrxqsets;
	return (netmap_attach(&na));
}

static void
iflib_netmap_txq_init(if_ctx_t ctx, iflib_txq_t txq)
{
	struct netmap_adapter *na = NA(ctx->ifc_ifp);
	struct netmap_slot *slot;

	slot = netmap_reset(na, NR_TX, txq->ift_id, 0);
	if (slot == NULL)
		return;
	if (txq->ift_sds.ifsd_map == NULL)
		return;

	for (int i = 0; i < ctx->ifc_softc_ctx.isc_ntxd[0]; i++) {

		/*
		 * In netmap mode, set the map for the packet buffer.
		 * NOTE: Some drivers (not this one) also need to set
		 * the physical buffer address in the NIC ring.
		 * netmap_idx_n2k() maps a nic index, i, into the corresponding
		 * netmap slot index, si
		 */
		int si = netmap_idx_n2k(na->tx_rings[txq->ift_id], i);
		netmap_load_map(na, txq->ift_desc_tag, txq->ift_sds.ifsd_map[i], NMB(na, slot + si));
	}
}

static void
iflib_netmap_rxq_init(if_ctx_t ctx, iflib_rxq_t rxq)
{
	struct netmap_adapter *na = NA(ctx->ifc_ifp);
	struct netmap_kring *kring = na->rx_rings[rxq->ifr_id];
	struct netmap_slot *slot;
	uint32_t nm_i;

	slot = netmap_reset(na, NR_RX, rxq->ifr_id, 0);
	if (slot == NULL)
		return;
	nm_i = netmap_idx_n2k(kring, 0);
	netmap_fl_refill(rxq, kring, nm_i, true);
}

static void
iflib_netmap_timer_adjust(if_ctx_t ctx, uint16_t txqid, uint32_t *reset_on)
{
	struct netmap_kring *kring;

	kring = NA(ctx->ifc_ifp)->tx_rings[txqid];

	if (kring->nr_hwcur != nm_next(kring->nr_hwtail, kring->nkr_num_slots - 1)) {
		if (ctx->isc_txd_credits_update(ctx->ifc_softc, txqid, false))
			netmap_tx_irq(ctx->ifc_ifp, txqid);
		if (!(ctx->ifc_flags & IFC_NETMAP_TX_IRQ)) {
			if (hz < 2000)
				*reset_on = 1;
			else
				*reset_on = hz / 1000;
		}
	}
}

#define iflib_netmap_detach(ifp) netmap_detach(ifp)

#else
#define iflib_netmap_txq_init(ctx, txq)
#define iflib_netmap_rxq_init(ctx, rxq)
#define iflib_netmap_detach(ifp)

#define iflib_netmap_attach(ctx) (0)
#define netmap_rx_irq(ifp, qid, budget) (0)
#define netmap_tx_irq(ifp, qid) do {} while (0)
#define iflib_netmap_timer_adjust(ctx, txqid, reset_on)

#endif

#if defined(__i386__) || defined(__amd64__)
static __inline void
prefetch(void *x)
{
	__asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x));
}
static __inline void
prefetch2cachelines(void *x)
{
	__asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x));
#if (CACHE_LINE_SIZE < 128)
	__asm volatile("prefetcht0 %0" :: "m" (*(((unsigned long *)x)+CACHE_LINE_SIZE/(sizeof(unsigned long)))));
#endif
}
#else
#define prefetch(x)
#define prefetch2cachelines(x)
#endif

static void
iflib_gen_mac(if_ctx_t ctx)
{
	struct thread *td;
	MD5_CTX mdctx;
	char uuid[HOSTUUIDLEN+1];
	char buf[HOSTUUIDLEN+16];
	uint8_t *mac;
	unsigned char digest[16];

	td = curthread;
	mac = ctx->ifc_mac;
	uuid[HOSTUUIDLEN] = 0;
	bcopy(td->td_ucred->cr_prison->pr_hostuuid, uuid, HOSTUUIDLEN);
	snprintf(buf, HOSTUUIDLEN+16, "%s-%s", uuid, device_get_nameunit(ctx->ifc_dev));
	/*
	 * Generate a pseudo-random, deterministic MAC
	 * address based on the UUID and unit number.
	 * The FreeBSD Foundation OUI of 58-9C-FC is used.
	 */
	MD5Init(&mdctx);
	MD5Update(&mdctx, buf, strlen(buf));
	MD5Final(digest, &mdctx);

	mac[0] = 0x58;
	mac[1] = 0x9C;
	mac[2] = 0xFC;
	mac[3] = digest[0];
	mac[4] = digest[1];
	mac[5] = digest[2];
}

static void
iru_init(if_rxd_update_t iru, iflib_rxq_t rxq, uint8_t flid)
{
	iflib_fl_t fl;

	fl = &rxq->ifr_fl[flid];
	iru->iru_paddrs = fl->ifl_bus_addrs;
	iru->iru_vaddrs = &fl->ifl_vm_addrs[0];
	iru->iru_idxs = fl->ifl_rxd_idxs;
	iru->iru_qsidx = rxq->ifr_id;
	iru->iru_buf_size = fl->ifl_buf_size;
	iru->iru_flidx = fl->ifl_id;
}

static void
_iflib_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int err)
{
	if (err)
		return;
	*(bus_addr_t *) arg = segs[0].ds_addr;
}

int
iflib_dma_alloc(if_ctx_t ctx, int size, iflib_dma_info_t dma, int mapflags)
{
	int err;
	if_shared_ctx_t sctx = ctx->ifc_sctx;
	device_t dev = ctx->ifc_dev;

	KASSERT(sctx->isc_q_align != 0, ("alignment value not initialized"));

	err = bus_dma_tag_create(bus_get_dma_tag(dev),	/* parent */
				sctx->isc_q_align, 0,	/* alignment, bounds */
				BUS_SPACE_MAXADDR,	/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				size,			/* maxsize */
				1,			/* nsegments */
				size,			/* maxsegsize */
				BUS_DMA_ALLOCNOW,	/* flags */
				NULL,			/* lockfunc */
				NULL,			/* lockarg */
				&dma->idi_tag);
	if (err) {
		device_printf(dev,
		    "%s: bus_dma_tag_create failed: %d\n",
		    __func__, err);
		goto fail_0;
	}

	err = bus_dmamem_alloc(dma->idi_tag, (void**) &dma->idi_vaddr,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT | BUS_DMA_ZERO, &dma->idi_map);
	if (err) {
		device_printf(dev,
		    "%s: bus_dmamem_alloc(%ju) failed: %d\n",
		    __func__, (uintmax_t)size, err);
		goto fail_1;
	}

	dma->idi_paddr = IF_BAD_DMA;
	err = bus_dmamap_load(dma->idi_tag, dma->idi_map, dma->idi_vaddr,
	    size, _iflib_dmamap_cb, &dma->idi_paddr, mapflags | BUS_DMA_NOWAIT);
	if (err || dma->idi_paddr == IF_BAD_DMA) {
		device_printf(dev,
		    "%s: bus_dmamap_load failed: %d\n",
		    __func__, err);
		goto fail_2;
	}

	dma->idi_size = size;
	return (0);

fail_2:
	bus_dmamem_free(dma->idi_tag, dma->idi_vaddr, dma->idi_map);
fail_1:
	bus_dma_tag_destroy(dma->idi_tag);
fail_0:
	dma->idi_tag = NULL;

	return (err);
}

int
iflib_dma_alloc_multi(if_ctx_t ctx, int *sizes, iflib_dma_info_t *dmalist, int mapflags, int count)
{
	int i, err;
	iflib_dma_info_t *dmaiter;

	err = 0;
	dmaiter = dmalist;
	for (i = 0; i < count; i++, dmaiter++) {
		if ((err = iflib_dma_alloc(ctx, sizes[i], *dmaiter, mapflags)) != 0)
			break;
	}
	if (err)
		iflib_dma_free_multi(dmalist, i);
	return (err);
}

void
iflib_dma_free(iflib_dma_info_t dma)
{
	if (dma->idi_tag == NULL)
		return;
	if (dma->idi_paddr != IF_BAD_DMA) {
		bus_dmamap_sync(dma->idi_tag, dma->idi_map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dma->idi_tag, dma->idi_map);
		dma->idi_paddr = IF_BAD_DMA;
	}
	if (dma->idi_vaddr != NULL) {
		bus_dmamem_free(dma->idi_tag, dma->idi_vaddr, dma->idi_map);
		dma->idi_vaddr = NULL;
	}
	bus_dma_tag_destroy(dma->idi_tag);
	dma->idi_tag = NULL;
}
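
/*
 * Example (illustrative sketch; the descriptor-count math is the
 * caller's): a driver allocating a descriptor ring during attach would
 * typically pair the two calls above as
 *
 *	struct iflib_dma_info ring;
 *
 *	if (iflib_dma_alloc(ctx, ndesc * desc_size, &ring, 0) != 0)
 *		return (ENOMEM);
 *	...
 *	iflib_dma_free(&ring);
 *
 * with ring.idi_vaddr/idi_paddr holding the CPU and bus addresses.
 */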

void
iflib_dma_free_multi(iflib_dma_info_t *dmalist, int count)
{
	int i;
	iflib_dma_info_t *dmaiter = dmalist;

	for (i = 0; i < count; i++, dmaiter++)
		iflib_dma_free(*dmaiter);
}

#ifdef EARLY_AP_STARTUP
static const int iflib_started = 1;
#else
/*
 * We used to abuse the smp_started flag to decide if the queues have been
 * fully initialized (by late taskqgroup_adjust() calls in a SYSINIT()).
 * That gave bad races, since the SYSINIT() runs strictly after smp_started
 * is set. Run a SYSINIT() strictly after that to just set a usable
 * completion flag.
 */

static int iflib_started;

static void
iflib_record_started(void *arg)
{
	iflib_started = 1;
}

SYSINIT(iflib_record_started, SI_SUB_SMP + 1, SI_ORDER_FIRST,
	iflib_record_started, NULL);
#endif

static int
iflib_fast_intr(void *arg)
{
	iflib_filter_info_t info = arg;
	struct grouptask *gtask = info->ifi_task;
	if (!iflib_started)
		return (FILTER_HANDLED);

	DBG_COUNTER_INC(fast_intrs);
	if (info->ifi_filter != NULL && info->ifi_filter(info->ifi_filter_arg) == FILTER_HANDLED)
		return (FILTER_HANDLED);

	GROUPTASK_ENQUEUE(gtask);
	return (FILTER_HANDLED);
}

static int
iflib_fast_intr_rxtx(void *arg)
{
	iflib_filter_info_t info = arg;
	struct grouptask *gtask = info->ifi_task;
	iflib_rxq_t rxq = (iflib_rxq_t)info->ifi_ctx;
	if_ctx_t ctx = NULL;
	int i, cidx;

	if (!iflib_started)
		return (FILTER_HANDLED);

	DBG_COUNTER_INC(fast_intrs);
	if (info->ifi_filter != NULL && info->ifi_filter(info->ifi_filter_arg) == FILTER_HANDLED)
		return (FILTER_HANDLED);

	MPASS(rxq->ifr_ntxqirq);
	for (i = 0; i < rxq->ifr_ntxqirq; i++) {
		qidx_t txqid = rxq->ifr_txqid[i];

		ctx = rxq->ifr_ctx;

		if (!ctx->isc_txd_credits_update(ctx->ifc_softc, txqid, false)) {
			IFDI_TX_QUEUE_INTR_ENABLE(ctx, txqid);
			continue;
		}
		GROUPTASK_ENQUEUE(&ctx->ifc_txqs[txqid].ift_task);
	}
	if (ctx->ifc_sctx->isc_flags & IFLIB_HAS_RXCQ)
		cidx = rxq->ifr_cq_cidx;
	else
		cidx = rxq->ifr_fl[0].ifl_cidx;
	if (iflib_rxd_avail(ctx, rxq, cidx, 1))
		GROUPTASK_ENQUEUE(gtask);
	else {
		IFDI_RX_QUEUE_INTR_ENABLE(ctx, rxq->ifr_id);
		DBG_COUNTER_INC(rx_intr_enables);
	}
	return (FILTER_HANDLED);
}


static int
iflib_fast_intr_ctx(void *arg)
{
	iflib_filter_info_t info = arg;
	struct grouptask *gtask = info->ifi_task;

	if (!iflib_started)
		return (FILTER_HANDLED);

	DBG_COUNTER_INC(fast_intrs);
	if (info->ifi_filter != NULL && info->ifi_filter(info->ifi_filter_arg) == FILTER_HANDLED)
		return (FILTER_HANDLED);

	GROUPTASK_ENQUEUE(gtask);
	return (FILTER_HANDLED);
}
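
/*
 * Note (descriptive): the three fast-interrupt handlers above follow the
 * same filter pattern: they run in interrupt-filter context, give the
 * driver's own filter (ifi_filter) first refusal, and then defer the
 * real work to a group task via GROUPTASK_ENQUEUE() rather than doing
 * it with interrupts blocked.  iflib_fast_intr_rxtx additionally walks
 * the tx queues sharing the rx interrupt and only re-enables a queue
 * interrupt when no work is pending.
 */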

static int
_iflib_irq_alloc(if_ctx_t ctx, if_irq_t irq, int rid,
		 driver_filter_t filter, driver_intr_t handler, void *arg,
		 const char *name)
{
	int rc, flags;
	struct resource *res;
	void *tag = NULL;
	device_t dev = ctx->ifc_dev;

	flags = RF_ACTIVE;
	if (ctx->ifc_flags & IFC_LEGACY)
		flags |= RF_SHAREABLE;
	MPASS(rid < 512);
	irq->ii_rid = rid;
	res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &irq->ii_rid, flags);
	if (res == NULL) {
		device_printf(dev,
		    "failed to allocate IRQ for rid %d, name %s.\n", rid, name);
		return (ENOMEM);
	}
	irq->ii_res = res;
	KASSERT(filter == NULL || handler == NULL, ("filter and handler can't both be non-NULL"));
	rc = bus_setup_intr(dev, res, INTR_MPSAFE | INTR_TYPE_NET,
			    filter, handler, arg, &tag);
	if (rc != 0) {
		device_printf(dev,
		    "failed to setup interrupt for rid %d, name %s: %d\n",
		    rid, name ? name : "unknown", rc);
		return (rc);
	} else if (name)
		bus_describe_intr(dev, res, tag, "%s", name);

	irq->ii_tag = tag;
	return (0);
}


/*********************************************************************
 *
 *  Allocate memory for tx_buffer structures. The tx_buffer stores all
 *  the information needed to transmit a packet on the wire. This is
 *  called only once at attach, setup is done every reset.
 *
 **********************************************************************/

static int
iflib_txsd_alloc(iflib_txq_t txq)
{
	if_ctx_t ctx = txq->ift_ctx;
	if_shared_ctx_t sctx = ctx->ifc_sctx;
	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
	device_t dev = ctx->ifc_dev;
	bus_size_t tsomaxsize;
	int err, nsegments, ntsosegments;

	nsegments = scctx->isc_tx_nsegments;
	ntsosegments = scctx->isc_tx_tso_segments_max;
	tsomaxsize = scctx->isc_tx_tso_size_max;
	if (if_getcapabilities(ctx->ifc_ifp) & IFCAP_VLAN_MTU)
		tsomaxsize += sizeof(struct ether_vlan_header);
	MPASS(scctx->isc_ntxd[0] > 0);
	MPASS(scctx->isc_ntxd[txq->ift_br_offset] > 0);
	MPASS(nsegments > 0);
	if (if_getcapabilities(ctx->ifc_ifp) & IFCAP_TSO) {
		MPASS(ntsosegments > 0);
		MPASS(sctx->isc_tso_maxsize >= tsomaxsize);
	}

	/*
	 * Setup DMA descriptor areas.
	 */
	if ((err = bus_dma_tag_create(bus_get_dma_tag(dev),
			       1, 0,			/* alignment, bounds */
			       BUS_SPACE_MAXADDR,	/* lowaddr */
			       BUS_SPACE_MAXADDR,	/* highaddr */
			       NULL, NULL,		/* filter, filterarg */
			       sctx->isc_tx_maxsize,	/* maxsize */
			       nsegments,		/* nsegments */
			       sctx->isc_tx_maxsegsize,	/* maxsegsize */
			       0,			/* flags */
			       NULL,			/* lockfunc */
			       NULL,			/* lockfuncarg */
			       &txq->ift_desc_tag))) {
		device_printf(dev,"Unable to allocate TX DMA tag: %d\n", err);
		device_printf(dev,"maxsize: %ju nsegments: %d maxsegsize: %ju\n",
		    (uintmax_t)sctx->isc_tx_maxsize, nsegments, (uintmax_t)sctx->isc_tx_maxsegsize);
		goto fail;
	}
	if ((if_getcapabilities(ctx->ifc_ifp) & IFCAP_TSO) &&
	    (err = bus_dma_tag_create(bus_get_dma_tag(dev),
			       1, 0,			/* alignment, bounds */
			       BUS_SPACE_MAXADDR,	/* lowaddr */
			       BUS_SPACE_MAXADDR,	/* highaddr */
			       NULL, NULL,		/* filter, filterarg */
			       tsomaxsize,		/* maxsize */
			       ntsosegments,		/* nsegments */
			       sctx->isc_tso_maxsegsize,/* maxsegsize */
			       0,			/* flags */
			       NULL,			/* lockfunc */
			       NULL,			/* lockfuncarg */
			       &txq->ift_tso_desc_tag))) {
		device_printf(dev,"Unable to allocate TX TSO DMA tag: %d\n", err);

		goto fail;
	}
	if (!(txq->ift_sds.ifsd_flags =
	    (uint8_t *) malloc(sizeof(uint8_t) *
	    scctx->isc_ntxd[txq->ift_br_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate tx_buffer memory\n");
		err = ENOMEM;
		goto fail;
	}
	if (!(txq->ift_sds.ifsd_m =
	    (struct mbuf **) malloc(sizeof(struct mbuf *) *
	    scctx->isc_ntxd[txq->ift_br_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate tx_buffer memory\n");
		err = ENOMEM;
		goto fail;
	}

	/* Create the descriptor buffer dma maps */
#if defined(ACPI_DMAR) || (!(defined(__i386__) || defined(__amd64__)))
	if ((ctx->ifc_flags & IFC_DMAR) == 0)
		return (0);

	if (!(txq->ift_sds.ifsd_map =
	    (bus_dmamap_t *) malloc(sizeof(bus_dmamap_t) * scctx->isc_ntxd[txq->ift_br_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate tx_buffer map memory\n");
		err = ENOMEM;
		goto fail;
	}

	for (int i = 0; i < scctx->isc_ntxd[txq->ift_br_offset]; i++) {
		err = bus_dmamap_create(txq->ift_desc_tag, 0, &txq->ift_sds.ifsd_map[i]);
		if (err != 0) {
			device_printf(dev, "Unable to create TX DMA map\n");
			goto fail;
		}
	}
#endif
	return (0);
fail:
	/* We free all, it handles case where we are in the middle */
	iflib_tx_structures_free(ctx);
	return (err);
}

static void
iflib_txsd_destroy(if_ctx_t ctx, iflib_txq_t txq, int i)
{
	bus_dmamap_t map;

	map = NULL;
	if (txq->ift_sds.ifsd_map != NULL)
		map = txq->ift_sds.ifsd_map[i];
	if (map != NULL) {
		bus_dmamap_unload(txq->ift_desc_tag, map);
		bus_dmamap_destroy(txq->ift_desc_tag, map);
		txq->ift_sds.ifsd_map[i] = NULL;
	}
}

static void
iflib_txq_destroy(iflib_txq_t txq)
{
	if_ctx_t ctx = txq->ift_ctx;

	for (int i = 0; i < txq->ift_size; i++)
		iflib_txsd_destroy(ctx, txq, i);
	if (txq->ift_sds.ifsd_map != NULL) {
		free(txq->ift_sds.ifsd_map, M_IFLIB);
		txq->ift_sds.ifsd_map = NULL;
	}
	if (txq->ift_sds.ifsd_m != NULL) {
		free(txq->ift_sds.ifsd_m, M_IFLIB);
		txq->ift_sds.ifsd_m = NULL;
	}
	if (txq->ift_sds.ifsd_flags != NULL) {
		free(txq->ift_sds.ifsd_flags, M_IFLIB);
		txq->ift_sds.ifsd_flags = NULL;
	}
	if (txq->ift_desc_tag != NULL) {
		bus_dma_tag_destroy(txq->ift_desc_tag);
		txq->ift_desc_tag = NULL;
	}
	if (txq->ift_tso_desc_tag != NULL) {
		bus_dma_tag_destroy(txq->ift_tso_desc_tag);
		txq->ift_tso_desc_tag = NULL;
	}
}

static void
iflib_txsd_free(if_ctx_t ctx, iflib_txq_t txq, int i)
{
	struct mbuf **mp;

	mp = &txq->ift_sds.ifsd_m[i];
	if (*mp == NULL)
		return;

	if (txq->ift_sds.ifsd_map != NULL) {
		bus_dmamap_sync(txq->ift_desc_tag,
				txq->ift_sds.ifsd_map[i],
				BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(txq->ift_desc_tag,
				  txq->ift_sds.ifsd_map[i]);
	}
	m_free(*mp);
	DBG_COUNTER_INC(tx_frees);
	*mp = NULL;
}

static int
iflib_txq_setup(iflib_txq_t txq)
{
	if_ctx_t ctx = txq->ift_ctx;
	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
	iflib_dma_info_t di;
	int i;

	/* Set number of descriptors available */
	txq->ift_qstatus = IFLIB_QUEUE_IDLE;
	/* XXX make configurable */
	txq->ift_update_freq = IFLIB_DEFAULT_TX_UPDATE_FREQ;

	/* Reset indices */
	txq->ift_cidx_processed = 0;
	txq->ift_pidx = txq->ift_cidx = txq->ift_npending = 0;
	txq->ift_size = scctx->isc_ntxd[txq->ift_br_offset];

	for (i = 0, di = txq->ift_ifdi; i < ctx->ifc_nhwtxqs; i++, di++)
		bzero((void *)di->idi_vaddr, di->idi_size);

	IFDI_TXQ_SETUP(ctx, txq->ift_id);
	for (i = 0, di = txq->ift_ifdi; i < ctx->ifc_nhwtxqs; i++, di++)
		bus_dmamap_sync(di->idi_tag, di->idi_map,
				BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	return (0);
}

/*********************************************************************
 *
 *  Allocate memory for rx_buffer structures. Since we use one
 *  rx_buffer per received packet, the maximum number of rx_buffer's
 *  that we'll need is equal to the number of receive descriptors
 *  that we've allocated.
 *
 **********************************************************************/
static int
iflib_rxsd_alloc(iflib_rxq_t rxq)
{
	if_ctx_t ctx = rxq->ifr_ctx;
	if_shared_ctx_t sctx = ctx->ifc_sctx;
	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
	device_t dev = ctx->ifc_dev;
	iflib_fl_t fl;
	int err;

	MPASS(scctx->isc_nrxd[0] > 0);
	MPASS(scctx->isc_nrxd[rxq->ifr_fl_offset] > 0);

	fl = rxq->ifr_fl;
	for (int i = 0; i < rxq->ifr_nfl; i++, fl++) {
		fl->ifl_size = scctx->isc_nrxd[rxq->ifr_fl_offset]; /* this isn't necessarily the same */
		err = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
					 1, 0,			/* alignment, bounds */
					 BUS_SPACE_MAXADDR,	/* lowaddr */
					 BUS_SPACE_MAXADDR,	/* highaddr */
					 NULL, NULL,		/* filter, filterarg */
					 sctx->isc_rx_maxsize,	/* maxsize */
					 sctx->isc_rx_nsegments,	/* nsegments */
					 sctx->isc_rx_maxsegsize,	/* maxsegsize */
					 0,			/* flags */
					 NULL,			/* lockfunc */
					 NULL,			/* lockarg */
					 &fl->ifl_desc_tag);
		if (err) {
			device_printf(dev, "%s: bus_dma_tag_create failed %d\n",
				__func__, err);
			goto fail;
		}
		if (!(fl->ifl_sds.ifsd_flags =
		      (uint8_t *) malloc(sizeof(uint8_t) *
					 scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
			device_printf(dev, "Unable to allocate rx_buffer memory\n");
			err = ENOMEM;
			goto fail;
		}
		if (!(fl->ifl_sds.ifsd_m =
		      (struct mbuf **) malloc(sizeof(struct mbuf *) *
					      scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
			device_printf(dev, "Unable to allocate rx_buffer memory\n");
			err = ENOMEM;
			goto fail;
		}
		if (!(fl->ifl_sds.ifsd_cl =
		      (caddr_t *) malloc(sizeof(caddr_t) *
					      scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
			device_printf(dev, "Unable to allocate rx_buffer memory\n");
			err = ENOMEM;
			goto fail;
		}

		/* Create the descriptor buffer dma maps */
#if defined(ACPI_DMAR) || (!(defined(__i386__) || defined(__amd64__)))
(defined(__i386__) || defined(__amd64__))) 1857 if ((ctx->ifc_flags & IFC_DMAR) == 0) 1858 continue; 1859 1860 if (!(fl->ifl_sds.ifsd_map = 1861 (bus_dmamap_t *) malloc(sizeof(bus_dmamap_t) * scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) { 1862 device_printf(dev, "Unable to allocate rx buffer map memory\n"); 1863 err = ENOMEM; 1864 goto fail; 1865 } 1866 1867 for (int i = 0; i < scctx->isc_nrxd[rxq->ifr_fl_offset]; i++) { 1868 err = bus_dmamap_create(fl->ifl_desc_tag, 0, &fl->ifl_sds.ifsd_map[i]); 1869 if (err != 0) { 1870 device_printf(dev, "Unable to create RX buffer DMA map\n"); 1871 goto fail; 1872 } 1873 } 1874 #endif 1875 } 1876 return (0); 1877 1878 fail: 1879 iflib_rx_structures_free(ctx); 1880 return (err); 1881 } 1882 1883 1884 /* 1885 * Internal service routines 1886 */ 1887 1888 struct rxq_refill_cb_arg { 1889 int error; 1890 bus_dma_segment_t seg; 1891 int nseg; 1892 }; 1893 1894 static void 1895 _rxq_refill_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) 1896 { 1897 struct rxq_refill_cb_arg *cb_arg = arg; 1898 1899 cb_arg->error = error; 1900 cb_arg->seg = segs[0]; 1901 cb_arg->nseg = nseg; 1902 } 1903 1904 1905 #ifdef ACPI_DMAR 1906 #define IS_DMAR(ctx) (ctx->ifc_flags & IFC_DMAR) 1907 #else 1908 #define IS_DMAR(ctx) (0) 1909 #endif 1910 1911 /** 1912 * _iflib_fl_refill - refill an rxq free-buffer list 1913 * @ctx: the iflib context 1914 * @fl: the free list to refill 1915 * @count: the number of new buffers to allocate 1916 * 1917 * (Re)populate an rxq free-buffer list with up to @count new packet buffers. 1918 * The caller must ensure that @count does not exceed the queue's capacity. 1919 */ 1920 static void 1921 _iflib_fl_refill(if_ctx_t ctx, iflib_fl_t fl, int count) 1922 { 1923 struct mbuf *m; 1924 int idx, frag_idx = fl->ifl_fragidx; 1925 int pidx = fl->ifl_pidx; 1926 caddr_t cl, *sd_cl; 1927 struct mbuf **sd_m; 1928 uint8_t *sd_flags; 1929 struct if_rxd_update iru; 1930 bus_dmamap_t *sd_map; 1931 int n, i = 0; 1932 uint64_t bus_addr; 1933 int err; 1934 qidx_t credits; 1935 1936 sd_m = fl->ifl_sds.ifsd_m; 1937 sd_map = fl->ifl_sds.ifsd_map; 1938 sd_cl = fl->ifl_sds.ifsd_cl; 1939 sd_flags = fl->ifl_sds.ifsd_flags; 1940 idx = pidx; 1941 credits = fl->ifl_credits; 1942 1943 n = count; 1944 MPASS(n > 0); 1945 MPASS(credits + n <= fl->ifl_size); 1946 1947 if (pidx < fl->ifl_cidx) 1948 MPASS(pidx + n <= fl->ifl_cidx); 1949 if (pidx == fl->ifl_cidx && (credits < fl->ifl_size)) 1950 MPASS(fl->ifl_gen == 0); 1951 if (pidx > fl->ifl_cidx) 1952 MPASS(n <= fl->ifl_size - pidx + fl->ifl_cidx); 1953 1954 DBG_COUNTER_INC(fl_refills); 1955 if (n > 8) 1956 DBG_COUNTER_INC(fl_refills_large); 1957 iru_init(&iru, fl->ifl_rxq, fl->ifl_id); 1958 while (n--) { 1959 /* 1960 * We allocate an uninitialized mbuf + cluster, the mbuf is 1961 * initialized after rx.
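 * (MT_NOINIT defers mbuf header initialization to the receive path:
 * m_init() runs in iflib_rxd_pkt_get()/assemble_segments() once the
 * buffer actually completes, sparing buffers that are recycled without
 * ever completing from paying the initialization cost.)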
1962 * 1963 * If the cluster is still set then we know a minimum sized packet was received 1964 */ 1965 bit_ffc_at(fl->ifl_rx_bitmap, frag_idx, fl->ifl_size, &frag_idx); 1966 if ((frag_idx < 0) || (frag_idx >= fl->ifl_size)) 1967 bit_ffc(fl->ifl_rx_bitmap, fl->ifl_size, &frag_idx); 1968 if ((cl = sd_cl[frag_idx]) == NULL) { 1969 if ((cl = sd_cl[frag_idx] = m_cljget(NULL, M_NOWAIT, fl->ifl_buf_size)) == NULL) 1970 break; 1971 #if MEMORY_LOGGING 1972 fl->ifl_cl_enqueued++; 1973 #endif 1974 } 1975 if ((m = m_gethdr(M_NOWAIT, MT_NOINIT)) == NULL) { 1976 break; 1977 } 1978 #if MEMORY_LOGGING 1979 fl->ifl_m_enqueued++; 1980 #endif 1981 1982 DBG_COUNTER_INC(rx_allocs); 1983 #if defined(__i386__) || defined(__amd64__) 1984 if (!IS_DMAR(ctx)) { 1985 bus_addr = pmap_kextract((vm_offset_t)cl); 1986 } else 1987 #endif 1988 { 1989 struct rxq_refill_cb_arg cb_arg; 1990 1991 cb_arg.error = 0; 1992 MPASS(sd_map != NULL); 1993 MPASS(sd_map[frag_idx] != NULL); 1994 err = bus_dmamap_load(fl->ifl_desc_tag, sd_map[frag_idx], 1995 cl, fl->ifl_buf_size, _rxq_refill_cb, &cb_arg, 0); 1996 bus_dmamap_sync(fl->ifl_desc_tag, sd_map[frag_idx], 1997 BUS_DMASYNC_PREREAD); 1998 1999 if (err != 0 || cb_arg.error) { 2000 /* 2001 * !zone_pack ? 2002 */ 2003 if (fl->ifl_zone == zone_pack) 2004 uma_zfree(fl->ifl_zone, cl); 2005 m_free(m); 2006 n = 0; 2007 goto done; 2008 } 2009 bus_addr = cb_arg.seg.ds_addr; 2010 } 2011 bit_set(fl->ifl_rx_bitmap, frag_idx); 2012 sd_flags[frag_idx] |= RX_SW_DESC_INUSE; 2013 2014 MPASS(sd_m[frag_idx] == NULL); 2015 sd_cl[frag_idx] = cl; 2016 sd_m[frag_idx] = m; 2017 fl->ifl_rxd_idxs[i] = frag_idx; 2018 fl->ifl_bus_addrs[i] = bus_addr; 2019 fl->ifl_vm_addrs[i] = cl; 2020 credits++; 2021 i++; 2022 MPASS(credits <= fl->ifl_size); 2023 if (++idx == fl->ifl_size) { 2024 fl->ifl_gen = 1; 2025 idx = 0; 2026 } 2027 if (n == 0 || i == IFLIB_MAX_RX_REFRESH) { 2028 iru.iru_pidx = pidx; 2029 iru.iru_count = i; 2030 ctx->isc_rxd_refill(ctx->ifc_softc, &iru); 2031 i = 0; 2032 pidx = idx; 2033 fl->ifl_pidx = idx; 2034 fl->ifl_credits = credits; 2035 } 2036 2037 } 2038 done: 2039 if (i) { 2040 iru.iru_pidx = pidx; 2041 iru.iru_count = i; 2042 ctx->isc_rxd_refill(ctx->ifc_softc, &iru); 2043 fl->ifl_pidx = idx; 2044 fl->ifl_credits = credits; 2045 } 2046 DBG_COUNTER_INC(rxd_flush); 2047 if (fl->ifl_pidx == 0) 2048 pidx = fl->ifl_size - 1; 2049 else 2050 pidx = fl->ifl_pidx - 1; 2051 2052 if (sd_map) 2053 bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map, 2054 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2055 ctx->isc_rxd_flush(ctx->ifc_softc, fl->ifl_rxq->ifr_id, fl->ifl_id, pidx); 2056 fl->ifl_fragidx = frag_idx; 2057 } 2058 2059 static __inline void 2060 __iflib_fl_refill_lt(if_ctx_t ctx, iflib_fl_t fl, int max) 2061 { 2062 /* we avoid allowing pidx to catch up with cidx as it confuses ixl */ 2063 int32_t reclaimable = fl->ifl_size - fl->ifl_credits - 1; 2064 #ifdef INVARIANTS 2065 int32_t delta = fl->ifl_size - get_inuse(fl->ifl_size, fl->ifl_cidx, fl->ifl_pidx, fl->ifl_gen) - 1; 2066 #endif 2067 2068 MPASS(fl->ifl_credits <= fl->ifl_size); 2069 MPASS(reclaimable == delta); 2070 2071 if (reclaimable > 0) 2072 _iflib_fl_refill(ctx, fl, min(max, reclaimable)); 2073 } 2074 2075 static void 2076 iflib_fl_bufs_free(iflib_fl_t fl) 2077 { 2078 iflib_dma_info_t idi = fl->ifl_ifdi; 2079 uint32_t i; 2080 2081 for (i = 0; i < fl->ifl_size; i++) { 2082 struct mbuf **sd_m = &fl->ifl_sds.ifsd_m[i]; 2083 uint8_t *sd_flags = &fl->ifl_sds.ifsd_flags[i]; 2084 caddr_t *sd_cl = &fl->ifl_sds.ifsd_cl[i]; 2085 2086 if 
(*sd_flags & RX_SW_DESC_INUSE) { 2087 if (fl->ifl_sds.ifsd_map != NULL) { 2088 bus_dmamap_t sd_map = fl->ifl_sds.ifsd_map[i]; 2089 bus_dmamap_unload(fl->ifl_desc_tag, sd_map); 2090 if (fl->ifl_rxq->ifr_ctx->ifc_in_detach) 2091 bus_dmamap_destroy(fl->ifl_desc_tag, sd_map); 2092 } 2093 if (*sd_m != NULL) { 2094 m_init(*sd_m, M_NOWAIT, MT_DATA, 0); 2095 uma_zfree(zone_mbuf, *sd_m); 2096 } 2097 if (*sd_cl != NULL) 2098 uma_zfree(fl->ifl_zone, *sd_cl); 2099 *sd_flags = 0; 2100 } else { 2101 MPASS(*sd_cl == NULL); 2102 MPASS(*sd_m == NULL); 2103 } 2104 #if MEMORY_LOGGING 2105 fl->ifl_m_dequeued++; 2106 fl->ifl_cl_dequeued++; 2107 #endif 2108 *sd_cl = NULL; 2109 *sd_m = NULL; 2110 } 2111 #ifdef INVARIANTS 2112 for (i = 0; i < fl->ifl_size; i++) { 2113 MPASS(fl->ifl_sds.ifsd_flags[i] == 0); 2114 MPASS(fl->ifl_sds.ifsd_cl[i] == NULL); 2115 MPASS(fl->ifl_sds.ifsd_m[i] == NULL); 2116 } 2117 #endif 2118 /* 2119 * Reset free list values 2120 */ 2121 fl->ifl_credits = fl->ifl_cidx = fl->ifl_pidx = fl->ifl_gen = fl->ifl_fragidx = 0; 2122 bzero(idi->idi_vaddr, idi->idi_size); 2123 } 2124 2125 /********************************************************************* 2126 * 2127 * Initialize a receive ring and its buffers. 2128 * 2129 **********************************************************************/ 2130 static int 2131 iflib_fl_setup(iflib_fl_t fl) 2132 { 2133 iflib_rxq_t rxq = fl->ifl_rxq; 2134 if_ctx_t ctx = rxq->ifr_ctx; 2135 if_softc_ctx_t sctx = &ctx->ifc_softc_ctx; 2136 2137 bit_nclear(fl->ifl_rx_bitmap, 0, fl->ifl_size - 1); 2138 /* 2139 ** Free current RX buffer structs and their mbufs 2140 */ 2141 iflib_fl_bufs_free(fl); 2142 /* Now replenish the mbufs */ 2143 MPASS(fl->ifl_credits == 0); 2144 /* 2145 * XXX don't set the max_frame_size to larger 2146 * than the hardware can handle 2147 */ 2148 if (sctx->isc_max_frame_size <= 2048) 2149 fl->ifl_buf_size = MCLBYTES; 2150 #ifndef CONTIGMALLOC_WORKS 2151 else 2152 fl->ifl_buf_size = MJUMPAGESIZE; 2153 #else 2154 else if (sctx->isc_max_frame_size <= 4096) 2155 fl->ifl_buf_size = MJUMPAGESIZE; 2156 else if (sctx->isc_max_frame_size <= 9216) 2157 fl->ifl_buf_size = MJUM9BYTES; 2158 else 2159 fl->ifl_buf_size = MJUM16BYTES; 2160 #endif 2161 if (fl->ifl_buf_size > ctx->ifc_max_fl_buf_size) 2162 ctx->ifc_max_fl_buf_size = fl->ifl_buf_size; 2163 fl->ifl_cltype = m_gettype(fl->ifl_buf_size); 2164 fl->ifl_zone = m_getzone(fl->ifl_buf_size); 2165 2166 2167 /* avoid pre-allocating zillions of clusters to an idle card 2168 * potentially speeding up attach 2169 */ 2170 _iflib_fl_refill(ctx, fl, min(128, fl->ifl_size)); 2171 MPASS(min(128, fl->ifl_size) == fl->ifl_credits); 2172 if (min(128, fl->ifl_size) != fl->ifl_credits) 2173 return (ENOBUFS); 2174 /* 2175 * handle failure 2176 */ 2177 MPASS(rxq != NULL); 2178 MPASS(fl->ifl_ifdi != NULL); 2179 bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map, 2180 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2181 return (0); 2182 } 2183 2184 /********************************************************************* 2185 * 2186 * Free receive ring data structures 2187 * 2188 **********************************************************************/ 2189 static void 2190 iflib_rx_sds_free(iflib_rxq_t rxq) 2191 { 2192 iflib_fl_t fl; 2193 int i; 2194 2195 if (rxq->ifr_fl != NULL) { 2196 for (i = 0; i < rxq->ifr_nfl; i++) { 2197 fl = &rxq->ifr_fl[i]; 2198 if (fl->ifl_desc_tag != NULL) { 2199 bus_dma_tag_destroy(fl->ifl_desc_tag); 2200 fl->ifl_desc_tag = NULL; 2201 } 2202 free(fl->ifl_sds.ifsd_m, M_IFLIB); 2203 
free(fl->ifl_sds.ifsd_cl, M_IFLIB); 2204 /* XXX destroy maps first */ 2205 free(fl->ifl_sds.ifsd_map, M_IFLIB); 2206 fl->ifl_sds.ifsd_m = NULL; 2207 fl->ifl_sds.ifsd_cl = NULL; 2208 fl->ifl_sds.ifsd_map = NULL; 2209 } 2210 free(rxq->ifr_fl, M_IFLIB); 2211 rxq->ifr_fl = NULL; 2212 rxq->ifr_cq_gen = rxq->ifr_cq_cidx = rxq->ifr_cq_pidx = 0; 2213 } 2214 } 2215 2216 /* 2217 * MI (machine-independent) logic 2218 * 2219 */ 2220 static void 2221 iflib_timer(void *arg) 2222 { 2223 iflib_txq_t txq = arg; 2224 if_ctx_t ctx = txq->ift_ctx; 2225 if_softc_ctx_t sctx = &ctx->ifc_softc_ctx; 2226 uint64_t this_tick = ticks; 2227 uint32_t reset_on = hz / 2; 2228 2229 if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)) 2230 return; 2231 /* 2232 ** Check on the state of the TX queue(s); this 2233 ** can be done without the lock because it's RO 2234 ** and the HUNG state will be static if set. 2235 */ 2236 if (this_tick - txq->ift_last_timer_tick >= hz / 2) { 2237 txq->ift_last_timer_tick = this_tick; 2238 IFDI_TIMER(ctx, txq->ift_id); 2239 if ((txq->ift_qstatus == IFLIB_QUEUE_HUNG) && 2240 ((txq->ift_cleaned_prev == txq->ift_cleaned) || 2241 (sctx->isc_pause_frames == 0))) 2242 goto hung; 2243 2244 if (ifmp_ring_is_stalled(txq->ift_br)) 2245 txq->ift_qstatus = IFLIB_QUEUE_HUNG; 2246 txq->ift_cleaned_prev = txq->ift_cleaned; 2247 } 2248 #ifdef DEV_NETMAP 2249 if (if_getcapenable(ctx->ifc_ifp) & IFCAP_NETMAP) 2250 iflib_netmap_timer_adjust(ctx, txq->ift_id, &reset_on); 2251 #endif 2252 /* handle any laggards */ 2253 if (txq->ift_db_pending) 2254 GROUPTASK_ENQUEUE(&txq->ift_task); 2255 2256 sctx->isc_pause_frames = 0; 2257 if (if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING) 2258 callout_reset_on(&txq->ift_timer, reset_on, iflib_timer, txq, txq->ift_timer.c_cpu); 2259 return; 2260 hung: 2261 device_printf(ctx->ifc_dev, "TX(%d) desc avail = %d, pidx = %d\n", 2262 txq->ift_id, TXQ_AVAIL(txq), txq->ift_pidx); 2263 STATE_LOCK(ctx); 2264 if_setdrvflagbits(ctx->ifc_ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING); 2265 ctx->ifc_flags |= (IFC_DO_WATCHDOG|IFC_DO_RESET); 2266 iflib_admin_intr_deferred(ctx); 2267 STATE_UNLOCK(ctx); 2268 } 2269 2270 static void 2271 iflib_init_locked(if_ctx_t ctx) 2272 { 2273 if_softc_ctx_t sctx = &ctx->ifc_softc_ctx; 2274 if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; 2275 if_t ifp = ctx->ifc_ifp; 2276 iflib_fl_t fl; 2277 iflib_txq_t txq; 2278 iflib_rxq_t rxq; 2279 int i, j, tx_ip_csum_flags, tx_ip6_csum_flags; 2280 2281 2282 if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING); 2283 IFDI_INTR_DISABLE(ctx); 2284 2285 tx_ip_csum_flags = scctx->isc_tx_csum_flags & (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_SCTP); 2286 tx_ip6_csum_flags = scctx->isc_tx_csum_flags & (CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_SCTP); 2287 /* Set hardware offload abilities */ 2288 if_clearhwassist(ifp); 2289 if (if_getcapenable(ifp) & IFCAP_TXCSUM) 2290 if_sethwassistbits(ifp, tx_ip_csum_flags, 0); 2291 if (if_getcapenable(ifp) & IFCAP_TXCSUM_IPV6) 2292 if_sethwassistbits(ifp, tx_ip6_csum_flags, 0); 2293 if (if_getcapenable(ifp) & IFCAP_TSO4) 2294 if_sethwassistbits(ifp, CSUM_IP_TSO, 0); 2295 if (if_getcapenable(ifp) & IFCAP_TSO6) 2296 if_sethwassistbits(ifp, CSUM_IP6_TSO, 0); 2297 2298 for (i = 0, txq = ctx->ifc_txqs; i < sctx->isc_ntxqsets; i++, txq++) { 2299 CALLOUT_LOCK(txq); 2300 callout_stop(&txq->ift_timer); 2301 CALLOUT_UNLOCK(txq); 2302 iflib_netmap_txq_init(ctx, txq); 2303 } 2304 #ifdef INVARIANTS 2305 i = if_getdrvflags(ifp); 2306 #endif 2307 IFDI_INIT(ctx); 2308 MPASS(if_getdrvflags(ifp) == i); 2309 for (i = 0, rxq =
ctx->ifc_rxqs; i < sctx->isc_nrxqsets; i++, rxq++) { 2310 /* XXX this should really be done on a per-queue basis */ 2311 if (if_getcapenable(ifp) & IFCAP_NETMAP) { 2312 MPASS(rxq->ifr_id == i); 2313 iflib_netmap_rxq_init(ctx, rxq); 2314 continue; 2315 } 2316 for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) { 2317 if (iflib_fl_setup(fl)) { 2318 device_printf(ctx->ifc_dev, "freelist setup failed - check cluster settings\n"); 2319 goto done; 2320 } 2321 } 2322 } 2323 done: 2324 if_setdrvflagbits(ctx->ifc_ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE); 2325 IFDI_INTR_ENABLE(ctx); 2326 txq = ctx->ifc_txqs; 2327 for (i = 0; i < sctx->isc_ntxqsets; i++, txq++) 2328 callout_reset_on(&txq->ift_timer, hz/2, iflib_timer, txq, 2329 txq->ift_timer.c_cpu); 2330 } 2331 2332 static int 2333 iflib_media_change(if_t ifp) 2334 { 2335 if_ctx_t ctx = if_getsoftc(ifp); 2336 int err; 2337 2338 CTX_LOCK(ctx); 2339 if ((err = IFDI_MEDIA_CHANGE(ctx)) == 0) 2340 iflib_init_locked(ctx); 2341 CTX_UNLOCK(ctx); 2342 return (err); 2343 } 2344 2345 static void 2346 iflib_media_status(if_t ifp, struct ifmediareq *ifmr) 2347 { 2348 if_ctx_t ctx = if_getsoftc(ifp); 2349 2350 CTX_LOCK(ctx); 2351 IFDI_UPDATE_ADMIN_STATUS(ctx); 2352 IFDI_MEDIA_STATUS(ctx, ifmr); 2353 CTX_UNLOCK(ctx); 2354 } 2355 2356 void 2357 iflib_stop(if_ctx_t ctx) 2358 { 2359 iflib_txq_t txq = ctx->ifc_txqs; 2360 iflib_rxq_t rxq = ctx->ifc_rxqs; 2361 if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; 2362 iflib_dma_info_t di; 2363 iflib_fl_t fl; 2364 int i, j; 2365 2366 /* Tell the stack that the interface is no longer active */ 2367 if_setdrvflagbits(ctx->ifc_ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING); 2368 2369 IFDI_INTR_DISABLE(ctx); 2370 DELAY(1000); 2371 IFDI_STOP(ctx); 2372 DELAY(1000); 2373 2374 iflib_debug_reset(); 2375 /* Wait for current tx queue users to exit to disarm watchdog timer. */ 2376 for (i = 0; i < scctx->isc_ntxqsets; i++, txq++) { 2377 /* make sure all transmitters have completed before proceeding XXX */ 2378 2379 CALLOUT_LOCK(txq); 2380 callout_stop(&txq->ift_timer); 2381 CALLOUT_UNLOCK(txq); 2382 2383 /* clean any enqueued buffers */ 2384 iflib_ifmp_purge(txq); 2385 /* Free any existing tx buffers. 
*/ 2386 for (j = 0; j < txq->ift_size; j++) { 2387 iflib_txsd_free(ctx, txq, j); 2388 } 2389 txq->ift_processed = txq->ift_cleaned = txq->ift_cidx_processed = 0; 2390 txq->ift_in_use = txq->ift_gen = txq->ift_cidx = txq->ift_pidx = txq->ift_no_desc_avail = 0; 2391 txq->ift_closed = txq->ift_mbuf_defrag = txq->ift_mbuf_defrag_failed = 0; 2392 txq->ift_no_tx_dma_setup = txq->ift_txd_encap_efbig = txq->ift_map_failed = 0; 2393 txq->ift_pullups = 0; 2394 ifmp_ring_reset_stats(txq->ift_br); 2395 for (j = 0, di = txq->ift_ifdi; j < ctx->ifc_nhwtxqs; j++, di++) 2396 bzero((void *)di->idi_vaddr, di->idi_size); 2397 } 2398 for (i = 0; i < scctx->isc_nrxqsets; i++, rxq++) { 2399 /* make sure all transmitters have completed before proceeding XXX */ 2400 2401 for (j = 0, di = rxq->ifr_ifdi; j < rxq->ifr_nfl; j++, di++) 2402 bzero((void *)di->idi_vaddr, di->idi_size); 2403 /* also resets the free lists pidx/cidx */ 2404 for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) 2405 iflib_fl_bufs_free(fl); 2406 } 2407 } 2408 2409 static inline caddr_t 2410 calc_next_rxd(iflib_fl_t fl, int cidx) 2411 { 2412 qidx_t size; 2413 int nrxd; 2414 caddr_t start, end, cur, next; 2415 2416 nrxd = fl->ifl_size; 2417 size = fl->ifl_rxd_size; 2418 start = fl->ifl_ifdi->idi_vaddr; 2419 2420 if (__predict_false(size == 0)) 2421 return (start); 2422 cur = start + size*cidx; 2423 end = start + size*nrxd; 2424 next = CACHE_PTR_NEXT(cur); 2425 return (next < end ? next : start); 2426 } 2427 2428 static inline void 2429 prefetch_pkts(iflib_fl_t fl, int cidx) 2430 { 2431 int nextptr; 2432 int nrxd = fl->ifl_size; 2433 caddr_t next_rxd; 2434 2435 2436 nextptr = (cidx + CACHE_PTR_INCREMENT) & (nrxd-1); 2437 prefetch(&fl->ifl_sds.ifsd_m[nextptr]); 2438 prefetch(&fl->ifl_sds.ifsd_cl[nextptr]); 2439 next_rxd = calc_next_rxd(fl, cidx); 2440 prefetch(next_rxd); 2441 prefetch(fl->ifl_sds.ifsd_m[(cidx + 1) & (nrxd-1)]); 2442 prefetch(fl->ifl_sds.ifsd_m[(cidx + 2) & (nrxd-1)]); 2443 prefetch(fl->ifl_sds.ifsd_m[(cidx + 3) & (nrxd-1)]); 2444 prefetch(fl->ifl_sds.ifsd_m[(cidx + 4) & (nrxd-1)]); 2445 prefetch(fl->ifl_sds.ifsd_cl[(cidx + 1) & (nrxd-1)]); 2446 prefetch(fl->ifl_sds.ifsd_cl[(cidx + 2) & (nrxd-1)]); 2447 prefetch(fl->ifl_sds.ifsd_cl[(cidx + 3) & (nrxd-1)]); 2448 prefetch(fl->ifl_sds.ifsd_cl[(cidx + 4) & (nrxd-1)]); 2449 } 2450 2451 static void 2452 rxd_frag_to_sd(iflib_rxq_t rxq, if_rxd_frag_t irf, int unload, if_rxsd_t sd) 2453 { 2454 int flid, cidx; 2455 bus_dmamap_t map; 2456 iflib_fl_t fl; 2457 iflib_dma_info_t di; 2458 int next; 2459 2460 map = NULL; 2461 flid = irf->irf_flid; 2462 cidx = irf->irf_idx; 2463 fl = &rxq->ifr_fl[flid]; 2464 sd->ifsd_fl = fl; 2465 sd->ifsd_cidx = cidx; 2466 sd->ifsd_m = &fl->ifl_sds.ifsd_m[cidx]; 2467 sd->ifsd_cl = &fl->ifl_sds.ifsd_cl[cidx]; 2468 fl->ifl_credits--; 2469 #if MEMORY_LOGGING 2470 fl->ifl_m_dequeued++; 2471 #endif 2472 if (rxq->ifr_ctx->ifc_flags & IFC_PREFETCH) 2473 prefetch_pkts(fl, cidx); 2474 if (fl->ifl_sds.ifsd_map != NULL) { 2475 next = (cidx + CACHE_PTR_INCREMENT) & (fl->ifl_size-1); 2476 prefetch(&fl->ifl_sds.ifsd_map[next]); 2477 map = fl->ifl_sds.ifsd_map[cidx]; 2478 di = fl->ifl_ifdi; 2479 next = (cidx + CACHE_LINE_SIZE) & (fl->ifl_size-1); 2480 prefetch(&fl->ifl_sds.ifsd_flags[next]); 2481 bus_dmamap_sync(di->idi_tag, di->idi_map, 2482 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 2483 2484 /* not valid assert if bxe really does SGE from non-contiguous elements */ 2485 MPASS(fl->ifl_cidx == cidx); 2486 if (unload) 2487 bus_dmamap_unload(fl->ifl_desc_tag, map); 2488 
} 2489 fl->ifl_cidx = (fl->ifl_cidx + 1) & (fl->ifl_size-1); 2490 if (__predict_false(fl->ifl_cidx == 0)) 2491 fl->ifl_gen = 0; 2492 if (map != NULL) 2493 bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map, 2494 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2495 bit_clear(fl->ifl_rx_bitmap, cidx); 2496 } 2497 2498 static struct mbuf * 2499 assemble_segments(iflib_rxq_t rxq, if_rxd_info_t ri, if_rxsd_t sd) 2500 { 2501 int i, padlen , flags; 2502 struct mbuf *m, *mh, *mt; 2503 caddr_t cl; 2504 2505 i = 0; 2506 mh = NULL; 2507 do { 2508 rxd_frag_to_sd(rxq, &ri->iri_frags[i], TRUE, sd); 2509 2510 MPASS(*sd->ifsd_cl != NULL); 2511 MPASS(*sd->ifsd_m != NULL); 2512 2513 /* Don't include zero-length frags */ 2514 if (ri->iri_frags[i].irf_len == 0) { 2515 /* XXX we can save the cluster here, but not the mbuf */ 2516 m_init(*sd->ifsd_m, M_NOWAIT, MT_DATA, 0); 2517 m_free(*sd->ifsd_m); 2518 *sd->ifsd_m = NULL; 2519 continue; 2520 } 2521 m = *sd->ifsd_m; 2522 *sd->ifsd_m = NULL; 2523 if (mh == NULL) { 2524 flags = M_PKTHDR|M_EXT; 2525 mh = mt = m; 2526 padlen = ri->iri_pad; 2527 } else { 2528 flags = M_EXT; 2529 mt->m_next = m; 2530 mt = m; 2531 /* assuming padding is only on the first fragment */ 2532 padlen = 0; 2533 } 2534 cl = *sd->ifsd_cl; 2535 *sd->ifsd_cl = NULL; 2536 2537 /* Can these two be made one ? */ 2538 m_init(m, M_NOWAIT, MT_DATA, flags); 2539 m_cljset(m, cl, sd->ifsd_fl->ifl_cltype); 2540 /* 2541 * These must follow m_init and m_cljset 2542 */ 2543 m->m_data += padlen; 2544 ri->iri_len -= padlen; 2545 m->m_len = ri->iri_frags[i].irf_len; 2546 } while (++i < ri->iri_nfrags); 2547 2548 return (mh); 2549 } 2550 2551 /* 2552 * Process one software descriptor 2553 */ 2554 static struct mbuf * 2555 iflib_rxd_pkt_get(iflib_rxq_t rxq, if_rxd_info_t ri) 2556 { 2557 struct if_rxsd sd; 2558 struct mbuf *m; 2559 2560 /* should I merge this back in now that the two paths are basically duplicated? */ 2561 if (ri->iri_nfrags == 1 && 2562 ri->iri_frags[0].irf_len <= MIN(IFLIB_RX_COPY_THRESH, MHLEN)) { 2563 rxd_frag_to_sd(rxq, &ri->iri_frags[0], FALSE, &sd); 2564 m = *sd.ifsd_m; 2565 *sd.ifsd_m = NULL; 2566 m_init(m, M_NOWAIT, MT_DATA, M_PKTHDR); 2567 #ifndef __NO_STRICT_ALIGNMENT 2568 if (!IP_ALIGNED(m)) 2569 m->m_data += 2; 2570 #endif 2571 memcpy(m->m_data, *sd.ifsd_cl, ri->iri_len); 2572 m->m_len = ri->iri_frags[0].irf_len; 2573 } else { 2574 m = assemble_segments(rxq, ri, &sd); 2575 } 2576 m->m_pkthdr.len = ri->iri_len; 2577 m->m_pkthdr.rcvif = ri->iri_ifp; 2578 m->m_flags |= ri->iri_flags; 2579 m->m_pkthdr.ether_vtag = ri->iri_vtag; 2580 m->m_pkthdr.flowid = ri->iri_flowid; 2581 M_HASHTYPE_SET(m, ri->iri_rsstype); 2582 m->m_pkthdr.csum_flags = ri->iri_csum_flags; 2583 m->m_pkthdr.csum_data = ri->iri_csum_data; 2584 return (m); 2585 } 2586 2587 #if defined(INET6) || defined(INET) 2588 static void 2589 iflib_get_ip_forwarding(struct lro_ctrl *lc, bool *v4, bool *v6) 2590 { 2591 CURVNET_SET(lc->ifp->if_vnet); 2592 #if defined(INET6) 2593 *v6 = VNET(ip6_forwarding); 2594 #endif 2595 #if defined(INET) 2596 *v4 = VNET(ipforwarding); 2597 #endif 2598 CURVNET_RESTORE(); 2599 } 2600 2601 /* 2602 * Returns true if it's possible this packet could be LROed. 2603 * if it returns false, it is guaranteed that tcp_lro_rx() 2604 * would not return zero. 
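 * (A false return does not mean the packet is dropped; the caller simply
 * bypasses tcp_lro_rx() and hands the mbuf straight to if_input().
 * Forwarding is the usual reason: merging segments would alter framing
 * for packets we may need to send back out.)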
2605 */ 2606 static bool 2607 iflib_check_lro_possible(struct mbuf *m, bool v4_forwarding, bool v6_forwarding) 2608 { 2609 struct ether_header *eh; 2610 uint16_t eh_type; 2611 2612 eh = mtod(m, struct ether_header *); 2613 eh_type = ntohs(eh->ether_type); 2614 switch (eh_type) { 2615 #if defined(INET6) 2616 case ETHERTYPE_IPV6: 2617 return !v6_forwarding; 2618 #endif 2619 #if defined (INET) 2620 case ETHERTYPE_IP: 2621 return !v4_forwarding; 2622 #endif 2623 } 2624 2625 return false; 2626 } 2627 #else 2628 static void 2629 iflib_get_ip_forwarding(struct lro_ctrl *lc __unused, bool *v4 __unused, bool *v6 __unused) 2630 { 2631 } 2632 #endif 2633 2634 static bool 2635 iflib_rxeof(iflib_rxq_t rxq, qidx_t budget) 2636 { 2637 if_ctx_t ctx = rxq->ifr_ctx; 2638 if_shared_ctx_t sctx = ctx->ifc_sctx; 2639 if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; 2640 int avail, i; 2641 qidx_t *cidxp; 2642 struct if_rxd_info ri; 2643 int err, budget_left, rx_bytes, rx_pkts; 2644 iflib_fl_t fl; 2645 struct ifnet *ifp; 2646 int lro_enabled; 2647 bool v4_forwarding, v6_forwarding, lro_possible; 2648 2649 /* 2650 * XXX early demux data packets so that if_input processing only handles 2651 * acks in interrupt context 2652 */ 2653 struct mbuf *m, *mh, *mt, *mf; 2654 2655 lro_possible = v4_forwarding = v6_forwarding = false; 2656 ifp = ctx->ifc_ifp; 2657 mh = mt = NULL; 2658 MPASS(budget > 0); 2659 rx_pkts = rx_bytes = 0; 2660 if (sctx->isc_flags & IFLIB_HAS_RXCQ) 2661 cidxp = &rxq->ifr_cq_cidx; 2662 else 2663 cidxp = &rxq->ifr_fl[0].ifl_cidx; 2664 if ((avail = iflib_rxd_avail(ctx, rxq, *cidxp, budget)) == 0) { 2665 for (i = 0, fl = &rxq->ifr_fl[0]; i < sctx->isc_nfl; i++, fl++) 2666 __iflib_fl_refill_lt(ctx, fl, budget + 8); 2667 DBG_COUNTER_INC(rx_unavail); 2668 return (false); 2669 } 2670 2671 for (budget_left = budget; budget_left > 0 && avail > 0;) { 2672 if (__predict_false(!CTX_ACTIVE(ctx))) { 2673 DBG_COUNTER_INC(rx_ctx_inactive); 2674 break; 2675 } 2676 /* 2677 * Reset client set fields to their default values 2678 */ 2679 rxd_info_zero(&ri); 2680 ri.iri_qsidx = rxq->ifr_id; 2681 ri.iri_cidx = *cidxp; 2682 ri.iri_ifp = ifp; 2683 ri.iri_frags = rxq->ifr_frags; 2684 err = ctx->isc_rxd_pkt_get(ctx->ifc_softc, &ri); 2685 2686 if (err) 2687 goto err; 2688 if (sctx->isc_flags & IFLIB_HAS_RXCQ) { 2689 *cidxp = ri.iri_cidx; 2690 /* Update our consumer index */ 2691 /* XXX NB: shurd - check if this is still safe */ 2692 while (rxq->ifr_cq_cidx >= scctx->isc_nrxd[0]) { 2693 rxq->ifr_cq_cidx -= scctx->isc_nrxd[0]; 2694 rxq->ifr_cq_gen = 0; 2695 } 2696 /* was this only a completion queue message? 
*/ 2697 if (__predict_false(ri.iri_nfrags == 0)) 2698 continue; 2699 } 2700 MPASS(ri.iri_nfrags != 0); 2701 MPASS(ri.iri_len != 0); 2702 2703 /* will advance the cidx on the corresponding free lists */ 2704 m = iflib_rxd_pkt_get(rxq, &ri); 2705 avail--; 2706 budget_left--; 2707 if (avail == 0 && budget_left) 2708 avail = iflib_rxd_avail(ctx, rxq, *cidxp, budget_left); 2709 2710 if (__predict_false(m == NULL)) { 2711 DBG_COUNTER_INC(rx_mbuf_null); 2712 continue; 2713 } 2714 /* imm_pkt: -- cxgb */ 2715 if (mh == NULL) 2716 mh = mt = m; 2717 else { 2718 mt->m_nextpkt = m; 2719 mt = m; 2720 } 2721 } 2722 /* make sure that we can refill faster than drain */ 2723 for (i = 0, fl = &rxq->ifr_fl[0]; i < sctx->isc_nfl; i++, fl++) 2724 __iflib_fl_refill_lt(ctx, fl, budget + 8); 2725 2726 lro_enabled = (if_getcapenable(ifp) & IFCAP_LRO); 2727 if (lro_enabled) 2728 iflib_get_ip_forwarding(&rxq->ifr_lc, &v4_forwarding, &v6_forwarding); 2729 mt = mf = NULL; 2730 while (mh != NULL) { 2731 m = mh; 2732 mh = mh->m_nextpkt; 2733 m->m_nextpkt = NULL; 2734 #ifndef __NO_STRICT_ALIGNMENT 2735 if (!IP_ALIGNED(m) && (m = iflib_fixup_rx(m)) == NULL) 2736 continue; 2737 #endif 2738 rx_bytes += m->m_pkthdr.len; 2739 rx_pkts++; 2740 #if defined(INET6) || defined(INET) 2741 if (lro_enabled) { 2742 if (!lro_possible) { 2743 lro_possible = iflib_check_lro_possible(m, v4_forwarding, v6_forwarding); 2744 if (lro_possible && mf != NULL) { 2745 ifp->if_input(ifp, mf); 2746 DBG_COUNTER_INC(rx_if_input); 2747 mt = mf = NULL; 2748 } 2749 } 2750 if ((m->m_pkthdr.csum_flags & (CSUM_L4_CALC|CSUM_L4_VALID)) == 2751 (CSUM_L4_CALC|CSUM_L4_VALID)) { 2752 if (lro_possible && tcp_lro_rx(&rxq->ifr_lc, m, 0) == 0) 2753 continue; 2754 } 2755 } 2756 #endif 2757 if (lro_possible) { 2758 ifp->if_input(ifp, m); 2759 DBG_COUNTER_INC(rx_if_input); 2760 continue; 2761 } 2762 2763 if (mf == NULL) 2764 mf = m; 2765 if (mt != NULL) 2766 mt->m_nextpkt = m; 2767 mt = m; 2768 } 2769 if (mf != NULL) { 2770 ifp->if_input(ifp, mf); 2771 DBG_COUNTER_INC(rx_if_input); 2772 } 2773 2774 if_inc_counter(ifp, IFCOUNTER_IBYTES, rx_bytes); 2775 if_inc_counter(ifp, IFCOUNTER_IPACKETS, rx_pkts); 2776 2777 /* 2778 * Flush any outstanding LRO work 2779 */ 2780 #if defined(INET6) || defined(INET) 2781 tcp_lro_flush_all(&rxq->ifr_lc); 2782 #endif 2783 if (avail) 2784 return true; 2785 return (iflib_rxd_avail(ctx, rxq, *cidxp, 1)); 2786 err: 2787 STATE_LOCK(ctx); 2788 ctx->ifc_flags |= IFC_DO_RESET; 2789 iflib_admin_intr_deferred(ctx); 2790 STATE_UNLOCK(ctx); 2791 return (false); 2792 } 2793 2794 #define TXD_NOTIFY_COUNT(txq) (((txq)->ift_size / (txq)->ift_update_freq)-1) 2795 static inline qidx_t 2796 txq_max_db_deferred(iflib_txq_t txq, qidx_t in_use) 2797 { 2798 qidx_t notify_count = TXD_NOTIFY_COUNT(txq); 2799 qidx_t minthresh = txq->ift_size / 8; 2800 if (in_use > 4*minthresh) 2801 return (notify_count); 2802 if (in_use > 2*minthresh) 2803 return (notify_count >> 1); 2804 if (in_use > minthresh) 2805 return (notify_count >> 3); 2806 return (0); 2807 } 2808 2809 static inline qidx_t 2810 txq_max_rs_deferred(iflib_txq_t txq) 2811 { 2812 qidx_t notify_count = TXD_NOTIFY_COUNT(txq); 2813 qidx_t minthresh = txq->ift_size / 8; 2814 if (txq->ift_in_use > 4*minthresh) 2815 return (notify_count); 2816 if (txq->ift_in_use > 2*minthresh) 2817 return (notify_count >> 1); 2818 if (txq->ift_in_use > minthresh) 2819 return (notify_count >> 2); 2820 return (2); 2821 } 2822 2823 #define M_CSUM_FLAGS(m) ((m)->m_pkthdr.csum_flags) 2824 #define M_HAS_VLANTAG(m) (m->m_flags & M_VLANTAG) 
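/*
 * Worked example of the deferral tiers above (illustrative values only;
 * a hypothetical ring of 1024 descriptors with an update frequency of 16):
 *
 *	notify_count = (1024 / 16) - 1 = 63
 *	minthresh    = 1024 / 8       = 128
 *
 *	in_use > 512: defer up to 63 descriptors  (db and rs)
 *	in_use > 256: defer up to 31              (db and rs)
 *	in_use > 128: defer up to 7 (db) / 15 (rs)
 *	otherwise:    0 (db) / 2 (rs)
 *
 * The busier the ring, the more doorbell writes and report-status requests
 * are batched, amortizing MMIO and TX interrupt costs.
 */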
2825 2826 #define TXQ_MAX_DB_DEFERRED(txq, in_use) txq_max_db_deferred((txq), (in_use)) 2827 #define TXQ_MAX_RS_DEFERRED(txq) txq_max_rs_deferred(txq) 2828 #define TXQ_MAX_DB_CONSUMED(size) (size >> 4) 2829 2830 /* forward compatibility for cxgb */ 2831 #define FIRST_QSET(ctx) 0 2832 #define NTXQSETS(ctx) ((ctx)->ifc_softc_ctx.isc_ntxqsets) 2833 #define NRXQSETS(ctx) ((ctx)->ifc_softc_ctx.isc_nrxqsets) 2834 #define QIDX(ctx, m) ((((m)->m_pkthdr.flowid & ctx->ifc_softc_ctx.isc_rss_table_mask) % NTXQSETS(ctx)) + FIRST_QSET(ctx)) 2835 #define DESC_RECLAIMABLE(q) ((int)((q)->ift_processed - (q)->ift_cleaned - (q)->ift_ctx->ifc_softc_ctx.isc_tx_nsegments)) 2836 2837 /* XXX we should be setting this to something other than zero */ 2838 #define RECLAIM_THRESH(ctx) ((ctx)->ifc_sctx->isc_tx_reclaim_thresh) 2839 #define MAX_TX_DESC(ctx) max((ctx)->ifc_softc_ctx.isc_tx_tso_segments_max, \ 2840 (ctx)->ifc_softc_ctx.isc_tx_nsegments) 2841 2842 static inline bool 2843 iflib_txd_db_check(if_ctx_t ctx, iflib_txq_t txq, int ring, qidx_t in_use) 2844 { 2845 qidx_t dbval, max; 2846 bool rang; 2847 2848 rang = false; 2849 max = TXQ_MAX_DB_DEFERRED(txq, in_use); 2850 if (ring || txq->ift_db_pending >= max) { 2851 dbval = txq->ift_npending ? txq->ift_npending : txq->ift_pidx; 2852 ctx->isc_txd_flush(ctx->ifc_softc, txq->ift_id, dbval); 2853 txq->ift_db_pending = txq->ift_npending = 0; 2854 rang = true; 2855 } 2856 return (rang); 2857 } 2858 2859 #ifdef PKT_DEBUG 2860 static void 2861 print_pkt(if_pkt_info_t pi) 2862 { 2863 printf("pi len: %d qsidx: %d nsegs: %d ndescs: %d flags: %x pidx: %d\n", 2864 pi->ipi_len, pi->ipi_qsidx, pi->ipi_nsegs, pi->ipi_ndescs, pi->ipi_flags, pi->ipi_pidx); 2865 printf("pi new_pidx: %d csum_flags: %lx tso_segsz: %d mflags: %x vtag: %d\n", 2866 pi->ipi_new_pidx, pi->ipi_csum_flags, pi->ipi_tso_segsz, pi->ipi_mflags, pi->ipi_vtag); 2867 printf("pi etype: %d ehdrlen: %d ip_hlen: %d ipproto: %d\n", 2868 pi->ipi_etype, pi->ipi_ehdrlen, pi->ipi_ip_hlen, pi->ipi_ipproto); 2869 } 2870 #endif 2871 2872 #define IS_TSO4(pi) ((pi)->ipi_csum_flags & CSUM_IP_TSO) 2873 #define IS_TX_OFFLOAD4(pi) ((pi)->ipi_csum_flags & (CSUM_IP_TCP | CSUM_IP_TSO)) 2874 #define IS_TSO6(pi) ((pi)->ipi_csum_flags & CSUM_IP6_TSO) 2875 #define IS_TX_OFFLOAD6(pi) ((pi)->ipi_csum_flags & (CSUM_IP6_TCP | CSUM_IP6_TSO)) 2876 2877 static int 2878 iflib_parse_header(iflib_txq_t txq, if_pkt_info_t pi, struct mbuf **mp) 2879 { 2880 if_shared_ctx_t sctx = txq->ift_ctx->ifc_sctx; 2881 struct ether_vlan_header *eh; 2882 struct mbuf *m; 2883 2884 m = *mp; 2885 if ((sctx->isc_flags & IFLIB_NEED_SCRATCH) && 2886 M_WRITABLE(m) == 0) { 2887 if ((m = m_dup(m, M_NOWAIT)) == NULL) { 2888 return (ENOMEM); 2889 } else { 2890 m_freem(*mp); 2891 DBG_COUNTER_INC(tx_frees); 2892 *mp = m; 2893 } 2894 } 2895 2896 /* 2897 * Determine where frame payload starts. 2898 * Jump over vlan headers if already present, 2899 * helpful for QinQ too. 
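 * For example, an untagged frame yields ipi_ehdrlen = ETHER_HDR_LEN (14),
 * while a frame carrying an 802.1Q tag yields ETHER_HDR_LEN +
 * ETHER_VLAN_ENCAP_LEN (18), matching the two branches below.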
2900 */ 2901 if (__predict_false(m->m_len < sizeof(*eh))) { 2902 txq->ift_pullups++; 2903 if (__predict_false((m = m_pullup(m, sizeof(*eh))) == NULL)) 2904 return (ENOMEM); 2905 } 2906 eh = mtod(m, struct ether_vlan_header *); 2907 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { 2908 pi->ipi_etype = ntohs(eh->evl_proto); 2909 pi->ipi_ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 2910 } else { 2911 pi->ipi_etype = ntohs(eh->evl_encap_proto); 2912 pi->ipi_ehdrlen = ETHER_HDR_LEN; 2913 } 2914 2915 switch (pi->ipi_etype) { 2916 #ifdef INET 2917 case ETHERTYPE_IP: 2918 { 2919 struct mbuf *n; 2920 struct ip *ip = NULL; 2921 struct tcphdr *th = NULL; 2922 int minthlen; 2923 2924 minthlen = min(m->m_pkthdr.len, pi->ipi_ehdrlen + sizeof(*ip) + sizeof(*th)); 2925 if (__predict_false(m->m_len < minthlen)) { 2926 /* 2927 * if this code bloat is causing too much of a hit 2928 * move it to a separate function and mark it noinline 2929 */ 2930 if (m->m_len == pi->ipi_ehdrlen) { 2931 n = m->m_next; 2932 MPASS(n); 2933 if (n->m_len >= sizeof(*ip)) { 2934 ip = (struct ip *)n->m_data; 2935 if (n->m_len >= (ip->ip_hl << 2) + sizeof(*th)) 2936 th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2)); 2937 } else { 2938 txq->ift_pullups++; 2939 if (__predict_false((m = m_pullup(m, minthlen)) == NULL)) 2940 return (ENOMEM); 2941 ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen); 2942 } 2943 } else { 2944 txq->ift_pullups++; 2945 if (__predict_false((m = m_pullup(m, minthlen)) == NULL)) 2946 return (ENOMEM); 2947 ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen); 2948 if (m->m_len >= (ip->ip_hl << 2) + sizeof(*th)) 2949 th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2)); 2950 } 2951 } else { 2952 ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen); 2953 if (m->m_len >= (ip->ip_hl << 2) + sizeof(*th)) 2954 th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2)); 2955 } 2956 pi->ipi_ip_hlen = ip->ip_hl << 2; 2957 pi->ipi_ipproto = ip->ip_p; 2958 pi->ipi_flags |= IPI_TX_IPV4; 2959 2960 if ((sctx->isc_flags & IFLIB_NEED_ZERO_CSUM) && (pi->ipi_csum_flags & CSUM_IP)) 2961 ip->ip_sum = 0; 2962 2963 /* TCP checksum offload may require TCP header length */ 2964 if (IS_TX_OFFLOAD4(pi)) { 2965 if (__predict_true(pi->ipi_ipproto == IPPROTO_TCP)) { 2966 if (__predict_false(th == NULL)) { 2967 txq->ift_pullups++; 2968 if (__predict_false((m = m_pullup(m, (ip->ip_hl << 2) + sizeof(*th))) == NULL)) 2969 return (ENOMEM); 2970 th = (struct tcphdr *)((caddr_t)ip + pi->ipi_ip_hlen); 2971 } 2972 pi->ipi_tcp_hflags = th->th_flags; 2973 pi->ipi_tcp_hlen = th->th_off << 2; 2974 pi->ipi_tcp_seq = th->th_seq; 2975 } 2976 if (IS_TSO4(pi)) { 2977 if (__predict_false(ip->ip_p != IPPROTO_TCP)) 2978 return (ENXIO); 2979 th->th_sum = in_pseudo(ip->ip_src.s_addr, 2980 ip->ip_dst.s_addr, htons(IPPROTO_TCP)); 2981 pi->ipi_tso_segsz = m->m_pkthdr.tso_segsz; 2982 if (sctx->isc_flags & IFLIB_TSO_INIT_IP) { 2983 ip->ip_sum = 0; 2984 ip->ip_len = htons(pi->ipi_ip_hlen + pi->ipi_tcp_hlen + pi->ipi_tso_segsz); 2985 } 2986 } 2987 } 2988 break; 2989 } 2990 #endif 2991 #ifdef INET6 2992 case ETHERTYPE_IPV6: 2993 { 2994 struct ip6_hdr *ip6 = (struct ip6_hdr *)(m->m_data + pi->ipi_ehdrlen); 2995 struct tcphdr *th; 2996 pi->ipi_ip_hlen = sizeof(struct ip6_hdr); 2997 2998 if (__predict_false(m->m_len < pi->ipi_ehdrlen + sizeof(struct ip6_hdr))) { 2999 txq->ift_pullups++; 3000 if (__predict_false((m = m_pullup(m, pi->ipi_ehdrlen + sizeof(struct ip6_hdr))) == NULL)) 3001 return (ENOMEM); 3002 } 3003 th = (struct tcphdr *)((caddr_t)ip6 + pi->ipi_ip_hlen); 3004 3005 
/* XXX-BZ this will go badly in case of ext hdrs. */ 3006 pi->ipi_ipproto = ip6->ip6_nxt; 3007 pi->ipi_flags |= IPI_TX_IPV6; 3008 3009 /* TCP checksum offload may require TCP header length */ 3010 if (IS_TX_OFFLOAD6(pi)) { 3011 if (pi->ipi_ipproto == IPPROTO_TCP) { 3012 if (__predict_false(m->m_len < pi->ipi_ehdrlen + sizeof(struct ip6_hdr) + sizeof(struct tcphdr))) { 3013 txq->ift_pullups++; 3014 if (__predict_false((m = m_pullup(m, pi->ipi_ehdrlen + sizeof(struct ip6_hdr) + sizeof(struct tcphdr))) == NULL)) 3015 return (ENOMEM); 3016 } 3017 pi->ipi_tcp_hflags = th->th_flags; 3018 pi->ipi_tcp_hlen = th->th_off << 2; 3019 pi->ipi_tcp_seq = th->th_seq; 3020 } 3021 if (IS_TSO6(pi)) { 3022 if (__predict_false(ip6->ip6_nxt != IPPROTO_TCP)) 3023 return (ENXIO); 3024 /* 3025 * The corresponding flag is set by the stack in the IPv4 3026 * TSO case, but not in IPv6 (at least in FreeBSD 10.2). 3027 * So, set it here because the rest of the flow requires it. 3028 */ 3029 pi->ipi_csum_flags |= CSUM_IP6_TCP; 3030 th->th_sum = in6_cksum_pseudo(ip6, 0, IPPROTO_TCP, 0); 3031 pi->ipi_tso_segsz = m->m_pkthdr.tso_segsz; 3032 } 3033 } 3034 break; 3035 } 3036 #endif 3037 default: 3038 pi->ipi_csum_flags &= ~CSUM_OFFLOAD; 3039 pi->ipi_ip_hlen = 0; 3040 break; 3041 } 3042 *mp = m; 3043 3044 return (0); 3045 } 3046 3047 /* 3048 * If dodgy hardware rejects the scatter gather chain we've handed it 3049 * we'll need to remove the mbuf chain from ifsg_m[] before we can add the 3050 * m_defrag'd mbufs 3051 */ 3052 static __noinline struct mbuf * 3053 iflib_remove_mbuf(iflib_txq_t txq) 3054 { 3055 int ntxd, i, pidx; 3056 struct mbuf *m, *mh, **ifsd_m; 3057 3058 pidx = txq->ift_pidx; 3059 ifsd_m = txq->ift_sds.ifsd_m; 3060 ntxd = txq->ift_size; 3061 mh = m = ifsd_m[pidx]; 3062 ifsd_m[pidx] = NULL; 3063 #if MEMORY_LOGGING 3064 txq->ift_dequeued++; 3065 #endif 3066 i = 1; 3067 3068 while (m) { 3069 ifsd_m[(pidx + i) & (ntxd -1)] = NULL; 3070 #if MEMORY_LOGGING 3071 txq->ift_dequeued++; 3072 #endif 3073 m = m->m_next; 3074 i++; 3075 } 3076 return (mh); 3077 } 3078 3079 static int 3080 iflib_busdma_load_mbuf_sg(iflib_txq_t txq, bus_dma_tag_t tag, bus_dmamap_t map, 3081 struct mbuf **m0, bus_dma_segment_t *segs, int *nsegs, 3082 int max_segs, int flags) 3083 { 3084 if_ctx_t ctx; 3085 if_shared_ctx_t sctx; 3086 if_softc_ctx_t scctx; 3087 int i, next, pidx, err, ntxd, count; 3088 struct mbuf *m, *tmp, **ifsd_m; 3089 3090 m = *m0; 3091 3092 /* 3093 * Please don't ever do this 3094 */ 3095 MPASS(__predict_true(m->m_len > 0)); 3096 3097 ctx = txq->ift_ctx; 3098 sctx = ctx->ifc_sctx; 3099 scctx = &ctx->ifc_softc_ctx; 3100 ifsd_m = txq->ift_sds.ifsd_m; 3101 ntxd = txq->ift_size; 3102 pidx = txq->ift_pidx; 3103 if (map != NULL) { 3104 uint8_t *ifsd_flags = txq->ift_sds.ifsd_flags; 3105 3106 err = bus_dmamap_load_mbuf_sg(tag, map, 3107 *m0, segs, nsegs, BUS_DMA_NOWAIT); 3108 if (err) 3109 return (err); 3110 ifsd_flags[pidx] |= TX_SW_DESC_MAPPED; 3111 count = 0; 3112 m = *m0; 3113 do { 3114 if (__predict_false(m->m_len <= 0)) { 3115 tmp = m; 3116 m = m->m_next; 3117 tmp->m_next = NULL; 3118 m_free(tmp); 3119 continue; 3120 } 3121 m = m->m_next; 3122 count++; 3123 } while (m != NULL); 3124 if (count > *nsegs) { 3125 ifsd_m[pidx] = *m0; 3126 ifsd_m[pidx]->m_flags |= M_TOOBIG; 3127 return (0); 3128 } 3129 m = *m0; 3130 count = 0; 3131 do { 3132 next = (pidx + count) & (ntxd-1); 3133 MPASS(ifsd_m[next] == NULL); 3134 ifsd_m[next] = m; 3135 count++; 3136 tmp = m; 3137 m = m->m_next; 3138 } while (m != NULL); 3139 } else { 3140 int buflen, 
sgsize, maxsegsz, max_sgsize; 3141 vm_offset_t vaddr; 3142 vm_paddr_t curaddr; 3143 3144 count = i = 0; 3145 m = *m0; 3146 if (m->m_pkthdr.csum_flags & CSUM_TSO) 3147 maxsegsz = scctx->isc_tx_tso_segsize_max; 3148 else 3149 maxsegsz = sctx->isc_tx_maxsegsize; 3150 3151 do { 3152 if (__predict_false(m->m_len <= 0)) { 3153 tmp = m; 3154 m = m->m_next; 3155 tmp->m_next = NULL; 3156 m_free(tmp); 3157 continue; 3158 } 3159 buflen = m->m_len; 3160 vaddr = (vm_offset_t)m->m_data; 3161 /* 3162 * see if we can't be smarter about physically 3163 * contiguous mappings 3164 */ 3165 next = (pidx + count) & (ntxd-1); 3166 MPASS(ifsd_m[next] == NULL); 3167 #if MEMORY_LOGGING 3168 txq->ift_enqueued++; 3169 #endif 3170 ifsd_m[next] = m; 3171 while (buflen > 0) { 3172 if (i >= max_segs) 3173 goto err; 3174 max_sgsize = MIN(buflen, maxsegsz); 3175 curaddr = pmap_kextract(vaddr); 3176 sgsize = PAGE_SIZE - (curaddr & PAGE_MASK); 3177 sgsize = MIN(sgsize, max_sgsize); 3178 segs[i].ds_addr = curaddr; 3179 segs[i].ds_len = sgsize; 3180 vaddr += sgsize; 3181 buflen -= sgsize; 3182 i++; 3183 } 3184 count++; 3185 tmp = m; 3186 m = m->m_next; 3187 } while (m != NULL); 3188 *nsegs = i; 3189 } 3190 return (0); 3191 err: 3192 *m0 = iflib_remove_mbuf(txq); 3193 return (EFBIG); 3194 } 3195 3196 static inline caddr_t 3197 calc_next_txd(iflib_txq_t txq, int cidx, uint8_t qid) 3198 { 3199 qidx_t size; 3200 int ntxd; 3201 caddr_t start, end, cur, next; 3202 3203 ntxd = txq->ift_size; 3204 size = txq->ift_txd_size[qid]; 3205 start = txq->ift_ifdi[qid].idi_vaddr; 3206 3207 if (__predict_false(size == 0)) 3208 return (start); 3209 cur = start + size*cidx; 3210 end = start + size*ntxd; 3211 next = CACHE_PTR_NEXT(cur); 3212 return (next < end ? next : start); 3213 } 3214 3215 /* 3216 * Pad an mbuf to ensure a minimum ethernet frame size. 
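 * Some MACs do not pad runt frames in hardware, so drivers that set
 * IFLIB_NEED_ETHER_PAD get software padding here (for classic Ethernet
 * the target would typically be ETHER_MIN_LEN - ETHER_CRC_LEN = 60 bytes).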
3217 * min_frame_size is the frame size (less CRC) to pad the mbuf to 3218 */ 3219 static __noinline int 3220 iflib_ether_pad(device_t dev, struct mbuf **m_head, uint16_t min_frame_size) 3221 { 3222 /* 3223 * 18 is enough bytes to pad an ARP packet to 46 bytes, and 3224 * an ARP message is the smallest common payload I can think of 3225 */ 3226 static char pad[18]; /* just zeros */ 3227 int n; 3228 struct mbuf *new_head; 3229 3230 if (!M_WRITABLE(*m_head)) { 3231 new_head = m_dup(*m_head, M_NOWAIT); 3232 if (new_head == NULL) { 3233 m_freem(*m_head); 3234 device_printf(dev, "cannot pad short frame, m_dup() failed\n"); 3235 DBG_COUNTER_INC(encap_pad_mbuf_fail); 3236 DBG_COUNTER_INC(tx_frees); 3237 return ENOMEM; 3238 } 3239 m_freem(*m_head); 3240 *m_head = new_head; 3241 } 3242 3243 for (n = min_frame_size - (*m_head)->m_pkthdr.len; 3244 n > 0; n -= sizeof(pad)) 3245 if (!m_append(*m_head, min(n, sizeof(pad)), pad)) 3246 break; 3247 3248 if (n > 0) { 3249 m_freem(*m_head); 3250 device_printf(dev, "cannot pad short frame\n"); 3251 DBG_COUNTER_INC(encap_pad_mbuf_fail); 3252 DBG_COUNTER_INC(tx_frees); 3253 return (ENOBUFS); 3254 } 3255 3256 return 0; 3257 } 3258 3259 static int 3260 iflib_encap(iflib_txq_t txq, struct mbuf **m_headp) 3261 { 3262 if_ctx_t ctx; 3263 if_shared_ctx_t sctx; 3264 if_softc_ctx_t scctx; 3265 bus_dma_segment_t *segs; 3266 struct mbuf *m_head; 3267 void *next_txd; 3268 bus_dmamap_t map; 3269 struct if_pkt_info pi; 3270 int remap = 0; 3271 int err, nsegs, ndesc, max_segs, pidx, cidx, next, ntxd; 3272 bus_dma_tag_t desc_tag; 3273 3274 ctx = txq->ift_ctx; 3275 sctx = ctx->ifc_sctx; 3276 scctx = &ctx->ifc_softc_ctx; 3277 segs = txq->ift_segs; 3278 ntxd = txq->ift_size; 3279 m_head = *m_headp; 3280 map = NULL; 3281 3282 /* 3283 * If we're doing TSO the next descriptor to clean may be quite far ahead 3284 */ 3285 cidx = txq->ift_cidx; 3286 pidx = txq->ift_pidx; 3287 if (ctx->ifc_flags & IFC_PREFETCH) { 3288 next = (cidx + CACHE_PTR_INCREMENT) & (ntxd-1); 3289 if (!(ctx->ifc_flags & IFLIB_HAS_TXCQ)) { 3290 next_txd = calc_next_txd(txq, cidx, 0); 3291 prefetch(next_txd); 3292 } 3293 3294 /* prefetch the next cache line of mbuf pointers and flags */ 3295 prefetch(&txq->ift_sds.ifsd_m[next]); 3296 if (txq->ift_sds.ifsd_map != NULL) { 3297 prefetch(&txq->ift_sds.ifsd_map[next]); 3298 next = (cidx + CACHE_LINE_SIZE) & (ntxd-1); 3299 prefetch(&txq->ift_sds.ifsd_flags[next]); 3300 } 3301 } else if (txq->ift_sds.ifsd_map != NULL) 3302 map = txq->ift_sds.ifsd_map[pidx]; 3303 3304 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { 3305 desc_tag = txq->ift_tso_desc_tag; 3306 max_segs = scctx->isc_tx_tso_segments_max; 3307 MPASS(desc_tag != NULL); 3308 MPASS(max_segs > 0); 3309 } else { 3310 desc_tag = txq->ift_desc_tag; 3311 max_segs = scctx->isc_tx_nsegments; 3312 } 3313 if ((sctx->isc_flags & IFLIB_NEED_ETHER_PAD) && 3314 __predict_false(m_head->m_pkthdr.len < scctx->isc_min_frame_size)) { 3315 err = iflib_ether_pad(ctx->ifc_dev, m_headp, scctx->isc_min_frame_size); 3316 if (err) { 3317 DBG_COUNTER_INC(encap_txd_encap_fail); 3318 return err; 3319 } 3320 } 3321 m_head = *m_headp; 3322 3323 pkt_info_zero(&pi); 3324 pi.ipi_mflags = (m_head->m_flags & (M_VLANTAG|M_BCAST|M_MCAST)); 3325 pi.ipi_pidx = pidx; 3326 pi.ipi_qsidx = txq->ift_id; 3327 pi.ipi_len = m_head->m_pkthdr.len; 3328 pi.ipi_csum_flags = m_head->m_pkthdr.csum_flags; 3329 pi.ipi_vtag = (m_head->m_flags & M_VLANTAG) ?
m_head->m_pkthdr.ether_vtag : 0; 3330 3331 /* deliberate bitwise OR to make one condition */ 3332 if (__predict_true((pi.ipi_csum_flags | pi.ipi_vtag))) { 3333 if (__predict_false((err = iflib_parse_header(txq, &pi, m_headp)) != 0)) { 3334 DBG_COUNTER_INC(encap_txd_encap_fail); 3335 return (err); 3336 } 3337 m_head = *m_headp; 3338 } 3339 3340 retry: 3341 err = iflib_busdma_load_mbuf_sg(txq, desc_tag, map, m_headp, segs, &nsegs, max_segs, BUS_DMA_NOWAIT); 3342 defrag: 3343 if (__predict_false(err)) { 3344 switch (err) { 3345 case EFBIG: 3346 /* try collapse once and defrag once */ 3347 if (remap == 0) { 3348 m_head = m_collapse(*m_headp, M_NOWAIT, max_segs); 3349 /* try defrag if collapsing fails */ 3350 if (m_head == NULL) 3351 remap++; 3352 } 3353 if (remap == 1) { 3354 txq->ift_mbuf_defrag++; 3355 m_head = m_defrag(*m_headp, M_NOWAIT); 3356 } 3357 remap++; 3358 if (__predict_false(m_head == NULL)) 3359 goto defrag_failed; 3360 *m_headp = m_head; 3361 goto retry; 3362 break; 3363 case ENOMEM: 3364 txq->ift_no_tx_dma_setup++; 3365 break; 3366 default: 3367 txq->ift_no_tx_dma_setup++; 3368 m_freem(*m_headp); 3369 DBG_COUNTER_INC(tx_frees); 3370 *m_headp = NULL; 3371 break; 3372 } 3373 txq->ift_map_failed++; 3374 DBG_COUNTER_INC(encap_load_mbuf_fail); 3375 DBG_COUNTER_INC(encap_txd_encap_fail); 3376 return (err); 3377 } 3378 3379 /* 3380 * XXX assumes a 1 to 1 relationship between segments and 3381 * descriptors - this does not hold true on all drivers, e.g. 3382 * cxgb 3383 */ 3384 if (__predict_false(nsegs + 2 > TXQ_AVAIL(txq))) { 3385 txq->ift_no_desc_avail++; 3386 if (map != NULL) 3387 bus_dmamap_unload(desc_tag, map); 3388 DBG_COUNTER_INC(encap_txq_avail_fail); 3389 DBG_COUNTER_INC(encap_txd_encap_fail); 3390 if ((txq->ift_task.gt_task.ta_flags & TASK_ENQUEUED) == 0) 3391 GROUPTASK_ENQUEUE(&txq->ift_task); 3392 return (ENOBUFS); 3393 } 3394 /* 3395 * On Intel cards we can greatly reduce the number of TX interrupts 3396 * we see by only setting report status on every Nth descriptor. 3397 * However, this also means that the driver will need to keep track 3398 * of the descriptors that RS was set on to check them for the DD bit. 
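 * (Illustrative numbers, not taken from any particular driver: with RS
 * set on roughly every 32nd descriptor, a burst of 64 single-segment
 * packets completes with two TX interrupts instead of 64.)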
3399 */ 3400 txq->ift_rs_pending += nsegs + 1; 3401 if (txq->ift_rs_pending > TXQ_MAX_RS_DEFERRED(txq) || 3402 iflib_no_tx_batch || (TXQ_AVAIL(txq) - nsegs) <= MAX_TX_DESC(ctx) + 2) { 3403 pi.ipi_flags |= IPI_TX_INTR; 3404 txq->ift_rs_pending = 0; 3405 } 3406 3407 pi.ipi_segs = segs; 3408 pi.ipi_nsegs = nsegs; 3409 3410 MPASS(pidx >= 0 && pidx < txq->ift_size); 3411 #ifdef PKT_DEBUG 3412 print_pkt(&pi); 3413 #endif 3414 if (map != NULL) 3415 bus_dmamap_sync(desc_tag, map, BUS_DMASYNC_PREWRITE); 3416 if ((err = ctx->isc_txd_encap(ctx->ifc_softc, &pi)) == 0) { 3417 if (map != NULL) 3418 bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map, 3419 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 3420 DBG_COUNTER_INC(tx_encap); 3421 MPASS(pi.ipi_new_pidx < txq->ift_size); 3422 3423 ndesc = pi.ipi_new_pidx - pi.ipi_pidx; 3424 if (pi.ipi_new_pidx < pi.ipi_pidx) { 3425 ndesc += txq->ift_size; 3426 txq->ift_gen = 1; 3427 } 3428 /* 3429 * drivers may need as many as 3430 * two sentinels 3431 */ 3432 MPASS(ndesc <= pi.ipi_nsegs + 2); 3433 MPASS(pi.ipi_new_pidx != pidx); 3434 MPASS(ndesc > 0); 3435 txq->ift_in_use += ndesc; 3436 3437 /* 3438 * We update the last software descriptor again here because there may 3439 * be a sentinel and/or there may be more mbufs than segments 3440 */ 3441 txq->ift_pidx = pi.ipi_new_pidx; 3442 txq->ift_npending += pi.ipi_ndescs; 3443 } else { 3444 *m_headp = m_head = iflib_remove_mbuf(txq); 3445 if (err == EFBIG) { 3446 txq->ift_txd_encap_efbig++; 3447 if (remap < 2) { 3448 remap = 1; 3449 goto defrag; 3450 } 3451 } 3452 goto defrag_failed; 3453 } 3454 /* 3455 * err can't possibly be non-zero here, so we don't need to test it 3456 * to see if we need to DBG_COUNTER_INC(encap_txd_encap_fail). 3457 */ 3458 return (err); 3459 3460 defrag_failed: 3461 txq->ift_mbuf_defrag_failed++; 3462 txq->ift_map_failed++; 3463 m_freem(*m_headp); 3464 DBG_COUNTER_INC(tx_frees); 3465 *m_headp = NULL; 3466 DBG_COUNTER_INC(encap_txd_encap_fail); 3467 return (ENOMEM); 3468 } 3469 3470 static void 3471 iflib_tx_desc_free(iflib_txq_t txq, int n) 3472 { 3473 int hasmap; 3474 uint32_t qsize, cidx, mask, gen; 3475 struct mbuf *m, **ifsd_m; 3476 uint8_t *ifsd_flags; 3477 bus_dmamap_t *ifsd_map; 3478 bool do_prefetch; 3479 3480 cidx = txq->ift_cidx; 3481 gen = txq->ift_gen; 3482 qsize = txq->ift_size; 3483 mask = qsize-1; 3484 hasmap = txq->ift_sds.ifsd_map != NULL; 3485 ifsd_flags = txq->ift_sds.ifsd_flags; 3486 ifsd_m = txq->ift_sds.ifsd_m; 3487 ifsd_map = txq->ift_sds.ifsd_map; 3488 do_prefetch = (txq->ift_ctx->ifc_flags & IFC_PREFETCH); 3489 3490 while (n-- > 0) { 3491 if (do_prefetch) { 3492 prefetch(ifsd_m[(cidx + 3) & mask]); 3493 prefetch(ifsd_m[(cidx + 4) & mask]); 3494 } 3495 if (ifsd_m[cidx] != NULL) { 3496 prefetch(&ifsd_m[(cidx + CACHE_PTR_INCREMENT) & mask]); 3497 prefetch(&ifsd_flags[(cidx + CACHE_PTR_INCREMENT) & mask]); 3498 if (hasmap && (ifsd_flags[cidx] & TX_SW_DESC_MAPPED)) { 3499 /* 3500 * does it matter if it's not the TSO tag?
If so we'll 3501 * have to add the type to flags 3502 */ 3503 bus_dmamap_unload(txq->ift_desc_tag, ifsd_map[cidx]); 3504 ifsd_flags[cidx] &= ~TX_SW_DESC_MAPPED; 3505 } 3506 if ((m = ifsd_m[cidx]) != NULL) { 3507 /* XXX we don't support any drivers that batch packets yet */ 3508 MPASS(m->m_nextpkt == NULL); 3509 /* if the number of clusters exceeds the number of segments 3510 * there won't be space on the ring to save a pointer to each 3511 * cluster so we simply free the list here 3512 */ 3513 if (m->m_flags & M_TOOBIG) { 3514 m_freem(m); 3515 } else { 3516 m_free(m); 3517 } 3518 ifsd_m[cidx] = NULL; 3519 #if MEMORY_LOGGING 3520 txq->ift_dequeued++; 3521 #endif 3522 DBG_COUNTER_INC(tx_frees); 3523 } 3524 } 3525 if (__predict_false(++cidx == qsize)) { 3526 cidx = 0; 3527 gen = 0; 3528 } 3529 } 3530 txq->ift_cidx = cidx; 3531 txq->ift_gen = gen; 3532 } 3533 3534 static __inline int 3535 iflib_completed_tx_reclaim(iflib_txq_t txq, int thresh) 3536 { 3537 int reclaim; 3538 if_ctx_t ctx = txq->ift_ctx; 3539 3540 KASSERT(thresh >= 0, ("invalid threshold to reclaim")); 3541 MPASS(thresh /*+ MAX_TX_DESC(txq->ift_ctx) */ < txq->ift_size); 3542 3543 /* 3544 * Need a rate-limiting check so that this isn't called every time 3545 */ 3546 iflib_tx_credits_update(ctx, txq); 3547 reclaim = DESC_RECLAIMABLE(txq); 3548 3549 if (reclaim <= thresh /* + MAX_TX_DESC(txq->ift_ctx) */) { 3550 #ifdef INVARIANTS 3551 if (iflib_verbose_debug) { 3552 printf("%s processed=%ju cleaned=%ju tx_nsegments=%d reclaim=%d thresh=%d\n", __FUNCTION__, 3553 txq->ift_processed, txq->ift_cleaned, txq->ift_ctx->ifc_softc_ctx.isc_tx_nsegments, 3554 reclaim, thresh); 3555 3556 } 3557 #endif 3558 return (0); 3559 } 3560 iflib_tx_desc_free(txq, reclaim); 3561 txq->ift_cleaned += reclaim; 3562 txq->ift_in_use -= reclaim; 3563 3564 return (reclaim); 3565 } 3566 3567 static struct mbuf ** 3568 _ring_peek_one(struct ifmp_ring *r, int cidx, int offset, int remaining) 3569 { 3570 int next, size; 3571 struct mbuf **items; 3572 3573 size = r->size; 3574 next = (cidx + CACHE_PTR_INCREMENT) & (size-1); 3575 items = __DEVOLATILE(struct mbuf **, &r->items[0]); 3576 3577 prefetch(items[(cidx + offset) & (size-1)]); 3578 if (remaining > 1) { 3579 prefetch2cachelines(&items[next]); 3580 prefetch2cachelines(items[(cidx + offset + 1) & (size-1)]); 3581 prefetch2cachelines(items[(cidx + offset + 2) & (size-1)]); 3582 prefetch2cachelines(items[(cidx + offset + 3) & (size-1)]); 3583 } 3584 return (__DEVOLATILE(struct mbuf **, &r->items[(cidx + offset) & (size-1)])); 3585 } 3586 3587 static void 3588 iflib_txq_check_drain(iflib_txq_t txq, int budget) 3589 { 3590 3591 ifmp_ring_check_drainage(txq->ift_br, budget); 3592 } 3593 3594 static uint32_t 3595 iflib_txq_can_drain(struct ifmp_ring *r) 3596 { 3597 iflib_txq_t txq = r->cookie; 3598 if_ctx_t ctx = txq->ift_ctx; 3599 3600 return ((TXQ_AVAIL(txq) > MAX_TX_DESC(ctx) + 2) || 3601 ctx->isc_txd_credits_update(ctx->ifc_softc, txq->ift_id, false)); 3602 } 3603 3604 static uint32_t 3605 iflib_txq_drain(struct ifmp_ring *r, uint32_t cidx, uint32_t pidx) 3606 { 3607 iflib_txq_t txq = r->cookie; 3608 if_ctx_t ctx = txq->ift_ctx; 3609 struct ifnet *ifp = ctx->ifc_ifp; 3610 struct mbuf **mp, *m; 3611 int i, count, consumed, pkt_sent, bytes_sent, mcast_sent, avail; 3612 int reclaimed, err, in_use_prev, desc_used; 3613 bool do_prefetch, ring, rang; 3614 3615 if (__predict_false(!(if_getdrvflags(ifp) & IFF_DRV_RUNNING) || 3616 !LINK_ACTIVE(ctx))) { 3617 DBG_COUNTER_INC(txq_drain_notready); 3618 return (0); 3619 } 3620 
reclaimed = iflib_completed_tx_reclaim(txq, RECLAIM_THRESH(ctx)); 3621 rang = iflib_txd_db_check(ctx, txq, reclaimed, txq->ift_in_use); 3622 avail = IDXDIFF(pidx, cidx, r->size); 3623 if (__predict_false(ctx->ifc_flags & IFC_QFLUSH)) { 3624 DBG_COUNTER_INC(txq_drain_flushing); 3625 for (i = 0; i < avail; i++) { 3626 if (__predict_true(r->items[(cidx + i) & (r->size-1)] != (void *)txq)) 3627 m_free(r->items[(cidx + i) & (r->size-1)]); 3628 r->items[(cidx + i) & (r->size-1)] = NULL; 3629 } 3630 return (avail); 3631 } 3632 3633 if (__predict_false(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_OACTIVE)) { 3634 txq->ift_qstatus = IFLIB_QUEUE_IDLE; 3635 CALLOUT_LOCK(txq); 3636 callout_stop(&txq->ift_timer); 3637 CALLOUT_UNLOCK(txq); 3638 DBG_COUNTER_INC(txq_drain_oactive); 3639 return (0); 3640 } 3641 if (reclaimed) 3642 txq->ift_qstatus = IFLIB_QUEUE_IDLE; 3643 consumed = mcast_sent = bytes_sent = pkt_sent = 0; 3644 count = MIN(avail, TX_BATCH_SIZE); 3645 #ifdef INVARIANTS 3646 if (iflib_verbose_debug) 3647 printf("%s avail=%d ifc_flags=%x txq_avail=%d ", __FUNCTION__, 3648 avail, ctx->ifc_flags, TXQ_AVAIL(txq)); 3649 #endif 3650 do_prefetch = (ctx->ifc_flags & IFC_PREFETCH); 3651 avail = TXQ_AVAIL(txq); 3652 err = 0; 3653 for (desc_used = i = 0; i < count && avail > MAX_TX_DESC(ctx) + 2; i++) { 3654 int rem = do_prefetch ? count - i : 0; 3655 3656 mp = _ring_peek_one(r, cidx, i, rem); 3657 MPASS(mp != NULL && *mp != NULL); 3658 if (__predict_false(*mp == (struct mbuf *)txq)) { 3659 consumed++; 3660 reclaimed++; 3661 continue; 3662 } 3663 in_use_prev = txq->ift_in_use; 3664 err = iflib_encap(txq, mp); 3665 if (__predict_false(err)) { 3666 /* no room - bail out */ 3667 if (err == ENOBUFS) 3668 break; 3669 consumed++; 3670 /* we can't send this packet - skip it */ 3671 continue; 3672 } 3673 consumed++; 3674 pkt_sent++; 3675 m = *mp; 3676 DBG_COUNTER_INC(tx_sent); 3677 bytes_sent += m->m_pkthdr.len; 3678 mcast_sent += !!(m->m_flags & M_MCAST); 3679 avail = TXQ_AVAIL(txq); 3680 3681 txq->ift_db_pending += (txq->ift_in_use - in_use_prev); 3682 desc_used += (txq->ift_in_use - in_use_prev); 3683 ETHER_BPF_MTAP(ifp, m); 3684 if (__predict_false(!(ifp->if_drv_flags & IFF_DRV_RUNNING))) 3685 break; 3686 rang = iflib_txd_db_check(ctx, txq, false, in_use_prev); 3687 } 3688 3689 /* deliberate use of bitwise or to avoid gratuitous short-circuit */ 3690 ring = rang ? 
false : (iflib_min_tx_latency | err) || (TXQ_AVAIL(txq) < MAX_TX_DESC(ctx)); 3691 iflib_txd_db_check(ctx, txq, ring, txq->ift_in_use); 3692 if_inc_counter(ifp, IFCOUNTER_OBYTES, bytes_sent); 3693 if_inc_counter(ifp, IFCOUNTER_OPACKETS, pkt_sent); 3694 if (mcast_sent) 3695 if_inc_counter(ifp, IFCOUNTER_OMCASTS, mcast_sent); 3696 #ifdef INVARIANTS 3697 if (iflib_verbose_debug) 3698 printf("consumed=%d\n", consumed); 3699 #endif 3700 return (consumed); 3701 } 3702 3703 static uint32_t 3704 iflib_txq_drain_always(struct ifmp_ring *r) 3705 { 3706 return (1); 3707 } 3708 3709 static uint32_t 3710 iflib_txq_drain_free(struct ifmp_ring *r, uint32_t cidx, uint32_t pidx) 3711 { 3712 int i, avail; 3713 struct mbuf **mp; 3714 iflib_txq_t txq; 3715 3716 txq = r->cookie; 3717 3718 txq->ift_qstatus = IFLIB_QUEUE_IDLE; 3719 CALLOUT_LOCK(txq); 3720 callout_stop(&txq->ift_timer); 3721 CALLOUT_UNLOCK(txq); 3722 3723 avail = IDXDIFF(pidx, cidx, r->size); 3724 for (i = 0; i < avail; i++) { 3725 mp = _ring_peek_one(r, cidx, i, avail - i); 3726 if (__predict_false(*mp == (struct mbuf *)txq)) 3727 continue; 3728 m_freem(*mp); 3729 DBG_COUNTER_INC(tx_frees); 3730 } 3731 MPASS(ifmp_ring_is_stalled(r) == 0); 3732 return (avail); 3733 } 3734 3735 static void 3736 iflib_ifmp_purge(iflib_txq_t txq) 3737 { 3738 struct ifmp_ring *r; 3739 3740 r = txq->ift_br; 3741 r->drain = iflib_txq_drain_free; 3742 r->can_drain = iflib_txq_drain_always; 3743 3744 ifmp_ring_check_drainage(r, r->size); 3745 3746 r->drain = iflib_txq_drain; 3747 r->can_drain = iflib_txq_can_drain; 3748 } 3749 3750 static void 3751 _task_fn_tx(void *context) 3752 { 3753 iflib_txq_t txq = context; 3754 if_ctx_t ctx = txq->ift_ctx; 3755 struct ifnet *ifp = ctx->ifc_ifp; 3756 int abdicate = ctx->ifc_sysctl_tx_abdicate; 3757 3758 #ifdef IFLIB_DIAGNOSTICS 3759 txq->ift_cpu_exec_count[curcpu]++; 3760 #endif 3761 if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)) 3762 return; 3763 if (if_getcapenable(ifp) & IFCAP_NETMAP) { 3764 if (ctx->isc_txd_credits_update(ctx->ifc_softc, txq->ift_id, false)) 3765 netmap_tx_irq(ifp, txq->ift_id); 3766 IFDI_TX_QUEUE_INTR_ENABLE(ctx, txq->ift_id); 3767 return; 3768 } 3769 #ifdef ALTQ 3770 if (ALTQ_IS_ENABLED(&ifp->if_snd)) 3771 iflib_altq_if_start(ifp); 3772 #endif 3773 if (txq->ift_db_pending) 3774 ifmp_ring_enqueue(txq->ift_br, (void **)&txq, 1, TX_BATCH_SIZE, abdicate); 3775 else if (!abdicate) 3776 ifmp_ring_check_drainage(txq->ift_br, TX_BATCH_SIZE); 3777 /* 3778 * When abdicating, we always need to check drainage, not just when we don't enqueue 3779 */ 3780 if (abdicate) 3781 ifmp_ring_check_drainage(txq->ift_br, TX_BATCH_SIZE); 3783 if (ctx->ifc_flags & IFC_LEGACY) 3784 IFDI_INTR_ENABLE(ctx); 3785 else { 3786 #ifdef INVARIANTS 3787 int rc = 3788 #endif 3789 IFDI_TX_QUEUE_INTR_ENABLE(ctx, txq->ift_id); 3790 KASSERT(rc != ENOTSUP, ("MSI-X support requires queue_intr_enable, but not implemented in driver")); 3791 } 3792 } 3793 3794 static void 3795 _task_fn_rx(void *context) 3796 { 3797 iflib_rxq_t rxq = context; 3798 if_ctx_t ctx = rxq->ifr_ctx; 3799 bool more; 3800 uint16_t budget; 3801 3802 #ifdef IFLIB_DIAGNOSTICS 3803 rxq->ifr_cpu_exec_count[curcpu]++; 3804 #endif 3805 DBG_COUNTER_INC(task_fn_rxs); 3806 if (__predict_false(!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING))) 3807 return; 3808 more = true; 3809 #ifdef DEV_NETMAP 3810 if (if_getcapenable(ctx->ifc_ifp) & IFCAP_NETMAP) { 3811 u_int work = 0; 3812 if (netmap_rx_irq(ctx->ifc_ifp, rxq->ifr_id,
&work)) {
3813 more = false;
3814 }
3815 }
3816 #endif
3817 budget = ctx->ifc_sysctl_rx_budget;
3818 if (budget == 0)
3819 budget = 16; /* XXX */
3820 if (more == false || (more = iflib_rxeof(rxq, budget)) == false) {
3821 if (ctx->ifc_flags & IFC_LEGACY)
3822 IFDI_INTR_ENABLE(ctx);
3823 else {
3824 #ifdef INVARIANTS
3825 int rc =
3826 #endif
3827 IFDI_RX_QUEUE_INTR_ENABLE(ctx, rxq->ifr_id);
3828 KASSERT(rc != ENOTSUP, ("MSI-X support requires queue_intr_enable, but not implemented in driver"));
3829 DBG_COUNTER_INC(rx_intr_enables);
3830 }
3831 }
3832 if (__predict_false(!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)))
3833 return;
3834 if (more)
3835 GROUPTASK_ENQUEUE(&rxq->ifr_task);
3836 }
3837
3838 static void
3839 _task_fn_admin(void *context)
3840 {
3841 if_ctx_t ctx = context;
3842 if_softc_ctx_t sctx = &ctx->ifc_softc_ctx;
3843 iflib_txq_t txq;
3844 int i;
3845 bool oactive, running, do_reset, do_watchdog;
3846 uint32_t reset_on = hz / 2;
3847
3848 STATE_LOCK(ctx);
3849 running = (if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING);
3850 oactive = (if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_OACTIVE);
3851 do_reset = (ctx->ifc_flags & IFC_DO_RESET);
3852 do_watchdog = (ctx->ifc_flags & IFC_DO_WATCHDOG);
3853 ctx->ifc_flags &= ~(IFC_DO_RESET|IFC_DO_WATCHDOG);
3854 STATE_UNLOCK(ctx);
3855
3856 if ((!running && !oactive) &&
3857 !(ctx->ifc_sctx->isc_flags & IFLIB_ADMIN_ALWAYS_RUN))
3858 return;
3859
3860 CTX_LOCK(ctx);
3861 for (txq = ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++) {
3862 CALLOUT_LOCK(txq);
3863 callout_stop(&txq->ift_timer);
3864 CALLOUT_UNLOCK(txq);
3865 }
3866 if (do_watchdog) {
3867 ctx->ifc_watchdog_events++;
3868 IFDI_WATCHDOG_RESET(ctx);
3869 }
3870 IFDI_UPDATE_ADMIN_STATUS(ctx);
3871 for (txq = ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++) {
3872 #ifdef DEV_NETMAP
3873 reset_on = hz / 2;
3874 if (if_getcapenable(ctx->ifc_ifp) & IFCAP_NETMAP)
3875 iflib_netmap_timer_adjust(ctx, txq->ift_id, &reset_on);
3876 #endif
3877 callout_reset_on(&txq->ift_timer, reset_on, iflib_timer, txq, txq->ift_timer.c_cpu);
3878 }
3879 IFDI_LINK_INTR_ENABLE(ctx);
3880 if (do_reset)
3881 iflib_if_init_locked(ctx);
3882 CTX_UNLOCK(ctx);
3883
3884 if (LINK_ACTIVE(ctx) == 0)
3885 return;
3886 for (txq = ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++)
3887 iflib_txq_check_drain(txq, IFLIB_RESTART_BUDGET);
3888 }
3889
3890
3891 static void
3892 _task_fn_iov(void *context)
3893 {
3894 if_ctx_t ctx = context;
3895
3896 if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING))
3897 return;
3898
3899 CTX_LOCK(ctx);
3900 IFDI_VFLR_HANDLE(ctx);
3901 CTX_UNLOCK(ctx);
3902 }
3903
3904 static int
3905 iflib_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
3906 {
3907 int err;
3908 if_int_delay_info_t info;
3909 if_ctx_t ctx;
3910
3911 info = (if_int_delay_info_t)arg1;
3912 ctx = info->iidi_ctx;
3913 info->iidi_req = req;
3914 info->iidi_oidp = oidp;
3915 CTX_LOCK(ctx);
3916 err = IFDI_SYSCTL_INT_DELAY(ctx, info);
3917 CTX_UNLOCK(ctx);
3918 return (err);
3919 }
3920
3921 /*********************************************************************
3922 *
3923 * IFNET FUNCTIONS
3924 *
3925 **********************************************************************/
3926
3927 static void
3928 iflib_if_init_locked(if_ctx_t ctx)
3929 {
3930 iflib_stop(ctx);
3931 iflib_init_locked(ctx);
3932 }
3933
3934
3935 static void
3936 iflib_if_init(void *arg)
3937 {
3938 if_ctx_t ctx = arg;
3939
3940 CTX_LOCK(ctx);
3941 iflib_if_init_locked(ctx);
3942 CTX_UNLOCK(ctx);
3943 }
3944
3945 static int
3946
iflib_if_transmit(if_t ifp, struct mbuf *m)
3947 {
3948 if_ctx_t ctx = if_getsoftc(ifp);
3949
3950 iflib_txq_t txq;
3951 int err, qidx;
3952 int abdicate = ctx->ifc_sysctl_tx_abdicate;
3953
3954 if (__predict_false((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || !LINK_ACTIVE(ctx))) {
3955 DBG_COUNTER_INC(tx_frees);
3956 m_freem(m);
3957 return (ENOBUFS);
3958 }
3959
3960 MPASS(m->m_nextpkt == NULL);
3961 /* ALTQ-enabled interfaces always use queue 0. */
3962 qidx = 0;
3963 if ((NTXQSETS(ctx) > 1) && M_HASHTYPE_GET(m) && !ALTQ_IS_ENABLED(&ifp->if_snd))
3964 qidx = QIDX(ctx, m);
3965 /*
3966 * XXX calculate buf_ring based on flowid (divvy up bits?)
3967 */
3968 txq = &ctx->ifc_txqs[qidx];
3969
3970 #ifdef DRIVER_BACKPRESSURE
3971 if (txq->ift_closed) {
3972 while (m != NULL) {
3973 next = m->m_nextpkt;
3974 m->m_nextpkt = NULL;
3975 m_freem(m);
3976 DBG_COUNTER_INC(tx_frees);
3977 m = next;
3978 }
3979 return (ENOBUFS);
3980 }
3981 #endif
3982 #ifdef notyet
3983 qidx = count = 0;
3984 mp = marr;
3985 next = m;
3986 do {
3987 count++;
3988 next = next->m_nextpkt;
3989 } while (next != NULL);
3990
3991 if (count > nitems(marr))
3992 if ((mp = malloc(count*sizeof(struct mbuf *), M_IFLIB, M_NOWAIT)) == NULL) {
3993 /* XXX check nextpkt */
3994 m_freem(m);
3995 /* XXX simplify for now */
3996 DBG_COUNTER_INC(tx_frees);
3997 return (ENOBUFS);
3998 }
3999 for (next = m, i = 0; next != NULL; i++) {
4000 mp[i] = next;
4001 next = next->m_nextpkt;
4002 mp[i]->m_nextpkt = NULL;
4003 }
4004 #endif
4005 DBG_COUNTER_INC(tx_seen);
4006 err = ifmp_ring_enqueue(txq->ift_br, (void **)&m, 1, TX_BATCH_SIZE, abdicate);
4007
4008 if (abdicate)
4009 GROUPTASK_ENQUEUE(&txq->ift_task);
4010 if (err) {
4011 if (!abdicate)
4012 GROUPTASK_ENQUEUE(&txq->ift_task);
4013 /* support forthcoming later */
4014 #ifdef DRIVER_BACKPRESSURE
4015 txq->ift_closed = TRUE;
4016 #endif
4017 ifmp_ring_check_drainage(txq->ift_br, TX_BATCH_SIZE);
4018 m_freem(m);
4019 DBG_COUNTER_INC(tx_frees);
4020 }
4021
4022 return (err);
4023 }
4024
4025 #ifdef ALTQ
4026 /*
4027 * The overall approach to integrating iflib with ALTQ is to continue to use
4028 * the iflib mp_ring machinery between the ALTQ queue(s) and the hardware
4029 * ring. Technically, when using ALTQ, queueing to an intermediate mp_ring
4030 * is redundant/unnecessary, but doing so minimizes the amount of
4031 * ALTQ-specific code required in iflib. It is assumed that the overhead of
4032 * redundantly queueing to an intermediate mp_ring is swamped by the
4033 * performance limitations inherent in using ALTQ.
4034 *
4035 * When ALTQ support is compiled in, all iflib drivers will use a transmit
4036 * routine, iflib_altq_if_transmit(), that checks if ALTQ is enabled for the
4037 * given interface. If ALTQ is enabled for an interface, then all
4038 * transmitted packets for that interface will be submitted to the ALTQ
4039 * subsystem via IFQ_ENQUEUE(). We don't use the legacy if_transmit()
4040 * implementation because it uses IFQ_HANDOFF(), which will duplicatively
4041 * update stats that the iflib machinery handles, and which is sensitive to
4042 * the disused IFF_DRV_OACTIVE flag. Additionally, iflib_altq_if_start()
4043 * will be installed as the start routine for use by ALTQ facilities that
4044 * need to trigger queue drains on a scheduled basis.
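 *
 * As an informal sketch of the resulting dispatch (not verbatim code):
 *
 *	iflib_altq_if_transmit(ifp, m)
 *	    ALTQ enabled:  IFQ_ENQUEUE(&ifp->if_snd, m, err), then
 *			   iflib_altq_if_start(ifp) dequeues packets and
 *			   hands each one to iflib_if_transmit()
 *	    ALTQ disabled: iflib_if_transmit(ifp, m) directly, which
 *			   enqueues onto the per-queue mp_ring as usual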
4045 * 4046 */ 4047 static void 4048 iflib_altq_if_start(if_t ifp) 4049 { 4050 struct ifaltq *ifq = &ifp->if_snd; 4051 struct mbuf *m; 4052 4053 IFQ_LOCK(ifq); 4054 IFQ_DEQUEUE_NOLOCK(ifq, m); 4055 while (m != NULL) { 4056 iflib_if_transmit(ifp, m); 4057 IFQ_DEQUEUE_NOLOCK(ifq, m); 4058 } 4059 IFQ_UNLOCK(ifq); 4060 } 4061 4062 static int 4063 iflib_altq_if_transmit(if_t ifp, struct mbuf *m) 4064 { 4065 int err; 4066 4067 if (ALTQ_IS_ENABLED(&ifp->if_snd)) { 4068 IFQ_ENQUEUE(&ifp->if_snd, m, err); 4069 if (err == 0) 4070 iflib_altq_if_start(ifp); 4071 } else 4072 err = iflib_if_transmit(ifp, m); 4073 4074 return (err); 4075 } 4076 #endif /* ALTQ */ 4077 4078 static void 4079 iflib_if_qflush(if_t ifp) 4080 { 4081 if_ctx_t ctx = if_getsoftc(ifp); 4082 iflib_txq_t txq = ctx->ifc_txqs; 4083 int i; 4084 4085 STATE_LOCK(ctx); 4086 ctx->ifc_flags |= IFC_QFLUSH; 4087 STATE_UNLOCK(ctx); 4088 for (i = 0; i < NTXQSETS(ctx); i++, txq++) 4089 while (!(ifmp_ring_is_idle(txq->ift_br) || ifmp_ring_is_stalled(txq->ift_br))) 4090 iflib_txq_check_drain(txq, 0); 4091 STATE_LOCK(ctx); 4092 ctx->ifc_flags &= ~IFC_QFLUSH; 4093 STATE_UNLOCK(ctx); 4094 4095 /* 4096 * When ALTQ is enabled, this will also take care of purging the 4097 * ALTQ queue(s). 4098 */ 4099 if_qflush(ifp); 4100 } 4101 4102 4103 #define IFCAP_FLAGS (IFCAP_HWCSUM_IPV6 | IFCAP_HWCSUM | IFCAP_LRO | \ 4104 IFCAP_TSO | IFCAP_VLAN_HWTAGGING | IFCAP_HWSTATS | \ 4105 IFCAP_VLAN_MTU | IFCAP_VLAN_HWFILTER | \ 4106 IFCAP_VLAN_HWTSO | IFCAP_VLAN_HWCSUM) 4107 4108 static int 4109 iflib_if_ioctl(if_t ifp, u_long command, caddr_t data) 4110 { 4111 if_ctx_t ctx = if_getsoftc(ifp); 4112 struct ifreq *ifr = (struct ifreq *)data; 4113 #if defined(INET) || defined(INET6) 4114 struct ifaddr *ifa = (struct ifaddr *)data; 4115 #endif 4116 bool avoid_reset = FALSE; 4117 int err = 0, reinit = 0, bits; 4118 4119 switch (command) { 4120 case SIOCSIFADDR: 4121 #ifdef INET 4122 if (ifa->ifa_addr->sa_family == AF_INET) 4123 avoid_reset = TRUE; 4124 #endif 4125 #ifdef INET6 4126 if (ifa->ifa_addr->sa_family == AF_INET6) 4127 avoid_reset = TRUE; 4128 #endif 4129 /* 4130 ** Calling init results in link renegotiation, 4131 ** so we avoid doing it when possible. 
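	** (For example, assigning an IPv4 address to an interface that is
	** already running only needs IFF_UP set and an arp_ifinit() below,
	** not a full stop/init cycle.)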
4132 */ 4133 if (avoid_reset) { 4134 if_setflagbits(ifp, IFF_UP,0); 4135 if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) 4136 reinit = 1; 4137 #ifdef INET 4138 if (!(if_getflags(ifp) & IFF_NOARP)) 4139 arp_ifinit(ifp, ifa); 4140 #endif 4141 } else 4142 err = ether_ioctl(ifp, command, data); 4143 break; 4144 case SIOCSIFMTU: 4145 CTX_LOCK(ctx); 4146 if (ifr->ifr_mtu == if_getmtu(ifp)) { 4147 CTX_UNLOCK(ctx); 4148 break; 4149 } 4150 bits = if_getdrvflags(ifp); 4151 /* stop the driver and free any clusters before proceeding */ 4152 iflib_stop(ctx); 4153 4154 if ((err = IFDI_MTU_SET(ctx, ifr->ifr_mtu)) == 0) { 4155 STATE_LOCK(ctx); 4156 if (ifr->ifr_mtu > ctx->ifc_max_fl_buf_size) 4157 ctx->ifc_flags |= IFC_MULTISEG; 4158 else 4159 ctx->ifc_flags &= ~IFC_MULTISEG; 4160 STATE_UNLOCK(ctx); 4161 err = if_setmtu(ifp, ifr->ifr_mtu); 4162 } 4163 iflib_init_locked(ctx); 4164 STATE_LOCK(ctx); 4165 if_setdrvflags(ifp, bits); 4166 STATE_UNLOCK(ctx); 4167 CTX_UNLOCK(ctx); 4168 break; 4169 case SIOCSIFFLAGS: 4170 CTX_LOCK(ctx); 4171 if (if_getflags(ifp) & IFF_UP) { 4172 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { 4173 if ((if_getflags(ifp) ^ ctx->ifc_if_flags) & 4174 (IFF_PROMISC | IFF_ALLMULTI)) { 4175 err = IFDI_PROMISC_SET(ctx, if_getflags(ifp)); 4176 } 4177 } else 4178 reinit = 1; 4179 } else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { 4180 iflib_stop(ctx); 4181 } 4182 ctx->ifc_if_flags = if_getflags(ifp); 4183 CTX_UNLOCK(ctx); 4184 break; 4185 case SIOCADDMULTI: 4186 case SIOCDELMULTI: 4187 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { 4188 CTX_LOCK(ctx); 4189 IFDI_INTR_DISABLE(ctx); 4190 IFDI_MULTI_SET(ctx); 4191 IFDI_INTR_ENABLE(ctx); 4192 CTX_UNLOCK(ctx); 4193 } 4194 break; 4195 case SIOCSIFMEDIA: 4196 CTX_LOCK(ctx); 4197 IFDI_MEDIA_SET(ctx); 4198 CTX_UNLOCK(ctx); 4199 /* falls thru */ 4200 case SIOCGIFMEDIA: 4201 case SIOCGIFXMEDIA: 4202 err = ifmedia_ioctl(ifp, ifr, &ctx->ifc_media, command); 4203 break; 4204 case SIOCGI2C: 4205 { 4206 struct ifi2creq i2c; 4207 4208 err = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c)); 4209 if (err != 0) 4210 break; 4211 if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) { 4212 err = EINVAL; 4213 break; 4214 } 4215 if (i2c.len > sizeof(i2c.data)) { 4216 err = EINVAL; 4217 break; 4218 } 4219 4220 if ((err = IFDI_I2C_REQ(ctx, &i2c)) == 0) 4221 err = copyout(&i2c, ifr_data_get_ptr(ifr), 4222 sizeof(i2c)); 4223 break; 4224 } 4225 case SIOCSIFCAP: 4226 { 4227 int mask, setmask, oldmask; 4228 4229 oldmask = if_getcapenable(ifp); 4230 mask = ifr->ifr_reqcap ^ oldmask; 4231 mask &= ctx->ifc_softc_ctx.isc_capabilities; 4232 setmask = 0; 4233 #ifdef TCP_OFFLOAD 4234 setmask |= mask & (IFCAP_TOE4|IFCAP_TOE6); 4235 #endif 4236 setmask |= (mask & IFCAP_FLAGS); 4237 setmask |= (mask & IFCAP_WOL); 4238 4239 /* 4240 * If we're disabling any RX csum, disable all the ones 4241 * the driver supports. This assumes all supported are 4242 * enabled. 4243 * 4244 * Otherwise, if they've changed, enable all of them. 
4245 */ 4246 if ((setmask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) < 4247 (oldmask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6))) 4248 setmask &= ~(IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6); 4249 else if ((setmask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) != 4250 (oldmask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6))) 4251 setmask |= (mask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)); 4252 4253 /* 4254 * want to ensure that traffic has stopped before we change any of the flags 4255 */ 4256 if (setmask) { 4257 CTX_LOCK(ctx); 4258 bits = if_getdrvflags(ifp); 4259 if (bits & IFF_DRV_RUNNING && setmask & ~IFCAP_WOL) 4260 iflib_stop(ctx); 4261 STATE_LOCK(ctx); 4262 if_togglecapenable(ifp, setmask); 4263 STATE_UNLOCK(ctx); 4264 if (bits & IFF_DRV_RUNNING && setmask & ~IFCAP_WOL) 4265 iflib_init_locked(ctx); 4266 STATE_LOCK(ctx); 4267 if_setdrvflags(ifp, bits); 4268 STATE_UNLOCK(ctx); 4269 CTX_UNLOCK(ctx); 4270 } 4271 if_vlancap(ifp); 4272 break; 4273 } 4274 case SIOCGPRIVATE_0: 4275 case SIOCSDRVSPEC: 4276 case SIOCGDRVSPEC: 4277 CTX_LOCK(ctx); 4278 err = IFDI_PRIV_IOCTL(ctx, command, data); 4279 CTX_UNLOCK(ctx); 4280 break; 4281 default: 4282 err = ether_ioctl(ifp, command, data); 4283 break; 4284 } 4285 if (reinit) 4286 iflib_if_init(ctx); 4287 return (err); 4288 } 4289 4290 static uint64_t 4291 iflib_if_get_counter(if_t ifp, ift_counter cnt) 4292 { 4293 if_ctx_t ctx = if_getsoftc(ifp); 4294 4295 return (IFDI_GET_COUNTER(ctx, cnt)); 4296 } 4297 4298 /********************************************************************* 4299 * 4300 * OTHER FUNCTIONS EXPORTED TO THE STACK 4301 * 4302 **********************************************************************/ 4303 4304 static void 4305 iflib_vlan_register(void *arg, if_t ifp, uint16_t vtag) 4306 { 4307 if_ctx_t ctx = if_getsoftc(ifp); 4308 4309 if ((void *)ctx != arg) 4310 return; 4311 4312 if ((vtag == 0) || (vtag > 4095)) 4313 return; 4314 4315 CTX_LOCK(ctx); 4316 IFDI_VLAN_REGISTER(ctx, vtag); 4317 /* Re-init to load the changes */ 4318 if (if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER) 4319 iflib_if_init_locked(ctx); 4320 CTX_UNLOCK(ctx); 4321 } 4322 4323 static void 4324 iflib_vlan_unregister(void *arg, if_t ifp, uint16_t vtag) 4325 { 4326 if_ctx_t ctx = if_getsoftc(ifp); 4327 4328 if ((void *)ctx != arg) 4329 return; 4330 4331 if ((vtag == 0) || (vtag > 4095)) 4332 return; 4333 4334 CTX_LOCK(ctx); 4335 IFDI_VLAN_UNREGISTER(ctx, vtag); 4336 /* Re-init to load the changes */ 4337 if (if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER) 4338 iflib_if_init_locked(ctx); 4339 CTX_UNLOCK(ctx); 4340 } 4341 4342 static void 4343 iflib_led_func(void *arg, int onoff) 4344 { 4345 if_ctx_t ctx = arg; 4346 4347 CTX_LOCK(ctx); 4348 IFDI_LED_FUNC(ctx, onoff); 4349 CTX_UNLOCK(ctx); 4350 } 4351 4352 /********************************************************************* 4353 * 4354 * BUS FUNCTION DEFINITIONS 4355 * 4356 **********************************************************************/ 4357 4358 int 4359 iflib_device_probe(device_t dev) 4360 { 4361 pci_vendor_info_t *ent; 4362 4363 uint16_t pci_vendor_id, pci_device_id; 4364 uint16_t pci_subvendor_id, pci_subdevice_id; 4365 uint16_t pci_rev_id; 4366 if_shared_ctx_t sctx; 4367 4368 if ((sctx = DEVICE_REGISTER(dev)) == NULL || sctx->isc_magic != IFLIB_MAGIC) 4369 return (ENOTSUP); 4370 4371 pci_vendor_id = pci_get_vendor(dev); 4372 pci_device_id = pci_get_device(dev); 4373 pci_subvendor_id = pci_get_subvendor(dev); 4374 pci_subdevice_id = pci_get_subdevice(dev); 4375 pci_rev_id = pci_get_revid(dev); 4376 if (sctx->isc_parse_devinfo != NULL) 4377 
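/* let the driver normalize the IDs before the match loop below */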
sctx->isc_parse_devinfo(&pci_device_id, &pci_subvendor_id, &pci_subdevice_id, &pci_rev_id);
4378
4379 ent = sctx->isc_vendor_info;
4380 while (ent->pvi_vendor_id != 0) {
4381 if (pci_vendor_id != ent->pvi_vendor_id) {
4382 ent++;
4383 continue;
4384 }
4385 if ((pci_device_id == ent->pvi_device_id) &&
4386 ((pci_subvendor_id == ent->pvi_subvendor_id) ||
4387 (ent->pvi_subvendor_id == 0)) &&
4388 ((pci_subdevice_id == ent->pvi_subdevice_id) ||
4389 (ent->pvi_subdevice_id == 0)) &&
4390 ((pci_rev_id == ent->pvi_rev_id) ||
4391 (ent->pvi_rev_id == 0))) {
4392
4393 device_set_desc_copy(dev, ent->pvi_name);
4394 /* this needs to be changed to zero if the bus probing code
4395 * ever stops re-probing on best match because the sctx
4396 * may have its values overwritten by register calls
4397 * in subsequent probes
4398 */
4399 return (BUS_PROBE_DEFAULT);
4400 }
4401 ent++;
4402 }
4403 return (ENXIO);
4404 }
4405
4406 static void
4407 iflib_reset_qvalues(if_ctx_t ctx)
4408 {
4409 if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
4410 if_shared_ctx_t sctx = ctx->ifc_sctx;
4411 device_t dev = ctx->ifc_dev;
4412 int i;
4413
4414 scctx->isc_txrx_budget_bytes_max = IFLIB_MAX_TX_BYTES;
4415 scctx->isc_tx_qdepth = IFLIB_DEFAULT_TX_QDEPTH;
4416 /*
4417 * XXX sanity check that ntxd & nrxd are a power of 2
4418 */
4419 if (ctx->ifc_sysctl_ntxqs != 0)
4420 scctx->isc_ntxqsets = ctx->ifc_sysctl_ntxqs;
4421 if (ctx->ifc_sysctl_nrxqs != 0)
4422 scctx->isc_nrxqsets = ctx->ifc_sysctl_nrxqs;
4423
4424 for (i = 0; i < sctx->isc_ntxqs; i++) {
4425 if (ctx->ifc_sysctl_ntxds[i] != 0)
4426 scctx->isc_ntxd[i] = ctx->ifc_sysctl_ntxds[i];
4427 else
4428 scctx->isc_ntxd[i] = sctx->isc_ntxd_default[i];
4429 }
4430
4431 for (i = 0; i < sctx->isc_nrxqs; i++) {
4432 if (ctx->ifc_sysctl_nrxds[i] != 0)
4433 scctx->isc_nrxd[i] = ctx->ifc_sysctl_nrxds[i];
4434 else
4435 scctx->isc_nrxd[i] = sctx->isc_nrxd_default[i];
4436 }
4437
4438 for (i = 0; i < sctx->isc_nrxqs; i++) {
4439 if (scctx->isc_nrxd[i] < sctx->isc_nrxd_min[i]) {
4440 device_printf(dev, "nrxd%d: %d less than nrxd_min %d - resetting to min\n",
4441 i, scctx->isc_nrxd[i], sctx->isc_nrxd_min[i]);
4442 scctx->isc_nrxd[i] = sctx->isc_nrxd_min[i];
4443 }
4444 if (scctx->isc_nrxd[i] > sctx->isc_nrxd_max[i]) {
4445 device_printf(dev, "nrxd%d: %d greater than nrxd_max %d - resetting to max\n",
4446 i, scctx->isc_nrxd[i], sctx->isc_nrxd_max[i]);
4447 scctx->isc_nrxd[i] = sctx->isc_nrxd_max[i];
4448 }
4449 }
4450
4451 for (i = 0; i < sctx->isc_ntxqs; i++) {
4452 if (scctx->isc_ntxd[i] < sctx->isc_ntxd_min[i]) {
4453 device_printf(dev, "ntxd%d: %d less than ntxd_min %d - resetting to min\n",
4454 i, scctx->isc_ntxd[i], sctx->isc_ntxd_min[i]);
4455 scctx->isc_ntxd[i] = sctx->isc_ntxd_min[i];
4456 }
4457 if (scctx->isc_ntxd[i] > sctx->isc_ntxd_max[i]) {
4458 device_printf(dev, "ntxd%d: %d greater than ntxd_max %d - resetting to max\n",
4459 i, scctx->isc_ntxd[i], sctx->isc_ntxd_max[i]);
4460 scctx->isc_ntxd[i] = sctx->isc_ntxd_max[i];
4461 }
4462 }
4463 }
4464
4465 int
4466 iflib_device_register(device_t dev, void *sc, if_shared_ctx_t sctx, if_ctx_t *ctxp)
4467 {
4468 int err, rid, msix;
4469 if_ctx_t ctx;
4470 if_t ifp;
4471 if_softc_ctx_t scctx;
4472 int i;
4473 uint16_t main_txq;
4474 uint16_t main_rxq;
4475
4476
4477 ctx = malloc(sizeof(*ctx), M_IFLIB, M_WAITOK|M_ZERO);
4478
4479 if (sc == NULL) {
4480 sc = malloc(sctx->isc_driver->size, M_IFLIB, M_WAITOK|M_ZERO);
4481 device_set_softc(dev, ctx);
4482 ctx->ifc_flags |= IFC_SC_ALLOCATED;
4483 }
4484
4485 ctx->ifc_sctx = sctx;
4486
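	/*
	 * Typical driver glue (a sketch; "foo" is a hypothetical driver):
	 *
	 *	static device_method_t foo_methods[] = {
	 *		DEVMETHOD(device_register, foo_register),
	 *		DEVMETHOD(device_probe,    iflib_device_probe),
	 *		DEVMETHOD(device_attach,   iflib_device_attach),
	 *		DEVMETHOD(device_detach,   iflib_device_detach),
	 *		DEVMETHOD_END
	 *	};
	 *
	 * iflib_device_attach() calls here with sc == NULL, so the softc is
	 * allocated above and flagged IFC_SC_ALLOCATED so that
	 * iflib_device_deregister() knows to free it.
	 */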
ctx->ifc_dev = dev;
4487 ctx->ifc_softc = sc;
4488
4489 if ((err = iflib_register(ctx)) != 0) {
4490 if (ctx->ifc_flags & IFC_SC_ALLOCATED)
4491 free(sc, M_IFLIB);
4492 free(ctx, M_IFLIB);
4493 device_printf(dev, "iflib_register failed %d\n", err);
4494 return (err);
4495 }
4496 iflib_add_device_sysctl_pre(ctx);
4497
4498 scctx = &ctx->ifc_softc_ctx;
4499 ifp = ctx->ifc_ifp;
4500
4501 iflib_reset_qvalues(ctx);
4502 CTX_LOCK(ctx);
4503 if ((err = IFDI_ATTACH_PRE(ctx)) != 0) {
4504 CTX_UNLOCK(ctx);
4505 device_printf(dev, "IFDI_ATTACH_PRE failed %d\n", err);
4506 return (err);
4507 }
4508 _iflib_pre_assert(scctx);
4509 ctx->ifc_txrx = *scctx->isc_txrx;
4510
4511 #ifdef INVARIANTS
4512 MPASS(scctx->isc_capabilities);
4513 if (scctx->isc_capabilities & IFCAP_TXCSUM)
4514 MPASS(scctx->isc_tx_csum_flags);
4515 #endif
4516
4517 if_setcapabilities(ifp, scctx->isc_capabilities | IFCAP_HWSTATS);
4518 if_setcapenable(ifp, scctx->isc_capenable | IFCAP_HWSTATS);
4519
4520 if (scctx->isc_ntxqsets == 0 || (scctx->isc_ntxqsets_max && scctx->isc_ntxqsets_max < scctx->isc_ntxqsets))
4521 scctx->isc_ntxqsets = scctx->isc_ntxqsets_max;
4522 if (scctx->isc_nrxqsets == 0 || (scctx->isc_nrxqsets_max && scctx->isc_nrxqsets_max < scctx->isc_nrxqsets))
4523 scctx->isc_nrxqsets = scctx->isc_nrxqsets_max;
4524
4525 #ifdef ACPI_DMAR
4526 if (dmar_get_dma_tag(device_get_parent(dev), dev) != NULL)
4527 ctx->ifc_flags |= IFC_DMAR;
4528 #elif !(defined(__i386__) || defined(__amd64__))
4529 /* set unconditionally for !x86 */
4530 ctx->ifc_flags |= IFC_DMAR;
4531 #endif
4532
4533 main_txq = (sctx->isc_flags & IFLIB_HAS_TXCQ) ? 1 : 0;
4534 main_rxq = (sctx->isc_flags & IFLIB_HAS_RXCQ) ? 1 : 0;
4535
4536 /* XXX change for per-queue sizes */
4537 device_printf(dev, "using %d tx descriptors and %d rx descriptors\n",
4538 scctx->isc_ntxd[main_txq], scctx->isc_nrxd[main_rxq]);
4539 for (i = 0; i < sctx->isc_nrxqs; i++) {
4540 if (!powerof2(scctx->isc_nrxd[i])) {
4541 /* round down instead? */
4542 device_printf(dev, "# rx descriptors must be a power of 2\n");
4543 err = EINVAL;
4544 goto fail;
4545 }
4546 }
4547 for (i = 0; i < sctx->isc_ntxqs; i++) {
4548 if (!powerof2(scctx->isc_ntxd[i])) {
4549 device_printf(dev,
4550 "# tx descriptors must be a power of 2\n");
4551 err = EINVAL;
4552 goto fail;
4553 }
4554 }
4555
4556 if (scctx->isc_tx_nsegments > scctx->isc_ntxd[main_txq] /
4557 MAX_SINGLE_PACKET_FRACTION)
4558 scctx->isc_tx_nsegments = max(1, scctx->isc_ntxd[main_txq] /
4559 MAX_SINGLE_PACKET_FRACTION);
4560 if (scctx->isc_tx_tso_segments_max > scctx->isc_ntxd[main_txq] /
4561 MAX_SINGLE_PACKET_FRACTION)
4562 scctx->isc_tx_tso_segments_max = max(1,
4563 scctx->isc_ntxd[main_txq] / MAX_SINGLE_PACKET_FRACTION);
4564
4565 /* TSO parameters - dig these out of the data sheet - simply correspond to tag setup */
4566 if (if_getcapabilities(ifp) & IFCAP_TSO) {
4567 /*
4568 * The stack can't handle a TSO size larger than IP_MAXPACKET,
4569 * but some MACs do.
4570 */
4571 if_sethwtsomax(ifp, min(scctx->isc_tx_tso_size_max,
4572 IP_MAXPACKET));
4573 /*
4574 * Take maximum number of m_pullup(9)'s in iflib_parse_header()
4575 * into account. In the worst case, each of these calls will
4576 * add another mbuf and, thus, the requirement for another DMA
4577 * segment. So for best performance, it doesn't make sense to
4578 * advertise a maximum of TSO segments that typically will
4579 * require defragmentation in iflib_encap().
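 *
 * (This is presumably the origin of the "- 3" below: up to three
 * m_pullup(9) calls - Ethernet, IP and TCP headers - may each add an
 * mbuf, so up to three extra DMA segments are reserved.)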
4580 */
4581 if_sethwtsomaxsegcount(ifp, scctx->isc_tx_tso_segments_max - 3);
4582 if_sethwtsomaxsegsize(ifp, scctx->isc_tx_tso_segsize_max);
4583 }
4584 if (scctx->isc_rss_table_size == 0)
4585 scctx->isc_rss_table_size = 64;
4586 scctx->isc_rss_table_mask = scctx->isc_rss_table_size - 1;
4587
4588 GROUPTASK_INIT(&ctx->ifc_admin_task, 0, _task_fn_admin, ctx);
4589 /* XXX format name */
4590 taskqgroup_attach(qgroup_if_config_tqg, &ctx->ifc_admin_task, ctx, -1, "admin");
4591
4592 /* Set up cpu set. If it fails, use the set of all CPUs. */
4593 if (bus_get_cpus(dev, INTR_CPUS, sizeof(ctx->ifc_cpus), &ctx->ifc_cpus) != 0) {
4594 device_printf(dev, "Unable to fetch CPU list\n");
4595 CPU_COPY(&all_cpus, &ctx->ifc_cpus);
4596 }
4597 MPASS(CPU_COUNT(&ctx->ifc_cpus) > 0);
4598
4599 /*
4600 ** Now set up MSI or MSI-X; this should return
4601 ** the number of supported vectors
4602 ** (1 for MSI).
4603 */
4604 if (sctx->isc_flags & IFLIB_SKIP_MSIX) {
4605 msix = scctx->isc_vectors;
4606 } else if (scctx->isc_msix_bar != 0)
4607 /*
4608 * The simple fact that isc_msix_bar is not 0 does not mean we
4609 * have a good value there that is known to work.
4610 */
4611 msix = iflib_msix_init(ctx);
4612 else {
4613 scctx->isc_vectors = 1;
4614 scctx->isc_ntxqsets = 1;
4615 scctx->isc_nrxqsets = 1;
4616 scctx->isc_intr = IFLIB_INTR_LEGACY;
4617 msix = 0;
4618 }
4619 /* Get memory for the station queues */
4620 if ((err = iflib_queues_alloc(ctx))) {
4621 device_printf(dev, "Unable to allocate queue memory\n");
4622 goto fail;
4623 }
4624
4625 if ((err = iflib_qset_structures_setup(ctx)))
4626 goto fail_queues;
4627
4628 /*
4629 * Group taskqueues aren't properly set up until SMP is started,
4630 * so we disable interrupts until we can handle them post
4631 * SI_SUB_SMP.
4632 *
4633 * XXX: disabling interrupts doesn't actually work, at least for
4634 * the non-MSI case. When they occur before SI_SUB_SMP completes,
4635 * we do null handling and depend on this not causing too large an
4636 * interrupt storm.
4637 */
4638 IFDI_INTR_DISABLE(ctx);
4639 if (msix > 1 && (err = IFDI_MSIX_INTR_ASSIGN(ctx, msix)) != 0) {
4640 device_printf(dev, "IFDI_MSIX_INTR_ASSIGN failed %d\n", err);
4641 goto fail_intr_free;
4642 }
4643 if (msix <= 1) {
4644 rid = 0;
4645 if (scctx->isc_intr == IFLIB_INTR_MSI) {
4646 MPASS(msix == 1);
4647 rid = 1;
4648 }
4649 if ((err = iflib_legacy_setup(ctx, ctx->isc_legacy_intr, ctx->ifc_softc, &rid, "irq0")) != 0) {
4650 device_printf(dev, "iflib_legacy_setup failed %d\n", err);
4651 goto fail_intr_free;
4652 }
4653 }
4654
4655 ether_ifattach(ctx->ifc_ifp, ctx->ifc_mac);
4656
4657 if ((err = IFDI_ATTACH_POST(ctx)) != 0) {
4658 device_printf(dev, "IFDI_ATTACH_POST failed %d\n", err);
4659 goto fail_detach;
4660 }
4661
4662 /*
4663 * Tell the upper layer(s) if IFCAP_VLAN_MTU is supported.
4664 * This must appear after the call to ether_ifattach() because
4665 * ether_ifattach() sets if_hdrlen to the default value.
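 * (struct ether_vlan_header is 4 bytes larger than a plain Ethernet
 * header, making room for the 802.1Q tag.)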
4666 */ 4667 if (if_getcapabilities(ifp) & IFCAP_VLAN_MTU) 4668 if_setifheaderlen(ifp, sizeof(struct ether_vlan_header)); 4669 4670 if ((err = iflib_netmap_attach(ctx))) { 4671 device_printf(ctx->ifc_dev, "netmap attach failed: %d\n", err); 4672 goto fail_detach; 4673 } 4674 *ctxp = ctx; 4675 4676 NETDUMP_SET(ctx->ifc_ifp, iflib); 4677 4678 if_setgetcounterfn(ctx->ifc_ifp, iflib_if_get_counter); 4679 iflib_add_device_sysctl_post(ctx); 4680 ctx->ifc_flags |= IFC_INIT_DONE; 4681 CTX_UNLOCK(ctx); 4682 return (0); 4683 fail_detach: 4684 ether_ifdetach(ctx->ifc_ifp); 4685 fail_intr_free: 4686 if (scctx->isc_intr == IFLIB_INTR_MSIX || scctx->isc_intr == IFLIB_INTR_MSI) 4687 pci_release_msi(ctx->ifc_dev); 4688 fail_queues: 4689 iflib_tx_structures_free(ctx); 4690 iflib_rx_structures_free(ctx); 4691 fail: 4692 IFDI_DETACH(ctx); 4693 CTX_UNLOCK(ctx); 4694 return (err); 4695 } 4696 4697 int 4698 iflib_pseudo_register(device_t dev, if_shared_ctx_t sctx, if_ctx_t *ctxp, 4699 struct iflib_cloneattach_ctx *clctx) 4700 { 4701 int err; 4702 if_ctx_t ctx; 4703 if_t ifp; 4704 if_softc_ctx_t scctx; 4705 int i; 4706 void *sc; 4707 uint16_t main_txq; 4708 uint16_t main_rxq; 4709 4710 ctx = malloc(sizeof(*ctx), M_IFLIB, M_WAITOK|M_ZERO); 4711 sc = malloc(sctx->isc_driver->size, M_IFLIB, M_WAITOK|M_ZERO); 4712 ctx->ifc_flags |= IFC_SC_ALLOCATED; 4713 if (sctx->isc_flags & (IFLIB_PSEUDO|IFLIB_VIRTUAL)) 4714 ctx->ifc_flags |= IFC_PSEUDO; 4715 4716 ctx->ifc_sctx = sctx; 4717 ctx->ifc_softc = sc; 4718 ctx->ifc_dev = dev; 4719 4720 if ((err = iflib_register(ctx)) != 0) { 4721 device_printf(dev, "%s: iflib_register failed %d\n", __func__, err); 4722 free(sc, M_IFLIB); 4723 free(ctx, M_IFLIB); 4724 return (err); 4725 } 4726 iflib_add_device_sysctl_pre(ctx); 4727 4728 scctx = &ctx->ifc_softc_ctx; 4729 ifp = ctx->ifc_ifp; 4730 4731 /* 4732 * XXX sanity check that ntxd & nrxd are a power of 2 4733 */ 4734 iflib_reset_qvalues(ctx); 4735 4736 if ((err = IFDI_ATTACH_PRE(ctx)) != 0) { 4737 device_printf(dev, "IFDI_ATTACH_PRE failed %d\n", err); 4738 return (err); 4739 } 4740 if (sctx->isc_flags & IFLIB_GEN_MAC) 4741 iflib_gen_mac(ctx); 4742 if ((err = IFDI_CLONEATTACH(ctx, clctx->cc_ifc, clctx->cc_name, 4743 clctx->cc_params)) != 0) { 4744 device_printf(dev, "IFDI_CLONEATTACH failed %d\n", err); 4745 return (err); 4746 } 4747 ifmedia_add(&ctx->ifc_media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL); 4748 ifmedia_add(&ctx->ifc_media, IFM_ETHER | IFM_AUTO, 0, NULL); 4749 ifmedia_set(&ctx->ifc_media, IFM_ETHER | IFM_AUTO); 4750 4751 #ifdef INVARIANTS 4752 MPASS(scctx->isc_capabilities); 4753 if (scctx->isc_capabilities & IFCAP_TXCSUM) 4754 MPASS(scctx->isc_tx_csum_flags); 4755 #endif 4756 4757 if_setcapabilities(ifp, scctx->isc_capabilities | IFCAP_HWSTATS | IFCAP_LINKSTATE); 4758 if_setcapenable(ifp, scctx->isc_capenable | IFCAP_HWSTATS | IFCAP_LINKSTATE); 4759 4760 ifp->if_flags |= IFF_NOGROUP; 4761 if (sctx->isc_flags & IFLIB_PSEUDO) { 4762 ether_ifattach(ctx->ifc_ifp, ctx->ifc_mac); 4763 4764 if ((err = IFDI_ATTACH_POST(ctx)) != 0) { 4765 device_printf(dev, "IFDI_ATTACH_POST failed %d\n", err); 4766 goto fail_detach; 4767 } 4768 *ctxp = ctx; 4769 4770 /* 4771 * Tell the upper layer(s) if IFCAP_VLAN_MTU is supported. 4772 * This must appear after the call to ether_ifattach() because 4773 * ether_ifattach() sets if_hdrlen to the default value. 
4774 */
4775 if (if_getcapabilities(ifp) & IFCAP_VLAN_MTU)
4776 if_setifheaderlen(ifp,
4777 sizeof(struct ether_vlan_header));
4778
4779 if_setgetcounterfn(ctx->ifc_ifp, iflib_if_get_counter);
4780 iflib_add_device_sysctl_post(ctx);
4781 ctx->ifc_flags |= IFC_INIT_DONE;
4782 return (0);
4783 }
4784 _iflib_pre_assert(scctx);
4785 ctx->ifc_txrx = *scctx->isc_txrx;
4786
4787 if (scctx->isc_ntxqsets == 0 || (scctx->isc_ntxqsets_max && scctx->isc_ntxqsets_max < scctx->isc_ntxqsets))
4788 scctx->isc_ntxqsets = scctx->isc_ntxqsets_max;
4789 if (scctx->isc_nrxqsets == 0 || (scctx->isc_nrxqsets_max && scctx->isc_nrxqsets_max < scctx->isc_nrxqsets))
4790 scctx->isc_nrxqsets = scctx->isc_nrxqsets_max;
4791
4792 main_txq = (sctx->isc_flags & IFLIB_HAS_TXCQ) ? 1 : 0;
4793 main_rxq = (sctx->isc_flags & IFLIB_HAS_RXCQ) ? 1 : 0;
4794
4795 /* XXX change for per-queue sizes */
4796 device_printf(dev, "using %d tx descriptors and %d rx descriptors\n",
4797 scctx->isc_ntxd[main_txq], scctx->isc_nrxd[main_rxq]);
4798 for (i = 0; i < sctx->isc_nrxqs; i++) {
4799 if (!powerof2(scctx->isc_nrxd[i])) {
4800 /* round down instead? */
4801 device_printf(dev, "# rx descriptors must be a power of 2\n");
4802 err = EINVAL;
4803 goto fail;
4804 }
4805 }
4806 for (i = 0; i < sctx->isc_ntxqs; i++) {
4807 if (!powerof2(scctx->isc_ntxd[i])) {
4808 device_printf(dev,
4809 "# tx descriptors must be a power of 2\n");
4810 err = EINVAL;
4811 goto fail;
4812 }
4813 }
4814
4815 if (scctx->isc_tx_nsegments > scctx->isc_ntxd[main_txq] /
4816 MAX_SINGLE_PACKET_FRACTION)
4817 scctx->isc_tx_nsegments = max(1, scctx->isc_ntxd[main_txq] /
4818 MAX_SINGLE_PACKET_FRACTION);
4819 if (scctx->isc_tx_tso_segments_max > scctx->isc_ntxd[main_txq] /
4820 MAX_SINGLE_PACKET_FRACTION)
4821 scctx->isc_tx_tso_segments_max = max(1,
4822 scctx->isc_ntxd[main_txq] / MAX_SINGLE_PACKET_FRACTION);
4823
4824 /* TSO parameters - dig these out of the data sheet - simply correspond to tag setup */
4825 if (if_getcapabilities(ifp) & IFCAP_TSO) {
4826 /*
4827 * The stack can't handle a TSO size larger than IP_MAXPACKET,
4828 * but some MACs do.
4829 */
4830 if_sethwtsomax(ifp, min(scctx->isc_tx_tso_size_max,
4831 IP_MAXPACKET));
4832 /*
4833 * Take maximum number of m_pullup(9)'s in iflib_parse_header()
4834 * into account. In the worst case, each of these calls will
4835 * add another mbuf and, thus, the requirement for another DMA
4836 * segment. So for best performance, it doesn't make sense to
4837 * advertise a maximum of TSO segments that typically will
4838 * require defragmentation in iflib_encap().
4839 */
4840 if_sethwtsomaxsegcount(ifp, scctx->isc_tx_tso_segments_max - 3);
4841 if_sethwtsomaxsegsize(ifp, scctx->isc_tx_tso_segsize_max);
4842 }
4843 if (scctx->isc_rss_table_size == 0)
4844 scctx->isc_rss_table_size = 64;
4845 scctx->isc_rss_table_mask = scctx->isc_rss_table_size - 1;
4846
4847 GROUPTASK_INIT(&ctx->ifc_admin_task, 0, _task_fn_admin, ctx);
4848 /* XXX format name */
4849 taskqgroup_attach(qgroup_if_config_tqg, &ctx->ifc_admin_task, ctx, -1, "admin");
4850
4851 /* XXX --- can support > 1 -- but keep it simple for now */
4852 scctx->isc_intr = IFLIB_INTR_LEGACY;
4853
4854 /* Get memory for the station queues */
4855 if ((err = iflib_queues_alloc(ctx))) {
4856 device_printf(dev, "Unable to allocate queue memory\n");
4857 goto fail;
4858 }
4859
4860 if ((err = iflib_qset_structures_setup(ctx))) {
4861 device_printf(dev, "qset structure setup failed %d\n", err);
4862 goto fail_queues;
4863 }
4864
4865 /*
4866 * XXX What, if anything, do we want to do about interrupts?
4867 */
4868 ether_ifattach(ctx->ifc_ifp, ctx->ifc_mac);
4869 if ((err = IFDI_ATTACH_POST(ctx)) != 0) {
4870 device_printf(dev, "IFDI_ATTACH_POST failed %d\n", err);
4871 goto fail_detach;
4872 }
4873
4874 /*
4875 * Tell the upper layer(s) if IFCAP_VLAN_MTU is supported.
4876 * This must appear after the call to ether_ifattach() because
4877 * ether_ifattach() sets if_hdrlen to the default value.
4878 */
4879 if (if_getcapabilities(ifp) & IFCAP_VLAN_MTU)
4880 if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
4881
4882 /* XXX handle more than one queue */
4883 for (i = 0; i < scctx->isc_nrxqsets; i++)
4884 IFDI_RX_CLSET(ctx, 0, i, ctx->ifc_rxqs[i].ifr_fl[0].ifl_sds.ifsd_cl);
4885
4886 *ctxp = ctx;
4887
4888 if_setgetcounterfn(ctx->ifc_ifp, iflib_if_get_counter);
4889 iflib_add_device_sysctl_post(ctx);
4890 ctx->ifc_flags |= IFC_INIT_DONE;
4891 return (0);
4892 fail_detach:
4893 ether_ifdetach(ctx->ifc_ifp);
4894 fail_queues:
4895 iflib_tx_structures_free(ctx);
4896 iflib_rx_structures_free(ctx);
4897 fail:
4898 IFDI_DETACH(ctx);
4899 return (err);
4900 }
4901
4902 int
4903 iflib_pseudo_deregister(if_ctx_t ctx)
4904 {
4905 if_t ifp = ctx->ifc_ifp;
4906 iflib_txq_t txq;
4907 iflib_rxq_t rxq;
4908 int i, j;
4909 struct taskqgroup *tqg;
4910 iflib_fl_t fl;
4911
4912 /* Unregister VLAN events */
4913 if (ctx->ifc_vlan_attach_event != NULL)
4914 EVENTHANDLER_DEREGISTER(vlan_config, ctx->ifc_vlan_attach_event);
4915 if (ctx->ifc_vlan_detach_event != NULL)
4916 EVENTHANDLER_DEREGISTER(vlan_unconfig, ctx->ifc_vlan_detach_event);
4917
4918 ether_ifdetach(ifp);
4919 /* ether_ifdetach calls if_qflush - lock must be destroyed afterwards */
4920 CTX_LOCK_DESTROY(ctx);
4921 /* XXX drain any dependent tasks */
4922 tqg = qgroup_if_io_tqg;
4923 for (txq = ctx->ifc_txqs, i = 0; i < NTXQSETS(ctx); i++, txq++) {
4924 callout_drain(&txq->ift_timer);
4925 if (txq->ift_task.gt_uniq != NULL)
4926 taskqgroup_detach(tqg, &txq->ift_task);
4927 }
4928 for (i = 0, rxq = ctx->ifc_rxqs; i < NRXQSETS(ctx); i++, rxq++) {
4929 if (rxq->ifr_task.gt_uniq != NULL)
4930 taskqgroup_detach(tqg, &rxq->ifr_task);
4931
4932 for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++)
4933 free(fl->ifl_rx_bitmap, M_IFLIB);
4934 }
4935 tqg = qgroup_if_config_tqg;
4936 if (ctx->ifc_admin_task.gt_uniq != NULL)
4937 taskqgroup_detach(tqg, &ctx->ifc_admin_task);
4938 if (ctx->ifc_vflr_task.gt_uniq != NULL)
4939 taskqgroup_detach(tqg, &ctx->ifc_vflr_task);
4940
4941 if_free(ifp);
4942
4943 iflib_tx_structures_free(ctx);
4944 iflib_rx_structures_free(ctx);
4945
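/* iflib_pseudo_register() always allocates the softc and sets IFC_SC_ALLOCATED */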
if (ctx->ifc_flags & IFC_SC_ALLOCATED)
4946 free(ctx->ifc_softc, M_IFLIB);
4947 free(ctx, M_IFLIB);
4948 return (0);
4949 }
4950
4951 int
4952 iflib_device_attach(device_t dev)
4953 {
4954 if_ctx_t ctx;
4955 if_shared_ctx_t sctx;
4956
4957 if ((sctx = DEVICE_REGISTER(dev)) == NULL || sctx->isc_magic != IFLIB_MAGIC)
4958 return (ENOTSUP);
4959
4960 pci_enable_busmaster(dev);
4961
4962 return (iflib_device_register(dev, NULL, sctx, &ctx));
4963 }
4964
4965 int
4966 iflib_device_deregister(if_ctx_t ctx)
4967 {
4968 if_t ifp = ctx->ifc_ifp;
4969 iflib_txq_t txq;
4970 iflib_rxq_t rxq;
4971 device_t dev = ctx->ifc_dev;
4972 int i, j;
4973 struct taskqgroup *tqg;
4974 iflib_fl_t fl;
4975
4976 /* Make sure VLANs are not using driver */
4977 if (if_vlantrunkinuse(ifp)) {
4978 device_printf(dev, "VLAN in use, detach first\n");
4979 return (EBUSY);
4980 }
4981
4982 CTX_LOCK(ctx);
4983 ctx->ifc_in_detach = 1;
4984 iflib_stop(ctx);
4985 CTX_UNLOCK(ctx);
4986
4987 /* Unregister VLAN events */
4988 if (ctx->ifc_vlan_attach_event != NULL)
4989 EVENTHANDLER_DEREGISTER(vlan_config, ctx->ifc_vlan_attach_event);
4990 if (ctx->ifc_vlan_detach_event != NULL)
4991 EVENTHANDLER_DEREGISTER(vlan_unconfig, ctx->ifc_vlan_detach_event);
4992
4993 iflib_netmap_detach(ifp);
4994 ether_ifdetach(ifp);
4995 if (ctx->ifc_led_dev != NULL)
4996 led_destroy(ctx->ifc_led_dev);
4997 /* XXX drain any dependent tasks */
4998 tqg = qgroup_if_io_tqg;
4999 for (txq = ctx->ifc_txqs, i = 0; i < NTXQSETS(ctx); i++, txq++) {
5000 callout_drain(&txq->ift_timer);
5001 if (txq->ift_task.gt_uniq != NULL)
5002 taskqgroup_detach(tqg, &txq->ift_task);
5003 }
5004 for (i = 0, rxq = ctx->ifc_rxqs; i < NRXQSETS(ctx); i++, rxq++) {
5005 if (rxq->ifr_task.gt_uniq != NULL)
5006 taskqgroup_detach(tqg, &rxq->ifr_task);
5007
5008 for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++)
5009 free(fl->ifl_rx_bitmap, M_IFLIB);
5010
5011 }
5012 tqg = qgroup_if_config_tqg;
5013 if (ctx->ifc_admin_task.gt_uniq != NULL)
5014 taskqgroup_detach(tqg, &ctx->ifc_admin_task);
5015 if (ctx->ifc_vflr_task.gt_uniq != NULL)
5016 taskqgroup_detach(tqg, &ctx->ifc_vflr_task);
5017 CTX_LOCK(ctx);
5018 IFDI_DETACH(ctx);
5019 CTX_UNLOCK(ctx);
5020
5021 /* ether_ifdetach calls if_qflush - lock must be destroyed afterwards */
5022 CTX_LOCK_DESTROY(ctx);
5023 device_set_softc(ctx->ifc_dev, NULL);
5024 if (ctx->ifc_softc_ctx.isc_intr != IFLIB_INTR_LEGACY) {
5025 pci_release_msi(dev);
5026 }
5027 if (ctx->ifc_softc_ctx.isc_intr != IFLIB_INTR_MSIX) {
5028 iflib_irq_free(ctx, &ctx->ifc_legacy_irq);
5029 }
5030 if (ctx->ifc_msix_mem != NULL) {
5031 bus_release_resource(ctx->ifc_dev, SYS_RES_MEMORY,
5032 ctx->ifc_softc_ctx.isc_msix_bar, ctx->ifc_msix_mem);
5033 ctx->ifc_msix_mem = NULL;
5034 }
5035
5036 bus_generic_detach(dev);
5037 if_free(ifp);
5038
5039 iflib_tx_structures_free(ctx);
5040 iflib_rx_structures_free(ctx);
5041 if (ctx->ifc_flags & IFC_SC_ALLOCATED)
5042 free(ctx->ifc_softc, M_IFLIB);
5043 free(ctx, M_IFLIB);
5044 return (0);
5045 }
5046
5047
5048 int
5049 iflib_device_detach(device_t dev)
5050 {
5051 if_ctx_t ctx = device_get_softc(dev);
5052
5053 return (iflib_device_deregister(ctx));
5054 }
5055
5056 int
5057 iflib_device_suspend(device_t dev)
5058 {
5059 if_ctx_t ctx = device_get_softc(dev);
5060
5061 CTX_LOCK(ctx);
5062 IFDI_SUSPEND(ctx);
5063 CTX_UNLOCK(ctx);
5064
5065 return bus_generic_suspend(dev);
5066 }
5067 int
5068 iflib_device_shutdown(device_t dev)
5069 {
5070 if_ctx_t ctx = device_get_softc(dev);
5071
5072 CTX_LOCK(ctx);
5073 IFDI_SHUTDOWN(ctx);
5074
CTX_UNLOCK(ctx); 5075 5076 return bus_generic_suspend(dev); 5077 } 5078 5079 5080 int 5081 iflib_device_resume(device_t dev) 5082 { 5083 if_ctx_t ctx = device_get_softc(dev); 5084 iflib_txq_t txq = ctx->ifc_txqs; 5085 5086 CTX_LOCK(ctx); 5087 IFDI_RESUME(ctx); 5088 iflib_init_locked(ctx); 5089 CTX_UNLOCK(ctx); 5090 for (int i = 0; i < NTXQSETS(ctx); i++, txq++) 5091 iflib_txq_check_drain(txq, IFLIB_RESTART_BUDGET); 5092 5093 return (bus_generic_resume(dev)); 5094 } 5095 5096 int 5097 iflib_device_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *params) 5098 { 5099 int error; 5100 if_ctx_t ctx = device_get_softc(dev); 5101 5102 CTX_LOCK(ctx); 5103 error = IFDI_IOV_INIT(ctx, num_vfs, params); 5104 CTX_UNLOCK(ctx); 5105 5106 return (error); 5107 } 5108 5109 void 5110 iflib_device_iov_uninit(device_t dev) 5111 { 5112 if_ctx_t ctx = device_get_softc(dev); 5113 5114 CTX_LOCK(ctx); 5115 IFDI_IOV_UNINIT(ctx); 5116 CTX_UNLOCK(ctx); 5117 } 5118 5119 int 5120 iflib_device_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params) 5121 { 5122 int error; 5123 if_ctx_t ctx = device_get_softc(dev); 5124 5125 CTX_LOCK(ctx); 5126 error = IFDI_IOV_VF_ADD(ctx, vfnum, params); 5127 CTX_UNLOCK(ctx); 5128 5129 return (error); 5130 } 5131 5132 /********************************************************************* 5133 * 5134 * MODULE FUNCTION DEFINITIONS 5135 * 5136 **********************************************************************/ 5137 5138 /* 5139 * - Start a fast taskqueue thread for each core 5140 * - Start a taskqueue for control operations 5141 */ 5142 static int 5143 iflib_module_init(void) 5144 { 5145 return (0); 5146 } 5147 5148 static int 5149 iflib_module_event_handler(module_t mod, int what, void *arg) 5150 { 5151 int err; 5152 5153 switch (what) { 5154 case MOD_LOAD: 5155 if ((err = iflib_module_init()) != 0) 5156 return (err); 5157 break; 5158 case MOD_UNLOAD: 5159 return (EBUSY); 5160 default: 5161 return (EOPNOTSUPP); 5162 } 5163 5164 return (0); 5165 } 5166 5167 /********************************************************************* 5168 * 5169 * PUBLIC FUNCTION DEFINITIONS 5170 * ordered as in iflib.h 5171 * 5172 **********************************************************************/ 5173 5174 5175 static void 5176 _iflib_assert(if_shared_ctx_t sctx) 5177 { 5178 MPASS(sctx->isc_tx_maxsize); 5179 MPASS(sctx->isc_tx_maxsegsize); 5180 5181 MPASS(sctx->isc_rx_maxsize); 5182 MPASS(sctx->isc_rx_nsegments); 5183 MPASS(sctx->isc_rx_maxsegsize); 5184 5185 MPASS(sctx->isc_nrxd_min[0]); 5186 MPASS(sctx->isc_nrxd_max[0]); 5187 MPASS(sctx->isc_nrxd_default[0]); 5188 MPASS(sctx->isc_ntxd_min[0]); 5189 MPASS(sctx->isc_ntxd_max[0]); 5190 MPASS(sctx->isc_ntxd_default[0]); 5191 } 5192 5193 static void 5194 _iflib_pre_assert(if_softc_ctx_t scctx) 5195 { 5196 5197 MPASS(scctx->isc_txrx->ift_txd_encap); 5198 MPASS(scctx->isc_txrx->ift_txd_flush); 5199 MPASS(scctx->isc_txrx->ift_txd_credits_update); 5200 MPASS(scctx->isc_txrx->ift_rxd_available); 5201 MPASS(scctx->isc_txrx->ift_rxd_pkt_get); 5202 MPASS(scctx->isc_txrx->ift_rxd_refill); 5203 MPASS(scctx->isc_txrx->ift_rxd_flush); 5204 } 5205 5206 static int 5207 iflib_register(if_ctx_t ctx) 5208 { 5209 if_shared_ctx_t sctx = ctx->ifc_sctx; 5210 driver_t *driver = sctx->isc_driver; 5211 device_t dev = ctx->ifc_dev; 5212 if_t ifp; 5213 5214 _iflib_assert(sctx); 5215 5216 CTX_LOCK_INIT(ctx); 5217 STATE_LOCK_INIT(ctx, device_get_nameunit(ctx->ifc_dev)); 5218 ifp = ctx->ifc_ifp = if_gethandle(IFT_ETHER); 5219 if (ifp == NULL) { 5220 device_printf(dev, 
"can not allocate ifnet structure\n"); 5221 return (ENOMEM); 5222 } 5223 5224 /* 5225 * Initialize our context's device specific methods 5226 */ 5227 kobj_init((kobj_t) ctx, (kobj_class_t) driver); 5228 kobj_class_compile((kobj_class_t) driver); 5229 driver->refs++; 5230 5231 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 5232 if_setsoftc(ifp, ctx); 5233 if_setdev(ifp, dev); 5234 if_setinitfn(ifp, iflib_if_init); 5235 if_setioctlfn(ifp, iflib_if_ioctl); 5236 #ifdef ALTQ 5237 if_setstartfn(ifp, iflib_altq_if_start); 5238 if_settransmitfn(ifp, iflib_altq_if_transmit); 5239 if_setsendqready(ifp); 5240 #else 5241 if_settransmitfn(ifp, iflib_if_transmit); 5242 #endif 5243 if_setqflushfn(ifp, iflib_if_qflush); 5244 if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST); 5245 5246 ctx->ifc_vlan_attach_event = 5247 EVENTHANDLER_REGISTER(vlan_config, iflib_vlan_register, ctx, 5248 EVENTHANDLER_PRI_FIRST); 5249 ctx->ifc_vlan_detach_event = 5250 EVENTHANDLER_REGISTER(vlan_unconfig, iflib_vlan_unregister, ctx, 5251 EVENTHANDLER_PRI_FIRST); 5252 5253 ifmedia_init(&ctx->ifc_media, IFM_IMASK, 5254 iflib_media_change, iflib_media_status); 5255 5256 return (0); 5257 } 5258 5259 5260 static int 5261 iflib_queues_alloc(if_ctx_t ctx) 5262 { 5263 if_shared_ctx_t sctx = ctx->ifc_sctx; 5264 if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; 5265 device_t dev = ctx->ifc_dev; 5266 int nrxqsets = scctx->isc_nrxqsets; 5267 int ntxqsets = scctx->isc_ntxqsets; 5268 iflib_txq_t txq; 5269 iflib_rxq_t rxq; 5270 iflib_fl_t fl = NULL; 5271 int i, j, cpu, err, txconf, rxconf; 5272 iflib_dma_info_t ifdip; 5273 uint32_t *rxqsizes = scctx->isc_rxqsizes; 5274 uint32_t *txqsizes = scctx->isc_txqsizes; 5275 uint8_t nrxqs = sctx->isc_nrxqs; 5276 uint8_t ntxqs = sctx->isc_ntxqs; 5277 int nfree_lists = sctx->isc_nfl ? 
sctx->isc_nfl : 1; 5278 caddr_t *vaddrs; 5279 uint64_t *paddrs; 5280 5281 KASSERT(ntxqs > 0, ("number of queues per qset must be at least 1")); 5282 KASSERT(nrxqs > 0, ("number of queues per qset must be at least 1")); 5283 5284 /* Allocate the TX ring struct memory */ 5285 if (!(ctx->ifc_txqs = 5286 (iflib_txq_t) malloc(sizeof(struct iflib_txq) * 5287 ntxqsets, M_IFLIB, M_NOWAIT | M_ZERO))) { 5288 device_printf(dev, "Unable to allocate TX ring memory\n"); 5289 err = ENOMEM; 5290 goto fail; 5291 } 5292 5293 /* Now allocate the RX */ 5294 if (!(ctx->ifc_rxqs = 5295 (iflib_rxq_t) malloc(sizeof(struct iflib_rxq) * 5296 nrxqsets, M_IFLIB, M_NOWAIT | M_ZERO))) { 5297 device_printf(dev, "Unable to allocate RX ring memory\n"); 5298 err = ENOMEM; 5299 goto rx_fail; 5300 } 5301 5302 txq = ctx->ifc_txqs; 5303 rxq = ctx->ifc_rxqs; 5304 5305 /* 5306 * XXX handle allocation failure 5307 */ 5308 for (txconf = i = 0, cpu = CPU_FIRST(); i < ntxqsets; i++, txconf++, txq++, cpu = CPU_NEXT(cpu)) { 5309 /* Set up some basics */ 5310 5311 if ((ifdip = malloc(sizeof(struct iflib_dma_info) * ntxqs, M_IFLIB, M_WAITOK|M_ZERO)) == NULL) { 5312 device_printf(dev, "failed to allocate iflib_dma_info\n"); 5313 err = ENOMEM; 5314 goto err_tx_desc; 5315 } 5316 txq->ift_ifdi = ifdip; 5317 for (j = 0; j < ntxqs; j++, ifdip++) { 5318 if (iflib_dma_alloc(ctx, txqsizes[j], ifdip, BUS_DMA_NOWAIT)) { 5319 device_printf(dev, "Unable to allocate Descriptor memory\n"); 5320 err = ENOMEM; 5321 goto err_tx_desc; 5322 } 5323 txq->ift_txd_size[j] = scctx->isc_txd_size[j]; 5324 bzero((void *)ifdip->idi_vaddr, txqsizes[j]); 5325 } 5326 txq->ift_ctx = ctx; 5327 txq->ift_id = i; 5328 if (sctx->isc_flags & IFLIB_HAS_TXCQ) { 5329 txq->ift_br_offset = 1; 5330 } else { 5331 txq->ift_br_offset = 0; 5332 } 5333 /* XXX fix this */ 5334 txq->ift_timer.c_cpu = cpu; 5335 5336 if (iflib_txsd_alloc(txq)) { 5337 device_printf(dev, "Critical Failure setting up TX buffers\n"); 5338 err = ENOMEM; 5339 goto err_tx_desc; 5340 } 5341 5342 /* Initialize the TX lock */ 5343 snprintf(txq->ift_mtx_name, MTX_NAME_LEN, "%s:tx(%d):callout", 5344 device_get_nameunit(dev), txq->ift_id); 5345 mtx_init(&txq->ift_mtx, txq->ift_mtx_name, NULL, MTX_DEF); 5346 callout_init_mtx(&txq->ift_timer, &txq->ift_mtx, 0); 5347 5348 snprintf(txq->ift_db_mtx_name, MTX_NAME_LEN, "%s:tx(%d):db", 5349 device_get_nameunit(dev), txq->ift_id); 5350 5351 err = ifmp_ring_alloc(&txq->ift_br, 2048, txq, iflib_txq_drain, 5352 iflib_txq_can_drain, M_IFLIB, M_WAITOK); 5353 if (err) { 5354 /* XXX free any allocated rings */ 5355 device_printf(dev, "Unable to allocate buf_ring\n"); 5356 goto err_tx_desc; 5357 } 5358 } 5359 5360 for (rxconf = i = 0; i < nrxqsets; i++, rxconf++, rxq++) { 5361 /* Set up some basics */ 5362 5363 if ((ifdip = malloc(sizeof(struct iflib_dma_info) * nrxqs, M_IFLIB, M_WAITOK|M_ZERO)) == NULL) { 5364 device_printf(dev, "failed to allocate iflib_dma_info\n"); 5365 err = ENOMEM; 5366 goto err_tx_desc; 5367 } 5368 5369 rxq->ifr_ifdi = ifdip; 5370 /* XXX this needs to be changed if #rx queues != #tx queues */ 5371 rxq->ifr_ntxqirq = 1; 5372 rxq->ifr_txqid[0] = i; 5373 for (j = 0; j < nrxqs; j++, ifdip++) { 5374 if (iflib_dma_alloc(ctx, rxqsizes[j], ifdip, BUS_DMA_NOWAIT)) { 5375 device_printf(dev, "Unable to allocate Descriptor memory\n"); 5376 err = ENOMEM; 5377 goto err_tx_desc; 5378 } 5379 bzero((void *)ifdip->idi_vaddr, rxqsizes[j]); 5380 } 5381 rxq->ifr_ctx = ctx; 5382 rxq->ifr_id = i; 5383 if (sctx->isc_flags & IFLIB_HAS_RXCQ) { 5384 rxq->ifr_fl_offset = 1; 5385 } else { 
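/* no separate completion queue: ifdi[0] is the first free list */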
5386 rxq->ifr_fl_offset = 0; 5387 } 5388 rxq->ifr_nfl = nfree_lists; 5389 if (!(fl = 5390 (iflib_fl_t) malloc(sizeof(struct iflib_fl) * nfree_lists, M_IFLIB, M_NOWAIT | M_ZERO))) { 5391 device_printf(dev, "Unable to allocate free list memory\n"); 5392 err = ENOMEM; 5393 goto err_tx_desc; 5394 } 5395 rxq->ifr_fl = fl; 5396 for (j = 0; j < nfree_lists; j++) { 5397 fl[j].ifl_rxq = rxq; 5398 fl[j].ifl_id = j; 5399 fl[j].ifl_ifdi = &rxq->ifr_ifdi[j + rxq->ifr_fl_offset]; 5400 fl[j].ifl_rxd_size = scctx->isc_rxd_size[j]; 5401 } 5402 /* Allocate receive buffers for the ring*/ 5403 if (iflib_rxsd_alloc(rxq)) { 5404 device_printf(dev, 5405 "Critical Failure setting up receive buffers\n"); 5406 err = ENOMEM; 5407 goto err_rx_desc; 5408 } 5409 5410 for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) 5411 fl->ifl_rx_bitmap = bit_alloc(fl->ifl_size, M_IFLIB, M_WAITOK|M_ZERO); 5412 } 5413 5414 /* TXQs */ 5415 vaddrs = malloc(sizeof(caddr_t)*ntxqsets*ntxqs, M_IFLIB, M_WAITOK); 5416 paddrs = malloc(sizeof(uint64_t)*ntxqsets*ntxqs, M_IFLIB, M_WAITOK); 5417 for (i = 0; i < ntxqsets; i++) { 5418 iflib_dma_info_t di = ctx->ifc_txqs[i].ift_ifdi; 5419 5420 for (j = 0; j < ntxqs; j++, di++) { 5421 vaddrs[i*ntxqs + j] = di->idi_vaddr; 5422 paddrs[i*ntxqs + j] = di->idi_paddr; 5423 } 5424 } 5425 if ((err = IFDI_TX_QUEUES_ALLOC(ctx, vaddrs, paddrs, ntxqs, ntxqsets)) != 0) { 5426 device_printf(ctx->ifc_dev, "device queue allocation failed\n"); 5427 iflib_tx_structures_free(ctx); 5428 free(vaddrs, M_IFLIB); 5429 free(paddrs, M_IFLIB); 5430 goto err_rx_desc; 5431 } 5432 free(vaddrs, M_IFLIB); 5433 free(paddrs, M_IFLIB); 5434 5435 /* RXQs */ 5436 vaddrs = malloc(sizeof(caddr_t)*nrxqsets*nrxqs, M_IFLIB, M_WAITOK); 5437 paddrs = malloc(sizeof(uint64_t)*nrxqsets*nrxqs, M_IFLIB, M_WAITOK); 5438 for (i = 0; i < nrxqsets; i++) { 5439 iflib_dma_info_t di = ctx->ifc_rxqs[i].ifr_ifdi; 5440 5441 for (j = 0; j < nrxqs; j++, di++) { 5442 vaddrs[i*nrxqs + j] = di->idi_vaddr; 5443 paddrs[i*nrxqs + j] = di->idi_paddr; 5444 } 5445 } 5446 if ((err = IFDI_RX_QUEUES_ALLOC(ctx, vaddrs, paddrs, nrxqs, nrxqsets)) != 0) { 5447 device_printf(ctx->ifc_dev, "device queue allocation failed\n"); 5448 iflib_tx_structures_free(ctx); 5449 free(vaddrs, M_IFLIB); 5450 free(paddrs, M_IFLIB); 5451 goto err_rx_desc; 5452 } 5453 free(vaddrs, M_IFLIB); 5454 free(paddrs, M_IFLIB); 5455 5456 return (0); 5457 5458 /* XXX handle allocation failure changes */ 5459 err_rx_desc: 5460 err_tx_desc: 5461 rx_fail: 5462 if (ctx->ifc_rxqs != NULL) 5463 free(ctx->ifc_rxqs, M_IFLIB); 5464 ctx->ifc_rxqs = NULL; 5465 if (ctx->ifc_txqs != NULL) 5466 free(ctx->ifc_txqs, M_IFLIB); 5467 ctx->ifc_txqs = NULL; 5468 fail: 5469 return (err); 5470 } 5471 5472 static int 5473 iflib_tx_structures_setup(if_ctx_t ctx) 5474 { 5475 iflib_txq_t txq = ctx->ifc_txqs; 5476 int i; 5477 5478 for (i = 0; i < NTXQSETS(ctx); i++, txq++) 5479 iflib_txq_setup(txq); 5480 5481 return (0); 5482 } 5483 5484 static void 5485 iflib_tx_structures_free(if_ctx_t ctx) 5486 { 5487 iflib_txq_t txq = ctx->ifc_txqs; 5488 int i, j; 5489 5490 for (i = 0; i < NTXQSETS(ctx); i++, txq++) { 5491 iflib_txq_destroy(txq); 5492 for (j = 0; j < ctx->ifc_nhwtxqs; j++) 5493 iflib_dma_free(&txq->ift_ifdi[j]); 5494 } 5495 free(ctx->ifc_txqs, M_IFLIB); 5496 ctx->ifc_txqs = NULL; 5497 IFDI_QUEUES_FREE(ctx); 5498 } 5499 5500 /********************************************************************* 5501 * 5502 * Initialize all receive rings. 
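 * For each ring this (re)initializes LRO state, where compiled in,
 * and invokes IFDI_RXQ_SETUP().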
5503 *
5504 **********************************************************************/
5505 static int
5506 iflib_rx_structures_setup(if_ctx_t ctx)
5507 {
5508 iflib_rxq_t rxq = ctx->ifc_rxqs;
5509 int q;
5510 #if defined(INET6) || defined(INET)
5511 int i, err;
5512 #endif
5513
5514 for (q = 0; q < ctx->ifc_softc_ctx.isc_nrxqsets; q++, rxq++) {
5515 #if defined(INET6) || defined(INET)
5516 tcp_lro_free(&rxq->ifr_lc);
5517 if ((err = tcp_lro_init_args(&rxq->ifr_lc, ctx->ifc_ifp,
5518 TCP_LRO_ENTRIES, min(1024,
5519 ctx->ifc_softc_ctx.isc_nrxd[rxq->ifr_fl_offset]))) != 0) {
5520 device_printf(ctx->ifc_dev, "LRO Initialization failed!\n");
5521 goto fail;
5522 }
5523 rxq->ifr_lro_enabled = TRUE;
5524 #endif
5525 IFDI_RXQ_SETUP(ctx, rxq->ifr_id);
5526 }
5527 return (0);
5528 #if defined(INET6) || defined(INET)
5529 fail:
5530 /*
5531 * Free RX software descriptors allocated so far; we will only handle
5532 * the rings that completed, since the failing case will have
5533 * cleaned up after itself. 'q' failed, so it's the terminus.
5534 */
5535 rxq = ctx->ifc_rxqs;
5536 for (i = 0; i < q; ++i, rxq++) {
5537 iflib_rx_sds_free(rxq);
5538 rxq->ifr_cq_gen = rxq->ifr_cq_cidx = rxq->ifr_cq_pidx = 0;
5539 }
5540 return (err);
5541 #endif
5542 }
5543
5544 /*********************************************************************
5545 *
5546 * Free all receive rings.
5547 *
5548 **********************************************************************/
5549 static void
5550 iflib_rx_structures_free(if_ctx_t ctx)
5551 {
5552 iflib_rxq_t rxq = ctx->ifc_rxqs;
5553
5554 for (int i = 0; i < ctx->ifc_softc_ctx.isc_nrxqsets; i++, rxq++) {
5555 iflib_rx_sds_free(rxq);
5556 }
5557 }
5558
5559 static int
5560 iflib_qset_structures_setup(if_ctx_t ctx)
5561 {
5562 int err;
5563
5564 /*
5565 * It is expected that the caller takes care of freeing queues if this
5566 * fails.
5567 */
5568 if ((err = iflib_tx_structures_setup(ctx)) != 0) {
5569 device_printf(ctx->ifc_dev, "iflib_tx_structures_setup failed: %d\n", err);
5570 return (err);
5571 }
5572
5573 if ((err = iflib_rx_structures_setup(ctx)) != 0)
5574 device_printf(ctx->ifc_dev, "iflib_rx_structures_setup failed: %d\n", err);
5575
5576 return (err);
5577 }
5578
5579 int
5580 iflib_irq_alloc(if_ctx_t ctx, if_irq_t irq, int rid,
5581 driver_filter_t filter, void *filter_arg, driver_intr_t handler, void *arg, const char *name)
5582 {
5583
5584 return (_iflib_irq_alloc(ctx, irq, rid, filter, handler, arg, name));
5585 }
5586
5587 #ifdef SMP
5588 static int
5589 find_nth(if_ctx_t ctx, int qid)
5590 {
5591 cpuset_t cpus;
5592 int i, cpuid, eqid, count;
5593
5594 CPU_COPY(&ctx->ifc_cpus, &cpus);
5595 count = CPU_COUNT(&cpus);
5596 eqid = qid % count;
5597 /* clear up to the eqid'th bit */
5598 for (i = 0; i < eqid; i++) {
5599 cpuid = CPU_FFS(&cpus);
5600 MPASS(cpuid != 0);
5601 CPU_CLR(cpuid-1, &cpus);
5602 }
5603 cpuid = CPU_FFS(&cpus);
5604 MPASS(cpuid != 0);
5605 return (cpuid-1);
5606 }
5607
5608 #ifdef SCHED_ULE
5609 extern struct cpu_group *cpu_top; /* CPU topology */
5610
5611 static int
5612 find_child_with_core(int cpu, struct cpu_group *grp)
5613 {
5614 int i;
5615
5616 if (grp->cg_children == 0)
5617 return -1;
5618
5619 MPASS(grp->cg_child);
5620 for (i = 0; i < grp->cg_children; i++) {
5621 if (CPU_ISSET(cpu, &grp->cg_child[i].cg_mask))
5622 return i;
5623 }
5624
5625 return -1;
5626 }
5627
5628 /*
5629 * Find the nth "close" core to the specified core.
5630 * "close" is defined as the deepest level that shares
5631 * at least an L2 cache.
With threads, this will be
5632 * threads on the same core. If the shared cache is L3
5633 * or higher, simply returns the same core.
5634 */
5635 static int
5636 find_close_core(int cpu, int core_offset)
5637 {
5638 struct cpu_group *grp;
5639 int i;
5640 int fcpu;
5641 cpuset_t cs;
5642
5643 grp = cpu_top;
5644 if (grp == NULL)
5645 return cpu;
5646 i = 0;
5647 while ((i = find_child_with_core(cpu, grp)) != -1) {
5648 /* If the child only has one cpu, don't descend */
5649 if (grp->cg_child[i].cg_count <= 1)
5650 break;
5651 grp = &grp->cg_child[i];
5652 }
5653
5654 /* If they don't share at least an L2 cache, use the same CPU */
5655 if (grp->cg_level > CG_SHARE_L2 || grp->cg_level == CG_SHARE_NONE)
5656 return cpu;
5657
5658 /* Now pick one */
5659 CPU_COPY(&grp->cg_mask, &cs);
5660
5661 /* Add the selected CPU offset to core offset. */
5662 for (i = 0; (fcpu = CPU_FFS(&cs)) != 0; i++) {
5663 if (fcpu - 1 == cpu)
5664 break;
5665 CPU_CLR(fcpu - 1, &cs);
5666 }
5667 MPASS(fcpu);
5668
5669 core_offset += i;
5670
5671 CPU_COPY(&grp->cg_mask, &cs);
5672 for (i = core_offset % grp->cg_count; i > 0; i--) {
5673 MPASS(CPU_FFS(&cs));
5674 CPU_CLR(CPU_FFS(&cs) - 1, &cs);
5675 }
5676 MPASS(CPU_FFS(&cs));
5677 return CPU_FFS(&cs) - 1;
5678 }
5679 #else
5680 static int
5681 find_close_core(int cpu, int core_offset __unused)
5682 {
5683 return cpu;
5684 }
5685 #endif
5686
5687 static int
5688 get_core_offset(if_ctx_t ctx, iflib_intr_type_t type, int qid)
5689 {
5690 switch (type) {
5691 case IFLIB_INTR_TX:
5692 /* TX queues get cores which share at least an L2 cache with the corresponding RX queue */
5693 /* XXX handle multiple RX threads per core and more than two cores per L2 group */
5694 return qid / CPU_COUNT(&ctx->ifc_cpus) + 1;
5695 case IFLIB_INTR_RX:
5696 case IFLIB_INTR_RXTX:
5697 /* RX queues get the specified core */
5698 return qid / CPU_COUNT(&ctx->ifc_cpus);
5699 default:
5700 return -1;
5701 }
5702 }
5703 #else
5704 #define get_core_offset(ctx, type, qid) CPU_FIRST()
5705 #define find_close_core(cpuid, tid) CPU_FIRST()
5706 #define find_nth(ctx, gid) CPU_FIRST()
5707 #endif
5708
5709 /* Just to avoid copy/paste */
5710 static inline int
5711 iflib_irq_set_affinity(if_ctx_t ctx, int irq, iflib_intr_type_t type, int qid,
5712 struct grouptask *gtask, struct taskqgroup *tqg, void *uniq, const char *name)
5713 {
5714 int cpuid;
5715 int err, tid;
5716
5717 cpuid = find_nth(ctx, qid);
5718 tid = get_core_offset(ctx, type, qid);
5719 MPASS(tid >= 0);
5720 cpuid = find_close_core(cpuid, tid);
5721 err = taskqgroup_attach_cpu(tqg, gtask, uniq, cpuid, irq, name);
5722 if (err) {
5723 device_printf(ctx->ifc_dev, "taskqgroup_attach_cpu failed %d\n", err);
5724 return (err);
5725 }
5726 #ifdef notyet
5727 if (cpuid > ctx->ifc_cpuid_highest)
5728 ctx->ifc_cpuid_highest = cpuid;
5729 #endif
5730 return 0;
5731 }
5732
5733 int
5734 iflib_irq_alloc_generic(if_ctx_t ctx, if_irq_t irq, int rid,
5735 iflib_intr_type_t type, driver_filter_t *filter,
5736 void *filter_arg, int qid, const char *name)
5737 {
5738 struct grouptask *gtask;
5739 struct taskqgroup *tqg;
5740 iflib_filter_info_t info;
5741 gtask_fn_t *fn;
5742 int tqrid, err;
5743 driver_filter_t *intr_fast;
5744 void *q;
5745
5746 info = &ctx->ifc_filter_info;
5747 tqrid = rid;
5748
5749 switch (type) {
5750 /* XXX merge tx/rx for netmap?
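 * (the IFLIB_INTR_RXTX case below already lets an RX interrupt
 * service TX completions via iflib_fast_intr_rxtx)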
int
iflib_irq_alloc_generic(if_ctx_t ctx, if_irq_t irq, int rid,
    iflib_intr_type_t type, driver_filter_t *filter,
    void *filter_arg, int qid, const char *name)
{
	struct grouptask *gtask;
	struct taskqgroup *tqg;
	iflib_filter_info_t info;
	gtask_fn_t *fn;
	int tqrid, err;
	driver_filter_t *intr_fast;
	void *q;

	info = &ctx->ifc_filter_info;
	tqrid = rid;

	switch (type) {
	/* XXX merge tx/rx for netmap? */
	case IFLIB_INTR_TX:
		q = &ctx->ifc_txqs[qid];
		info = &ctx->ifc_txqs[qid].ift_filter_info;
		gtask = &ctx->ifc_txqs[qid].ift_task;
		tqg = qgroup_if_io_tqg;
		fn = _task_fn_tx;
		intr_fast = iflib_fast_intr;
		GROUPTASK_INIT(gtask, 0, fn, q);
		ctx->ifc_flags |= IFC_NETMAP_TX_IRQ;
		break;
	case IFLIB_INTR_RX:
		q = &ctx->ifc_rxqs[qid];
		info = &ctx->ifc_rxqs[qid].ifr_filter_info;
		gtask = &ctx->ifc_rxqs[qid].ifr_task;
		tqg = qgroup_if_io_tqg;
		fn = _task_fn_rx;
		intr_fast = iflib_fast_intr;
		GROUPTASK_INIT(gtask, 0, fn, q);
		break;
	case IFLIB_INTR_RXTX:
		q = &ctx->ifc_rxqs[qid];
		info = &ctx->ifc_rxqs[qid].ifr_filter_info;
		gtask = &ctx->ifc_rxqs[qid].ifr_task;
		tqg = qgroup_if_io_tqg;
		fn = _task_fn_rx;
		intr_fast = iflib_fast_intr_rxtx;
		GROUPTASK_INIT(gtask, 0, fn, q);
		break;
	case IFLIB_INTR_ADMIN:
		q = ctx;
		tqrid = -1;
		info = &ctx->ifc_filter_info;
		gtask = &ctx->ifc_admin_task;
		tqg = qgroup_if_config_tqg;
		fn = _task_fn_admin;
		intr_fast = iflib_fast_intr_ctx;
		break;
	default:
		panic("unknown net intr type");
	}

	info->ifi_filter = filter;
	info->ifi_filter_arg = filter_arg;
	info->ifi_task = gtask;
	info->ifi_ctx = q;

	err = _iflib_irq_alloc(ctx, irq, rid, intr_fast, NULL, info, name);
	if (err != 0) {
		device_printf(ctx->ifc_dev, "_iflib_irq_alloc failed %d\n", err);
		return (err);
	}
	if (type == IFLIB_INTR_ADMIN)
		return (0);

	if (tqrid != -1) {
		err = iflib_irq_set_affinity(ctx, rman_get_start(irq->ii_res), type, qid, gtask, tqg, q, name);
		if (err)
			return (err);
	} else {
		taskqgroup_attach(tqg, gtask, q, rman_get_start(irq->ii_res), name);
	}

	return (0);
}

void
iflib_softirq_alloc_generic(if_ctx_t ctx, if_irq_t irq, iflib_intr_type_t type, void *arg, int qid, const char *name)
{
	struct grouptask *gtask;
	struct taskqgroup *tqg;
	gtask_fn_t *fn;
	void *q;
	int irq_num = -1;
	int err;

	switch (type) {
	case IFLIB_INTR_TX:
		q = &ctx->ifc_txqs[qid];
		gtask = &ctx->ifc_txqs[qid].ift_task;
		tqg = qgroup_if_io_tqg;
		fn = _task_fn_tx;
		if (irq != NULL)
			irq_num = rman_get_start(irq->ii_res);
		break;
	case IFLIB_INTR_RX:
		q = &ctx->ifc_rxqs[qid];
		gtask = &ctx->ifc_rxqs[qid].ifr_task;
		tqg = qgroup_if_io_tqg;
		fn = _task_fn_rx;
		if (irq != NULL)
			irq_num = rman_get_start(irq->ii_res);
		break;
	case IFLIB_INTR_IOV:
		q = ctx;
		gtask = &ctx->ifc_vflr_task;
		tqg = qgroup_if_config_tqg;
		fn = _task_fn_iov;
		break;
	default:
		panic("unknown net intr type");
	}
	GROUPTASK_INIT(gtask, 0, fn, q);
	if (irq_num != -1) {
		err = iflib_irq_set_affinity(ctx, irq_num, type, qid, gtask, tqg, q, name);
		if (err)
			taskqgroup_attach(tqg, gtask, q, irq_num, name);
	} else {
		taskqgroup_attach(tqg, gtask, q, irq_num, name);
	}
}

void
iflib_irq_free(if_ctx_t ctx, if_irq_t irq)
{
	if (irq->ii_tag)
		bus_teardown_intr(ctx->ifc_dev, irq->ii_res, irq->ii_tag);

	if (irq->ii_res)
		bus_release_resource(ctx->ifc_dev, SYS_RES_IRQ, irq->ii_rid, irq->ii_res);
}
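/*
 * Companion sketch (hypothetical names, continuing the sketch above):
 * hardware whose TX completions arrive on the RX vector usually creates
 * its TX task as a software interrupt on the same queue index:
 *
 *	iflib_softirq_alloc_generic(ctx, &q->irq, IFLIB_INTR_TX, txq, i, "tx");
 *
 * Passing the paired RX irq gives iflib_irq_set_affinity() a vector to
 * place the grouptask near; passing a NULL irq skips the affinity hint
 * and simply attaches the task to the I/O taskqgroup.
 */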
static int
iflib_legacy_setup(if_ctx_t ctx, driver_filter_t filter, void *filter_arg, int *rid, const char *name)
{
	iflib_txq_t txq = ctx->ifc_txqs;
	iflib_rxq_t rxq = ctx->ifc_rxqs;
	if_irq_t irq = &ctx->ifc_legacy_irq;
	iflib_filter_info_t info;
	struct grouptask *gtask;
	struct taskqgroup *tqg;
	gtask_fn_t *fn;
	int tqrid;
	void *q;
	int err;

	q = &ctx->ifc_rxqs[0];
	info = &rxq[0].ifr_filter_info;
	gtask = &rxq[0].ifr_task;
	tqg = qgroup_if_io_tqg;
	tqrid = irq->ii_rid = *rid;
	fn = _task_fn_rx;

	ctx->ifc_flags |= IFC_LEGACY;
	info->ifi_filter = filter;
	info->ifi_filter_arg = filter_arg;
	info->ifi_task = gtask;
	info->ifi_ctx = ctx;

	/* We allocate a single interrupt resource */
	if ((err = _iflib_irq_alloc(ctx, irq, tqrid, iflib_fast_intr_ctx, NULL, info, name)) != 0)
		return (err);
	GROUPTASK_INIT(gtask, 0, fn, q);
	taskqgroup_attach(tqg, gtask, q, rman_get_start(irq->ii_res), name);

	GROUPTASK_INIT(&txq->ift_task, 0, _task_fn_tx, txq);
	taskqgroup_attach(qgroup_if_io_tqg, &txq->ift_task, txq, rman_get_start(irq->ii_res), "tx");
	return (0);
}

void
iflib_led_create(if_ctx_t ctx)
{

	ctx->ifc_led_dev = led_create(iflib_led_func, ctx,
	    device_get_nameunit(ctx->ifc_dev));
}

void
iflib_tx_intr_deferred(if_ctx_t ctx, int txqid)
{

	GROUPTASK_ENQUEUE(&ctx->ifc_txqs[txqid].ift_task);
}

void
iflib_rx_intr_deferred(if_ctx_t ctx, int rxqid)
{

	GROUPTASK_ENQUEUE(&ctx->ifc_rxqs[rxqid].ifr_task);
}

void
iflib_admin_intr_deferred(if_ctx_t ctx)
{
#ifdef INVARIANTS
	struct grouptask *gtask;

	gtask = &ctx->ifc_admin_task;
	MPASS(gtask != NULL && gtask->gt_taskqueue != NULL);
#endif

	GROUPTASK_ENQUEUE(&ctx->ifc_admin_task);
}

void
iflib_iov_intr_deferred(if_ctx_t ctx)
{

	GROUPTASK_ENQUEUE(&ctx->ifc_vflr_task);
}

void
iflib_io_tqg_attach(struct grouptask *gt, void *uniq, int cpu, char *name)
{

	taskqgroup_attach_cpu(qgroup_if_io_tqg, gt, uniq, cpu, -1, name);
}

void
iflib_config_gtask_init(void *ctx, struct grouptask *gtask, gtask_fn_t *fn,
    const char *name)
{

	GROUPTASK_INIT(gtask, 0, fn, ctx);
	taskqgroup_attach(qgroup_if_config_tqg, gtask, gtask, -1, name);
}

void
iflib_config_gtask_deinit(struct grouptask *gtask)
{

	taskqgroup_detach(qgroup_if_config_tqg, gtask);
}

void
iflib_link_state_change(if_ctx_t ctx, int link_state, uint64_t baudrate)
{
	if_t ifp = ctx->ifc_ifp;
	iflib_txq_t txq = ctx->ifc_txqs;

	if_setbaudrate(ifp, baudrate);
	if (baudrate >= IF_Gbps(10)) {
		STATE_LOCK(ctx);
		ctx->ifc_flags |= IFC_PREFETCH;
		STATE_UNLOCK(ctx);
	}
	/* If link down, disable watchdog */
	if ((ctx->ifc_link_state == LINK_STATE_UP) && (link_state == LINK_STATE_DOWN)) {
		for (int i = 0; i < ctx->ifc_softc_ctx.isc_ntxqsets; i++, txq++)
			txq->ift_qstatus = IFLIB_QUEUE_IDLE;
	}
	ctx->ifc_link_state = link_state;
	if_link_state_change(ifp, link_state);
}
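/*
 * Usage sketch (hypothetical "sc" fields): drivers normally call
 * iflib_link_state_change() from their IFDI_UPDATE_ADMIN_STATUS method
 * after polling PHY/MAC status:
 *
 *	if (sc->link_up)
 *		iflib_link_state_change(ctx, LINK_STATE_UP, IF_Gbps(10));
 *	else
 *		iflib_link_state_change(ctx, LINK_STATE_DOWN, 0);
 *
 * Note the threshold above: reporting a baudrate at or over IF_Gbps(10)
 * also turns on descriptor prefetching (IFC_PREFETCH).
 */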
static int
iflib_tx_credits_update(if_ctx_t ctx, iflib_txq_t txq)
{
	int credits;
#ifdef INVARIANTS
	int credits_pre = txq->ift_cidx_processed;
#endif

	if (ctx->isc_txd_credits_update == NULL)
		return (0);

	if ((credits = ctx->isc_txd_credits_update(ctx->ifc_softc, txq->ift_id, true)) == 0)
		return (0);

	txq->ift_processed += credits;
	txq->ift_cidx_processed += credits;

	MPASS(credits_pre + credits == txq->ift_cidx_processed);
	if (txq->ift_cidx_processed >= txq->ift_size)
		txq->ift_cidx_processed -= txq->ift_size;
	return (credits);
}

static int
iflib_rxd_avail(if_ctx_t ctx, iflib_rxq_t rxq, qidx_t cidx, qidx_t budget)
{

	return (ctx->isc_rxd_available(ctx->ifc_softc, rxq->ifr_id, cidx,
	    budget));
}

void
iflib_add_int_delay_sysctl(if_ctx_t ctx, const char *name,
    const char *description, if_int_delay_info_t info,
    int offset, int value)
{
	info->iidi_ctx = ctx;
	info->iidi_offset = offset;
	info->iidi_value = value;
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(ctx->ifc_dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(ctx->ifc_dev)),
	    OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW,
	    info, 0, iflib_sysctl_int_delay, "I", description);
}

struct sx *
iflib_ctx_lock_get(if_ctx_t ctx)
{

	return (&ctx->ifc_ctx_sx);
}
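/*
 * Usage sketch for iflib_add_int_delay_sysctl() above (the helper is
 * real; the register macro, field, and default value are hypothetical):
 * a driver with hardware interrupt-moderation registers can expose one
 * as a read/write sysctl:
 *
 *	iflib_add_int_delay_sysctl(ctx, "rx_int_delay",
 *	    "receive interrupt delay in usecs", &sc->rx_int_delay,
 *	    MY_RDTR_REG, 32);
 *
 * Reads and writes are then routed through iflib_sysctl_int_delay()
 * with the context, register offset, and default value kept in "info".
 */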
6076 */ 6077 if (bar != -1) { 6078 ctx->ifc_msix_mem = bus_alloc_resource_any(dev, 6079 SYS_RES_MEMORY, &bar, RF_ACTIVE); 6080 if (ctx->ifc_msix_mem == NULL) { 6081 /* May not be enabled */ 6082 device_printf(dev, "Unable to map MSIX table \n"); 6083 goto msi; 6084 } 6085 } 6086 /* First try MSI/X */ 6087 if ((msgs = pci_msix_count(dev)) == 0) { /* system has msix disabled */ 6088 device_printf(dev, "System has MSIX disabled \n"); 6089 bus_release_resource(dev, SYS_RES_MEMORY, 6090 bar, ctx->ifc_msix_mem); 6091 ctx->ifc_msix_mem = NULL; 6092 goto msi; 6093 } 6094 #if IFLIB_DEBUG 6095 /* use only 1 qset in debug mode */ 6096 queuemsgs = min(msgs - admincnt, 1); 6097 #else 6098 queuemsgs = msgs - admincnt; 6099 #endif 6100 #ifdef RSS 6101 queues = imin(queuemsgs, rss_getnumbuckets()); 6102 #else 6103 queues = queuemsgs; 6104 #endif 6105 queues = imin(CPU_COUNT(&ctx->ifc_cpus), queues); 6106 device_printf(dev, "pxm cpus: %d queue msgs: %d admincnt: %d\n", 6107 CPU_COUNT(&ctx->ifc_cpus), queuemsgs, admincnt); 6108 #ifdef RSS 6109 /* If we're doing RSS, clamp at the number of RSS buckets */ 6110 if (queues > rss_getnumbuckets()) 6111 queues = rss_getnumbuckets(); 6112 #endif 6113 if (iflib_num_rx_queues > 0 && iflib_num_rx_queues < queuemsgs - admincnt) 6114 rx_queues = iflib_num_rx_queues; 6115 else 6116 rx_queues = queues; 6117 6118 if (rx_queues > scctx->isc_nrxqsets) 6119 rx_queues = scctx->isc_nrxqsets; 6120 6121 /* 6122 * We want this to be all logical CPUs by default 6123 */ 6124 if (iflib_num_tx_queues > 0 && iflib_num_tx_queues < queues) 6125 tx_queues = iflib_num_tx_queues; 6126 else 6127 tx_queues = mp_ncpus; 6128 6129 if (tx_queues > scctx->isc_ntxqsets) 6130 tx_queues = scctx->isc_ntxqsets; 6131 6132 if (ctx->ifc_sysctl_qs_eq_override == 0) { 6133 #ifdef INVARIANTS 6134 if (tx_queues != rx_queues) 6135 device_printf(dev, "queue equality override not set, capping rx_queues at %d and tx_queues at %d\n", 6136 min(rx_queues, tx_queues), min(rx_queues, tx_queues)); 6137 #endif 6138 tx_queues = min(rx_queues, tx_queues); 6139 rx_queues = min(rx_queues, tx_queues); 6140 } 6141 6142 device_printf(dev, "using %d rx queues %d tx queues \n", rx_queues, tx_queues); 6143 6144 vectors = rx_queues + admincnt; 6145 if ((err = pci_alloc_msix(dev, &vectors)) == 0) { 6146 device_printf(dev, 6147 "Using MSIX interrupts with %d vectors\n", vectors); 6148 scctx->isc_vectors = vectors; 6149 scctx->isc_nrxqsets = rx_queues; 6150 scctx->isc_ntxqsets = tx_queues; 6151 scctx->isc_intr = IFLIB_INTR_MSIX; 6152 6153 return (vectors); 6154 } else { 6155 device_printf(dev, "failed to allocate %d msix vectors, err: %d - using MSI\n", vectors, err); 6156 bus_release_resource(dev, SYS_RES_MEMORY, bar, 6157 ctx->ifc_msix_mem); 6158 ctx->ifc_msix_mem = NULL; 6159 } 6160 msi: 6161 vectors = pci_msi_count(dev); 6162 scctx->isc_nrxqsets = 1; 6163 scctx->isc_ntxqsets = 1; 6164 scctx->isc_vectors = vectors; 6165 if (vectors == 1 && pci_alloc_msi(dev, &vectors) == 0) { 6166 device_printf(dev,"Using an MSI interrupt\n"); 6167 scctx->isc_intr = IFLIB_INTR_MSI; 6168 } else { 6169 scctx->isc_vectors = 1; 6170 device_printf(dev,"Using a Legacy interrupt\n"); 6171 scctx->isc_intr = IFLIB_INTR_LEGACY; 6172 } 6173 6174 return (vectors); 6175 } 6176 6177 static const char *ring_states[] = { "IDLE", "BUSY", "STALLED", "ABDICATED" }; 6178 6179 static int 6180 mp_ring_state_handler(SYSCTL_HANDLER_ARGS) 6181 { 6182 int rc; 6183 uint16_t *state = ((uint16_t *)oidp->oid_arg1); 6184 struct sbuf *sb; 6185 const char *ring_state = "UNKNOWN"; 
static const char *ring_states[] = { "IDLE", "BUSY", "STALLED", "ABDICATED" };

static int
mp_ring_state_handler(SYSCTL_HANDLER_ARGS)
{
	int rc;
	uint16_t *state = (uint16_t *)oidp->oid_arg1;
	struct sbuf *sb;
	const char *ring_state = "UNKNOWN";

	/* XXX needed ? */
	rc = sysctl_wire_old_buffer(req, 0);
	MPASS(rc == 0);
	if (rc != 0)
		return (rc);
	sb = sbuf_new_for_sysctl(NULL, NULL, 80, req);
	MPASS(sb != NULL);
	if (sb == NULL)
		return (ENOMEM);
	if (state[3] <= 3)
		ring_state = ring_states[state[3]];

	sbuf_printf(sb, "pidx_head: %04hd pidx_tail: %04hd cidx: %04hd state: %s",
	    state[0], state[1], state[2], ring_state);
	rc = sbuf_finish(sb);
	sbuf_delete(sb);
	return (rc);
}

enum iflib_ndesc_handler {
	IFLIB_NTXD_HANDLER,
	IFLIB_NRXD_HANDLER,
};

static int
mp_ndesc_handler(SYSCTL_HANDLER_ARGS)
{
	if_ctx_t ctx = (void *)arg1;
	enum iflib_ndesc_handler type = arg2;
	char buf[256] = {0};
	qidx_t *ndesc;
	char *p, *next;
	int nqs, rc, i;

	MPASS(type == IFLIB_NTXD_HANDLER || type == IFLIB_NRXD_HANDLER);

	nqs = 8;
	switch (type) {
	case IFLIB_NTXD_HANDLER:
		ndesc = ctx->ifc_sysctl_ntxds;
		if (ctx->ifc_sctx)
			nqs = ctx->ifc_sctx->isc_ntxqs;
		break;
	case IFLIB_NRXD_HANDLER:
		ndesc = ctx->ifc_sysctl_nrxds;
		if (ctx->ifc_sctx)
			nqs = ctx->ifc_sctx->isc_nrxqs;
		break;
	default:
		panic("unhandled type");
	}
	if (nqs == 0)
		nqs = 8;

	for (i = 0; i < 8; i++) {
		if (i >= nqs)
			break;
		if (i)
			strcat(buf, ",");
		sprintf(strchr(buf, 0), "%d", ndesc[i]);
	}

	rc = sysctl_handle_string(oidp, buf, sizeof(buf), req);
	if (rc || req->newptr == NULL)
		return (rc);

	for (i = 0, next = buf, p = strsep(&next, " ,"); i < 8 && p;
	    i++, p = strsep(&next, " ,")) {
		ndesc[i] = strtoul(p, NULL, 10);
	}

	return (rc);
}
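/*
 * Example values for mp_ndesc_handler() above (device hypothetical):
 * the handler round-trips a comma- or space-separated list with one
 * descriptor count per hardware queue in the set, e.g.:
 *
 *	# sysctl dev.em.0.iflib.override_ntxds="1024"
 *	# sysctl dev.em.0.iflib.override_nrxds="1024,512"
 *
 * At most 8 entries are parsed (via strsep()), and a value of 0 keeps
 * the driver default for that queue.
 */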
#define NAME_BUFLEN 32
static void
iflib_add_device_sysctl_pre(if_ctx_t ctx)
{
	device_t dev = iflib_get_dev(ctx);
	struct sysctl_oid_list *child, *oid_list;
	struct sysctl_ctx_list *ctx_list;
	struct sysctl_oid *node;

	ctx_list = device_get_sysctl_ctx(dev);
	child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
	ctx->ifc_sysctl_node = node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, "iflib",
	    CTLFLAG_RD, NULL, "IFLIB fields");
	oid_list = SYSCTL_CHILDREN(node);

	SYSCTL_ADD_STRING(ctx_list, oid_list, OID_AUTO, "driver_version",
	    CTLFLAG_RD, ctx->ifc_sctx->isc_driver_version, 0,
	    "driver version");

	SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_ntxqs",
	    CTLFLAG_RWTUN, &ctx->ifc_sysctl_ntxqs, 0,
	    "# of txqs to use, 0 => use default #");
	SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_nrxqs",
	    CTLFLAG_RWTUN, &ctx->ifc_sysctl_nrxqs, 0,
	    "# of rxqs to use, 0 => use default #");
	SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_qs_enable",
	    CTLFLAG_RWTUN, &ctx->ifc_sysctl_qs_eq_override, 0,
	    "permit #txq != #rxq");
	SYSCTL_ADD_INT(ctx_list, oid_list, OID_AUTO, "disable_msix",
	    CTLFLAG_RWTUN, &ctx->ifc_softc_ctx.isc_disable_msix, 0,
	    "disable MSI-X (default 0)");
	SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "rx_budget",
	    CTLFLAG_RWTUN, &ctx->ifc_sysctl_rx_budget, 0,
	    "set the rx budget");
	SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "tx_abdicate",
	    CTLFLAG_RWTUN, &ctx->ifc_sysctl_tx_abdicate, 0,
	    "cause tx to abdicate instead of running to completion");

	/* XXX change for per-queue sizes */
	SYSCTL_ADD_PROC(ctx_list, oid_list, OID_AUTO, "override_ntxds",
	    CTLTYPE_STRING|CTLFLAG_RWTUN, ctx, IFLIB_NTXD_HANDLER,
	    mp_ndesc_handler, "A",
	    "list of # of tx descriptors to use, 0 = use default #");
	SYSCTL_ADD_PROC(ctx_list, oid_list, OID_AUTO, "override_nrxds",
	    CTLTYPE_STRING|CTLFLAG_RWTUN, ctx, IFLIB_NRXD_HANDLER,
	    mp_ndesc_handler, "A",
	    "list of # of rx descriptors to use, 0 = use default #");
}

static void
iflib_add_device_sysctl_post(if_ctx_t ctx)
{
	if_shared_ctx_t sctx = ctx->ifc_sctx;
	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
	device_t dev = iflib_get_dev(ctx);
	struct sysctl_oid_list *child;
	struct sysctl_ctx_list *ctx_list;
	iflib_fl_t fl;
	iflib_txq_t txq;
	iflib_rxq_t rxq;
	int i, j;
	char namebuf[NAME_BUFLEN];
	char *qfmt;
	struct sysctl_oid *queue_node, *fl_node, *node;
	struct sysctl_oid_list *queue_list, *fl_list;

	ctx_list = device_get_sysctl_ctx(dev);

	node = ctx->ifc_sysctl_node;
	child = SYSCTL_CHILDREN(node);

	if (scctx->isc_ntxqsets > 100)
		qfmt = "txq%03d";
	else if (scctx->isc_ntxqsets > 10)
		qfmt = "txq%02d";
	else
		qfmt = "txq%d";
	for (i = 0, txq = ctx->ifc_txqs; i < scctx->isc_ntxqsets; i++, txq++) {
		snprintf(namebuf, NAME_BUFLEN, qfmt, i);
		queue_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, namebuf,
		    CTLFLAG_RD, NULL, "Queue Name");
		queue_list = SYSCTL_CHILDREN(queue_node);
#if MEMORY_LOGGING
		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_dequeued",
		    CTLFLAG_RD,
		    &txq->ift_dequeued, "total mbufs freed");
		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_enqueued",
		    CTLFLAG_RD,
		    &txq->ift_enqueued, "total mbufs enqueued");
#endif
		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "mbuf_defrag",
		    CTLFLAG_RD,
		    &txq->ift_mbuf_defrag, "# of times m_defrag was called");
		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "m_pullups",
		    CTLFLAG_RD,
		    &txq->ift_pullups, "# of times m_pullup was called");
		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "mbuf_defrag_failed",
		    CTLFLAG_RD,
		    &txq->ift_mbuf_defrag_failed, "# of times m_defrag failed");
		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "no_desc_avail",
		    CTLFLAG_RD,
		    &txq->ift_no_desc_avail, "# of times no descriptors were available");
		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "tx_map_failed",
		    CTLFLAG_RD,
		    &txq->ift_map_failed, "# of times dma map failed");
		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txd_encap_efbig",
		    CTLFLAG_RD,
		    &txq->ift_txd_encap_efbig, "# of times txd_encap returned EFBIG");
		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "no_tx_dma_setup",
		    CTLFLAG_RD,
		    &txq->ift_no_tx_dma_setup, "# of times map failed for other than EFBIG");
		SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_pidx",
		    CTLFLAG_RD,
		    &txq->ift_pidx, 1, "Producer Index");
		SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_cidx",
		    CTLFLAG_RD,
		    &txq->ift_cidx, 1, "Consumer Index");
		SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_cidx_processed",
		    CTLFLAG_RD,
		    &txq->ift_cidx_processed, 1, "Consumer Index seen by credit update");
		SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_in_use",
		    CTLFLAG_RD,
		    &txq->ift_in_use, 1, "descriptors in use");
		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_processed",
		    CTLFLAG_RD,
		    &txq->ift_processed, "descriptors processed for clean");
		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_cleaned",
		    CTLFLAG_RD,
		    &txq->ift_cleaned, "total cleaned");
		SYSCTL_ADD_PROC(ctx_list, queue_list, OID_AUTO, "ring_state",
		    CTLTYPE_STRING | CTLFLAG_RD, __DEVOLATILE(uint64_t *, &txq->ift_br->state),
		    0, mp_ring_state_handler, "A", "soft ring state");
		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_enqueues",
		    CTLFLAG_RD, &txq->ift_br->enqueues,
		    "# of enqueues to the mp_ring for this queue");
		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_drops",
		    CTLFLAG_RD, &txq->ift_br->drops,
		    "# of drops in the mp_ring for this queue");
		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_starts",
		    CTLFLAG_RD, &txq->ift_br->starts,
		    "# of normal consumer starts in the mp_ring for this queue");
		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_stalls",
		    CTLFLAG_RD, &txq->ift_br->stalls,
		    "# of consumer stalls in the mp_ring for this queue");
		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_restarts",
		    CTLFLAG_RD, &txq->ift_br->restarts,
		    "# of consumer restarts in the mp_ring for this queue");
		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_abdications",
		    CTLFLAG_RD, &txq->ift_br->abdications,
		    "# of consumer abdications in the mp_ring for this queue");
	}

	if (scctx->isc_nrxqsets > 100)
		qfmt = "rxq%03d";
	else if (scctx->isc_nrxqsets > 10)
		qfmt = "rxq%02d";
	else
		qfmt = "rxq%d";
	for (i = 0, rxq = ctx->ifc_rxqs; i < scctx->isc_nrxqsets; i++, rxq++) {
		snprintf(namebuf, NAME_BUFLEN, qfmt, i);
		queue_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, namebuf,
		    CTLFLAG_RD, NULL, "Queue Name");
		queue_list = SYSCTL_CHILDREN(queue_node);
		if (sctx->isc_flags & IFLIB_HAS_RXCQ) {
			SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "rxq_cq_pidx",
			    CTLFLAG_RD,
			    &rxq->ifr_cq_pidx, 1, "Producer Index");
			SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "rxq_cq_cidx",
			    CTLFLAG_RD,
			    &rxq->ifr_cq_cidx, 1, "Consumer Index");
		}

		for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) {
			snprintf(namebuf, NAME_BUFLEN, "rxq_fl%d", j);
			fl_node = SYSCTL_ADD_NODE(ctx_list, queue_list, OID_AUTO, namebuf,
			    CTLFLAG_RD, NULL, "freelist Name");
			fl_list = SYSCTL_CHILDREN(fl_node);
			SYSCTL_ADD_U16(ctx_list, fl_list, OID_AUTO, "pidx",
			    CTLFLAG_RD,
			    &fl->ifl_pidx, 1, "Producer Index");
			SYSCTL_ADD_U16(ctx_list, fl_list, OID_AUTO, "cidx",
			    CTLFLAG_RD,
			    &fl->ifl_cidx, 1, "Consumer Index");
			SYSCTL_ADD_U16(ctx_list, fl_list, OID_AUTO, "credits",
			    CTLFLAG_RD,
			    &fl->ifl_credits, 1, "credits available");
#if MEMORY_LOGGING
			SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_m_enqueued",
			    CTLFLAG_RD,
			    &fl->ifl_m_enqueued, "mbufs allocated");
			SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_m_dequeued",
			    CTLFLAG_RD,
			    &fl->ifl_m_dequeued, "mbufs freed");
			SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_cl_enqueued",
			    CTLFLAG_RD,
			    &fl->ifl_cl_enqueued, "clusters allocated");
			SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_cl_dequeued",
			    CTLFLAG_RD,
			    &fl->ifl_cl_dequeued, "clusters freed");
#endif
		}
	}
}
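/*
 * Example (device and values hypothetical; node names as registered
 * above): the per-queue state exported here can be inspected at
 * runtime with sysctl(8):
 *
 *	# sysctl dev.em.0.iflib.txq0.ring_state
 *	dev.em.0.iflib.txq0.ring_state: pidx_head: 0042 pidx_tail: 0042 \
 *	    cidx: 0042 state: IDLE
 */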
#ifndef __NO_STRICT_ALIGNMENT
/*
 * On strict-alignment platforms, realign the payload so the IP header
 * is 32-bit aligned: either shift the whole frame forward within its
 * buffer, or split the Ethernet header off into a separate mbuf.
 */
static struct mbuf *
iflib_fixup_rx(struct mbuf *m)
{
	struct mbuf *n;

	if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) {
		bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len);
		m->m_data += ETHER_HDR_LEN;
		n = m;
	} else {
		MGETHDR(n, M_NOWAIT, MT_DATA);
		if (n == NULL) {
			m_freem(m);
			return (NULL);
		}
		bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
		m->m_data += ETHER_HDR_LEN;
		m->m_len -= ETHER_HDR_LEN;
		n->m_len = ETHER_HDR_LEN;
		M_MOVE_PKTHDR(n, m);
		n->m_next = m;
	}
	return (n);
}
#endif

#ifdef NETDUMP
static void
iflib_netdump_init(struct ifnet *ifp, int *nrxr, int *ncl, int *clsize)
{
	if_ctx_t ctx;

	ctx = if_getsoftc(ifp);
	CTX_LOCK(ctx);
	*nrxr = NRXQSETS(ctx);
	*ncl = ctx->ifc_rxqs[0].ifr_fl->ifl_size;
	*clsize = ctx->ifc_rxqs[0].ifr_fl->ifl_buf_size;
	CTX_UNLOCK(ctx);
}

static void
iflib_netdump_event(struct ifnet *ifp, enum netdump_ev event)
{
	if_ctx_t ctx;
	if_softc_ctx_t scctx;
	iflib_fl_t fl;
	iflib_rxq_t rxq;
	int i, j;

	ctx = if_getsoftc(ifp);
	scctx = &ctx->ifc_softc_ctx;

	switch (event) {
	case NETDUMP_START:
		for (i = 0; i < scctx->isc_nrxqsets; i++) {
			rxq = &ctx->ifc_rxqs[i];
			for (j = 0; j < rxq->ifr_nfl; j++) {
				/* Cache the cluster zone for each free list. */
				fl = &rxq->ifr_fl[j];
				fl->ifl_zone = m_getzone(fl->ifl_buf_size);
			}
		}
		iflib_no_tx_batch = 1;
		break;
	default:
		break;
	}
}

static int
iflib_netdump_transmit(struct ifnet *ifp, struct mbuf *m)
{
	if_ctx_t ctx;
	iflib_txq_t txq;
	int error;

	ctx = if_getsoftc(ifp);
	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return (EBUSY);

	txq = &ctx->ifc_txqs[0];
	error = iflib_encap(txq, &m);
	if (error == 0)
		(void)iflib_txd_db_check(ctx, txq, true, txq->ift_in_use);
	return (error);
}

static int
iflib_netdump_poll(struct ifnet *ifp, int count)
{
	if_ctx_t ctx;
	if_softc_ctx_t scctx;
	iflib_txq_t txq;
	int i;

	ctx = if_getsoftc(ifp);
	scctx = &ctx->ifc_softc_ctx;

	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return (EBUSY);

	txq = &ctx->ifc_txqs[0];
	(void)iflib_completed_tx_reclaim(txq, RECLAIM_THRESH(ctx));

	for (i = 0; i < scctx->isc_nrxqsets; i++)
		(void)iflib_rxeof(&ctx->ifc_rxqs[i], 16 /* XXX */);
	return (0);
}
#endif /* NETDUMP */
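/*
 * Hookup sketch (assumes the NETDUMP_DEFINE()/NETDUMP_SET() macros from
 * <netinet/netdump/netdump.h>; exact placement is illustrative): the
 * methods above are bound to the ifnet when the device registers, so
 * netdump can drive queue 0 in polled mode after a panic:
 *
 *	NETDUMP_DEFINE(iflib);
 *	...
 *	NETDUMP_SET(ifp, iflib);
 */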