/*
 * Copyright (C) 2013-2016 Vincenzo Maffione
 * Copyright (C) 2013-2016 Luigi Rizzo
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *   1. Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *   2. Redistributions in binary form must reproduce the above copyright
 *      notice, this list of conditions and the following disclaimer in the
 *      documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * This module implements netmap support on top of standard,
 * unmodified device drivers.
 *
 * A NIOCREGIF request is handled here if the device does not
 * have native support. TX and RX rings are emulated as follows:
 *
 * NIOCREGIF
 *	We preallocate a block of TX mbufs (roughly as many as
 *	the tx descriptors; the exact number is not critical) to speed up
 *	operation during transmissions. The refcount on most of
 *	these buffers is artificially bumped up so we can recycle
 *	them more easily. Also, the destructor is intercepted
 *	so we can use it as an interrupt notification to wake up
 *	processes blocked on a poll().
 *
 *	For each receive ring we allocate one "struct mbq"
 *	(an mbuf tailq plus a spinlock). We intercept packets
 *	(through if_input) on the receive path and put them in the
 *	mbq, from which the netmap receive routines can grab them.
 *
 * TX:
 *	In the generic_txsync() routine, netmap buffers are copied
 *	(or linked, in the future) to the preallocated mbufs
 *	and pushed to the transmit queue. Some of these mbufs
 *	(those with NS_REPORT, or otherwise every half ring)
 *	have the refcount set to 1, the others to 2.
 *	When the destructor is invoked, we take that as
 *	a notification that all mbufs up to that one in
 *	the specific ring have been completed, and generate
 *	the equivalent of a transmit interrupt.
 *
 * RX:
 *	Intercepted packets are queued in the per-ring mbq; the
 *	generic_netmap_rxsync() routine then copies their payload
 *	into the netmap receive slots.
 */
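
/*
 * From the application's point of view the emulated adapter is used
 * exactly like a native one. A minimal, purely illustrative user-space
 * sketch (assuming the standard netmap_user.h helpers; not part of this
 * module, and build_frame() is a hypothetical helper):
 *
 *	struct nm_desc *d = nm_open("netmap:em0", NULL, 0, NULL);
 *	struct netmap_ring *txr = NETMAP_TXRING(d->nifp, d->first_tx_ring);
 *
 *	while (nm_ring_space(txr)) {
 *		uint32_t cur = txr->cur;
 *		char *buf = NETMAP_BUF(txr, txr->slot[cur].buf_idx);
 *
 *		txr->slot[cur].len = build_frame(buf);
 *		txr->head = txr->cur = nm_ring_next(txr, cur);
 *	}
 *	ioctl(d->fd, NIOCTXSYNC, NULL);	// ends up in generic_netmap_txsync()
 *
 * The NIOCREGIF issued by nm_open() is what triggers
 * generic_netmap_attach()/generic_netmap_register() below when the
 * device has no native netmap support.
 */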

#ifdef __FreeBSD__

#include <sys/cdefs.h> /* prerequisite */
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/lock.h>   /* PROT_EXEC */
#include <sys/rwlock.h>
#include <sys/socket.h> /* sockaddrs */
#include <sys/selinfo.h>
#include <net/if.h>
#include <net/if_var.h>
#include <machine/bus.h>        /* bus_dmamap_* in netmap_kern.h */

// XXX temporary - D() defined here
#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>
#include <dev/netmap/netmap_mem2.h>

#define rtnl_lock()	ND("rtnl_lock called")
#define rtnl_unlock()	ND("rtnl_unlock called")
#define MBUF_RXQ(m)	((m)->m_pkthdr.flowid)
#define smp_mb()

/*
 * FreeBSD mbuf allocator/deallocator in emulation mode:
 */
#if __FreeBSD_version < 1100000

/*
 * For older versions of FreeBSD:
 *
 * We allocate EXT_PACKET mbuf+clusters, but need to set M_NOFREE
 * so that the destructor, if invoked, will not free the packet.
 * In principle we should set the destructor only on demand,
 * but since there might be a race we better do it on allocation.
 * As a consequence, we also need to set the destructor at allocation
 * time, or we would leak buffers.
 */

/* mbuf destructor: we also need to change the type to EXT_EXTREF,
 * add the M_NOFREE flag, and then clear the flag and
 * chain into uma_zfree(zone_pack, mf)
 * (or reinstall the buffer ?)
 */
static inline void
set_mbuf_destructor(struct mbuf *m, void *fn)
{

	m->m_ext.ext_free = fn;
	m->m_ext.ext_type = EXT_EXTREF;
}

static int
void_mbuf_dtor(struct mbuf *m, void *arg1, void *arg2)
{
	/* restore original mbuf */
	m->m_ext.ext_buf = m->m_data = m->m_ext.ext_arg1;
	m->m_ext.ext_arg1 = NULL;
	m->m_ext.ext_type = EXT_PACKET;
	m->m_ext.ext_free = NULL;
	if (MBUF_REFCNT(m) == 0)
		SET_MBUF_REFCNT(m, 1);
	uma_zfree(zone_pack, m);

	return 0;
}

static inline struct mbuf *
nm_os_get_mbuf(struct ifnet *ifp, int len)
{
	struct mbuf *m;

	(void)ifp;
	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m) {
		/* m_getcl() (mb_ctor_mbuf) has an assert that checks that
		 * the M_NOFREE flag is not specified as third argument,
		 * so we have to set M_NOFREE after m_getcl(). */
		m->m_flags |= M_NOFREE;
		m->m_ext.ext_arg1 = m->m_ext.ext_buf; // XXX save
		m->m_ext.ext_free = (void *)void_mbuf_dtor;
		m->m_ext.ext_type = EXT_EXTREF;
		ND(5, "create m %p refcnt %d", m, MBUF_REFCNT(m));
	}
	return m;
}

#else /* __FreeBSD_version >= 1100000 */

/*
 * Newer versions of FreeBSD use a more straightforward scheme.
 *
 * We allocate mbufs with m_gethdr(), since the mbuf header is needed
 * by the driver. We also attach a custom external storage, which
 * in this case is a netmap buffer. When calling m_extadd(), however,
 * we pass a NULL address, since the real address (and length) will be
 * filled in by nm_os_generic_xmit_frame() right before calling
 * if_transmit().
 *
 * The dtor function does nothing, but we need it since mb_free_ext()
 * has a KASSERT() checking that the mbuf dtor function is not NULL.
 */

static void void_mbuf_dtor(struct mbuf *m, void *arg1, void *arg2) { }

static inline void
set_mbuf_destructor(struct mbuf *m, void *fn)
{

	m->m_ext.ext_free = (fn != NULL) ?
		fn : (void *)void_mbuf_dtor;
}

static inline struct mbuf *
nm_os_get_mbuf(struct ifnet *ifp, int len)
{
	struct mbuf *m;

	(void)ifp;
	(void)len;

	m = m_gethdr(M_NOWAIT, MT_DATA);
	if (m == NULL) {
		return m;
	}

	m_extadd(m, NULL /* buf */, 0 /* size */, void_mbuf_dtor,
			NULL, NULL, 0, EXT_NET_DRV);

	return m;
}

#endif /* __FreeBSD_version >= 1100000 */

#elif defined _WIN32

#include "win_glue.h"

#define rtnl_lock()	ND("rtnl_lock called")
#define rtnl_unlock()	ND("rtnl_unlock called")
#define MBUF_TXQ(m)	0 //((m)->m_pkthdr.flowid)
#define MBUF_RXQ(m)	0 //((m)->m_pkthdr.flowid)
#define smp_mb()	//XXX: to be correctly defined

#else /* linux */

#include "bsd_glue.h"

#include <linux/rtnetlink.h>    /* rtnl_[un]lock() */
#include <linux/ethtool.h>      /* struct ethtool_ops, get_ringparam */
#include <linux/hrtimer.h>

static inline struct mbuf *
nm_os_get_mbuf(struct ifnet *ifp, int len)
{
	return alloc_skb(ifp->needed_headroom + len +
			 ifp->needed_tailroom, GFP_ATOMIC);
}

#endif /* linux */


/* Common headers. */
#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>
#include <dev/netmap/netmap_mem2.h>


#define for_each_kring_n(_i, _k, _karr, _n) \
	for (_k = _karr, _i = 0; _i < _n; (_k)++, (_i)++)

#define for_each_tx_kring(_i, _k, _na) \
	for_each_kring_n(_i, _k, (_na)->tx_rings, (_na)->num_tx_rings)
#define for_each_tx_kring_h(_i, _k, _na) \
	for_each_kring_n(_i, _k, (_na)->tx_rings, (_na)->num_tx_rings + 1)

#define for_each_rx_kring(_i, _k, _na) \
	for_each_kring_n(_i, _k, (_na)->rx_rings, (_na)->num_rx_rings)
#define for_each_rx_kring_h(_i, _k, _na) \
	for_each_kring_n(_i, _k, (_na)->rx_rings, (_na)->num_rx_rings + 1)


/* ======================== PERFORMANCE STATISTICS =========================== */

#ifdef RATE_GENERIC
#define IFRATE(x) x
struct rate_stats {
	unsigned long txpkt;
	unsigned long txsync;
	unsigned long txirq;
	unsigned long txrepl;
	unsigned long txdrop;
	unsigned long rxpkt;
	unsigned long rxirq;
	unsigned long rxsync;
};

struct rate_context {
	unsigned refcount;
	struct timer_list timer;
	struct rate_stats new;
	struct rate_stats old;
};

#define RATE_PRINTK(_NAME_) \
	printk( #_NAME_ " = %lu Hz\n", (cur._NAME_ - ctx->old._NAME_)/RATE_PERIOD);
#define RATE_PERIOD 2
static void rate_callback(unsigned long arg)
{
	struct rate_context *ctx = (struct rate_context *)arg;
	struct rate_stats cur = ctx->new;
	int r;

	RATE_PRINTK(txpkt);
	RATE_PRINTK(txsync);
	RATE_PRINTK(txirq);
	RATE_PRINTK(txrepl);
	RATE_PRINTK(txdrop);
	RATE_PRINTK(rxpkt);
	RATE_PRINTK(rxsync);
	RATE_PRINTK(rxirq);
	printk("\n");

	ctx->old = cur;
	r = mod_timer(&ctx->timer, jiffies +
			msecs_to_jiffies(RATE_PERIOD * 1000));
	if (unlikely(r))
		D("[v1000] Error: mod_timer()");
}

static struct rate_context rate_ctx;

void generic_rate(int txp, int txs, int txi, int rxp, int rxs, int rxi)
{
	if (txp) rate_ctx.new.txpkt++;
	if (txs) rate_ctx.new.txsync++;
	if (txi) rate_ctx.new.txirq++;
	if (rxp) rate_ctx.new.rxpkt++;
	if (rxs) rate_ctx.new.rxsync++;
	if (rxi) rate_ctx.new.rxirq++;
}

#else /* !RATE */
#define IFRATE(x)
#endif /* !RATE */

/* =============== GENERIC NETMAP ADAPTER SUPPORT ================= */

/*
 * Wrapper used by the generic adapter layer to notify
 * the poller threads. Unlike netmap_rx_irq(), we check
 * only NAF_NETMAP_ON instead of NAF_NATIVE_ON to enable the irq.
 */
void
netmap_generic_irq(struct netmap_adapter *na, u_int q, u_int *work_done)
{
	if (unlikely(!nm_netmap_on(na)))
		return;

	netmap_common_irq(na, q, work_done);
#ifdef RATE_GENERIC
	if (work_done)
		rate_ctx.new.rxirq++;
	else
		rate_ctx.new.txirq++;
#endif /* RATE_GENERIC */
}

static int
generic_netmap_unregister(struct netmap_adapter *na)
{
	struct netmap_generic_adapter *gna = (struct netmap_generic_adapter *)na;
	struct netmap_kring *kring = NULL;
	int i, r;

	if (na->active_fds == 0) {
		D("Generic adapter %p goes off", na);
		rtnl_lock();

		na->na_flags &= ~NAF_NETMAP_ON;

		/* Release packet steering control. */
		nm_os_catch_tx(gna, 0);

		/* Stop intercepting packets on the RX path. */
		nm_os_catch_rx(gna, 0);

		rtnl_unlock();
	}

	for_each_rx_kring_h(r, kring, na) {
		if (nm_kring_pending_off(kring)) {
			D("RX ring %d of generic adapter %p goes off", r, na);
			kring->nr_mode = NKR_NETMAP_OFF;
		}
	}
	for_each_tx_kring_h(r, kring, na) {
		if (nm_kring_pending_off(kring)) {
			kring->nr_mode = NKR_NETMAP_OFF;
			D("TX ring %d of generic adapter %p goes off", r, na);
		}
	}

	for_each_rx_kring(r, kring, na) {
		/* Free the mbufs still pending in the RX queues,
		 * which did not end up in the corresponding netmap
		 * RX rings. */
		mbq_safe_purge(&kring->rx_queue);
		nm_os_mitigation_cleanup(&gna->mit[r]);
	}

	/* Decrement the reference counter for the mbufs in the
	 * TX pools. These mbufs can still be pending in the drivers
	 * (e.g. this happens with the virtio-net driver, which
	 * does lazy reclaiming of transmitted mbufs). */
	for_each_tx_kring(r, kring, na) {
		/* We must remove the destructor on the TX event,
		 * because the destructor invokes netmap code, and
		 * the netmap module may disappear before the
		 * TX event is consumed. */
		mtx_lock_spin(&kring->tx_event_lock);
		if (kring->tx_event) {
			set_mbuf_destructor(kring->tx_event, NULL);
		}
		kring->tx_event = NULL;
		mtx_unlock_spin(&kring->tx_event_lock);
	}

	if (na->active_fds == 0) {
		free(gna->mit, M_DEVBUF);

		for_each_rx_kring(r, kring, na) {
			mbq_safe_fini(&kring->rx_queue);
		}

		for_each_tx_kring(r, kring, na) {
			mtx_destroy(&kring->tx_event_lock);
			if (kring->tx_pool == NULL) {
				continue;
			}

			for (i = 0; i < na->num_tx_desc; i++) {
				if (kring->tx_pool[i]) {
					m_freem(kring->tx_pool[i]);
				}
			}
			free(kring->tx_pool, M_DEVBUF);
			kring->tx_pool = NULL;
		}

#ifdef RATE_GENERIC
		if (--rate_ctx.refcount == 0) {
			D("del_timer()");
			del_timer(&rate_ctx.timer);
		}
#endif
	}

	return 0;
}

/* Enable/disable netmap mode for a generic network interface. */
static int
generic_netmap_register(struct netmap_adapter *na, int enable)
{
	struct netmap_generic_adapter *gna = (struct netmap_generic_adapter *)na;
	struct netmap_kring *kring = NULL;
	int error;
	int i, r;

	if (!na) {
		return EINVAL;
	}

	if (!enable) {
		/* This is actually an unregif. */
		return generic_netmap_unregister(na);
	}

	if (na->active_fds == 0) {
		D("Generic adapter %p goes on", na);
		/* Do all memory allocations when (na->active_fds == 0), to
		 * simplify error management. */

		/* Allocate memory for mitigation support on all the rx queues. */
		gna->mit = malloc(na->num_rx_rings * sizeof(struct nm_generic_mit),
				M_DEVBUF, M_NOWAIT | M_ZERO);
		if (!gna->mit) {
			D("mitigation allocation failed");
			error = ENOMEM;
			goto out;
		}

		for_each_rx_kring(r, kring, na) {
			/* Init mitigation support. */
			nm_os_mitigation_init(&gna->mit[r], r, na);

			/* Initialize the rx queue, as generic_rx_handler() can
			 * be called as soon as nm_os_catch_rx() returns.
			 */
			mbq_safe_init(&kring->rx_queue);
		}

		/*
		 * Prepare mbuf pools (parallel to the tx rings), for packet
		 * transmission. Don't preallocate the mbufs here, it's simpler
		 * to leave this task to txsync.
		 */
		for_each_tx_kring(r, kring, na) {
			kring->tx_pool = NULL;
		}
		for_each_tx_kring(r, kring, na) {
			kring->tx_pool =
				malloc(na->num_tx_desc * sizeof(struct mbuf *),
					M_DEVBUF, M_NOWAIT | M_ZERO);
			if (!kring->tx_pool) {
				D("tx_pool allocation failed");
				error = ENOMEM;
				goto free_tx_pools;
			}
			mtx_init(&kring->tx_event_lock, "tx_event_lock",
					NULL, MTX_SPIN);
		}
	}

	for_each_rx_kring_h(r, kring, na) {
		if (nm_kring_pending_on(kring)) {
			D("RX ring %d of generic adapter %p goes on", r, na);
			kring->nr_mode = NKR_NETMAP_ON;
		}
	}
	for_each_tx_kring_h(r, kring, na) {
		if (nm_kring_pending_on(kring)) {
			D("TX ring %d of generic adapter %p goes on", r, na);
			kring->nr_mode = NKR_NETMAP_ON;
		}
	}

	for_each_tx_kring(r, kring, na) {
		/* Initialize tx_pool and tx_event. */
		for (i = 0; i < na->num_tx_desc; i++) {
			kring->tx_pool[i] = NULL;
		}

		kring->tx_event = NULL;
	}

	if (na->active_fds == 0) {
		rtnl_lock();

		/* Prepare to intercept incoming traffic. */
		error = nm_os_catch_rx(gna, 1);
		if (error) {
			D("nm_os_catch_rx(1) failed (%d)", error);
			goto register_handler;
		}

		/* Make netmap control the packet steering. */
		error = nm_os_catch_tx(gna, 1);
		if (error) {
			D("nm_os_catch_tx(1) failed (%d)", error);
			goto catch_rx;
		}

		rtnl_unlock();

		na->na_flags |= NAF_NETMAP_ON;

#ifdef RATE_GENERIC
		if (rate_ctx.refcount == 0) {
			D("setup_timer()");
			memset(&rate_ctx, 0, sizeof(rate_ctx));
			setup_timer(&rate_ctx.timer, &rate_callback, (unsigned long)&rate_ctx);
			if (mod_timer(&rate_ctx.timer, jiffies + msecs_to_jiffies(1500))) {
				D("Error: mod_timer()");
			}
		}
		rate_ctx.refcount++;
#endif /* RATE */
	}

	return 0;

	/* Here (na->active_fds == 0) holds. */
catch_rx:
	nm_os_catch_rx(gna, 0);
register_handler:
	rtnl_unlock();
free_tx_pools:
	for_each_tx_kring(r, kring, na) {
		mtx_destroy(&kring->tx_event_lock);
		if (kring->tx_pool == NULL) {
			continue;
		}
		free(kring->tx_pool, M_DEVBUF);
		kring->tx_pool = NULL;
	}
	for_each_rx_kring(r, kring, na) {
		mbq_safe_fini(&kring->rx_queue);
	}
	free(gna->mit, M_DEVBUF);
out:

	return error;
}

/*
 * Callback invoked when the device driver frees an mbuf used
 * by netmap to transmit a packet. This usually happens when
 * the NIC notifies the driver that transmission is completed.
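 *
 * (Summary of the refcount handoff, for reference: in non-txqdisc mode
 * a tx_pool mbuf handed to the driver normally has refcount 2, one
 * reference held by netmap and one by the driver. When
 * generic_set_tx_event() picks an mbuf as the "event", it installs this
 * destructor and drops the netmap reference with m_freem(); the
 * destructor then runs as soon as the driver releases its own
 * reference.)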
 */
static void
generic_mbuf_destructor(struct mbuf *m)
{
	struct netmap_adapter *na = NA(GEN_TX_MBUF_IFP(m));
	struct netmap_kring *kring;
	unsigned int r = MBUF_TXQ(m);
	unsigned int r_orig = r;

	if (unlikely(!nm_netmap_on(na) || r >= na->num_tx_rings)) {
		D("Error: no netmap adapter on device %p",
		  GEN_TX_MBUF_IFP(m));
		return;
	}

	/*
	 * First, clear the event mbuf.
	 * In principle, the event 'm' should match the one stored
	 * on ring 'r'. However we check it explicitly to stay
	 * safe against lower layers (qdisc, driver, etc.) changing
	 * MBUF_TXQ(m) under our feet. If the match is not found
	 * on 'r', we try to see if it belongs to some other ring.
	 */
	for (;;) {
		bool match = false;

		kring = &na->tx_rings[r];
		mtx_lock_spin(&kring->tx_event_lock);
		if (kring->tx_event == m) {
			kring->tx_event = NULL;
			match = true;
		}
		mtx_unlock_spin(&kring->tx_event_lock);

		if (match) {
			if (r != r_orig) {
				RD(1, "event %p migrated: ring %u --> %u",
				   m, r_orig, r);
			}
			break;
		}

		if (++r == na->num_tx_rings) r = 0;

		if (r == r_orig) {
			RD(1, "Cannot match event %p", m);
			return;
		}
	}

	/* Second, wake up clients. They will reclaim the event through
	 * txsync. */
	netmap_generic_irq(na, r, NULL);
#ifdef __FreeBSD__
	void_mbuf_dtor(m, NULL, NULL);
#endif
}

/* Record completed transmissions and update hwtail.
 *
 * The oldest tx buffer not yet completed is at nr_hwtail + 1;
 * nr_hwcur is the first unsent buffer.
 */
static u_int
generic_netmap_tx_clean(struct netmap_kring *kring, int txqdisc)
{
	u_int const lim = kring->nkr_num_slots - 1;
	u_int nm_i = nm_next(kring->nr_hwtail, lim);
	u_int hwcur = kring->nr_hwcur;
	u_int n = 0;
	struct mbuf **tx_pool = kring->tx_pool;

	ND("hwcur = %d, hwtail = %d", kring->nr_hwcur, kring->nr_hwtail);

	while (nm_i != hwcur) { /* buffers not completed */
		struct mbuf *m = tx_pool[nm_i];

		if (txqdisc) {
			if (m == NULL) {
				/* Nothing to do, this is going
				 * to be replenished. */
				RD(3, "Is this happening?");

			} else if (MBUF_QUEUED(m)) {
				break; /* Not dequeued yet. */

			} else if (MBUF_REFCNT(m) != 1) {
				/* This mbuf has been dequeued but is still busy
				 * (refcount is 2).
				 * Leave it to the driver and replenish. */
				m_freem(m);
				tx_pool[nm_i] = NULL;
			}

		} else {
			if (unlikely(m == NULL)) {
				int event_consumed;

				/* This slot was used to place an event. */
				mtx_lock_spin(&kring->tx_event_lock);
				event_consumed = (kring->tx_event == NULL);
				mtx_unlock_spin(&kring->tx_event_lock);
				if (!event_consumed) {
					/* The event has not been consumed yet,
					 * still busy in the driver. */
					break;
				}
				/* The event has been consumed, we can go
				 * ahead. */

			} else if (MBUF_REFCNT(m) != 1) {
				/* This mbuf is still busy: its refcnt is 2. */
				break;
			}
		}

		n++;
		nm_i = nm_next(nm_i, lim);
	}
	kring->nr_hwtail = nm_prev(nm_i, lim);
	ND("tx completed [%d] -> hwtail %d", n, kring->nr_hwtail);

	return n;
}

/* Compute a slot index in the middle between inf and sup.
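 *
 * Illustrative arithmetic (not from the original sources): with
 * lim = 255 (256 slots), inf = 250 and sup = 10 the range wraps, so the
 * result is (10 + 256 + 250)/2 - 256 = 2; without wrapping, e.g.
 * inf = 10 and sup = 250, the result is simply (250 + 10)/2 = 130.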
 */
static inline u_int
ring_middle(u_int inf, u_int sup, u_int lim)
{
	u_int n = lim + 1;
	u_int e;

	if (sup >= inf) {
		e = (sup + inf) / 2;
	} else { /* wrap around */
		e = (sup + n + inf) / 2;
		if (e >= n) {
			e -= n;
		}
	}

	if (unlikely(e >= n)) {
		D("This cannot happen");
		e = 0;
	}

	return e;
}

static void
generic_set_tx_event(struct netmap_kring *kring, u_int hwcur)
{
	u_int lim = kring->nkr_num_slots - 1;
	struct mbuf *m;
	u_int e;
	u_int ntc = nm_next(kring->nr_hwtail, lim); /* next to clean */

	if (ntc == hwcur) {
		return; /* all buffers are free */
	}

	/*
	 * We have pending packets in the driver between hwtail+1
	 * and hwcur, and we have to choose one of these slots to
	 * generate a notification.
	 * There is a race, but this is only called within txsync which
	 * does a double check.
	 */
#if 0
	/* Choose a slot in the middle, so that we don't risk ending
	 * up in a situation where the client continuously wakes up,
	 * fills one or a few TX slots and goes to sleep again. */
	e = ring_middle(ntc, hwcur, lim);
#else
	/* Choose the first pending slot, to be safe against driver
	 * reordering mbuf transmissions. */
	e = ntc;
#endif

	m = kring->tx_pool[e];
	if (m == NULL) {
		/* An event is already in place. */
		return;
	}

	mtx_lock_spin(&kring->tx_event_lock);
	if (kring->tx_event) {
		/* An event is already in place. */
		mtx_unlock_spin(&kring->tx_event_lock);
		return;
	}

	set_mbuf_destructor(m, generic_mbuf_destructor);
	kring->tx_event = m;
	mtx_unlock_spin(&kring->tx_event_lock);

	kring->tx_pool[e] = NULL;

	ND(5, "Request Event at %d mbuf %p refcnt %d", e, m, m ? MBUF_REFCNT(m) : -2);

	/* Decrement the refcount. This will free it if we lose the race
	 * with the driver. */
	m_freem(m);
	smp_mb();
}


/*
 * generic_netmap_txsync() transforms netmap buffers into mbufs
 * and passes them to the standard device driver
 * (ndo_start_xmit() or ifp->if_transmit()).
 * On linux this is not done directly, but using dev_queue_xmit(),
 * since it implements the TX flow control (and takes some locks).
 */
static int
generic_netmap_txsync(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	struct netmap_generic_adapter *gna = (struct netmap_generic_adapter *)na;
	struct ifnet *ifp = na->ifp;
	struct netmap_ring *ring = kring->ring;
	u_int nm_i;	/* index into the netmap ring */ // j
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = kring->rhead;
	u_int ring_nr = kring->ring_id;

	IFRATE(rate_ctx.new.txsync++);

	rmb();

	/*
	 * First part: process new packets to send.
	 */
	nm_i = kring->nr_hwcur;
	if (nm_i != head) {	/* we have new packets to send */
		struct nm_os_gen_arg a;
		u_int event = -1;

		if (gna->txqdisc && nm_kr_txempty(kring)) {
			/* In txqdisc mode, we ask for a delayed notification,
			 * but only when cur == hwtail, which means that the
			 * client is going to block.
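			 * The chosen slot sits roughly in the middle of the
			 * batch being submitted (see ring_middle() above), so
			 * the notification does not refer to the very last
			 * pending slot.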
			 */
			event = ring_middle(nm_i, head, lim);
			ND(3, "Place txqdisc event (hwcur=%u,event=%u,"
			      "head=%u,hwtail=%u)", nm_i, event, head,
			      kring->nr_hwtail);
		}

		a.ifp = ifp;
		a.ring_nr = ring_nr;
		a.head = a.tail = NULL;

		while (nm_i != head) {
			struct netmap_slot *slot = &ring->slot[nm_i];
			u_int len = slot->len;
			void *addr = NMB(na, slot);
			/* device-specific */
			struct mbuf *m;
			int tx_ret;

			NM_CHECK_ADDR_LEN(na, addr, len);

			/* Take an mbuf from the tx pool (replenishing the pool
			 * entry if necessary) and copy in the user packet. */
			m = kring->tx_pool[nm_i];
			if (unlikely(m == NULL)) {
				kring->tx_pool[nm_i] = m =
					nm_os_get_mbuf(ifp, NETMAP_BUF_SIZE(na));
				if (m == NULL) {
					RD(2, "Failed to replenish mbuf");
					/* Here we could schedule a timer which
					 * retries to replenish after a while,
					 * and notifies the client when it
					 * manages to replenish some slots. In
					 * any case we break early to avoid
					 * crashes. */
					break;
				}
				IFRATE(rate_ctx.new.txrepl++);
			}

			a.m = m;
			a.addr = addr;
			a.len = len;
			a.qevent = (nm_i == event);
			/* When not in txqdisc mode, we should ask for
			 * notifications when NS_REPORT is set, or roughly
			 * every half ring. To optimize this, we set a
			 * notification event when the client runs out of
			 * TX ring space, or when transmission fails. In
			 * the latter case we also break early.
			 */
			tx_ret = nm_os_generic_xmit_frame(&a);
			if (unlikely(tx_ret)) {
				if (!gna->txqdisc) {
					/*
					 * No room for this mbuf in the device driver.
					 * Request a notification FOR A PREVIOUS MBUF,
					 * then call generic_netmap_tx_clean(kring) to do the
					 * double check and see if we can free more buffers.
					 * If there is space continue, else break;
					 * NOTE: the double check is necessary if the problem
					 * occurs in the txsync call after selrecord().
					 * Also, we need some way to tell the caller that not
					 * all buffers were queued onto the device (this was
					 * not a problem with the native netmap driver where
					 * space is preallocated). The bridge has a similar
					 * problem and we solve it there by dropping the
					 * excess packets.
					 */
					generic_set_tx_event(kring, nm_i);
					if (generic_netmap_tx_clean(kring, gna->txqdisc)) {
						/* space now available */
						continue;
					} else {
						break;
					}
				}

				/* In txqdisc mode, the netmap-aware qdisc
				 * queue has the same length as the number of
				 * netmap slots (N). Since tail is advanced
				 * only when packets are dequeued, qdisc
				 * queue overrun cannot happen, so
				 * nm_os_generic_xmit_frame() did not fail
				 * because of that.
				 * However, packets can be dropped because
				 * the carrier is off, or because our qdisc is
				 * being deactivated, or possibly for other
				 * reasons. In these cases, we just let the
				 * packet be dropped. */
				IFRATE(rate_ctx.new.txdrop++);
			}

			slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED);
			nm_i = nm_next(nm_i, lim);
			IFRATE(rate_ctx.new.txpkt++);
		}
		if (a.head != NULL) {
			a.addr = NULL;
			nm_os_generic_xmit_frame(&a);
		}
		/* Update hwcur to the next slot to transmit. Here nm_i
		 * is not necessarily head, since we could break early. */
		kring->nr_hwcur = nm_i;
	}

	/*
	 * Second, reclaim completed buffers
	 */
	if (!gna->txqdisc && (flags & NAF_FORCE_RECLAIM || nm_kr_txempty(kring))) {
		/* No more available slots?
		 * Set a notification event on a netmap slot that will be
		 * cleaned in the future. No doublecheck is performed, since
		 * txsync() will be called twice by netmap_poll().
		 */
		generic_set_tx_event(kring, nm_i);
	}

	generic_netmap_tx_clean(kring, gna->txqdisc);

	return 0;
}


/*
 * This handler is registered (through nm_os_catch_rx())
 * within the attached network interface
 * in the RX subsystem, so that every mbuf passed up by
 * the driver can be stolen before it reaches the network stack.
 * Stolen packets are put in a queue from which the
 * generic_netmap_rxsync() callback can extract them.
 * Returns 1 if the packet was stolen, 0 otherwise.
 */
int
generic_rx_handler(struct ifnet *ifp, struct mbuf *m)
{
	struct netmap_adapter *na = NA(ifp);
	struct netmap_generic_adapter *gna = (struct netmap_generic_adapter *)na;
	struct netmap_kring *kring;
	u_int work_done;
	u_int r = MBUF_RXQ(m); /* receive ring number */

	if (r >= na->num_rx_rings) {
		r = r % na->num_rx_rings;
	}

	kring = &na->rx_rings[r];

	if (kring->nr_mode == NKR_NETMAP_OFF) {
		/* We must not intercept this mbuf. */
		return 0;
	}

	/* limit the size of the queue */
	if (unlikely(!gna->rxsg && MBUF_LEN(m) > NETMAP_BUF_SIZE(na))) {
		/* This may happen when GRO/LRO features are enabled for
		 * the NIC driver while the generic adapter does not
		 * support RX scatter-gather. */
		RD(2, "Warning: driver pushed up big packet "
		      "(size=%d)", (int)MBUF_LEN(m));
		m_freem(m);
	} else if (unlikely(mbq_len(&kring->rx_queue) > 1024)) {
		m_freem(m);
	} else {
		mbq_safe_enqueue(&kring->rx_queue, m);
	}

	if (netmap_generic_mit < 32768) {
		/* no rx mitigation, pass notification up */
		netmap_generic_irq(na, r, &work_done);
	} else {
		/* same as send combining, filter notification if there is a
		 * pending timer, otherwise pass it up and start a timer.
		 */
		if (likely(nm_os_mitigation_active(&gna->mit[r]))) {
			/* Record that there is some pending work. */
			gna->mit[r].mit_pending = 1;
		} else {
			netmap_generic_irq(na, r, &work_done);
			nm_os_mitigation_start(&gna->mit[r]);
		}
	}

	/* We have intercepted the mbuf. */
	return 1;
}
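
/*
 * Illustrative example (not from the original sources): with 2048-byte
 * netmap buffers and RX scatter-gather enabled (gna->rxsg), a 3000-byte
 * mbuf queued by the handler above is later split by the rxsync routine
 * below into two slots: 2048 bytes with NS_MOREFRAG set, followed by
 * 952 bytes with NS_MOREFRAG cleared.
 */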

/*
 * generic_netmap_rxsync() extracts mbufs from the queue filled by
 * generic_rx_handler() and puts their content in the netmap
 * receive ring.
 * Access must be protected because the rx handler is asynchronous.
 */
static int
generic_netmap_rxsync(struct netmap_kring *kring, int flags)
{
	struct netmap_ring *ring = kring->ring;
	struct netmap_adapter *na = kring->na;
	u_int nm_i;	/* index into the netmap ring */ //j,
	u_int n;
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = kring->rhead;
	int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR;

	/* Adapter-specific variables. */
	uint16_t slot_flags = kring->nkr_slot_flags;
	u_int nm_buf_len = NETMAP_BUF_SIZE(na);
	struct mbq tmpq;
	struct mbuf *m;
	int avail; /* in bytes */
	int mlen;
	int copy;

	if (head > lim)
		return netmap_ring_reinit(kring);

	IFRATE(rate_ctx.new.rxsync++);

	/*
	 * First part: skip past packets that userspace has released.
	 * This can possibly make room for the second part.
	 */
	nm_i = kring->nr_hwcur;
	if (nm_i != head) {
		/* Userspace has released some packets. */
		for (n = 0; nm_i != head; n++) {
			struct netmap_slot *slot = &ring->slot[nm_i];

			slot->flags &= ~NS_BUF_CHANGED;
			nm_i = nm_next(nm_i, lim);
		}
		kring->nr_hwcur = head;
	}

	/*
	 * Second part: import newly received packets.
	 */
	if (!netmap_no_pendintr && !force_update) {
		return 0;
	}

	nm_i = kring->nr_hwtail; /* First empty slot in the receive ring. */

	/* Compute the available space (in bytes) in this netmap ring.
	 * The first slot that is not considered is the one before
	 * nr_hwcur. */
	avail = nm_prev(kring->nr_hwcur, lim) - nm_i;
	if (avail < 0)
		avail += lim + 1;
	avail *= nm_buf_len;

	/* First pass: While holding the lock on the RX mbuf queue,
	 * extract as many mbufs as fit in the available space,
	 * and put them in a temporary queue.
	 * To avoid performing a per-mbuf division (mlen / nm_buf_len)
	 * to update avail, we do the update in a while loop that we
	 * also use to set the RX slots, but without performing the copy. */
	mbq_init(&tmpq);
	mbq_lock(&kring->rx_queue);
	for (n = 0;; n++) {
		m = mbq_peek(&kring->rx_queue);
		if (!m) {
			/* No more packets from the driver. */
			break;
		}

		mlen = MBUF_LEN(m);
		if (mlen > avail) {
			/* No more space in the ring. */
			break;
		}

		mbq_dequeue(&kring->rx_queue);

		while (mlen) {
			copy = nm_buf_len;
			if (mlen < copy) {
				copy = mlen;
			}
			mlen -= copy;
			avail -= nm_buf_len;

			ring->slot[nm_i].len = copy;
			ring->slot[nm_i].flags = slot_flags | (mlen ? NS_MOREFRAG : 0);
			nm_i = nm_next(nm_i, lim);
		}

		mbq_enqueue(&tmpq, m);
	}
	mbq_unlock(&kring->rx_queue);

	/* Second pass: Drain the temporary queue, going over the used RX slots,
	 * and perform the copy outside of the RX queue lock. */
	nm_i = kring->nr_hwtail;

	for (;;) {
		void *nmaddr;
		int ofs = 0;
		int morefrag;

		m = mbq_dequeue(&tmpq);
		if (!m) {
			break;
		}

		do {
			nmaddr = NMB(na, &ring->slot[nm_i]);
			/* We only check the address here on generic rx rings. */
			if (nmaddr == NETMAP_BUF_BASE(na)) { /* Bad buffer */
				m_freem(m);
				mbq_purge(&tmpq);
				mbq_fini(&tmpq);
				return netmap_ring_reinit(kring);
			}

			copy = ring->slot[nm_i].len;
			m_copydata(m, ofs, copy, nmaddr);
			ofs += copy;
			morefrag = ring->slot[nm_i].flags & NS_MOREFRAG;
			nm_i = nm_next(nm_i, lim);
		} while (morefrag);

		m_freem(m);
	}

	mbq_fini(&tmpq);

	if (n) {
		kring->nr_hwtail = nm_i;
		IFRATE(rate_ctx.new.rxpkt += n);
	}
	kring->nr_kflags &= ~NKR_PENDINTR;

	return 0;
}

static void
generic_netmap_dtor(struct netmap_adapter *na)
{
	struct netmap_generic_adapter *gna = (struct netmap_generic_adapter *)na;
	struct ifnet *ifp = netmap_generic_getifp(gna);
	struct netmap_adapter *prev_na = gna->prev;

	if (prev_na != NULL) {
		D("Released generic NA %p", gna);
		netmap_adapter_put(prev_na);
		if (nm_iszombie(na)) {
			/*
			 * The driver has been removed without releasing
			 * the reference, so we need to do it here.
			 */
			netmap_adapter_put(prev_na);
		}
	}
	NM_ATTACH_NA(ifp, prev_na);
	/*
	 * netmap_detach_common(), which is called after this function,
	 * overrides WNA(ifp) if na->ifp is not NULL.
	 */
	na->ifp = NULL;
	D("Restored native NA %p", prev_na);
}

/*
 * generic_netmap_attach() makes it possible to use netmap on
 * a device without native netmap support.
 * This is less performant than native support but potentially
 * faster than raw sockets or similar schemes.
 *
 * In this "emulated" mode, netmap rings do not necessarily
 * have the same size as those in the NIC. We use a default
 * value and possibly override it if the OS has ways to fetch the
 * actual configuration.
 */
int
generic_netmap_attach(struct ifnet *ifp)
{
	struct netmap_adapter *na;
	struct netmap_generic_adapter *gna;
	int retval;
	u_int num_tx_desc, num_rx_desc;

	num_tx_desc = num_rx_desc = netmap_generic_ringsize; /* starting point */

	nm_os_generic_find_num_desc(ifp, &num_tx_desc, &num_rx_desc); /* ignore errors */
	ND("Netmap ring size: TX = %d, RX = %d", num_tx_desc, num_rx_desc);
	if (num_tx_desc == 0 || num_rx_desc == 0) {
		D("Device has no hw slots (tx %u, rx %u)", num_tx_desc, num_rx_desc);
		return EINVAL;
	}

	gna = malloc(sizeof(*gna), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (gna == NULL) {
		D("no memory on attach, give up");
		return ENOMEM;
	}
	na = (struct netmap_adapter *)gna;
	strncpy(na->name, ifp->if_xname, sizeof(na->name));
	na->ifp = ifp;
	na->num_tx_desc = num_tx_desc;
	na->num_rx_desc = num_rx_desc;
	na->nm_register = &generic_netmap_register;
	na->nm_txsync = &generic_netmap_txsync;
	na->nm_rxsync = &generic_netmap_rxsync;
	na->nm_dtor = &generic_netmap_dtor;
	/* when using generic, NAF_NETMAP_ON is set so we force
	 * NAF_SKIP_INTR to use the regular interrupt handler
	 */
	na->na_flags = NAF_SKIP_INTR | NAF_HOST_RINGS;

	ND("[GNA] num_tx_queues(%d), real_num_tx_queues(%d), len(%lu)",
			ifp->num_tx_queues, ifp->real_num_tx_queues,
			ifp->tx_queue_len);
	ND("[GNA] num_rx_queues(%d), real_num_rx_queues(%d)",
			ifp->num_rx_queues, ifp->real_num_rx_queues);

	nm_os_generic_find_num_queues(ifp, &na->num_tx_rings, &na->num_rx_rings);

	retval = netmap_attach_common(na);
	if (retval) {
		free(gna, M_DEVBUF);
		return retval;
	}

	gna->prev = NA(ifp); /* save old na */
	if (gna->prev != NULL) {
		netmap_adapter_get(gna->prev);
	}
	NM_ATTACH_NA(ifp, na);

	nm_os_generic_set_features(gna);

	D("Created generic NA %p (prev %p)", gna, gna->prev);

	return retval;
}