/*
 * Copyright (C) 2013 Universita` di Pisa. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * This module implements netmap support on top of standard,
 * unmodified device drivers.
 *
 * A NIOCREGIF request is handled here if the device does not
 * have native support. TX and RX rings are emulated as follows:
 *
 * NIOCREGIF
 *	We preallocate a block of TX mbufs (roughly as many as
 *	tx descriptors; the number is not critical) to speed up
 *	operation during transmissions. The refcount on most of
 *	these buffers is artificially bumped up so we can recycle
 *	them more easily. Also, the destructor is intercepted
 *	so we use it as an interrupt notification to wake up
 *	processes blocked on a poll().
 *
 *	For each receive ring we allocate one "struct mbq"
 *	(an mbuf tailq plus a spinlock). We intercept packets
 *	(through if_input) on the receive path and put them in
 *	the mbq, from which netmap receive routines can grab them.
 *
 * TX:
 *	In the generic_netmap_txsync() routine, netmap buffers are
 *	copied (or linked, in the future) to the preallocated mbufs
 *	and pushed to the transmit queue. Some of these mbufs
 *	(those with NS_REPORT, or otherwise every half ring)
 *	have the refcount=1, others have refcount=2.
 *	When the destructor is invoked, we take that as
 *	a notification that all mbufs up to that one in
 *	the specific ring have been completed, and generate
 *	the equivalent of a transmit interrupt.
 *
 * RX:
 *	Intercepted packets are queued by generic_rx_handler()
 *	into the per-ring mbq; generic_netmap_rxsync() then
 *	dequeues them, copies their payload into netmap buffers
 *	and optionally rate-limits the notifications to userspace
 *	through the mitigation timer.
 */
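
/*
 * Usage sketch (illustrative only, not part of this module): from
 * userspace an emulated adapter is driven exactly like a native one.
 * The interface name "em0" and the use of the first TX ring below are
 * arbitrary examples.
 *
 *	struct nmreq req;
 *	struct netmap_if *nifp;
 *	struct netmap_ring *txring;
 *	void *mem;
 *	int fd;
 *
 *	fd = open("/dev/netmap", O_RDWR);
 *	bzero(&req, sizeof(req));
 *	strncpy(req.nr_name, "em0", sizeof(req.nr_name));
 *	req.nr_version = NETMAP_API;
 *	ioctl(fd, NIOCREGIF, &req);	// served by this module if em0
 *					// lacks native netmap support
 *	mem = mmap(NULL, req.nr_memsize, PROT_READ | PROT_WRITE,
 *	    MAP_SHARED, fd, 0);
 *	nifp = NETMAP_IF(mem, req.nr_offset);
 *	txring = NETMAP_TXRING(nifp, 0);
 *	// fill slots, advance txring->cur and decrease txring->avail,
 *	// then poll(POLLOUT) or ioctl(fd, NIOCTXSYNC, NULL) to flush.
 */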

#include <sys/cdefs.h>		/* prerequisite */
__FBSDID("$FreeBSD: head/sys/dev/netmap/netmap.c 257666 2013-11-05 01:06:22Z luigi $");

#include <sys/types.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/lock.h>		/* PROT_EXEC */
#include <sys/socket.h>		/* sockaddrs */
#include <sys/event.h>
#include <net/if.h>
#include <net/if_var.h>
#include <sys/bus.h>		/* bus_dmamap_* in netmap_kern.h */

// XXX temporary - D() defined here
#include <net/netmap.h>
#include <net/netmap/netmap_kern.h>
#include <net/netmap/netmap_mem2.h>

#define rtnl_lock()	D("rtnl_lock called")
#define rtnl_unlock()	D("rtnl_unlock called")
#define MBUF_TXQ(m)	((m)->m_pkthdr.hash)
#define smp_mb()

/*
 * mbuf wrappers
 */

/*
 * we allocate an EXT_PACKET
 */
#define netmap_get_mbuf(len)	m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR)

/* mbuf destructor: we also need to change the type to EXT_EXTREF,
 * add an M_NOFREE flag, and then clear the flag and
 * chain into uma_zfree(zone_pack, mf)
 * (or reinstall the buffer ?)
 */
#define SET_MBUF_DESTRUCTOR(m, fn)	do {		\
	(m)->m_ext.ext_free = (void *)fn;		\
	/* (m)->m_ext.ext_type = EXT_EXTREF; */		\
} while (0)


#define GET_MBUF_REFCNT(m)	((m)->m_ext.ref_cnt ? *(m)->m_ext.ref_cnt : -1)

/* ======================== usage stats =========================== */

#ifdef RATE
#define IFRATE(x) x
struct rate_stats {
	unsigned long txpkt;
	unsigned long txsync;
	unsigned long txirq;
	unsigned long rxpkt;
	unsigned long rxirq;
	unsigned long rxsync;
};

struct rate_context {
	unsigned refcount;
	struct timer_list timer;
	struct rate_stats new;
	struct rate_stats old;
};

#define RATE_PRINTK(_NAME_) \
	printk( #_NAME_ " = %lu Hz\n", (cur._NAME_ - ctx->old._NAME_)/RATE_PERIOD);
#define RATE_PERIOD  2
static void rate_callback(unsigned long arg)
{
	struct rate_context *ctx = (struct rate_context *)arg;
	struct rate_stats cur = ctx->new;
	int r;

	RATE_PRINTK(txpkt);
	RATE_PRINTK(txsync);
	RATE_PRINTK(txirq);
	RATE_PRINTK(rxpkt);
	RATE_PRINTK(rxsync);
	RATE_PRINTK(rxirq);
	printk("\n");

	ctx->old = cur;
	r = mod_timer(&ctx->timer, jiffies +
			msecs_to_jiffies(RATE_PERIOD * 1000));
	if (unlikely(r))
		D("[v1000] Error: mod_timer()");
}

static struct rate_context rate_ctx;

#else /* !RATE */
#define IFRATE(x)
#endif /* !RATE */


/* =============== GENERIC NETMAP ADAPTER SUPPORT ================= */
#define GENERIC_BUF_SIZE	netmap_buf_size	/* Size of the mbufs in the Tx pool. */

/*
 * Wrapper used by the generic adapter layer to notify
 * the poller threads. Unlike netmap_rx_irq(), we check
 * only IFCAP_NETMAP instead of NAF_NATIVE_ON to enable the irq.
 */
static int
netmap_generic_irq(struct ifnet *ifp, u_int q, u_int *work_done)
{
	if (unlikely(!(ifp->if_capenable & IFCAP_NETMAP)))
		return 0;

	return netmap_common_irq(ifp, q, work_done);
}


/* Enable/disable netmap mode for a generic network interface. */
int generic_netmap_register(struct netmap_adapter *na, int enable)
{
	struct ifnet *ifp;
	struct netmap_generic_adapter *gna = (struct netmap_generic_adapter *)na;
	struct mbuf *m;
	int error;
	int i, r;

	if (!na)
		return EINVAL;
	ifp = na->ifp;

#ifdef REG_RESET
	error = ifp->netdev_ops->ndo_stop(ifp);
	if (error) {
		return error;
	}
#endif /* REG_RESET */

	if (enable) { /* Enable netmap mode. */
		/* Initialize the rx queue, as generic_rx_handler() can
		 * be called as soon as netmap_catch_rx() returns.
		 */
		for (r = 0; r < na->num_rx_rings; r++) {
			mbq_safe_init(&na->rx_rings[r].rx_queue);
			na->rx_rings[r].nr_ntc = 0;
		}

		/* Init the mitigation timer. */
		netmap_mitigation_init(gna);

		/*
		 * Preallocate packet buffers for the tx rings.
		 */
		for (r = 0; r < na->num_tx_rings; r++) {
			na->tx_rings[r].nr_ntc = 0;
			na->tx_rings[r].tx_pool = kmalloc(na->num_tx_desc * sizeof(struct mbuf *),
					M_DEVBUF, M_NOWAIT | M_ZERO);
			if (!na->tx_rings[r].tx_pool) {
				D("tx_pool allocation failed");
				error = ENOMEM;
				goto free_tx_pool;
			}
			for (i = 0; i < na->num_tx_desc; i++) {
				m = netmap_get_mbuf(GENERIC_BUF_SIZE);
				if (!m) {
					D("tx_pool[%d] allocation failed", i);
					error = ENOMEM;
					goto free_mbufs;
				}
				na->tx_rings[r].tx_pool[i] = m;
			}
		}
		rtnl_lock();
		/* Prepare to intercept incoming traffic. */
		error = netmap_catch_rx(na, 1);
		if (error) {
			D("netdev_rx_handler_register() failed");
			goto register_handler;
		}
		ifp->if_capenable |= IFCAP_NETMAP;

		/* Make netmap control the packet steering. */
		netmap_catch_packet_steering(gna, 1);

		rtnl_unlock();

#ifdef RATE
		if (rate_ctx.refcount == 0) {
			D("setup_timer()");
			memset(&rate_ctx, 0, sizeof(rate_ctx));
			setup_timer(&rate_ctx.timer, &rate_callback, (unsigned long)&rate_ctx);
			if (mod_timer(&rate_ctx.timer, jiffies + msecs_to_jiffies(1500))) {
				D("Error: mod_timer()");
			}
		}
		rate_ctx.refcount++;
#endif /* RATE */

	} else { /* Disable netmap mode. */
		rtnl_lock();

		ifp->if_capenable &= ~IFCAP_NETMAP;

		/* Release packet steering control. */
		netmap_catch_packet_steering(gna, 0);

		/* Do not intercept packets on the rx path. */
		netmap_catch_rx(na, 0);

		rtnl_unlock();

		/* Free the mbufs going to the netmap rings */
		for (r = 0; r < na->num_rx_rings; r++) {
			mbq_safe_purge(&na->rx_rings[r].rx_queue);
			mbq_safe_destroy(&na->rx_rings[r].rx_queue);
		}

		netmap_mitigation_cleanup(gna);

		for (r = 0; r < na->num_tx_rings; r++) {
			for (i = 0; i < na->num_tx_desc; i++) {
				m_freem(na->tx_rings[r].tx_pool[i]);
			}
			kfree(na->tx_rings[r].tx_pool, M_DEVBUF);
		}

#ifdef RATE
		if (--rate_ctx.refcount == 0) {
			D("del_timer()");
			del_timer(&rate_ctx.timer);
		}
#endif
	}

#ifdef REG_RESET
	error = ifp->netdev_ops->ndo_open(ifp);
	if (error) {
		goto alloc_tx_pool;
	}
#endif

	return 0;

register_handler:
	rtnl_unlock();
free_tx_pool:
	r--;
	i = na->num_tx_desc; /* Useless, but just to stay safe. */
free_mbufs:
	i--;
	for (; r >= 0; r--) {
		for (; i >= 0; i--) {
			m_freem(na->tx_rings[r].tx_pool[i]);
		}
		kfree(na->tx_rings[r].tx_pool, M_DEVBUF);
		i = na->num_tx_desc - 1;
	}

	return error;
}

/*
 * Callback invoked when the device driver frees an mbuf used
 * by netmap to transmit a packet. This usually happens when
 * the NIC notifies the driver that transmission is completed.
 */
static void
generic_mbuf_destructor(struct mbuf *m)
{
	if (netmap_verbose)
		D("Tx irq (%p) queue %d", m, MBUF_TXQ(m));
	netmap_generic_irq(MBUF_IFP(m), MBUF_TXQ(m), NULL);
#if 0
	m->m_ext.ext_type = EXT_PACKET;
#endif
	m->m_ext.ext_free = NULL;
#if 0
	if (*(m->m_ext.ref_cnt) == 0)
		*(m->m_ext.ref_cnt) = 1;
	uma_zfree(zone_pack, m);
#endif
	IFRATE(rate_ctx.new.txirq++);
}

/* Record completed transmissions and update hwavail.
 *
 * nr_ntc is the oldest tx buffer not yet completed
 * (same as nr_hwavail + nr_hwcur + 1),
 * nr_hwcur is the first unsent buffer.
 * When cleaning, we try to recover buffers between nr_ntc and nr_hwcur.
 */
static int
generic_netmap_tx_clean(struct netmap_kring *kring)
{
	u_int num_slots = kring->nkr_num_slots;
	u_int ntc = kring->nr_ntc;
	u_int hwcur = kring->nr_hwcur;
	u_int n = 0;
	struct mbuf **tx_pool = kring->tx_pool;

	while (ntc != hwcur) { /* buffers not completed */
		struct mbuf *m = tx_pool[ntc];

		if (unlikely(m == NULL)) {
			/* try to replenish the entry */
			tx_pool[ntc] = m = netmap_get_mbuf(GENERIC_BUF_SIZE);
			if (unlikely(m == NULL)) {
				D("mbuf allocation failed, XXX error");
				// XXX how do we proceed ? break ?
				return -ENOMEM;
			}
#if 0
		} else if (GET_MBUF_REFCNT(m) != 1) {
			break; /* This mbuf is still busy: its refcnt is 2. */
#endif
		}
		if (unlikely(++ntc == num_slots)) {
			ntc = 0;
		}
		n++;
	}
	kring->nr_ntc = ntc;
	kring->nr_hwavail += n;
	ND("tx completed [%d] -> hwavail %d", n, kring->nr_hwavail);

	return n;
}


/*
 * We have pending packets in the driver between nr_ntc and hwcur.
 * Compute a position in the middle, to be used to generate
 * a notification.
 */
static inline u_int
generic_tx_event_middle(struct netmap_kring *kring, u_int hwcur)
{
	u_int n = kring->nkr_num_slots;
	u_int ntc = kring->nr_ntc;
	u_int e;

	if (hwcur >= ntc) {
		e = (hwcur + ntc) / 2;
	} else { /* wrap around */
		e = (hwcur + n + ntc) / 2;
		if (e >= n) {
			e -= n;
		}
	}

	if (unlikely(e >= n)) {
		D("This cannot happen");
		e = 0;
	}

	return e;
}
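
/*
 * Worked example (illustrative numbers, not taken from the code): with
 * nkr_num_slots = 256, nr_ntc = 200 and hwcur = 40 the pending region
 * wraps around, so e = (40 + 256 + 200) / 2 = 248; since 248 < 256 no
 * correction is applied, and slot 248 lies halfway between nr_ntc and
 * the (wrapped) hwcur, i.e. in the middle of the pending buffers.
 */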
/*
 * We have pending packets in the driver between nr_ntc and hwcur.
 * Schedule a notification approximately in the middle of the two.
 * There is a race but this is only called within txsync which does
 * a double check.
 */
static void
generic_set_tx_event(struct netmap_kring *kring, u_int hwcur)
{
	struct mbuf *m;
	u_int e;

	if (kring->nr_ntc == hwcur) {
		return;
	}
	e = generic_tx_event_middle(kring, hwcur);

	m = kring->tx_pool[e];
	if (m == NULL) {
		/* This can happen if there is already an event on the netmap
		   slot 'e': there is nothing to do. */
		return;
	}
	ND("Event at %d mbuf %p refcnt %d", e, m, GET_MBUF_REFCNT(m));
	kring->tx_pool[e] = NULL;
	SET_MBUF_DESTRUCTOR(m, generic_mbuf_destructor);

	// XXX wmb() ?
	/* Decrement the refcount and free it if we have the last one. */
	m_freem(m);
	smp_mb();
}


/*
 * generic_netmap_txsync() transforms netmap buffers into mbufs
 * and passes them to the standard device driver
 * (ndo_start_xmit() or ifp->if_transmit()).
 * On Linux this is not done directly, but through dev_queue_xmit(),
 * since it implements the TX flow control (and takes some locks).
 */
static int
generic_netmap_txsync(struct netmap_adapter *na, u_int ring_nr, int flags)
{
	struct ifnet *ifp = na->ifp;
	struct netmap_kring *kring = &na->tx_rings[ring_nr];
	struct netmap_ring *ring = kring->ring;
	u_int j, k, num_slots = kring->nkr_num_slots;
	int new_slots, ntx;

	IFRATE(rate_ctx.new.txsync++);

	// TODO: handle the case of mbuf allocation failure
	/* first, reclaim completed buffers */
	generic_netmap_tx_clean(kring);

	/* Take a copy of ring->cur now, and never read it again. */
	k = ring->cur;
	if (unlikely(k >= num_slots)) {
		return netmap_ring_reinit(kring);
	}

	rmb();
	j = kring->nr_hwcur;
	/*
	 * 'new_slots' counts how many new slots have been added:
	 * everything from hwcur to cur, excluding reserved ones, if any.
	 * nr_hwreserved starts from hwcur and counts how many slots were
	 * not sent to the NIC from the previous round.
	 */
	new_slots = k - j - kring->nr_hwreserved;
	if (new_slots < 0) {
		new_slots += num_slots;
	}
	ntx = 0;
	if (j != k) {
		/* Process new packets to send:
		 * j is the current index in the netmap ring.
		 */
		while (j != k) {
			struct netmap_slot *slot = &ring->slot[j]; /* Current slot in the netmap ring */
			void *addr = NMB(slot);
			u_int len = slot->len;
			struct mbuf *m;
			int tx_ret;

			if (unlikely(addr == netmap_buffer_base || len > NETMAP_BUF_SIZE)) {
				return netmap_ring_reinit(kring);
			}
			/* Take an mbuf from the tx pool and copy in the user packet. */
			m = kring->tx_pool[j];
			if (unlikely(!m)) {
				RD(5, "This should never happen");
				kring->tx_pool[j] = m = netmap_get_mbuf(GENERIC_BUF_SIZE);
				if (unlikely(m == NULL)) {
					D("mbuf allocation failed");
					break;
				}
			}
			/* XXX we should ask for notifications when NS_REPORT is set,
			 * or roughly every half frame. We can optimize this
			 * by lazily requesting notifications only when a
			 * transmission fails. Probably the best way is to
			 * break on failures and set notifications when
			 * ring->avail == 0 || j != k
			 */
			tx_ret = generic_xmit_frame(ifp, m, addr, len, ring_nr);
			if (unlikely(tx_ret)) {
				RD(5, "start_xmit failed: err %d [%u,%u,%u,%u]",
						tx_ret, kring->nr_ntc, j, k, kring->nr_hwavail);
				/*
				 * No room for this mbuf in the device driver.
				 * Request a notification FOR A PREVIOUS MBUF,
				 * then call generic_netmap_tx_clean(kring) to do the
				 * double check and see if we can free more buffers.
				 * If there is space continue, else break;
				 * NOTE: the double check is necessary if the problem
				 * occurs in the txsync call after selrecord().
				 * Also, we need some way to tell the caller that not
				 * all buffers were queued onto the device (this was
				 * not a problem with the native netmap driver where space
				 * is preallocated).
				 * The bridge has a similar problem
				 * and we solve it there by dropping the excess packets.
				 */
				generic_set_tx_event(kring, j);
				if (generic_netmap_tx_clean(kring)) { /* space now available */
					continue;
				} else {
					break;
				}
			}
			slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED);
			if (unlikely(++j == num_slots))
				j = 0;
			ntx++;
		}

		/* Update hwcur to the next slot to transmit. */
		kring->nr_hwcur = j;

		/*
		 * Report all new slots as unavailable, even those not sent.
		 * We account for them with hwreserved, so that
		 * nr_hwreserved =:= cur - nr_hwcur
		 */
		kring->nr_hwavail -= new_slots;
		kring->nr_hwreserved = k - j;
		if (kring->nr_hwreserved < 0) {
			kring->nr_hwreserved += num_slots;
		}

		IFRATE(rate_ctx.new.txpkt += ntx);

		if (!kring->nr_hwavail) {
			/* No more available slots? Set a notification event
			 * on a netmap slot that will be cleaned in the future.
			 * No doublecheck is performed, since txsync() will be
			 * called twice by netmap_poll().
			 */
			generic_set_tx_event(kring, j);
		}
		ND("tx #%d, hwavail = %d", ntx, kring->nr_hwavail);
	}

	/* Synchronize the user's view to the kernel view. */
	ring->avail = kring->nr_hwavail;
	ring->reserved = kring->nr_hwreserved;

	return 0;
}

/*
 * This handler is registered (through netmap_catch_rx())
 * within the attached network interface
 * in the RX subsystem, so that every mbuf passed up by
 * the driver can be stolen from the network stack.
 * Stolen packets are put in a queue from which the
 * generic_netmap_rxsync() callback can extract them.
 */
void generic_rx_handler(struct ifnet *ifp, struct mbuf *m)
{
	struct netmap_adapter *na = NA(ifp);
	struct netmap_generic_adapter *gna = (struct netmap_generic_adapter *)na;
	u_int work_done;
	u_int rr = 0; // receive ring number

	ND("called");
	/* limit the size of the queue */
	if (unlikely(mbq_len(&na->rx_rings[rr].rx_queue) > 1024)) {
		m_freem(m);
	} else {
		mbq_safe_enqueue(&na->rx_rings[rr].rx_queue, m);
	}

	if (netmap_generic_mit < 32768) {
		/* no rx mitigation, pass notification up */
		netmap_generic_irq(na->ifp, rr, &work_done);
		IFRATE(rate_ctx.new.rxirq++);
	} else {
		/* same as send combining: filter the notification if there is a
		 * pending timer, otherwise pass it up and start a timer.
		 */
		if (likely(netmap_mitigation_active(gna))) {
			/* Record that there is some pending work. */
			gna->mit_pending = 1;
		} else {
			netmap_generic_irq(na->ifp, rr, &work_done);
			IFRATE(rate_ctx.new.rxirq++);
			netmap_mitigation_start(gna);
		}
	}
}
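
/*
 * Receive path overview (descriptive summary of the handler above and
 * the rxsync routine below):
 *
 *	driver input path --> generic_rx_handler() --> kring->rx_queue
 *	    --> generic_netmap_rxsync() --> netmap ring --> userspace
 *
 * Notifications are delivered per packet when netmap_generic_mit is
 * below the threshold, otherwise they are coalesced by the mitigation
 * timer.
 */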
/*
 * generic_netmap_rxsync() extracts mbufs from the queue filled by
 * generic_rx_handler() and puts their content into the netmap
 * receive ring.
 * Access must be protected because the rx handler is asynchronous.
 */
static int
generic_netmap_rxsync(struct netmap_adapter *na, u_int ring_nr, int flags)
{
	struct netmap_kring *kring = &na->rx_rings[ring_nr];
	struct netmap_ring *ring = kring->ring;
	u_int j, n, lim = kring->nkr_num_slots - 1;
	int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR;
	u_int k, resvd = ring->reserved;

	if (ring->cur > lim)
		return netmap_ring_reinit(kring);

	/* Import newly received packets into the netmap ring. */
	if (netmap_no_pendintr || force_update) {
		uint16_t slot_flags = kring->nkr_slot_flags;
		struct mbuf *m;

		n = 0;
		j = kring->nr_ntc; /* first empty slot in the receive ring */
		/* extract buffers from the rx queue, stop at most one
		 * slot before nr_hwcur (index k)
		 */
		k = (kring->nr_hwcur) ? kring->nr_hwcur - 1 : lim;
		while (j != k) {
			int len;
			void *addr = NMB(&ring->slot[j]);

			if (addr == netmap_buffer_base) { /* Bad buffer */
				return netmap_ring_reinit(kring);
			}
			/*
			 * Call the locked version of the function.
			 * XXX Ideally we could grab a batch of mbufs at once,
			 * by changing rx_queue into a ring.
			 */
			m = mbq_safe_dequeue(&kring->rx_queue);
			if (!m)
				break;
			len = MBUF_LEN(m);
			m_copydata(m, 0, len, addr);
			ring->slot[j].len = len;
			ring->slot[j].flags = slot_flags;
			m_freem(m);
			if (unlikely(j++ == lim))
				j = 0;
			n++;
		}
		if (n) {
			kring->nr_ntc = j;
			kring->nr_hwavail += n;
			IFRATE(rate_ctx.new.rxpkt += n);
		}
		kring->nr_kflags &= ~NKR_PENDINTR;
	}

	// XXX should we invert the order ?
	/* Skip past packets that userspace has released */
	j = kring->nr_hwcur;
	k = ring->cur;
	if (resvd > 0) {
		if (resvd + ring->avail >= lim + 1) {
			D("XXX invalid reserve/avail %d %d", resvd, ring->avail);
			ring->reserved = resvd = 0; // XXX panic...
		}
		k = (k >= resvd) ? k - resvd : k + lim + 1 - resvd;
	}
	if (j != k) {
		/* Userspace has released some packets. */
		for (n = 0; j != k; n++) {
			struct netmap_slot *slot = &ring->slot[j];

			slot->flags &= ~NS_BUF_CHANGED;
			if (unlikely(j++ == lim))
				j = 0;
		}
		kring->nr_hwavail -= n;
		kring->nr_hwcur = k;
	}
	/* Tell userspace that there are new packets. */
	ring->avail = kring->nr_hwavail - resvd;
	IFRATE(rate_ctx.new.rxsync++);

	return 0;
}

static void
generic_netmap_dtor(struct netmap_adapter *na)
{
	struct ifnet *ifp = na->ifp;
	struct netmap_generic_adapter *gna = (struct netmap_generic_adapter *)na;
	struct netmap_adapter *prev_na = gna->prev;

	if (prev_na != NULL) {
		D("Released generic NA %p", gna);
#if 0
		if_rele(na->ifp);
#endif
		netmap_adapter_put(prev_na);
	}
	if (ifp != NULL) {
		WNA(ifp) = prev_na;
		D("Restored native NA %p", prev_na);
		na->ifp = NULL;
	}
}

/*
 * generic_netmap_attach() makes it possible to use netmap on
 * a device without native netmap support.
 * This is less performant than native support but potentially
 * faster than raw sockets or similar schemes.
 *
 * In this "emulated" mode, netmap rings do not necessarily
 * have the same size as those in the NIC. We use a default
 * value and possibly override it if the OS has ways to fetch the
 * actual configuration.
 */
int
generic_netmap_attach(struct ifnet *ifp)
{
	struct netmap_adapter *na;
	struct netmap_generic_adapter *gna;
	int retval;
	u_int num_tx_desc, num_rx_desc;

	num_tx_desc = num_rx_desc = netmap_generic_ringsize; /* starting point */

	generic_find_num_desc(ifp, &num_tx_desc, &num_rx_desc);
	ND("Netmap ring size: TX = %d, RX = %d", num_tx_desc, num_rx_desc);

	gna = kmalloc(sizeof(*gna), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (gna == NULL) {
		D("no memory on attach, give up");
		return ENOMEM;
	}
	na = (struct netmap_adapter *)gna;
	na->ifp = ifp;
	na->num_tx_desc = num_tx_desc;
	na->num_rx_desc = num_rx_desc;
	na->nm_register = &generic_netmap_register;
	na->nm_txsync = &generic_netmap_txsync;
	na->nm_rxsync = &generic_netmap_rxsync;
	na->nm_dtor = &generic_netmap_dtor;
	/* when using generic, IFCAP_NETMAP is set so we force
	 * NAF_SKIP_INTR to use the regular interrupt handler
	 */
	na->na_flags = NAF_SKIP_INTR;

	ND("[GNA] num_tx_queues(%d), real_num_tx_queues(%d), len(%lu)",
			ifp->num_tx_queues, ifp->real_num_tx_queues,
			ifp->tx_queue_len);
	ND("[GNA] num_rx_queues(%d), real_num_rx_queues(%d)",
			ifp->num_rx_queues, ifp->real_num_rx_queues);

	generic_find_num_queues(ifp, &na->num_tx_rings, &na->num_rx_rings);

	retval = netmap_attach_common(na);
	if (retval) {
		kfree(gna, M_DEVBUF);
	}

	return retval;
}
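
/*
 * Attach-time note: generic_netmap_attach() is expected to be invoked by
 * the system-specific netmap code when a NIOCREGIF request arrives for an
 * interface without native netmap support (see the module comment at the
 * top of this file). The resulting adapter wraps the ifp and is torn down
 * through generic_netmap_dtor() when its last reference is dropped.
 */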