/*-
 * Copyright (c) 2016, Vincenzo Maffione
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* Driver for ptnet paravirtualized network device. */

#include <sys/cdefs.h>

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/taskqueue.h>
#include <sys/smp.h>
#include <sys/time.h>
#include <machine/smp.h>

#include <vm/uma.h>
#include <vm/vm.h>
#include <vm/pmap.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/if_media.h>
#include <net/if_vlan_var.h>
#include <net/bpf.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#include <netinet/udp.h>
#include <netinet/tcp.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>

#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/selinfo.h>
#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>
#include <net/netmap_virt.h>
#include <dev/netmap/netmap_mem2.h>
#include <dev/virtio/network/virtio_net.h>

#ifdef WITH_PTNETMAP

#ifndef INET
#error "INET not defined, cannot support offloads"
#endif

static uint64_t	ptnet_get_counter(if_t, ift_counter);

//#define PTNETMAP_STATS
//#define DEBUG
#ifdef DEBUG
#define DBG(x) x
#else   /* !DEBUG */
#define DBG(x)
#endif  /* !DEBUG */

extern int ptnet_vnet_hdr; /* Tunable parameter */

struct ptnet_softc;

struct ptnet_queue_stats {
	uint64_t	packets; /* if_[io]packets */
	uint64_t	bytes;	 /* if_[io]bytes */
	uint64_t	errors;	 /* if_[io]errors */
	uint64_t	iqdrops; /* if_iqdrops */
	uint64_t	mcasts;  /* if_[io]mcasts */
#ifdef PTNETMAP_STATS
	uint64_t	intrs;
	uint64_t	kicks;
#endif /* PTNETMAP_STATS */
};

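/*
 * Each ptnet_queue describes a single netmap ring exposed by the device:
 * atok and ktoa point to the per-ring entries of the shared CSB, kick is
 * the offset of the I/O register used to notify the host, and bufring
 * (TX queues only) stages mbufs coming from the network stack.
 */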
struct ptnet_queue {
	struct ptnet_softc		*sc;
	struct resource			*irq;
	void				*cookie;
	int				kring_id;
	struct nm_csb_atok		*atok;
	struct nm_csb_ktoa		*ktoa;
	unsigned int			kick;
	struct mtx			lock;
	struct buf_ring			*bufring; /* for TX queues */
	struct ptnet_queue_stats	stats;
#ifdef PTNETMAP_STATS
	struct ptnet_queue_stats	last_stats;
#endif /* PTNETMAP_STATS */
	struct taskqueue		*taskq;
	struct task			task;
	char				lock_name[16];
};

#define PTNET_Q_LOCK(_pq)	mtx_lock(&(_pq)->lock)
#define PTNET_Q_TRYLOCK(_pq)	mtx_trylock(&(_pq)->lock)
#define PTNET_Q_UNLOCK(_pq)	mtx_unlock(&(_pq)->lock)

struct ptnet_softc {
	device_t		dev;
	if_t			ifp;
	struct ifmedia		media;
	struct mtx		lock;
	char			lock_name[16];
	char			hwaddr[ETHER_ADDR_LEN];

	/* Mirror of PTFEAT register. */
	uint32_t		ptfeatures;
	unsigned int		vnet_hdr_len;

	/* PCI BARs support. */
	struct resource		*iomem;
	struct resource		*msix_mem;

	unsigned int		num_rings;
	unsigned int		num_tx_rings;
	struct ptnet_queue	*queues;
	struct ptnet_queue	*rxqueues;
	struct nm_csb_atok	*csb_gh;
	struct nm_csb_ktoa	*csb_hg;

	unsigned int		min_tx_space;

	struct netmap_pt_guest_adapter *ptna;

	struct callout		tick;
#ifdef PTNETMAP_STATS
	struct timeval		last_ts;
#endif /* PTNETMAP_STATS */
};

#define PTNET_CORE_LOCK(_sc)	mtx_lock(&(_sc)->lock)
#define PTNET_CORE_UNLOCK(_sc)	mtx_unlock(&(_sc)->lock)

static int	ptnet_probe(device_t);
static int	ptnet_attach(device_t);
static int	ptnet_detach(device_t);
static int	ptnet_suspend(device_t);
static int	ptnet_resume(device_t);
static int	ptnet_shutdown(device_t);

static void	ptnet_init(void *opaque);
static int	ptnet_ioctl(if_t ifp, u_long cmd, caddr_t data);
static int	ptnet_init_locked(struct ptnet_softc *sc);
static int	ptnet_stop(struct ptnet_softc *sc);
static int	ptnet_transmit(if_t ifp, struct mbuf *m);
static int	ptnet_drain_transmit_queue(struct ptnet_queue *pq,
					   unsigned int budget,
					   bool may_resched);
static void	ptnet_qflush(if_t ifp);
static void	ptnet_tx_task(void *context, int pending);

static int	ptnet_media_change(if_t ifp);
static void	ptnet_media_status(if_t ifp, struct ifmediareq *ifmr);
#ifdef PTNETMAP_STATS
static void	ptnet_tick(void *opaque);
#endif

static int	ptnet_irqs_init(struct ptnet_softc *sc);
static void	ptnet_irqs_fini(struct ptnet_softc *sc);

static uint32_t	ptnet_nm_ptctl(struct ptnet_softc *sc, uint32_t cmd);
static int	ptnet_nm_config(struct netmap_adapter *na,
				struct nm_config_info *info);
static void	ptnet_update_vnet_hdr(struct ptnet_softc *sc);
static int	ptnet_nm_register(struct netmap_adapter *na, int onoff);
static int	ptnet_nm_txsync(struct netmap_kring *kring, int flags);
static int	ptnet_nm_rxsync(struct netmap_kring *kring, int flags);
static void	ptnet_nm_intr(struct netmap_adapter *na, int onoff);

static void	ptnet_tx_intr(void *opaque);
static void	ptnet_rx_intr(void *opaque);

static unsigned	ptnet_rx_discard(struct netmap_kring *kring,
				 unsigned int head);
static int	ptnet_rx_eof(struct ptnet_queue *pq, unsigned int budget,
			     bool may_resched);
static void	ptnet_rx_task(void *context, int pending);

#ifdef DEVICE_POLLING
static poll_handler_t ptnet_poll;
#endif

static device_method_t ptnet_methods[] = {
	DEVMETHOD(device_probe,		ptnet_probe),
	DEVMETHOD(device_attach,	ptnet_attach),
	DEVMETHOD(device_detach,	ptnet_detach),
	DEVMETHOD(device_suspend,	ptnet_suspend),
	DEVMETHOD(device_resume,	ptnet_resume),
	DEVMETHOD(device_shutdown,	ptnet_shutdown),
	DEVMETHOD_END
};

static driver_t ptnet_driver = {
	"ptnet",
	ptnet_methods,
	sizeof(struct ptnet_softc)
};

/* We use (SI_ORDER_MIDDLE+2) here, see DEV_MODULE_ORDERED() invocation. */
DRIVER_MODULE_ORDERED(ptnet, pci, ptnet_driver, NULL, NULL,
		      SI_ORDER_MIDDLE + 2);
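/*
 * Ordering note (our reading of the comment above): attaching at
 * SI_ORDER_MIDDLE + 2 should guarantee that the netmap core module has
 * already been initialized by the time ptnet_attach() runs, since the
 * attach path relies on netmap_pt_guest_attach().
 */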

static int
ptnet_probe(device_t dev)
{
	if (pci_get_vendor(dev) != PTNETMAP_PCI_VENDOR_ID ||
	    pci_get_device(dev) != PTNETMAP_PCI_NETIF_ID) {
		return (ENXIO);
	}

	device_set_desc(dev, "ptnet network adapter");

	return (BUS_PROBE_DEFAULT);
}

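/*
 * Guest-to-host doorbell: writing to the per-queue kick register traps
 * into the hypervisor, which wakes up the host thread serving this queue.
 * As far as we can tell, the written value (0 here) is not meaningful;
 * the register offset alone identifies the queue.
 */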
static inline void ptnet_kick(struct ptnet_queue *pq)
{
#ifdef PTNETMAP_STATS
	pq->stats.kicks ++;
#endif /* PTNETMAP_STATS */
	bus_write_4(pq->sc->iomem, pq->kick, 0);
}

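/*
 * A budget bounds the number of packets processed in a single run of
 * ptnet_drain_transmit_queue() or ptnet_rx_eof(), while a batch is the
 * number of packets after which head/cur are published to the CSB,
 * possibly kicking the host; see ptnet_ring_update().
 */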
#define PTNET_BUF_RING_SIZE	4096
#define PTNET_RX_BUDGET		512
#define PTNET_RX_BATCH		1
#define PTNET_TX_BUDGET		512
#define PTNET_TX_BATCH		64
#define PTNET_HDR_SIZE		sizeof(struct virtio_net_hdr_mrg_rxbuf)
#define PTNET_MAX_PKT_SIZE	65536

#define PTNET_CSUM_OFFLOAD	(CSUM_TCP | CSUM_UDP)
#define PTNET_CSUM_OFFLOAD_IPV6	(CSUM_TCP_IPV6 | CSUM_UDP_IPV6)
#define PTNET_ALL_OFFLOAD	(CSUM_TSO | PTNET_CSUM_OFFLOAD |\
				 PTNET_CSUM_OFFLOAD_IPV6)

static int
ptnet_attach(device_t dev)
{
	uint32_t ptfeatures = 0;
	unsigned int num_rx_rings, num_tx_rings;
	struct netmap_adapter na_arg;
	unsigned int nifp_offset;
	struct ptnet_softc *sc;
	if_t ifp;
	uint32_t macreg;
	int err, rid;
	int i;

	sc = device_get_softc(dev);
	sc->dev = dev;

	/* Setup PCI resources. */
	pci_enable_busmaster(dev);

	rid = PCIR_BAR(PTNETMAP_IO_PCI_BAR);
	sc->iomem = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid,
					   RF_ACTIVE);
	if (sc->iomem == NULL) {
		device_printf(dev, "Failed to map I/O BAR\n");
		return (ENXIO);
	}

	/* Negotiate features with the hypervisor. */
	if (ptnet_vnet_hdr) {
		ptfeatures |= PTNETMAP_F_VNET_HDR;
	}
	bus_write_4(sc->iomem, PTNET_IO_PTFEAT, ptfeatures); /* wanted */
	ptfeatures = bus_read_4(sc->iomem, PTNET_IO_PTFEAT); /* acked */
	sc->ptfeatures = ptfeatures;

	num_tx_rings = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_RINGS);
	num_rx_rings = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_RINGS);
	sc->num_rings = num_tx_rings + num_rx_rings;
	sc->num_tx_rings = num_tx_rings;

	if (sc->num_rings * sizeof(struct nm_csb_atok) > PAGE_SIZE) {
		device_printf(dev, "CSB cannot handle that many rings (%u)\n",
			      sc->num_rings);
		err = ENOMEM;
		goto err_path;
	}

	/* Allocate CSB and carry out CSB allocation protocol. */
	sc->csb_gh = contigmalloc(2*PAGE_SIZE, M_DEVBUF, M_NOWAIT | M_ZERO,
				  (size_t)0, -1UL, PAGE_SIZE, 0);
	if (sc->csb_gh == NULL) {
		device_printf(dev, "Failed to allocate CSB\n");
		err = ENOMEM;
		goto err_path;
	}
	sc->csb_hg = (struct nm_csb_ktoa *)(((char *)sc->csb_gh) + PAGE_SIZE);

	{
		/*
		 * We use uint64_t rather than vm_paddr_t since we
		 * need 64 bit addresses even on 32 bit platforms.
		 */
		uint64_t paddr = vtophys(sc->csb_gh);

		/* CSB allocation protocol: write to BAH first, then
		 * to BAL (for both GH and HG sections), so that the
		 * device can treat the BAL write as the commit point
		 * for the whole 64-bit address. */
		bus_write_4(sc->iomem, PTNET_IO_CSB_GH_BAH,
			    (paddr >> 32) & 0xffffffff);
		bus_write_4(sc->iomem, PTNET_IO_CSB_GH_BAL,
			    paddr & 0xffffffff);
		paddr = vtophys(sc->csb_hg);
		bus_write_4(sc->iomem, PTNET_IO_CSB_HG_BAH,
			    (paddr >> 32) & 0xffffffff);
		bus_write_4(sc->iomem, PTNET_IO_CSB_HG_BAL,
			    paddr & 0xffffffff);
	}

	/* Allocate and initialize per-queue data structures. */
	sc->queues = malloc(sizeof(struct ptnet_queue) * sc->num_rings,
			    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (sc->queues == NULL) {
		err = ENOMEM;
		goto err_path;
	}
	sc->rxqueues = sc->queues + num_tx_rings;

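	/*
	 * Queue layout: entries [0..num_tx_rings-1] of sc->queues are the
	 * TX queues, the remaining ones are the RX queues (also reachable
	 * through sc->rxqueues). CSB entries are indexed globally, while
	 * kring_id is relative to its TX or RX class, hence the fixup
	 * below for RX queues.
	 */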
	for (i = 0; i < sc->num_rings; i++) {
		struct ptnet_queue *pq = sc->queues + i;

		pq->sc = sc;
		pq->kring_id = i;
		pq->kick = PTNET_IO_KICK_BASE + 4 * i;
		pq->atok = sc->csb_gh + i;
		pq->ktoa = sc->csb_hg + i;
		snprintf(pq->lock_name, sizeof(pq->lock_name), "%s-%d",
			 device_get_nameunit(dev), i);
		mtx_init(&pq->lock, pq->lock_name, NULL, MTX_DEF);
		if (i >= num_tx_rings) {
			/* RX queue: fix kring_id. */
			pq->kring_id -= num_tx_rings;
		} else {
			/* TX queue: allocate buf_ring. */
			pq->bufring = buf_ring_alloc(PTNET_BUF_RING_SIZE,
						M_DEVBUF, M_NOWAIT, &pq->lock);
			if (pq->bufring == NULL) {
				err = ENOMEM;
				goto err_path;
			}
		}
	}

	sc->min_tx_space = 64; /* Safe initial value. */

	err = ptnet_irqs_init(sc);
	if (err) {
		goto err_path;
	}

	/* Setup Ethernet interface. */
	sc->ifp = ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "Failed to allocate ifnet\n");
		err = ENOMEM;
		goto err_path;
	}

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	if_setbaudrate(ifp, IF_Gbps(10));
	if_setsoftc(ifp, sc);
	if_setflags(ifp, IFF_BROADCAST | IFF_MULTICAST | IFF_SIMPLEX);
	if_setinitfn(ifp, ptnet_init);
	if_setioctlfn(ifp, ptnet_ioctl);
	if_setget_counter(ifp, ptnet_get_counter);
	if_settransmitfn(ifp, ptnet_transmit);
	if_setqflushfn(ifp, ptnet_qflush);

	ifmedia_init(&sc->media, IFM_IMASK, ptnet_media_change,
		     ptnet_media_status);
	ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_T | IFM_FDX, 0, NULL);
	ifmedia_set(&sc->media, IFM_ETHER | IFM_10G_T | IFM_FDX);

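	/*
	 * Read the MAC address out of the device registers: the low 16
	 * bits of PTNET_IO_MAC_HI carry bytes 0-1 of the address, and
	 * PTNET_IO_MAC_LO carries bytes 2-5.
	 */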
	macreg = bus_read_4(sc->iomem, PTNET_IO_MAC_HI);
	sc->hwaddr[0] = (macreg >> 8) & 0xff;
	sc->hwaddr[1] = macreg & 0xff;
	macreg = bus_read_4(sc->iomem, PTNET_IO_MAC_LO);
	sc->hwaddr[2] = (macreg >> 24) & 0xff;
	sc->hwaddr[3] = (macreg >> 16) & 0xff;
	sc->hwaddr[4] = (macreg >> 8) & 0xff;
	sc->hwaddr[5] = macreg & 0xff;

	ether_ifattach(ifp, sc->hwaddr);

	if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
	if_setcapabilitiesbit(ifp, IFCAP_JUMBO_MTU | IFCAP_VLAN_MTU, 0);

	if (sc->ptfeatures & PTNETMAP_F_VNET_HDR) {
		/* Similarly to the vtnet driver, we can emulate VLAN
		 * offloading by inserting and removing the 802.1Q header
		 * during transmit and receive; this also lets us do
		 * checksum offloading on VLAN frames. */
		if_setcapabilitiesbit(ifp, IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6
				      | IFCAP_VLAN_HWCSUM
				      | IFCAP_TSO | IFCAP_LRO
				      | IFCAP_VLAN_HWTSO
				      | IFCAP_VLAN_HWTAGGING, 0);
	}

	if_setcapenable(ifp, if_getcapabilities(ifp));
#ifdef DEVICE_POLLING
	/* Don't enable polling by default. */
	if_setcapabilitiesbit(ifp, IFCAP_POLLING, 0);
#endif
	snprintf(sc->lock_name, sizeof(sc->lock_name),
		 "%s", device_get_nameunit(dev));
	mtx_init(&sc->lock, sc->lock_name, "ptnet core lock", MTX_DEF);
	callout_init_mtx(&sc->tick, &sc->lock, 0);

	/* Prepare a netmap_adapter struct instance to do netmap_attach(). */
	nifp_offset = bus_read_4(sc->iomem, PTNET_IO_NIFP_OFS);
	memset(&na_arg, 0, sizeof(na_arg));
	na_arg.ifp = ifp;
	na_arg.num_tx_desc = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_SLOTS);
	na_arg.num_rx_desc = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_SLOTS);
	na_arg.num_tx_rings = num_tx_rings;
	na_arg.num_rx_rings = num_rx_rings;
	na_arg.nm_config = ptnet_nm_config;
	na_arg.nm_krings_create = ptnet_nm_krings_create;
	na_arg.nm_krings_delete = ptnet_nm_krings_delete;
	na_arg.nm_dtor = ptnet_nm_dtor;
	na_arg.nm_intr = ptnet_nm_intr;
	na_arg.nm_register = ptnet_nm_register;
	na_arg.nm_txsync = ptnet_nm_txsync;
	na_arg.nm_rxsync = ptnet_nm_rxsync;

	netmap_pt_guest_attach(&na_arg, nifp_offset,
			       bus_read_4(sc->iomem, PTNET_IO_HOSTMEMID));

	/* A netmap adapter for this ifp has now been allocated, and it
	 * can be accessed through NA(ifp). We also have to initialize
	 * the CSB pointer. */
	sc->ptna = (struct netmap_pt_guest_adapter *)NA(ifp);

	/* If the virtio-net header was negotiated, set the virt_hdr_len
	 * field in the netmap adapter, to inform users that this netmap
	 * adapter requires the application to deal with the headers. */
	ptnet_update_vnet_hdr(sc);

	device_printf(dev, "%s() completed\n", __func__);

	return (0);

err_path:
	ptnet_detach(dev);
	return err;
}

/* Stop the host sync-kloop if it was running, and clear the CSB address
 * registers so that the host stops accessing guest memory. */
static void
ptnet_device_shutdown(struct ptnet_softc *sc)
{
	ptnet_nm_ptctl(sc, PTNETMAP_PTCTL_DELETE);
	bus_write_4(sc->iomem, PTNET_IO_CSB_GH_BAH, 0);
	bus_write_4(sc->iomem, PTNET_IO_CSB_GH_BAL, 0);
	bus_write_4(sc->iomem, PTNET_IO_CSB_HG_BAH, 0);
	bus_write_4(sc->iomem, PTNET_IO_CSB_HG_BAL, 0);
}

static int
ptnet_detach(device_t dev)
{
	struct ptnet_softc *sc = device_get_softc(dev);
	int i;

	ptnet_device_shutdown(sc);

#ifdef DEVICE_POLLING
	if (if_getcapenable(sc->ifp) & IFCAP_POLLING) {
		ether_poll_deregister(sc->ifp);
	}
#endif
	callout_drain(&sc->tick);

	if (sc->queues) {
		/* Drain taskqueues before calling if_detach. */
		for (i = 0; i < sc->num_rings; i++) {
			struct ptnet_queue *pq = sc->queues + i;

			if (pq->taskq) {
				taskqueue_drain(pq->taskq, &pq->task);
			}
		}
	}

	if (sc->ifp) {
		ether_ifdetach(sc->ifp);

		/* Uninitialize netmap adapters for this device. */
		netmap_detach(sc->ifp);

		ifmedia_removeall(&sc->media);
		if_free(sc->ifp);
		sc->ifp = NULL;
	}

	ptnet_irqs_fini(sc);

	if (sc->csb_gh) {
		contigfree(sc->csb_gh, 2*PAGE_SIZE, M_DEVBUF);
		sc->csb_gh = NULL;
		sc->csb_hg = NULL;
	}

	if (sc->queues) {
		for (i = 0; i < sc->num_rings; i++) {
			struct ptnet_queue *pq = sc->queues + i;

			if (mtx_initialized(&pq->lock)) {
				mtx_destroy(&pq->lock);
			}
			if (pq->bufring != NULL) {
				buf_ring_free(pq->bufring, M_DEVBUF);
			}
		}
		free(sc->queues, M_DEVBUF);
		sc->queues = NULL;
	}

	if (sc->iomem) {
		bus_release_resource(dev, SYS_RES_IOPORT,
				     PCIR_BAR(PTNETMAP_IO_PCI_BAR), sc->iomem);
		sc->iomem = NULL;
	}

	mtx_destroy(&sc->lock);

	device_printf(dev, "%s() completed\n", __func__);

	return (0);
}

static int
ptnet_suspend(device_t dev)
{
	struct ptnet_softc *sc = device_get_softc(dev);

	(void)sc;

	return (0);
}

static int
ptnet_resume(device_t dev)
{
	struct ptnet_softc *sc = device_get_softc(dev);

	(void)sc;

	return (0);
}

static int
ptnet_shutdown(device_t dev)
{
	struct ptnet_softc *sc = device_get_softc(dev);

	ptnet_device_shutdown(sc);

	return (0);
}

static int
ptnet_irqs_init(struct ptnet_softc *sc)
{
	int rid = PCIR_BAR(PTNETMAP_MSIX_PCI_BAR);
	int nvecs = sc->num_rings;
	device_t dev = sc->dev;
	int err = ENOSPC;
	int cpu_cur;
	int i;

	if (pci_find_cap(dev, PCIY_MSIX, NULL) != 0) {
		device_printf(dev, "Could not find MSI-X capability\n");
		return (ENXIO);
	}

	sc->msix_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
					      &rid, RF_ACTIVE);
	if (sc->msix_mem == NULL) {
		device_printf(dev, "Failed to allocate MSIX PCI BAR\n");
		return (ENXIO);
	}

	if (pci_msix_count(dev) < nvecs) {
		device_printf(dev, "Not enough MSI-X vectors\n");
		goto err_path;
	}

	err = pci_alloc_msix(dev, &nvecs);
	if (err) {
		device_printf(dev, "Failed to allocate MSI-X vectors\n");
		goto err_path;
	}

	for (i = 0; i < nvecs; i++) {
		struct ptnet_queue *pq = sc->queues + i;

		rid = i + 1;
		pq->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
						 RF_ACTIVE);
		if (pq->irq == NULL) {
			device_printf(dev, "Failed to allocate interrupt "
					   "for queue #%d\n", i);
			err = ENOSPC;
			goto err_path;
		}
	}

	cpu_cur = CPU_FIRST();
	for (i = 0; i < nvecs; i++) {
		struct ptnet_queue *pq = sc->queues + i;
		void (*handler)(void *) = ptnet_tx_intr;

		if (i >= sc->num_tx_rings) {
			handler = ptnet_rx_intr;
		}
		err = bus_setup_intr(dev, pq->irq, INTR_TYPE_NET | INTR_MPSAFE,
				     NULL /* intr_filter */, handler,
				     pq, &pq->cookie);
		if (err) {
			device_printf(dev, "Failed to register intr handler "
					   "for queue #%d\n", i);
			goto err_path;
		}

		bus_describe_intr(dev, pq->irq, pq->cookie, "q%d", i);
#if 0
		bus_bind_intr(sc->dev, pq->irq, cpu_cur);
#endif
		cpu_cur = CPU_NEXT(cpu_cur);
	}

	device_printf(dev, "Allocated %d MSI-X vectors\n", nvecs);

	cpu_cur = CPU_FIRST();
	for (i = 0; i < nvecs; i++) {
		struct ptnet_queue *pq = sc->queues + i;

		if (i < sc->num_tx_rings)
			TASK_INIT(&pq->task, 0, ptnet_tx_task, pq);
		else
			NET_TASK_INIT(&pq->task, 0, ptnet_rx_task, pq);

		pq->taskq = taskqueue_create_fast("ptnet_queue", M_NOWAIT,
					taskqueue_thread_enqueue, &pq->taskq);
		taskqueue_start_threads(&pq->taskq, 1, PI_NET, "%s-pq-%d",
					device_get_nameunit(sc->dev), cpu_cur);
		cpu_cur = CPU_NEXT(cpu_cur);
	}

	return 0;
err_path:
	ptnet_irqs_fini(sc);
	return err;
}

static void
ptnet_irqs_fini(struct ptnet_softc *sc)
{
	device_t dev = sc->dev;
	int i;

	for (i = 0; i < sc->num_rings; i++) {
		struct ptnet_queue *pq = sc->queues + i;

		if (pq->taskq) {
			taskqueue_free(pq->taskq);
			pq->taskq = NULL;
		}

		if (pq->cookie) {
			bus_teardown_intr(dev, pq->irq, pq->cookie);
			pq->cookie = NULL;
		}

		if (pq->irq) {
			bus_release_resource(dev, SYS_RES_IRQ, i + 1, pq->irq);
			pq->irq = NULL;
		}
	}

	if (sc->msix_mem) {
		pci_release_msi(dev);

		bus_release_resource(dev, SYS_RES_MEMORY,
				     PCIR_BAR(PTNETMAP_MSIX_PCI_BAR),
				     sc->msix_mem);
		sc->msix_mem = NULL;
	}
}

static void
ptnet_init(void *opaque)
{
	struct ptnet_softc *sc = opaque;

	PTNET_CORE_LOCK(sc);
	ptnet_init_locked(sc);
	PTNET_CORE_UNLOCK(sc);
}

static int
ptnet_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
	struct ptnet_softc *sc = if_getsoftc(ifp);
	device_t dev = sc->dev;
	struct ifreq *ifr = (struct ifreq *)data;
	int mask __unused, err = 0;

	switch (cmd) {
	case SIOCSIFFLAGS:
		device_printf(dev, "SIOCSIFFLAGS %x\n", if_getflags(ifp));
		PTNET_CORE_LOCK(sc);
		if (if_getflags(ifp) & IFF_UP) {
			/* The network stack wants the interface to be up. */
			err = ptnet_init_locked(sc);
		} else {
			/* The network stack wants the interface to be down. */
			err = ptnet_stop(sc);
		}
		/* We don't need to do anything to support IFF_PROMISC,
		 * since that is managed by the backend port. */
		PTNET_CORE_UNLOCK(sc);
		break;

	case SIOCSIFCAP:
		device_printf(dev, "SIOCSIFCAP %x %x\n",
			      ifr->ifr_reqcap, if_getcapenable(ifp));
		mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
#ifdef DEVICE_POLLING
		if (mask & IFCAP_POLLING) {
			struct ptnet_queue *pq;
			int i;

			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				err = ether_poll_register(ptnet_poll, ifp);
				if (err) {
					break;
				}
				/* Stop queues and sync with taskqueues. */
				if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
				for (i = 0; i < sc->num_rings; i++) {
					pq = sc->queues + i;
					/* Make sure the worker sees the
					 * IFF_DRV_RUNNING down. */
					PTNET_Q_LOCK(pq);
					pq->atok->appl_need_kick = 0;
					PTNET_Q_UNLOCK(pq);
					/* Wait for rescheduling to finish. */
					if (pq->taskq) {
						taskqueue_drain(pq->taskq,
								&pq->task);
					}
				}
				if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
			} else {
				err = ether_poll_deregister(ifp);
				for (i = 0; i < sc->num_rings; i++) {
					pq = sc->queues + i;
					PTNET_Q_LOCK(pq);
					pq->atok->appl_need_kick = 1;
					PTNET_Q_UNLOCK(pq);
				}
			}
		}
#endif /* DEVICE_POLLING */
		if_setcapenable(ifp, ifr->ifr_reqcap);
		break;

	case SIOCSIFMTU:
		/* We support any reasonable MTU. */
		if (ifr->ifr_mtu < ETHERMIN ||
		    ifr->ifr_mtu > PTNET_MAX_PKT_SIZE) {
			err = EINVAL;
		} else {
			PTNET_CORE_LOCK(sc);
			if_setmtu(ifp, ifr->ifr_mtu);
			PTNET_CORE_UNLOCK(sc);
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		err = ifmedia_ioctl(ifp, ifr, &sc->media, cmd);
		break;

	default:
		err = ether_ioctl(ifp, cmd, data);
		break;
	}

	return err;
}

static int
ptnet_init_locked(struct ptnet_softc *sc)
{
	if_t ifp = sc->ifp;
	struct netmap_adapter *na_dr = &sc->ptna->dr.up;
	struct netmap_adapter *na_nm = &sc->ptna->hwup.up;
	unsigned int nm_buf_size;
	int ret;

	if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
		return 0; /* nothing to do */
	}

	device_printf(sc->dev, "%s\n", __func__);

	/* Translate offload capabilities according to if_capenable. */
	if_sethwassist(ifp, 0);
	if (if_getcapenable(ifp) & IFCAP_TXCSUM)
		if_sethwassistbits(ifp, PTNET_CSUM_OFFLOAD, 0);
	if (if_getcapenable(ifp) & IFCAP_TXCSUM_IPV6)
		if_sethwassistbits(ifp, PTNET_CSUM_OFFLOAD_IPV6, 0);
	if (if_getcapenable(ifp) & IFCAP_TSO4)
		if_sethwassistbits(ifp, CSUM_IP_TSO, 0);
	if (if_getcapenable(ifp) & IFCAP_TSO6)
		if_sethwassistbits(ifp, CSUM_IP6_TSO, 0);

	/*
	 * Prepare the interface for netmap mode access.
	 */
	netmap_update_config(na_dr);

	ret = netmap_mem_finalize(na_dr->nm_mem, na_dr);
	if (ret) {
		device_printf(sc->dev, "netmap_mem_finalize() failed\n");
		return ret;
	}

	if (sc->ptna->backend_users == 0) {
		ret = ptnet_nm_krings_create(na_nm);
		if (ret) {
			device_printf(sc->dev, "ptnet_nm_krings_create() "
					       "failed\n");
			goto err_mem_finalize;
		}

		ret = netmap_mem_rings_create(na_dr);
		if (ret) {
			device_printf(sc->dev, "netmap_mem_rings_create() "
					       "failed\n");
			goto err_rings_create;
		}

		ret = netmap_mem_get_lut(na_dr->nm_mem, &na_dr->na_lut);
		if (ret) {
			device_printf(sc->dev, "netmap_mem_get_lut() "
					       "failed\n");
			goto err_get_lut;
		}
	}

	ret = ptnet_nm_register(na_dr, 1 /* on */);
	if (ret) {
		goto err_register;
	}

	nm_buf_size = NETMAP_BUF_SIZE(na_dr);

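	/*
	 * min_tx_space is (our reading of the computation below) the
	 * worst-case number of netmap slots that one maximally-sized
	 * packet can span, plus two slots of slack covering the
	 * virtio-net header and integer-division rounding; the TX path
	 * waits for the host when fewer slots are available.
	 */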
	KASSERT(nm_buf_size > 0, ("Invalid netmap buffer size"));
	sc->min_tx_space = PTNET_MAX_PKT_SIZE / nm_buf_size + 2;
	device_printf(sc->dev, "%s: min_tx_space = %u\n", __func__,
		      sc->min_tx_space);
#ifdef PTNETMAP_STATS
	callout_reset(&sc->tick, hz, ptnet_tick, sc);
#endif

	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);

	return 0;

err_register:
	memset(&na_dr->na_lut, 0, sizeof(na_dr->na_lut));
err_get_lut:
	netmap_mem_rings_delete(na_dr);
err_rings_create:
	ptnet_nm_krings_delete(na_nm);
err_mem_finalize:
	netmap_mem_deref(na_dr->nm_mem, na_dr);

	return ret;
}

/* To be called under core lock. */
static int
ptnet_stop(struct ptnet_softc *sc)
{
	if_t ifp = sc->ifp;
	struct netmap_adapter *na_dr = &sc->ptna->dr.up;
	struct netmap_adapter *na_nm = &sc->ptna->hwup.up;
	int i;

	device_printf(sc->dev, "%s\n", __func__);

	if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
		return 0; /* nothing to do */
	}

	/* Clear the driver-ready flag, and synchronize with all the queues,
	 * so that after this loop we are sure nobody is working anymore with
	 * the device. This scheme is taken from the vtnet driver. */
	if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
	callout_stop(&sc->tick);
	for (i = 0; i < sc->num_rings; i++) {
		PTNET_Q_LOCK(sc->queues + i);
		PTNET_Q_UNLOCK(sc->queues + i);
	}

	ptnet_nm_register(na_dr, 0 /* off */);

	if (sc->ptna->backend_users == 0) {
		netmap_mem_rings_delete(na_dr);
		ptnet_nm_krings_delete(na_nm);
	}
	netmap_mem_deref(na_dr->nm_mem, na_dr);

	return 0;
}

static void
ptnet_qflush(if_t ifp)
{
	struct ptnet_softc *sc = if_getsoftc(ifp);
	int i;

	/* Flush all the bufrings and do the interface flush. */
	for (i = 0; i < sc->num_rings; i++) {
		struct ptnet_queue *pq = sc->queues + i;
		struct mbuf *m;

		PTNET_Q_LOCK(pq);
		if (pq->bufring) {
			while ((m = buf_ring_dequeue_sc(pq->bufring))) {
				m_freem(m);
			}
		}
		PTNET_Q_UNLOCK(pq);
	}

	if_qflush(ifp);
}

static int
ptnet_media_change(if_t ifp)
{
	struct ptnet_softc *sc = if_getsoftc(ifp);
	struct ifmedia *ifm = &sc->media;

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) {
		return EINVAL;
	}

	return 0;
}

static uint64_t
ptnet_get_counter(if_t ifp, ift_counter cnt)
{
	struct ptnet_softc *sc = if_getsoftc(ifp);
	struct ptnet_queue_stats stats[2];
	int i;

	/* Accumulate statistics over the queues. */
	memset(stats, 0, sizeof(stats));
	for (i = 0; i < sc->num_rings; i++) {
		struct ptnet_queue *pq = sc->queues + i;
		int idx = (i < sc->num_tx_rings) ? 0 : 1;

		stats[idx].packets	+= pq->stats.packets;
		stats[idx].bytes	+= pq->stats.bytes;
		stats[idx].errors	+= pq->stats.errors;
		stats[idx].iqdrops	+= pq->stats.iqdrops;
		stats[idx].mcasts	+= pq->stats.mcasts;
	}

	switch (cnt) {
	case IFCOUNTER_IPACKETS:
		return (stats[1].packets);
	case IFCOUNTER_IQDROPS:
		return (stats[1].iqdrops);
	case IFCOUNTER_IERRORS:
		return (stats[1].errors);
	case IFCOUNTER_OPACKETS:
		return (stats[0].packets);
	case IFCOUNTER_OBYTES:
		return (stats[0].bytes);
	case IFCOUNTER_OMCASTS:
		return (stats[0].mcasts);
	default:
		return (if_get_counter_default(ifp, cnt));
	}
}

#ifdef PTNETMAP_STATS
/* Called under core lock. */
static void
ptnet_tick(void *opaque)
{
	struct ptnet_softc *sc = opaque;
	int i;

	for (i = 0; i < sc->num_rings; i++) {
		struct ptnet_queue *pq = sc->queues + i;
		struct ptnet_queue_stats cur = pq->stats;
		struct timeval now;
		unsigned int delta;

		microtime(&now);
		delta = now.tv_usec - sc->last_ts.tv_usec +
			(now.tv_sec - sc->last_ts.tv_sec) * 1000000;
		delta /= 1000; /* in milliseconds */

		if (delta == 0)
			continue;

		device_printf(sc->dev, "#%d[%u ms]:pkts %lu, kicks %lu, "
			      "intr %lu\n", i, delta,
			      (cur.packets - pq->last_stats.packets),
			      (cur.kicks - pq->last_stats.kicks),
			      (cur.intrs - pq->last_stats.intrs));
		pq->last_stats = cur;
	}
	microtime(&sc->last_ts);
	callout_schedule(&sc->tick, hz);
}
#endif /* PTNETMAP_STATS */

static void
ptnet_media_status(if_t ifp, struct ifmediareq *ifmr)
{
	/* We are always active, as the backend netmap port is
	 * always open in netmap mode. */
	ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
	ifmr->ifm_active = IFM_ETHER | IFM_10G_T | IFM_FDX;
}

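/*
 * Issue a ptnetmap control command (e.g. PTNETMAP_PTCTL_CREATE or
 * PTNETMAP_PTCTL_DELETE) to the device. The register write is assumed
 * to execute the command synchronously, so that the status can be read
 * back right away.
 */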
static uint32_t
ptnet_nm_ptctl(struct ptnet_softc *sc, uint32_t cmd)
{
	/*
	 * Write a command and read back error status,
	 * with zero meaning success.
	 */
	bus_write_4(sc->iomem, PTNET_IO_PTCTL, cmd);
	return bus_read_4(sc->iomem, PTNET_IO_PTCTL);
}

static int
ptnet_nm_config(struct netmap_adapter *na, struct nm_config_info *info)
{
	struct ptnet_softc *sc = if_getsoftc(na->ifp);

	info->num_tx_rings = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_RINGS);
	info->num_rx_rings = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_RINGS);
	info->num_tx_descs = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_SLOTS);
	info->num_rx_descs = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_SLOTS);
	info->rx_buf_maxsize = NETMAP_BUF_SIZE(na);

	device_printf(sc->dev, "txr %u, rxr %u, txd %u, rxd %u, rxbufsz %u\n",
		      info->num_tx_rings, info->num_rx_rings,
		      info->num_tx_descs, info->num_rx_descs,
		      info->rx_buf_maxsize);

	return 0;
}

static void
ptnet_sync_from_csb(struct ptnet_softc *sc, struct netmap_adapter *na)
{
	int i;

	/* Sync krings from the host, reading from
	 * CSB. */
	for (i = 0; i < sc->num_rings; i++) {
		struct nm_csb_atok *atok = sc->queues[i].atok;
		struct nm_csb_ktoa *ktoa = sc->queues[i].ktoa;
		struct netmap_kring *kring;

		if (i < na->num_tx_rings) {
			kring = na->tx_rings[i];
		} else {
			kring = na->rx_rings[i - na->num_tx_rings];
		}
		kring->rhead = kring->ring->head = atok->head;
		kring->rcur = kring->ring->cur = atok->cur;
		kring->nr_hwcur = ktoa->hwcur;
		kring->nr_hwtail = kring->rtail =
			kring->ring->tail = ktoa->hwtail;

		nm_prdis("%d,%d: csb {hc %u h %u c %u ht %u}", t, i,
			 ktoa->hwcur, atok->head, atok->cur,
			 ktoa->hwtail);
		nm_prdis("%d,%d: kring {hc %u rh %u rc %u h %u c %u ht %u rt %u t %u}",
			 t, i, kring->nr_hwcur, kring->rhead, kring->rcur,
			 kring->ring->head, kring->ring->cur, kring->nr_hwtail,
			 kring->rtail, kring->ring->tail);
	}
}

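/*
 * Negotiate the virtio-net header length with the device: write the
 * desired value and read back what the device accepted, then propagate
 * it to the netmap adapter, so that netmap applications know they must
 * prepend/parse the header themselves.
 */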
static void
ptnet_update_vnet_hdr(struct ptnet_softc *sc)
{
	unsigned int wanted_hdr_len = ptnet_vnet_hdr ? PTNET_HDR_SIZE : 0;

	bus_write_4(sc->iomem, PTNET_IO_VNET_HDR_LEN, wanted_hdr_len);
	sc->vnet_hdr_len = bus_read_4(sc->iomem, PTNET_IO_VNET_HDR_LEN);
	sc->ptna->hwup.up.virt_hdr_len = sc->vnet_hdr_len;
}

static int
ptnet_nm_register(struct netmap_adapter *na, int onoff)
{
	/* device-specific */
	if_t ifp = na->ifp;
	struct ptnet_softc *sc = if_getsoftc(ifp);
	int native = (na == &sc->ptna->hwup.up);
	struct ptnet_queue *pq;
	int ret = 0;
	int i;

	if (!onoff) {
		sc->ptna->backend_users--;
	}

	/* If this is the last netmap client, the guest interrupt enable
	 * flags may be in an arbitrary state. Since these flags are going
	 * to be used also by the network driver, we have to make sure to
	 * start with notifications enabled. Pending packets in the RX
	 * rings also need to be flushed, since we will not receive further
	 * interrupts until they are processed. */
	if (native && !onoff && na->active_fds == 0) {
		nm_prinf("Exit netmap mode, re-enable interrupts");
		for (i = 0; i < sc->num_rings; i++) {
			pq = sc->queues + i;
			pq->atok->appl_need_kick = 1;
		}
	}

	if (onoff) {
		if (sc->ptna->backend_users == 0) {
			/* Initialize notification enable fields in the CSB. */
			for (i = 0; i < sc->num_rings; i++) {
				pq = sc->queues + i;
				pq->ktoa->kern_need_kick = 1;
				pq->atok->appl_need_kick =
					(!(if_getcapenable(ifp) & IFCAP_POLLING)
						&& i >= sc->num_tx_rings);
			}

			/* Set the virtio-net header length. */
			ptnet_update_vnet_hdr(sc);

			/* Make sure the host adapter passed through is ready
			 * for txsync/rxsync. */
			ret = ptnet_nm_ptctl(sc, PTNETMAP_PTCTL_CREATE);
			if (ret) {
				return ret;
			}

			/* Align the guest krings and rings to the state stored
			 * in the CSB. */
			ptnet_sync_from_csb(sc, na);
		}

		/* If not native, don't call nm_set_native_flags, since we
		 * don't want to replace the if_transmit method, nor to set
		 * NAF_NETMAP_ON. */
		if (native) {
			netmap_krings_mode_commit(na, onoff);
			nm_set_native_flags(na);
		}

	} else {
		if (native) {
			nm_clear_native_flags(na);
			netmap_krings_mode_commit(na, onoff);
		}

		if (sc->ptna->backend_users == 0) {
			ret = ptnet_nm_ptctl(sc, PTNETMAP_PTCTL_DELETE);
		}
	}

	if (onoff) {
		sc->ptna->backend_users++;
	}

	return ret;
}

static int
ptnet_nm_txsync(struct netmap_kring *kring, int flags)
{
	struct ptnet_softc *sc = if_getsoftc(kring->na->ifp);
	struct ptnet_queue *pq = sc->queues + kring->ring_id;
	bool notify;

	notify = netmap_pt_guest_txsync(pq->atok, pq->ktoa, kring, flags);
	if (notify) {
		ptnet_kick(pq);
	}

	return 0;
}

static int
ptnet_nm_rxsync(struct netmap_kring *kring, int flags)
{
	struct ptnet_softc *sc = if_getsoftc(kring->na->ifp);
	struct ptnet_queue *pq = sc->rxqueues + kring->ring_id;
	bool notify;

	notify = netmap_pt_guest_rxsync(pq->atok, pq->ktoa, kring, flags);
	if (notify) {
		ptnet_kick(pq);
	}

	return 0;
}

static void
ptnet_nm_intr(struct netmap_adapter *na, int onoff)
{
	struct ptnet_softc *sc = if_getsoftc(na->ifp);
	int i;

	for (i = 0; i < sc->num_rings; i++) {
		struct ptnet_queue *pq = sc->queues + i;
		pq->atok->appl_need_kick = onoff;
	}
}

static void
ptnet_tx_intr(void *opaque)
{
	struct ptnet_queue *pq = opaque;
	struct ptnet_softc *sc = pq->sc;

	DBG(device_printf(sc->dev, "Tx interrupt #%d\n", pq->kring_id));
#ifdef PTNETMAP_STATS
	pq->stats.intrs ++;
#endif /* PTNETMAP_STATS */

	if (netmap_tx_irq(sc->ifp, pq->kring_id) != NM_IRQ_PASS) {
		return;
	}

	/* Schedule the taskqueue to process the pending transmit requests.
	 * Note that vtnet, if_em and if_igb just call ptnet_transmit() here,
	 * at least when using MSI-X interrupts. The if_em driver, instead,
	 * schedules the taskqueue when using legacy interrupts. */
	taskqueue_enqueue(pq->taskq, &pq->task);
}

static void
ptnet_rx_intr(void *opaque)
{
	struct ptnet_queue *pq = opaque;
	struct ptnet_softc *sc = pq->sc;
	unsigned int unused;

	DBG(device_printf(sc->dev, "Rx interrupt #%d\n", pq->kring_id));
#ifdef PTNETMAP_STATS
	pq->stats.intrs ++;
#endif /* PTNETMAP_STATS */

	if (netmap_rx_irq(sc->ifp, pq->kring_id, &unused) != NM_IRQ_PASS) {
		return;
	}

	/* Like vtnet, if_igb and if_em drivers when using MSI-X interrupts,
	 * receive-side processing is executed directly in the interrupt
	 * service routine. Alternatively, we may schedule the taskqueue. */
	ptnet_rx_eof(pq, PTNET_RX_BUDGET, true);
}

static void
ptnet_vlan_tag_remove(struct mbuf *m)
{
	struct ether_vlan_header *evh;

	evh = mtod(m, struct ether_vlan_header *);
	m->m_pkthdr.ether_vtag = ntohs(evh->evl_tag);
	m->m_flags |= M_VLANTAG;

	/* Strip the 802.1Q header. */
	bcopy((char *) evh, (char *) evh + ETHER_VLAN_ENCAP_LEN,
	      ETHER_HDR_LEN - ETHER_TYPE_LEN);
	m_adj(m, ETHER_VLAN_ENCAP_LEN);
}

static void
ptnet_ring_update(struct ptnet_queue *pq, struct netmap_kring *kring,
		  unsigned int head, unsigned int sync_flags)
{
	struct netmap_ring *ring = kring->ring;
	struct nm_csb_atok *atok = pq->atok;
	struct nm_csb_ktoa *ktoa = pq->ktoa;

	/* Some packets have been pushed to the netmap ring. We have
	 * to tell the host to process the new packets, updating cur
	 * and head in the CSB. */
	ring->head = ring->cur = head;

	/* Mimic nm_txsync_prologue/nm_rxsync_prologue. */
	kring->rcur = kring->rhead = head;

	nm_sync_kloop_appl_write(atok, kring->rcur, kring->rhead);

	/* Kick the host if needed. */
	if (NM_ACCESS_ONCE(ktoa->kern_need_kick)) {
		atok->sync_flags = sync_flags;
		ptnet_kick(pq);
	}
}

#define PTNET_TX_NOSPACE(_h, _k, _min)	\
	((((_h) < (_k)->rtail) ? 0 : (_k)->nkr_num_slots) + \
		(_k)->rtail - (_h)) < (_min)
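/*
 * Worked example for PTNET_TX_NOSPACE: with nkr_num_slots = 256,
 * head = 250 and rtail = 10, the available slots are
 * 256 + 10 - 250 = 16; the condition is true iff that count drops
 * below _min (sc->min_tx_space in the TX path below).
 */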

/* This function may be called by the network stack or by the
 * taskqueue thread. */
static int
ptnet_drain_transmit_queue(struct ptnet_queue *pq, unsigned int budget,
			   bool may_resched)
{
	struct ptnet_softc *sc = pq->sc;
	bool have_vnet_hdr = sc->vnet_hdr_len;
	struct netmap_adapter *na = &sc->ptna->dr.up;
	if_t ifp = sc->ifp;
	unsigned int batch_count = 0;
	struct nm_csb_atok *atok;
	struct nm_csb_ktoa *ktoa;
	struct netmap_kring *kring;
	struct netmap_ring *ring;
	struct netmap_slot *slot;
	unsigned int count = 0;
	unsigned int minspace;
	unsigned int head;
	unsigned int lim;
	struct mbuf *mhead;
	struct mbuf *mf;
	int nmbuf_bytes;
	uint8_t *nmbuf;

	if (!PTNET_Q_TRYLOCK(pq)) {
		/* We failed to acquire the lock, schedule the taskqueue. */
		nm_prlim(1, "Deferring TX work");
		if (may_resched) {
			taskqueue_enqueue(pq->taskq, &pq->task);
		}

		return 0;
	}

	if (unlikely(!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))) {
		PTNET_Q_UNLOCK(pq);
		nm_prlim(1, "Interface is down");
		return ENETDOWN;
	}

	atok = pq->atok;
	ktoa = pq->ktoa;
	kring = na->tx_rings[pq->kring_id];
	ring = kring->ring;
	lim = kring->nkr_num_slots - 1;
	head = ring->head;
	minspace = sc->min_tx_space;

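	/*
	 * Main TX loop: each iteration dequeues one mbuf chain from the
	 * bufring and copies it into consecutive netmap slots (linked
	 * together via NS_MOREFRAG), prepending a virtio-net header when
	 * one has been negotiated.
	 */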
	while (count < budget) {
		if (PTNET_TX_NOSPACE(head, kring, minspace)) {
			/* We ran out of slots; check whether the host has
			 * freed some, by reading hwcur and hwtail from
			 * the CSB. */
			ptnet_sync_tail(ktoa, kring);

			if (PTNET_TX_NOSPACE(head, kring, minspace)) {
				/* Still no slots available. Reactivate the
				 * interrupts so that we can be notified
				 * when some free slots are made available by
				 * the host. */
				atok->appl_need_kick = 1;

				/* Double check. We need a full barrier to
				 * prevent the store to atok->appl_need_kick
				 * to be reordered with the load from
				 * ktoa->hwcur and ktoa->hwtail (store-load
				 * barrier). */
				nm_stld_barrier();
				ptnet_sync_tail(ktoa, kring);
				if (likely(PTNET_TX_NOSPACE(head, kring,
							    minspace))) {
					break;
				}

				nm_prlim(1, "Found more slots by doublecheck");
				/* More slots were freed before reactivating
				 * the interrupts. */
				atok->appl_need_kick = 0;
			}
		}

		mhead = drbr_peek(ifp, pq->bufring);
		if (!mhead) {
			break;
		}

		/* Initialize transmission state variables. */
		slot = ring->slot + head;
		nmbuf = NMB(na, slot);
		nmbuf_bytes = 0;

		/* If needed, prepare the virtio-net header at the beginning
		 * of the first slot. */
		if (have_vnet_hdr) {
			struct virtio_net_hdr *vh =
					(struct virtio_net_hdr *)nmbuf;

			/* For performance, we could replace this memset() with
			 * two 8-bytes-wide writes. */
			memset(nmbuf, 0, PTNET_HDR_SIZE);
			if (mhead->m_pkthdr.csum_flags & PTNET_ALL_OFFLOAD) {
				mhead = virtio_net_tx_offload(ifp, mhead, false,
							      vh);
				if (unlikely(!mhead)) {
					/* Packet dropped because errors
					 * occurred while preparing the vnet
					 * header. Let's go ahead with the next
					 * packet. */
					pq->stats.errors ++;
					drbr_advance(ifp, pq->bufring);
					continue;
				}
			}
			nm_prdis(1, "%s: [csum_flags %lX] vnet hdr: flags %x "
				    "csum_start %u csum_ofs %u hdr_len = %u "
				    "gso_size %u gso_type %x", __func__,
				    mhead->m_pkthdr.csum_flags, vh->flags,
				    vh->csum_start, vh->csum_offset, vh->hdr_len,
				    vh->gso_size, vh->gso_type);

			nmbuf += PTNET_HDR_SIZE;
			nmbuf_bytes += PTNET_HDR_SIZE;
		}

		for (mf = mhead; mf; mf = mf->m_next) {
			uint8_t *mdata = mf->m_data;
			int mlen = mf->m_len;

			for (;;) {
				int copy = NETMAP_BUF_SIZE(na) - nmbuf_bytes;

				if (mlen < copy) {
					copy = mlen;
				}
				memcpy(nmbuf, mdata, copy);

				mdata += copy;
				mlen -= copy;
				nmbuf += copy;
				nmbuf_bytes += copy;

				if (!mlen) {
					break;
				}

				slot->len = nmbuf_bytes;
				slot->flags = NS_MOREFRAG;

				head = nm_next(head, lim);
				KASSERT(head != ring->tail,
					("Unexpectedly run out of TX space"));
				slot = ring->slot + head;
				nmbuf = NMB(na, slot);
				nmbuf_bytes = 0;
			}
		}

		/* Complete last slot and update head. */
		slot->len = nmbuf_bytes;
		slot->flags = 0;
		head = nm_next(head, lim);

		/* Consume the packet just processed. */
		drbr_advance(ifp, pq->bufring);

		/* Copy the packet to listeners. */
		ETHER_BPF_MTAP(ifp, mhead);

		pq->stats.packets ++;
		pq->stats.bytes += mhead->m_pkthdr.len;
		if (mhead->m_flags & M_MCAST) {
			pq->stats.mcasts ++;
		}

		m_freem(mhead);

		count ++;
		if (++batch_count == PTNET_TX_BATCH) {
			ptnet_ring_update(pq, kring, head, NAF_FORCE_RECLAIM);
			batch_count = 0;
		}
	}

	if (batch_count) {
		ptnet_ring_update(pq, kring, head, NAF_FORCE_RECLAIM);
	}

	if (count >= budget && may_resched) {
		DBG(nm_prlim(1, "out of budget: resched, %d mbufs pending\n",
			     drbr_inuse(ifp, pq->bufring)));
		taskqueue_enqueue(pq->taskq, &pq->task);
	}

	PTNET_Q_UNLOCK(pq);

	return count;
}

static int
ptnet_transmit(if_t ifp, struct mbuf *m)
{
	struct ptnet_softc *sc = if_getsoftc(ifp);
	struct ptnet_queue *pq;
	unsigned int queue_idx;
	int err;

	DBG(device_printf(sc->dev, "transmit %p\n", m));

	/* Insert 802.1Q header if needed. */
	if (m->m_flags & M_VLANTAG) {
		m = ether_vlanencap(m, m->m_pkthdr.ether_vtag);
		if (m == NULL) {
			return ENOBUFS;
		}
		m->m_flags &= ~M_VLANTAG;
	}

	/* Get the flow-id if available. */
	queue_idx = (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) ?
		    m->m_pkthdr.flowid : curcpu;

	if (unlikely(queue_idx >= sc->num_tx_rings)) {
		queue_idx %= sc->num_tx_rings;
	}

	pq = sc->queues + queue_idx;

	err = drbr_enqueue(ifp, pq->bufring, m);
	if (err) {
		/* ENOBUFS when the bufring is full */
		nm_prlim(1, "%s: drbr_enqueue() failed %d\n",
			 __func__, err);
		pq->stats.errors ++;
		return err;
	}

	if (if_getcapenable(ifp) & IFCAP_POLLING) {
		/* If polling is on, the transmit queues will be
		 * drained by the poller. */
		return 0;
	}

	err = ptnet_drain_transmit_queue(pq, PTNET_TX_BUDGET, true);

	return (err < 0) ? err : 0;
}

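/*
 * Skip all the slots belonging to the packet that starts at 'head',
 * i.e. advance up to and past the first slot without NS_MOREFRAG set
 * (or stop at the end of the published ring), and return the new head.
 */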
static unsigned int
ptnet_rx_discard(struct netmap_kring *kring, unsigned int head)
{
	struct netmap_ring *ring = kring->ring;
	struct netmap_slot *slot = ring->slot + head;

	for (;;) {
		head = nm_next(head, kring->nkr_num_slots - 1);
		if (!(slot->flags & NS_MOREFRAG) || head == ring->tail) {
			break;
		}
		slot = ring->slot + head;
	}

	return head;
}

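/*
 * Append nmbuf_len bytes from a netmap buffer to the mbuf chain ending
 * at mtail, allocating additional clusters (MCLBYTES each) as needed.
 * Returns the new chain tail, or NULL if a cluster allocation fails,
 * in which case the caller drops the whole packet.
 */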
static inline struct mbuf *
ptnet_rx_slot(struct mbuf *mtail, uint8_t *nmbuf, unsigned int nmbuf_len)
{
	uint8_t *mdata = mtod(mtail, uint8_t *) + mtail->m_len;

	do {
		unsigned int copy;

		if (mtail->m_len == MCLBYTES) {
			struct mbuf *mf;

			mf = m_getcl(M_NOWAIT, MT_DATA, 0);
			if (unlikely(!mf)) {
				return NULL;
			}

			mtail->m_next = mf;
			mtail = mf;
			mdata = mtod(mtail, uint8_t *);
			mtail->m_len = 0;
		}

		copy = MCLBYTES - mtail->m_len;
		if (nmbuf_len < copy) {
			copy = nmbuf_len;
		}

		memcpy(mdata, nmbuf, copy);

		nmbuf += copy;
		nmbuf_len -= copy;
		mdata += copy;
		mtail->m_len += copy;
	} while (nmbuf_len);

	return mtail;
}

static int
ptnet_rx_eof(struct ptnet_queue *pq, unsigned int budget, bool may_resched)
{
	struct ptnet_softc *sc = pq->sc;
	bool have_vnet_hdr = sc->vnet_hdr_len;
	struct nm_csb_atok *atok = pq->atok;
	struct nm_csb_ktoa *ktoa = pq->ktoa;
	struct netmap_adapter *na = &sc->ptna->dr.up;
	struct netmap_kring *kring = na->rx_rings[pq->kring_id];
	struct netmap_ring *ring = kring->ring;
	unsigned int const lim = kring->nkr_num_slots - 1;
	unsigned int batch_count = 0;
	if_t ifp = sc->ifp;
	unsigned int count = 0;
	uint32_t head;

	PTNET_Q_LOCK(pq);

	if (unlikely(!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))) {
		goto unlock;
	}

	kring->nr_kflags &= ~NKR_PENDINTR;

	head = ring->head;
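	/*
	 * Main RX loop: each iteration consumes the slots of one packet
	 * (linked via NS_MOREFRAG), parses the optional virtio-net
	 * header, copies the payload into a freshly allocated mbuf chain
	 * and hands it to the network stack through if_input().
	 */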
	while (count < budget) {
		uint32_t prev_head = head;
		struct mbuf *mhead, *mtail;
		struct virtio_net_hdr *vh;
		struct netmap_slot *slot;
		unsigned int nmbuf_len;
		uint8_t *nmbuf;
		int deliver = 1; /* whether to deliver the mbuf to the stack */
	host_sync:
		if (head == ring->tail) {
			/* We ran out of slots; check whether the host has
			 * added some, by reading hwcur and hwtail from
			 * the CSB. */
			ptnet_sync_tail(ktoa, kring);

			if (head == ring->tail) {
				/* Still no slots available. Reactivate
				 * interrupts as they were disabled by the
				 * host thread right before issuing the
				 * last interrupt. */
				atok->appl_need_kick = 1;

				/* Double check for more completed RX slots.
				 * We need a full barrier to prevent the store
				 * to atok->appl_need_kick to be reordered with
				 * the load from ktoa->hwcur and ktoa->hwtail
				 * (store-load barrier). */
				nm_stld_barrier();
				ptnet_sync_tail(ktoa, kring);
				if (likely(head == ring->tail)) {
					break;
				}
				atok->appl_need_kick = 0;
			}
		}

		/* Initialize ring state variables, possibly grabbing the
		 * virtio-net header. */
		slot = ring->slot + head;
		nmbuf = NMB(na, slot);
		nmbuf_len = slot->len;

		vh = (struct virtio_net_hdr *)nmbuf;
		if (have_vnet_hdr) {
			if (unlikely(nmbuf_len < PTNET_HDR_SIZE)) {
				/* There is no good reason why the host should
				 * split the virtio-net header across multiple
				 * netmap slots; if it does, discard. */
				nm_prlim(1, "Fragmented vnet-hdr: dropping");
				head = ptnet_rx_discard(kring, head);
				pq->stats.iqdrops ++;
				deliver = 0;
				goto skip;
			}
			nm_prdis(1, "%s: vnet hdr: flags %x csum_start %u "
				    "csum_ofs %u hdr_len = %u gso_size %u "
				    "gso_type %x", __func__, vh->flags,
				    vh->csum_start, vh->csum_offset, vh->hdr_len,
				    vh->gso_size, vh->gso_type);
			nmbuf += PTNET_HDR_SIZE;
			nmbuf_len -= PTNET_HDR_SIZE;
		}

		/* Allocate the head of a new mbuf chain.
		 * We use m_getcl() to allocate an mbuf with standard cluster
		 * size (MCLBYTES). In the future we could use m_getjcl()
		 * to choose different sizes. */
		mhead = mtail = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		if (unlikely(mhead == NULL)) {
			device_printf(sc->dev, "%s: failed to allocate mbuf "
				      "head\n", __func__);
			pq->stats.errors ++;
			break;
		}

		/* Initialize the mbuf state variables. */
		mhead->m_pkthdr.len = nmbuf_len;
		mtail->m_len = 0;

		/* Scan all the netmap slots containing the current packet. */
		for (;;) {
			DBG(device_printf(sc->dev, "%s: h %u t %u rcv frag "
					  "len %u, flags %u\n", __func__,
					  head, ring->tail, slot->len,
					  slot->flags));

			mtail = ptnet_rx_slot(mtail, nmbuf, nmbuf_len);
			if (unlikely(!mtail)) {
				/* Ouch. We ran out of memory while processing
				 * a packet. We have to restore the previous
				 * head position, free the mbuf chain, and
				 * schedule the taskqueue to give the packet
				 * another chance. */
				device_printf(sc->dev, "%s: failed to allocate"
					" mbuf frag, reset head %u --> %u\n",
					__func__, head, prev_head);
				head = prev_head;
				m_freem(mhead);
				pq->stats.errors ++;
				if (may_resched) {
					taskqueue_enqueue(pq->taskq,
							  &pq->task);
				}
				goto escape;
			}

			/* We have to increment head irrespective of the
			 * NS_MOREFRAG being set or not. */
			head = nm_next(head, lim);

			if (!(slot->flags & NS_MOREFRAG)) {
				break;
			}

			if (unlikely(head == ring->tail)) {
				/* The very last slot prepared by the host has
				 * NS_MOREFRAG set. Drop it and continue the
				 * outer cycle (to do the double-check). */
				nm_prlim(1, "Incomplete packet: dropping");
				m_freem(mhead);
				pq->stats.iqdrops ++;
				goto host_sync;
			}

			slot = ring->slot + head;
			nmbuf = NMB(na, slot);
			nmbuf_len = slot->len;
			mhead->m_pkthdr.len += nmbuf_len;
		}

		mhead->m_pkthdr.rcvif = ifp;
		mhead->m_pkthdr.csum_flags = 0;

		/* Store the queue idx in the packet header. */
		mhead->m_pkthdr.flowid = pq->kring_id;
		M_HASHTYPE_SET(mhead, M_HASHTYPE_OPAQUE);

		if (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) {
			struct ether_header *eh;

			eh = mtod(mhead, struct ether_header *);
			if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
				ptnet_vlan_tag_remove(mhead);
				/*
				 * With the 802.1Q header removed, update the
				 * checksum starting location accordingly.
				 */
				if (vh->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
					vh->csum_start -= ETHER_VLAN_ENCAP_LEN;
			}
		}

		if (unlikely(have_vnet_hdr && virtio_net_rx_csum(mhead, vh))) {
			m_freem(mhead);
			nm_prlim(1, "Csum offload error: dropping");
			pq->stats.iqdrops ++;
			deliver = 0;
		}

	skip:
		count ++;
		if (++batch_count >= PTNET_RX_BATCH) {
			/* Some packets have been (or will be) pushed to the
			 * network stack. We need to update the CSB to tell
			 * the host about the new ring->cur and ring->head
			 * (RX buffer refill). */
			ptnet_ring_update(pq, kring, head, NAF_FORCE_READ);
			batch_count = 0;
		}

		if (likely(deliver)) {
			pq->stats.packets ++;
			pq->stats.bytes += mhead->m_pkthdr.len;

			PTNET_Q_UNLOCK(pq);
			if_input(ifp, mhead);
			PTNET_Q_LOCK(pq);
			/* The ring->head index (and related indices) are
			 * updated under pq lock by ptnet_ring_update().
			 * Since we dropped the lock to call if_input(), we
			 * must reload ring->head and restart processing the
			 * ring from there. */
			head = ring->head;

			if (unlikely(!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))) {
				/* The interface has gone down while we didn't
				 * have the lock. Stop any processing and
				 * exit. */
				goto unlock;
			}
		}
	}
escape:
	if (batch_count) {
		ptnet_ring_update(pq, kring, head, NAF_FORCE_READ);
	}

	if (count >= budget && may_resched) {
		/* If we ran out of budget or the double-check found new
		 * slots to process, schedule the taskqueue. */
		DBG(nm_prlim(1, "out of budget: resched h %u t %u\n",
			     head, ring->tail));
		taskqueue_enqueue(pq->taskq, &pq->task);
	}
unlock:
	PTNET_Q_UNLOCK(pq);

	return count;
}

static void
ptnet_rx_task(void *context, int pending)
{
	struct ptnet_queue *pq = context;

	DBG(nm_prlim(1, "%s: pq #%u\n", __func__, pq->kring_id));
	ptnet_rx_eof(pq, PTNET_RX_BUDGET, true);
}

static void
ptnet_tx_task(void *context, int pending)
{
	struct ptnet_queue *pq = context;

	DBG(nm_prlim(1, "%s: pq #%u\n", __func__, pq->kring_id));
	ptnet_drain_transmit_queue(pq, PTNET_TX_BUDGET, true);
}

#ifdef DEVICE_POLLING
/* We don't need to handle POLL_AND_CHECK_STATUS and POLL_ONLY
 * differently, since we don't have an Interrupt Status Register. */
static int
ptnet_poll(if_t ifp, enum poll_cmd cmd, int budget)
{
	struct ptnet_softc *sc = if_getsoftc(ifp);
	unsigned int queue_budget;
	unsigned int count = 0;
	bool borrow = false;
	int i;
	KASSERT(sc->num_rings > 0, ("Found no queues while polling ptnet"));
	queue_budget = MAX(budget / sc->num_rings, 1);
	nm_prlim(1, "Per-queue budget is %d", queue_budget);

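	/*
	 * Round-robin over the queues: the first scan gives each queue an
	 * equal share of the budget; subsequent scans ("borrow" mode) let
	 * queues consume whatever budget is left, until either the budget
	 * is exhausted or a full scan makes no progress.
	 */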
	while (budget) {
		unsigned int rcnt = 0;

		for (i = 0; i < sc->num_rings; i++) {
			struct ptnet_queue *pq = sc->queues + i;

			if (borrow) {
				queue_budget = MIN(queue_budget, budget);
				if (queue_budget == 0) {
					break;
				}
			}

			if (i < sc->num_tx_rings) {
				rcnt += ptnet_drain_transmit_queue(pq,
						   queue_budget, false);
			} else {
				rcnt += ptnet_rx_eof(pq, queue_budget,
						     false);
			}
		}

		if (!rcnt) {
			/* A scan of the queues gave no result, we can
			 * stop here. */
			break;
		}

		if (rcnt > budget) {
			/* This may happen when initial budget < sc->num_rings,
			 * since one packet budget is given to each queue
			 * anyway. Just pretend we didn't eat "so much". */
			rcnt = budget;
		}
		count += rcnt;
		budget -= rcnt;
		borrow = true;
	}

	return count;
}
#endif /* DEVICE_POLLING */
#endif /* WITH_PTNETMAP */