xref: /freebsd/sys/dev/netmap/if_vtnet_netmap.h (revision 2e42b74a)
/*
 * Copyright (C) 2014-2018 Vincenzo Maffione, Luigi Rizzo.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * $FreeBSD$
 */

#include <net/netmap.h>
#include <sys/selinfo.h>
#include <vm/vm.h>
#include <vm/pmap.h>	/* vtophys ? */
#include <dev/netmap/netmap_kern.h>

/*
 * Return 1 if the queue identified by 't' and 'idx' is in netmap mode.
 */
static int
vtnet_netmap_queue_on(struct vtnet_softc *sc, enum txrx t, int idx)
{
	struct netmap_adapter *na = NA(sc->vtnet_ifp);

	if (!nm_native_on(na))
		return 0;

	if (t == NR_RX)
		return !!(idx < na->num_rx_rings &&
			na->rx_rings[idx]->nr_mode == NKR_NETMAP_ON);

	return !!(idx < na->num_tx_rings &&
		na->tx_rings[idx]->nr_mode == NKR_NETMAP_ON);
}

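/*
 * Drain all the completed ("used") buffers from a virtqueue. If the queue was
 * not in netmap mode, the cookies are mbufs (wrapped in a TX header on the
 * transmit side) and must be freed here; netmap buffers are owned by netmap
 * and need no cleanup.
 */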
static void
vtnet_free_used(struct virtqueue *vq, int netmap_bufs, enum txrx t, int idx)
{
	void *cookie;
	int deq = 0;

	while ((cookie = virtqueue_dequeue(vq, NULL)) != NULL) {
		if (netmap_bufs) {
			/* These are netmap buffers: there is nothing to do. */
		} else {
			/* These are mbufs that we need to free. */
			struct mbuf *m;

			if (t == NR_TX) {
				struct vtnet_tx_header *txhdr = cookie;
				m = txhdr->vth_mbuf;
				m_freem(m);
				uma_zfree(vtnet_tx_header_zone, txhdr);
			} else {
				m = cookie;
				m_freem(m);
			}
		}
		deq++;
	}

	if (deq)
		nm_prinf("%d sgs dequeued from %s-%d (netmap=%d)\n",
			 deq, nm_txrx2str(t), idx, netmap_bufs);
}

/* Register and unregister. */
static int
vtnet_netmap_reg(struct netmap_adapter *na, int state)
{
	struct ifnet *ifp = na->ifp;
	struct vtnet_softc *sc = ifp->if_softc;
	int error;
	enum txrx t;
	int i;

	/* Drain the taskqueues to make sure that there are no worker threads
	 * accessing the virtqueues. */
	vtnet_drain_taskqueues(sc);

	VTNET_CORE_LOCK(sc);

	/* We need nm_netmap_on() to return true when called by
	 * vtnet_init_locked() below. */
	if (state)
		nm_set_native_flags(na);

	/* We need to trigger a device reset in order to unexpose guest buffers
	 * published to the host. */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	/* Get pending used buffers. The way they are freed depends on whether
	 * they are netmap buffers or mbufs. We can tell the two cases apart by
	 * looking at kring->nr_mode, before it is possibly updated in the loop
	 * below. */
	for (i = 0; i < sc->vtnet_act_vq_pairs; i++) {
		struct vtnet_txq *txq = &sc->vtnet_txqs[i];
		struct vtnet_rxq *rxq = &sc->vtnet_rxqs[i];
		struct netmap_kring *kring;

		VTNET_TXQ_LOCK(txq);
		kring = NMR(na, NR_TX)[i];
		vtnet_free_used(txq->vtntx_vq,
				kring->nr_mode == NKR_NETMAP_ON, NR_TX, i);
		VTNET_TXQ_UNLOCK(txq);

		VTNET_RXQ_LOCK(rxq);
		kring = NMR(na, NR_RX)[i];
		vtnet_free_used(rxq->vtnrx_vq,
				kring->nr_mode == NKR_NETMAP_ON, NR_RX, i);
		VTNET_RXQ_UNLOCK(rxq);
	}
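	/* Reinitialize the device. Depending on the new kring mode, the RX
	 * virtqueues will be repopulated with netmap buffers (see
	 * vtnet_netmap_rxq_populate()) or with regular mbufs. */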
	vtnet_init_locked(sc);
	error = (ifp->if_drv_flags & IFF_DRV_RUNNING) ? 0 : ENXIO;

	if (state) {
		for_rx_tx(t) {
			/* Hardware rings. */
			for (i = 0; i < nma_get_nrings(na, t); i++) {
				struct netmap_kring *kring = NMR(na, t)[i];

				if (nm_kring_pending_on(kring))
					kring->nr_mode = NKR_NETMAP_ON;
			}

			/* Host rings. */
			for (i = 0; i < nma_get_host_nrings(na, t); i++) {
				struct netmap_kring *kring =
					NMR(na, t)[nma_get_nrings(na, t) + i];

				if (nm_kring_pending_on(kring))
					kring->nr_mode = NKR_NETMAP_ON;
			}
		}
	} else {
		nm_clear_native_flags(na);
		for_rx_tx(t) {
			/* Hardware rings. */
			for (i = 0; i < nma_get_nrings(na, t); i++) {
				struct netmap_kring *kring = NMR(na, t)[i];

				if (nm_kring_pending_off(kring))
					kring->nr_mode = NKR_NETMAP_OFF;
			}

			/* Host rings. */
			for (i = 0; i < nma_get_host_nrings(na, t); i++) {
				struct netmap_kring *kring =
					NMR(na, t)[nma_get_nrings(na, t) + i];

				if (nm_kring_pending_off(kring))
					kring->nr_mode = NKR_NETMAP_OFF;
			}
		}
	}

	VTNET_CORE_UNLOCK(sc);

	return error;
}


/* Reconcile kernel and user view of the transmit ring. */
static int
vtnet_netmap_txsync(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	struct ifnet *ifp = na->ifp;
	struct netmap_ring *ring = kring->ring;
	u_int ring_nr = kring->ring_id;
	u_int nm_i;	/* index into the netmap ring */
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = kring->rhead;

	/* device-specific */
	struct vtnet_softc *sc = ifp->if_softc;
	struct vtnet_txq *txq = &sc->vtnet_txqs[ring_nr];
	struct virtqueue *vq = txq->vtntx_vq;
	int interrupts = !(kring->nr_kflags & NKR_NOINTR);
	u_int n;

	/*
	 * First part: process new packets to send.
	 */
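	/* Read barrier: make sure the ring slots published by userspace are
	 * visible before we start processing them. */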
	rmb();

	nm_i = kring->nr_hwcur;
	if (nm_i != head) {	/* we have new packets to send */
		struct sglist *sg = txq->vtntx_sg;

		for (; nm_i != head; nm_i = nm_next(nm_i, lim)) {
			/* An empty (all-zero) virtio-net header is prepended
			 * to each packet. */
			struct netmap_slot *slot = &ring->slot[nm_i];
			u_int len = slot->len;
			uint64_t paddr;
			void *addr = PNMB(na, slot, &paddr);
			int err;

			NM_CHECK_ADDR_LEN(na, addr, len);

			slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED);
			/* Initialize the scatterlist and expose it to
			 * the hypervisor; the hypervisor is kicked (if
			 * necessary) after the loop.
			 */
			sglist_reset(sg); /* cheap */
			err = sglist_append(sg, &txq->vtntx_shrhdr, sc->vtnet_hdr_size);
			err |= sglist_append_phys(sg, paddr, len);
			KASSERT(err == 0, ("%s: cannot append to sglist %d",
						__func__, err));
			err = virtqueue_enqueue(vq, /*cookie=*/txq, sg,
						/*readable=*/sg->sg_nseg,
						/*writable=*/0);
			if (unlikely(err)) {
				if (err != ENOSPC)
					nm_prerr("virtqueue_enqueue(%s) failed: %d\n",
							kring->name, err);
				break;
			}
		}

		virtqueue_notify(vq);

		/* Update hwcur depending on where we stopped. */
		kring->nr_hwcur = nm_i; /* note we might break early */
	}

	/* Free used slots. We only consider our own used buffers, recognized
	 * by the token we passed to virtqueue_enqueue.
	 */
	n = 0;
	for (;;) {
		void *token = virtqueue_dequeue(vq, NULL);
		if (token == NULL)
			break;
		if (unlikely(token != (void *)txq))
			nm_prerr("BUG: TX token mismatch\n");
		else
			n++;
	}
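	/* Advance hwtail by the number of buffers reclaimed above, wrapping
	 * around the ring size (lim + 1 slots). */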
	if (n > 0) {
		kring->nr_hwtail += n;
		if (kring->nr_hwtail > lim)
			kring->nr_hwtail -= lim + 1;
	}

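	/* The TX virtqueue is running low on free descriptors: ask the host
	 * for a (postponed) completion interrupt, so that used buffers can be
	 * reclaimed at a later txsync. */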
	if (interrupts && virtqueue_nfree(vq) < 32)
		virtqueue_postpone_intr(vq, VQ_POSTPONE_LONG);

	return 0;
}

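/*
 * Expose the netmap slots in the range [nm_i, head) to the RX virtqueue; each
 * 2-element sglist pairs the shared virtio-net header with a netmap buffer.
 * Returns the index of the first slot that was not exposed (head on success,
 * an earlier index if the virtqueue runs out of space), or -1 if the netmap
 * ring is in an unrecoverable state (netmap_ring_reinit() failed).
 */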
static int
vtnet_netmap_kring_refill(struct netmap_kring *kring, u_int nm_i, u_int head)
{
	struct netmap_adapter *na = kring->na;
	struct ifnet *ifp = na->ifp;
	struct netmap_ring *ring = kring->ring;
	u_int ring_nr = kring->ring_id;
	u_int const lim = kring->nkr_num_slots - 1;

	/* device-specific */
	struct vtnet_softc *sc = ifp->if_softc;
	struct vtnet_rxq *rxq = &sc->vtnet_rxqs[ring_nr];
	struct virtqueue *vq = rxq->vtnrx_vq;

	/* use a local sglist, default might be short */
	struct sglist_seg ss[2];
	struct sglist sg = { ss, 0, 0, 2 };

	for (; nm_i != head; nm_i = nm_next(nm_i, lim)) {
		struct netmap_slot *slot = &ring->slot[nm_i];
		uint64_t paddr;
		void *addr = PNMB(na, slot, &paddr);
		int err;

		if (addr == NETMAP_BUF_BASE(na)) { /* bad buf */
			if (netmap_ring_reinit(kring))
				return -1;
		}

		slot->flags &= ~NS_BUF_CHANGED;
		sglist_reset(&sg);
		err = sglist_append(&sg, &rxq->vtnrx_shrhdr, sc->vtnet_hdr_size);
		err |= sglist_append_phys(&sg, paddr, NETMAP_BUF_SIZE(na));
		KASSERT(err == 0, ("%s: cannot append to sglist %d",
					__func__, err));
		/* writable for the host */
		err = virtqueue_enqueue(vq, /*cookie=*/rxq, &sg,
				/*readable=*/0, /*writable=*/sg.sg_nseg);
		if (unlikely(err)) {
			if (err != ENOSPC)
				nm_prerr("virtqueue_enqueue(%s) failed: %d\n",
					kring->name, err);
			break;
		}
	}

	return nm_i;
}

/*
 * Publish netmap buffers on an RX virtqueue.
 * Returns -1 if this virtqueue is not being opened in netmap mode;
 * otherwise returns 0 on success or a positive error code on failure.
 */
static int
vtnet_netmap_rxq_populate(struct vtnet_rxq *rxq)
{
	struct netmap_adapter *na = NA(rxq->vtnrx_sc->vtnet_ifp);
	struct netmap_kring *kring;
	int error;

	if (!nm_native_on(na) || rxq->vtnrx_id >= na->num_rx_rings)
		return -1;

	kring = na->rx_rings[rxq->vtnrx_id];
	if (!(nm_kring_pending_on(kring) ||
			kring->nr_pending_mode == NKR_NETMAP_ON))
		return -1;

	/* Expose all the RX netmap buffers. Note that the number of
	 * netmap slots in the RX ring matches the maximum number of
	 * 2-element sglists that the RX virtqueue can accommodate. */
	error = vtnet_netmap_kring_refill(kring, 0, na->num_rx_desc);
	virtqueue_notify(rxq->vtnrx_vq);

	return error < 0 ? ENXIO : 0;
}

/* Reconcile kernel and user view of the receive ring. */
static int
vtnet_netmap_rxsync(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	struct ifnet *ifp = na->ifp;
	struct netmap_ring *ring = kring->ring;
	u_int ring_nr = kring->ring_id;
	u_int nm_i;	/* index into the netmap ring */
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = kring->rhead;
	int force_update = (flags & NAF_FORCE_READ) ||
				(kring->nr_kflags & NKR_PENDINTR);
	int interrupts = !(kring->nr_kflags & NKR_NOINTR);

	/* device-specific */
	struct vtnet_softc *sc = ifp->if_softc;
	struct vtnet_rxq *rxq = &sc->vtnet_rxqs[ring_nr];
	struct virtqueue *vq = rxq->vtnrx_vq;

	rmb();
	/*
	 * First part: import newly received packets.
	 * Only accept our own buffers (matching the token); we should only
	 * get matching buffers anyway. We may need to stop early to prevent
	 * hwtail from overrunning hwcur.
	 */
	if (netmap_no_pendintr || force_update) {
		uint32_t hwtail_lim = nm_prev(kring->nr_hwcur, lim);
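		/* nr_hwtail is not allowed to catch up with nr_hwcur: stop
		 * one slot before it. */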
		void *token;

		vtnet_rxq_disable_intr(rxq);

		nm_i = kring->nr_hwtail;
		while (nm_i != hwtail_lim) {
			int len;
			token = virtqueue_dequeue(vq, &len);
			if (token == NULL) {
				if (interrupts && vtnet_rxq_enable_intr(rxq)) {
					vtnet_rxq_disable_intr(rxq);
					continue;
				}
				break;
			}
			if (unlikely(token != (void *)rxq)) {
				nm_prerr("BUG: RX token mismatch\n");
			} else {
				/* Skip the virtio-net header. */
				len -= sc->vtnet_hdr_size;
				if (unlikely(len < 0)) {
					RD(1, "Truncated virtio-net header, "
						"missing %d bytes", -len);
					len = 0;
				}
				ring->slot[nm_i].len = len;
				ring->slot[nm_i].flags = 0;
				nm_i = nm_next(nm_i, lim);
			}
		}
		kring->nr_hwtail = nm_i;
		kring->nr_kflags &= ~NKR_PENDINTR;
	}
	ND("[B] h %d c %d hwcur %d hwtail %d", ring->head, ring->cur,
				kring->nr_hwcur, kring->nr_hwtail);

	/*
	 * Second part: skip past packets that userspace has released, and
	 * expose their buffers to the RX virtqueue again.
	 */
	nm_i = kring->nr_hwcur; /* netmap ring index */
	if (nm_i != head) {
		int nm_j = vtnet_netmap_kring_refill(kring, nm_i, head);
		if (nm_j < 0)
			return nm_j;
		kring->nr_hwcur = nm_j;
		virtqueue_notify(vq);
	}

	ND("[C] h %d c %d t %d hwcur %d hwtail %d", ring->head, ring->cur,
		ring->tail, kring->nr_hwcur, kring->nr_hwtail);

	return 0;
}


/* Enable/disable interrupts on all virtqueues. */
static void
vtnet_netmap_intr(struct netmap_adapter *na, int state)
{
	struct vtnet_softc *sc = na->ifp->if_softc;
	int i;

	for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
		struct vtnet_rxq *rxq = &sc->vtnet_rxqs[i];
		struct vtnet_txq *txq = &sc->vtnet_txqs[i];
		struct virtqueue *txvq = txq->vtntx_vq;

		if (state) {
			vtnet_rxq_enable_intr(rxq);
			virtqueue_enable_intr(txvq);
		} else {
			vtnet_rxq_disable_intr(rxq);
			virtqueue_disable_intr(txvq);
		}
	}
}

static int
vtnet_netmap_tx_slots(struct vtnet_softc *sc)
{
	int div;

	/* We need to prepend a virtio-net header to each netmap buffer to be
	 * transmitted, so virtqueue_enqueue() is always called with a
	 * 2-element sglist.
	 * TX virtqueues use indirect descriptors if the feature was negotiated
	 * with the host, and if sc->vtnet_tx_nsegs > 1. With indirect
	 * descriptors, a single virtio descriptor is sufficient to reference
	 * each TX sglist. Without them, we need two separate virtio descriptors
	 * for each TX sglist. We therefore compute the number of netmap TX
	 * slots according to these assumptions.
	 */
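	/* For example (hypothetical sizes): a 256-entry TX virtqueue maps to
	 * 256 netmap TX slots with indirect descriptors, or to 128 slots
	 * without them. */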
	if ((sc->vtnet_flags & VTNET_FLAG_INDIRECT) && sc->vtnet_tx_nsegs > 1)
		div = 1;
	else
		div = 2;

	return virtqueue_size(sc->vtnet_txqs[0].vtntx_vq) / div;
}

static int
vtnet_netmap_rx_slots(struct vtnet_softc *sc)
{
	int div;

	/* We need to prepend a virtio-net header to each netmap buffer to be
	 * received, so virtqueue_enqueue() is always called with a 2-element
	 * sglist.
	 * RX virtqueues use indirect descriptors if the feature was negotiated
	 * with the host, and if sc->vtnet_rx_nsegs > 1. With indirect
	 * descriptors, a single virtio descriptor is sufficient to reference
	 * each RX sglist. Without them, we need two separate virtio descriptors
	 * for each RX sglist. We therefore compute the number of netmap RX
	 * slots according to these assumptions.
	 */
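	/* Same computation as vtnet_netmap_tx_slots(): e.g. a 512-entry RX
	 * virtqueue (hypothetical size) maps to 512 netmap RX slots with
	 * indirect descriptors, or to 256 without them. */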
	if ((sc->vtnet_flags & VTNET_FLAG_INDIRECT) && sc->vtnet_rx_nsegs > 1)
		div = 1;
	else
		div = 2;

	return virtqueue_size(sc->vtnet_rxqs[0].vtnrx_vq) / div;
}

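/* Report the current ring and slot configuration to the netmap core. */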
static int
vtnet_netmap_config(struct netmap_adapter *na, struct nm_config_info *info)
{
	struct vtnet_softc *sc = na->ifp->if_softc;

	info->num_tx_rings = sc->vtnet_act_vq_pairs;
	info->num_rx_rings = sc->vtnet_act_vq_pairs;
	info->num_tx_descs = vtnet_netmap_tx_slots(sc);
	info->num_rx_descs = vtnet_netmap_rx_slots(sc);
	info->rx_buf_maxsize = NETMAP_BUF_SIZE(na);

	return 0;
}

516 vtnet_netmap_attach(struct vtnet_softc *sc)
517 {
518 	struct netmap_adapter na;
519 
520 	bzero(&na, sizeof(na));
521 
522 	na.ifp = sc->vtnet_ifp;
523 	na.na_flags = 0;
524 	na.num_tx_desc = vtnet_netmap_tx_slots(sc);
525 	na.num_rx_desc = vtnet_netmap_rx_slots(sc);
526 	na.num_tx_rings = na.num_rx_rings = sc->vtnet_max_vq_pairs;
527 	na.rx_buf_maxsize = 0;
528 	na.nm_register = vtnet_netmap_reg;
529 	na.nm_txsync = vtnet_netmap_txsync;
530 	na.nm_rxsync = vtnet_netmap_rxsync;
531 	na.nm_intr = vtnet_netmap_intr;
532 	na.nm_config = vtnet_netmap_config;
533 
534 	netmap_attach(&na);
535 
536 	nm_prinf("vtnet attached txq=%d, txd=%d rxq=%d, rxd=%d\n",
537 			na.num_tx_rings, na.num_tx_desc,
538 			na.num_tx_rings, na.num_rx_desc);
539 }
540 /* end of file */
541