/*
 * Copyright (C) 2014-2018 Vincenzo Maffione, Luigi Rizzo.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * $FreeBSD$
 */

#include <net/netmap.h>
#include <sys/selinfo.h>
#include <vm/vm.h>
#include <vm/pmap.h>    /* vtophys ? */
#include <dev/netmap/netmap_kern.h>

/* Register and unregister. */
static int
vtnet_netmap_reg(struct netmap_adapter *na, int state)
{
	struct ifnet *ifp = na->ifp;
	struct vtnet_softc *sc = ifp->if_softc;

	/*
	 * Trigger a device reinit, asking vtnet_init_locked() to
	 * also enter or exit netmap mode.
	 */
	VTNET_CORE_LOCK(sc);
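	/*
	 * Clear IFF_DRV_RUNNING so that vtnet_init_locked() does not
	 * bail out early on an already-running interface.
	 */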
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	vtnet_init_locked(sc, state ? VTNET_INIT_NETMAP_ENTER
	    : VTNET_INIT_NETMAP_EXIT);
	VTNET_CORE_UNLOCK(sc);

	return (0);
}


/* Reconcile kernel and user view of the transmit ring. */
static int
vtnet_netmap_txsync(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	struct ifnet *ifp = na->ifp;
	struct netmap_ring *ring = kring->ring;
	u_int ring_nr = kring->ring_id;
	u_int nm_i;	/* index into the netmap ring */
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = kring->rhead;

	/* device-specific */
	struct vtnet_softc *sc = ifp->if_softc;
	struct vtnet_txq *txq = &sc->vtnet_txqs[ring_nr];
	struct virtqueue *vq = txq->vtntx_vq;
	int interrupts = !(kring->nr_kflags & NKR_NOINTR);
	u_int n;

	/*
	 * First part: process new packets to send.
	 */

	nm_i = kring->nr_hwcur;
	if (nm_i != head) {	/* we have new packets to send */
		struct sglist *sg = txq->vtntx_sg;

		for (; nm_i != head; nm_i = nm_next(nm_i, lim)) {
			/* we use an empty header here */
			struct netmap_slot *slot = &ring->slot[nm_i];
			u_int len = slot->len;
			uint64_t paddr;
			void *addr = PNMB(na, slot, &paddr);
			int err;

			NM_CHECK_ADDR_LEN(na, addr, len);

			slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED);
			/* Initialize the scatterlist and expose it to
			 * the hypervisor; the host is kicked once after
			 * the loop, if necessary.
			 */
			sglist_reset(sg);	/* cheap */
			err = sglist_append(sg, &txq->vtntx_shrhdr, sc->vtnet_hdr_size);
			err |= sglist_append_phys(sg, paddr, len);
			KASSERT(err == 0, ("%s: cannot append to sglist %d",
						__func__, err));
			err = virtqueue_enqueue(vq, /*cookie=*/txq, sg,
						/*readable=*/sg->sg_nseg,
						/*writable=*/0);
			if (unlikely(err)) {
				if (err != ENOSPC)
					nm_prerr("virtqueue_enqueue(%s) failed: %d",
							kring->name, err);
				break;
			}
		}

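		/* Tell the host about the newly exposed buffers. */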
		virtqueue_notify(vq);

		/* Update hwcur depending on where we stopped. */
		kring->nr_hwcur = nm_i; /* note we might break early */
	}

	/* Free used slots. We only consider our own used buffers, recognized
	 * by the token we passed to virtqueue_enqueue.
	 */
	n = 0;
	for (;;) {
		void *token = virtqueue_dequeue(vq, NULL);
		if (token == NULL)
			break;
		if (unlikely(token != (void *)txq))
			nm_prerr("BUG: TX token mismatch");
		else
			n++;
	}
	if (n > 0) {
		kring->nr_hwtail += n;
		if (kring->nr_hwtail > lim)
			kring->nr_hwtail -= lim + 1;
	}

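	/*
	 * If the virtqueue is running low on free slots, ask the host
	 * for a completion interrupt, postponed as far as possible, so
	 * that used buffers are reclaimed in batches.
	 */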
	if (interrupts && virtqueue_nfree(vq) < 32)
		virtqueue_postpone_intr(vq, VQ_POSTPONE_LONG);

	return 0;
}

/*
 * Publish 'num' netmap receive buffers to the host, starting
 * from the next available one (rxq->vtnrx_nm_refill).
 * Return a positive error code on error, and 0 on success.
 * If we could not publish all of the buffers, that is an error,
 * since the netmap ring and the virtqueue would go out of sync.
 */
static int
vtnet_netmap_kring_refill(struct netmap_kring *kring, u_int num)
{
	struct netmap_adapter *na = kring->na;
	struct ifnet *ifp = na->ifp;
	struct netmap_ring *ring = kring->ring;
	u_int ring_nr = kring->ring_id;
	u_int const lim = kring->nkr_num_slots - 1;
	u_int nm_i;

	/* device-specific */
	struct vtnet_softc *sc = ifp->if_softc;
	struct vtnet_rxq *rxq = &sc->vtnet_rxqs[ring_nr];
	struct virtqueue *vq = rxq->vtnrx_vq;

	/* use a local sglist, default might be short */
	struct sglist_seg ss[2];
	struct sglist sg = { ss, 0, 0, 2 };

	for (nm_i = rxq->vtnrx_nm_refill; num > 0;
	    nm_i = nm_next(nm_i, lim), num--) {
		struct netmap_slot *slot = &ring->slot[nm_i];
		uint64_t paddr;
		void *addr = PNMB(na, slot, &paddr);
		int err;

		if (addr == NETMAP_BUF_BASE(na)) { /* bad buf */
			if (netmap_ring_reinit(kring))
				return EFAULT;
		}

		slot->flags &= ~NS_BUF_CHANGED;
		sglist_reset(&sg);
		err = sglist_append(&sg, &rxq->vtnrx_shrhdr, sc->vtnet_hdr_size);
		err |= sglist_append_phys(&sg, paddr, NETMAP_BUF_SIZE(na));
		KASSERT(err == 0, ("%s: cannot append to sglist %d",
					__func__, err));
		/* writable for the host */
		err = virtqueue_enqueue(vq, /*cookie=*/rxq, &sg,
				/*readable=*/0, /*writable=*/sg.sg_nseg);
		if (unlikely(err)) {
			nm_prerr("virtqueue_enqueue(%s) failed: %d",
				kring->name, err);
			break;
		}
	}
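	/* Remember where the next refill should start from. */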
	rxq->vtnrx_nm_refill = nm_i;

	return num == 0 ? 0 : ENOSPC;
}

/*
 * Publish netmap buffers on a RX virtqueue.
 * Returns -1 if this virtqueue is not being opened in netmap mode.
 * If the virtqueue is being opened in netmap mode, return 0 on success and
 * a positive error code on failure.
 */
static int
vtnet_netmap_rxq_populate(struct vtnet_rxq *rxq)
{
	struct netmap_adapter *na = NA(rxq->vtnrx_sc->vtnet_ifp);
	struct netmap_kring *kring;
	struct netmap_slot *slot;
	int error;

	slot = netmap_reset(na, NR_RX, rxq->vtnrx_id, 0);
	if (slot == NULL)
		return -1;
	kring = na->rx_rings[rxq->vtnrx_id];

	/* Expose all the RX netmap buffers we can. Without indirect
	 * buffers, the number of netmap slots in the RX ring matches the
	 * maximum number of 2-element sglists that the RX virtqueue can
	 * accommodate. We need to start from kring->nr_hwcur, which is 0
	 * on netmap register and may differ from 0 if a virtio
	 * re-init happens while the device is in use by netmap. */
	rxq->vtnrx_nm_refill = kring->nr_hwcur;
	error = vtnet_netmap_kring_refill(kring, na->num_rx_desc - 1);
	virtqueue_notify(rxq->vtnrx_vq);

	return error;
}

/* Reconcile kernel and user view of the receive ring. */
static int
vtnet_netmap_rxsync(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	struct ifnet *ifp = na->ifp;
	struct netmap_ring *ring = kring->ring;
	u_int ring_nr = kring->ring_id;
	u_int nm_i;	/* index into the netmap ring */
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = kring->rhead;
	int force_update = (flags & NAF_FORCE_READ) ||
				(kring->nr_kflags & NKR_PENDINTR);
	int interrupts = !(kring->nr_kflags & NKR_NOINTR);

	/* device-specific */
	struct vtnet_softc *sc = ifp->if_softc;
	struct vtnet_rxq *rxq = &sc->vtnet_rxqs[ring_nr];
	struct virtqueue *vq = rxq->vtnrx_vq;

	/*
	 * First part: import newly received packets.
	 * Only accept our own buffers (matching the token), which is all
	 * we should ever get. The hwtail should never overrun hwcur,
	 * because we publish only N-1 receive buffers (and not N).
	 * In any case we must not leave this routine with the interrupts
	 * disabled, pending packets in the VQ and hwtail == (hwcur - 1),
	 * otherwise the pending packets could stall.
	 */
	if (netmap_no_pendintr || force_update) {
		uint32_t hwtail_lim = nm_prev(kring->nr_hwcur, lim);
		void *token;

		vtnet_rxq_disable_intr(rxq);

		nm_i = kring->nr_hwtail;
		for (;;) {
			int len;
			token = virtqueue_dequeue(vq, &len);
			if (token == NULL) {
				/*
				 * Enable the interrupts again and double-check
				 * for more work. We can go on until we win the
				 * race condition, since we are not replenishing
				 * in the meanwhile, and thus we will process at
				 * most N-1 slots.
				 */
				if (interrupts && vtnet_rxq_enable_intr(rxq)) {
					vtnet_rxq_disable_intr(rxq);
					continue;
				}
				break;
			}
			if (unlikely(token != (void *)rxq)) {
				nm_prerr("BUG: RX token mismatch");
			} else {
				if (nm_i == hwtail_lim) {
					KASSERT(false, ("hwtail would "
					    "overrun hwcur"));
				}

				/* Skip the virtio-net header. */
				len -= sc->vtnet_hdr_size;
				if (unlikely(len < 0)) {
					nm_prlim(1, "Truncated virtio-net header, "
						"missing %d bytes", -len);
					len = 0;
				}
				ring->slot[nm_i].len = len;
				ring->slot[nm_i].flags = 0;
				nm_i = nm_next(nm_i, lim);
			}
		}
		kring->nr_hwtail = nm_i;
		kring->nr_kflags &= ~NKR_PENDINTR;
	}

	/*
	 * Second part: skip past packets that userspace has released.
	 */
	nm_i = kring->nr_hwcur; /* netmap ring index */
	if (nm_i != head) {
		int released;
		int error;

		released = head - nm_i;
		if (released < 0)
			released += kring->nkr_num_slots;
		error = vtnet_netmap_kring_refill(kring, released);
		if (error) {
			nm_prerr("Failed to replenish RX VQ with %u sgs",
			    released);
			return error;
		}
		kring->nr_hwcur = head;
		virtqueue_notify(vq);
	}

	nm_prdis("h %d c %d t %d hwcur %d hwtail %d", kring->rhead,
	    kring->rcur, kring->rtail, kring->nr_hwcur, kring->nr_hwtail);

	return 0;
}


/* Enable/disable interrupts on all virtqueues. */
static void
vtnet_netmap_intr(struct netmap_adapter *na, int state)
{
	struct vtnet_softc *sc = na->ifp->if_softc;
	int i;

	for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
		struct vtnet_rxq *rxq = &sc->vtnet_rxqs[i];
		struct vtnet_txq *txq = &sc->vtnet_txqs[i];
		struct virtqueue *txvq = txq->vtntx_vq;

		if (state) {
			vtnet_rxq_enable_intr(rxq);
			virtqueue_enable_intr(txvq);
		} else {
			vtnet_rxq_disable_intr(rxq);
			virtqueue_disable_intr(txvq);
		}
	}
}

static int
vtnet_netmap_tx_slots(struct vtnet_softc *sc)
{
	int div;

	/* We need to prepend a virtio-net header to each netmap buffer to be
	 * transmitted, so each call to virtqueue_enqueue() passes an sglist
	 * with 2 elements.
	 * TX virtqueues use indirect descriptors if the feature was negotiated
	 * with the host, and if sc->vtnet_tx_nsegs > 1. With indirect
	 * descriptors, a single virtio descriptor is sufficient to reference
	 * each TX sglist. Without them, we need two separate virtio descriptors
	 * for each TX sglist. We therefore compute the number of netmap TX
	 * slots according to these assumptions.
	 */
	if ((sc->vtnet_flags & VTNET_FLAG_INDIRECT) && sc->vtnet_tx_nsegs > 1)
		div = 1;
	else
		div = 2;

	return virtqueue_size(sc->vtnet_txqs[0].vtntx_vq) / div;
}

static int
vtnet_netmap_rx_slots(struct vtnet_softc *sc)
{
	int div;

	/* We need to prepend a virtio-net header to each netmap buffer to be
	 * received, so each call to virtqueue_enqueue() passes an sglist
	 * with 2 elements.
	 * RX virtqueues use indirect descriptors if the feature was negotiated
	 * with the host, and if sc->vtnet_rx_nsegs > 1. With indirect
	 * descriptors, a single virtio descriptor is sufficient to reference
	 * each RX sglist. Without them, we need two separate virtio descriptors
	 * for each RX sglist. We therefore compute the number of netmap RX
	 * slots according to these assumptions.
	 */
	if ((sc->vtnet_flags & VTNET_FLAG_INDIRECT) && sc->vtnet_rx_nsegs > 1)
		div = 1;
	else
		div = 2;

	return virtqueue_size(sc->vtnet_rxqs[0].vtnrx_vq) / div;
}

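/* Report the currently active rings and slot counts to netmap. */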
static int
vtnet_netmap_config(struct netmap_adapter *na, struct nm_config_info *info)
{
	struct vtnet_softc *sc = na->ifp->if_softc;

	info->num_tx_rings = sc->vtnet_act_vq_pairs;
	info->num_rx_rings = sc->vtnet_act_vq_pairs;
	info->num_tx_descs = vtnet_netmap_tx_slots(sc);
	info->num_rx_descs = vtnet_netmap_rx_slots(sc);
	info->rx_buf_maxsize = NETMAP_BUF_SIZE(na);

	return 0;
}

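/* Register this adapter with netmap, providing our callbacks. */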
421 
422 static void
423 vtnet_netmap_attach(struct vtnet_softc *sc)
424 {
425 	struct netmap_adapter na;
426 
427 	bzero(&na, sizeof(na));
428 
429 	na.ifp = sc->vtnet_ifp;
430 	na.na_flags = 0;
431 	na.num_tx_desc = vtnet_netmap_tx_slots(sc);
432 	na.num_rx_desc = vtnet_netmap_rx_slots(sc);
433 	na.num_tx_rings = na.num_rx_rings = sc->vtnet_max_vq_pairs;
434 	na.rx_buf_maxsize = 0;
435 	na.nm_register = vtnet_netmap_reg;
436 	na.nm_txsync = vtnet_netmap_txsync;
437 	na.nm_rxsync = vtnet_netmap_rxsync;
438 	na.nm_intr = vtnet_netmap_intr;
439 	na.nm_config = vtnet_netmap_config;
440 
441 	netmap_attach(&na);
442 
443 	nm_prinf("vtnet attached txq=%d, txd=%d rxq=%d, rxd=%d",
444 			na.num_tx_rings, na.num_tx_desc,
445 			na.num_tx_rings, na.num_rx_desc);
446 }
447 /* end of file */
448