/*
 * Copyright (C) 2014-2018 Vincenzo Maffione, Luigi Rizzo.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <net/netmap.h>
#include <sys/selinfo.h>
#include <vm/vm.h>
#include <vm/pmap.h>    /* vtophys ? */
#include <dev/netmap/netmap_kern.h>

/* Register and unregister. */
static int
vtnet_netmap_reg(struct netmap_adapter *na, int state)
{
	if_t ifp = na->ifp;
	struct vtnet_softc *sc = if_getsoftc(ifp);

	/*
	 * Trigger a device reinit, asking vtnet_init_locked() to
	 * also enter or exit netmap mode.
	 */
	VTNET_CORE_LOCK(sc);
	if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
	vtnet_init_locked(sc, state ? VTNET_INIT_NETMAP_ENTER
	    : VTNET_INIT_NETMAP_EXIT);
	VTNET_CORE_UNLOCK(sc);

	return (0);
}


/* Reconcile kernel and user view of the transmit ring. */
static int
vtnet_netmap_txsync(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	if_t ifp = na->ifp;
	struct netmap_ring *ring = kring->ring;
	u_int ring_nr = kring->ring_id;
	u_int nm_i;	/* index into the netmap ring */
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = kring->rhead;

	/* device-specific */
	struct vtnet_softc *sc = if_getsoftc(ifp);
	struct vtnet_txq *txq = &sc->vtnet_txqs[ring_nr];
	struct virtqueue *vq = txq->vtntx_vq;
	int interrupts = !(kring->nr_kflags & NKR_NOINTR);
	u_int n;

	/*
	 * First part: process new packets to send.
	 */

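	/*
	 * Slots in [nr_hwcur, rhead) have been filled by userspace and are
	 * ready for transmission; nm_next() advances an index modulo the
	 * ring size (lim is nkr_num_slots - 1).
	 */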
	nm_i = kring->nr_hwcur;
	if (nm_i != head) {	/* we have new packets to send */
		struct sglist *sg = txq->vtntx_sg;

		for (; nm_i != head; nm_i = nm_next(nm_i, lim)) {
			/* we use an empty header here */
			struct netmap_slot *slot = &ring->slot[nm_i];
			uint64_t offset = nm_get_offset(kring, slot);
			u_int len = slot->len;
			uint64_t paddr;
			int err;

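			/*
			 * Resolve the physical address of the slot's buffer;
			 * the virtual address returned by PNMB() is not
			 * needed here, hence the (void) cast.
			 */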
			(void)PNMB(na, slot, &paddr);
			NM_CHECK_ADDR_LEN_OFF(na, len, offset);

			slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED);
			/* Initialize the scatterlist, expose it to the hypervisor,
			 * and kick the hypervisor (if necessary).
			 */
			sglist_reset(sg); // cheap
			err = sglist_append(sg, &txq->vtntx_shrhdr, sc->vtnet_hdr_size);
			err |= sglist_append_phys(sg, paddr + offset, len);
			KASSERT(err == 0, ("%s: cannot append to sglist %d",
						__func__, err));
			err = virtqueue_enqueue(vq, /*cookie=*/txq, sg,
						/*readable=*/sg->sg_nseg,
						/*writeable=*/0);
			if (unlikely(err)) {
				if (err != ENOSPC)
					nm_prerr("virtqueue_enqueue(%s) failed: %d",
							kring->name, err);
				break;
			}
		}

		virtqueue_notify(vq);

		/* Update hwcur depending on where we stopped. */
		kring->nr_hwcur = nm_i; /* note we might break early */
	}

	/* Free used slots. We only consider our own used buffers, recognized
	 * by the token we passed to virtqueue_enqueue.
	 */
	n = 0;
	for (;;) {
		void *token = virtqueue_dequeue(vq, NULL);
		if (token == NULL)
			break;
		if (unlikely(token != (void *)txq))
			nm_prerr("BUG: TX token mismatch");
		else
			n++;
	}
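	/*
	 * Advance hwtail by the number of reclaimed buffers, wrapping
	 * modulo the ring size (lim + 1 == nkr_num_slots).
	 */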
	if (n > 0) {
		kring->nr_hwtail += n;
		if (kring->nr_hwtail > lim)
			kring->nr_hwtail -= lim + 1;
	}

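	/*
	 * If the virtqueue is running low on free descriptors, arm a
	 * deferred TX completion interrupt so that used buffers are
	 * eventually reclaimed (the threshold of 32 is a heuristic).
	 */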
	if (interrupts && virtqueue_nfree(vq) < 32)
		virtqueue_postpone_intr(vq, VQ_POSTPONE_LONG);

	return 0;
}

/*
 * Publish 'num' netmap receive buffers to the host, starting
 * from the next available one (rx->vtnrx_nm_refill).
 * Returns 0 on success and a positive error code on failure.
 * Failing to publish all of the buffers is an error, since the
 * netmap ring and the virtqueue would go out of sync.
 */
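/*
 * Note that the caller is responsible for kicking the host with
 * virtqueue_notify() after a successful refill; see
 * vtnet_netmap_rxq_populate() and vtnet_netmap_rxsync().
 */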
static int
vtnet_netmap_kring_refill(struct netmap_kring *kring, u_int num)
{
	struct netmap_adapter *na = kring->na;
	if_t ifp = na->ifp;
	struct netmap_ring *ring = kring->ring;
	u_int ring_nr = kring->ring_id;
	u_int const lim = kring->nkr_num_slots - 1;
	u_int nm_i;

	/* device-specific */
	struct vtnet_softc *sc = if_getsoftc(ifp);
	struct vtnet_rxq *rxq = &sc->vtnet_rxqs[ring_nr];
	struct virtqueue *vq = rxq->vtnrx_vq;

	/* use a local sglist, default might be short */
	struct sglist_seg ss[2];
	struct sglist sg = { ss, 0, 0, 2 };
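	/*
	 * The on-stack sglist above holds at most two segments: the
	 * shared virtio-net header and one netmap buffer.
	 */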

	for (nm_i = rxq->vtnrx_nm_refill; num > 0;
	    nm_i = nm_next(nm_i, lim), num--) {
		struct netmap_slot *slot = &ring->slot[nm_i];
		uint64_t offset = nm_get_offset(kring, slot);
		uint64_t paddr;
		void *addr = PNMB(na, slot, &paddr);
		int err;

		if (addr == NETMAP_BUF_BASE(na)) { /* bad buf */
			netmap_ring_reinit(kring);
			return EFAULT;
		}

		slot->flags &= ~NS_BUF_CHANGED;
		sglist_reset(&sg);
		err = sglist_append(&sg, &rxq->vtnrx_shrhdr, sc->vtnet_hdr_size);
		err |= sglist_append_phys(&sg, paddr + offset,
		    NETMAP_BUF_SIZE(na) - offset);
		KASSERT(err == 0, ("%s: cannot append to sglist %d",
					__func__, err));
		/* writable for the host */
		err = virtqueue_enqueue(vq, /*cookie=*/rxq, &sg,
				/*readable=*/0, /*writeable=*/sg.sg_nseg);
		if (unlikely(err)) {
			nm_prerr("virtqueue_enqueue(%s) failed: %d",
				kring->name, err);
			break;
		}
	}
	rxq->vtnrx_nm_refill = nm_i;

	return num == 0 ? 0 : ENOSPC;
}

/*
 * Publish netmap buffers on an RX virtqueue.
 * Returns -1 if this virtqueue is not being opened in netmap mode.
 * If the virtqueue is being opened in netmap mode, returns 0 on success and
 * a positive error code on failure.
 */
static int
vtnet_netmap_rxq_populate(struct vtnet_rxq *rxq)
{
	struct netmap_adapter *na = NA(rxq->vtnrx_sc->vtnet_ifp);
	struct netmap_kring *kring;
	struct netmap_slot *slot;
	int error;
	int num;

	slot = netmap_reset(na, NR_RX, rxq->vtnrx_id, 0);
	if (slot == NULL)
		return -1;
	kring = na->rx_rings[rxq->vtnrx_id];

	/*
	 * Expose all the RX netmap buffers we can. In case of no indirect
	 * buffers, the number of netmap slots in the RX ring matches the
	 * maximum number of 2-element sglists that the RX virtqueue can
	 * accommodate. We need to start from kring->nr_hwtail, which is 0
	 * on the first netmap register and may be different from 0 if a
	 * virtio re-init (caused by a netmap register or, e.g., ifconfig)
	 * happens while the device is in use by netmap.
	 */
	rxq->vtnrx_nm_refill = kring->nr_hwtail;
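	/*
	 * Publish at most N - 1 buffers (not N), so that hwtail can never
	 * overrun hwcur; see the matching comment in vtnet_netmap_rxsync().
	 */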
	num = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
	error = vtnet_netmap_kring_refill(kring, num);
	virtqueue_notify(rxq->vtnrx_vq);

	return error;
}

/* Reconcile kernel and user view of the receive ring. */
static int
vtnet_netmap_rxsync(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	if_t ifp = na->ifp;
	struct netmap_ring *ring = kring->ring;
	u_int ring_nr = kring->ring_id;
	u_int nm_i;	/* index into the netmap ring */
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = kring->rhead;
	int force_update = (flags & NAF_FORCE_READ) ||
				(kring->nr_kflags & NKR_PENDINTR);
	int interrupts = !(kring->nr_kflags & NKR_NOINTR);

	/* device-specific */
	struct vtnet_softc *sc = if_getsoftc(ifp);
	struct vtnet_rxq *rxq = &sc->vtnet_rxqs[ring_nr];
	struct virtqueue *vq = rxq->vtnrx_vq;

	/*
	 * First part: import newly received packets.
	 * Only accept our own buffers (matching the token); we should only
	 * ever get matching buffers anyway. The hwtail should never overrun
	 * hwcur, because we publish only N-1 receive buffers (and not N).
	 * In any case we must not leave this routine with the interrupts
	 * disabled, pending packets in the VQ and hwtail == (hwcur - 1),
	 * otherwise the pending packets could stall.
	 */
	if (netmap_no_pendintr || force_update) {
		uint32_t hwtail_lim = nm_prev(kring->nr_hwcur, lim);
		void *token;

		vtnet_rxq_disable_intr(rxq);

		nm_i = kring->nr_hwtail;
		for (;;) {
			int len;
			token = virtqueue_dequeue(vq, &len);
			if (token == NULL) {
				/*
				 * Enable the interrupts again and double-check
				 * for more work. We can go on until we win the
				 * race condition, since we are not replenishing
				 * in the meanwhile, and thus we will process at
				 * most N-1 slots.
				 */
				if (interrupts && vtnet_rxq_enable_intr(rxq)) {
					vtnet_rxq_disable_intr(rxq);
					continue;
				}
				break;
			}
			if (unlikely(token != (void *)rxq)) {
				nm_prerr("BUG: RX token mismatch");
			} else {
				if (nm_i == hwtail_lim) {
					KASSERT(false, ("hwtail would "
					    "overrun hwcur"));
				}

				/* Skip the virtio-net header. */
				len -= sc->vtnet_hdr_size;
				if (unlikely(len < 0)) {
					nm_prlim(1, "Truncated virtio-net header, "
						"missing %d bytes", -len);
					len = 0;
				}
				ring->slot[nm_i].len = len;
				ring->slot[nm_i].flags = 0;
				nm_i = nm_next(nm_i, lim);
			}
		}
		kring->nr_hwtail = nm_i;
		kring->nr_kflags &= ~NKR_PENDINTR;
	}

	/*
	 * Second part: skip past packets that userspace has released.
	 */
	nm_i = kring->nr_hwcur; /* netmap ring index */
	if (nm_i != head) {
		int released;
		int error;

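		/*
		 * Compute how many slots userspace released, accounting
		 * for ring wraparound (head may be behind nr_hwcur).
		 */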
		released = head - nm_i;
		if (released < 0)
			released += kring->nkr_num_slots;
		error = vtnet_netmap_kring_refill(kring, released);
		if (error) {
			nm_prerr("Failed to replenish RX VQ with %u sgs",
			    released);
			return error;
		}
		kring->nr_hwcur = head;
		virtqueue_notify(vq);
	}

	nm_prdis("h %d c %d t %d hwcur %d hwtail %d", kring->rhead,
	    kring->rcur, kring->rtail, kring->nr_hwcur, kring->nr_hwtail);

	return 0;
}


/* Enable/disable interrupts on all virtqueues. */
static void
vtnet_netmap_intr(struct netmap_adapter *na, int state)
{
	struct vtnet_softc *sc = if_getsoftc(na->ifp);
	int i;

	for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
		struct vtnet_rxq *rxq = &sc->vtnet_rxqs[i];
		struct vtnet_txq *txq = &sc->vtnet_txqs[i];
		struct virtqueue *txvq = txq->vtntx_vq;

		if (state) {
			vtnet_rxq_enable_intr(rxq);
			virtqueue_enable_intr(txvq);
		} else {
			vtnet_rxq_disable_intr(rxq);
			virtqueue_disable_intr(txvq);
		}
	}
}

static int
vtnet_netmap_tx_slots(struct vtnet_softc *sc)
{
	int div;

	/* We need to prepend a virtio-net header to each netmap buffer to be
	 * transmitted, therefore we call virtqueue_enqueue() with a 2-element
	 * sglist.
	 * TX virtqueues use indirect descriptors if the feature was negotiated
	 * with the host, and if sc->vtnet_tx_nsegs > 1. With indirect
	 * descriptors, a single virtio descriptor is sufficient to reference
	 * each TX sglist. Without them, we need two separate virtio descriptors
	 * for each TX sglist. We therefore compute the number of netmap TX
	 * slots according to these assumptions.
	 */
	if ((sc->vtnet_flags & VTNET_FLAG_INDIRECT) && sc->vtnet_tx_nsegs > 1)
		div = 1;
	else
		div = 2;

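	/*
	 * For example, a 256-entry TX virtqueue yields 256 netmap TX slots
	 * with indirect descriptors, but only 128 without them.
	 */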
	return virtqueue_size(sc->vtnet_txqs[0].vtntx_vq) / div;
}

static int
vtnet_netmap_rx_slots(struct vtnet_softc *sc)
{
	int div;

	/* We need to prepend a virtio-net header to each netmap buffer to be
	 * received, therefore we call virtqueue_enqueue() with a 2-element
	 * sglist.
	 * RX virtqueues use indirect descriptors if the feature was negotiated
	 * with the host, and if sc->vtnet_rx_nsegs > 1. With indirect
	 * descriptors, a single virtio descriptor is sufficient to reference
	 * each RX sglist. Without them, we need two separate virtio descriptors
	 * for each RX sglist. We therefore compute the number of netmap RX
	 * slots according to these assumptions.
	 */
	if ((sc->vtnet_flags & VTNET_FLAG_INDIRECT) && sc->vtnet_rx_nsegs > 1)
		div = 1;
	else
		div = 2;

	return virtqueue_size(sc->vtnet_rxqs[0].vtnrx_vq) / div;
}

static int
vtnet_netmap_config(struct netmap_adapter *na, struct nm_config_info *info)
{
	struct vtnet_softc *sc = if_getsoftc(na->ifp);

	info->num_tx_rings = sc->vtnet_act_vq_pairs;
	info->num_rx_rings = sc->vtnet_act_vq_pairs;
	info->num_tx_descs = vtnet_netmap_tx_slots(sc);
	info->num_rx_descs = vtnet_netmap_rx_slots(sc);
	info->rx_buf_maxsize = NETMAP_BUF_SIZE(na);

	return 0;
}

static void
vtnet_netmap_attach(struct vtnet_softc *sc)
{
	struct netmap_adapter na;

	bzero(&na, sizeof(na));

	na.ifp = sc->vtnet_ifp;
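	/*
	 * NAF_OFFSETS advertises support for per-slot buffer offsets; see
	 * the nm_get_offset() calls in the txsync and rxsync routines.
	 */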
	na.na_flags = NAF_OFFSETS;
	na.num_tx_desc = vtnet_netmap_tx_slots(sc);
	na.num_rx_desc = vtnet_netmap_rx_slots(sc);
	na.num_tx_rings = na.num_rx_rings = sc->vtnet_max_vq_pairs;
	na.rx_buf_maxsize = 0;
	na.nm_register = vtnet_netmap_reg;
	na.nm_txsync = vtnet_netmap_txsync;
	na.nm_rxsync = vtnet_netmap_rxsync;
	na.nm_intr = vtnet_netmap_intr;
	na.nm_config = vtnet_netmap_config;

	netmap_attach(&na);

	nm_prinf("vtnet attached txq=%d, txd=%d rxq=%d, rxd=%d",
			na.num_tx_rings, na.num_tx_desc,
			na.num_rx_rings, na.num_rx_desc);
}
/* end of file */
455