/*
 * Copyright (C) 2014 Vincenzo Maffione, Luigi Rizzo. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * $FreeBSD$
 */

#include <net/netmap.h>
#include <sys/selinfo.h>
#include <vm/vm.h>
#include <vm/pmap.h>	/* vtophys ? */
#include <dev/netmap/netmap_kern.h>


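/* SOFTC_T names the driver softc type, so that the glue code below
 * stays close in shape to the other netmap-enabled drivers.
 */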
#define SOFTC_T	vtnet_softc

/* Free all the unused buffers in all the RX and TX virtqueues.
 * This function is called when entering and exiting netmap mode.
 * - buffers queued by the native driver return an mbuf pointer (or a
 *   tx header owning an mbuf) and need to be freed;
 * - buffers queued by netmap return the rxq/txq pointer used as a
 *   token and need no extra work.
 */
static void
vtnet_netmap_free_bufs(struct SOFTC_T *sc)
{
	int i, nmb = 0, n = 0, last;

	for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
		struct vtnet_rxq *rxq = &sc->vtnet_rxqs[i];
		struct virtqueue *vq;
		struct mbuf *m;
		struct vtnet_txq *txq = &sc->vtnet_txqs[i];
		struct vtnet_tx_header *txhdr;

		last = 0;
		vq = rxq->vtnrx_vq;
		while ((m = virtqueue_drain(vq, &last)) != NULL) {
			n++;
			if (m != (void *)rxq)
				m_freem(m);
			else
				nmb++;
		}

		last = 0;
		vq = txq->vtntx_vq;
		while ((txhdr = virtqueue_drain(vq, &last)) != NULL) {
			n++;
			if (txhdr != (void *)txq) {
				m_freem(txhdr->vth_mbuf);
				uma_zfree(vtnet_tx_header_zone, txhdr);
			} else
				nmb++;
		}
	}
	D("freed %d mbufs, %d netmap bufs on %d queues",
		n - nmb, nmb, i);
}

/* Register and unregister. */
static int
vtnet_netmap_reg(struct netmap_adapter *na, int onoff)
{
	struct ifnet *ifp = na->ifp;
	struct SOFTC_T *sc = ifp->if_softc;

	VTNET_CORE_LOCK(sc);
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	/* enable or disable flags and callbacks in na and ifp */
	if (onoff) {
		nm_set_native_flags(na);
	} else {
		nm_clear_native_flags(na);
	}
	/* drain the queues so that netmap and the native driver
	 * do not interfere with each other
	 */
	vtnet_netmap_free_bufs(sc);
	vtnet_init_locked(sc);	/* also enable intr */
	VTNET_CORE_UNLOCK(sc);
	return (ifp->if_drv_flags & IFF_DRV_RUNNING ? 0 : 1);
}


/* Reconcile kernel and user view of the transmit ring. */
static int
vtnet_netmap_txsync(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	struct ifnet *ifp = na->ifp;
	struct netmap_ring *ring = kring->ring;
	u_int ring_nr = kring->ring_id;
	u_int nm_i;	/* index into the netmap ring */
	u_int nic_i;	/* index into the NIC ring */
	u_int n;
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = kring->rhead;

	/* device-specific */
	struct SOFTC_T *sc = ifp->if_softc;
	struct vtnet_txq *txq = &sc->vtnet_txqs[ring_nr];
	struct virtqueue *vq = txq->vtntx_vq;
	int interrupts = !(kring->nr_kflags & NKR_NOINTR);
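	/* "interrupts" is false when the user has suppressed interrupt
	 * reporting on this ring by setting NKR_NOINTR.
	 */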

	/*
	 * First part: process new packets to send.
	 */
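	/* Read barrier: make sure the slot contents published by
	 * userspace are visible before we process the ring.
	 */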
	rmb();

	nm_i = kring->nr_hwcur;
	if (nm_i != head) {	/* we have new packets to send */
		struct sglist *sg = txq->vtntx_sg;

		nic_i = netmap_idx_k2n(kring, nm_i);
		for (n = 0; nm_i != head; n++) {
			/* Prepend an empty header: netmap does not use
			 * offloads, so an all-zero virtio-net header is
			 * valid for every frame.
			 */
			static struct virtio_net_hdr_mrg_rxbuf hdr;
			struct netmap_slot *slot = &ring->slot[nm_i];
			u_int len = slot->len;
			uint64_t paddr;
			void *addr = PNMB(na, slot, &paddr);
			int err;

			NM_CHECK_ADDR_LEN(na, addr, len);

			slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED);
			/* Initialize the scatterlist, expose it to the hypervisor,
			 * and kick the hypervisor (if necessary).
			 */
			sglist_reset(sg); // cheap
			// if vtnet_hdr_size > 0 ...
			err = sglist_append(sg, &hdr, sc->vtnet_hdr_size);
			// XXX later, support multi segment
			err = sglist_append_phys(sg, paddr, len);
			/* use the txq pointer as the cookie */
			err = virtqueue_enqueue(vq, txq, sg, sg->sg_nseg, 0);
			if (unlikely(err < 0)) {
				D("virtqueue_enqueue failed");
				break;
			}

			nm_i = nm_next(nm_i, lim);
			nic_i = nm_next(nic_i, lim);
		}
		/* Update hwcur depending on where we stopped. */
		kring->nr_hwcur = nm_i; /* note we might break early */

		/* No more free TX slots? Ask the hypervisor for notifications,
		 * possibly only when a considerable amount of work has been
		 * done.
		 */
		ND(3, "sent %d packets, hwcur %d", n, nm_i);
		virtqueue_disable_intr(vq);
		virtqueue_notify(vq);
	} else {
		if (ring->head != ring->tail)
			ND(5, "pure notify ? head %d tail %d nused %d %d",
				ring->head, ring->tail, virtqueue_nused(vq),
				(virtqueue_dump(vq), 1));
		virtqueue_notify(vq);
		if (interrupts) {
			virtqueue_enable_intr(vq); // like postpone with 0
		}
	}


	/* Free used slots. We only consider our own used buffers, recognized
	 * by the token (the txq pointer) we passed to virtqueue_enqueue.
	 */
	n = 0;
	for (;;) {
		struct vtnet_tx_header *txhdr = virtqueue_dequeue(vq, NULL);
		if (txhdr == NULL)
			break;
		if (likely(txhdr == (void *)txq)) {
			n++;
			if (virtqueue_nused(vq) < 32) { // XXX slow release
				break;
			}
		} else { /* leftover from previous transmission */
			m_freem(txhdr->vth_mbuf);
			uma_zfree(vtnet_tx_header_zone, txhdr);
		}
	}
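	/* Advance hwtail past the n slots reclaimed above, wrapping
	 * around at the ring size (lim + 1 slots).
	 */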
	if (n) {
		kring->nr_hwtail += n;
		if (kring->nr_hwtail > lim)
			kring->nr_hwtail -= lim + 1;
	}
	if (nm_i != kring->nr_hwtail /* && vtnet_txq_below_threshold(txq) == 0 */) {
		ND(3, "disable intr, hwcur %d", nm_i);
		virtqueue_disable_intr(vq);
	} else if (interrupts) {
		ND(3, "enable intr, hwcur %d", nm_i);
		virtqueue_postpone_intr(vq, VQ_POSTPONE_SHORT);
	}

	return 0;
}

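/* Publish the netmap buffers in [nm_i, head) to the hypervisor through
 * the RX virtqueue. Returns the index of the first slot left
 * unpublished (the new hwcur), or -1 if the netmap ring had to be
 * reinitialized.
 */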
static int
vtnet_refill_rxq(struct netmap_kring *kring, u_int nm_i, u_int head)
{
	struct netmap_adapter *na = kring->na;
	struct ifnet *ifp = na->ifp;
	struct netmap_ring *ring = kring->ring;
	u_int ring_nr = kring->ring_id;
	u_int const lim = kring->nkr_num_slots - 1;
	u_int n;

	/* device-specific */
	struct SOFTC_T *sc = ifp->if_softc;
	struct vtnet_rxq *rxq = &sc->vtnet_rxqs[ring_nr];
	struct virtqueue *vq = rxq->vtnrx_vq;

	/* use a local sglist, default might be short */
	struct sglist_seg ss[2];
	struct sglist sg = { ss, 0, 0, 2 };
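	/* Two segments are enough: one for the virtio-net header and
	 * one for a single netmap buffer.
	 */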

	for (n = 0; nm_i != head; n++) {
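		/* The host writes the virtio-net header back into this
		 * scratch area; netmap ignores its contents, so a single
		 * static copy shared by all buffers is fine.
		 */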
		static struct virtio_net_hdr_mrg_rxbuf hdr;
		struct netmap_slot *slot = &ring->slot[nm_i];
		uint64_t paddr;
		void *addr = PNMB(na, slot, &paddr);
		int err = 0;

		if (addr == NETMAP_BUF_BASE(na)) { /* bad buf */
			if (netmap_ring_reinit(kring))
				return -1;
		}

		slot->flags &= ~NS_BUF_CHANGED;
		sglist_reset(&sg); // cheap
		err = sglist_append(&sg, &hdr, sc->vtnet_hdr_size);
		err = sglist_append_phys(&sg, paddr, NETMAP_BUF_SIZE(na));
		/* writable for the host */
		err = virtqueue_enqueue(vq, rxq, &sg, 0, sg.sg_nseg);
		if (err < 0) {
			D("virtqueue_enqueue failed");
			break;
		}
		nm_i = nm_next(nm_i, lim);
	}
	return nm_i;
}

/* Reconcile kernel and user view of the receive ring. */
static int
vtnet_netmap_rxsync(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	struct ifnet *ifp = na->ifp;
	struct netmap_ring *ring = kring->ring;
	u_int ring_nr = kring->ring_id;
	u_int nm_i;	/* index into the netmap ring */
	// u_int nic_i;	/* index into the NIC ring */
	u_int n;
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = kring->rhead;
	int force_update = (flags & NAF_FORCE_READ) ||
		(kring->nr_kflags & NKR_PENDINTR);
	int interrupts = !(kring->nr_kflags & NKR_NOINTR);

	/* device-specific */
	struct SOFTC_T *sc = ifp->if_softc;
	struct vtnet_rxq *rxq = &sc->vtnet_rxqs[ring_nr];
	struct virtqueue *vq = rxq->vtnrx_vq;

	/* XXX netif_carrier_ok ? */

	if (head > lim)
		return netmap_ring_reinit(kring);

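	/* Read barrier: make userspace updates to the ring visible
	 * before we read them.
	 */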
	rmb();
	/*
	 * First part: import newly received packets.
	 * Only accept our own buffers (matching the token). We should
	 * only get matching buffers, because of vtnet_netmap_free_bufs()
	 * and vtnet_netmap_init_rx_buffers().
	 */
	if (netmap_no_pendintr || force_update) {
		struct netmap_adapter *token;

		nm_i = kring->nr_hwtail;
		n = 0;
		for (;;) {
			int len;
			token = virtqueue_dequeue(vq, &len);
			if (token == NULL)
				break;
			if (likely(token == (void *)rxq)) {
				ring->slot[nm_i].len = len;
				ring->slot[nm_i].flags = 0;
				nm_i = nm_next(nm_i, lim);
				n++;
			} else {
				D("This should not happen");
			}
		}
		kring->nr_hwtail = nm_i;
		kring->nr_kflags &= ~NKR_PENDINTR;
	}
	ND("[B] h %d c %d hwcur %d hwtail %d",
		ring->head, ring->cur, kring->nr_hwcur,
		kring->nr_hwtail);

	/*
	 * Second part: skip past packets that userspace has released,
	 * making their buffers available to the hypervisor again.
	 */
	nm_i = kring->nr_hwcur; /* netmap ring index */
	if (nm_i != head) {
		int err = vtnet_refill_rxq(kring, nm_i, head);
		if (err < 0)
			return 1;
		kring->nr_hwcur = err;
		virtqueue_notify(vq);
		/* After draining the queue we may need an intr from the hypervisor */
		if (interrupts) {
			vtnet_rxq_enable_intr(rxq);
		}
	}

	ND("[C] h %d c %d t %d hwcur %d hwtail %d",
		ring->head, ring->cur, ring->tail,
		kring->nr_hwcur, kring->nr_hwtail);

	return 0;
}


/* Enable/disable interrupts on all virtqueues. */
static void
vtnet_netmap_intr(struct netmap_adapter *na, int onoff)
{
	struct SOFTC_T *sc = na->ifp->if_softc;
	int i;

	for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
		struct vtnet_rxq *rxq = &sc->vtnet_rxqs[i];
		struct vtnet_txq *txq = &sc->vtnet_txqs[i];
		struct virtqueue *txvq = txq->vtntx_vq;

		if (onoff) {
			vtnet_rxq_enable_intr(rxq);
			virtqueue_enable_intr(txvq);
		} else {
			vtnet_rxq_disable_intr(rxq);
			virtqueue_disable_intr(txvq);
		}
	}
}

/* Publish netmap buffers on the RX virtqueues.
 * Returns 1 on success, 0 if netmap is not active or on error.
 */
static int
vtnet_netmap_init_rx_buffers(struct SOFTC_T *sc)
{
	struct ifnet *ifp = sc->vtnet_ifp;
	struct netmap_adapter *na = NA(ifp);
	unsigned int r;

	if (!nm_native_on(na))
		return 0;
	for (r = 0; r < na->num_rx_rings; r++) {
		struct netmap_kring *kring = na->rx_rings[r];
		struct vtnet_rxq *rxq = &sc->vtnet_rxqs[r];
		struct virtqueue *vq = rxq->vtnrx_vq;
		struct netmap_slot *slot;
		int err = 0;

		slot = netmap_reset(na, NR_RX, r, 0);
		if (!slot) {
			D("strange, null netmap ring %d", r);
			return 0;
		}
		/* Add up to na->num_rx_desc - 1 buffers to this RX virtqueue.
		 * It's important to leave one virtqueue slot free, otherwise
		 * we can run into ring->cur/ring->tail wraparounds.
		 */
		err = vtnet_refill_rxq(kring, 0, na->num_rx_desc - 1);
		if (err < 0)
			return 0;
		virtqueue_notify(vq);
	}

	return 1;
}
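/* Note (sketch, the exact hook lives in if_vtnet.c): the driver's RX
 * initialization path is expected to call this function and, when it
 * returns nonzero, skip the normal mbuf allocation since netmap owns
 * the RX buffers.
 */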
static void
vtnet_netmap_attach(struct SOFTC_T *sc)
{
	struct netmap_adapter na;

	bzero(&na, sizeof(na));

	na.ifp = sc->vtnet_ifp;
	na.num_tx_desc = 1024;	// sc->vtnet_rx_nmbufs;
	na.num_rx_desc = 1024;	// sc->vtnet_rx_nmbufs;
	na.nm_register = vtnet_netmap_reg;
	na.nm_txsync = vtnet_netmap_txsync;
	na.nm_rxsync = vtnet_netmap_rxsync;
	na.nm_intr = vtnet_netmap_intr;
	na.num_tx_rings = na.num_rx_rings = sc->vtnet_max_vq_pairs;
	D("max rings %d", sc->vtnet_max_vq_pairs);
	netmap_attach(&na);

	D("virtio attached txq=%d, txd=%d rxq=%d, rxd=%d",
		na.num_tx_rings, na.num_tx_desc,
		na.num_rx_rings, na.num_rx_desc);
}
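/* Integration sketch (for reference, not part of this file): the vtnet
 * driver is expected to call vtnet_netmap_attach(sc) once, at the end
 * of its attach routine, e.g.
 *
 *	#ifdef DEV_NETMAP
 *		vtnet_netmap_attach(sc);
 *	#endif
 *
 * and netmap_detach(sc->vtnet_ifp) from its detach routine.
 */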
/* end of file */