xref: /freebsd/sys/dev/netmap/netmap_vale.c (revision 85732ac8)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (C) 2013-2016 Universita` di Pisa
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *   1. Redistributions of source code must retain the above copyright
11  *      notice, this list of conditions and the following disclaimer.
12  *   2. Redistributions in binary form must reproduce the above copyright
13  *      notice, this list of conditions and the following disclaimer in the
14  *      documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 
29 
30 #if defined(__FreeBSD__)
31 #include <sys/cdefs.h> /* prerequisite */
32 __FBSDID("$FreeBSD$");
33 
34 #include <sys/types.h>
35 #include <sys/errno.h>
36 #include <sys/param.h>	/* defines used in kernel.h */
37 #include <sys/kernel.h>	/* types used in module initialization */
38 #include <sys/conf.h>	/* cdevsw struct, UID, GID */
39 #include <sys/sockio.h>
40 #include <sys/socketvar.h>	/* struct socket */
41 #include <sys/malloc.h>
42 #include <sys/poll.h>
43 #include <sys/rwlock.h>
44 #include <sys/socket.h> /* sockaddrs */
45 #include <sys/selinfo.h>
46 #include <sys/sysctl.h>
47 #include <net/if.h>
48 #include <net/if_var.h>
49 #include <net/bpf.h>		/* BIOCIMMEDIATE */
50 #include <machine/bus.h>	/* bus_dmamap_* */
51 #include <sys/endian.h>
52 #include <sys/refcount.h>
53 #include <sys/smp.h>
54 
55 
56 #elif defined(linux)
57 
58 #include "bsd_glue.h"
59 
60 #elif defined(__APPLE__)
61 
62 #warning OSX support is only partial
63 #include "osx_glue.h"
64 
65 #elif defined(_WIN32)
66 #include "win_glue.h"
67 
68 #else
69 
70 #error	Unsupported platform
71 
72 #endif /* unsupported */
73 
74 /*
75  * common headers
76  */
77 
78 #include <net/netmap.h>
79 #include <dev/netmap/netmap_kern.h>
80 #include <dev/netmap/netmap_mem2.h>
81 #include <dev/netmap/netmap_bdg.h>
82 
83 #ifdef WITH_VALE
84 
85 /*
86  * system parameters (most of them in netmap_kern.h)
87  * NM_BDG_NAME	prefix for switch port names, default "vale"
88  * NM_BDG_MAXPORTS	number of ports
89  * NM_BRIDGES	max number of switches in the system.
90  *	XXX should become a sysctl or tunable
91  *
92  * Switch ports are named valeX:Y where X is the switch name and Y
93  * is the port. If Y matches a physical interface name, the port is
94  * connected to a physical device.
95  *
96  * Unlike physical interfaces, switch ports use their own memory region
97  * for rings and buffers.
98  * The virtual interfaces use per-queue locks instead of the core lock.
99  * In the tx loop, we aggregate traffic in batches to make all operations
100  * faster. The batch size is bridge_batch.
101  */
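/*
 * Naming example (illustrative): "vale0:v1" denotes port "v1" of switch
 * "vale0" and is created as a virtual port, while "vale0:em1" would
 * connect the physical interface em1 (a hypothetical NIC name) to
 * switch "vale0".
 */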
102 #define NM_BDG_MAXRINGS		16	/* XXX unclear how many. */
103 #define NM_BDG_MAXSLOTS		4096	/* XXX same as above */
104 #define NM_BRIDGE_RINGSIZE	1024	/* in the device */
105 #define NM_BDG_BATCH		1024	/* entries in the forwarding buffer */
106 /* actual size of the tables */
107 #define NM_BDG_BATCH_MAX	(NM_BDG_BATCH + NETMAP_MAX_FRAGS)
108 /* NM_FT_NULL terminates a list of slots in the ft */
109 #define NM_FT_NULL		NM_BDG_BATCH_MAX
110 
111 
112 /*
113  * bridge_batch is set via sysctl to the max batch size to be
114  * used in the bridge. The actual value may be larger as the
115  * last packet in the block may overflow the size.
116  */
117 static int bridge_batch = NM_BDG_BATCH; /* bridge batch size */
118 SYSBEGIN(vars_vale);
119 SYSCTL_DECL(_dev_netmap);
120 SYSCTL_INT(_dev_netmap, OID_AUTO, bridge_batch, CTLFLAG_RW, &bridge_batch, 0,
121 		"Max batch size to be used in the bridge");
122 SYSEND;
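
/*
 * Example (illustrative): on FreeBSD the batch size can be tuned at
 * runtime with something like
 *
 *	sysctl dev.netmap.bridge_batch=256
 *
 * Values above NM_BDG_BATCH are clamped in netmap_vale_vp_txsync().
 */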
123 
124 static int netmap_vale_vp_create(struct nmreq_header *hdr, struct ifnet *,
125 		struct netmap_mem_d *nmd, struct netmap_vp_adapter **);
126 static int netmap_vale_vp_bdg_attach(const char *, struct netmap_adapter *,
127 		struct nm_bridge *);
128 static int netmap_vale_bwrap_attach(const char *, struct netmap_adapter *);
129 
130 /*
131  * For each output interface, nm_vale_q is used to construct a list.
132  * bq_len is the number of output buffers (we can have coalescing
133  * during the copy).
134  */
135 struct nm_vale_q {
136 	uint16_t bq_head;
137 	uint16_t bq_tail;
138 	uint32_t bq_len;	/* number of buffers */
139 };
140 
141 /* Holds the default callbacks */
142 struct netmap_bdg_ops vale_bdg_ops = {
143 	.lookup = netmap_vale_learning,
144 	.config = NULL,
145 	.dtor = NULL,
146 	.vp_create = netmap_vale_vp_create,
147 	.bwrap_attach = netmap_vale_bwrap_attach,
148 	.name = NM_BDG_NAME,
149 };
150 
151 /*
152  * this is a slightly optimized copy routine which rounds the length
153  * up to a multiple of 64 bytes and is often faster than dealing
154  * with odd sizes. We assume there is enough room
155  * in both the source and destination buffers.
156  *
157  * XXX only for multiples of 64 bytes, non overlapped.
158  */
159 static inline void
160 pkt_copy(void *_src, void *_dst, int l)
161 {
162 	uint64_t *src = _src;
163 	uint64_t *dst = _dst;
164 	if (unlikely(l >= 1024)) {
165 		memcpy(dst, src, l);
166 		return;
167 	}
168 	for (; likely(l > 0); l-=64) {
169 		*dst++ = *src++;
170 		*dst++ = *src++;
171 		*dst++ = *src++;
172 		*dst++ = *src++;
173 		*dst++ = *src++;
174 		*dst++ = *src++;
175 		*dst++ = *src++;
176 		*dst++ = *src++;
177 	}
178 }
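
/*
 * Note: the copy loop in nm_vale_flush() rounds copy_len up to a
 * multiple of 64 and checks it against NETMAP_BUF_SIZE() before
 * calling pkt_copy(), which is how the "enough room" assumption
 * above is satisfied.
 */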
179 
180 
181 /*
182  * Free the forwarding tables for rings attached to switch ports.
183  */
184 static void
185 nm_free_bdgfwd(struct netmap_adapter *na)
186 {
187 	int nrings, i;
188 	struct netmap_kring **kring;
189 
190 	NMG_LOCK_ASSERT();
191 	nrings = na->num_tx_rings;
192 	kring = na->tx_rings;
193 	for (i = 0; i < nrings; i++) {
194 		if (kring[i]->nkr_ft) {
195 			nm_os_free(kring[i]->nkr_ft);
196 			kring[i]->nkr_ft = NULL; /* protect from freeing twice */
197 		}
198 	}
199 }
200 
201 
202 /*
203  * Allocate the forwarding tables for the rings attached to the bridge ports.
204  */
205 static int
206 nm_alloc_bdgfwd(struct netmap_adapter *na)
207 {
208 	int nrings, l, i, num_dstq;
209 	struct netmap_kring **kring;
210 
211 	NMG_LOCK_ASSERT();
212 	/* all port:rings + broadcast */
213 	num_dstq = NM_BDG_MAXPORTS * NM_BDG_MAXRINGS + 1;
214 	l = sizeof(struct nm_bdg_fwd) * NM_BDG_BATCH_MAX;
215 	l += sizeof(struct nm_vale_q) * num_dstq;
216 	l += sizeof(uint16_t) * NM_BDG_BATCH_MAX;
217 
218 	nrings = netmap_real_rings(na, NR_TX);
219 	kring = na->tx_rings;
220 	for (i = 0; i < nrings; i++) {
221 		struct nm_bdg_fwd *ft;
222 		struct nm_vale_q *dstq;
223 		int j;
224 
225 		ft = nm_os_malloc(l);
226 		if (!ft) {
227 			nm_free_bdgfwd(na);
228 			return ENOMEM;
229 		}
230 		dstq = (struct nm_vale_q *)(ft + NM_BDG_BATCH_MAX);
231 		for (j = 0; j < num_dstq; j++) {
232 			dstq[j].bq_head = dstq[j].bq_tail = NM_FT_NULL;
233 			dstq[j].bq_len = 0;
234 		}
235 		kring[i]->nkr_ft = ft;
236 	}
237 	return 0;
238 }
239 
240 /* Allows external modules to create bridges in exclusive mode;
241  * it returns an authentication token that the external module will need
242  * to provide during nm_bdg_ctl_{attach, detach}(), netmap_bdg_regops(),
243  * and nm_bdg_update_private_data() operations.
244  * Successfully executed if ret != NULL and *return_status == 0.
245  */
246 void *
247 netmap_vale_create(const char *bdg_name, int *return_status)
248 {
249 	struct nm_bridge *b = NULL;
250 	void *ret = NULL;
251 
252 	NMG_LOCK();
253 	b = nm_find_bridge(bdg_name, 0 /* don't create */, NULL);
254 	if (b) {
255 		*return_status = EEXIST;
256 		goto unlock_bdg_create;
257 	}
258 
259 	b = nm_find_bridge(bdg_name, 1 /* create */, &vale_bdg_ops);
260 	if (!b) {
261 		*return_status = ENOMEM;
262 		goto unlock_bdg_create;
263 	}
264 
265 	b->bdg_flags |= NM_BDG_ACTIVE | NM_BDG_EXCLUSIVE;
266 	ret = nm_bdg_get_auth_token(b);
267 	*return_status = 0;
268 
269 unlock_bdg_create:
270 	NMG_UNLOCK();
271 	return ret;
272 }
273 
274 /* Allows external modules to destroy a bridge created through
275  * netmap_vale_create(); the bridge must be empty.
276  */
277 int
278 netmap_vale_destroy(const char *bdg_name, void *auth_token)
279 {
280 	struct nm_bridge *b = NULL;
281 	int ret = 0;
282 
283 	NMG_LOCK();
284 	b = nm_find_bridge(bdg_name, 0 /* don't create */, NULL);
285 	if (!b) {
286 		ret = ENXIO;
287 		goto unlock_bdg_free;
288 	}
289 
290 	if (!nm_bdg_valid_auth_token(b, auth_token)) {
291 		ret = EACCES;
292 		goto unlock_bdg_free;
293 	}
294 	if (!(b->bdg_flags & NM_BDG_EXCLUSIVE)) {
295 		ret = EINVAL;
296 		goto unlock_bdg_free;
297 	}
298 
299 	b->bdg_flags &= ~(NM_BDG_EXCLUSIVE | NM_BDG_ACTIVE);
300 	ret = netmap_bdg_free(b);
301 	if (ret) {
302 		b->bdg_flags |= NM_BDG_EXCLUSIVE | NM_BDG_ACTIVE;
303 	}
304 
305 unlock_bdg_free:
306 	NMG_UNLOCK();
307 	return ret;
308 }
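
/*
 * Illustrative sketch (not part of this file): how an external kernel
 * module might use the two functions above. The bridge name "valeX:"
 * is hypothetical.
 *
 *	int status;
 *	void *token = netmap_vale_create("valeX:", &status);
 *
 *	if (token == NULL)
 *		return status;		// EEXIST, ENOMEM, ...
 *	... use token with nm_bdg_ctl_{attach,detach}(), netmap_bdg_regops() ...
 *	status = netmap_vale_destroy("valeX:", token);
 */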
309 
310 /* Process NETMAP_REQ_VALE_LIST. */
311 int
312 netmap_vale_list(struct nmreq_header *hdr)
313 {
314 	struct nmreq_vale_list *req =
315 		(struct nmreq_vale_list *)(uintptr_t)hdr->nr_body;
316 	int namelen = strlen(hdr->nr_name);
317 	struct nm_bridge *b, *bridges;
318 	struct netmap_vp_adapter *vpna;
319 	int error = 0, i, j;
320 	u_int num_bridges;
321 
322 	netmap_bns_getbridges(&bridges, &num_bridges);
323 
324 	/* this is used to enumerate bridges and ports */
325 	if (namelen) { /* look up indexes of bridge and port */
326 		if (strncmp(hdr->nr_name, NM_BDG_NAME,
327 					strlen(NM_BDG_NAME))) {
328 			return EINVAL;
329 		}
330 		NMG_LOCK();
331 		b = nm_find_bridge(hdr->nr_name, 0 /* don't create */, NULL);
332 		if (!b) {
333 			NMG_UNLOCK();
334 			return ENOENT;
335 		}
336 
337 		req->nr_bridge_idx = b - bridges; /* bridge index */
338 		req->nr_port_idx = NM_BDG_NOPORT;
339 		for (j = 0; j < b->bdg_active_ports; j++) {
340 			i = b->bdg_port_index[j];
341 			vpna = b->bdg_ports[i];
342 			if (vpna == NULL) {
343 				nm_prerr("This should not happen");
344 				continue;
345 			}
346 			/* the former and the latter identify a
347 			 * virtual port and a NIC, respectively
348 			 */
349 			if (!strcmp(vpna->up.name, hdr->nr_name)) {
350 				req->nr_port_idx = i; /* port index */
351 				break;
352 			}
353 		}
354 		NMG_UNLOCK();
355 	} else {
356 		/* return the first non-empty entry starting from
357 		 * bridge nr_bridge_idx and port nr_port_idx.
358 		 *
359 		 * Users can detect the end of the same bridge by
360 		 * comparing the new and old values of nr_bridge_idx, and can
361 		 * detect the end of all the bridges by error != 0
362 		 */
363 		i = req->nr_bridge_idx;
364 		j = req->nr_port_idx;
365 
366 		NMG_LOCK();
367 		for (error = ENOENT; i < NM_BRIDGES; i++) {
368 			b = bridges + i;
369 			for ( ; j < NM_BDG_MAXPORTS; j++) {
370 				if (b->bdg_ports[j] == NULL)
371 					continue;
372 				vpna = b->bdg_ports[j];
373 				/* write back the VALE switch name */
374 				strlcpy(hdr->nr_name, vpna->up.name,
375 					sizeof(hdr->nr_name));
376 				error = 0;
377 				goto out;
378 			}
379 			j = 0; /* following bridges scan from 0 */
380 		}
381 	out:
382 		req->nr_bridge_idx = i;
383 		req->nr_port_idx = j;
384 		NMG_UNLOCK();
385 	}
386 
387 	return error;
388 }
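
/*
 * Illustrative userspace sketch (assumptions: the usual /dev/netmap
 * control path, i.e. NIOCCTRL with a struct nmreq_header; error
 * handling omitted). It enumerates all bridges and ports as described
 * in the function above.
 *
 *	struct nmreq_header hdr;
 *	struct nmreq_vale_list req;
 *	int fd = open("/dev/netmap", O_RDWR);
 *
 *	memset(&hdr, 0, sizeof(hdr));
 *	memset(&req, 0, sizeof(req));
 *	hdr.nr_version = NETMAP_API;
 *	hdr.nr_reqtype = NETMAP_REQ_VALE_LIST;
 *	hdr.nr_body = (uintptr_t)&req;
 *	for (;;) {
 *		// leave hdr.nr_name empty so the kernel enumerates
 *		memset(hdr.nr_name, 0, sizeof(hdr.nr_name));
 *		if (ioctl(fd, NIOCCTRL, &hdr) != 0)
 *			break;		// ENOENT: no more ports
 *		printf("bridge %u port %u: %s\n", req.nr_bridge_idx,
 *		    req.nr_port_idx, hdr.nr_name);
 *		req.nr_port_idx++;	// resume from the next port
 *	}
 */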
389 
390 /* Process NETMAP_REQ_VALE_ATTACH.
391  */
392 int
393 netmap_vale_attach(struct nmreq_header *hdr, void *auth_token)
394 {
395 	struct nmreq_vale_attach *req =
396 		(struct nmreq_vale_attach *)(uintptr_t)hdr->nr_body;
397 	struct netmap_vp_adapter * vpna;
398 	struct netmap_adapter *na = NULL;
399 	struct netmap_mem_d *nmd = NULL;
400 	struct nm_bridge *b = NULL;
401 	int error;
402 
403 	NMG_LOCK();
404 	/* permission check for modified bridges */
405 	b = nm_find_bridge(hdr->nr_name, 0 /* don't create */, NULL);
406 	if (b && !nm_bdg_valid_auth_token(b, auth_token)) {
407 		error = EACCES;
408 		goto unlock_exit;
409 	}
410 
411 	if (req->reg.nr_mem_id) {
412 		nmd = netmap_mem_find(req->reg.nr_mem_id);
413 		if (nmd == NULL) {
414 			error = EINVAL;
415 			goto unlock_exit;
416 		}
417 	}
418 
419 	/* check for existing one */
420 	error = netmap_get_vale_na(hdr, &na, nmd, 0);
421 	if (na) {
422 		error = EBUSY;
423 		goto unref_exit;
424 	}
425 	error = netmap_get_vale_na(hdr, &na,
426 				nmd, 1 /* create if not exists */);
427 	if (error) { /* no device */
428 		goto unlock_exit;
429 	}
430 
431 	if (na == NULL) { /* VALE prefix missing */
432 		error = EINVAL;
433 		goto unlock_exit;
434 	}
435 
436 	if (NETMAP_OWNED_BY_ANY(na)) {
437 		error = EBUSY;
438 		goto unref_exit;
439 	}
440 
441 	if (na->nm_bdg_ctl) {
442 		/* nop for VALE ports. The bwrap needs to put the hwna
443 		 * in netmap mode (see netmap_bwrap_bdg_ctl)
444 		 */
445 		error = na->nm_bdg_ctl(hdr, na);
446 		if (error)
447 			goto unref_exit;
448 		ND("registered %s to netmap-mode", na->name);
449 	}
450 	vpna = (struct netmap_vp_adapter *)na;
451 	req->port_index = vpna->bdg_port;
452 
453 	if (nmd)
454 		netmap_mem_put(nmd);
455 
456 	NMG_UNLOCK();
457 	return 0;
458 
459 unref_exit:
460 	netmap_adapter_put(na);
461 unlock_exit:
462 	if (nmd)
463 		netmap_mem_put(nmd);
464 
465 	NMG_UNLOCK();
466 	return error;
467 }
468 
469 /* Process NETMAP_REQ_VALE_DETACH.
470  */
471 int
472 netmap_vale_detach(struct nmreq_header *hdr, void *auth_token)
473 {
474 	struct nmreq_vale_detach *nmreq_det = (void *)(uintptr_t)hdr->nr_body;
475 	struct netmap_vp_adapter *vpna;
476 	struct netmap_adapter *na;
477 	struct nm_bridge *b = NULL;
478 	int error;
479 
480 	NMG_LOCK();
481 	/* permission check for modified bridges */
482 	b = nm_find_bridge(hdr->nr_name, 0 /* don't create */, NULL);
483 	if (b && !nm_bdg_valid_auth_token(b, auth_token)) {
484 		error = EACCES;
485 		goto unlock_exit;
486 	}
487 
488 	error = netmap_get_vale_na(hdr, &na, NULL, 0 /* don't create */);
489 	if (error) { /* no device, or another bridge or user owns the device */
490 		goto unlock_exit;
491 	}
492 
493 	if (na == NULL) { /* VALE prefix missing */
494 		error = EINVAL;
495 		goto unlock_exit;
496 	} else if (nm_is_bwrap(na) &&
497 		   ((struct netmap_bwrap_adapter *)na)->na_polling_state) {
498 		/* Don't detach a NIC with polling */
499 		error = EBUSY;
500 		goto unref_exit;
501 	}
502 
503 	vpna = (struct netmap_vp_adapter *)na;
504 	if (na->na_vp != vpna) {
505 		/* trying to detach the first attachment of a VALE persistent
506 		 * port that is attached to two bridges
507 		 */
508 		error = EBUSY;
509 		goto unref_exit;
510 	}
511 	nmreq_det->port_index = vpna->bdg_port;
512 
513 	if (na->nm_bdg_ctl) {
514 		/* remove the port from bridge. The bwrap
515 		 * also needs to put the hwna in normal mode
516 		 */
517 		error = na->nm_bdg_ctl(hdr, na);
518 	}
519 
520 unref_exit:
521 	netmap_adapter_put(na);
522 unlock_exit:
523 	NMG_UNLOCK();
524 	return error;
525 
526 }
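
/*
 * The attach/detach requests handled above are normally generated by
 * userspace tools; for example the vale-ctl utility distributed with
 * netmap issues them with commands along the lines of (illustrative,
 * "em1" being a hypothetical NIC name):
 *
 *	vale-ctl -a vale0:em1	# NETMAP_REQ_VALE_ATTACH
 *	vale-ctl -d vale0:em1	# NETMAP_REQ_VALE_DETACH
 */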
527 
528 
529 /* nm_dtor callback for ephemeral VALE ports */
530 static void
531 netmap_vale_vp_dtor(struct netmap_adapter *na)
532 {
533 	struct netmap_vp_adapter *vpna = (struct netmap_vp_adapter*)na;
534 	struct nm_bridge *b = vpna->na_bdg;
535 
536 	ND("%s has %d references", na->name, na->na_refcount);
537 
538 	if (b) {
539 		netmap_bdg_detach_common(b, vpna->bdg_port, -1);
540 	}
541 
542 	if (na->ifp != NULL && !nm_iszombie(na)) {
543 		NM_DETACH_NA(na->ifp);
544 		if (vpna->autodelete) {
545 			ND("releasing %s", na->ifp->if_xname);
546 			NMG_UNLOCK();
547 			nm_os_vi_detach(na->ifp);
548 			NMG_LOCK();
549 		}
550 	}
551 }
552 
553 
554 
555 /* nm_krings_create callback for VALE ports.
556  * Calls the standard netmap_krings_create, then adds leases on rx
557  * rings and bdgfwd on tx rings.
558  */
559 static int
560 netmap_vale_vp_krings_create(struct netmap_adapter *na)
561 {
562 	u_int tailroom;
563 	int error, i;
564 	uint32_t *leases;
565 	u_int nrx = netmap_real_rings(na, NR_RX);
566 
567 	/*
568 	 * Leases are attached to RX rings on vale ports
569 	 */
570 	tailroom = sizeof(uint32_t) * na->num_rx_desc * nrx;
571 
572 	error = netmap_krings_create(na, tailroom);
573 	if (error)
574 		return error;
575 
576 	leases = na->tailroom;
577 
578 	for (i = 0; i < nrx; i++) { /* Receive rings */
579 		na->rx_rings[i]->nkr_leases = leases;
580 		leases += na->num_rx_desc;
581 	}
582 
583 	error = nm_alloc_bdgfwd(na);
584 	if (error) {
585 		netmap_krings_delete(na);
586 		return error;
587 	}
588 
589 	return 0;
590 }
591 
592 
593 /* nm_krings_delete callback for VALE ports. */
594 static void
595 netmap_vale_vp_krings_delete(struct netmap_adapter *na)
596 {
597 	nm_free_bdgfwd(na);
598 	netmap_krings_delete(na);
599 }
600 
601 
602 static int
603 nm_vale_flush(struct nm_bdg_fwd *ft, u_int n,
604 	struct netmap_vp_adapter *na, u_int ring_nr);
605 
606 
607 /*
608  * main dispatch routine for the bridge.
609  * Grab packets from a kring, move them into the ft structure
610  * associated with the tx (input) port. Max one instance per port,
611  * filtered on input (ioctl, poll or XXX).
612  * Returns the next position in the ring.
613  */
614 static int
615 nm_vale_preflush(struct netmap_kring *kring, u_int end)
616 {
617 	struct netmap_vp_adapter *na =
618 		(struct netmap_vp_adapter*)kring->na;
619 	struct netmap_ring *ring = kring->ring;
620 	struct nm_bdg_fwd *ft;
621 	u_int ring_nr = kring->ring_id;
622 	u_int j = kring->nr_hwcur, lim = kring->nkr_num_slots - 1;
623 	u_int ft_i = 0;	/* start from 0 */
624 	u_int frags = 1; /* how many frags ? */
625 	struct nm_bridge *b = na->na_bdg;
626 
627 	/* To protect against modifications to the bridge we acquire a
628 	 * shared lock, waiting if we can sleep (if the source port is
629 	 * attached to a user process) or with a trylock otherwise (NICs).
630 	 */
631 	ND("wait rlock for %d packets", ((j > end ? lim+1 : 0) + end) - j);
632 	if (na->up.na_flags & NAF_BDG_MAYSLEEP)
633 		BDG_RLOCK(b);
634 	else if (!BDG_RTRYLOCK(b))
635 		return j;
636 	ND(5, "rlock acquired for %d packets", ((j > end ? lim+1 : 0) + end) - j);
637 	ft = kring->nkr_ft;
638 
639 	for (; likely(j != end); j = nm_next(j, lim)) {
640 		struct netmap_slot *slot = &ring->slot[j];
641 		char *buf;
642 
643 		ft[ft_i].ft_len = slot->len;
644 		ft[ft_i].ft_flags = slot->flags;
645 		ft[ft_i].ft_offset = 0;
646 
647 		ND("flags is 0x%x", slot->flags);
648 		/* we do not use the buf changed flag, but we still need to reset it */
649 		slot->flags &= ~NS_BUF_CHANGED;
650 
651 		/* this slot goes into a list so initialize the link field */
652 		ft[ft_i].ft_next = NM_FT_NULL;
653 		buf = ft[ft_i].ft_buf = (slot->flags & NS_INDIRECT) ?
654 			(void *)(uintptr_t)slot->ptr : NMB(&na->up, slot);
655 		if (unlikely(buf == NULL)) {
656 			nm_prlim(5, "NULL %s buffer pointer from %s slot %d len %d",
657 				(slot->flags & NS_INDIRECT) ? "INDIRECT" : "DIRECT",
658 				kring->name, j, ft[ft_i].ft_len);
659 			buf = ft[ft_i].ft_buf = NETMAP_BUF_BASE(&na->up);
660 			ft[ft_i].ft_len = 0;
661 			ft[ft_i].ft_flags = 0;
662 		}
663 		__builtin_prefetch(buf);
664 		++ft_i;
665 		if (slot->flags & NS_MOREFRAG) {
666 			frags++;
667 			continue;
668 		}
669 		if (unlikely(netmap_verbose && frags > 1))
670 			RD(5, "%d frags at %d", frags, ft_i - frags);
671 		ft[ft_i - frags].ft_frags = frags;
672 		frags = 1;
673 		if (unlikely((int)ft_i >= bridge_batch))
674 			ft_i = nm_vale_flush(ft, ft_i, na, ring_nr);
675 	}
676 	if (frags > 1) {
677 		/* Here ft_i > 0, ft[ft_i-1].flags has NS_MOREFRAG, and we
678 		 * have to fix frags count. */
679 		frags--;
680 		ft[ft_i - 1].ft_flags &= ~NS_MOREFRAG;
681 		ft[ft_i - frags].ft_frags = frags;
682 		nm_prlim(5, "Truncate incomplete fragment at %d (%d frags)", ft_i, frags);
683 	}
684 	if (ft_i)
685 		ft_i = nm_vale_flush(ft, ft_i, na, ring_nr);
686 	BDG_RUNLOCK(b);
687 	return j;
688 }
689 
690 
691 /* ----- FreeBSD if_bridge hash function ------- */
692 
693 /*
694  * The following hash function is adapted from "Hash Functions" by Bob Jenkins
695  * ("Algorithm Alley", Dr. Dobbs Journal, September 1997).
696  *
697  * http://www.burtleburtle.net/bob/hash/spooky.html
698  */
699 #define mix(a, b, c)                                                    \
700 do {                                                                    \
701 	a -= b; a -= c; a ^= (c >> 13);                                 \
702 	b -= c; b -= a; b ^= (a << 8);                                  \
703 	c -= a; c -= b; c ^= (b >> 13);                                 \
704 	a -= b; a -= c; a ^= (c >> 12);                                 \
705 	b -= c; b -= a; b ^= (a << 16);                                 \
706 	c -= a; c -= b; c ^= (b >> 5);                                  \
707 	a -= b; a -= c; a ^= (c >> 3);                                  \
708 	b -= c; b -= a; b ^= (a << 10);                                 \
709 	c -= a; c -= b; c ^= (b >> 15);                                 \
710 } while (/*CONSTCOND*/0)
711 
712 
713 static __inline uint32_t
714 nm_vale_rthash(const uint8_t *addr)
715 {
716 	uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = 0; // hash key
717 
718 	b += addr[5] << 8;
719 	b += addr[4];
720 	a += addr[3] << 24;
721 	a += addr[2] << 16;
722 	a += addr[1] << 8;
723 	a += addr[0];
724 
725 	mix(a, b, c);
726 #define BRIDGE_RTHASH_MASK	(NM_BDG_HASH-1)
727 	return (c & BRIDGE_RTHASH_MASK);
728 }
729 
730 #undef mix
731 
732 
733 /*
734  * Lookup function for a learning bridge.
735  * Updates the hash table with the source address,
736  * then returns the destination port index and the
737  * ring in *dst_ring (at the moment, always ring 0).
738  */
739 uint32_t
740 netmap_vale_learning(struct nm_bdg_fwd *ft, uint8_t *dst_ring,
741 		struct netmap_vp_adapter *na, void *private_data)
742 {
743 	uint8_t *buf = ((uint8_t *)ft->ft_buf) + ft->ft_offset;
744 	u_int buf_len = ft->ft_len - ft->ft_offset;
745 	struct nm_hash_ent *ht = private_data;
746 	uint32_t sh, dh;
747 	u_int dst, mysrc = na->bdg_port;
748 	uint64_t smac, dmac;
749 	uint8_t indbuf[12];
750 
751 	if (buf_len < 14) {
752 		return NM_BDG_NOPORT;
753 	}
754 
755 	if (ft->ft_flags & NS_INDIRECT) {
756 		if (copyin(buf, indbuf, sizeof(indbuf))) {
757 			return NM_BDG_NOPORT;
758 		}
759 		buf = indbuf;
760 	}
761 
762 	dmac = le64toh(*(uint64_t *)(buf)) & 0xffffffffffff;
763 	smac = le64toh(*(uint64_t *)(buf + 4));
764 	smac >>= 16;
765 
766 	/*
767 	 * The hash is somewhat expensive; there might be some
768 	 * worthwhile optimizations here.
769 	 */
770 	if (((buf[6] & 1) == 0) && (na->last_smac != smac)) { /* valid src */
771 		uint8_t *s = buf+6;
772 		sh = nm_vale_rthash(s); /* hash of source */
773 		/* update source port forwarding entry */
774 		na->last_smac = ht[sh].mac = smac;	/* XXX expire ? */
775 		ht[sh].ports = mysrc;
776 		if (netmap_debug & NM_DEBUG_VALE)
777 		    nm_prinf("src %02x:%02x:%02x:%02x:%02x:%02x on port %d",
778 			s[0], s[1], s[2], s[3], s[4], s[5], mysrc);
779 	}
780 	dst = NM_BDG_BROADCAST;
781 	if ((buf[0] & 1) == 0) { /* unicast */
782 		dh = nm_vale_rthash(buf); /* hash of dst */
783 		if (ht[dh].mac == dmac) {	/* found dst */
784 			dst = ht[dh].ports;
785 		}
786 	}
787 	return dst;
788 }
789 
790 
791 /*
792  * Available space in the ring. Only used in VALE code
793  * and only with is_rx = 1
794  */
795 static inline uint32_t
796 nm_kr_space(struct netmap_kring *k, int is_rx)
797 {
798 	int space;
799 
800 	if (is_rx) {
801 		int busy = k->nkr_hwlease - k->nr_hwcur;
802 		if (busy < 0)
803 			busy += k->nkr_num_slots;
804 		space = k->nkr_num_slots - 1 - busy;
805 	} else {
806 		/* XXX never used in this branch */
807 		space = k->nr_hwtail - k->nkr_hwlease;
808 		if (space < 0)
809 			space += k->nkr_num_slots;
810 	}
811 #if 0
812 	// sanity check
813 	if (k->nkr_hwlease >= k->nkr_num_slots ||
814 		k->nr_hwcur >= k->nkr_num_slots ||
815 		k->nr_tail >= k->nkr_num_slots ||
816 		busy < 0 ||
817 		busy >= k->nkr_num_slots) {
818 		D("invalid kring, cur %d tail %d lease %d lease_idx %d lim %d", k->nr_hwcur, k->nr_hwtail, k->nkr_hwlease,
819 			k->nkr_lease_idx, k->nkr_num_slots);
820 	}
821 #endif
822 	return space;
823 }
824 
825 
826 
827 
828 /* make a lease on the kring for N positions. return the
829  * lease index
830  * XXX only used in VALE code and with is_rx = 1
831  */
832 static inline uint32_t
833 nm_kr_lease(struct netmap_kring *k, u_int n, int is_rx)
834 {
835 	uint32_t lim = k->nkr_num_slots - 1;
836 	uint32_t lease_idx = k->nkr_lease_idx;
837 
838 	k->nkr_leases[lease_idx] = NR_NOSLOT;
839 	k->nkr_lease_idx = nm_next(lease_idx, lim);
840 
841 #ifdef CONFIG_NETMAP_DEBUG
842 	if (n > nm_kr_space(k, is_rx)) {
843 		nm_prerr("invalid request for %d slots", n);
844 		panic("x");
845 	}
846 #endif /* CONFIG NETMAP_DEBUG */
847 	/* XXX verify that there are n slots */
848 	k->nkr_hwlease += n;
849 	if (k->nkr_hwlease > lim)
850 		k->nkr_hwlease -= lim + 1;
851 
852 #ifdef CONFIG_NETMAP_DEBUG
853 	if (k->nkr_hwlease >= k->nkr_num_slots ||
854 		k->nr_hwcur >= k->nkr_num_slots ||
855 		k->nr_hwtail >= k->nkr_num_slots ||
856 		k->nkr_lease_idx >= k->nkr_num_slots) {
857 		nm_prerr("invalid kring %s, cur %d tail %d lease %d lease_idx %d lim %d",
858 			k->na->name,
859 			k->nr_hwcur, k->nr_hwtail, k->nkr_hwlease,
860 			k->nkr_lease_idx, k->nkr_num_slots);
861 	}
862 #endif /* CONFIG_NETMAP_DEBUG */
863 	return lease_idx;
864 }
865 
866 /*
867  *
868  * This flush routine supports only unicast and broadcast but a large
869  * number of ports, and lets us replace the learn and dispatch functions.
870  */
871 int
872 nm_vale_flush(struct nm_bdg_fwd *ft, u_int n, struct netmap_vp_adapter *na,
873 		u_int ring_nr)
874 {
875 	struct nm_vale_q *dst_ents, *brddst;
876 	uint16_t num_dsts = 0, *dsts;
877 	struct nm_bridge *b = na->na_bdg;
878 	u_int i, me = na->bdg_port;
879 
880 	/*
881 	 * The work area (pointed by ft) is followed by an array of
882 	 * pointers to queues, dst_ents; there are NM_BDG_MAXRINGS
883 	 * queues per port plus one for the broadcast traffic.
884 	 * Then we have an array of destination indexes.
885 	 */
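	/*
	 * Layout of the per-ring work area allocated in nm_alloc_bdgfwd()
	 * (sketch):
	 *
	 *	ft[NM_BDG_BATCH_MAX]				struct nm_bdg_fwd
	 *	dst_ents[NM_BDG_MAXPORTS * NM_BDG_MAXRINGS + 1]	struct nm_vale_q
	 *	dsts[NM_BDG_BATCH_MAX]				uint16_t
	 */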
886 	dst_ents = (struct nm_vale_q *)(ft + NM_BDG_BATCH_MAX);
887 	dsts = (uint16_t *)(dst_ents + NM_BDG_MAXPORTS * NM_BDG_MAXRINGS + 1);
888 
889 	/* first pass: find a destination for each packet in the batch */
890 	for (i = 0; likely(i < n); i += ft[i].ft_frags) {
891 		uint8_t dst_ring = ring_nr; /* default, same ring as origin */
892 		uint16_t dst_port, d_i;
893 		struct nm_vale_q *d;
894 		struct nm_bdg_fwd *start_ft = NULL;
895 
896 		ND("slot %d frags %d", i, ft[i].ft_frags);
897 
898 		if (na->up.virt_hdr_len < ft[i].ft_len) {
899 			ft[i].ft_offset = na->up.virt_hdr_len;
900 			start_ft = &ft[i];
901 		} else if (na->up.virt_hdr_len == ft[i].ft_len && ft[i].ft_flags & NS_MOREFRAG) {
902 			ft[i].ft_offset = ft[i].ft_len;
903 			start_ft = &ft[i+1];
904 		} else {
905 			/* Drop the packet if the virtio-net header is neither in the first
906 			 * fragment nor at the very beginning of the second.
907 			 */
908 			continue;
909 		}
910 		dst_port = b->bdg_ops.lookup(start_ft, &dst_ring, na, b->private_data);
911 		if (netmap_verbose > 255)
912 			RD(5, "slot %d port %d -> %d", i, me, dst_port);
913 		if (dst_port >= NM_BDG_NOPORT)
914 			continue; /* this packet is to be dropped */
915 		else if (dst_port == NM_BDG_BROADCAST)
916 			dst_ring = 0; /* broadcasts always go to ring 0 */
917 		else if (unlikely(dst_port == me ||
918 		    !b->bdg_ports[dst_port]))
919 			continue;
920 
921 		/* get a position in the scratch pad */
922 		d_i = dst_port * NM_BDG_MAXRINGS + dst_ring;
923 		d = dst_ents + d_i;
924 
925 		/* append the first fragment to the list */
926 		if (d->bq_head == NM_FT_NULL) { /* new destination */
927 			d->bq_head = d->bq_tail = i;
928 			/* remember this position to be scanned later */
929 			if (dst_port != NM_BDG_BROADCAST)
930 				dsts[num_dsts++] = d_i;
931 		} else {
932 			ft[d->bq_tail].ft_next = i;
933 			d->bq_tail = i;
934 		}
935 		d->bq_len += ft[i].ft_frags;
936 	}
937 
938 	/*
939 	 * Broadcast traffic goes to ring 0 on all destinations.
940 	 * So we need to add these rings to the list of ports to scan.
941 	 * XXX at the moment we scan all NM_BDG_MAXPORTS ports, which is
942 	 * expensive. We should keep a compact list of active destinations
943 	 * so we could shorten this loop.
944 	 */
945 	brddst = dst_ents + NM_BDG_BROADCAST * NM_BDG_MAXRINGS;
946 	if (brddst->bq_head != NM_FT_NULL) {
947 		u_int j;
948 		for (j = 0; likely(j < b->bdg_active_ports); j++) {
949 			uint16_t d_i;
950 			i = b->bdg_port_index[j];
951 			if (unlikely(i == me))
952 				continue;
953 			d_i = i * NM_BDG_MAXRINGS;
954 			if (dst_ents[d_i].bq_head == NM_FT_NULL)
955 				dsts[num_dsts++] = d_i;
956 		}
957 	}
958 
959 	ND(5, "pass 1 done %d pkts %d dsts", n, num_dsts);
960 	/* second pass: scan destinations */
961 	for (i = 0; i < num_dsts; i++) {
962 		struct netmap_vp_adapter *dst_na;
963 		struct netmap_kring *kring;
964 		struct netmap_ring *ring;
965 		u_int dst_nr, lim, j, d_i, next, brd_next;
966 		u_int needed, howmany;
967 		int retry = netmap_txsync_retry;
968 		struct nm_vale_q *d;
969 		uint32_t my_start = 0, lease_idx = 0;
970 		int nrings;
971 		int virt_hdr_mismatch = 0;
972 
973 		d_i = dsts[i];
974 		ND("second pass %d port %d", i, d_i);
975 		d = dst_ents + d_i;
976 		// XXX fix the division
977 		dst_na = b->bdg_ports[d_i/NM_BDG_MAXRINGS];
978 		/* protect from the lookup function returning an inactive
979 		 * destination port
980 		 */
981 		if (unlikely(dst_na == NULL))
982 			goto cleanup;
983 		if (dst_na->up.na_flags & NAF_SW_ONLY)
984 			goto cleanup;
985 		/*
986 		 * The interface may be in !netmap mode in two cases:
987 		 * - when na is attached but not activated yet;
988 		 * - when na is being deactivated but is still attached.
989 		 */
990 		if (unlikely(!nm_netmap_on(&dst_na->up))) {
991 			ND("not in netmap mode!");
992 			goto cleanup;
993 		}
994 
995 		/* there is at least one unicast or broadcast packet */
996 		brd_next = brddst->bq_head;
997 		next = d->bq_head;
998 		/* we need to reserve this many slots. If fewer are
999 		 * available, some packets will be dropped.
1000 		 * Packets may have multiple fragments, so there is a chance
1001 		 * that we will not use all of the slots we have claimed, and
1002 		 * we will need to handle the leftover ones when we regain
1003 		 * the lock.
1004 		 */
1005 		needed = d->bq_len + brddst->bq_len;
1006 
1007 		if (unlikely(dst_na->up.virt_hdr_len != na->up.virt_hdr_len)) {
1008 			if (netmap_verbose) {
1009 				RD(3, "virt_hdr_mismatch, src %d dst %d", na->up.virt_hdr_len,
1010 						dst_na->up.virt_hdr_len);
1011 			}
1012 			/* There is a virtio-net header/offloadings mismatch between
1013 			 * source and destination. The slower mismatch datapath will
1014 			 * be used to cope with all the mismatches.
1015 			 */
1016 			virt_hdr_mismatch = 1;
1017 			if (dst_na->mfs < na->mfs) {
1018 				/* We may need to do segmentation offloadings, and so
1019 				 * we may need a number of destination slots greater
1020 				 * than the number of input slots ('needed').
1021 				 * We look for the smallest integer 'x' which satisfies:
1022 				 *	needed * na->mfs + x * H <= x * dst_na->mfs
1023 				 * where 'H' is the length of the longest header that may
1024 				 * be replicated in the segmentation process (e.g. for
1025 				 * TCPv4 we must account for ethernet header, IP header
1026 				 * and TCPv4 header); solving gives the expression below.
1027 				 */
1028 				KASSERT(dst_na->mfs > 0, ("vpna->mfs is 0"));
1029 				needed = (needed * na->mfs) /
1030 						(dst_na->mfs - WORST_CASE_GSO_HEADER) + 1;
1031 				ND(3, "srcmtu=%u, dstmtu=%u, x=%u", na->mfs, dst_na->mfs, needed);
1032 			}
1033 		}
1034 
1035 		ND(5, "pass 2 dst %d is %x %s",
1036 			i, d_i, is_vp ? "virtual" : "nic/host");
1037 		dst_nr = d_i & (NM_BDG_MAXRINGS-1);
1038 		nrings = dst_na->up.num_rx_rings;
1039 		if (dst_nr >= nrings)
1040 			dst_nr = dst_nr % nrings;
1041 		kring = dst_na->up.rx_rings[dst_nr];
1042 		ring = kring->ring;
1043 		/* the destination ring may not have been opened for RX */
1044 		if (unlikely(ring == NULL || kring->nr_mode != NKR_NETMAP_ON))
1045 			goto cleanup;
1046 		lim = kring->nkr_num_slots - 1;
1047 
1048 retry:
1049 
1050 		if (dst_na->retry && retry) {
1051 			/* try to get some free slot from the previous run */
1052 			kring->nm_notify(kring, NAF_FORCE_RECLAIM);
1053 			/* actually useful only for bwraps, since there
1054 			 * the notify will trigger a txsync on the hwna. VALE ports
1055 			 * have dst_na->retry == 0
1056 			 */
1057 		}
1058 		/* reserve the buffers in the queue and an entry
1059 		 * to report completion, and drop lock.
1060 		 * XXX this might become a helper function.
1061 		 */
1062 		mtx_lock(&kring->q_lock);
1063 		if (kring->nkr_stopped) {
1064 			mtx_unlock(&kring->q_lock);
1065 			goto cleanup;
1066 		}
1067 		my_start = j = kring->nkr_hwlease;
1068 		howmany = nm_kr_space(kring, 1);
1069 		if (needed < howmany)
1070 			howmany = needed;
1071 		lease_idx = nm_kr_lease(kring, howmany, 1);
1072 		mtx_unlock(&kring->q_lock);
1073 
1074 		/* only retry if we need more than available slots */
1075 		if (retry && needed <= howmany)
1076 			retry = 0;
1077 
1078 		/* copy to the destination queue */
1079 		while (howmany > 0) {
1080 			struct netmap_slot *slot;
1081 			struct nm_bdg_fwd *ft_p, *ft_end;
1082 			u_int cnt;
1083 
1084 			/* find the queue from which we pick next packet.
1085 			 * NM_FT_NULL is always higher than valid indexes
1086 			 * so we never dereference it if the other list
1087 			 * has packets (and if both are empty we never
1088 			 * get here).
1089 			 */
1090 			if (next < brd_next) {
1091 				ft_p = ft + next;
1092 				next = ft_p->ft_next;
1093 			} else { /* insert broadcast */
1094 				ft_p = ft + brd_next;
1095 				brd_next = ft_p->ft_next;
1096 			}
1097 			cnt = ft_p->ft_frags; // cnt > 0
1098 			if (unlikely(cnt > howmany))
1099 			    break; /* no more space */
1100 			if (netmap_verbose && cnt > 1)
1101 				RD(5, "rx %d frags to %d", cnt, j);
1102 			ft_end = ft_p + cnt;
1103 			if (unlikely(virt_hdr_mismatch)) {
1104 				bdg_mismatch_datapath(na, dst_na, ft_p, ring, &j, lim, &howmany);
1105 			} else {
1106 				howmany -= cnt;
1107 				do {
1108 					char *dst, *src = ft_p->ft_buf;
1109 					size_t copy_len = ft_p->ft_len, dst_len = copy_len;
1110 
1111 					slot = &ring->slot[j];
1112 					dst = NMB(&dst_na->up, slot);
1113 
1114 					ND("send [%d] %d(%d) bytes at %s:%d",
1115 							i, (int)copy_len, (int)dst_len,
1116 							NM_IFPNAME(dst_ifp), j);
1117 					/* round to a multiple of 64 */
1118 					copy_len = (copy_len + 63) & ~63;
1119 
1120 					if (unlikely(copy_len > NETMAP_BUF_SIZE(&dst_na->up) ||
1121 						     copy_len > NETMAP_BUF_SIZE(&na->up))) {
1122 						RD(5, "invalid len %d, down to 64", (int)copy_len);
1123 						copy_len = dst_len = 64; // XXX
1124 					}
1125 					if (ft_p->ft_flags & NS_INDIRECT) {
1126 						if (copyin(src, dst, copy_len)) {
1127 							// invalid user pointer, pretend len is 0
1128 							dst_len = 0;
1129 						}
1130 					} else {
1131 						//memcpy(dst, src, copy_len);
1132 						pkt_copy(src, dst, (int)copy_len);
1133 					}
1134 					slot->len = dst_len;
1135 					slot->flags = (cnt << 8) | NS_MOREFRAG;
1136 					j = nm_next(j, lim);
1137 					needed--;
1138 					ft_p++;
1139 				} while (ft_p != ft_end);
1140 				slot->flags = (cnt << 8); /* clear flag on last entry */
1141 			}
1142 			/* are we done ? */
1143 			if (next == NM_FT_NULL && brd_next == NM_FT_NULL)
1144 				break;
1145 		}
1146 		{
1147 		    /* current position */
1148 		    uint32_t *p = kring->nkr_leases; /* shorthand */
1149 		    uint32_t update_pos;
1150 		    int still_locked = 1;
1151 
1152 		    mtx_lock(&kring->q_lock);
1153 		    if (unlikely(howmany > 0)) {
1154 			/* we have not used all the buffers. If I am the last one
1155 			 * I can recover the slots, otherwise I must
1156 			 * fill them with 0 to mark empty packets.
1157 			 */
1158 			ND("leftover %d bufs", howmany);
1159 			if (nm_next(lease_idx, lim) == kring->nkr_lease_idx) {
1160 			    /* yes i am the last one */
1161 			    ND("roll back nkr_hwlease to %d", j);
1162 			    kring->nkr_hwlease = j;
1163 			} else {
1164 			    while (howmany-- > 0) {
1165 				ring->slot[j].len = 0;
1166 				ring->slot[j].flags = 0;
1167 				j = nm_next(j, lim);
1168 			    }
1169 			}
1170 		    }
1171 		    p[lease_idx] = j; /* report I am done */
1172 
1173 		    update_pos = kring->nr_hwtail;
1174 
1175 		    if (my_start == update_pos) {
1176 			/* all slots before my_start have been reported,
1177 			 * so scan subsequent leases to see if other ranges
1178 			 * have been completed, and do a selwakeup or txsync.
1179 			 */
1180 			while (lease_idx != kring->nkr_lease_idx &&
1181 				p[lease_idx] != NR_NOSLOT) {
1182 			    j = p[lease_idx];
1183 			    p[lease_idx] = NR_NOSLOT;
1184 			    lease_idx = nm_next(lease_idx, lim);
1185 			}
1186 			/* j is the new 'write' position. j != my_start
1187 			 * means there are new buffers to report
1188 			 */
1189 			if (likely(j != my_start)) {
1190 				kring->nr_hwtail = j;
1191 				still_locked = 0;
1192 				mtx_unlock(&kring->q_lock);
1193 				kring->nm_notify(kring, 0);
1194 				/* this is netmap_notify for VALE ports and
1195 				 * netmap_bwrap_notify for bwrap. The latter will
1196 				 * trigger a txsync on the underlying hwna
1197 				 */
1198 				if (dst_na->retry && retry--) {
1199 					/* XXX this is going to call nm_notify again.
1200 					 * Only useful for bwrap in virtual machines
1201 					 */
1202 					goto retry;
1203 				}
1204 			}
1205 		    }
1206 		    if (still_locked)
1207 			mtx_unlock(&kring->q_lock);
1208 		}
1209 cleanup:
1210 		d->bq_head = d->bq_tail = NM_FT_NULL; /* cleanup */
1211 		d->bq_len = 0;
1212 	}
1213 	brddst->bq_head = brddst->bq_tail = NM_FT_NULL; /* cleanup */
1214 	brddst->bq_len = 0;
1215 	return 0;
1216 }
1217 
1218 /* nm_txsync callback for VALE ports */
1219 static int
1220 netmap_vale_vp_txsync(struct netmap_kring *kring, int flags)
1221 {
1222 	struct netmap_vp_adapter *na =
1223 		(struct netmap_vp_adapter *)kring->na;
1224 	u_int done;
1225 	u_int const lim = kring->nkr_num_slots - 1;
1226 	u_int const head = kring->rhead;
1227 
1228 	if (bridge_batch <= 0) { /* testing only */
1229 		done = head; // used all
1230 		goto done;
1231 	}
1232 	if (!na->na_bdg) {
1233 		done = head;
1234 		goto done;
1235 	}
1236 	if (bridge_batch > NM_BDG_BATCH)
1237 		bridge_batch = NM_BDG_BATCH;
1238 
1239 	done = nm_vale_preflush(kring, head);
1240 done:
1241 	if (done != head)
1242 		nm_prerr("early break at %d/ %d, tail %d", done, head, kring->nr_hwtail);
1243 	/*
1244 	 * packets between 'done' and 'cur' are left unsent.
1245 	 */
1246 	kring->nr_hwcur = done;
1247 	kring->nr_hwtail = nm_prev(done, lim);
1248 	if (netmap_debug & NM_DEBUG_TXSYNC)
1249 		nm_prinf("%s ring %d flags %d", na->up.name, kring->ring_id, flags);
1250 	return 0;
1251 }
1252 
1253 
1254 /* create a netmap_vp_adapter that describes a VALE port.
1255  * Only persistent VALE ports have a non-null ifp.
1256  */
1257 static int
1258 netmap_vale_vp_create(struct nmreq_header *hdr, struct ifnet *ifp,
1259 		struct netmap_mem_d *nmd, struct netmap_vp_adapter **ret)
1260 {
1261 	struct nmreq_register *req = (struct nmreq_register *)(uintptr_t)hdr->nr_body;
1262 	struct netmap_vp_adapter *vpna;
1263 	struct netmap_adapter *na;
1264 	int error = 0;
1265 	u_int npipes = 0;
1266 	u_int extrabufs = 0;
1267 
1268 	if (hdr->nr_reqtype != NETMAP_REQ_REGISTER) {
1269 		return EINVAL;
1270 	}
1271 
1272 	vpna = nm_os_malloc(sizeof(*vpna));
1273 	if (vpna == NULL)
1274 		return ENOMEM;
1275 
1276  	na = &vpna->up;
1277 
1278 	na->ifp = ifp;
1279 	strlcpy(na->name, hdr->nr_name, sizeof(na->name));
1280 
1281 	/* bound checking */
1282 	na->num_tx_rings = req->nr_tx_rings;
1283 	nm_bound_var(&na->num_tx_rings, 1, 1, NM_BDG_MAXRINGS, NULL);
1284 	req->nr_tx_rings = na->num_tx_rings; /* write back */
1285 	na->num_rx_rings = req->nr_rx_rings;
1286 	nm_bound_var(&na->num_rx_rings, 1, 1, NM_BDG_MAXRINGS, NULL);
1287 	req->nr_rx_rings = na->num_rx_rings; /* write back */
1288 	nm_bound_var(&req->nr_tx_slots, NM_BRIDGE_RINGSIZE,
1289 			1, NM_BDG_MAXSLOTS, NULL);
1290 	na->num_tx_desc = req->nr_tx_slots;
1291 	nm_bound_var(&req->nr_rx_slots, NM_BRIDGE_RINGSIZE,
1292 			1, NM_BDG_MAXSLOTS, NULL);
1293 	/* validate number of pipes. We want at least 1,
1294 	 * but probably can do with some more.
1295 	 * So let's use 2 as default (when 0 is supplied)
1296 	 */
1297 	nm_bound_var(&npipes, 2, 1, NM_MAXPIPES, NULL);
1298 	/* validate extra bufs */
1299 	extrabufs = req->nr_extra_bufs;
1300 	nm_bound_var(&extrabufs, 0, 0,
1301 			128*NM_BDG_MAXSLOTS, NULL);
1302 	req->nr_extra_bufs = extrabufs; /* write back */
1303 	na->num_rx_desc = req->nr_rx_slots;
1304 	/* Set the mfs to a default value, as it is needed on the VALE
1305 	 * mismatch datapath. XXX We should set it according to the MTU
1306 	 * known to the kernel. */
1307 	vpna->mfs = NM_BDG_MFS_DEFAULT;
1308 	vpna->last_smac = ~0llu;
1309 	/*if (vpna->mfs > netmap_buf_size)  TODO netmap_buf_size is zero??
1310 		vpna->mfs = netmap_buf_size; */
1311 	if (netmap_verbose)
1312 		nm_prinf("max frame size %u", vpna->mfs);
1313 
1314 	na->na_flags |= NAF_BDG_MAYSLEEP;
1315 	/* persistent VALE ports look like hw devices
1316 	 * with a native netmap adapter
1317 	 */
1318 	if (ifp)
1319 		na->na_flags |= NAF_NATIVE;
1320 	na->nm_txsync = netmap_vale_vp_txsync;
1321 	na->nm_rxsync = netmap_vp_rxsync; /* use the one provided by bdg */
1322 	na->nm_register = netmap_vp_reg;  /* use the one provided by bdg */
1323 	na->nm_krings_create = netmap_vale_vp_krings_create;
1324 	na->nm_krings_delete = netmap_vale_vp_krings_delete;
1325 	na->nm_dtor = netmap_vale_vp_dtor;
1326 	ND("nr_mem_id %d", req->nr_mem_id);
1327 	na->nm_mem = nmd ?
1328 		netmap_mem_get(nmd):
1329 		netmap_mem_private_new(
1330 			na->num_tx_rings, na->num_tx_desc,
1331 			na->num_rx_rings, na->num_rx_desc,
1332 			req->nr_extra_bufs, npipes, &error);
1333 	if (na->nm_mem == NULL)
1334 		goto err;
1335 	na->nm_bdg_attach = netmap_vale_vp_bdg_attach;
1336 	/* other nmd fields are set in the common routine */
1337 	error = netmap_attach_common(na);
1338 	if (error)
1339 		goto err;
1340 	*ret = vpna;
1341 	return 0;
1342 
1343 err:
1344 	if (na->nm_mem != NULL)
1345 		netmap_mem_put(na->nm_mem);
1346 	nm_os_free(vpna);
1347 	return error;
1348 }
1349 
1350 /* nm_bdg_attach callback for VALE ports
1351  * The na_vp port is this same netmap_adapter. There is no host port.
1352  */
1353 static int
1354 netmap_vale_vp_bdg_attach(const char *name, struct netmap_adapter *na,
1355 		struct nm_bridge *b)
1356 {
1357 	struct netmap_vp_adapter *vpna = (struct netmap_vp_adapter *)na;
1358 
1359 	if ((b->bdg_flags & NM_BDG_NEED_BWRAP) || vpna->na_bdg) {
1360 		return NM_NEED_BWRAP;
1361 	}
1362 	na->na_vp = vpna;
1363 	strlcpy(na->name, name, sizeof(na->name));
1364 	na->na_hostvp = NULL;
1365 	return 0;
1366 }
1367 
1368 static int
1369 netmap_vale_bwrap_krings_create(struct netmap_adapter *na)
1370 {
1371 	int error;
1372 
1373 	/* impersonate a netmap_vp_adapter */
1374 	error = netmap_vale_vp_krings_create(na);
1375 	if (error)
1376 		return error;
1377 	error = netmap_bwrap_krings_create_common(na);
1378 	if (error) {
1379 		netmap_vale_vp_krings_delete(na);
1380 	}
1381 	return error;
1382 }
1383 
1384 static void
1385 netmap_vale_bwrap_krings_delete(struct netmap_adapter *na)
1386 {
1387 	netmap_bwrap_krings_delete_common(na);
1388 	netmap_vale_vp_krings_delete(na);
1389 }
1390 
1391 static int
1392 netmap_vale_bwrap_attach(const char *nr_name, struct netmap_adapter *hwna)
1393 {
1394 	struct netmap_bwrap_adapter *bna;
1395 	struct netmap_adapter *na = NULL;
1396 	struct netmap_adapter *hostna = NULL;
1397 	int error;
1398 
1399 	bna = nm_os_malloc(sizeof(*bna));
1400 	if (bna == NULL) {
1401 		return ENOMEM;
1402 	}
1403 	na = &bna->up.up;
1404 	strlcpy(na->name, nr_name, sizeof(na->name));
1405 	na->nm_register = netmap_bwrap_reg;
1406 	na->nm_txsync = netmap_vale_vp_txsync;
1407 	// na->nm_rxsync = netmap_bwrap_rxsync;
1408 	na->nm_krings_create = netmap_vale_bwrap_krings_create;
1409 	na->nm_krings_delete = netmap_vale_bwrap_krings_delete;
1410 	na->nm_notify = netmap_bwrap_notify;
1411 	bna->up.retry = 1; /* XXX maybe this should depend on the hwna */
1412 	/* Set the mfs, needed on the VALE mismatch datapath. */
1413 	bna->up.mfs = NM_BDG_MFS_DEFAULT;
1414 
1415 	if (hwna->na_flags & NAF_HOST_RINGS) {
1416 		hostna = &bna->host.up;
1417 		hostna->nm_notify = netmap_bwrap_notify;
1418 		bna->host.mfs = NM_BDG_MFS_DEFAULT;
1419 	}
1420 
1421 	error = netmap_bwrap_attach_common(na, hwna);
1422 	if (error) {
1423 		nm_os_free(bna);
1424 	}
1425 	return error;
1426 }
1427 
1428 int
1429 netmap_get_vale_na(struct nmreq_header *hdr, struct netmap_adapter **na,
1430 		struct netmap_mem_d *nmd, int create)
1431 {
1432 	return netmap_get_bdg_na(hdr, na, nmd, create, &vale_bdg_ops);
1433 }
1434 
1435 
1436 /* creates a persistent VALE port */
1437 int
1438 nm_vi_create(struct nmreq_header *hdr)
1439 {
1440 	struct nmreq_vale_newif *req =
1441 		(struct nmreq_vale_newif *)(uintptr_t)hdr->nr_body;
1442 	int error = 0;
1443 	/* Build a nmreq_register out of the nmreq_vale_newif,
1444 	 * so that we can call netmap_get_bdg_na(). */
1445 	struct nmreq_register regreq;
1446 	bzero(&regreq, sizeof(regreq));
1447 	regreq.nr_tx_slots = req->nr_tx_slots;
1448 	regreq.nr_rx_slots = req->nr_rx_slots;
1449 	regreq.nr_tx_rings = req->nr_tx_rings;
1450 	regreq.nr_rx_rings = req->nr_rx_rings;
1451 	regreq.nr_mem_id = req->nr_mem_id;
1452 	hdr->nr_reqtype = NETMAP_REQ_REGISTER;
1453 	hdr->nr_body = (uintptr_t)&regreq;
1454 	error = netmap_vi_create(hdr, 0 /* no autodelete */);
1455 	hdr->nr_reqtype = NETMAP_REQ_VALE_NEWIF;
1456 	hdr->nr_body = (uintptr_t)req;
1457 	/* Write back to the original struct. */
1458 	req->nr_tx_slots = regreq.nr_tx_slots;
1459 	req->nr_rx_slots = regreq.nr_rx_slots;
1460 	req->nr_tx_rings = regreq.nr_tx_rings;
1461 	req->nr_rx_rings = regreq.nr_rx_rings;
1462 	req->nr_mem_id = regreq.nr_mem_id;
1463 	return error;
1464 }
1465 
1466 /* remove a persistent VALE port from the system */
1467 int
1468 nm_vi_destroy(const char *name)
1469 {
1470 	struct ifnet *ifp;
1471 	struct netmap_vp_adapter *vpna;
1472 	int error;
1473 
1474 	ifp = ifunit_ref(name);
1475 	if (!ifp)
1476 		return ENXIO;
1477 	NMG_LOCK();
1478 	/* make sure this is actually a VALE port */
1479 	if (!NM_NA_VALID(ifp) || NA(ifp)->nm_register != netmap_vp_reg) {
1480 		error = EINVAL;
1481 		goto err;
1482 	}
1483 
1484 	vpna = (struct netmap_vp_adapter *)NA(ifp);
1485 
1486 	/* we can only destroy ports that were created via NETMAP_REQ_VALE_NEWIF */
1487 	if (vpna->autodelete) {
1488 		error = EINVAL;
1489 		goto err;
1490 	}
1491 
1492 	/* also make sure that nobody is using the interface */
1493 	if (NETMAP_OWNED_BY_ANY(&vpna->up) ||
1494 	    vpna->up.na_refcount > 1 /* any ref besides the one in nm_vi_create()? */) {
1495 		error = EBUSY;
1496 		goto err;
1497 	}
1498 
1499 	NMG_UNLOCK();
1500 
1501 	if (netmap_verbose)
1502 		nm_prinf("destroying a persistent vale interface %s", ifp->if_xname);
1503 	/* Linux requires that all references be released
1504 	 * before unregistering
1505 	 */
1506 	netmap_detach(ifp);
1507 	if_rele(ifp);
1508 	nm_os_vi_detach(ifp);
1509 	return 0;
1510 
1511 err:
1512 	NMG_UNLOCK();
1513 	if_rele(ifp);
1514 	return error;
1515 }
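
/*
 * Userspace typically reaches nm_vi_create()/nm_vi_destroy() through
 * the vale-ctl utility, e.g. (illustrative, "vi0" being a hypothetical
 * interface name):
 *
 *	vale-ctl -n vi0		# create a persistent VALE port
 *	vale-ctl -r vi0		# destroy it
 */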
1516 
1517 static int
1518 nm_update_info(struct nmreq_register *req, struct netmap_adapter *na)
1519 {
1520 	req->nr_rx_rings = na->num_rx_rings;
1521 	req->nr_tx_rings = na->num_tx_rings;
1522 	req->nr_rx_slots = na->num_rx_desc;
1523 	req->nr_tx_slots = na->num_tx_desc;
1524 	return netmap_mem_get_info(na->nm_mem, &req->nr_memsize, NULL,
1525 					&req->nr_mem_id);
1526 }
1527 
1528 
1529 /*
1530  * Create a virtual interface registered to the system.
1531  * The interface will be attached to a bridge later.
1532  */
1533 int
1534 netmap_vi_create(struct nmreq_header *hdr, int autodelete)
1535 {
1536 	struct nmreq_register *req = (struct nmreq_register *)(uintptr_t)hdr->nr_body;
1537 	struct ifnet *ifp;
1538 	struct netmap_vp_adapter *vpna;
1539 	struct netmap_mem_d *nmd = NULL;
1540 	int error;
1541 
1542 	if (hdr->nr_reqtype != NETMAP_REQ_REGISTER) {
1543 		return EINVAL;
1544 	}
1545 
1546 	/* the name must not include the VALE prefix */
1547 	if (!strncmp(hdr->nr_name, NM_BDG_NAME, strlen(NM_BDG_NAME)))
1548 		return EINVAL;
1549 	if (strlen(hdr->nr_name) >= IFNAMSIZ) {
1550 		return EINVAL;
1551 	}
1552 	ifp = ifunit_ref(hdr->nr_name);
1553 	if (ifp) { /* already exists, cannot create a new one */
1554 		error = EEXIST;
1555 		NMG_LOCK();
1556 		if (NM_NA_VALID(ifp)) {
1557 			int update_err = nm_update_info(req, NA(ifp));
1558 			if (update_err)
1559 				error = update_err;
1560 		}
1561 		NMG_UNLOCK();
1562 		if_rele(ifp);
1563 		return error;
1564 	}
1565 	error = nm_os_vi_persist(hdr->nr_name, &ifp);
1566 	if (error)
1567 		return error;
1568 
1569 	NMG_LOCK();
1570 	if (req->nr_mem_id) {
1571 		nmd = netmap_mem_find(req->nr_mem_id);
1572 		if (nmd == NULL) {
1573 			error = EINVAL;
1574 			goto err_1;
1575 		}
1576 	}
1577 	/* netmap_vale_vp_create creates a struct netmap_vp_adapter */
1578 	error = netmap_vale_vp_create(hdr, ifp, nmd, &vpna);
1579 	if (error) {
1580 		if (netmap_debug & NM_DEBUG_VALE)
1581 			nm_prerr("error %d", error);
1582 		goto err_1;
1583 	}
1584 	/* persist-specific routines */
1585 	vpna->up.nm_bdg_ctl = netmap_vp_bdg_ctl;
1586 	if (!autodelete) {
1587 		netmap_adapter_get(&vpna->up);
1588 	} else {
1589 		vpna->autodelete = 1;
1590 	}
1591 	NM_ATTACH_NA(ifp, &vpna->up);
1592 	/* return the updated info */
1593 	error = nm_update_info(req, &vpna->up);
1594 	if (error) {
1595 		goto err_2;
1596 	}
1597 	ND("returning nr_mem_id %d", req->nr_mem_id);
1598 	if (nmd)
1599 		netmap_mem_put(nmd);
1600 	NMG_UNLOCK();
1601 	ND("created %s", ifp->if_xname);
1602 	return 0;
1603 
1604 err_2:
1605 	netmap_detach(ifp);
1606 err_1:
1607 	if (nmd)
1608 		netmap_mem_put(nmd);
1609 	NMG_UNLOCK();
1610 	nm_os_vi_detach(ifp);
1611 
1612 	return error;
1613 }
1614 
1615 #endif /* WITH_VALE */
1616