xref: /freebsd/sys/dev/netmap/netmap_kern.h (revision 224e0c2f)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (C) 2011-2014 Matteo Landi, Luigi Rizzo
5  * Copyright (C) 2013-2016 Universita` di Pisa
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  *   1. Redistributions of source code must retain the above copyright
12  *      notice, this list of conditions and the following disclaimer.
13  *   2. Redistributions in binary form must reproduce the above copyright
14  *      notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  */
29 
30 /*
31  * $FreeBSD$
32  *
33  * This header contains the definitions of constants and function
34  * prototypes used only in kernel space.
35  */
36 
37 #ifndef _NET_NETMAP_KERN_H_
38 #define _NET_NETMAP_KERN_H_
39 
40 #if defined(linux)
41 
42 #if  defined(CONFIG_NETMAP_VALE)
43 #define WITH_VALE
44 #endif
45 #if defined(CONFIG_NETMAP_PIPE)
46 #define WITH_PIPES
47 #endif
48 #if defined(CONFIG_NETMAP_MONITOR)
49 #define WITH_MONITOR
50 #endif
51 #if defined(CONFIG_NETMAP_GENERIC)
52 #define WITH_GENERIC
53 #endif
54 #if defined(CONFIG_NETMAP_PTNETMAP_GUEST)
55 #define WITH_PTNETMAP_GUEST
56 #endif
57 #if defined(CONFIG_NETMAP_PTNETMAP_HOST)
58 #define WITH_PTNETMAP_HOST
59 #endif
60 #if defined(CONFIG_NETMAP_SINK)
61 #define WITH_SINK
62 #endif
63 
64 #elif defined (_WIN32)
65 #define WITH_VALE	// comment out to disable VALE support
66 #define WITH_PIPES
67 #define WITH_MONITOR
68 #define WITH_GENERIC
69 
70 #else	/* neither linux nor windows */
71 #define WITH_VALE	// comment out to disable VALE support
72 #define WITH_PIPES
73 #define WITH_MONITOR
74 #define WITH_GENERIC
75 #define WITH_PTNETMAP_HOST	/* ptnetmap host support */
76 #define WITH_PTNETMAP_GUEST	/* ptnetmap guest support */
77 
78 #endif
79 
80 #if defined(__FreeBSD__)
81 #include <sys/selinfo.h>
82 
83 #define likely(x)	__builtin_expect((long)!!(x), 1L)
84 #define unlikely(x)	__builtin_expect((long)!!(x), 0L)
85 #define __user
86 
87 #define	NM_LOCK_T	struct mtx	/* low level spinlock, used to protect queues */
88 
89 #define NM_MTX_T	struct sx	/* OS-specific mutex (sleepable) */
90 #define NM_MTX_INIT(m)		sx_init(&(m), #m)
91 #define NM_MTX_DESTROY(m)	sx_destroy(&(m))
92 #define NM_MTX_LOCK(m)		sx_xlock(&(m))
93 #define NM_MTX_UNLOCK(m)	sx_xunlock(&(m))
94 #define NM_MTX_ASSERT(m)	sx_assert(&(m), SA_XLOCKED)
95 
96 #define	NM_SELINFO_T	struct nm_selinfo
97 #define NM_SELRECORD_T	struct thread
98 #define	MBUF_LEN(m)	((m)->m_pkthdr.len)
99 #define MBUF_TXQ(m)	((m)->m_pkthdr.flowid)
100 #define MBUF_TRANSMIT(na, ifp, m)	((na)->if_transmit(ifp, m))
101 #define	GEN_TX_MBUF_IFP(m)	((m)->m_pkthdr.rcvif)
102 
103 #define NM_ATOMIC_T	volatile int	// XXX ?
104 /* atomic operations */
105 #include <machine/atomic.h>
106 #define NM_ATOMIC_TEST_AND_SET(p)       (!atomic_cmpset_acq_int((p), 0, 1))
107 #define NM_ATOMIC_CLEAR(p)              atomic_store_rel_int((p), 0)
108 
109 #if __FreeBSD_version >= 1100030
110 #define	WNA(_ifp)	(_ifp)->if_netmap
111 #else /* older FreeBSD */
112 #define	WNA(_ifp)	(_ifp)->if_pspare[0]
113 #endif /* older FreeBSD */
114 
115 #if __FreeBSD_version >= 1100005
116 struct netmap_adapter *netmap_getna(if_t ifp);
117 #endif
118 
119 #if __FreeBSD_version >= 1100027
120 #define MBUF_REFCNT(m)		((m)->m_ext.ext_count)
121 #define SET_MBUF_REFCNT(m, x)   (m)->m_ext.ext_count = x
122 #else
123 #define MBUF_REFCNT(m)		((m)->m_ext.ref_cnt ? *((m)->m_ext.ref_cnt) : -1)
124 #define SET_MBUF_REFCNT(m, x)   *((m)->m_ext.ref_cnt) = x
125 #endif
126 
127 #define MBUF_QUEUED(m)		1
128 
129 struct nm_selinfo {
130 	struct selinfo si;
131 	struct mtx m;
132 };
133 
134 
135 // XXX linux struct, not used in FreeBSD
136 struct net_device_ops {
137 };
138 struct ethtool_ops {
139 };
140 struct hrtimer {
141 };
142 #define NM_BNS_GET(b)
143 #define NM_BNS_PUT(b)
144 
145 #elif defined (linux)
146 
147 #define	NM_LOCK_T	safe_spinlock_t	// see bsd_glue.h
148 #define	NM_SELINFO_T	wait_queue_head_t
149 #define	MBUF_LEN(m)	((m)->len)
150 #define MBUF_TRANSMIT(na, ifp, m)							\
151 	({										\
152 		/* Avoid infinite recursion with generic. */				\
153 		m->priority = NM_MAGIC_PRIORITY_TX;					\
154 		(((struct net_device_ops *)(na)->if_transmit)->ndo_start_xmit(m, ifp));	\
155 		0;									\
156 	})
157 
158 /* See explanation in nm_os_generic_xmit_frame. */
159 #define	GEN_TX_MBUF_IFP(m)	((struct ifnet *)skb_shinfo(m)->destructor_arg)
160 
161 #define NM_ATOMIC_T	volatile long unsigned int
162 
163 #define NM_MTX_T	struct mutex	/* OS-specific sleepable lock */
164 #define NM_MTX_INIT(m)	mutex_init(&(m))
165 #define NM_MTX_DESTROY(m)	do { (void)(m); } while (0)
166 #define NM_MTX_LOCK(m)		mutex_lock(&(m))
167 #define NM_MTX_UNLOCK(m)	mutex_unlock(&(m))
168 #define NM_MTX_ASSERT(m)	mutex_is_locked(&(m))
169 
170 #ifndef DEV_NETMAP
171 #define DEV_NETMAP
172 #endif /* DEV_NETMAP */
173 
174 #elif defined (__APPLE__)
175 
176 #warning apple support is incomplete.
177 #define likely(x)	__builtin_expect(!!(x), 1)
178 #define unlikely(x)	__builtin_expect(!!(x), 0)
179 #define	NM_LOCK_T	IOLock *
180 #define	NM_SELINFO_T	struct selinfo
181 #define	MBUF_LEN(m)	((m)->m_pkthdr.len)
182 
183 #elif defined (_WIN32)
184 #include "../../../WINDOWS/win_glue.h"
185 
186 #define NM_SELRECORD_T		IO_STACK_LOCATION
187 #define NM_SELINFO_T		win_SELINFO		// see win_glue.h
188 #define NM_LOCK_T		win_spinlock_t	// see win_glue.h
189 #define NM_MTX_T		KGUARDED_MUTEX	/* OS-specific mutex (sleepable) */
190 
191 #define NM_MTX_INIT(m)		KeInitializeGuardedMutex(&m);
192 #define NM_MTX_DESTROY(m)	do { (void)(m); } while (0)
193 #define NM_MTX_LOCK(m)		KeAcquireGuardedMutex(&(m))
194 #define NM_MTX_UNLOCK(m)	KeReleaseGuardedMutex(&(m))
195 #define NM_MTX_ASSERT(m)	assert(&m.Count>0)
196 
197 //These linknames are for the NDIS driver
198 #define NETMAP_NDIS_LINKNAME_STRING             L"\\DosDevices\\NMAPNDIS"
199 #define NETMAP_NDIS_NTDEVICE_STRING             L"\\Device\\NMAPNDIS"
200 
201 //Definition of internal driver-to-driver ioctl codes
202 #define NETMAP_KERNEL_XCHANGE_POINTERS		_IO('i', 180)
203 #define NETMAP_KERNEL_SEND_SHUTDOWN_SIGNAL	_IO_direct('i', 195)
204 
205 //Empty data structures are not permitted by MSVC compiler
206 //XXX_ale, try to solve this problem
207 struct net_device_ops{
208 	char data[1];
209 };
210 typedef struct ethtool_ops{
211 	char data[1];
212 };
213 typedef struct hrtimer{
214 	KTIMER timer;
215 	BOOLEAN active;
216 	KDPC deferred_proc;
217 };
218 
219 /* MSVC does not have likely/unlikely support */
220 #ifdef _MSC_VER
221 #define likely(x)	(x)
222 #define unlikely(x)	(x)
223 #else
224 #define likely(x)	__builtin_expect((long)!!(x), 1L)
225 #define unlikely(x)	__builtin_expect((long)!!(x), 0L)
226 #endif //_MSC_VER
227 
228 #else
229 
230 #error unsupported platform
231 
232 #endif /* end - platform-specific code */
233 
234 #ifndef _WIN32 /* support for emulated sysctl */
235 #define SYSBEGIN(x)
236 #define SYSEND
237 #endif /* _WIN32 */
238 
239 #define NM_ACCESS_ONCE(x)	(*(volatile __typeof__(x) *)&(x))
240 
241 #define	NMG_LOCK_T		NM_MTX_T
242 #define	NMG_LOCK_INIT()		NM_MTX_INIT(netmap_global_lock)
243 #define	NMG_LOCK_DESTROY()	NM_MTX_DESTROY(netmap_global_lock)
244 #define	NMG_LOCK()		NM_MTX_LOCK(netmap_global_lock)
245 #define	NMG_UNLOCK()		NM_MTX_UNLOCK(netmap_global_lock)
246 #define	NMG_LOCK_ASSERT()	NM_MTX_ASSERT(netmap_global_lock)
247 
248 #if defined(__FreeBSD__)
249 #define nm_prerr	printf
250 #define nm_prinf	printf
251 #elif defined (_WIN32)
252 #define nm_prerr	DbgPrint
253 #define nm_prinf	DbgPrint
254 #elif defined(linux)
255 #define nm_prerr(fmt, arg...)    printk(KERN_ERR fmt, ##arg)
256 #define nm_prinf(fmt, arg...)    printk(KERN_INFO fmt, ##arg)
257 #endif
258 
259 #define ND(format, ...)
260 #define D(format, ...)						\
261 	do {							\
262 		struct timeval __xxts;				\
263 		microtime(&__xxts);				\
264 		nm_prerr("%03d.%06d [%4d] %-25s " format "\n",	\
265 		(int)__xxts.tv_sec % 1000, (int)__xxts.tv_usec,	\
266 		__LINE__, __FUNCTION__, ##__VA_ARGS__);		\
267 	} while (0)
268 
269 /* rate limited, lps indicates how many per second */
270 #define RD(lps, format, ...)					\
271 	do {							\
272 		static int t0, __cnt;				\
273 		if (t0 != time_second) {			\
274 			t0 = time_second;			\
275 			__cnt = 0;				\
276 		}						\
277 		if (__cnt++ < lps)				\
278 			D(format, ##__VA_ARGS__);		\
279 	} while (0)
280 
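/*
 * Example (illustrative sketch only): D() logs unconditionally with a
 * timestamp, line number and function name, while RD() limits the output
 * to 'lps' lines per second. A hypothetical rxsync path could use them
 * as follows ('head', 'lim', 'n' and 'kring' are hypothetical locals):
 *
 *	if (unlikely(head > lim))
 *		D("%s: invalid head %u (lim %u)", kring->name, head, lim);
 *	if (n == 0)
 *		RD(5, "%s: no slots available", kring->name);
 */
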
281 struct netmap_adapter;
282 struct nm_bdg_fwd;
283 struct nm_bridge;
284 struct netmap_priv_d;
285 
286 /* os-specific NM_SELINFO_T initialization/destruction functions */
287 void nm_os_selinfo_init(NM_SELINFO_T *);
288 void nm_os_selinfo_uninit(NM_SELINFO_T *);
289 
290 const char *nm_dump_buf(char *p, int len, int lim, char *dst);
291 
292 void nm_os_selwakeup(NM_SELINFO_T *si);
293 void nm_os_selrecord(NM_SELRECORD_T *sr, NM_SELINFO_T *si);
294 
295 int nm_os_ifnet_init(void);
296 void nm_os_ifnet_fini(void);
297 void nm_os_ifnet_lock(void);
298 void nm_os_ifnet_unlock(void);
299 
300 void nm_os_get_module(void);
301 void nm_os_put_module(void);
302 
303 void netmap_make_zombie(struct ifnet *);
304 void netmap_undo_zombie(struct ifnet *);
305 
306 /* os independent alloc/realloc/free */
307 void *nm_os_malloc(size_t);
308 void *nm_os_realloc(void *, size_t new_size, size_t old_size);
309 void nm_os_free(void *);
310 
311 /* passes a packet up to the host stack.
312  * If the packet is sent (or dropped) immediately it returns NULL,
313  * otherwise it links the packet to prev and returns m.
314  * In this case, a final call with m=NULL and prev != NULL will send up
315  * the entire chain to the host stack.
316  */
317 void *nm_os_send_up(struct ifnet *, struct mbuf *m, struct mbuf *prev);
318 
319 int nm_os_mbuf_has_offld(struct mbuf *m);
320 
321 #include "netmap_mbq.h"
322 
323 extern NMG_LOCK_T	netmap_global_lock;
324 
325 enum txrx { NR_RX = 0, NR_TX = 1, NR_TXRX };
326 
327 static __inline const char*
328 nm_txrx2str(enum txrx t)
329 {
330 	return (t == NR_RX ? "RX" : "TX");
331 }
332 
333 static __inline enum txrx
334 nm_txrx_swap(enum txrx t)
335 {
336 	return (t == NR_RX ? NR_TX : NR_RX);
337 }
338 
339 #define for_rx_tx(t)	for ((t) = 0; (t) < NR_TXRX; (t)++)
340 
341 #ifdef WITH_MONITOR
342 struct netmap_zmon_list {
343 	struct netmap_kring *next;
344 	struct netmap_kring *prev;
345 };
346 #endif /* WITH_MONITOR */
347 
348 /*
349  * private, kernel view of a ring. Keeps track of the status of
350  * a ring across system calls.
351  *
352  *	nr_hwcur	index of the next buffer to refill.
353  *			It corresponds to ring->head
354  *			at the time the system call returns.
355  *
356  *	nr_hwtail	index of the first buffer owned by the kernel.
357  *			On RX, hwcur->hwtail are receive buffers
358  *			not yet released. hwcur is advanced following
359  *			ring->head, hwtail is advanced on incoming packets,
360  *			ring->head, hwtail is advanced on incoming packets,
361  *			and a wakeup is generated when hwtail passes ring->cur.
362  *			On TX, hwcur->rcur have been filled by the sender
362  *			but not sent yet to the NIC; rcur->hwtail are available
363  *			for new transmissions, and hwtail->hwcur-1 are pending
364  *			transmissions not yet acknowledged.
365  *
366  * The indexes in the NIC and netmap rings are offset by nkr_hwofs slots.
367  * This is so that, on a reset, buffers owned by userspace are not
368  * modified by the kernel. In particular:
369  * RX rings: the next empty buffer (hwtail + hwofs) coincides with
370  * 	the next empty buffer as known by the hardware (next_to_check or so).
371  * TX rings: hwcur + hwofs coincides with next_to_send
372  *
373  * For received packets, slot->flags is set to nkr_slot_flags
374  * so we can provide a proper initial value (e.g. set NS_FORWARD
375  * when operating in 'transparent' mode).
376  *
377  * The following fields are used to implement lock-free copy of packets
378  * from input to output ports in VALE switch:
379  *	nkr_hwlease	buffer after the last one being copied.
380  *			A writer in nm_bdg_flush reserves N buffers
381  *			from nr_hwlease, advances it, then does the
382  *			copy outside the lock.
383  *			In RX rings (used for VALE ports),
384  *			nkr_hwtail <= nkr_hwlease < nkr_hwcur+N-1
385  *			In TX rings (used for NIC or host stack ports)
386  *			nkr_hwcur <= nkr_hwlease < nkr_hwtail
387  *	nkr_leases	array of nkr_num_slots where writers can report
388  *			completion of their block. NR_NOSLOT (~0) indicates
389  *			that the writer has not finished yet
390  *	nkr_lease_idx	index of next free slot in nr_leases, to be assigned
391  *
392  * The kring is manipulated by txsync/rxsync and the generic netmap functions.
393  *
394  * Concurrent rxsync or txsync on the same ring are prevented
395  * by nm_kr_(try)lock() which in turn uses nr_busy. This is all we need
396  * for NIC rings, and for TX rings attached to the host stack.
397  *
398  * RX rings attached to the host stack use an mbq (rx_queue) on both
399  * rxsync_from_host() and netmap_transmit(). The mbq is protected
400  * by its internal lock.
401  *
402  * RX rings attached to the VALE switch are accessed by both senders
403  * and receiver. They are protected through the q_lock on the RX ring.
404  */
405 struct netmap_kring {
406 	struct netmap_ring	*ring;
407 
408 	uint32_t	nr_hwcur;
409 	uint32_t	nr_hwtail;
410 
411 	/*
412 	 * Copies of values in user rings, so we do not need to look
413 	 * at the ring (which could be modified). These are set in the
414 	 * *sync_prologue()/finalize() routines.
415 	 */
416 	uint32_t	rhead;
417 	uint32_t	rcur;
418 	uint32_t	rtail;
419 
420 	uint32_t	nr_kflags;	/* private driver flags */
421 #define NKR_PENDINTR	0x1		// Pending interrupt.
422 #define NKR_EXCLUSIVE	0x2		/* exclusive binding */
423 #define NKR_FORWARD	0x4		/* (host ring only) there are
424 					   packets to forward
425 					 */
426 #define NKR_NEEDRING	0x8		/* ring needed even if users==0
427 					 * (used internally by pipes and
428 					 *  by ptnetmap host ports)
429 					 */
430 
431 	uint32_t	nr_mode;
432 	uint32_t	nr_pending_mode;
433 #define NKR_NETMAP_OFF	0x0
434 #define NKR_NETMAP_ON	0x1
435 
436 	uint32_t	nkr_num_slots;
437 
438 	/*
439 	 * On a NIC reset, the NIC ring indexes may be reset but the
440 	 * indexes in the netmap rings remain the same. nkr_hwofs
441 	 * keeps track of the offset between the two.
442 	 */
443 	int32_t		nkr_hwofs;
444 
445 	uint16_t	nkr_slot_flags;	/* initial value for flags */
446 
447 	/* last_reclaim is an opaque marker to help reduce the frequency
448 	 * of operations such as reclaiming tx buffers. A possible use
449 	 * is to set it to ticks and do the reclaim only once per tick.
450 	 */
451 	uint64_t	last_reclaim;
452 
453 
454 	NM_SELINFO_T	si;		/* poll/select wait queue */
455 	NM_LOCK_T	q_lock;		/* protects kring and ring. */
456 	NM_ATOMIC_T	nr_busy;	/* prevent concurrent syscalls */
457 
458 	struct netmap_adapter *na;
459 
460 	/* The following fields are for VALE switch support */
461 	struct nm_bdg_fwd *nkr_ft;
462 	uint32_t	*nkr_leases;
463 #define NR_NOSLOT	((uint32_t)~0)	/* used in nkr_*lease* */
464 	uint32_t	nkr_hwlease;
465 	uint32_t	nkr_lease_idx;
466 
467 	/* while nkr_stopped is set, no new [tr]xsync operations can
468 	 * be started on this kring.
469 	 * This is used by netmap_disable_all_rings()
470 	 * to find a synchronization point where critical data
471 	 * structures pointed to by the kring can be added or removed
472 	 */
473 	volatile int nkr_stopped;
474 
475 	/* Support for adapters without native netmap support.
476 	 * On tx rings we preallocate an array of tx buffers
477 	 * (same size as the netmap ring), on rx rings we
478 	 * store incoming mbufs in a queue that is drained by
479 	 * a rxsync.
480 	 */
481 	struct mbuf	**tx_pool;
482 	struct mbuf	*tx_event;	/* TX event used as a notification */
483 	NM_LOCK_T	tx_event_lock;	/* protects the tx_event mbuf */
484 	struct mbq	rx_queue;       /* intercepted rx mbufs. */
485 
486 	uint32_t	users;		/* existing bindings for this ring */
487 
488 	uint32_t	ring_id;	/* kring identifier */
489 	enum txrx	tx;		/* kind of ring (tx or rx) */
490 	char name[64];			/* diagnostic */
491 
492 	/* [tx]sync callback for this kring.
493 	 * The default nm_kring_create callback (netmap_krings_create)
494 	 * sets the nm_sync callback of each hardware tx(rx) kring to
495 	 * the corresponding nm_txsync(nm_rxsync) taken from the
496 	 * netmap_adapter; moreover, it sets the sync callback
497 	 * of the host tx(rx) ring to netmap_txsync_to_host
498 	 * (netmap_rxsync_from_host).
499 	 *
500 	 * Overrides: the above configuration is not changed by
501 	 * any of the nm_krings_create callbacks.
502 	 */
503 	int (*nm_sync)(struct netmap_kring *kring, int flags);
504 	int (*nm_notify)(struct netmap_kring *kring, int flags);
505 
506 #ifdef WITH_PIPES
507 	struct netmap_kring *pipe;	/* if this is a pipe ring,
508 					 * pointer to the other end
509 					 */
510 #endif /* WITH_PIPES */
511 
512 #ifdef WITH_VALE
513 	int (*save_notify)(struct netmap_kring *kring, int flags);
514 #endif
515 
516 #ifdef WITH_MONITOR
517 	/* array of krings that are monitoring this kring */
518 	struct netmap_kring **monitors;
519 	uint32_t max_monitors; /* current size of the monitors array */
520 	uint32_t n_monitors;	/* next unused entry in the monitor array */
521 	uint32_t mon_pos[NR_TXRX]; /* index of this ring in the monitored ring array */
522 	uint32_t mon_tail;  /* last seen slot on rx */
523 
524 	/* circular list of zero-copy monitors */
525 	struct netmap_zmon_list zmon_list[NR_TXRX];
526 
527 	/*
528 	 * Monitors work by intercepting the sync and notify callbacks of the
529 	 * monitored krings. This is implemented by replacing the pointers
530 	 * above and saving the previous ones in mon_* pointers below
531 	 */
532 	int (*mon_sync)(struct netmap_kring *kring, int flags);
533 	int (*mon_notify)(struct netmap_kring *kring, int flags);
534 
535 #endif
536 }
537 #ifdef _WIN32
538 __declspec(align(64));
539 #else
540 __attribute__((__aligned__(64)));
541 #endif
542 
543 /* return 1 iff the kring needs to be turned on */
544 static inline int
545 nm_kring_pending_on(struct netmap_kring *kring)
546 {
547 	return kring->nr_pending_mode == NKR_NETMAP_ON &&
548 	       kring->nr_mode == NKR_NETMAP_OFF;
549 }
550 
551 /* return 1 iff the kring needs to be turned off */
552 static inline int
553 nm_kring_pending_off(struct netmap_kring *kring)
554 {
555 	return kring->nr_pending_mode == NKR_NETMAP_OFF &&
556 	       kring->nr_mode == NKR_NETMAP_ON;
557 }
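
/*
 * Example (sketch of the common pattern in a driver nm_register callback,
 * using for_rx_tx(), nma_get_nrings() and NMR() defined elsewhere in this
 * header; 'na' and 'onoff' are the callback arguments):
 *
 *	enum txrx t;
 *	u_int i;
 *
 *	for_rx_tx(t) {
 *		for (i = 0; i <= nma_get_nrings(na, t); i++) {
 *			struct netmap_kring *kring = &NMR(na, t)[i];
 *
 *			if (onoff && nm_kring_pending_on(kring))
 *				kring->nr_mode = NKR_NETMAP_ON;
 *			else if (!onoff && nm_kring_pending_off(kring))
 *				kring->nr_mode = NKR_NETMAP_OFF;
 *		}
 *	}
 */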
558 
559 /* return the next index, with wraparound */
560 static inline uint32_t
561 nm_next(uint32_t i, uint32_t lim)
562 {
563 	return unlikely (i == lim) ? 0 : i + 1;
564 }
565 
566 
567 /* return the previous index, with wraparound */
568 static inline uint32_t
569 nm_prev(uint32_t i, uint32_t lim)
570 {
571 	return unlikely (i == 0) ? lim : i - 1;
572 }
573 
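/*
 * Example (sketch): the typical way a txsync routine walks the slots
 * between nr_hwcur and rhead; 'kring' and 'ring' are the usual txsync
 * variables, the NIC programming step is driver specific:
 *
 *	u_int lim = kring->nkr_num_slots - 1;
 *	u_int nm_i = kring->nr_hwcur;
 *
 *	while (nm_i != kring->rhead) {
 *		struct netmap_slot *slot = &ring->slot[nm_i];
 *		... program one NIC descriptor from slot ...
 *		nm_i = nm_next(nm_i, lim);
 *	}
 *	kring->nr_hwcur = kring->rhead;
 */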
574 
575 /*
576  *
577  * Here is the layout for the Rx and Tx rings.
578 
579        RxRING                            TxRING
580 
581       +-----------------+            +-----------------+
582       |                 |            |                 |
583       |XXX free slot XXX|            |XXX free slot XXX|
584       +-----------------+            +-----------------+
585 head->| owned by user   |<-hwcur     | not sent to nic |<-hwcur
586       |                 |            | yet             |
587       +-----------------+            |                 |
588  cur->| available to    |            |                 |
589       | user, not read  |            +-----------------+
590       | yet             |       cur->| (being          |
591       |                 |            |  prepared)      |
592       |                 |            |                 |
593       +-----------------+            +     ------      +
594 tail->|                 |<-hwtail    |                 |<-hwlease
595       | (being          | ...        |                 | ...
596       |  prepared)      | ...        |                 | ...
597       +-----------------+ ...        |                 | ...
598       |                 |<-hwlease   +-----------------+
599       |                 |      tail->|                 |<-hwtail
600       |                 |            |                 |
601       |                 |            |                 |
602       |                 |            |                 |
603       +-----------------+            +-----------------+
604 
605  * The cur/tail (user view) and hwcur/hwtail (kernel view)
606  * are used in the normal operation of the card.
607  *
608  * When a ring is the output of a switch port (Rx ring for
609  * a VALE port, Tx ring for the host stack or NIC), slots
610  * are reserved in blocks through 'hwlease' which points
611  * to the next unused slot.
612  * On an Rx ring, hwlease is always after hwtail,
613  * and completions cause hwtail to advance.
614  * On a Tx ring, hwlease is always between cur and hwtail,
615  * and completions cause cur to advance.
616  *
617  * nm_kr_space() returns the maximum number of slots that
618  * can be assigned.
619  * nm_kr_lease() reserves the required number of buffers,
620  *    advances nkr_hwlease and also returns an entry in
621  *    a circular array where completions should be reported.
622  */
623 
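/*
 * Example (rough sketch of the lease protocol described above, as used
 * by a VALE-like sender; nm_kr_space() and nm_kr_lease() live in the
 * VALE code, error handling and the hwtail update are omitted):
 *
 *	take q_lock;
 *	n = nm_kr_space(kring, is_rx);             at most n slots available
 *	lease_idx = nm_kr_lease(kring, n, is_rx);  reserve, advance nkr_hwlease
 *	release q_lock;
 *	copy the packets into the reserved slots, outside the lock;
 *	kring->nkr_leases[lease_idx] = new_tail;   report completion
 */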
624 
625 struct netmap_lut {
626 	struct lut_entry *lut;
627 	uint32_t objtotal;	/* max buffer index */
628 	uint32_t objsize;	/* buffer size */
629 };
630 
631 struct netmap_vp_adapter; // forward
632 
633 /*
634  * The "struct netmap_adapter" extends the "struct adapter"
635  * (or equivalent) device descriptor.
636  * It contains all base fields needed to support netmap operation.
637  * There are in fact different types of netmap adapters
638  * (native, generic, VALE switch...) so a netmap_adapter is
639  * just the first field in the derived type.
640  */
641 struct netmap_adapter {
642 	/*
643 	 * On linux we do not have a good way to tell if an interface
644 	 * is netmap-capable. So we always use the following trick:
645 	 * NA(ifp) points here, and the first entry (which hopefully
646 	 * always exists and is at least 32 bits) contains a magic
647 	 * value which we can use to detect that the interface is good.
648 	 */
649 	uint32_t magic;
650 	uint32_t na_flags;	/* enabled, and other flags */
651 #define NAF_SKIP_INTR	1	/* use the regular interrupt handler.
652 				 * useful during initialization
653 				 */
654 #define NAF_SW_ONLY	2	/* forward packets only to sw adapter */
655 #define NAF_BDG_MAYSLEEP 4	/* the bridge is allowed to sleep when
656 				 * forwarding packets coming from this
657 				 * interface
658 				 */
659 #define NAF_MEM_OWNER	8	/* the adapter uses its own memory area
660 				 * that cannot be changed
661 				 */
662 #define NAF_NATIVE      16      /* the adapter is native.
663 				 * Virtual ports (non persistent vale ports,
664 				 * pipes, monitors...) should never use
665 				 * this flag.
666 				 */
667 #define	NAF_NETMAP_ON	32	/* netmap is active (either native or
668 				 * emulated). Where possible (e.g. FreeBSD)
669 				 * IFCAP_NETMAP also mirrors this flag.
670 				 */
671 #define NAF_HOST_RINGS  64	/* the adapter supports the host rings */
672 #define NAF_FORCE_NATIVE 128	/* the adapter is always NATIVE */
673 #define NAF_PTNETMAP_HOST 256	/* the adapter supports ptnetmap in the host */
674 #define NAF_ZOMBIE	(1U<<30) /* the nic driver has been unloaded */
675 #define	NAF_BUSY	(1U<<31) /* the adapter is used internally and
676 				  * cannot be registered from userspace
677 				  */
678 	int active_fds; /* number of user-space descriptors using this
679 			 interface, which is equal to the number of
680 			 struct netmap_if objs in the mapped region. */
681 
682 	u_int num_rx_rings; /* number of adapter receive rings */
683 	u_int num_tx_rings; /* number of adapter transmit rings */
684 
685 	u_int num_tx_desc;  /* number of descriptor in each queue */
686 	u_int num_rx_desc;
687 
688 	/* tx_rings and rx_rings are private but allocated
689 	 * as a contiguous chunk of memory. Each array has
690 	 * N+1 entries, for the adapter queues and for the host queue.
691 	 */
692 	struct netmap_kring *tx_rings; /* array of TX rings. */
693 	struct netmap_kring *rx_rings; /* array of RX rings. */
694 
695 	void *tailroom;		       /* space below the rings array */
696 				       /* (used for leases) */
697 
698 
699 	NM_SELINFO_T si[NR_TXRX];	/* global wait queues */
700 
701 	/* count users of the global wait queues */
702 	int si_users[NR_TXRX];
703 
704 	void *pdev; /* used to store pci device */
705 
706 	/* copy of if_qflush and if_transmit pointers, to intercept
707 	 * packets from the network stack when netmap is active.
708 	 */
709 	int     (*if_transmit)(struct ifnet *, struct mbuf *);
710 
711 	/* copy of if_input for netmap_send_up() */
712 	void     (*if_input)(struct ifnet *, struct mbuf *);
713 
714 	/* references to the ifnet and device routines, used by
715 	 * the generic netmap functions.
716 	 */
717 	struct ifnet *ifp; /* adapter is ifp->if_softc */
718 
719 	/*---- callbacks for this netmap adapter -----*/
720 	/*
721 	 * nm_dtor() is the cleanup routine called when destroying
722 	 *	the adapter.
723 	 *	Called with NMG_LOCK held.
724 	 *
725 	 * nm_register() is called on NIOCREGIF and close() to enter
726 	 *	or exit netmap mode on the NIC
727 	 *	Called with NMG_LOCK held.
728 	 *
729 	 * nm_txsync() pushes packets to the underlying hw/switch
730 	 *
731 	 * nm_rxsync() collects packets from the underlying hw/switch
732 	 *
733 	 * nm_config() returns configuration information from the OS
734 	 *	Called with NMG_LOCK held.
735 	 *
736 	 * nm_krings_create() create and init the tx_rings and
737 	 * 	rx_rings arrays of kring structures. In particular,
738 	 * 	set the nm_sync callbacks for each ring.
739 	 * 	There is no need to also allocate the corresponding
740 	 * 	netmap_rings, since netmap_mem_rings_create() will always
741 	 * 	be called to provide the missing ones.
742 	 *	Called with NMG_LOCK held.
743 	 *
744 	 * nm_krings_delete() cleanup and delete the tx_rings and rx_rings
745 	 * 	arrays
746 	 *	Called with NMG_LOCK held.
747 	 *
748 	 * nm_notify() is used to act after data have become available
749 	 * 	(or the stopped state of the ring has changed)
750 	 *	For hw devices this is typically a selwakeup(),
751 	 *	but for NIC/host ports attached to a switch (or vice-versa)
752 	 *	we also need to invoke the 'txsync' code downstream.
753 	 *      This callback pointer is actually used only to initialize
754 	 *      kring->nm_notify.
755 	 *      Return values are the same as for netmap_rx_irq().
756 	 */
757 	void (*nm_dtor)(struct netmap_adapter *);
758 
759 	int (*nm_register)(struct netmap_adapter *, int onoff);
760 	void (*nm_intr)(struct netmap_adapter *, int onoff);
761 
762 	int (*nm_txsync)(struct netmap_kring *kring, int flags);
763 	int (*nm_rxsync)(struct netmap_kring *kring, int flags);
764 	int (*nm_notify)(struct netmap_kring *kring, int flags);
765 #define NAF_FORCE_READ      1
766 #define NAF_FORCE_RECLAIM   2
767 #define NAF_CAN_FORWARD_DOWN 4
768 	/* return configuration information */
769 	int (*nm_config)(struct netmap_adapter *,
770 		u_int *txr, u_int *txd, u_int *rxr, u_int *rxd);
771 	int (*nm_krings_create)(struct netmap_adapter *);
772 	void (*nm_krings_delete)(struct netmap_adapter *);
773 #ifdef WITH_VALE
774 	/*
775 	 * nm_bdg_attach() initializes the na_vp field to point
776 	 *      to an adapter that can be attached to a VALE switch. If the
777 	 *      current adapter is already a VALE port, na_vp is simply a cast;
778 	 *      otherwise, na_vp points to a netmap_bwrap_adapter.
779 	 *      If applicable, this callback also initializes na_hostvp,
780 	 *      that can be used to connect the adapter host rings to the
781 	 *      switch.
782 	 *      Called with NMG_LOCK held.
783 	 *
784 	 * nm_bdg_ctl() is called on the actual attach/detach to/from
785 	 *      the switch, to perform adapter-specific
786 	 *      initializations
787 	 *      Called with NMG_LOCK held.
788 	 */
789 	int (*nm_bdg_attach)(const char *bdg_name, struct netmap_adapter *);
790 	int (*nm_bdg_ctl)(struct netmap_adapter *, struct nmreq *, int);
791 
792 	/* adapter used to attach this adapter to a VALE switch (if any) */
793 	struct netmap_vp_adapter *na_vp;
794 	/* adapter used to attach the host rings of this adapter
795 	 * to a VALE switch (if any) */
796 	struct netmap_vp_adapter *na_hostvp;
797 #endif
798 
799 	/* standard refcount to control the lifetime of the adapter
800 	 * (it should be equal to the lifetime of the corresponding ifp)
801 	 */
802 	int na_refcount;
803 
804 	/* memory allocator (opaque)
805 	 * We also cache a pointer to the lut_entry for translating
806 	 * buffer addresses, the total number of buffers and the buffer size.
807 	 */
808  	struct netmap_mem_d *nm_mem;
809 	struct netmap_lut na_lut;
810 
811 	/* additional information attached to this adapter
812 	 * by other netmap subsystems. Currently used by
813 	 * bwrap, LINUX/v1000 and ptnetmap
814 	 */
815 	void *na_private;
816 
817 	/* array of pipes that have this adapter as a parent */
818 	struct netmap_pipe_adapter **na_pipes;
819 	int na_next_pipe;	/* next free slot in the array */
820 	int na_max_pipes;	/* size of the array */
821 
822 	/* Offset of ethernet header for each packet. */
823 	u_int virt_hdr_len;
824 
825 	char name[64];
826 };
827 
828 static __inline u_int
829 nma_get_ndesc(struct netmap_adapter *na, enum txrx t)
830 {
831 	return (t == NR_TX ? na->num_tx_desc : na->num_rx_desc);
832 }
833 
834 static __inline void
835 nma_set_ndesc(struct netmap_adapter *na, enum txrx t, u_int v)
836 {
837 	if (t == NR_TX)
838 		na->num_tx_desc = v;
839 	else
840 		na->num_rx_desc = v;
841 }
842 
843 static __inline u_int
844 nma_get_nrings(struct netmap_adapter *na, enum txrx t)
845 {
846 	return (t == NR_TX ? na->num_tx_rings : na->num_rx_rings);
847 }
848 
849 static __inline void
850 nma_set_nrings(struct netmap_adapter *na, enum txrx t, u_int v)
851 {
852 	if (t == NR_TX)
853 		na->num_tx_rings = v;
854 	else
855 		na->num_rx_rings = v;
856 }
857 
858 static __inline struct netmap_kring*
859 NMR(struct netmap_adapter *na, enum txrx t)
860 {
861 	return (t == NR_TX ? na->tx_rings : na->rx_rings);
862 }
863 
864 /*
865  * If the NIC is owned by the kernel
866  * (i.e., bridge), neither another bridge nor a user can use it;
867  * if the NIC is owned by a user, only users can share it.
868  * Evaluation must be done under NMG_LOCK().
869  */
870 #define NETMAP_OWNED_BY_KERN(na)	((na)->na_flags & NAF_BUSY)
871 #define NETMAP_OWNED_BY_ANY(na) \
872 	(NETMAP_OWNED_BY_KERN(na) || ((na)->active_fds > 0))
873 
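/*
 * Example (sketch): code that wants exclusive use of an adapter, e.g.
 * to attach it to a VALE switch, typically checks ownership and marks
 * the adapter busy while holding the global lock:
 *
 *	NMG_LOCK();
 *	if (NETMAP_OWNED_BY_ANY(na)) {
 *		error = EBUSY;
 *	} else {
 *		na->na_flags |= NAF_BUSY;
 *		... proceed with the attach ...
 *	}
 *	NMG_UNLOCK();
 */
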
874 /*
875  * derived netmap adapters for various types of ports
876  */
877 struct netmap_vp_adapter {	/* VALE software port */
878 	struct netmap_adapter up;
879 
880 	/*
881 	 * Bridge support:
882 	 *
883 	 * bdg_port is the port number used in the bridge;
884 	 * na_bdg points to the bridge this NA is attached to.
885 	 */
886 	int bdg_port;
887 	struct nm_bridge *na_bdg;
888 	int retry;
889 	int autodelete; /* remove the ifp on last reference */
890 
891 	/* Maximum Frame Size, used in bdg_mismatch_datapath() */
892 	u_int mfs;
893 	/* Last source MAC on this port */
894 	uint64_t last_smac;
895 };
896 
897 
898 struct netmap_hw_adapter {	/* physical device */
899 	struct netmap_adapter up;
900 
901 	struct net_device_ops nm_ndo;	// XXX linux only
902 	struct ethtool_ops    nm_eto;	// XXX linux only
903 	const struct ethtool_ops*   save_ethtool;
904 
905 	int (*nm_hw_register)(struct netmap_adapter *, int onoff);
906 };
907 
908 #ifdef WITH_GENERIC
909 /* Mitigation support. */
910 struct nm_generic_mit {
911 	struct hrtimer mit_timer;
912 	int mit_pending;
913 	int mit_ring_idx;  /* index of the ring being mitigated */
914 	struct netmap_adapter *mit_na;  /* backpointer */
915 };
916 
917 struct netmap_generic_adapter {	/* emulated device */
918 	struct netmap_hw_adapter up;
919 
920 	/* Pointer to a previously used netmap adapter. */
921 	struct netmap_adapter *prev;
922 
923 	/* generic netmap adapters support:
924 	 * a net_device_ops struct overrides ndo_select_queue(),
925 	 * save_if_input saves the if_input hook (FreeBSD),
926 	 * mit implements rx interrupt mitigation,
927 	 */
928 	struct net_device_ops generic_ndo;
929 	void (*save_if_input)(struct ifnet *, struct mbuf *);
930 
931 	struct nm_generic_mit *mit;
932 #ifdef linux
933         netdev_tx_t (*save_start_xmit)(struct mbuf *, struct ifnet *);
934 #endif
935 	/* Is the adapter able to use multiple RX slots to scatter
936 	 * each packet pushed up by the driver? */
937 	int rxsg;
938 
939 	/* Is the transmission path controlled by a netmap-aware
940 	 * device queue (i.e. qdisc on linux)? */
941 	int txqdisc;
942 };
943 #endif  /* WITH_GENERIC */
944 
945 static __inline int
946 netmap_real_rings(struct netmap_adapter *na, enum txrx t)
947 {
948 	return nma_get_nrings(na, t) + !!(na->na_flags & NAF_HOST_RINGS);
949 }
950 
951 #ifdef WITH_VALE
952 struct nm_bdg_polling_state;
953 /*
954  * Bridge wrapper for non VALE ports attached to a VALE switch.
955  *
956  * The real device must already have its own netmap adapter (hwna).
957  * The bridge wrapper and the hwna adapter share the same set of
958  * netmap rings and buffers, but they have two separate sets of
959  * krings descriptors, with tx/rx meanings swapped:
960  *
961  *                                  netmap
962  *           bwrap     krings       rings      krings      hwna
963  *         +------+   +------+     +-----+    +------+   +------+
964  *         |tx_rings->|      |\   /|     |----|      |<-tx_rings|
965  *         |      |   +------+ \ / +-----+    +------+   |      |
966  *         |      |             X                        |      |
967  *         |      |            / \                       |      |
968  *         |      |   +------+/   \+-----+    +------+   |      |
969  *         |rx_rings->|      |     |     |----|      |<-rx_rings|
970  *         |      |   +------+     +-----+    +------+   |      |
971  *         +------+                                      +------+
972  *
973  * - packets coming from the bridge go to the bwrap rx rings,
974  *   which are also the hwna tx rings.  The bwrap notify callback
975  *   will then complete the hwna tx (see netmap_bwrap_notify).
976  *
977  * - packets coming from the outside go to the hwna rx rings,
978  *   which are also the bwrap tx rings.  The (overwritten) hwna
979  *   notify method will then complete the bridge tx
980  *   (see netmap_bwrap_intr_notify).
981  *
982  *   The bridge wrapper may optionally connect the hwna 'host' rings
983  *   to the bridge. This is done by using a second port in the
984  *   bridge and connecting it to the 'host' netmap_vp_adapter
985  *   contained in the netmap_bwrap_adapter. The bwrap host adapter
986  *   cross-links the hwna host rings in the same way as shown above.
987  *
988  * - packets coming from the bridge and directed to the host stack
989  *   are handled by the bwrap host notify callback
990  *   (see netmap_bwrap_host_notify)
991  *
992  * - packets coming from the host stack are still handled by the
993  *   overwritten hwna notify callback (netmap_bwrap_intr_notify),
994  *   but are diverted to the host adapter depending on the ring number.
995  *
996  */
997 struct netmap_bwrap_adapter {
998 	struct netmap_vp_adapter up;
999 	struct netmap_vp_adapter host;  /* for host rings */
1000 	struct netmap_adapter *hwna;	/* the underlying device */
1001 
1002 	/*
1003 	 * When we attach a physical interface to the bridge, we
1004 	 * allow the controlling process to terminate, so we need
1005 	 * a place to store the netmap_priv_d data structure.
1006 	 * This is only done when physical interfaces
1007 	 * are attached to a bridge.
1008 	 */
1009 	struct netmap_priv_d *na_kpriv;
1010 	struct nm_bdg_polling_state *na_polling_state;
1011 };
1012 int netmap_bwrap_attach(const char *name, struct netmap_adapter *);
1013 int netmap_vi_create(struct nmreq *, int);
1014 
1015 #else /* !WITH_VALE */
1016 #define netmap_vi_create(nmr, a) (EOPNOTSUPP)
1017 #endif /* WITH_VALE */
1018 
1019 #ifdef WITH_PIPES
1020 
1021 #define NM_MAXPIPES 	64	/* max number of pipes per adapter */
1022 
1023 struct netmap_pipe_adapter {
1024 	struct netmap_adapter up;
1025 
1026 	u_int id; 	/* pipe identifier */
1027 	int role;	/* either NR_REG_PIPE_MASTER or NR_REG_PIPE_SLAVE */
1028 
1029 	struct netmap_adapter *parent; /* adapter that owns the memory */
1030 	struct netmap_pipe_adapter *peer; /* the other end of the pipe */
1031 	int peer_ref;		/* 1 iff we are holding a ref to the peer */
1032 	struct ifnet *parent_ifp;	/* maybe null */
1033 
1034 	u_int parent_slot; /* index in the parent pipe array */
1035 };
1036 
1037 #endif /* WITH_PIPES */
1038 
1039 
1040 /* return slots reserved to rx clients; used in drivers */
1041 static inline uint32_t
1042 nm_kr_rxspace(struct netmap_kring *k)
1043 {
1044 	int space = k->nr_hwtail - k->nr_hwcur;
1045 	if (space < 0)
1046 		space += k->nkr_num_slots;
1047 	ND("preserving %d rx slots %d -> %d", space, k->nr_hwcur, k->nr_hwtail);
1048 
1049 	return space;
1050 }
1051 
1052 /* return slots reserved to tx clients */
1053 #define nm_kr_txspace(_k) nm_kr_rxspace(_k)
1054 
1055 
1056 /* True if no space in the tx ring, only valid after txsync_prologue */
1057 static inline int
1058 nm_kr_txempty(struct netmap_kring *kring)
1059 {
1060 	return kring->rcur == kring->nr_hwtail;
1061 }
1062 
1063 /* True if no more completed slots in the rx ring, only valid after
1064  * rxsync_prologue */
1065 #define nm_kr_rxempty(_k)	nm_kr_txempty(_k)
1066 
1067 /*
1068  * protect against multiple threads using the same ring.
1069  * also check that the ring has not been stopped or locked
1070  */
1071 #define NM_KR_BUSY	1	/* some other thread is syncing the ring */
1072 #define NM_KR_STOPPED	2	/* unbounded stop (ifconfig down or driver unload) */
1073 #define NM_KR_LOCKED	3	/* bounded, brief stop for mutual exclusion */
1074 
1075 
1076 /* release the previously acquired right to use the *sync() methods of the ring */
1077 static __inline void nm_kr_put(struct netmap_kring *kr)
1078 {
1079 	NM_ATOMIC_CLEAR(&kr->nr_busy);
1080 }
1081 
1082 
1083 /* true if the ifp that backed the adapter has disappeared (e.g., the
1084  * driver has been unloaded)
1085  */
1086 static inline int nm_iszombie(struct netmap_adapter *na);
1087 
1088 /* try to obtain exclusive right to issue the *sync() operations on the ring.
1089  * The right is obtained and must be later relinquished via nm_kr_put() if and
1090  * only if nm_kr_tryget() returns 0.
1091  * If can_sleep is 1 there are only two other possible outcomes:
1092  * - the function returns NM_KR_BUSY
1093  * - the function returns NM_KR_STOPPED and sets the POLLERR bit in *perr
1094  *   (if non-null)
1095  * In both cases the caller will typically skip the ring, possibly collecting
1096  * errors along the way.
1097  * If the calling context does not allow sleeping, the caller must pass 0 in can_sleep.
1098  * In the latter case, the function may also return NM_KR_LOCKED and leave *perr
1099  * untouched: ideally, the caller should try again at a later time.
1100  */
1101 static __inline int nm_kr_tryget(struct netmap_kring *kr, int can_sleep, int *perr)
1102 {
1103 	int busy = 1, stopped;
1104 	/* check a first time without taking the lock
1105 	 * to avoid starvation for nm_kr_get()
1106 	 */
1107 retry:
1108 	stopped = kr->nkr_stopped;
1109 	if (unlikely(stopped)) {
1110 		goto stop;
1111 	}
1112 	busy = NM_ATOMIC_TEST_AND_SET(&kr->nr_busy);
1113 	/* we should not return NM_KR_BUSY if the ring was
1114 	 * actually stopped, so check another time after
1115 	 * the barrier provided by the atomic operation
1116 	 */
1117 	stopped = kr->nkr_stopped;
1118 	if (unlikely(stopped)) {
1119 		goto stop;
1120 	}
1121 
1122 	if (unlikely(nm_iszombie(kr->na))) {
1123 		stopped = NM_KR_STOPPED;
1124 		goto stop;
1125 	}
1126 
1127 	return unlikely(busy) ? NM_KR_BUSY : 0;
1128 
1129 stop:
1130 	if (!busy)
1131 		nm_kr_put(kr);
1132 	if (stopped == NM_KR_STOPPED) {
1133 /* if POLLERR is defined we want to use it to simplify netmap_poll().
1134  * Otherwise, any non-zero value will do.
1135  */
1136 #ifdef POLLERR
1137 #define NM_POLLERR POLLERR
1138 #else
1139 #define NM_POLLERR 1
1140 #endif /* POLLERR */
1141 		if (perr)
1142 			*perr |= NM_POLLERR;
1143 #undef NM_POLLERR
1144 	} else if (can_sleep) {
1145 		tsleep(kr, 0, "NM_KR_TRYGET", 4);
1146 		goto retry;
1147 	}
1148 	return stopped;
1149 }
1150 
1151 /* put the ring in the 'stopped' state and wait for the current user (if any) to
1152  * notice. stopped must be either NM_KR_STOPPED or NM_KR_LOCKED
1153  */
1154 static __inline void nm_kr_stop(struct netmap_kring *kr, int stopped)
1155 {
1156 	kr->nkr_stopped = stopped;
1157 	while (NM_ATOMIC_TEST_AND_SET(&kr->nr_busy))
1158 		tsleep(kr, 0, "NM_KR_GET", 4);
1159 }
1160 
1161 /* restart a ring after a stop */
1162 static __inline void nm_kr_start(struct netmap_kring *kr)
1163 {
1164 	kr->nkr_stopped = 0;
1165 	nm_kr_put(kr);
1166 }
1167 
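/*
 * Example (sketch of how poll/ioctl code typically brackets a sync
 * operation on a kring; 'revents' collects error flags):
 *
 *	if (nm_kr_tryget(kring, 1, &revents) != 0)
 *		continue;	(ring stopped or busy, skip it)
 *	kring->nm_sync(kring, flags);
 *	nm_kr_put(kring);
 */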
1168 
1169 /*
1170  * The following functions are used by individual drivers to
1171  * support netmap operation.
1172  *
1173  * netmap_attach() initializes a struct netmap_adapter, allocating the
1174  * 	struct netmap_ring's and the struct selinfo.
1175  *
1176  * netmap_detach() frees the memory allocated by netmap_attach().
1177  *
1178  * netmap_transmit() replaces the if_transmit routine of the interface,
1179  *	and is used to intercept packets coming from the stack.
1180  *
1181  * netmap_load_map/netmap_reload_map are helper routines to set/reset
1182  *	the dmamap for a packet buffer
1183  *
1184  * netmap_reset() is a helper routine to be called in the hw driver
1185  *	when reinitializing a ring. It should not be called by
1186  *	virtual ports (vale, pipes, monitor)
1187  */
1188 int netmap_attach(struct netmap_adapter *);
1189 int netmap_attach_ext(struct netmap_adapter *, size_t size);
1190 void netmap_detach(struct ifnet *);
1191 int netmap_transmit(struct ifnet *, struct mbuf *);
1192 struct netmap_slot *netmap_reset(struct netmap_adapter *na,
1193 	enum txrx tx, u_int n, u_int new_cur);
1194 int netmap_ring_reinit(struct netmap_kring *);
1195 
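/*
 * Example (sketch of a typical driver attach routine; the 'foo' names
 * are hypothetical, only the commonly initialized fields are shown):
 *
 *	static void
 *	foo_netmap_attach(struct foo_softc *sc)
 *	{
 *		struct netmap_adapter na;
 *
 *		bzero(&na, sizeof(na));
 *		na.ifp = sc->ifp;
 *		na.num_tx_desc = sc->num_tx_desc;
 *		na.num_rx_desc = sc->num_rx_desc;
 *		na.nm_register = foo_netmap_register;
 *		na.nm_txsync = foo_netmap_txsync;
 *		na.nm_rxsync = foo_netmap_rxsync;
 *		na.num_tx_rings = na.num_rx_rings = sc->num_queues;
 *		netmap_attach(&na);
 *	}
 */
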
1196 /* Return codes for netmap_*x_irq. */
1197 enum {
1198 	/* Driver should do normal interrupt processing, e.g. because
1199 	 * the interface is not in netmap mode. */
1200 	NM_IRQ_PASS = 0,
1201 	/* Port is in netmap mode, and the interrupt work has been
1202 	 * completed. The driver does not have to notify netmap
1203 	 * again before the next interrupt. */
1204 	NM_IRQ_COMPLETED = -1,
1205 	/* Port is in netmap mode, but the interrupt work has not been
1206 	 * completed. The driver has to make sure netmap will be
1207 	 * notified again soon, even if no more interrupts come (e.g.
1208 	 * on Linux the driver should not call napi_complete()). */
1209 	NM_IRQ_RESCHED = -2,
1210 };
1211 
1212 /* default functions to handle rx/tx interrupts */
1213 int netmap_rx_irq(struct ifnet *, u_int, u_int *);
1214 #define netmap_tx_irq(_n, _q) netmap_rx_irq(_n, _q, NULL)
1215 int netmap_common_irq(struct netmap_adapter *, u_int, u_int *work_done);
1216 
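/*
 * Example (sketch of a driver rx interrupt/cleanup routine deferring
 * to netmap; the 'foo' names are hypothetical):
 *
 *	static void
 *	foo_rxeof(struct foo_rx_queue *rxq)
 *	{
 *		u_int work_done;
 *
 *		if (netmap_rx_irq(rxq->ifp, rxq->me, &work_done) != NM_IRQ_PASS)
 *			return;	(netmap handled or rescheduled the work)
 *		... normal host stack processing ...
 *	}
 */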
1217 
1218 #ifdef WITH_VALE
1219 /* functions used by external modules to interface with VALE */
1220 #define netmap_vp_to_ifp(_vp)	((_vp)->up.ifp)
1221 #define netmap_ifp_to_vp(_ifp)	(NA(_ifp)->na_vp)
1222 #define netmap_ifp_to_host_vp(_ifp) (NA(_ifp)->na_hostvp)
1223 #define netmap_bdg_idx(_vp)	((_vp)->bdg_port)
1224 const char *netmap_bdg_name(struct netmap_vp_adapter *);
1225 #else /* !WITH_VALE */
1226 #define netmap_vp_to_ifp(_vp)	NULL
1227 #define netmap_ifp_to_vp(_ifp)	NULL
1228 #define netmap_ifp_to_host_vp(_ifp) NULL
1229 #define netmap_bdg_idx(_vp)	-1
1230 #define netmap_bdg_name(_vp)	NULL
1231 #endif /* WITH_VALE */
1232 
1233 static inline int
1234 nm_netmap_on(struct netmap_adapter *na)
1235 {
1236 	return na && na->na_flags & NAF_NETMAP_ON;
1237 }
1238 
1239 static inline int
1240 nm_native_on(struct netmap_adapter *na)
1241 {
1242 	return nm_netmap_on(na) && (na->na_flags & NAF_NATIVE);
1243 }
1244 
1245 static inline int
1246 nm_iszombie(struct netmap_adapter *na)
1247 {
1248 	return na == NULL || (na->na_flags & NAF_ZOMBIE);
1249 }
1250 
1251 static inline void
1252 nm_update_hostrings_mode(struct netmap_adapter *na)
1253 {
1254 	/* Process nr_mode and nr_pending_mode for host rings. */
1255 	na->tx_rings[na->num_tx_rings].nr_mode =
1256 		na->tx_rings[na->num_tx_rings].nr_pending_mode;
1257 	na->rx_rings[na->num_rx_rings].nr_mode =
1258 		na->rx_rings[na->num_rx_rings].nr_pending_mode;
1259 }
1260 
1261 /* set/clear native flags and if_transmit/netdev_ops */
1262 static inline void
1263 nm_set_native_flags(struct netmap_adapter *na)
1264 {
1265 	struct ifnet *ifp = na->ifp;
1266 
1267 	/* We do the setup for intercepting packets only if we are the
1268 	 * first user of this adapter. */
1269 	if (na->active_fds > 0) {
1270 		return;
1271 	}
1272 
1273 	na->na_flags |= NAF_NETMAP_ON;
1274 #ifdef IFCAP_NETMAP /* or FreeBSD ? */
1275 	ifp->if_capenable |= IFCAP_NETMAP;
1276 #endif
1277 #if defined (__FreeBSD__)
1278 	na->if_transmit = ifp->if_transmit;
1279 	ifp->if_transmit = netmap_transmit;
1280 #elif defined (_WIN32)
1281 	(void)ifp; /* prevent a warning */
1282 	//XXX_ale can we just comment those?
1283 	//na->if_transmit = ifp->if_transmit;
1284 	//ifp->if_transmit = netmap_transmit;
1285 #else
1286 	na->if_transmit = (void *)ifp->netdev_ops;
1287 	ifp->netdev_ops = &((struct netmap_hw_adapter *)na)->nm_ndo;
1288 	((struct netmap_hw_adapter *)na)->save_ethtool = ifp->ethtool_ops;
1289 	ifp->ethtool_ops = &((struct netmap_hw_adapter*)na)->nm_eto;
1290 #endif
1291 	nm_update_hostrings_mode(na);
1292 }
1293 
1294 static inline void
1295 nm_clear_native_flags(struct netmap_adapter *na)
1296 {
1297 	struct ifnet *ifp = na->ifp;
1298 
1299 	/* We undo the setup for intercepting packets only if we are the
1300 	 * last user of this adapter. */
1301 	if (na->active_fds > 0) {
1302 		return;
1303 	}
1304 
1305 	nm_update_hostrings_mode(na);
1306 
1307 #if defined(__FreeBSD__)
1308 	ifp->if_transmit = na->if_transmit;
1309 #elif defined(_WIN32)
1310 	(void)ifp; /* prevent a warning */
1311 	//XXX_ale can we just comment those?
1312 	//ifp->if_transmit = na->if_transmit;
1313 #else
1314 	ifp->netdev_ops = (void *)na->if_transmit;
1315 	ifp->ethtool_ops = ((struct netmap_hw_adapter*)na)->save_ethtool;
1316 #endif
1317 	na->na_flags &= ~NAF_NETMAP_ON;
1318 #ifdef IFCAP_NETMAP /* or FreeBSD ? */
1319 	ifp->if_capenable &= ~IFCAP_NETMAP;
1320 #endif
1321 }
1322 
1323 /*
1324  * nm_*sync_prologue() functions are used in ioctl/poll and ptnetmap
1325  * kthreads.
1326  * We need netmap_ring* parameter, because in ptnetmap it is decoupled
1327  * from host kring.
1328  * The user-space ring pointers (head/cur/tail) are shared through
1329  * CSB between host and guest.
1330  */
1331 
1332 /*
1333  * validates parameters in the ring/kring, returns a value for head
1334  * If any error, returns ring_size to force a reinit.
1335  */
1336 uint32_t nm_txsync_prologue(struct netmap_kring *, struct netmap_ring *);
1337 
1338 
1339 /*
1340  * validates parameters in the ring/kring, returns a value for head
1341  * If any error, returns ring_size lim to force a reinit.
1342  */
1343 uint32_t nm_rxsync_prologue(struct netmap_kring *, struct netmap_ring *);
1344 
1345 
1346 /* check/fix address and len in tx rings */
1347 #if 1 /* debug version */
1348 #define	NM_CHECK_ADDR_LEN(_na, _a, _l)	do {				\
1349 	if (_a == NETMAP_BUF_BASE(_na) || _l > NETMAP_BUF_SIZE(_na)) {	\
1350 		RD(5, "bad addr/len ring %d slot %d idx %d len %d",	\
1351 			kring->ring_id, nm_i, slot->buf_idx, len);	\
1352 		if (_l > NETMAP_BUF_SIZE(_na))				\
1353 			_l = NETMAP_BUF_SIZE(_na);			\
1354 	} } while (0)
1355 #else /* no debug version */
1356 #define	NM_CHECK_ADDR_LEN(_na, _a, _l)	do {				\
1357 		if (_l > NETMAP_BUF_SIZE(_na))				\
1358 			_l = NETMAP_BUF_SIZE(_na);			\
1359 	} while (0)
1360 #endif
1361 
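/*
 * Example (sketch): inside a txsync loop the buffer address and length
 * of each slot are typically validated like this (PNMB() is defined
 * further down in this header; 'slot' is the current netmap_slot):
 *
 *	u_int len = slot->len;
 *	uint64_t paddr;
 *	void *addr = PNMB(na, slot, &paddr);
 *
 *	NM_CHECK_ADDR_LEN(na, addr, len);
 *	... use paddr/len to fill the NIC descriptor ...
 */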
1362 
1363 /*---------------------------------------------------------------*/
1364 /*
1365  * Support routines used by netmap subsystems
1366  * (native drivers, VALE, generic, pipes, monitors, ...)
1367  */
1368 
1369 
1370 /* common routine for all functions that create a netmap adapter. It performs
1371  * two main tasks:
1372  * - if the na points to an ifp, mark the ifp as netmap capable
1373  *   using na as its native adapter;
1374  * - provide defaults for the setup callbacks and the memory allocator
1375  */
1376 int netmap_attach_common(struct netmap_adapter *);
1377 /* common actions to be performed on netmap adapter destruction */
1378 void netmap_detach_common(struct netmap_adapter *);
1379 /* fill priv->np_[tr]xq{first,last} using the ringid and flags information
1380  * coming from a struct nmreq
1381  */
1382 int netmap_interp_ringid(struct netmap_priv_d *priv, uint16_t ringid, uint32_t flags);
1383 /* update the ring parameters (number and size of tx and rx rings).
1384  * It calls the nm_config callback, if available.
1385  */
1386 int netmap_update_config(struct netmap_adapter *na);
1387 /* create and initialize the common fields of the krings array.
1388  * using the information that must be already available in the na.
1389  * tailroom can be used to request the allocation of additional
1390  * tailroom bytes after the krings array. This is used by
1391  * netmap_vp_adapter's (i.e., VALE ports) to make room for
1392  * leasing-related data structures
1393  */
1394 int netmap_krings_create(struct netmap_adapter *na, u_int tailroom);
1395 /* deletes the kring array of the adapter. The array must have
1396  * been created using netmap_krings_create
1397  */
1398 void netmap_krings_delete(struct netmap_adapter *na);
1399 
1400 int netmap_hw_krings_create(struct netmap_adapter *na);
1401 void netmap_hw_krings_delete(struct netmap_adapter *na);
1402 
1403 /* set the stopped/enabled status of a ring.
1404  * When stopping, it also waits for all current activity on the ring to
1405  * terminate. The status change is then notified using the na nm_notify
1406  * callback.
1407  */
1408 void netmap_set_ring(struct netmap_adapter *, u_int ring_id, enum txrx, int stopped);
1409 /* set the stopped/enabled status of all rings of the adapter. */
1410 void netmap_set_all_rings(struct netmap_adapter *, int stopped);
1411 /* convenience wrappers for netmap_set_all_rings */
1412 void netmap_disable_all_rings(struct ifnet *);
1413 void netmap_enable_all_rings(struct ifnet *);
1414 
1415 int netmap_do_regif(struct netmap_priv_d *priv, struct netmap_adapter *na,
1416 	uint16_t ringid, uint32_t flags);
1417 void netmap_do_unregif(struct netmap_priv_d *priv);
1418 
1419 u_int nm_bound_var(u_int *v, u_int dflt, u_int lo, u_int hi, const char *msg);
1420 int netmap_get_na(struct nmreq *nmr, struct netmap_adapter **na,
1421 		  struct ifnet **ifp, struct netmap_mem_d *nmd, int create);
1422 void netmap_unget_na(struct netmap_adapter *na, struct ifnet *ifp);
1423 int netmap_get_hw_na(struct ifnet *ifp,
1424 		struct netmap_mem_d *nmd, struct netmap_adapter **na);
1425 
1426 
1427 #ifdef WITH_VALE
1428 /*
1429  * The following bridge-related functions are used by other
1430  * kernel modules.
1431  *
1432  * VALE only supports unicast or broadcast. The lookup
1433  * function can return 0 .. NM_BDG_MAXPORTS-1 for regular ports,
1434  * NM_BDG_MAXPORTS for broadcast, NM_BDG_MAXPORTS+1 for unknown.
1435  * XXX in practice "unknown" might be handled the same as broadcast.
1436  */
1437 typedef u_int (*bdg_lookup_fn_t)(struct nm_bdg_fwd *ft, uint8_t *ring_nr,
1438 		struct netmap_vp_adapter *);
1439 typedef int (*bdg_config_fn_t)(struct nm_ifreq *);
1440 typedef void (*bdg_dtor_fn_t)(const struct netmap_vp_adapter *);
1441 struct netmap_bdg_ops {
1442 	bdg_lookup_fn_t lookup;
1443 	bdg_config_fn_t config;
1444 	bdg_dtor_fn_t	dtor;
1445 };
1446 
1447 u_int netmap_bdg_learning(struct nm_bdg_fwd *ft, uint8_t *dst_ring,
1448 		struct netmap_vp_adapter *);
1449 
1450 #define	NM_BRIDGES		8	/* number of bridges */
1451 #define	NM_BDG_MAXPORTS		254	/* up to 254 */
1452 #define	NM_BDG_BROADCAST	NM_BDG_MAXPORTS
1453 #define	NM_BDG_NOPORT		(NM_BDG_MAXPORTS+1)
1454 
1455 /* these are redefined in case of no VALE support */
1456 int netmap_get_bdg_na(struct nmreq *nmr, struct netmap_adapter **na,
1457 		struct netmap_mem_d *nmd, int create);
1458 struct nm_bridge *netmap_init_bridges2(u_int);
1459 void netmap_uninit_bridges2(struct nm_bridge *, u_int);
1460 int netmap_init_bridges(void);
1461 void netmap_uninit_bridges(void);
1462 int netmap_bdg_ctl(struct nmreq *nmr, struct netmap_bdg_ops *bdg_ops);
1463 int netmap_bdg_config(struct nmreq *nmr);
1464 
1465 #else /* !WITH_VALE */
1466 #define	netmap_get_bdg_na(_1, _2, _3, _4)	0
1467 #define netmap_init_bridges(_1) 0
1468 #define netmap_uninit_bridges()
1469 #define	netmap_bdg_ctl(_1, _2)	EINVAL
1470 #endif /* !WITH_VALE */
1471 
1472 #ifdef WITH_PIPES
1473 /* max number of pipes per device */
1474 #define NM_MAXPIPES	64	/* XXX how many? */
1475 void netmap_pipe_dealloc(struct netmap_adapter *);
1476 int netmap_get_pipe_na(struct nmreq *nmr, struct netmap_adapter **na,
1477 		struct netmap_mem_d *nmd, int create);
1478 #else /* !WITH_PIPES */
1479 #define NM_MAXPIPES	0
1480 #define netmap_pipe_alloc(_1, _2) 	0
1481 #define netmap_pipe_dealloc(_1)
1482 #define netmap_get_pipe_na(nmr, _2, _3, _4)	\
1483 	({ int role__ = (nmr)->nr_flags & NR_REG_MASK; \
1484 	   (role__ == NR_REG_PIPE_MASTER || 	       \
1485 	    role__ == NR_REG_PIPE_SLAVE) ? EOPNOTSUPP : 0; })
1486 #endif
1487 
1488 #ifdef WITH_MONITOR
1489 int netmap_get_monitor_na(struct nmreq *nmr, struct netmap_adapter **na,
1490 		struct netmap_mem_d *nmd, int create);
1491 void netmap_monitor_stop(struct netmap_adapter *na);
1492 #else
1493 #define netmap_get_monitor_na(nmr, _2, _3, _4) \
1494 	((nmr)->nr_flags & (NR_MONITOR_TX | NR_MONITOR_RX) ? EOPNOTSUPP : 0)
1495 #endif
1496 
1497 #ifdef CONFIG_NET_NS
1498 struct net *netmap_bns_get(void);
1499 void netmap_bns_put(struct net *);
1500 void netmap_bns_getbridges(struct nm_bridge **, u_int *);
1501 #else
1502 #define netmap_bns_get()
1503 #define netmap_bns_put(_1)
1504 #define netmap_bns_getbridges(b, n) \
1505 	do { *b = nm_bridges; *n = NM_BRIDGES; } while (0)
1506 #endif
1507 
1508 /* Various prototypes */
1509 int netmap_poll(struct netmap_priv_d *, int events, NM_SELRECORD_T *td);
1510 int netmap_init(void);
1511 void netmap_fini(void);
1512 int netmap_get_memory(struct netmap_priv_d* p);
1513 void netmap_dtor(void *data);
1514 
1515 int netmap_ioctl(struct netmap_priv_d *priv, u_long cmd, caddr_t data, struct thread *);
1516 
1517 /* netmap_adapter creation/destruction */
1518 
1519 // #define NM_DEBUG_PUTGET 1
1520 
1521 #ifdef NM_DEBUG_PUTGET
1522 
1523 #define NM_DBG(f) __##f
1524 
1525 void __netmap_adapter_get(struct netmap_adapter *na);
1526 
1527 #define netmap_adapter_get(na) 				\
1528 	do {						\
1529 		struct netmap_adapter *__na = na;	\
1530 		D("getting %p:%s (%d)", __na, (__na)->name, (__na)->na_refcount);	\
1531 		__netmap_adapter_get(__na);		\
1532 	} while (0)
1533 
1534 int __netmap_adapter_put(struct netmap_adapter *na);
1535 
1536 #define netmap_adapter_put(na)				\
1537 	({						\
1538 		struct netmap_adapter *__na = na;	\
1539 		D("putting %p:%s (%d)", __na, (__na)->name, (__na)->na_refcount);	\
1540 		__netmap_adapter_put(__na);		\
1541 	})
1542 
1543 #else /* !NM_DEBUG_PUTGET */
1544 
1545 #define NM_DBG(f) f
1546 void netmap_adapter_get(struct netmap_adapter *na);
1547 int netmap_adapter_put(struct netmap_adapter *na);
1548 
1549 #endif /* !NM_DEBUG_PUTGET */
1550 
1551 
1552 /*
1553  * module variables
1554  */
1555 #define NETMAP_BUF_BASE(_na)	((_na)->na_lut.lut[0].vaddr)
1556 #define NETMAP_BUF_SIZE(_na)	((_na)->na_lut.objsize)
1557 extern int netmap_no_pendintr;
1558 extern int netmap_mitigate;
1559 extern int netmap_verbose;		/* for debugging */
1560 enum {                                  /* verbose flags */
1561 	NM_VERB_ON = 1,                 /* generic verbose */
1562 	NM_VERB_HOST = 0x2,             /* verbose host stack */
1563 	NM_VERB_RXSYNC = 0x10,          /* verbose on rxsync/txsync */
1564 	NM_VERB_TXSYNC = 0x20,
1565 	NM_VERB_RXINTR = 0x100,         /* verbose on rx/tx intr (driver) */
1566 	NM_VERB_TXINTR = 0x200,
1567 	NM_VERB_NIC_RXSYNC = 0x1000,    /* verbose on driver rx/tx sync */
1568 	NM_VERB_NIC_TXSYNC = 0x2000,
1569 };
1570 
1571 extern int netmap_txsync_retry;
1572 extern int netmap_flags;
1573 extern int netmap_generic_mit;
1574 extern int netmap_generic_ringsize;
1575 extern int netmap_generic_rings;
1576 extern int netmap_generic_txqdisc;
1577 extern int ptnetmap_tx_workers;
1578 
1579 /*
1580  * NA returns a pointer to the struct netmap_adapter attached to the ifp;
1581  * WNA is used to write it.
1582  */
1583 #define	NA(_ifp)	((struct netmap_adapter *)WNA(_ifp))
1584 
1585 /*
1586  * On old versions of FreeBSD, NA(ifp) lives in a spare pointer (pspare)
1587  * of the ifnet. On linux we overload another pointer in the netdev.
1588  *
1589  * NA(ifp) is considered valid if the pointer is set and its 'magic' field
1590  * matches the pointer XORed with NETMAP_MAGIC; capenable lives in struct netmap_adapter.
1591  */
1592 #define	NETMAP_MAGIC	0x52697a7a
1593 
1594 #define NM_NA_VALID(ifp)	(NA(ifp) &&		\
1595 	((uint32_t)(uintptr_t)NA(ifp) ^ NA(ifp)->magic) == NETMAP_MAGIC )
1596 
1597 #define	NM_ATTACH_NA(ifp, na) do {					\
1598 	WNA(ifp) = na;							\
1599 	if (NA(ifp))							\
1600 		NA(ifp)->magic = 					\
1601 			((uint32_t)(uintptr_t)NA(ifp)) ^ NETMAP_MAGIC;	\
1602 } while(0)
1603 
1604 #define NM_IS_NATIVE(ifp)	(NM_NA_VALID(ifp) && NA(ifp)->nm_dtor == netmap_hw_dtor)
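
/*
 * A minimal sketch (not compiled) of the round trip provided by
 * NM_ATTACH_NA() and NM_NA_VALID(); 'ifp' and 'na' are hypothetical
 * variables, not part of this header.
 */
#if 0
	NM_ATTACH_NA(ifp, na);	/* store na and derive na->magic from the pointer */
	/* NM_NA_VALID(ifp) is now true */
	NM_ATTACH_NA(ifp, NULL);
	/* NM_NA_VALID(ifp) is now false (pointer cleared, magic never checked) */
#endif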
1605 
1606 #if defined(__FreeBSD__)
1607 
1608 /* Assigns the device IOMMU domain to an allocator; returns -ENOMEM in case
1609  * the domain is different. The FreeBSD version is a stub (always group 0). */
1610 #define nm_iommu_group_id(dev) (0)
1611 
1612 /* Callback invoked by the dma machinery after a successful dmamap_load */
1613 static void netmap_dmamap_cb(__unused void *arg,
1614     __unused bus_dma_segment_t * segs, __unused int nseg, __unused int error)
1615 {
1616 }
1617 
1618 /* bus_dmamap_load wrapper: call aforementioned function if map != NULL.
1619  * XXX can we do it without a callback ?
1620  */
1621 static inline void
1622 netmap_load_map(struct netmap_adapter *na,
1623 	bus_dma_tag_t tag, bus_dmamap_t map, void *buf)
1624 {
1625 	if (map)
1626 		bus_dmamap_load(tag, map, buf, NETMAP_BUF_SIZE(na),
1627 		    netmap_dmamap_cb, NULL, BUS_DMA_NOWAIT);
1628 }
1629 
1630 static inline void
1631 netmap_unload_map(struct netmap_adapter *na,
1632         bus_dma_tag_t tag, bus_dmamap_t map)
1633 {
1634 	if (map)
1635 		bus_dmamap_unload(tag, map);
1636 }
1637 
1638 /* update the map when a buffer changes. */
1639 static inline void
1640 netmap_reload_map(struct netmap_adapter *na,
1641 	bus_dma_tag_t tag, bus_dmamap_t map, void *buf)
1642 {
1643 	if (map) {
1644 		bus_dmamap_unload(tag, map);
1645 		bus_dmamap_load(tag, map, buf, NETMAP_BUF_SIZE(na),
1646 		    netmap_dmamap_cb, NULL, BUS_DMA_NOWAIT);
1647 	}
1648 }
1649 
1650 #elif defined(_WIN32)
1651 
1652 #else /* linux */
1653 
1654 int nm_iommu_group_id(bus_dma_tag_t dev);
1655 #include <linux/dma-mapping.h>
1656 
1657 static inline void
1658 netmap_load_map(struct netmap_adapter *na,
1659 	bus_dma_tag_t tag, bus_dmamap_t map, void *buf)
1660 {
1661 	if (0 && map) {
1662 		*map = dma_map_single(na->pdev, buf, NETMAP_BUF_SIZE(na),
1663 				      DMA_BIDIRECTIONAL);
1664 	}
1665 }
1666 
1667 static inline void
1668 netmap_unload_map(struct netmap_adapter *na,
1669 	bus_dma_tag_t tag, bus_dmamap_t map)
1670 {
1671 	u_int sz = NETMAP_BUF_SIZE(na);
1672 
1673 	if (*map) {
1674 		dma_unmap_single(na->pdev, *map, sz,
1675 				 DMA_BIDIRECTIONAL);
1676 	}
1677 }
1678 
1679 static inline void
1680 netmap_reload_map(struct netmap_adapter *na,
1681 	bus_dma_tag_t tag, bus_dmamap_t map, void *buf)
1682 {
1683 	u_int sz = NETMAP_BUF_SIZE(na);
1684 
1685 	if (*map) {
1686 		dma_unmap_single(na->pdev, *map, sz,
1687 				DMA_BIDIRECTIONAL);
1688 	}
1689 
1690 	*map = dma_map_single(na->pdev, buf, sz,
1691 				DMA_BIDIRECTIONAL);
1692 }
1693 
1694 /*
1695  * XXX How do we redefine these functions:
1696  *
1697  * on linux we need
1698  *	dma_map_single(&pdev->dev, virt_addr, len, direction)
1699  *	dma_unmap_single(&adapter->pdev->dev, phys_addr, len, direction)
1700  * The len can be implicit (on netmap it is NETMAP_BUF_SIZE), but
1701  * unfortunately the direction is not, so we would need to change
1702  * something to have a cross-OS API.
1703  */
1704 
1705 #if 0
1706 	struct e1000_buffer *buffer_info =  &tx_ring->buffer_info[l];
1707 	/* set time_stamp *before* dma to help avoid a possible race */
1708 	buffer_info->time_stamp = jiffies;
1709 	buffer_info->mapped_as_page = false;
1710 	buffer_info->length = len;
1711 	//buffer_info->next_to_watch = l;
1712 	/* reload dma map */
1713 	dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
1714 			NETMAP_BUF_SIZE, DMA_TO_DEVICE);
1715 	buffer_info->dma = dma_map_single(&adapter->pdev->dev,
1716 			addr, NETMAP_BUF_SIZE, DMA_TO_DEVICE);
1717 
1718 	if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) {
1719 		D("dma mapping error");
1720 		/* goto dma_error; See e1000_put_txbuf() */
1721 		/* XXX reset */
1722 	}
1723 	tx_desc->buffer_addr = htole64(buffer_info->dma); //XXX
1724 
1725 #endif
1726 
1727 /*
1728  * bus_dmamap_sync() is a no-op on linux; it could be a wmb() or rmb() depending on the sync direction.
1729  */
1730 #define bus_dmamap_sync(_a, _b, _c)
1731 
1732 #endif /* linux */
1733 
1734 
1735 /*
1736  * functions to map NIC to KRING indexes (n2k) and vice versa (k2n)
1737  */
1738 static inline int
1739 netmap_idx_n2k(struct netmap_kring *kr, int idx)
1740 {
1741 	int n = kr->nkr_num_slots;
1742 	idx += kr->nkr_hwofs;
1743 	if (idx < 0)
1744 		return idx + n;
1745 	else if (idx < n)
1746 		return idx;
1747 	else
1748 		return idx - n;
1749 }
1750 
1751 
1752 static inline int
1753 netmap_idx_k2n(struct netmap_kring *kr, int idx)
1754 {
1755 	int n = kr->nkr_num_slots;
1756 	idx -= kr->nkr_hwofs;
1757 	if (idx < 0)
1758 		return idx + n;
1759 	else if (idx < n)
1760 		return idx;
1761 	else
1762 		return idx - n;
1763 }
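
/*
 * Worked example (a sketch, assuming |nkr_hwofs| < nkr_num_slots, which the
 * ring setup code maintains): with nkr_num_slots = 8 and nkr_hwofs = 2,
 * netmap_idx_n2k() maps NIC slot 0 to kring slot 2 and NIC slot 7 to kring
 * slot 1, while netmap_idx_k2n() maps kring slot 1 back to NIC slot 7.
 * A single wrap of +/- nkr_num_slots is therefore always enough.
 */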
1764 
1765 
1766 /* Entries of the look-up table. */
1767 struct lut_entry {
1768 	void *vaddr;		/* virtual address. */
1769 	vm_paddr_t paddr;	/* physical address. */
1770 };
1771 
1772 struct netmap_obj_pool;
1773 
1774 /*
1775  * NMB returns the virtual address of a buffer (buffer 0 on a bad index).
1776  * PNMB also fills in the physical address.
1777  */
1778 static inline void *
1779 NMB(struct netmap_adapter *na, struct netmap_slot *slot)
1780 {
1781 	struct lut_entry *lut = na->na_lut.lut;
1782 	uint32_t i = slot->buf_idx;
1783 	return (unlikely(i >= na->na_lut.objtotal)) ?
1784 		lut[0].vaddr : lut[i].vaddr;
1785 }
1786 
1787 static inline void *
1788 PNMB(struct netmap_adapter *na, struct netmap_slot *slot, uint64_t *pp)
1789 {
1790 	uint32_t i = slot->buf_idx;
1791 	struct lut_entry *lut = na->na_lut.lut;
1792 	void *ret = (i >= na->na_lut.objtotal) ? lut[0].vaddr : lut[i].vaddr;
1793 
1794 #ifndef _WIN32
1795 	*pp = (i >= na->na_lut.objtotal) ? lut[0].paddr : lut[i].paddr;
1796 #else
1797 	*pp = (i >= na->na_lut.objtotal) ? (uint64_t)lut[0].paddr.QuadPart : (uint64_t)lut[i].paddr.QuadPart;
1798 #endif
1799 	return ret;
1800 }
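
/*
 * Usage sketch (not compiled): how a driver txsync routine typically
 * resolves a slot into addresses; 'ring', 'nm_i', 'tag' and 'map' are
 * hypothetical locals of the driver, not defined here.
 */
#if 0
	struct netmap_slot *slot = &ring->slot[nm_i];
	uint64_t paddr;
	void *addr = PNMB(na, slot, &paddr);	/* also fills paddr */

	if (addr == NETMAP_BUF_BASE(na))	/* bad buf_idx, got buffer 0 */
		goto ring_reset;
	if (slot->flags & NS_BUF_CHANGED)	/* buffer replaced by the app */
		netmap_reload_map(na, tag, map, addr);
#endif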
1801 
1802 
1803 /*
1804  * Structure associated to each netmap file descriptor.
1805  * It is created on open and left unbound (np_nifp == NULL).
1806  * A successful NIOCREGIF will set np_nifp and the first few fields;
1807  * this is protected by a global lock (NMG_LOCK) due to low contention.
1808  *
1809  * np_refs counts the number of references to the structure: one for the fd,
1810  * plus (on FreeBSD) one for each active mmap which we track ourselves
1811  * (linux automatically tracks them, but FreeBSD does not).
1812  * np_refs is protected by NMG_LOCK.
1813  *
1814  * Read access to the structure is lock free, because np_nifp, once set,
1815  * can only go back to NULL when nobody is using the entry anymore. Readers
1816  * must check that np_nifp != NULL before using the other fields.
1817  */
1818 struct netmap_priv_d {
1819 	struct netmap_if * volatile np_nifp;	/* netmap if descriptor. */
1820 
1821 	struct netmap_adapter	*np_na;
1822 	struct ifnet	*np_ifp;
1823 	uint32_t	np_flags;	/* from the ioctl */
1824 	u_int		np_qfirst[NR_TXRX],
1825 			np_qlast[NR_TXRX]; /* range of tx/rx rings to scan */
1826 	uint16_t	np_txpoll;	/* XXX and also np_rxpoll ? */
1827 	int             np_sync_flags; /* to be passed to nm_sync */
1828 
1829 	int		np_refs;	/* use with NMG_LOCK held */
1830 
1831 	/* pointers to the selinfo to be used for selrecord.
1832 	 * Either the local or the global one depending on the
1833 	 * number of rings.
1834 	 */
1835 	NM_SELINFO_T *np_si[NR_TXRX];
1836 	struct thread	*np_td;		/* kqueue, just debugging */
1837 };
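
/*
 * Sketch (not compiled) of the lock-free read pattern described above:
 * a reader bails out unless np_nifp has been set by NIOCREGIF; 'priv'
 * is a hypothetical pointer to a struct netmap_priv_d.
 */
#if 0
	if (priv->np_nifp == NULL)	/* not bound yet */
		return ENXIO;
	/* np_na, np_qfirst/np_qlast etc. can now be used */
#endif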
1838 
1839 struct netmap_priv_d *netmap_priv_new(void);
1840 void netmap_priv_delete(struct netmap_priv_d *);
1841 
1842 static inline int nm_kring_pending(struct netmap_priv_d *np)
1843 {
1844 	struct netmap_adapter *na = np->np_na;
1845 	enum txrx t;
1846 	int i;
1847 
1848 	for_rx_tx(t) {
1849 		for (i = np->np_qfirst[t]; i < np->np_qlast[t]; i++) {
1850 			struct netmap_kring *kring = &NMR(na, t)[i];
1851 			if (kring->nr_mode != kring->nr_pending_mode) {
1852 				return 1;
1853 			}
1854 		}
1855 	}
1856 	return 0;
1857 }
1858 
1859 #ifdef WITH_PIPES
1860 int netmap_pipe_txsync(struct netmap_kring *txkring, int flags);
1861 int netmap_pipe_rxsync(struct netmap_kring *rxkring, int flags);
1862 #endif /* WITH_PIPES */
1863 
1864 #ifdef WITH_MONITOR
1865 
1866 struct netmap_monitor_adapter {
1867 	struct netmap_adapter up;
1868 
1869 	struct netmap_priv_d priv;
1870 	uint32_t flags;
1871 };
1872 
1873 #endif /* WITH_MONITOR */
1874 
1875 
1876 #ifdef WITH_GENERIC
1877 /*
1878  * generic netmap emulation for devices that do not have
1879  * native netmap support.
1880  */
1881 int generic_netmap_attach(struct ifnet *ifp);
1882 int generic_rx_handler(struct ifnet *ifp, struct mbuf *m);
1883 
1884 int nm_os_catch_rx(struct netmap_generic_adapter *gna, int intercept);
1885 int nm_os_catch_tx(struct netmap_generic_adapter *gna, int intercept);
1886 
1887 int na_is_generic(struct netmap_adapter *na);
1888 
1889 /*
1890  * the generic transmit routine is passed a structure to optionally
1891  * build a queue of descriptors, in an OS-specific way.
1892  * The payload is at addr, if non-null, and the routine should send or queue
1893  * the packet, returning 0 if successful, 1 on failure.
1894  *
1895  * At the end, if head is non-null, there will be an additional call
1896  * to the function with addr = NULL; this should tell the OS-specific
1897  * routine to send the queue and free any resources. Failure is ignored.
1898  */
1899 struct nm_os_gen_arg {
1900 	struct ifnet *ifp;
1901 	void *m;	/* os-specific mbuf-like object */
1902 	void *head, *tail; /* tailq, if the OS-specific routine needs to build one */
1903 	void *addr;	/* payload of current packet */
1904 	u_int len;	/* packet length */
1905 	u_int ring_nr;	/* ring index */
1906 	u_int qevent;   /* in txqdisc mode, place an event on this mbuf */
1907 };
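
/*
 * Usage sketch (not compiled), derived from the comment above: how the
 * generic txsync could drive nm_os_generic_xmit_frame(). The loop structure
 * and the surrounding locals ('ifp', 'na', 'kring', 'slot', 'm') are
 * hypothetical.
 */
#if 0
	struct nm_os_gen_arg a;

	memset(&a, 0, sizeof(a));
	a.ifp = ifp;
	a.ring_nr = kring->ring_id;
	/* for each slot to transmit ... */
	a.m = m;			/* OS-specific mbuf-like object, prepared elsewhere */
	a.addr = NMB(na, slot);		/* payload of the current packet */
	a.len = slot->len;
	if (nm_os_generic_xmit_frame(&a))
		goto out;		/* failed to send/queue this packet */
	/* ... end of per-slot work */
out:
	if (a.head != NULL) {		/* flush the queue built by the OS code */
		a.addr = NULL;
		(void)nm_os_generic_xmit_frame(&a);	/* failure ignored */
	}
#endif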
1908 
1909 int nm_os_generic_xmit_frame(struct nm_os_gen_arg *);
1910 int nm_os_generic_find_num_desc(struct ifnet *ifp, u_int *tx, u_int *rx);
1911 void nm_os_generic_find_num_queues(struct ifnet *ifp, u_int *txq, u_int *rxq);
1912 void nm_os_generic_set_features(struct netmap_generic_adapter *gna);
1913 
1914 static inline struct ifnet*
1915 netmap_generic_getifp(struct netmap_generic_adapter *gna)
1916 {
1917 	if (gna->prev)
1918 		return gna->prev->ifp;
1919 
1920 	return gna->up.up.ifp;
1921 }
1922 
1923 void netmap_generic_irq(struct netmap_adapter *na, u_int q, u_int *work_done);
1924 
1925 //#define RATE_GENERIC  /* Enables communication statistics for generic. */
1926 #ifdef RATE_GENERIC
1927 void generic_rate(int txp, int txs, int txi, int rxp, int rxs, int rxi);
1928 #else
1929 #define generic_rate(txp, txs, txi, rxp, rxs, rxi)
1930 #endif
1931 
1932 /*
1933  * netmap_mitigation API. This is used by the generic adapter
1934  * to reduce the number of interrupt requests/selwakeup
1935  * to clients on incoming packets.
1936  */
1937 void nm_os_mitigation_init(struct nm_generic_mit *mit, int idx,
1938                                 struct netmap_adapter *na);
1939 void nm_os_mitigation_start(struct nm_generic_mit *mit);
1940 void nm_os_mitigation_restart(struct nm_generic_mit *mit);
1941 int nm_os_mitigation_active(struct nm_generic_mit *mit);
1942 void nm_os_mitigation_cleanup(struct nm_generic_mit *mit);
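
/*
 * Sketch (not compiled) of the typical receive-interrupt side use of the
 * mitigation API described above; 'mit', 'q' and 'work_done' are
 * hypothetical locals.
 */
#if 0
	if (!nm_os_mitigation_active(&mit)) {
		netmap_generic_irq(na, q, &work_done);	/* notify right away */
		nm_os_mitigation_start(&mit);		/* arm the mitigation timer */
	} else {
		nm_os_mitigation_restart(&mit);		/* coalesce the notification */
	}
#endif
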
1943 #else /* !WITH_GENERIC */
1944 #define generic_netmap_attach(ifp)	(EOPNOTSUPP)
1945 #define na_is_generic(na)		(0)
1946 #endif /* WITH_GENERIC */
1947 
1948 /* Shared declarations for the VALE switch. */
1949 
1950 /*
1951  * Each transmit queue accumulates a batch of packets into
1952  * a structure before forwarding. Packets to the same
1953  * destination are put in a list using ft_next as a link field.
1954  * ft_frags and ft_next are valid only on the first fragment.
1955  */
1956 struct nm_bdg_fwd {	/* forwarding entry for a bridge */
1957 	void *ft_buf;		/* netmap or indirect buffer */
1958 	uint8_t ft_frags;	/* how many fragments (only on 1st frag) */
1959 	uint8_t _ft_port;	/* dst port (unused) */
1960 	uint16_t ft_flags;	/* flags, e.g. indirect */
1961 	uint16_t ft_len;	/* src fragment len */
1962 	uint16_t ft_next;	/* next packet to same destination */
1963 };
1964 
1965 /* struct 'virtio_net_hdr' from linux. */
1966 struct nm_vnet_hdr {
1967 #define VIRTIO_NET_HDR_F_NEEDS_CSUM     1	/* Use csum_start, csum_offset */
1968 #define VIRTIO_NET_HDR_F_DATA_VALID    2	/* Csum is valid */
1969     uint8_t flags;
1970 #define VIRTIO_NET_HDR_GSO_NONE         0       /* Not a GSO frame */
1971 #define VIRTIO_NET_HDR_GSO_TCPV4        1       /* GSO frame, IPv4 TCP (TSO) */
1972 #define VIRTIO_NET_HDR_GSO_UDP          3       /* GSO frame, IPv4 UDP (UFO) */
1973 #define VIRTIO_NET_HDR_GSO_TCPV6        4       /* GSO frame, IPv6 TCP */
1974 #define VIRTIO_NET_HDR_GSO_ECN          0x80    /* TCP has ECN set */
1975     uint8_t gso_type;
1976     uint16_t hdr_len;
1977     uint16_t gso_size;
1978     uint16_t csum_start;
1979     uint16_t csum_offset;
1980 };
1981 
1982 #define WORST_CASE_GSO_HEADER	(14+40+60)  /* Ethernet + IPv6 + TCP (with options) */
1983 
1984 /* Private definitions for IPv4, IPv6, UDP and TCP headers. */
1985 
1986 struct nm_iphdr {
1987 	uint8_t		version_ihl;
1988 	uint8_t		tos;
1989 	uint16_t	tot_len;
1990 	uint16_t	id;
1991 	uint16_t	frag_off;
1992 	uint8_t		ttl;
1993 	uint8_t		protocol;
1994 	uint16_t	check;
1995 	uint32_t	saddr;
1996 	uint32_t	daddr;
1997 	/* The options start here. */
1998 };
1999 
2000 struct nm_tcphdr {
2001 	uint16_t	source;
2002 	uint16_t	dest;
2003 	uint32_t	seq;
2004 	uint32_t	ack_seq;
2005 	uint8_t		doff;  /* Data offset + Reserved */
2006 	uint8_t		flags;
2007 	uint16_t	window;
2008 	uint16_t	check;
2009 	uint16_t	urg_ptr;
2010 };
2011 
2012 struct nm_udphdr {
2013 	uint16_t	source;
2014 	uint16_t	dest;
2015 	uint16_t	len;
2016 	uint16_t	check;
2017 };
2018 
2019 struct nm_ipv6hdr {
2020 	uint8_t		priority_version;
2021 	uint8_t		flow_lbl[3];
2022 
2023 	uint16_t	payload_len;
2024 	uint8_t		nexthdr;
2025 	uint8_t		hop_limit;
2026 
2027 	uint8_t		saddr[16];
2028 	uint8_t		daddr[16];
2029 };
2030 
2031 /* Type used to store a checksum (in host byte order) that hasn't been
2032  * folded yet.
2033  */
2034 #define rawsum_t uint32_t
2035 
2036 rawsum_t nm_os_csum_raw(uint8_t *data, size_t len, rawsum_t cur_sum);
2037 uint16_t nm_os_csum_ipv4(struct nm_iphdr *iph);
2038 void nm_os_csum_tcpudp_ipv4(struct nm_iphdr *iph, void *data,
2039 		      size_t datalen, uint16_t *check);
2040 void nm_os_csum_tcpudp_ipv6(struct nm_ipv6hdr *ip6h, void *data,
2041 		      size_t datalen, uint16_t *check);
2042 uint16_t nm_os_csum_fold(rawsum_t cur_sum);
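
/*
 * Usage sketch (not compiled): a raw checksum is accumulated over one or
 * more buffers and folded once at the end; 'hdr', 'payload' and 'csum'
 * are hypothetical locals.
 */
#if 0
	rawsum_t sum = 0;

	sum = nm_os_csum_raw(hdr, hdrlen, sum);
	sum = nm_os_csum_raw(payload, paylen, sum);
	csum = nm_os_csum_fold(sum);	/* final 16-bit checksum */
#endif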
2043 
2044 void bdg_mismatch_datapath(struct netmap_vp_adapter *na,
2045 			   struct netmap_vp_adapter *dst_na,
2046 			   const struct nm_bdg_fwd *ft_p,
2047 			   struct netmap_ring *dst_ring,
2048 			   u_int *j, u_int lim, u_int *howmany);
2049 
2050 /* persistent virtual port routines */
2051 int nm_os_vi_persist(const char *, struct ifnet **);
2052 void nm_os_vi_detach(struct ifnet *);
2053 void nm_os_vi_init_index(void);
2054 
2055 /*
2056  * kernel thread routines
2057  */
2058 struct nm_kctx; /* OS-specific kernel context - opaque */
2059 typedef void (*nm_kctx_worker_fn_t)(void *data, int is_kthread);
2060 typedef void (*nm_kctx_notify_fn_t)(void *data);
2061 
2062 /* kthread configuration */
2063 struct nm_kctx_cfg {
2064 	long			type;		/* kthread type/identifier */
2065 	nm_kctx_worker_fn_t	worker_fn;	/* worker function */
2066 	void			*worker_private;/* worker parameter */
2067 	nm_kctx_notify_fn_t	notify_fn;	/* notify function */
2068 	int			attach_user;	/* attach kthread to user process */
2069 	int			use_kthread;	/* use a kthread for the context */
2070 };
2071 /* kernel context routines */
2072 struct nm_kctx *nm_os_kctx_create(struct nm_kctx_cfg *cfg,
2073 					unsigned int cfgtype,
2074 					void *opaque);
2075 int nm_os_kctx_worker_start(struct nm_kctx *);
2076 void nm_os_kctx_worker_stop(struct nm_kctx *);
2077 void nm_os_kctx_destroy(struct nm_kctx *);
2078 void nm_os_kctx_worker_wakeup(struct nm_kctx *nmk);
2079 void nm_os_kctx_send_irq(struct nm_kctx *);
2080 void nm_os_kctx_worker_setaff(struct nm_kctx *, int);
2081 u_int nm_os_ncpus(void);
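
/*
 * Sketch (not compiled) of the life cycle of a kernel context; the worker
 * function 'my_worker' and its argument 'priv' are hypothetical, and the
 * cfgtype/opaque arguments are left at their simplest values.
 */
#if 0
	struct nm_kctx_cfg cfg;
	struct nm_kctx *kctx;

	memset(&cfg, 0, sizeof(cfg));
	cfg.worker_fn = my_worker;
	cfg.worker_private = priv;
	cfg.use_kthread = 1;
	kctx = nm_os_kctx_create(&cfg, 0 /* cfgtype */, NULL /* opaque */);
	if (kctx != NULL) {
		if (nm_os_kctx_worker_start(kctx) == 0) {
			nm_os_kctx_worker_wakeup(kctx);	/* kick the worker once */
			/* ... */
			nm_os_kctx_worker_stop(kctx);
		}
		nm_os_kctx_destroy(kctx);
	}
#endif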
2082 
2083 #ifdef WITH_PTNETMAP_HOST
2084 /*
2085  * netmap adapter for host ptnetmap ports
2086  */
2087 struct netmap_pt_host_adapter {
2088 	struct netmap_adapter up;
2089 
2090 	/* the passed-through adapter */
2091 	struct netmap_adapter *parent;
2092 	/* parent->na_flags, saved at NETMAP_PT_HOST_CREATE time,
2093 	 * and restored at NETMAP_PT_HOST_DELETE time */
2094 	uint32_t parent_na_flags;
2095 
2096 	int (*parent_nm_notify)(struct netmap_kring *kring, int flags);
2097 	void *ptns;
2098 };
2099 /* ptnetmap HOST routines */
2100 int netmap_get_pt_host_na(struct nmreq *nmr, struct netmap_adapter **na,
2101 		struct netmap_mem_d * nmd, int create);
2102 int ptnetmap_ctl(struct nmreq *nmr, struct netmap_adapter *na);
2103 static inline int
2104 nm_ptnetmap_host_on(struct netmap_adapter *na)
2105 {
2106 	return na && na->na_flags & NAF_PTNETMAP_HOST;
2107 }
2108 #else /* !WITH_PTNETMAP_HOST */
2109 #define netmap_get_pt_host_na(nmr, _2, _3, _4) \
2110 	((nmr)->nr_flags & (NR_PTNETMAP_HOST) ? EOPNOTSUPP : 0)
2111 #define ptnetmap_ctl(_1, _2)   EINVAL
2112 #define nm_ptnetmap_host_on(_1)   EINVAL
2113 #endif /* !WITH_PTNETMAP_HOST */
2114 
2115 #ifdef WITH_PTNETMAP_GUEST
2116 /* ptnetmap GUEST routines */
2117 
2118 /*
2119  * netmap adapter for guest ptnetmap ports
2120  */
2121 struct netmap_pt_guest_adapter {
2122 	/* The netmap adapter to be used by netmap applications.
2123 	 * This field must be the first, to allow upcast. */
2124 	struct netmap_hw_adapter hwup;
2125 
2126 	/* The netmap adapter to be used by the driver. */
2127 	struct netmap_hw_adapter dr;
2128 
2129 	void *csb;
2130 
2131 	/* Reference counter to track users of backend netmap port: the
2132 	 * network stack and netmap clients.
2133 	 * Used to decide when we need (de)allocate krings/rings and
2134 	 * start (stop) ptnetmap kthreads. */
2135 	int backend_regifs;
2136 
2137 };
2138 
2139 int netmap_pt_guest_attach(struct netmap_adapter *na, void *csb,
2140 			   unsigned int nifp_offset, unsigned int memid);
2141 struct ptnet_ring;
2142 bool netmap_pt_guest_txsync(struct ptnet_ring *ptring, struct netmap_kring *kring,
2143 			    int flags);
2144 bool netmap_pt_guest_rxsync(struct ptnet_ring *ptring, struct netmap_kring *kring,
2145 			    int flags);
2146 int ptnet_nm_krings_create(struct netmap_adapter *na);
2147 void ptnet_nm_krings_delete(struct netmap_adapter *na);
2148 void ptnet_nm_dtor(struct netmap_adapter *na);
2149 #endif /* WITH_PTNETMAP_GUEST */
2150 
2151 #endif /* _NET_NETMAP_KERN_H_ */
2152