/*
 * Copyright (C) 2013 Universita` di Pisa. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *   1. Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *   2. Redistributions in binary form must reproduce the above copyright
 *      notice, this list of conditions and the following disclaimer in the
 *      documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/module.h>
#include <sys/errno.h>
#include <sys/param.h>  /* defines used in kernel.h */
#include <sys/kernel.h> /* types used in module initialization */
#include <sys/conf.h>	/* DEV_MODULE */

#include <sys/devfs.h>

#include <vm/vm.h>      /* vtophys */
#include <vm/pmap.h>    /* vtophys */
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_page2.h>
#include <vm/vm_pager.h>


#include <sys/malloc.h>
#include <sys/socket.h> /* sockaddrs */
#include <sys/event.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/ifq_var.h>
#include <sys/bus.h>	/* bus_dmamap_* */

#include <net/netmap/netmap.h>
#include <net/netmap/netmap_kern.h>
#include <net/netmap/netmap_mem2.h>


/* ======================== FREEBSD-SPECIFIC ROUTINES ================== */

/*
 * Intercept the rx routine in the standard device driver.
 * The second argument is non-zero to intercept, 0 to restore.
 */
int
netmap_catch_rx(struct netmap_adapter *na, int intercept)
{
	struct netmap_generic_adapter *gna = (struct netmap_generic_adapter *)na;
	struct ifnet *ifp = na->ifp;

	if (intercept) {
		if (gna->save_if_input) {
			D("cannot intercept again");
			return EINVAL; /* already set */
		}
		gna->save_if_input = ifp->if_input;
		ifp->if_input = generic_rx_handler;
	} else {
		if (!gna->save_if_input) {
			D("cannot restore");
			return EINVAL;  /* not saved */
		}
		ifp->if_input = gna->save_if_input;
		gna->save_if_input = NULL;
	}

	return 0;
}
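
/*
 * Usage sketch (illustration only): the generic adapter flips
 * interception on register/unregister, roughly as netmap_generic.c
 * does:
 *
 *	if (onoff)
 *		error = netmap_catch_rx(na, 1);	// divert ifp->if_input
 *	else
 *		netmap_catch_rx(na, 0);		// restore the saved handler
 *
 * While diverted, incoming mbufs reach generic_rx_handler() instead
 * of the host stack.
 */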

/*
 * Intercept the packet steering routine in the tx path,
 * so that we can decide which queue is used for an mbuf.
 * The second argument is non-zero to intercept, 0 to restore.
 *
 * XXX see if FreeBSD has such a mechanism
 */
void
netmap_catch_packet_steering(struct netmap_generic_adapter *na, int enable)
{
	/* Nothing to do yet; see the XXX above. */
}

/*
 * Transmit routine used by generic_netmap_txsync(). Returns 0 on success
 * and non-zero on error (which may be a packet drop or another failure).
 * addr and len identify the netmap buffer, m is the (preallocated)
 * mbuf to use for the transmission.
 *
 * We should add a reference to the mbuf so the m_freem() at the end
 * of the transmission does not consume resources.
 *
 * On FreeBSD, and on multiqueue cards, we can force the queue using
 *      if ((m->m_flags & M_FLOWID) != 0)
 *              i = m->m_pkthdr.flowid % adapter->num_queues;
 *      else
 *              i = curcpu % adapter->num_queues;
 */
int
generic_xmit_frame(struct ifnet *ifp, struct mbuf *m,
	void *addr, u_int len, u_int ring_nr)
{
	int ret;

	m->m_len = m->m_pkthdr.len = 0;

	/* copy data to the mbuf */
	ret = m_copyback2(m, 0, len, addr, M_NOWAIT);
	if (ret != 0)
		return ret;

#if 0
	/* increment refcount; we are alone, so we can skip the atomic */
	atomic_fetchadd_int(m->m_ext.ref_cnt, 1);
	m->m_flags |= M_FLOWID;
#endif
	m->m_pkthdr.hash = ring_nr;	/* XXX probably not accurate */
	m->m_pkthdr.rcvif = ifp;	/* used for tx notification */
	ret = ifq_dispatch(ifp, m, NULL);
	return ret;
}
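
/*
 * Usage sketch (hypothetical, simplified from the generic txsync
 * logic): the caller walks the netmap ring and hands each slot to
 * this routine, keeping one preallocated mbuf per slot.  The names
 * `kring', `ring', `lim' and `tx_pool' follow the conventions of the
 * generic adapter code.
 *
 *	u_int nm_i = kring->nr_hwcur;
 *	while (nm_i != ring->cur) {
 *		struct netmap_slot *slot = &ring->slot[nm_i];
 *
 *		if (generic_xmit_frame(ifp, gna->tx_pool[nm_i],
 *		    NMB(slot), slot->len, ring_nr))
 *			break;	// queue full: stop, retry on next txsync
 *		nm_i = nm_next(nm_i, lim);
 *	}
 */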

/*
 * The following two functions are empty until we have a generic
 * way to extract the info from the ifp.
 */
int
generic_find_num_desc(struct ifnet *ifp, unsigned int *tx, unsigned int *rx)
{
	D("called");
	return 0;
}

void
generic_find_num_queues(struct ifnet *ifp, u_int *txq, u_int *rxq)
{
	D("called");
	*txq = 1;
	*rxq = 1;
}

/*
 * Interrupt mitigation hooks used by the generic adapter.  These are
 * stubs for now: no timer is armed, so notifications are delivered
 * immediately.
 */
void
netmap_mitigation_init(struct netmap_generic_adapter *na)
{
	ND("called");
	na->mit_pending = 0;
}

void
netmap_mitigation_start(struct netmap_generic_adapter *na)
{
	ND("called");
}

void
netmap_mitigation_restart(struct netmap_generic_adapter *na)
{
	ND("called");
}

int
netmap_mitigation_active(struct netmap_generic_adapter *na)
{
	ND("called");
	return 0;
}

void
netmap_mitigation_cleanup(struct netmap_generic_adapter *na)
{
	ND("called");
}
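
/*
 * Sketch of what a timer-based implementation could look like
 * (assumption: a `struct callout mit_timer' field and an expiry
 * callback `mit_expire_cb', neither of which exists here yet):
 *
 *	void
 *	netmap_mitigation_start(struct netmap_generic_adapter *na)
 *	{
 *		callout_reset(&na->mit_timer, 1, mit_expire_cb, na);
 *	}
 *
 *	int
 *	netmap_mitigation_active(struct netmap_generic_adapter *na)
 *	{
 *		return callout_pending(&na->mit_timer);
 *	}
 *
 * While the callout is pending, RX notifications accumulate in
 * na->mit_pending and are delivered in one batch when the timer
 * fires.
 */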


/*
 * In order to track whether pages are still mapped, we hook into
 * the standard cdev_pager and intercept the constructor and
 * destructor.
 */

struct netmap_vm_handle_t {
	struct cdev		*dev;
	struct netmap_priv_d	*priv;
};

static int
netmap_dev_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t foff, struct ucred *cred, u_short *color)
{
	struct netmap_vm_handle_t *vmh = handle;

	(void)vmh;
	D("handle %p size %jd prot %d foff %jd",
	    handle, (intmax_t)size, prot, (intmax_t)foff);
#if 0
	dev_ref(vmh->dev);
#endif
	return 0;
}


static void
netmap_dev_pager_dtor(void *handle)
{
	struct netmap_vm_handle_t *vmh = handle;
	struct cdev *dev = vmh->dev;
	struct netmap_priv_d *priv = vmh->priv;

	(void)dev;
	D("handle %p", handle);
	netmap_dtor(priv);
	kfree(vmh, M_DEVBUF);
#if 0
	dev_rel(dev);
#endif
}

MALLOC_DEFINE(M_FICT_PAGES, "fict_pages", "netmap fictitious pages");

/*
 * Local helpers: DragonFly does not provide the vm_page_getfake() /
 * vm_page_updatefake() found on FreeBSD, so define minimal versions
 * here on top of vm_page_initfake().
 */
static inline vm_page_t
vm_page_getfake(vm_paddr_t paddr, vm_memattr_t memattr)
{
	vm_page_t m;

	m = kmalloc(sizeof(struct vm_page), M_FICT_PAGES, M_WAITOK | M_ZERO);
	vm_page_initfake(m, paddr, memattr);
	return (m);
}

/* Point an existing fictitious page at a new physical address. */
static inline void
vm_page_updatefake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr)
{
	KASSERT((m->flags & PG_FICTITIOUS) != 0,
	    ("vm_page_updatefake: bad page %p", m));
	m->phys_addr = paddr;
	pmap_page_set_memattr(m, memattr);
}

static int
netmap_dev_pager_fault(vm_object_t object, vm_ooffset_t offset,
	int prot, vm_page_t *mres)
{
	struct netmap_vm_handle_t *vmh = object->handle;
	struct netmap_priv_d *priv = vmh->priv;
	vm_paddr_t paddr;
	vm_page_t page;
	vm_memattr_t memattr;
	vm_pindex_t pidx;

	ND("object %p offset %jd prot %d mres %p",
	    object, (intmax_t)offset, prot, mres);
	memattr = object->memattr;
	pidx = OFF_TO_IDX(offset);
	paddr = netmap_mem_ofstophys(priv->np_mref, offset);
	if (paddr == 0)
		return VM_PAGER_FAIL;

	if (((*mres)->flags & PG_FICTITIOUS) != 0) {
		/*
		 * If the passed-in result page is a fake page, update it
		 * with the new physical address.
		 */
		page = *mres;
		vm_page_updatefake(page, paddr, memattr);
	} else {
		/*
		 * Replace the passed-in reqpage with our own fake page
		 * and free up all of the original pages.
		 */
#ifndef VM_OBJECT_WUNLOCK	/* FreeBSD < 10.x */
#define VM_OBJECT_WUNLOCK VM_OBJECT_UNLOCK
#define VM_OBJECT_WLOCK	VM_OBJECT_LOCK
#endif /* VM_OBJECT_WUNLOCK */

		VM_OBJECT_WUNLOCK(object);
		page = vm_page_getfake(paddr, memattr);
		VM_OBJECT_WLOCK(object);
		vm_page_free(*mres);
		*mres = page;
		vm_page_insert(page, object, pidx);
	}
	page->valid = VM_PAGE_BITS_ALL;
	return (VM_PAGER_OK);
}


static struct cdev_pager_ops netmap_cdev_pager_ops = {
	.cdev_pg_ctor = netmap_dev_pager_ctor,
	.cdev_pg_dtor = netmap_dev_pager_dtor,
	.cdev_pg_fault = netmap_dev_pager_fault,
};


static int
netmap_mmap_single(struct dev_mmap_single_args *ap)
{
	int error;
	struct cdev *cdev = ap->a_head.a_dev;
	vm_ooffset_t *foff = ap->a_offset;
	vm_object_t *objp = ap->a_object;
	vm_size_t objsize = ap->a_size;
	struct netmap_vm_handle_t *vmh;
	struct netmap_priv_d *priv;
	int prot = ap->a_nprot;
	vm_object_t obj;

	D("cdev %p foff %jd size %jd objp %p prot %d", cdev,
	    (intmax_t)*foff, (intmax_t)objsize, objp, prot);

	vmh = kmalloc(sizeof(struct netmap_vm_handle_t), M_DEVBUF,
	    M_NOWAIT | M_ZERO);
	if (vmh == NULL)
		return ENOMEM;
	vmh->dev = cdev;

	NMG_LOCK();
	error = devfs_get_cdevpriv(ap->a_fp, (void **)&priv);
	if (error)
		goto err_unlock;
	vmh->priv = priv;
	priv->np_refcount++;
	NMG_UNLOCK();

	error = netmap_get_memory(priv);
	if (error)
		goto err_deref;

	obj = cdev_pager_allocate(vmh, OBJT_DEVICE,
	    &netmap_cdev_pager_ops, objsize, prot,
	    *foff, NULL);
	if (obj == NULL) {
		D("cdev_pager_allocate failed");
		error = EINVAL;
		goto err_deref;
	}

	*objp = obj;
	return 0;

err_deref:
	NMG_LOCK();
	priv->np_refcount--;
err_unlock:
	NMG_UNLOCK();
	kfree(vmh, M_DEVBUF);
	return error;
}
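
/*
 * Userspace view (illustration only): this handler backs the mmap()
 * step of the usual netmap setup sequence, e.g.
 *
 *	struct nmreq req;
 *	void *mem;
 *	int fd;
 *
 *	fd = open("/dev/netmap", O_RDWR);
 *	bzero(&req, sizeof(req));
 *	req.nr_version = NETMAP_API;
 *	strlcpy(req.nr_name, "em0", sizeof(req.nr_name));
 *	ioctl(fd, NIOCREGIF, &req);		// attach to the NIC
 *	mem = mmap(0, req.nr_memsize, PROT_READ | PROT_WRITE,
 *	    MAP_SHARED, fd, 0);			// lands in this handler
 *
 * Faults on `mem' are then resolved page by page in
 * netmap_dev_pager_fault() above.
 */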


/* XXX can we remove this? */
static int
netmap_close(struct dev_close_args *ap)
{
	if (netmap_verbose)
		D("dev %p fflag 0x%x devtype %d",
		    ap->a_head.a_dev, ap->a_fflag, ap->a_devtype);
	return 0;
}


static int
netmap_open(struct dev_open_args *ap)
{
	struct netmap_priv_d *priv;
	int error;

	/* XXX wait or nowait? */
	priv = kmalloc(sizeof(struct netmap_priv_d), M_DEVBUF,
	    M_NOWAIT | M_ZERO);
	if (priv == NULL)
		return ENOMEM;

	error = devfs_set_cdevpriv(ap->a_fp, priv, netmap_dtor);
	if (error)
		return error;

	priv->np_refcount = 1;

	return 0;
}


struct dev_ops netmap_cdevsw = {
	{ "netmap", 0, 0 },
	.d_open = netmap_open,
	.d_mmap_single = netmap_mmap_single,
	.d_ioctl = netmap_ioctl,
	.d_kqfilter = netmap_kqfilter,
	.d_close = netmap_close,
};
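
/*
 * Sketch (illustration only): netmap_init() is expected to register
 * this dev_ops with something like
 *
 *	make_dev(&netmap_cdevsw, 0, UID_ROOT, GID_WHEEL, 0660, "netmap");
 */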


/*
 * Kernel entry point.
 *
 * Initialize/finalize the module and return.
 *
 * Return 0 on success, errno on failure.
 */
static int
netmap_loader(__unused struct module *module, int event, __unused void *arg)
{
	int error = 0;

	switch (event) {
	case MOD_LOAD:
		error = netmap_init();
		break;

	case MOD_UNLOAD:
		netmap_fini();
		break;

	default:
		error = EOPNOTSUPP;
		break;
	}

	return (error);
}


DEV_MODULE(netmap, netmap_loader, NULL);
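
/*
 * The module can be loaded at boot (netmap_load="YES" in loader.conf)
 * or at runtime with kldload(8); either way netmap_loader() runs with
 * MOD_LOAD.
 */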