/*
 * Copyright (C) 2013 Universita` di Pisa. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/module.h>
#include <sys/errno.h>
#include <sys/param.h>	/* defines used in kernel.h */
#include <sys/kernel.h>	/* types used in module initialization */
#include <sys/conf.h>	/* DEV_MODULE */

#include <sys/devfs.h>

#include <vm/vm.h>	/* vtophys */
#include <vm/pmap.h>	/* vtophys */
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_page2.h>
#include <vm/vm_pager.h>

#include <sys/malloc.h>
#include <sys/socket.h>	/* sockaddrs */
#include <sys/event.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/ifq_var.h>
#include <sys/bus.h>	/* bus_dmamap_* */

#include <net/netmap.h>
#include <net/netmap/netmap_kern.h>
#include <net/netmap/netmap_mem2.h>


/* ======================== FREEBSD-SPECIFIC ROUTINES ================== */

/*
 * Intercept the rx routine in the standard device driver.
 * The second argument is non-zero to intercept, 0 to restore.
 */
int
netmap_catch_rx(struct netmap_adapter *na, int intercept)
{
	struct netmap_generic_adapter *gna = (struct netmap_generic_adapter *)na;
	struct ifnet *ifp = na->ifp;

	if (intercept) {
		if (gna->save_if_input) {
			D("cannot intercept again");
			return EINVAL;	/* already set */
		}
		gna->save_if_input = ifp->if_input;
		ifp->if_input = generic_rx_handler;
	} else {
		if (!gna->save_if_input) {
			D("cannot restore");
			return EINVAL;	/* not saved */
		}
		ifp->if_input = gna->save_if_input;
		gna->save_if_input = NULL;
	}

	return 0;
}

/*
 * Intercept the packet steering routine in the tx path,
 * so that we can decide which queue is used for an mbuf.
 * The second argument is non-zero to intercept, 0 to restore.
 * Currently a no-op: XXX see if FreeBSD has such a mechanism
 * (a hedged sketch of the FreeBSD-style approach follows below).
 */
void
netmap_catch_packet_steering(struct netmap_generic_adapter *na, int enable)
{
	if (enable) {
	} else {
	}
}
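/*
 * A hedged sketch of what the tx intercept could look like on a
 * FreeBSD-style ifnet, mirroring the save/restore pattern used by
 * netmap_catch_rx() above. if_transmit, save_if_transmit and
 * netmap_transmit are FreeBSD-side names that do not exist in this
 * port, so the sketch is kept under #if 0 and is not compiled.
 */
#if 0
static void
example_catch_tx(struct netmap_generic_adapter *gna, struct ifnet *ifp,
	int enable)
{
	if (enable) {
		/* save the driver's transmit entry point and swap ours in */
		gna->save_if_transmit = ifp->if_transmit;
		ifp->if_transmit = netmap_transmit;
	} else {
		/* restore the saved entry point */
		ifp->if_transmit = gna->save_if_transmit;
		gna->save_if_transmit = NULL;
	}
}
#endif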
/*
 * Transmit routine used by generic_netmap_txsync(). Returns 0 on success
 * and non-zero on error (which may mean packet drops or other errors).
 * addr and len identify the netmap buffer, m is the (preallocated)
 * mbuf to use for the transmission.
 *
 * We should add a reference to the mbuf so the m_freem() at the end
 * of the transmission does not consume resources.
 *
 * On FreeBSD, and on multiqueue cards, we can force the queue using
 *	if ((m->m_flags & M_FLOWID) != 0)
 *		i = m->m_pkthdr.flowid % adapter->num_queues;
 *	else
 *		i = curcpu % adapter->num_queues;
 * (see the sketch after this function).
 */
int
generic_xmit_frame(struct ifnet *ifp, struct mbuf *m,
	void *addr, u_int len, u_int ring_nr)
{
	int ret;

	m->m_len = m->m_pkthdr.len = 0;

	/* copy data from the netmap buffer into the mbuf */
	m_copyback(m, 0, len, addr);

#if 0
	/* increment the refcount; we are alone, so we can skip the atomic */
	atomic_fetchadd_int(m->m_ext.ref_cnt, 1);
	m->m_flags |= M_FLOWID;
#endif
	m->m_pkthdr.hash = ring_nr;	/* XXX probably not accurate */
	m->m_pkthdr.rcvif = ifp;	/* used for tx notification */
	ret = ifq_dispatch(ifp, m, NULL);
	return ret;
}
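/*
 * A hedged, self-contained version of the queue selection quoted in the
 * comment above, assuming a FreeBSD-style multiqueue driver. M_FLOWID,
 * m_pkthdr.flowid and curcpu are FreeBSD names, and num_queues stands in
 * for the driver's queue count, so the sketch stays under #if 0.
 */
#if 0
static u_int
example_pick_tx_queue(struct mbuf *m, u_int num_queues)
{
	u_int i;

	if ((m->m_flags & M_FLOWID) != 0)
		i = m->m_pkthdr.flowid % num_queues;	/* steer by flow id */
	else
		i = curcpu % num_queues;	/* fall back to the current CPU */
	return i;
}
#endif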
/*
 * The following two functions are empty until we have a generic
 * way to extract the information from the ifp.
 */
int
generic_find_num_desc(struct ifnet *ifp, unsigned int *tx, unsigned int *rx)
{
	D("called");
	return 0;
}

void
generic_find_num_queues(struct ifnet *ifp, u_int *txq, u_int *rxq)
{
	D("called");
	*txq = 1;
	*rxq = 1;
}

/* interrupt mitigation stubs: no mitigation on this platform yet */
void
netmap_mitigation_init(struct netmap_generic_adapter *na)
{
	ND("called");
	na->mit_pending = 0;
}

void
netmap_mitigation_start(struct netmap_generic_adapter *na)
{
	ND("called");
}

void
netmap_mitigation_restart(struct netmap_generic_adapter *na)
{
	ND("called");
}

int
netmap_mitigation_active(struct netmap_generic_adapter *na)
{
	ND("called");
	return 0;
}

void
netmap_mitigation_cleanup(struct netmap_generic_adapter *na)
{
	ND("called");
}


/*
 * In order to track whether pages are still mapped, we hook into
 * the standard cdev_pager and intercept the constructor and
 * destructor.
 */

struct netmap_vm_handle_t {
	struct cdev		*dev;
	struct netmap_priv_d	*priv;
};

static int
netmap_dev_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
	vm_ooffset_t foff, struct ucred *cred, u_short *color)
{
	struct netmap_vm_handle_t *vmh = handle;

	(void)vmh;	/* unused while dev_ref() is disabled */
	D("handle %p size %jd prot %d foff %jd",
		handle, (intmax_t)size, prot, (intmax_t)foff);
#if 0
	dev_ref(vmh->dev);
#endif
	return 0;
}

static void
netmap_dev_pager_dtor(void *handle)
{
	struct netmap_vm_handle_t *vmh = handle;
	struct cdev *dev = vmh->dev;
	struct netmap_priv_d *priv = vmh->priv;

	(void)dev;	/* unused while dev_rel() is disabled */
	D("handle %p", handle);
	netmap_dtor(priv);
	kfree(vmh, M_DEVBUF);
#if 0
	dev_rel(dev);
#endif
}

MALLOC_DEFINE(M_FICT_PAGES, "fict_pages", "fictitious pages for netmap");

/*
 * Local substitutes for the FreeBSD vm_page_getfake()/vm_page_updatefake()
 * helpers: allocate (or retarget) a fictitious page that maps the given
 * physical address.
 */
static inline vm_page_t
vm_page_getfake(vm_paddr_t paddr, vm_memattr_t memattr)
{
	vm_page_t m;

	m = kmalloc(sizeof(struct vm_page), M_FICT_PAGES,
		M_WAITOK | M_ZERO);
	vm_page_initfake(m, paddr, memattr);
	return (m);
}

static inline void
vm_page_updatefake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr)
{
	KASSERT((m->flags & PG_FICTITIOUS) != 0,
		("vm_page_updatefake: bad page %p", m));
	m->phys_addr = paddr;
	pmap_page_set_memattr(m, memattr);
}

static int
netmap_dev_pager_fault(vm_object_t object, vm_ooffset_t offset,
	int prot, vm_page_t *mres)
{
	struct netmap_vm_handle_t *vmh = object->handle;
	struct netmap_priv_d *priv = vmh->priv;
	vm_paddr_t paddr;
	vm_page_t page;
	vm_memattr_t memattr;
	vm_pindex_t pidx;

	ND("object %p offset %jd prot %d mres %p",
		object, (intmax_t)offset, prot, mres);
	memattr = object->memattr;
	pidx = OFF_TO_IDX(offset);
	paddr = netmap_mem_ofstophys(priv->np_mref, offset);
	if (paddr == 0)
		return VM_PAGER_FAIL;

	if (((*mres)->flags & PG_FICTITIOUS) != 0) {
		/*
		 * If the passed-in result page is a fake page, update it
		 * with the new physical address.
		 */
		page = *mres;
		vm_page_updatefake(page, paddr, memattr);
	} else {
		/*
		 * Replace the passed-in reqpage page with our own fake page
		 * and free up all of the original pages.
		 */
#ifndef VM_OBJECT_WUNLOCK	/* FreeBSD < 10.x */
#define VM_OBJECT_WUNLOCK	VM_OBJECT_UNLOCK
#define VM_OBJECT_WLOCK		VM_OBJECT_LOCK
#endif /* VM_OBJECT_WUNLOCK */

		VM_OBJECT_WUNLOCK(object);
		page = vm_page_getfake(paddr, memattr);
		VM_OBJECT_WLOCK(object);
		vm_page_free(*mres);
		*mres = page;
		vm_page_insert(page, object, pidx);
	}
	page->valid = VM_PAGE_BITS_ALL;
	return (VM_PAGER_OK);
}
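/*
 * For contrast, a hedged sketch of the same cdev_pg_fault contract for a
 * hypothetical device backed by a single physically contiguous buffer,
 * where the offset-to-physical translation is a plain addition.
 * example_buf_paddr and example_buf_size are made-up globals, so the
 * sketch is kept under #if 0.
 */
#if 0
static vm_paddr_t example_buf_paddr;	/* physical base of the buffer */
static vm_size_t example_buf_size;	/* size of the buffer */

static int
example_contig_pager_fault(vm_object_t object, vm_ooffset_t offset,
	int prot, vm_page_t *mres)
{
	vm_paddr_t paddr;
	vm_page_t page;

	if (offset >= example_buf_size)
		return VM_PAGER_FAIL;
	paddr = example_buf_paddr + offset;

	if (((*mres)->flags & PG_FICTITIOUS) != 0) {
		/* retarget the existing fake page */
		page = *mres;
		vm_page_updatefake(page, paddr, object->memattr);
	} else {
		/* swap in a fresh fake page, as in netmap_dev_pager_fault() */
		VM_OBJECT_WUNLOCK(object);
		page = vm_page_getfake(paddr, object->memattr);
		VM_OBJECT_WLOCK(object);
		vm_page_free(*mres);
		*mres = page;
		vm_page_insert(page, object, OFF_TO_IDX(offset));
	}
	page->valid = VM_PAGE_BITS_ALL;
	return (VM_PAGER_OK);
}
#endif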
static struct cdev_pager_ops netmap_cdev_pager_ops = {
	.cdev_pg_ctor = netmap_dev_pager_ctor,
	.cdev_pg_dtor = netmap_dev_pager_dtor,
	.cdev_pg_fault = netmap_dev_pager_fault,
};


static int
netmap_mmap_single(struct dev_mmap_single_args *ap)
{
	int error;
	struct cdev *cdev = ap->a_head.a_dev;
	vm_ooffset_t *foff = ap->a_offset;
	vm_object_t *objp = ap->a_object;
	vm_size_t objsize = ap->a_size;
	struct netmap_vm_handle_t *vmh;
	struct netmap_priv_d *priv;
	int prot = ap->a_nprot;
	vm_object_t obj;

	D("cdev %p foff %jd size %jd objp %p prot %d", cdev,
		(intmax_t)*foff, (intmax_t)objsize, objp, prot);

	vmh = kmalloc(sizeof(struct netmap_vm_handle_t), M_DEVBUF,
		M_NOWAIT | M_ZERO);
	if (vmh == NULL)
		return ENOMEM;
	vmh->dev = cdev;

	NMG_LOCK();
	error = devfs_get_cdevpriv(ap->a_fp, (void **)&priv);
	if (error)
		goto err_unlock;
	vmh->priv = priv;
	priv->np_refcount++;
	NMG_UNLOCK();

	error = netmap_get_memory(priv);
	if (error)
		goto err_deref;

	obj = cdev_pager_allocate(vmh, OBJT_DEVICE,
		&netmap_cdev_pager_ops, objsize, prot,
		*foff, NULL);
	if (obj == NULL) {
		D("cdev_pager_allocate failed");
		error = EINVAL;
		goto err_deref;
	}

	*objp = obj;
	return 0;

err_deref:
	NMG_LOCK();
	priv->np_refcount--;
err_unlock:
	NMG_UNLOCK();
	kfree(vmh, M_DEVBUF);
	return error;
}


/* XXX can we remove this? */
static int
netmap_close(struct dev_close_args *ap)
{
	if (netmap_verbose)
		D("dev %p fflag 0x%x devtype %d",
			ap->a_head.a_dev, ap->a_fflag, ap->a_devtype);
	return 0;
}


static int
netmap_open(struct dev_open_args *ap)
{
	struct netmap_priv_d *priv;
	int error;

	/* XXX wait or nowait? */
	priv = kmalloc(sizeof(struct netmap_priv_d), M_DEVBUF,
		M_NOWAIT | M_ZERO);
	if (priv == NULL)
		return ENOMEM;

	error = devfs_set_cdevpriv(ap->a_fp, priv, netmap_dtor);
	if (error) {
		/* no dtor was registered, so free priv ourselves */
		kfree(priv, M_DEVBUF);
		return error;
	}

	priv->np_refcount = 1;

	return 0;
}


struct dev_ops netmap_cdevsw = {
	{ "netmap", 0, 0 },
	.d_open = netmap_open,
	.d_mmap_single = netmap_mmap_single,
	.d_ioctl = netmap_ioctl,
	.d_kqfilter = netmap_kqfilter,
	.d_close = netmap_close,
};


/*
 * Kernel entry point.
 *
 * Initialize/finalize the module and return.
 *
 * Return 0 on success, errno on failure.
 */
static int
netmap_loader(__unused struct module *module, int event, __unused void *arg)
{
	int error = 0;

	switch (event) {
	case MOD_LOAD:
		error = netmap_init();
		break;

	case MOD_UNLOAD:
		netmap_fini();
		break;

	default:
		error = EOPNOTSUPP;
		break;
	}

	return (error);
}


DEV_MODULE(netmap, netmap_loader, NULL);
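/*
 * A hedged userspace sketch of the path exercised by the entry points
 * above: open() lands in netmap_open(), NIOCREGIF goes through
 * netmap_ioctl(), and mmap() reaches netmap_mmap_single(), whose pager
 * then satisfies page faults on the shared region. Error handling is
 * omitted and the nmreq fields are those of the classic netmap API in
 * <net/netmap.h>; this is kernel code, so the fragment stays under #if 0.
 */
#if 0
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <net/netmap.h>
#include <net/netmap_user.h>

static void *
example_map_netmap(const char *ifname)
{
	struct nmreq req;
	int fd;

	fd = open("/dev/netmap", O_RDWR);	/* netmap_open() */
	memset(&req, 0, sizeof(req));
	req.nr_version = NETMAP_API;
	strncpy(req.nr_name, ifname, sizeof(req.nr_name));
	ioctl(fd, NIOCREGIF, &req);		/* netmap_ioctl() */
	/* netmap_mmap_single(); faults served by netmap_dev_pager_fault() */
	return mmap(NULL, req.nr_memsize, PROT_READ | PROT_WRITE,
	    MAP_SHARED, fd, 0);
}
#endif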