1 /* 2 * Copyright (C) 2012-2013 Matteo Landi, Luigi Rizzo, Giuseppe Lettieri. All rights reserved. 3 * 4 * Redistribution and use in source and binary forms, with or without 5 * modification, are permitted provided that the following conditions 6 * are met: 7 * 1. Redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer. 9 * 2. Redistributions in binary form must reproduce the above copyright 10 * notice, this list of conditions and the following disclaimer in the 11 * documentation and/or other materials provided with the distribution. 12 * 13 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 16 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 23 * SUCH DAMAGE. 24 */ 25 26 #include <sys/cdefs.h> /* prerequisite */ 27 __FBSDID("$FreeBSD: head/sys/dev/netmap/netmap.c 241723 2012-10-19 09:41:45Z glebius $"); 28 29 #include <sys/types.h> 30 #include <sys/malloc.h> 31 #include <sys/proc.h> 32 #include <vm/vm.h> /* vtophys */ 33 #include <vm/pmap.h> /* vtophys */ 34 #include <sys/socket.h> /* sockaddrs */ 35 #include <sys/sysctl.h> 36 #include <net/if.h> 37 #include <net/if_var.h> 38 #include <sys/bus.h> /* bus_dmamap_* */ 39 40 #include <net/netmap.h> 41 #include <net/netmap/netmap_kern.h> 42 #include <net/netmap/netmap_mem2.h> 43 44 #define NMA_LOCK_INIT(n) lockinit(&(n)->nm_mtx, "netmap memory allocator lock", 0, LK_CANRECURSE) 45 #define NMA_LOCK_DESTROY(n) lockuninit(&(n)->nm_mtx) 46 #define NMA_LOCK(n) lockmgr(&(n)->nm_mtx, LK_EXCLUSIVE) 47 #define NMA_UNLOCK(n) lockmgr(&(n)->nm_mtx, LK_RELEASE) 48 49 struct netmap_obj_params netmap_params[NETMAP_POOLS_NR] = { 50 [NETMAP_IF_POOL] = { 51 .size = 1024, 52 .num = 100, 53 }, 54 [NETMAP_RING_POOL] = { 55 .size = 9*PAGE_SIZE, 56 .num = 200, 57 }, 58 [NETMAP_BUF_POOL] = { 59 .size = 2048, 60 .num = NETMAP_BUF_MAX_NUM, 61 }, 62 }; 63 64 65 /* 66 * nm_mem is the memory allocator used for all physical interfaces 67 * running in netmap mode. 68 * Virtual (VALE) ports will have each its own allocator. 69 */ 70 static int netmap_mem_global_config(struct netmap_mem_d *nmd); 71 static int netmap_mem_global_finalize(struct netmap_mem_d *nmd); 72 static void netmap_mem_global_deref(struct netmap_mem_d *nmd); 73 struct netmap_mem_d nm_mem = { /* Our memory allocator. */ 74 .pools = { 75 [NETMAP_IF_POOL] = { 76 .name = "netmap_if", 77 .objminsize = sizeof(struct netmap_if), 78 .objmaxsize = 4096, 79 .nummin = 10, /* don't be stingy */ 80 .nummax = 10000, /* XXX very large */ 81 }, 82 [NETMAP_RING_POOL] = { 83 .name = "netmap_ring", 84 .objminsize = sizeof(struct netmap_ring), 85 .objmaxsize = 32*PAGE_SIZE, 86 .nummin = 2, 87 .nummax = 1024, 88 }, 89 [NETMAP_BUF_POOL] = { 90 .name = "netmap_buf", 91 .objminsize = 64, 92 .objmaxsize = 65536, 93 .nummin = 4, 94 .nummax = 1000000, /* one million! */ 95 }, 96 }, 97 .config = netmap_mem_global_config, 98 .finalize = netmap_mem_global_finalize, 99 .deref = netmap_mem_global_deref, 100 }; 101 102 103 // XXX logically belongs to nm_mem 104 struct lut_entry *netmap_buffer_lut; /* exported */ 105 106 /* blueprint for the private memory allocators */ 107 static int netmap_mem_private_config(struct netmap_mem_d *nmd); 108 static int netmap_mem_private_finalize(struct netmap_mem_d *nmd); 109 static void netmap_mem_private_deref(struct netmap_mem_d *nmd); 110 const struct netmap_mem_d nm_blueprint = { 111 .pools = { 112 [NETMAP_IF_POOL] = { 113 .name = "%s_if", 114 .objminsize = sizeof(struct netmap_if), 115 .objmaxsize = 4096, 116 .nummin = 1, 117 .nummax = 10, 118 }, 119 [NETMAP_RING_POOL] = { 120 .name = "%s_ring", 121 .objminsize = sizeof(struct netmap_ring), 122 .objmaxsize = 32*PAGE_SIZE, 123 .nummin = 2, 124 .nummax = 1024, 125 }, 126 [NETMAP_BUF_POOL] = { 127 .name = "%s_buf", 128 .objminsize = 64, 129 .objmaxsize = 65536, 130 .nummin = 4, 131 .nummax = 1000000, /* one million! */ 132 }, 133 }, 134 .config = netmap_mem_private_config, 135 .finalize = netmap_mem_private_finalize, 136 .deref = netmap_mem_private_deref, 137 138 .flags = NETMAP_MEM_PRIVATE, 139 }; 140 141 /* memory allocator related sysctls */ 142 143 #define STRINGIFY(x) #x 144 145 146 #define DECLARE_SYSCTLS(id, name) \ 147 SYSCTL_INT(_dev_netmap, OID_AUTO, name##_size, \ 148 CTLFLAG_RW, &netmap_params[id].size, 0, "Requested size of netmap " STRINGIFY(name) "s"); \ 149 SYSCTL_INT(_dev_netmap, OID_AUTO, name##_curr_size, \ 150 CTLFLAG_RD, &nm_mem.pools[id]._objsize, 0, "Current size of netmap " STRINGIFY(name) "s"); \ 151 SYSCTL_INT(_dev_netmap, OID_AUTO, name##_num, \ 152 CTLFLAG_RW, &netmap_params[id].num, 0, "Requested number of netmap " STRINGIFY(name) "s"); \ 153 SYSCTL_INT(_dev_netmap, OID_AUTO, name##_curr_num, \ 154 CTLFLAG_RD, &nm_mem.pools[id].objtotal, 0, "Current number of netmap " STRINGIFY(name) "s") 155 156 SYSCTL_DECL(_dev_netmap); 157 DECLARE_SYSCTLS(NETMAP_IF_POOL, if); 158 DECLARE_SYSCTLS(NETMAP_RING_POOL, ring); 159 DECLARE_SYSCTLS(NETMAP_BUF_POOL, buf); 160 161 /* 162 * First, find the allocator that contains the requested offset, 163 * then locate the cluster through a lookup table. 164 */ 165 vm_paddr_t 166 netmap_mem_ofstophys(struct netmap_mem_d* nmd, vm_ooffset_t offset) 167 { 168 int i; 169 vm_ooffset_t o = offset; 170 vm_paddr_t pa; 171 struct netmap_obj_pool *p; 172 173 NMA_LOCK(nmd); 174 p = nmd->pools; 175 176 for (i = 0; i < NETMAP_POOLS_NR; offset -= p[i].memtotal, i++) { 177 if (offset >= p[i].memtotal) 178 continue; 179 // now lookup the cluster's address 180 pa = p[i].lut[offset / p[i]._objsize].paddr + 181 offset % p[i]._objsize; 182 NMA_UNLOCK(nmd); 183 return pa; 184 } 185 /* this is only in case of errors */ 186 D("invalid ofs 0x%x out of 0x%x 0x%x 0x%x", (u_int)o, 187 p[NETMAP_IF_POOL].memtotal, 188 p[NETMAP_IF_POOL].memtotal 189 + p[NETMAP_RING_POOL].memtotal, 190 p[NETMAP_IF_POOL].memtotal 191 + p[NETMAP_RING_POOL].memtotal 192 + p[NETMAP_BUF_POOL].memtotal); 193 NMA_UNLOCK(nmd); 194 return 0; // XXX bad address 195 } 196 197 int 198 netmap_mem_get_info(struct netmap_mem_d* nmd, u_int* size, u_int *memflags) 199 { 200 int error = 0; 201 NMA_LOCK(nmd); 202 error = nmd->config(nmd); 203 if (error) 204 goto out; 205 if (nmd->flags & NETMAP_MEM_FINALIZED) { 206 *size = nmd->nm_totalsize; 207 } else { 208 int i; 209 *size = 0; 210 for (i = 0; i < NETMAP_POOLS_NR; i++) { 211 struct netmap_obj_pool *p = nmd->pools + i; 212 *size += (p->_numclusters * p->_clustsize); 213 } 214 } 215 *memflags = nmd->flags; 216 out: 217 NMA_UNLOCK(nmd); 218 return error; 219 } 220 221 /* 222 * we store objects by kernel address, need to find the offset 223 * within the pool to export the value to userspace. 224 * Algorithm: scan until we find the cluster, then add the 225 * actual offset in the cluster 226 */ 227 static ssize_t 228 netmap_obj_offset(struct netmap_obj_pool *p, const void *vaddr) 229 { 230 int i, k = p->_clustentries, n = p->objtotal; 231 ssize_t ofs = 0; 232 233 for (i = 0; i < n; i += k, ofs += p->_clustsize) { 234 const char *base = p->lut[i].vaddr; 235 ssize_t relofs = (const char *) vaddr - base; 236 237 if (relofs < 0 || relofs >= p->_clustsize) 238 continue; 239 240 ofs = ofs + relofs; 241 ND("%s: return offset %d (cluster %d) for pointer %p", 242 p->name, ofs, i, vaddr); 243 return ofs; 244 } 245 D("address %p is not contained inside any cluster (%s)", 246 vaddr, p->name); 247 return 0; /* An error occurred */ 248 } 249 250 /* Helper functions which convert virtual addresses to offsets */ 251 #define netmap_if_offset(n, v) \ 252 netmap_obj_offset(&(n)->pools[NETMAP_IF_POOL], (v)) 253 254 #define netmap_ring_offset(n, v) \ 255 ((n)->pools[NETMAP_IF_POOL].memtotal + \ 256 netmap_obj_offset(&(n)->pools[NETMAP_RING_POOL], (v))) 257 258 #define netmap_buf_offset(n, v) \ 259 ((n)->pools[NETMAP_IF_POOL].memtotal + \ 260 (n)->pools[NETMAP_RING_POOL].memtotal + \ 261 netmap_obj_offset(&(n)->pools[NETMAP_BUF_POOL], (v))) 262 263 264 ssize_t 265 netmap_mem_if_offset(struct netmap_mem_d *nmd, const void *addr) 266 { 267 ssize_t v; 268 NMA_LOCK(nmd); 269 v = netmap_if_offset(nmd, addr); 270 NMA_UNLOCK(nmd); 271 return v; 272 } 273 274 /* 275 * report the index, and use start position as a hint, 276 * otherwise buffer allocation becomes terribly expensive. 277 */ 278 static void * 279 netmap_obj_malloc(struct netmap_obj_pool *p, u_int len, uint32_t *start, uint32_t *index) 280 { 281 uint32_t i = 0; /* index in the bitmap */ 282 uint32_t mask, j; /* slot counter */ 283 void *vaddr = NULL; 284 285 if (len > p->_objsize) { 286 D("%s request size %d too large", p->name, len); 287 // XXX cannot reduce the size 288 return NULL; 289 } 290 291 if (p->objfree == 0) { 292 D("%s allocator: run out of memory", p->name); 293 return NULL; 294 } 295 if (start) 296 i = *start; 297 298 /* termination is guaranteed by p->free, but better check bounds on i */ 299 while (vaddr == NULL && i < p->bitmap_slots) { 300 uint32_t cur = p->bitmap[i]; 301 if (cur == 0) { /* bitmask is fully used */ 302 i++; 303 continue; 304 } 305 /* locate a slot */ 306 for (j = 0, mask = 1; (cur & mask) == 0; j++, mask <<= 1) 307 ; 308 309 p->bitmap[i] &= ~mask; /* mark object as in use */ 310 p->objfree--; 311 312 vaddr = p->lut[i * 32 + j].vaddr; 313 if (index) 314 *index = i * 32 + j; 315 } 316 ND("%s allocator: allocated object @ [%d][%d]: vaddr %p", i, j, vaddr); 317 318 if (start) 319 *start = i; 320 return vaddr; 321 } 322 323 324 /* 325 * free by index, not by address. This is slow, but is only used 326 * for a small number of objects (rings, nifp) 327 */ 328 static void 329 netmap_obj_free(struct netmap_obj_pool *p, uint32_t j) 330 { 331 if (j >= p->objtotal) { 332 D("invalid index %u, max %u", j, p->objtotal); 333 return; 334 } 335 p->bitmap[j / 32] |= (1 << (j % 32)); 336 p->objfree++; 337 return; 338 } 339 340 static void 341 netmap_obj_free_va(struct netmap_obj_pool *p, void *vaddr) 342 { 343 u_int i, j, n = p->numclusters; 344 345 for (i = 0, j = 0; i < n; i++, j += p->_clustentries) { 346 void *base = p->lut[i * p->_clustentries].vaddr; 347 ssize_t relofs = (ssize_t) vaddr - (ssize_t) base; 348 349 /* Given address, is out of the scope of the current cluster.*/ 350 if (vaddr < base || relofs >= p->_clustsize) 351 continue; 352 353 j = j + relofs / p->_objsize; 354 /* KASSERT(j != 0, ("Cannot free object 0")); */ 355 netmap_obj_free(p, j); 356 return; 357 } 358 D("address %p is not contained inside any cluster (%s)", 359 vaddr, p->name); 360 } 361 362 #define netmap_if_malloc(n, len) netmap_obj_malloc(&(n)->pools[NETMAP_IF_POOL], len, NULL, NULL) 363 #define netmap_if_free(n, v) netmap_obj_free_va(&(n)->pools[NETMAP_IF_POOL], (v)) 364 #define netmap_ring_malloc(n, len) netmap_obj_malloc(&(n)->pools[NETMAP_RING_POOL], len, NULL, NULL) 365 #define netmap_ring_free(n, v) netmap_obj_free_va(&(n)->pools[NETMAP_RING_POOL], (v)) 366 #define netmap_buf_malloc(n, _pos, _index) \ 367 netmap_obj_malloc(&(n)->pools[NETMAP_BUF_POOL], NETMAP_BDG_BUF_SIZE(n), _pos, _index) 368 369 370 /* Return the index associated to the given packet buffer */ 371 #define netmap_buf_index(n, v) \ 372 (netmap_obj_offset(&(n)->pools[NETMAP_BUF_POOL], (v)) / NETMAP_BDG_BUF_SIZE(n)) 373 374 375 /* Return nonzero on error */ 376 static int 377 netmap_new_bufs(struct netmap_mem_d *nmd, struct netmap_slot *slot, u_int n) 378 { 379 struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL]; 380 u_int i = 0; /* slot counter */ 381 uint32_t pos = 0; /* slot in p->bitmap */ 382 uint32_t index = 0; /* buffer index */ 383 384 for (i = 0; i < n; i++) { 385 void *vaddr = netmap_buf_malloc(nmd, &pos, &index); 386 if (vaddr == NULL) { 387 D("unable to locate empty packet buffer"); 388 goto cleanup; 389 } 390 slot[i].buf_idx = index; 391 slot[i].len = p->_objsize; 392 /* XXX setting flags=NS_BUF_CHANGED forces a pointer reload 393 * in the NIC ring. This is a hack that hides missing 394 * initializations in the drivers, and should go away. 395 */ 396 // slot[i].flags = NS_BUF_CHANGED; 397 } 398 399 ND("allocated %d buffers, %d available, first at %d", n, p->objfree, pos); 400 return (0); 401 402 cleanup: 403 while (i > 0) { 404 i--; 405 netmap_obj_free(p, slot[i].buf_idx); 406 } 407 bzero(slot, n * sizeof(slot[0])); 408 return (ENOMEM); 409 } 410 411 412 static void 413 netmap_free_buf(struct netmap_mem_d *nmd, uint32_t i) 414 { 415 struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL]; 416 417 if (i < 2 || i >= p->objtotal) { 418 D("Cannot free buf#%d: should be in [2, %d[", i, p->objtotal); 419 return; 420 } 421 netmap_obj_free(p, i); 422 } 423 424 static void 425 netmap_reset_obj_allocator(struct netmap_obj_pool *p) 426 { 427 428 if (p == NULL) 429 return; 430 if (p->bitmap) 431 kfree(p->bitmap, M_NETMAP); 432 p->bitmap = NULL; 433 if (p->lut) { 434 u_int i; 435 size_t sz = p->_clustsize; 436 437 for (i = 0; i < p->objtotal; i += p->_clustentries) { 438 if (p->lut[i].vaddr) 439 contigfree(p->lut[i].vaddr, sz, M_NETMAP); 440 } 441 bzero(p->lut, sizeof(struct lut_entry) * p->objtotal); 442 kfree(p->lut, M_NETMAP); 443 } 444 p->lut = NULL; 445 p->objtotal = 0; 446 p->memtotal = 0; 447 p->numclusters = 0; 448 p->objfree = 0; 449 } 450 451 /* 452 * Free all resources related to an allocator. 453 */ 454 static void 455 netmap_destroy_obj_allocator(struct netmap_obj_pool *p) 456 { 457 if (p == NULL) 458 return; 459 netmap_reset_obj_allocator(p); 460 } 461 462 /* 463 * We receive a request for objtotal objects, of size objsize each. 464 * Internally we may round up both numbers, as we allocate objects 465 * in small clusters multiple of the page size. 466 * We need to keep track of objtotal and clustentries, 467 * as they are needed when freeing memory. 468 * 469 * XXX note -- userspace needs the buffers to be contiguous, 470 * so we cannot afford gaps at the end of a cluster. 471 */ 472 473 474 /* call with NMA_LOCK held */ 475 static int 476 netmap_config_obj_allocator(struct netmap_obj_pool *p, u_int objtotal, u_int objsize) 477 { 478 int i; 479 u_int clustsize; /* the cluster size, multiple of page size */ 480 u_int clustentries; /* how many objects per entry */ 481 482 /* we store the current request, so we can 483 * detect configuration changes later */ 484 p->r_objtotal = objtotal; 485 p->r_objsize = objsize; 486 487 #define MAX_CLUSTSIZE (1<<17) 488 #define LINE_ROUND 64 489 if (objsize >= MAX_CLUSTSIZE) { 490 /* we could do it but there is no point */ 491 D("unsupported allocation for %d bytes", objsize); 492 return EINVAL; 493 } 494 /* make sure objsize is a multiple of LINE_ROUND */ 495 i = (objsize & (LINE_ROUND - 1)); 496 if (i) { 497 D("XXX aligning object by %d bytes", LINE_ROUND - i); 498 objsize += LINE_ROUND - i; 499 } 500 if (objsize < p->objminsize || objsize > p->objmaxsize) { 501 D("requested objsize %d out of range [%d, %d]", 502 objsize, p->objminsize, p->objmaxsize); 503 return EINVAL; 504 } 505 if (objtotal < p->nummin || objtotal > p->nummax) { 506 D("requested objtotal %d out of range [%d, %d]", 507 objtotal, p->nummin, p->nummax); 508 return EINVAL; 509 } 510 /* 511 * Compute number of objects using a brute-force approach: 512 * given a max cluster size, 513 * we try to fill it with objects keeping track of the 514 * wasted space to the next page boundary. 515 */ 516 for (clustentries = 0, i = 1;; i++) { 517 u_int delta, used = i * objsize; 518 if (used > MAX_CLUSTSIZE) 519 break; 520 delta = used % PAGE_SIZE; 521 if (delta == 0) { // exact solution 522 clustentries = i; 523 break; 524 } 525 if (delta > ( (clustentries*objsize) % PAGE_SIZE) ) 526 clustentries = i; 527 } 528 // D("XXX --- ouch, delta %d (bad for buffers)", delta); 529 /* compute clustsize and round to the next page */ 530 clustsize = clustentries * objsize; 531 i = (clustsize & (PAGE_SIZE - 1)); 532 if (i) 533 clustsize += PAGE_SIZE - i; 534 if (netmap_verbose) 535 D("objsize %d clustsize %d objects %d", 536 objsize, clustsize, clustentries); 537 538 /* 539 * The number of clusters is n = ceil(objtotal/clustentries) 540 * objtotal' = n * clustentries 541 */ 542 p->_clustentries = clustentries; 543 p->_clustsize = clustsize; 544 p->_numclusters = (objtotal + clustentries - 1) / clustentries; 545 546 /* actual values (may be larger than requested) */ 547 p->_objsize = objsize; 548 p->_objtotal = p->_numclusters * clustentries; 549 550 return 0; 551 } 552 553 554 /* call with NMA_LOCK held */ 555 static int 556 netmap_finalize_obj_allocator(struct netmap_obj_pool *p) 557 { 558 int i; /* must be signed */ 559 size_t n; 560 561 /* optimistically assume we have enough memory */ 562 p->numclusters = p->_numclusters; 563 p->objtotal = p->_objtotal; 564 565 n = sizeof(struct lut_entry) * p->objtotal; 566 p->lut = kmalloc(n, M_NETMAP, M_NOWAIT | M_ZERO); 567 if (p->lut == NULL) { 568 D("Unable to create lookup table (%d bytes) for '%s'", (int)n, p->name); 569 goto clean; 570 } 571 572 /* Allocate the bitmap */ 573 n = (p->objtotal + 31) / 32; 574 p->bitmap = kmalloc(sizeof(uint32_t) * n, M_NETMAP, M_NOWAIT | M_ZERO); 575 if (p->bitmap == NULL) { 576 D("Unable to create bitmap (%d entries) for allocator '%s'", (int)n, 577 p->name); 578 goto clean; 579 } 580 p->bitmap_slots = n; 581 582 /* 583 * Allocate clusters, init pointers and bitmap 584 */ 585 586 n = p->_clustsize; 587 for (i = 0; i < (int)p->objtotal;) { 588 int lim = i + p->_clustentries; 589 char *clust; 590 591 clust = contigmalloc(n, M_NETMAP, M_NOWAIT | M_ZERO, 592 (size_t)0, -1UL, PAGE_SIZE, 0); 593 if (clust == NULL) { 594 /* 595 * If we get here, there is a severe memory shortage, 596 * so halve the allocated memory to reclaim some. 597 */ 598 D("Unable to create cluster at %d for '%s' allocator", 599 i, p->name); 600 if (i < 2) /* nothing to halve */ 601 goto out; 602 lim = i / 2; 603 for (i--; i >= lim; i--) { 604 p->bitmap[ (i>>5) ] &= ~( 1 << (i & 31) ); 605 if (i % p->_clustentries == 0 && p->lut[i].vaddr) 606 contigfree(p->lut[i].vaddr, 607 n, M_NETMAP); 608 } 609 out: 610 p->objtotal = i; 611 /* we may have stopped in the middle of a cluster */ 612 p->numclusters = (i + p->_clustentries - 1) / p->_clustentries; 613 break; 614 } 615 for (; i < lim; i++, clust += p->_objsize) { 616 p->bitmap[ (i>>5) ] |= ( 1 << (i & 31) ); 617 p->lut[i].vaddr = clust; 618 p->lut[i].paddr = vtophys(clust); 619 } 620 } 621 p->objfree = p->objtotal; 622 p->memtotal = p->numclusters * p->_clustsize; 623 if (p->objfree == 0) 624 goto clean; 625 if (netmap_verbose) 626 D("Pre-allocated %d clusters (%d/%dKB) for '%s'", 627 p->numclusters, p->_clustsize >> 10, 628 p->memtotal >> 10, p->name); 629 630 return 0; 631 632 clean: 633 netmap_reset_obj_allocator(p); 634 return ENOMEM; 635 } 636 637 /* call with lock held */ 638 static int 639 netmap_memory_config_changed(struct netmap_mem_d *nmd) 640 { 641 int i; 642 643 for (i = 0; i < NETMAP_POOLS_NR; i++) { 644 if (nmd->pools[i].r_objsize != netmap_params[i].size || 645 nmd->pools[i].r_objtotal != netmap_params[i].num) 646 return 1; 647 } 648 return 0; 649 } 650 651 static void 652 netmap_mem_reset_all(struct netmap_mem_d *nmd) 653 { 654 int i; 655 D("resetting %p", nmd); 656 for (i = 0; i < NETMAP_POOLS_NR; i++) { 657 netmap_reset_obj_allocator(&nmd->pools[i]); 658 } 659 nmd->flags &= ~NETMAP_MEM_FINALIZED; 660 } 661 662 static int 663 netmap_mem_finalize_all(struct netmap_mem_d *nmd) 664 { 665 int i; 666 if (nmd->flags & NETMAP_MEM_FINALIZED) 667 return 0; 668 nmd->lasterr = 0; 669 nmd->nm_totalsize = 0; 670 for (i = 0; i < NETMAP_POOLS_NR; i++) { 671 nmd->lasterr = netmap_finalize_obj_allocator(&nmd->pools[i]); 672 if (nmd->lasterr) 673 goto error; 674 nmd->nm_totalsize += nmd->pools[i].memtotal; 675 } 676 /* buffers 0 and 1 are reserved */ 677 nmd->pools[NETMAP_BUF_POOL].objfree -= 2; 678 nmd->pools[NETMAP_BUF_POOL].bitmap[0] = ~3; 679 nmd->flags |= NETMAP_MEM_FINALIZED; 680 681 D("Have %d KB for interfaces, %d KB for rings and %d MB for buffers", 682 nmd->pools[NETMAP_IF_POOL].memtotal >> 10, 683 nmd->pools[NETMAP_RING_POOL].memtotal >> 10, 684 nmd->pools[NETMAP_BUF_POOL].memtotal >> 20); 685 686 D("Free buffers: %d", nmd->pools[NETMAP_BUF_POOL].objfree); 687 688 689 return 0; 690 error: 691 netmap_mem_reset_all(nmd); 692 return nmd->lasterr; 693 } 694 695 696 697 void 698 netmap_mem_private_delete(struct netmap_mem_d *nmd) 699 { 700 if (nmd == NULL) 701 return; 702 D("deleting %p", nmd); 703 if (nmd->refcount > 0) 704 D("bug: deleting mem allocator with refcount=%d!", nmd->refcount); 705 D("done deleting %p", nmd); 706 NMA_LOCK_DESTROY(nmd); 707 kfree(nmd, M_DEVBUF); 708 } 709 710 static int 711 netmap_mem_private_config(struct netmap_mem_d *nmd) 712 { 713 /* nothing to do, we are configured on creation 714 * and configuration never changes thereafter 715 */ 716 return 0; 717 } 718 719 static int 720 netmap_mem_private_finalize(struct netmap_mem_d *nmd) 721 { 722 int err; 723 NMA_LOCK(nmd); 724 nmd->refcount++; 725 err = netmap_mem_finalize_all(nmd); 726 NMA_UNLOCK(nmd); 727 return err; 728 729 } 730 731 static void 732 netmap_mem_private_deref(struct netmap_mem_d *nmd) 733 { 734 NMA_LOCK(nmd); 735 if (--nmd->refcount <= 0) 736 netmap_mem_reset_all(nmd); 737 NMA_UNLOCK(nmd); 738 } 739 740 struct netmap_mem_d * 741 netmap_mem_private_new(const char *name, u_int txr, u_int txd, u_int rxr, u_int rxd) 742 { 743 struct netmap_mem_d *d = NULL; 744 struct netmap_obj_params p[NETMAP_POOLS_NR]; 745 int i; 746 u_int maxd; 747 748 d = kmalloc(sizeof(struct netmap_mem_d), 749 M_DEVBUF, M_NOWAIT | M_ZERO); 750 if (d == NULL) 751 return NULL; 752 753 *d = nm_blueprint; 754 755 /* XXX the rest of the code assumes the stack rings are alwasy present */ 756 txr++; 757 rxr++; 758 p[NETMAP_IF_POOL].size = sizeof(struct netmap_if) + 759 sizeof(ssize_t) * (txr + rxr); 760 p[NETMAP_IF_POOL].num = 2; 761 maxd = (txd > rxd) ? txd : rxd; 762 p[NETMAP_RING_POOL].size = sizeof(struct netmap_ring) + 763 sizeof(struct netmap_slot) * maxd; 764 p[NETMAP_RING_POOL].num = txr + rxr; 765 p[NETMAP_BUF_POOL].size = 2048; /* XXX find a way to let the user choose this */ 766 p[NETMAP_BUF_POOL].num = rxr * (rxd + 2) + txr * (txd + 2); 767 768 D("req if %d*%d ring %d*%d buf %d*%d", 769 p[NETMAP_IF_POOL].num, 770 p[NETMAP_IF_POOL].size, 771 p[NETMAP_RING_POOL].num, 772 p[NETMAP_RING_POOL].size, 773 p[NETMAP_BUF_POOL].num, 774 p[NETMAP_BUF_POOL].size); 775 776 for (i = 0; i < NETMAP_POOLS_NR; i++) { 777 ksnprintf(d->pools[i].name, NETMAP_POOL_MAX_NAMSZ, 778 nm_blueprint.pools[i].name, 779 name); 780 if (netmap_config_obj_allocator(&d->pools[i], 781 p[i].num, p[i].size)) 782 goto error; 783 } 784 785 d->flags &= ~NETMAP_MEM_FINALIZED; 786 787 NMA_LOCK_INIT(d); 788 789 return d; 790 error: 791 netmap_mem_private_delete(d); 792 return NULL; 793 } 794 795 796 /* call with lock held */ 797 static int 798 netmap_mem_global_config(struct netmap_mem_d *nmd) 799 { 800 int i; 801 802 if (nmd->refcount) 803 /* already in use, we cannot change the configuration */ 804 goto out; 805 806 if (!netmap_memory_config_changed(nmd)) 807 goto out; 808 809 D("reconfiguring"); 810 811 if (nmd->flags & NETMAP_MEM_FINALIZED) { 812 /* reset previous allocation */ 813 for (i = 0; i < NETMAP_POOLS_NR; i++) { 814 netmap_reset_obj_allocator(&nmd->pools[i]); 815 } 816 nmd->flags &= ~NETMAP_MEM_FINALIZED; 817 } 818 819 for (i = 0; i < NETMAP_POOLS_NR; i++) { 820 nmd->lasterr = netmap_config_obj_allocator(&nmd->pools[i], 821 netmap_params[i].num, netmap_params[i].size); 822 if (nmd->lasterr) 823 goto out; 824 } 825 826 out: 827 828 return nmd->lasterr; 829 } 830 831 static int 832 netmap_mem_global_finalize(struct netmap_mem_d *nmd) 833 { 834 int err; 835 836 NMA_LOCK(nmd); 837 838 839 /* update configuration if changed */ 840 if (netmap_mem_global_config(nmd)) 841 goto out; 842 843 nmd->refcount++; 844 845 if (nmd->flags & NETMAP_MEM_FINALIZED) { 846 /* may happen if config is not changed */ 847 ND("nothing to do"); 848 goto out; 849 } 850 851 if (netmap_mem_finalize_all(nmd)) 852 goto out; 853 854 /* backward compatibility */ 855 netmap_buf_size = nmd->pools[NETMAP_BUF_POOL]._objsize; 856 netmap_total_buffers = nmd->pools[NETMAP_BUF_POOL].objtotal; 857 858 netmap_buffer_lut = nmd->pools[NETMAP_BUF_POOL].lut; 859 netmap_buffer_base = nmd->pools[NETMAP_BUF_POOL].lut[0].vaddr; 860 861 nmd->lasterr = 0; 862 863 out: 864 if (nmd->lasterr) 865 nmd->refcount--; 866 err = nmd->lasterr; 867 868 NMA_UNLOCK(nmd); 869 870 return err; 871 872 } 873 874 int 875 netmap_mem_init(void) 876 { 877 NMA_LOCK_INIT(&nm_mem); 878 return (0); 879 } 880 881 void 882 netmap_mem_fini(void) 883 { 884 int i; 885 886 for (i = 0; i < NETMAP_POOLS_NR; i++) { 887 netmap_destroy_obj_allocator(&nm_mem.pools[i]); 888 } 889 NMA_LOCK_DESTROY(&nm_mem); 890 } 891 892 static void 893 netmap_free_rings(struct netmap_adapter *na) 894 { 895 u_int i; 896 if (!na->tx_rings) 897 return; 898 for (i = 0; i < na->num_tx_rings + 1; i++) { 899 if (na->tx_rings[i].ring) { 900 netmap_ring_free(na->nm_mem, na->tx_rings[i].ring); 901 na->tx_rings[i].ring = NULL; 902 } 903 } 904 for (i = 0; i < na->num_rx_rings + 1; i++) { 905 if (na->rx_rings[i].ring) { 906 netmap_ring_free(na->nm_mem, na->rx_rings[i].ring); 907 na->rx_rings[i].ring = NULL; 908 } 909 } 910 } 911 912 /* call with NMA_LOCK held * 913 * 914 * Allocate netmap rings and buffers for this card 915 * The rings are contiguous, but have variable size. 916 */ 917 int 918 netmap_mem_rings_create(struct netmap_adapter *na) 919 { 920 struct netmap_ring *ring; 921 u_int len, ndesc; 922 struct netmap_kring *kring; 923 924 NMA_LOCK(na->nm_mem); 925 926 for (kring = na->tx_rings; kring != na->rx_rings; kring++) { /* Transmit rings */ 927 ndesc = kring->nkr_num_slots; 928 len = sizeof(struct netmap_ring) + 929 ndesc * sizeof(struct netmap_slot); 930 ring = netmap_ring_malloc(na->nm_mem, len); 931 if (ring == NULL) { 932 D("Cannot allocate tx_ring"); 933 goto cleanup; 934 } 935 ND("txring[%d] at %p ofs %d", i, ring); 936 kring->ring = ring; 937 *(uint32_t *)(uintptr_t)&ring->num_slots = ndesc; 938 *(ssize_t *)(uintptr_t)&ring->buf_ofs = 939 (na->nm_mem->pools[NETMAP_IF_POOL].memtotal + 940 na->nm_mem->pools[NETMAP_RING_POOL].memtotal) - 941 netmap_ring_offset(na->nm_mem, ring); 942 943 ring->avail = kring->nr_hwavail; 944 ring->cur = kring->nr_hwcur; 945 *(uint16_t *)(uintptr_t)&ring->nr_buf_size = 946 NETMAP_BDG_BUF_SIZE(na->nm_mem); 947 ND("initializing slots for txring"); 948 if (netmap_new_bufs(na->nm_mem, ring->slot, ndesc)) { 949 D("Cannot allocate buffers for tx_ring"); 950 goto cleanup; 951 } 952 } 953 954 for ( ; kring != na->tailroom; kring++) { /* Receive rings */ 955 ndesc = kring->nkr_num_slots; 956 len = sizeof(struct netmap_ring) + 957 ndesc * sizeof(struct netmap_slot); 958 ring = netmap_ring_malloc(na->nm_mem, len); 959 if (ring == NULL) { 960 D("Cannot allocate rx_ring"); 961 goto cleanup; 962 } 963 ND("rxring at %p ofs %d", ring); 964 965 kring->ring = ring; 966 *(uint32_t *)(uintptr_t)&ring->num_slots = ndesc; 967 *(ssize_t *)(uintptr_t)&ring->buf_ofs = 968 (na->nm_mem->pools[NETMAP_IF_POOL].memtotal + 969 na->nm_mem->pools[NETMAP_RING_POOL].memtotal) - 970 netmap_ring_offset(na->nm_mem, ring); 971 972 ring->cur = kring->nr_hwcur; 973 ring->avail = kring->nr_hwavail; 974 *(int *)(uintptr_t)&ring->nr_buf_size = 975 NETMAP_BDG_BUF_SIZE(na->nm_mem); 976 ND("initializing slots for rxring[%d]", i); 977 if (netmap_new_bufs(na->nm_mem, ring->slot, ndesc)) { 978 D("Cannot allocate buffers for rx_ring"); 979 goto cleanup; 980 } 981 } 982 983 NMA_UNLOCK(na->nm_mem); 984 985 return 0; 986 987 cleanup: 988 netmap_free_rings(na); 989 990 NMA_UNLOCK(na->nm_mem); 991 992 return ENOMEM; 993 } 994 995 void 996 netmap_mem_rings_delete(struct netmap_adapter *na) 997 { 998 /* last instance, release bufs and rings */ 999 u_int i, lim; 1000 struct netmap_kring *kring; 1001 struct netmap_ring *ring; 1002 1003 NMA_LOCK(na->nm_mem); 1004 1005 for (kring = na->tx_rings; kring != na->tailroom; kring++) { 1006 ring = kring->ring; 1007 if (ring == NULL) 1008 continue; 1009 lim = kring->nkr_num_slots; 1010 for (i = 0; i < lim; i++) 1011 netmap_free_buf(na->nm_mem, ring->slot[i].buf_idx); 1012 } 1013 netmap_free_rings(na); 1014 1015 NMA_UNLOCK(na->nm_mem); 1016 } 1017 1018 1019 /* call with NMA_LOCK held */ 1020 /* 1021 * Allocate the per-fd structure netmap_if. 1022 * 1023 * We assume that the configuration stored in na 1024 * (number of tx/rx rings and descs) does not change while 1025 * the interface is in netmap mode. 1026 */ 1027 struct netmap_if * 1028 netmap_mem_if_new(const char *ifname, struct netmap_adapter *na) 1029 { 1030 struct netmap_if *nifp; 1031 ssize_t base; /* handy for relative offsets between rings and nifp */ 1032 u_int i, len, ntx, nrx; 1033 1034 /* 1035 * verify whether virtual port need the stack ring 1036 */ 1037 ntx = na->num_tx_rings + 1; /* shorthand, include stack ring */ 1038 nrx = na->num_rx_rings + 1; /* shorthand, include stack ring */ 1039 /* 1040 * the descriptor is followed inline by an array of offsets 1041 * to the tx and rx rings in the shared memory region. 1042 * For virtual rx rings we also allocate an array of 1043 * pointers to assign to nkr_leases. 1044 */ 1045 1046 NMA_LOCK(na->nm_mem); 1047 1048 len = sizeof(struct netmap_if) + (nrx + ntx) * sizeof(ssize_t); 1049 nifp = netmap_if_malloc(na->nm_mem, len); 1050 if (nifp == NULL) { 1051 NMA_UNLOCK(na->nm_mem); 1052 return NULL; 1053 } 1054 1055 /* initialize base fields -- override const */ 1056 *(u_int *)(uintptr_t)&nifp->ni_tx_rings = na->num_tx_rings; 1057 *(u_int *)(uintptr_t)&nifp->ni_rx_rings = na->num_rx_rings; 1058 strncpy(nifp->ni_name, ifname, (size_t)IFNAMSIZ); 1059 1060 /* 1061 * fill the slots for the rx and tx rings. They contain the offset 1062 * between the ring and nifp, so the information is usable in 1063 * userspace to reach the ring from the nifp. 1064 */ 1065 base = netmap_if_offset(na->nm_mem, nifp); 1066 for (i = 0; i < ntx; i++) { 1067 *(ssize_t *)(uintptr_t)&nifp->ring_ofs[i] = 1068 netmap_ring_offset(na->nm_mem, na->tx_rings[i].ring) - base; 1069 } 1070 for (i = 0; i < nrx; i++) { 1071 *(ssize_t *)(uintptr_t)&nifp->ring_ofs[i+ntx] = 1072 netmap_ring_offset(na->nm_mem, na->rx_rings[i].ring) - base; 1073 } 1074 1075 NMA_UNLOCK(na->nm_mem); 1076 1077 return (nifp); 1078 } 1079 1080 void 1081 netmap_mem_if_delete(struct netmap_adapter *na, struct netmap_if *nifp) 1082 { 1083 if (nifp == NULL) 1084 /* nothing to do */ 1085 return; 1086 NMA_LOCK(na->nm_mem); 1087 1088 netmap_if_free(na->nm_mem, nifp); 1089 1090 NMA_UNLOCK(na->nm_mem); 1091 } 1092 1093 static void 1094 netmap_mem_global_deref(struct netmap_mem_d *nmd) 1095 { 1096 NMA_LOCK(nmd); 1097 1098 nmd->refcount--; 1099 if (netmap_verbose) 1100 D("refcount = %d", nmd->refcount); 1101 1102 NMA_UNLOCK(nmd); 1103 } 1104 1105 int 1106 netmap_mem_finalize(struct netmap_mem_d *nmd) 1107 { 1108 return nmd->finalize(nmd); 1109 } 1110 1111 void 1112 netmap_mem_deref(struct netmap_mem_d *nmd) 1113 { 1114 return nmd->deref(nmd); 1115 } 1116