1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright (C) 2012-2014 Matteo Landi
5 * Copyright (C) 2012-2016 Luigi Rizzo
6 * Copyright (C) 2012-2016 Giuseppe Lettieri
7 * All rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGE.
29 */
30
31 #ifdef linux
32 #include "bsd_glue.h"
33 #endif /* linux */
34
35 #ifdef __APPLE__
36 #include "osx_glue.h"
37 #endif /* __APPLE__ */
38
39 #ifdef __FreeBSD__
40 #include <sys/cdefs.h> /* prerequisite */
41 #include <sys/types.h>
42 #include <sys/malloc.h>
43 #include <sys/kernel.h> /* MALLOC_DEFINE */
44 #include <sys/proc.h>
45 #include <vm/vm.h> /* vtophys */
46 #include <vm/pmap.h> /* vtophys */
47 #include <sys/socket.h> /* sockaddrs */
48 #include <sys/selinfo.h>
49 #include <sys/sysctl.h>
50 #include <net/if.h>
51 #include <net/if_var.h>
52 #include <net/vnet.h>
53 #include <machine/bus.h> /* bus_dmamap_* */
54
55 /* M_NETMAP only used in here */
56 MALLOC_DECLARE(M_NETMAP);
57 MALLOC_DEFINE(M_NETMAP, "netmap", "Network memory map");
58
59 #endif /* __FreeBSD__ */
60
61 #ifdef _WIN32
62 #include <win_glue.h>
63 #endif
64
65 #include <net/netmap.h>
66 #include <dev/netmap/netmap_kern.h>
67 #include <net/netmap_virt.h>
68 #include "netmap_mem2.h"
69
70 #ifdef _WIN32_USE_SMALL_GENERIC_DEVICES_MEMORY
71 #define NETMAP_BUF_MAX_NUM 8*4096 /* if too big, allocation takes too long */
72 #else
73 #define NETMAP_BUF_MAX_NUM 20*4096*2 /* large machine */
74 #endif
75
76 #define NETMAP_POOL_MAX_NAMSZ 32
77
78
79 enum {
80 NETMAP_IF_POOL = 0,
81 NETMAP_RING_POOL,
82 NETMAP_BUF_POOL,
83 NETMAP_POOLS_NR
84 };
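
/*
 * Editor's note: the three pools are exposed to userspace as a single
 * contiguous offset space, laid out in enum order: all netmap_if's
 * first, then all rings, then all buffers. The offset helpers below
 * (e.g. netmap_ring_offset()) rely on this ordering when they add in
 * the memtotal of the preceding pools.
 */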
85
86
87 struct netmap_obj_params {
88 u_int size;
89 u_int num;
90
91 u_int last_size;
92 u_int last_num;
93 };
94
95 struct netmap_obj_pool {
96 char name[NETMAP_POOL_MAX_NAMSZ]; /* name of the allocator */
97
98 /* ---------------------------------------------------*/
99 /* these are only meaningful if the pool is finalized */
100 /* (see the NETMAP_MEM_FINALIZED flag in netmap_mem_d) */
101 size_t memtotal; /* actual total memory space */
102
103 struct lut_entry *lut; /* virt,phys addresses, objtotal entries */
104 uint32_t *bitmap; /* one bit per buffer, 1 means free */
105 uint32_t *invalid_bitmap; /* one bit per buffer, 1 means invalid */
106 uint32_t bitmap_slots; /* number of uint32 entries in bitmap */
107
108 u_int objtotal; /* actual total number of objects. */
109 u_int numclusters; /* actual number of clusters */
110 u_int objfree; /* number of free objects. */
111
112 int alloc_done; /* we have allocated the memory */
113 /* ---------------------------------------------------*/
114
115 /* limits */
116 u_int objminsize; /* minimum object size */
117 u_int objmaxsize; /* maximum object size */
118 u_int nummin; /* minimum number of objects */
119 u_int nummax; /* maximum number of objects */
120
121 /* these are changed only by config */
122 u_int _objtotal; /* total number of objects */
123 u_int _objsize; /* object size */
124 u_int _clustsize; /* cluster size */
125 u_int _clustentries; /* objects per cluster */
126 u_int _numclusters; /* number of clusters */
127
128 /* requested values */
129 u_int r_objtotal;
130 u_int r_objsize;
131 };
132
133 #define NMA_LOCK_T NM_MTX_T
134 #define NMA_LOCK_INIT(n) NM_MTX_INIT((n)->nm_mtx)
135 #define NMA_LOCK_DESTROY(n) NM_MTX_DESTROY((n)->nm_mtx)
136 #define NMA_LOCK(n) NM_MTX_LOCK((n)->nm_mtx)
137 #define NMA_SPINLOCK(n) NM_MTX_SPINLOCK((n)->nm_mtx)
138 #define NMA_UNLOCK(n) NM_MTX_UNLOCK((n)->nm_mtx)
139
140 struct netmap_mem_ops {
141 int (*nmd_get_lut)(struct netmap_mem_d *, struct netmap_lut*);
142 int (*nmd_get_info)(struct netmap_mem_d *, uint64_t *size,
143 u_int *memflags, uint16_t *id);
144
145 vm_paddr_t (*nmd_ofstophys)(struct netmap_mem_d *, vm_ooffset_t);
146 int (*nmd_config)(struct netmap_mem_d *);
147 int (*nmd_finalize)(struct netmap_mem_d *, struct netmap_adapter *);
148 void (*nmd_deref)(struct netmap_mem_d *, struct netmap_adapter *);
149 ssize_t (*nmd_if_offset)(struct netmap_mem_d *, const void *vaddr);
150 void (*nmd_delete)(struct netmap_mem_d *);
151
152 struct netmap_if * (*nmd_if_new)(struct netmap_mem_d *,
153 struct netmap_adapter *, struct netmap_priv_d *);
154 void (*nmd_if_delete)(struct netmap_mem_d *,
155 struct netmap_adapter *, struct netmap_if *);
156 int (*nmd_rings_create)(struct netmap_mem_d *,
157 struct netmap_adapter *);
158 void (*nmd_rings_delete)(struct netmap_mem_d *,
159 struct netmap_adapter *);
160 };
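
/*
 * Editor's sketch (hypothetical backend, not part of this file): a
 * memory backend is selected by pointing netmap_mem_d->ops at a
 * filled-in table, e.g.
 *
 *	static struct netmap_mem_ops my_mem_ops = {
 *		.nmd_get_lut  = my_get_lut,
 *		.nmd_get_info = my_get_info,
 *		... remaining methods ...
 *	};
 *	nmd->ops = &my_mem_ops;
 *
 * The wrappers below (netmap_mem_get_lut() etc.) acquire the allocator
 * lock and dispatch through this table, so the methods are normally
 * entered with NMA_LOCK held.
 */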
161
162 struct netmap_mem_d {
163 NMA_LOCK_T nm_mtx; /* protect the allocator */
164 size_t nm_totalsize; /* shorthand */
165
166 u_int flags;
167 #define NETMAP_MEM_FINALIZED 0x1 /* preallocation done */
168 #define NETMAP_MEM_HIDDEN 0x8 /* being prepared */
169 #define NETMAP_MEM_NOMAP 0x10 /* do not map/unmap pdevs */
170 int lasterr; /* last error for curr config */
171 int active; /* active users */
172 int refcount;
173 /* the three allocators */
174 struct netmap_obj_pool pools[NETMAP_POOLS_NR];
175
176 nm_memid_t nm_id; /* allocator identifier */
177 int nm_grp; /* iommu group id */
178
179 /* list of all existing allocators, sorted by nm_id */
180 struct netmap_mem_d *prev, *next;
181
182 struct netmap_mem_ops *ops;
183
184 struct netmap_obj_params params[NETMAP_POOLS_NR];
185
186 #define NM_MEM_NAMESZ 16
187 char name[NM_MEM_NAMESZ];
188 };
189
190 int
191 netmap_mem_get_lut(struct netmap_mem_d *nmd, struct netmap_lut *lut)
192 {
193 int rv;
194
195 NMA_LOCK(nmd);
196 rv = nmd->ops->nmd_get_lut(nmd, lut);
197 NMA_UNLOCK(nmd);
198
199 return rv;
200 }
201
202 int
203 netmap_mem_get_info(struct netmap_mem_d *nmd, uint64_t *size,
204 u_int *memflags, nm_memid_t *memid)
205 {
206 int rv;
207
208 NMA_LOCK(nmd);
209 rv = nmd->ops->nmd_get_info(nmd, size, memflags, memid);
210 NMA_UNLOCK(nmd);
211
212 return rv;
213 }
214
215 vm_paddr_t
216 netmap_mem_ofstophys(struct netmap_mem_d *nmd, vm_ooffset_t off)
217 {
218 vm_paddr_t pa;
219
220 #if defined(__FreeBSD__)
221 /* This function is called by netmap_dev_pager_fault(), which holds a
222 * non-sleepable lock since FreeBSD 12. Since we cannot sleep, we
223 * spin on the trylock. */
224 NMA_SPINLOCK(nmd);
225 #else
226 NMA_LOCK(nmd);
227 #endif
228 pa = nmd->ops->nmd_ofstophys(nmd, off);
229 NMA_UNLOCK(nmd);
230
231 return pa;
232 }
233
234 static int
235 netmap_mem_config(struct netmap_mem_d *nmd)
236 {
237 if (nmd->active) {
238 /* already in use. Not fatal, but we
239 * cannot change the configuration
240 */
241 return 0;
242 }
243
244 return nmd->ops->nmd_config(nmd);
245 }
246
247 ssize_t
248 netmap_mem_if_offset(struct netmap_mem_d *nmd, const void *off)
249 {
250 ssize_t rv;
251
252 NMA_LOCK(nmd);
253 rv = nmd->ops->nmd_if_offset(nmd, off);
254 NMA_UNLOCK(nmd);
255
256 return rv;
257 }
258
259 static void
260 netmap_mem_delete(struct netmap_mem_d *nmd)
261 {
262 nmd->ops->nmd_delete(nmd);
263 }
264
265 struct netmap_if *
266 netmap_mem_if_new(struct netmap_adapter *na, struct netmap_priv_d *priv)
267 {
268 struct netmap_if *nifp;
269 struct netmap_mem_d *nmd = na->nm_mem;
270
271 NMA_LOCK(nmd);
272 nifp = nmd->ops->nmd_if_new(nmd, na, priv);
273 NMA_UNLOCK(nmd);
274
275 return nifp;
276 }
277
278 void
279 netmap_mem_if_delete(struct netmap_adapter *na, struct netmap_if *nif)
280 {
281 struct netmap_mem_d *nmd = na->nm_mem;
282
283 NMA_LOCK(nmd);
284 nmd->ops->nmd_if_delete(nmd, na, nif);
285 NMA_UNLOCK(nmd);
286 }
287
288 int
289 netmap_mem_rings_create(struct netmap_adapter *na)
290 {
291 int rv;
292 struct netmap_mem_d *nmd = na->nm_mem;
293
294 NMA_LOCK(nmd);
295 rv = nmd->ops->nmd_rings_create(nmd, na);
296 NMA_UNLOCK(nmd);
297
298 return rv;
299 }
300
301 void
302 netmap_mem_rings_delete(struct netmap_adapter *na)
303 {
304 struct netmap_mem_d *nmd = na->nm_mem;
305
306 NMA_LOCK(nmd);
307 nmd->ops->nmd_rings_delete(nmd, na);
308 NMA_UNLOCK(nmd);
309 }
310
311 static int netmap_mem_map(struct netmap_obj_pool *, struct netmap_adapter *);
312 static int netmap_mem_unmap(struct netmap_obj_pool *, struct netmap_adapter *);
313 static int nm_mem_check_group(struct netmap_mem_d *, bus_dma_tag_t);
314 static void nm_mem_release_id(struct netmap_mem_d *);
315
316 nm_memid_t
317 netmap_mem_get_id(struct netmap_mem_d *nmd)
318 {
319 return nmd->nm_id;
320 }
321
322 #ifdef NM_DEBUG_MEM_PUTGET
323 #define NM_DBG_REFC(nmd, func, line) \
324 nm_prinf("%s:%d mem[%d:%d] -> %d", func, line, (nmd)->nm_id, (nmd)->nm_grp, (nmd)->refcount);
325 #else
326 #define NM_DBG_REFC(nmd, func, line)
327 #endif
328
329 /* circular list of all existing allocators */
330 static struct netmap_mem_d *netmap_last_mem_d = &nm_mem;
331 static NM_MTX_T nm_mem_list_lock;
332
333 struct netmap_mem_d *
334 __netmap_mem_get(struct netmap_mem_d *nmd, const char *func, int line)
335 {
336 NM_MTX_LOCK(nm_mem_list_lock);
337 nmd->refcount++;
338 NM_DBG_REFC(nmd, func, line);
339 NM_MTX_UNLOCK(nm_mem_list_lock);
340 return nmd;
341 }
342
343 void
344 __netmap_mem_put(struct netmap_mem_d *nmd, const char *func, int line)
345 {
346 int last;
347 NM_MTX_LOCK(nm_mem_list_lock);
348 last = (--nmd->refcount == 0);
349 if (last)
350 nm_mem_release_id(nmd);
351 NM_DBG_REFC(nmd, func, line);
352 NM_MTX_UNLOCK(nm_mem_list_lock);
353 if (last)
354 netmap_mem_delete(nmd);
355 }
356
357 int
358 netmap_mem_finalize(struct netmap_mem_d *nmd, struct netmap_adapter *na)
359 {
360 int lasterr = 0;
361 if (nm_mem_check_group(nmd, na->pdev) < 0) {
362 return ENOMEM;
363 }
364
365 NMA_LOCK(nmd);
366
367 if (netmap_mem_config(nmd))
368 goto out;
369
370 nmd->active++;
371
372 nmd->lasterr = nmd->ops->nmd_finalize(nmd, na);
373
374 if (!nmd->lasterr && !(nmd->flags & NETMAP_MEM_NOMAP)) {
375 nmd->lasterr = netmap_mem_map(&nmd->pools[NETMAP_BUF_POOL], na);
376 }
377
378 out:
379 lasterr = nmd->lasterr;
380 NMA_UNLOCK(nmd);
381
382 if (lasterr)
383 netmap_mem_deref(nmd, na);
384
385 return lasterr;
386 }
387
388 static int
389 nm_isset(uint32_t *bitmap, u_int i)
390 {
391 return bitmap[ (i>>5) ] & ( 1U << (i & 31U) );
392 }
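
/*
 * Worked example (editor's note): for buffer index i = 70, i>>5 == 2
 * selects bitmap[2], and 1U << (70 & 31) == 1U << 6 selects bit 6 of
 * that word; nm_isset(bitmap, 70) is thus nonzero iff buffer 70 is
 * marked free (or, on invalid_bitmap, marked invalid).
 */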
393
394
395 static int
396 netmap_init_obj_allocator_bitmap(struct netmap_obj_pool *p)
397 {
398 u_int n, j;
399
400 if (p->bitmap == NULL) {
401 /* Allocate the bitmap */
402 n = (p->objtotal + 31) / 32;
403 p->bitmap = nm_os_malloc(sizeof(p->bitmap[0]) * n);
404 if (p->bitmap == NULL) {
405 nm_prerr("Unable to create bitmap (%d entries) for allocator '%s'", (int)n,
406 p->name);
407 return ENOMEM;
408 }
409 p->bitmap_slots = n;
410 } else {
411 memset(p->bitmap, 0, p->bitmap_slots * sizeof(p->bitmap[0]));
412 }
413
414 p->objfree = 0;
415 /*
416 * Set all the bits in the bitmap that have
417 * corresponding buffers to 1 to indicate they are
418 * free.
419 */
420 for (j = 0; j < p->objtotal; j++) {
421 if (p->invalid_bitmap && nm_isset(p->invalid_bitmap, j)) {
422 if (netmap_debug & NM_DEBUG_MEM)
423 nm_prinf("skipping %s %d", p->name, j);
424 continue;
425 }
426 p->bitmap[ (j>>5) ] |= ( 1U << (j & 31U) );
427 p->objfree++;
428 }
429
430 if (netmap_verbose)
431 nm_prinf("%s free %u", p->name, p->objfree);
432 if (p->objfree == 0) {
433 if (netmap_verbose)
434 nm_prerr("%s: no objects available", p->name);
435 return ENOMEM;
436 }
437
438 return 0;
439 }
440
441 static int
442 netmap_mem_init_bitmaps(struct netmap_mem_d *nmd)
443 {
444 int i, error = 0;
445
446 for (i = 0; i < NETMAP_POOLS_NR; i++) {
447 struct netmap_obj_pool *p = &nmd->pools[i];
448
449 error = netmap_init_obj_allocator_bitmap(p);
450 if (error)
451 return error;
452 }
453
454 /*
455 * buffers 0 and 1 are reserved
456 */
457 if (nmd->pools[NETMAP_BUF_POOL].objfree < 2) {
458 nm_prerr("%s: not enough buffers", nmd->pools[NETMAP_BUF_POOL].name);
459 return ENOMEM;
460 }
461
462 nmd->pools[NETMAP_BUF_POOL].objfree -= 2;
463 if (nmd->pools[NETMAP_BUF_POOL].bitmap) {
464 /* XXX This check is a workaround that prevents a
465 * NULL pointer crash which currently happens only
466 * with ptnetmap guests.
467 * Removed shared-info --> is the bug still there? */
468 nmd->pools[NETMAP_BUF_POOL].bitmap[0] = ~3U;
469 }
470 return 0;
471 }
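
/*
 * Worked example (editor's note): if the buffer pool holds 100 objects,
 * all valid, the per-pool pass sets bits 0..99 and objfree = 100; the
 * reservation step then clears bits 0 and 1 (bitmap[0] = ~3U) and
 * leaves objfree = 98, so indexes 0 and 1 are never handed out and can
 * serve as "null" buffers.
 */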
472
473 int
474 netmap_mem_deref(struct netmap_mem_d *nmd, struct netmap_adapter *na)
475 {
476 int last_user = 0;
477 NMA_LOCK(nmd);
478 if (na->active_fds <= 0 && !(nmd->flags & NETMAP_MEM_NOMAP))
479 netmap_mem_unmap(&nmd->pools[NETMAP_BUF_POOL], na);
480 if (nmd->active == 1) {
481 last_user = 1;
482 /*
483 * Reset the allocator when it falls out of use so that any
484 * pool resources leaked by unclean application exits are
485 * reclaimed.
486 */
487 netmap_mem_init_bitmaps(nmd);
488 }
489 nmd->ops->nmd_deref(nmd, na);
490
491 nmd->active--;
492 if (last_user) {
493 nmd->lasterr = 0;
494 }
495
496 NMA_UNLOCK(nmd);
497 return last_user;
498 }
499
500
501 /* accessor functions */
502 static int
503 netmap_mem2_get_lut(struct netmap_mem_d *nmd, struct netmap_lut *lut)
504 {
505 lut->lut = nmd->pools[NETMAP_BUF_POOL].lut;
506 #ifdef __FreeBSD__
507 lut->plut = lut->lut;
508 #endif
509 lut->objtotal = nmd->pools[NETMAP_BUF_POOL].objtotal;
510 lut->objsize = nmd->pools[NETMAP_BUF_POOL]._objsize;
511
512 return 0;
513 }
514
515 static struct netmap_obj_params netmap_min_priv_params[NETMAP_POOLS_NR] = {
516 [NETMAP_IF_POOL] = {
517 .size = 1024,
518 .num = 2,
519 },
520 [NETMAP_RING_POOL] = {
521 .size = 5*PAGE_SIZE,
522 .num = 4,
523 },
524 [NETMAP_BUF_POOL] = {
525 .size = 2048,
526 .num = 4098,
527 },
528 };
529
530
531 /*
532 * nm_mem is the memory allocator used for all physical interfaces
533 * running in netmap mode.
534 * Virtual (VALE) ports will each have their own allocator.
535 */
536 extern struct netmap_mem_ops netmap_mem_global_ops; /* forward */
537 struct netmap_mem_d nm_mem = { /* Our memory allocator. */
538 .pools = {
539 [NETMAP_IF_POOL] = {
540 .name = "netmap_if",
541 .objminsize = sizeof(struct netmap_if),
542 .objmaxsize = 4096,
543 .nummin = 10, /* don't be stingy */
544 .nummax = 10000, /* XXX very large */
545 },
546 [NETMAP_RING_POOL] = {
547 .name = "netmap_ring",
548 .objminsize = sizeof(struct netmap_ring),
549 .objmaxsize = 32*PAGE_SIZE,
550 .nummin = 2,
551 .nummax = 1024,
552 },
553 [NETMAP_BUF_POOL] = {
554 .name = "netmap_buf",
555 .objminsize = 64,
556 .objmaxsize = 65536,
557 .nummin = 4,
558 .nummax = 1000000, /* one million! */
559 },
560 },
561
562 .params = {
563 [NETMAP_IF_POOL] = {
564 .size = 1024,
565 .num = 100,
566 },
567 [NETMAP_RING_POOL] = {
568 .size = 9*PAGE_SIZE,
569 .num = 200,
570 },
571 [NETMAP_BUF_POOL] = {
572 .size = 2048,
573 .num = NETMAP_BUF_MAX_NUM,
574 },
575 },
576
577 .nm_id = 1,
578 .nm_grp = -1,
579
580 .prev = &nm_mem,
581 .next = &nm_mem,
582
583 .ops = &netmap_mem_global_ops,
584
585 .name = "1"
586 };
587
588 static struct netmap_mem_d nm_mem_blueprint;
589
590 /* blueprint for the private memory allocators */
591 /* XXX clang is not happy about using name as a print format */
592 static const struct netmap_mem_d nm_blueprint = {
593 .pools = {
594 [NETMAP_IF_POOL] = {
595 .name = "%s_if",
596 .objminsize = sizeof(struct netmap_if),
597 .objmaxsize = 4096,
598 .nummin = 1,
599 .nummax = 100,
600 },
601 [NETMAP_RING_POOL] = {
602 .name = "%s_ring",
603 .objminsize = sizeof(struct netmap_ring),
604 .objmaxsize = 32*PAGE_SIZE,
605 .nummin = 2,
606 .nummax = 1024,
607 },
608 [NETMAP_BUF_POOL] = {
609 .name = "%s_buf",
610 .objminsize = 64,
611 .objmaxsize = 65536,
612 .nummin = 4,
613 .nummax = 1000000, /* one million! */
614 },
615 },
616
617 .nm_grp = -1,
618
619 .flags = NETMAP_MEM_PRIVATE,
620
621 .ops = &netmap_mem_global_ops,
622 };
623
624 /* memory allocator related sysctls */
625
626 #define STRINGIFY(x) #x
627
628
629 #define DECLARE_SYSCTLS(id, name) \
630 SYSBEGIN(mem2_ ## name); \
631 SYSCTL_INT(_dev_netmap, OID_AUTO, name##_size, \
632 CTLFLAG_RW, &nm_mem.params[id].size, 0, "Requested size of netmap " STRINGIFY(name) "s"); \
633 SYSCTL_INT(_dev_netmap, OID_AUTO, name##_curr_size, \
634 CTLFLAG_RD, &nm_mem.pools[id]._objsize, 0, "Current size of netmap " STRINGIFY(name) "s"); \
635 SYSCTL_INT(_dev_netmap, OID_AUTO, name##_num, \
636 CTLFLAG_RW, &nm_mem.params[id].num, 0, "Requested number of netmap " STRINGIFY(name) "s"); \
637 SYSCTL_INT(_dev_netmap, OID_AUTO, name##_curr_num, \
638 CTLFLAG_RD, &nm_mem.pools[id].objtotal, 0, "Current number of netmap " STRINGIFY(name) "s"); \
639 SYSCTL_INT(_dev_netmap, OID_AUTO, priv_##name##_size, \
640 CTLFLAG_RW, &netmap_min_priv_params[id].size, 0, \
641 "Default size of private netmap " STRINGIFY(name) "s"); \
642 SYSCTL_INT(_dev_netmap, OID_AUTO, priv_##name##_num, \
643 CTLFLAG_RW, &netmap_min_priv_params[id].num, 0, \
644 "Default number of private netmap " STRINGIFY(name) "s"); \
645 SYSEND
646
647 SYSCTL_DECL(_dev_netmap);
648 DECLARE_SYSCTLS(NETMAP_IF_POOL, if);
649 DECLARE_SYSCTLS(NETMAP_RING_POOL, ring);
650 DECLARE_SYSCTLS(NETMAP_BUF_POOL, buf);
651
652 /* call with nm_mem_list_lock held */
653 static int
654 nm_mem_assign_id_locked(struct netmap_mem_d *nmd, int grp_id)
655 {
656 nm_memid_t id;
657 struct netmap_mem_d *scan = netmap_last_mem_d;
658 int error = ENOMEM;
659
660 do {
661 /* we rely on unsigned wrap around */
662 id = scan->nm_id + 1;
663 if (id == 0) /* reserve 0 as error value */
664 id = 1;
665 scan = scan->next;
666 if (id != scan->nm_id) {
667 nmd->nm_id = id;
668 nmd->nm_grp = grp_id;
669 nmd->prev = scan->prev;
670 nmd->next = scan;
671 scan->prev->next = nmd;
672 scan->prev = nmd;
673 netmap_last_mem_d = nmd;
674 nmd->refcount = 1;
675 NM_DBG_REFC(nmd, __FUNCTION__, __LINE__);
676 error = 0;
677 break;
678 }
679 } while (scan != netmap_last_mem_d);
680
681 return error;
682 }
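
/*
 * Worked example (editor's note): with allocators {1, 2, 4} on the
 * sorted circular list and netmap_last_mem_d pointing at id 2, the loop
 * tries id 3 against the next element (id 4); since 3 != 4, the new
 * allocator is linked in between and gets id 3. Only a full cycle
 * without a gap returns ENOMEM.
 */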
683
684 /* call with nm_mem_list_lock *not* held */
685 static int
686 nm_mem_assign_id(struct netmap_mem_d *nmd, int grp_id)
687 {
688 int ret;
689
690 NM_MTX_LOCK(nm_mem_list_lock);
691 ret = nm_mem_assign_id_locked(nmd, grp_id);
692 NM_MTX_UNLOCK(nm_mem_list_lock);
693
694 return ret;
695 }
696
697 /* call with nm_mem_list_lock held */
698 static void
699 nm_mem_release_id(struct netmap_mem_d *nmd)
700 {
701 nmd->prev->next = nmd->next;
702 nmd->next->prev = nmd->prev;
703
704 if (netmap_last_mem_d == nmd)
705 netmap_last_mem_d = nmd->prev;
706
707 nmd->prev = nmd->next = NULL;
708 }
709
710 struct netmap_mem_d *
711 netmap_mem_find(nm_memid_t id)
712 {
713 struct netmap_mem_d *nmd;
714
715 NM_MTX_LOCK(nm_mem_list_lock);
716 nmd = netmap_last_mem_d;
717 do {
718 if (!(nmd->flags & NETMAP_MEM_HIDDEN) && nmd->nm_id == id) {
719 nmd->refcount++;
720 NM_DBG_REFC(nmd, __FUNCTION__, __LINE__);
721 NM_MTX_UNLOCK(nm_mem_list_lock);
722 return nmd;
723 }
724 nmd = nmd->next;
725 } while (nmd != netmap_last_mem_d);
726 NM_MTX_UNLOCK(nm_mem_list_lock);
727 return NULL;
728 }
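
/*
 * Usage sketch (editor's note, hypothetical caller): code that receives
 * a memory id from userspace can resolve and retain the allocator with
 *
 *	struct netmap_mem_d *nmd = netmap_mem_find(req_id);
 *	if (nmd == NULL)
 *		return EINVAL;		(unknown or hidden allocator)
 *	...
 *	netmap_mem_put(nmd);		(drop the reference when done)
 *
 * The reference is taken under nm_mem_list_lock, so the allocator
 * cannot disappear between lookup and use.
 */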
729
730 static int
731 nm_mem_check_group(struct netmap_mem_d *nmd, bus_dma_tag_t dev)
732 {
733 int err = 0, id;
734
735 /* Skip non-hw adapters.
736 * VALE ports can use a particular allocator through the vale-ctl -m option.
737 */
738 if (!dev)
739 return 0;
740 id = nm_iommu_group_id(dev);
741 if (netmap_debug & NM_DEBUG_MEM)
742 nm_prinf("iommu_group %d", id);
743
744 NMA_LOCK(nmd);
745
746 if (nmd->nm_grp != id) {
747 if (netmap_verbose)
748 nm_prerr("iommu group mismatch: %d vs %d",
749 nmd->nm_grp, id);
750 nmd->lasterr = err = ENOMEM;
751 }
752
753 NMA_UNLOCK(nmd);
754 return err;
755 }
756
757 static struct lut_entry *
758 nm_alloc_lut(u_int nobj)
759 {
760 size_t n = sizeof(struct lut_entry) * nobj;
761 struct lut_entry *lut;
762 #ifdef linux
763 lut = vmalloc(n);
764 #else
765 lut = nm_os_malloc(n);
766 #endif
767 return lut;
768 }
769
770 static void
771 nm_free_lut(struct lut_entry *lut, u_int objtotal)
772 {
773 bzero(lut, sizeof(struct lut_entry) * objtotal);
774 #ifdef linux
775 vfree(lut);
776 #else
777 nm_os_free(lut);
778 #endif
779 }
780
781 #if defined(linux) || defined(_WIN32)
782 static struct plut_entry *
783 nm_alloc_plut(u_int nobj)
784 {
785 size_t n = sizeof(struct plut_entry) * nobj;
786 struct plut_entry *lut;
787 lut = vmalloc(n);
788 return lut;
789 }
790
791 static void
792 nm_free_plut(struct plut_entry *lut)
793 {
794 vfree(lut);
795 }
796 #endif /* linux or _WIN32 */
797
798
799 /*
800 * First, find the allocator that contains the requested offset,
801 * then locate the cluster through a lookup table.
802 */
803 static vm_paddr_t
804 netmap_mem2_ofstophys(struct netmap_mem_d *nmd, vm_ooffset_t offset)
805 {
806 int i;
807 vm_ooffset_t o = offset;
808 vm_paddr_t pa;
809 struct netmap_obj_pool *p;
810
811 p = nmd->pools;
812
813 for (i = 0; i < NETMAP_POOLS_NR; offset -= p[i].memtotal, i++) {
814 if (offset >= p[i].memtotal)
815 continue;
816 // now lookup the cluster's address
817 #ifndef _WIN32
818 pa = vtophys(p[i].lut[offset / p[i]._objsize].vaddr) +
819 offset % p[i]._objsize;
820 #else
821 pa = vtophys(p[i].lut[offset / p[i]._objsize].vaddr);
822 pa.QuadPart += offset % p[i]._objsize;
823 #endif
824 return pa;
825 }
826 /* this is only in case of errors */
827 nm_prerr("invalid ofs 0x%x out of 0x%zx 0x%zx 0x%zx", (u_int)o,
828 p[NETMAP_IF_POOL].memtotal,
829 p[NETMAP_IF_POOL].memtotal
830 + p[NETMAP_RING_POOL].memtotal,
831 p[NETMAP_IF_POOL].memtotal
832 + p[NETMAP_RING_POOL].memtotal
833 + p[NETMAP_BUF_POOL].memtotal);
834 #ifndef _WIN32
835 return 0; /* bad address */
836 #else
837 vm_paddr_t res;
838 res.QuadPart = 0;
839 return res;
840 #endif
841 }
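
/*
 * Worked example (editor's note, made-up sizes): if the if pool spans
 * offsets [0, 100KB) and the ring pool [100KB, 500KB), an offset of
 * 130KB fails the first test, is reduced to 30KB by the loop update,
 * and matches the ring pool; the result is vtophys() of the object at
 * index 30KB / _objsize, plus the residual offset inside that object.
 */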
842
843 #ifdef _WIN32
844
845 /*
846 * win32_build_virtual_memory_for_userspace
847 *
848 * This function gathers all the objects that make up the pools and maps
849 * a contiguous virtual memory space for userspace.
850 * It works this way:
851 * 1 - allocate a Memory Descriptor List (MDL) as wide as the sum
852 * of the memory needed for the pools
853 * 2 - cycle through all the objects in every pool, and for every object:
854 *
855 * 2a - get the list of the object's physical address
856 * descriptors
857 * 2b - calculate the offset into the array of page descriptors of
858 * the main MDL
859 * 2c - copy the descriptors of the object into the main MDL
860 *
861 * 3 - return the resulting MDL, which needs to be mapped in userland
862 *
863 * In this way we end up with a single MDL that describes all the
864 * memory for the objects
865 */
866
867 PMDL
868 win32_build_user_vm_map(struct netmap_mem_d *nmd)
869 {
870 u_int memflags, ofs = 0;
871 PMDL mainMdl, tempMdl;
872 uint64_t memsize;
873 int i, j;
874
875 if (netmap_mem_get_info(nmd, &memsize, &memflags, NULL)) {
876 nm_prerr("memory not finalised yet");
877 return NULL;
878 }
879
880 mainMdl = IoAllocateMdl(NULL, memsize, FALSE, FALSE, NULL);
881 if (mainMdl == NULL) {
882 nm_prerr("failed to allocate mdl");
883 return NULL;
884 }
885
886 NMA_LOCK(nmd);
887 for (i = 0; i < NETMAP_POOLS_NR; i++) {
888 struct netmap_obj_pool *p = &nmd->pools[i];
889 int clsz = p->_clustsize;
890 int clobjs = p->_clustentries; /* objects per cluster */
891 int mdl_len = sizeof(PFN_NUMBER) * BYTES_TO_PAGES(clsz);
892 PPFN_NUMBER pSrc, pDst;
893
894 /* each pool has a different cluster size so we need to reallocate */
895 tempMdl = IoAllocateMdl(p->lut[0].vaddr, clsz, FALSE, FALSE, NULL);
896 if (tempMdl == NULL) {
897 NMA_UNLOCK(nmd);
898 nm_prerr("fail to allocate tempMdl");
899 IoFreeMdl(mainMdl);
900 return NULL;
901 }
902 pSrc = MmGetMdlPfnArray(tempMdl);
903 /* create one entry per cluster, the lut[] has one entry per object */
904 for (j = 0; j < p->numclusters; j++, ofs += clsz) {
905 pDst = &MmGetMdlPfnArray(mainMdl)[BYTES_TO_PAGES(ofs)];
906 MmInitializeMdl(tempMdl, p->lut[j*clobjs].vaddr, clsz);
907 MmBuildMdlForNonPagedPool(tempMdl); /* compute physical page addresses */
908 RtlCopyMemory(pDst, pSrc, mdl_len); /* copy the page descriptors */
909 mainMdl->MdlFlags = tempMdl->MdlFlags; /* XXX what is in here ? */
910 }
911 IoFreeMdl(tempMdl);
912 }
913 NMA_UNLOCK(nmd);
914 return mainMdl;
915 }
916
917 #endif /* _WIN32 */
918
919 /*
920 * helper function for OS-specific mmap routines (currently only Windows).
921 * Given an nmd and a pool index, returns the cluster size and number of clusters.
922 * Returns 0 if memory is finalized and the pool is valid, otherwise 1.
923 * It should be called under NMA_LOCK(nmd); otherwise the underlying info can change.
924 */
925
926 int
927 netmap_mem2_get_pool_info(struct netmap_mem_d *nmd, u_int pool, u_int *clustsize, u_int *numclusters)
928 {
929 if (!nmd || !clustsize || !numclusters || pool >= NETMAP_POOLS_NR)
930 return 1; /* invalid arguments */
931 // NMA_LOCK_ASSERT(nmd);
932 if (!(nmd->flags & NETMAP_MEM_FINALIZED)) {
933 *clustsize = *numclusters = 0;
934 return 1; /* not ready yet */
935 }
936 *clustsize = nmd->pools[pool]._clustsize;
937 *numclusters = nmd->pools[pool].numclusters;
938 return 0; /* success */
939 }
940
941 static int
942 netmap_mem2_get_info(struct netmap_mem_d *nmd, uint64_t *size,
943 u_int *memflags, nm_memid_t *id)
944 {
945 int error = 0;
946 error = netmap_mem_config(nmd);
947 if (error)
948 goto out;
949 if (size) {
950 if (nmd->flags & NETMAP_MEM_FINALIZED) {
951 *size = nmd->nm_totalsize;
952 } else {
953 int i;
954 *size = 0;
955 for (i = 0; i < NETMAP_POOLS_NR; i++) {
956 struct netmap_obj_pool *p = nmd->pools + i;
957 *size += ((size_t)p->_numclusters * (size_t)p->_clustsize);
958 }
959 }
960 }
961 if (memflags)
962 *memflags = nmd->flags;
963 if (id)
964 *id = nmd->nm_id;
965 out:
966 return error;
967 }
968
969 /*
970 * we store objects by kernel address, need to find the offset
971 * within the pool to export the value to userspace.
972 * Algorithm: scan until we find the cluster, then add the
973 * actual offset in the cluster
974 */
975 static ssize_t
976 netmap_obj_offset(struct netmap_obj_pool *p, const void *vaddr)
977 {
978 int i, k = p->_clustentries, n = p->objtotal;
979 ssize_t ofs = 0;
980
981 for (i = 0; i < n; i += k, ofs += p->_clustsize) {
982 const char *base = p->lut[i].vaddr;
983 ssize_t relofs = (const char *) vaddr - base;
984
985 if (relofs < 0 || relofs >= p->_clustsize)
986 continue;
987
988 ofs = ofs + relofs;
989 nm_prdis("%s: return offset %d (cluster %d) for pointer %p",
990 p->name, ofs, i, vaddr);
991 return ofs;
992 }
993 nm_prerr("address %p is not contained inside any cluster (%s)",
994 vaddr, p->name);
995 return 0; /* An error occurred */
996 }
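
/*
 * Worked example (editor's note): with _clustsize = 8192 and
 * _clustentries = 4, a vaddr pointing 3000 bytes into the second
 * cluster fails the first iteration (relofs out of range) and matches
 * the second, returning ofs = 8192 + 3000 = 11192: the object's offset
 * from the start of the pool, as userspace sees it after mmap().
 */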
997
998 /* Helper functions which convert virtual addresses to offsets */
999 #define netmap_if_offset(n, v) \
1000 netmap_obj_offset(&(n)->pools[NETMAP_IF_POOL], (v))
1001
1002 #define netmap_ring_offset(n, v) \
1003 ((n)->pools[NETMAP_IF_POOL].memtotal + \
1004 netmap_obj_offset(&(n)->pools[NETMAP_RING_POOL], (v)))
1005
1006 static ssize_t
1007 netmap_mem2_if_offset(struct netmap_mem_d *nmd, const void *addr)
1008 {
1009 return netmap_if_offset(nmd, addr);
1010 }
1011
1012 /*
1013 * report the index and use the start position as a hint;
1014 * otherwise buffer allocation becomes terribly expensive.
1015 */
1016 static void *
1017 netmap_obj_malloc(struct netmap_obj_pool *p, u_int len, uint32_t *start, uint32_t *index)
1018 {
1019 uint32_t i = 0; /* index in the bitmap */
1020 uint32_t mask, j = 0; /* slot counter */
1021 void *vaddr = NULL;
1022
1023 if (len > p->_objsize) {
1024 nm_prerr("%s request size %d too large", p->name, len);
1025 return NULL;
1026 }
1027
1028 if (p->objfree == 0) {
1029 nm_prerr("no more %s objects", p->name);
1030 return NULL;
1031 }
1032 if (start)
1033 i = *start;
1034
1035 /* termination is guaranteed by p->objfree, but better check bounds on i */
1036 while (vaddr == NULL && i < p->bitmap_slots) {
1037 uint32_t cur = p->bitmap[i];
1038 if (cur == 0) { /* bitmask is fully used */
1039 i++;
1040 continue;
1041 }
1042 /* locate a slot */
1043 for (j = 0, mask = 1; (cur & mask) == 0; j++, mask <<= 1)
1044 ;
1045
1046 p->bitmap[i] &= ~mask; /* mark object as in use */
1047 p->objfree--;
1048
1049 vaddr = p->lut[i * 32 + j].vaddr;
1050 if (index)
1051 *index = i * 32 + j;
1052 }
1053 nm_prdis("%s allocator: allocated object @ [%d][%d]: vaddr %p",p->name, i, j, vaddr);
1054
1055 if (start)
1056 *start = i;
1057 return vaddr;
1058 }
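
/*
 * Editor's sketch of the scan: with *start = 2 the search begins at
 * bitmap[2]; if that word is 0x9 (binary 1001), the inner loop stops at
 * j = 0, the bit is cleared and the object at index 2*32 + 0 = 64 is
 * returned. Batch callers (see netmap_extra_alloc()) feed the returned
 * *start back in, so consecutive allocations do not rescan words that
 * are already exhausted.
 */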
1059
1060
1061 /*
1062 * free by index, not by address.
1063 * XXX should we also cleanup the content ?
1064 */
1065 static int
1066 netmap_obj_free(struct netmap_obj_pool *p, uint32_t j)
1067 {
1068 uint32_t *ptr, mask;
1069
1070 if (j >= p->objtotal) {
1071 nm_prerr("invalid index %u, max %u", j, p->objtotal);
1072 return 1;
1073 }
1074 ptr = &p->bitmap[j / 32];
1075 mask = (1 << (j % 32));
1076 if (*ptr & mask) {
1077 nm_prerr("ouch, double free on buffer %d", j);
1078 return 1;
1079 } else {
1080 *ptr |= mask;
1081 p->objfree++;
1082 return 0;
1083 }
1084 }
1085
1086 /*
1087 * free by address. This is slow but is only used for a few
1088 * objects (rings, nifp)
1089 */
1090 static void
1091 netmap_obj_free_va(struct netmap_obj_pool *p, void *vaddr)
1092 {
1093 u_int i, j, n = p->numclusters;
1094
1095 for (i = 0, j = 0; i < n; i++, j += p->_clustentries) {
1096 void *base = p->lut[i * p->_clustentries].vaddr;
1097 ssize_t relofs = (ssize_t) vaddr - (ssize_t) base;
1098
1099 /* The given address is outside the scope of the current cluster. */
1100 if (base == NULL || vaddr < base || relofs >= p->_clustsize)
1101 continue;
1102
1103 j = j + relofs / p->_objsize;
1104 /* KASSERT(j != 0, ("Cannot free object 0")); */
1105 netmap_obj_free(p, j);
1106 return;
1107 }
1108 nm_prerr("address %p is not contained inside any cluster (%s)",
1109 vaddr, p->name);
1110 }
1111
1112 unsigned
1113 netmap_mem_bufsize(struct netmap_mem_d *nmd)
1114 {
1115 return nmd->pools[NETMAP_BUF_POOL]._objsize;
1116 }
1117
1118 #define netmap_if_malloc(n, len) netmap_obj_malloc(&(n)->pools[NETMAP_IF_POOL], len, NULL, NULL)
1119 #define netmap_if_free(n, v) netmap_obj_free_va(&(n)->pools[NETMAP_IF_POOL], (v))
1120 #define netmap_ring_malloc(n, len) netmap_obj_malloc(&(n)->pools[NETMAP_RING_POOL], len, NULL, NULL)
1121 #define netmap_ring_free(n, v) netmap_obj_free_va(&(n)->pools[NETMAP_RING_POOL], (v))
1122 #define netmap_buf_malloc(n, _pos, _index) \
1123 netmap_obj_malloc(&(n)->pools[NETMAP_BUF_POOL], netmap_mem_bufsize(n), _pos, _index)
1124
1125
1126 #if 0 /* currently unused */
1127 /* Return the index associated to the given packet buffer */
1128 #define netmap_buf_index(n, v) \
1129 (netmap_obj_offset(&(n)->pools[NETMAP_BUF_POOL], (v)) / NETMAP_BDG_BUF_SIZE(n))
1130 #endif
1131
1132 /*
1133 * allocate extra buffers in a linked list.
1134 * returns the actual number.
1135 */
1136 uint32_t
1137 netmap_extra_alloc(struct netmap_adapter *na, uint32_t *head, uint32_t n)
1138 {
1139 struct netmap_mem_d *nmd = na->nm_mem;
1140 uint32_t i, pos = 0; /* opaque, scan position in the bitmap */
1141
1142 NMA_LOCK(nmd);
1143
1144 *head = 0; /* default, 'null' index, i.e. empty list */
1145 for (i = 0 ; i < n; i++) {
1146 uint32_t cur = *head; /* save current head */
1147 uint32_t *p = netmap_buf_malloc(nmd, &pos, head);
1148 if (p == NULL) {
1149 nm_prerr("no more buffers after %d of %d", i, n);
1150 *head = cur; /* restore */
1151 break;
1152 }
1153 nm_prdis(5, "allocate buffer %d -> %d", *head, cur);
1154 *p = cur; /* link to previous head */
1155 }
1156
1157 NMA_UNLOCK(nmd);
1158
1159 return i;
1160 }
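
/*
 * Editor's note: the extra buffers form a singly-linked list threaded
 * through the buffers themselves: the first 4 bytes of each buffer hold
 * the index of the next one, and 0 terminates the list. E.g. after
 * allocating indexes 7, 12 and 30 in this order, *head == 30, buffer 30
 * starts with 12, buffer 12 with 7, and buffer 7 with 0.
 */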
1161
1162 static void
1163 netmap_extra_free(struct netmap_adapter *na, uint32_t head)
1164 {
1165 struct lut_entry *lut = na->na_lut.lut;
1166 struct netmap_mem_d *nmd = na->nm_mem;
1167 struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL];
1168 uint32_t i, cur, *buf;
1169
1170 nm_prdis("freeing the extra list");
1171 for (i = 0; head >=2 && head < p->objtotal; i++) {
1172 cur = head;
1173 buf = lut[head].vaddr;
1174 head = *buf;
1175 *buf = 0;
1176 if (netmap_obj_free(p, cur))
1177 break;
1178 }
1179 if (head != 0)
1180 nm_prerr("breaking with head %d", head);
1181 if (netmap_debug & NM_DEBUG_MEM)
1182 nm_prinf("freed %d buffers", i);
1183 }
1184
1185
1186 /* Return nonzero on error */
1187 static int
1188 netmap_new_bufs(struct netmap_mem_d *nmd, struct netmap_slot *slot, u_int n)
1189 {
1190 struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL];
1191 u_int i = 0; /* slot counter */
1192 uint32_t pos = 0; /* slot in p->bitmap */
1193 uint32_t index = 0; /* buffer index */
1194
1195 for (i = 0; i < n; i++) {
1196 void *vaddr = netmap_buf_malloc(nmd, &pos, &index);
1197 if (vaddr == NULL) {
1198 nm_prerr("no more buffers after %d of %d", i, n);
1199 goto cleanup;
1200 }
1201 slot[i].buf_idx = index;
1202 slot[i].len = p->_objsize;
1203 slot[i].flags = 0;
1204 slot[i].ptr = 0;
1205 }
1206
1207 nm_prdis("%s: allocated %d buffers, %d available, first at %d", p->name, n, p->objfree, pos);
1208 return (0);
1209
1210 cleanup:
1211 while (i > 0) {
1212 i--;
1213 netmap_obj_free(p, slot[i].buf_idx);
1214 }
1215 bzero(slot, n * sizeof(slot[0]));
1216 return (ENOMEM);
1217 }
1218
1219 static void
1220 netmap_mem_set_ring(struct netmap_mem_d *nmd, struct netmap_slot *slot, u_int n, uint32_t index)
1221 {
1222 struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL];
1223 u_int i;
1224
1225 for (i = 0; i < n; i++) {
1226 slot[i].buf_idx = index;
1227 slot[i].len = p->_objsize;
1228 slot[i].flags = 0;
1229 }
1230 }
1231
1232
1233 static void
1234 netmap_free_buf(struct netmap_mem_d *nmd, uint32_t i)
1235 {
1236 struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL];
1237
1238 if (i < 2 || i >= p->objtotal) {
1239 nm_prerr("Cannot free buf#%d: should be in [2, %d[", i, p->objtotal);
1240 return;
1241 }
1242 netmap_obj_free(p, i);
1243 }
1244
1245
1246 static void
1247 netmap_free_bufs(struct netmap_mem_d *nmd, struct netmap_slot *slot, u_int n)
1248 {
1249 u_int i;
1250
1251 for (i = 0; i < n; i++) {
1252 if (slot[i].buf_idx > 1)
1253 netmap_free_buf(nmd, slot[i].buf_idx);
1254 }
1255 nm_prdis("%s: released some buffers, available: %u",
1256 p->name, p->objfree);
1257 }
1258
1259 static void
1260 netmap_reset_obj_allocator(struct netmap_obj_pool *p)
1261 {
1262
1263 if (p == NULL)
1264 return;
1265 if (p->bitmap)
1266 nm_os_free(p->bitmap);
1267 p->bitmap = NULL;
1268 if (p->invalid_bitmap)
1269 nm_os_free(p->invalid_bitmap);
1270 p->invalid_bitmap = NULL;
1271 if (!p->alloc_done) {
1272 /* allocation was done by somebody else.
1273 * Let them clean up after themselves.
1274 */
1275 return;
1276 }
1277 if (p->lut) {
1278 u_int i;
1279
1280 /*
1281 * Free each cluster allocated in
1282 * netmap_finalize_obj_allocator(). The cluster start
1283 * addresses are stored at multiples of p->_clustentries
1284 * in the lut.
1285 */
1286 for (i = 0; i < p->objtotal; i += p->_clustentries) {
1287 contigfree(p->lut[i].vaddr, p->_clustsize, M_NETMAP);
1288 }
1289 nm_free_lut(p->lut, p->objtotal);
1290 }
1291 p->lut = NULL;
1292 p->objtotal = 0;
1293 p->memtotal = 0;
1294 p->numclusters = 0;
1295 p->objfree = 0;
1296 p->alloc_done = 0;
1297 }
1298
1299 /*
1300 * Free all resources related to an allocator.
1301 */
1302 static void
1303 netmap_destroy_obj_allocator(struct netmap_obj_pool *p)
1304 {
1305 if (p == NULL)
1306 return;
1307 netmap_reset_obj_allocator(p);
1308 }
1309
1310 /*
1311 * We receive a request for objtotal objects, of size objsize each.
1312 * Internally we may round up both numbers, as we allocate objects
1313 * in small clusters multiple of the page size.
1314 * We need to keep track of objtotal and clustentries,
1315 * as they are needed when freeing memory.
1316 *
1317 * XXX note -- userspace needs the buffers to be contiguous,
1318 * so we cannot afford gaps at the end of a cluster.
1319 */
1320
1321
1322 /* call with NMA_LOCK held */
1323 static int
1324 netmap_config_obj_allocator(struct netmap_obj_pool *p, u_int objtotal, u_int objsize)
1325 {
1326 int i;
1327 u_int clustsize; /* the cluster size, multiple of page size */
1328 u_int clustentries; /* how many objects per cluster */
1329
1330 /* we store the current request, so we can
1331 * detect configuration changes later */
1332 p->r_objtotal = objtotal;
1333 p->r_objsize = objsize;
1334
1335 #define MAX_CLUSTSIZE (1<<22) // 4 MB
1336 #define LINE_ROUND NM_BUF_ALIGN // 64
1337 if (objsize >= MAX_CLUSTSIZE) {
1338 /* we could do it but there is no point */
1339 nm_prerr("unsupported allocation for %d bytes", objsize);
1340 return EINVAL;
1341 }
1342 /* make sure objsize is a multiple of LINE_ROUND */
1343 i = (objsize & (LINE_ROUND - 1));
1344 if (i) {
1345 nm_prinf("aligning object by %d bytes", LINE_ROUND - i);
1346 objsize += LINE_ROUND - i;
1347 }
1348 if (objsize < p->objminsize || objsize > p->objmaxsize) {
1349 nm_prerr("requested objsize %d out of range [%d, %d]",
1350 objsize, p->objminsize, p->objmaxsize);
1351 return EINVAL;
1352 }
1353 if (objtotal < p->nummin || objtotal > p->nummax) {
1354 nm_prerr("requested objtotal %d out of range [%d, %d]",
1355 objtotal, p->nummin, p->nummax);
1356 return EINVAL;
1357 }
1358 /*
1359 * Compute number of objects using a brute-force approach:
1360 * given a max cluster size,
1361 * we try to fill it with objects keeping track of the
1362 * wasted space to the next page boundary.
1363 */
1364 for (clustentries = 0, i = 1;; i++) {
1365 u_int delta, used = i * objsize;
1366 if (used > MAX_CLUSTSIZE)
1367 break;
1368 delta = used % PAGE_SIZE;
1369 if (delta == 0) { // exact solution
1370 clustentries = i;
1371 break;
1372 }
1373 }
1374 /* exact solution not found */
1375 if (clustentries == 0) {
1376 nm_prerr("unsupported allocation for %d bytes", objsize);
1377 return EINVAL;
1378 }
1379 /* compute clustsize */
1380 clustsize = clustentries * objsize;
1381 if (netmap_debug & NM_DEBUG_MEM)
1382 nm_prinf("objsize %d clustsize %d objects %d",
1383 objsize, clustsize, clustentries);
1384
1385 /*
1386 * The number of clusters is n = ceil(objtotal/clustentries)
1387 * objtotal' = n * clustentries
1388 */
1389 p->_clustentries = clustentries;
1390 p->_clustsize = clustsize;
1391 p->_numclusters = (objtotal + clustentries - 1) / clustentries;
1392
1393 /* actual values (may be larger than requested) */
1394 p->_objsize = objsize;
1395 p->_objtotal = p->_numclusters * clustentries;
1396
1397 return 0;
1398 }
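
/*
 * Worked example (editor's note, PAGE_SIZE = 4096): for objsize = 2048
 * the loop stops at i = 2 (used = 4096, delta = 0), giving
 * clustentries = 2 and clustsize = 4096; objtotal = 1000 then yields
 * _numclusters = 500 and _objtotal = 1000. For objsize = 1536 the first
 * exact fit is 8 objects in 3 pages (8 * 1536 == 12288 == 3 * 4096).
 */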
1399
1400 /* call with NMA_LOCK held */
1401 static int
1402 netmap_finalize_obj_allocator(struct netmap_obj_pool *p)
1403 {
1404 int i; /* must be signed */
1405 size_t n;
1406
1407 if (p->lut) {
1408 /* if the lut is already there we assume that also all the
1409 * clusters have already been allocated, possibly by somebody
1410 * else (e.g., extmem). In the latter case, the alloc_done flag
1411 * will remain at zero, so that we will not attempt to
1412 * deallocate the clusters by ourselves in
1413 * netmap_reset_obj_allocator.
1414 */
1415 return 0;
1416 }
1417
1418 /* optimistically assume we have enough memory */
1419 p->numclusters = p->_numclusters;
1420 p->objtotal = p->_objtotal;
1421 p->alloc_done = 1;
1422
1423 p->lut = nm_alloc_lut(p->objtotal);
1424 if (p->lut == NULL) {
1425 nm_prerr("Unable to create lookup table for '%s'", p->name);
1426 goto clean;
1427 }
1428
1429 /*
1430 * Allocate clusters, init pointers
1431 */
1432
1433 n = p->_clustsize;
1434 for (i = 0; i < (int)p->objtotal;) {
1435 int lim = i + p->_clustentries;
1436 char *clust;
1437
1438 /*
1439 * XXX Note, we only need contigmalloc() for buffers attached
1440 * to native interfaces. In all other cases (nifp, netmap rings
1441 * and even buffers for VALE ports or emulated interfaces) we
1442 * can live with standard malloc, because the hardware will not
1443 * access the pages directly.
1444 */
1445 clust = contigmalloc(n, M_NETMAP, M_NOWAIT | M_ZERO,
1446 (size_t)0, -1UL, PAGE_SIZE, 0);
1447 if (clust == NULL) {
1448 /*
1449 * If we get here, there is a severe memory shortage,
1450 * so halve the allocated memory to reclaim some.
1451 */
1452 nm_prerr("Unable to create cluster at %d for '%s' allocator",
1453 i, p->name);
1454 if (i < 2) /* nothing to halve */
1455 goto out;
1456 lim = i / 2;
1457 for (i--; i >= lim; i--) {
1458 if (i % p->_clustentries == 0 && p->lut[i].vaddr)
1459 contigfree(p->lut[i].vaddr,
1460 n, M_NETMAP);
1461 p->lut[i].vaddr = NULL;
1462 }
1463 out:
1464 p->objtotal = i;
1465 /* we may have stopped in the middle of a cluster */
1466 p->numclusters = (i + p->_clustentries - 1) / p->_clustentries;
1467 break;
1468 }
1469 /*
1470 * Set lut state for all buffers in the current cluster.
1471 *
1472 * [i, lim) is the set of buffer indexes that cover the
1473 * current cluster.
1474 *
1475 * 'clust' is really the address of the current buffer in
1476 * the current cluster as we index through it with a stride
1477 * of p->_objsize.
1478 */
1479 for (; i < lim; i++, clust += p->_objsize) {
1480 p->lut[i].vaddr = clust;
1481 #if !defined(linux) && !defined(_WIN32)
1482 p->lut[i].paddr = vtophys(clust);
1483 #endif
1484 }
1485 }
1486 p->memtotal = (size_t)p->numclusters * (size_t)p->_clustsize;
1487 if (netmap_verbose)
1488 nm_prinf("Pre-allocated %d clusters (%d/%zuKB) for '%s'",
1489 p->numclusters, p->_clustsize >> 10,
1490 p->memtotal >> 10, p->name);
1491
1492 return 0;
1493
1494 clean:
1495 netmap_reset_obj_allocator(p);
1496 return ENOMEM;
1497 }
1498
1499 /* call with lock held */
1500 static int
1501 netmap_mem_params_changed(struct netmap_obj_params *p)
1502 {
1503 int i, rv = 0;
1504
1505 for (i = 0; i < NETMAP_POOLS_NR; i++) {
1506 if (p[i].last_size != p[i].size || p[i].last_num != p[i].num) {
1507 p[i].last_size = p[i].size;
1508 p[i].last_num = p[i].num;
1509 rv = 1;
1510 }
1511 }
1512 return rv;
1513 }
1514
1515 static void
1516 netmap_mem_reset_all(struct netmap_mem_d *nmd)
1517 {
1518 int i;
1519
1520 if (netmap_debug & NM_DEBUG_MEM)
1521 nm_prinf("resetting %p", nmd);
1522 for (i = 0; i < NETMAP_POOLS_NR; i++) {
1523 netmap_reset_obj_allocator(&nmd->pools[i]);
1524 }
1525 nmd->flags &= ~NETMAP_MEM_FINALIZED;
1526 }
1527
1528 static int
1529 netmap_mem_unmap(struct netmap_obj_pool *p, struct netmap_adapter *na)
1530 {
1531 int i, lim = p->objtotal;
1532 struct netmap_lut *lut;
1533 if (na == NULL || na->pdev == NULL)
1534 return 0;
1535
1536 lut = &na->na_lut;
1537
1538
1539
1540 #if defined(__FreeBSD__)
1541 /* On FreeBSD mapping and unmapping is performed by the txsync
1542 * and rxsync routine, packet by packet. */
1543 (void)i;
1544 (void)lim;
1545 (void)lut;
1546 #elif defined(_WIN32)
1547 (void)i;
1548 (void)lim;
1549 (void)lut;
1550 nm_prerr("unsupported on Windows");
1551 #else /* linux */
1552 nm_prdis("unmapping and freeing plut for %s", na->name);
1553 if (lut->plut == NULL || na->pdev == NULL)
1554 return 0;
1555 for (i = 0; i < lim; i += p->_clustentries) {
1556 if (lut->plut[i].paddr)
1557 netmap_unload_map(na, (bus_dma_tag_t) na->pdev, &lut->plut[i].paddr, p->_clustsize);
1558 }
1559 nm_free_plut(lut->plut);
1560 lut->plut = NULL;
1561 #endif /* linux */
1562
1563 return 0;
1564 }
1565
1566 static int
1567 netmap_mem_map(struct netmap_obj_pool *p, struct netmap_adapter *na)
1568 {
1569 int error = 0;
1570 int i, lim = p->objtotal;
1571 struct netmap_lut *lut = &na->na_lut;
1572
1573 if (na->pdev == NULL)
1574 return 0;
1575
1576 #if defined(__FreeBSD__)
1577 /* On FreeBSD mapping and unmapping is performed by the txsync
1578 * and rxsync routine, packet by packet. */
1579 (void)i;
1580 (void)lim;
1581 (void)lut;
1582 #elif defined(_WIN32)
1583 (void)i;
1584 (void)lim;
1585 (void)lut;
1586 nm_prerr("unsupported on Windows");
1587 #else /* linux */
1588
1589 if (lut->plut != NULL) {
1590 nm_prdis("plut already allocated for %s", na->name);
1591 return 0;
1592 }
1593
1594 nm_prdis("allocating physical lut for %s", na->name);
1595 lut->plut = nm_alloc_plut(lim);
1596 if (lut->plut == NULL) {
1597 nm_prerr("Failed to allocate physical lut for %s", na->name);
1598 return ENOMEM;
1599 }
1600
1601 for (i = 0; i < lim; i += p->_clustentries) {
1602 lut->plut[i].paddr = 0;
1603 }
1604
1605 for (i = 0; i < lim; i += p->_clustentries) {
1606 int j;
1607
1608 if (p->lut[i].vaddr == NULL)
1609 continue;
1610
1611 error = netmap_load_map(na, (bus_dma_tag_t) na->pdev, &lut->plut[i].paddr,
1612 p->lut[i].vaddr, p->_clustsize);
1613 if (error) {
1614 nm_prerr("Failed to map cluster #%d from the %s pool", i, p->name);
1615 break;
1616 }
1617
1618 for (j = 1; j < p->_clustentries; j++) {
1619 lut->plut[i + j].paddr = lut->plut[i + j - 1].paddr + p->_objsize;
1620 }
1621 }
1622
1623 if (error)
1624 netmap_mem_unmap(p, na);
1625
1626 #endif /* linux */
1627
1628 return error;
1629 }
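
/*
 * Editor's note: only one DMA mapping per cluster is created above; the
 * physical addresses of the other objects in the same cluster are
 * derived arithmetically (previous paddr + _objsize), which is safe
 * because each cluster is physically contiguous.
 */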
1630
1631 static int
1632 netmap_mem_finalize_all(struct netmap_mem_d *nmd)
1633 {
1634 int i;
1635 if (nmd->flags & NETMAP_MEM_FINALIZED)
1636 return 0;
1637 nmd->lasterr = 0;
1638 nmd->nm_totalsize = 0;
1639 for (i = 0; i < NETMAP_POOLS_NR; i++) {
1640 nmd->lasterr = netmap_finalize_obj_allocator(&nmd->pools[i]);
1641 if (nmd->lasterr)
1642 goto error;
1643 nmd->nm_totalsize += nmd->pools[i].memtotal;
1644 }
1645 nmd->nm_totalsize = (nmd->nm_totalsize + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
1646 nmd->lasterr = netmap_mem_init_bitmaps(nmd);
1647 if (nmd->lasterr)
1648 goto error;
1649
1650 nmd->flags |= NETMAP_MEM_FINALIZED;
1651
1652 if (netmap_verbose)
1653 nm_prinf("interfaces %zd KB, rings %zd KB, buffers %zd MB",
1654 nmd->pools[NETMAP_IF_POOL].memtotal >> 10,
1655 nmd->pools[NETMAP_RING_POOL].memtotal >> 10,
1656 nmd->pools[NETMAP_BUF_POOL].memtotal >> 20);
1657
1658 if (netmap_verbose)
1659 nm_prinf("Free buffers: %d", nmd->pools[NETMAP_BUF_POOL].objfree);
1660
1661
1662 return 0;
1663 error:
1664 netmap_mem_reset_all(nmd);
1665 return nmd->lasterr;
1666 }
1667
1668 /*
1669 * allocator for private memory
1670 */
1671 static void *
1672 _netmap_mem_private_new(size_t size, struct netmap_obj_params *p, int grp_id,
1673 struct netmap_mem_ops *ops, uint64_t memtotal, int *perr)
1674 {
1675 struct netmap_mem_d *d = NULL;
1676 int i, err = 0;
1677 int checksz = 0;
1678
1679 /* if memtotal is != 0 we check that the request fits the available
1680 * memory. Moreover, any surplus memory is assigned to buffers.
1681 */
1682 checksz = (memtotal > 0);
1683
1684 d = nm_os_malloc(size);
1685 if (d == NULL) {
1686 err = ENOMEM;
1687 goto error;
1688 }
1689
1690 *d = nm_blueprint;
1691 d->ops = ops;
1692
1693 err = nm_mem_assign_id(d, grp_id);
1694 if (err)
1695 goto error_free;
1696 snprintf(d->name, NM_MEM_NAMESZ, "%d", d->nm_id);
1697
1698 for (i = 0; i < NETMAP_POOLS_NR; i++) {
1699 snprintf(d->pools[i].name, NETMAP_POOL_MAX_NAMSZ,
1700 nm_blueprint.pools[i].name,
1701 d->name);
1702 if (checksz) {
1703 uint64_t poolsz = (uint64_t)p[i].num * p[i].size;
1704 if (memtotal < poolsz) {
1705 nm_prerr("%s: request too large", d->pools[i].name);
1706 err = ENOMEM;
1707 goto error_rel_id;
1708 }
1709 memtotal -= poolsz;
1710 }
1711 d->params[i].num = p[i].num;
1712 d->params[i].size = p[i].size;
1713 }
1714 if (checksz && memtotal > 0) {
1715 uint64_t sz = d->params[NETMAP_BUF_POOL].size;
1716 uint64_t n = (memtotal + sz - 1) / sz;
1717
1718 if (n) {
1719 if (netmap_verbose) {
1720 nm_prinf("%s: adding %llu more buffers",
1721 d->pools[NETMAP_BUF_POOL].name,
1722 (unsigned long long)n);
1723 }
1724 d->params[NETMAP_BUF_POOL].num += n;
1725 }
1726 }
1727
1728 NMA_LOCK_INIT(d);
1729
1730 err = netmap_mem_config(d);
1731 if (err)
1732 goto error_destroy_lock;
1733
1734 d->flags &= ~NETMAP_MEM_FINALIZED;
1735
1736 return d;
1737
1738 error_destroy_lock:
1739 NMA_LOCK_DESTROY(d);
1740 error_rel_id:
1741 nm_mem_release_id(d);
1742 error_free:
1743 nm_os_free(d);
1744 error:
1745 if (perr)
1746 *perr = err;
1747 return NULL;
1748 }
1749
1750 struct netmap_mem_d *
1751 netmap_mem_private_new(u_int txr, u_int txd, u_int rxr, u_int rxd,
1752 u_int extra_bufs, u_int npipes, int *perr)
1753 {
1754 struct netmap_mem_d *d = NULL;
1755 struct netmap_obj_params p[NETMAP_POOLS_NR];
1756 int i;
1757 u_int v, maxd;
1758 /* account for the fake host rings */
1759 txr++;
1760 rxr++;
1761
1762 /* copy the min values */
1763 for (i = 0; i < NETMAP_POOLS_NR; i++) {
1764 p[i] = netmap_min_priv_params[i];
1765 }
1766
1767 /* possibly increase them to fit user request */
1768 v = sizeof(struct netmap_if) + sizeof(ssize_t) * (txr + rxr);
1769 if (p[NETMAP_IF_POOL].size < v)
1770 p[NETMAP_IF_POOL].size = v;
1771 v = 2 + 4 * npipes;
1772 if (p[NETMAP_IF_POOL].num < v)
1773 p[NETMAP_IF_POOL].num = v;
1774 maxd = (txd > rxd) ? txd : rxd;
1775 v = sizeof(struct netmap_ring) + sizeof(struct netmap_slot) * maxd;
1776 if (p[NETMAP_RING_POOL].size < v)
1777 p[NETMAP_RING_POOL].size = v;
1778 /* each pipe endpoint needs two tx rings (1 normal + 1 host, fake)
1779 * and two rx rings (again, 1 normal and 1 fake host)
1780 */
1781 v = txr + rxr + 8 * npipes;
1782 if (p[NETMAP_RING_POOL].num < v)
1783 p[NETMAP_RING_POOL].num = v;
1784 /* for each pipe we only need the buffers for the 4 "real" rings.
1785 * On the other hand, the pipe ring dimension may be different from
1786 * the parent port ring dimension. As a compromise, we allocate twice the
1787 * space actually needed if the pipe rings were the same size as the parent rings
1788 */
1789 v = (4 * npipes + rxr) * rxd + (4 * npipes + txr) * txd + 2 + extra_bufs;
1790 /* the +2 is for the tx and rx fake buffers (indices 0 and 1) */
1791 if (p[NETMAP_BUF_POOL].num < v)
1792 p[NETMAP_BUF_POOL].num = v;
1793
1794 if (netmap_verbose)
1795 nm_prinf("req if %d*%d ring %d*%d buf %d*%d",
1796 p[NETMAP_IF_POOL].num,
1797 p[NETMAP_IF_POOL].size,
1798 p[NETMAP_RING_POOL].num,
1799 p[NETMAP_RING_POOL].size,
1800 p[NETMAP_BUF_POOL].num,
1801 p[NETMAP_BUF_POOL].size);
1802
1803 d = _netmap_mem_private_new(sizeof(*d), p, -1, &netmap_mem_global_ops, 0, perr);
1804
1805 return d;
1806 }
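
/*
 * Worked example (editor's note): a request for 1 tx and 1 rx ring of
 * 1024 slots each, with no pipes and no extra buffers, becomes
 * txr = rxr = 2 once the fake host rings are added; the ring pool must
 * then hold at least 4 rings of sizeof(struct netmap_ring) +
 * 1024 * sizeof(struct netmap_slot) bytes each, and the buffer pool at
 * least 2*1024 + 2*1024 + 2 buffers, the +2 covering the reserved
 * indexes 0 and 1.
 */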
1807
1808 /* Reference an iommu allocator - find an existing one or create a new one;
1809 * for non-hw adapters, fall back to the global allocator.
1810 */
1811 struct netmap_mem_d *
1812 netmap_mem_get_iommu(struct netmap_adapter *na)
1813 {
1814 int i, err, grp_id;
1815 struct netmap_mem_d *nmd;
1816
1817 if (na == NULL || na->pdev == NULL)
1818 return netmap_mem_get(&nm_mem);
1819
1820 grp_id = nm_iommu_group_id(na->pdev);
1821
1822 NM_MTX_LOCK(nm_mem_list_lock);
1823 nmd = netmap_last_mem_d;
1824 do {
1825 if (!(nmd->flags & NETMAP_MEM_HIDDEN) && nmd->nm_grp == grp_id) {
1826 nmd->refcount++;
1827 NM_DBG_REFC(nmd, __FUNCTION__, __LINE__);
1828 NM_MTX_UNLOCK(nm_mem_list_lock);
1829 return nmd;
1830 }
1831 nmd = nmd->next;
1832 } while (nmd != netmap_last_mem_d);
1833
1834 nmd = nm_os_malloc(sizeof(*nmd));
1835 if (nmd == NULL)
1836 goto error;
1837
1838 *nmd = nm_mem_blueprint;
1839
1840 err = nm_mem_assign_id_locked(nmd, grp_id);
1841 if (err)
1842 goto error_free;
1843
1844 snprintf(nmd->name, sizeof(nmd->name), "%d", nmd->nm_id);
1845
1846 for (i = 0; i < NETMAP_POOLS_NR; i++) {
1847 snprintf(nmd->pools[i].name, NETMAP_POOL_MAX_NAMSZ, "%s-%s",
1848 nm_mem_blueprint.pools[i].name, nmd->name);
1849 }
1850
1851 NMA_LOCK_INIT(nmd);
1852
1853 NM_MTX_UNLOCK(nm_mem_list_lock);
1854 return nmd;
1855
1856 error_free:
1857 nm_os_free(nmd);
1858 error:
1859 NM_MTX_UNLOCK(nm_mem_list_lock);
1860 return NULL;
1861 }
1862
1863 /* call with lock held */
1864 static int
1865 netmap_mem2_config(struct netmap_mem_d *nmd)
1866 {
1867 int i;
1868
1869 if (!netmap_mem_params_changed(nmd->params))
1870 goto out;
1871
1872 nm_prdis("reconfiguring");
1873
1874 if (nmd->flags & NETMAP_MEM_FINALIZED) {
1875 /* reset previous allocation */
1876 for (i = 0; i < NETMAP_POOLS_NR; i++) {
1877 netmap_reset_obj_allocator(&nmd->pools[i]);
1878 }
1879 nmd->flags &= ~NETMAP_MEM_FINALIZED;
1880 }
1881
1882 for (i = 0; i < NETMAP_POOLS_NR; i++) {
1883 nmd->lasterr = netmap_config_obj_allocator(&nmd->pools[i],
1884 nmd->params[i].num, nmd->params[i].size);
1885 if (nmd->lasterr)
1886 goto out;
1887 }
1888
1889 out:
1890
1891 return nmd->lasterr;
1892 }
1893
1894 static int
1895 netmap_mem2_finalize(struct netmap_mem_d *nmd, struct netmap_adapter *na)
1896 {
1897 if (nmd->flags & NETMAP_MEM_FINALIZED)
1898 goto out;
1899
1900 if (netmap_mem_finalize_all(nmd))
1901 goto out;
1902
1903 nmd->lasterr = 0;
1904
1905 out:
1906 return nmd->lasterr;
1907 }
1908
1909 static void
1910 netmap_mem2_delete(struct netmap_mem_d *nmd)
1911 {
1912 int i;
1913
1914 for (i = 0; i < NETMAP_POOLS_NR; i++) {
1915 netmap_destroy_obj_allocator(&nmd->pools[i]);
1916 }
1917
1918 NMA_LOCK_DESTROY(nmd);
1919 if (nmd != &nm_mem)
1920 nm_os_free(nmd);
1921 }
1922
1923 #ifdef WITH_EXTMEM
1924 /* doubly linked list of all existing external allocators */
1925 static struct netmap_mem_ext *netmap_mem_ext_list = NULL;
1926 NM_MTX_T nm_mem_ext_list_lock;
1927 #endif /* WITH_EXTMEM */
1928
1929 int
1930 netmap_mem_init(void)
1931 {
1932 nm_mem_blueprint = nm_mem;
1933 NM_MTX_INIT(nm_mem_list_lock);
1934 NMA_LOCK_INIT(&nm_mem);
1935 netmap_mem_get(&nm_mem);
1936 #ifdef WITH_EXTMEM
1937 NM_MTX_INIT(nm_mem_ext_list_lock);
1938 #endif /* WITH_EXTMEM */
1939 return (0);
1940 }
1941
1942 void
1943 netmap_mem_fini(void)
1944 {
1945 netmap_mem_put(&nm_mem);
1946 }
1947
1948 static int
1949 netmap_mem_ring_needed(struct netmap_kring *kring)
1950 {
1951 return kring->ring == NULL &&
1952 (kring->users > 0 ||
1953 (kring->nr_kflags & NKR_NEEDRING));
1954 }
1955
1956 static int
1957 netmap_mem_ring_todelete(struct netmap_kring *kring)
1958 {
1959 return kring->ring != NULL &&
1960 kring->users == 0 &&
1961 !(kring->nr_kflags & NKR_NEEDRING);
1962 }
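/*
 * The two predicates above partition the kring states as follows:
 *
 *	ring == NULL && (users > 0 || NKR_NEEDRING)	-> create the ring
 *	ring != NULL && users == 0 && !NKR_NEEDRING	-> delete the ring
 *
 * In every other state the ring is left alone: either it was already
 * created by somebody else, or it is still in use.
 */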
1963
1964
1965 /* call with NMA_LOCK held.
1966 *
1967 * Allocate netmap rings and buffers for this card
1968 * The rings are contiguous, but have variable size.
1969 * The kring array must follow the layout described
1970 * in netmap_krings_create().
1971 */
1972 static int
1973 netmap_mem2_rings_create(struct netmap_mem_d *nmd, struct netmap_adapter *na)
1974 {
1975 enum txrx t;
1976
1977 for_rx_tx(t) {
1978 u_int i;
1979
1980 for (i = 0; i < netmap_all_rings(na, t); i++) {
1981 struct netmap_kring *kring = NMR(na, t)[i];
1982 struct netmap_ring *ring = kring->ring;
1983 u_int len, ndesc;
1984
1985 if (!netmap_mem_ring_needed(kring)) {
1986 /* unneeded, or already created by somebody else */
1987 if (netmap_debug & NM_DEBUG_MEM)
1988 nm_prinf("NOT creating ring %s (ring %p, users %d neekring %d)",
1989 kring->name, ring, kring->users, kring->nr_kflags & NKR_NEEDRING);
1990 continue;
1991 }
1992 if (netmap_debug & NM_DEBUG_MEM)
1993 nm_prinf("creating %s", kring->name);
1994 ndesc = kring->nkr_num_slots;
1995 len = sizeof(struct netmap_ring) +
1996 ndesc * sizeof(struct netmap_slot);
1997 ring = netmap_ring_malloc(nmd, len);
1998 if (ring == NULL) {
1999 nm_prerr("Cannot allocate %s_ring", nm_txrx2str(t));
2000 goto cleanup;
2001 }
2002 nm_prdis("txring at %p", ring);
2003 kring->ring = ring;
2004 *(uint32_t *)(uintptr_t)&ring->num_slots = ndesc;
2005 *(int64_t *)(uintptr_t)&ring->buf_ofs =
2006 (nmd->pools[NETMAP_IF_POOL].memtotal +
2007 nmd->pools[NETMAP_RING_POOL].memtotal) -
2008 netmap_ring_offset(nmd, ring);
2009
2010 /* copy values from kring */
2011 ring->head = kring->rhead;
2012 ring->cur = kring->rcur;
2013 ring->tail = kring->rtail;
2014 *(uint32_t *)(uintptr_t)&ring->nr_buf_size =
2015 netmap_mem_bufsize(nmd);
2016 nm_prdis("%s h %d c %d t %d", kring->name,
2017 ring->head, ring->cur, ring->tail);
2018 nm_prdis("initializing slots for %s_ring", nm_txrx2str(t));
2019 if (!(kring->nr_kflags & NKR_FAKERING)) {
2020 /* this is a real ring */
2021 if (netmap_debug & NM_DEBUG_MEM)
2022 nm_prinf("allocating buffers for %s", kring->name);
2023 if (netmap_new_bufs(nmd, ring->slot, ndesc)) {
2024 nm_prerr("Cannot allocate buffers for %s_ring", nm_txrx2str(t));
2025 goto cleanup;
2026 }
2027 } else {
2028 /* this is a fake ring, set all indices to 0 */
2029 if (netmap_debug & NM_DEBUG_MEM)
2030 nm_prinf("NOT allocating buffers for %s", kring->name);
2031 netmap_mem_set_ring(nmd, ring->slot, ndesc, 0);
2032 }
2033 /* ring info */
2034 *(uint16_t *)(uintptr_t)&ring->ringid = kring->ring_id;
2035 *(uint16_t *)(uintptr_t)&ring->dir = kring->tx;
2036 }
2037 }
2038
2039 return 0;
2040
2041 cleanup:
2042 /* we cannot actually clean up here, since we don't own kring->users
2043  * and kring->nr_kflags & NKR_NEEDRING. The caller must decrement
2044  * the first or zero out the second, then call netmap_free_rings()
2045  * to do the cleanup.
2046  */
2047
2048 return ENOMEM;
2049 }
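/*
 * Sketch of how userspace consumes the buf_ofs field initialized above
 * (this mirrors the NETMAP_BUF() macro in netmap_user.h):
 *
 *	char *buf = (char *)ring + ring->buf_ofs +
 *		slot->buf_idx * ring->nr_buf_size;
 *
 * buf_ofs is the distance from the ring to the start of the buffer pool,
 * so buffer indices can be resolved without knowing where the shared
 * region is mapped in the process address space.
 */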
2050
2051 static void
2052 netmap_mem2_rings_delete(struct netmap_mem_d *nmd, struct netmap_adapter *na)
2053 {
2054 enum txrx t;
2055
2056 for_rx_tx(t) {
2057 u_int i;
2058 for (i = 0; i < netmap_all_rings(na, t); i++) {
2059 struct netmap_kring *kring = NMR(na, t)[i];
2060 struct netmap_ring *ring = kring->ring;
2061
2062 if (!netmap_mem_ring_todelete(kring)) {
2063 if (netmap_debug & NM_DEBUG_MEM)
2064 nm_prinf("NOT deleting ring %s (ring %p, users %d neekring %d)",
2065 kring->name, ring, kring->users, kring->nr_kflags & NKR_NEEDRING);
2066 continue;
2067 }
2068 if (netmap_debug & NM_DEBUG_MEM)
2069 nm_prinf("deleting ring %s", kring->name);
2070 if (!(kring->nr_kflags & NKR_FAKERING)) {
2071 nm_prdis("freeing bufs for %s", kring->name);
2072 netmap_free_bufs(nmd, ring->slot, kring->nkr_num_slots);
2073 } else {
2074 nm_prdis("NOT freeing bufs for %s", kring->name);
2075 }
2076 netmap_ring_free(nmd, ring);
2077 kring->ring = NULL;
2078 }
2079 }
2080 }
2081
2082 /* call with NMA_LOCK held */
2083 /*
2084 * Allocate the per-fd structure netmap_if.
2085 *
2086 * We assume that the configuration stored in na
2087 * (number of tx/rx rings and descs) does not change while
2088 * the interface is in netmap mode.
2089 */
2090 static struct netmap_if *
2091 netmap_mem2_if_new(struct netmap_mem_d *nmd,
2092 struct netmap_adapter *na, struct netmap_priv_d *priv)
2093 {
2094 struct netmap_if *nifp;
2095 ssize_t base; /* handy for relative offsets between rings and nifp */
2096 u_int i, len, n[NR_TXRX], ntot;
2097 enum txrx t;
2098
2099 ntot = 0;
2100 for_rx_tx(t) {
2101 /* account for the (possibly fake) host rings */
2102 n[t] = netmap_all_rings(na, t);
2103 ntot += n[t];
2104 }
2105 /*
2106 * the descriptor is followed inline by an array of offsets
2107 * to the tx and rx rings in the shared memory region.
2108 */
2109
2110 len = sizeof(struct netmap_if) + (ntot * sizeof(ssize_t));
2111 nifp = netmap_if_malloc(nmd, len);
2112 if (nifp == NULL) {
2113 return NULL;
2114 }
2115
2116 /* initialize base fields -- override const */
2117 *(u_int *)(uintptr_t)&nifp->ni_tx_rings = na->num_tx_rings;
2118 *(u_int *)(uintptr_t)&nifp->ni_rx_rings = na->num_rx_rings;
2119 *(u_int *)(uintptr_t)&nifp->ni_host_tx_rings =
2120 (na->num_host_tx_rings ? na->num_host_tx_rings : 1);
2121 *(u_int *)(uintptr_t)&nifp->ni_host_rx_rings =
2122 (na->num_host_rx_rings ? na->num_host_rx_rings : 1);
2123 strlcpy(nifp->ni_name, na->name, sizeof(nifp->ni_name));
2124
2125 /*
2126 * fill the slots for the rx and tx rings. They contain the offset
2127 * between the ring and nifp, so the information is usable in
2128 * userspace to reach the ring from the nifp.
2129 */
2130 base = netmap_if_offset(nmd, nifp);
2131 for (i = 0; i < n[NR_TX]; i++) {
2132 /* XXX instead of ofs == 0 maybe use the offset of an error
2133 * ring, like we do for buffers? */
2134 ssize_t ofs = 0;
2135
2136 if (na->tx_rings[i]->ring != NULL && i >= priv->np_qfirst[NR_TX]
2137 && i < priv->np_qlast[NR_TX]) {
2138 ofs = netmap_ring_offset(nmd,
2139 na->tx_rings[i]->ring) - base;
2140 }
2141 *(ssize_t *)(uintptr_t)&nifp->ring_ofs[i] = ofs;
2142 }
2143 for (i = 0; i < n[NR_RX]; i++) {
2144 /* XXX instead of ofs == 0 maybe use the offset of an error
2145 * ring, like we do for buffers? */
2146 ssize_t ofs = 0;
2147
2148 if (na->rx_rings[i]->ring != NULL && i >= priv->np_qfirst[NR_RX]
2149 && i < priv->np_qlast[NR_RX]) {
2150 ofs = netmap_ring_offset(nmd,
2151 na->rx_rings[i]->ring) - base;
2152 }
2153 *(ssize_t *)(uintptr_t)&nifp->ring_ofs[i+n[NR_TX]] = ofs;
2154 }
2155
2156 return (nifp);
2157 }
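/*
 * Userspace reaches the rings through the ring_ofs[] array filled above;
 * a simplified version of the NETMAP_TXRING()/NETMAP_RXRING() macros from
 * netmap_user.h:
 *
 *	struct netmap_ring *txring = (struct netmap_ring *)
 *		((char *)nifp + nifp->ring_ofs[i]);		// tx ring i
 *	struct netmap_ring *rxring = (struct netmap_ring *)
 *		((char *)nifp + nifp->ring_ofs[i + ntx]);	// rx ring i
 *
 * where ntx is the total number of tx rings (hw + host), matching the
 * n[NR_TX] displacement used in the second loop above.
 */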
2158
2159 static void
2160 netmap_mem2_if_delete(struct netmap_mem_d *nmd,
2161 struct netmap_adapter *na, struct netmap_if *nifp)
2162 {
2163 if (nifp == NULL)
2164 /* nothing to do */
2165 return;
2166 if (nifp->ni_bufs_head)
2167 netmap_extra_free(na, nifp->ni_bufs_head);
2168 netmap_if_free(nmd, nifp);
2169 }
2170
2171 static void
2172 netmap_mem2_deref(struct netmap_mem_d *nmd, struct netmap_adapter *na)
2173 {
2174
2175 if (netmap_debug & NM_DEBUG_MEM)
2176 nm_prinf("active = %d", nmd->active);
2177
2178 }
2179
2180 struct netmap_mem_ops netmap_mem_global_ops = {
2181 .nmd_get_lut = netmap_mem2_get_lut,
2182 .nmd_get_info = netmap_mem2_get_info,
2183 .nmd_ofstophys = netmap_mem2_ofstophys,
2184 .nmd_config = netmap_mem2_config,
2185 .nmd_finalize = netmap_mem2_finalize,
2186 .nmd_deref = netmap_mem2_deref,
2187 .nmd_delete = netmap_mem2_delete,
2188 .nmd_if_offset = netmap_mem2_if_offset,
2189 .nmd_if_new = netmap_mem2_if_new,
2190 .nmd_if_delete = netmap_mem2_if_delete,
2191 .nmd_rings_create = netmap_mem2_rings_create,
2192 .nmd_rings_delete = netmap_mem2_rings_delete
2193 };
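/*
 * These functions are normally invoked through the ops vtable, e.g.
 * (as done elsewhere in this file):
 *
 *	error = nmd->ops->nmd_config(nmd);
 *
 * so the same call sites work unchanged with the extmem and
 * ptnetmap-guest allocators defined below.
 */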
2194
2195 int
2196 netmap_mem_pools_info_get(struct nmreq_pools_info *req,
2197 struct netmap_mem_d *nmd)
2198 {
2199 int ret;
2200
2201 ret = netmap_mem_get_info(nmd, &req->nr_memsize, NULL,
2202 &req->nr_mem_id);
2203 if (ret) {
2204 return ret;
2205 }
2206
2207 NMA_LOCK(nmd);
2208 req->nr_if_pool_offset = 0;
2209 req->nr_if_pool_objtotal = nmd->pools[NETMAP_IF_POOL].objtotal;
2210 req->nr_if_pool_objsize = nmd->pools[NETMAP_IF_POOL]._objsize;
2211
2212 req->nr_ring_pool_offset = nmd->pools[NETMAP_IF_POOL].memtotal;
2213 req->nr_ring_pool_objtotal = nmd->pools[NETMAP_RING_POOL].objtotal;
2214 req->nr_ring_pool_objsize = nmd->pools[NETMAP_RING_POOL]._objsize;
2215
2216 req->nr_buf_pool_offset = nmd->pools[NETMAP_IF_POOL].memtotal +
2217 nmd->pools[NETMAP_RING_POOL].memtotal;
2218 req->nr_buf_pool_objtotal = nmd->pools[NETMAP_BUF_POOL].objtotal;
2219 req->nr_buf_pool_objsize = nmd->pools[NETMAP_BUF_POOL]._objsize;
2220 NMA_UNLOCK(nmd);
2221
2222 return 0;
2223 }
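/*
 * The offsets reported above follow the fixed layout of the shared
 * memory region:
 *
 *	0					if pool
 *	if.memtotal				ring pool
 *	if.memtotal + ring.memtotal		buf pool
 *
 * the same arithmetic used for ring->buf_ofs in netmap_mem2_rings_create().
 */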
2224
2225 #ifdef WITH_EXTMEM
2226 struct netmap_mem_ext {
2227 struct netmap_mem_d up;
2228
2229 struct nm_os_extmem *os;
2230 struct netmap_mem_ext *next, *prev;
2231 };
2232
2233 /* call with nm_mem_list_lock held */
2234 static void
2235 netmap_mem_ext_register(struct netmap_mem_ext *e)
2236 {
2237 NM_MTX_LOCK(nm_mem_ext_list_lock);
2238 if (netmap_mem_ext_list)
2239 netmap_mem_ext_list->prev = e;
2240 e->next = netmap_mem_ext_list;
2241 netmap_mem_ext_list = e;
2242 e->prev = NULL;
2243 NM_MTX_UNLOCK(nm_mem_ext_list_lock);
2244 }
2245
2246 /* call with nm_mem_list_lock held */
2247 static void
2248 netmap_mem_ext_unregister(struct netmap_mem_ext *e)
2249 {
2250 if (e->prev)
2251 e->prev->next = e->next;
2252 else
2253 netmap_mem_ext_list = e->next;
2254 if (e->next)
2255 e->next->prev = e->prev;
2256 e->prev = e->next = NULL;
2257 }
2258
2259 static struct netmap_mem_ext *
2260 netmap_mem_ext_search(struct nm_os_extmem *os)
2261 {
2262 struct netmap_mem_ext *e;
2263
2264 NM_MTX_LOCK(nm_mem_ext_list_lock);
2265 for (e = netmap_mem_ext_list; e; e = e->next) {
2266 if (nm_os_extmem_isequal(e->os, os)) {
2267 netmap_mem_get(&e->up);
2268 break;
2269 }
2270 }
2271 NM_MTX_UNLOCK(nm_mem_ext_list_lock);
2272 return e;
2273 }
2274
2275
2276 static void
2277 netmap_mem_ext_delete(struct netmap_mem_d *d)
2278 {
2279 int i;
2280 struct netmap_mem_ext *e =
2281 (struct netmap_mem_ext *)d;
2282
2283 netmap_mem_ext_unregister(e);
2284
2285 for (i = 0; i < NETMAP_POOLS_NR; i++) {
2286 struct netmap_obj_pool *p = &d->pools[i];
2287
2288 if (p->lut) {
2289 nm_free_lut(p->lut, p->objtotal);
2290 p->lut = NULL;
2291 }
2292 }
2293 if (e->os)
2294 nm_os_extmem_delete(e->os);
2295 netmap_mem2_delete(d);
2296 }
2297
2298 static int
2299 netmap_mem_ext_config(struct netmap_mem_d *nmd)
2300 {
2301 return 0;
2302 }
2303
2304 struct netmap_mem_ops netmap_mem_ext_ops = {
2305 .nmd_get_lut = netmap_mem2_get_lut,
2306 .nmd_get_info = netmap_mem2_get_info,
2307 .nmd_ofstophys = netmap_mem2_ofstophys,
2308 .nmd_config = netmap_mem_ext_config,
2309 .nmd_finalize = netmap_mem2_finalize,
2310 .nmd_deref = netmap_mem2_deref,
2311 .nmd_delete = netmap_mem_ext_delete,
2312 .nmd_if_offset = netmap_mem2_if_offset,
2313 .nmd_if_new = netmap_mem2_if_new,
2314 .nmd_if_delete = netmap_mem2_if_delete,
2315 .nmd_rings_create = netmap_mem2_rings_create,
2316 .nmd_rings_delete = netmap_mem2_rings_delete
2317 };
2318
2319 struct netmap_mem_d *
2320 netmap_mem_ext_create(uint64_t usrptr, struct nmreq_pools_info *pi, int *perror)
2321 {
2322 int error = 0;
2323 int i, j;
2324 struct netmap_mem_ext *nme;
2325 char *clust;
2326 size_t off;
2327 struct nm_os_extmem *os = NULL;
2328 int nr_pages;
2329
2330 // XXX sanity checks
2331 if (pi->nr_if_pool_objtotal == 0)
2332 pi->nr_if_pool_objtotal = netmap_min_priv_params[NETMAP_IF_POOL].num;
2333 if (pi->nr_if_pool_objsize == 0)
2334 pi->nr_if_pool_objsize = netmap_min_priv_params[NETMAP_IF_POOL].size;
2335 if (pi->nr_ring_pool_objtotal == 0)
2336 pi->nr_ring_pool_objtotal = netmap_min_priv_params[NETMAP_RING_POOL].num;
2337 if (pi->nr_ring_pool_objsize == 0)
2338 pi->nr_ring_pool_objsize = netmap_min_priv_params[NETMAP_RING_POOL].size;
2339 if (pi->nr_buf_pool_objtotal == 0)
2340 pi->nr_buf_pool_objtotal = netmap_min_priv_params[NETMAP_BUF_POOL].num;
2341 if (pi->nr_buf_pool_objsize == 0)
2342 pi->nr_buf_pool_objsize = netmap_min_priv_params[NETMAP_BUF_POOL].size;
2343 if (netmap_verbose & NM_DEBUG_MEM)
2344 nm_prinf("if %d %d ring %d %d buf %d %d",
2345 pi->nr_if_pool_objtotal, pi->nr_if_pool_objsize,
2346 pi->nr_ring_pool_objtotal, pi->nr_ring_pool_objsize,
2347 pi->nr_buf_pool_objtotal, pi->nr_buf_pool_objsize);
2348
2349 os = nm_os_extmem_create(usrptr, pi, &error);
2350 if (os == NULL) {
2351 nm_prerr("os extmem creation failed");
2352 goto out;
2353 }
2354
2355 nme = netmap_mem_ext_search(os);
2356 if (nme) {
2357 nm_os_extmem_delete(os);
2358 return &nme->up;
2359 }
2360 if (netmap_verbose & NM_DEBUG_MEM)
2361 nm_prinf("not found, creating new");
2362
2363 nme = _netmap_mem_private_new(sizeof(*nme),
2365 (struct netmap_obj_params[]){
2366 { pi->nr_if_pool_objsize, pi->nr_if_pool_objtotal },
2367 { pi->nr_ring_pool_objsize, pi->nr_ring_pool_objtotal },
2368 { pi->nr_buf_pool_objsize, pi->nr_buf_pool_objtotal }},
2369 -1,
2370 &netmap_mem_ext_ops,
2371 pi->nr_memsize,
2372 &error);
2373 if (nme == NULL)
2374 goto out_unmap;
2375
2376 nr_pages = nm_os_extmem_nr_pages(os);
2377
2378 /* from now on pages will be released by the nme destructor;
2379  * we set os = NULL below to prevent release in out_unmap
2380  */
2381 nme->os = os;
2382 os = NULL; /* pass ownership */
2383
2384 clust = nm_os_extmem_nextpage(nme->os);
2385 off = 0;
2386 for (i = 0; i < NETMAP_POOLS_NR; i++) {
2387 struct netmap_obj_pool *p = &nme->up.pools[i];
2388 struct netmap_obj_params *o = &nme->up.params[i];
2389
2390 p->_objsize = o->size;
2391 p->_clustsize = o->size;
2392 p->_clustentries = 1;
2393
2394 p->lut = nm_alloc_lut(o->num);
2395 if (p->lut == NULL) {
2396 error = ENOMEM;
2397 goto out_delete;
2398 }
2399
2400 p->bitmap_slots = (o->num + sizeof(uint32_t) - 1) / sizeof(uint32_t);
2401 p->invalid_bitmap = nm_os_malloc(sizeof(uint32_t) * p->bitmap_slots);
2402 if (p->invalid_bitmap == NULL) {
2403 error = ENOMEM;
2404 goto out_delete;
2405 }
2406
2407 if (nr_pages == 0) {
2408 p->objtotal = 0;
2409 p->memtotal = 0;
2410 p->objfree = 0;
2411 continue;
2412 }
2413
2414 for (j = 0; j < o->num && nr_pages > 0; j++) {
2415 size_t noff;
2416
2417 p->lut[j].vaddr = clust + off;
2418 #if !defined(linux) && !defined(_WIN32)
2419 p->lut[j].paddr = vtophys(p->lut[j].vaddr);
2420 #endif
2421 nm_prdis("%s %d at %p", p->name, j, p->lut[j].vaddr);
2422 noff = off + p->_objsize;
2423 if (noff < PAGE_SIZE) {
2424 off = noff;
2425 continue;
2426 }
2427 nm_prdis("too big, recomputing offset...");
2428 while (noff >= PAGE_SIZE) {
2429 char *old_clust = clust;
2430 noff -= PAGE_SIZE;
2431 clust = nm_os_extmem_nextpage(nme->os);
2432 nr_pages--;
2433 nm_prdis("noff %zu page %p nr_pages %d", noff,
2434 page_to_virt(*pages), nr_pages);
2435 if (noff > 0 && !nm_isset(p->invalid_bitmap, j) &&
2436 (nr_pages == 0 ||
2437 old_clust + PAGE_SIZE != clust))
2438 {
2439 /* out of space or non-contiguous:
2440  * drop this object
2441  */
2442 p->invalid_bitmap[ (j>>5) ] |= 1U << (j & 31U);
2443 nm_prdis("non contiguous at off %zu, drop", noff);
2444 }
2445 if (nr_pages == 0)
2446 break;
2447 }
2448 off = noff;
2449 }
2450 p->objtotal = j;
2451 p->numclusters = p->objtotal;
2452 p->memtotal = j * (size_t)p->_objsize;
2453 nm_prdis("%d memtotal %zu", j, p->memtotal);
2454 }
2455
2456 netmap_mem_ext_register(nme);
2457
2458 return &nme->up;
2459
2460 out_delete:
2461 netmap_mem_put(&nme->up);
2462 out_unmap:
2463 if (os)
2464 nm_os_extmem_delete(os);
2465 out:
2466 if (perror)
2467 *perror = error;
2468 return NULL;
2469
2470 }
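/*
 * Hedged userspace sketch of how a region reaches netmap_mem_ext_create()
 * (struct and request names from net/netmap.h; the exact wiring may vary
 * across netmap versions):
 *
 *	struct nmreq_opt_extmem e = { 0 };
 *
 *	e.nro_opt.nro_reqtype = NETMAP_REQ_OPT_EXTMEM;
 *	e.nro_usrptr = (uintptr_t)mmap(NULL, len, PROT_READ | PROT_WRITE,
 *	    MAP_ANONYMOUS | MAP_SHARED, -1, 0);
 *	// e.nro_info carries the nmreq_pools_info sizes; zero fields fall
 *	// back to netmap_min_priv_params, as handled at the top of the
 *	// function above.
 *
 * The option is then chained into the nmreq_header passed to the
 * NIOCCTRL ioctl together with NETMAP_REQ_REGISTER.
 */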
2471 #endif /* WITH_EXTMEM */
2472
2473
2474 #ifdef WITH_PTNETMAP
2475 struct mem_pt_if {
2476 struct mem_pt_if *next;
2477 if_t ifp;
2478 unsigned int nifp_offset;
2479 };
2480
2481 /* Netmap allocator for ptnetmap guests. */
2482 struct netmap_mem_ptg {
2483 struct netmap_mem_d up;
2484
2485 vm_paddr_t nm_paddr; /* physical address in the guest */
2486 void *nm_addr; /* virtual address in the guest */
2487 struct netmap_lut buf_lut; /* lookup table for BUF pool in the guest */
2488 nm_memid_t host_mem_id; /* allocator identifier in the host */
2489 struct ptnetmap_memdev *ptn_dev;/* ptnetmap memdev */
2490 struct mem_pt_if *pt_ifs; /* list of interfaces in passthrough */
2491 };
2492
2493 /* Link a passthrough interface to a passthrough netmap allocator. */
2494 static int
2495 netmap_mem_pt_guest_ifp_add(struct netmap_mem_d *nmd, if_t ifp,
2496 unsigned int nifp_offset)
2497 {
2498 struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;
2499 struct mem_pt_if *ptif = nm_os_malloc(sizeof(*ptif));
2500
2501 if (!ptif) {
2502 return ENOMEM;
2503 }
2504
2505 NMA_LOCK(nmd);
2506
2507 ptif->ifp = ifp;
2508 ptif->nifp_offset = nifp_offset;
2509
2510 if (ptnmd->pt_ifs) {
2511 ptif->next = ptnmd->pt_ifs;
2512 }
2513 ptnmd->pt_ifs = ptif;
2514
2515 NMA_UNLOCK(nmd);
2516
2517 nm_prinf("ifp=%s,nifp_offset=%u",
2518 if_name(ptif->ifp), ptif->nifp_offset);
2519
2520 return 0;
2521 }
2522
2523 /* Called with NMA_LOCK(nmd) held. */
2524 static struct mem_pt_if *
2525 netmap_mem_pt_guest_ifp_lookup(struct netmap_mem_d *nmd, if_t ifp)
2526 {
2527 struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;
2528 struct mem_pt_if *curr;
2529
2530 for (curr = ptnmd->pt_ifs; curr; curr = curr->next) {
2531 if (curr->ifp == ifp) {
2532 return curr;
2533 }
2534 }
2535
2536 return NULL;
2537 }
2538
2539 /* Unlink a passthrough interface from a passthrough netmap allocator. */
2540 int
2541 netmap_mem_pt_guest_ifp_del(struct netmap_mem_d *nmd, if_t ifp)
2542 {
2543 struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;
2544 struct mem_pt_if *prev = NULL;
2545 struct mem_pt_if *curr;
2546 int ret = -1;
2547
2548 NMA_LOCK(nmd);
2549
2550 for (curr = ptnmd->pt_ifs; curr; curr = curr->next) {
2551 if (curr->ifp == ifp) {
2552 if (prev) {
2553 prev->next = curr->next;
2554 } else {
2555 ptnmd->pt_ifs = curr->next;
2556 }
2557 nm_prinf("removed (ifp=%s,nifp_offset=%u)",
2558 if_name(curr->ifp), curr->nifp_offset);
2559 nm_os_free(curr);
2560 ret = 0;
2561 break;
2562 }
2563 prev = curr;
2564 }
2565
2566 NMA_UNLOCK(nmd);
2567
2568 return ret;
2569 }
2570
2571 static int
2572 netmap_mem_pt_guest_get_lut(struct netmap_mem_d *nmd, struct netmap_lut *lut)
2573 {
2574 struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;
2575
2576 if (!(nmd->flags & NETMAP_MEM_FINALIZED)) {
2577 return EINVAL;
2578 }
2579
2580 *lut = ptnmd->buf_lut;
2581 return 0;
2582 }
2583
2584 static int
2585 netmap_mem_pt_guest_get_info(struct netmap_mem_d *nmd, uint64_t *size,
2586 u_int *memflags, uint16_t *id)
2587 {
2588 int error = 0;
2589
2590 error = nmd->ops->nmd_config(nmd);
2591 if (error)
2592 goto out;
2593
2594 if (size)
2595 *size = nmd->nm_totalsize;
2596 if (memflags)
2597 *memflags = nmd->flags;
2598 if (id)
2599 *id = nmd->nm_id;
2600
2601 out:
2602
2603 return error;
2604 }
2605
2606 static vm_paddr_t
2607 netmap_mem_pt_guest_ofstophys(struct netmap_mem_d *nmd, vm_ooffset_t off)
2608 {
2609 struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;
2610 vm_paddr_t paddr;
2611 /* if the offset is valid, just return csb->base_addr + off */
2612 paddr = (vm_paddr_t)(ptnmd->nm_paddr + off);
2613 nm_prdis("off %lx padr %lx", off, (unsigned long)paddr);
2614 return paddr;
2615 }
2616
2617 static int
2618 netmap_mem_pt_guest_config(struct netmap_mem_d *nmd)
2619 {
2620 /* nothing to do, we are configured on creation
2621 * and configuration never changes thereafter
2622 */
2623 return 0;
2624 }
2625
2626 static int
2627 netmap_mem_pt_guest_finalize(struct netmap_mem_d *nmd, struct netmap_adapter *na)
2628 {
2629 struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;
2630 uint64_t mem_size;
2631 uint32_t bufsize;
2632 uint32_t nbuffers;
2633 uint32_t poolofs;
2634 vm_paddr_t paddr;
2635 char *vaddr;
2636 int i;
2637 int error = 0;
2638
2639 if (nmd->flags & NETMAP_MEM_FINALIZED)
2640 goto out;
2641
2642 if (ptnmd->ptn_dev == NULL) {
2643 nm_prerr("ptnetmap memdev not attached");
2644 error = ENOMEM;
2645 goto out;
2646 }
2647 /* Map memory through ptnetmap-memdev BAR. */
2648 error = nm_os_pt_memdev_iomap(ptnmd->ptn_dev, &ptnmd->nm_paddr,
2649 &ptnmd->nm_addr, &mem_size);
2650 if (error)
2651 goto out;
2652
2653 /* Initialize the lut using the information contained in the
2654 * ptnetmap memory device. */
2655 bufsize = nm_os_pt_memdev_ioread(ptnmd->ptn_dev,
2656 PTNET_MDEV_IO_BUF_POOL_OBJSZ);
2657 nbuffers = nm_os_pt_memdev_ioread(ptnmd->ptn_dev,
2658 PTNET_MDEV_IO_BUF_POOL_OBJNUM);
2659
2660 /* allocate the lut */
2661 if (ptnmd->buf_lut.lut == NULL) {
2662 nm_prinf("allocating lut");
2663 ptnmd->buf_lut.lut = nm_alloc_lut(nbuffers);
2664 if (ptnmd->buf_lut.lut == NULL) {
2665 nm_prerr("lut allocation failed");
2666 return ENOMEM;
2667 }
2668 }
2669
2670 /* we have physically contiguous memory mapped through PCI BAR */
2671 poolofs = nm_os_pt_memdev_ioread(ptnmd->ptn_dev,
2672 PTNET_MDEV_IO_BUF_POOL_OFS);
2673 vaddr = (char *)(ptnmd->nm_addr) + poolofs;
2674 paddr = ptnmd->nm_paddr + poolofs;
2675
2676 for (i = 0; i < nbuffers; i++) {
2677 ptnmd->buf_lut.lut[i].vaddr = vaddr;
2678 vaddr += bufsize;
2679 paddr += bufsize;
2680 }
2681
2682 ptnmd->buf_lut.objtotal = nbuffers;
2683 ptnmd->buf_lut.objsize = bufsize;
2684 nmd->nm_totalsize = mem_size;
2685
2686 /* Initialize these fields as they are needed by
2687  * netmap_mem_bufsize().
2688  * XXX please improve this: why do we need this
2689  * replication? Maybe nmd->pools[] should not be
2690  * there for the guest allocator? */
2691 nmd->pools[NETMAP_BUF_POOL]._objsize = bufsize;
2692 nmd->pools[NETMAP_BUF_POOL]._objtotal = nbuffers;
2693
2694 nmd->flags |= NETMAP_MEM_FINALIZED;
2695 out:
2696 return error;
2697 }
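/*
 * After finalization the guest-side address arithmetic is linear: a
 * compact restatement of the loop above is
 *
 *	lut[i].vaddr = (char *)ptnmd->nm_addr + poolofs + i * bufsize;
 *
 * and the corresponding guest physical address is
 * nm_paddr + poolofs + i * bufsize, consistent with
 * netmap_mem_pt_guest_ofstophys().
 */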
2698
2699 static void
2700 netmap_mem_pt_guest_deref(struct netmap_mem_d *nmd, struct netmap_adapter *na)
2701 {
2702 struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;
2703
2704 if (nmd->active == 1 &&
2705 (nmd->flags & NETMAP_MEM_FINALIZED)) {
2706 nmd->flags &= ~NETMAP_MEM_FINALIZED;
2707 /* unmap ptnetmap-memdev memory */
2708 if (ptnmd->ptn_dev) {
2709 nm_os_pt_memdev_iounmap(ptnmd->ptn_dev);
2710 }
2711 ptnmd->nm_addr = NULL;
2712 ptnmd->nm_paddr = 0;
2713 }
2714 }
2715
2716 static ssize_t
2717 netmap_mem_pt_guest_if_offset(struct netmap_mem_d *nmd, const void *vaddr)
2718 {
2719 struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;
2720
2721 return (const char *)(vaddr) - (char *)(ptnmd->nm_addr);
2722 }
2723
2724 static void
2725 netmap_mem_pt_guest_delete(struct netmap_mem_d *nmd)
2726 {
2727 if (nmd == NULL)
2728 return;
2729 if (netmap_verbose)
2730 nm_prinf("deleting %p", nmd);
2731 if (nmd->active > 0)
2732 nm_prerr("bug: deleting mem allocator with active=%d!", nmd->active);
2733 if (netmap_verbose)
2734 nm_prinf("done deleting %p", nmd);
2735 NMA_LOCK_DESTROY(nmd);
2736 nm_os_free(nmd);
2737 }
2738
2739 static struct netmap_if *
2740 netmap_mem_pt_guest_if_new(struct netmap_mem_d *nmd,
2741 struct netmap_adapter *na, struct netmap_priv_d *priv)
2742 {
2743 struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;
2744 struct mem_pt_if *ptif;
2745 struct netmap_if *nifp = NULL;
2746
2747 ptif = netmap_mem_pt_guest_ifp_lookup(nmd, na->ifp);
2748 if (ptif == NULL) {
2749 nm_prerr("interface %s is not in passthrough", na->name);
2750 goto out;
2751 }
2752
2753 nifp = (struct netmap_if *)((char *)(ptnmd->nm_addr) +
2754 ptif->nifp_offset);
2755 out:
2756 return nifp;
2757 }
2758
2759 static void
2760 netmap_mem_pt_guest_if_delete(struct netmap_mem_d *nmd,
2761 struct netmap_adapter *na, struct netmap_if *nifp)
2762 {
2763 struct mem_pt_if *ptif;
2764
2765 ptif = netmap_mem_pt_guest_ifp_lookup(nmd, na->ifp);
2766 if (ptif == NULL) {
2767 nm_prerr("interface %s is not in passthrough", na->name);
2768 }
2769 }
2770
2771 static int
2772 netmap_mem_pt_guest_rings_create(struct netmap_mem_d *nmd,
2773 struct netmap_adapter *na)
2774 {
2775 struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;
2776 struct mem_pt_if *ptif;
2777 struct netmap_if *nifp;
2778 int i, error = -1;
2779
2780 ptif = netmap_mem_pt_guest_ifp_lookup(nmd, na->ifp);
2781 if (ptif == NULL) {
2782 nm_prerr("interface %s is not in passthrough", na->name);
2783 goto out;
2784 }
2785
2786
2787 /* point each kring to the corresponding backend ring */
2788 nifp = (struct netmap_if *)((char *)ptnmd->nm_addr + ptif->nifp_offset);
2789 for (i = 0; i < netmap_all_rings(na, NR_TX); i++) {
2790 struct netmap_kring *kring = na->tx_rings[i];
2791 if (kring->ring)
2792 continue;
2793 kring->ring = (struct netmap_ring *)
2794 ((char *)nifp + nifp->ring_ofs[i]);
2795 }
2796 for (i = 0; i < netmap_all_rings(na, NR_RX); i++) {
2797 struct netmap_kring *kring = na->rx_rings[i];
2798 if (kring->ring)
2799 continue;
2800 kring->ring = (struct netmap_ring *)
2801 ((char *)nifp +
2802 nifp->ring_ofs[netmap_all_rings(na, NR_TX) + i]);
2803 }
2804
2805 error = 0;
2806 out:
2807 return error;
2808 }
2809
2810 static void
2811 netmap_mem_pt_guest_rings_delete(struct netmap_mem_d *nmd, struct netmap_adapter *na)
2812 {
2813 #if 0
2814 enum txrx t;
2815
2816 for_rx_tx(t) {
2817 u_int i;
2818 for (i = 0; i < nma_get_nrings(na, t) + 1; i++) {
2819 struct netmap_kring *kring = &NMR(na, t)[i];
2820
2821 kring->ring = NULL;
2822 }
2823 }
2824 #endif
2825 (void)nmd;
2826 (void)na;
2827 }
2828
2829 static struct netmap_mem_ops netmap_mem_pt_guest_ops = {
2830 .nmd_get_lut = netmap_mem_pt_guest_get_lut,
2831 .nmd_get_info = netmap_mem_pt_guest_get_info,
2832 .nmd_ofstophys = netmap_mem_pt_guest_ofstophys,
2833 .nmd_config = netmap_mem_pt_guest_config,
2834 .nmd_finalize = netmap_mem_pt_guest_finalize,
2835 .nmd_deref = netmap_mem_pt_guest_deref,
2836 .nmd_if_offset = netmap_mem_pt_guest_if_offset,
2837 .nmd_delete = netmap_mem_pt_guest_delete,
2838 .nmd_if_new = netmap_mem_pt_guest_if_new,
2839 .nmd_if_delete = netmap_mem_pt_guest_if_delete,
2840 .nmd_rings_create = netmap_mem_pt_guest_rings_create,
2841 .nmd_rings_delete = netmap_mem_pt_guest_rings_delete
2842 };
2843
2844 /* Called with nm_mem_list_lock held. */
2845 static struct netmap_mem_d *
2846 netmap_mem_pt_guest_find_memid(nm_memid_t mem_id)
2847 {
2848 struct netmap_mem_d *mem = NULL;
2849 struct netmap_mem_d *scan = netmap_last_mem_d;
2850
2851 do {
2852 /* find ptnetmap allocator through host ID */
2853 if (scan->ops->nmd_deref == netmap_mem_pt_guest_deref &&
2854 ((struct netmap_mem_ptg *)(scan))->host_mem_id == mem_id) {
2855 mem = scan;
2856 mem->refcount++;
2857 NM_DBG_REFC(mem, __FUNCTION__, __LINE__);
2858 break;
2859 }
2860 scan = scan->next;
2861 } while (scan != netmap_last_mem_d);
2862
2863 return mem;
2864 }
2865
2866 /* Called with nm_mem_list_lock held. */
2867 static struct netmap_mem_d *
2868 netmap_mem_pt_guest_create(nm_memid_t mem_id)
2869 {
2870 struct netmap_mem_ptg *ptnmd;
2871 int err = 0;
2872
2873 ptnmd = nm_os_malloc(sizeof(struct netmap_mem_ptg));
2874 if (ptnmd == NULL) {
2875 err = ENOMEM;
2876 goto error;
2877 }
2878
2879 ptnmd->up.ops = &netmap_mem_pt_guest_ops;
2880 ptnmd->host_mem_id = mem_id;
2881 ptnmd->pt_ifs = NULL;
2882
2883 /* Assign a new id in the guest (we have the lock) */
2884 err = nm_mem_assign_id_locked(&ptnmd->up, -1);
2885 if (err)
2886 goto error;
2887
2888 ptnmd->up.flags &= ~NETMAP_MEM_FINALIZED;
2889 ptnmd->up.flags |= NETMAP_MEM_IO;
2890
2891 NMA_LOCK_INIT(&ptnmd->up);
2892
2893 snprintf(ptnmd->up.name, NM_MEM_NAMESZ, "%d", ptnmd->up.nm_id);
2894
2895
2896 return &ptnmd->up;
2897 error:
2898 netmap_mem_pt_guest_delete(&ptnmd->up);
2899 return NULL;
2900 }
2901
2902 /*
2903 * find host id in guest allocators and create guest allocator
2904 * if it is not there
2905 */
2906 static struct netmap_mem_d *
2907 netmap_mem_pt_guest_get(nm_memid_t mem_id)
2908 {
2909 struct netmap_mem_d *nmd;
2910
2911 NM_MTX_LOCK(nm_mem_list_lock);
2912 nmd = netmap_mem_pt_guest_find_memid(mem_id);
2913 if (nmd == NULL) {
2914 nmd = netmap_mem_pt_guest_create(mem_id);
2915 }
2916 NM_MTX_UNLOCK(nm_mem_list_lock);
2917
2918 return nmd;
2919 }
2920
2921 /*
2922  * The guest allocator can be created by ptnetmap_memdev (during device
2923  * attach) or by the ptnetmap device (ptnet), during netmap_attach.
2924  * The order is not important (it differs between Linux and FreeBSD).
2925  * Whichever runs first creates the allocator; the second one simply
2926  * attaches to it.
2927  */
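/*
 * Illustrative attach sequence (hypothetical call order):
 *
 *	// ptnetmap_memdev attach:
 *	nmd = netmap_mem_pt_guest_attach(ptn_dev, host_mem_id);
 *	// ptnet attach (may equally well run first):
 *	nmd = netmap_mem_pt_guest_new(ifp, nifp_offset, host_mem_id);
 *
 * Both resolve the same host_mem_id through netmap_mem_pt_guest_get(),
 * so they share one struct netmap_mem_ptg regardless of order.
 */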
2928
2929 /* Called when ptnetmap_memdev is attaching, to attach a new allocator in
2930 * the guest */
2931 struct netmap_mem_d *
2932 netmap_mem_pt_guest_attach(struct ptnetmap_memdev *ptn_dev, nm_memid_t mem_id)
2933 {
2934 struct netmap_mem_d *nmd;
2935 struct netmap_mem_ptg *ptnmd;
2936
2937 nmd = netmap_mem_pt_guest_get(mem_id);
2938
2939 /* assign this device to the guest allocator */
2940 if (nmd) {
2941 ptnmd = (struct netmap_mem_ptg *)nmd;
2942 ptnmd->ptn_dev = ptn_dev;
2943 }
2944
2945 return nmd;
2946 }
2947
2948 /* Called when ptnet device is attaching */
2949 struct netmap_mem_d *
2950 netmap_mem_pt_guest_new(if_t ifp,
2951 unsigned int nifp_offset,
2952 unsigned int memid)
2953 {
2954 struct netmap_mem_d *nmd;
2955
2956 if (ifp == NULL) {
2957 return NULL;
2958 }
2959
2960 nmd = netmap_mem_pt_guest_get((nm_memid_t)memid);
2961
2962 if (nmd) {
2963 netmap_mem_pt_guest_ifp_add(nmd, ifp, nifp_offset);
2964 }
2965
2966 return nmd;
2967 }
2968
2969 #endif /* WITH_PTNETMAP */
2970