xref: /freebsd/lib/libnetmap/nmport.c (revision 16038816)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (C) 2018 Universita` di Pisa
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  *   1. Redistributions of source code must retain the above copyright
12  *      notice, this list of conditions and the following disclaimer.
13  *   2. Redistributions in binary form must reproduce the above copyright
14  *      notice, this list of conditions and the following disclaimer in the
15  *      documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  *
29  * $FreeBSD$
30  */
31 
32 #include <sys/types.h>
33 #include <sys/stat.h>
34 #include <sys/ioctl.h>
35 #include <sys/mman.h>
36 #include <fcntl.h>
37 #include <inttypes.h>
38 #include <stdlib.h>
39 #include <stdio.h>
40 #include <stdarg.h>
41 #include <string.h>
42 #include <unistd.h>
43 #include <errno.h>
44 #include <net/netmap_user.h>
45 #define LIBNETMAP_NOTHREADSAFE
46 #include "libnetmap.h"
47 
/*
 * One entry in a port's cleanup list. Each setup step that needs
 * undoing (e.g. an option pushed on the request) registers one of
 * these; entries are popped and run in LIFO order.
 */
struct nmport_cleanup_d {
	struct nmport_cleanup_d *next;	/* the cleanup list forms a stack */
	void (*cleanup)(struct nmport_cleanup_d *, struct nmport_d *);
};
52 
53 static void
54 nmport_push_cleanup(struct nmport_d *d, struct nmport_cleanup_d *c)
55 {
56 	c->next = d->clist;
57 	d->clist = c;
58 }
59 
60 static void
61 nmport_pop_cleanup(struct nmport_d *d)
62 {
63 	struct nmport_cleanup_d *top;
64 
65 	top = d->clist;
66 	d->clist = d->clist->next;
67 	(*top->cleanup)(top, d);
68 	nmctx_free(d->ctx, top);
69 }
70 
71 void nmport_do_cleanup(struct nmport_d *d)
72 {
73 	while (d->clist != NULL) {
74 		nmport_pop_cleanup(d);
75 	}
76 }
77 
78 static struct nmport_d *
79 nmport_new_with_ctx(struct nmctx *ctx)
80 {
81 	struct nmport_d *d;
82 
83 	/* allocate a descriptor */
84 	d = nmctx_malloc(ctx, sizeof(*d));
85 	if (d == NULL) {
86 		nmctx_ferror(ctx, "cannot allocate nmport descriptor");
87 		goto out;
88 	}
89 	memset(d, 0, sizeof(*d));
90 
91 	nmreq_header_init(&d->hdr, NETMAP_REQ_REGISTER, &d->reg);
92 
93 	d->ctx = ctx;
94 	d->fd = -1;
95 
96 out:
97 	return d;
98 }
99 
/* Allocate a new port descriptor using the default (global) context. */
struct nmport_d *
nmport_new(void)
{
	return nmport_new_with_ctx(nmctx_get());
}
106 
107 
/* Free a descriptor obtained from nmport_new()/nmport_prepare(). */
void
nmport_delete(struct nmport_d *d)
{
	nmctx_free(d->ctx, d);
}
113 
114 void
115 nmport_extmem_cleanup(struct nmport_cleanup_d *c, struct nmport_d *d)
116 {
117 	(void)c;
118 
119 	if (d->extmem == NULL)
120 		return;
121 
122 	nmreq_remove_option(&d->hdr, &d->extmem->nro_opt);
123 	nmctx_free(d->ctx, d->extmem);
124 	d->extmem = NULL;
125 }
126 
127 
/*
 * Attach a user-provided memory region ('base', 'size' bytes) to the
 * port by pushing a NETMAP_REQ_OPT_EXTMEM option on the request.
 * Must be called before nmport_register(); at most one extmem region
 * per port. A cleanup entry that removes the option again is pushed
 * on success.
 * Returns 0 on success, -1 with errno set on error.
 */
int
nmport_extmem(struct nmport_d *d, void *base, size_t size)
{
	struct nmctx *ctx = d->ctx;
	struct nmport_cleanup_d *clnup = NULL;

	if (d->register_done) {
		nmctx_ferror(ctx, "%s: cannot set extmem of an already registered port", d->hdr.nr_name);
		errno = EINVAL;
		return -1;
	}

	if (d->extmem != NULL) {
		nmctx_ferror(ctx, "%s: extmem already in use", d->hdr.nr_name);
		errno = EINVAL;
		return -1;
	}

	/* allocate the cleanup entry first, so that no undo is needed if
	 * this fails
	 */
	clnup = (struct nmport_cleanup_d *)nmctx_malloc(ctx, sizeof(*clnup));
	if (clnup == NULL) {
		nmctx_ferror(ctx, "failed to allocate cleanup descriptor");
		errno = ENOMEM;
		return -1;
	}

	d->extmem = nmctx_malloc(ctx, sizeof(*d->extmem));
	if (d->extmem == NULL) {
		nmctx_ferror(ctx, "%s: cannot allocate extmem option", d->hdr.nr_name);
		nmctx_free(ctx, clnup);
		errno = ENOMEM;
		return -1;
	}
	memset(d->extmem, 0, sizeof(*d->extmem));
	d->extmem->nro_usrptr = (uintptr_t)base;
	d->extmem->nro_opt.nro_reqtype = NETMAP_REQ_OPT_EXTMEM;
	d->extmem->nro_info.nr_memsize = size;
	nmreq_push_option(&d->hdr, &d->extmem->nro_opt);

	clnup->cleanup = nmport_extmem_cleanup;
	nmport_push_cleanup(d, clnup);

	return 0;
}
171 
/* cleanup entry for memory mapped by nmport_extmem_from_file() */
struct nmport_extmem_from_file_cleanup_d {
	struct nmport_cleanup_d up;	/* base cleanup entry (must be first) */
	void *p;			/* start of the mmap()ed region */
	size_t size;			/* size of the mapping */
};
177 
178 void nmport_extmem_from_file_cleanup(struct nmport_cleanup_d *c,
179 		struct nmport_d *d)
180 {
181 	(void)d;
182 	struct nmport_extmem_from_file_cleanup_d *cc =
183 		(struct nmport_extmem_from_file_cleanup_d *)c;
184 
185 	munmap(cc->p, cc->size);
186 }
187 
188 int
189 nmport_extmem_from_file(struct nmport_d *d, const char *fname)
190 {
191 	struct nmctx *ctx = d->ctx;
192 	int fd = -1;
193 	off_t mapsize;
194 	void *p;
195 	struct nmport_extmem_from_file_cleanup_d *clnup = NULL;
196 
197 	clnup = nmctx_malloc(ctx, sizeof(*clnup));
198 	if (clnup == NULL) {
199 		nmctx_ferror(ctx, "cannot allocate cleanup descriptor");
200 		errno = ENOMEM;
201 		goto fail;
202 	}
203 
204 	fd = open(fname, O_RDWR);
205 	if (fd < 0) {
206 		nmctx_ferror(ctx, "cannot open '%s': %s", fname, strerror(errno));
207 		goto fail;
208 	}
209 	mapsize = lseek(fd, 0, SEEK_END);
210 	if (mapsize < 0) {
211 		nmctx_ferror(ctx, "failed to obtain filesize of '%s': %s", fname, strerror(errno));
212 		goto fail;
213 	}
214 	p = mmap(0, mapsize, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
215 	if (p == MAP_FAILED) {
216 		nmctx_ferror(ctx, "cannot mmap '%s': %s", fname, strerror(errno));
217 		goto fail;
218 	}
219 	close(fd);
220 
221 	clnup->p = p;
222 	clnup->size = mapsize;
223 	clnup->up.cleanup = nmport_extmem_from_file_cleanup;
224 	nmport_push_cleanup(d, &clnup->up);
225 
226 	if (nmport_extmem(d, p, mapsize) < 0)
227 		goto fail;
228 
229 	return 0;
230 
231 fail:
232 	if (fd >= 0)
233 		close(fd);
234 	if (clnup != NULL) {
235 		if (clnup->p != MAP_FAILED)
236 			nmport_pop_cleanup(d);
237 		else
238 			nmctx_free(ctx, clnup);
239 	}
240 	return -1;
241 }
242 
243 struct nmreq_pools_info*
244 nmport_extmem_getinfo(struct nmport_d *d)
245 {
246 	if (d->extmem == NULL)
247 		return NULL;
248 	return &d->extmem->nro_info;
249 }
250 
/* cleanup entry for the offsets option pushed by nmport_offset() */
struct nmport_offset_cleanup_d {
	struct nmport_cleanup_d up;	/* base cleanup entry (must be first) */
	struct nmreq_opt_offsets *opt;	/* the option to remove and free */
};
255 
256 static void
257 nmport_offset_cleanup(struct nmport_cleanup_d *c,
258 		struct nmport_d *d)
259 {
260 	struct nmport_offset_cleanup_d *cc =
261 		(struct nmport_offset_cleanup_d *)c;
262 
263 	nmreq_remove_option(&d->hdr, &cc->opt->nro_opt);
264 	nmctx_free(d->ctx, cc->opt);
265 }
266 
/*
 * Push a NETMAP_REQ_OPT_OFFSETS option on the request, asking for an
 * initial buffer offset of 'initial' bytes, a maximum offset of
 * 'maxoff', 'bits' offset bits and a minimum gap of 'mingap' bytes.
 * A cleanup entry that removes the option again is registered.
 * Returns 0 on success, -1 with errno set on allocation failure.
 */
int
nmport_offset(struct nmport_d *d, uint64_t initial,
		uint64_t maxoff, uint64_t bits, uint64_t mingap)
{
	struct nmctx *ctx = d->ctx;
	struct nmreq_opt_offsets *opt;
	struct nmport_offset_cleanup_d *clnup = NULL;

	/* allocate the cleanup entry first, so that no undo is needed if
	 * this fails
	 */
	clnup = nmctx_malloc(ctx, sizeof(*clnup));
	if (clnup == NULL) {
		nmctx_ferror(ctx, "cannot allocate cleanup descriptor");
		errno = ENOMEM;
		return -1;
	}

	opt = nmctx_malloc(ctx, sizeof(*opt));
	if (opt == NULL) {
		nmctx_ferror(ctx, "%s: cannot allocate offset option", d->hdr.nr_name);
		nmctx_free(ctx, clnup);
		errno = ENOMEM;
		return -1;
	}
	memset(opt, 0, sizeof(*opt));
	opt->nro_opt.nro_reqtype = NETMAP_REQ_OPT_OFFSETS;
	opt->nro_offset_bits = bits;
	opt->nro_initial_offset = initial;
	opt->nro_max_offset = maxoff;
	opt->nro_min_gap = mingap;
	nmreq_push_option(&d->hdr, &opt->nro_opt);

	clnup->up.cleanup = nmport_offset_cleanup;
	clnup->opt = opt;
	nmport_push_cleanup(d, &clnup->up);

	return 0;
}
303 
/* head of the list of options */
static struct nmreq_opt_parser *nmport_opt_parsers;

/*
 * Helper macros to declare an option parser. NPOPT_DECL(o, f)
 * forward-declares the parse callback for option 'o', defines its
 * descriptor (struct nmreq_opt_parser) with flags 'f' and no keys, and
 * emits a constructor that links the descriptor into the global
 * nmport_opt_parsers list before main() runs. Keys are added later by
 * the NPKEY_DECL() constructors below.
 */
#define NPOPT_PARSER(o)		nmport_opt_##o##_parser
#define NPOPT_DESC(o)		nmport_opt_##o##_desc
#define NPOPT_NRKEYS(o)		(NPOPT_DESC(o).nr_keys)
#define NPOPT_DECL(o, f)						\
static int NPOPT_PARSER(o)(struct nmreq_parse_ctx *);			\
static struct nmreq_opt_parser NPOPT_DESC(o) = {			\
	.prefix = #o,							\
	.parse = NPOPT_PARSER(o),					\
	.flags = (f),							\
	.default_key = -1,						\
	.nr_keys = 0,							\
	.next = NULL,							\
};									\
static void __attribute__((constructor))				\
nmport_opt_##o##_ctor(void)						\
{									\
	NPOPT_DESC(o).next = nmport_opt_parsers;			\
	nmport_opt_parsers = &NPOPT_DESC(o);				\
}
/* static description of one option key, registered at startup */
struct nmport_key_desc {
	struct nmreq_opt_parser *option;	/* the option this key belongs to */
	const char *key;			/* key name as typed by the user */
	unsigned int flags;			/* NMREQ_OPTK_* flags */
	int id;					/* assigned by nmport_opt_key_ctor() */
};
/*
 * Register key 'k' with its owning option parser: assign it the next
 * free id, copy name and flags into the parser's key table, and record
 * it as the option's default key if NMREQ_OPTK_DEFAULT is set.
 * Called from the NPKEY_DECL() constructors before main().
 */
static void
nmport_opt_key_ctor(struct nmport_key_desc *k)
{
	struct nmreq_opt_parser *o = k->option;
	struct nmreq_opt_key *ok;

	k->id = o->nr_keys;
	ok = &o->keys[k->id];
	ok->key = k->key;
	ok->id = k->id;
	ok->flags = k->flags;
	o->nr_keys++;
	if (ok->flags & NMREQ_OPTK_DEFAULT)
		o->default_key = ok->id;
}
/*
 * Helper macros to declare a key for an option. NPKEY_DECL(o, k, f)
 * defines the static descriptor for key 'k' of option 'o' with flags
 * 'f' and emits a constructor that registers it via
 * nmport_opt_key_ctor(). nmport_key()/nmport_defkey() fetch the parsed
 * value of a key (or of the option's default key) from a parse context.
 */
#define NPKEY_DESC(o, k)	nmport_opt_##o##_key_##k##_desc
#define NPKEY_ID(o, k)		(NPKEY_DESC(o, k).id)
#define NPKEY_DECL(o, k, f)						\
static struct nmport_key_desc NPKEY_DESC(o, k) = {			\
	.option = &NPOPT_DESC(o),					\
	.key = #k,							\
	.flags = (f),							\
	.id = -1,							\
};									\
static void __attribute__((constructor))				\
nmport_opt_##o##_key_##k##_ctor(void)					\
{									\
	nmport_opt_key_ctor(&NPKEY_DESC(o, k));				\
}
#define nmport_key(p, o, k)	((p)->keys[NPKEY_ID(o, k)])
#define nmport_defkey(p, o)	((p)->keys[NPOPT_DESC(o).default_key])
363 
/*
 * The supported options and their keys:
 *   share:  reuse the memory region of another port (default key: port);
 *   extmem: map a file as external memory (default key: file), with
 *           optional overrides for the if/ring/buf pool sizes;
 *   conf:   override ring/slot numbers in the register request;
 *   offset: per-slot buffer offsets (disabled unless explicitly enabled
 *           via nmport_enable_option()).
 */
NPOPT_DECL(share, 0)
	NPKEY_DECL(share, port, NMREQ_OPTK_DEFAULT|NMREQ_OPTK_MUSTSET)
NPOPT_DECL(extmem, 0)
	NPKEY_DECL(extmem, file, NMREQ_OPTK_DEFAULT|NMREQ_OPTK_MUSTSET)
	NPKEY_DECL(extmem, if_num, 0)
	NPKEY_DECL(extmem, if_size, 0)
	NPKEY_DECL(extmem, ring_num, 0)
	NPKEY_DECL(extmem, ring_size, 0)
	NPKEY_DECL(extmem, buf_num, 0)
	NPKEY_DECL(extmem, buf_size, 0)
NPOPT_DECL(conf, 0)
	NPKEY_DECL(conf, rings, 0)
	NPKEY_DECL(conf, host_rings, 0)
	NPKEY_DECL(conf, slots, 0)
	NPKEY_DECL(conf, tx_rings, 0)
	NPKEY_DECL(conf, rx_rings, 0)
	NPKEY_DECL(conf, host_tx_rings, 0)
	NPKEY_DECL(conf, host_rx_rings, 0)
	NPKEY_DECL(conf, tx_slots, 0)
	NPKEY_DECL(conf, rx_slots, 0)
NPOPT_DECL(offset, NMREQ_OPTF_DISABLED)
	NPKEY_DECL(offset, initial, NMREQ_OPTK_DEFAULT|NMREQ_OPTK_MUSTSET)
	NPKEY_DECL(offset, bits, 0)
387 
388 
/*
 * Parser for the "share" option: resolve the port name given in the
 * default key to a mem_id and store it in the register request, so
 * that this port shares the other port's memory region. Fails if a
 * different mem_id was already set.
 */
static int
NPOPT_PARSER(share)(struct nmreq_parse_ctx *p)
{
	struct nmctx *ctx = p->ctx;
	struct nmport_d *d = p->token;
	int32_t mem_id;
	const char *v = nmport_defkey(p, share);

	mem_id = nmreq_get_mem_id(&v, ctx);
	if (mem_id < 0)
		return -1;
	if (d->reg.nr_mem_id && d->reg.nr_mem_id != mem_id) {
		nmctx_ferror(ctx, "cannot set mem_id to %"PRId32", already set to %"PRIu16"",
				mem_id, d->reg.nr_mem_id);
		errno = EINVAL;
		return -1;
	}
	d->reg.nr_mem_id = mem_id;
	return 0;
}
409 
/*
 * Parser for the "extmem" option: map the file named by the "file" key
 * via nmport_extmem_from_file(), then copy any of the pool-size keys
 * (if_num, if_size, ring_num, ring_size, buf_num, buf_size) into the
 * option's nmreq_pools_info.
 */
static int
NPOPT_PARSER(extmem)(struct nmreq_parse_ctx *p)
{
	struct nmport_d *d;
	struct nmreq_pools_info *pi;
	int i;

	d = p->token;

	if (nmport_extmem_from_file(d, nmport_key(p, extmem, file)) < 0)
		return -1;

	pi = &d->extmem->nro_info;

	/* key ids are assigned at startup, so the mapping from key to
	 * pools_info field must be established at run time
	 */
	for  (i = 0; i < NPOPT_NRKEYS(extmem); i++) {
		const char *k = p->keys[i];
		uint32_t v;

		if (k == NULL)
			continue;

		v = atoi(k);
		if (i == NPKEY_ID(extmem, if_num)) {
			pi->nr_if_pool_objtotal = v;
		} else if (i == NPKEY_ID(extmem, if_size)) {
			pi->nr_if_pool_objsize = v;
		} else if (i == NPKEY_ID(extmem, ring_num)) {
			pi->nr_ring_pool_objtotal = v;
		} else if (i == NPKEY_ID(extmem, ring_size)) {
			pi->nr_ring_pool_objsize = v;
		} else if (i == NPKEY_ID(extmem, buf_num)) {
			pi->nr_buf_pool_objtotal = v;
		} else if (i == NPKEY_ID(extmem, buf_size)) {
			pi->nr_buf_pool_objsize = v;
		}
	}
	return 0;
}
448 
449 static int
450 NPOPT_PARSER(conf)(struct nmreq_parse_ctx *p)
451 {
452 	struct nmport_d *d;
453 
454 	d = p->token;
455 
456 	if (nmport_key(p, conf, rings) != NULL) {
457 		uint16_t nr_rings = atoi(nmport_key(p, conf, rings));
458 		d->reg.nr_tx_rings = nr_rings;
459 		d->reg.nr_rx_rings = nr_rings;
460 	}
461 	if (nmport_key(p, conf, host_rings) != NULL) {
462 		uint16_t nr_rings = atoi(nmport_key(p, conf, host_rings));
463 		d->reg.nr_host_tx_rings = nr_rings;
464 		d->reg.nr_host_rx_rings = nr_rings;
465 	}
466 	if (nmport_key(p, conf, slots) != NULL) {
467 		uint32_t nr_slots = atoi(nmport_key(p, conf, slots));
468 		d->reg.nr_tx_slots = nr_slots;
469 		d->reg.nr_rx_slots = nr_slots;
470 	}
471 	if (nmport_key(p, conf, tx_rings) != NULL) {
472 		d->reg.nr_tx_rings = atoi(nmport_key(p, conf, tx_rings));
473 	}
474 	if (nmport_key(p, conf, rx_rings) != NULL) {
475 		d->reg.nr_rx_rings = atoi(nmport_key(p, conf, rx_rings));
476 	}
477 	if (nmport_key(p, conf, host_tx_rings) != NULL) {
478 		d->reg.nr_host_tx_rings = atoi(nmport_key(p, conf, host_tx_rings));
479 	}
480 	if (nmport_key(p, conf, host_rx_rings) != NULL) {
481 		d->reg.nr_host_rx_rings = atoi(nmport_key(p, conf, host_rx_rings));
482 	}
483 	if (nmport_key(p, conf, tx_slots) != NULL) {
484 		d->reg.nr_tx_slots = atoi(nmport_key(p, conf, tx_slots));
485 	}
486 	if (nmport_key(p, conf, rx_slots) != NULL) {
487 		d->reg.nr_rx_slots = atoi(nmport_key(p, conf, rx_slots));
488 	}
489 	return 0;
490 }
491 
/*
 * Parser for the "offset" option: every slot gets the same initial
 * offset, hence maxoff == initial in the nmport_offset() call; "bits"
 * defaults to 0 when not given.
 */
static int
NPOPT_PARSER(offset)(struct nmreq_parse_ctx *p)
{
	struct nmport_d *d;
	uint64_t initial, bits;

	d = p->token;

	initial = atoi(nmport_key(p, offset, initial));
	bits = 0;
	if (nmport_key(p, offset, bits) != NULL)
		bits = atoi(nmport_key(p, offset, bits));

	return nmport_offset(d, initial, initial, bits, 0);
}
507 
508 
509 void
510 nmport_disable_option(const char *opt)
511 {
512 	struct nmreq_opt_parser *p;
513 
514 	for (p = nmport_opt_parsers; p != NULL; p = p->next) {
515 		if (!strcmp(p->prefix, opt)) {
516 			p->flags |= NMREQ_OPTF_DISABLED;
517 		}
518 	}
519 }
520 
521 int
522 nmport_enable_option(const char *opt)
523 {
524 	struct nmreq_opt_parser *p;
525 
526 	for (p = nmport_opt_parsers; p != NULL; p = p->next) {
527 		if (!strcmp(p->prefix, opt)) {
528 			p->flags &= ~NMREQ_OPTF_DISABLED;
529 			return 0;
530 		}
531 	}
532 	errno = EOPNOTSUPP;
533 	return -1;
534 }
535 
536 
537 int
538 nmport_parse(struct nmport_d *d, const char *ifname)
539 {
540 	const char *scan = ifname;
541 
542 	if (nmreq_header_decode(&scan, &d->hdr, d->ctx) < 0) {
543 		goto err;
544 	}
545 
546 	/* parse the register request */
547 	if (nmreq_register_decode(&scan, &d->reg, d->ctx) < 0) {
548 		goto err;
549 	}
550 
551 	/* parse the options, if any */
552 	if (nmreq_options_decode(scan, nmport_opt_parsers, d, d->ctx) < 0) {
553 		goto err;
554 	}
555 	return 0;
556 
557 err:
558 	nmport_undo_parse(d);
559 	return -1;
560 }
561 
562 void
563 nmport_undo_parse(struct nmport_d *d)
564 {
565 	nmport_do_cleanup(d);
566 	memset(&d->reg, 0, sizeof(d->reg));
567 	memset(&d->hdr, 0, sizeof(d->hdr));
568 }
569 
570 struct nmport_d *
571 nmport_prepare(const char *ifname)
572 {
573 	struct nmport_d *d;
574 
575 	/* allocate a descriptor */
576 	d = nmport_new();
577 	if (d == NULL)
578 		goto err;
579 
580 	/* parse the header */
581 	if (nmport_parse(d, ifname) < 0)
582 		goto err;
583 
584 	return d;
585 
586 err:
587 	nmport_undo_prepare(d);
588 	return NULL;
589 }
590 
591 void
592 nmport_undo_prepare(struct nmport_d *d)
593 {
594 	if (d == NULL)
595 		return;
596 	nmport_undo_parse(d);
597 	nmport_delete(d);
598 }
599 
/*
 * Register the port with the kernel: open /dev/netmap and issue the
 * NIOCCTRL ioctl with the prepared request. If the ioctl fails and any
 * request option carries an error status, those per-option errors are
 * reported instead of the generic ioctl errno.
 * Returns 0 on success, -1 on failure (partial state undone).
 */
int
nmport_register(struct nmport_d *d)
{
	struct nmctx *ctx = d->ctx;

	if (d->register_done) {
		errno = EINVAL;
		nmctx_ferror(ctx, "%s: already registered", d->hdr.nr_name);
		return -1;
	}

	d->fd = open("/dev/netmap", O_RDWR);
	if (d->fd < 0) {
		nmctx_ferror(ctx, "/dev/netmap: %s", strerror(errno));
		goto err;
	}

	if (ioctl(d->fd, NIOCCTRL, &d->hdr) < 0) {
		struct nmreq_option *o;
		int option_errors = 0;

		/* report each option that the kernel flagged as failed */
		nmreq_foreach_option(&d->hdr, o) {
			if (o->nro_status) {
				nmctx_ferror(ctx, "%s: option %s: %s",
						d->hdr.nr_name,
						nmreq_option_name(o->nro_reqtype),
						strerror(o->nro_status));
				option_errors++;
			}

		}
		/* no option-specific error: report the ioctl errno */
		if (!option_errors)
			nmctx_ferror(ctx, "%s: %s", d->hdr.nr_name, strerror(errno));
		goto err;
	}

	d->register_done = 1;

	return 0;

err:
	nmport_undo_register(d);
	return -1;
}
644 
645 void
646 nmport_undo_register(struct nmport_d *d)
647 {
648 	if (d->fd >= 0)
649 		close(d->fd);
650 	d->fd = -1;
651 	d->register_done = 0;
652 }
653 
/* lookup the mem_id in the mem-list: do a new mmap() if
 * not found, reuse existing otherwise
 */
int
nmport_mmap(struct nmport_d *d)
{
	struct nmctx *ctx = d->ctx;
	struct nmem_d *m = NULL;
	u_int num_tx, num_rx;
	unsigned int i;

	if (d->mmap_done) {
		errno = EINVAL;
		nmctx_ferror(ctx, "%s: already mapped", d->hdr.nr_name);
		return -1;
	}

	/* registration fills in nr_mem_id/nr_memsize/nr_offset, which
	 * are needed below
	 */
	if (!d->register_done) {
		errno = EINVAL;
		nmctx_ferror(ctx, "cannot map unregistered port");
		return -1;
	}

	/* the mem_descs list is shared among ports of the same context,
	 * so it must be walked and updated under the context lock
	 */
	nmctx_lock(ctx);

	for (m = ctx->mem_descs; m != NULL; m = m->next)
		if (m->mem_id == d->reg.nr_mem_id)
			break;

	if (m == NULL) {
		m = nmctx_malloc(ctx, sizeof(*m));
		if (m == NULL) {
			nmctx_ferror(ctx, "cannot allocate memory descriptor");
			goto err;
		}
		memset(m, 0, sizeof(*m));
		if (d->extmem != NULL) {
			/* extmem: the user already mapped the region */
			m->mem = (void *)((uintptr_t)d->extmem->nro_usrptr);
			m->size = d->extmem->nro_info.nr_memsize;
			m->is_extmem = 1;
		} else {
			m->mem = mmap(NULL, d->reg.nr_memsize, PROT_READ|PROT_WRITE,
					MAP_SHARED, d->fd, 0);
			if (m->mem == MAP_FAILED) {
				nmctx_ferror(ctx, "mmap: %s", strerror(errno));
				goto err;
			}
			m->size = d->reg.nr_memsize;
		}
		m->mem_id = d->reg.nr_mem_id;
		/* prepend to the doubly-linked mem_descs list */
		m->next = ctx->mem_descs;
		if (ctx->mem_descs != NULL)
			ctx->mem_descs->prev = m;
		ctx->mem_descs = m;
	}
	m->refcount++;

	nmctx_unlock(ctx);

	d->mem = m;

	d->nifp = NETMAP_IF(m->mem, d->reg.nr_offset);

	/* locate this port's rings in the ring_ofs array: skip the
	 * leading zero entries, then the first contiguous run of
	 * non-zero offsets gives the [first, last] ring range; same for
	 * the rx side, whose entries follow the tx ones
	 */
	num_tx = d->reg.nr_tx_rings + d->nifp->ni_host_tx_rings;
	for (i = 0; i < num_tx && !d->nifp->ring_ofs[i]; i++)
		;
	d->cur_tx_ring = d->first_tx_ring = i;
	for ( ; i < num_tx && d->nifp->ring_ofs[i]; i++)
		;
	d->last_tx_ring = i - 1;

	num_rx = d->reg.nr_rx_rings + d->nifp->ni_host_rx_rings;
	for (i = 0; i < num_rx && !d->nifp->ring_ofs[i + num_tx]; i++)
		;
	d->cur_rx_ring = d->first_rx_ring = i;
	for ( ; i < num_rx && d->nifp->ring_ofs[i + num_tx]; i++)
		;
	d->last_rx_ring = i - 1;

	d->mmap_done = 1;

	return 0;

err:
	nmctx_unlock(ctx);
	nmport_undo_mmap(d);
	return -1;
}
742 
/*
 * Undo nmport_mmap(): drop the port's reference on its memory region;
 * when the last reference goes away the region is unmapped (unless it
 * is user-provided extmem) and its descriptor is unlinked from the
 * context list and freed. All ring bookkeeping is reset. Safe to call
 * if the port was never mapped.
 */
void
nmport_undo_mmap(struct nmport_d *d)
{
	struct nmem_d *m;
	struct nmctx *ctx = d->ctx;

	m = d->mem;
	if (m == NULL)
		return;
	/* the refcount and the mem_descs list are shared: take the lock */
	nmctx_lock(ctx);
	m->refcount--;
	if (m->refcount <= 0) {
		if (!m->is_extmem && m->mem != MAP_FAILED)
			munmap(m->mem, m->size);
		/* extract from the list and free */
		if (m->next != NULL)
			m->next->prev = m->prev;
		if (m->prev != NULL)
			m->prev->next = m->next;
		else
			ctx->mem_descs = m->next;
		nmctx_free(ctx, m);
		d->mem = NULL;
	}
	nmctx_unlock(ctx);
	d->mmap_done = 0;
	d->mem = NULL;
	d->nifp = NULL;
	d->first_tx_ring = 0;
	d->last_tx_ring = 0;
	d->first_rx_ring = 0;
	d->last_rx_ring = 0;
	d->cur_tx_ring = 0;
	d->cur_rx_ring = 0;
}
778 
/*
 * Register and mmap an already-prepared port descriptor.
 * Returns 0 on success, -1 on failure (partial state undone).
 */
int
nmport_open_desc(struct nmport_d *d)
{
	if (nmport_register(d) >= 0 && nmport_mmap(d) >= 0)
		return 0;

	nmport_undo_open_desc(d);
	return -1;
}
793 
/* Undo nmport_open_desc(), in reverse order of the setup steps. */
void
nmport_undo_open_desc(struct nmport_d *d)
{
	nmport_undo_mmap(d);
	nmport_undo_register(d);
}
800 
801 
802 struct nmport_d *
803 nmport_open(const char *ifname)
804 {
805 	struct nmport_d *d;
806 
807 	/* prepare the descriptor */
808 	d = nmport_prepare(ifname);
809 	if (d == NULL)
810 		goto err;
811 
812 	/* open netmap and register */
813 	if (nmport_open_desc(d) < 0)
814 		goto err;
815 
816 	return d;
817 
818 err:
819 	nmport_close(d);
820 	return NULL;
821 }
822 
823 void
824 nmport_close(struct nmport_d *d)
825 {
826 	if (d == NULL)
827 		return;
828 	nmport_undo_open_desc(d);
829 	nmport_undo_prepare(d);
830 }
831 
/*
 * Create a new descriptor with the same parsed header and register
 * request as 'd' (including the mem_id), but in an un-registered,
 * un-mapped state and without any options. A port using extmem can
 * only be cloned after registration, since the clone shares the memory
 * through the mem_id rather than through the extmem option.
 * Returns the clone, or NULL on failure (errno set).
 */
struct nmport_d *
nmport_clone(struct nmport_d *d)
{
	struct nmport_d *c;
	struct nmctx *ctx;

	ctx = d->ctx;

	if (d->extmem != NULL && !d->register_done) {
		errno = EINVAL;
		nmctx_ferror(ctx, "cannot clone unregistered port that is using extmem");
		return NULL;
	}

	c = nmport_new_with_ctx(ctx);
	if (c == NULL)
		return NULL;
	/* copy the output of parse */
	c->hdr = d->hdr;
	/* redirect the pointer to the body */
	c->hdr.nr_body = (uintptr_t)&c->reg;
	/* options are not cloned */
	c->hdr.nr_options = 0;
	c->reg = d->reg; /* this also copies the mem_id */
	/* put the new port in an un-registered, unmapped state */
	c->fd = -1;
	c->nifp = NULL;
	c->register_done = 0;
	c->mem = NULL;
	c->extmem = NULL;
	c->mmap_done = 0;
	c->first_tx_ring = 0;
	c->last_tx_ring = 0;
	c->first_rx_ring = 0;
	c->last_rx_ring = 0;
	c->cur_tx_ring = 0;
	c->cur_rx_ring = 0;

	return c;
}
872 
/*
 * Copy the 'size'-byte packet at 'buf' into the first tx ring, tried
 * round-robin starting from d->cur_tx_ring, that has enough free
 * slots. The packet is split across consecutive slots, with
 * NS_MOREFRAG set on every fragment but the last.
 * Returns size on success, 0 if no ring has enough room.
 */
int
nmport_inject(struct nmport_d *d, const void *buf, size_t size)
{
	u_int c, n = d->last_tx_ring - d->first_tx_ring + 1,
		ri = d->cur_tx_ring;

	for (c = 0; c < n ; c++, ri++) {
		/* compute current ring to use */
		struct netmap_ring *ring;
		uint32_t i, j, idx;
		size_t rem;

		if (ri > d->last_tx_ring)
			ri = d->first_tx_ring;
		ring = NETMAP_TXRING(d->nifp, ri);
		/* walk j forward from cur to count the full slots the
		 * packet needs; j ends up on the slot of the last
		 * fragment (<= nr_buf_size bytes)
		 */
		rem = size;
		j = ring->cur;
		while (rem > ring->nr_buf_size && j != ring->tail) {
			rem -= ring->nr_buf_size;
			j = nm_ring_next(ring, j);
		}
		/* not enough room in this ring, try the next one */
		if (j == ring->tail && rem > 0)
			continue;
		/* fill all full-sized fragments */
		i = ring->cur;
		while (i != j) {
			idx = ring->slot[i].buf_idx;
			ring->slot[i].len = ring->nr_buf_size;
			ring->slot[i].flags = NS_MOREFRAG;
			nm_pkt_copy(buf, NETMAP_BUF(ring, idx), ring->nr_buf_size);
			i = nm_ring_next(ring, i);
			buf = (char *)buf + ring->nr_buf_size;
		}
		/* last fragment: remaining bytes, no NS_MOREFRAG */
		idx = ring->slot[i].buf_idx;
		ring->slot[i].len = rem;
		ring->slot[i].flags = 0;
		nm_pkt_copy(buf, NETMAP_BUF(ring, idx), rem);
		ring->head = ring->cur = nm_ring_next(ring, i);
		/* remember the ring, so the next packet starts here */
		d->cur_tx_ring = ri;
		return size;
	}
	return 0; /* fail */
}
915