xref: /linux/tools/bpf/bpftool/common.c (revision 478a535a)
1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
2 /* Copyright (C) 2017-2018 Netronome Systems, Inc. */
3 
4 #ifndef _GNU_SOURCE
5 #define _GNU_SOURCE
6 #endif
7 #include <ctype.h>
8 #include <errno.h>
9 #include <fcntl.h>
10 #include <ftw.h>
11 #include <libgen.h>
12 #include <mntent.h>
13 #include <stdbool.h>
14 #include <stdio.h>
15 #include <stdlib.h>
16 #include <string.h>
17 #include <unistd.h>
18 #include <net/if.h>
19 #include <sys/mount.h>
20 #include <sys/resource.h>
21 #include <sys/stat.h>
22 #include <sys/vfs.h>
23 
24 #include <linux/filter.h>
25 #include <linux/limits.h>
26 #include <linux/magic.h>
27 #include <linux/unistd.h>
28 
29 #include <bpf/bpf.h>
30 #include <bpf/hashmap.h>
31 #include <bpf/libbpf.h> /* libbpf_num_possible_cpus */
32 #include <bpf/btf.h>
33 
34 #include "main.h"
35 
36 #ifndef BPF_FS_MAGIC
37 #define BPF_FS_MAGIC		0xcafe4a11
38 #endif
39 
40 void p_err(const char *fmt, ...)
41 {
42 	va_list ap;
43 
44 	va_start(ap, fmt);
45 	if (json_output) {
46 		jsonw_start_object(json_wtr);
47 		jsonw_name(json_wtr, "error");
48 		jsonw_vprintf_enquote(json_wtr, fmt, ap);
49 		jsonw_end_object(json_wtr);
50 	} else {
51 		fprintf(stderr, "Error: ");
52 		vfprintf(stderr, fmt, ap);
53 		fprintf(stderr, "\n");
54 	}
55 	va_end(ap);
56 }
57 
58 void p_info(const char *fmt, ...)
59 {
60 	va_list ap;
61 
62 	if (json_output)
63 		return;
64 
65 	va_start(ap, fmt);
66 	vfprintf(stderr, fmt, ap);
67 	fprintf(stderr, "\n");
68 	va_end(ap);
69 }
70 
71 static bool is_bpffs(const char *path)
72 {
73 	struct statfs st_fs;
74 
75 	if (statfs(path, &st_fs) < 0)
76 		return false;
77 
78 	return (unsigned long)st_fs.f_type == BPF_FS_MAGIC;
79 }
80 
81 /* Probe whether kernel switched from memlock-based (RLIMIT_MEMLOCK) to
82  * memcg-based memory accounting for BPF maps and programs. This was done in
83  * commit 97306be45fbe ("Merge branch 'switch to memcg-based memory
84  * accounting'"), in Linux 5.11.
85  *
86  * Libbpf also offers to probe for memcg-based accounting vs rlimit, but does
87  * so by checking for the availability of a given BPF helper and this has
88  * failed on some kernels with backports in the past, see commit 6b4384ff1088
89  * ("Revert "bpftool: Use libbpf 1.0 API mode instead of RLIMIT_MEMLOCK"").
90  * Instead, we can probe by lowering the process-based rlimit to 0, trying to
91  * load a BPF object, and resetting the rlimit. If the load succeeds then
92  * memcg-based accounting is supported.
93  *
94  * This would be too dangerous to do in the library, because multithreaded
95  * applications might attempt to load items while the rlimit is at 0. Given
96  * that bpftool is single-threaded, this is fine to do here.
97  */
98 static bool known_to_need_rlimit(void)
99 {
100 	struct rlimit rlim_init, rlim_cur_zero = {};
101 	struct bpf_insn insns[] = {
102 		BPF_MOV64_IMM(BPF_REG_0, 0),
103 		BPF_EXIT_INSN(),
104 	};
105 	size_t insn_cnt = ARRAY_SIZE(insns);
106 	union bpf_attr attr;
107 	int prog_fd, err;
108 
109 	memset(&attr, 0, sizeof(attr));
110 	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
111 	attr.insns = ptr_to_u64(insns);
112 	attr.insn_cnt = insn_cnt;
113 	attr.license = ptr_to_u64("GPL");
114 
115 	if (getrlimit(RLIMIT_MEMLOCK, &rlim_init))
116 		return false;
117 
118 	/* Drop the soft limit to zero. We maintain the hard limit to its
119 	 * current value, because lowering it would be a permanent operation
120 	 * for unprivileged users.
121 	 */
122 	rlim_cur_zero.rlim_max = rlim_init.rlim_max;
123 	if (setrlimit(RLIMIT_MEMLOCK, &rlim_cur_zero))
124 		return false;
125 
126 	/* Do not use bpf_prog_load() from libbpf here, because it calls
127 	 * bump_rlimit_memlock(), interfering with the current probe.
128 	 */
129 	prog_fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
130 	err = errno;
131 
132 	/* reset soft rlimit to its initial value */
133 	setrlimit(RLIMIT_MEMLOCK, &rlim_init);
134 
135 	if (prog_fd < 0)
136 		return err == EPERM;
137 
138 	close(prog_fd);
139 	return false;
140 }
141 
142 void set_max_rlimit(void)
143 {
144 	struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };
145 
146 	if (known_to_need_rlimit())
147 		setrlimit(RLIMIT_MEMLOCK, &rinf);
148 }
149 
150 static int
151 mnt_fs(const char *target, const char *type, char *buff, size_t bufflen)
152 {
153 	bool bind_done = false;
154 
155 	while (mount("", target, "none", MS_PRIVATE | MS_REC, NULL)) {
156 		if (errno != EINVAL || bind_done) {
157 			snprintf(buff, bufflen,
158 				 "mount --make-private %s failed: %s",
159 				 target, strerror(errno));
160 			return -1;
161 		}
162 
163 		if (mount(target, target, "none", MS_BIND, NULL)) {
164 			snprintf(buff, bufflen,
165 				 "mount --bind %s %s failed: %s",
166 				 target, target, strerror(errno));
167 			return -1;
168 		}
169 
170 		bind_done = true;
171 	}
172 
173 	if (mount(type, target, type, 0, "mode=0700")) {
174 		snprintf(buff, bufflen, "mount -t %s %s %s failed: %s",
175 			 type, type, target, strerror(errno));
176 		return -1;
177 	}
178 
179 	return 0;
180 }
181 
182 int mount_tracefs(const char *target)
183 {
184 	char err_str[ERR_MAX_LEN];
185 	int err;
186 
187 	err = mnt_fs(target, "tracefs", err_str, ERR_MAX_LEN);
188 	if (err) {
189 		err_str[ERR_MAX_LEN - 1] = '\0';
190 		p_err("can't mount tracefs: %s", err_str);
191 	}
192 
193 	return err;
194 }
195 
196 int open_obj_pinned(const char *path, bool quiet)
197 {
198 	char *pname;
199 	int fd = -1;
200 
201 	pname = strdup(path);
202 	if (!pname) {
203 		if (!quiet)
204 			p_err("mem alloc failed");
205 		goto out_ret;
206 	}
207 
208 	fd = bpf_obj_get(pname);
209 	if (fd < 0) {
210 		if (!quiet)
211 			p_err("bpf obj get (%s): %s", pname,
212 			      errno == EACCES && !is_bpffs(dirname(pname)) ?
213 			    "directory not in bpf file system (bpffs)" :
214 			    strerror(errno));
215 		goto out_free;
216 	}
217 
218 out_free:
219 	free(pname);
220 out_ret:
221 	return fd;
222 }
223 
224 int open_obj_pinned_any(const char *path, enum bpf_obj_type exp_type)
225 {
226 	enum bpf_obj_type type;
227 	int fd;
228 
229 	fd = open_obj_pinned(path, false);
230 	if (fd < 0)
231 		return -1;
232 
233 	type = get_fd_type(fd);
234 	if (type < 0) {
235 		close(fd);
236 		return type;
237 	}
238 	if (type != exp_type) {
239 		p_err("incorrect object type: %s", get_fd_type_name(type));
240 		close(fd);
241 		return -1;
242 	}
243 
244 	return fd;
245 }
246 
247 int create_and_mount_bpffs_dir(const char *dir_name)
248 {
249 	char err_str[ERR_MAX_LEN];
250 	bool dir_exists;
251 	int err = 0;
252 
253 	if (is_bpffs(dir_name))
254 		return err;
255 
256 	dir_exists = access(dir_name, F_OK) == 0;
257 
258 	if (!dir_exists) {
259 		char *temp_name;
260 		char *parent_name;
261 
262 		temp_name = strdup(dir_name);
263 		if (!temp_name) {
264 			p_err("mem alloc failed");
265 			return -1;
266 		}
267 
268 		parent_name = dirname(temp_name);
269 
270 		if (is_bpffs(parent_name)) {
271 			/* nothing to do if already mounted */
272 			free(temp_name);
273 			return err;
274 		}
275 
276 		if (access(parent_name, F_OK) == -1) {
277 			p_err("can't create dir '%s' to pin BPF object: parent dir '%s' doesn't exist",
278 			      dir_name, parent_name);
279 			free(temp_name);
280 			return -1;
281 		}
282 
283 		free(temp_name);
284 	}
285 
286 	if (block_mount) {
287 		p_err("no BPF file system found, not mounting it due to --nomount option");
288 		return -1;
289 	}
290 
291 	if (!dir_exists) {
292 		err = mkdir(dir_name, S_IRWXU);
293 		if (err) {
294 			p_err("failed to create dir '%s': %s", dir_name, strerror(errno));
295 			return err;
296 		}
297 	}
298 
299 	err = mnt_fs(dir_name, "bpf", err_str, ERR_MAX_LEN);
300 	if (err) {
301 		err_str[ERR_MAX_LEN - 1] = '\0';
302 		p_err("can't mount BPF file system on given dir '%s': %s",
303 		      dir_name, err_str);
304 
305 		if (!dir_exists)
306 			rmdir(dir_name);
307 	}
308 
309 	return err;
310 }
311 
312 int mount_bpffs_for_file(const char *file_name)
313 {
314 	char err_str[ERR_MAX_LEN];
315 	char *temp_name;
316 	char *dir;
317 	int err = 0;
318 
319 	if (access(file_name, F_OK) != -1) {
320 		p_err("can't pin BPF object: path '%s' already exists", file_name);
321 		return -1;
322 	}
323 
324 	temp_name = strdup(file_name);
325 	if (!temp_name) {
326 		p_err("mem alloc failed");
327 		return -1;
328 	}
329 
330 	dir = dirname(temp_name);
331 
332 	if (is_bpffs(dir))
333 		/* nothing to do if already mounted */
334 		goto out_free;
335 
336 	if (access(dir, F_OK) == -1) {
337 		p_err("can't pin BPF object: dir '%s' doesn't exist", dir);
338 		err = -1;
339 		goto out_free;
340 	}
341 
342 	if (block_mount) {
343 		p_err("no BPF file system found, not mounting it due to --nomount option");
344 		err = -1;
345 		goto out_free;
346 	}
347 
348 	err = mnt_fs(dir, "bpf", err_str, ERR_MAX_LEN);
349 	if (err) {
350 		err_str[ERR_MAX_LEN - 1] = '\0';
351 		p_err("can't mount BPF file system to pin the object '%s': %s",
352 		      file_name, err_str);
353 	}
354 
355 out_free:
356 	free(temp_name);
357 	return err;
358 }
359 
360 int do_pin_fd(int fd, const char *name)
361 {
362 	int err;
363 
364 	err = mount_bpffs_for_file(name);
365 	if (err)
366 		return err;
367 
368 	err = bpf_obj_pin(fd, name);
369 	if (err)
370 		p_err("can't pin the object (%s): %s", name, strerror(errno));
371 
372 	return err;
373 }
374 
375 int do_pin_any(int argc, char **argv, int (*get_fd)(int *, char ***))
376 {
377 	int err;
378 	int fd;
379 
380 	if (!REQ_ARGS(3))
381 		return -EINVAL;
382 
383 	fd = get_fd(&argc, &argv);
384 	if (fd < 0)
385 		return fd;
386 
387 	err = do_pin_fd(fd, *argv);
388 
389 	close(fd);
390 	return err;
391 }
392 
393 const char *get_fd_type_name(enum bpf_obj_type type)
394 {
395 	static const char * const names[] = {
396 		[BPF_OBJ_UNKNOWN]	= "unknown",
397 		[BPF_OBJ_PROG]		= "prog",
398 		[BPF_OBJ_MAP]		= "map",
399 		[BPF_OBJ_LINK]		= "link",
400 	};
401 
402 	if (type < 0 || type >= ARRAY_SIZE(names) || !names[type])
403 		return names[BPF_OBJ_UNKNOWN];
404 
405 	return names[type];
406 }
407 
408 void get_prog_full_name(const struct bpf_prog_info *prog_info, int prog_fd,
409 			char *name_buff, size_t buff_len)
410 {
411 	const char *prog_name = prog_info->name;
412 	const struct btf_type *func_type;
413 	const struct bpf_func_info finfo = {};
414 	struct bpf_prog_info info = {};
415 	__u32 info_len = sizeof(info);
416 	struct btf *prog_btf = NULL;
417 
418 	if (buff_len <= BPF_OBJ_NAME_LEN ||
419 	    strlen(prog_info->name) < BPF_OBJ_NAME_LEN - 1)
420 		goto copy_name;
421 
422 	if (!prog_info->btf_id || prog_info->nr_func_info == 0)
423 		goto copy_name;
424 
425 	info.nr_func_info = 1;
426 	info.func_info_rec_size = prog_info->func_info_rec_size;
427 	if (info.func_info_rec_size > sizeof(finfo))
428 		info.func_info_rec_size = sizeof(finfo);
429 	info.func_info = ptr_to_u64(&finfo);
430 
431 	if (bpf_prog_get_info_by_fd(prog_fd, &info, &info_len))
432 		goto copy_name;
433 
434 	prog_btf = btf__load_from_kernel_by_id(info.btf_id);
435 	if (!prog_btf)
436 		goto copy_name;
437 
438 	func_type = btf__type_by_id(prog_btf, finfo.type_id);
439 	if (!func_type || !btf_is_func(func_type))
440 		goto copy_name;
441 
442 	prog_name = btf__name_by_offset(prog_btf, func_type->name_off);
443 
444 copy_name:
445 	snprintf(name_buff, buff_len, "%s", prog_name);
446 
447 	if (prog_btf)
448 		btf__free(prog_btf);
449 }
450 
451 int get_fd_type(int fd)
452 {
453 	char path[PATH_MAX];
454 	char buf[512];
455 	ssize_t n;
456 
457 	snprintf(path, sizeof(path), "/proc/self/fd/%d", fd);
458 
459 	n = readlink(path, buf, sizeof(buf));
460 	if (n < 0) {
461 		p_err("can't read link type: %s", strerror(errno));
462 		return -1;
463 	}
464 	if (n == sizeof(path)) {
465 		p_err("can't read link type: path too long!");
466 		return -1;
467 	}
468 
469 	if (strstr(buf, "bpf-map"))
470 		return BPF_OBJ_MAP;
471 	else if (strstr(buf, "bpf-prog"))
472 		return BPF_OBJ_PROG;
473 	else if (strstr(buf, "bpf-link"))
474 		return BPF_OBJ_LINK;
475 
476 	return BPF_OBJ_UNKNOWN;
477 }
478 
479 char *get_fdinfo(int fd, const char *key)
480 {
481 	char path[PATH_MAX];
482 	char *line = NULL;
483 	size_t line_n = 0;
484 	ssize_t n;
485 	FILE *fdi;
486 
487 	snprintf(path, sizeof(path), "/proc/self/fdinfo/%d", fd);
488 
489 	fdi = fopen(path, "r");
490 	if (!fdi)
491 		return NULL;
492 
493 	while ((n = getline(&line, &line_n, fdi)) > 0) {
494 		char *value;
495 		int len;
496 
497 		if (!strstr(line, key))
498 			continue;
499 
500 		fclose(fdi);
501 
502 		value = strchr(line, '\t');
503 		if (!value || !value[1]) {
504 			free(line);
505 			return NULL;
506 		}
507 		value++;
508 
509 		len = strlen(value);
510 		memmove(line, value, len);
511 		line[len - 1] = '\0';
512 
513 		return line;
514 	}
515 
516 	free(line);
517 	fclose(fdi);
518 	return NULL;
519 }
520 
521 void print_data_json(uint8_t *data, size_t len)
522 {
523 	unsigned int i;
524 
525 	jsonw_start_array(json_wtr);
526 	for (i = 0; i < len; i++)
527 		jsonw_printf(json_wtr, "%d", data[i]);
528 	jsonw_end_array(json_wtr);
529 }
530 
531 void print_hex_data_json(uint8_t *data, size_t len)
532 {
533 	unsigned int i;
534 
535 	jsonw_start_array(json_wtr);
536 	for (i = 0; i < len; i++)
537 		jsonw_printf(json_wtr, "\"0x%02hhx\"", data[i]);
538 	jsonw_end_array(json_wtr);
539 }
540 
541 /* extra params for nftw cb */
542 static struct hashmap *build_fn_table;
543 static enum bpf_obj_type build_fn_type;
544 
545 static int do_build_table_cb(const char *fpath, const struct stat *sb,
546 			     int typeflag, struct FTW *ftwbuf)
547 {
548 	struct bpf_prog_info pinned_info;
549 	__u32 len = sizeof(pinned_info);
550 	enum bpf_obj_type objtype;
551 	int fd, err = 0;
552 	char *path;
553 
554 	if (typeflag != FTW_F)
555 		goto out_ret;
556 
557 	fd = open_obj_pinned(fpath, true);
558 	if (fd < 0)
559 		goto out_ret;
560 
561 	objtype = get_fd_type(fd);
562 	if (objtype != build_fn_type)
563 		goto out_close;
564 
565 	memset(&pinned_info, 0, sizeof(pinned_info));
566 	if (bpf_prog_get_info_by_fd(fd, &pinned_info, &len))
567 		goto out_close;
568 
569 	path = strdup(fpath);
570 	if (!path) {
571 		err = -1;
572 		goto out_close;
573 	}
574 
575 	err = hashmap__append(build_fn_table, pinned_info.id, path);
576 	if (err) {
577 		p_err("failed to append entry to hashmap for ID %u, path '%s': %s",
578 		      pinned_info.id, path, strerror(errno));
579 		free(path);
580 		goto out_close;
581 	}
582 
583 out_close:
584 	close(fd);
585 out_ret:
586 	return err;
587 }
588 
589 int build_pinned_obj_table(struct hashmap *tab,
590 			   enum bpf_obj_type type)
591 {
592 	struct mntent *mntent = NULL;
593 	FILE *mntfile = NULL;
594 	int flags = FTW_PHYS;
595 	int nopenfd = 16;
596 	int err = 0;
597 
598 	mntfile = setmntent("/proc/mounts", "r");
599 	if (!mntfile)
600 		return -1;
601 
602 	build_fn_table = tab;
603 	build_fn_type = type;
604 
605 	while ((mntent = getmntent(mntfile))) {
606 		char *path = mntent->mnt_dir;
607 
608 		if (strncmp(mntent->mnt_type, "bpf", 3) != 0)
609 			continue;
610 		err = nftw(path, do_build_table_cb, nopenfd, flags);
611 		if (err)
612 			break;
613 	}
614 	fclose(mntfile);
615 	return err;
616 }
617 
618 void delete_pinned_obj_table(struct hashmap *map)
619 {
620 	struct hashmap_entry *entry;
621 	size_t bkt;
622 
623 	if (!map)
624 		return;
625 
626 	hashmap__for_each_entry(map, entry, bkt)
627 		free(entry->pvalue);
628 
629 	hashmap__free(map);
630 }
631 
632 unsigned int get_page_size(void)
633 {
634 	static int result;
635 
636 	if (!result)
637 		result = getpagesize();
638 	return result;
639 }
640 
641 unsigned int get_possible_cpus(void)
642 {
643 	int cpus = libbpf_num_possible_cpus();
644 
645 	if (cpus < 0) {
646 		p_err("Can't get # of possible cpus: %s", strerror(-cpus));
647 		exit(-1);
648 	}
649 	return cpus;
650 }
651 
652 static char *
653 ifindex_to_name_ns(__u32 ifindex, __u32 ns_dev, __u32 ns_ino, char *buf)
654 {
655 	struct stat st;
656 	int err;
657 
658 	err = stat("/proc/self/ns/net", &st);
659 	if (err) {
660 		p_err("Can't stat /proc/self: %s", strerror(errno));
661 		return NULL;
662 	}
663 
664 	if (st.st_dev != ns_dev || st.st_ino != ns_ino)
665 		return NULL;
666 
667 	return if_indextoname(ifindex, buf);
668 }
669 
670 static int read_sysfs_hex_int(char *path)
671 {
672 	char vendor_id_buf[8];
673 	int len;
674 	int fd;
675 
676 	fd = open(path, O_RDONLY);
677 	if (fd < 0) {
678 		p_err("Can't open %s: %s", path, strerror(errno));
679 		return -1;
680 	}
681 
682 	len = read(fd, vendor_id_buf, sizeof(vendor_id_buf));
683 	close(fd);
684 	if (len < 0) {
685 		p_err("Can't read %s: %s", path, strerror(errno));
686 		return -1;
687 	}
688 	if (len >= (int)sizeof(vendor_id_buf)) {
689 		p_err("Value in %s too long", path);
690 		return -1;
691 	}
692 
693 	vendor_id_buf[len] = 0;
694 
695 	return strtol(vendor_id_buf, NULL, 0);
696 }
697 
698 static int read_sysfs_netdev_hex_int(char *devname, const char *entry_name)
699 {
700 	char full_path[64];
701 
702 	snprintf(full_path, sizeof(full_path), "/sys/class/net/%s/device/%s",
703 		 devname, entry_name);
704 
705 	return read_sysfs_hex_int(full_path);
706 }
707 
708 const char *
709 ifindex_to_arch(__u32 ifindex, __u64 ns_dev, __u64 ns_ino, const char **opt)
710 {
711 	__maybe_unused int device_id;
712 	char devname[IF_NAMESIZE];
713 	int vendor_id;
714 
715 	if (!ifindex_to_name_ns(ifindex, ns_dev, ns_ino, devname)) {
716 		p_err("Can't get net device name for ifindex %d: %s", ifindex,
717 		      strerror(errno));
718 		return NULL;
719 	}
720 
721 	vendor_id = read_sysfs_netdev_hex_int(devname, "vendor");
722 	if (vendor_id < 0) {
723 		p_err("Can't get device vendor id for %s", devname);
724 		return NULL;
725 	}
726 
727 	switch (vendor_id) {
728 #ifdef HAVE_LIBBFD_SUPPORT
729 	case 0x19ee:
730 		device_id = read_sysfs_netdev_hex_int(devname, "device");
731 		if (device_id != 0x4000 &&
732 		    device_id != 0x6000 &&
733 		    device_id != 0x6003)
734 			p_info("Unknown NFP device ID, assuming it is NFP-6xxx arch");
735 		*opt = "ctx4";
736 		return "NFP-6xxx";
737 #endif /* HAVE_LIBBFD_SUPPORT */
738 	/* No NFP support in LLVM, we have no valid triple to return. */
739 	default:
740 		p_err("Can't get arch name for device vendor id 0x%04x",
741 		      vendor_id);
742 		return NULL;
743 	}
744 }
745 
746 void print_dev_plain(__u32 ifindex, __u64 ns_dev, __u64 ns_inode)
747 {
748 	char name[IF_NAMESIZE];
749 
750 	if (!ifindex)
751 		return;
752 
753 	printf("  offloaded_to ");
754 	if (ifindex_to_name_ns(ifindex, ns_dev, ns_inode, name))
755 		printf("%s", name);
756 	else
757 		printf("ifindex %u ns_dev %llu ns_ino %llu",
758 		       ifindex, ns_dev, ns_inode);
759 }
760 
761 void print_dev_json(__u32 ifindex, __u64 ns_dev, __u64 ns_inode)
762 {
763 	char name[IF_NAMESIZE];
764 
765 	if (!ifindex)
766 		return;
767 
768 	jsonw_name(json_wtr, "dev");
769 	jsonw_start_object(json_wtr);
770 	jsonw_uint_field(json_wtr, "ifindex", ifindex);
771 	jsonw_uint_field(json_wtr, "ns_dev", ns_dev);
772 	jsonw_uint_field(json_wtr, "ns_inode", ns_inode);
773 	if (ifindex_to_name_ns(ifindex, ns_dev, ns_inode, name))
774 		jsonw_string_field(json_wtr, "ifname", name);
775 	jsonw_end_object(json_wtr);
776 }
777 
778 int parse_u32_arg(int *argc, char ***argv, __u32 *val, const char *what)
779 {
780 	char *endptr;
781 
782 	NEXT_ARGP();
783 
784 	if (*val) {
785 		p_err("%s already specified", what);
786 		return -1;
787 	}
788 
789 	*val = strtoul(**argv, &endptr, 0);
790 	if (*endptr) {
791 		p_err("can't parse %s as %s", **argv, what);
792 		return -1;
793 	}
794 	NEXT_ARGP();
795 
796 	return 0;
797 }
798 
799 int __printf(2, 0)
800 print_all_levels(__maybe_unused enum libbpf_print_level level,
801 		 const char *format, va_list args)
802 {
803 	return vfprintf(stderr, format, args);
804 }
805 
806 static int prog_fd_by_nametag(void *nametag, int **fds, bool tag)
807 {
808 	char prog_name[MAX_PROG_FULL_NAME];
809 	unsigned int id = 0;
810 	int fd, nb_fds = 0;
811 	void *tmp;
812 	int err;
813 
814 	while (true) {
815 		struct bpf_prog_info info = {};
816 		__u32 len = sizeof(info);
817 
818 		err = bpf_prog_get_next_id(id, &id);
819 		if (err) {
820 			if (errno != ENOENT) {
821 				p_err("%s", strerror(errno));
822 				goto err_close_fds;
823 			}
824 			return nb_fds;
825 		}
826 
827 		fd = bpf_prog_get_fd_by_id(id);
828 		if (fd < 0) {
829 			p_err("can't get prog by id (%u): %s",
830 			      id, strerror(errno));
831 			goto err_close_fds;
832 		}
833 
834 		err = bpf_prog_get_info_by_fd(fd, &info, &len);
835 		if (err) {
836 			p_err("can't get prog info (%u): %s",
837 			      id, strerror(errno));
838 			goto err_close_fd;
839 		}
840 
841 		if (tag && memcmp(nametag, info.tag, BPF_TAG_SIZE)) {
842 			close(fd);
843 			continue;
844 		}
845 
846 		if (!tag) {
847 			get_prog_full_name(&info, fd, prog_name,
848 					   sizeof(prog_name));
849 			if (strncmp(nametag, prog_name, sizeof(prog_name))) {
850 				close(fd);
851 				continue;
852 			}
853 		}
854 
855 		if (nb_fds > 0) {
856 			tmp = realloc(*fds, (nb_fds + 1) * sizeof(int));
857 			if (!tmp) {
858 				p_err("failed to realloc");
859 				goto err_close_fd;
860 			}
861 			*fds = tmp;
862 		}
863 		(*fds)[nb_fds++] = fd;
864 	}
865 
866 err_close_fd:
867 	close(fd);
868 err_close_fds:
869 	while (--nb_fds >= 0)
870 		close((*fds)[nb_fds]);
871 	return -1;
872 }
873 
874 int prog_parse_fds(int *argc, char ***argv, int **fds)
875 {
876 	if (is_prefix(**argv, "id")) {
877 		unsigned int id;
878 		char *endptr;
879 
880 		NEXT_ARGP();
881 
882 		id = strtoul(**argv, &endptr, 0);
883 		if (*endptr) {
884 			p_err("can't parse %s as ID", **argv);
885 			return -1;
886 		}
887 		NEXT_ARGP();
888 
889 		(*fds)[0] = bpf_prog_get_fd_by_id(id);
890 		if ((*fds)[0] < 0) {
891 			p_err("get by id (%u): %s", id, strerror(errno));
892 			return -1;
893 		}
894 		return 1;
895 	} else if (is_prefix(**argv, "tag")) {
896 		unsigned char tag[BPF_TAG_SIZE];
897 
898 		NEXT_ARGP();
899 
900 		if (sscanf(**argv, BPF_TAG_FMT, tag, tag + 1, tag + 2,
901 			   tag + 3, tag + 4, tag + 5, tag + 6, tag + 7)
902 		    != BPF_TAG_SIZE) {
903 			p_err("can't parse tag");
904 			return -1;
905 		}
906 		NEXT_ARGP();
907 
908 		return prog_fd_by_nametag(tag, fds, true);
909 	} else if (is_prefix(**argv, "name")) {
910 		char *name;
911 
912 		NEXT_ARGP();
913 
914 		name = **argv;
915 		if (strlen(name) > MAX_PROG_FULL_NAME - 1) {
916 			p_err("can't parse name");
917 			return -1;
918 		}
919 		NEXT_ARGP();
920 
921 		return prog_fd_by_nametag(name, fds, false);
922 	} else if (is_prefix(**argv, "pinned")) {
923 		char *path;
924 
925 		NEXT_ARGP();
926 
927 		path = **argv;
928 		NEXT_ARGP();
929 
930 		(*fds)[0] = open_obj_pinned_any(path, BPF_OBJ_PROG);
931 		if ((*fds)[0] < 0)
932 			return -1;
933 		return 1;
934 	}
935 
936 	p_err("expected 'id', 'tag', 'name' or 'pinned', got: '%s'?", **argv);
937 	return -1;
938 }
939 
940 int prog_parse_fd(int *argc, char ***argv)
941 {
942 	int *fds = NULL;
943 	int nb_fds, fd;
944 
945 	fds = malloc(sizeof(int));
946 	if (!fds) {
947 		p_err("mem alloc failed");
948 		return -1;
949 	}
950 	nb_fds = prog_parse_fds(argc, argv, &fds);
951 	if (nb_fds != 1) {
952 		if (nb_fds > 1) {
953 			p_err("several programs match this handle");
954 			while (nb_fds--)
955 				close(fds[nb_fds]);
956 		}
957 		fd = -1;
958 		goto exit_free;
959 	}
960 
961 	fd = fds[0];
962 exit_free:
963 	free(fds);
964 	return fd;
965 }
966 
967 static int map_fd_by_name(char *name, int **fds)
968 {
969 	unsigned int id = 0;
970 	int fd, nb_fds = 0;
971 	void *tmp;
972 	int err;
973 
974 	while (true) {
975 		struct bpf_map_info info = {};
976 		__u32 len = sizeof(info);
977 
978 		err = bpf_map_get_next_id(id, &id);
979 		if (err) {
980 			if (errno != ENOENT) {
981 				p_err("%s", strerror(errno));
982 				goto err_close_fds;
983 			}
984 			return nb_fds;
985 		}
986 
987 		fd = bpf_map_get_fd_by_id(id);
988 		if (fd < 0) {
989 			p_err("can't get map by id (%u): %s",
990 			      id, strerror(errno));
991 			goto err_close_fds;
992 		}
993 
994 		err = bpf_map_get_info_by_fd(fd, &info, &len);
995 		if (err) {
996 			p_err("can't get map info (%u): %s",
997 			      id, strerror(errno));
998 			goto err_close_fd;
999 		}
1000 
1001 		if (strncmp(name, info.name, BPF_OBJ_NAME_LEN)) {
1002 			close(fd);
1003 			continue;
1004 		}
1005 
1006 		if (nb_fds > 0) {
1007 			tmp = realloc(*fds, (nb_fds + 1) * sizeof(int));
1008 			if (!tmp) {
1009 				p_err("failed to realloc");
1010 				goto err_close_fd;
1011 			}
1012 			*fds = tmp;
1013 		}
1014 		(*fds)[nb_fds++] = fd;
1015 	}
1016 
1017 err_close_fd:
1018 	close(fd);
1019 err_close_fds:
1020 	while (--nb_fds >= 0)
1021 		close((*fds)[nb_fds]);
1022 	return -1;
1023 }
1024 
1025 int map_parse_fds(int *argc, char ***argv, int **fds)
1026 {
1027 	if (is_prefix(**argv, "id")) {
1028 		unsigned int id;
1029 		char *endptr;
1030 
1031 		NEXT_ARGP();
1032 
1033 		id = strtoul(**argv, &endptr, 0);
1034 		if (*endptr) {
1035 			p_err("can't parse %s as ID", **argv);
1036 			return -1;
1037 		}
1038 		NEXT_ARGP();
1039 
1040 		(*fds)[0] = bpf_map_get_fd_by_id(id);
1041 		if ((*fds)[0] < 0) {
1042 			p_err("get map by id (%u): %s", id, strerror(errno));
1043 			return -1;
1044 		}
1045 		return 1;
1046 	} else if (is_prefix(**argv, "name")) {
1047 		char *name;
1048 
1049 		NEXT_ARGP();
1050 
1051 		name = **argv;
1052 		if (strlen(name) > BPF_OBJ_NAME_LEN - 1) {
1053 			p_err("can't parse name");
1054 			return -1;
1055 		}
1056 		NEXT_ARGP();
1057 
1058 		return map_fd_by_name(name, fds);
1059 	} else if (is_prefix(**argv, "pinned")) {
1060 		char *path;
1061 
1062 		NEXT_ARGP();
1063 
1064 		path = **argv;
1065 		NEXT_ARGP();
1066 
1067 		(*fds)[0] = open_obj_pinned_any(path, BPF_OBJ_MAP);
1068 		if ((*fds)[0] < 0)
1069 			return -1;
1070 		return 1;
1071 	}
1072 
1073 	p_err("expected 'id', 'name' or 'pinned', got: '%s'?", **argv);
1074 	return -1;
1075 }
1076 
1077 int map_parse_fd(int *argc, char ***argv)
1078 {
1079 	int *fds = NULL;
1080 	int nb_fds, fd;
1081 
1082 	fds = malloc(sizeof(int));
1083 	if (!fds) {
1084 		p_err("mem alloc failed");
1085 		return -1;
1086 	}
1087 	nb_fds = map_parse_fds(argc, argv, &fds);
1088 	if (nb_fds != 1) {
1089 		if (nb_fds > 1) {
1090 			p_err("several maps match this handle");
1091 			while (nb_fds--)
1092 				close(fds[nb_fds]);
1093 		}
1094 		fd = -1;
1095 		goto exit_free;
1096 	}
1097 
1098 	fd = fds[0];
1099 exit_free:
1100 	free(fds);
1101 	return fd;
1102 }
1103 
1104 int map_parse_fd_and_info(int *argc, char ***argv, struct bpf_map_info *info,
1105 			  __u32 *info_len)
1106 {
1107 	int err;
1108 	int fd;
1109 
1110 	fd = map_parse_fd(argc, argv);
1111 	if (fd < 0)
1112 		return -1;
1113 
1114 	err = bpf_map_get_info_by_fd(fd, info, info_len);
1115 	if (err) {
1116 		p_err("can't get map info: %s", strerror(errno));
1117 		close(fd);
1118 		return err;
1119 	}
1120 
1121 	return fd;
1122 }
1123 
1124 size_t hash_fn_for_key_as_id(long key, void *ctx)
1125 {
1126 	return key;
1127 }
1128 
1129 bool equal_fn_for_key_as_id(long k1, long k2, void *ctx)
1130 {
1131 	return k1 == k2;
1132 }
1133 
1134 const char *bpf_attach_type_input_str(enum bpf_attach_type t)
1135 {
1136 	switch (t) {
1137 	case BPF_CGROUP_INET_INGRESS:		return "ingress";
1138 	case BPF_CGROUP_INET_EGRESS:		return "egress";
1139 	case BPF_CGROUP_INET_SOCK_CREATE:	return "sock_create";
1140 	case BPF_CGROUP_INET_SOCK_RELEASE:	return "sock_release";
1141 	case BPF_CGROUP_SOCK_OPS:		return "sock_ops";
1142 	case BPF_CGROUP_DEVICE:			return "device";
1143 	case BPF_CGROUP_INET4_BIND:		return "bind4";
1144 	case BPF_CGROUP_INET6_BIND:		return "bind6";
1145 	case BPF_CGROUP_INET4_CONNECT:		return "connect4";
1146 	case BPF_CGROUP_INET6_CONNECT:		return "connect6";
1147 	case BPF_CGROUP_INET4_POST_BIND:	return "post_bind4";
1148 	case BPF_CGROUP_INET6_POST_BIND:	return "post_bind6";
1149 	case BPF_CGROUP_INET4_GETPEERNAME:	return "getpeername4";
1150 	case BPF_CGROUP_INET6_GETPEERNAME:	return "getpeername6";
1151 	case BPF_CGROUP_INET4_GETSOCKNAME:	return "getsockname4";
1152 	case BPF_CGROUP_INET6_GETSOCKNAME:	return "getsockname6";
1153 	case BPF_CGROUP_UDP4_SENDMSG:		return "sendmsg4";
1154 	case BPF_CGROUP_UDP6_SENDMSG:		return "sendmsg6";
1155 	case BPF_CGROUP_SYSCTL:			return "sysctl";
1156 	case BPF_CGROUP_UDP4_RECVMSG:		return "recvmsg4";
1157 	case BPF_CGROUP_UDP6_RECVMSG:		return "recvmsg6";
1158 	case BPF_CGROUP_GETSOCKOPT:		return "getsockopt";
1159 	case BPF_CGROUP_SETSOCKOPT:		return "setsockopt";
1160 	case BPF_TRACE_RAW_TP:			return "raw_tp";
1161 	case BPF_TRACE_FENTRY:			return "fentry";
1162 	case BPF_TRACE_FEXIT:			return "fexit";
1163 	case BPF_MODIFY_RETURN:			return "mod_ret";
1164 	case BPF_SK_REUSEPORT_SELECT:		return "sk_skb_reuseport_select";
1165 	case BPF_SK_REUSEPORT_SELECT_OR_MIGRATE:	return "sk_skb_reuseport_select_or_migrate";
1166 	default:	return libbpf_bpf_attach_type_str(t);
1167 	}
1168 }
1169 
1170 int pathname_concat(char *buf, int buf_sz, const char *path,
1171 		    const char *name)
1172 {
1173 	int len;
1174 
1175 	len = snprintf(buf, buf_sz, "%s/%s", path, name);
1176 	if (len < 0)
1177 		return -EINVAL;
1178 	if (len >= buf_sz)
1179 		return -ENAMETOOLONG;
1180 
1181 	return 0;
1182 }
1183