1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
2 /* Copyright (C) 2017-2018 Netronome Systems, Inc. */
3
4 #define _GNU_SOURCE
5 #include <errno.h>
6 #include <fcntl.h>
7 #include <signal.h>
8 #include <stdarg.h>
9 #include <stdio.h>
10 #include <stdlib.h>
11 #include <string.h>
12 #include <time.h>
13 #include <unistd.h>
14 #include <net/if.h>
15 #include <sys/ioctl.h>
16 #include <sys/types.h>
17 #include <sys/stat.h>
18 #include <sys/syscall.h>
19
20 #include <linux/err.h>
21 #include <linux/perf_event.h>
22 #include <linux/sizes.h>
23
24 #include <bpf/bpf.h>
25 #include <bpf/btf.h>
26 #include <bpf/libbpf.h>
27
28 #include "cfg.h"
29 #include "main.h"
30 #include "xlated_dumper.h"
31
32 #define BPF_METADATA_PREFIX "bpf_metadata_"
33 #define BPF_METADATA_PREFIX_LEN (sizeof(BPF_METADATA_PREFIX) - 1)
34
35 const char * const prog_type_name[] = {
36 [BPF_PROG_TYPE_UNSPEC] = "unspec",
37 [BPF_PROG_TYPE_SOCKET_FILTER] = "socket_filter",
38 [BPF_PROG_TYPE_KPROBE] = "kprobe",
39 [BPF_PROG_TYPE_SCHED_CLS] = "sched_cls",
40 [BPF_PROG_TYPE_SCHED_ACT] = "sched_act",
41 [BPF_PROG_TYPE_TRACEPOINT] = "tracepoint",
42 [BPF_PROG_TYPE_XDP] = "xdp",
43 [BPF_PROG_TYPE_PERF_EVENT] = "perf_event",
44 [BPF_PROG_TYPE_CGROUP_SKB] = "cgroup_skb",
45 [BPF_PROG_TYPE_CGROUP_SOCK] = "cgroup_sock",
46 [BPF_PROG_TYPE_LWT_IN] = "lwt_in",
47 [BPF_PROG_TYPE_LWT_OUT] = "lwt_out",
48 [BPF_PROG_TYPE_LWT_XMIT] = "lwt_xmit",
49 [BPF_PROG_TYPE_SOCK_OPS] = "sock_ops",
50 [BPF_PROG_TYPE_SK_SKB] = "sk_skb",
51 [BPF_PROG_TYPE_CGROUP_DEVICE] = "cgroup_device",
52 [BPF_PROG_TYPE_SK_MSG] = "sk_msg",
53 [BPF_PROG_TYPE_RAW_TRACEPOINT] = "raw_tracepoint",
54 [BPF_PROG_TYPE_CGROUP_SOCK_ADDR] = "cgroup_sock_addr",
55 [BPF_PROG_TYPE_LWT_SEG6LOCAL] = "lwt_seg6local",
56 [BPF_PROG_TYPE_LIRC_MODE2] = "lirc_mode2",
57 [BPF_PROG_TYPE_SK_REUSEPORT] = "sk_reuseport",
58 [BPF_PROG_TYPE_FLOW_DISSECTOR] = "flow_dissector",
59 [BPF_PROG_TYPE_CGROUP_SYSCTL] = "cgroup_sysctl",
60 [BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE] = "raw_tracepoint_writable",
61 [BPF_PROG_TYPE_CGROUP_SOCKOPT] = "cgroup_sockopt",
62 [BPF_PROG_TYPE_TRACING] = "tracing",
63 [BPF_PROG_TYPE_STRUCT_OPS] = "struct_ops",
64 [BPF_PROG_TYPE_EXT] = "ext",
65 [BPF_PROG_TYPE_LSM] = "lsm",
66 [BPF_PROG_TYPE_SK_LOOKUP] = "sk_lookup",
67 };
68
69 const size_t prog_type_name_size = ARRAY_SIZE(prog_type_name);
70
71 enum dump_mode {
72 DUMP_JITED,
73 DUMP_XLATED,
74 };
75
76 static const char * const attach_type_strings[] = {
77 [BPF_SK_SKB_STREAM_PARSER] = "stream_parser",
78 [BPF_SK_SKB_STREAM_VERDICT] = "stream_verdict",
79 [BPF_SK_SKB_VERDICT] = "skb_verdict",
80 [BPF_SK_MSG_VERDICT] = "msg_verdict",
81 [BPF_FLOW_DISSECTOR] = "flow_dissector",
82 [__MAX_BPF_ATTACH_TYPE] = NULL,
83 };
84
85 static enum bpf_attach_type parse_attach_type(const char *str)
86 {
87 enum bpf_attach_type type;
88
89 for (type = 0; type < __MAX_BPF_ATTACH_TYPE; type++) {
90 if (attach_type_strings[type] &&
91 is_prefix(str, attach_type_strings[type]))
92 return type;
93 }
94
95 return __MAX_BPF_ATTACH_TYPE;
96 }
97
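/* Convert a program load time, given in nanoseconds since boot, into
 * wall-clock time for display. Fall back to printing raw seconds if the
 * clocks cannot be read or the time cannot be broken down.
 */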
98 static void print_boot_time(__u64 nsecs, char *buf, unsigned int size)
99 {
100 struct timespec real_time_ts, boot_time_ts;
101 time_t wallclock_secs;
102 struct tm load_tm;
103
104 buf[--size] = '\0';
105
106 if (clock_gettime(CLOCK_REALTIME, &real_time_ts) ||
107 clock_gettime(CLOCK_BOOTTIME, &boot_time_ts)) {
108 perror("Can't read clocks");
109 snprintf(buf, size, "%llu", nsecs / 1000000000);
110 return;
111 }
112
113 wallclock_secs = (real_time_ts.tv_sec - boot_time_ts.tv_sec) +
114 (real_time_ts.tv_nsec - boot_time_ts.tv_nsec + nsecs) /
115 1000000000;
116
117
118 if (!localtime_r(&wallclock_secs, &load_tm)) {
119 snprintf(buf, size, "%llu", nsecs / 1000000000);
120 return;
121 }
122
123 if (json_output)
124 strftime(buf, size, "%s", &load_tm);
125 else
126 strftime(buf, size, "%FT%T%z", &load_tm);
127 }
128
129 static void show_prog_maps(int fd, __u32 num_maps)
130 {
131 struct bpf_prog_info info = {};
132 __u32 len = sizeof(info);
133 __u32 map_ids[num_maps];
134 unsigned int i;
135 int err;
136
137 info.nr_map_ids = num_maps;
138 info.map_ids = ptr_to_u64(map_ids);
139
140 err = bpf_obj_get_info_by_fd(fd, &info, &len);
141 if (err || !info.nr_map_ids)
142 return;
143
144 if (json_output) {
145 jsonw_name(json_wtr, "map_ids");
146 jsonw_start_array(json_wtr);
147 for (i = 0; i < info.nr_map_ids; i++)
148 jsonw_uint(json_wtr, map_ids[i]);
149 jsonw_end_array(json_wtr);
150 } else {
151 printf(" map_ids ");
152 for (i = 0; i < info.nr_map_ids; i++)
153 printf("%u%s", map_ids[i],
154 i == info.nr_map_ids - 1 ? "" : ",");
155 }
156 }
157
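/* Scan the program's maps for the single-entry ".rodata" array map that
 * carries skeleton metadata and return a freshly allocated copy of its
 * value. On success, *map_info describes the matching map; the caller
 * frees the returned buffer.
 */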
158 static void *find_metadata(int prog_fd, struct bpf_map_info *map_info)
159 {
160 struct bpf_prog_info prog_info;
161 __u32 prog_info_len;
162 __u32 map_info_len;
163 void *value = NULL;
164 __u32 *map_ids;
165 int nr_maps;
166 int key = 0;
167 int map_fd;
168 int ret;
169 __u32 i;
170
171 memset(&prog_info, 0, sizeof(prog_info));
172 prog_info_len = sizeof(prog_info);
173 ret = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &prog_info_len);
174 if (ret)
175 return NULL;
176
177 if (!prog_info.nr_map_ids)
178 return NULL;
179
180 map_ids = calloc(prog_info.nr_map_ids, sizeof(__u32));
181 if (!map_ids)
182 return NULL;
183
184 nr_maps = prog_info.nr_map_ids;
185 memset(&prog_info, 0, sizeof(prog_info));
186 prog_info.nr_map_ids = nr_maps;
187 prog_info.map_ids = ptr_to_u64(map_ids);
188 prog_info_len = sizeof(prog_info);
189
190 ret = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &prog_info_len);
191 if (ret)
192 goto free_map_ids;
193
194 for (i = 0; i < prog_info.nr_map_ids; i++) {
195 map_fd = bpf_map_get_fd_by_id(map_ids[i]);
196 if (map_fd < 0)
197 goto free_map_ids;
198
199 memset(map_info, 0, sizeof(*map_info));
200 map_info_len = sizeof(*map_info);
201 ret = bpf_obj_get_info_by_fd(map_fd, map_info, &map_info_len);
202 if (ret < 0) {
203 close(map_fd);
204 goto free_map_ids;
205 }
206
207 if (map_info->type != BPF_MAP_TYPE_ARRAY ||
208 map_info->key_size != sizeof(int) ||
209 map_info->max_entries != 1 ||
210 !map_info->btf_value_type_id ||
211 !strstr(map_info->name, ".rodata")) {
212 close(map_fd);
213 continue;
214 }
215
216 value = malloc(map_info->value_size);
217 if (!value) {
218 close(map_fd);
219 goto free_map_ids;
220 }
221
222 if (bpf_map_lookup_elem(map_fd, &key, value)) {
223 close(map_fd);
224 free(value);
225 value = NULL;
226 goto free_map_ids;
227 }
228
229 close(map_fd);
230 break;
231 }
232
233 free_map_ids:
234 free(map_ids);
235 return value;
236 }
237
238 static bool has_metadata_prefix(const char *s)
239 {
240 return strncmp(s, BPF_METADATA_PREFIX, BPF_METADATA_PREFIX_LEN) == 0;
241 }
242
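/* Decode the metadata map's DATASEC with BTF and print every variable
 * whose name starts with BPF_METADATA_PREFIX, with the prefix stripped,
 * in JSON or plain-text output.
 */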
243 static void show_prog_metadata(int fd, __u32 num_maps)
244 {
245 const struct btf_type *t_datasec, *t_var;
246 struct bpf_map_info map_info;
247 struct btf_var_secinfo *vsi;
248 bool printed_header = false;
249 struct btf *btf = NULL;
250 unsigned int i, vlen;
251 void *value = NULL;
252 const char *name;
253 int err;
254
255 if (!num_maps)
256 return;
257
258 memset(&map_info, 0, sizeof(map_info));
259 value = find_metadata(fd, &map_info);
260 if (!value)
261 return;
262
263 err = btf__get_from_id(map_info.btf_id, &btf);
264 if (err || !btf)
265 goto out_free;
266
267 t_datasec = btf__type_by_id(btf, map_info.btf_value_type_id);
268 if (!btf_is_datasec(t_datasec))
269 goto out_free;
270
271 vlen = btf_vlen(t_datasec);
272 vsi = btf_var_secinfos(t_datasec);
273
274 /* We don't proceed to check the kinds of the elements of the DATASEC.
275 * The verifier enforces them to be BTF_KIND_VAR.
276 */
277
278 if (json_output) {
279 struct btf_dumper d = {
280 .btf = btf,
281 .jw = json_wtr,
282 .is_plain_text = false,
283 };
284
285 for (i = 0; i < vlen; i++, vsi++) {
286 t_var = btf__type_by_id(btf, vsi->type);
287 name = btf__name_by_offset(btf, t_var->name_off);
288
289 if (!has_metadata_prefix(name))
290 continue;
291
292 if (!printed_header) {
293 jsonw_name(json_wtr, "metadata");
294 jsonw_start_object(json_wtr);
295 printed_header = true;
296 }
297
298 jsonw_name(json_wtr, name + BPF_METADATA_PREFIX_LEN);
299 err = btf_dumper_type(&d, t_var->type, value + vsi->offset);
300 if (err) {
301 p_err("btf dump failed: %d", err);
302 break;
303 }
304 }
305 if (printed_header)
306 jsonw_end_object(json_wtr);
307 } else {
308 json_writer_t *btf_wtr = jsonw_new(stdout);
309 struct btf_dumper d = {
310 .btf = btf,
311 .jw = btf_wtr,
312 .is_plain_text = true,
313 };
314
315 if (!btf_wtr) {
316 p_err("jsonw alloc failed");
317 goto out_free;
318 }
319
320 for (i = 0; i < vlen; i++, vsi++) {
321 t_var = btf__type_by_id(btf, vsi->type);
322 name = btf__name_by_offset(btf, t_var->name_off);
323
324 if (!has_metadata_prefix(name))
325 continue;
326
327 if (!printed_header) {
328 printf("\tmetadata:");
329 printed_header = true;
330 }
331
332 printf("\n\t\t%s = ", name + BPF_METADATA_PREFIX_LEN);
333
334 jsonw_reset(btf_wtr);
335 err = btf_dumper_type(&d, t_var->type, value + vsi->offset);
336 if (err) {
337 p_err("btf dump failed: %d", err);
338 break;
339 }
340 }
341 if (printed_header)
342 jsonw_destroy(&btf_wtr);
343 }
344
345 out_free:
346 btf__free(btf);
347 free(value);
348 }
349
350 static void print_prog_header_json(struct bpf_prog_info *info)
351 {
352 jsonw_uint_field(json_wtr, "id", info->id);
353 if (info->type < ARRAY_SIZE(prog_type_name))
354 jsonw_string_field(json_wtr, "type",
355 prog_type_name[info->type]);
356 else
357 jsonw_uint_field(json_wtr, "type", info->type);
358
359 if (*info->name)
360 jsonw_string_field(json_wtr, "name", info->name);
361
362 jsonw_name(json_wtr, "tag");
363 jsonw_printf(json_wtr, "\"" BPF_TAG_FMT "\"",
364 info->tag[0], info->tag[1], info->tag[2], info->tag[3],
365 info->tag[4], info->tag[5], info->tag[6], info->tag[7]);
366
367 jsonw_bool_field(json_wtr, "gpl_compatible", info->gpl_compatible);
368 if (info->run_time_ns) {
369 jsonw_uint_field(json_wtr, "run_time_ns", info->run_time_ns);
370 jsonw_uint_field(json_wtr, "run_cnt", info->run_cnt);
371 }
372 if (info->recursion_misses)
373 jsonw_uint_field(json_wtr, "recursion_misses", info->recursion_misses);
374 }
375
376 static void print_prog_json(struct bpf_prog_info *info, int fd)
377 {
378 char *memlock;
379
380 jsonw_start_object(json_wtr);
381 print_prog_header_json(info);
382 print_dev_json(info->ifindex, info->netns_dev, info->netns_ino);
383
384 if (info->load_time) {
385 char buf[32];
386
387 print_boot_time(info->load_time, buf, sizeof(buf));
388
389 /* Piggy back on load_time, since 0 uid is a valid one */
390 jsonw_name(json_wtr, "loaded_at");
391 jsonw_printf(json_wtr, "%s", buf);
392 jsonw_uint_field(json_wtr, "uid", info->created_by_uid);
393 }
394
395 jsonw_uint_field(json_wtr, "bytes_xlated", info->xlated_prog_len);
396
397 if (info->jited_prog_len) {
398 jsonw_bool_field(json_wtr, "jited", true);
399 jsonw_uint_field(json_wtr, "bytes_jited", info->jited_prog_len);
400 } else {
401 jsonw_bool_field(json_wtr, "jited", false);
402 }
403
404 memlock = get_fdinfo(fd, "memlock");
405 if (memlock)
406 jsonw_int_field(json_wtr, "bytes_memlock", atoi(memlock));
407 free(memlock);
408
409 if (info->nr_map_ids)
410 show_prog_maps(fd, info->nr_map_ids);
411
412 if (info->btf_id)
413 jsonw_int_field(json_wtr, "btf_id", info->btf_id);
414
415 if (!hash_empty(prog_table.table)) {
416 struct pinned_obj *obj;
417
418 jsonw_name(json_wtr, "pinned");
419 jsonw_start_array(json_wtr);
420 hash_for_each_possible(prog_table.table, obj, hash, info->id) {
421 if (obj->id == info->id)
422 jsonw_string(json_wtr, obj->path);
423 }
424 jsonw_end_array(json_wtr);
425 }
426
427 emit_obj_refs_json(&refs_table, info->id, json_wtr);
428
429 show_prog_metadata(fd, info->nr_map_ids);
430
431 jsonw_end_object(json_wtr);
432 }
433
434 static void print_prog_header_plain(struct bpf_prog_info *info)
435 {
436 printf("%u: ", info->id);
437 if (info->type < ARRAY_SIZE(prog_type_name))
438 printf("%s ", prog_type_name[info->type]);
439 else
440 printf("type %u ", info->type);
441
442 if (*info->name)
443 printf("name %s ", info->name);
444
445 printf("tag ");
446 fprint_hex(stdout, info->tag, BPF_TAG_SIZE, "");
447 print_dev_plain(info->ifindex, info->netns_dev, info->netns_ino);
448 printf("%s", info->gpl_compatible ? " gpl" : "");
449 if (info->run_time_ns)
450 printf(" run_time_ns %lld run_cnt %lld",
451 info->run_time_ns, info->run_cnt);
452 if (info->recursion_misses)
453 printf(" recursion_misses %lld", info->recursion_misses);
454 printf("\n");
455 }
456
457 static void print_prog_plain(struct bpf_prog_info *info, int fd)
458 {
459 char *memlock;
460
461 print_prog_header_plain(info);
462
463 if (info->load_time) {
464 char buf[32];
465
466 print_boot_time(info->load_time, buf, sizeof(buf));
467
468 /* Piggy back on load_time, since 0 uid is a valid one */
469 printf("\tloaded_at %s uid %u\n", buf, info->created_by_uid);
470 }
471
472 printf("\txlated %uB", info->xlated_prog_len);
473
474 if (info->jited_prog_len)
475 printf(" jited %uB", info->jited_prog_len);
476 else
477 printf(" not jited");
478
479 memlock = get_fdinfo(fd, "memlock");
480 if (memlock)
481 printf(" memlock %sB", memlock);
482 free(memlock);
483
484 if (info->nr_map_ids)
485 show_prog_maps(fd, info->nr_map_ids);
486
487 if (!hash_empty(prog_table.table)) {
488 struct pinned_obj *obj;
489
490 hash_for_each_possible(prog_table.table, obj, hash, info->id) {
491 if (obj->id == info->id)
492 printf("\n\tpinned %s", obj->path);
493 }
494 }
495
496 if (info->btf_id)
497 printf("\n\tbtf_id %d", info->btf_id);
498
499 emit_obj_refs_plain(&refs_table, info->id, "\n\tpids ");
500
501 printf("\n");
502
503 show_prog_metadata(fd, info->nr_map_ids);
504 }
505
506 static int show_prog(int fd)
507 {
508 struct bpf_prog_info info = {};
509 __u32 len = sizeof(info);
510 int err;
511
512 err = bpf_obj_get_info_by_fd(fd, &info, &len);
513 if (err) {
514 p_err("can't get prog info: %s", strerror(errno));
515 return -1;
516 }
517
518 if (json_output)
519 print_prog_json(&info, fd);
520 else
521 print_prog_plain(&info, fd);
522
523 return 0;
524 }
525
526 static int do_show_subset(int argc, char **argv)
527 {
528 int *fds = NULL;
529 int nb_fds, i;
530 int err = -1;
531
532 fds = malloc(sizeof(int));
533 if (!fds) {
534 p_err("mem alloc failed");
535 return -1;
536 }
537 nb_fds = prog_parse_fds(&argc, &argv, &fds);
538 if (nb_fds < 1)
539 goto exit_free;
540
541 if (json_output && nb_fds > 1)
542 jsonw_start_array(json_wtr); /* root array */
543 for (i = 0; i < nb_fds; i++) {
544 err = show_prog(fds[i]);
545 if (err) {
546 for (; i < nb_fds; i++)
547 close(fds[i]);
548 break;
549 }
550 close(fds[i]);
551 }
552 if (json_output && nb_fds > 1)
553 jsonw_end_array(json_wtr); /* root array */
554
555 exit_free:
556 free(fds);
557 return err;
558 }
559
560 static int do_show(int argc, char **argv)
561 {
562 __u32 id = 0;
563 int err;
564 int fd;
565
566 if (show_pinned)
567 build_pinned_obj_table(&prog_table, BPF_OBJ_PROG);
568 build_obj_refs_table(&refs_table, BPF_OBJ_PROG);
569
570 if (argc == 2)
571 return do_show_subset(argc, argv);
572
573 if (argc)
574 return BAD_ARG();
575
576 if (json_output)
577 jsonw_start_array(json_wtr);
578 while (true) {
579 err = bpf_prog_get_next_id(id, &id);
580 if (err) {
581 if (errno == ENOENT) {
582 err = 0;
583 break;
584 }
585 p_err("can't get next program: %s%s", strerror(errno),
586 errno == EINVAL ? " -- kernel too old?" : "");
587 err = -1;
588 break;
589 }
590
591 fd = bpf_prog_get_fd_by_id(id);
592 if (fd < 0) {
593 if (errno == ENOENT)
594 continue;
595 p_err("can't get prog by id (%u): %s",
596 id, strerror(errno));
597 err = -1;
598 break;
599 }
600
601 err = show_prog(fd);
602 close(fd);
603 if (err)
604 break;
605 }
606
607 if (json_output)
608 jsonw_end_array(json_wtr);
609
610 delete_obj_refs_table(&refs_table);
611
612 return err;
613 }
614
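/* Dump a program's JITed or translated instructions: write the raw bytes
 * to filepath if one is given, render a control-flow graph for "visual"
 * (xlated only), or disassemble/pretty-print the instructions, optionally
 * with opcodes and line information.
 */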
615 static int
616 prog_dump(struct bpf_prog_info *info, enum dump_mode mode,
617 char *filepath, bool opcodes, bool visual, bool linum)
618 {
619 struct bpf_prog_linfo *prog_linfo = NULL;
620 const char *disasm_opt = NULL;
621 struct dump_data dd = {};
622 void *func_info = NULL;
623 struct btf *btf = NULL;
624 char func_sig[1024];
625 unsigned char *buf;
626 __u32 member_len;
627 ssize_t n;
628 int fd;
629
630 if (mode == DUMP_JITED) {
631 if (info->jited_prog_len == 0 || !info->jited_prog_insns) {
632 p_info("no instructions returned");
633 return -1;
634 }
635 buf = u64_to_ptr(info->jited_prog_insns);
636 member_len = info->jited_prog_len;
637 } else { /* DUMP_XLATED */
638 if (info->xlated_prog_len == 0 || !info->xlated_prog_insns) {
639 p_err("error retrieving insn dump: kernel.kptr_restrict set?");
640 return -1;
641 }
642 buf = u64_to_ptr(info->xlated_prog_insns);
643 member_len = info->xlated_prog_len;
644 }
645
646 if (info->btf_id && btf__get_from_id(info->btf_id, &btf)) {
647 p_err("failed to get btf");
648 return -1;
649 }
650
651 func_info = u64_to_ptr(info->func_info);
652
653 if (info->nr_line_info) {
654 prog_linfo = bpf_prog_linfo__new(info);
655 if (!prog_linfo)
656 p_info("error in processing bpf_line_info. continue without it.");
657 }
658
659 if (filepath) {
660 fd = open(filepath, O_WRONLY | O_CREAT | O_TRUNC, 0600);
661 if (fd < 0) {
662 p_err("can't open file %s: %s", filepath,
663 strerror(errno));
664 return -1;
665 }
666
667 n = write(fd, buf, member_len);
668 close(fd);
669 if (n != (ssize_t)member_len) {
670 p_err("error writing output file: %s",
671 n < 0 ? strerror(errno) : "short write");
672 return -1;
673 }
674
675 if (json_output)
676 jsonw_null(json_wtr);
677 } else if (mode == DUMP_JITED) {
678 const char *name = NULL;
679
680 if (info->ifindex) {
681 name = ifindex_to_bfd_params(info->ifindex,
682 info->netns_dev,
683 info->netns_ino,
684 &disasm_opt);
685 if (!name)
686 return -1;
687 }
688
689 if (info->nr_jited_func_lens && info->jited_func_lens) {
690 struct kernel_sym *sym = NULL;
691 struct bpf_func_info *record;
692 char sym_name[SYM_MAX_NAME];
693 unsigned char *img = buf;
694 __u64 *ksyms = NULL;
695 __u32 *lens;
696 __u32 i;
697 if (info->nr_jited_ksyms) {
698 kernel_syms_load(&dd);
699 ksyms = u64_to_ptr(info->jited_ksyms);
700 }
701
702 if (json_output)
703 jsonw_start_array(json_wtr);
704
705 lens = u64_to_ptr(info->jited_func_lens);
706 for (i = 0; i < info->nr_jited_func_lens; i++) {
707 if (ksyms) {
708 sym = kernel_syms_search(&dd, ksyms[i]);
709 if (sym)
710 sprintf(sym_name, "%s", sym->name);
711 else
712 sprintf(sym_name, "0x%016llx", ksyms[i]);
713 } else {
714 strcpy(sym_name, "unknown");
715 }
716
717 if (func_info) {
718 record = func_info + i * info->func_info_rec_size;
719 btf_dumper_type_only(btf, record->type_id,
720 func_sig,
721 sizeof(func_sig));
722 }
723
724 if (json_output) {
725 jsonw_start_object(json_wtr);
726 if (func_info && func_sig[0] != '\0') {
727 jsonw_name(json_wtr, "proto");
728 jsonw_string(json_wtr, func_sig);
729 }
730 jsonw_name(json_wtr, "name");
731 jsonw_string(json_wtr, sym_name);
732 jsonw_name(json_wtr, "insns");
733 } else {
734 if (func_info && func_sig[0] != '\0')
735 printf("%s:\n", func_sig);
736 printf("%s:\n", sym_name);
737 }
738
739 disasm_print_insn(img, lens[i], opcodes,
740 name, disasm_opt, btf,
741 prog_linfo, ksyms[i], i,
742 linum);
743
744 img += lens[i];
745
746 if (json_output)
747 jsonw_end_object(json_wtr);
748 else
749 printf("\n");
750 }
751
752 if (json_output)
753 jsonw_end_array(json_wtr);
754 } else {
755 disasm_print_insn(buf, member_len, opcodes, name,
756 disasm_opt, btf, NULL, 0, 0, false);
757 }
758 } else if (visual) {
759 if (json_output)
760 jsonw_null(json_wtr);
761 else
762 dump_xlated_cfg(buf, member_len);
763 } else {
764 kernel_syms_load(&dd);
765 dd.nr_jited_ksyms = info->nr_jited_ksyms;
766 dd.jited_ksyms = u64_to_ptr(info->jited_ksyms);
767 dd.btf = btf;
768 dd.func_info = func_info;
769 dd.finfo_rec_size = info->func_info_rec_size;
770 dd.prog_linfo = prog_linfo;
771
772 if (json_output)
773 dump_xlated_json(&dd, buf, member_len, opcodes,
774 linum);
775 else
776 dump_xlated_plain(&dd, buf, member_len, opcodes,
777 linum);
778 kernel_syms_destroy(&dd);
779 }
780
781 return 0;
782 }
783
784 static int do_dump(int argc, char **argv)
785 {
786 struct bpf_prog_info_linear *info_linear;
787 char *filepath = NULL;
788 bool opcodes = false;
789 bool visual = false;
790 enum dump_mode mode;
791 bool linum = false;
792 int *fds = NULL;
793 int nb_fds, i = 0;
794 int err = -1;
795 __u64 arrays;
796
797 if (is_prefix(*argv, "jited")) {
798 if (disasm_init())
799 return -1;
800 mode = DUMP_JITED;
801 } else if (is_prefix(*argv, "xlated")) {
802 mode = DUMP_XLATED;
803 } else {
804 p_err("expected 'xlated' or 'jited', got: %s", *argv);
805 return -1;
806 }
807 NEXT_ARG();
808
809 if (argc < 2)
810 usage();
811
812 fds = malloc(sizeof(int));
813 if (!fds) {
814 p_err("mem alloc failed");
815 return -1;
816 }
817 nb_fds = prog_parse_fds(&argc, &argv, &fds);
818 if (nb_fds < 1)
819 goto exit_free;
820
821 if (is_prefix(*argv, "file")) {
822 NEXT_ARG();
823 if (!argc) {
824 p_err("expected file path");
825 goto exit_close;
826 }
827 if (nb_fds > 1) {
828 p_err("several programs matched");
829 goto exit_close;
830 }
831
832 filepath = *argv;
833 NEXT_ARG();
834 } else if (is_prefix(*argv, "opcodes")) {
835 opcodes = true;
836 NEXT_ARG();
837 } else if (is_prefix(*argv, "visual")) {
838 if (nb_fds > 1) {
839 p_err("several programs matched");
840 goto exit_close;
841 }
842
843 visual = true;
844 NEXT_ARG();
845 } else if (is_prefix(*argv, "linum")) {
846 linum = true;
847 NEXT_ARG();
848 }
849
850 if (argc) {
851 usage();
852 goto exit_close;
853 }
854
855 if (mode == DUMP_JITED)
856 arrays = 1UL << BPF_PROG_INFO_JITED_INSNS;
857 else
858 arrays = 1UL << BPF_PROG_INFO_XLATED_INSNS;
859
860 arrays |= 1UL << BPF_PROG_INFO_JITED_KSYMS;
861 arrays |= 1UL << BPF_PROG_INFO_JITED_FUNC_LENS;
862 arrays |= 1UL << BPF_PROG_INFO_FUNC_INFO;
863 arrays |= 1UL << BPF_PROG_INFO_LINE_INFO;
864 arrays |= 1UL << BPF_PROG_INFO_JITED_LINE_INFO;
865
866 if (json_output && nb_fds > 1)
867 jsonw_start_array(json_wtr); /* root array */
868 for (i = 0; i < nb_fds; i++) {
869 info_linear = bpf_program__get_prog_info_linear(fds[i], arrays);
870 if (IS_ERR_OR_NULL(info_linear)) {
871 p_err("can't get prog info: %s", strerror(errno));
872 break;
873 }
874
875 if (json_output && nb_fds > 1) {
876 jsonw_start_object(json_wtr); /* prog object */
877 print_prog_header_json(&info_linear->info);
878 jsonw_name(json_wtr, "insns");
879 } else if (nb_fds > 1) {
880 print_prog_header_plain(&info_linear->info);
881 }
882
883 err = prog_dump(&info_linear->info, mode, filepath, opcodes,
884 visual, linum);
885
886 if (json_output && nb_fds > 1)
887 jsonw_end_object(json_wtr); /* prog object */
888 else if (i != nb_fds - 1 && nb_fds > 1)
889 printf("\n");
890
891 free(info_linear);
892 if (err)
893 break;
894 close(fds[i]);
895 }
896 if (json_output && nb_fds > 1)
897 jsonw_end_array(json_wtr); /* root array */
898
899 exit_close:
900 for (; i < nb_fds; i++)
901 close(fds[i]);
902 exit_free:
903 free(fds);
904 return err;
905 }
906
907 static int do_pin(int argc, char **argv)
908 {
909 int err;
910
911 err = do_pin_any(argc, argv, prog_parse_fd);
912 if (!err && json_output)
913 jsonw_null(json_wtr);
914 return err;
915 }
916
917 struct map_replace {
918 int idx;
919 int fd;
920 char *name;
921 };
922
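/* qsort() comparator: order map replacements by target map index. */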
923 static int map_replace_compar(const void *p1, const void *p2)
924 {
925 const struct map_replace *a = p1, *b = p2;
926
927 return a->idx - b->idx;
928 }
929
930 static int parse_attach_detach_args(int argc, char **argv, int *progfd,
931 enum bpf_attach_type *attach_type,
932 int *mapfd)
933 {
934 if (!REQ_ARGS(3))
935 return -EINVAL;
936
937 *progfd = prog_parse_fd(&argc, &argv);
938 if (*progfd < 0)
939 return *progfd;
940
941 *attach_type = parse_attach_type(*argv);
942 if (*attach_type == __MAX_BPF_ATTACH_TYPE) {
943 p_err("invalid attach/detach type");
944 return -EINVAL;
945 }
946
947 if (*attach_type == BPF_FLOW_DISSECTOR) {
948 *mapfd = 0;
949 return 0;
950 }
951
952 NEXT_ARG();
953 if (!REQ_ARGS(2))
954 return -EINVAL;
955
956 *mapfd = map_parse_fd(&argc, &argv);
957 if (*mapfd < 0)
958 return *mapfd;
959
960 return 0;
961 }
962
963 static int do_attach(int argc, char **argv)
964 {
965 enum bpf_attach_type attach_type;
966 int err, progfd;
967 int mapfd;
968
969 err = parse_attach_detach_args(argc, argv,
970 &progfd, &attach_type, &mapfd);
971 if (err)
972 return err;
973
974 err = bpf_prog_attach(progfd, mapfd, attach_type, 0);
975 if (err) {
976 p_err("failed prog attach to map");
977 return -EINVAL;
978 }
979
980 if (json_output)
981 jsonw_null(json_wtr);
982 return 0;
983 }
984
985 static int do_detach(int argc, char **argv)
986 {
987 enum bpf_attach_type attach_type;
988 int err, progfd;
989 int mapfd;
990
991 err = parse_attach_detach_args(argc, argv,
992 &progfd, &attach_type, &mapfd);
993 if (err)
994 return err;
995
996 err = bpf_prog_detach2(progfd, mapfd, attach_type);
997 if (err) {
998 p_err("failed prog detach from map");
999 return -EINVAL;
1000 }
1001
1002 if (json_output)
1003 jsonw_null(json_wtr);
1004 return 0;
1005 }
1006
1007 static int check_single_stdin(char *file_data_in, char *file_ctx_in)
1008 {
1009 if (file_data_in && file_ctx_in &&
1010 !strcmp(file_data_in, "-") && !strcmp(file_ctx_in, "-")) {
1011 p_err("cannot use standard input for both data_in and ctx_in");
1012 return -1;
1013 }
1014
1015 return 0;
1016 }
1017
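/* Read test-run input (data_in or ctx_in) from the given file, or from
 * stdin when the name is "-", into a heap buffer that is doubled as
 * needed. The buffer and its size are returned through data_ptr and size.
 */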
1018 static int get_run_data(const char *fname, void **data_ptr, unsigned int *size)
1019 {
1020 size_t block_size = 256;
1021 size_t buf_size = block_size;
1022 size_t nb_read = 0;
1023 void *tmp;
1024 FILE *f;
1025
1026 if (!fname) {
1027 *data_ptr = NULL;
1028 *size = 0;
1029 return 0;
1030 }
1031
1032 if (!strcmp(fname, "-"))
1033 f = stdin;
1034 else
1035 f = fopen(fname, "r");
1036 if (!f) {
1037 p_err("failed to open %s: %s", fname, strerror(errno));
1038 return -1;
1039 }
1040
1041 *data_ptr = malloc(block_size);
1042 if (!*data_ptr) {
1043 p_err("failed to allocate memory for data_in/ctx_in: %s",
1044 strerror(errno));
1045 goto err_fclose;
1046 }
1047
1048 while ((nb_read += fread(*data_ptr + nb_read, 1, block_size, f))) {
1049 if (feof(f))
1050 break;
1051 if (ferror(f)) {
1052 p_err("failed to read data_in/ctx_in from %s: %s",
1053 fname, strerror(errno));
1054 goto err_free;
1055 }
1056 if (nb_read > buf_size - block_size) {
1057 if (buf_size == UINT32_MAX) {
1058 p_err("data_in/ctx_in is too long (max: %d)",
1059 UINT32_MAX);
1060 goto err_free;
1061 }
1062 /* No space for fread()-ing next chunk; realloc() */
1063 buf_size *= 2;
1064 tmp = realloc(*data_ptr, buf_size);
1065 if (!tmp) {
1066 p_err("failed to reallocate data_in/ctx_in: %s",
1067 strerror(errno));
1068 goto err_free;
1069 }
1070 *data_ptr = tmp;
1071 }
1072 }
1073 if (f != stdin)
1074 fclose(f);
1075
1076 *size = nb_read;
1077 return 0;
1078
1079 err_free:
1080 free(*data_ptr);
1081 *data_ptr = NULL;
1082 err_fclose:
1083 if (f != stdin)
1084 fclose(f);
1085 return -1;
1086 }
1087
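/* Print a hex dump of the buffer to f, 16 bytes per row, with hex bytes
 * grouped in pairs and an ASCII column ('.' for non-printable bytes).
 */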
1088 static void hex_print(void *data, unsigned int size, FILE *f)
1089 {
1090 size_t i, j;
1091 char c;
1092
1093 for (i = 0; i < size; i += 16) {
1094 /* Row offset */
1095 fprintf(f, "%07zx\t", i);
1096
1097 /* Hexadecimal values */
1098 for (j = i; j < i + 16 && j < size; j++)
1099 fprintf(f, "%02x%s", *(uint8_t *)(data + j),
1100 j % 2 ? " " : "");
1101 for (; j < i + 16; j++)
1102 fprintf(f, " %s", j % 2 ? " " : "");
1103
1104 /* ASCII values (if relevant), '.' otherwise */
1105 fprintf(f, "| ");
1106 for (j = i; j < i + 16 && j < size; j++) {
1107 c = *(char *)(data + j);
1108 if (c < ' ' || c > '~')
1109 c = '.';
1110 fprintf(f, "%c%s", c, j == i + 7 ? " " : "");
1111 }
1112
1113 fprintf(f, "\n");
1114 }
1115 }
1116
1117 static int
1118 print_run_output(void *data, unsigned int size, const char *fname,
1119 const char *json_key)
1120 {
1121 size_t nb_written;
1122 FILE *f;
1123
1124 if (!fname)
1125 return 0;
1126
1127 if (!strcmp(fname, "-")) {
1128 f = stdout;
1129 if (json_output) {
1130 jsonw_name(json_wtr, json_key);
1131 print_data_json(data, size);
1132 } else {
1133 hex_print(data, size, f);
1134 }
1135 return 0;
1136 }
1137
1138 f = fopen(fname, "w");
1139 if (!f) {
1140 p_err("failed to open %s: %s", fname, strerror(errno));
1141 return -1;
1142 }
1143
1144 nb_written = fwrite(data, 1, size, f);
1145 fclose(f);
1146 if (nb_written != size) {
1147 p_err("failed to write output data/ctx: %s", strerror(errno));
1148 return -1;
1149 }
1150
1151 return 0;
1152 }
1153
1154 static int alloc_run_data(void **data_ptr, unsigned int size_out)
1155 {
1156 *data_ptr = calloc(size_out, 1);
1157 if (!*data_ptr) {
1158 p_err("failed to allocate memory for output data/ctx: %s",
1159 strerror(errno));
1160 return -1;
1161 }
1162
1163 return 0;
1164 }
1165
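/* Implement "bpftool prog run": parse input/output file and size options,
 * load data and context, run the program via bpf_prog_test_run_xattr(),
 * then print the resulting data/context, return value and duration.
 */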
1166 static int do_run(int argc, char **argv)
1167 {
1168 char *data_fname_in = NULL, *data_fname_out = NULL;
1169 char *ctx_fname_in = NULL, *ctx_fname_out = NULL;
1170 struct bpf_prog_test_run_attr test_attr = {0};
1171 const unsigned int default_size = SZ_32K;
1172 void *data_in = NULL, *data_out = NULL;
1173 void *ctx_in = NULL, *ctx_out = NULL;
1174 unsigned int repeat = 1;
1175 int fd, err;
1176
1177 if (!REQ_ARGS(4))
1178 return -1;
1179
1180 fd = prog_parse_fd(&argc, &argv);
1181 if (fd < 0)
1182 return -1;
1183
1184 while (argc) {
1185 if (detect_common_prefix(*argv, "data_in", "data_out",
1186 "data_size_out", NULL))
1187 return -1;
1188 if (detect_common_prefix(*argv, "ctx_in", "ctx_out",
1189 "ctx_size_out", NULL))
1190 return -1;
1191
1192 if (is_prefix(*argv, "data_in")) {
1193 NEXT_ARG();
1194 if (!REQ_ARGS(1))
1195 return -1;
1196
1197 data_fname_in = GET_ARG();
1198 if (check_single_stdin(data_fname_in, ctx_fname_in))
1199 return -1;
1200 } else if (is_prefix(*argv, "data_out")) {
1201 NEXT_ARG();
1202 if (!REQ_ARGS(1))
1203 return -1;
1204
1205 data_fname_out = GET_ARG();
1206 } else if (is_prefix(*argv, "data_size_out")) {
1207 char *endptr;
1208
1209 NEXT_ARG();
1210 if (!REQ_ARGS(1))
1211 return -1;
1212
1213 test_attr.data_size_out = strtoul(*argv, &endptr, 0);
1214 if (*endptr) {
1215 p_err("can't parse %s as output data size",
1216 *argv);
1217 return -1;
1218 }
1219 NEXT_ARG();
1220 } else if (is_prefix(*argv, "ctx_in")) {
1221 NEXT_ARG();
1222 if (!REQ_ARGS(1))
1223 return -1;
1224
1225 ctx_fname_in = GET_ARG();
1226 if (check_single_stdin(data_fname_in, ctx_fname_in))
1227 return -1;
1228 } else if (is_prefix(*argv, "ctx_out")) {
1229 NEXT_ARG();
1230 if (!REQ_ARGS(1))
1231 return -1;
1232
1233 ctx_fname_out = GET_ARG();
1234 } else if (is_prefix(*argv, "ctx_size_out")) {
1235 char *endptr;
1236
1237 NEXT_ARG();
1238 if (!REQ_ARGS(1))
1239 return -1;
1240
1241 test_attr.ctx_size_out = strtoul(*argv, &endptr, 0);
1242 if (*endptr) {
1243 p_err("can't parse %s as output context size",
1244 *argv);
1245 return -1;
1246 }
1247 NEXT_ARG();
1248 } else if (is_prefix(*argv, "repeat")) {
1249 char *endptr;
1250
1251 NEXT_ARG();
1252 if (!REQ_ARGS(1))
1253 return -1;
1254
1255 repeat = strtoul(*argv, &endptr, 0);
1256 if (*endptr) {
1257 p_err("can't parse %s as repeat number",
1258 *argv);
1259 return -1;
1260 }
1261 NEXT_ARG();
1262 } else {
1263 p_err("expected no more arguments, 'data_in', 'data_out', 'data_size_out', 'ctx_in', 'ctx_out', 'ctx_size_out' or 'repeat', got: '%s'?",
1264 *argv);
1265 return -1;
1266 }
1267 }
1268
1269 err = get_run_data(data_fname_in, &data_in, &test_attr.data_size_in);
1270 if (err)
1271 return -1;
1272
1273 if (data_in) {
1274 if (!test_attr.data_size_out)
1275 test_attr.data_size_out = default_size;
1276 err = alloc_run_data(&data_out, test_attr.data_size_out);
1277 if (err)
1278 goto free_data_in;
1279 }
1280
1281 err = get_run_data(ctx_fname_in, &ctx_in, &test_attr.ctx_size_in);
1282 if (err)
1283 goto free_data_out;
1284
1285 if (ctx_in) {
1286 if (!test_attr.ctx_size_out)
1287 test_attr.ctx_size_out = default_size;
1288 err = alloc_run_data(&ctx_out, test_attr.ctx_size_out);
1289 if (err)
1290 goto free_ctx_in;
1291 }
1292
1293 test_attr.prog_fd = fd;
1294 test_attr.repeat = repeat;
1295 test_attr.data_in = data_in;
1296 test_attr.data_out = data_out;
1297 test_attr.ctx_in = ctx_in;
1298 test_attr.ctx_out = ctx_out;
1299
1300 err = bpf_prog_test_run_xattr(&test_attr);
1301 if (err) {
1302 p_err("failed to run program: %s", strerror(errno));
1303 goto free_ctx_out;
1304 }
1305
1306 err = 0;
1307
1308 if (json_output)
1309 jsonw_start_object(json_wtr); /* root */
1310
1311 /* Do not exit on errors occurring when printing output data/context;
1312 * we still want to print the return value and duration of the program run.
1313 */
1314 if (test_attr.data_size_out)
1315 err += print_run_output(test_attr.data_out,
1316 test_attr.data_size_out,
1317 data_fname_out, "data_out");
1318 if (test_attr.ctx_size_out)
1319 err += print_run_output(test_attr.ctx_out,
1320 test_attr.ctx_size_out,
1321 ctx_fname_out, "ctx_out");
1322
1323 if (json_output) {
1324 jsonw_uint_field(json_wtr, "retval", test_attr.retval);
1325 jsonw_uint_field(json_wtr, "duration", test_attr.duration);
1326 jsonw_end_object(json_wtr); /* root */
1327 } else {
1328 fprintf(stdout, "Return value: %u, duration%s: %uns\n",
1329 test_attr.retval,
1330 repeat > 1 ? " (average)" : "", test_attr.duration);
1331 }
1332
1333 free_ctx_out:
1334 free(ctx_out);
1335 free_ctx_in:
1336 free(ctx_in);
1337 free_data_out:
1338 free(data_out);
1339 free_data_in:
1340 free(data_in);
1341
1342 return err;
1343 }
1344
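/* Resolve program and expected attach type from a section name, retrying
 * with libbpf debug prints enabled on failure so the reason is visible.
 */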
1345 static int
1346 get_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
1347 enum bpf_attach_type *expected_attach_type)
1348 {
1349 libbpf_print_fn_t print_backup;
1350 int ret;
1351
1352 ret = libbpf_prog_type_by_name(name, prog_type, expected_attach_type);
1353 if (!ret)
1354 return ret;
1355
1356 /* libbpf_prog_type_by_name() failed, let's re-run with debug level */
1357 print_backup = libbpf_set_print(print_all_levels);
1358 ret = libbpf_prog_type_by_name(name, prog_type, expected_attach_type);
1359 libbpf_set_print(print_backup);
1360
1361 return ret;
1362 }
1363
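/* Common implementation of "prog load" and "prog loadall": parse the
 * type/map/dev/pinmaps options, open the object file, apply program type,
 * ifindex and map-reuse settings, load the object, then pin either the
 * first program (load) or all programs (loadall), plus maps if requested.
 */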
1364 static int load_with_options(int argc, char **argv, bool first_prog_only)
1365 {
1366 enum bpf_prog_type common_prog_type = BPF_PROG_TYPE_UNSPEC;
1367 DECLARE_LIBBPF_OPTS(bpf_object_open_opts, open_opts,
1368 .relaxed_maps = relaxed_maps,
1369 );
1370 struct bpf_object_load_attr load_attr = { 0 };
1371 enum bpf_attach_type expected_attach_type;
1372 struct map_replace *map_replace = NULL;
1373 struct bpf_program *prog = NULL, *pos;
1374 unsigned int old_map_fds = 0;
1375 const char *pinmaps = NULL;
1376 struct bpf_object *obj;
1377 struct bpf_map *map;
1378 const char *pinfile;
1379 unsigned int i, j;
1380 __u32 ifindex = 0;
1381 const char *file;
1382 int idx, err;
1383
1384
1385 if (!REQ_ARGS(2))
1386 return -1;
1387 file = GET_ARG();
1388 pinfile = GET_ARG();
1389
1390 while (argc) {
1391 if (is_prefix(*argv, "type")) {
1392 char *type;
1393
1394 NEXT_ARG();
1395
1396 if (common_prog_type != BPF_PROG_TYPE_UNSPEC) {
1397 p_err("program type already specified");
1398 goto err_free_reuse_maps;
1399 }
1400 if (!REQ_ARGS(1))
1401 goto err_free_reuse_maps;
1402
1403 /* Put a '/' at the end of type to appease libbpf */
1404 type = malloc(strlen(*argv) + 2);
1405 if (!type) {
1406 p_err("mem alloc failed");
1407 goto err_free_reuse_maps;
1408 }
1409 *type = 0;
1410 strcat(type, *argv);
1411 strcat(type, "/");
1412
1413 err = get_prog_type_by_name(type, &common_prog_type,
1414 &expected_attach_type);
1415 free(type);
1416 if (err < 0)
1417 goto err_free_reuse_maps;
1418
1419 NEXT_ARG();
1420 } else if (is_prefix(*argv, "map")) {
1421 void *new_map_replace;
1422 char *endptr, *name;
1423 int fd;
1424
1425 NEXT_ARG();
1426
1427 if (!REQ_ARGS(4))
1428 goto err_free_reuse_maps;
1429
1430 if (is_prefix(*argv, "idx")) {
1431 NEXT_ARG();
1432
1433 idx = strtoul(*argv, &endptr, 0);
1434 if (*endptr) {
1435 p_err("can't parse %s as IDX", *argv);
1436 goto err_free_reuse_maps;
1437 }
1438 name = NULL;
1439 } else if (is_prefix(*argv, "name")) {
1440 NEXT_ARG();
1441
1442 name = *argv;
1443 idx = -1;
1444 } else {
1445 p_err("expected 'idx' or 'name', got: '%s'?",
1446 *argv);
1447 goto err_free_reuse_maps;
1448 }
1449 NEXT_ARG();
1450
1451 fd = map_parse_fd(&argc, &argv);
1452 if (fd < 0)
1453 goto err_free_reuse_maps;
1454
1455 new_map_replace = reallocarray(map_replace,
1456 old_map_fds + 1,
1457 sizeof(*map_replace));
1458 if (!new_map_replace) {
1459 p_err("mem alloc failed");
1460 goto err_free_reuse_maps;
1461 }
1462 map_replace = new_map_replace;
1463
1464 map_replace[old_map_fds].idx = idx;
1465 map_replace[old_map_fds].name = name;
1466 map_replace[old_map_fds].fd = fd;
1467 old_map_fds++;
1468 } else if (is_prefix(*argv, "dev")) {
1469 NEXT_ARG();
1470
1471 if (ifindex) {
1472 p_err("offload device already specified");
1473 goto err_free_reuse_maps;
1474 }
1475 if (!REQ_ARGS(1))
1476 goto err_free_reuse_maps;
1477
1478 ifindex = if_nametoindex(*argv);
1479 if (!ifindex) {
1480 p_err("unrecognized netdevice '%s': %s",
1481 *argv, strerror(errno));
1482 goto err_free_reuse_maps;
1483 }
1484 NEXT_ARG();
1485 } else if (is_prefix(*argv, "pinmaps")) {
1486 NEXT_ARG();
1487
1488 if (!REQ_ARGS(1))
1489 goto err_free_reuse_maps;
1490
1491 pinmaps = GET_ARG();
1492 } else {
1493 p_err("expected no more arguments, 'type', 'map' or 'dev', got: '%s'?",
1494 *argv);
1495 goto err_free_reuse_maps;
1496 }
1497 }
1498
1499 set_max_rlimit();
1500
1501 obj = bpf_object__open_file(file, &open_opts);
1502 if (IS_ERR_OR_NULL(obj)) {
1503 p_err("failed to open object file");
1504 goto err_free_reuse_maps;
1505 }
1506
1507 bpf_object__for_each_program(pos, obj) {
1508 enum bpf_prog_type prog_type = common_prog_type;
1509
1510 if (prog_type == BPF_PROG_TYPE_UNSPEC) {
1511 const char *sec_name = bpf_program__section_name(pos);
1512
1513 err = get_prog_type_by_name(sec_name, &prog_type,
1514 &expected_attach_type);
1515 if (err < 0)
1516 goto err_close_obj;
1517 }
1518
1519 bpf_program__set_ifindex(pos, ifindex);
1520 bpf_program__set_type(pos, prog_type);
1521 bpf_program__set_expected_attach_type(pos, expected_attach_type);
1522 }
1523
1524 qsort(map_replace, old_map_fds, sizeof(*map_replace),
1525 map_replace_compar);
1526
1527 /* After the sort, maps by name will be first on the list, because they
1528 * have idx == -1. Resolve them.
1529 */
1530 j = 0;
1531 while (j < old_map_fds && map_replace[j].name) {
1532 i = 0;
1533 bpf_object__for_each_map(map, obj) {
1534 if (!strcmp(bpf_map__name(map), map_replace[j].name)) {
1535 map_replace[j].idx = i;
1536 break;
1537 }
1538 i++;
1539 }
1540 if (map_replace[j].idx == -1) {
1541 p_err("unable to find map '%s'", map_replace[j].name);
1542 goto err_close_obj;
1543 }
1544 j++;
1545 }
1546 /* Resort if any names were resolved */
1547 if (j)
1548 qsort(map_replace, old_map_fds, sizeof(*map_replace),
1549 map_replace_compar);
1550
1551 /* Set ifindex and name reuse */
1552 j = 0;
1553 idx = 0;
1554 bpf_object__for_each_map(map, obj) {
1555 if (!bpf_map__is_offload_neutral(map))
1556 bpf_map__set_ifindex(map, ifindex);
1557
1558 if (j < old_map_fds && idx == map_replace[j].idx) {
1559 err = bpf_map__reuse_fd(map, map_replace[j++].fd);
1560 if (err) {
1561 p_err("unable to set up map reuse: %d", err);
1562 goto err_close_obj;
1563 }
1564
1565 /* Next reuse wants to apply to the same map */
1566 if (j < old_map_fds && map_replace[j].idx == idx) {
1567 p_err("replacement for map idx %d specified more than once",
1568 idx);
1569 goto err_close_obj;
1570 }
1571 }
1572
1573 idx++;
1574 }
1575 if (j < old_map_fds) {
1576 p_err("map idx '%d' not used", map_replace[j].idx);
1577 goto err_close_obj;
1578 }
1579
1580 load_attr.obj = obj;
1581 if (verifier_logs)
1582 /* log_level1 + log_level2 + stats, but not stable UAPI */
1583 load_attr.log_level = 1 + 2 + 4;
1584
1585 err = bpf_object__load_xattr(&load_attr);
1586 if (err) {
1587 p_err("failed to load object file");
1588 goto err_close_obj;
1589 }
1590
1591 err = mount_bpffs_for_pin(pinfile);
1592 if (err)
1593 goto err_close_obj;
1594
1595 if (first_prog_only) {
1596 prog = bpf_program__next(NULL, obj);
1597 if (!prog) {
1598 p_err("object file doesn't contain any bpf program");
1599 goto err_close_obj;
1600 }
1601
1602 err = bpf_obj_pin(bpf_program__fd(prog), pinfile);
1603 if (err) {
1604 p_err("failed to pin program %s",
1605 bpf_program__section_name(prog));
1606 goto err_close_obj;
1607 }
1608 } else {
1609 err = bpf_object__pin_programs(obj, pinfile);
1610 if (err) {
1611 p_err("failed to pin all programs");
1612 goto err_close_obj;
1613 }
1614 }
1615
1616 if (pinmaps) {
1617 err = bpf_object__pin_maps(obj, pinmaps);
1618 if (err) {
1619 p_err("failed to pin all maps");
1620 goto err_unpin;
1621 }
1622 }
1623
1624 if (json_output)
1625 jsonw_null(json_wtr);
1626
1627 bpf_object__close(obj);
1628 for (i = 0; i < old_map_fds; i++)
1629 close(map_replace[i].fd);
1630 free(map_replace);
1631
1632 return 0;
1633
1634 err_unpin:
1635 if (first_prog_only)
1636 unlink(pinfile);
1637 else
1638 bpf_object__unpin_programs(obj, pinfile);
1639 err_close_obj:
1640 bpf_object__close(obj);
1641 err_free_reuse_maps:
1642 for (i = 0; i < old_map_fds; i++)
1643 close(map_replace[i].fd);
1644 free(map_replace);
1645 return -1;
1646 }
1647
1648 static int do_load(int argc, char **argv)
1649 {
1650 return load_with_options(argc, argv, true);
1651 }
1652
1653 static int do_loadall(int argc, char **argv)
1654 {
1655 return load_with_options(argc, argv, false);
1656 }
1657
1658 #ifdef BPFTOOL_WITHOUT_SKELETONS
1659
1660 static int do_profile(int argc, char **argv)
1661 {
1662 p_err("bpftool prog profile command is not supported. Please build bpftool with clang >= 10.0.0");
1663 return 0;
1664 }
1665
1666 #else /* BPFTOOL_WITHOUT_SKELETONS */
1667
1668 #include "profiler.skel.h"
1669
1670 struct profile_metric {
1671 const char *name;
1672 struct bpf_perf_event_value val;
1673 struct perf_event_attr attr;
1674 bool selected;
1675
1676 /* calculate ratios like instructions per cycle */
1677 const int ratio_metric; /* 0 for N/A, 1 for index 0 (cycles) */
1678 const char *ratio_desc;
1679 const float ratio_mul;
1680 } metrics[] = {
1681 {
1682 .name = "cycles",
1683 .attr = {
1684 .type = PERF_TYPE_HARDWARE,
1685 .config = PERF_COUNT_HW_CPU_CYCLES,
1686 .exclude_user = 1,
1687 },
1688 },
1689 {
1690 .name = "instructions",
1691 .attr = {
1692 .type = PERF_TYPE_HARDWARE,
1693 .config = PERF_COUNT_HW_INSTRUCTIONS,
1694 .exclude_user = 1,
1695 },
1696 .ratio_metric = 1,
1697 .ratio_desc = "insns per cycle",
1698 .ratio_mul = 1.0,
1699 },
1700 {
1701 .name = "l1d_loads",
1702 .attr = {
1703 .type = PERF_TYPE_HW_CACHE,
1704 .config =
1705 PERF_COUNT_HW_CACHE_L1D |
1706 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
1707 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16),
1708 .exclude_user = 1,
1709 },
1710 },
1711 {
1712 .name = "llc_misses",
1713 .attr = {
1714 .type = PERF_TYPE_HW_CACHE,
1715 .config =
1716 PERF_COUNT_HW_CACHE_LL |
1717 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
1718 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16),
1719 .exclude_user = 1
1720 },
1721 .ratio_metric = 2,
1722 .ratio_desc = "LLC misses per million insns",
1723 .ratio_mul = 1e6,
1724 },
1725 {
1726 .name = "itlb_misses",
1727 .attr = {
1728 .type = PERF_TYPE_HW_CACHE,
1729 .config =
1730 PERF_COUNT_HW_CACHE_ITLB |
1731 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
1732 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16),
1733 .exclude_user = 1
1734 },
1735 .ratio_metric = 2,
1736 .ratio_desc = "itlb misses per million insns",
1737 .ratio_mul = 1e6,
1738 },
1739 {
1740 .name = "dtlb_misses",
1741 .attr = {
1742 .type = PERF_TYPE_HW_CACHE,
1743 .config =
1744 PERF_COUNT_HW_CACHE_DTLB |
1745 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
1746 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16),
1747 .exclude_user = 1
1748 },
1749 .ratio_metric = 2,
1750 .ratio_desc = "dtlb misses per million insns",
1751 .ratio_mul = 1e6,
1752 },
1753 };
1754
1755 static __u64 profile_total_count;
1756
1757 #define MAX_NUM_PROFILE_METRICS 4
1758
1759 static int profile_parse_metrics(int argc, char **argv)
1760 {
1761 unsigned int metric_cnt;
1762 int selected_cnt = 0;
1763 unsigned int i;
1764
1765 metric_cnt = sizeof(metrics) / sizeof(struct profile_metric);
1766
1767 while (argc > 0) {
1768 for (i = 0; i < metric_cnt; i++) {
1769 if (is_prefix(argv[0], metrics[i].name)) {
1770 if (!metrics[i].selected)
1771 selected_cnt++;
1772 metrics[i].selected = true;
1773 break;
1774 }
1775 }
1776 if (i == metric_cnt) {
1777 p_err("unknown metric %s", argv[0]);
1778 return -1;
1779 }
1780 NEXT_ARG();
1781 }
1782 if (selected_cnt > MAX_NUM_PROFILE_METRICS) {
1783 p_err("too many (%d) metrics, please specify no more than %d metrics at at time",
1784 selected_cnt, MAX_NUM_PROFILE_METRICS);
1785 return -1;
1786 }
1787 return selected_cnt;
1788 }
1789
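/* Read the per-CPU run counts and accumulated perf readings from the
 * profiler skeleton's maps and fold them into profile_total_count and
 * metrics[].val.
 */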
1790 static void profile_read_values(struct profiler_bpf *obj)
1791 {
1792 __u32 m, cpu, num_cpu = obj->rodata->num_cpu;
1793 int reading_map_fd, count_map_fd;
1794 __u64 counts[num_cpu];
1795 __u32 key = 0;
1796 int err;
1797
1798 reading_map_fd = bpf_map__fd(obj->maps.accum_readings);
1799 count_map_fd = bpf_map__fd(obj->maps.counts);
1800 if (reading_map_fd < 0 || count_map_fd < 0) {
1801 p_err("failed to get fd for map");
1802 return;
1803 }
1804
1805 err = bpf_map_lookup_elem(count_map_fd, &key, counts);
1806 if (err) {
1807 p_err("failed to read count_map: %s", strerror(errno));
1808 return;
1809 }
1810
1811 profile_total_count = 0;
1812 for (cpu = 0; cpu < num_cpu; cpu++)
1813 profile_total_count += counts[cpu];
1814
1815 for (m = 0; m < ARRAY_SIZE(metrics); m++) {
1816 struct bpf_perf_event_value values[num_cpu];
1817
1818 if (!metrics[m].selected)
1819 continue;
1820
1821 err = bpf_map_lookup_elem(reading_map_fd, &key, values);
1822 if (err) {
1823 p_err("failed to read reading_map: %s",
1824 strerror(errno));
1825 return;
1826 }
1827 for (cpu = 0; cpu < num_cpu; cpu++) {
1828 metrics[m].val.counter += values[cpu].counter;
1829 metrics[m].val.enabled += values[cpu].enabled;
1830 metrics[m].val.running += values[cpu].running;
1831 }
1832 key++;
1833 }
1834 }
1835
1836 static void profile_print_readings_json(void)
1837 {
1838 __u32 m;
1839
1840 jsonw_start_array(json_wtr);
1841 for (m = 0; m < ARRAY_SIZE(metrics); m++) {
1842 if (!metrics[m].selected)
1843 continue;
1844 jsonw_start_object(json_wtr);
1845 jsonw_string_field(json_wtr, "metric", metrics[m].name);
1846 jsonw_lluint_field(json_wtr, "run_cnt", profile_total_count);
1847 jsonw_lluint_field(json_wtr, "value", metrics[m].val.counter);
1848 jsonw_lluint_field(json_wtr, "enabled", metrics[m].val.enabled);
1849 jsonw_lluint_field(json_wtr, "running", metrics[m].val.running);
1850
1851 jsonw_end_object(json_wtr);
1852 }
1853 jsonw_end_array(json_wtr);
1854 }
1855
1856 static void profile_print_readings_plain(void)
1857 {
1858 __u32 m;
1859
1860 printf("\n%18llu %-20s\n", profile_total_count, "run_cnt");
1861 for (m = 0; m < ARRAY_SIZE(metrics); m++) {
1862 struct bpf_perf_event_value *val = &metrics[m].val;
1863 int r;
1864
1865 if (!metrics[m].selected)
1866 continue;
1867 printf("%18llu %-20s", val->counter, metrics[m].name);
1868
1869 r = metrics[m].ratio_metric - 1;
1870 if (r >= 0 && metrics[r].selected &&
1871 metrics[r].val.counter > 0) {
1872 printf("# %8.2f %-30s",
1873 val->counter * metrics[m].ratio_mul /
1874 metrics[r].val.counter,
1875 metrics[m].ratio_desc);
1876 } else {
1877 printf("%-41s", "");
1878 }
1879
1880 if (val->enabled > val->running)
1881 printf("(%4.2f%%)",
1882 val->running * 100.0 / val->enabled);
1883 printf("\n");
1884 }
1885 }
1886
1887 static void profile_print_readings(void)
1888 {
1889 if (json_output)
1890 profile_print_readings_json();
1891 else
1892 profile_print_readings_plain();
1893 }
1894
1895 static char *profile_target_name(int tgt_fd)
1896 {
1897 struct bpf_prog_info_linear *info_linear;
1898 struct bpf_func_info *func_info;
1899 const struct btf_type *t;
1900 char *name = NULL;
1901 struct btf *btf;
1902
1903 info_linear = bpf_program__get_prog_info_linear(
1904 tgt_fd, 1UL << BPF_PROG_INFO_FUNC_INFO);
1905 if (IS_ERR_OR_NULL(info_linear)) {
1906 p_err("failed to get info_linear for prog FD %d", tgt_fd);
1907 return NULL;
1908 }
1909
1910 if (info_linear->info.btf_id == 0 ||
1911 btf__get_from_id(info_linear->info.btf_id, &btf)) {
1912 p_err("prog FD %d doesn't have valid btf", tgt_fd);
1913 goto out;
1914 }
1915
1916 func_info = u64_to_ptr(info_linear->info.func_info);
1917 t = btf__type_by_id(btf, func_info[0].type_id);
1918 if (!t) {
1919 p_err("btf %d doesn't have type %d",
1920 info_linear->info.btf_id, func_info[0].type_id);
1921 goto out;
1922 }
1923 name = strdup(btf__name_by_offset(btf, t->name_off));
1924 out:
1925 free(info_linear);
1926 return name;
1927 }
1928
1929 static struct profiler_bpf *profile_obj;
1930 static int profile_tgt_fd = -1;
1931 static char *profile_tgt_name;
1932 static int *profile_perf_events;
1933 static int profile_perf_event_cnt;
1934
1935 static void profile_close_perf_events(struct profiler_bpf *obj)
1936 {
1937 int i;
1938
1939 for (i = profile_perf_event_cnt - 1; i >= 0; i--)
1940 close(profile_perf_events[i]);
1941
1942 free(profile_perf_events);
1943 profile_perf_event_cnt = 0;
1944 }
1945
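/* Open one perf event per selected metric on every possible CPU, store
 * each fd in the profiler's "events" map and enable it.
 */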
1946 static int profile_open_perf_events(struct profiler_bpf *obj)
1947 {
1948 unsigned int cpu, m;
1949 int map_fd, pmu_fd;
1950
1951 profile_perf_events = calloc(
1952 obj->rodata->num_cpu * obj->rodata->num_metric, sizeof(int));
1953 if (!profile_perf_events) {
1954 p_err("failed to allocate memory for perf_event array: %s",
1955 strerror(errno));
1956 return -1;
1957 }
1958 map_fd = bpf_map__fd(obj->maps.events);
1959 if (map_fd < 0) {
1960 p_err("failed to get fd for events map");
1961 return -1;
1962 }
1963
1964 for (m = 0; m < ARRAY_SIZE(metrics); m++) {
1965 if (!metrics[m].selected)
1966 continue;
1967 for (cpu = 0; cpu < obj->rodata->num_cpu; cpu++) {
1968 pmu_fd = syscall(__NR_perf_event_open, &metrics[m].attr,
1969 -1/*pid*/, cpu, -1/*group_fd*/, 0);
1970 if (pmu_fd < 0 ||
1971 bpf_map_update_elem(map_fd, &profile_perf_event_cnt,
1972 &pmu_fd, BPF_ANY) ||
1973 ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0)) {
1974 p_err("failed to create event %s on cpu %d",
1975 metrics[m].name, cpu);
1976 return -1;
1977 }
1978 profile_perf_events[profile_perf_event_cnt++] = pmu_fd;
1979 }
1980 }
1981 return 0;
1982 }
1983
1984 static void profile_print_and_cleanup(void)
1985 {
1986 profile_close_perf_events(profile_obj);
1987 profile_read_values(profile_obj);
1988 profile_print_readings();
1989 profiler_bpf__destroy(profile_obj);
1990
1991 close(profile_tgt_fd);
1992 free(profile_tgt_name);
1993 }
1994
1995 static void int_exit(int signo)
1996 {
1997 profile_print_and_cleanup();
1998 exit(0);
1999 }
2000
2001 static int do_profile(int argc, char **argv)
2002 {
2003 int num_metric, num_cpu, err = -1;
2004 struct bpf_program *prog;
2005 unsigned long duration;
2006 char *endptr;
2007
2008 /* we need at least two args for the prog and one metric */
2009 if (!REQ_ARGS(3))
2010 return -EINVAL;
2011
2012 /* parse target fd */
2013 profile_tgt_fd = prog_parse_fd(&argc, &argv);
2014 if (profile_tgt_fd < 0) {
2015 p_err("failed to parse fd");
2016 return -1;
2017 }
2018
2019 /* parse optional profiling duration */
2020 if (argc > 2 && is_prefix(argv[0], "duration")) {
2021 NEXT_ARG();
2022 duration = strtoul(*argv, &endptr, 0);
2023 if (*endptr)
2024 usage();
2025 NEXT_ARG();
2026 } else {
2027 duration = UINT_MAX;
2028 }
2029
2030 num_metric = profile_parse_metrics(argc, argv);
2031 if (num_metric <= 0)
2032 goto out;
2033
2034 num_cpu = libbpf_num_possible_cpus();
2035 if (num_cpu <= 0) {
2036 p_err("failed to identify number of CPUs");
2037 goto out;
2038 }
2039
2040 profile_obj = profiler_bpf__open();
2041 if (!profile_obj) {
2042 p_err("failed to open and/or load BPF object");
2043 goto out;
2044 }
2045
2046 profile_obj->rodata->num_cpu = num_cpu;
2047 profile_obj->rodata->num_metric = num_metric;
2048
2049 /* adjust map sizes */
2050 bpf_map__resize(profile_obj->maps.events, num_metric * num_cpu);
2051 bpf_map__resize(profile_obj->maps.fentry_readings, num_metric);
2052 bpf_map__resize(profile_obj->maps.accum_readings, num_metric);
2053 bpf_map__resize(profile_obj->maps.counts, 1);
2054
2055 /* change target name */
2056 profile_tgt_name = profile_target_name(profile_tgt_fd);
2057 if (!profile_tgt_name)
2058 goto out;
2059
2060 bpf_object__for_each_program(prog, profile_obj->obj) {
2061 err = bpf_program__set_attach_target(prog, profile_tgt_fd,
2062 profile_tgt_name);
2063 if (err) {
2064 p_err("failed to set attach target\n");
2065 goto out;
2066 }
2067 }
2068
2069 set_max_rlimit();
2070 err = profiler_bpf__load(profile_obj);
2071 if (err) {
2072 p_err("failed to load profile_obj");
2073 goto out;
2074 }
2075
2076 err = profile_open_perf_events(profile_obj);
2077 if (err)
2078 goto out;
2079
2080 err = profiler_bpf__attach(profile_obj);
2081 if (err) {
2082 p_err("failed to attach profile_obj");
2083 goto out;
2084 }
2085 signal(SIGINT, int_exit);
2086
2087 sleep(duration);
2088 profile_print_and_cleanup();
2089 return 0;
2090
2091 out:
2092 profile_close_perf_events(profile_obj);
2093 if (profile_obj)
2094 profiler_bpf__destroy(profile_obj);
2095 close(profile_tgt_fd);
2096 free(profile_tgt_name);
2097 return err;
2098 }
2099
2100 #endif /* BPFTOOL_WITHOUT_SKELETONS */
2101
2102 static int do_help(int argc, char **argv)
2103 {
2104 if (json_output) {
2105 jsonw_null(json_wtr);
2106 return 0;
2107 }
2108
2109 fprintf(stderr,
2110 "Usage: %1$s %2$s { show | list } [PROG]\n"
2111 " %1$s %2$s dump xlated PROG [{ file FILE | opcodes | visual | linum }]\n"
2112 " %1$s %2$s dump jited PROG [{ file FILE | opcodes | linum }]\n"
2113 " %1$s %2$s pin PROG FILE\n"
2114 " %1$s %2$s { load | loadall } OBJ PATH \\\n"
2115 " [type TYPE] [dev NAME] \\\n"
2116 " [map { idx IDX | name NAME } MAP]\\\n"
2117 " [pinmaps MAP_DIR]\n"
2118 " %1$s %2$s attach PROG ATTACH_TYPE [MAP]\n"
2119 " %1$s %2$s detach PROG ATTACH_TYPE [MAP]\n"
2120 " %1$s %2$s run PROG \\\n"
2121 " data_in FILE \\\n"
2122 " [data_out FILE [data_size_out L]] \\\n"
2123 " [ctx_in FILE [ctx_out FILE [ctx_size_out M]]] \\\n"
2124 " [repeat N]\n"
2125 " %1$s %2$s profile PROG [duration DURATION] METRICs\n"
2126 " %1$s %2$s tracelog\n"
2127 " %1$s %2$s help\n"
2128 "\n"
2129 " " HELP_SPEC_MAP "\n"
2130 " " HELP_SPEC_PROGRAM "\n"
2131 " TYPE := { socket | kprobe | kretprobe | classifier | action |\n"
2132 " tracepoint | raw_tracepoint | xdp | perf_event | cgroup/skb |\n"
2133 " cgroup/sock | cgroup/dev | lwt_in | lwt_out | lwt_xmit |\n"
2134 " lwt_seg6local | sockops | sk_skb | sk_msg | lirc_mode2 |\n"
2135 " sk_reuseport | flow_dissector | cgroup/sysctl |\n"
2136 " cgroup/bind4 | cgroup/bind6 | cgroup/post_bind4 |\n"
2137 " cgroup/post_bind6 | cgroup/connect4 | cgroup/connect6 |\n"
2138 " cgroup/getpeername4 | cgroup/getpeername6 |\n"
2139 " cgroup/getsockname4 | cgroup/getsockname6 | cgroup/sendmsg4 |\n"
2140 " cgroup/sendmsg6 | cgroup/recvmsg4 | cgroup/recvmsg6 |\n"
2141 " cgroup/getsockopt | cgroup/setsockopt |\n"
2142 " struct_ops | fentry | fexit | freplace | sk_lookup }\n"
2143 " ATTACH_TYPE := { msg_verdict | stream_verdict | stream_parser |\n"
2144 " flow_dissector }\n"
2145 " METRIC := { cycles | instructions | l1d_loads | llc_misses | itlb_misses | dtlb_misses }\n"
2146 " " HELP_SPEC_OPTIONS "\n"
2147 "",
2148 bin_name, argv[-2]);
2149
2150 return 0;
2151 }
2152
2153 static const struct cmd cmds[] = {
2154 { "show", do_show },
2155 { "list", do_show },
2156 { "help", do_help },
2157 { "dump", do_dump },
2158 { "pin", do_pin },
2159 { "load", do_load },
2160 { "loadall", do_loadall },
2161 { "attach", do_attach },
2162 { "detach", do_detach },
2163 { "tracelog", do_tracelog },
2164 { "run", do_run },
2165 { "profile", do_profile },
2166 { 0 }
2167 };
2168
2169 int do_prog(int argc, char **argv)
2170 {
2171 return cmd_select(cmds, argc, argv, do_help);
2172 }
2173