1 // SPDX-License-Identifier: GPL-2.0
2 #define _GNU_SOURCE
3 #include <pthread.h>
4 #include <sched.h>
5 #include <sys/socket.h>
6 #include <test_progs.h>
7 #include "bpf/libbpf_internal.h"
8 
/* perf_buffer sample callback, invoked once per record drained by
 * perf_buffer__poll(). The BPF program writes the CPU id it ran on into
 * each record; verify it matches the CPU the record was delivered from,
 * and mark that CPU as seen in the cpu_set_t passed via @ctx.
 */
static void on_sample(void *ctx, int cpu, void *data, __u32 size)
{
	/* CHECK() (test_progs.h) references a local named 'duration',
	 * so it must be declared even though it is never read here.
	 */
	int cpu_data = *(int *)data, duration = 0;
	cpu_set_t *cpu_seen = ctx;

	/* The outer guard looks redundant with CHECK()'s own condition,
	 * but it keeps CHECK() from logging a line for every sample on
	 * the happy path — NOTE(review): this assumes CHECK() prints a
	 * PASS message when the condition is false; confirm against the
	 * CHECK() definition in test_progs.h.
	 */
	if (cpu_data != cpu)
		CHECK(cpu_data != cpu, "check_cpu_data",
		      "cpu_data %d != cpu %d\n", cpu_data, cpu);

	CPU_SET(cpu, cpu_seen);
}
20 
/* End-to-end test of the libbpf perf_buffer API:
 *  1. load test_perf_buffer.o and attach its kprobe to sys_nanosleep;
 *  2. create a perf_buffer over the program's perf event array map;
 *  3. trigger the kprobe once per online CPU by pinning this thread to
 *     each CPU in turn and calling usleep();
 *  4. poll the perf buffer and verify a sample arrived from every
 *     online CPU.
 * Resources are released through the goto-chain at the bottom; every
 * successfully acquired resource falls through to the labels below it.
 */
void test_perf_buffer(void)
{
	/* 'duration' is referenced implicitly by the CHECK() macro */
	int err, prog_fd, on_len, nr_on_cpus = 0,  nr_cpus, i, duration = 0;
	const char *prog_name = "kprobe/sys_nanosleep";
	const char *file = "./test_perf_buffer.o";
	struct perf_buffer_opts pb_opts = {};
	struct bpf_map *perf_buf_map;
	cpu_set_t cpu_set, cpu_seen;
	struct bpf_program *prog;
	struct bpf_object *obj;
	struct perf_buffer *pb;
	struct bpf_link *link;
	bool *online;

	/* number of POSSIBLE CPUs — may exceed the online count below */
	nr_cpus = libbpf_num_possible_cpus();
	if (CHECK(nr_cpus < 0, "nr_cpus", "err %d\n", nr_cpus))
		return;

	/* online[i] == true iff CPU i is currently online; on_len is the
	 * length of that array and can be smaller than nr_cpus.
	 * NOTE(review): on this error path 'online' is returned to the
	 * caller unfreed only if parse_cpu_mask_file() allocates before
	 * failing — verify it frees internally on error, otherwise this
	 * early return leaks.
	 */
	err = parse_cpu_mask_file("/sys/devices/system/cpu/online",
				  &online, &on_len);
	if (CHECK(err, "nr_on_cpus", "err %d\n", err))
		return;

	for (i = 0; i < on_len; i++)
		if (online[i])
			nr_on_cpus++;

	/* load program */
	err = bpf_prog_load(file, BPF_PROG_TYPE_KPROBE, &obj, &prog_fd);
	if (CHECK(err, "obj_load", "err %d errno %d\n", err, errno)) {
		/* obj may be indeterminate on failure; NULL it so the
		 * shared cleanup path can close it unconditionally
		 */
		obj = NULL;
		goto out_close;
	}

	prog = bpf_object__find_program_by_title(obj, prog_name);
	if (CHECK(!prog, "find_probe", "prog '%s' not found\n", prog_name))
		goto out_close;

	/* load map */
	perf_buf_map = bpf_object__find_map_by_name(obj, "perf_buf_map");
	if (CHECK(!perf_buf_map, "find_perf_buf_map", "not found\n"))
		goto out_close;

	/* attach kprobe */
	link = bpf_program__attach_kprobe(prog, false /* retprobe */,
					  SYS_NANOSLEEP_KPROBE_NAME);
	if (CHECK(IS_ERR(link), "attach_kprobe", "err %ld\n", PTR_ERR(link)))
		goto out_close;

	/* set up perf buffer: on_sample() records each sample's CPU into
	 * cpu_seen; callbacks only run during perf_buffer__poll(), so
	 * zeroing cpu_seen just before the trigger loop below is safe
	 */
	pb_opts.sample_cb = on_sample;
	pb_opts.ctx = &cpu_seen;
	pb = perf_buffer__new(bpf_map__fd(perf_buf_map), 1, &pb_opts);
	if (CHECK(IS_ERR(pb), "perf_buf__new", "err %ld\n", PTR_ERR(pb)))
		goto out_detach;

	/* trigger kprobe on every CPU */
	CPU_ZERO(&cpu_seen);
	for (i = 0; i < nr_cpus; i++) {
		/* possible CPUs beyond the online mask (or marked offline
		 * in it) cannot run this thread — skip them
		 */
		if (i >= on_len || !online[i]) {
			printf("skipping offline CPU #%d\n", i);
			continue;
		}

		CPU_ZERO(&cpu_set);
		CPU_SET(i, &cpu_set);

		/* pin to CPU i so the nanosleep syscall (and thus the
		 * kprobe) fires on that exact CPU
		 */
		err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set),
					     &cpu_set);
		/* outer 'err &&' suppresses CHECK()'s per-iteration PASS
		 * output on the happy path (same idiom as on_sample())
		 */
		if (err && CHECK(err, "set_affinity", "cpu #%d, err %d\n",
				 i, err))
			goto out_detach;

		/* sys_nanosleep entry is what the kprobe hooks */
		usleep(1);
	}

	/* read perf buffer; 100ms timeout */
	err = perf_buffer__poll(pb, 100);
	if (CHECK(err < 0, "perf_buffer__poll", "err %d\n", err))
		goto out_free_pb;

	/* every online CPU must have produced exactly one seen bit */
	if (CHECK(CPU_COUNT(&cpu_seen) != nr_on_cpus, "seen_cpu_cnt",
		  "expect %d, seen %d\n", nr_on_cpus, CPU_COUNT(&cpu_seen)))
		goto out_free_pb;

out_free_pb:
	perf_buffer__free(pb);
out_detach:
	bpf_link__destroy(link);
out_close:
	/* bpf_object__close(NULL) / free(NULL) are safe no-ops, so every
	 * path after a successful parse_cpu_mask_file() lands here and
	 * releases 'online'
	 */
	bpf_object__close(obj);
	free(online);
}
114