// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>

static __u64 read_perf_max_sample_freq(void)
{
	__u64 sample_freq = 5000; /* fall back to 5000 on error */
	FILE *f;

	f = fopen("/proc/sys/kernel/perf_event_max_sample_rate", "r");
	if (f == NULL)
		return sample_freq;
	fscanf(f, "%llu", &sample_freq);
	fclose(f);
	return sample_freq;
}

void test_stacktrace_build_id_nmi(void)
{
	int control_map_fd, stackid_hmap_fd, stackmap_fd, stack_amap_fd;
	const char *file = "./test_stacktrace_build_id.o";
	int err, pmu_fd, prog_fd;
	struct perf_event_attr attr = {
		.freq = 1,
		.type = PERF_TYPE_HARDWARE,
		.config = PERF_COUNT_HW_CPU_CYCLES,
	};
	__u32 key, previous_key, val, duration = 0;
	struct bpf_object *obj;
	char buf[256];
	int i, j;
	struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH];
	int build_id_matches = 0;
	int retry = 1;

	attr.sample_freq = read_perf_max_sample_freq();

retry:
	err = bpf_prog_load(file, BPF_PROG_TYPE_PERF_EVENT, &obj, &prog_fd);
	if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
		return;

	/* open a hardware cycles sampling event on CPU 0 */
	pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
			 0 /* cpu 0 */, -1 /* group id */,
			 0 /* flags */);
	if (CHECK(pmu_fd < 0, "perf_event_open",
		  "err %d errno %d. Does the test host support PERF_COUNT_HW_CPU_CYCLES?\n",
		  pmu_fd, errno))
		goto close_prog;

	err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
	if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n",
		  err, errno))
		goto close_pmu;

	err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
	if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n",
		  err, errno))
		goto disable_pmu;

	/* find map fds */
	control_map_fd = bpf_find_map(__func__, obj, "control_map");
	if (CHECK(control_map_fd < 0, "bpf_find_map control_map",
		  "err %d errno %d\n", err, errno))
		goto disable_pmu;

	stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap");
	if (CHECK(stackid_hmap_fd < 0, "bpf_find_map stackid_hmap",
		  "err %d errno %d\n", err, errno))
		goto disable_pmu;

	stackmap_fd = bpf_find_map(__func__, obj, "stackmap");
	if (CHECK(stackmap_fd < 0, "bpf_find_map stackmap", "err %d errno %d\n",
		  err, errno))
		goto disable_pmu;

	stack_amap_fd = bpf_find_map(__func__, obj, "stack_amap");
	if (CHECK(stack_amap_fd < 0, "bpf_find_map stack_amap",
		  "err %d errno %d\n", err, errno))
		goto disable_pmu;

	/* generate load on CPU 0 so the sampling event fires while
	 * urandom_read is on the stack
	 */
	assert(system("dd if=/dev/urandom of=/dev/zero count=4 2> /dev/null")
	       == 0);
	assert(system("taskset 0x1 ./urandom_read 100000") == 0);
	/* disable stack trace collection */
	key = 0;
	val = 1;
	bpf_map_update_elem(control_map_fd, &key, &val, 0);

	/* for every element in stackid_hmap, we can find a corresponding one
	 * in stackmap, and vice versa.
	 */
	err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
	if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
		  "err %d errno %d\n", err, errno))
		goto disable_pmu;

	err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
	if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
		  "err %d errno %d\n", err, errno))
		goto disable_pmu;

	err = extract_build_id(buf, 256);

	if (CHECK(err, "get build_id with readelf",
		  "err %d errno %d\n", err, errno))
		goto disable_pmu;

	err = bpf_map_get_next_key(stackmap_fd, NULL, &key);
	if (CHECK(err, "get_next_key from stackmap",
		  "err %d, errno %d\n", err, errno))
		goto disable_pmu;

	/* walk all collected stacks, hex-encode each valid build ID and
	 * compare it against the build ID extracted above
	 */
	do {
		char build_id[64];

		err = bpf_map_lookup_elem(stackmap_fd, &key, id_offs);
		if (CHECK(err, "lookup_elem from stackmap",
			  "err %d, errno %d\n", err, errno))
			goto disable_pmu;
		for (i = 0; i < PERF_MAX_STACK_DEPTH; ++i)
			if (id_offs[i].status == BPF_STACK_BUILD_ID_VALID &&
			    id_offs[i].offset != 0) {
				for (j = 0; j < 20; ++j)
					sprintf(build_id + 2 * j, "%02x",
						id_offs[i].build_id[j] & 0xff);
				if (strstr(buf, build_id) != NULL)
					build_id_matches = 1;
			}
		previous_key = key;
	} while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0);

	/* stack_map_get_build_id_offset() is racy and sometimes can return
	 * BPF_STACK_BUILD_ID_IP instead of BPF_STACK_BUILD_ID_VALID;
	 * try it one more time.
	 */
	if (build_id_matches < 1 && retry--) {
		ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
		close(pmu_fd);
		bpf_object__close(obj);
		printf("%s:WARN:Didn't find expected build ID from the map, retrying\n",
		       __func__);
		goto retry;
	}

	if (CHECK(build_id_matches < 1, "build id match",
		  "Didn't find expected build ID from the map\n"))
		goto disable_pmu;

	/*
	 * We intentionally skip compare_stack_ips(). This is because we
	 * only support one in_nmi() ips-to-build_id translation per cpu
	 * at any time, thus stack_amap here will always fall back to
	 * BPF_STACK_BUILD_ID_IP;
	 */

disable_pmu:
	ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);

close_pmu:
	close(pmu_fd);

close_prog:
	bpf_object__close(obj);
}