/*
   american fuzzy lop++ - free CPU gizmo
   -------------------------------------

   Originally written by Michal Zalewski

   Now maintained by Marc Heuse <mh@mh-sec.de>,
   Heiko Eißfeldt <heiko.eissfeldt@hexco.de> and
   Andrea Fioraldi <andreafioraldi@gmail.com>

   Copyright 2016, 2017 Google Inc. All rights reserved.
   Copyright 2019-2020 AFLplusplus Project. All rights reserved.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at:

     http://www.apache.org/licenses/LICENSE-2.0

   This tool provides a fairly accurate measurement of CPU preemption rate.
   It is meant to complement the quick-and-dirty load average widget shown
   in the afl-fuzz UI. See docs/parallel_fuzzing.md for more info.

   For some work loads, the tool may actually suggest running more instances
   than you have CPU cores. This can happen if the tested program is spending
   a portion of its run time waiting for I/O, rather than being 100%
   CPU-bound.

   The idea for the getrusage()-based approach comes from Jakub Wilk.

 */
32
33 #define AFL_MAIN
34 #ifndef _GNU_SOURCE
35 #define _GNU_SOURCE
36 #endif
37
38 #include <stdio.h>
39 #include <stdlib.h>
40 #include <unistd.h>
41 #include <string.h>
42 #include <sched.h>
43
44 #include <sys/time.h>
45 #include <sys/times.h>
46 #include <sys/resource.h>
47 #include <sys/wait.h>
48
49 #include "types.h"
50 #include "debug.h"
51 #include "common.h"
52
53 #if defined(__linux__) || defined(__FreeBSD__) || defined(__NetBSD__) || \
54 defined(__APPLE__) || defined(__DragonFly__) || defined(__sun)
55 #define HAVE_AFFINITY 1
56 #if defined(__FreeBSD__) || defined(__DragonFly__)
57 #include <pthread.h>
58 #include <pthread_np.h>
59 #if defined(__FreeBSD__)
60 #include <sys/cpuset.h>
61 #endif
62 #define cpu_set_t cpuset_t
63 #elif defined(__NetBSD__)
64 #include <pthread.h>
65 #elif defined(__APPLE__)
66 #include <pthread.h>
67 #include <mach/thread_act.h>
68 #include <mach/thread_policy.h>
69 #elif defined(__sun)
70 #include <sys/pset.h>
71 #endif
72 #endif /* __linux__ || __FreeBSD__ || __NetBSD__ || __APPLE__ */
73
74 /* Get CPU usage in microseconds. */
75
get_cpu_usage_us(void)76 static u64 get_cpu_usage_us(void) {
77
78 struct rusage u;
79
80 getrusage(RUSAGE_SELF, &u);
81
82 return (u.ru_utime.tv_sec * 1000000ULL) + u.ru_utime.tv_usec +
83 (u.ru_stime.tv_sec * 1000000ULL) + u.ru_stime.tv_usec;
84
85 }
86
87 /* Measure preemption rate. */
88
measure_preemption(u32 target_ms)89 static u32 measure_preemption(u32 target_ms) {
90
91 volatile u32 v1, v2 = 0;
92
93 u64 st_t, en_t, st_c, en_c, real_delta, slice_delta;
94 s32 loop_repeats = 0;
95
96 st_t = get_cur_time_us();
97 st_c = get_cpu_usage_us();
98
99 repeat_loop:
100
101 v1 = CTEST_BUSY_CYCLES;
102
103 while (v1--) {
104
105 v2++;
106
107 }
108
109 sched_yield();
110
111 en_t = get_cur_time_us();
112
113 if (en_t - st_t < target_ms * 1000) {
114
115 loop_repeats++;
116 goto repeat_loop;
117
118 }
119
120 /* Let's see what percentage of this time we actually had a chance to
121 run, and how much time was spent in the penalty box. */
122
123 en_c = get_cpu_usage_us();
124
125 real_delta = (en_t - st_t) / 1000;
126 slice_delta = (en_c - st_c) / 1000;
127
128 return real_delta * 100 / slice_delta;
129
130 }
131
/* Do the benchmark thing. */

int main(int argc, char **argv) {

  /* The tool takes no options; any argument just prints usage info and
     exits with an error. */

  if (argc > 1) {

    printf("afl-gotcpu" VERSION " by Michal Zalewski\n");
    printf("\n%s \n\n", argv[0]);
    printf("afl-gotcpu does not have command line options\n");
    printf("afl-gotcpu prints out which CPUs are available\n");
    return -1;

  }

#ifdef HAVE_AFFINITY

  /* Per-core mode: fork one child per online CPU, pin each child to its
     own core, and let each child measure preemption there. Children
     report their verdict through the exit status (0 = available,
     1 = caution, 2 = overbooked). */

  u32 cpu_cnt = sysconf(_SC_NPROCESSORS_ONLN), idle_cpus = 0, maybe_cpus = 0, i;

  SAYF(cCYA "afl-gotcpu" VERSION cRST " by Michal Zalewski\n");

  ACTF("Measuring per-core preemption rate (this will take %0.02f sec)...",
       ((double)CTEST_CORE_TRG_MS) / 1000);

  for (i = 0; i < cpu_cnt; i++) {

    s32 fr = fork();

    if (fr < 0) { PFATAL("fork failed"); }

    if (!fr) {

      /* Child process: bind to core i via the platform-specific
         affinity API, then run the measurement. */

      u32 util_perc;
#if defined(__linux__) || defined(__FreeBSD__) || defined(__DragonFly__)
      /* Linux/FreeBSD/DragonFly: build a one-core CPU mask.
         (On FreeBSD/DragonFly, cpu_set_t is #defined to cpuset_t above.) */
      cpu_set_t c;

      CPU_ZERO(&c);
      CPU_SET(i, &c);
#elif defined(__NetBSD__)
      /* NetBSD: cpusets are dynamically allocated opaque objects. */
      cpuset_t *c;

      c = cpuset_create();
      if (c == NULL) PFATAL("cpuset_create failed");

      cpuset_set(i, c);
#elif defined(__APPLE__)
      /* macOS: no hard pinning; use a Mach thread affinity tag as a
         placement hint for core i. */
      thread_affinity_policy_data_t c = {i};
      thread_port_t native_thread = pthread_mach_thread_np(pthread_self());
      if (thread_policy_set(native_thread, THREAD_AFFINITY_POLICY,
                            (thread_policy_t)&c, 1) != KERN_SUCCESS)
        PFATAL("thread_policy_set failed");
#elif defined(__sun)
      /* Solaris: create a processor set containing only core i. */
      psetid_t c;

      if (pset_create(&c)) PFATAL("pset_create failed");

      if (pset_assign(c, i, NULL)) PFATAL("pset_assign failed");
#endif

#if defined(__FreeBSD__) || defined(__DragonFly__)
      if (pthread_setaffinity_np(pthread_self(), sizeof(c), &c))
        PFATAL("pthread_setaffinity_np failed");
#endif

#if defined(__NetBSD__)
      if (pthread_setaffinity_np(pthread_self(), cpuset_size(c), c))
        PFATAL("pthread_setaffinity_np failed");

      cpuset_destroy(c);
#endif

#if defined(__sun)
      if (pset_bind(c, P_PID, getpid(), NULL)) PFATAL("pset_bind failed");

      pset_destroy(c);
#endif

#if defined(__linux__)
      if (sched_setaffinity(0, sizeof(c), &c)) {

        PFATAL("sched_setaffinity failed for cpu %d", i);

      }

#endif

      util_perc = measure_preemption(CTEST_CORE_TRG_MS);

      /* Verdict thresholds: under 110% the core is essentially free;
         under 250% it is partially loaded; above that, fully booked. */

      if (util_perc < 110) {

        SAYF("    Core #%u: " cLGN "AVAILABLE " cRST "(%u%%)\n", i, util_perc);
        exit(0);

      } else if (util_perc < 250) {

        SAYF("    Core #%u: " cYEL "CAUTION " cRST "(%u%%)\n", i, util_perc);
        exit(1);

      }

      SAYF("    Core #%u: " cLRD "OVERBOOKED " cRST "(%u%%)\n" cRST, i,
           util_perc);
      exit(2);

    }

  }

  /* Parent: collect one exit status per forked child and tally how many
     cores looked idle (status 0) or at least usable (status <= 1). */

  for (i = 0; i < cpu_cnt; i++) {

    int ret;
    if (waitpid(-1, &ret, 0) < 0) { PFATAL("waitpid failed"); }

    if (WEXITSTATUS(ret) == 0) { idle_cpus++; }
    if (WEXITSTATUS(ret) <= 1) { maybe_cpus++; }

  }

  SAYF(cGRA "\n>>> ");

  if (idle_cpus) {

    if (maybe_cpus == idle_cpus) {

      SAYF(cLGN "PASS: " cRST "You can run more processes on %u core%s.",
           idle_cpus, idle_cpus > 1 ? "s" : "");

    } else {

      SAYF(cLGN "PASS: " cRST "You can run more processes on %u to %u core%s.",
           idle_cpus, maybe_cpus, maybe_cpus > 1 ? "s" : "");

    }

    SAYF(cGRA " <<<" cRST "\n\n");
    return 0;

  }

  if (maybe_cpus) {

    SAYF(cYEL "CAUTION: " cRST "You may still have %u core%s available.",
         maybe_cpus, maybe_cpus > 1 ? "s" : "");
    SAYF(cGRA " <<<" cRST "\n\n");
    return 1;

  }

  SAYF(cLRD "FAIL: " cRST "All cores are overbooked.");
  SAYF(cGRA " <<<" cRST "\n\n");
  return 2;

#else

  /* No affinity support: run a single gross measurement across whatever
     cores the scheduler gives us. */

  u32 util_perc;

  SAYF(cCYA "afl-gotcpu" VERSION cRST " by Michal Zalewski\n");

  /* Run a busy loop for CTEST_TARGET_MS. */

  ACTF("Measuring gross preemption rate (this will take %0.02f sec)...",
       ((double)CTEST_TARGET_MS) / 1000);

  util_perc = measure_preemption(CTEST_TARGET_MS);

  /* Deliver the final verdict. */

  SAYF(cGRA "\n>>> ");

  if (util_perc < 105) {

    SAYF(cLGN "PASS: " cRST "You can probably run additional processes.");

  } else if (util_perc < 130) {

    SAYF(cYEL "CAUTION: " cRST "Your CPU may be somewhat overbooked (%u%%).",
         util_perc);

  } else {

    SAYF(cLRD "FAIL: " cRST "Your CPU is overbooked (%u%%).", util_perc);

  }

  SAYF(cGRA " <<<" cRST "\n\n");

  /* Exit code mirrors the three verdicts above: 0 pass, 1 caution, 2 fail. */

  return (util_perc > 105) + (util_perc > 130);

#endif /* ^HAVE_AFFINITY */

}
322
323