1 /*
2 * Copyright (C) 2013-2021 Canonical, Ltd.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version 2
7 * of the License, or (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17 *
18 * This code is a complete clean re-write of the stress tool by
19 * Colin Ian King <colin.king@canonical.com> and attempts to be
20 * backwardly compatible with the stress tool by Amos Waterland
21 * <apw@rossby.metr.ou.edu> but has more stress tests and more
22 * functionality.
23 *
24 */
25 #include "stress-ng.h"
26
/* Command line --help entries for the opcode stressor */
static const stress_help_t help[] = {
	{ NULL,	"opcode N",	   "start N workers exercising random opcodes" },
	{ NULL,	"opcode-ops N",	   "stop after N opcode bogo operations" },
	{ NULL,	"opcode-method M", "set opcode stress method (M = random, inc, mixed, text)" },
	{ NULL,	NULL,		   NULL }
};
33
#if defined(HAVE_LINUX_SECCOMP_H) &&	\
    defined(HAVE_LINUX_AUDIT_H) &&	\
    defined(HAVE_LINUX_FILTER_H) &&	\
    defined(HAVE_MPROTECT) &&		\
    defined(HAVE_SYS_PRCTL_H)

/* Offset of the syscall number field in the seccomp BPF input data */
#define SYSCALL_NR	(offsetof(struct seccomp_data, nr))

/* BPF instruction pair: allow the given syscall, otherwise fall through */
#define ALLOW_SYSCALL(syscall) \
	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, __NR_##syscall, 0, 1), \
	BPF_STMT(BPF_RET+BPF_K, SECCOMP_RET_ALLOW)

#define PAGES		(16)	/* size of opcode mapping in pages, incl. 2 guard pages */
#define TRACK_SIGCOUNT	(0)	/* set non-zero to tally signals raised by children */
#define EXIT_TRAPPED	(255)	/* child exit code (not referenced in this chunk) */

/* opcode generator: fill [ops_begin, ops_end), using and updating *op as seed/state */
typedef void(*stress_opcode_func)(uint8_t *ops_begin, const uint8_t *ops_end, uint32_t *op);

typedef struct {
	const char *name;		/* method name for --opcode-method */
	const stress_opcode_func func;	/* generator implementing the method */
} stress_opcode_method_info_t;
56
/*
 *  Signals the child installs stress_badhandler() for; executing
 *  random opcodes is expected to raise most of these
 */
static const int sigs[] = {
#if defined(SIGILL)
	SIGILL,
#endif
#if defined(SIGTRAP)
	SIGTRAP,
#endif
#if defined(SIGFPE)
	SIGFPE,
#endif
#if defined(SIGBUS)
	SIGBUS,
#endif
#if defined(SIGSEGV)
	SIGSEGV,
#endif
#if defined(SIGIOT)
	SIGIOT,
#endif
#if defined(SIGEMT)
	SIGEMT,
#endif
#if defined(SIGALRM)
	SIGALRM,
#endif
#if defined(SIGINT)
	SIGINT,
#endif
#if defined(SIGHUP)
	SIGHUP,
#endif
#if defined(SIGSYS)
	SIGSYS
#endif
};
92
#if defined(HAVE_LINUX_SECCOMP_H) &&	\
    defined(SECCOMP_SET_MODE_FILTER)
/*
 *  seccomp BPF program: load the syscall number, allow exit_group
 *  (so the child can still die cleanly) and trap every other
 *  syscall, delivering SIGSYS
 */
static struct sock_filter filter[] = {
	BPF_STMT(BPF_LD+BPF_W+BPF_ABS, SYSCALL_NR),
#if defined(__NR_exit_group)
	ALLOW_SYSCALL(exit_group),
#endif
	BPF_STMT(BPF_RET+BPF_K, SECCOMP_RET_TRAP)
};

static struct sock_fprog prog = {
	.len = (unsigned short)SIZEOF_ARRAY(filter),
	.filter = filter
};

#endif
109
/* Highest signal number + 1, with a generous fallback */
#if defined(NSIG)
#define MAX_SIGS	(NSIG)
#elif defined(_NSIG)
#define MAX_SIGS	(_NSIG)
#else
#define MAX_SIGS	(256)
#endif

#if TRACK_SIGCOUNT
/* per-signal counters, in a MAP_SHARED mapping so parent sees child updates */
static uint64_t *sig_count;
#endif
121
/*
 *  stress_badhandler()
 *	child's signal handler: optionally tally the signal (when
 *	TRACK_SIGCOUNT is enabled) and exit immediately, since the
 *	child's state may have been corrupted by the executed opcodes
 */
static void MLOCKED_TEXT NORETURN stress_badhandler(int signum)
{
#if TRACK_SIGCOUNT
	if (signum < MAX_SIGS)
		sig_count[signum]++;
#else
	(void)signum;
#endif
	_exit(1);
}
132
/*
 *  reverse32()
 *	reverse the order of the 32 low-order bits of x;
 *	any bits above bit 31 are ignored
 */
static inline uint32_t reverse32(register uint64_t x)
{
	register uint32_t v = (uint32_t)x;

	/*
	 * Bit reversal as a composition of swaps at each power-of-two
	 * granularity; the swap stages commute, so order is irrelevant
	 */
	v = (v >> 16) | (v << 16);				/* 16-bit halves */
	v = ((v & 0xff00ff00) >> 8) | ((v & 0x00ff00ff) << 8);	/* bytes */
	v = ((v & 0xf0f0f0f0) >> 4) | ((v & 0x0f0f0f0f) << 4);	/* nibbles */
	v = ((v & 0xcccccccc) >> 2) | ((v & 0x33333333) << 2);	/* bit pairs */
	v = ((v & 0xaaaaaaaa) >> 1) | ((v & 0x55555555) << 1);	/* single bits */

	return v;
}
141
/*
 *  stress_opcode_random()
 *	fill the opcode region with purely random bytes; *op is
 *	not used by this method
 */
static void stress_opcode_random(
	uint8_t *ops_begin,
	const uint8_t *ops_end,
	uint32_t *op)
{
	register uint8_t *ptr;

	(void)op;	/* unused for this method */

	for (ptr = ops_begin; ptr < ops_end; ptr++)
		*ptr = stress_mwc8();
}
153
/*
 *  stress_opcode_inc()
 *	fill the opcode region with 32 bit monotonically increasing
 *	values starting from *op; *op is updated to the next value
 *	so successive calls continue the sequence
 */
static void stress_opcode_inc(
	uint8_t *ops_begin,
	const uint8_t *ops_end,
	uint32_t *op)
{
	register uint32_t val = *op;
	register uint32_t *dst = (uint32_t *)ops_begin;
	const uint32_t *end = (const uint32_t *)ops_end;

	for (; dst < end; dst++)
		*dst = val++;

	*op = val;
}
167
/*
 *  stress_opcode_mixed()
 *	fill the opcode region with a mix of patterns: a fixed seed
 *	value and a fresh random value per iteration, each written
 *	plain, inverted, gray-coded and bit-reversed (8 words per pass)
 */
static void stress_opcode_mixed(
	uint8_t *ops_begin,
	const uint8_t *ops_end,
	uint32_t *op)
{
	register uint32_t seed = *op;
	register uint32_t *dst = (uint32_t *)ops_begin;
	const uint32_t *end = (const uint32_t *)ops_end;

	while (dst < end) {
		register uint32_t rnd32 = stress_mwc32();

		dst[0] = seed;
		dst[1] = seed ^ 0xffffffff;	/* Inverted */
		dst[2] = ((seed >> 1) ^ seed);	/* Gray */
		dst[3] = reverse32(seed);

		dst[4] = rnd32;
		dst[5] = rnd32 ^ 0xffffffff;
		dst[6] = ((rnd32 >> 1) ^ rnd32);
		dst[7] = reverse32(rnd32);

		dst += 8;
	}
	*op = seed;
}
191
/*
 *  stress_opcode_text()
 *	copy a randomly chosen, 8-byte aligned chunk of the program's
 *	own text segment into the opcode region and flip random bits
 *	in roughly 1 in 8 of the bytes; falls back to purely random
 *	opcodes if the text segment is too small
 */
static void stress_opcode_text(
	uint8_t *ops_begin,
	const uint8_t *ops_end,
	uint32_t *op)
{
	char *text_start, *text_end;
	const size_t ops_len = (uintptr_t)ops_end - (uintptr_t)ops_begin;
	const size_t text_amount = stress_text_addr(&text_start, &text_end);
	uint8_t *ops;
	size_t text_len, offset;

	/*
	 *  Guard against a tiny (or zero-sized) text segment: text_amount
	 *  must exceed the 8 byte reserve and text_len must be strictly
	 *  greater than ops_len, otherwise text_len - ops_len below would
	 *  be zero (a modulo-by-zero) or would underflow
	 */
	if ((text_amount < 8) || ((text_len = text_amount - 8) <= ops_len)) {
		stress_opcode_random(ops_begin, ops_end, op);
		return;
	}

	/* Pick a random 8-byte aligned offset into the text segment */
	offset = stress_mwc64() % (text_len - ops_len);
	offset &= ~(0x7ULL);

	(void)memcpy(ops_begin, text_start + offset, ops_len);
	for (ops = ops_begin; ops < ops_end; ops++) {
		const uint8_t rnd = stress_mwc8();

		/* 1 in 8 chance of random bit corruption */
		if (rnd < 32) {
			const uint8_t bit = (uint8_t)(1 << (rnd & 7));

			*ops ^= bit;
		}
	}
}
222
/* Available opcode generation methods; first entry is the default */
static const stress_opcode_method_info_t stress_opcode_methods[] = {
	{ "random",	stress_opcode_random },
	{ "text",	stress_opcode_text },
	{ "inc",	stress_opcode_inc },
	{ "mixed",	stress_opcode_mixed },
	{ NULL,		NULL }
};
230
231 /*
232 * stress_set_opcode_method()
233 * set default opcode stress method
234 */
stress_set_opcode_method(const char * name)235 static int stress_set_opcode_method(const char *name)
236 {
237 stress_opcode_method_info_t const *info;
238
239 for (info = stress_opcode_methods; info->func; info++) {
240 if (!strcmp(info->name, name)) {
241 stress_set_setting("opcode-method", TYPE_ID_UINTPTR_T, &info);
242 return 0;
243 }
244 }
245
246 (void)fprintf(stderr, "opcode-method must be one of:");
247 for (info = stress_opcode_methods; info->func; info++) {
248 (void)fprintf(stderr, " %s", info->name);
249 }
250 (void)fprintf(stderr, "\n");
251
252 return -1;
253 }
254
255 /*
256 * stress_opcode
257 * stress with random opcodes
258 */
/*
 *  stress_opcode
 *	stress with random opcodes: repeatedly fork a child that
 *	fills an executable buffer using the selected method and
 *	jumps into it, expecting it to crash or exit quickly
 */
static int stress_opcode(const stress_args_t *args)
{
	const size_t page_size = args->page_size;
	int rc;
	uint32_t op = 0;
	size_t i;
	const stress_opcode_method_info_t *opcode_method = &stress_opcode_methods[0];
#if TRACK_SIGCOUNT
	const size_t sig_count_size = MAX_SIGS * sizeof(*sig_count);
#endif

#if TRACK_SIGCOUNT
	/* MAP_SHARED so signal tallies updated by children are visible here */
	sig_count = (uint64_t *)mmap(NULL, sig_count_size, PROT_READ | PROT_WRITE,
		MAP_ANONYMOUS | MAP_SHARED, -1, 0);
	if (sig_count == MAP_FAILED) {
		pr_fail("%s: mmap failed, errno=%d (%s)\n",
			args->name, errno, strerror(errno));
		return EXIT_NO_RESOURCE;
	}
#endif

	(void)stress_get_setting("opcode-method", &opcode_method);

	stress_set_proc_state(args->name, STRESS_STATE_RUN);

	do {
		pid_t pid;

		/*
		 * Force a new random value so that child always
		 * gets a different random value on each fork
		 */
		(void)stress_mwc32();
		op += 1024;
again:
		pid = fork();
		if (pid < 0) {
			if (stress_redo_fork(errno))
				goto again;
			if (!keep_stressing(args))
				goto finish;
			pr_fail("%s: fork failed, errno=%d (%s)\n",
				args->name, errno, strerror(errno));
			rc = EXIT_NO_RESOURCE;
			goto err;
		}
		if (pid == 0) {
			/* Child: set up an executable buffer and run it */
			struct itimerval it;
			uint8_t *opcodes, *ops_begin, *ops_end;

			(void)sched_settings_apply(true);

			/* We don't want bad ops clobbering this region */
			stress_shared_unmap();

			/* We don't want core dumps either */
			stress_process_dumpable(false);

			/* Drop all capabilities */
			if (stress_drop_capabilities(args->name) < 0) {
				_exit(EXIT_NO_RESOURCE);
			}
			/* Exit promptly on any of the expected bad-op signals */
			for (i = 0; i < SIZEOF_ARRAY(sigs); i++) {
				if (stress_sighandler(args->name, sigs[i], stress_badhandler, NULL) < 0)
					_exit(EXIT_FAILURE);
			}

			opcodes = mmap(NULL, page_size * PAGES, PROT_READ | PROT_WRITE,
				MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
			if (opcodes == MAP_FAILED) {
				pr_fail("%s: mmap failed, errno=%d (%s)\n",
					args->name, errno, strerror(errno));
				_exit(EXIT_NO_RESOURCE);
			}
			/* Force pages resident */
			(void)memset(opcodes, 0x00, page_size * PAGES);

			/* Middle pages hold the opcodes; first/last become guards */
			ops_begin = opcodes + page_size;
			ops_end = opcodes + (page_size * (PAGES - 1));

			(void)mprotect(opcodes, page_size, PROT_NONE);
			(void)mprotect(ops_end, page_size, PROT_NONE);
			(void)mprotect(ops_begin, page_size, PROT_WRITE);

			opcode_method->func(ops_begin, ops_end, &op);

			/* Flip the opcode page from writable to executable */
			(void)mprotect(ops_begin, page_size, PROT_READ | PROT_EXEC);
			shim_flush_icache((char *)ops_begin, (char *)ops_end);
			(void)setpgid(0, g_pgrp);
			stress_parent_died_alarm();

			/*
			 * Force abort if the opcodes magically
			 * do an infinite loop
			 */
			it.it_interval.tv_sec = 0;
			it.it_interval.tv_usec = 50000;
			it.it_value.tv_sec = 0;
			it.it_value.tv_usec = 50000;
			if (setitimer(ITIMER_REAL, &it, NULL) < 0) {
				pr_fail("%s: setitimer failed, errno=%d (%s)\n",
					args->name, errno, strerror(errno));
				_exit(EXIT_NO_RESOURCE);
			}

			/* Disable stack smashing messages */
			stress_set_stack_smash_check_flag(false);

			/*
			 * Flush and close stdio fds, we
			 * really don't care if the child dies
			 * in a bad way and libc or whatever
			 * reports of stack smashing or heap
			 * corruption since the child will
			 * die soon anyhow
			 */
			(void)fflush(NULL);
			(void)close(fileno(stdin));
			(void)close(fileno(stdout));
			(void)close(fileno(stderr));

			/* Jump into the buffer at byte offsets 0..1023 */
			for (i = 0; i < 1024; i++) {
#if defined(HAVE_LINUX_SECCOMP_H) &&	\
    defined(SECCOMP_SET_MODE_FILTER)
				/*
				 * Limit syscall using seccomp
				 */
				(void)shim_seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog);
#endif
				((void (*)(void))(ops_begin + i))();
			}

			/*
			 * Originally we unmapped these, but this is
			 * another system call required that may go
			 * wrong because libc or the stack has been
			 * trashed, so just skip it.
			 *
			(void)munmap(opcodes, page_size * PAGES);
			 */
			_exit(0);
		}
		if (pid > 0) {
			int ret, status;

			/* Parent: reap the child, killing it if waitpid failed */
			ret = shim_waitpid(pid, &status, 0);
			if (ret < 0) {
				if (errno != EINTR)
					pr_dbg("%s: waitpid(): errno=%d (%s)\n",
						args->name, errno, strerror(errno));
				(void)kill(pid, SIGTERM);
				(void)kill(pid, SIGKILL);
				(void)shim_waitpid(pid, &status, 0);
			}
			inc_counter(args);
		}
	} while (keep_stressing(args));

finish:
	rc = EXIT_SUCCESS;

#if TRACK_SIGCOUNT
	for (i = 0; i < MAX_SIGS; i++) {
		if (sig_count[i]) {
			pr_dbg("%s: %-25.25s: %" PRIu64 "\n",
				args->name, strsignal(i), sig_count[i]);
		}
	}
#endif
err:
	stress_set_proc_state(args->name, STRESS_STATE_DEINIT);

#if TRACK_SIGCOUNT
	(void)munmap(sig_count, sig_count_size);
#endif
	return rc;
}
436
/*
 *  stress_opcode_set_default()
 *	default to the "random" opcode method
 */
static void stress_opcode_set_default(void)
{
	stress_set_opcode_method("random");
}
441
/* option handler table: map --opcode-method to its setter */
static const stress_opt_set_func_t opt_set_funcs[] = {
	{ OPT_opcode_method,	stress_set_opcode_method },
	{ 0,			NULL }
};
446
/* stressor registration for the framework */
stressor_info_t stress_opcode_info = {
	.stressor = stress_opcode,
	.set_default = stress_opcode_set_default,
	.class = CLASS_CPU | CLASS_OS,
	.opt_set_funcs = opt_set_funcs,
	.help = help
};
454 #else
455
/*
 *  stress_set_opcode_method()
 *	fallback when seccomp/audit/filter/mprotect support is absent;
 *	always fails since the stressor is not implemented
 */
static int stress_set_opcode_method(const char *name)
{
	(void)name;

	/* newline added: message previously left stderr unterminated */
	(void)fprintf(stderr, "opcode-method not implemented\n");

	return -1;
}
464
/* option handler table for the not-implemented fallback build */
static const stress_opt_set_func_t opt_set_funcs[] = {
	{ OPT_opcode_method,	stress_set_opcode_method },
	{ 0,			NULL }
};

/* stressor registration: report not-implemented on unsupported platforms */
stressor_info_t stress_opcode_info = {
	.stressor = stress_not_implemented,
	.class = CLASS_CPU | CLASS_OS,
	.opt_set_funcs = opt_set_funcs,
	.help = help
};
476 #endif
477