1 /*
2  * Copyright (C) 2014-2021 Canonical, Ltd.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public License
6  * as published by the Free Software Foundation; either version 2
7  * of the License, or (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software
16  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17  *
18  * This code is a complete clean re-write of the stress tool by
19  * Colin Ian King <colin.king@canonical.com> and attempts to be
20  * backwardly compatible with the stress tool by Amos Waterland
21  * <apw@rossby.metr.ou.edu> but has more stress tests and more
22  * functionality.
23  *
24  */
25 #include "stress-ng.h"
26 #include "git-commit-id.h"
27 
/*
 *  SUID_DUMP_* fallbacks for older headers that lack them.
 *  Bug fix: the guard previously tested PR_SET_DISABLE (which is
 *  never defined) instead of SUID_DUMP_DISABLE, risking a
 *  redefinition clash with system headers.
 */
#if !defined(SUID_DUMP_DISABLE)
#define SUID_DUMP_DISABLE	(0)       /* No setuid dumping */
#endif
#if !defined(SUID_DUMP_USER)
#define SUID_DUMP_USER		(1)       /* Dump as user of process */
#endif
34 
/* Maximum number of signals; NSIG is the common name, _NSIG the older one */
#if defined(NSIG)
#define STRESS_NSIG	NSIG
#elif defined(_NSIG)
#define STRESS_NSIG	_NSIG
#endif

/* tcc and pcc do not provide __dso_handle, so supply one */
#if defined(__TINYC__) || defined(__PCC__)
int __dso_handle;
#endif

/* NOTE(review): not referenced in this chunk; presumably set once a
 * stack check has been performed — confirm against the rest of the file */
static bool stress_stack_check_flag;

/* Mapping of a signal number to its symbolic name */
typedef struct {
	const int  signum;	/* signal number, e.g. SIGINT */
	const char *name;	/* symbolic name, e.g. "SIGINT" */
} stress_sig_name_t;

/* Expand a signal macro into a { number, "name" } table entry */
#define SIG_NAME(x) { x, #x }

/* Table of all known signals, each entry guarded since availability
 * varies between platforms */
static const stress_sig_name_t sig_names[] = {
#if defined(SIGABRT)
	SIG_NAME(SIGABRT),
#endif
#if defined(SIGALRM)
	SIG_NAME(SIGALRM),
#endif
#if defined(SIGBUS)
	SIG_NAME(SIGBUS),
#endif
#if defined(SIGCHLD)
	SIG_NAME(SIGCHLD),
#endif
#if defined(SIGCLD)
	SIG_NAME(SIGCLD),
#endif
#if defined(SIGCONT)
	SIG_NAME(SIGCONT),
#endif
#if defined(SIGEMT)
	SIG_NAME(SIGEMT),
#endif
#if defined(SIGFPE)
	SIG_NAME(SIGFPE),
#endif
#if defined(SIGHUP)
	SIG_NAME(SIGHUP),
#endif
#if defined(SIGILL)
	SIG_NAME(SIGILL),
#endif
#if defined(SIGINFO)
	SIG_NAME(SIGINFO),
#endif
#if defined(SIGINT)
	SIG_NAME(SIGINT),
#endif
#if defined(SIGIO)
	SIG_NAME(SIGIO),
#endif
#if defined(SIGIOT)
	SIG_NAME(SIGIOT),
#endif
#if defined(SIGKILL)
	SIG_NAME(SIGKILL),
#endif
#if defined(SIGLOST)
	SIG_NAME(SIGLOST),
#endif
#if defined(SIGPIPE)
	SIG_NAME(SIGPIPE),
#endif
#if defined(SIGPOLL)
	SIG_NAME(SIGPOLL),
#endif
#if defined(SIGPROF)
	SIG_NAME(SIGPROF),
#endif
#if defined(SIGPWR)
	SIG_NAME(SIGPWR),
#endif
#if defined(SIGQUIT)
	SIG_NAME(SIGQUIT),
#endif
#if defined(SIGSEGV)
	SIG_NAME(SIGSEGV),
#endif
#if defined(SIGSTKFLT)
	SIG_NAME(SIGSTKFLT),
#endif
#if defined(SIGSTOP)
	SIG_NAME(SIGSTOP),
#endif
#if defined(SIGTSTP)
	SIG_NAME(SIGTSTP),
#endif
#if defined(SIGSYS)
	SIG_NAME(SIGSYS),
#endif
#if defined(SIGTERM)
	SIG_NAME(SIGTERM),
#endif
#if defined(SIGTRAP)
	SIG_NAME(SIGTRAP),
#endif
#if defined(SIGTTIN)
	SIG_NAME(SIGTTIN),
#endif
#if defined(SIGTTOU)
	SIG_NAME(SIGTTOU),
#endif
#if defined(SIGUNUSED)
	SIG_NAME(SIGUNUSED),
#endif
#if defined(SIGURG)
	SIG_NAME(SIGURG),
#endif
#if defined(SIGUSR1)
	SIG_NAME(SIGUSR1),
#endif
#if defined(SIGUSR2)
	SIG_NAME(SIGUSR2),
#endif
#if defined(SIGVTALRM)
	SIG_NAME(SIGVTALRM),
#endif
#if defined(SIGXCPU)
	SIG_NAME(SIGXCPU),
#endif
#if defined(SIGXFSZ)
	SIG_NAME(SIGXFSZ),
#endif
#if defined(SIGWINCH)
	SIG_NAME(SIGWINCH),
#endif
};
170 
/* Temporary file path; NULL means use the default "." */
static char *stress_temp_path;

/*
 *  stress_temp_path_free()
 *	free and NULLify temporary file path
 */
void stress_temp_path_free(void)
{
	/* free(NULL) is a no-op, so no need to guard the call */
	free(stress_temp_path);
	stress_temp_path = NULL;
}
184 
185 /*
186  *  stress_set_temp_path()
187  *	set temporary file path, default
188  *	is . - current dir
189  */
stress_set_temp_path(const char * path)190 int stress_set_temp_path(const char *path)
191 {
192 	stress_temp_path_free();
193 
194 	stress_temp_path = stress_const_optdup(path);
195 	if (!stress_temp_path) {
196 		(void)fprintf(stderr, "aborting: cannot allocate memory for '%s'\n", path);
197 		return -1;
198 	}
199 	return 0;
200 }
201 
202 /*
203  *  stress_get_temp_path()
204  *	get temporary file path, return "." if null
205  */
stress_get_temp_path(void)206 const char *stress_get_temp_path(void)
207 {
208 	if (!stress_temp_path)
209 		return ".";
210 	return stress_temp_path;
211 }
212 
/*
 *  stress_check_temp_path()
 *	check if temp path is accessible
 */
int stress_check_temp_path(void)
{
	const char *path = stress_get_temp_path();

	if (access(path, R_OK | W_OK) == 0)
		return 0;

	(void)fprintf(stderr, "aborting: temp-path '%s' must be readable "
		"and writeable\n", path);
	return -1;
}
228 
229 /*
230  *  stress_mk_filename()
231  *	generate a full file name from a path and filename
232  */
stress_mk_filename(char * fullname,const size_t fullname_len,const char * pathname,const char * filename)233 size_t stress_mk_filename(
234 	char *fullname,
235 	const size_t fullname_len,
236 	const char *pathname,
237 	const char *filename)
238 {
239 	/*
240 	 *  This may not be efficient, but it works. Do not
241 	 *  be tempted to optimize this, it is not used frequently
242 	 *  and is not a CPU bottleneck.
243 	 */
244 	(void)shim_strlcpy(fullname, pathname, fullname_len);
245 	(void)shim_strlcat(fullname, "/", fullname_len);
246 	return shim_strlcat(fullname, filename, fullname_len);
247 }
248 
249 /*
250  *  stress_get_pagesize()
251  *	get pagesize
252  */
stress_get_pagesize(void)253 size_t stress_get_pagesize(void)
254 {
255 	static size_t page_size = 0;
256 
257 	/* Use cached size */
258 	if (page_size > 0)
259 		return page_size;
260 
261 #if defined(_SC_PAGESIZE)
262 	{
263 		/* Use modern sysconf */
264 		long sz = sysconf(_SC_PAGESIZE);
265 		if (sz > 0) {
266 			page_size = (size_t)sz;
267 			return page_size;
268 		}
269 	}
270 #endif
271 #if defined(HAVE_GETPAGESIZE)
272 	{
273 		/* Use deprecated getpagesize */
274 		long sz = getpagesize();
275 		if (sz > 0) {
276 			page_size = (size_t)sz;
277 			return page_size;
278 		}
279 	}
280 #endif
281 	/* Guess */
282 	page_size = PAGE_4K;
283 	return page_size;
284 }
285 
/*
 *  stress_get_processors_online()
 *	get number of processors that are online, cached
 *	after the first successful lookup, minimum of 1
 */
int32_t stress_get_processors_online(void)
{
	static int32_t processors_online = 0;

	if (processors_online <= 0) {
#if defined(_SC_NPROCESSORS_ONLN)
		processors_online = (int32_t)sysconf(_SC_NPROCESSORS_ONLN);
		if (processors_online < 0)
			processors_online = 1;	/* sysconf failed, assume 1 */
#else
		processors_online = 1;
#endif
	}
	return processors_online;
}
306 
/*
 *  stress_get_processors_configured()
 *	get number of processors that are configured, cached
 *	after the first successful lookup
 */
int32_t stress_get_processors_configured(void)
{
	static int32_t processors_configured = 0;

	if (processors_configured <= 0) {
#if defined(_SC_NPROCESSORS_CONF)
		processors_configured = (int32_t)sysconf(_SC_NPROCESSORS_CONF);
		/* fall back to the online count on failure */
		if (processors_configured < 0)
			processors_configured = stress_get_processors_online();
#else
		processors_configured = 1;
#endif
	}
	return processors_configured;
}
327 
/*
 *  stress_get_ticks_per_second()
 *	get number of clock ticks per second, -1 if not available
 */
int32_t stress_get_ticks_per_second(void)
{
#if defined(_SC_CLK_TCK)
	static int32_t ticks_per_second = 0;

	/* only cache a successful (positive) lookup */
	if (ticks_per_second <= 0)
		ticks_per_second = (int32_t)sysconf(_SC_CLK_TCK);
	return ticks_per_second;
#else
	return -1;
#endif
}
346 
/*
 *  stress_get_memlimits()
 *	get SHMALL and memory in system
 *	these are set to zero on failure
 */
void stress_get_memlimits(
	size_t *shmall,
	size_t *freemem,
	size_t *totalmem,
	size_t *freeswap)
{
#if defined(HAVE_SYS_SYSINFO_H) &&	\
    defined(HAVE_SYSINFO)
	struct sysinfo info;
	FILE *fp;
#endif
	/* default everything to zero so failures are well defined */
	*shmall = 0;
	*freemem = 0;
	*totalmem = 0;
	*freeswap = 0;

#if defined(HAVE_SYS_SYSINFO_H) &&	\
    defined(HAVE_SYSINFO)
	(void)memset(&info, 0, sizeof(info));

	if (sysinfo(&info) == 0) {
		/* sizes are in mem_unit sized chunks */
		*freemem = info.freeram * info.mem_unit;
		*totalmem = info.totalram * info.mem_unit;
		*freeswap = info.freeswap * info.mem_unit;
	}

	fp = fopen("/proc/sys/kernel/shmall", "r");
	if (fp) {
		if (fscanf(fp, "%zu", shmall) != 1)
			*shmall = 0;	/* parse failed, keep it zero */
		(void)fclose(fp);
	}
#endif
}
389 
/* Prefer the count of available pages, fall back to total pages */
#if defined(_SC_AVPHYS_PAGES)
#define STRESS_SC_PAGES	_SC_AVPHYS_PAGES
#elif defined(_SC_PHYS_PAGES)
#define STRESS_SC_PAGES	_SC_PHYS_PAGES
#endif

/*
 *  stress_get_phys_mem_size()
 *	get size of physical memory still available, 0 if failed
 */
uint64_t stress_get_phys_mem_size(void)
{
#if defined(STRESS_SC_PAGES)
	const size_t page_size = stress_get_pagesize();
	const uint64_t max_pages = ~0ULL / page_size;
	uint64_t phys_pages = (uint64_t)sysconf(STRESS_SC_PAGES);

	/* clamp so pages * page_size cannot wrap around */
	if (phys_pages > max_pages)
		phys_pages = max_pages;
	return phys_pages * page_size;
#else
	return 0ULL;
#endif
}
416 
/*
 *  stress_get_filesystem_size()
 *	get size of free space still available on the
 *	file system where stress temporary path is located,
 *	return 0 if failed
 */
uint64_t stress_get_filesystem_size(void)
{
#if defined(HAVE_SYS_STATVFS_H)
	struct statvfs buf;
	fsblkcnt_t avail_blocks, max_blocks;
	const char *path = stress_get_temp_path();

	if (!path)
		return 0;

	(void)memset(&buf, 0, sizeof(buf));
	if (statvfs(path, &buf) < 0)
		return 0;

	/* clamp block count so the multiply below cannot wrap */
	max_blocks = (~(fsblkcnt_t)0) / buf.f_bsize;
	avail_blocks = buf.f_bavail;
	if (avail_blocks > max_blocks)
		avail_blocks = max_blocks;

	return (uint64_t)buf.f_bsize * avail_blocks;
#else
	return 0ULL;
#endif
}
450 
/*
 *  stress_get_filesystem_available_inodes()
 *	get number of free available inodes on the current stress
 *	temporary path, return 0 if failed
 */
uint64_t stress_get_filesystem_available_inodes(void)
{
#if defined(HAVE_SYS_STATVFS_H)
	struct statvfs buf;
	const char *path = stress_get_temp_path();

	if (!path)
		return 0;

	(void)memset(&buf, 0, sizeof(buf));
	if (statvfs(path, &buf) < 0)
		return 0;

	return (uint64_t)buf.f_favail;	/* inodes available to non-root */
#else
	return 0ULL;
#endif
}
476 
/*
 *  stress_set_nonblock()
 *	try to make fd non-blocking, returns fcntl/ioctl result
 */
int stress_set_nonblock(const int fd)
{
	int flags;
#if defined(O_NONBLOCK)
	flags = fcntl(fd, F_GETFL, 0);
	if (flags < 0)
		flags = 0;	/* can't read flags, just set O_NONBLOCK */
	return fcntl(fd, F_SETFL, flags | O_NONBLOCK);
#else
	/* legacy BSD style non-blocking ioctl */
	flags = 1;
	return ioctl(fd, FIOBIO, &flags);
#endif
}
494 
/*
 *  stress_get_load_avg()
 *	get 1, 5 and 15 minute load averages; on any failure all
 *	three outputs are zeroed and -1 is returned
 */
int stress_get_load_avg(
	double *min1,
	double *min5,
	double *min15)
{
#if defined(HAVE_GETLOADAVG) &&	\
    !defined(__UCLIBC__)
	/* preferred: BSD/glibc getloadavg() */
	int rc;
	double loadavg[3];

	loadavg[0] = 0.0;
	loadavg[1] = 0.0;
	loadavg[2] = 0.0;

	rc = getloadavg(loadavg, 3);
	if (rc < 0)
		goto fail;

	*min1 = loadavg[0];
	*min5 = loadavg[1];
	*min15 = loadavg[2];

	return 0;
fail:
#elif defined(HAVE_SYS_SYSINFO_H) &&	\
      defined(HAVE_SYSINFO)
	/* fallback: sysinfo() loads are fixed-point, scale to double */
	struct sysinfo info;
	const double scale = 1.0 / (double)(1 << SI_LOAD_SHIFT);

	if (sysinfo(&info) < 0)
		goto fail;

	*min1 = info.loads[0] * scale;
	*min5 = info.loads[1] * scale;
	*min15 = info.loads[2] * scale;

	return 0;
fail:
#endif
	/* reached via the fail paths, or when neither API exists */
	*min1 = *min5 = *min15 = 0.0;
	return -1;
}
541 
/*
 *  stress_parent_died_alarm()
 *	arrange for this (child) process to receive SIGALRM
 *	when its parent dies; silently does nothing where
 *	prctl PR_SET_PDEATHSIG is unavailable
 */
void stress_parent_died_alarm(void)
{
#if defined(HAVE_PRCTL) &&		\
    defined(HAVE_SYS_PRCTL_H) &&	\
    defined(PR_SET_PDEATHSIG)
	(void)prctl(PR_SET_PDEATHSIG, SIGALRM);
#endif
}
554 
/*
 *  stress_process_dumpable()
 *	set dumpable flag, e.g. produce a core dump or not,
 *	don't print an error if these fail, it's not that
 *	critical
 *
 *	Returns 0 on success, -1 if the coredump_filter write failed.
 */
int stress_process_dumpable(const bool dumpable)
{
	int fd, rc = 0;

#if defined(RLIMIT_CORE)
	{
		/*
		 *  Zero both soft and hard core dump limits in one
		 *  call; the previous code performed a redundant
		 *  getrlimit + setrlimit pass before doing exactly
		 *  this anyway.
		 */
		struct rlimit lim;

		lim.rlim_cur = 0;
		lim.rlim_max = 0;
		(void)setrlimit(RLIMIT_CORE, &lim);
	}
#endif

	/*
	 *  changing PR_SET_DUMPABLE also affects the
	 *  oom adjust capability, so for now, we disable
	 *  this as I'd rather have a oom'able process when
	 *  memory gets constrained. Don't enable this
	 *  unless one checks that processes able oomable!
	 */
#if 0 && defined(HAVE_PRCTL) &&		\
    defined(HAVE_SYS_PRCTL_H) &&	\
    defined(PR_SET_DUMPABLE)
	(void)prctl(PR_SET_DUMPABLE,
		dumpable ? SUID_DUMP_USER : SUID_DUMP_DISABLE);
#endif
	/* 0x33 enables the default dump bits, 0x00 disables them all */
	if ((fd = open("/proc/self/coredump_filter", O_WRONLY)) >= 0) {
		char const *str =
			dumpable ? "0x33" : "0x00";

		if (write(fd, str, strlen(str)) < 0)
			rc = -1;
		(void)close(fd);
	}
	return rc;
}
604 
/*
 *  stress_set_timer_slack_ns()
 *	parse and stash the timer slack option (in nanoseconds);
 *	always returns 0
 */
int stress_set_timer_slack_ns(const char *opt)
{
#if defined(HAVE_PRCTL_TIMER_SLACK)
	uint32_t timer_slack = stress_get_uint32(opt);

	(void)stress_set_setting("timer-slack", TYPE_ID_UINT32, &timer_slack);
#else
	(void)opt;	/* option ignored when prctl timer slack is absent */
#endif
	return 0;
}
621 
/*
 *  stress_set_timer_slack()
 *	apply the previously stashed timer slack setting,
 *	no-op if unset or unsupported
 */
void stress_set_timer_slack(void)
{
#if defined(HAVE_PRCTL) && 		\
    defined(HAVE_SYS_PRCTL_H) &&	\
    defined(HAVE_PRCTL_TIMER_SLACK)
	uint32_t timer_slack;

	/* only apply if the user actually set timer-slack */
	if (stress_get_setting("timer-slack", &timer_slack))
		(void)prctl(PR_SET_TIMERSLACK, timer_slack);
#endif
}
637 
/*
 *  stress_set_proc_name_init()
 *	init setproctitle if supported, otherwise silently
 *	ignores the arguments
 */
void stress_set_proc_name_init(int argc, char *argv[], char *envp[])
{
#if defined(HAVE_BSD_UNISTD_H) &&	\
    defined(HAVE_SETPROCTITLE)
	(void)setproctitle_init(argc, argv, envp);
#else
	(void)argc;
	(void)argv;
	(void)envp;
#endif
}
653 
654 /*
655  *  stress_set_proc_name()
656  *	Set process name, we don't care if it fails
657  */
stress_set_proc_name(const char * name)658 void stress_set_proc_name(const char *name)
659 {
660 	(void)name;
661 
662 	if (g_opt_flags & OPT_FLAGS_KEEP_NAME)
663 		return;
664 
665 #if defined(HAVE_BSD_UNISTD_H) &&	\
666     defined(HAVE_SETPROCTITLE)
667 	/* Sets argv[0] */
668 	setproctitle("-%s", name);
669 #endif
670 #if defined(HAVE_PRCTL) &&		\
671     defined(HAVE_SYS_PRCTL_H) &&	\
672     defined(PR_SET_NAME)
673 	/* Sets the comm field */
674 	(void)prctl(PR_SET_NAME, name);
675 #endif
676 }
677 
678 /*
679  *  stress_set_proc_state
680  *	set process name based on run state, see
681  *	macros STRESS_STATE_*
682  */
stress_set_proc_state(const char * name,const int state)683 void stress_set_proc_state(const char *name, const int state)
684 {
685 	static const char *stress_states[] = {
686 		"start",
687 		"init",
688 		"run",
689 		"deinit",
690 		"stop",
691 		"exit",
692 		"wait"
693 	};
694 
695 	(void)name;
696 
697 	if (g_opt_flags & OPT_FLAGS_KEEP_NAME)
698 		return;
699 
700 	if ((state < 0) || (state >= (int)SIZEOF_ARRAY(stress_states)))
701 		return;
702 
703 #if defined(HAVE_BSD_UNISTD_H) &&	\
704     defined(HAVE_SETPROCTITLE)
705 	setproctitle("-%s [%s]", name, stress_states[state]);
706 #endif
707 }
708 
/*
 *  stress_munge_underscore()
 *	turn '_' to '-' in strings, returns a pointer to a
 *	static buffer holding the munged (possibly truncated) copy
 */
char *stress_munge_underscore(const char *str)
{
	static char munged[128];
	const size_t str_len = strlen(str);
	const ssize_t len = (ssize_t)STRESS_MINIMUM(str_len, sizeof(munged) - 1);
	const char *src = str;
	char *dst = munged;

	while (*src && ((dst - munged) < len)) {
		*dst = (*src == '_') ? '-' : *src;
		dst++;
		src++;
	}
	*dst = '\0';

	return munged;
}
728 
/*
 *  stress_get_stack_direction_helper()
 *	helper to determine direction of stack; must not be
 *	inlined or optimized, otherwise the frame comparison
 *	below would be meaningless
 */
static ssize_t NOINLINE OPTIMIZE0 stress_get_stack_direction_helper(const uint8_t *val1)
{
	const uint8_t val2 = 0;	/* lives in this (deeper) stack frame */
	/*
	 *  Subtracting addresses of locals in different frames is
	 *  formally undefined behaviour; this works in practice
	 *  precisely because the call is kept un-inlined/un-optimized.
	 */
	const ssize_t diff = &val2 - (const uint8_t *)val1;

	/* sign of the difference: -1, 0 or +1 */
	return (diff > 0) - (diff < 0);
}
740 
/*
 *  stress_get_stack_direction()
 *      determine which way the stack goes, up / down
 *	just pass in any var on the stack before calling
 *	return:
 *		 1 - stack goes down (conventional)
 *		 0 - error
 *	  	-1 - stack goes up (unconventional)
 */
ssize_t stress_get_stack_direction(void)
{
	uint8_t val1 = 0;	/* anchor address in this stack frame */
	uint8_t waste[64];	/* padding so the two frames are clearly apart */

	/* touch the padding so the compiler cannot discard it */
	waste[(sizeof waste) - 1] = 0;
	return stress_get_stack_direction_helper(&val1);
}
758 
759 /*
760  *  stress_get_stack_top()
761  *	Get the stack top given the start and size of the stack,
762  *	offset by a bit of slop. Assumes stack is > 64 bytes
763  */
stress_get_stack_top(void * start,size_t size)764 void *stress_get_stack_top(void *start, size_t size)
765 {
766 	const size_t offset = stress_get_stack_direction() < 0 ? (size - 64) : 64;
767 
768 	return (void *)((char *)start + offset);
769 }
770 
/*
 *  stress_uint64_zero()
 *	return uint64 zero in way that force less smart
 *	static analysers to realise we are doing this
 *	to force a division by zero. I'd like to have
 *	a better solution than this ghastly way.
 */
uint64_t stress_uint64_zero(void)
{
	/* NOTE(review): relies on g_shared->zero being initialized to 0
	 * elsewhere — confirm against the shared memory setup code */
	return g_shared->zero;
}
782 
/*
 *  stress_base36_encode_uint64()
 *	encode 64 bit hash of filename into a unique base 36
 *	filename of up to 13 chars long + 1 char eos
 */
static void stress_base36_encode_uint64(char dst[14], uint64_t val)
{
	static const char b36[] = "abcdefghijklmnopqrstuvwxyz0123456789";
	const int b = 36;
	char *ptr = dst;

	/*
	 *  Use do..while so a zero value still emits one digit;
	 *  previously val == 0 produced an empty string, which
	 *  would yield an invalid (empty) filename.
	 */
	do {
		*ptr++ = b36[val % b];
		val /= b;
	} while (val);
	*ptr = '\0';
}
800 
/*
 *  stress_temp_hash_truncate()
 *	filenames may be too long for the underlying filesystem
 *	so workaround this by hashing them into a 64 bit hex
 *	filename.
 */
static void stress_temp_hash_truncate(char *filename)
{
	size_t f_namemax = 16;	/* conservative default name limit */
	const size_t len = strlen(filename);
#if defined(HAVE_SYS_STATVFS_H)
	struct statvfs buf;

	(void)memset(&buf, 0, sizeof(buf));
	if (statvfs(stress_get_temp_path(), &buf) == 0)
		f_namemax = buf.f_namemax;
#endif

	/* reuse len rather than recomputing strlen(filename) */
	if (len > f_namemax) {
		/* fold two 32 bit hashes into 64 bits, base-36 encode */
		const uint32_t upper = stress_hash_jenkin((uint8_t *)filename, len);
		const uint32_t lower = stress_hash_pjw(filename);
		const uint64_t val = ((uint64_t)upper << 32) | lower;

		stress_base36_encode_uint64(filename, val);
	}
}
830 
831 /*
832  *  stress_temp_filename()
833  *      construct a temp filename
834  */
stress_temp_filename(char * path,const size_t len,const char * name,const pid_t pid,const uint32_t instance,const uint64_t magic)835 int stress_temp_filename(
836 	char *path,
837 	const size_t len,
838 	const char *name,
839 	const pid_t pid,
840 	const uint32_t instance,
841 	const uint64_t magic)
842 {
843 	char directoryname[PATH_MAX];
844 	char filename[PATH_MAX];
845 
846 	(void)snprintf(directoryname, sizeof(directoryname),
847 		"tmp-%s-%d-%" PRIu32,
848 		name, (int)pid, instance);
849 	stress_temp_hash_truncate(directoryname);
850 
851 	(void)snprintf(filename, sizeof(filename),
852 		"%s-%d-%" PRIu32 "-%" PRIu64,
853 		name, (int)pid, instance, magic);
854 	stress_temp_hash_truncate(filename);
855 
856 	return snprintf(path, len, "%s/%s/%s",
857 		stress_get_temp_path(), directoryname, filename);
858 }
859 
/*
 *  stress_temp_filename_args()
 *      construct a temp filename using info from args
 */
int stress_temp_filename_args(
	const stress_args_t *args,
	char *path,
	const size_t len,
	const uint64_t magic)
{
	/* delegate using the stressor's name, pid and instance */
	return stress_temp_filename(path, len, args->name,
		args->pid, args->instance, magic);
}
873 
/*
 *  stress_temp_dir()
 *	create a temporary directory name of the form
 *	<temp-path>/tmp-<name>-<pid>-<instance>
 */
int stress_temp_dir(
	char *path,
	const size_t len,
	const char *name,
	const pid_t pid,
	const uint32_t instance)
{
	char directoryname[256];

	/* directory component, hash-shortened if too long */
	(void)snprintf(directoryname, sizeof(directoryname),
		"tmp-%s-%d-%" PRIu32,
		name, (int)pid, instance);
	stress_temp_hash_truncate(directoryname);

	return snprintf(path, len, "%s/%s",
		stress_get_temp_path(), directoryname);
}
895 
/*
 *  stress_temp_dir_args()
 *	create a temporary directory name using info from args
 */
int stress_temp_dir_args(
	const stress_args_t *args,
	char *path,
	const size_t len)
{
	/* delegate using the stressor's name, pid and instance */
	return stress_temp_dir(path, len,
		args->name, args->pid, args->instance);
}
908 
909 /*
910  *   stress_temp_dir_mk()
911  *	create a temporary directory
912  */
stress_temp_dir_mk(const char * name,const pid_t pid,const uint32_t instance)913 int stress_temp_dir_mk(
914 	const char *name,
915 	const pid_t pid,
916 	const uint32_t instance)
917 {
918 	int ret;
919 	char tmp[PATH_MAX];
920 
921 	stress_temp_dir(tmp, sizeof(tmp), name, pid, instance);
922 	ret = mkdir(tmp, S_IRWXU);
923 	if (ret < 0) {
924 		ret = -errno;
925 		pr_fail("%s: mkdir '%s' failed, errno=%d (%s)\n",
926 			name, tmp, errno, strerror(errno));
927 		(void)unlink(tmp);
928 	}
929 
930 	return ret;
931 }
932 
/*
 *   stress_temp_dir_mk_args()
 *	create a temporary directory using info from args
 */
int stress_temp_dir_mk_args(const stress_args_t *args)
{
	/* delegate using the stressor's name, pid and instance */
	return stress_temp_dir_mk(args->name, args->pid, args->instance);
}
941 
942 /*
943  *  stress_temp_dir_rm()
944  *	remove a temporary directory
945  */
stress_temp_dir_rm(const char * name,const pid_t pid,const uint32_t instance)946 int stress_temp_dir_rm(
947 	const char *name,
948 	const pid_t pid,
949 	const uint32_t instance)
950 {
951 	int ret;
952 	char tmp[PATH_MAX + 1];
953 
954 	stress_temp_dir(tmp, sizeof(tmp), name, pid, instance);
955 	ret = rmdir(tmp);
956 	if (ret < 0) {
957 		ret = -errno;
958 		pr_fail("%s: rmdir '%s' failed, errno=%d (%s)\n",
959 			name, tmp, errno, strerror(errno));
960 	}
961 
962 	return ret;
963 }
964 
/*
 *  stress_temp_dir_rm_args()
 *	remove a temporary directory using info from args
 */
int stress_temp_dir_rm_args(const stress_args_t *args)
{
	/* delegate using the stressor's name, pid and instance */
	return stress_temp_dir_rm(args->name, args->pid, args->instance);
}
973 
974 /*
975  *  stress_cwd_readwriteable()
976  *	check if cwd is read/writeable
977  */
stress_cwd_readwriteable(void)978 void stress_cwd_readwriteable(void)
979 {
980 	char path[PATH_MAX];
981 
982 	if (getcwd(path, sizeof(path)) == NULL) {
983 		pr_dbg("cwd: Cannot determine current working directory\n");
984 		return;
985 	}
986 	if (access(path, R_OK | W_OK)) {
987 		pr_inf("Working directory %s is not read/writeable, "
988 			"some I/O tests may fail\n", path);
989 		return;
990 	}
991 }
992 
993 /*
994  *  stress_signal_name()
995  *	return string version of signal number, NULL if not found
996  */
stress_signal_name(const int signum)997 const char *stress_signal_name(const int signum)
998 {
999 	size_t i;
1000 
1001 	for (i = 0; i < SIZEOF_ARRAY(sig_names); i++) {
1002 		if (signum == sig_names[i].signum)
1003 			return sig_names[i].name;
1004 	}
1005 	return NULL;
1006 }
1007 
/*
 *  stress_strsignal()
 *	signum to human readable string; returns a pointer to
 *	a static buffer, overwritten on each call
 */
const char *stress_strsignal(const int signum)
{
	static char buffer[40];
	const char *str = stress_signal_name(signum);

	if (str) {
		(void)snprintf(buffer, sizeof(buffer), "signal %d '%s'",
			signum, str);
	} else {
		(void)snprintf(buffer, sizeof(buffer), "signal %d", signum);
	}
	return buffer;
}
1024 
1025 /*
1026  *  stress_strnrnd()
1027  *	fill string with random chars
1028  */
stress_strnrnd(char * str,const size_t len)1029 void stress_strnrnd(char *str, const size_t len)
1030 {
1031 	const char *end = str + len;
1032 
1033 	while (str < end - 1)
1034 		*str++ = (stress_mwc8() % 26) + 'a';
1035 
1036 	*str = '\0';
1037 }
1038 
/*
 *  pr_runinfo()
 *	short info about the system we are running stress-ng on
 *	for the -v option
 */
void pr_runinfo(void)
{
#if defined(HAVE_UNAME) &&	\
    defined(HAVE_SYS_UTSNAME_H)
	struct utsname uts;
#endif
#if defined(HAVE_SYS_SYSINFO_H) &&	\
    defined(HAVE_SYSINFO)
	struct sysinfo info;
#endif
	/* only emit this in debug (-v) mode; PR_DEBUG is a bit in g_opt_flags */
	if (!(g_opt_flags & PR_DEBUG))
		return;

	/* include the git commit id if it was baked into the build */
	if (sizeof(STRESS_GIT_COMMIT_ID) > 1) {
		pr_dbg("%s %s g%12.12s\n",
			g_app_name, VERSION, STRESS_GIT_COMMIT_ID);
	} else {
		pr_dbg("%s %s\n",
			g_app_name, VERSION);
	}

#if defined(HAVE_UNAME) &&	\
    defined(HAVE_SYS_UTSNAME_H)
	if (uname(&uts) == 0) {
		pr_dbg("system: %s %s %s %s %s\n",
			uts.sysname, uts.nodename, uts.release,
			uts.version, uts.machine);
	}
#endif
#if defined(HAVE_SYS_SYSINFO_H) &&	\
    defined(HAVE_SYSINFO)
	/* Keep static analyzer happy */
	(void)memset(&info, 0, sizeof(info));
	if (sysinfo(&info) == 0) {
		char ram_t[32], ram_f[32], ram_s[32];

		/* render the sizes in human readable form */
		stress_uint64_to_str(ram_t, sizeof(ram_t), (uint64_t)info.totalram);
		stress_uint64_to_str(ram_f, sizeof(ram_f), (uint64_t)info.freeram);
		stress_uint64_to_str(ram_s, sizeof(ram_s), (uint64_t)info.freeswap);
		pr_dbg("RAM total: %s, RAM free: %s, swap free: %s\n", ram_t, ram_f, ram_s);
	}
#endif
}
1087 
/*
 *  pr_yaml_runinfo()
 *	log info about the system we are running stress-ng on
 *	in YAML format to the given output stream
 */
void pr_yaml_runinfo(FILE *yaml)
{
#if defined(HAVE_UNAME) &&	\
    defined(HAVE_SYS_UTSNAME_H)
	struct utsname uts;
#endif
#if defined(HAVE_SYS_SYSINFO_H) &&	\
    defined(HAVE_SYSINFO)
	struct sysinfo info;
#endif
	time_t t;
	struct tm *tm = NULL;
	/* VLA sized via sysconf-derived hostname length */
	const size_t hostname_len = stress_hostname_length();
	char hostname[hostname_len];
	const char *user = shim_getlogin();

	pr_yaml(yaml, "system-info:\n");
	if (time(&t) != ((time_t)-1))
		tm = localtime(&t);

	pr_yaml(yaml, "      stress-ng-version: " VERSION "\n");
	pr_yaml(yaml, "      run-by: %s\n", user ? user : "unknown");
	if (tm) {
		pr_yaml(yaml, "      date-yyyy-mm-dd: %4.4d:%2.2d:%2.2d\n",
			tm->tm_year + 1900, tm->tm_mon + 1, tm->tm_mday);
		pr_yaml(yaml, "      time-hh-mm-ss: %2.2d:%2.2d:%2.2d\n",
			tm->tm_hour, tm->tm_min, tm->tm_sec);
		pr_yaml(yaml, "      epoch-secs: %ld\n", (long)t);
	}
	if (!gethostname(hostname, sizeof(hostname)))
		pr_yaml(yaml, "      hostname: %s\n", hostname);
#if defined(HAVE_UNAME) &&	\
    defined(HAVE_SYS_UTSNAME_H)
	if (uname(&uts) == 0) {
		pr_yaml(yaml, "      sysname: %s\n", uts.sysname);
		pr_yaml(yaml, "      nodename: %s\n", uts.nodename);
		pr_yaml(yaml, "      release: %s\n", uts.release);
		pr_yaml(yaml, "      version: '%s'\n", uts.version);
		pr_yaml(yaml, "      machine: %s\n", uts.machine);
	}
#endif
#if defined(HAVE_SYS_SYSINFO_H) &&	\
    defined(HAVE_SYSINFO)
	/* zero so a failed sysinfo() cannot leak stack garbage */
	(void)memset(&info, 0, sizeof(info));
	if (sysinfo(&info) == 0) {
		pr_yaml(yaml, "      uptime: %ld\n", info.uptime);
		pr_yaml(yaml, "      totalram: %lu\n", info.totalram);
		pr_yaml(yaml, "      freeram: %lu\n", info.freeram);
		pr_yaml(yaml, "      sharedram: %lu\n", info.sharedram);
		pr_yaml(yaml, "      bufferram: %lu\n", info.bufferram);
		pr_yaml(yaml, "      totalswap: %lu\n", info.totalswap);
		pr_yaml(yaml, "      freeswap: %lu\n", info.freeswap);
	}
#endif
	pr_yaml(yaml, "      pagesize: %zd\n", stress_get_pagesize());
	pr_yaml(yaml, "      cpus: %" PRId32 "\n", stress_get_processors_configured());
	pr_yaml(yaml, "      cpus-online: %" PRId32 "\n", stress_get_processors_online());
	pr_yaml(yaml, "      ticks-per-second: %" PRId32 "\n", stress_get_ticks_per_second());
	pr_yaml(yaml, "\n");
}
1152 
1153 /*
1154  *  stress_cache_alloc()
1155  *	allocate shared cache buffer
1156  */
int stress_cache_alloc(const char *name)
{
#if defined(__linux__)
	stress_cpus_t *cpu_caches;
	stress_cpu_cache_t *cache = NULL;
	uint16_t max_cache_level = 0;
#endif

#if !defined(__linux__)
	/* No cache probing support on this platform, use built-in default */
	g_shared->mem_cache_size = MEM_CACHE_SIZE;
#else
	cpu_caches = stress_get_all_cpu_cache_details();
	if (!cpu_caches) {
		if (stress_warn_once())
			pr_dbg("%s: using defaults, cannot determine cache details\n", name);
		g_shared->mem_cache_size = MEM_CACHE_SIZE;
		goto init_done;
	}

	max_cache_level = stress_get_max_cache_level(cpu_caches);
	if (max_cache_level == 0) {
		if (stress_warn_once())
			pr_dbg("%s: using defaults, cannot determine cache level details\n", name);
		g_shared->mem_cache_size = MEM_CACHE_SIZE;
		goto init_done;
	}
	/* Clamp the requested cache level down to the deepest level found */
	if (g_shared->mem_cache_level > max_cache_level) {
		if (stress_warn_once())
			pr_dbg("%s: using cache maximum level L%d\n", name,
				max_cache_level);
		g_shared->mem_cache_level = max_cache_level;
	}

	cache = stress_get_cpu_cache(cpu_caches, g_shared->mem_cache_level);
	if (!cache) {
		if (stress_warn_once())
			pr_dbg("%s: using built-in defaults as no suitable "
				"cache found\n", name);
		g_shared->mem_cache_size = MEM_CACHE_SIZE;
		goto init_done;
	}

	if (g_shared->mem_cache_ways > 0) {
		uint64_t way_size;

		/* Cannot use more ways than the cache provides */
		if (g_shared->mem_cache_ways > cache->ways) {
			if (stress_warn_once())
				pr_inf("%s: cache way value too high - "
					"defaulting to %d (the maximum)\n",
					name, cache->ways);
			g_shared->mem_cache_ways = cache->ways;
		}
		way_size = cache->size / cache->ways;

		/* only fill the specified number of cache ways */
		g_shared->mem_cache_size = way_size * g_shared->mem_cache_ways;
	} else {
		/* fill the entire cache */
		g_shared->mem_cache_size = cache->size;
	}

	if (!g_shared->mem_cache_size) {
		if (stress_warn_once())
			pr_dbg("%s: using built-in defaults as "
				"unable to determine cache size\n", name);
		g_shared->mem_cache_size = MEM_CACHE_SIZE;
	}
init_done:
	stress_free_cpu_caches(cpu_caches);
#endif
	/* Shared anonymous mapping so all stressor processes see one buffer */
	g_shared->mem_cache =
		(uint8_t *)mmap(NULL, g_shared->mem_cache_size,
				PROT_READ | PROT_WRITE,
				MAP_SHARED | MAP_ANONYMOUS, -1, 0);
	if (g_shared->mem_cache == MAP_FAILED) {
		g_shared->mem_cache = NULL;
		pr_err("%s: failed to mmap shared cache buffer, errno=%d (%s)\n",
			name, errno, strerror(errno));
		return -1;
	}
	if (stress_warn_once())
		pr_dbg("%s: shared cache buffer size: %" PRIu64 "K\n",
			name, g_shared->mem_cache_size / 1024);

	return 0;
}
1243 
1244 /*
1245  *  stress_cache_free()
1246  *	free shared cache buffer
1247  */
stress_cache_free(void)1248 void stress_cache_free(void)
1249 {
1250 	if (g_shared->mem_cache)
1251 		(void)munmap((void *)g_shared->mem_cache, g_shared->mem_cache_size);
1252 }
1253 
1254 /*
1255  *  system_write()
1256  *	write a buffer to a /sys or /proc entry
1257  */
ssize_t system_write(
	const char *path,
	const char *buf,
	const size_t buf_len)
{
	ssize_t n;
	const int fd = open(path, O_WRONLY);

	/* Report open failures as a negated errno */
	if (fd < 0)
		return -errno;

	n = write(fd, buf, buf_len);
	/* Short or failed writes are also reported as a negated errno */
	if (n < (ssize_t)buf_len)
		n = -errno;
	(void)close(fd);

	return n;
}
1276 
1277 /*
1278  *  system_read()
1279  *	read a buffer from a /sys or /proc entry
1280  */
ssize_t system_read(
	const char *path,
	char *buf,
	const size_t buf_len)
{
	int fd;
	ssize_t ret;

	(void)memset(buf, 0, buf_len);

	fd = open(path, O_RDONLY);
	if (fd < 0)
		return -errno;
	ret = read(fd, buf, buf_len);
	if (ret < 0) {
		/*
		 *  Fix: return early on read failure; previously the
		 *  negative ret fell through to buf[ret] = '\0' which
		 *  wrote out of bounds (negative array index).
		 *  Capture errno before close() can clobber it.
		 */
		buf[0] = '\0';
		ret = -errno;
		(void)close(fd);
		return ret;
	}
	(void)close(fd);

	/* Guarantee NUL termination, truncating if the buffer was filled */
	if ((ssize_t)buf_len == ret)
		buf[buf_len - 1] = '\0';
	else
		buf[ret] = '\0';

	return ret;
}
1307 
1308 /*
1309  *  stress_is_prime64()
1310  *      return true if 64 bit value n is prime
1311  *      http://en.wikipedia.org/wiki/Primality_test
1312  */
bool stress_is_prime64(const uint64_t n)
{
	register uint64_t i;

	if (n <= 3)
		return n >= 2;
	if ((n % 2 == 0) || (n % 3 == 0))
		return false;
	/*
	 *  Trial division by 6k±1 candidates. The bound i <= n / i is
	 *  exactly equivalent to i * i <= n but cannot overflow and
	 *  avoids the double precision loss that sqrt() suffers for
	 *  values above 2^53.
	 */
	for (i = 5; i <= n / i; i += 6) {
		if ((n % i == 0) || (n % (i + 2) == 0))
			return false;
	}
	return true;
}

/*
 *  stress_get_prime64()
 *	find a prime that is not a multiple of n,
 *	used for file name striding
 */
uint64_t stress_get_prime64(const uint64_t n)
{
	/* uint64_t (not the non-portable 'uint') to match the return type */
	static uint64_t p = 1009;

	if (n != p)
		return p;

	/* Search for next prime.. */
	for (;;) {
		p += 2;

		if ((n % p) && stress_is_prime64(p))
			return p;
	}
}
1350 
1351 /*
1352  *  stress_get_max_file_limit()
1353  *	get max number of files that the current
1354  *	process can open not counting the files that
1355  *	may already been opened.
1356  */
size_t stress_get_max_file_limit(void)
{
	size_t limit_rlim = SIZE_MAX, limit_sc = SIZE_MAX;
#if defined(RLIMIT_NOFILE)
	struct rlimit rlim;

	/* Per-process soft limit on open files */
	if (getrlimit(RLIMIT_NOFILE, &rlim) == 0)
		limit_rlim = (size_t)rlim.rlim_cur;
#endif
#if defined(_SC_OPEN_MAX)
	{
		const long sc_max = sysconf(_SC_OPEN_MAX);

		if (sc_max > 0)
			limit_sc = (size_t)sc_max;
	}
#endif
	/* report the more restrictive of the two limits */
	return (limit_rlim < limit_sc) ? limit_rlim : limit_sc;
}
1381 
1382 /*
1383  *  stress_get_file_limit()
1384  *	get max number of files that the current
1385  *	process can open excluding currently opened
1386  *	files.
1387  */
size_t stress_get_file_limit(void)
{
	struct rlimit rlim;
	size_t fd, last_used = 0, used = 0, limit = 65536;	/* initial guess */

	if (!getrlimit(RLIMIT_NOFILE, &rlim))
		limit = (size_t)rlim.rlim_cur;

	/* Count file descriptors currently in use */
	for (fd = 0; fd < limit; fd++) {
		if (fcntl((int)fd, F_GETFL) > -1) {
			used++;
			last_used = fd;
			continue;
		}
		/*
		 *  Hack: Over 250 contiguously closed files
		 *  most probably indicates we're at the point
		 *  were no more opened file descriptors are
		 *  going to be found, so bail out rather then
		 *  scanning for any more opened files
		 */
		if (fd - last_used > 250)
			break;
	}
	return limit - used;
}
1417 
1418 /*
1419  *  stress_get_bad_fd()
1420  *	return a fd that will produce -EINVAL when using it
1421  *	either because it is not open or it is just out of range
1422  */
int stress_get_bad_fd(void)
{
#if defined(RLIMIT_NOFILE) &&	\
    defined(F_GETFL)
	struct rlimit rlim;

	(void)memset(&rlim, 0, sizeof(rlim));

	/* One past the fd soft limit is guaranteed out of range */
	if ((getrlimit(RLIMIT_NOFILE, &rlim) == 0) &&
	    (rlim.rlim_cur < INT_MAX - 1) &&
	    (fcntl((int)rlim.rlim_cur, F_GETFL) == -1))
		return (int)rlim.rlim_cur + 1;
#elif defined(F_GETFL)
	int fd;

	/* Fallback: scan down for a descriptor that is not open */
	for (fd = 2048; fd > fileno(stdout); fd--) {
		if (fcntl((int)fd, F_GETFL) == -1)
			return fd;
	}
#endif
	return -1;
}
1448 
1449 /*
1450  *  stress_sigaltstack_no_check()
1451  *	attempt to set up an alternative signal stack with no
1452  *	minimum size check on stack
1453  *	  stack - must be at least MINSIGSTKSZ
1454  *	  size  - size of stack (- STACK_ALIGNMENT)
1455  */
int stress_sigaltstack_no_check(void *stack, const size_t size)
{
#if defined(HAVE_SIGALTSTACK)
	stack_t ss = {
		.ss_sp = (void *)stack,
		.ss_size = size,
		.ss_flags = 0,
	};

	return sigaltstack(&ss, NULL);
#else
	(void)stack;
	(void)size;

	return 0;
#endif
}
1471 
1472 /*
1473  *  stress_sigaltstack()
1474  *	attempt to set up an alternative signal stack
1475  *	  stack - must be at least MINSIGSTKSZ
1476  *	  size  - size of stack (- STACK_ALIGNMENT)
1477  */
int stress_sigaltstack(void *stack, const size_t size)
{
#if defined(HAVE_SIGALTSTACK)
	/* Reject stacks smaller than the minimum the platform allows */
	if (size < (size_t)STRESS_MINSIGSTKSZ) {
		pr_err("sigaltstack stack size %zu must be more than %zuK\n",
			size, (size_t)STRESS_MINSIGSTKSZ / 1024);
		return -1;
	}

	if (stress_sigaltstack_no_check(stack, size) == 0)
		return 0;

	pr_fail("sigaltstack failed: errno=%d (%s)\n",
		errno, strerror(errno));
	return -1;
#else
	(void)stack;
	(void)size;

	return 0;
#endif
}
1498 
1499 /*
1500  *  stress_sighandler()
1501  *	set signal handler in generic way
1502  */
int stress_sighandler(
	const char *name,
	const int signum,
	void (*handler)(int),
	struct sigaction *orig_action)
{
	struct sigaction new_action;
#if defined(HAVE_SIGALTSTACK)
	{
		/* One shared alternative signal stack per process */
		static uint8_t *stack = NULL;

		if (stack == NULL) {
			/* Allocate stack, we currently leak this */
			stack = (uint8_t *)mmap(NULL, STRESS_SIGSTKSZ, PROT_READ | PROT_WRITE,
					MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
			if (stack == MAP_FAILED) {
				pr_inf("%s: sigaction %s: cannot allocated signal stack, "
					"errno = %d (%s)\n",
					name, stress_strsignal(signum),
					errno, strerror(errno));
				return -1;
			}
			if (stress_sigaltstack(stack, STRESS_SIGSTKSZ) < 0)
				return -1;
		}
	}
#endif
	(void)memset(&new_action, 0, sizeof new_action);
	new_action.sa_handler = handler;
	(void)sigemptyset(&new_action.sa_mask);
	/* Deliver on the alternative stack so stack-smashing stressors survive */
	new_action.sa_flags = SA_ONSTACK;

	if (sigaction(signum, &new_action, orig_action) < 0) {
		pr_fail("%s: sigaction %s: errno=%d (%s)\n",
			name, stress_strsignal(signum), errno, strerror(errno));
		return -1;
	}
	return 0;
}
1542 
1543 /*
1544  *  stress_sighandler_default
1545  *	restore signal handler to default handler
1546  */
stress_sighandler_default(const int signum)1547 int stress_sighandler_default(const int signum)
1548 {
1549 	struct sigaction new_action;
1550 
1551 	(void)memset(&new_action, 0, sizeof new_action);
1552 	new_action.sa_handler = SIG_DFL;
1553 
1554 	return sigaction(signum, &new_action, NULL);
1555 }
1556 
1557 /*
1558  *  stress_handle_stop_stressing()
1559  *	set flag to indicate to stressor to stop stressing
1560  */
void stress_handle_stop_stressing(int signum)
{
	(void)signum;

	/* Clear the global "keep stressing" flag */
	keep_stressing_set_flag(false);
	/*
	 * Trigger another SIGALRM until stressor gets the message
	 * that it needs to terminate
	 */
	(void)alarm(1);
}
1572 
1573 /*
1574  *  stress_sig_stop_stressing()
1575  *	install a handler that sets the global flag
1576  *	to indicate to a stressor to stop stressing
1577  */
int stress_sig_stop_stressing(const char *name, const int sig)
{
	/* Delegate to the generic installer; no original action is saved */
	return stress_sighandler(name, sig, stress_handle_stop_stressing, NULL);
}
1582 
1583 /*
1584  *  stress_sigrestore()
1585  *	restore a handler
1586  */
int stress_sigrestore(
	const char *name,
	const int signum,
	struct sigaction *orig_action)
{
	/* Reinstall the previously saved action for signum */
	if (sigaction(signum, orig_action, NULL) >= 0)
		return 0;

	pr_fail("%s: sigaction %s restore: errno=%d (%s)\n",
		name, stress_strsignal(signum), errno, strerror(errno));
	return -1;
}
1599 
1600 /*
1601  *  stress_get_cpu()
1602  *	get cpu number that process is currently on
1603  */
unsigned int stress_get_cpu(void)
{
#if defined(HAVE_SCHED_GETCPU) &&	\
    !defined(__PPC64__) &&		\
    !defined(__s390x__)
	const int cpu = sched_getcpu();

	/* Treat a failed lookup as CPU 0 */
	return (cpu < 0) ? 0 : (unsigned int)cpu;
#else
	return 0;
#endif
}
1616 
1617 #define XSTRINGIFY(s) STRINGIFY(s)
1618 #define STRINGIFY(s) #s
1619 
1620 /*
1621  *  stress_get_compiler()
1622  *	return compiler info
1623  */
const char *stress_get_compiler(void)
{
	/* Compiler name/version is baked in at build time via predefined macros */
#if defined(__clang_major__) &&	\
    defined(__clang_minor__)
	static const char cc[] = "clang " XSTRINGIFY(__clang_major__) "." XSTRINGIFY(__clang_minor__) "";
#elif defined(__GNUC__) &&	\
      defined(__GNUC_MINOR__)
	static const char cc[] = "gcc " XSTRINGIFY(__GNUC__) "." XSTRINGIFY(__GNUC_MINOR__) "";
#else
	static const char cc[] = "cc unknown";
#endif
	return cc;
}
1637 
1638 /*
1639  *  stress_get_uname_info()
1640  *	return uname information
1641  */
const char *stress_get_uname_info(void)
{
#if defined(HAVE_UNAME) &&	\
    defined(HAVE_SYS_UTSNAME_H)
	struct utsname buf;

	if (uname(&buf) == 0) {
		/* Static: the returned pointer must outlive this call */
		static char str[sizeof(buf.machine) +
	                        sizeof(buf.sysname) +
				sizeof(buf.release) + 3];

		(void)snprintf(str, sizeof(str), "%s %s %s", buf.machine, buf.sysname, buf.release);
		return str;
	}
#endif
	return "unknown";
}
1659 
1660 /*
1661  *  stress_not_implemented()
1662  *	report that a stressor is not implemented
1663  *	on a particular arch or kernel
1664  */
int stress_not_implemented(const stress_args_t *args)
{
	static const char msg[] = "this stressor is not implemented on "
				  "this system";
	/* Only the first instance reports, to avoid duplicated messages */
	if (args->instance == 0) {
#if defined(HAVE_UNAME) &&	\
    defined(HAVE_SYS_UTSNAME_H)
		struct utsname buf;

		/* Include platform details when uname is available */
		if (!uname(&buf)) {
			pr_inf_skip("%s: %s: %s %s\n",
				args->name, msg, stress_get_uname_info(),
				stress_get_compiler());
			return EXIT_NOT_IMPLEMENTED;
		}
#endif
		pr_inf_skip("%s: %s: %s\n",
			args->name, msg, stress_get_compiler());
	}
	return EXIT_NOT_IMPLEMENTED;
}
1686 
1687 #if defined(F_SETPIPE_SZ)
1688 /*
1689  *  stress_check_max_pipe_size()
1690  *	check if the given pipe size is allowed
1691  */
stress_check_max_pipe_size(const size_t sz,const size_t page_size)1692 static inline int stress_check_max_pipe_size(
1693 	const size_t sz,
1694 	const size_t page_size)
1695 {
1696 	int fds[2];
1697 
1698 	if (sz < page_size)
1699 		return -1;
1700 
1701 	if (pipe(fds) < 0)
1702 		return -1;
1703 
1704 	if (fcntl(fds[0], F_SETPIPE_SZ, sz) < 0)
1705 		return -1;
1706 
1707 	(void)close(fds[0]);
1708 	(void)close(fds[1]);
1709 	return 0;
1710 }
1711 #endif
1712 
1713 /*
1714  *  stress_probe_max_pipe_size()
1715  *	determine the maximum allowed pipe size
1716  */
size_t stress_probe_max_pipe_size(void)
{
	static size_t max_pipe_size;

#if defined(F_SETPIPE_SZ)
	ssize_t ret;
	size_t i, prev_sz, sz, min, max;
	char buf[64];
	size_t page_size;
#endif
	/* Already determined? return the cached size */
	if (max_pipe_size)
		return max_pipe_size;

#if defined(F_SETPIPE_SZ)
	page_size = stress_get_pagesize();

	/*
	 *  Try and find maximum pipe size directly
	 */
	ret = system_read("/proc/sys/fs/pipe-max-size", buf, sizeof(buf));
	if (ret > 0) {
		/* fix: %zu (not %zd) is the conversion for unsigned size_t */
		if (sscanf(buf, "%zu", &sz) == 1)
			if (!stress_check_max_pipe_size(sz, page_size))
				goto ret;
	}

	/*
	 *  Need to find size by binary chop probing
	 */
	min = page_size;
	max = INT_MAX;
	prev_sz = 0;
	sz = 0;
	for (i = 0; i < 64; i++) {
		sz = min + (max - min) / 2;
		/* Converged: no change between iterations */
		if (prev_sz == sz)
			return sz;
		prev_sz = sz;
		if (stress_check_max_pipe_size(sz, page_size) == 0) {
			min = sz;
		} else {
			max = sz;
		}
	}
ret:
	max_pipe_size = sz;
#else
	/* No F_SETPIPE_SZ support, assume one page */
	max_pipe_size = stress_get_pagesize();
#endif
	return max_pipe_size;
}
1769 
1770 /*
1771  *  stress_align_address
1772  *	align address to alignment, alignment MUST be a power of 2
1773  */
void *stress_align_address(const void *addr, const size_t alignment)
{
	/*
	 *  Round up to the NEXT aligned boundary: because alignment
	 *  (not alignment - 1) is added, the result always advances
	 *  past addr, even when addr is already aligned.
	 */
	const uintptr_t mask = (uintptr_t)(alignment - 1);
	const uintptr_t aligned = ((uintptr_t)addr + alignment) & ~mask;

	return (void *)aligned;
}
1781 
1782 /*
1783  *  stress_sigalrm_pending()
1784  *	return true if SIGALRM is pending
1785  */
stress_sigalrm_pending(void)1786 bool stress_sigalrm_pending(void)
1787 {
1788 	sigset_t set;
1789 
1790 	(void)sigemptyset(&set);
1791 	(void)sigpending(&set);
1792 	return sigismember(&set, SIGALRM);
1793 
1794 }
1795 
1796 /*
1797  *  stress_uint64_to_str()
1798  *	turn 64 bit size to human readable string
1799  */
stress_uint64_to_str(char * str,size_t len,const uint64_t val)1800 char *stress_uint64_to_str(char *str, size_t len, const uint64_t val)
1801 {
1802 	typedef struct {
1803 		uint64_t size;
1804 		char *suffix;
1805 	} stress_size_info_t;
1806 
1807 	static const stress_size_info_t size_info[] = {
1808 		{ EB, "E" },
1809 		{ PB, "P" },
1810 		{ TB, "T" },
1811 		{ GB, "G" },
1812 		{ MB, "M" },
1813 		{ KB, "K" },
1814 	};
1815 	size_t i;
1816 	char *suffix = "";
1817 	uint64_t scale = 1;
1818 
1819 	for (i = 0; i < SIZEOF_ARRAY(size_info); i++) {
1820 		uint64_t scaled = val / size_info[i].size;
1821 
1822 		if ((scaled >= 1) && (scaled < 1024)) {
1823 			suffix = size_info[i].suffix;
1824 			scale = size_info[i].size;
1825 			break;
1826 		}
1827 	}
1828 
1829 	(void)snprintf(str, len, "%.1f%s", (double)val / (double)scale, suffix);
1830 
1831 	return str;
1832 }
1833 
1834 /*
1835  *  stress_check_root()
1836  *	returns true if root
1837  */
static bool stress_check_root(void)
{
	/* Effective UID of zero means we are running as root */
	return geteuid() == 0;
}
1842 
1843 #if defined(HAVE_SYS_CAPABILITY_H)
1844 /*
1845  *  stress_check_capability()
1846  *	returns true if process has the given capability,
1847  *	if capability is SHIM_CAP_IS_ROOT then just check if process is
1848  *	root.
1849  */
stress_check_capability(const int capability)1850 bool stress_check_capability(const int capability)
1851 {
1852 	int ret;
1853 	struct __user_cap_header_struct uch;
1854 	struct __user_cap_data_struct ucd[_LINUX_CAPABILITY_U32S_3];
1855 	uint32_t mask;
1856 	size_t idx;
1857 
1858 	if (capability == SHIM_CAP_IS_ROOT)
1859 		return stress_check_root();
1860 
1861 	(void)memset(&uch, 0, sizeof uch);
1862 	(void)memset(ucd, 0, sizeof ucd);
1863 
1864 	uch.version = _LINUX_CAPABILITY_VERSION_3;
1865 	uch.pid = getpid();
1866 
1867 	ret = capget(&uch, ucd);
1868 	if (ret < 0)
1869 		return stress_check_root();
1870 
1871 	idx = (size_t)CAP_TO_INDEX(capability);
1872 	mask = CAP_TO_MASK(capability);
1873 
1874 	return (ucd[idx].permitted &= mask) ? true : false;
1875 }
1876 #else
bool stress_check_capability(const int capability)
{
	(void)capability;

	/* No capability API on this system, fall back to a root check */
	return stress_check_root();
}
1883 #endif
1884 
1885 #if defined(HAVE_SYS_CAPABILITY_H)
int stress_drop_capabilities(const char *name)
{
	int ret;
	uint32_t i;
	struct __user_cap_header_struct uch;
	struct __user_cap_data_struct ucd[_LINUX_CAPABILITY_U32S_3];

	(void)memset(&uch, 0, sizeof uch);
	(void)memset(ucd, 0, sizeof ucd);

	uch.version = _LINUX_CAPABILITY_VERSION_3;
	uch.pid = getpid();

	/* Fetch the current capability sets for this process */
	ret = capget(&uch, ucd);
	if (ret < 0) {
		pr_fail("%s: capget on pid %d failed: errno=%d (%s)\n",
			name, uch.pid, errno, strerror(errno));
		return -1;
	}

	/*
	 *  We could just memset ucd to zero, but
	 *  lets explicitly set all the capability
	 *  bits to zero to show the intent
	 */
	for (i = 0; i <= CAP_LAST_CAP; i++) {
		uint32_t idx = CAP_TO_INDEX(i);
		uint32_t mask = CAP_TO_MASK(i);

		ucd[idx].inheritable &= ~mask;
		ucd[idx].permitted &= ~mask;
		ucd[idx].effective &= ~mask;
	}

	/* Write the cleared sets back, dropping all capabilities */
	ret = capset(&uch, ucd);
	if (ret < 0) {
		pr_fail("%s: capset on pid %d failed: errno=%d (%s)\n",
			name, uch.pid, errno, strerror(errno));
		return -1;
	}
#if defined(HAVE_PRCTL) &&		\
    defined(HAVE_SYS_PRCTL_H) &&	\
    defined(PR_SET_NO_NEW_PRIVS)
	/* Also stop execve() from re-granting privileges (e.g. setuid) */
	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	if (ret < 0) {
		pr_inf("%s: prctl PR_SET_NO_NEW_PRIVS on pid %d failed: "
			"errno=%d (%s)\n",
			name, uch.pid, errno, strerror(errno));
		return -1;
	}
#endif
	return 0;
}
1939 #else
int stress_drop_capabilities(const char *name)
{
	(void)name;

	/* No capability support on this system, nothing to drop */
	return 0;
}
1946 #endif
1947 
1948 /*
1949  *  stress_is_dot_filename()
1950  *	is filename "." or ".."
1951  */
bool stress_is_dot_filename(const char *name)
{
	/* True for the special directory entries "." and ".." */
	return (strcmp(name, ".") == 0) || (strcmp(name, "..") == 0);
}
1960 
1961 /*
1962  *  stress_const_optdup(const char *opt)
1963  *	duplicate a modifiable copy of a const option string opt
1964  */
char *stress_const_optdup(const char *opt)
{
	char *copy = strdup(opt);

	/* Caller owns and must free the returned string */
	if (!copy)
		(void)fprintf(stderr, "out of memory duplicating option '%s'\n", opt);

	return copy;
}
1974 
1975 /*
1976  *  stress_text_addr()
1977  *	return length and start/end addresses of text segment
1978  */
size_t stress_text_addr(char **start, char **end)
{
	/* Start-of-text linker/loader symbol varies per platform/toolchain */
#if defined(HAVE_EXECUTABLE_START)
	extern char __executable_start;
	intptr_t text_start = (intptr_t)&__executable_start;
#elif defined(__APPLE__)
        extern char _mh_execute_header;
        intptr_t text_start = (intptr_t)&_mh_execute_header;
#elif defined(__OpenBSD__)
        extern char _start[];
        intptr_t text_start = (intptr_t)&_start[0];
#elif defined(__TINYC__)
        extern char _start;
        intptr_t text_start = (intptr_t)&_start;
#else
        extern char _start;
        intptr_t text_start = (intptr_t)&_start;
#endif

	/* End-of-text symbol likewise varies */
#if defined(__APPLE__)
        extern void *get_etext(void);
        intptr_t text_end = (intptr_t)get_etext();
#elif defined(__TINYC__)
        extern char _etext;
        intptr_t text_end = (intptr_t)&_etext;
#else
        extern char etext;
        intptr_t text_end = (intptr_t)&etext;
#endif
        const size_t text_len = (size_t)(text_end - text_start);

	/* Sanity check arguments and that the range is non-degenerate */
	if ((start == NULL) || (end == NULL) || (text_start >= text_end))
		return 0;

	*start = (char *)text_start;
	*end = (char *)text_end;

	return text_len;
}
2018 
2019 /*
2020  *  stress_is_dev_tty()
2021  *	return true if fd is on a /dev/ttyN device. If it can't
2022  *	be determined than default to assuming it is.
2023  */
bool stress_is_dev_tty(const int fd)
{
#if defined(HAVE_TTYNAME)
	const char *name = ttyname(fd);

	/* Name lookup failed? Assume it is a tty */
	if (!name)
		return true;

	return strncmp("/dev/tty", name, 8) == 0;
#else
	(void)fd;

	/* Assume it is */
	return true;
#endif
}
2039 
2040 /*
2041  *  stress_dirent_list_free()
2042  *	free dirent list
2043  */
void stress_dirent_list_free(struct dirent **dlist, const int n)
{
	int i;

	if (!dlist)
		return;

	/* free(NULL) entries is a no-op, so no per-entry guard needed */
	for (i = 0; i < n; i++)
		free(dlist[i]);
	free(dlist);
}
2056 
2057 /*
2058  *  stress_dirent_list_prune()
2059  *	remove . and .. files from directory list
2060  */
int stress_dirent_list_prune(struct dirent **dlist, const int n)
{
	int i, j;

	for (i = 0, j = 0; i < n; i++) {
		if (!dlist[i])
			continue;

		if (stress_is_dot_filename(dlist[i]->d_name)) {
			free(dlist[i]);
			dlist[i] = NULL;
		} else {
			/* Keep entry, compacting it towards the front */
			dlist[j] = dlist[i];
			/*
			 *  Hardening: NULL the vacated slot so no stale
			 *  duplicate pointer remains beyond the pruned
			 *  count (guards against a double free if the
			 *  list is ever freed with the original n)
			 */
			if (i != j)
				dlist[i] = NULL;
			j++;
		}
	}
	/* Number of entries kept */
	return j;
}
2078 
2079 /*
2080  *  stress_warn_once_hash()
2081  *	computes a hash for a filename and a line and stores it,
2082  *	returns true if this is the first time this has been
2083  *	called for that specific filename and line
2084  *
2085  *	Without libpthread this is potentially racy.
2086  */
bool stress_warn_once_hash(const char *filename, const int line)
{
	uint32_t free_slot, i, j, h = (stress_hash_pjw(filename) + (uint32_t)line);
	bool not_warned_yet = true;
#if defined(HAVE_LIB_PTHREAD)
        int ret;
#endif
	/* No shared state yet, so warn every time to be safe */
	if (!g_shared)
		return true;

#if defined(HAVE_LIB_PTHREAD)
        ret = shim_pthread_spin_lock(&g_shared->warn_once.lock);
#endif
	free_slot = STRESS_WARN_HASH_MAX;

	/*
	 * Ensure hash is never zero so that it does not
	 * match an empty slot value of zero
	 */
	if (h == 0)
		h += STRESS_WARN_HASH_MAX;

	/* Open-addressed linear probe over the shared hash table */
	j = h % STRESS_WARN_HASH_MAX;
	for (i = 0; i < STRESS_WARN_HASH_MAX; i++) {
		if (g_shared->warn_once.hash[j] == h) {
			/* Seen before: suppress this warning */
			not_warned_yet = false;
			goto unlock;
		}
		/* Remember the first empty slot for insertion */
		if ((free_slot == STRESS_WARN_HASH_MAX) &&
		    (g_shared->warn_once.hash[j] == 0)) {
			free_slot = j;
		}
		j = (j + 1) % STRESS_WARN_HASH_MAX;
	}
	/* Not found: record it (if the table is full the warning repeats) */
	if (free_slot != STRESS_WARN_HASH_MAX) {
		g_shared->warn_once.hash[free_slot] = h;
	}
unlock:
#if defined(HAVE_LIB_PTHREAD)
        if (!ret)
                shim_pthread_spin_unlock(&g_shared->warn_once.lock);
#endif
        return not_warned_yet;
}
2131 
2132 /*
2133  *  stress_ipv4_checksum()
2134  *	ipv4 data checksum
2135  */
uint16_t HOT OPTIMIZE3 stress_ipv4_checksum(uint16_t *ptr, const size_t sz)
{
	register uint32_t sum = 0;
	register size_t n = sz;

	/* Sum the data as 16-bit words */
	while (n > 1) {
		sum += *ptr++;
		n -= 2;
	}

	/* Add any trailing odd byte */
	if (n)
		sum += *(uint8_t*)ptr;
	/* Fold the 32-bit sum into 16 bits, adding the carries back in */
	sum = (sum >> 16) + (sum & 0xffff);
	sum += (sum >> 16);

	/* One's complement of the folded sum */
	return (uint16_t)~sum;
}
2153 
2154 #if defined(HAVE_SETPWENT) &&	\
2155     defined(HAVE_GETPWENT) &&	\
2156     defined(HAVE_ENDPWENT) &&	\
2157     !defined(BUILD_STATIC)
stress_uid_comp(const void * p1,const void * p2)2158 static int stress_uid_comp(const void *p1, const void *p2)
2159 {
2160 	const uid_t *uid1 = (const uid_t *)p1;
2161 	const uid_t *uid2 = (const uid_t *)p2;
2162 
2163 	if (*uid1 > *uid2)
2164 		return 1;
2165 	else if (*uid1 < *uid2)
2166 		return -1;
2167 	else
2168 		return 0;
2169 }
2170 
2171 /*
2172  *  stress_get_unused_uid()
2173  *	find the lowest free unused UID greater than 250,
2174  *	returns -1 if it can't find one and uid is set to 0;
2175  *      if successful it returns 0 and sets uid to the free uid.
2176  *
2177  *	This also caches the uid so this can be called
2178  *	frequently. If the cached uid is in use it will
2179  *	perform the expensive lookup again.
2180  */
stress_get_unused_uid(uid_t * uid)2181 int stress_get_unused_uid(uid_t *uid)
2182 {
2183 	static uid_t cached_uid = 0;
2184 	uid_t *uids;
2185 
2186 	*uid = 0;
2187 
2188 	/*
2189 	 *  If we have a cached unused uid and it's no longer
2190 	 *  unused then force a rescan for a new one
2191 	 */
2192 	if ((cached_uid != 0) && (getpwuid(cached_uid) != NULL))
2193 		cached_uid = 0;
2194 
2195 	if (cached_uid == 0) {
2196 		struct passwd *pw;
2197 		size_t i, n;
2198 
2199 		setpwent();
2200 		for (n = 0; getpwent() != NULL; n++) {
2201 		}
2202 		endpwent();
2203 
2204 		uids = calloc(n, sizeof(*uids));
2205 		if (!uids)
2206 			return -1;
2207 
2208 		setpwent();
2209 		for (i = 0; i < n && (pw = getpwent()) != NULL; i++) {
2210 			uids[i] = pw->pw_uid;
2211 		}
2212 		endpwent();
2213 		n = i;
2214 
2215 		qsort(uids, n, sizeof(*uids), stress_uid_comp);
2216 
2217 		/* Look for a suitable gap from uid 250 upwards */
2218 		for (i = 0; i < n - 1; i++) {
2219 			/*
2220 			 *  Add a large gap in case new uids
2221 			 *  are added to reduce free uid race window
2222 			 */
2223 			const uid_t uid_try = uids[i] + 250;
2224 
2225 			if (uids[i + 1] > uid_try) {
2226 				if (getpwuid(uid_try) == NULL) {
2227 					cached_uid = uid_try;
2228 					break;
2229 				}
2230 			}
2231 		}
2232 		free(uids);
2233 	}
2234 
2235 	/*
2236 	 *  Not found?
2237 	 */
2238 	if (cached_uid == 0)
2239 		return -1;
2240 
2241 	*uid = cached_uid;
2242 
2243 	return 0;
2244 }
2245 #else
int stress_get_unused_uid(uid_t *uid)
{
	/* No passwd database iteration support, always report failure */
	*uid = 0;

	return -1;
}
2252 #endif
2253 
2254 /*
2255  *  stress_read_buffer()
2256  *	In addition to read() this function makes sure all bytes have been
2257  *	written. You're also able to ignore EINTR interrupts which could happen
2258  *	on alarm() in the parent process.
2259  */
ssize_t stress_read_buffer(int fd, void* buffer, ssize_t size, bool ignore_int)
{
	ssize_t rbytes = 0, ret;

	do {
		char *ptr = ((char *)buffer) + rbytes;
ignore_eintr:

		ret = read(fd, (void *)ptr, (size_t)(size - rbytes));
		/* retry if interrupted (when requested) */
		if (ignore_int && (ret < 0) && (errno == EINTR))
			goto ignore_eintr;
		if (ret > 0)
			rbytes += ret;
	} while (ret > 0 && (rbytes != size));

	/* fix: %zd is the conversion for ssize_t; %ld was a mismatch on
	   platforms where ssize_t is not long */
	pr_dbg_v("stress_read_buffer: size=%zd read=%zd sz2=%zd\n", size, rbytes, ret);

	/* NOTE(review): EOF after a partial read returns 0, discarding the
	   partial count — presumably intentional; confirm with callers */
	return (ret <= 0)? ret : rbytes;
}
2279 
2280 /*
2281  *  stress_write_buffer()
2282  *	In addition to write() this function makes sure all bytes have been
2283  *	written. You're also able to ignore EINTR interrupts which could happen
2284  *	on alarm() in the parent process.
2285  */
ssize_t stress_write_buffer(int fd, void* buffer, ssize_t size, bool ignore_int)
{
	ssize_t wbytes = 0, ret;

	do {
		char *ptr = ((char *)buffer) + wbytes;
ignore_eintr:
		ret = write(fd, (void *)ptr, (size_t)(size - wbytes));
		/* retry if interrupted */
		if (ignore_int && (ret < 0) && (errno == EINTR))
			goto ignore_eintr;
		if (ret > 0)
			wbytes += ret;
	} while (ret > 0 && (wbytes != size));

	/* fix: %zd is the conversion for ssize_t; %ld was a mismatch on
	   platforms where ssize_t is not long */
	pr_dbg_v("stress_write_buffer: size=%zd written=%zd sz2=%zd\n", size, wbytes, ret);

	return (ret <= 0)? ret : wbytes;
}
2305 
/*
 *  stress_kernel_release()
 *	encode a release major.minor.patchlevel triplet as a single
 *	base-100 integer, e.g. 4.15.2 -> 41502
 */
int stress_kernel_release(const int major, const int minor, const int patchlevel)
{
	int release = major;

	release = (release * 100) + minor;
	release = (release * 100) + patchlevel;

	return release;
}
2314 
/*
 *  stress_get_kernel_release()
 *	return kernel release number in base 100, e.g.
 *	 4.15.2 -> 41502, return -1 if failed.
 */
int stress_get_kernel_release(void)
{
#if defined(HAVE_UNAME)
	struct utsname buf;
	int major = 0, minor = 0, patchlevel = 0;

	if (uname(&buf) < 0)
		return -1;

	/* partial matches (e.g. "5.10") are OK; unmatched fields stay 0 */
	if (sscanf(buf.release, "%d.%d.%d\n", &major, &minor, &patchlevel) < 1)
		return -1;

	return stress_kernel_release(major, minor, patchlevel);
#else
	return -1;
#endif
}
2337 
/*
 *  stress_get_unused_pid_racy()
 *	try to find an unused pid. This is racy and may actually
 *	return pid that is unused at test time but will become
 *	used by the time the pid is accessed.
 */
pid_t stress_get_unused_pid_racy(const bool fork_test)
{
	char buf[64];
#if defined(PID_MAX_LIMIT)
	pid_t max_pid = PID_MAX_LIMIT;
#elif defined(PID_MAX)
	pid_t max_pid = PID_MAX;
#elif defined(PID_MAX_DEFAULT)
	pid_t max_pid = PID_MAX_DEFAULT;
#else
	pid_t max_pid = 32767;
#endif
	int i;
	pid_t pid;
	uint32_t n;

	/* prefer the kernel's configured limit over compile-time guesses */
	(void)memset(buf, 0, sizeof(buf));
	if (system_read("/proc/sys/kernel/pid_max", buf, sizeof(buf) - 1) > 0) {
		max_pid = atoi(buf);
	}
	/* sanity clamp in case the value was missing or unparsable */
	if (max_pid < 1024)
		max_pid = 1024;

	/*
	 *  Create a child, terminate it, use this pid as an unused
	 *  pid. Slow but should be OK if system doesn't recycle PIDs
	 *  quickly.
	 */
	if (fork_test) {
		pid = fork();
		if (pid == 0) {
			/* child: exit at once so its pid becomes free */
			_exit(0);
		} else if (pid > 0) {
			int status, ret;

			ret = waitpid(pid, &status, 0);
			/* kill(pid, 0) probes for existence; ESRCH => pid free */
			if ((ret == pid) &&
			    ((kill(pid, 0) < 0) && (errno == ESRCH))) {
				return pid;
			}
		}
		/* fork failed or pid already recycled: fall through to guessing */
	}

	/*
	 *  Make a random PID guess in the range [1023, max_pid - 1].
	 */
	n = (uint32_t)max_pid - 1023;
	for (i = 0; i < 20; i++) {
		pid = (pid_t)(stress_mwc32() % n) + 1023;

		if ((kill(pid, 0) < 0) && (errno == ESRCH))
			return pid;
	}

	/*
	 *  Give up.
	 */
	return max_pid;
}
2403 
2404 /*
2405  *  stress_read_fdinfo()
2406  *	read the fdinfo for a specific pid's fd, Linux only
2407  */
stress_read_fdinfo(const pid_t pid,const int fd)2408 int stress_read_fdinfo(const pid_t pid, const int fd)
2409 {
2410 #if defined(__linux__)
2411 	char path[PATH_MAX];
2412 	char buf[4096];
2413 
2414 	(void)snprintf(path, sizeof(path), "/proc/%d/fdinfo/%d",
2415                 (int)pid, fd);
2416 
2417         return (int)system_read(path, buf, sizeof(buf));
2418 #else
2419 	(void)pid;
2420 	(void)fd;
2421 
2422 	return 0;
2423 #endif
2424 }
2425 
/*
 *  stress_hostname_length()
 *	return the maximum allowed hostname length
 *	(buffer size, including room for the terminating NUL)
 */
size_t stress_hostname_length(void)
{
#if defined(HOST_NAME_MAX)
	return HOST_NAME_MAX + 1;
#elif defined(HAVE_UNAME) && \
      defined(HAVE_SYS_UTSNAME_H)
	struct utsname uts;

	return sizeof(uts.nodename);	/* Linux */
#else
	return 255 + 1;			/* SUSv2 */
#endif
}
2443 
/*
 *  stress_min_aux_sig_stack_size()
 *	For ARM we should check AT_MINSIGSTKSZ as this
 *	also includes SVE register saving overhead
 *	https://blog.linuxplumbersconf.org/2017/ocw/system/presentations/4671/original/plumbers-dm-2017.pdf
 *
 *	Returns the kernel-reported minimum signal stack size,
 *	or -1 if it cannot be determined.
 */
static inline long stress_min_aux_sig_stack_size(void)
{
#if defined(HAVE_GETAUXVAL) &&	\
    defined(AT_MINSIGSTKSZ)
	long sz = getauxval(AT_MINSIGSTKSZ);

	if (sz > 0)
		return sz;
#endif
	return -1;	/* unknown / not supported */
}
2461 
/*
 *  stress_sig_stack_size()
 *	wrapper for STRESS_SIGSTKSZ, try and find
 *	stack size required
 */
size_t stress_sig_stack_size(void)
{
	/* static: computed once, then cached for all later calls */
	static long sz = -1, min;

	/* return cached copy */
	if (sz > 0)
		return sz;

	/* auxv hint first (ARM SVE needs extra space), -1 if unknown */
	min = stress_min_aux_sig_stack_size();
#if defined(_SC_SIGSTKSZ)
	sz = sysconf(_SC_SIGSTKSZ);
	if (sz > min)
		min = sz;
#endif
#if defined(SIGSTKSZ)
	if (SIGSTKSZ > min) {
		/* SIGSTKSZ may be sysconf(_SC_SIGSTKSZ) */
		min = SIGSTKSZ;
		if (min < 0)	/* sysconf() can fail, returning -1 */
			min = 8192;
	}
#else
	if (8192 > min)
		min = 8192;	/* fallback floor when SIGSTKSZ is unknown */
#endif
	sz = min;

	return (size_t)sz;
}
2496 
/*
 *  stress_min_sig_stack_size()
 *	wrapper for STRESS_MINSIGSTKSZ
 */
size_t stress_min_sig_stack_size(void)
{
	/* static: computed once, then cached for all later calls */
	static long sz = -1, min;

	/* return cached copy */
	if (sz > 0)
		return sz;

	/* auxv hint first (ARM SVE needs extra space), -1 if unknown */
	min = stress_min_aux_sig_stack_size();
#if defined(_SC_MINSIGSTKSZ)
	sz = sysconf(_SC_MINSIGSTKSZ);
	if (sz > min)
		min = sz;
#endif
#if defined(SIGSTKSZ)
	if (SIGSTKSZ > min) {
		/* SIGSTKSZ may be sysconf(_SC_SIGSTKSZ) */
		min = SIGSTKSZ;
		if (min < 0)	/* sysconf() can fail, returning -1 */
			min = 8192;
	}
#else
	if (8192 > min)
		min = 8192;	/* fallback floor when SIGSTKSZ is unknown */
#endif
	sz = min;

	return (size_t)sz;
}
2530 
/*
 *  stress_min_pthread_stack_size()
 *	return the minimum size of stack for a pthread
 */
size_t stress_min_pthread_stack_size(void)
{
	/* static: computed once, then cached for all later calls */
	static long sz = -1, min;

	/* return cached copy */
	if (sz > 0)
		return sz;

	/* auxv hint first (ARM SVE needs extra space), -1 if unknown */
	min = stress_min_aux_sig_stack_size();
#if defined(__SC_THREAD_STACK_MIN_VALUE)
	sz = sysconf(__SC_THREAD_STACK_MIN_VALUE);
	if (sz > min)
		min = sz;
#endif
#if defined(_SC_THREAD_STACK_MIN_VALUE)
	sz = sysconf(_SC_THREAD_STACK_MIN_VALUE);
	if (sz > min)
		min = sz;
#endif
#if defined(PTHREAD_STACK_MIN)
	if (PTHREAD_STACK_MIN > min)
		min = PTHREAD_STACK_MIN;
#endif
	/* enforce a sane lower bound */
	if (8192 > min)
		min = 8192;

	sz = min;

	return (size_t)sz;
}
2565 
/*
 *  stress_sig_handler_exit()
 *	signal handler that exits a process via _exit(0) for
 *	immediate dead stop termination.
 */
void NORETURN MLOCKED_TEXT stress_sig_handler_exit(int signum)
{
	(void)signum;	/* signal number is irrelevant, always exit */

	/* _exit() is async-signal-safe, exit() is not */
	_exit(0);
}
2577 
/*
 *  __stack_chk_fail()
 *	override stack smashing callback
 */
#if (defined(__GNUC__) || defined(__clang__)) &&	\
    defined(HAVE_WEAK_ATTRIBUTE)
extern void __stack_chk_fail(void);

/* weak symbol: a libc-provided strong definition takes precedence */
NORETURN WEAK void __stack_chk_fail(void)
{
	if (stress_stack_check_flag) {
		(void)fprintf(stderr, "Stack overflow detected! Aborting stress-ng.\n");
		(void)fflush(stderr);
		abort();
	}
	/* silently exit */
	_exit(0);
}
#endif
2597 
/*
 *  stress_set_stack_smash_check_flag()
 *	set flag, true = report flag, false = silently ignore
 *	(controls how __stack_chk_fail() above reacts)
 */
void stress_set_stack_smash_check_flag(const bool flag)
{
	stress_stack_check_flag = flag;
}
2606 
/*
 *  stress_tty_width()
 *	return the tty column width of stdout, or a
 *	default of 80 columns if it cannot be determined
 */
int stress_tty_width(void)
{
	const int max_width = 80;
#if defined(HAVE_WINSIZE) &&	\
    defined(TIOCGWINSZ)
	struct winsize ws;
	int cols;

	if (ioctl(fileno(stdout), TIOCGWINSZ, &ws) < 0)
		return max_width;	/* not a tty or ioctl unsupported */
	cols = (int)ws.ws_col;
	/* reject nonsense widths */
	return ((cols < 0) || (cols > 1024)) ? max_width : cols;
#else
	return max_width;
#endif
}
2626 
/*
 *  stress_get_extents()
 *	try to determine number extents in a file,
 *	returns 0 if it cannot be determined
 */
size_t stress_get_extents(const int fd)
{
#if defined(FS_IOC_FIEMAP) &&	\
    defined(HAVE_LINUX_FIEMAP_H)
	struct fiemap fm;

	(void)memset(&fm, 0, sizeof(fm));
	fm.fm_length = ~0UL;	/* map the whole file */

	/* query extent count only, no extent data requested */
	return (ioctl(fd, FS_IOC_FIEMAP, &fm) < 0) ? 0 : fm.fm_mapped_extents;
#else
	(void)fd;

	return 0;
#endif
}
2651 
/*
 *  stress_redo_fork()
 *	check fork errno (in err) and return true if
 *	an immediate fork can be retried due to known
 *	error cases that are retryable. Also force a
 *	scheduling yield.
 */
bool stress_redo_fork(const int err)
{
	const bool retryable =
		(err == EAGAIN) || (err == EINTR) || (err == ENOMEM);

	if (!keep_stressing_flag() || !retryable)
		return false;

	/* give other processes a chance to release resources */
	(void)shim_sched_yield();
	return true;
}
2668