/*
 * Copyright (C) 2013-2021 Canonical, Ltd.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * This code is a complete clean re-write of the stress tool by
 * Colin Ian King <colin.king@canonical.com> and attempts to be
 * backwardly compatible with the stress tool by Amos Waterland
 * <apw@rossby.metr.ou.edu> but has more stress tests and more
 * functionality.
 *
 */
#include "stress-ng.h"

#define STRESS_AFFINITY_PROCS	(16)

typedef struct {
	volatile uint32_t cpu;		/* Pinned CPU to use, in pin mode */
	uint32_t cpus;			/* Number of CPUs available */
	bool affinity_rand;		/* True if --affinity-rand set */
	bool affinity_pin;		/* True if --affinity-pin set */
	uint64_t affinity_delay;	/* Affinity nanosecond delay, 0 default */
	uint64_t affinity_sleep;	/* Affinity nanosecond sleep, 0 default */
	uint64_t counters[0];		/* Child stressor bogo counters */
} stress_affinity_info_t;
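
/*
 * Layout note: counters[0] is a zero-length trailing array, so the
 * struct is deliberately over-allocated to leave room for one counter
 * per stressor process directly after the header.  A minimal sizing
 * sketch (illustrative only, see stress_affinity() below for the real
 * page-rounded mmap):
 *
 *	size_t sz = sizeof(stress_affinity_info_t) +
 *		    sizeof(uint64_t) * STRESS_AFFINITY_PROCS;
 *	info = mmap(NULL, sz, PROT_READ | PROT_WRITE,
 *		    MAP_SHARED | MAP_ANONYMOUS, -1, 0);
 *
 * MAP_SHARED ensures the parent and all forked children update the
 * same counters.
 */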

static const stress_help_t help[] = {
	{ NULL, "affinity N", "start N workers that rapidly change CPU affinity" },
	{ NULL, "affinity-ops N", "stop after N affinity bogo operations" },
	{ NULL, "affinity-rand", "change affinity randomly rather than sequentially" },
	{ NULL, "affinity-delay", "delay in nanoseconds between affinity changes" },
	{ NULL, "affinity-pin", "keep per stressor threads pinned to same CPU" },
	{ NULL, "affinity-sleep", "sleep in nanoseconds between affinity changes" },
	{ NULL, NULL, NULL }
};
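
/*
 * Example invocations (illustrative):
 *
 *	stress-ng --affinity 4 --affinity-rand --timeout 60s
 *	stress-ng --affinity 2 --affinity-pin --affinity-delay 1000
 */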

static int stress_set_affinity_delay(const char *opt)
{
	uint64_t affinity_delay;

	affinity_delay = stress_get_uint64(opt);
	stress_check_range("affinity-delay", affinity_delay,
		0, STRESS_NANOSECOND);
	return stress_set_setting("affinity-delay", TYPE_ID_UINT64, &affinity_delay);
}

static int stress_set_affinity_rand(const char *opt)
{
	bool affinity_rand = true;

	(void)opt;
	return stress_set_setting("affinity-rand", TYPE_ID_BOOL, &affinity_rand);
}

static int stress_set_affinity_pin(const char *opt)
{
	bool affinity_pin = true;

	(void)opt;
	return stress_set_setting("affinity-pin", TYPE_ID_BOOL, &affinity_pin);
}

static int stress_set_affinity_sleep(const char *opt)
{
	uint64_t affinity_sleep;

	affinity_sleep = stress_get_uint64(opt);
	stress_check_range("affinity-sleep", affinity_sleep,
		0, STRESS_NANOSECOND);
	return stress_set_setting("affinity-sleep", TYPE_ID_UINT64, &affinity_sleep);
}

static const stress_opt_set_func_t opt_set_funcs[] = {
	{ OPT_affinity_delay,	stress_set_affinity_delay },
	{ OPT_affinity_pin,	stress_set_affinity_pin },
	{ OPT_affinity_rand,	stress_set_affinity_rand },
	{ OPT_affinity_sleep,	stress_set_affinity_sleep },
	{ 0,			NULL }
};

/*
 * stress on sched_setaffinity()
 *	stress system by changing CPU affinity periodically
 */
#if defined(HAVE_AFFINITY) &&		\
    defined(HAVE_SCHED_GETAFFINITY)

/*
 *  stress_affinity_supported()
 *	check that we can get and set CPU affinity; only a lack
 *	of privilege (EPERM) on set is treated as fatal
 */
static int stress_affinity_supported(const char *name)
{
	cpu_set_t mask;

	CPU_ZERO(&mask);

	if (sched_getaffinity(0, sizeof(mask), &mask) < 0) {
		pr_inf_skip("%s stressor cannot get CPU affinity, skipping the stressor\n", name);
		return -1;
	}
	if (sched_setaffinity(0, sizeof(mask), &mask) < 0) {
		if (errno == EPERM) {
			pr_inf_skip("%s stressor cannot set CPU affinity, "
				"process lacks privilege, skipping the stressor\n", name);
			return -1;
		}
	}
	return 0;
}

/*
 *  stress_affinity_reap()
 *	kill and wait on child processes
 */
static void stress_affinity_reap(const pid_t *pids)
{
	size_t i;
	const pid_t mypid = getpid();

	/*
	 *  Kill and reap children
	 */
	for (i = 1; i < STRESS_AFFINITY_PROCS; i++) {
		if ((pids[i] > 1) && (pids[i] != mypid))
			(void)kill(pids[i], SIGKILL);
	}
	for (i = 1; i < STRESS_AFFINITY_PROCS; i++) {
		if ((pids[i] > 1) && (pids[i] != mypid)) {
			int status;

			(void)waitpid(pids[i], &status, 0);
		}
	}
}

/*
 *  stress_affinity_racy_count()
 *	racy bogo op counter; locking args->counter would cause
 *	a lot of contention, so instead sum the per-process
 *	counters in a racy way
 */
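/*
 * (The 64-bit counter loads below may tear on 32-bit targets; the
 * sum only feeds a best-effort bogo-ops total, so torn reads are
 * acceptable.)
 */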
static uint64_t stress_affinity_racy_count(uint64_t *counters)
{
	register uint64_t count = 0;
	register size_t i;

	for (i = 0; i < STRESS_AFFINITY_PROCS; i++)
		count += counters[i];

	return count;
}

/*
 *  affinity_keep_stressing()
 *	check if SIGALRM has triggered or the bogo ops count
 *	has been reached; the counter is racy, but that's OK
 */
static bool HOT OPTIMIZE3 affinity_keep_stressing(
	const stress_args_t *args,
	uint64_t *counters)
{
	return (LIKELY(g_keep_stressing_flag) &&
		LIKELY(!args->max_ops ||
		(stress_affinity_racy_count(counters) < args->max_ops)));
}

/*
 *  stress_affinity_spin_delay()
 *	spin for delay nanoseconds while yielding the scheduler,
 *	eating cpu cycles
 */
static inline void stress_affinity_spin_delay(
	const uint64_t delay,
	stress_affinity_info_t *info)
{
	const uint32_t cpu = info->cpu;
	const double end = stress_time_now() +
		((double)delay / (double)STRESS_NANOSECOND);

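	/* spin until the deadline, but exit early if the controller
	 * re-pins to a different CPU (info->cpu changes under us) */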
	while ((stress_time_now() < end) && (cpu == info->cpu))
		shim_sched_yield();
}

/*
 *  stress_affinity_child()
 *	affinity stressor child process
 */
static void stress_affinity_child(
	const stress_args_t *args,
	stress_affinity_info_t *info,
	const pid_t *pids,
	const size_t instance,
	const bool pin_controller)
{
	uint32_t cpu = args->instance;
	cpu_set_t mask0;
	uint64_t *counters = info->counters;

	CPU_ZERO(&mask0);

	do {
		cpu_set_t mask;
		int ret;

		cpu = info->affinity_rand ? (stress_mwc32() >> 4) : cpu + 1;
		cpu %= info->cpus;

		/*
		 *  In pin mode stressor instance 0 controls the CPU
		 *  to use, other instances use that CPU too
		 */
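		/*
		 * A sketch of the hand-off, assuming shim_mb() is a
		 * full memory barrier: the controller stores the new
		 * CPU to info->cpu and then issues the barrier, while
		 * followers issue the barrier and then load info->cpu,
		 * keeping the store and load ordered across the
		 * shared mapping.
		 */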
		if (info->affinity_pin) {
			if (pin_controller) {
				info->cpu = cpu;
				shim_mb();
			} else {
				shim_mb();
				cpu = info->cpu;
			}
		}
		CPU_ZERO(&mask);
		CPU_SET(cpu, &mask);
		if (sched_setaffinity(0, sizeof(mask), &mask) < 0) {
			if (errno == EINVAL) {
				/*
				 *  We get this if the CPU has been
				 *  taken offline, and since that can
				 *  happen dynamically we should just
				 *  retry
				 */
				continue;
			}
			pr_fail("%s: failed to move to CPU %" PRIu32 ", errno=%d (%s)\n",
				args->name, cpu, errno, strerror(errno));
			(void)shim_sched_yield();
		} else {
			/* Now get and check */
			CPU_ZERO(&mask);
			CPU_SET(cpu, &mask);
			if (sched_getaffinity(0, sizeof(mask), &mask) == 0) {
				if ((g_opt_flags & OPT_FLAGS_VERIFY) &&
				    (!CPU_ISSET(cpu, &mask)))
					pr_fail("%s: failed to move to CPU %" PRIu32 "\n",
						args->name, cpu);
			}
		}
		/* Exercise getaffinity with an invalid pid */
		ret = sched_getaffinity(-1, sizeof(mask), &mask);
		(void)ret;

		/* Exercise getaffinity with a zero mask size */
		ret = sched_getaffinity(0, 0, &mask);
		(void)ret;

		/* Exercise setaffinity with an invalid mask size */
		ret = sched_setaffinity(0, 0, &mask);
		(void)ret;

		/* Exercise setaffinity with an empty mask */
		ret = sched_setaffinity(0, sizeof(mask), &mask0);
		(void)ret;

		counters[instance]++;

		if (info->affinity_delay > 0)
			stress_affinity_spin_delay(info->affinity_delay, info);
		if (info->affinity_sleep > 0)
			shim_nanosleep_uint64(info->affinity_sleep);
	} while (affinity_keep_stressing(args, counters));

	stress_affinity_reap(pids);
}

static int stress_affinity(const stress_args_t *args)
{
	pid_t pids[STRESS_AFFINITY_PROCS];
	size_t i;
	stress_affinity_info_t *info;
	size_t counters_sz = sizeof(info->counters[0]) * STRESS_AFFINITY_PROCS;
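	/*
	 * Round the mapping size up to whole pages for mmap (assuming
	 * page_size is a power of two); e.g. with 4096 byte pages a
	 * requirement of 200 bytes gives (200 + 4096) & ~4095 = 4096
	 */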
	size_t info_sz = ((sizeof(*info) + counters_sz) + args->page_size) & ~(args->page_size - 1);

	info = (stress_affinity_info_t *)mmap(NULL, info_sz, PROT_READ | PROT_WRITE,
			MAP_SHARED | MAP_ANONYMOUS, -1, 0);
	if (info == MAP_FAILED) {
		pr_inf_skip("%s: cannot mmap %zu bytes for shared counters, skipping stressor\n",
			args->name, info_sz);
		return EXIT_NO_RESOURCE;
	}

301
302 (void)memset(pids, 0, sizeof(pids));
303
304 info->affinity_delay = 0;
305 info->affinity_pin = false;
306 info->affinity_rand = false;
307 info->affinity_sleep = 0;
308 info->cpus = (uint32_t)stress_get_processors_configured();
309
310 (void)stress_get_setting("affinity-delay", &info->affinity_delay);
311 (void)stress_get_setting("affinity-pin", &info->affinity_pin);
312 (void)stress_get_setting("affinity-rand", &info->affinity_rand);
313 (void)stress_get_setting("affinity-sleep", &info->affinity_sleep);
314
315 /*
316 * process slots 1..STRESS_AFFINITY_PROCS are the children,
317 * slot 0 is the parent.
318 */
	for (i = 1; i < STRESS_AFFINITY_PROCS; i++) {
		pids[i] = fork();

		if (pids[i] == 0) {
			stress_affinity_child(args, info, pids, i, false);
			_exit(EXIT_SUCCESS);
		}
	}

	stress_set_proc_state(args->name, STRESS_STATE_RUN);
	stress_affinity_child(args, info, pids, 0, true);
	stress_set_proc_state(args->name, STRESS_STATE_DEINIT);

	/*
	 *  The first process to hit the bogo op limit or get a SIGALRM
	 *  will have reaped the other processes, but to be safe, reap
	 *  again to ensure all processes are really dead and reaped.
	 */
	stress_affinity_reap(pids);

	/*
	 *  Set the counter; this is always going to be >= the bogo_ops
	 *  threshold because the count is racy, but that is OK
	 */
	set_counter(args, stress_affinity_racy_count(info->counters));

	(void)munmap((void *)info, info_sz);

	return EXIT_SUCCESS;
}

stressor_info_t stress_affinity_info = {
	.stressor = stress_affinity,
	.class = CLASS_SCHEDULER,
	.supported = stress_affinity_supported,
	.opt_set_funcs = opt_set_funcs,
	.help = help
};
#else
stressor_info_t stress_affinity_info = {
	.stressor = stress_not_implemented,
	.class = CLASS_SCHEDULER,
	.opt_set_funcs = opt_set_funcs,
	.help = help
};
#endif