1 /*
2 * Copyright (C) 2017-2021 Canonical, Ltd.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version 2
7 * of the License, or (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17 *
18 * This code is a complete clean re-write of the stress tool by
19 * Colin Ian King <colin.king@canonical.com> and attempts to be
20 * backwardly compatible with the stress tool by Amos Waterland
21 * <apw@rossby.metr.ou.edu> but has more stress tests and more
22 * functionality.
23 *
24 */
25 #include "stress-ng.h"
26
/* Help strings for the tmpfs stressor options */
static const stress_help_t help[] = {
	{ NULL,	"tmpfs N",		"start N workers mmap'ing a file on tmpfs" },
	{ NULL,	"tmpfs-ops N",		"stop after N tmpfs bogo ops" },
	{ NULL,	"tmpfs-mmap-async",	"using asynchronous msyncs for tmpfs file based mmap" },
	{ NULL,	"tmpfs-mmap-file",	"mmap onto a tmpfs file using synchronous msyncs" },
	{ NULL,	NULL,			NULL }
};
34
stress_set_tmpfs_mmap_file(const char * opt)35 static int stress_set_tmpfs_mmap_file(const char *opt)
36 {
37 bool tmpfs_mmap_file = true;
38
39 (void)opt;
40 return stress_set_setting("tmpfs-mmap-file", TYPE_ID_BOOL, &tmpfs_mmap_file);
41 }
42
stress_set_tmpfs_mmap_async(const char * opt)43 static int stress_set_tmpfs_mmap_async(const char *opt)
44 {
45 bool tmpfs_mmap_async = true;
46
47 (void)opt;
48 return stress_set_setting("tmpfs-mmap-async", TYPE_ID_BOOL, &tmpfs_mmap_async);
49 }
50
/* Map command line options to their option-setting handlers */
static const stress_opt_set_func_t opt_set_funcs[] = {
	{ OPT_tmpfs_mmap_async,	stress_set_tmpfs_mmap_async },
	{ OPT_tmpfs_mmap_file,	stress_set_tmpfs_mmap_file },
	{ 0,			NULL }
};
56
57 #if defined(HAVE_SYS_VFS_H) && \
58 defined(HAVE_STATFS)
59
#define MAX_MOUNTS		(256)		/* max mount points to scan for tmpfs */
#define NO_MEM_RETRIES_MAX	(256)		/* give up mmap'ing after this many failures */
#define TMPFS_MAGIC		(0x01021994)	/* statfs f_type for tmpfs (see statfs(2)) */
#define MAX_TMPFS_SIZE		(512 * MB)	/* per-stressor cap unless --maximize */
64
/*
 *  Misc randomly chosen mmap flags; one of these is OR'd into the
 *  base mmap flags each iteration. The trailing 0 guarantees the
 *  table is never empty even when none of the flags are defined.
 */
static const int mmap_flags[] = {
#if defined(MAP_HUGE_2MB) &&	\
    defined(MAP_HUGETLB)
	MAP_HUGE_2MB | MAP_HUGETLB,
#endif
#if defined(MAP_HUGE_1GB) &&	\
    defined(MAP_HUGETLB)
	MAP_HUGE_1GB | MAP_HUGETLB,
#endif
#if defined(MAP_HUGETLB)
	MAP_HUGETLB,
#endif
#if defined(MAP_NONBLOCK)
	MAP_NONBLOCK,
#endif
#if defined(MAP_LOCKED)
	MAP_LOCKED,
#endif
	0
};
86
/* Context passed from the parent stressor to the oomable child */
typedef struct {
	int fd;		/* open fd of the unlinked tmpfs temp file */
	off_t sz;	/* size of the file hole, i.e. the mmap length */
} stress_tmpfs_context_t;
91
92 /*
93 * stress_tmpfs_open()
94 * attempts to find a writeable tmpfs file system and open
95 * a tmpfs temp file. The file is unlinked so the final close
96 * will enforce and automatic space reap if the child process
97 * exits prematurely.
98 */
stress_tmpfs_open(const stress_args_t * args,off_t * len)99 static int stress_tmpfs_open(const stress_args_t *args, off_t *len)
100 {
101 const uint32_t rnd = stress_mwc32();
102 char path[PATH_MAX];
103 char *mnts[MAX_MOUNTS];
104 int i, n, fd = -1;
105
106 (void)memset(mnts, 0, sizeof(mnts));
107
108 *len = 0;
109 n = stress_mount_get(mnts, SIZEOF_ARRAY(mnts));
110 if (n < 0)
111 return -1;
112
113 for (i = 0; i < n; i++) {
114 struct statfs buf;
115
116 if (!mnts[i])
117 continue;
118 /* Some paths should be avoided... */
119 if (!strncmp(mnts[i], "/dev", 4))
120 continue;
121 if (!strncmp(mnts[i], "/sys", 4))
122 continue;
123 if (!strncmp(mnts[i], "/run/lock", 9))
124 continue;
125 (void)memset(&buf, 0, sizeof(buf));
126 if (statfs(mnts[i], &buf) < 0)
127 continue;
128
129 /* ..and must be TMPFS too.. */
130 if (buf.f_type != TMPFS_MAGIC)
131 continue;
132
133 /* We have a candidate, try to create a tmpfs file */
134 (void)snprintf(path, sizeof(path), "%s/%s-%" PRIdMAX "-%" PRIu32 "-%" PRIu32,
135 mnts[i], args->name, (intmax_t)args->pid, args->instance, rnd);
136 fd = open(path, O_RDWR | O_CREAT | O_TRUNC, S_IRUSR | S_IWUSR);
137 if (fd >= 0) {
138 const char data = 0;
139 off_t rc, max_size = (off_t)buf.f_bsize * (off_t)buf.f_bavail;
140
141 /*
142 * Don't use all the tmpfs, just 98% for all instance
143 */
144 max_size = (max_size * 98) / 100;
145 if (!(g_opt_flags & OPT_FLAGS_MAXIMIZE)) {
146 if (max_size > (off_t)MAX_TMPFS_SIZE)
147 max_size = (off_t)MAX_TMPFS_SIZE;
148 }
149 max_size /= args->num_instances;
150
151 (void)unlink(path);
152 /*
153 * make file with hole; we want this
154 * to be autopopulated with pages
155 * over time
156 */
157 rc = lseek(fd, max_size, SEEK_SET);
158 if (rc < 0) {
159 (void)close(fd);
160 fd = -1;
161 continue;
162 }
163 rc = write(fd, &data, sizeof(data));
164 if (rc < 0) {
165 (void)close(fd);
166 fd = -1;
167 continue;
168 }
169 *len = max_size;
170 break;
171 }
172 }
173 stress_mount_free(mnts, n);
174
175 return fd;
176 }
177
/*
 *  stress_tmpfs_child()
 *	child stressor body (run via stress_oomable_child): mmap the tmpfs
 *	backed file, unmap its pages in random order, optionally remap them
 *	with MAP_FIXED, and exercise random 1 byte reads/writes on the file.
 *	Returns EXIT_SUCCESS when the stressing loop completes.
 */
static int stress_tmpfs_child(const stress_args_t *args, void *ctxt)
{
	stress_tmpfs_context_t *context = (stress_tmpfs_context_t *)ctxt;
	const size_t page_size = args->page_size;
	const size_t sz = (size_t)context->sz;
	/* NOTE(review): assumes sz >= page_size so pages4k > 0, otherwise the
	 * VLAs below are zero sized and the % pages4k ops divide by zero —
	 * confirm stress_tmpfs_open never yields a sub-page size */
	const size_t pages4k = (size_t)sz / page_size;
	const int fd = context->fd;
	bool tmpfs_mmap_async = false;
	bool tmpfs_mmap_file = false;
	int no_mem_retries = 0;
	int ms_flags;
	int flags = MAP_SHARED;

#if defined(MAP_POPULATE)
	flags |= MAP_POPULATE;
#endif

	(void)stress_get_setting("tmpfs-mmap-async", &tmpfs_mmap_async);
	(void)stress_get_setting("tmpfs-mmap-file", &tmpfs_mmap_file);

	/* msync mode is chosen once, up front */
	ms_flags = tmpfs_mmap_async ? MS_ASYNC : MS_SYNC;

	do {
		uint8_t mapped[pages4k];	/* per-page mapped/unmapped/failed state */
		uint8_t *mappings[pages4k];	/* per-page mapping addresses */
		size_t n;
		const int rnd = stress_mwc32() % SIZEOF_ARRAY(mmap_flags);	/* cppcheck-suppress moduloofone */
		const int rnd_flag = mmap_flags[rnd];
		uint8_t *buf = NULL;
		off_t offset;

		if (no_mem_retries >= NO_MEM_RETRIES_MAX) {
			pr_err("%s: gave up trying to mmap, no available memory\n",
				args->name);
			break;
		}

		/*
		 *  exercise some random file operations
		 */
		/* random read at offset in [0, sz] */
		offset = (off_t)(stress_mwc64() % (sz + 1));
		if (lseek(fd, offset, SEEK_SET) != (off_t)-1) {
			char data[1];
			ssize_t rd;

			rd = read(fd, data, sizeof(data));
			(void)rd;
		}
		if (!keep_stressing_flag())
			break;

		/* random 1 byte write at offset in [0, sz] */
		offset = (off_t)(stress_mwc64() % (sz + 1));
		if (lseek(fd, offset, SEEK_SET) != (off_t)-1) {
			char data[1];
			ssize_t wr;

			data[0] = (char)0xff;
			wr = write(fd, data, sizeof(data));
			(void)wr;
		}
		(void)shim_fsync(fd);

		/* map the whole file, with one randomly chosen extra flag */
		buf = (uint8_t *)mmap(NULL, sz,
			PROT_READ | PROT_WRITE, flags | rnd_flag, fd, 0);
		if (buf == MAP_FAILED) {
#if defined(MAP_POPULATE)
			/* Force MAP_POPULATE off, just in case */
			if (flags & MAP_POPULATE) {
				flags &= ~MAP_POPULATE;
				no_mem_retries++;
				continue;
			}
#endif
#if defined(MAP_HUGETLB)
			/* Force MAP_HUGETLB off, just in case */
			if (flags & MAP_HUGETLB) {
				flags &= ~MAP_HUGETLB;
				no_mem_retries++;
				continue;
			}
#endif
			no_mem_retries++;
			if (no_mem_retries > 1)
				(void)shim_usleep(10000);
			continue;	/* Try again */
		}
		if (tmpfs_mmap_file) {
			/* dirty the whole mapping and flush back to the file */
			(void)memset(buf, 0xff, sz);
			(void)shim_msync((void *)buf, sz, ms_flags);
		}
		(void)stress_madvise_random(buf, sz);
		(void)stress_mincore_touch_pages(buf, sz);
		/* all pages start out mapped; record each page's address */
		(void)memset(mapped, PAGE_MAPPED, sizeof(mapped));
		for (n = 0; n < pages4k; n++)
			mappings[n] = buf + (n * page_size);

		/* Ensure we can write to the mapped pages */
		stress_mmap_set(buf, sz, page_size);
		if (g_opt_flags & OPT_FLAGS_VERIFY) {
			if (stress_mmap_check(buf, sz, page_size) < 0)
				pr_fail("%s: mmap'd region of %zu bytes does "
					"not contain expected data\n", args->name, sz);
		}

		/*
		 *  Step #1, unmap all pages in random order
		 */
		(void)stress_mincore_touch_pages(buf, sz);
		for (n = pages4k; n; ) {
			/* pick a random start page, then scan forward for a mapped one */
			uint64_t j, i = stress_mwc64() % pages4k;

			for (j = 0; j < n; j++) {
				uint64_t page = (i + j) % pages4k;

				if (mapped[page] == PAGE_MAPPED) {
					mapped[page] = 0;
					(void)stress_madvise_random(mappings[page], page_size);
					(void)munmap((void *)mappings[page], page_size);
					n--;
					break;
				}
				if (!keep_stressing_flag())
					goto cleanup;
			}
		}
		(void)munmap((void *)buf, sz);
#if defined(MAP_FIXED)
		/*
		 *  Step #2, map them back in random order
		 */
		for (n = pages4k; n; ) {
			uint64_t j, i = stress_mwc64() % pages4k;

			for (j = 0; j < n; j++) {
				uint64_t page = (i + j) % pages4k;

				if (!mapped[page]) {
					/* file-backed page offset, or 0 when anonymous-style */
					offset = tmpfs_mmap_file ? (off_t)(page * page_size) : 0;
					/*
					 * Attempt to map them back into the original address, this
					 * may fail (it's not the most portable operation), so keep
					 * track of failed mappings too
					 */
					mappings[page] = (uint8_t *)mmap((void *)mappings[page],
						page_size, PROT_READ | PROT_WRITE, MAP_FIXED | flags, fd, offset);
					if (mappings[page] == MAP_FAILED) {
						mapped[page] = PAGE_MAPPED_FAIL;
						mappings[page] = NULL;
					} else {
						(void)stress_mincore_touch_pages(mappings[page], page_size);
						(void)stress_madvise_random(mappings[page], page_size);
						mapped[page] = PAGE_MAPPED;
						/* Ensure we can write to the mapped page */
						stress_mmap_set(mappings[page], page_size, page_size);
						if (stress_mmap_check(mappings[page], page_size, page_size) < 0)
							pr_fail("%s: mmap'd region of %zu bytes does "
								"not contain expected data\n", args->name, page_size);
						if (tmpfs_mmap_file) {
							(void)memset(mappings[page], (int)n, page_size);
							(void)shim_msync((void *)mappings[page], page_size, ms_flags);
						}
					}
					n--;
					break;
				}
				if (!keep_stressing_flag())
					goto cleanup;
			}
		}
#endif
cleanup:
		/*
		 *  Step #3, unmap them all
		 */
		for (n = 0; n < pages4k; n++) {
			/* PAGE_MAPPED_FAIL pages have NULL addresses and are skipped */
			if (mapped[n] & PAGE_MAPPED) {
				(void)stress_madvise_random(mappings[n], page_size);
				(void)munmap((void *)mappings[n], page_size);
			}
		}
		inc_counter(args);
	} while (keep_stressing(args));

	(void)close(fd);

	return EXIT_SUCCESS;
}
365
366 /*
367 * stress_tmpfs()
368 * stress tmpfs
369 */
stress_tmpfs(const stress_args_t * args)370 static int stress_tmpfs(const stress_args_t *args)
371 {
372 stress_tmpfs_context_t context;
373 int ret;
374
375 context.fd = stress_tmpfs_open(args, &context.sz);
376 if (context.fd < 0) {
377 pr_err("%s: cannot find writeable free space on a "
378 "tmpfs filesystem\n", args->name);
379 return EXIT_NO_RESOURCE;
380 }
381
382 stress_set_proc_state(args->name, STRESS_STATE_RUN);
383
384 ret = stress_oomable_child(args, &context, stress_tmpfs_child, STRESS_OOMABLE_NORMAL);
385
386 stress_set_proc_state(args->name, STRESS_STATE_DEINIT);
387
388 (void)close(context.fd);
389
390 return ret;
391 }
/* Stressor descriptor: real implementation (HAVE_SYS_VFS_H && HAVE_STATFS) */
stressor_info_t stress_tmpfs_info = {
	.stressor = stress_tmpfs,
	.class = CLASS_MEMORY | CLASS_VM | CLASS_OS,
	.opt_set_funcs = opt_set_funcs,
	.help = help
};
398 #else
/* Stressor descriptor: stubbed out when statfs()/<sys/vfs.h> are unavailable */
stressor_info_t stress_tmpfs_info = {
	.stressor = stress_not_implemented,
	.class = CLASS_MEMORY | CLASS_VM | CLASS_OS,
	.opt_set_funcs = opt_set_funcs,
	.help = help
};
405 #endif
406