/*
 * Copyright (C) 2013-2021 Canonical, Ltd.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * This code is a complete clean re-write of the stress tool by
 * Colin Ian King <colin.king@canonical.com> and attempts to be
 * backwardly compatible with the stress tool by Amos Waterland
 * <apw@rossby.metr.ou.edu> but has more stress tests and more
 * functionality.
 *
 */
25 #include "stress-ng.h"
26
27 static const stress_help_t help[] = {
28 { NULL, "mremap N", "start N workers stressing mremap" },
29 { NULL, "mremap-ops N", "stop after N mremap bogo operations" },
30 { NULL, "mremap-bytes N", "mremap N bytes maximum for each stress iteration" },
31 { NULL, "mremap-lock", "mlock remap pages, force pages to be unswappable" },
32 { NULL, NULL, NULL }
33 };
34
stress_set_mremap_bytes(const char * opt)35 static int stress_set_mremap_bytes(const char *opt)
36 {
37 size_t mremap_bytes;
38
39 mremap_bytes = (size_t)stress_get_uint64_byte_memory(opt, 1);
40 stress_check_range_bytes("mremap-bytes", mremap_bytes,
41 MIN_MREMAP_BYTES, MAX_MEM_LIMIT);
42 return stress_set_setting("mremap-bytes", TYPE_ID_SIZE_T, &mremap_bytes);
43 }
44
stress_set_mremap_mlock(const char * opt)45 static int stress_set_mremap_mlock(const char *opt)
46 {
47 bool mremap_mlock = true;
48
49 (void)opt;
50 return stress_set_setting("mremap-mlock", TYPE_ID_BOOL, &mremap_mlock);
51 }
52
/* map each mremap option to its setting handler */
static const stress_opt_set_func_t opt_set_funcs[] = {
	{ OPT_mremap_bytes,	stress_set_mremap_bytes },
	{ OPT_mremap_mlock,	stress_set_mremap_mlock },
	{ 0,			NULL }
};
58
59 #if defined(HAVE_MREMAP) && \
60 NEED_GLIBC(2,4,0)
61
#if defined(MREMAP_FIXED)
/*
 *  rand_mremap_addr()
 *	probe for an unmapped region of sz bytes that can be used as
 *	a target address for a MREMAP_FIXED remap; returns NULL if no
 *	such region could be found
 */
static inline void *rand_mremap_addr(const size_t sz, int flags)
{
	void *hint;
	int unwanted = MREMAP_FIXED | MAP_SHARED;

#if defined(MAP_POPULATE)
	unwanted |= MAP_POPULATE;
#endif
	/* strip flags that don't apply to an anonymous probe mapping */
	flags = (flags & ~unwanted) | (MAP_PRIVATE | MAP_ANONYMOUS);

	hint = mmap(NULL, sz, PROT_READ | PROT_WRITE, flags, -1, 0);
	if (hint == MAP_FAILED)
		return NULL;

	(void)munmap(hint, sz);

	/*
	 * The region at hint was mappable a moment ago, so a remap to
	 * it will work provided no other mapping lands there between
	 * the munmap above and the remap (inherently racy, but good
	 * enough for stressing purposes)
	 */
	return hint;
}
#endif
92
/*
 *  try_remap()
 *	try to remap *buf from old_sz to new_sz, retrying up to 100
 *	times on transient failures.  On success *buf is updated to the
 *	new mapping (and optionally mlock'd) and 0 is returned; if the
 *	stressor is stopping, the mapping is released, *buf is zeroed
 *	and 0 is returned; after all retries fail, -1 is returned.
 */
static int try_remap(
	const stress_args_t *args,
	uint8_t **buf,
	const size_t old_sz,
	const size_t new_sz,
	const bool mremap_mlock)
{
	uint8_t *newbuf;
	int retry, flags = 0;
#if defined(MREMAP_MAYMOVE)
	const int maymove = MREMAP_MAYMOVE;
#else
	const int maymove = 0;
#endif

#if defined(MREMAP_FIXED) &&	\
    defined(MREMAP_MAYMOVE)
	/* randomly exercise MREMAP_FIXED ~50% of the time (single flag bit) */
	flags = maymove | (stress_mwc32() & MREMAP_FIXED);
#else
	flags = maymove;
#endif

	for (retry = 0; retry < 100; retry++) {
#if defined(MREMAP_FIXED)
		/* candidate target for a fixed remap; NULL if none found */
		void *addr = rand_mremap_addr(new_sz + args->page_size, flags);
#endif
		/* bail out early if the stressor is being stopped */
		if (!keep_stressing_flag()) {
			(void)munmap(*buf, old_sz);
			*buf = 0;
			return 0;
		}
#if defined(MREMAP_FIXED)
		if (addr) {
			newbuf = mremap(*buf, old_sz, new_sz, flags, addr);
		} else {
			/* no target address found, fall back to non-fixed remap */
			newbuf = mremap(*buf, old_sz, new_sz, flags & ~MREMAP_FIXED);
		}
#else
		newbuf = mremap(*buf, old_sz, new_sz, flags);
#endif
		if (newbuf && newbuf != MAP_FAILED) {
			*buf = newbuf;

#if defined(MREMAP_DONTUNMAP)
			/*
			 * Move and explicitly don't unmap old mapping,
			 * followed by an unmap of the old mapping for
			 * some more exercise
			 */
			newbuf = mremap(*buf, new_sz, new_sz,
					MREMAP_DONTUNMAP | MREMAP_MAYMOVE);
			if (newbuf && newbuf != MAP_FAILED) {
				if (*buf)
					(void)munmap(*buf, new_sz);
				*buf = newbuf;
			}
#endif

#if defined(HAVE_MLOCK)
			/* pin the remapped pages if --mremap-mlock was requested */
			if (mremap_mlock && *buf)
				(void)shim_mlock(*buf, new_sz);
#else
			(void)mremap_mlock;
#endif
			return 0;
		}

		switch (errno) {
		case ENOMEM:
		case EAGAIN:
			/* transient memory pressure, retry */
			continue;
		case EINVAL:
#if defined(MREMAP_FIXED)
			/*
			 * Earlier kernels may not support this or we
			 * chose a bad random address, so just fall
			 * back to non fixed remapping
			 */
			if (flags & MREMAP_FIXED)
				flags &= ~MREMAP_FIXED;
#endif
			break;
		case EFAULT:
		default:
			break;
		}
	}
	pr_fail("%s: mremap failed, errno=%d (%s)\n",
		args->name, errno, strerror(errno));
	return -1;
}
188
stress_mremap_child(const stress_args_t * args,void * context)189 static int stress_mremap_child(const stress_args_t *args, void *context)
190 {
191 size_t new_sz, sz, mremap_bytes = DEFAULT_MREMAP_BYTES;
192 int flags = MAP_PRIVATE | MAP_ANONYMOUS;
193 const size_t page_size = args->page_size;
194 bool mremap_mlock = false;
195
196 #if defined(MAP_POPULATE)
197 flags |= MAP_POPULATE;
198 #endif
199 (void)context;
200
201 if (!stress_get_setting("mremap-bytes", &mremap_bytes)) {
202 if (g_opt_flags & OPT_FLAGS_MAXIMIZE)
203 mremap_bytes = MAX_32;
204 if (g_opt_flags & OPT_FLAGS_MINIMIZE)
205 mremap_bytes = MIN_MREMAP_BYTES;
206 }
207 mremap_bytes /= args->num_instances;
208 if (mremap_bytes < MIN_MREMAP_BYTES)
209 mremap_bytes = MIN_MREMAP_BYTES;
210 if (mremap_bytes < page_size)
211 mremap_bytes = page_size;
212 new_sz = sz = mremap_bytes & ~(page_size - 1);
213
214 (void)stress_get_setting("mremap-mlock", &mremap_mlock);
215
216 stress_set_proc_state(args->name, STRESS_STATE_RUN);
217
218 do {
219 uint8_t *buf = NULL, *ptr;
220 size_t old_sz;
221
222 if (!keep_stressing_flag())
223 break;
224
225 buf = mmap(NULL, new_sz, PROT_READ | PROT_WRITE, flags, -1, 0);
226 if (buf == MAP_FAILED) {
227 /* Force MAP_POPULATE off, just in case */
228 #if defined(MAP_POPULATE)
229 flags &= ~MAP_POPULATE;
230 #endif
231 continue; /* Try again */
232 }
233 (void)stress_madvise_random(buf, new_sz);
234 (void)stress_mincore_touch_pages(buf, mremap_bytes);
235
236 /* Ensure we can write to the mapped pages */
237 if (g_opt_flags & OPT_FLAGS_VERIFY) {
238 stress_mmap_set(buf, new_sz, page_size);
239 if (stress_mmap_check(buf, sz, page_size) < 0) {
240 pr_fail("%s: mmap'd region of %zu "
241 "bytes does not contain expected data\n",
242 args->name, sz);
243 (void)munmap(buf, new_sz);
244 return EXIT_FAILURE;
245 }
246 }
247
248 old_sz = new_sz;
249 new_sz >>= 1;
250 while (new_sz > page_size) {
251 if (try_remap(args, &buf, old_sz, new_sz, mremap_mlock) < 0) {
252 (void)munmap(buf, old_sz);
253 return EXIT_FAILURE;
254 }
255 if (!keep_stressing(args))
256 return EXIT_SUCCESS;
257 (void)stress_madvise_random(buf, new_sz);
258 if (g_opt_flags & OPT_FLAGS_VERIFY) {
259 if (stress_mmap_check(buf, new_sz, page_size) < 0) {
260 pr_fail("%s: mremap'd region "
261 "of %zu bytes does "
262 "not contain expected data\n",
263 args->name, sz);
264 (void)munmap(buf, new_sz);
265 return EXIT_FAILURE;
266 }
267 }
268 old_sz = new_sz;
269 new_sz >>= 1;
270 }
271
272 new_sz <<= 1;
273 while (new_sz < mremap_bytes) {
274 if (try_remap(args, &buf, old_sz, new_sz, mremap_mlock) < 0) {
275 (void)munmap(buf, old_sz);
276 return EXIT_FAILURE;
277 }
278 if (!keep_stressing(args))
279 return EXIT_SUCCESS;
280 (void)stress_madvise_random(buf, new_sz);
281 old_sz = new_sz;
282 new_sz <<= 1;
283 }
284
285 /* Invalid remap flags */
286 ptr = mremap(buf, old_sz, old_sz, ~0);
287 if (ptr && ptr != MAP_FAILED)
288 buf = ptr;
289 ptr = mremap(buf, old_sz, old_sz, MREMAP_FIXED | MREMAP_MAYMOVE);
290 if (ptr && ptr != MAP_FAILED)
291 buf = ptr;
292 #if defined(MREMAP_MAYMOVE)
293 /* Invalid new size */
294 ptr = mremap(buf, old_sz, 0, MREMAP_MAYMOVE);
295 if (ptr && ptr != MAP_FAILED)
296 buf = ptr;
297 #endif
298 (void)munmap(buf, old_sz);
299
300 inc_counter(args);
301 } while (keep_stressing(args));
302
303 stress_set_proc_state(args->name, STRESS_STATE_DEINIT);
304
305 return EXIT_SUCCESS;
306 }
307
/*
 *  stress_mremap()
 *	stress mremap; the real work is done in stress_mremap_child,
 *	run via an OOM-able child so an out-of-memory kill restarts it
 */
static int stress_mremap(const stress_args_t *args)
{
	return stress_oomable_child(args, NULL, stress_mremap_child, STRESS_OOMABLE_NORMAL);
}
316
/* stressor registration when mremap support is available */
stressor_info_t stress_mremap_info = {
	.stressor = stress_mremap,
	.class = CLASS_VM | CLASS_OS,
	.opt_set_funcs = opt_set_funcs,
	.help = help
};
#else
/* no usable mremap: register the not-implemented stub instead */
stressor_info_t stress_mremap_info = {
	.stressor = stress_not_implemented,
	.class = CLASS_VM | CLASS_OS,
	.opt_set_funcs = opt_set_funcs,
	.help = help
};
#endif
331