1 #include "test/jemalloc_test.h"
2
3 #include "jemalloc/internal/ticker.h"
4
/* Originals of the nstime hooks, saved so tests can restore them on exit. */
static nstime_monotonic_t *nstime_monotonic_orig;
static nstime_update_t *nstime_update_orig;

/* State backing the mocked clock that tests install to drive decay. */
static unsigned nupdates_mock;	/* Count of nstime_update_mock() calls. */
static nstime_t time_mock;	/* Current mocked time. */
static bool monotonic_mock;	/* Whether the mocked clock claims monotonicity. */
11
12 static bool
check_background_thread_enabled(void)13 check_background_thread_enabled(void) {
14 bool enabled;
15 size_t sz = sizeof(bool);
16 int ret = mallctl("background_thread", (void *)&enabled, &sz, NULL,0);
17 if (ret == ENOENT) {
18 return false;
19 }
20 assert_d_eq(ret, 0, "Unexpected mallctl error");
21 return enabled;
22 }
23
/* Mocked nstime_monotonic(): reports whatever the test configured. */
static bool
nstime_monotonic_mock(void) {
	return monotonic_mock;
}
28
29 static bool
nstime_update_mock(nstime_t * time)30 nstime_update_mock(nstime_t *time) {
31 nupdates_mock++;
32 if (monotonic_mock) {
33 nstime_copy(time, &time_mock);
34 }
35 return !monotonic_mock;
36 }
37
38 static unsigned
do_arena_create(ssize_t dirty_decay_ms,ssize_t muzzy_decay_ms)39 do_arena_create(ssize_t dirty_decay_ms, ssize_t muzzy_decay_ms) {
40 unsigned arena_ind;
41 size_t sz = sizeof(unsigned);
42 assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0),
43 0, "Unexpected mallctl() failure");
44 size_t mib[3];
45 size_t miblen = sizeof(mib)/sizeof(size_t);
46
47 assert_d_eq(mallctlnametomib("arena.0.dirty_decay_ms", mib, &miblen),
48 0, "Unexpected mallctlnametomib() failure");
49 mib[1] = (size_t)arena_ind;
50 assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL,
51 (void *)&dirty_decay_ms, sizeof(dirty_decay_ms)), 0,
52 "Unexpected mallctlbymib() failure");
53
54 assert_d_eq(mallctlnametomib("arena.0.muzzy_decay_ms", mib, &miblen),
55 0, "Unexpected mallctlnametomib() failure");
56 mib[1] = (size_t)arena_ind;
57 assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL,
58 (void *)&muzzy_decay_ms, sizeof(muzzy_decay_ms)), 0,
59 "Unexpected mallctlbymib() failure");
60
61 return arena_ind;
62 }
63
64 static void
do_arena_destroy(unsigned arena_ind)65 do_arena_destroy(unsigned arena_ind) {
66 size_t mib[3];
67 size_t miblen = sizeof(mib)/sizeof(size_t);
68 assert_d_eq(mallctlnametomib("arena.0.destroy", mib, &miblen), 0,
69 "Unexpected mallctlnametomib() failure");
70 mib[1] = (size_t)arena_ind;
71 assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
72 "Unexpected mallctlbymib() failure");
73 }
74
75 void
do_epoch(void)76 do_epoch(void) {
77 uint64_t epoch = 1;
78 assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
79 0, "Unexpected mallctl() failure");
80 }
81
82 void
do_purge(unsigned arena_ind)83 do_purge(unsigned arena_ind) {
84 size_t mib[3];
85 size_t miblen = sizeof(mib)/sizeof(size_t);
86 assert_d_eq(mallctlnametomib("arena.0.purge", mib, &miblen), 0,
87 "Unexpected mallctlnametomib() failure");
88 mib[1] = (size_t)arena_ind;
89 assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
90 "Unexpected mallctlbymib() failure");
91 }
92
93 void
do_decay(unsigned arena_ind)94 do_decay(unsigned arena_ind) {
95 size_t mib[3];
96 size_t miblen = sizeof(mib)/sizeof(size_t);
97 assert_d_eq(mallctlnametomib("arena.0.decay", mib, &miblen), 0,
98 "Unexpected mallctlnametomib() failure");
99 mib[1] = (size_t)arena_ind;
100 assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
101 "Unexpected mallctlbymib() failure");
102 }
103
104 static uint64_t
get_arena_npurge_impl(const char * mibname,unsigned arena_ind)105 get_arena_npurge_impl(const char *mibname, unsigned arena_ind) {
106 size_t mib[4];
107 size_t miblen = sizeof(mib)/sizeof(size_t);
108 assert_d_eq(mallctlnametomib(mibname, mib, &miblen), 0,
109 "Unexpected mallctlnametomib() failure");
110 mib[2] = (size_t)arena_ind;
111 uint64_t npurge = 0;
112 size_t sz = sizeof(npurge);
113 assert_d_eq(mallctlbymib(mib, miblen, (void *)&npurge, &sz, NULL, 0),
114 config_stats ? 0 : ENOENT, "Unexpected mallctlbymib() failure");
115 return npurge;
116 }
117
/* Refresh stats, then read stats.arenas.<i>.dirty_npurge. */
static uint64_t
get_arena_dirty_npurge(unsigned arena_ind) {
	do_epoch();
	return get_arena_npurge_impl("stats.arenas.0.dirty_npurge", arena_ind);
}
123
/* Refresh stats, then read stats.arenas.<i>.dirty_purged. */
static uint64_t
get_arena_dirty_purged(unsigned arena_ind) {
	do_epoch();
	return get_arena_npurge_impl("stats.arenas.0.dirty_purged", arena_ind);
}
129
/* Refresh stats, then read stats.arenas.<i>.muzzy_npurge. */
static uint64_t
get_arena_muzzy_npurge(unsigned arena_ind) {
	do_epoch();
	return get_arena_npurge_impl("stats.arenas.0.muzzy_npurge", arena_ind);
}
135
/* Refresh stats, then return the sum of dirty and muzzy purge passes. */
static uint64_t
get_arena_npurge(unsigned arena_ind) {
	do_epoch();
	return get_arena_npurge_impl("stats.arenas.0.dirty_npurge", arena_ind) +
	    get_arena_npurge_impl("stats.arenas.0.muzzy_npurge", arena_ind);
}
142
143 static size_t
get_arena_pdirty(unsigned arena_ind)144 get_arena_pdirty(unsigned arena_ind) {
145 do_epoch();
146 size_t mib[4];
147 size_t miblen = sizeof(mib)/sizeof(size_t);
148 assert_d_eq(mallctlnametomib("stats.arenas.0.pdirty", mib, &miblen), 0,
149 "Unexpected mallctlnametomib() failure");
150 mib[2] = (size_t)arena_ind;
151 size_t pdirty;
152 size_t sz = sizeof(pdirty);
153 assert_d_eq(mallctlbymib(mib, miblen, (void *)&pdirty, &sz, NULL, 0), 0,
154 "Unexpected mallctlbymib() failure");
155 return pdirty;
156 }
157
158 static size_t
get_arena_pmuzzy(unsigned arena_ind)159 get_arena_pmuzzy(unsigned arena_ind) {
160 do_epoch();
161 size_t mib[4];
162 size_t miblen = sizeof(mib)/sizeof(size_t);
163 assert_d_eq(mallctlnametomib("stats.arenas.0.pmuzzy", mib, &miblen), 0,
164 "Unexpected mallctlnametomib() failure");
165 mib[2] = (size_t)arena_ind;
166 size_t pmuzzy;
167 size_t sz = sizeof(pmuzzy);
168 assert_d_eq(mallctlbymib(mib, miblen, (void *)&pmuzzy, &sz, NULL, 0), 0,
169 "Unexpected mallctlbymib() failure");
170 return pmuzzy;
171 }
172
173 static void *
do_mallocx(size_t size,int flags)174 do_mallocx(size_t size, int flags) {
175 void *p = mallocx(size, flags);
176 assert_ptr_not_null(p, "Unexpected mallocx() failure");
177 return p;
178 }
179
180 static void
generate_dirty(unsigned arena_ind,size_t size)181 generate_dirty(unsigned arena_ind, size_t size) {
182 int flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE;
183 void *p = do_mallocx(size, flags);
184 dallocx(p, flags);
185 }
186
/*
 * Verify that every public allocation/deallocation entry point advances the
 * per-arena decay ticker.  A manually created arena is used so this thread's
 * ticker state is isolated, and large size classes are used where tcache
 * interactions cannot otherwise be controlled.
 */
TEST_BEGIN(test_decay_ticks) {
	test_skip_if(check_background_thread_enabled());

	ticker_t *decay_ticker;
	unsigned tick0, tick1, arena_ind;
	size_t sz, large0;
	void *p;

	/* Smallest large size class (arenas.lextent.0.size). */
	sz = sizeof(size_t);
	assert_d_eq(mallctl("arenas.lextent.0.size", (void *)&large0, &sz, NULL,
	    0), 0, "Unexpected mallctl failure");

	/* Set up a manually managed arena for test. */
	arena_ind = do_arena_create(0, 0);

	/* Migrate to the new arena, and get the ticker. */
	unsigned old_arena_ind;
	size_t sz_arena_ind = sizeof(old_arena_ind);
	assert_d_eq(mallctl("thread.arena", (void *)&old_arena_ind,
	    &sz_arena_ind, (void *)&arena_ind, sizeof(arena_ind)), 0,
	    "Unexpected mallctl() failure");
	decay_ticker = decay_ticker_get(tsd_fetch(), arena_ind);
	assert_ptr_not_null(decay_ticker,
	    "Unexpected failure getting decay ticker");

	/*
	 * Test the standard APIs using a large size class, since we can't
	 * control tcache interactions for small size classes (except by
	 * completely disabling tcache for the entire test program).
	 */

	/* malloc(). */
	tick0 = ticker_read(decay_ticker);
	p = malloc(large0);
	assert_ptr_not_null(p, "Unexpected malloc() failure");
	tick1 = ticker_read(decay_ticker);
	assert_u32_ne(tick1, tick0, "Expected ticker to tick during malloc()");
	/* free(). */
	tick0 = ticker_read(decay_ticker);
	free(p);
	tick1 = ticker_read(decay_ticker);
	assert_u32_ne(tick1, tick0, "Expected ticker to tick during free()");

	/* calloc(). */
	tick0 = ticker_read(decay_ticker);
	p = calloc(1, large0);
	assert_ptr_not_null(p, "Unexpected calloc() failure");
	tick1 = ticker_read(decay_ticker);
	assert_u32_ne(tick1, tick0, "Expected ticker to tick during calloc()");
	free(p);

	/* posix_memalign(). */
	tick0 = ticker_read(decay_ticker);
	assert_d_eq(posix_memalign(&p, sizeof(size_t), large0), 0,
	    "Unexpected posix_memalign() failure");
	tick1 = ticker_read(decay_ticker);
	assert_u32_ne(tick1, tick0,
	    "Expected ticker to tick during posix_memalign()");
	free(p);

	/* aligned_alloc(). */
	tick0 = ticker_read(decay_ticker);
	p = aligned_alloc(sizeof(size_t), large0);
	assert_ptr_not_null(p, "Unexpected aligned_alloc() failure");
	tick1 = ticker_read(decay_ticker);
	assert_u32_ne(tick1, tick0,
	    "Expected ticker to tick during aligned_alloc()");
	free(p);

	/* realloc(). */
	/* Allocate. */
	tick0 = ticker_read(decay_ticker);
	p = realloc(NULL, large0);
	assert_ptr_not_null(p, "Unexpected realloc() failure");
	tick1 = ticker_read(decay_ticker);
	assert_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()");
	/* Reallocate. */
	tick0 = ticker_read(decay_ticker);
	p = realloc(p, large0);
	assert_ptr_not_null(p, "Unexpected realloc() failure");
	tick1 = ticker_read(decay_ticker);
	assert_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()");
	/* Deallocate (realloc with size 0 frees p). */
	tick0 = ticker_read(decay_ticker);
	realloc(p, 0);
	tick1 = ticker_read(decay_ticker);
	assert_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()");

	/*
	 * Test the *allocx() APIs using large and small size classes, with
	 * tcache explicitly disabled.
	 */
	{
		unsigned i;
		size_t allocx_sizes[2];
		allocx_sizes[0] = large0;
		allocx_sizes[1] = 1;

		for (i = 0; i < sizeof(allocx_sizes) / sizeof(size_t); i++) {
			sz = allocx_sizes[i];

			/* mallocx(). */
			tick0 = ticker_read(decay_ticker);
			p = mallocx(sz, MALLOCX_TCACHE_NONE);
			assert_ptr_not_null(p, "Unexpected mallocx() failure");
			tick1 = ticker_read(decay_ticker);
			assert_u32_ne(tick1, tick0,
			    "Expected ticker to tick during mallocx() (sz=%zu)",
			    sz);
			/* rallocx(). */
			tick0 = ticker_read(decay_ticker);
			p = rallocx(p, sz, MALLOCX_TCACHE_NONE);
			assert_ptr_not_null(p, "Unexpected rallocx() failure");
			tick1 = ticker_read(decay_ticker);
			assert_u32_ne(tick1, tick0,
			    "Expected ticker to tick during rallocx() (sz=%zu)",
			    sz);
			/* xallocx(). */
			tick0 = ticker_read(decay_ticker);
			xallocx(p, sz, 0, MALLOCX_TCACHE_NONE);
			tick1 = ticker_read(decay_ticker);
			assert_u32_ne(tick1, tick0,
			    "Expected ticker to tick during xallocx() (sz=%zu)",
			    sz);
			/* dallocx(). */
			tick0 = ticker_read(decay_ticker);
			dallocx(p, MALLOCX_TCACHE_NONE);
			tick1 = ticker_read(decay_ticker);
			assert_u32_ne(tick1, tick0,
			    "Expected ticker to tick during dallocx() (sz=%zu)",
			    sz);
			/* sdallocx(). */
			p = mallocx(sz, MALLOCX_TCACHE_NONE);
			assert_ptr_not_null(p, "Unexpected mallocx() failure");
			tick0 = ticker_read(decay_ticker);
			sdallocx(p, sz, MALLOCX_TCACHE_NONE);
			tick1 = ticker_read(decay_ticker);
			assert_u32_ne(tick1, tick0,
			    "Expected ticker to tick during sdallocx() "
			    "(sz=%zu)", sz);
		}
	}

	/*
	 * Test tcache fill/flush interactions for large and small size classes,
	 * using an explicit tcache.
	 */
	unsigned tcache_ind, i;
	size_t tcache_sizes[2];
	tcache_sizes[0] = large0;
	tcache_sizes[1] = 1;

	size_t tcache_max, sz_tcache_max;
	sz_tcache_max = sizeof(tcache_max);
	assert_d_eq(mallctl("arenas.tcache_max", (void *)&tcache_max,
	    &sz_tcache_max, NULL, 0), 0, "Unexpected mallctl() failure");

	sz = sizeof(unsigned);
	assert_d_eq(mallctl("tcache.create", (void *)&tcache_ind, &sz,
	    NULL, 0), 0, "Unexpected mallctl failure");

	for (i = 0; i < sizeof(tcache_sizes) / sizeof(size_t); i++) {
		sz = tcache_sizes[i];

		/* tcache fill. */
		tick0 = ticker_read(decay_ticker);
		p = mallocx(sz, MALLOCX_TCACHE(tcache_ind));
		assert_ptr_not_null(p, "Unexpected mallocx() failure");
		tick1 = ticker_read(decay_ticker);
		assert_u32_ne(tick1, tick0,
		    "Expected ticker to tick during tcache fill "
		    "(sz=%zu)", sz);
		/* tcache flush. */
		dallocx(p, MALLOCX_TCACHE(tcache_ind));
		tick0 = ticker_read(decay_ticker);
		assert_d_eq(mallctl("tcache.flush", NULL, NULL,
		    (void *)&tcache_ind, sizeof(unsigned)), 0,
		    "Unexpected mallctl failure");
		tick1 = ticker_read(decay_ticker);

		/* Will only tick if it's in tcache. */
		if (sz <= tcache_max) {
			assert_u32_ne(tick1, tick0,
			    "Expected ticker to tick during tcache "
			    "flush (sz=%zu)", sz);
		} else {
			assert_u32_eq(tick1, tick0,
			    "Unexpected ticker tick during tcache "
			    "flush (sz=%zu)", sz);
		}
	}
}
TEST_END
380
/*
 * Drive allocation/deallocation while advancing the mocked clock by
 * dt/NINTERVALS per iteration, until either the deadline (now + dt) passes or
 * (when terminate_asap) purging beyond the given baselines is observed.
 * With stats enabled, asserts that some purging occurred.
 *
 * NOTE(review): the `dirty` parameter is currently unused in this body —
 * confirm whether it was meant to select which npurge counter is checked.
 */
static void
decay_ticker_helper(unsigned arena_ind, int flags, bool dirty, ssize_t dt,
    uint64_t dirty_npurge0, uint64_t muzzy_npurge0, bool terminate_asap) {
#define NINTERVALS 101
	nstime_t time, update_interval, decay_ms, deadline;

	nstime_init(&time, 0);
	nstime_update(&time);

	/* Deadline is dt (seconds) past the current (mocked) time. */
	nstime_init2(&decay_ms, dt, 0);
	nstime_copy(&deadline, &time);
	nstime_add(&deadline, &decay_ms);

	nstime_init2(&update_interval, dt, 0);
	nstime_idivide(&update_interval, NINTERVALS);

	/*
	 * Keep q's slab from being deallocated during the looping below. If a
	 * cached slab were to repeatedly come and go during looping, it could
	 * prevent the decay backlog ever becoming empty.
	 */
	void *p = do_mallocx(1, flags);
	uint64_t dirty_npurge1, muzzy_npurge1;
	do {
		/* Generate ticker activity: alloc/free pairs. */
		for (unsigned i = 0; i < DECAY_NTICKS_PER_UPDATE / 2;
		    i++) {
			void *q = do_mallocx(1, flags);
			dallocx(q, flags);
		}
		dirty_npurge1 = get_arena_dirty_npurge(arena_ind);
		muzzy_npurge1 = get_arena_muzzy_npurge(arena_ind);

		/* Advance the mocked clock, then re-read it. */
		nstime_add(&time_mock, &update_interval);
		nstime_update(&time);
	} while (nstime_compare(&time, &deadline) <= 0 && ((dirty_npurge1 ==
	    dirty_npurge0 && muzzy_npurge1 == muzzy_npurge0) ||
	    !terminate_asap));
	dallocx(p, flags);

	if (config_stats) {
		assert_u64_gt(dirty_npurge1 + muzzy_npurge1, dirty_npurge0 +
		    muzzy_npurge0, "Expected purging to occur");
	}
#undef NINTERVALS
}
426
/*
 * Verify that ticker-driven decay actually purges pages, using a mocked
 * monotonic clock that the test advances explicitly.
 */
TEST_BEGIN(test_decay_ticker) {
	test_skip_if(check_background_thread_enabled());
#define NPS 2048
	ssize_t ddt = opt_dirty_decay_ms;
	ssize_t mdt = opt_muzzy_decay_ms;
	unsigned arena_ind = do_arena_create(ddt, mdt);
	int flags = (MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE);
	void *ps[NPS];
	size_t large;

	/*
	 * Allocate a bunch of large objects, pause the clock, deallocate every
	 * other object (to fragment virtual memory), restore the clock, then
	 * [md]allocx() in a tight loop while advancing time rapidly to verify
	 * the ticker triggers purging.
	 */

	size_t tcache_max;
	size_t sz = sizeof(size_t);
	assert_d_eq(mallctl("arenas.tcache_max", (void *)&tcache_max, &sz, NULL,
	    0), 0, "Unexpected mallctl failure");
	/* Size just above tcache_max, so tcache is irrelevant. */
	large = nallocx(tcache_max + 1, flags);

	/* Baseline purge counters after an explicit purge. */
	do_purge(arena_ind);
	uint64_t dirty_npurge0 = get_arena_dirty_npurge(arena_ind);
	uint64_t muzzy_npurge0 = get_arena_muzzy_npurge(arena_ind);

	for (unsigned i = 0; i < NPS; i++) {
		ps[i] = do_mallocx(large, flags);
	}

	/* Reset mock-clock state, then install the mocked nstime hooks. */
	nupdates_mock = 0;
	nstime_init(&time_mock, 0);
	nstime_update(&time_mock);
	monotonic_mock = true;

	nstime_monotonic_orig = nstime_monotonic;
	nstime_update_orig = nstime_update;
	nstime_monotonic = nstime_monotonic_mock;
	nstime_update = nstime_update_mock;

	/* Free every other object; each decay pass must consult the clock. */
	for (unsigned i = 0; i < NPS; i += 2) {
		dallocx(ps[i], flags);
		unsigned nupdates0 = nupdates_mock;
		do_decay(arena_ind);
		assert_u_gt(nupdates_mock, nupdates0,
		    "Expected nstime_update() to be called");
	}

	decay_ticker_helper(arena_ind, flags, true, ddt, dirty_npurge0,
	    muzzy_npurge0, true);
	decay_ticker_helper(arena_ind, flags, false, ddt+mdt, dirty_npurge0,
	    muzzy_npurge0, false);

	do_arena_destroy(arena_ind);

	/* Restore the real nstime hooks. */
	nstime_monotonic = nstime_monotonic_orig;
	nstime_update = nstime_update_orig;
#undef NPS
}
TEST_END
488
/*
 * Verify that no purging occurs when the clock is non-monotonic: with the
 * mocked clock reporting non-monotonic time, decay passes must not purge.
 */
TEST_BEGIN(test_decay_nonmonotonic) {
	test_skip_if(check_background_thread_enabled());
#define NPS (SMOOTHSTEP_NSTEPS + 1)
	int flags = (MALLOCX_ARENA(0) | MALLOCX_TCACHE_NONE);
	void *ps[NPS];
	uint64_t npurge0 = 0;
	uint64_t npurge1 = 0;
	size_t sz, large0;
	unsigned i, nupdates0;

	/* Smallest large size class (arenas.lextent.0.size). */
	sz = sizeof(size_t);
	assert_d_eq(mallctl("arenas.lextent.0.size", (void *)&large0, &sz, NULL,
	    0), 0, "Unexpected mallctl failure");

	/* Purge, then record the baseline purge counter. */
	assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
	    "Unexpected mallctl failure");
	do_epoch();
	sz = sizeof(uint64_t);
	npurge0 = get_arena_npurge(0);

	/* Install a mocked clock that reports non-monotonic time. */
	nupdates_mock = 0;
	nstime_init(&time_mock, 0);
	nstime_update(&time_mock);
	monotonic_mock = false;

	nstime_monotonic_orig = nstime_monotonic;
	nstime_update_orig = nstime_update;
	nstime_monotonic = nstime_monotonic_mock;
	nstime_update = nstime_update_mock;

	for (i = 0; i < NPS; i++) {
		ps[i] = mallocx(large0, flags);
		assert_ptr_not_null(ps[i], "Unexpected mallocx() failure");
	}

	/* Each decay pass must consult the (mocked) clock. */
	for (i = 0; i < NPS; i++) {
		dallocx(ps[i], flags);
		nupdates0 = nupdates_mock;
		assert_d_eq(mallctl("arena.0.decay", NULL, NULL, NULL, 0), 0,
		    "Unexpected arena.0.decay failure");
		assert_u_gt(nupdates_mock, nupdates0,
		    "Expected nstime_update() to be called");
	}

	do_epoch();
	sz = sizeof(uint64_t);
	npurge1 = get_arena_npurge(0);

	/* Non-monotonic time must not have triggered any purging. */
	if (config_stats) {
		assert_u64_eq(npurge0, npurge1, "Unexpected purging occurred");
	}

	/* Restore the real nstime hooks. */
	nstime_monotonic = nstime_monotonic_orig;
	nstime_update = nstime_update_orig;
#undef NPS
}
TEST_END
546
TEST_BEGIN(test_decay_now)547 TEST_BEGIN(test_decay_now) {
548 test_skip_if(check_background_thread_enabled());
549
550 unsigned arena_ind = do_arena_create(0, 0);
551 assert_zu_eq(get_arena_pdirty(arena_ind), 0, "Unexpected dirty pages");
552 assert_zu_eq(get_arena_pmuzzy(arena_ind), 0, "Unexpected muzzy pages");
553 size_t sizes[] = {16, PAGE<<2, HUGEPAGE<<2};
554 /* Verify that dirty/muzzy pages never linger after deallocation. */
555 for (unsigned i = 0; i < sizeof(sizes)/sizeof(size_t); i++) {
556 size_t size = sizes[i];
557 generate_dirty(arena_ind, size);
558 assert_zu_eq(get_arena_pdirty(arena_ind), 0,
559 "Unexpected dirty pages");
560 assert_zu_eq(get_arena_pmuzzy(arena_ind), 0,
561 "Unexpected muzzy pages");
562 }
563 do_arena_destroy(arena_ind);
564 }
565 TEST_END
566
/*
 * With both decay times set to -1 (never decay), deallocations must
 * accumulate dirty pages and never produce muzzy pages.  Requires stats.
 */
TEST_BEGIN(test_decay_never) {
	test_skip_if(check_background_thread_enabled() || !config_stats);

	unsigned arena_ind = do_arena_create(-1, -1);
	int flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE;
	assert_zu_eq(get_arena_pdirty(arena_ind), 0, "Unexpected dirty pages");
	assert_zu_eq(get_arena_pmuzzy(arena_ind), 0, "Unexpected muzzy pages");
	size_t sizes[] = {16, PAGE<<2, HUGEPAGE<<2};
	void *ptrs[sizeof(sizes)/sizeof(size_t)];
	for (unsigned i = 0; i < sizeof(sizes)/sizeof(size_t); i++) {
		ptrs[i] = do_mallocx(sizes[i], flags);
	}
	/* Verify that each deallocation generates additional dirty pages. */
	size_t pdirty_prev = get_arena_pdirty(arena_ind);
	size_t pmuzzy_prev = get_arena_pmuzzy(arena_ind);
	assert_zu_eq(pdirty_prev, 0, "Unexpected dirty pages");
	assert_zu_eq(pmuzzy_prev, 0, "Unexpected muzzy pages");
	for (unsigned i = 0; i < sizeof(sizes)/sizeof(size_t); i++) {
		dallocx(ptrs[i], flags);
		size_t pdirty = get_arena_pdirty(arena_ind);
		size_t pmuzzy = get_arena_pmuzzy(arena_ind);
		/* Account for any pages already purged by other paths. */
		assert_zu_gt(pdirty + (size_t)get_arena_dirty_purged(arena_ind),
		    pdirty_prev, "Expected dirty pages to increase.");
		assert_zu_eq(pmuzzy, 0, "Unexpected muzzy pages");
		pdirty_prev = pdirty;
	}
	do_arena_destroy(arena_ind);
}
TEST_END
596
/* Register and run all decay tests via the jemalloc test harness. */
int
main(void) {
	return test(
	    test_decay_ticks,
	    test_decay_ticker,
	    test_decay_nonmonotonic,
	    test_decay_now,
	    test_decay_never);
}
606