/* ----------------------------------------------------------------------------
Copyright (c) 2019-2020, Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
-----------------------------------------------------------------------------*/

/* ----------------------------------------------------------------------------
This implements a layer between the raw OS memory (VirtualAlloc/mmap/sbrk/..)
and the segment and huge object allocation by mimalloc. There may be multiple
implementations of this (one could be the identity going directly to the OS,
another could be a simple cache, etc.), but the current one uses large "regions".
In contrast to the rest of mimalloc, the "regions" are shared between threads and
need to be accessed using atomic operations.
We need this memory layer between the raw OS calls because:
1. on `sbrk`-like systems (like WebAssembly) we need our own memory maps in order
   to reuse memory effectively.
2. It turns out that for large objects, between 1MiB and 32MiB (?), the cost of
   an OS allocation/free is still (much) too expensive relative to the accesses
   in that object :-( (`malloc-large` tests this). This means we need a cheaper
   way to reuse memory.
3. This layer allows for NUMA-aware allocation.

Possible issues:
- (2) can potentially be addressed too with a small cache per thread which is much
  simpler. Generally though that requires shrinking of huge pages, and may overuse
  memory per thread (and is not compatible with `sbrk`).
- Since the current regions are per-process, we need atomic operations to
  claim blocks, which may be contended.
- In the worst case, we need to search the whole region map (16KiB for 256GiB)
  linearly. At what point will direct OS calls be faster? Is there a way to
  do this better without adding too much complexity?
-----------------------------------------------------------------------------*/
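/* Example (a minimal sketch, not an actual call site): a segment allocation would
   typically go through this layer roughly as follows, assuming `tld` is the caller's
   thread-local OS data (`mi_os_tld_t*`):

     bool   commit    = true;
     bool   large     = false;
     bool   is_pinned = false;
     bool   is_zero   = false;
     size_t memid     = 0;
     void*  p = _mi_mem_alloc_aligned(MI_SEGMENT_SIZE, MI_SEGMENT_ALIGN,
                                      &commit, &large, &is_pinned, &is_zero, &memid, tld);
     if (p != NULL) {
       // ... use the (committed) memory ...
       _mi_mem_free(p, MI_SEGMENT_SIZE, memid, commit, false, tld);
     }
*/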
#include "mimalloc.h"
#include "mimalloc-internal.h"
#include "mimalloc-atomic.h"

#include <string.h>  // memset

#include "bitmap.h"

// Internal raw OS interface
size_t _mi_os_large_page_size(void);
bool _mi_os_protect(void* addr, size_t size);
bool _mi_os_unprotect(void* addr, size_t size);
bool _mi_os_commit(void* p, size_t size, bool* is_zero, mi_stats_t* stats);
bool _mi_os_decommit(void* p, size_t size, mi_stats_t* stats);
bool _mi_os_reset(void* p, size_t size, mi_stats_t* stats);
bool _mi_os_unreset(void* p, size_t size, bool* is_zero, mi_stats_t* stats);

// arena.c
void  _mi_arena_free(void* p, size_t size, size_t memid, bool all_committed, mi_stats_t* stats);
void* _mi_arena_alloc(size_t size, bool* commit, bool* large, bool* is_pinned, bool* is_zero, size_t* memid, mi_os_tld_t* tld);
void* _mi_arena_alloc_aligned(size_t size, size_t alignment, bool* commit, bool* large, bool* is_pinned, bool* is_zero, size_t* memid, mi_os_tld_t* tld);



// Constants
#if (MI_INTPTR_SIZE==8)
#define MI_HEAP_REGION_MAX_SIZE (256 * MI_GiB) // 64KiB for the region map
#elif (MI_INTPTR_SIZE==4)
#define MI_HEAP_REGION_MAX_SIZE (3 * MI_GiB) // ~1.5KiB for the region map
#else
#error "define the maximum heap space allowed for regions on this platform"
#endif

#define MI_SEGMENT_ALIGN MI_SEGMENT_SIZE

#define MI_REGION_MAX_BLOCKS     MI_BITMAP_FIELD_BITS
#define MI_REGION_SIZE           (MI_SEGMENT_SIZE * MI_BITMAP_FIELD_BITS) // 256MiB (64MiB on 32 bits)
#define MI_REGION_MAX            (MI_HEAP_REGION_MAX_SIZE / MI_REGION_SIZE) // 1024 (48 on 32 bits)
#define MI_REGION_MAX_OBJ_BLOCKS (MI_REGION_MAX_BLOCKS/4) // 64MiB
#define MI_REGION_MAX_OBJ_SIZE   (MI_REGION_MAX_OBJ_BLOCKS*MI_SEGMENT_SIZE)

// Region info
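// (bit-packed into a single `size_t` value so the whole info can be read and
//  published atomically; see the loads/stores of `mi_region_info_t.value` below)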
typedef union mi_region_info_u {
  size_t value;
  struct {
    bool  valid;        // initialized?
    bool  is_large:1;   // allocated in fixed large/huge OS pages
    bool  is_pinned:1;  // pinned memory cannot be decommitted
    short numa_node;    // the associated NUMA node (where -1 means no associated node)
  } x;
} mi_region_info_t;


// A region owns a chunk of MI_REGION_SIZE (256MiB) of (virtual) memory, with
// a bitmap that has one bit per MI_SEGMENT_SIZE (4MiB) block.
typedef struct mem_region_s {
  _Atomic(size_t) info;        // mi_region_info_t.value
  _Atomic(void*) start;        // start of the memory area
  mi_bitmap_field_t in_use;    // bit per in-use block
  mi_bitmap_field_t dirty;     // track if non-zero per block
  mi_bitmap_field_t commit;    // track if committed per block
  mi_bitmap_field_t reset;     // track if reset per block
  _Atomic(size_t) arena_memid; // if allocated from a (huge page) arena
  size_t padding;              // round to 8 fields
} mem_region_t;

// The region map
static mem_region_t regions[MI_REGION_MAX];

// Allocated regions
static _Atomic(size_t) regions_count; // = 0;


/* ----------------------------------------------------------------------------
Utility functions
-----------------------------------------------------------------------------*/

// Blocks (of 4MiB) needed for the given size.
static size_t mi_region_block_count(size_t size) {
  return _mi_divide_up(size, MI_SEGMENT_SIZE);
}

/*
// Return a rounded commit/reset size such that we don't fragment large OS pages into small ones.
static size_t mi_good_commit_size(size_t size) {
  if (size > (SIZE_MAX - _mi_os_large_page_size())) return size;
  return _mi_align_up(size, _mi_os_large_page_size());
}
*/

// Return if a pointer points into a region reserved by us.
bool mi_is_in_heap_region(const void* p) mi_attr_noexcept {
  if (p==NULL) return false;
  size_t count = mi_atomic_load_relaxed(&regions_count);
  for (size_t i = 0; i < count; i++) {
    uint8_t* start = (uint8_t*)mi_atomic_load_ptr_relaxed(uint8_t, &regions[i].start);
    if (start != NULL && (uint8_t*)p >= start && (uint8_t*)p < start + MI_REGION_SIZE) return true;
  }
  return false;
}


static void* mi_region_blocks_start(const mem_region_t* region, mi_bitmap_index_t bit_idx) {
  uint8_t* start = (uint8_t*)mi_atomic_load_ptr_acquire(uint8_t, &((mem_region_t*)region)->start);
  mi_assert_internal(start != NULL);
  return (start + (bit_idx * MI_SEGMENT_SIZE));
}

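// A memory `id` encodes where memory came from: the lowest bit is 1 for a direct
// arena allocation (with the arena memid in the remaining bits), and 0 for a region
// allocation (with the region index and block index packed into the remaining bits).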
static size_t mi_memid_create(mem_region_t* region, mi_bitmap_index_t bit_idx) {
  mi_assert_internal(bit_idx < MI_BITMAP_FIELD_BITS);
  size_t idx = region - regions;
  mi_assert_internal(&regions[idx] == region);
  return (idx*MI_BITMAP_FIELD_BITS + bit_idx)<<1;
}

static size_t mi_memid_create_from_arena(size_t arena_memid) {
  return (arena_memid << 1) | 1;
}


static bool mi_memid_is_arena(size_t id, mem_region_t** region, mi_bitmap_index_t* bit_idx, size_t* arena_memid) {
  if ((id&1)==1) {
    if (arena_memid != NULL) *arena_memid = (id>>1);
    return true;
  }
  else {
    size_t idx = (id >> 1) / MI_BITMAP_FIELD_BITS;
    *bit_idx = (mi_bitmap_index_t)(id>>1) % MI_BITMAP_FIELD_BITS;
    *region = &regions[idx];
    return false;
  }
}

/* ----------------------------------------------------------------------------
Allocate a fresh region from the OS (or an arena)
-----------------------------------------------------------------------------*/

static bool mi_region_try_alloc_os(size_t blocks, bool commit, bool allow_large, mem_region_t** region, mi_bitmap_index_t* bit_idx, mi_os_tld_t* tld)
{
  // not out of regions yet?
  if (mi_atomic_load_relaxed(&regions_count) >= MI_REGION_MAX - 1) return false;

  // try to allocate a fresh region from the OS
  bool region_commit = (commit && mi_option_is_enabled(mi_option_eager_region_commit));
  bool region_large = (commit && allow_large);
  bool is_zero = false;
  bool is_pinned = false;
  size_t arena_memid = 0;
  void* const start = _mi_arena_alloc_aligned(MI_REGION_SIZE, MI_SEGMENT_ALIGN, &region_commit, &region_large, &is_pinned, &is_zero, &arena_memid, tld);
  if (start == NULL) return false;
  mi_assert_internal(!(region_large && !allow_large));
  mi_assert_internal(!region_large || region_commit);

  // claim a fresh slot
  const size_t idx = mi_atomic_increment_acq_rel(&regions_count);
  if (idx >= MI_REGION_MAX) {
    mi_atomic_decrement_acq_rel(&regions_count);
    _mi_arena_free(start, MI_REGION_SIZE, arena_memid, region_commit, tld->stats);
    _mi_warning_message("maximum regions used: %zu GiB (perhaps recompile with a larger setting for MI_HEAP_REGION_MAX_SIZE)", _mi_divide_up(MI_HEAP_REGION_MAX_SIZE, MI_GiB));
    return false;
  }

  // allocated, initialize and claim the initial blocks
  mem_region_t* r = &regions[idx];
  r->arena_memid = arena_memid;
  mi_atomic_store_release(&r->in_use, (size_t)0);
  mi_atomic_store_release(&r->dirty, (is_zero ? 0 : MI_BITMAP_FIELD_FULL));
  mi_atomic_store_release(&r->commit, (region_commit ? MI_BITMAP_FIELD_FULL : 0));
  mi_atomic_store_release(&r->reset, (size_t)0);
  *bit_idx = 0;
  _mi_bitmap_claim(&r->in_use, 1, blocks, *bit_idx, NULL);
  mi_atomic_store_ptr_release(void, &r->start, start);

  // and share it
  mi_region_info_t info;
  info.value = 0;  // initialize the full union to zero
  info.x.valid = true;
  info.x.is_large = region_large;
  info.x.is_pinned = is_pinned;
  info.x.numa_node = (short)_mi_os_numa_node(tld);
  mi_atomic_store_release(&r->info, info.value); // now make it available to others
  *region = r;
  return true;
}

/* ----------------------------------------------------------------------------
Try to claim blocks in suitable regions
-----------------------------------------------------------------------------*/

static bool mi_region_is_suitable(const mem_region_t* region, int numa_node, bool allow_large) {
  // initialized at all?
  mi_region_info_t info;
  info.value = mi_atomic_load_relaxed(&((mem_region_t*)region)->info);
  if (info.value==0) return false;

  // numa correct
  if (numa_node >= 0) {  // use negative numa node to always succeed
    int rnode = info.x.numa_node;
    if (rnode >= 0 && rnode != numa_node) return false;
  }

  // check allow-large
  if (!allow_large && info.x.is_large) return false;

  return true;
}


static bool mi_region_try_claim(int numa_node, size_t blocks, bool allow_large, mem_region_t** region, mi_bitmap_index_t* bit_idx, mi_os_tld_t* tld)
{
  // try all regions for a free slot
  const size_t count = mi_atomic_load_relaxed(&regions_count); // monotonic, so ok to be relaxed
  size_t idx = tld->region_idx; // Or start at 0 to reuse low addresses? Starting at 0 seems to increase latency though
  for (size_t visited = 0; visited < count; visited++, idx++) {
    if (idx >= count) idx = 0; // wrap around
    mem_region_t* r = &regions[idx];
    // if this region suits our demand (numa node matches, large OS page matches)
    if (mi_region_is_suitable(r, numa_node, allow_large)) {
      // then try to atomically claim a segment(s) in this region
      if (_mi_bitmap_try_find_claim_field(&r->in_use, 0, blocks, bit_idx)) {
        tld->region_idx = idx; // remember the last found position
        *region = r;
        return true;
      }
    }
  }
  return false;
}


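// Try to claim and initialize `blocks` blocks in some region; `commit` and `large` are
// in/out parameters (the request on entry, the actual state of the memory on exit),
// while `is_pinned` and `is_zero` are out parameters only.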
static void* mi_region_try_alloc(size_t blocks, bool* commit, bool* large, bool* is_pinned, bool* is_zero, size_t* memid, mi_os_tld_t* tld)
{
  mi_assert_internal(blocks <= MI_BITMAP_FIELD_BITS);
  mem_region_t* region;
  mi_bitmap_index_t bit_idx;
  const int numa_node = (_mi_os_numa_node_count() <= 1 ? -1 : _mi_os_numa_node(tld));
  // try to claim in existing regions
  if (!mi_region_try_claim(numa_node, blocks, *large, &region, &bit_idx, tld)) {
    // otherwise try to allocate a fresh region and claim in there
    if (!mi_region_try_alloc_os(blocks, *commit, *large, &region, &bit_idx, tld)) {
      // out of regions or memory
      return NULL;
    }
  }

  // ------------------------------------------------
  // found a region and claimed `blocks` at `bit_idx`, initialize them now
  mi_assert_internal(region != NULL);
  mi_assert_internal(_mi_bitmap_is_claimed(&region->in_use, 1, blocks, bit_idx));

  mi_region_info_t info;
  info.value = mi_atomic_load_acquire(&region->info);
  uint8_t* start = (uint8_t*)mi_atomic_load_ptr_acquire(uint8_t, &region->start);
  mi_assert_internal(!(info.x.is_large && !*large));
  mi_assert_internal(start != NULL);

  *is_zero = _mi_bitmap_claim(&region->dirty, 1, blocks, bit_idx, NULL);
  *large = info.x.is_large;
  *is_pinned = info.x.is_pinned;
  *memid = mi_memid_create(region, bit_idx);
  void* p = start + (mi_bitmap_index_bit_in_field(bit_idx) * MI_SEGMENT_SIZE);

  // commit
  if (*commit) {
    // ensure commit
    bool any_uncommitted;
    _mi_bitmap_claim(&region->commit, 1, blocks, bit_idx, &any_uncommitted);
    if (any_uncommitted) {
      mi_assert_internal(!info.x.is_large && !info.x.is_pinned);
      bool commit_zero = false;
      if (!_mi_mem_commit(p, blocks * MI_SEGMENT_SIZE, &commit_zero, tld)) {
        // failed to commit! unclaim and return
        mi_bitmap_unclaim(&region->in_use, 1, blocks, bit_idx);
        return NULL;
      }
      if (commit_zero) *is_zero = true;
    }
  }
  else {
    // no need to commit, but check if already fully committed
    *commit = _mi_bitmap_is_claimed(&region->commit, 1, blocks, bit_idx);
  }
  mi_assert_internal(!*commit || _mi_bitmap_is_claimed(&region->commit, 1, blocks, bit_idx));

  // unreset reset blocks
  if (_mi_bitmap_is_any_claimed(&region->reset, 1, blocks, bit_idx)) {
    // some blocks are still reset
    mi_assert_internal(!info.x.is_large && !info.x.is_pinned);
    mi_assert_internal(!mi_option_is_enabled(mi_option_eager_commit) || *commit || mi_option_get(mi_option_eager_commit_delay) > 0);
    mi_bitmap_unclaim(&region->reset, 1, blocks, bit_idx);
    if (*commit || !mi_option_is_enabled(mi_option_reset_decommits)) { // only if needed
      bool reset_zero = false;
      _mi_mem_unreset(p, blocks * MI_SEGMENT_SIZE, &reset_zero, tld);
      if (reset_zero) *is_zero = true;
    }
  }
  mi_assert_internal(!_mi_bitmap_is_any_claimed(&region->reset, 1, blocks, bit_idx));

  #if (MI_DEBUG>=2)
  if (*commit) { ((uint8_t*)p)[0] = 0; }
  #endif

  // and return the allocation
  mi_assert_internal(p != NULL);
  return p;
}


/* ----------------------------------------------------------------------------
Allocation
-----------------------------------------------------------------------------*/

// Allocate `size` memory aligned at `alignment`. Return non-NULL on success, with a given memory `id`.
// (`id` is abstract, but for a region allocation it is `(idx*MI_BITMAP_FIELD_BITS + bitidx) << 1`; see `mi_memid_create`)
void* _mi_mem_alloc_aligned(size_t size, size_t alignment, bool* commit, bool* large, bool* is_pinned, bool* is_zero, size_t* memid, mi_os_tld_t* tld)
{
  mi_assert_internal(memid != NULL && tld != NULL);
  mi_assert_internal(size > 0);
  *memid = 0;
  *is_zero = false;
  *is_pinned = false;
  bool default_large = false;
  if (large==NULL) large = &default_large; // ensure `large != NULL`
  if (size == 0) return NULL;
  size = _mi_align_up(size, _mi_os_page_size());

  // allocate from regions if possible
  void* p = NULL;
  size_t arena_memid;
  const size_t blocks = mi_region_block_count(size);
  if (blocks <= MI_REGION_MAX_OBJ_BLOCKS && alignment <= MI_SEGMENT_ALIGN) {
    p = mi_region_try_alloc(blocks, commit, large, is_pinned, is_zero, memid, tld);
    if (p == NULL) {
      _mi_warning_message("unable to allocate from region: size %zu\n", size);
    }
  }
  if (p == NULL) {
    // and otherwise fall back to the OS
    p = _mi_arena_alloc_aligned(size, alignment, commit, large, is_pinned, is_zero, &arena_memid, tld);
    *memid = mi_memid_create_from_arena(arena_memid);
  }

  if (p != NULL) {
    mi_assert_internal((uintptr_t)p % alignment == 0);
    #if (MI_DEBUG>=2)
    if (*commit) { ((uint8_t*)p)[0] = 0; } // ensure the memory is committed
    #endif
  }
  return p;
}



/* ----------------------------------------------------------------------------
Free
-----------------------------------------------------------------------------*/

// Free previously allocated memory with a given id.
void _mi_mem_free(void* p, size_t size, size_t id, bool full_commit, bool any_reset, mi_os_tld_t* tld) {
  mi_assert_internal(size > 0 && tld != NULL);
  if (p==NULL) return;
  if (size==0) return;
  size = _mi_align_up(size, _mi_os_page_size());

  size_t arena_memid = 0;
  mi_bitmap_index_t bit_idx;
  mem_region_t* region;
  if (mi_memid_is_arena(id, &region, &bit_idx, &arena_memid)) {
    // was a direct arena allocation, pass through
    _mi_arena_free(p, size, arena_memid, full_commit, tld->stats);
  }
  else {
    // allocated in a region
    mi_assert_internal(size <= MI_REGION_MAX_OBJ_SIZE); if (size > MI_REGION_MAX_OBJ_SIZE) return;
    const size_t blocks = mi_region_block_count(size);
    mi_assert_internal(blocks + bit_idx <= MI_BITMAP_FIELD_BITS);
    mi_region_info_t info;
    info.value = mi_atomic_load_acquire(&region->info);
    mi_assert_internal(info.value != 0);
    void* blocks_start = mi_region_blocks_start(region, bit_idx);
    mi_assert_internal(blocks_start == p); // not a pointer in our area?
    mi_assert_internal(bit_idx + blocks <= MI_BITMAP_FIELD_BITS);
    if (blocks_start != p || bit_idx + blocks > MI_BITMAP_FIELD_BITS) return; // or `abort`?

    // if the segment was fully committed, mark its blocks as committed (only if the size covers whole blocks)
    if (full_commit && (size % MI_SEGMENT_SIZE) == 0) {
      _mi_bitmap_claim(&region->commit, 1, blocks, bit_idx, NULL);
    }

    if (any_reset) {
      // set the is_reset bits if any pages were reset
      _mi_bitmap_claim(&region->reset, 1, blocks, bit_idx, NULL);
    }

    // reset the blocks to reduce the working set.
    if (!info.x.is_large && !info.x.is_pinned && mi_option_is_enabled(mi_option_segment_reset)
        && (mi_option_is_enabled(mi_option_eager_commit) ||
            mi_option_is_enabled(mi_option_reset_decommits))) // cannot reset halfway committed segments, use only `option_page_reset` instead
    {
      bool any_unreset;
      _mi_bitmap_claim(&region->reset, 1, blocks, bit_idx, &any_unreset);
      if (any_unreset) {
        _mi_abandoned_await_readers(); // ensure no more pending write (in case reset = decommit)
        _mi_mem_reset(p, blocks * MI_SEGMENT_SIZE, tld);
      }
    }

    // and unclaim
    bool all_unclaimed = mi_bitmap_unclaim(&region->in_use, 1, blocks, bit_idx);
    mi_assert_internal(all_unclaimed); MI_UNUSED(all_unclaimed);
  }
}


/* ----------------------------------------------------------------------------
Collection
-----------------------------------------------------------------------------*/
void _mi_mem_collect(mi_os_tld_t* tld) {
  // free every region that has no segments in use.
  size_t rcount = mi_atomic_load_relaxed(&regions_count);
  for (size_t i = 0; i < rcount; i++) {
    mem_region_t* region = &regions[i];
    if (mi_atomic_load_relaxed(&region->info) != 0) {
      // if no segments used, try to claim the whole region
      size_t m = mi_atomic_load_relaxed(&region->in_use);
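      // use a weak CAS in a loop: retry on a spurious failure, but give up as soon as
      // `m` becomes non-zero (i.e. another thread claimed a block concurrently)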
      while (m == 0 && !mi_atomic_cas_weak_release(&region->in_use, &m, MI_BITMAP_FIELD_FULL)) { /* nothing */ };
      if (m == 0) {
        // on success, free the whole region
        uint8_t* start = (uint8_t*)mi_atomic_load_ptr_acquire(uint8_t, &regions[i].start);
        size_t arena_memid = mi_atomic_load_relaxed(&regions[i].arena_memid);
        size_t commit = mi_atomic_load_relaxed(&regions[i].commit);
        memset((void*)&regions[i], 0, sizeof(mem_region_t)); // cast to void* to avoid atomic warning
        // and release the whole region
        mi_atomic_store_release(&region->info, (size_t)0);
        if (start != NULL) { // && !_mi_os_is_huge_reserved(start)) {
          _mi_abandoned_await_readers(); // ensure no pending reads
          _mi_arena_free(start, MI_REGION_SIZE, arena_memid, (~commit == 0), tld->stats);
        }
      }
    }
  }
}


/* ----------------------------------------------------------------------------
Other
-----------------------------------------------------------------------------*/
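// The following are thin wrappers that forward to the raw OS primitives declared
// at the top of this file, passing the thread-local statistics where needed.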

bool _mi_mem_reset(void* p, size_t size, mi_os_tld_t* tld) {
  return _mi_os_reset(p, size, tld->stats);
}

bool _mi_mem_unreset(void* p, size_t size, bool* is_zero, mi_os_tld_t* tld) {
  return _mi_os_unreset(p, size, is_zero, tld->stats);
}

bool _mi_mem_commit(void* p, size_t size, bool* is_zero, mi_os_tld_t* tld) {
  return _mi_os_commit(p, size, is_zero, tld->stats);
}

bool _mi_mem_decommit(void* p, size_t size, mi_os_tld_t* tld) {
  return _mi_os_decommit(p, size, tld->stats);
}

bool _mi_mem_protect(void* p, size_t size) {
  return _mi_os_protect(p, size);
}

bool _mi_mem_unprotect(void* p, size_t size) {
  return _mi_os_unprotect(p, size);
}
