/* ----------------------------------------------------------------------------
Copyright (c) 2018-2021, Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
-----------------------------------------------------------------------------*/
#ifndef _DEFAULT_SOURCE
#define _DEFAULT_SOURCE   // for realpath() on Linux
#endif

#include "mimalloc.h"
#include "mimalloc-internal.h"
#include "mimalloc-atomic.h"

#include <string.h>  // memset, strlen
#include <stdlib.h>  // malloc, exit

#define MI_IN_ALLOC_C
#include "alloc-override.c"
#undef MI_IN_ALLOC_C

// ------------------------------------------------------
// Allocation
// ------------------------------------------------------

// Fast allocation in a page: just pop from the free list.
// Fall back to generic allocation only if the list is empty.
extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t size) mi_attr_noexcept {
  mi_assert_internal(page->xblock_size==0||mi_page_block_size(page) >= size);
  mi_block_t* const block = page->free;
  if (mi_unlikely(block == NULL)) {
    return _mi_malloc_generic(heap, size);
  }
  mi_assert_internal(block != NULL && _mi_ptr_page(block) == page);
  // pop from the free list
  page->used++;
  page->free = mi_block_next(page, block);
  mi_assert_internal(page->free == NULL || _mi_ptr_page(page->free) == page);

#if (MI_DEBUG>0)
  if (!page->is_zero) { memset(block, MI_DEBUG_UNINIT, size); }
#elif (MI_SECURE!=0)
  block->next = 0;  // don't leak internal data
#endif

#if (MI_STAT>0)
  const size_t bsize = mi_page_usable_block_size(page);
  if (bsize <= MI_LARGE_OBJ_SIZE_MAX) {
    mi_heap_stat_increase(heap, normal, bsize);
    mi_heap_stat_counter_increase(heap, normal_count, 1);
#if (MI_STAT>1)
    const size_t bin = _mi_bin(bsize);
    mi_heap_stat_increase(heap, normal_bins[bin], 1);
#endif
  }
#endif

#if (MI_PADDING > 0) && defined(MI_ENCODE_FREELIST)
  mi_padding_t* const padding = (mi_padding_t*)((uint8_t*)block + mi_page_usable_block_size(page));
  ptrdiff_t delta = ((uint8_t*)padding - (uint8_t*)block - (size - MI_PADDING_SIZE));
  mi_assert_internal(delta >= 0 && mi_page_usable_block_size(page) >= (size - MI_PADDING_SIZE + delta));
  padding->canary = (uint32_t)(mi_ptr_encode(page,block,page->keys));
  padding->delta  = (uint32_t)(delta);
  uint8_t* fill = (uint8_t*)padding - delta;
  const size_t maxpad = (delta > MI_MAX_ALIGN_SIZE ? MI_MAX_ALIGN_SIZE : delta); // set at most N initial padding bytes
  for (size_t i = 0; i < maxpad; i++) { fill[i] = MI_DEBUG_PADDING; }
#endif

  return block;
}

// allocate a small block
extern inline mi_decl_restrict void* mi_heap_malloc_small(mi_heap_t* heap, size_t size) mi_attr_noexcept {
  mi_assert(heap!=NULL);
  mi_assert(heap->thread_id == 0 || heap->thread_id == _mi_thread_id()); // heaps are thread local
  mi_assert(size <= MI_SMALL_SIZE_MAX);
  #if (MI_PADDING)
  if (size == 0) {
    size = sizeof(void*);
  }
  #endif
  mi_page_t* page = _mi_heap_get_free_small_page(heap, size + MI_PADDING_SIZE);
  void* p = _mi_page_malloc(heap, page, size + MI_PADDING_SIZE);
  mi_assert_internal(p==NULL || mi_usable_size(p) >= size);
  #if MI_STAT>1
  if (p != NULL) {
    if (!mi_heap_is_initialized(heap)) { heap = mi_get_default_heap(); }
    mi_heap_stat_increase(heap, malloc, mi_usable_size(p));
  }
  #endif
  return p;
}

extern inline mi_decl_restrict void* mi_malloc_small(size_t size) mi_attr_noexcept {
  return mi_heap_malloc_small(mi_get_default_heap(), size);
}

// The main allocation function
extern inline mi_decl_restrict void* mi_heap_malloc(mi_heap_t* heap, size_t size) mi_attr_noexcept {
  if (mi_likely(size <= MI_SMALL_SIZE_MAX)) {
    return mi_heap_malloc_small(heap, size);
  }
  else {
    mi_assert(heap!=NULL);
    mi_assert(heap->thread_id == 0 || heap->thread_id == _mi_thread_id()); // heaps are thread local
    void* const p = _mi_malloc_generic(heap, size + MI_PADDING_SIZE);      // note: size can overflow but it is detected in malloc_generic
    mi_assert_internal(p == NULL || mi_usable_size(p) >= size);
    #if MI_STAT>1
    if (p != NULL) {
      if (!mi_heap_is_initialized(heap)) { heap = mi_get_default_heap(); }
      mi_heap_stat_increase(heap, malloc, mi_usable_size(p));
    }
    #endif
    return p;
  }
}

extern inline mi_decl_restrict void* mi_malloc(size_t size) mi_attr_noexcept {
  return mi_heap_malloc(mi_get_default_heap(), size);
}
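
// Illustrative only (not part of the allocator): a plain allocate/free pair ends
// up in the fast paths above; the names and sizes below are made up.
//
//   int* xs = (int*)mi_malloc(100 * sizeof(int));
//   if (xs != NULL) {
//     xs[0] = 42;
//     mi_free(xs);
//   }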


void _mi_block_zero_init(const mi_page_t* page, void* p, size_t size) {
  // note: we need to initialize the whole usable block size to zero, not just the requested size,
  // or the recalloc/rezalloc functions cannot safely expand in place (see issue #63)
  MI_UNUSED(size);
  mi_assert_internal(p != NULL);
  mi_assert_internal(mi_usable_size(p) >= size); // size can be zero
  mi_assert_internal(_mi_ptr_page(p)==page);
  if (page->is_zero && size > sizeof(mi_block_t)) {
    // already zero initialized memory
    ((mi_block_t*)p)->next = 0;  // clear the free list pointer
    mi_assert_expensive(mi_mem_is_zero(p, mi_usable_size(p)));
  }
  else {
    // otherwise memset
    memset(p, 0, mi_usable_size(p));
  }
}

// zero initialized small block
mi_decl_restrict void* mi_zalloc_small(size_t size) mi_attr_noexcept {
  void* p = mi_malloc_small(size);
  if (p != NULL) {
    _mi_block_zero_init(_mi_ptr_page(p), p, size);  // todo: can we avoid getting the page again?
  }
  return p;
}

void* _mi_heap_malloc_zero(mi_heap_t* heap, size_t size, bool zero) {
  void* p = mi_heap_malloc(heap,size);
  if (zero && p != NULL) {
    _mi_block_zero_init(_mi_ptr_page(p),p,size);  // todo: can we avoid getting the page again?
  }
  return p;
}

extern inline mi_decl_restrict void* mi_heap_zalloc(mi_heap_t* heap, size_t size) mi_attr_noexcept {
  return _mi_heap_malloc_zero(heap, size, true);
}

mi_decl_restrict void* mi_zalloc(size_t size) mi_attr_noexcept {
  return mi_heap_zalloc(mi_get_default_heap(),size);
}
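
// Illustrative only: `mi_zalloc` zero-initializes the entire usable block (not just
// the requested size), which is what lets `mi_recalloc`/`mi_rezalloc` expand in
// place safely; the variable name below is made up.
//
//   uint8_t* buf = (uint8_t*)mi_zalloc(100);
//   // buf[0 .. mi_usable_size(buf)-1] are all zero here
//   mi_free(buf);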


// ------------------------------------------------------
// Check for double free in secure and debug mode
// This is somewhat expensive, so it is only enabled in secure mode 4 (or debug mode)
// ------------------------------------------------------

#if (MI_ENCODE_FREELIST && (MI_SECURE>=4 || MI_DEBUG!=0))
// linear check if the free list contains a specific element
static bool mi_list_contains(const mi_page_t* page, const mi_block_t* list, const mi_block_t* elem) {
  while (list != NULL) {
    if (elem==list) return true;
    list = mi_block_next(page, list);
  }
  return false;
}

static mi_decl_noinline bool mi_check_is_double_freex(const mi_page_t* page, const mi_block_t* block) {
  // The decoded value is in the same page (or NULL).
  // Walk the free lists to verify positively if it is already freed
  if (mi_list_contains(page, page->free, block) ||
      mi_list_contains(page, page->local_free, block) ||
      mi_list_contains(page, mi_page_thread_free(page), block))
  {
    _mi_error_message(EAGAIN, "double free detected of block %p with size %zu\n", block, mi_page_block_size(page));
    return true;
  }
  return false;
}

static inline bool mi_check_is_double_free(const mi_page_t* page, const mi_block_t* block) {
  mi_block_t* n = mi_block_nextx(page, block, page->keys); // pretend it is freed, and get the decoded first field
  if (((uintptr_t)n & (MI_INTPTR_SIZE-1))==0 &&  // quick check: aligned pointer?
      (n==NULL || mi_is_in_same_page(block, n))) // quick check: in same page or NULL?
  {
    // Suspicious: the decoded value in the block points into the same page (or is NULL) -- maybe a double free?
    // (continue in a separate function to improve code generation)
    return mi_check_is_double_freex(page, block);
  }
  return false;
}
#else
static inline bool mi_check_is_double_free(const mi_page_t* page, const mi_block_t* block) {
  MI_UNUSED(page);
  MI_UNUSED(block);
  return false;
}
#endif
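
// Illustrative only: the kind of bug the check above reports when it is compiled
// in (encoded free lists plus secure mode 4 or debug mode).
//
//   void* p = mi_malloc(32);
//   mi_free(p);
//   mi_free(p);   // the second free is detected and reported via _mi_error_message(EAGAIN, ...)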

// ---------------------------------------------------------------------------
// Check for heap block overflow by setting up padding at the end of the block
// ---------------------------------------------------------------------------

#if (MI_PADDING>0) && defined(MI_ENCODE_FREELIST)
static bool mi_page_decode_padding(const mi_page_t* page, const mi_block_t* block, size_t* delta, size_t* bsize) {
  *bsize = mi_page_usable_block_size(page);
  const mi_padding_t* const padding = (mi_padding_t*)((uint8_t*)block + *bsize);
  *delta = padding->delta;
  return ((uint32_t)mi_ptr_encode(page,block,page->keys) == padding->canary && *delta <= *bsize);
}

// Return the exact usable size of a block.
static size_t mi_page_usable_size_of(const mi_page_t* page, const mi_block_t* block) {
  size_t bsize;
  size_t delta;
  bool ok = mi_page_decode_padding(page, block, &delta, &bsize);
  mi_assert_internal(ok); mi_assert_internal(delta <= bsize);
  return (ok ? bsize - delta : 0);
}

static bool mi_verify_padding(const mi_page_t* page, const mi_block_t* block, size_t* size, size_t* wrong) {
  size_t bsize;
  size_t delta;
  bool ok = mi_page_decode_padding(page, block, &delta, &bsize);
  *size = *wrong = bsize;
  if (!ok) return false;
  mi_assert_internal(bsize >= delta);
  *size = bsize - delta;
  uint8_t* fill = (uint8_t*)block + bsize - delta;
  const size_t maxpad = (delta > MI_MAX_ALIGN_SIZE ? MI_MAX_ALIGN_SIZE : delta); // check at most the first N padding bytes
  for (size_t i = 0; i < maxpad; i++) {
    if (fill[i] != MI_DEBUG_PADDING) {
      *wrong = bsize - delta + i;
      return false;
    }
  }
  return true;
}

static void mi_check_padding(const mi_page_t* page, const mi_block_t* block) {
  size_t size;
  size_t wrong;
  if (!mi_verify_padding(page,block,&size,&wrong)) {
    _mi_error_message(EFAULT, "buffer overflow in heap block %p of size %zu: write after %zu bytes\n", block, size, wrong );
  }
}

// When a non-thread-local block is freed, it becomes part of the thread delayed free
// list that is freed later by the owning heap. If the exact usable size is too small to
// contain the pointer for the delayed list, then shrink the padding (by decreasing delta)
// so it will later not trigger an overflow error in `mi_free_block`.
static void mi_padding_shrink(const mi_page_t* page, const mi_block_t* block, const size_t min_size) {
  size_t bsize;
  size_t delta;
  bool ok = mi_page_decode_padding(page, block, &delta, &bsize);
  mi_assert_internal(ok);
  if (!ok || (bsize - delta) >= min_size) return;  // usually already enough space
  mi_assert_internal(bsize >= min_size);
  if (bsize < min_size) return;  // should never happen
  size_t new_delta = (bsize - min_size);
  mi_assert_internal(new_delta < bsize);
  mi_padding_t* padding = (mi_padding_t*)((uint8_t*)block + bsize);
  padding->delta = (uint32_t)new_delta;
}
#else
static void mi_check_padding(const mi_page_t* page, const mi_block_t* block) {
  MI_UNUSED(page);
  MI_UNUSED(block);
}

static size_t mi_page_usable_size_of(const mi_page_t* page, const mi_block_t* block) {
  MI_UNUSED(block);
  return mi_page_usable_block_size(page);
}

static void mi_padding_shrink(const mi_page_t* page, const mi_block_t* block, const size_t min_size) {
  MI_UNUSED(page);
  MI_UNUSED(block);
  MI_UNUSED(min_size);
}
#endif
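
// Sketch of the padded block layout that the functions above read back (sizes not
// to scale); `delta` is the distance from the end of the requested size to the
// padding struct, and the canary is the block pointer encoded with the page keys.
// Only up to MI_MAX_ALIGN_SIZE of the fill bytes are set to MI_DEBUG_PADDING.
//
//   block                        block + bsize - delta        block + bsize
//   | requested bytes ........ | fill (MI_DEBUG_PADDING...) | mi_padding_t { canary, delta } |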

// only maintain stats for smaller objects if requested
#if (MI_STAT>0)
static void mi_stat_free(const mi_page_t* page, const mi_block_t* block) {
#if (MI_STAT < 2)
  MI_UNUSED(block);
#endif
  mi_heap_t* const heap = mi_heap_get_default();
  const size_t bsize = mi_page_usable_block_size(page);
#if (MI_STAT>1)
  const size_t usize = mi_page_usable_size_of(page, block);
  mi_heap_stat_decrease(heap, malloc, usize);
#endif
  if (bsize <= MI_LARGE_OBJ_SIZE_MAX) {
    mi_heap_stat_decrease(heap, normal, bsize);
#if (MI_STAT > 1)
    mi_heap_stat_decrease(heap, normal_bins[_mi_bin(bsize)], 1);
#endif
  }
}
#else
static void mi_stat_free(const mi_page_t* page, const mi_block_t* block) {
  MI_UNUSED(page); MI_UNUSED(block);
}
#endif

#if (MI_STAT>0)
// maintain stats for huge objects
static void mi_stat_huge_free(const mi_page_t* page) {
  mi_heap_t* const heap = mi_heap_get_default();
  const size_t bsize = mi_page_block_size(page); // to match stats in `page.c:mi_page_huge_alloc`
  if (bsize <= MI_HUGE_OBJ_SIZE_MAX) {
    mi_heap_stat_decrease(heap, huge, bsize);
  }
  else {
    mi_heap_stat_decrease(heap, giant, bsize);
  }
}
#else
static void mi_stat_huge_free(const mi_page_t* page) {
  MI_UNUSED(page);
}
#endif

// ------------------------------------------------------
// Free
// ------------------------------------------------------

// multi-threaded free
static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* block)
{
  // The padding check may access the non-thread-owned page for the key values.
  // That is safe as these are constant and the page won't be freed (as the block is not freed yet).
  mi_check_padding(page, block);
  mi_padding_shrink(page, block, sizeof(mi_block_t)); // for small sizes, ensure we can fit the delayed thread pointers without triggering overflow detection
  #if (MI_DEBUG!=0)
  memset(block, MI_DEBUG_FREED, mi_usable_size(block));
  #endif

  // huge page segments are always abandoned and can be freed immediately
  mi_segment_t* const segment = _mi_page_segment(page);
  if (segment->page_kind==MI_PAGE_HUGE) {
    mi_stat_huge_free(page);
    _mi_segment_huge_page_free(segment, page, block);
    return;
  }

  // Try to put the block on either the page-local thread free list, or the heap delayed free list.
  mi_thread_free_t tfreex;
  bool use_delayed;
  mi_thread_free_t tfree = mi_atomic_load_relaxed(&page->xthread_free);
  do {
    use_delayed = (mi_tf_delayed(tfree) == MI_USE_DELAYED_FREE);
    if (mi_unlikely(use_delayed)) {
      // unlikely: this only happens on the first concurrent free in a page that is in the full list
      tfreex = mi_tf_set_delayed(tfree,MI_DELAYED_FREEING);
    }
    else {
      // usual: directly add to the page thread_free list
      mi_block_set_next(page, block, mi_tf_block(tfree));
      tfreex = mi_tf_set_block(tfree,block);
    }
  } while (!mi_atomic_cas_weak_release(&page->xthread_free, &tfree, tfreex));

  if (mi_unlikely(use_delayed)) {
    // racy read on `heap`, but ok because MI_DELAYED_FREEING is set (see `mi_heap_delete` and `mi_heap_collect_abandon`)
    mi_heap_t* const heap = (mi_heap_t*)(mi_atomic_load_acquire(&page->xheap)); //mi_page_heap(page);
    mi_assert_internal(heap != NULL);
    if (heap != NULL) {
      // add to the delayed free list of this heap. (do this atomically as the lock only protects heap memory validity)
      mi_block_t* dfree = mi_atomic_load_ptr_relaxed(mi_block_t, &heap->thread_delayed_free);
      do {
        mi_block_set_nextx(heap,block,dfree, heap->keys);
      } while (!mi_atomic_cas_ptr_weak_release(mi_block_t,&heap->thread_delayed_free, &dfree, block));
    }

    // and reset the MI_DELAYED_FREEING flag
    tfree = mi_atomic_load_relaxed(&page->xthread_free);
    do {
      tfreex = tfree;
      mi_assert_internal(mi_tf_delayed(tfree) == MI_DELAYED_FREEING);
      tfreex = mi_tf_set_delayed(tfree,MI_NO_DELAYED_FREE);
    } while (!mi_atomic_cas_weak_release(&page->xthread_free, &tfree, tfreex));
  }
}

// regular free
static inline void _mi_free_block(mi_page_t* page, bool local, mi_block_t* block)
{
  // and push it on the free list
  if (mi_likely(local)) {
    // owning thread can free a block directly
    if (mi_unlikely(mi_check_is_double_free(page, block))) return;
    mi_check_padding(page, block);
    #if (MI_DEBUG!=0)
    memset(block, MI_DEBUG_FREED, mi_page_block_size(page));
    #endif
    mi_block_set_next(page, block, page->local_free);
    page->local_free = block;
    page->used--;
    if (mi_unlikely(mi_page_all_free(page))) {
      _mi_page_retire(page);
    }
    else if (mi_unlikely(mi_page_is_in_full(page))) {
      _mi_page_unfull(page);
    }
  }
  else {
    _mi_free_block_mt(page,block);
  }
}


// Adjust a block that was allocated with an aligned allocation to the actual start of the block in the page.
mi_block_t* _mi_page_ptr_unalign(const mi_segment_t* segment, const mi_page_t* page, const void* p) {
  mi_assert_internal(page!=NULL && p!=NULL);
  const size_t diff   = (uint8_t*)p - _mi_page_start(segment, page, NULL);
  const size_t adjust = (diff % mi_page_block_size(page));
  return (mi_block_t*)((uintptr_t)p - adjust);
}
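
// Illustrative only: an aligned allocation may return a pointer strictly inside its
// block, and `mi_free` then uses the helper above to recover the block start. The
// size and alignment below are made up.
//
//   void* p = mi_malloc_aligned(48, 64);  // p may not coincide with the block start
//   mi_free(p);                           // the aligned-page flag routes this through _mi_page_ptr_unalign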


static void mi_decl_noinline mi_free_generic(const mi_segment_t* segment, bool local, void* p) mi_attr_noexcept {
  mi_page_t* const page = _mi_segment_page_of(segment, p);
  mi_block_t* const block = (mi_page_has_aligned(page) ? _mi_page_ptr_unalign(segment, page, p) : (mi_block_t*)p);
  mi_stat_free(page, block);
  _mi_free_block(page, local, block);
}

// Get the segment data belonging to a pointer
// This is just a single `and` in assembly, but in debug mode (and secure mode)
// it further checks that the pointer is valid.
static inline mi_segment_t* mi_checked_ptr_segment(const void* p, const char* msg)
{
  MI_UNUSED(msg);
#if (MI_DEBUG>0)
  if (mi_unlikely(((uintptr_t)p & (MI_INTPTR_SIZE - 1)) != 0)) {
    _mi_error_message(EINVAL, "%s: invalid (unaligned) pointer: %p\n", msg, p);
    return NULL;
  }
#endif

  mi_segment_t* const segment = _mi_ptr_segment(p);
  if (mi_unlikely(segment == NULL)) return NULL;  // checks also for (p==NULL)

#if (MI_DEBUG>0)
  if (mi_unlikely(!mi_is_in_heap_region(p))) {
    _mi_warning_message("%s: pointer might not point to a valid heap region: %p\n"
      "(this may still be a valid very large allocation (over 64MiB))\n", msg, p);
    if (mi_likely(_mi_ptr_cookie(segment) == segment->cookie)) {
      _mi_warning_message("(yes, the previous pointer %p was valid after all)\n", p);
    }
  }
#endif
#if (MI_DEBUG>0 || MI_SECURE>=4)
  if (mi_unlikely(_mi_ptr_cookie(segment) != segment->cookie)) {
    _mi_error_message(EINVAL, "%s: pointer does not point to a valid heap space: %p\n", msg, p);
  }
#endif
  return segment;
}


// Free a block
void mi_free(void* p) mi_attr_noexcept
{
  const mi_segment_t* const segment = mi_checked_ptr_segment(p,"mi_free");
  if (mi_unlikely(segment == NULL)) return;

  const mi_threadid_t tid = _mi_thread_id();
  mi_page_t* const page = _mi_segment_page_of(segment, p);
  mi_block_t* const block = (mi_block_t*)p;

  if (mi_likely(tid == mi_atomic_load_relaxed(&segment->thread_id) && page->flags.full_aligned == 0)) {  // the thread id matches and it is not a full page, nor has aligned blocks
    // local, and not full or aligned
    if (mi_unlikely(mi_check_is_double_free(page,block))) return;
    mi_check_padding(page, block);
    mi_stat_free(page, block);
    #if (MI_DEBUG!=0)
    memset(block, MI_DEBUG_FREED, mi_page_block_size(page));
    #endif
    mi_block_set_next(page, block, page->local_free);
    page->local_free = block;
    if (mi_unlikely(--page->used == 0)) {   // using this expression generates better code than: page->used--; if (mi_page_all_free(page))
      _mi_page_retire(page);
    }
  }
  else {
    // non-local, aligned blocks, or a full page; use the more generic path
    // note: recalc page in generic to improve code generation
    mi_free_generic(segment, tid == segment->thread_id, p);
  }
}

bool _mi_free_delayed_block(mi_block_t* block) {
  // get segment and page
  const mi_segment_t* const segment = _mi_ptr_segment(block);
  mi_assert_internal(_mi_ptr_cookie(segment) == segment->cookie);
  mi_assert_internal(_mi_thread_id() == segment->thread_id);
  mi_page_t* const page = _mi_segment_page_of(segment, block);

  // Clear the no-delayed flag so delayed freeing is used again for this page.
  // This must be done before collecting the free lists on this page -- otherwise
  // some blocks may end up in the page `thread_free` list with no blocks in the
  // heap `thread_delayed_free` list, which may cause the page to never be freed!
  // (it would only be freed if we happen to scan it in `mi_page_queue_find_free_ex`)
  _mi_page_use_delayed_free(page, MI_USE_DELAYED_FREE, false /* don't overwrite never-delayed */);

  // collect all other non-local frees to ensure an up-to-date `used` count
  _mi_page_free_collect(page, false);

  // and free the block (possibly freeing the page as well since `used` is updated)
  _mi_free_block(page, true, block);
  return true;
}

// Bytes available in a block
static size_t _mi_usable_size(const void* p, const char* msg) mi_attr_noexcept {
  const mi_segment_t* const segment = mi_checked_ptr_segment(p,msg);
  if (segment==NULL) return 0;
  const mi_page_t* const page = _mi_segment_page_of(segment, p);
  const mi_block_t* block = (const mi_block_t*)p;
  if (mi_unlikely(mi_page_has_aligned(page))) {
    block = _mi_page_ptr_unalign(segment, page, p);
    size_t size = mi_page_usable_size_of(page, block);
    ptrdiff_t const adjust = (uint8_t*)p - (uint8_t*)block;
    mi_assert_internal(adjust >= 0 && (size_t)adjust <= size);
    return (size - adjust);
  }
  else {
    return mi_page_usable_size_of(page, block);
  }
}

size_t mi_usable_size(const void* p) mi_attr_noexcept {
  return _mi_usable_size(p, "mi_usable_size");
}
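
// Illustrative only: the usable size may be larger than what was requested, and
// the whole usable range may be used by the caller; the names below are made up.
//
//   char* s = (char*)mi_malloc(100);
//   size_t cap = mi_usable_size(s);   // cap >= 100
//   memset(s, 0, cap);                // writing up to `cap` bytes is fine
//   mi_free(s);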


// ------------------------------------------------------
// ensure explicit external inline definitions are emitted!
// ------------------------------------------------------

#ifdef __cplusplus
void* _mi_externs[] = {
  (void*)&_mi_page_malloc,
  (void*)&mi_malloc,
  (void*)&mi_malloc_small,
  (void*)&mi_zalloc_small,
  (void*)&mi_heap_malloc,
  (void*)&mi_heap_zalloc,
  (void*)&mi_heap_malloc_small
};
#endif


// ------------------------------------------------------
// Allocation extensions
// ------------------------------------------------------

void mi_free_size(void* p, size_t size) mi_attr_noexcept {
  MI_UNUSED_RELEASE(size);
  mi_assert(p == NULL || size <= _mi_usable_size(p,"mi_free_size"));
  mi_free(p);
}

void mi_free_size_aligned(void* p, size_t size, size_t alignment) mi_attr_noexcept {
  MI_UNUSED_RELEASE(alignment);
  mi_assert(((uintptr_t)p % alignment) == 0);
  mi_free_size(p,size);
}

void mi_free_aligned(void* p, size_t alignment) mi_attr_noexcept {
  MI_UNUSED_RELEASE(alignment);
  mi_assert(((uintptr_t)p % alignment) == 0);
  mi_free(p);
}
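
// Illustrative only: the size and alignment hints are only validated with
// `mi_assert` (debug builds) and are otherwise unused. Values below are made up.
//
//   void* p = mi_malloc_aligned(100, 32);
//   mi_free_size_aligned(p, 100, 32);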

extern inline mi_decl_restrict void* mi_heap_calloc(mi_heap_t* heap, size_t count, size_t size) mi_attr_noexcept {
  size_t total;
  if (mi_count_size_overflow(count,size,&total)) return NULL;
  return mi_heap_zalloc(heap,total);
}

mi_decl_restrict void* mi_calloc(size_t count, size_t size) mi_attr_noexcept {
  return mi_heap_calloc(mi_get_default_heap(),count,size);
}

// Uninitialized `calloc`
extern mi_decl_restrict void* mi_heap_mallocn(mi_heap_t* heap, size_t count, size_t size) mi_attr_noexcept {
  size_t total;
  if (mi_count_size_overflow(count, size, &total)) return NULL;
  return mi_heap_malloc(heap, total);
}

mi_decl_restrict void* mi_mallocn(size_t count, size_t size) mi_attr_noexcept {
  return mi_heap_mallocn(mi_get_default_heap(),count,size);
}

// Expand in place or fail
void* mi_expand(void* p, size_t newsize) mi_attr_noexcept {
  if (p == NULL) return NULL;
  size_t size = _mi_usable_size(p,"mi_expand");
  if (newsize > size) return NULL;
  return p; // it fits
}
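
// Illustrative only: `mi_expand` never moves the block; it only succeeds when the
// new size already fits in the usable size. The names below are made up.
//
//   void* p = mi_malloc(100);
//   void* q = mi_expand(p, 120);                 // NULL unless mi_usable_size(p) >= 120
//   if (q == NULL) { p = mi_realloc(p, 120); }   // fall back to a moving reallocation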

void* _mi_heap_realloc_zero(mi_heap_t* heap, void* p, size_t newsize, bool zero) {
  if (p == NULL) return _mi_heap_malloc_zero(heap,newsize,zero);
  size_t size = _mi_usable_size(p,"mi_realloc");
  if (newsize <= size && newsize >= (size / 2)) {
    return p;  // reallocation still fits and not more than 50% waste
  }
  void* newp = mi_heap_malloc(heap,newsize);
  if (mi_likely(newp != NULL)) {
    if (zero && newsize > size) {
      // also set last word in the previous allocation to zero to ensure any padding is zero-initialized
      size_t start = (size >= sizeof(intptr_t) ? size - sizeof(intptr_t) : 0);
      memset((uint8_t*)newp + start, 0, newsize - start);
    }
    _mi_memcpy_aligned(newp, p, (newsize > size ? size : newsize));
    mi_free(p); // only free if successful
  }
  return newp;
}

void* mi_heap_realloc(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept {
  return _mi_heap_realloc_zero(heap, p, newsize, false);
}

void* mi_heap_reallocn(mi_heap_t* heap, void* p, size_t count, size_t size) mi_attr_noexcept {
  size_t total;
  if (mi_count_size_overflow(count, size, &total)) return NULL;
  return mi_heap_realloc(heap, p, total);
}


// Reallocate but free `p` on errors
void* mi_heap_reallocf(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept {
  void* newp = mi_heap_realloc(heap, p, newsize);
  if (newp==NULL && p!=NULL) mi_free(p);
  return newp;
}

void* mi_heap_rezalloc(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept {
  return _mi_heap_realloc_zero(heap, p, newsize, true);
}

void* mi_heap_recalloc(mi_heap_t* heap, void* p, size_t count, size_t size) mi_attr_noexcept {
  size_t total;
  if (mi_count_size_overflow(count, size, &total)) return NULL;
  return mi_heap_rezalloc(heap, p, total);
}


void* mi_realloc(void* p, size_t newsize) mi_attr_noexcept {
  return mi_heap_realloc(mi_get_default_heap(),p,newsize);
}

void* mi_reallocn(void* p, size_t count, size_t size) mi_attr_noexcept {
  return mi_heap_reallocn(mi_get_default_heap(),p,count,size);
}

// Reallocate but free `p` on errors
void* mi_reallocf(void* p, size_t newsize) mi_attr_noexcept {
  return mi_heap_reallocf(mi_get_default_heap(),p,newsize);
}
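
// Illustrative only: `mi_reallocf` avoids the classic realloc leak where the old
// pointer is lost when growing fails, since it frees `p` itself on failure. The
// names below are made up.
//
//   buf = mi_reallocf(buf, 2 * capacity);
//   if (buf == NULL) { /* old buffer is already freed; just handle the error */ }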

void* mi_rezalloc(void* p, size_t newsize) mi_attr_noexcept {
  return mi_heap_rezalloc(mi_get_default_heap(), p, newsize);
}

void* mi_recalloc(void* p, size_t count, size_t size) mi_attr_noexcept {
  return mi_heap_recalloc(mi_get_default_heap(), p, count, size);
}



// ------------------------------------------------------
// strdup, strndup, and realpath
// ------------------------------------------------------

// `strdup` using mi_malloc
mi_decl_restrict char* mi_heap_strdup(mi_heap_t* heap, const char* s) mi_attr_noexcept {
  if (s == NULL) return NULL;
  size_t n = strlen(s);
  char* t = (char*)mi_heap_malloc(heap,n+1);
  if (t != NULL) _mi_memcpy(t, s, n + 1);
  return t;
}

mi_decl_restrict char* mi_strdup(const char* s) mi_attr_noexcept {
  return mi_heap_strdup(mi_get_default_heap(), s);
}

// `strndup` using mi_malloc
mi_decl_restrict char* mi_heap_strndup(mi_heap_t* heap, const char* s, size_t n) mi_attr_noexcept {
  if (s == NULL) return NULL;
  const char* end = (const char*)memchr(s, 0, n);  // find the end of the string in the first `n` characters (returns NULL if not found)
  const size_t m = (end != NULL ? (size_t)(end - s) : n);  // `m` is the minimum of `n` and the string length
  mi_assert_internal(m <= n);
  char* t = (char*)mi_heap_malloc(heap, m+1);
  if (t == NULL) return NULL;
  _mi_memcpy(t, s, m);
  t[m] = 0;
  return t;
}

mi_decl_restrict char* mi_strndup(const char* s, size_t n) mi_attr_noexcept {
  return mi_heap_strndup(mi_get_default_heap(),s,n);
}
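
// Illustrative only: strings returned by mi_strdup/mi_strndup are allocated by
// mimalloc and must be released with mi_free. The literal below is made up.
//
//   char* t = mi_strndup("hello world", 5);   // t == "hello" (NUL-terminated)
//   mi_free(t);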

#ifndef __wasi__
// `realpath` using mi_malloc
#ifdef _WIN32
#ifndef PATH_MAX
#define PATH_MAX MAX_PATH
#endif
#include <windows.h>
mi_decl_restrict char* mi_heap_realpath(mi_heap_t* heap, const char* fname, char* resolved_name) mi_attr_noexcept {
  // todo: use GetFullPathNameW to allow longer file names
  char buf[PATH_MAX];
  DWORD res = GetFullPathNameA(fname, PATH_MAX, (resolved_name == NULL ? buf : resolved_name), NULL);
  if (res == 0) {
    errno = GetLastError(); return NULL;
  }
  else if (res > PATH_MAX) {
    errno = EINVAL; return NULL;
  }
  else if (resolved_name != NULL) {
    return resolved_name;
  }
  else {
    return mi_heap_strndup(heap, buf, PATH_MAX);
  }
}
#else
#include <unistd.h>  // pathconf
static size_t mi_path_max(void) {
  static size_t path_max = 0;
  if (path_max <= 0) {
    long m = pathconf("/",_PC_PATH_MAX);
    if (m <= 0) path_max = 4096;      // guess
    else if (m < 256) path_max = 256; // at least 256
    else path_max = m;
  }
  return path_max;
}

char* mi_heap_realpath(mi_heap_t* heap, const char* fname, char* resolved_name) mi_attr_noexcept {
  if (resolved_name != NULL) {
    return realpath(fname,resolved_name);
  }
  else {
    size_t n  = mi_path_max();
    char* buf = (char*)mi_malloc(n+1);
    if (buf==NULL) return NULL;
    char* rname  = realpath(fname,buf);
    char* result = mi_heap_strndup(heap,rname,n); // ok if `rname==NULL`
    mi_free(buf);
    return result;
  }
}
#endif

mi_decl_restrict char* mi_realpath(const char* fname, char* resolved_name) mi_attr_noexcept {
  return mi_heap_realpath(mi_get_default_heap(),fname,resolved_name);
}
#endif
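
// Illustrative only: when `resolved_name` is NULL the result is heap allocated by
// mimalloc and must be freed with mi_free. The path below is a made-up example.
//
//   char* full = mi_realpath("some/relative/path", NULL);
//   if (full != NULL) { mi_free(full); }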

/*-------------------------------------------------------
C++ new and new_aligned
The standard requires calling into `get_new_handler` and
throwing the bad_alloc exception on failure. If we compile
with a C++ compiler we can implement this precisely. If we
use a C compiler we cannot throw a `bad_alloc` exception
but call `abort` instead (i.e. not returning).
-------------------------------------------------------*/

#ifdef __cplusplus
#include <new>
static bool mi_try_new_handler(bool nothrow) {
  #if defined(_MSC_VER) || (__cplusplus >= 201103L)
    std::new_handler h = std::get_new_handler();
  #else
    std::new_handler h = std::set_new_handler(NULL);
    std::set_new_handler(h);
  #endif
  if (h==NULL) {
    _mi_error_message(ENOMEM, "out of memory in 'new'");
    if (!nothrow) {
      throw std::bad_alloc();
    }
    return false;
  }
  else {
    h();
    return true;
  }
}
#else
typedef void (*std_new_handler_t)(void);

#if (defined(__GNUC__) || defined(__clang__))
std_new_handler_t __attribute((weak)) _ZSt15get_new_handlerv(void) {
  return NULL;
}
static std_new_handler_t mi_get_new_handler(void) {
  return _ZSt15get_new_handlerv();
}
#else
// note: on windows we could dynamically link to `?get_new_handler@std@@YAP6AXXZXZ`.
static std_new_handler_t mi_get_new_handler() {
  return NULL;
}
#endif

static bool mi_try_new_handler(bool nothrow) {
  std_new_handler_t h = mi_get_new_handler();
  if (h==NULL) {
    _mi_error_message(ENOMEM, "out of memory in 'new'");
    if (!nothrow) {
      abort();  // cannot throw in plain C, use abort
    }
    return false;
  }
  else {
    h();
    return true;
  }
}
#endif

static mi_decl_noinline void* mi_try_new(size_t size, bool nothrow) {
  void* p = NULL;
  while (p == NULL && mi_try_new_handler(nothrow)) {
    p = mi_malloc(size);
  }
  return p;
}

mi_decl_restrict void* mi_new(size_t size) {
  void* p = mi_malloc(size);
  if (mi_unlikely(p == NULL)) return mi_try_new(size,false);
  return p;
}

mi_decl_restrict void* mi_new_nothrow(size_t size) mi_attr_noexcept {
  void* p = mi_malloc(size);
  if (mi_unlikely(p == NULL)) return mi_try_new(size, true);
  return p;
}
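
// Illustrative only: `mi_new` retries via the installed new handler and throws
// (C++) or aborts (C) on exhaustion, so it never returns NULL, while
// `mi_new_nothrow` returns NULL instead. `MyType` is a made-up name.
//
//   void* p = mi_new(sizeof(MyType));           // never NULL on return
//   void* q = mi_new_nothrow(sizeof(MyType));   // may be NULL
//   mi_free(q);
//   mi_free(p);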

mi_decl_restrict void* mi_new_aligned(size_t size, size_t alignment) {
  void* p;
  do {
    p = mi_malloc_aligned(size, alignment);
  }
  while(p == NULL && mi_try_new_handler(false));
  return p;
}

mi_decl_restrict void* mi_new_aligned_nothrow(size_t size, size_t alignment) mi_attr_noexcept {
  void* p;
  do {
    p = mi_malloc_aligned(size, alignment);
  }
  while(p == NULL && mi_try_new_handler(true));
  return p;
}

mi_decl_restrict void* mi_new_n(size_t count, size_t size) {
  size_t total;
  if (mi_unlikely(mi_count_size_overflow(count, size, &total))) {
    mi_try_new_handler(false);  // on overflow we invoke the try_new_handler once to potentially throw std::bad_alloc
    return NULL;
  }
  else {
    return mi_new(total);
  }
}

void* mi_new_realloc(void* p, size_t newsize) {
  void* q;
  do {
    q = mi_realloc(p, newsize);
  } while (q == NULL && mi_try_new_handler(false));
  return q;
}

void* mi_new_reallocn(void* p, size_t newcount, size_t size) {
  size_t total;
  if (mi_unlikely(mi_count_size_overflow(newcount, size, &total))) {
    mi_try_new_handler(false);  // on overflow we invoke the try_new_handler once to potentially throw std::bad_alloc
    return NULL;
  }
  else {
    return mi_new_realloc(p, total);
  }
}