/* ----------------------------------------------------------------------------
Copyright (c) 2018-2021, Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
-----------------------------------------------------------------------------*/
#include "mimalloc.h"
#include "mimalloc-internal.h"
#include "mimalloc-atomic.h"

#include <string.h>  // memset, strlen
#include <stdlib.h>  // malloc, exit

#define MI_IN_ALLOC_C
#include "alloc-override.c"
#undef MI_IN_ALLOC_C

// ------------------------------------------------------
// Allocation
// ------------------------------------------------------

// Fast allocation in a page: just pop from the free list.
// Fall back to generic allocation only if the list is empty.
extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t size) mi_attr_noexcept {
  mi_assert_internal(page->xblock_size==0||mi_page_block_size(page) >= size);
  mi_block_t* const block = page->free;
  if (mi_unlikely(block == NULL)) {
    return _mi_malloc_generic(heap, size);
  }
  mi_assert_internal(block != NULL && _mi_ptr_page(block) == page);
  // pop from the free list
  page->used++;
  page->free = mi_block_next(page, block);
  mi_assert_internal(page->free == NULL || _mi_ptr_page(page->free) == page);

#if (MI_DEBUG>0)
  if (!page->is_zero) { memset(block, MI_DEBUG_UNINIT, size); }
#elif (MI_SECURE!=0)
  block->next = 0;  // don't leak internal data
#endif

#if (MI_STAT>0)
  const size_t bsize = mi_page_usable_block_size(page);
  if (bsize <= MI_LARGE_OBJ_SIZE_MAX) {
    mi_heap_stat_increase(heap, normal, bsize);
    mi_heap_stat_counter_increase(heap, normal_count, 1);
#if (MI_STAT>1)
    const size_t bin = _mi_bin(bsize);
    mi_heap_stat_increase(heap, normal_bins[bin], 1);
#endif
  }
#endif

#if (MI_PADDING > 0) && defined(MI_ENCODE_FREELIST)
  mi_padding_t* const padding = (mi_padding_t*)((uint8_t*)block + mi_page_usable_block_size(page));
  ptrdiff_t delta = ((uint8_t*)padding - (uint8_t*)block - (size - MI_PADDING_SIZE));
  mi_assert_internal(delta >= 0 && mi_page_usable_block_size(page) >= (size - MI_PADDING_SIZE + delta));
  padding->canary = (uint32_t)(mi_ptr_encode(page,block,page->keys));
  padding->delta  = (uint32_t)(delta);
  uint8_t* fill = (uint8_t*)padding - delta;
  const size_t maxpad = (delta > MI_MAX_ALIGN_SIZE ? MI_MAX_ALIGN_SIZE : delta); // set at most N initial padding bytes
  for (size_t i = 0; i < maxpad; i++) { fill[i] = MI_DEBUG_PADDING; }
#endif

  return block;
}

// allocate a small block
extern inline mi_decl_restrict void* mi_heap_malloc_small(mi_heap_t* heap, size_t size) mi_attr_noexcept {
  mi_assert(heap!=NULL);
  mi_assert(heap->thread_id == 0 || heap->thread_id == _mi_thread_id()); // heaps are thread local
  mi_assert(size <= MI_SMALL_SIZE_MAX);
  #if (MI_PADDING)
  if (size == 0) {
    size = sizeof(void*);
  }
  #endif
  mi_page_t* page = _mi_heap_get_free_small_page(heap,size + MI_PADDING_SIZE);
  void* p = _mi_page_malloc(heap, page, size + MI_PADDING_SIZE);
  mi_assert_internal(p==NULL || mi_usable_size(p) >= size);
  #if MI_STAT>1
  if (p != NULL) {
    if (!mi_heap_is_initialized(heap)) { heap = mi_get_default_heap(); }
    mi_heap_stat_increase(heap, malloc, mi_usable_size(p));
  }
  #endif
  return p;
}

extern inline mi_decl_restrict void* mi_malloc_small(size_t size) mi_attr_noexcept {
  return mi_heap_malloc_small(mi_get_default_heap(), size);
}

// The main allocation function
extern inline mi_decl_restrict void* mi_heap_malloc(mi_heap_t* heap, size_t size) mi_attr_noexcept {
  if (mi_likely(size <= MI_SMALL_SIZE_MAX)) {
    return mi_heap_malloc_small(heap, size);
  }
  else {
    mi_assert(heap!=NULL);
    mi_assert(heap->thread_id == 0 || heap->thread_id == _mi_thread_id()); // heaps are thread local
    void* const p = _mi_malloc_generic(heap, size + MI_PADDING_SIZE);      // note: size can overflow but it is detected in malloc_generic
    mi_assert_internal(p == NULL || mi_usable_size(p) >= size);
    #if MI_STAT>1
    if (p != NULL) {
      if (!mi_heap_is_initialized(heap)) { heap = mi_get_default_heap(); }
      mi_heap_stat_increase(heap, malloc, mi_usable_size(p));
    }
    #endif
    return p;
  }
}

extern inline mi_decl_restrict void* mi_malloc(size_t size) mi_attr_noexcept {
  return mi_heap_malloc(mi_get_default_heap(), size);
}
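
// Illustrative sketch (not part of the allocator): a minimal caller of the public
// entry points above. Requests of at most MI_SMALL_SIZE_MAX bytes take the
// small-object fast path via `mi_heap_malloc_small`; larger requests go through
// `_mi_malloc_generic`. The sizes below are arbitrary.
//
//   #include <mimalloc.h>
//   #include <stdio.h>
//
//   int main(void) {
//     int* xs = (int*)mi_malloc(100 * sizeof(int));  // goes through mi_heap_malloc on the default heap
//     char* s = (char*)mi_malloc_small(32);          // explicitly use the small-object path
//     if (xs != NULL && s != NULL) {
//       xs[0] = 42;
//       printf("usable sizes: %zu and %zu\n", mi_usable_size(xs), mi_usable_size(s));
//     }
//     mi_free(s);
//     mi_free(xs);
//     return 0;
//   }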


void _mi_block_zero_init(const mi_page_t* page, void* p, size_t size) {
  // note: we need to initialize the whole usable block size to zero, not just the requested size,
  // or the recalloc/rezalloc functions cannot safely expand in place (see issue #63)
  UNUSED(size);
  mi_assert_internal(p != NULL);
  mi_assert_internal(mi_usable_size(p) >= size); // size can be zero
  mi_assert_internal(_mi_ptr_page(p)==page);
  if (page->is_zero && size > sizeof(mi_block_t)) {
    // already zero initialized memory
    ((mi_block_t*)p)->next = 0;  // clear the free list pointer
    mi_assert_expensive(mi_mem_is_zero(p, mi_usable_size(p)));
  }
  else {
    // otherwise memset
    memset(p, 0, mi_usable_size(p));
  }
}

// zero initialized small block
mi_decl_restrict void* mi_zalloc_small(size_t size) mi_attr_noexcept {
  void* p = mi_malloc_small(size);
  if (p != NULL) {
    _mi_block_zero_init(_mi_ptr_page(p), p, size);  // todo: can we avoid getting the page again?
  }
  return p;
}

void* _mi_heap_malloc_zero(mi_heap_t* heap, size_t size, bool zero) {
  void* p = mi_heap_malloc(heap,size);
  if (zero && p != NULL) {
    _mi_block_zero_init(_mi_ptr_page(p),p,size);  // todo: can we avoid getting the page again?
  }
  return p;
}

extern inline mi_decl_restrict void* mi_heap_zalloc(mi_heap_t* heap, size_t size) mi_attr_noexcept {
  return _mi_heap_malloc_zero(heap, size, true);
}

mi_decl_restrict void* mi_zalloc(size_t size) mi_attr_noexcept {
  return mi_heap_zalloc(mi_get_default_heap(),size);
}
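
// Illustrative sketch (not part of the allocator): zero-initialized allocation.
// `_mi_block_zero_init` clears the whole usable block, not just the requested size,
// so a later `mi_rezalloc`/`mi_recalloc` can expand in place and still hand out
// zeroed memory (see issue #63). The struct and sizes below are hypothetical.
//
//   typedef struct { double x, y; } point_t;                  // hypothetical type
//   point_t* pts = (point_t*)mi_calloc(16, sizeof(point_t));  // 16 zeroed points
//   unsigned char* buf = (unsigned char*)mi_zalloc(256);      // 256 zero bytes
//   // ... use pts and buf; all bytes start out as zero ...
//   mi_free(buf);
//   mi_free(pts);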


// ------------------------------------------------------
// Check for double free in secure and debug mode
// This is somewhat expensive so it is only enabled in secure mode 4 or in debug mode (with an encoded free list)
// ------------------------------------------------------

#if (MI_ENCODE_FREELIST && (MI_SECURE>=4 || MI_DEBUG!=0))
// linear check if the free list contains a specific element
static bool mi_list_contains(const mi_page_t* page, const mi_block_t* list, const mi_block_t* elem) {
  while (list != NULL) {
    if (elem==list) return true;
    list = mi_block_next(page, list);
  }
  return false;
}

static mi_decl_noinline bool mi_check_is_double_freex(const mi_page_t* page, const mi_block_t* block) {
  // The decoded value is in the same page (or NULL).
  // Walk the free lists to verify positively if it is already freed
  if (mi_list_contains(page, page->free, block) ||
      mi_list_contains(page, page->local_free, block) ||
      mi_list_contains(page, mi_page_thread_free(page), block))
  {
    _mi_error_message(EAGAIN, "double free detected of block %p with size %zu\n", block, mi_page_block_size(page));
    return true;
  }
  return false;
}

static inline bool mi_check_is_double_free(const mi_page_t* page, const mi_block_t* block) {
  mi_block_t* n = mi_block_nextx(page, block, page->keys); // pretend it is freed, and get the decoded first field
  if (((uintptr_t)n & (MI_INTPTR_SIZE-1))==0 &&  // quick check: aligned pointer?
      (n==NULL || mi_is_in_same_page(block, n))) // quick check: in same page or NULL?
  {
    // Suspicious: the decoded value in the block is in the same page (or NULL) -- maybe a double free?
    // (continue in a separate function to improve code generation)
    return mi_check_is_double_freex(page, block);
  }
  return false;
}
#else
static inline bool mi_check_is_double_free(const mi_page_t* page, const mi_block_t* block) {
  UNUSED(page);
  UNUSED(block);
  return false;
}
#endif
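
// Illustrative sketch (not part of the allocator): the kind of error the check above
// is designed to catch when mimalloc is built with MI_SECURE>=4 or MI_DEBUG and an
// encoded free list. On the second free the block is already on one of the page
// free lists, so `mi_check_is_double_free` reports EAGAIN (best effort) instead of
// letting the free list get corrupted.
//
//   void* p = mi_malloc(64);
//   mi_free(p);
//   mi_free(p);   // double free: detected in secure/debug builds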

// ---------------------------------------------------------------------------
// Check for heap block overflow by setting up padding at the end of the block
// ---------------------------------------------------------------------------

#if (MI_PADDING>0) && defined(MI_ENCODE_FREELIST)
static bool mi_page_decode_padding(const mi_page_t* page, const mi_block_t* block, size_t* delta, size_t* bsize) {
  *bsize = mi_page_usable_block_size(page);
  const mi_padding_t* const padding = (mi_padding_t*)((uint8_t*)block + *bsize);
  *delta = padding->delta;
  return ((uint32_t)mi_ptr_encode(page,block,page->keys) == padding->canary && *delta <= *bsize);
}

// Return the exact usable size of a block.
static size_t mi_page_usable_size_of(const mi_page_t* page, const mi_block_t* block) {
  size_t bsize;
  size_t delta;
  bool ok = mi_page_decode_padding(page, block, &delta, &bsize);
  mi_assert_internal(ok); mi_assert_internal(delta <= bsize);
  return (ok ? bsize - delta : 0);
}

static bool mi_verify_padding(const mi_page_t* page, const mi_block_t* block, size_t* size, size_t* wrong) {
  size_t bsize;
  size_t delta;
  bool ok = mi_page_decode_padding(page, block, &delta, &bsize);
  *size = *wrong = bsize;
  if (!ok) return false;
  mi_assert_internal(bsize >= delta);
  *size = bsize - delta;
  uint8_t* fill = (uint8_t*)block + bsize - delta;
  const size_t maxpad = (delta > MI_MAX_ALIGN_SIZE ? MI_MAX_ALIGN_SIZE : delta); // check at most the first N padding bytes
  for (size_t i = 0; i < maxpad; i++) {
    if (fill[i] != MI_DEBUG_PADDING) {
      *wrong = bsize - delta + i;
      return false;
    }
  }
  return true;
}

static void mi_check_padding(const mi_page_t* page, const mi_block_t* block) {
  size_t size;
  size_t wrong;
  if (!mi_verify_padding(page,block,&size,&wrong)) {
    _mi_error_message(EFAULT, "buffer overflow in heap block %p of size %zu: write after %zu bytes\n", block, size, wrong );
  }
}

// When a non-thread-local block is freed, it becomes part of the thread delayed free
// list that is freed later by the owning heap. If the exact usable size is too small to
// contain the pointer for the delayed list, then shrink the padding (by decreasing delta)
// so it will later not trigger an overflow error in `mi_free_block`.
static void mi_padding_shrink(const mi_page_t* page, const mi_block_t* block, const size_t min_size) {
  size_t bsize;
  size_t delta;
  bool ok = mi_page_decode_padding(page, block, &delta, &bsize);
  mi_assert_internal(ok);
  if (!ok || (bsize - delta) >= min_size) return;  // usually already enough space
  mi_assert_internal(bsize >= min_size);
  if (bsize < min_size) return;  // should never happen
  size_t new_delta = (bsize - min_size);
  mi_assert_internal(new_delta < bsize);
  mi_padding_t* padding = (mi_padding_t*)((uint8_t*)block + bsize);
  padding->delta = (uint32_t)new_delta;
}
#else
static void mi_check_padding(const mi_page_t* page, const mi_block_t* block) {
  UNUSED(page);
  UNUSED(block);
}

static size_t mi_page_usable_size_of(const mi_page_t* page, const mi_block_t* block) {
  UNUSED(block);
  return mi_page_usable_block_size(page);
}

static void mi_padding_shrink(const mi_page_t* page, const mi_block_t* block, const size_t min_size) {
  UNUSED(page);
  UNUSED(block);
  UNUSED(min_size);
}
#endif
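
// Descriptive note (added for clarity, assuming the usual MI_PADDING layout of
// `mi_padding_t` from the mimalloc internal headers): with padding enabled, a block
// with usable size `bsize` is laid out roughly as
//
//   | user data (size - MI_PADDING_SIZE) | fill bytes (delta) | mi_padding_t { canary, delta } |
//   ^ block                              ^ fill               ^ block + bsize
//
// `_mi_page_malloc` writes the canary and up to MI_MAX_ALIGN_SIZE fill bytes at
// allocation time; on free, `mi_verify_padding` re-derives the canary from the page
// keys and checks that the fill bytes still equal MI_DEBUG_PADDING, which is how a
// write past the requested size is detected and reported with its byte offset.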

// only maintain stats for smaller objects if requested
#if (MI_STAT>0)
static void mi_stat_free(const mi_page_t* page, const mi_block_t* block) {
#if (MI_STAT < 2)
  UNUSED(block);
#endif
  mi_heap_t* const heap = mi_heap_get_default();
  const size_t bsize = mi_page_usable_block_size(page);
#if (MI_STAT>1)
  const size_t usize = mi_page_usable_size_of(page, block);
  mi_heap_stat_decrease(heap, malloc, usize);
#endif
  if (bsize <= MI_LARGE_OBJ_SIZE_MAX) {
    mi_heap_stat_decrease(heap, normal, bsize);
#if (MI_STAT > 1)
    mi_heap_stat_decrease(heap, normal_bins[_mi_bin(bsize)], 1);
#endif
  }
}
#else
static void mi_stat_free(const mi_page_t* page, const mi_block_t* block) {
  UNUSED(page); UNUSED(block);
}
#endif

#if (MI_STAT>0)
// maintain stats for huge objects
static void mi_stat_huge_free(const mi_page_t* page) {
  mi_heap_t* const heap = mi_heap_get_default();
  const size_t bsize = mi_page_block_size(page); // to match stats in `page.c:mi_page_huge_alloc`
  if (bsize <= MI_HUGE_OBJ_SIZE_MAX) {
    mi_heap_stat_decrease(heap, huge, bsize);
  }
  else {
    mi_heap_stat_decrease(heap, giant, bsize);
  }
}
#else
static void mi_stat_huge_free(const mi_page_t* page) {
  UNUSED(page);
}
#endif

// ------------------------------------------------------
// Free
// ------------------------------------------------------

// multi-threaded free
static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* block)
{
  // The padding check may access the non-thread-owned page for the key values.
  // That is safe as these are constant and the page won't be freed (as the block is not freed yet).
  mi_check_padding(page, block);
  mi_padding_shrink(page, block, sizeof(mi_block_t)); // for small size, ensure we can fit the delayed thread pointers without triggering overflow detection
  #if (MI_DEBUG!=0)
  memset(block, MI_DEBUG_FREED, mi_usable_size(block));
  #endif

  // huge page segments are always abandoned and can be freed immediately
  mi_segment_t* const segment = _mi_page_segment(page);
  if (segment->page_kind==MI_PAGE_HUGE) {
    mi_stat_huge_free(page);
    _mi_segment_huge_page_free(segment, page, block);
    return;
  }

  // Try to put the block on either the page-local thread free list, or the heap delayed free list.
  mi_thread_free_t tfreex;
  bool use_delayed;
  mi_thread_free_t tfree = mi_atomic_load_relaxed(&page->xthread_free);
  do {
    use_delayed = (mi_tf_delayed(tfree) == MI_USE_DELAYED_FREE);
    if (mi_unlikely(use_delayed)) {
      // unlikely: this only happens on the first concurrent free in a page that is in the full list
      tfreex = mi_tf_set_delayed(tfree,MI_DELAYED_FREEING);
    }
    else {
      // usual: directly add to page thread_free list
      mi_block_set_next(page, block, mi_tf_block(tfree));
      tfreex = mi_tf_set_block(tfree,block);
    }
  } while (!mi_atomic_cas_weak_release(&page->xthread_free, &tfree, tfreex));

  if (mi_unlikely(use_delayed)) {
    // racy read on `heap`, but ok because MI_DELAYED_FREEING is set (see `mi_heap_delete` and `mi_heap_collect_abandon`)
    mi_heap_t* const heap = (mi_heap_t*)(mi_atomic_load_acquire(&page->xheap)); //mi_page_heap(page);
    mi_assert_internal(heap != NULL);
    if (heap != NULL) {
      // add to the delayed free list of this heap. (do this atomically as the lock only protects heap memory validity)
      mi_block_t* dfree = mi_atomic_load_ptr_relaxed(mi_block_t, &heap->thread_delayed_free);
      do {
        mi_block_set_nextx(heap,block,dfree, heap->keys);
      } while (!mi_atomic_cas_ptr_weak_release(mi_block_t,&heap->thread_delayed_free, &dfree, block));
    }

    // and reset the MI_DELAYED_FREEING flag
    tfree = mi_atomic_load_relaxed(&page->xthread_free);
    do {
      tfreex = tfree;
      mi_assert_internal(mi_tf_delayed(tfree) == MI_DELAYED_FREEING);
      tfreex = mi_tf_set_delayed(tfree,MI_NO_DELAYED_FREE);
    } while (!mi_atomic_cas_weak_release(&page->xthread_free, &tfree, tfreex));
  }
}

// regular free
static inline void _mi_free_block(mi_page_t* page, bool local, mi_block_t* block)
{
  // and push it on the free list
  if (mi_likely(local)) {
    // owning thread can free a block directly
    if (mi_unlikely(mi_check_is_double_free(page, block))) return;
    mi_check_padding(page, block);
    #if (MI_DEBUG!=0)
    memset(block, MI_DEBUG_FREED, mi_page_block_size(page));
    #endif
    mi_block_set_next(page, block, page->local_free);
    page->local_free = block;
    page->used--;
    if (mi_unlikely(mi_page_all_free(page))) {
      _mi_page_retire(page);
    }
    else if (mi_unlikely(mi_page_is_in_full(page))) {
      _mi_page_unfull(page);
    }
  }
  else {
    _mi_free_block_mt(page,block);
  }
}


// Adjust a block that was allocated aligned, to the actual start of the block in the page.
mi_block_t* _mi_page_ptr_unalign(const mi_segment_t* segment, const mi_page_t* page, const void* p) {
  mi_assert_internal(page!=NULL && p!=NULL);
  const size_t diff   = (uint8_t*)p - _mi_page_start(segment, page, NULL);
  const size_t adjust = (diff % mi_page_block_size(page));
  return (mi_block_t*)((uintptr_t)p - adjust);
}


static void mi_decl_noinline mi_free_generic(const mi_segment_t* segment, bool local, void* p) {
  mi_page_t* const page = _mi_segment_page_of(segment, p);
  mi_block_t* const block = (mi_page_has_aligned(page) ? _mi_page_ptr_unalign(segment, page, p) : (mi_block_t*)p);
  mi_stat_free(page, block);
  _mi_free_block(page, local, block);
}

// Get the segment data belonging to a pointer
// This is just a single `and` in assembly but does further checks in debug mode
// (and secure mode) if this was a valid pointer.
static inline mi_segment_t* mi_checked_ptr_segment(const void* p, const char* msg)
{
  UNUSED(msg);
#if (MI_DEBUG>0)
  if (mi_unlikely(((uintptr_t)p & (MI_INTPTR_SIZE - 1)) != 0)) {
    _mi_error_message(EINVAL, "%s: invalid (unaligned) pointer: %p\n", msg, p);
    return NULL;
  }
#endif

  mi_segment_t* const segment = _mi_ptr_segment(p);
  if (mi_unlikely(segment == NULL)) return NULL;  // checks also for (p==NULL)

#if (MI_DEBUG>0)
  if (mi_unlikely(!mi_is_in_heap_region(p))) {
    _mi_warning_message("%s: pointer might not point to a valid heap region: %p\n"
      "(this may still be a valid very large allocation (over 64MiB))\n", msg, p);
    if (mi_likely(_mi_ptr_cookie(segment) == segment->cookie)) {
      _mi_warning_message("(yes, the previous pointer %p was valid after all)\n", p);
    }
  }
#endif
#if (MI_DEBUG>0 || MI_SECURE>=4)
  if (mi_unlikely(_mi_ptr_cookie(segment) != segment->cookie)) {
    _mi_error_message(EINVAL, "%s: pointer does not point to a valid heap space: %p\n", msg, p);
  }
#endif
  return segment;
}


// Free a block
void mi_free(void* p) mi_attr_noexcept
{
  const mi_segment_t* const segment = mi_checked_ptr_segment(p,"mi_free");
  if (mi_unlikely(segment == NULL)) return;

  const uintptr_t tid = _mi_thread_id();
  mi_page_t* const page = _mi_segment_page_of(segment, p);
  mi_block_t* const block = (mi_block_t*)p;

  if (mi_likely(tid == segment->thread_id && page->flags.full_aligned == 0)) {  // the thread id matches and it is not a full page, nor has aligned blocks
    // local, and not full or aligned
    if (mi_unlikely(mi_check_is_double_free(page,block))) return;
    mi_check_padding(page, block);
    mi_stat_free(page, block);
    #if (MI_DEBUG!=0)
    memset(block, MI_DEBUG_FREED, mi_page_block_size(page));
    #endif
    mi_block_set_next(page, block, page->local_free);
    page->local_free = block;
    if (mi_unlikely(--page->used == 0)) {   // using this expression generates better code than: page->used--; if (mi_page_all_free(page))
      _mi_page_retire(page);
    }
  }
  else {
    // non-local, aligned blocks, or a full page; use the more generic path
    // note: recalc page in generic to improve code generation
    mi_free_generic(segment, tid == segment->thread_id, p);
  }
}

bool _mi_free_delayed_block(mi_block_t* block) {
  // get segment and page
  const mi_segment_t* const segment = _mi_ptr_segment(block);
  mi_assert_internal(_mi_ptr_cookie(segment) == segment->cookie);
  mi_assert_internal(_mi_thread_id() == segment->thread_id);
  mi_page_t* const page = _mi_segment_page_of(segment, block);

  // Clear the no-delayed flag so delayed freeing is used again for this page.
  // This must be done before collecting the free lists on this page -- otherwise
  // some blocks may end up in the page `thread_free` list with no blocks in the
  // heap `thread_delayed_free` list which may cause the page to be never freed!
  // (it would only be freed if we happen to scan it in `mi_page_queue_find_free_ex`)
  _mi_page_use_delayed_free(page, MI_USE_DELAYED_FREE, false /* dont overwrite never delayed */);

  // collect all other non-local frees to ensure up-to-date `used` count
  _mi_page_free_collect(page, false);

  // and free the block (possibly freeing the page as well since used is updated)
  _mi_free_block(page, true, block);
  return true;
}

// Bytes available in a block
static size_t _mi_usable_size(const void* p, const char* msg) mi_attr_noexcept {
  const mi_segment_t* const segment = mi_checked_ptr_segment(p,msg);
  if (segment==NULL) return 0;
  const mi_page_t* const page = _mi_segment_page_of(segment, p);
  const mi_block_t* block = (const mi_block_t*)p;
  if (mi_unlikely(mi_page_has_aligned(page))) {
    block = _mi_page_ptr_unalign(segment, page, p);
    size_t size = mi_page_usable_size_of(page, block);
    ptrdiff_t const adjust = (uint8_t*)p - (uint8_t*)block;
    mi_assert_internal(adjust >= 0 && (size_t)adjust <= size);
    return (size - adjust);
  }
  else {
    return mi_page_usable_size_of(page, block);
  }
}

size_t mi_usable_size(const void* p) mi_attr_noexcept {
  return _mi_usable_size(p, "mi_usable_size");
}
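
// Illustrative sketch (not part of the allocator): `mi_usable_size` reports the size
// the block can actually hold, which may be larger than what was requested because
// sizes are rounded up to a size class (and any debug padding is excluded).
//
//   char* s = (char*)mi_malloc(10);
//   size_t usable = mi_usable_size(s);   // >= 10; the exact value depends on the size classes
//   memset(s, 0, usable);                // safe: the whole usable size belongs to this block
//   mi_free(s);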


// ------------------------------------------------------
// ensure explicit external inline definitions are emitted!
// ------------------------------------------------------

#ifdef __cplusplus
void* _mi_externs[] = {
  (void*)&_mi_page_malloc,
  (void*)&mi_malloc,
  (void*)&mi_malloc_small,
  (void*)&mi_zalloc_small,
  (void*)&mi_heap_malloc,
  (void*)&mi_heap_zalloc,
  (void*)&mi_heap_malloc_small
};
#endif


// ------------------------------------------------------
// Allocation extensions
// ------------------------------------------------------

void mi_free_size(void* p, size_t size) mi_attr_noexcept {
  UNUSED_RELEASE(size);
  mi_assert(p == NULL || size <= _mi_usable_size(p,"mi_free_size"));
  mi_free(p);
}

void mi_free_size_aligned(void* p, size_t size, size_t alignment) mi_attr_noexcept {
  UNUSED_RELEASE(alignment);
  mi_assert(((uintptr_t)p % alignment) == 0);
  mi_free_size(p,size);
}

void mi_free_aligned(void* p, size_t alignment) mi_attr_noexcept {
  UNUSED_RELEASE(alignment);
  mi_assert(((uintptr_t)p % alignment) == 0);
  mi_free(p);
}

extern inline mi_decl_restrict void* mi_heap_calloc(mi_heap_t* heap, size_t count, size_t size) mi_attr_noexcept {
  size_t total;
  if (mi_count_size_overflow(count,size,&total)) return NULL;
  return mi_heap_zalloc(heap,total);
}

mi_decl_restrict void* mi_calloc(size_t count, size_t size) mi_attr_noexcept {
  return mi_heap_calloc(mi_get_default_heap(),count,size);
}

// Uninitialized `calloc`
extern mi_decl_restrict void* mi_heap_mallocn(mi_heap_t* heap, size_t count, size_t size) mi_attr_noexcept {
  size_t total;
  if (mi_count_size_overflow(count, size, &total)) return NULL;
  return mi_heap_malloc(heap, total);
}

mi_decl_restrict void* mi_mallocn(size_t count, size_t size) mi_attr_noexcept {
  return mi_heap_mallocn(mi_get_default_heap(),count,size);
}

// Expand in place or fail
void* mi_expand(void* p, size_t newsize) mi_attr_noexcept {
  if (p == NULL) return NULL;
  size_t size = _mi_usable_size(p,"mi_expand");
  if (newsize > size) return NULL;
  return p; // it fits
}
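
// Illustrative sketch (not part of the allocator): `mi_expand` never moves the block;
// it only succeeds when the new size already fits in the usable size. The sizes below
// are arbitrary.
//
//   char* p = (char*)mi_malloc(100);
//   if (mi_expand(p, 120) != NULL) {
//     // grown in place: `p` is unchanged and now has at least 120 usable bytes
//   }
//   else {
//     p = (char*)mi_reallocf(p, 120);  // moving reallocation; frees the old block on failure
//   }
//   mi_free(p);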

void* _mi_heap_realloc_zero(mi_heap_t* heap, void* p, size_t newsize, bool zero) {
  if (p == NULL) return _mi_heap_malloc_zero(heap,newsize,zero);
  size_t size = _mi_usable_size(p,"mi_realloc");
  if (newsize <= size && newsize >= (size / 2)) {
    return p;  // reallocation still fits and not more than 50% waste
  }
  void* newp = mi_heap_malloc(heap,newsize);
  if (mi_likely(newp != NULL)) {
    if (zero && newsize > size) {
      // also set last word in the previous allocation to zero to ensure any padding is zero-initialized
      size_t start = (size >= sizeof(intptr_t) ? size - sizeof(intptr_t) : 0);
      memset((uint8_t*)newp + start, 0, newsize - start);
    }
    _mi_memcpy_aligned(newp, p, (newsize > size ? size : newsize));
    mi_free(p); // only free if successful
  }
  return newp;
}

void* mi_heap_realloc(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept {
  return _mi_heap_realloc_zero(heap, p, newsize, false);
}

void* mi_heap_reallocn(mi_heap_t* heap, void* p, size_t count, size_t size) mi_attr_noexcept {
  size_t total;
  if (mi_count_size_overflow(count, size, &total)) return NULL;
  return mi_heap_realloc(heap, p, total);
}


// Reallocate but free `p` on errors
void* mi_heap_reallocf(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept {
  void* newp = mi_heap_realloc(heap, p, newsize);
  if (newp==NULL && p!=NULL) mi_free(p);
  return newp;
}

void* mi_heap_rezalloc(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept {
  return _mi_heap_realloc_zero(heap, p, newsize, true);
}

void* mi_heap_recalloc(mi_heap_t* heap, void* p, size_t count, size_t size) mi_attr_noexcept {
  size_t total;
  if (mi_count_size_overflow(count, size, &total)) return NULL;
  return mi_heap_rezalloc(heap, p, total);
}


void* mi_realloc(void* p, size_t newsize) mi_attr_noexcept {
  return mi_heap_realloc(mi_get_default_heap(),p,newsize);
}

void* mi_reallocn(void* p, size_t count, size_t size) mi_attr_noexcept {
  return mi_heap_reallocn(mi_get_default_heap(),p,count,size);
}

// Reallocate but free `p` on errors
void* mi_reallocf(void* p, size_t newsize) mi_attr_noexcept {
  return mi_heap_reallocf(mi_get_default_heap(),p,newsize);
}

void* mi_rezalloc(void* p, size_t newsize) mi_attr_noexcept {
  return mi_heap_rezalloc(mi_get_default_heap(), p, newsize);
}

void* mi_recalloc(void* p, size_t count, size_t size) mi_attr_noexcept {
  return mi_heap_recalloc(mi_get_default_heap(), p, count, size);
}



// ------------------------------------------------------
// strdup, strndup, and realpath
// ------------------------------------------------------

// `strdup` using mi_malloc
mi_decl_restrict char* mi_heap_strdup(mi_heap_t* heap, const char* s) mi_attr_noexcept {
  if (s == NULL) return NULL;
  size_t n = strlen(s);
  char* t = (char*)mi_heap_malloc(heap,n+1);
  if (t != NULL) _mi_memcpy(t, s, n + 1);
  return t;
}

mi_decl_restrict char* mi_strdup(const char* s) mi_attr_noexcept {
  return mi_heap_strdup(mi_get_default_heap(), s);
}

// `strndup` using mi_malloc
mi_decl_restrict char* mi_heap_strndup(mi_heap_t* heap, const char* s, size_t n) mi_attr_noexcept {
  if (s == NULL) return NULL;
  const char* end = (const char*)memchr(s, 0, n);  // find end of string in the first `n` characters (returns NULL if not found)
  const size_t m = (end != NULL ? (size_t)(end - s) : n);  // `m` is the minimum of `n` or the end-of-string
  mi_assert_internal(m <= n);
  char* t = (char*)mi_heap_malloc(heap, m+1);
  if (t == NULL) return NULL;
  _mi_memcpy(t, s, m);
  t[m] = 0;
  return t;
}

mi_decl_restrict char* mi_strndup(const char* s, size_t n) mi_attr_noexcept {
  return mi_heap_strndup(mi_get_default_heap(),s,n);
}
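
// Illustrative sketch (not part of the allocator): `mi_strndup` copies at most `n`
// characters and always NUL-terminates the result, which makes it handy for slicing
// a prefix out of a longer string. The input below is arbitrary.
//
//   const char* path = "src/alloc.c";
//   char* dir = mi_strndup(path, 3);   // "src"
//   char* all = mi_strdup(path);       // full copy
//   mi_free(all);
//   mi_free(dir);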

#ifndef __wasi__
// `realpath` using mi_malloc
#ifdef _WIN32
#ifndef PATH_MAX
#define PATH_MAX MAX_PATH
#endif
#include <windows.h>
mi_decl_restrict char* mi_heap_realpath(mi_heap_t* heap, const char* fname, char* resolved_name) mi_attr_noexcept {
  // todo: use GetFullPathNameW to allow longer file names
  char buf[PATH_MAX];
  DWORD res = GetFullPathNameA(fname, PATH_MAX, (resolved_name == NULL ? buf : resolved_name), NULL);
  if (res == 0) {
    errno = GetLastError(); return NULL;
  }
  else if (res > PATH_MAX) {
    errno = EINVAL; return NULL;
  }
  else if (resolved_name != NULL) {
    return resolved_name;
  }
  else {
    return mi_heap_strndup(heap, buf, PATH_MAX);
  }
}
#else
#include <unistd.h>  // pathconf
static size_t mi_path_max() {
  static size_t path_max = 0;
  if (path_max <= 0) {
    long m = pathconf("/",_PC_PATH_MAX);
    if (m <= 0) path_max = 4096;      // guess
    else if (m < 256) path_max = 256; // at least 256
    else path_max = m;
  }
  return path_max;
}

char* mi_heap_realpath(mi_heap_t* heap, const char* fname, char* resolved_name) mi_attr_noexcept {
  if (resolved_name != NULL) {
    return realpath(fname,resolved_name);
  }
  else {
    size_t n  = mi_path_max();
    char* buf = (char*)mi_malloc(n+1);
    if (buf==NULL) return NULL;
    char* rname  = realpath(fname,buf);
    char* result = mi_heap_strndup(heap,rname,n); // ok if `rname==NULL`
    mi_free(buf);
    return result;
  }
}
#endif

mi_decl_restrict char* mi_realpath(const char* fname, char* resolved_name) mi_attr_noexcept {
  return mi_heap_realpath(mi_get_default_heap(),fname,resolved_name);
}
#endif

/*-------------------------------------------------------
C++ new and new_aligned
The standard requires calling into `get_new_handler` and
throwing the bad_alloc exception on failure. If we compile
with a C++ compiler we can implement this precisely. If we
use a C compiler we cannot throw a `bad_alloc` exception
but we call `exit` instead (i.e. not returning).
-------------------------------------------------------*/

#ifdef __cplusplus
#include <new>
static bool mi_try_new_handler(bool nothrow) {
  #if defined(_MSC_VER) || (__cplusplus >= 201103L)
    std::new_handler h = std::get_new_handler();
  #else
    std::new_handler h = std::set_new_handler(NULL);
    std::set_new_handler(h);
  #endif
  if (h==NULL) {
    if (!nothrow) throw std::bad_alloc();
    return false;
  }
  else {
    h();
    return true;
  }
}
#else
typedef void (*std_new_handler_t)();

#if (defined(__GNUC__) || defined(__clang__))
std_new_handler_t __attribute((weak)) _ZSt15get_new_handlerv() {
  return NULL;
}
static std_new_handler_t mi_get_new_handler() {
  return _ZSt15get_new_handlerv();
}
#else
// note: on windows we could dynamically link to `?get_new_handler@std@@YAP6AXXZXZ`.
static std_new_handler_t mi_get_new_handler() {
  return NULL;
}
#endif

static bool mi_try_new_handler(bool nothrow) {
  std_new_handler_t h = mi_get_new_handler();
  if (h==NULL) {
    if (!nothrow) exit(ENOMEM);  // cannot throw in plain C, use exit as we are out of memory anyway.
    return false;
  }
  else {
    h();
    return true;
  }
}
#endif

static mi_decl_noinline void* mi_try_new(size_t size, bool nothrow ) {
  void* p = NULL;
  while(p == NULL && mi_try_new_handler(nothrow)) {
    p = mi_malloc(size);
  }
  return p;
}

mi_decl_restrict void* mi_new(size_t size) {
  void* p = mi_malloc(size);
  if (mi_unlikely(p == NULL)) return mi_try_new(size,false);
  return p;
}

mi_decl_restrict void* mi_new_nothrow(size_t size) mi_attr_noexcept {
  void* p = mi_malloc(size);
  if (mi_unlikely(p == NULL)) return mi_try_new(size, true);
  return p;
}
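
// Illustrative sketch (not part of the allocator, C++ only): `mi_new` follows the
// `operator new` contract, so a user-installed new handler gets a chance to release
// memory before an allocation finally fails. The handler below is hypothetical.
//
//   #include <new>
//   static void drop_caches() {            // hypothetical application callback
//     // release some application caches, or give up so mi_new can throw std::bad_alloc:
//     std::set_new_handler(nullptr);
//   }
//   // ...
//   std::set_new_handler(drop_caches);
//   void* big = mi_new(1024 * 1024);       // retries via the handler on failure, throws on exhaustion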

mi_decl_restrict void* mi_new_aligned(size_t size, size_t alignment) {
  void* p;
  do {
    p = mi_malloc_aligned(size, alignment);
  }
  while(p == NULL && mi_try_new_handler(false));
  return p;
}

mi_decl_restrict void* mi_new_aligned_nothrow(size_t size, size_t alignment) mi_attr_noexcept {
  void* p;
  do {
    p = mi_malloc_aligned(size, alignment);
  }
  while(p == NULL && mi_try_new_handler(true));
  return p;
}

mi_decl_restrict void* mi_new_n(size_t count, size_t size) {
  size_t total;
  if (mi_unlikely(mi_count_size_overflow(count, size, &total))) {
    mi_try_new_handler(false);  // on overflow we invoke the try_new_handler once to potentially throw std::bad_alloc
    return NULL;
  }
  else {
    return mi_new(total);
  }
}

void* mi_new_realloc(void* p, size_t newsize) {
  void* q;
  do {
    q = mi_realloc(p, newsize);
  } while (q == NULL && mi_try_new_handler(false));
  return q;
}

void* mi_new_reallocn(void* p, size_t newcount, size_t size) {
  size_t total;
  if (mi_unlikely(mi_count_size_overflow(newcount, size, &total))) {
    mi_try_new_handler(false);  // on overflow we invoke the try_new_handler once to potentially throw std::bad_alloc
    return NULL;
  }
  else {
    return mi_new_realloc(p, total);
  }
}