//===------------------------ fallback_malloc.cpp -------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "fallback_malloc.h"

#include <__threading_support>
#ifndef _LIBCXXABI_HAS_NO_THREADS
#if defined(__ELF__) && defined(_LIBCXXABI_LINK_PTHREAD_LIB)
#pragma comment(lib, "pthread")
#endif
#endif

#include <stdlib.h> // for malloc, calloc, free
#include <string.h> // for memset
#include <new> // for std::__libcpp_aligned_{alloc,free}
#ifdef DEBUG_FALLBACK_MALLOC
#include <cstdio> // for std::printf in the debug tracing below
#endif

// A small, simple heap manager based (loosely) on
// the startup heap manager from FreeBSD, optimized for space.
//
// Manages a fixed-size memory pool, supports malloc and free only.
// No support for realloc.
//
// Allocates chunks in multiples of four bytes, with a four byte header
// for each chunk. The overhead of each chunk is kept low by keeping pointers
// as two byte offsets within the heap, rather than (4 or 8 byte) pointers.
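//
// For example, assuming 2-byte unsigned shorts and hence a 4-byte heap_node:
// a 10-byte request rounds up to (10 + 3) / 4 = 3 nodes of payload, plus one
// node for the header, so 16 bytes of the pool are consumed in total.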

namespace {

// When POSIX threads are not available, make the mutex operations a nop
#ifndef _LIBCXXABI_HAS_NO_THREADS
_LIBCPP_SAFE_STATIC
static std::__libcpp_mutex_t heap_mutex = _LIBCPP_MUTEX_INITIALIZER;
#else
static void* heap_mutex = 0;
#endif

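// RAII helper: locks the heap mutex for the current scope, or does nothing
// when the library is built without thread support.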
class mutexor {
public:
#ifndef _LIBCXXABI_HAS_NO_THREADS
  mutexor(std::__libcpp_mutex_t* m) : mtx_(m) {
    std::__libcpp_mutex_lock(mtx_);
  }
  ~mutexor() { std::__libcpp_mutex_unlock(mtx_); }
#else
  mutexor(void*) {}
  ~mutexor() {}
#endif
private:
  mutexor(const mutexor& rhs);
  mutexor& operator=(const mutexor& rhs);
#ifndef _LIBCXXABI_HAS_NO_THREADS
  std::__libcpp_mutex_t* mtx_;
#endif
};

static const size_t HEAP_SIZE = 512;
char heap[HEAP_SIZE] __attribute__((aligned));

typedef unsigned short heap_offset;
typedef unsigned short heap_size;

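// Each chunk, allocated or free, begins with a heap_node header; for a free
// chunk the header doubles as the free-list link.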
struct heap_node {
  heap_offset next_node; // offset into heap
  heap_size len;         // size in units of "sizeof(heap_node)"
};

static const heap_node* list_end =
    (heap_node*)(&heap[HEAP_SIZE]); // one past the end of the heap
static heap_node* freelist = NULL;

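// Convert between heap_node pointers and their two-byte offsets into the heap.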
heap_node* node_from_offset(const heap_offset offset) {
  return (heap_node*)(heap + (offset * sizeof(heap_node)));
}

heap_offset offset_from_node(const heap_node* ptr) {
  return static_cast<heap_offset>(
      static_cast<size_t>(reinterpret_cast<const char*>(ptr) - heap) /
      sizeof(heap_node));
}

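// Lazily initialize the pool as a single free chunk spanning the entire heap.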
void init_heap() {
  freelist = (heap_node*)heap;
  freelist->next_node = offset_from_node(list_end);
  freelist->len = HEAP_SIZE / sizeof(heap_node);
}

// Returns the size of a chunk for a request of len bytes, measured in
// heap_node units and including one unit for the chunk header.
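// For example, with 4-byte heap_nodes: alloc_size(1) == 2 units (header plus
// one payload node) and alloc_size(5) == 3 units.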
size_t alloc_size(size_t len) {
  return (len + sizeof(heap_node) - 1) / sizeof(heap_node) + 1;
}

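// Whether ptr was carved out of the emergency pool rather than the system heap.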
bool is_fallback_ptr(void* ptr) {
  return ptr >= heap && ptr < (heap + HEAP_SIZE);
}

void* fallback_malloc(size_t len) {
  heap_node *p, *prev;
  const size_t nelems = alloc_size(len);
  mutexor mtx(&heap_mutex);

  if (NULL == freelist)
    init_heap();

  // Walk the free list, looking for a "big enough" chunk
  for (p = freelist, prev = 0; p && p != list_end;
       prev = p, p = node_from_offset(p->next_node)) {

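    // Carving the allocation out of the tail of a larger chunk lets the
    // remaining free chunk keep its place in the list, so no links change.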
    if (p->len > nelems) { // chunk is larger, shorten, and return the tail
      heap_node* q;

      p->len = static_cast<heap_size>(p->len - nelems);
      q = p + p->len;
      q->next_node = 0;
      q->len = static_cast<heap_size>(nelems);
      return (void*)(q + 1);
    }

    if (p->len == nelems) { // exact size match
      if (prev == 0)
        freelist = node_from_offset(p->next_node);
      else
        prev->next_node = p->next_node;
      p->next_node = 0;
      return (void*)(p + 1);
    }
  }
  return NULL; // couldn't find a spot big enough
}

// Return the start of the next block
heap_node* after(struct heap_node* p) { return p + p->len; }

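// Return a chunk to the free list, merging it with an adjacent free chunk
// when the walk finds one; otherwise prepend it as a new free-list entry.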
void fallback_free(void* ptr) {
  struct heap_node* cp = ((struct heap_node*)ptr) - 1; // retrieve the chunk
  struct heap_node *p, *prev;

  mutexor mtx(&heap_mutex);

#ifdef DEBUG_FALLBACK_MALLOC
  std::printf("Freeing item at %d of size %d\n", offset_from_node(cp), cp->len);
#endif

  for (p = freelist, prev = 0; p && p != list_end;
       prev = p, p = node_from_offset(p->next_node)) {
#ifdef DEBUG_FALLBACK_MALLOC
    std::printf("  p=%d, cp=%d, after(p)=%d, after(cp)=%d\n",
                offset_from_node(p), offset_from_node(cp),
                offset_from_node(after(p)), offset_from_node(after(cp)));
#endif
    if (after(p) == cp) {
#ifdef DEBUG_FALLBACK_MALLOC
      std::printf("  Appending onto chunk at %d\n", offset_from_node(p));
#endif
      p->len = static_cast<heap_size>(
          p->len + cp->len); // make the free heap_node larger
      return;
    } else if (after(cp) == p) { // there's a free heap_node right after
#ifdef DEBUG_FALLBACK_MALLOC
      std::printf("  Appending free chunk at %d\n", offset_from_node(p));
#endif
      cp->len = static_cast<heap_size>(cp->len + p->len);
      // cp replaces p in the free list, so it must take over p's link;
      // leaving cp->next_node unset here would corrupt the list.
      cp->next_node = p->next_node;
      if (prev == 0)
        freelist = cp;
      else
        prev->next_node = offset_from_node(cp);
      return;
    }
  }
  // Nothing to merge with, add it to the start of the free list
#ifdef DEBUG_FALLBACK_MALLOC
  std::printf("  Making new free list entry %d\n", offset_from_node(cp));
#endif
  cp->next_node = offset_from_node(freelist);
  freelist = cp;
}

#ifdef INSTRUMENT_FALLBACK_MALLOC
size_t print_free_list() {
  struct heap_node *p, *prev;
  heap_size total_free = 0;
  if (NULL == freelist)
    init_heap();

  for (p = freelist, prev = 0; p && p != list_end;
       prev = p, p = node_from_offset(p->next_node)) {
    std::printf("%sOffset: %d\tsize: %d Next: %d\n",
                (prev == 0 ? "" : " "), offset_from_node(p), p->len,
                p->next_node);
    total_free += p->len;
  }
  std::printf("Total Free space: %d\n", total_free);
  return total_free;
}
#endif
} // end unnamed namespace

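// The entry points below try the normal allocator first and fall back to the
// emergency pool only when it fails, e.g. to allocate an exception object
// after the heap has been exhausted.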
namespace __cxxabiv1 {

struct __attribute__((aligned)) __aligned_type {};

void* __aligned_malloc_with_fallback(size_t size) {
#if defined(_WIN32)
  if (void* dest = std::__libcpp_aligned_alloc(alignof(__aligned_type), size))
    return dest;
#elif defined(_LIBCPP_HAS_NO_LIBRARY_ALIGNED_ALLOCATION)
  if (void* dest = ::malloc(size))
    return dest;
#else
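  // Aligned allocation may legitimately return NULL for a zero-size request,
  // so round the size up to one byte first.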
  if (size == 0)
    size = 1;
  if (void* dest = std::__libcpp_aligned_alloc(__alignof(__aligned_type), size))
    return dest;
#endif
  return fallback_malloc(size);
}

void* __calloc_with_fallback(size_t count, size_t size) {
  void* ptr = ::calloc(count, size);
  if (NULL != ptr)
    return ptr;
  // if calloc fails, fall back to the emergency stash; ::calloc rejects
  // overflowing requests, so guard the multiplication here as well
  if (size != 0 && count > size_t(-1) / size)
    return NULL;
  ptr = fallback_malloc(size * count);
  if (NULL != ptr)
    ::memset(ptr, 0, size * count);
  return ptr;
}

void __aligned_free_with_fallback(void* ptr) {
  if (is_fallback_ptr(ptr))
    fallback_free(ptr);
  else {
#if defined(_LIBCPP_HAS_NO_LIBRARY_ALIGNED_ALLOCATION)
    ::free(ptr);
#else
    std::__libcpp_aligned_free(ptr);
#endif
  }
}

void __free_with_fallback(void* ptr) {
  if (is_fallback_ptr(ptr))
    fallback_free(ptr);
  else
    ::free(ptr);
}

} // namespace __cxxabiv1