1 /*------------------------------------------------------------------------------
2 *
3 * Copyright (c) 2011-2021, EURid vzw. All rights reserved.
4 * The YADIFA TM software product is provided under the BSD 3-clause license:
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 *
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * * Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * * Neither the name of EURid nor the names of its contributors may be
16 * used to endorse or promote products derived from this software
17 * without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
23 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 *
31 *------------------------------------------------------------------------------
32 *
33 */
34
35 /** @defgroup zalloc very fast, no-overhead specialised memory allocation functions
36 * @ingroup dnscore
37 * @brief no-overhead specialised allocation functions
38 *
39 * These memory allocations are using memory mapping to allocate blocks.
40 *
41 * One difficulty is that to free a block, its size has to be known first.
42 * Which is not an issue for most of our uses.
43 *
44 * One drawback is that once allocated, the memory is never released to the system
45 * (but is still available to be allocated again by the program)
46 *
47 * Much faster than malloc, and no overhead.
48 *
49 * Allocated memory is always aligned to at least 64 bits
50 *
51 * The granularity of the size of a block is 8 bytes
52 *
53 * The base alignment is always 4096 + real size of the block
54 *
55 * @{
56 */
57
58 #pragma once
59
60 #include <dnscore/dnscore-config-features.h>
61 #include <dnscore/thread.h>
62 #include <dnscore/config_settings.h>
63 #include <dnscore/sys_types.h>
64 #include <dnscore/debug.h>
65
66 #ifdef __cplusplus
67 extern "C" {
68 #endif
69
70 #ifndef DNSCORE_HAS_ZALLOC_SUPPORT
71 #error "DNSCORE_HAS_ZALLOC_SUPPORT should be set to 1 or 0"
72 #endif
73
74 #if !DNSCORE_HAS_ZALLOC_SUPPORT
75
76 /**
 * Uses malloc to mimic zalloc_unaligned. Source is in zalloc.c.
78 *
79 * @param len
80 * @param tag
81 * @return
82 */
83
84 void *malloc_string_or_die(size_t len, u64 tag);
85
86 /**
 * Uses malloc to mimic zfree_unaligned. Source is in zalloc.c.
88 *
89 * @param ptr
90 */
91
92 void mfree_string(void *ptr);
93
94 /* 8 bytes aligned */
95
/**
 * Returns the real size of a block allocated for the requested size.
 *
 * In this fallback (malloc-based) build no rounding is applied: the
 * requested size is exactly the allocated size.
 *
 * @param size the requested allocation size in bytes
 * @return the effective block size (identical to size here)
 */
static inline size_t zalloc_memory_block_size(size_t size)
{
    size_t effective_size = size;
    return effective_size;
}
100
/**
 * Debug-oriented free: fills the block with the 0xfe pattern before
 * releasing it, so use-after-free reads are easier to spot.
 *
 * Safe on a NULL pointer (free(NULL) is a no-op anyway).
 *
 * @param ptr  the block to release (may be NULL)
 * @param size the size of the block, needed for the scrub
 */
static inline void free_erases(void *ptr, size_t size)
{
    if(ptr == NULL)
    {
        return;
    }

    memset(ptr, 0xfe, size);
    free(ptr);
}
109
#if DEBUG
// DEBUG builds scrub the freed memory (0xfe) to expose use-after-free.
#define ZFREE(label,object) free_erases(label, sizeof(object))
#else
#define ZFREE(label,object) free(label)
#endif

// Fallback mapping of the zalloc API onto malloc/free when zalloc support
// is compiled out: the call sites stay identical, plain heap underneath.
#define ZALLOC_ARRAY_OR_DIE(cast_,label_,size_,tag_) MALLOC_OR_DIE(cast_,label_,size_,tag_);assert((label_) != NULL)
#if DEBUG
#define ZFREE_ARRAY(ptr_,size_) free_erases((ptr_),(size_))
#else
#define ZFREE_ARRAY(ptr_,size_) free(ptr_)
#endif

// preferred way of allocating one instance of a type (struct, ...)
#define ZALLOC_OBJECT_OR_DIE(label__,object__,tag__) MALLOC_OBJECT_OR_DIE(label__, object__, tag__);assert((label__) != NULL)
#define ZALLOC_OBJECT_ARRAY_OR_DIE(label__,object__, count__,tag__) MALLOC_OBJECT_ARRAY_OR_DIE(label__, object__, count__, tag__);assert((label__) != NULL)

// Resizes array_ (count_ elements of type_) to newcount_ elements and
// updates count_.
// NOTE(review): realloc's result overwrites array_ directly, which leaks the
// original block on failure -- presumably acceptable because the zalloc-enabled
// variant of this macro dies on out-of-memory as well; confirm intended.
#define ZALLOC_ARRAY_RESIZE(type_,array_,count_,newcount_) \
{ \
int zalloc_new_count = (newcount_); \
(array_) = (type_*)realloc((array_),zalloc_new_count*sizeof(type_));\
(count_) = zalloc_new_count; \
}

#define ZFREE_OBJECT(label__) free((label__))
#define ZFREE_OBJECT_OF_TYPE(label__,type__) free(label__)
136
137 #else
138
139 /**
140 * ZALLOC_PG_SIZE_COUNT tells how many memory sizes are supported, with 8 bytes increments.
 * Setting this up involves some computation and bigger numbers may lead to unmanageable amounts of memory.
 * The current setting (256 = 2K) should be enough for most structures.
 * Exceptions like message_data should be on the stack or malloc'ed, and maybe placed in a pool too.
144 *
145 * I feel more and more that this allocator could and should be put in the core.
146 * The logger could benefit greatly from it. (although I don't know if I'd use
 * it like this or by forcing a higher granularity like 32 or 64 to avoid mapping too many slots.)
148 *
149 */
150
// Number of distinct slot sizes managed by zalloc; sizes grow in 8-byte
// steps, so the largest zalloc-managed block is ZALLOC_PG_SIZE_COUNT * 8 bytes.
#define ZALLOC_PG_SIZE_COUNT 256 // 2K
#define ZALLOC_PG_PAGEABLE_MAXSIZE (ZALLOC_PG_SIZE_COUNT * 8) /* x 8 because we are going by 8 increments */
// Maps a byte size to its page (slot-size) index: page = ceil(size/8) - 1.
// NOTE(review): a size of 0 yields -1; callers are expected to pass size >= 1.
#define ZALLOC_SIZE_TO_PAGE(size_) ((s32)(((size_)-1)>>3))
// True when the size is small enough to be served by the zalloc page system.
#define ZALLOC_CANHANDLE(size_) (((s32)(size_))<=ZALLOC_PG_PAGEABLE_MAXSIZE)
155
156 // prepares the zalloc tables
157
158 int zalloc_init();
159
160 // actually does nothing, just there for symmetry
161
162 void zalloc_finalize();
163
164 /**
165 * @brief Allocates one slot in a memory set
166 *
167 * Allocates one slot in a memory set.
168 *
169 * The size of a slot is page_index*8
170 *
171 * @param[in] page_index the index of the memory set
172 *
173 * @return a pointer to the allocated memory
174 */
175
176 void* zalloc_line(u32 page_index
177 #if HAS_ZALLOC_DEBUG_SUPPORT && DNSCORE_DEBUG_HAS_BLOCK_TAG
178 ,u64 tag
179 #endif
180 );
181
182 /**
183 * @brief Frees one slot in a memory set
184 *
185 * Frees one slot in a memory set
186 *
187 * The size of a slot is page_index*8
188 *
189 * @param[in] ptr a pointer to the memory to free
190 * @param[in] page_index the index of the memory set
191 *
192 */
193
194 void zfree_line(void* ptr, u32 page_index);
195
196 /**
197 * DEBUG
198 */
199
200 u64 zheap_line_total(u32 page);
201 u64 zheap_line_avail(u32 page);
202
203 /**
204 * zalloc_set_owner_thread made sense when it was not thread-safe.
205 * Now this does nothing
206 */
207
208 static inline void zalloc_set_owner_thread(thread_t owner) {(void)owner;}
209
210 static inline void* zalloc(s32 size
211 #if HAS_ZALLOC_DEBUG_SUPPORT && DNSCORE_DEBUG_HAS_BLOCK_TAG
212 ,u64 tag
213 #endif
214 )
215 {
216 u32 page = ZALLOC_SIZE_TO_PAGE(size);
217 void* ptr;
218
219 if(page < ZALLOC_PG_SIZE_COUNT)
220 {
221 ptr = zalloc_line(ZALLOC_SIZE_TO_PAGE(size)
222 #if HAS_ZALLOC_DEBUG_SUPPORT && DNSCORE_DEBUG_HAS_BLOCK_TAG
223 ,tag
224 #endif
225 );
226 }
227 else
228 {
229 ptr = malloc(size);
230 }
231
232 return ptr;
233 }
234
235 static inline void zfree(void* ptr, s32 size)
236 {
237 u32 page = ZALLOC_SIZE_TO_PAGE(size);
238
239 if(page < ZALLOC_PG_SIZE_COUNT)
240 {
241 zfree_line(ptr,ZALLOC_SIZE_TO_PAGE(size));
242 }
243 else
244 {
245 free(ptr);
246 }
247 }
248
249 /**
250 *
251 * Only works if --enable-zalloc-statistics has been set with ./configure
252 *
253 * @return the number of bytes allocated in the zalloc memory system, or -1 if the statistics are not enabled
254 */
255
256 s64 zallocatedtotal();
257
258 /**
259 * @brief Allocates unaligned memory of an arbitrary size using zalloc_line and malloc
260 *
261 * Allocates unaligned memory of an arbitrary size using zalloc_line and malloc
262 *
 * @param[in] size the size to allocate
264 *
265 * @return a pointer to the allocated memory
266 */
267
268 void* zalloc_unaligned(u32 size
269 #if HAS_ZALLOC_DEBUG_SUPPORT && DNSCORE_DEBUG_HAS_BLOCK_TAG
270 ,u64 tag
271 #endif
272 );
273
274 /**
275 * @brief Frees unaligned memory of an arbitrary size using zfree_line and free
276 *
 * Frees unaligned memory of an arbitrary size previously obtained with zalloc_unaligned
278 *
279 * @param[in] ptr a pointer to the memory to free
280 *
281 */
282
283 void zfree_unaligned(void* ptr);
284
285 /*
286 * THIS SHOULD BE OPTIMIZED BY THE COMPILER AS ONE AND ONLY ONE CALL
287 */
288
/**
 * Returns the real size of a block allocated for the requested size:
 * the request rounded up to the allocator's 8-byte granularity.
 *
 * @param size the requested allocation size in bytes
 * @return size rounded up to the next multiple of 8
 */
static inline size_t zalloc_memory_block_size(size_t size)
{
    size_t remainder = size & 7;
    return (remainder == 0) ? size : size + (8 - remainder);
}
293
// Maps a byte size / a type to the zalloc "line" (slot-size index) serving
// it: line = ceil(size/8) - 1.
// NOTE(review): a size of 0 yields -1 (a huge value once promoted unsigned),
// so it never "has a line" -- presumably routing size 0 to malloc; confirm.
#define ZALLOC_SIZE_TO_LINE(size__) ((((size__) + 7) >> 3)-1)
#define ZALLOC_TYPE_TO_LINE(object__) ZALLOC_SIZE_TO_LINE(sizeof(object__))
// True when the type/size is small enough to be served by a zalloc line.
#define ZALLOC_TYPE_HAS_LINE(object__) (ZALLOC_TYPE_TO_LINE(object__) < ZALLOC_PG_SIZE_COUNT)
#define ZALLOC_SIZE_HAS_LINE(size__) (ZALLOC_SIZE_TO_LINE((size__)) < ZALLOC_PG_SIZE_COUNT)
298
#if HAS_ZALLOC_DEBUG_SUPPORT && DNSCORE_DEBUG_HAS_BLOCK_TAG

#if DNSCORE_HAS_MALLOC_DEBUG_SUPPORT
void* debug_malloc(size_t size_,const char* file, int line, u64 tag);
// Allocates one object / size__ bytes: zalloc line when the size fits the
// slot system, (debug_)malloc otherwise. Tagged variants record 'tag'.
// NOTE(review): ZALLOC_OBJECT's expansion is not wrapped in parentheses
// (unlike ZALLOC_BYTES). At the use site below, (object__*)ZALLOC_OBJECT(...)
// casts only the condition of the ternary -- the result still selects the
// right branch, but this is fragile; confirm before reusing the macro in
// new expressions.
#define ZALLOC_OBJECT(object__,tag__) ZALLOC_TYPE_HAS_LINE(object__)?zalloc_line(ZALLOC_TYPE_TO_LINE(object__), (tag__)):debug_malloc(sizeof(object__), __FILE__, __LINE__, (tag__))
#define ZALLOC_BYTES(size__,tag__) (ZALLOC_SIZE_HAS_LINE(size__)?zalloc_line(ZALLOC_SIZE_TO_LINE(size__), (tag__)):debug_malloc((size__), __FILE__, __LINE__, (tag__)))
#else
#define ZALLOC_OBJECT(object__,tag__) ZALLOC_TYPE_HAS_LINE(object__)?zalloc_line(ZALLOC_TYPE_TO_LINE(object__), (tag__)):malloc(sizeof(object__))
#define ZALLOC_BYTES(size__,tag__) (ZALLOC_SIZE_HAS_LINE(size__)?zalloc_line(ZALLOC_SIZE_TO_LINE(size__), (tag__)):malloc((size__)))
#endif

// _OR_DIE variants abort the process (DIE) on out-of-memory.
#define ZALLOC_ARRAY_OR_DIE(cast__,label__,size__,tag__) if((label__ = (cast__)zalloc((size__),(tag__))) == NULL) {DIE(ZALLOC_ERROR_OUTOFMEMORY); } assert((label__) != NULL)
// preferred way of allocating one instance of a type (struct, ...)
#define ZALLOC_OBJECT_OR_DIE(label__,object__,tag__) if((label__=(object__*)ZALLOC_OBJECT(object__,(tag__)))==NULL) {DIE(ZALLOC_ERROR_OUTOFMEMORY); } assert((label__) != NULL)
#define ZALLOC_OBJECT_ARRAY_OR_DIE(label__,object__,count__,tag__) if((label__=(object__*)ZALLOC_BYTES(sizeof(object__)*(count__), (tag__)))==NULL) {DIE(ZALLOC_ERROR_OUTOFMEMORY); } assert((label__) != NULL)
#else
// Untagged variants of the same macros (tag__ parameters are accepted for
// call-site compatibility but ignored).
#define ZALLOC_OBJECT(object__) ZALLOC_TYPE_HAS_LINE(object__)?zalloc_line(ZALLOC_TYPE_TO_LINE(object__)):malloc(sizeof(object__))
#define ZALLOC_BYTES(size__) (ZALLOC_SIZE_HAS_LINE(size__)?zalloc_line(ZALLOC_SIZE_TO_LINE(size__)):malloc((size__)))
#define ZALLOC_ARRAY_OR_DIE(cast,label,size_,tag) if((label = (cast)zalloc(size_)) == NULL) {DIE(ZALLOC_ERROR_OUTOFMEMORY); } assert((label) != NULL)
// preferred way of allocating one instance of a type (struct, ...)
#define ZALLOC_OBJECT_OR_DIE(label__,object__,tag__) if((label__=(object__*)ZALLOC_OBJECT(object__))==NULL) {DIE(ZALLOC_ERROR_OUTOFMEMORY); } assert((label__) != NULL)
#define ZALLOC_OBJECT_ARRAY_OR_DIE(label__,object__,count__,tag__) if((label__=(object__*)ZALLOC_BYTES(sizeof(object__)*(count__)))==NULL) {DIE(ZALLOC_ERROR_OUTOFMEMORY); } assert((label__) != NULL)
#endif

// Frees a block allocated for one instance of object__ / size_ bytes /
// one object pointed to by label__.
#define ZFREE(ptr,object__) ZALLOC_TYPE_HAS_LINE(object__)?zfree_line(ptr,ZALLOC_TYPE_TO_LINE(object__)):free(ptr)
#define ZFREE_ARRAY(ptr,size_) zfree(ptr,size_)
#define ZFREE_OBJECT(label__) zfree((label__), sizeof(*(label__)))
#define ZFREE_OBJECT_OF_TYPE(label__,type__) zfree((label__), sizeof(type__))
327 /**
328 * (Z)Allocates a new array of count type elements so it can hold
329 * newcount type elements. (It takes granularity into account to avoid
330 * unnecessary work)
331 *
332 * If the array is smaller, the end is truncated
333 * If the new count is zero the array is deleted.
334 *
335 * After the macro, array_ and count_ are changed so don't use consts.
336 *
337 * This helper is meant to be used by NSEC3 structures
338 */
339
#define ZALLOC_ARRAY_RESIZE_TAG 0x44455a49534552 /* RESIZED */

// Resizes array_ (count_ elements of type_) to newcount_ elements.
// The reallocation is skipped entirely when the old and new sizes map to the
// same zalloc page, i.e. the 8-byte granularity is taken into account.
// On shrink the tail is dropped; a new count of 0 frees the array and sets
// it to NULL.
// NOTE(review): the trailing assert(array_ != NULL) fires (in DEBUG builds)
// on that documented delete-to-zero path, and also when count_ == newcount_
// with a NULL array -- confirm whether callers ever resize to 0.
#define ZALLOC_ARRAY_RESIZE(type_,array_,count_,newcount_) \
{ \
u32 zalloc_new_count = (u32)(newcount_); \
if(((u32)(count_)) != zalloc_new_count) \
{ \
if( ZALLOC_SIZE_TO_PAGE(sizeof(type_)*((u32)(count_))) != \
ZALLOC_SIZE_TO_PAGE(sizeof(type_)*zalloc_new_count)) \
{ \
type_* __tmp__; \
\
if(zalloc_new_count > 0) \
{ \
ZALLOC_ARRAY_OR_DIE(type_*,__tmp__,sizeof(type_)*zalloc_new_count, ZALLOC_ARRAY_RESIZE_TAG); \
MEMCOPY(__tmp__,(array_),sizeof(type_)*MIN((u32)(count_),zalloc_new_count)); \
} \
else \
{ \
__tmp__ = NULL; \
} \
\
ZFREE_ARRAY((array_),sizeof(type_)*((u32)(count_))); \
array_ = __tmp__; \
count_ = newcount_; \
} \
} \
assert(array_ != NULL); \
}
369
370 #endif
371
372 struct output_stream;
373 void zalloc_print_stats(struct output_stream *os);
374
375 #ifdef __cplusplus
376 }
377 #endif
378
379 /** @} */
380