/*------------------------------------------------------------------------------
 *
 * Copyright (c) 2011-2021, EURid vzw. All rights reserved.
 * The YADIFA TM software product is provided under the BSD 3-clause license:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *        * Redistributions of source code must retain the above copyright
 *          notice, this list of conditions and the following disclaimer.
 *        * Redistributions in binary form must reproduce the above copyright
 *          notice, this list of conditions and the following disclaimer in the
 *          documentation and/or other materials provided with the distribution.
 *        * Neither the name of EURid nor the names of its contributors may be
 *          used to endorse or promote products derived from this software
 *          without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 *------------------------------------------------------------------------------
 *
 */

#include "dnscore/dnscore-config.h"
#include <stddef.h>
#include <unistd.h>
#include <sys/types.h>

#include "dnscore/dnscore.h"
#include "dnscore/fdtools.h"
#include "dnscore/shared-heap.h"

/** @defgroup shared_heap Shared heap
 * @ingroup dnscore
 * @brief Process-shared memory heap
 *
 * A fixed-size heap carved out of a MAP_SHARED anonymous mapping, usable by
 * a process and by the children it forks afterwards.
 *
 * @{
 *
 *----------------------------------------------------------------------------*/

#include <sys/mman.h>
#include <dnscore/mutex.h>
#include <dnscore/format.h>

#define L1_DATA_LINE_SIZE 0x40
#define L1_DATA_LINE_MASK (L1_DATA_LINE_SIZE - 1)

#define MUTEX_PROCESS_SHARED_SUPPORTED 1
//#define MUTEX_PROCESS_SHARED_SUPPORTED 0 // experimental, not enough resources to make this work
#define SHARED_HEAP_ALLOC_DEBUG 0
#define SHARED_HEAP_ALLOC_PRINT 0

struct shared_heap_bloc
{
    s32 prev_size;
    s32 real_size;

    u8 heap_index;
    u8 allocated;
    u16 _reserved0;
    s32 size;
};

struct shared_heap_free_bloc
{
    s32 prev_size;
    s32 real_size;

    u8 heap_index;
    u8 allocated;
    u16 _reserved0;
    s32 size;

    struct shared_heap_free_bloc *next;
    struct shared_heap_free_bloc *prev;
};

#define SHARED_HEAP_BLOC_SIZE ((sizeof(struct shared_heap_bloc) + 7) & ~7)
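
// Allocations are carved in L1_DATA_LINE_SIZE (64-byte) granules:
// real_size = (SHARED_HEAP_BLOC_SIZE + size + L1_DATA_LINE_MASK) & ~L1_DATA_LINE_MASK.
// Worked example (illustrative): the bloc header occupies 16 bytes here, so a
// 24-byte request needs 16 + 24 = 40 bytes and gets a real_size of 64, while a
// 64-byte request needs 16 + 64 = 80 bytes and gets a real_size of 128.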

struct shared_heap_ctx
{
#if MUTEX_PROCESS_SHARED_SUPPORTED
    mutex_t mtx;
    cond_t cond;
#else
    semaphore_t sem;
#endif
#if DEBUG
#if SHARED_HEAP_ALLOC_DEBUG
    debug_memory_by_tag_context_t *mem_ctx;
#endif
#endif
    struct shared_heap_bloc *base;
    struct shared_heap_free_bloc free;  // sentinel node of the circular free list, never allocated from
    struct shared_heap_bloc *limit;
    size_t size;
};

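// Up to 256 heap contexts live in one MAP_SHARED anonymous mapping created by
// shared_heap_init(); shared_heap_next is the next candidate slot, or -1 while
// uninitialised (or when every slot is in use).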
static struct shared_heap_ctx *shared_heaps = NULL;
static int shared_heap_next = -1;

#if MUTEX_PROCESS_SHARED_SUPPORTED

static inline int shared_heap_lock_init(shared_heap_ctx *ctx)
{
    int ret;
    if((ret = mutex_init_process_shared(&ctx->mtx)) == 0)
    {
        if((ret = cond_init_process_shared(&ctx->cond)) != 0)
        {
            mutex_destroy(&ctx->mtx);
            ret = MAKE_ERRNO_ERROR(ret);
        }
    }
    else
    {
        ret = MAKE_ERRNO_ERROR(ret);
    }
    return ret;
}

static inline void shared_heap_lock_finalize(shared_heap_ctx *ctx)
{
    cond_finalize(&ctx->cond);
    mutex_destroy(&ctx->mtx);
}

static inline void shared_heap_lock(shared_heap_ctx *ctx)
{
    mutex_lock(&ctx->mtx);
}

static inline bool shared_heap_try_lock(shared_heap_ctx *ctx)
{
    bool ret = mutex_trylock(&ctx->mtx);
    return ret;
}

static inline void shared_heap_unlock(shared_heap_ctx *ctx)
{
    mutex_unlock(&ctx->mtx);
}

static inline void shared_heap_wait(shared_heap_ctx *ctx)
{
    // cond_wait(&ctx->cond, &ctx->mtx);
    cond_wait_auto_time_out(&ctx->cond, &ctx->mtx);
}

static inline void shared_heap_notify_unlock(shared_heap_ctx *ctx)
{
    cond_notify(&ctx->cond); // @note https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=884776
    mutex_unlock(&ctx->mtx);
}

#else

static inline void shared_heap_lock_init(shared_heap_ctx *ctx)
{
    semaphore_init_process_shared(&ctx->sem);
}

static inline void shared_heap_lock_finalize(shared_heap_ctx *ctx)
{
    semaphore_finalize(&ctx->sem);
}

static inline void shared_heap_lock(shared_heap_ctx *ctx)
{
    semaphore_lock(&ctx->sem);
}

// note: this branch provides no shared_heap_try_lock(), so
// shared_heap_try_alloc_from_ctx() cannot be built with it

static inline void shared_heap_unlock(shared_heap_ctx *ctx)
{
    semaphore_unlock(&ctx->sem);
}

static inline void shared_heap_wait(shared_heap_ctx *ctx)
{
    semaphore_unlock(&ctx->sem);
    semaphore_lock(&ctx->sem);
}

static inline void shared_heap_notify_unlock(shared_heap_ctx *ctx)
{
    semaphore_unlock(&ctx->sem);
}

#endif

void
shared_heap_check_bloc(u8 id, void *bloc_, u8 allocated)
{
#if DEBUG
    struct shared_heap_bloc *bloc = (struct shared_heap_bloc*)bloc_;
    assert(bloc->heap_index == id);
    assert((size_t)bloc->prev_size < shared_heaps[id].size);
    assert((size_t)bloc->real_size < shared_heaps[id].size);
    assert((bloc >= shared_heaps[id].base) && (bloc < shared_heaps[id].limit));

    if(allocated <= 1)
    {
        assert(bloc->allocated == allocated);

        if(bloc->allocated != allocated)
        {
            if(allocated == 1)
            {
                osformatln(termerr, "%i: shared-heap[%i] : double free at %p", getpid(), id, bloc_);
                flusherr();
            }
            else
            {
                osformatln(termerr, "%i: shared-heap[%i] : corruption at %p", getpid(), id, bloc_);
                flusherr();
            }

            osprint_dump(termerr, bloc, bloc->size, 16, OSPRINT_DUMP_ADDRESS|OSPRINT_DUMP_HEX|OSPRINT_DUMP_TEXT);
            flusherr();
        }
    }

    if(bloc->allocated == 1)
    {
#ifndef NDEBUG
        size_t real_size = (bloc->size + SHARED_HEAP_BLOC_SIZE + L1_DATA_LINE_MASK) & ~L1_DATA_LINE_MASK;
        assert((size_t)bloc->real_size == real_size);
#endif
    }
#endif
    (void)id;
    (void)bloc_;
    (void)allocated;
}

void
shared_heap_check_ptr(u8 id, void *ptr)
{
#if DEBUG
    struct shared_heap_bloc *bloc = (struct shared_heap_bloc *)&(((u8*)ptr)[-SHARED_HEAP_BLOC_SIZE]);
    shared_heap_check_bloc(id, bloc, 1);
#endif
    (void)id;
    (void)ptr;
}

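/**
 * Initialises the table of shared heap contexts in a MAP_SHARED anonymous
 * mapping, so that heaps created afterwards are visible to forked children.
 *
 * Minimal usage sketch (illustrative only, error handling elided):
 *
 *   shared_heap_init();
 *   ya_result id = shared_heap_create(65536);     // returns the heap id
 *   void *p = shared_heap_alloc((u8)id, 100);
 *   shared_heap_free(p);
 *   shared_heap_destroy((u8)id);
 *   shared_heap_finalize();
 *
 * @return SUCCESS, or an ERRNO_ERROR code if the mapping could not be created
 */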
ya_result
shared_heap_init()
{
    if(shared_heaps == NULL)
    {
        const size_t ctx_size = (sizeof(struct shared_heap_ctx) + L1_DATA_LINE_MASK) & ~L1_DATA_LINE_MASK;
        const size_t ctx_array_size = ((ctx_size * 256) + 4095) & ~4095;

        shared_heaps = (struct shared_heap_ctx*)mmap(NULL, ctx_array_size, PROT_READ|PROT_WRITE, MAP_SHARED|MAP_ANONYMOUS, -1, 0);

        if(shared_heaps != ((struct shared_heap_ctx*)MAP_FAILED))
        {
            memset(shared_heaps, 0, ctx_array_size);
            shared_heap_next = 0;
            return SUCCESS;
        }
        else
        {
            return ERRNO_ERROR;
        }
    }

    return SUCCESS;
}

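/**
 * Unmaps the shared heap context table. Heaps still mapped are not destroyed
 * by this call; destroy them first with shared_heap_destroy().
 */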
void
shared_heap_finalize()
{
    if(shared_heaps != NULL)
    {
        const size_t ctx_size = (sizeof(struct shared_heap_ctx) + L1_DATA_LINE_MASK) & ~L1_DATA_LINE_MASK;
        const size_t ctx_array_size = ((ctx_size * 256) + 4095) & ~4095;

        munmap(shared_heaps, ctx_array_size);

        shared_heaps = NULL;
    }
}

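/**
 * Creates a shared heap of the given size (rounded up to a page multiple),
 * backed by a MAP_SHARED anonymous mapping. The first and last 64-byte lines
 * are sentinel blocs marked allocated so coalescing never walks past either
 * end of the heap.
 *
 * @param size the requested heap size in bytes
 * @return the heap id (>= 0), or an error code
 */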
ya_result
shared_heap_create(size_t size)
{
    if(shared_heap_next < 0)
    {
        return OBJECT_NOT_INITIALIZED;
    }

    size = (size + 4095) & ~4095; // round up to a page multiple

    void *ptr = mmap(NULL, size, PROT_READ|PROT_WRITE, MAP_SHARED|MAP_ANONYMOUS, -1, 0);

    if(ptr == MAP_FAILED)
    {
        return ERRNO_ERROR;
    }

    struct shared_heap_ctx *ctx = &shared_heaps[shared_heap_next];

    ya_result ret;
    if(FAIL(ret = shared_heap_lock_init(ctx)))
    {
        munmap(ptr, size);
        return ret;
    }

    ctx->base = (struct shared_heap_bloc*)ptr;
    ctx->limit = (struct shared_heap_bloc*)&((u8*)ptr)[size];
    ctx->free.prev_size = 0;
    ctx->free.real_size = 0;
    ctx->free.heap_index = (u8)shared_heap_next;
    ctx->free.allocated = 1; // the sentinel is never merged with real blocs
    ctx->free.next = (struct shared_heap_free_bloc*)&((u8*)ptr)[L1_DATA_LINE_SIZE];
    ctx->free.prev = ctx->free.next;
    ctx->size = size;

    ctx->free.next->prev_size = L1_DATA_LINE_SIZE;
    ctx->free.next->real_size = size - L1_DATA_LINE_SIZE * 2;
    ctx->free.next->heap_index = (u8)shared_heap_next;
    ctx->free.next->allocated = 0;
    ctx->free.next->size = 0;
    ctx->free.next->next = &ctx->free;
    ctx->free.next->prev = &ctx->free;

#if DEBUG
#if SHARED_HEAP_ALLOC_DEBUG
    ctx->mem_ctx = debug_memory_by_tag_new_instance("shared-heap");
#endif
#endif

    // allocated sentinel bloc on the first 64-byte line

    struct shared_heap_bloc *header = (struct shared_heap_bloc *)&(((u8*)ptr)[0]);
    header->real_size = L1_DATA_LINE_SIZE;
    header->prev_size = 0;
    header->heap_index = (u8)shared_heap_next;
    header->allocated = 1;
#if DEBUG
    header->_reserved0 = 0x4848;
#endif
    header->size = 0;
    memset((struct shared_heap_bloc *)&(((u8*)ptr)[SHARED_HEAP_BLOC_SIZE]), 'H', L1_DATA_LINE_SIZE - SHARED_HEAP_BLOC_SIZE);

    // allocated sentinel bloc on the last 64-byte line

    struct shared_heap_bloc *footer = (struct shared_heap_bloc *)&(((u8*)ptr)[size - L1_DATA_LINE_SIZE]);
    footer->real_size = L1_DATA_LINE_SIZE;
    footer->prev_size = size - L1_DATA_LINE_SIZE * 2;
    footer->heap_index = (u8)shared_heap_next;
    footer->allocated = 1;
#if DEBUG
    footer->_reserved0 = 0x4646;
#endif
    footer->size = 0;
    memset((struct shared_heap_bloc *)&(((u8*)ptr)[size - L1_DATA_LINE_SIZE + SHARED_HEAP_BLOC_SIZE]), 'F', L1_DATA_LINE_SIZE - SHARED_HEAP_BLOC_SIZE);

    ret = shared_heap_next;

    // look for the next free slot, first after the current one ...

    while(shared_heap_next < 255)
    {
        ++shared_heap_next;
        if(shared_heaps[shared_heap_next].base == NULL)
        {
            return ret;
        }
    }

    // ... then from the start of the table

    shared_heap_next = 0;

    while(shared_heap_next < ret)
    {
        if(shared_heaps[shared_heap_next].base == NULL)
        {
            return ret;
        }

        shared_heap_next++;
    }

    shared_heap_next = -1; // the table is full

    return ret;
}

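/**
 * Destroys the heap: finalizes its lock, unmaps its memory and releases its
 * slot. Any pointer still allocated from the heap becomes invalid.
 *
 * @param id the heap id returned by shared_heap_create()
 */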
void
shared_heap_destroy(u8 id)
{
    if(shared_heaps[id].base != NULL)
    {
        shared_heap_lock_finalize(&shared_heaps[id]);
#if DEBUG
        memset(shared_heaps[id].base, 0xfe, shared_heaps[id].size);
#endif
        munmap(shared_heaps[id].base, shared_heaps[id].size);
        shared_heaps[id].base = NULL;
        shared_heaps[id].free.next = NULL;
        shared_heaps[id].free.prev = NULL;
        shared_heaps[id].size = 0;
#if DEBUG
#if SHARED_HEAP_ALLOC_DEBUG
        debug_memory_by_tag_delete(shared_heaps[id].mem_ctx);
        shared_heaps[id].mem_ctx = NULL;
#endif
#endif
    }
    if(shared_heap_next < 0)
    {
        shared_heap_next = id;
    }
}

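/**
 * Allocates size bytes from the heap, first-fit: walks the circular free list
 * from ctx->free and takes the first bloc whose real_size is big enough,
 * splitting it when it is strictly bigger than needed.
 *
 * @return a pointer to the allocated memory, or NULL if no free bloc fits
 */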
void*
shared_heap_alloc_from_ctx(struct shared_heap_ctx *ctx, size_t size)
{
    size_t real_size = (SHARED_HEAP_BLOC_SIZE + size + L1_DATA_LINE_MASK) & ~L1_DATA_LINE_MASK;

    shared_heap_lock(ctx);

    struct shared_heap_free_bloc *bloc = ctx->free.next;

    while(bloc != &ctx->free)
    {
        assert(bloc->allocated == 0);
        // will be wrong: assert(bloc->real_size >= bloc->size);

        if((size_t)bloc->real_size >= real_size)
        {
            // take from this bloc

            if((size_t)bloc->real_size == real_size)
            {
                // perfect match

                bloc->next->prev = bloc->prev;
                bloc->prev->next = bloc->next;

                // bloc prev & next are now irrelevant

                bloc->allocated = 1;
#if DEBUG
                if(bloc->_reserved0 == 0)
                {
                    bloc->_reserved0 = 0x4141;
                }
#if SHARED_HEAP_ALLOC_DEBUG
                debug_memory_by_tag_alloc_notify(ctx->mem_ctx, GENERIC_TAG, size);
#endif
#endif
                bloc->size = size;

                shared_heap_unlock(ctx);

                return &((u8*)bloc)[SHARED_HEAP_BLOC_SIZE];
            }
            else
            {
                // cut the bloc

                struct shared_heap_free_bloc *next_bloc = (struct shared_heap_free_bloc*)&(((u8*)bloc)[real_size]);
                next_bloc->real_size = bloc->real_size - real_size;
                next_bloc->prev_size = real_size;

                next_bloc->next = bloc->next;
                next_bloc->prev = bloc->prev;
                bloc->next->prev = next_bloc;
                bloc->prev->next = next_bloc;

                // bloc prev & next are now irrelevant

                next_bloc->heap_index = bloc->heap_index;
                next_bloc->allocated = 0;
#if DEBUG
                next_bloc->_reserved0 = 0x4343;
#endif

                struct shared_heap_free_bloc *next_next_bloc = (struct shared_heap_free_bloc*)&((u8*)next_bloc)[next_bloc->real_size];
                next_next_bloc->prev_size = next_bloc->real_size;

                bloc->real_size = real_size;

                bloc->allocated = 1;
                bloc->size = size;

#if DEBUG
                shared_heap_check_bloc(bloc->heap_index, bloc, 1);
                shared_heap_check_bloc(bloc->heap_index, next_bloc, 0);
                shared_heap_check_bloc(bloc->heap_index, next_next_bloc, 2);
#if SHARED_HEAP_ALLOC_DEBUG
                debug_memory_by_tag_alloc_notify(ctx->mem_ctx, GENERIC_TAG, size);
#endif
#endif

                shared_heap_unlock(ctx);

                return &((u8*)bloc)[SHARED_HEAP_BLOC_SIZE];
            }
        }
        else
        {
            bloc = bloc->next;
        }
    }

    shared_heap_unlock(ctx);

    return NULL;
}

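/**
 * Non-blocking variant of shared_heap_alloc_from_ctx(): gives up and returns
 * NULL if the heap lock cannot be taken immediately.
 */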
void*
shared_heap_try_alloc_from_ctx(struct shared_heap_ctx *ctx, size_t size)
{
    size_t real_size = (SHARED_HEAP_BLOC_SIZE + size + L1_DATA_LINE_MASK) & ~L1_DATA_LINE_MASK;

    if(shared_heap_try_lock(ctx))
    {
        struct shared_heap_free_bloc *bloc = ctx->free.next;

        while(bloc != &ctx->free)
        {
            assert(bloc->allocated == 0);
            // will be wrong: assert(bloc->real_size >= bloc->size);

            if((size_t)bloc->real_size >= real_size)
            {
                // take from this bloc

                if((size_t)bloc->real_size == real_size)
                {
                    // perfect match

                    bloc->next->prev = bloc->prev;
                    bloc->prev->next = bloc->next;

                    // bloc prev & next are now irrelevant

                    bloc->allocated = 1;
#if DEBUG
                    if(bloc->_reserved0 == 0)
                    {
                        bloc->_reserved0 = 0x4141;
                    }
#if SHARED_HEAP_ALLOC_DEBUG
                    debug_memory_by_tag_alloc_notify(ctx->mem_ctx, GENERIC_TAG, size);
#endif
#endif
                    bloc->size = size;

                    shared_heap_unlock(ctx);

                    return &((u8*)bloc)[SHARED_HEAP_BLOC_SIZE];
                }
                else
                {
                    // cut the bloc

                    struct shared_heap_free_bloc *next_bloc = (struct shared_heap_free_bloc*)&(((u8*)bloc)[real_size]);
                    next_bloc->real_size = bloc->real_size - real_size;
                    next_bloc->prev_size = real_size;

                    next_bloc->next = bloc->next;
                    next_bloc->prev = bloc->prev;
                    bloc->next->prev = next_bloc;
                    bloc->prev->next = next_bloc;

                    // bloc prev & next are now irrelevant

                    next_bloc->heap_index = bloc->heap_index;
                    next_bloc->allocated = 0;
#if DEBUG
                    next_bloc->_reserved0 = 0x4343;
#endif

                    struct shared_heap_free_bloc *next_next_bloc = (struct shared_heap_free_bloc*)&((u8*)next_bloc)[next_bloc->real_size];
                    next_next_bloc->prev_size = next_bloc->real_size;

                    bloc->real_size = real_size;

                    bloc->allocated = 1;
                    bloc->size = size;

#if DEBUG
                    shared_heap_check_bloc(bloc->heap_index, bloc, 1);
                    shared_heap_check_bloc(bloc->heap_index, next_bloc, 0);
                    shared_heap_check_bloc(bloc->heap_index, next_next_bloc, 2);
#if SHARED_HEAP_ALLOC_DEBUG
                    debug_memory_by_tag_alloc_notify(ctx->mem_ctx, GENERIC_TAG, size);
#endif
#endif

                    shared_heap_unlock(ctx);

                    return &((u8*)bloc)[SHARED_HEAP_BLOC_SIZE];
                }
            }
            else
            {
                bloc = bloc->next;
            }
        }

        shared_heap_unlock(ctx);
    }

    return NULL;
}

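/**
 * Blocking variant of shared_heap_alloc_from_ctx(): when no free bloc is big
 * enough, waits on the heap condition until a free notifies it, then scans
 * the free list again.
 */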
void*
shared_heap_wait_alloc_from_ctx(struct shared_heap_ctx *ctx, size_t size)
{
    size_t real_size = (SHARED_HEAP_BLOC_SIZE + size + L1_DATA_LINE_MASK) & ~L1_DATA_LINE_MASK;

    shared_heap_lock(ctx);

    for(;;)
    {
        struct shared_heap_free_bloc *bloc = ctx->free.next;

        while(bloc != &ctx->free)
        {
            assert(bloc->allocated == 0);
            // will be wrong: assert(bloc->real_size >= bloc->size);
#if DEBUG
            shared_heap_check_bloc(bloc->heap_index, bloc, 0);
#endif
            if((size_t)bloc->real_size >= real_size)
            {
                // take from this bloc

                if((size_t)bloc->real_size == real_size)
                {
                    // perfect match

                    bloc->next->prev = bloc->prev;
                    bloc->prev->next = bloc->next;

                    // bloc prev & next are now irrelevant

                    bloc->allocated = 1;
#if DEBUG
                    if(bloc->_reserved0 == 0)
                    {
                        bloc->_reserved0 = 0x4141;
                    }
#if SHARED_HEAP_ALLOC_DEBUG
                    debug_memory_by_tag_alloc_notify(ctx->mem_ctx, GENERIC_TAG, size);
#endif
#endif
                    bloc->size = size;
#if DEBUG
                    shared_heap_check_bloc(bloc->heap_index, bloc, 1);
#endif
                    shared_heap_unlock(ctx);
#if DEBUG
                    memset(&((u8*)bloc)[SHARED_HEAP_BLOC_SIZE], 'A', bloc->real_size - SHARED_HEAP_BLOC_SIZE);
#endif
                    return &((u8*)bloc)[SHARED_HEAP_BLOC_SIZE];
                }
                else
                {
                    // cut the bloc

                    struct shared_heap_free_bloc *next_bloc = (struct shared_heap_free_bloc*)&(((u8*)bloc)[real_size]);
                    next_bloc->real_size = bloc->real_size - real_size;
                    next_bloc->prev_size = real_size;

                    next_bloc->next = bloc->next;
                    next_bloc->prev = bloc->prev;
                    bloc->next->prev = next_bloc;
                    bloc->prev->next = next_bloc;

                    // bloc prev & next are now irrelevant

                    next_bloc->heap_index = bloc->heap_index;
                    next_bloc->allocated = 0;
#if DEBUG
                    next_bloc->_reserved0 = 0x4343;
#endif
                    struct shared_heap_free_bloc *next_next_bloc = (struct shared_heap_free_bloc*)&((u8*)next_bloc)[next_bloc->real_size];
                    next_next_bloc->prev_size = next_bloc->real_size;

                    bloc->allocated = 1;
                    bloc->real_size = real_size;
                    bloc->size = size;
#if DEBUG
                    shared_heap_check_bloc(bloc->heap_index, bloc, 1);
                    shared_heap_check_bloc(bloc->heap_index, next_bloc, 0);
                    shared_heap_check_bloc(bloc->heap_index, next_next_bloc, 2);
#if SHARED_HEAP_ALLOC_DEBUG
                    debug_memory_by_tag_alloc_notify(ctx->mem_ctx, GENERIC_TAG, size);
#endif
#endif
                    shared_heap_unlock(ctx);
#if DEBUG
                    memset(&((u8*)bloc)[SHARED_HEAP_BLOC_SIZE], 'a', bloc->real_size - SHARED_HEAP_BLOC_SIZE);
#endif
                    return &((u8*)bloc)[SHARED_HEAP_BLOC_SIZE];
                }
            }
            else
            {
                bloc = bloc->next;
            }
        } // while bloc != ctx->free

        shared_heap_wait(ctx);
    }
}

static void
shared_heap_grow_allocated_with_following_free_bloc(struct shared_heap_free_bloc *bloc, struct shared_heap_free_bloc *next_bloc)
{
#if DEBUG
    shared_heap_check_bloc(bloc->heap_index, bloc, 1);
    shared_heap_check_bloc(next_bloc->heap_index, next_bloc, 0);
#endif

    next_bloc->next->prev = next_bloc->prev;
    next_bloc->prev->next = next_bloc->next;

    bloc->real_size += next_bloc->real_size;

#if DEBUG
    bloc->size = bloc->real_size - SHARED_HEAP_BLOC_SIZE;
#endif

    struct shared_heap_free_bloc *next_next_bloc = (struct shared_heap_free_bloc*)&((u8*)next_bloc)[next_bloc->real_size];

    next_next_bloc->prev_size = bloc->real_size;
}

/**
 * Merges two adjacent blocks in specific states: the first allocated, the
 * second free. After the call, the first block is no longer allocated (it
 * becomes the head of the merged free block).
 */

static void
shared_heap_merge_allocated_with_following_free_bloc(struct shared_heap_free_bloc *bloc, struct shared_heap_free_bloc *next_bloc)
{
#if DEBUG
    shared_heap_check_bloc(bloc->heap_index, bloc, 1);
    shared_heap_check_bloc(next_bloc->heap_index, next_bloc, 0);
#endif

    bloc->real_size += next_bloc->real_size;

#if DEBUG
    bloc->size = bloc->real_size - SHARED_HEAP_BLOC_SIZE;
#endif

    // bloc takes next_bloc's place in the free chain

    bloc->next = next_bloc->next;
    bloc->next->prev = bloc;

    bloc->prev = next_bloc->prev;
    bloc->prev->next = bloc;

    bloc->allocated = 0;

    struct shared_heap_free_bloc *next_next_bloc = (struct shared_heap_free_bloc*)&((u8*)next_bloc)[next_bloc->real_size];
    next_next_bloc->prev_size = bloc->real_size;

#if DEBUG
    memset(next_bloc, 'T', L1_DATA_LINE_SIZE);
#endif
}

/**
 * Merges two adjacent blocks in specific states: the first free, the second
 * allocated. After the call, the second block is no longer allocated (it has
 * been absorbed into the free block).
 */

static void
shared_heap_merge_free_with_following_allocated_bloc(struct shared_heap_free_bloc *bloc, struct shared_heap_free_bloc *next_bloc)
{
#if DEBUG
    shared_heap_check_bloc(bloc->heap_index, bloc, 0);
    shared_heap_check_bloc(next_bloc->heap_index, next_bloc, 1);
#endif

    bloc->real_size += next_bloc->real_size;

#if DEBUG
    bloc->size = bloc->real_size - SHARED_HEAP_BLOC_SIZE;
#endif

    struct shared_heap_free_bloc *next_next_bloc = (struct shared_heap_free_bloc*)&((u8*)next_bloc)[next_bloc->real_size];
    next_next_bloc->prev_size = bloc->real_size;

#if DEBUG
    memset(next_bloc, 'U', L1_DATA_LINE_SIZE);
#endif
}

/**
 * Merges three adjacent blocks in specific states: free, allocated, free.
 * After the call, the middle block is no longer allocated (the three blocks
 * become one free block headed by the first).
 */

static void
shared_heap_merge_allocated_with_surrounding_free_blocs(struct shared_heap_free_bloc *prev_bloc, struct shared_heap_free_bloc *bloc, struct shared_heap_free_bloc *next_bloc)
{
#if DEBUG
    shared_heap_check_bloc(prev_bloc->heap_index, prev_bloc, 0);
    shared_heap_check_bloc(bloc->heap_index, bloc, 1);
    shared_heap_check_bloc(next_bloc->heap_index, next_bloc, 0);
#endif

    // detach the next bloc from the chain
    // merge the 3
    next_bloc->next->prev = next_bloc->prev;
    next_bloc->prev->next = next_bloc->next;

    prev_bloc->real_size += bloc->real_size + next_bloc->real_size;

#if DEBUG
    prev_bloc->size = prev_bloc->real_size - SHARED_HEAP_BLOC_SIZE;
#endif

    struct shared_heap_free_bloc *next_next_bloc = (struct shared_heap_free_bloc*)&((u8*)next_bloc)[next_bloc->real_size];
    next_next_bloc->prev_size = prev_bloc->real_size;

#if DEBUG
    memset(bloc, 'V', L1_DATA_LINE_SIZE);
    memset(next_bloc, 'W', L1_DATA_LINE_SIZE);
#endif
}

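/**
 * Frees a pointer obtained from this heap. The physical neighbours are found
 * through real_size (next) and prev_size (prev); the freed bloc is coalesced
 * with whichever neighbours are free, covering the four cases: both
 * allocated, only next free, only prev free, both free.
 */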
void
shared_heap_free_from_ctx(struct shared_heap_ctx *ctx, void *ptr)
{
    struct shared_heap_free_bloc *bloc = (struct shared_heap_free_bloc *)&(((u8*)ptr)[-SHARED_HEAP_BLOC_SIZE]);

    shared_heap_lock(ctx);

#if DEBUG
    shared_heap_check_bloc(bloc->heap_index, bloc, 1);
#if SHARED_HEAP_ALLOC_DEBUG
    debug_memory_by_tag_free_notify(ctx->mem_ctx, GENERIC_TAG, bloc->size);
#endif
#endif

    struct shared_heap_free_bloc *next_bloc = (struct shared_heap_free_bloc*)&(((u8*)bloc)[bloc->real_size]);

#if DEBUG
    shared_heap_check_bloc(bloc->heap_index, next_bloc, 2);
#endif

    if(next_bloc->allocated == 0)
    {
        struct shared_heap_free_bloc *prev_bloc = (struct shared_heap_free_bloc*)&(((u8*)bloc)[-bloc->prev_size]);
#if DEBUG
        shared_heap_check_bloc(bloc->heap_index, prev_bloc, 2);
#endif
        if(prev_bloc->allocated == 0)
        {
            // merge 3
#if DEBUG
            prev_bloc->_reserved0 = 0xfe03;
            bloc->_reserved0 = 0xfe13;
            next_bloc->_reserved0 = 0xfe23;
#endif
            shared_heap_merge_allocated_with_surrounding_free_blocs(prev_bloc, bloc, next_bloc);
            bloc = prev_bloc;
        }
        else
        {
            // merge 2
#if DEBUG
            bloc->_reserved0 = 0xfe02;
            next_bloc->_reserved0 = 0xfe12;
#endif

            shared_heap_merge_allocated_with_following_free_bloc(bloc, next_bloc);
        }
    }
    else
    {
        struct shared_heap_free_bloc *prev_bloc = (struct shared_heap_free_bloc*)&(((u8*)bloc)[-bloc->prev_size]);

#if DEBUG
        shared_heap_check_bloc(bloc->heap_index, prev_bloc, 2);
#endif

        if(prev_bloc->allocated == 0)
        {
            // merge 2
#if DEBUG
            prev_bloc->_reserved0 = 0xfe01;
            bloc->_reserved0 = 0xfe11;
#endif
            shared_heap_merge_free_with_following_allocated_bloc(prev_bloc, bloc);
            bloc = prev_bloc;
        }
        else
        {
            // no mergeable neighbour: append the bloc to the free chain
#if DEBUG
            bloc->_reserved0 = 0xfe00;
#endif
            bloc->next = &ctx->free;
            bloc->prev = ctx->free.prev;
            ctx->free.prev = bloc;
            bloc->prev->next = bloc;
            bloc->allocated = 0;
        }
    }

#if DEBUG
    assert(bloc->_reserved0 != 0);
#endif

    shared_heap_notify_unlock(ctx);
}

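/**
 * Grows (or keeps) an allocation. If the bloc that physically follows is free
 * and large enough it is consumed in place; otherwise a new bloc is
 * allocated, the payload copied and the old bloc freed.
 *
 * @return the (possibly moved) pointer, or NULL if no bloc fits
 */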
void*
shared_heap_realloc_from_ctx(struct shared_heap_ctx *ctx, void *ptr, size_t new_size)
{
    struct shared_heap_free_bloc *bloc = (struct shared_heap_free_bloc *)&(((u8*)ptr)[-SHARED_HEAP_BLOC_SIZE]);

    assert(bloc->allocated == 1);

    if(new_size <= (size_t)bloc->real_size - SHARED_HEAP_BLOC_SIZE)
    {
        // the current bloc already has room for the payload
        return ptr;
    }

    shared_heap_lock(ctx);

    struct shared_heap_free_bloc *next_bloc = (struct shared_heap_free_bloc*)&(((u8*)bloc)[bloc->real_size]);

    if(next_bloc->allocated == 0)
    {
        // maybe the next bloc can be stolen from

        size_t real_size = (SHARED_HEAP_BLOC_SIZE + new_size + L1_DATA_LINE_MASK) & ~L1_DATA_LINE_MASK;

        size_t needed_size = real_size - bloc->real_size;

        if((size_t)next_bloc->real_size >= needed_size)
        {
            // steal some memory from the next bloc
            // create a new bloc in the next bloc
            // update pointers

            if((size_t)next_bloc->real_size == needed_size)
            {
                // remove the bloc from the free chain
                // add its space to the current bloc

                shared_heap_grow_allocated_with_following_free_bloc(bloc, next_bloc);

                bloc->size = new_size; // keep the payload size current (used by a later copy-realloc)

                shared_heap_unlock(ctx);
            }
            else
            {
                // split the bloc

                struct shared_heap_free_bloc *split_bloc = (struct shared_heap_free_bloc*)&(((u8*)next_bloc)[needed_size]);
                split_bloc->real_size = next_bloc->real_size - needed_size;
                split_bloc->prev_size = real_size;

                split_bloc->next = next_bloc->next;
                split_bloc->prev = next_bloc->prev;
                split_bloc->next->prev = split_bloc;
                split_bloc->prev->next = split_bloc;

                // bloc prev & next are now irrelevant

                split_bloc->heap_index = next_bloc->heap_index;
                split_bloc->allocated = 0;

                // keep the physical successor's back-link consistent

                struct shared_heap_free_bloc *after_bloc = (struct shared_heap_free_bloc*)&(((u8*)split_bloc)[split_bloc->real_size]);
                after_bloc->prev_size = split_bloc->real_size;

                bloc->real_size = real_size;
                bloc->size = new_size;

                shared_heap_unlock(ctx);
            }

            return ptr;
        }
    }

    // cannot grow : allocate a new bloc, copy into it and free the current one

    shared_heap_unlock(ctx);

    void *new_ptr = shared_heap_alloc_from_ctx(ctx, new_size);
    if(new_ptr != NULL)
    {
        memcpy(new_ptr, ptr, bloc->size);
        shared_heap_free_from_ctx(ctx, ptr);
    }
    return new_ptr;
}

void*
shared_heap_alloc(u8 id, size_t size)
{
    void *ptr = shared_heap_alloc_from_ctx(&shared_heaps[id], size);
#if DEBUG && SHARED_HEAP_ALLOC_DEBUG
#if SHARED_HEAP_ALLOC_PRINT
    osformatln(termout, "%i : shared_heap_alloc(%i, %lli) = %p", getpid(), id, size, ptr);
#endif
    if(ptr != NULL)
    {
        shared_heap_ctx *ctx = &shared_heaps[id];
        debug_memory_by_tag_alloc_notify(ctx->mem_ctx, 0, size);
    }
#endif
    return ptr;
}

void*
shared_heap_wait_alloc(u8 id, size_t size)
{
    void *ptr = shared_heap_wait_alloc_from_ctx(&shared_heaps[id], size);
#if DEBUG && SHARED_HEAP_ALLOC_DEBUG
#if SHARED_HEAP_ALLOC_PRINT
    osformatln(termout, "%i : shared_heap_wait_alloc(%i, %lli) = %p", getpid(), id, size, ptr);
#endif
    if(ptr != NULL)
    {
        shared_heap_ctx *ctx = &shared_heaps[id];
        debug_memory_by_tag_alloc_notify(ctx->mem_ctx, 0, size);
    }
#endif
    return ptr;
}

void*
shared_heap_try_alloc(u8 id, size_t size)
{
    void *ptr = shared_heap_try_alloc_from_ctx(&shared_heaps[id], size);
#if DEBUG && SHARED_HEAP_ALLOC_DEBUG
#if SHARED_HEAP_ALLOC_PRINT
    osformatln(termout, "%i : shared_heap_try_alloc(%i, %lli) = %p", getpid(), id, size, ptr);
#endif
    if(ptr != NULL)
    {
        shared_heap_ctx *ctx = &shared_heaps[id];
        debug_memory_by_tag_alloc_notify(ctx->mem_ctx, 0, size);
    }
#endif
    return ptr;
}

void
shared_heap_free(void *ptr)
{
    assert(ptr != NULL);
    struct shared_heap_free_bloc *bloc = (struct shared_heap_free_bloc *)&(((u8*)ptr)[-SHARED_HEAP_BLOC_SIZE]);

#if DEBUG && SHARED_HEAP_ALLOC_DEBUG
    shared_heap_ctx *ctx = &shared_heaps[bloc->heap_index];
#if SHARED_HEAP_ALLOC_PRINT
    osformatln(termout, "%i : shared_heap_free(%p) of size=%i", getpid(), ptr, bloc->size);
#endif
    debug_memory_by_tag_free_notify(ctx->mem_ctx, 0, bloc->size);
#endif

    shared_heap_free_from_ctx(&shared_heaps[bloc->heap_index], ptr);
}

void*
shared_heap_realloc(u8 id, void *ptr, size_t new_size)
{
    return shared_heap_realloc_from_ctx(&shared_heaps[id], ptr, new_size);
}

struct shared_heap_ctx *
shared_heap_context_from_id(u8 id)
{
    return &shared_heaps[id];
}

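/**
 * Debug helper: walks the whole heap bloc by bloc and the free list node by
 * node, asserting that sizes, back-links and sentinels are consistent.
 */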
void
shared_heap_check(u8 id)
{
    struct shared_heap_ctx *ctx = &shared_heaps[id];

    shared_heap_lock(ctx);

    void *ptr = ctx->base;
    size_t size = ctx->size;

    const struct shared_heap_bloc *header = (const struct shared_heap_bloc *)&(((u8*)ptr)[0]);
    assert(header->real_size == L1_DATA_LINE_SIZE);
    assert(header->prev_size == 0);
    assert(header->heap_index == id);
    assert(header->allocated == 1);
    assert(header->size == 0);
    //memset((struct shared_heap_bloc *)&(((u8*)ptr)[SHARED_HEAP_BLOC_SIZE]), 'H', L1_DATA_LINE_SIZE - SHARED_HEAP_BLOC_SIZE);

    const struct shared_heap_bloc *footer = (const struct shared_heap_bloc *)&(((u8*)ptr)[size - L1_DATA_LINE_SIZE]);
    assert(footer->real_size == L1_DATA_LINE_SIZE);
    //assert(footer->prev_size == size - L1_DATA_LINE_SIZE * 2);
    assert(footer->heap_index == id);
    assert(footer->allocated == 1);
    assert(footer->size == 0);
    //memset((struct shared_heap_bloc *)&(((u8*)ptr)[size - L1_DATA_LINE_SIZE + SHARED_HEAP_BLOC_SIZE]), 'F', L1_DATA_LINE_SIZE - SHARED_HEAP_BLOC_SIZE);

    const struct shared_heap_bloc *prev_bloc = header;

    // walk the heap physically, bloc by bloc

    for(;;)
    {
        const struct shared_heap_bloc *bloc = (const struct shared_heap_bloc *)&(((u8*)prev_bloc)[prev_bloc->real_size]);
        if(bloc >= footer)
        {
            assert(bloc == footer);
            break;
        }
        assert(bloc->prev_size == prev_bloc->real_size);
        assert(bloc->heap_index == id);
        prev_bloc = bloc;
    }

    // walk the free list

    const struct shared_heap_free_bloc *pf = &ctx->free;
    for(;;)
    {
        const struct shared_heap_free_bloc *f = pf->next;

        assert(f->heap_index == id);
        assert(f->prev == pf);

        if(f == &ctx->free)
        {
            break;
        }

        assert(f->allocated == 0);

        pf = f;
    }

    //assert(footer->prev_size == prev_bloc->real_size);

    shared_heap_unlock(ctx);
}

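/**
 * Walks the heap and reports the total real_size of allocated blocs and their
 * count. Either output pointer may be NULL.
 */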
void
shared_heap_count_allocated(u8 id, size_t *totalp, size_t *countp)
{
    struct shared_heap_ctx *ctx = &shared_heaps[id];

    shared_heap_lock(ctx);

    const void *ptr = ctx->base;
    size_t size = ctx->size;

    // start the walk at the header sentinel so the first real bloc is visited too
    const struct shared_heap_bloc *header = (const struct shared_heap_bloc *)&(((u8*)ptr)[0]);
    const struct shared_heap_bloc *footer = (const struct shared_heap_bloc *)&(((u8*)ptr)[size - L1_DATA_LINE_SIZE]);
    const struct shared_heap_bloc *prev_bloc = header;

    size_t total = 0;
    size_t count = 0;

    for(;;)
    {
        const struct shared_heap_bloc *bloc = (const struct shared_heap_bloc *)&(((u8*)prev_bloc)[prev_bloc->real_size]);

        if(bloc >= footer)
        {
            break;
        }

        if(bloc->allocated == 1)
        {
            total += bloc->real_size;
            ++count;
        }

        prev_bloc = bloc;
    }

    shared_heap_unlock(ctx);

    if(totalp != NULL)
    {
        *totalp = total;
    }

    if(countp != NULL)
    {
        *countp = count;
    }
}

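/**
 * DEBUG only: prints the heap as ranges of consecutive blocs sharing the same
 * allocated state, followed by allocated totals. A no-op in non-DEBUG builds
 * (the output parameters are left untouched).
 */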
void
shared_heap_print_map(u8 id, size_t *totalp, size_t *countp)
{
#if DEBUG
    struct shared_heap_ctx *ctx = &shared_heaps[id];

    shared_heap_lock(ctx);

    const void *ptr = ctx->base;
    size_t size = ctx->size;

    // first real bloc, right after the header sentinel
    const struct shared_heap_bloc *header = (const struct shared_heap_bloc *)&(((u8*)ptr)[L1_DATA_LINE_SIZE]);
    const struct shared_heap_bloc *footer = (const struct shared_heap_bloc *)&(((u8*)ptr)[size - L1_DATA_LINE_SIZE]);

    size_t total = 0;
    size_t count = 0;

    u8 allocated = 255;
    const u8 *range_start = NULL;

    const struct shared_heap_bloc *bloc = header;

    for(;;)
    {
        if(bloc >= footer)
        {
            if(range_start != NULL)
            {
                formatln("shared-heap[%i] [%p ; %p] %8x allocated=%i", id, range_start, ((u8*)bloc) - 1, (u8*)bloc - range_start, allocated);
            }

            break;
        }

        if(bloc->allocated != allocated)
        {
            if(range_start != NULL)
            {
                formatln("shared-heap[%i] [%p ; %p] %8x allocated=%i", id, range_start, ((u8*)bloc) - 1, (u8*)bloc - range_start, allocated);
            }

            range_start = (const u8*)bloc;
            allocated = bloc->allocated;
        }

        if(bloc->allocated == 1)
        {
            total += bloc->real_size;
            ++count;
        }

        const struct shared_heap_bloc *next_bloc = (const struct shared_heap_bloc*)&(((u8*)bloc)[bloc->real_size]);
        bloc = next_bloc;
    }

    shared_heap_unlock(ctx);

    if(totalp != NULL)
    {
        *totalp = total;
    }

    if(countp != NULL)
    {
        *countp = count;
    }

    formatln("shared-heap[%i] total=%llu count=%llu", id, total, count);
#else
    (void)id;
    (void)totalp;
    (void)countp;
#endif
}

/** @} */