/*
 * Copyright 2015-2023 The OpenSSL Project Authors. All Rights Reserved.
 * Copyright 2004-2014, Akamai Technologies. All Rights Reserved.
 *
 * Licensed under the Apache License 2.0 (the "License").  You may not use
 * this file except in compliance with the License.  You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

/*
 * This file is in two halves. The first half implements the public API
 * to be used by external consumers, and to be used by OpenSSL to store
 * data in a "secure arena." The second half implements the secure arena.
 * For details on that implementation, see below (look for uppercase
 * "SECURE HEAP IMPLEMENTATION").
 */
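
/*
 * A minimal usage sketch of the public API (illustrative only: the heap
 * size, minimum allocation size, and error handling below are arbitrary
 * choices, not requirements of the API):
 *
 *     if (CRYPTO_secure_malloc_init(32768, 32) == 0)
 *         return 0;                         -- no secure heap available
 *     key = OPENSSL_secure_zalloc(32);      -- satisfied from the arena
 *     ...
 *     OPENSSL_secure_clear_free(key, 32);   -- cleansed before release
 *     CRYPTO_secure_malloc_done();          -- succeeds only once every
 *                                              secure allocation is freed
 */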
#include "e_os.h"
#include <openssl/crypto.h>

#include <string.h>

#ifndef OPENSSL_NO_SECURE_MEMORY
# if defined(_WIN32)
#  include <windows.h>
#  if defined(WINAPI_FAMILY_PARTITION)
#   if !WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP | WINAPI_PARTITION_SYSTEM)
/*
 * While VirtualLock is available under the app partition (e.g. UWP),
 * the headers do not define the API. Define it ourselves instead.
 */
WINBASEAPI
BOOL
WINAPI
VirtualLock(
    _In_ LPVOID lpAddress,
    _In_ SIZE_T dwSize
    );
#   endif
#  endif
# endif
# include <stdlib.h>
# include <assert.h>
# if defined(OPENSSL_SYS_UNIX)
#  include <unistd.h>
# endif
# include <sys/types.h>
# if defined(OPENSSL_SYS_UNIX)
#  include <sys/mman.h>
#  if defined(__FreeBSD__)
#    define MADV_DONTDUMP MADV_NOCORE
#  endif
#  if !defined(MAP_CONCEAL)
#    define MAP_CONCEAL 0
#  endif
# endif
# if defined(OPENSSL_SYS_LINUX)
#  include <sys/syscall.h>
#  if defined(SYS_mlock2)
#   include <linux/mman.h>
#   include <errno.h>
#  endif
#  include <sys/param.h>
# endif
# include <sys/stat.h>
# include <fcntl.h>
#endif

#define CLEAR(p, s) OPENSSL_cleanse(p, s)
#ifndef PAGE_SIZE
# define PAGE_SIZE    4096
#endif
#if !defined(MAP_ANON) && defined(MAP_ANONYMOUS)
# define MAP_ANON MAP_ANONYMOUS
#endif

#ifndef OPENSSL_NO_SECURE_MEMORY
static size_t secure_mem_used;

static int secure_mem_initialized;

static CRYPTO_RWLOCK *sec_malloc_lock = NULL;

/*
 * These are the functions that must be implemented by a secure heap (sh).
 */
static int sh_init(size_t size, size_t minsize);
static void *sh_malloc(size_t size);
static void sh_free(void *ptr);
static void sh_done(void);
static size_t sh_actual_size(char *ptr);
static int sh_allocated(const char *ptr);
#endif

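/*
 * Returns 0 on failure (or if the secure heap is already initialized),
 * 1 on success, or 2 if the arena was allocated but could not be fully
 * protected (guard pages, locking or madvise failed).
 */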
int CRYPTO_secure_malloc_init(size_t size, size_t minsize)
{
#ifndef OPENSSL_NO_SECURE_MEMORY
    int ret = 0;

    if (!secure_mem_initialized) {
        sec_malloc_lock = CRYPTO_THREAD_lock_new();
        if (sec_malloc_lock == NULL)
            return 0;
        if ((ret = sh_init(size, minsize)) != 0) {
            secure_mem_initialized = 1;
        } else {
            CRYPTO_THREAD_lock_free(sec_malloc_lock);
            sec_malloc_lock = NULL;
        }
    }

    return ret;
#else
    return 0;
#endif /* OPENSSL_NO_SECURE_MEMORY */
}

int CRYPTO_secure_malloc_done(void)
{
#ifndef OPENSSL_NO_SECURE_MEMORY
    if (secure_mem_used == 0) {
        sh_done();
        secure_mem_initialized = 0;
        CRYPTO_THREAD_lock_free(sec_malloc_lock);
        sec_malloc_lock = NULL;
        return 1;
    }
#endif /* OPENSSL_NO_SECURE_MEMORY */
    return 0;
}

int CRYPTO_secure_malloc_initialized(void)
{
#ifndef OPENSSL_NO_SECURE_MEMORY
    return secure_mem_initialized;
#else
    return 0;
#endif /* OPENSSL_NO_SECURE_MEMORY */
}

void *CRYPTO_secure_malloc(size_t num, const char *file, int line)
{
#ifndef OPENSSL_NO_SECURE_MEMORY
    void *ret;
    size_t actual_size;

    if (!secure_mem_initialized) {
        return CRYPTO_malloc(num, file, line);
    }
    if (!CRYPTO_THREAD_write_lock(sec_malloc_lock))
        return NULL;
    ret = sh_malloc(num);
    actual_size = ret ? sh_actual_size(ret) : 0;
    secure_mem_used += actual_size;
    CRYPTO_THREAD_unlock(sec_malloc_lock);
    return ret;
#else
    return CRYPTO_malloc(num, file, line);
#endif /* OPENSSL_NO_SECURE_MEMORY */
}

void *CRYPTO_secure_zalloc(size_t num, const char *file, int line)
{
#ifndef OPENSSL_NO_SECURE_MEMORY
    if (secure_mem_initialized)
        /* CRYPTO_secure_malloc() zeroes allocations when it is implemented */
        return CRYPTO_secure_malloc(num, file, line);
#endif
    return CRYPTO_zalloc(num, file, line);
}

void CRYPTO_secure_free(void *ptr, const char *file, int line)
{
#ifndef OPENSSL_NO_SECURE_MEMORY
    size_t actual_size;

    if (ptr == NULL)
        return;
    if (!CRYPTO_secure_allocated(ptr)) {
        CRYPTO_free(ptr, file, line);
        return;
    }
    if (!CRYPTO_THREAD_write_lock(sec_malloc_lock))
        return;
    actual_size = sh_actual_size(ptr);
    CLEAR(ptr, actual_size);
    secure_mem_used -= actual_size;
    sh_free(ptr);
    CRYPTO_THREAD_unlock(sec_malloc_lock);
#else
    CRYPTO_free(ptr, file, line);
#endif /* OPENSSL_NO_SECURE_MEMORY */
}

void CRYPTO_secure_clear_free(void *ptr, size_t num,
                              const char *file, int line)
{
#ifndef OPENSSL_NO_SECURE_MEMORY
    size_t actual_size;

    if (ptr == NULL)
        return;
    if (!CRYPTO_secure_allocated(ptr)) {
        OPENSSL_cleanse(ptr, num);
        CRYPTO_free(ptr, file, line);
        return;
    }
    if (!CRYPTO_THREAD_write_lock(sec_malloc_lock))
        return;
    actual_size = sh_actual_size(ptr);
    CLEAR(ptr, actual_size);
    secure_mem_used -= actual_size;
    sh_free(ptr);
    CRYPTO_THREAD_unlock(sec_malloc_lock);
#else
    if (ptr == NULL)
        return;
    OPENSSL_cleanse(ptr, num);
    CRYPTO_free(ptr, file, line);
#endif /* OPENSSL_NO_SECURE_MEMORY */
}

int CRYPTO_secure_allocated(const void *ptr)
{
#ifndef OPENSSL_NO_SECURE_MEMORY
    if (!secure_mem_initialized)
        return 0;
    /*
     * Only read accesses to the arena take place in sh_allocated(), and the
     * arena itself is only changed by the sh_init() and sh_done() calls,
     * which are not locked.  Hence, it is safe to make this check without
     * a lock too.
     */
    return sh_allocated(ptr);
#else
    return 0;
#endif /* OPENSSL_NO_SECURE_MEMORY */
}

size_t CRYPTO_secure_used(void)
{
#ifndef OPENSSL_NO_SECURE_MEMORY
    return secure_mem_used;
#else
    return 0;
#endif /* OPENSSL_NO_SECURE_MEMORY */
}

size_t CRYPTO_secure_actual_size(void *ptr)
{
#ifndef OPENSSL_NO_SECURE_MEMORY
    size_t actual_size;

    if (!CRYPTO_THREAD_write_lock(sec_malloc_lock))
        return 0;
    actual_size = sh_actual_size(ptr);
    CRYPTO_THREAD_unlock(sec_malloc_lock);
    return actual_size;
#else
    return 0;
#endif
}

/*
 * SECURE HEAP IMPLEMENTATION
 */
#ifndef OPENSSL_NO_SECURE_MEMORY


/*
 * The implementation provided here uses a fixed-sized mmap() heap,
 * which is locked into memory, not written to core files, and protected
 * on either side by an unmapped page, which will catch pointer overruns
 * (or underruns) and attempts to read data out of the secure heap.
 * Freed memory is zeroed or otherwise cleansed.
 *
 * This is a pretty standard buddy allocator.  We keep areas in multiples
 * of "sh.minsize" units.  The freelist and bitmaps are kept separately,
 * so all (and only) data is kept in the mmap'd heap.
 *
 * This code assumes eight-bit bytes.  The numbers 3 and 7 are all over the
 * place.
 */

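/*
 * An illustrative example of the bookkeeping (sizes chosen arbitrarily):
 * with size == 4096 and minsize == 16, sh_init() builds 9 freelists.
 * List 0 holds the single 4096-byte chunk, list 1 holds 2048-byte chunks,
 * and so on down to list 8, which holds 16-byte chunks.  A chunk at byte
 * offset "off" on list "l" corresponds to bit (1 << l) + off / (4096 >> l)
 * in the bit tables below, and its buddy is the chunk whose bit differs
 * only in the lowest bit (see sh_find_my_buddy()).  For instance, the
 * 16-byte chunk at offset 48 is bit 256 + 3 = 259, and its buddy (the
 * chunk at offset 32) is bit 258.
 */
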
# define ONE ((size_t)1)

# define TESTBIT(t, b)  (t[(b) >> 3] &  (ONE << ((b) & 7)))
# define SETBIT(t, b)   (t[(b) >> 3] |= (ONE << ((b) & 7)))
# define CLEARBIT(t, b) (t[(b) >> 3] &= (0xFF & ~(ONE << ((b) & 7))))

# define WITHIN_ARENA(p) \
    ((char*)(p) >= sh.arena && (char*)(p) < &sh.arena[sh.arena_size])
# define WITHIN_FREELIST(p) \
    ((char*)(p) >= (char*)sh.freelist && (char*)(p) < (char*)&sh.freelist[sh.freelist_size])


typedef struct sh_list_st
{
    struct sh_list_st *next;
    struct sh_list_st **p_next;
} SH_LIST;

typedef struct sh_st
{
    char* map_result;
    size_t map_size;
    char *arena;
    size_t arena_size;
    char **freelist;
    ossl_ssize_t freelist_size;
    size_t minsize;
    unsigned char *bittable;
    unsigned char *bitmalloc;
    size_t bittable_size; /* size in bits */
} SH;

static SH sh;

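/*
 * Return the freelist index of the chunk containing |ptr| by walking up
 * the bit tree from the smallest possible chunk until a set bit is found.
 */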
static size_t sh_getlist(char *ptr)
{
    ossl_ssize_t list = sh.freelist_size - 1;
    size_t bit = (sh.arena_size + ptr - sh.arena) / sh.minsize;

    for (; bit; bit >>= 1, list--) {
        if (TESTBIT(sh.bittable, bit))
            break;
        OPENSSL_assert((bit & 1) == 0);
    }

    return list;
}


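/*
 * sh_testbit()/sh_setbit()/sh_clearbit() map a chunk address and its
 * freelist index to the corresponding bit in |table|, which is either
 * sh.bittable (chunks that currently exist at that size) or sh.bitmalloc
 * (chunks that are currently allocated).
 */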
static int sh_testbit(char *ptr, int list, unsigned char *table)
{
    size_t bit;

    OPENSSL_assert(list >= 0 && list < sh.freelist_size);
    OPENSSL_assert(((ptr - sh.arena) & ((sh.arena_size >> list) - 1)) == 0);
    bit = (ONE << list) + ((ptr - sh.arena) / (sh.arena_size >> list));
    OPENSSL_assert(bit > 0 && bit < sh.bittable_size);
    return TESTBIT(table, bit);
}

static void sh_clearbit(char *ptr, int list, unsigned char *table)
{
    size_t bit;

    OPENSSL_assert(list >= 0 && list < sh.freelist_size);
    OPENSSL_assert(((ptr - sh.arena) & ((sh.arena_size >> list) - 1)) == 0);
    bit = (ONE << list) + ((ptr - sh.arena) / (sh.arena_size >> list));
    OPENSSL_assert(bit > 0 && bit < sh.bittable_size);
    OPENSSL_assert(TESTBIT(table, bit));
    CLEARBIT(table, bit);
}

static void sh_setbit(char *ptr, int list, unsigned char *table)
{
    size_t bit;

    OPENSSL_assert(list >= 0 && list < sh.freelist_size);
    OPENSSL_assert(((ptr - sh.arena) & ((sh.arena_size >> list) - 1)) == 0);
    bit = (ONE << list) + ((ptr - sh.arena) / (sh.arena_size >> list));
    OPENSSL_assert(bit > 0 && bit < sh.bittable_size);
    OPENSSL_assert(!TESTBIT(table, bit));
    SETBIT(table, bit);
}

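/*
 * The freelists are linked lists threaded through the free chunks
 * themselves (see SH_LIST); p_next points back at the previous element's
 * next pointer (or at the list head), so removal needs no search.
 */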
static void sh_add_to_list(char **list, char *ptr)
{
    SH_LIST *temp;

    OPENSSL_assert(WITHIN_FREELIST(list));
    OPENSSL_assert(WITHIN_ARENA(ptr));

    temp = (SH_LIST *)ptr;
    temp->next = *(SH_LIST **)list;
    OPENSSL_assert(temp->next == NULL || WITHIN_ARENA(temp->next));
    temp->p_next = (SH_LIST **)list;

    if (temp->next != NULL) {
        OPENSSL_assert((char **)temp->next->p_next == list);
        temp->next->p_next = &(temp->next);
    }

    *list = ptr;
}

static void sh_remove_from_list(char *ptr)
{
    SH_LIST *temp, *temp2;

    temp = (SH_LIST *)ptr;
    if (temp->next != NULL)
        temp->next->p_next = temp->p_next;
    *temp->p_next = temp->next;
    if (temp->next == NULL)
        return;

    temp2 = temp->next;
    OPENSSL_assert(WITHIN_FREELIST(temp2->p_next) || WITHIN_ARENA(temp2->p_next));
}


static int sh_init(size_t size, size_t minsize)
{
    int ret;
    size_t i;
    size_t pgsize;
    size_t aligned;
#if defined(_WIN32)
    DWORD flOldProtect;
    SYSTEM_INFO systemInfo;
#endif

    memset(&sh, 0, sizeof(sh));

    /* make sure size is a power of 2 */
    OPENSSL_assert(size > 0);
    OPENSSL_assert((size & (size - 1)) == 0);
    if (size == 0 || (size & (size - 1)) != 0)
        goto err;

    if (minsize <= sizeof(SH_LIST)) {
        OPENSSL_assert(sizeof(SH_LIST) <= 65536);
        /*
         * Compute the minimum possible allocation size.
         * This must be a power of 2 and at least as large as the SH_LIST
         * structure.
         */
        minsize = sizeof(SH_LIST) - 1;
        minsize |= minsize >> 1;
        minsize |= minsize >> 2;
        if (sizeof(SH_LIST) > 16)
            minsize |= minsize >> 4;
        if (sizeof(SH_LIST) > 256)
            minsize |= minsize >> 8;
        minsize++;
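        /*
         * For example, with a 16-byte SH_LIST (two pointers on a 64-bit
         * build) the smears above leave 15 and the increment rounds
         * minsize up to 16.
         */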
    } else {
        /* make sure minsize is a power of 2 */
        OPENSSL_assert((minsize & (minsize - 1)) == 0);
        if ((minsize & (minsize - 1)) != 0)
            goto err;
    }

    sh.arena_size = size;
    sh.minsize = minsize;
    sh.bittable_size = (sh.arena_size / sh.minsize) * 2;

    /* Prevent allocations of size 0 later on */
    if (sh.bittable_size >> 3 == 0)
        goto err;

    sh.freelist_size = -1;
    for (i = sh.bittable_size; i; i >>= 1)
        sh.freelist_size++;

    sh.freelist = OPENSSL_zalloc(sh.freelist_size * sizeof(char *));
    OPENSSL_assert(sh.freelist != NULL);
    if (sh.freelist == NULL)
        goto err;

    sh.bittable = OPENSSL_zalloc(sh.bittable_size >> 3);
    OPENSSL_assert(sh.bittable != NULL);
    if (sh.bittable == NULL)
        goto err;

    sh.bitmalloc = OPENSSL_zalloc(sh.bittable_size >> 3);
    OPENSSL_assert(sh.bitmalloc != NULL);
    if (sh.bitmalloc == NULL)
        goto err;

    /* Allocate space for the heap, and two extra pages as guards */
#if defined(_SC_PAGE_SIZE) || defined (_SC_PAGESIZE)
    {
# if defined(_SC_PAGE_SIZE)
        long tmppgsize = sysconf(_SC_PAGE_SIZE);
# else
        long tmppgsize = sysconf(_SC_PAGESIZE);
# endif
        if (tmppgsize < 1)
            pgsize = PAGE_SIZE;
        else
            pgsize = (size_t)tmppgsize;
    }
#elif defined(_WIN32)
    GetSystemInfo(&systemInfo);
    pgsize = (size_t)systemInfo.dwPageSize;
#else
    pgsize = PAGE_SIZE;
#endif
    sh.map_size = pgsize + sh.arena_size + pgsize;

#if !defined(_WIN32)
# ifdef MAP_ANON
    sh.map_result = mmap(NULL, sh.map_size,
                         PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE|MAP_CONCEAL, -1, 0);
# else
    {
        int fd;

        sh.map_result = MAP_FAILED;
        if ((fd = open("/dev/zero", O_RDWR)) >= 0) {
            sh.map_result = mmap(NULL, sh.map_size,
                                 PROT_READ|PROT_WRITE, MAP_PRIVATE, fd, 0);
            close(fd);
        }
    }
# endif
    if (sh.map_result == MAP_FAILED)
        goto err;
#else
    sh.map_result = VirtualAlloc(NULL, sh.map_size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);

    if (sh.map_result == NULL)
        goto err;
#endif

    sh.arena = (char *)(sh.map_result + pgsize);
    sh_setbit(sh.arena, 0, sh.bittable);
    sh_add_to_list(&sh.freelist[0], sh.arena);

    /* Now try to add guard pages and lock into memory. */
    ret = 1;

#if !defined(_WIN32)
    /* Starting guard is already aligned from mmap. */
    if (mprotect(sh.map_result, pgsize, PROT_NONE) < 0)
        ret = 2;
#else
    if (VirtualProtect(sh.map_result, pgsize, PAGE_NOACCESS, &flOldProtect) == FALSE)
        ret = 2;
#endif

    /* Ending guard page - need to round up to page boundary */
    aligned = (pgsize + sh.arena_size + (pgsize - 1)) & ~(pgsize - 1);
#if !defined(_WIN32)
    if (mprotect(sh.map_result + aligned, pgsize, PROT_NONE) < 0)
        ret = 2;
#else
    if (VirtualProtect(sh.map_result + aligned, pgsize, PAGE_NOACCESS, &flOldProtect) == FALSE)
        ret = 2;
#endif

#if defined(OPENSSL_SYS_LINUX) && defined(MLOCK_ONFAULT) && defined(SYS_mlock2)
    if (syscall(SYS_mlock2, sh.arena, sh.arena_size, MLOCK_ONFAULT) < 0) {
        if (errno == ENOSYS) {
            if (mlock(sh.arena, sh.arena_size) < 0)
                ret = 2;
        } else {
            ret = 2;
        }
    }
#elif defined(_WIN32)
    if (VirtualLock(sh.arena, sh.arena_size) == FALSE)
        ret = 2;
#else
    if (mlock(sh.arena, sh.arena_size) < 0)
        ret = 2;
#endif
#ifdef MADV_DONTDUMP
    if (madvise(sh.arena, sh.arena_size, MADV_DONTDUMP) < 0)
        ret = 2;
#endif

    return ret;

 err:
    sh_done();
    return 0;
}

static void sh_done(void)
{
    OPENSSL_free(sh.freelist);
    OPENSSL_free(sh.bittable);
    OPENSSL_free(sh.bitmalloc);
#if !defined(_WIN32)
    if (sh.map_result != MAP_FAILED && sh.map_size)
        munmap(sh.map_result, sh.map_size);
#else
    if (sh.map_result != NULL && sh.map_size)
        VirtualFree(sh.map_result, 0, MEM_RELEASE);
#endif
    memset(&sh, 0, sizeof(sh));
}

static int sh_allocated(const char *ptr)
{
    return WITHIN_ARENA(ptr) ? 1 : 0;
}

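/*
 * Return the buddy of the chunk at |ptr| on freelist |list|, i.e. the
 * neighbouring chunk it was split from, but only if that buddy currently
 * exists at the same size and is free; otherwise return NULL.
 */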
static char *sh_find_my_buddy(char *ptr, int list)
{
    size_t bit;
    char *chunk = NULL;

    bit = (ONE << list) + (ptr - sh.arena) / (sh.arena_size >> list);
    bit ^= 1;

    if (TESTBIT(sh.bittable, bit) && !TESTBIT(sh.bitmalloc, bit))
        chunk = sh.arena + ((bit & ((ONE << list) - 1)) * (sh.arena_size >> list));

    return chunk;
}

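/*
 * Allocate a chunk of at least |size| bytes.  The request is rounded up
 * to the smallest chunk size (sh.minsize << n) that can hold it; if no
 * free chunk of that size exists, a larger free chunk is split repeatedly
 * until one does.
 */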
static void *sh_malloc(size_t size)
{
    ossl_ssize_t list, slist;
    size_t i;
    char *chunk;

    if (size > sh.arena_size)
        return NULL;

    list = sh.freelist_size - 1;
    for (i = sh.minsize; i < size; i <<= 1)
        list--;
    if (list < 0)
        return NULL;

    /* try to find a larger entry to split */
    for (slist = list; slist >= 0; slist--)
        if (sh.freelist[slist] != NULL)
            break;
    if (slist < 0)
        return NULL;

    /* split larger entry */
    while (slist != list) {
        char *temp = sh.freelist[slist];

        /* remove from bigger list */
        OPENSSL_assert(!sh_testbit(temp, slist, sh.bitmalloc));
        sh_clearbit(temp, slist, sh.bittable);
        sh_remove_from_list(temp);
        OPENSSL_assert(temp != sh.freelist[slist]);

        /* done with bigger list */
        slist++;

        /* add to smaller list */
        OPENSSL_assert(!sh_testbit(temp, slist, sh.bitmalloc));
        sh_setbit(temp, slist, sh.bittable);
        sh_add_to_list(&sh.freelist[slist], temp);
        OPENSSL_assert(sh.freelist[slist] == temp);

        /* split in 2 */
        temp += sh.arena_size >> slist;
        OPENSSL_assert(!sh_testbit(temp, slist, sh.bitmalloc));
        sh_setbit(temp, slist, sh.bittable);
        sh_add_to_list(&sh.freelist[slist], temp);
        OPENSSL_assert(sh.freelist[slist] == temp);

        OPENSSL_assert(temp-(sh.arena_size >> slist) == sh_find_my_buddy(temp, slist));
    }

    /* peel off memory to hand back */
    chunk = sh.freelist[list];
    OPENSSL_assert(sh_testbit(chunk, list, sh.bittable));
    sh_setbit(chunk, list, sh.bitmalloc);
    sh_remove_from_list(chunk);

    OPENSSL_assert(WITHIN_ARENA(chunk));

    /* zero the free list header as a precaution against information leakage */
    memset(chunk, 0, sizeof(SH_LIST));

    return chunk;
}

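/*
 * Return a chunk to its freelist, then keep merging it with its buddy
 * (moving to ever larger chunk sizes) for as long as the buddy is free.
 */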
static void sh_free(void *ptr)
{
    size_t list;
    void *buddy;

    if (ptr == NULL)
        return;
    OPENSSL_assert(WITHIN_ARENA(ptr));
    if (!WITHIN_ARENA(ptr))
        return;

    list = sh_getlist(ptr);
    OPENSSL_assert(sh_testbit(ptr, list, sh.bittable));
    sh_clearbit(ptr, list, sh.bitmalloc);
    sh_add_to_list(&sh.freelist[list], ptr);

    /* Try to coalesce two adjacent free areas. */
    while ((buddy = sh_find_my_buddy(ptr, list)) != NULL) {
        OPENSSL_assert(ptr == sh_find_my_buddy(buddy, list));
        OPENSSL_assert(ptr != NULL);
        OPENSSL_assert(!sh_testbit(ptr, list, sh.bitmalloc));
        sh_clearbit(ptr, list, sh.bittable);
        sh_remove_from_list(ptr);
        OPENSSL_assert(!sh_testbit(ptr, list, sh.bitmalloc));
        sh_clearbit(buddy, list, sh.bittable);
        sh_remove_from_list(buddy);

        list--;

        /* Zero the higher addressed block's free list pointers */
        memset(ptr > buddy ? ptr : buddy, 0, sizeof(SH_LIST));
        if (ptr > buddy)
            ptr = buddy;

        OPENSSL_assert(!sh_testbit(ptr, list, sh.bitmalloc));
        sh_setbit(ptr, list, sh.bittable);
        sh_add_to_list(&sh.freelist[list], ptr);
        OPENSSL_assert(sh.freelist[list] == ptr);
    }
}

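/*
 * Return the usable size of the allocated chunk at |ptr|, or 0 if |ptr|
 * does not lie within the arena.
 */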
static size_t sh_actual_size(char *ptr)
{
    int list;

    OPENSSL_assert(WITHIN_ARENA(ptr));
    if (!WITHIN_ARENA(ptr))
        return 0;
    list = sh_getlist(ptr);
    OPENSSL_assert(sh_testbit(ptr, list, sh.bittable));
    return sh.arena_size / (ONE << list);
}
#endif /* OPENSSL_NO_SECURE_MEMORY */