/*
     This file is part of libmicrohttpd
     Copyright (C) 2007--2021 Daniel Pittman, Christian Grothoff, and
     Karlson2k (Evgeny Grin)

     This library is free software; you can redistribute it and/or
     modify it under the terms of the GNU Lesser General Public
     License as published by the Free Software Foundation; either
     version 2.1 of the License, or (at your option) any later version.

     This library is distributed in the hope that it will be useful,
     but WITHOUT ANY WARRANTY; without even the implied warranty of
     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
     Lesser General Public License for more details.

     You should have received a copy of the GNU Lesser General Public
     License along with this library; if not, write to the Free Software
     Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
*/

/**
 * @file memorypool.c
 * @brief memory pool
 * @author Christian Grothoff
 * @author Karlson2k (Evgeny Grin)
 */
#include "memorypool.h"
#ifdef HAVE_STDLIB_H
#include <stdlib.h>
#endif /* HAVE_STDLIB_H */
#include <string.h>
#include <stdint.h>
#include "mhd_assert.h"
#if HAVE_SYS_MMAN_H
#include <sys/mman.h>
#endif
#ifdef _WIN32
#include <windows.h>
#endif
#ifdef HAVE_SYSCONF
#include <unistd.h>
#if defined(_SC_PAGE_SIZE)
#define MHD_SC_PAGESIZE _SC_PAGE_SIZE
#elif defined(_SC_PAGESIZE)
#define MHD_SC_PAGESIZE _SC_PAGESIZE
#endif /* _SC_PAGESIZE */
#endif /* HAVE_SYSCONF */
#include "mhd_limits.h" /* for SIZE_MAX, PAGESIZE / PAGE_SIZE */

#if defined(MHD_USE_PAGESIZE_MACRO) || defined (MHD_USE_PAGE_SIZE_MACRO)
#ifndef HAVE_SYSCONF /* Avoid duplicate include */
#include <unistd.h>
#endif /* HAVE_SYSCONF */
#ifdef HAVE_SYS_PARAM_H
#include <sys/param.h>
#endif /* HAVE_SYS_PARAM_H */
#endif /* MHD_USE_PAGESIZE_MACRO || MHD_USE_PAGE_SIZE_MACRO */

/**
 * Fallback value of page size
 */
#define _MHD_FALLBACK_PAGE_SIZE (4096)

#if defined(MHD_USE_PAGESIZE_MACRO)
#define MHD_DEF_PAGE_SIZE_ PAGESIZE
#elif defined(MHD_USE_PAGE_SIZE_MACRO)
#define MHD_DEF_PAGE_SIZE_ PAGE_SIZE
#else  /* ! PAGESIZE */
#define MHD_DEF_PAGE_SIZE_ _MHD_FALLBACK_PAGE_SIZE
#endif /* ! PAGESIZE */


#ifdef MHD_ASAN_POISON_ACTIVE
#include <sanitizer/asan_interface.h>
#endif /* MHD_ASAN_POISON_ACTIVE */

/* define MAP_ANONYMOUS for Mac OS X */
#if defined(MAP_ANON) && ! defined(MAP_ANONYMOUS)
#define MAP_ANONYMOUS MAP_ANON
#endif
#if defined(_WIN32)
#define MAP_FAILED NULL
#elif ! defined(MAP_FAILED)
#define MAP_FAILED ((void*) -1)
#endif

/**
 * Align to 2x word size (as GNU libc does).
 */
#define ALIGN_SIZE (2 * sizeof(void*))

/**
 * Round up 'n' to a multiple of ALIGN_SIZE.
 */
#define ROUND_TO_ALIGN(n) (((n) + (ALIGN_SIZE - 1)) \
                           / (ALIGN_SIZE) *(ALIGN_SIZE))
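
/* Illustration (not part of the build): on a platform where sizeof(void*)
 * is 8, ALIGN_SIZE is 16, so ROUND_TO_ALIGN(1) == 16,
 * ROUND_TO_ALIGN(16) == 16 and ROUND_TO_ALIGN(17) == 32.  The integer
 * division truncates before the multiplication, which is what rounds the
 * value up to the next multiple of ALIGN_SIZE. */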


#ifndef MHD_ASAN_POISON_ACTIVE
#define _MHD_NOSANITIZE_PTRS /**/
#define _MHD_RED_ZONE_SIZE (0)
#define ROUND_TO_ALIGN_PLUS_RED_ZONE(n) ROUND_TO_ALIGN(n)
#define _MHD_POISON_MEMORY(pointer, size) (void)0
#define _MHD_UNPOISON_MEMORY(pointer, size) (void)0
#else  /* MHD_ASAN_POISON_ACTIVE */
#if defined(FUNC_ATTR_PTRCOMPARE_WOKRS)
#define _MHD_NOSANITIZE_PTRS \
  __attribute__((no_sanitize("pointer-compare","pointer-subtract")))
#elif defined(FUNC_ATTR_NOSANITIZE_WORKS)
#define _MHD_NOSANITIZE_PTRS __attribute__((no_sanitize("address")))
#endif
#define _MHD_RED_ZONE_SIZE (ALIGN_SIZE)
#define ROUND_TO_ALIGN_PLUS_RED_ZONE(n) (ROUND_TO_ALIGN(n) + _MHD_RED_ZONE_SIZE)
#define _MHD_POISON_MEMORY(pointer, size) \
  ASAN_POISON_MEMORY_REGION ((pointer), (size))
#define _MHD_UNPOISON_MEMORY(pointer, size) \
  ASAN_UNPOISON_MEMORY_REGION ((pointer), (size))
#endif /* MHD_ASAN_POISON_ACTIVE */
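
/* Illustration (not part of the build): with ASan poisoning active and
 * ALIGN_SIZE == 16, a request for 10 bytes consumes
 * ROUND_TO_ALIGN_PLUS_RED_ZONE(10) == 16 + 16 == 32 bytes of pool space:
 * 16 accessible bytes followed by a 16-byte poisoned "red zone", so that
 * an overrun into the neighbouring block is detectable. */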

/**
 * Size of memory page
 */
static size_t MHD_sys_page_size_ =
#if defined(MHD_USE_PAGESIZE_MACRO_STATIC)
  PAGESIZE;
#elif defined(MHD_USE_PAGE_SIZE_MACRO_STATIC)
  PAGE_SIZE;
#else  /* ! MHD_USE_PAGE_SIZE_MACRO_STATIC */
  _MHD_FALLBACK_PAGE_SIZE;   /* Default fallback value */
#endif /* ! MHD_USE_PAGE_SIZE_MACRO_STATIC */

/**
 * Initialise values for memory pools
 */
void
MHD_init_mem_pools_ (void)
{
#ifdef MHD_SC_PAGESIZE
  long result;
  result = sysconf (MHD_SC_PAGESIZE);
  if (-1 != result)
    MHD_sys_page_size_ = (size_t) result;
  else
    MHD_sys_page_size_ = MHD_DEF_PAGE_SIZE_;
#elif defined(_WIN32)
  SYSTEM_INFO si;
  GetSystemInfo (&si);
  MHD_sys_page_size_ = (size_t) si.dwPageSize;
#else
  MHD_sys_page_size_ = MHD_DEF_PAGE_SIZE_;
#endif /* _WIN32 */
  mhd_assert (0 == (MHD_sys_page_size_ % ALIGN_SIZE));
}


/**
 * Handle for a memory pool.  Pools are not reentrant and must not be
 * used by multiple threads.
 */
struct MemoryPool
{

  /**
   * Pointer to the pool's memory
   */
  uint8_t *memory;

  /**
   * Size of the pool.
   */
  size_t size;

  /**
   * Offset of the first unallocated byte.
   */
  size_t pos;

  /**
   * Offset of the byte after the last unallocated byte.
   */
  size_t end;

  /**
   * 'false' if pool was malloc'ed, 'true' if mmapped (VirtualAlloc'ed for W32).
   */
  bool is_mmap;
};
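
/* Illustration (not part of the build): the pool is a single contiguous
 * buffer consumed from both ends.  "Normal" allocations advance 'pos';
 * "from end" allocations pull 'end' backwards; the gap in between is the
 * remaining free space:
 *
 *   memory                                                memory + size
 *   |-- allocated from front --|--- free ---|-- allocated from end --|
 *   0                         pos          end                     size
 */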


/**
 * Create a memory pool.
 *
 * @param max maximum size of the pool
 * @return NULL on error
 */
struct MemoryPool *
MHD_pool_create (size_t max)
{
  struct MemoryPool *pool;
  size_t alloc_size;

  mhd_assert (max > 0);
  alloc_size = 0;
  pool = malloc (sizeof (struct MemoryPool));
  if (NULL == pool)
    return NULL;
#if defined(MAP_ANONYMOUS) || defined(_WIN32)
  if ( (max <= 32 * 1024) ||
       (max < MHD_sys_page_size_ * 4 / 3) )
  {
    pool->memory = MAP_FAILED;
  }
  else
  {
    /* Round up allocation to page granularity. */
    alloc_size = max + MHD_sys_page_size_ - 1;
    alloc_size -= alloc_size % MHD_sys_page_size_;
#if defined(MAP_ANONYMOUS) && ! defined(_WIN32)
    pool->memory = mmap (NULL,
                         alloc_size,
                         PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS,
                         -1,
                         0);
#elif defined(_WIN32)
    pool->memory = VirtualAlloc (NULL,
                                 alloc_size,
                                 MEM_COMMIT | MEM_RESERVE,
                                 PAGE_READWRITE);
#endif /* _WIN32 */
  }
#else  /* ! _WIN32 && ! MAP_ANONYMOUS */
  pool->memory = MAP_FAILED;
#endif /* ! _WIN32 && ! MAP_ANONYMOUS */
  if (MAP_FAILED == pool->memory)
  {
    alloc_size = ROUND_TO_ALIGN (max);
    pool->memory = malloc (alloc_size);
    if (NULL == pool->memory)
    {
      free (pool);
      return NULL;
    }
    pool->is_mmap = false;
  }
#if defined(MAP_ANONYMOUS) || defined(_WIN32)
  else
  {
    pool->is_mmap = true;
  }
#endif /* _WIN32 || MAP_ANONYMOUS */
  mhd_assert (0 == (((uintptr_t) pool->memory) % ALIGN_SIZE));
  pool->pos = 0;
  pool->end = alloc_size;
  pool->size = alloc_size;
  mhd_assert (0 < alloc_size);
  _MHD_POISON_MEMORY (pool->memory, pool->size);
  return pool;
}
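
/* Illustration (not part of the build): with 4096-byte pages, a request
 * for max == 70000 bytes is backed by mmap()/VirtualAlloc() and rounded
 * up to page granularity: 70000 + 4095 == 74095, minus 74095 % 4096
 * (== 367), gives alloc_size == 73728 (18 pages).  Requests of at most
 * 32 KiB, or smaller than 4/3 of a page, use plain malloc() instead. */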


/**
 * Destroy a memory pool.
 *
 * @param pool memory pool to destroy
 */
void
MHD_pool_destroy (struct MemoryPool *pool)
{
  if (NULL == pool)
    return;

  mhd_assert (pool->end >= pool->pos);
  mhd_assert (pool->size >= pool->end - pool->pos);
  _MHD_POISON_MEMORY (pool->memory, pool->size);
  if (! pool->is_mmap)
    free (pool->memory);
  else
#if defined(MAP_ANONYMOUS) && ! defined(_WIN32)
    munmap (pool->memory,
            pool->size);
#elif defined(_WIN32)
    VirtualFree (pool->memory,
                 0,
                 MEM_RELEASE);
#else
    abort ();
#endif
  free (pool);
}


/**
 * Check how much memory is left in the @a pool
 *
 * @param pool pool to check
 * @return number of bytes still available in @a pool
 */
size_t
MHD_pool_get_free (struct MemoryPool *pool)
{
  mhd_assert (pool->end >= pool->pos);
  mhd_assert (pool->size >= pool->end - pool->pos);
#ifdef MHD_ASAN_POISON_ACTIVE
  if ((pool->end - pool->pos) <= _MHD_RED_ZONE_SIZE)
    return 0;
#endif /* MHD_ASAN_POISON_ACTIVE */
  return (pool->end - pool->pos) - _MHD_RED_ZONE_SIZE;
}

/**
 * Allocate @a size bytes from the pool.
 *
 * @param pool memory pool to use for the operation
 * @param size number of bytes to allocate
 * @param from_end allocate from end of pool (set to 'true');
 *        use this for small, persistent allocations that
 *        will never be reallocated
 * @return NULL if the pool cannot support @a size more
 *         bytes
 */
void *
MHD_pool_allocate (struct MemoryPool *pool,
                   size_t size,
                   bool from_end)
{
  void *ret;
  size_t asize;

  mhd_assert (pool->end >= pool->pos);
  mhd_assert (pool->size >= pool->end - pool->pos);
  asize = ROUND_TO_ALIGN_PLUS_RED_ZONE (size);
  if ( (0 == asize) && (0 != size) )
    return NULL; /* size too close to SIZE_MAX */
  if (asize > pool->end - pool->pos)
    return NULL;
  if (from_end)
  {
    ret = &pool->memory[pool->end - asize];
    pool->end -= asize;
  }
  else
  {
    ret = &pool->memory[pool->pos];
    pool->pos += asize;
  }
  _MHD_UNPOISON_MEMORY (ret, size);
  return ret;
}
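
/* Illustration (not part of the build): a typical lifecycle with a
 * hypothetical caller; within MHD the pool backs per-connection state.
 *
 *   struct MemoryPool *pool = MHD_pool_create (64 * 1024);
 *   if (NULL != pool)
 *   {
 *     char *buf = MHD_pool_allocate (pool, 1024, false); // growable
 *     void *hdr = MHD_pool_allocate (pool, 64, true);    // persistent, "from end"
 *     // ... use buf / hdr; there is no per-allocation free() ...
 *     MHD_pool_destroy (pool);                           // releases everything
 *   }
 */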


/**
 * Try to allocate @a size bytes of memory from the @a pool.
 *
 * If the allocation fails, @a required_bytes is updated with the number of
 * bytes that must be freed in the reallocatable area of the @a pool before
 * the requested number of bytes could be allocated.
 * The allocated memory area is never reallocatable ("from the end").
 *
 * @param pool memory pool to use for the operation
 * @param size the size of memory in bytes to allocate
 * @param[out] required_bytes the pointer to the variable to be updated with
 *                            the size of the required additional free
 *                            memory area; not updated if the function
 *                            succeeds. Cannot be NULL.
 * @return the pointer to the allocated memory area if successful,
 *         NULL if the pool does not have enough space; @a required_bytes
 *         is then updated with the amount of space that needs to be freed
 *         in the reallocatable area, or set to SIZE_MAX if the requested
 *         size is too large for the pool.
 */
void *
MHD_pool_try_alloc (struct MemoryPool *pool,
                    size_t size,
                    size_t *required_bytes)
377   void *ret;
378   size_t asize;
379 
380   mhd_assert (pool->end >= pool->pos);
381   mhd_assert (pool->size >= pool->end - pool->pos);
382   asize = ROUND_TO_ALIGN_PLUS_RED_ZONE (size);
383   if ( (0 == asize) && (0 != size) )
384   { /* size is too close to SIZE_MAX, very unlikely */
385     *required_bytes = SIZE_MAX;
386     return NULL;
387   }
388   if (asize > pool->end - pool->pos)
389   {
390     mhd_assert ((pool->end - pool->pos) == \
391                 ROUND_TO_ALIGN (pool->end - pool->pos));
392     if (asize <= pool->end)
393       *required_bytes = asize - (pool->end - pool->pos);
394     else
395       *required_bytes = SIZE_MAX;
396     return NULL;
397   }
398   ret = &pool->memory[pool->end - asize];
399   pool->end -= asize;
400   _MHD_UNPOISON_MEMORY (ret, size);
401   return ret;
402 }
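
/* Illustration (not part of the build): a sketch of the intended calling
 * pattern; variable names are hypothetical.
 *
 *   size_t need;
 *   void *p = MHD_pool_try_alloc (pool, want, &need);
 *   if (NULL == p)
 *   {
 *     // 'need' bytes must first be released from the reallocatable
 *     // (front) area, e.g. by shrinking a buffer via
 *     // MHD_pool_reallocate(); SIZE_MAX means 'want' can never fit.
 *   }
 */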


/**
 * Reallocate a block of memory obtained from the pool.
 * This is particularly efficient when growing or
 * shrinking the block that was last (re)allocated.
 * If the given block is not the most recently
 * (re)allocated block, the memory of the previous
 * allocation may not be released until the pool is
 * destroyed or reset.
 *
 * @param pool memory pool to use for the operation
 * @param old the existing block
 * @param old_size the size of the existing block
 * @param new_size the new size of the block
 * @return new address of the block, or
 *         NULL if the pool cannot support @a new_size
 *         bytes (@a old continues to be valid for @a old_size)
 */
_MHD_NOSANITIZE_PTRS void *
MHD_pool_reallocate (struct MemoryPool *pool,
                     void *old,
                     size_t old_size,
                     size_t new_size)
{
  size_t asize;
  uint8_t *new_blc;

  mhd_assert (pool->end >= pool->pos);
  mhd_assert (pool->size >= pool->end - pool->pos);
  mhd_assert (old != NULL || old_size == 0);
  mhd_assert (pool->size >= old_size);
  mhd_assert (old == NULL || pool->memory <= (uint8_t *) old);
  /* (old == NULL || pool->memory + pool->size >= (uint8_t*) old + old_size) */
  mhd_assert (old == NULL || \
              (pool->size - _MHD_RED_ZONE_SIZE) >= \
              (((size_t) (((uint8_t *) old) - pool->memory)) + old_size));
  /* Blocks "from the end" must not be reallocated */
  /* (old == NULL || old_size == 0 || pool->memory + pool->pos > (uint8_t*) old) */
  mhd_assert (old == NULL || old_size == 0 || \
              pool->pos > (size_t) ((uint8_t *) old - pool->memory));
  mhd_assert (old == NULL || old_size == 0 || \
              (size_t) (((uint8_t *) old) - pool->memory) + old_size <= \
              pool->end - _MHD_RED_ZONE_SIZE);

  if (NULL != old)
  {   /* Have previously allocated data */
    const size_t old_offset = (uint8_t *) old - pool->memory;
    const bool shrinking = (old_size > new_size);
    /* Try resizing in-place */
    if (shrinking)
    {     /* Shrinking in-place, zero-out freed part */
      memset ((uint8_t *) old + new_size, 0, old_size - new_size);
      _MHD_POISON_MEMORY ((uint8_t *) old + new_size, old_size - new_size);
    }
    if (pool->pos ==
        ROUND_TO_ALIGN_PLUS_RED_ZONE (old_offset + old_size))
    {     /* "old" block is the last allocated block */
      const size_t new_apos =
        ROUND_TO_ALIGN_PLUS_RED_ZONE (old_offset + new_size);
      if (! shrinking)
      {                               /* Grow in-place, check for enough space. */
        if ( (new_apos > pool->end) ||
             (new_apos < pool->pos) ) /* Value wrap */
          return NULL;                /* No space */
      }
      /* Resized in-place */
      pool->pos = new_apos;
      _MHD_UNPOISON_MEMORY (old, new_size);
      return old;
    }
    if (shrinking)
      return old;   /* Resized in-place, freed part remains allocated */
  }
  /* Need to allocate new block */
  asize = ROUND_TO_ALIGN_PLUS_RED_ZONE (new_size);
  if ( ( (0 == asize) &&
         (0 != new_size) ) || /* Value wrap, too large new_size. */
       (asize > pool->end - pool->pos) ) /* Not enough space */
    return NULL;

  new_blc = pool->memory + pool->pos;
  pool->pos += asize;

  _MHD_UNPOISON_MEMORY (new_blc, new_size);
  if (0 != old_size)
  {
    /* Move data to new block, old block remains allocated */
    memcpy (new_blc, old, old_size);
    /* Zero-out old block */
    memset (old, 0, old_size);
    _MHD_POISON_MEMORY (old, old_size);
  }
  return new_blc;
}
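
/* Illustration (not part of the build): growing the most recently
 * (re)allocated block is copy-free, while growing an older block copies
 * the data and leaves the old bytes consumed until the pool is reset or
 * destroyed.
 *
 *   char *a = MHD_pool_allocate (pool, 100, false);
 *   a = MHD_pool_reallocate (pool, a, 100, 200); // last block: grown in-place
 *   char *b = MHD_pool_allocate (pool, 50, false);
 *   a = MHD_pool_reallocate (pool, a, 200, 300); // not last: copied; the old
 *                                                // 200 bytes stay consumed
 */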


/**
 * Clear all entries from the memory pool except
 * for @a keep of the given @a copy_bytes. The pointer
 * returned should be a buffer of @a new_size where
 * the first @a copy_bytes are from @a keep.
 *
 * @param pool memory pool to use for the operation
 * @param keep pointer to the entry to keep (may be NULL)
 * @param copy_bytes how many bytes need to be kept at this address
 * @param new_size how many bytes should the allocation we return have?
 *                 (should be larger or equal to @a copy_bytes)
 * @return the new address of @a keep (if it had to change)
 */
_MHD_NOSANITIZE_PTRS void *
MHD_pool_reset (struct MemoryPool *pool,
                void *keep,
                size_t copy_bytes,
                size_t new_size)
{
  mhd_assert (pool->end >= pool->pos);
  mhd_assert (pool->size >= pool->end - pool->pos);
  mhd_assert (copy_bytes <= new_size);
  mhd_assert (copy_bytes <= pool->size);
  mhd_assert (keep != NULL || copy_bytes == 0);
  mhd_assert (keep == NULL || pool->memory <= (uint8_t *) keep);
  /* (keep == NULL || pool->memory + pool->size >= (uint8_t*) keep + copy_bytes) */
  mhd_assert (keep == NULL || \
              pool->size >= \
              ((size_t) ((uint8_t *) keep - pool->memory)) + copy_bytes);
  _MHD_UNPOISON_MEMORY (pool->memory, new_size);
  if ( (NULL != keep) &&
       (keep != pool->memory) )
  {
    if (0 != copy_bytes)
      memmove (pool->memory,
               keep,
               copy_bytes);
  }
  /* technically not needed, but safer to zero out */
  if (pool->size > copy_bytes)
  {
    size_t to_zero;   /** Size of area to zero-out */

    to_zero = pool->size - copy_bytes;
    _MHD_UNPOISON_MEMORY (pool->memory + copy_bytes, to_zero);
#ifdef _WIN32
    if (pool->is_mmap)
    {
      size_t to_recommit;     /** Size of decommitted and re-committed area. */
      uint8_t *recommit_addr;
      /* Round down to page size */
      to_recommit = to_zero - to_zero % MHD_sys_page_size_;
      recommit_addr = pool->memory + pool->size - to_recommit;

      /* De-committing and then re-committing clears the memory and makes
       * the pages free / available for other needs until accessed. */
      if (VirtualFree (recommit_addr,
                       to_recommit,
                       MEM_DECOMMIT))
      {
        to_zero -= to_recommit;

        if (recommit_addr != VirtualAlloc (recommit_addr,
                                           to_recommit,
                                           MEM_COMMIT,
                                           PAGE_READWRITE))
          abort ();      /* Serious error, must never happen */
      }
    }
#endif /* _WIN32 */
    memset (&pool->memory[copy_bytes],
            0,
            to_zero);
  }
  pool->pos = ROUND_TO_ALIGN_PLUS_RED_ZONE (new_size);
  pool->end = pool->size;
  _MHD_POISON_MEMORY (((uint8_t *) pool->memory) + new_size, \
                      pool->size - new_size);
  return pool->memory;
}
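
/* Illustration (not part of the build): recycling a pool between uses;
 * variable names are hypothetical.
 *
 *   // Keep the first 'used' bytes of 'state' across the reset and get a
 *   // fresh buffer of 'bufsize' bytes at the start of the pool; the rest
 *   // of the pool is zeroed and becomes available again.
 *   state = MHD_pool_reset (pool, state, used, bufsize);
 */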


/* end of memorypool.c */