/* Copyright (c) 2008-2021, The Tor Project, Inc. */
/* See LICENSE for licensing information */

/**
 * \file memarea.c
 *
 * \brief Implementation for memarea_t, an allocator for allocating lots of
 * small objects that will be freed all at once.
 */
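/*
 * Typical usage, as a minimal sketch (error handling omitted; every
 * pointer handed out below becomes invalid once the area is dropped):
 *
 *   memarea_t *area = memarea_new();
 *   char *name = memarea_strdup(area, "example");
 *   void *buf = memarea_alloc_zero(area, 64);
 *   // ... use name and buf freely; no individual frees needed ...
 *   memarea_drop_all_(area);   // releases every allocation at once
 */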

#include "orconfig.h"
#include "lib/memarea/memarea.h"

#include <stdlib.h>
#include <string.h>

#include "lib/arch/bytes.h"
#include "lib/cc/torint.h"
#include "lib/smartlist_core/smartlist_core.h"
#include "lib/smartlist_core/smartlist_foreach.h"
#include "lib/log/log.h"
#include "lib/log/util_bug.h"
#include "lib/malloc/malloc.h"

#ifndef DISABLE_MEMORY_SENTINELS

/** If true, we try to detect any attempts to write beyond the length of a
 * memarea. */
#define USE_SENTINELS

/** All returned pointers should be aligned to the nearest multiple of this
 * value. */
#define MEMAREA_ALIGN SIZEOF_VOID_P

/** A value which, when masked out of a pointer, produces a maximally aligned
 * pointer. */
#if MEMAREA_ALIGN == 4
#define MEMAREA_ALIGN_MASK ((uintptr_t)3)
#elif MEMAREA_ALIGN == 8
#define MEMAREA_ALIGN_MASK ((uintptr_t)7)
#else
#error "void* is neither 4 nor 8 bytes long."
#endif /* MEMAREA_ALIGN == 4 || ... */

#if defined(__GNUC__) && defined(FLEXIBLE_ARRAY_MEMBER)
#define USE_ALIGNED_ATTRIBUTE
/** Name for the 'memory' member of a memory chunk. */
#define U_MEM mem
#else
#define U_MEM u.mem
#endif /* defined(__GNUC__) && defined(FLEXIBLE_ARRAY_MEMBER) */

#ifdef USE_SENTINELS
/** Magic value that we stick at the end of a memarea so we can make sure
 * there are no run-off-the-end bugs. */
#define SENTINEL_VAL 0x90806622u
/** How many bytes per area do we devote to the sentinel? */
#define SENTINEL_LEN sizeof(uint32_t)
/** Given a mem_area_chunk_t with SENTINEL_LEN extra bytes allocated at the
 * end, set those bytes. */
#define SET_SENTINEL(chunk)                                     \
  STMT_BEGIN                                                    \
  set_uint32( &(chunk)->U_MEM[chunk->mem_size], SENTINEL_VAL ); \
  STMT_END
/** Assert that the sentinel on a memarea is set correctly. */
#define CHECK_SENTINEL(chunk)                                           \
  STMT_BEGIN                                                            \
  uint32_t sent_val = get_uint32(&(chunk)->U_MEM[chunk->mem_size]);     \
  tor_assert(sent_val == SENTINEL_VAL);                                 \
  STMT_END
#else /* !defined(USE_SENTINELS) */
#define SENTINEL_LEN 0
#define SET_SENTINEL(chunk) STMT_NIL
#define CHECK_SENTINEL(chunk) STMT_NIL
#endif /* defined(USE_SENTINELS) */
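
/*
 * Sketch of how the sentinel catches overruns: a chunk with mem_size
 * usable bytes is allocated with SENTINEL_LEN extra bytes at the end,
 * and SET_SENTINEL() writes SENTINEL_VAL just past mem[mem_size-1].
 * Any write that runs off the end of the area clobbers that word, so
 * the next CHECK_SENTINEL() fails a tor_assert() instead of letting
 * the corruption spread silently.
 */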

/** Increment <b>ptr</b> until it is aligned to MEMAREA_ALIGN. */
static inline void *
realign_pointer(void *ptr)
{
  uintptr_t x = (uintptr_t)ptr;
  x = (x+MEMAREA_ALIGN_MASK) & ~MEMAREA_ALIGN_MASK;
  /* Reinstate this if bug 930 ever reappears
  tor_assert(((void*)x) >= ptr);
  */
  return (void*)x;
}
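
/*
 * Worked example of the round-up, assuming MEMAREA_ALIGN == 8
 * (so MEMAREA_ALIGN_MASK == 7):
 *
 *   realign_pointer((void*)0x1001) == (void*)0x1008  // (0x1001+7) & ~7
 *   realign_pointer((void*)0x1008) == (void*)0x1008  // already aligned
 */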

/** Implements part of a memarea.  New memory is carved off from chunk->mem in
 * increasing order until a request is too big, at which point a new chunk is
 * allocated. */
typedef struct memarea_chunk_t {
  /** Next chunk in this area. Only kept around so we can free it. */
  struct memarea_chunk_t *next_chunk;
  size_t mem_size; /**< How much RAM is available in mem, total? */
  char *next_mem; /**< Next position in mem to allocate data at.  If it's
                   * equal to mem+mem_size, this chunk is full. */
#ifdef USE_ALIGNED_ATTRIBUTE
  /** Actual content of the memory chunk. */
  char mem[FLEXIBLE_ARRAY_MEMBER] __attribute__((aligned(MEMAREA_ALIGN)));
#else
  union {
    char mem[1]; /**< Memory space in this chunk.  */
    void *void_for_alignment_; /**< Dummy; used to make sure mem is aligned. */
  } u; /**< Union used to enforce alignment when we don't have support for
        * doing it right. */
#endif /* defined(USE_ALIGNED_ATTRIBUTE) */
} memarea_chunk_t;

/** How many bytes are needed for overhead before we get to the memory part
 * of a chunk? */
#define CHUNK_HEADER_SIZE offsetof(memarea_chunk_t, U_MEM)
/** What's the smallest chunk size we'll allocate? */
#define CHUNK_SIZE 4096
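
/*
 * Resulting chunk layout, when sentinels are enabled (not to scale):
 *
 *   +---------------------+---------------------------+----------+
 *   | header              | mem[0 .. mem_size-1]      | sentinel |
 *   | (CHUNK_HEADER_SIZE) | (allocations carved here) | (4 bytes)|
 *   +---------------------+---------------------------+----------+
 *                           ^ next_mem advances left to right
 */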

/** A memarea_t is an allocation region for a set of small memory requests
 * that will all be freed at once. */
struct memarea_t {
  memarea_chunk_t *first; /**< Top of the chunk stack: never NULL. */
};

/** Helper: allocate a new memarea chunk of around <b>sz</b> bytes. */
static memarea_chunk_t *
alloc_chunk(size_t sz)
{
  tor_assert(sz < SIZE_T_CEILING);

  size_t chunk_size = sz < CHUNK_SIZE ? CHUNK_SIZE : sz;
  memarea_chunk_t *res;
  chunk_size += SENTINEL_LEN;
  res = tor_malloc(chunk_size);
  res->next_chunk = NULL;
  res->mem_size = chunk_size - CHUNK_HEADER_SIZE - SENTINEL_LEN;
  res->next_mem = res->U_MEM;
  tor_assert(res->next_mem+res->mem_size+SENTINEL_LEN ==
             ((char*)res)+chunk_size);
  tor_assert(realign_pointer(res->next_mem) == res->next_mem);
  SET_SENTINEL(res);
  return res;
}

/** Release <b>chunk</b> from a memarea. */
static void
memarea_chunk_free_unchecked(memarea_chunk_t *chunk)
{
  CHECK_SENTINEL(chunk);
  tor_free(chunk);
}

/** Allocate and return new memarea. */
memarea_t *
memarea_new(void)
{
  memarea_t *head = tor_malloc(sizeof(memarea_t));
  head->first = alloc_chunk(CHUNK_SIZE);
  return head;
}

/** Free <b>area</b>, invalidating all pointers returned from memarea_alloc()
 * and friends for this area. */
void
memarea_drop_all_(memarea_t *area)
{
  memarea_chunk_t *chunk, *next;
  for (chunk = area->first; chunk; chunk = next) {
    next = chunk->next_chunk;
    memarea_chunk_free_unchecked(chunk);
  }
  area->first = NULL; /* Fail fast on use-after-free. */
  tor_free(area);
}

/** Forget about having allocated anything in <b>area</b>, and free some of
 * the backing storage associated with it, as appropriate. Invalidates all
 * pointers returned from memarea_alloc() for this area. */
void
memarea_clear(memarea_t *area)
{
  memarea_chunk_t *chunk, *next;
  if (area->first->next_chunk) {
    for (chunk = area->first->next_chunk; chunk; chunk = next) {
      next = chunk->next_chunk;
      memarea_chunk_free_unchecked(chunk);
    }
    area->first->next_chunk = NULL;
  }
  area->first->next_mem = area->first->U_MEM;
}

/** Return true iff <b>p</b> is in a range that has been returned by an
 * allocation from <b>area</b>. */
int
memarea_owns_ptr(const memarea_t *area, const void *p)
{
  memarea_chunk_t *chunk;
  const char *ptr = p;
  for (chunk = area->first; chunk; chunk = chunk->next_chunk) {
    if (ptr >= chunk->U_MEM && ptr < chunk->next_mem)
      return 1;
  }
  return 0;
}

/** Return a pointer to a chunk of memory in <b>area</b> of at least <b>sz</b>
 * bytes.  <b>sz</b> should be significantly smaller than the area's chunk
 * size, though we can deal if it isn't. */
void *
memarea_alloc(memarea_t *area, size_t sz)
{
  memarea_chunk_t *chunk = area->first;
  char *result;
  tor_assert(chunk);
  CHECK_SENTINEL(chunk);
  tor_assert(sz < SIZE_T_CEILING);
  if (sz == 0)
    sz = 1;
  tor_assert(chunk->next_mem <= chunk->U_MEM + chunk->mem_size);
  const size_t space_remaining =
    (chunk->U_MEM + chunk->mem_size) - chunk->next_mem;
  if (sz > space_remaining) {
    if (sz+CHUNK_HEADER_SIZE >= CHUNK_SIZE) {
      /* This allocation is too big.  Stick it in a special chunk, and put
       * that chunk second in the list. */
      memarea_chunk_t *new_chunk = alloc_chunk(sz+CHUNK_HEADER_SIZE);
      new_chunk->next_chunk = chunk->next_chunk;
      chunk->next_chunk = new_chunk;
      chunk = new_chunk;
    } else {
      memarea_chunk_t *new_chunk = alloc_chunk(CHUNK_SIZE);
      new_chunk->next_chunk = chunk;
      area->first = chunk = new_chunk;
    }
    tor_assert(chunk->mem_size >= sz);
  }
  result = chunk->next_mem;
  chunk->next_mem = chunk->next_mem + sz;
  /* Reinstate these if bug 930 ever comes back
  tor_assert(chunk->next_mem >= chunk->U_MEM);
  tor_assert(chunk->next_mem <= chunk->U_MEM+chunk->mem_size);
  */
  chunk->next_mem = realign_pointer(chunk->next_mem);
  return result;
}
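
/*
 * Design note on the branches above: an oversized request gets its own
 * dedicated chunk, linked in *second*, so the current chunk stays at
 * the top of the stack and its remaining free space still serves later
 * small requests.  A normal refill instead pushes a fresh CHUNK_SIZE
 * chunk onto the top, abandoning whatever tail space the old top had.
 */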

/** As memarea_alloc(), but clears the memory it returns. */
void *
memarea_alloc_zero(memarea_t *area, size_t sz)
{
  void *result = memarea_alloc(area, sz);
  memset(result, 0, sz);
  return result;
}

/** As memdup, but returns the memory from <b>area</b>. */
void *
memarea_memdup(memarea_t *area, const void *s, size_t n)
{
  char *result = memarea_alloc(area, n);
  memcpy(result, s, n);
  return result;
}

/** As strdup, but returns the memory from <b>area</b>. */
char *
memarea_strdup(memarea_t *area, const char *s)
{
  return memarea_memdup(area, s, strlen(s)+1);
}

/** As strndup, but returns the memory from <b>area</b>. */
char *
memarea_strndup(memarea_t *area, const char *s, size_t n)
{
  size_t ln = 0;
  char *result;
  tor_assert(n < SIZE_T_CEILING);
  for (ln = 0; ln < n && s[ln]; ++ln)
    ;
  result = memarea_alloc(area, ln+1);
  memcpy(result, s, ln);
  result[ln]='\0';
  return result;
}

/** Set <b>allocated_out</b> to the number of bytes allocated in <b>area</b>,
 * and <b>used_out</b> to the number of bytes currently used. */
void
memarea_get_stats(memarea_t *area, size_t *allocated_out, size_t *used_out)
{
  size_t a = 0, u = 0;
  memarea_chunk_t *chunk;
  for (chunk = area->first; chunk; chunk = chunk->next_chunk) {
    CHECK_SENTINEL(chunk);
    a += CHUNK_HEADER_SIZE + chunk->mem_size;
    tor_assert(chunk->next_mem >= chunk->U_MEM);
    u += CHUNK_HEADER_SIZE + (chunk->next_mem - chunk->U_MEM);
  }
  *allocated_out = a;
  *used_out = u;
}

/** Assert that <b>area</b> is okay. */
void
memarea_assert_ok(memarea_t *area)
{
  memarea_chunk_t *chunk;
  tor_assert(area->first);

  for (chunk = area->first; chunk; chunk = chunk->next_chunk) {
    CHECK_SENTINEL(chunk);
    tor_assert(chunk->next_mem >= chunk->U_MEM);
    tor_assert(chunk->next_mem <=
          (char*) realign_pointer(chunk->U_MEM+chunk->mem_size));
  }
}

#else /* defined(DISABLE_MEMORY_SENTINELS) */

struct memarea_t {
  smartlist_t *pieces;
};
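
/*
 * In this fallback build, every request becomes an individual heap
 * allocation tracked in a smartlist.  This is slower and uses more
 * memory, but (presumably the point of disabling the sentinels) it
 * lets tools such as AddressSanitizer or valgrind check each
 * allocation separately, instead of seeing one opaque chunk.
 */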

memarea_t *
memarea_new(void)
{
  memarea_t *ma = tor_malloc_zero(sizeof(memarea_t));
  ma->pieces = smartlist_new();
  return ma;
}
void
memarea_drop_all_(memarea_t *area)
{
  memarea_clear(area);
  smartlist_free(area->pieces);
  tor_free(area);
}
void
memarea_clear(memarea_t *area)
{
  SMARTLIST_FOREACH(area->pieces, void *, p, tor_free_(p));
  smartlist_clear(area->pieces);
}
int
memarea_owns_ptr(const memarea_t *area, const void *ptr)
{
  SMARTLIST_FOREACH(area->pieces, const void *, p, if (ptr == p) return 1;);
  return 0;
}

void *
memarea_alloc(memarea_t *area, size_t sz)
{
  void *result = tor_malloc(sz);
  smartlist_add(area->pieces, result);
  return result;
}

void *
memarea_alloc_zero(memarea_t *area, size_t sz)
{
  void *result = tor_malloc_zero(sz);
  smartlist_add(area->pieces, result);
  return result;
}
void *
memarea_memdup(memarea_t *area, const void *s, size_t n)
{
  void *r = memarea_alloc(area, n);
  memcpy(r, s, n);
  return r;
}
char *
memarea_strdup(memarea_t *area, const char *s)
{
  size_t n = strlen(s);
  char *r = memarea_alloc(area, n+1);
  memcpy(r, s, n);
  r[n] = 0;
  return r;
}
char *
memarea_strndup(memarea_t *area, const char *s, size_t n)
{
  size_t ln = strnlen(s, n);
  char *r = memarea_alloc(area, ln+1);
  memcpy(r, s, ln);
  r[ln] = 0;
  return r;
}
void
memarea_get_stats(memarea_t *area,
                  size_t *allocated_out, size_t *used_out)
{
  (void)area;
  *allocated_out = *used_out = 128;
}
void
memarea_assert_ok(memarea_t *area)
{
  (void)area;
}

#endif /* !defined(DISABLE_MEMORY_SENTINELS) */