/*

   american fuzzy lop++ - dislocator, an abusive allocator
   -----------------------------------------------------

   Originally written by Michal Zalewski

   Copyright 2016 Google Inc. All rights reserved.
   Copyright 2019-2020 AFLplusplus Project. All rights reserved.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at:

     http://www.apache.org/licenses/LICENSE-2.0

   This is a companion library that can be used as a drop-in replacement
   for the libc allocator in the fuzzed binaries. See README.dislocator.md for
   more info.

 */
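
/* Typical usage (a sketch; README.dislocator.md is authoritative): build this
   file into libdislocator.so and preload it into the target, e.g.

     AFL_PRELOAD=/path/to/libdislocator.so afl-fuzz -i in -o out -- ./target @@

   or, outside of afl-fuzz, LD_PRELOAD=/path/to/libdislocator.so ./target */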

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>
#include <string.h>
#include <limits.h>
#include <errno.h>
#include <sys/mman.h>

#ifdef __APPLE__
  #include <mach/vm_statistics.h>
#endif

#ifdef __FreeBSD__
  #include <sys/param.h>
#endif

#if (defined(__linux__) && !defined(__ANDROID__)) || defined(__HAIKU__)
  #include <unistd.h>
  #ifdef __linux__
    #include <sys/syscall.h>
    #include <malloc.h>
  #endif
  #ifdef __NR_getrandom
    #define arc4random_buf(p, l)                       \
      do {                                             \
                                                       \
        ssize_t rd = syscall(__NR_getrandom, p, l, 0); \
        if (rd != l) DEBUGF("getrandom failed");       \
                                                       \
      } while (0)

  #else
    #include <time.h>
    #define arc4random_buf(p, l)     \
      do {                           \
                                     \
        srand(time(NULL));           \
        u32 i;                       \
        u8 *ptr = (u8 *)p;           \
        for (i = 0; i < l; i++)      \
          ptr[i] = rand() % INT_MAX; \
                                     \
      } while (0)

  #endif
#endif
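
/* Note: the rand()-based fallback above is not cryptographically strong, but
   it is only used to pick a random allocation canary in __dislocator_init()
   below, so that is acceptable. */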

#include "config.h"
#include "types.h"

#if __STDC_VERSION__ < 201112L || \
    (defined(__FreeBSD__) && __FreeBSD_version < 1200000)
// use this hack if not C11
typedef struct {

  long long   __ll;
  long double __ld;

} max_align_t;

#endif

#define ALLOC_ALIGN_SIZE (_Alignof(max_align_t))

#ifndef PAGE_SIZE
  #define PAGE_SIZE 4096
#endif                                                        /* !PAGE_SIZE */

#ifndef MAP_ANONYMOUS
  #define MAP_ANONYMOUS MAP_ANON
#endif                                                    /* !MAP_ANONYMOUS */
/* 2 MB super page size. Parenthesized so that expressions like
   "rlen % SUPER_PAGE_SIZE" expand as intended. */
#define SUPER_PAGE_SIZE (1 << 21)

/* Error / message handling: */

#define DEBUGF(_x...)                 \
  do {                                \
                                      \
    if (alloc_verbose) {              \
                                      \
      if (++call_depth == 1) {        \
                                      \
        fprintf(stderr, "[AFL] " _x); \
        fprintf(stderr, "\n");        \
                                      \
      }                               \
      call_depth--;                   \
                                      \
    }                                 \
                                      \
  } while (0)

#define FATAL(_x...)                    \
  do {                                  \
                                        \
    if (++call_depth == 1) {            \
                                        \
      fprintf(stderr, "*** [AFL] " _x); \
      fprintf(stderr, " ***\n");        \
      abort();                          \
                                        \
    }                                   \
    call_depth--;                       \
                                        \
  } while (0)

/* Macro to count the number of pages needed to store a buffer: */

#define PG_COUNT(_l) (((_l) + (PAGE_SIZE - 1)) / PAGE_SIZE)
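
/* I.e. a round-up division: with PAGE_SIZE == 4096, PG_COUNT(1) == 1,
   PG_COUNT(4096) == 1 and PG_COUNT(4097) == 2. */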

/* Canary & clobber bytes: */

#define ALLOC_CANARY 0xAACCAACC
#define ALLOC_CLOBBER 0xCC

#define TAIL_ALLOC_CANARY 0xAC

#define PTR_C(_p) (((u32 *)(_p))[-1])
#define PTR_L(_p) (((u32 *)(_p))[-2])
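
/* I.e. the metadata lives in the 8 bytes directly below the user pointer:
   PTR_L(_p) is the stored allocation length at _p - 8, and PTR_C(_p) is the
   canary word at _p - 4; __dislocator_alloc() writes both. */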

/* Configurable stuff (use AFL_LD_* to set): */

static size_t max_mem = MAX_ALLOC;      /* Max heap usage to permit         */
static u8     alloc_verbose,            /* Additional debug messages        */
    hard_fail,                          /* abort() when max_mem exceeded?   */
    no_calloc_over,                     /* abort() on calloc() overflows?   */
    align_allocations;                  /* Force alignment to sizeof(void*) */

#if defined __OpenBSD__ || defined __APPLE__
  #define __thread
  #warning no thread support available
#endif
static _Atomic size_t total_mem;        /* Currently allocated mem          */

static __thread u32 call_depth;         /* To avoid recursion via fprintf() */
static u32          alloc_canary;

/* This is the main alloc function. It allocates one page more than necessary,
   sets that trailing page to PROT_NONE, and then increments the return address
   so that it is right-aligned to that boundary. Since it always uses mmap(),
   the returned memory will be zeroed. */

static void *__dislocator_alloc(size_t len) {

  u8 *   ret, *base;
  size_t tlen;
  int    flags, protflags, fd, sp;

  if (total_mem + len > max_mem || total_mem + len < total_mem) {

    if (hard_fail) FATAL("total allocs exceed %zu MB", max_mem / 1024 / 1024);

    DEBUGF("total allocs exceed %zu MB, returning NULL", max_mem / 1024 / 1024);

    return NULL;

  }

  size_t rlen;
  if (align_allocations && (len & (ALLOC_ALIGN_SIZE - 1)))
    rlen = (len & ~(ALLOC_ALIGN_SIZE - 1)) + ALLOC_ALIGN_SIZE;
  else
    rlen = len;

  /* We will also store buffer length and a canary below the actual buffer, so
     let's add 8 bytes for that. */
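
  /* Once the pointer has been right-aligned further below, the mapping looks
     like this (a sketch of the code that follows, nothing more):

       [ slack ][ len:u32 ][ canary:u32 ][ user data, rlen bytes ][ PROT_NONE ]
                                         ^-- returned pointer

     The user data ends flush against the guard page, so out-of-bounds
     accesses fault immediately. */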

  base = NULL;
  tlen = (1 + PG_COUNT(rlen + 8)) * PAGE_SIZE;
  protflags = PROT_READ | PROT_WRITE;
  flags = MAP_PRIVATE | MAP_ANONYMOUS;
  fd = -1;
#if defined(PROT_MAX)
  // apply when sysctl vm.imply_prot_max is set to 1
  // no-op otherwise
  protflags |= PROT_MAX(PROT_READ | PROT_WRITE);
#endif
#if defined(USEHUGEPAGE)
  sp = (rlen >= SUPER_PAGE_SIZE && !(rlen % SUPER_PAGE_SIZE));

  #if defined(__APPLE__)
  if (sp) fd = VM_FLAGS_SUPERPAGE_SIZE_2MB;
  #elif defined(__linux__)
  if (sp) flags |= MAP_HUGETLB;
  #elif defined(__FreeBSD__)
  if (sp) flags |= MAP_ALIGNED_SUPER;
  #elif defined(__sun)
  if (sp) {

    base = (void *)(caddr_t)(1 << 21);
    flags |= MAP_ALIGN;

  }

  #endif
#else
  (void)sp;
#endif

  ret = (u8 *)mmap(base, tlen, protflags, flags, fd, 0);
#if defined(USEHUGEPAGE)
  /* We try one more time with regular call */
  if (ret == MAP_FAILED) {

  #if defined(__APPLE__)
    fd = -1;
  #elif defined(__linux__)
    flags &= ~MAP_HUGETLB;
  #elif defined(__FreeBSD__)
    flags &= ~MAP_ALIGNED_SUPER;
  #elif defined(__sun)
    flags &= ~MAP_ALIGN;
  #endif
    ret = (u8 *)mmap(NULL, tlen, protflags, flags, fd, 0);

  }

#endif

  if (ret == MAP_FAILED) {

    if (hard_fail) FATAL("mmap() failed on alloc (OOM?)");

    DEBUGF("mmap() failed on alloc (OOM?)");

    return NULL;

  }

  /* Set PROT_NONE on the last page. */

  if (mprotect(ret + PG_COUNT(rlen + 8) * PAGE_SIZE, PAGE_SIZE, PROT_NONE))
    FATAL("mprotect() failed when allocating memory");

  /* Offset the return pointer so that it's right-aligned to the page
     boundary. */

  ret += PAGE_SIZE * PG_COUNT(rlen + 8) - rlen - 8;

  /* Store allocation metadata. */

  ret += 8;

  PTR_L(ret) = len;
  PTR_C(ret) = alloc_canary;

  total_mem += len;

  if (rlen != len) {

    size_t i;
    for (i = len; i < rlen; ++i)
      ret[i] = TAIL_ALLOC_CANARY;

  }

  return ret;

}

/* The "user-facing" wrapper for calloc(). This just checks for overflows and
   displays debug messages if requested. */
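
/* For instance, on a 64-bit target calloc(1UL << 33, 1UL << 33) wraps the
   product to 0; the division check below catches exactly that case. */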

void *calloc(size_t elem_len, size_t elem_cnt) {

  void *ret;

  size_t len = elem_len * elem_cnt;

  /* Perform some sanity checks to detect obvious issues... */

  if (elem_cnt && len / elem_cnt != elem_len) {

    if (no_calloc_over) {

      DEBUGF("calloc(%zu, %zu) would overflow, returning NULL", elem_len,
             elem_cnt);
      return NULL;

    }

    FATAL("calloc(%zu, %zu) would overflow", elem_len, elem_cnt);

  }

  ret = __dislocator_alloc(len);

  DEBUGF("calloc(%zu, %zu) = %p [%zu total]", elem_len, elem_cnt, ret,
         total_mem);

  return ret;

}

/* The wrapper for malloc(). Roughly the same, also clobbers the returned
   memory (unlike calloc(), malloc() is not guaranteed to return zeroed
   memory). */

void *malloc(size_t len) {

  void *ret;

  ret = __dislocator_alloc(len);

  DEBUGF("malloc(%zu) = %p [%zu total]", len, ret, total_mem);

  if (ret && len) memset(ret, ALLOC_CLOBBER, len);

  return ret;

}

/* The wrapper for free(). This simply marks the entire region as PROT_NONE.
   If the region is already freed, the code will segfault during the attempt to
   read the canary. Not very graceful, but works, right? */
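
/* For example, after

     char *p = malloc(16);
     free(p);

   any later access to p[0] lands in a PROT_NONE mapping and crashes on the
   spot, which is exactly what we want under a fuzzer. */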

void free(void *ptr) {

  u32 len;

  DEBUGF("free(%p)", ptr);

  if (!ptr) return;

  if (PTR_C(ptr) != alloc_canary) FATAL("bad allocator canary on free()");

  len = PTR_L(ptr);

  total_mem -= len;
  u8 *ptr_ = ptr;

  if (align_allocations && (len & (ALLOC_ALIGN_SIZE - 1))) {

    size_t rlen = (len & ~(ALLOC_ALIGN_SIZE - 1)) + ALLOC_ALIGN_SIZE;
    for (; len < rlen; ++len)
      if (ptr_[len] != TAIL_ALLOC_CANARY)
        FATAL("bad tail allocator canary on free()");

  }

  /* Protect everything. Note that the extra page at the end is already
     set as PROT_NONE, so we don't need to touch that. */

  ptr_ -= PAGE_SIZE * PG_COUNT(len + 8) - len - 8;

  if (mprotect(ptr_ - 8, PG_COUNT(len + 8) * PAGE_SIZE, PROT_NONE))
    FATAL("mprotect() failed when freeing memory");

  ptr = ptr_;

  /* Keep the mapping; this is wasteful, but prevents ptr reuse. */

}

/* Realloc is pretty straightforward, too. We forcibly reallocate the buffer,
   move data, and then free (aka mprotect()) the original one. */
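
/* Note that unlike the libc realloc(), this one always moves the buffer, so
   callers that quietly rely on in-place growth get caught, too. */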

void *realloc(void *ptr, size_t len) {

  void *ret;

  ret = malloc(len);

  if (ret && ptr) {

    if (PTR_C(ptr) != alloc_canary) FATAL("bad allocator canary on realloc()");
    // Here the tail canary check is delayed to free()

    memcpy(ret, ptr, MIN(len, PTR_L(ptr)));
    free(ptr);

  }

  DEBUGF("realloc(%p, %zu) = %p [%zu total]", ptr, len, ret, total_mem);

  return ret;

}

/* For posix_memalign() we mainly validate the alignment argument; the
   requested size is rounded up to a multiple of the alignment and then
   allocated normally. */
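
/* Why rounding the size up is enough (assuming align divides PAGE_SIZE): the
   returned buffer always ends on a page boundary, so a length that is a
   multiple of align yields an aligned start address as well. E.g.
   posix_memalign(&p, 64, 100) turns into a 128-byte request. */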

int posix_memalign(void **ptr, size_t align, size_t len) {

  // if (*ptr == NULL) return EINVAL; /* disabled: *ptr is an out-parameter,
  //                                     so validating it is meaningless */
  /* POSIX requires align to be a power of two and a multiple of
     sizeof(void *); the check below is a permissive approximation. */
  if ((align % 2) || (align % sizeof(void *))) return EINVAL;
  if (len == 0) {

    *ptr = NULL;
    return 0;

  }

  size_t rem = len % align;
  if (rem) len += align - rem;

  *ptr = __dislocator_alloc(len);

  if (*ptr && len) memset(*ptr, ALLOC_CLOBBER, len);

  DEBUGF("posix_memalign(%p, %zu, %zu) [*ptr = %p]", ptr, align, len, *ptr);

  return 0;

}

/* Same thing, just in the non-POSIX fashion. */

void *memalign(size_t align, size_t len) {

  void *ret = NULL;

  if (posix_memalign(&ret, align, len)) {

    DEBUGF("memalign(%zu, %zu) failed", align, len);

  }

  return ret;

}

/* The C11 cousin of memalign(), only stricter, alignment-wise: the size must
   be a multiple of the alignment. */

void *aligned_alloc(size_t align, size_t len) {

  void *ret = NULL;

  if ((len % align)) return NULL;

  if (posix_memalign(&ret, align, len)) {

    DEBUGF("aligned_alloc(%zu, %zu) failed", align, len);

  }

  return ret;

}

/* BSD-specific API; the main job here is checking the requested size for
   multiplication overflow. */
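
/* The guard below mirrors the OpenBSD idiom: elem_lim is 2^(half the bits in
   a size_t), so the costly division only runs when one of the operands is
   large enough for elem_len * elem_cnt to possibly wrap. */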

void *reallocarray(void *ptr, size_t elem_len, size_t elem_cnt) {

  const size_t elem_lim = 1UL << (sizeof(size_t) * 4);
  const size_t elem_tot = elem_len * elem_cnt;
  void *       ret = NULL;

  if ((elem_len >= elem_lim || elem_cnt >= elem_lim) && elem_len > 0 &&
      elem_cnt > (SIZE_MAX / elem_len)) {

    DEBUGF("reallocarray size overflow (%zu)", elem_tot);

  } else {

    ret = realloc(ptr, elem_tot);

  }

  return ret;

}

#if !defined(__ANDROID__)
size_t malloc_usable_size(void *ptr) {

#else
size_t malloc_usable_size(const void *ptr) {

#endif

  return ptr ? PTR_L(ptr) : 0;

}

__attribute__((constructor)) void __dislocator_init(void) {

  char *tmp = getenv("AFL_LD_LIMIT_MB");

  if (tmp) {

    char *             tok;
    unsigned long long mmem = strtoull(tmp, &tok, 10);
    if (*tok != '\0' || errno == ERANGE || mmem > SIZE_MAX / 1024 / 1024)
      FATAL("Bad value for AFL_LD_LIMIT_MB");
    max_mem = mmem * 1024 * 1024;

  }

  alloc_canary = ALLOC_CANARY;
  tmp = getenv("AFL_RANDOM_ALLOC_CANARY");

  if (tmp) arc4random_buf(&alloc_canary, sizeof(alloc_canary));

  alloc_verbose = !!getenv("AFL_LD_VERBOSE");
  hard_fail = !!getenv("AFL_LD_HARD_FAIL");
  no_calloc_over = !!getenv("AFL_LD_NO_CALLOC_OVER");
  align_allocations = !!getenv("AFL_ALIGNED_ALLOC");

}
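
/* A typical invocation exercising these knobs (a sketch; the variables are
   exactly the ones read above):

     AFL_LD_LIMIT_MB=512 AFL_LD_VERBOSE=1 AFL_RANDOM_ALLOC_CANARY=1 \
       LD_PRELOAD=/path/to/libdislocator.so ./target

   Setting a variable to any value, even an empty one, enables the
   corresponding boolean option. */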

/* NetBSD-specific error-checked allocation API subset (esetfunc() and
   friends): */

void (*esetfunc(void (*fn)(int, const char *, ...)))(int, const char *, ...) {

  /* Might not be meaningful to implement; upper calls already report errors */
  return NULL;

}

void *emalloc(size_t len) {

  return malloc(len);

}

void *ecalloc(size_t elem_len, size_t elem_cnt) {

  return calloc(elem_len, elem_cnt);

}

void *erealloc(void *ptr, size_t len) {

  return realloc(ptr, len);

}
