/*
   american fuzzy lop - fuzzer code
   --------------------------------

   Written and maintained by Michal Zalewski <lcamtuf@google.com>

   Forkserver design by Jann Horn <jannhorn@googlemail.com>

   Copyright 2013, 2014, 2015, 2016, 2017 Google Inc. All rights reserved.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at:

     http://www.apache.org/licenses/LICENSE-2.0

   This is the real deal: the program takes an instrumented binary and
   attempts a variety of basic fuzzing tricks, paying close attention to
   how they affect the execution path.

 */

#define AFL_MAIN
#define MESSAGES_TO_STDOUT

#define _GNU_SOURCE
#define _FILE_OFFSET_BITS 64

#include "config.h"
#include "types.h"
#include "debug.h"
#include "alloc-inl.h"
#include "hash.h"

#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <errno.h>
#include <signal.h>
#include <dirent.h>
#include <ctype.h>
#include <fcntl.h>
#include <termios.h>
#include <dlfcn.h>
#include <sched.h>

#include <sys/wait.h>
#include <sys/time.h>
#include <sys/shm.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/resource.h>
#include <sys/mman.h>
#include <sys/ioctl.h>
#include <sys/file.h>

#if defined(__APPLE__) || defined(__FreeBSD__) || defined (__OpenBSD__)
#  include <sys/sysctl.h>
#endif /* __APPLE__ || __FreeBSD__ || __OpenBSD__ */

/* For systems that have sched_setaffinity; right now just Linux, but one
   can hope... */

#ifdef __linux__
#  define HAVE_AFFINITY 1
#endif /* __linux__ */

/* A toggle to export some variables when building as a library. Not very
   useful for the general public. */

#ifdef AFL_LIB
#  define EXP_ST
#else
#  define EXP_ST static
#endif /* ^AFL_LIB */

/* Lots of globals, but mostly for the status UI and other things where it
   really makes no sense to haul them around as function parameters. */


EXP_ST u8 *in_dir,                    /* Input directory with test cases  */
          *out_file,                  /* File to fuzz, if any             */
          *out_dir,                   /* Working & output directory       */
          *sync_dir,                  /* Synchronization directory        */
          *sync_id,                   /* Fuzzer ID                        */
          *use_banner,                /* Display banner                   */
          *in_bitmap,                 /* Input bitmap                     */
          *doc_path,                  /* Path to documentation dir        */
          *target_path,               /* Path to target binary            */
          *orig_cmdline;              /* Original command line            */

EXP_ST u32 exec_tmout = EXEC_TIMEOUT; /* Configurable exec timeout (ms)   */
static u32 hang_tmout = EXEC_TIMEOUT; /* Timeout used for hang det (ms)   */

EXP_ST u64 mem_limit  = MEM_LIMIT;    /* Memory cap for child (MB)        */

static u32 stats_update_freq = 1;     /* Stats update frequency (execs)   */

EXP_ST u8  skip_deterministic,        /* Skip deterministic stages?       */
           force_deterministic,       /* Force deterministic stages?      */
           use_splicing,              /* Recombine input files?           */
           dumb_mode,                 /* Run in non-instrumented mode?    */
           score_changed,             /* Scoring for favorites changed?   */
           kill_signal,               /* Signal that killed the child     */
           resuming_fuzz,             /* Resuming an older fuzzing job?   */
           timeout_given,             /* Specific timeout given?          */
           not_on_tty,                /* stdout is not a tty              */
           term_too_small,            /* terminal dimensions too small    */
           uses_asan,                 /* Target uses ASAN?                */
           no_forkserver,             /* Disable forkserver?              */
           crash_mode,                /* Crash mode! Yeah!                */
           in_place_resume,           /* Attempt in-place resume?         */
           auto_changed,              /* Auto-generated tokens changed?   */
           no_cpu_meter_red,          /* Feng shui on the status screen   */
           no_arith,                  /* Skip most arithmetic ops         */
           shuffle_queue,             /* Shuffle input queue?             */
           bitmap_changed = 1,        /* Time to update bitmap?           */
           qemu_mode,                 /* Running in QEMU mode?            */
           skip_requested,            /* Skip request, via SIGUSR1        */
           run_over10m,               /* Run time over 10 minutes?        */
           persistent_mode,           /* Running in persistent mode?      */
           deferred_mode,             /* Deferred forkserver mode?        */
           fast_cal;                  /* Try to calibrate faster?         */

static s32 out_fd,                    /* Persistent fd for out_file       */
           dev_urandom_fd = -1,       /* Persistent fd for /dev/urandom   */
           dev_null_fd = -1,          /* Persistent fd for /dev/null      */
           fsrv_ctl_fd,               /* Fork server control pipe (write) */
           fsrv_st_fd;                /* Fork server status pipe (read)   */

static s32 forksrv_pid,               /* PID of the fork server           */
           child_pid = -1,            /* PID of the fuzzed program        */
           out_dir_fd = -1;           /* FD of the lock file              */

EXP_ST u8* trace_bits;                /* SHM with instrumentation bitmap  */

EXP_ST u8  virgin_bits[MAP_SIZE],     /* Regions yet untouched by fuzzing */
           virgin_tmout[MAP_SIZE],    /* Bits we haven't seen in tmouts   */
           virgin_crash[MAP_SIZE];    /* Bits we haven't seen in crashes  */

static u8  var_bytes[MAP_SIZE];       /* Bytes that appear to be variable */

static s32 shm_id;                    /* ID of the SHM region             */

static volatile u8 stop_soon,         /* Ctrl-C pressed?                  */
                   clear_screen = 1,  /* Window resized?                  */
                   child_timed_out;   /* Traced process timed out?        */

EXP_ST u32 queued_paths,              /* Total number of queued testcases */
           queued_variable,           /* Testcases with variable behavior */
           queued_at_start,           /* Total number of initial inputs   */
           queued_discovered,         /* Items discovered during this run */
           queued_imported,           /* Items imported via -S            */
           queued_favored,            /* Paths deemed favorable           */
           queued_with_cov,           /* Paths with new coverage bytes    */
           pending_not_fuzzed,        /* Queued but not done yet          */
           pending_favored,           /* Pending favored paths            */
           cur_skipped_paths,         /* Abandoned inputs in cur cycle    */
           cur_depth,                 /* Current path depth               */
           max_depth,                 /* Max path depth                   */
           useless_at_start,          /* Number of useless starting paths */
           var_byte_count,            /* Bitmap bytes with var behavior   */
           current_entry,             /* Current queue entry ID           */
           havoc_div = 1;             /* Cycle count divisor for havoc    */

EXP_ST u64 total_crashes,             /* Total number of crashes          */
           unique_crashes,            /* Crashes with unique signatures   */
           total_tmouts,              /* Total number of timeouts         */
           unique_tmouts,             /* Timeouts with unique signatures  */
           unique_hangs,              /* Hangs with unique signatures     */
           total_execs,               /* Total execve() calls             */
           start_time,                /* Unix start time (ms)             */
           last_path_time,            /* Time for most recent path (ms)   */
           last_crash_time,           /* Time for most recent crash (ms)  */
           last_hang_time,            /* Time for most recent hang (ms)   */
           last_crash_execs,          /* Exec counter at last crash       */
           queue_cycle,               /* Queue round counter              */
           cycles_wo_finds,           /* Cycles without any new paths     */
           trim_execs,                /* Execs done to trim input files   */
           bytes_trim_in,             /* Bytes coming into the trimmer    */
           bytes_trim_out,            /* Bytes coming out of the trimmer  */
           blocks_eff_total,          /* Blocks subject to effector maps  */
           blocks_eff_select;         /* Blocks selected as fuzzable      */

static u32 subseq_tmouts;             /* Number of timeouts in a row      */

static u8 *stage_name = "init",       /* Name of the current fuzz stage   */
          *stage_short,               /* Short stage name                 */
          *syncing_party;             /* Currently syncing with...        */

static s32 stage_cur, stage_max;      /* Stage progression                */
static s32 splicing_with = -1;        /* Splicing with which test case?   */

static u32 master_id, master_max;     /* Master instance job splitting    */

static u32 syncing_case;              /* Syncing with case #...           */

static s32 stage_cur_byte,            /* Byte offset of current stage op  */
           stage_cur_val;             /* Value used for stage op          */

static u8  stage_val_type;            /* Value type (STAGE_VAL_*)         */

static u64 stage_finds[32],           /* Patterns found per fuzz stage    */
           stage_cycles[32];          /* Execs per fuzz stage             */

static u32 rand_cnt;                  /* Random number counter            */

static u64 total_cal_us,              /* Total calibration time (us)      */
           total_cal_cycles;          /* Total calibration cycles         */

static u64 total_bitmap_size,         /* Total bit count for all bitmaps  */
           total_bitmap_entries;      /* Number of bitmaps counted        */

static s32 cpu_core_count;            /* CPU core count                   */

#ifdef HAVE_AFFINITY

static s32 cpu_aff = -1;              /* Selected CPU core                */

#endif /* HAVE_AFFINITY */

static FILE* plot_file;               /* Gnuplot output file              */

struct queue_entry {

  u8* fname;                          /* File name for the test case      */
  u32 len;                            /* Input length                     */

  u8  cal_failed,                     /* Calibration failed?              */
      trim_done,                      /* Trimmed?                         */
      was_fuzzed,                     /* Had any fuzzing done yet?        */
      passed_det,                     /* Deterministic stages passed?     */
      has_new_cov,                    /* Triggers new coverage?           */
      var_behavior,                   /* Variable behavior?               */
      favored,                        /* Currently favored?               */
      fs_redundant;                   /* Marked as redundant in the fs?   */

  u32 bitmap_size,                    /* Number of bits set in bitmap     */
      exec_cksum;                     /* Checksum of the execution trace  */

  u64 exec_us,                        /* Execution time (us)              */
      handicap,                       /* Number of queue cycles behind    */
      depth;                          /* Path depth                       */

  u8* trace_mini;                     /* Trace bytes, if kept             */
  u32 tc_ref;                         /* Trace bytes ref count            */

  struct queue_entry *next,           /* Next element, if any             */
                     *next_100;       /* 100 elements ahead               */

};

static struct queue_entry *queue,     /* Fuzzing queue (linked list)      */
                          *queue_cur, /* Current offset within the queue  */
                          *queue_top, /* Top of the list                  */
                          *q_prev100; /* Previous 100 marker              */

static struct queue_entry*
  top_rated[MAP_SIZE];                /* Top entries for bitmap bytes     */

struct extra_data {
  u8* data;                           /* Dictionary token data            */
  u32 len;                            /* Dictionary token length          */
  u32 hit_cnt;                        /* Use count in the corpus          */
};

static struct extra_data* extras;     /* Extra tokens to fuzz with        */
static u32 extras_cnt;                /* Total number of tokens read      */

static struct extra_data* a_extras;   /* Automatically selected extras    */
static u32 a_extras_cnt;              /* Total number of tokens available */

static u8* (*post_handler)(u8* buf, u32* len);

/* Interesting values, as per config.h */

static s8  interesting_8[]  = { INTERESTING_8 };
static s16 interesting_16[] = { INTERESTING_8, INTERESTING_16 };
static s32 interesting_32[] = { INTERESTING_8, INTERESTING_16, INTERESTING_32 };
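
/* With the stock config.h, INTERESTING_8 expands to boundary values such as
   -128, -1, 0, 1, 16, 32, 64, 100, and 127, with the 16- and 32-bit lists
   layering wider boundaries (e.g. -32768, 32767, 65535) on top. Values are
   quoted here for illustration only - config.h is authoritative. */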

/* Fuzzing stages */

enum {
  /* 00 */ STAGE_FLIP1,
  /* 01 */ STAGE_FLIP2,
  /* 02 */ STAGE_FLIP4,
  /* 03 */ STAGE_FLIP8,
  /* 04 */ STAGE_FLIP16,
  /* 05 */ STAGE_FLIP32,
  /* 06 */ STAGE_ARITH8,
  /* 07 */ STAGE_ARITH16,
  /* 08 */ STAGE_ARITH32,
  /* 09 */ STAGE_INTEREST8,
  /* 10 */ STAGE_INTEREST16,
  /* 11 */ STAGE_INTEREST32,
  /* 12 */ STAGE_EXTRAS_UO,
  /* 13 */ STAGE_EXTRAS_UI,
  /* 14 */ STAGE_EXTRAS_AO,
  /* 15 */ STAGE_HAVOC,
  /* 16 */ STAGE_SPLICE
};

/* Stage value types */

enum {
  /* 00 */ STAGE_VAL_NONE,
  /* 01 */ STAGE_VAL_LE,
  /* 02 */ STAGE_VAL_BE
};

/* Execution status fault codes */

enum {
  /* 00 */ FAULT_NONE,
  /* 01 */ FAULT_TMOUT,
  /* 02 */ FAULT_CRASH,
  /* 03 */ FAULT_ERROR,
  /* 04 */ FAULT_NOINST,
  /* 05 */ FAULT_NOBITS
};


/* Get unix time in milliseconds */

static u64 get_cur_time(void) {

  struct timeval tv;
  struct timezone tz;

  gettimeofday(&tv, &tz);

  return (tv.tv_sec * 1000ULL) + (tv.tv_usec / 1000);

}


/* Get unix time in microseconds */

static u64 get_cur_time_us(void) {

  struct timeval tv;
  struct timezone tz;

  gettimeofday(&tv, &tz);

  return (tv.tv_sec * 1000000ULL) + tv.tv_usec;

}


/* Generate a random number (from 0 to limit - 1). This may
   have slight bias. */

static inline u32 UR(u32 limit) {

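  /* Fast path: just decrement rand_cnt and call random(). Every so often
     (when rand_cnt hits zero) we pull fresh seed material from the
     /dev/urandom fd kept open at startup and reseed libc's PRNG. */
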
  if (unlikely(!rand_cnt--)) {

    u32 seed[2];

    ck_read(dev_urandom_fd, &seed, sizeof(seed), "/dev/urandom");

    srandom(seed[0]);
    rand_cnt = (RESEED_RNG / 2) + (seed[1] % RESEED_RNG);

  }

  return random() % limit;

}


/* Shuffle an array of pointers. Might be slightly biased. */

static void shuffle_ptrs(void** ptrs, u32 cnt) {

  u32 i;

  for (i = 0; i < cnt - 2; i++) {

    u32 j = i + UR(cnt - i);
    void *s = ptrs[i];
    ptrs[i] = ptrs[j];
    ptrs[j] = s;

  }

}


#ifdef HAVE_AFFINITY

/* Scan /proc for processes bound to specific cores, then try to grab a
   free core for ourselves; bails out if every core is taken. Assumes an
   upper bound of 4k CPUs. */

static void bind_to_free_cpu(void) {

  DIR* d;
  struct dirent* de;
  cpu_set_t c;

  u8 cpu_used[4096] = { 0 };
  u32 i;

  if (cpu_core_count < 2) return;

  if (getenv("AFL_NO_AFFINITY")) {

    WARNF("Not binding to a CPU core (AFL_NO_AFFINITY set).");
    return;

  }

  d = opendir("/proc");

  if (!d) {

    WARNF("Unable to access /proc - can't scan for free CPU cores.");
    return;

  }

  ACTF("Checking CPU core loadout...");

  /* Introduce some jitter, in case multiple AFL tasks are doing the same
     thing at the same time... */

  usleep(R(1000) * 250);

  /* Scan all /proc/<pid>/status entries, checking for Cpus_allowed_list.
     Flag all processes bound to a specific CPU using cpu_used[]. This will
     fail for some exotic binding setups, but is likely good enough in almost
     all real-world use cases. */

  while ((de = readdir(d))) {

    u8* fn;
    FILE* f;
    u8 tmp[MAX_LINE];
    u8 has_vmsize = 0;

    if (!isdigit(de->d_name[0])) continue;

    fn = alloc_printf("/proc/%s/status", de->d_name);

    if (!(f = fopen(fn, "r"))) {
      ck_free(fn);
      continue;
    }

    while (fgets(tmp, MAX_LINE, f)) {

      u32 hval;

      /* Processes without VmSize are probably kernel tasks. */

      if (!strncmp(tmp, "VmSize:\t", 8)) has_vmsize = 1;

      if (!strncmp(tmp, "Cpus_allowed_list:\t", 19) &&
          !strchr(tmp, '-') && !strchr(tmp, ',') &&
          sscanf(tmp + 19, "%u", &hval) == 1 && hval < sizeof(cpu_used) &&
          has_vmsize) {

        cpu_used[hval] = 1;
        break;

      }

    }

    ck_free(fn);
    fclose(f);

  }

  closedir(d);

  for (i = 0; i < cpu_core_count; i++) if (!cpu_used[i]) break;

  if (i == cpu_core_count) {

    SAYF("\n" cLRD "[-] " cRST
         "Uh-oh, looks like all %u CPU cores on your system are allocated to\n"
         "    other instances of afl-fuzz (or similar CPU-locked tasks). Starting\n"
         "    another fuzzer on this machine is probably a bad plan, but if you are\n"
         "    absolutely sure, you can set AFL_NO_AFFINITY and try again.\n",
         cpu_core_count);

    FATAL("No more free CPU cores");

  }

  OKF("Found a free CPU core, binding to #%u.", i);

  cpu_aff = i;

  CPU_ZERO(&c);
  CPU_SET(i, &c);

  if (sched_setaffinity(0, sizeof(c), &c))
    PFATAL("sched_setaffinity failed");

}

#endif /* HAVE_AFFINITY */

#ifndef IGNORE_FINDS

/* Helper function to compare buffers; returns first and last differing offset. We
   use this to find reasonable locations for splicing two files. */
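
/* For example, comparing "abcd" with "aXcY" (len = 4) sets *first to 1 and
   *last to 3; both stay -1 when the buffers are identical. */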

static void locate_diffs(u8* ptr1, u8* ptr2, u32 len, s32* first, s32* last) {

  s32 f_loc = -1;
  s32 l_loc = -1;
  u32 pos;

  for (pos = 0; pos < len; pos++) {

    if (*(ptr1++) != *(ptr2++)) {

      if (f_loc == -1) f_loc = pos;
      l_loc = pos;

    }

  }

  *first = f_loc;
  *last = l_loc;

  return;

}

#endif /* !IGNORE_FINDS */


/* Describe integer. Uses 12 cyclic static buffers for return values. The value
   returned should be five characters or less for all the integers we reasonably
   expect to see. */

static u8* DI(u64 val) {

  static u8 tmp[12][16];
  static u8 cur;

  cur = (cur + 1) % 12;

#define CHK_FORMAT(_divisor, _limit_mult, _fmt, _cast) do { \
    if (val < (_divisor) * (_limit_mult)) { \
      sprintf(tmp[cur], _fmt, ((_cast)val) / (_divisor)); \
      return tmp[cur]; \
    } \
  } while (0)
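
  /* For instance, DI(12345) skips the first rule (>= 10000) and hits the
     second, printing 12345 / 1000.0 with one decimal - "12.3k". */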

  /* 0-9999 */
  CHK_FORMAT(1, 10000, "%llu", u64);

  /* 10.0k - 99.9k */
  CHK_FORMAT(1000, 99.95, "%0.01fk", double);

  /* 100k - 999k */
  CHK_FORMAT(1000, 1000, "%lluk", u64);

  /* 1.00M - 9.99M */
  CHK_FORMAT(1000 * 1000, 9.995, "%0.02fM", double);

  /* 10.0M - 99.9M */
  CHK_FORMAT(1000 * 1000, 99.95, "%0.01fM", double);

  /* 100M - 999M */
  CHK_FORMAT(1000 * 1000, 1000, "%lluM", u64);

  /* 1.00G - 9.99G */
  CHK_FORMAT(1000LL * 1000 * 1000, 9.995, "%0.02fG", double);

  /* 10.0G - 99.9G */
  CHK_FORMAT(1000LL * 1000 * 1000, 99.95, "%0.01fG", double);

  /* 100G - 999G */
  CHK_FORMAT(1000LL * 1000 * 1000, 1000, "%lluG", u64);

  /* 1.00T - 9.99T */
  CHK_FORMAT(1000LL * 1000 * 1000 * 1000, 9.995, "%0.02fT", double);

  /* 10.0T - 99.9T */
  CHK_FORMAT(1000LL * 1000 * 1000 * 1000, 99.95, "%0.01fT", double);

  /* 100T+ */
  strcpy(tmp[cur], "infty");
  return tmp[cur];

}


/* Describe float. Similar to the above, except with a single
   static buffer. */

static u8* DF(double val) {

  static u8 tmp[16];

  if (val < 99.995) {
    sprintf(tmp, "%0.02f", val);
    return tmp;
  }

  if (val < 999.95) {
    sprintf(tmp, "%0.01f", val);
    return tmp;
  }

  return DI((u64)val);

}


/* Describe integer as memory size. */

static u8* DMS(u64 val) {

  static u8 tmp[12][16];
  static u8 cur;

  cur = (cur + 1) % 12;

  /* 0-9999 */
  CHK_FORMAT(1, 10000, "%llu B", u64);

  /* 10.0k - 99.9k */
  CHK_FORMAT(1024, 99.95, "%0.01f kB", double);

  /* 100k - 999k */
  CHK_FORMAT(1024, 1000, "%llu kB", u64);

  /* 1.00M - 9.99M */
  CHK_FORMAT(1024 * 1024, 9.995, "%0.02f MB", double);

  /* 10.0M - 99.9M */
  CHK_FORMAT(1024 * 1024, 99.95, "%0.01f MB", double);

  /* 100M - 999M */
  CHK_FORMAT(1024 * 1024, 1000, "%llu MB", u64);

  /* 1.00G - 9.99G */
  CHK_FORMAT(1024LL * 1024 * 1024, 9.995, "%0.02f GB", double);

  /* 10.0G - 99.9G */
  CHK_FORMAT(1024LL * 1024 * 1024, 99.95, "%0.01f GB", double);

  /* 100G - 999G */
  CHK_FORMAT(1024LL * 1024 * 1024, 1000, "%llu GB", u64);

  /* 1.00T - 9.99T */
  CHK_FORMAT(1024LL * 1024 * 1024 * 1024, 9.995, "%0.02f TB", double);

  /* 10.0T - 99.9T */
  CHK_FORMAT(1024LL * 1024 * 1024 * 1024, 99.95, "%0.01f TB", double);

#undef CHK_FORMAT

  /* 100T+ */
  strcpy(tmp[cur], "infty");
  return tmp[cur];

}


/* Describe time delta. Returns one static buffer, 34 chars or less. */

static u8* DTD(u64 cur_ms, u64 event_ms) {

  static u8 tmp[64];
  u64 delta;
  s32 t_d, t_h, t_m, t_s;

  if (!event_ms) return "none seen yet";

  delta = cur_ms - event_ms;

  t_d = delta / 1000 / 60 / 60 / 24;
  t_h = (delta / 1000 / 60 / 60) % 24;
  t_m = (delta / 1000 / 60) % 60;
  t_s = (delta / 1000) % 60;

  sprintf(tmp, "%s days, %u hrs, %u min, %u sec", DI(t_d), t_h, t_m, t_s);
  return tmp;

}


/* Mark deterministic checks as done for a particular queue entry. We use the
   .state file to avoid repeating deterministic fuzzing when resuming aborted
   scans. */

static void mark_as_det_done(struct queue_entry* q) {

  u8* fn = strrchr(q->fname, '/');
  s32 fd;

  fn = alloc_printf("%s/queue/.state/deterministic_done/%s", out_dir, fn + 1);

  fd = open(fn, O_WRONLY | O_CREAT | O_EXCL, 0600);
  if (fd < 0) PFATAL("Unable to create '%s'", fn);
  close(fd);

  ck_free(fn);

  q->passed_det = 1;

}


/* Mark as variable. Create symlinks if possible to make it easier to examine
   the files. */

static void mark_as_variable(struct queue_entry* q) {

  u8 *fn = strrchr(q->fname, '/') + 1, *ldest;

  ldest = alloc_printf("../../%s", fn);
  fn = alloc_printf("%s/queue/.state/variable_behavior/%s", out_dir, fn);

  if (symlink(ldest, fn)) {

    s32 fd = open(fn, O_WRONLY | O_CREAT | O_EXCL, 0600);
    if (fd < 0) PFATAL("Unable to create '%s'", fn);
    close(fd);

  }

  ck_free(ldest);
  ck_free(fn);

  q->var_behavior = 1;

}


/* Mark / unmark as redundant (edge-only). This is not used for restoring state,
   but may be useful for post-processing datasets. */

static void mark_as_redundant(struct queue_entry* q, u8 state) {

  u8* fn;
  s32 fd;

  if (state == q->fs_redundant) return;

  q->fs_redundant = state;

  fn = strrchr(q->fname, '/');
  fn = alloc_printf("%s/queue/.state/redundant_edges/%s", out_dir, fn + 1);

  if (state) {

    fd = open(fn, O_WRONLY | O_CREAT | O_EXCL, 0600);
    if (fd < 0) PFATAL("Unable to create '%s'", fn);
    close(fd);

  } else {

    if (unlink(fn)) PFATAL("Unable to remove '%s'", fn);

  }

  ck_free(fn);

}


/* Append new test case to the queue. */

static void add_to_queue(u8* fname, u32 len, u8 passed_det) {

  struct queue_entry* q = ck_alloc(sizeof(struct queue_entry));

  q->fname        = fname;
  q->len          = len;
  q->depth        = cur_depth + 1;
  q->passed_det   = passed_det;

  if (q->depth > max_depth) max_depth = q->depth;

  if (queue_top) {

    queue_top->next = q;
    queue_top = q;

  } else q_prev100 = queue = queue_top = q;

  queued_paths++;
  pending_not_fuzzed++;

  cycles_wo_finds = 0;

  if (!(queued_paths % 100)) {

    q_prev100->next_100 = q;
    q_prev100 = q;

  }

  last_path_time = get_cur_time();

}


/* Destroy the entire queue. */

EXP_ST void destroy_queue(void) {

  struct queue_entry *q = queue, *n;

  while (q) {

    n = q->next;
    ck_free(q->fname);
    ck_free(q->trace_mini);
    ck_free(q);
    q = n;

  }

}


/* Write bitmap to file. The bitmap is useful mostly for the secret
   -B option, to focus a separate fuzzing session on a particular
   interesting input without rediscovering all the others. */

EXP_ST void write_bitmap(void) {

  u8* fname;
  s32 fd;

  if (!bitmap_changed) return;
  bitmap_changed = 0;

  fname = alloc_printf("%s/fuzz_bitmap", out_dir);
  fd = open(fname, O_WRONLY | O_CREAT | O_TRUNC, 0600);

  if (fd < 0) PFATAL("Unable to open '%s'", fname);

  ck_write(fd, virgin_bits, MAP_SIZE, fname);

  close(fd);
  ck_free(fname);

}


/* Read bitmap from file. This is for the -B option again. */

EXP_ST void read_bitmap(u8* fname) {

  s32 fd = open(fname, O_RDONLY);

  if (fd < 0) PFATAL("Unable to open '%s'", fname);

  ck_read(fd, virgin_bits, MAP_SIZE, fname);

  close(fd);

}


/* Check if the current execution path brings anything new to the table.
   Update virgin bits to reflect the finds. Returns 1 if the only change is
   the hit-count for a particular tuple; 2 if there are new tuples seen.
   Updates the map, so subsequent calls will always return 0.

   This function is called after every exec() on a fairly large buffer, so
   it needs to be fast. We do this in 32-bit and 64-bit flavors. */
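
/* A concrete case: if a current[] byte is 0x20 while the corresponding
   virgin[] byte is still 0xff (never touched), that tuple is brand new and
   we return 2; if the virgin byte was already partially cleared by earlier
   runs, only the hit-count bucket changed and we return 1. */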

static inline u8 has_new_bits(u8* virgin_map) {

#ifdef __x86_64__

  u64* current = (u64*)trace_bits;
  u64* virgin  = (u64*)virgin_map;

  u32  i = (MAP_SIZE >> 3);

#else

  u32* current = (u32*)trace_bits;
  u32* virgin  = (u32*)virgin_map;

  u32  i = (MAP_SIZE >> 2);

#endif /* ^__x86_64__ */

  u8   ret = 0;

  while (i--) {

    /* Optimize for (*current & *virgin) == 0 - i.e., no bits in current bitmap
       that have not been already cleared from the virgin map - since this will
       almost always be the case. */

    if (unlikely(*current) && unlikely(*current & *virgin)) {

      if (likely(ret < 2)) {

        u8* cur = (u8*)current;
        u8* vir = (u8*)virgin;

        /* Looks like we have not found any new bytes yet; see if any non-zero
           bytes in current[] are pristine in virgin[]. */

#ifdef __x86_64__

        if ((cur[0] && vir[0] == 0xff) || (cur[1] && vir[1] == 0xff) ||
            (cur[2] && vir[2] == 0xff) || (cur[3] && vir[3] == 0xff) ||
            (cur[4] && vir[4] == 0xff) || (cur[5] && vir[5] == 0xff) ||
            (cur[6] && vir[6] == 0xff) || (cur[7] && vir[7] == 0xff)) ret = 2;
        else ret = 1;

#else

        if ((cur[0] && vir[0] == 0xff) || (cur[1] && vir[1] == 0xff) ||
            (cur[2] && vir[2] == 0xff) || (cur[3] && vir[3] == 0xff)) ret = 2;
        else ret = 1;

#endif /* ^__x86_64__ */

      }

      *virgin &= ~*current;

    }

    current++;
    virgin++;

  }

  if (ret && virgin_map == virgin_bits) bitmap_changed = 1;

  return ret;

}


/* Count the number of bits set in the provided bitmap. Used for the status
   screen several times every second, does not have to be fast. */

static u32 count_bits(u8* mem) {

  u32* ptr = (u32*)mem;
  u32  i   = (MAP_SIZE >> 2);
  u32  ret = 0;

  while (i--) {

    u32 v = *(ptr++);

    /* This gets called on the inverse, virgin bitmap; optimize for sparse
       data. */

    if (v == 0xffffffff) {
      ret += 32;
      continue;
    }

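    /* Standard SWAR population count: add up bits in 2-, 4-, and 8-bit
       chunks, then use the 0x01010101 multiply to sum the four byte counts
       into the top byte. */
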
    v -= ((v >> 1) & 0x55555555);
    v = (v & 0x33333333) + ((v >> 2) & 0x33333333);
    ret += (((v + (v >> 4)) & 0xF0F0F0F) * 0x01010101) >> 24;

  }

  return ret;

}


#define FF(_b)  (0xff << ((_b) << 3))
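
/* E.g., FF(1) evaluates to 0x0000ff00 - a mask for byte 1 of a u32. */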

/* Count the number of bytes set in the bitmap. Called fairly sporadically,
   mostly to update the status screen or calibrate and examine confirmed
   new paths. */

static u32 count_bytes(u8* mem) {

  u32* ptr = (u32*)mem;
  u32  i   = (MAP_SIZE >> 2);
  u32  ret = 0;

  while (i--) {

    u32 v = *(ptr++);

    if (!v) continue;
    if (v & FF(0)) ret++;
    if (v & FF(1)) ret++;
    if (v & FF(2)) ret++;
    if (v & FF(3)) ret++;

  }

  return ret;

}


/* Count the number of non-255 bytes set in the bitmap. Used strictly for the
   status screen, several calls per second or so. */

static u32 count_non_255_bytes(u8* mem) {

  u32* ptr = (u32*)mem;
  u32  i   = (MAP_SIZE >> 2);
  u32  ret = 0;

  while (i--) {

    u32 v = *(ptr++);

    /* This is called on the virgin bitmap, so optimize for the most likely
       case. */

    if (v == 0xffffffff) continue;
    if ((v & FF(0)) != FF(0)) ret++;
    if ((v & FF(1)) != FF(1)) ret++;
    if ((v & FF(2)) != FF(2)) ret++;
    if ((v & FF(3)) != FF(3)) ret++;

  }

  return ret;

}


/* Destructively simplify trace by eliminating hit count information
   and replacing it with 0x80 or 0x01 depending on whether the tuple
   is hit or not. Called on every new crash or timeout, should be
   reasonably fast. */

static const u8 simplify_lookup[256] = {

  [0]         = 1,
  [1 ... 255] = 128

};
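
/* After simplify_trace() below, every map byte is exactly 0x01 (tuple not
   hit) or 0x80 (hit at least once), so crash and timeout deduplication can
   ignore hit counts altogether. */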

#ifdef __x86_64__

static void simplify_trace(u64* mem) {

  u32 i = MAP_SIZE >> 3;

  while (i--) {

    /* Optimize for sparse bitmaps. */

    if (unlikely(*mem)) {

      u8* mem8 = (u8*)mem;

      mem8[0] = simplify_lookup[mem8[0]];
      mem8[1] = simplify_lookup[mem8[1]];
      mem8[2] = simplify_lookup[mem8[2]];
      mem8[3] = simplify_lookup[mem8[3]];
      mem8[4] = simplify_lookup[mem8[4]];
      mem8[5] = simplify_lookup[mem8[5]];
      mem8[6] = simplify_lookup[mem8[6]];
      mem8[7] = simplify_lookup[mem8[7]];

    } else *mem = 0x0101010101010101ULL;

    mem++;

  }

}

#else

static void simplify_trace(u32* mem) {

  u32 i = MAP_SIZE >> 2;

  while (i--) {

    /* Optimize for sparse bitmaps. */

    if (unlikely(*mem)) {

      u8* mem8 = (u8*)mem;

      mem8[0] = simplify_lookup[mem8[0]];
      mem8[1] = simplify_lookup[mem8[1]];
      mem8[2] = simplify_lookup[mem8[2]];
      mem8[3] = simplify_lookup[mem8[3]];

    } else *mem = 0x01010101;

    mem++;
  }

}

#endif /* ^__x86_64__ */


/* Destructively classify execution counts in a trace. This is used as a
   preprocessing step for any newly acquired traces. Called on every exec,
   must be fast. */

static const u8 count_class_lookup8[256] = {

  [0]           = 0,
  [1]           = 1,
  [2]           = 2,
  [3]           = 4,
  [4 ... 7]     = 8,
  [8 ... 15]    = 16,
  [16 ... 31]   = 32,
  [32 ... 127]  = 64,
  [128 ... 255] = 128

};
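
/* Example: a tuple hit 13 times lands in the 8 ... 15 bucket and is stored
   as 16. The 16-bit table below just glues two of these 8-bit lookups
   together, letting classify_counts() bucket two adjacent map bytes per
   table access. */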

static u16 count_class_lookup16[65536];


EXP_ST void init_count_class16(void) {

  u32 b1, b2;

  for (b1 = 0; b1 < 256; b1++)
    for (b2 = 0; b2 < 256; b2++)
      count_class_lookup16[(b1 << 8) + b2] =
        (count_class_lookup8[b1] << 8) |
        count_class_lookup8[b2];

}


#ifdef __x86_64__

static inline void classify_counts(u64* mem) {

  u32 i = MAP_SIZE >> 3;

  while (i--) {

    /* Optimize for sparse bitmaps. */

    if (unlikely(*mem)) {

      u16* mem16 = (u16*)mem;

      mem16[0] = count_class_lookup16[mem16[0]];
      mem16[1] = count_class_lookup16[mem16[1]];
      mem16[2] = count_class_lookup16[mem16[2]];
      mem16[3] = count_class_lookup16[mem16[3]];

    }

    mem++;

  }

}

#else

static inline void classify_counts(u32* mem) {

  u32 i = MAP_SIZE >> 2;

  while (i--) {

    /* Optimize for sparse bitmaps. */

    if (unlikely(*mem)) {

      u16* mem16 = (u16*)mem;

      mem16[0] = count_class_lookup16[mem16[0]];
      mem16[1] = count_class_lookup16[mem16[1]];

    }

    mem++;

  }

}

#endif /* ^__x86_64__ */


/* Get rid of shared memory (atexit handler). */

static void remove_shm(void) {

  shmctl(shm_id, IPC_RMID, NULL);

}


/* Compact trace bytes into a smaller bitmap. We effectively just drop the
   count information here. This is called only sporadically, for some
   new paths. */
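
/* With the default 64 kB MAP_SIZE from config.h, the result is an 8 kB
   bitmap where bit i is set iff src[i] was non-zero. */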

static void minimize_bits(u8* dst, u8* src) {

  u32 i = 0;

  while (i < MAP_SIZE) {

    if (*(src++)) dst[i >> 3] |= 1 << (i & 7);
    i++;

  }

}


/* When we bump into a new path, we call this to see if the path appears
   more "favorable" than any of the existing ones. The purpose of the
   "favorables" is to have a minimal set of paths that trigger all the bits
   seen in the bitmap so far, and focus on fuzzing them at the expense of
   the rest.

   The first step of the process is to maintain a list of top_rated[] entries
   for every byte in the bitmap. We win that slot if there is no previous
   contender, or if the contender has a more favorable speed x size factor. */
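
/* The speed x size factor is literally exec_us * len: e.g., a 100-byte case
   executing in 1000 us scores 100,000 and beats a 200-byte case of the same
   speed scoring 200,000. Lower is better. */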

static void update_bitmap_score(struct queue_entry* q) {

  u32 i;
  u64 fav_factor = q->exec_us * q->len;

  /* For every byte set in trace_bits[], see if there is a previous winner,
     and how it compares to us. */

  for (i = 0; i < MAP_SIZE; i++)

    if (trace_bits[i]) {

       if (top_rated[i]) {

         /* Faster-executing or smaller test cases are favored. */

         if (fav_factor > top_rated[i]->exec_us * top_rated[i]->len) continue;

         /* Looks like we're going to win. Decrease ref count for the
            previous winner, discard its trace_bits[] if necessary. */

         if (!--top_rated[i]->tc_ref) {
           ck_free(top_rated[i]->trace_mini);
           top_rated[i]->trace_mini = 0;
         }

       }

       /* Insert ourselves as the new winner. */

       top_rated[i] = q;
       q->tc_ref++;

       if (!q->trace_mini) {
         q->trace_mini = ck_alloc(MAP_SIZE >> 3);
         minimize_bits(q->trace_mini, trace_bits);
       }

       score_changed = 1;

     }

}


/* The second part of the mechanism discussed above is a routine that
   goes over top_rated[] entries, and then sequentially grabs winners for
   previously-unseen bytes (temp_v) and marks them as favored, at least
   until the next run. The favored entries are given more air time during
   all fuzzing steps. */
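
/* In other words, this is a greedy pass over a set-cover problem: keep
   picking the top-rated entry for some still-uncovered bitmap byte, then
   cross off every byte that entry covers. */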

static void cull_queue(void) {

  struct queue_entry* q;
  static u8 temp_v[MAP_SIZE >> 3];
  u32 i;

  if (dumb_mode || !score_changed) return;

  score_changed = 0;

  memset(temp_v, 255, MAP_SIZE >> 3);

  queued_favored  = 0;
  pending_favored = 0;

  q = queue;

  while (q) {
    q->favored = 0;
    q = q->next;
  }

  /* Let's see if anything in the bitmap isn't captured in temp_v.
     If yes, and if it has a top_rated[] contender, let's use it. */

  for (i = 0; i < MAP_SIZE; i++)
    if (top_rated[i] && (temp_v[i >> 3] & (1 << (i & 7)))) {

      u32 j = MAP_SIZE >> 3;

      /* Remove all bits belonging to the current entry from temp_v. */

      while (j--)
        if (top_rated[i]->trace_mini[j])
          temp_v[j] &= ~top_rated[i]->trace_mini[j];

      top_rated[i]->favored = 1;
      queued_favored++;

      if (!top_rated[i]->was_fuzzed) pending_favored++;

    }

  q = queue;

  while (q) {
    mark_as_redundant(q, !q->favored);
    q = q->next;
  }

}


/* Configure shared memory and virgin_bits. This is called at startup. */

EXP_ST void setup_shm(void) {

  u8* shm_str;

  if (!in_bitmap) memset(virgin_bits, 255, MAP_SIZE);

  memset(virgin_tmout, 255, MAP_SIZE);
  memset(virgin_crash, 255, MAP_SIZE);

  shm_id = shmget(IPC_PRIVATE, MAP_SIZE, IPC_CREAT | IPC_EXCL | 0600);

  if (shm_id < 0) PFATAL("shmget() failed");

  atexit(remove_shm);

  shm_str = alloc_printf("%d", shm_id);

  /* If somebody is asking us to fuzz instrumented binaries in dumb mode,
     we don't want them to detect instrumentation, since we won't be sending
     fork server commands. This should be replaced with better auto-detection
     later on, perhaps? */

  if (!dumb_mode) setenv(SHM_ENV_VAR, shm_str, 1);

  ck_free(shm_str);

  trace_bits = shmat(shm_id, NULL, 0);

  if (!trace_bits) PFATAL("shmat() failed");

}


/* Load postprocessor, if available. */

static void setup_post(void) {

  void* dh;
  u8* fn = getenv("AFL_POST_LIBRARY");
  u32 tlen = 6;

  if (!fn) return;

  ACTF("Loading postprocessor from '%s'...", fn);

  dh = dlopen(fn, RTLD_NOW);
  if (!dh) FATAL("%s", dlerror());

  post_handler = dlsym(dh, "afl_postprocess");
  if (!post_handler) FATAL("Symbol 'afl_postprocess' not found.");

  /* Do a quick test. It's better to segfault now than later =) */

  post_handler("hello", &tlen);

  OKF("Postprocessor installed successfully.");

}


/* Read all testcases from the input directory, then queue them for testing.
   Called at startup. */

static void read_testcases(void) {

  struct dirent **nl;
  s32 nl_cnt;
  u32 i;
  u8* fn;

  /* Auto-detect non-in-place resumption attempts. */

  fn = alloc_printf("%s/queue", in_dir);
  if (!access(fn, F_OK)) in_dir = fn; else ck_free(fn);

  ACTF("Scanning '%s'...", in_dir);

  /* We use scandir() + alphasort() rather than readdir() because otherwise,
     the ordering of test cases would vary somewhat randomly and would be
     difficult to control. */

  nl_cnt = scandir(in_dir, &nl, NULL, alphasort);

  if (nl_cnt < 0) {

    if (errno == ENOENT || errno == ENOTDIR)

      SAYF("\n" cLRD "[-] " cRST
           "The input directory does not seem to be valid - try again. The fuzzer needs\n"
           "    one or more test cases to start with - ideally, a small file under 1 kB\n"
           "    or so. The cases must be stored as regular files directly in the input\n"
           "    directory.\n");

    PFATAL("Unable to open '%s'", in_dir);

  }

  if (shuffle_queue && nl_cnt > 1) {

    ACTF("Shuffling queue...");
    shuffle_ptrs((void**)nl, nl_cnt);

  }

  for (i = 0; i < nl_cnt; i++) {

    struct stat st;

    u8* fn = alloc_printf("%s/%s", in_dir, nl[i]->d_name);
    u8* dfn = alloc_printf("%s/.state/deterministic_done/%s", in_dir, nl[i]->d_name);

    u8  passed_det = 0;

    free(nl[i]); /* not tracked */

    if (lstat(fn, &st) || access(fn, R_OK))
      PFATAL("Unable to access '%s'", fn);

    /* This also takes care of . and .. */

    if (!S_ISREG(st.st_mode) || !st.st_size || strstr(fn, "/README.txt")) {

      ck_free(fn);
      ck_free(dfn);
      continue;

    }

    if (st.st_size > MAX_FILE)
      FATAL("Test case '%s' is too big (%s, limit is %s)", fn,
            DMS(st.st_size), DMS(MAX_FILE));

    /* Check for metadata that indicates that deterministic fuzzing
       is complete for this entry. We don't want to repeat deterministic
       fuzzing when resuming aborted scans, because it would be pointless
       and probably very time-consuming. */

    if (!access(dfn, F_OK)) passed_det = 1;
    ck_free(dfn);

    add_to_queue(fn, st.st_size, passed_det);

  }

  free(nl); /* not tracked */

  if (!queued_paths) {

    SAYF("\n" cLRD "[-] " cRST
         "Looks like there are no valid test cases in the input directory! The fuzzer\n"
         "    needs one or more test cases to start with - ideally, a small file under\n"
         "    1 kB or so. The cases must be stored as regular files directly in the\n"
         "    input directory.\n");

    FATAL("No usable test cases in '%s'", in_dir);

  }

  last_path_time = 0;
  queued_at_start = queued_paths;

}


/* Helper function for load_extras. */

static int compare_extras_len(const void* p1, const void* p2) {
  struct extra_data *e1 = (struct extra_data*)p1,
                    *e2 = (struct extra_data*)p2;

  return e1->len - e2->len;
}

static int compare_extras_use_d(const void* p1, const void* p2) {
  struct extra_data *e1 = (struct extra_data*)p1,
                    *e2 = (struct extra_data*)p2;

  return e2->hit_cnt - e1->hit_cnt;
}


/* Read extras from a file, sort by size. */
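
/* The format accepted below is an optional label of alphanumerics and
   underscores, an optional @<level> suffix, '=', and a double-quoted value
   that may use \\, \" and \xNN escapes. A hypothetical line for
   illustration:

     header_kw@2="\x50\x4b\x03\x04"
*/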
1524 
load_extras_file(u8 * fname,u32 * min_len,u32 * max_len,u32 dict_level)1525 static void load_extras_file(u8* fname, u32* min_len, u32* max_len,
1526                              u32 dict_level) {
1527 
1528   FILE* f;
1529   u8  buf[MAX_LINE];
1530   u8  *lptr;
1531   u32 cur_line = 0;
1532 
1533   f = fopen(fname, "r");
1534 
1535   if (!f) PFATAL("Unable to open '%s'", fname);
1536 
1537   while ((lptr = fgets(buf, MAX_LINE, f))) {
1538 
1539     u8 *rptr, *wptr;
1540     u32 klen = 0;
1541 
1542     cur_line++;
1543 
1544     /* Trim on left and right. */
1545 
1546     while (isspace(*lptr)) lptr++;
1547 
1548     rptr = lptr + strlen(lptr) - 1;
1549     while (rptr >= lptr && isspace(*rptr)) rptr--;
1550     rptr++;
1551     *rptr = 0;
1552 
1553     /* Skip empty lines and comments. */
1554 
1555     if (!*lptr || *lptr == '#') continue;
1556 
1557     /* All other lines must end with '"', which we can consume. */
1558 
1559     rptr--;
1560 
1561     if (rptr < lptr || *rptr != '"')
1562       FATAL("Malformed name=\"value\" pair in line %u.", cur_line);
1563 
1564     *rptr = 0;
1565 
1566     /* Skip alphanumerics and dashes (label). */
1567 
1568     while (isalnum(*lptr) || *lptr == '_') lptr++;
1569 
1570     /* If @number follows, parse that. */
1571 
1572     if (*lptr == '@') {
1573 
1574       lptr++;
1575       if (atoi(lptr) > dict_level) continue;
1576       while (isdigit(*lptr)) lptr++;
1577 
1578     }
1579 
1580     /* Skip whitespace and = signs. */
1581 
1582     while (isspace(*lptr) || *lptr == '=') lptr++;
1583 
1584     /* Consume opening '"'. */
1585 
1586     if (*lptr != '"')
1587       FATAL("Malformed name=\"keyword\" pair in line %u.", cur_line);
1588 
1589     lptr++;
1590 
1591     if (!*lptr) FATAL("Empty keyword in line %u.", cur_line);
1592 
1593     /* Okay, let's allocate memory and copy data between "...", handling
1594        \xNN escaping, \\, and \". */
1595 
1596     extras = ck_realloc_block(extras, (extras_cnt + 1) *
1597                sizeof(struct extra_data));
1598 
1599     wptr = extras[extras_cnt].data = ck_alloc(rptr - lptr);
1600 
1601     while (*lptr) {
1602 
1603       char* hexdigits = "0123456789abcdef";
1604 
1605       switch (*lptr) {
1606 
1607         case 1 ... 31:
1608         case 128 ... 255:
1609           FATAL("Non-printable characters in line %u.", cur_line);
1610 
1611         case '\\':
1612 
1613           lptr++;
1614 
1615           if (*lptr == '\\' || *lptr == '"') {
1616             *(wptr++) = *(lptr++);
1617             klen++;
1618             break;
1619           }
1620 
1621           if (*lptr != 'x' || !isxdigit(lptr[1]) || !isxdigit(lptr[2]))
1622             FATAL("Invalid escaping (not \\xNN) in line %u.", cur_line);
1623 
1624           *(wptr++) =
1625             ((strchr(hexdigits, tolower(lptr[1])) - hexdigits) << 4) |
1626             (strchr(hexdigits, tolower(lptr[2])) - hexdigits);
1627 
1628           lptr += 3;
1629           klen++;
1630 
1631           break;
1632 
1633         default:
1634 
1635           *(wptr++) = *(lptr++);
1636           klen++;
1637 
1638       }
1639 
1640     }
1641 
1642     extras[extras_cnt].len = klen;
1643 
1644     if (extras[extras_cnt].len > MAX_DICT_FILE)
1645       FATAL("Keyword too big in line %u (%s, limit is %s)", cur_line,
1646             DMS(klen), DMS(MAX_DICT_FILE));
1647 
1648     if (*min_len > klen) *min_len = klen;
1649     if (*max_len < klen) *max_len = klen;
1650 
1651     extras_cnt++;
1652 
1653   }
1654 
1655   fclose(f);
1656 
1657 }
1658 
1659 
1660 /* Read extras from the extras directory and sort them by size. */
1661 
load_extras(u8 * dir)1662 static void load_extras(u8* dir) {
1663 
1664   DIR* d;
1665   struct dirent* de;
1666   u32 min_len = MAX_DICT_FILE, max_len = 0, dict_level = 0;
1667   u8* x;
1668 
1669   /* If the name ends with @, extract level and continue. */
1670 
1671   if ((x = strchr(dir, '@'))) {
1672 
1673     *x = 0;
1674     dict_level = atoi(x + 1);
1675 
1676   }
1677 
1678   ACTF("Loading extra dictionary from '%s' (level %u)...", dir, dict_level);
1679 
1680   d = opendir(dir);
1681 
1682   if (!d) {
1683 
1684     if (errno == ENOTDIR) {
1685       load_extras_file(dir, &min_len, &max_len, dict_level);
1686       goto check_and_sort;
1687     }
1688 
1689     PFATAL("Unable to open '%s'", dir);
1690 
1691   }
1692 
1693   if (x) FATAL("Dictionary levels not supported for directories.");
1694 
1695   while ((de = readdir(d))) {
1696 
1697     struct stat st;
1698     u8* fn = alloc_printf("%s/%s", dir, de->d_name);
1699     s32 fd;
1700 
1701     if (lstat(fn, &st) || access(fn, R_OK))
1702       PFATAL("Unable to access '%s'", fn);
1703 
1704     /* This also takes care of . and .. */
1705     if (!S_ISREG(st.st_mode) || !st.st_size) {
1706 
1707       ck_free(fn);
1708       continue;
1709 
1710     }
1711 
1712     if (st.st_size > MAX_DICT_FILE)
1713       FATAL("Extra '%s' is too big (%s, limit is %s)", fn,
1714             DMS(st.st_size), DMS(MAX_DICT_FILE));
1715 
1716     if (min_len > st.st_size) min_len = st.st_size;
1717     if (max_len < st.st_size) max_len = st.st_size;
1718 
1719     extras = ck_realloc_block(extras, (extras_cnt + 1) *
1720                sizeof(struct extra_data));
1721 
1722     extras[extras_cnt].data = ck_alloc(st.st_size);
1723     extras[extras_cnt].len  = st.st_size;
1724 
1725     fd = open(fn, O_RDONLY);
1726 
1727     if (fd < 0) PFATAL("Unable to open '%s'", fn);
1728 
1729     ck_read(fd, extras[extras_cnt].data, st.st_size, fn);
1730 
1731     close(fd);
1732     ck_free(fn);
1733 
1734     extras_cnt++;
1735 
1736   }
1737 
1738   closedir(d);
1739 
1740 check_and_sort:
1741 
1742   if (!extras_cnt) FATAL("No usable files in '%s'", dir);
1743 
1744   qsort(extras, extras_cnt, sizeof(struct extra_data), compare_extras_len);
1745 
1746   OKF("Loaded %u extra tokens, size range %s to %s.", extras_cnt,
1747       DMS(min_len), DMS(max_len));
1748 
1749   if (max_len > 32)
1750     WARNF("Some tokens are relatively large (%s) - consider trimming.",
1751           DMS(max_len));
1752 
1753   if (extras_cnt > MAX_DET_EXTRAS)
1754     WARNF("More than %u tokens - will use them probabilistically.",
1755           MAX_DET_EXTRAS);
1756 
1757 }
1758 
1759 
1760 
1761 
1762 /* Helper function for maybe_add_auto() */
1763 
memcmp_nocase(u8 * m1,u8 * m2,u32 len)1764 static inline u8 memcmp_nocase(u8* m1, u8* m2, u32 len) {
1765 
1766   while (len--) if (tolower(*(m1++)) ^ tolower(*(m2++))) return 1;
1767   return 0;
1768 
1769 }
1770 
1771 
1772 /* Maybe add automatic extra. */
1773 
maybe_add_auto(u8 * mem,u32 len)1774 static void maybe_add_auto(u8* mem, u32 len) {
1775 
1776   u32 i;
1777 
1778   /* Allow users to specify that they don't want auto dictionaries. */
1779 
1780   if (!MAX_AUTO_EXTRAS || !USE_AUTO_EXTRAS) return;
1781 
1782   /* Skip runs of identical bytes. */
1783 
1784   for (i = 1; i < len; i++)
1785     if (mem[0] ^ mem[i]) break;
1786 
1787   if (i == len) return;
1788 
1789   /* Reject builtin interesting values. */
1790 
1791   if (len == 2) {
1792 
1793     i = sizeof(interesting_16) >> 1;
1794 
1795     while (i--)
1796       if (*((u16*)mem) == interesting_16[i] ||
1797           *((u16*)mem) == SWAP16(interesting_16[i])) return;
1798 
1799   }
1800 
1801   if (len == 4) {
1802 
1803     i = sizeof(interesting_32) >> 2;
1804 
1805     while (i--)
1806       if (*((u32*)mem) == interesting_32[i] ||
1807           *((u32*)mem) == SWAP32(interesting_32[i])) return;
1808 
1809   }
1810 
1811   /* Reject anything that matches existing extras. Do a case-insensitive
1812      match. We optimize by exploiting the fact that extras[] are sorted
1813      by size. */
1814 
1815   for (i = 0; i < extras_cnt; i++)
1816     if (extras[i].len >= len) break;
1817 
1818   for (; i < extras_cnt && extras[i].len == len; i++)
1819     if (!memcmp_nocase(extras[i].data, mem, len)) return;
1820 
1821   /* Last but not least, check a_extras[] for matches. There are no
1822      guarantees of a particular sort order. */
1823 
1824   auto_changed = 1;
1825 
1826   for (i = 0; i < a_extras_cnt; i++) {
1827 
1828     if (a_extras[i].len == len && !memcmp_nocase(a_extras[i].data, mem, len)) {
1829 
1830       a_extras[i].hit_cnt++;
1831       goto sort_a_extras;
1832 
1833     }
1834 
1835   }
1836 
1837   /* At this point, looks like we're dealing with a new entry. So, let's
1838      append it if we have room. Otherwise, let's randomly evict some other
1839      entry from the bottom half of the list. */
1840 
1841   if (a_extras_cnt < MAX_AUTO_EXTRAS) {
1842 
1843     a_extras = ck_realloc_block(a_extras, (a_extras_cnt + 1) *
1844                                 sizeof(struct extra_data));
1845 
1846     a_extras[a_extras_cnt].data = ck_memdup(mem, len);
1847     a_extras[a_extras_cnt].len  = len;
1848     a_extras_cnt++;
1849 
1850   } else {
1851 
1852     i = MAX_AUTO_EXTRAS / 2 +
1853         UR((MAX_AUTO_EXTRAS + 1) / 2);
1854 
1855     ck_free(a_extras[i].data);
1856 
1857     a_extras[i].data    = ck_memdup(mem, len);
1858     a_extras[i].len     = len;
1859     a_extras[i].hit_cnt = 0;
1860 
1861   }
1862 
1863 sort_a_extras:
1864 
1865   /* First, sort all auto extras by use count, descending order. */
1866 
1867   qsort(a_extras, a_extras_cnt, sizeof(struct extra_data),
1868         compare_extras_use_d);
1869 
1870   /* Then, sort the top USE_AUTO_EXTRAS entries by size. */
1871 
1872   qsort(a_extras, MIN(USE_AUTO_EXTRAS, a_extras_cnt),
1873         sizeof(struct extra_data), compare_extras_len);
1874 
1875 }
1876 
1877 
1878 /* Save automatically generated extras. */
1879 
save_auto(void)1880 static void save_auto(void) {
1881 
1882   u32 i;
1883 
1884   if (!auto_changed) return;
1885   auto_changed = 0;
1886 
1887   for (i = 0; i < MIN(USE_AUTO_EXTRAS, a_extras_cnt); i++) {
1888 
1889     u8* fn = alloc_printf("%s/queue/.state/auto_extras/auto_%06u", out_dir, i);
1890     s32 fd;
1891 
1892     fd = open(fn, O_WRONLY | O_CREAT | O_TRUNC, 0600);
1893 
1894     if (fd < 0) PFATAL("Unable to create '%s'", fn);
1895 
1896     ck_write(fd, a_extras[i].data, a_extras[i].len, fn);
1897 
1898     close(fd);
1899     ck_free(fn);
1900 
1901   }
1902 
1903 }
1904 
1905 
1906 /* Load automatically generated extras. */
1907 
1908 static void load_auto(void) {
1909 
1910   u32 i;
1911 
1912   for (i = 0; i < USE_AUTO_EXTRAS; i++) {
1913 
1914     u8  tmp[MAX_AUTO_EXTRA + 1];
1915     u8* fn = alloc_printf("%s/.state/auto_extras/auto_%06u", in_dir, i);
1916     s32 fd, len;
1917 
1918     fd = open(fn, O_RDONLY, 0600);
1919 
1920     if (fd < 0) {
1921 
1922       if (errno != ENOENT) PFATAL("Unable to open '%s'", fn);
1923       ck_free(fn);
1924       break;
1925 
1926     }
1927 
1928     /* We read one byte more to cheaply detect tokens that are too
1929        long (and skip them). */
1930 
1931     len = read(fd, tmp, MAX_AUTO_EXTRA + 1);
1932 
1933     if (len < 0) PFATAL("Unable to read from '%s'", fn);
1934 
1935     if (len >= MIN_AUTO_EXTRA && len <= MAX_AUTO_EXTRA)
1936       maybe_add_auto(tmp, len);
1937 
1938     close(fd);
1939     ck_free(fn);
1940 
1941   }
1942 
1943   if (i) OKF("Loaded %u auto-discovered dictionary tokens.", i);
1944   else OKF("No auto-generated dictionary tokens to reuse.");
1945 
1946 }
1947 
1948 
1949 /* Destroy extras. */
1950 
1951 static void destroy_extras(void) {
1952 
1953   u32 i;
1954 
1955   for (i = 0; i < extras_cnt; i++)
1956     ck_free(extras[i].data);
1957 
1958   ck_free(extras);
1959 
1960   for (i = 0; i < a_extras_cnt; i++)
1961     ck_free(a_extras[i].data);
1962 
1963   ck_free(a_extras);
1964 
1965 }
1966 
1967 
1968 /* Spin up fork server (instrumented mode only). The idea is explained here:
1969 
1970    http://lcamtuf.blogspot.com/2014/10/fuzzing-binaries-without-execve.html
1971 
1972    In essence, the instrumentation allows us to skip execve(), and just keep
1973    cloning a stopped child. So, we just execute once, and then send commands
1974    through a pipe. The other part of this logic is in afl-as.h. */
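
/* For illustration, a stripped-down sketch of the target-side half of this
   protocol. This is NOT the real injected code - that lives in afl-as.h and
   handles more corner cases (e.g., persistent mode) - and the function name
   below is invented for the example; it only shows the shape of the
   handshake that init_forkserver() relies on. */

static void forkserver_protocol_sketch(void) {

  u8  tmp[4];
  s32 child_pid;
  int status;

  /* Phone home: the four-byte "hello" that init_forkserver() waits for. */

  if (write(FORKSRV_FD + 1, tmp, 4) != 4) return;

  while (1) {

    /* Block until afl-fuzz requests a new run over the control pipe. */

    if (read(FORKSRV_FD, tmp, 4) != 4) exit(1);

    child_pid = fork();

    if (child_pid < 0) exit(1);

    /* Child: close the server fds and resume normal execution from the
       point where the fork server was first spun up. */

    if (!child_pid) {

      close(FORKSRV_FD);
      close(FORKSRV_FD + 1);
      return;

    }

    /* Parent: report the PID, wait for the child, relay its exit status. */

    if (write(FORKSRV_FD + 1, &child_pid, 4) != 4) exit(1);
    if (waitpid(child_pid, &status, 0) < 0) exit(1);
    if (write(FORKSRV_FD + 1, &status, 4) != 4) exit(1);

  }

}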
1975 
1976 EXP_ST void init_forkserver(char** argv) {
1977 
1978   static struct itimerval it;
1979   int st_pipe[2], ctl_pipe[2];
1980   int status;
1981   s32 rlen;
1982 
1983   ACTF("Spinning up the fork server...");
1984 
1985   if (pipe(st_pipe) || pipe(ctl_pipe)) PFATAL("pipe() failed");
1986 
1987   forksrv_pid = fork();
1988 
1989   if (forksrv_pid < 0) PFATAL("fork() failed");
1990 
1991   if (!forksrv_pid) {
1992 
1993     struct rlimit r;
1994 
1995     /* Umpf. On OpenBSD, the default fd limit for root users is set to
1996        soft 128. Let's try to fix that... */
1997 
1998     if (!getrlimit(RLIMIT_NOFILE, &r) && r.rlim_cur < FORKSRV_FD + 2) {
1999 
2000       r.rlim_cur = FORKSRV_FD + 2;
2001       setrlimit(RLIMIT_NOFILE, &r); /* Ignore errors */
2002 
2003     }
2004 
2005     if (mem_limit) {
2006 
2007       r.rlim_max = r.rlim_cur = ((rlim_t)mem_limit) << 20;
2008 
2009 #ifdef RLIMIT_AS
2010 
2011       setrlimit(RLIMIT_AS, &r); /* Ignore errors */
2012 
2013 #else
2014 
2015       /* This takes care of OpenBSD, which doesn't have RLIMIT_AS, but
2016          according to reliable sources, RLIMIT_DATA covers anonymous
2017          maps - so we should be getting good protection against OOM bugs. */
2018 
2019       setrlimit(RLIMIT_DATA, &r); /* Ignore errors */
2020 
2021 #endif /* ^RLIMIT_AS */
2022 
2023 
2024     }
2025 
2026     /* Dumping cores is slow and can lead to anomalies if SIGKILL is delivered
2027        before the dump is complete. */
2028 
2029     r.rlim_max = r.rlim_cur = 0;
2030 
2031     setrlimit(RLIMIT_CORE, &r); /* Ignore errors */
2032 
2033     /* Isolate the process and configure standard descriptors. If out_file is
2034        specified, stdin is /dev/null; otherwise, out_fd is cloned instead. */
2035 
2036     setsid();
2037 
2038     dup2(dev_null_fd, 1);
2039     dup2(dev_null_fd, 2);
2040 
2041     if (out_file) {
2042 
2043       dup2(dev_null_fd, 0);
2044 
2045     } else {
2046 
2047       dup2(out_fd, 0);
2048       close(out_fd);
2049 
2050     }
2051 
2052     /* Set up control and status pipes, close the unneeded original fds. */
2053 
2054     if (dup2(ctl_pipe[0], FORKSRV_FD) < 0) PFATAL("dup2() failed");
2055     if (dup2(st_pipe[1], FORKSRV_FD + 1) < 0) PFATAL("dup2() failed");
2056 
2057     close(ctl_pipe[0]);
2058     close(ctl_pipe[1]);
2059     close(st_pipe[0]);
2060     close(st_pipe[1]);
2061 
2062     close(out_dir_fd);
2063     close(dev_null_fd);
2064     close(dev_urandom_fd);
2065     close(fileno(plot_file));
2066 
2067     /* This should improve performance a bit, since it stops the linker from
2068        doing extra work post-fork(). */
2069 
2070     if (!getenv("LD_BIND_LAZY")) setenv("LD_BIND_NOW", "1", 0);
2071 
2072     /* Set sane defaults for ASAN if nothing else specified. */
2073 
2074     setenv("ASAN_OPTIONS", "abort_on_error=1:"
2075                            "detect_leaks=0:"
2076                            "symbolize=0:"
2077                            "allocator_may_return_null=1", 0);
2078 
2079     /* MSAN is tricky, because it doesn't support abort_on_error=1 at this
2080        point. So, we do this in a very hacky way. */
2081 
2082     setenv("MSAN_OPTIONS", "exit_code=" STRINGIFY(MSAN_ERROR) ":"
2083                            "symbolize=0:"
2084                            "abort_on_error=1:"
2085                            "allocator_may_return_null=1:"
2086                            "msan_track_origins=0", 0);
2087 
2088     execv(target_path, argv);
2089 
2090     /* Use a distinctive bitmap signature to tell the parent about execv()
2091        falling through. */
2092 
2093     *(u32*)trace_bits = EXEC_FAIL_SIG;
2094     exit(0);
2095 
2096   }
2097 
2098   /* Close the unneeded endpoints. */
2099 
2100   close(ctl_pipe[0]);
2101   close(st_pipe[1]);
2102 
2103   fsrv_ctl_fd = ctl_pipe[1];
2104   fsrv_st_fd  = st_pipe[0];
2105 
2106   /* Wait for the fork server to come up, but don't wait too long. */
2107 
2108   it.it_value.tv_sec = ((exec_tmout * FORK_WAIT_MULT) / 1000);
2109   it.it_value.tv_usec = ((exec_tmout * FORK_WAIT_MULT) % 1000) * 1000;
2110 
2111   setitimer(ITIMER_REAL, &it, NULL);
2112 
2113   rlen = read(fsrv_st_fd, &status, 4);
2114 
2115   it.it_value.tv_sec = 0;
2116   it.it_value.tv_usec = 0;
2117 
2118   setitimer(ITIMER_REAL, &it, NULL);
2119 
2120   /* If we have a four-byte "hello" message from the server, we're all set.
2121      Otherwise, try to figure out what went wrong. */
2122 
2123   if (rlen == 4) {
2124     OKF("All right - fork server is up.");
2125     return;
2126   }
2127 
2128   if (child_timed_out)
2129     FATAL("Timeout while initializing fork server (adjusting -t may help)");
2130 
2131   if (waitpid(forksrv_pid, &status, 0) <= 0)
2132     PFATAL("waitpid() failed");
2133 
2134   if (WIFSIGNALED(status)) {
2135 
2136     if (mem_limit && mem_limit < 500 && uses_asan) {
2137 
2138       SAYF("\n" cLRD "[-] " cRST
2139            "Whoops, the target binary crashed suddenly, before receiving any input\n"
2140            "    from the fuzzer! Since it seems to be built with ASAN and you have a\n"
2141            "    restrictive memory limit configured, this is expected; please read\n"
2142            "    %s/notes_for_asan.txt for help.\n", doc_path);
2143 
2144     } else if (!mem_limit) {
2145 
2146       SAYF("\n" cLRD "[-] " cRST
2147            "Whoops, the target binary crashed suddenly, before receiving any input\n"
2148            "    from the fuzzer! There are several probable explanations:\n\n"
2149 
2150            "    - The binary is just buggy and explodes entirely on its own. If so, you\n"
2151            "      need to fix the underlying problem or find a better replacement.\n\n"
2152 
2153 #ifdef __APPLE__
2154 
2155            "    - On MacOS X, the semantics of fork() syscalls are non-standard and may\n"
2156            "      break afl-fuzz performance optimizations when running platform-specific\n"
2157            "      targets. To fix this, set AFL_NO_FORKSRV=1 in the environment.\n\n"
2158 
2159 #endif /* __APPLE__ */
2160 
2161            "    - Less likely, there is a horrible bug in the fuzzer. If other options\n"
2162            "      fail, poke <lcamtuf@coredump.cx> for troubleshooting tips.\n");
2163 
2164     } else {
2165 
2166       SAYF("\n" cLRD "[-] " cRST
2167            "Whoops, the target binary crashed suddenly, before receiving any input\n"
2168            "    from the fuzzer! There are several probable explanations:\n\n"
2169 
2170            "    - The current memory limit (%s) is too restrictive, causing the\n"
2171            "      target to hit an OOM condition in the dynamic linker. Try bumping up\n"
2172            "      the limit with the -m setting in the command line. A simple way to confirm\n"
2173            "      this diagnosis would be:\n\n"
2174 
2175 #ifdef RLIMIT_AS
2176            "      ( ulimit -Sv $[%llu << 10]; /path/to/fuzzed_app )\n\n"
2177 #else
2178            "      ( ulimit -Sd $[%llu << 10]; /path/to/fuzzed_app )\n\n"
2179 #endif /* ^RLIMIT_AS */
2180 
2181            "      Tip: you can use http://jwilk.net/software/recidivm to quickly\n"
2182            "      estimate the required amount of virtual memory for the binary.\n\n"
2183 
2184            "    - The binary is just buggy and explodes entirely on its own. If so, you\n"
2185            "      need to fix the underlying problem or find a better replacement.\n\n"
2186 
2187 #ifdef __APPLE__
2188 
2189            "    - On MacOS X, the semantics of fork() syscalls are non-standard and may\n"
2190            "      break afl-fuzz performance optimizations when running platform-specific\n"
2191            "      targets. To fix this, set AFL_NO_FORKSRV=1 in the environment.\n\n"
2192 
2193 #endif /* __APPLE__ */
2194 
2195            "    - Less likely, there is a horrible bug in the fuzzer. If other options\n"
2196            "      fail, poke <lcamtuf@coredump.cx> for troubleshooting tips.\n",
2197            DMS(mem_limit << 20), mem_limit - 1);
2198 
2199     }
2200 
2201     FATAL("Fork server crashed with signal %d", WTERMSIG(status));
2202 
2203   }
2204 
2205   if (*(u32*)trace_bits == EXEC_FAIL_SIG)
2206     FATAL("Unable to execute target application ('%s')", argv[0]);
2207 
2208   if (mem_limit && mem_limit < 500 && uses_asan) {
2209 
2210     SAYF("\n" cLRD "[-] " cRST
2211            "Hmm, looks like the target binary terminated before we could complete a\n"
2212            "    handshake with the injected code. Since it seems to be built with ASAN and\n"
2213            "    you have a restrictive memory limit configured, this is expected; please\n"
2214            "    read %s/notes_for_asan.txt for help.\n", doc_path);
2215 
2216   } else if (!mem_limit) {
2217 
2218     SAYF("\n" cLRD "[-] " cRST
2219          "Hmm, looks like the target binary terminated before we could complete a\n"
2220          "    handshake with the injected code. Perhaps there is a horrible bug in the\n"
2221          "    fuzzer. Poke <lcamtuf@coredump.cx> for troubleshooting tips.\n");
2222 
2223   } else {
2224 
2225     SAYF("\n" cLRD "[-] " cRST
2226          "Hmm, looks like the target binary terminated before we could complete a\n"
2227          "    handshake with the injected code. There are %s probable explanations:\n\n"
2228 
2229          "%s"
2230          "    - The current memory limit (%s) is too restrictive, causing an OOM\n"
2231          "      fault in the dynamic linker. This can be fixed with the -m option. A\n"
2232          "      simple way to confirm the diagnosis may be:\n\n"
2233 
2234 #ifdef RLIMIT_AS
2235          "      ( ulimit -Sv $[%llu << 10]; /path/to/fuzzed_app )\n\n"
2236 #else
2237          "      ( ulimit -Sd $[%llu << 10]; /path/to/fuzzed_app )\n\n"
2238 #endif /* ^RLIMIT_AS */
2239 
2240          "      Tip: you can use http://jwilk.net/software/recidivm to quickly\n"
2241          "      estimate the required amount of virtual memory for the binary.\n\n"
2242 
2243          "    - Less likely, there is a horrible bug in the fuzzer. If other options\n"
2244          "      fail, poke <lcamtuf@coredump.cx> for troubleshooting tips.\n",
2245          getenv(DEFER_ENV_VAR) ? "three" : "two",
2246          getenv(DEFER_ENV_VAR) ?
2247          "    - You are using deferred forkserver, but __AFL_INIT() is never\n"
2248          "      reached before the program terminates.\n\n" : "",
2249          DMS(mem_limit << 20), mem_limit - 1);
2250 
2251   }
2252 
2253   FATAL("Fork server handshake failed");
2254 
2255 }
2256 
2257 
2258 /* Execute target application, monitoring for timeouts. Return status
2259    information. The called program will update trace_bits[]. */
2260 
2261 static u8 run_target(char** argv, u32 timeout) {
2262 
2263   static struct itimerval it;
2264   static u32 prev_timed_out = 0;
2265 
2266   int status = 0;
2267   u32 tb4;
2268 
2269   child_timed_out = 0;
2270 
2271   /* After this memset, trace_bits[] are effectively volatile, so we
2272      must prevent any earlier operations from venturing into that
2273      territory. */
2274 
2275   memset(trace_bits, 0, MAP_SIZE);
2276   MEM_BARRIER();
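
  /* (MEM_BARRIER() is assumed to be a pure compiler barrier, something along
     the lines of:

       #define MEM_BARRIER() __asm__ volatile("" ::: "memory")

     It emits no machine instructions, but keeps the compiler from reordering
     loads and stores across this point.) */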
2277 
2278   /* If we're running in "dumb" mode, we can't rely on the fork server
2279      logic compiled into the target program, so we will just keep calling
2280      execve(). There is a bit of code duplication between here and
2281      init_forkserver(), but c'est la vie. */
2282 
2283   if (dumb_mode == 1 || no_forkserver) {
2284 
2285     child_pid = fork();
2286 
2287     if (child_pid < 0) PFATAL("fork() failed");
2288 
2289     if (!child_pid) {
2290 
2291       struct rlimit r;
2292 
2293       if (mem_limit) {
2294 
2295         r.rlim_max = r.rlim_cur = ((rlim_t)mem_limit) << 20;
2296 
2297 #ifdef RLIMIT_AS
2298 
2299         setrlimit(RLIMIT_AS, &r); /* Ignore errors */
2300 
2301 #else
2302 
2303         setrlimit(RLIMIT_DATA, &r); /* Ignore errors */
2304 
2305 #endif /* ^RLIMIT_AS */
2306 
2307       }
2308 
2309       r.rlim_max = r.rlim_cur = 0;
2310 
2311       setrlimit(RLIMIT_CORE, &r); /* Ignore errors */
2312 
2313       /* Isolate the process and configure standard descriptors. If out_file is
2314          specified, stdin is /dev/null; otherwise, out_fd is cloned instead. */
2315 
2316       setsid();
2317 
2318       dup2(dev_null_fd, 1);
2319       dup2(dev_null_fd, 2);
2320 
2321       if (out_file) {
2322 
2323         dup2(dev_null_fd, 0);
2324 
2325       } else {
2326 
2327         dup2(out_fd, 0);
2328         close(out_fd);
2329 
2330       }
2331 
2332       /* On Linux, would be faster to use O_CLOEXEC. Maybe TODO. */
2333 
2334       close(dev_null_fd);
2335       close(out_dir_fd);
2336       close(dev_urandom_fd);
2337       close(fileno(plot_file));
2338 
2339       /* Set sane defaults for ASAN if nothing else specified. */
2340 
2341       setenv("ASAN_OPTIONS", "abort_on_error=1:"
2342                              "detect_leaks=0:"
2343                              "symbolize=0:"
2344                              "allocator_may_return_null=1", 0);
2345 
2346       setenv("MSAN_OPTIONS", "exit_code=" STRINGIFY(MSAN_ERROR) ":"
2347                              "symbolize=0:"
2348                              "msan_track_origins=0", 0);
2349 
2350       execv(target_path, argv);
2351 
2352       /* Use a distinctive bitmap value to tell the parent about execv()
2353          falling through. */
2354 
2355       *(u32*)trace_bits = EXEC_FAIL_SIG;
2356       exit(0);
2357 
2358     }
2359 
2360   } else {
2361 
2362     s32 res;
2363 
2364     /* In non-dumb mode, we have the fork server up and running, so simply
2365        tell it to have at it, and then read back the PID. */
2366 
2367     if ((res = write(fsrv_ctl_fd, &prev_timed_out, 4)) != 4) {
2368 
2369       if (stop_soon) return 0;
2370       RPFATAL(res, "Unable to request new process from fork server (OOM?)");
2371 
2372     }
2373 
2374     if ((res = read(fsrv_st_fd, &child_pid, 4)) != 4) {
2375 
2376       if (stop_soon) return 0;
2377       RPFATAL(res, "Unable to request new process from fork server (OOM?)");
2378 
2379     }
2380 
2381     if (child_pid <= 0) FATAL("Fork server is misbehaving (OOM?)");
2382 
2383   }
2384 
2385   /* Configure timeout, as requested by user, then wait for child to terminate. */
2386 
2387   it.it_value.tv_sec = (timeout / 1000);
2388   it.it_value.tv_usec = (timeout % 1000) * 1000;
2389 
2390   setitimer(ITIMER_REAL, &it, NULL);
2391 
2392   /* The SIGALRM handler simply kills the child_pid and sets child_timed_out. */
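
  /* That handler is installed elsewhere in this file; a simplified sketch
     of what it is assumed to do (not the verbatim code):

       static void handle_timeout(int sig) {

         if (child_pid > 0) {
           child_timed_out = 1;
           kill(child_pid, SIGKILL);
         } else if (child_pid == -1 && forksrv_pid > 0) {
           child_timed_out = 1;
           kill(forksrv_pid, SIGKILL);
         }

       }
  */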
2393 
2394   if (dumb_mode == 1 || no_forkserver) {
2395 
2396     if (waitpid(child_pid, &status, 0) <= 0) PFATAL("waitpid() failed");
2397 
2398   } else {
2399 
2400     s32 res;
2401 
2402     if ((res = read(fsrv_st_fd, &status, 4)) != 4) {
2403 
2404       if (stop_soon) return 0;
2405       RPFATAL(res, "Unable to communicate with fork server (OOM?)");
2406 
2407     }
2408 
2409   }
2410 
2411   if (!WIFSTOPPED(status)) child_pid = 0;
2412 
2413   it.it_value.tv_sec = 0;
2414   it.it_value.tv_usec = 0;
2415 
2416   setitimer(ITIMER_REAL, &it, NULL);
2417 
2418   total_execs++;
2419 
2420   /* Any subsequent operations on trace_bits must not be moved by the
2421      compiler below this point. Past this location, trace_bits[] behave
2422      very normally and do not have to be treated as volatile. */
2423 
2424   MEM_BARRIER();
2425 
2426   tb4 = *(u32*)trace_bits;
2427 
2428 #ifdef __x86_64__
2429   classify_counts((u64*)trace_bits);
2430 #else
2431   classify_counts((u32*)trace_bits);
2432 #endif /* ^__x86_64__ */
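
  /* (classify_counts(), defined earlier in this file, coarsens the raw edge
     hit counts into buckets so that minor loop-count jitter isn't mistaken
     for a new path. The mapping is roughly:

       1 -> 1, 2 -> 2, 3 -> 4, 4..7 -> 8, 8..15 -> 16,
       16..31 -> 32, 32..127 -> 64, 128..255 -> 128

     applied to the whole map through a lookup table.) */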
2433 
2434   prev_timed_out = child_timed_out;
2435 
2436   /* Report outcome to caller. */
2437 
2438   if (WIFSIGNALED(status) && !stop_soon) {
2439 
2440     kill_signal = WTERMSIG(status);
2441 
2442     if (child_timed_out && kill_signal == SIGKILL) return FAULT_TMOUT;
2443 
2444     return FAULT_CRASH;
2445 
2446   }
2447 
2448   /* A somewhat nasty hack for MSAN, which doesn't support abort_on_error and
2449      must use a special exit code. */
2450 
2451   if (uses_asan && WEXITSTATUS(status) == MSAN_ERROR) {
2452     kill_signal = 0;
2453     return FAULT_CRASH;
2454   }
2455 
2456   if ((dumb_mode == 1 || no_forkserver) && tb4 == EXEC_FAIL_SIG)
2457     return FAULT_ERROR;
2458 
2459   return FAULT_NONE;
2460 
2461 }
2462 
2463 
2464 /* Write modified data to file for testing. If out_file is set, the old file
2465    is unlinked and a new one is created. Otherwise, out_fd is rewound and
2466    truncated. */
2467 
2468 static void write_to_testcase(void* mem, u32 len) {
2469 
2470   s32 fd = out_fd;
2471 
2472   if (out_file) {
2473 
2474     unlink(out_file); /* Ignore errors. */
2475 
2476     fd = open(out_file, O_WRONLY | O_CREAT | O_EXCL, 0600);
2477 
2478     if (fd < 0) PFATAL("Unable to create '%s'", out_file);
2479 
2480   } else lseek(fd, 0, SEEK_SET);
2481 
2482   ck_write(fd, mem, len, out_file);
2483 
2484   if (!out_file) {
2485 
2486     if (ftruncate(fd, len)) PFATAL("ftruncate() failed");
2487     lseek(fd, 0, SEEK_SET);
2488 
2489   } else close(fd);
2490 
2491 }
2492 
2493 
2494 /* The same, but with an adjustable gap. Used for trimming. */
2495 
2496 static void write_with_gap(void* mem, u32 len, u32 skip_at, u32 skip_len) {
2497 
2498   s32 fd = out_fd;
2499   u32 tail_len = len - skip_at - skip_len;
2500 
2501   if (out_file) {
2502 
2503     unlink(out_file); /* Ignore errors. */
2504 
2505     fd = open(out_file, O_WRONLY | O_CREAT | O_EXCL, 0600);
2506 
2507     if (fd < 0) PFATAL("Unable to create '%s'", out_file);
2508 
2509   } else lseek(fd, 0, SEEK_SET);
2510 
2511   if (skip_at) ck_write(fd, mem, skip_at, out_file);
2512 
2513   if (tail_len) ck_write(fd, mem + skip_at + skip_len, tail_len, out_file);
2514 
2515   if (!out_file) {
2516 
2517     if (ftruncate(fd, len - skip_len)) PFATAL("ftruncate() failed");
2518     lseek(fd, 0, SEEK_SET);
2519 
2520   } else close(fd);
2521 
2522 }
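
/* (For context, the trimming code further down in this file is expected to
   drive write_with_gap() roughly like so - an illustrative sketch only:

     write_with_gap(in_buf, q->len, remove_pos, trim_avail);
     fault = run_target(argv, exec_tmout);

   i.e., execute the test case with a candidate chunk excised, keeping the
   shorter version whenever the trace checksum stays the same.) */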
2523 
2524 
2525 static void show_stats(void);
2526 
2527 /* Calibrate a new test case. This is done when processing the input directory
2528    to warn about flaky or otherwise problematic test cases early on; and when
2529    new paths are discovered to detect variable behavior and so on. */
2530 
2531 static u8 calibrate_case(char** argv, struct queue_entry* q, u8* use_mem,
2532                          u32 handicap, u8 from_queue) {
2533 
2534   static u8 first_trace[MAP_SIZE];
2535 
2536   u8  fault = 0, new_bits = 0, var_detected = 0,
2537       first_run = (q->exec_cksum == 0);
2538 
2539   u64 start_us, stop_us;
2540 
2541   s32 old_sc = stage_cur, old_sm = stage_max;
2542   u32 use_tmout = exec_tmout;
2543   u8* old_sn = stage_name;
2544 
2545   /* Be a bit more generous about timeouts when resuming sessions, or when
2546      trying to calibrate already-added finds. This helps avoid trouble due
2547      to intermittent latency. */
2548 
2549   if (!from_queue || resuming_fuzz)
2550     use_tmout = MAX(exec_tmout + CAL_TMOUT_ADD,
2551                     exec_tmout * CAL_TMOUT_PERC / 100);
2552 
2553   q->cal_failed++;
2554 
2555   stage_name = "calibration";
2556   stage_max  = fast_cal ? 3 : CAL_CYCLES;
2557 
2558   /* Make sure the forkserver is up before we do anything, and let's not
2559      count its spin-up time toward binary calibration. */
2560 
2561   if (dumb_mode != 1 && !no_forkserver && !forksrv_pid)
2562     init_forkserver(argv);
2563 
2564   if (q->exec_cksum) memcpy(first_trace, trace_bits, MAP_SIZE);
2565 
2566   start_us = get_cur_time_us();
2567 
2568   for (stage_cur = 0; stage_cur < stage_max; stage_cur++) {
2569 
2570     u32 cksum;
2571 
2572     if (!first_run && !(stage_cur % stats_update_freq)) show_stats();
2573 
2574     write_to_testcase(use_mem, q->len);
2575 
2576     fault = run_target(argv, use_tmout);
2577 
2578     /* stop_soon is set by the handler for Ctrl+C. When it's pressed,
2579        we want to bail out quickly. */
2580 
2581     if (stop_soon || fault != crash_mode) goto abort_calibration;
2582 
2583     if (!dumb_mode && !stage_cur && !count_bytes(trace_bits)) {
2584       fault = FAULT_NOINST;
2585       goto abort_calibration;
2586     }
2587 
2588     cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST);
2589 
2590     if (q->exec_cksum != cksum) {
2591 
2592       u8 hnb = has_new_bits(virgin_bits);
2593       if (hnb > new_bits) new_bits = hnb;
2594 
2595       if (q->exec_cksum) {
2596 
2597         u32 i;
2598 
2599         for (i = 0; i < MAP_SIZE; i++) {
2600 
2601           if (!var_bytes[i] && first_trace[i] != trace_bits[i]) {
2602 
2603             var_bytes[i] = 1;
2604             stage_max    = CAL_CYCLES_LONG;
2605 
2606           }
2607 
2608         }
2609 
2610         var_detected = 1;
2611 
2612       } else {
2613 
2614         q->exec_cksum = cksum;
2615         memcpy(first_trace, trace_bits, MAP_SIZE);
2616 
2617       }
2618 
2619     }
2620 
2621   }
2622 
2623   stop_us = get_cur_time_us();
2624 
2625   total_cal_us     += stop_us - start_us;
2626   total_cal_cycles += stage_max;
2627 
2628   /* OK, let's collect some stats about the performance of this test case.
2629      This is used for fuzzing air time calculations in calculate_score(). */
2630 
2631   q->exec_us     = (stop_us - start_us) / stage_max;
2632   q->bitmap_size = count_bytes(trace_bits);
2633   q->handicap    = handicap;
2634   q->cal_failed  = 0;
2635 
2636   total_bitmap_size += q->bitmap_size;
2637   total_bitmap_entries++;
2638 
2639   update_bitmap_score(q);
2640 
2641   /* If this case didn't result in new output from the instrumentation, tell
2642      parent. This is a non-critical problem, but something to warn the user
2643      about. */
2644 
2645   if (!dumb_mode && first_run && !fault && !new_bits) fault = FAULT_NOBITS;
2646 
2647 abort_calibration:
2648 
2649   if (new_bits == 2 && !q->has_new_cov) {
2650     q->has_new_cov = 1;
2651     queued_with_cov++;
2652   }
2653 
2654   /* Mark variable paths. */
2655 
2656   if (var_detected) {
2657 
2658     var_byte_count = count_bytes(var_bytes);
2659 
2660     if (!q->var_behavior) {
2661       mark_as_variable(q);
2662       queued_variable++;
2663     }
2664 
2665   }
2666 
2667   stage_name = old_sn;
2668   stage_cur  = old_sc;
2669   stage_max  = old_sm;
2670 
2671   if (!first_run) show_stats();
2672 
2673   return fault;
2674 
2675 }
2676 
2677 
2678 /* Examine map coverage. Called once, for first test case. */
2679 
2680 static void check_map_coverage(void) {
2681 
2682   u32 i;
2683 
2684   if (count_bytes(trace_bits) < 100) return;
2685 
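  /* If a decent number of tuples are hit but none land in the upper half of
     the map, the binary was presumably instrumented by an older afl build
     with a smaller MAP_SIZE - hence the advice below. */
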
2686   for (i = (1 << (MAP_SIZE_POW2 - 1)); i < MAP_SIZE; i++)
2687     if (trace_bits[i]) return;
2688 
2689   WARNF("Recompile binary with newer version of afl to improve coverage!");
2690 
2691 }
2692 
2693 
2694 /* Perform dry run of all test cases to confirm that the app is working as
2695    expected. This is done only for the initial inputs, and only once. */
2696 
2697 static void perform_dry_run(char** argv) {
2698 
2699   struct queue_entry* q = queue;
2700   u32 cal_failures = 0;
2701   u8* skip_crashes = getenv("AFL_SKIP_CRASHES");
2702 
2703   while (q) {
2704 
2705     u8* use_mem;
2706     u8  res;
2707     s32 fd;
2708 
2709     u8* fn = strrchr(q->fname, '/') + 1;
2710 
2711     ACTF("Attempting dry run with '%s'...", fn);
2712 
2713     fd = open(q->fname, O_RDONLY);
2714     if (fd < 0) PFATAL("Unable to open '%s'", q->fname);
2715 
2716     use_mem = ck_alloc_nozero(q->len);
2717 
2718     if (read(fd, use_mem, q->len) != q->len)
2719       FATAL("Short read from '%s'", q->fname);
2720 
2721     close(fd);
2722 
2723     res = calibrate_case(argv, q, use_mem, 0, 1);
2724     ck_free(use_mem);
2725 
2726     if (stop_soon) return;
2727 
2728     if (res == crash_mode || res == FAULT_NOBITS)
2729       SAYF(cGRA "    len = %u, map size = %u, exec speed = %llu us\n" cRST,
2730            q->len, q->bitmap_size, q->exec_us);
2731 
2732     switch (res) {
2733 
2734       case FAULT_NONE:
2735 
2736         if (q == queue) check_map_coverage();
2737 
2738         if (crash_mode) FATAL("Test case '%s' does *NOT* crash", fn);
2739 
2740         break;
2741 
2742       case FAULT_TMOUT:
2743 
2744         if (timeout_given) {
2745 
2746           /* The -t nn+ syntax in the command line sets timeout_given to '2' and
2747              instructs afl-fuzz to tolerate but skip queue entries that time
2748              out. */
2749 
2750           if (timeout_given > 1) {
2751             WARNF("Test case results in a timeout (skipping)");
2752             q->cal_failed = CAL_CHANCES;
2753             cal_failures++;
2754             break;
2755           }
2756 
2757           SAYF("\n" cLRD "[-] " cRST
2758                "The program took more than %u ms to process one of the initial test cases.\n"
2759                "    Usually, the right thing to do is to relax the -t option - or to delete it\n"
2760                "    altogether and allow the fuzzer to auto-calibrate. That said, if you know\n"
2761                "    what you are doing and want to simply skip the unruly test cases, append\n"
2762                "    '+' at the end of the value passed to -t ('-t %u+').\n", exec_tmout,
2763                exec_tmout);
2764 
2765           FATAL("Test case '%s' results in a timeout", fn);
2766 
2767         } else {
2768 
2769           SAYF("\n" cLRD "[-] " cRST
2770                "The program took more than %u ms to process one of the initial test cases.\n"
2771                "    This is bad news; raising the limit with the -t option is possible, but\n"
2772                "    will probably make the fuzzing process extremely slow.\n\n"
2773 
2774                "    If this test case is just a fluke, the other option is to just avoid it\n"
2775                "    altogether, and find one that is less of a CPU hog.\n", exec_tmout);
2776 
2777           FATAL("Test case '%s' results in a timeout", fn);
2778 
2779         }
2780 
2781       case FAULT_CRASH:
2782 
2783         if (crash_mode) break;
2784 
2785         if (skip_crashes) {
2786           WARNF("Test case results in a crash (skipping)");
2787           q->cal_failed = CAL_CHANCES;
2788           cal_failures++;
2789           break;
2790         }
2791 
2792         if (mem_limit) {
2793 
2794           SAYF("\n" cLRD "[-] " cRST
2795                "Oops, the program crashed with one of the test cases provided. There are\n"
2796                "    several possible explanations:\n\n"
2797 
2798                "    - The test case causes known crashes under normal working conditions. If\n"
2799                "      so, please remove it. The fuzzer should be seeded with interesting\n"
2800                "      inputs - but not ones that cause an outright crash.\n\n"
2801 
2802                "    - The current memory limit (%s) is too low for this program, causing\n"
2803                "      it to die due to OOM when parsing valid files. To fix this, try\n"
2804                "      bumping it up with the -m setting in the command line. If in doubt,\n"
2805                "      try something along the lines of:\n\n"
2806 
2807 #ifdef RLIMIT_AS
2808                "      ( ulimit -Sv $[%llu << 10]; /path/to/binary [...] <testcase )\n\n"
2809 #else
2810                "      ( ulimit -Sd $[%llu << 10]; /path/to/binary [...] <testcase )\n\n"
2811 #endif /* ^RLIMIT_AS */
2812 
2813                "      Tip: you can use http://jwilk.net/software/recidivm to quickly\n"
2814                "      estimate the required amount of virtual memory for the binary. Also,\n"
2815                "      if you are using ASAN, see %s/notes_for_asan.txt.\n\n"
2816 
2817 #ifdef __APPLE__
2818 
2819                "    - On MacOS X, the semantics of fork() syscalls are non-standard and may\n"
2820                "      break afl-fuzz performance optimizations when running platform-specific\n"
2821                "      binaries. To fix this, set AFL_NO_FORKSRV=1 in the environment.\n\n"
2822 
2823 #endif /* __APPLE__ */
2824 
2825                "    - Least likely, there is a horrible bug in the fuzzer. If other options\n"
2826                "      fail, poke <lcamtuf@coredump.cx> for troubleshooting tips.\n",
2827                DMS(mem_limit << 20), mem_limit - 1, doc_path);
2828 
2829         } else {
2830 
2831           SAYF("\n" cLRD "[-] " cRST
2832                "Oops, the program crashed with one of the test cases provided. There are\n"
2833                "    several possible explanations:\n\n"
2834 
2835                "    - The test case causes known crashes under normal working conditions. If\n"
2836                "      so, please remove it. The fuzzer should be seeded with interesting\n"
2837                "      inputs - but not ones that cause an outright crash.\n\n"
2838 
2839 #ifdef __APPLE__
2840 
2841                "    - On MacOS X, the semantics of fork() syscalls are non-standard and may\n"
2842                "      break afl-fuzz performance optimizations when running platform-specific\n"
2843                "      binaries. To fix this, set AFL_NO_FORKSRV=1 in the environment.\n\n"
2844 
2845 #endif /* __APPLE__ */
2846 
2847                "    - Least likely, there is a horrible bug in the fuzzer. If other options\n"
2848                "      fail, poke <lcamtuf@coredump.cx> for troubleshooting tips.\n");
2849 
2850         }
2851 
2852         FATAL("Test case '%s' results in a crash", fn);
2853 
2854       case FAULT_ERROR:
2855 
2856         FATAL("Unable to execute target application ('%s')", argv[0]);
2857 
2858       case FAULT_NOINST:
2859 
2860         FATAL("No instrumentation detected");
2861 
2862       case FAULT_NOBITS:
2863 
2864         useless_at_start++;
2865 
2866         if (!in_bitmap && !shuffle_queue)
2867           WARNF("No new instrumentation output, test case may be useless.");
2868 
2869         break;
2870 
2871     }
2872 
2873     if (q->var_behavior) WARNF("Instrumentation output varies across runs.");
2874 
2875     q = q->next;
2876 
2877   }
2878 
2879   if (cal_failures) {
2880 
2881     if (cal_failures == queued_paths)
2882       FATAL("All test cases time out%s, giving up!",
2883             skip_crashes ? " or crash" : "");
2884 
2885     WARNF("Skipped %u test cases (%0.02f%%) due to timeouts%s.", cal_failures,
2886           ((double)cal_failures) * 100 / queued_paths,
2887           skip_crashes ? " or crashes" : "");
2888 
2889     if (cal_failures * 5 > queued_paths)
2890       WARNF(cLRD "High percentage of rejected test cases, check settings!");
2891 
2892   }
2893 
2894   OKF("All test cases processed.");
2895 
2896 }
2897 
2898 
2899 /* Helper function: link() if possible, copy otherwise. */
2900 
2901 static void link_or_copy(u8* old_path, u8* new_path) {
2902 
2903   s32 i = link(old_path, new_path);
2904   s32 sfd, dfd;
2905   u8* tmp;
2906 
2907   if (!i) return;
2908 
2909   sfd = open(old_path, O_RDONLY);
2910   if (sfd < 0) PFATAL("Unable to open '%s'", old_path);
2911 
2912   dfd = open(new_path, O_WRONLY | O_CREAT | O_EXCL, 0600);
2913   if (dfd < 0) PFATAL("Unable to create '%s'", new_path);
2914 
2915   tmp = ck_alloc(64 * 1024);
2916 
2917   while ((i = read(sfd, tmp, 64 * 1024)) > 0)
2918     ck_write(dfd, tmp, i, new_path);
2919 
2920   if (i < 0) PFATAL("read() failed");
2921 
2922   ck_free(tmp);
2923   close(sfd);
2924   close(dfd);
2925 
2926 }
2927 
2928 
2929 static void nuke_resume_dir(void);
2930 
2931 /* Create hard links for input test cases in the output directory, choosing
2932    good names and pivoting accordingly. */
2933 
2934 static void pivot_inputs(void) {
2935 
2936   struct queue_entry* q = queue;
2937   u32 id = 0;
2938 
2939   ACTF("Creating hard links for all input files...");
2940 
2941   while (q) {
2942 
2943     u8  *nfn, *rsl = strrchr(q->fname, '/');
2944     u32 orig_id;
2945 
2946     if (!rsl) rsl = q->fname; else rsl++;
2947 
2948     /* If the original file name conforms to the syntax and the recorded
2949        ID matches the one we'd assign, just use the original file name.
2950        This is valuable for resuming fuzzing runs. */
2951 
2952 #ifndef SIMPLE_FILES
2953 #  define CASE_PREFIX "id:"
2954 #else
2955 #  define CASE_PREFIX "id_"
2956 #endif /* ^!SIMPLE_FILES */
2957 
2958     if (!strncmp(rsl, CASE_PREFIX, 3) &&
2959         sscanf(rsl + 3, "%06u", &orig_id) == 1 && orig_id == id) {
2960 
2961       u8* src_str;
2962       u32 src_id;
2963 
2964       resuming_fuzz = 1;
2965       nfn = alloc_printf("%s/queue/%s", out_dir, rsl);
2966 
2967       /* Since we're at it, let's also try to find parent and figure out the
2968          appropriate depth for this entry. */
2969 
2970       src_str = strchr(rsl + 3, ':');
2971 
2972       if (src_str && sscanf(src_str + 1, "%06u", &src_id) == 1) {
2973 
2974         struct queue_entry* s = queue;
2975         while (src_id-- && s) s = s->next;
2976         if (s) q->depth = s->depth + 1;
2977 
2978         if (max_depth < q->depth) max_depth = q->depth;
2979 
2980       }
2981 
2982     } else {
2983 
2984       /* No dice - invent a new name, capturing the original one as a
2985          substring. */
2986 
2987 #ifndef SIMPLE_FILES
2988 
2989       u8* use_name = strstr(rsl, ",orig:");
2990 
2991       if (use_name) use_name += 6; else use_name = rsl;
2992       nfn = alloc_printf("%s/queue/id:%06u,orig:%s", out_dir, id, use_name);
2993 
2994 #else
2995 
2996       nfn = alloc_printf("%s/queue/id_%06u", out_dir, id);
2997 
2998 #endif /* ^!SIMPLE_FILES */
2999 
3000     }
3001 
3002     /* Pivot to the new queue entry. */
3003 
3004     link_or_copy(q->fname, nfn);
3005     ck_free(q->fname);
3006     q->fname = nfn;
3007 
3008     /* Make sure that the passed_det value carries over, too. */
3009 
3010     if (q->passed_det) mark_as_det_done(q);
3011 
3012     q = q->next;
3013     id++;
3014 
3015   }
3016 
3017   if (in_place_resume) nuke_resume_dir();
3018 
3019 }
3020 
3021 
3022 #ifndef SIMPLE_FILES
3023 
3024 /* Construct a file name for a new test case, capturing the operation
3025    that led to its discovery. Uses a static buffer. */
3026 
3027 static u8* describe_op(u8 hnb) {
3028 
3029   static u8 ret[256];
3030 
3031   if (syncing_party) {
3032 
3033     sprintf(ret, "sync:%s,src:%06u", syncing_party, syncing_case);
3034 
3035   } else {
3036 
3037     sprintf(ret, "src:%06u", current_entry);
3038 
3039     if (splicing_with >= 0)
3040       sprintf(ret + strlen(ret), "+%06u", splicing_with);
3041 
3042     sprintf(ret + strlen(ret), ",op:%s", stage_short);
3043 
3044     if (stage_cur_byte >= 0) {
3045 
3046       sprintf(ret + strlen(ret), ",pos:%u", stage_cur_byte);
3047 
3048       if (stage_val_type != STAGE_VAL_NONE)
3049         sprintf(ret + strlen(ret), ",val:%s%+d",
3050                 (stage_val_type == STAGE_VAL_BE) ? "be:" : "",
3051                 stage_cur_val);
3052 
3053     } else sprintf(ret + strlen(ret), ",rep:%u", stage_cur_val);
3054 
3055   }
3056 
3057   if (hnb == 2) strcat(ret, ",+cov");
3058 
3059   return ret;
3060 
3061 }
3062 
3063 #endif /* !SIMPLE_FILES */
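
/* (As an example of the scheme above: a queue entry derived from case 42 by
   a havoc pass with 16 stacked tweaks that also reached new coverage might
   end up named - illustrative values only:

     id:000107,src:000042,op:havoc,rep:16,+cov

   once save_if_interesting() below prepends the "id:" part.) */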
3064 
3065 
3066 /* Write a message accompanying the crash directory :-) */
3067 
3068 static void write_crash_readme(void) {
3069 
3070   u8* fn = alloc_printf("%s/crashes/README.txt", out_dir);
3071   s32 fd;
3072   FILE* f;
3073 
3074   fd = open(fn, O_WRONLY | O_CREAT | O_EXCL, 0600);
3075   ck_free(fn);
3076 
3077   /* Do not die on errors here - that would be impolite. */
3078 
3079   if (fd < 0) return;
3080 
3081   f = fdopen(fd, "w");
3082 
3083   if (!f) {
3084     close(fd);
3085     return;
3086   }
3087 
3088   fprintf(f, "Command line used to find this crash:\n\n"
3089 
3090              "%s\n\n"
3091 
3092              "If you can't reproduce a bug outside of afl-fuzz, be sure to set the same\n"
3093              "memory limit. The limit used for this fuzzing session was %s.\n\n"
3094 
3095              "Need a tool to minimize test cases before investigating the crashes or sending\n"
3096              "them to a vendor? Check out the afl-tmin that comes with the fuzzer!\n\n"
3097 
3098              "Found any cool bugs in open-source tools using afl-fuzz? If yes, please drop\n"
3099              "me a mail at <lcamtuf@coredump.cx> once the issues are fixed - I'd love to\n"
3100              "add your finds to the gallery at:\n\n"
3101 
3102              "  http://lcamtuf.coredump.cx/afl/\n\n"
3103 
3104              "Thanks :-)\n",
3105 
3106              orig_cmdline, DMS(mem_limit << 20)); /* ignore errors */
3107 
3108   fclose(f);
3109 
3110 }
3111 
3112 
3113 /* Check if the result of an execve() during routine fuzzing is interesting,
3114    save or queue the input test case for further analysis if so. Returns 1 if
3115    entry is saved, 0 otherwise. */
3116 
3117 static u8 save_if_interesting(char** argv, void* mem, u32 len, u8 fault) {
3118 
3119   u8  *fn = "";
3120   u8  hnb;
3121   s32 fd;
3122   u8  keeping = 0, res;
3123 
3124   if (fault == crash_mode) {
3125 
3126     /* Keep only if there are new bits in the map, add to queue for
3127        future fuzzing, etc. */
3128 
3129     if (!(hnb = has_new_bits(virgin_bits))) {
3130       if (crash_mode) total_crashes++;
3131       return 0;
3132     }
3133 
3134 #ifndef SIMPLE_FILES
3135 
3136     fn = alloc_printf("%s/queue/id:%06u,%s", out_dir, queued_paths,
3137                       describe_op(hnb));
3138 
3139 #else
3140 
3141     fn = alloc_printf("%s/queue/id_%06u", out_dir, queued_paths);
3142 
3143 #endif /* ^!SIMPLE_FILES */
3144 
3145     add_to_queue(fn, len, 0);
3146 
3147     if (hnb == 2) {
3148       queue_top->has_new_cov = 1;
3149       queued_with_cov++;
3150     }
3151 
3152     queue_top->exec_cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST);
3153 
3154     /* Try to calibrate inline; this also calls update_bitmap_score() when
3155        successful. */
3156 
3157     res = calibrate_case(argv, queue_top, mem, queue_cycle - 1, 0);
3158 
3159     if (res == FAULT_ERROR)
3160       FATAL("Unable to execute target application");
3161 
3162     fd = open(fn, O_WRONLY | O_CREAT | O_EXCL, 0600);
3163     if (fd < 0) PFATAL("Unable to create '%s'", fn);
3164     ck_write(fd, mem, len, fn);
3165     close(fd);
3166 
3167     keeping = 1;
3168 
3169   }
3170 
3171   switch (fault) {
3172 
3173     case FAULT_TMOUT:
3174 
3175       /* Timeouts are not very interesting, but we're still obliged to keep
3176          a handful of samples. We use the presence of new bits in the
3177          hang-specific bitmap as a signal of uniqueness. In "dumb" mode, we
3178          just keep everything. */
3179 
3180       total_tmouts++;
3181 
3182       if (unique_hangs >= KEEP_UNIQUE_HANG) return keeping;
3183 
3184       if (!dumb_mode) {
3185 
3186 #ifdef __x86_64__
3187         simplify_trace((u64*)trace_bits);
3188 #else
3189         simplify_trace((u32*)trace_bits);
3190 #endif /* ^__x86_64__ */
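
        /* (simplify_trace(), defined earlier, is assumed to collapse every
           map byte into a binary "hit / not hit" flag - roughly 0 -> 0x01,
           anything else -> 0x80 - so hangs are deduplicated purely by the
           set of edges reached, with hit counts ignored.) */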
3191 
3192         if (!has_new_bits(virgin_tmout)) return keeping;
3193 
3194       }
3195 
3196       unique_tmouts++;
3197 
3198       /* Before saving, we make sure that it's a genuine hang by re-running
3199          the target with a more generous timeout (unless the default timeout
3200          is already generous). */
3201 
3202       if (exec_tmout < hang_tmout) {
3203 
3204         u8 new_fault;
3205         write_to_testcase(mem, len);
3206         new_fault = run_target(argv, hang_tmout);
3207 
3208         /* A corner case that one user reported bumping into: increasing the
3209            timeout actually uncovers a crash. Make sure we don't discard it if
3210            so. */
3211 
3212         if (!stop_soon && new_fault == FAULT_CRASH) goto keep_as_crash;
3213 
3214         if (stop_soon || new_fault != FAULT_TMOUT) return keeping;
3215 
3216       }
3217 
3218 #ifndef SIMPLE_FILES
3219 
3220       fn = alloc_printf("%s/hangs/id:%06llu,%s", out_dir,
3221                         unique_hangs, describe_op(0));
3222 
3223 #else
3224 
3225       fn = alloc_printf("%s/hangs/id_%06llu", out_dir,
3226                         unique_hangs);
3227 
3228 #endif /* ^!SIMPLE_FILES */
3229 
3230       unique_hangs++;
3231 
3232       last_hang_time = get_cur_time();
3233 
3234       break;
3235 
3236     case FAULT_CRASH:
3237 
3238 keep_as_crash:
3239 
3240       /* This is handled in a manner roughly similar to timeouts,
3241          except for slightly different limits and no need to re-run test
3242          cases. */
3243 
3244       total_crashes++;
3245 
3246       if (unique_crashes >= KEEP_UNIQUE_CRASH) return keeping;
3247 
3248       if (!dumb_mode) {
3249 
3250 #ifdef __x86_64__
3251         simplify_trace((u64*)trace_bits);
3252 #else
3253         simplify_trace((u32*)trace_bits);
3254 #endif /* ^__x86_64__ */
3255 
3256         if (!has_new_bits(virgin_crash)) return keeping;
3257 
3258       }
3259 
3260       if (!unique_crashes) write_crash_readme();
3261 
3262 #ifndef SIMPLE_FILES
3263 
3264       fn = alloc_printf("%s/crashes/id:%06llu,sig:%02u,%s", out_dir,
3265                         unique_crashes, kill_signal, describe_op(0));
3266 
3267 #else
3268 
3269       fn = alloc_printf("%s/crashes/id_%06llu_%02u", out_dir, unique_crashes,
3270                         kill_signal);
3271 
3272 #endif /* ^!SIMPLE_FILES */
3273 
3274       unique_crashes++;
3275 
3276       last_crash_time = get_cur_time();
3277       last_crash_execs = total_execs;
3278 
3279       break;
3280 
3281     case FAULT_ERROR: FATAL("Unable to execute target application");
3282 
3283     default: return keeping;
3284 
3285   }
3286 
3287   /* If we're here, we apparently want to save the crash or hang
3288      test case, too. */
3289 
3290   fd = open(fn, O_WRONLY | O_CREAT | O_EXCL, 0600);
3291   if (fd < 0) PFATAL("Unable to create '%s'", fn);
3292   ck_write(fd, mem, len, fn);
3293   close(fd);
3294 
3295   ck_free(fn);
3296 
3297   return keeping;
3298 
3299 }
3300 
3301 
3302 /* When resuming, try to find the queue position to start from. This works
3303    only if we can locate the original fuzzer_stats. */
3304 
3305 static u32 find_start_position(void) {
3306 
3307   static u8 tmp[4096]; /* Ought to be enough for anybody. */
3308 
3309   u8  *fn, *off;
3310   s32 fd, i;
3311   u32 ret;
3312 
3313   if (!resuming_fuzz) return 0;
3314 
3315   if (in_place_resume) fn = alloc_printf("%s/fuzzer_stats", out_dir);
3316   else fn = alloc_printf("%s/../fuzzer_stats", in_dir);
3317 
3318   fd = open(fn, O_RDONLY);
3319   ck_free(fn);
3320 
3321   if (fd < 0) return 0;
3322 
3323   i = read(fd, tmp, sizeof(tmp) - 1); (void)i; /* Ignore errors */
3324   close(fd);
3325 
3326   off = strstr(tmp, "cur_path          : ");
3327   if (!off) return 0;
3328 
3329   ret = atoi(off + 20);
3330   if (ret >= queued_paths) ret = 0;
3331   return ret;
3332 
3333 }
3334 
3335 
3336 /* The same, but for timeouts. The idea is that when resuming a session without
3337    -t given, we reuse the timeout from the previous run rather than auto-scaling
3338    it from scratch, so that random flukes can't ratchet it up over and over. */
3339 
3340 static void find_timeout(void) {
3341 
3342   static u8 tmp[4096]; /* Ought to be enough for anybody. */
3343 
3344   u8  *fn, *off;
3345   s32 fd, i;
3346   u32 ret;
3347 
3348   if (!resuming_fuzz) return;
3349 
3350   if (in_place_resume) fn = alloc_printf("%s/fuzzer_stats", out_dir);
3351   else fn = alloc_printf("%s/../fuzzer_stats", in_dir);
3352 
3353   fd = open(fn, O_RDONLY);
3354   ck_free(fn);
3355 
3356   if (fd < 0) return;
3357 
3358   i = read(fd, tmp, sizeof(tmp) - 1); (void)i; /* Ignore errors */
3359   close(fd);
3360 
3361   off = strstr(tmp, "exec_timeout      : ");
3362   if (!off) return;
3363 
3364   ret = atoi(off + 20);
3365   if (ret <= 4) return;
3366 
3367   exec_tmout = ret;
3368   timeout_given = 3;
3369 
3370 }
3371 
3372 
3373 /* Update stats file for unattended monitoring. */
3374 
3375 static void write_stats_file(double bitmap_cvg, double stability, double eps) {
3376 
3377   static double last_bcvg, last_stab, last_eps;
3378 
3379   u8* fn = alloc_printf("%s/fuzzer_stats", out_dir);
3380   s32 fd;
3381   FILE* f;
3382 
3383   fd = open(fn, O_WRONLY | O_CREAT | O_TRUNC, 0600);
3384 
3385   if (fd < 0) PFATAL("Unable to create '%s'", fn);
3386 
3387   ck_free(fn);
3388 
3389   f = fdopen(fd, "w");
3390 
3391   if (!f) PFATAL("fdopen() failed");
3392 
3393   /* Keep last values in case we're called from another context
3394      where exec/sec stats and such are not readily available. */
3395 
3396   if (!bitmap_cvg && !stability && !eps) {
3397     bitmap_cvg = last_bcvg;
3398     stability  = last_stab;
3399     eps        = last_eps;
3400   } else {
3401     last_bcvg = bitmap_cvg;
3402     last_stab = stability;
3403     last_eps  = eps;
3404   }
3405 
3406   fprintf(f, "start_time        : %llu\n"
3407              "last_update       : %llu\n"
3408              "fuzzer_pid        : %u\n"
3409              "cycles_done       : %llu\n"
3410              "execs_done        : %llu\n"
3411              "execs_per_sec     : %0.02f\n"
3412              "paths_total       : %u\n"
3413              "paths_favored     : %u\n"
3414              "paths_found       : %u\n"
3415              "paths_imported    : %u\n"
3416              "max_depth         : %u\n"
3417              "cur_path          : %u\n" /* Must match find_start_position() */
3418              "pending_favs      : %u\n"
3419              "pending_total     : %u\n"
3420              "variable_paths    : %u\n"
3421              "stability         : %0.02f%%\n"
3422              "bitmap_cvg        : %0.02f%%\n"
3423              "unique_crashes    : %llu\n"
3424              "unique_hangs      : %llu\n"
3425              "last_path         : %llu\n"
3426              "last_crash        : %llu\n"
3427              "last_hang         : %llu\n"
3428              "execs_since_crash : %llu\n"
3429              "exec_timeout      : %u\n"
3430              "afl_banner        : %s\n"
3431              "afl_version       : " VERSION "\n"
3432              "target_mode       : %s%s%s%s%s%s%s\n"
3433              "command_line      : %s\n",
3434              start_time / 1000, get_cur_time() / 1000, getpid(),
3435              queue_cycle ? (queue_cycle - 1) : 0, total_execs, eps,
3436              queued_paths, queued_favored, queued_discovered, queued_imported,
3437              max_depth, current_entry, pending_favored, pending_not_fuzzed,
3438              queued_variable, stability, bitmap_cvg, unique_crashes,
3439              unique_hangs, last_path_time / 1000, last_crash_time / 1000,
3440              last_hang_time / 1000, total_execs - last_crash_execs,
3441              exec_tmout, use_banner,
3442              qemu_mode ? "qemu " : "", dumb_mode ? " dumb " : "",
3443              no_forkserver ? "no_forksrv " : "", crash_mode ? "crash " : "",
3444              persistent_mode ? "persistent " : "", deferred_mode ? "deferred " : "",
3445              (qemu_mode || dumb_mode || no_forkserver || crash_mode ||
3446               persistent_mode || deferred_mode) ? "" : "default",
3447              orig_cmdline);
3448              /* ignore errors */
3449 
3450   fclose(f);
3451 
3452 }
3453 
3454 
3455 /* Update the plot file if there is a reason to. */
3456 
3457 static void maybe_update_plot_file(double bitmap_cvg, double eps) {
3458 
3459   static u32 prev_qp, prev_pf, prev_pnf, prev_ce, prev_md;
3460   static u64 prev_qc, prev_uc, prev_uh;
3461 
3462   if (prev_qp == queued_paths && prev_pf == pending_favored &&
3463       prev_pnf == pending_not_fuzzed && prev_ce == current_entry &&
3464       prev_qc == queue_cycle && prev_uc == unique_crashes &&
3465       prev_uh == unique_hangs && prev_md == max_depth) return;
3466 
3467   prev_qp  = queued_paths;
3468   prev_pf  = pending_favored;
3469   prev_pnf = pending_not_fuzzed;
3470   prev_ce  = current_entry;
3471   prev_qc  = queue_cycle;
3472   prev_uc  = unique_crashes;
3473   prev_uh  = unique_hangs;
3474   prev_md  = max_depth;
3475 
3476   /* Fields in the file:
3477 
3478      unix_time, cycles_done, cur_path, paths_total, paths_not_fuzzed,
3479      favored_not_fuzzed, unique_crashes, unique_hangs, max_depth,
3480      execs_per_sec */
3481 
3482   fprintf(plot_file,
3483           "%llu, %llu, %u, %u, %u, %u, %0.02f%%, %llu, %llu, %u, %0.02f\n",
3484           get_cur_time() / 1000, queue_cycle - 1, current_entry, queued_paths,
3485           pending_not_fuzzed, pending_favored, bitmap_cvg, unique_crashes,
3486           unique_hangs, max_depth, eps); /* ignore errors */
3487 
3488   fflush(plot_file);
3489 
3490 }
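
/* (The plot_data file written above is what the bundled afl-plot script
   consumes; it turns these rows into gnuplot graphs of queue growth, crash
   and hang counts, and execution speed over time.) */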
3491 
3492 
3493 
3494 /* A helper function for maybe_delete_out_dir(), deleting all prefixed
3495    files in a directory. */
3496 
3497 static u8 delete_files(u8* path, u8* prefix) {
3498 
3499   DIR* d;
3500   struct dirent* d_ent;
3501 
3502   d = opendir(path);
3503 
3504   if (!d) return 0;
3505 
3506   while ((d_ent = readdir(d))) {
3507 
3508     if (d_ent->d_name[0] != '.' && (!prefix ||
3509         !strncmp(d_ent->d_name, prefix, strlen(prefix)))) {
3510 
3511       u8* fname = alloc_printf("%s/%s", path, d_ent->d_name);
3512       if (unlink(fname)) PFATAL("Unable to delete '%s'", fname);
3513       ck_free(fname);
3514 
3515     }
3516 
3517   }
3518 
3519   closedir(d);
3520 
3521   return !!rmdir(path);
3522 
3523 }
3524 
3525 
3526 /* Get the number of runnable processes, with some simple smoothing. */
3527 
3528 static double get_runnable_processes(void) {
3529 
3530   static double res;
3531 
3532 #if defined(__APPLE__) || defined(__FreeBSD__) || defined (__OpenBSD__)
3533 
3534   /* I don't see any portable sysctl or so that would quickly give us the
3535      number of runnable processes; the 1-minute load average can be a
3536      semi-decent approximation, though. */
3537 
3538   if (getloadavg(&res, 1) != 1) return 0;
3539 
3540 #else
3541 
3542   /* On Linux, /proc/stat is probably the best way; load averages are
3543      computed in funny ways and sometimes don't reflect extremely short-lived
3544      processes well. */
3545 
3546   FILE* f = fopen("/proc/stat", "r");
3547   u8 tmp[1024];
3548   u32 val = 0;
3549 
3550   if (!f) return 0;
3551 
3552   while (fgets(tmp, sizeof(tmp), f)) {
3553 
3554     if (!strncmp(tmp, "procs_running ", 14) ||
3555         !strncmp(tmp, "procs_blocked ", 14)) val += atoi(tmp + 14);
3556 
3557   }
3558 
3559   fclose(f);
3560 
3561   if (!res) {
3562 
3563     res = val;
3564 
3565   } else {
3566 
3567     res = res * (1.0 - 1.0 / AVG_SMOOTHING) +
3568           ((double)val) * (1.0 / AVG_SMOOTHING);
3569 
3570   }
3571 
3572 #endif /* ^(__APPLE__ || __FreeBSD__ || __OpenBSD__) */
3573 
3574   return res;
3575 
3576 }
3577 
3578 
3579 /* Delete the temporary directory used for in-place session resume. */
3580 
3581 static void nuke_resume_dir(void) {
3582 
3583   u8* fn;
3584 
3585   fn = alloc_printf("%s/_resume/.state/deterministic_done", out_dir);
3586   if (delete_files(fn, CASE_PREFIX)) goto dir_cleanup_failed;
3587   ck_free(fn);
3588 
3589   fn = alloc_printf("%s/_resume/.state/auto_extras", out_dir);
3590   if (delete_files(fn, "auto_")) goto dir_cleanup_failed;
3591   ck_free(fn);
3592 
3593   fn = alloc_printf("%s/_resume/.state/redundant_edges", out_dir);
3594   if (delete_files(fn, CASE_PREFIX)) goto dir_cleanup_failed;
3595   ck_free(fn);
3596 
3597   fn = alloc_printf("%s/_resume/.state/variable_behavior", out_dir);
3598   if (delete_files(fn, CASE_PREFIX)) goto dir_cleanup_failed;
3599   ck_free(fn);
3600 
3601   fn = alloc_printf("%s/_resume/.state", out_dir);
3602   if (rmdir(fn) && errno != ENOENT) goto dir_cleanup_failed;
3603   ck_free(fn);
3604 
3605   fn = alloc_printf("%s/_resume", out_dir);
3606   if (delete_files(fn, CASE_PREFIX)) goto dir_cleanup_failed;
3607   ck_free(fn);
3608 
3609   return;
3610 
3611 dir_cleanup_failed:
3612 
3613   FATAL("_resume directory cleanup failed");
3614 
3615 }
3616 
3617 
3618 /* Delete the fuzzer output directory if we recognize it as ours, if the fuzzer
3619    is not currently running, and if the previous run was short enough to discard. */
3620 
3621 static void maybe_delete_out_dir(void) {
3622 
3623   FILE* f;
3624   u8 *fn = alloc_printf("%s/fuzzer_stats", out_dir);
3625 
3626   /* See if the output directory is locked. If yes, bail out. If not,
3627      create a lock that will persist for the lifetime of the process
3628      (this requires leaving the descriptor open).*/
3629 
3630   out_dir_fd = open(out_dir, O_RDONLY);
3631   if (out_dir_fd < 0) PFATAL("Unable to open '%s'", out_dir);
3632 
3633 #ifndef __sun
3634 
3635   if (flock(out_dir_fd, LOCK_EX | LOCK_NB) && errno == EWOULDBLOCK) {
3636 
3637     SAYF("\n" cLRD "[-] " cRST
3638          "Looks like the job output directory is being actively used by another\n"
3639          "    instance of afl-fuzz. You will need to choose a different %s\n"
3640          "    or stop the other process first.\n",
3641          sync_id ? "fuzzer ID" : "output location");
3642 
3643     FATAL("Directory '%s' is in use", out_dir);
3644 
3645   }
3646 
3647 #endif /* !__sun */
3648 
3649   f = fopen(fn, "r");
3650 
3651   if (f) {
3652 
3653     u64 start_time, last_update;
3654 
3655     if (fscanf(f, "start_time     : %llu\n"
3656                   "last_update    : %llu\n", &start_time, &last_update) != 2)
3657       FATAL("Malformed data in '%s'", fn);
3658 
3659     fclose(f);
3660 
3661     /* Let's see how much work is at stake. */
3662 
3663     if (!in_place_resume && last_update - start_time > OUTPUT_GRACE * 60) {
3664 
3665       SAYF("\n" cLRD "[-] " cRST
3666            "The job output directory already exists and contains the results of more\n"
3667            "    than %u minutes worth of fuzzing. To avoid data loss, afl-fuzz will *NOT*\n"
3668            "    automatically delete this data for you.\n\n"
3669 
3670            "    If you wish to start a new session, remove or rename the directory manually,\n"
3671            "    or specify a different output location for this job. To resume the old\n"
3672            "    session, put '-' as the input directory in the command line ('-i -') and\n"
3673            "    try again.\n", OUTPUT_GRACE);
3674 
3675        FATAL("At-risk data found in '%s'", out_dir);
3676 
3677     }
3678 
3679   }
3680 
3681   ck_free(fn);
3682 
3683   /* The idea for in-place resume is pretty simple: we temporarily move the old
3684      queue/ to a new location that gets deleted once import to the new queue/
3685      is finished. If _resume/ already exists, the current queue/ may be
3686      incomplete due to an earlier abort, so we want to use the old _resume/
3687      dir instead, and we let rename() fail silently. */
3688 
3689   if (in_place_resume) {
3690 
3691     u8* orig_q = alloc_printf("%s/queue", out_dir);
3692 
3693     in_dir = alloc_printf("%s/_resume", out_dir);
3694 
3695     rename(orig_q, in_dir); /* Ignore errors */
3696 
3697     OKF("Output directory exists, will attempt session resume.");
3698 
3699     ck_free(orig_q);
3700 
3701   } else {
3702 
3703     OKF("Output directory exists but deemed OK to reuse.");
3704 
3705   }
3706 
3707   ACTF("Deleting old session data...");
3708 
3709   /* Okay, let's get the ball rolling! First, we need to get rid of the entries
3710      in <out_dir>/.synced/.../id:*, if any are present. */
3711 
3712   if (!in_place_resume) {
3713 
3714     fn = alloc_printf("%s/.synced", out_dir);
3715     if (delete_files(fn, NULL)) goto dir_cleanup_failed;
3716     ck_free(fn);
3717 
3718   }
3719 
3720   /* Next, we need to clean up <out_dir>/queue/.state/ subdirectories: */
3721 
3722   fn = alloc_printf("%s/queue/.state/deterministic_done", out_dir);
3723   if (delete_files(fn, CASE_PREFIX)) goto dir_cleanup_failed;
3724   ck_free(fn);
3725 
3726   fn = alloc_printf("%s/queue/.state/auto_extras", out_dir);
3727   if (delete_files(fn, "auto_")) goto dir_cleanup_failed;
3728   ck_free(fn);
3729 
3730   fn = alloc_printf("%s/queue/.state/redundant_edges", out_dir);
3731   if (delete_files(fn, CASE_PREFIX)) goto dir_cleanup_failed;
3732   ck_free(fn);
3733 
3734   fn = alloc_printf("%s/queue/.state/variable_behavior", out_dir);
3735   if (delete_files(fn, CASE_PREFIX)) goto dir_cleanup_failed;
3736   ck_free(fn);
3737 
3738   /* Then, get rid of the .state subdirectory itself (should be empty by now)
3739      and everything matching <out_dir>/queue/id:*. */
3740 
3741   fn = alloc_printf("%s/queue/.state", out_dir);
3742   if (rmdir(fn) && errno != ENOENT) goto dir_cleanup_failed;
3743   ck_free(fn);
3744 
3745   fn = alloc_printf("%s/queue", out_dir);
3746   if (delete_files(fn, CASE_PREFIX)) goto dir_cleanup_failed;
3747   ck_free(fn);
3748 
3749   /* All right, let's do <out_dir>/crashes/id:* and <out_dir>/hangs/id:*. */
3750 
3751   if (!in_place_resume) {
3752 
3753     fn = alloc_printf("%s/crashes/README.txt", out_dir);
3754     unlink(fn); /* Ignore errors */
3755     ck_free(fn);
3756 
3757   }
3758 
3759   fn = alloc_printf("%s/crashes", out_dir);
3760 
3761   /* Make backup of the crashes directory if it's not empty and if we're
3762      doing in-place resume. */
3763 
3764   if (in_place_resume && rmdir(fn)) {
3765 
3766     time_t cur_t = time(0);
3767     struct tm* t = localtime(&cur_t);
3768 
3769 #ifndef SIMPLE_FILES
3770 
3771     u8* nfn = alloc_printf("%s.%04u-%02u-%02u-%02u:%02u:%02u", fn,
3772                            t->tm_year + 1900, t->tm_mon + 1, t->tm_mday,
3773                            t->tm_hour, t->tm_min, t->tm_sec);
3774 
3775 #else
3776 
3777     u8* nfn = alloc_printf("%s_%04u%02u%02u%02u%02u%02u", fn,
3778                            t->tm_year + 1900, t->tm_mon + 1, t->tm_mday,
3779                            t->tm_hour, t->tm_min, t->tm_sec);
3780 
3781 #endif /* ^!SIMPLE_FILES */
3782 
3783     rename(fn, nfn); /* Ignore errors. */
3784     ck_free(nfn);
3785 
3786   }
3787 
3788   if (delete_files(fn, CASE_PREFIX)) goto dir_cleanup_failed;
3789   ck_free(fn);
3790 
3791   fn = alloc_printf("%s/hangs", out_dir);
3792 
3793   /* Backup hangs, too. */
3794 
3795   if (in_place_resume && rmdir(fn)) {
3796 
3797     time_t cur_t = time(0);
3798     struct tm* t = localtime(&cur_t);
3799 
3800 #ifndef SIMPLE_FILES
3801 
3802     u8* nfn = alloc_printf("%s.%04u-%02u-%02u-%02u:%02u:%02u", fn,
3803                            t->tm_year + 1900, t->tm_mon + 1, t->tm_mday,
3804                            t->tm_hour, t->tm_min, t->tm_sec);
3805 
3806 #else
3807 
3808     u8* nfn = alloc_printf("%s_%04u%02u%02u%02u%02u%02u", fn,
3809                            t->tm_year + 1900, t->tm_mon + 1, t->tm_mday,
3810                            t->tm_hour, t->tm_min, t->tm_sec);
3811 
3812 #endif /* ^!SIMPLE_FILES */
3813 
3814     rename(fn, nfn); /* Ignore errors. */
3815     ck_free(nfn);
3816 
3817   }
3818 
3819   if (delete_files(fn, CASE_PREFIX)) goto dir_cleanup_failed;
3820   ck_free(fn);
3821 
3822   /* And now, for some finishing touches. */
3823 
3824   fn = alloc_printf("%s/.cur_input", out_dir);
3825   if (unlink(fn) && errno != ENOENT) goto dir_cleanup_failed;
3826   ck_free(fn);
3827 
3828   fn = alloc_printf("%s/fuzz_bitmap", out_dir);
3829   if (unlink(fn) && errno != ENOENT) goto dir_cleanup_failed;
3830   ck_free(fn);
3831 
3832   if (!in_place_resume) {
3833     fn  = alloc_printf("%s/fuzzer_stats", out_dir);
3834     if (unlink(fn) && errno != ENOENT) goto dir_cleanup_failed;
3835     ck_free(fn);
3836   }
3837 
3838   fn = alloc_printf("%s/plot_data", out_dir);
3839   if (unlink(fn) && errno != ENOENT) goto dir_cleanup_failed;
3840   ck_free(fn);
3841 
3842   OKF("Output dir cleanup successful.");
3843 
3844   /* Wow... is that all? If yes, celebrate! */
3845 
3846   return;
3847 
3848 dir_cleanup_failed:
3849 
3850   SAYF("\n" cLRD "[-] " cRST
3851        "Whoops, the fuzzer tried to reuse your output directory, but bumped into\n"
3852        "    some files that shouldn't be there or that couldn't be removed - so it\n"
3853        "    decided to abort! This happened while processing this path:\n\n"
3854 
3855        "    %s\n\n"
3856        "    Please examine and manually delete the files, or specify a different\n"
3857        "    output location for the tool.\n", fn);
3858 
3859   FATAL("Output directory cleanup failed");
3860 
3861 }
3862 
3863 
3864 static void check_term_size(void);
3865 
3866 
3867 /* A spiffy retro stats screen! This is called every stats_update_freq
3868    execve() calls, plus in several other circumstances. */
3869 
show_stats(void)3870 static void show_stats(void) {
3871 
3872   static u64 last_stats_ms, last_plot_ms, last_ms, last_execs;
3873   static double avg_exec;
3874   double t_byte_ratio, stab_ratio;
3875 
3876   u64 cur_ms;
3877   u32 t_bytes, t_bits;
3878 
3879   u32 banner_len, banner_pad;
3880   u8  tmp[256];
3881 
3882   cur_ms = get_cur_time();
3883 
3884   /* If not enough time has passed since last UI update, bail out. */
3885 
3886   if (cur_ms - last_ms < 1000 / UI_TARGET_HZ) return;
3887 
3888   /* Check if we're past the 10 minute mark. */
3889 
3890   if (cur_ms - start_time > 10 * 60 * 1000) run_over10m = 1;
3891 
3892   /* Calculate smoothed exec speed stats. */
3893 
3894   if (!last_execs) {
3895 
3896     avg_exec = ((double)total_execs) * 1000 / (cur_ms - start_time);
3897 
3898   } else {
3899 
3900     double cur_avg = ((double)(total_execs - last_execs)) * 1000 /
3901                      (cur_ms - last_ms);
3902 
3903     /* If there is a dramatic (5x+) jump in speed, reset the indicator
3904        more quickly. */
3905 
3906     if (cur_avg * 5 < avg_exec || cur_avg / 5 > avg_exec)
3907       avg_exec = cur_avg;
3908 
3909     avg_exec = avg_exec * (1.0 - 1.0 / AVG_SMOOTHING) +
3910                cur_avg * (1.0 / AVG_SMOOTHING);
3911 
3912   }
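
  /* Worked example, assuming the stock AVG_SMOOTHING of 16 from config.h:
     with avg_exec at 1000 execs/sec and a fresh cur_avg of 1160, the update
     yields 1000 * (15/16) + 1160 * (1/16) = 1010 - a moving average over
     roughly the last 16 refreshes. */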

  last_ms = cur_ms;
  last_execs = total_execs;

  /* Tell the callers when to contact us (as measured in execs). */

  stats_update_freq = avg_exec / (UI_TARGET_HZ * 10);
  if (!stats_update_freq) stats_update_freq = 1;
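
  /* E.g., at ~4000 execs/sec with the stock UI_TARGET_HZ of 5, this works
     out to 4000 / 50 = 80, i.e., callers poll us every 80 execs. */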

  /* Do some bitmap stats. */

  t_bytes = count_non_255_bytes(virgin_bits);
  t_byte_ratio = ((double)t_bytes * 100) / MAP_SIZE;

  if (t_bytes)
    stab_ratio = 100 - ((double)var_byte_count) * 100 / t_bytes;
  else
    stab_ratio = 100;

  /* Roughly every minute, update fuzzer stats and save auto tokens. */

  if (cur_ms - last_stats_ms > STATS_UPDATE_SEC * 1000) {

    last_stats_ms = cur_ms;
    write_stats_file(t_byte_ratio, stab_ratio, avg_exec);
    save_auto();
    write_bitmap();

  }

  /* Every now and then, write plot data. */

  if (cur_ms - last_plot_ms > PLOT_UPDATE_SEC * 1000) {

    last_plot_ms = cur_ms;
    maybe_update_plot_file(t_byte_ratio, avg_exec);

  }

  /* Honor AFL_EXIT_WHEN_DONE and AFL_BENCH_UNTIL_CRASH. */

  if (!dumb_mode && cycles_wo_finds > 100 && !pending_not_fuzzed &&
      getenv("AFL_EXIT_WHEN_DONE")) stop_soon = 2;

  if (total_crashes && getenv("AFL_BENCH_UNTIL_CRASH")) stop_soon = 2;

  /* If we're not on TTY, bail out. */

  if (not_on_tty) return;

  /* Compute some mildly useful bitmap stats. */

  t_bits = (MAP_SIZE << 3) - count_bits(virgin_bits);

  /* Now, for the visuals... */

  if (clear_screen) {

    SAYF(TERM_CLEAR CURSOR_HIDE);
    clear_screen = 0;

    check_term_size();

  }

  SAYF(TERM_HOME);

  if (term_too_small) {

    SAYF(cBRI "Your terminal is too small to display the UI.\n"
         "Please resize terminal window to at least 80x25.\n" cRST);

    return;

  }

  /* Let's start by drawing a centered banner. */

  banner_len = (crash_mode ? 24 : 22) + strlen(VERSION) + strlen(use_banner);
  banner_pad = (80 - banner_len) / 2;
  memset(tmp, ' ', banner_pad);

  sprintf(tmp + banner_pad, "%s " cLCY VERSION cLGN
          " (%s)",  crash_mode ? cPIN "peruvian were-rabbit" :
          cYEL "american fuzzy lop", use_banner);

  SAYF("\n%s\n\n", tmp);

  /* "Handy" shortcuts for drawing boxes... */

#define bSTG    bSTART cGRA
#define bH2     bH bH
#define bH5     bH2 bH2 bH
#define bH10    bH5 bH5
#define bH20    bH10 bH10
#define bH30    bH20 bH10
#define SP5     "     "
#define SP10    SP5 SP5
#define SP20    SP10 SP10

  /* Lord, forgive me this. */

  SAYF(SET_G1 bSTG bLT bH bSTOP cCYA " process timing " bSTG bH30 bH5 bH2 bHB
       bH bSTOP cCYA " overall results " bSTG bH5 bRT "\n");

  if (dumb_mode) {

    strcpy(tmp, cRST);

  } else {

    u64 min_wo_finds = (cur_ms - last_path_time) / 1000 / 60;

    /* First queue cycle: don't stop now! */
    if (queue_cycle == 1 || min_wo_finds < 15) strcpy(tmp, cMGN); else

    /* Subsequent cycles, but we're still making finds. */
    if (cycles_wo_finds < 25 || min_wo_finds < 30) strcpy(tmp, cYEL); else

    /* No finds for a long time and no test cases to try. */
    if (cycles_wo_finds > 100 && !pending_not_fuzzed && min_wo_finds > 120)
      strcpy(tmp, cLGN);

    /* Default: cautiously OK to stop? */
    else strcpy(tmp, cLBL);

  }

  SAYF(bV bSTOP "        run time : " cRST "%-34s " bSTG bV bSTOP
       "  cycles done : %s%-5s  " bSTG bV "\n",
       DTD(cur_ms, start_time), tmp, DI(queue_cycle - 1));

  /* We want to warn people about not seeing new paths after a full cycle,
     except when resuming fuzzing or running in non-instrumented mode. */

  if (!dumb_mode && (last_path_time || resuming_fuzz || queue_cycle == 1 ||
      in_bitmap || crash_mode)) {

    SAYF(bV bSTOP "   last new path : " cRST "%-34s ",
         DTD(cur_ms, last_path_time));

  } else {

    if (dumb_mode)

      SAYF(bV bSTOP "   last new path : " cPIN "n/a" cRST
           " (non-instrumented mode)        ");

    else

      SAYF(bV bSTOP "   last new path : " cRST "none yet " cLRD
           "(odd, check syntax!)      ");

  }

  SAYF(bSTG bV bSTOP "  total paths : " cRST "%-5s  " bSTG bV "\n",
       DI(queued_paths));

  /* Highlight crashes in red if found, denote going over the KEEP_UNIQUE_CRASH
     limit with a '+' appended to the count. */

  sprintf(tmp, "%s%s", DI(unique_crashes),
          (unique_crashes >= KEEP_UNIQUE_CRASH) ? "+" : "");

  SAYF(bV bSTOP " last uniq crash : " cRST "%-34s " bSTG bV bSTOP
       " uniq crashes : %s%-6s " bSTG bV "\n",
       DTD(cur_ms, last_crash_time), unique_crashes ? cLRD : cRST,
       tmp);

  sprintf(tmp, "%s%s", DI(unique_hangs),
         (unique_hangs >= KEEP_UNIQUE_HANG) ? "+" : "");

  SAYF(bV bSTOP "  last uniq hang : " cRST "%-34s " bSTG bV bSTOP
       "   uniq hangs : " cRST "%-6s " bSTG bV "\n",
       DTD(cur_ms, last_hang_time), tmp);

  SAYF(bVR bH bSTOP cCYA " cycle progress " bSTG bH20 bHB bH bSTOP cCYA
       " map coverage " bSTG bH bHT bH20 bH2 bH bVL "\n");

  /* This gets funny because we want to print several variable-length variables
     together, but then cram them into a fixed-width field - so we need to
     put them in a temporary buffer first. */

  sprintf(tmp, "%s%s (%0.02f%%)", DI(current_entry),
          queue_cur->favored ? "" : "*",
          ((double)current_entry * 100) / queued_paths);

  SAYF(bV bSTOP "  now processing : " cRST "%-17s " bSTG bV bSTOP, tmp);

  sprintf(tmp, "%0.02f%% / %0.02f%%", ((double)queue_cur->bitmap_size) *
          100 / MAP_SIZE, t_byte_ratio);

  SAYF("    map density : %s%-21s " bSTG bV "\n", t_byte_ratio > 70 ? cLRD :
       ((t_bytes < 200 && !dumb_mode) ? cPIN : cRST), tmp);

  sprintf(tmp, "%s (%0.02f%%)", DI(cur_skipped_paths),
          ((double)cur_skipped_paths * 100) / queued_paths);

  SAYF(bV bSTOP " paths timed out : " cRST "%-17s " bSTG bV, tmp);

  sprintf(tmp, "%0.02f bits/tuple",
          t_bytes ? (((double)t_bits) / t_bytes) : 0);

  SAYF(bSTOP " count coverage : " cRST "%-21s " bSTG bV "\n", tmp);

  SAYF(bVR bH bSTOP cCYA " stage progress " bSTG bH20 bX bH bSTOP cCYA
       " findings in depth " bSTG bH20 bVL "\n");

  sprintf(tmp, "%s (%0.02f%%)", DI(queued_favored),
          ((double)queued_favored) * 100 / queued_paths);

  /* Yeah... it's still going on... halp? */

  SAYF(bV bSTOP "  now trying : " cRST "%-21s " bSTG bV bSTOP
       " favored paths : " cRST "%-22s " bSTG bV "\n", stage_name, tmp);

  if (!stage_max) {

    sprintf(tmp, "%s/-", DI(stage_cur));

  } else {

    sprintf(tmp, "%s/%s (%0.02f%%)", DI(stage_cur), DI(stage_max),
            ((double)stage_cur) * 100 / stage_max);

  }

  SAYF(bV bSTOP " stage execs : " cRST "%-21s " bSTG bV bSTOP, tmp);

  sprintf(tmp, "%s (%0.02f%%)", DI(queued_with_cov),
          ((double)queued_with_cov) * 100 / queued_paths);

  SAYF("  new edges on : " cRST "%-22s " bSTG bV "\n", tmp);

  sprintf(tmp, "%s (%s%s unique)", DI(total_crashes), DI(unique_crashes),
          (unique_crashes >= KEEP_UNIQUE_CRASH) ? "+" : "");

  if (crash_mode) {

    SAYF(bV bSTOP " total execs : " cRST "%-21s " bSTG bV bSTOP
         "   new crashes : %s%-22s " bSTG bV "\n", DI(total_execs),
         unique_crashes ? cLRD : cRST, tmp);

  } else {

    SAYF(bV bSTOP " total execs : " cRST "%-21s " bSTG bV bSTOP
         " total crashes : %s%-22s " bSTG bV "\n", DI(total_execs),
         unique_crashes ? cLRD : cRST, tmp);

  }

  /* Show a warning about slow execution. */

  if (avg_exec < 100) {

    sprintf(tmp, "%s/sec (%s)", DF(avg_exec), avg_exec < 20 ?
            "zzzz..." : "slow!");

    SAYF(bV bSTOP "  exec speed : " cLRD "%-21s ", tmp);

  } else {

    sprintf(tmp, "%s/sec", DF(avg_exec));
    SAYF(bV bSTOP "  exec speed : " cRST "%-21s ", tmp);

  }

  sprintf(tmp, "%s (%s%s unique)", DI(total_tmouts), DI(unique_tmouts),
          (unique_hangs >= KEEP_UNIQUE_HANG) ? "+" : "");

  SAYF (bSTG bV bSTOP "  total tmouts : " cRST "%-22s " bSTG bV "\n", tmp);

  /* Aaaalmost there... hold on! */

  SAYF(bVR bH cCYA bSTOP " fuzzing strategy yields " bSTG bH10 bH bHT bH10
       bH5 bHB bH bSTOP cCYA " path geometry " bSTG bH5 bH2 bH bVL "\n");

  if (skip_deterministic) {

    strcpy(tmp, "n/a, n/a, n/a");

  } else {

    sprintf(tmp, "%s/%s, %s/%s, %s/%s",
            DI(stage_finds[STAGE_FLIP1]), DI(stage_cycles[STAGE_FLIP1]),
            DI(stage_finds[STAGE_FLIP2]), DI(stage_cycles[STAGE_FLIP2]),
            DI(stage_finds[STAGE_FLIP4]), DI(stage_cycles[STAGE_FLIP4]));

  }

  SAYF(bV bSTOP "   bit flips : " cRST "%-37s " bSTG bV bSTOP "    levels : "
       cRST "%-10s " bSTG bV "\n", tmp, DI(max_depth));

  if (!skip_deterministic)
    sprintf(tmp, "%s/%s, %s/%s, %s/%s",
            DI(stage_finds[STAGE_FLIP8]), DI(stage_cycles[STAGE_FLIP8]),
            DI(stage_finds[STAGE_FLIP16]), DI(stage_cycles[STAGE_FLIP16]),
            DI(stage_finds[STAGE_FLIP32]), DI(stage_cycles[STAGE_FLIP32]));

  SAYF(bV bSTOP "  byte flips : " cRST "%-37s " bSTG bV bSTOP "   pending : "
       cRST "%-10s " bSTG bV "\n", tmp, DI(pending_not_fuzzed));

  if (!skip_deterministic)
    sprintf(tmp, "%s/%s, %s/%s, %s/%s",
            DI(stage_finds[STAGE_ARITH8]), DI(stage_cycles[STAGE_ARITH8]),
            DI(stage_finds[STAGE_ARITH16]), DI(stage_cycles[STAGE_ARITH16]),
            DI(stage_finds[STAGE_ARITH32]), DI(stage_cycles[STAGE_ARITH32]));

  SAYF(bV bSTOP " arithmetics : " cRST "%-37s " bSTG bV bSTOP "  pend fav : "
       cRST "%-10s " bSTG bV "\n", tmp, DI(pending_favored));

  if (!skip_deterministic)
    sprintf(tmp, "%s/%s, %s/%s, %s/%s",
            DI(stage_finds[STAGE_INTEREST8]), DI(stage_cycles[STAGE_INTEREST8]),
            DI(stage_finds[STAGE_INTEREST16]), DI(stage_cycles[STAGE_INTEREST16]),
            DI(stage_finds[STAGE_INTEREST32]), DI(stage_cycles[STAGE_INTEREST32]));

  SAYF(bV bSTOP "  known ints : " cRST "%-37s " bSTG bV bSTOP " own finds : "
       cRST "%-10s " bSTG bV "\n", tmp, DI(queued_discovered));

  if (!skip_deterministic)
    sprintf(tmp, "%s/%s, %s/%s, %s/%s",
            DI(stage_finds[STAGE_EXTRAS_UO]), DI(stage_cycles[STAGE_EXTRAS_UO]),
            DI(stage_finds[STAGE_EXTRAS_UI]), DI(stage_cycles[STAGE_EXTRAS_UI]),
            DI(stage_finds[STAGE_EXTRAS_AO]), DI(stage_cycles[STAGE_EXTRAS_AO]));

  SAYF(bV bSTOP "  dictionary : " cRST "%-37s " bSTG bV bSTOP
       "  imported : " cRST "%-10s " bSTG bV "\n", tmp,
       sync_id ? DI(queued_imported) : (u8*)"n/a");

  sprintf(tmp, "%s/%s, %s/%s",
          DI(stage_finds[STAGE_HAVOC]), DI(stage_cycles[STAGE_HAVOC]),
          DI(stage_finds[STAGE_SPLICE]), DI(stage_cycles[STAGE_SPLICE]));

  SAYF(bV bSTOP "       havoc : " cRST "%-37s " bSTG bV bSTOP, tmp);

  if (t_bytes) sprintf(tmp, "%0.02f%%", stab_ratio);
    else strcpy(tmp, "n/a");

  SAYF(" stability : %s%-10s " bSTG bV "\n", (stab_ratio < 85 && var_byte_count > 40)
       ? cLRD : ((queued_variable && (!persistent_mode || var_byte_count > 20))
       ? cMGN : cRST), tmp);

  if (!bytes_trim_out) {

    sprintf(tmp, "n/a, ");

  } else {

    sprintf(tmp, "%0.02f%%/%s, ",
            ((double)(bytes_trim_in - bytes_trim_out)) * 100 / bytes_trim_in,
            DI(trim_execs));

  }

  if (!blocks_eff_total) {

    u8 tmp2[128];

    sprintf(tmp2, "n/a");
    strcat(tmp, tmp2);

  } else {

    u8 tmp2[128];

    sprintf(tmp2, "%0.02f%%",
            ((double)(blocks_eff_total - blocks_eff_select)) * 100 /
            blocks_eff_total);

    strcat(tmp, tmp2);

  }

  SAYF(bV bSTOP "        trim : " cRST "%-37s " bSTG bVR bH20 bH2 bH2 bRB "\n"
       bLB bH30 bH20 bH2 bH bRB bSTOP cRST RESET_G1, tmp);

  /* Provide some CPU utilization stats. */

  if (cpu_core_count) {

    double cur_runnable = get_runnable_processes();
    u32 cur_utilization = cur_runnable * 100 / cpu_core_count;

    u8* cpu_color = cCYA;

    /* If we could still run one or more processes, use green. */

    if (cpu_core_count > 1 && cur_runnable + 1 <= cpu_core_count)
      cpu_color = cLGN;

    /* If we're clearly oversubscribed, use red. */

    if (!no_cpu_meter_red && cur_utilization >= 150) cpu_color = cLRD;

#ifdef HAVE_AFFINITY

    if (cpu_aff >= 0) {

      SAYF(SP10 cGRA "[cpu%03u:%s%3u%%" cGRA "]\r" cRST,
           MIN(cpu_aff, 999), cpu_color,
           MIN(cur_utilization, 999));

    } else {

      SAYF(SP10 cGRA "   [cpu:%s%3u%%" cGRA "]\r" cRST,
           cpu_color, MIN(cur_utilization, 999));

    }

#else

    SAYF(SP10 cGRA "   [cpu:%s%3u%%" cGRA "]\r" cRST,
         cpu_color, MIN(cur_utilization, 999));

#endif /* ^HAVE_AFFINITY */

  } else SAYF("\r");

  /* Hallelujah! */

  fflush(0);

}


/* Display quick statistics at the end of processing the input directory,
   plus a bunch of warnings. Some calibration stuff also ended up here,
   along with several hardcoded constants. Maybe clean up eventually. */

static void show_init_stats(void) {

  struct queue_entry* q = queue;
  u32 min_bits = 0, max_bits = 0;
  u64 min_us = 0, max_us = 0;
  u64 avg_us = 0;
  u32 max_len = 0;

  if (total_cal_cycles) avg_us = total_cal_us / total_cal_cycles;

  while (q) {

    if (!min_us || q->exec_us < min_us) min_us = q->exec_us;
    if (q->exec_us > max_us) max_us = q->exec_us;

    if (!min_bits || q->bitmap_size < min_bits) min_bits = q->bitmap_size;
    if (q->bitmap_size > max_bits) max_bits = q->bitmap_size;

    if (q->len > max_len) max_len = q->len;

    q = q->next;

  }

  SAYF("\n");

  if (avg_us > (qemu_mode ? 50000 : 10000))
    WARNF(cLRD "The target binary is pretty slow! See %s/perf_tips.txt.",
          doc_path);

  /* Let's keep things moving with slow binaries. */

  if (avg_us > 50000) havoc_div = 10;     /* 0-19 execs/sec   */
  else if (avg_us > 20000) havoc_div = 5; /* 20-49 execs/sec  */
  else if (avg_us > 10000) havoc_div = 2; /* 50-100 execs/sec */

  if (!resuming_fuzz) {

    if (max_len > 50 * 1024)
      WARNF(cLRD "Some test cases are huge (%s) - see %s/perf_tips.txt!",
            DMS(max_len), doc_path);
    else if (max_len > 10 * 1024)
      WARNF("Some test cases are big (%s) - see %s/perf_tips.txt.",
            DMS(max_len), doc_path);

    if (useless_at_start && !in_bitmap)
      WARNF(cLRD "Some test cases look useless. Consider using a smaller set.");

    if (queued_paths > 100)
      WARNF(cLRD "You probably have far too many input files! Consider trimming down.");
    else if (queued_paths > 20)
      WARNF("You have lots of input files; try starting small.");

  }

  OKF("Here are some useful stats:\n\n"

      cGRA "    Test case count : " cRST "%u favored, %u variable, %u total\n"
      cGRA "       Bitmap range : " cRST "%u to %u bits (average: %0.02f bits)\n"
      cGRA "        Exec timing : " cRST "%s to %s us (average: %s us)\n",
      queued_favored, queued_variable, queued_paths, min_bits, max_bits,
      ((double)total_bitmap_size) / (total_bitmap_entries ? total_bitmap_entries : 1),
      DI(min_us), DI(max_us), DI(avg_us));

  if (!timeout_given) {

    /* Figure out the appropriate timeout. The basic idea is: 5x average or
       1x max, rounded up to EXEC_TM_ROUND ms and capped at 1 second.

       If the program is slow, the multiplier is lowered to 2x or 3x, because
       random scheduler jitter is less likely to have any impact, and because
       our patience is wearing thin =) */

    if (avg_us > 50000) exec_tmout = avg_us * 2 / 1000;
    else if (avg_us > 10000) exec_tmout = avg_us * 3 / 1000;
    else exec_tmout = avg_us * 5 / 1000;

    exec_tmout = MAX(exec_tmout, max_us / 1000);
    exec_tmout = (exec_tmout + EXEC_TM_ROUND) / EXEC_TM_ROUND * EXEC_TM_ROUND;

    if (exec_tmout > EXEC_TIMEOUT) exec_tmout = EXEC_TIMEOUT;
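
    /* Worked example, assuming the stock EXEC_TM_ROUND of 20 ms: a fast
       target with avg_us = 800 gets 5x -> 4 ms, which the rounding step
       lifts to 20 ms; a slow one with avg_us = 60000 gets 2x -> 120 ms,
       rounded up to 140 ms. */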

    ACTF("No -t option specified, so I'll use exec timeout of %u ms.",
         exec_tmout);

    timeout_given = 1;

  } else if (timeout_given == 3) {

    ACTF("Applying timeout settings from resumed session (%u ms).", exec_tmout);

  }

  /* In dumb mode, re-running every timing-out test case with a generous time
     limit is very expensive, so let's select a more conservative default. */

  if (dumb_mode && !getenv("AFL_HANG_TMOUT"))
    hang_tmout = MIN(EXEC_TIMEOUT, exec_tmout * 2 + 100);
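
  /* (E.g., a 40 ms exec timeout then yields a 180 ms hang timeout instead
     of the much more generous EXEC_TIMEOUT default of 1000 ms.) */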

  OKF("All set and ready to roll!");

}


/* Find first power of two greater than or equal to val (assuming val under
   2^31). */
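
/* For instance, next_p2(5) == 8, next_p2(8) == 8, and next_p2(9) == 16. */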

static u32 next_p2(u32 val) {

  u32 ret = 1;
  while (val > ret) ret <<= 1;
  return ret;

}


/* Trim all new test cases to save cycles when doing deterministic checks. The
   trimmer uses power-of-two increments somewhere between 1/16 and 1/1024 of
   file size, to keep the stage short and sweet. */
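
/* Worked example, assuming the stock config.h values (TRIM_START_STEPS 16,
   TRIM_END_STEPS 1024, TRIM_MIN_BYTES 4): for a 1000-byte case, len_p2 is
   1024, so remove_len starts at 64 bytes and halves down to 4, giving at
   most five passes over the file. */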

static u8 trim_case(char** argv, struct queue_entry* q, u8* in_buf) {

  static u8 tmp[64];
  static u8 clean_trace[MAP_SIZE];

  u8  needs_write = 0, fault = 0;
  u32 trim_exec = 0;
  u32 remove_len;
  u32 len_p2;

  /* Although the trimmer will be less useful when variable behavior is
     detected, it will still work to some extent, so we don't check for
     this. */

  if (q->len < 5) return 0;

  stage_name = tmp;
  bytes_trim_in += q->len;

  /* Select initial chunk len, starting with large steps. */

  len_p2 = next_p2(q->len);

  remove_len = MAX(len_p2 / TRIM_START_STEPS, TRIM_MIN_BYTES);

  /* Continue until the number of steps gets too high or the stepover
     gets too small. */

  while (remove_len >= MAX(len_p2 / TRIM_END_STEPS, TRIM_MIN_BYTES)) {

    u32 remove_pos = remove_len;

    sprintf(tmp, "trim %s/%s", DI(remove_len), DI(remove_len));

    stage_cur = 0;
    stage_max = q->len / remove_len;

    while (remove_pos < q->len) {

      u32 trim_avail = MIN(remove_len, q->len - remove_pos);
      u32 cksum;

      write_with_gap(in_buf, q->len, remove_pos, trim_avail);

      fault = run_target(argv, exec_tmout);
      trim_execs++;

      if (stop_soon || fault == FAULT_ERROR) goto abort_trimming;

      /* Note that we don't keep track of crashes or hangs here; maybe TODO? */

      cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST);

      /* If the deletion had no impact on the trace, make it permanent. This
         isn't perfect for variable-path inputs, but we're just making a
         best-effort pass, so it's not a big deal if we end up with false
         negatives every now and then. */

      if (cksum == q->exec_cksum) {

        u32 move_tail = q->len - remove_pos - trim_avail;

        q->len -= trim_avail;
        len_p2  = next_p2(q->len);

        memmove(in_buf + remove_pos, in_buf + remove_pos + trim_avail,
                move_tail);

        /* Let's save a clean trace, which will be needed by
           update_bitmap_score once we're done with the trimming stuff. */

        if (!needs_write) {

          needs_write = 1;
          memcpy(clean_trace, trace_bits, MAP_SIZE);

        }

      } else remove_pos += remove_len;

      /* Since this can be slow, update the screen every now and then. */

      if (!(trim_exec++ % stats_update_freq)) show_stats();
      stage_cur++;

    }

    remove_len >>= 1;

  }

  /* If we have made changes to in_buf, we also need to update the on-disk
     version of the test case. */

  if (needs_write) {

    s32 fd;

    unlink(q->fname); /* ignore errors */

    fd = open(q->fname, O_WRONLY | O_CREAT | O_EXCL, 0600);

    if (fd < 0) PFATAL("Unable to create '%s'", q->fname);

    ck_write(fd, in_buf, q->len, q->fname);
    close(fd);

    memcpy(trace_bits, clean_trace, MAP_SIZE);
    update_bitmap_score(q);

  }

abort_trimming:

  bytes_trim_out += q->len;
  return fault;

}


/* Write a modified test case, run program, process results. Handle
   error conditions, returning 1 if it's time to bail out. This is
   a helper function for fuzz_one(). */

EXP_ST u8 common_fuzz_stuff(char** argv, u8* out_buf, u32 len) {

  u8 fault;

  if (post_handler) {

    out_buf = post_handler(out_buf, &len);
    if (!out_buf || !len) return 0;

  }

  write_to_testcase(out_buf, len);

  fault = run_target(argv, exec_tmout);

  if (stop_soon) return 1;

  if (fault == FAULT_TMOUT) {

    if (subseq_tmouts++ > TMOUT_LIMIT) {
      cur_skipped_paths++;
      return 1;
    }

  } else subseq_tmouts = 0;

  /* Users can hit us with SIGUSR1 to request the current input
     to be abandoned. */

  if (skip_requested) {

     skip_requested = 0;
     cur_skipped_paths++;
     return 1;

  }

  /* This handles FAULT_ERROR for us: */

  queued_discovered += save_if_interesting(argv, out_buf, len, fault);

  if (!(stage_cur % stats_update_freq) || stage_cur + 1 == stage_max)
    show_stats();

  return 0;

}

/* Helper to choose random block len for block operations in fuzz_one().
   Doesn't return zero, provided that limit is > 0. */
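
/* (With stock config.h, the tiers below are HAVOC_BLK_SMALL = 32,
   HAVOC_BLK_MEDIUM = 128, HAVOC_BLK_LARGE = 1500, and HAVOC_BLK_XL = 32768
   bytes; bigger blocks become more likely as queue cycles accumulate.) */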

static u32 choose_block_len(u32 limit) {

  u32 min_value, max_value;
  u32 rlim = MIN(queue_cycle, 3);

  if (!run_over10m) rlim = 1;

  switch (UR(rlim)) {

    case 0:  min_value = 1;
             max_value = HAVOC_BLK_SMALL;
             break;

    case 1:  min_value = HAVOC_BLK_SMALL;
             max_value = HAVOC_BLK_MEDIUM;
             break;

    default:

             if (UR(10)) {

               min_value = HAVOC_BLK_MEDIUM;
               max_value = HAVOC_BLK_LARGE;

             } else {

               min_value = HAVOC_BLK_LARGE;
               max_value = HAVOC_BLK_XL;

             }

  }

  if (min_value >= limit) min_value = 1;

  return min_value + UR(MIN(max_value, limit) - min_value + 1);

}


/* Calculate case desirability score to adjust the length of havoc fuzzing.
   A helper function for fuzz_one(). Maybe some of these constants should
   go into config.h. */

static u32 calculate_score(struct queue_entry* q) {

  u32 avg_exec_us = total_cal_us / total_cal_cycles;
  u32 avg_bitmap_size = total_bitmap_size / total_bitmap_entries;
  u32 perf_score = 100;

  /* Adjust score based on execution speed of this path, compared to the
     global average. Multiplier ranges from 0.1x to 3x. Fast inputs are
     less expensive to fuzz, so we're giving them more air time. */

  if (q->exec_us * 0.1 > avg_exec_us) perf_score = 10;
  else if (q->exec_us * 0.25 > avg_exec_us) perf_score = 25;
  else if (q->exec_us * 0.5 > avg_exec_us) perf_score = 50;
  else if (q->exec_us * 0.75 > avg_exec_us) perf_score = 75;
  else if (q->exec_us * 4 < avg_exec_us) perf_score = 300;
  else if (q->exec_us * 3 < avg_exec_us) perf_score = 200;
  else if (q->exec_us * 2 < avg_exec_us) perf_score = 150;

  /* Adjust score based on bitmap size. The working theory is that better
     coverage translates to better targets. Multiplier from 0.25x to 3x. */

  if (q->bitmap_size * 0.3 > avg_bitmap_size) perf_score *= 3;
  else if (q->bitmap_size * 0.5 > avg_bitmap_size) perf_score *= 2;
  else if (q->bitmap_size * 0.75 > avg_bitmap_size) perf_score *= 1.5;
  else if (q->bitmap_size * 3 < avg_bitmap_size) perf_score *= 0.25;
  else if (q->bitmap_size * 2 < avg_bitmap_size) perf_score *= 0.5;
  else if (q->bitmap_size * 1.5 < avg_bitmap_size) perf_score *= 0.75;

  /* Adjust score based on handicap. Handicap is proportional to how late
     in the game we learned about this path. Latecomers are allowed to run
     for a bit longer until they catch up with the rest. */

  if (q->handicap >= 4) {

    perf_score *= 4;
    q->handicap -= 4;

  } else if (q->handicap) {

    perf_score *= 2;
    q->handicap--;

  }

  /* Final adjustment based on input depth, under the assumption that fuzzing
     deeper test cases is more likely to reveal stuff that can't be
     discovered with traditional fuzzers. */

  switch (q->depth) {

    case 0 ... 3:   break;
    case 4 ... 7:   perf_score *= 2; break;
    case 8 ... 13:  perf_score *= 3; break;
    case 14 ... 25: perf_score *= 4; break;
    default:        perf_score *= 5;

  }
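
  /* Illustrative walk-through: a case 2.5x faster than average (150) with
     a bitmap 3.5x the average size (x3 -> 450), no handicap, at depth 5
     (x2 -> 900) gets roughly nine times the havoc budget of a baseline
     input, subject to the cap below. */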

  /* Make sure that we don't go over limit. */

  if (perf_score > HAVOC_MAX_MULT * 100) perf_score = HAVOC_MAX_MULT * 100;

  return perf_score;

}


/* Helper function to see if a particular change (xor_val = old ^ new) could
   be a product of deterministic bit flips with the lengths and stepovers
   attempted by afl-fuzz. This is used to avoid dupes in some of the
   deterministic fuzzing operations that follow bit flips. We also
   return 1 if xor_val is zero, which implies that the old and attempted new
   values are identical and the exec would be a waste of time. */

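/* For instance, xor_val == 0x00ff0000 is reachable (the walking byte flip
   hits the third byte), but 0x000ff000 is not: that 8-bit pattern sits at
   a shift of 12, and byte-wide flips only land on 8-bit boundaries. */
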
static u8 could_be_bitflip(u32 xor_val) {

  u32 sh = 0;

  if (!xor_val) return 1;

  /* Shift right until the lowest set bit lands at position 0. */

  while (!(xor_val & 1)) { sh++; xor_val >>= 1; }

  /* 1-, 2-, and 4-bit patterns are OK anywhere. */

  if (xor_val == 1 || xor_val == 3 || xor_val == 15) return 1;

  /* 8-, 16-, and 32-bit patterns are OK only if shift factor is
     divisible by 8, since that's the stepover for these ops. */

  if (sh & 7) return 0;

  if (xor_val == 0xff || xor_val == 0xffff || xor_val == 0xffffffff)
    return 1;

  return 0;

}


/* Helper function to see if a particular value is reachable through
   arithmetic operations. Used for similar purposes. */

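/* E.g., with the stock ARITH_MAX of 35, this returns 1 for a byte going
   from 0x20 to 0x41 (a +33 step the arith stage covers anyway), but 0 for
   0x20 -> 0x99, which no single +/- 35 adjustment can produce. */
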
static u8 could_be_arith(u32 old_val, u32 new_val, u8 blen) {

  u32 i, ov = 0, nv = 0, diffs = 0;

  if (old_val == new_val) return 1;

  /* See if one-byte adjustments to any byte could produce this result. */

  for (i = 0; i < blen; i++) {

    u8 a = old_val >> (8 * i),
       b = new_val >> (8 * i);

    if (a != b) { diffs++; ov = a; nv = b; }

  }

  /* If only one byte differs and the values are within range, return 1. */

  if (diffs == 1) {

    if ((u8)(ov - nv) <= ARITH_MAX ||
        (u8)(nv - ov) <= ARITH_MAX) return 1;

  }

  if (blen == 1) return 0;

  /* See if two-byte adjustments to any byte would produce this result. */

  diffs = 0;

  for (i = 0; i < blen / 2; i++) {

    u16 a = old_val >> (16 * i),
        b = new_val >> (16 * i);

    if (a != b) { diffs++; ov = a; nv = b; }

  }

  /* If only one word differs and the values are within range, return 1. */

  if (diffs == 1) {

    if ((u16)(ov - nv) <= ARITH_MAX ||
        (u16)(nv - ov) <= ARITH_MAX) return 1;

    ov = SWAP16(ov); nv = SWAP16(nv);

    if ((u16)(ov - nv) <= ARITH_MAX ||
        (u16)(nv - ov) <= ARITH_MAX) return 1;

  }

  /* Finally, let's do the same thing for dwords. */

  if (blen == 4) {

    if ((u32)(old_val - new_val) <= ARITH_MAX ||
        (u32)(new_val - old_val) <= ARITH_MAX) return 1;

    new_val = SWAP32(new_val);
    old_val = SWAP32(old_val);

    if ((u32)(old_val - new_val) <= ARITH_MAX ||
        (u32)(new_val - old_val) <= ARITH_MAX) return 1;

  }

  return 0;

}


/* Last but not least, a similar helper to see if insertion of an
   interesting integer is redundant given the insertions done for
   shorter blen. The last param (check_le) is set if the caller
   already executed LE insertion for current blen and wants to see
   if BE variant passed in new_val is unique. */
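
/* E.g., old_val == 0x4141 and new_val == 0x41ff is flagged as redundant:
   it amounts to dropping the one-byte interesting value -1 (0xff) into the
   low byte, which the 8-bit stage already tried. */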

static u8 could_be_interest(u32 old_val, u32 new_val, u8 blen, u8 check_le) {

  u32 i, j;

  if (old_val == new_val) return 1;

  /* See if one-byte insertions from interesting_8 over old_val could
     produce new_val. */

  for (i = 0; i < blen; i++) {

    for (j = 0; j < sizeof(interesting_8); j++) {

      u32 tval = (old_val & ~(0xff << (i * 8))) |
                 (((u8)interesting_8[j]) << (i * 8));

      if (new_val == tval) return 1;

    }

  }

  /* Bail out unless we're also asked to examine two-byte LE insertions
     as a preparation for BE attempts. */

  if (blen == 2 && !check_le) return 0;

  /* See if two-byte insertions over old_val could give us new_val. */

  for (i = 0; i < blen - 1; i++) {

    for (j = 0; j < sizeof(interesting_16) / 2; j++) {

      u32 tval = (old_val & ~(0xffff << (i * 8))) |
                 (((u16)interesting_16[j]) << (i * 8));

      if (new_val == tval) return 1;

      /* Continue here only if blen > 2. */

      if (blen > 2) {

        tval = (old_val & ~(0xffff << (i * 8))) |
               (SWAP16(interesting_16[j]) << (i * 8));

        if (new_val == tval) return 1;

      }

    }

  }

  if (blen == 4 && check_le) {

    /* See if four-byte insertions could produce the same result
       (LE only). */

    for (j = 0; j < sizeof(interesting_32) / 4; j++)
      if (new_val == (u32)interesting_32[j]) return 1;

  }

  return 0;

}


/* Take the current entry from the queue, fuzz it for a while. This
   function is a tad too long... returns 0 if fuzzed successfully, 1 if
   skipped or bailed out. */

static u8 fuzz_one(char** argv) {

  s32 len, fd, temp_len, i, j;
  u8  *in_buf, *out_buf, *orig_in, *ex_tmp, *eff_map = 0;
  u64 havoc_queued,  orig_hit_cnt, new_hit_cnt;
  u32 splice_cycle = 0, perf_score = 100, orig_perf, prev_cksum, eff_cnt = 1;

  u8  ret_val = 1, doing_det = 0;

  u8  a_collect[MAX_AUTO_EXTRA];
  u32 a_len = 0;

#ifdef IGNORE_FINDS

  /* In IGNORE_FINDS mode, skip any entries that weren't in the
     initial data set. */

  if (queue_cur->depth > 1) return 1;

#else

  if (pending_favored) {

    /* If we have any favored, non-fuzzed new arrivals in the queue,
       possibly skip to them at the expense of already-fuzzed or non-favored
       cases. */

    if ((queue_cur->was_fuzzed || !queue_cur->favored) &&
        UR(100) < SKIP_TO_NEW_PROB) return 1;

  } else if (!dumb_mode && !queue_cur->favored && queued_paths > 10) {

    /* Otherwise, still possibly skip non-favored cases, albeit less often.
       The odds of skipping stuff are higher for already-fuzzed inputs and
       lower for never-fuzzed entries. */

    if (queue_cycle > 1 && !queue_cur->was_fuzzed) {

      if (UR(100) < SKIP_NFAV_NEW_PROB) return 1;

    } else {

      if (UR(100) < SKIP_NFAV_OLD_PROB) return 1;

    }

  }
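
  /* (With stock config.h, the skip probabilities above are 99%, 75%, and
     95%, respectively - so favored, never-fuzzed newcomers get serviced
     almost immediately.) */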

#endif /* ^IGNORE_FINDS */

  if (not_on_tty) {
    ACTF("Fuzzing test case #%u (%u total, %llu uniq crashes found)...",
         current_entry, queued_paths, unique_crashes);
    fflush(stdout);
  }

  /* Map the test case into memory. */

  fd = open(queue_cur->fname, O_RDONLY);

  if (fd < 0) PFATAL("Unable to open '%s'", queue_cur->fname);

  len = queue_cur->len;

  orig_in = in_buf = mmap(0, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);

  if (orig_in == MAP_FAILED) PFATAL("Unable to mmap '%s'", queue_cur->fname);

  close(fd);

  /* We could mmap() out_buf as MAP_PRIVATE, but we end up clobbering every
     single byte anyway, so it wouldn't give us any performance or memory usage
     benefits. */

  out_buf = ck_alloc_nozero(len);

  subseq_tmouts = 0;

  cur_depth = queue_cur->depth;

  /*******************************************
   * CALIBRATION (only if failed earlier on) *
   *******************************************/

  if (queue_cur->cal_failed) {

    u8 res = FAULT_TMOUT;

    if (queue_cur->cal_failed < CAL_CHANCES) {

      res = calibrate_case(argv, queue_cur, in_buf, queue_cycle - 1, 0);

      if (res == FAULT_ERROR)
        FATAL("Unable to execute target application");

    }

    if (stop_soon || res != crash_mode) {
      cur_skipped_paths++;
      goto abandon_entry;
    }

  }

  /************
   * TRIMMING *
   ************/

  if (!dumb_mode && !queue_cur->trim_done) {

    u8 res = trim_case(argv, queue_cur, in_buf);

    if (res == FAULT_ERROR)
      FATAL("Unable to execute target application");

    if (stop_soon) {
      cur_skipped_paths++;
      goto abandon_entry;
    }

    /* Don't retry trimming, even if it failed. */

    queue_cur->trim_done = 1;

    if (len != queue_cur->len) len = queue_cur->len;

  }

  memcpy(out_buf, in_buf, len);

  /*********************
   * PERFORMANCE SCORE *
   *********************/

  orig_perf = perf_score = calculate_score(queue_cur);

  /* Skip right away if -d is given, if we have done deterministic fuzzing on
     this entry ourselves (was_fuzzed), or if it has gone through deterministic
     testing in earlier, resumed runs (passed_det). */

  if (skip_deterministic || queue_cur->was_fuzzed || queue_cur->passed_det)
    goto havoc_stage;

  /* Skip deterministic fuzzing if exec path checksum puts this out of scope
     for this master instance. */

  if (master_max && (queue_cur->exec_cksum % master_max) != master_id - 1)
    goto havoc_stage;

  doing_det = 1;

  /*********************************************
   * SIMPLE BITFLIP (+dictionary construction) *
   *********************************************/

#define FLIP_BIT(_ar, _b) do { \
    u8* _arf = (u8*)(_ar); \
    u32 _bf = (_b); \
    _arf[(_bf) >> 3] ^= (128 >> ((_bf) & 7)); \
  } while (0)
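
  /* E.g., FLIP_BIT(out_buf, 10) toggles bit 5 of out_buf[1] - bits are
     walked from the most significant end of each byte. */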

  /* Single walking bit. */

  stage_short = "flip1";
  stage_max   = len << 3;
  stage_name  = "bitflip 1/1";

  stage_val_type = STAGE_VAL_NONE;

  orig_hit_cnt = queued_paths + unique_crashes;

  prev_cksum = queue_cur->exec_cksum;

  for (stage_cur = 0; stage_cur < stage_max; stage_cur++) {

    stage_cur_byte = stage_cur >> 3;

    FLIP_BIT(out_buf, stage_cur);

    if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;

    FLIP_BIT(out_buf, stage_cur);

    /* While flipping the least significant bit in every byte, pull off an extra
       trick to detect possible syntax tokens. In essence, the idea is that if
       you have a binary blob like this:

       xxxxxxxxIHDRxxxxxxxx

       ...and changing the leading and trailing bytes causes variable or no
       changes in program flow, but touching any character in the "IHDR" string
       always produces the same, distinctive path, it's highly likely that
       "IHDR" is an atomically-checked magic value of special significance to
       the fuzzed format.

       We do this here, rather than as a separate stage, because it's a nice
       way to keep the operation approximately "free" (i.e., no extra execs).

       Empirically, performing the check when flipping the least significant bit
       is advantageous, compared to doing it at the time of more disruptive
       changes, where the program flow may be affected in more violent ways.

       The caveat is that we won't generate dictionaries in the -d mode or -S
       mode - but that's probably a fair trade-off.

       This won't work particularly well with paths that exhibit variable
       behavior, but fails gracefully, so we'll carry out the checks anyway. */

    if (!dumb_mode && (stage_cur & 7) == 7) {

      u32 cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST);

      if (stage_cur == stage_max - 1 && cksum == prev_cksum) {

        /* If at end of file and we are still collecting a string, grab the
           final character and force output. */

        if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3];
        a_len++;

        if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA)
          maybe_add_auto(a_collect, a_len);

      } else if (cksum != prev_cksum) {

        /* Otherwise, if the checksum has changed, see if we have something
           worthwhile queued up, and collect that if the answer is yes. */

        if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA)
          maybe_add_auto(a_collect, a_len);

        a_len = 0;
        prev_cksum = cksum;

      }

      /* Continue collecting string, but only if the bit flip actually made
         any difference - we don't want no-op tokens. */

      if (cksum != queue_cur->exec_cksum) {

        if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3];
        a_len++;

      }

    }

  }

  new_hit_cnt = queued_paths + unique_crashes;

  stage_finds[STAGE_FLIP1]  += new_hit_cnt - orig_hit_cnt;
  stage_cycles[STAGE_FLIP1] += stage_max;

  /* Two walking bits. */

  stage_name  = "bitflip 2/1";
  stage_short = "flip2";
  stage_max   = (len << 3) - 1;

  orig_hit_cnt = new_hit_cnt;

  for (stage_cur = 0; stage_cur < stage_max; stage_cur++) {

    stage_cur_byte = stage_cur >> 3;

    FLIP_BIT(out_buf, stage_cur);
    FLIP_BIT(out_buf, stage_cur + 1);

    if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;

    FLIP_BIT(out_buf, stage_cur);
    FLIP_BIT(out_buf, stage_cur + 1);

  }

  new_hit_cnt = queued_paths + unique_crashes;

  stage_finds[STAGE_FLIP2]  += new_hit_cnt - orig_hit_cnt;
  stage_cycles[STAGE_FLIP2] += stage_max;

  /* Four walking bits. */

  stage_name  = "bitflip 4/1";
  stage_short = "flip4";
  stage_max   = (len << 3) - 3;

  orig_hit_cnt = new_hit_cnt;

  for (stage_cur = 0; stage_cur < stage_max; stage_cur++) {

    stage_cur_byte = stage_cur >> 3;

    FLIP_BIT(out_buf, stage_cur);
    FLIP_BIT(out_buf, stage_cur + 1);
    FLIP_BIT(out_buf, stage_cur + 2);
    FLIP_BIT(out_buf, stage_cur + 3);

    if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;

    FLIP_BIT(out_buf, stage_cur);
    FLIP_BIT(out_buf, stage_cur + 1);
    FLIP_BIT(out_buf, stage_cur + 2);
    FLIP_BIT(out_buf, stage_cur + 3);

  }

  new_hit_cnt = queued_paths + unique_crashes;

  stage_finds[STAGE_FLIP4]  += new_hit_cnt - orig_hit_cnt;
  stage_cycles[STAGE_FLIP4] += stage_max;

  /* Effector map setup. These macros calculate:

     EFF_APOS      - position of a particular file offset in the map.
     EFF_ALEN      - length of a map with a particular number of bytes.
     EFF_SPAN_ALEN - map span for a sequence of bytes.

   */

#define EFF_APOS(_p)          ((_p) >> EFF_MAP_SCALE2)
#define EFF_REM(_x)           ((_x) & ((1 << EFF_MAP_SCALE2) - 1))
#define EFF_ALEN(_l)          (EFF_APOS(_l) + !!EFF_REM(_l))
#define EFF_SPAN_ALEN(_p, _l) (EFF_APOS((_p) + (_l) - 1) - EFF_APOS(_p) + 1)
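
/* E.g., with the stock EFF_MAP_SCALE2 of 3, each eff_map entry covers an
   8-byte block: EFF_APOS(17) == 2, and EFF_ALEN(17) == 3 since the last,
   partial block still needs a slot. */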
5267 
5268   /* Initialize effector map for the next step (see comments below). Always
5269      flag first and last byte as doing something. */
5270 
5271   eff_map    = ck_alloc(EFF_ALEN(len));
5272   eff_map[0] = 1;
5273 
5274   if (EFF_APOS(len - 1) != 0) {
5275     eff_map[EFF_APOS(len - 1)] = 1;
5276     eff_cnt++;
5277   }
5278 
5279   /* Walking byte. */
5280 
5281   stage_name  = "bitflip 8/8";
5282   stage_short = "flip8";
5283   stage_max   = len;
5284 
5285   orig_hit_cnt = new_hit_cnt;
5286 
5287   for (stage_cur = 0; stage_cur < stage_max; stage_cur++) {
5288 
5289     stage_cur_byte = stage_cur;
5290 
5291     out_buf[stage_cur] ^= 0xFF;
5292 
5293     if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
5294 
5295     /* We also use this stage to pull off a simple trick: we identify
5296        bytes that seem to have no effect on the current execution path
5297        even when fully flipped - and we skip them during more expensive
5298        deterministic stages, such as arithmetics or known ints. */
5299 
5300     if (!eff_map[EFF_APOS(stage_cur)]) {
5301 
5302       u32 cksum;
5303 
5304       /* If in dumb mode or if the file is very short, just flag everything
5305          without wasting time on checksums. */
5306 
5307       if (!dumb_mode && len >= EFF_MIN_LEN)
5308         cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST);
5309       else
5310         cksum = ~queue_cur->exec_cksum;
5311 
5312       if (cksum != queue_cur->exec_cksum) {
5313         eff_map[EFF_APOS(stage_cur)] = 1;
5314         eff_cnt++;
5315       }
5316 
5317     }
5318 
5319     out_buf[stage_cur] ^= 0xFF;
5320 
5321   }
5322 
5323   /* If the effector map is more than EFF_MAX_PERC dense, just flag the
5324      whole thing as worth fuzzing, since we wouldn't be saving much time
5325      anyway. */
5326 
5327   if (eff_cnt != EFF_ALEN(len) &&
5328       eff_cnt * 100 / EFF_ALEN(len) > EFF_MAX_PERC) {
5329 
5330     memset(eff_map, 1, EFF_ALEN(len));
5331 
5332     blocks_eff_select += EFF_ALEN(len);
5333 
5334   } else {
5335 
5336     blocks_eff_select += eff_cnt;
5337 
5338   }
5339 
5340   blocks_eff_total += EFF_ALEN(len);
5341 
5342   new_hit_cnt = queued_paths + unique_crashes;
5343 
5344   stage_finds[STAGE_FLIP8]  += new_hit_cnt - orig_hit_cnt;
5345   stage_cycles[STAGE_FLIP8] += stage_max;
5346 
5347   /* Two walking bytes. */
5348 
5349   if (len < 2) goto skip_bitflip;
5350 
5351   stage_name  = "bitflip 16/8";
5352   stage_short = "flip16";
5353   stage_cur   = 0;
5354   stage_max   = len - 1;
5355 
5356   orig_hit_cnt = new_hit_cnt;
5357 
5358   for (i = 0; i < len - 1; i++) {
5359 
5360     /* Let's consult the effector map... */
5361 
5362     if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) {
5363       stage_max--;
5364       continue;
5365     }
5366 
5367     stage_cur_byte = i;
5368 
5369     *(u16*)(out_buf + i) ^= 0xFFFF;
5370 
5371     if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
5372     stage_cur++;
5373 
5374     *(u16*)(out_buf + i) ^= 0xFFFF;
5375 
5376 
5377   }
5378 
5379   new_hit_cnt = queued_paths + unique_crashes;
5380 
5381   stage_finds[STAGE_FLIP16]  += new_hit_cnt - orig_hit_cnt;
5382   stage_cycles[STAGE_FLIP16] += stage_max;
5383 
5384   if (len < 4) goto skip_bitflip;
5385 
5386   /* Four walking bytes. */
5387 
5388   stage_name  = "bitflip 32/8";
5389   stage_short = "flip32";
5390   stage_cur   = 0;
5391   stage_max   = len - 3;
5392 
5393   orig_hit_cnt = new_hit_cnt;
5394 
5395   for (i = 0; i < len - 3; i++) {
5396 
5397     /* Let's consult the effector map... */
5398     if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] &&
5399         !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) {
5400       stage_max--;
5401       continue;
5402     }
5403 
5404     stage_cur_byte = i;
5405 
5406     *(u32*)(out_buf + i) ^= 0xFFFFFFFF;
5407 
5408     if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
5409     stage_cur++;
5410 
5411     *(u32*)(out_buf + i) ^= 0xFFFFFFFF;
5412 
5413   }
5414 
5415   new_hit_cnt = queued_paths + unique_crashes;
5416 
5417   stage_finds[STAGE_FLIP32]  += new_hit_cnt - orig_hit_cnt;
5418   stage_cycles[STAGE_FLIP32] += stage_max;
5419 
5420 skip_bitflip:
5421 
5422   if (no_arith) goto skip_arith;
5423 
5424   /**********************
5425    * ARITHMETIC INC/DEC *
5426    **********************/
5427 
5428   /* 8-bit arithmetics. */
5429 
5430   stage_name  = "arith 8/8";
5431   stage_short = "arith8";
5432   stage_cur   = 0;
5433   stage_max   = 2 * len * ARITH_MAX;
5434 
5435   stage_val_type = STAGE_VAL_LE;
5436 
5437   orig_hit_cnt = new_hit_cnt;
5438 
5439   for (i = 0; i < len; i++) {
5440 
5441     u8 orig = out_buf[i];
5442 
5443     /* Let's consult the effector map... */
5444 
5445     if (!eff_map[EFF_APOS(i)]) {
5446       stage_max -= 2 * ARITH_MAX;
5447       continue;
5448     }
5449 
5450     stage_cur_byte = i;
5451 
5452     for (j = 1; j <= ARITH_MAX; j++) {
5453 
5454       u8 r = orig ^ (orig + j);
5455 
5456       /* Do arithmetic operations only if the result couldn't be a product
5457          of a bitflip. */
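
      /* For instance, assuming could_be_bitflip() matches the walking 1-,
         2-, 4-, 8-, 16- and 32-bit XOR patterns tried by earlier stages:

           orig = 0x05, j = 1:  r = 0x05 ^ 0x06 = 0x03 - two adjacent bits,
                                already covered by bitflip 2/1, so skipped;
           orig = 0x05, j = 3:  r = 0x05 ^ 0x08 = 0x0D - not reachable by
                                any walking flip, so actually executed. */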
5458 
5459       if (!could_be_bitflip(r)) {
5460 
5461         stage_cur_val = j;
5462         out_buf[i] = orig + j;
5463 
5464         if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
5465         stage_cur++;
5466 
5467       } else stage_max--;
5468 
5469       r =  orig ^ (orig - j);
5470 
5471       if (!could_be_bitflip(r)) {
5472 
5473         stage_cur_val = -j;
5474         out_buf[i] = orig - j;
5475 
5476         if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
5477         stage_cur++;
5478 
5479       } else stage_max--;
5480 
5481       out_buf[i] = orig;
5482 
5483     }
5484 
5485   }
5486 
5487   new_hit_cnt = queued_paths + unique_crashes;
5488 
5489   stage_finds[STAGE_ARITH8]  += new_hit_cnt - orig_hit_cnt;
5490   stage_cycles[STAGE_ARITH8] += stage_max;
5491 
5492   /* 16-bit arithmetics, both endians. */
5493 
5494   if (len < 2) goto skip_arith;
5495 
5496   stage_name  = "arith 16/8";
5497   stage_short = "arith16";
5498   stage_cur   = 0;
5499   stage_max   = 4 * (len - 1) * ARITH_MAX;
5500 
5501   orig_hit_cnt = new_hit_cnt;
5502 
5503   for (i = 0; i < len - 1; i++) {
5504 
5505     u16 orig = *(u16*)(out_buf + i);
5506 
5507     /* Let's consult the effector map... */
5508 
5509     if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) {
5510       stage_max -= 4 * ARITH_MAX;
5511       continue;
5512     }
5513 
5514     stage_cur_byte = i;
5515 
5516     for (j = 1; j <= ARITH_MAX; j++) {
5517 
5518       u16 r1 = orig ^ (orig + j),
5519           r2 = orig ^ (orig - j),
5520           r3 = orig ^ SWAP16(SWAP16(orig) + j),
5521           r4 = orig ^ SWAP16(SWAP16(orig) - j);
5522 
5523       /* Try little endian addition and subtraction first. Do it only
5524          if the operation would affect more than one byte (hence the
5525          & 0xff overflow checks) and if it couldn't be a product of
5526          a bitflip. */
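
      /* For instance, with orig = 0x00FF and j = 1 the sum is 0x0100: the
         carry ripples into the high byte, (orig & 0xff) + j exceeds 0xff,
         and r1 = 0x00FF ^ 0x0100 = 0x01FF is nothing a walking bitflip
         could produce - so the case below runs. With orig = 0x0010, only
         the low byte would change, and the case is skipped as redundant. */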
5527 
5528       stage_val_type = STAGE_VAL_LE;
5529 
5530       if ((orig & 0xff) + j > 0xff && !could_be_bitflip(r1)) {
5531 
5532         stage_cur_val = j;
5533         *(u16*)(out_buf + i) = orig + j;
5534 
5535         if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
5536         stage_cur++;
5537 
5538       } else stage_max--;
5539 
5540       if ((orig & 0xff) < j && !could_be_bitflip(r2)) {
5541 
5542         stage_cur_val = -j;
5543         *(u16*)(out_buf + i) = orig - j;
5544 
5545         if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
5546         stage_cur++;
5547 
5548       } else stage_max--;
5549 
5550       /* Big endian comes next. Same deal. */
5551 
5552       stage_val_type = STAGE_VAL_BE;
5553 
5554 
5555       if ((orig >> 8) + j > 0xff && !could_be_bitflip(r3)) {
5556 
5557         stage_cur_val = j;
5558         *(u16*)(out_buf + i) = SWAP16(SWAP16(orig) + j);
5559 
5560         if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
5561         stage_cur++;
5562 
5563       } else stage_max--;
5564 
5565       if ((orig >> 8) < j && !could_be_bitflip(r4)) {
5566 
5567         stage_cur_val = -j;
5568         *(u16*)(out_buf + i) = SWAP16(SWAP16(orig) - j);
5569 
5570         if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
5571         stage_cur++;
5572 
5573       } else stage_max--;
5574 
5575       *(u16*)(out_buf + i) = orig;
5576 
5577     }
5578 
5579   }
5580 
5581   new_hit_cnt = queued_paths + unique_crashes;
5582 
5583   stage_finds[STAGE_ARITH16]  += new_hit_cnt - orig_hit_cnt;
5584   stage_cycles[STAGE_ARITH16] += stage_max;
5585 
5586   /* 32-bit arithmetics, both endians. */
5587 
5588   if (len < 4) goto skip_arith;
5589 
5590   stage_name  = "arith 32/8";
5591   stage_short = "arith32";
5592   stage_cur   = 0;
5593   stage_max   = 4 * (len - 3) * ARITH_MAX;
5594 
5595   orig_hit_cnt = new_hit_cnt;
5596 
5597   for (i = 0; i < len - 3; i++) {
5598 
5599     u32 orig = *(u32*)(out_buf + i);
5600 
5601     /* Let's consult the effector map... */
5602 
5603     if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] &&
5604         !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) {
5605       stage_max -= 4 * ARITH_MAX;
5606       continue;
5607     }
5608 
5609     stage_cur_byte = i;
5610 
5611     for (j = 1; j <= ARITH_MAX; j++) {
5612 
5613       u32 r1 = orig ^ (orig + j),
5614           r2 = orig ^ (orig - j),
5615           r3 = orig ^ SWAP32(SWAP32(orig) + j),
5616           r4 = orig ^ SWAP32(SWAP32(orig) - j);
5617 
5618       /* Little endian first. Same deal as with 16-bit: we only want to
5619          try if the operation would have effect on more than two bytes. */
5620 
5621       stage_val_type = STAGE_VAL_LE;
5622 
5623       if ((orig & 0xffff) + j > 0xffff && !could_be_bitflip(r1)) {
5624 
5625         stage_cur_val = j;
5626         *(u32*)(out_buf + i) = orig + j;
5627 
5628         if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
5629         stage_cur++;
5630 
5631       } else stage_max--;
5632 
5633       if ((orig & 0xffff) < j && !could_be_bitflip(r2)) {
5634 
5635         stage_cur_val = -j;
5636         *(u32*)(out_buf + i) = orig - j;
5637 
5638         if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
5639         stage_cur++;
5640 
5641       } else stage_max--;
5642 
5643       /* Big endian next. */
5644 
5645       stage_val_type = STAGE_VAL_BE;
5646 
5647       if ((SWAP32(orig) & 0xffff) + j > 0xffff && !could_be_bitflip(r3)) {
5648 
5649         stage_cur_val = j;
5650         *(u32*)(out_buf + i) = SWAP32(SWAP32(orig) + j);
5651 
5652         if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
5653         stage_cur++;
5654 
5655       } else stage_max--;
5656 
5657       if ((SWAP32(orig) & 0xffff) < j && !could_be_bitflip(r4)) {
5658 
5659         stage_cur_val = -j;
5660         *(u32*)(out_buf + i) = SWAP32(SWAP32(orig) - j);
5661 
5662         if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
5663         stage_cur++;
5664 
5665       } else stage_max--;
5666 
5667       *(u32*)(out_buf + i) = orig;
5668 
5669     }
5670 
5671   }
5672 
5673   new_hit_cnt = queued_paths + unique_crashes;
5674 
5675   stage_finds[STAGE_ARITH32]  += new_hit_cnt - orig_hit_cnt;
5676   stage_cycles[STAGE_ARITH32] += stage_max;
5677 
5678 skip_arith:
5679 
5680   /**********************
5681    * INTERESTING VALUES *
5682    **********************/
5683 
5684   stage_name  = "interest 8/8";
5685   stage_short = "int8";
5686   stage_cur   = 0;
5687   stage_max   = len * sizeof(interesting_8);
5688 
5689   stage_val_type = STAGE_VAL_LE;
5690 
5691   orig_hit_cnt = new_hit_cnt;
5692 
5693   /* Setting 8-bit integers. */
5694 
5695   for (i = 0; i < len; i++) {
5696 
5697     u8 orig = out_buf[i];
5698 
5699     /* Let's consult the effector map... */
5700 
5701     if (!eff_map[EFF_APOS(i)]) {
5702       stage_max -= sizeof(interesting_8);
5703       continue;
5704     }
5705 
5706     stage_cur_byte = i;
5707 
5708     for (j = 0; j < sizeof(interesting_8); j++) {
5709 
5710       /* Skip if the value could be a product of bitflips or arithmetics. */
5711 
5712       if (could_be_bitflip(orig ^ (u8)interesting_8[j]) ||
5713           could_be_arith(orig, (u8)interesting_8[j], 1)) {
5714         stage_max--;
5715         continue;
5716       }
5717 
5718       stage_cur_val = interesting_8[j];
5719       out_buf[i] = interesting_8[j];
5720 
5721       if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
5722 
5723       out_buf[i] = orig;
5724       stage_cur++;
5725 
5726     }
5727 
5728   }
5729 
5730   new_hit_cnt = queued_paths + unique_crashes;
5731 
5732   stage_finds[STAGE_INTEREST8]  += new_hit_cnt - orig_hit_cnt;
5733   stage_cycles[STAGE_INTEREST8] += stage_max;
5734 
5735   /* Setting 16-bit integers, both endians. */
5736 
5737   if (no_arith || len < 2) goto skip_interest;
5738 
5739   stage_name  = "interest 16/8";
5740   stage_short = "int16";
5741   stage_cur   = 0;
5742   stage_max   = 2 * (len - 1) * (sizeof(interesting_16) >> 1);
5743 
5744   orig_hit_cnt = new_hit_cnt;
5745 
5746   for (i = 0; i < len - 1; i++) {
5747 
5748     u16 orig = *(u16*)(out_buf + i);
5749 
5750     /* Let's consult the effector map... */
5751 
5752     if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) {
5753       stage_max -= sizeof(interesting_16);
5754       continue;
5755     }
5756 
5757     stage_cur_byte = i;
5758 
5759     for (j = 0; j < sizeof(interesting_16) / 2; j++) {
5760 
5761       stage_cur_val = interesting_16[j];
5762 
5763       /* Skip if this could be a product of a bitflip, arithmetics,
5764          or single-byte interesting value insertion. */
5765 
5766       if (!could_be_bitflip(orig ^ (u16)interesting_16[j]) &&
5767           !could_be_arith(orig, (u16)interesting_16[j], 2) &&
5768           !could_be_interest(orig, (u16)interesting_16[j], 2, 0)) {
5769 
5770         stage_val_type = STAGE_VAL_LE;
5771 
5772         *(u16*)(out_buf + i) = interesting_16[j];
5773 
5774         if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
5775         stage_cur++;
5776 
5777       } else stage_max--;
5778 
5779       if ((u16)interesting_16[j] != SWAP16(interesting_16[j]) &&
5780           !could_be_bitflip(orig ^ SWAP16(interesting_16[j])) &&
5781           !could_be_arith(orig, SWAP16(interesting_16[j]), 2) &&
5782           !could_be_interest(orig, SWAP16(interesting_16[j]), 2, 1)) {
5783 
5784         stage_val_type = STAGE_VAL_BE;
5785 
5786         *(u16*)(out_buf + i) = SWAP16(interesting_16[j]);
5787         if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
5788         stage_cur++;
5789 
5790       } else stage_max--;
5791 
5792     }
5793 
5794     *(u16*)(out_buf + i) = orig;
5795 
5796   }
5797 
5798   new_hit_cnt = queued_paths + unique_crashes;
5799 
5800   stage_finds[STAGE_INTEREST16]  += new_hit_cnt - orig_hit_cnt;
5801   stage_cycles[STAGE_INTEREST16] += stage_max;
5802 
5803   if (len < 4) goto skip_interest;
5804 
5805   /* Setting 32-bit integers, both endians. */
5806 
5807   stage_name  = "interest 32/8";
5808   stage_short = "int32";
5809   stage_cur   = 0;
5810   stage_max   = 2 * (len - 3) * (sizeof(interesting_32) >> 2);
5811 
5812   orig_hit_cnt = new_hit_cnt;
5813 
5814   for (i = 0; i < len - 3; i++) {
5815 
5816     u32 orig = *(u32*)(out_buf + i);
5817 
5818     /* Let's consult the effector map... */
5819 
5820     if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] &&
5821         !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) {
5822       stage_max -= sizeof(interesting_32) >> 1;
5823       continue;
5824     }
5825 
5826     stage_cur_byte = i;
5827 
5828     for (j = 0; j < sizeof(interesting_32) / 4; j++) {
5829 
5830       stage_cur_val = interesting_32[j];
5831 
5832       /* Skip if this could be a product of a bitflip, arithmetics,
5833          or word interesting value insertion. */
5834 
5835       if (!could_be_bitflip(orig ^ (u32)interesting_32[j]) &&
5836           !could_be_arith(orig, interesting_32[j], 4) &&
5837           !could_be_interest(orig, interesting_32[j], 4, 0)) {
5838 
5839         stage_val_type = STAGE_VAL_LE;
5840 
5841         *(u32*)(out_buf + i) = interesting_32[j];
5842 
5843         if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
5844         stage_cur++;
5845 
5846       } else stage_max--;
5847 
5848       if ((u32)interesting_32[j] != SWAP32(interesting_32[j]) &&
5849           !could_be_bitflip(orig ^ SWAP32(interesting_32[j])) &&
5850           !could_be_arith(orig, SWAP32(interesting_32[j]), 4) &&
5851           !could_be_interest(orig, SWAP32(interesting_32[j]), 4, 1)) {
5852 
5853         stage_val_type = STAGE_VAL_BE;
5854 
5855         *(u32*)(out_buf + i) = SWAP32(interesting_32[j]);
5856         if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
5857         stage_cur++;
5858 
5859       } else stage_max--;
5860 
5861     }
5862 
5863     *(u32*)(out_buf + i) = orig;
5864 
5865   }
5866 
5867   new_hit_cnt = queued_paths + unique_crashes;
5868 
5869   stage_finds[STAGE_INTEREST32]  += new_hit_cnt - orig_hit_cnt;
5870   stage_cycles[STAGE_INTEREST32] += stage_max;
5871 
5872 skip_interest:
5873 
5874   /********************
5875    * DICTIONARY STUFF *
5876    ********************/
5877 
5878   if (!extras_cnt) goto skip_user_extras;
5879 
5880   /* Overwrite with user-supplied extras. */
5881 
5882   stage_name  = "user extras (over)";
5883   stage_short = "ext_UO";
5884   stage_cur   = 0;
5885   stage_max   = extras_cnt * len;
5886 
5887   stage_val_type = STAGE_VAL_NONE;
5888 
5889   orig_hit_cnt = new_hit_cnt;
5890 
5891   for (i = 0; i < len; i++) {
5892 
5893     u32 last_len = 0;
5894 
5895     stage_cur_byte = i;
5896 
5897     /* Extras are sorted by size, from smallest to largest. This means
5898        that we don't have to worry about restoring the buffer in
5899        between writes at a particular offset determined by the outer
5900        loop. */
5901 
5902     for (j = 0; j < extras_cnt; j++) {
5903 
5904       /* Skip extras probabilistically if extras_cnt > MAX_DET_EXTRAS. Also
5905          skip them if there's no room to insert the payload, if the token
5906          is redundant, or if its entire span has no bytes set in the effector
5907          map. */
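
      /* With the config.h default of MAX_DET_EXTRAS = 200, a 600-entry
         dictionary has each token attempted with probability 200/600 = 1/3
         at every position, so the cost of this stage stays roughly constant
         no matter how large the dictionary grows. */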
5908 
5909       if ((extras_cnt > MAX_DET_EXTRAS && UR(extras_cnt) >= MAX_DET_EXTRAS) ||
5910           extras[j].len > len - i ||
5911           !memcmp(extras[j].data, out_buf + i, extras[j].len) ||
5912           !memchr(eff_map + EFF_APOS(i), 1, EFF_SPAN_ALEN(i, extras[j].len))) {
5913 
5914         stage_max--;
5915         continue;
5916 
5917       }
5918 
5919       last_len = extras[j].len;
5920       memcpy(out_buf + i, extras[j].data, last_len);
5921 
5922       if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
5923 
5924       stage_cur++;
5925 
5926     }
5927 
5928     /* Restore all the clobbered memory. */
5929     memcpy(out_buf + i, in_buf + i, last_len);
5930 
5931   }
5932 
5933   new_hit_cnt = queued_paths + unique_crashes;
5934 
5935   stage_finds[STAGE_EXTRAS_UO]  += new_hit_cnt - orig_hit_cnt;
5936   stage_cycles[STAGE_EXTRAS_UO] += stage_max;
5937 
5938   /* Insertion of user-supplied extras. */
5939 
5940   stage_name  = "user extras (insert)";
5941   stage_short = "ext_UI";
5942   stage_cur   = 0;
5943   stage_max   = extras_cnt * len;
5944 
5945   orig_hit_cnt = new_hit_cnt;
5946 
5947   ex_tmp = ck_alloc(len + MAX_DICT_FILE);
5948 
5949   for (i = 0; i <= len; i++) {
5950 
5951     stage_cur_byte = i;
5952 
5953     for (j = 0; j < extras_cnt; j++) {
5954 
5955       if (len + extras[j].len > MAX_FILE) {
5956         stage_max--;
5957         continue;
5958       }
5959 
5960       /* Insert token */
5961       memcpy(ex_tmp + i, extras[j].data, extras[j].len);
5962 
5963       /* Copy tail */
5964       memcpy(ex_tmp + i + extras[j].len, out_buf + i, len - i);
5965 
5966       if (common_fuzz_stuff(argv, ex_tmp, len + extras[j].len)) {
5967         ck_free(ex_tmp);
5968         goto abandon_entry;
5969       }
5970 
5971       stage_cur++;
5972 
5973     }
5974 
5975     /* Copy head */
5976     ex_tmp[i] = out_buf[i];
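
    /* Note that the head is built incrementally: by the time offset i is
       reached, ex_tmp[0..i-1] already mirrors out_buf, so each pass of the
       outer loop only needs to append the single byte above. */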
5977 
5978   }
5979 
5980   ck_free(ex_tmp);
5981 
5982   new_hit_cnt = queued_paths + unique_crashes;
5983 
5984   stage_finds[STAGE_EXTRAS_UI]  += new_hit_cnt - orig_hit_cnt;
5985   stage_cycles[STAGE_EXTRAS_UI] += stage_max;
5986 
5987 skip_user_extras:
5988 
5989   if (!a_extras_cnt) goto skip_extras;
5990 
5991   stage_name  = "auto extras (over)";
5992   stage_short = "ext_AO";
5993   stage_cur   = 0;
5994   stage_max   = MIN(a_extras_cnt, USE_AUTO_EXTRAS) * len;
5995 
5996   stage_val_type = STAGE_VAL_NONE;
5997 
5998   orig_hit_cnt = new_hit_cnt;
5999 
6000   for (i = 0; i < len; i++) {
6001 
6002     u32 last_len = 0;
6003 
6004     stage_cur_byte = i;
6005 
6006     for (j = 0; j < MIN(a_extras_cnt, USE_AUTO_EXTRAS); j++) {
6007 
6008       /* See the comment in the earlier code; extras are sorted by size. */
6009 
6010       if (a_extras[j].len > len - i ||
6011           !memcmp(a_extras[j].data, out_buf + i, a_extras[j].len) ||
6012           !memchr(eff_map + EFF_APOS(i), 1, EFF_SPAN_ALEN(i, a_extras[j].len))) {
6013 
6014         stage_max--;
6015         continue;
6016 
6017       }
6018 
6019       last_len = a_extras[j].len;
6020       memcpy(out_buf + i, a_extras[j].data, last_len);
6021 
6022       if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
6023 
6024       stage_cur++;
6025 
6026     }
6027 
6028     /* Restore all the clobbered memory. */
6029     memcpy(out_buf + i, in_buf + i, last_len);
6030 
6031   }
6032 
6033   new_hit_cnt = queued_paths + unique_crashes;
6034 
6035   stage_finds[STAGE_EXTRAS_AO]  += new_hit_cnt - orig_hit_cnt;
6036   stage_cycles[STAGE_EXTRAS_AO] += stage_max;
6037 
6038 skip_extras:
6039 
6040   /* If we made it here without jumping to havoc_stage or abandon_entry,
6041      we're properly done with deterministic steps and can mark the entry
6042      as such in the .state/ directory. */
6043 
6044   if (!queue_cur->passed_det) mark_as_det_done(queue_cur);
6045 
6046   /****************
6047    * RANDOM HAVOC *
6048    ****************/
6049 
6050 havoc_stage:
6051 
6052   stage_cur_byte = -1;
6053 
6054   /* The havoc stage mutation code is also invoked when splicing files; if the
6055      splice_cycle variable is set, generate different descriptions and such. */
6056 
6057   if (!splice_cycle) {
6058 
6059     stage_name  = "havoc";
6060     stage_short = "havoc";
6061     stage_max   = (doing_det ? HAVOC_CYCLES_INIT : HAVOC_CYCLES) *
6062                   perf_score / havoc_div / 100;
6063 
6064   } else {
6065 
6066     static u8 tmp[32];
6067 
6068     perf_score = orig_perf;
6069 
6070     sprintf(tmp, "splice %u", splice_cycle);
6071     stage_name  = tmp;
6072     stage_short = "splice";
6073     stage_max   = SPLICE_HAVOC * perf_score / havoc_div / 100;
6074 
6075   }
6076 
6077   if (stage_max < HAVOC_MIN) stage_max = HAVOC_MIN;
6078 
6079   temp_len = len;
6080 
6081   orig_hit_cnt = queued_paths + unique_crashes;
6082 
6083   havoc_queued = queued_paths;
6084 
6085   /* We essentially just do several thousand runs (depending on perf_score)
6086      where we take the input file and make random stacked tweaks. */
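
  /* A quick sketch of the stacking schedule below, assuming the config.h
     default of HAVOC_STACK_POW2 = 7:

       use_stacking = 1 << (1 + UR(HAVOC_STACK_POW2));  // 2, 4, 8, ... 128

     The exponent is drawn uniformly, so heavily stacked runs (128 tweaks)
     are just as likely as light ones (2 tweaks). */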
6087 
6088   for (stage_cur = 0; stage_cur < stage_max; stage_cur++) {
6089 
6090     u32 use_stacking = 1 << (1 + UR(HAVOC_STACK_POW2));
6091 
6092     stage_cur_val = use_stacking;
6093 
6094     for (i = 0; i < use_stacking; i++) {
6095 
6096       switch (UR(15 + ((extras_cnt + a_extras_cnt) ? 2 : 0))) {
6097 
6098         case 0:
6099 
6100           /* Flip a single bit somewhere. Spooky! */
6101 
6102           FLIP_BIT(out_buf, UR(temp_len << 3));
6103           break;
6104 
6105         case 1:
6106 
6107           /* Set byte to interesting value. */
6108 
6109           out_buf[UR(temp_len)] = interesting_8[UR(sizeof(interesting_8))];
6110           break;
6111 
6112         case 2:
6113 
6114           /* Set word to interesting value, randomly choosing endian. */
6115 
6116           if (temp_len < 2) break;
6117 
6118           if (UR(2)) {
6119 
6120             *(u16*)(out_buf + UR(temp_len - 1)) =
6121               interesting_16[UR(sizeof(interesting_16) >> 1)];
6122 
6123           } else {
6124 
6125             *(u16*)(out_buf + UR(temp_len - 1)) = SWAP16(
6126               interesting_16[UR(sizeof(interesting_16) >> 1)]);
6127 
6128           }
6129 
6130           break;
6131 
6132         case 3:
6133 
6134           /* Set dword to interesting value, randomly choosing endian. */
6135 
6136           if (temp_len < 4) break;
6137 
6138           if (UR(2)) {
6139 
6140             *(u32*)(out_buf + UR(temp_len - 3)) =
6141               interesting_32[UR(sizeof(interesting_32) >> 2)];
6142 
6143           } else {
6144 
6145             *(u32*)(out_buf + UR(temp_len - 3)) = SWAP32(
6146               interesting_32[UR(sizeof(interesting_32) >> 2)]);
6147 
6148           }
6149 
6150           break;
6151 
6152         case 4:
6153 
6154           /* Randomly subtract from byte. */
6155 
6156           out_buf[UR(temp_len)] -= 1 + UR(ARITH_MAX);
6157           break;
6158 
6159         case 5:
6160 
6161           /* Randomly add to byte. */
6162 
6163           out_buf[UR(temp_len)] += 1 + UR(ARITH_MAX);
6164           break;
6165 
6166         case 6:
6167 
6168           /* Randomly subtract from word, random endian. */
6169 
6170           if (temp_len < 2) break;
6171 
6172           if (UR(2)) {
6173 
6174             u32 pos = UR(temp_len - 1);
6175 
6176             *(u16*)(out_buf + pos) -= 1 + UR(ARITH_MAX);
6177 
6178           } else {
6179 
6180             u32 pos = UR(temp_len - 1);
6181             u16 num = 1 + UR(ARITH_MAX);
6182 
6183             *(u16*)(out_buf + pos) =
6184               SWAP16(SWAP16(*(u16*)(out_buf + pos)) - num);
6185 
6186           }
6187 
6188           break;
6189 
6190         case 7:
6191 
6192           /* Randomly add to word, random endian. */
6193 
6194           if (temp_len < 2) break;
6195 
6196           if (UR(2)) {
6197 
6198             u32 pos = UR(temp_len - 1);
6199 
6200             *(u16*)(out_buf + pos) += 1 + UR(ARITH_MAX);
6201 
6202           } else {
6203 
6204             u32 pos = UR(temp_len - 1);
6205             u16 num = 1 + UR(ARITH_MAX);
6206 
6207             *(u16*)(out_buf + pos) =
6208               SWAP16(SWAP16(*(u16*)(out_buf + pos)) + num);
6209 
6210           }
6211 
6212           break;
6213 
6214         case 8:
6215 
6216           /* Randomly subtract from dword, random endian. */
6217 
6218           if (temp_len < 4) break;
6219 
6220           if (UR(2)) {
6221 
6222             u32 pos = UR(temp_len - 3);
6223 
6224             *(u32*)(out_buf + pos) -= 1 + UR(ARITH_MAX);
6225 
6226           } else {
6227 
6228             u32 pos = UR(temp_len - 3);
6229             u32 num = 1 + UR(ARITH_MAX);
6230 
6231             *(u32*)(out_buf + pos) =
6232               SWAP32(SWAP32(*(u32*)(out_buf + pos)) - num);
6233 
6234           }
6235 
6236           break;
6237 
6238         case 9:
6239 
6240           /* Randomly add to dword, random endian. */
6241 
6242           if (temp_len < 4) break;
6243 
6244           if (UR(2)) {
6245 
6246             u32 pos = UR(temp_len - 3);
6247 
6248             *(u32*)(out_buf + pos) += 1 + UR(ARITH_MAX);
6249 
6250           } else {
6251 
6252             u32 pos = UR(temp_len - 3);
6253             u32 num = 1 + UR(ARITH_MAX);
6254 
6255             *(u32*)(out_buf + pos) =
6256               SWAP32(SWAP32(*(u32*)(out_buf + pos)) + num);
6257 
6258           }
6259 
6260           break;
6261 
6262         case 10:
6263 
6264           /* Just set a random byte to a random value. Because,
6265              why not. We use XOR with 1-255 to eliminate the
6266              possibility of a no-op. */
6267 
6268           out_buf[UR(temp_len)] ^= 1 + UR(255);
6269           break;
6270 
6271         case 11 ... 12: {
6272 
6273             /* Delete bytes. We're making this a bit more likely
6274                than insertion (the next option) in hopes of keeping
6275                files reasonably small. */
6276 
6277             u32 del_from, del_len;
6278 
6279             if (temp_len < 2) break;
6280 
6281             /* Don't delete too much. */
6282 
6283             del_len = choose_block_len(temp_len - 1);
6284 
6285             del_from = UR(temp_len - del_len + 1);
6286 
6287             memmove(out_buf + del_from, out_buf + del_from + del_len,
6288                     temp_len - del_from - del_len);
6289 
6290             temp_len -= del_len;
6291 
6292             break;
6293 
6294           }
6295 
6296         case 13:
6297 
6298           if (temp_len + HAVOC_BLK_XL < MAX_FILE) {
6299 
6300             /* Clone bytes (75%) or insert a block of constant bytes (25%). */
6301 
6302             u8  actually_clone = UR(4);
6303             u32 clone_from, clone_to, clone_len;
6304             u8* new_buf;
6305 
6306             if (actually_clone) {
6307 
6308               clone_len  = choose_block_len(temp_len);
6309               clone_from = UR(temp_len - clone_len + 1);
6310 
6311             } else {
6312 
6313               clone_len = choose_block_len(HAVOC_BLK_XL);
6314               clone_from = 0;
6315 
6316             }
6317 
6318             clone_to   = UR(temp_len);
6319 
6320             new_buf = ck_alloc_nozero(temp_len + clone_len);
6321 
6322             /* Head */
6323 
6324             memcpy(new_buf, out_buf, clone_to);
6325 
6326             /* Inserted part */
6327 
6328             if (actually_clone)
6329               memcpy(new_buf + clone_to, out_buf + clone_from, clone_len);
6330             else
6331               memset(new_buf + clone_to,
6332                      UR(2) ? UR(256) : out_buf[UR(temp_len)], clone_len);
6333 
6334             /* Tail */
6335             memcpy(new_buf + clone_to + clone_len, out_buf + clone_to,
6336                    temp_len - clone_to);
6337 
6338             ck_free(out_buf);
6339             out_buf = new_buf;
6340             temp_len += clone_len;
6341 
6342           }
6343 
6344           break;
6345 
6346         case 14: {
6347 
6348             /* Overwrite bytes with a randomly selected chunk (75%) or fixed
6349                bytes (25%). */
6350 
6351             u32 copy_from, copy_to, copy_len;
6352 
6353             if (temp_len < 2) break;
6354 
6355             copy_len  = choose_block_len(temp_len - 1);
6356 
6357             copy_from = UR(temp_len - copy_len + 1);
6358             copy_to   = UR(temp_len - copy_len + 1);
6359 
6360             if (UR(4)) {
6361 
6362               if (copy_from != copy_to)
6363                 memmove(out_buf + copy_to, out_buf + copy_from, copy_len);
6364 
6365             } else memset(out_buf + copy_to,
6366                           UR(2) ? UR(256) : out_buf[UR(temp_len)], copy_len);
6367 
6368             break;
6369 
6370           }
6371 
6372         /* Values 15 and 16 can be selected only if there are any extras
6373            present in the dictionaries. */
6374 
6375         case 15: {
6376 
6377             /* Overwrite bytes with an extra. */
6378 
6379             if (!extras_cnt || (a_extras_cnt && UR(2))) {
6380 
6381               /* No user-specified extras or odds in our favor. Let's use an
6382                  auto-detected one. */
6383 
6384               u32 use_extra = UR(a_extras_cnt);
6385               u32 extra_len = a_extras[use_extra].len;
6386               u32 insert_at;
6387 
6388               if (extra_len > temp_len) break;
6389 
6390               insert_at = UR(temp_len - extra_len + 1);
6391               memcpy(out_buf + insert_at, a_extras[use_extra].data, extra_len);
6392 
6393             } else {
6394 
6395               /* No auto extras or odds in our favor. Use the dictionary. */
6396 
6397               u32 use_extra = UR(extras_cnt);
6398               u32 extra_len = extras[use_extra].len;
6399               u32 insert_at;
6400 
6401               if (extra_len > temp_len) break;
6402 
6403               insert_at = UR(temp_len - extra_len + 1);
6404               memcpy(out_buf + insert_at, extras[use_extra].data, extra_len);
6405 
6406             }
6407 
6408             break;
6409 
6410           }
6411 
6412         case 16: {
6413 
6414             u32 use_extra, extra_len, insert_at = UR(temp_len + 1);
6415             u8* new_buf;
6416 
6417             /* Insert an extra. Do the same dice-rolling stuff as for the
6418                previous case. */
6419 
6420             if (!extras_cnt || (a_extras_cnt && UR(2))) {
6421 
6422               use_extra = UR(a_extras_cnt);
6423               extra_len = a_extras[use_extra].len;
6424 
6425               if (temp_len + extra_len >= MAX_FILE) break;
6426 
6427               new_buf = ck_alloc_nozero(temp_len + extra_len);
6428 
6429               /* Head */
6430               memcpy(new_buf, out_buf, insert_at);
6431 
6432               /* Inserted part */
6433               memcpy(new_buf + insert_at, a_extras[use_extra].data, extra_len);
6434 
6435             } else {
6436 
6437               use_extra = UR(extras_cnt);
6438               extra_len = extras[use_extra].len;
6439 
6440               if (temp_len + extra_len >= MAX_FILE) break;
6441 
6442               new_buf = ck_alloc_nozero(temp_len + extra_len);
6443 
6444               /* Head */
6445               memcpy(new_buf, out_buf, insert_at);
6446 
6447               /* Inserted part */
6448               memcpy(new_buf + insert_at, extras[use_extra].data, extra_len);
6449 
6450             }
6451 
6452             /* Tail */
6453             memcpy(new_buf + insert_at + extra_len, out_buf + insert_at,
6454                    temp_len - insert_at);
6455 
6456             ck_free(out_buf);
6457             out_buf   = new_buf;
6458             temp_len += extra_len;
6459 
6460             break;
6461 
6462           }
6463 
6464       }
6465 
6466     }
6467 
6468     if (common_fuzz_stuff(argv, out_buf, temp_len))
6469       goto abandon_entry;
6470 
6471     /* out_buf might have been mangled a bit, so let's restore it to its
6472        original size and shape. */
6473 
6474     if (temp_len < len) out_buf = ck_realloc(out_buf, len);
6475     temp_len = len;
6476     memcpy(out_buf, in_buf, len);
6477 
6478     /* If we're finding new stuff, let's run for a bit longer, limits
6479        permitting. */
6480 
6481     if (queued_paths != havoc_queued) {
6482 
6483       if (perf_score <= HAVOC_MAX_MULT * 100) {
6484         stage_max  *= 2;
6485         perf_score *= 2;
6486       }
6487 
6488       havoc_queued = queued_paths;
6489 
6490     }
6491 
6492   }
6493 
6494   new_hit_cnt = queued_paths + unique_crashes;
6495 
6496   if (!splice_cycle) {
6497     stage_finds[STAGE_HAVOC]  += new_hit_cnt - orig_hit_cnt;
6498     stage_cycles[STAGE_HAVOC] += stage_max;
6499   } else {
6500     stage_finds[STAGE_SPLICE]  += new_hit_cnt - orig_hit_cnt;
6501     stage_cycles[STAGE_SPLICE] += stage_max;
6502   }
6503 
6504 #ifndef IGNORE_FINDS
6505 
6506   /************
6507    * SPLICING *
6508    ************/
6509 
6510   /* This is a last-resort strategy triggered by a full round with no findings.
6511      It takes the current input file, randomly selects another input, and
6512      splices them together at some offset, then relies on the havoc
6513      code to mutate that blob. */
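
  /* A minimal sketch of the splice, assuming locate_diffs() (defined
     earlier in this file) stores the offsets of the first and last
     differing bytes in f_diff and l_diff:

       split_at = f_diff + UR(l_diff - f_diff);   // lands in [f_diff, l_diff)
       result   = in_buf[0 .. split_at-1] ++ new_buf[split_at .. len-1]

     The seam is thus guaranteed to fall inside the window where the two
     parents disagree, rather than inside some shared header or footer. */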
6514 
6515 retry_splicing:
6516 
6517   if (use_splicing && splice_cycle++ < SPLICE_CYCLES &&
6518       queued_paths > 1 && queue_cur->len > 1) {
6519 
6520     struct queue_entry* target;
6521     u32 tid, split_at;
6522     u8* new_buf;
6523     s32 f_diff, l_diff;
6524 
6525     /* First of all, if we've modified in_buf for havoc, let's clean that
6526        up... */
6527 
6528     if (in_buf != orig_in) {
6529       ck_free(in_buf);
6530       in_buf = orig_in;
6531       len = queue_cur->len;
6532     }
6533 
6534     /* Pick a random queue entry and seek to it. Don't splice with yourself. */
6535 
6536     do { tid = UR(queued_paths); } while (tid == current_entry);
6537 
6538     splicing_with = tid;
6539     target = queue;
6540 
6541     while (tid >= 100) { target = target->next_100; tid -= 100; }
6542     while (tid--) target = target->next;
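
    /* next_100 is a shortcut pointer maintained by add_to_queue(), linking
       every 100th entry so this seek runs roughly 100x faster than a plain
       walk of the linked list. */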
6543 
6544     /* Make sure that the target has a reasonable length. */
6545 
6546     while (target && (target->len < 2 || target == queue_cur)) {
6547       target = target->next;
6548       splicing_with++;
6549     }
6550 
6551     if (!target) goto retry_splicing;
6552 
6553     /* Read the testcase into a new buffer. */
6554 
6555     fd = open(target->fname, O_RDONLY);
6556 
6557     if (fd < 0) PFATAL("Unable to open '%s'", target->fname);
6558 
6559     new_buf = ck_alloc_nozero(target->len);
6560 
6561     ck_read(fd, new_buf, target->len, target->fname);
6562 
6563     close(fd);
6564 
6565     /* Find a suitable splicing location, somewhere between the first and
6566        the last differing byte. Bail out if the difference is just a single
6567        byte or so. */
6568 
6569     locate_diffs(in_buf, new_buf, MIN(len, target->len), &f_diff, &l_diff);
6570 
6571     if (f_diff < 0 || l_diff < 2 || f_diff == l_diff) {
6572       ck_free(new_buf);
6573       goto retry_splicing;
6574     }
6575 
6576     /* Split somewhere between the first and last differing byte. */
6577 
6578     split_at = f_diff + UR(l_diff - f_diff);
6579 
6580     /* Do the thing. */
6581 
6582     len = target->len;
6583     memcpy(new_buf, in_buf, split_at);
6584     in_buf = new_buf;
6585 
6586     ck_free(out_buf);
6587     out_buf = ck_alloc_nozero(len);
6588     memcpy(out_buf, in_buf, len);
6589 
6590     goto havoc_stage;
6591 
6592   }
6593 
6594 #endif /* !IGNORE_FINDS */
6595 
6596   ret_val = 0;
6597 
6598 abandon_entry:
6599 
6600   splicing_with = -1;
6601 
6602   /* Update pending_not_fuzzed count if we made it through the calibration
6603      cycle and have not seen this entry before. */
6604 
6605   if (!stop_soon && !queue_cur->cal_failed && !queue_cur->was_fuzzed) {
6606     queue_cur->was_fuzzed = 1;
6607     pending_not_fuzzed--;
6608     if (queue_cur->favored) pending_favored--;
6609   }
6610 
6611   munmap(orig_in, queue_cur->len);
6612 
6613   if (in_buf != orig_in) ck_free(in_buf);
6614   ck_free(out_buf);
6615   ck_free(eff_map);
6616 
6617   return ret_val;
6618 
6619 #undef FLIP_BIT
6620 
6621 }
6622 
6623 
6624 /* Grab interesting test cases from other fuzzers. */
6625 
6626 static void sync_fuzzers(char** argv) {
6627 
6628   DIR* sd;
6629   struct dirent* sd_ent;
6630   u32 sync_cnt = 0;
6631 
6632   sd = opendir(sync_dir);
6633   if (!sd) PFATAL("Unable to open '%s'", sync_dir);
6634 
6635   stage_max = stage_cur = 0;
6636   cur_depth = 0;
6637 
6638   /* Look at the entries created for every other fuzzer in the sync directory. */
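
  /* For reference, the on-disk layout walked below (names illustrative):

       sync_dir/
         fuzzer01/queue/id:000000,orig:seed   <- other instances' test cases
         fuzzer02/queue/id:000417,...
       out_dir/.synced/fuzzer01               <- last imported case ID (u32)

     Only queue entries with IDs at or above the stored watermark are
     re-executed and considered for import. */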
6639 
6640   while ((sd_ent = readdir(sd))) {
6641 
6642     static u8 stage_tmp[128];
6643 
6644     DIR* qd;
6645     struct dirent* qd_ent;
6646     u8 *qd_path, *qd_synced_path;
6647     u32 min_accept = 0, next_min_accept;
6648 
6649     s32 id_fd;
6650 
6651     /* Skip dot files and our own output directory. */
6652 
6653     if (sd_ent->d_name[0] == '.' || !strcmp(sync_id, sd_ent->d_name)) continue;
6654 
6655     /* Skip anything that doesn't have a queue/ subdirectory. */
6656 
6657     qd_path = alloc_printf("%s/%s/queue", sync_dir, sd_ent->d_name);
6658 
6659     if (!(qd = opendir(qd_path))) {
6660       ck_free(qd_path);
6661       continue;
6662     }
6663 
6664     /* Retrieve the ID of the last seen test case. */
6665 
6666     qd_synced_path = alloc_printf("%s/.synced/%s", out_dir, sd_ent->d_name);
6667 
6668     id_fd = open(qd_synced_path, O_RDWR | O_CREAT, 0600);
6669 
6670     if (id_fd < 0) PFATAL("Unable to create '%s'", qd_synced_path);
6671 
6672     if (read(id_fd, &min_accept, sizeof(u32)) > 0)
6673       lseek(id_fd, 0, SEEK_SET);
6674 
6675     next_min_accept = min_accept;
6676 
6677     /* Show stats */
6678 
6679     sprintf(stage_tmp, "sync %u", ++sync_cnt);
6680     stage_name = stage_tmp;
6681     stage_cur  = 0;
6682     stage_max  = 0;
6683 
6684     /* For every file queued by this fuzzer, parse ID and see if we have looked at
6685        it before; exec a test case if not. */
6686 
6687     while ((qd_ent = readdir(qd))) {
6688 
6689       u8* path;
6690       s32 fd;
6691       struct stat st;
6692 
6693       if (qd_ent->d_name[0] == '.' ||
6694           sscanf(qd_ent->d_name, CASE_PREFIX "%06u", &syncing_case) != 1 ||
6695           syncing_case < min_accept) continue;
6696 
6697       /* OK, sounds like a new one. Let's give it a try. */
6698 
6699       if (syncing_case >= next_min_accept)
6700         next_min_accept = syncing_case + 1;
6701 
6702       path = alloc_printf("%s/%s", qd_path, qd_ent->d_name);
6703 
6704       /* Allow this to fail in case the other fuzzer is resuming or the like... */
6705 
6706       fd = open(path, O_RDONLY);
6707 
6708       if (fd < 0) {
6709          ck_free(path);
6710          continue;
6711       }
6712 
6713       if (fstat(fd, &st)) PFATAL("fstat() failed");
6714 
6715       /* Ignore zero-sized or oversized files. */
6716 
6717       if (st.st_size && st.st_size <= MAX_FILE) {
6718 
6719         u8  fault;
6720         u8* mem = mmap(0, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
6721 
6722         if (mem == MAP_FAILED) PFATAL("Unable to mmap '%s'", path);
6723 
6724         /* See what happens. We rely on save_if_interesting() to catch major
6725            errors and save the test case. */
6726 
6727         write_to_testcase(mem, st.st_size);
6728 
6729         fault = run_target(argv, exec_tmout);
6730 
6731         if (stop_soon) return;
6732 
6733         syncing_party = sd_ent->d_name;
6734         queued_imported += save_if_interesting(argv, mem, st.st_size, fault);
6735         syncing_party = 0;
6736 
6737         munmap(mem, st.st_size);
6738 
6739         if (!(stage_cur++ % stats_update_freq)) show_stats();
6740 
6741       }
6742 
6743       ck_free(path);
6744       close(fd);
6745 
6746     }
6747 
6748     ck_write(id_fd, &next_min_accept, sizeof(u32), qd_synced_path);
6749 
6750     close(id_fd);
6751     closedir(qd);
6752     ck_free(qd_path);
6753     ck_free(qd_synced_path);
6754 
6755   }
6756 
6757   closedir(sd);
6758 
6759 }
6760 
6761 
6762 /* Handle stop signal (Ctrl-C, etc). */
6763 
6764 static void handle_stop_sig(int sig) {
6765 
6766   stop_soon = 1;
6767 
6768   if (child_pid > 0) kill(child_pid, SIGKILL);
6769   if (forksrv_pid > 0) kill(forksrv_pid, SIGKILL);
6770 
6771 }
6772 
6773 
6774 /* Handle skip request (SIGUSR1). */
6775 
6776 static void handle_skipreq(int sig) {
6777 
6778   skip_requested = 1;
6779 
6780 }
6781 
6782 /* Handle timeout (SIGALRM). */
6783 
6784 static void handle_timeout(int sig) {
6785 
6786   if (child_pid > 0) {
6787 
6788     child_timed_out = 1;
6789     kill(child_pid, SIGKILL);
6790 
6791   } else if (child_pid == -1 && forksrv_pid > 0) {
6792 
6793     child_timed_out = 1;
6794     kill(forksrv_pid, SIGKILL);
6795 
6796   }
6797 
6798 }
6799 
6800 
6801 /* Do a PATH search to find the target binary, making sure that it exists
6802    and isn't a shell script - a common and painful mistake. We also check
6803    for a valid ELF header and for evidence of AFL instrumentation. */
6804 
6805 EXP_ST void check_binary(u8* fname) {
6806 
6807   u8* env_path = 0;
6808   struct stat st;
6809 
6810   s32 fd;
6811   u8* f_data;
6812   u32 f_len = 0;
6813 
6814   ACTF("Validating target binary...");
6815 
6816   if (strchr(fname, '/') || !(env_path = getenv("PATH"))) {
6817 
6818     target_path = ck_strdup(fname);
6819     if (stat(target_path, &st) || !S_ISREG(st.st_mode) ||
6820         !(st.st_mode & 0111) || (f_len = st.st_size) < 4)
6821       FATAL("Program '%s' not found or not executable", fname);
6822 
6823   } else {
6824 
6825     while (env_path) {
6826 
6827       u8 *cur_elem, *delim = strchr(env_path, ':');
6828 
6829       if (delim) {
6830 
6831         cur_elem = ck_alloc(delim - env_path + 1);
6832         memcpy(cur_elem, env_path, delim - env_path);
6833         delim++;
6834 
6835       } else cur_elem = ck_strdup(env_path);
6836 
6837       env_path = delim;
6838 
6839       if (cur_elem[0])
6840         target_path = alloc_printf("%s/%s", cur_elem, fname);
6841       else
6842         target_path = ck_strdup(fname);
6843 
6844       ck_free(cur_elem);
6845 
6846       if (!stat(target_path, &st) && S_ISREG(st.st_mode) &&
6847           (st.st_mode & 0111) && (f_len = st.st_size) >= 4) break;
6848 
6849       ck_free(target_path);
6850       target_path = 0;
6851 
6852     }
6853 
6854     if (!target_path) FATAL("Program '%s' not found or not executable", fname);
6855 
6856   }
6857 
6858   if (getenv("AFL_SKIP_BIN_CHECK")) return;
6859 
6860   /* Check for blatant user errors. */
6861 
6862   if ((!strncmp(target_path, "/tmp/", 5) && !strchr(target_path + 5, '/')) ||
6863       (!strncmp(target_path, "/var/tmp/", 9) && !strchr(target_path + 9, '/')))
6864      FATAL("Please don't keep binaries in /tmp or /var/tmp");
6865 
6866   fd = open(target_path, O_RDONLY);
6867 
6868   if (fd < 0) PFATAL("Unable to open '%s'", target_path);
6869 
6870   f_data = mmap(0, f_len, PROT_READ, MAP_PRIVATE, fd, 0);
6871 
6872   if (f_data == MAP_FAILED) PFATAL("Unable to mmap file '%s'", target_path);
6873 
6874   close(fd);
6875 
6876   if (f_data[0] == '#' && f_data[1] == '!') {
6877 
6878     SAYF("\n" cLRD "[-] " cRST
6879          "Oops, the target binary looks like a shell script. Some build systems will\n"
6880          "    sometimes generate shell stubs for dynamically linked programs; try static\n"
6881          "    library mode (./configure --disable-shared) if that's the case.\n\n"
6882 
6883          "    Another possible cause is that you are actually trying to use a shell\n"
6884          "    wrapper around the fuzzed component. Invoking shell can slow down the\n"
6885          "    fuzzing process by a factor of 20 or more; it's best to write the wrapper\n"
6886          "    in a compiled language instead.\n");
6887 
6888     FATAL("Program '%s' is a shell script", target_path);
6889 
6890   }
6891 
6892 #ifndef __APPLE__
6893 
6894   if (f_data[0] != 0x7f || memcmp(f_data + 1, "ELF", 3))
6895     FATAL("Program '%s' is not an ELF binary", target_path);
6896 
6897 #else
6898 
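  /* The bytes 0xCF 0xFA 0xED (0xFE) are MH_MAGIC_64 as laid out on disk on
     a little-endian machine, i.e. the header of a 64-bit Mach-O image. */
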
6899   if (f_data[0] != 0xCF || f_data[1] != 0xFA || f_data[2] != 0xED)
6900     FATAL("Program '%s' is not a 64-bit Mach-O binary", target_path);
6901 
6902 #endif /* ^!__APPLE__ */
6903 
6904   if (!qemu_mode && !dumb_mode &&
6905       !memmem(f_data, f_len, SHM_ENV_VAR, strlen(SHM_ENV_VAR) + 1)) {
6906 
6907     SAYF("\n" cLRD "[-] " cRST
6908          "Looks like the target binary is not instrumented! The fuzzer depends on\n"
6909          "    compile-time instrumentation to isolate interesting test cases while\n"
6910          "    mutating the input data. For more information, and for tips on how to\n"
6911          "    instrument binaries, please see %s/README.\n\n"
6912 
6913          "    When source code is not available, you may be able to leverage QEMU\n"
6914          "    mode support. Consult the README for tips on how to enable this.\n"
6915 
6916          "    (It is also possible to use afl-fuzz as a traditional, \"dumb\" fuzzer.\n"
6917          "    For that, you can use the -n option - but expect much worse results.)\n",
6918          doc_path);
6919 
6920     FATAL("No instrumentation detected");
6921 
6922   }
6923 
6924   if (qemu_mode &&
6925       memmem(f_data, f_len, SHM_ENV_VAR, strlen(SHM_ENV_VAR) + 1)) {
6926 
6927     SAYF("\n" cLRD "[-] " cRST
6928          "This program appears to be instrumented with afl-gcc, but is being run in\n"
6929          "    QEMU mode (-Q). This is probably not what you want - this setup will be\n"
6930          "    slow and offer no practical benefits.\n");
6931 
6932     FATAL("Instrumentation found in -Q mode");
6933 
6934   }
6935 
6936   if (memmem(f_data, f_len, "libasan.so", 10) ||
6937       memmem(f_data, f_len, "__msan_init", 11)) uses_asan = 1;
6938 
6939   /* Detect persistent & deferred init signatures in the binary. */
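
  /* These signatures are magic strings that the llvm_mode __AFL_LOOP() and
     __AFL_INIT() macros compile into the target, so a plain memmem() over
     the image is enough to tell which mode the binary expects. */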
6940 
6941   if (memmem(f_data, f_len, PERSIST_SIG, strlen(PERSIST_SIG) + 1)) {
6942 
6943     OKF(cPIN "Persistent mode binary detected.");
6944     setenv(PERSIST_ENV_VAR, "1", 1);
6945     persistent_mode = 1;
6946 
6947   } else if (getenv("AFL_PERSISTENT")) {
6948 
6949     WARNF("AFL_PERSISTENT is no longer supported and may misbehave!");
6950 
6951   }
6952 
6953   if (memmem(f_data, f_len, DEFER_SIG, strlen(DEFER_SIG) + 1)) {
6954 
6955     OKF(cPIN "Deferred forkserver binary detected.");
6956     setenv(DEFER_ENV_VAR, "1", 1);
6957     deferred_mode = 1;
6958 
6959   } else if (getenv("AFL_DEFER_FORKSRV")) {
6960 
6961     WARNF("AFL_DEFER_FORKSRV is no longer supported and may misbehave!");
6962 
6963   }
6964 
6965   if (munmap(f_data, f_len)) PFATAL("munmap() failed");
6966 
6967 }
6968 
6969 
6970 /* Trim and possibly create a banner for the run. */
6971 
6972 static void fix_up_banner(u8* name) {
6973 
6974   if (!use_banner) {
6975 
6976     if (sync_id) {
6977 
6978       use_banner = sync_id;
6979 
6980     } else {
6981 
6982       u8* trim = strrchr(name, '/');
6983       if (!trim) use_banner = name; else use_banner = trim + 1;
6984 
6985     }
6986 
6987   }
6988 
6989   if (strlen(use_banner) > 40) {
6990 
6991     u8* tmp = ck_alloc(44);
6992     sprintf(tmp, "%.40s...", use_banner);
6993     use_banner = tmp;
6994 
6995   }
6996 
6997 }
6998 
6999 
7000 /* Check if we're on TTY. */
7001 
7002 static void check_if_tty(void) {
7003 
7004   struct winsize ws;
7005 
7006   if (getenv("AFL_NO_UI")) {
7007     OKF("Disabling the UI because AFL_NO_UI is set.");
7008     not_on_tty = 1;
7009     return;
7010   }
7011 
7012   if (ioctl(1, TIOCGWINSZ, &ws)) {
7013 
7014     if (errno == ENOTTY) {
7015       OKF("Looks like we're not running on a tty, so I'll be a bit less verbose.");
7016       not_on_tty = 1;
7017     }
7018 
7019     return;
7020   }
7021 
7022 }
7023 
7024 
7025 /* Check terminal dimensions after resize. */
7026 
7027 static void check_term_size(void) {
7028 
7029   struct winsize ws;
7030 
7031   term_too_small = 0;
7032 
7033   if (ioctl(1, TIOCGWINSZ, &ws)) return;
7034 
7035   if (ws.ws_row < 25 || ws.ws_col < 80) term_too_small = 1;
7036 
7037 }
7038 
7039 
7040 
7041 /* Display usage hints. */
7042 
7043 static void usage(u8* argv0) {
7044 
7045   SAYF("\n%s [ options ] -- /path/to/fuzzed_app [ ... ]\n\n"
7046 
7047        "Required parameters:\n\n"
7048 
7049        "  -i dir        - input directory with test cases\n"
7050        "  -o dir        - output directory for fuzzer findings\n\n"
7051 
7052        "Execution control settings:\n\n"
7053 
7054        "  -f file       - location read by the fuzzed program (stdin)\n"
7055        "  -t msec       - timeout for each run (auto-scaled, 50-%u ms)\n"
7056        "  -m megs       - memory limit for child process (%u MB)\n"
7057        "  -Q            - use binary-only instrumentation (QEMU mode)\n\n"
7058 
7059        "Fuzzing behavior settings:\n\n"
7060 
7061        "  -d            - quick & dirty mode (skips deterministic steps)\n"
7062        "  -n            - fuzz without instrumentation (dumb mode)\n"
7063        "  -x dir        - optional fuzzer dictionary (see README)\n\n"
7064 
7065        "Other stuff:\n\n"
7066 
7067        "  -T text       - text banner to show on the screen\n"
7068        "  -M / -S id    - distributed mode (see parallel_fuzzing.txt)\n"
7069        "  -C            - crash exploration mode (the Peruvian rabbit thing)\n\n"
7070 
7071        "For additional tips, please consult %s/README.\n\n",
7072 
7073        argv0, EXEC_TIMEOUT, MEM_LIMIT, doc_path);
7074 
7075   exit(1);
7076 
7077 }
7078 
7079 
7080 /* Prepare output directories and fds. */
7081 
7082 EXP_ST void setup_dirs_fds(void) {
7083 
7084   u8* tmp;
7085   s32 fd;
7086 
7087   ACTF("Setting up output directories...");
7088 
7089   if (sync_id && mkdir(sync_dir, 0700) && errno != EEXIST)
7090       PFATAL("Unable to create '%s'", sync_dir);
7091 
7092   if (mkdir(out_dir, 0700)) {
7093 
7094     if (errno != EEXIST) PFATAL("Unable to create '%s'", out_dir);
7095 
7096     maybe_delete_out_dir();
7097 
7098   } else {
7099 
7100     if (in_place_resume)
7101       FATAL("Resume attempted but old output directory not found");
7102 
7103     out_dir_fd = open(out_dir, O_RDONLY);
7104 
7105 #ifndef __sun
7106 
7107     if (out_dir_fd < 0 || flock(out_dir_fd, LOCK_EX | LOCK_NB))
7108       PFATAL("Unable to flock() output directory.");
7109 
7110 #endif /* !__sun */
7111 
7112   }
7113 
7114   /* Queue directory for any starting & discovered paths. */
7115 
7116   tmp = alloc_printf("%s/queue", out_dir);
7117   if (mkdir(tmp, 0700)) PFATAL("Unable to create '%s'", tmp);
7118   ck_free(tmp);
7119 
7120   /* Top-level directory for queue metadata used for session
7121      resume and related tasks. */
7122 
7123   tmp = alloc_printf("%s/queue/.state/", out_dir);
7124   if (mkdir(tmp, 0700)) PFATAL("Unable to create '%s'", tmp);
7125   ck_free(tmp);
7126 
7127   /* Directory for flagging queue entries that went through
7128      deterministic fuzzing in the past. */
7129 
7130   tmp = alloc_printf("%s/queue/.state/deterministic_done/", out_dir);
7131   if (mkdir(tmp, 0700)) PFATAL("Unable to create '%s'", tmp);
7132   ck_free(tmp);
7133 
7134   /* Directory with the auto-selected dictionary entries. */
7135 
7136   tmp = alloc_printf("%s/queue/.state/auto_extras/", out_dir);
7137   if (mkdir(tmp, 0700)) PFATAL("Unable to create '%s'", tmp);
7138   ck_free(tmp);
7139 
7140   /* The set of paths currently deemed redundant. */
7141 
7142   tmp = alloc_printf("%s/queue/.state/redundant_edges/", out_dir);
7143   if (mkdir(tmp, 0700)) PFATAL("Unable to create '%s'", tmp);
7144   ck_free(tmp);
7145 
7146   /* The set of paths showing variable behavior. */
7147 
7148   tmp = alloc_printf("%s/queue/.state/variable_behavior/", out_dir);
7149   if (mkdir(tmp, 0700)) PFATAL("Unable to create '%s'", tmp);
7150   ck_free(tmp);
7151 
7152   /* Sync directory for keeping track of cooperating fuzzers. */
7153 
7154   if (sync_id) {
7155 
7156     tmp = alloc_printf("%s/.synced/", out_dir);
7157 
7158     if (mkdir(tmp, 0700) && (!in_place_resume || errno != EEXIST))
7159       PFATAL("Unable to create '%s'", tmp);
7160 
7161     ck_free(tmp);
7162 
7163   }
7164 
7165   /* All recorded crashes. */
7166 
7167   tmp = alloc_printf("%s/crashes", out_dir);
7168   if (mkdir(tmp, 0700)) PFATAL("Unable to create '%s'", tmp);
7169   ck_free(tmp);
7170 
7171   /* All recorded hangs. */
7172 
7173   tmp = alloc_printf("%s/hangs", out_dir);
7174   if (mkdir(tmp, 0700)) PFATAL("Unable to create '%s'", tmp);
7175   ck_free(tmp);
7176 
7177   /* Generally useful file descriptors. */
7178 
7179   dev_null_fd = open("/dev/null", O_RDWR);
7180   if (dev_null_fd < 0) PFATAL("Unable to open /dev/null");
7181 
7182   dev_urandom_fd = open("/dev/urandom", O_RDONLY);
7183   if (dev_urandom_fd < 0) PFATAL("Unable to open /dev/urandom");
7184 
7185   /* Gnuplot output file. */
7186 
7187   tmp = alloc_printf("%s/plot_data", out_dir);
7188   fd = open(tmp, O_WRONLY | O_CREAT | O_EXCL, 0600);
7189   if (fd < 0) PFATAL("Unable to create '%s'", tmp);
7190   ck_free(tmp);
7191 
7192   plot_file = fdopen(fd, "w");
7193   if (!plot_file) PFATAL("fdopen() failed");
7194 
7195   fprintf(plot_file, "# unix_time, cycles_done, cur_path, paths_total, "
7196                      "pending_total, pending_favs, map_size, unique_crashes, "
7197                      "unique_hangs, max_depth, execs_per_sec\n");
7198                      /* ignore errors */
7199 
7200 }
7201 
7202 
7203 /* Setup the output file for fuzzed data, if not using -f. */
7204 
7205 EXP_ST void setup_stdio_file(void) {
7206 
7207   u8* fn = alloc_printf("%s/.cur_input", out_dir);
7208 
7209   unlink(fn); /* Ignore errors */
7210 
7211   out_fd = open(fn, O_RDWR | O_CREAT | O_EXCL, 0600);
7212 
7213   if (out_fd < 0) PFATAL("Unable to create '%s'", fn);
7214 
7215   ck_free(fn);
7216 
7217 }
7218 
7219 
7220 /* Make sure that core dumps don't go to a program. */
7221 
7222 static void check_crash_handling(void) {
7223 
7224 #ifdef __APPLE__
7225 
7226   /* Yuck! There appears to be no simple C API to query for the state of
7227      loaded daemons on MacOS X, and I'm a bit hesitant to do something
7228      more sophisticated, such as disabling crash reporting via Mach ports,
7229      until I get a box to test the code. So, for now, we check for crash
7230      reporting the awful way. */
7231 
7232   if (system("launchctl list 2>/dev/null | grep -q '\\.ReportCrash$'")) return;
7233 
7234   SAYF("\n" cLRD "[-] " cRST
7235        "Whoops, your system is configured to forward crash notifications to an\n"
7236        "    external crash reporting utility. This will cause issues due to the\n"
7237        "    extended delay between the fuzzed binary malfunctioning and this fact\n"
7238        "    being relayed to the fuzzer via the standard waitpid() API.\n\n"
7239        "    To avoid having crashes misinterpreted as timeouts, please run the\n"
7240        "    following commands:\n\n"
7241 
7242        "    SL=/System/Library; PL=com.apple.ReportCrash\n"
7243        "    launchctl unload -w ${SL}/LaunchAgents/${PL}.plist\n"
7244        "    sudo launchctl unload -w ${SL}/LaunchDaemons/${PL}.Root.plist\n");
7245 
7246   if (!getenv("AFL_I_DONT_CARE_ABOUT_MISSING_CRASHES"))
7247     FATAL("Crash reporter detected");
7248 
7249 #else
7250 
7251   /* This is Linux specific, but I don't think there's anything equivalent on
7252      *BSD, so we can just let it slide for now. */
7253 
7254   s32 fd = open("/proc/sys/kernel/core_pattern", O_RDONLY);
7255   u8  fchar;
7256 
7257   if (fd < 0) return;
7258 
7259   ACTF("Checking core_pattern...");
7260 
7261   if (read(fd, &fchar, 1) == 1 && fchar == '|') {
7262 
7263     SAYF("\n" cLRD "[-] " cRST
7264          "Hmm, your system is configured to send core dump notifications to an\n"
7265          "    external utility. This will cause issues: there will be an extended delay\n"
7266          "    between stumbling upon a crash and having this information relayed to the\n"
7267          "    fuzzer via the standard waitpid() API.\n\n"
7268 
7269          "    To avoid having crashes misinterpreted as timeouts, please log in as root\n"
7270          "    and temporarily modify /proc/sys/kernel/core_pattern, like so:\n\n"
7271 
7272          "    echo core >/proc/sys/kernel/core_pattern\n");
7273 
7274     if (!getenv("AFL_I_DONT_CARE_ABOUT_MISSING_CRASHES"))
7275       FATAL("Pipe at the beginning of 'core_pattern'");
7276 
7277   }
7278 
7279   close(fd);
7280 
7281 #endif /* ^__APPLE__ */
7282 
7283 }
7284 
7285 
7286 /* Check CPU governor. */
7287 
7288 static void check_cpu_governor(void) {
7289 
7290   FILE* f;
7291   u8 tmp[128];
7292   u64 min = 0, max = 0;
7293 
7294   if (getenv("AFL_SKIP_CPUFREQ")) return;
7295 
7296   f = fopen("/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor", "r");
7297   if (!f) return;
7298 
7299   ACTF("Checking CPU scaling governor...");
7300 
7301   if (!fgets(tmp, 128, f)) PFATAL("fgets() failed");
7302 
7303   fclose(f);
7304 
7305   if (!strncmp(tmp, "perf", 4)) return;
7306 
7307   f = fopen("/sys/devices/system/cpu/cpu0/cpufreq/scaling_min_freq", "r");
7308 
7309   if (f) {
7310     if (fscanf(f, "%llu", &min) != 1) min = 0;
7311     fclose(f);
7312   }
7313 
7314   f = fopen("/sys/devices/system/cpu/cpu0/cpufreq/scaling_max_freq", "r");
7315 
7316   if (f) {
7317     if (fscanf(f, "%llu", &max) != 1) max = 0;
7318     fclose(f);
7319   }
7320 
7321   if (min == max) return;
7322 
  SAYF("\n" cLRD "[-] " cRST
       "Whoops, your system uses on-demand CPU frequency scaling, adjusted\n"
       "    between %llu and %llu MHz. Unfortunately, the scaling algorithm in the\n"
       "    kernel is imperfect and can miss the short-lived processes spawned by\n"
       "    afl-fuzz. To keep things moving, run these commands as root:\n\n"

       "    cd /sys/devices/system/cpu\n"
       "    echo performance | tee cpu*/cpufreq/scaling_governor\n\n"

       "    You can later go back to the original state by replacing 'performance' with\n"
       "    'ondemand'. If you don't want to change the settings, set AFL_SKIP_CPUFREQ\n"
       "    to make afl-fuzz skip this check - but expect some performance drop.\n",
       min / 1024, max / 1024);

  FATAL("Suboptimal CPU scaling governor");

}


/* Count the number of logical CPU cores. */

static void get_core_count(void) {

  u32 cur_runnable = 0;

#if defined(__APPLE__) || defined(__FreeBSD__) || defined (__OpenBSD__)

  size_t s = sizeof(cpu_core_count);

  /* On *BSD systems, we can just use a sysctl to get the number of CPUs. */

#ifdef __APPLE__

  if (sysctlbyname("hw.logicalcpu", &cpu_core_count, &s, NULL, 0) < 0)
    return;

#else

  int s_name[2] = { CTL_HW, HW_NCPU };

  if (sysctl(s_name, 2, &cpu_core_count, &s, NULL, 0) < 0) return;

#endif /* ^__APPLE__ */

#else

#ifdef HAVE_AFFINITY

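  /* On Linux, sysconf(_SC_NPROCESSORS_ONLN) reports the number of processors
     currently online. */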
  cpu_core_count = sysconf(_SC_NPROCESSORS_ONLN);

#else

  FILE* f = fopen("/proc/stat", "r");
  u8 tmp[1024];

  if (!f) return;

  while (fgets(tmp, sizeof(tmp), f))
    if (!strncmp(tmp, "cpu", 3) && isdigit(tmp[3])) cpu_core_count++;

  fclose(f);

#endif /* ^HAVE_AFFINITY */

#endif /* ^(__APPLE__ || __FreeBSD__ || __OpenBSD__) */

  if (cpu_core_count > 0) {

    cur_runnable = (u32)get_runnable_processes();

#if defined(__APPLE__) || defined(__FreeBSD__) || defined (__OpenBSD__)

    /* Add ourselves, since the 1-minute average doesn't include that yet. */

    cur_runnable++;

#endif /* __APPLE__ || __FreeBSD__ || __OpenBSD__ */

    OKF("You have %u CPU core%s and %u runnable tasks (utilization: %0.0f%%).",
        cpu_core_count, cpu_core_count > 1 ? "s" : "",
        cur_runnable, cur_runnable * 100.0 / cpu_core_count);

    if (cpu_core_count > 1) {

      if (cur_runnable > cpu_core_count * 1.5) {

        WARNF("System under apparent load, performance may be spotty.");

      } else if (cur_runnable + 1 <= cpu_core_count) {

        OKF("Try parallel jobs - see %s/parallel_fuzzing.txt.", doc_path);

      }

    }

  } else {

    cpu_core_count = 0;
    WARNF("Unable to figure out the number of CPU cores.");

  }

}


/* Validate and fix up out_dir and sync_dir when using -S. */

static void fix_up_sync(void) {

  u8* x = sync_id;

  if (dumb_mode)
    FATAL("-S / -M and -n are mutually exclusive");

  if (skip_deterministic) {

    if (force_deterministic)
      FATAL("use -S instead of -M -d");
    else
      FATAL("-S already implies -d");

  }

  while (*x) {

    if (!isalnum(*x) && *x != '_' && *x != '-')
      FATAL("Non-alphanumeric fuzzer ID specified via -S or -M");

    x++;

  }

  if (strlen(sync_id) > 32) FATAL("Fuzzer ID too long");

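  /* The user-supplied -o directory becomes the shared sync dir; this
     instance gets a subdirectory under it, named after its fuzzer ID. */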
  x = alloc_printf("%s/%s", out_dir, sync_id);

  sync_dir = out_dir;
  out_dir  = x;

  if (!force_deterministic) {
    skip_deterministic = 1;
    use_splicing = 1;
  }

}


/* Handle screen resize (SIGWINCH). */

static void handle_resize(int sig) {
  clear_screen = 1;
}


/* Check ASAN options. */

static void check_asan_opts(void) {
  u8* x = getenv("ASAN_OPTIONS");

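  /* Both settings are required: afl-fuzz recognizes ASAN faults via the
     abort() signal, and symbolization only slows the target down. */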
  if (x) {

    if (!strstr(x, "abort_on_error=1"))
      FATAL("Custom ASAN_OPTIONS set without abort_on_error=1 - please fix!");

    if (!strstr(x, "symbolize=0"))
      FATAL("Custom ASAN_OPTIONS set without symbolize=0 - please fix!");

  }

  x = getenv("MSAN_OPTIONS");

  if (x) {

    if (!strstr(x, "exit_code=" STRINGIFY(MSAN_ERROR)))
      FATAL("Custom MSAN_OPTIONS set without exit_code="
            STRINGIFY(MSAN_ERROR) " - please fix!");

    if (!strstr(x, "symbolize=0"))
      FATAL("Custom MSAN_OPTIONS set without symbolize=0 - please fix!");

  }

}


/* Detect @@ in args. */

EXP_ST void detect_file_args(char** argv) {

  u32 i = 0;
  u8* cwd = getcwd(NULL, 0);

  if (!cwd) PFATAL("getcwd() failed");

  while (argv[i]) {

    u8* aa_loc = strstr(argv[i], "@@");

    if (aa_loc) {

      u8 *aa_subst, *n_arg;

      /* If we don't have a file name chosen yet, use a safe default. */

      if (!out_file)
        out_file = alloc_printf("%s/.cur_input", out_dir);

      /* Be sure that we're always using fully-qualified paths. */

      if (out_file[0] == '/') aa_subst = out_file;
      else aa_subst = alloc_printf("%s/%s", cwd, out_file);

      /* Construct a replacement argv value. */

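      /* Temporarily NUL-terminate at the '@@' marker so that alloc_printf()
         sees only the prefix; the original byte is restored right after. */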
      *aa_loc = 0;
      n_arg = alloc_printf("%s%s%s", argv[i], aa_subst, aa_loc + 2);
      argv[i] = n_arg;
      *aa_loc = '@';

      if (out_file[0] != '/') ck_free(aa_subst);

    }

    i++;

  }

  free(cwd); /* not tracked */

}


/* Set up signal handlers. More complicated than it needs to be, because libc
   on Solaris doesn't resume interrupted reads(), sets SA_RESETHAND when you
   call siginterrupt(), and does other stupid things. */

EXP_ST void setup_signal_handlers(void) {

  struct sigaction sa;

  sa.sa_handler   = NULL;
  sa.sa_flags     = SA_RESTART;
  sa.sa_sigaction = NULL;

  sigemptyset(&sa.sa_mask);

  /* Various ways of saying "stop". */

  sa.sa_handler = handle_stop_sig;
  sigaction(SIGHUP, &sa, NULL);
  sigaction(SIGINT, &sa, NULL);
  sigaction(SIGTERM, &sa, NULL);

  /* Exec timeout notifications. */

  sa.sa_handler = handle_timeout;
  sigaction(SIGALRM, &sa, NULL);

  /* Window resize */

  sa.sa_handler = handle_resize;
  sigaction(SIGWINCH, &sa, NULL);

  /* SIGUSR1: skip entry */

  sa.sa_handler = handle_skipreq;
  sigaction(SIGUSR1, &sa, NULL);

  /* Things we don't care about. */

  sa.sa_handler = SIG_IGN;
  sigaction(SIGTSTP, &sa, NULL);
  sigaction(SIGPIPE, &sa, NULL);

}


/* Rewrite argv for QEMU. */

static char** get_qemu_argv(u8* own_loc, char** argv, int argc) {

  char** new_argv = ck_alloc(sizeof(char*) * (argc + 4));
  u8 *tmp, *cp, *rsl, *own_copy;

  /* Workaround for a QEMU stability glitch. */

  setenv("QEMU_LOG", "nochain", 1);

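  /* Slots 0-2 will hold the QEMU path, "--", and the target binary; copying
     argc pointers starting at argv[1] also carries over the terminating
     NULL. */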
  memcpy(new_argv + 3, argv + 1, sizeof(char*) * argc);

  new_argv[2] = target_path;
  new_argv[1] = "--";

  /* Now we need to actually find the QEMU binary to put in argv[0]. */

  tmp = getenv("AFL_PATH");

  if (tmp) {

    cp = alloc_printf("%s/afl-qemu-trace", tmp);

    if (access(cp, X_OK))
      FATAL("Unable to find '%s'", cp);

    target_path = new_argv[0] = cp;
    return new_argv;

  }


  own_copy = ck_strdup(own_loc);
  rsl = strrchr(own_copy, '/');

  if (rsl) {

    *rsl = 0;

    cp = alloc_printf("%s/afl-qemu-trace", own_copy);
    ck_free(own_copy);

    if (!access(cp, X_OK)) {

      target_path = new_argv[0] = cp;
      return new_argv;

    }

  } else ck_free(own_copy);

  if (!access(BIN_PATH "/afl-qemu-trace", X_OK)) {

    target_path = new_argv[0] = ck_strdup(BIN_PATH "/afl-qemu-trace");
    return new_argv;

  }

  SAYF("\n" cLRD "[-] " cRST
       "Oops, unable to find the 'afl-qemu-trace' binary. The binary must be built\n"
       "    separately by following the instructions in qemu_mode/README.qemu. If you\n"
       "    already have the binary installed, you may need to specify AFL_PATH in the\n"
       "    environment.\n\n"

       "    Of course, even without QEMU, afl-fuzz can still work with binaries that are\n"
       "    instrumented at compile time with afl-gcc. It is also possible to use it as a\n"
       "    traditional \"dumb\" fuzzer by specifying '-n' in the command line.\n");

  FATAL("Failed to locate 'afl-qemu-trace'.");

}


/* Make a copy of the current command line. */

static void save_cmdline(u32 argc, char** argv) {

  u32 len = 1, i;
  u8* buf;

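  /* One byte for the trailing NUL, plus the length of every argument and a
     separating space; the final separator's byte simply goes unused. */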
  for (i = 0; i < argc; i++)
    len += strlen(argv[i]) + 1;

  buf = orig_cmdline = ck_alloc(len);

  for (i = 0; i < argc; i++) {

    u32 l = strlen(argv[i]);

    memcpy(buf, argv[i], l);
    buf += l;

    if (i != argc - 1) *(buf++) = ' ';

  }

  *buf = 0;

}


#ifndef AFL_LIB

/* Main entry point */

int main(int argc, char** argv) {

  s32 opt;
  u64 prev_queued = 0;
  u32 sync_interval_cnt = 0, seek_to;
  u8  *extras_dir = 0;
  u8  mem_limit_given = 0;
  u8  exit_1 = !!getenv("AFL_BENCH_JUST_ONE");
  char** use_argv;

  struct timeval tv;
  struct timezone tz;

  SAYF(cCYA "afl-fuzz " cBRI VERSION cRST " by <lcamtuf@google.com>\n");

  doc_path = access(DOC_PATH, F_OK) ? "docs" : DOC_PATH;

  gettimeofday(&tv, &tz);
  srandom(tv.tv_sec ^ tv.tv_usec ^ getpid());

  while ((opt = getopt(argc, argv, "+i:o:f:m:t:T:dnCB:S:M:x:Q")) > 0)

    switch (opt) {

      case 'i': /* input dir */

        if (in_dir) FATAL("Multiple -i options not supported");
        in_dir = optarg;

        if (!strcmp(in_dir, "-")) in_place_resume = 1;

        break;

      case 'o': /* output dir */

        if (out_dir) FATAL("Multiple -o options not supported");
        out_dir = optarg;
        break;

      case 'M': { /* master sync ID */

          u8* c;

          if (sync_id) FATAL("Multiple -S or -M options not supported");
          sync_id = ck_strdup(optarg);

          if ((c = strchr(sync_id, ':'))) {

            *c = 0;

            if (sscanf(c + 1, "%u/%u", &master_id, &master_max) != 2 ||
                !master_id || !master_max || master_id > master_max ||
                master_max > 1000000) FATAL("Bogus master ID passed to -M");

          }

          force_deterministic = 1;

        }

        break;

      case 'S':

        if (sync_id) FATAL("Multiple -S or -M options not supported");
        sync_id = ck_strdup(optarg);
        break;

      case 'f': /* target file */

        if (out_file) FATAL("Multiple -f options not supported");
        out_file = optarg;
        break;

      case 'x': /* dictionary */

        if (extras_dir) FATAL("Multiple -x options not supported");
        extras_dir = optarg;
        break;

      case 't': { /* timeout */

          u8 suffix = 0;

          if (timeout_given) FATAL("Multiple -t options not supported");

          if (sscanf(optarg, "%u%c", &exec_tmout, &suffix) < 1 ||
              optarg[0] == '-') FATAL("Bad syntax used for -t");

          if (exec_tmout < 5) FATAL("Dangerously low value of -t");

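          /* A '+' suffix means that timing-out test cases should be skipped
             rather than treated as a fatal calibration problem. */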
          if (suffix == '+') timeout_given = 2; else timeout_given = 1;

          break;

      }

      case 'm': { /* mem limit */

          u8 suffix = 'M';

          if (mem_limit_given) FATAL("Multiple -m options not supported");
          mem_limit_given = 1;

          if (!strcmp(optarg, "none")) {

            mem_limit = 0;
            break;

          }

          if (sscanf(optarg, "%llu%c", &mem_limit, &suffix) < 1 ||
              optarg[0] == '-') FATAL("Bad syntax used for -m");

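          /* mem_limit is kept in megabytes internally; scale the suffixed
             value accordingly. */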
          switch (suffix) {

            case 'T': mem_limit *= 1024 * 1024; break;
            case 'G': mem_limit *= 1024; break;
            case 'k': mem_limit /= 1024; break;
            case 'M': break;

            default:  FATAL("Unsupported suffix or bad syntax for -m");

          }

          if (mem_limit < 5) FATAL("Dangerously low value of -m");

          if (sizeof(rlim_t) == 4 && mem_limit > 2000)
            FATAL("Value of -m out of range on 32-bit systems");

        }

        break;

      case 'd': /* skip deterministic */

        if (skip_deterministic) FATAL("Multiple -d options not supported");
        skip_deterministic = 1;
        use_splicing = 1;
        break;

      case 'B': /* load bitmap */

        /* This is a secret undocumented option! It is useful if you find
           an interesting test case during a normal fuzzing process, and want
           to mutate it without rediscovering any of the test cases already
           found during an earlier run.

           To use this mode, you need to point -B to the fuzz_bitmap produced
           by an earlier run for the exact same binary... and that's it.

           I only used this once or twice to get variants of a particular
           file, so I'm not making this an official setting. */

        if (in_bitmap) FATAL("Multiple -B options not supported");

        in_bitmap = optarg;
        read_bitmap(in_bitmap);
        break;

      case 'C': /* crash mode */

        if (crash_mode) FATAL("Multiple -C options not supported");
        crash_mode = FAULT_CRASH;
        break;

      case 'n': /* dumb mode */

        if (dumb_mode) FATAL("Multiple -n options not supported");
        if (getenv("AFL_DUMB_FORKSRV")) dumb_mode = 2; else dumb_mode = 1;

        break;

      case 'T': /* banner */

        if (use_banner) FATAL("Multiple -T options not supported");
        use_banner = optarg;
        break;

      case 'Q': /* QEMU mode */

        if (qemu_mode) FATAL("Multiple -Q options not supported");
        qemu_mode = 1;

        if (!mem_limit_given) mem_limit = MEM_LIMIT_QEMU;

        break;

      default:

        usage(argv[0]);

    }

  if (optind == argc || !in_dir || !out_dir) usage(argv[0]);

  setup_signal_handlers();
  check_asan_opts();

  if (sync_id) fix_up_sync();

  if (!strcmp(in_dir, out_dir))
    FATAL("Input and output directories can't be the same");

  if (dumb_mode) {

    if (crash_mode) FATAL("-C and -n are mutually exclusive");
    if (qemu_mode)  FATAL("-Q and -n are mutually exclusive");

  }

  if (getenv("AFL_NO_FORKSRV"))    no_forkserver    = 1;
  if (getenv("AFL_NO_CPU_RED"))    no_cpu_meter_red = 1;
  if (getenv("AFL_NO_ARITH"))      no_arith         = 1;
  if (getenv("AFL_SHUFFLE_QUEUE")) shuffle_queue    = 1;
  if (getenv("AFL_FAST_CAL"))      fast_cal         = 1;

  if (getenv("AFL_HANG_TMOUT")) {
    hang_tmout = atoi(getenv("AFL_HANG_TMOUT"));
    if (!hang_tmout) FATAL("Invalid value of AFL_HANG_TMOUT");
  }

  if (dumb_mode == 2 && no_forkserver)
    FATAL("AFL_DUMB_FORKSRV and AFL_NO_FORKSRV are mutually exclusive");

  if (getenv("AFL_PRELOAD")) {
    setenv("LD_PRELOAD", getenv("AFL_PRELOAD"), 1);
    setenv("DYLD_INSERT_LIBRARIES", getenv("AFL_PRELOAD"), 1);
  }

  if (getenv("AFL_LD_PRELOAD"))
    FATAL("Use AFL_PRELOAD instead of AFL_LD_PRELOAD");

  save_cmdline(argc, argv);

  fix_up_banner(argv[optind]);

  check_if_tty();

  get_core_count();

#ifdef HAVE_AFFINITY
  bind_to_free_cpu();
#endif /* HAVE_AFFINITY */

  check_crash_handling();
  check_cpu_governor();

  setup_post();
  setup_shm();
  init_count_class16();

  setup_dirs_fds();
  read_testcases();
  load_auto();

  pivot_inputs();

  if (extras_dir) load_extras(extras_dir);

  if (!timeout_given) find_timeout();

  detect_file_args(argv + optind + 1);

  if (!out_file) setup_stdio_file();

  check_binary(argv[optind]);

  start_time = get_cur_time();

  if (qemu_mode)
    use_argv = get_qemu_argv(argv[0], argv + optind, argc - optind);
  else
    use_argv = argv + optind;

  perform_dry_run(use_argv);

  cull_queue();

  show_init_stats();

  seek_to = find_start_position();

  write_stats_file(0, 0, 0);
  save_auto();

  if (stop_soon) goto stop_fuzzing;

  /* Woop woop woop */

  if (!not_on_tty) {
    sleep(4);
    start_time += 4000;
    if (stop_soon) goto stop_fuzzing;
  }

  while (1) {

    u8 skipped_fuzz;

    cull_queue();

    if (!queue_cur) {

      queue_cycle++;
      current_entry     = 0;
      cur_skipped_paths = 0;
      queue_cur         = queue;

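      /* When resuming a session, fast-forward to the queue entry we were
         working on when the previous run stopped. */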
      while (seek_to) {
        current_entry++;
        seek_to--;
        queue_cur = queue_cur->next;
      }

      show_stats();

      if (not_on_tty) {
        ACTF("Entering queue cycle %llu.", queue_cycle);
        fflush(stdout);
      }

      /* If we had a full queue cycle with no new finds, try
         recombination strategies next. */

      if (queued_paths == prev_queued) {

        if (use_splicing) cycles_wo_finds++; else use_splicing = 1;

      } else cycles_wo_finds = 0;

      prev_queued = queued_paths;

      if (sync_id && queue_cycle == 1 && getenv("AFL_IMPORT_FIRST"))
        sync_fuzzers(use_argv);

    }

    skipped_fuzz = fuzz_one(use_argv);

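    /* Pull in test cases found by other fuzzers every SYNC_INTERVAL queue
       entries, but only when the last fuzz_one() call actually did some
       work (it returns nonzero when the entry was skipped). */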
    if (!stop_soon && sync_id && !skipped_fuzz) {

      if (!(sync_interval_cnt++ % SYNC_INTERVAL))
        sync_fuzzers(use_argv);

    }

    if (!stop_soon && exit_1) stop_soon = 2;

    if (stop_soon) break;

    queue_cur = queue_cur->next;
    current_entry++;

  }

  if (queue_cur) show_stats();

  write_bitmap();
  write_stats_file(0, 0, 0);
  save_auto();

stop_fuzzing:

  SAYF(CURSOR_SHOW cLRD "\n\n+++ Testing aborted %s +++\n" cRST,
       stop_soon == 2 ? "programmatically" : "by user");

  /* Running for more than 30 minutes but still doing first cycle? */

  if (queue_cycle == 1 && get_cur_time() - start_time > 30 * 60 * 1000) {

    SAYF("\n" cYEL "[!] " cRST
         "Stopped during the first cycle, results may be incomplete.\n"
         "    (For info on resuming, see %s/README.)\n", doc_path);

  }

  fclose(plot_file);
  destroy_queue();
  destroy_extras();
  ck_free(target_path);
  ck_free(sync_id);

  alloc_report();

  OKF("We're done here. Have a nice day!\n");

  exit(0);

}

#endif /* !AFL_LIB */