/*
   american fuzzy lop++ - bitmap related routines
   ----------------------------------------------

   Originally written by Michal Zalewski

   Now maintained by Marc Heuse <mh@mh-sec.de>,
                        Heiko Eißfeldt <heiko.eissfeldt@hexco.de> and
                        Andrea Fioraldi <andreafioraldi@gmail.com>

   Copyright 2016, 2017 Google Inc. All rights reserved.
   Copyright 2019-2020 AFLplusplus Project. All rights reserved.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at:

     http://www.apache.org/licenses/LICENSE-2.0

   This is the real deal: the program takes an instrumented binary and
   attempts a variety of basic fuzzing tricks, paying close attention to
   how they affect the execution path.

 */

#include "afl-fuzz.h"
#include <limits.h>
#if !defined NAME_MAX
  #define NAME_MAX _XOPEN_NAME_MAX
#endif

/* Write bitmap to file. The bitmap is useful mostly for the secret
   -B option, to focus a separate fuzzing session on a particular
   interesting input without rediscovering all the others. */

void write_bitmap(afl_state_t *afl) {

  u8  fname[PATH_MAX];
  s32 fd;

  if (!afl->bitmap_changed) { return; }
  afl->bitmap_changed = 0;

  snprintf(fname, PATH_MAX, "%s/fuzz_bitmap", afl->out_dir);
  fd = open(fname, O_WRONLY | O_CREAT | O_TRUNC, DEFAULT_PERMISSION);

  if (fd < 0) { PFATAL("Unable to open '%s'", fname); }

  ck_write(fd, afl->virgin_bits, afl->fsrv.map_size, fname);

  close(fd);

}

/* Count the number of bits set in the provided bitmap. Used for the status
   screen several times every second; it does not have to be fast. */

u32 count_bits(afl_state_t *afl, u8 *mem) {

  u32 *ptr = (u32 *)mem;
  u32  i = (afl->fsrv.map_size >> 2);
  u32  ret = 0;

  while (i--) {

    u32 v = *(ptr++);

    /* This gets called on the inverse, virgin bitmap; optimize for sparse
       data. */

    if (v == 0xffffffff) {

      ret += 32;
      continue;

    }

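    /* Standard SWAR popcount (Hacker's Delight): the first line folds each
       pair of bits into a 2-bit count, the second sums pairs into 4-bit
       nibble counts, and the last line adds adjacent nibbles into per-byte
       counts and sums all four bytes via the 0x01010101 multiply, leaving
       the total in the top byte. E.g. v = 0x0000000f yields 4. */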
    v -= ((v >> 1) & 0x55555555);
    v = (v & 0x33333333) + ((v >> 2) & 0x33333333);
    ret += (((v + (v >> 4)) & 0xF0F0F0F) * 0x01010101) >> 24;

  }

  return ret;

}

/* Count the number of bytes set in the bitmap. Called fairly sporadically,
   mostly to update the status screen or calibrate and examine confirmed
   new paths. */

u32 count_bytes(afl_state_t *afl, u8 *mem) {

  u32 *ptr = (u32 *)mem;
  u32  i = (afl->fsrv.map_size >> 2);
  u32  ret = 0;

  while (i--) {

    u32 v = *(ptr++);

    if (!v) { continue; }
    if (v & 0x000000ffU) { ++ret; }
    if (v & 0x0000ff00U) { ++ret; }
    if (v & 0x00ff0000U) { ++ret; }
    if (v & 0xff000000U) { ++ret; }

  }

  return ret;

}

/* Count the number of non-255 bytes set in the bitmap. Used strictly for the
   status screen, several calls per second or so. */

u32 count_non_255_bytes(afl_state_t *afl, u8 *mem) {

  u32 *ptr = (u32 *)mem;
  u32  i = (afl->fsrv.map_size >> 2);
  u32  ret = 0;

  while (i--) {

    u32 v = *(ptr++);

    /* This is called on the virgin bitmap, so optimize for the most likely
       case. */

    if (v == 0xffffffffU) { continue; }
    if ((v & 0x000000ffU) != 0x000000ffU) { ++ret; }
    if ((v & 0x0000ff00U) != 0x0000ff00U) { ++ret; }
    if ((v & 0x00ff0000U) != 0x00ff0000U) { ++ret; }
    if ((v & 0xff000000U) != 0xff000000U) { ++ret; }

  }

  return ret;

}

/* Destructively simplify trace by eliminating hit count information
   and replacing it with 0x80 or 0x01 depending on whether the tuple
   is hit or not. Called on every new crash or timeout, should be
   reasonably fast. */
#define TIMES4(x) x, x, x, x
#define TIMES8(x) TIMES4(x), TIMES4(x)
#define TIMES16(x) TIMES8(x), TIMES8(x)
#define TIMES32(x) TIMES16(x), TIMES16(x)
#define TIMES64(x) TIMES32(x), TIMES32(x)
#define TIMES96(x) TIMES64(x), TIMES32(x)
#define TIMES128(x) TIMES64(x), TIMES64(x)
#define TIMES255(x)                                                      \
  TIMES64(x), TIMES64(x), TIMES64(x), TIMES32(x), TIMES16(x), TIMES8(x), \
      TIMES4(x), x, x, x
const u8 simplify_lookup[256] = {

    [0] = 1, [1] = TIMES255(128)

};
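
/* simplify_trace() feeds every trace byte through this table, collapsing a
   trace to 0x01 ("tuple not hit") and 0x80 ("hit at least once"). That binary
   form is what gets compared against virgin_tmout / virgin_crash to
   deduplicate hangs and crashes. */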

/* Destructively classify execution counts in a trace. This is used as a
   preprocessing step for any newly acquired traces. Called on every exec,
   must be fast. */

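/* The table below buckets raw edge hit counts so that small differences in
   loop counts do not register as new behavior:

     raw count:  0  1  2  3  4..7  8..15  16..31  32..127  128..255
     bucket:     0  1  2  4    8     16      32       64       128   */
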
const u8 count_class_lookup8[256] = {

    [0] = 0,
    [1] = 1,
    [2] = 2,
    [3] = 4,
    [4] = TIMES4(8),
    [8] = TIMES8(16),
    [16] = TIMES16(32),
    [32] = TIMES96(64),
    [128] = TIMES128(128)

};

#undef TIMES255
#undef TIMES128
#undef TIMES96
#undef TIMES64
#undef TIMES32
#undef TIMES16
#undef TIMES8
#undef TIMES4

u16 count_class_lookup16[65536];

void init_count_class16(void) {

  u32 b1, b2;

  for (b1 = 0; b1 < 256; b1++) {

    for (b2 = 0; b2 < 256; b2++) {

      count_class_lookup16[(b1 << 8) + b2] =
          (count_class_lookup8[b1] << 8) | count_class_lookup8[b2];

    }

  }

}
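
/* classify_counts() (in the coverage-*.h headers included below) indexes this
   wider table with 16 bits of the trace at a time, bucketing two hit counters
   per lookup; init_count_class16() just precomputes it from
   count_class_lookup8 at startup. */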

/* Import coverage processing routines. */

#ifdef WORD_SIZE_64
  #include "coverage-64.h"
#else
  #include "coverage-32.h"
#endif

/* Check if the current execution path brings anything new to the table.
   Update virgin bits to reflect the finds. Returns 1 if the only change is
   the hit-count for a particular tuple; 2 if there are new tuples seen.
   Updates the map, so subsequent calls will always return 0.

   This function is called after every exec() on a fairly large buffer, so
   it needs to be fast. We do this in 32-bit and 64-bit flavors. */

inline u8 has_new_bits(afl_state_t *afl, u8 *virgin_map) {

#ifdef WORD_SIZE_64

  u64 *current = (u64 *)afl->fsrv.trace_bits;
  u64 *virgin = (u64 *)virgin_map;

  u32 i = (afl->fsrv.map_size >> 3);

#else

  u32 *current = (u32 *)afl->fsrv.trace_bits;
  u32 *virgin = (u32 *)virgin_map;

  u32 i = (afl->fsrv.map_size >> 2);

#endif                                                     /* ^WORD_SIZE_64 */

  u8 ret = 0;
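
  /* discover_word() comes from the coverage-*.h header included above: for
     every word that still touches virgin bits it clears those bits in *virgin
     and raises ret to 2 when a brand-new tuple is seen, or to 1 when only a
     new hit count shows up. */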
  while (i--) {

    if (unlikely(*current)) discover_word(&ret, current, virgin);

    current++;
    virgin++;

  }

  if (unlikely(ret) && likely(virgin_map == afl->virgin_bits))
    afl->bitmap_changed = 1;

  return ret;

}

/* A combination of classify_counts and has_new_bits. If 0 is returned, then
 * the trace bits are kept as-is. Otherwise, the trace bits are overwritten
 * with classified values.
 *
 * This accelerates the processing: in most cases, no interesting behavior
 * happens, and the trace bits will be discarded soon. This function optimizes
 * for such cases: a one-pass scan over the trace bits without modifying
 * anything. Only in rare cases does it fall back to the slow path:
 * classify_counts() first, then return has_new_bits(). */

inline u8 has_new_bits_unclassified(afl_state_t *afl, u8 *virgin_map) {

  /* Handle the hot path first: no new coverage */
  u8 *end = afl->fsrv.trace_bits + afl->fsrv.map_size;

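  /* skim() classifies each word on the fly and returns non-zero as soon as a
     classified word overlaps still-virgin bits; it does not modify the trace
     or the virgin map, which keeps this hot path read-only. */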
#ifdef WORD_SIZE_64

  if (!skim((u64 *)virgin_map, (u64 *)afl->fsrv.trace_bits, (u64 *)end))
    return 0;

#else

  if (!skim((u32 *)virgin_map, (u32 *)afl->fsrv.trace_bits, (u32 *)end))
    return 0;

#endif                                                     /* ^WORD_SIZE_64 */
  classify_counts(&afl->fsrv);
  return has_new_bits(afl, virgin_map);

}

/* Compact trace bytes into a smaller bitmap. We effectively just drop the
   count information here. This is called only sporadically, for some
   new paths. */

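/* Note: dst is a bit vector with one bit per trace byte, so it must provide
   at least map_size / 8 bytes (rounded up) and is expected to be zeroed by
   the caller, since the loop below only ever ORs bits in. */
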
void minimize_bits(afl_state_t *afl, u8 *dst, u8 *src) {

  u32 i = 0;

  while (i < afl->fsrv.map_size) {

    if (*(src++)) { dst[i >> 3] |= 1 << (i & 7); }
    ++i;

  }

}

#ifndef SIMPLE_FILES

/* Construct a file name for a new test case, capturing the operation
   that led to its discovery. Returns a ptr to afl->describe_op_buf_256. */

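/* For example, a havoc-stage find spliced from another queue entry might come
   out as something like "src:000042+000066,time:123456,op:havoc,rep:16", with
   ",+cov" appended when the case also reached new coverage. */
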
u8 *describe_op(afl_state_t *afl, u8 new_bits, size_t max_description_len) {

  size_t real_max_len =
      MIN(max_description_len, sizeof(afl->describe_op_buf_256));
  u8 *ret = afl->describe_op_buf_256;

  if (unlikely(afl->syncing_party)) {

    sprintf(ret, "sync:%s,src:%06u", afl->syncing_party, afl->syncing_case);

  } else {

    sprintf(ret, "src:%06u", afl->current_entry);

    if (afl->splicing_with >= 0) {

      sprintf(ret + strlen(ret), "+%06d", afl->splicing_with);

    }

    sprintf(ret + strlen(ret), ",time:%llu",
            get_cur_time() + afl->prev_run_time - afl->start_time);

    if (afl->current_custom_fuzz &&
        afl->current_custom_fuzz->afl_custom_describe) {

      /* We are currently in a custom mutator that supports afl_custom_describe,
       * use it! */

      size_t len_current = strlen(ret);
      ret[len_current++] = ',';
      ret[len_current] = '\0';

      ssize_t size_left = real_max_len - len_current - strlen(",+cov") - 2;
      if (unlikely(size_left <= 0)) FATAL("filename got too long");

      const char *custom_description =
          afl->current_custom_fuzz->afl_custom_describe(
              afl->current_custom_fuzz->data, size_left);
      if (!custom_description || !custom_description[0]) {

        DEBUGF("Error getting a description from afl_custom_describe");
        /* Take the stage name as description fallback */
        sprintf(ret + len_current, "op:%s", afl->stage_short);

      } else {

        /* We got a proper custom description, use it */
        strncat(ret + len_current, custom_description, size_left);

      }

    } else {

      /* Normal testcase descriptions start here */
      sprintf(ret + strlen(ret), ",op:%s", afl->stage_short);

      if (afl->stage_cur_byte >= 0) {

        sprintf(ret + strlen(ret), ",pos:%d", afl->stage_cur_byte);

        if (afl->stage_val_type != STAGE_VAL_NONE) {

          sprintf(ret + strlen(ret), ",val:%s%+d",
                  (afl->stage_val_type == STAGE_VAL_BE) ? "be:" : "",
                  afl->stage_cur_val);

        }

      } else {

        sprintf(ret + strlen(ret), ",rep:%d", afl->stage_cur_val);

      }

    }

  }

  if (new_bits == 2) { strcat(ret, ",+cov"); }

  if (unlikely(strlen(ret) >= max_description_len))
    FATAL("describe string is too long");

  return ret;

}

#endif                                                     /* !SIMPLE_FILES */

/* Write a message accompanying the crash directory :-) */

void write_crash_readme(afl_state_t *afl) {

  u8    fn[PATH_MAX];
  s32   fd;
  FILE *f;

  u8 val_buf[STRINGIFY_VAL_SIZE_MAX];

  sprintf(fn, "%s/crashes/README.txt", afl->out_dir);

  fd = open(fn, O_WRONLY | O_CREAT | O_EXCL, DEFAULT_PERMISSION);

  /* Do not die on errors here - that would be impolite. */

  if (unlikely(fd < 0)) { return; }

  f = fdopen(fd, "w");

  if (unlikely(!f)) {

    close(fd);
    return;

  }

  fprintf(
      f,
      "Command line used to find this crash:\n\n"

      "%s\n\n"

      "If you can't reproduce a bug outside of afl-fuzz, be sure to set the "
      "same\n"
      "memory limit. The limit used for this fuzzing session was %s.\n\n"

      "Need a tool to minimize test cases before investigating the crashes or "
      "sending\n"
      "them to a vendor? Check out the afl-tmin that comes with the fuzzer!\n\n"

439       "Found any cool bugs in open-source tools using afl-fuzz? If yes, please "
440       "drop\n"
441       "an mail at <afl-users@googlegroups.com> once the issues are fixed\n\n"

      "  https://github.com/AFLplusplus/AFLplusplus\n\n",

      afl->orig_cmdline,
      stringify_mem_size(val_buf, sizeof(val_buf),
                         afl->fsrv.mem_limit << 20));      /* ignore errors */

  fclose(f);

}

/* Check if the result of an execve() during routine fuzzing is interesting,
   save or queue the input test case for further analysis if so. Returns 1 if
   entry is saved, 0 otherwise. */

u8 __attribute__((hot))
save_if_interesting(afl_state_t *afl, void *mem, u32 len, u8 fault) {

  if (unlikely(len == 0)) { return 0; }

  u8 *queue_fn = "";
  u8  new_bits = '\0';
  s32 fd;
  u8  keeping = 0, res, classified = 0;
  u64 cksum = 0;

  u8 fn[PATH_MAX];

  /* Update path frequency. */

  /* Generating a hash on every input is super expensive, so we only do it
     for the special schedules that need it (FAST ... RARE). */
  if (unlikely(afl->schedule >= FAST && afl->schedule <= RARE)) {

    cksum = hash64(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);

    /* Saturated increment */
    if (afl->n_fuzz[cksum % N_FUZZ_SIZE] < 0xFFFFFFFF)
      afl->n_fuzz[cksum % N_FUZZ_SIZE]++;

  }

  if (likely(fault == afl->crash_mode)) {

    /* Keep only if there are new bits in the map, add to queue for
       future fuzzing, etc. */

    new_bits = has_new_bits_unclassified(afl, afl->virgin_bits);

    if (likely(!new_bits)) {

      if (unlikely(afl->crash_mode)) { ++afl->total_crashes; }
      return 0;

    }

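    /* has_new_bits_unclassified() already ran classify_counts() on the way to
       a non-zero result, so remember that the trace is classified and avoid
       doing it again in the timeout/crash handling below. */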
    classified = new_bits;

#ifndef SIMPLE_FILES

    queue_fn = alloc_printf(
        "%s/queue/id:%06u,%s", afl->out_dir, afl->queued_paths,
        describe_op(afl, new_bits, NAME_MAX - strlen("id:000000,")));

#else

    queue_fn =
        alloc_printf("%s/queue/id_%06u", afl->out_dir, afl->queued_paths);

#endif                                                    /* ^!SIMPLE_FILES */
    fd = open(queue_fn, O_WRONLY | O_CREAT | O_EXCL, DEFAULT_PERMISSION);
    if (unlikely(fd < 0)) { PFATAL("Unable to create '%s'", queue_fn); }
    ck_write(fd, mem, len, queue_fn);
    close(fd);
    add_to_queue(afl, queue_fn, len, 0);

#ifdef INTROSPECTION
    if (afl->custom_mutators_count && afl->current_custom_fuzz) {

      LIST_FOREACH(&afl->custom_mutator_list, struct custom_mutator, {

        if (afl->current_custom_fuzz == el && el->afl_custom_introspection) {

          const char *ptr = el->afl_custom_introspection(el->data);

          if (ptr != NULL && *ptr != 0) {

            fprintf(afl->introspection_file, "QUEUE CUSTOM %s = %s\n", ptr,
                    afl->queue_top->fname);

          }

        }

      });

    } else if (afl->mutation[0] != 0) {

      fprintf(afl->introspection_file, "QUEUE %s = %s\n", afl->mutation,
              afl->queue_top->fname);

    }

#endif

    if (new_bits == 2) {

      afl->queue_top->has_new_cov = 1;
      ++afl->queued_with_cov;

    }

    /* AFLFast schedule? update the new queue entry */
    if (cksum) {

      afl->queue_top->n_fuzz_entry = cksum % N_FUZZ_SIZE;
      afl->n_fuzz[afl->queue_top->n_fuzz_entry] = 1;

    }

    /* Because classify_counts() modified the trace bytes, the checksum has to
       be recalculated over the classified map. */
    cksum = afl->queue_top->exec_cksum =
        hash64(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);

    /* Try to calibrate inline; this also calls update_bitmap_score() when
       successful. */

    res = calibrate_case(afl, afl->queue_top, mem, afl->queue_cycle - 1, 0);

    if (unlikely(res == FSRV_RUN_ERROR)) {

      FATAL("Unable to execute target application");

    }

    if (likely(afl->q_testcase_max_cache_size)) {

      queue_testcase_store_mem(afl, afl->queue_top, mem);

    }

    keeping = 1;

  }

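  /* Whether or not the case was queued above, crashes and hangs still get
     triaged below and written out to their own directories when they look
     unique. */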
  switch (fault) {

    case FSRV_RUN_TMOUT:

      /* Timeouts are not very interesting, but we're still obliged to keep
         a handful of samples. We use the presence of new bits in the
         hang-specific bitmap as a signal of uniqueness. In "non-instrumented"
         mode, we just keep everything. */

      ++afl->total_tmouts;

      if (afl->unique_hangs >= KEEP_UNIQUE_HANG) { return keeping; }

      if (likely(!afl->non_instrumented_mode)) {

        if (!classified) {

          classify_counts(&afl->fsrv);
          classified = 1;

        }

        simplify_trace(afl, afl->fsrv.trace_bits);

        if (!has_new_bits(afl, afl->virgin_tmout)) { return keeping; }

      }

      ++afl->unique_tmouts;
#ifdef INTROSPECTION
      if (afl->custom_mutators_count && afl->current_custom_fuzz) {

        LIST_FOREACH(&afl->custom_mutator_list, struct custom_mutator, {

          if (afl->current_custom_fuzz == el && el->afl_custom_introspection) {

            const char *ptr = el->afl_custom_introspection(el->data);

            if (ptr != NULL && *ptr != 0) {

              fprintf(afl->introspection_file,
                      "UNIQUE_TIMEOUT CUSTOM %s = %s\n", ptr,
                      afl->queue_top->fname);

            }

          }

        });

      } else if (afl->mutation[0] != 0) {

        fprintf(afl->introspection_file, "UNIQUE_TIMEOUT %s\n", afl->mutation);

      }

#endif

      /* Before saving, we make sure that it's a genuine hang by re-running
         the target with a more generous timeout (unless the default timeout
         is already generous). */

      if (afl->fsrv.exec_tmout < afl->hang_tmout) {

        u8 new_fault;
        write_to_testcase(afl, mem, len);
        new_fault = fuzz_run_target(afl, &afl->fsrv, afl->hang_tmout);
        classify_counts(&afl->fsrv);

        /* A corner case that one user reported bumping into: increasing the
           timeout actually uncovers a crash. Make sure we don't discard it if
           so. */

        if (!afl->stop_soon && new_fault == FSRV_RUN_CRASH) {

          goto keep_as_crash;

        }

        if (afl->stop_soon || new_fault != FSRV_RUN_TMOUT) { return keeping; }

      }

#ifndef SIMPLE_FILES

      snprintf(fn, PATH_MAX, "%s/hangs/id:%06llu,%s", afl->out_dir,
               afl->unique_hangs,
               describe_op(afl, 0, NAME_MAX - strlen("id:000000,")));

#else

      snprintf(fn, PATH_MAX, "%s/hangs/id_%06llu", afl->out_dir,
               afl->unique_hangs);

#endif                                                    /* ^!SIMPLE_FILES */

      ++afl->unique_hangs;

      afl->last_hang_time = get_cur_time();

      break;

    case FSRV_RUN_CRASH:

    keep_as_crash:

      /* This is handled in a manner roughly similar to timeouts,
         except for slightly different limits and no need to re-run test
         cases. */

      ++afl->total_crashes;

      if (afl->unique_crashes >= KEEP_UNIQUE_CRASH) { return keeping; }

      if (likely(!afl->non_instrumented_mode)) {

        if (!classified) { classify_counts(&afl->fsrv); }

        simplify_trace(afl, afl->fsrv.trace_bits);

        if (!has_new_bits(afl, afl->virgin_crash)) { return keeping; }

      }

      if (unlikely(!afl->unique_crashes)) { write_crash_readme(afl); }

#ifndef SIMPLE_FILES

      snprintf(fn, PATH_MAX, "%s/crashes/id:%06llu,sig:%02u,%s", afl->out_dir,
               afl->unique_crashes, afl->fsrv.last_kill_signal,
               describe_op(afl, 0, NAME_MAX - strlen("id:000000,sig:00,")));

#else

      snprintf(fn, PATH_MAX, "%s/crashes/id_%06llu_%02u", afl->out_dir,
               afl->unique_crashes, afl->fsrv.last_kill_signal);

#endif                                                    /* ^!SIMPLE_FILES */

      ++afl->unique_crashes;
#ifdef INTROSPECTION
      if (afl->custom_mutators_count && afl->current_custom_fuzz) {

        LIST_FOREACH(&afl->custom_mutator_list, struct custom_mutator, {

          if (afl->current_custom_fuzz == el && el->afl_custom_introspection) {

            const char *ptr = el->afl_custom_introspection(el->data);

            if (ptr != NULL && *ptr != 0) {

              fprintf(afl->introspection_file, "UNIQUE_CRASH CUSTOM %s = %s\n",
                      ptr, afl->queue_top->fname);

            }

          }

        });

      } else if (afl->mutation[0] != 0) {

        fprintf(afl->introspection_file, "UNIQUE_CRASH %s\n", afl->mutation);

      }

#endif
      if (unlikely(afl->infoexec)) {

        // if the user wants to be informed on new crashes - do that
#if !TARGET_OS_IPHONE
        // we don't care if system() fails, but we don't want a
        // compiler warning either
        // See
        // https://stackoverflow.com/questions/11888594/ignoring-return-values-in-c
        (void)(system(afl->infoexec) + 1);
#else
        WARNF("command execution unsupported");
#endif

      }

      afl->last_crash_time = get_cur_time();
      afl->last_crash_execs = afl->fsrv.total_execs;

      break;

    case FSRV_RUN_ERROR:
      FATAL("Unable to execute target application");

    default:
      return keeping;

  }

  /* If we're here, we apparently want to save the crash or hang
     test case, too. */

  fd = open(fn, O_WRONLY | O_CREAT | O_EXCL, DEFAULT_PERMISSION);
  if (unlikely(fd < 0)) { PFATAL("Unable to create '%s'", fn); }
  ck_write(fd, mem, len, fn);
  close(fd);

  return keeping;

}
