1 //===-- hwasan_report.cpp -------------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file is a part of HWAddressSanitizer.
10 //
11 // Error reporting.
12 //===----------------------------------------------------------------------===//
13 
14 #include "hwasan_report.h"
15 
16 #include <dlfcn.h>
17 
18 #include "hwasan.h"
19 #include "hwasan_allocator.h"
20 #include "hwasan_globals.h"
21 #include "hwasan_mapping.h"
22 #include "hwasan_thread.h"
23 #include "hwasan_thread_list.h"
24 #include "sanitizer_common/sanitizer_allocator_internal.h"
25 #include "sanitizer_common/sanitizer_common.h"
26 #include "sanitizer_common/sanitizer_flags.h"
27 #include "sanitizer_common/sanitizer_mutex.h"
28 #include "sanitizer_common/sanitizer_report_decorator.h"
29 #include "sanitizer_common/sanitizer_stackdepot.h"
30 #include "sanitizer_common/sanitizer_stacktrace_printer.h"
31 #include "sanitizer_common/sanitizer_symbolizer.h"
32 
33 using namespace __sanitizer;
34 
35 namespace __hwasan {
36 
// RAII helper that owns one error report. Construction serializes reports
// (via the ScopedErrorReportLock member) and publishes a buffer that captures
// everything printed during the report; destruction hands the accumulated
// text to the user callback / abort message and, for fatal reports,
// terminates the process.
class ScopedReport {
 public:
  ScopedReport(bool fatal = false) : error_message_(1), fatal(fatal) {
    BlockingMutexLock lock(&error_message_lock_);
    // Publish the buffer so AppendToErrorMessageBuffer() can reach it.
    // NOTE(review): only fatal reports accumulate text here, so for a
    // non-fatal report the callback below receives an empty string --
    // confirm that is intended.
    error_message_ptr_ = fatal ? &error_message_ : nullptr;
    ++hwasan_report_count;
  }

  ~ScopedReport() {
    void (*report_cb)(const char *);
    {
      // Unpublish the buffer and snapshot the callback under the lock; the
      // callback itself is invoked outside the lock.
      BlockingMutexLock lock(&error_message_lock_);
      report_cb = error_report_callback_;
      error_message_ptr_ = nullptr;
    }
    if (report_cb)
      report_cb(error_message_.data());
    if (fatal)
      SetAbortMessage(error_message_.data());
    // print_module_map >= 2 dumps on every report; == 1 only on fatal ones.
    if (common_flags()->print_module_map >= 2 ||
        (fatal && common_flags()->print_module_map))
      DumpProcessMap();
    if (fatal)
      Die();
  }

  // Appends |msg| to the currently active report's buffer, if any.
  static void MaybeAppendToErrorMessage(const char *msg) {
    BlockingMutexLock lock(&error_message_lock_);
    if (!error_message_ptr_)
      return;
    uptr len = internal_strlen(msg);
    uptr old_size = error_message_ptr_->size();
    error_message_ptr_->resize(old_size + len);
    // overwrite old trailing '\0', keep new trailing '\0' untouched.
    internal_memcpy(&(*error_message_ptr_)[old_size - 1], msg, len);
  }

  // Installs the callback that receives the full report text in ~ScopedReport.
  static void SetErrorReportCallback(void (*callback)(const char *)) {
    BlockingMutexLock lock(&error_message_lock_);
    error_report_callback_ = callback;
  }

 private:
  ScopedErrorReportLock error_report_lock_;  // serializes whole reports
  InternalMmapVector<char> error_message_;   // NUL-terminated report text
  bool fatal;                                // Die() at end of report?

  // Buffer of the active report (null when none). Both statics below are
  // guarded by error_message_lock_.
  static InternalMmapVector<char> *error_message_ptr_;
  static BlockingMutex error_message_lock_;
  static void (*error_report_callback_)(const char *);
};
88 
// Storage for ScopedReport's static state (zero-initialized).
InternalMmapVector<char> *ScopedReport::error_message_ptr_;
BlockingMutex ScopedReport::error_message_lock_;
void (*ScopedReport::error_report_callback_)(const char *);

// If there is an active ScopedReport, append to its error message.
void AppendToErrorMessageBuffer(const char *buffer) {
  ScopedReport::MaybeAppendToErrorMessage(buffer);
}
97 
98 static StackTrace GetStackTraceFromId(u32 id) {
99   CHECK(id);
100   StackTrace res = StackDepotGet(id);
101   CHECK(res.trace);
102   return res;
103 }
104 
// A RAII object that holds a copy of the current thread stack ring buffer.
// The actual stack buffer may change while we are iterating over it (for
// example, Printf may call syslog() which can itself be built with hwasan).
class SavedStackAllocations {
 public:
  SavedStackAllocations(StackAllocationsRingBuffer *rb) {
    uptr size = rb->size() * sizeof(uptr);
    // NOTE(review): the 2x-size alignment appears to be required by the
    // ring buffer's storage layout -- confirm against RingBuffer.
    void *storage =
        MmapAlignedOrDieOnFatalError(size, size * 2, "saved stack allocations");
    // Copy-construct the ring buffer into rb_ over the fresh mapping.
    new (&rb_) StackAllocationsRingBuffer(*rb, storage);
  }

  ~SavedStackAllocations() {
    // Release the mapping created in the constructor.
    StackAllocationsRingBuffer *rb = get();
    UnmapOrDie(rb->StartOfStorage(), rb->size() * sizeof(uptr));
  }

  // The object was placement-new'ed into rb_; reinterpret it back.
  StackAllocationsRingBuffer *get() {
    return (StackAllocationsRingBuffer *)&rb_;
  }

 private:
  // Raw storage for the StackAllocationsRingBuffer object itself.
  uptr rb_;
};
129 
130 class Decorator: public __sanitizer::SanitizerCommonDecorator {
131  public:
132   Decorator() : SanitizerCommonDecorator() { }
133   const char *Access() { return Blue(); }
134   const char *Allocation() const { return Magenta(); }
135   const char *Origin() const { return Magenta(); }
136   const char *Name() const { return Green(); }
137   const char *Location() { return Green(); }
138   const char *Thread() { return Green(); }
139 };
140 
141 static bool FindHeapAllocation(HeapAllocationsRingBuffer *rb, uptr tagged_addr,
142                                HeapAllocationRecord *har, uptr *ring_index,
143                                uptr *num_matching_addrs,
144                                uptr *num_matching_addrs_4b) {
145   if (!rb) return false;
146 
147   *num_matching_addrs = 0;
148   *num_matching_addrs_4b = 0;
149   for (uptr i = 0, size = rb->size(); i < size; i++) {
150     auto h = (*rb)[i];
151     if (h.tagged_addr <= tagged_addr &&
152         h.tagged_addr + h.requested_size > tagged_addr) {
153       *har = h;
154       *ring_index = i;
155       return true;
156     }
157 
158     // Measure the number of heap ring buffer entries that would have matched
159     // if we had only one entry per address (e.g. if the ring buffer data was
160     // stored at the address itself). This will help us tune the allocator
161     // implementation for MTE.
162     if (UntagAddr(h.tagged_addr) <= UntagAddr(tagged_addr) &&
163         UntagAddr(h.tagged_addr) + h.requested_size > UntagAddr(tagged_addr)) {
164       ++*num_matching_addrs;
165     }
166 
167     // Measure the number of heap ring buffer entries that would have matched
168     // if we only had 4 tag bits, which is the case for MTE.
169     auto untag_4b = [](uptr p) {
170       return p & ((1ULL << 60) - 1);
171     };
172     if (untag_4b(h.tagged_addr) <= untag_4b(tagged_addr) &&
173         untag_4b(h.tagged_addr) + h.requested_size > untag_4b(tagged_addr)) {
174       ++*num_matching_addrs_4b;
175     }
176   }
177   return false;
178 }
179 
// Walks the stack ring buffer |sa| and prints stack objects whose tag matches
// |addr_tag| (when frame symbolization is available). If none are found,
// falls back to dumping the raw records for offline analysis.
static void PrintStackAllocations(StackAllocationsRingBuffer *sa,
                                  tag_t addr_tag, uptr untagged_addr) {
  uptr frames = Min((uptr)flags()->stack_history_size, sa->size());
  bool found_local = false;
  for (uptr i = 0; i < frames; i++) {
    const uptr *record_addr = &(*sa)[i];
    uptr record = *record_addr;
    // A zero record marks the end of the used portion of the buffer.
    if (!record)
      break;
    // The frame's base tag is derived from the ring-buffer slot address.
    tag_t base_tag =
        reinterpret_cast<uptr>(record_addr) >> kRecordAddrBaseTagShift;
    // Each record packs the frame pointer in the high bits and the PC in the
    // low kRecordFPShift bits.
    uptr fp = (record >> kRecordFPShift) << kRecordFPLShift;
    uptr pc_mask = (1ULL << kRecordFPShift) - 1;
    uptr pc = record & pc_mask;
    FrameInfo frame;
    if (Symbolizer::GetOrInit()->SymbolizeFrame(pc, &frame)) {
      for (LocalInfo &local : frame.locals) {
        // Skip locals lacking the debug info needed to locate/tag them.
        if (!local.has_frame_offset || !local.has_size || !local.has_tag_offset)
          continue;
        tag_t obj_tag = base_tag ^ local.tag_offset;
        if (obj_tag != addr_tag)
          continue;
        // Calculate the offset from the object address to the faulting
        // address. Because we only store bits 4-19 of FP (bits 0-3 are
        // guaranteed to be zero), the calculation is performed mod 2^20 and may
        // harmlessly underflow if the address mod 2^20 is below the object
        // address.
        uptr obj_offset =
            (untagged_addr - fp - local.frame_offset) & (kRecordFPModulus - 1);
        if (obj_offset >= local.size)
          continue;
        if (!found_local) {
          Printf("Potentially referenced stack objects:\n");
          found_local = true;
        }
        Printf("  %s in %s %s:%d\n", local.name, local.function_name,
               local.decl_file, local.decl_line);
      }
      frame.Clear();
    }
  }

  if (found_local)
    return;

  // We didn't find any locals. Most likely we don't have symbols, so dump
  // the information that we have for offline analysis.
  InternalScopedString frame_desc;
  Printf("Previously allocated frames:\n");
  for (uptr i = 0; i < frames; i++) {
    const uptr *record_addr = &(*sa)[i];
    uptr record = *record_addr;
    if (!record)
      break;
    // NOTE(review): this path masks the PC with 2^48-1 rather than
    // kRecordFPShift as above -- confirm the difference is intentional.
    uptr pc_mask = (1ULL << 48) - 1;
    uptr pc = record & pc_mask;
    frame_desc.append("  record_addr:0x%zx record:0x%zx",
                      reinterpret_cast<uptr>(record_addr), record);
    if (SymbolizedStack *frame = Symbolizer::GetOrInit()->SymbolizePC(pc)) {
      RenderFrame(&frame_desc, " %F %L", 0, frame->info.address, &frame->info,
                  common_flags()->symbolize_vs_style,
                  common_flags()->strip_path_prefix);
      frame->ClearAll();
    }
    Printf("%s\n", frame_desc.data());
    frame_desc.clear();
  }
}
248 
249 // Returns true if tag == *tag_ptr, reading tags from short granules if
250 // necessary. This may return a false positive if tags 1-15 are used as a
251 // regular tag rather than a short granule marker.
252 static bool TagsEqual(tag_t tag, tag_t *tag_ptr) {
253   if (tag == *tag_ptr)
254     return true;
255   if (*tag_ptr == 0 || *tag_ptr > kShadowAlignment - 1)
256     return false;
257   uptr mem = ShadowToMem(reinterpret_cast<uptr>(tag_ptr));
258   tag_t inline_tag = *reinterpret_cast<tag_t *>(mem + kShadowAlignment - 1);
259   return tag == inline_tag;
260 }
261 
// HWASan globals store the size of the global in the descriptor. In cases where
// we don't have a binary with symbols, we can't grab the size of the global
// from the debug info - but we might be able to retrieve it from the
// descriptor. Returns zero if the lookup failed.
static uptr GetGlobalSizeFromDescriptor(uptr ptr) {
  // Find the ELF object that this global resides in.
  Dl_info info;
  if (dladdr(reinterpret_cast<void *>(ptr), &info) == 0)
    return 0;
  // dli_fbase points at the mapped ELF header; program headers follow at
  // e_phoff.
  auto *ehdr = reinterpret_cast<const ElfW(Ehdr) *>(info.dli_fbase);
  auto *phdr_begin = reinterpret_cast<const ElfW(Phdr) *>(
      reinterpret_cast<const u8 *>(ehdr) + ehdr->e_phoff);

  // Get the load bias. This is normally the same as the dli_fbase address on
  // position-independent code, but can be different on non-PIE executables,
  // binaries using LLD's partitioning feature, or binaries compiled with a
  // linker script.
  ElfW(Addr) load_bias = 0;
  for (const auto &phdr :
       ArrayRef<const ElfW(Phdr)>(phdr_begin, phdr_begin + ehdr->e_phnum)) {
    // Use the PT_LOAD segment that maps file offset 0 (the ELF header).
    if (phdr.p_type != PT_LOAD || phdr.p_offset != 0)
      continue;
    load_bias = reinterpret_cast<ElfW(Addr)>(ehdr) - phdr.p_vaddr;
    break;
  }

  // Walk all globals in this ELF object, looking for the one we're interested
  // in. Once we find it, we can stop iterating and return the size of the
  // global we're interested in.
  for (const hwasan_global &global :
       HwasanGlobalsFor(load_bias, phdr_begin, ehdr->e_phnum))
    if (global.addr() <= ptr && ptr < global.addr() + global.size())
      return global.size();

  return 0;
}
298 
// Describes |untagged_addr| relative to the object whose shadow entry
// |candidate| matched the pointer tag: either a live heap chunk
// (heap-buffer-overflow) or, failing that, a global in a loaded module
// (global-overflow). |left|/|right| tell on which side of the address the
// candidate was found, which determines the reported direction.
static void ShowHeapOrGlobalCandidate(uptr untagged_addr, tag_t *candidate,
                                      tag_t *left, tag_t *right) {
  Decorator d;
  uptr mem = ShadowToMem(reinterpret_cast<uptr>(candidate));
  HwasanChunkView chunk = FindHeapChunkByAddress(mem);
  if (chunk.IsAllocated()) {
    uptr offset;
    const char *whence;
    if (untagged_addr < chunk.End() && untagged_addr >= chunk.Beg()) {
      offset = untagged_addr - chunk.Beg();
      whence = "inside";
    } else if (candidate == left) {
      // Candidate to the left => the access fell past the chunk's end.
      offset = untagged_addr - chunk.End();
      whence = "to the right of";
    } else {
      offset = chunk.Beg() - untagged_addr;
      whence = "to the left of";
    }
    Printf("%s", d.Error());
    Printf("\nCause: heap-buffer-overflow\n");
    Printf("%s", d.Default());
    Printf("%s", d.Location());
    Printf("%p is located %zd bytes %s %zd-byte region [%p,%p)\n",
           untagged_addr, offset, whence, chunk.UsedSize(), chunk.Beg(),
           chunk.End());
    Printf("%s", d.Allocation());
    Printf("allocated here:\n");
    Printf("%s", d.Default());
    GetStackTraceFromId(chunk.GetAllocStackId()).Print();
    return;
  }
  // Check whether the address points into a loaded library. If so, this is
  // most likely a global variable.
  const char *module_name;
  uptr module_address;
  Symbolizer *sym = Symbolizer::GetOrInit();
  if (sym->GetModuleNameAndOffsetForPC(mem, &module_name, &module_address)) {
    Printf("%s", d.Error());
    Printf("\nCause: global-overflow\n");
    Printf("%s", d.Default());
    DataInfo info;
    Printf("%s", d.Location());
    if (sym->SymbolizeData(mem, &info) && info.start) {
      Printf(
          "%p is located %zd bytes to the %s of %zd-byte global variable "
          "%s [%p,%p) in %s\n",
          untagged_addr,
          candidate == left ? untagged_addr - (info.start + info.size)
                            : info.start - untagged_addr,
          candidate == left ? "right" : "left", info.size, info.name,
          info.start, info.start + info.size, module_name);
    } else {
      // No symbol info; fall back to the size stored in the HWASan global
      // descriptor, if present.
      uptr size = GetGlobalSizeFromDescriptor(mem);
      if (size == 0)
        // We couldn't find the size of the global from the descriptors.
        Printf("%p is located to the %s of a global variable in (%s+0x%x)\n",
               untagged_addr, candidate == left ? "right" : "left", module_name,
               module_address);
      else
        Printf(
            "%p is located to the %s of a %zd-byte global variable in "
            "(%s+0x%x)\n",
            untagged_addr, candidate == left ? "right" : "left", size,
            module_name, module_address);
    }
    Printf("%s", d.Default());
  }
}
367 
368 void PrintAddressDescription(
369     uptr tagged_addr, uptr access_size,
370     StackAllocationsRingBuffer *current_stack_allocations) {
371   Decorator d;
372   int num_descriptions_printed = 0;
373   uptr untagged_addr = UntagAddr(tagged_addr);
374 
375   // Print some very basic information about the address, if it's a heap.
376   HwasanChunkView chunk = FindHeapChunkByAddress(untagged_addr);
377   if (uptr beg = chunk.Beg()) {
378     uptr size = chunk.ActualSize();
379     Printf("%s[%p,%p) is a %s %s heap chunk; "
380            "size: %zd offset: %zd\n%s",
381            d.Location(),
382            beg, beg + size,
383            chunk.FromSmallHeap() ? "small" : "large",
384            chunk.IsAllocated() ? "allocated" : "unallocated",
385            size, untagged_addr - beg,
386            d.Default());
387   }
388 
389   tag_t addr_tag = GetTagFromPointer(tagged_addr);
390 
391   bool on_stack = false;
392   // Check stack first. If the address is on the stack of a live thread, we
393   // know it cannot be a heap / global overflow.
394   hwasanThreadList().VisitAllLiveThreads([&](Thread *t) {
395     if (t->AddrIsInStack(untagged_addr)) {
396       on_stack = true;
397       // TODO(fmayer): figure out how to distinguish use-after-return and
398       // stack-buffer-overflow.
399       Printf("%s", d.Error());
400       Printf("\nCause: stack tag-mismatch\n");
401       Printf("%s", d.Location());
402       Printf("Address %p is located in stack of thread T%zd\n", untagged_addr,
403              t->unique_id());
404       Printf("%s", d.Default());
405       t->Announce();
406 
407       auto *sa = (t == GetCurrentThread() && current_stack_allocations)
408                      ? current_stack_allocations
409                      : t->stack_allocations();
410       PrintStackAllocations(sa, addr_tag, untagged_addr);
411       num_descriptions_printed++;
412     }
413   });
414 
415   // Check if this looks like a heap buffer overflow by scanning
416   // the shadow left and right and looking for the first adjacent
417   // object with a different memory tag. If that tag matches addr_tag,
418   // check the allocator if it has a live chunk there.
419   tag_t *tag_ptr = reinterpret_cast<tag_t*>(MemToShadow(untagged_addr));
420   tag_t *candidate = nullptr, *left = tag_ptr, *right = tag_ptr;
421   uptr candidate_distance = 0;
422   for (; candidate_distance < 1000; candidate_distance++) {
423     if (MemIsShadow(reinterpret_cast<uptr>(left)) &&
424         TagsEqual(addr_tag, left)) {
425       candidate = left;
426       break;
427     }
428     --left;
429     if (MemIsShadow(reinterpret_cast<uptr>(right)) &&
430         TagsEqual(addr_tag, right)) {
431       candidate = right;
432       break;
433     }
434     ++right;
435   }
436 
437   constexpr auto kCloseCandidateDistance = 1;
438 
439   if (!on_stack && candidate && candidate_distance <= kCloseCandidateDistance) {
440     ShowHeapOrGlobalCandidate(untagged_addr, candidate, left, right);
441     num_descriptions_printed++;
442   }
443 
444   hwasanThreadList().VisitAllLiveThreads([&](Thread *t) {
445     // Scan all threads' ring buffers to find if it's a heap-use-after-free.
446     HeapAllocationRecord har;
447     uptr ring_index, num_matching_addrs, num_matching_addrs_4b;
448     if (FindHeapAllocation(t->heap_allocations(), tagged_addr, &har,
449                            &ring_index, &num_matching_addrs,
450                            &num_matching_addrs_4b)) {
451       Printf("%s", d.Error());
452       Printf("\nCause: use-after-free\n");
453       Printf("%s", d.Location());
454       Printf("%p is located %zd bytes inside of %zd-byte region [%p,%p)\n",
455              untagged_addr, untagged_addr - UntagAddr(har.tagged_addr),
456              har.requested_size, UntagAddr(har.tagged_addr),
457              UntagAddr(har.tagged_addr) + har.requested_size);
458       Printf("%s", d.Allocation());
459       Printf("freed by thread T%zd here:\n", t->unique_id());
460       Printf("%s", d.Default());
461       GetStackTraceFromId(har.free_context_id).Print();
462 
463       Printf("%s", d.Allocation());
464       Printf("previously allocated here:\n", t);
465       Printf("%s", d.Default());
466       GetStackTraceFromId(har.alloc_context_id).Print();
467 
468       // Print a developer note: the index of this heap object
469       // in the thread's deallocation ring buffer.
470       Printf("hwasan_dev_note_heap_rb_distance: %zd %zd\n", ring_index + 1,
471              flags()->heap_history_size);
472       Printf("hwasan_dev_note_num_matching_addrs: %zd\n", num_matching_addrs);
473       Printf("hwasan_dev_note_num_matching_addrs_4b: %zd\n",
474              num_matching_addrs_4b);
475 
476       t->Announce();
477       num_descriptions_printed++;
478     }
479   });
480 
481   if (candidate && num_descriptions_printed == 0) {
482     ShowHeapOrGlobalCandidate(untagged_addr, candidate, left, right);
483     num_descriptions_printed++;
484   }
485 
486   // Print the remaining threads, as an extra information, 1 line per thread.
487   hwasanThreadList().VisitAllLiveThreads([&](Thread *t) { t->Announce(); });
488 
489   if (!num_descriptions_printed)
490     // We exhausted our possibilities. Bail out.
491     Printf("HWAddressSanitizer can not describe address in more detail.\n");
492   if (num_descriptions_printed > 1) {
493     Printf(
494         "There are %d potential causes, printed above in order "
495         "of likeliness.\n",
496         num_descriptions_printed);
497   }
498 }
499 
// HWASan keeps no tool-specific statistics; present to satisfy the common
// sanitizer interface.
void ReportStats() {}
501 
502 static void PrintTagInfoAroundAddr(tag_t *tag_ptr, uptr num_rows,
503                                    void (*print_tag)(InternalScopedString &s,
504                                                      tag_t *tag)) {
505   const uptr row_len = 16;  // better be power of two.
506   tag_t *center_row_beg = reinterpret_cast<tag_t *>(
507       RoundDownTo(reinterpret_cast<uptr>(tag_ptr), row_len));
508   tag_t *beg_row = center_row_beg - row_len * (num_rows / 2);
509   tag_t *end_row = center_row_beg + row_len * ((num_rows + 1) / 2);
510   InternalScopedString s;
511   for (tag_t *row = beg_row; row < end_row; row += row_len) {
512     s.append("%s", row == center_row_beg ? "=>" : "  ");
513     s.append("%p:", row);
514     for (uptr i = 0; i < row_len; i++) {
515       s.append("%s", row + i == tag_ptr ? "[" : " ");
516       print_tag(s, &row[i]);
517       s.append("%s", row + i == tag_ptr ? "]" : " ");
518     }
519     s.append("\n");
520   }
521   Printf("%s", s.data());
522 }
523 
// Prints two shadow dumps centered on |tag_ptr|: the memory tags themselves,
// and the short-granule tags stored in the last byte of each short granule,
// followed by a pointer to the design documentation.
static void PrintTagsAroundAddr(tag_t *tag_ptr) {
  Printf(
      "Memory tags around the buggy address (one tag corresponds to %zd "
      "bytes):\n", kShadowAlignment);
  PrintTagInfoAroundAddr(tag_ptr, 17, [](InternalScopedString &s, tag_t *tag) {
    s.append("%02x", *tag);
  });

  Printf(
      "Tags for short granules around the buggy address (one tag corresponds "
      "to %zd bytes):\n",
      kShadowAlignment);
  PrintTagInfoAroundAddr(tag_ptr, 3, [](InternalScopedString &s, tag_t *tag) {
    // Shadow values in [1, kShadowAlignment] are treated as short-granule
    // markers; the real tag then lives in the granule's last byte.
    if (*tag >= 1 && *tag <= kShadowAlignment) {
      uptr granule_addr = ShadowToMem(reinterpret_cast<uptr>(tag));
      s.append("%02x",
               *reinterpret_cast<u8 *>(granule_addr + kShadowAlignment - 1));
    } else {
      s.append("..");
    }
  });
  Printf(
      "See "
      "https://clang.llvm.org/docs/"
      "HardwareAssistedAddressSanitizerDesign.html#short-granules for a "
      "description of short granule tags\n");
}
551 
// Reports a deallocation of a pointer whose tag does not match the memory's
// tag (e.g. double free or free of an untracked/corrupted pointer).
void ReportInvalidFree(StackTrace *stack, uptr tagged_addr) {
  // The report is fatal (Die() in ~ScopedReport) iff halt_on_error is set.
  ScopedReport R(flags()->halt_on_error);

  uptr untagged_addr = UntagAddr(tagged_addr);
  tag_t ptr_tag = GetTagFromPointer(tagged_addr);
  tag_t *tag_ptr = reinterpret_cast<tag_t*>(MemToShadow(untagged_addr));
  tag_t mem_tag = *tag_ptr;
  Decorator d;
  Printf("%s", d.Error());
  // Attribute the error to the top frame of the free() call stack.
  uptr pc = stack->size ? stack->trace[0] : 0;
  const char *bug_type = "invalid-free";
  Report("ERROR: %s: %s on address %p at pc %p\n", SanitizerToolName, bug_type,
         untagged_addr, pc);
  Printf("%s", d.Access());
  Printf("tags: %02x/%02x (ptr/mem)\n", ptr_tag, mem_tag);
  Printf("%s", d.Default());

  stack->Print();

  PrintAddressDescription(tagged_addr, 0, nullptr);

  PrintTagsAroundAddr(tag_ptr);

  ReportErrorSummary(bug_type, stack);
}
577 
// Reports that the magic bytes placed after a heap object (the unused tail of
// its last granule) were overwritten; detected at deallocation time.
// |expected| holds the magic bytes that should still be in the tail.
void ReportTailOverwritten(StackTrace *stack, uptr tagged_addr, uptr orig_size,
                           const u8 *expected) {
  // Bytes between the end of the object and the end of its last granule.
  // The CHECKs below imply callers only pass sizes that are not
  // granule-aligned -- confirm at call sites.
  uptr tail_size = kShadowAlignment - (orig_size % kShadowAlignment);
  ScopedReport R(flags()->halt_on_error);
  Decorator d;
  uptr untagged_addr = UntagAddr(tagged_addr);
  Printf("%s", d.Error());
  const char *bug_type = "allocation-tail-overwritten";
  Report("ERROR: %s: %s; heap object [%p,%p) of size %zd\n", SanitizerToolName,
         bug_type, untagged_addr, untagged_addr + orig_size, orig_size);
  Printf("\n%s", d.Default());
  Printf(
      "Stack of invalid access unknown. Issue detected at deallocation "
      "time.\n");
  Printf("%s", d.Allocation());
  Printf("deallocated here:\n");
  Printf("%s", d.Default());
  stack->Print();
  HwasanChunkView chunk = FindHeapChunkByAddress(untagged_addr);
  if (chunk.Beg()) {
    Printf("%s", d.Allocation());
    Printf("allocated here:\n");
    Printf("%s", d.Default());
    GetStackTraceFromId(chunk.GetAllocStackId()).Print();
  }

  // Render three aligned lines: actual tail bytes, expected bytes, and "^^"
  // markers under the bytes that differ.
  InternalScopedString s;
  CHECK_GT(tail_size, 0U);
  CHECK_LT(tail_size, kShadowAlignment);
  u8 *tail = reinterpret_cast<u8*>(untagged_addr + orig_size);
  s.append("Tail contains: ");
  for (uptr i = 0; i < kShadowAlignment - tail_size; i++)
    s.append(".. ");
  for (uptr i = 0; i < tail_size; i++)
    s.append("%02x ", tail[i]);
  s.append("\n");
  s.append("Expected:      ");
  for (uptr i = 0; i < kShadowAlignment - tail_size; i++)
    s.append(".. ");
  for (uptr i = 0; i < tail_size; i++)
    s.append("%02x ", expected[i]);
  s.append("\n");
  s.append("               ");
  for (uptr i = 0; i < kShadowAlignment - tail_size; i++)
    s.append("   ");
  for (uptr i = 0; i < tail_size; i++)
    s.append("%s ", expected[i] != tail[i] ? "^^" : "  ");

  s.append("\nThis error occurs when a buffer overflow overwrites memory\n"
    "to the right of a heap object, but within the %zd-byte granule, e.g.\n"
    "   char *x = new char[20];\n"
    "   x[25] = 42;\n"
    "%s does not detect such bugs in uninstrumented code at the time of write,"
    "\nbut can detect them at the time of free/delete.\n"
    "To disable this feature set HWASAN_OPTIONS=free_checks_tail_magic=0\n",
    kShadowAlignment, SanitizerToolName);
  Printf("%s", s.data());
  GetCurrentThread()->Announce();

  tag_t *tag_ptr = reinterpret_cast<tag_t*>(MemToShadow(untagged_addr));
  PrintTagsAroundAddr(tag_ptr);

  ReportErrorSummary(bug_type, stack);
}
642 
// Main entry point of the check-failure path: reports a load/store whose
// pointer tag did not match the memory tag. |registers_frame|, when non-null,
// points to the register dump saved by __hwasan_tag_mismatch (aarch64).
void ReportTagMismatch(StackTrace *stack, uptr tagged_addr, uptr access_size,
                       bool is_store, bool fatal, uptr *registers_frame) {
  ScopedReport R(fatal);
  // Snapshot the stack ring buffer before printing can mutate it (see
  // SavedStackAllocations).
  SavedStackAllocations current_stack_allocations(
      GetCurrentThread()->stack_allocations());

  Decorator d;
  Printf("%s", d.Error());
  uptr untagged_addr = UntagAddr(tagged_addr);
  // TODO: when possible, try to print heap-use-after-free, etc.
  const char *bug_type = "tag-mismatch";
  uptr pc = stack->size ? stack->trace[0] : 0;
  Report("ERROR: %s: %s on address %p at pc %p\n", SanitizerToolName, bug_type,
         untagged_addr, pc);

  Thread *t = GetCurrentThread();

  // Locate the first failing granule within the accessed range; the report
  // (and the tag dump below) is about that granule.
  sptr offset =
      __hwasan_test_shadow(reinterpret_cast<void *>(tagged_addr), access_size);
  CHECK(offset >= 0 && offset < static_cast<sptr>(access_size));
  tag_t ptr_tag = GetTagFromPointer(tagged_addr);
  tag_t *tag_ptr =
      reinterpret_cast<tag_t *>(MemToShadow(untagged_addr + offset));
  tag_t mem_tag = *tag_ptr;

  Printf("%s", d.Access());
  Printf("%s of size %zu at %p tags: %02x/%02x (ptr/mem) in thread T%zd\n",
         is_store ? "WRITE" : "READ", access_size, untagged_addr, ptr_tag,
         mem_tag, t->unique_id());
  if (offset != 0)
    Printf("Invalid access starting at offset [%zu, %zu)\n", offset,
           Min(access_size, static_cast<uptr>(offset) + (1 << kShadowScale)));
  Printf("%s", d.Default());

  stack->Print();

  PrintAddressDescription(tagged_addr, access_size,
                          current_stack_allocations.get());
  t->Announce();

  PrintTagsAroundAddr(tag_ptr);

  if (registers_frame)
    ReportRegisters(registers_frame, pc);

  ReportErrorSummary(bug_type, stack);
}
690 
// See the frame breakdown defined in __hwasan_tag_mismatch (from
// hwasan_tag_mismatch_aarch64.S).
// Prints the general-purpose registers (x0-x30) captured at the failing
// check, plus the reconstructed stack pointer.
void ReportRegisters(uptr *frame, uptr pc) {
  Printf("Registers where the failure occurred (pc %p):\n", pc);

  // We explicitly print a single line (4 registers/line) each iteration to
  // reduce the amount of logcat error messages printed. Each Printf() will
  // result in a new logcat line, irrespective of whether a newline is present,
  // and so we wish to reduce the number of Printf() calls we have to make.
  Printf("    x0  %016llx  x1  %016llx  x2  %016llx  x3  %016llx\n",
       frame[0], frame[1], frame[2], frame[3]);
  Printf("    x4  %016llx  x5  %016llx  x6  %016llx  x7  %016llx\n",
       frame[4], frame[5], frame[6], frame[7]);
  Printf("    x8  %016llx  x9  %016llx  x10 %016llx  x11 %016llx\n",
       frame[8], frame[9], frame[10], frame[11]);
  Printf("    x12 %016llx  x13 %016llx  x14 %016llx  x15 %016llx\n",
       frame[12], frame[13], frame[14], frame[15]);
  Printf("    x16 %016llx  x17 %016llx  x18 %016llx  x19 %016llx\n",
       frame[16], frame[17], frame[18], frame[19]);
  Printf("    x20 %016llx  x21 %016llx  x22 %016llx  x23 %016llx\n",
       frame[20], frame[21], frame[22], frame[23]);
  Printf("    x24 %016llx  x25 %016llx  x26 %016llx  x27 %016llx\n",
       frame[24], frame[25], frame[26], frame[27]);
  // hwasan_check* reduces the stack pointer by 256, then __hwasan_tag_mismatch
  // passes it to this function.
  Printf("    x28 %016llx  x29 %016llx  x30 %016llx   sp %016llx\n", frame[28],
         frame[29], frame[30], reinterpret_cast<u8 *>(frame) + 256);
}
719 
720 }  // namespace __hwasan
721 
// Public interface: installs a callback that receives the full text of each
// HWASan error report (delivered from ~ScopedReport).
void __hwasan_set_error_report_callback(void (*callback)(const char *)) {
  __hwasan::ScopedReport::SetErrorReportCallback(callback);
}
725