//===-- sanitizer_coverage_fuchsia.cc -------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Sanitizer Coverage Controller for Trace PC Guard, Fuchsia-specific version.
//
// This Fuchsia-specific implementation uses the same basic scheme and the
// same simple '.sancov' file format as the generic implementation.  The
// difference is that we just produce a single blob of output for the whole
// program, not a separate one per DSO.  We do not sort the PC table and do
// not prune the zeros, so the resulting file is always as large as it
// would be to report 100% coverage.  Implicit tracing information about
// the address ranges of DSOs allows offline tools to split the one big
// blob into separate files that the 'sancov' tool can understand.
//
// Unlike the traditional implementation that uses an atexit hook to write
// out data files at the end, the results on Fuchsia do not go into a file
// per se.  The 'coverage_dir' option is ignored.  Instead, they are stored
// directly into a shared memory object (a Zircon VMO).  At exit, that VMO
// is handed over to a system service that's responsible for getting the
// data out to somewhere that it can be fed into the sancov tool (where and
// how is not our problem).
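//
// A sketch of the blob's layout, as produced by the Setup code below (not
// an authoritative description of the '.sancov' format):
//   array_[0]                  = Magic64   // 64-bit file magic
//   array_[1 .. next_index_)   = PC that was hit, or 0 if never hit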

#include "sanitizer_platform.h"
#if SANITIZER_FUCHSIA
#include "sanitizer_atomic.h"
#include "sanitizer_common.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_symbolizer_fuchsia.h"

#include <zircon/process.h>
#include <zircon/sanitizer.h>
#include <zircon/syscalls.h>

using namespace __sanitizer;  // NOLINT

namespace __sancov {
namespace {

// TODO(mcgrathr): Move the constant into a header shared with other impls.
constexpr u64 Magic64 = 0xC0BFFFFFFFFFFF64ULL;
static_assert(SANITIZER_WORDSIZE == 64, "Fuchsia is always LP64");

constexpr const char kSancovSinkName[] = "sancov";

// Collects trace-pc guard coverage.
// This class relies on zero-initialization.
class TracePcGuardController final {
 public:
  // For each PC location being tracked, there is a u32 reserved in global
  // data called the "guard".  At startup, we assign each guard slot a
  // unique index into the big results array.  Later during runtime, the
  // first call to TracePcGuard (below) will store the corresponding PC at
  // that index in the array.  (Each later call with the same guard slot is
  // presumed to be from the same PC.)  Then it clears the guard slot back
  // to zero, which tells the compiler not to bother calling in again.  At
  // the end of the run, we have a big array where each element is either
  // zero or is a tracked PC location that was hit in the trace.
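  //
  // For illustration only, a sketch of what the compiler-generated
  // instrumentation does (this is not code in this file): each
  // instrumented edge effectively calls
  //   __sanitizer_cov_trace_pc_guard(&guard_for_this_edge);
  // (guard_for_this_edge being a hypothetical name for that edge's u32
  // slot), which reaches TracePcGuard below via the entry point at the
  // bottom of this file.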

  // This is called from global constructors.  Each translation unit has a
  // contiguous array of guard slots, and a constructor that calls here
  // with the bounds of its array.  Those constructors are allowed to call
  // here more than once for the same array.  Usually all of these
  // constructors run in the initial thread, but it's possible that a
  // dlopen call on a secondary thread will run constructors that get here.
  void InitTracePcGuard(u32 *start, u32 *end) {
    if (end > start && *start == 0 && common_flags()->coverage) {
      // Complete the setup before filling in any guards with indices.
      // This avoids the possibility of code called from Setup reentering
      // TracePcGuard.
      u32 idx = Setup(end - start);
      for (u32 *p = start; p < end; ++p) {
        *p = idx++;
      }
    }
  }

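  // Called from the __sanitizer_cov_trace_pc_guard entry point below.
  // Atomically claims this guard's index (resetting the guard to zero so
  // later hits are ignored) and records the caller's PC at that index in
  // the array.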
  void TracePcGuard(u32 *guard, uptr pc) {
    atomic_uint32_t *guard_ptr = reinterpret_cast<atomic_uint32_t *>(guard);
    u32 idx = atomic_exchange(guard_ptr, 0, memory_order_relaxed);
    if (idx > 0) array_[idx] = pc;
  }

  void Dump() {
    BlockingMutexLock locked(&setup_lock_);
    if (array_) {
      CHECK_NE(vmo_, ZX_HANDLE_INVALID);

      // Publish the VMO to the system, where it can be collected and
      // analyzed after this process exits.  This always consumes the VMO
      // handle.  Any failure is just logged and not indicated to us.
      __sanitizer_publish_data(kSancovSinkName, vmo_);
      vmo_ = ZX_HANDLE_INVALID;

      // This will route to __sanitizer_log_write, which will ensure that
      // information about shared libraries is written out.  This message
      // uses the `dumpfile` symbolizer markup element to highlight the
      // dump.  See the explanation for this in:
      // https://fuchsia.googlesource.com/zircon/+/master/docs/symbolizer_markup.md
      Printf("SanitizerCoverage: " FORMAT_DUMPFILE " with up to %u PCs\n",
             kSancovSinkName, vmo_name_, next_index_ - 1);
    }
  }

 private:
  // We map in the largest possible view into the VMO: one word
  // for every possible 32-bit index value.  This avoids the need
  // to change the mapping when increasing the size of the VMO.
  // We can always spare the 32G of address space.
  static constexpr size_t MappingSize = sizeof(uptr) << 32;

  BlockingMutex setup_lock_ = BlockingMutex(LINKER_INITIALIZED);
  uptr *array_ = nullptr;
  u32 next_index_ = 0;
  zx_handle_t vmo_ = {};
  char vmo_name_[ZX_MAX_NAME_LEN] = {};

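  // Bytes of VMO storage needed to back every index in [0, next_index_).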
  size_t DataSize() const { return next_index_ * sizeof(uintptr_t); }

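  // Assigns indices for num_guards new guard slots, creating the VMO and
  // mapping on the first call and growing the VMO on later calls.  Returns
  // the first index in the newly assigned range.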
  u32 Setup(u32 num_guards) {
    BlockingMutexLock locked(&setup_lock_);
    DCHECK(common_flags()->coverage);

    if (next_index_ == 0) {
      CHECK_EQ(vmo_, ZX_HANDLE_INVALID);
      CHECK_EQ(array_, nullptr);

      // The first sample goes at [1] to reserve [0] for the magic number.
      next_index_ = 1 + num_guards;

      zx_status_t status = _zx_vmo_create(DataSize(), 0, &vmo_);
      CHECK_EQ(status, ZX_OK);

      // Give the VMO a name including our process KOID so it's easy to spot.
      internal_snprintf(vmo_name_, sizeof(vmo_name_), "%s.%zu", kSancovSinkName,
                        internal_getpid());
      _zx_object_set_property(vmo_, ZX_PROP_NAME, vmo_name_,
                              internal_strlen(vmo_name_));

      // Map the largest possible view we might need into the VMO.  Later
      // we might need to increase the VMO's size before we can use larger
      // indices, but we'll never move the mapping address so we don't have
      // any multi-thread synchronization issues with that.
      uintptr_t mapping;
      status =
          _zx_vmar_map(_zx_vmar_root_self(), ZX_VM_PERM_READ | ZX_VM_PERM_WRITE,
                       0, vmo_, 0, MappingSize, &mapping);
      CHECK_EQ(status, ZX_OK);

      // Hereafter other threads are free to start storing into
      // elements [1, next_index_) of the big array.
      array_ = reinterpret_cast<uptr *>(mapping);

      // Store the magic number.
      // Hereafter, the VMO serves as the contents of the '.sancov' file.
      array_[0] = Magic64;

      return 1;
    } else {
      // The VMO is already mapped in, but it's not big enough to use the
      // new indices.  So increase the size to cover the new maximum index.

      CHECK_NE(vmo_, ZX_HANDLE_INVALID);
      CHECK_NE(array_, nullptr);

      uint32_t first_index = next_index_;
      next_index_ += num_guards;

      zx_status_t status = _zx_vmo_set_size(vmo_, DataSize());
      CHECK_EQ(status, ZX_OK);

      return first_index;
    }
  }
};

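// The singleton controller.  As the class comment notes, it relies on
// zero-initialization rather than a constructor, so the instrumentation
// callbacks below can use it regardless of initialization order.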
static TracePcGuardController pc_guard_controller;

}  // namespace
}  // namespace __sancov

namespace __sanitizer {
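// Registers the coverage dump to run at process exit and on sanitizer
// death (via Atexit and the die callback).  The coverage_dir flag is not
// used on Fuchsia (see the file comment above).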
void InitializeCoverage(bool enabled, const char *dir) {
  CHECK_EQ(enabled, common_flags()->coverage);
  CHECK_EQ(dir, common_flags()->coverage_dir);

  static bool coverage_enabled = false;
  if (!coverage_enabled) {
    coverage_enabled = enabled;
    Atexit(__sanitizer_cov_dump);
    AddDieCallback(__sanitizer_cov_dump);
  }
}
}  // namespace __sanitizer

extern "C" {
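// Dumping an arbitrary, caller-supplied PC array is not supported in this
// implementation; coverage data only leaves the process through the VMO
// published by Dump() above.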
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_dump_coverage(  // NOLINT
    const uptr *pcs, uptr len) {
  UNIMPLEMENTED();
}

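// Instrumentation callback for every instrumented edge.  A zero guard
// means the slot has not been assigned an index yet or has already been
// claimed, so it is ignored.  The recorded PC is GET_CALLER_PC() - 1 so
// that it points into the call instruction rather than at the return
// address, which keeps symbolization on the line of the call site.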
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_pc_guard, u32 *guard) {
  if (!*guard) return;
  __sancov::pc_guard_controller.TracePcGuard(guard, GET_CALLER_PC() - 1);
}

SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_pc_guard_init,
                             u32 *start, u32 *end) {
  if (start == end || *start) return;
  __sancov::pc_guard_controller.InitTracePcGuard(start, end);
}

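// Dump entry points.  __sanitizer_cov_dump is what InitializeCoverage
// registers with Atexit and AddDieCallback; it simply forwards to the
// trace-pc-guard dump, which publishes the VMO.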
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_dump_trace_pc_guard_coverage() {
  __sancov::pc_guard_controller.Dump();
}
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_dump() {
  __sanitizer_dump_trace_pc_guard_coverage();
}
// Default empty implementations (weak). Users should redefine them.
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_cmp, void) {}
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_cmp1, void) {}
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_cmp2, void) {}
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_cmp4, void) {}
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_cmp8, void) {}
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_const_cmp1, void) {}
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_const_cmp2, void) {}
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_const_cmp4, void) {}
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_const_cmp8, void) {}
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_switch, void) {}
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_div4, void) {}
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_div8, void) {}
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_gep, void) {}
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_pc_indir, void) {}
}  // extern "C"

#endif  // SANITIZER_FUCHSIA