//===-- sanitizer_coverage_fuchsia.cpp ------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Sanitizer Coverage Controller for Trace PC Guard, Fuchsia-specific version.
//
// This Fuchsia-specific implementation uses the same basic scheme and the
// same simple '.sancov' file format as the generic implementation. The
// difference is that we just produce a single blob of output for the whole
// program, not a separate one per DSO. We do not sort the PC table and do
// not prune the zeros, so the resulting file is always as large as it
// would be to report 100% coverage. Implicit tracing information about
// the address ranges of DSOs allows offline tools to split the one big
// blob into separate files that the 'sancov' tool can understand.
//
// Unlike the traditional implementation that uses an atexit hook to write
// out data files at the end, the results on Fuchsia do not go into a file
// per se. The 'coverage_dir' option is ignored. Instead, they are stored
// directly into a shared memory object (a Zircon VMO). At exit, that VMO
// is handed over to a system service that's responsible for getting the
// data out to somewhere that it can be fed into the sancov tool (where and
// how is not our problem).

#include "sanitizer_platform.h"
#if SANITIZER_FUCHSIA
#include <zircon/process.h>
#include <zircon/sanitizer.h>
#include <zircon/syscalls.h>

#include "sanitizer_atomic.h"
#include "sanitizer_common.h"
#include "sanitizer_interface_internal.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_symbolizer_fuchsia.h"

using namespace __sanitizer;

namespace __sancov {
namespace {

// TODO(mcgrathr): Move the constant into a header shared with other impls.
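// This is the 64-bit '.sancov' file magic; it occupies element [0] of the
// output so the VMO's contents can be consumed directly as a '.sancov' file.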
constexpr u64 Magic64 = 0xC0BFFFFFFFFFFF64ULL;
static_assert(SANITIZER_WORDSIZE == 64, "Fuchsia is always LP64");

constexpr const char kSancovSinkName[] = "sancov";

// Collects trace-pc guard coverage.
// This class relies on zero-initialization.
class TracePcGuardController final {
 public:
  constexpr TracePcGuardController() {}

  // For each PC location being tracked, there is a u32 reserved in global
  // data called the "guard". At startup, we assign each guard slot a
  // unique index into the big results array. Later during runtime, the
  // first call to TracePcGuard (below) will store the corresponding PC at
  // that index in the array. (Each later call with the same guard slot is
  // presumed to be from the same PC.) Then it clears the guard slot back
  // to zero, which tells the compiler not to bother calling in again. At
  // the end of the run, we have a big array where each element is either
  // zero or is a tracked PC location that was hit in the trace.

  // This is called from global constructors. Each translation unit has a
  // contiguous array of guard slots, and a constructor that calls here
  // with the bounds of its array. Those constructors are allowed to call
  // here more than once for the same array. Usually all of these
  // constructors run in the initial thread, but it's possible that a
  // dlopen call on a secondary thread will run constructors that get here.
  void InitTracePcGuard(u32 *start, u32 *end) {
    if (end > start && *start == 0 && common_flags()->coverage) {
      // Complete the setup before filling in any guards with indices.
      // This avoids the possibility of code called from Setup reentering
      // TracePcGuard.
      u32 idx = Setup(end - start);
      for (u32 *p = start; p < end; ++p) {
        *p = idx++;
      }
    }
  }

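  // Records the PC for a guard the first time it fires. The atomic exchange
  // both claims the guard's index and resets the guard to zero, so only one
  // thread records a PC for a given slot and later hits take the early-return
  // path in __sanitizer_cov_trace_pc_guard.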
  void TracePcGuard(u32 *guard, uptr pc) {
    atomic_uint32_t *guard_ptr = reinterpret_cast<atomic_uint32_t *>(guard);
    u32 idx = atomic_exchange(guard_ptr, 0, memory_order_relaxed);
    if (idx > 0)
      array_[idx] = pc;
  }

  void Dump() {
    Lock locked(&setup_lock_);
    if (array_) {
      CHECK_NE(vmo_, ZX_HANDLE_INVALID);

      // Publish the VMO to the system, where it can be collected and
      // analyzed after this process exits. This always consumes the VMO
      // handle. Any failure is just logged and not indicated to us.
      __sanitizer_publish_data(kSancovSinkName, vmo_);
      vmo_ = ZX_HANDLE_INVALID;

      // This will route to __sanitizer_log_write, which will ensure that
      // information about shared libraries is written out. This message
      // uses the `dumpfile` symbolizer markup element to highlight the
      // dump. See the explanation for this in:
      // https://fuchsia.googlesource.com/zircon/+/master/docs/symbolizer_markup.md
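      // Note: next_index_ - 1 excludes the reserved element [0] that holds
      // the magic number, so the count covers only guard slots.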
      Printf("SanitizerCoverage: " FORMAT_DUMPFILE " with up to %u PCs\n",
             kSancovSinkName, vmo_name_, next_index_ - 1);
    }
  }

 private:
  // We map in the largest possible view into the VMO: one word
  // for every possible 32-bit index value. This avoids the need
  // to change the mapping when increasing the size of the VMO.
  // We can always spare the 32G of address space.
  static constexpr size_t MappingSize = sizeof(uptr) << 32;

  Mutex setup_lock_;
  uptr *array_ = nullptr;
  u32 next_index_ = 0;
  zx_handle_t vmo_ = {};
  char vmo_name_[ZX_MAX_NAME_LEN] = {};

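  // Bytes of meaningful data in the VMO: one word per assigned index,
  // including the reserved magic-number slot [0].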
  size_t DataSize() const { return next_index_ * sizeof(uintptr_t); }

  u32 Setup(u32 num_guards) {
    Lock locked(&setup_lock_);
    DCHECK(common_flags()->coverage);

    if (next_index_ == 0) {
      CHECK_EQ(vmo_, ZX_HANDLE_INVALID);
      CHECK_EQ(array_, nullptr);

      // The first sample goes at [1] to reserve [0] for the magic number.
      next_index_ = 1 + num_guards;

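      // The VMO is created resizable so that guards registered later (e.g.
      // by a dlopen'd module) can extend it; see the else branch below.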
      zx_status_t status = _zx_vmo_create(DataSize(), ZX_VMO_RESIZABLE, &vmo_);
      CHECK_EQ(status, ZX_OK);

      // Give the VMO a name including our process KOID so it's easy to spot.
      internal_snprintf(vmo_name_, sizeof(vmo_name_), "%s.%zu", kSancovSinkName,
                        internal_getpid());
      _zx_object_set_property(vmo_, ZX_PROP_NAME, vmo_name_,
                              internal_strlen(vmo_name_));
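      // Record the content size so consumers know how many bytes of the
      // (page-rounded) VMO actually hold coverage data.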
      uint64_t size = DataSize();
      status = _zx_object_set_property(vmo_, ZX_PROP_VMO_CONTENT_SIZE, &size,
                                       sizeof(size));
      CHECK_EQ(status, ZX_OK);

      // Map the largest possible view we might need into the VMO. Later
      // we might need to increase the VMO's size before we can use larger
      // indices, but we'll never move the mapping address so we don't have
      // any multi-thread synchronization issues with that.
      uintptr_t mapping;
      status =
          _zx_vmar_map(_zx_vmar_root_self(), ZX_VM_PERM_READ | ZX_VM_PERM_WRITE,
                       0, vmo_, 0, MappingSize, &mapping);
      CHECK_EQ(status, ZX_OK);

      // Hereafter other threads are free to start storing into
      // elements [1, next_index_) of the big array.
      array_ = reinterpret_cast<uptr *>(mapping);

      // Store the magic number.
      // Hereafter, the VMO serves as the contents of the '.sancov' file.
      array_[0] = Magic64;

      return 1;
    } else {
      // The VMO is already mapped in, but it's not big enough to use the
      // new indices. So increase the size to cover the new maximum index.

      CHECK_NE(vmo_, ZX_HANDLE_INVALID);
      CHECK_NE(array_, nullptr);

      uint32_t first_index = next_index_;
      next_index_ += num_guards;

      zx_status_t status = _zx_vmo_set_size(vmo_, DataSize());
      CHECK_EQ(status, ZX_OK);
      uint64_t size = DataSize();
      status = _zx_object_set_property(vmo_, ZX_PROP_VMO_CONTENT_SIZE, &size,
                                       sizeof(size));
      CHECK_EQ(status, ZX_OK);

      return first_index;
    }
  }
};

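// The single, global controller. Its constexpr constructor keeps it
// constant-initialized (effectively zero-initialized), so translation-unit
// constructors can safely call into it before dynamic initialization runs.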
static TracePcGuardController pc_guard_controller;

}  // namespace
}  // namespace __sancov

namespace __sanitizer {
void InitializeCoverage(bool enabled, const char *dir) {
  CHECK_EQ(enabled, common_flags()->coverage);
  CHECK_EQ(dir, common_flags()->coverage_dir);
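  // Note that dir is unused beyond this check: on Fuchsia coverage data is
  // published to a VMO rather than written under coverage_dir (see the file
  // comment above).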

  static bool coverage_enabled = false;
  if (!coverage_enabled) {
    coverage_enabled = enabled;
    Atexit(__sanitizer_cov_dump);
    AddDieCallback(__sanitizer_cov_dump);
  }
}
}  // namespace __sanitizer

extern "C" {
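// Dumping a caller-supplied PC array is not supported in this implementation;
// only the trace-pc-guard path below is wired up.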
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_dump_coverage(const uptr *pcs,
                                                             uptr len) {
  UNIMPLEMENTED();
}

SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_pc_guard, u32 *guard) {
  if (!*guard)
    return;
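  // GET_CALLER_PC() is the return address; subtracting 1 moves it back into
  // the call instruction so offline tools attribute the hit to the call site.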
  __sancov::pc_guard_controller.TracePcGuard(guard, GET_CALLER_PC() - 1);
}

SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_pc_guard_init,
                             u32 *start, u32 *end) {
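  // A nonzero first slot means this range was already assigned indices by an
  // earlier constructor call, so there is nothing more to do.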
  if (start == end || *start)
    return;
  __sancov::pc_guard_controller.InitTracePcGuard(start, end);
}

SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_dump_trace_pc_guard_coverage() {
  __sancov::pc_guard_controller.Dump();
}
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_dump() {
  __sanitizer_dump_trace_pc_guard_coverage();
}
// Default empty implementations (weak). Users should redefine them.
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_cmp, void) {}
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_cmp1, void) {}
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_cmp2, void) {}
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_cmp4, void) {}
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_cmp8, void) {}
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_const_cmp1, void) {}
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_const_cmp2, void) {}
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_const_cmp4, void) {}
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_const_cmp8, void) {}
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_switch, void) {}
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_div4, void) {}
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_div8, void) {}
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_gep, void) {}
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_pc_indir, void) {}
}  // extern "C"

#endif  // SANITIZER_FUCHSIA