//===-- msan_linux.cc -----------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of MemorySanitizer.
//
// Linux-, NetBSD- and FreeBSD-specific code.
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD

#include "msan.h"
#include "msan_report.h"
#include "msan_thread.h"

#include <elf.h>
#include <link.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <unistd.h>
#include <unwind.h>
#include <execinfo.h>
#include <sys/time.h>
#include <sys/resource.h>

#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_procmaps.h"

namespace __msan {

void ReportMapRange(const char *descr, uptr beg, uptr size) {
  if (size > 0) {
    uptr end = beg + size - 1;
    VPrintf(1, "%s : %p - %p\n", descr, beg, end);
  }
}

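// Verify, via the sanitizer procmaps interface, that nothing is currently
// mapped in [beg, beg + size), so that mapping shadow or origin memory there
// cannot silently clobber an existing mapping.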
static bool CheckMemoryRangeAvailability(uptr beg, uptr size) {
  if (size > 0) {
    uptr end = beg + size - 1;
    if (!MemoryRangeIsAvailable(beg, end)) {
      Printf("FATAL: Memory range %p - %p is not available.\n", beg, end);
      return false;
    }
  }
  return true;
}

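// Reserve [beg, beg + size) with an inaccessible (PROT_NONE) mapping so the
// range stays unused by the program and any stray access to it faults. The
// page at address zero may not be mappable depending on the kernel
// configuration (e.g. a non-zero vm.mmap_min_addr), hence the retry below
// that skips the first few pages.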
static bool ProtectMemoryRange(uptr beg, uptr size, const char *name) {
  if (size > 0) {
    void *addr = MmapFixedNoAccess(beg, size, name);
    if (beg == 0 && addr) {
      // Depending on the kernel configuration, we may not be able to protect
      // the page at address zero.
      uptr gap = 16 * GetPageSizeCached();
      beg += gap;
      size -= gap;
      addr = MmapFixedNoAccess(beg, size, name);
    }
    if ((uptr)addr != beg) {
      uptr end = beg + size - 1;
      Printf("FATAL: Cannot protect memory range %p - %p (%s).\n", beg, end,
             name);
      return false;
    }
  }
  return true;
}

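// Sanity-check kMemoryLayout: ranges must be sorted, contiguous and correctly
// typed, and for every APP range the address transforms must be
// self-consistent, i.e. for an application address a:
//   MEM_TO_SHADOW(a) lands in a SHADOW range,
//   MEM_TO_ORIGIN(a) lands in an ORIGIN range, and
//   MEM_TO_ORIGIN(a) == SHADOW_TO_ORIGIN(MEM_TO_SHADOW(a)).
// The checks sample the start, middle and end of each range.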
static void CheckMemoryLayoutSanity() {
  uptr prev_end = 0;
  for (unsigned i = 0; i < kMemoryLayoutSize; ++i) {
    uptr start = kMemoryLayout[i].start;
    uptr end = kMemoryLayout[i].end;
    MappingDesc::Type type = kMemoryLayout[i].type;
    CHECK_LT(start, end);
    CHECK_EQ(prev_end, start);
    CHECK(addr_is_type(start, type));
    CHECK(addr_is_type((start + end) / 2, type));
    CHECK(addr_is_type(end - 1, type));
    if (type == MappingDesc::APP) {
      uptr addr = start;
      CHECK(MEM_IS_SHADOW(MEM_TO_SHADOW(addr)));
      CHECK(MEM_IS_ORIGIN(MEM_TO_ORIGIN(addr)));
      CHECK_EQ(MEM_TO_ORIGIN(addr), SHADOW_TO_ORIGIN(MEM_TO_SHADOW(addr)));

      addr = (start + end) / 2;
      CHECK(MEM_IS_SHADOW(MEM_TO_SHADOW(addr)));
      CHECK(MEM_IS_ORIGIN(MEM_TO_ORIGIN(addr)));
      CHECK_EQ(MEM_TO_ORIGIN(addr), SHADOW_TO_ORIGIN(MEM_TO_SHADOW(addr)));

      addr = end - 1;
      CHECK(MEM_IS_SHADOW(MEM_TO_SHADOW(addr)));
      CHECK(MEM_IS_ORIGIN(MEM_TO_ORIGIN(addr)));
      CHECK_EQ(MEM_TO_ORIGIN(addr), SHADOW_TO_ORIGIN(MEM_TO_SHADOW(addr)));
    }
    prev_end = end;
  }
}

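// Establish the MSan address space layout described by kMemoryLayout: map
// SHADOW ranges (and ORIGIN ranges when init_origins is set), protect the
// remaining non-APP ranges with inaccessible mappings, and leave APP ranges
// untouched. Returns false if any required mapping cannot be established.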
bool InitShadow(bool init_origins) {
  // Let user know mapping parameters first.
  VPrintf(1, "__msan_init %p\n", &__msan_init);
  for (unsigned i = 0; i < kMemoryLayoutSize; ++i)
    VPrintf(1, "%s: %zx - %zx\n", kMemoryLayout[i].name, kMemoryLayout[i].start,
            kMemoryLayout[i].end - 1);

  CheckMemoryLayoutSanity();

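  // __msan_init is an address inside the executable's own code, so it must
  // fall into an APP range of kMemoryLayout. The typical way to end up
  // outside of it is a non-PIE executable loaded at a fixed address.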
  if (!MEM_IS_APP(&__msan_init)) {
    Printf("FATAL: Code %p is out of application range. Non-PIE build?\n",
           (uptr)&__msan_init);
    return false;
  }

  const uptr maxVirtualAddress = GetMaxUserVirtualAddress();

  for (unsigned i = 0; i < kMemoryLayoutSize; ++i) {
    uptr start = kMemoryLayout[i].start;
    uptr end = kMemoryLayout[i].end;
    uptr size = end - start;
    MappingDesc::Type type = kMemoryLayout[i].type;

    // Check if the segment should be mapped based on platform constraints.
    if (start >= maxVirtualAddress)
      continue;

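    // SHADOW ranges are always mapped. ORIGIN ranges are mapped only when
    // origin tracking is requested and are protected otherwise. INVALID
    // ranges are always protected, and APP ranges are left to the program.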
    bool map = type == MappingDesc::SHADOW ||
               (init_origins && type == MappingDesc::ORIGIN);
    bool protect = type == MappingDesc::INVALID ||
                   (!init_origins && type == MappingDesc::ORIGIN);
    CHECK(!(map && protect));
    if (!map && !protect)
      CHECK(type == MappingDesc::APP);
    if (map) {
      if (!CheckMemoryRangeAvailability(start, size))
        return false;
      if (!MmapFixedNoReserve(start, size, kMemoryLayout[i].name))
        return false;
      if (common_flags()->use_madv_dontdump)
        DontDumpShadowMemory(start, size);
    }
    if (protect) {
      if (!CheckMemoryRangeAvailability(start, size))
        return false;
      if (!ProtectMemoryRange(start, size, kMemoryLayout[i].name))
        return false;
    }
  }

  return true;
}

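// atexit() hook registered by InstallAtExitHandler(): optionally print MSan
// statistics and, if any warnings were reported during the run, print the
// at-exit summary and force the process exit code to common_flags()->exitcode
// when it is non-zero.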
static void MsanAtExit(void) {
  if (flags()->print_stats && (flags()->atexit || msan_report_count > 0))
    ReportStats();
  if (msan_report_count > 0) {
    ReportAtExitStatistics();
    if (common_flags()->exitcode)
      internal__exit(common_flags()->exitcode);
  }
}

void InstallAtExitHandler() {
  atexit(MsanAtExit);
}

// ---------------------- TSD ---------------- {{{1

#if SANITIZER_NETBSD || SANITIZER_FREEBSD
// Thread Static Data cannot be used in early init on NetBSD and FreeBSD.
// Reuse the MSan TSD API for compatibility with existing code, backed by the
// alternative implementation below.
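// A C++ thread_local object with a destructor stands in for pthread TSD: the
// tsd_key destructor runs at thread exit and forwards to the destructor
// registered via MsanTSDInit().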

static void (*tsd_destructor)(void *tsd) = nullptr;

struct tsd_key {
  tsd_key() : key(nullptr) {}
  ~tsd_key() {
    CHECK(tsd_destructor);
    if (key)
      (*tsd_destructor)(key);
  }
  MsanThread *key;
};

static thread_local struct tsd_key key;

void MsanTSDInit(void (*destructor)(void *tsd)) {
  CHECK(!tsd_destructor);
  tsd_destructor = destructor;
}

MsanThread *GetCurrentThread() {
  CHECK(tsd_destructor);
  return key.key;
}

void SetCurrentThread(MsanThread *tsd) {
  CHECK(tsd_destructor);
  CHECK(tsd);
  CHECK(!key.key);
  key.key = tsd;
}

void MsanTSDDtor(void *tsd) {
  CHECK(tsd_destructor);
  CHECK_EQ(key.key, tsd);
  key.key = nullptr;
  // Make sure that signal handler can not see a stale current thread pointer.
  atomic_signal_fence(memory_order_seq_cst);
  MsanThread::TSDDtor(tsd);
}
#else
static pthread_key_t tsd_key;
static bool tsd_key_inited = false;

void MsanTSDInit(void (*destructor)(void *tsd)) {
  CHECK(!tsd_key_inited);
  tsd_key_inited = true;
  CHECK_EQ(0, pthread_key_create(&tsd_key, destructor));
}

static THREADLOCAL MsanThread* msan_current_thread;

MsanThread *GetCurrentThread() {
  return msan_current_thread;
}

void SetCurrentThread(MsanThread *t) {
  // Make sure we do not reset the current MsanThread.
  CHECK_EQ(0, msan_current_thread);
  msan_current_thread = t;
  // Make sure that MsanTSDDtor gets called at the end.
  CHECK(tsd_key_inited);
  pthread_setspecific(tsd_key, (void *)t);
}

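// pthread may run TSD destructors several times (up to
// PTHREAD_DESTRUCTOR_ITERATIONS) as long as some destructor re-installs a
// non-null value. Keep re-registering the key until the last permitted
// iteration so that MsanThread cleanup happens after other TSD destructors.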
void MsanTSDDtor(void *tsd) {
  MsanThread *t = (MsanThread*)tsd;
  if (t->destructor_iterations_ > 1) {
    t->destructor_iterations_--;
    CHECK_EQ(0, pthread_setspecific(tsd_key, tsd));
    return;
  }
  msan_current_thread = nullptr;
  // Make sure that signal handler can not see a stale current thread pointer.
  atomic_signal_fence(memory_order_seq_cst);
  MsanThread::TSDDtor(tsd);
}
#endif

}  // namespace __msan

#endif  // SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD