//===-- tsan_mman.cc ------------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_allocator_report.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_mman.h"
#include "tsan_rtl.h"
#include "tsan_report.h"
#include "tsan_flags.h"

// May be overridden by front-end.
SANITIZER_WEAK_DEFAULT_IMPL
void __sanitizer_malloc_hook(void *ptr, uptr size) {
  (void)ptr;
  (void)size;
}

SANITIZER_WEAK_DEFAULT_IMPL
void __sanitizer_free_hook(void *ptr) {
  (void)ptr;
}
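
// A front-end can observe allocations by providing strong definitions of the
// hooks above. Illustrative sketch (an assumption about how an embedder might
// use this, not part of the runtime):
//   void __sanitizer_malloc_hook(void *ptr, uptr size) {
//     // Record (ptr, size) in the embedder's own bookkeeping.
//   }
//   void __sanitizer_free_hook(void *ptr) {
//     // Drop the record for ptr.
//   }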

namespace __tsan {

struct MapUnmapCallback {
  void OnMap(uptr p, uptr size) const { }
  void OnUnmap(uptr p, uptr size) const {
    // We are about to unmap a chunk of user memory.
    // Mark the corresponding shadow memory as not needed.
    DontNeedShadowFor(p, size);
    // Mark the corresponding meta shadow memory as not needed.
    // Note the block does not contain any meta info at this point
    // (this happens after free).
    const uptr kMetaRatio = kMetaShadowCell / kMetaShadowSize;
    const uptr kPageSize = GetPageSizeCached() * kMetaRatio;
    // Block came from LargeMmapAllocator, so must be large.
    // We rely on this in the calculations below.
    CHECK_GE(size, 2 * kPageSize);
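    // Trim [p, p + size) inward to kPageSize boundaries so that only meta
    // shadow pages fully covered by the unmapped block are released.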
    uptr diff = RoundUp(p, kPageSize) - p;
    if (diff != 0) {
      p += diff;
      size -= diff;
    }
    diff = p + size - RoundDown(p + size, kPageSize);
    if (diff != 0)
      size -= diff;
    uptr p_meta = (uptr)MemToMeta(p);
    ReleaseMemoryPagesToOS(p_meta, p_meta + size / kMetaRatio);
  }
};

static char allocator_placeholder[sizeof(Allocator)] ALIGNED(64);
Allocator *allocator() {
  return reinterpret_cast<Allocator*>(&allocator_placeholder);
}

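// Fallback Processor shared by threads that do not have one wired (see
// ScopedGlobalProcessor below); all accesses are serialized by mtx.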
struct GlobalProc {
  Mutex mtx;
  Processor *proc;

  GlobalProc()
      : mtx(MutexTypeGlobalProc, StatMtxGlobalProc)
      , proc(ProcCreate()) {
  }
};

static char global_proc_placeholder[sizeof(GlobalProc)] ALIGNED(64);
GlobalProc *global_proc() {
  return reinterpret_cast<GlobalProc*>(&global_proc_placeholder);
}

ScopedGlobalProcessor::ScopedGlobalProcessor() {
  GlobalProc *gp = global_proc();
  ThreadState *thr = cur_thread();
  if (thr->proc())
    return;
  // If we don't have a proc, use the global one.
  // There are currently only two known cases where this path is triggered:
  //   __interceptor_free
  //   __nptl_deallocate_tsd
  //   start_thread
  //   clone
  // and:
  //   ResetRange
  //   __interceptor_munmap
  //   __deallocate_stack
  //   start_thread
  //   clone
  // Ideally, we destroy thread state (and unwire proc) when a thread actually
  // exits (i.e. when we join/wait it). Then we would not need the global proc.
  gp->mtx.Lock();
  ProcWire(gp->proc, thr);
}

ScopedGlobalProcessor::~ScopedGlobalProcessor() {
  GlobalProc *gp = global_proc();
  ThreadState *thr = cur_thread();
  if (thr->proc() != gp->proc)
    return;
  ProcUnwire(gp->proc, thr);
  gp->mtx.Unlock();
}

void InitializeAllocator() {
  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
  allocator()->Init(common_flags()->allocator_release_to_os_interval_ms);
}

void InitializeAllocatorLate() {
  new(global_proc()) GlobalProc();
}

void AllocatorProcStart(Processor *proc) {
  allocator()->InitCache(&proc->alloc_cache);
  internal_allocator()->InitCache(&proc->internal_alloc_cache);
}

void AllocatorProcFinish(Processor *proc) {
  allocator()->DestroyCache(&proc->alloc_cache);
  internal_allocator()->DestroyCache(&proc->internal_alloc_cache);
}

void AllocatorPrintStats() {
  allocator()->PrintStats();
}

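// If the current thread is inside a signal handler and report_signal_unsafe
// is set, reports the malloc/free call as signal-unsafe.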
static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
  if (atomic_load_relaxed(&thr->in_signal_handler) == 0 ||
      !flags()->report_signal_unsafe)
    return;
  VarSizeStackTrace stack;
  ObtainCurrentStack(thr, pc, &stack);
  if (IsFiredSuppression(ctx, ReportTypeSignalUnsafe, stack))
    return;
  ThreadRegistryLock l(ctx->thread_registry);
  ScopedReport rep(ReportTypeSignalUnsafe);
  rep.AddStack(stack, true);
  OutputReport(thr, rep);
}

static constexpr uptr kMaxAllowedMallocSize = 1ull << 40;

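// Common allocation path: checks size/alignment limits, allocates from the
// per-processor cache, registers the block in the metamap via OnUserAlloc
// (once the runtime is initialized), and optionally flags signal-unsafe calls.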
void *user_alloc_internal(ThreadState *thr, uptr pc, uptr sz, uptr align,
                          bool signal) {
  if (sz >= kMaxAllowedMallocSize || align >= kMaxAllowedMallocSize) {
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportAllocationSizeTooBig(sz, kMaxAllowedMallocSize, &stack);
  }
  void *p = allocator()->Allocate(&thr->proc()->alloc_cache, sz, align);
  if (UNLIKELY(!p)) {
    SetAllocatorOutOfMemory();
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportOutOfMemory(sz, &stack);
  }
  if (ctx && ctx->initialized)
    OnUserAlloc(thr, pc, (uptr)p, sz, true);
  if (signal)
    SignalUnsafeCall(thr, pc);
  return p;
}

void user_free(ThreadState *thr, uptr pc, void *p, bool signal) {
  ScopedGlobalProcessor sgp;
  if (ctx && ctx->initialized)
    OnUserFree(thr, pc, (uptr)p, true);
  allocator()->Deallocate(&thr->proc()->alloc_cache, p);
  if (signal)
    SignalUnsafeCall(thr, pc);
}

void *user_alloc(ThreadState *thr, uptr pc, uptr sz) {
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, kDefaultAlignment));
}

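// calloc: rejects n * size overflow up front, then allocates and zeroes the
// block.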
void *user_calloc(ThreadState *thr, uptr pc, uptr size, uptr n) {
  if (UNLIKELY(CheckForCallocOverflow(size, n))) {
    if (AllocatorMayReturnNull())
      return SetErrnoOnNull(nullptr);
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportCallocOverflow(n, size, &stack);
  }
  void *p = user_alloc_internal(thr, pc, n * size);
  if (p)
    internal_memset(p, 0, n * size);
  return SetErrnoOnNull(p);
}

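// Registers a freshly allocated block in the metamap and imitates a write to
// the whole range (unless reads/writes are ignored) so that unsynchronized
// accesses to the new memory are detected.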
void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write) {
  DPrintf("#%d: alloc(%zu) = %p\n", thr->tid, sz, p);
  ctx->metamap.AllocBlock(thr, pc, p, sz);
  if (write && thr->ignore_reads_and_writes == 0)
    MemoryRangeImitateWrite(thr, pc, (uptr)p, sz);
  else
    MemoryResetRange(thr, pc, (uptr)p, sz);
}

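// Releases the block's meta info and marks the range as freed so that later
// accesses to it are reported.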
void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write) {
  CHECK_NE(p, (void*)0);
  uptr sz = ctx->metamap.FreeBlock(thr->proc(), p);
  DPrintf("#%d: free(%p, %zu)\n", thr->tid, p, sz);
  if (write && thr->ignore_reads_and_writes == 0)
    MemoryRangeFreed(thr, pc, (uptr)p, sz);
}

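// realloc: behaves as malloc for p == nullptr and as free for sz == 0;
// otherwise allocate-copy-free.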
void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz) {
  // FIXME: Handle "shrinking" more efficiently,
  // it seems that some software actually does this.
  if (!p)
    return SetErrnoOnNull(user_alloc_internal(thr, pc, sz));
  if (!sz) {
    user_free(thr, pc, p);
    return nullptr;
  }
  void *new_p = user_alloc_internal(thr, pc, sz);
  if (new_p) {
    uptr old_sz = user_alloc_usable_size(p);
    internal_memcpy(new_p, p, min(old_sz, sz));
    user_free(thr, pc, p);
  }
  return SetErrnoOnNull(new_p);
}

void *user_memalign(ThreadState *thr, uptr pc, uptr align, uptr sz) {
  if (UNLIKELY(!IsPowerOfTwo(align))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportInvalidAllocationAlignment(align, &stack);
  }
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, align));
}

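// posix_memalign: per POSIX, returns an error code instead of setting errno
// and writes the result only through *memptr on success.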
int user_posix_memalign(ThreadState *thr, uptr pc, void **memptr, uptr align,
                        uptr sz) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(align))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportInvalidPosixMemalignAlignment(align, &stack);
  }
  void *ptr = user_alloc_internal(thr, pc, sz, align);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by user_alloc_internal.
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, align));
  *memptr = ptr;
  return 0;
}

void *user_aligned_alloc(ThreadState *thr, uptr pc, uptr align, uptr sz) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(align, sz))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportInvalidAlignedAllocAlignment(sz, align, &stack);
  }
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, align));
}

void *user_valloc(ThreadState *thr, uptr pc, uptr sz) {
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, GetPageSizeCached()));
}

void *user_pvalloc(ThreadState *thr, uptr pc, uptr sz) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(sz, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportPvallocOverflow(sz, &stack);
  }
  // pvalloc(0) should allocate one page.
  sz = sz ? RoundUpTo(sz, PageSize) : PageSize;
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, PageSize));
}

uptr user_alloc_usable_size(const void *p) {
  if (p == 0)
    return 0;
  MBlock *b = ctx->metamap.GetBlock((uptr)p);
  if (!b)
    return 0;  // Not a valid pointer.
  if (b->siz == 0)
    return 1;  // Zero-sized allocations are actually 1 byte.
  return b->siz;
}

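// Hook invocation is skipped until the runtime is fully initialized and in
// regions where interceptors are ignored.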
void invoke_malloc_hook(void *ptr, uptr size) {
  ThreadState *thr = cur_thread();
  if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
    return;
  __sanitizer_malloc_hook(ptr, size);
  RunMallocHooks(ptr, size);
}

void invoke_free_hook(void *ptr) {
  ThreadState *thr = cur_thread();
  if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
    return;
  __sanitizer_free_hook(ptr);
  RunFreeHooks(ptr);
}

void *internal_alloc(MBlockType typ, uptr sz) {
  ThreadState *thr = cur_thread();
  if (thr->nomalloc) {
    thr->nomalloc = 0;  // CHECK calls internal_malloc().
    CHECK(0);
  }
  return InternalAlloc(sz, &thr->proc()->internal_alloc_cache);
}

void internal_free(void *p) {
  ThreadState *thr = cur_thread();
  if (thr->nomalloc) {
    thr->nomalloc = 0;  // CHECK calls internal_malloc().
    CHECK(0);
  }
  InternalFree(p, &thr->proc()->internal_alloc_cache);
}

}  // namespace __tsan

using namespace __tsan;

extern "C" {
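// Allocator introspection entry points exposed to the application.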
uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator()->GetStats(stats);
  return stats[AllocatorStatAllocated];
}

uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator()->GetStats(stats);
  return stats[AllocatorStatMapped];
}

uptr __sanitizer_get_free_bytes() {
  return 1;
}

uptr __sanitizer_get_unmapped_bytes() {
  return 1;
}

uptr __sanitizer_get_estimated_allocated_size(uptr size) {
  return size;
}

int __sanitizer_get_ownership(const void *p) {
  return allocator()->GetBlockBegin(p) != 0;
}

uptr __sanitizer_get_allocated_size(const void *p) {
  return user_alloc_usable_size(p);
}

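// Can be called by the application when a thread goes idle: releases cached
// vector clocks and drains the allocator caches of the thread's processor so
// that an idle thread does not pin memory.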
void __tsan_on_thread_idle() {
  ThreadState *thr = cur_thread();
  thr->clock.ResetCached(&thr->proc()->clock_cache);
  thr->last_sleep_clock.ResetCached(&thr->proc()->clock_cache);
  allocator()->SwallowCache(&thr->proc()->alloc_cache);
  internal_allocator()->SwallowCache(&thr->proc()->internal_alloc_cache);
  ctx->metamap.OnProcIdle(thr->proc());
}
}  // extern "C"