//===-- tsan_mman.cc ------------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_allocator_report.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_mman.h"
#include "tsan_rtl.h"
#include "tsan_report.h"
#include "tsan_flags.h"

// May be overridden by front-end.
SANITIZER_WEAK_DEFAULT_IMPL
void __sanitizer_malloc_hook(void *ptr, uptr size) {
  (void)ptr;
  (void)size;
}

SANITIZER_WEAK_DEFAULT_IMPL
void __sanitizer_free_hook(void *ptr) {
  (void)ptr;
}

namespace __tsan {

struct MapUnmapCallback {
  void OnMap(uptr p, uptr size) const { }
  void OnUnmap(uptr p, uptr size) const {
    // We are about to unmap a chunk of user memory.
    // Mark the corresponding shadow memory as not needed.
    DontNeedShadowFor(p, size);
    // Mark the corresponding meta shadow memory as not needed.
    // Note the block does not contain any meta info at this point
    // (this happens after free).
    const uptr kMetaRatio = kMetaShadowCell / kMetaShadowSize;
    const uptr kPageSize = GetPageSizeCached() * kMetaRatio;
    // Block came from LargeMmapAllocator, so must be large.
    // We rely on this in the calculations below.
    CHECK_GE(size, 2 * kPageSize);
    uptr diff = RoundUp(p, kPageSize) - p;
    if (diff != 0) {
      p += diff;
      size -= diff;
    }
    diff = p + size - RoundDown(p + size, kPageSize);
    if (diff != 0)
      size -= diff;
    uptr p_meta = (uptr)MemToMeta(p);
    ReleaseMemoryPagesToOS(p_meta, p_meta + size / kMetaRatio);
  }
};

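// The allocator object lives in a raw, cache-line-aligned static buffer, so no
// global constructor runs for it; it is initialized explicitly in
// InitializeAllocator().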
static char allocator_placeholder[sizeof(Allocator)] ALIGNED(64);
Allocator *allocator() {
  return reinterpret_cast<Allocator*>(&allocator_placeholder);
}

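// Fallback Processor shared by threads that no longer own a proc of their own
// (see ScopedGlobalProcessor below); its use is serialized by mtx.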
struct GlobalProc {
  Mutex mtx;
  Processor *proc;

  GlobalProc()
      : mtx(MutexTypeGlobalProc, StatMtxGlobalProc)
      , proc(ProcCreate()) {
  }
};

static char global_proc_placeholder[sizeof(GlobalProc)] ALIGNED(64);
GlobalProc *global_proc() {
  return reinterpret_cast<GlobalProc*>(&global_proc_placeholder);
}

ScopedGlobalProcessor::ScopedGlobalProcessor() {
  GlobalProc *gp = global_proc();
  ThreadState *thr = cur_thread();
  if (thr->proc())
    return;
  // If we don't have a proc, use the global one.
  // There are currently only two known cases where this path is triggered:
  //   __interceptor_free
  //   __nptl_deallocate_tsd
  //   start_thread
  //   clone
  // and:
  //   ResetRange
  //   __interceptor_munmap
  //   __deallocate_stack
  //   start_thread
  //   clone
  // Ideally, we destroy thread state (and unwire proc) when a thread actually
  // exits (i.e. when we join/wait it). Then we would not need the global proc.
  gp->mtx.Lock();
  ProcWire(gp->proc, thr);
}

ScopedGlobalProcessor::~ScopedGlobalProcessor() {
  GlobalProc *gp = global_proc();
  ThreadState *thr = cur_thread();
  if (thr->proc() != gp->proc)
    return;
  ProcUnwire(gp->proc, thr);
  gp->mtx.Unlock();
}

void InitializeAllocator() {
  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
  allocator()->Init(common_flags()->allocator_release_to_os_interval_ms);
}

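// Constructs the fallback GlobalProc. This is deferred to a later stage of
// runtime initialization, presumably because ProcCreate() itself requires the
// internal allocator to be operational.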
void InitializeAllocatorLate() {
  new(global_proc()) GlobalProc();
}

void AllocatorProcStart(Processor *proc) {
  allocator()->InitCache(&proc->alloc_cache);
  internal_allocator()->InitCache(&proc->internal_alloc_cache);
}

void AllocatorProcFinish(Processor *proc) {
  allocator()->DestroyCache(&proc->alloc_cache);
  internal_allocator()->DestroyCache(&proc->internal_alloc_cache);
}

void AllocatorPrintStats() {
  allocator()->PrintStats();
}

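// Reports a heap allocation/deallocation performed from within a signal
// handler when the report_signal_unsafe flag is set.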
static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
  if (atomic_load_relaxed(&thr->in_signal_handler) == 0 ||
      !flags()->report_signal_unsafe)
    return;
  VarSizeStackTrace stack;
  ObtainCurrentStack(thr, pc, &stack);
  if (IsFiredSuppression(ctx, ReportTypeSignalUnsafe, stack))
    return;
  ThreadRegistryLock l(ctx->thread_registry);
  ScopedReport rep(ReportTypeSignalUnsafe);
  rep.AddStack(stack, true);
  OutputReport(thr, rep);
}

static constexpr uptr kMaxAllowedMallocSize = 1ull << 40;

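// Common allocation path: validates size and alignment against
// kMaxAllowedMallocSize, allocates from the per-proc cache, and registers the
// new block via OnUserAlloc().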
void *user_alloc_internal(ThreadState *thr, uptr pc, uptr sz, uptr align,
                          bool signal) {
  if (sz >= kMaxAllowedMallocSize || align >= kMaxAllowedMallocSize) {
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportAllocationSizeTooBig(sz, kMaxAllowedMallocSize, &stack);
  }
  void *p = allocator()->Allocate(&thr->proc()->alloc_cache, sz, align);
  if (UNLIKELY(!p)) {
    SetAllocatorOutOfMemory();
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportOutOfMemory(sz, &stack);
  }
  if (ctx && ctx->initialized)
    OnUserAlloc(thr, pc, (uptr)p, sz, true);
  if (signal)
    SignalUnsafeCall(thr, pc);
  return p;
}

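// ScopedGlobalProcessor wires the global proc when the calling thread no
// longer has its own (e.g. frees performed during late TSD destruction), so
// thr->proc() below is always valid.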
void user_free(ThreadState *thr, uptr pc, void *p, bool signal) {
  ScopedGlobalProcessor sgp;
  if (ctx && ctx->initialized)
    OnUserFree(thr, pc, (uptr)p, true);
  allocator()->Deallocate(&thr->proc()->alloc_cache, p);
  if (signal)
    SignalUnsafeCall(thr, pc);
}

void *user_alloc(ThreadState *thr, uptr pc, uptr sz) {
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, kDefaultAlignment));
}

void *user_calloc(ThreadState *thr, uptr pc, uptr size, uptr n) {
  if (UNLIKELY(CheckForCallocOverflow(size, n))) {
    if (AllocatorMayReturnNull())
      return SetErrnoOnNull(nullptr);
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportCallocOverflow(n, size, &stack);
  }
  void *p = user_alloc_internal(thr, pc, n * size);
  if (p)
    internal_memset(p, 0, n * size);
  return SetErrnoOnNull(p);
}

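// Called for every user allocation/deallocation to update the metamap and the
// shadow state of the block's address range.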
void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write) {
  DPrintf("#%d: alloc(%zu) = %p\n", thr->tid, sz, p);
  ctx->metamap.AllocBlock(thr, pc, p, sz);
  if (write && thr->ignore_reads_and_writes == 0)
    MemoryRangeImitateWrite(thr, pc, (uptr)p, sz);
  else
    MemoryResetRange(thr, pc, (uptr)p, sz);
}

void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write) {
  CHECK_NE(p, (void*)0);
  uptr sz = ctx->metamap.FreeBlock(thr->proc(), p);
  DPrintf("#%d: free(%p, %zu)\n", thr->tid, p, sz);
  if (write && thr->ignore_reads_and_writes == 0)
    MemoryRangeFreed(thr, pc, (uptr)p, sz);
}

void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz) {
  // FIXME: Handle "shrinking" more efficiently,
  // it seems that some software actually does this.
  if (!p)
    return SetErrnoOnNull(user_alloc_internal(thr, pc, sz));
  if (!sz) {
    user_free(thr, pc, p);
    return nullptr;
  }
  void *new_p = user_alloc_internal(thr, pc, sz);
  if (new_p) {
    uptr old_sz = user_alloc_usable_size(p);
    internal_memcpy(new_p, p, min(old_sz, sz));
    user_free(thr, pc, p);
  }
  return SetErrnoOnNull(new_p);
}

void *user_memalign(ThreadState *thr, uptr pc, uptr align, uptr sz) {
  if (UNLIKELY(!IsPowerOfTwo(align))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportInvalidAllocationAlignment(align, &stack);
  }
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, align));
}

int user_posix_memalign(ThreadState *thr, uptr pc, void **memptr, uptr align,
                        uptr sz) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(align))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportInvalidPosixMemalignAlignment(align, &stack);
  }
  void *ptr = user_alloc_internal(thr, pc, sz, align);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by user_alloc_internal.
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, align));
  *memptr = ptr;
  return 0;
}

void *user_aligned_alloc(ThreadState *thr, uptr pc, uptr align, uptr sz) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(align, sz))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportInvalidAlignedAllocAlignment(sz, align, &stack);
  }
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, align));
}

void *user_valloc(ThreadState *thr, uptr pc, uptr sz) {
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, GetPageSizeCached()));
}

void *user_pvalloc(ThreadState *thr, uptr pc, uptr sz) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(sz, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportPvallocOverflow(sz, &stack);
  }
  // pvalloc(0) should allocate one page.
  sz = sz ? RoundUpTo(sz, PageSize) : PageSize;
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, PageSize));
}

uptr user_alloc_usable_size(const void *p) {
  if (p == 0)
    return 0;
  MBlock *b = ctx->metamap.GetBlock((uptr)p);
  if (!b)
    return 0; // Not a valid pointer.
  if (b->siz == 0)
    return 1; // Zero-sized allocations are actually 1 byte.
  return b->siz;
}

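// Runs the weak __sanitizer_malloc_hook/__sanitizer_free_hook overrides and
// the registered sanitizer malloc/free hooks, but only once the runtime is
// initialized and interceptors are not ignored.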
void invoke_malloc_hook(void *ptr, uptr size) {
  ThreadState *thr = cur_thread();
  if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
    return;
  __sanitizer_malloc_hook(ptr, size);
  RunMallocHooks(ptr, size);
}

void invoke_free_hook(void *ptr) {
  ThreadState *thr = cur_thread();
  if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
    return;
  __sanitizer_free_hook(ptr);
  RunFreeHooks(ptr);
}

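// Allocation helpers for TSan's own data structures. They must not be reached
// from no-malloc contexts (thr->nomalloc), hence the CHECK.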
void *internal_alloc(MBlockType typ, uptr sz) {
  ThreadState *thr = cur_thread();
  if (thr->nomalloc) {
    thr->nomalloc = 0; // CHECK calls internal_malloc().
    CHECK(0);
  }
  return InternalAlloc(sz, &thr->proc()->internal_alloc_cache);
}

void internal_free(void *p) {
  ThreadState *thr = cur_thread();
  if (thr->nomalloc) {
    thr->nomalloc = 0; // CHECK calls internal_malloc().
    CHECK(0);
  }
  InternalFree(p, &thr->proc()->internal_alloc_cache);
}

} // namespace __tsan

using namespace __tsan;

extern "C" {
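// Public allocator introspection API (declared in
// sanitizer_common/sanitizer_allocator_interface.h).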
uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator()->GetStats(stats);
  return stats[AllocatorStatAllocated];
}

uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator()->GetStats(stats);
  return stats[AllocatorStatMapped];
}

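// Free/unmapped byte counts are not tracked separately by TSan; these
// interface functions return placeholder values.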
uptr __sanitizer_get_free_bytes() {
  return 1;
}

uptr __sanitizer_get_unmapped_bytes() {
  return 1;
}

uptr __sanitizer_get_estimated_allocated_size(uptr size) {
  return size;
}

int __sanitizer_get_ownership(const void *p) {
  return allocator()->GetBlockBegin(p) != 0;
}

uptr __sanitizer_get_allocated_size(const void *p) {
  return user_alloc_usable_size(p);
}

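// Releases per-proc caches (clock caches, allocator caches, metamap state)
// for a thread that is known to have gone idle.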
void __tsan_on_thread_idle() {
  ThreadState *thr = cur_thread();
  thr->clock.ResetCached(&thr->proc()->clock_cache);
  thr->last_sleep_clock.ResetCached(&thr->proc()->clock_cache);
  allocator()->SwallowCache(&thr->proc()->alloc_cache);
  internal_allocator()->SwallowCache(&thr->proc()->internal_alloc_cache);
  ctx->metamap.OnProcIdle(thr->proc());
}
} // extern "C"