//===-- tsan_mman.cpp -----------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_allocator_report.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_mman.h"
#include "tsan_rtl.h"
#include "tsan_report.h"
#include "tsan_flags.h"

namespace __tsan {

struct MapUnmapCallback {
  void OnMap(uptr p, uptr size) const { }
  void OnUnmap(uptr p, uptr size) const {
    // We are about to unmap a chunk of user memory.
    // Mark the corresponding shadow memory as not needed.
    DontNeedShadowFor(p, size);
    // Mark the corresponding meta shadow memory as not needed.
    // Note the block does not contain any meta info at this point
    // (this happens after free).
    const uptr kMetaRatio = kMetaShadowCell / kMetaShadowSize;
    const uptr kPageSize = GetPageSizeCached() * kMetaRatio;
    // Block came from LargeMmapAllocator, so must be large.
    // We rely on this in the calculations below.
    CHECK_GE(size, 2 * kPageSize);
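    // Shrink [p, p + size) to the largest kPageSize-aligned sub-range so that
    // the meta shadow range released below is OS-page-aligned.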
    uptr diff = RoundUp(p, kPageSize) - p;
    if (diff != 0) {
      p += diff;
      size -= diff;
    }
    diff = p + size - RoundDown(p + size, kPageSize);
    if (diff != 0)
      size -= diff;
    uptr p_meta = (uptr)MemToMeta(p);
    ReleaseMemoryPagesToOS(p_meta, p_meta + size / kMetaRatio);
  }
};

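// Backing storage for the user-memory allocator. This is plain static storage
// (no global constructor runs); the allocator is initialized explicitly in
// InitializeAllocator().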
static char allocator_placeholder[sizeof(Allocator)] ALIGNED(64);
Allocator *allocator() {
  return reinterpret_cast<Allocator*>(&allocator_placeholder);
}

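// Fallback Processor shared by threads that currently have no Processor of
// their own (see ScopedGlobalProcessor below); mtx serializes its use.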
struct GlobalProc {
  Mutex mtx;
  Processor *proc;
  // For the purposes of deadlock detection, this mutex represents all of the
  // internal allocator's mutexes combined. The internal allocator uses
  // multiple mutexes; moreover, they are locked only occasionally, and they
  // are spin mutexes which don't support deadlock detection. So we use this
  // fake mutex as a substitute for them.
  CheckedMutex internal_alloc_mtx;

  GlobalProc()
      : mtx(MutexTypeGlobalProc),
        proc(ProcCreate()),
        internal_alloc_mtx(MutexTypeInternalAlloc) {}
};

static char global_proc_placeholder[sizeof(GlobalProc)] ALIGNED(64);
GlobalProc *global_proc() {
  return reinterpret_cast<GlobalProc*>(&global_proc_placeholder);
}

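// Lock and immediately unlock the fake internal allocator mutex so that the
// deadlock detector sees internal allocation as a mutex acquisition.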
static void InternalAllocAccess() {
  global_proc()->internal_alloc_mtx.Lock();
  global_proc()->internal_alloc_mtx.Unlock();
}

ScopedGlobalProcessor::ScopedGlobalProcessor() {
  GlobalProc *gp = global_proc();
  ThreadState *thr = cur_thread();
  if (thr->proc())
    return;
  // If we don't have a proc, use the global one.
  // There are currently only two known cases where this path is triggered:
  //   __interceptor_free
  //   __nptl_deallocate_tsd
  //   start_thread
  //   clone
  // and:
  //   ResetRange
  //   __interceptor_munmap
  //   __deallocate_stack
  //   start_thread
  //   clone
  // Ideally, we would destroy the thread state (and unwire the proc) when a
  // thread actually exits (i.e. when we join/wait it); then we would not need
  // the global proc.
  gp->mtx.Lock();
  ProcWire(gp->proc, thr);
}

ScopedGlobalProcessor::~ScopedGlobalProcessor() {
  GlobalProc *gp = global_proc();
  ThreadState *thr = cur_thread();
  if (thr->proc() != gp->proc)
    return;
  ProcUnwire(gp->proc, thr);
  gp->mtx.Unlock();
}

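// Acquire/release both the fake internal allocator mutex and the internal
// allocator itself, for callers that need the allocators quiesced
// (for example around fork).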
void AllocatorLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  global_proc()->internal_alloc_mtx.Lock();
  InternalAllocatorLock();
}

void AllocatorUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  InternalAllocatorUnlock();
  global_proc()->internal_alloc_mtx.Unlock();
}

void GlobalProcessorLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  global_proc()->mtx.Lock();
}

void GlobalProcessorUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  global_proc()->mtx.Unlock();
}

static constexpr uptr kMaxAllowedMallocSize = 1ull << 40;
static uptr max_user_defined_malloc_size;

void InitializeAllocator() {
  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
  allocator()->Init(common_flags()->allocator_release_to_os_interval_ms);
  max_user_defined_malloc_size = common_flags()->max_allocation_size_mb
                                     ? common_flags()->max_allocation_size_mb
                                           << 20
                                     : kMaxAllowedMallocSize;
}

void InitializeAllocatorLate() {
  new(global_proc()) GlobalProc();
}

void AllocatorProcStart(Processor *proc) {
  allocator()->InitCache(&proc->alloc_cache);
  internal_allocator()->InitCache(&proc->internal_alloc_cache);
}

void AllocatorProcFinish(Processor *proc) {
  allocator()->DestroyCache(&proc->alloc_cache);
  internal_allocator()->DestroyCache(&proc->internal_alloc_cache);
}

void AllocatorPrintStats() {
  allocator()->PrintStats();
}

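// Reports a malloc/free call made from inside a signal handler (such calls
// are not async-signal-safe), unless reporting is disabled or suppressed.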
static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
  if (atomic_load_relaxed(&thr->in_signal_handler) == 0 ||
      !ShouldReport(thr, ReportTypeSignalUnsafe))
    return;
  VarSizeStackTrace stack;
  ObtainCurrentStack(thr, pc, &stack);
  if (IsFiredSuppression(ctx, ReportTypeSignalUnsafe, stack))
    return;
  ThreadRegistryLock l(&ctx->thread_registry);
  ScopedReport rep(ReportTypeSignalUnsafe);
  rep.AddStack(stack, true);
  OutputReport(thr, rep);
}

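// Common allocation path: enforces the size/alignment and RSS limits,
// allocates from the per-Processor cache and, once the runtime is
// initialized, registers the new block via OnUserAlloc().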
void *user_alloc_internal(ThreadState *thr, uptr pc, uptr sz, uptr align,
                          bool signal) {
  if (sz >= kMaxAllowedMallocSize || align >= kMaxAllowedMallocSize ||
      sz > max_user_defined_malloc_size) {
    if (AllocatorMayReturnNull())
      return nullptr;
    uptr malloc_limit =
        Min(kMaxAllowedMallocSize, max_user_defined_malloc_size);
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportAllocationSizeTooBig(sz, malloc_limit, &stack);
  }
  if (UNLIKELY(IsRssLimitExceeded())) {
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportRssLimitExceeded(&stack);
  }
  void *p = allocator()->Allocate(&thr->proc()->alloc_cache, sz, align);
  if (UNLIKELY(!p)) {
    SetAllocatorOutOfMemory();
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportOutOfMemory(sz, &stack);
  }
  if (ctx && ctx->initialized)
    OnUserAlloc(thr, pc, (uptr)p, sz, true);
  if (signal)
    SignalUnsafeCall(thr, pc);
  return p;
}

void user_free(ThreadState *thr, uptr pc, void *p, bool signal) {
  ScopedGlobalProcessor sgp;
  if (ctx && ctx->initialized)
    OnUserFree(thr, pc, (uptr)p, true);
  allocator()->Deallocate(&thr->proc()->alloc_cache, p);
  if (signal)
    SignalUnsafeCall(thr, pc);
}

void *user_alloc(ThreadState *thr, uptr pc, uptr sz) {
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, kDefaultAlignment));
}

void *user_calloc(ThreadState *thr, uptr pc, uptr size, uptr n) {
  if (UNLIKELY(CheckForCallocOverflow(size, n))) {
    if (AllocatorMayReturnNull())
      return SetErrnoOnNull(nullptr);
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportCallocOverflow(n, size, &stack);
  }
  void *p = user_alloc_internal(thr, pc, n * size);
  if (p)
    internal_memset(p, 0, n * size);
  return SetErrnoOnNull(p);
}

void *user_reallocarray(ThreadState *thr, uptr pc, void *p, uptr size, uptr n) {
  if (UNLIKELY(CheckForCallocOverflow(size, n))) {
    if (AllocatorMayReturnNull())
      return SetErrnoOnNull(nullptr);
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportReallocArrayOverflow(size, n, &stack);
  }
  return user_realloc(thr, pc, p, size * n);
}

void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write) {
  DPrintf("#%d: alloc(%zu) = 0x%zx\n", thr->tid, sz, p);
  // Note: this can run before thread initialization/after finalization.
  // As a result this is not necessarily synchronized with DoReset,
  // which iterates over and resets all sync objects,
  // but it is fine to create new MBlocks in this context.
  ctx->metamap.AllocBlock(thr, pc, p, sz);
  // If this runs before thread initialization/after finalization
  // and the trace is not initialized, we can't imitate writes.
  // In that case just reset the shadow range; this is fine since
  // it affects only a small fraction of special objects.
  if (write && thr->ignore_reads_and_writes == 0 &&
      atomic_load_relaxed(&thr->trace_pos))
    MemoryRangeImitateWrite(thr, pc, (uptr)p, sz);
  else
    MemoryResetRange(thr, pc, (uptr)p, sz);
}

void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write) {
  CHECK_NE(p, (void*)0);
  if (!thr->slot) {
    // Very early/late in thread lifetime, or during fork.
    UNUSED uptr sz = ctx->metamap.FreeBlock(thr->proc(), p, false);
    DPrintf("#%d: free(0x%zx, %zu) (no slot)\n", thr->tid, p, sz);
    return;
  }
  SlotLocker locker(thr);
  uptr sz = ctx->metamap.FreeBlock(thr->proc(), p, true);
  DPrintf("#%d: free(0x%zx, %zu)\n", thr->tid, p, sz);
  if (write && thr->ignore_reads_and_writes == 0)
    MemoryRangeFreed(thr, pc, (uptr)p, sz);
}

void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz) {
  // FIXME: Handle "shrinking" more efficiently,
  // it seems that some software actually does this.
  if (!p)
    return SetErrnoOnNull(user_alloc_internal(thr, pc, sz));
  if (!sz) {
    user_free(thr, pc, p);
    return nullptr;
  }
  void *new_p = user_alloc_internal(thr, pc, sz);
  if (new_p) {
    uptr old_sz = user_alloc_usable_size(p);
    internal_memcpy(new_p, p, min(old_sz, sz));
    user_free(thr, pc, p);
  }
  return SetErrnoOnNull(new_p);
}

void *user_memalign(ThreadState *thr, uptr pc, uptr align, uptr sz) {
  if (UNLIKELY(!IsPowerOfTwo(align))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportInvalidAllocationAlignment(align, &stack);
  }
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, align));
}

int user_posix_memalign(ThreadState *thr, uptr pc, void **memptr, uptr align,
                        uptr sz) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(align))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportInvalidPosixMemalignAlignment(align, &stack);
  }
  void *ptr = user_alloc_internal(thr, pc, sz, align);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by user_alloc_internal.
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, align));
  *memptr = ptr;
  return 0;
}

void *user_aligned_alloc(ThreadState *thr, uptr pc, uptr align, uptr sz) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(align, sz))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportInvalidAlignedAllocAlignment(sz, align, &stack);
  }
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, align));
}

void *user_valloc(ThreadState *thr, uptr pc, uptr sz) {
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, GetPageSizeCached()));
}

void *user_pvalloc(ThreadState *thr, uptr pc, uptr sz) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(sz, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportPvallocOverflow(sz, &stack);
  }
  // pvalloc(0) should allocate one page.
  sz = sz ? RoundUpTo(sz, PageSize) : PageSize;
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, PageSize));
}

uptr user_alloc_usable_size(const void *p) {
  if (p == 0 || !IsAppMem((uptr)p))
    return 0;
  MBlock *b = ctx->metamap.GetBlock((uptr)p);
  if (!b)
    return 0;  // Not a valid pointer.
  if (b->siz == 0)
    return 1;  // Zero-sized allocations are actually 1 byte.
  return b->siz;
}

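// Run user-installed malloc/free hooks, but only once the runtime is
// initialized and only if interceptors are not ignored for this thread.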
void invoke_malloc_hook(void *ptr, uptr size) {
  ThreadState *thr = cur_thread();
  if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
    return;
  RunMallocHooks(ptr, size);
}

void invoke_free_hook(void *ptr) {
  ThreadState *thr = cur_thread();
  if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
    return;
  RunFreeHooks(ptr);
}

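// Internal (runtime) allocation and deallocation. These must not be reached
// while thr->nomalloc is set; the CHECKs below fire in that case.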
void *Alloc(uptr sz) {
  ThreadState *thr = cur_thread();
  if (thr->nomalloc) {
    thr->nomalloc = 0;  // CHECK calls internal_malloc().
    CHECK(0);
  }
  InternalAllocAccess();
  return InternalAlloc(sz, &thr->proc()->internal_alloc_cache);
}

void FreeImpl(void *p) {
  ThreadState *thr = cur_thread();
  if (thr->nomalloc) {
    thr->nomalloc = 0;  // CHECK calls internal_malloc().
    CHECK(0);
  }
  InternalAllocAccess();
  InternalFree(p, &thr->proc()->internal_alloc_cache);
}

}  // namespace __tsan

using namespace __tsan;

extern "C" {
uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator()->GetStats(stats);
  return stats[AllocatorStatAllocated];
}

uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator()->GetStats(stats);
  return stats[AllocatorStatMapped];
}

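// TSan does not track free/unmapped byte counts; the next two functions
// return dummy non-zero values.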
uptr __sanitizer_get_free_bytes() {
  return 1;
}

uptr __sanitizer_get_unmapped_bytes() {
  return 1;
}

uptr __sanitizer_get_estimated_allocated_size(uptr size) {
  return size;
}

int __sanitizer_get_ownership(const void *p) {
  return allocator()->GetBlockBegin(p) != 0;
}

uptr __sanitizer_get_allocated_size(const void *p) {
  return user_alloc_usable_size(p);
}

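// Called when a thread goes idle: drains the per-Processor allocator caches
// and notifies the metamap via OnProcIdle().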
void __tsan_on_thread_idle() {
  ThreadState *thr = cur_thread();
  allocator()->SwallowCache(&thr->proc()->alloc_cache);
  internal_allocator()->SwallowCache(&thr->proc()->internal_alloc_cache);
  ctx->metamap.OnProcIdle(thr->proc());
}
}  // extern "C"