//===-- tsan_mman.cc ------------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_mman.h"
#include "tsan_rtl.h"
#include "tsan_report.h"
#include "tsan_flags.h"

// May be overridden by front-end.
SANITIZER_WEAK_DEFAULT_IMPL
void __sanitizer_malloc_hook(void *ptr, uptr size) {
  (void)ptr;
  (void)size;
}

SANITIZER_WEAK_DEFAULT_IMPL
void __sanitizer_free_hook(void *ptr) {
  (void)ptr;
}
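// Because the defaults above are weak, a front-end can observe every
// allocation by supplying strong definitions, along the lines of this
// illustrative sketch (not part of this file):
//   void __sanitizer_malloc_hook(void *ptr, uptr size) {
//     // record (ptr, size) in the front-end's own bookkeeping
//   }
//   void __sanitizer_free_hook(void *ptr) {
//     // forget ptr
//   }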

namespace __tsan {

struct MapUnmapCallback {
  void OnMap(uptr p, uptr size) const { }
  void OnUnmap(uptr p, uptr size) const {
    // We are about to unmap a chunk of user memory.
    // Mark the corresponding shadow memory as not needed.
    DontNeedShadowFor(p, size);
    // Mark the corresponding meta shadow memory as not needed.
    // Note the block does not contain any meta info at this point
    // (this happens after free).
    const uptr kMetaRatio = kMetaShadowCell / kMetaShadowSize;
    const uptr kPageSize = GetPageSizeCached() * kMetaRatio;
    // Block came from LargeMmapAllocator, so must be large.
    // We rely on this in the calculations below.
    CHECK_GE(size, 2 * kPageSize);
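    // The trimming below keeps only the kPageSize-aligned middle of
    // [p, p + size), so that the meta range released at the end consists of
    // whole OS pages. For example, assuming the usual 8-byte app cell to
    // 4-byte meta cell mapping (kMetaRatio == 2) and a 4K page, kPageSize is
    // 8K, and an 8K-aligned app range maps to a 4K-aligned meta range.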
    uptr diff = RoundUp(p, kPageSize) - p;
    if (diff != 0) {
      p += diff;
      size -= diff;
    }
    diff = p + size - RoundDown(p + size, kPageSize);
    if (diff != 0)
      size -= diff;
    uptr p_meta = (uptr)MemToMeta(p);
    ReleaseMemoryPagesToOS(p_meta, p_meta + size / kMetaRatio);
  }
};

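// Both the allocator and the global processor below live in statically
// allocated, cache-line-aligned buffers and are set up explicitly in
// InitializeAllocator()/InitializeAllocatorLate(), so no global constructors
// need to run for them.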
static char allocator_placeholder[sizeof(Allocator)] ALIGNED(64);
Allocator *allocator() {
  return reinterpret_cast<Allocator*>(&allocator_placeholder);
}

struct GlobalProc {
  Mutex mtx;
  Processor *proc;

  GlobalProc()
      : mtx(MutexTypeGlobalProc, StatMtxGlobalProc)
      , proc(ProcCreate()) {
  }
};

static char global_proc_placeholder[sizeof(GlobalProc)] ALIGNED(64);
GlobalProc *global_proc() {
  return reinterpret_cast<GlobalProc*>(&global_proc_placeholder);
}

ScopedGlobalProcessor::ScopedGlobalProcessor() {
  GlobalProc *gp = global_proc();
  ThreadState *thr = cur_thread();
  if (thr->proc())
    return;
  // If we don't have a proc, use the global one.
  // There are currently only two known cases where this path is triggered:
  //   __interceptor_free
  //   __nptl_deallocate_tsd
  //   start_thread
  //   clone
  // and:
  //   ResetRange
  //   __interceptor_munmap
  //   __deallocate_stack
  //   start_thread
  //   clone
  // Ideally, we destroy thread state (and unwire proc) when a thread actually
  // exits (i.e. when we join/wait it). Then we would not need the global proc.
  gp->mtx.Lock();
  ProcWire(gp->proc, thr);
}

ScopedGlobalProcessor::~ScopedGlobalProcessor() {
  GlobalProc *gp = global_proc();
  ThreadState *thr = cur_thread();
  if (thr->proc() != gp->proc)
    return;
  ProcUnwire(gp->proc, thr);
  gp->mtx.Unlock();
}

void InitializeAllocator() {
  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
  allocator()->Init(common_flags()->allocator_release_to_os_interval_ms);
}

void InitializeAllocatorLate() {
  new(global_proc()) GlobalProc();
}

void AllocatorProcStart(Processor *proc) {
  allocator()->InitCache(&proc->alloc_cache);
  internal_allocator()->InitCache(&proc->internal_alloc_cache);
}

void AllocatorProcFinish(Processor *proc) {
  allocator()->DestroyCache(&proc->alloc_cache);
  internal_allocator()->DestroyCache(&proc->internal_alloc_cache);
}

void AllocatorPrintStats() {
  allocator()->PrintStats();
}

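// Reports a malloc/free performed from inside a signal handler (such calls
// are not async-signal-safe) when the report_signal_unsafe flag is enabled.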
static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
  if (atomic_load_relaxed(&thr->in_signal_handler) == 0 ||
      !flags()->report_signal_unsafe)
    return;
  VarSizeStackTrace stack;
  ObtainCurrentStack(thr, pc, &stack);
  if (IsFiredSuppression(ctx, ReportTypeSignalUnsafe, stack))
    return;
  ThreadRegistryLock l(ctx->thread_registry);
  ScopedReport rep(ReportTypeSignalUnsafe);
  rep.AddStack(stack, true);
  OutputReport(thr, rep);
}

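// Central allocation path: the user-facing entry points below (user_alloc,
// user_calloc, user_memalign, etc.) all funnel into user_alloc_internal().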
void *user_alloc_internal(ThreadState *thr, uptr pc, uptr sz, uptr align,
                          bool signal) {
  if ((sz >= (1ull << 40)) || (align >= (1ull << 40)))
    return Allocator::FailureHandler::OnBadRequest();
  void *p = allocator()->Allocate(&thr->proc()->alloc_cache, sz, align);
  if (UNLIKELY(p == 0))
    return 0;
  if (ctx && ctx->initialized)
    OnUserAlloc(thr, pc, (uptr)p, sz, true);
  if (signal)
    SignalUnsafeCall(thr, pc);
  return p;
}

void user_free(ThreadState *thr, uptr pc, void *p, bool signal) {
  ScopedGlobalProcessor sgp;
  if (ctx && ctx->initialized)
    OnUserFree(thr, pc, (uptr)p, true);
  allocator()->Deallocate(&thr->proc()->alloc_cache, p);
  if (signal)
    SignalUnsafeCall(thr, pc);
}

void *user_alloc(ThreadState *thr, uptr pc, uptr sz) {
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, kDefaultAlignment));
}

void *user_calloc(ThreadState *thr, uptr pc, uptr size, uptr n) {
  if (UNLIKELY(CheckForCallocOverflow(size, n)))
    return SetErrnoOnNull(Allocator::FailureHandler::OnBadRequest());
  void *p = user_alloc_internal(thr, pc, n * size);
  if (p)
    internal_memset(p, 0, n * size);
  return SetErrnoOnNull(p);
}

void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write) {
  DPrintf("#%d: alloc(%zu) = %p\n", thr->tid, sz, p);
  ctx->metamap.AllocBlock(thr, pc, p, sz);
  if (write && thr->ignore_reads_and_writes == 0)
    MemoryRangeImitateWrite(thr, pc, (uptr)p, sz);
  else
    MemoryResetRange(thr, pc, (uptr)p, sz);
}

void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write) {
  CHECK_NE(p, (void*)0);
  uptr sz = ctx->metamap.FreeBlock(thr->proc(), p);
  DPrintf("#%d: free(%p, %zu)\n", thr->tid, p, sz);
  if (write && thr->ignore_reads_and_writes == 0)
    MemoryRangeFreed(thr, pc, (uptr)p, sz);
}

void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz) {
  // FIXME: Handle "shrinking" more efficiently,
  // it seems that some software actually does this.
  if (!p)
    return SetErrnoOnNull(user_alloc_internal(thr, pc, sz));
  if (!sz) {
    user_free(thr, pc, p);
    return nullptr;
  }
  void *new_p = user_alloc_internal(thr, pc, sz);
  if (new_p) {
    uptr old_sz = user_alloc_usable_size(p);
    internal_memcpy(new_p, p, min(old_sz, sz));
    user_free(thr, pc, p);
  }
  return SetErrnoOnNull(new_p);
}

void *user_memalign(ThreadState *thr, uptr pc, uptr align, uptr sz) {
  if (UNLIKELY(!IsPowerOfTwo(align))) {
    errno = errno_EINVAL;
    return Allocator::FailureHandler::OnBadRequest();
  }
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, align));
}

int user_posix_memalign(ThreadState *thr, uptr pc, void **memptr, uptr align,
                        uptr sz) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(align))) {
    Allocator::FailureHandler::OnBadRequest();
    return errno_EINVAL;
  }
  void *ptr = user_alloc_internal(thr, pc, sz, align);
  if (UNLIKELY(!ptr))
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, align));
  *memptr = ptr;
  return 0;
}

void *user_aligned_alloc(ThreadState *thr, uptr pc, uptr align, uptr sz) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(align, sz))) {
    errno = errno_EINVAL;
    return Allocator::FailureHandler::OnBadRequest();
  }
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, align));
}

void *user_valloc(ThreadState *thr, uptr pc, uptr sz) {
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, GetPageSizeCached()));
}

void *user_pvalloc(ThreadState *thr, uptr pc, uptr sz) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(sz, PageSize))) {
    errno = errno_ENOMEM;
    return Allocator::FailureHandler::OnBadRequest();
  }
  // pvalloc(0) should allocate one page.
  sz = sz ? RoundUpTo(sz, PageSize) : PageSize;
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, PageSize));
}

uptr user_alloc_usable_size(const void *p) {
  if (p == 0)
    return 0;
  MBlock *b = ctx->metamap.GetBlock((uptr)p);
  if (!b)
    return 0;  // Not a valid pointer.
  if (b->siz == 0)
    return 1;  // Zero-sized allocations are actually 1 byte.
  return b->siz;
}

void invoke_malloc_hook(void *ptr, uptr size) {
  ThreadState *thr = cur_thread();
  if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
    return;
  __sanitizer_malloc_hook(ptr, size);
  RunMallocHooks(ptr, size);
}

void invoke_free_hook(void *ptr) {
  ThreadState *thr = cur_thread();
  if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
    return;
  __sanitizer_free_hook(ptr);
  RunFreeHooks(ptr);
}

void *internal_alloc(MBlockType typ, uptr sz) {
  ThreadState *thr = cur_thread();
  if (thr->nomalloc) {
    thr->nomalloc = 0;  // CHECK calls internal_malloc().
    CHECK(0);
  }
  return InternalAlloc(sz, &thr->proc()->internal_alloc_cache);
}

void internal_free(void *p) {
  ThreadState *thr = cur_thread();
  if (thr->nomalloc) {
    thr->nomalloc = 0;  // CHECK calls internal_malloc().
    CHECK(0);
  }
  InternalFree(p, &thr->proc()->internal_alloc_cache);
}

}  // namespace __tsan

using namespace __tsan;

extern "C" {
uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator()->GetStats(stats);
  return stats[AllocatorStatAllocated];
}

uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator()->GetStats(stats);
  return stats[AllocatorStatMapped];
}

uptr __sanitizer_get_free_bytes() {
  return 1;
}

uptr __sanitizer_get_unmapped_bytes() {
  return 1;
}

uptr __sanitizer_get_estimated_allocated_size(uptr size) {
  return size;
}

int __sanitizer_get_ownership(const void *p) {
  return allocator()->GetBlockBegin(p) != 0;
}

uptr __sanitizer_get_allocated_size(const void *p) {
  return user_alloc_usable_size(p);
}

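// Called by an embedder when a thread becomes idle (e.g. in a thread pool):
// returns cached vector clocks, allocator caches and meta shadow back to the
// runtime so that long-idle threads do not pin memory.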
void __tsan_on_thread_idle() {
  ThreadState *thr = cur_thread();
  thr->clock.ResetCached(&thr->proc()->clock_cache);
  thr->last_sleep_clock.ResetCached(&thr->proc()->clock_cache);
  allocator()->SwallowCache(&thr->proc()->alloc_cache);
  internal_allocator()->SwallowCache(&thr->proc()->internal_alloc_cache);
  ctx->metamap.OnProcIdle(thr->proc());
}
}  // extern "C"