//===-- ThreadSanitizer.cpp - race detector -------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer, a race detector.
//
// The tool is under development; for details about previous versions see
// http://code.google.com/p/data-race-test
//
// The instrumentation phase is quite simple:
//   - Insert calls to the run-time library before every memory access.
//     - Optimizations may be applied to avoid instrumenting some of the
//       accesses.
//   - Insert calls at function entry/exit.
// The rest is handled by the run-time library.
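//
// For example (a sketch of the intent, not literal pass output), a plain
// 4-byte load such as
//   %v = load i32, i32* %p
// conceptually becomes
//   call void @__tsan_read4(i8* %p.i8)   ; %p.i8 is %p cast to i8*
//   %v = load i32, i32* %p
// and the race detection itself happens inside the run-time library.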
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Instrumentation/ThreadSanitizer.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/InitializePasses.h"
#include "llvm/ProfileData/InstrProf.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Instrumentation.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/EscapeEnumerator.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"

using namespace llvm;

#define DEBUG_TYPE "tsan"

static cl::opt<bool> ClInstrumentMemoryAccesses(
    "tsan-instrument-memory-accesses", cl::init(true),
    cl::desc("Instrument memory accesses"), cl::Hidden);
static cl::opt<bool> ClInstrumentFuncEntryExit(
    "tsan-instrument-func-entry-exit", cl::init(true),
    cl::desc("Instrument function entry and exit"), cl::Hidden);
static cl::opt<bool> ClHandleCxxExceptions(
    "tsan-handle-cxx-exceptions", cl::init(true),
    cl::desc("Handle C++ exceptions (insert cleanup blocks for unwinding)"),
    cl::Hidden);
static cl::opt<bool> ClInstrumentAtomics(
    "tsan-instrument-atomics", cl::init(true),
    cl::desc("Instrument atomics"), cl::Hidden);
static cl::opt<bool> ClInstrumentMemIntrinsics(
    "tsan-instrument-memintrinsics", cl::init(true),
    cl::desc("Instrument memintrinsics (memset/memcpy/memmove)"), cl::Hidden);
static cl::opt<bool> ClDistinguishVolatile(
    "tsan-distinguish-volatile", cl::init(false),
    cl::desc("Emit special instrumentation for accesses to volatiles"),
    cl::Hidden);
static cl::opt<bool> ClInstrumentReadBeforeWrite(
    "tsan-instrument-read-before-write", cl::init(false),
    cl::desc("Do not eliminate read instrumentation for read-before-writes"),
    cl::Hidden);
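// The flags above are hidden; a typical way to toggle them (assuming a stock
// LLVM build) is via `opt -tsan -tsan-distinguish-volatile=1 ...` or, from
// clang, `-mllvm -tsan-distinguish-volatile=1`.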

STATISTIC(NumInstrumentedReads, "Number of instrumented reads");
STATISTIC(NumInstrumentedWrites, "Number of instrumented writes");
STATISTIC(NumOmittedReadsBeforeWrite,
          "Number of reads ignored due to following writes");
STATISTIC(NumAccessesWithBadSize, "Number of accesses with bad size");
STATISTIC(NumInstrumentedVtableWrites, "Number of vtable ptr writes");
STATISTIC(NumInstrumentedVtableReads, "Number of vtable ptr reads");
STATISTIC(NumOmittedReadsFromConstantGlobals,
          "Number of reads from constant globals");
STATISTIC(NumOmittedReadsFromVtable, "Number of vtable reads");
STATISTIC(NumOmittedNonCaptured, "Number of accesses ignored due to capturing");

static const char *const kTsanModuleCtorName = "tsan.module_ctor";
static const char *const kTsanInitName = "__tsan_init";

namespace {

/// ThreadSanitizer: instrument the code in module to find races.
///
/// Instantiating ThreadSanitizer inserts the tsan runtime library API function
/// declarations into the module if they don't exist already. Instantiating
/// ensures the __tsan_init function is in the list of global constructors for
/// the module.
struct ThreadSanitizer {
  bool sanitizeFunction(Function &F, const TargetLibraryInfo &TLI);

private:
  void initialize(Module &M);
  bool instrumentLoadOrStore(Instruction *I, const DataLayout &DL);
  bool instrumentAtomic(Instruction *I, const DataLayout &DL);
  bool instrumentMemIntrinsic(Instruction *I);
  void chooseInstructionsToInstrument(SmallVectorImpl<Instruction *> &Local,
                                      SmallVectorImpl<Instruction *> &All,
                                      const DataLayout &DL);
  bool addrPointsToConstantData(Value *Addr);
  int getMemoryAccessFuncIndex(Value *Addr, const DataLayout &DL);
  void InsertRuntimeIgnores(Function &F);

  Type *IntptrTy;
  FunctionCallee TsanFuncEntry;
  FunctionCallee TsanFuncExit;
  FunctionCallee TsanIgnoreBegin;
  FunctionCallee TsanIgnoreEnd;
  // Access sizes are powers of two: 1, 2, 4, 8, 16.
  static const size_t kNumberOfAccessSizes = 5;
  FunctionCallee TsanRead[kNumberOfAccessSizes];
  FunctionCallee TsanWrite[kNumberOfAccessSizes];
  FunctionCallee TsanUnalignedRead[kNumberOfAccessSizes];
  FunctionCallee TsanUnalignedWrite[kNumberOfAccessSizes];
  FunctionCallee TsanVolatileRead[kNumberOfAccessSizes];
  FunctionCallee TsanVolatileWrite[kNumberOfAccessSizes];
  FunctionCallee TsanUnalignedVolatileRead[kNumberOfAccessSizes];
  FunctionCallee TsanUnalignedVolatileWrite[kNumberOfAccessSizes];
  FunctionCallee TsanAtomicLoad[kNumberOfAccessSizes];
  FunctionCallee TsanAtomicStore[kNumberOfAccessSizes];
  FunctionCallee TsanAtomicRMW[AtomicRMWInst::LAST_BINOP + 1]
                              [kNumberOfAccessSizes];
  FunctionCallee TsanAtomicCAS[kNumberOfAccessSizes];
  FunctionCallee TsanAtomicThreadFence;
  FunctionCallee TsanAtomicSignalFence;
  FunctionCallee TsanVptrUpdate;
  FunctionCallee TsanVptrLoad;
  FunctionCallee MemmoveFn, MemcpyFn, MemsetFn;
};

struct ThreadSanitizerLegacyPass : FunctionPass {
  ThreadSanitizerLegacyPass() : FunctionPass(ID) {
    initializeThreadSanitizerLegacyPassPass(*PassRegistry::getPassRegistry());
  }
  StringRef getPassName() const override;
  void getAnalysisUsage(AnalysisUsage &AU) const override;
  bool runOnFunction(Function &F) override;
  bool doInitialization(Module &M) override;
  static char ID; // Pass identification, replacement for typeid.
private:
  Optional<ThreadSanitizer> TSan;
};

void insertModuleCtor(Module &M) {
  getOrCreateSanitizerCtorAndInitFunctions(
      M, kTsanModuleCtorName, kTsanInitName, /*InitArgTypes=*/{},
      /*InitArgs=*/{},
      // This callback is invoked when the functions are created the first
      // time. Hook them into the global ctors list in that case:
      [&](Function *Ctor, FunctionCallee) { appendToGlobalCtors(M, Ctor, 0); });
}

} // namespace

PreservedAnalyses ThreadSanitizerPass::run(Function &F,
                                           FunctionAnalysisManager &FAM) {
  ThreadSanitizer TSan;
  if (TSan.sanitizeFunction(F, FAM.getResult<TargetLibraryAnalysis>(F)))
    return PreservedAnalyses::none();
  return PreservedAnalyses::all();
}

PreservedAnalyses ThreadSanitizerPass::run(Module &M,
                                           ModuleAnalysisManager &MAM) {
  insertModuleCtor(M);
  return PreservedAnalyses::none();
}

char ThreadSanitizerLegacyPass::ID = 0;
INITIALIZE_PASS_BEGIN(ThreadSanitizerLegacyPass, "tsan",
                      "ThreadSanitizer: detects data races.", false, false)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(ThreadSanitizerLegacyPass, "tsan",
                    "ThreadSanitizer: detects data races.", false, false)

StringRef ThreadSanitizerLegacyPass::getPassName() const {
  return "ThreadSanitizerLegacyPass";
}

void ThreadSanitizerLegacyPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<TargetLibraryInfoWrapperPass>();
}

bool ThreadSanitizerLegacyPass::doInitialization(Module &M) {
  insertModuleCtor(M);
  TSan.emplace();
  return true;
}

bool ThreadSanitizerLegacyPass::runOnFunction(Function &F) {
  auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
  TSan->sanitizeFunction(F, TLI);
  return true;
}

FunctionPass *llvm::createThreadSanitizerLegacyPassPass() {
  return new ThreadSanitizerLegacyPass();
}

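// Declares (if not already present) the run-time entry points used below. As
// a sketch, assuming the standard tsan run-time, the C-level prototypes look
// roughly like:
//   void __tsan_read4(void *addr);
//   void __tsan_write4(void *addr);
//   int __tsan_atomic32_load(const volatile int *a, int mo);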
void ThreadSanitizer::initialize(Module &M) {
  const DataLayout &DL = M.getDataLayout();
  IntptrTy = DL.getIntPtrType(M.getContext());

  IRBuilder<> IRB(M.getContext());
  AttributeList Attr;
  Attr = Attr.addAttribute(M.getContext(), AttributeList::FunctionIndex,
                           Attribute::NoUnwind);
  // Initialize the callbacks.
  TsanFuncEntry = M.getOrInsertFunction("__tsan_func_entry", Attr,
                                        IRB.getVoidTy(), IRB.getInt8PtrTy());
  TsanFuncExit =
      M.getOrInsertFunction("__tsan_func_exit", Attr, IRB.getVoidTy());
  TsanIgnoreBegin = M.getOrInsertFunction("__tsan_ignore_thread_begin", Attr,
                                          IRB.getVoidTy());
  TsanIgnoreEnd =
      M.getOrInsertFunction("__tsan_ignore_thread_end", Attr, IRB.getVoidTy());
  IntegerType *OrdTy = IRB.getInt32Ty();
  for (size_t i = 0; i < kNumberOfAccessSizes; ++i) {
    const unsigned ByteSize = 1U << i;
    const unsigned BitSize = ByteSize * 8;
    std::string ByteSizeStr = utostr(ByteSize);
    std::string BitSizeStr = utostr(BitSize);
    SmallString<32> ReadName("__tsan_read" + ByteSizeStr);
    TsanRead[i] = M.getOrInsertFunction(ReadName, Attr, IRB.getVoidTy(),
                                        IRB.getInt8PtrTy());

    SmallString<32> WriteName("__tsan_write" + ByteSizeStr);
    TsanWrite[i] = M.getOrInsertFunction(WriteName, Attr, IRB.getVoidTy(),
                                         IRB.getInt8PtrTy());

    SmallString<64> UnalignedReadName("__tsan_unaligned_read" + ByteSizeStr);
    TsanUnalignedRead[i] = M.getOrInsertFunction(
        UnalignedReadName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());

    SmallString<64> UnalignedWriteName("__tsan_unaligned_write" + ByteSizeStr);
    TsanUnalignedWrite[i] = M.getOrInsertFunction(
        UnalignedWriteName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());

    SmallString<64> VolatileReadName("__tsan_volatile_read" + ByteSizeStr);
    TsanVolatileRead[i] = M.getOrInsertFunction(
        VolatileReadName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());

    SmallString<64> VolatileWriteName("__tsan_volatile_write" + ByteSizeStr);
    TsanVolatileWrite[i] = M.getOrInsertFunction(
        VolatileWriteName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());

    SmallString<64> UnalignedVolatileReadName("__tsan_unaligned_volatile_read" +
                                              ByteSizeStr);
    TsanUnalignedVolatileRead[i] = M.getOrInsertFunction(
        UnalignedVolatileReadName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());

    SmallString<64> UnalignedVolatileWriteName(
        "__tsan_unaligned_volatile_write" + ByteSizeStr);
    TsanUnalignedVolatileWrite[i] = M.getOrInsertFunction(
        UnalignedVolatileWriteName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());

    Type *Ty = Type::getIntNTy(M.getContext(), BitSize);
    Type *PtrTy = Ty->getPointerTo();
    SmallString<32> AtomicLoadName("__tsan_atomic" + BitSizeStr + "_load");
    TsanAtomicLoad[i] =
        M.getOrInsertFunction(AtomicLoadName, Attr, Ty, PtrTy, OrdTy);

    SmallString<32> AtomicStoreName("__tsan_atomic" + BitSizeStr + "_store");
    TsanAtomicStore[i] = M.getOrInsertFunction(
        AtomicStoreName, Attr, IRB.getVoidTy(), PtrTy, Ty, OrdTy);

    for (unsigned Op = AtomicRMWInst::FIRST_BINOP;
         Op <= AtomicRMWInst::LAST_BINOP; ++Op) {
      TsanAtomicRMW[Op][i] = nullptr;
      const char *NamePart = nullptr;
      if (Op == AtomicRMWInst::Xchg)
        NamePart = "_exchange";
      else if (Op == AtomicRMWInst::Add)
        NamePart = "_fetch_add";
      else if (Op == AtomicRMWInst::Sub)
        NamePart = "_fetch_sub";
      else if (Op == AtomicRMWInst::And)
        NamePart = "_fetch_and";
      else if (Op == AtomicRMWInst::Or)
        NamePart = "_fetch_or";
      else if (Op == AtomicRMWInst::Xor)
        NamePart = "_fetch_xor";
      else if (Op == AtomicRMWInst::Nand)
        NamePart = "_fetch_nand";
      else
        continue;
      SmallString<32> RMWName("__tsan_atomic" + itostr(BitSize) + NamePart);
      TsanAtomicRMW[Op][i] =
          M.getOrInsertFunction(RMWName, Attr, Ty, PtrTy, Ty, OrdTy);
    }

    SmallString<32> AtomicCASName("__tsan_atomic" + BitSizeStr +
                                  "_compare_exchange_val");
    TsanAtomicCAS[i] = M.getOrInsertFunction(AtomicCASName, Attr, Ty, PtrTy, Ty,
                                             Ty, OrdTy, OrdTy);
  }
  TsanVptrUpdate =
      M.getOrInsertFunction("__tsan_vptr_update", Attr, IRB.getVoidTy(),
                            IRB.getInt8PtrTy(), IRB.getInt8PtrTy());
  TsanVptrLoad = M.getOrInsertFunction("__tsan_vptr_read", Attr,
                                       IRB.getVoidTy(), IRB.getInt8PtrTy());
  TsanAtomicThreadFence = M.getOrInsertFunction("__tsan_atomic_thread_fence",
                                                Attr, IRB.getVoidTy(), OrdTy);
  TsanAtomicSignalFence = M.getOrInsertFunction("__tsan_atomic_signal_fence",
                                                Attr, IRB.getVoidTy(), OrdTy);

  MemmoveFn =
      M.getOrInsertFunction("memmove", Attr, IRB.getInt8PtrTy(),
                            IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IntptrTy);
  MemcpyFn =
      M.getOrInsertFunction("memcpy", Attr, IRB.getInt8PtrTy(),
                            IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IntptrTy);
  MemsetFn =
      M.getOrInsertFunction("memset", Attr, IRB.getInt8PtrTy(),
                            IRB.getInt8PtrTy(), IRB.getInt32Ty(), IntptrTy);
}

static bool isVtableAccess(Instruction *I) {
  if (MDNode *Tag = I->getMetadata(LLVMContext::MD_tbaa))
    return Tag->isTBAAVtableAccess();
  return false;
}

// Do not instrument known races/"benign races" that come from compiler
// instrumentation. The user has no way of suppressing them.
static bool shouldInstrumentReadWriteFromAddress(const Module *M, Value *Addr) {
  // Peel off GEPs and BitCasts.
  Addr = Addr->stripInBoundsOffsets();

  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Addr)) {
    if (GV->hasSection()) {
      StringRef SectionName = GV->getSection();
      // Check if the global is in the PGO counters section.
      auto OF = Triple(M->getTargetTriple()).getObjectFormat();
      if (SectionName.endswith(
              getInstrProfSectionName(IPSK_cnts, OF, /*AddSegmentInfo=*/false)))
        return false;
    }

    // Check if the global is private gcov data.
    if (GV->getName().startswith("__llvm_gcov") ||
        GV->getName().startswith("__llvm_gcda"))
      return false;
  }

  // Do not instrument accesses from different address spaces; we cannot deal
  // with them.
  if (Addr) {
    Type *PtrTy = cast<PointerType>(Addr->getType()->getScalarType());
    if (PtrTy->getPointerAddressSpace() != 0)
      return false;
  }

  return true;
}

bool ThreadSanitizer::addrPointsToConstantData(Value *Addr) {
  // If this is a GEP, just analyze its pointer operand.
  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Addr))
    Addr = GEP->getPointerOperand();

  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Addr)) {
    if (GV->isConstant()) {
      // Reads from constant globals can not race with any writes.
      NumOmittedReadsFromConstantGlobals++;
      return true;
    }
  } else if (LoadInst *L = dyn_cast<LoadInst>(Addr)) {
    if (isVtableAccess(L)) {
      // Reads from a vtable pointer can not race with any writes.
      NumOmittedReadsFromVtable++;
      return true;
    }
  }
  return false;
}

// Instrumenting some of the accesses may be proven redundant.
// Currently handled:
//  - read-before-write (within same BB, no calls between)
//  - not captured variables
//
// We do not handle some of the patterns that should not survive
// after the classic compiler optimizations.
// E.g. two reads from the same temp should be eliminated by CSE,
// two writes should be eliminated by DSE, etc.
//
// 'Local' is a vector of insns within the same BB (no calls between).
// 'All' is a vector of insns that will be instrumented.
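//
// For example, in a block like
//   %v = load i32, i32* %p    ; read-before-write: instrumentation omitted
//   store i32 %v2, i32* %p    ; instrumented
// only the store is instrumented (unless -tsan-instrument-read-before-write
// is set), since a racing access to %p is still caught via the write.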
void ThreadSanitizer::chooseInstructionsToInstrument(
    SmallVectorImpl<Instruction *> &Local, SmallVectorImpl<Instruction *> &All,
    const DataLayout &DL) {
  SmallPtrSet<Value *, 8> WriteTargets;
  // Iterate from the end.
  for (Instruction *I : reverse(Local)) {
    if (StoreInst *Store = dyn_cast<StoreInst>(I)) {
      Value *Addr = Store->getPointerOperand();
      if (!shouldInstrumentReadWriteFromAddress(I->getModule(), Addr))
        continue;
      WriteTargets.insert(Addr);
    } else {
      LoadInst *Load = cast<LoadInst>(I);
      Value *Addr = Load->getPointerOperand();
      if (!shouldInstrumentReadWriteFromAddress(I->getModule(), Addr))
        continue;
      if (!ClInstrumentReadBeforeWrite && WriteTargets.count(Addr)) {
        // We will write to this temp, so no reason to analyze the read.
        NumOmittedReadsBeforeWrite++;
        continue;
      }
      if (addrPointsToConstantData(Addr)) {
        // Addr points to some constant data -- it can not race with any
        // writes.
        continue;
      }
    }
    Value *Addr = isa<StoreInst>(*I)
                      ? cast<StoreInst>(I)->getPointerOperand()
                      : cast<LoadInst>(I)->getPointerOperand();
    if (isa<AllocaInst>(GetUnderlyingObject(Addr, DL)) &&
        !PointerMayBeCaptured(Addr, true, true)) {
      // The variable is addressable but not captured, so it cannot be
      // referenced from a different thread and participate in a data race
      // (see llvm/Analysis/CaptureTracking.h for details).
      NumOmittedNonCaptured++;
      continue;
    }
    All.push_back(I);
  }
  Local.clear();
}

static bool isAtomic(Instruction *I) {
  // TODO: Ask TTI whether synchronization scope is between threads.
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return LI->isAtomic() && LI->getSyncScopeID() != SyncScope::SingleThread;
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->isAtomic() && SI->getSyncScopeID() != SyncScope::SingleThread;
  if (isa<AtomicRMWInst>(I))
    return true;
  if (isa<AtomicCmpXchgInst>(I))
    return true;
  if (isa<FenceInst>(I))
    return true;
  return false;
}

void ThreadSanitizer::InsertRuntimeIgnores(Function &F) {
  IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI());
  IRB.CreateCall(TsanIgnoreBegin);
  EscapeEnumerator EE(F, "tsan_ignore_cleanup", ClHandleCxxExceptions);
  while (IRBuilder<> *AtExit = EE.Next()) {
    AtExit->CreateCall(TsanIgnoreEnd);
  }
}

bool ThreadSanitizer::sanitizeFunction(Function &F,
                                       const TargetLibraryInfo &TLI) {
  // This is required to prevent instrumenting the call to __tsan_init from
  // within the module constructor.
  if (F.getName() == kTsanModuleCtorName)
    return false;
  // Naked functions can not have prologue/epilogue
  // (__tsan_func_entry/__tsan_func_exit) generated, so don't instrument them
  // at all.
  if (F.hasFnAttribute(Attribute::Naked))
    return false;
  initialize(*F.getParent());
  SmallVector<Instruction *, 8> AllLoadsAndStores;
  SmallVector<Instruction *, 8> LocalLoadsAndStores;
  SmallVector<Instruction *, 8> AtomicAccesses;
  SmallVector<Instruction *, 8> MemIntrinCalls;
  bool Res = false;
  bool HasCalls = false;
  bool SanitizeFunction = F.hasFnAttribute(Attribute::SanitizeThread);
  const DataLayout &DL = F.getParent()->getDataLayout();

  // Traverse all instructions, collect loads/stores/returns, check for calls.
  for (auto &BB : F) {
    for (auto &Inst : BB) {
      if (isAtomic(&Inst))
        AtomicAccesses.push_back(&Inst);
      else if (isa<LoadInst>(Inst) || isa<StoreInst>(Inst))
        LocalLoadsAndStores.push_back(&Inst);
      else if (isa<CallInst>(Inst) || isa<InvokeInst>(Inst)) {
        if (CallInst *CI = dyn_cast<CallInst>(&Inst))
          maybeMarkSanitizerLibraryCallNoBuiltin(CI, &TLI);
        if (isa<MemIntrinsic>(Inst))
          MemIntrinCalls.push_back(&Inst);
        HasCalls = true;
        chooseInstructionsToInstrument(LocalLoadsAndStores, AllLoadsAndStores,
                                       DL);
      }
    }
    chooseInstructionsToInstrument(LocalLoadsAndStores, AllLoadsAndStores, DL);
  }

  // We have collected all loads and stores.
  // FIXME: many of these accesses do not need to be checked for races
  // (e.g. variables that do not escape, etc).

  // Instrument memory accesses only if we want to report bugs in the function.
  if (ClInstrumentMemoryAccesses && SanitizeFunction)
    for (auto Inst : AllLoadsAndStores) {
      Res |= instrumentLoadOrStore(Inst, DL);
    }

  // Instrument atomic memory accesses in any case (they can be used to
  // implement synchronization).
  if (ClInstrumentAtomics)
    for (auto Inst : AtomicAccesses) {
      Res |= instrumentAtomic(Inst, DL);
    }

  if (ClInstrumentMemIntrinsics && SanitizeFunction)
    for (auto Inst : MemIntrinCalls) {
      Res |= instrumentMemIntrinsic(Inst);
    }

  if (F.hasFnAttribute("sanitize_thread_no_checking_at_run_time")) {
    assert(!F.hasFnAttribute(Attribute::SanitizeThread));
    if (HasCalls)
      InsertRuntimeIgnores(F);
  }

  // Instrument function entry/exit points if there were instrumented accesses.
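  // In effect (a sketch): the prologue gains a call
  // __tsan_func_entry(__builtin_return_address(0)), and every normal or
  // exceptional exit found by EscapeEnumerator gains a matching
  // __tsan_func_exit() call.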
  if ((Res || HasCalls) && ClInstrumentFuncEntryExit) {
    IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI());
    auto ProgramAsPtrTy = Type::getInt8PtrTy(F.getParent()->getContext(),
                                             DL.getProgramAddressSpace());
    Value *ReturnAddress = IRB.CreateCall(
        Intrinsic::getDeclaration(F.getParent(), Intrinsic::returnaddress,
                                  {ProgramAsPtrTy}),
        IRB.getInt32(0));
    IRB.CreateCall(TsanFuncEntry, ReturnAddress);

    EscapeEnumerator EE(F, "tsan_cleanup", ClHandleCxxExceptions);
    while (IRBuilder<> *AtExit = EE.Next()) {
      AtExit->CreateCall(TsanFuncExit, {});
    }
    Res = true;
  }
  return Res;
}

bool ThreadSanitizer::instrumentLoadOrStore(Instruction *I,
                                            const DataLayout &DL) {
  IRBuilder<> IRB(I);
  bool IsWrite = isa<StoreInst>(*I);
  Value *Addr = IsWrite ? cast<StoreInst>(I)->getPointerOperand()
                        : cast<LoadInst>(I)->getPointerOperand();

  // swifterror memory addresses are mem2reg promoted by instruction selection.
  // As such they cannot have regular uses like an instrumentation function and
  // it makes no sense to track them as memory.
  if (Addr->isSwiftError())
    return false;

  int Idx = getMemoryAccessFuncIndex(Addr, DL);
  if (Idx < 0)
    return false;
  if (IsWrite && isVtableAccess(I)) {
    LLVM_DEBUG(dbgs() << "  VPTR : " << *I << "\n");
    Value *StoredValue = cast<StoreInst>(I)->getValueOperand();
    // StoredValue may be a vector type if we are storing several vptrs at
    // once. In this case, just take the first element of the vector since
    // this is enough to find vptr races.
    if (isa<VectorType>(StoredValue->getType()))
      StoredValue = IRB.CreateExtractElement(
          StoredValue, ConstantInt::get(IRB.getInt32Ty(), 0));
    if (StoredValue->getType()->isIntegerTy())
      StoredValue = IRB.CreateIntToPtr(StoredValue, IRB.getInt8PtrTy());
    // Call TsanVptrUpdate.
    IRB.CreateCall(TsanVptrUpdate,
                   {IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()),
                    IRB.CreatePointerCast(StoredValue, IRB.getInt8PtrTy())});
    NumInstrumentedVtableWrites++;
    return true;
  }
  if (!IsWrite && isVtableAccess(I)) {
    IRB.CreateCall(TsanVptrLoad,
                   IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()));
    NumInstrumentedVtableReads++;
    return true;
  }
  const unsigned Alignment = IsWrite ? cast<StoreInst>(I)->getAlignment()
                                     : cast<LoadInst>(I)->getAlignment();
  const bool IsVolatile =
      ClDistinguishVolatile && (IsWrite ? cast<StoreInst>(I)->isVolatile()
                                        : cast<LoadInst>(I)->isVolatile());
  Type *OrigTy = cast<PointerType>(Addr->getType())->getElementType();
  const uint32_t TypeSize = DL.getTypeStoreSizeInBits(OrigTy);
  FunctionCallee OnAccessFunc = nullptr;
  if (Alignment == 0 || Alignment >= 8 || (Alignment % (TypeSize / 8)) == 0) {
    if (IsVolatile)
      OnAccessFunc = IsWrite ? TsanVolatileWrite[Idx] : TsanVolatileRead[Idx];
    else
      OnAccessFunc = IsWrite ? TsanWrite[Idx] : TsanRead[Idx];
  } else {
    if (IsVolatile)
      OnAccessFunc = IsWrite ? TsanUnalignedVolatileWrite[Idx]
                             : TsanUnalignedVolatileRead[Idx];
    else
      OnAccessFunc = IsWrite ? TsanUnalignedWrite[Idx] : TsanUnalignedRead[Idx];
  }
  IRB.CreateCall(OnAccessFunc, IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()));
  if (IsWrite) NumInstrumentedWrites++;
  else NumInstrumentedReads++;
  return true;
}

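// Maps an LLVM atomic ordering to the integer encoding used by the tsan
// run-time, which mirrors C++11 std::memory_order (relaxed = 0, consume = 1,
// acquire = 2, release = 3, acq_rel = 4, seq_cst = 5); see the switch below.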
static ConstantInt *createOrdering(IRBuilder<> *IRB, AtomicOrdering ord) {
  uint32_t v = 0;
  switch (ord) {
  case AtomicOrdering::NotAtomic:
    llvm_unreachable("unexpected atomic ordering!");
  case AtomicOrdering::Unordered: LLVM_FALLTHROUGH;
  case AtomicOrdering::Monotonic: v = 0; break;
  // Not specified yet:
  // case AtomicOrdering::Consume: v = 1; break;
  case AtomicOrdering::Acquire: v = 2; break;
  case AtomicOrdering::Release: v = 3; break;
  case AtomicOrdering::AcquireRelease: v = 4; break;
  case AtomicOrdering::SequentiallyConsistent: v = 5; break;
  }
  return IRB->getInt32(v);
}

// If a memset intrinsic gets inlined by the code gen, we will miss races on it.
// So, we either need to ensure the intrinsic is not inlined, or instrument it.
// We do not instrument memset/memmove/memcpy intrinsics (too complicated);
// instead we simply replace them with regular function calls, which are then
// intercepted by the run-time.
// Since tsan is running after everyone else, the calls should not be
// replaced back with intrinsics. If that becomes wrong at some point,
// we will need to call e.g. __tsan_memset to avoid the intrinsics.
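//
// For example, a memcpy intrinsic call
//   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %n, i1 false)
// is replaced with a plain libc-style call
//   call i8* @memcpy(i8* %dst, i8* %src, i64 %n)
// which the tsan run-time interposes on.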
bool ThreadSanitizer::instrumentMemIntrinsic(Instruction *I) {
  IRBuilder<> IRB(I);
  if (MemSetInst *M = dyn_cast<MemSetInst>(I)) {
    IRB.CreateCall(
        MemsetFn,
        {IRB.CreatePointerCast(M->getArgOperand(0), IRB.getInt8PtrTy()),
         IRB.CreateIntCast(M->getArgOperand(1), IRB.getInt32Ty(), false),
         IRB.CreateIntCast(M->getArgOperand(2), IntptrTy, false)});
    I->eraseFromParent();
  } else if (MemTransferInst *M = dyn_cast<MemTransferInst>(I)) {
    IRB.CreateCall(
        isa<MemCpyInst>(M) ? MemcpyFn : MemmoveFn,
        {IRB.CreatePointerCast(M->getArgOperand(0), IRB.getInt8PtrTy()),
         IRB.CreatePointerCast(M->getArgOperand(1), IRB.getInt8PtrTy()),
         IRB.CreateIntCast(M->getArgOperand(2), IntptrTy, false)});
    I->eraseFromParent();
  }
  return false;
}

// Both llvm and ThreadSanitizer atomic operations are based on the C++11/C1x
// standards. For background see the C++11 standard. A slightly older, publicly
// available draft of the standard (not entirely up-to-date, but close enough
// for casual browsing) is available here:
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2011/n3242.pdf
// The following page contains more background information:
// http://www.hpl.hp.com/personal/Hans_Boehm/c++mm/

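// For example (a sketch), an acquire load
//   %v = load atomic i32, i32* %p acquire, align 4
// becomes
//   %v = call i32 @__tsan_atomic32_load(i32* %p, i32 2)
// where 2 is the encoding of acquire produced by createOrdering() above.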
bool ThreadSanitizer::instrumentAtomic(Instruction *I, const DataLayout &DL) {
  IRBuilder<> IRB(I);
  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    Value *Addr = LI->getPointerOperand();
    int Idx = getMemoryAccessFuncIndex(Addr, DL);
    if (Idx < 0)
      return false;
    const unsigned ByteSize = 1U << Idx;
    const unsigned BitSize = ByteSize * 8;
    Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
    Type *PtrTy = Ty->getPointerTo();
    Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
                     createOrdering(&IRB, LI->getOrdering())};
    Type *OrigTy = cast<PointerType>(Addr->getType())->getElementType();
    Value *C = IRB.CreateCall(TsanAtomicLoad[Idx], Args);
    Value *Cast = IRB.CreateBitOrPointerCast(C, OrigTy);
    I->replaceAllUsesWith(Cast);
  } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
    Value *Addr = SI->getPointerOperand();
    int Idx = getMemoryAccessFuncIndex(Addr, DL);
    if (Idx < 0)
      return false;
    const unsigned ByteSize = 1U << Idx;
    const unsigned BitSize = ByteSize * 8;
    Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
    Type *PtrTy = Ty->getPointerTo();
    Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
                     IRB.CreateBitOrPointerCast(SI->getValueOperand(), Ty),
                     createOrdering(&IRB, SI->getOrdering())};
    CallInst *C = CallInst::Create(TsanAtomicStore[Idx], Args);
    ReplaceInstWithInst(I, C);
  } else if (AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I)) {
    Value *Addr = RMWI->getPointerOperand();
    int Idx = getMemoryAccessFuncIndex(Addr, DL);
    if (Idx < 0)
      return false;
    FunctionCallee F = TsanAtomicRMW[RMWI->getOperation()][Idx];
    if (!F)
      return false;
    const unsigned ByteSize = 1U << Idx;
    const unsigned BitSize = ByteSize * 8;
    Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
    Type *PtrTy = Ty->getPointerTo();
    Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
                     IRB.CreateIntCast(RMWI->getValOperand(), Ty, false),
                     createOrdering(&IRB, RMWI->getOrdering())};
    CallInst *C = CallInst::Create(F, Args);
    ReplaceInstWithInst(I, C);
  } else if (AtomicCmpXchgInst *CASI = dyn_cast<AtomicCmpXchgInst>(I)) {
    Value *Addr = CASI->getPointerOperand();
    int Idx = getMemoryAccessFuncIndex(Addr, DL);
    if (Idx < 0)
      return false;
    const unsigned ByteSize = 1U << Idx;
    const unsigned BitSize = ByteSize * 8;
    Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
    Type *PtrTy = Ty->getPointerTo();
    Value *CmpOperand =
        IRB.CreateBitOrPointerCast(CASI->getCompareOperand(), Ty);
    Value *NewOperand =
        IRB.CreateBitOrPointerCast(CASI->getNewValOperand(), Ty);
    Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
                     CmpOperand,
                     NewOperand,
                     createOrdering(&IRB, CASI->getSuccessOrdering()),
                     createOrdering(&IRB, CASI->getFailureOrdering())};
    CallInst *C = IRB.CreateCall(TsanAtomicCAS[Idx], Args);
    Value *Success = IRB.CreateICmpEQ(C, CmpOperand);
    Value *OldVal = C;
    Type *OrigOldValTy = CASI->getNewValOperand()->getType();
    if (Ty != OrigOldValTy) {
      // The value is a pointer, so we need to cast the return value.
      OldVal = IRB.CreateIntToPtr(C, OrigOldValTy);
    }

    Value *Res =
        IRB.CreateInsertValue(UndefValue::get(CASI->getType()), OldVal, 0);
    Res = IRB.CreateInsertValue(Res, Success, 1);

    I->replaceAllUsesWith(Res);
    I->eraseFromParent();
  } else if (FenceInst *FI = dyn_cast<FenceInst>(I)) {
    Value *Args[] = {createOrdering(&IRB, FI->getOrdering())};
    FunctionCallee F = FI->getSyncScopeID() == SyncScope::SingleThread
                           ? TsanAtomicSignalFence
                           : TsanAtomicThreadFence;
    CallInst *C = CallInst::Create(F, Args);
    ReplaceInstWithInst(I, C);
  }
  return true;
}

int ThreadSanitizer::getMemoryAccessFuncIndex(Value *Addr,
                                              const DataLayout &DL) {
  Type *OrigPtrTy = Addr->getType();
  Type *OrigTy = cast<PointerType>(OrigPtrTy)->getElementType();
  assert(OrigTy->isSized());
  uint32_t TypeSize = DL.getTypeStoreSizeInBits(OrigTy);
  if (TypeSize != 8 && TypeSize != 16 &&
      TypeSize != 32 && TypeSize != 64 && TypeSize != 128) {
    NumAccessesWithBadSize++;
    // Ignore all unusual sizes.
    return -1;
  }
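  // E.g. for a 32-bit access: TypeSize / 8 == 4, countTrailingZeros(4) == 2,
  // so Idx == 2 selects the 4-byte callbacks (__tsan_read4 / __tsan_write4).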
  size_t Idx = countTrailingZeros(TypeSize / 8);
  assert(Idx < kNumberOfAccessSizes);
  return Idx;
}