//===-- ThreadSanitizer.cpp - race detector -------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer, a race detector.
//
// The tool is under development, for the details about previous versions see
// http://code.google.com/p/data-race-test
//
// The instrumentation phase is quite simple:
//   - Insert calls to run-time library before every memory access.
//   - Optimizations may apply to avoid instrumenting some of the accesses.
//   - Insert calls at function entry/exit.
// The rest is handled by the run-time library.
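//
// For illustration only (the exact calls depend on access size, alignment,
// and the options below): a 4-byte load such as
//   %v = load i32, i32* %p
// is preceded by a runtime call along the lines of
//   call void @__tsan_read4(i8* %p.i8)
// and an instrumented function begins with
//   call void @__tsan_func_entry(i8* %return_addr)
// with a matching call to @__tsan_func_exit on every exit path.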
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Instrumentation/ThreadSanitizer.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/InitializePasses.h"
#include "llvm/ProfileData/InstrProf.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Instrumentation.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/EscapeEnumerator.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"

using namespace llvm;

#define DEBUG_TYPE "tsan"

static cl::opt<bool> ClInstrumentMemoryAccesses(
    "tsan-instrument-memory-accesses", cl::init(true),
    cl::desc("Instrument memory accesses"), cl::Hidden);
static cl::opt<bool>
    ClInstrumentFuncEntryExit("tsan-instrument-func-entry-exit", cl::init(true),
                              cl::desc("Instrument function entry and exit"),
                              cl::Hidden);
static cl::opt<bool> ClHandleCxxExceptions(
    "tsan-handle-cxx-exceptions", cl::init(true),
    cl::desc("Handle C++ exceptions (insert cleanup blocks for unwinding)"),
    cl::Hidden);
static cl::opt<bool> ClInstrumentAtomics("tsan-instrument-atomics",
                                         cl::init(true),
                                         cl::desc("Instrument atomics"),
                                         cl::Hidden);
static cl::opt<bool> ClInstrumentMemIntrinsics(
    "tsan-instrument-memintrinsics", cl::init(true),
    cl::desc("Instrument memintrinsics (memset/memcpy/memmove)"), cl::Hidden);
static cl::opt<bool> ClDistinguishVolatile(
    "tsan-distinguish-volatile", cl::init(false),
    cl::desc("Emit special instrumentation for accesses to volatiles"),
    cl::Hidden);
static cl::opt<bool> ClInstrumentReadBeforeWrite(
    "tsan-instrument-read-before-write", cl::init(false),
    cl::desc("Do not eliminate read instrumentation for read-before-writes"),
    cl::Hidden);
static cl::opt<bool> ClCompoundReadBeforeWrite(
    "tsan-compound-read-before-write", cl::init(false),
    cl::desc("Emit special compound instrumentation for reads-before-writes"),
    cl::Hidden);

STATISTIC(NumInstrumentedReads, "Number of instrumented reads");
STATISTIC(NumInstrumentedWrites, "Number of instrumented writes");
STATISTIC(NumOmittedReadsBeforeWrite,
          "Number of reads ignored due to following writes");
STATISTIC(NumAccessesWithBadSize, "Number of accesses with bad size");
STATISTIC(NumInstrumentedVtableWrites, "Number of vtable ptr writes");
STATISTIC(NumInstrumentedVtableReads, "Number of vtable ptr reads");
STATISTIC(NumOmittedReadsFromConstantGlobals,
          "Number of reads from constant globals");
STATISTIC(NumOmittedReadsFromVtable, "Number of vtable reads");
STATISTIC(NumOmittedNonCaptured, "Number of accesses ignored due to capturing");

const char kTsanModuleCtorName[] = "tsan.module_ctor";
const char kTsanInitName[] = "__tsan_init";

namespace {

/// ThreadSanitizer: instrument the code in module to find races.
///
/// Instantiating ThreadSanitizer inserts the tsan runtime library API function
/// declarations into the module if they don't exist already. Instantiating
/// ensures the __tsan_init function is in the list of global constructors for
/// the module.
struct ThreadSanitizer {
  ThreadSanitizer() {
    // Sanity check options and warn user.
    if (ClInstrumentReadBeforeWrite && ClCompoundReadBeforeWrite) {
      errs()
          << "warning: Option -tsan-compound-read-before-write has no effect "
             "when -tsan-instrument-read-before-write is set.\n";
    }
  }

  bool sanitizeFunction(Function &F, const TargetLibraryInfo &TLI);

private:
  // Internal Instruction wrapper that contains more information about the
  // Instruction from prior analysis.
  struct InstructionInfo {
    // Instrumentation emitted for this instruction is for a compounded set of
    // read and write operations in the same basic block.
    static constexpr unsigned kCompoundRW = (1U << 0);

    explicit InstructionInfo(Instruction *Inst) : Inst(Inst) {}

    Instruction *Inst;
    unsigned Flags = 0;
  };

  void initialize(Module &M);
  bool instrumentLoadOrStore(const InstructionInfo &II, const DataLayout &DL);
  bool instrumentAtomic(Instruction *I, const DataLayout &DL);
  bool instrumentMemIntrinsic(Instruction *I);
  void chooseInstructionsToInstrument(SmallVectorImpl<Instruction *> &Local,
                                      SmallVectorImpl<InstructionInfo> &All,
                                      const DataLayout &DL);
  bool addrPointsToConstantData(Value *Addr);
  int getMemoryAccessFuncIndex(Type *OrigTy, Value *Addr, const DataLayout &DL);
  void InsertRuntimeIgnores(Function &F);

  Type *IntptrTy;
  FunctionCallee TsanFuncEntry;
  FunctionCallee TsanFuncExit;
  FunctionCallee TsanIgnoreBegin;
  FunctionCallee TsanIgnoreEnd;
  // Access sizes are powers of two: 1, 2, 4, 8, 16.
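  // Callbacks are selected by log2 of the access size in bytes; e.g. a 4-byte
  // access uses index 2 (see getMemoryAccessFuncIndex below).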
  static const size_t kNumberOfAccessSizes = 5;
  FunctionCallee TsanRead[kNumberOfAccessSizes];
  FunctionCallee TsanWrite[kNumberOfAccessSizes];
  FunctionCallee TsanUnalignedRead[kNumberOfAccessSizes];
  FunctionCallee TsanUnalignedWrite[kNumberOfAccessSizes];
  FunctionCallee TsanVolatileRead[kNumberOfAccessSizes];
  FunctionCallee TsanVolatileWrite[kNumberOfAccessSizes];
  FunctionCallee TsanUnalignedVolatileRead[kNumberOfAccessSizes];
  FunctionCallee TsanUnalignedVolatileWrite[kNumberOfAccessSizes];
  FunctionCallee TsanCompoundRW[kNumberOfAccessSizes];
  FunctionCallee TsanUnalignedCompoundRW[kNumberOfAccessSizes];
  FunctionCallee TsanAtomicLoad[kNumberOfAccessSizes];
  FunctionCallee TsanAtomicStore[kNumberOfAccessSizes];
  FunctionCallee TsanAtomicRMW[AtomicRMWInst::LAST_BINOP + 1]
                              [kNumberOfAccessSizes];
  FunctionCallee TsanAtomicCAS[kNumberOfAccessSizes];
  FunctionCallee TsanAtomicThreadFence;
  FunctionCallee TsanAtomicSignalFence;
  FunctionCallee TsanVptrUpdate;
  FunctionCallee TsanVptrLoad;
  FunctionCallee MemmoveFn, MemcpyFn, MemsetFn;
};

struct ThreadSanitizerLegacyPass : FunctionPass {
  ThreadSanitizerLegacyPass() : FunctionPass(ID) {
    initializeThreadSanitizerLegacyPassPass(*PassRegistry::getPassRegistry());
  }
  StringRef getPassName() const override;
  void getAnalysisUsage(AnalysisUsage &AU) const override;
  bool runOnFunction(Function &F) override;
  bool doInitialization(Module &M) override;
  static char ID; // Pass identification, replacement for typeid.
private:
  Optional<ThreadSanitizer> TSan;
};

void insertModuleCtor(Module &M) {
  getOrCreateSanitizerCtorAndInitFunctions(
      M, kTsanModuleCtorName, kTsanInitName, /*InitArgTypes=*/{},
      /*InitArgs=*/{},
      // This callback is invoked when the functions are created the first
      // time. Hook them into the global ctors list in that case:
      [&](Function *Ctor, FunctionCallee) { appendToGlobalCtors(M, Ctor, 0); });
}

} // namespace

PreservedAnalyses ThreadSanitizerPass::run(Function &F,
                                           FunctionAnalysisManager &FAM) {
  ThreadSanitizer TSan;
  if (TSan.sanitizeFunction(F, FAM.getResult<TargetLibraryAnalysis>(F)))
    return PreservedAnalyses::none();
  return PreservedAnalyses::all();
}

PreservedAnalyses ThreadSanitizerPass::run(Module &M,
                                           ModuleAnalysisManager &MAM) {
  insertModuleCtor(M);
  return PreservedAnalyses::none();
}

char ThreadSanitizerLegacyPass::ID = 0;
INITIALIZE_PASS_BEGIN(ThreadSanitizerLegacyPass, "tsan",
                      "ThreadSanitizer: detects data races.", false, false)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(ThreadSanitizerLegacyPass, "tsan",
                    "ThreadSanitizer: detects data races.", false, false)

StringRef ThreadSanitizerLegacyPass::getPassName() const {
  return "ThreadSanitizerLegacyPass";
}

void ThreadSanitizerLegacyPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<TargetLibraryInfoWrapperPass>();
}

bool ThreadSanitizerLegacyPass::doInitialization(Module &M) {
  insertModuleCtor(M);
  TSan.emplace();
  return true;
}

bool ThreadSanitizerLegacyPass::runOnFunction(Function &F) {
  auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
  TSan->sanitizeFunction(F, TLI);
  return true;
}

FunctionPass *llvm::createThreadSanitizerLegacyPassPass() {
  return new ThreadSanitizerLegacyPass();
}

void ThreadSanitizer::initialize(Module &M) {
  const DataLayout &DL = M.getDataLayout();
  IntptrTy = DL.getIntPtrType(M.getContext());

  IRBuilder<> IRB(M.getContext());
  AttributeList Attr;
  Attr = Attr.addAttribute(M.getContext(), AttributeList::FunctionIndex,
                           Attribute::NoUnwind);
  // Initialize the callbacks.
  TsanFuncEntry = M.getOrInsertFunction("__tsan_func_entry", Attr,
                                        IRB.getVoidTy(), IRB.getInt8PtrTy());
  TsanFuncExit =
      M.getOrInsertFunction("__tsan_func_exit", Attr, IRB.getVoidTy());
  TsanIgnoreBegin = M.getOrInsertFunction("__tsan_ignore_thread_begin", Attr,
                                          IRB.getVoidTy());
  TsanIgnoreEnd =
      M.getOrInsertFunction("__tsan_ignore_thread_end", Attr, IRB.getVoidTy());
  IntegerType *OrdTy = IRB.getInt32Ty();
  for (size_t i = 0; i < kNumberOfAccessSizes; ++i) {
    const unsigned ByteSize = 1U << i;
    const unsigned BitSize = ByteSize * 8;
    std::string ByteSizeStr = utostr(ByteSize);
    std::string BitSizeStr = utostr(BitSize);
    SmallString<32> ReadName("__tsan_read" + ByteSizeStr);
    TsanRead[i] = M.getOrInsertFunction(ReadName, Attr, IRB.getVoidTy(),
                                        IRB.getInt8PtrTy());

    SmallString<32> WriteName("__tsan_write" + ByteSizeStr);
    TsanWrite[i] = M.getOrInsertFunction(WriteName, Attr, IRB.getVoidTy(),
                                         IRB.getInt8PtrTy());

    SmallString<64> UnalignedReadName("__tsan_unaligned_read" + ByteSizeStr);
    TsanUnalignedRead[i] = M.getOrInsertFunction(
        UnalignedReadName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());

    SmallString<64> UnalignedWriteName("__tsan_unaligned_write" + ByteSizeStr);
    TsanUnalignedWrite[i] = M.getOrInsertFunction(
        UnalignedWriteName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());

    SmallString<64> VolatileReadName("__tsan_volatile_read" + ByteSizeStr);
    TsanVolatileRead[i] = M.getOrInsertFunction(
        VolatileReadName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());

    SmallString<64> VolatileWriteName("__tsan_volatile_write" + ByteSizeStr);
    TsanVolatileWrite[i] = M.getOrInsertFunction(
        VolatileWriteName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());

    SmallString<64> UnalignedVolatileReadName("__tsan_unaligned_volatile_read" +
                                              ByteSizeStr);
    TsanUnalignedVolatileRead[i] = M.getOrInsertFunction(
        UnalignedVolatileReadName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());

    SmallString<64> UnalignedVolatileWriteName(
        "__tsan_unaligned_volatile_write" + ByteSizeStr);
    TsanUnalignedVolatileWrite[i] = M.getOrInsertFunction(
        UnalignedVolatileWriteName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());

    SmallString<64> CompoundRWName("__tsan_read_write" + ByteSizeStr);
    TsanCompoundRW[i] = M.getOrInsertFunction(
        CompoundRWName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());

    SmallString<64> UnalignedCompoundRWName("__tsan_unaligned_read_write" +
                                            ByteSizeStr);
    TsanUnalignedCompoundRW[i] = M.getOrInsertFunction(
        UnalignedCompoundRWName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());

    Type *Ty = Type::getIntNTy(M.getContext(), BitSize);
    Type *PtrTy = Ty->getPointerTo();
    SmallString<32> AtomicLoadName("__tsan_atomic" + BitSizeStr + "_load");
    {
      AttributeList AL = Attr;
      AL = AL.addParamAttribute(M.getContext(), 1, Attribute::ZExt);
      TsanAtomicLoad[i] =
          M.getOrInsertFunction(AtomicLoadName, AL, Ty, PtrTy, OrdTy);
    }

    SmallString<32> AtomicStoreName("__tsan_atomic" + BitSizeStr + "_store");
    {
      AttributeList AL = Attr;
      AL = AL.addParamAttribute(M.getContext(), 1, Attribute::ZExt);
      AL = AL.addParamAttribute(M.getContext(), 2, Attribute::ZExt);
      TsanAtomicStore[i] = M.getOrInsertFunction(
          AtomicStoreName, AL, IRB.getVoidTy(), PtrTy, Ty, OrdTy);
    }

    for (unsigned Op = AtomicRMWInst::FIRST_BINOP;
         Op <= AtomicRMWInst::LAST_BINOP; ++Op) {
      TsanAtomicRMW[Op][i] = nullptr;
      const char *NamePart = nullptr;
      if (Op == AtomicRMWInst::Xchg)
        NamePart = "_exchange";
      else if (Op == AtomicRMWInst::Add)
        NamePart = "_fetch_add";
      else if (Op == AtomicRMWInst::Sub)
        NamePart = "_fetch_sub";
      else if (Op == AtomicRMWInst::And)
        NamePart = "_fetch_and";
      else if (Op == AtomicRMWInst::Or)
        NamePart = "_fetch_or";
      else if (Op == AtomicRMWInst::Xor)
        NamePart = "_fetch_xor";
      else if (Op == AtomicRMWInst::Nand)
        NamePart = "_fetch_nand";
      else
        continue;
      SmallString<32> RMWName("__tsan_atomic" + itostr(BitSize) + NamePart);
      {
        AttributeList AL = Attr;
        AL = AL.addParamAttribute(M.getContext(), 1, Attribute::ZExt);
        AL = AL.addParamAttribute(M.getContext(), 2, Attribute::ZExt);
        TsanAtomicRMW[Op][i] =
            M.getOrInsertFunction(RMWName, AL, Ty, PtrTy, Ty, OrdTy);
      }
    }

    SmallString<32> AtomicCASName("__tsan_atomic" + BitSizeStr +
                                  "_compare_exchange_val");
    {
      AttributeList AL = Attr;
      AL = AL.addParamAttribute(M.getContext(), 1, Attribute::ZExt);
      AL = AL.addParamAttribute(M.getContext(), 2, Attribute::ZExt);
      AL = AL.addParamAttribute(M.getContext(), 3, Attribute::ZExt);
      AL = AL.addParamAttribute(M.getContext(), 4, Attribute::ZExt);
      TsanAtomicCAS[i] = M.getOrInsertFunction(AtomicCASName, AL, Ty, PtrTy, Ty,
                                               Ty, OrdTy, OrdTy);
    }
  }
  TsanVptrUpdate =
      M.getOrInsertFunction("__tsan_vptr_update", Attr, IRB.getVoidTy(),
                            IRB.getInt8PtrTy(), IRB.getInt8PtrTy());
  TsanVptrLoad = M.getOrInsertFunction("__tsan_vptr_read", Attr,
                                       IRB.getVoidTy(), IRB.getInt8PtrTy());
  {
    AttributeList AL = Attr;
    AL = AL.addParamAttribute(M.getContext(), 0, Attribute::ZExt);
    TsanAtomicThreadFence = M.getOrInsertFunction("__tsan_atomic_thread_fence",
                                                  AL, IRB.getVoidTy(), OrdTy);
  }
  {
    AttributeList AL = Attr;
    AL = AL.addParamAttribute(M.getContext(), 0, Attribute::ZExt);
    TsanAtomicSignalFence = M.getOrInsertFunction("__tsan_atomic_signal_fence",
                                                  AL, IRB.getVoidTy(), OrdTy);
  }

  MemmoveFn =
      M.getOrInsertFunction("memmove", Attr, IRB.getInt8PtrTy(),
                            IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IntptrTy);
  MemcpyFn =
      M.getOrInsertFunction("memcpy", Attr, IRB.getInt8PtrTy(),
                            IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IntptrTy);
  MemsetFn =
      M.getOrInsertFunction("memset", Attr, IRB.getInt8PtrTy(),
                            IRB.getInt8PtrTy(), IRB.getInt32Ty(), IntptrTy);
}

static bool isVtableAccess(Instruction *I) {
  if (MDNode *Tag = I->getMetadata(LLVMContext::MD_tbaa))
    return Tag->isTBAAVtableAccess();
  return false;
}

// Do not instrument known races/"benign races" that come from compiler
// instrumentation. The user has no way of suppressing them.
static bool shouldInstrumentReadWriteFromAddress(const Module *M, Value *Addr) {
  // Peel off GEPs and BitCasts.
  Addr = Addr->stripInBoundsOffsets();

  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Addr)) {
    if (GV->hasSection()) {
      StringRef SectionName = GV->getSection();
      // Check if the global is in the PGO counters section.
      auto OF = Triple(M->getTargetTriple()).getObjectFormat();
      if (SectionName.endswith(
              getInstrProfSectionName(IPSK_cnts, OF, /*AddSegmentInfo=*/false)))
        return false;
    }

    // Check if the global is private gcov data.
    if (GV->getName().startswith("__llvm_gcov") ||
        GV->getName().startswith("__llvm_gcda"))
      return false;
  }

  // Do not instrument accesses from different address spaces; we cannot deal
  // with them.
  if (Addr) {
    Type *PtrTy = cast<PointerType>(Addr->getType()->getScalarType());
    if (PtrTy->getPointerAddressSpace() != 0)
      return false;
  }

  return true;
}

bool ThreadSanitizer::addrPointsToConstantData(Value *Addr) {
  // If this is a GEP, just analyze its pointer operand.
  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Addr))
    Addr = GEP->getPointerOperand();

  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Addr)) {
    if (GV->isConstant()) {
      // Reads from constant globals cannot race with any writes.
      NumOmittedReadsFromConstantGlobals++;
      return true;
    }
  } else if (LoadInst *L = dyn_cast<LoadInst>(Addr)) {
    if (isVtableAccess(L)) {
      // Reads from a vtable pointer cannot race with any writes.
      NumOmittedReadsFromVtable++;
      return true;
    }
  }
  return false;
}

// Instrumenting some of the accesses may be proven redundant.
// Currently handled:
//  - read-before-write (within same BB, no calls between)
//  - non-captured variables
//
// We do not handle some of the patterns that should not survive
// after the classic compiler optimizations.
// E.g. two reads from the same temp should be eliminated by CSE,
// two writes should be eliminated by DSE, etc.
//
// 'Local' is a vector of insns within the same BB (no calls between).
// 'All' is a vector of insns that will be instrumented.
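//
// For illustration (the flags below control the exact behavior): in
//   %v = load i32, i32* %p
//   %inc = add i32 %v, 1
//   store i32 %inc, i32* %p
// the load needs no instrumentation of its own: the store to %p is
// instrumented anyway, and marking it kCompoundRW (used under
// -tsan-compound-read-before-write) lets the runtime see one combined
// read-write access.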
void ThreadSanitizer::chooseInstructionsToInstrument(
    SmallVectorImpl<Instruction *> &Local,
    SmallVectorImpl<InstructionInfo> &All, const DataLayout &DL) {
  DenseMap<Value *, size_t> WriteTargets; // Map of addresses to index in All
  // Iterate from the end.
  for (Instruction *I : reverse(Local)) {
    const bool IsWrite = isa<StoreInst>(*I);
    Value *Addr = IsWrite ? cast<StoreInst>(I)->getPointerOperand()
                          : cast<LoadInst>(I)->getPointerOperand();

    if (!shouldInstrumentReadWriteFromAddress(I->getModule(), Addr))
      continue;

    if (!IsWrite) {
      const auto WriteEntry = WriteTargets.find(Addr);
      if (!ClInstrumentReadBeforeWrite && WriteEntry != WriteTargets.end()) {
        auto &WI = All[WriteEntry->second];
        // If we distinguish volatile accesses and if either the read or write
        // is volatile, do not omit any instrumentation.
        const bool AnyVolatile =
            ClDistinguishVolatile && (cast<LoadInst>(I)->isVolatile() ||
                                      cast<StoreInst>(WI.Inst)->isVolatile());
        if (!AnyVolatile) {
          // We will write to this temp, so no reason to analyze the read.
          // Mark the write instruction as compound.
          WI.Flags |= InstructionInfo::kCompoundRW;
          NumOmittedReadsBeforeWrite++;
          continue;
        }
      }

      if (addrPointsToConstantData(Addr)) {
        // Addr points to some constant data -- it cannot race with any
        // writes.
        continue;
      }
    }

    if (isa<AllocaInst>(getUnderlyingObject(Addr)) &&
        !PointerMayBeCaptured(Addr, true, true)) {
      // The variable is addressable but not captured, so it cannot be
      // referenced from a different thread and participate in a data race
      // (see llvm/Analysis/CaptureTracking.h for details).
      NumOmittedNonCaptured++;
      continue;
    }

    // Instrument this instruction.
    All.emplace_back(I);
    if (IsWrite) {
      // For read-before-write and compound instrumentation we only need one
      // write target, and we can override any previous entry if it exists.
      WriteTargets[Addr] = All.size() - 1;
    }
  }
  Local.clear();
}

static bool isAtomic(Instruction *I) {
  // TODO: Ask TTI whether synchronization scope is between threads.
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return LI->isAtomic() && LI->getSyncScopeID() != SyncScope::SingleThread;
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->isAtomic() && SI->getSyncScopeID() != SyncScope::SingleThread;
  if (isa<AtomicRMWInst>(I))
    return true;
  if (isa<AtomicCmpXchgInst>(I))
    return true;
  if (isa<FenceInst>(I))
    return true;
  return false;
}

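// Bracket the function with __tsan_ignore_thread_begin/end so the runtime
// ignores all accesses made while it executes; used below for functions
// carrying the sanitize_thread_no_checking_at_run_time attribute.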
void ThreadSanitizer::InsertRuntimeIgnores(Function &F) {
  IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI());
  IRB.CreateCall(TsanIgnoreBegin);
  EscapeEnumerator EE(F, "tsan_ignore_cleanup", ClHandleCxxExceptions);
  while (IRBuilder<> *AtExit = EE.Next()) {
    AtExit->CreateCall(TsanIgnoreEnd);
  }
}

bool ThreadSanitizer::sanitizeFunction(Function &F,
                                       const TargetLibraryInfo &TLI) {
  // This is required to prevent instrumenting the call to __tsan_init from
  // within the module constructor.
  if (F.getName() == kTsanModuleCtorName)
    return false;
  // Naked functions cannot have prologue/epilogue
  // (__tsan_func_entry/__tsan_func_exit) generated, so don't instrument them
  // at all.
  if (F.hasFnAttribute(Attribute::Naked))
    return false;
  initialize(*F.getParent());
  SmallVector<InstructionInfo, 8> AllLoadsAndStores;
  SmallVector<Instruction*, 8> LocalLoadsAndStores;
  SmallVector<Instruction*, 8> AtomicAccesses;
  SmallVector<Instruction*, 8> MemIntrinCalls;
  bool Res = false;
  bool HasCalls = false;
  bool SanitizeFunction = F.hasFnAttribute(Attribute::SanitizeThread);
  const DataLayout &DL = F.getParent()->getDataLayout();

  // Traverse all instructions, collect loads/stores/returns, check for calls.
  for (auto &BB : F) {
    for (auto &Inst : BB) {
      if (isAtomic(&Inst))
        AtomicAccesses.push_back(&Inst);
      else if (isa<LoadInst>(Inst) || isa<StoreInst>(Inst))
        LocalLoadsAndStores.push_back(&Inst);
      else if (isa<CallInst>(Inst) || isa<InvokeInst>(Inst)) {
        if (CallInst *CI = dyn_cast<CallInst>(&Inst))
          maybeMarkSanitizerLibraryCallNoBuiltin(CI, &TLI);
        if (isa<MemIntrinsic>(Inst))
          MemIntrinCalls.push_back(&Inst);
        HasCalls = true;
        chooseInstructionsToInstrument(LocalLoadsAndStores, AllLoadsAndStores,
                                       DL);
      }
    }
    chooseInstructionsToInstrument(LocalLoadsAndStores, AllLoadsAndStores, DL);
  }

  // We have collected all loads and stores.
  // FIXME: many of these accesses do not need to be checked for races
  // (e.g. variables that do not escape, etc).

  // Instrument memory accesses only if we want to report bugs in the function.
  if (ClInstrumentMemoryAccesses && SanitizeFunction)
    for (const auto &II : AllLoadsAndStores) {
      Res |= instrumentLoadOrStore(II, DL);
    }

  // Instrument atomic memory accesses in any case (they can be used to
  // implement synchronization).
  if (ClInstrumentAtomics)
    for (auto Inst : AtomicAccesses) {
      Res |= instrumentAtomic(Inst, DL);
    }

  if (ClInstrumentMemIntrinsics && SanitizeFunction)
    for (auto Inst : MemIntrinCalls) {
      Res |= instrumentMemIntrinsic(Inst);
    }

  if (F.hasFnAttribute("sanitize_thread_no_checking_at_run_time")) {
    assert(!F.hasFnAttribute(Attribute::SanitizeThread));
    if (HasCalls)
      InsertRuntimeIgnores(F);
  }

  // Instrument function entry/exit points if there were instrumented accesses.
  if ((Res || HasCalls) && ClInstrumentFuncEntryExit) {
    IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI());
    Value *ReturnAddress = IRB.CreateCall(
        Intrinsic::getDeclaration(F.getParent(), Intrinsic::returnaddress),
        IRB.getInt32(0));
    IRB.CreateCall(TsanFuncEntry, ReturnAddress);

    EscapeEnumerator EE(F, "tsan_cleanup", ClHandleCxxExceptions);
    while (IRBuilder<> *AtExit = EE.Next()) {
      AtExit->CreateCall(TsanFuncExit, {});
    }
    Res = true;
  }
  return Res;
}

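// Emit the runtime callback for one plain (non-atomic) load or store. The
// callback is chosen along three axes: compound read-write vs. plain,
// volatile vs. ordinary (only under -tsan-distinguish-volatile), and aligned
// vs. unaligned. Vtable pointer accesses get dedicated callbacks instead.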
bool ThreadSanitizer::instrumentLoadOrStore(const InstructionInfo &II,
                                            const DataLayout &DL) {
  IRBuilder<> IRB(II.Inst);
  const bool IsWrite = isa<StoreInst>(*II.Inst);
  Value *Addr = IsWrite ? cast<StoreInst>(II.Inst)->getPointerOperand()
                        : cast<LoadInst>(II.Inst)->getPointerOperand();
  Type *OrigTy = getLoadStoreType(II.Inst);

  // swifterror memory addresses are mem2reg promoted by instruction selection.
  // As such they cannot have regular uses like an instrumentation function and
  // it makes no sense to track them as memory.
  if (Addr->isSwiftError())
    return false;

  int Idx = getMemoryAccessFuncIndex(OrigTy, Addr, DL);
  if (Idx < 0)
    return false;
  if (IsWrite && isVtableAccess(II.Inst)) {
    LLVM_DEBUG(dbgs() << "  VPTR : " << *II.Inst << "\n");
    Value *StoredValue = cast<StoreInst>(II.Inst)->getValueOperand();
    // StoredValue may be a vector type if we are storing several vptrs at
    // once. In this case, just take the first element of the vector since
    // this is enough to find vptr races.
    if (isa<VectorType>(StoredValue->getType()))
      StoredValue = IRB.CreateExtractElement(
          StoredValue, ConstantInt::get(IRB.getInt32Ty(), 0));
    if (StoredValue->getType()->isIntegerTy())
      StoredValue = IRB.CreateIntToPtr(StoredValue, IRB.getInt8PtrTy());
    // Call TsanVptrUpdate.
    IRB.CreateCall(TsanVptrUpdate,
                   {IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()),
                    IRB.CreatePointerCast(StoredValue, IRB.getInt8PtrTy())});
    NumInstrumentedVtableWrites++;
    return true;
  }
  if (!IsWrite && isVtableAccess(II.Inst)) {
    IRB.CreateCall(TsanVptrLoad,
                   IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()));
    NumInstrumentedVtableReads++;
    return true;
  }

  const unsigned Alignment = IsWrite ? cast<StoreInst>(II.Inst)->getAlignment()
                                     : cast<LoadInst>(II.Inst)->getAlignment();
  const bool IsCompoundRW =
      ClCompoundReadBeforeWrite && (II.Flags & InstructionInfo::kCompoundRW);
  const bool IsVolatile = ClDistinguishVolatile &&
                          (IsWrite ? cast<StoreInst>(II.Inst)->isVolatile()
                                   : cast<LoadInst>(II.Inst)->isVolatile());
  assert((!IsVolatile || !IsCompoundRW) && "Compound volatile invalid!");

  const uint32_t TypeSize = DL.getTypeStoreSizeInBits(OrigTy);
  FunctionCallee OnAccessFunc = nullptr;
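  // Treat the access as sufficiently aligned if no alignment was recorded
  // (the ABI alignment applies), if it is at least 8 bytes, or if it is a
  // multiple of the access size; otherwise fall back to the unaligned
  // callbacks.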
  if (Alignment == 0 || Alignment >= 8 || (Alignment % (TypeSize / 8)) == 0) {
    if (IsCompoundRW)
      OnAccessFunc = TsanCompoundRW[Idx];
    else if (IsVolatile)
      OnAccessFunc = IsWrite ? TsanVolatileWrite[Idx] : TsanVolatileRead[Idx];
    else
      OnAccessFunc = IsWrite ? TsanWrite[Idx] : TsanRead[Idx];
  } else {
    if (IsCompoundRW)
      OnAccessFunc = TsanUnalignedCompoundRW[Idx];
    else if (IsVolatile)
      OnAccessFunc = IsWrite ? TsanUnalignedVolatileWrite[Idx]
                             : TsanUnalignedVolatileRead[Idx];
    else
      OnAccessFunc = IsWrite ? TsanUnalignedWrite[Idx] : TsanUnalignedRead[Idx];
  }
  IRB.CreateCall(OnAccessFunc, IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()));
  if (IsCompoundRW || IsWrite)
    NumInstrumentedWrites++;
  if (IsCompoundRW || !IsWrite)
    NumInstrumentedReads++;
  return true;
}

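// Map an LLVM atomic ordering onto the integer encoding expected by the
// runtime's atomic entry points; the values mirror the C/C++11 memory orders
// (relaxed = 0, consume = 1, acquire = 2, release = 3, acq_rel = 4,
// seq_cst = 5).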
static ConstantInt *createOrdering(IRBuilder<> *IRB, AtomicOrdering ord) {
  uint32_t v = 0;
  switch (ord) {
    case AtomicOrdering::NotAtomic:
      llvm_unreachable("unexpected atomic ordering!");
    case AtomicOrdering::Unordered: LLVM_FALLTHROUGH;
    case AtomicOrdering::Monotonic: v = 0; break;
    // Not specified yet:
    // case AtomicOrdering::Consume: v = 1; break;
    case AtomicOrdering::Acquire: v = 2; break;
    case AtomicOrdering::Release: v = 3; break;
    case AtomicOrdering::AcquireRelease: v = 4; break;
    case AtomicOrdering::SequentiallyConsistent: v = 5; break;
  }
  return IRB->getInt32(v);
}

// If a memset intrinsic gets inlined by the code gen, we will miss races on
// it. So, we either need to ensure the intrinsic is not inlined, or instrument
// it. We do not instrument memset/memmove/memcpy intrinsics (too complicated),
// instead we simply replace them with regular function calls, which are then
// intercepted by the run-time.
// Since tsan is running after everyone else, the calls should not be
// replaced back with intrinsics. If that becomes wrong at some point,
// we will need to call e.g. __tsan_memset to avoid the intrinsics.
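//
// For illustration (typed-pointer syntax; the exact mangling varies): a call
// to
//   llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %n, i1 false)
// is replaced with a plain
//   call i8* @memcpy(i8* %dst, i8* %src, i64 %n)
// which the runtime interposes on.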
bool ThreadSanitizer::instrumentMemIntrinsic(Instruction *I) {
  IRBuilder<> IRB(I);
  if (MemSetInst *M = dyn_cast<MemSetInst>(I)) {
    IRB.CreateCall(
        MemsetFn,
        {IRB.CreatePointerCast(M->getArgOperand(0), IRB.getInt8PtrTy()),
         IRB.CreateIntCast(M->getArgOperand(1), IRB.getInt32Ty(), false),
         IRB.CreateIntCast(M->getArgOperand(2), IntptrTy, false)});
    I->eraseFromParent();
  } else if (MemTransferInst *M = dyn_cast<MemTransferInst>(I)) {
    IRB.CreateCall(
        isa<MemCpyInst>(M) ? MemcpyFn : MemmoveFn,
        {IRB.CreatePointerCast(M->getArgOperand(0), IRB.getInt8PtrTy()),
         IRB.CreatePointerCast(M->getArgOperand(1), IRB.getInt8PtrTy()),
         IRB.CreateIntCast(M->getArgOperand(2), IntptrTy, false)});
    I->eraseFromParent();
  }
  return false;
}


// Both llvm and ThreadSanitizer atomic operations are based on C++11/C1x
// standards. For background see C++11 standard. A slightly older, publicly
// available draft of the standard (not entirely up-to-date, but close enough
// for casual browsing) is available here:
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2011/n3242.pdf
// The following page contains more background information:
// http://www.hpl.hp.com/personal/Hans_Boehm/c++mm/

bool ThreadSanitizer::instrumentAtomic(Instruction *I, const DataLayout &DL) {
  IRBuilder<> IRB(I);
  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    Value *Addr = LI->getPointerOperand();
    Type *OrigTy = LI->getType();
    int Idx = getMemoryAccessFuncIndex(OrigTy, Addr, DL);
    if (Idx < 0)
      return false;
    const unsigned ByteSize = 1U << Idx;
    const unsigned BitSize = ByteSize * 8;
    Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
    Type *PtrTy = Ty->getPointerTo();
    Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
                     createOrdering(&IRB, LI->getOrdering())};
    Value *C = IRB.CreateCall(TsanAtomicLoad[Idx], Args);
    Value *Cast = IRB.CreateBitOrPointerCast(C, OrigTy);
    I->replaceAllUsesWith(Cast);
  } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
    Value *Addr = SI->getPointerOperand();
    int Idx =
        getMemoryAccessFuncIndex(SI->getValueOperand()->getType(), Addr, DL);
    if (Idx < 0)
      return false;
    const unsigned ByteSize = 1U << Idx;
    const unsigned BitSize = ByteSize * 8;
    Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
    Type *PtrTy = Ty->getPointerTo();
    Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
                     IRB.CreateBitOrPointerCast(SI->getValueOperand(), Ty),
                     createOrdering(&IRB, SI->getOrdering())};
    CallInst *C = CallInst::Create(TsanAtomicStore[Idx], Args);
    ReplaceInstWithInst(I, C);
  } else if (AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I)) {
    Value *Addr = RMWI->getPointerOperand();
    int Idx =
        getMemoryAccessFuncIndex(RMWI->getValOperand()->getType(), Addr, DL);
    if (Idx < 0)
      return false;
    FunctionCallee F = TsanAtomicRMW[RMWI->getOperation()][Idx];
    if (!F)
      return false;
    const unsigned ByteSize = 1U << Idx;
    const unsigned BitSize = ByteSize * 8;
    Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
    Type *PtrTy = Ty->getPointerTo();
    Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
                     IRB.CreateIntCast(RMWI->getValOperand(), Ty, false),
                     createOrdering(&IRB, RMWI->getOrdering())};
    CallInst *C = CallInst::Create(F, Args);
    ReplaceInstWithInst(I, C);
  } else if (AtomicCmpXchgInst *CASI = dyn_cast<AtomicCmpXchgInst>(I)) {
    Value *Addr = CASI->getPointerOperand();
    Type *OrigOldValTy = CASI->getNewValOperand()->getType();
    int Idx = getMemoryAccessFuncIndex(OrigOldValTy, Addr, DL);
    if (Idx < 0)
      return false;
    const unsigned ByteSize = 1U << Idx;
    const unsigned BitSize = ByteSize * 8;
    Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
    Type *PtrTy = Ty->getPointerTo();
    Value *CmpOperand =
        IRB.CreateBitOrPointerCast(CASI->getCompareOperand(), Ty);
    Value *NewOperand =
        IRB.CreateBitOrPointerCast(CASI->getNewValOperand(), Ty);
    Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
                     CmpOperand,
                     NewOperand,
                     createOrdering(&IRB, CASI->getSuccessOrdering()),
                     createOrdering(&IRB, CASI->getFailureOrdering())};
    CallInst *C = IRB.CreateCall(TsanAtomicCAS[Idx], Args);
    Value *Success = IRB.CreateICmpEQ(C, CmpOperand);
    Value *OldVal = C;
    if (Ty != OrigOldValTy) {
      // The value is a pointer, so we need to cast the return value.
      OldVal = IRB.CreateIntToPtr(C, OrigOldValTy);
    }

    Value *Res =
        IRB.CreateInsertValue(UndefValue::get(CASI->getType()), OldVal, 0);
    Res = IRB.CreateInsertValue(Res, Success, 1);

    I->replaceAllUsesWith(Res);
    I->eraseFromParent();
  } else if (FenceInst *FI = dyn_cast<FenceInst>(I)) {
    Value *Args[] = {createOrdering(&IRB, FI->getOrdering())};
    FunctionCallee F = FI->getSyncScopeID() == SyncScope::SingleThread
                           ? TsanAtomicSignalFence
                           : TsanAtomicThreadFence;
    CallInst *C = CallInst::Create(F, Args);
    ReplaceInstWithInst(I, C);
  }
  return true;
}

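// Compute the index into the per-size callback arrays: log2 of the access
// size in bytes, so e.g. an i32 access (4 bytes) yields index 2 and thus
// __tsan_read4/__tsan_write4. Accesses whose store size is not 1, 2, 4, 8, or
// 16 bytes are not instrumented.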
int ThreadSanitizer::getMemoryAccessFuncIndex(Type *OrigTy, Value *Addr,
                                              const DataLayout &DL) {
  assert(OrigTy->isSized());
  assert(
      cast<PointerType>(Addr->getType())->isOpaqueOrPointeeTypeMatches(OrigTy));
  uint32_t TypeSize = DL.getTypeStoreSizeInBits(OrigTy);
  if (TypeSize != 8 && TypeSize != 16 &&
      TypeSize != 32 && TypeSize != 64 && TypeSize != 128) {
    NumAccessesWithBadSize++;
    // Ignore all unusual sizes.
    return -1;
  }
  size_t Idx = countTrailingZeros(TypeSize / 8);
  assert(Idx < kNumberOfAccessSizes);
  return Idx;
}