//===-- ThreadSanitizer.cpp - race detector -------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer, a race detector.
//
// The tool is under development; for details about previous versions see
// http://code.google.com/p/data-race-test
//
// The instrumentation phase is quite simple:
//   - Insert calls to the run-time library before every memory access.
//      - Optimizations may apply to avoid instrumenting some of the accesses.
//   - Insert calls at function entry/exit.
// The rest is handled by the run-time library.
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "tsan"

#include "llvm/Transforms/Instrumentation.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"
#include "llvm/Transforms/Utils/SpecialCaseList.h"

using namespace llvm;

static cl::opt<std::string> ClBlacklistFile("tsan-blacklist",
    cl::desc("Blacklist file"), cl::Hidden);
static cl::opt<bool> ClInstrumentMemoryAccesses(
    "tsan-instrument-memory-accesses", cl::init(true),
    cl::desc("Instrument memory accesses"), cl::Hidden);
static cl::opt<bool> ClInstrumentFuncEntryExit(
    "tsan-instrument-func-entry-exit", cl::init(true),
    cl::desc("Instrument function entry and exit"), cl::Hidden);
static cl::opt<bool> ClInstrumentAtomics(
    "tsan-instrument-atomics", cl::init(true),
    cl::desc("Instrument atomics"), cl::Hidden);
static cl::opt<bool> ClInstrumentMemIntrinsics(
    "tsan-instrument-memintrinsics", cl::init(true),
    cl::desc("Instrument memintrinsics (memset/memcpy/memmove)"), cl::Hidden);

STATISTIC(NumInstrumentedReads, "Number of instrumented reads");
STATISTIC(NumInstrumentedWrites, "Number of instrumented writes");
STATISTIC(NumOmittedReadsBeforeWrite,
          "Number of reads ignored due to following writes");
STATISTIC(NumAccessesWithBadSize, "Number of accesses with bad size");
STATISTIC(NumInstrumentedVtableWrites, "Number of vtable ptr writes");
STATISTIC(NumInstrumentedVtableReads, "Number of vtable ptr reads");
STATISTIC(NumOmittedReadsFromConstantGlobals,
          "Number of reads from constant globals");
STATISTIC(NumOmittedReadsFromVtable, "Number of vtable reads");
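
// Example invocation with the legacy pass manager (a sketch: the pass is
// registered below as "tsan", and the flags above toggle the individual
// instrumentation kinds; exact opt syntax may differ between LLVM versions):
//   opt -tsan -tsan-instrument-atomics=0 input.ll -S -o output.ll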

namespace {

/// ThreadSanitizer: instrument the code in module to find races.
struct ThreadSanitizer : public FunctionPass {
  ThreadSanitizer(StringRef BlacklistFile = StringRef())
      : FunctionPass(ID),
        TD(0),
        BlacklistFile(BlacklistFile.empty() ? ClBlacklistFile
                                            : BlacklistFile) { }
  const char *getPassName() const;
  bool runOnFunction(Function &F);
  bool doInitialization(Module &M);
  static char ID;  // Pass identification, replacement for typeid.

 private:
  void initializeCallbacks(Module &M);
  bool instrumentLoadOrStore(Instruction *I);
  bool instrumentAtomic(Instruction *I);
  bool instrumentMemIntrinsic(Instruction *I);
  void chooseInstructionsToInstrument(SmallVectorImpl<Instruction*> &Local,
                                      SmallVectorImpl<Instruction*> &All);
  bool addrPointsToConstantData(Value *Addr);
  int getMemoryAccessFuncIndex(Value *Addr);

  DataLayout *TD;
  Type *IntptrTy;
  SmallString<64> BlacklistFile;
  OwningPtr<SpecialCaseList> BL;
  IntegerType *OrdTy;
  // Callbacks to run-time library are computed in doInitialization.
  Function *TsanFuncEntry;
  Function *TsanFuncExit;
  // Access sizes are powers of two: 1, 2, 4, 8, 16.
  static const size_t kNumberOfAccessSizes = 5;
  Function *TsanRead[kNumberOfAccessSizes];
  Function *TsanWrite[kNumberOfAccessSizes];
  Function *TsanAtomicLoad[kNumberOfAccessSizes];
  Function *TsanAtomicStore[kNumberOfAccessSizes];
  Function *TsanAtomicRMW[AtomicRMWInst::LAST_BINOP + 1][kNumberOfAccessSizes];
  Function *TsanAtomicCAS[kNumberOfAccessSizes];
  Function *TsanAtomicThreadFence;
  Function *TsanAtomicSignalFence;
  Function *TsanVptrUpdate;
  Function *TsanVptrLoad;
  Function *MemmoveFn, *MemcpyFn, *MemsetFn;
};
}  // namespace

char ThreadSanitizer::ID = 0;
INITIALIZE_PASS(ThreadSanitizer, "tsan",
    "ThreadSanitizer: detects data races.",
    false, false)

const char *ThreadSanitizer::getPassName() const {
  return "ThreadSanitizer";
}

FunctionPass *llvm::createThreadSanitizerPass(StringRef BlacklistFile) {
  return new ThreadSanitizer(BlacklistFile);
}

static Function *checkInterfaceFunction(Constant *FuncOrBitcast) {
  if (Function *F = dyn_cast<Function>(FuncOrBitcast))
    return F;
  FuncOrBitcast->dump();
  report_fatal_error("ThreadSanitizer interface function redefined");
}
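
// The run-time callback names declared below encode the access size: plain
// accesses use the size in bytes, atomics use the size in bits. A sketch of
// the corresponding run-time interface (signatures are assumptions based on
// the declarations built in initializeCallbacks):
//   void __tsan_read4(void *addr);                  // 4-byte plain load
//   void __tsan_write8(void *addr);                 // 8-byte plain store
//   uint32_t __tsan_atomic32_load(const volatile uint32_t *a, int mo);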

void ThreadSanitizer::initializeCallbacks(Module &M) {
  IRBuilder<> IRB(M.getContext());
  // Initialize the callbacks.
  TsanFuncEntry = checkInterfaceFunction(M.getOrInsertFunction(
      "__tsan_func_entry", IRB.getVoidTy(), IRB.getInt8PtrTy(), NULL));
  TsanFuncExit = checkInterfaceFunction(M.getOrInsertFunction(
      "__tsan_func_exit", IRB.getVoidTy(), NULL));
  OrdTy = IRB.getInt32Ty();
  for (size_t i = 0; i < kNumberOfAccessSizes; ++i) {
    const size_t ByteSize = 1 << i;
    const size_t BitSize = ByteSize * 8;
    SmallString<32> ReadName("__tsan_read" + itostr(ByteSize));
    TsanRead[i] = checkInterfaceFunction(M.getOrInsertFunction(
        ReadName, IRB.getVoidTy(), IRB.getInt8PtrTy(), NULL));

    SmallString<32> WriteName("__tsan_write" + itostr(ByteSize));
    TsanWrite[i] = checkInterfaceFunction(M.getOrInsertFunction(
        WriteName, IRB.getVoidTy(), IRB.getInt8PtrTy(), NULL));

    Type *Ty = Type::getIntNTy(M.getContext(), BitSize);
    Type *PtrTy = Ty->getPointerTo();
    SmallString<32> AtomicLoadName("__tsan_atomic" + itostr(BitSize) +
                                   "_load");
    TsanAtomicLoad[i] = checkInterfaceFunction(M.getOrInsertFunction(
        AtomicLoadName, Ty, PtrTy, OrdTy, NULL));

    SmallString<32> AtomicStoreName("__tsan_atomic" + itostr(BitSize) +
                                    "_store");
    TsanAtomicStore[i] = checkInterfaceFunction(M.getOrInsertFunction(
        AtomicStoreName, IRB.getVoidTy(), PtrTy, Ty, OrdTy,
        NULL));

    for (int op = AtomicRMWInst::FIRST_BINOP;
        op <= AtomicRMWInst::LAST_BINOP; ++op) {
      TsanAtomicRMW[op][i] = NULL;
      const char *NamePart = NULL;
      if (op == AtomicRMWInst::Xchg)
        NamePart = "_exchange";
      else if (op == AtomicRMWInst::Add)
        NamePart = "_fetch_add";
      else if (op == AtomicRMWInst::Sub)
        NamePart = "_fetch_sub";
      else if (op == AtomicRMWInst::And)
        NamePart = "_fetch_and";
      else if (op == AtomicRMWInst::Or)
        NamePart = "_fetch_or";
      else if (op == AtomicRMWInst::Xor)
        NamePart = "_fetch_xor";
      else if (op == AtomicRMWInst::Nand)
        NamePart = "_fetch_nand";
      else
        continue;
      SmallString<32> RMWName("__tsan_atomic" + itostr(BitSize) + NamePart);
      TsanAtomicRMW[op][i] = checkInterfaceFunction(M.getOrInsertFunction(
          RMWName, Ty, PtrTy, Ty, OrdTy, NULL));
    }

    SmallString<32> AtomicCASName("__tsan_atomic" + itostr(BitSize) +
                                  "_compare_exchange_val");
    TsanAtomicCAS[i] = checkInterfaceFunction(M.getOrInsertFunction(
        AtomicCASName, Ty, PtrTy, Ty, Ty, OrdTy, OrdTy, NULL));
  }
  TsanVptrUpdate = checkInterfaceFunction(M.getOrInsertFunction(
      "__tsan_vptr_update", IRB.getVoidTy(), IRB.getInt8PtrTy(),
      IRB.getInt8PtrTy(), NULL));
  TsanVptrLoad = checkInterfaceFunction(M.getOrInsertFunction(
      "__tsan_vptr_read", IRB.getVoidTy(), IRB.getInt8PtrTy(), NULL));
  TsanAtomicThreadFence = checkInterfaceFunction(M.getOrInsertFunction(
      "__tsan_atomic_thread_fence", IRB.getVoidTy(), OrdTy, NULL));
  TsanAtomicSignalFence = checkInterfaceFunction(M.getOrInsertFunction(
      "__tsan_atomic_signal_fence", IRB.getVoidTy(), OrdTy, NULL));

  MemmoveFn = checkInterfaceFunction(M.getOrInsertFunction(
      "memmove", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
      IRB.getInt8PtrTy(), IntptrTy, NULL));
  MemcpyFn = checkInterfaceFunction(M.getOrInsertFunction(
      "memcpy", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
      IntptrTy, NULL));
  MemsetFn = checkInterfaceFunction(M.getOrInsertFunction(
      "memset", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IRB.getInt32Ty(),
      IntptrTy, NULL));
}
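
// doInitialization (below) registers __tsan_init as a module constructor
// with priority 0. The resulting IR looks roughly like this (a sketch; the
// exact shape of the llvm.global_ctors entry depends on the LLVM version):
//   @llvm.global_ctors = appending global [1 x { i32, void ()* }]
//                        [{ i32 0, void ()* @__tsan_init }]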

bool ThreadSanitizer::doInitialization(Module &M) {
  TD = getAnalysisIfAvailable<DataLayout>();
  if (!TD)
    return false;
  BL.reset(SpecialCaseList::createOrDie(BlacklistFile));

  // Always insert a call to __tsan_init into the module's CTORs.
  IRBuilder<> IRB(M.getContext());
  IntptrTy = IRB.getIntPtrTy(TD);
  Value *TsanInit = M.getOrInsertFunction("__tsan_init",
                                          IRB.getVoidTy(), NULL);
  appendToGlobalCtors(M, cast<Function>(TsanInit), 0);

  return true;
}

static bool isVtableAccess(Instruction *I) {
  if (MDNode *Tag = I->getMetadata(LLVMContext::MD_tbaa))
    return Tag->isTBAAVtableAccess();
  return false;
}

bool ThreadSanitizer::addrPointsToConstantData(Value *Addr) {
  // If this is a GEP, just analyze its pointer operand.
  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Addr))
    Addr = GEP->getPointerOperand();

  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Addr)) {
    if (GV->isConstant()) {
      // Reads from constant globals cannot race with any writes.
      NumOmittedReadsFromConstantGlobals++;
      return true;
    }
  } else if (LoadInst *L = dyn_cast<LoadInst>(Addr)) {
    if (isVtableAccess(L)) {
      // Reads from a vtable pointer cannot race with any writes.
      NumOmittedReadsFromVtable++;
      return true;
    }
  }
  return false;
}

// Instrumenting some of the accesses may be proven redundant.
// Currently handled:
//  - read-before-write (within same BB, no calls between)
//
// We do not handle some of the patterns that should not survive
// the classic compiler optimizations, e.g. two reads from the same
// temp should be eliminated by CSE, two writes should be eliminated
// by DSE, etc.
//
// 'Local' is a vector of insns within the same BB (no calls between).
// 'All' is a vector of insns that will be instrumented.
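//
// For example, in the following pattern (an IR sketch) the load need not be
// instrumented: the store to the same address in the same BB is instrumented
// anyway, and any write racing with the load also races with that store.
//   %old = load i32* %p
//   %new = add i32 %old, 1
//   store i32 %new, i32* %p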
void ThreadSanitizer::chooseInstructionsToInstrument(
    SmallVectorImpl<Instruction*> &Local,
    SmallVectorImpl<Instruction*> &All) {
  SmallSet<Value*, 8> WriteTargets;
  // Iterate from the end.
  for (SmallVectorImpl<Instruction*>::reverse_iterator It = Local.rbegin(),
       E = Local.rend(); It != E; ++It) {
    Instruction *I = *It;
    if (StoreInst *Store = dyn_cast<StoreInst>(I)) {
      WriteTargets.insert(Store->getPointerOperand());
    } else {
      LoadInst *Load = cast<LoadInst>(I);
      Value *Addr = Load->getPointerOperand();
      if (WriteTargets.count(Addr)) {
        // We will write to this temp, so no reason to analyze the read.
        NumOmittedReadsBeforeWrite++;
        continue;
      }
      if (addrPointsToConstantData(Addr)) {
        // Addr points to some constant data -- it cannot race with any
        // writes.
        continue;
      }
    }
    All.push_back(I);
  }
  Local.clear();
}

static bool isAtomic(Instruction *I) {
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return LI->isAtomic() && LI->getSynchScope() == CrossThread;
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->isAtomic() && SI->getSynchScope() == CrossThread;
  if (isa<AtomicRMWInst>(I))
    return true;
  if (isa<AtomicCmpXchgInst>(I))
    return true;
  if (isa<FenceInst>(I))
    return true;
  return false;
}

bool ThreadSanitizer::runOnFunction(Function &F) {
  if (!TD) return false;
  if (BL->isIn(F)) return false;
  initializeCallbacks(*F.getParent());
  SmallVector<Instruction*, 8> RetVec;
  SmallVector<Instruction*, 8> AllLoadsAndStores;
  SmallVector<Instruction*, 8> LocalLoadsAndStores;
  SmallVector<Instruction*, 8> AtomicAccesses;
  SmallVector<Instruction*, 8> MemIntrinCalls;
  bool Res = false;
  bool HasCalls = false;

  // Traverse all instructions, collect loads/stores/returns, check for calls.
  for (Function::iterator FI = F.begin(), FE = F.end();
       FI != FE; ++FI) {
    BasicBlock &BB = *FI;
    for (BasicBlock::iterator BI = BB.begin(), BE = BB.end();
         BI != BE; ++BI) {
      if (isAtomic(BI))
        AtomicAccesses.push_back(BI);
      else if (isa<LoadInst>(BI) || isa<StoreInst>(BI))
        LocalLoadsAndStores.push_back(BI);
      else if (isa<ReturnInst>(BI))
        RetVec.push_back(BI);
      else if (isa<CallInst>(BI) || isa<InvokeInst>(BI)) {
        if (isa<MemIntrinsic>(BI))
          MemIntrinCalls.push_back(BI);
        HasCalls = true;
        chooseInstructionsToInstrument(LocalLoadsAndStores, AllLoadsAndStores);
      }
    }
    chooseInstructionsToInstrument(LocalLoadsAndStores, AllLoadsAndStores);
  }

  // We have collected all loads and stores.
  // FIXME: many of these accesses do not need to be checked for races
  // (e.g. variables that do not escape, etc).

  // Instrument memory accesses.
  if (ClInstrumentMemoryAccesses && F.hasFnAttribute(Attribute::SanitizeThread))
    for (size_t i = 0, n = AllLoadsAndStores.size(); i < n; ++i) {
      Res |= instrumentLoadOrStore(AllLoadsAndStores[i]);
    }

  // Instrument atomic memory accesses.
  if (ClInstrumentAtomics)
    for (size_t i = 0, n = AtomicAccesses.size(); i < n; ++i) {
      Res |= instrumentAtomic(AtomicAccesses[i]);
    }

  if (ClInstrumentMemIntrinsics)
    for (size_t i = 0, n = MemIntrinCalls.size(); i < n; ++i) {
      Res |= instrumentMemIntrinsic(MemIntrinCalls[i]);
    }

  // Instrument function entry/exit points if there were instrumented
  // accesses or calls.
  if ((Res || HasCalls) && ClInstrumentFuncEntryExit) {
    IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI());
    Value *ReturnAddress = IRB.CreateCall(
        Intrinsic::getDeclaration(F.getParent(), Intrinsic::returnaddress),
        IRB.getInt32(0));
    IRB.CreateCall(TsanFuncEntry, ReturnAddress);
    for (size_t i = 0, n = RetVec.size(); i < n; ++i) {
      IRBuilder<> IRBRet(RetVec[i]);
      IRBRet.CreateCall(TsanFuncExit);
    }
    Res = true;
  }
  return Res;
}
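
// instrumentLoadOrStore (below) inserts the callback immediately before the
// access. For a plain 4-byte store the transformation looks roughly like
// this (an IR sketch):
//   store i32 %v, i32* %p
// becomes
//   %0 = bitcast i32* %p to i8*
//   call void @__tsan_write4(i8* %0)
//   store i32 %v, i32* %p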
bool ThreadSanitizer::instrumentLoadOrStore(Instruction *I) {
  IRBuilder<> IRB(I);
  bool IsWrite = isa<StoreInst>(*I);
  Value *Addr = IsWrite
      ? cast<StoreInst>(I)->getPointerOperand()
      : cast<LoadInst>(I)->getPointerOperand();
  int Idx = getMemoryAccessFuncIndex(Addr);
  if (Idx < 0)
    return false;
  if (IsWrite && isVtableAccess(I)) {
    DEBUG(dbgs() << "  VPTR : " << *I << "\n");
    Value *StoredValue = cast<StoreInst>(I)->getValueOperand();
    // StoredValue does not necessarily have a pointer type.
    if (isa<IntegerType>(StoredValue->getType()))
      StoredValue = IRB.CreateIntToPtr(StoredValue, IRB.getInt8PtrTy());
    // Call TsanVptrUpdate.
    IRB.CreateCall2(TsanVptrUpdate,
                    IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()),
                    IRB.CreatePointerCast(StoredValue, IRB.getInt8PtrTy()));
    NumInstrumentedVtableWrites++;
    return true;
  }
  if (!IsWrite && isVtableAccess(I)) {
    IRB.CreateCall(TsanVptrLoad,
                   IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()));
    NumInstrumentedVtableReads++;
    return true;
  }
  Value *OnAccessFunc = IsWrite ? TsanWrite[Idx] : TsanRead[Idx];
  IRB.CreateCall(OnAccessFunc, IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()));
  if (IsWrite) NumInstrumentedWrites++;
  else         NumInstrumentedReads++;
  return true;
}

static ConstantInt *createOrdering(IRBuilder<> *IRB, AtomicOrdering ord) {
  uint32_t v = 0;
  switch (ord) {
    case NotAtomic:              assert(false);
    case Unordered:              // Fall-through.
    case Monotonic:              v = 0; break;
    // case Consume:             v = 1; break;  // Not specified yet.
    case Acquire:                v = 2; break;
    case Release:                v = 3; break;
    case AcquireRelease:         v = 4; break;
    case SequentiallyConsistent: v = 5; break;
  }
  return IRB->getInt32(v);
}

static ConstantInt *createFailOrdering(IRBuilder<> *IRB, AtomicOrdering ord) {
  uint32_t v = 0;
  switch (ord) {
    case NotAtomic:              assert(false);
    case Unordered:              // Fall-through.
    case Monotonic:              v = 0; break;
    // case Consume:             v = 1; break;  // Not specified yet.
    case Acquire:                v = 2; break;
    case Release:                v = 0; break;
    case AcquireRelease:         v = 2; break;
    case SequentiallyConsistent: v = 5; break;
  }
  return IRB->getInt32(v);
}
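
// Note: the integer encodings above are assumed to match the run-time's
// __tsan_memory_order enum (relaxed=0, consume=1, acquire=2, release=3,
// acq_rel=4, seq_cst=5). createFailOrdering strips the release component
// (Release -> relaxed, AcquireRelease -> acquire), since a failed CAS
// performs no store and C++11 forbids a release failure ordering.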

// If a memset intrinsic gets inlined by code generation, we will miss races
// on it. So, we either need to ensure the intrinsic is not inlined, or
// instrument it. We do not instrument memset/memmove/memcpy intrinsics (too
// complicated); instead we simply replace them with regular function calls,
// which are then intercepted by the run-time.
// Since tsan runs after all other passes, the calls should not be replaced
// back with intrinsics. If that becomes wrong at some point, we will need to
// call e.g. __tsan_memset to avoid the intrinsics.
bool ThreadSanitizer::instrumentMemIntrinsic(Instruction *I) {
  IRBuilder<> IRB(I);
  if (MemSetInst *M = dyn_cast<MemSetInst>(I)) {
    IRB.CreateCall3(MemsetFn,
      IRB.CreatePointerCast(M->getArgOperand(0), IRB.getInt8PtrTy()),
      IRB.CreateIntCast(M->getArgOperand(1), IRB.getInt32Ty(), false),
      IRB.CreateIntCast(M->getArgOperand(2), IntptrTy, false));
    I->eraseFromParent();
  } else if (MemTransferInst *M = dyn_cast<MemTransferInst>(I)) {
    IRB.CreateCall3(isa<MemCpyInst>(M) ? MemcpyFn : MemmoveFn,
      IRB.CreatePointerCast(M->getArgOperand(0), IRB.getInt8PtrTy()),
      IRB.CreatePointerCast(M->getArgOperand(1), IRB.getInt8PtrTy()),
      IRB.CreateIntCast(M->getArgOperand(2), IntptrTy, false));
    I->eraseFromParent();
  }
  return false;
}
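
// For example (an IR sketch), the intrinsic call
//   call void @llvm.memset.p0i8.i64(i8* %p, i8 0, i64 %n, i32 1, i1 false)
// is replaced with a plain libc call that the run-time intercepts:
//   call i8* @memset(i8* %p, i32 0, i64 %n)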

// Both llvm and ThreadSanitizer atomic operations are based on the C++11/C11
// standards. For background see the C++11 standard. A slightly older,
// publicly available draft of the standard (not entirely up-to-date, but
// close enough for casual browsing) is available here:
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2011/n3242.pdf
// The following page contains more background information:
// http://www.hpl.hp.com/personal/Hans_Boehm/c++mm/

bool ThreadSanitizer::instrumentAtomic(Instruction *I) {
  IRBuilder<> IRB(I);
  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    Value *Addr = LI->getPointerOperand();
    int Idx = getMemoryAccessFuncIndex(Addr);
    if (Idx < 0)
      return false;
    const size_t ByteSize = 1 << Idx;
    const size_t BitSize = ByteSize * 8;
    Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
    Type *PtrTy = Ty->getPointerTo();
    Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
                     createOrdering(&IRB, LI->getOrdering())};
    CallInst *C = CallInst::Create(TsanAtomicLoad[Idx],
                                   ArrayRef<Value*>(Args));
    ReplaceInstWithInst(I, C);

  } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
    Value *Addr = SI->getPointerOperand();
    int Idx = getMemoryAccessFuncIndex(Addr);
    if (Idx < 0)
      return false;
    const size_t ByteSize = 1 << Idx;
    const size_t BitSize = ByteSize * 8;
    Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
    Type *PtrTy = Ty->getPointerTo();
    Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
                     IRB.CreateIntCast(SI->getValueOperand(), Ty, false),
                     createOrdering(&IRB, SI->getOrdering())};
    CallInst *C = CallInst::Create(TsanAtomicStore[Idx],
                                   ArrayRef<Value*>(Args));
    ReplaceInstWithInst(I, C);
  } else if (AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I)) {
    Value *Addr = RMWI->getPointerOperand();
    int Idx = getMemoryAccessFuncIndex(Addr);
    if (Idx < 0)
      return false;
    Function *F = TsanAtomicRMW[RMWI->getOperation()][Idx];
    if (F == NULL)
      return false;
    const size_t ByteSize = 1 << Idx;
    const size_t BitSize = ByteSize * 8;
    Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
    Type *PtrTy = Ty->getPointerTo();
    Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
                     IRB.CreateIntCast(RMWI->getValOperand(), Ty, false),
                     createOrdering(&IRB, RMWI->getOrdering())};
    CallInst *C = CallInst::Create(F, ArrayRef<Value*>(Args));
    ReplaceInstWithInst(I, C);
  } else if (AtomicCmpXchgInst *CASI = dyn_cast<AtomicCmpXchgInst>(I)) {
    Value *Addr = CASI->getPointerOperand();
    int Idx = getMemoryAccessFuncIndex(Addr);
    if (Idx < 0)
      return false;
    const size_t ByteSize = 1 << Idx;
    const size_t BitSize = ByteSize * 8;
    Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
    Type *PtrTy = Ty->getPointerTo();
    Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
                     IRB.CreateIntCast(CASI->getCompareOperand(), Ty, false),
                     IRB.CreateIntCast(CASI->getNewValOperand(), Ty, false),
                     createOrdering(&IRB, CASI->getOrdering()),
                     createFailOrdering(&IRB, CASI->getOrdering())};
    CallInst *C = CallInst::Create(TsanAtomicCAS[Idx], ArrayRef<Value*>(Args));
    ReplaceInstWithInst(I, C);
  } else if (FenceInst *FI = dyn_cast<FenceInst>(I)) {
    Value *Args[] = {createOrdering(&IRB, FI->getOrdering())};
    Function *F = FI->getSynchScope() == SingleThread ?
        TsanAtomicSignalFence : TsanAtomicThreadFence;
    CallInst *C = CallInst::Create(F, ArrayRef<Value*>(Args));
    ReplaceInstWithInst(I, C);
  }
  return true;
}

int ThreadSanitizer::getMemoryAccessFuncIndex(Value *Addr) {
  Type *OrigPtrTy = Addr->getType();
  Type *OrigTy = cast<PointerType>(OrigPtrTy)->getElementType();
  assert(OrigTy->isSized());
  uint32_t TypeSize = TD->getTypeStoreSizeInBits(OrigTy);
  if (TypeSize != 8  && TypeSize != 16 &&
      TypeSize != 32 && TypeSize != 64 && TypeSize != 128) {
    NumAccessesWithBadSize++;
    // Ignore all unusual sizes.
    return -1;
  }
  // TypeSize is 8/16/32/64/128 bits here, so Idx = log2(size in bytes):
  // 1 byte -> 0, 2 -> 1, 4 -> 2, 8 -> 3, 16 -> 4.
  size_t Idx = countTrailingZeros(TypeSize / 8);
  assert(Idx < kNumberOfAccessSizes);
  return Idx;
}