1 //===- AddressSanitizer.cpp - memory error detector -----------------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This file is a part of AddressSanitizer, an address basic correctness 10 // checker. 11 // Details of the algorithm: 12 // https://github.com/google/sanitizers/wiki/AddressSanitizerAlgorithm 13 // 14 // FIXME: This sanitizer does not yet handle scalable vectors 15 // 16 //===----------------------------------------------------------------------===// 17 18 #include "llvm/Transforms/Instrumentation/AddressSanitizer.h" 19 #include "llvm/ADT/ArrayRef.h" 20 #include "llvm/ADT/DenseMap.h" 21 #include "llvm/ADT/DepthFirstIterator.h" 22 #include "llvm/ADT/SmallPtrSet.h" 23 #include "llvm/ADT/SmallVector.h" 24 #include "llvm/ADT/Statistic.h" 25 #include "llvm/ADT/StringExtras.h" 26 #include "llvm/ADT/StringRef.h" 27 #include "llvm/ADT/Triple.h" 28 #include "llvm/ADT/Twine.h" 29 #include "llvm/Analysis/MemoryBuiltins.h" 30 #include "llvm/Analysis/StackSafetyAnalysis.h" 31 #include "llvm/Analysis/TargetLibraryInfo.h" 32 #include "llvm/Analysis/ValueTracking.h" 33 #include "llvm/BinaryFormat/MachO.h" 34 #include "llvm/Demangle/Demangle.h" 35 #include "llvm/IR/Argument.h" 36 #include "llvm/IR/Attributes.h" 37 #include "llvm/IR/BasicBlock.h" 38 #include "llvm/IR/Comdat.h" 39 #include "llvm/IR/Constant.h" 40 #include "llvm/IR/Constants.h" 41 #include "llvm/IR/DIBuilder.h" 42 #include "llvm/IR/DataLayout.h" 43 #include "llvm/IR/DebugInfoMetadata.h" 44 #include "llvm/IR/DebugLoc.h" 45 #include "llvm/IR/DerivedTypes.h" 46 #include "llvm/IR/Function.h" 47 #include "llvm/IR/GlobalAlias.h" 48 #include "llvm/IR/GlobalValue.h" 49 #include "llvm/IR/GlobalVariable.h" 50 #include "llvm/IR/IRBuilder.h" 51 #include "llvm/IR/InlineAsm.h" 52 #include "llvm/IR/InstVisitor.h" 53 #include "llvm/IR/InstrTypes.h" 54 #include "llvm/IR/Instruction.h" 55 #include "llvm/IR/Instructions.h" 56 #include "llvm/IR/IntrinsicInst.h" 57 #include "llvm/IR/Intrinsics.h" 58 #include "llvm/IR/LLVMContext.h" 59 #include "llvm/IR/MDBuilder.h" 60 #include "llvm/IR/Metadata.h" 61 #include "llvm/IR/Module.h" 62 #include "llvm/IR/Type.h" 63 #include "llvm/IR/Use.h" 64 #include "llvm/IR/Value.h" 65 #include "llvm/MC/MCSectionMachO.h" 66 #include "llvm/Support/Casting.h" 67 #include "llvm/Support/CommandLine.h" 68 #include "llvm/Support/Debug.h" 69 #include "llvm/Support/ErrorHandling.h" 70 #include "llvm/Support/MathExtras.h" 71 #include "llvm/Support/raw_ostream.h" 72 #include "llvm/Transforms/Instrumentation.h" 73 #include "llvm/Transforms/Instrumentation/AddressSanitizerCommon.h" 74 #include "llvm/Transforms/Instrumentation/AddressSanitizerOptions.h" 75 #include "llvm/Transforms/Utils/ASanStackFrameLayout.h" 76 #include "llvm/Transforms/Utils/BasicBlockUtils.h" 77 #include "llvm/Transforms/Utils/Local.h" 78 #include "llvm/Transforms/Utils/ModuleUtils.h" 79 #include "llvm/Transforms/Utils/PromoteMemToReg.h" 80 #include <algorithm> 81 #include <cassert> 82 #include <cstddef> 83 #include <cstdint> 84 #include <iomanip> 85 #include <limits> 86 #include <sstream> 87 #include <string> 88 #include <tuple> 89 90 using namespace llvm; 91 92 #define DEBUG_TYPE "asan" 93 94 static const uint64_t kDefaultShadowScale = 3; 95 static const uint64_t kDefaultShadowOffset32 = 1ULL << 29; 96 static const uint64_t kDefaultShadowOffset64 = 1ULL << 44; 97 static const uint64_t kDynamicShadowSentinel = 98 std::numeric_limits<uint64_t>::max(); 99 static const uint64_t kSmallX86_64ShadowOffsetBase = 0x7FFFFFFF; // < 2G. 100 static const uint64_t kSmallX86_64ShadowOffsetAlignMask = ~0xFFFULL; 101 static const uint64_t kLinuxKasan_ShadowOffset64 = 0xdffffc0000000000; 102 static const uint64_t kPPC64_ShadowOffset64 = 1ULL << 44; 103 static const uint64_t kSystemZ_ShadowOffset64 = 1ULL << 52; 104 static const uint64_t kMIPS_ShadowOffsetN32 = 1ULL << 29; 105 static const uint64_t kMIPS32_ShadowOffset32 = 0x0aaa0000; 106 static const uint64_t kMIPS64_ShadowOffset64 = 1ULL << 37; 107 static const uint64_t kAArch64_ShadowOffset64 = 1ULL << 36; 108 static const uint64_t kRISCV64_ShadowOffset64 = 0xd55550000; 109 static const uint64_t kFreeBSD_ShadowOffset32 = 1ULL << 30; 110 static const uint64_t kFreeBSD_ShadowOffset64 = 1ULL << 46; 111 static const uint64_t kFreeBSDKasan_ShadowOffset64 = 0xdffff7c000000000; 112 static const uint64_t kNetBSD_ShadowOffset32 = 1ULL << 30; 113 static const uint64_t kNetBSD_ShadowOffset64 = 1ULL << 46; 114 static const uint64_t kNetBSDKasan_ShadowOffset64 = 0xdfff900000000000; 115 static const uint64_t kPS_ShadowOffset64 = 1ULL << 40; 116 static const uint64_t kWindowsShadowOffset32 = 3ULL << 28; 117 static const uint64_t kEmscriptenShadowOffset = 0; 118 119 // The shadow memory space is dynamically allocated. 120 static const uint64_t kWindowsShadowOffset64 = kDynamicShadowSentinel; 121 122 static const size_t kMinStackMallocSize = 1 << 6; // 64B 123 static const size_t kMaxStackMallocSize = 1 << 16; // 64K 124 static const uintptr_t kCurrentStackFrameMagic = 0x41B58AB3; 125 static const uintptr_t kRetiredStackFrameMagic = 0x45E0360E; 126 127 const char kAsanModuleCtorName[] = "asan.module_ctor"; 128 const char kAsanModuleDtorName[] = "asan.module_dtor"; 129 static const uint64_t kAsanCtorAndDtorPriority = 1; 130 // On Emscripten, the system needs more than one priorities for constructors. 131 static const uint64_t kAsanEmscriptenCtorAndDtorPriority = 50; 132 const char kAsanReportErrorTemplate[] = "__asan_report_"; 133 const char kAsanRegisterGlobalsName[] = "__asan_register_globals"; 134 const char kAsanUnregisterGlobalsName[] = "__asan_unregister_globals"; 135 const char kAsanRegisterImageGlobalsName[] = "__asan_register_image_globals"; 136 const char kAsanUnregisterImageGlobalsName[] = 137 "__asan_unregister_image_globals"; 138 const char kAsanRegisterElfGlobalsName[] = "__asan_register_elf_globals"; 139 const char kAsanUnregisterElfGlobalsName[] = "__asan_unregister_elf_globals"; 140 const char kAsanPoisonGlobalsName[] = "__asan_before_dynamic_init"; 141 const char kAsanUnpoisonGlobalsName[] = "__asan_after_dynamic_init"; 142 const char kAsanInitName[] = "__asan_init"; 143 const char kAsanVersionCheckNamePrefix[] = "__asan_version_mismatch_check_v"; 144 const char kAsanPtrCmp[] = "__sanitizer_ptr_cmp"; 145 const char kAsanPtrSub[] = "__sanitizer_ptr_sub"; 146 const char kAsanHandleNoReturnName[] = "__asan_handle_no_return"; 147 static const int kMaxAsanStackMallocSizeClass = 10; 148 const char kAsanStackMallocNameTemplate[] = "__asan_stack_malloc_"; 149 const char kAsanStackMallocAlwaysNameTemplate[] = 150 "__asan_stack_malloc_always_"; 151 const char kAsanStackFreeNameTemplate[] = "__asan_stack_free_"; 152 const char kAsanGenPrefix[] = "___asan_gen_"; 153 const char kODRGenPrefix[] = "__odr_asan_gen_"; 154 const char kSanCovGenPrefix[] = "__sancov_gen_"; 155 const char kAsanSetShadowPrefix[] = "__asan_set_shadow_"; 156 const char kAsanPoisonStackMemoryName[] = "__asan_poison_stack_memory"; 157 const char kAsanUnpoisonStackMemoryName[] = "__asan_unpoison_stack_memory"; 158 159 // ASan version script has __asan_* wildcard. Triple underscore prevents a 160 // linker (gold) warning about attempting to export a local symbol. 161 const char kAsanGlobalsRegisteredFlagName[] = "___asan_globals_registered"; 162 163 const char kAsanOptionDetectUseAfterReturn[] = 164 "__asan_option_detect_stack_use_after_return"; 165 166 const char kAsanShadowMemoryDynamicAddress[] = 167 "__asan_shadow_memory_dynamic_address"; 168 169 const char kAsanAllocaPoison[] = "__asan_alloca_poison"; 170 const char kAsanAllocasUnpoison[] = "__asan_allocas_unpoison"; 171 172 const char kAMDGPUAddressSharedName[] = "llvm.amdgcn.is.shared"; 173 const char kAMDGPUAddressPrivateName[] = "llvm.amdgcn.is.private"; 174 175 // Accesses sizes are powers of two: 1, 2, 4, 8, 16. 176 static const size_t kNumberOfAccessSizes = 5; 177 178 static const uint64_t kAllocaRzSize = 32; 179 180 // ASanAccessInfo implementation constants. 181 constexpr size_t kCompileKernelShift = 0; 182 constexpr size_t kCompileKernelMask = 0x1; 183 constexpr size_t kAccessSizeIndexShift = 1; 184 constexpr size_t kAccessSizeIndexMask = 0xf; 185 constexpr size_t kIsWriteShift = 5; 186 constexpr size_t kIsWriteMask = 0x1; 187 188 // Command-line flags. 189 190 static cl::opt<bool> ClEnableKasan( 191 "asan-kernel", cl::desc("Enable KernelAddressSanitizer instrumentation"), 192 cl::Hidden, cl::init(false)); 193 194 static cl::opt<bool> ClRecover( 195 "asan-recover", 196 cl::desc("Enable recovery mode (continue-after-error)."), 197 cl::Hidden, cl::init(false)); 198 199 static cl::opt<bool> ClInsertVersionCheck( 200 "asan-guard-against-version-mismatch", 201 cl::desc("Guard against compiler/runtime version mismatch."), 202 cl::Hidden, cl::init(true)); 203 204 // This flag may need to be replaced with -f[no-]asan-reads. 205 static cl::opt<bool> ClInstrumentReads("asan-instrument-reads", 206 cl::desc("instrument read instructions"), 207 cl::Hidden, cl::init(true)); 208 209 static cl::opt<bool> ClInstrumentWrites( 210 "asan-instrument-writes", cl::desc("instrument write instructions"), 211 cl::Hidden, cl::init(true)); 212 213 static cl::opt<bool> 214 ClUseStackSafety("asan-use-stack-safety", cl::Hidden, cl::init(false), 215 cl::Hidden, cl::desc("Use Stack Safety analysis results"), 216 cl::Optional); 217 218 static cl::opt<bool> ClInstrumentAtomics( 219 "asan-instrument-atomics", 220 cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden, 221 cl::init(true)); 222 223 static cl::opt<bool> 224 ClInstrumentByval("asan-instrument-byval", 225 cl::desc("instrument byval call arguments"), cl::Hidden, 226 cl::init(true)); 227 228 static cl::opt<bool> ClAlwaysSlowPath( 229 "asan-always-slow-path", 230 cl::desc("use instrumentation with slow path for all accesses"), cl::Hidden, 231 cl::init(false)); 232 233 static cl::opt<bool> ClForceDynamicShadow( 234 "asan-force-dynamic-shadow", 235 cl::desc("Load shadow address into a local variable for each function"), 236 cl::Hidden, cl::init(false)); 237 238 static cl::opt<bool> 239 ClWithIfunc("asan-with-ifunc", 240 cl::desc("Access dynamic shadow through an ifunc global on " 241 "platforms that support this"), 242 cl::Hidden, cl::init(true)); 243 244 static cl::opt<bool> ClWithIfuncSuppressRemat( 245 "asan-with-ifunc-suppress-remat", 246 cl::desc("Suppress rematerialization of dynamic shadow address by passing " 247 "it through inline asm in prologue."), 248 cl::Hidden, cl::init(true)); 249 250 // This flag limits the number of instructions to be instrumented 251 // in any given BB. Normally, this should be set to unlimited (INT_MAX), 252 // but due to http://llvm.org/bugs/show_bug.cgi?id=12652 we temporary 253 // set it to 10000. 254 static cl::opt<int> ClMaxInsnsToInstrumentPerBB( 255 "asan-max-ins-per-bb", cl::init(10000), 256 cl::desc("maximal number of instructions to instrument in any given BB"), 257 cl::Hidden); 258 259 // This flag may need to be replaced with -f[no]asan-stack. 260 static cl::opt<bool> ClStack("asan-stack", cl::desc("Handle stack memory"), 261 cl::Hidden, cl::init(true)); 262 static cl::opt<uint32_t> ClMaxInlinePoisoningSize( 263 "asan-max-inline-poisoning-size", 264 cl::desc( 265 "Inline shadow poisoning for blocks up to the given size in bytes."), 266 cl::Hidden, cl::init(64)); 267 268 static cl::opt<AsanDetectStackUseAfterReturnMode> ClUseAfterReturn( 269 "asan-use-after-return", 270 cl::desc("Sets the mode of detection for stack-use-after-return."), 271 cl::values( 272 clEnumValN(AsanDetectStackUseAfterReturnMode::Never, "never", 273 "Never detect stack use after return."), 274 clEnumValN( 275 AsanDetectStackUseAfterReturnMode::Runtime, "runtime", 276 "Detect stack use after return if " 277 "binary flag 'ASAN_OPTIONS=detect_stack_use_after_return' is set."), 278 clEnumValN(AsanDetectStackUseAfterReturnMode::Always, "always", 279 "Always detect stack use after return.")), 280 cl::Hidden, cl::init(AsanDetectStackUseAfterReturnMode::Runtime)); 281 282 static cl::opt<bool> ClRedzoneByvalArgs("asan-redzone-byval-args", 283 cl::desc("Create redzones for byval " 284 "arguments (extra copy " 285 "required)"), cl::Hidden, 286 cl::init(true)); 287 288 static cl::opt<bool> ClUseAfterScope("asan-use-after-scope", 289 cl::desc("Check stack-use-after-scope"), 290 cl::Hidden, cl::init(false)); 291 292 // This flag may need to be replaced with -f[no]asan-globals. 293 static cl::opt<bool> ClGlobals("asan-globals", 294 cl::desc("Handle global objects"), cl::Hidden, 295 cl::init(true)); 296 297 static cl::opt<bool> ClInitializers("asan-initialization-order", 298 cl::desc("Handle C++ initializer order"), 299 cl::Hidden, cl::init(true)); 300 301 static cl::opt<bool> ClInvalidPointerPairs( 302 "asan-detect-invalid-pointer-pair", 303 cl::desc("Instrument <, <=, >, >=, - with pointer operands"), cl::Hidden, 304 cl::init(false)); 305 306 static cl::opt<bool> ClInvalidPointerCmp( 307 "asan-detect-invalid-pointer-cmp", 308 cl::desc("Instrument <, <=, >, >= with pointer operands"), cl::Hidden, 309 cl::init(false)); 310 311 static cl::opt<bool> ClInvalidPointerSub( 312 "asan-detect-invalid-pointer-sub", 313 cl::desc("Instrument - operations with pointer operands"), cl::Hidden, 314 cl::init(false)); 315 316 static cl::opt<unsigned> ClRealignStack( 317 "asan-realign-stack", 318 cl::desc("Realign stack to the value of this flag (power of two)"), 319 cl::Hidden, cl::init(32)); 320 321 static cl::opt<int> ClInstrumentationWithCallsThreshold( 322 "asan-instrumentation-with-call-threshold", 323 cl::desc( 324 "If the function being instrumented contains more than " 325 "this number of memory accesses, use callbacks instead of " 326 "inline checks (-1 means never use callbacks)."), 327 cl::Hidden, cl::init(7000)); 328 329 static cl::opt<std::string> ClMemoryAccessCallbackPrefix( 330 "asan-memory-access-callback-prefix", 331 cl::desc("Prefix for memory access callbacks"), cl::Hidden, 332 cl::init("__asan_")); 333 334 static cl::opt<bool> ClKasanMemIntrinCallbackPrefix( 335 "asan-kernel-mem-intrinsic-prefix", 336 cl::desc("Use prefix for memory intrinsics in KASAN mode"), cl::Hidden, 337 cl::init(false)); 338 339 static cl::opt<bool> 340 ClInstrumentDynamicAllocas("asan-instrument-dynamic-allocas", 341 cl::desc("instrument dynamic allocas"), 342 cl::Hidden, cl::init(true)); 343 344 static cl::opt<bool> ClSkipPromotableAllocas( 345 "asan-skip-promotable-allocas", 346 cl::desc("Do not instrument promotable allocas"), cl::Hidden, 347 cl::init(true)); 348 349 // These flags allow to change the shadow mapping. 350 // The shadow mapping looks like 351 // Shadow = (Mem >> scale) + offset 352 353 static cl::opt<int> ClMappingScale("asan-mapping-scale", 354 cl::desc("scale of asan shadow mapping"), 355 cl::Hidden, cl::init(0)); 356 357 static cl::opt<uint64_t> 358 ClMappingOffset("asan-mapping-offset", 359 cl::desc("offset of asan shadow mapping [EXPERIMENTAL]"), 360 cl::Hidden, cl::init(0)); 361 362 // Optimization flags. Not user visible, used mostly for testing 363 // and benchmarking the tool. 364 365 static cl::opt<bool> ClOpt("asan-opt", cl::desc("Optimize instrumentation"), 366 cl::Hidden, cl::init(true)); 367 368 static cl::opt<bool> ClOptimizeCallbacks("asan-optimize-callbacks", 369 cl::desc("Optimize callbacks"), 370 cl::Hidden, cl::init(false)); 371 372 static cl::opt<bool> ClOptSameTemp( 373 "asan-opt-same-temp", cl::desc("Instrument the same temp just once"), 374 cl::Hidden, cl::init(true)); 375 376 static cl::opt<bool> ClOptGlobals("asan-opt-globals", 377 cl::desc("Don't instrument scalar globals"), 378 cl::Hidden, cl::init(true)); 379 380 static cl::opt<bool> ClOptStack( 381 "asan-opt-stack", cl::desc("Don't instrument scalar stack variables"), 382 cl::Hidden, cl::init(false)); 383 384 static cl::opt<bool> ClDynamicAllocaStack( 385 "asan-stack-dynamic-alloca", 386 cl::desc("Use dynamic alloca to represent stack variables"), cl::Hidden, 387 cl::init(true)); 388 389 static cl::opt<uint32_t> ClForceExperiment( 390 "asan-force-experiment", 391 cl::desc("Force optimization experiment (for testing)"), cl::Hidden, 392 cl::init(0)); 393 394 static cl::opt<bool> 395 ClUsePrivateAlias("asan-use-private-alias", 396 cl::desc("Use private aliases for global variables"), 397 cl::Hidden, cl::init(false)); 398 399 static cl::opt<bool> 400 ClUseOdrIndicator("asan-use-odr-indicator", 401 cl::desc("Use odr indicators to improve ODR reporting"), 402 cl::Hidden, cl::init(false)); 403 404 static cl::opt<bool> 405 ClUseGlobalsGC("asan-globals-live-support", 406 cl::desc("Use linker features to support dead " 407 "code stripping of globals"), 408 cl::Hidden, cl::init(true)); 409 410 // This is on by default even though there is a bug in gold: 411 // https://sourceware.org/bugzilla/show_bug.cgi?id=19002 412 static cl::opt<bool> 413 ClWithComdat("asan-with-comdat", 414 cl::desc("Place ASan constructors in comdat sections"), 415 cl::Hidden, cl::init(true)); 416 417 static cl::opt<AsanDtorKind> ClOverrideDestructorKind( 418 "asan-destructor-kind", 419 cl::desc("Sets the ASan destructor kind. The default is to use the value " 420 "provided to the pass constructor"), 421 cl::values(clEnumValN(AsanDtorKind::None, "none", "No destructors"), 422 clEnumValN(AsanDtorKind::Global, "global", 423 "Use global destructors")), 424 cl::init(AsanDtorKind::Invalid), cl::Hidden); 425 426 // Debug flags. 427 428 static cl::opt<int> ClDebug("asan-debug", cl::desc("debug"), cl::Hidden, 429 cl::init(0)); 430 431 static cl::opt<int> ClDebugStack("asan-debug-stack", cl::desc("debug stack"), 432 cl::Hidden, cl::init(0)); 433 434 static cl::opt<std::string> ClDebugFunc("asan-debug-func", cl::Hidden, 435 cl::desc("Debug func")); 436 437 static cl::opt<int> ClDebugMin("asan-debug-min", cl::desc("Debug min inst"), 438 cl::Hidden, cl::init(-1)); 439 440 static cl::opt<int> ClDebugMax("asan-debug-max", cl::desc("Debug max inst"), 441 cl::Hidden, cl::init(-1)); 442 443 STATISTIC(NumInstrumentedReads, "Number of instrumented reads"); 444 STATISTIC(NumInstrumentedWrites, "Number of instrumented writes"); 445 STATISTIC(NumOptimizedAccessesToGlobalVar, 446 "Number of optimized accesses to global vars"); 447 STATISTIC(NumOptimizedAccessesToStackVar, 448 "Number of optimized accesses to stack vars"); 449 450 namespace { 451 452 /// This struct defines the shadow mapping using the rule: 453 /// shadow = (mem >> Scale) ADD-or-OR Offset. 454 /// If InGlobal is true, then 455 /// extern char __asan_shadow[]; 456 /// shadow = (mem >> Scale) + &__asan_shadow 457 struct ShadowMapping { 458 int Scale; 459 uint64_t Offset; 460 bool OrShadowOffset; 461 bool InGlobal; 462 }; 463 464 } // end anonymous namespace 465 466 static ShadowMapping getShadowMapping(const Triple &TargetTriple, int LongSize, 467 bool IsKasan) { 468 bool IsAndroid = TargetTriple.isAndroid(); 469 bool IsIOS = TargetTriple.isiOS() || TargetTriple.isWatchOS() || 470 TargetTriple.isDriverKit(); 471 bool IsMacOS = TargetTriple.isMacOSX(); 472 bool IsFreeBSD = TargetTriple.isOSFreeBSD(); 473 bool IsNetBSD = TargetTriple.isOSNetBSD(); 474 bool IsPS = TargetTriple.isPS(); 475 bool IsLinux = TargetTriple.isOSLinux(); 476 bool IsPPC64 = TargetTriple.getArch() == Triple::ppc64 || 477 TargetTriple.getArch() == Triple::ppc64le; 478 bool IsSystemZ = TargetTriple.getArch() == Triple::systemz; 479 bool IsX86_64 = TargetTriple.getArch() == Triple::x86_64; 480 bool IsMIPSN32ABI = TargetTriple.getEnvironment() == Triple::GNUABIN32; 481 bool IsMIPS32 = TargetTriple.isMIPS32(); 482 bool IsMIPS64 = TargetTriple.isMIPS64(); 483 bool IsArmOrThumb = TargetTriple.isARM() || TargetTriple.isThumb(); 484 bool IsAArch64 = TargetTriple.getArch() == Triple::aarch64; 485 bool IsRISCV64 = TargetTriple.getArch() == Triple::riscv64; 486 bool IsWindows = TargetTriple.isOSWindows(); 487 bool IsFuchsia = TargetTriple.isOSFuchsia(); 488 bool IsEmscripten = TargetTriple.isOSEmscripten(); 489 bool IsAMDGPU = TargetTriple.isAMDGPU(); 490 491 ShadowMapping Mapping; 492 493 Mapping.Scale = kDefaultShadowScale; 494 if (ClMappingScale.getNumOccurrences() > 0) { 495 Mapping.Scale = ClMappingScale; 496 } 497 498 if (LongSize == 32) { 499 if (IsAndroid) 500 Mapping.Offset = kDynamicShadowSentinel; 501 else if (IsMIPSN32ABI) 502 Mapping.Offset = kMIPS_ShadowOffsetN32; 503 else if (IsMIPS32) 504 Mapping.Offset = kMIPS32_ShadowOffset32; 505 else if (IsFreeBSD) 506 Mapping.Offset = kFreeBSD_ShadowOffset32; 507 else if (IsNetBSD) 508 Mapping.Offset = kNetBSD_ShadowOffset32; 509 else if (IsIOS) 510 Mapping.Offset = kDynamicShadowSentinel; 511 else if (IsWindows) 512 Mapping.Offset = kWindowsShadowOffset32; 513 else if (IsEmscripten) 514 Mapping.Offset = kEmscriptenShadowOffset; 515 else 516 Mapping.Offset = kDefaultShadowOffset32; 517 } else { // LongSize == 64 518 // Fuchsia is always PIE, which means that the beginning of the address 519 // space is always available. 520 if (IsFuchsia) 521 Mapping.Offset = 0; 522 else if (IsPPC64) 523 Mapping.Offset = kPPC64_ShadowOffset64; 524 else if (IsSystemZ) 525 Mapping.Offset = kSystemZ_ShadowOffset64; 526 else if (IsFreeBSD && !IsMIPS64) { 527 if (IsKasan) 528 Mapping.Offset = kFreeBSDKasan_ShadowOffset64; 529 else 530 Mapping.Offset = kFreeBSD_ShadowOffset64; 531 } else if (IsNetBSD) { 532 if (IsKasan) 533 Mapping.Offset = kNetBSDKasan_ShadowOffset64; 534 else 535 Mapping.Offset = kNetBSD_ShadowOffset64; 536 } else if (IsPS) 537 Mapping.Offset = kPS_ShadowOffset64; 538 else if (IsLinux && IsX86_64) { 539 if (IsKasan) 540 Mapping.Offset = kLinuxKasan_ShadowOffset64; 541 else 542 Mapping.Offset = (kSmallX86_64ShadowOffsetBase & 543 (kSmallX86_64ShadowOffsetAlignMask << Mapping.Scale)); 544 } else if (IsWindows && IsX86_64) { 545 Mapping.Offset = kWindowsShadowOffset64; 546 } else if (IsMIPS64) 547 Mapping.Offset = kMIPS64_ShadowOffset64; 548 else if (IsIOS) 549 Mapping.Offset = kDynamicShadowSentinel; 550 else if (IsMacOS && IsAArch64) 551 Mapping.Offset = kDynamicShadowSentinel; 552 else if (IsAArch64) 553 Mapping.Offset = kAArch64_ShadowOffset64; 554 else if (IsRISCV64) 555 Mapping.Offset = kRISCV64_ShadowOffset64; 556 else if (IsAMDGPU) 557 Mapping.Offset = (kSmallX86_64ShadowOffsetBase & 558 (kSmallX86_64ShadowOffsetAlignMask << Mapping.Scale)); 559 else 560 Mapping.Offset = kDefaultShadowOffset64; 561 } 562 563 if (ClForceDynamicShadow) { 564 Mapping.Offset = kDynamicShadowSentinel; 565 } 566 567 if (ClMappingOffset.getNumOccurrences() > 0) { 568 Mapping.Offset = ClMappingOffset; 569 } 570 571 // OR-ing shadow offset if more efficient (at least on x86) if the offset 572 // is a power of two, but on ppc64 we have to use add since the shadow 573 // offset is not necessary 1/8-th of the address space. On SystemZ, 574 // we could OR the constant in a single instruction, but it's more 575 // efficient to load it once and use indexed addressing. 576 Mapping.OrShadowOffset = !IsAArch64 && !IsPPC64 && !IsSystemZ && !IsPS && 577 !IsRISCV64 && 578 !(Mapping.Offset & (Mapping.Offset - 1)) && 579 Mapping.Offset != kDynamicShadowSentinel; 580 bool IsAndroidWithIfuncSupport = 581 IsAndroid && !TargetTriple.isAndroidVersionLT(21); 582 Mapping.InGlobal = ClWithIfunc && IsAndroidWithIfuncSupport && IsArmOrThumb; 583 584 return Mapping; 585 } 586 587 namespace llvm { 588 void getAddressSanitizerParams(const Triple &TargetTriple, int LongSize, 589 bool IsKasan, uint64_t *ShadowBase, 590 int *MappingScale, bool *OrShadowOffset) { 591 auto Mapping = getShadowMapping(TargetTriple, LongSize, IsKasan); 592 *ShadowBase = Mapping.Offset; 593 *MappingScale = Mapping.Scale; 594 *OrShadowOffset = Mapping.OrShadowOffset; 595 } 596 597 ASanAccessInfo::ASanAccessInfo(int32_t Packed) 598 : Packed(Packed), 599 AccessSizeIndex((Packed >> kAccessSizeIndexShift) & kAccessSizeIndexMask), 600 IsWrite((Packed >> kIsWriteShift) & kIsWriteMask), 601 CompileKernel((Packed >> kCompileKernelShift) & kCompileKernelMask) {} 602 603 ASanAccessInfo::ASanAccessInfo(bool IsWrite, bool CompileKernel, 604 uint8_t AccessSizeIndex) 605 : Packed((IsWrite << kIsWriteShift) + 606 (CompileKernel << kCompileKernelShift) + 607 (AccessSizeIndex << kAccessSizeIndexShift)), 608 AccessSizeIndex(AccessSizeIndex), IsWrite(IsWrite), 609 CompileKernel(CompileKernel) {} 610 611 } // namespace llvm 612 613 static uint64_t getRedzoneSizeForScale(int MappingScale) { 614 // Redzone used for stack and globals is at least 32 bytes. 615 // For scales 6 and 7, the redzone has to be 64 and 128 bytes respectively. 616 return std::max(32U, 1U << MappingScale); 617 } 618 619 static uint64_t GetCtorAndDtorPriority(Triple &TargetTriple) { 620 if (TargetTriple.isOSEmscripten()) { 621 return kAsanEmscriptenCtorAndDtorPriority; 622 } else { 623 return kAsanCtorAndDtorPriority; 624 } 625 } 626 627 namespace { 628 629 /// AddressSanitizer: instrument the code in module to find memory bugs. 630 struct AddressSanitizer { 631 AddressSanitizer(Module &M, const StackSafetyGlobalInfo *SSGI, 632 bool CompileKernel = false, bool Recover = false, 633 bool UseAfterScope = false, 634 AsanDetectStackUseAfterReturnMode UseAfterReturn = 635 AsanDetectStackUseAfterReturnMode::Runtime) 636 : CompileKernel(ClEnableKasan.getNumOccurrences() > 0 ? ClEnableKasan 637 : CompileKernel), 638 Recover(ClRecover.getNumOccurrences() > 0 ? ClRecover : Recover), 639 UseAfterScope(UseAfterScope || ClUseAfterScope), 640 UseAfterReturn(ClUseAfterReturn.getNumOccurrences() ? ClUseAfterReturn 641 : UseAfterReturn), 642 SSGI(SSGI) { 643 C = &(M.getContext()); 644 LongSize = M.getDataLayout().getPointerSizeInBits(); 645 IntptrTy = Type::getIntNTy(*C, LongSize); 646 Int8PtrTy = Type::getInt8PtrTy(*C); 647 Int32Ty = Type::getInt32Ty(*C); 648 TargetTriple = Triple(M.getTargetTriple()); 649 650 Mapping = getShadowMapping(TargetTriple, LongSize, this->CompileKernel); 651 652 assert(this->UseAfterReturn != AsanDetectStackUseAfterReturnMode::Invalid); 653 } 654 655 uint64_t getAllocaSizeInBytes(const AllocaInst &AI) const { 656 uint64_t ArraySize = 1; 657 if (AI.isArrayAllocation()) { 658 const ConstantInt *CI = dyn_cast<ConstantInt>(AI.getArraySize()); 659 assert(CI && "non-constant array size"); 660 ArraySize = CI->getZExtValue(); 661 } 662 Type *Ty = AI.getAllocatedType(); 663 uint64_t SizeInBytes = 664 AI.getModule()->getDataLayout().getTypeAllocSize(Ty); 665 return SizeInBytes * ArraySize; 666 } 667 668 /// Check if we want (and can) handle this alloca. 669 bool isInterestingAlloca(const AllocaInst &AI); 670 671 bool ignoreAccess(Instruction *Inst, Value *Ptr); 672 void getInterestingMemoryOperands( 673 Instruction *I, SmallVectorImpl<InterestingMemoryOperand> &Interesting); 674 675 void instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis, 676 InterestingMemoryOperand &O, bool UseCalls, 677 const DataLayout &DL); 678 void instrumentPointerComparisonOrSubtraction(Instruction *I); 679 void instrumentAddress(Instruction *OrigIns, Instruction *InsertBefore, 680 Value *Addr, uint32_t TypeSize, bool IsWrite, 681 Value *SizeArgument, bool UseCalls, uint32_t Exp); 682 Instruction *instrumentAMDGPUAddress(Instruction *OrigIns, 683 Instruction *InsertBefore, Value *Addr, 684 uint32_t TypeSize, bool IsWrite, 685 Value *SizeArgument); 686 void instrumentUnusualSizeOrAlignment(Instruction *I, 687 Instruction *InsertBefore, Value *Addr, 688 uint32_t TypeSize, bool IsWrite, 689 Value *SizeArgument, bool UseCalls, 690 uint32_t Exp); 691 Value *createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong, 692 Value *ShadowValue, uint32_t TypeSize); 693 Instruction *generateCrashCode(Instruction *InsertBefore, Value *Addr, 694 bool IsWrite, size_t AccessSizeIndex, 695 Value *SizeArgument, uint32_t Exp); 696 void instrumentMemIntrinsic(MemIntrinsic *MI); 697 Value *memToShadow(Value *Shadow, IRBuilder<> &IRB); 698 bool suppressInstrumentationSiteForDebug(int &Instrumented); 699 bool instrumentFunction(Function &F, const TargetLibraryInfo *TLI); 700 bool maybeInsertAsanInitAtFunctionEntry(Function &F); 701 bool maybeInsertDynamicShadowAtFunctionEntry(Function &F); 702 void markEscapedLocalAllocas(Function &F); 703 704 private: 705 friend struct FunctionStackPoisoner; 706 707 void initializeCallbacks(Module &M); 708 709 bool LooksLikeCodeInBug11395(Instruction *I); 710 bool GlobalIsLinkerInitialized(GlobalVariable *G); 711 bool isSafeAccess(ObjectSizeOffsetVisitor &ObjSizeVis, Value *Addr, 712 uint64_t TypeSize) const; 713 714 /// Helper to cleanup per-function state. 715 struct FunctionStateRAII { 716 AddressSanitizer *Pass; 717 718 FunctionStateRAII(AddressSanitizer *Pass) : Pass(Pass) { 719 assert(Pass->ProcessedAllocas.empty() && 720 "last pass forgot to clear cache"); 721 assert(!Pass->LocalDynamicShadow); 722 } 723 724 ~FunctionStateRAII() { 725 Pass->LocalDynamicShadow = nullptr; 726 Pass->ProcessedAllocas.clear(); 727 } 728 }; 729 730 LLVMContext *C; 731 Triple TargetTriple; 732 int LongSize; 733 bool CompileKernel; 734 bool Recover; 735 bool UseAfterScope; 736 AsanDetectStackUseAfterReturnMode UseAfterReturn; 737 Type *IntptrTy; 738 Type *Int8PtrTy; 739 Type *Int32Ty; 740 ShadowMapping Mapping; 741 FunctionCallee AsanHandleNoReturnFunc; 742 FunctionCallee AsanPtrCmpFunction, AsanPtrSubFunction; 743 Constant *AsanShadowGlobal; 744 745 // These arrays is indexed by AccessIsWrite, Experiment and log2(AccessSize). 746 FunctionCallee AsanErrorCallback[2][2][kNumberOfAccessSizes]; 747 FunctionCallee AsanMemoryAccessCallback[2][2][kNumberOfAccessSizes]; 748 749 // These arrays is indexed by AccessIsWrite and Experiment. 750 FunctionCallee AsanErrorCallbackSized[2][2]; 751 FunctionCallee AsanMemoryAccessCallbackSized[2][2]; 752 753 FunctionCallee AsanMemmove, AsanMemcpy, AsanMemset; 754 Value *LocalDynamicShadow = nullptr; 755 const StackSafetyGlobalInfo *SSGI; 756 DenseMap<const AllocaInst *, bool> ProcessedAllocas; 757 758 FunctionCallee AMDGPUAddressShared; 759 FunctionCallee AMDGPUAddressPrivate; 760 }; 761 762 class ModuleAddressSanitizer { 763 public: 764 ModuleAddressSanitizer(Module &M, bool CompileKernel = false, 765 bool Recover = false, bool UseGlobalsGC = true, 766 bool UseOdrIndicator = false, 767 AsanDtorKind DestructorKind = AsanDtorKind::Global) 768 : CompileKernel(ClEnableKasan.getNumOccurrences() > 0 ? ClEnableKasan 769 : CompileKernel), 770 Recover(ClRecover.getNumOccurrences() > 0 ? ClRecover : Recover), 771 UseGlobalsGC(UseGlobalsGC && ClUseGlobalsGC && !this->CompileKernel), 772 // Enable aliases as they should have no downside with ODR indicators. 773 UsePrivateAlias(UseOdrIndicator || ClUsePrivateAlias), 774 UseOdrIndicator(UseOdrIndicator || ClUseOdrIndicator), 775 // Not a typo: ClWithComdat is almost completely pointless without 776 // ClUseGlobalsGC (because then it only works on modules without 777 // globals, which are rare); it is a prerequisite for ClUseGlobalsGC; 778 // and both suffer from gold PR19002 for which UseGlobalsGC constructor 779 // argument is designed as workaround. Therefore, disable both 780 // ClWithComdat and ClUseGlobalsGC unless the frontend says it's ok to 781 // do globals-gc. 782 UseCtorComdat(UseGlobalsGC && ClWithComdat && !this->CompileKernel), 783 DestructorKind(DestructorKind) { 784 C = &(M.getContext()); 785 int LongSize = M.getDataLayout().getPointerSizeInBits(); 786 IntptrTy = Type::getIntNTy(*C, LongSize); 787 TargetTriple = Triple(M.getTargetTriple()); 788 Mapping = getShadowMapping(TargetTriple, LongSize, this->CompileKernel); 789 790 if (ClOverrideDestructorKind != AsanDtorKind::Invalid) 791 this->DestructorKind = ClOverrideDestructorKind; 792 assert(this->DestructorKind != AsanDtorKind::Invalid); 793 } 794 795 bool instrumentModule(Module &); 796 797 private: 798 void initializeCallbacks(Module &M); 799 800 bool InstrumentGlobals(IRBuilder<> &IRB, Module &M, bool *CtorComdat); 801 void InstrumentGlobalsCOFF(IRBuilder<> &IRB, Module &M, 802 ArrayRef<GlobalVariable *> ExtendedGlobals, 803 ArrayRef<Constant *> MetadataInitializers); 804 void InstrumentGlobalsELF(IRBuilder<> &IRB, Module &M, 805 ArrayRef<GlobalVariable *> ExtendedGlobals, 806 ArrayRef<Constant *> MetadataInitializers, 807 const std::string &UniqueModuleId); 808 void InstrumentGlobalsMachO(IRBuilder<> &IRB, Module &M, 809 ArrayRef<GlobalVariable *> ExtendedGlobals, 810 ArrayRef<Constant *> MetadataInitializers); 811 void 812 InstrumentGlobalsWithMetadataArray(IRBuilder<> &IRB, Module &M, 813 ArrayRef<GlobalVariable *> ExtendedGlobals, 814 ArrayRef<Constant *> MetadataInitializers); 815 816 GlobalVariable *CreateMetadataGlobal(Module &M, Constant *Initializer, 817 StringRef OriginalName); 818 void SetComdatForGlobalMetadata(GlobalVariable *G, GlobalVariable *Metadata, 819 StringRef InternalSuffix); 820 Instruction *CreateAsanModuleDtor(Module &M); 821 822 const GlobalVariable *getExcludedAliasedGlobal(const GlobalAlias &GA) const; 823 bool shouldInstrumentGlobal(GlobalVariable *G) const; 824 bool ShouldUseMachOGlobalsSection() const; 825 StringRef getGlobalMetadataSection() const; 826 void poisonOneInitializer(Function &GlobalInit, GlobalValue *ModuleName); 827 void createInitializerPoisonCalls(Module &M, GlobalValue *ModuleName); 828 uint64_t getMinRedzoneSizeForGlobal() const { 829 return getRedzoneSizeForScale(Mapping.Scale); 830 } 831 uint64_t getRedzoneSizeForGlobal(uint64_t SizeInBytes) const; 832 int GetAsanVersion(const Module &M) const; 833 834 bool CompileKernel; 835 bool Recover; 836 bool UseGlobalsGC; 837 bool UsePrivateAlias; 838 bool UseOdrIndicator; 839 bool UseCtorComdat; 840 AsanDtorKind DestructorKind; 841 Type *IntptrTy; 842 LLVMContext *C; 843 Triple TargetTriple; 844 ShadowMapping Mapping; 845 FunctionCallee AsanPoisonGlobals; 846 FunctionCallee AsanUnpoisonGlobals; 847 FunctionCallee AsanRegisterGlobals; 848 FunctionCallee AsanUnregisterGlobals; 849 FunctionCallee AsanRegisterImageGlobals; 850 FunctionCallee AsanUnregisterImageGlobals; 851 FunctionCallee AsanRegisterElfGlobals; 852 FunctionCallee AsanUnregisterElfGlobals; 853 854 Function *AsanCtorFunction = nullptr; 855 Function *AsanDtorFunction = nullptr; 856 }; 857 858 // Stack poisoning does not play well with exception handling. 859 // When an exception is thrown, we essentially bypass the code 860 // that unpoisones the stack. This is why the run-time library has 861 // to intercept __cxa_throw (as well as longjmp, etc) and unpoison the entire 862 // stack in the interceptor. This however does not work inside the 863 // actual function which catches the exception. Most likely because the 864 // compiler hoists the load of the shadow value somewhere too high. 865 // This causes asan to report a non-existing bug on 453.povray. 866 // It sounds like an LLVM bug. 867 struct FunctionStackPoisoner : public InstVisitor<FunctionStackPoisoner> { 868 Function &F; 869 AddressSanitizer &ASan; 870 DIBuilder DIB; 871 LLVMContext *C; 872 Type *IntptrTy; 873 Type *IntptrPtrTy; 874 ShadowMapping Mapping; 875 876 SmallVector<AllocaInst *, 16> AllocaVec; 877 SmallVector<AllocaInst *, 16> StaticAllocasToMoveUp; 878 SmallVector<Instruction *, 8> RetVec; 879 880 FunctionCallee AsanStackMallocFunc[kMaxAsanStackMallocSizeClass + 1], 881 AsanStackFreeFunc[kMaxAsanStackMallocSizeClass + 1]; 882 FunctionCallee AsanSetShadowFunc[0x100] = {}; 883 FunctionCallee AsanPoisonStackMemoryFunc, AsanUnpoisonStackMemoryFunc; 884 FunctionCallee AsanAllocaPoisonFunc, AsanAllocasUnpoisonFunc; 885 886 // Stores a place and arguments of poisoning/unpoisoning call for alloca. 887 struct AllocaPoisonCall { 888 IntrinsicInst *InsBefore; 889 AllocaInst *AI; 890 uint64_t Size; 891 bool DoPoison; 892 }; 893 SmallVector<AllocaPoisonCall, 8> DynamicAllocaPoisonCallVec; 894 SmallVector<AllocaPoisonCall, 8> StaticAllocaPoisonCallVec; 895 bool HasUntracedLifetimeIntrinsic = false; 896 897 SmallVector<AllocaInst *, 1> DynamicAllocaVec; 898 SmallVector<IntrinsicInst *, 1> StackRestoreVec; 899 AllocaInst *DynamicAllocaLayout = nullptr; 900 IntrinsicInst *LocalEscapeCall = nullptr; 901 902 bool HasInlineAsm = false; 903 bool HasReturnsTwiceCall = false; 904 bool PoisonStack; 905 906 FunctionStackPoisoner(Function &F, AddressSanitizer &ASan) 907 : F(F), ASan(ASan), DIB(*F.getParent(), /*AllowUnresolved*/ false), 908 C(ASan.C), IntptrTy(ASan.IntptrTy), 909 IntptrPtrTy(PointerType::get(IntptrTy, 0)), Mapping(ASan.Mapping), 910 PoisonStack(ClStack && 911 !Triple(F.getParent()->getTargetTriple()).isAMDGPU()) {} 912 913 bool runOnFunction() { 914 if (!PoisonStack) 915 return false; 916 917 if (ClRedzoneByvalArgs) 918 copyArgsPassedByValToAllocas(); 919 920 // Collect alloca, ret, lifetime instructions etc. 921 for (BasicBlock *BB : depth_first(&F.getEntryBlock())) visit(*BB); 922 923 if (AllocaVec.empty() && DynamicAllocaVec.empty()) return false; 924 925 initializeCallbacks(*F.getParent()); 926 927 if (HasUntracedLifetimeIntrinsic) { 928 // If there are lifetime intrinsics which couldn't be traced back to an 929 // alloca, we may not know exactly when a variable enters scope, and 930 // therefore should "fail safe" by not poisoning them. 931 StaticAllocaPoisonCallVec.clear(); 932 DynamicAllocaPoisonCallVec.clear(); 933 } 934 935 processDynamicAllocas(); 936 processStaticAllocas(); 937 938 if (ClDebugStack) { 939 LLVM_DEBUG(dbgs() << F); 940 } 941 return true; 942 } 943 944 // Arguments marked with the "byval" attribute are implicitly copied without 945 // using an alloca instruction. To produce redzones for those arguments, we 946 // copy them a second time into memory allocated with an alloca instruction. 947 void copyArgsPassedByValToAllocas(); 948 949 // Finds all Alloca instructions and puts 950 // poisoned red zones around all of them. 951 // Then unpoison everything back before the function returns. 952 void processStaticAllocas(); 953 void processDynamicAllocas(); 954 955 void createDynamicAllocasInitStorage(); 956 957 // ----------------------- Visitors. 958 /// Collect all Ret instructions, or the musttail call instruction if it 959 /// precedes the return instruction. 960 void visitReturnInst(ReturnInst &RI) { 961 if (CallInst *CI = RI.getParent()->getTerminatingMustTailCall()) 962 RetVec.push_back(CI); 963 else 964 RetVec.push_back(&RI); 965 } 966 967 /// Collect all Resume instructions. 968 void visitResumeInst(ResumeInst &RI) { RetVec.push_back(&RI); } 969 970 /// Collect all CatchReturnInst instructions. 971 void visitCleanupReturnInst(CleanupReturnInst &CRI) { RetVec.push_back(&CRI); } 972 973 void unpoisonDynamicAllocasBeforeInst(Instruction *InstBefore, 974 Value *SavedStack) { 975 IRBuilder<> IRB(InstBefore); 976 Value *DynamicAreaPtr = IRB.CreatePtrToInt(SavedStack, IntptrTy); 977 // When we insert _asan_allocas_unpoison before @llvm.stackrestore, we 978 // need to adjust extracted SP to compute the address of the most recent 979 // alloca. We have a special @llvm.get.dynamic.area.offset intrinsic for 980 // this purpose. 981 if (!isa<ReturnInst>(InstBefore)) { 982 Function *DynamicAreaOffsetFunc = Intrinsic::getDeclaration( 983 InstBefore->getModule(), Intrinsic::get_dynamic_area_offset, 984 {IntptrTy}); 985 986 Value *DynamicAreaOffset = IRB.CreateCall(DynamicAreaOffsetFunc, {}); 987 988 DynamicAreaPtr = IRB.CreateAdd(IRB.CreatePtrToInt(SavedStack, IntptrTy), 989 DynamicAreaOffset); 990 } 991 992 IRB.CreateCall( 993 AsanAllocasUnpoisonFunc, 994 {IRB.CreateLoad(IntptrTy, DynamicAllocaLayout), DynamicAreaPtr}); 995 } 996 997 // Unpoison dynamic allocas redzones. 998 void unpoisonDynamicAllocas() { 999 for (Instruction *Ret : RetVec) 1000 unpoisonDynamicAllocasBeforeInst(Ret, DynamicAllocaLayout); 1001 1002 for (Instruction *StackRestoreInst : StackRestoreVec) 1003 unpoisonDynamicAllocasBeforeInst(StackRestoreInst, 1004 StackRestoreInst->getOperand(0)); 1005 } 1006 1007 // Deploy and poison redzones around dynamic alloca call. To do this, we 1008 // should replace this call with another one with changed parameters and 1009 // replace all its uses with new address, so 1010 // addr = alloca type, old_size, align 1011 // is replaced by 1012 // new_size = (old_size + additional_size) * sizeof(type) 1013 // tmp = alloca i8, new_size, max(align, 32) 1014 // addr = tmp + 32 (first 32 bytes are for the left redzone). 1015 // Additional_size is added to make new memory allocation contain not only 1016 // requested memory, but also left, partial and right redzones. 1017 void handleDynamicAllocaCall(AllocaInst *AI); 1018 1019 /// Collect Alloca instructions we want (and can) handle. 1020 void visitAllocaInst(AllocaInst &AI) { 1021 if (!ASan.isInterestingAlloca(AI)) { 1022 if (AI.isStaticAlloca()) { 1023 // Skip over allocas that are present *before* the first instrumented 1024 // alloca, we don't want to move those around. 1025 if (AllocaVec.empty()) 1026 return; 1027 1028 StaticAllocasToMoveUp.push_back(&AI); 1029 } 1030 return; 1031 } 1032 1033 if (!AI.isStaticAlloca()) 1034 DynamicAllocaVec.push_back(&AI); 1035 else 1036 AllocaVec.push_back(&AI); 1037 } 1038 1039 /// Collect lifetime intrinsic calls to check for use-after-scope 1040 /// errors. 1041 void visitIntrinsicInst(IntrinsicInst &II) { 1042 Intrinsic::ID ID = II.getIntrinsicID(); 1043 if (ID == Intrinsic::stackrestore) StackRestoreVec.push_back(&II); 1044 if (ID == Intrinsic::localescape) LocalEscapeCall = &II; 1045 if (!ASan.UseAfterScope) 1046 return; 1047 if (!II.isLifetimeStartOrEnd()) 1048 return; 1049 // Found lifetime intrinsic, add ASan instrumentation if necessary. 1050 auto *Size = cast<ConstantInt>(II.getArgOperand(0)); 1051 // If size argument is undefined, don't do anything. 1052 if (Size->isMinusOne()) return; 1053 // Check that size doesn't saturate uint64_t and can 1054 // be stored in IntptrTy. 1055 const uint64_t SizeValue = Size->getValue().getLimitedValue(); 1056 if (SizeValue == ~0ULL || 1057 !ConstantInt::isValueValidForType(IntptrTy, SizeValue)) 1058 return; 1059 // Find alloca instruction that corresponds to llvm.lifetime argument. 1060 // Currently we can only handle lifetime markers pointing to the 1061 // beginning of the alloca. 1062 AllocaInst *AI = findAllocaForValue(II.getArgOperand(1), true); 1063 if (!AI) { 1064 HasUntracedLifetimeIntrinsic = true; 1065 return; 1066 } 1067 // We're interested only in allocas we can handle. 1068 if (!ASan.isInterestingAlloca(*AI)) 1069 return; 1070 bool DoPoison = (ID == Intrinsic::lifetime_end); 1071 AllocaPoisonCall APC = {&II, AI, SizeValue, DoPoison}; 1072 if (AI->isStaticAlloca()) 1073 StaticAllocaPoisonCallVec.push_back(APC); 1074 else if (ClInstrumentDynamicAllocas) 1075 DynamicAllocaPoisonCallVec.push_back(APC); 1076 } 1077 1078 void visitCallBase(CallBase &CB) { 1079 if (CallInst *CI = dyn_cast<CallInst>(&CB)) { 1080 HasInlineAsm |= CI->isInlineAsm() && &CB != ASan.LocalDynamicShadow; 1081 HasReturnsTwiceCall |= CI->canReturnTwice(); 1082 } 1083 } 1084 1085 // ---------------------- Helpers. 1086 void initializeCallbacks(Module &M); 1087 1088 // Copies bytes from ShadowBytes into shadow memory for indexes where 1089 // ShadowMask is not zero. If ShadowMask[i] is zero, we assume that 1090 // ShadowBytes[i] is constantly zero and doesn't need to be overwritten. 1091 void copyToShadow(ArrayRef<uint8_t> ShadowMask, ArrayRef<uint8_t> ShadowBytes, 1092 IRBuilder<> &IRB, Value *ShadowBase); 1093 void copyToShadow(ArrayRef<uint8_t> ShadowMask, ArrayRef<uint8_t> ShadowBytes, 1094 size_t Begin, size_t End, IRBuilder<> &IRB, 1095 Value *ShadowBase); 1096 void copyToShadowInline(ArrayRef<uint8_t> ShadowMask, 1097 ArrayRef<uint8_t> ShadowBytes, size_t Begin, 1098 size_t End, IRBuilder<> &IRB, Value *ShadowBase); 1099 1100 void poisonAlloca(Value *V, uint64_t Size, IRBuilder<> &IRB, bool DoPoison); 1101 1102 Value *createAllocaForLayout(IRBuilder<> &IRB, const ASanStackFrameLayout &L, 1103 bool Dynamic); 1104 PHINode *createPHI(IRBuilder<> &IRB, Value *Cond, Value *ValueIfTrue, 1105 Instruction *ThenTerm, Value *ValueIfFalse); 1106 }; 1107 1108 } // end anonymous namespace 1109 1110 void ModuleAddressSanitizerPass::printPipeline( 1111 raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) { 1112 static_cast<PassInfoMixin<ModuleAddressSanitizerPass> *>(this)->printPipeline( 1113 OS, MapClassName2PassName); 1114 OS << "<"; 1115 if (Options.CompileKernel) 1116 OS << "kernel"; 1117 OS << ">"; 1118 } 1119 1120 ModuleAddressSanitizerPass::ModuleAddressSanitizerPass( 1121 const AddressSanitizerOptions &Options, bool UseGlobalGC, 1122 bool UseOdrIndicator, AsanDtorKind DestructorKind) 1123 : Options(Options), UseGlobalGC(UseGlobalGC), 1124 UseOdrIndicator(UseOdrIndicator), DestructorKind(DestructorKind) {} 1125 1126 PreservedAnalyses ModuleAddressSanitizerPass::run(Module &M, 1127 ModuleAnalysisManager &MAM) { 1128 ModuleAddressSanitizer ModuleSanitizer(M, Options.CompileKernel, 1129 Options.Recover, UseGlobalGC, 1130 UseOdrIndicator, DestructorKind); 1131 bool Modified = false; 1132 auto &FAM = MAM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager(); 1133 const StackSafetyGlobalInfo *const SSGI = 1134 ClUseStackSafety ? &MAM.getResult<StackSafetyGlobalAnalysis>(M) : nullptr; 1135 for (Function &F : M) { 1136 AddressSanitizer FunctionSanitizer(M, SSGI, Options.CompileKernel, 1137 Options.Recover, Options.UseAfterScope, 1138 Options.UseAfterReturn); 1139 const TargetLibraryInfo &TLI = FAM.getResult<TargetLibraryAnalysis>(F); 1140 Modified |= FunctionSanitizer.instrumentFunction(F, &TLI); 1141 } 1142 Modified |= ModuleSanitizer.instrumentModule(M); 1143 return Modified ? PreservedAnalyses::none() : PreservedAnalyses::all(); 1144 } 1145 1146 static size_t TypeSizeToSizeIndex(uint32_t TypeSize) { 1147 size_t Res = countTrailingZeros(TypeSize / 8); 1148 assert(Res < kNumberOfAccessSizes); 1149 return Res; 1150 } 1151 1152 /// Check if \p G has been created by a trusted compiler pass. 1153 static bool GlobalWasGeneratedByCompiler(GlobalVariable *G) { 1154 // Do not instrument @llvm.global_ctors, @llvm.used, etc. 1155 if (G->getName().startswith("llvm.") || 1156 // Do not instrument gcov counter arrays. 1157 G->getName().startswith("__llvm_gcov_ctr") || 1158 // Do not instrument rtti proxy symbols for function sanitizer. 1159 G->getName().startswith("__llvm_rtti_proxy")) 1160 return true; 1161 1162 // Do not instrument asan globals. 1163 if (G->getName().startswith(kAsanGenPrefix) || 1164 G->getName().startswith(kSanCovGenPrefix) || 1165 G->getName().startswith(kODRGenPrefix)) 1166 return true; 1167 1168 return false; 1169 } 1170 1171 static bool isUnsupportedAMDGPUAddrspace(Value *Addr) { 1172 Type *PtrTy = cast<PointerType>(Addr->getType()->getScalarType()); 1173 unsigned int AddrSpace = PtrTy->getPointerAddressSpace(); 1174 if (AddrSpace == 3 || AddrSpace == 5) 1175 return true; 1176 return false; 1177 } 1178 1179 Value *AddressSanitizer::memToShadow(Value *Shadow, IRBuilder<> &IRB) { 1180 // Shadow >> scale 1181 Shadow = IRB.CreateLShr(Shadow, Mapping.Scale); 1182 if (Mapping.Offset == 0) return Shadow; 1183 // (Shadow >> scale) | offset 1184 Value *ShadowBase; 1185 if (LocalDynamicShadow) 1186 ShadowBase = LocalDynamicShadow; 1187 else 1188 ShadowBase = ConstantInt::get(IntptrTy, Mapping.Offset); 1189 if (Mapping.OrShadowOffset) 1190 return IRB.CreateOr(Shadow, ShadowBase); 1191 else 1192 return IRB.CreateAdd(Shadow, ShadowBase); 1193 } 1194 1195 // Instrument memset/memmove/memcpy 1196 void AddressSanitizer::instrumentMemIntrinsic(MemIntrinsic *MI) { 1197 IRBuilder<> IRB(MI); 1198 if (isa<MemTransferInst>(MI)) { 1199 IRB.CreateCall( 1200 isa<MemMoveInst>(MI) ? AsanMemmove : AsanMemcpy, 1201 {IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()), 1202 IRB.CreatePointerCast(MI->getOperand(1), IRB.getInt8PtrTy()), 1203 IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)}); 1204 } else if (isa<MemSetInst>(MI)) { 1205 IRB.CreateCall( 1206 AsanMemset, 1207 {IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()), 1208 IRB.CreateIntCast(MI->getOperand(1), IRB.getInt32Ty(), false), 1209 IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)}); 1210 } 1211 MI->eraseFromParent(); 1212 } 1213 1214 /// Check if we want (and can) handle this alloca. 1215 bool AddressSanitizer::isInterestingAlloca(const AllocaInst &AI) { 1216 auto PreviouslySeenAllocaInfo = ProcessedAllocas.find(&AI); 1217 1218 if (PreviouslySeenAllocaInfo != ProcessedAllocas.end()) 1219 return PreviouslySeenAllocaInfo->getSecond(); 1220 1221 bool IsInteresting = 1222 (AI.getAllocatedType()->isSized() && 1223 // alloca() may be called with 0 size, ignore it. 1224 ((!AI.isStaticAlloca()) || getAllocaSizeInBytes(AI) > 0) && 1225 // We are only interested in allocas not promotable to registers. 1226 // Promotable allocas are common under -O0. 1227 (!ClSkipPromotableAllocas || !isAllocaPromotable(&AI)) && 1228 // inalloca allocas are not treated as static, and we don't want 1229 // dynamic alloca instrumentation for them as well. 1230 !AI.isUsedWithInAlloca() && 1231 // swifterror allocas are register promoted by ISel 1232 !AI.isSwiftError()); 1233 1234 ProcessedAllocas[&AI] = IsInteresting; 1235 return IsInteresting; 1236 } 1237 1238 bool AddressSanitizer::ignoreAccess(Instruction *Inst, Value *Ptr) { 1239 // Instrument acesses from different address spaces only for AMDGPU. 1240 Type *PtrTy = cast<PointerType>(Ptr->getType()->getScalarType()); 1241 if (PtrTy->getPointerAddressSpace() != 0 && 1242 !(TargetTriple.isAMDGPU() && !isUnsupportedAMDGPUAddrspace(Ptr))) 1243 return true; 1244 1245 // Ignore swifterror addresses. 1246 // swifterror memory addresses are mem2reg promoted by instruction 1247 // selection. As such they cannot have regular uses like an instrumentation 1248 // function and it makes no sense to track them as memory. 1249 if (Ptr->isSwiftError()) 1250 return true; 1251 1252 // Treat memory accesses to promotable allocas as non-interesting since they 1253 // will not cause memory violations. This greatly speeds up the instrumented 1254 // executable at -O0. 1255 if (auto AI = dyn_cast_or_null<AllocaInst>(Ptr)) 1256 if (ClSkipPromotableAllocas && !isInterestingAlloca(*AI)) 1257 return true; 1258 1259 if (SSGI != nullptr && SSGI->stackAccessIsSafe(*Inst) && 1260 findAllocaForValue(Ptr)) 1261 return true; 1262 1263 return false; 1264 } 1265 1266 void AddressSanitizer::getInterestingMemoryOperands( 1267 Instruction *I, SmallVectorImpl<InterestingMemoryOperand> &Interesting) { 1268 // Do not instrument the load fetching the dynamic shadow address. 1269 if (LocalDynamicShadow == I) 1270 return; 1271 1272 if (LoadInst *LI = dyn_cast<LoadInst>(I)) { 1273 if (!ClInstrumentReads || ignoreAccess(I, LI->getPointerOperand())) 1274 return; 1275 Interesting.emplace_back(I, LI->getPointerOperandIndex(), false, 1276 LI->getType(), LI->getAlign()); 1277 } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) { 1278 if (!ClInstrumentWrites || ignoreAccess(I, SI->getPointerOperand())) 1279 return; 1280 Interesting.emplace_back(I, SI->getPointerOperandIndex(), true, 1281 SI->getValueOperand()->getType(), SI->getAlign()); 1282 } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) { 1283 if (!ClInstrumentAtomics || ignoreAccess(I, RMW->getPointerOperand())) 1284 return; 1285 Interesting.emplace_back(I, RMW->getPointerOperandIndex(), true, 1286 RMW->getValOperand()->getType(), None); 1287 } else if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I)) { 1288 if (!ClInstrumentAtomics || ignoreAccess(I, XCHG->getPointerOperand())) 1289 return; 1290 Interesting.emplace_back(I, XCHG->getPointerOperandIndex(), true, 1291 XCHG->getCompareOperand()->getType(), None); 1292 } else if (auto CI = dyn_cast<CallInst>(I)) { 1293 if (CI->getIntrinsicID() == Intrinsic::masked_load || 1294 CI->getIntrinsicID() == Intrinsic::masked_store) { 1295 bool IsWrite = CI->getIntrinsicID() == Intrinsic::masked_store; 1296 // Masked store has an initial operand for the value. 1297 unsigned OpOffset = IsWrite ? 1 : 0; 1298 if (IsWrite ? !ClInstrumentWrites : !ClInstrumentReads) 1299 return; 1300 1301 auto BasePtr = CI->getOperand(OpOffset); 1302 if (ignoreAccess(I, BasePtr)) 1303 return; 1304 Type *Ty = IsWrite ? CI->getArgOperand(0)->getType() : CI->getType(); 1305 MaybeAlign Alignment = Align(1); 1306 // Otherwise no alignment guarantees. We probably got Undef. 1307 if (auto *Op = dyn_cast<ConstantInt>(CI->getOperand(1 + OpOffset))) 1308 Alignment = Op->getMaybeAlignValue(); 1309 Value *Mask = CI->getOperand(2 + OpOffset); 1310 Interesting.emplace_back(I, OpOffset, IsWrite, Ty, Alignment, Mask); 1311 } else { 1312 for (unsigned ArgNo = 0; ArgNo < CI->arg_size(); ArgNo++) { 1313 if (!ClInstrumentByval || !CI->isByValArgument(ArgNo) || 1314 ignoreAccess(I, CI->getArgOperand(ArgNo))) 1315 continue; 1316 Type *Ty = CI->getParamByValType(ArgNo); 1317 Interesting.emplace_back(I, ArgNo, false, Ty, Align(1)); 1318 } 1319 } 1320 } 1321 } 1322 1323 static bool isPointerOperand(Value *V) { 1324 return V->getType()->isPointerTy() || isa<PtrToIntInst>(V); 1325 } 1326 1327 // This is a rough heuristic; it may cause both false positives and 1328 // false negatives. The proper implementation requires cooperation with 1329 // the frontend. 1330 static bool isInterestingPointerComparison(Instruction *I) { 1331 if (ICmpInst *Cmp = dyn_cast<ICmpInst>(I)) { 1332 if (!Cmp->isRelational()) 1333 return false; 1334 } else { 1335 return false; 1336 } 1337 return isPointerOperand(I->getOperand(0)) && 1338 isPointerOperand(I->getOperand(1)); 1339 } 1340 1341 // This is a rough heuristic; it may cause both false positives and 1342 // false negatives. The proper implementation requires cooperation with 1343 // the frontend. 1344 static bool isInterestingPointerSubtraction(Instruction *I) { 1345 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(I)) { 1346 if (BO->getOpcode() != Instruction::Sub) 1347 return false; 1348 } else { 1349 return false; 1350 } 1351 return isPointerOperand(I->getOperand(0)) && 1352 isPointerOperand(I->getOperand(1)); 1353 } 1354 1355 bool AddressSanitizer::GlobalIsLinkerInitialized(GlobalVariable *G) { 1356 // If a global variable does not have dynamic initialization we don't 1357 // have to instrument it. However, if a global does not have initializer 1358 // at all, we assume it has dynamic initializer (in other TU). 1359 if (!G->hasInitializer()) 1360 return false; 1361 1362 if (G->hasSanitizerMetadata() && G->getSanitizerMetadata().IsDynInit) 1363 return false; 1364 1365 return true; 1366 } 1367 1368 void AddressSanitizer::instrumentPointerComparisonOrSubtraction( 1369 Instruction *I) { 1370 IRBuilder<> IRB(I); 1371 FunctionCallee F = isa<ICmpInst>(I) ? AsanPtrCmpFunction : AsanPtrSubFunction; 1372 Value *Param[2] = {I->getOperand(0), I->getOperand(1)}; 1373 for (Value *&i : Param) { 1374 if (i->getType()->isPointerTy()) 1375 i = IRB.CreatePointerCast(i, IntptrTy); 1376 } 1377 IRB.CreateCall(F, Param); 1378 } 1379 1380 static void doInstrumentAddress(AddressSanitizer *Pass, Instruction *I, 1381 Instruction *InsertBefore, Value *Addr, 1382 MaybeAlign Alignment, unsigned Granularity, 1383 uint32_t TypeSize, bool IsWrite, 1384 Value *SizeArgument, bool UseCalls, 1385 uint32_t Exp) { 1386 // Instrument a 1-, 2-, 4-, 8-, or 16- byte access with one check 1387 // if the data is properly aligned. 1388 if ((TypeSize == 8 || TypeSize == 16 || TypeSize == 32 || TypeSize == 64 || 1389 TypeSize == 128) && 1390 (!Alignment || *Alignment >= Granularity || *Alignment >= TypeSize / 8)) 1391 return Pass->instrumentAddress(I, InsertBefore, Addr, TypeSize, IsWrite, 1392 nullptr, UseCalls, Exp); 1393 Pass->instrumentUnusualSizeOrAlignment(I, InsertBefore, Addr, TypeSize, 1394 IsWrite, nullptr, UseCalls, Exp); 1395 } 1396 1397 static void instrumentMaskedLoadOrStore(AddressSanitizer *Pass, 1398 const DataLayout &DL, Type *IntptrTy, 1399 Value *Mask, Instruction *I, 1400 Value *Addr, MaybeAlign Alignment, 1401 unsigned Granularity, Type *OpType, 1402 bool IsWrite, Value *SizeArgument, 1403 bool UseCalls, uint32_t Exp) { 1404 auto *VTy = cast<FixedVectorType>(OpType); 1405 uint64_t ElemTypeSize = DL.getTypeStoreSizeInBits(VTy->getScalarType()); 1406 unsigned Num = VTy->getNumElements(); 1407 auto Zero = ConstantInt::get(IntptrTy, 0); 1408 for (unsigned Idx = 0; Idx < Num; ++Idx) { 1409 Value *InstrumentedAddress = nullptr; 1410 Instruction *InsertBefore = I; 1411 if (auto *Vector = dyn_cast<ConstantVector>(Mask)) { 1412 // dyn_cast as we might get UndefValue 1413 if (auto *Masked = dyn_cast<ConstantInt>(Vector->getOperand(Idx))) { 1414 if (Masked->isZero()) 1415 // Mask is constant false, so no instrumentation needed. 1416 continue; 1417 // If we have a true or undef value, fall through to doInstrumentAddress 1418 // with InsertBefore == I 1419 } 1420 } else { 1421 IRBuilder<> IRB(I); 1422 Value *MaskElem = IRB.CreateExtractElement(Mask, Idx); 1423 Instruction *ThenTerm = SplitBlockAndInsertIfThen(MaskElem, I, false); 1424 InsertBefore = ThenTerm; 1425 } 1426 1427 IRBuilder<> IRB(InsertBefore); 1428 InstrumentedAddress = 1429 IRB.CreateGEP(VTy, Addr, {Zero, ConstantInt::get(IntptrTy, Idx)}); 1430 doInstrumentAddress(Pass, I, InsertBefore, InstrumentedAddress, Alignment, 1431 Granularity, ElemTypeSize, IsWrite, SizeArgument, 1432 UseCalls, Exp); 1433 } 1434 } 1435 1436 void AddressSanitizer::instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis, 1437 InterestingMemoryOperand &O, bool UseCalls, 1438 const DataLayout &DL) { 1439 Value *Addr = O.getPtr(); 1440 1441 // Optimization experiments. 1442 // The experiments can be used to evaluate potential optimizations that remove 1443 // instrumentation (assess false negatives). Instead of completely removing 1444 // some instrumentation, you set Exp to a non-zero value (mask of optimization 1445 // experiments that want to remove instrumentation of this instruction). 1446 // If Exp is non-zero, this pass will emit special calls into runtime 1447 // (e.g. __asan_report_exp_load1 instead of __asan_report_load1). These calls 1448 // make runtime terminate the program in a special way (with a different 1449 // exit status). Then you run the new compiler on a buggy corpus, collect 1450 // the special terminations (ideally, you don't see them at all -- no false 1451 // negatives) and make the decision on the optimization. 1452 uint32_t Exp = ClForceExperiment; 1453 1454 if (ClOpt && ClOptGlobals) { 1455 // If initialization order checking is disabled, a simple access to a 1456 // dynamically initialized global is always valid. 1457 GlobalVariable *G = dyn_cast<GlobalVariable>(getUnderlyingObject(Addr)); 1458 if (G && (!ClInitializers || GlobalIsLinkerInitialized(G)) && 1459 isSafeAccess(ObjSizeVis, Addr, O.TypeSize)) { 1460 NumOptimizedAccessesToGlobalVar++; 1461 return; 1462 } 1463 } 1464 1465 if (ClOpt && ClOptStack) { 1466 // A direct inbounds access to a stack variable is always valid. 1467 if (isa<AllocaInst>(getUnderlyingObject(Addr)) && 1468 isSafeAccess(ObjSizeVis, Addr, O.TypeSize)) { 1469 NumOptimizedAccessesToStackVar++; 1470 return; 1471 } 1472 } 1473 1474 if (O.IsWrite) 1475 NumInstrumentedWrites++; 1476 else 1477 NumInstrumentedReads++; 1478 1479 unsigned Granularity = 1 << Mapping.Scale; 1480 if (O.MaybeMask) { 1481 instrumentMaskedLoadOrStore(this, DL, IntptrTy, O.MaybeMask, O.getInsn(), 1482 Addr, O.Alignment, Granularity, O.OpType, 1483 O.IsWrite, nullptr, UseCalls, Exp); 1484 } else { 1485 doInstrumentAddress(this, O.getInsn(), O.getInsn(), Addr, O.Alignment, 1486 Granularity, O.TypeSize, O.IsWrite, nullptr, UseCalls, 1487 Exp); 1488 } 1489 } 1490 1491 Instruction *AddressSanitizer::generateCrashCode(Instruction *InsertBefore, 1492 Value *Addr, bool IsWrite, 1493 size_t AccessSizeIndex, 1494 Value *SizeArgument, 1495 uint32_t Exp) { 1496 IRBuilder<> IRB(InsertBefore); 1497 Value *ExpVal = Exp == 0 ? nullptr : ConstantInt::get(IRB.getInt32Ty(), Exp); 1498 CallInst *Call = nullptr; 1499 if (SizeArgument) { 1500 if (Exp == 0) 1501 Call = IRB.CreateCall(AsanErrorCallbackSized[IsWrite][0], 1502 {Addr, SizeArgument}); 1503 else 1504 Call = IRB.CreateCall(AsanErrorCallbackSized[IsWrite][1], 1505 {Addr, SizeArgument, ExpVal}); 1506 } else { 1507 if (Exp == 0) 1508 Call = 1509 IRB.CreateCall(AsanErrorCallback[IsWrite][0][AccessSizeIndex], Addr); 1510 else 1511 Call = IRB.CreateCall(AsanErrorCallback[IsWrite][1][AccessSizeIndex], 1512 {Addr, ExpVal}); 1513 } 1514 1515 Call->setCannotMerge(); 1516 return Call; 1517 } 1518 1519 Value *AddressSanitizer::createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong, 1520 Value *ShadowValue, 1521 uint32_t TypeSize) { 1522 size_t Granularity = static_cast<size_t>(1) << Mapping.Scale; 1523 // Addr & (Granularity - 1) 1524 Value *LastAccessedByte = 1525 IRB.CreateAnd(AddrLong, ConstantInt::get(IntptrTy, Granularity - 1)); 1526 // (Addr & (Granularity - 1)) + size - 1 1527 if (TypeSize / 8 > 1) 1528 LastAccessedByte = IRB.CreateAdd( 1529 LastAccessedByte, ConstantInt::get(IntptrTy, TypeSize / 8 - 1)); 1530 // (uint8_t) ((Addr & (Granularity-1)) + size - 1) 1531 LastAccessedByte = 1532 IRB.CreateIntCast(LastAccessedByte, ShadowValue->getType(), false); 1533 // ((uint8_t) ((Addr & (Granularity-1)) + size - 1)) >= ShadowValue 1534 return IRB.CreateICmpSGE(LastAccessedByte, ShadowValue); 1535 } 1536 1537 Instruction *AddressSanitizer::instrumentAMDGPUAddress( 1538 Instruction *OrigIns, Instruction *InsertBefore, Value *Addr, 1539 uint32_t TypeSize, bool IsWrite, Value *SizeArgument) { 1540 // Do not instrument unsupported addrspaces. 1541 if (isUnsupportedAMDGPUAddrspace(Addr)) 1542 return nullptr; 1543 Type *PtrTy = cast<PointerType>(Addr->getType()->getScalarType()); 1544 // Follow host instrumentation for global and constant addresses. 1545 if (PtrTy->getPointerAddressSpace() != 0) 1546 return InsertBefore; 1547 // Instrument generic addresses in supported addressspaces. 1548 IRBuilder<> IRB(InsertBefore); 1549 Value *AddrLong = IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()); 1550 Value *IsShared = IRB.CreateCall(AMDGPUAddressShared, {AddrLong}); 1551 Value *IsPrivate = IRB.CreateCall(AMDGPUAddressPrivate, {AddrLong}); 1552 Value *IsSharedOrPrivate = IRB.CreateOr(IsShared, IsPrivate); 1553 Value *Cmp = IRB.CreateICmpNE(IRB.getTrue(), IsSharedOrPrivate); 1554 Value *AddrSpaceZeroLanding = 1555 SplitBlockAndInsertIfThen(Cmp, InsertBefore, false); 1556 InsertBefore = cast<Instruction>(AddrSpaceZeroLanding); 1557 return InsertBefore; 1558 } 1559 1560 void AddressSanitizer::instrumentAddress(Instruction *OrigIns, 1561 Instruction *InsertBefore, Value *Addr, 1562 uint32_t TypeSize, bool IsWrite, 1563 Value *SizeArgument, bool UseCalls, 1564 uint32_t Exp) { 1565 if (TargetTriple.isAMDGPU()) { 1566 InsertBefore = instrumentAMDGPUAddress(OrigIns, InsertBefore, Addr, 1567 TypeSize, IsWrite, SizeArgument); 1568 if (!InsertBefore) 1569 return; 1570 } 1571 1572 IRBuilder<> IRB(InsertBefore); 1573 size_t AccessSizeIndex = TypeSizeToSizeIndex(TypeSize); 1574 const ASanAccessInfo AccessInfo(IsWrite, CompileKernel, AccessSizeIndex); 1575 1576 if (UseCalls && ClOptimizeCallbacks) { 1577 const ASanAccessInfo AccessInfo(IsWrite, CompileKernel, AccessSizeIndex); 1578 Module *M = IRB.GetInsertBlock()->getParent()->getParent(); 1579 IRB.CreateCall( 1580 Intrinsic::getDeclaration(M, Intrinsic::asan_check_memaccess), 1581 {IRB.CreatePointerCast(Addr, Int8PtrTy), 1582 ConstantInt::get(Int32Ty, AccessInfo.Packed)}); 1583 return; 1584 } 1585 1586 Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy); 1587 if (UseCalls) { 1588 if (Exp == 0) 1589 IRB.CreateCall(AsanMemoryAccessCallback[IsWrite][0][AccessSizeIndex], 1590 AddrLong); 1591 else 1592 IRB.CreateCall(AsanMemoryAccessCallback[IsWrite][1][AccessSizeIndex], 1593 {AddrLong, ConstantInt::get(IRB.getInt32Ty(), Exp)}); 1594 return; 1595 } 1596 1597 Type *ShadowTy = 1598 IntegerType::get(*C, std::max(8U, TypeSize >> Mapping.Scale)); 1599 Type *ShadowPtrTy = PointerType::get(ShadowTy, 0); 1600 Value *ShadowPtr = memToShadow(AddrLong, IRB); 1601 Value *CmpVal = Constant::getNullValue(ShadowTy); 1602 Value *ShadowValue = 1603 IRB.CreateLoad(ShadowTy, IRB.CreateIntToPtr(ShadowPtr, ShadowPtrTy)); 1604 1605 Value *Cmp = IRB.CreateICmpNE(ShadowValue, CmpVal); 1606 size_t Granularity = 1ULL << Mapping.Scale; 1607 Instruction *CrashTerm = nullptr; 1608 1609 if (ClAlwaysSlowPath || (TypeSize < 8 * Granularity)) { 1610 // We use branch weights for the slow path check, to indicate that the slow 1611 // path is rarely taken. This seems to be the case for SPEC benchmarks. 1612 Instruction *CheckTerm = SplitBlockAndInsertIfThen( 1613 Cmp, InsertBefore, false, MDBuilder(*C).createBranchWeights(1, 100000)); 1614 assert(cast<BranchInst>(CheckTerm)->isUnconditional()); 1615 BasicBlock *NextBB = CheckTerm->getSuccessor(0); 1616 IRB.SetInsertPoint(CheckTerm); 1617 Value *Cmp2 = createSlowPathCmp(IRB, AddrLong, ShadowValue, TypeSize); 1618 if (Recover) { 1619 CrashTerm = SplitBlockAndInsertIfThen(Cmp2, CheckTerm, false); 1620 } else { 1621 BasicBlock *CrashBlock = 1622 BasicBlock::Create(*C, "", NextBB->getParent(), NextBB); 1623 CrashTerm = new UnreachableInst(*C, CrashBlock); 1624 BranchInst *NewTerm = BranchInst::Create(CrashBlock, NextBB, Cmp2); 1625 ReplaceInstWithInst(CheckTerm, NewTerm); 1626 } 1627 } else { 1628 CrashTerm = SplitBlockAndInsertIfThen(Cmp, InsertBefore, !Recover); 1629 } 1630 1631 Instruction *Crash = generateCrashCode(CrashTerm, AddrLong, IsWrite, 1632 AccessSizeIndex, SizeArgument, Exp); 1633 Crash->setDebugLoc(OrigIns->getDebugLoc()); 1634 } 1635 1636 // Instrument unusual size or unusual alignment. 1637 // We can not do it with a single check, so we do 1-byte check for the first 1638 // and the last bytes. We call __asan_report_*_n(addr, real_size) to be able 1639 // to report the actual access size. 1640 void AddressSanitizer::instrumentUnusualSizeOrAlignment( 1641 Instruction *I, Instruction *InsertBefore, Value *Addr, uint32_t TypeSize, 1642 bool IsWrite, Value *SizeArgument, bool UseCalls, uint32_t Exp) { 1643 IRBuilder<> IRB(InsertBefore); 1644 Value *Size = ConstantInt::get(IntptrTy, TypeSize / 8); 1645 Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy); 1646 if (UseCalls) { 1647 if (Exp == 0) 1648 IRB.CreateCall(AsanMemoryAccessCallbackSized[IsWrite][0], 1649 {AddrLong, Size}); 1650 else 1651 IRB.CreateCall(AsanMemoryAccessCallbackSized[IsWrite][1], 1652 {AddrLong, Size, ConstantInt::get(IRB.getInt32Ty(), Exp)}); 1653 } else { 1654 Value *LastByte = IRB.CreateIntToPtr( 1655 IRB.CreateAdd(AddrLong, ConstantInt::get(IntptrTy, TypeSize / 8 - 1)), 1656 Addr->getType()); 1657 instrumentAddress(I, InsertBefore, Addr, 8, IsWrite, Size, false, Exp); 1658 instrumentAddress(I, InsertBefore, LastByte, 8, IsWrite, Size, false, Exp); 1659 } 1660 } 1661 1662 void ModuleAddressSanitizer::poisonOneInitializer(Function &GlobalInit, 1663 GlobalValue *ModuleName) { 1664 // Set up the arguments to our poison/unpoison functions. 1665 IRBuilder<> IRB(&GlobalInit.front(), 1666 GlobalInit.front().getFirstInsertionPt()); 1667 1668 // Add a call to poison all external globals before the given function starts. 1669 Value *ModuleNameAddr = ConstantExpr::getPointerCast(ModuleName, IntptrTy); 1670 IRB.CreateCall(AsanPoisonGlobals, ModuleNameAddr); 1671 1672 // Add calls to unpoison all globals before each return instruction. 1673 for (auto &BB : GlobalInit.getBasicBlockList()) 1674 if (ReturnInst *RI = dyn_cast<ReturnInst>(BB.getTerminator())) 1675 CallInst::Create(AsanUnpoisonGlobals, "", RI); 1676 } 1677 1678 void ModuleAddressSanitizer::createInitializerPoisonCalls( 1679 Module &M, GlobalValue *ModuleName) { 1680 GlobalVariable *GV = M.getGlobalVariable("llvm.global_ctors"); 1681 if (!GV) 1682 return; 1683 1684 ConstantArray *CA = dyn_cast<ConstantArray>(GV->getInitializer()); 1685 if (!CA) 1686 return; 1687 1688 for (Use &OP : CA->operands()) { 1689 if (isa<ConstantAggregateZero>(OP)) continue; 1690 ConstantStruct *CS = cast<ConstantStruct>(OP); 1691 1692 // Must have a function or null ptr. 1693 if (Function *F = dyn_cast<Function>(CS->getOperand(1))) { 1694 if (F->getName() == kAsanModuleCtorName) continue; 1695 auto *Priority = cast<ConstantInt>(CS->getOperand(0)); 1696 // Don't instrument CTORs that will run before asan.module_ctor. 1697 if (Priority->getLimitedValue() <= GetCtorAndDtorPriority(TargetTriple)) 1698 continue; 1699 poisonOneInitializer(*F, ModuleName); 1700 } 1701 } 1702 } 1703 1704 const GlobalVariable * 1705 ModuleAddressSanitizer::getExcludedAliasedGlobal(const GlobalAlias &GA) const { 1706 // In case this function should be expanded to include rules that do not just 1707 // apply when CompileKernel is true, either guard all existing rules with an 1708 // 'if (CompileKernel) { ... }' or be absolutely sure that all these rules 1709 // should also apply to user space. 1710 assert(CompileKernel && "Only expecting to be called when compiling kernel"); 1711 1712 const Constant *C = GA.getAliasee(); 1713 1714 // When compiling the kernel, globals that are aliased by symbols prefixed 1715 // by "__" are special and cannot be padded with a redzone. 1716 if (GA.getName().startswith("__")) 1717 return dyn_cast<GlobalVariable>(C->stripPointerCastsAndAliases()); 1718 1719 return nullptr; 1720 } 1721 1722 bool ModuleAddressSanitizer::shouldInstrumentGlobal(GlobalVariable *G) const { 1723 Type *Ty = G->getValueType(); 1724 LLVM_DEBUG(dbgs() << "GLOBAL: " << *G << "\n"); 1725 1726 if (G->hasSanitizerMetadata() && G->getSanitizerMetadata().NoAddress) 1727 return false; 1728 if (!Ty->isSized()) return false; 1729 if (!G->hasInitializer()) return false; 1730 // Globals in address space 1 and 4 are supported for AMDGPU. 1731 if (G->getAddressSpace() && 1732 !(TargetTriple.isAMDGPU() && !isUnsupportedAMDGPUAddrspace(G))) 1733 return false; 1734 if (GlobalWasGeneratedByCompiler(G)) return false; // Our own globals. 1735 // Two problems with thread-locals: 1736 // - The address of the main thread's copy can't be computed at link-time. 1737 // - Need to poison all copies, not just the main thread's one. 1738 if (G->isThreadLocal()) return false; 1739 // For now, just ignore this Global if the alignment is large. 1740 if (G->getAlignment() > getMinRedzoneSizeForGlobal()) return false; 1741 1742 // For non-COFF targets, only instrument globals known to be defined by this 1743 // TU. 1744 // FIXME: We can instrument comdat globals on ELF if we are using the 1745 // GC-friendly metadata scheme. 1746 if (!TargetTriple.isOSBinFormatCOFF()) { 1747 if (!G->hasExactDefinition() || G->hasComdat()) 1748 return false; 1749 } else { 1750 // On COFF, don't instrument non-ODR linkages. 1751 if (G->isInterposable()) 1752 return false; 1753 } 1754 1755 // If a comdat is present, it must have a selection kind that implies ODR 1756 // semantics: no duplicates, any, or exact match. 1757 if (Comdat *C = G->getComdat()) { 1758 switch (C->getSelectionKind()) { 1759 case Comdat::Any: 1760 case Comdat::ExactMatch: 1761 case Comdat::NoDeduplicate: 1762 break; 1763 case Comdat::Largest: 1764 case Comdat::SameSize: 1765 return false; 1766 } 1767 } 1768 1769 if (G->hasSection()) { 1770 // The kernel uses explicit sections for mostly special global variables 1771 // that we should not instrument. E.g. the kernel may rely on their layout 1772 // without redzones, or remove them at link time ("discard.*"), etc. 1773 if (CompileKernel) 1774 return false; 1775 1776 StringRef Section = G->getSection(); 1777 1778 // Globals from llvm.metadata aren't emitted, do not instrument them. 1779 if (Section == "llvm.metadata") return false; 1780 // Do not instrument globals from special LLVM sections. 1781 if (Section.contains("__llvm") || Section.contains("__LLVM")) 1782 return false; 1783 1784 // Do not instrument function pointers to initialization and termination 1785 // routines: dynamic linker will not properly handle redzones. 1786 if (Section.startswith(".preinit_array") || 1787 Section.startswith(".init_array") || 1788 Section.startswith(".fini_array")) { 1789 return false; 1790 } 1791 1792 // Do not instrument user-defined sections (with names resembling 1793 // valid C identifiers) 1794 if (TargetTriple.isOSBinFormatELF()) { 1795 if (llvm::all_of(Section, 1796 [](char c) { return llvm::isAlnum(c) || c == '_'; })) 1797 return false; 1798 } 1799 1800 // On COFF, if the section name contains '$', it is highly likely that the 1801 // user is using section sorting to create an array of globals similar to 1802 // the way initialization callbacks are registered in .init_array and 1803 // .CRT$XCU. The ATL also registers things in .ATL$__[azm]. Adding redzones 1804 // to such globals is counterproductive, because the intent is that they 1805 // will form an array, and out-of-bounds accesses are expected. 1806 // See https://github.com/google/sanitizers/issues/305 1807 // and http://msdn.microsoft.com/en-US/en-en/library/bb918180(v=vs.120).aspx 1808 if (TargetTriple.isOSBinFormatCOFF() && Section.contains('$')) { 1809 LLVM_DEBUG(dbgs() << "Ignoring global in sorted section (contains '$'): " 1810 << *G << "\n"); 1811 return false; 1812 } 1813 1814 if (TargetTriple.isOSBinFormatMachO()) { 1815 StringRef ParsedSegment, ParsedSection; 1816 unsigned TAA = 0, StubSize = 0; 1817 bool TAAParsed; 1818 cantFail(MCSectionMachO::ParseSectionSpecifier( 1819 Section, ParsedSegment, ParsedSection, TAA, TAAParsed, StubSize)); 1820 1821 // Ignore the globals from the __OBJC section. The ObjC runtime assumes 1822 // those conform to /usr/lib/objc/runtime.h, so we can't add redzones to 1823 // them. 1824 if (ParsedSegment == "__OBJC" || 1825 (ParsedSegment == "__DATA" && ParsedSection.startswith("__objc_"))) { 1826 LLVM_DEBUG(dbgs() << "Ignoring ObjC runtime global: " << *G << "\n"); 1827 return false; 1828 } 1829 // See https://github.com/google/sanitizers/issues/32 1830 // Constant CFString instances are compiled in the following way: 1831 // -- the string buffer is emitted into 1832 // __TEXT,__cstring,cstring_literals 1833 // -- the constant NSConstantString structure referencing that buffer 1834 // is placed into __DATA,__cfstring 1835 // Therefore there's no point in placing redzones into __DATA,__cfstring. 1836 // Moreover, it causes the linker to crash on OS X 10.7 1837 if (ParsedSegment == "__DATA" && ParsedSection == "__cfstring") { 1838 LLVM_DEBUG(dbgs() << "Ignoring CFString: " << *G << "\n"); 1839 return false; 1840 } 1841 // The linker merges the contents of cstring_literals and removes the 1842 // trailing zeroes. 1843 if (ParsedSegment == "__TEXT" && (TAA & MachO::S_CSTRING_LITERALS)) { 1844 LLVM_DEBUG(dbgs() << "Ignoring a cstring literal: " << *G << "\n"); 1845 return false; 1846 } 1847 } 1848 } 1849 1850 if (CompileKernel) { 1851 // Globals that prefixed by "__" are special and cannot be padded with a 1852 // redzone. 1853 if (G->getName().startswith("__")) 1854 return false; 1855 } 1856 1857 return true; 1858 } 1859 1860 // On Mach-O platforms, we emit global metadata in a separate section of the 1861 // binary in order to allow the linker to properly dead strip. This is only 1862 // supported on recent versions of ld64. 1863 bool ModuleAddressSanitizer::ShouldUseMachOGlobalsSection() const { 1864 if (!TargetTriple.isOSBinFormatMachO()) 1865 return false; 1866 1867 if (TargetTriple.isMacOSX() && !TargetTriple.isMacOSXVersionLT(10, 11)) 1868 return true; 1869 if (TargetTriple.isiOS() /* or tvOS */ && !TargetTriple.isOSVersionLT(9)) 1870 return true; 1871 if (TargetTriple.isWatchOS() && !TargetTriple.isOSVersionLT(2)) 1872 return true; 1873 if (TargetTriple.isDriverKit()) 1874 return true; 1875 1876 return false; 1877 } 1878 1879 StringRef ModuleAddressSanitizer::getGlobalMetadataSection() const { 1880 switch (TargetTriple.getObjectFormat()) { 1881 case Triple::COFF: return ".ASAN$GL"; 1882 case Triple::ELF: return "asan_globals"; 1883 case Triple::MachO: return "__DATA,__asan_globals,regular"; 1884 case Triple::Wasm: 1885 case Triple::GOFF: 1886 case Triple::SPIRV: 1887 case Triple::XCOFF: 1888 case Triple::DXContainer: 1889 report_fatal_error( 1890 "ModuleAddressSanitizer not implemented for object file format"); 1891 case Triple::UnknownObjectFormat: 1892 break; 1893 } 1894 llvm_unreachable("unsupported object format"); 1895 } 1896 1897 void ModuleAddressSanitizer::initializeCallbacks(Module &M) { 1898 IRBuilder<> IRB(*C); 1899 1900 // Declare our poisoning and unpoisoning functions. 1901 AsanPoisonGlobals = 1902 M.getOrInsertFunction(kAsanPoisonGlobalsName, IRB.getVoidTy(), IntptrTy); 1903 AsanUnpoisonGlobals = 1904 M.getOrInsertFunction(kAsanUnpoisonGlobalsName, IRB.getVoidTy()); 1905 1906 // Declare functions that register/unregister globals. 1907 AsanRegisterGlobals = M.getOrInsertFunction( 1908 kAsanRegisterGlobalsName, IRB.getVoidTy(), IntptrTy, IntptrTy); 1909 AsanUnregisterGlobals = M.getOrInsertFunction( 1910 kAsanUnregisterGlobalsName, IRB.getVoidTy(), IntptrTy, IntptrTy); 1911 1912 // Declare the functions that find globals in a shared object and then invoke 1913 // the (un)register function on them. 1914 AsanRegisterImageGlobals = M.getOrInsertFunction( 1915 kAsanRegisterImageGlobalsName, IRB.getVoidTy(), IntptrTy); 1916 AsanUnregisterImageGlobals = M.getOrInsertFunction( 1917 kAsanUnregisterImageGlobalsName, IRB.getVoidTy(), IntptrTy); 1918 1919 AsanRegisterElfGlobals = 1920 M.getOrInsertFunction(kAsanRegisterElfGlobalsName, IRB.getVoidTy(), 1921 IntptrTy, IntptrTy, IntptrTy); 1922 AsanUnregisterElfGlobals = 1923 M.getOrInsertFunction(kAsanUnregisterElfGlobalsName, IRB.getVoidTy(), 1924 IntptrTy, IntptrTy, IntptrTy); 1925 } 1926 1927 // Put the metadata and the instrumented global in the same group. This ensures 1928 // that the metadata is discarded if the instrumented global is discarded. 1929 void ModuleAddressSanitizer::SetComdatForGlobalMetadata( 1930 GlobalVariable *G, GlobalVariable *Metadata, StringRef InternalSuffix) { 1931 Module &M = *G->getParent(); 1932 Comdat *C = G->getComdat(); 1933 if (!C) { 1934 if (!G->hasName()) { 1935 // If G is unnamed, it must be internal. Give it an artificial name 1936 // so we can put it in a comdat. 1937 assert(G->hasLocalLinkage()); 1938 G->setName(Twine(kAsanGenPrefix) + "_anon_global"); 1939 } 1940 1941 if (!InternalSuffix.empty() && G->hasLocalLinkage()) { 1942 std::string Name = std::string(G->getName()); 1943 Name += InternalSuffix; 1944 C = M.getOrInsertComdat(Name); 1945 } else { 1946 C = M.getOrInsertComdat(G->getName()); 1947 } 1948 1949 // Make this IMAGE_COMDAT_SELECT_NODUPLICATES on COFF. Also upgrade private 1950 // linkage to internal linkage so that a symbol table entry is emitted. This 1951 // is necessary in order to create the comdat group. 1952 if (TargetTriple.isOSBinFormatCOFF()) { 1953 C->setSelectionKind(Comdat::NoDeduplicate); 1954 if (G->hasPrivateLinkage()) 1955 G->setLinkage(GlobalValue::InternalLinkage); 1956 } 1957 G->setComdat(C); 1958 } 1959 1960 assert(G->hasComdat()); 1961 Metadata->setComdat(G->getComdat()); 1962 } 1963 1964 // Create a separate metadata global and put it in the appropriate ASan 1965 // global registration section. 1966 GlobalVariable * 1967 ModuleAddressSanitizer::CreateMetadataGlobal(Module &M, Constant *Initializer, 1968 StringRef OriginalName) { 1969 auto Linkage = TargetTriple.isOSBinFormatMachO() 1970 ? GlobalVariable::InternalLinkage 1971 : GlobalVariable::PrivateLinkage; 1972 GlobalVariable *Metadata = new GlobalVariable( 1973 M, Initializer->getType(), false, Linkage, Initializer, 1974 Twine("__asan_global_") + GlobalValue::dropLLVMManglingEscape(OriginalName)); 1975 Metadata->setSection(getGlobalMetadataSection()); 1976 return Metadata; 1977 } 1978 1979 Instruction *ModuleAddressSanitizer::CreateAsanModuleDtor(Module &M) { 1980 AsanDtorFunction = Function::createWithDefaultAttr( 1981 FunctionType::get(Type::getVoidTy(*C), false), 1982 GlobalValue::InternalLinkage, 0, kAsanModuleDtorName, &M); 1983 AsanDtorFunction->addFnAttr(Attribute::NoUnwind); 1984 // Ensure Dtor cannot be discarded, even if in a comdat. 1985 appendToUsed(M, {AsanDtorFunction}); 1986 BasicBlock *AsanDtorBB = BasicBlock::Create(*C, "", AsanDtorFunction); 1987 1988 return ReturnInst::Create(*C, AsanDtorBB); 1989 } 1990 1991 void ModuleAddressSanitizer::InstrumentGlobalsCOFF( 1992 IRBuilder<> &IRB, Module &M, ArrayRef<GlobalVariable *> ExtendedGlobals, 1993 ArrayRef<Constant *> MetadataInitializers) { 1994 assert(ExtendedGlobals.size() == MetadataInitializers.size()); 1995 auto &DL = M.getDataLayout(); 1996 1997 SmallVector<GlobalValue *, 16> MetadataGlobals(ExtendedGlobals.size()); 1998 for (size_t i = 0; i < ExtendedGlobals.size(); i++) { 1999 Constant *Initializer = MetadataInitializers[i]; 2000 GlobalVariable *G = ExtendedGlobals[i]; 2001 GlobalVariable *Metadata = 2002 CreateMetadataGlobal(M, Initializer, G->getName()); 2003 MDNode *MD = MDNode::get(M.getContext(), ValueAsMetadata::get(G)); 2004 Metadata->setMetadata(LLVMContext::MD_associated, MD); 2005 MetadataGlobals[i] = Metadata; 2006 2007 // The MSVC linker always inserts padding when linking incrementally. We 2008 // cope with that by aligning each struct to its size, which must be a power 2009 // of two. 2010 unsigned SizeOfGlobalStruct = DL.getTypeAllocSize(Initializer->getType()); 2011 assert(isPowerOf2_32(SizeOfGlobalStruct) && 2012 "global metadata will not be padded appropriately"); 2013 Metadata->setAlignment(assumeAligned(SizeOfGlobalStruct)); 2014 2015 SetComdatForGlobalMetadata(G, Metadata, ""); 2016 } 2017 2018 // Update llvm.compiler.used, adding the new metadata globals. This is 2019 // needed so that during LTO these variables stay alive. 2020 if (!MetadataGlobals.empty()) 2021 appendToCompilerUsed(M, MetadataGlobals); 2022 } 2023 2024 void ModuleAddressSanitizer::InstrumentGlobalsELF( 2025 IRBuilder<> &IRB, Module &M, ArrayRef<GlobalVariable *> ExtendedGlobals, 2026 ArrayRef<Constant *> MetadataInitializers, 2027 const std::string &UniqueModuleId) { 2028 assert(ExtendedGlobals.size() == MetadataInitializers.size()); 2029 2030 // Putting globals in a comdat changes the semantic and potentially cause 2031 // false negative odr violations at link time. If odr indicators are used, we 2032 // keep the comdat sections, as link time odr violations will be dectected on 2033 // the odr indicator symbols. 2034 bool UseComdatForGlobalsGC = UseOdrIndicator; 2035 2036 SmallVector<GlobalValue *, 16> MetadataGlobals(ExtendedGlobals.size()); 2037 for (size_t i = 0; i < ExtendedGlobals.size(); i++) { 2038 GlobalVariable *G = ExtendedGlobals[i]; 2039 GlobalVariable *Metadata = 2040 CreateMetadataGlobal(M, MetadataInitializers[i], G->getName()); 2041 MDNode *MD = MDNode::get(M.getContext(), ValueAsMetadata::get(G)); 2042 Metadata->setMetadata(LLVMContext::MD_associated, MD); 2043 MetadataGlobals[i] = Metadata; 2044 2045 if (UseComdatForGlobalsGC) 2046 SetComdatForGlobalMetadata(G, Metadata, UniqueModuleId); 2047 } 2048 2049 // Update llvm.compiler.used, adding the new metadata globals. This is 2050 // needed so that during LTO these variables stay alive. 2051 if (!MetadataGlobals.empty()) 2052 appendToCompilerUsed(M, MetadataGlobals); 2053 2054 // RegisteredFlag serves two purposes. First, we can pass it to dladdr() 2055 // to look up the loaded image that contains it. Second, we can store in it 2056 // whether registration has already occurred, to prevent duplicate 2057 // registration. 2058 // 2059 // Common linkage ensures that there is only one global per shared library. 2060 GlobalVariable *RegisteredFlag = new GlobalVariable( 2061 M, IntptrTy, false, GlobalVariable::CommonLinkage, 2062 ConstantInt::get(IntptrTy, 0), kAsanGlobalsRegisteredFlagName); 2063 RegisteredFlag->setVisibility(GlobalVariable::HiddenVisibility); 2064 2065 // Create start and stop symbols. 2066 GlobalVariable *StartELFMetadata = new GlobalVariable( 2067 M, IntptrTy, false, GlobalVariable::ExternalWeakLinkage, nullptr, 2068 "__start_" + getGlobalMetadataSection()); 2069 StartELFMetadata->setVisibility(GlobalVariable::HiddenVisibility); 2070 GlobalVariable *StopELFMetadata = new GlobalVariable( 2071 M, IntptrTy, false, GlobalVariable::ExternalWeakLinkage, nullptr, 2072 "__stop_" + getGlobalMetadataSection()); 2073 StopELFMetadata->setVisibility(GlobalVariable::HiddenVisibility); 2074 2075 // Create a call to register the globals with the runtime. 2076 IRB.CreateCall(AsanRegisterElfGlobals, 2077 {IRB.CreatePointerCast(RegisteredFlag, IntptrTy), 2078 IRB.CreatePointerCast(StartELFMetadata, IntptrTy), 2079 IRB.CreatePointerCast(StopELFMetadata, IntptrTy)}); 2080 2081 // We also need to unregister globals at the end, e.g., when a shared library 2082 // gets closed. 2083 if (DestructorKind != AsanDtorKind::None) { 2084 IRBuilder<> IrbDtor(CreateAsanModuleDtor(M)); 2085 IrbDtor.CreateCall(AsanUnregisterElfGlobals, 2086 {IRB.CreatePointerCast(RegisteredFlag, IntptrTy), 2087 IRB.CreatePointerCast(StartELFMetadata, IntptrTy), 2088 IRB.CreatePointerCast(StopELFMetadata, IntptrTy)}); 2089 } 2090 } 2091 2092 void ModuleAddressSanitizer::InstrumentGlobalsMachO( 2093 IRBuilder<> &IRB, Module &M, ArrayRef<GlobalVariable *> ExtendedGlobals, 2094 ArrayRef<Constant *> MetadataInitializers) { 2095 assert(ExtendedGlobals.size() == MetadataInitializers.size()); 2096 2097 // On recent Mach-O platforms, use a structure which binds the liveness of 2098 // the global variable to the metadata struct. Keep the list of "Liveness" GV 2099 // created to be added to llvm.compiler.used 2100 StructType *LivenessTy = StructType::get(IntptrTy, IntptrTy); 2101 SmallVector<GlobalValue *, 16> LivenessGlobals(ExtendedGlobals.size()); 2102 2103 for (size_t i = 0; i < ExtendedGlobals.size(); i++) { 2104 Constant *Initializer = MetadataInitializers[i]; 2105 GlobalVariable *G = ExtendedGlobals[i]; 2106 GlobalVariable *Metadata = 2107 CreateMetadataGlobal(M, Initializer, G->getName()); 2108 2109 // On recent Mach-O platforms, we emit the global metadata in a way that 2110 // allows the linker to properly strip dead globals. 2111 auto LivenessBinder = 2112 ConstantStruct::get(LivenessTy, Initializer->getAggregateElement(0u), 2113 ConstantExpr::getPointerCast(Metadata, IntptrTy)); 2114 GlobalVariable *Liveness = new GlobalVariable( 2115 M, LivenessTy, false, GlobalVariable::InternalLinkage, LivenessBinder, 2116 Twine("__asan_binder_") + G->getName()); 2117 Liveness->setSection("__DATA,__asan_liveness,regular,live_support"); 2118 LivenessGlobals[i] = Liveness; 2119 } 2120 2121 // Update llvm.compiler.used, adding the new liveness globals. This is 2122 // needed so that during LTO these variables stay alive. The alternative 2123 // would be to have the linker handling the LTO symbols, but libLTO 2124 // current API does not expose access to the section for each symbol. 2125 if (!LivenessGlobals.empty()) 2126 appendToCompilerUsed(M, LivenessGlobals); 2127 2128 // RegisteredFlag serves two purposes. First, we can pass it to dladdr() 2129 // to look up the loaded image that contains it. Second, we can store in it 2130 // whether registration has already occurred, to prevent duplicate 2131 // registration. 2132 // 2133 // common linkage ensures that there is only one global per shared library. 2134 GlobalVariable *RegisteredFlag = new GlobalVariable( 2135 M, IntptrTy, false, GlobalVariable::CommonLinkage, 2136 ConstantInt::get(IntptrTy, 0), kAsanGlobalsRegisteredFlagName); 2137 RegisteredFlag->setVisibility(GlobalVariable::HiddenVisibility); 2138 2139 IRB.CreateCall(AsanRegisterImageGlobals, 2140 {IRB.CreatePointerCast(RegisteredFlag, IntptrTy)}); 2141 2142 // We also need to unregister globals at the end, e.g., when a shared library 2143 // gets closed. 2144 if (DestructorKind != AsanDtorKind::None) { 2145 IRBuilder<> IrbDtor(CreateAsanModuleDtor(M)); 2146 IrbDtor.CreateCall(AsanUnregisterImageGlobals, 2147 {IRB.CreatePointerCast(RegisteredFlag, IntptrTy)}); 2148 } 2149 } 2150 2151 void ModuleAddressSanitizer::InstrumentGlobalsWithMetadataArray( 2152 IRBuilder<> &IRB, Module &M, ArrayRef<GlobalVariable *> ExtendedGlobals, 2153 ArrayRef<Constant *> MetadataInitializers) { 2154 assert(ExtendedGlobals.size() == MetadataInitializers.size()); 2155 unsigned N = ExtendedGlobals.size(); 2156 assert(N > 0); 2157 2158 // On platforms that don't have a custom metadata section, we emit an array 2159 // of global metadata structures. 2160 ArrayType *ArrayOfGlobalStructTy = 2161 ArrayType::get(MetadataInitializers[0]->getType(), N); 2162 auto AllGlobals = new GlobalVariable( 2163 M, ArrayOfGlobalStructTy, false, GlobalVariable::InternalLinkage, 2164 ConstantArray::get(ArrayOfGlobalStructTy, MetadataInitializers), ""); 2165 if (Mapping.Scale > 3) 2166 AllGlobals->setAlignment(Align(1ULL << Mapping.Scale)); 2167 2168 IRB.CreateCall(AsanRegisterGlobals, 2169 {IRB.CreatePointerCast(AllGlobals, IntptrTy), 2170 ConstantInt::get(IntptrTy, N)}); 2171 2172 // We also need to unregister globals at the end, e.g., when a shared library 2173 // gets closed. 2174 if (DestructorKind != AsanDtorKind::None) { 2175 IRBuilder<> IrbDtor(CreateAsanModuleDtor(M)); 2176 IrbDtor.CreateCall(AsanUnregisterGlobals, 2177 {IRB.CreatePointerCast(AllGlobals, IntptrTy), 2178 ConstantInt::get(IntptrTy, N)}); 2179 } 2180 } 2181 2182 // This function replaces all global variables with new variables that have 2183 // trailing redzones. It also creates a function that poisons 2184 // redzones and inserts this function into llvm.global_ctors. 2185 // Sets *CtorComdat to true if the global registration code emitted into the 2186 // asan constructor is comdat-compatible. 2187 bool ModuleAddressSanitizer::InstrumentGlobals(IRBuilder<> &IRB, Module &M, 2188 bool *CtorComdat) { 2189 *CtorComdat = false; 2190 2191 // Build set of globals that are aliased by some GA, where 2192 // getExcludedAliasedGlobal(GA) returns the relevant GlobalVariable. 2193 SmallPtrSet<const GlobalVariable *, 16> AliasedGlobalExclusions; 2194 if (CompileKernel) { 2195 for (auto &GA : M.aliases()) { 2196 if (const GlobalVariable *GV = getExcludedAliasedGlobal(GA)) 2197 AliasedGlobalExclusions.insert(GV); 2198 } 2199 } 2200 2201 SmallVector<GlobalVariable *, 16> GlobalsToChange; 2202 for (auto &G : M.globals()) { 2203 if (!AliasedGlobalExclusions.count(&G) && shouldInstrumentGlobal(&G)) 2204 GlobalsToChange.push_back(&G); 2205 } 2206 2207 size_t n = GlobalsToChange.size(); 2208 if (n == 0) { 2209 *CtorComdat = true; 2210 return false; 2211 } 2212 2213 auto &DL = M.getDataLayout(); 2214 2215 // A global is described by a structure 2216 // size_t beg; 2217 // size_t size; 2218 // size_t size_with_redzone; 2219 // const char *name; 2220 // const char *module_name; 2221 // size_t has_dynamic_init; 2222 // size_t padding_for_windows_msvc_incremental_link; 2223 // size_t odr_indicator; 2224 // We initialize an array of such structures and pass it to a run-time call. 2225 StructType *GlobalStructTy = 2226 StructType::get(IntptrTy, IntptrTy, IntptrTy, IntptrTy, IntptrTy, 2227 IntptrTy, IntptrTy, IntptrTy); 2228 SmallVector<GlobalVariable *, 16> NewGlobals(n); 2229 SmallVector<Constant *, 16> Initializers(n); 2230 2231 bool HasDynamicallyInitializedGlobals = false; 2232 2233 // We shouldn't merge same module names, as this string serves as unique 2234 // module ID in runtime. 2235 GlobalVariable *ModuleName = createPrivateGlobalForString( 2236 M, M.getModuleIdentifier(), /*AllowMerging*/ false, kAsanGenPrefix); 2237 2238 for (size_t i = 0; i < n; i++) { 2239 GlobalVariable *G = GlobalsToChange[i]; 2240 2241 GlobalValue::SanitizerMetadata MD; 2242 if (G->hasSanitizerMetadata()) 2243 MD = G->getSanitizerMetadata(); 2244 2245 // TODO: Symbol names in the descriptor can be demangled by the runtime 2246 // library. This could save ~0.4% of VM size for a private large binary. 2247 std::string NameForGlobal = llvm::demangle(G->getName().str()); 2248 GlobalVariable *Name = 2249 createPrivateGlobalForString(M, NameForGlobal, 2250 /*AllowMerging*/ true, kAsanGenPrefix); 2251 2252 Type *Ty = G->getValueType(); 2253 const uint64_t SizeInBytes = DL.getTypeAllocSize(Ty); 2254 const uint64_t RightRedzoneSize = getRedzoneSizeForGlobal(SizeInBytes); 2255 Type *RightRedZoneTy = ArrayType::get(IRB.getInt8Ty(), RightRedzoneSize); 2256 2257 StructType *NewTy = StructType::get(Ty, RightRedZoneTy); 2258 Constant *NewInitializer = ConstantStruct::get( 2259 NewTy, G->getInitializer(), Constant::getNullValue(RightRedZoneTy)); 2260 2261 // Create a new global variable with enough space for a redzone. 2262 GlobalValue::LinkageTypes Linkage = G->getLinkage(); 2263 if (G->isConstant() && Linkage == GlobalValue::PrivateLinkage) 2264 Linkage = GlobalValue::InternalLinkage; 2265 GlobalVariable *NewGlobal = new GlobalVariable( 2266 M, NewTy, G->isConstant(), Linkage, NewInitializer, "", G, 2267 G->getThreadLocalMode(), G->getAddressSpace()); 2268 NewGlobal->copyAttributesFrom(G); 2269 NewGlobal->setComdat(G->getComdat()); 2270 NewGlobal->setAlignment(MaybeAlign(getMinRedzoneSizeForGlobal())); 2271 // Don't fold globals with redzones. ODR violation detector and redzone 2272 // poisoning implicitly creates a dependence on the global's address, so it 2273 // is no longer valid for it to be marked unnamed_addr. 2274 NewGlobal->setUnnamedAddr(GlobalValue::UnnamedAddr::None); 2275 2276 // Move null-terminated C strings to "__asan_cstring" section on Darwin. 2277 if (TargetTriple.isOSBinFormatMachO() && !G->hasSection() && 2278 G->isConstant()) { 2279 auto Seq = dyn_cast<ConstantDataSequential>(G->getInitializer()); 2280 if (Seq && Seq->isCString()) 2281 NewGlobal->setSection("__TEXT,__asan_cstring,regular"); 2282 } 2283 2284 // Transfer the debug info and type metadata. The payload starts at offset 2285 // zero so we can copy the metadata over as is. 2286 NewGlobal->copyMetadata(G, 0); 2287 2288 Value *Indices2[2]; 2289 Indices2[0] = IRB.getInt32(0); 2290 Indices2[1] = IRB.getInt32(0); 2291 2292 G->replaceAllUsesWith( 2293 ConstantExpr::getGetElementPtr(NewTy, NewGlobal, Indices2, true)); 2294 NewGlobal->takeName(G); 2295 G->eraseFromParent(); 2296 NewGlobals[i] = NewGlobal; 2297 2298 Constant *ODRIndicator = ConstantExpr::getNullValue(IRB.getInt8PtrTy()); 2299 GlobalValue *InstrumentedGlobal = NewGlobal; 2300 2301 bool CanUsePrivateAliases = 2302 TargetTriple.isOSBinFormatELF() || TargetTriple.isOSBinFormatMachO() || 2303 TargetTriple.isOSBinFormatWasm(); 2304 if (CanUsePrivateAliases && UsePrivateAlias) { 2305 // Create local alias for NewGlobal to avoid crash on ODR between 2306 // instrumented and non-instrumented libraries. 2307 InstrumentedGlobal = 2308 GlobalAlias::create(GlobalValue::PrivateLinkage, "", NewGlobal); 2309 } 2310 2311 // ODR should not happen for local linkage. 2312 if (NewGlobal->hasLocalLinkage()) { 2313 ODRIndicator = ConstantExpr::getIntToPtr(ConstantInt::get(IntptrTy, -1), 2314 IRB.getInt8PtrTy()); 2315 } else if (UseOdrIndicator) { 2316 // With local aliases, we need to provide another externally visible 2317 // symbol __odr_asan_XXX to detect ODR violation. 2318 auto *ODRIndicatorSym = 2319 new GlobalVariable(M, IRB.getInt8Ty(), false, Linkage, 2320 Constant::getNullValue(IRB.getInt8Ty()), 2321 kODRGenPrefix + NameForGlobal, nullptr, 2322 NewGlobal->getThreadLocalMode()); 2323 2324 // Set meaningful attributes for indicator symbol. 2325 ODRIndicatorSym->setVisibility(NewGlobal->getVisibility()); 2326 ODRIndicatorSym->setDLLStorageClass(NewGlobal->getDLLStorageClass()); 2327 ODRIndicatorSym->setAlignment(Align(1)); 2328 ODRIndicator = ODRIndicatorSym; 2329 } 2330 2331 Constant *Initializer = ConstantStruct::get( 2332 GlobalStructTy, 2333 ConstantExpr::getPointerCast(InstrumentedGlobal, IntptrTy), 2334 ConstantInt::get(IntptrTy, SizeInBytes), 2335 ConstantInt::get(IntptrTy, SizeInBytes + RightRedzoneSize), 2336 ConstantExpr::getPointerCast(Name, IntptrTy), 2337 ConstantExpr::getPointerCast(ModuleName, IntptrTy), 2338 ConstantInt::get(IntptrTy, MD.IsDynInit), 2339 Constant::getNullValue(IntptrTy), 2340 ConstantExpr::getPointerCast(ODRIndicator, IntptrTy)); 2341 2342 if (ClInitializers && MD.IsDynInit) 2343 HasDynamicallyInitializedGlobals = true; 2344 2345 LLVM_DEBUG(dbgs() << "NEW GLOBAL: " << *NewGlobal << "\n"); 2346 2347 Initializers[i] = Initializer; 2348 } 2349 2350 // Add instrumented globals to llvm.compiler.used list to avoid LTO from 2351 // ConstantMerge'ing them. 2352 SmallVector<GlobalValue *, 16> GlobalsToAddToUsedList; 2353 for (size_t i = 0; i < n; i++) { 2354 GlobalVariable *G = NewGlobals[i]; 2355 if (G->getName().empty()) continue; 2356 GlobalsToAddToUsedList.push_back(G); 2357 } 2358 appendToCompilerUsed(M, ArrayRef<GlobalValue *>(GlobalsToAddToUsedList)); 2359 2360 std::string ELFUniqueModuleId = 2361 (UseGlobalsGC && TargetTriple.isOSBinFormatELF()) ? getUniqueModuleId(&M) 2362 : ""; 2363 2364 if (!ELFUniqueModuleId.empty()) { 2365 InstrumentGlobalsELF(IRB, M, NewGlobals, Initializers, ELFUniqueModuleId); 2366 *CtorComdat = true; 2367 } else if (UseGlobalsGC && TargetTriple.isOSBinFormatCOFF()) { 2368 InstrumentGlobalsCOFF(IRB, M, NewGlobals, Initializers); 2369 } else if (UseGlobalsGC && ShouldUseMachOGlobalsSection()) { 2370 InstrumentGlobalsMachO(IRB, M, NewGlobals, Initializers); 2371 } else { 2372 InstrumentGlobalsWithMetadataArray(IRB, M, NewGlobals, Initializers); 2373 } 2374 2375 // Create calls for poisoning before initializers run and unpoisoning after. 2376 if (HasDynamicallyInitializedGlobals) 2377 createInitializerPoisonCalls(M, ModuleName); 2378 2379 LLVM_DEBUG(dbgs() << M); 2380 return true; 2381 } 2382 2383 uint64_t 2384 ModuleAddressSanitizer::getRedzoneSizeForGlobal(uint64_t SizeInBytes) const { 2385 constexpr uint64_t kMaxRZ = 1 << 18; 2386 const uint64_t MinRZ = getMinRedzoneSizeForGlobal(); 2387 2388 uint64_t RZ = 0; 2389 if (SizeInBytes <= MinRZ / 2) { 2390 // Reduce redzone size for small size objects, e.g. int, char[1]. MinRZ is 2391 // at least 32 bytes, optimize when SizeInBytes is less than or equal to 2392 // half of MinRZ. 2393 RZ = MinRZ - SizeInBytes; 2394 } else { 2395 // Calculate RZ, where MinRZ <= RZ <= MaxRZ, and RZ ~ 1/4 * SizeInBytes. 2396 RZ = std::max(MinRZ, std::min(kMaxRZ, (SizeInBytes / MinRZ / 4) * MinRZ)); 2397 2398 // Round up to multiple of MinRZ. 2399 if (SizeInBytes % MinRZ) 2400 RZ += MinRZ - (SizeInBytes % MinRZ); 2401 } 2402 2403 assert((RZ + SizeInBytes) % MinRZ == 0); 2404 2405 return RZ; 2406 } 2407 2408 int ModuleAddressSanitizer::GetAsanVersion(const Module &M) const { 2409 int LongSize = M.getDataLayout().getPointerSizeInBits(); 2410 bool isAndroid = Triple(M.getTargetTriple()).isAndroid(); 2411 int Version = 8; 2412 // 32-bit Android is one version ahead because of the switch to dynamic 2413 // shadow. 2414 Version += (LongSize == 32 && isAndroid); 2415 return Version; 2416 } 2417 2418 bool ModuleAddressSanitizer::instrumentModule(Module &M) { 2419 initializeCallbacks(M); 2420 2421 // Create a module constructor. A destructor is created lazily because not all 2422 // platforms, and not all modules need it. 2423 if (CompileKernel) { 2424 // The kernel always builds with its own runtime, and therefore does not 2425 // need the init and version check calls. 2426 AsanCtorFunction = createSanitizerCtor(M, kAsanModuleCtorName); 2427 } else { 2428 std::string AsanVersion = std::to_string(GetAsanVersion(M)); 2429 std::string VersionCheckName = 2430 ClInsertVersionCheck ? (kAsanVersionCheckNamePrefix + AsanVersion) : ""; 2431 std::tie(AsanCtorFunction, std::ignore) = 2432 createSanitizerCtorAndInitFunctions(M, kAsanModuleCtorName, 2433 kAsanInitName, /*InitArgTypes=*/{}, 2434 /*InitArgs=*/{}, VersionCheckName); 2435 } 2436 2437 bool CtorComdat = true; 2438 if (ClGlobals) { 2439 IRBuilder<> IRB(AsanCtorFunction->getEntryBlock().getTerminator()); 2440 InstrumentGlobals(IRB, M, &CtorComdat); 2441 } 2442 2443 const uint64_t Priority = GetCtorAndDtorPriority(TargetTriple); 2444 2445 // Put the constructor and destructor in comdat if both 2446 // (1) global instrumentation is not TU-specific 2447 // (2) target is ELF. 2448 if (UseCtorComdat && TargetTriple.isOSBinFormatELF() && CtorComdat) { 2449 AsanCtorFunction->setComdat(M.getOrInsertComdat(kAsanModuleCtorName)); 2450 appendToGlobalCtors(M, AsanCtorFunction, Priority, AsanCtorFunction); 2451 if (AsanDtorFunction) { 2452 AsanDtorFunction->setComdat(M.getOrInsertComdat(kAsanModuleDtorName)); 2453 appendToGlobalDtors(M, AsanDtorFunction, Priority, AsanDtorFunction); 2454 } 2455 } else { 2456 appendToGlobalCtors(M, AsanCtorFunction, Priority); 2457 if (AsanDtorFunction) 2458 appendToGlobalDtors(M, AsanDtorFunction, Priority); 2459 } 2460 2461 return true; 2462 } 2463 2464 void AddressSanitizer::initializeCallbacks(Module &M) { 2465 IRBuilder<> IRB(*C); 2466 // Create __asan_report* callbacks. 2467 // IsWrite, TypeSize and Exp are encoded in the function name. 2468 for (int Exp = 0; Exp < 2; Exp++) { 2469 for (size_t AccessIsWrite = 0; AccessIsWrite <= 1; AccessIsWrite++) { 2470 const std::string TypeStr = AccessIsWrite ? "store" : "load"; 2471 const std::string ExpStr = Exp ? "exp_" : ""; 2472 const std::string EndingStr = Recover ? "_noabort" : ""; 2473 2474 SmallVector<Type *, 3> Args2 = {IntptrTy, IntptrTy}; 2475 SmallVector<Type *, 2> Args1{1, IntptrTy}; 2476 if (Exp) { 2477 Type *ExpType = Type::getInt32Ty(*C); 2478 Args2.push_back(ExpType); 2479 Args1.push_back(ExpType); 2480 } 2481 AsanErrorCallbackSized[AccessIsWrite][Exp] = M.getOrInsertFunction( 2482 kAsanReportErrorTemplate + ExpStr + TypeStr + "_n" + EndingStr, 2483 FunctionType::get(IRB.getVoidTy(), Args2, false)); 2484 2485 AsanMemoryAccessCallbackSized[AccessIsWrite][Exp] = M.getOrInsertFunction( 2486 ClMemoryAccessCallbackPrefix + ExpStr + TypeStr + "N" + EndingStr, 2487 FunctionType::get(IRB.getVoidTy(), Args2, false)); 2488 2489 for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes; 2490 AccessSizeIndex++) { 2491 const std::string Suffix = TypeStr + itostr(1ULL << AccessSizeIndex); 2492 AsanErrorCallback[AccessIsWrite][Exp][AccessSizeIndex] = 2493 M.getOrInsertFunction( 2494 kAsanReportErrorTemplate + ExpStr + Suffix + EndingStr, 2495 FunctionType::get(IRB.getVoidTy(), Args1, false)); 2496 2497 AsanMemoryAccessCallback[AccessIsWrite][Exp][AccessSizeIndex] = 2498 M.getOrInsertFunction( 2499 ClMemoryAccessCallbackPrefix + ExpStr + Suffix + EndingStr, 2500 FunctionType::get(IRB.getVoidTy(), Args1, false)); 2501 } 2502 } 2503 } 2504 2505 const std::string MemIntrinCallbackPrefix = 2506 (CompileKernel && !ClKasanMemIntrinCallbackPrefix) 2507 ? std::string("") 2508 : ClMemoryAccessCallbackPrefix; 2509 AsanMemmove = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memmove", 2510 IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), 2511 IRB.getInt8PtrTy(), IntptrTy); 2512 AsanMemcpy = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memcpy", 2513 IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), 2514 IRB.getInt8PtrTy(), IntptrTy); 2515 AsanMemset = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memset", 2516 IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), 2517 IRB.getInt32Ty(), IntptrTy); 2518 2519 AsanHandleNoReturnFunc = 2520 M.getOrInsertFunction(kAsanHandleNoReturnName, IRB.getVoidTy()); 2521 2522 AsanPtrCmpFunction = 2523 M.getOrInsertFunction(kAsanPtrCmp, IRB.getVoidTy(), IntptrTy, IntptrTy); 2524 AsanPtrSubFunction = 2525 M.getOrInsertFunction(kAsanPtrSub, IRB.getVoidTy(), IntptrTy, IntptrTy); 2526 if (Mapping.InGlobal) 2527 AsanShadowGlobal = M.getOrInsertGlobal("__asan_shadow", 2528 ArrayType::get(IRB.getInt8Ty(), 0)); 2529 2530 AMDGPUAddressShared = M.getOrInsertFunction( 2531 kAMDGPUAddressSharedName, IRB.getInt1Ty(), IRB.getInt8PtrTy()); 2532 AMDGPUAddressPrivate = M.getOrInsertFunction( 2533 kAMDGPUAddressPrivateName, IRB.getInt1Ty(), IRB.getInt8PtrTy()); 2534 } 2535 2536 bool AddressSanitizer::maybeInsertAsanInitAtFunctionEntry(Function &F) { 2537 // For each NSObject descendant having a +load method, this method is invoked 2538 // by the ObjC runtime before any of the static constructors is called. 2539 // Therefore we need to instrument such methods with a call to __asan_init 2540 // at the beginning in order to initialize our runtime before any access to 2541 // the shadow memory. 2542 // We cannot just ignore these methods, because they may call other 2543 // instrumented functions. 2544 if (F.getName().find(" load]") != std::string::npos) { 2545 FunctionCallee AsanInitFunction = 2546 declareSanitizerInitFunction(*F.getParent(), kAsanInitName, {}); 2547 IRBuilder<> IRB(&F.front(), F.front().begin()); 2548 IRB.CreateCall(AsanInitFunction, {}); 2549 return true; 2550 } 2551 return false; 2552 } 2553 2554 bool AddressSanitizer::maybeInsertDynamicShadowAtFunctionEntry(Function &F) { 2555 // Generate code only when dynamic addressing is needed. 2556 if (Mapping.Offset != kDynamicShadowSentinel) 2557 return false; 2558 2559 IRBuilder<> IRB(&F.front().front()); 2560 if (Mapping.InGlobal) { 2561 if (ClWithIfuncSuppressRemat) { 2562 // An empty inline asm with input reg == output reg. 2563 // An opaque pointer-to-int cast, basically. 2564 InlineAsm *Asm = InlineAsm::get( 2565 FunctionType::get(IntptrTy, {AsanShadowGlobal->getType()}, false), 2566 StringRef(""), StringRef("=r,0"), 2567 /*hasSideEffects=*/false); 2568 LocalDynamicShadow = 2569 IRB.CreateCall(Asm, {AsanShadowGlobal}, ".asan.shadow"); 2570 } else { 2571 LocalDynamicShadow = 2572 IRB.CreatePointerCast(AsanShadowGlobal, IntptrTy, ".asan.shadow"); 2573 } 2574 } else { 2575 Value *GlobalDynamicAddress = F.getParent()->getOrInsertGlobal( 2576 kAsanShadowMemoryDynamicAddress, IntptrTy); 2577 LocalDynamicShadow = IRB.CreateLoad(IntptrTy, GlobalDynamicAddress); 2578 } 2579 return true; 2580 } 2581 2582 void AddressSanitizer::markEscapedLocalAllocas(Function &F) { 2583 // Find the one possible call to llvm.localescape and pre-mark allocas passed 2584 // to it as uninteresting. This assumes we haven't started processing allocas 2585 // yet. This check is done up front because iterating the use list in 2586 // isInterestingAlloca would be algorithmically slower. 2587 assert(ProcessedAllocas.empty() && "must process localescape before allocas"); 2588 2589 // Try to get the declaration of llvm.localescape. If it's not in the module, 2590 // we can exit early. 2591 if (!F.getParent()->getFunction("llvm.localescape")) return; 2592 2593 // Look for a call to llvm.localescape call in the entry block. It can't be in 2594 // any other block. 2595 for (Instruction &I : F.getEntryBlock()) { 2596 IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I); 2597 if (II && II->getIntrinsicID() == Intrinsic::localescape) { 2598 // We found a call. Mark all the allocas passed in as uninteresting. 2599 for (Value *Arg : II->args()) { 2600 AllocaInst *AI = dyn_cast<AllocaInst>(Arg->stripPointerCasts()); 2601 assert(AI && AI->isStaticAlloca() && 2602 "non-static alloca arg to localescape"); 2603 ProcessedAllocas[AI] = false; 2604 } 2605 break; 2606 } 2607 } 2608 } 2609 2610 bool AddressSanitizer::suppressInstrumentationSiteForDebug(int &Instrumented) { 2611 bool ShouldInstrument = 2612 ClDebugMin < 0 || ClDebugMax < 0 || 2613 (Instrumented >= ClDebugMin && Instrumented <= ClDebugMax); 2614 Instrumented++; 2615 return !ShouldInstrument; 2616 } 2617 2618 bool AddressSanitizer::instrumentFunction(Function &F, 2619 const TargetLibraryInfo *TLI) { 2620 if (F.empty()) 2621 return false; 2622 if (F.getLinkage() == GlobalValue::AvailableExternallyLinkage) return false; 2623 if (!ClDebugFunc.empty() && ClDebugFunc == F.getName()) return false; 2624 if (F.getName().startswith("__asan_")) return false; 2625 2626 bool FunctionModified = false; 2627 2628 // If needed, insert __asan_init before checking for SanitizeAddress attr. 2629 // This function needs to be called even if the function body is not 2630 // instrumented. 2631 if (maybeInsertAsanInitAtFunctionEntry(F)) 2632 FunctionModified = true; 2633 2634 // Leave if the function doesn't need instrumentation. 2635 if (!F.hasFnAttribute(Attribute::SanitizeAddress)) return FunctionModified; 2636 2637 if (F.hasFnAttribute(Attribute::DisableSanitizerInstrumentation)) 2638 return FunctionModified; 2639 2640 LLVM_DEBUG(dbgs() << "ASAN instrumenting:\n" << F << "\n"); 2641 2642 initializeCallbacks(*F.getParent()); 2643 2644 FunctionStateRAII CleanupObj(this); 2645 2646 FunctionModified |= maybeInsertDynamicShadowAtFunctionEntry(F); 2647 2648 // We can't instrument allocas used with llvm.localescape. Only static allocas 2649 // can be passed to that intrinsic. 2650 markEscapedLocalAllocas(F); 2651 2652 // We want to instrument every address only once per basic block (unless there 2653 // are calls between uses). 2654 SmallPtrSet<Value *, 16> TempsToInstrument; 2655 SmallVector<InterestingMemoryOperand, 16> OperandsToInstrument; 2656 SmallVector<MemIntrinsic *, 16> IntrinToInstrument; 2657 SmallVector<Instruction *, 8> NoReturnCalls; 2658 SmallVector<BasicBlock *, 16> AllBlocks; 2659 SmallVector<Instruction *, 16> PointerComparisonsOrSubtracts; 2660 2661 // Fill the set of memory operations to instrument. 2662 for (auto &BB : F) { 2663 AllBlocks.push_back(&BB); 2664 TempsToInstrument.clear(); 2665 int NumInsnsPerBB = 0; 2666 for (auto &Inst : BB) { 2667 if (LooksLikeCodeInBug11395(&Inst)) return false; 2668 // Skip instructions inserted by another instrumentation. 2669 if (Inst.hasMetadata(LLVMContext::MD_nosanitize)) 2670 continue; 2671 SmallVector<InterestingMemoryOperand, 1> InterestingOperands; 2672 getInterestingMemoryOperands(&Inst, InterestingOperands); 2673 2674 if (!InterestingOperands.empty()) { 2675 for (auto &Operand : InterestingOperands) { 2676 if (ClOpt && ClOptSameTemp) { 2677 Value *Ptr = Operand.getPtr(); 2678 // If we have a mask, skip instrumentation if we've already 2679 // instrumented the full object. But don't add to TempsToInstrument 2680 // because we might get another load/store with a different mask. 2681 if (Operand.MaybeMask) { 2682 if (TempsToInstrument.count(Ptr)) 2683 continue; // We've seen this (whole) temp in the current BB. 2684 } else { 2685 if (!TempsToInstrument.insert(Ptr).second) 2686 continue; // We've seen this temp in the current BB. 2687 } 2688 } 2689 OperandsToInstrument.push_back(Operand); 2690 NumInsnsPerBB++; 2691 } 2692 } else if (((ClInvalidPointerPairs || ClInvalidPointerCmp) && 2693 isInterestingPointerComparison(&Inst)) || 2694 ((ClInvalidPointerPairs || ClInvalidPointerSub) && 2695 isInterestingPointerSubtraction(&Inst))) { 2696 PointerComparisonsOrSubtracts.push_back(&Inst); 2697 } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(&Inst)) { 2698 // ok, take it. 2699 IntrinToInstrument.push_back(MI); 2700 NumInsnsPerBB++; 2701 } else { 2702 if (auto *CB = dyn_cast<CallBase>(&Inst)) { 2703 // A call inside BB. 2704 TempsToInstrument.clear(); 2705 if (CB->doesNotReturn()) 2706 NoReturnCalls.push_back(CB); 2707 } 2708 if (CallInst *CI = dyn_cast<CallInst>(&Inst)) 2709 maybeMarkSanitizerLibraryCallNoBuiltin(CI, TLI); 2710 } 2711 if (NumInsnsPerBB >= ClMaxInsnsToInstrumentPerBB) break; 2712 } 2713 } 2714 2715 bool UseCalls = (ClInstrumentationWithCallsThreshold >= 0 && 2716 OperandsToInstrument.size() + IntrinToInstrument.size() > 2717 (unsigned)ClInstrumentationWithCallsThreshold); 2718 const DataLayout &DL = F.getParent()->getDataLayout(); 2719 ObjectSizeOpts ObjSizeOpts; 2720 ObjSizeOpts.RoundToAlign = true; 2721 ObjectSizeOffsetVisitor ObjSizeVis(DL, TLI, F.getContext(), ObjSizeOpts); 2722 2723 // Instrument. 2724 int NumInstrumented = 0; 2725 for (auto &Operand : OperandsToInstrument) { 2726 if (!suppressInstrumentationSiteForDebug(NumInstrumented)) 2727 instrumentMop(ObjSizeVis, Operand, UseCalls, 2728 F.getParent()->getDataLayout()); 2729 FunctionModified = true; 2730 } 2731 for (auto Inst : IntrinToInstrument) { 2732 if (!suppressInstrumentationSiteForDebug(NumInstrumented)) 2733 instrumentMemIntrinsic(Inst); 2734 FunctionModified = true; 2735 } 2736 2737 FunctionStackPoisoner FSP(F, *this); 2738 bool ChangedStack = FSP.runOnFunction(); 2739 2740 // We must unpoison the stack before NoReturn calls (throw, _exit, etc). 2741 // See e.g. https://github.com/google/sanitizers/issues/37 2742 for (auto CI : NoReturnCalls) { 2743 IRBuilder<> IRB(CI); 2744 IRB.CreateCall(AsanHandleNoReturnFunc, {}); 2745 } 2746 2747 for (auto Inst : PointerComparisonsOrSubtracts) { 2748 instrumentPointerComparisonOrSubtraction(Inst); 2749 FunctionModified = true; 2750 } 2751 2752 if (ChangedStack || !NoReturnCalls.empty()) 2753 FunctionModified = true; 2754 2755 LLVM_DEBUG(dbgs() << "ASAN done instrumenting: " << FunctionModified << " " 2756 << F << "\n"); 2757 2758 return FunctionModified; 2759 } 2760 2761 // Workaround for bug 11395: we don't want to instrument stack in functions 2762 // with large assembly blobs (32-bit only), otherwise reg alloc may crash. 2763 // FIXME: remove once the bug 11395 is fixed. 2764 bool AddressSanitizer::LooksLikeCodeInBug11395(Instruction *I) { 2765 if (LongSize != 32) return false; 2766 CallInst *CI = dyn_cast<CallInst>(I); 2767 if (!CI || !CI->isInlineAsm()) return false; 2768 if (CI->arg_size() <= 5) 2769 return false; 2770 // We have inline assembly with quite a few arguments. 2771 return true; 2772 } 2773 2774 void FunctionStackPoisoner::initializeCallbacks(Module &M) { 2775 IRBuilder<> IRB(*C); 2776 if (ASan.UseAfterReturn == AsanDetectStackUseAfterReturnMode::Always || 2777 ASan.UseAfterReturn == AsanDetectStackUseAfterReturnMode::Runtime) { 2778 const char *MallocNameTemplate = 2779 ASan.UseAfterReturn == AsanDetectStackUseAfterReturnMode::Always 2780 ? kAsanStackMallocAlwaysNameTemplate 2781 : kAsanStackMallocNameTemplate; 2782 for (int Index = 0; Index <= kMaxAsanStackMallocSizeClass; Index++) { 2783 std::string Suffix = itostr(Index); 2784 AsanStackMallocFunc[Index] = M.getOrInsertFunction( 2785 MallocNameTemplate + Suffix, IntptrTy, IntptrTy); 2786 AsanStackFreeFunc[Index] = 2787 M.getOrInsertFunction(kAsanStackFreeNameTemplate + Suffix, 2788 IRB.getVoidTy(), IntptrTy, IntptrTy); 2789 } 2790 } 2791 if (ASan.UseAfterScope) { 2792 AsanPoisonStackMemoryFunc = M.getOrInsertFunction( 2793 kAsanPoisonStackMemoryName, IRB.getVoidTy(), IntptrTy, IntptrTy); 2794 AsanUnpoisonStackMemoryFunc = M.getOrInsertFunction( 2795 kAsanUnpoisonStackMemoryName, IRB.getVoidTy(), IntptrTy, IntptrTy); 2796 } 2797 2798 for (size_t Val : {0x00, 0xf1, 0xf2, 0xf3, 0xf5, 0xf8}) { 2799 std::ostringstream Name; 2800 Name << kAsanSetShadowPrefix; 2801 Name << std::setw(2) << std::setfill('0') << std::hex << Val; 2802 AsanSetShadowFunc[Val] = 2803 M.getOrInsertFunction(Name.str(), IRB.getVoidTy(), IntptrTy, IntptrTy); 2804 } 2805 2806 AsanAllocaPoisonFunc = M.getOrInsertFunction( 2807 kAsanAllocaPoison, IRB.getVoidTy(), IntptrTy, IntptrTy); 2808 AsanAllocasUnpoisonFunc = M.getOrInsertFunction( 2809 kAsanAllocasUnpoison, IRB.getVoidTy(), IntptrTy, IntptrTy); 2810 } 2811 2812 void FunctionStackPoisoner::copyToShadowInline(ArrayRef<uint8_t> ShadowMask, 2813 ArrayRef<uint8_t> ShadowBytes, 2814 size_t Begin, size_t End, 2815 IRBuilder<> &IRB, 2816 Value *ShadowBase) { 2817 if (Begin >= End) 2818 return; 2819 2820 const size_t LargestStoreSizeInBytes = 2821 std::min<size_t>(sizeof(uint64_t), ASan.LongSize / 8); 2822 2823 const bool IsLittleEndian = F.getParent()->getDataLayout().isLittleEndian(); 2824 2825 // Poison given range in shadow using larges store size with out leading and 2826 // trailing zeros in ShadowMask. Zeros never change, so they need neither 2827 // poisoning nor up-poisoning. Still we don't mind if some of them get into a 2828 // middle of a store. 2829 for (size_t i = Begin; i < End;) { 2830 if (!ShadowMask[i]) { 2831 assert(!ShadowBytes[i]); 2832 ++i; 2833 continue; 2834 } 2835 2836 size_t StoreSizeInBytes = LargestStoreSizeInBytes; 2837 // Fit store size into the range. 2838 while (StoreSizeInBytes > End - i) 2839 StoreSizeInBytes /= 2; 2840 2841 // Minimize store size by trimming trailing zeros. 2842 for (size_t j = StoreSizeInBytes - 1; j && !ShadowMask[i + j]; --j) { 2843 while (j <= StoreSizeInBytes / 2) 2844 StoreSizeInBytes /= 2; 2845 } 2846 2847 uint64_t Val = 0; 2848 for (size_t j = 0; j < StoreSizeInBytes; j++) { 2849 if (IsLittleEndian) 2850 Val |= (uint64_t)ShadowBytes[i + j] << (8 * j); 2851 else 2852 Val = (Val << 8) | ShadowBytes[i + j]; 2853 } 2854 2855 Value *Ptr = IRB.CreateAdd(ShadowBase, ConstantInt::get(IntptrTy, i)); 2856 Value *Poison = IRB.getIntN(StoreSizeInBytes * 8, Val); 2857 IRB.CreateAlignedStore( 2858 Poison, IRB.CreateIntToPtr(Ptr, Poison->getType()->getPointerTo()), 2859 Align(1)); 2860 2861 i += StoreSizeInBytes; 2862 } 2863 } 2864 2865 void FunctionStackPoisoner::copyToShadow(ArrayRef<uint8_t> ShadowMask, 2866 ArrayRef<uint8_t> ShadowBytes, 2867 IRBuilder<> &IRB, Value *ShadowBase) { 2868 copyToShadow(ShadowMask, ShadowBytes, 0, ShadowMask.size(), IRB, ShadowBase); 2869 } 2870 2871 void FunctionStackPoisoner::copyToShadow(ArrayRef<uint8_t> ShadowMask, 2872 ArrayRef<uint8_t> ShadowBytes, 2873 size_t Begin, size_t End, 2874 IRBuilder<> &IRB, Value *ShadowBase) { 2875 assert(ShadowMask.size() == ShadowBytes.size()); 2876 size_t Done = Begin; 2877 for (size_t i = Begin, j = Begin + 1; i < End; i = j++) { 2878 if (!ShadowMask[i]) { 2879 assert(!ShadowBytes[i]); 2880 continue; 2881 } 2882 uint8_t Val = ShadowBytes[i]; 2883 if (!AsanSetShadowFunc[Val]) 2884 continue; 2885 2886 // Skip same values. 2887 for (; j < End && ShadowMask[j] && Val == ShadowBytes[j]; ++j) { 2888 } 2889 2890 if (j - i >= ClMaxInlinePoisoningSize) { 2891 copyToShadowInline(ShadowMask, ShadowBytes, Done, i, IRB, ShadowBase); 2892 IRB.CreateCall(AsanSetShadowFunc[Val], 2893 {IRB.CreateAdd(ShadowBase, ConstantInt::get(IntptrTy, i)), 2894 ConstantInt::get(IntptrTy, j - i)}); 2895 Done = j; 2896 } 2897 } 2898 2899 copyToShadowInline(ShadowMask, ShadowBytes, Done, End, IRB, ShadowBase); 2900 } 2901 2902 // Fake stack allocator (asan_fake_stack.h) has 11 size classes 2903 // for every power of 2 from kMinStackMallocSize to kMaxAsanStackMallocSizeClass 2904 static int StackMallocSizeClass(uint64_t LocalStackSize) { 2905 assert(LocalStackSize <= kMaxStackMallocSize); 2906 uint64_t MaxSize = kMinStackMallocSize; 2907 for (int i = 0;; i++, MaxSize *= 2) 2908 if (LocalStackSize <= MaxSize) return i; 2909 llvm_unreachable("impossible LocalStackSize"); 2910 } 2911 2912 void FunctionStackPoisoner::copyArgsPassedByValToAllocas() { 2913 Instruction *CopyInsertPoint = &F.front().front(); 2914 if (CopyInsertPoint == ASan.LocalDynamicShadow) { 2915 // Insert after the dynamic shadow location is determined 2916 CopyInsertPoint = CopyInsertPoint->getNextNode(); 2917 assert(CopyInsertPoint); 2918 } 2919 IRBuilder<> IRB(CopyInsertPoint); 2920 const DataLayout &DL = F.getParent()->getDataLayout(); 2921 for (Argument &Arg : F.args()) { 2922 if (Arg.hasByValAttr()) { 2923 Type *Ty = Arg.getParamByValType(); 2924 const Align Alignment = 2925 DL.getValueOrABITypeAlignment(Arg.getParamAlign(), Ty); 2926 2927 AllocaInst *AI = IRB.CreateAlloca( 2928 Ty, nullptr, 2929 (Arg.hasName() ? Arg.getName() : "Arg" + Twine(Arg.getArgNo())) + 2930 ".byval"); 2931 AI->setAlignment(Alignment); 2932 Arg.replaceAllUsesWith(AI); 2933 2934 uint64_t AllocSize = DL.getTypeAllocSize(Ty); 2935 IRB.CreateMemCpy(AI, Alignment, &Arg, Alignment, AllocSize); 2936 } 2937 } 2938 } 2939 2940 PHINode *FunctionStackPoisoner::createPHI(IRBuilder<> &IRB, Value *Cond, 2941 Value *ValueIfTrue, 2942 Instruction *ThenTerm, 2943 Value *ValueIfFalse) { 2944 PHINode *PHI = IRB.CreatePHI(IntptrTy, 2); 2945 BasicBlock *CondBlock = cast<Instruction>(Cond)->getParent(); 2946 PHI->addIncoming(ValueIfFalse, CondBlock); 2947 BasicBlock *ThenBlock = ThenTerm->getParent(); 2948 PHI->addIncoming(ValueIfTrue, ThenBlock); 2949 return PHI; 2950 } 2951 2952 Value *FunctionStackPoisoner::createAllocaForLayout( 2953 IRBuilder<> &IRB, const ASanStackFrameLayout &L, bool Dynamic) { 2954 AllocaInst *Alloca; 2955 if (Dynamic) { 2956 Alloca = IRB.CreateAlloca(IRB.getInt8Ty(), 2957 ConstantInt::get(IRB.getInt64Ty(), L.FrameSize), 2958 "MyAlloca"); 2959 } else { 2960 Alloca = IRB.CreateAlloca(ArrayType::get(IRB.getInt8Ty(), L.FrameSize), 2961 nullptr, "MyAlloca"); 2962 assert(Alloca->isStaticAlloca()); 2963 } 2964 assert((ClRealignStack & (ClRealignStack - 1)) == 0); 2965 uint64_t FrameAlignment = std::max(L.FrameAlignment, uint64_t(ClRealignStack)); 2966 Alloca->setAlignment(Align(FrameAlignment)); 2967 return IRB.CreatePointerCast(Alloca, IntptrTy); 2968 } 2969 2970 void FunctionStackPoisoner::createDynamicAllocasInitStorage() { 2971 BasicBlock &FirstBB = *F.begin(); 2972 IRBuilder<> IRB(dyn_cast<Instruction>(FirstBB.begin())); 2973 DynamicAllocaLayout = IRB.CreateAlloca(IntptrTy, nullptr); 2974 IRB.CreateStore(Constant::getNullValue(IntptrTy), DynamicAllocaLayout); 2975 DynamicAllocaLayout->setAlignment(Align(32)); 2976 } 2977 2978 void FunctionStackPoisoner::processDynamicAllocas() { 2979 if (!ClInstrumentDynamicAllocas || DynamicAllocaVec.empty()) { 2980 assert(DynamicAllocaPoisonCallVec.empty()); 2981 return; 2982 } 2983 2984 // Insert poison calls for lifetime intrinsics for dynamic allocas. 2985 for (const auto &APC : DynamicAllocaPoisonCallVec) { 2986 assert(APC.InsBefore); 2987 assert(APC.AI); 2988 assert(ASan.isInterestingAlloca(*APC.AI)); 2989 assert(!APC.AI->isStaticAlloca()); 2990 2991 IRBuilder<> IRB(APC.InsBefore); 2992 poisonAlloca(APC.AI, APC.Size, IRB, APC.DoPoison); 2993 // Dynamic allocas will be unpoisoned unconditionally below in 2994 // unpoisonDynamicAllocas. 2995 // Flag that we need unpoison static allocas. 2996 } 2997 2998 // Handle dynamic allocas. 2999 createDynamicAllocasInitStorage(); 3000 for (auto &AI : DynamicAllocaVec) 3001 handleDynamicAllocaCall(AI); 3002 unpoisonDynamicAllocas(); 3003 } 3004 3005 /// Collect instructions in the entry block after \p InsBefore which initialize 3006 /// permanent storage for a function argument. These instructions must remain in 3007 /// the entry block so that uninitialized values do not appear in backtraces. An 3008 /// added benefit is that this conserves spill slots. This does not move stores 3009 /// before instrumented / "interesting" allocas. 3010 static void findStoresToUninstrumentedArgAllocas( 3011 AddressSanitizer &ASan, Instruction &InsBefore, 3012 SmallVectorImpl<Instruction *> &InitInsts) { 3013 Instruction *Start = InsBefore.getNextNonDebugInstruction(); 3014 for (Instruction *It = Start; It; It = It->getNextNonDebugInstruction()) { 3015 // Argument initialization looks like: 3016 // 1) store <Argument>, <Alloca> OR 3017 // 2) <CastArgument> = cast <Argument> to ... 3018 // store <CastArgument> to <Alloca> 3019 // Do not consider any other kind of instruction. 3020 // 3021 // Note: This covers all known cases, but may not be exhaustive. An 3022 // alternative to pattern-matching stores is to DFS over all Argument uses: 3023 // this might be more general, but is probably much more complicated. 3024 if (isa<AllocaInst>(It) || isa<CastInst>(It)) 3025 continue; 3026 if (auto *Store = dyn_cast<StoreInst>(It)) { 3027 // The store destination must be an alloca that isn't interesting for 3028 // ASan to instrument. These are moved up before InsBefore, and they're 3029 // not interesting because allocas for arguments can be mem2reg'd. 3030 auto *Alloca = dyn_cast<AllocaInst>(Store->getPointerOperand()); 3031 if (!Alloca || ASan.isInterestingAlloca(*Alloca)) 3032 continue; 3033 3034 Value *Val = Store->getValueOperand(); 3035 bool IsDirectArgInit = isa<Argument>(Val); 3036 bool IsArgInitViaCast = 3037 isa<CastInst>(Val) && 3038 isa<Argument>(cast<CastInst>(Val)->getOperand(0)) && 3039 // Check that the cast appears directly before the store. Otherwise 3040 // moving the cast before InsBefore may break the IR. 3041 Val == It->getPrevNonDebugInstruction(); 3042 bool IsArgInit = IsDirectArgInit || IsArgInitViaCast; 3043 if (!IsArgInit) 3044 continue; 3045 3046 if (IsArgInitViaCast) 3047 InitInsts.push_back(cast<Instruction>(Val)); 3048 InitInsts.push_back(Store); 3049 continue; 3050 } 3051 3052 // Do not reorder past unknown instructions: argument initialization should 3053 // only involve casts and stores. 3054 return; 3055 } 3056 } 3057 3058 void FunctionStackPoisoner::processStaticAllocas() { 3059 if (AllocaVec.empty()) { 3060 assert(StaticAllocaPoisonCallVec.empty()); 3061 return; 3062 } 3063 3064 int StackMallocIdx = -1; 3065 DebugLoc EntryDebugLocation; 3066 if (auto SP = F.getSubprogram()) 3067 EntryDebugLocation = 3068 DILocation::get(SP->getContext(), SP->getScopeLine(), 0, SP); 3069 3070 Instruction *InsBefore = AllocaVec[0]; 3071 IRBuilder<> IRB(InsBefore); 3072 3073 // Make sure non-instrumented allocas stay in the entry block. Otherwise, 3074 // debug info is broken, because only entry-block allocas are treated as 3075 // regular stack slots. 3076 auto InsBeforeB = InsBefore->getParent(); 3077 assert(InsBeforeB == &F.getEntryBlock()); 3078 for (auto *AI : StaticAllocasToMoveUp) 3079 if (AI->getParent() == InsBeforeB) 3080 AI->moveBefore(InsBefore); 3081 3082 // Move stores of arguments into entry-block allocas as well. This prevents 3083 // extra stack slots from being generated (to house the argument values until 3084 // they can be stored into the allocas). This also prevents uninitialized 3085 // values from being shown in backtraces. 3086 SmallVector<Instruction *, 8> ArgInitInsts; 3087 findStoresToUninstrumentedArgAllocas(ASan, *InsBefore, ArgInitInsts); 3088 for (Instruction *ArgInitInst : ArgInitInsts) 3089 ArgInitInst->moveBefore(InsBefore); 3090 3091 // If we have a call to llvm.localescape, keep it in the entry block. 3092 if (LocalEscapeCall) LocalEscapeCall->moveBefore(InsBefore); 3093 3094 SmallVector<ASanStackVariableDescription, 16> SVD; 3095 SVD.reserve(AllocaVec.size()); 3096 for (AllocaInst *AI : AllocaVec) { 3097 ASanStackVariableDescription D = {AI->getName().data(), 3098 ASan.getAllocaSizeInBytes(*AI), 3099 0, 3100 AI->getAlign().value(), 3101 AI, 3102 0, 3103 0}; 3104 SVD.push_back(D); 3105 } 3106 3107 // Minimal header size (left redzone) is 4 pointers, 3108 // i.e. 32 bytes on 64-bit platforms and 16 bytes in 32-bit platforms. 3109 uint64_t Granularity = 1ULL << Mapping.Scale; 3110 uint64_t MinHeaderSize = std::max((uint64_t)ASan.LongSize / 2, Granularity); 3111 const ASanStackFrameLayout &L = 3112 ComputeASanStackFrameLayout(SVD, Granularity, MinHeaderSize); 3113 3114 // Build AllocaToSVDMap for ASanStackVariableDescription lookup. 3115 DenseMap<const AllocaInst *, ASanStackVariableDescription *> AllocaToSVDMap; 3116 for (auto &Desc : SVD) 3117 AllocaToSVDMap[Desc.AI] = &Desc; 3118 3119 // Update SVD with information from lifetime intrinsics. 3120 for (const auto &APC : StaticAllocaPoisonCallVec) { 3121 assert(APC.InsBefore); 3122 assert(APC.AI); 3123 assert(ASan.isInterestingAlloca(*APC.AI)); 3124 assert(APC.AI->isStaticAlloca()); 3125 3126 ASanStackVariableDescription &Desc = *AllocaToSVDMap[APC.AI]; 3127 Desc.LifetimeSize = Desc.Size; 3128 if (const DILocation *FnLoc = EntryDebugLocation.get()) { 3129 if (const DILocation *LifetimeLoc = APC.InsBefore->getDebugLoc().get()) { 3130 if (LifetimeLoc->getFile() == FnLoc->getFile()) 3131 if (unsigned Line = LifetimeLoc->getLine()) 3132 Desc.Line = std::min(Desc.Line ? Desc.Line : Line, Line); 3133 } 3134 } 3135 } 3136 3137 auto DescriptionString = ComputeASanStackFrameDescription(SVD); 3138 LLVM_DEBUG(dbgs() << DescriptionString << " --- " << L.FrameSize << "\n"); 3139 uint64_t LocalStackSize = L.FrameSize; 3140 bool DoStackMalloc = 3141 ASan.UseAfterReturn != AsanDetectStackUseAfterReturnMode::Never && 3142 !ASan.CompileKernel && LocalStackSize <= kMaxStackMallocSize; 3143 bool DoDynamicAlloca = ClDynamicAllocaStack; 3144 // Don't do dynamic alloca or stack malloc if: 3145 // 1) There is inline asm: too often it makes assumptions on which registers 3146 // are available. 3147 // 2) There is a returns_twice call (typically setjmp), which is 3148 // optimization-hostile, and doesn't play well with introduced indirect 3149 // register-relative calculation of local variable addresses. 3150 DoDynamicAlloca &= !HasInlineAsm && !HasReturnsTwiceCall; 3151 DoStackMalloc &= !HasInlineAsm && !HasReturnsTwiceCall; 3152 3153 Value *StaticAlloca = 3154 DoDynamicAlloca ? nullptr : createAllocaForLayout(IRB, L, false); 3155 3156 Value *FakeStack; 3157 Value *LocalStackBase; 3158 Value *LocalStackBaseAlloca; 3159 uint8_t DIExprFlags = DIExpression::ApplyOffset; 3160 3161 if (DoStackMalloc) { 3162 LocalStackBaseAlloca = 3163 IRB.CreateAlloca(IntptrTy, nullptr, "asan_local_stack_base"); 3164 if (ASan.UseAfterReturn == AsanDetectStackUseAfterReturnMode::Runtime) { 3165 // void *FakeStack = __asan_option_detect_stack_use_after_return 3166 // ? __asan_stack_malloc_N(LocalStackSize) 3167 // : nullptr; 3168 // void *LocalStackBase = (FakeStack) ? FakeStack : 3169 // alloca(LocalStackSize); 3170 Constant *OptionDetectUseAfterReturn = F.getParent()->getOrInsertGlobal( 3171 kAsanOptionDetectUseAfterReturn, IRB.getInt32Ty()); 3172 Value *UseAfterReturnIsEnabled = IRB.CreateICmpNE( 3173 IRB.CreateLoad(IRB.getInt32Ty(), OptionDetectUseAfterReturn), 3174 Constant::getNullValue(IRB.getInt32Ty())); 3175 Instruction *Term = 3176 SplitBlockAndInsertIfThen(UseAfterReturnIsEnabled, InsBefore, false); 3177 IRBuilder<> IRBIf(Term); 3178 StackMallocIdx = StackMallocSizeClass(LocalStackSize); 3179 assert(StackMallocIdx <= kMaxAsanStackMallocSizeClass); 3180 Value *FakeStackValue = 3181 IRBIf.CreateCall(AsanStackMallocFunc[StackMallocIdx], 3182 ConstantInt::get(IntptrTy, LocalStackSize)); 3183 IRB.SetInsertPoint(InsBefore); 3184 FakeStack = createPHI(IRB, UseAfterReturnIsEnabled, FakeStackValue, Term, 3185 ConstantInt::get(IntptrTy, 0)); 3186 } else { 3187 // assert(ASan.UseAfterReturn == AsanDetectStackUseAfterReturnMode:Always) 3188 // void *FakeStack = __asan_stack_malloc_N(LocalStackSize); 3189 // void *LocalStackBase = (FakeStack) ? FakeStack : 3190 // alloca(LocalStackSize); 3191 StackMallocIdx = StackMallocSizeClass(LocalStackSize); 3192 FakeStack = IRB.CreateCall(AsanStackMallocFunc[StackMallocIdx], 3193 ConstantInt::get(IntptrTy, LocalStackSize)); 3194 } 3195 Value *NoFakeStack = 3196 IRB.CreateICmpEQ(FakeStack, Constant::getNullValue(IntptrTy)); 3197 Instruction *Term = 3198 SplitBlockAndInsertIfThen(NoFakeStack, InsBefore, false); 3199 IRBuilder<> IRBIf(Term); 3200 Value *AllocaValue = 3201 DoDynamicAlloca ? createAllocaForLayout(IRBIf, L, true) : StaticAlloca; 3202 3203 IRB.SetInsertPoint(InsBefore); 3204 LocalStackBase = createPHI(IRB, NoFakeStack, AllocaValue, Term, FakeStack); 3205 IRB.CreateStore(LocalStackBase, LocalStackBaseAlloca); 3206 DIExprFlags |= DIExpression::DerefBefore; 3207 } else { 3208 // void *FakeStack = nullptr; 3209 // void *LocalStackBase = alloca(LocalStackSize); 3210 FakeStack = ConstantInt::get(IntptrTy, 0); 3211 LocalStackBase = 3212 DoDynamicAlloca ? createAllocaForLayout(IRB, L, true) : StaticAlloca; 3213 LocalStackBaseAlloca = LocalStackBase; 3214 } 3215 3216 // It shouldn't matter whether we pass an `alloca` or a `ptrtoint` as the 3217 // dbg.declare address opereand, but passing a `ptrtoint` seems to confuse 3218 // later passes and can result in dropped variable coverage in debug info. 3219 Value *LocalStackBaseAllocaPtr = 3220 isa<PtrToIntInst>(LocalStackBaseAlloca) 3221 ? cast<PtrToIntInst>(LocalStackBaseAlloca)->getPointerOperand() 3222 : LocalStackBaseAlloca; 3223 assert(isa<AllocaInst>(LocalStackBaseAllocaPtr) && 3224 "Variable descriptions relative to ASan stack base will be dropped"); 3225 3226 // Replace Alloca instructions with base+offset. 3227 for (const auto &Desc : SVD) { 3228 AllocaInst *AI = Desc.AI; 3229 replaceDbgDeclare(AI, LocalStackBaseAllocaPtr, DIB, DIExprFlags, 3230 Desc.Offset); 3231 Value *NewAllocaPtr = IRB.CreateIntToPtr( 3232 IRB.CreateAdd(LocalStackBase, ConstantInt::get(IntptrTy, Desc.Offset)), 3233 AI->getType()); 3234 AI->replaceAllUsesWith(NewAllocaPtr); 3235 } 3236 3237 // The left-most redzone has enough space for at least 4 pointers. 3238 // Write the Magic value to redzone[0]. 3239 Value *BasePlus0 = IRB.CreateIntToPtr(LocalStackBase, IntptrPtrTy); 3240 IRB.CreateStore(ConstantInt::get(IntptrTy, kCurrentStackFrameMagic), 3241 BasePlus0); 3242 // Write the frame description constant to redzone[1]. 3243 Value *BasePlus1 = IRB.CreateIntToPtr( 3244 IRB.CreateAdd(LocalStackBase, 3245 ConstantInt::get(IntptrTy, ASan.LongSize / 8)), 3246 IntptrPtrTy); 3247 GlobalVariable *StackDescriptionGlobal = 3248 createPrivateGlobalForString(*F.getParent(), DescriptionString, 3249 /*AllowMerging*/ true, kAsanGenPrefix); 3250 Value *Description = IRB.CreatePointerCast(StackDescriptionGlobal, IntptrTy); 3251 IRB.CreateStore(Description, BasePlus1); 3252 // Write the PC to redzone[2]. 3253 Value *BasePlus2 = IRB.CreateIntToPtr( 3254 IRB.CreateAdd(LocalStackBase, 3255 ConstantInt::get(IntptrTy, 2 * ASan.LongSize / 8)), 3256 IntptrPtrTy); 3257 IRB.CreateStore(IRB.CreatePointerCast(&F, IntptrTy), BasePlus2); 3258 3259 const auto &ShadowAfterScope = GetShadowBytesAfterScope(SVD, L); 3260 3261 // Poison the stack red zones at the entry. 3262 Value *ShadowBase = ASan.memToShadow(LocalStackBase, IRB); 3263 // As mask we must use most poisoned case: red zones and after scope. 3264 // As bytes we can use either the same or just red zones only. 3265 copyToShadow(ShadowAfterScope, ShadowAfterScope, IRB, ShadowBase); 3266 3267 if (!StaticAllocaPoisonCallVec.empty()) { 3268 const auto &ShadowInScope = GetShadowBytes(SVD, L); 3269 3270 // Poison static allocas near lifetime intrinsics. 3271 for (const auto &APC : StaticAllocaPoisonCallVec) { 3272 const ASanStackVariableDescription &Desc = *AllocaToSVDMap[APC.AI]; 3273 assert(Desc.Offset % L.Granularity == 0); 3274 size_t Begin = Desc.Offset / L.Granularity; 3275 size_t End = Begin + (APC.Size + L.Granularity - 1) / L.Granularity; 3276 3277 IRBuilder<> IRB(APC.InsBefore); 3278 copyToShadow(ShadowAfterScope, 3279 APC.DoPoison ? ShadowAfterScope : ShadowInScope, Begin, End, 3280 IRB, ShadowBase); 3281 } 3282 } 3283 3284 SmallVector<uint8_t, 64> ShadowClean(ShadowAfterScope.size(), 0); 3285 SmallVector<uint8_t, 64> ShadowAfterReturn; 3286 3287 // (Un)poison the stack before all ret instructions. 3288 for (Instruction *Ret : RetVec) { 3289 IRBuilder<> IRBRet(Ret); 3290 // Mark the current frame as retired. 3291 IRBRet.CreateStore(ConstantInt::get(IntptrTy, kRetiredStackFrameMagic), 3292 BasePlus0); 3293 if (DoStackMalloc) { 3294 assert(StackMallocIdx >= 0); 3295 // if FakeStack != 0 // LocalStackBase == FakeStack 3296 // // In use-after-return mode, poison the whole stack frame. 3297 // if StackMallocIdx <= 4 3298 // // For small sizes inline the whole thing: 3299 // memset(ShadowBase, kAsanStackAfterReturnMagic, ShadowSize); 3300 // **SavedFlagPtr(FakeStack) = 0 3301 // else 3302 // __asan_stack_free_N(FakeStack, LocalStackSize) 3303 // else 3304 // <This is not a fake stack; unpoison the redzones> 3305 Value *Cmp = 3306 IRBRet.CreateICmpNE(FakeStack, Constant::getNullValue(IntptrTy)); 3307 Instruction *ThenTerm, *ElseTerm; 3308 SplitBlockAndInsertIfThenElse(Cmp, Ret, &ThenTerm, &ElseTerm); 3309 3310 IRBuilder<> IRBPoison(ThenTerm); 3311 if (StackMallocIdx <= 4) { 3312 int ClassSize = kMinStackMallocSize << StackMallocIdx; 3313 ShadowAfterReturn.resize(ClassSize / L.Granularity, 3314 kAsanStackUseAfterReturnMagic); 3315 copyToShadow(ShadowAfterReturn, ShadowAfterReturn, IRBPoison, 3316 ShadowBase); 3317 Value *SavedFlagPtrPtr = IRBPoison.CreateAdd( 3318 FakeStack, 3319 ConstantInt::get(IntptrTy, ClassSize - ASan.LongSize / 8)); 3320 Value *SavedFlagPtr = IRBPoison.CreateLoad( 3321 IntptrTy, IRBPoison.CreateIntToPtr(SavedFlagPtrPtr, IntptrPtrTy)); 3322 IRBPoison.CreateStore( 3323 Constant::getNullValue(IRBPoison.getInt8Ty()), 3324 IRBPoison.CreateIntToPtr(SavedFlagPtr, IRBPoison.getInt8PtrTy())); 3325 } else { 3326 // For larger frames call __asan_stack_free_*. 3327 IRBPoison.CreateCall( 3328 AsanStackFreeFunc[StackMallocIdx], 3329 {FakeStack, ConstantInt::get(IntptrTy, LocalStackSize)}); 3330 } 3331 3332 IRBuilder<> IRBElse(ElseTerm); 3333 copyToShadow(ShadowAfterScope, ShadowClean, IRBElse, ShadowBase); 3334 } else { 3335 copyToShadow(ShadowAfterScope, ShadowClean, IRBRet, ShadowBase); 3336 } 3337 } 3338 3339 // We are done. Remove the old unused alloca instructions. 3340 for (auto AI : AllocaVec) AI->eraseFromParent(); 3341 } 3342 3343 void FunctionStackPoisoner::poisonAlloca(Value *V, uint64_t Size, 3344 IRBuilder<> &IRB, bool DoPoison) { 3345 // For now just insert the call to ASan runtime. 3346 Value *AddrArg = IRB.CreatePointerCast(V, IntptrTy); 3347 Value *SizeArg = ConstantInt::get(IntptrTy, Size); 3348 IRB.CreateCall( 3349 DoPoison ? AsanPoisonStackMemoryFunc : AsanUnpoisonStackMemoryFunc, 3350 {AddrArg, SizeArg}); 3351 } 3352 3353 // Handling llvm.lifetime intrinsics for a given %alloca: 3354 // (1) collect all llvm.lifetime.xxx(%size, %value) describing the alloca. 3355 // (2) if %size is constant, poison memory for llvm.lifetime.end (to detect 3356 // invalid accesses) and unpoison it for llvm.lifetime.start (the memory 3357 // could be poisoned by previous llvm.lifetime.end instruction, as the 3358 // variable may go in and out of scope several times, e.g. in loops). 3359 // (3) if we poisoned at least one %alloca in a function, 3360 // unpoison the whole stack frame at function exit. 3361 void FunctionStackPoisoner::handleDynamicAllocaCall(AllocaInst *AI) { 3362 IRBuilder<> IRB(AI); 3363 3364 const Align Alignment = std::max(Align(kAllocaRzSize), AI->getAlign()); 3365 const uint64_t AllocaRedzoneMask = kAllocaRzSize - 1; 3366 3367 Value *Zero = Constant::getNullValue(IntptrTy); 3368 Value *AllocaRzSize = ConstantInt::get(IntptrTy, kAllocaRzSize); 3369 Value *AllocaRzMask = ConstantInt::get(IntptrTy, AllocaRedzoneMask); 3370 3371 // Since we need to extend alloca with additional memory to locate 3372 // redzones, and OldSize is number of allocated blocks with 3373 // ElementSize size, get allocated memory size in bytes by 3374 // OldSize * ElementSize. 3375 const unsigned ElementSize = 3376 F.getParent()->getDataLayout().getTypeAllocSize(AI->getAllocatedType()); 3377 Value *OldSize = 3378 IRB.CreateMul(IRB.CreateIntCast(AI->getArraySize(), IntptrTy, false), 3379 ConstantInt::get(IntptrTy, ElementSize)); 3380 3381 // PartialSize = OldSize % 32 3382 Value *PartialSize = IRB.CreateAnd(OldSize, AllocaRzMask); 3383 3384 // Misalign = kAllocaRzSize - PartialSize; 3385 Value *Misalign = IRB.CreateSub(AllocaRzSize, PartialSize); 3386 3387 // PartialPadding = Misalign != kAllocaRzSize ? Misalign : 0; 3388 Value *Cond = IRB.CreateICmpNE(Misalign, AllocaRzSize); 3389 Value *PartialPadding = IRB.CreateSelect(Cond, Misalign, Zero); 3390 3391 // AdditionalChunkSize = Alignment + PartialPadding + kAllocaRzSize 3392 // Alignment is added to locate left redzone, PartialPadding for possible 3393 // partial redzone and kAllocaRzSize for right redzone respectively. 3394 Value *AdditionalChunkSize = IRB.CreateAdd( 3395 ConstantInt::get(IntptrTy, Alignment.value() + kAllocaRzSize), 3396 PartialPadding); 3397 3398 Value *NewSize = IRB.CreateAdd(OldSize, AdditionalChunkSize); 3399 3400 // Insert new alloca with new NewSize and Alignment params. 3401 AllocaInst *NewAlloca = IRB.CreateAlloca(IRB.getInt8Ty(), NewSize); 3402 NewAlloca->setAlignment(Alignment); 3403 3404 // NewAddress = Address + Alignment 3405 Value *NewAddress = 3406 IRB.CreateAdd(IRB.CreatePtrToInt(NewAlloca, IntptrTy), 3407 ConstantInt::get(IntptrTy, Alignment.value())); 3408 3409 // Insert __asan_alloca_poison call for new created alloca. 3410 IRB.CreateCall(AsanAllocaPoisonFunc, {NewAddress, OldSize}); 3411 3412 // Store the last alloca's address to DynamicAllocaLayout. We'll need this 3413 // for unpoisoning stuff. 3414 IRB.CreateStore(IRB.CreatePtrToInt(NewAlloca, IntptrTy), DynamicAllocaLayout); 3415 3416 Value *NewAddressPtr = IRB.CreateIntToPtr(NewAddress, AI->getType()); 3417 3418 // Replace all uses of AddessReturnedByAlloca with NewAddressPtr. 3419 AI->replaceAllUsesWith(NewAddressPtr); 3420 3421 // We are done. Erase old alloca from parent. 3422 AI->eraseFromParent(); 3423 } 3424 3425 // isSafeAccess returns true if Addr is always inbounds with respect to its 3426 // base object. For example, it is a field access or an array access with 3427 // constant inbounds index. 3428 bool AddressSanitizer::isSafeAccess(ObjectSizeOffsetVisitor &ObjSizeVis, 3429 Value *Addr, uint64_t TypeSize) const { 3430 SizeOffsetType SizeOffset = ObjSizeVis.compute(Addr); 3431 if (!ObjSizeVis.bothKnown(SizeOffset)) return false; 3432 uint64_t Size = SizeOffset.first.getZExtValue(); 3433 int64_t Offset = SizeOffset.second.getSExtValue(); 3434 // Three checks are required to ensure safety: 3435 // . Offset >= 0 (since the offset is given from the base ptr) 3436 // . Size >= Offset (unsigned) 3437 // . Size - Offset >= NeededSize (unsigned) 3438 return Offset >= 0 && Size >= uint64_t(Offset) && 3439 Size - uint64_t(Offset) >= TypeSize / 8; 3440 } 3441