//===- AddressSanitizer.cpp - memory error detector -----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address basic correctness
// checker.
// Details of the algorithm:
//  https://github.com/google/sanitizers/wiki/AddressSanitizerAlgorithm
//
// FIXME: This sanitizer does not yet handle scalable vectors
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Instrumentation/AddressSanitizer.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/StackSafetyAnalysis.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/BinaryFormat/MachO.h"
#include "llvm/Demangle/Demangle.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Comdat.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCSectionMachO.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/TargetParser/Triple.h"
#include "llvm/Transforms/Instrumentation.h"
#include "llvm/Transforms/Instrumentation/AddressSanitizerCommon.h"
#include "llvm/Transforms/Instrumentation/AddressSanitizerOptions.h"
#include "llvm/Transforms/Utils/ASanStackFrameLayout.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iomanip>
#include <limits>
#include <sstream>
#include <string>
#include <tuple>

using namespace llvm;

#define DEBUG_TYPE "asan"

static const uint64_t kDefaultShadowScale = 3;
static const uint64_t kDefaultShadowOffset32 = 1ULL << 29;
static const uint64_t kDefaultShadowOffset64 = 1ULL << 44;
static const uint64_t kDynamicShadowSentinel =
    std::numeric_limits<uint64_t>::max();
static const uint64_t kSmallX86_64ShadowOffsetBase = 0x7FFFFFFF; // < 2G.
static const uint64_t kSmallX86_64ShadowOffsetAlignMask = ~0xFFFULL;
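// Note: with the default shadow scale of 3, the masked base above folds to
// 0x7fff8000 (0x7FFFFFFF & (~0xFFFULL << 3) == 0x7fff8000), the classic ASan
// shadow offset on Linux x86-64; see getShadowMapping below.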
static const uint64_t kLinuxKasan_ShadowOffset64 = 0xdffffc0000000000;
static const uint64_t kPPC64_ShadowOffset64 = 1ULL << 44;
static const uint64_t kSystemZ_ShadowOffset64 = 1ULL << 52;
static const uint64_t kMIPS_ShadowOffsetN32 = 1ULL << 29;
static const uint64_t kMIPS32_ShadowOffset32 = 0x0aaa0000;
static const uint64_t kMIPS64_ShadowOffset64 = 1ULL << 37;
static const uint64_t kAArch64_ShadowOffset64 = 1ULL << 36;
static const uint64_t kLoongArch64_ShadowOffset64 = 1ULL << 46;
static const uint64_t kRISCV64_ShadowOffset64 = 0xd55550000;
static const uint64_t kFreeBSD_ShadowOffset32 = 1ULL << 30;
static const uint64_t kFreeBSD_ShadowOffset64 = 1ULL << 46;
static const uint64_t kFreeBSDAArch64_ShadowOffset64 = 1ULL << 47;
static const uint64_t kFreeBSDKasan_ShadowOffset64 = 0xdffff7c000000000;
static const uint64_t kNetBSD_ShadowOffset32 = 1ULL << 30;
static const uint64_t kNetBSD_ShadowOffset64 = 1ULL << 46;
static const uint64_t kNetBSDKasan_ShadowOffset64 = 0xdfff900000000000;
static const uint64_t kPS_ShadowOffset64 = 1ULL << 40;
static const uint64_t kWindowsShadowOffset32 = 3ULL << 28;
static const uint64_t kEmscriptenShadowOffset = 0;

// The shadow memory space is dynamically allocated.
static const uint64_t kWindowsShadowOffset64 = kDynamicShadowSentinel;

static const size_t kMinStackMallocSize = 1 << 6;  // 64B
static const size_t kMaxStackMallocSize = 1 << 16; // 64K
static const uintptr_t kCurrentStackFrameMagic = 0x41B58AB3;
static const uintptr_t kRetiredStackFrameMagic = 0x45E0360E;

const char kAsanModuleCtorName[] = "asan.module_ctor";
const char kAsanModuleDtorName[] = "asan.module_dtor";
static const uint64_t kAsanCtorAndDtorPriority = 1;
// On Emscripten, the system needs more than one priority for constructors.
static const uint64_t kAsanEmscriptenCtorAndDtorPriority = 50;
const char kAsanReportErrorTemplate[] = "__asan_report_";
const char kAsanRegisterGlobalsName[] = "__asan_register_globals";
const char kAsanUnregisterGlobalsName[] = "__asan_unregister_globals";
const char kAsanRegisterImageGlobalsName[] = "__asan_register_image_globals";
const char kAsanUnregisterImageGlobalsName[] =
    "__asan_unregister_image_globals";
const char kAsanRegisterElfGlobalsName[] = "__asan_register_elf_globals";
const char kAsanUnregisterElfGlobalsName[] = "__asan_unregister_elf_globals";
const char kAsanPoisonGlobalsName[] = "__asan_before_dynamic_init";
const char kAsanUnpoisonGlobalsName[] = "__asan_after_dynamic_init";
const char kAsanInitName[] = "__asan_init";
const char kAsanVersionCheckNamePrefix[] = "__asan_version_mismatch_check_v";
const char kAsanPtrCmp[] = "__sanitizer_ptr_cmp";
const char kAsanPtrSub[] = "__sanitizer_ptr_sub";
const char kAsanHandleNoReturnName[] = "__asan_handle_no_return";
static const int kMaxAsanStackMallocSizeClass = 10;
const char kAsanStackMallocNameTemplate[] = "__asan_stack_malloc_";
const char kAsanStackMallocAlwaysNameTemplate[] =
    "__asan_stack_malloc_always_";
const char kAsanStackFreeNameTemplate[] = "__asan_stack_free_";
const char kAsanGenPrefix[] = "___asan_gen_";
const char kODRGenPrefix[] = "__odr_asan_gen_";
const char kSanCovGenPrefix[] = "__sancov_gen_";
const char kAsanSetShadowPrefix[] = "__asan_set_shadow_";
const char kAsanPoisonStackMemoryName[] = "__asan_poison_stack_memory";
const char kAsanUnpoisonStackMemoryName[] = "__asan_unpoison_stack_memory";

// ASan version script has __asan_* wildcard. Triple underscore prevents a
// linker (gold) warning about attempting to export a local symbol.
const char kAsanGlobalsRegisteredFlagName[] = "___asan_globals_registered";

const char kAsanOptionDetectUseAfterReturn[] =
    "__asan_option_detect_stack_use_after_return";

const char kAsanShadowMemoryDynamicAddress[] =
    "__asan_shadow_memory_dynamic_address";

const char kAsanAllocaPoison[] = "__asan_alloca_poison";
const char kAsanAllocasUnpoison[] = "__asan_allocas_unpoison";

const char kAMDGPUAddressSharedName[] = "llvm.amdgcn.is.shared";
const char kAMDGPUAddressPrivateName[] = "llvm.amdgcn.is.private";
const char kAMDGPUBallotName[] = "llvm.amdgcn.ballot.i64";
const char kAMDGPUUnreachableName[] = "llvm.amdgcn.unreachable";

// Access sizes are powers of two: 1, 2, 4, 8, 16.
static const size_t kNumberOfAccessSizes = 5;

static const uint64_t kAllocaRzSize = 32;

// ASanAccessInfo implementation constants.
constexpr size_t kCompileKernelShift = 0;
constexpr size_t kCompileKernelMask = 0x1;
constexpr size_t kAccessSizeIndexShift = 1;
constexpr size_t kAccessSizeIndexMask = 0xf;
constexpr size_t kIsWriteShift = 5;
constexpr size_t kIsWriteMask = 0x1;
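// For example, a 4-byte userspace write (AccessSizeIndex == 2, IsWrite == 1,
// CompileKernel == 0) packs to (1 << 5) + (2 << 1) == 0x24.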

// Command-line flags.

static cl::opt<bool> ClEnableKasan(
    "asan-kernel", cl::desc("Enable KernelAddressSanitizer instrumentation"),
    cl::Hidden, cl::init(false));

static cl::opt<bool> ClRecover(
    "asan-recover",
    cl::desc("Enable recovery mode (continue-after-error)."),
    cl::Hidden, cl::init(false));

static cl::opt<bool> ClInsertVersionCheck(
    "asan-guard-against-version-mismatch",
    cl::desc("Guard against compiler/runtime version mismatch."), cl::Hidden,
    cl::init(true));

// This flag may need to be replaced with -f[no-]asan-reads.
static cl::opt<bool> ClInstrumentReads("asan-instrument-reads",
                                       cl::desc("instrument read instructions"),
                                       cl::Hidden, cl::init(true));

static cl::opt<bool> ClInstrumentWrites(
    "asan-instrument-writes", cl::desc("instrument write instructions"),
    cl::Hidden, cl::init(true));

static cl::opt<bool>
    ClUseStackSafety("asan-use-stack-safety", cl::Hidden, cl::init(true),
                     cl::desc("Use Stack Safety analysis results"),
                     cl::Optional);

static cl::opt<bool> ClInstrumentAtomics(
    "asan-instrument-atomics",
    cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden,
    cl::init(true));

static cl::opt<bool>
    ClInstrumentByval("asan-instrument-byval",
                      cl::desc("instrument byval call arguments"), cl::Hidden,
                      cl::init(true));

static cl::opt<bool> ClAlwaysSlowPath(
    "asan-always-slow-path",
    cl::desc("use instrumentation with slow path for all accesses"), cl::Hidden,
    cl::init(false));

static cl::opt<bool> ClForceDynamicShadow(
    "asan-force-dynamic-shadow",
    cl::desc("Load shadow address into a local variable for each function"),
    cl::Hidden, cl::init(false));

static cl::opt<bool>
    ClWithIfunc("asan-with-ifunc",
                cl::desc("Access dynamic shadow through an ifunc global on "
                         "platforms that support this"),
                cl::Hidden, cl::init(true));

static cl::opt<bool> ClWithIfuncSuppressRemat(
    "asan-with-ifunc-suppress-remat",
    cl::desc("Suppress rematerialization of dynamic shadow address by passing "
             "it through inline asm in prologue."),
    cl::Hidden, cl::init(true));

// This flag limits the number of instructions to be instrumented
// in any given BB. Normally, this should be set to unlimited (INT_MAX),
// but due to http://llvm.org/bugs/show_bug.cgi?id=12652 we temporarily
// set it to 10000.
static cl::opt<int> ClMaxInsnsToInstrumentPerBB(
    "asan-max-ins-per-bb", cl::init(10000),
    cl::desc("maximal number of instructions to instrument in any given BB"),
    cl::Hidden);

// This flag may need to be replaced with -f[no]asan-stack.
static cl::opt<bool> ClStack("asan-stack", cl::desc("Handle stack memory"),
                             cl::Hidden, cl::init(true));
static cl::opt<uint32_t> ClMaxInlinePoisoningSize(
    "asan-max-inline-poisoning-size",
    cl::desc(
        "Inline shadow poisoning for blocks up to the given size in bytes."),
    cl::Hidden, cl::init(64));

static cl::opt<AsanDetectStackUseAfterReturnMode> ClUseAfterReturn(
    "asan-use-after-return",
    cl::desc("Sets the mode of detection for stack-use-after-return."),
    cl::values(
        clEnumValN(AsanDetectStackUseAfterReturnMode::Never, "never",
                   "Never detect stack use after return."),
        clEnumValN(
            AsanDetectStackUseAfterReturnMode::Runtime, "runtime",
            "Detect stack use after return if "
            "binary flag 'ASAN_OPTIONS=detect_stack_use_after_return' is set."),
        clEnumValN(AsanDetectStackUseAfterReturnMode::Always, "always",
                   "Always detect stack use after return.")),
    cl::Hidden, cl::init(AsanDetectStackUseAfterReturnMode::Runtime));

static cl::opt<bool> ClRedzoneByvalArgs("asan-redzone-byval-args",
                                        cl::desc("Create redzones for byval "
                                                 "arguments (extra copy "
                                                 "required)"),
                                        cl::Hidden, cl::init(true));

static cl::opt<bool> ClUseAfterScope("asan-use-after-scope",
                                     cl::desc("Check stack-use-after-scope"),
                                     cl::Hidden, cl::init(false));

// This flag may need to be replaced with -f[no]asan-globals.
static cl::opt<bool> ClGlobals("asan-globals",
                               cl::desc("Handle global objects"), cl::Hidden,
                               cl::init(true));

static cl::opt<bool> ClInitializers("asan-initialization-order",
                                    cl::desc("Handle C++ initializer order"),
                                    cl::Hidden, cl::init(true));

static cl::opt<bool> ClInvalidPointerPairs(
    "asan-detect-invalid-pointer-pair",
    cl::desc("Instrument <, <=, >, >=, - with pointer operands"), cl::Hidden,
    cl::init(false));

static cl::opt<bool> ClInvalidPointerCmp(
    "asan-detect-invalid-pointer-cmp",
    cl::desc("Instrument <, <=, >, >= with pointer operands"), cl::Hidden,
    cl::init(false));

static cl::opt<bool> ClInvalidPointerSub(
    "asan-detect-invalid-pointer-sub",
    cl::desc("Instrument - operations with pointer operands"), cl::Hidden,
    cl::init(false));

static cl::opt<unsigned> ClRealignStack(
    "asan-realign-stack",
    cl::desc("Realign stack to the value of this flag (power of two)"),
    cl::Hidden, cl::init(32));

static cl::opt<int> ClInstrumentationWithCallsThreshold(
    "asan-instrumentation-with-call-threshold",
    cl::desc("If the function being instrumented contains more than "
             "this number of memory accesses, use callbacks instead of "
             "inline checks (-1 means never use callbacks)."),
    cl::Hidden, cl::init(7000));

static cl::opt<std::string> ClMemoryAccessCallbackPrefix(
    "asan-memory-access-callback-prefix",
    cl::desc("Prefix for memory access callbacks"), cl::Hidden,
    cl::init("__asan_"));

static cl::opt<bool> ClKasanMemIntrinCallbackPrefix(
    "asan-kernel-mem-intrinsic-prefix",
    cl::desc("Use prefix for memory intrinsics in KASAN mode"), cl::Hidden,
    cl::init(false));

static cl::opt<bool>
    ClInstrumentDynamicAllocas("asan-instrument-dynamic-allocas",
                               cl::desc("instrument dynamic allocas"),
                               cl::Hidden, cl::init(true));

static cl::opt<bool> ClSkipPromotableAllocas(
    "asan-skip-promotable-allocas",
    cl::desc("Do not instrument promotable allocas"), cl::Hidden,
    cl::init(true));

static cl::opt<AsanCtorKind> ClConstructorKind(
    "asan-constructor-kind",
    cl::desc("Sets the ASan constructor kind"),
    cl::values(clEnumValN(AsanCtorKind::None, "none", "No constructors"),
               clEnumValN(AsanCtorKind::Global, "global",
                          "Use global constructors")),
    cl::init(AsanCtorKind::Global), cl::Hidden);
// These flags allow changing the shadow mapping.
// The shadow mapping looks like
//    Shadow = (Mem >> scale) + offset
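// For example, with the default scale of 3 and an offset of 0x7fff8000 (the
// typical Linux x86-64 value), the shadow byte for address Mem lives at
// (Mem >> 3) + 0x7fff8000, and each shadow byte describes the state of 8
// application bytes.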

static cl::opt<int> ClMappingScale("asan-mapping-scale",
                                   cl::desc("scale of asan shadow mapping"),
                                   cl::Hidden, cl::init(0));

static cl::opt<uint64_t>
    ClMappingOffset("asan-mapping-offset",
                    cl::desc("offset of asan shadow mapping [EXPERIMENTAL]"),
                    cl::Hidden, cl::init(0));

// Optimization flags. Not user visible, used mostly for testing
// and benchmarking the tool.

static cl::opt<bool> ClOpt("asan-opt", cl::desc("Optimize instrumentation"),
                           cl::Hidden, cl::init(true));

static cl::opt<bool> ClOptimizeCallbacks("asan-optimize-callbacks",
                                         cl::desc("Optimize callbacks"),
                                         cl::Hidden, cl::init(false));

static cl::opt<bool> ClOptSameTemp(
    "asan-opt-same-temp", cl::desc("Instrument the same temp just once"),
    cl::Hidden, cl::init(true));

static cl::opt<bool> ClOptGlobals("asan-opt-globals",
                                  cl::desc("Don't instrument scalar globals"),
                                  cl::Hidden, cl::init(true));

static cl::opt<bool> ClOptStack(
    "asan-opt-stack", cl::desc("Don't instrument scalar stack variables"),
    cl::Hidden, cl::init(false));

static cl::opt<bool> ClDynamicAllocaStack(
    "asan-stack-dynamic-alloca",
    cl::desc("Use dynamic alloca to represent stack variables"), cl::Hidden,
    cl::init(true));

static cl::opt<uint32_t> ClForceExperiment(
    "asan-force-experiment",
    cl::desc("Force optimization experiment (for testing)"), cl::Hidden,
    cl::init(0));

static cl::opt<bool>
    ClUsePrivateAlias("asan-use-private-alias",
                      cl::desc("Use private aliases for global variables"),
                      cl::Hidden, cl::init(true));

static cl::opt<bool>
    ClUseOdrIndicator("asan-use-odr-indicator",
                      cl::desc("Use odr indicators to improve ODR reporting"),
                      cl::Hidden, cl::init(true));

static cl::opt<bool>
    ClUseGlobalsGC("asan-globals-live-support",
                   cl::desc("Use linker features to support dead "
                            "code stripping of globals"),
                   cl::Hidden, cl::init(true));

// This is on by default even though there is a bug in gold:
// https://sourceware.org/bugzilla/show_bug.cgi?id=19002
static cl::opt<bool>
    ClWithComdat("asan-with-comdat",
                 cl::desc("Place ASan constructors in comdat sections"),
                 cl::Hidden, cl::init(true));

static cl::opt<AsanDtorKind> ClOverrideDestructorKind(
    "asan-destructor-kind",
    cl::desc("Sets the ASan destructor kind. The default is to use the value "
             "provided to the pass constructor"),
    cl::values(clEnumValN(AsanDtorKind::None, "none", "No destructors"),
               clEnumValN(AsanDtorKind::Global, "global",
                          "Use global destructors")),
    cl::init(AsanDtorKind::Invalid), cl::Hidden);

// Debug flags.

static cl::opt<int> ClDebug("asan-debug", cl::desc("debug"), cl::Hidden,
                            cl::init(0));

static cl::opt<int> ClDebugStack("asan-debug-stack", cl::desc("debug stack"),
                                 cl::Hidden, cl::init(0));

static cl::opt<std::string> ClDebugFunc("asan-debug-func", cl::Hidden,
                                        cl::desc("Debug func"));

static cl::opt<int> ClDebugMin("asan-debug-min", cl::desc("Debug min inst"),
                               cl::Hidden, cl::init(-1));

static cl::opt<int> ClDebugMax("asan-debug-max", cl::desc("Debug max inst"),
                               cl::Hidden, cl::init(-1));

STATISTIC(NumInstrumentedReads, "Number of instrumented reads");
STATISTIC(NumInstrumentedWrites, "Number of instrumented writes");
STATISTIC(NumOptimizedAccessesToGlobalVar,
          "Number of optimized accesses to global vars");
STATISTIC(NumOptimizedAccessesToStackVar,
          "Number of optimized accesses to stack vars");

namespace {

/// This struct defines the shadow mapping using the rule:
///   shadow = (mem >> Scale) ADD-or-OR Offset.
/// If InGlobal is true, then
///   extern char __asan_shadow[];
///   shadow = (mem >> Scale) + &__asan_shadow
struct ShadowMapping {
  int Scale;
  uint64_t Offset;
  bool OrShadowOffset;
  bool InGlobal;
};

} // end anonymous namespace

static ShadowMapping getShadowMapping(const Triple &TargetTriple, int LongSize,
                                      bool IsKasan) {
  bool IsAndroid = TargetTriple.isAndroid();
  bool IsIOS = TargetTriple.isiOS() || TargetTriple.isWatchOS() ||
               TargetTriple.isDriverKit();
  bool IsMacOS = TargetTriple.isMacOSX();
  bool IsFreeBSD = TargetTriple.isOSFreeBSD();
  bool IsNetBSD = TargetTriple.isOSNetBSD();
  bool IsPS = TargetTriple.isPS();
  bool IsLinux = TargetTriple.isOSLinux();
  bool IsPPC64 = TargetTriple.getArch() == Triple::ppc64 ||
                 TargetTriple.getArch() == Triple::ppc64le;
  bool IsSystemZ = TargetTriple.getArch() == Triple::systemz;
  bool IsX86_64 = TargetTriple.getArch() == Triple::x86_64;
  bool IsMIPSN32ABI = TargetTriple.getEnvironment() == Triple::GNUABIN32;
  bool IsMIPS32 = TargetTriple.isMIPS32();
  bool IsMIPS64 = TargetTriple.isMIPS64();
  bool IsArmOrThumb = TargetTriple.isARM() || TargetTriple.isThumb();
  bool IsAArch64 = TargetTriple.getArch() == Triple::aarch64 ||
                   TargetTriple.getArch() == Triple::aarch64_be;
  bool IsLoongArch64 = TargetTriple.isLoongArch64();
  bool IsRISCV64 = TargetTriple.getArch() == Triple::riscv64;
  bool IsWindows = TargetTriple.isOSWindows();
  bool IsFuchsia = TargetTriple.isOSFuchsia();
  bool IsEmscripten = TargetTriple.isOSEmscripten();
  bool IsAMDGPU = TargetTriple.isAMDGPU();

  ShadowMapping Mapping;

  Mapping.Scale = kDefaultShadowScale;
  if (ClMappingScale.getNumOccurrences() > 0) {
    Mapping.Scale = ClMappingScale;
  }

  if (LongSize == 32) {
    if (IsAndroid)
      Mapping.Offset = kDynamicShadowSentinel;
    else if (IsMIPSN32ABI)
      Mapping.Offset = kMIPS_ShadowOffsetN32;
    else if (IsMIPS32)
      Mapping.Offset = kMIPS32_ShadowOffset32;
    else if (IsFreeBSD)
      Mapping.Offset = kFreeBSD_ShadowOffset32;
    else if (IsNetBSD)
      Mapping.Offset = kNetBSD_ShadowOffset32;
    else if (IsIOS)
      Mapping.Offset = kDynamicShadowSentinel;
    else if (IsWindows)
      Mapping.Offset = kWindowsShadowOffset32;
    else if (IsEmscripten)
      Mapping.Offset = kEmscriptenShadowOffset;
    else
      Mapping.Offset = kDefaultShadowOffset32;
  } else { // LongSize == 64
    // Fuchsia is always PIE, which means that the beginning of the address
    // space is always available.
    if (IsFuchsia)
      Mapping.Offset = 0;
    else if (IsPPC64)
      Mapping.Offset = kPPC64_ShadowOffset64;
    else if (IsSystemZ)
      Mapping.Offset = kSystemZ_ShadowOffset64;
    else if (IsFreeBSD && IsAArch64)
      Mapping.Offset = kFreeBSDAArch64_ShadowOffset64;
    else if (IsFreeBSD && !IsMIPS64) {
      if (IsKasan)
        Mapping.Offset = kFreeBSDKasan_ShadowOffset64;
      else
        Mapping.Offset = kFreeBSD_ShadowOffset64;
    } else if (IsNetBSD) {
      if (IsKasan)
        Mapping.Offset = kNetBSDKasan_ShadowOffset64;
      else
        Mapping.Offset = kNetBSD_ShadowOffset64;
    } else if (IsPS)
      Mapping.Offset = kPS_ShadowOffset64;
    else if (IsLinux && IsX86_64) {
      if (IsKasan)
        Mapping.Offset = kLinuxKasan_ShadowOffset64;
      else
        Mapping.Offset = (kSmallX86_64ShadowOffsetBase &
                          (kSmallX86_64ShadowOffsetAlignMask << Mapping.Scale));
    } else if (IsWindows && IsX86_64) {
      Mapping.Offset = kWindowsShadowOffset64;
    } else if (IsMIPS64)
      Mapping.Offset = kMIPS64_ShadowOffset64;
    else if (IsIOS)
      Mapping.Offset = kDynamicShadowSentinel;
    else if (IsMacOS && IsAArch64)
      Mapping.Offset = kDynamicShadowSentinel;
    else if (IsAArch64)
      Mapping.Offset = kAArch64_ShadowOffset64;
    else if (IsLoongArch64)
      Mapping.Offset = kLoongArch64_ShadowOffset64;
    else if (IsRISCV64)
      Mapping.Offset = kRISCV64_ShadowOffset64;
    else if (IsAMDGPU)
      Mapping.Offset = (kSmallX86_64ShadowOffsetBase &
                        (kSmallX86_64ShadowOffsetAlignMask << Mapping.Scale));
    else
      Mapping.Offset = kDefaultShadowOffset64;
  }

  if (ClForceDynamicShadow) {
    Mapping.Offset = kDynamicShadowSentinel;
  }

  if (ClMappingOffset.getNumOccurrences() > 0) {
    Mapping.Offset = ClMappingOffset;
  }

  // OR-ing the shadow offset is more efficient (at least on x86) when the
  // offset is a power of two, but on ppc64 and loongarch64 we have to use add
  // since the shadow offset is not necessarily 1/8-th of the address space. On
  // SystemZ, we could OR the constant in a single instruction, but it's more
  // efficient to load it once and use indexed addressing.
  Mapping.OrShadowOffset = !IsAArch64 && !IsPPC64 && !IsSystemZ && !IsPS &&
                           !IsRISCV64 && !IsLoongArch64 &&
                           !(Mapping.Offset & (Mapping.Offset - 1)) &&
                           Mapping.Offset != kDynamicShadowSentinel;
  bool IsAndroidWithIfuncSupport =
      IsAndroid && !TargetTriple.isAndroidVersionLT(21);
  Mapping.InGlobal = ClWithIfunc && IsAndroidWithIfuncSupport && IsArmOrThumb;

  return Mapping;
}

namespace llvm {
void getAddressSanitizerParams(const Triple &TargetTriple, int LongSize,
                               bool IsKasan, uint64_t *ShadowBase,
                               int *MappingScale, bool *OrShadowOffset) {
  auto Mapping = getShadowMapping(TargetTriple, LongSize, IsKasan);
  *ShadowBase = Mapping.Offset;
  *MappingScale = Mapping.Scale;
  *OrShadowOffset = Mapping.OrShadowOffset;
}

ASanAccessInfo::ASanAccessInfo(int32_t Packed)
    : Packed(Packed),
      AccessSizeIndex((Packed >> kAccessSizeIndexShift) & kAccessSizeIndexMask),
      IsWrite((Packed >> kIsWriteShift) & kIsWriteMask),
      CompileKernel((Packed >> kCompileKernelShift) & kCompileKernelMask) {}

ASanAccessInfo::ASanAccessInfo(bool IsWrite, bool CompileKernel,
                               uint8_t AccessSizeIndex)
    : Packed((IsWrite << kIsWriteShift) +
             (CompileKernel << kCompileKernelShift) +
             (AccessSizeIndex << kAccessSizeIndexShift)),
      AccessSizeIndex(AccessSizeIndex), IsWrite(IsWrite),
      CompileKernel(CompileKernel) {}

} // namespace llvm

static uint64_t getRedzoneSizeForScale(int MappingScale) {
  // Redzone used for stack and globals is at least 32 bytes.
  // For scales 6 and 7, the redzone has to be 64 and 128 bytes respectively.
  return std::max(32U, 1U << MappingScale);
}

static uint64_t GetCtorAndDtorPriority(Triple &TargetTriple) {
  if (TargetTriple.isOSEmscripten()) {
    return kAsanEmscriptenCtorAndDtorPriority;
  } else {
    return kAsanCtorAndDtorPriority;
  }
}

namespace {

/// AddressSanitizer: instrument the code in module to find memory bugs.
struct AddressSanitizer {
  AddressSanitizer(Module &M, const StackSafetyGlobalInfo *SSGI,
                   int InstrumentationWithCallsThreshold,
                   uint32_t MaxInlinePoisoningSize, bool CompileKernel = false,
                   bool Recover = false, bool UseAfterScope = false,
                   AsanDetectStackUseAfterReturnMode UseAfterReturn =
                       AsanDetectStackUseAfterReturnMode::Runtime)
      : CompileKernel(ClEnableKasan.getNumOccurrences() > 0 ? ClEnableKasan
                                                            : CompileKernel),
        Recover(ClRecover.getNumOccurrences() > 0 ? ClRecover : Recover),
        UseAfterScope(UseAfterScope || ClUseAfterScope),
        UseAfterReturn(ClUseAfterReturn.getNumOccurrences() ? ClUseAfterReturn
                                                            : UseAfterReturn),
        SSGI(SSGI),
        InstrumentationWithCallsThreshold(
            ClInstrumentationWithCallsThreshold.getNumOccurrences() > 0
                ? ClInstrumentationWithCallsThreshold
                : InstrumentationWithCallsThreshold),
        MaxInlinePoisoningSize(ClMaxInlinePoisoningSize.getNumOccurrences() > 0
                                   ? ClMaxInlinePoisoningSize
                                   : MaxInlinePoisoningSize) {
    C = &(M.getContext());
    DL = &M.getDataLayout();
    LongSize = M.getDataLayout().getPointerSizeInBits();
    IntptrTy = Type::getIntNTy(*C, LongSize);
    PtrTy = PointerType::getUnqual(*C);
    Int32Ty = Type::getInt32Ty(*C);
    TargetTriple = Triple(M.getTargetTriple());

    Mapping = getShadowMapping(TargetTriple, LongSize, this->CompileKernel);

    assert(this->UseAfterReturn != AsanDetectStackUseAfterReturnMode::Invalid);
  }

  TypeSize getAllocaSizeInBytes(const AllocaInst &AI) const {
    return *AI.getAllocationSize(AI.getModule()->getDataLayout());
  }

  /// Check if we want (and can) handle this alloca.
  bool isInterestingAlloca(const AllocaInst &AI);

  bool ignoreAccess(Instruction *Inst, Value *Ptr);
  void getInterestingMemoryOperands(
      Instruction *I, SmallVectorImpl<InterestingMemoryOperand> &Interesting);

  void instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis,
                     InterestingMemoryOperand &O, bool UseCalls,
                     const DataLayout &DL);
  void instrumentPointerComparisonOrSubtraction(Instruction *I);
  void instrumentAddress(Instruction *OrigIns, Instruction *InsertBefore,
                         Value *Addr, MaybeAlign Alignment,
                         uint32_t TypeStoreSize, bool IsWrite,
                         Value *SizeArgument, bool UseCalls, uint32_t Exp);
  Instruction *instrumentAMDGPUAddress(Instruction *OrigIns,
                                       Instruction *InsertBefore, Value *Addr,
                                       uint32_t TypeStoreSize, bool IsWrite,
                                       Value *SizeArgument);
  Instruction *genAMDGPUReportBlock(IRBuilder<> &IRB, Value *Cond,
                                    bool Recover);
  void instrumentUnusualSizeOrAlignment(Instruction *I,
                                        Instruction *InsertBefore, Value *Addr,
                                        TypeSize TypeStoreSize, bool IsWrite,
                                        Value *SizeArgument, bool UseCalls,
                                        uint32_t Exp);
  void instrumentMaskedLoadOrStore(AddressSanitizer *Pass, const DataLayout &DL,
                                   Type *IntptrTy, Value *Mask, Value *EVL,
                                   Value *Stride, Instruction *I, Value *Addr,
                                   MaybeAlign Alignment, unsigned Granularity,
                                   Type *OpType, bool IsWrite,
                                   Value *SizeArgument, bool UseCalls,
                                   uint32_t Exp);
  Value *createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong,
                           Value *ShadowValue, uint32_t TypeStoreSize);
  Instruction *generateCrashCode(Instruction *InsertBefore, Value *Addr,
                                 bool IsWrite, size_t AccessSizeIndex,
                                 Value *SizeArgument, uint32_t Exp);
  void instrumentMemIntrinsic(MemIntrinsic *MI);
  Value *memToShadow(Value *Shadow, IRBuilder<> &IRB);
  bool suppressInstrumentationSiteForDebug(int &Instrumented);
  bool instrumentFunction(Function &F, const TargetLibraryInfo *TLI);
  bool maybeInsertAsanInitAtFunctionEntry(Function &F);
  bool maybeInsertDynamicShadowAtFunctionEntry(Function &F);
  void markEscapedLocalAllocas(Function &F);

private:
  friend struct FunctionStackPoisoner;

  void initializeCallbacks(Module &M, const TargetLibraryInfo *TLI);

  bool LooksLikeCodeInBug11395(Instruction *I);
  bool GlobalIsLinkerInitialized(GlobalVariable *G);
  bool isSafeAccess(ObjectSizeOffsetVisitor &ObjSizeVis, Value *Addr,
                    TypeSize TypeStoreSize) const;

  /// Helper to clean up per-function state.
  struct FunctionStateRAII {
    AddressSanitizer *Pass;

    FunctionStateRAII(AddressSanitizer *Pass) : Pass(Pass) {
      assert(Pass->ProcessedAllocas.empty() &&
             "last pass forgot to clear cache");
      assert(!Pass->LocalDynamicShadow);
    }

    ~FunctionStateRAII() {
      Pass->LocalDynamicShadow = nullptr;
      Pass->ProcessedAllocas.clear();
    }
  };

  LLVMContext *C;
  const DataLayout *DL;
  Triple TargetTriple;
  int LongSize;
  bool CompileKernel;
  bool Recover;
  bool UseAfterScope;
  AsanDetectStackUseAfterReturnMode UseAfterReturn;
  Type *IntptrTy;
  Type *Int32Ty;
  PointerType *PtrTy;
  ShadowMapping Mapping;
  FunctionCallee AsanHandleNoReturnFunc;
  FunctionCallee AsanPtrCmpFunction, AsanPtrSubFunction;
  Constant *AsanShadowGlobal;

  // These arrays are indexed by AccessIsWrite, Experiment and log2(AccessSize).
  FunctionCallee AsanErrorCallback[2][2][kNumberOfAccessSizes];
  FunctionCallee AsanMemoryAccessCallback[2][2][kNumberOfAccessSizes];

  // These arrays are indexed by AccessIsWrite and Experiment.
  FunctionCallee AsanErrorCallbackSized[2][2];
  FunctionCallee AsanMemoryAccessCallbackSized[2][2];

  FunctionCallee AsanMemmove, AsanMemcpy, AsanMemset;
  Value *LocalDynamicShadow = nullptr;
  const StackSafetyGlobalInfo *SSGI;
  DenseMap<const AllocaInst *, bool> ProcessedAllocas;

  FunctionCallee AMDGPUAddressShared;
  FunctionCallee AMDGPUAddressPrivate;
  int InstrumentationWithCallsThreshold;
  uint32_t MaxInlinePoisoningSize;
};

class ModuleAddressSanitizer {
public:
  ModuleAddressSanitizer(Module &M, bool InsertVersionCheck,
                         bool CompileKernel = false, bool Recover = false,
                         bool UseGlobalsGC = true, bool UseOdrIndicator = true,
                         AsanDtorKind DestructorKind = AsanDtorKind::Global,
                         AsanCtorKind ConstructorKind = AsanCtorKind::Global)
      : CompileKernel(ClEnableKasan.getNumOccurrences() > 0 ? ClEnableKasan
                                                            : CompileKernel),
        InsertVersionCheck(ClInsertVersionCheck.getNumOccurrences() > 0
                               ? ClInsertVersionCheck
                               : InsertVersionCheck),
        Recover(ClRecover.getNumOccurrences() > 0 ? ClRecover : Recover),
        UseGlobalsGC(UseGlobalsGC && ClUseGlobalsGC && !this->CompileKernel),
        // Enable aliases as they should have no downside with ODR indicators.
        UsePrivateAlias(ClUsePrivateAlias.getNumOccurrences() > 0
                            ? ClUsePrivateAlias
                            : UseOdrIndicator),
        UseOdrIndicator(ClUseOdrIndicator.getNumOccurrences() > 0
                            ? ClUseOdrIndicator
                            : UseOdrIndicator),
        // Not a typo: ClWithComdat is almost completely pointless without
        // ClUseGlobalsGC (because then it only works on modules without
        // globals, which are rare); it is a prerequisite for ClUseGlobalsGC;
        // and both suffer from gold PR19002 for which UseGlobalsGC constructor
        // argument is designed as workaround. Therefore, disable both
        // ClWithComdat and ClUseGlobalsGC unless the frontend says it's ok to
        // do globals-gc.
        UseCtorComdat(UseGlobalsGC && ClWithComdat && !this->CompileKernel),
        DestructorKind(DestructorKind),
        ConstructorKind(ClConstructorKind.getNumOccurrences() > 0
                            ? ClConstructorKind
                            : ConstructorKind) {
    C = &(M.getContext());
    int LongSize = M.getDataLayout().getPointerSizeInBits();
    IntptrTy = Type::getIntNTy(*C, LongSize);
    PtrTy = PointerType::getUnqual(*C);
    TargetTriple = Triple(M.getTargetTriple());
    Mapping = getShadowMapping(TargetTriple, LongSize, this->CompileKernel);

    if (ClOverrideDestructorKind != AsanDtorKind::Invalid)
      this->DestructorKind = ClOverrideDestructorKind;
    assert(this->DestructorKind != AsanDtorKind::Invalid);
  }

  bool instrumentModule(Module &);

private:
  void initializeCallbacks(Module &M);

  void instrumentGlobals(IRBuilder<> &IRB, Module &M, bool *CtorComdat);
  void InstrumentGlobalsCOFF(IRBuilder<> &IRB, Module &M,
                             ArrayRef<GlobalVariable *> ExtendedGlobals,
                             ArrayRef<Constant *> MetadataInitializers);
  void instrumentGlobalsELF(IRBuilder<> &IRB, Module &M,
                            ArrayRef<GlobalVariable *> ExtendedGlobals,
                            ArrayRef<Constant *> MetadataInitializers,
                            const std::string &UniqueModuleId);
  void InstrumentGlobalsMachO(IRBuilder<> &IRB, Module &M,
                              ArrayRef<GlobalVariable *> ExtendedGlobals,
                              ArrayRef<Constant *> MetadataInitializers);
  void
  InstrumentGlobalsWithMetadataArray(IRBuilder<> &IRB, Module &M,
                                     ArrayRef<GlobalVariable *> ExtendedGlobals,
                                     ArrayRef<Constant *> MetadataInitializers);

  GlobalVariable *CreateMetadataGlobal(Module &M, Constant *Initializer,
                                       StringRef OriginalName);
  void SetComdatForGlobalMetadata(GlobalVariable *G, GlobalVariable *Metadata,
                                  StringRef InternalSuffix);
  Instruction *CreateAsanModuleDtor(Module &M);

  const GlobalVariable *getExcludedAliasedGlobal(const GlobalAlias &GA) const;
  bool shouldInstrumentGlobal(GlobalVariable *G) const;
  bool ShouldUseMachOGlobalsSection() const;
  StringRef getGlobalMetadataSection() const;
  void poisonOneInitializer(Function &GlobalInit, GlobalValue *ModuleName);
  void createInitializerPoisonCalls(Module &M, GlobalValue *ModuleName);
  uint64_t getMinRedzoneSizeForGlobal() const {
    return getRedzoneSizeForScale(Mapping.Scale);
  }
  uint64_t getRedzoneSizeForGlobal(uint64_t SizeInBytes) const;
  int GetAsanVersion(const Module &M) const;

  bool CompileKernel;
  bool InsertVersionCheck;
  bool Recover;
  bool UseGlobalsGC;
  bool UsePrivateAlias;
  bool UseOdrIndicator;
  bool UseCtorComdat;
  AsanDtorKind DestructorKind;
  AsanCtorKind ConstructorKind;
  Type *IntptrTy;
  PointerType *PtrTy;
  LLVMContext *C;
  Triple TargetTriple;
  ShadowMapping Mapping;
  FunctionCallee AsanPoisonGlobals;
  FunctionCallee AsanUnpoisonGlobals;
  FunctionCallee AsanRegisterGlobals;
  FunctionCallee AsanUnregisterGlobals;
  FunctionCallee AsanRegisterImageGlobals;
  FunctionCallee AsanUnregisterImageGlobals;
  FunctionCallee AsanRegisterElfGlobals;
  FunctionCallee AsanUnregisterElfGlobals;

  Function *AsanCtorFunction = nullptr;
  Function *AsanDtorFunction = nullptr;
};

// Stack poisoning does not play well with exception handling.
// When an exception is thrown, we essentially bypass the code
// that unpoisons the stack. This is why the run-time library has
// to intercept __cxa_throw (as well as longjmp, etc) and unpoison the entire
// stack in the interceptor. This however does not work inside the
// actual function which catches the exception. Most likely because the
// compiler hoists the load of the shadow value somewhere too high.
// This causes asan to report a non-existing bug on 453.povray.
// It sounds like an LLVM bug.
struct FunctionStackPoisoner : public InstVisitor<FunctionStackPoisoner> {
  Function &F;
  AddressSanitizer &ASan;
  DIBuilder DIB;
  LLVMContext *C;
  Type *IntptrTy;
  Type *IntptrPtrTy;
  ShadowMapping Mapping;

  SmallVector<AllocaInst *, 16> AllocaVec;
  SmallVector<AllocaInst *, 16> StaticAllocasToMoveUp;
  SmallVector<Instruction *, 8> RetVec;

  FunctionCallee AsanStackMallocFunc[kMaxAsanStackMallocSizeClass + 1],
      AsanStackFreeFunc[kMaxAsanStackMallocSizeClass + 1];
  FunctionCallee AsanSetShadowFunc[0x100] = {};
  FunctionCallee AsanPoisonStackMemoryFunc, AsanUnpoisonStackMemoryFunc;
  FunctionCallee AsanAllocaPoisonFunc, AsanAllocasUnpoisonFunc;

  // Stores a place and arguments of poisoning/unpoisoning call for alloca.
  struct AllocaPoisonCall {
    IntrinsicInst *InsBefore;
    AllocaInst *AI;
    uint64_t Size;
    bool DoPoison;
  };
  SmallVector<AllocaPoisonCall, 8> DynamicAllocaPoisonCallVec;
  SmallVector<AllocaPoisonCall, 8> StaticAllocaPoisonCallVec;
  bool HasUntracedLifetimeIntrinsic = false;

  SmallVector<AllocaInst *, 1> DynamicAllocaVec;
  SmallVector<IntrinsicInst *, 1> StackRestoreVec;
  AllocaInst *DynamicAllocaLayout = nullptr;
  IntrinsicInst *LocalEscapeCall = nullptr;

  bool HasInlineAsm = false;
  bool HasReturnsTwiceCall = false;
  bool PoisonStack;

  FunctionStackPoisoner(Function &F, AddressSanitizer &ASan)
      : F(F), ASan(ASan), DIB(*F.getParent(), /*AllowUnresolved*/ false),
        C(ASan.C), IntptrTy(ASan.IntptrTy),
        IntptrPtrTy(PointerType::get(IntptrTy, 0)), Mapping(ASan.Mapping),
        PoisonStack(ClStack &&
                    !Triple(F.getParent()->getTargetTriple()).isAMDGPU()) {}

  bool runOnFunction() {
    if (!PoisonStack)
      return false;

    if (ClRedzoneByvalArgs)
      copyArgsPassedByValToAllocas();

    // Collect alloca, ret, lifetime instructions etc.
    for (BasicBlock *BB : depth_first(&F.getEntryBlock())) visit(*BB);

    if (AllocaVec.empty() && DynamicAllocaVec.empty()) return false;

    initializeCallbacks(*F.getParent());

    if (HasUntracedLifetimeIntrinsic) {
      // If there are lifetime intrinsics which couldn't be traced back to an
      // alloca, we may not know exactly when a variable enters scope, and
      // therefore should "fail safe" by not poisoning them.
      StaticAllocaPoisonCallVec.clear();
      DynamicAllocaPoisonCallVec.clear();
    }

    processDynamicAllocas();
    processStaticAllocas();

    if (ClDebugStack) {
      LLVM_DEBUG(dbgs() << F);
    }
    return true;
  }

  // Arguments marked with the "byval" attribute are implicitly copied without
  // using an alloca instruction. To produce redzones for those arguments, we
  // copy them a second time into memory allocated with an alloca instruction.
  void copyArgsPassedByValToAllocas();

  // Finds all Alloca instructions and puts
  // poisoned red zones around all of them.
  // Then unpoisons everything before the function returns.
  void processStaticAllocas();
  void processDynamicAllocas();

  void createDynamicAllocasInitStorage();

  // ----------------------- Visitors.
  /// Collect all Ret instructions, or the musttail call instruction if it
  /// precedes the return instruction.
  void visitReturnInst(ReturnInst &RI) {
    if (CallInst *CI = RI.getParent()->getTerminatingMustTailCall())
      RetVec.push_back(CI);
    else
      RetVec.push_back(&RI);
  }

  /// Collect all Resume instructions.
  void visitResumeInst(ResumeInst &RI) { RetVec.push_back(&RI); }

  /// Collect all CleanupReturnInst instructions.
  void visitCleanupReturnInst(CleanupReturnInst &CRI) { RetVec.push_back(&CRI); }

  void unpoisonDynamicAllocasBeforeInst(Instruction *InstBefore,
                                        Value *SavedStack) {
    IRBuilder<> IRB(InstBefore);
    Value *DynamicAreaPtr = IRB.CreatePtrToInt(SavedStack, IntptrTy);
    // When we insert _asan_allocas_unpoison before @llvm.stackrestore, we
    // need to adjust extracted SP to compute the address of the most recent
    // alloca. We have a special @llvm.get.dynamic.area.offset intrinsic for
    // this purpose.
    if (!isa<ReturnInst>(InstBefore)) {
      Function *DynamicAreaOffsetFunc = Intrinsic::getDeclaration(
          InstBefore->getModule(), Intrinsic::get_dynamic_area_offset,
          {IntptrTy});

      Value *DynamicAreaOffset = IRB.CreateCall(DynamicAreaOffsetFunc, {});

      DynamicAreaPtr = IRB.CreateAdd(IRB.CreatePtrToInt(SavedStack, IntptrTy),
                                     DynamicAreaOffset);
    }

    IRB.CreateCall(
        AsanAllocasUnpoisonFunc,
        {IRB.CreateLoad(IntptrTy, DynamicAllocaLayout), DynamicAreaPtr});
  }

  // Unpoison dynamic allocas redzones.
  void unpoisonDynamicAllocas() {
    for (Instruction *Ret : RetVec)
      unpoisonDynamicAllocasBeforeInst(Ret, DynamicAllocaLayout);

    for (Instruction *StackRestoreInst : StackRestoreVec)
      unpoisonDynamicAllocasBeforeInst(StackRestoreInst,
                                       StackRestoreInst->getOperand(0));
  }

  // Deploy and poison redzones around a dynamic alloca call. To do this, we
  // replace the call with another one with changed parameters and replace
  // all its uses with the new address, so that
  //   addr = alloca type, old_size, align
  // is replaced by
  //   new_size = (old_size + additional_size) * sizeof(type)
  //   tmp = alloca i8, new_size, max(align, 32)
  //   addr = tmp + 32 (first 32 bytes are for the left redzone).
  // additional_size is added so that the new allocation contains not only the
  // requested memory, but also the left, partial and right redzones.
  void handleDynamicAllocaCall(AllocaInst *AI);

  /// Collect Alloca instructions we want (and can) handle.
  void visitAllocaInst(AllocaInst &AI) {
    // FIXME: Handle scalable vectors instead of ignoring them.
    if (!ASan.isInterestingAlloca(AI) ||
        isa<ScalableVectorType>(AI.getAllocatedType())) {
      if (AI.isStaticAlloca()) {
        // Skip over allocas that are present *before* the first instrumented
        // alloca; we don't want to move those around.
        if (AllocaVec.empty())
          return;

        StaticAllocasToMoveUp.push_back(&AI);
      }
      return;
    }

    if (!AI.isStaticAlloca())
      DynamicAllocaVec.push_back(&AI);
    else
      AllocaVec.push_back(&AI);
  }

  /// Collect lifetime intrinsic calls to check for use-after-scope
  /// errors.
  void visitIntrinsicInst(IntrinsicInst &II) {
    Intrinsic::ID ID = II.getIntrinsicID();
    if (ID == Intrinsic::stackrestore) StackRestoreVec.push_back(&II);
    if (ID == Intrinsic::localescape) LocalEscapeCall = &II;
    if (!ASan.UseAfterScope)
      return;
    if (!II.isLifetimeStartOrEnd())
      return;
    // Found lifetime intrinsic, add ASan instrumentation if necessary.
    auto *Size = cast<ConstantInt>(II.getArgOperand(0));
    // If the size argument is undefined, don't do anything.
    if (Size->isMinusOne()) return;
    // Check that size doesn't saturate uint64_t and can
    // be stored in IntptrTy.
    const uint64_t SizeValue = Size->getValue().getLimitedValue();
    if (SizeValue == ~0ULL ||
        !ConstantInt::isValueValidForType(IntptrTy, SizeValue))
      return;
    // Find alloca instruction that corresponds to llvm.lifetime argument.
    // Currently we can only handle lifetime markers pointing to the
    // beginning of the alloca.
    AllocaInst *AI = findAllocaForValue(II.getArgOperand(1), true);
    if (!AI) {
      HasUntracedLifetimeIntrinsic = true;
      return;
    }
    // We're interested only in allocas we can handle.
    if (!ASan.isInterestingAlloca(*AI))
      return;
    bool DoPoison = (ID == Intrinsic::lifetime_end);
    AllocaPoisonCall APC = {&II, AI, SizeValue, DoPoison};
    if (AI->isStaticAlloca())
      StaticAllocaPoisonCallVec.push_back(APC);
    else if (ClInstrumentDynamicAllocas)
      DynamicAllocaPoisonCallVec.push_back(APC);
  }

  void visitCallBase(CallBase &CB) {
    if (CallInst *CI = dyn_cast<CallInst>(&CB)) {
      HasInlineAsm |= CI->isInlineAsm() && &CB != ASan.LocalDynamicShadow;
      HasReturnsTwiceCall |= CI->canReturnTwice();
    }
  }

  // ---------------------- Helpers.
  void initializeCallbacks(Module &M);

  // Copies bytes from ShadowBytes into shadow memory for indexes where
  // ShadowMask is not zero. If ShadowMask[i] is zero, we assume that
  // ShadowBytes[i] is constantly zero and doesn't need to be overwritten.
  void copyToShadow(ArrayRef<uint8_t> ShadowMask, ArrayRef<uint8_t> ShadowBytes,
                    IRBuilder<> &IRB, Value *ShadowBase);
  void copyToShadow(ArrayRef<uint8_t> ShadowMask, ArrayRef<uint8_t> ShadowBytes,
                    size_t Begin, size_t End, IRBuilder<> &IRB,
                    Value *ShadowBase);
  void copyToShadowInline(ArrayRef<uint8_t> ShadowMask,
                          ArrayRef<uint8_t> ShadowBytes, size_t Begin,
                          size_t End, IRBuilder<> &IRB, Value *ShadowBase);

  void poisonAlloca(Value *V, uint64_t Size, IRBuilder<> &IRB, bool DoPoison);

  Value *createAllocaForLayout(IRBuilder<> &IRB, const ASanStackFrameLayout &L,
                               bool Dynamic);
  PHINode *createPHI(IRBuilder<> &IRB, Value *Cond, Value *ValueIfTrue,
                     Instruction *ThenTerm, Value *ValueIfFalse);
};

} // end anonymous namespace

void AddressSanitizerPass::printPipeline(
    raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
  static_cast<PassInfoMixin<AddressSanitizerPass> *>(this)->printPipeline(
      OS, MapClassName2PassName);
  OS << '<';
  if (Options.CompileKernel)
    OS << "kernel";
  OS << '>';
}

AddressSanitizerPass::AddressSanitizerPass(
    const AddressSanitizerOptions &Options, bool UseGlobalGC,
    bool UseOdrIndicator, AsanDtorKind DestructorKind,
    AsanCtorKind ConstructorKind)
    : Options(Options), UseGlobalGC(UseGlobalGC),
      UseOdrIndicator(UseOdrIndicator), DestructorKind(DestructorKind),
      ConstructorKind(ConstructorKind) {}

PreservedAnalyses AddressSanitizerPass::run(Module &M,
                                            ModuleAnalysisManager &MAM) {
  ModuleAddressSanitizer ModuleSanitizer(
      M, Options.InsertVersionCheck, Options.CompileKernel, Options.Recover,
      UseGlobalGC, UseOdrIndicator, DestructorKind, ConstructorKind);
  bool Modified = false;
  auto &FAM = MAM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
  const StackSafetyGlobalInfo *const SSGI =
      ClUseStackSafety ? &MAM.getResult<StackSafetyGlobalAnalysis>(M) : nullptr;
  for (Function &F : M) {
    AddressSanitizer FunctionSanitizer(
        M, SSGI, Options.InstrumentationWithCallsThreshold,
        Options.MaxInlinePoisoningSize, Options.CompileKernel, Options.Recover,
        Options.UseAfterScope, Options.UseAfterReturn);
    const TargetLibraryInfo &TLI = FAM.getResult<TargetLibraryAnalysis>(F);
    Modified |= FunctionSanitizer.instrumentFunction(F, &TLI);
  }
  Modified |= ModuleSanitizer.instrumentModule(M);
  if (!Modified)
    return PreservedAnalyses::all();

  PreservedAnalyses PA = PreservedAnalyses::none();
  // GlobalsAA is considered stateless and does not get invalidated unless
  // explicitly invalidated; PreservedAnalyses::none() is not enough. Sanitizers
  // make changes that require GlobalsAA to be invalidated.
  PA.abandon<GlobalsAA>();
  return PA;
}

static size_t TypeStoreSizeToSizeIndex(uint32_t TypeSize) {
  size_t Res = llvm::countr_zero(TypeSize / 8);
  assert(Res < kNumberOfAccessSizes);
  return Res;
}
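// For example, TypeSize 8 (a 1-byte access) maps to index 0, 32 bits to
// index 2, and 128 bits to index 4, matching kNumberOfAccessSizes == 5.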

/// Check if \p G has been created by a trusted compiler pass.
static bool GlobalWasGeneratedByCompiler(GlobalVariable *G) {
  // Do not instrument @llvm.global_ctors, @llvm.used, etc.
  if (G->getName().starts_with("llvm.") ||
      // Do not instrument gcov counter arrays.
      G->getName().starts_with("__llvm_gcov_ctr") ||
      // Do not instrument rtti proxy symbols for function sanitizer.
      G->getName().starts_with("__llvm_rtti_proxy"))
    return true;

  // Do not instrument asan globals.
  if (G->getName().starts_with(kAsanGenPrefix) ||
      G->getName().starts_with(kSanCovGenPrefix) ||
      G->getName().starts_with(kODRGenPrefix))
    return true;

  return false;
}

static bool isUnsupportedAMDGPUAddrspace(Value *Addr) {
  Type *PtrTy = cast<PointerType>(Addr->getType()->getScalarType());
  unsigned int AddrSpace = PtrTy->getPointerAddressSpace();
  if (AddrSpace == 3 || AddrSpace == 5)
    return true;
  return false;
}

Value *AddressSanitizer::memToShadow(Value *Shadow, IRBuilder<> &IRB) {
  // Shadow >> scale
  Shadow = IRB.CreateLShr(Shadow, Mapping.Scale);
  if (Mapping.Offset == 0) return Shadow;
  // (Shadow >> scale) ADD-or-OR offset
  Value *ShadowBase;
  if (LocalDynamicShadow)
    ShadowBase = LocalDynamicShadow;
  else
    ShadowBase = ConstantInt::get(IntptrTy, Mapping.Offset);
  if (Mapping.OrShadowOffset)
    return IRB.CreateOr(Shadow, ShadowBase);
  else
    return IRB.CreateAdd(Shadow, ShadowBase);
}
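// For example, with Scale == 3 and Offset == 0x7fff8000, application address
// 0x10000000 maps to the shadow byte at
// (0x10000000 >> 3) + 0x7fff8000 == 0x81ff8000.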

// Instrument memset/memmove/memcpy
void AddressSanitizer::instrumentMemIntrinsic(MemIntrinsic *MI) {
  InstrumentationIRBuilder IRB(MI);
  if (isa<MemTransferInst>(MI)) {
    IRB.CreateCall(isa<MemMoveInst>(MI) ? AsanMemmove : AsanMemcpy,
                   {MI->getOperand(0), MI->getOperand(1),
                    IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)});
  } else if (isa<MemSetInst>(MI)) {
    IRB.CreateCall(
        AsanMemset,
        {MI->getOperand(0),
         IRB.CreateIntCast(MI->getOperand(1), IRB.getInt32Ty(), false),
         IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)});
  }
  MI->eraseFromParent();
}
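// Conceptually, a "llvm.memcpy(dst, src, n)" call is rewritten into a call to
// "__asan_memcpy(dst, src, n)" (likewise for memmove/memset), so the runtime
// can verify the affected address ranges against shadow memory before
// performing the operation.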

/// Check if we want (and can) handle this alloca.
bool AddressSanitizer::isInterestingAlloca(const AllocaInst &AI) {
  auto PreviouslySeenAllocaInfo = ProcessedAllocas.find(&AI);

  if (PreviouslySeenAllocaInfo != ProcessedAllocas.end())
    return PreviouslySeenAllocaInfo->getSecond();

  bool IsInteresting =
      (AI.getAllocatedType()->isSized() &&
       // alloca() may be called with 0 size, ignore it.
       ((!AI.isStaticAlloca()) || !getAllocaSizeInBytes(AI).isZero()) &&
       // We are only interested in allocas not promotable to registers.
       // Promotable allocas are common under -O0.
       (!ClSkipPromotableAllocas || !isAllocaPromotable(&AI)) &&
       // inalloca allocas are not treated as static, and we don't want
       // dynamic alloca instrumentation for them as well.
       !AI.isUsedWithInAlloca() &&
       // swifterror allocas are register promoted by ISel
       !AI.isSwiftError() &&
       // safe allocas are not interesting
       !(SSGI && SSGI->isSafe(AI)));

  ProcessedAllocas[&AI] = IsInteresting;
  return IsInteresting;
}

bool AddressSanitizer::ignoreAccess(Instruction *Inst, Value *Ptr) {
  // Instrument accesses from different address spaces only for AMDGPU.
  Type *PtrTy = cast<PointerType>(Ptr->getType()->getScalarType());
  if (PtrTy->getPointerAddressSpace() != 0 &&
      !(TargetTriple.isAMDGPU() && !isUnsupportedAMDGPUAddrspace(Ptr)))
    return true;

  // Ignore swifterror addresses.
  // swifterror memory addresses are mem2reg promoted by instruction
  // selection. As such they cannot have regular uses like an instrumentation
  // function and it makes no sense to track them as memory.
  if (Ptr->isSwiftError())
    return true;

  // Treat memory accesses to promotable allocas as non-interesting since they
  // will not cause memory violations. This greatly speeds up the instrumented
  // executable at -O0.
  if (auto AI = dyn_cast_or_null<AllocaInst>(Ptr))
    if (ClSkipPromotableAllocas && !isInterestingAlloca(*AI))
      return true;

  if (SSGI != nullptr && SSGI->stackAccessIsSafe(*Inst) &&
      findAllocaForValue(Ptr))
    return true;

  return false;
}

getInterestingMemoryOperands(Instruction * I,SmallVectorImpl<InterestingMemoryOperand> & Interesting)1324 void AddressSanitizer::getInterestingMemoryOperands(
1325 Instruction *I, SmallVectorImpl<InterestingMemoryOperand> &Interesting) {
1326 // Do not instrument the load fetching the dynamic shadow address.
1327 if (LocalDynamicShadow == I)
1328 return;
1329
1330 if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
1331 if (!ClInstrumentReads || ignoreAccess(I, LI->getPointerOperand()))
1332 return;
1333 Interesting.emplace_back(I, LI->getPointerOperandIndex(), false,
1334 LI->getType(), LI->getAlign());
1335 } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
1336 if (!ClInstrumentWrites || ignoreAccess(I, SI->getPointerOperand()))
1337 return;
1338 Interesting.emplace_back(I, SI->getPointerOperandIndex(), true,
1339 SI->getValueOperand()->getType(), SI->getAlign());
1340 } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
1341 if (!ClInstrumentAtomics || ignoreAccess(I, RMW->getPointerOperand()))
1342 return;
1343 Interesting.emplace_back(I, RMW->getPointerOperandIndex(), true,
1344 RMW->getValOperand()->getType(), std::nullopt);
1345 } else if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I)) {
1346 if (!ClInstrumentAtomics || ignoreAccess(I, XCHG->getPointerOperand()))
1347 return;
1348 Interesting.emplace_back(I, XCHG->getPointerOperandIndex(), true,
1349 XCHG->getCompareOperand()->getType(),
1350 std::nullopt);
1351 } else if (auto CI = dyn_cast<CallInst>(I)) {
1352 switch (CI->getIntrinsicID()) {
1353 case Intrinsic::masked_load:
1354 case Intrinsic::masked_store:
1355 case Intrinsic::masked_gather:
1356 case Intrinsic::masked_scatter: {
1357 bool IsWrite = CI->getType()->isVoidTy();
1358 // Masked store has an initial operand for the value.
1359 unsigned OpOffset = IsWrite ? 1 : 0;
1360 if (IsWrite ? !ClInstrumentWrites : !ClInstrumentReads)
1361 return;
1362
1363 auto BasePtr = CI->getOperand(OpOffset);
1364 if (ignoreAccess(I, BasePtr))
1365 return;
1366 Type *Ty = IsWrite ? CI->getArgOperand(0)->getType() : CI->getType();
1367 MaybeAlign Alignment = Align(1);
1368 // Otherwise there are no alignment guarantees; we probably got Undef.
1369 if (auto *Op = dyn_cast<ConstantInt>(CI->getOperand(1 + OpOffset)))
1370 Alignment = Op->getMaybeAlignValue();
1371 Value *Mask = CI->getOperand(2 + OpOffset);
1372 Interesting.emplace_back(I, OpOffset, IsWrite, Ty, Alignment, Mask);
1373 break;
1374 }
1375 case Intrinsic::masked_expandload:
1376 case Intrinsic::masked_compressstore: {
1377 bool IsWrite = CI->getIntrinsicID() == Intrinsic::masked_compressstore;
1378 unsigned OpOffset = IsWrite ? 1 : 0;
1379 if (IsWrite ? !ClInstrumentWrites : !ClInstrumentReads)
1380 return;
1381 auto BasePtr = CI->getOperand(OpOffset);
1382 if (ignoreAccess(I, BasePtr))
1383 return;
1384 MaybeAlign Alignment = BasePtr->getPointerAlignment(*DL);
1385 Type *Ty = IsWrite ? CI->getArgOperand(0)->getType() : CI->getType();
1386
1387 IRBuilder IB(I);
1388 Value *Mask = CI->getOperand(1 + OpOffset);
1389 // Use the popcount of Mask as the effective vector length.
1390 Type *ExtTy = VectorType::get(IntptrTy, cast<VectorType>(Ty));
1391 Value *ExtMask = IB.CreateZExt(Mask, ExtTy);
1392 Value *EVL = IB.CreateAddReduce(ExtMask);
1393 Value *TrueMask = ConstantInt::get(Mask->getType(), 1);
1394 Interesting.emplace_back(I, OpOffset, IsWrite, Ty, Alignment, TrueMask,
1395 EVL);
1396 break;
1397 }
1398 case Intrinsic::vp_load:
1399 case Intrinsic::vp_store:
1400 case Intrinsic::experimental_vp_strided_load:
1401 case Intrinsic::experimental_vp_strided_store: {
1402 auto *VPI = cast<VPIntrinsic>(CI);
1403 unsigned IID = CI->getIntrinsicID();
1404 bool IsWrite = CI->getType()->isVoidTy();
1405 if (IsWrite ? !ClInstrumentWrites : !ClInstrumentReads)
1406 return;
1407 unsigned PtrOpNo = *VPI->getMemoryPointerParamPos(IID);
1408 Type *Ty = IsWrite ? CI->getArgOperand(0)->getType() : CI->getType();
1409 MaybeAlign Alignment = VPI->getOperand(PtrOpNo)->getPointerAlignment(*DL);
1410 Value *Stride = nullptr;
1411 if (IID == Intrinsic::experimental_vp_strided_store ||
1412 IID == Intrinsic::experimental_vp_strided_load) {
1413 Stride = VPI->getOperand(PtrOpNo + 1);
1414 // Use the pointer alignment as the element alignment if the stride is a
1415 // multiple of the pointer alignment. Otherwise, the element alignment
1416 // should be Align(1).
1417 unsigned PointerAlign = Alignment.valueOrOne().value();
1418 if (!isa<ConstantInt>(Stride) ||
1419 cast<ConstantInt>(Stride)->getZExtValue() % PointerAlign != 0)
1420 Alignment = Align(1);
1421 }
1422 Interesting.emplace_back(I, PtrOpNo, IsWrite, Ty, Alignment,
1423 VPI->getMaskParam(), VPI->getVectorLengthParam(),
1424 Stride);
1425 break;
1426 }
1427 case Intrinsic::vp_gather:
1428 case Intrinsic::vp_scatter: {
1429 auto *VPI = cast<VPIntrinsic>(CI);
1430 unsigned IID = CI->getIntrinsicID();
1431 bool IsWrite = IID == Intrinsic::vp_scatter;
1432 if (IsWrite ? !ClInstrumentWrites : !ClInstrumentReads)
1433 return;
1434 unsigned PtrOpNo = *VPI->getMemoryPointerParamPos(IID);
1435 Type *Ty = IsWrite ? CI->getArgOperand(0)->getType() : CI->getType();
1436 MaybeAlign Alignment = VPI->getPointerAlignment();
1437 Interesting.emplace_back(I, PtrOpNo, IsWrite, Ty, Alignment,
1438 VPI->getMaskParam(),
1439 VPI->getVectorLengthParam());
1440 break;
1441 }
1442 default:
1443 for (unsigned ArgNo = 0; ArgNo < CI->arg_size(); ArgNo++) {
1444 if (!ClInstrumentByval || !CI->isByValArgument(ArgNo) ||
1445 ignoreAccess(I, CI->getArgOperand(ArgNo)))
1446 continue;
1447 Type *Ty = CI->getParamByValType(ArgNo);
1448 Interesting.emplace_back(I, ArgNo, false, Ty, Align(1));
1449 }
1450 }
1451 }
1452 }
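// Illustrative examples of what gets recorded above: for
//   store i32 %v, ptr %p, align 4
// we record a write at pointer-operand index 1 with type i32 and Align(4);
// for an @llvm.masked.load the mask operand is captured as well, so the
// instrumentation can skip lanes whose mask bit is false.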
1453
1454 static bool isPointerOperand(Value *V) {
1455 return V->getType()->isPointerTy() || isa<PtrToIntInst>(V);
1456 }
1457
1458 // This is a rough heuristic; it may cause both false positives and
1459 // false negatives. The proper implementation requires cooperation with
1460 // the frontend.
1461 static bool isInterestingPointerComparison(Instruction *I) {
1462 if (ICmpInst *Cmp = dyn_cast<ICmpInst>(I)) {
1463 if (!Cmp->isRelational())
1464 return false;
1465 } else {
1466 return false;
1467 }
1468 return isPointerOperand(I->getOperand(0)) &&
1469 isPointerOperand(I->getOperand(1));
1470 }
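// Illustrative example: `p < q` on two unrelated objects is flagged (a
// relational comparison of pointers into different objects is undefined or
// unspecified behavior in C/C++), while `p == q` is deliberately skipped
// because isRelational() excludes equality predicates.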
1471
1472 // This is a rough heuristic; it may cause both false positives and
1473 // false negatives. The proper implementation requires cooperation with
1474 // the frontend.
1475 static bool isInterestingPointerSubtraction(Instruction *I) {
1476 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(I)) {
1477 if (BO->getOpcode() != Instruction::Sub)
1478 return false;
1479 } else {
1480 return false;
1481 }
1482 return isPointerOperand(I->getOperand(0)) &&
1483 isPointerOperand(I->getOperand(1));
1484 }
1485
1486 bool AddressSanitizer::GlobalIsLinkerInitialized(GlobalVariable *G) {
1487 // If a global variable does not have dynamic initialization we don't
1488 // have to instrument it. However, if a global does not have an initializer
1489 // at all, we assume it has a dynamic initializer (in another TU).
1490 if (!G->hasInitializer())
1491 return false;
1492
1493 if (G->hasSanitizerMetadata() && G->getSanitizerMetadata().IsDynInit)
1494 return false;
1495
1496 return true;
1497 }
1498
1499 void AddressSanitizer::instrumentPointerComparisonOrSubtraction(
1500 Instruction *I) {
1501 IRBuilder<> IRB(I);
1502 FunctionCallee F = isa<ICmpInst>(I) ? AsanPtrCmpFunction : AsanPtrSubFunction;
1503 Value *Param[2] = {I->getOperand(0), I->getOperand(1)};
1504 for (Value *&i : Param) {
1505 if (i->getType()->isPointerTy())
1506 i = IRB.CreatePointerCast(i, IntptrTy);
1507 }
1508 IRB.CreateCall(F, Param);
1509 }
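// The emitted code is roughly (a sketch; both operands are cast to IntptrTy
// first):
//   call void @<AsanPtrCmpFunction or AsanPtrSubFunction>(i64 %lhs, i64 %rhs)
// letting the runtime decide whether the two addresses belong to the same
// object.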
1510
1511 static void doInstrumentAddress(AddressSanitizer *Pass, Instruction *I,
1512 Instruction *InsertBefore, Value *Addr,
1513 MaybeAlign Alignment, unsigned Granularity,
1514 TypeSize TypeStoreSize, bool IsWrite,
1515 Value *SizeArgument, bool UseCalls,
1516 uint32_t Exp) {
1517 // Instrument a 1-, 2-, 4-, 8-, or 16-byte access with one check
1518 // if the data is properly aligned.
1519 if (!TypeStoreSize.isScalable()) {
1520 const auto FixedSize = TypeStoreSize.getFixedValue();
1521 switch (FixedSize) {
1522 case 8:
1523 case 16:
1524 case 32:
1525 case 64:
1526 case 128:
1527 if (!Alignment || *Alignment >= Granularity ||
1528 *Alignment >= FixedSize / 8)
1529 return Pass->instrumentAddress(I, InsertBefore, Addr, Alignment,
1530 FixedSize, IsWrite, nullptr, UseCalls,
1531 Exp);
1532 }
1533 }
1534 Pass->instrumentUnusualSizeOrAlignment(I, InsertBefore, Addr, TypeStoreSize,
1535 IsWrite, nullptr, UseCalls, Exp);
1536 }
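// For example (illustrative): a 4-byte load with Align(4) and the default
// 8-byte shadow granularity hits the FixedSize == 32 case and gets a single
// shadow check, while a 12-byte access (not in the case list) falls through
// to instrumentUnusualSizeOrAlignment, which checks only the first and last
// bytes.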
1537
1538 void AddressSanitizer::instrumentMaskedLoadOrStore(
1539 AddressSanitizer *Pass, const DataLayout &DL, Type *IntptrTy, Value *Mask,
1540 Value *EVL, Value *Stride, Instruction *I, Value *Addr,
1541 MaybeAlign Alignment, unsigned Granularity, Type *OpType, bool IsWrite,
1542 Value *SizeArgument, bool UseCalls, uint32_t Exp) {
1543 auto *VTy = cast<VectorType>(OpType);
1544 TypeSize ElemTypeSize = DL.getTypeStoreSizeInBits(VTy->getScalarType());
1545 auto Zero = ConstantInt::get(IntptrTy, 0);
1546
1547 IRBuilder IB(I);
1548 Instruction *LoopInsertBefore = I;
1549 if (EVL) {
1550 // The end argument of SplitBlockAndInsertForEachLane is assumed to be
1551 // bigger than zero, so we must check whether EVL is zero here.
1552 Type *EVLType = EVL->getType();
1553 Value *EVLIsNonZero = IB.CreateICmpNE(EVL, ConstantInt::get(EVLType, 0));
1554 LoopInsertBefore = SplitBlockAndInsertIfThen(EVLIsNonZero, I, false);
1555 IB.SetInsertPoint(LoopInsertBefore);
1556 // Cast EVL to IntptrTy.
1557 EVL = IB.CreateZExtOrTrunc(EVL, IntptrTy);
1558 // To avoid undefined behavior when extracting with an out-of-range index,
1559 // use the minimum of EVL and the element count as the trip count.
1560 Value *EC = IB.CreateElementCount(IntptrTy, VTy->getElementCount());
1561 EVL = IB.CreateBinaryIntrinsic(Intrinsic::umin, EVL, EC);
1562 } else {
1563 EVL = IB.CreateElementCount(IntptrTy, VTy->getElementCount());
1564 }
1565
1566 // Cast Stride to IntptrTy.
1567 if (Stride)
1568 Stride = IB.CreateZExtOrTrunc(Stride, IntptrTy);
1569
1570 SplitBlockAndInsertForEachLane(EVL, LoopInsertBefore,
1571 [&](IRBuilderBase &IRB, Value *Index) {
1572 Value *MaskElem = IRB.CreateExtractElement(Mask, Index);
1573 if (auto *MaskElemC = dyn_cast<ConstantInt>(MaskElem)) {
1574 if (MaskElemC->isZero())
1575 // No check
1576 return;
1577 // Unconditional check
1578 } else {
1579 // Conditional check
1580 Instruction *ThenTerm = SplitBlockAndInsertIfThen(
1581 MaskElem, &*IRB.GetInsertPoint(), false);
1582 IRB.SetInsertPoint(ThenTerm);
1583 }
1584
1585 Value *InstrumentedAddress;
1586 if (isa<VectorType>(Addr->getType())) {
1587 assert(
1588 cast<VectorType>(Addr->getType())->getElementType()->isPointerTy() &&
1589 "Expected vector of pointer.");
1590 InstrumentedAddress = IRB.CreateExtractElement(Addr, Index);
1591 } else if (Stride) {
1592 Index = IRB.CreateMul(Index, Stride);
1593 InstrumentedAddress = IRB.CreatePtrAdd(Addr, Index);
1594 } else {
1595 InstrumentedAddress = IRB.CreateGEP(VTy, Addr, {Zero, Index});
1596 }
1597 doInstrumentAddress(Pass, I, &*IRB.GetInsertPoint(),
1598 InstrumentedAddress, Alignment, Granularity,
1599 ElemTypeSize, IsWrite, SizeArgument, UseCalls, Exp);
1600 });
1601 }
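// Conceptual expansion (a sketch, not the exact emitted IR): for a masked
// load of <4 x i32> from %p, the code above emits a loop over lanes
// [0, EVL) in which each lane with a true (or non-constant) mask bit checks
// the shadow for &%p[lane] before the original instruction executes.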
1602
1603 void AddressSanitizer::instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis,
1604 InterestingMemoryOperand &O, bool UseCalls,
1605 const DataLayout &DL) {
1606 Value *Addr = O.getPtr();
1607
1608 // Optimization experiments.
1609 // The experiments can be used to evaluate potential optimizations that remove
1610 // instrumentation (assess false negatives). Instead of completely removing
1611 // some instrumentation, you set Exp to a non-zero value (mask of optimization
1612 // experiments that want to remove instrumentation of this instruction).
1613 // If Exp is non-zero, this pass will emit special calls into the runtime
1614 // (e.g. __asan_report_exp_load1 instead of __asan_report_load1). These calls
1615 // make the runtime terminate the program in a special way (with a different
1616 // exit status). Then you run the new compiler on a buggy corpus, collect
1617 // the special terminations (ideally, you don't see them at all -- no false
1618 // negatives), and make a decision about the optimization.
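  // Hypothetical workflow sketch: force a non-zero Exp via ClForceExperiment,
  // rebuild a known-buggy corpus, and count the special terminations
  // (__asan_report_exp_* paths) to estimate how many true positives the
  // candidate optimization would lose.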
1619 uint32_t Exp = ClForceExperiment;
1620
1621 if (ClOpt && ClOptGlobals) {
1622 // If initialization order checking is disabled, a simple access to a
1623 // dynamically initialized global is always valid.
1624 GlobalVariable *G = dyn_cast<GlobalVariable>(getUnderlyingObject(Addr));
1625 if (G && (!ClInitializers || GlobalIsLinkerInitialized(G)) &&
1626 isSafeAccess(ObjSizeVis, Addr, O.TypeStoreSize)) {
1627 NumOptimizedAccessesToGlobalVar++;
1628 return;
1629 }
1630 }
1631
1632 if (ClOpt && ClOptStack) {
1633 // A direct inbounds access to a stack variable is always valid.
1634 if (isa<AllocaInst>(getUnderlyingObject(Addr)) &&
1635 isSafeAccess(ObjSizeVis, Addr, O.TypeStoreSize)) {
1636 NumOptimizedAccessesToStackVar++;
1637 return;
1638 }
1639 }
1640
1641 if (O.IsWrite)
1642 NumInstrumentedWrites++;
1643 else
1644 NumInstrumentedReads++;
1645
1646 unsigned Granularity = 1 << Mapping.Scale;
1647 if (O.MaybeMask) {
1648 instrumentMaskedLoadOrStore(this, DL, IntptrTy, O.MaybeMask, O.MaybeEVL,
1649 O.MaybeStride, O.getInsn(), Addr, O.Alignment,
1650 Granularity, O.OpType, O.IsWrite, nullptr,
1651 UseCalls, Exp);
1652 } else {
1653 doInstrumentAddress(this, O.getInsn(), O.getInsn(), Addr, O.Alignment,
1654 Granularity, O.TypeStoreSize, O.IsWrite, nullptr, UseCalls,
1655 Exp);
1656 }
1657 }
1658
1659 Instruction *AddressSanitizer::generateCrashCode(Instruction *InsertBefore,
1660 Value *Addr, bool IsWrite,
1661 size_t AccessSizeIndex,
1662 Value *SizeArgument,
1663 uint32_t Exp) {
1664 InstrumentationIRBuilder IRB(InsertBefore);
1665 Value *ExpVal = Exp == 0 ? nullptr : ConstantInt::get(IRB.getInt32Ty(), Exp);
1666 CallInst *Call = nullptr;
1667 if (SizeArgument) {
1668 if (Exp == 0)
1669 Call = IRB.CreateCall(AsanErrorCallbackSized[IsWrite][0],
1670 {Addr, SizeArgument});
1671 else
1672 Call = IRB.CreateCall(AsanErrorCallbackSized[IsWrite][1],
1673 {Addr, SizeArgument, ExpVal});
1674 } else {
1675 if (Exp == 0)
1676 Call =
1677 IRB.CreateCall(AsanErrorCallback[IsWrite][0][AccessSizeIndex], Addr);
1678 else
1679 Call = IRB.CreateCall(AsanErrorCallback[IsWrite][1][AccessSizeIndex],
1680 {Addr, ExpVal});
1681 }
1682
1683 Call->setCannotMerge();
1684 return Call;
1685 }
1686
1687 Value *AddressSanitizer::createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong,
1688 Value *ShadowValue,
1689 uint32_t TypeStoreSize) {
1690 size_t Granularity = static_cast<size_t>(1) << Mapping.Scale;
1691 // Addr & (Granularity - 1)
1692 Value *LastAccessedByte =
1693 IRB.CreateAnd(AddrLong, ConstantInt::get(IntptrTy, Granularity - 1));
1694 // (Addr & (Granularity - 1)) + size - 1
1695 if (TypeStoreSize / 8 > 1)
1696 LastAccessedByte = IRB.CreateAdd(
1697 LastAccessedByte, ConstantInt::get(IntptrTy, TypeStoreSize / 8 - 1));
1698 // (uint8_t) ((Addr & (Granularity-1)) + size - 1)
1699 LastAccessedByte =
1700 IRB.CreateIntCast(LastAccessedByte, ShadowValue->getType(), false);
1701 // ((uint8_t) ((Addr & (Granularity-1)) + size - 1)) >= ShadowValue
1702 return IRB.CreateICmpSGE(LastAccessedByte, ShadowValue);
1703 }
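// Worked example (illustrative): with Granularity = 8 and a 2-byte access at
// Addr = 0x1005, LastAccessedByte = (0x1005 & 7) + 2 - 1 = 6. If the shadow
// byte is 4 (only the first 4 bytes of the granule are addressable), then
// 6 >= 4 and the slow path reports the bug.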
1704
1705 Instruction *AddressSanitizer::instrumentAMDGPUAddress(
1706 Instruction *OrigIns, Instruction *InsertBefore, Value *Addr,
1707 uint32_t TypeStoreSize, bool IsWrite, Value *SizeArgument) {
1708 // Do not instrument unsupported addrspaces.
1709 if (isUnsupportedAMDGPUAddrspace(Addr))
1710 return nullptr;
1711 Type *PtrTy = cast<PointerType>(Addr->getType()->getScalarType());
1712 // Follow host instrumentation for global and constant addresses.
1713 if (PtrTy->getPointerAddressSpace() != 0)
1714 return InsertBefore;
1715 // Instrument generic addresses in supported address spaces.
1716 IRBuilder<> IRB(InsertBefore);
1717 Value *IsShared = IRB.CreateCall(AMDGPUAddressShared, {Addr});
1718 Value *IsPrivate = IRB.CreateCall(AMDGPUAddressPrivate, {Addr});
1719 Value *IsSharedOrPrivate = IRB.CreateOr(IsShared, IsPrivate);
1720 Value *Cmp = IRB.CreateNot(IsSharedOrPrivate);
1721 Value *AddrSpaceZeroLanding =
1722 SplitBlockAndInsertIfThen(Cmp, InsertBefore, false);
1723 InsertBefore = cast<Instruction>(AddrSpaceZeroLanding);
1724 return InsertBefore;
1725 }
1726
1727 Instruction *AddressSanitizer::genAMDGPUReportBlock(IRBuilder<> &IRB,
1728 Value *Cond, bool Recover) {
1729 Module &M = *IRB.GetInsertBlock()->getModule();
1730 Value *ReportCond = Cond;
1731 if (!Recover) {
1732 auto Ballot = M.getOrInsertFunction(kAMDGPUBallotName, IRB.getInt64Ty(),
1733 IRB.getInt1Ty());
1734 ReportCond = IRB.CreateIsNotNull(IRB.CreateCall(Ballot, {Cond}));
1735 }
1736
1737 auto *Trm =
1738 SplitBlockAndInsertIfThen(ReportCond, &*IRB.GetInsertPoint(), false,
1739 MDBuilder(*C).createBranchWeights(1, 100000));
1740 Trm->getParent()->setName("asan.report");
1741
1742 if (Recover)
1743 return Trm;
1744
1745 Trm = SplitBlockAndInsertIfThen(Cond, Trm, false);
1746 IRB.SetInsertPoint(Trm);
1747 return IRB.CreateCall(
1748 M.getOrInsertFunction(kAMDGPUUnreachableName, IRB.getVoidTy()), {});
1749 }
1750
1751 void AddressSanitizer::instrumentAddress(Instruction *OrigIns,
1752 Instruction *InsertBefore, Value *Addr,
1753 MaybeAlign Alignment,
1754 uint32_t TypeStoreSize, bool IsWrite,
1755 Value *SizeArgument, bool UseCalls,
1756 uint32_t Exp) {
1757 if (TargetTriple.isAMDGPU()) {
1758 InsertBefore = instrumentAMDGPUAddress(OrigIns, InsertBefore, Addr,
1759 TypeStoreSize, IsWrite, SizeArgument);
1760 if (!InsertBefore)
1761 return;
1762 }
1763
1764 InstrumentationIRBuilder IRB(InsertBefore);
1765 size_t AccessSizeIndex = TypeStoreSizeToSizeIndex(TypeStoreSize);
1766 const ASanAccessInfo AccessInfo(IsWrite, CompileKernel, AccessSizeIndex);
1767
1768 if (UseCalls && ClOptimizeCallbacks) {
1769 const ASanAccessInfo AccessInfo(IsWrite, CompileKernel, AccessSizeIndex);
1770 Module *M = IRB.GetInsertBlock()->getParent()->getParent();
1771 IRB.CreateCall(
1772 Intrinsic::getDeclaration(M, Intrinsic::asan_check_memaccess),
1773 {IRB.CreatePointerCast(Addr, PtrTy),
1774 ConstantInt::get(Int32Ty, AccessInfo.Packed)});
1775 return;
1776 }
1777
1778 Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
1779 if (UseCalls) {
1780 if (Exp == 0)
1781 IRB.CreateCall(AsanMemoryAccessCallback[IsWrite][0][AccessSizeIndex],
1782 AddrLong);
1783 else
1784 IRB.CreateCall(AsanMemoryAccessCallback[IsWrite][1][AccessSizeIndex],
1785 {AddrLong, ConstantInt::get(IRB.getInt32Ty(), Exp)});
1786 return;
1787 }
1788
1789 Type *ShadowTy =
1790 IntegerType::get(*C, std::max(8U, TypeStoreSize >> Mapping.Scale));
1791 Type *ShadowPtrTy = PointerType::get(ShadowTy, 0);
1792 Value *ShadowPtr = memToShadow(AddrLong, IRB);
1793 const uint64_t ShadowAlign =
1794 std::max<uint64_t>(Alignment.valueOrOne().value() >> Mapping.Scale, 1);
1795 Value *ShadowValue = IRB.CreateAlignedLoad(
1796 ShadowTy, IRB.CreateIntToPtr(ShadowPtr, ShadowPtrTy), Align(ShadowAlign));
1797
1798 Value *Cmp = IRB.CreateIsNotNull(ShadowValue);
1799 size_t Granularity = 1ULL << Mapping.Scale;
1800 Instruction *CrashTerm = nullptr;
1801
1802 bool GenSlowPath = (ClAlwaysSlowPath || (TypeStoreSize < 8 * Granularity));
1803
1804 if (TargetTriple.isAMDGCN()) {
1805 if (GenSlowPath) {
1806 auto *Cmp2 = createSlowPathCmp(IRB, AddrLong, ShadowValue, TypeStoreSize);
1807 Cmp = IRB.CreateAnd(Cmp, Cmp2);
1808 }
1809 CrashTerm = genAMDGPUReportBlock(IRB, Cmp, Recover);
1810 } else if (GenSlowPath) {
1811 // We use branch weights for the slow path check, to indicate that the slow
1812 // path is rarely taken. This seems to be the case for SPEC benchmarks.
1813 Instruction *CheckTerm = SplitBlockAndInsertIfThen(
1814 Cmp, InsertBefore, false, MDBuilder(*C).createBranchWeights(1, 100000));
1815 assert(cast<BranchInst>(CheckTerm)->isUnconditional());
1816 BasicBlock *NextBB = CheckTerm->getSuccessor(0);
1817 IRB.SetInsertPoint(CheckTerm);
1818 Value *Cmp2 = createSlowPathCmp(IRB, AddrLong, ShadowValue, TypeStoreSize);
1819 if (Recover) {
1820 CrashTerm = SplitBlockAndInsertIfThen(Cmp2, CheckTerm, false);
1821 } else {
1822 BasicBlock *CrashBlock =
1823 BasicBlock::Create(*C, "", NextBB->getParent(), NextBB);
1824 CrashTerm = new UnreachableInst(*C, CrashBlock);
1825 BranchInst *NewTerm = BranchInst::Create(CrashBlock, NextBB, Cmp2);
1826 ReplaceInstWithInst(CheckTerm, NewTerm);
1827 }
1828 } else {
1829 CrashTerm = SplitBlockAndInsertIfThen(Cmp, InsertBefore, !Recover);
1830 }
1831
1832 Instruction *Crash = generateCrashCode(CrashTerm, AddrLong, IsWrite,
1833 AccessSizeIndex, SizeArgument, Exp);
1834 if (OrigIns->getDebugLoc())
1835 Crash->setDebugLoc(OrigIns->getDebugLoc());
1836 }
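// The overall shape of the emitted check, for the default mapping (a sketch,
// not the verbatim IR):
//   %shadow = load i8, (%addr >> Mapping.Scale) + Mapping.Offset
//   if (%shadow != 0)                                      ; usually false
//     if ((%addr & (Granularity-1)) + size - 1 >= %shadow) ; slow path
//       __asan_report_*(%addr)                  ; or recover and continue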
1837
1838 // Instrument an access of unusual size or unusual alignment.
1839 // We cannot do it with a single check, so we do a 1-byte check for the first
1840 // and the last bytes. We call __asan_report_*_n(addr, real_size) to be able
1841 // to report the actual access size.
1842 void AddressSanitizer::instrumentUnusualSizeOrAlignment(
1843 Instruction *I, Instruction *InsertBefore, Value *Addr, TypeSize TypeStoreSize,
1844 bool IsWrite, Value *SizeArgument, bool UseCalls, uint32_t Exp) {
1845 InstrumentationIRBuilder IRB(InsertBefore);
1846 Value *NumBits = IRB.CreateTypeSize(IntptrTy, TypeStoreSize);
1847 Value *Size = IRB.CreateLShr(NumBits, ConstantInt::get(IntptrTy, 3));
1848
1849 Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
1850 if (UseCalls) {
1851 if (Exp == 0)
1852 IRB.CreateCall(AsanMemoryAccessCallbackSized[IsWrite][0],
1853 {AddrLong, Size});
1854 else
1855 IRB.CreateCall(AsanMemoryAccessCallbackSized[IsWrite][1],
1856 {AddrLong, Size, ConstantInt::get(IRB.getInt32Ty(), Exp)});
1857 } else {
1858 Value *SizeMinusOne = IRB.CreateSub(Size, ConstantInt::get(IntptrTy, 1));
1859 Value *LastByte = IRB.CreateIntToPtr(
1860 IRB.CreateAdd(AddrLong, SizeMinusOne),
1861 Addr->getType());
1862 instrumentAddress(I, InsertBefore, Addr, {}, 8, IsWrite, Size, false, Exp);
1863 instrumentAddress(I, InsertBefore, LastByte, {}, 8, IsWrite, Size, false,
1864 Exp);
1865 }
1866 }
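// E.g. (illustrative) a 10-byte access at %p becomes two 1-byte checks, one
// at %p and one at %p + 9, with the real size passed through so the
// __asan_report_*_n callback can print the actual access size.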
1867
1868 void ModuleAddressSanitizer::poisonOneInitializer(Function &GlobalInit,
1869 GlobalValue *ModuleName) {
1870 // Set up the arguments to our poison/unpoison functions.
1871 IRBuilder<> IRB(&GlobalInit.front(),
1872 GlobalInit.front().getFirstInsertionPt());
1873
1874 // Add a call to poison all external globals before the given function starts.
1875 Value *ModuleNameAddr = ConstantExpr::getPointerCast(ModuleName, IntptrTy);
1876 IRB.CreateCall(AsanPoisonGlobals, ModuleNameAddr);
1877
1878 // Add calls to unpoison all globals before each return instruction.
1879 for (auto &BB : GlobalInit)
1880 if (ReturnInst *RI = dyn_cast<ReturnInst>(BB.getTerminator()))
1881 CallInst::Create(AsanUnpoisonGlobals, "", RI);
1882 }
1883
1884 void ModuleAddressSanitizer::createInitializerPoisonCalls(
1885 Module &M, GlobalValue *ModuleName) {
1886 GlobalVariable *GV = M.getGlobalVariable("llvm.global_ctors");
1887 if (!GV)
1888 return;
1889
1890 ConstantArray *CA = dyn_cast<ConstantArray>(GV->getInitializer());
1891 if (!CA)
1892 return;
1893
1894 for (Use &OP : CA->operands()) {
1895 if (isa<ConstantAggregateZero>(OP)) continue;
1896 ConstantStruct *CS = cast<ConstantStruct>(OP);
1897
1898 // Must have a function or null ptr.
1899 if (Function *F = dyn_cast<Function>(CS->getOperand(1))) {
1900 if (F->getName() == kAsanModuleCtorName) continue;
1901 auto *Priority = cast<ConstantInt>(CS->getOperand(0));
1902 // Don't instrument CTORs that will run before asan.module_ctor.
1903 if (Priority->getLimitedValue() <= GetCtorAndDtorPriority(TargetTriple))
1904 continue;
1905 poisonOneInitializer(*F, ModuleName);
1906 }
1907 }
1908 }
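// For context (an illustrative sketch): llvm.global_ctors entries look like
//   { i32 65535, ptr @_GLOBAL__sub_I_foo.cpp, ptr null }
// Each initializer selected above gets a poison call on entry and an
// unpoison call before every return, via poisonOneInitializer.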
1909
1910 const GlobalVariable *
1911 ModuleAddressSanitizer::getExcludedAliasedGlobal(const GlobalAlias &GA) const {
1912 // In case this function should be expanded to include rules that do not just
1913 // apply when CompileKernel is true, either guard all existing rules with an
1914 // 'if (CompileKernel) { ... }' or be absolutely sure that all these rules
1915 // should also apply to user space.
1916 assert(CompileKernel && "Only expecting to be called when compiling kernel");
1917
1918 const Constant *C = GA.getAliasee();
1919
1920 // When compiling the kernel, globals that are aliased by symbols prefixed
1921 // by "__" are special and cannot be padded with a redzone.
1922 if (GA.getName().starts_with("__"))
1923 return dyn_cast<GlobalVariable>(C->stripPointerCastsAndAliases());
1924
1925 return nullptr;
1926 }
1927
1928 bool ModuleAddressSanitizer::shouldInstrumentGlobal(GlobalVariable *G) const {
1929 Type *Ty = G->getValueType();
1930 LLVM_DEBUG(dbgs() << "GLOBAL: " << *G << "\n");
1931
1932 if (G->hasSanitizerMetadata() && G->getSanitizerMetadata().NoAddress)
1933 return false;
1934 if (!Ty->isSized()) return false;
1935 if (!G->hasInitializer()) return false;
1936 // Globals in address spaces 1 and 4 are supported for AMDGPU.
1937 if (G->getAddressSpace() &&
1938 !(TargetTriple.isAMDGPU() && !isUnsupportedAMDGPUAddrspace(G)))
1939 return false;
1940 if (GlobalWasGeneratedByCompiler(G)) return false; // Our own globals.
1941 // Two problems with thread-locals:
1942 // - The address of the main thread's copy can't be computed at link-time.
1943 // - Need to poison all copies, not just the main thread's one.
1944 if (G->isThreadLocal()) return false;
1945 // For now, just ignore this global if the alignment is large.
1946 if (G->getAlign() && *G->getAlign() > getMinRedzoneSizeForGlobal()) return false;
1947
1948 // For non-COFF targets, only instrument globals known to be defined by this
1949 // TU.
1950 // FIXME: We can instrument comdat globals on ELF if we are using the
1951 // GC-friendly metadata scheme.
1952 if (!TargetTriple.isOSBinFormatCOFF()) {
1953 if (!G->hasExactDefinition() || G->hasComdat())
1954 return false;
1955 } else {
1956 // On COFF, don't instrument non-ODR linkages.
1957 if (G->isInterposable())
1958 return false;
1959 }
1960
1961 // If a comdat is present, it must have a selection kind that implies ODR
1962 // semantics: no duplicates, any, or exact match.
1963 if (Comdat *C = G->getComdat()) {
1964 switch (C->getSelectionKind()) {
1965 case Comdat::Any:
1966 case Comdat::ExactMatch:
1967 case Comdat::NoDeduplicate:
1968 break;
1969 case Comdat::Largest:
1970 case Comdat::SameSize:
1971 return false;
1972 }
1973 }
1974
1975 if (G->hasSection()) {
1976 // The kernel uses explicit sections for mostly special global variables
1977 // that we should not instrument. E.g. the kernel may rely on their layout
1978 // without redzones, or remove them at link time ("discard.*"), etc.
1979 if (CompileKernel)
1980 return false;
1981
1982 StringRef Section = G->getSection();
1983
1984 // Globals from llvm.metadata aren't emitted, do not instrument them.
1985 if (Section == "llvm.metadata") return false;
1986 // Do not instrument globals from special LLVM sections.
1987 if (Section.contains("__llvm") || Section.contains("__LLVM"))
1988 return false;
1989
1990 // Do not instrument function pointers to initialization and termination
1991 // routines: dynamic linker will not properly handle redzones.
1992 if (Section.starts_with(".preinit_array") ||
1993 Section.starts_with(".init_array") ||
1994 Section.starts_with(".fini_array")) {
1995 return false;
1996 }
1997
1998 // Do not instrument user-defined sections (with names resembling
1999 // valid C identifiers)
2000 if (TargetTriple.isOSBinFormatELF()) {
2001 if (llvm::all_of(Section,
2002 [](char c) { return llvm::isAlnum(c) || c == '_'; }))
2003 return false;
2004 }
2005
2006 // On COFF, if the section name contains '$', it is highly likely that the
2007 // user is using section sorting to create an array of globals similar to
2008 // the way initialization callbacks are registered in .init_array and
2009 // .CRT$XCU. The ATL also registers things in .ATL$__[azm]. Adding redzones
2010 // to such globals is counterproductive, because the intent is that they
2011 // will form an array, and out-of-bounds accesses are expected.
2012 // See https://github.com/google/sanitizers/issues/305
2013 // and http://msdn.microsoft.com/en-US/en-en/library/bb918180(v=vs.120).aspx
2014 if (TargetTriple.isOSBinFormatCOFF() && Section.contains('$')) {
2015 LLVM_DEBUG(dbgs() << "Ignoring global in sorted section (contains '$'): "
2016 << *G << "\n");
2017 return false;
2018 }
2019
2020 if (TargetTriple.isOSBinFormatMachO()) {
2021 StringRef ParsedSegment, ParsedSection;
2022 unsigned TAA = 0, StubSize = 0;
2023 bool TAAParsed;
2024 cantFail(MCSectionMachO::ParseSectionSpecifier(
2025 Section, ParsedSegment, ParsedSection, TAA, TAAParsed, StubSize));
2026
2027 // Ignore the globals from the __OBJC section. The ObjC runtime assumes
2028 // those conform to /usr/lib/objc/runtime.h, so we can't add redzones to
2029 // them.
2030 if (ParsedSegment == "__OBJC" ||
2031 (ParsedSegment == "__DATA" && ParsedSection.starts_with("__objc_"))) {
2032 LLVM_DEBUG(dbgs() << "Ignoring ObjC runtime global: " << *G << "\n");
2033 return false;
2034 }
2035 // See https://github.com/google/sanitizers/issues/32
2036 // Constant CFString instances are compiled in the following way:
2037 // -- the string buffer is emitted into
2038 // __TEXT,__cstring,cstring_literals
2039 // -- the constant NSConstantString structure referencing that buffer
2040 // is placed into __DATA,__cfstring
2041 // Therefore there's no point in placing redzones into __DATA,__cfstring.
2042 // Moreover, it causes the linker to crash on OS X 10.7
2043 if (ParsedSegment == "__DATA" && ParsedSection == "__cfstring") {
2044 LLVM_DEBUG(dbgs() << "Ignoring CFString: " << *G << "\n");
2045 return false;
2046 }
2047 // The linker merges the contents of cstring_literals and removes the
2048 // trailing zeroes.
2049 if (ParsedSegment == "__TEXT" && (TAA & MachO::S_CSTRING_LITERALS)) {
2050 LLVM_DEBUG(dbgs() << "Ignoring a cstring literal: " << *G << "\n");
2051 return false;
2052 }
2053 }
2054 }
2055
2056 if (CompileKernel) {
2057 // Globals that are prefixed by "__" are special and cannot be padded with a
2058 // redzone.
2059 if (G->getName().starts_with("__"))
2060 return false;
2061 }
2062
2063 return true;
2064 }
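// Illustrative outcomes: a thread-local, a global in section "llvm.metadata",
// or a COFF global in a "$"-sorted section is rejected above, while an
// ordinary global defined in this TU in a normal section is instrumented.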
2065
2066 // On Mach-O platforms, we emit global metadata in a separate section of the
2067 // binary in order to allow the linker to properly dead strip. This is only
2068 // supported on recent versions of ld64.
2069 bool ModuleAddressSanitizer::ShouldUseMachOGlobalsSection() const {
2070 if (!TargetTriple.isOSBinFormatMachO())
2071 return false;
2072
2073 if (TargetTriple.isMacOSX() && !TargetTriple.isMacOSXVersionLT(10, 11))
2074 return true;
2075 if (TargetTriple.isiOS() /* or tvOS */ && !TargetTriple.isOSVersionLT(9))
2076 return true;
2077 if (TargetTriple.isWatchOS() && !TargetTriple.isOSVersionLT(2))
2078 return true;
2079 if (TargetTriple.isDriverKit())
2080 return true;
2081 if (TargetTriple.isXROS())
2082 return true;
2083
2084 return false;
2085 }
2086
2087 StringRef ModuleAddressSanitizer::getGlobalMetadataSection() const {
2088 switch (TargetTriple.getObjectFormat()) {
2089 case Triple::COFF: return ".ASAN$GL";
2090 case Triple::ELF: return "asan_globals";
2091 case Triple::MachO: return "__DATA,__asan_globals,regular";
2092 case Triple::Wasm:
2093 case Triple::GOFF:
2094 case Triple::SPIRV:
2095 case Triple::XCOFF:
2096 case Triple::DXContainer:
2097 report_fatal_error(
2098 "ModuleAddressSanitizer not implemented for object file format");
2099 case Triple::UnknownObjectFormat:
2100 break;
2101 }
2102 llvm_unreachable("unsupported object format");
2103 }
2104
2105 void ModuleAddressSanitizer::initializeCallbacks(Module &M) {
2106 IRBuilder<> IRB(*C);
2107
2108 // Declare our poisoning and unpoisoning functions.
2109 AsanPoisonGlobals =
2110 M.getOrInsertFunction(kAsanPoisonGlobalsName, IRB.getVoidTy(), IntptrTy);
2111 AsanUnpoisonGlobals =
2112 M.getOrInsertFunction(kAsanUnpoisonGlobalsName, IRB.getVoidTy());
2113
2114 // Declare functions that register/unregister globals.
2115 AsanRegisterGlobals = M.getOrInsertFunction(
2116 kAsanRegisterGlobalsName, IRB.getVoidTy(), IntptrTy, IntptrTy);
2117 AsanUnregisterGlobals = M.getOrInsertFunction(
2118 kAsanUnregisterGlobalsName, IRB.getVoidTy(), IntptrTy, IntptrTy);
2119
2120 // Declare the functions that find globals in a shared object and then invoke
2121 // the (un)register function on them.
2122 AsanRegisterImageGlobals = M.getOrInsertFunction(
2123 kAsanRegisterImageGlobalsName, IRB.getVoidTy(), IntptrTy);
2124 AsanUnregisterImageGlobals = M.getOrInsertFunction(
2125 kAsanUnregisterImageGlobalsName, IRB.getVoidTy(), IntptrTy);
2126
2127 AsanRegisterElfGlobals =
2128 M.getOrInsertFunction(kAsanRegisterElfGlobalsName, IRB.getVoidTy(),
2129 IntptrTy, IntptrTy, IntptrTy);
2130 AsanUnregisterElfGlobals =
2131 M.getOrInsertFunction(kAsanUnregisterElfGlobalsName, IRB.getVoidTy(),
2132 IntptrTy, IntptrTy, IntptrTy);
2133 }
2134
2135 // Put the metadata and the instrumented global in the same group. This ensures
2136 // that the metadata is discarded if the instrumented global is discarded.
2137 void ModuleAddressSanitizer::SetComdatForGlobalMetadata(
2138 GlobalVariable *G, GlobalVariable *Metadata, StringRef InternalSuffix) {
2139 Module &M = *G->getParent();
2140 Comdat *C = G->getComdat();
2141 if (!C) {
2142 if (!G->hasName()) {
2143 // If G is unnamed, it must be internal. Give it an artificial name
2144 // so we can put it in a comdat.
2145 assert(G->hasLocalLinkage());
2146 G->setName(Twine(kAsanGenPrefix) + "_anon_global");
2147 }
2148
2149 if (!InternalSuffix.empty() && G->hasLocalLinkage()) {
2150 std::string Name = std::string(G->getName());
2151 Name += InternalSuffix;
2152 C = M.getOrInsertComdat(Name);
2153 } else {
2154 C = M.getOrInsertComdat(G->getName());
2155 }
2156
2157 // Make this IMAGE_COMDAT_SELECT_NODUPLICATES on COFF. Also upgrade private
2158 // linkage to internal linkage so that a symbol table entry is emitted. This
2159 // is necessary in order to create the comdat group.
2160 if (TargetTriple.isOSBinFormatCOFF()) {
2161 C->setSelectionKind(Comdat::NoDeduplicate);
2162 if (G->hasPrivateLinkage())
2163 G->setLinkage(GlobalValue::InternalLinkage);
2164 }
2165 G->setComdat(C);
2166 }
2167
2168 assert(G->hasComdat());
2169 Metadata->setComdat(G->getComdat());
2170 }
2171
2172 // Create a separate metadata global and put it in the appropriate ASan
2173 // global registration section.
2174 GlobalVariable *
2175 ModuleAddressSanitizer::CreateMetadataGlobal(Module &M, Constant *Initializer,
2176 StringRef OriginalName) {
2177 auto Linkage = TargetTriple.isOSBinFormatMachO()
2178 ? GlobalVariable::InternalLinkage
2179 : GlobalVariable::PrivateLinkage;
2180 GlobalVariable *Metadata = new GlobalVariable(
2181 M, Initializer->getType(), false, Linkage, Initializer,
2182 Twine("__asan_global_") + GlobalValue::dropLLVMManglingEscape(OriginalName));
2183 Metadata->setSection(getGlobalMetadataSection());
2184 // Place metadata in a large section for x86-64 ELF binaries to mitigate
2185 // relocation pressure.
2186 setGlobalVariableLargeSection(TargetTriple, *Metadata);
2187 return Metadata;
2188 }
2189
2190 Instruction *ModuleAddressSanitizer::CreateAsanModuleDtor(Module &M) {
2191 AsanDtorFunction = Function::createWithDefaultAttr(
2192 FunctionType::get(Type::getVoidTy(*C), false),
2193 GlobalValue::InternalLinkage, 0, kAsanModuleDtorName, &M);
2194 AsanDtorFunction->addFnAttr(Attribute::NoUnwind);
2195 // Ensure Dtor cannot be discarded, even if in a comdat.
2196 appendToUsed(M, {AsanDtorFunction});
2197 BasicBlock *AsanDtorBB = BasicBlock::Create(*C, "", AsanDtorFunction);
2198
2199 return ReturnInst::Create(*C, AsanDtorBB);
2200 }
2201
2202 void ModuleAddressSanitizer::InstrumentGlobalsCOFF(
2203 IRBuilder<> &IRB, Module &M, ArrayRef<GlobalVariable *> ExtendedGlobals,
2204 ArrayRef<Constant *> MetadataInitializers) {
2205 assert(ExtendedGlobals.size() == MetadataInitializers.size());
2206 auto &DL = M.getDataLayout();
2207
2208 SmallVector<GlobalValue *, 16> MetadataGlobals(ExtendedGlobals.size());
2209 for (size_t i = 0; i < ExtendedGlobals.size(); i++) {
2210 Constant *Initializer = MetadataInitializers[i];
2211 GlobalVariable *G = ExtendedGlobals[i];
2212 GlobalVariable *Metadata =
2213 CreateMetadataGlobal(M, Initializer, G->getName());
2214 MDNode *MD = MDNode::get(M.getContext(), ValueAsMetadata::get(G));
2215 Metadata->setMetadata(LLVMContext::MD_associated, MD);
2216 MetadataGlobals[i] = Metadata;
2217
2218 // The MSVC linker always inserts padding when linking incrementally. We
2219 // cope with that by aligning each struct to its size, which must be a power
2220 // of two.
2221 unsigned SizeOfGlobalStruct = DL.getTypeAllocSize(Initializer->getType());
2222 assert(isPowerOf2_32(SizeOfGlobalStruct) &&
2223 "global metadata will not be padded appropriately");
2224 Metadata->setAlignment(assumeAligned(SizeOfGlobalStruct));
2225
2226 SetComdatForGlobalMetadata(G, Metadata, "");
2227 }
2228
2229 // Update llvm.compiler.used, adding the new metadata globals. This is
2230 // needed so that during LTO these variables stay alive.
2231 if (!MetadataGlobals.empty())
2232 appendToCompilerUsed(M, MetadataGlobals);
2233 }
2234
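// Note on the ELF scheme below: getGlobalMetadataSection() returns a section
// whose name is a valid C identifier ("asan_globals"), so the linker
// synthesizes __start_asan_globals/__stop_asan_globals symbols, which the
// runtime uses to walk all metadata in the loaded image.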
2235 void ModuleAddressSanitizer::instrumentGlobalsELF(
2236 IRBuilder<> &IRB, Module &M, ArrayRef<GlobalVariable *> ExtendedGlobals,
2237 ArrayRef<Constant *> MetadataInitializers,
2238 const std::string &UniqueModuleId) {
2239 assert(ExtendedGlobals.size() == MetadataInitializers.size());
2240
2241 // Putting globals in a comdat changes the semantics and can potentially
2242 // cause false-negative ODR violations at link time. If ODR indicators are
2243 // used, we keep the comdat sections, as link-time ODR violations will be
2244 // detected on the ODR indicator symbols.
2245 bool UseComdatForGlobalsGC = UseOdrIndicator && !UniqueModuleId.empty();
2246
2247 SmallVector<GlobalValue *, 16> MetadataGlobals(ExtendedGlobals.size());
2248 for (size_t i = 0; i < ExtendedGlobals.size(); i++) {
2249 GlobalVariable *G = ExtendedGlobals[i];
2250 GlobalVariable *Metadata =
2251 CreateMetadataGlobal(M, MetadataInitializers[i], G->getName());
2252 MDNode *MD = MDNode::get(M.getContext(), ValueAsMetadata::get(G));
2253 Metadata->setMetadata(LLVMContext::MD_associated, MD);
2254 MetadataGlobals[i] = Metadata;
2255
2256 if (UseComdatForGlobalsGC)
2257 SetComdatForGlobalMetadata(G, Metadata, UniqueModuleId);
2258 }
2259
2260 // Update llvm.compiler.used, adding the new metadata globals. This is
2261 // needed so that during LTO these variables stay alive.
2262 if (!MetadataGlobals.empty())
2263 appendToCompilerUsed(M, MetadataGlobals);
2264
2265 // RegisteredFlag serves two purposes. First, we can pass it to dladdr()
2266 // to look up the loaded image that contains it. Second, we can store in it
2267 // whether registration has already occurred, to prevent duplicate
2268 // registration.
2269 //
2270 // Common linkage ensures that there is only one global per shared library.
2271 GlobalVariable *RegisteredFlag = new GlobalVariable(
2272 M, IntptrTy, false, GlobalVariable::CommonLinkage,
2273 ConstantInt::get(IntptrTy, 0), kAsanGlobalsRegisteredFlagName);
2274 RegisteredFlag->setVisibility(GlobalVariable::HiddenVisibility);
2275
2276 // Create start and stop symbols.
2277 GlobalVariable *StartELFMetadata = new GlobalVariable(
2278 M, IntptrTy, false, GlobalVariable::ExternalWeakLinkage, nullptr,
2279 "__start_" + getGlobalMetadataSection());
2280 StartELFMetadata->setVisibility(GlobalVariable::HiddenVisibility);
2281 GlobalVariable *StopELFMetadata = new GlobalVariable(
2282 M, IntptrTy, false, GlobalVariable::ExternalWeakLinkage, nullptr,
2283 "__stop_" + getGlobalMetadataSection());
2284 StopELFMetadata->setVisibility(GlobalVariable::HiddenVisibility);
2285
2286 // Create a call to register the globals with the runtime.
2287 if (ConstructorKind == AsanCtorKind::Global)
2288 IRB.CreateCall(AsanRegisterElfGlobals,
2289 {IRB.CreatePointerCast(RegisteredFlag, IntptrTy),
2290 IRB.CreatePointerCast(StartELFMetadata, IntptrTy),
2291 IRB.CreatePointerCast(StopELFMetadata, IntptrTy)});
2292
2293 // We also need to unregister globals at the end, e.g., when a shared library
2294 // gets closed.
2295 if (DestructorKind != AsanDtorKind::None && !MetadataGlobals.empty()) {
2296 IRBuilder<> IrbDtor(CreateAsanModuleDtor(M));
2297 IrbDtor.CreateCall(AsanUnregisterElfGlobals,
2298 {IRB.CreatePointerCast(RegisteredFlag, IntptrTy),
2299 IRB.CreatePointerCast(StartELFMetadata, IntptrTy),
2300 IRB.CreatePointerCast(StopELFMetadata, IntptrTy)});
2301 }
2302 }
2303
2304 void ModuleAddressSanitizer::InstrumentGlobalsMachO(
2305 IRBuilder<> &IRB, Module &M, ArrayRef<GlobalVariable *> ExtendedGlobals,
2306 ArrayRef<Constant *> MetadataInitializers) {
2307 assert(ExtendedGlobals.size() == MetadataInitializers.size());
2308
2309 // On recent Mach-O platforms, use a structure which binds the liveness of
2310 // the global variable to the metadata struct. Keep a list of the "liveness"
2311 // globals we create so they can be added to llvm.compiler.used.
2312 StructType *LivenessTy = StructType::get(IntptrTy, IntptrTy);
2313 SmallVector<GlobalValue *, 16> LivenessGlobals(ExtendedGlobals.size());
2314
2315 for (size_t i = 0; i < ExtendedGlobals.size(); i++) {
2316 Constant *Initializer = MetadataInitializers[i];
2317 GlobalVariable *G = ExtendedGlobals[i];
2318 GlobalVariable *Metadata =
2319 CreateMetadataGlobal(M, Initializer, G->getName());
2320
2321 // On recent Mach-O platforms, we emit the global metadata in a way that
2322 // allows the linker to properly strip dead globals.
2323 auto LivenessBinder =
2324 ConstantStruct::get(LivenessTy, Initializer->getAggregateElement(0u),
2325 ConstantExpr::getPointerCast(Metadata, IntptrTy));
2326 GlobalVariable *Liveness = new GlobalVariable(
2327 M, LivenessTy, false, GlobalVariable::InternalLinkage, LivenessBinder,
2328 Twine("__asan_binder_") + G->getName());
2329 Liveness->setSection("__DATA,__asan_liveness,regular,live_support");
2330 LivenessGlobals[i] = Liveness;
2331 }
2332
2333 // Update llvm.compiler.used, adding the new liveness globals. This is
2334 // needed so that during LTO these variables stay alive. The alternative
2335 // would be to have the linker handle the LTO symbols, but libLTO's current
2336 // API does not expose access to the section for each symbol.
2337 if (!LivenessGlobals.empty())
2338 appendToCompilerUsed(M, LivenessGlobals);
2339
2340 // RegisteredFlag serves two purposes. First, we can pass it to dladdr()
2341 // to look up the loaded image that contains it. Second, we can store in it
2342 // whether registration has already occurred, to prevent duplicate
2343 // registration.
2344 //
2345 // Common linkage ensures that there is only one global per shared library.
2346 GlobalVariable *RegisteredFlag = new GlobalVariable(
2347 M, IntptrTy, false, GlobalVariable::CommonLinkage,
2348 ConstantInt::get(IntptrTy, 0), kAsanGlobalsRegisteredFlagName);
2349 RegisteredFlag->setVisibility(GlobalVariable::HiddenVisibility);
2350
2351 if (ConstructorKind == AsanCtorKind::Global)
2352 IRB.CreateCall(AsanRegisterImageGlobals,
2353 {IRB.CreatePointerCast(RegisteredFlag, IntptrTy)});
2354
2355 // We also need to unregister globals at the end, e.g., when a shared library
2356 // gets closed.
2357 if (DestructorKind != AsanDtorKind::None) {
2358 IRBuilder<> IrbDtor(CreateAsanModuleDtor(M));
2359 IrbDtor.CreateCall(AsanUnregisterImageGlobals,
2360 {IRB.CreatePointerCast(RegisteredFlag, IntptrTy)});
2361 }
2362 }
2363
2364 void ModuleAddressSanitizer::InstrumentGlobalsWithMetadataArray(
2365 IRBuilder<> &IRB, Module &M, ArrayRef<GlobalVariable *> ExtendedGlobals,
2366 ArrayRef<Constant *> MetadataInitializers) {
2367 assert(ExtendedGlobals.size() == MetadataInitializers.size());
2368 unsigned N = ExtendedGlobals.size();
2369 assert(N > 0);
2370
2371 // On platforms that don't have a custom metadata section, we emit an array
2372 // of global metadata structures.
2373 ArrayType *ArrayOfGlobalStructTy =
2374 ArrayType::get(MetadataInitializers[0]->getType(), N);
2375 auto AllGlobals = new GlobalVariable(
2376 M, ArrayOfGlobalStructTy, false, GlobalVariable::InternalLinkage,
2377 ConstantArray::get(ArrayOfGlobalStructTy, MetadataInitializers), "");
2378 if (Mapping.Scale > 3)
2379 AllGlobals->setAlignment(Align(1ULL << Mapping.Scale));
2380
2381 if (ConstructorKind == AsanCtorKind::Global)
2382 IRB.CreateCall(AsanRegisterGlobals,
2383 {IRB.CreatePointerCast(AllGlobals, IntptrTy),
2384 ConstantInt::get(IntptrTy, N)});
2385
2386 // We also need to unregister globals at the end, e.g., when a shared library
2387 // gets closed.
2388 if (DestructorKind != AsanDtorKind::None) {
2389 IRBuilder<> IrbDtor(CreateAsanModuleDtor(M));
2390 IrbDtor.CreateCall(AsanUnregisterGlobals,
2391 {IRB.CreatePointerCast(AllGlobals, IntptrTy),
2392 ConstantInt::get(IntptrTy, N)});
2393 }
2394 }
2395
2396 // This function replaces all global variables with new variables that have
2397 // trailing redzones. It also creates a function that poisons
2398 // redzones and inserts this function into llvm.global_ctors.
2399 // Sets *CtorComdat to true if the global registration code emitted into the
2400 // asan constructor is comdat-compatible.
2401 void ModuleAddressSanitizer::instrumentGlobals(IRBuilder<> &IRB, Module &M,
2402 bool *CtorComdat) {
2403 // Build set of globals that are aliased by some GA, where
2404 // getExcludedAliasedGlobal(GA) returns the relevant GlobalVariable.
2405 SmallPtrSet<const GlobalVariable *, 16> AliasedGlobalExclusions;
2406 if (CompileKernel) {
2407 for (auto &GA : M.aliases()) {
2408 if (const GlobalVariable *GV = getExcludedAliasedGlobal(GA))
2409 AliasedGlobalExclusions.insert(GV);
2410 }
2411 }
2412
2413 SmallVector<GlobalVariable *, 16> GlobalsToChange;
2414 for (auto &G : M.globals()) {
2415 if (!AliasedGlobalExclusions.count(&G) && shouldInstrumentGlobal(&G))
2416 GlobalsToChange.push_back(&G);
2417 }
2418
2419 size_t n = GlobalsToChange.size();
2420 auto &DL = M.getDataLayout();
2421
2422 // A global is described by a structure
2423 // size_t beg;
2424 // size_t size;
2425 // size_t size_with_redzone;
2426 // const char *name;
2427 // const char *module_name;
2428 // size_t has_dynamic_init;
2429 // size_t padding_for_windows_msvc_incremental_link;
2430 // size_t odr_indicator;
2431 // We initialize an array of such structures and pass it to a run-time call.
2432 StructType *GlobalStructTy =
2433 StructType::get(IntptrTy, IntptrTy, IntptrTy, IntptrTy, IntptrTy,
2434 IntptrTy, IntptrTy, IntptrTy);
2435 SmallVector<GlobalVariable *, 16> NewGlobals(n);
2436 SmallVector<Constant *, 16> Initializers(n);
2437
2438 bool HasDynamicallyInitializedGlobals = false;
2439
2440 // We shouldn't merge identical module names, as this string serves as a
2441 // unique module ID at runtime.
2442 GlobalVariable *ModuleName =
2443 n != 0
2444 ? createPrivateGlobalForString(M, M.getModuleIdentifier(),
2445 /*AllowMerging*/ false, kAsanGenPrefix)
2446 : nullptr;
2447
2448 for (size_t i = 0; i < n; i++) {
2449 GlobalVariable *G = GlobalsToChange[i];
2450
2451 GlobalValue::SanitizerMetadata MD;
2452 if (G->hasSanitizerMetadata())
2453 MD = G->getSanitizerMetadata();
2454
2455 // The runtime library tries demangling symbol names in the descriptor but
2456 // functionality like __cxa_demangle may be unavailable (e.g.
2457 // -static-libstdc++). So we demangle the symbol names here.
2458 std::string NameForGlobal = G->getName().str();
2459 GlobalVariable *Name =
2460 createPrivateGlobalForString(M, llvm::demangle(NameForGlobal),
2461 /*AllowMerging*/ true, kAsanGenPrefix);
2462
2463 Type *Ty = G->getValueType();
2464 const uint64_t SizeInBytes = DL.getTypeAllocSize(Ty);
2465 const uint64_t RightRedzoneSize = getRedzoneSizeForGlobal(SizeInBytes);
2466 Type *RightRedZoneTy = ArrayType::get(IRB.getInt8Ty(), RightRedzoneSize);
2467
2468 StructType *NewTy = StructType::get(Ty, RightRedZoneTy);
2469 Constant *NewInitializer = ConstantStruct::get(
2470 NewTy, G->getInitializer(), Constant::getNullValue(RightRedZoneTy));
2471
2472 // Create a new global variable with enough space for a redzone.
2473 GlobalValue::LinkageTypes Linkage = G->getLinkage();
2474 if (G->isConstant() && Linkage == GlobalValue::PrivateLinkage)
2475 Linkage = GlobalValue::InternalLinkage;
2476 GlobalVariable *NewGlobal = new GlobalVariable(
2477 M, NewTy, G->isConstant(), Linkage, NewInitializer, "", G,
2478 G->getThreadLocalMode(), G->getAddressSpace());
2479 NewGlobal->copyAttributesFrom(G);
2480 NewGlobal->setComdat(G->getComdat());
2481 NewGlobal->setAlignment(Align(getMinRedzoneSizeForGlobal()));
2482 // Don't fold globals with redzones. The ODR violation detector and redzone
2483 // poisoning implicitly create a dependence on the global's address, so it
2484 // is no longer valid for it to be marked unnamed_addr.
2485 NewGlobal->setUnnamedAddr(GlobalValue::UnnamedAddr::None);
2486
2487 // Move null-terminated C strings to "__asan_cstring" section on Darwin.
2488 if (TargetTriple.isOSBinFormatMachO() && !G->hasSection() &&
2489 G->isConstant()) {
2490 auto Seq = dyn_cast<ConstantDataSequential>(G->getInitializer());
2491 if (Seq && Seq->isCString())
2492 NewGlobal->setSection("__TEXT,__asan_cstring,regular");
2493 }
2494
2495 // Transfer the debug info and type metadata. The payload starts at offset
2496 // zero so we can copy the metadata over as is.
2497 NewGlobal->copyMetadata(G, 0);
2498
2499 Value *Indices2[2];
2500 Indices2[0] = IRB.getInt32(0);
2501 Indices2[1] = IRB.getInt32(0);
2502
2503 G->replaceAllUsesWith(
2504 ConstantExpr::getGetElementPtr(NewTy, NewGlobal, Indices2, true));
2505 NewGlobal->takeName(G);
2506 G->eraseFromParent();
2507 NewGlobals[i] = NewGlobal;
2508
2509 Constant *ODRIndicator = ConstantPointerNull::get(PtrTy);
2510 GlobalValue *InstrumentedGlobal = NewGlobal;
2511
2512 bool CanUsePrivateAliases =
2513 TargetTriple.isOSBinFormatELF() || TargetTriple.isOSBinFormatMachO() ||
2514 TargetTriple.isOSBinFormatWasm();
2515 if (CanUsePrivateAliases && UsePrivateAlias) {
2516 // Create a local alias for NewGlobal to avoid a crash on ODR violations
2517 // between instrumented and non-instrumented libraries.
2518 InstrumentedGlobal =
2519 GlobalAlias::create(GlobalValue::PrivateLinkage, "", NewGlobal);
2520 }
2521
2522 // ODR violations should not happen for local linkage.
2523 if (NewGlobal->hasLocalLinkage()) {
2524 ODRIndicator =
2525 ConstantExpr::getIntToPtr(ConstantInt::get(IntptrTy, -1), PtrTy);
2526 } else if (UseOdrIndicator) {
2527 // With local aliases, we need to provide another externally visible
2528 // symbol __odr_asan_XXX to detect ODR violation.
2529 auto *ODRIndicatorSym =
2530 new GlobalVariable(M, IRB.getInt8Ty(), false, Linkage,
2531 Constant::getNullValue(IRB.getInt8Ty()),
2532 kODRGenPrefix + NameForGlobal, nullptr,
2533 NewGlobal->getThreadLocalMode());
2534
2535 // Set meaningful attributes for indicator symbol.
2536 ODRIndicatorSym->setVisibility(NewGlobal->getVisibility());
2537 ODRIndicatorSym->setDLLStorageClass(NewGlobal->getDLLStorageClass());
2538 ODRIndicatorSym->setAlignment(Align(1));
2539 ODRIndicator = ODRIndicatorSym;
2540 }
2541
2542 Constant *Initializer = ConstantStruct::get(
2543 GlobalStructTy,
2544 ConstantExpr::getPointerCast(InstrumentedGlobal, IntptrTy),
2545 ConstantInt::get(IntptrTy, SizeInBytes),
2546 ConstantInt::get(IntptrTy, SizeInBytes + RightRedzoneSize),
2547 ConstantExpr::getPointerCast(Name, IntptrTy),
2548 ConstantExpr::getPointerCast(ModuleName, IntptrTy),
2549 ConstantInt::get(IntptrTy, MD.IsDynInit),
2550 Constant::getNullValue(IntptrTy),
2551 ConstantExpr::getPointerCast(ODRIndicator, IntptrTy));
2552
2553 if (ClInitializers && MD.IsDynInit)
2554 HasDynamicallyInitializedGlobals = true;
2555
2556 LLVM_DEBUG(dbgs() << "NEW GLOBAL: " << *NewGlobal << "\n");
2557
2558 Initializers[i] = Initializer;
2559 }
2560
2561 // Add instrumented globals to the llvm.compiler.used list to prevent LTO
2562 // from ConstantMerge'ing them.
2563 SmallVector<GlobalValue *, 16> GlobalsToAddToUsedList;
2564 for (size_t i = 0; i < n; i++) {
2565 GlobalVariable *G = NewGlobals[i];
2566 if (G->getName().empty()) continue;
2567 GlobalsToAddToUsedList.push_back(G);
2568 }
2569 appendToCompilerUsed(M, ArrayRef<GlobalValue *>(GlobalsToAddToUsedList));
2570
2571 if (UseGlobalsGC && TargetTriple.isOSBinFormatELF()) {
2572 // Use COMDAT and register globals even if n == 0 to ensure that (a) the
2573 // linkage unit will only have one module constructor, and (b) the register
2574 // function will be called. The module destructor is not created when n ==
2575 // 0.
2576 *CtorComdat = true;
2577 instrumentGlobalsELF(IRB, M, NewGlobals, Initializers,
2578 getUniqueModuleId(&M));
2579 } else if (n == 0) {
2580 // When UseGlobalsGC is false, COMDAT can still be used if n == 0, because
2581 // all compile units will have identical module constructor/destructor.
2582 *CtorComdat = TargetTriple.isOSBinFormatELF();
2583 } else {
2584 *CtorComdat = false;
2585 if (UseGlobalsGC && TargetTriple.isOSBinFormatCOFF()) {
2586 InstrumentGlobalsCOFF(IRB, M, NewGlobals, Initializers);
2587 } else if (UseGlobalsGC && ShouldUseMachOGlobalsSection()) {
2588 InstrumentGlobalsMachO(IRB, M, NewGlobals, Initializers);
2589 } else {
2590 InstrumentGlobalsWithMetadataArray(IRB, M, NewGlobals, Initializers);
2591 }
2592 }
2593
2594 // Create calls for poisoning before initializers run and unpoisoning after.
2595 if (HasDynamicallyInitializedGlobals)
2596 createInitializerPoisonCalls(M, ModuleName);
2597
2598 LLVM_DEBUG(dbgs() << M);
2599 }
2600
2601 uint64_t
2602 ModuleAddressSanitizer::getRedzoneSizeForGlobal(uint64_t SizeInBytes) const {
2603 constexpr uint64_t kMaxRZ = 1 << 18;
2604 const uint64_t MinRZ = getMinRedzoneSizeForGlobal();
2605
2606 uint64_t RZ = 0;
2607 if (SizeInBytes <= MinRZ / 2) {
2608 // Reduce redzone size for small size objects, e.g. int, char[1]. MinRZ is
2609 // at least 32 bytes, optimize when SizeInBytes is less than or equal to
2610 // half of MinRZ.
2611 RZ = MinRZ - SizeInBytes;
2612 } else {
2613 // Calculate RZ, where MinRZ <= RZ <= MaxRZ, and RZ ~ 1/4 * SizeInBytes.
2614 RZ = std::clamp((SizeInBytes / MinRZ / 4) * MinRZ, MinRZ, kMaxRZ);
2615
2616 // Round up to multiple of MinRZ.
2617 if (SizeInBytes % MinRZ)
2618 RZ += MinRZ - (SizeInBytes % MinRZ);
2619 }
2620
2621 assert((RZ + SizeInBytes) % MinRZ == 0);
2622
2623 return RZ;
2624 }
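// Worked examples (illustrative, assuming MinRZ = 32): SizeInBytes = 4 gives
// RZ = 32 - 4 = 28, so object plus redzone is exactly 32; SizeInBytes = 100
// gives RZ = clamp((100 / 32 / 4) * 32, 32, kMaxRZ) = 32, and rounding the
// 100 % 32 = 4 remainder adds 28, so RZ = 60 and 100 + 60 = 160, a multiple
// of 32.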
2625
2626 int ModuleAddressSanitizer::GetAsanVersion(const Module &M) const {
2627 int LongSize = M.getDataLayout().getPointerSizeInBits();
2628 bool isAndroid = Triple(M.getTargetTriple()).isAndroid();
2629 int Version = 8;
2630 // 32-bit Android is one version ahead because of the switch to dynamic
2631 // shadow.
2632 Version += (LongSize == 32 && isAndroid);
2633 return Version;
2634 }
2635
2636 bool ModuleAddressSanitizer::instrumentModule(Module &M) {
2637 initializeCallbacks(M);
2638
2639 // Create a module constructor. A destructor is created lazily because not
2640 // all platforms and not all modules need it.
2641 if (ConstructorKind == AsanCtorKind::Global) {
2642 if (CompileKernel) {
2643 // The kernel always builds with its own runtime, and therefore does not
2644 // need the init and version check calls.
2645 AsanCtorFunction = createSanitizerCtor(M, kAsanModuleCtorName);
2646 } else {
2647 std::string AsanVersion = std::to_string(GetAsanVersion(M));
2648 std::string VersionCheckName =
2649 InsertVersionCheck ? (kAsanVersionCheckNamePrefix + AsanVersion) : "";
2650 std::tie(AsanCtorFunction, std::ignore) =
2651 createSanitizerCtorAndInitFunctions(M, kAsanModuleCtorName,
2652 kAsanInitName, /*InitArgTypes=*/{},
2653 /*InitArgs=*/{}, VersionCheckName);
2654 }
2655 }
2656
2657 bool CtorComdat = true;
2658 if (ClGlobals) {
2659 assert(AsanCtorFunction || ConstructorKind == AsanCtorKind::None);
2660 if (AsanCtorFunction) {
2661 IRBuilder<> IRB(AsanCtorFunction->getEntryBlock().getTerminator());
2662 instrumentGlobals(IRB, M, &CtorComdat);
2663 } else {
2664 IRBuilder<> IRB(*C);
2665 instrumentGlobals(IRB, M, &CtorComdat);
2666 }
2667 }
2668
2669 const uint64_t Priority = GetCtorAndDtorPriority(TargetTriple);
2670
2671 // Put the constructor and destructor in comdat if both
2672 // (1) global instrumentation is not TU-specific
2673 // (2) target is ELF.
2674 if (UseCtorComdat && TargetTriple.isOSBinFormatELF() && CtorComdat) {
2675 if (AsanCtorFunction) {
2676 AsanCtorFunction->setComdat(M.getOrInsertComdat(kAsanModuleCtorName));
2677 appendToGlobalCtors(M, AsanCtorFunction, Priority, AsanCtorFunction);
2678 }
2679 if (AsanDtorFunction) {
2680 AsanDtorFunction->setComdat(M.getOrInsertComdat(kAsanModuleDtorName));
2681 appendToGlobalDtors(M, AsanDtorFunction, Priority, AsanDtorFunction);
2682 }
2683 } else {
2684 if (AsanCtorFunction)
2685 appendToGlobalCtors(M, AsanCtorFunction, Priority);
2686 if (AsanDtorFunction)
2687 appendToGlobalDtors(M, AsanDtorFunction, Priority);
2688 }
2689
2690 return true;
2691 }
2692
void AddressSanitizer::initializeCallbacks(Module &M,
                                           const TargetLibraryInfo *TLI) {
  IRBuilder<> IRB(*C);
  // Create __asan_report* callbacks.
  // IsWrite, TypeSize and Exp are encoded in the function name.
  for (int Exp = 0; Exp < 2; Exp++) {
    for (size_t AccessIsWrite = 0; AccessIsWrite <= 1; AccessIsWrite++) {
      const std::string TypeStr = AccessIsWrite ? "store" : "load";
      const std::string ExpStr = Exp ? "exp_" : "";
      const std::string EndingStr = Recover ? "_noabort" : "";

      SmallVector<Type *, 3> Args2 = {IntptrTy, IntptrTy};
      SmallVector<Type *, 2> Args1{1, IntptrTy};
      AttributeList AL2;
      AttributeList AL1;
      if (Exp) {
        Type *ExpType = Type::getInt32Ty(*C);
        Args2.push_back(ExpType);
        Args1.push_back(ExpType);
        if (auto AK = TLI->getExtAttrForI32Param(false)) {
          AL2 = AL2.addParamAttribute(*C, 2, AK);
          AL1 = AL1.addParamAttribute(*C, 1, AK);
        }
      }
      AsanErrorCallbackSized[AccessIsWrite][Exp] = M.getOrInsertFunction(
          kAsanReportErrorTemplate + ExpStr + TypeStr + "_n" + EndingStr,
          FunctionType::get(IRB.getVoidTy(), Args2, false), AL2);

      AsanMemoryAccessCallbackSized[AccessIsWrite][Exp] = M.getOrInsertFunction(
          ClMemoryAccessCallbackPrefix + ExpStr + TypeStr + "N" + EndingStr,
          FunctionType::get(IRB.getVoidTy(), Args2, false), AL2);

      for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes;
           AccessSizeIndex++) {
        const std::string Suffix = TypeStr + itostr(1ULL << AccessSizeIndex);
        AsanErrorCallback[AccessIsWrite][Exp][AccessSizeIndex] =
            M.getOrInsertFunction(
                kAsanReportErrorTemplate + ExpStr + Suffix + EndingStr,
                FunctionType::get(IRB.getVoidTy(), Args1, false), AL1);

        AsanMemoryAccessCallback[AccessIsWrite][Exp][AccessSizeIndex] =
            M.getOrInsertFunction(
                ClMemoryAccessCallbackPrefix + ExpStr + Suffix + EndingStr,
                FunctionType::get(IRB.getVoidTy(), Args1, false), AL1);
      }
    }
  }
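
  // For reference, the concatenations above produce names like (assuming the
  // default "__asan_" access-callback prefix; illustrative, not exhaustive):
  //   __asan_report_load4, __asan_report_exp_store8,
  //   __asan_report_load_n_noabort, __asan_loadN, __asan_store16_noabort.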

  const std::string MemIntrinCallbackPrefix =
      (CompileKernel && !ClKasanMemIntrinCallbackPrefix)
          ? std::string("")
          : ClMemoryAccessCallbackPrefix;
  AsanMemmove = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memmove",
                                      PtrTy, PtrTy, PtrTy, IntptrTy);
  AsanMemcpy = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memcpy", PtrTy,
                                     PtrTy, PtrTy, IntptrTy);
  AsanMemset = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memset",
                                     TLI->getAttrList(C, {1}, /*Signed=*/false),
                                     PtrTy, PtrTy, IRB.getInt32Ty(), IntptrTy);

  AsanHandleNoReturnFunc =
      M.getOrInsertFunction(kAsanHandleNoReturnName, IRB.getVoidTy());

  AsanPtrCmpFunction =
      M.getOrInsertFunction(kAsanPtrCmp, IRB.getVoidTy(), IntptrTy, IntptrTy);
  AsanPtrSubFunction =
      M.getOrInsertFunction(kAsanPtrSub, IRB.getVoidTy(), IntptrTy, IntptrTy);
  if (Mapping.InGlobal)
    AsanShadowGlobal = M.getOrInsertGlobal("__asan_shadow",
                                           ArrayType::get(IRB.getInt8Ty(), 0));

  AMDGPUAddressShared =
      M.getOrInsertFunction(kAMDGPUAddressSharedName, IRB.getInt1Ty(), PtrTy);
  AMDGPUAddressPrivate =
      M.getOrInsertFunction(kAMDGPUAddressPrivateName, IRB.getInt1Ty(), PtrTy);
}

bool AddressSanitizer::maybeInsertAsanInitAtFunctionEntry(Function &F) {
  // For each NSObject descendant having a +load method, this method is invoked
  // by the ObjC runtime before any of the static constructors is called.
  // Therefore we need to instrument such methods with a call to __asan_init
  // at the beginning in order to initialize our runtime before any access to
  // the shadow memory.
  // We cannot just ignore these methods, because they may call other
  // instrumented functions.
  if (F.getName().contains(" load]")) {
    FunctionCallee AsanInitFunction =
        declareSanitizerInitFunction(*F.getParent(), kAsanInitName, {});
    IRBuilder<> IRB(&F.front(), F.front().begin());
    IRB.CreateCall(AsanInitFunction, {});
    return true;
  }
  return false;
}

bool AddressSanitizer::maybeInsertDynamicShadowAtFunctionEntry(Function &F) {
  // Generate code only when dynamic addressing is needed.
  if (Mapping.Offset != kDynamicShadowSentinel)
    return false;

  IRBuilder<> IRB(&F.front().front());
  if (Mapping.InGlobal) {
    if (ClWithIfuncSuppressRemat) {
      // An empty inline asm with input reg == output reg: effectively an
      // opaque pointer-to-int cast that the optimizer cannot rematerialize.
      InlineAsm *Asm = InlineAsm::get(
          FunctionType::get(IntptrTy, {AsanShadowGlobal->getType()}, false),
          StringRef(""), StringRef("=r,0"),
          /*hasSideEffects=*/false);
      LocalDynamicShadow =
          IRB.CreateCall(Asm, {AsanShadowGlobal}, ".asan.shadow");
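      // A minimal sketch of the IR this emits on a 64-bit target
      // (illustrative, not verbatim compiler output):
      //   %.asan.shadow = call i64 asm "", "=r,0"(ptr @__asan_shadow)
      // The "=r,0" constraint ties the output register to input 0, so the
      // result equals the address of @__asan_shadow but remains opaque to
      // rematerialization.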
    } else {
      LocalDynamicShadow =
          IRB.CreatePointerCast(AsanShadowGlobal, IntptrTy, ".asan.shadow");
    }
  } else {
    Value *GlobalDynamicAddress = F.getParent()->getOrInsertGlobal(
        kAsanShadowMemoryDynamicAddress, IntptrTy);
    LocalDynamicShadow = IRB.CreateLoad(IntptrTy, GlobalDynamicAddress);
  }
  return true;
}

void AddressSanitizer::markEscapedLocalAllocas(Function &F) {
  // Find the one possible call to llvm.localescape and pre-mark allocas passed
  // to it as uninteresting. This assumes we haven't started processing allocas
  // yet. This check is done up front because iterating the use list in
  // isInterestingAlloca would be algorithmically slower.
  assert(ProcessedAllocas.empty() && "must process localescape before allocas");

  // Try to get the declaration of llvm.localescape. If it's not in the module,
  // we can exit early.
  if (!F.getParent()->getFunction("llvm.localescape")) return;

  // Look for a call to llvm.localescape in the entry block. It can't be in
  // any other block.
  for (Instruction &I : F.getEntryBlock()) {
    IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I);
    if (II && II->getIntrinsicID() == Intrinsic::localescape) {
      // We found a call. Mark all the allocas passed in as uninteresting.
      for (Value *Arg : II->args()) {
        AllocaInst *AI = dyn_cast<AllocaInst>(Arg->stripPointerCasts());
        assert(AI && AI->isStaticAlloca() &&
               "non-static alloca arg to localescape");
        ProcessedAllocas[AI] = false;
      }
      break;
    }
  }
}

bool AddressSanitizer::suppressInstrumentationSiteForDebug(int &Instrumented) {
  bool ShouldInstrument =
      ClDebugMin < 0 || ClDebugMax < 0 ||
      (Instrumented >= ClDebugMin && Instrumented <= ClDebugMax);
  Instrumented++;
  return !ShouldInstrument;
}

bool AddressSanitizer::instrumentFunction(Function &F,
                                          const TargetLibraryInfo *TLI) {
  if (F.empty())
    return false;
  if (F.getLinkage() == GlobalValue::AvailableExternallyLinkage) return false;
  if (!ClDebugFunc.empty() && ClDebugFunc == F.getName()) return false;
  if (F.getName().starts_with("__asan_")) return false;

  bool FunctionModified = false;

  // If needed, insert __asan_init before checking for SanitizeAddress attr.
  // This function needs to be called even if the function body is not
  // instrumented.
  if (maybeInsertAsanInitAtFunctionEntry(F))
    FunctionModified = true;

  // Leave if the function doesn't need instrumentation.
  if (!F.hasFnAttribute(Attribute::SanitizeAddress)) return FunctionModified;

  if (F.hasFnAttribute(Attribute::DisableSanitizerInstrumentation))
    return FunctionModified;

  LLVM_DEBUG(dbgs() << "ASAN instrumenting:\n" << F << "\n");

  initializeCallbacks(*F.getParent(), TLI);

  FunctionStateRAII CleanupObj(this);

  FunctionModified |= maybeInsertDynamicShadowAtFunctionEntry(F);

  // We can't instrument allocas used with llvm.localescape. Only static
  // allocas can be passed to that intrinsic.
  markEscapedLocalAllocas(F);

  // We want to instrument every address only once per basic block (unless
  // there are calls between uses).
  SmallPtrSet<Value *, 16> TempsToInstrument;
  SmallVector<InterestingMemoryOperand, 16> OperandsToInstrument;
  SmallVector<MemIntrinsic *, 16> IntrinToInstrument;
  SmallVector<Instruction *, 8> NoReturnCalls;
  SmallVector<BasicBlock *, 16> AllBlocks;
  SmallVector<Instruction *, 16> PointerComparisonsOrSubtracts;

  // Fill the set of memory operations to instrument.
  for (auto &BB : F) {
    AllBlocks.push_back(&BB);
    TempsToInstrument.clear();
    int NumInsnsPerBB = 0;
    for (auto &Inst : BB) {
      if (LooksLikeCodeInBug11395(&Inst)) return false;
      // Skip instructions inserted by another instrumentation.
      if (Inst.hasMetadata(LLVMContext::MD_nosanitize))
        continue;
      SmallVector<InterestingMemoryOperand, 1> InterestingOperands;
      getInterestingMemoryOperands(&Inst, InterestingOperands);

      if (!InterestingOperands.empty()) {
        for (auto &Operand : InterestingOperands) {
          if (ClOpt && ClOptSameTemp) {
            Value *Ptr = Operand.getPtr();
            // If we have a mask, skip instrumentation if we've already
            // instrumented the full object. But don't add to TempsToInstrument
            // because we might get another load/store with a different mask.
            if (Operand.MaybeMask) {
              if (TempsToInstrument.count(Ptr))
                continue; // We've seen this (whole) temp in the current BB.
            } else {
              if (!TempsToInstrument.insert(Ptr).second)
                continue; // We've seen this temp in the current BB.
            }
          }
          OperandsToInstrument.push_back(Operand);
          NumInsnsPerBB++;
        }
      } else if (((ClInvalidPointerPairs || ClInvalidPointerCmp) &&
                  isInterestingPointerComparison(&Inst)) ||
                 ((ClInvalidPointerPairs || ClInvalidPointerSub) &&
                  isInterestingPointerSubtraction(&Inst))) {
        PointerComparisonsOrSubtracts.push_back(&Inst);
      } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(&Inst)) {
        // ok, take it.
        IntrinToInstrument.push_back(MI);
        NumInsnsPerBB++;
      } else {
        if (auto *CB = dyn_cast<CallBase>(&Inst)) {
          // A call inside BB.
          TempsToInstrument.clear();
          if (CB->doesNotReturn())
            NoReturnCalls.push_back(CB);
        }
        if (CallInst *CI = dyn_cast<CallInst>(&Inst))
          maybeMarkSanitizerLibraryCallNoBuiltin(CI, TLI);
      }
      if (NumInsnsPerBB >= ClMaxInsnsToInstrumentPerBB) break;
    }
  }

  bool UseCalls = (InstrumentationWithCallsThreshold >= 0 &&
                   OperandsToInstrument.size() + IntrinToInstrument.size() >
                       (unsigned)InstrumentationWithCallsThreshold);
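  // Explanatory note: when UseCalls is true, instrumentMop below emits
  // out-of-line __asan_load*/__asan_store* calls instead of inline shadow
  // checks, trading runtime speed for smaller code in very large functions.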
  const DataLayout &DL = F.getParent()->getDataLayout();
  ObjectSizeOpts ObjSizeOpts;
  ObjSizeOpts.RoundToAlign = true;
  ObjectSizeOffsetVisitor ObjSizeVis(DL, TLI, F.getContext(), ObjSizeOpts);

  // Instrument.
  int NumInstrumented = 0;
  for (auto &Operand : OperandsToInstrument) {
    if (!suppressInstrumentationSiteForDebug(NumInstrumented))
      instrumentMop(ObjSizeVis, Operand, UseCalls,
                    F.getParent()->getDataLayout());
    FunctionModified = true;
  }
  for (auto *Inst : IntrinToInstrument) {
    if (!suppressInstrumentationSiteForDebug(NumInstrumented))
      instrumentMemIntrinsic(Inst);
    FunctionModified = true;
  }

  FunctionStackPoisoner FSP(F, *this);
  bool ChangedStack = FSP.runOnFunction();

  // We must unpoison the stack before NoReturn calls (throw, _exit, etc).
  // See e.g. https://github.com/google/sanitizers/issues/37
  for (auto *CI : NoReturnCalls) {
    IRBuilder<> IRB(CI);
    IRB.CreateCall(AsanHandleNoReturnFunc, {});
  }

  for (auto *Inst : PointerComparisonsOrSubtracts) {
    instrumentPointerComparisonOrSubtraction(Inst);
    FunctionModified = true;
  }

  if (ChangedStack || !NoReturnCalls.empty())
    FunctionModified = true;

  LLVM_DEBUG(dbgs() << "ASAN done instrumenting: " << FunctionModified << " "
                    << F << "\n");

  return FunctionModified;
}

// Workaround for bug 11395: we don't want to instrument stack in functions
// with large assembly blobs (32-bit only), otherwise reg alloc may crash.
// FIXME: remove once bug 11395 is fixed.
bool AddressSanitizer::LooksLikeCodeInBug11395(Instruction *I) {
  if (LongSize != 32) return false;
  CallInst *CI = dyn_cast<CallInst>(I);
  if (!CI || !CI->isInlineAsm()) return false;
  if (CI->arg_size() <= 5)
    return false;
  // We have inline assembly with quite a few arguments.
  return true;
}

void FunctionStackPoisoner::initializeCallbacks(Module &M) {
  IRBuilder<> IRB(*C);
  if (ASan.UseAfterReturn == AsanDetectStackUseAfterReturnMode::Always ||
      ASan.UseAfterReturn == AsanDetectStackUseAfterReturnMode::Runtime) {
    const char *MallocNameTemplate =
        ASan.UseAfterReturn == AsanDetectStackUseAfterReturnMode::Always
            ? kAsanStackMallocAlwaysNameTemplate
            : kAsanStackMallocNameTemplate;
    for (int Index = 0; Index <= kMaxAsanStackMallocSizeClass; Index++) {
      std::string Suffix = itostr(Index);
      AsanStackMallocFunc[Index] = M.getOrInsertFunction(
          MallocNameTemplate + Suffix, IntptrTy, IntptrTy);
      AsanStackFreeFunc[Index] =
          M.getOrInsertFunction(kAsanStackFreeNameTemplate + Suffix,
                                IRB.getVoidTy(), IntptrTy, IntptrTy);
    }
  }
  if (ASan.UseAfterScope) {
    AsanPoisonStackMemoryFunc = M.getOrInsertFunction(
        kAsanPoisonStackMemoryName, IRB.getVoidTy(), IntptrTy, IntptrTy);
    AsanUnpoisonStackMemoryFunc = M.getOrInsertFunction(
        kAsanUnpoisonStackMemoryName, IRB.getVoidTy(), IntptrTy, IntptrTy);
  }

  for (size_t Val : {0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0xf1, 0xf2,
                     0xf3, 0xf5, 0xf8}) {
    std::ostringstream Name;
    Name << kAsanSetShadowPrefix;
    Name << std::setw(2) << std::setfill('0') << std::hex << Val;
    AsanSetShadowFunc[Val] =
        M.getOrInsertFunction(Name.str(), IRB.getVoidTy(), IntptrTy, IntptrTy);
  }
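
  // The loop above binds runtime helpers named __asan_set_shadow_00 through
  // __asan_set_shadow_07 plus __asan_set_shadow_f1/f2/f3/f5/f8 (assuming the
  // usual "__asan_set_shadow_" value of kAsanSetShadowPrefix).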

  AsanAllocaPoisonFunc = M.getOrInsertFunction(
      kAsanAllocaPoison, IRB.getVoidTy(), IntptrTy, IntptrTy);
  AsanAllocasUnpoisonFunc = M.getOrInsertFunction(
      kAsanAllocasUnpoison, IRB.getVoidTy(), IntptrTy, IntptrTy);
}

void FunctionStackPoisoner::copyToShadowInline(ArrayRef<uint8_t> ShadowMask,
                                               ArrayRef<uint8_t> ShadowBytes,
                                               size_t Begin, size_t End,
                                               IRBuilder<> &IRB,
                                               Value *ShadowBase) {
  if (Begin >= End)
    return;

  const size_t LargestStoreSizeInBytes =
      std::min<size_t>(sizeof(uint64_t), ASan.LongSize / 8);

  const bool IsLittleEndian = F.getParent()->getDataLayout().isLittleEndian();

  // Poison the given range in shadow using the largest store size, without
  // leading and trailing zeros in ShadowMask. Zeros never change, so they need
  // neither poisoning nor unpoisoning. Still, we don't mind if some of them
  // end up in the middle of a store.
  for (size_t i = Begin; i < End;) {
    if (!ShadowMask[i]) {
      assert(!ShadowBytes[i]);
      ++i;
      continue;
    }

    size_t StoreSizeInBytes = LargestStoreSizeInBytes;
    // Fit store size into the range.
    while (StoreSizeInBytes > End - i)
      StoreSizeInBytes /= 2;

    // Minimize store size by trimming trailing zeros.
    for (size_t j = StoreSizeInBytes - 1; j && !ShadowMask[i + j]; --j) {
      while (j <= StoreSizeInBytes / 2)
        StoreSizeInBytes /= 2;
    }

    uint64_t Val = 0;
    for (size_t j = 0; j < StoreSizeInBytes; j++) {
      if (IsLittleEndian)
        Val |= (uint64_t)ShadowBytes[i + j] << (8 * j);
      else
        Val = (Val << 8) | ShadowBytes[i + j];
    }

    Value *Ptr = IRB.CreateAdd(ShadowBase, ConstantInt::get(IntptrTy, i));
    Value *Poison = IRB.getIntN(StoreSizeInBytes * 8, Val);
    IRB.CreateAlignedStore(
        Poison,
        IRB.CreateIntToPtr(Ptr, PointerType::getUnqual(Poison->getContext())),
        Align(1));

    i += StoreSizeInBytes;
  }
}
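
// Worked example for copyToShadowInline (illustrative): on a little-endian
// 64-bit target, ShadowBytes = {0xf1, 0xf1, 0x00, 0xf2} over a 4-byte range
// with the corresponding mask bytes set is packed into Val = 0xf200f1f1 and
// written with a single unaligned 4-byte store at ShadowBase + i.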

void FunctionStackPoisoner::copyToShadow(ArrayRef<uint8_t> ShadowMask,
                                         ArrayRef<uint8_t> ShadowBytes,
                                         IRBuilder<> &IRB, Value *ShadowBase) {
  copyToShadow(ShadowMask, ShadowBytes, 0, ShadowMask.size(), IRB, ShadowBase);
}

void FunctionStackPoisoner::copyToShadow(ArrayRef<uint8_t> ShadowMask,
                                         ArrayRef<uint8_t> ShadowBytes,
                                         size_t Begin, size_t End,
                                         IRBuilder<> &IRB, Value *ShadowBase) {
  assert(ShadowMask.size() == ShadowBytes.size());
  size_t Done = Begin;
  for (size_t i = Begin, j = Begin + 1; i < End; i = j++) {
    if (!ShadowMask[i]) {
      assert(!ShadowBytes[i]);
      continue;
    }
    uint8_t Val = ShadowBytes[i];
    if (!AsanSetShadowFunc[Val])
      continue;

    // Skip same values.
    for (; j < End && ShadowMask[j] && Val == ShadowBytes[j]; ++j) {
    }

    if (j - i >= ASan.MaxInlinePoisoningSize) {
      copyToShadowInline(ShadowMask, ShadowBytes, Done, i, IRB, ShadowBase);
      IRB.CreateCall(AsanSetShadowFunc[Val],
                     {IRB.CreateAdd(ShadowBase, ConstantInt::get(IntptrTy, i)),
                      ConstantInt::get(IntptrTy, j - i)});
      Done = j;
    }
  }

  copyToShadowInline(ShadowMask, ShadowBytes, Done, End, IRB, ShadowBase);
}
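
// Illustrative dispatch (assuming MaxInlinePoisoningSize == 64, its usual
// default): a run of 64 identical 0xf1 shadow bytes becomes one call to
// __asan_set_shadow_f1, while shorter or mixed runs fall through to
// copyToShadowInline's wide stores.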

// Fake stack allocator (asan_fake_stack.h) has 11 size classes, one for each
// power of 2 from kMinStackMallocSize up to kMaxAsanStackMallocSizeClass.
static int StackMallocSizeClass(uint64_t LocalStackSize) {
  assert(LocalStackSize <= kMaxStackMallocSize);
  uint64_t MaxSize = kMinStackMallocSize;
  for (int i = 0;; i++, MaxSize *= 2)
    if (LocalStackSize <= MaxSize) return i;
  llvm_unreachable("impossible LocalStackSize");
}
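
// E.g. (assuming kMinStackMallocSize == 64): LocalStackSize up to 64 maps to
// class 0, 65..128 to class 1, 129..256 to class 2, doubling per class.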

void FunctionStackPoisoner::copyArgsPassedByValToAllocas() {
  Instruction *CopyInsertPoint = &F.front().front();
  if (CopyInsertPoint == ASan.LocalDynamicShadow) {
    // Insert after the dynamic shadow location is determined.
    CopyInsertPoint = CopyInsertPoint->getNextNode();
    assert(CopyInsertPoint);
  }
  IRBuilder<> IRB(CopyInsertPoint);
  const DataLayout &DL = F.getParent()->getDataLayout();
  for (Argument &Arg : F.args()) {
    if (Arg.hasByValAttr()) {
      Type *Ty = Arg.getParamByValType();
      const Align Alignment =
          DL.getValueOrABITypeAlignment(Arg.getParamAlign(), Ty);

      AllocaInst *AI = IRB.CreateAlloca(
          Ty, nullptr,
          (Arg.hasName() ? Arg.getName() : "Arg" + Twine(Arg.getArgNo())) +
              ".byval");
      AI->setAlignment(Alignment);
      Arg.replaceAllUsesWith(AI);

      uint64_t AllocSize = DL.getTypeAllocSize(Ty);
      IRB.CreateMemCpy(AI, Alignment, &Arg, Alignment, AllocSize);
    }
  }
}

PHINode *FunctionStackPoisoner::createPHI(IRBuilder<> &IRB, Value *Cond,
                                          Value *ValueIfTrue,
                                          Instruction *ThenTerm,
                                          Value *ValueIfFalse) {
  PHINode *PHI = IRB.CreatePHI(IntptrTy, 2);
  BasicBlock *CondBlock = cast<Instruction>(Cond)->getParent();
  PHI->addIncoming(ValueIfFalse, CondBlock);
  BasicBlock *ThenBlock = ThenTerm->getParent();
  PHI->addIncoming(ValueIfTrue, ThenBlock);
  return PHI;
}

Value *FunctionStackPoisoner::createAllocaForLayout(
    IRBuilder<> &IRB, const ASanStackFrameLayout &L, bool Dynamic) {
  AllocaInst *Alloca;
  if (Dynamic) {
    Alloca = IRB.CreateAlloca(IRB.getInt8Ty(),
                              ConstantInt::get(IRB.getInt64Ty(), L.FrameSize),
                              "MyAlloca");
  } else {
    Alloca = IRB.CreateAlloca(ArrayType::get(IRB.getInt8Ty(), L.FrameSize),
                              nullptr, "MyAlloca");
    assert(Alloca->isStaticAlloca());
  }
  assert((ClRealignStack & (ClRealignStack - 1)) == 0);
  uint64_t FrameAlignment = std::max(L.FrameAlignment, uint64_t(ClRealignStack));
  Alloca->setAlignment(Align(FrameAlignment));
  return IRB.CreatePointerCast(Alloca, IntptrTy);
}

void FunctionStackPoisoner::createDynamicAllocasInitStorage() {
  BasicBlock &FirstBB = *F.begin();
  IRBuilder<> IRB(dyn_cast<Instruction>(FirstBB.begin()));
  DynamicAllocaLayout = IRB.CreateAlloca(IntptrTy, nullptr);
  IRB.CreateStore(Constant::getNullValue(IntptrTy), DynamicAllocaLayout);
  DynamicAllocaLayout->setAlignment(Align(32));
}

void FunctionStackPoisoner::processDynamicAllocas() {
  if (!ClInstrumentDynamicAllocas || DynamicAllocaVec.empty()) {
    assert(DynamicAllocaPoisonCallVec.empty());
    return;
  }

  // Insert poison calls for lifetime intrinsics for dynamic allocas.
  for (const auto &APC : DynamicAllocaPoisonCallVec) {
    assert(APC.InsBefore);
    assert(APC.AI);
    assert(ASan.isInterestingAlloca(*APC.AI));
    assert(!APC.AI->isStaticAlloca());

    IRBuilder<> IRB(APC.InsBefore);
    poisonAlloca(APC.AI, APC.Size, IRB, APC.DoPoison);
    // Dynamic allocas will be unpoisoned unconditionally below in
    // unpoisonDynamicAllocas, so no extra bookkeeping is needed here.
  }

  // Handle dynamic allocas.
  createDynamicAllocasInitStorage();
  for (auto &AI : DynamicAllocaVec)
    handleDynamicAllocaCall(AI);
  unpoisonDynamicAllocas();
}

/// Collect instructions in the entry block after \p InsBefore which initialize
/// permanent storage for a function argument. These instructions must remain
/// in the entry block so that uninitialized values do not appear in
/// backtraces. An added benefit is that this conserves spill slots. This does
/// not move stores before instrumented / "interesting" allocas.
static void findStoresToUninstrumentedArgAllocas(
    AddressSanitizer &ASan, Instruction &InsBefore,
    SmallVectorImpl<Instruction *> &InitInsts) {
  Instruction *Start = InsBefore.getNextNonDebugInstruction();
  for (Instruction *It = Start; It; It = It->getNextNonDebugInstruction()) {
    // Argument initialization looks like:
    // 1) store <Argument>, <Alloca> OR
    // 2) <CastArgument> = cast <Argument> to ...
    //    store <CastArgument> to <Alloca>
    // Do not consider any other kind of instruction.
    //
    // Note: This covers all known cases, but may not be exhaustive. An
    // alternative to pattern-matching stores is to DFS over all Argument uses:
    // this might be more general, but is probably much more complicated.
    if (isa<AllocaInst>(It) || isa<CastInst>(It))
      continue;
    if (auto *Store = dyn_cast<StoreInst>(It)) {
      // The store destination must be an alloca that isn't interesting for
      // ASan to instrument. These are moved up before InsBefore, and they're
      // not interesting because allocas for arguments can be mem2reg'd.
      auto *Alloca = dyn_cast<AllocaInst>(Store->getPointerOperand());
      if (!Alloca || ASan.isInterestingAlloca(*Alloca))
        continue;

      Value *Val = Store->getValueOperand();
      bool IsDirectArgInit = isa<Argument>(Val);
      bool IsArgInitViaCast =
          isa<CastInst>(Val) &&
          isa<Argument>(cast<CastInst>(Val)->getOperand(0)) &&
          // Check that the cast appears directly before the store. Otherwise
          // moving the cast before InsBefore may break the IR.
          Val == It->getPrevNonDebugInstruction();
      bool IsArgInit = IsDirectArgInit || IsArgInitViaCast;
      if (!IsArgInit)
        continue;

      if (IsArgInitViaCast)
        InitInsts.push_back(cast<Instruction>(Val));
      InitInsts.push_back(Store);
      continue;
    }

    // Do not reorder past unknown instructions: argument initialization should
    // only involve casts and stores.
    return;
  }
}
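
// In IR terms, the two patterns matched above look roughly like (hypothetical
// value names, for illustration only):
//   store i32 %arg, ptr %arg.addr                 ; pattern 1, direct store
//   %ext = sext i32 %arg to i64
//   store i64 %ext, ptr %arg.addr.wide            ; pattern 2, store via cast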

void FunctionStackPoisoner::processStaticAllocas() {
  if (AllocaVec.empty()) {
    assert(StaticAllocaPoisonCallVec.empty());
    return;
  }

  int StackMallocIdx = -1;
  DebugLoc EntryDebugLocation;
  if (auto SP = F.getSubprogram())
    EntryDebugLocation =
        DILocation::get(SP->getContext(), SP->getScopeLine(), 0, SP);

  Instruction *InsBefore = AllocaVec[0];
  IRBuilder<> IRB(InsBefore);

  // Make sure non-instrumented allocas stay in the entry block. Otherwise,
  // debug info is broken, because only entry-block allocas are treated as
  // regular stack slots.
  auto InsBeforeB = InsBefore->getParent();
  assert(InsBeforeB == &F.getEntryBlock());
  for (auto *AI : StaticAllocasToMoveUp)
    if (AI->getParent() == InsBeforeB)
      AI->moveBefore(InsBefore);

  // Move stores of arguments into entry-block allocas as well. This prevents
  // extra stack slots from being generated (to house the argument values until
  // they can be stored into the allocas). This also prevents uninitialized
  // values from being shown in backtraces.
  SmallVector<Instruction *, 8> ArgInitInsts;
  findStoresToUninstrumentedArgAllocas(ASan, *InsBefore, ArgInitInsts);
  for (Instruction *ArgInitInst : ArgInitInsts)
    ArgInitInst->moveBefore(InsBefore);

  // If we have a call to llvm.localescape, keep it in the entry block.
  if (LocalEscapeCall) LocalEscapeCall->moveBefore(InsBefore);

  SmallVector<ASanStackVariableDescription, 16> SVD;
  SVD.reserve(AllocaVec.size());
  for (AllocaInst *AI : AllocaVec) {
    ASanStackVariableDescription D = {AI->getName().data(),
                                      ASan.getAllocaSizeInBytes(*AI),
                                      0,
                                      AI->getAlign().value(),
                                      AI,
                                      0,
                                      0};
    SVD.push_back(D);
  }

  // Minimal header size (left redzone) is 4 pointers,
  // i.e. 32 bytes on 64-bit platforms and 16 bytes on 32-bit platforms.
  uint64_t Granularity = 1ULL << Mapping.Scale;
  uint64_t MinHeaderSize = std::max((uint64_t)ASan.LongSize / 2, Granularity);
  const ASanStackFrameLayout &L =
      ComputeASanStackFrameLayout(SVD, Granularity, MinHeaderSize);

  // Build AllocaToSVDMap for ASanStackVariableDescription lookup.
  DenseMap<const AllocaInst *, ASanStackVariableDescription *> AllocaToSVDMap;
  for (auto &Desc : SVD)
    AllocaToSVDMap[Desc.AI] = &Desc;

  // Update SVD with information from lifetime intrinsics.
  for (const auto &APC : StaticAllocaPoisonCallVec) {
    assert(APC.InsBefore);
    assert(APC.AI);
    assert(ASan.isInterestingAlloca(*APC.AI));
    assert(APC.AI->isStaticAlloca());

    ASanStackVariableDescription &Desc = *AllocaToSVDMap[APC.AI];
    Desc.LifetimeSize = Desc.Size;
    if (const DILocation *FnLoc = EntryDebugLocation.get()) {
      if (const DILocation *LifetimeLoc = APC.InsBefore->getDebugLoc().get()) {
        if (LifetimeLoc->getFile() == FnLoc->getFile())
          if (unsigned Line = LifetimeLoc->getLine())
            Desc.Line = std::min(Desc.Line ? Desc.Line : Line, Line);
      }
    }
  }

  auto DescriptionString = ComputeASanStackFrameDescription(SVD);
  LLVM_DEBUG(dbgs() << DescriptionString << " --- " << L.FrameSize << "\n");
  uint64_t LocalStackSize = L.FrameSize;
  bool DoStackMalloc =
      ASan.UseAfterReturn != AsanDetectStackUseAfterReturnMode::Never &&
      !ASan.CompileKernel && LocalStackSize <= kMaxStackMallocSize;
  bool DoDynamicAlloca = ClDynamicAllocaStack;
  // Don't do dynamic alloca or stack malloc if:
  // 1) There is inline asm: too often it makes assumptions on which registers
  //    are available.
  // 2) There is a returns_twice call (typically setjmp), which is
  //    optimization-hostile, and doesn't play well with introduced indirect
  //    register-relative calculation of local variable addresses.
  DoDynamicAlloca &= !HasInlineAsm && !HasReturnsTwiceCall;
  DoStackMalloc &= !HasInlineAsm && !HasReturnsTwiceCall;

  Value *StaticAlloca =
      DoDynamicAlloca ? nullptr : createAllocaForLayout(IRB, L, false);

  Value *FakeStack;
  Value *LocalStackBase;
  Value *LocalStackBaseAlloca;
  uint8_t DIExprFlags = DIExpression::ApplyOffset;

  if (DoStackMalloc) {
    LocalStackBaseAlloca =
        IRB.CreateAlloca(IntptrTy, nullptr, "asan_local_stack_base");
    if (ASan.UseAfterReturn == AsanDetectStackUseAfterReturnMode::Runtime) {
      // void *FakeStack = __asan_option_detect_stack_use_after_return
      //     ? __asan_stack_malloc_N(LocalStackSize)
      //     : nullptr;
      // void *LocalStackBase = (FakeStack) ? FakeStack
      //                                    : alloca(LocalStackSize);
      Constant *OptionDetectUseAfterReturn = F.getParent()->getOrInsertGlobal(
          kAsanOptionDetectUseAfterReturn, IRB.getInt32Ty());
      Value *UseAfterReturnIsEnabled = IRB.CreateICmpNE(
          IRB.CreateLoad(IRB.getInt32Ty(), OptionDetectUseAfterReturn),
          Constant::getNullValue(IRB.getInt32Ty()));
      Instruction *Term =
          SplitBlockAndInsertIfThen(UseAfterReturnIsEnabled, InsBefore, false);
      IRBuilder<> IRBIf(Term);
      StackMallocIdx = StackMallocSizeClass(LocalStackSize);
      assert(StackMallocIdx <= kMaxAsanStackMallocSizeClass);
      Value *FakeStackValue =
          IRBIf.CreateCall(AsanStackMallocFunc[StackMallocIdx],
                           ConstantInt::get(IntptrTy, LocalStackSize));
      IRB.SetInsertPoint(InsBefore);
      FakeStack = createPHI(IRB, UseAfterReturnIsEnabled, FakeStackValue, Term,
                            ConstantInt::get(IntptrTy, 0));
    } else {
      // assert(ASan.UseAfterReturn == AsanDetectStackUseAfterReturnMode::Always)
      // void *FakeStack = __asan_stack_malloc_N(LocalStackSize);
      // void *LocalStackBase = (FakeStack) ? FakeStack
      //                                    : alloca(LocalStackSize);
      StackMallocIdx = StackMallocSizeClass(LocalStackSize);
      FakeStack = IRB.CreateCall(AsanStackMallocFunc[StackMallocIdx],
                                 ConstantInt::get(IntptrTy, LocalStackSize));
    }
    Value *NoFakeStack =
        IRB.CreateICmpEQ(FakeStack, Constant::getNullValue(IntptrTy));
    Instruction *Term =
        SplitBlockAndInsertIfThen(NoFakeStack, InsBefore, false);
    IRBuilder<> IRBIf(Term);
    Value *AllocaValue =
        DoDynamicAlloca ? createAllocaForLayout(IRBIf, L, true) : StaticAlloca;

    IRB.SetInsertPoint(InsBefore);
    LocalStackBase = createPHI(IRB, NoFakeStack, AllocaValue, Term, FakeStack);
    IRB.CreateStore(LocalStackBase, LocalStackBaseAlloca);
    DIExprFlags |= DIExpression::DerefBefore;
  } else {
    // void *FakeStack = nullptr;
    // void *LocalStackBase = alloca(LocalStackSize);
    FakeStack = ConstantInt::get(IntptrTy, 0);
    LocalStackBase =
        DoDynamicAlloca ? createAllocaForLayout(IRB, L, true) : StaticAlloca;
    LocalStackBaseAlloca = LocalStackBase;
  }

  // It shouldn't matter whether we pass an `alloca` or a `ptrtoint` as the
  // dbg.declare address operand, but passing a `ptrtoint` seems to confuse
  // later passes and can result in dropped variable coverage in debug info.
  Value *LocalStackBaseAllocaPtr =
      isa<PtrToIntInst>(LocalStackBaseAlloca)
          ? cast<PtrToIntInst>(LocalStackBaseAlloca)->getPointerOperand()
          : LocalStackBaseAlloca;
  assert(isa<AllocaInst>(LocalStackBaseAllocaPtr) &&
         "Variable descriptions relative to ASan stack base will be dropped");

  // Replace Alloca instructions with base+offset.
  for (const auto &Desc : SVD) {
    AllocaInst *AI = Desc.AI;
    replaceDbgDeclare(AI, LocalStackBaseAllocaPtr, DIB, DIExprFlags,
                      Desc.Offset);
    Value *NewAllocaPtr = IRB.CreateIntToPtr(
        IRB.CreateAdd(LocalStackBase, ConstantInt::get(IntptrTy, Desc.Offset)),
        AI->getType());
    AI->replaceAllUsesWith(NewAllocaPtr);
  }

  // The left-most redzone has enough space for at least 4 pointers.
  // Write the Magic value to redzone[0].
  Value *BasePlus0 = IRB.CreateIntToPtr(LocalStackBase, IntptrPtrTy);
  IRB.CreateStore(ConstantInt::get(IntptrTy, kCurrentStackFrameMagic),
                  BasePlus0);
  // Write the frame description constant to redzone[1].
  Value *BasePlus1 = IRB.CreateIntToPtr(
      IRB.CreateAdd(LocalStackBase,
                    ConstantInt::get(IntptrTy, ASan.LongSize / 8)),
      IntptrPtrTy);
  GlobalVariable *StackDescriptionGlobal =
      createPrivateGlobalForString(*F.getParent(), DescriptionString,
                                   /*AllowMerging*/ true, kAsanGenPrefix);
  Value *Description = IRB.CreatePointerCast(StackDescriptionGlobal, IntptrTy);
  IRB.CreateStore(Description, BasePlus1);
  // Write the PC to redzone[2].
  Value *BasePlus2 = IRB.CreateIntToPtr(
      IRB.CreateAdd(LocalStackBase,
                    ConstantInt::get(IntptrTy, 2 * ASan.LongSize / 8)),
      IntptrPtrTy);
  IRB.CreateStore(IRB.CreatePointerCast(&F, IntptrTy), BasePlus2);
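
  // Sketch of the header written by the three stores above, one pointer-sized
  // slot each:
  //   base + 0:              kCurrentStackFrameMagic
  //   base + LongSize/8:     pointer to the frame description string
  //   base + 2*LongSize/8:   address of the function (its PC), used by the
  //                          runtime when reporting errors on this frame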

  const auto &ShadowAfterScope = GetShadowBytesAfterScope(SVD, L);

  // Poison the stack redzones at the entry.
  Value *ShadowBase = ASan.memToShadow(LocalStackBase, IRB);
  // As the mask we must use the most poisoned case: redzones plus after-scope
  // bytes. As the bytes we can use either the same values or the redzones
  // only.
  copyToShadow(ShadowAfterScope, ShadowAfterScope, IRB, ShadowBase);

  if (!StaticAllocaPoisonCallVec.empty()) {
    const auto &ShadowInScope = GetShadowBytes(SVD, L);

    // Poison static allocas near lifetime intrinsics.
    for (const auto &APC : StaticAllocaPoisonCallVec) {
      const ASanStackVariableDescription &Desc = *AllocaToSVDMap[APC.AI];
      assert(Desc.Offset % L.Granularity == 0);
      size_t Begin = Desc.Offset / L.Granularity;
      size_t End = Begin + (APC.Size + L.Granularity - 1) / L.Granularity;

      IRBuilder<> IRB(APC.InsBefore);
      copyToShadow(ShadowAfterScope,
                   APC.DoPoison ? ShadowAfterScope : ShadowInScope, Begin, End,
                   IRB, ShadowBase);
    }
  }

  SmallVector<uint8_t, 64> ShadowClean(ShadowAfterScope.size(), 0);
  SmallVector<uint8_t, 64> ShadowAfterReturn;

  // (Un)poison the stack before all ret instructions.
  for (Instruction *Ret : RetVec) {
    IRBuilder<> IRBRet(Ret);
    // Mark the current frame as retired.
    IRBRet.CreateStore(ConstantInt::get(IntptrTy, kRetiredStackFrameMagic),
                       BasePlus0);
    if (DoStackMalloc) {
      assert(StackMallocIdx >= 0);
      // if FakeStack != 0  // LocalStackBase == FakeStack
      //     // In use-after-return mode, poison the whole stack frame.
      //     if StackMallocIdx <= 4
      //         // For small sizes inline the whole thing:
      //         memset(ShadowBase, kAsanStackAfterReturnMagic, ShadowSize);
      //         **SavedFlagPtr(FakeStack) = 0
      //     else
      //         __asan_stack_free_N(FakeStack, LocalStackSize)
      // else
      //     <This is not a fake stack; unpoison the redzones>
      Value *Cmp =
          IRBRet.CreateICmpNE(FakeStack, Constant::getNullValue(IntptrTy));
      Instruction *ThenTerm, *ElseTerm;
      SplitBlockAndInsertIfThenElse(Cmp, Ret, &ThenTerm, &ElseTerm);

      IRBuilder<> IRBPoison(ThenTerm);
      if (ASan.MaxInlinePoisoningSize != 0 && StackMallocIdx <= 4) {
        int ClassSize = kMinStackMallocSize << StackMallocIdx;
        ShadowAfterReturn.resize(ClassSize / L.Granularity,
                                 kAsanStackUseAfterReturnMagic);
        copyToShadow(ShadowAfterReturn, ShadowAfterReturn, IRBPoison,
                     ShadowBase);
        Value *SavedFlagPtrPtr = IRBPoison.CreateAdd(
            FakeStack,
            ConstantInt::get(IntptrTy, ClassSize - ASan.LongSize / 8));
        Value *SavedFlagPtr = IRBPoison.CreateLoad(
            IntptrTy, IRBPoison.CreateIntToPtr(SavedFlagPtrPtr, IntptrPtrTy));
        IRBPoison.CreateStore(
            Constant::getNullValue(IRBPoison.getInt8Ty()),
            IRBPoison.CreateIntToPtr(SavedFlagPtr, IRBPoison.getPtrTy()));
      } else {
        // For larger frames call __asan_stack_free_*.
        IRBPoison.CreateCall(
            AsanStackFreeFunc[StackMallocIdx],
            {FakeStack, ConstantInt::get(IntptrTy, LocalStackSize)});
      }

      IRBuilder<> IRBElse(ElseTerm);
      copyToShadow(ShadowAfterScope, ShadowClean, IRBElse, ShadowBase);
    } else {
      copyToShadow(ShadowAfterScope, ShadowClean, IRBRet, ShadowBase);
    }
  }

  // We are done. Remove the old unused alloca instructions.
  for (auto *AI : AllocaVec)
    AI->eraseFromParent();
}

void FunctionStackPoisoner::poisonAlloca(Value *V, uint64_t Size,
                                         IRBuilder<> &IRB, bool DoPoison) {
  // For now just insert the call to ASan runtime.
  Value *AddrArg = IRB.CreatePointerCast(V, IntptrTy);
  Value *SizeArg = ConstantInt::get(IntptrTy, Size);
  IRB.CreateCall(
      DoPoison ? AsanPoisonStackMemoryFunc : AsanUnpoisonStackMemoryFunc,
      {AddrArg, SizeArg});
}

// Handling llvm.lifetime intrinsics for a given %alloca:
// (1) collect all llvm.lifetime.xxx(%size, %value) describing the alloca.
// (2) if %size is constant, poison memory for llvm.lifetime.end (to detect
//     invalid accesses) and unpoison it for llvm.lifetime.start (the memory
//     could be poisoned by a previous llvm.lifetime.end instruction, as the
//     variable may go in and out of scope several times, e.g. in loops).
// (3) if we poisoned at least one %alloca in a function,
//     unpoison the whole stack frame at function exit.
void FunctionStackPoisoner::handleDynamicAllocaCall(AllocaInst *AI) {
  IRBuilder<> IRB(AI);

  const Align Alignment = std::max(Align(kAllocaRzSize), AI->getAlign());
  const uint64_t AllocaRedzoneMask = kAllocaRzSize - 1;

  Value *Zero = Constant::getNullValue(IntptrTy);
  Value *AllocaRzSize = ConstantInt::get(IntptrTy, kAllocaRzSize);
  Value *AllocaRzMask = ConstantInt::get(IntptrTy, AllocaRedzoneMask);

  // Since we need to extend the alloca with additional memory to hold the
  // redzones, and OldSize is the number of allocated blocks of ElementSize
  // size, compute the allocated memory size in bytes as
  // OldSize * ElementSize.
  const unsigned ElementSize =
      F.getParent()->getDataLayout().getTypeAllocSize(AI->getAllocatedType());
  Value *OldSize =
      IRB.CreateMul(IRB.CreateIntCast(AI->getArraySize(), IntptrTy, false),
                    ConstantInt::get(IntptrTy, ElementSize));

  // PartialSize = OldSize % kAllocaRzSize
  Value *PartialSize = IRB.CreateAnd(OldSize, AllocaRzMask);

  // Misalign = kAllocaRzSize - PartialSize;
  Value *Misalign = IRB.CreateSub(AllocaRzSize, PartialSize);

  // PartialPadding = Misalign != kAllocaRzSize ? Misalign : 0;
  Value *Cond = IRB.CreateICmpNE(Misalign, AllocaRzSize);
  Value *PartialPadding = IRB.CreateSelect(Cond, Misalign, Zero);

  // AdditionalChunkSize = Alignment + PartialPadding + kAllocaRzSize
  // Alignment is added to locate the left redzone, PartialPadding for a
  // possible partial redzone, and kAllocaRzSize for the right redzone.
  Value *AdditionalChunkSize = IRB.CreateAdd(
      ConstantInt::get(IntptrTy, Alignment.value() + kAllocaRzSize),
      PartialPadding);

  Value *NewSize = IRB.CreateAdd(OldSize, AdditionalChunkSize);

  // Insert a new alloca with the new NewSize and Alignment params.
  AllocaInst *NewAlloca = IRB.CreateAlloca(IRB.getInt8Ty(), NewSize);
  NewAlloca->setAlignment(Alignment);

  // NewAddress = Address + Alignment
  Value *NewAddress =
      IRB.CreateAdd(IRB.CreatePtrToInt(NewAlloca, IntptrTy),
                    ConstantInt::get(IntptrTy, Alignment.value()));

  // Insert an __asan_alloca_poison call for the newly created alloca.
  IRB.CreateCall(AsanAllocaPoisonFunc, {NewAddress, OldSize});

  // Store the last alloca's address to DynamicAllocaLayout. We'll need this
  // for unpoisoning stuff.
  IRB.CreateStore(IRB.CreatePtrToInt(NewAlloca, IntptrTy), DynamicAllocaLayout);

  Value *NewAddressPtr = IRB.CreateIntToPtr(NewAddress, AI->getType());

  // Replace all uses of the address returned by the old alloca with
  // NewAddressPtr.
  AI->replaceAllUsesWith(NewAddressPtr);

  // We are done. Erase the old alloca from its parent.
  AI->eraseFromParent();
}
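
// Worked size arithmetic (illustrative, with kAllocaRzSize == 32 and an
// alloca of 40 bytes at default alignment):
//   PartialSize = 40 & 31 = 8, Misalign = 32 - 8 = 24, PartialPadding = 24,
//   AdditionalChunkSize = 32 (alignment) + 32 (right redzone) + 24 = 88,
//   NewSize = 40 + 88 = 128: [32-byte left rz][40 data bytes][56-byte rz].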

// isSafeAccess returns true if Addr is always inbounds with respect to its
// base object. For example, it is a field access or an array access with
// constant inbounds index.
bool AddressSanitizer::isSafeAccess(ObjectSizeOffsetVisitor &ObjSizeVis,
                                    Value *Addr, TypeSize TypeStoreSize) const {
  if (TypeStoreSize.isScalable())
    // TODO: We can use vscale_range to convert a scalable value to an
    // upper bound on the access size.
    return false;

  SizeOffsetAPInt SizeOffset = ObjSizeVis.compute(Addr);
  if (!SizeOffset.bothKnown())
    return false;

  uint64_t Size = SizeOffset.Size.getZExtValue();
  int64_t Offset = SizeOffset.Offset.getSExtValue();

  // Three checks are required to ensure safety:
  // . Offset >= 0 (since the offset is given from the base ptr)
  // . Size >= Offset (unsigned)
  // . Size - Offset >= NeededSize (unsigned)
  return Offset >= 0 && Size >= uint64_t(Offset) &&
         Size - uint64_t(Offset) >= TypeStoreSize / 8;
}
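
// E.g. (illustrative): for an 8-byte store to the second field of a 16-byte
// struct, Size = 16, Offset = 8, and TypeStoreSize = 64 bits, so
// 16 - 8 >= 64 / 8 holds and the access is provably in bounds.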