1 //===- MemorySanitizer.cpp - detector of uninitialized reads --------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 /// \file
10 /// This file is a part of MemorySanitizer, a detector of uninitialized
11 /// reads.
12 ///
13 /// The algorithm of the tool is similar to Memcheck
14 /// (http://goo.gl/QKbem). We associate a few shadow bits with every
15 /// byte of the application memory, poison the shadow of the malloc-ed
16 /// or alloca-ed memory, load the shadow bits on every memory read,
17 /// propagate the shadow bits through some of the arithmetic
/// instructions (including MOV), store the shadow bits on every memory
19 /// write, report a bug on some other instructions (e.g. JMP) if the
20 /// associated shadow is poisoned.
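///
/// For example (a rough sketch; value names are illustrative, not the exact
/// instrumentation output), for "%c = add i32 %a, %b" the pass emits
///   %c_shadow = or i32 %a_shadow, %b_shadow
/// and for an instruction with strict semantics, such as a conditional
/// branch on %c, it emits a check of %c_shadow that calls one of the
/// __msan_warning*() functions when the shadow is non-zero.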
21 ///
22 /// But there are differences too. The first and the major one:
23 /// compiler instrumentation instead of binary instrumentation. This
24 /// gives us much better register allocation, possible compiler
25 /// optimizations and a fast start-up. But this brings the major issue
26 /// as well: msan needs to see all program events, including system
27 /// calls and reads/writes in system libraries, so we either need to
28 /// compile *everything* with msan or use a binary translation
29 /// component (e.g. DynamoRIO) to instrument pre-built libraries.
30 /// Another difference from Memcheck is that we use 8 shadow bits per
31 /// byte of application memory and use a direct shadow mapping. This
32 /// greatly simplifies the instrumentation code and avoids races on
33 /// shadow updates (Memcheck is single-threaded so races are not a
34 /// concern there. Memcheck uses 2 shadow bits per byte with a slow
35 /// path storage that uses 8 bits per byte).
36 ///
37 /// The default value of shadow is 0, which means "clean" (not poisoned).
38 ///
39 /// Every module initializer should call __msan_init to ensure that the
40 /// shadow memory is ready. On error, __msan_warning is called. Since
41 /// parameters and return values may be passed via registers, we have a
42 /// specialized thread-local shadow for return values
43 /// (__msan_retval_tls) and parameters (__msan_param_tls).
44 ///
45 ///                           Origin tracking.
46 ///
47 /// MemorySanitizer can track origins (allocation points) of all uninitialized
48 /// values. This behavior is controlled with a flag (msan-track-origins) and is
49 /// disabled by default.
50 ///
51 /// Origins are 4-byte values created and interpreted by the runtime library.
52 /// They are stored in a second shadow mapping, one 4-byte value for 4 bytes
53 /// of application memory. Propagation of origins is basically a bunch of
54 /// "select" instructions that pick the origin of a dirty argument, if an
55 /// instruction has one.
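///
/// As a rough sketch (value names are illustrative), for
/// "%c = add i32 %a, %b" the origin is propagated roughly as
///   %c_origin = select i1 (%b_shadow != 0), %b_origin, %a_origin
/// i.e. the origin of an operand with non-zero shadow wins.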
56 ///
57 /// Every 4 aligned, consecutive bytes of application memory have one origin
58 /// value associated with them. If these bytes contain uninitialized data
59 /// coming from 2 different allocations, the last store wins. Because of this,
60 /// MemorySanitizer reports can show unrelated origins, but this is unlikely in
61 /// practice.
62 ///
63 /// Origins are meaningless for fully initialized values, so MemorySanitizer
64 /// avoids storing origin to memory when a fully initialized value is stored.
/// This way it avoids needlessly overwriting the origin of the 4-byte region
/// on a short (i.e. 1-byte) clean store, and it is also good for performance.
67 ///
68 ///                            Atomic handling.
69 ///
/// Ideally, every atomic store of an application value should update the
/// corresponding shadow location in an atomic way. Unfortunately, an atomic
/// update of two disjoint locations cannot be done without severe slowdown.
73 ///
74 /// Therefore, we implement an approximation that may err on the safe side.
75 /// In this implementation, every atomically accessed location in the program
76 /// may only change from (partially) uninitialized to fully initialized, but
77 /// not the other way around. We load the shadow _after_ the application load,
78 /// and we store the shadow _before_ the app store. Also, we always store clean
79 /// shadow (if the application store is atomic). This way, if the store-load
80 /// pair constitutes a happens-before arc, shadow store and load are correctly
81 /// ordered such that the load will get either the value that was stored, or
82 /// some later value (which is always clean).
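///
/// For an atomic application store, the instrumented sequence is therefore
/// roughly (a sketch, with illustrative names):
///   store i32 0, ptr %p_shadow          ; clean shadow is stored first
///   store atomic i32 %v, ptr %p release
/// with the ordering of the application store upgraded to at least release
/// (see materializeStores() and addReleaseOrdering()).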
83 ///
84 /// This does not work very well with Compare-And-Swap (CAS) and
85 /// Read-Modify-Write (RMW) operations. To follow the above logic, CAS and RMW
86 /// must store the new shadow before the app operation, and load the shadow
87 /// after the app operation. Computers don't work this way. Current
88 /// implementation ignores the load aspect of CAS/RMW, always returning a clean
89 /// value. It implements the store part as a simple atomic store by storing a
90 /// clean shadow.
91 ///
92 ///                      Instrumenting inline assembly.
93 ///
94 /// For inline assembly code LLVM has little idea about which memory locations
/// become initialized depending on the arguments. It may be possible to figure
/// out which arguments are meant to point to inputs and outputs, but the
/// actual semantics may only be visible at runtime. In the Linux kernel it's
98 /// also possible that the arguments only indicate the offset for a base taken
99 /// from a segment register, so it's dangerous to treat any asm() arguments as
/// pointers. We take a conservative approach generating calls to
///   __msan_instrument_asm_store(ptr, size)
/// which defers the memory unpoisoning to the runtime library.
103 /// The latter can perform more complex address checks to figure out whether
104 /// it's safe to touch the shadow memory.
105 /// Like with atomic operations, we call __msan_instrument_asm_store() before
106 /// the assembly call, so that changes to the shadow memory will be seen by
107 /// other threads together with main memory initialization.
108 ///
109 ///                  KernelMemorySanitizer (KMSAN) implementation.
110 ///
111 /// The major differences between KMSAN and MSan instrumentation are:
112 ///  - KMSAN always tracks the origins and implies msan-keep-going=true;
113 ///  - KMSAN allocates shadow and origin memory for each page separately, so
114 ///    there are no explicit accesses to shadow and origin in the
115 ///    instrumentation.
116 ///    Shadow and origin values for a particular X-byte memory location
117 ///    (X=1,2,4,8) are accessed through pointers obtained via the
118 ///      __msan_metadata_ptr_for_load_X(ptr)
119 ///      __msan_metadata_ptr_for_store_X(ptr)
120 ///    functions. The corresponding functions check that the X-byte accesses
///    are possible and return the pointers to shadow and origin memory.
122 ///    Arbitrary sized accesses are handled with:
123 ///      __msan_metadata_ptr_for_load_n(ptr, size)
124 ///      __msan_metadata_ptr_for_store_n(ptr, size);
125 ///    Note that the sanitizer code has to deal with how shadow/origin pairs
///    returned by these functions are represented in different ABIs. In
127 ///    the X86_64 ABI they are returned in RDX:RAX, and in the SystemZ ABI they
128 ///    are written to memory pointed to by a hidden parameter.
129 ///  - TLS variables are stored in a single per-task struct. A call to a
130 ///    function __msan_get_context_state() returning a pointer to that struct
131 ///    is inserted into every instrumented function before the entry block;
132 ///  - __msan_warning() takes a 32-bit origin parameter;
133 ///  - local variables are poisoned with __msan_poison_alloca() upon function
134 ///    entry and unpoisoned with __msan_unpoison_alloca() before leaving the
135 ///    function;
136 ///  - the pass doesn't declare any global variables or add global constructors
137 ///    to the translation unit.
138 ///
139 /// Also, KMSAN currently ignores uninitialized memory passed into inline asm
/// calls, erring on the safe side with respect to possible false positives.
141 ///
142 ///  KernelMemorySanitizer only supports X86_64 and SystemZ at the moment.
143 ///
144 //
145 // FIXME: This sanitizer does not yet handle scalable vectors
146 //
147 //===----------------------------------------------------------------------===//
148 
149 #include "llvm/Transforms/Instrumentation/MemorySanitizer.h"
150 #include "llvm/ADT/APInt.h"
151 #include "llvm/ADT/ArrayRef.h"
152 #include "llvm/ADT/DenseMap.h"
153 #include "llvm/ADT/DepthFirstIterator.h"
154 #include "llvm/ADT/SetVector.h"
155 #include "llvm/ADT/SmallVector.h"
156 #include "llvm/ADT/StringExtras.h"
157 #include "llvm/ADT/StringRef.h"
158 #include "llvm/Analysis/GlobalsModRef.h"
159 #include "llvm/Analysis/TargetLibraryInfo.h"
160 #include "llvm/Analysis/ValueTracking.h"
161 #include "llvm/IR/Argument.h"
162 #include "llvm/IR/AttributeMask.h"
163 #include "llvm/IR/Attributes.h"
164 #include "llvm/IR/BasicBlock.h"
165 #include "llvm/IR/CallingConv.h"
166 #include "llvm/IR/Constant.h"
167 #include "llvm/IR/Constants.h"
168 #include "llvm/IR/DataLayout.h"
169 #include "llvm/IR/DerivedTypes.h"
170 #include "llvm/IR/Function.h"
171 #include "llvm/IR/GlobalValue.h"
172 #include "llvm/IR/GlobalVariable.h"
173 #include "llvm/IR/IRBuilder.h"
174 #include "llvm/IR/InlineAsm.h"
175 #include "llvm/IR/InstVisitor.h"
176 #include "llvm/IR/InstrTypes.h"
177 #include "llvm/IR/Instruction.h"
178 #include "llvm/IR/Instructions.h"
179 #include "llvm/IR/IntrinsicInst.h"
180 #include "llvm/IR/Intrinsics.h"
181 #include "llvm/IR/IntrinsicsX86.h"
182 #include "llvm/IR/MDBuilder.h"
183 #include "llvm/IR/Module.h"
184 #include "llvm/IR/Type.h"
185 #include "llvm/IR/Value.h"
186 #include "llvm/IR/ValueMap.h"
187 #include "llvm/Support/Alignment.h"
188 #include "llvm/Support/AtomicOrdering.h"
189 #include "llvm/Support/Casting.h"
190 #include "llvm/Support/CommandLine.h"
191 #include "llvm/Support/Debug.h"
192 #include "llvm/Support/DebugCounter.h"
193 #include "llvm/Support/ErrorHandling.h"
194 #include "llvm/Support/MathExtras.h"
195 #include "llvm/Support/raw_ostream.h"
196 #include "llvm/TargetParser/Triple.h"
197 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
198 #include "llvm/Transforms/Utils/Local.h"
199 #include "llvm/Transforms/Utils/ModuleUtils.h"
200 #include <algorithm>
201 #include <cassert>
202 #include <cstddef>
203 #include <cstdint>
204 #include <memory>
205 #include <string>
206 #include <tuple>
207 
208 using namespace llvm;
209 
210 #define DEBUG_TYPE "msan"
211 
212 DEBUG_COUNTER(DebugInsertCheck, "msan-insert-check",
213               "Controls which checks to insert");
214 
215 static const unsigned kOriginSize = 4;
216 static const Align kMinOriginAlignment = Align(4);
217 static const Align kShadowTLSAlignment = Align(8);
218 
219 // These constants must be kept in sync with the ones in msan.h.
220 static const unsigned kParamTLSSize = 800;
221 static const unsigned kRetvalTLSSize = 800;
222 
// Access sizes are powers of two: 1, 2, 4, 8.
224 static const size_t kNumberOfAccessSizes = 4;
225 
226 /// Track origins of uninitialized values.
227 ///
228 /// Adds a section to MemorySanitizer report that points to the allocation
229 /// (stack or heap) the uninitialized bits came from originally.
230 static cl::opt<int> ClTrackOrigins(
231     "msan-track-origins",
232     cl::desc("Track origins (allocation sites) of poisoned memory"), cl::Hidden,
233     cl::init(0));
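
// For example (illustrative; exact spelling depends on how the pass is
// driven): with the new pass manager this can be enabled as
//   opt -passes='msan<track-origins=2>' ...
// or, when invoked through clang, via '-mllvm -msan-track-origins=2'.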
234 
235 static cl::opt<bool> ClKeepGoing("msan-keep-going",
236                                  cl::desc("keep going after reporting a UMR"),
237                                  cl::Hidden, cl::init(false));
238 
239 static cl::opt<bool>
240     ClPoisonStack("msan-poison-stack",
241                   cl::desc("poison uninitialized stack variables"), cl::Hidden,
242                   cl::init(true));
243 
244 static cl::opt<bool> ClPoisonStackWithCall(
245     "msan-poison-stack-with-call",
246     cl::desc("poison uninitialized stack variables with a call"), cl::Hidden,
247     cl::init(false));
248 
249 static cl::opt<int> ClPoisonStackPattern(
250     "msan-poison-stack-pattern",
251     cl::desc("poison uninitialized stack variables with the given pattern"),
252     cl::Hidden, cl::init(0xff));
253 
254 static cl::opt<bool>
255     ClPrintStackNames("msan-print-stack-names",
256                       cl::desc("Print name of local stack variable"),
257                       cl::Hidden, cl::init(true));
258 
259 static cl::opt<bool> ClPoisonUndef("msan-poison-undef",
260                                    cl::desc("poison undef temps"), cl::Hidden,
261                                    cl::init(true));
262 
263 static cl::opt<bool>
264     ClHandleICmp("msan-handle-icmp",
265                  cl::desc("propagate shadow through ICmpEQ and ICmpNE"),
266                  cl::Hidden, cl::init(true));
267 
268 static cl::opt<bool>
269     ClHandleICmpExact("msan-handle-icmp-exact",
270                       cl::desc("exact handling of relational integer ICmp"),
271                       cl::Hidden, cl::init(false));
272 
273 static cl::opt<bool> ClHandleLifetimeIntrinsics(
274     "msan-handle-lifetime-intrinsics",
275     cl::desc(
276         "when possible, poison scoped variables at the beginning of the scope "
277         "(slower, but more precise)"),
278     cl::Hidden, cl::init(true));
279 
280 // When compiling the Linux kernel, we sometimes see false positives related to
281 // MSan being unable to understand that inline assembly calls may initialize
282 // local variables.
283 // This flag makes the compiler conservatively unpoison every memory location
284 // passed into an assembly call. Note that this may cause false positives.
285 // Because it's impossible to figure out the array sizes, we can only unpoison
286 // the first sizeof(type) bytes for each type* pointer.
287 // The instrumentation is only enabled in KMSAN builds, and only if
288 // -msan-handle-asm-conservative is on. This is done because we may want to
289 // quickly disable assembly instrumentation when it breaks.
290 static cl::opt<bool> ClHandleAsmConservative(
291     "msan-handle-asm-conservative",
292     cl::desc("conservative handling of inline assembly"), cl::Hidden,
293     cl::init(true));
294 
295 // This flag controls whether we check the shadow of the address
// operand of a load or store. Such bugs are very rare, since a load from
// a garbage address typically results in SEGV, but they still happen
// (e.g. only the lower bits of the address are garbage, or the access
// happens early at program startup, where malloc-ed memory is more likely
// to be zeroed). As of 2012-08-28 this flag adds a 20% slowdown.
301 static cl::opt<bool> ClCheckAccessAddress(
302     "msan-check-access-address",
303     cl::desc("report accesses through a pointer which has poisoned shadow"),
304     cl::Hidden, cl::init(true));
305 
306 static cl::opt<bool> ClEagerChecks(
307     "msan-eager-checks",
308     cl::desc("check arguments and return values at function call boundaries"),
309     cl::Hidden, cl::init(false));
310 
311 static cl::opt<bool> ClDumpStrictInstructions(
312     "msan-dump-strict-instructions",
313     cl::desc("print out instructions with default strict semantics"),
314     cl::Hidden, cl::init(false));
315 
316 static cl::opt<int> ClInstrumentationWithCallThreshold(
317     "msan-instrumentation-with-call-threshold",
318     cl::desc(
319         "If the function being instrumented requires more than "
320         "this number of checks and origin stores, use callbacks instead of "
321         "inline checks (-1 means never use callbacks)."),
322     cl::Hidden, cl::init(3500));
323 
324 static cl::opt<bool>
325     ClEnableKmsan("msan-kernel",
326                   cl::desc("Enable KernelMemorySanitizer instrumentation"),
327                   cl::Hidden, cl::init(false));
328 
329 static cl::opt<bool>
330     ClDisableChecks("msan-disable-checks",
331                     cl::desc("Apply no_sanitize to the whole file"), cl::Hidden,
332                     cl::init(false));
333 
334 static cl::opt<bool>
335     ClCheckConstantShadow("msan-check-constant-shadow",
336                           cl::desc("Insert checks for constant shadow values"),
337                           cl::Hidden, cl::init(true));
338 
339 // This is off by default because of a bug in gold:
340 // https://sourceware.org/bugzilla/show_bug.cgi?id=19002
341 static cl::opt<bool>
342     ClWithComdat("msan-with-comdat",
343                  cl::desc("Place MSan constructors in comdat sections"),
344                  cl::Hidden, cl::init(false));
345 
// These options allow specifying custom memory map parameters.
347 // See MemoryMapParams for details.
348 static cl::opt<uint64_t> ClAndMask("msan-and-mask",
349                                    cl::desc("Define custom MSan AndMask"),
350                                    cl::Hidden, cl::init(0));
351 
352 static cl::opt<uint64_t> ClXorMask("msan-xor-mask",
353                                    cl::desc("Define custom MSan XorMask"),
354                                    cl::Hidden, cl::init(0));
355 
356 static cl::opt<uint64_t> ClShadowBase("msan-shadow-base",
357                                       cl::desc("Define custom MSan ShadowBase"),
358                                       cl::Hidden, cl::init(0));
359 
360 static cl::opt<uint64_t> ClOriginBase("msan-origin-base",
361                                       cl::desc("Define custom MSan OriginBase"),
362                                       cl::Hidden, cl::init(0));
363 
364 static cl::opt<int>
365     ClDisambiguateWarning("msan-disambiguate-warning-threshold",
366                           cl::desc("Define threshold for number of checks per "
367                                    "debug location to force origin update."),
368                           cl::Hidden, cl::init(3));
369 
370 const char kMsanModuleCtorName[] = "msan.module_ctor";
371 const char kMsanInitName[] = "__msan_init";
372 
373 namespace {
374 
375 // Memory map parameters used in application-to-shadow address calculation.
376 // Offset = (Addr & ~AndMask) ^ XorMask
377 // Shadow = ShadowBase + Offset
378 // Origin = OriginBase + Offset
379 struct MemoryMapParams {
380   uint64_t AndMask;
381   uint64_t XorMask;
382   uint64_t ShadowBase;
383   uint64_t OriginBase;
384 };
385 
386 struct PlatformMemoryMapParams {
387   const MemoryMapParams *bits32;
388   const MemoryMapParams *bits64;
389 };
390 
391 } // end anonymous namespace
392 
393 // i386 Linux
394 static const MemoryMapParams Linux_I386_MemoryMapParams = {
395     0x000080000000, // AndMask
396     0,              // XorMask (not used)
397     0,              // ShadowBase (not used)
398     0x000040000000, // OriginBase
399 };
400 
401 // x86_64 Linux
402 static const MemoryMapParams Linux_X86_64_MemoryMapParams = {
403     0,              // AndMask (not used)
404     0x500000000000, // XorMask
405     0,              // ShadowBase (not used)
406     0x100000000000, // OriginBase
407 };
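
// For example, with the x86_64 Linux parameters above, the mapping described
// at MemoryMapParams reduces to:
//   Shadow = Addr ^ 0x500000000000
//   Origin = (Addr ^ 0x500000000000) + 0x100000000000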
408 
409 // mips64 Linux
410 static const MemoryMapParams Linux_MIPS64_MemoryMapParams = {
411     0,              // AndMask (not used)
412     0x008000000000, // XorMask
413     0,              // ShadowBase (not used)
414     0x002000000000, // OriginBase
415 };
416 
417 // ppc64 Linux
418 static const MemoryMapParams Linux_PowerPC64_MemoryMapParams = {
419     0xE00000000000, // AndMask
420     0x100000000000, // XorMask
421     0x080000000000, // ShadowBase
422     0x1C0000000000, // OriginBase
423 };
424 
425 // s390x Linux
426 static const MemoryMapParams Linux_S390X_MemoryMapParams = {
427     0xC00000000000, // AndMask
428     0,              // XorMask (not used)
429     0x080000000000, // ShadowBase
430     0x1C0000000000, // OriginBase
431 };
432 
433 // aarch64 Linux
434 static const MemoryMapParams Linux_AArch64_MemoryMapParams = {
435     0,               // AndMask (not used)
436     0x0B00000000000, // XorMask
437     0,               // ShadowBase (not used)
438     0x0200000000000, // OriginBase
439 };
440 
441 // loongarch64 Linux
442 static const MemoryMapParams Linux_LoongArch64_MemoryMapParams = {
443     0,              // AndMask (not used)
444     0x500000000000, // XorMask
445     0,              // ShadowBase (not used)
446     0x100000000000, // OriginBase
447 };
448 
449 // aarch64 FreeBSD
450 static const MemoryMapParams FreeBSD_AArch64_MemoryMapParams = {
451     0x1800000000000, // AndMask
452     0x0400000000000, // XorMask
453     0x0200000000000, // ShadowBase
454     0x0700000000000, // OriginBase
455 };
456 
457 // i386 FreeBSD
458 static const MemoryMapParams FreeBSD_I386_MemoryMapParams = {
459     0x000180000000, // AndMask
460     0x000040000000, // XorMask
461     0x000020000000, // ShadowBase
462     0x000700000000, // OriginBase
463 };
464 
465 // x86_64 FreeBSD
466 static const MemoryMapParams FreeBSD_X86_64_MemoryMapParams = {
467     0xc00000000000, // AndMask
468     0x200000000000, // XorMask
469     0x100000000000, // ShadowBase
470     0x380000000000, // OriginBase
471 };
472 
473 // x86_64 NetBSD
474 static const MemoryMapParams NetBSD_X86_64_MemoryMapParams = {
475     0,              // AndMask
476     0x500000000000, // XorMask
477     0,              // ShadowBase
478     0x100000000000, // OriginBase
479 };
480 
481 static const PlatformMemoryMapParams Linux_X86_MemoryMapParams = {
482     &Linux_I386_MemoryMapParams,
483     &Linux_X86_64_MemoryMapParams,
484 };
485 
486 static const PlatformMemoryMapParams Linux_MIPS_MemoryMapParams = {
487     nullptr,
488     &Linux_MIPS64_MemoryMapParams,
489 };
490 
491 static const PlatformMemoryMapParams Linux_PowerPC_MemoryMapParams = {
492     nullptr,
493     &Linux_PowerPC64_MemoryMapParams,
494 };
495 
496 static const PlatformMemoryMapParams Linux_S390_MemoryMapParams = {
497     nullptr,
498     &Linux_S390X_MemoryMapParams,
499 };
500 
501 static const PlatformMemoryMapParams Linux_ARM_MemoryMapParams = {
502     nullptr,
503     &Linux_AArch64_MemoryMapParams,
504 };
505 
506 static const PlatformMemoryMapParams Linux_LoongArch_MemoryMapParams = {
507     nullptr,
508     &Linux_LoongArch64_MemoryMapParams,
509 };
510 
511 static const PlatformMemoryMapParams FreeBSD_ARM_MemoryMapParams = {
512     nullptr,
513     &FreeBSD_AArch64_MemoryMapParams,
514 };
515 
516 static const PlatformMemoryMapParams FreeBSD_X86_MemoryMapParams = {
517     &FreeBSD_I386_MemoryMapParams,
518     &FreeBSD_X86_64_MemoryMapParams,
519 };
520 
521 static const PlatformMemoryMapParams NetBSD_X86_MemoryMapParams = {
522     nullptr,
523     &NetBSD_X86_64_MemoryMapParams,
524 };
525 
526 namespace {
527 
528 /// Instrument functions of a module to detect uninitialized reads.
529 ///
530 /// Instantiating MemorySanitizer inserts the msan runtime library API function
531 /// declarations into the module if they don't exist already. Instantiating
532 /// ensures the __msan_init function is in the list of global constructors for
533 /// the module.
534 class MemorySanitizer {
535 public:
  MemorySanitizer(Module &M, MemorySanitizerOptions Options)
537       : CompileKernel(Options.Kernel), TrackOrigins(Options.TrackOrigins),
538         Recover(Options.Recover), EagerChecks(Options.EagerChecks) {
539     initializeModule(M);
540   }
541 
542   // MSan cannot be moved or copied because of MapParams.
543   MemorySanitizer(MemorySanitizer &&) = delete;
544   MemorySanitizer &operator=(MemorySanitizer &&) = delete;
545   MemorySanitizer(const MemorySanitizer &) = delete;
546   MemorySanitizer &operator=(const MemorySanitizer &) = delete;
547 
548   bool sanitizeFunction(Function &F, TargetLibraryInfo &TLI);
549 
550 private:
551   friend struct MemorySanitizerVisitor;
552   friend struct VarArgHelperBase;
553   friend struct VarArgAMD64Helper;
554   friend struct VarArgMIPS64Helper;
555   friend struct VarArgAArch64Helper;
556   friend struct VarArgPowerPC64Helper;
557   friend struct VarArgSystemZHelper;
558 
559   void initializeModule(Module &M);
560   void initializeCallbacks(Module &M, const TargetLibraryInfo &TLI);
561   void createKernelApi(Module &M, const TargetLibraryInfo &TLI);
562   void createUserspaceApi(Module &M, const TargetLibraryInfo &TLI);
563 
564   template <typename... ArgsTy>
565   FunctionCallee getOrInsertMsanMetadataFunction(Module &M, StringRef Name,
566                                                  ArgsTy... Args);
567 
568   /// True if we're compiling the Linux kernel.
569   bool CompileKernel;
570   /// Track origins (allocation points) of uninitialized values.
571   int TrackOrigins;
572   bool Recover;
573   bool EagerChecks;
574 
575   Triple TargetTriple;
576   LLVMContext *C;
577   Type *IntptrTy;  ///< Integer type with the size of a ptr in default AS.
578   Type *OriginTy;
  PointerType *PtrTy; ///< Pointer type in the default address space.
580 
581   // XxxTLS variables represent the per-thread state in MSan and per-task state
582   // in KMSAN.
583   // For the userspace these point to thread-local globals. In the kernel land
584   // they point to the members of a per-task struct obtained via a call to
585   // __msan_get_context_state().
586 
587   /// Thread-local shadow storage for function parameters.
588   Value *ParamTLS;
589 
590   /// Thread-local origin storage for function parameters.
591   Value *ParamOriginTLS;
592 
593   /// Thread-local shadow storage for function return value.
594   Value *RetvalTLS;
595 
596   /// Thread-local origin storage for function return value.
597   Value *RetvalOriginTLS;
598 
599   /// Thread-local shadow storage for in-register va_arg function.
600   Value *VAArgTLS;
601 
  /// Thread-local origin storage for in-register va_arg function.
603   Value *VAArgOriginTLS;
604 
  /// Thread-local storage for the size of the va_arg overflow area.
606   Value *VAArgOverflowSizeTLS;
607 
608   /// Are the instrumentation callbacks set up?
609   bool CallbacksInitialized = false;
610 
611   /// The run-time callback to print a warning.
612   FunctionCallee WarningFn;
613 
614   // These arrays are indexed by log2(AccessSize).
615   FunctionCallee MaybeWarningFn[kNumberOfAccessSizes];
616   FunctionCallee MaybeStoreOriginFn[kNumberOfAccessSizes];
617 
618   /// Run-time helper that generates a new origin value for a stack
619   /// allocation.
620   FunctionCallee MsanSetAllocaOriginWithDescriptionFn;
621   // No description version
622   FunctionCallee MsanSetAllocaOriginNoDescriptionFn;
623 
624   /// Run-time helper that poisons stack on function entry.
625   FunctionCallee MsanPoisonStackFn;
626 
627   /// Run-time helper that records a store (or any event) of an
628   /// uninitialized value and returns an updated origin id encoding this info.
629   FunctionCallee MsanChainOriginFn;
630 
631   /// Run-time helper that paints an origin over a region.
632   FunctionCallee MsanSetOriginFn;
633 
634   /// MSan runtime replacements for memmove, memcpy and memset.
635   FunctionCallee MemmoveFn, MemcpyFn, MemsetFn;
636 
637   /// KMSAN callback for task-local function argument shadow.
638   StructType *MsanContextStateTy;
639   FunctionCallee MsanGetContextStateFn;
640 
641   /// Functions for poisoning/unpoisoning local variables
642   FunctionCallee MsanPoisonAllocaFn, MsanUnpoisonAllocaFn;
643 
644   /// Pair of shadow/origin pointers.
645   Type *MsanMetadata;
646 
647   /// Each of the MsanMetadataPtrXxx functions returns a MsanMetadata.
648   FunctionCallee MsanMetadataPtrForLoadN, MsanMetadataPtrForStoreN;
649   FunctionCallee MsanMetadataPtrForLoad_1_8[4];
650   FunctionCallee MsanMetadataPtrForStore_1_8[4];
651   FunctionCallee MsanInstrumentAsmStoreFn;
652 
653   /// Storage for return values of the MsanMetadataPtrXxx functions.
654   Value *MsanMetadataAlloca;
655 
656   /// Helper to choose between different MsanMetadataPtrXxx().
657   FunctionCallee getKmsanShadowOriginAccessFn(bool isStore, int size);
658 
659   /// Memory map parameters used in application-to-shadow calculation.
660   const MemoryMapParams *MapParams;
661 
662   /// Custom memory map parameters used when -msan-shadow-base or
  /// -msan-origin-base is provided.
664   MemoryMapParams CustomMapParams;
665 
666   MDNode *ColdCallWeights;
667 
668   /// Branch weights for origin store.
669   MDNode *OriginStoreWeights;
670 };
671 
void insertModuleCtor(Module &M) {
673   getOrCreateSanitizerCtorAndInitFunctions(
674       M, kMsanModuleCtorName, kMsanInitName,
675       /*InitArgTypes=*/{},
676       /*InitArgs=*/{},
677       // This callback is invoked when the functions are created the first
678       // time. Hook them into the global ctors list in that case:
679       [&](Function *Ctor, FunctionCallee) {
680         if (!ClWithComdat) {
681           appendToGlobalCtors(M, Ctor, 0);
682           return;
683         }
684         Comdat *MsanCtorComdat = M.getOrInsertComdat(kMsanModuleCtorName);
685         Ctor->setComdat(MsanCtorComdat);
686         appendToGlobalCtors(M, Ctor, 0, Ctor);
687       });
688 }
689 
template <class T> T getOptOrDefault(const cl::opt<T> &Opt, T Default) {
691   return (Opt.getNumOccurrences() > 0) ? Opt : Default;
692 }
693 
694 } // end anonymous namespace
695 
MemorySanitizerOptions::MemorySanitizerOptions(int TO, bool R, bool K,
697                                                bool EagerChecks)
698     : Kernel(getOptOrDefault(ClEnableKmsan, K)),
699       TrackOrigins(getOptOrDefault(ClTrackOrigins, Kernel ? 2 : TO)),
700       Recover(getOptOrDefault(ClKeepGoing, Kernel || R)),
701       EagerChecks(getOptOrDefault(ClEagerChecks, EagerChecks)) {}
702 
PreservedAnalyses MemorySanitizerPass::run(Module &M,
704                                            ModuleAnalysisManager &AM) {
705   bool Modified = false;
706   if (!Options.Kernel) {
707     insertModuleCtor(M);
708     Modified = true;
709   }
710 
711   auto &FAM = AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
712   for (Function &F : M) {
713     if (F.empty())
714       continue;
715     MemorySanitizer Msan(*F.getParent(), Options);
716     Modified |=
717         Msan.sanitizeFunction(F, FAM.getResult<TargetLibraryAnalysis>(F));
718   }
719 
720   if (!Modified)
721     return PreservedAnalyses::all();
722 
723   PreservedAnalyses PA = PreservedAnalyses::none();
724   // GlobalsAA is considered stateless and does not get invalidated unless
725   // explicitly invalidated; PreservedAnalyses::none() is not enough. Sanitizers
726   // make changes that require GlobalsAA to be invalidated.
727   PA.abandon<GlobalsAA>();
728   return PA;
729 }
730 
void MemorySanitizerPass::printPipeline(
732     raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
733   static_cast<PassInfoMixin<MemorySanitizerPass> *>(this)->printPipeline(
734       OS, MapClassName2PassName);
735   OS << '<';
736   if (Options.Recover)
737     OS << "recover;";
738   if (Options.Kernel)
739     OS << "kernel;";
740   if (Options.EagerChecks)
741     OS << "eager-checks;";
742   OS << "track-origins=" << Options.TrackOrigins;
743   OS << '>';
744 }
745 
746 /// Create a non-const global initialized with the given string.
747 ///
748 /// Creates a writable global for Str so that we can pass it to the
749 /// run-time lib. Runtime uses first 4 bytes of the string to store the
750 /// frame ID, so the string needs to be mutable.
static GlobalVariable *createPrivateConstGlobalForString(Module &M,
752                                                          StringRef Str) {
753   Constant *StrConst = ConstantDataArray::getString(M.getContext(), Str);
754   return new GlobalVariable(M, StrConst->getType(), /*isConstant=*/true,
755                             GlobalValue::PrivateLinkage, StrConst, "");
756 }
757 
758 template <typename... ArgsTy>
759 FunctionCallee
MemorySanitizer::getOrInsertMsanMetadataFunction(Module &M, StringRef Name,
761                                                  ArgsTy... Args) {
762   if (TargetTriple.getArch() == Triple::systemz) {
763     // SystemZ ABI: shadow/origin pair is returned via a hidden parameter.
764     return M.getOrInsertFunction(Name, Type::getVoidTy(*C),
765                                  PointerType::get(MsanMetadata, 0),
766                                  std::forward<ArgsTy>(Args)...);
767   }
768 
769   return M.getOrInsertFunction(Name, MsanMetadata,
770                                std::forward<ArgsTy>(Args)...);
771 }
772 
773 /// Create KMSAN API callbacks.
void MemorySanitizer::createKernelApi(Module &M, const TargetLibraryInfo &TLI) {
775   IRBuilder<> IRB(*C);
776 
777   // These will be initialized in insertKmsanPrologue().
778   RetvalTLS = nullptr;
779   RetvalOriginTLS = nullptr;
780   ParamTLS = nullptr;
781   ParamOriginTLS = nullptr;
782   VAArgTLS = nullptr;
783   VAArgOriginTLS = nullptr;
784   VAArgOverflowSizeTLS = nullptr;
785 
786   WarningFn = M.getOrInsertFunction("__msan_warning",
787                                     TLI.getAttrList(C, {0}, /*Signed=*/false),
788                                     IRB.getVoidTy(), IRB.getInt32Ty());
789 
790   // Requests the per-task context state (kmsan_context_state*) from the
791   // runtime library.
792   MsanContextStateTy = StructType::get(
793       ArrayType::get(IRB.getInt64Ty(), kParamTLSSize / 8),
794       ArrayType::get(IRB.getInt64Ty(), kRetvalTLSSize / 8),
795       ArrayType::get(IRB.getInt64Ty(), kParamTLSSize / 8),
796       ArrayType::get(IRB.getInt64Ty(), kParamTLSSize / 8), /* va_arg_origin */
797       IRB.getInt64Ty(), ArrayType::get(OriginTy, kParamTLSSize / 4), OriginTy,
798       OriginTy);
799   MsanGetContextStateFn = M.getOrInsertFunction(
800       "__msan_get_context_state", PointerType::get(MsanContextStateTy, 0));
801 
802   MsanMetadata = StructType::get(PointerType::get(IRB.getInt8Ty(), 0),
803                                  PointerType::get(IRB.getInt32Ty(), 0));
804 
805   for (int ind = 0, size = 1; ind < 4; ind++, size <<= 1) {
806     std::string name_load =
807         "__msan_metadata_ptr_for_load_" + std::to_string(size);
808     std::string name_store =
809         "__msan_metadata_ptr_for_store_" + std::to_string(size);
810     MsanMetadataPtrForLoad_1_8[ind] = getOrInsertMsanMetadataFunction(
811         M, name_load, PointerType::get(IRB.getInt8Ty(), 0));
812     MsanMetadataPtrForStore_1_8[ind] = getOrInsertMsanMetadataFunction(
813         M, name_store, PointerType::get(IRB.getInt8Ty(), 0));
814   }
815 
816   MsanMetadataPtrForLoadN = getOrInsertMsanMetadataFunction(
817       M, "__msan_metadata_ptr_for_load_n", PointerType::get(IRB.getInt8Ty(), 0),
818       IRB.getInt64Ty());
819   MsanMetadataPtrForStoreN = getOrInsertMsanMetadataFunction(
820       M, "__msan_metadata_ptr_for_store_n",
821       PointerType::get(IRB.getInt8Ty(), 0), IRB.getInt64Ty());
822 
823   // Functions for poisoning and unpoisoning memory.
824   MsanPoisonAllocaFn = M.getOrInsertFunction(
825       "__msan_poison_alloca", IRB.getVoidTy(), PtrTy, IntptrTy, PtrTy);
826   MsanUnpoisonAllocaFn = M.getOrInsertFunction(
827       "__msan_unpoison_alloca", IRB.getVoidTy(), PtrTy, IntptrTy);
828 }
829 
static Constant *getOrInsertGlobal(Module &M, StringRef Name, Type *Ty) {
831   return M.getOrInsertGlobal(Name, Ty, [&] {
832     return new GlobalVariable(M, Ty, false, GlobalVariable::ExternalLinkage,
833                               nullptr, Name, nullptr,
834                               GlobalVariable::InitialExecTLSModel);
835   });
836 }
837 
838 /// Insert declarations for userspace-specific functions and globals.
void MemorySanitizer::createUserspaceApi(Module &M,
                                         const TargetLibraryInfo &TLI) {
840   IRBuilder<> IRB(*C);
841 
842   // Create the callback.
843   // FIXME: this function should have "Cold" calling conv,
844   // which is not yet implemented.
845   if (TrackOrigins) {
846     StringRef WarningFnName = Recover ? "__msan_warning_with_origin"
847                                       : "__msan_warning_with_origin_noreturn";
848     WarningFn = M.getOrInsertFunction(WarningFnName,
849                                       TLI.getAttrList(C, {0}, /*Signed=*/false),
850                                       IRB.getVoidTy(), IRB.getInt32Ty());
851   } else {
852     StringRef WarningFnName =
853         Recover ? "__msan_warning" : "__msan_warning_noreturn";
854     WarningFn = M.getOrInsertFunction(WarningFnName, IRB.getVoidTy());
855   }
856 
857   // Create the global TLS variables.
858   RetvalTLS =
859       getOrInsertGlobal(M, "__msan_retval_tls",
860                         ArrayType::get(IRB.getInt64Ty(), kRetvalTLSSize / 8));
861 
862   RetvalOriginTLS = getOrInsertGlobal(M, "__msan_retval_origin_tls", OriginTy);
863 
864   ParamTLS =
865       getOrInsertGlobal(M, "__msan_param_tls",
866                         ArrayType::get(IRB.getInt64Ty(), kParamTLSSize / 8));
867 
868   ParamOriginTLS =
869       getOrInsertGlobal(M, "__msan_param_origin_tls",
870                         ArrayType::get(OriginTy, kParamTLSSize / 4));
871 
872   VAArgTLS =
873       getOrInsertGlobal(M, "__msan_va_arg_tls",
874                         ArrayType::get(IRB.getInt64Ty(), kParamTLSSize / 8));
875 
876   VAArgOriginTLS =
877       getOrInsertGlobal(M, "__msan_va_arg_origin_tls",
878                         ArrayType::get(OriginTy, kParamTLSSize / 4));
879 
880   VAArgOverflowSizeTLS =
881       getOrInsertGlobal(M, "__msan_va_arg_overflow_size_tls", IRB.getInt64Ty());
882 
883   for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes;
884        AccessSizeIndex++) {
885     unsigned AccessSize = 1 << AccessSizeIndex;
886     std::string FunctionName = "__msan_maybe_warning_" + itostr(AccessSize);
887     MaybeWarningFn[AccessSizeIndex] = M.getOrInsertFunction(
888         FunctionName, TLI.getAttrList(C, {0, 1}, /*Signed=*/false),
889         IRB.getVoidTy(), IRB.getIntNTy(AccessSize * 8), IRB.getInt32Ty());
890 
891     FunctionName = "__msan_maybe_store_origin_" + itostr(AccessSize);
892     MaybeStoreOriginFn[AccessSizeIndex] = M.getOrInsertFunction(
893         FunctionName, TLI.getAttrList(C, {0, 2}, /*Signed=*/false),
894         IRB.getVoidTy(), IRB.getIntNTy(AccessSize * 8), PtrTy,
895         IRB.getInt32Ty());
896   }
897 
898   MsanSetAllocaOriginWithDescriptionFn =
899       M.getOrInsertFunction("__msan_set_alloca_origin_with_descr",
900                             IRB.getVoidTy(), PtrTy, IntptrTy, PtrTy, PtrTy);
901   MsanSetAllocaOriginNoDescriptionFn =
902       M.getOrInsertFunction("__msan_set_alloca_origin_no_descr",
903                             IRB.getVoidTy(), PtrTy, IntptrTy, PtrTy);
904   MsanPoisonStackFn = M.getOrInsertFunction("__msan_poison_stack",
905                                             IRB.getVoidTy(), PtrTy, IntptrTy);
906 }
907 
/// Insert extern declarations of runtime-provided functions and globals.
void MemorySanitizer::initializeCallbacks(Module &M,
                                          const TargetLibraryInfo &TLI) {
910   // Only do this once.
911   if (CallbacksInitialized)
912     return;
913 
914   IRBuilder<> IRB(*C);
915   // Initialize callbacks that are common for kernel and userspace
916   // instrumentation.
917   MsanChainOriginFn = M.getOrInsertFunction(
918       "__msan_chain_origin",
919       TLI.getAttrList(C, {0}, /*Signed=*/false, /*Ret=*/true), IRB.getInt32Ty(),
920       IRB.getInt32Ty());
921   MsanSetOriginFn = M.getOrInsertFunction(
922       "__msan_set_origin", TLI.getAttrList(C, {2}, /*Signed=*/false),
923       IRB.getVoidTy(), PtrTy, IntptrTy, IRB.getInt32Ty());
924   MemmoveFn =
925       M.getOrInsertFunction("__msan_memmove", PtrTy, PtrTy, PtrTy, IntptrTy);
926   MemcpyFn =
927       M.getOrInsertFunction("__msan_memcpy", PtrTy, PtrTy, PtrTy, IntptrTy);
928   MemsetFn = M.getOrInsertFunction("__msan_memset",
929                                    TLI.getAttrList(C, {1}, /*Signed=*/true),
930                                    PtrTy, PtrTy, IRB.getInt32Ty(), IntptrTy);
931 
932   MsanInstrumentAsmStoreFn =
933       M.getOrInsertFunction("__msan_instrument_asm_store", IRB.getVoidTy(),
934                             PointerType::get(IRB.getInt8Ty(), 0), IntptrTy);
935 
936   if (CompileKernel) {
937     createKernelApi(M, TLI);
938   } else {
939     createUserspaceApi(M, TLI);
940   }
941   CallbacksInitialized = true;
942 }
943 
FunctionCallee MemorySanitizer::getKmsanShadowOriginAccessFn(bool isStore,
945                                                              int size) {
946   FunctionCallee *Fns =
947       isStore ? MsanMetadataPtrForStore_1_8 : MsanMetadataPtrForLoad_1_8;
948   switch (size) {
949   case 1:
950     return Fns[0];
951   case 2:
952     return Fns[1];
953   case 4:
954     return Fns[2];
955   case 8:
956     return Fns[3];
957   default:
958     return nullptr;
959   }
960 }
961 
962 /// Module-level initialization.
963 ///
/// Inserts a call to __msan_init into the module's constructor list.
void MemorySanitizer::initializeModule(Module &M) {
966   auto &DL = M.getDataLayout();
967 
968   TargetTriple = Triple(M.getTargetTriple());
969 
970   bool ShadowPassed = ClShadowBase.getNumOccurrences() > 0;
971   bool OriginPassed = ClOriginBase.getNumOccurrences() > 0;
972   // Check the overrides first
973   if (ShadowPassed || OriginPassed) {
974     CustomMapParams.AndMask = ClAndMask;
975     CustomMapParams.XorMask = ClXorMask;
976     CustomMapParams.ShadowBase = ClShadowBase;
977     CustomMapParams.OriginBase = ClOriginBase;
978     MapParams = &CustomMapParams;
979   } else {
980     switch (TargetTriple.getOS()) {
981     case Triple::FreeBSD:
982       switch (TargetTriple.getArch()) {
983       case Triple::aarch64:
984         MapParams = FreeBSD_ARM_MemoryMapParams.bits64;
985         break;
986       case Triple::x86_64:
987         MapParams = FreeBSD_X86_MemoryMapParams.bits64;
988         break;
989       case Triple::x86:
990         MapParams = FreeBSD_X86_MemoryMapParams.bits32;
991         break;
992       default:
993         report_fatal_error("unsupported architecture");
994       }
995       break;
996     case Triple::NetBSD:
997       switch (TargetTriple.getArch()) {
998       case Triple::x86_64:
999         MapParams = NetBSD_X86_MemoryMapParams.bits64;
1000         break;
1001       default:
1002         report_fatal_error("unsupported architecture");
1003       }
1004       break;
1005     case Triple::Linux:
1006       switch (TargetTriple.getArch()) {
1007       case Triple::x86_64:
1008         MapParams = Linux_X86_MemoryMapParams.bits64;
1009         break;
1010       case Triple::x86:
1011         MapParams = Linux_X86_MemoryMapParams.bits32;
1012         break;
1013       case Triple::mips64:
1014       case Triple::mips64el:
1015         MapParams = Linux_MIPS_MemoryMapParams.bits64;
1016         break;
1017       case Triple::ppc64:
1018       case Triple::ppc64le:
1019         MapParams = Linux_PowerPC_MemoryMapParams.bits64;
1020         break;
1021       case Triple::systemz:
1022         MapParams = Linux_S390_MemoryMapParams.bits64;
1023         break;
1024       case Triple::aarch64:
1025       case Triple::aarch64_be:
1026         MapParams = Linux_ARM_MemoryMapParams.bits64;
1027         break;
1028       case Triple::loongarch64:
1029         MapParams = Linux_LoongArch_MemoryMapParams.bits64;
1030         break;
1031       default:
1032         report_fatal_error("unsupported architecture");
1033       }
1034       break;
1035     default:
1036       report_fatal_error("unsupported operating system");
1037     }
1038   }
1039 
1040   C = &(M.getContext());
1041   IRBuilder<> IRB(*C);
1042   IntptrTy = IRB.getIntPtrTy(DL);
1043   OriginTy = IRB.getInt32Ty();
1044   PtrTy = IRB.getPtrTy();
1045 
1046   ColdCallWeights = MDBuilder(*C).createBranchWeights(1, 1000);
1047   OriginStoreWeights = MDBuilder(*C).createBranchWeights(1, 1000);
1048 
1049   if (!CompileKernel) {
1050     if (TrackOrigins)
1051       M.getOrInsertGlobal("__msan_track_origins", IRB.getInt32Ty(), [&] {
1052         return new GlobalVariable(
1053             M, IRB.getInt32Ty(), true, GlobalValue::WeakODRLinkage,
1054             IRB.getInt32(TrackOrigins), "__msan_track_origins");
1055       });
1056 
1057     if (Recover)
1058       M.getOrInsertGlobal("__msan_keep_going", IRB.getInt32Ty(), [&] {
1059         return new GlobalVariable(M, IRB.getInt32Ty(), true,
1060                                   GlobalValue::WeakODRLinkage,
1061                                   IRB.getInt32(Recover), "__msan_keep_going");
1062       });
1063   }
1064 }
1065 
1066 namespace {
1067 
1068 /// A helper class that handles instrumentation of VarArg
1069 /// functions on a particular platform.
1070 ///
1071 /// Implementations are expected to insert the instrumentation
1072 /// necessary to propagate argument shadow through VarArg function
1073 /// calls. Visit* methods are called during an InstVisitor pass over
1074 /// the function, and should avoid creating new basic blocks. A new
1075 /// instance of this class is created for each instrumented function.
1076 struct VarArgHelper {
1077   virtual ~VarArgHelper() = default;
1078 
1079   /// Visit a CallBase.
1080   virtual void visitCallBase(CallBase &CB, IRBuilder<> &IRB) = 0;
1081 
1082   /// Visit a va_start call.
1083   virtual void visitVAStartInst(VAStartInst &I) = 0;
1084 
1085   /// Visit a va_copy call.
1086   virtual void visitVACopyInst(VACopyInst &I) = 0;
1087 
1088   /// Finalize function instrumentation.
1089   ///
1090   /// This method is called after visiting all interesting (see above)
1091   /// instructions in a function.
1092   virtual void finalizeInstrumentation() = 0;
1093 };
1094 
1095 struct MemorySanitizerVisitor;
1096 
1097 } // end anonymous namespace
1098 
1099 static VarArgHelper *CreateVarArgHelper(Function &Func, MemorySanitizer &Msan,
1100                                         MemorySanitizerVisitor &Visitor);
1101 
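// Maps the size of a memory access to an index into the callback arrays
// (e.g. MaybeWarningFn): 1-8 bits -> 0, 16 bits -> 1, 32 bits -> 2,
// 64 bits -> 3. Larger and scalable sizes map to kNumberOfAccessSizes,
// which callers treat as "no specialized callback available".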
static unsigned TypeSizeToSizeIndex(TypeSize TS) {
1103   if (TS.isScalable())
1104     // Scalable types unconditionally take slowpaths.
1105     return kNumberOfAccessSizes;
1106   unsigned TypeSizeFixed = TS.getFixedValue();
1107   if (TypeSizeFixed <= 8)
1108     return 0;
1109   return Log2_32_Ceil((TypeSizeFixed + 7) / 8);
1110 }
1111 
1112 namespace {
1113 
1114 /// Helper class to attach debug information of the given instruction onto new
1115 /// instructions inserted after.
1116 class NextNodeIRBuilder : public IRBuilder<> {
1117 public:
  explicit NextNodeIRBuilder(Instruction *IP) : IRBuilder<>(IP->getNextNode()) {
1119     SetCurrentDebugLocation(IP->getDebugLoc());
1120   }
1121 };
1122 
1123 /// This class does all the work for a given function. Store and Load
1124 /// instructions store and load corresponding shadow and origin
1125 /// values. Most instructions propagate shadow from arguments to their
1126 /// return values. Certain instructions (most importantly, BranchInst)
1127 /// test their argument shadow and print reports (with a runtime call) if it's
1128 /// non-zero.
1129 struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
1130   Function &F;
1131   MemorySanitizer &MS;
1132   SmallVector<PHINode *, 16> ShadowPHINodes, OriginPHINodes;
1133   ValueMap<Value *, Value *> ShadowMap, OriginMap;
1134   std::unique_ptr<VarArgHelper> VAHelper;
1135   const TargetLibraryInfo *TLI;
1136   Instruction *FnPrologueEnd;
1137 
1138   // The following flags disable parts of MSan instrumentation based on
1139   // exclusion list contents and command-line options.
1140   bool InsertChecks;
1141   bool PropagateShadow;
1142   bool PoisonStack;
1143   bool PoisonUndef;
1144 
1145   struct ShadowOriginAndInsertPoint {
1146     Value *Shadow;
1147     Value *Origin;
1148     Instruction *OrigIns;
1149 
    ShadowOriginAndInsertPoint(Value *S, Value *O, Instruction *I)
1151         : Shadow(S), Origin(O), OrigIns(I) {}
1152   };
1153   SmallVector<ShadowOriginAndInsertPoint, 16> InstrumentationList;
1154   DenseMap<const DILocation *, int> LazyWarningDebugLocationCount;
1155   bool InstrumentLifetimeStart = ClHandleLifetimeIntrinsics;
1156   SmallSetVector<AllocaInst *, 16> AllocaSet;
1157   SmallVector<std::pair<IntrinsicInst *, AllocaInst *>, 16> LifetimeStartList;
1158   SmallVector<StoreInst *, 16> StoreList;
1159   int64_t SplittableBlocksCount = 0;
1160 
  MemorySanitizerVisitor(Function &F, MemorySanitizer &MS,
1162                          const TargetLibraryInfo &TLI)
1163       : F(F), MS(MS), VAHelper(CreateVarArgHelper(F, MS, *this)), TLI(&TLI) {
1164     bool SanitizeFunction =
1165         F.hasFnAttribute(Attribute::SanitizeMemory) && !ClDisableChecks;
1166     InsertChecks = SanitizeFunction;
1167     PropagateShadow = SanitizeFunction;
1168     PoisonStack = SanitizeFunction && ClPoisonStack;
1169     PoisonUndef = SanitizeFunction && ClPoisonUndef;
1170 
1171     // In the presence of unreachable blocks, we may see Phi nodes with
1172     // incoming nodes from such blocks. Since InstVisitor skips unreachable
1173     // blocks, such nodes will not have any shadow value associated with them.
1174     // It's easier to remove unreachable blocks than deal with missing shadow.
1175     removeUnreachableBlocks(F);
1176 
1177     MS.initializeCallbacks(*F.getParent(), TLI);
1178     FnPrologueEnd = IRBuilder<>(F.getEntryBlock().getFirstNonPHI())
1179                         .CreateIntrinsic(Intrinsic::donothing, {}, {});
1180 
1181     if (MS.CompileKernel) {
1182       IRBuilder<> IRB(FnPrologueEnd);
1183       insertKmsanPrologue(IRB);
1184     }
1185 
1186     LLVM_DEBUG(if (!InsertChecks) dbgs()
1187                << "MemorySanitizer is not inserting checks into '"
1188                << F.getName() << "'\n");
1189   }
1190 
  bool instrumentWithCalls(Value *V) {
1192     // Constants likely will be eliminated by follow-up passes.
1193     if (isa<Constant>(V))
1194       return false;
1195 
1196     ++SplittableBlocksCount;
1197     return ClInstrumentationWithCallThreshold >= 0 &&
1198            SplittableBlocksCount > ClInstrumentationWithCallThreshold;
1199   }
1200 
  bool isInPrologue(Instruction &I) {
1202     return I.getParent() == FnPrologueEnd->getParent() &&
1203            (&I == FnPrologueEnd || I.comesBefore(FnPrologueEnd));
1204   }
1205 
  // Creates a new origin and records the stack trace. In general we can call
  // this function for any origin manipulation we like. However, it costs
  // runtime resources, so use it wisely, only where it can provide additional
  // information helpful to the user.
  Value *updateOrigin(Value *V, IRBuilder<> &IRB) {
1211     if (MS.TrackOrigins <= 1)
1212       return V;
1213     return IRB.CreateCall(MS.MsanChainOriginFn, V);
1214   }
1215 
  Value *originToIntptr(IRBuilder<> &IRB, Value *Origin) {
1217     const DataLayout &DL = F.getParent()->getDataLayout();
1218     unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
1219     if (IntptrSize == kOriginSize)
1220       return Origin;
1221     assert(IntptrSize == kOriginSize * 2);
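    // Widen the 32-bit origin to pointer size by replicating it, e.g.
    // 0x1234ABCD becomes 0x1234ABCD1234ABCD on a 64-bit target, so a single
    // intptr-sized store paints two adjacent 4-byte origin slots at once.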
1222     Origin = IRB.CreateIntCast(Origin, MS.IntptrTy, /* isSigned */ false);
1223     return IRB.CreateOr(Origin, IRB.CreateShl(Origin, kOriginSize * 8));
1224   }
1225 
1226   /// Fill memory range with the given origin value.
  void paintOrigin(IRBuilder<> &IRB, Value *Origin, Value *OriginPtr,
1228                    TypeSize TS, Align Alignment) {
1229     const DataLayout &DL = F.getParent()->getDataLayout();
1230     const Align IntptrAlignment = DL.getABITypeAlign(MS.IntptrTy);
1231     unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
1232     assert(IntptrAlignment >= kMinOriginAlignment);
1233     assert(IntptrSize >= kOriginSize);
1234 
    // Note: The loop-based formulation works for fixed-length vectors too;
    // however, we prefer to unroll and specialize alignment below.
1237     if (TS.isScalable()) {
1238       Value *Size = IRB.CreateTypeSize(IRB.getInt32Ty(), TS);
1239       Value *RoundUp = IRB.CreateAdd(Size, IRB.getInt32(kOriginSize - 1));
1240       Value *End = IRB.CreateUDiv(RoundUp, IRB.getInt32(kOriginSize));
1241       auto [InsertPt, Index] =
1242         SplitBlockAndInsertSimpleForLoop(End, &*IRB.GetInsertPoint());
1243       IRB.SetInsertPoint(InsertPt);
1244 
1245       Value *GEP = IRB.CreateGEP(MS.OriginTy, OriginPtr, Index);
1246       IRB.CreateAlignedStore(Origin, GEP, kMinOriginAlignment);
1247       return;
1248     }
1249 
1250     unsigned Size = TS.getFixedValue();
1251 
1252     unsigned Ofs = 0;
1253     Align CurrentAlignment = Alignment;
1254     if (Alignment >= IntptrAlignment && IntptrSize > kOriginSize) {
1255       Value *IntptrOrigin = originToIntptr(IRB, Origin);
1256       Value *IntptrOriginPtr =
1257           IRB.CreatePointerCast(OriginPtr, PointerType::get(MS.IntptrTy, 0));
1258       for (unsigned i = 0; i < Size / IntptrSize; ++i) {
1259         Value *Ptr = i ? IRB.CreateConstGEP1_32(MS.IntptrTy, IntptrOriginPtr, i)
1260                        : IntptrOriginPtr;
1261         IRB.CreateAlignedStore(IntptrOrigin, Ptr, CurrentAlignment);
1262         Ofs += IntptrSize / kOriginSize;
1263         CurrentAlignment = IntptrAlignment;
1264       }
1265     }
1266 
1267     for (unsigned i = Ofs; i < (Size + kOriginSize - 1) / kOriginSize; ++i) {
1268       Value *GEP =
1269           i ? IRB.CreateConstGEP1_32(MS.OriginTy, OriginPtr, i) : OriginPtr;
1270       IRB.CreateAlignedStore(Origin, GEP, CurrentAlignment);
1271       CurrentAlignment = kMinOriginAlignment;
1272     }
1273   }
1274 
  void storeOrigin(IRBuilder<> &IRB, Value *Addr, Value *Shadow, Value *Origin,
1276                    Value *OriginPtr, Align Alignment) {
1277     const DataLayout &DL = F.getParent()->getDataLayout();
1278     const Align OriginAlignment = std::max(kMinOriginAlignment, Alignment);
1279     TypeSize StoreSize = DL.getTypeStoreSize(Shadow->getType());
1280     Value *ConvertedShadow = convertShadowToScalar(Shadow, IRB);
1281     if (auto *ConstantShadow = dyn_cast<Constant>(ConvertedShadow)) {
1282       if (!ClCheckConstantShadow || ConstantShadow->isZeroValue()) {
1283         // Origin is not needed: value is initialized or const shadow is
1284         // ignored.
1285         return;
1286       }
1287       if (llvm::isKnownNonZero(ConvertedShadow, DL)) {
1288         // Copy origin as the value is definitely uninitialized.
1289         paintOrigin(IRB, updateOrigin(Origin, IRB), OriginPtr, StoreSize,
1290                     OriginAlignment);
1291         return;
1292       }
      // Fall back to a runtime check, which can still be optimized out later.
1294     }
1295 
1296     TypeSize TypeSizeInBits = DL.getTypeSizeInBits(ConvertedShadow->getType());
1297     unsigned SizeIndex = TypeSizeToSizeIndex(TypeSizeInBits);
1298     if (instrumentWithCalls(ConvertedShadow) &&
1299         SizeIndex < kNumberOfAccessSizes && !MS.CompileKernel) {
1300       FunctionCallee Fn = MS.MaybeStoreOriginFn[SizeIndex];
1301       Value *ConvertedShadow2 =
1302           IRB.CreateZExt(ConvertedShadow, IRB.getIntNTy(8 * (1 << SizeIndex)));
1303       CallBase *CB = IRB.CreateCall(Fn, {ConvertedShadow2, Addr, Origin});
1304       CB->addParamAttr(0, Attribute::ZExt);
1305       CB->addParamAttr(2, Attribute::ZExt);
1306     } else {
1307       Value *Cmp = convertToBool(ConvertedShadow, IRB, "_mscmp");
1308       Instruction *CheckTerm = SplitBlockAndInsertIfThen(
1309           Cmp, &*IRB.GetInsertPoint(), false, MS.OriginStoreWeights);
1310       IRBuilder<> IRBNew(CheckTerm);
1311       paintOrigin(IRBNew, updateOrigin(Origin, IRBNew), OriginPtr, StoreSize,
1312                   OriginAlignment);
1313     }
1314   }
1315 
1316   void materializeStores() {
1317     for (StoreInst *SI : StoreList) {
1318       IRBuilder<> IRB(SI);
1319       Value *Val = SI->getValueOperand();
1320       Value *Addr = SI->getPointerOperand();
1321       Value *Shadow = SI->isAtomic() ? getCleanShadow(Val) : getShadow(Val);
1322       Value *ShadowPtr, *OriginPtr;
1323       Type *ShadowTy = Shadow->getType();
1324       const Align Alignment = SI->getAlign();
1325       const Align OriginAlignment = std::max(kMinOriginAlignment, Alignment);
1326       std::tie(ShadowPtr, OriginPtr) =
1327           getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment, /*isStore*/ true);
1328 
1329       StoreInst *NewSI = IRB.CreateAlignedStore(Shadow, ShadowPtr, Alignment);
1330       LLVM_DEBUG(dbgs() << "  STORE: " << *NewSI << "\n");
1331       (void)NewSI;
1332 
1333       if (SI->isAtomic())
1334         SI->setOrdering(addReleaseOrdering(SI->getOrdering()));
1335 
1336       if (MS.TrackOrigins && !SI->isAtomic())
1337         storeOrigin(IRB, Addr, Shadow, getOrigin(Val), OriginPtr,
1338                     OriginAlignment);
1339     }
1340   }
1341 
1342   // Returns true if Debug Location corresponds to multiple warnings.
1343   bool shouldDisambiguateWarningLocation(const DebugLoc &DebugLoc) {
1344     if (MS.TrackOrigins < 2)
1345       return false;
1346 
1347     if (LazyWarningDebugLocationCount.empty())
1348       for (const auto &I : InstrumentationList)
1349         ++LazyWarningDebugLocationCount[I.OrigIns->getDebugLoc()];
1350 
1351     return LazyWarningDebugLocationCount[DebugLoc] >= ClDisambiguateWarning;
1352   }
1353 
1354   /// Helper function to insert a warning at IRB's current insert point.
1355   void insertWarningFn(IRBuilder<> &IRB, Value *Origin) {
1356     if (!Origin)
1357       Origin = (Value *)IRB.getInt32(0);
1358     assert(Origin->getType()->isIntegerTy());
1359 
1360     if (shouldDisambiguateWarningLocation(IRB.getCurrentDebugLocation())) {
1361       // Try to create additional origin with debug info of the last origin
1362       // instruction. It may provide additional information to the user.
1363       if (Instruction *OI = dyn_cast_or_null<Instruction>(Origin)) {
1364         assert(MS.TrackOrigins);
1365         auto NewDebugLoc = OI->getDebugLoc();
1366         // Origin update with missing or the same debug location provides no
1367         // additional value.
1368         if (NewDebugLoc && NewDebugLoc != IRB.getCurrentDebugLocation()) {
1369           // Insert the update just before the check, so the runtime is called
1370           // only right before the report.
1371           IRBuilder<> IRBOrigin(&*IRB.GetInsertPoint());
1372           IRBOrigin.SetCurrentDebugLocation(NewDebugLoc);
1373           Origin = updateOrigin(Origin, IRBOrigin);
1374         }
1375       }
1376     }
1377 
1378     if (MS.CompileKernel || MS.TrackOrigins)
1379       IRB.CreateCall(MS.WarningFn, Origin)->setCannotMerge();
1380     else
1381       IRB.CreateCall(MS.WarningFn)->setCannotMerge();
1382     // FIXME: Insert UnreachableInst if !MS.Recover?
1383     // This may invalidate some of the following checks and needs to be done
1384     // at the very end.
1385   }
1386 
1387   void materializeOneCheck(IRBuilder<> &IRB, Value *ConvertedShadow,
1388                            Value *Origin) {
1389     const DataLayout &DL = F.getParent()->getDataLayout();
1390     TypeSize TypeSizeInBits = DL.getTypeSizeInBits(ConvertedShadow->getType());
1391     unsigned SizeIndex = TypeSizeToSizeIndex(TypeSizeInBits);
1392     if (instrumentWithCalls(ConvertedShadow) &&
1393         SizeIndex < kNumberOfAccessSizes && !MS.CompileKernel) {
1394       FunctionCallee Fn = MS.MaybeWarningFn[SizeIndex];
1395       Value *ConvertedShadow2 =
1396           IRB.CreateZExt(ConvertedShadow, IRB.getIntNTy(8 * (1 << SizeIndex)));
1397       CallBase *CB = IRB.CreateCall(
1398           Fn, {ConvertedShadow2,
1399                MS.TrackOrigins && Origin ? Origin : (Value *)IRB.getInt32(0)});
1400       CB->addParamAttr(0, Attribute::ZExt);
1401       CB->addParamAttr(1, Attribute::ZExt);
1402     } else {
1403       Value *Cmp = convertToBool(ConvertedShadow, IRB, "_mscmp");
1404       Instruction *CheckTerm = SplitBlockAndInsertIfThen(
1405           Cmp, &*IRB.GetInsertPoint(),
1406           /* Unreachable */ !MS.Recover, MS.ColdCallWeights);
1407 
1408       IRB.SetInsertPoint(CheckTerm);
1409       insertWarningFn(IRB, Origin);
1410       LLVM_DEBUG(dbgs() << "  CHECK: " << *Cmp << "\n");
1411     }
1412   }
1413 
1414   void materializeInstructionChecks(
1415       ArrayRef<ShadowOriginAndInsertPoint> InstructionChecks) {
1416     const DataLayout &DL = F.getParent()->getDataLayout();
1417     // Disable combining in some cases. TrackOrigins checks each shadow to pick
1418     // the correct origin.
1419     bool Combine = !MS.TrackOrigins;
1420     Instruction *Instruction = InstructionChecks.front().OrigIns;
1421     Value *Shadow = nullptr;
1422     for (const auto &ShadowData : InstructionChecks) {
1423       assert(ShadowData.OrigIns == Instruction);
1424       IRBuilder<> IRB(Instruction);
1425 
1426       Value *ConvertedShadow = ShadowData.Shadow;
1427 
1428       if (auto *ConstantShadow = dyn_cast<Constant>(ConvertedShadow)) {
1429         if (!ClCheckConstantShadow || ConstantShadow->isZeroValue()) {
1430           // Skip, value is initialized or const shadow is ignored.
1431           continue;
1432         }
1433         if (llvm::isKnownNonZero(ConvertedShadow, DL)) {
1434           // Report as the value is definitely uninitialized.
1435           insertWarningFn(IRB, ShadowData.Origin);
1436           if (!MS.Recover)
1437             return; // Always fail and stop here, no need to check the rest.
1438           // Skip the entire instruction.
1439           continue;
1440         }
1441         // Fall back to a runtime check, which can still be optimized out later.
1442       }
1443 
1444       if (!Combine) {
1445         materializeOneCheck(IRB, ConvertedShadow, ShadowData.Origin);
1446         continue;
1447       }
1448 
1449       if (!Shadow) {
1450         Shadow = ConvertedShadow;
1451         continue;
1452       }
1453 
1454       Shadow = convertToBool(Shadow, IRB, "_mscmp");
1455       ConvertedShadow = convertToBool(ConvertedShadow, IRB, "_mscmp");
1456       Shadow = IRB.CreateOr(Shadow, ConvertedShadow, "_msor");
1457     }
1458 
1459     if (Shadow) {
1460       assert(Combine);
1461       IRBuilder<> IRB(Instruction);
1462       materializeOneCheck(IRB, Shadow, nullptr);
1463     }
1464   }
1465 
1466   void materializeChecks() {
1467     llvm::stable_sort(InstrumentationList,
1468                       [](const ShadowOriginAndInsertPoint &L,
1469                          const ShadowOriginAndInsertPoint &R) {
1470                         return L.OrigIns < R.OrigIns;
1471                       });
1472 
1473     for (auto I = InstrumentationList.begin();
1474          I != InstrumentationList.end();) {
1475       auto J =
1476           std::find_if(I + 1, InstrumentationList.end(),
1477                        [L = I->OrigIns](const ShadowOriginAndInsertPoint &R) {
1478                          return L != R.OrigIns;
1479                        });
1480       // Process all checks of the instruction at once.
1481       materializeInstructionChecks(ArrayRef<ShadowOriginAndInsertPoint>(I, J));
1482       I = J;
1483     }
1484 
1485     LLVM_DEBUG(dbgs() << "DONE:\n" << F);
1486   }
1487 
1488   // Insert a prologue that loads the KMSAN context state and caches pointers to its fields.
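  // The context state struct mirrors the userspace TLS globals: field indices
  // 0..6 used below are param_shadow, retval_shadow, va_arg_shadow,
  // va_arg_origin, va_arg_overflow_size, param_origin and retval_origin.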
1489   void insertKmsanPrologue(IRBuilder<> &IRB) {
1490     Value *ContextState = IRB.CreateCall(MS.MsanGetContextStateFn, {});
1491     Constant *Zero = IRB.getInt32(0);
1492     MS.ParamTLS = IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
1493                                 {Zero, IRB.getInt32(0)}, "param_shadow");
1494     MS.RetvalTLS = IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
1495                                  {Zero, IRB.getInt32(1)}, "retval_shadow");
1496     MS.VAArgTLS = IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
1497                                 {Zero, IRB.getInt32(2)}, "va_arg_shadow");
1498     MS.VAArgOriginTLS = IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
1499                                       {Zero, IRB.getInt32(3)}, "va_arg_origin");
1500     MS.VAArgOverflowSizeTLS =
1501         IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
1502                       {Zero, IRB.getInt32(4)}, "va_arg_overflow_size");
1503     MS.ParamOriginTLS = IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
1504                                       {Zero, IRB.getInt32(5)}, "param_origin");
1505     MS.RetvalOriginTLS =
1506         IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
1507                       {Zero, IRB.getInt32(6)}, "retval_origin");
1508     if (MS.TargetTriple.getArch() == Triple::systemz)
1509       MS.MsanMetadataAlloca = IRB.CreateAlloca(MS.MsanMetadata, 0u);
1510   }
1511 
1512   /// Add MemorySanitizer instrumentation to a function.
1513   bool runOnFunction() {
1514     // Iterate all BBs in depth-first order and create shadow instructions
1515     // for all instructions (where applicable).
1516     // For PHI nodes we create dummy shadow PHIs which will be finalized later.
1517     for (BasicBlock *BB : depth_first(FnPrologueEnd->getParent()))
1518       visit(*BB);
1519 
1520     // Finalize PHI nodes.
1521     for (PHINode *PN : ShadowPHINodes) {
1522       PHINode *PNS = cast<PHINode>(getShadow(PN));
1523       PHINode *PNO = MS.TrackOrigins ? cast<PHINode>(getOrigin(PN)) : nullptr;
1524       size_t NumValues = PN->getNumIncomingValues();
1525       for (size_t v = 0; v < NumValues; v++) {
1526         PNS->addIncoming(getShadow(PN, v), PN->getIncomingBlock(v));
1527         if (PNO)
1528           PNO->addIncoming(getOrigin(PN, v), PN->getIncomingBlock(v));
1529       }
1530     }
1531 
1532     VAHelper->finalizeInstrumentation();
1533 
1534     // Poison llvm.lifetime.start intrinsics, if we haven't fallen back to
1535     // instrumenting only allocas.
1536     if (InstrumentLifetimeStart) {
1537       for (auto Item : LifetimeStartList) {
1538         instrumentAlloca(*Item.second, Item.first);
1539         AllocaSet.remove(Item.second);
1540       }
1541     }
1542     // Poison the allocas for which we didn't instrument the corresponding
1543     // lifetime intrinsics.
1544     for (AllocaInst *AI : AllocaSet)
1545       instrumentAlloca(*AI);
1546 
1547     // Insert shadow value checks.
1548     materializeChecks();
1549 
1550     // Delayed instrumentation of StoreInst.
1551     // This must not add new address checks.
1552     materializeStores();
1553 
1554     return true;
1555   }
1556 
1557   /// Compute the shadow type that corresponds to a given Value.
1558   Type *getShadowTy(Value *V) { return getShadowTy(V->getType()); }
1559 
1560   /// Compute the shadow type that corresponds to a given Type.
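  /// For example, assuming 32-bit floats and 64-bit doubles, { float, i64 }
  /// maps to the shadow type { i32, i64 } and <4 x float> maps to <4 x i32>.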
1561   Type *getShadowTy(Type *OrigTy) {
1562     if (!OrigTy->isSized()) {
1563       return nullptr;
1564     }
1565     // For integer type, shadow is the same as the original type.
1566     // This may return weird-sized types like i1.
1567     if (IntegerType *IT = dyn_cast<IntegerType>(OrigTy))
1568       return IT;
1569     const DataLayout &DL = F.getParent()->getDataLayout();
1570     if (VectorType *VT = dyn_cast<VectorType>(OrigTy)) {
1571       uint32_t EltSize = DL.getTypeSizeInBits(VT->getElementType());
1572       return VectorType::get(IntegerType::get(*MS.C, EltSize),
1573                              VT->getElementCount());
1574     }
1575     if (ArrayType *AT = dyn_cast<ArrayType>(OrigTy)) {
1576       return ArrayType::get(getShadowTy(AT->getElementType()),
1577                             AT->getNumElements());
1578     }
1579     if (StructType *ST = dyn_cast<StructType>(OrigTy)) {
1580       SmallVector<Type *, 4> Elements;
1581       for (unsigned i = 0, n = ST->getNumElements(); i < n; i++)
1582         Elements.push_back(getShadowTy(ST->getElementType(i)));
1583       StructType *Res = StructType::get(*MS.C, Elements, ST->isPacked());
1584       LLVM_DEBUG(dbgs() << "getShadowTy: " << *ST << " ===> " << *Res << "\n");
1585       return Res;
1586     }
1587     uint32_t TypeSize = DL.getTypeSizeInBits(OrigTy);
1588     return IntegerType::get(*MS.C, TypeSize);
1589   }
1590 
1591   /// Extract combined shadow of struct elements as a bool
1592   Value *collapseStructShadow(StructType *Struct, Value *Shadow,
1593                               IRBuilder<> &IRB) {
1594     Value *FalseVal = IRB.getIntN(/* width */ 1, /* value */ 0);
1595     Value *Aggregator = FalseVal;
1596 
1597     for (unsigned Idx = 0; Idx < Struct->getNumElements(); Idx++) {
1598       // Combine by ORing together each element's bool shadow
1599       Value *ShadowItem = IRB.CreateExtractValue(Shadow, Idx);
1600       Value *ShadowBool = convertToBool(ShadowItem, IRB);
1601 
1602       if (Aggregator != FalseVal)
1603         Aggregator = IRB.CreateOr(Aggregator, ShadowBool);
1604       else
1605         Aggregator = ShadowBool;
1606     }
1607 
1608     return Aggregator;
1609   }
1610 
1611   // Extract combined shadow of array elements
1612   Value *collapseArrayShadow(ArrayType *Array, Value *Shadow,
1613                              IRBuilder<> &IRB) {
1614     if (!Array->getNumElements())
1615       return IRB.getIntN(/* width */ 1, /* value */ 0);
1616 
1617     Value *FirstItem = IRB.CreateExtractValue(Shadow, 0);
1618     Value *Aggregator = convertShadowToScalar(FirstItem, IRB);
1619 
1620     for (unsigned Idx = 1; Idx < Array->getNumElements(); Idx++) {
1621       Value *ShadowItem = IRB.CreateExtractValue(Shadow, Idx);
1622       Value *ShadowInner = convertShadowToScalar(ShadowItem, IRB);
1623       Aggregator = IRB.CreateOr(Aggregator, ShadowInner);
1624     }
1625     return Aggregator;
1626   }
1627 
1628   /// Convert a shadow value to its flattened variant. The resulting
1629   /// shadow may not necessarily have the same bit width as the input
1630   /// value, but it will always be comparable to zero.
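  /// For example, a <4 x i8> vector shadow is bitcast to a single i32, while
  /// struct and array shadows are collapsed element by element by the helpers
  /// above.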
1631   Value *convertShadowToScalar(Value *V, IRBuilder<> &IRB) {
1632     if (StructType *Struct = dyn_cast<StructType>(V->getType()))
1633       return collapseStructShadow(Struct, V, IRB);
1634     if (ArrayType *Array = dyn_cast<ArrayType>(V->getType()))
1635       return collapseArrayShadow(Array, V, IRB);
1636     if (isa<VectorType>(V->getType())) {
1637       if (isa<ScalableVectorType>(V->getType()))
1638         return convertShadowToScalar(IRB.CreateOrReduce(V), IRB);
1639       unsigned BitWidth =
1640         V->getType()->getPrimitiveSizeInBits().getFixedValue();
1641       return IRB.CreateBitCast(V, IntegerType::get(*MS.C, BitWidth));
1642     }
1643     return V;
1644   }
1645 
1646   // Convert a scalar value to an i1 by comparing with 0
1647   Value *convertToBool(Value *V, IRBuilder<> &IRB, const Twine &name = "") {
1648     Type *VTy = V->getType();
1649     if (!VTy->isIntegerTy())
1650       return convertToBool(convertShadowToScalar(V, IRB), IRB, name);
1651     if (VTy->getIntegerBitWidth() == 1)
1652       // Just converting a bool to a bool, so do nothing.
1653       return V;
1654     return IRB.CreateICmpNE(V, ConstantInt::get(VTy, 0), name);
1655   }
1656 
1657   Type *ptrToIntPtrType(Type *PtrTy) const {
1658     if (VectorType *VectTy = dyn_cast<VectorType>(PtrTy)) {
1659       return VectorType::get(ptrToIntPtrType(VectTy->getElementType()),
1660                              VectTy->getElementCount());
1661     }
1662     assert(PtrTy->isIntOrPtrTy());
1663     return MS.IntptrTy;
1664   }
1665 
1666   Type *getPtrToShadowPtrType(Type *IntPtrTy, Type *ShadowTy) const {
1667     if (VectorType *VectTy = dyn_cast<VectorType>(IntPtrTy)) {
1668       return VectorType::get(
1669           getPtrToShadowPtrType(VectTy->getElementType(), ShadowTy),
1670           VectTy->getElementCount());
1671     }
1672     assert(IntPtrTy == MS.IntptrTy);
1673     return PointerType::get(*MS.C, 0);
1674   }
1675 
1676   Constant *constToIntPtr(Type *IntPtrTy, uint64_t C) const {
1677     if (VectorType *VectTy = dyn_cast<VectorType>(IntPtrTy)) {
1678       return ConstantVector::getSplat(
1679           VectTy->getElementCount(), constToIntPtr(VectTy->getElementType(), C));
1680     }
1681     assert(IntPtrTy == MS.IntptrTy);
1682     return ConstantInt::get(MS.IntptrTy, C);
1683   }
1684 
1685   /// Compute the integer shadow offset that corresponds to a given
1686   /// application address.
1687   ///
1688   /// Offset = (Addr & ~AndMask) ^ XorMask
1689   /// Addr can be a ptr or <N x ptr>.
1690   /// Returns the offset as an intptr-sized integer, or as <N x intptr> when
1691   /// Addr is a vector of pointers.
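  /// For instance, with AndMask == 0 and XorMask == 0x500000000000 (as in the
  /// default Linux/x86_64 mapping), the offset is simply
  /// Addr ^ 0x500000000000.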
1692   Value *getShadowPtrOffset(Value *Addr, IRBuilder<> &IRB) {
1693     Type *IntptrTy = ptrToIntPtrType(Addr->getType());
1694     Value *OffsetLong = IRB.CreatePointerCast(Addr, IntptrTy);
1695 
1696     if (uint64_t AndMask = MS.MapParams->AndMask)
1697       OffsetLong = IRB.CreateAnd(OffsetLong, constToIntPtr(IntptrTy, ~AndMask));
1698 
1699     if (uint64_t XorMask = MS.MapParams->XorMask)
1700       OffsetLong = IRB.CreateXor(OffsetLong, constToIntPtr(IntptrTy, XorMask));
1701     return OffsetLong;
1702   }
1703 
1704   /// Compute the shadow and origin addresses corresponding to a given
1705   /// application address.
1706   ///
1707   /// Shadow = ShadowBase + Offset
1708   /// Origin = (OriginBase + Offset) & ~3ULL
1709   /// Addr can be a ptr or <N x ptr>. In both cases ShadowTy is the shadow type
1710   /// of a single pointee.
1711   /// Returns <shadow_ptr, origin_ptr> or <<N x shadow_ptr>, <N x origin_ptr>>.
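  /// When Addr is a vector of pointers, the same arithmetic is applied
  /// element-wise to <N x intptr> values (see constToIntPtr), yielding vectors
  /// of shadow and origin pointers.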
1712   std::pair<Value *, Value *>
1713   getShadowOriginPtrUserspace(Value *Addr, IRBuilder<> &IRB, Type *ShadowTy,
1714                               MaybeAlign Alignment) {
1715     VectorType *VectTy = dyn_cast<VectorType>(Addr->getType());
1716     if (!VectTy) {
1717       assert(Addr->getType()->isPointerTy());
1718     } else {
1719       assert(VectTy->getElementType()->isPointerTy());
1720     }
1721     Type *IntptrTy = ptrToIntPtrType(Addr->getType());
1722     Value *ShadowOffset = getShadowPtrOffset(Addr, IRB);
1723     Value *ShadowLong = ShadowOffset;
1724     if (uint64_t ShadowBase = MS.MapParams->ShadowBase) {
1725       ShadowLong =
1726           IRB.CreateAdd(ShadowLong, constToIntPtr(IntptrTy, ShadowBase));
1727     }
1728     Value *ShadowPtr = IRB.CreateIntToPtr(
1729         ShadowLong, getPtrToShadowPtrType(IntptrTy, ShadowTy));
1730 
1731     Value *OriginPtr = nullptr;
1732     if (MS.TrackOrigins) {
1733       Value *OriginLong = ShadowOffset;
1734       uint64_t OriginBase = MS.MapParams->OriginBase;
1735       if (OriginBase != 0)
1736         OriginLong =
1737             IRB.CreateAdd(OriginLong, constToIntPtr(IntptrTy, OriginBase));
1738       if (!Alignment || *Alignment < kMinOriginAlignment) {
1739         uint64_t Mask = kMinOriginAlignment.value() - 1;
1740         OriginLong = IRB.CreateAnd(OriginLong, constToIntPtr(IntptrTy, ~Mask));
1741       }
1742       OriginPtr = IRB.CreateIntToPtr(
1743           OriginLong, getPtrToShadowPtrType(IntptrTy, MS.OriginTy));
1744     }
1745     return std::make_pair(ShadowPtr, OriginPtr);
1746   }
1747 
1748   template <typename... ArgsTy>
1749   Value *createMetadataCall(IRBuilder<> &IRB, FunctionCallee Callee,
1750                             ArgsTy... Args) {
1751     if (MS.TargetTriple.getArch() == Triple::systemz) {
1752       IRB.CreateCall(Callee,
1753                      {MS.MsanMetadataAlloca, std::forward<ArgsTy>(Args)...});
1754       return IRB.CreateLoad(MS.MsanMetadata, MS.MsanMetadataAlloca);
1755     }
1756 
1757     return IRB.CreateCall(Callee, {std::forward<ArgsTy>(Args)...});
1758   }
1759 
1760   std::pair<Value *, Value *> getShadowOriginPtrKernelNoVec(Value *Addr,
1761                                                             IRBuilder<> &IRB,
1762                                                             Type *ShadowTy,
1763                                                             bool isStore) {
1764     Value *ShadowOriginPtrs;
1765     const DataLayout &DL = F.getParent()->getDataLayout();
1766     TypeSize Size = DL.getTypeStoreSize(ShadowTy);
1767 
1768     FunctionCallee Getter = MS.getKmsanShadowOriginAccessFn(isStore, Size);
1769     Value *AddrCast =
1770         IRB.CreatePointerCast(Addr, PointerType::get(IRB.getInt8Ty(), 0));
1771     if (Getter) {
1772       ShadowOriginPtrs = createMetadataCall(IRB, Getter, AddrCast);
1773     } else {
1774       Value *SizeVal = ConstantInt::get(MS.IntptrTy, Size);
1775       ShadowOriginPtrs = createMetadataCall(
1776           IRB,
1777           isStore ? MS.MsanMetadataPtrForStoreN : MS.MsanMetadataPtrForLoadN,
1778           AddrCast, SizeVal);
1779     }
1780     Value *ShadowPtr = IRB.CreateExtractValue(ShadowOriginPtrs, 0);
1781     ShadowPtr = IRB.CreatePointerCast(ShadowPtr, PointerType::get(ShadowTy, 0));
1782     Value *OriginPtr = IRB.CreateExtractValue(ShadowOriginPtrs, 1);
1783 
1784     return std::make_pair(ShadowPtr, OriginPtr);
1785   }
1786 
1787   /// Addr can be a ptr or <N x ptr>. In both cases ShadowTy is the shadow type
1788   /// of a single pointee.
1789   /// Returns <shadow_ptr, origin_ptr> or <<N x shadow_ptr>, <N x origin_ptr>>.
1790   std::pair<Value *, Value *> getShadowOriginPtrKernel(Value *Addr,
1791                                                        IRBuilder<> &IRB,
1792                                                        Type *ShadowTy,
1793                                                        bool isStore) {
1794     VectorType *VectTy = dyn_cast<VectorType>(Addr->getType());
1795     if (!VectTy) {
1796       assert(Addr->getType()->isPointerTy());
1797       return getShadowOriginPtrKernelNoVec(Addr, IRB, ShadowTy, isStore);
1798     }
1799 
1800     // TODO: Support callbacks with vectors of addresses.
1801     unsigned NumElements = cast<FixedVectorType>(VectTy)->getNumElements();
1802     Value *ShadowPtrs = ConstantInt::getNullValue(
1803         FixedVectorType::get(IRB.getPtrTy(), NumElements));
1804     Value *OriginPtrs = nullptr;
1805     if (MS.TrackOrigins)
1806       OriginPtrs = ConstantInt::getNullValue(
1807           FixedVectorType::get(IRB.getPtrTy(), NumElements));
1808     for (unsigned i = 0; i < NumElements; ++i) {
1809       Value *OneAddr =
1810           IRB.CreateExtractElement(Addr, ConstantInt::get(IRB.getInt32Ty(), i));
1811       auto [ShadowPtr, OriginPtr] =
1812           getShadowOriginPtrKernelNoVec(OneAddr, IRB, ShadowTy, isStore);
1813 
1814       ShadowPtrs = IRB.CreateInsertElement(
1815           ShadowPtrs, ShadowPtr, ConstantInt::get(IRB.getInt32Ty(), i));
1816       if (MS.TrackOrigins)
1817         OriginPtrs = IRB.CreateInsertElement(
1818             OriginPtrs, OriginPtr, ConstantInt::get(IRB.getInt32Ty(), i));
1819     }
1820     return {ShadowPtrs, OriginPtrs};
1821   }
1822 
1823   std::pair<Value *, Value *> getShadowOriginPtr(Value *Addr, IRBuilder<> &IRB,
1824                                                  Type *ShadowTy,
1825                                                  MaybeAlign Alignment,
1826                                                  bool isStore) {
1827     if (MS.CompileKernel)
1828       return getShadowOriginPtrKernel(Addr, IRB, ShadowTy, isStore);
1829     return getShadowOriginPtrUserspace(Addr, IRB, ShadowTy, Alignment);
1830   }
1831 
1832   /// Compute the shadow address for a given function argument.
1833   ///
1834   /// Shadow = ParamTLS+ArgOffset.
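  /// Each argument occupies alignTo(size, kShadowTLSAlignment) bytes of
  /// ParamTLS, so with the usual 8-byte TLS alignment the shadow of a second
  /// i32 argument would start at ParamTLS + 8.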
1835   Value *getShadowPtrForArgument(IRBuilder<> &IRB, int ArgOffset) {
1836     Value *Base = IRB.CreatePointerCast(MS.ParamTLS, MS.IntptrTy);
1837     if (ArgOffset)
1838       Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
1839     return IRB.CreateIntToPtr(Base, IRB.getPtrTy(0), "_msarg");
1840   }
1841 
1842   /// Compute the origin address for a given function argument.
1843   Value *getOriginPtrForArgument(IRBuilder<> &IRB, int ArgOffset) {
1844     if (!MS.TrackOrigins)
1845       return nullptr;
1846     Value *Base = IRB.CreatePointerCast(MS.ParamOriginTLS, MS.IntptrTy);
1847     if (ArgOffset)
1848       Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
1849     return IRB.CreateIntToPtr(Base, IRB.getPtrTy(0), "_msarg_o");
1850   }
1851 
1852   /// Compute the shadow address for a retval.
1853   Value *getShadowPtrForRetval(IRBuilder<> &IRB) {
1854     return IRB.CreatePointerCast(MS.RetvalTLS, IRB.getPtrTy(0), "_msret");
1855   }
1856 
1857   /// Compute the origin address for a retval.
1858   Value *getOriginPtrForRetval() {
1859     // We keep a single origin for the entire retval. Might be too optimistic.
1860     return MS.RetvalOriginTLS;
1861   }
1862 
1863   /// Set SV to be the shadow value for V.
1864   void setShadow(Value *V, Value *SV) {
1865     assert(!ShadowMap.count(V) && "Values may only have one shadow");
1866     ShadowMap[V] = PropagateShadow ? SV : getCleanShadow(V);
1867   }
1868 
1869   /// Set Origin to be the origin value for V.
1870   void setOrigin(Value *V, Value *Origin) {
1871     if (!MS.TrackOrigins)
1872       return;
1873     assert(!OriginMap.count(V) && "Values may only have one origin");
1874     LLVM_DEBUG(dbgs() << "ORIGIN: " << *V << "  ==> " << *Origin << "\n");
1875     OriginMap[V] = Origin;
1876   }
1877 
1878   Constant *getCleanShadow(Type *OrigTy) {
1879     Type *ShadowTy = getShadowTy(OrigTy);
1880     if (!ShadowTy)
1881       return nullptr;
1882     return Constant::getNullValue(ShadowTy);
1883   }
1884 
1885   /// Create a clean shadow value for a given value.
1886   ///
1887   /// Clean shadow (all zeroes) means all bits of the value are defined
1888   /// (initialized).
1889   Constant *getCleanShadow(Value *V) { return getCleanShadow(V->getType()); }
1890 
1891   /// Create a dirty shadow of a given shadow type.
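  /// For example, the poisoned shadow of { i32, i64 } is { -1, -1 }: every bit
  /// is marked as potentially uninitialized.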
1892   Constant *getPoisonedShadow(Type *ShadowTy) {
1893     assert(ShadowTy);
1894     if (isa<IntegerType>(ShadowTy) || isa<VectorType>(ShadowTy))
1895       return Constant::getAllOnesValue(ShadowTy);
1896     if (ArrayType *AT = dyn_cast<ArrayType>(ShadowTy)) {
1897       SmallVector<Constant *, 4> Vals(AT->getNumElements(),
1898                                       getPoisonedShadow(AT->getElementType()));
1899       return ConstantArray::get(AT, Vals);
1900     }
1901     if (StructType *ST = dyn_cast<StructType>(ShadowTy)) {
1902       SmallVector<Constant *, 4> Vals;
1903       for (unsigned i = 0, n = ST->getNumElements(); i < n; i++)
1904         Vals.push_back(getPoisonedShadow(ST->getElementType(i)));
1905       return ConstantStruct::get(ST, Vals);
1906     }
1907     llvm_unreachable("Unexpected shadow type");
1908   }
1909 
1910   /// Create a dirty shadow for a given value.
1911   Constant *getPoisonedShadow(Value *V) {
1912     Type *ShadowTy = getShadowTy(V);
1913     if (!ShadowTy)
1914       return nullptr;
1915     return getPoisonedShadow(ShadowTy);
1916   }
1917 
1918   /// Create a clean (zero) origin.
1919   Value *getCleanOrigin() { return Constant::getNullValue(MS.OriginTy); }
1920 
1921   /// Get the shadow value for a given Value.
1922   ///
1923   /// This function either returns the value set earlier with setShadow,
1924   /// or extracts it from ParamTLS (for function arguments).
1925   Value *getShadow(Value *V) {
1926     if (Instruction *I = dyn_cast<Instruction>(V)) {
1927       if (!PropagateShadow || I->getMetadata(LLVMContext::MD_nosanitize))
1928         return getCleanShadow(V);
1929       // For instructions the shadow is already stored in the map.
1930       Value *Shadow = ShadowMap[V];
1931       if (!Shadow) {
1932         LLVM_DEBUG(dbgs() << "No shadow: " << *V << "\n" << *(I->getParent()));
1933         (void)I;
1934         assert(Shadow && "No shadow for a value");
1935       }
1936       return Shadow;
1937     }
1938     if (UndefValue *U = dyn_cast<UndefValue>(V)) {
1939       Value *AllOnes = (PropagateShadow && PoisonUndef) ? getPoisonedShadow(V)
1940                                                         : getCleanShadow(V);
1941       LLVM_DEBUG(dbgs() << "Undef: " << *U << " ==> " << *AllOnes << "\n");
1942       (void)U;
1943       return AllOnes;
1944     }
1945     if (Argument *A = dyn_cast<Argument>(V)) {
1946       // For arguments we compute the shadow on demand and store it in the map.
1947       Value *&ShadowPtr = ShadowMap[V];
1948       if (ShadowPtr)
1949         return ShadowPtr;
1950       Function *F = A->getParent();
1951       IRBuilder<> EntryIRB(FnPrologueEnd);
1952       unsigned ArgOffset = 0;
1953       const DataLayout &DL = F->getParent()->getDataLayout();
1954       for (auto &FArg : F->args()) {
1955         if (!FArg.getType()->isSized()) {
1956           LLVM_DEBUG(dbgs() << "Arg is not sized\n");
1957           continue;
1958         }
1959 
1960         unsigned Size = FArg.hasByValAttr()
1961                             ? DL.getTypeAllocSize(FArg.getParamByValType())
1962                             : DL.getTypeAllocSize(FArg.getType());
1963 
1964         if (A == &FArg) {
1965           bool Overflow = ArgOffset + Size > kParamTLSSize;
1966           if (FArg.hasByValAttr()) {
1967             // ByVal pointer itself has clean shadow. We copy the actual
1968             // argument shadow to the underlying memory.
1969             // Figure out maximal valid memcpy alignment.
1970             const Align ArgAlign = DL.getValueOrABITypeAlignment(
1971                 FArg.getParamAlign(), FArg.getParamByValType());
1972             Value *CpShadowPtr, *CpOriginPtr;
1973             std::tie(CpShadowPtr, CpOriginPtr) =
1974                 getShadowOriginPtr(V, EntryIRB, EntryIRB.getInt8Ty(), ArgAlign,
1975                                    /*isStore*/ true);
1976             if (!PropagateShadow || Overflow) {
1977               // ParamTLS overflow.
1978               EntryIRB.CreateMemSet(
1979                   CpShadowPtr, Constant::getNullValue(EntryIRB.getInt8Ty()),
1980                   Size, ArgAlign);
1981             } else {
1982               Value *Base = getShadowPtrForArgument(EntryIRB, ArgOffset);
1983               const Align CopyAlign = std::min(ArgAlign, kShadowTLSAlignment);
1984               Value *Cpy = EntryIRB.CreateMemCpy(CpShadowPtr, CopyAlign, Base,
1985                                                  CopyAlign, Size);
1986               LLVM_DEBUG(dbgs() << "  ByValCpy: " << *Cpy << "\n");
1987               (void)Cpy;
1988 
1989               if (MS.TrackOrigins) {
1990                 Value *OriginPtr =
1991                     getOriginPtrForArgument(EntryIRB, ArgOffset);
1992                 // FIXME: OriginSize should be:
1993                 // alignTo(V % kMinOriginAlignment + Size, kMinOriginAlignment)
1994                 unsigned OriginSize = alignTo(Size, kMinOriginAlignment);
1995                 EntryIRB.CreateMemCpy(
1996                     CpOriginPtr,
1997                     /* by getShadowOriginPtr */ kMinOriginAlignment, OriginPtr,
1998                     /* by origin_tls[ArgOffset] */ kMinOriginAlignment,
1999                     OriginSize);
2000               }
2001             }
2002           }
2003 
2004           if (!PropagateShadow || Overflow || FArg.hasByValAttr() ||
2005               (MS.EagerChecks && FArg.hasAttribute(Attribute::NoUndef))) {
2006             ShadowPtr = getCleanShadow(V);
2007             setOrigin(A, getCleanOrigin());
2008           } else {
2009             // Shadow over TLS
2010             Value *Base = getShadowPtrForArgument(EntryIRB, ArgOffset);
2011             ShadowPtr = EntryIRB.CreateAlignedLoad(getShadowTy(&FArg), Base,
2012                                                    kShadowTLSAlignment);
2013             if (MS.TrackOrigins) {
2014               Value *OriginPtr =
2015                   getOriginPtrForArgument(EntryIRB, ArgOffset);
2016               setOrigin(A, EntryIRB.CreateLoad(MS.OriginTy, OriginPtr));
2017             }
2018           }
2019           LLVM_DEBUG(dbgs()
2020                      << "  ARG:    " << FArg << " ==> " << *ShadowPtr << "\n");
2021           break;
2022         }
2023 
2024         ArgOffset += alignTo(Size, kShadowTLSAlignment);
2025       }
2026       assert(ShadowPtr && "Could not find shadow for an argument");
2027       return ShadowPtr;
2028     }
2029     // For everything else the shadow is zero.
2030     return getCleanShadow(V);
2031   }
2032 
2033   /// Get the shadow for i-th argument of the instruction I.
2034   Value *getShadow(Instruction *I, int i) {
2035     return getShadow(I->getOperand(i));
2036   }
2037 
2038   /// Get the origin for a value.
2039   Value *getOrigin(Value *V) {
2040     if (!MS.TrackOrigins)
2041       return nullptr;
2042     if (!PropagateShadow || isa<Constant>(V) || isa<InlineAsm>(V))
2043       return getCleanOrigin();
2044     assert((isa<Instruction>(V) || isa<Argument>(V)) &&
2045            "Unexpected value type in getOrigin()");
2046     if (Instruction *I = dyn_cast<Instruction>(V)) {
2047       if (I->getMetadata(LLVMContext::MD_nosanitize))
2048         return getCleanOrigin();
2049     }
2050     Value *Origin = OriginMap[V];
2051     assert(Origin && "Missing origin");
2052     return Origin;
2053   }
2054 
2055   /// Get the origin for i-th argument of the instruction I.
2056   Value *getOrigin(Instruction *I, int i) {
2057     return getOrigin(I->getOperand(i));
2058   }
2059 
2060   /// Remember the place where a shadow check should be inserted.
2061   ///
2062   /// This location will be later instrumented with a check that will print a
2063   /// UMR warning at runtime if the shadow value is not 0.
2064   void insertShadowCheck(Value *Shadow, Value *Origin, Instruction *OrigIns) {
2065     assert(Shadow);
2066     if (!InsertChecks)
2067       return;
2068 
2069     if (!DebugCounter::shouldExecute(DebugInsertCheck)) {
2070       LLVM_DEBUG(dbgs() << "Skipping check of " << *Shadow << " before "
2071                         << *OrigIns << "\n");
2072       return;
2073     }
2074 #ifndef NDEBUG
2075     Type *ShadowTy = Shadow->getType();
2076     assert((isa<IntegerType>(ShadowTy) || isa<VectorType>(ShadowTy) ||
2077             isa<StructType>(ShadowTy) || isa<ArrayType>(ShadowTy)) &&
2078            "Can only insert checks for integer, vector, and aggregate shadow "
2079            "types");
2080 #endif
2081     InstrumentationList.push_back(
2082         ShadowOriginAndInsertPoint(Shadow, Origin, OrigIns));
2083   }
2084 
2085   /// Remember the place where a shadow check should be inserted.
2086   ///
2087   /// This location will be later instrumented with a check that will print a
2088   /// UMR warning at runtime if the value is not fully defined.
2089   void insertShadowCheck(Value *Val, Instruction *OrigIns) {
2090     assert(Val);
2091     Value *Shadow, *Origin;
2092     if (ClCheckConstantShadow) {
2093       Shadow = getShadow(Val);
2094       if (!Shadow)
2095         return;
2096       Origin = getOrigin(Val);
2097     } else {
2098       Shadow = dyn_cast_or_null<Instruction>(getShadow(Val));
2099       if (!Shadow)
2100         return;
2101       Origin = dyn_cast_or_null<Instruction>(getOrigin(Val));
2102     }
2103     insertShadowCheck(Shadow, Origin, OrigIns);
2104   }
2105 
2106   AtomicOrdering addReleaseOrdering(AtomicOrdering a) {
2107     switch (a) {
2108     case AtomicOrdering::NotAtomic:
2109       return AtomicOrdering::NotAtomic;
2110     case AtomicOrdering::Unordered:
2111     case AtomicOrdering::Monotonic:
2112     case AtomicOrdering::Release:
2113       return AtomicOrdering::Release;
2114     case AtomicOrdering::Acquire:
2115     case AtomicOrdering::AcquireRelease:
2116       return AtomicOrdering::AcquireRelease;
2117     case AtomicOrdering::SequentiallyConsistent:
2118       return AtomicOrdering::SequentiallyConsistent;
2119     }
2120     llvm_unreachable("Unknown ordering");
2121   }
2122 
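  // Builds a constant table that maps each C ABI ordering to one at least as
  // strong as "release": relaxed/release -> release, consume/acquire/acq_rel
  // -> acq_rel, seq_cst -> seq_cst. This is the table form of
  // addReleaseOrdering above, for orderings only known at run time.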
2123   Value *makeAddReleaseOrderingTable(IRBuilder<> &IRB) {
2124     constexpr int NumOrderings = (int)AtomicOrderingCABI::seq_cst + 1;
2125     uint32_t OrderingTable[NumOrderings] = {};
2126 
2127     OrderingTable[(int)AtomicOrderingCABI::relaxed] =
2128         OrderingTable[(int)AtomicOrderingCABI::release] =
2129             (int)AtomicOrderingCABI::release;
2130     OrderingTable[(int)AtomicOrderingCABI::consume] =
2131         OrderingTable[(int)AtomicOrderingCABI::acquire] =
2132             OrderingTable[(int)AtomicOrderingCABI::acq_rel] =
2133                 (int)AtomicOrderingCABI::acq_rel;
2134     OrderingTable[(int)AtomicOrderingCABI::seq_cst] =
2135         (int)AtomicOrderingCABI::seq_cst;
2136 
2137     return ConstantDataVector::get(IRB.getContext(),
2138                                    ArrayRef(OrderingTable, NumOrderings));
2139   }
2140 
2141   AtomicOrdering addAcquireOrdering(AtomicOrdering a) {
2142     switch (a) {
2143     case AtomicOrdering::NotAtomic:
2144       return AtomicOrdering::NotAtomic;
2145     case AtomicOrdering::Unordered:
2146     case AtomicOrdering::Monotonic:
2147     case AtomicOrdering::Acquire:
2148       return AtomicOrdering::Acquire;
2149     case AtomicOrdering::Release:
2150     case AtomicOrdering::AcquireRelease:
2151       return AtomicOrdering::AcquireRelease;
2152     case AtomicOrdering::SequentiallyConsistent:
2153       return AtomicOrdering::SequentiallyConsistent;
2154     }
2155     llvm_unreachable("Unknown ordering");
2156   }
2157 
2158   Value *makeAddAcquireOrderingTable(IRBuilder<> &IRB) {
2159     constexpr int NumOrderings = (int)AtomicOrderingCABI::seq_cst + 1;
2160     uint32_t OrderingTable[NumOrderings] = {};
2161 
2162     OrderingTable[(int)AtomicOrderingCABI::relaxed] =
2163         OrderingTable[(int)AtomicOrderingCABI::acquire] =
2164             OrderingTable[(int)AtomicOrderingCABI::consume] =
2165                 (int)AtomicOrderingCABI::acquire;
2166     OrderingTable[(int)AtomicOrderingCABI::release] =
2167         OrderingTable[(int)AtomicOrderingCABI::acq_rel] =
2168             (int)AtomicOrderingCABI::acq_rel;
2169     OrderingTable[(int)AtomicOrderingCABI::seq_cst] =
2170         (int)AtomicOrderingCABI::seq_cst;
2171 
2172     return ConstantDataVector::get(IRB.getContext(),
2173                                    ArrayRef(OrderingTable, NumOrderings));
2174   }
2175 
2176   // ------------------- Visitors.
2177   using InstVisitor<MemorySanitizerVisitor>::visit;
2178   void visit(Instruction &I) {
2179     if (I.getMetadata(LLVMContext::MD_nosanitize))
2180       return;
2181     // Don't want to visit if we're in the prologue
2182     if (isInPrologue(I))
2183       return;
2184     InstVisitor<MemorySanitizerVisitor>::visit(I);
2185   }
2186 
2187   /// Instrument LoadInst
2188   ///
2189   /// Loads the corresponding shadow and (optionally) origin.
2190   /// Optionally, checks that the load address is fully defined.
2191   void visitLoadInst(LoadInst &I) {
2192     assert(I.getType()->isSized() && "Load type must have size");
2193     assert(!I.getMetadata(LLVMContext::MD_nosanitize));
2194     NextNodeIRBuilder IRB(&I);
2195     Type *ShadowTy = getShadowTy(&I);
2196     Value *Addr = I.getPointerOperand();
2197     Value *ShadowPtr = nullptr, *OriginPtr = nullptr;
2198     const Align Alignment = I.getAlign();
2199     if (PropagateShadow) {
2200       std::tie(ShadowPtr, OriginPtr) =
2201           getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment, /*isStore*/ false);
2202       setShadow(&I,
2203                 IRB.CreateAlignedLoad(ShadowTy, ShadowPtr, Alignment, "_msld"));
2204     } else {
2205       setShadow(&I, getCleanShadow(&I));
2206     }
2207 
2208     if (ClCheckAccessAddress)
2209       insertShadowCheck(I.getPointerOperand(), &I);
2210 
2211     if (I.isAtomic())
2212       I.setOrdering(addAcquireOrdering(I.getOrdering()));
2213 
2214     if (MS.TrackOrigins) {
2215       if (PropagateShadow) {
2216         const Align OriginAlignment = std::max(kMinOriginAlignment, Alignment);
2217         setOrigin(
2218             &I, IRB.CreateAlignedLoad(MS.OriginTy, OriginPtr, OriginAlignment));
2219       } else {
2220         setOrigin(&I, getCleanOrigin());
2221       }
2222     }
2223   }
2224 
2225   /// Instrument StoreInst
2226   ///
2227   /// Stores the corresponding shadow and (optionally) origin.
2228   /// Optionally, checks that the store address is fully defined.
2229   void visitStoreInst(StoreInst &I) {
2230     StoreList.push_back(&I);
2231     if (ClCheckAccessAddress)
2232       insertShadowCheck(I.getPointerOperand(), &I);
2233   }
2234 
2235   void handleCASOrRMW(Instruction &I) {
2236     assert(isa<AtomicRMWInst>(I) || isa<AtomicCmpXchgInst>(I));
2237 
2238     IRBuilder<> IRB(&I);
2239     Value *Addr = I.getOperand(0);
2240     Value *Val = I.getOperand(1);
2241     Value *ShadowPtr = getShadowOriginPtr(Addr, IRB, getShadowTy(Val), Align(1),
2242                                           /*isStore*/ true)
2243                            .first;
2244 
2245     if (ClCheckAccessAddress)
2246       insertShadowCheck(Addr, &I);
2247 
2248     // Only test the conditional argument of cmpxchg instruction.
2249     // The other argument can potentially be uninitialized, but we cannot
2250     // detect this situation reliably without possible false positives.
2251     if (isa<AtomicCmpXchgInst>(I))
2252       insertShadowCheck(Val, &I);
2253 
2254     IRB.CreateStore(getCleanShadow(Val), ShadowPtr);
2255 
2256     setShadow(&I, getCleanShadow(&I));
2257     setOrigin(&I, getCleanOrigin());
2258   }
2259 
2260   void visitAtomicRMWInst(AtomicRMWInst &I) {
2261     handleCASOrRMW(I);
2262     I.setOrdering(addReleaseOrdering(I.getOrdering()));
2263   }
2264 
2265   void visitAtomicCmpXchgInst(AtomicCmpXchgInst &I) {
2266     handleCASOrRMW(I);
2267     I.setSuccessOrdering(addReleaseOrdering(I.getSuccessOrdering()));
2268   }
2269 
2270   // Vector manipulation.
2271   void visitExtractElementInst(ExtractElementInst &I) {
2272     insertShadowCheck(I.getOperand(1), &I);
2273     IRBuilder<> IRB(&I);
2274     setShadow(&I, IRB.CreateExtractElement(getShadow(&I, 0), I.getOperand(1),
2275                                            "_msprop"));
2276     setOrigin(&I, getOrigin(&I, 0));
2277   }
2278 
2279   void visitInsertElementInst(InsertElementInst &I) {
2280     insertShadowCheck(I.getOperand(2), &I);
2281     IRBuilder<> IRB(&I);
2282     auto *Shadow0 = getShadow(&I, 0);
2283     auto *Shadow1 = getShadow(&I, 1);
2284     setShadow(&I, IRB.CreateInsertElement(Shadow0, Shadow1, I.getOperand(2),
2285                                           "_msprop"));
2286     setOriginForNaryOp(I);
2287   }
2288 
2289   void visitShuffleVectorInst(ShuffleVectorInst &I) {
2290     IRBuilder<> IRB(&I);
2291     auto *Shadow0 = getShadow(&I, 0);
2292     auto *Shadow1 = getShadow(&I, 1);
2293     setShadow(&I, IRB.CreateShuffleVector(Shadow0, Shadow1, I.getShuffleMask(),
2294                                           "_msprop"));
2295     setOriginForNaryOp(I);
2296   }
2297 
2298   // Casts.
2299   void visitSExtInst(SExtInst &I) {
2300     IRBuilder<> IRB(&I);
2301     setShadow(&I, IRB.CreateSExt(getShadow(&I, 0), I.getType(), "_msprop"));
2302     setOrigin(&I, getOrigin(&I, 0));
2303   }
2304 
2305   void visitZExtInst(ZExtInst &I) {
2306     IRBuilder<> IRB(&I);
2307     setShadow(&I, IRB.CreateZExt(getShadow(&I, 0), I.getType(), "_msprop"));
2308     setOrigin(&I, getOrigin(&I, 0));
2309   }
2310 
2311   void visitTruncInst(TruncInst &I) {
2312     IRBuilder<> IRB(&I);
2313     setShadow(&I, IRB.CreateTrunc(getShadow(&I, 0), I.getType(), "_msprop"));
2314     setOrigin(&I, getOrigin(&I, 0));
2315   }
2316 
2317   void visitBitCastInst(BitCastInst &I) {
2318     // Special case: if this is the bitcast (there is exactly 1 allowed) between
2319     // a musttail call and a ret, don't instrument. New instructions are not
2320     // allowed after a musttail call.
2321     if (auto *CI = dyn_cast<CallInst>(I.getOperand(0)))
2322       if (CI->isMustTailCall())
2323         return;
2324     IRBuilder<> IRB(&I);
2325     setShadow(&I, IRB.CreateBitCast(getShadow(&I, 0), getShadowTy(&I)));
2326     setOrigin(&I, getOrigin(&I, 0));
2327   }
2328 
2329   void visitPtrToIntInst(PtrToIntInst &I) {
2330     IRBuilder<> IRB(&I);
2331     setShadow(&I, IRB.CreateIntCast(getShadow(&I, 0), getShadowTy(&I), false,
2332                                     "_msprop_ptrtoint"));
2333     setOrigin(&I, getOrigin(&I, 0));
2334   }
2335 
2336   void visitIntToPtrInst(IntToPtrInst &I) {
2337     IRBuilder<> IRB(&I);
2338     setShadow(&I, IRB.CreateIntCast(getShadow(&I, 0), getShadowTy(&I), false,
2339                                     "_msprop_inttoptr"));
2340     setOrigin(&I, getOrigin(&I, 0));
2341   }
2342 
2343   void visitFPToSIInst(CastInst &I) { handleShadowOr(I); }
2344   void visitFPToUIInst(CastInst &I) { handleShadowOr(I); }
2345   void visitSIToFPInst(CastInst &I) { handleShadowOr(I); }
2346   void visitUIToFPInst(CastInst &I) { handleShadowOr(I); }
2347   void visitFPExtInst(CastInst &I) { handleShadowOr(I); }
2348   void visitFPTruncInst(CastInst &I) { handleShadowOr(I); }
2349 
2350   /// Propagate shadow for bitwise AND.
2351   ///
2352   /// This code is exact, i.e. if, for example, a bit in the left argument
2353   /// is defined and 0, then neither the value nor the definedness of the
2354   /// corresponding bit in the right argument affects the resulting shadow.
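  /// For a single bit: if V1 == 0 with S1 == 0 (a defined zero), the formula
  /// below yields S = (0 & S2) | (0 & S2) | (0 & V2) = 0, i.e. the result is
  /// defined regardless of the right operand; with V1 == 1 it yields S = S2.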
2355   void visitAnd(BinaryOperator &I) {
2356     IRBuilder<> IRB(&I);
2357     //  "And" of 0 and a poisoned value results in unpoisoned value.
2358     //  1&1 => 1;     0&1 => 0;     p&1 => p;
2359     //  1&0 => 0;     0&0 => 0;     p&0 => 0;
2360     //  1&p => p;     0&p => 0;     p&p => p;
2361     //  S = (S1 & S2) | (V1 & S2) | (S1 & V2)
2362     Value *S1 = getShadow(&I, 0);
2363     Value *S2 = getShadow(&I, 1);
2364     Value *V1 = I.getOperand(0);
2365     Value *V2 = I.getOperand(1);
2366     if (V1->getType() != S1->getType()) {
2367       V1 = IRB.CreateIntCast(V1, S1->getType(), false);
2368       V2 = IRB.CreateIntCast(V2, S2->getType(), false);
2369     }
2370     Value *S1S2 = IRB.CreateAnd(S1, S2);
2371     Value *V1S2 = IRB.CreateAnd(V1, S2);
2372     Value *S1V2 = IRB.CreateAnd(S1, V2);
2373     setShadow(&I, IRB.CreateOr({S1S2, V1S2, S1V2}));
2374     setOriginForNaryOp(I);
2375   }
2376 
2377   void visitOr(BinaryOperator &I) {
2378     IRBuilder<> IRB(&I);
2379     //  "Or" of 1 and a poisoned value results in unpoisoned value.
2380     //  1|1 => 1;     0|1 => 1;     p|1 => 1;
2381     //  1|0 => 1;     0|0 => 0;     p|0 => p;
2382     //  1|p => 1;     0|p => p;     p|p => p;
2383     //  S = (S1 & S2) | (~V1 & S2) | (S1 & ~V2)
2384     Value *S1 = getShadow(&I, 0);
2385     Value *S2 = getShadow(&I, 1);
2386     Value *V1 = IRB.CreateNot(I.getOperand(0));
2387     Value *V2 = IRB.CreateNot(I.getOperand(1));
2388     if (V1->getType() != S1->getType()) {
2389       V1 = IRB.CreateIntCast(V1, S1->getType(), false);
2390       V2 = IRB.CreateIntCast(V2, S2->getType(), false);
2391     }
2392     Value *S1S2 = IRB.CreateAnd(S1, S2);
2393     Value *V1S2 = IRB.CreateAnd(V1, S2);
2394     Value *S1V2 = IRB.CreateAnd(S1, V2);
2395     setShadow(&I, IRB.CreateOr({S1S2, V1S2, S1V2}));
2396     setOriginForNaryOp(I);
2397   }
2398 
2399   /// Default propagation of shadow and/or origin.
2400   ///
2401   /// This class implements the general case of shadow propagation, used in all
2402   /// cases where we don't know and/or don't care about what the operation
2403   /// actually does. It converts all input shadow values to a common type
2404   /// (extending or truncating as necessary), and bitwise OR's them.
2405   ///
2406   /// This is much cheaper than inserting checks (i.e. requiring inputs to be
2407   /// fully initialized), and less prone to false positives.
2408   ///
2409   /// This class also implements the general case of origin propagation. For a
2410   /// Nary operation, result origin is set to the origin of an argument that is
2411   /// not entirely initialized. If there is more than one such argument, the
2412   /// rightmost of them is picked. It does not matter which one is picked if all
2413   /// arguments are initialized.
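  /// Typical use (see handleShadowOr below):
  ///   ShadowAndOriginCombiner SC(this, IRB);
  ///   for (Use &Op : I.operands())
  ///     SC.Add(Op.get());
  ///   SC.Done(&I);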
2414   template <bool CombineShadow> class Combiner {
2415     Value *Shadow = nullptr;
2416     Value *Origin = nullptr;
2417     IRBuilder<> &IRB;
2418     MemorySanitizerVisitor *MSV;
2419 
2420   public:
2421     Combiner(MemorySanitizerVisitor *MSV, IRBuilder<> &IRB)
2422         : IRB(IRB), MSV(MSV) {}
2423 
2424     /// Add a pair of shadow and origin values to the mix.
2425     Combiner &Add(Value *OpShadow, Value *OpOrigin) {
2426       if (CombineShadow) {
2427         assert(OpShadow);
2428         if (!Shadow)
2429           Shadow = OpShadow;
2430         else {
2431           OpShadow = MSV->CreateShadowCast(IRB, OpShadow, Shadow->getType());
2432           Shadow = IRB.CreateOr(Shadow, OpShadow, "_msprop");
2433         }
2434       }
2435 
2436       if (MSV->MS.TrackOrigins) {
2437         assert(OpOrigin);
2438         if (!Origin) {
2439           Origin = OpOrigin;
2440         } else {
2441           Constant *ConstOrigin = dyn_cast<Constant>(OpOrigin);
2442           // No point in adding something that might result in 0 origin value.
2443           if (!ConstOrigin || !ConstOrigin->isNullValue()) {
2444             Value *Cond = MSV->convertToBool(OpShadow, IRB);
2445             Origin = IRB.CreateSelect(Cond, OpOrigin, Origin);
2446           }
2447         }
2448       }
2449       return *this;
2450     }
2451 
2452     /// Add an application value to the mix.
2453     Combiner &Add(Value *V) {
2454       Value *OpShadow = MSV->getShadow(V);
2455       Value *OpOrigin = MSV->MS.TrackOrigins ? MSV->getOrigin(V) : nullptr;
2456       return Add(OpShadow, OpOrigin);
2457     }
2458 
2459     /// Set the current combined values as the given instruction's shadow
2460     /// and origin.
2461     void Done(Instruction *I) {
2462       if (CombineShadow) {
2463         assert(Shadow);
2464         Shadow = MSV->CreateShadowCast(IRB, Shadow, MSV->getShadowTy(I));
2465         MSV->setShadow(I, Shadow);
2466       }
2467       if (MSV->MS.TrackOrigins) {
2468         assert(Origin);
2469         MSV->setOrigin(I, Origin);
2470       }
2471     }
2472   };
2473 
2474   using ShadowAndOriginCombiner = Combiner<true>;
2475   using OriginCombiner = Combiner<false>;
2476 
2477   /// Propagate origin for arbitrary operation.
2478   void setOriginForNaryOp(Instruction &I) {
2479     if (!MS.TrackOrigins)
2480       return;
2481     IRBuilder<> IRB(&I);
2482     OriginCombiner OC(this, IRB);
2483     for (Use &Op : I.operands())
2484       OC.Add(Op.get());
2485     OC.Done(&I);
2486   }
2487 
2488   size_t VectorOrPrimitiveTypeSizeInBits(Type *Ty) {
2489     assert(!(Ty->isVectorTy() && Ty->getScalarType()->isPointerTy()) &&
2490            "Vector of pointers is not a valid shadow type");
2491     return Ty->isVectorTy() ? cast<FixedVectorType>(Ty)->getNumElements() *
2492                                   Ty->getScalarSizeInBits()
2493                             : Ty->getPrimitiveSizeInBits();
2494   }
2495 
2496   /// Cast between two shadow types, extending or truncating as
2497   /// necessary.
2498   Value *CreateShadowCast(IRBuilder<> &IRB, Value *V, Type *dstTy,
2499                           bool Signed = false) {
2500     Type *srcTy = V->getType();
2501     size_t srcSizeInBits = VectorOrPrimitiveTypeSizeInBits(srcTy);
2502     size_t dstSizeInBits = VectorOrPrimitiveTypeSizeInBits(dstTy);
2503     if (srcSizeInBits > 1 && dstSizeInBits == 1)
2504       return IRB.CreateICmpNE(V, getCleanShadow(V));
2505 
2506     if (dstTy->isIntegerTy() && srcTy->isIntegerTy())
2507       return IRB.CreateIntCast(V, dstTy, Signed);
2508     if (dstTy->isVectorTy() && srcTy->isVectorTy() &&
2509         cast<VectorType>(dstTy)->getElementCount() ==
2510             cast<VectorType>(srcTy)->getElementCount())
2511       return IRB.CreateIntCast(V, dstTy, Signed);
2512     Value *V1 = IRB.CreateBitCast(V, Type::getIntNTy(*MS.C, srcSizeInBits));
2513     Value *V2 =
2514         IRB.CreateIntCast(V1, Type::getIntNTy(*MS.C, dstSizeInBits), Signed);
2515     return IRB.CreateBitCast(V2, dstTy);
2516     // TODO: handle struct types.
2517   }
2518 
2519   /// Cast an application value to the type of its own shadow.
2520   Value *CreateAppToShadowCast(IRBuilder<> &IRB, Value *V) {
2521     Type *ShadowTy = getShadowTy(V);
2522     if (V->getType() == ShadowTy)
2523       return V;
2524     if (V->getType()->isPtrOrPtrVectorTy())
2525       return IRB.CreatePtrToInt(V, ShadowTy);
2526     else
2527       return IRB.CreateBitCast(V, ShadowTy);
2528   }
2529 
2530   /// Propagate shadow for arbitrary operation.
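  ///
  /// As a rough illustration (exact casts depend on the operand types), for
  ///   %c = add i32 %a, %b
  /// the propagated shadow is simply the OR of the operand shadows:
  ///   %_msprop = or i32 %sa, %sb
  /// where %sa and %sb stand for the shadows of %a and %b.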
2531   void handleShadowOr(Instruction &I) {
2532     IRBuilder<> IRB(&I);
2533     ShadowAndOriginCombiner SC(this, IRB);
2534     for (Use &Op : I.operands())
2535       SC.Add(Op.get());
2536     SC.Done(&I);
2537   }
2538 
2539   void visitFNeg(UnaryOperator &I) { handleShadowOr(I); }
2540 
2541   // Handle multiplication by constant.
2542   //
2543   // Handle a special case of multiplication by constant that may have one or
2544   // more zeros in the lower bits. This makes the corresponding number of lower
2545   // bits of the result zero as well. We model it by shifting the other operand
2546   // shadow left by the required number of bits. Effectively, we transform
2547   // (X * (A * 2**B)) to ((X << B) * A) and instrument (X << B) as (Sx << B).
2548   // We use multiplication by 2**N instead of shift to cover the case of
2549   // multiplication by 0, which may occur in some elements of a vector operand.
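  // For example (a sketch): multiplying by the constant 24 = 3 * 2**3 leaves
  // the three low bits of the product known-zero, so the shadow of the other
  // operand is multiplied by 8 (i.e. shifted left by 3), clearing the shadow of
  // those low bits while keeping the higher bits poisoned whenever the operand
  // was poisoned.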
2550   void handleMulByConstant(BinaryOperator &I, Constant *ConstArg,
2551                            Value *OtherArg) {
2552     Constant *ShadowMul;
2553     Type *Ty = ConstArg->getType();
2554     if (auto *VTy = dyn_cast<VectorType>(Ty)) {
2555       unsigned NumElements = cast<FixedVectorType>(VTy)->getNumElements();
2556       Type *EltTy = VTy->getElementType();
2557       SmallVector<Constant *, 16> Elements;
2558       for (unsigned Idx = 0; Idx < NumElements; ++Idx) {
2559         if (ConstantInt *Elt =
2560                 dyn_cast<ConstantInt>(ConstArg->getAggregateElement(Idx))) {
2561           const APInt &V = Elt->getValue();
2562           APInt V2 = APInt(V.getBitWidth(), 1) << V.countr_zero();
2563           Elements.push_back(ConstantInt::get(EltTy, V2));
2564         } else {
2565           Elements.push_back(ConstantInt::get(EltTy, 1));
2566         }
2567       }
2568       ShadowMul = ConstantVector::get(Elements);
2569     } else {
2570       if (ConstantInt *Elt = dyn_cast<ConstantInt>(ConstArg)) {
2571         const APInt &V = Elt->getValue();
2572         APInt V2 = APInt(V.getBitWidth(), 1) << V.countr_zero();
2573         ShadowMul = ConstantInt::get(Ty, V2);
2574       } else {
2575         ShadowMul = ConstantInt::get(Ty, 1);
2576       }
2577     }
2578 
2579     IRBuilder<> IRB(&I);
2580     setShadow(&I,
2581               IRB.CreateMul(getShadow(OtherArg), ShadowMul, "msprop_mul_cst"));
2582     setOrigin(&I, getOrigin(OtherArg));
2583   }
2584 
2585   void visitMul(BinaryOperator &I) {
2586     Constant *constOp0 = dyn_cast<Constant>(I.getOperand(0));
2587     Constant *constOp1 = dyn_cast<Constant>(I.getOperand(1));
2588     if (constOp0 && !constOp1)
2589       handleMulByConstant(I, constOp0, I.getOperand(1));
2590     else if (constOp1 && !constOp0)
2591       handleMulByConstant(I, constOp1, I.getOperand(0));
2592     else
2593       handleShadowOr(I);
2594   }
2595 
2596   void visitFAdd(BinaryOperator &I) { handleShadowOr(I); }
2597   void visitFSub(BinaryOperator &I) { handleShadowOr(I); }
2598   void visitFMul(BinaryOperator &I) { handleShadowOr(I); }
2599   void visitAdd(BinaryOperator &I) { handleShadowOr(I); }
2600   void visitSub(BinaryOperator &I) { handleShadowOr(I); }
2601   void visitXor(BinaryOperator &I) { handleShadowOr(I); }
2602 
2603   void handleIntegerDiv(Instruction &I) {
2604     IRBuilder<> IRB(&I);
2605     // Strict on the second argument.
2606     insertShadowCheck(I.getOperand(1), &I);
2607     setShadow(&I, getShadow(&I, 0));
2608     setOrigin(&I, getOrigin(&I, 0));
2609   }
2610 
2611   void visitUDiv(BinaryOperator &I) { handleIntegerDiv(I); }
2612   void visitSDiv(BinaryOperator &I) { handleIntegerDiv(I); }
2613   void visitURem(BinaryOperator &I) { handleIntegerDiv(I); }
2614   void visitSRem(BinaryOperator &I) { handleIntegerDiv(I); }
2615 
2616   // Floating point division is side-effect free, so we cannot require that the
2617   // divisor is fully initialized; instead we propagate its shadow. See PR37523.
2618   void visitFDiv(BinaryOperator &I) { handleShadowOr(I); }
2619   void visitFRem(BinaryOperator &I) { handleShadowOr(I); }
2620 
2621   /// Instrument == and != comparisons.
2622   ///
2623   /// Sometimes the comparison result is known even if some of the bits of the
2624   /// arguments are not.
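  ///
  /// Worked example (illustrative): let A = 0b10?? with the two low bits
  /// unknown (their shadow bits set) and B = 0b0000. Then C = A ^ B has a
  /// defined 1 bit (bit 3), so A == B is known to be false regardless of the
  /// unknown bits, and the result shadow is clean. If every nonzero bit of C
  /// were also a poisoned bit, the result would be poisoned instead.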
2625   void handleEqualityComparison(ICmpInst &I) {
2626     IRBuilder<> IRB(&I);
2627     Value *A = I.getOperand(0);
2628     Value *B = I.getOperand(1);
2629     Value *Sa = getShadow(A);
2630     Value *Sb = getShadow(B);
2631 
2632     // Get rid of pointers and vectors of pointers.
2633     // For ints (and vectors of ints), types of A and Sa match,
2634     // and this is a no-op.
2635     A = IRB.CreatePointerCast(A, Sa->getType());
2636     B = IRB.CreatePointerCast(B, Sb->getType());
2637 
2638     // A == B  <==>  (C = A^B) == 0
2639     // A != B  <==>  (C = A^B) != 0
2640     // Sc = Sa | Sb
2641     Value *C = IRB.CreateXor(A, B);
2642     Value *Sc = IRB.CreateOr(Sa, Sb);
2643     // Now dealing with i = (C == 0) comparison (or C != 0, does not matter now)
2644     // Result is defined if one of the following is true
2645     // * there is a defined 1 bit in C
2646     // * C is fully defined
2647     // Si = !(C & ~Sc) && Sc
2648     Value *Zero = Constant::getNullValue(Sc->getType());
2649     Value *MinusOne = Constant::getAllOnesValue(Sc->getType());
2650     Value *LHS = IRB.CreateICmpNE(Sc, Zero);
2651     Value *RHS =
2652         IRB.CreateICmpEQ(IRB.CreateAnd(IRB.CreateXor(Sc, MinusOne), C), Zero);
2653     Value *Si = IRB.CreateAnd(LHS, RHS);
2654     Si->setName("_msprop_icmp");
2655     setShadow(&I, Si);
2656     setOriginForNaryOp(I);
2657   }
2658 
2659   /// Build the lowest possible value of V, taking into account V's
2660   ///        uninitialized bits.
2661   Value *getLowestPossibleValue(IRBuilder<> &IRB, Value *A, Value *Sa,
2662                                 bool isSigned) {
2663     if (isSigned) {
2664       // Split shadow into sign bit and other bits.
2665       Value *SaOtherBits = IRB.CreateLShr(IRB.CreateShl(Sa, 1), 1);
2666       Value *SaSignBit = IRB.CreateXor(Sa, SaOtherBits);
2667       // Maximize the undefined shadow bit, minimize other undefined bits.
2668       return IRB.CreateOr(IRB.CreateAnd(A, IRB.CreateNot(SaOtherBits)),
2669                           SaSignBit);
2670     } else {
2671       // Minimize undefined bits.
2672       return IRB.CreateAnd(A, IRB.CreateNot(Sa));
2673     }
2674   }
2675 
2676   /// Build the highest possible value of V, taking into account V's
2677   ///        uninitialized bits.
2678   Value *getHighestPossibleValue(IRBuilder<> &IRB, Value *A, Value *Sa,
2679                                  bool isSigned) {
2680     if (isSigned) {
2681       // Split shadow into sign bit and other bits.
2682       Value *SaOtherBits = IRB.CreateLShr(IRB.CreateShl(Sa, 1), 1);
2683       Value *SaSignBit = IRB.CreateXor(Sa, SaOtherBits);
2684       // Minimize the undefined shadow bit, maximize other undefined bits.
2685       return IRB.CreateOr(IRB.CreateAnd(A, IRB.CreateNot(SaSignBit)),
2686                           SaOtherBits);
2687     } else {
2688       // Maximize undefined bits.
2689       return IRB.CreateOr(A, Sa);
2690     }
2691   }
2692 
2693   /// Instrument relational comparisons.
2694   ///
2695   /// This function does exact shadow propagation for all relational
2696   /// comparisons of integers, pointers and vectors of those.
2697   /// FIXME: output seems suboptimal when one of the operands is a constant
2698   void handleRelationalComparisonExact(ICmpInst &I) {
2699     IRBuilder<> IRB(&I);
2700     Value *A = I.getOperand(0);
2701     Value *B = I.getOperand(1);
2702     Value *Sa = getShadow(A);
2703     Value *Sb = getShadow(B);
2704 
2705     // Get rid of pointers and vectors of pointers.
2706     // For ints (and vectors of ints), types of A and Sa match,
2707     // and this is a no-op.
2708     A = IRB.CreatePointerCast(A, Sa->getType());
2709     B = IRB.CreatePointerCast(B, Sb->getType());
2710 
2711     // Let [a0, a1] be the interval of possible values of A, taking into account
2712     // its undefined bits. Let [b0, b1] be the interval of possible values of B.
2713     // Then (A cmp B) is defined iff (a0 cmp b1) == (a1 cmp b0).
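    // Worked example (unsigned, illustrative): if A = 0b1?0 (bit 1 unknown, so
    // a0 = 0b100 and a1 = 0b110) and B = 0b011 is fully defined (b0 = b1 =
    // 0b011), then "a0 > b1" and "a1 > b0" agree, so A > B does not depend on
    // the unknown bit and its shadow is clean; if the two probes disagreed,
    // the result shadow would be set.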
2714     bool IsSigned = I.isSigned();
2715     Value *S1 = IRB.CreateICmp(I.getPredicate(),
2716                                getLowestPossibleValue(IRB, A, Sa, IsSigned),
2717                                getHighestPossibleValue(IRB, B, Sb, IsSigned));
2718     Value *S2 = IRB.CreateICmp(I.getPredicate(),
2719                                getHighestPossibleValue(IRB, A, Sa, IsSigned),
2720                                getLowestPossibleValue(IRB, B, Sb, IsSigned));
2721     Value *Si = IRB.CreateXor(S1, S2);
2722     setShadow(&I, Si);
2723     setOriginForNaryOp(I);
2724   }
2725 
2726   /// Instrument signed relational comparisons.
2727   ///
2728   /// Handle sign bit tests: x<0, x>=0, x<=-1, x>-1 by propagating the highest
2729   /// bit of the shadow. Everything else is delegated to handleShadowOr().
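  ///
  /// For example (a sketch), for "x < 0" only the sign bit of x matters, so
  /// the result is poisoned iff the sign bit of x's shadow is set; this is
  /// computed as a signed "shadow < 0" test against the clean shadow rather
  /// than by ORing all of the shadow bits together.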
2730   void handleSignedRelationalComparison(ICmpInst &I) {
2731     Constant *constOp;
2732     Value *op = nullptr;
2733     CmpInst::Predicate pre;
2734     if ((constOp = dyn_cast<Constant>(I.getOperand(1)))) {
2735       op = I.getOperand(0);
2736       pre = I.getPredicate();
2737     } else if ((constOp = dyn_cast<Constant>(I.getOperand(0)))) {
2738       op = I.getOperand(1);
2739       pre = I.getSwappedPredicate();
2740     } else {
2741       handleShadowOr(I);
2742       return;
2743     }
2744 
2745     if ((constOp->isNullValue() &&
2746          (pre == CmpInst::ICMP_SLT || pre == CmpInst::ICMP_SGE)) ||
2747         (constOp->isAllOnesValue() &&
2748          (pre == CmpInst::ICMP_SGT || pre == CmpInst::ICMP_SLE))) {
2749       IRBuilder<> IRB(&I);
2750       Value *Shadow = IRB.CreateICmpSLT(getShadow(op), getCleanShadow(op),
2751                                         "_msprop_icmp_s");
2752       setShadow(&I, Shadow);
2753       setOrigin(&I, getOrigin(op));
2754     } else {
2755       handleShadowOr(I);
2756     }
2757   }
2758 
2759   void visitICmpInst(ICmpInst &I) {
2760     if (!ClHandleICmp) {
2761       handleShadowOr(I);
2762       return;
2763     }
2764     if (I.isEquality()) {
2765       handleEqualityComparison(I);
2766       return;
2767     }
2768 
2769     assert(I.isRelational());
2770     if (ClHandleICmpExact) {
2771       handleRelationalComparisonExact(I);
2772       return;
2773     }
2774     if (I.isSigned()) {
2775       handleSignedRelationalComparison(I);
2776       return;
2777     }
2778 
2779     assert(I.isUnsigned());
2780     if ((isa<Constant>(I.getOperand(0)) || isa<Constant>(I.getOperand(1)))) {
2781       handleRelationalComparisonExact(I);
2782       return;
2783     }
2784 
2785     handleShadowOr(I);
2786   }
2787 
2788   void visitFCmpInst(FCmpInst &I) { handleShadowOr(I); }
2789 
2790   void handleShift(BinaryOperator &I) {
2791     IRBuilder<> IRB(&I);
2792     // If any of the S2 bits are poisoned, the whole thing is poisoned.
2793     // Otherwise perform the same shift on S1.
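    // Schematically (names are illustrative), for %r = shl i32 %x, %n this
    // emits roughly:
    //   %sn_any = icmp ne i32 %sn, 0      ; any poison in the shift amount?
    //   %sn_all = sext i1 %sn_any to i32  ; splat to all-ones if so
    //   %sx_shl = shl i32 %sx, %n         ; shift the value shadow the same way
    //   %sr     = or i32 %sx_shl, %sn_all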
2794     Value *S1 = getShadow(&I, 0);
2795     Value *S2 = getShadow(&I, 1);
2796     Value *S2Conv =
2797         IRB.CreateSExt(IRB.CreateICmpNE(S2, getCleanShadow(S2)), S2->getType());
2798     Value *V2 = I.getOperand(1);
2799     Value *Shift = IRB.CreateBinOp(I.getOpcode(), S1, V2);
2800     setShadow(&I, IRB.CreateOr(Shift, S2Conv));
2801     setOriginForNaryOp(I);
2802   }
2803 
2804   void visitShl(BinaryOperator &I) { handleShift(I); }
2805   void visitAShr(BinaryOperator &I) { handleShift(I); }
2806   void visitLShr(BinaryOperator &I) { handleShift(I); }
2807 
2808   void handleFunnelShift(IntrinsicInst &I) {
2809     IRBuilder<> IRB(&I);
2810     // If any of the S2 bits are poisoned, the whole thing is poisoned.
2811     // Otherwise perform the same shift on S0 and S1.
2812     Value *S0 = getShadow(&I, 0);
2813     Value *S1 = getShadow(&I, 1);
2814     Value *S2 = getShadow(&I, 2);
2815     Value *S2Conv =
2816         IRB.CreateSExt(IRB.CreateICmpNE(S2, getCleanShadow(S2)), S2->getType());
2817     Value *V2 = I.getOperand(2);
2818     Function *Intrin = Intrinsic::getDeclaration(
2819         I.getModule(), I.getIntrinsicID(), S2Conv->getType());
2820     Value *Shift = IRB.CreateCall(Intrin, {S0, S1, V2});
2821     setShadow(&I, IRB.CreateOr(Shift, S2Conv));
2822     setOriginForNaryOp(I);
2823   }
2824 
2825   /// Instrument llvm.memmove
2826   ///
2827   /// At this point we don't know if llvm.memmove will be inlined or not.
2828   /// If we don't instrument it and it gets inlined,
2829   /// our interceptor will not kick in and we will lose the memmove.
2830   /// If we instrument the call here, but it does not get inlined,
2831   /// we will memmove the shadow twice, which is bad in the case
2832   /// of overlapping regions. So, we simply lower the intrinsic to a call.
2833   ///
2834   /// Similar situation exists for memcpy and memset.
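  ///
  /// Roughly (a sketch, assuming the usual MSan runtime interface), a call
  ///   call void @llvm.memmove.p0.p0.i64(ptr %dst, ptr %src, i64 %n, i1 false)
  /// becomes a call to the runtime helper, which also moves the shadow:
  ///   call ptr @__msan_memmove(ptr %dst, ptr %src, i64 %n)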
2835   void visitMemMoveInst(MemMoveInst &I) {
2836     getShadow(I.getArgOperand(1)); // Ensure shadow initialized
2837     IRBuilder<> IRB(&I);
2838     IRB.CreateCall(MS.MemmoveFn,
2839                    {I.getArgOperand(0), I.getArgOperand(1),
2840                     IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
2841     I.eraseFromParent();
2842   }
2843 
2844   /// Instrument memcpy
2845   ///
2846   /// Similar to memmove: avoid copying shadow twice. This is somewhat
2847   /// unfortunate as it may slow down small constant memcpys.
2848   /// FIXME: consider doing manual inline for small constant sizes and proper
2849   /// alignment.
2850   ///
2851   /// Note: This also handles memcpy.inline, which promises no calls to external
2852   /// functions as an optimization. However, with instrumentation enabled this
2853   /// is difficult to promise; additionally, we know that the MSan runtime
2854   /// exists and provides __msan_memcpy(). Therefore, we assume that with
2855   /// instrumentation it's safe to turn memcpy.inline into a call to
2856   /// __msan_memcpy(). Should this be wrong, such as when implementing memcpy()
2857   /// itself, instrumentation should be disabled with the no_sanitize attribute.
2858   void visitMemCpyInst(MemCpyInst &I) {
2859     getShadow(I.getArgOperand(1)); // Ensure shadow initialized
2860     IRBuilder<> IRB(&I);
2861     IRB.CreateCall(MS.MemcpyFn,
2862                    {I.getArgOperand(0), I.getArgOperand(1),
2863                     IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
2864     I.eraseFromParent();
2865   }
2866 
2867   // Same as memcpy.
2868   void visitMemSetInst(MemSetInst &I) {
2869     IRBuilder<> IRB(&I);
2870     IRB.CreateCall(
2871         MS.MemsetFn,
2872         {I.getArgOperand(0),
2873          IRB.CreateIntCast(I.getArgOperand(1), IRB.getInt32Ty(), false),
2874          IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
2875     I.eraseFromParent();
2876   }
2877 
2878   void visitVAStartInst(VAStartInst &I) { VAHelper->visitVAStartInst(I); }
2879 
2880   void visitVACopyInst(VACopyInst &I) { VAHelper->visitVACopyInst(I); }
2881 
2882   /// Handle vector store-like intrinsics.
2883   ///
2884   /// Instrument intrinsics that look like a simple SIMD store: writes memory,
2885   /// has 1 pointer argument and 1 vector argument, returns void.
2886   bool handleVectorStoreIntrinsic(IntrinsicInst &I) {
2887     IRBuilder<> IRB(&I);
2888     Value *Addr = I.getArgOperand(0);
2889     Value *Shadow = getShadow(&I, 1);
2890     Value *ShadowPtr, *OriginPtr;
2891 
2892     // We don't know the pointer alignment (could be unaligned SSE store!).
2893     // Have to assume the worst case.
2894     std::tie(ShadowPtr, OriginPtr) = getShadowOriginPtr(
2895         Addr, IRB, Shadow->getType(), Align(1), /*isStore*/ true);
2896     IRB.CreateAlignedStore(Shadow, ShadowPtr, Align(1));
2897 
2898     if (ClCheckAccessAddress)
2899       insertShadowCheck(Addr, &I);
2900 
2901     // FIXME: factor out common code from materializeStores
2902     if (MS.TrackOrigins)
2903       IRB.CreateStore(getOrigin(&I, 1), OriginPtr);
2904     return true;
2905   }
2906 
2907   /// Handle vector load-like intrinsics.
2908   ///
2909   /// Instrument intrinsics that look like a simple SIMD load: reads memory,
2910   /// has 1 pointer argument, returns a vector.
2911   bool handleVectorLoadIntrinsic(IntrinsicInst &I) {
2912     IRBuilder<> IRB(&I);
2913     Value *Addr = I.getArgOperand(0);
2914 
2915     Type *ShadowTy = getShadowTy(&I);
2916     Value *ShadowPtr = nullptr, *OriginPtr = nullptr;
2917     if (PropagateShadow) {
2918       // We don't know the pointer alignment (could be unaligned SSE load!).
2919       // Have to assume the worst case.
2920       const Align Alignment = Align(1);
2921       std::tie(ShadowPtr, OriginPtr) =
2922           getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment, /*isStore*/ false);
2923       setShadow(&I,
2924                 IRB.CreateAlignedLoad(ShadowTy, ShadowPtr, Alignment, "_msld"));
2925     } else {
2926       setShadow(&I, getCleanShadow(&I));
2927     }
2928 
2929     if (ClCheckAccessAddress)
2930       insertShadowCheck(Addr, &I);
2931 
2932     if (MS.TrackOrigins) {
2933       if (PropagateShadow)
2934         setOrigin(&I, IRB.CreateLoad(MS.OriginTy, OriginPtr));
2935       else
2936         setOrigin(&I, getCleanOrigin());
2937     }
2938     return true;
2939   }
2940 
2941   /// Handle (SIMD arithmetic)-like intrinsics.
2942   ///
2943   /// Instrument intrinsics with any number of arguments of the same type,
2944   /// equal to the return type. The type should be simple (no aggregates or
2945   /// pointers; vectors are fine).
2946   /// Caller guarantees that this intrinsic does not access memory.
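  ///
  /// For instance (illustrative), a call like
  ///   %r = call <8 x i16> @llvm.x86.sse2.pmulh.w(<8 x i16> %a, <8 x i16> %b)
  /// matches this shape, so its shadow is just the OR of the argument shadows,
  /// with origins combined in the usual way.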
2947   bool maybeHandleSimpleNomemIntrinsic(IntrinsicInst &I) {
2948     Type *RetTy = I.getType();
2949     if (!(RetTy->isIntOrIntVectorTy() || RetTy->isFPOrFPVectorTy() ||
2950           RetTy->isX86_MMXTy()))
2951       return false;
2952 
2953     unsigned NumArgOperands = I.arg_size();
2954     for (unsigned i = 0; i < NumArgOperands; ++i) {
2955       Type *Ty = I.getArgOperand(i)->getType();
2956       if (Ty != RetTy)
2957         return false;
2958     }
2959 
2960     IRBuilder<> IRB(&I);
2961     ShadowAndOriginCombiner SC(this, IRB);
2962     for (unsigned i = 0; i < NumArgOperands; ++i)
2963       SC.Add(I.getArgOperand(i));
2964     SC.Done(&I);
2965 
2966     return true;
2967   }
2968 
2969   /// Heuristically instrument unknown intrinsics.
2970   ///
2971   /// The main purpose of this code is to do something reasonable with all
2972   /// random intrinsics we might encounter, most importantly - SIMD intrinsics.
2973   /// We recognize several classes of intrinsics by their argument types and
2974   /// ModRefBehaviour and apply special instrumentation when we are reasonably
2975   /// sure that we know what the intrinsic does.
2976   ///
2977   /// We special-case intrinsics where this approach fails. See llvm.bswap
2978   /// handling as an example of that.
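  ///
  /// For instance (a hypothetical shape, not a specific intrinsic), a call
  ///   call void @llvm.some.target.store(ptr %p, <4 x i32> %v)
  /// that is not marked as only reading memory is treated as a SIMD store,
  /// while a nomem call whose arguments all have the return type is treated
  /// as SIMD arithmetic by maybeHandleSimpleNomemIntrinsic above.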
2979   bool handleUnknownIntrinsic(IntrinsicInst &I) {
2980     unsigned NumArgOperands = I.arg_size();
2981     if (NumArgOperands == 0)
2982       return false;
2983 
2984     if (NumArgOperands == 2 && I.getArgOperand(0)->getType()->isPointerTy() &&
2985         I.getArgOperand(1)->getType()->isVectorTy() &&
2986         I.getType()->isVoidTy() && !I.onlyReadsMemory()) {
2987       // This looks like a vector store.
2988       return handleVectorStoreIntrinsic(I);
2989     }
2990 
2991     if (NumArgOperands == 1 && I.getArgOperand(0)->getType()->isPointerTy() &&
2992         I.getType()->isVectorTy() && I.onlyReadsMemory()) {
2993       // This looks like a vector load.
2994       return handleVectorLoadIntrinsic(I);
2995     }
2996 
2997     if (I.doesNotAccessMemory())
2998       if (maybeHandleSimpleNomemIntrinsic(I))
2999         return true;
3000 
3001     // FIXME: detect and handle SSE maskstore/maskload
3002     return false;
3003   }
3004 
3005   void handleInvariantGroup(IntrinsicInst &I) {
3006     setShadow(&I, getShadow(&I, 0));
3007     setOrigin(&I, getOrigin(&I, 0));
3008   }
3009 
3010   void handleLifetimeStart(IntrinsicInst &I) {
3011     if (!PoisonStack)
3012       return;
3013     AllocaInst *AI = llvm::findAllocaForValue(I.getArgOperand(1));
3014     if (!AI)
3015       InstrumentLifetimeStart = false;
3016     LifetimeStartList.push_back(std::make_pair(&I, AI));
3017   }
3018 
3019   void handleBswap(IntrinsicInst &I) {
3020     IRBuilder<> IRB(&I);
3021     Value *Op = I.getArgOperand(0);
3022     Type *OpType = Op->getType();
3023     Function *BswapFunc = Intrinsic::getDeclaration(
3024         F.getParent(), Intrinsic::bswap, ArrayRef(&OpType, 1));
3025     setShadow(&I, IRB.CreateCall(BswapFunc, getShadow(Op)));
3026     setOrigin(&I, getOrigin(Op));
3027   }
3028 
3029   void handleCountZeroes(IntrinsicInst &I) {
3030     IRBuilder<> IRB(&I);
3031     Value *Src = I.getArgOperand(0);
3032 
3033     // Set the Output shadow based on input Shadow
3034     Value *BoolShadow = IRB.CreateIsNotNull(getShadow(Src), "_mscz_bs");
3035 
3036     // If zero poison is requested, mix in with the shadow
3037     Constant *IsZeroPoison = cast<Constant>(I.getOperand(1));
3038     if (!IsZeroPoison->isZeroValue()) {
3039       Value *BoolZeroPoison = IRB.CreateIsNull(Src, "_mscz_bzp");
3040       BoolShadow = IRB.CreateOr(BoolShadow, BoolZeroPoison, "_mscz_bs");
3041     }
3042 
3043     Value *OutputShadow =
3044         IRB.CreateSExt(BoolShadow, getShadowTy(Src), "_mscz_os");
3045 
3046     setShadow(&I, OutputShadow);
3047     setOriginForNaryOp(I);
3048   }
3049 
3050   // Instrument vector convert intrinsic.
3051   //
3052   // This function instruments intrinsics like cvtsi2ss:
3053   // %Out = int_xxx_cvtyyy(%ConvertOp)
3054   // or
3055   // %Out = int_xxx_cvtyyy(%CopyOp, %ConvertOp)
3056   // Intrinsic converts \p NumUsedElements elements of \p ConvertOp to the same
3057   // number of \p Out elements, and (if it has 2 arguments) copies the rest of
3058   // the elements from \p CopyOp.
3059   // In most cases conversion involves floating-point value which may trigger a
3060   // hardware exception when not fully initialized. For this reason we require
3061   // \p ConvertOp[0:NumUsedElements] to be fully initialized and trap otherwise.
3062   // We copy the shadow of \p CopyOp[NumUsedElements:] to \p
3063   // Out[NumUsedElements:]. This means that intrinsics without \p CopyOp always
3064   // return a fully initialized value.
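  // For example (illustrative): for cvtsd2si, which has a single operand and
  // NumUsedElements == 1, the shadow of element 0 of the operand is checked
  // (reporting if poisoned) and the result is fully initialized; for cvtsd2ss,
  // which has two operands, element 0 of the result shadow is cleared and the
  // remaining elements keep CopyOp's shadow.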
3065   void handleVectorConvertIntrinsic(IntrinsicInst &I, int NumUsedElements,
3066                                     bool HasRoundingMode = false) {
3067     IRBuilder<> IRB(&I);
3068     Value *CopyOp, *ConvertOp;
3069 
3070     assert((!HasRoundingMode ||
3071             isa<ConstantInt>(I.getArgOperand(I.arg_size() - 1))) &&
3072            "Invalid rounding mode");
3073 
3074     switch (I.arg_size() - HasRoundingMode) {
3075     case 2:
3076       CopyOp = I.getArgOperand(0);
3077       ConvertOp = I.getArgOperand(1);
3078       break;
3079     case 1:
3080       ConvertOp = I.getArgOperand(0);
3081       CopyOp = nullptr;
3082       break;
3083     default:
3084       llvm_unreachable("Cvt intrinsic with unsupported number of arguments.");
3085     }
3086 
3087     // The first *NumUsedElements* elements of ConvertOp are converted to the
3088     // same number of output elements. The rest of the output is copied from
3089     // CopyOp, or (if not available) filled with zeroes.
3090     // Combine shadow for elements of ConvertOp that are used in this operation,
3091     // and insert a check.
3092     // FIXME: consider propagating shadow of ConvertOp, at least in the case of
3093     // int->any conversion.
3094     Value *ConvertShadow = getShadow(ConvertOp);
3095     Value *AggShadow = nullptr;
3096     if (ConvertOp->getType()->isVectorTy()) {
3097       AggShadow = IRB.CreateExtractElement(
3098           ConvertShadow, ConstantInt::get(IRB.getInt32Ty(), 0));
3099       for (int i = 1; i < NumUsedElements; ++i) {
3100         Value *MoreShadow = IRB.CreateExtractElement(
3101             ConvertShadow, ConstantInt::get(IRB.getInt32Ty(), i));
3102         AggShadow = IRB.CreateOr(AggShadow, MoreShadow);
3103       }
3104     } else {
3105       AggShadow = ConvertShadow;
3106     }
3107     assert(AggShadow->getType()->isIntegerTy());
3108     insertShadowCheck(AggShadow, getOrigin(ConvertOp), &I);
3109 
3110     // Build result shadow by zero-filling parts of CopyOp shadow that come from
3111     // ConvertOp.
3112     if (CopyOp) {
3113       assert(CopyOp->getType() == I.getType());
3114       assert(CopyOp->getType()->isVectorTy());
3115       Value *ResultShadow = getShadow(CopyOp);
3116       Type *EltTy = cast<VectorType>(ResultShadow->getType())->getElementType();
3117       for (int i = 0; i < NumUsedElements; ++i) {
3118         ResultShadow = IRB.CreateInsertElement(
3119             ResultShadow, ConstantInt::getNullValue(EltTy),
3120             ConstantInt::get(IRB.getInt32Ty(), i));
3121       }
3122       setShadow(&I, ResultShadow);
3123       setOrigin(&I, getOrigin(CopyOp));
3124     } else {
3125       setShadow(&I, getCleanShadow(&I));
3126       setOrigin(&I, getCleanOrigin());
3127     }
3128   }
3129 
3130   // Given a scalar or vector, extract lower 64 bits (or less), and return all
3131   // zeroes if it is zero, and all ones otherwise.
3132   Value *Lower64ShadowExtend(IRBuilder<> &IRB, Value *S, Type *T) {
3133     if (S->getType()->isVectorTy())
3134       S = CreateShadowCast(IRB, S, IRB.getInt64Ty(), /* Signed */ true);
3135     assert(S->getType()->getPrimitiveSizeInBits() <= 64);
3136     Value *S2 = IRB.CreateICmpNE(S, getCleanShadow(S));
3137     return CreateShadowCast(IRB, S2, T, /* Signed */ true);
3138   }
3139 
3140   // Given a vector, extract its first element, and return all
3141   // zeroes if it is zero, and all ones otherwise.
3142   Value *LowerElementShadowExtend(IRBuilder<> &IRB, Value *S, Type *T) {
3143     Value *S1 = IRB.CreateExtractElement(S, (uint64_t)0);
3144     Value *S2 = IRB.CreateICmpNE(S1, getCleanShadow(S1));
3145     return CreateShadowCast(IRB, S2, T, /* Signed */ true);
3146   }
3147 
3148   Value *VariableShadowExtend(IRBuilder<> &IRB, Value *S) {
3149     Type *T = S->getType();
3150     assert(T->isVectorTy());
3151     Value *S2 = IRB.CreateICmpNE(S, getCleanShadow(S));
3152     return IRB.CreateSExt(S2, T);
3153   }
3154 
3155   // Instrument vector shift intrinsic.
3156   //
3157   // This function instruments intrinsics like int_x86_avx2_psll_w.
3158   // Intrinsic shifts %In by %ShiftSize bits.
3159   // %ShiftSize may be a vector. In that case the lower 64 bits determine shift
3160   // size, and the rest is ignored. Behavior is defined even if shift size is
3161   // greater than register (or field) width.
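  // Schematically (names are illustrative), for %r = psll.d(%x, %count) the
  // shadow is computed roughly as
  //   %sc = all-ones if any of the low 64 shadow bits of %count are set
  //   %sr = or (psll.d(%sx, %count)), %sc
  // i.e. the value shadow is shifted by the real shift amount, and the whole
  // result is poisoned if the used part of the shift amount is poisoned.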
3162   void handleVectorShiftIntrinsic(IntrinsicInst &I, bool Variable) {
3163     assert(I.arg_size() == 2);
3164     IRBuilder<> IRB(&I);
3165     // If any of the S2 bits are poisoned, the whole thing is poisoned.
3166     // Otherwise perform the same shift on S1.
3167     Value *S1 = getShadow(&I, 0);
3168     Value *S2 = getShadow(&I, 1);
3169     Value *S2Conv = Variable ? VariableShadowExtend(IRB, S2)
3170                              : Lower64ShadowExtend(IRB, S2, getShadowTy(&I));
3171     Value *V1 = I.getOperand(0);
3172     Value *V2 = I.getOperand(1);
3173     Value *Shift = IRB.CreateCall(I.getFunctionType(), I.getCalledOperand(),
3174                                   {IRB.CreateBitCast(S1, V1->getType()), V2});
3175     Shift = IRB.CreateBitCast(Shift, getShadowTy(&I));
3176     setShadow(&I, IRB.CreateOr(Shift, S2Conv));
3177     setOriginForNaryOp(I);
3178   }
3179 
3180   // Get an X86_MMX-sized vector type.
3181   Type *getMMXVectorTy(unsigned EltSizeInBits) {
3182     const unsigned X86_MMXSizeInBits = 64;
3183     assert(EltSizeInBits != 0 && (X86_MMXSizeInBits % EltSizeInBits) == 0 &&
3184            "Illegal MMX vector element size");
3185     return FixedVectorType::get(IntegerType::get(*MS.C, EltSizeInBits),
3186                                 X86_MMXSizeInBits / EltSizeInBits);
3187   }
3188 
3189   // Returns a signed counterpart for an (un)signed-saturate-and-pack
3190   // intrinsic.
3191   Intrinsic::ID getSignedPackIntrinsic(Intrinsic::ID id) {
3192     switch (id) {
3193     case Intrinsic::x86_sse2_packsswb_128:
3194     case Intrinsic::x86_sse2_packuswb_128:
3195       return Intrinsic::x86_sse2_packsswb_128;
3196 
3197     case Intrinsic::x86_sse2_packssdw_128:
3198     case Intrinsic::x86_sse41_packusdw:
3199       return Intrinsic::x86_sse2_packssdw_128;
3200 
3201     case Intrinsic::x86_avx2_packsswb:
3202     case Intrinsic::x86_avx2_packuswb:
3203       return Intrinsic::x86_avx2_packsswb;
3204 
3205     case Intrinsic::x86_avx2_packssdw:
3206     case Intrinsic::x86_avx2_packusdw:
3207       return Intrinsic::x86_avx2_packssdw;
3208 
3209     case Intrinsic::x86_mmx_packsswb:
3210     case Intrinsic::x86_mmx_packuswb:
3211       return Intrinsic::x86_mmx_packsswb;
3212 
3213     case Intrinsic::x86_mmx_packssdw:
3214       return Intrinsic::x86_mmx_packssdw;
3215     default:
3216       llvm_unreachable("unexpected intrinsic id");
3217     }
3218   }
3219 
3220   // Instrument vector pack intrinsic.
3221   //
3222   // This function instruments intrinsics like x86_mmx_packsswb, that
3223   // packs elements of 2 input vectors into half as many bits with saturation.
3224   // Shadow is propagated with the signed variant of the same intrinsic applied
3225   // to sext(Sa != zeroinitializer), sext(Sb != zeroinitializer).
3226   // EltSizeInBits is used only for x86mmx arguments.
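  // In other words (a sketch): each input element's shadow is first collapsed
  // to all-zeros (clean) or all-ones (poisoned), and the signed variant of the
  // same pack is applied to those masks, so an output element is poisoned iff
  // the source element packed into it was poisoned.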
3227   void handleVectorPackIntrinsic(IntrinsicInst &I, unsigned EltSizeInBits = 0) {
3228     assert(I.arg_size() == 2);
3229     bool isX86_MMX = I.getOperand(0)->getType()->isX86_MMXTy();
3230     IRBuilder<> IRB(&I);
3231     Value *S1 = getShadow(&I, 0);
3232     Value *S2 = getShadow(&I, 1);
3233     assert(isX86_MMX || S1->getType()->isVectorTy());
3234 
3235     // SExt and ICmpNE below must apply to individual elements of input vectors.
3236     // In case of x86mmx arguments, cast them to appropriate vector types and
3237     // back.
3238     Type *T = isX86_MMX ? getMMXVectorTy(EltSizeInBits) : S1->getType();
3239     if (isX86_MMX) {
3240       S1 = IRB.CreateBitCast(S1, T);
3241       S2 = IRB.CreateBitCast(S2, T);
3242     }
3243     Value *S1_ext =
3244         IRB.CreateSExt(IRB.CreateICmpNE(S1, Constant::getNullValue(T)), T);
3245     Value *S2_ext =
3246         IRB.CreateSExt(IRB.CreateICmpNE(S2, Constant::getNullValue(T)), T);
3247     if (isX86_MMX) {
3248       Type *X86_MMXTy = Type::getX86_MMXTy(*MS.C);
3249       S1_ext = IRB.CreateBitCast(S1_ext, X86_MMXTy);
3250       S2_ext = IRB.CreateBitCast(S2_ext, X86_MMXTy);
3251     }
3252 
3253     Function *ShadowFn = Intrinsic::getDeclaration(
3254         F.getParent(), getSignedPackIntrinsic(I.getIntrinsicID()));
3255 
3256     Value *S =
3257         IRB.CreateCall(ShadowFn, {S1_ext, S2_ext}, "_msprop_vector_pack");
3258     if (isX86_MMX)
3259       S = IRB.CreateBitCast(S, getShadowTy(&I));
3260     setShadow(&I, S);
3261     setOriginForNaryOp(I);
3262   }
3263 
3264   // Instrument sum-of-absolute-differences intrinsic.
3265   void handleVectorSadIntrinsic(IntrinsicInst &I) {
3266     const unsigned SignificantBitsPerResultElement = 16;
3267     bool isX86_MMX = I.getOperand(0)->getType()->isX86_MMXTy();
3268     Type *ResTy = isX86_MMX ? IntegerType::get(*MS.C, 64) : I.getType();
3269     unsigned ZeroBitsPerResultElement =
3270         ResTy->getScalarSizeInBits() - SignificantBitsPerResultElement;
3271 
3272     IRBuilder<> IRB(&I);
3273     auto *Shadow0 = getShadow(&I, 0);
3274     auto *Shadow1 = getShadow(&I, 1);
3275     Value *S = IRB.CreateOr(Shadow0, Shadow1);
3276     S = IRB.CreateBitCast(S, ResTy);
3277     S = IRB.CreateSExt(IRB.CreateICmpNE(S, Constant::getNullValue(ResTy)),
3278                        ResTy);
3279     S = IRB.CreateLShr(S, ZeroBitsPerResultElement);
3280     S = IRB.CreateBitCast(S, getShadowTy(&I));
3281     setShadow(&I, S);
3282     setOriginForNaryOp(I);
3283   }
3284 
3285   // Instrument multiply-add intrinsic.
3286   void handleVectorPmaddIntrinsic(IntrinsicInst &I,
3287                                   unsigned EltSizeInBits = 0) {
3288     bool isX86_MMX = I.getOperand(0)->getType()->isX86_MMXTy();
3289     Type *ResTy = isX86_MMX ? getMMXVectorTy(EltSizeInBits * 2) : I.getType();
3290     IRBuilder<> IRB(&I);
3291     auto *Shadow0 = getShadow(&I, 0);
3292     auto *Shadow1 = getShadow(&I, 1);
3293     Value *S = IRB.CreateOr(Shadow0, Shadow1);
3294     S = IRB.CreateBitCast(S, ResTy);
3295     S = IRB.CreateSExt(IRB.CreateICmpNE(S, Constant::getNullValue(ResTy)),
3296                        ResTy);
3297     S = IRB.CreateBitCast(S, getShadowTy(&I));
3298     setShadow(&I, S);
3299     setOriginForNaryOp(I);
3300   }
3301 
3302   // Instrument compare-packed intrinsic.
3303   // Basically, an or followed by sext(icmp ne 0) to end up with all-zeros or
3304   // all-ones shadow.
3305   void handleVectorComparePackedIntrinsic(IntrinsicInst &I) {
3306     IRBuilder<> IRB(&I);
3307     Type *ResTy = getShadowTy(&I);
3308     auto *Shadow0 = getShadow(&I, 0);
3309     auto *Shadow1 = getShadow(&I, 1);
3310     Value *S0 = IRB.CreateOr(Shadow0, Shadow1);
3311     Value *S = IRB.CreateSExt(
3312         IRB.CreateICmpNE(S0, Constant::getNullValue(ResTy)), ResTy);
3313     setShadow(&I, S);
3314     setOriginForNaryOp(I);
3315   }
3316 
3317   // Instrument compare-scalar intrinsic.
3318   // This handles both cmp* intrinsics which return the result in the first
3319   // element of a vector, and comi* which return the result as i32.
3320   void handleVectorCompareScalarIntrinsic(IntrinsicInst &I) {
3321     IRBuilder<> IRB(&I);
3322     auto *Shadow0 = getShadow(&I, 0);
3323     auto *Shadow1 = getShadow(&I, 1);
3324     Value *S0 = IRB.CreateOr(Shadow0, Shadow1);
3325     Value *S = LowerElementShadowExtend(IRB, S0, getShadowTy(&I));
3326     setShadow(&I, S);
3327     setOriginForNaryOp(I);
3328   }
3329 
3330   // Instrument generic vector reduction intrinsics
3331   // by ORing together all their fields.
3332   void handleVectorReduceIntrinsic(IntrinsicInst &I) {
3333     IRBuilder<> IRB(&I);
3334     Value *S = IRB.CreateOrReduce(getShadow(&I, 0));
3335     setShadow(&I, S);
3336     setOrigin(&I, getOrigin(&I, 0));
3337   }
3338 
3339   // Instrument vector.reduce.or intrinsic.
3340   // Valid (non-poisoned) set bits in the operand pull low the
3341   // corresponding shadow bits.
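  // Worked example (illustrative): reducing the two i2 elements {0b01 (fully
  // initialized), 0b0? (bit 0 poisoned)} - bit 0 of the result is clean even
  // though one element's bit 0 is poisoned, because the other element
  // definitely contributes a 1 there; bit 1 is clean because neither element's
  // bit 1 is poisoned.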
3342   void handleVectorReduceOrIntrinsic(IntrinsicInst &I) {
3343     IRBuilder<> IRB(&I);
3344     Value *OperandShadow = getShadow(&I, 0);
3345     Value *OperandUnsetBits = IRB.CreateNot(I.getOperand(0));
3346     Value *OperandUnsetOrPoison = IRB.CreateOr(OperandUnsetBits, OperandShadow);
3347     // Bit N is clean if any field's bit N is 1 and unpoisoned
3348     Value *OutShadowMask = IRB.CreateAndReduce(OperandUnsetOrPoison);
3349     // Otherwise, it is clean if every field's bit N is unpoisoned
3350     Value *OrShadow = IRB.CreateOrReduce(OperandShadow);
3351     Value *S = IRB.CreateAnd(OutShadowMask, OrShadow);
3352 
3353     setShadow(&I, S);
3354     setOrigin(&I, getOrigin(&I, 0));
3355   }
3356 
3357   // Instrument vector.reduce.and intrinsic.
3358   // Valid (non-poisoned) unset bits in the operand pull down the
3359   // corresponding shadow bits.
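  // Dually to vector.reduce.or (a sketch): a definitely-0 bit in any element
  // forces the corresponding bit of the AND to 0, so that result bit stays
  // clean even if the same bit is poisoned in another element.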
3360   void handleVectorReduceAndIntrinsic(IntrinsicInst &I) {
3361     IRBuilder<> IRB(&I);
3362     Value *OperandShadow = getShadow(&I, 0);
3363     Value *OperandSetOrPoison = IRB.CreateOr(I.getOperand(0), OperandShadow);
3364     // Bit N is clean if any field's bit N is 0 and unpoisoned
3365     Value *OutShadowMask = IRB.CreateAndReduce(OperandSetOrPoison);
3366     // Otherwise, it is clean if every field's bit N is unpoisoned
3367     Value *OrShadow = IRB.CreateOrReduce(OperandShadow);
3368     Value *S = IRB.CreateAnd(OutShadowMask, OrShadow);
3369 
3370     setShadow(&I, S);
3371     setOrigin(&I, getOrigin(&I, 0));
3372   }
3373 
3374   void handleStmxcsr(IntrinsicInst &I) {
3375     IRBuilder<> IRB(&I);
3376     Value *Addr = I.getArgOperand(0);
3377     Type *Ty = IRB.getInt32Ty();
3378     Value *ShadowPtr =
3379         getShadowOriginPtr(Addr, IRB, Ty, Align(1), /*isStore*/ true).first;
3380 
3381     IRB.CreateStore(getCleanShadow(Ty), ShadowPtr);
3382 
3383     if (ClCheckAccessAddress)
3384       insertShadowCheck(Addr, &I);
3385   }
3386 
3387   void handleLdmxcsr(IntrinsicInst &I) {
3388     if (!InsertChecks)
3389       return;
3390 
3391     IRBuilder<> IRB(&I);
3392     Value *Addr = I.getArgOperand(0);
3393     Type *Ty = IRB.getInt32Ty();
3394     const Align Alignment = Align(1);
3395     Value *ShadowPtr, *OriginPtr;
3396     std::tie(ShadowPtr, OriginPtr) =
3397         getShadowOriginPtr(Addr, IRB, Ty, Alignment, /*isStore*/ false);
3398 
3399     if (ClCheckAccessAddress)
3400       insertShadowCheck(Addr, &I);
3401 
3402     Value *Shadow = IRB.CreateAlignedLoad(Ty, ShadowPtr, Alignment, "_ldmxcsr");
3403     Value *Origin = MS.TrackOrigins ? IRB.CreateLoad(MS.OriginTy, OriginPtr)
3404                                     : getCleanOrigin();
3405     insertShadowCheck(Shadow, Origin, &I);
3406   }
3407 
3408   void handleMaskedExpandLoad(IntrinsicInst &I) {
3409     IRBuilder<> IRB(&I);
3410     Value *Ptr = I.getArgOperand(0);
3411     Value *Mask = I.getArgOperand(1);
3412     Value *PassThru = I.getArgOperand(2);
3413 
3414     if (ClCheckAccessAddress) {
3415       insertShadowCheck(Ptr, &I);
3416       insertShadowCheck(Mask, &I);
3417     }
3418 
3419     if (!PropagateShadow) {
3420       setShadow(&I, getCleanShadow(&I));
3421       setOrigin(&I, getCleanOrigin());
3422       return;
3423     }
3424 
3425     Type *ShadowTy = getShadowTy(&I);
3426     Type *ElementShadowTy = cast<VectorType>(ShadowTy)->getElementType();
3427     auto [ShadowPtr, OriginPtr] =
3428         getShadowOriginPtr(Ptr, IRB, ElementShadowTy, {}, /*isStore*/ false);
3429 
3430     Value *Shadow = IRB.CreateMaskedExpandLoad(
3431         ShadowTy, ShadowPtr, Mask, getShadow(PassThru), "_msmaskedexpload");
3432 
3433     setShadow(&I, Shadow);
3434 
3435     // TODO: Store origins.
3436     setOrigin(&I, getCleanOrigin());
3437   }
3438 
3439   void handleMaskedCompressStore(IntrinsicInst &I) {
3440     IRBuilder<> IRB(&I);
3441     Value *Values = I.getArgOperand(0);
3442     Value *Ptr = I.getArgOperand(1);
3443     Value *Mask = I.getArgOperand(2);
3444 
3445     if (ClCheckAccessAddress) {
3446       insertShadowCheck(Ptr, &I);
3447       insertShadowCheck(Mask, &I);
3448     }
3449 
3450     Value *Shadow = getShadow(Values);
3451     Type *ElementShadowTy =
3452         getShadowTy(cast<VectorType>(Values->getType())->getElementType());
3453     auto [ShadowPtr, OriginPtrs] =
3454         getShadowOriginPtr(Ptr, IRB, ElementShadowTy, {}, /*isStore*/ true);
3455 
3456     IRB.CreateMaskedCompressStore(Shadow, ShadowPtr, Mask);
3457 
3458     // TODO: Store origins.
3459   }
3460 
3461   void handleMaskedGather(IntrinsicInst &I) {
3462     IRBuilder<> IRB(&I);
3463     Value *Ptrs = I.getArgOperand(0);
3464     const Align Alignment(
3465         cast<ConstantInt>(I.getArgOperand(1))->getZExtValue());
3466     Value *Mask = I.getArgOperand(2);
3467     Value *PassThru = I.getArgOperand(3);
3468 
3469     Type *PtrsShadowTy = getShadowTy(Ptrs);
3470     if (ClCheckAccessAddress) {
3471       insertShadowCheck(Mask, &I);
3472       Value *MaskedPtrShadow = IRB.CreateSelect(
3473           Mask, getShadow(Ptrs), Constant::getNullValue((PtrsShadowTy)),
3474           "_msmaskedptrs");
3475       insertShadowCheck(MaskedPtrShadow, getOrigin(Ptrs), &I);
3476     }
3477 
3478     if (!PropagateShadow) {
3479       setShadow(&I, getCleanShadow(&I));
3480       setOrigin(&I, getCleanOrigin());
3481       return;
3482     }
3483 
3484     Type *ShadowTy = getShadowTy(&I);
3485     Type *ElementShadowTy = cast<VectorType>(ShadowTy)->getElementType();
3486     auto [ShadowPtrs, OriginPtrs] = getShadowOriginPtr(
3487         Ptrs, IRB, ElementShadowTy, Alignment, /*isStore*/ false);
3488 
3489     Value *Shadow =
3490         IRB.CreateMaskedGather(ShadowTy, ShadowPtrs, Alignment, Mask,
3491                                getShadow(PassThru), "_msmaskedgather");
3492 
3493     setShadow(&I, Shadow);
3494 
3495     // TODO: Store origins.
3496     setOrigin(&I, getCleanOrigin());
3497   }
3498 
3499   void handleMaskedScatter(IntrinsicInst &I) {
3500     IRBuilder<> IRB(&I);
3501     Value *Values = I.getArgOperand(0);
3502     Value *Ptrs = I.getArgOperand(1);
3503     const Align Alignment(
3504         cast<ConstantInt>(I.getArgOperand(2))->getZExtValue());
3505     Value *Mask = I.getArgOperand(3);
3506 
3507     Type *PtrsShadowTy = getShadowTy(Ptrs);
3508     if (ClCheckAccessAddress) {
3509       insertShadowCheck(Mask, &I);
3510       Value *MaskedPtrShadow = IRB.CreateSelect(
3511           Mask, getShadow(Ptrs), Constant::getNullValue((PtrsShadowTy)),
3512           "_msmaskedptrs");
3513       insertShadowCheck(MaskedPtrShadow, getOrigin(Ptrs), &I);
3514     }
3515 
3516     Value *Shadow = getShadow(Values);
3517     Type *ElementShadowTy =
3518         getShadowTy(cast<VectorType>(Values->getType())->getElementType());
3519     auto [ShadowPtrs, OriginPtrs] = getShadowOriginPtr(
3520         Ptrs, IRB, ElementShadowTy, Alignment, /*isStore*/ true);
3521 
3522     IRB.CreateMaskedScatter(Shadow, ShadowPtrs, Alignment, Mask);
3523 
3524     // TODO: Store origin.
3525   }
3526 
3527   void handleMaskedStore(IntrinsicInst &I) {
3528     IRBuilder<> IRB(&I);
3529     Value *V = I.getArgOperand(0);
3530     Value *Ptr = I.getArgOperand(1);
3531     const Align Alignment(
3532         cast<ConstantInt>(I.getArgOperand(2))->getZExtValue());
3533     Value *Mask = I.getArgOperand(3);
3534     Value *Shadow = getShadow(V);
3535 
3536     if (ClCheckAccessAddress) {
3537       insertShadowCheck(Ptr, &I);
3538       insertShadowCheck(Mask, &I);
3539     }
3540 
3541     Value *ShadowPtr;
3542     Value *OriginPtr;
3543     std::tie(ShadowPtr, OriginPtr) = getShadowOriginPtr(
3544         Ptr, IRB, Shadow->getType(), Alignment, /*isStore*/ true);
3545 
3546     IRB.CreateMaskedStore(Shadow, ShadowPtr, Alignment, Mask);
3547 
3548     if (!MS.TrackOrigins)
3549       return;
3550 
3551     auto &DL = F.getParent()->getDataLayout();
3552     paintOrigin(IRB, getOrigin(V), OriginPtr,
3553                 DL.getTypeStoreSize(Shadow->getType()),
3554                 std::max(Alignment, kMinOriginAlignment));
3555   }
3556 
3557   void handleMaskedLoad(IntrinsicInst &I) {
3558     IRBuilder<> IRB(&I);
3559     Value *Ptr = I.getArgOperand(0);
3560     const Align Alignment(
3561         cast<ConstantInt>(I.getArgOperand(1))->getZExtValue());
3562     Value *Mask = I.getArgOperand(2);
3563     Value *PassThru = I.getArgOperand(3);
3564 
3565     if (ClCheckAccessAddress) {
3566       insertShadowCheck(Ptr, &I);
3567       insertShadowCheck(Mask, &I);
3568     }
3569 
3570     if (!PropagateShadow) {
3571       setShadow(&I, getCleanShadow(&I));
3572       setOrigin(&I, getCleanOrigin());
3573       return;
3574     }
3575 
3576     Type *ShadowTy = getShadowTy(&I);
3577     Value *ShadowPtr, *OriginPtr;
3578     std::tie(ShadowPtr, OriginPtr) =
3579         getShadowOriginPtr(Ptr, IRB, ShadowTy, Alignment, /*isStore*/ false);
3580     setShadow(&I, IRB.CreateMaskedLoad(ShadowTy, ShadowPtr, Alignment, Mask,
3581                                        getShadow(PassThru), "_msmaskedld"));
3582 
3583     if (!MS.TrackOrigins)
3584       return;
3585 
3586     // Choose between PassThru's and the loaded value's origins.
3587     Value *MaskedPassThruShadow = IRB.CreateAnd(
3588         getShadow(PassThru), IRB.CreateSExt(IRB.CreateNeg(Mask), ShadowTy));
3589 
3590     Value *NotNull = convertToBool(MaskedPassThruShadow, IRB, "_mscmp");
3591 
3592     Value *PtrOrigin = IRB.CreateLoad(MS.OriginTy, OriginPtr);
3593     Value *Origin = IRB.CreateSelect(NotNull, getOrigin(PassThru), PtrOrigin);
3594 
3595     setOrigin(&I, Origin);
3596   }
3597 
3598   // Instrument BMI / BMI2 intrinsics.
3599   // All of these intrinsics are Z = I(X, Y)
3600   // where the types of all operands and the result match, and are either i32 or
3601   // i64. The following instrumentation happens to work for all of them:
3602   //   Sz = I(Sx, Y) | (sext (Sy != 0))
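  // For example (illustrative), for %r = call i64 @llvm.x86.bmi.pext.64(%x, %m)
  // the shadow is pext of %x's shadow with the real mask %m, ORed with all-ones
  // whenever any bit of %m's shadow is set.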
3603   void handleBmiIntrinsic(IntrinsicInst &I) {
3604     IRBuilder<> IRB(&I);
3605     Type *ShadowTy = getShadowTy(&I);
3606 
3607     // If any bit of the mask operand is poisoned, then the whole thing is.
3608     Value *SMask = getShadow(&I, 1);
3609     SMask = IRB.CreateSExt(IRB.CreateICmpNE(SMask, getCleanShadow(ShadowTy)),
3610                            ShadowTy);
3611     // Apply the same intrinsic to the shadow of the first operand.
3612     Value *S = IRB.CreateCall(I.getCalledFunction(),
3613                               {getShadow(&I, 0), I.getOperand(1)});
3614     S = IRB.CreateOr(SMask, S);
3615     setShadow(&I, S);
3616     setOriginForNaryOp(I);
3617   }
3618 
3619   SmallVector<int, 8> getPclmulMask(unsigned Width, bool OddElements) {
3620     SmallVector<int, 8> Mask;
3621     for (unsigned X = OddElements ? 1 : 0; X < Width; X += 2) {
3622       Mask.append(2, X);
3623     }
3624     return Mask;
3625   }
3626 
3627   // Instrument pclmul intrinsics.
3628   // These intrinsics operate either on odd or on even elements of the input
3629   // vectors, depending on the constant in the 3rd argument, ignoring the rest.
3630   // Replace the unused elements with copies of the used ones, ex:
3631   //   (0, 1, 2, 3) -> (0, 0, 2, 2) (even case)
3632   // or
3633   //   (0, 1, 2, 3) -> (1, 1, 3, 3) (odd case)
3634   // and then apply the usual shadow combining logic.
3635   void handlePclmulIntrinsic(IntrinsicInst &I) {
3636     IRBuilder<> IRB(&I);
3637     unsigned Width =
3638         cast<FixedVectorType>(I.getArgOperand(0)->getType())->getNumElements();
3639     assert(isa<ConstantInt>(I.getArgOperand(2)) &&
3640            "pclmul 3rd operand must be a constant");
3641     unsigned Imm = cast<ConstantInt>(I.getArgOperand(2))->getZExtValue();
3642     Value *Shuf0 = IRB.CreateShuffleVector(getShadow(&I, 0),
3643                                            getPclmulMask(Width, Imm & 0x01));
3644     Value *Shuf1 = IRB.CreateShuffleVector(getShadow(&I, 1),
3645                                            getPclmulMask(Width, Imm & 0x10));
3646     ShadowAndOriginCombiner SOC(this, IRB);
3647     SOC.Add(Shuf0, getOrigin(&I, 0));
3648     SOC.Add(Shuf1, getOrigin(&I, 1));
3649     SOC.Done(&I);
3650   }
3651 
3652   // Instrument _mm_*_sd|ss intrinsics
3653   void handleUnarySdSsIntrinsic(IntrinsicInst &I) {
3654     IRBuilder<> IRB(&I);
3655     unsigned Width =
3656         cast<FixedVectorType>(I.getArgOperand(0)->getType())->getNumElements();
3657     Value *First = getShadow(&I, 0);
3658     Value *Second = getShadow(&I, 1);
3659     // First element of second operand, remaining elements of first operand
3660     SmallVector<int, 16> Mask;
3661     Mask.push_back(Width);
3662     for (unsigned i = 1; i < Width; i++)
3663       Mask.push_back(i);
3664     Value *Shadow = IRB.CreateShuffleVector(First, Second, Mask);
3665 
3666     setShadow(&I, Shadow);
3667     setOriginForNaryOp(I);
3668   }
3669 
3670   void handleVtestIntrinsic(IntrinsicInst &I) {
3671     IRBuilder<> IRB(&I);
3672     Value *Shadow0 = getShadow(&I, 0);
3673     Value *Shadow1 = getShadow(&I, 1);
3674     Value *Or = IRB.CreateOr(Shadow0, Shadow1);
3675     Value *NZ = IRB.CreateICmpNE(Or, Constant::getNullValue(Or->getType()));
3676     Value *Scalar = convertShadowToScalar(NZ, IRB);
3677     Value *Shadow = IRB.CreateZExt(Scalar, getShadowTy(&I));
3678 
3679     setShadow(&I, Shadow);
3680     setOriginForNaryOp(I);
3681   }
3682 
3683   void handleBinarySdSsIntrinsic(IntrinsicInst &I) {
3684     IRBuilder<> IRB(&I);
3685     unsigned Width =
3686         cast<FixedVectorType>(I.getArgOperand(0)->getType())->getNumElements();
3687     Value *First = getShadow(&I, 0);
3688     Value *Second = getShadow(&I, 1);
3689     Value *OrShadow = IRB.CreateOr(First, Second);
3690     // First element of both OR'd together, remaining elements of first operand
3691     SmallVector<int, 16> Mask;
3692     Mask.push_back(Width);
3693     for (unsigned i = 1; i < Width; i++)
3694       Mask.push_back(i);
3695     Value *Shadow = IRB.CreateShuffleVector(First, OrShadow, Mask);
3696 
3697     setShadow(&I, Shadow);
3698     setOriginForNaryOp(I);
3699   }
3700 
3701   // Instrument abs intrinsic.
3702   // handleUnknownIntrinsic can't handle it because of the last
3703   // is_int_min_poison argument which does not match the result type.
3704   void handleAbsIntrinsic(IntrinsicInst &I) {
3705     assert(I.getType()->isIntOrIntVectorTy());
3706     assert(I.getArgOperand(0)->getType() == I.getType());
3707 
3708     // FIXME: Handle is_int_min_poison.
3709     IRBuilder<> IRB(&I);
3710     setShadow(&I, getShadow(&I, 0));
3711     setOrigin(&I, getOrigin(&I, 0));
3712   }
3713 
3714   void handleIsFpClass(IntrinsicInst &I) {
3715     IRBuilder<> IRB(&I);
3716     Value *Shadow = getShadow(&I, 0);
3717     setShadow(&I, IRB.CreateICmpNE(Shadow, getCleanShadow(Shadow)));
3718     setOrigin(&I, getOrigin(&I, 0));
3719   }
3720 
3721   void visitIntrinsicInst(IntrinsicInst &I) {
3722     switch (I.getIntrinsicID()) {
3723     case Intrinsic::abs:
3724       handleAbsIntrinsic(I);
3725       break;
3726     case Intrinsic::is_fpclass:
3727       handleIsFpClass(I);
3728       break;
3729     case Intrinsic::lifetime_start:
3730       handleLifetimeStart(I);
3731       break;
3732     case Intrinsic::launder_invariant_group:
3733     case Intrinsic::strip_invariant_group:
3734       handleInvariantGroup(I);
3735       break;
3736     case Intrinsic::bswap:
3737       handleBswap(I);
3738       break;
3739     case Intrinsic::ctlz:
3740     case Intrinsic::cttz:
3741       handleCountZeroes(I);
3742       break;
3743     case Intrinsic::masked_compressstore:
3744       handleMaskedCompressStore(I);
3745       break;
3746     case Intrinsic::masked_expandload:
3747       handleMaskedExpandLoad(I);
3748       break;
3749     case Intrinsic::masked_gather:
3750       handleMaskedGather(I);
3751       break;
3752     case Intrinsic::masked_scatter:
3753       handleMaskedScatter(I);
3754       break;
3755     case Intrinsic::masked_store:
3756       handleMaskedStore(I);
3757       break;
3758     case Intrinsic::masked_load:
3759       handleMaskedLoad(I);
3760       break;
3761     case Intrinsic::vector_reduce_and:
3762       handleVectorReduceAndIntrinsic(I);
3763       break;
3764     case Intrinsic::vector_reduce_or:
3765       handleVectorReduceOrIntrinsic(I);
3766       break;
3767     case Intrinsic::vector_reduce_add:
3768     case Intrinsic::vector_reduce_xor:
3769     case Intrinsic::vector_reduce_mul:
3770       handleVectorReduceIntrinsic(I);
3771       break;
3772     case Intrinsic::x86_sse_stmxcsr:
3773       handleStmxcsr(I);
3774       break;
3775     case Intrinsic::x86_sse_ldmxcsr:
3776       handleLdmxcsr(I);
3777       break;
3778     case Intrinsic::x86_avx512_vcvtsd2usi64:
3779     case Intrinsic::x86_avx512_vcvtsd2usi32:
3780     case Intrinsic::x86_avx512_vcvtss2usi64:
3781     case Intrinsic::x86_avx512_vcvtss2usi32:
3782     case Intrinsic::x86_avx512_cvttss2usi64:
3783     case Intrinsic::x86_avx512_cvttss2usi:
3784     case Intrinsic::x86_avx512_cvttsd2usi64:
3785     case Intrinsic::x86_avx512_cvttsd2usi:
3786     case Intrinsic::x86_avx512_cvtusi2ss:
3787     case Intrinsic::x86_avx512_cvtusi642sd:
3788     case Intrinsic::x86_avx512_cvtusi642ss:
3789       handleVectorConvertIntrinsic(I, 1, true);
3790       break;
3791     case Intrinsic::x86_sse2_cvtsd2si64:
3792     case Intrinsic::x86_sse2_cvtsd2si:
3793     case Intrinsic::x86_sse2_cvtsd2ss:
3794     case Intrinsic::x86_sse2_cvttsd2si64:
3795     case Intrinsic::x86_sse2_cvttsd2si:
3796     case Intrinsic::x86_sse_cvtss2si64:
3797     case Intrinsic::x86_sse_cvtss2si:
3798     case Intrinsic::x86_sse_cvttss2si64:
3799     case Intrinsic::x86_sse_cvttss2si:
3800       handleVectorConvertIntrinsic(I, 1);
3801       break;
3802     case Intrinsic::x86_sse_cvtps2pi:
3803     case Intrinsic::x86_sse_cvttps2pi:
3804       handleVectorConvertIntrinsic(I, 2);
3805       break;
3806 
3807     case Intrinsic::x86_avx512_psll_w_512:
3808     case Intrinsic::x86_avx512_psll_d_512:
3809     case Intrinsic::x86_avx512_psll_q_512:
3810     case Intrinsic::x86_avx512_pslli_w_512:
3811     case Intrinsic::x86_avx512_pslli_d_512:
3812     case Intrinsic::x86_avx512_pslli_q_512:
3813     case Intrinsic::x86_avx512_psrl_w_512:
3814     case Intrinsic::x86_avx512_psrl_d_512:
3815     case Intrinsic::x86_avx512_psrl_q_512:
3816     case Intrinsic::x86_avx512_psra_w_512:
3817     case Intrinsic::x86_avx512_psra_d_512:
3818     case Intrinsic::x86_avx512_psra_q_512:
3819     case Intrinsic::x86_avx512_psrli_w_512:
3820     case Intrinsic::x86_avx512_psrli_d_512:
3821     case Intrinsic::x86_avx512_psrli_q_512:
3822     case Intrinsic::x86_avx512_psrai_w_512:
3823     case Intrinsic::x86_avx512_psrai_d_512:
3824     case Intrinsic::x86_avx512_psrai_q_512:
3825     case Intrinsic::x86_avx512_psra_q_256:
3826     case Intrinsic::x86_avx512_psra_q_128:
3827     case Intrinsic::x86_avx512_psrai_q_256:
3828     case Intrinsic::x86_avx512_psrai_q_128:
3829     case Intrinsic::x86_avx2_psll_w:
3830     case Intrinsic::x86_avx2_psll_d:
3831     case Intrinsic::x86_avx2_psll_q:
3832     case Intrinsic::x86_avx2_pslli_w:
3833     case Intrinsic::x86_avx2_pslli_d:
3834     case Intrinsic::x86_avx2_pslli_q:
3835     case Intrinsic::x86_avx2_psrl_w:
3836     case Intrinsic::x86_avx2_psrl_d:
3837     case Intrinsic::x86_avx2_psrl_q:
3838     case Intrinsic::x86_avx2_psra_w:
3839     case Intrinsic::x86_avx2_psra_d:
3840     case Intrinsic::x86_avx2_psrli_w:
3841     case Intrinsic::x86_avx2_psrli_d:
3842     case Intrinsic::x86_avx2_psrli_q:
3843     case Intrinsic::x86_avx2_psrai_w:
3844     case Intrinsic::x86_avx2_psrai_d:
3845     case Intrinsic::x86_sse2_psll_w:
3846     case Intrinsic::x86_sse2_psll_d:
3847     case Intrinsic::x86_sse2_psll_q:
3848     case Intrinsic::x86_sse2_pslli_w:
3849     case Intrinsic::x86_sse2_pslli_d:
3850     case Intrinsic::x86_sse2_pslli_q:
3851     case Intrinsic::x86_sse2_psrl_w:
3852     case Intrinsic::x86_sse2_psrl_d:
3853     case Intrinsic::x86_sse2_psrl_q:
3854     case Intrinsic::x86_sse2_psra_w:
3855     case Intrinsic::x86_sse2_psra_d:
3856     case Intrinsic::x86_sse2_psrli_w:
3857     case Intrinsic::x86_sse2_psrli_d:
3858     case Intrinsic::x86_sse2_psrli_q:
3859     case Intrinsic::x86_sse2_psrai_w:
3860     case Intrinsic::x86_sse2_psrai_d:
3861     case Intrinsic::x86_mmx_psll_w:
3862     case Intrinsic::x86_mmx_psll_d:
3863     case Intrinsic::x86_mmx_psll_q:
3864     case Intrinsic::x86_mmx_pslli_w:
3865     case Intrinsic::x86_mmx_pslli_d:
3866     case Intrinsic::x86_mmx_pslli_q:
3867     case Intrinsic::x86_mmx_psrl_w:
3868     case Intrinsic::x86_mmx_psrl_d:
3869     case Intrinsic::x86_mmx_psrl_q:
3870     case Intrinsic::x86_mmx_psra_w:
3871     case Intrinsic::x86_mmx_psra_d:
3872     case Intrinsic::x86_mmx_psrli_w:
3873     case Intrinsic::x86_mmx_psrli_d:
3874     case Intrinsic::x86_mmx_psrli_q:
3875     case Intrinsic::x86_mmx_psrai_w:
3876     case Intrinsic::x86_mmx_psrai_d:
3877       handleVectorShiftIntrinsic(I, /* Variable */ false);
3878       break;
3879     case Intrinsic::x86_avx2_psllv_d:
3880     case Intrinsic::x86_avx2_psllv_d_256:
3881     case Intrinsic::x86_avx512_psllv_d_512:
3882     case Intrinsic::x86_avx2_psllv_q:
3883     case Intrinsic::x86_avx2_psllv_q_256:
3884     case Intrinsic::x86_avx512_psllv_q_512:
3885     case Intrinsic::x86_avx2_psrlv_d:
3886     case Intrinsic::x86_avx2_psrlv_d_256:
3887     case Intrinsic::x86_avx512_psrlv_d_512:
3888     case Intrinsic::x86_avx2_psrlv_q:
3889     case Intrinsic::x86_avx2_psrlv_q_256:
3890     case Intrinsic::x86_avx512_psrlv_q_512:
3891     case Intrinsic::x86_avx2_psrav_d:
3892     case Intrinsic::x86_avx2_psrav_d_256:
3893     case Intrinsic::x86_avx512_psrav_d_512:
3894     case Intrinsic::x86_avx512_psrav_q_128:
3895     case Intrinsic::x86_avx512_psrav_q_256:
3896     case Intrinsic::x86_avx512_psrav_q_512:
3897       handleVectorShiftIntrinsic(I, /* Variable */ true);
3898       break;
3899 
3900     case Intrinsic::x86_sse2_packsswb_128:
3901     case Intrinsic::x86_sse2_packssdw_128:
3902     case Intrinsic::x86_sse2_packuswb_128:
3903     case Intrinsic::x86_sse41_packusdw:
3904     case Intrinsic::x86_avx2_packsswb:
3905     case Intrinsic::x86_avx2_packssdw:
3906     case Intrinsic::x86_avx2_packuswb:
3907     case Intrinsic::x86_avx2_packusdw:
3908       handleVectorPackIntrinsic(I);
3909       break;
3910 
3911     case Intrinsic::x86_mmx_packsswb:
3912     case Intrinsic::x86_mmx_packuswb:
3913       handleVectorPackIntrinsic(I, 16);
3914       break;
3915 
3916     case Intrinsic::x86_mmx_packssdw:
3917       handleVectorPackIntrinsic(I, 32);
3918       break;
3919 
3920     case Intrinsic::x86_mmx_psad_bw:
3921     case Intrinsic::x86_sse2_psad_bw:
3922     case Intrinsic::x86_avx2_psad_bw:
3923       handleVectorSadIntrinsic(I);
3924       break;
3925 
3926     case Intrinsic::x86_sse2_pmadd_wd:
3927     case Intrinsic::x86_avx2_pmadd_wd:
3928     case Intrinsic::x86_ssse3_pmadd_ub_sw_128:
3929     case Intrinsic::x86_avx2_pmadd_ub_sw:
3930       handleVectorPmaddIntrinsic(I);
3931       break;
3932 
3933     case Intrinsic::x86_ssse3_pmadd_ub_sw:
3934       handleVectorPmaddIntrinsic(I, 8);
3935       break;
3936 
3937     case Intrinsic::x86_mmx_pmadd_wd:
3938       handleVectorPmaddIntrinsic(I, 16);
3939       break;
3940 
3941     case Intrinsic::x86_sse_cmp_ss:
3942     case Intrinsic::x86_sse2_cmp_sd:
3943     case Intrinsic::x86_sse_comieq_ss:
3944     case Intrinsic::x86_sse_comilt_ss:
3945     case Intrinsic::x86_sse_comile_ss:
3946     case Intrinsic::x86_sse_comigt_ss:
3947     case Intrinsic::x86_sse_comige_ss:
3948     case Intrinsic::x86_sse_comineq_ss:
3949     case Intrinsic::x86_sse_ucomieq_ss:
3950     case Intrinsic::x86_sse_ucomilt_ss:
3951     case Intrinsic::x86_sse_ucomile_ss:
3952     case Intrinsic::x86_sse_ucomigt_ss:
3953     case Intrinsic::x86_sse_ucomige_ss:
3954     case Intrinsic::x86_sse_ucomineq_ss:
3955     case Intrinsic::x86_sse2_comieq_sd:
3956     case Intrinsic::x86_sse2_comilt_sd:
3957     case Intrinsic::x86_sse2_comile_sd:
3958     case Intrinsic::x86_sse2_comigt_sd:
3959     case Intrinsic::x86_sse2_comige_sd:
3960     case Intrinsic::x86_sse2_comineq_sd:
3961     case Intrinsic::x86_sse2_ucomieq_sd:
3962     case Intrinsic::x86_sse2_ucomilt_sd:
3963     case Intrinsic::x86_sse2_ucomile_sd:
3964     case Intrinsic::x86_sse2_ucomigt_sd:
3965     case Intrinsic::x86_sse2_ucomige_sd:
3966     case Intrinsic::x86_sse2_ucomineq_sd:
3967       handleVectorCompareScalarIntrinsic(I);
3968       break;
3969 
3970     case Intrinsic::x86_avx_cmp_pd_256:
3971     case Intrinsic::x86_avx_cmp_ps_256:
3972     case Intrinsic::x86_sse2_cmp_pd:
3973     case Intrinsic::x86_sse_cmp_ps:
3974       handleVectorComparePackedIntrinsic(I);
3975       break;
3976 
3977     case Intrinsic::x86_bmi_bextr_32:
3978     case Intrinsic::x86_bmi_bextr_64:
3979     case Intrinsic::x86_bmi_bzhi_32:
3980     case Intrinsic::x86_bmi_bzhi_64:
3981     case Intrinsic::x86_bmi_pdep_32:
3982     case Intrinsic::x86_bmi_pdep_64:
3983     case Intrinsic::x86_bmi_pext_32:
3984     case Intrinsic::x86_bmi_pext_64:
3985       handleBmiIntrinsic(I);
3986       break;
3987 
3988     case Intrinsic::x86_pclmulqdq:
3989     case Intrinsic::x86_pclmulqdq_256:
3990     case Intrinsic::x86_pclmulqdq_512:
3991       handlePclmulIntrinsic(I);
3992       break;
3993 
3994     case Intrinsic::x86_sse41_round_sd:
3995     case Intrinsic::x86_sse41_round_ss:
3996       handleUnarySdSsIntrinsic(I);
3997       break;
3998     case Intrinsic::x86_sse2_max_sd:
3999     case Intrinsic::x86_sse_max_ss:
4000     case Intrinsic::x86_sse2_min_sd:
4001     case Intrinsic::x86_sse_min_ss:
4002       handleBinarySdSsIntrinsic(I);
4003       break;
4004 
4005     case Intrinsic::x86_avx_vtestc_pd:
4006     case Intrinsic::x86_avx_vtestc_pd_256:
4007     case Intrinsic::x86_avx_vtestc_ps:
4008     case Intrinsic::x86_avx_vtestc_ps_256:
4009     case Intrinsic::x86_avx_vtestnzc_pd:
4010     case Intrinsic::x86_avx_vtestnzc_pd_256:
4011     case Intrinsic::x86_avx_vtestnzc_ps:
4012     case Intrinsic::x86_avx_vtestnzc_ps_256:
4013     case Intrinsic::x86_avx_vtestz_pd:
4014     case Intrinsic::x86_avx_vtestz_pd_256:
4015     case Intrinsic::x86_avx_vtestz_ps:
4016     case Intrinsic::x86_avx_vtestz_ps_256:
4017     case Intrinsic::x86_avx_ptestc_256:
4018     case Intrinsic::x86_avx_ptestnzc_256:
4019     case Intrinsic::x86_avx_ptestz_256:
4020     case Intrinsic::x86_sse41_ptestc:
4021     case Intrinsic::x86_sse41_ptestnzc:
4022     case Intrinsic::x86_sse41_ptestz:
4023       handleVtestIntrinsic(I);
4024       break;
4025 
4026     case Intrinsic::fshl:
4027     case Intrinsic::fshr:
4028       handleFunnelShift(I);
4029       break;
4030 
4031     case Intrinsic::is_constant:
4032       // The result of llvm.is.constant() is always defined.
4033       setShadow(&I, getCleanShadow(&I));
4034       setOrigin(&I, getCleanOrigin());
4035       break;
4036 
4037     default:
4038       if (!handleUnknownIntrinsic(I))
4039         visitInstruction(I);
4040       break;
4041     }
4042   }
4043 
4044   void visitLibAtomicLoad(CallBase &CB) {
4045     // Since we use getNextNode here, we can't have CB terminate the BB.
4046     assert(isa<CallInst>(CB));
4047 
4048     IRBuilder<> IRB(&CB);
4049     Value *Size = CB.getArgOperand(0);
4050     Value *SrcPtr = CB.getArgOperand(1);
4051     Value *DstPtr = CB.getArgOperand(2);
4052     Value *Ordering = CB.getArgOperand(3);
4053     // Convert the call to have at least Acquire ordering to make sure
4054     // the shadow operations aren't reordered before it.
4055     Value *NewOrdering =
4056         IRB.CreateExtractElement(makeAddAcquireOrderingTable(IRB), Ordering);
4057     CB.setArgOperand(3, NewOrdering);
4058 
4059     NextNodeIRBuilder NextIRB(&CB);
4060     Value *SrcShadowPtr, *SrcOriginPtr;
4061     std::tie(SrcShadowPtr, SrcOriginPtr) =
4062         getShadowOriginPtr(SrcPtr, NextIRB, NextIRB.getInt8Ty(), Align(1),
4063                            /*isStore*/ false);
4064     Value *DstShadowPtr =
4065         getShadowOriginPtr(DstPtr, NextIRB, NextIRB.getInt8Ty(), Align(1),
4066                            /*isStore*/ true)
4067             .first;
4068 
4069     NextIRB.CreateMemCpy(DstShadowPtr, Align(1), SrcShadowPtr, Align(1), Size);
4070     if (MS.TrackOrigins) {
4071       Value *SrcOrigin = NextIRB.CreateAlignedLoad(MS.OriginTy, SrcOriginPtr,
4072                                                    kMinOriginAlignment);
4073       Value *NewOrigin = updateOrigin(SrcOrigin, NextIRB);
4074       NextIRB.CreateCall(MS.MsanSetOriginFn, {DstPtr, Size, NewOrigin});
4075     }
4076   }
4077 
4078   void visitLibAtomicStore(CallBase &CB) {
4079     IRBuilder<> IRB(&CB);
4080     Value *Size = CB.getArgOperand(0);
4081     Value *DstPtr = CB.getArgOperand(2);
4082     Value *Ordering = CB.getArgOperand(3);
4083     // Convert the call to have at least Release ordering to make sure
4084     // the shadow operations aren't reordered after it.
4085     Value *NewOrdering =
4086         IRB.CreateExtractElement(makeAddReleaseOrderingTable(IRB), Ordering);
4087     CB.setArgOperand(3, NewOrdering);
4088 
4089     Value *DstShadowPtr =
4090         getShadowOriginPtr(DstPtr, IRB, IRB.getInt8Ty(), Align(1),
4091                            /*isStore*/ true)
4092             .first;
4093 
4094     // Atomic store always paints clean shadow/origin. See file header.
4095     IRB.CreateMemSet(DstShadowPtr, getCleanShadow(IRB.getInt8Ty()), Size,
4096                      Align(1));
4097   }
4098 
4099   void visitCallBase(CallBase &CB) {
4100     assert(!CB.getMetadata(LLVMContext::MD_nosanitize));
4101     if (CB.isInlineAsm()) {
4102       // For inline asm (either a call to an asm function or a callbr
4103       // instruction), do the usual thing: check the argument shadow and mark
4104       // all outputs as clean. Note that any side effects of the inline asm
4105       // that are not immediately visible in its constraints are not handled.
4106       // Conservative asm handling is enabled by default for KMSAN.
4107       bool HandleAsm = ClHandleAsmConservative.getNumOccurrences()
4108                            ? ClHandleAsmConservative
4109                            : MS.CompileKernel;
4110       if (HandleAsm)
4111         visitAsmInstruction(CB);
4112       else
4113         visitInstruction(CB);
4114       return;
4115     }
4116     LibFunc LF;
4117     if (TLI->getLibFunc(CB, LF)) {
4118       // libatomic.a functions need to have special handling because there isn't
4119       // a good way to intercept them or compile the library with
4120       // instrumentation.
4121       switch (LF) {
4122       case LibFunc_atomic_load:
4123         if (!isa<CallInst>(CB)) {
4124           llvm::errs() << "MSAN -- cannot instrument invoke of libatomic load. "
4125                           "Ignoring!\n";
4126           break;
4127         }
4128         visitLibAtomicLoad(CB);
4129         return;
4130       case LibFunc_atomic_store:
4131         visitLibAtomicStore(CB);
4132         return;
4133       default:
4134         break;
4135       }
4136     }
4137 
4138     if (auto *Call = dyn_cast<CallInst>(&CB)) {
4139       assert(!isa<IntrinsicInst>(Call) && "intrinsics are handled elsewhere");
4140 
4141       // We are going to insert code that relies on the fact that the callee
4142       // will become a non-readonly function after it is instrumented by us. To
4143       // prevent this code from being optimized out, mark that function
4144       // non-readonly in advance.
4145       // TODO: We can likely do better than dropping memory() completely here.
4146       AttributeMask B;
4147       B.addAttribute(Attribute::Memory).addAttribute(Attribute::Speculatable);
4148 
4149       Call->removeFnAttrs(B);
4150       if (Function *Func = Call->getCalledFunction()) {
4151         Func->removeFnAttrs(B);
4152       }
4153 
4154       maybeMarkSanitizerLibraryCallNoBuiltin(Call, TLI);
4155     }
4156     IRBuilder<> IRB(&CB);
4157     bool MayCheckCall = MS.EagerChecks;
4158     if (Function *Func = CB.getCalledFunction()) {
4159       // __sanitizer_unaligned_{load,store} functions may be called by users
4160       // and always expect shadows in TLS, so don't check them.
4161       MayCheckCall &= !Func->getName().starts_with("__sanitizer_unaligned_");
4162     }
4163 
4164     unsigned ArgOffset = 0;
4165     LLVM_DEBUG(dbgs() << "  CallSite: " << CB << "\n");
4166     for (const auto &[i, A] : llvm::enumerate(CB.args())) {
4167       if (!A->getType()->isSized()) {
4168         LLVM_DEBUG(dbgs() << "Arg " << i << " is not sized: " << CB << "\n");
4169         continue;
4170       }
4171       unsigned Size = 0;
4172       const DataLayout &DL = F.getParent()->getDataLayout();
4173 
4174       bool ByVal = CB.paramHasAttr(i, Attribute::ByVal);
4175       bool NoUndef = CB.paramHasAttr(i, Attribute::NoUndef);
4176       bool EagerCheck = MayCheckCall && !ByVal && NoUndef;
4177 
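           // A noundef, non-byval argument is checked eagerly at the call site and
           // no shadow is written to __msan_param_tls for it; Size is still set so
           // that ArgOffset below advances past this argument's TLS slot.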
4178       if (EagerCheck) {
4179         insertShadowCheck(A, &CB);
4180         Size = DL.getTypeAllocSize(A->getType());
4181       } else {
4182         Value *Store = nullptr;
4183         // Compute the Shadow for arg even if it is ByVal, because
4184         // in that case getShadow() will copy the actual arg shadow to
4185         // __msan_param_tls.
4186         Value *ArgShadow = getShadow(A);
4187         Value *ArgShadowBase = getShadowPtrForArgument(IRB, ArgOffset);
4188         LLVM_DEBUG(dbgs() << "  Arg#" << i << ": " << *A
4189                           << " Shadow: " << *ArgShadow << "\n");
4190         if (ByVal) {
4191           // ByVal arguments require special handling because they are too
4192           // large for a single load.
4193           assert(A->getType()->isPointerTy() &&
4194                  "ByVal argument is not a pointer!");
4195           Size = DL.getTypeAllocSize(CB.getParamByValType(i));
4196           if (ArgOffset + Size > kParamTLSSize)
4197             break;
4198           const MaybeAlign ParamAlignment(CB.getParamAlign(i));
4199           MaybeAlign Alignment = std::nullopt;
4200           if (ParamAlignment)
4201             Alignment = std::min(*ParamAlignment, kShadowTLSAlignment);
4202           Value *AShadowPtr, *AOriginPtr;
4203           std::tie(AShadowPtr, AOriginPtr) =
4204               getShadowOriginPtr(A, IRB, IRB.getInt8Ty(), Alignment,
4205                                  /*isStore*/ false);
4206           if (!PropagateShadow) {
4207             Store = IRB.CreateMemSet(ArgShadowBase,
4208                                      Constant::getNullValue(IRB.getInt8Ty()),
4209                                      Size, Alignment);
4210           } else {
4211             Store = IRB.CreateMemCpy(ArgShadowBase, Alignment, AShadowPtr,
4212                                      Alignment, Size);
4213             if (MS.TrackOrigins) {
4214               Value *ArgOriginBase = getOriginPtrForArgument(IRB, ArgOffset);
4215               // FIXME: OriginSize should be:
4216               // alignTo(A % kMinOriginAlignment + Size, kMinOriginAlignment)
4217               unsigned OriginSize = alignTo(Size, kMinOriginAlignment);
4218               IRB.CreateMemCpy(
4219                   ArgOriginBase,
4220                   /* by origin_tls[ArgOffset] */ kMinOriginAlignment,
4221                   AOriginPtr,
4222                   /* by getShadowOriginPtr */ kMinOriginAlignment, OriginSize);
4223             }
4224           }
4225         } else {
4226           // Any other parameters mean we need bit-grained tracking of uninit
4227           // data
4228           Size = DL.getTypeAllocSize(A->getType());
4229           if (ArgOffset + Size > kParamTLSSize)
4230             break;
4231           Store = IRB.CreateAlignedStore(ArgShadow, ArgShadowBase,
4232                                          kShadowTLSAlignment);
4233           Constant *Cst = dyn_cast<Constant>(ArgShadow);
4234           if (MS.TrackOrigins && !(Cst && Cst->isNullValue())) {
4235             IRB.CreateStore(getOrigin(A),
4236                             getOriginPtrForArgument(IRB, ArgOffset));
4237           }
4238         }
4239         (void)Store;
4240         assert(Store != nullptr);
4241         LLVM_DEBUG(dbgs() << "  Param:" << *Store << "\n");
4242       }
4243       assert(Size != 0);
4244       ArgOffset += alignTo(Size, kShadowTLSAlignment);
4245     }
4246     LLVM_DEBUG(dbgs() << "  done with call args\n");
4247 
4248     FunctionType *FT = CB.getFunctionType();
4249     if (FT->isVarArg()) {
4250       VAHelper->visitCallBase(CB, IRB);
4251     }
4252 
4253     // Now, get the shadow for the RetVal.
4254     if (!CB.getType()->isSized())
4255       return;
4256     // Don't emit the epilogue for musttail call returns.
4257     if (isa<CallInst>(CB) && cast<CallInst>(CB).isMustTailCall())
4258       return;
4259 
4260     if (MayCheckCall && CB.hasRetAttr(Attribute::NoUndef)) {
4261       setShadow(&CB, getCleanShadow(&CB));
4262       setOrigin(&CB, getCleanOrigin());
4263       return;
4264     }
4265 
4266     IRBuilder<> IRBBefore(&CB);
4267     // Until we have full dynamic coverage, make sure the retval shadow is 0.
4268     Value *Base = getShadowPtrForRetval(IRBBefore);
4269     IRBBefore.CreateAlignedStore(getCleanShadow(&CB), Base,
4270                                  kShadowTLSAlignment);
4271     BasicBlock::iterator NextInsn;
4272     if (isa<CallInst>(CB)) {
4273       NextInsn = ++CB.getIterator();
4274       assert(NextInsn != CB.getParent()->end());
4275     } else {
4276       BasicBlock *NormalDest = cast<InvokeInst>(CB).getNormalDest();
4277       if (!NormalDest->getSinglePredecessor()) {
4278         // FIXME: this case is tricky, so we are just conservative here.
4279         // Perhaps we need to split the edge between this BB and NormalDest,
4280         // but a naive attempt to use SplitEdge leads to a crash.
4281         setShadow(&CB, getCleanShadow(&CB));
4282         setOrigin(&CB, getCleanOrigin());
4283         return;
4284       }
4285       // FIXME: NextInsn is likely in a basic block that has not been visited
4286       // yet. Anything inserted there will be instrumented by MSan later!
4287       NextInsn = NormalDest->getFirstInsertionPt();
4288       assert(NextInsn != NormalDest->end() &&
4289              "Could not find insertion point for retval shadow load");
4290     }
4291     IRBuilder<> IRBAfter(&*NextInsn);
4292     Value *RetvalShadow = IRBAfter.CreateAlignedLoad(
4293         getShadowTy(&CB), getShadowPtrForRetval(IRBAfter),
4294         kShadowTLSAlignment, "_msret");
4295     setShadow(&CB, RetvalShadow);
4296     if (MS.TrackOrigins)
4297       setOrigin(&CB, IRBAfter.CreateLoad(MS.OriginTy,
4298                                          getOriginPtrForRetval()));
4299   }
4300 
4301   bool isAMustTailRetVal(Value *RetVal) {
4302     if (auto *I = dyn_cast<BitCastInst>(RetVal)) {
4303       RetVal = I->getOperand(0);
4304     }
4305     if (auto *I = dyn_cast<CallInst>(RetVal)) {
4306       return I->isMustTailCall();
4307     }
4308     return false;
4309   }
4310 
4311   void visitReturnInst(ReturnInst &I) {
4312     IRBuilder<> IRB(&I);
4313     Value *RetVal = I.getReturnValue();
4314     if (!RetVal)
4315       return;
4316     // Don't emit the epilogue for musttail call returns.
4317     if (isAMustTailRetVal(RetVal))
4318       return;
4319     Value *ShadowPtr = getShadowPtrForRetval(IRB);
4320     bool HasNoUndef = F.hasRetAttribute(Attribute::NoUndef);
4321     bool StoreShadow = !(MS.EagerChecks && HasNoUndef);
4322     // FIXME: Consider using SpecialCaseList to specify a list of functions that
4323     // must always return fully initialized values. For now, we hardcode "main".
4324     bool EagerCheck = (MS.EagerChecks && HasNoUndef) || (F.getName() == "main");
4325 
4326     Value *Shadow = getShadow(RetVal);
4327     bool StoreOrigin = true;
4328     if (EagerCheck) {
4329       insertShadowCheck(RetVal, &I);
4330       Shadow = getCleanShadow(RetVal);
4331       StoreOrigin = false;
4332     }
4333 
4334     // Even if our check passes, the caller may still expect the shadow to be
4335     // passed over TLS.
4336     if (StoreShadow) {
4337       IRB.CreateAlignedStore(Shadow, ShadowPtr, kShadowTLSAlignment);
4338       if (MS.TrackOrigins && StoreOrigin)
4339         IRB.CreateStore(getOrigin(RetVal), getOriginPtrForRetval());
4340     }
4341   }
4342 
4343   void visitPHINode(PHINode &I) {
4344     IRBuilder<> IRB(&I);
4345     if (!PropagateShadow) {
4346       setShadow(&I, getCleanShadow(&I));
4347       setOrigin(&I, getCleanOrigin());
4348       return;
4349     }
4350 
4351     ShadowPHINodes.push_back(&I);
4352     setShadow(&I, IRB.CreatePHI(getShadowTy(&I), I.getNumIncomingValues(),
4353                                 "_msphi_s"));
4354     if (MS.TrackOrigins)
4355       setOrigin(
4356           &I, IRB.CreatePHI(MS.OriginTy, I.getNumIncomingValues(), "_msphi_o"));
4357   }
4358 
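       // A private global whose address serves as a unique id for the stack
       // variable; it is passed to the MsanSetAllocaOrigin* runtime callbacks.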
4359   Value *getLocalVarIdptr(AllocaInst &I) {
4360     ConstantInt *IntConst =
4361         ConstantInt::get(Type::getInt32Ty((*F.getParent()).getContext()), 0);
4362     return new GlobalVariable(*F.getParent(), IntConst->getType(),
4363                               /*isConstant=*/false, GlobalValue::PrivateLinkage,
4364                               IntConst);
4365   }
4366 
4367   Value *getLocalVarDescription(AllocaInst &I) {
4368     return createPrivateConstGlobalForString(*F.getParent(), I.getName());
4369   }
4370 
4371   void poisonAllocaUserspace(AllocaInst &I, IRBuilder<> &IRB, Value *Len) {
4372     if (PoisonStack && ClPoisonStackWithCall) {
4373       IRB.CreateCall(MS.MsanPoisonStackFn, {&I, Len});
4374     } else {
4375       Value *ShadowBase, *OriginBase;
4376       std::tie(ShadowBase, OriginBase) = getShadowOriginPtr(
4377           &I, IRB, IRB.getInt8Ty(), Align(1), /*isStore*/ true);
4378 
4379       Value *PoisonValue = IRB.getInt8(PoisonStack ? ClPoisonStackPattern : 0);
4380       IRB.CreateMemSet(ShadowBase, PoisonValue, Len, I.getAlign());
4381     }
4382 
4383     if (PoisonStack && MS.TrackOrigins) {
4384       Value *Idptr = getLocalVarIdptr(I);
4385       if (ClPrintStackNames) {
4386         Value *Descr = getLocalVarDescription(I);
4387         IRB.CreateCall(MS.MsanSetAllocaOriginWithDescriptionFn,
4388                        {&I, Len, Idptr, Descr});
4389       } else {
4390         IRB.CreateCall(MS.MsanSetAllocaOriginNoDescriptionFn, {&I, Len, Idptr});
4391       }
4392     }
4393   }
4394 
4395   void poisonAllocaKmsan(AllocaInst &I, IRBuilder<> &IRB, Value *Len) {
4396     Value *Descr = getLocalVarDescription(I);
4397     if (PoisonStack) {
4398       IRB.CreateCall(MS.MsanPoisonAllocaFn, {&I, Len, Descr});
4399     } else {
4400       IRB.CreateCall(MS.MsanUnpoisonAllocaFn, {&I, Len});
4401     }
4402   }
4403 
4404   void instrumentAlloca(AllocaInst &I, Instruction *InsPoint = nullptr) {
4405     if (!InsPoint)
4406       InsPoint = &I;
4407     NextNodeIRBuilder IRB(InsPoint);
4408     const DataLayout &DL = F.getParent()->getDataLayout();
4409     uint64_t TypeSize = DL.getTypeAllocSize(I.getAllocatedType());
4410     Value *Len = ConstantInt::get(MS.IntptrTy, TypeSize);
4411     if (I.isArrayAllocation())
4412       Len = IRB.CreateMul(Len,
4413                           IRB.CreateZExtOrTrunc(I.getArraySize(), MS.IntptrTy));
4414 
4415     if (MS.CompileKernel)
4416       poisonAllocaKmsan(I, IRB, Len);
4417     else
4418       poisonAllocaUserspace(I, IRB, Len);
4419   }
4420 
4421   void visitAllocaInst(AllocaInst &I) {
4422     setShadow(&I, getCleanShadow(&I));
4423     setOrigin(&I, getCleanOrigin());
4424     // We'll get to this alloca later unless it's poisoned at the corresponding
4425     // llvm.lifetime.start.
4426     AllocaSet.insert(&I);
4427   }
4428 
4429   void visitSelectInst(SelectInst &I) {
4430     IRBuilder<> IRB(&I);
4431     // a = select b, c, d
4432     Value *B = I.getCondition();
4433     Value *C = I.getTrueValue();
4434     Value *D = I.getFalseValue();
4435     Value *Sb = getShadow(B);
4436     Value *Sc = getShadow(C);
4437     Value *Sd = getShadow(D);
4438 
4439     // Result shadow if condition shadow is 0.
4440     Value *Sa0 = IRB.CreateSelect(B, Sc, Sd);
4441     Value *Sa1;
4442     if (I.getType()->isAggregateType()) {
4443       // To avoid "sign extending" i1 to an arbitrary aggregate type, we just do
4444       // an extra "select". This results in much more compact IR.
4445       // Sa = select Sb, poisoned, (select b, Sc, Sd)
4446       Sa1 = getPoisonedShadow(getShadowTy(I.getType()));
4447     } else {
4448       // Sa = select Sb, [ (c^d) | Sc | Sd ], [ b ? Sc : Sd ]
4449       // If Sb (condition is poisoned), look for bits in c and d that are equal
4450       // and both unpoisoned.
4451       // If !Sb (condition is unpoisoned), simply pick one of Sc and Sd.
4452 
4453       // Cast arguments to shadow-compatible type.
4454       C = CreateAppToShadowCast(IRB, C);
4455       D = CreateAppToShadowCast(IRB, D);
4456 
4457       // Result shadow if condition shadow is 1.
4458       Sa1 = IRB.CreateOr({IRB.CreateXor(C, D), Sc, Sd});
4459     }
4460     Value *Sa = IRB.CreateSelect(Sb, Sa1, Sa0, "_msprop_select");
4461     setShadow(&I, Sa);
4462     if (MS.TrackOrigins) {
4463       // Origins are always i32, so any vector conditions must be flattened.
4464       // FIXME: consider tracking vector origins for app vectors?
4465       if (B->getType()->isVectorTy()) {
4466         B = convertToBool(B, IRB);
4467         Sb = convertToBool(Sb, IRB);
4468       }
4469       // a = select b, c, d
4470       // Oa = Sb ? Ob : (b ? Oc : Od)
4471       setOrigin(
4472           &I, IRB.CreateSelect(Sb, getOrigin(I.getCondition()),
4473                                IRB.CreateSelect(B, getOrigin(I.getTrueValue()),
4474                                                 getOrigin(I.getFalseValue()))));
4475     }
4476   }
4477 
4478   void visitLandingPadInst(LandingPadInst &I) {
4479     // Do nothing.
4480     // See https://github.com/google/sanitizers/issues/504
4481     setShadow(&I, getCleanShadow(&I));
4482     setOrigin(&I, getCleanOrigin());
4483   }
4484 
4485   void visitCatchSwitchInst(CatchSwitchInst &I) {
4486     setShadow(&I, getCleanShadow(&I));
4487     setOrigin(&I, getCleanOrigin());
4488   }
4489 
4490   void visitFuncletPadInst(FuncletPadInst &I) {
4491     setShadow(&I, getCleanShadow(&I));
4492     setOrigin(&I, getCleanOrigin());
4493   }
4494 
4495   void visitGetElementPtrInst(GetElementPtrInst &I) { handleShadowOr(I); }
4496 
4497   void visitExtractValueInst(ExtractValueInst &I) {
4498     IRBuilder<> IRB(&I);
4499     Value *Agg = I.getAggregateOperand();
4500     LLVM_DEBUG(dbgs() << "ExtractValue:  " << I << "\n");
4501     Value *AggShadow = getShadow(Agg);
4502     LLVM_DEBUG(dbgs() << "   AggShadow:  " << *AggShadow << "\n");
4503     Value *ResShadow = IRB.CreateExtractValue(AggShadow, I.getIndices());
4504     LLVM_DEBUG(dbgs() << "   ResShadow:  " << *ResShadow << "\n");
4505     setShadow(&I, ResShadow);
4506     setOriginForNaryOp(I);
4507   }
4508 
4509   void visitInsertValueInst(InsertValueInst &I) {
4510     IRBuilder<> IRB(&I);
4511     LLVM_DEBUG(dbgs() << "InsertValue:  " << I << "\n");
4512     Value *AggShadow = getShadow(I.getAggregateOperand());
4513     Value *InsShadow = getShadow(I.getInsertedValueOperand());
4514     LLVM_DEBUG(dbgs() << "   AggShadow:  " << *AggShadow << "\n");
4515     LLVM_DEBUG(dbgs() << "   InsShadow:  " << *InsShadow << "\n");
4516     Value *Res = IRB.CreateInsertValue(AggShadow, InsShadow, I.getIndices());
4517     LLVM_DEBUG(dbgs() << "   Res:        " << *Res << "\n");
4518     setShadow(&I, Res);
4519     setOriginForNaryOp(I);
4520   }
4521 
4522   void dumpInst(Instruction &I) {
4523     if (CallInst *CI = dyn_cast<CallInst>(&I)) {
4524       errs() << "ZZZ call " << CI->getCalledFunction()->getName() << "\n";
4525     } else {
4526       errs() << "ZZZ " << I.getOpcodeName() << "\n";
4527     }
4528     errs() << "QQQ " << I << "\n";
4529   }
4530 
4531   void visitResumeInst(ResumeInst &I) {
4532     LLVM_DEBUG(dbgs() << "Resume: " << I << "\n");
4533     // Nothing to do here.
4534   }
4535 
4536   void visitCleanupReturnInst(CleanupReturnInst &CRI) {
4537     LLVM_DEBUG(dbgs() << "CleanupReturn: " << CRI << "\n");
4538     // Nothing to do here.
4539   }
4540 
4541   void visitCatchReturnInst(CatchReturnInst &CRI) {
4542     LLVM_DEBUG(dbgs() << "CatchReturn: " << CRI << "\n");
4543     // Nothing to do here.
4544   }
4545 
4546   void instrumentAsmArgument(Value *Operand, Type *ElemTy, Instruction &I,
4547                              IRBuilder<> &IRB, const DataLayout &DL,
4548                              bool isOutput) {
4549     // For each assembly argument, check that its value is initialized.
4550     // If the argument is a pointer, we assume it points to a single element
4551     // of the corresponding type (or to an 8-byte word, if the type is unsized).
4552     // Each such pointer is instrumented with a call to the runtime library.
4553     Type *OpType = Operand->getType();
4554     // Check the operand value itself.
4555     insertShadowCheck(Operand, &I);
4556     if (!OpType->isPointerTy() || !isOutput) {
4557       assert(!isOutput);
4558       return;
4559     }
4560     if (!ElemTy->isSized())
4561       return;
4562     Value *SizeVal =
4563       IRB.CreateTypeSize(MS.IntptrTy, DL.getTypeStoreSize(ElemTy));
4564     if (MS.CompileKernel) {
4565       IRB.CreateCall(MS.MsanInstrumentAsmStoreFn, {Operand, SizeVal});
4566     } else {
4567       // ElemTy, derived from elementtype(), does not encode the alignment of
4568       // the pointer. Conservatively assume that the shadow memory is unaligned.
4569       auto [ShadowPtr, _] =
4570           getShadowOriginPtrUserspace(Operand, IRB, IRB.getInt8Ty(), Align(1));
4571       IRB.CreateAlignedStore(getCleanShadow(ElemTy), ShadowPtr, Align(1));
4572     }
4573   }
4574 
4575   /// Get the number of output arguments returned by pointers.
4576   int getNumOutputArgs(InlineAsm *IA, CallBase *CB) {
4577     int NumRetOutputs = 0;
4578     int NumOutputs = 0;
4579     Type *RetTy = cast<Value>(CB)->getType();
4580     if (!RetTy->isVoidTy()) {
4581       // Register outputs are returned via the CallInst return value.
4582       auto *ST = dyn_cast<StructType>(RetTy);
4583       if (ST)
4584         NumRetOutputs = ST->getNumElements();
4585       else
4586         NumRetOutputs = 1;
4587     }
4588     InlineAsm::ConstraintInfoVector Constraints = IA->ParseConstraints();
4589     for (const InlineAsm::ConstraintInfo &Info : Constraints) {
4590       switch (Info.Type) {
4591       case InlineAsm::isOutput:
4592         NumOutputs++;
4593         break;
4594       default:
4595         break;
4596       }
4597     }
4598     return NumOutputs - NumRetOutputs;
4599   }
4600 
4601   void visitAsmInstruction(Instruction &I) {
4602     // Conservative inline assembly handling: check for poisoned shadow of
4603     // asm() arguments, then unpoison the result and all the memory locations
4604     // pointed to by those arguments.
4605     // An inline asm() statement in C++ contains lists of input and output
4606     // arguments used by the assembly code. These are mapped to operands of the
4607     // CallInst as follows:
4608     //  - nR register outputs ("=r") are returned by value in a single structure
4609     //  (SSA value of the CallInst);
4610     //  - nO other outputs ("=m" and others) are returned by pointer as first
4611     // nO operands of the CallInst;
4612     //  - nI inputs ("r", "m" and others) are passed to CallInst as the
4613     // remaining nI operands.
4614     // The total number of asm() arguments in the source is nR+nO+nI, and the
4615     // corresponding CallInst has nO+nI+1 operands (the last operand is the
4616     // function to be called).
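         // For example, asm("..." : "=r"(a), "=m"(b) : "r"(c)) is lowered to a
         // CallInst that returns the new value of a, whose first operand is &b,
         // whose second operand is c, and whose last operand is the InlineAsm
         // callee.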
4617     const DataLayout &DL = F.getParent()->getDataLayout();
4618     CallBase *CB = cast<CallBase>(&I);
4619     IRBuilder<> IRB(&I);
4620     InlineAsm *IA = cast<InlineAsm>(CB->getCalledOperand());
4621     int OutputArgs = getNumOutputArgs(IA, CB);
4622     // The last operand of a CallInst is the function itself.
4623     int NumOperands = CB->getNumOperands() - 1;
4624 
4625     // Check input arguments. Doing so before unpoisoning output arguments, so
4626     // that we won't overwrite uninit values before checking them.
4627     for (int i = OutputArgs; i < NumOperands; i++) {
4628       Value *Operand = CB->getOperand(i);
4629       instrumentAsmArgument(Operand, CB->getParamElementType(i), I, IRB, DL,
4630                             /*isOutput*/ false);
4631     }
4632     // Unpoison output arguments. This must happen before the actual InlineAsm
4633     // call, so that the shadow for memory published in the asm() statement
4634     // remains valid.
4635     for (int i = 0; i < OutputArgs; i++) {
4636       Value *Operand = CB->getOperand(i);
4637       instrumentAsmArgument(Operand, CB->getParamElementType(i), I, IRB, DL,
4638                             /*isOutput*/ true);
4639     }
4640 
4641     setShadow(&I, getCleanShadow(&I));
4642     setOrigin(&I, getCleanOrigin());
4643   }
4644 
4645   void visitFreezeInst(FreezeInst &I) {
4646     // Freeze always returns a fully defined value.
4647     setShadow(&I, getCleanShadow(&I));
4648     setOrigin(&I, getCleanOrigin());
4649   }
4650 
4651   void visitInstruction(Instruction &I) {
4652     // Everything else: stop propagating and check for poisoned shadow.
4653     if (ClDumpStrictInstructions)
4654       dumpInst(I);
4655     LLVM_DEBUG(dbgs() << "DEFAULT: " << I << "\n");
4656     for (size_t i = 0, n = I.getNumOperands(); i < n; i++) {
4657       Value *Operand = I.getOperand(i);
4658       if (Operand->getType()->isSized())
4659         insertShadowCheck(Operand, &I);
4660     }
4661     setShadow(&I, getCleanShadow(&I));
4662     setOrigin(&I, getCleanOrigin());
4663   }
4664 };
4665 
4666 struct VarArgHelperBase : public VarArgHelper {
4667   Function &F;
4668   MemorySanitizer &MS;
4669   MemorySanitizerVisitor &MSV;
4670   SmallVector<CallInst *, 16> VAStartInstrumentationList;
4671   const unsigned VAListTagSize;
4672 
4673   VarArgHelperBase(Function &F, MemorySanitizer &MS,
4674                    MemorySanitizerVisitor &MSV, unsigned VAListTagSize)
4675       : F(F), MS(MS), MSV(MSV), VAListTagSize(VAListTagSize) {}
4676 
4677   Value *getShadowAddrForVAArgument(IRBuilder<> &IRB, unsigned ArgOffset) {
4678     Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy);
4679     return IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
4680   }
4681 
4682   /// Compute the shadow address for a given va_arg.
4683   Value *getShadowPtrForVAArgument(Type *Ty, IRBuilder<> &IRB,
4684                                    unsigned ArgOffset) {
4685     Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy);
4686     Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
4687     return IRB.CreateIntToPtr(Base, PointerType::get(MSV.getShadowTy(Ty), 0),
4688                               "_msarg_va_s");
4689   }
4690 
4691   /// Compute the shadow address for a given va_arg.
4692   Value *getShadowPtrForVAArgument(Type *Ty, IRBuilder<> &IRB,
4693                                    unsigned ArgOffset, unsigned ArgSize) {
4694     // Make sure we don't overflow __msan_va_arg_tls.
4695     if (ArgOffset + ArgSize > kParamTLSSize)
4696       return nullptr;
4697     return getShadowPtrForVAArgument(Ty, IRB, ArgOffset);
4698   }
4699 
4700   /// Compute the origin address for a given va_arg.
4701   Value *getOriginPtrForVAArgument(IRBuilder<> &IRB, int ArgOffset) {
4702     Value *Base = IRB.CreatePointerCast(MS.VAArgOriginTLS, MS.IntptrTy);
4703     // getOriginPtrForVAArgument() is always called after
4704     // getShadowPtrForVAArgument(), so __msan_va_arg_origin_tls can never
4705     // overflow.
4706     Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
4707     return IRB.CreateIntToPtr(Base, PointerType::get(MS.OriginTy, 0),
4708                               "_msarg_va_o");
4709   }
4710 
4711   void CleanUnusedTLS(IRBuilder<> &IRB, Value *ShadowBase,
4712                       unsigned BaseOffset) {
4713     // The tail of __msan_va_arg_tls is not large enough to fit the full
4714     // value shadow, but it will be copied to the backup anyway. Make it
4715     // clean.
4716     if (BaseOffset >= kParamTLSSize)
4717       return;
4718     Value *TailSize =
4719         ConstantInt::getSigned(IRB.getInt32Ty(), kParamTLSSize - BaseOffset);
4720     IRB.CreateMemSet(ShadowBase, ConstantInt::getNullValue(IRB.getInt8Ty()),
4721                      TailSize, Align(8));
4722   }
4723 
4724   void unpoisonVAListTagForInst(IntrinsicInst &I) {
4725     IRBuilder<> IRB(&I);
4726     Value *VAListTag = I.getArgOperand(0);
4727     const Align Alignment = Align(8);
4728     auto [ShadowPtr, OriginPtr] = MSV.getShadowOriginPtr(
4729         VAListTag, IRB, IRB.getInt8Ty(), Alignment, /*isStore*/ true);
4730     // Unpoison the whole __va_list_tag.
4731     IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
4732                      VAListTagSize, Alignment, false);
4733   }
4734 
4735   void visitVAStartInst(VAStartInst &I) override {
4736     if (F.getCallingConv() == CallingConv::Win64)
4737       return;
4738     VAStartInstrumentationList.push_back(&I);
4739     unpoisonVAListTagForInst(I);
4740   }
4741 
4742   void visitVACopyInst(VACopyInst &I) override {
4743     if (F.getCallingConv() == CallingConv::Win64)
4744       return;
4745     unpoisonVAListTagForInst(I);
4746   }
4747 };
4748 
4749 /// AMD64-specific implementation of VarArgHelper.
4750 struct VarArgAMD64Helper : public VarArgHelperBase {
4751   // An unfortunate workaround for asymmetric lowering of va_arg stuff.
4752   // See a comment in visitCallBase for more details.
4753   static const unsigned AMD64GpEndOffset = 48; // AMD64 ABI Draft 0.99.6 p3.5.7
4754   static const unsigned AMD64FpEndOffsetSSE = 176;
4755   // If SSE is disabled, fp_offset in va_list is zero.
4756   static const unsigned AMD64FpEndOffsetNoSSE = AMD64GpEndOffset;
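       // __msan_va_arg_tls mirrors the va_start register save area: bytes
       // [0, AMD64GpEndOffset) shadow the six 8-byte GP argument registers,
       // bytes [AMD64GpEndOffset, AMD64FpEndOffset) shadow the eight 16-byte
       // SSE registers, and everything past AMD64FpEndOffset shadows the
       // overflow (stack) argument area.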
4757 
4758   unsigned AMD64FpEndOffset;
4759   AllocaInst *VAArgTLSCopy = nullptr;
4760   AllocaInst *VAArgTLSOriginCopy = nullptr;
4761   Value *VAArgOverflowSize = nullptr;
4762 
4763   enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory };
4764 
4765   VarArgAMD64Helper(Function &F, MemorySanitizer &MS,
4766                     MemorySanitizerVisitor &MSV)
4767       : VarArgHelperBase(F, MS, MSV, /*VAListTagSize=*/24) {
4768     AMD64FpEndOffset = AMD64FpEndOffsetSSE;
4769     for (const auto &Attr : F.getAttributes().getFnAttrs()) {
4770       if (Attr.isStringAttribute() &&
4771           (Attr.getKindAsString() == "target-features")) {
4772         if (Attr.getValueAsString().contains("-sse"))
4773           AMD64FpEndOffset = AMD64FpEndOffsetNoSSE;
4774         break;
4775       }
4776     }
4777   }
4778 
4779   ArgKind classifyArgument(Value *arg) {
4780     // A very rough approximation of X86_64 argument classification rules.
4781     Type *T = arg->getType();
4782     if (T->isX86_FP80Ty())
4783       return AK_Memory;
4784     if (T->isFPOrFPVectorTy() || T->isX86_MMXTy())
4785       return AK_FloatingPoint;
4786     if (T->isIntegerTy() && T->getPrimitiveSizeInBits() <= 64)
4787       return AK_GeneralPurpose;
4788     if (T->isPointerTy())
4789       return AK_GeneralPurpose;
4790     return AK_Memory;
4791   }
4792 
4793   // For VarArg functions, store the argument shadow in an ABI-specific format
4794   // that corresponds to va_list layout.
4795   // We do this because Clang lowers va_arg in the frontend, and this pass
4796   // only sees the low level code that deals with va_list internals.
4797   // A much easier alternative (provided that Clang emits va_arg instructions)
4798   // would have been to associate each live instance of va_list with a copy of
4799   // MSanParamTLS, and extract shadow on va_arg() call in the argument list
4800   // order.
4801   void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
4802     unsigned GpOffset = 0;
4803     unsigned FpOffset = AMD64GpEndOffset;
4804     unsigned OverflowOffset = AMD64FpEndOffset;
4805     const DataLayout &DL = F.getParent()->getDataLayout();
4806 
4807     for (const auto &[ArgNo, A] : llvm::enumerate(CB.args())) {
4808       bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
4809       bool IsByVal = CB.paramHasAttr(ArgNo, Attribute::ByVal);
4810       if (IsByVal) {
4811         // ByVal arguments always go to the overflow area.
4812         // Fixed arguments passed through the overflow area will be stepped
4813         // over by va_start, so don't count them towards the offset.
4814         if (IsFixed)
4815           continue;
4816         assert(A->getType()->isPointerTy());
4817         Type *RealTy = CB.getParamByValType(ArgNo);
4818         uint64_t ArgSize = DL.getTypeAllocSize(RealTy);
4819         uint64_t AlignedSize = alignTo(ArgSize, 8);
4820         unsigned BaseOffset = OverflowOffset;
4821         Value *ShadowBase =
4822             getShadowPtrForVAArgument(RealTy, IRB, OverflowOffset);
4823         Value *OriginBase = nullptr;
4824         if (MS.TrackOrigins)
4825           OriginBase = getOriginPtrForVAArgument(IRB, OverflowOffset);
4826         OverflowOffset += AlignedSize;
4827 
4828         if (OverflowOffset > kParamTLSSize) {
4829           CleanUnusedTLS(IRB, ShadowBase, BaseOffset);
4830           continue; // We have no space to copy shadow there.
4831         }
4832 
4833         Value *ShadowPtr, *OriginPtr;
4834         std::tie(ShadowPtr, OriginPtr) =
4835             MSV.getShadowOriginPtr(A, IRB, IRB.getInt8Ty(), kShadowTLSAlignment,
4836                                    /*isStore*/ false);
4837         IRB.CreateMemCpy(ShadowBase, kShadowTLSAlignment, ShadowPtr,
4838                          kShadowTLSAlignment, ArgSize);
4839         if (MS.TrackOrigins)
4840           IRB.CreateMemCpy(OriginBase, kShadowTLSAlignment, OriginPtr,
4841                            kShadowTLSAlignment, ArgSize);
4842       } else {
4843         ArgKind AK = classifyArgument(A);
4844         if (AK == AK_GeneralPurpose && GpOffset >= AMD64GpEndOffset)
4845           AK = AK_Memory;
4846         if (AK == AK_FloatingPoint && FpOffset >= AMD64FpEndOffset)
4847           AK = AK_Memory;
4848         Value *ShadowBase, *OriginBase = nullptr;
4849         switch (AK) {
4850         case AK_GeneralPurpose:
4851           ShadowBase = getShadowPtrForVAArgument(A->getType(), IRB, GpOffset);
4852           if (MS.TrackOrigins)
4853             OriginBase = getOriginPtrForVAArgument(IRB, GpOffset);
4854           GpOffset += 8;
4855           assert(GpOffset <= kParamTLSSize);
4856           break;
4857         case AK_FloatingPoint:
4858           ShadowBase = getShadowPtrForVAArgument(A->getType(), IRB, FpOffset);
4859           if (MS.TrackOrigins)
4860             OriginBase = getOriginPtrForVAArgument(IRB, FpOffset);
4861           FpOffset += 16;
4862           assert(FpOffset <= kParamTLSSize);
4863           break;
4864         case AK_Memory:
4865           if (IsFixed)
4866             continue;
4867           uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
4868           uint64_t AlignedSize = alignTo(ArgSize, 8);
4869           unsigned BaseOffset = OverflowOffset;
4870           ShadowBase =
4871               getShadowPtrForVAArgument(A->getType(), IRB, OverflowOffset);
4872           if (MS.TrackOrigins) {
4873             OriginBase = getOriginPtrForVAArgument(IRB, OverflowOffset);
4874           }
4875           OverflowOffset += AlignedSize;
4876           if (OverflowOffset > kParamTLSSize) {
4877             // We have no space to copy shadow there.
4878             CleanUnusedTLS(IRB, ShadowBase, BaseOffset);
4879             continue;
4880           }
4881         }
4882         // Take fixed arguments into account for GpOffset and FpOffset,
4883         // but don't actually store shadows for them.
4884         // TODO(glider): don't call get*PtrForVAArgument() for them.
4885         if (IsFixed)
4886           continue;
4887         Value *Shadow = MSV.getShadow(A);
4888         IRB.CreateAlignedStore(Shadow, ShadowBase, kShadowTLSAlignment);
4889         if (MS.TrackOrigins) {
4890           Value *Origin = MSV.getOrigin(A);
4891           TypeSize StoreSize = DL.getTypeStoreSize(Shadow->getType());
4892           MSV.paintOrigin(IRB, Origin, OriginBase, StoreSize,
4893                           std::max(kShadowTLSAlignment, kMinOriginAlignment));
4894         }
4895       }
4896     }
4897     Constant *OverflowSize =
4898         ConstantInt::get(IRB.getInt64Ty(), OverflowOffset - AMD64FpEndOffset);
4899     IRB.CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
4900   }
4901 
4902   void finalizeInstrumentation() override {
4903     assert(!VAArgOverflowSize && !VAArgTLSCopy &&
4904            "finalizeInstrumentation called twice");
4905     if (!VAStartInstrumentationList.empty()) {
4906       // If there is a va_start in this function, make a backup copy of
4907       // va_arg_tls somewhere in the function entry block.
4908       IRBuilder<> IRB(MSV.FnPrologueEnd);
4909       VAArgOverflowSize =
4910           IRB.CreateLoad(IRB.getInt64Ty(), MS.VAArgOverflowSizeTLS);
4911       Value *CopySize = IRB.CreateAdd(
4912           ConstantInt::get(MS.IntptrTy, AMD64FpEndOffset), VAArgOverflowSize);
4913       VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
4914       VAArgTLSCopy->setAlignment(kShadowTLSAlignment);
4915       IRB.CreateMemSet(VAArgTLSCopy, Constant::getNullValue(IRB.getInt8Ty()),
4916                        CopySize, kShadowTLSAlignment, false);
4917 
4918       Value *SrcSize = IRB.CreateBinaryIntrinsic(
4919           Intrinsic::umin, CopySize,
4920           ConstantInt::get(MS.IntptrTy, kParamTLSSize));
4921       IRB.CreateMemCpy(VAArgTLSCopy, kShadowTLSAlignment, MS.VAArgTLS,
4922                        kShadowTLSAlignment, SrcSize);
4923       if (MS.TrackOrigins) {
4924         VAArgTLSOriginCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
4925         VAArgTLSOriginCopy->setAlignment(kShadowTLSAlignment);
4926         IRB.CreateMemCpy(VAArgTLSOriginCopy, kShadowTLSAlignment,
4927                          MS.VAArgOriginTLS, kShadowTLSAlignment, SrcSize);
4928       }
4929     }
4930 
4931     // Instrument va_start.
4932     // Copy va_list shadow from the backup copy of the TLS contents.
4933     for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) {
4934       CallInst *OrigInst = VAStartInstrumentationList[i];
4935       NextNodeIRBuilder IRB(OrigInst);
4936       Value *VAListTag = OrigInst->getArgOperand(0);
4937 
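           // On x86-64, __va_list_tag is { i32 gp_offset, i32 fp_offset,
           // ptr overflow_arg_area, ptr reg_save_area }, so overflow_arg_area is
           // read at offset 8 and reg_save_area at offset 16 below.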
4938       Type *RegSaveAreaPtrTy = PointerType::getUnqual(*MS.C); // i64*
4939       Value *RegSaveAreaPtrPtr = IRB.CreateIntToPtr(
4940           IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
4941                         ConstantInt::get(MS.IntptrTy, 16)),
4942           PointerType::get(RegSaveAreaPtrTy, 0));
4943       Value *RegSaveAreaPtr =
4944           IRB.CreateLoad(RegSaveAreaPtrTy, RegSaveAreaPtrPtr);
4945       Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
4946       const Align Alignment = Align(16);
4947       std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
4948           MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
4949                                  Alignment, /*isStore*/ true);
4950       IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy, Alignment,
4951                        AMD64FpEndOffset);
4952       if (MS.TrackOrigins)
4953         IRB.CreateMemCpy(RegSaveAreaOriginPtr, Alignment, VAArgTLSOriginCopy,
4954                          Alignment, AMD64FpEndOffset);
4955       Type *OverflowArgAreaPtrTy = PointerType::getUnqual(*MS.C); // i64*
4956       Value *OverflowArgAreaPtrPtr = IRB.CreateIntToPtr(
4957           IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
4958                         ConstantInt::get(MS.IntptrTy, 8)),
4959           PointerType::get(OverflowArgAreaPtrTy, 0));
4960       Value *OverflowArgAreaPtr =
4961           IRB.CreateLoad(OverflowArgAreaPtrTy, OverflowArgAreaPtrPtr);
4962       Value *OverflowArgAreaShadowPtr, *OverflowArgAreaOriginPtr;
4963       std::tie(OverflowArgAreaShadowPtr, OverflowArgAreaOriginPtr) =
4964           MSV.getShadowOriginPtr(OverflowArgAreaPtr, IRB, IRB.getInt8Ty(),
4965                                  Alignment, /*isStore*/ true);
4966       Value *SrcPtr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), VAArgTLSCopy,
4967                                              AMD64FpEndOffset);
4968       IRB.CreateMemCpy(OverflowArgAreaShadowPtr, Alignment, SrcPtr, Alignment,
4969                        VAArgOverflowSize);
4970       if (MS.TrackOrigins) {
4971         SrcPtr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), VAArgTLSOriginCopy,
4972                                         AMD64FpEndOffset);
4973         IRB.CreateMemCpy(OverflowArgAreaOriginPtr, Alignment, SrcPtr, Alignment,
4974                          VAArgOverflowSize);
4975       }
4976     }
4977   }
4978 };
4979 
4980 /// MIPS64-specific implementation of VarArgHelper.
4981 /// NOTE: This is also used for LoongArch64.
4982 struct VarArgMIPS64Helper : public VarArgHelperBase {
4983   AllocaInst *VAArgTLSCopy = nullptr;
4984   Value *VAArgSize = nullptr;
4985 
4986   VarArgMIPS64Helper(Function &F, MemorySanitizer &MS,
4987                      MemorySanitizerVisitor &MSV)
4988       : VarArgHelperBase(F, MS, MSV, /*VAListTagSize=*/8) {}
4989 
4990   void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
4991     unsigned VAArgOffset = 0;
4992     const DataLayout &DL = F.getParent()->getDataLayout();
4993     for (Value *A :
4994          llvm::drop_begin(CB.args(), CB.getFunctionType()->getNumParams())) {
4995       Triple TargetTriple(F.getParent()->getTargetTriple());
4996       Value *Base;
4997       uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
4998       if (TargetTriple.getArch() == Triple::mips64) {
4999         // Adjust the shadow for arguments with size < 8 to match the
5000         // placement of bits on a big-endian system.
5001         if (ArgSize < 8)
5002           VAArgOffset += (8 - ArgSize);
5003       }
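      // For example (illustrative only): on big-endian mips64 a 4-byte
      // argument occupies the last 4 bytes of its 8-byte slot, so the shadow
      // offset is advanced by 4 before the 4 bytes of argument shadow are
      // stored.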
5004       Base = getShadowPtrForVAArgument(A->getType(), IRB, VAArgOffset, ArgSize);
5005       VAArgOffset += ArgSize;
5006       VAArgOffset = alignTo(VAArgOffset, 8);
5007       if (!Base)
5008         continue;
5009       IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
5010     }
5011 
5012     Constant *TotalVAArgSize = ConstantInt::get(IRB.getInt64Ty(), VAArgOffset);
5013     // Reuse VAArgOverflowSizeTLS as VAArgSizeTLS to avoid creating a new
5014     // class member; here it holds the total size of all varargs.
5015     IRB.CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
5016   }
5017 
5018   void finalizeInstrumentation() override {
5019     assert(!VAArgSize && !VAArgTLSCopy &&
5020            "finalizeInstrumentation called twice");
5021     IRBuilder<> IRB(MSV.FnPrologueEnd);
5022     VAArgSize = IRB.CreateLoad(IRB.getInt64Ty(), MS.VAArgOverflowSizeTLS);
5023     Value *CopySize =
5024         IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, 0), VAArgSize);
5025 
5026     if (!VAStartInstrumentationList.empty()) {
5027       // If there is a va_start in this function, make a backup copy of
5028       // va_arg_tls somewhere in the function entry block.
5029       VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
5030       VAArgTLSCopy->setAlignment(kShadowTLSAlignment);
5031       IRB.CreateMemSet(VAArgTLSCopy, Constant::getNullValue(IRB.getInt8Ty()),
5032                        CopySize, kShadowTLSAlignment, false);
5033 
5034       Value *SrcSize = IRB.CreateBinaryIntrinsic(
5035           Intrinsic::umin, CopySize,
5036           ConstantInt::get(MS.IntptrTy, kParamTLSSize));
5037       IRB.CreateMemCpy(VAArgTLSCopy, kShadowTLSAlignment, MS.VAArgTLS,
5038                        kShadowTLSAlignment, SrcSize);
5039     }
5040 
5041     // Instrument va_start.
5042     // Copy va_list shadow from the backup copy of the TLS contents.
5043     for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) {
5044       CallInst *OrigInst = VAStartInstrumentationList[i];
5045       NextNodeIRBuilder IRB(OrigInst);
5046       Value *VAListTag = OrigInst->getArgOperand(0);
5047       Type *RegSaveAreaPtrTy = PointerType::getUnqual(*MS.C); // i64*
5048       Value *RegSaveAreaPtrPtr =
5049           IRB.CreateIntToPtr(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
5050                              PointerType::get(RegSaveAreaPtrTy, 0));
5051       Value *RegSaveAreaPtr =
5052           IRB.CreateLoad(RegSaveAreaPtrTy, RegSaveAreaPtrPtr);
5053       Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
5054       const Align Alignment = Align(8);
5055       std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
5056           MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
5057                                  Alignment, /*isStore*/ true);
5058       IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy, Alignment,
5059                        CopySize);
5060     }
5061   }
5062 };
5063 
5064 /// AArch64-specific implementation of VarArgHelper.
5065 struct VarArgAArch64Helper : public VarArgHelperBase {
5066   static const unsigned kAArch64GrArgSize = 64;
5067   static const unsigned kAArch64VrArgSize = 128;
5068 
5069   static const unsigned AArch64GrBegOffset = 0;
5070   static const unsigned AArch64GrEndOffset = kAArch64GrArgSize;
5071   // Make VR space aligned to 16 bytes.
5072   static const unsigned AArch64VrBegOffset = AArch64GrEndOffset;
5073   static const unsigned AArch64VrEndOffset =
5074       AArch64VrBegOffset + kAArch64VrArgSize;
5075   static const unsigned AArch64VAEndOffset = AArch64VrEndOffset;
5076 
5077   AllocaInst *VAArgTLSCopy = nullptr;
5078   Value *VAArgOverflowSize = nullptr;
5079 
5080   enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory };
5081 
5082   VarArgAArch64Helper(Function &F, MemorySanitizer &MS,
5083                       MemorySanitizerVisitor &MSV)
5084       : VarArgHelperBase(F, MS, MSV, /*VAListTagSize=*/32) {}
5085 
5086   // A very rough approximation of aarch64 argument classification rules.
5087   std::pair<ArgKind, uint64_t> classifyArgument(Type *T) {
5088     if (T->isIntOrPtrTy() && T->getPrimitiveSizeInBits() <= 64)
5089       return {AK_GeneralPurpose, 1};
5090     if (T->isFloatingPointTy() && T->getPrimitiveSizeInBits() <= 128)
5091       return {AK_FloatingPoint, 1};
5092 
5093     if (T->isArrayTy()) {
5094       auto R = classifyArgument(T->getArrayElementType());
5095       R.second *= T->getScalarType()->getArrayNumElements();
5096       return R;
5097     }
5098 
5099     if (const FixedVectorType *FV = dyn_cast<FixedVectorType>(T)) {
5100       auto R = classifyArgument(FV->getScalarType());
5101       R.second *= FV->getNumElements();
5102       return R;
5103     }
5104 
5105     LLVM_DEBUG(errs() << "Unknown vararg type: " << *T << "\n");
5106     return {AK_Memory, 0};
5107   }
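  // For reference, a few examples of how the rules above classify common IR
  // types (a sketch derived from classifyArgument, not an exhaustive ABI
  // description):
  //   i32, i64, ptr        -> {AK_GeneralPurpose, 1}
  //   float, double, fp128 -> {AK_FloatingPoint, 1}
  //   [4 x i64]            -> {AK_GeneralPurpose, 4}
  //   <4 x float>          -> {AK_FloatingPoint, 4}
  //   anything else        -> {AK_Memory, 0}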
5108 
5109   // The instrumentation stores the argument shadow in a non ABI-specific
5110   // format because it does not know which arguments are named (as in the
5111   // x86_64 case, Clang lowers va_arg in the frontend and this pass only
5112   // sees the low-level code that deals with va_list internals).
5113   // The first eight GR argument registers are saved in the first 64 bytes
5114   // of the va_arg TLS array, followed by the first eight FP/SIMD registers
5115   // (128 bytes), and then the remaining arguments.
5116   // Using constant offsets within the va_arg TLS array allows a fast copy
5117   // in the finalize instrumentation.
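  // A sketch of the resulting va_arg TLS layout (offsets derived from the
  // constants above; for reference only):
  //
  //   [0, 64)    shadow for GR-passed varargs (x0-x7, 8 bytes each)
  //   [64, 192)  shadow for VR-passed varargs (q0-q7, 16 bytes each)
  //   [192, ...) shadow for varargs passed on the stack (overflow area)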
5118   void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
5119     unsigned GrOffset = AArch64GrBegOffset;
5120     unsigned VrOffset = AArch64VrBegOffset;
5121     unsigned OverflowOffset = AArch64VAEndOffset;
5122 
5123     const DataLayout &DL = F.getParent()->getDataLayout();
5124     for (const auto &[ArgNo, A] : llvm::enumerate(CB.args())) {
5125       bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
5126       auto [AK, RegNum] = classifyArgument(A->getType());
5127       if (AK == AK_GeneralPurpose &&
5128           (GrOffset + RegNum * 8) > AArch64GrEndOffset)
5129         AK = AK_Memory;
5130       if (AK == AK_FloatingPoint &&
5131           (VrOffset + RegNum * 16) > AArch64VrEndOffset)
5132         AK = AK_Memory;
5133       Value *Base;
5134       switch (AK) {
5135       case AK_GeneralPurpose:
5136         Base = getShadowPtrForVAArgument(A->getType(), IRB, GrOffset);
5137         GrOffset += 8 * RegNum;
5138         break;
5139       case AK_FloatingPoint:
5140         Base = getShadowPtrForVAArgument(A->getType(), IRB, VrOffset);
5141         VrOffset += 16 * RegNum;
5142         break;
5143       case AK_Memory:
5144         // Don't count fixed arguments in the overflow area - va_start will
5145         // skip right over them.
5146         if (IsFixed)
5147           continue;
5148         uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
5149         uint64_t AlignedSize = alignTo(ArgSize, 8);
5150         unsigned BaseOffset = OverflowOffset;
5151         Base = getShadowPtrForVAArgument(A->getType(), IRB, BaseOffset);
5152         OverflowOffset += AlignedSize;
5153         if (OverflowOffset > kParamTLSSize) {
5154           // We have no space to copy shadow there.
5155           CleanUnusedTLS(IRB, Base, BaseOffset);
5156           continue;
5157         }
5158         break;
5159       }
5160       // Count Gp/Vr fixed arguments to their respective offsets, but don't
5161       // bother to actually store a shadow.
5162       if (IsFixed)
5163         continue;
5164       IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
5165     }
5166     Constant *OverflowSize =
5167         ConstantInt::get(IRB.getInt64Ty(), OverflowOffset - AArch64VAEndOffset);
5168     IRB.CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
5169   }
5170 
5171   // Retrieve a va_list field of 'void*' size.
5172   Value *getVAField64(IRBuilder<> &IRB, Value *VAListTag, int offset) {
5173     Value *SaveAreaPtrPtr = IRB.CreateIntToPtr(
5174         IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
5175                       ConstantInt::get(MS.IntptrTy, offset)),
5176         PointerType::get(*MS.C, 0));
5177     return IRB.CreateLoad(Type::getInt64Ty(*MS.C), SaveAreaPtrPtr);
5178   }
5179 
5180   // Retrieve a va_list field of 'int' size.
5181   Value *getVAField32(IRBuilder<> &IRB, Value *VAListTag, int offset) {
5182     Value *SaveAreaPtr = IRB.CreateIntToPtr(
5183         IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
5184                       ConstantInt::get(MS.IntptrTy, offset)),
5185         PointerType::get(*MS.C, 0));
5186     Value *SaveArea32 = IRB.CreateLoad(IRB.getInt32Ty(), SaveAreaPtr);
5187     return IRB.CreateSExt(SaveArea32, MS.IntptrTy);
5188   }
5189 
5190   void finalizeInstrumentation() override {
5191     assert(!VAArgOverflowSize && !VAArgTLSCopy &&
5192            "finalizeInstrumentation called twice");
5193     if (!VAStartInstrumentationList.empty()) {
5194       // If there is a va_start in this function, make a backup copy of
5195       // va_arg_tls somewhere in the function entry block.
5196       IRBuilder<> IRB(MSV.FnPrologueEnd);
5197       VAArgOverflowSize =
5198           IRB.CreateLoad(IRB.getInt64Ty(), MS.VAArgOverflowSizeTLS);
5199       Value *CopySize = IRB.CreateAdd(
5200           ConstantInt::get(MS.IntptrTy, AArch64VAEndOffset), VAArgOverflowSize);
5201       VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
5202       VAArgTLSCopy->setAlignment(kShadowTLSAlignment);
5203       IRB.CreateMemSet(VAArgTLSCopy, Constant::getNullValue(IRB.getInt8Ty()),
5204                        CopySize, kShadowTLSAlignment, false);
5205 
5206       Value *SrcSize = IRB.CreateBinaryIntrinsic(
5207           Intrinsic::umin, CopySize,
5208           ConstantInt::get(MS.IntptrTy, kParamTLSSize));
5209       IRB.CreateMemCpy(VAArgTLSCopy, kShadowTLSAlignment, MS.VAArgTLS,
5210                        kShadowTLSAlignment, SrcSize);
5211     }
5212 
5213     Value *GrArgSize = ConstantInt::get(MS.IntptrTy, kAArch64GrArgSize);
5214     Value *VrArgSize = ConstantInt::get(MS.IntptrTy, kAArch64VrArgSize);
5215 
5216     // Instrument va_start, copy va_list shadow from the backup copy of
5217     // the TLS contents.
5218     for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) {
5219       CallInst *OrigInst = VAStartInstrumentationList[i];
5220       NextNodeIRBuilder IRB(OrigInst);
5221 
5222       Value *VAListTag = OrigInst->getArgOperand(0);
5223 
5224       // The variadic ABI for AArch64 creates two areas to save the incoming
5225       // argument registers (one for the 64-bit general registers x0-x7 and
5226       // another for the 128-bit FP/SIMD registers v0-v7).
5227       // We then need to propagate the shadow arguments to both regions,
5228       // 'va::__gr_top + va::__gr_offs' and 'va::__vr_top + va::__vr_offs'.
5229       // The shadow for the remaining arguments goes to 'va::stack'.
5230       // One caveat is that only the unnamed (variadic) arguments need to be
5231       // propagated, while the call-site instrumentation saves 'all' the
5232       // arguments. So to copy the shadow values from the va_arg TLS array
5233       // we need to adjust the offsets for both the GR and VR fields based on
5234       // the __{gr,vr}_offs values (since they are set based on the incoming
5235       // named arguments).
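      // For reference: the field offsets used below (0, 8, 16, 24, 28)
      // correspond to the AAPCS64 va_list layout, roughly (a sketch only):
      //
      //   struct va_list {
      //     void *__stack;   // offset 0
      //     void *__gr_top;  // offset 8
      //     void *__vr_top;  // offset 16
      //     int   __gr_offs; // offset 24
      //     int   __vr_offs; // offset 28
      //   };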
5236       Type *RegSaveAreaPtrTy = IRB.getPtrTy();
5237 
5238       // Read the stack pointer from the va_list.
5239       Value *StackSaveAreaPtr =
5240           IRB.CreateIntToPtr(getVAField64(IRB, VAListTag, 0), RegSaveAreaPtrTy);
5241 
5242       // Read both the __gr_top and __gr_off and add them up.
5243       Value *GrTopSaveAreaPtr = getVAField64(IRB, VAListTag, 8);
5244       Value *GrOffSaveArea = getVAField32(IRB, VAListTag, 24);
5245 
5246       Value *GrRegSaveAreaPtr = IRB.CreateIntToPtr(
5247           IRB.CreateAdd(GrTopSaveAreaPtr, GrOffSaveArea), RegSaveAreaPtrTy);
5248 
5249       // Read both the __vr_top and __vr_off and add them up.
5250       Value *VrTopSaveAreaPtr = getVAField64(IRB, VAListTag, 16);
5251       Value *VrOffSaveArea = getVAField32(IRB, VAListTag, 28);
5252 
5253       Value *VrRegSaveAreaPtr = IRB.CreateIntToPtr(
5254           IRB.CreateAdd(VrTopSaveAreaPtr, VrOffSaveArea), RegSaveAreaPtrTy);
5255 
5256       // We do not know how many named arguments are in use and, at the call
5257       // site, all the arguments were saved.  Since __gr_offs is defined as
5258       // '0 - ((8 - named_gr) * 8)', the idea is to propagate only the variadic
5259       // arguments by skipping the bytes of shadow from the named arguments.
5260       Value *GrRegSaveAreaShadowPtrOff =
5261           IRB.CreateAdd(GrArgSize, GrOffSaveArea);
5262 
5263       Value *GrRegSaveAreaShadowPtr =
5264           MSV.getShadowOriginPtr(GrRegSaveAreaPtr, IRB, IRB.getInt8Ty(),
5265                                  Align(8), /*isStore*/ true)
5266               .first;
5267 
5268       Value *GrSrcPtr =
5269           IRB.CreateInBoundsPtrAdd(VAArgTLSCopy, GrRegSaveAreaShadowPtrOff);
5270       Value *GrCopySize = IRB.CreateSub(GrArgSize, GrRegSaveAreaShadowPtrOff);
5271 
5272       IRB.CreateMemCpy(GrRegSaveAreaShadowPtr, Align(8), GrSrcPtr, Align(8),
5273                        GrCopySize);
5274 
5275       // Again, but for FP/SIMD values.
5276       Value *VrRegSaveAreaShadowPtrOff =
5277           IRB.CreateAdd(VrArgSize, VrOffSaveArea);
5278 
5279       Value *VrRegSaveAreaShadowPtr =
5280           MSV.getShadowOriginPtr(VrRegSaveAreaPtr, IRB, IRB.getInt8Ty(),
5281                                  Align(8), /*isStore*/ true)
5282               .first;
5283 
5284       Value *VrSrcPtr = IRB.CreateInBoundsPtrAdd(
5285           IRB.CreateInBoundsPtrAdd(VAArgTLSCopy,
5286                                    IRB.getInt32(AArch64VrBegOffset)),
5287           VrRegSaveAreaShadowPtrOff);
5288       Value *VrCopySize = IRB.CreateSub(VrArgSize, VrRegSaveAreaShadowPtrOff);
5289 
5290       IRB.CreateMemCpy(VrRegSaveAreaShadowPtr, Align(8), VrSrcPtr, Align(8),
5291                        VrCopySize);
5292 
5293       // And finally for remaining arguments.
5294       Value *StackSaveAreaShadowPtr =
5295           MSV.getShadowOriginPtr(StackSaveAreaPtr, IRB, IRB.getInt8Ty(),
5296                                  Align(16), /*isStore*/ true)
5297               .first;
5298 
5299       Value *StackSrcPtr = IRB.CreateInBoundsPtrAdd(
5300           VAArgTLSCopy, IRB.getInt32(AArch64VAEndOffset));
5301 
5302       IRB.CreateMemCpy(StackSaveAreaShadowPtr, Align(16), StackSrcPtr,
5303                        Align(16), VAArgOverflowSize);
5304     }
5305   }
5306 };
5307 
5308 /// PowerPC64-specific implementation of VarArgHelper.
5309 struct VarArgPowerPC64Helper : public VarArgHelperBase {
5310   AllocaInst *VAArgTLSCopy = nullptr;
5311   Value *VAArgSize = nullptr;
5312 
5313   VarArgPowerPC64Helper(Function &F, MemorySanitizer &MS,
5314                         MemorySanitizerVisitor &MSV)
5315       : VarArgHelperBase(F, MS, MSV, /*VAListTagSize=*/8) {}
5316 
5317   void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
5318     // For PowerPC, we need to deal with the alignment of stack arguments -
5319     // they are mostly aligned to 8 bytes, but vectors and i128 arrays
5320     // are aligned to 16 bytes, and byvals can be aligned to 8 or 16 bytes.
5321     // For that reason, we compute the current offset from the stack pointer
5322     // (which is always properly aligned) and the offset of the first vararg,
5323     // then subtract them.
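    // Purely illustrative example of the arithmetic below: if VAArgOffset is
    // 40 and the next vararg is a 16-byte vector, the offset is first aligned
    // up to 48, and its shadow is stored at offset 48 - VAArgBase within the
    // va_arg TLS array.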
5324     unsigned VAArgBase;
5325     Triple TargetTriple(F.getParent()->getTargetTriple());
5326     // The parameter save area starts 48 bytes from the frame pointer for
5327     // ABIv1 and 32 bytes for ABIv2.  This is usually determined by target
5328     // endianness, but in theory could be overridden by a function attribute.
5329     if (TargetTriple.getArch() == Triple::ppc64)
5330       VAArgBase = 48;
5331     else
5332       VAArgBase = 32;
5333     unsigned VAArgOffset = VAArgBase;
5334     const DataLayout &DL = F.getParent()->getDataLayout();
5335     for (const auto &[ArgNo, A] : llvm::enumerate(CB.args())) {
5336       bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
5337       bool IsByVal = CB.paramHasAttr(ArgNo, Attribute::ByVal);
5338       if (IsByVal) {
5339         assert(A->getType()->isPointerTy());
5340         Type *RealTy = CB.getParamByValType(ArgNo);
5341         uint64_t ArgSize = DL.getTypeAllocSize(RealTy);
5342         Align ArgAlign = CB.getParamAlign(ArgNo).value_or(Align(8));
5343         if (ArgAlign < 8)
5344           ArgAlign = Align(8);
5345         VAArgOffset = alignTo(VAArgOffset, ArgAlign);
5346         if (!IsFixed) {
5347           Value *Base = getShadowPtrForVAArgument(
5348               RealTy, IRB, VAArgOffset - VAArgBase, ArgSize);
5349           if (Base) {
5350             Value *AShadowPtr, *AOriginPtr;
5351             std::tie(AShadowPtr, AOriginPtr) =
5352                 MSV.getShadowOriginPtr(A, IRB, IRB.getInt8Ty(),
5353                                        kShadowTLSAlignment, /*isStore*/ false);
5354 
5355             IRB.CreateMemCpy(Base, kShadowTLSAlignment, AShadowPtr,
5356                              kShadowTLSAlignment, ArgSize);
5357           }
5358         }
5359         VAArgOffset += alignTo(ArgSize, Align(8));
5360       } else {
5361         Value *Base;
5362         uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
5363         Align ArgAlign = Align(8);
5364         if (A->getType()->isArrayTy()) {
5365           // Arrays are aligned to element size, except for long double
5366           // arrays, which are aligned to 8 bytes.
5367           Type *ElementTy = A->getType()->getArrayElementType();
5368           if (!ElementTy->isPPC_FP128Ty())
5369             ArgAlign = Align(DL.getTypeAllocSize(ElementTy));
5370         } else if (A->getType()->isVectorTy()) {
5371           // Vectors are naturally aligned.
5372           ArgAlign = Align(ArgSize);
5373         }
5374         if (ArgAlign < 8)
5375           ArgAlign = Align(8);
5376         VAArgOffset = alignTo(VAArgOffset, ArgAlign);
5377         if (DL.isBigEndian()) {
5378           // Adjust the shadow for arguments with size < 8 to match the
5379           // placement of bits on a big-endian system.
5380           if (ArgSize < 8)
5381             VAArgOffset += (8 - ArgSize);
5382         }
5383         if (!IsFixed) {
5384           Base = getShadowPtrForVAArgument(A->getType(), IRB,
5385                                            VAArgOffset - VAArgBase, ArgSize);
5386           if (Base)
5387             IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
5388         }
5389         VAArgOffset += ArgSize;
5390         VAArgOffset = alignTo(VAArgOffset, Align(8));
5391       }
5392       if (IsFixed)
5393         VAArgBase = VAArgOffset;
5394     }
5395 
5396     Constant *TotalVAArgSize =
5397         ConstantInt::get(IRB.getInt64Ty(), VAArgOffset - VAArgBase);
5398     // Reuse VAArgOverflowSizeTLS as VAArgSizeTLS to avoid creating a new
5399     // class member; here it holds the total size of all varargs.
5400     IRB.CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
5401   }
5402 
5403   void finalizeInstrumentation() override {
5404     assert(!VAArgSize && !VAArgTLSCopy &&
5405            "finalizeInstrumentation called twice");
5406     IRBuilder<> IRB(MSV.FnPrologueEnd);
5407     VAArgSize = IRB.CreateLoad(IRB.getInt64Ty(), MS.VAArgOverflowSizeTLS);
5408     Value *CopySize =
5409         IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, 0), VAArgSize);
5410 
5411     if (!VAStartInstrumentationList.empty()) {
5412       // If there is a va_start in this function, make a backup copy of
5413       // va_arg_tls somewhere in the function entry block.
5414 
5415       VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
5416       VAArgTLSCopy->setAlignment(kShadowTLSAlignment);
5417       IRB.CreateMemSet(VAArgTLSCopy, Constant::getNullValue(IRB.getInt8Ty()),
5418                        CopySize, kShadowTLSAlignment, false);
5419 
5420       Value *SrcSize = IRB.CreateBinaryIntrinsic(
5421           Intrinsic::umin, CopySize,
5422           ConstantInt::get(MS.IntptrTy, kParamTLSSize));
5423       IRB.CreateMemCpy(VAArgTLSCopy, kShadowTLSAlignment, MS.VAArgTLS,
5424                        kShadowTLSAlignment, SrcSize);
5425     }
5426 
5427     // Instrument va_start.
5428     // Copy va_list shadow from the backup copy of the TLS contents.
5429     for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) {
5430       CallInst *OrigInst = VAStartInstrumentationList[i];
5431       NextNodeIRBuilder IRB(OrigInst);
5432       Value *VAListTag = OrigInst->getArgOperand(0);
5433       Type *RegSaveAreaPtrTy = PointerType::getUnqual(*MS.C); // i64*
5434       Value *RegSaveAreaPtrPtr =
5435           IRB.CreateIntToPtr(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
5436                              PointerType::get(RegSaveAreaPtrTy, 0));
5437       Value *RegSaveAreaPtr =
5438           IRB.CreateLoad(RegSaveAreaPtrTy, RegSaveAreaPtrPtr);
5439       Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
5440       const Align Alignment = Align(8);
5441       std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
5442           MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
5443                                  Alignment, /*isStore*/ true);
5444       IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy, Alignment,
5445                        CopySize);
5446     }
5447   }
5448 };
5449 
5450 /// SystemZ-specific implementation of VarArgHelper.
5451 struct VarArgSystemZHelper : public VarArgHelperBase {
5452   static const unsigned SystemZGpOffset = 16;
5453   static const unsigned SystemZGpEndOffset = 56;
5454   static const unsigned SystemZFpOffset = 128;
5455   static const unsigned SystemZFpEndOffset = 160;
5456   static const unsigned SystemZMaxVrArgs = 8;
5457   static const unsigned SystemZRegSaveAreaSize = 160;
5458   static const unsigned SystemZOverflowOffset = 160;
5459   static const unsigned SystemZVAListTagSize = 32;
5460   static const unsigned SystemZOverflowArgAreaPtrOffset = 16;
5461   static const unsigned SystemZRegSaveAreaPtrOffset = 24;
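  // For reference, the offsets above reflect the s390x va_list and register
  // save area layouts, roughly (a sketch, not used directly by the code):
  //
  //   struct __va_list_tag {        // 32 bytes total
  //     long __gpr;                 // offset 0
  //     long __fpr;                 // offset 8
  //     void *__overflow_arg_area;  // offset 16
  //     void *__reg_save_area;      // offset 24
  //   };
  //
  //   Register save area (160 bytes): GPR argument registers r2-r6 are saved
  //   at offsets [16, 56), FPR argument registers f0/f2/f4/f6 at [128, 160).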
5462 
5463   bool IsSoftFloatABI;
5464   AllocaInst *VAArgTLSCopy = nullptr;
5465   AllocaInst *VAArgTLSOriginCopy = nullptr;
5466   Value *VAArgOverflowSize = nullptr;
5467 
5468   enum class ArgKind {
5469     GeneralPurpose,
5470     FloatingPoint,
5471     Vector,
5472     Memory,
5473     Indirect,
5474   };
5475 
5476   enum class ShadowExtension { None, Zero, Sign };
5477 
5478   VarArgSystemZHelper(Function &F, MemorySanitizer &MS,
5479                       MemorySanitizerVisitor &MSV)
5480       : VarArgHelperBase(F, MS, MSV, SystemZVAListTagSize),
5481         IsSoftFloatABI(F.getFnAttribute("use-soft-float").getValueAsBool()) {}
5482 
5483   ArgKind classifyArgument(Type *T) {
5484     // T is a SystemZABIInfo::classifyArgumentType() output, and there are
5485     // only a few possibilities of what it can be. In particular, enums, single
5486     // element structs and large types have already been taken care of.
5487 
5488     // Some i128 and fp128 arguments are converted to pointers only in the
5489     // back end.
5490     if (T->isIntegerTy(128) || T->isFP128Ty())
5491       return ArgKind::Indirect;
5492     if (T->isFloatingPointTy())
5493       return IsSoftFloatABI ? ArgKind::GeneralPurpose : ArgKind::FloatingPoint;
5494     if (T->isIntegerTy() || T->isPointerTy())
5495       return ArgKind::GeneralPurpose;
5496     if (T->isVectorTy())
5497       return ArgKind::Vector;
5498     return ArgKind::Memory;
5499   }
5500 
5501   ShadowExtension getShadowExtension(const CallBase &CB, unsigned ArgNo) {
5502     // ABI says: "One of the simple integer types no more than 64 bits wide.
5503     // ... If such an argument is shorter than 64 bits, replace it by a full
5504     // 64-bit integer representing the same number, using sign or zero
5505     // extension". Shadow for an integer argument has the same type as the
5506     // argument itself, so it can be sign or zero extended as well.
5507     bool ZExt = CB.paramHasAttr(ArgNo, Attribute::ZExt);
5508     bool SExt = CB.paramHasAttr(ArgNo, Attribute::SExt);
5509     if (ZExt) {
5510       assert(!SExt);
5511       return ShadowExtension::Zero;
5512     }
5513     if (SExt) {
5514       assert(!ZExt);
5515       return ShadowExtension::Sign;
5516     }
5517     return ShadowExtension::None;
5518   }
5519 
5520   void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
5521     unsigned GpOffset = SystemZGpOffset;
5522     unsigned FpOffset = SystemZFpOffset;
5523     unsigned VrIndex = 0;
5524     unsigned OverflowOffset = SystemZOverflowOffset;
5525     const DataLayout &DL = F.getParent()->getDataLayout();
5526     for (const auto &[ArgNo, A] : llvm::enumerate(CB.args())) {
5527       bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
5528       // SystemZABIInfo does not produce ByVal parameters.
5529       assert(!CB.paramHasAttr(ArgNo, Attribute::ByVal));
5530       Type *T = A->getType();
5531       ArgKind AK = classifyArgument(T);
5532       if (AK == ArgKind::Indirect) {
5533         T = PointerType::get(T, 0);
5534         AK = ArgKind::GeneralPurpose;
5535       }
5536       if (AK == ArgKind::GeneralPurpose && GpOffset >= SystemZGpEndOffset)
5537         AK = ArgKind::Memory;
5538       if (AK == ArgKind::FloatingPoint && FpOffset >= SystemZFpEndOffset)
5539         AK = ArgKind::Memory;
5540       if (AK == ArgKind::Vector && (VrIndex >= SystemZMaxVrArgs || !IsFixed))
5541         AK = ArgKind::Memory;
5542       Value *ShadowBase = nullptr;
5543       Value *OriginBase = nullptr;
5544       ShadowExtension SE = ShadowExtension::None;
5545       switch (AK) {
5546       case ArgKind::GeneralPurpose: {
5547         // Always keep track of GpOffset, but store shadow only for varargs.
5548         uint64_t ArgSize = 8;
5549         if (GpOffset + ArgSize <= kParamTLSSize) {
5550           if (!IsFixed) {
5551             SE = getShadowExtension(CB, ArgNo);
5552             uint64_t GapSize = 0;
5553             if (SE == ShadowExtension::None) {
5554               uint64_t ArgAllocSize = DL.getTypeAllocSize(T);
5555               assert(ArgAllocSize <= ArgSize);
5556               GapSize = ArgSize - ArgAllocSize;
5557             }
5558             ShadowBase = getShadowAddrForVAArgument(IRB, GpOffset + GapSize);
5559             if (MS.TrackOrigins)
5560               OriginBase = getOriginPtrForVAArgument(IRB, GpOffset + GapSize);
5561           }
5562           GpOffset += ArgSize;
5563         } else {
5564           GpOffset = kParamTLSSize;
5565         }
5566         break;
5567       }
5568       case ArgKind::FloatingPoint: {
5569         // Always keep track of FpOffset, but store shadow only for varargs.
5570         uint64_t ArgSize = 8;
5571         if (FpOffset + ArgSize <= kParamTLSSize) {
5572           if (!IsFixed) {
5573             // PoP says: "A short floating-point datum requires only the
5574             // left-most 32 bit positions of a floating-point register".
5575             // Therefore, in contrast to AK_GeneralPurpose and AK_Memory,
5576             // don't extend shadow and don't mind the gap.
5577             ShadowBase = getShadowAddrForVAArgument(IRB, FpOffset);
5578             if (MS.TrackOrigins)
5579               OriginBase = getOriginPtrForVAArgument(IRB, FpOffset);
5580           }
5581           FpOffset += ArgSize;
5582         } else {
5583           FpOffset = kParamTLSSize;
5584         }
5585         break;
5586       }
5587       case ArgKind::Vector: {
5588         // Keep track of VrIndex. No need to store shadow, since vector varargs
5589         // go through AK_Memory.
5590         assert(IsFixed);
5591         VrIndex++;
5592         break;
5593       }
5594       case ArgKind::Memory: {
5595         // Keep track of OverflowOffset and store shadow only for varargs.
5596         // Ignore fixed args, since we need to copy only the vararg portion of
5597         // the overflow area shadow.
5598         if (!IsFixed) {
5599           uint64_t ArgAllocSize = DL.getTypeAllocSize(T);
5600           uint64_t ArgSize = alignTo(ArgAllocSize, 8);
5601           if (OverflowOffset + ArgSize <= kParamTLSSize) {
5602             SE = getShadowExtension(CB, ArgNo);
5603             uint64_t GapSize =
5604                 SE == ShadowExtension::None ? ArgSize - ArgAllocSize : 0;
5605             ShadowBase =
5606                 getShadowAddrForVAArgument(IRB, OverflowOffset + GapSize);
5607             if (MS.TrackOrigins)
5608               OriginBase =
5609                   getOriginPtrForVAArgument(IRB, OverflowOffset + GapSize);
5610             OverflowOffset += ArgSize;
5611           } else {
5612             OverflowOffset = kParamTLSSize;
5613           }
5614         }
5615         break;
5616       }
5617       case ArgKind::Indirect:
5618         llvm_unreachable("Indirect must be converted to GeneralPurpose");
5619       }
5620       if (ShadowBase == nullptr)
5621         continue;
5622       Value *Shadow = MSV.getShadow(A);
5623       if (SE != ShadowExtension::None)
5624         Shadow = MSV.CreateShadowCast(IRB, Shadow, IRB.getInt64Ty(),
5625                                       /*Signed*/ SE == ShadowExtension::Sign);
5626       ShadowBase = IRB.CreateIntToPtr(
5627           ShadowBase, PointerType::get(Shadow->getType(), 0), "_msarg_va_s");
5628       IRB.CreateStore(Shadow, ShadowBase);
5629       if (MS.TrackOrigins) {
5630         Value *Origin = MSV.getOrigin(A);
5631         TypeSize StoreSize = DL.getTypeStoreSize(Shadow->getType());
5632         MSV.paintOrigin(IRB, Origin, OriginBase, StoreSize,
5633                         kMinOriginAlignment);
5634       }
5635     }
5636     Constant *OverflowSize = ConstantInt::get(
5637         IRB.getInt64Ty(), OverflowOffset - SystemZOverflowOffset);
5638     IRB.CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
5639   }
5640 
5641   void copyRegSaveArea(IRBuilder<> &IRB, Value *VAListTag) {
5642     Type *RegSaveAreaPtrTy = PointerType::getUnqual(*MS.C); // i64*
5643     Value *RegSaveAreaPtrPtr = IRB.CreateIntToPtr(
5644         IRB.CreateAdd(
5645             IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
5646             ConstantInt::get(MS.IntptrTy, SystemZRegSaveAreaPtrOffset)),
5647         PointerType::get(RegSaveAreaPtrTy, 0));
5648     Value *RegSaveAreaPtr = IRB.CreateLoad(RegSaveAreaPtrTy, RegSaveAreaPtrPtr);
5649     Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
5650     const Align Alignment = Align(8);
5651     std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
5652         MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(), Alignment,
5653                                /*isStore*/ true);
5654     // TODO(iii): copy only fragments filled by visitCallBase()
5655     // TODO(iii): support packed-stack && !use-soft-float
5656     // For use-soft-float functions, it is enough to copy just the GPRs.
5657     unsigned RegSaveAreaSize =
5658         IsSoftFloatABI ? SystemZGpEndOffset : SystemZRegSaveAreaSize;
5659     IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy, Alignment,
5660                      RegSaveAreaSize);
5661     if (MS.TrackOrigins)
5662       IRB.CreateMemCpy(RegSaveAreaOriginPtr, Alignment, VAArgTLSOriginCopy,
5663                        Alignment, RegSaveAreaSize);
5664   }
5665 
5666   // FIXME: This implementation limits OverflowOffset to kParamTLSSize, so we
5667   // don't know real overflow size and can't clear shadow beyond kParamTLSSize.
5668   void copyOverflowArea(IRBuilder<> &IRB, Value *VAListTag) {
5669     Type *OverflowArgAreaPtrTy = PointerType::getUnqual(*MS.C); // i64*
5670     Value *OverflowArgAreaPtrPtr = IRB.CreateIntToPtr(
5671         IRB.CreateAdd(
5672             IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
5673             ConstantInt::get(MS.IntptrTy, SystemZOverflowArgAreaPtrOffset)),
5674         PointerType::get(OverflowArgAreaPtrTy, 0));
5675     Value *OverflowArgAreaPtr =
5676         IRB.CreateLoad(OverflowArgAreaPtrTy, OverflowArgAreaPtrPtr);
5677     Value *OverflowArgAreaShadowPtr, *OverflowArgAreaOriginPtr;
5678     const Align Alignment = Align(8);
5679     std::tie(OverflowArgAreaShadowPtr, OverflowArgAreaOriginPtr) =
5680         MSV.getShadowOriginPtr(OverflowArgAreaPtr, IRB, IRB.getInt8Ty(),
5681                                Alignment, /*isStore*/ true);
5682     Value *SrcPtr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), VAArgTLSCopy,
5683                                            SystemZOverflowOffset);
5684     IRB.CreateMemCpy(OverflowArgAreaShadowPtr, Alignment, SrcPtr, Alignment,
5685                      VAArgOverflowSize);
5686     if (MS.TrackOrigins) {
5687       SrcPtr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), VAArgTLSOriginCopy,
5688                                       SystemZOverflowOffset);
5689       IRB.CreateMemCpy(OverflowArgAreaOriginPtr, Alignment, SrcPtr, Alignment,
5690                        VAArgOverflowSize);
5691     }
5692   }
5693 
5694   void finalizeInstrumentation() override {
5695     assert(!VAArgOverflowSize && !VAArgTLSCopy &&
5696            "finalizeInstrumentation called twice");
5697     if (!VAStartInstrumentationList.empty()) {
5698       // If there is a va_start in this function, make a backup copy of
5699       // va_arg_tls somewhere in the function entry block.
5700       IRBuilder<> IRB(MSV.FnPrologueEnd);
5701       VAArgOverflowSize =
5702           IRB.CreateLoad(IRB.getInt64Ty(), MS.VAArgOverflowSizeTLS);
5703       Value *CopySize =
5704           IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, SystemZOverflowOffset),
5705                         VAArgOverflowSize);
5706       VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
5707       VAArgTLSCopy->setAlignment(kShadowTLSAlignment);
5708       IRB.CreateMemSet(VAArgTLSCopy, Constant::getNullValue(IRB.getInt8Ty()),
5709                        CopySize, kShadowTLSAlignment, false);
5710 
5711       Value *SrcSize = IRB.CreateBinaryIntrinsic(
5712           Intrinsic::umin, CopySize,
5713           ConstantInt::get(MS.IntptrTy, kParamTLSSize));
5714       IRB.CreateMemCpy(VAArgTLSCopy, kShadowTLSAlignment, MS.VAArgTLS,
5715                        kShadowTLSAlignment, SrcSize);
5716       if (MS.TrackOrigins) {
5717         VAArgTLSOriginCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
5718         VAArgTLSOriginCopy->setAlignment(kShadowTLSAlignment);
5719         IRB.CreateMemCpy(VAArgTLSOriginCopy, kShadowTLSAlignment,
5720                          MS.VAArgOriginTLS, kShadowTLSAlignment, SrcSize);
5721       }
5722     }
5723 
5724     // Instrument va_start.
5725     // Copy va_list shadow from the backup copy of the TLS contents.
5726     for (size_t VaStartNo = 0, VaStartNum = VAStartInstrumentationList.size();
5727          VaStartNo < VaStartNum; VaStartNo++) {
5728       CallInst *OrigInst = VAStartInstrumentationList[VaStartNo];
5729       NextNodeIRBuilder IRB(OrigInst);
5730       Value *VAListTag = OrigInst->getArgOperand(0);
5731       copyRegSaveArea(IRB, VAListTag);
5732       copyOverflowArea(IRB, VAListTag);
5733     }
5734   }
5735 };
5736 
5737 // LoongArch64 is not MIPS, but its current varargs calling convention
5738 // matches that of MIPS64.
5739 using VarArgLoongArch64Helper = VarArgMIPS64Helper;
5740 
5741 /// A no-op implementation of VarArgHelper.
5742 struct VarArgNoOpHelper : public VarArgHelper {
5743   VarArgNoOpHelper(Function &F, MemorySanitizer &MS,
5744                    MemorySanitizerVisitor &MSV) {}
5745 
5746   void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {}
5747 
5748   void visitVAStartInst(VAStartInst &I) override {}
5749 
5750   void visitVACopyInst(VACopyInst &I) override {}
5751 
5752   void finalizeInstrumentation() override {}
5753 };
5754 
5755 } // end anonymous namespace
5756 
5757 static VarArgHelper *CreateVarArgHelper(Function &Func, MemorySanitizer &Msan,
5758                                         MemorySanitizerVisitor &Visitor) {
5759   // VarArg handling is only implemented on the platforms below. False
5760   // positives are possible on other platforms.
5761   Triple TargetTriple(Func.getParent()->getTargetTriple());
5762   if (TargetTriple.getArch() == Triple::x86_64)
5763     return new VarArgAMD64Helper(Func, Msan, Visitor);
5764   else if (TargetTriple.isMIPS64())
5765     return new VarArgMIPS64Helper(Func, Msan, Visitor);
5766   else if (TargetTriple.getArch() == Triple::aarch64)
5767     return new VarArgAArch64Helper(Func, Msan, Visitor);
5768   else if (TargetTriple.getArch() == Triple::ppc64 ||
5769            TargetTriple.getArch() == Triple::ppc64le)
5770     return new VarArgPowerPC64Helper(Func, Msan, Visitor);
5771   else if (TargetTriple.getArch() == Triple::systemz)
5772     return new VarArgSystemZHelper(Func, Msan, Visitor);
5773   else if (TargetTriple.isLoongArch64())
5774     return new VarArgLoongArch64Helper(Func, Msan, Visitor);
5775   else
5776     return new VarArgNoOpHelper(Func, Msan, Visitor);
5777 }
5778 
5779 bool MemorySanitizer::sanitizeFunction(Function &F, TargetLibraryInfo &TLI) {
5780   if (!CompileKernel && F.getName() == kMsanModuleCtorName)
5781     return false;
5782 
5783   if (F.hasFnAttribute(Attribute::DisableSanitizerInstrumentation))
5784     return false;
5785 
5786   MemorySanitizerVisitor Visitor(F, *this, TLI);
5787 
5788   // Clear out memory attributes.
5789   AttributeMask B;
5790   B.addAttribute(Attribute::Memory).addAttribute(Attribute::Speculatable);
5791   F.removeFnAttrs(B);
5792 
5793   return Visitor.runOnFunction();
5794 }
5795