//===- LowerTypeTests.cpp - type metadata lowering pass -------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass lowers type metadata and calls to the llvm.type.test intrinsic.
// It also ensures that globals are properly laid out for the
// llvm.icall.branch.funnel intrinsic.
// See http://llvm.org/docs/TypeMetadata.html for more information.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/IPO/LowerTypeTests.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/EquivalenceClasses.h"
#include "llvm/ADT/PointerUnion.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/TinyPtrVector.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Analysis/TypeMetadataUtils.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalObject.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/ModuleSummaryIndex.h"
#include "llvm/IR/ModuleSummaryIndexYAML.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/TrailingObjects.h"
#include "llvm/Support/YAMLTraits.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/IPO.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <memory>
#include <set>
#include <string>
#include <system_error>
#include <utility>
#include <vector>

using namespace llvm;
using namespace lowertypetests;

#define DEBUG_TYPE "lowertypetests"

STATISTIC(ByteArraySizeBits, "Byte array size in bits");
STATISTIC(ByteArraySizeBytes, "Byte array size in bytes");
STATISTIC(NumByteArraysCreated, "Number of byte arrays created");
STATISTIC(NumTypeTestCallsLowered, "Number of type test calls lowered");
STATISTIC(NumTypeIdDisjointSets, "Number of disjoint sets of type identifiers");

static cl::opt<bool> AvoidReuse(
    "lowertypetests-avoid-reuse",
    cl::desc("Try to avoid reuse of byte array addresses using aliases"),
    cl::Hidden, cl::init(true));

static cl::opt<PassSummaryAction> ClSummaryAction(
    "lowertypetests-summary-action",
    cl::desc("What to do with the summary when running this pass"),
    cl::values(clEnumValN(PassSummaryAction::None, "none", "Do nothing"),
               clEnumValN(PassSummaryAction::Import, "import",
                          "Import typeid resolutions from summary and globals"),
               clEnumValN(PassSummaryAction::Export, "export",
                          "Export typeid resolutions to summary and globals")),
    cl::Hidden);

static cl::opt<std::string> ClReadSummary(
    "lowertypetests-read-summary",
    cl::desc("Read summary from given YAML file before running pass"),
    cl::Hidden);

static cl::opt<std::string> ClWriteSummary(
    "lowertypetests-write-summary",
    cl::desc("Write summary to given YAML file after running pass"),
    cl::Hidden);

static cl::opt<bool>
    ClDropTypeTests("lowertypetests-drop-type-tests",
                    cl::desc("Simply drop type test assume sequences"),
                    cl::Hidden, cl::init(false));

bool BitSetInfo::containsGlobalOffset(uint64_t Offset) const {
  if (Offset < ByteOffset)
    return false;

  if ((Offset - ByteOffset) % (uint64_t(1) << AlignLog2) != 0)
    return false;

  uint64_t BitOffset = (Offset - ByteOffset) >> AlignLog2;
  if (BitOffset >= BitSize)
    return false;

  return Bits.count(BitOffset);
}

void BitSetInfo::print(raw_ostream &OS) const {
  OS << "offset " << ByteOffset << " size " << BitSize << " align "
     << (uint64_t(1) << AlignLog2);

  if (isAllOnes()) {
    OS << " all-ones\n";
    return;
  }

  OS << " { ";
  for (uint64_t B : Bits)
    OS << B << ' ';
  OS << "}\n";
}

BitSetInfo BitSetBuilder::build() {
  if (Min > Max)
    Min = 0;

  // Normalize each offset against the minimum observed offset, and compute
  // the bitwise OR of each of the offsets. The number of trailing zeros
  // in the mask gives us the log2 of the alignment of all offsets, which
  // allows us to compress the bitset by only storing one bit per aligned
  // address.
  uint64_t Mask = 0;
  for (uint64_t &Offset : Offsets) {
    Offset -= Min;
    Mask |= Offset;
  }

  BitSetInfo BSI;
  BSI.ByteOffset = Min;

  BSI.AlignLog2 = 0;
  if (Mask != 0)
    BSI.AlignLog2 = countTrailingZeros(Mask);

  // Build the compressed bitset while normalizing the offsets against the
  // computed alignment.
  BSI.BitSize = ((Max - Min) >> BSI.AlignLog2) + 1;
  for (uint64_t Offset : Offsets) {
    Offset >>= BSI.AlignLog2;
    BSI.Bits.insert(Offset);
  }

  return BSI;
}
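
// A worked example of the compression above (illustrative values, not taken
// from any particular module): given the offsets {8, 16, 24}, Min is 8 and
// the normalized offsets are {0, 8, 16}, so Mask == 0b11000 and AlignLog2 ==
// 3. The resulting bitset has ByteOffset == 8, BitSize == 3 and stores bits
// {0, 1, 2}, i.e. one bit per 8-byte-aligned address in the covered range.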

void GlobalLayoutBuilder::addFragment(const std::set<uint64_t> &F) {
  // Create a new fragment to hold the layout for F.
  Fragments.emplace_back();
  std::vector<uint64_t> &Fragment = Fragments.back();
  uint64_t FragmentIndex = Fragments.size() - 1;

  for (auto ObjIndex : F) {
    uint64_t OldFragmentIndex = FragmentMap[ObjIndex];
    if (OldFragmentIndex == 0) {
      // We haven't seen this object index before, so just add it to the current
      // fragment.
      Fragment.push_back(ObjIndex);
    } else {
      // This index belongs to an existing fragment. Copy the elements of the
      // old fragment into this one and clear the old fragment. We don't update
      // the fragment map just yet; this ensures that any further references to
      // indices from the old fragment in this fragment do not insert any more
      // indices.
      std::vector<uint64_t> &OldFragment = Fragments[OldFragmentIndex];
      llvm::append_range(Fragment, OldFragment);
      OldFragment.clear();
    }
  }

  // Update the fragment map to point our object indices to this fragment.
  for (uint64_t ObjIndex : Fragment)
    FragmentMap[ObjIndex] = FragmentIndex;
}

void ByteArrayBuilder::allocate(const std::set<uint64_t> &Bits,
                                uint64_t BitSize, uint64_t &AllocByteOffset,
                                uint8_t &AllocMask) {
  // Find the smallest current allocation.
  unsigned Bit = 0;
  for (unsigned I = 1; I != BitsPerByte; ++I)
    if (BitAllocs[I] < BitAllocs[Bit])
      Bit = I;

  AllocByteOffset = BitAllocs[Bit];

  // Add our size to it.
  unsigned ReqSize = AllocByteOffset + BitSize;
  BitAllocs[Bit] = ReqSize;
  if (Bytes.size() < ReqSize)
    Bytes.resize(ReqSize);

  // Set our bits.
  AllocMask = 1 << Bit;
  for (uint64_t B : Bits)
    Bytes[AllocByteOffset + B] |= AllocMask;
}
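
// An illustration of the allocation strategy above (hypothetical sizes): each
// byte of the array can hold one bit from up to eight different bitsets, one
// per bit position. Allocating bitsets of 16, 4 and 8 bits in that order
// places the 16-bit set at bit position 0 over bytes [0,16), the 4-bit set at
// bit position 1 over bytes [0,4), and the 8-bit set at bit position 2 over
// bytes [0,8), so all three share the same leading bytes of the array.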

bool lowertypetests::isJumpTableCanonical(Function *F) {
  if (F->isDeclarationForLinker())
    return false;
  auto *CI = mdconst::extract_or_null<ConstantInt>(
      F->getParent()->getModuleFlag("CFI Canonical Jump Tables"));
  if (!CI || CI->getZExtValue() != 0)
    return true;
  return F->hasFnAttribute("cfi-canonical-jump-table");
}

namespace {

struct ByteArrayInfo {
  std::set<uint64_t> Bits;
  uint64_t BitSize;
  GlobalVariable *ByteArray;
  GlobalVariable *MaskGlobal;
  uint8_t *MaskPtr = nullptr;
};

/// A POD-like structure that we use to store a global reference together with
/// its metadata types. In this pass we frequently need to query the set of
/// metadata types referenced by a global, which at the IR level is an expensive
/// operation involving a map lookup; this data structure helps to reduce the
/// number of times we need to do this lookup.
class GlobalTypeMember final : TrailingObjects<GlobalTypeMember, MDNode *> {
  friend TrailingObjects;

  GlobalObject *GO;
  size_t NTypes;

  // For functions: true if the jump table is canonical. This essentially means
  // whether the canonical address (i.e. the symbol table entry) of the function
  // is provided by the local jump table. This is normally the same as whether
  // the function is defined locally, but if canonical jump tables are disabled
  // by the user then the jump table never provides a canonical definition.
  bool IsJumpTableCanonical;

  // For functions: true if this function is either defined or used in a ThinLTO
  // module and its jump table entry needs to be exported to ThinLTO backends.
  bool IsExported;

  size_t numTrailingObjects(OverloadToken<MDNode *>) const { return NTypes; }

public:
  static GlobalTypeMember *create(BumpPtrAllocator &Alloc, GlobalObject *GO,
                                  bool IsJumpTableCanonical, bool IsExported,
                                  ArrayRef<MDNode *> Types) {
    auto *GTM = static_cast<GlobalTypeMember *>(Alloc.Allocate(
        totalSizeToAlloc<MDNode *>(Types.size()), alignof(GlobalTypeMember)));
    GTM->GO = GO;
    GTM->NTypes = Types.size();
    GTM->IsJumpTableCanonical = IsJumpTableCanonical;
    GTM->IsExported = IsExported;
    std::uninitialized_copy(Types.begin(), Types.end(),
                            GTM->getTrailingObjects<MDNode *>());
    return GTM;
  }

  GlobalObject *getGlobal() const {
    return GO;
  }

  bool isJumpTableCanonical() const {
    return IsJumpTableCanonical;
  }

  bool isExported() const {
    return IsExported;
  }

  ArrayRef<MDNode *> types() const {
    return ArrayRef(getTrailingObjects<MDNode *>(), NTypes);
  }
};

struct ICallBranchFunnel final
    : TrailingObjects<ICallBranchFunnel, GlobalTypeMember *> {
  static ICallBranchFunnel *create(BumpPtrAllocator &Alloc, CallInst *CI,
                                   ArrayRef<GlobalTypeMember *> Targets,
                                   unsigned UniqueId) {
    auto *Call = static_cast<ICallBranchFunnel *>(
        Alloc.Allocate(totalSizeToAlloc<GlobalTypeMember *>(Targets.size()),
                       alignof(ICallBranchFunnel)));
    Call->CI = CI;
    Call->UniqueId = UniqueId;
    Call->NTargets = Targets.size();
    std::uninitialized_copy(Targets.begin(), Targets.end(),
                            Call->getTrailingObjects<GlobalTypeMember *>());
    return Call;
  }

  CallInst *CI;
  ArrayRef<GlobalTypeMember *> targets() const {
    return ArrayRef(getTrailingObjects<GlobalTypeMember *>(), NTargets);
  }

  unsigned UniqueId;

private:
  size_t NTargets;
};

struct ScopedSaveAliaseesAndUsed {
  Module &M;
  SmallVector<GlobalValue *, 4> Used, CompilerUsed;
  std::vector<std::pair<GlobalAlias *, Function *>> FunctionAliases;
  std::vector<std::pair<GlobalIFunc *, Function *>> ResolverIFuncs;

  ScopedSaveAliaseesAndUsed(Module &M) : M(M) {
    // The users of this class want to replace all function references except
    // for aliases and llvm.used/llvm.compiler.used with references to a jump
    // table. We avoid replacing aliases in order to avoid introducing a double
    // indirection (or an alias pointing to a declaration in ThinLTO mode), and
    // we avoid replacing llvm.used/llvm.compiler.used because these global
    // variables describe properties of the global, not the jump table (besides,
    // references at an offset into the jump table in llvm.used are invalid).
    // Unfortunately, LLVM doesn't have a "RAUW except for these (possibly
    // indirect) users", so what we do is save the list of globals referenced by
    // llvm.used/llvm.compiler.used and aliases, erase the used lists, let RAUW
    // replace the aliasees and then set them back to their original values at
    // the end.
    if (GlobalVariable *GV = collectUsedGlobalVariables(M, Used, false))
      GV->eraseFromParent();
    if (GlobalVariable *GV = collectUsedGlobalVariables(M, CompilerUsed, true))
      GV->eraseFromParent();

    for (auto &GA : M.aliases()) {
      // FIXME: This should look past all aliases not just interposable ones,
      // see discussion on D65118.
      if (auto *F = dyn_cast<Function>(GA.getAliasee()->stripPointerCasts()))
        FunctionAliases.push_back({&GA, F});
    }

    for (auto &GI : M.ifuncs())
      if (auto *F = dyn_cast<Function>(GI.getResolver()->stripPointerCasts()))
        ResolverIFuncs.push_back({&GI, F});
  }

  ~ScopedSaveAliaseesAndUsed() {
    appendToUsed(M, Used);
    appendToCompilerUsed(M, CompilerUsed);

    for (auto P : FunctionAliases)
      P.first->setAliasee(
          ConstantExpr::getBitCast(P.second, P.first->getType()));

    for (auto P : ResolverIFuncs) {
      // This does not preserve pointer casts that may have been stripped by the
      // constructor, but the resolver's type is different from that of the
      // ifunc anyway.
      P.first->setResolver(P.second);
    }
  }
};

class LowerTypeTestsModule {
  Module &M;

  ModuleSummaryIndex *ExportSummary;
  const ModuleSummaryIndex *ImportSummary;
  // Set when the client has invoked this to simply drop all type test assume
  // sequences.
  bool DropTypeTests;

  Triple::ArchType Arch;
  Triple::OSType OS;
  Triple::ObjectFormatType ObjectFormat;

  IntegerType *Int1Ty = Type::getInt1Ty(M.getContext());
  IntegerType *Int8Ty = Type::getInt8Ty(M.getContext());
  PointerType *Int8PtrTy = Type::getInt8PtrTy(M.getContext());
  ArrayType *Int8Arr0Ty = ArrayType::get(Type::getInt8Ty(M.getContext()), 0);
  IntegerType *Int32Ty = Type::getInt32Ty(M.getContext());
  PointerType *Int32PtrTy = PointerType::getUnqual(Int32Ty);
  IntegerType *Int64Ty = Type::getInt64Ty(M.getContext());
  IntegerType *IntPtrTy = M.getDataLayout().getIntPtrType(M.getContext(), 0);

  // Indirect function call index assignment counter for WebAssembly
  uint64_t IndirectIndex = 1;

  // Mapping from type identifiers to the call sites that test them, as well as
  // whether the type identifier needs to be exported to ThinLTO backends as
  // part of the regular LTO phase of the ThinLTO pipeline (see exportTypeId).
  struct TypeIdUserInfo {
    std::vector<CallInst *> CallSites;
    bool IsExported = false;
  };
  DenseMap<Metadata *, TypeIdUserInfo> TypeIdUsers;

  /// This structure describes how to lower type tests for a particular type
  /// identifier. It is either built directly from the global analysis (during
  /// regular LTO or the regular LTO phase of ThinLTO), or indirectly using type
  /// identifier summaries and external symbol references (in ThinLTO backends).
  struct TypeIdLowering {
    TypeTestResolution::Kind TheKind = TypeTestResolution::Unsat;

    /// All except Unsat: the start address within the combined global.
    Constant *OffsetedGlobal;

    /// ByteArray, Inline, AllOnes: log2 of the required global alignment
    /// relative to the start address.
    Constant *AlignLog2;

    /// ByteArray, Inline, AllOnes: one less than the size of the memory region
    /// covering members of this type identifier as a multiple of 2^AlignLog2.
    Constant *SizeM1;

    /// ByteArray: the byte array to test the address against.
    Constant *TheByteArray;

    /// ByteArray: the bit mask to apply to bytes loaded from the byte array.
    Constant *BitMask;

    /// Inline: the bit mask to test the address against.
    Constant *InlineBits;
  };

  std::vector<ByteArrayInfo> ByteArrayInfos;

  Function *WeakInitializerFn = nullptr;

  bool shouldExportConstantsAsAbsoluteSymbols();
  uint8_t *exportTypeId(StringRef TypeId, const TypeIdLowering &TIL);
  TypeIdLowering importTypeId(StringRef TypeId);
  void importTypeTest(CallInst *CI);
  void importFunction(Function *F, bool isJumpTableCanonical,
                      std::vector<GlobalAlias *> &AliasesToErase);

  BitSetInfo
  buildBitSet(Metadata *TypeId,
              const DenseMap<GlobalTypeMember *, uint64_t> &GlobalLayout);
  ByteArrayInfo *createByteArray(BitSetInfo &BSI);
  void allocateByteArrays();
  Value *createBitSetTest(IRBuilder<> &B, const TypeIdLowering &TIL,
                          Value *BitOffset);
  void lowerTypeTestCalls(
      ArrayRef<Metadata *> TypeIds, Constant *CombinedGlobalAddr,
      const DenseMap<GlobalTypeMember *, uint64_t> &GlobalLayout);
  Value *lowerTypeTestCall(Metadata *TypeId, CallInst *CI,
                           const TypeIdLowering &TIL);

  void buildBitSetsFromGlobalVariables(ArrayRef<Metadata *> TypeIds,
                                       ArrayRef<GlobalTypeMember *> Globals);
  unsigned getJumpTableEntrySize();
  Type *getJumpTableEntryType();
  void createJumpTableEntry(raw_ostream &AsmOS, raw_ostream &ConstraintOS,
                            Triple::ArchType JumpTableArch,
                            SmallVectorImpl<Value *> &AsmArgs, Function *Dest);
  void verifyTypeMDNode(GlobalObject *GO, MDNode *Type);
  void buildBitSetsFromFunctions(ArrayRef<Metadata *> TypeIds,
                                 ArrayRef<GlobalTypeMember *> Functions);
  void buildBitSetsFromFunctionsNative(ArrayRef<Metadata *> TypeIds,
                                       ArrayRef<GlobalTypeMember *> Functions);
  void buildBitSetsFromFunctionsWASM(ArrayRef<Metadata *> TypeIds,
                                     ArrayRef<GlobalTypeMember *> Functions);
  void
  buildBitSetsFromDisjointSet(ArrayRef<Metadata *> TypeIds,
                              ArrayRef<GlobalTypeMember *> Globals,
                              ArrayRef<ICallBranchFunnel *> ICallBranchFunnels);

  void replaceWeakDeclarationWithJumpTablePtr(Function *F, Constant *JT,
                                              bool IsJumpTableCanonical);
  void moveInitializerToModuleConstructor(GlobalVariable *GV);
  void findGlobalVariableUsersOf(Constant *C,
                                 SmallSetVector<GlobalVariable *, 8> &Out);

  void createJumpTable(Function *F, ArrayRef<GlobalTypeMember *> Functions);

  /// replaceCfiUses - Go through the use list of Old and make each use point
  /// to New instead. Unlike replaceAllUsesWith, this function skips blockaddr
  /// and direct call uses.
  void replaceCfiUses(Function *Old, Value *New, bool IsJumpTableCanonical);

  /// replaceDirectCalls - Go through the use list of Old and replace each use
  /// that is a direct function call.
  void replaceDirectCalls(Value *Old, Value *New);

public:
  LowerTypeTestsModule(Module &M, ModuleSummaryIndex *ExportSummary,
                       const ModuleSummaryIndex *ImportSummary,
                       bool DropTypeTests);

  bool lower();

  // Lower the module using the action and summary passed as command line
  // arguments. For testing purposes only.
  static bool runForTesting(Module &M);
};
} // end anonymous namespace

/// Build a bit set for TypeId using the object layouts in
/// GlobalLayout.
BitSetInfo LowerTypeTestsModule::buildBitSet(
    Metadata *TypeId,
    const DenseMap<GlobalTypeMember *, uint64_t> &GlobalLayout) {
  BitSetBuilder BSB;

  // Compute the byte offset of each address associated with this type
  // identifier.
  for (const auto &GlobalAndOffset : GlobalLayout) {
    for (MDNode *Type : GlobalAndOffset.first->types()) {
      if (Type->getOperand(1) != TypeId)
        continue;
      uint64_t Offset =
          cast<ConstantInt>(
              cast<ConstantAsMetadata>(Type->getOperand(0))->getValue())
              ->getZExtValue();
      BSB.addOffset(GlobalAndOffset.second + Offset);
    }
  }

  return BSB.build();
}
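
// For reference, the type metadata consumed above looks like this in IR
// (an illustrative example, not from any particular module):
//
//   @vt = private constant [3 x i8*] [...], !type !0
//   !0 = !{i64 16, !"_ZTS1A"}
//
// where operand 0 is the byte offset of the address point within the global
// and operand 1 is the type identifier.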

/// Build a test that bit BitOffset mod sizeof(Bits)*8 is set in
/// Bits. Instruction selection matches this pattern to the bt instruction
/// on x86.
static Value *createMaskedBitTest(IRBuilder<> &B, Value *Bits,
                                  Value *BitOffset) {
  auto BitsType = cast<IntegerType>(Bits->getType());
  unsigned BitWidth = BitsType->getBitWidth();

  BitOffset = B.CreateZExtOrTrunc(BitOffset, BitsType);
  Value *BitIndex =
      B.CreateAnd(BitOffset, ConstantInt::get(BitsType, BitWidth - 1));
  Value *BitMask = B.CreateShl(ConstantInt::get(BitsType, 1), BitIndex);
  Value *MaskedBits = B.CreateAnd(Bits, BitMask);
  return B.CreateICmpNE(MaskedBits, ConstantInt::get(BitsType, 0));
}
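
// Worked example (illustrative numbers): with a 32-bit Bits value and
// BitOffset == 37, masking with BitWidth - 1 == 31 yields BitIndex == 5, so
// the sequence computes (Bits & (1 << 5)) != 0, exactly the semantics of a
// bt-style modulo-width bit test.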

ByteArrayInfo *LowerTypeTestsModule::createByteArray(BitSetInfo &BSI) {
  // Create globals to stand in for byte arrays and masks. These never actually
  // get initialized; we RAUW and erase them later in allocateByteArrays() once
  // we know the offset and mask to use.
  auto ByteArrayGlobal = new GlobalVariable(
      M, Int8Ty, /*isConstant=*/true, GlobalValue::PrivateLinkage, nullptr);
  auto MaskGlobal = new GlobalVariable(M, Int8Ty, /*isConstant=*/true,
                                       GlobalValue::PrivateLinkage, nullptr);

  ByteArrayInfos.emplace_back();
  ByteArrayInfo *BAI = &ByteArrayInfos.back();

  BAI->Bits = BSI.Bits;
  BAI->BitSize = BSI.BitSize;
  BAI->ByteArray = ByteArrayGlobal;
  BAI->MaskGlobal = MaskGlobal;
  return BAI;
}

void LowerTypeTestsModule::allocateByteArrays() {
  llvm::stable_sort(ByteArrayInfos,
                    [](const ByteArrayInfo &BAI1, const ByteArrayInfo &BAI2) {
                      return BAI1.BitSize > BAI2.BitSize;
                    });

  std::vector<uint64_t> ByteArrayOffsets(ByteArrayInfos.size());

  ByteArrayBuilder BAB;
  for (unsigned I = 0; I != ByteArrayInfos.size(); ++I) {
    ByteArrayInfo *BAI = &ByteArrayInfos[I];

    uint8_t Mask;
    BAB.allocate(BAI->Bits, BAI->BitSize, ByteArrayOffsets[I], Mask);

    BAI->MaskGlobal->replaceAllUsesWith(
        ConstantExpr::getIntToPtr(ConstantInt::get(Int8Ty, Mask), Int8PtrTy));
    BAI->MaskGlobal->eraseFromParent();
    if (BAI->MaskPtr)
      *BAI->MaskPtr = Mask;
  }

  Constant *ByteArrayConst = ConstantDataArray::get(M.getContext(), BAB.Bytes);
  auto ByteArray =
      new GlobalVariable(M, ByteArrayConst->getType(), /*isConstant=*/true,
                         GlobalValue::PrivateLinkage, ByteArrayConst);

  for (unsigned I = 0; I != ByteArrayInfos.size(); ++I) {
    ByteArrayInfo *BAI = &ByteArrayInfos[I];

    Constant *Idxs[] = {ConstantInt::get(IntPtrTy, 0),
                        ConstantInt::get(IntPtrTy, ByteArrayOffsets[I])};
    Constant *GEP = ConstantExpr::getInBoundsGetElementPtr(
        ByteArrayConst->getType(), ByteArray, Idxs);

    // Create an alias instead of RAUW'ing the gep directly. On x86 this ensures
    // that the pc-relative displacement is folded into the lea instead of the
    // test instruction getting another displacement.
    GlobalAlias *Alias = GlobalAlias::create(
        Int8Ty, 0, GlobalValue::PrivateLinkage, "bits", GEP, &M);
    BAI->ByteArray->replaceAllUsesWith(Alias);
    BAI->ByteArray->eraseFromParent();
  }

  ByteArraySizeBits = BAB.BitAllocs[0] + BAB.BitAllocs[1] + BAB.BitAllocs[2] +
                      BAB.BitAllocs[3] + BAB.BitAllocs[4] + BAB.BitAllocs[5] +
                      BAB.BitAllocs[6] + BAB.BitAllocs[7];
  ByteArraySizeBytes = BAB.Bytes.size();
}

/// Build a test that bit BitOffset is set in the type identifier that was
/// lowered to TIL, which must be either an Inline or a ByteArray.
Value *LowerTypeTestsModule::createBitSetTest(IRBuilder<> &B,
                                              const TypeIdLowering &TIL,
                                              Value *BitOffset) {
  if (TIL.TheKind == TypeTestResolution::Inline) {
    // If the bit set is sufficiently small, we can avoid a load by bit testing
    // a constant.
    return createMaskedBitTest(B, TIL.InlineBits, BitOffset);
  } else {
    Constant *ByteArray = TIL.TheByteArray;
    if (AvoidReuse && !ImportSummary) {
      // Each use of the byte array uses a different alias. This makes the
      // backend less likely to reuse previously computed byte array addresses,
      // improving the security of the CFI mechanism based on this pass.
      // This won't work when importing because TheByteArray is external.
      ByteArray = GlobalAlias::create(Int8Ty, 0, GlobalValue::PrivateLinkage,
                                      "bits_use", ByteArray, &M);
    }

    Value *ByteAddr = B.CreateGEP(Int8Ty, ByteArray, BitOffset);
    Value *Byte = B.CreateLoad(Int8Ty, ByteAddr);

    Value *ByteAndMask =
        B.CreateAnd(Byte, ConstantExpr::getPtrToInt(TIL.BitMask, Int8Ty));
    return B.CreateICmpNE(ByteAndMask, ConstantInt::get(Int8Ty, 0));
  }
}

static bool isKnownTypeIdMember(Metadata *TypeId, const DataLayout &DL,
                                Value *V, uint64_t COffset) {
  if (auto GV = dyn_cast<GlobalObject>(V)) {
    SmallVector<MDNode *, 2> Types;
    GV->getMetadata(LLVMContext::MD_type, Types);
    for (MDNode *Type : Types) {
      if (Type->getOperand(1) != TypeId)
        continue;
      uint64_t Offset =
          cast<ConstantInt>(
              cast<ConstantAsMetadata>(Type->getOperand(0))->getValue())
              ->getZExtValue();
      if (COffset == Offset)
        return true;
    }
    return false;
  }

  if (auto GEP = dyn_cast<GEPOperator>(V)) {
    APInt APOffset(DL.getPointerSizeInBits(0), 0);
    bool Result = GEP->accumulateConstantOffset(DL, APOffset);
    if (!Result)
      return false;
    COffset += APOffset.getZExtValue();
    return isKnownTypeIdMember(TypeId, DL, GEP->getPointerOperand(), COffset);
  }

  if (auto Op = dyn_cast<Operator>(V)) {
    if (Op->getOpcode() == Instruction::BitCast)
      return isKnownTypeIdMember(TypeId, DL, Op->getOperand(0), COffset);

    if (Op->getOpcode() == Instruction::Select)
      return isKnownTypeIdMember(TypeId, DL, Op->getOperand(1), COffset) &&
             isKnownTypeIdMember(TypeId, DL, Op->getOperand(2), COffset);
  }

  return false;
}
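
// For example (illustrative): a type test on a constant GEP whose accumulated
// byte offset into @vt is 16 is known to pass when @vt carries
// !type !{i64 16, !"_ZTS1A"} and the test's type id is !"_ZTS1A"; the GEP
// case above accumulates the offset and the GlobalObject case matches it
// against the offset recorded in the metadata.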

/// Lower a llvm.type.test call to its implementation. Returns the value to
/// replace the call with.
Value *LowerTypeTestsModule::lowerTypeTestCall(Metadata *TypeId, CallInst *CI,
                                               const TypeIdLowering &TIL) {
  // Delay lowering if the resolution is currently unknown.
  if (TIL.TheKind == TypeTestResolution::Unknown)
    return nullptr;
  if (TIL.TheKind == TypeTestResolution::Unsat)
    return ConstantInt::getFalse(M.getContext());

  Value *Ptr = CI->getArgOperand(0);
  const DataLayout &DL = M.getDataLayout();
  if (isKnownTypeIdMember(TypeId, DL, Ptr, 0))
    return ConstantInt::getTrue(M.getContext());

  BasicBlock *InitialBB = CI->getParent();

  IRBuilder<> B(CI);

  Value *PtrAsInt = B.CreatePtrToInt(Ptr, IntPtrTy);

  Constant *OffsetedGlobalAsInt =
      ConstantExpr::getPtrToInt(TIL.OffsetedGlobal, IntPtrTy);
  if (TIL.TheKind == TypeTestResolution::Single)
    return B.CreateICmpEQ(PtrAsInt, OffsetedGlobalAsInt);

  Value *PtrOffset = B.CreateSub(PtrAsInt, OffsetedGlobalAsInt);

  // We need to check that the offset both falls within our range and is
  // suitably aligned. We can check both properties at the same time by
  // performing a right rotate by log2(alignment) followed by an integer
  // comparison against the bitset size. The rotate will move the lower
  // order bits that need to be zero into the higher order bits of the
  // result, causing the comparison to fail if they are nonzero. The rotate
  // also conveniently gives us a bit offset to use during the load from
  // the bitset.
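  //
  // Worked example (illustrative numbers): with AlignLog2 == 3 on a 64-bit
  // target, PtrOffset == 24 rotates to BitOffset == 3 and passes the range
  // check whenever SizeM1 >= 3, while PtrOffset == 25 rotates to
  // 0x2000000000000003 and always fails it, because the nonzero low bits
  // have moved into the high bits of the result.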
  Value *OffsetSHR =
      B.CreateLShr(PtrOffset, ConstantExpr::getZExt(TIL.AlignLog2, IntPtrTy));
  Value *OffsetSHL = B.CreateShl(
      PtrOffset, ConstantExpr::getZExt(
                     ConstantExpr::getSub(
                         ConstantInt::get(Int8Ty, DL.getPointerSizeInBits(0)),
                         TIL.AlignLog2),
                     IntPtrTy));
  Value *BitOffset = B.CreateOr(OffsetSHR, OffsetSHL);

  Value *OffsetInRange = B.CreateICmpULE(BitOffset, TIL.SizeM1);

  // If the bit set is all ones, testing against it is unnecessary.
  if (TIL.TheKind == TypeTestResolution::AllOnes)
    return OffsetInRange;

  // See if the intrinsic is used in the following common pattern:
  //   br(llvm.type.test(...), thenbb, elsebb)
  // where nothing happens between the type test and the br.
  // If so, create slightly simpler IR.
  if (CI->hasOneUse())
    if (auto *Br = dyn_cast<BranchInst>(*CI->user_begin()))
      if (CI->getNextNode() == Br) {
        BasicBlock *Then = InitialBB->splitBasicBlock(CI->getIterator());
        BasicBlock *Else = Br->getSuccessor(1);
        BranchInst *NewBr = BranchInst::Create(Then, Else, OffsetInRange);
        NewBr->setMetadata(LLVMContext::MD_prof,
                           Br->getMetadata(LLVMContext::MD_prof));
        ReplaceInstWithInst(InitialBB->getTerminator(), NewBr);

        // Update phis in Else resulting from InitialBB being split
        for (auto &Phi : Else->phis())
          Phi.addIncoming(Phi.getIncomingValueForBlock(Then), InitialBB);

        IRBuilder<> ThenB(CI);
        return createBitSetTest(ThenB, TIL, BitOffset);
      }

  IRBuilder<> ThenB(SplitBlockAndInsertIfThen(OffsetInRange, CI, false));

  // Now that we know that the offset is in range and aligned, load the
  // appropriate bit from the bitset.
  Value *Bit = createBitSetTest(ThenB, TIL, BitOffset);

  // The value we want is 0 if we came directly from the initial block
  // (having failed the range or alignment checks), or the loaded bit if
  // we came from the block in which we loaded it.
  B.SetInsertPoint(CI);
  PHINode *P = B.CreatePHI(Int1Ty, 2);
  P->addIncoming(ConstantInt::get(Int1Ty, 0), InitialBB);
  P->addIncoming(Bit, ThenB.GetInsertBlock());
  return P;
}

/// Given a disjoint set of type identifiers and globals, lay out the globals,
/// build the bit sets and lower the llvm.type.test calls.
void LowerTypeTestsModule::buildBitSetsFromGlobalVariables(
    ArrayRef<Metadata *> TypeIds, ArrayRef<GlobalTypeMember *> Globals) {
  // Build a new global with the combined contents of the referenced globals.
  // This global is a struct whose even-indexed elements contain the original
  // contents of the referenced globals and whose odd-indexed elements contain
  // any padding required to align the next element to the next power of 2 plus
  // any additional padding required to meet its alignment requirements.
  std::vector<Constant *> GlobalInits;
  const DataLayout &DL = M.getDataLayout();
  DenseMap<GlobalTypeMember *, uint64_t> GlobalLayout;
  Align MaxAlign;
  uint64_t CurOffset = 0;
  uint64_t DesiredPadding = 0;
  for (GlobalTypeMember *G : Globals) {
    auto *GV = cast<GlobalVariable>(G->getGlobal());
    Align Alignment =
        DL.getValueOrABITypeAlignment(GV->getAlign(), GV->getValueType());
    MaxAlign = std::max(MaxAlign, Alignment);
    uint64_t GVOffset = alignTo(CurOffset + DesiredPadding, Alignment);
    GlobalLayout[G] = GVOffset;
    if (GVOffset != 0) {
      uint64_t Padding = GVOffset - CurOffset;
      GlobalInits.push_back(
          ConstantAggregateZero::get(ArrayType::get(Int8Ty, Padding)));
    }

    GlobalInits.push_back(GV->getInitializer());
    uint64_t InitSize = DL.getTypeAllocSize(GV->getValueType());
    CurOffset = GVOffset + InitSize;

    // Compute the amount of padding that we'd like for the next element.
    DesiredPadding = NextPowerOf2(InitSize - 1) - InitSize;

    // Experiments of different caps with Chromium on both x64 and ARM64
    // have shown that the 32-byte cap generates the smallest binary on
    // both platforms while different caps yield similar performance.
    // (see https://lists.llvm.org/pipermail/llvm-dev/2018-July/124694.html)
    if (DesiredPadding > 32)
      DesiredPadding = alignTo(InitSize, 32) - InitSize;
  }
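
  // For illustration of the padding policy above: after emitting a 6-byte
  // global the builder requests NextPowerOf2(5) - 6 == 2 bytes of padding, so
  // that (modulo alignment) the next member starts 8 bytes after this one;
  // after a 100-byte global the request would be 28 bytes, still within the
  // 32-byte cap.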

  Constant *NewInit = ConstantStruct::getAnon(M.getContext(), GlobalInits);
  auto *CombinedGlobal =
      new GlobalVariable(M, NewInit->getType(), /*isConstant=*/true,
                         GlobalValue::PrivateLinkage, NewInit);
  CombinedGlobal->setAlignment(MaxAlign);

  StructType *NewTy = cast<StructType>(NewInit->getType());
  lowerTypeTestCalls(TypeIds, CombinedGlobal, GlobalLayout);

  // Build aliases pointing to offsets into the combined global for each
  // global from which we built the combined global, and replace references
  // to the original globals with references to the aliases.
  for (unsigned I = 0; I != Globals.size(); ++I) {
    GlobalVariable *GV = cast<GlobalVariable>(Globals[I]->getGlobal());

    // Multiply by 2 to account for padding elements.
    Constant *CombinedGlobalIdxs[] = {ConstantInt::get(Int32Ty, 0),
                                      ConstantInt::get(Int32Ty, I * 2)};
    Constant *CombinedGlobalElemPtr = ConstantExpr::getGetElementPtr(
        NewInit->getType(), CombinedGlobal, CombinedGlobalIdxs);
    assert(GV->getType()->getAddressSpace() == 0);
    GlobalAlias *GAlias =
        GlobalAlias::create(NewTy->getElementType(I * 2), 0, GV->getLinkage(),
                            "", CombinedGlobalElemPtr, &M);
    GAlias->setVisibility(GV->getVisibility());
    GAlias->takeName(GV);
    GV->replaceAllUsesWith(GAlias);
    GV->eraseFromParent();
  }
}

bool LowerTypeTestsModule::shouldExportConstantsAsAbsoluteSymbols() {
  return (Arch == Triple::x86 || Arch == Triple::x86_64) &&
         ObjectFormat == Triple::ELF;
}

/// Export the given type identifier so that ThinLTO backends may import it.
/// Type identifiers are exported by adding coarse-grained information about how
/// to test the type identifier to the summary, and creating symbols in the
/// object file (aliases and absolute symbols) containing fine-grained
/// information about the type identifier.
///
/// Returns a pointer to the location in which to store the bitmask, if
/// applicable.
uint8_t *LowerTypeTestsModule::exportTypeId(StringRef TypeId,
                                            const TypeIdLowering &TIL) {
  TypeTestResolution &TTRes =
      ExportSummary->getOrInsertTypeIdSummary(TypeId).TTRes;
  TTRes.TheKind = TIL.TheKind;

  auto ExportGlobal = [&](StringRef Name, Constant *C) {
    GlobalAlias *GA =
        GlobalAlias::create(Int8Ty, 0, GlobalValue::ExternalLinkage,
                            "__typeid_" + TypeId + "_" + Name, C, &M);
    GA->setVisibility(GlobalValue::HiddenVisibility);
  };

  auto ExportConstant = [&](StringRef Name, uint64_t &Storage, Constant *C) {
    if (shouldExportConstantsAsAbsoluteSymbols())
      ExportGlobal(Name, ConstantExpr::getIntToPtr(C, Int8PtrTy));
    else
      Storage = cast<ConstantInt>(C)->getZExtValue();
  };

  if (TIL.TheKind != TypeTestResolution::Unsat)
    ExportGlobal("global_addr", TIL.OffsetedGlobal);

  if (TIL.TheKind == TypeTestResolution::ByteArray ||
      TIL.TheKind == TypeTestResolution::Inline ||
      TIL.TheKind == TypeTestResolution::AllOnes) {
    ExportConstant("align", TTRes.AlignLog2, TIL.AlignLog2);
    ExportConstant("size_m1", TTRes.SizeM1, TIL.SizeM1);

    uint64_t BitSize = cast<ConstantInt>(TIL.SizeM1)->getZExtValue() + 1;
    if (TIL.TheKind == TypeTestResolution::Inline)
      TTRes.SizeM1BitWidth = (BitSize <= 32) ? 5 : 6;
    else
      TTRes.SizeM1BitWidth = (BitSize <= 128) ? 7 : 32;
  }

  if (TIL.TheKind == TypeTestResolution::ByteArray) {
    ExportGlobal("byte_array", TIL.TheByteArray);
    if (shouldExportConstantsAsAbsoluteSymbols())
      ExportGlobal("bit_mask", TIL.BitMask);
    else
      return &TTRes.BitMask;
  }

  if (TIL.TheKind == TypeTestResolution::Inline)
    ExportConstant("inline_bits", TTRes.InlineBits, TIL.InlineBits);

  return nullptr;
}
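
// As a concrete illustration of the naming scheme above: exporting a
// ByteArray resolution for the type id "_ZTS1A" on x86-64 ELF (where
// shouldExportConstantsAsAbsoluteSymbols() holds) produces the hidden symbols
// __typeid__ZTS1A_global_addr and __typeid__ZTS1A_byte_array (aliases into
// the combined global and the byte array) and __typeid__ZTS1A_align,
// __typeid__ZTS1A_size_m1 and __typeid__ZTS1A_bit_mask (absolute symbols
// encoding the constants); on other targets the constants travel through the
// summary fields instead.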

LowerTypeTestsModule::TypeIdLowering
LowerTypeTestsModule::importTypeId(StringRef TypeId) {
  const TypeIdSummary *TidSummary = ImportSummary->getTypeIdSummary(TypeId);
  if (!TidSummary)
    return {}; // Unsat: no globals match this type id.
  const TypeTestResolution &TTRes = TidSummary->TTRes;

  TypeIdLowering TIL;
  TIL.TheKind = TTRes.TheKind;

  auto ImportGlobal = [&](StringRef Name) {
    // Give the global a type of length 0 so that it is not assumed to be
    // non-aliasing with any other global.
    Constant *C = M.getOrInsertGlobal(("__typeid_" + TypeId + "_" + Name).str(),
                                      Int8Arr0Ty);
    if (auto *GV = dyn_cast<GlobalVariable>(C))
      GV->setVisibility(GlobalValue::HiddenVisibility);
    C = ConstantExpr::getBitCast(C, Int8PtrTy);
    return C;
  };

  auto ImportConstant = [&](StringRef Name, uint64_t Const, unsigned AbsWidth,
                            Type *Ty) {
    if (!shouldExportConstantsAsAbsoluteSymbols()) {
      Constant *C =
          ConstantInt::get(isa<IntegerType>(Ty) ? Ty : Int64Ty, Const);
      if (!isa<IntegerType>(Ty))
        C = ConstantExpr::getIntToPtr(C, Ty);
      return C;
    }

    Constant *C = ImportGlobal(Name);
    auto *GV = cast<GlobalVariable>(C->stripPointerCasts());
    if (isa<IntegerType>(Ty))
      C = ConstantExpr::getPtrToInt(C, Ty);
    if (GV->getMetadata(LLVMContext::MD_absolute_symbol))
      return C;

    auto SetAbsRange = [&](uint64_t Min, uint64_t Max) {
      auto *MinC = ConstantAsMetadata::get(ConstantInt::get(IntPtrTy, Min));
      auto *MaxC = ConstantAsMetadata::get(ConstantInt::get(IntPtrTy, Max));
      GV->setMetadata(LLVMContext::MD_absolute_symbol,
                      MDNode::get(M.getContext(), {MinC, MaxC}));
    };
    if (AbsWidth == IntPtrTy->getBitWidth())
      SetAbsRange(~0ull, ~0ull); // Full set.
    else
      SetAbsRange(0, 1ull << AbsWidth);
    return C;
  };

  if (TIL.TheKind != TypeTestResolution::Unsat)
    TIL.OffsetedGlobal = ImportGlobal("global_addr");

  if (TIL.TheKind == TypeTestResolution::ByteArray ||
      TIL.TheKind == TypeTestResolution::Inline ||
      TIL.TheKind == TypeTestResolution::AllOnes) {
    TIL.AlignLog2 = ImportConstant("align", TTRes.AlignLog2, 8, Int8Ty);
    TIL.SizeM1 =
        ImportConstant("size_m1", TTRes.SizeM1, TTRes.SizeM1BitWidth, IntPtrTy);
  }

  if (TIL.TheKind == TypeTestResolution::ByteArray) {
    TIL.TheByteArray = ImportGlobal("byte_array");
    TIL.BitMask = ImportConstant("bit_mask", TTRes.BitMask, 8, Int8PtrTy);
  }

  if (TIL.TheKind == TypeTestResolution::Inline)
    TIL.InlineBits = ImportConstant(
        "inline_bits", TTRes.InlineBits, 1 << TTRes.SizeM1BitWidth,
        TTRes.SizeM1BitWidth <= 5 ? Int32Ty : Int64Ty);

  return TIL;
}
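
// For example (illustrative): importing the "align" constant on a target that
// exports constants as absolute symbols attaches
//   !absolute_symbol !{i64 0, i64 256}
// to the declaration (AbsWidth == 8 above), which lets the backend assume the
// symbol's value fits in a byte and fold it into immediate operands.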

void LowerTypeTestsModule::importTypeTest(CallInst *CI) {
  auto TypeIdMDVal = dyn_cast<MetadataAsValue>(CI->getArgOperand(1));
  if (!TypeIdMDVal)
    report_fatal_error("Second argument of llvm.type.test must be metadata");

  auto TypeIdStr = dyn_cast<MDString>(TypeIdMDVal->getMetadata());
  // If this is a local unpromoted type, which doesn't have a metadata string,
  // treat it as Unknown and delay lowering so that we can still utilize it for
  // later optimizations.
  if (!TypeIdStr)
    return;

  TypeIdLowering TIL = importTypeId(TypeIdStr->getString());
  Value *Lowered = lowerTypeTestCall(TypeIdStr, CI, TIL);
  if (Lowered) {
    CI->replaceAllUsesWith(Lowered);
    CI->eraseFromParent();
  }
}

// ThinLTO backend: the function F has a jump table entry; update this module
// accordingly. isJumpTableCanonical describes the type of the jump table entry.
void LowerTypeTestsModule::importFunction(
    Function *F, bool isJumpTableCanonical,
    std::vector<GlobalAlias *> &AliasesToErase) {
  assert(F->getType()->getAddressSpace() == 0);

  GlobalValue::VisibilityTypes Visibility = F->getVisibility();
  std::string Name = std::string(F->getName());

  if (F->isDeclarationForLinker() && isJumpTableCanonical) {
    // Non-dso_local functions may be overridden at run time,
    // so don't short-circuit them.
    if (F->isDSOLocal()) {
      Function *RealF = Function::Create(F->getFunctionType(),
                                         GlobalValue::ExternalLinkage,
                                         F->getAddressSpace(),
                                         Name + ".cfi", &M);
      RealF->setVisibility(GlobalValue::HiddenVisibility);
      replaceDirectCalls(F, RealF);
    }
    return;
  }

  Function *FDecl;
  if (!isJumpTableCanonical) {
    // Either a declaration of an external function or a reference to a locally
    // defined jump table.
    FDecl = Function::Create(F->getFunctionType(), GlobalValue::ExternalLinkage,
                             F->getAddressSpace(), Name + ".cfi_jt", &M);
    FDecl->setVisibility(GlobalValue::HiddenVisibility);
  } else {
    F->setName(Name + ".cfi");
    F->setLinkage(GlobalValue::ExternalLinkage);
    FDecl = Function::Create(F->getFunctionType(), GlobalValue::ExternalLinkage,
                             F->getAddressSpace(), Name, &M);
    FDecl->setVisibility(Visibility);
    Visibility = GlobalValue::HiddenVisibility;

    // Delete aliases pointing to this function; they'll be re-created in the
    // merged output. Don't do it yet, though, because ScopedSaveAliaseesAndUsed
    // will want to reset the aliasees first.
    for (auto &U : F->uses()) {
      if (auto *A = dyn_cast<GlobalAlias>(U.getUser())) {
        Function *AliasDecl = Function::Create(
            F->getFunctionType(), GlobalValue::ExternalLinkage,
            F->getAddressSpace(), "", &M);
        AliasDecl->takeName(A);
        A->replaceAllUsesWith(AliasDecl);
        AliasesToErase.push_back(A);
      }
    }
  }

  if (F->hasExternalWeakLinkage())
    replaceWeakDeclarationWithJumpTablePtr(F, FDecl, isJumpTableCanonical);
  else
    replaceCfiUses(F, FDecl, isJumpTableCanonical);

  // Set visibility late because it's used in replaceCfiUses() to determine
  // whether uses need to be replaced.
  F->setVisibility(Visibility);
}

void LowerTypeTestsModule::lowerTypeTestCalls(
    ArrayRef<Metadata *> TypeIds, Constant *CombinedGlobalAddr,
    const DenseMap<GlobalTypeMember *, uint64_t> &GlobalLayout) {
  CombinedGlobalAddr = ConstantExpr::getBitCast(CombinedGlobalAddr, Int8PtrTy);

  // For each type identifier in this disjoint set...
  for (Metadata *TypeId : TypeIds) {
    // Build the bitset.
    BitSetInfo BSI = buildBitSet(TypeId, GlobalLayout);
    LLVM_DEBUG({
      if (auto MDS = dyn_cast<MDString>(TypeId))
        dbgs() << MDS->getString() << ": ";
      else
        dbgs() << "<unnamed>: ";
      BSI.print(dbgs());
    });

    ByteArrayInfo *BAI = nullptr;
    TypeIdLowering TIL;
    TIL.OffsetedGlobal = ConstantExpr::getGetElementPtr(
        Int8Ty, CombinedGlobalAddr, ConstantInt::get(IntPtrTy, BSI.ByteOffset));
    TIL.AlignLog2 = ConstantInt::get(Int8Ty, BSI.AlignLog2);
    TIL.SizeM1 = ConstantInt::get(IntPtrTy, BSI.BitSize - 1);
    if (BSI.isAllOnes()) {
      TIL.TheKind = (BSI.BitSize == 1) ? TypeTestResolution::Single
                                       : TypeTestResolution::AllOnes;
    } else if (BSI.BitSize <= 64) {
      TIL.TheKind = TypeTestResolution::Inline;
      uint64_t InlineBits = 0;
      for (auto Bit : BSI.Bits)
        InlineBits |= uint64_t(1) << Bit;
      if (InlineBits == 0)
        TIL.TheKind = TypeTestResolution::Unsat;
      else
        TIL.InlineBits = ConstantInt::get(
            (BSI.BitSize <= 32) ? Int32Ty : Int64Ty, InlineBits);
    } else {
      TIL.TheKind = TypeTestResolution::ByteArray;
      ++NumByteArraysCreated;
      BAI = createByteArray(BSI);
      TIL.TheByteArray = BAI->ByteArray;
      TIL.BitMask = BAI->MaskGlobal;
    }

    TypeIdUserInfo &TIUI = TypeIdUsers[TypeId];

    if (TIUI.IsExported) {
      uint8_t *MaskPtr = exportTypeId(cast<MDString>(TypeId)->getString(), TIL);
      if (BAI)
        BAI->MaskPtr = MaskPtr;
    }

    // Lower each call to llvm.type.test for this type identifier.
    for (CallInst *CI : TIUI.CallSites) {
      ++NumTypeTestCallsLowered;
      Value *Lowered = lowerTypeTestCall(TypeId, CI, TIL);
      if (Lowered) {
        CI->replaceAllUsesWith(Lowered);
        CI->eraseFromParent();
      }
    }
  }
}

void LowerTypeTestsModule::verifyTypeMDNode(GlobalObject *GO, MDNode *Type) {
  if (Type->getNumOperands() != 2)
    report_fatal_error("Type metadata nodes must have 2 operands");

  if (GO->isThreadLocal())
    report_fatal_error("Bit set element may not be thread-local");
  if (isa<GlobalVariable>(GO) && GO->hasSection())
    report_fatal_error(
        "A member of a type identifier may not have an explicit section");

  // FIXME: We previously checked that global var member of a type identifier
  // must be a definition, but the IR linker may leave type metadata on
  // declarations. We should restore this check after fixing PR31759.

  auto OffsetConstMD = dyn_cast<ConstantAsMetadata>(Type->getOperand(0));
  if (!OffsetConstMD)
    report_fatal_error("Type offset must be a constant");
  auto OffsetInt = dyn_cast<ConstantInt>(OffsetConstMD->getValue());
  if (!OffsetInt)
    report_fatal_error("Type offset must be an integer constant");
}

static const unsigned kX86JumpTableEntrySize = 8;
static const unsigned kX86IBTJumpTableEntrySize = 16;
static const unsigned kARMJumpTableEntrySize = 4;
static const unsigned kARMBTIJumpTableEntrySize = 8;
static const unsigned kRISCVJumpTableEntrySize = 8;

unsigned LowerTypeTestsModule::getJumpTableEntrySize() {
  switch (Arch) {
    case Triple::x86:
    case Triple::x86_64:
      if (const auto *MD = mdconst::extract_or_null<ConstantInt>(
            M.getModuleFlag("cf-protection-branch")))
        if (MD->getZExtValue())
          return kX86IBTJumpTableEntrySize;
      return kX86JumpTableEntrySize;
    case Triple::arm:
    case Triple::thumb:
      return kARMJumpTableEntrySize;
    case Triple::aarch64:
      if (const auto *BTE = mdconst::extract_or_null<ConstantInt>(
            M.getModuleFlag("branch-target-enforcement")))
        if (BTE->getZExtValue())
          return kARMBTIJumpTableEntrySize;
      return kARMJumpTableEntrySize;
    case Triple::riscv32:
    case Triple::riscv64:
      return kRISCVJumpTableEntrySize;
    default:
      report_fatal_error("Unsupported architecture for jump tables");
  }
}

// Create a jump table entry for the target. This consists of an instruction
// sequence containing a relative branch to Dest. Appends inline asm text,
// constraints and arguments to AsmOS, ConstraintOS and AsmArgs.
void LowerTypeTestsModule::createJumpTableEntry(
    raw_ostream &AsmOS, raw_ostream &ConstraintOS,
    Triple::ArchType JumpTableArch, SmallVectorImpl<Value *> &AsmArgs,
    Function *Dest) {
  unsigned ArgIndex = AsmArgs.size();

  if (JumpTableArch == Triple::x86 || JumpTableArch == Triple::x86_64) {
    bool Endbr = false;
    if (const auto *MD = mdconst::extract_or_null<ConstantInt>(
          Dest->getParent()->getModuleFlag("cf-protection-branch")))
      Endbr = MD->getZExtValue() != 0;
    if (Endbr)
      AsmOS << (JumpTableArch == Triple::x86 ? "endbr32\n" : "endbr64\n");
    AsmOS << "jmp ${" << ArgIndex << ":c}@plt\n";
    if (Endbr)
      AsmOS << ".balign 16, 0xcc\n";
    else
      AsmOS << "int3\nint3\nint3\n";
  } else if (JumpTableArch == Triple::arm) {
    AsmOS << "b $" << ArgIndex << "\n";
  } else if (JumpTableArch == Triple::aarch64) {
    if (const auto *BTE = mdconst::extract_or_null<ConstantInt>(
          Dest->getParent()->getModuleFlag("branch-target-enforcement")))
      if (BTE->getZExtValue())
        AsmOS << "bti c\n";
    AsmOS << "b $" << ArgIndex << "\n";
  } else if (JumpTableArch == Triple::thumb) {
    AsmOS << "b.w $" << ArgIndex << "\n";
  } else if (JumpTableArch == Triple::riscv32 ||
             JumpTableArch == Triple::riscv64) {
    AsmOS << "tail $" << ArgIndex << "@plt\n";
  } else {
    report_fatal_error("Unsupported architecture for jump tables");
  }

  ConstraintOS << (ArgIndex > 0 ? ",s" : "s");
  AsmArgs.push_back(Dest);
}
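
// For reference, a single x86-64 entry emitted above looks like this with IBT
// disabled:
//
//   jmp ${0:c}@plt
//   int3
//   int3
//   int3
//
// i.e. a 5-byte relative jmp padded with int3 up to the 8-byte
// kX86JumpTableEntrySize; with IBT enabled the entry instead starts with
// endbr64 and is padded to 16 bytes with 0xcc.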

Type *LowerTypeTestsModule::getJumpTableEntryType() {
  return ArrayType::get(Int8Ty, getJumpTableEntrySize());
}

/// Given a disjoint set of type identifiers and functions, build the bit sets
/// and lower the llvm.type.test calls, architecture dependently.
void LowerTypeTestsModule::buildBitSetsFromFunctions(
    ArrayRef<Metadata *> TypeIds, ArrayRef<GlobalTypeMember *> Functions) {
  if (Arch == Triple::x86 || Arch == Triple::x86_64 || Arch == Triple::arm ||
      Arch == Triple::thumb || Arch == Triple::aarch64 ||
      Arch == Triple::riscv32 || Arch == Triple::riscv64)
    buildBitSetsFromFunctionsNative(TypeIds, Functions);
  else if (Arch == Triple::wasm32 || Arch == Triple::wasm64)
    buildBitSetsFromFunctionsWASM(TypeIds, Functions);
  else
    report_fatal_error("Unsupported architecture for jump tables");
}

void LowerTypeTestsModule::moveInitializerToModuleConstructor(
    GlobalVariable *GV) {
  if (WeakInitializerFn == nullptr) {
    WeakInitializerFn = Function::Create(
        FunctionType::get(Type::getVoidTy(M.getContext()),
                          /* IsVarArg */ false),
        GlobalValue::InternalLinkage,
        M.getDataLayout().getProgramAddressSpace(),
        "__cfi_global_var_init", &M);
    BasicBlock *BB =
        BasicBlock::Create(M.getContext(), "entry", WeakInitializerFn);
    ReturnInst::Create(M.getContext(), BB);
    WeakInitializerFn->setSection(
        ObjectFormat == Triple::MachO
            ? "__TEXT,__StaticInit,regular,pure_instructions"
            : ".text.startup");
    // This code is equivalent to relocation application, and should run at the
    // earliest possible time (i.e. with the highest priority).
    appendToGlobalCtors(M, WeakInitializerFn, /* Priority */ 0);
  }

  IRBuilder<> IRB(WeakInitializerFn->getEntryBlock().getTerminator());
  GV->setConstant(false);
  IRB.CreateAlignedStore(GV->getInitializer(), GV, GV->getAlign());
  GV->setInitializer(Constant::getNullValue(GV->getValueType()));
}

void LowerTypeTestsModule::findGlobalVariableUsersOf(
    Constant *C, SmallSetVector<GlobalVariable *, 8> &Out) {
  for (auto *U : C->users()) {
    if (auto *GV = dyn_cast<GlobalVariable>(U))
      Out.insert(GV);
    else if (auto *C2 = dyn_cast<Constant>(U))
      findGlobalVariableUsersOf(C2, Out);
  }
}

// Replace all uses of F with (F ? JT : 0).
void LowerTypeTestsModule::replaceWeakDeclarationWithJumpTablePtr(
    Function *F, Constant *JT, bool IsJumpTableCanonical) {
  // The target expression cannot appear in a constant initializer on most
  // (all?) targets. Switch to a runtime initializer.
  SmallSetVector<GlobalVariable *, 8> GlobalVarUsers;
  findGlobalVariableUsersOf(F, GlobalVarUsers);
  for (auto *GV : GlobalVarUsers)
    moveInitializerToModuleConstructor(GV);

  // Cannot RAUW F with an expression that uses F. Replace it with a temporary
  // placeholder first.
1322   Function *PlaceholderFn =
1323       Function::Create(cast<FunctionType>(F->getValueType()),
1324                        GlobalValue::ExternalWeakLinkage,
1325                        F->getAddressSpace(), "", &M);
1326   replaceCfiUses(F, PlaceholderFn, IsJumpTableCanonical);
1327 
1328   Constant *Target = ConstantExpr::getSelect(
1329       ConstantExpr::getICmp(CmpInst::ICMP_NE, F,
1330                             Constant::getNullValue(F->getType())),
1331       JT, Constant::getNullValue(F->getType()));
1332   PlaceholderFn->replaceAllUsesWith(Target);
1333   PlaceholderFn->eraseFromParent();
1334 }
1335 
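// Determine whether F is compiled as Thumb code, preferring its own
// "target-features" attribute over the module architecture. For example
// (illustrative), a function carrying "target-features"="+neon,+thumb-mode"
// is treated as Thumb even in an ARM-mode module.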
1336 static bool isThumbFunction(Function *F, Triple::ArchType ModuleArch) {
1337   Attribute TFAttr = F->getFnAttribute("target-features");
1338   if (TFAttr.isValid()) {
1339     SmallVector<StringRef, 6> Features;
1340     TFAttr.getValueAsString().split(Features, ',');
1341     for (StringRef Feature : Features) {
1342       if (Feature == "-thumb-mode")
1343         return false;
1344       else if (Feature == "+thumb-mode")
1345         return true;
1346     }
1347   }
1348 
1349   return ModuleArch == Triple::thumb;
1350 }
1351 
1352 // Each jump table must be either ARM or Thumb as a whole for the bit-test math
1353 // to work. Pick one that matches the majority of members to minimize interop
1354 // veneers inserted by the linker.
1355 static Triple::ArchType
1356 selectJumpTableArmEncoding(ArrayRef<GlobalTypeMember *> Functions,
1357                            Triple::ArchType ModuleArch) {
1358   if (ModuleArch != Triple::arm && ModuleArch != Triple::thumb)
1359     return ModuleArch;
1360 
1361   unsigned ArmCount = 0, ThumbCount = 0;
  for (auto *GTM : Functions) {
1363     if (!GTM->isJumpTableCanonical()) {
1364       // PLT stubs are always ARM.
1365       // FIXME: This is the wrong heuristic for non-canonical jump tables.
1366       ++ArmCount;
1367       continue;
1368     }
1369 
1370     Function *F = cast<Function>(GTM->getGlobal());
1371     ++(isThumbFunction(F, ModuleArch) ? ThumbCount : ArmCount);
1372   }
1373 
1374   return ArmCount > ThumbCount ? Triple::arm : Triple::thumb;
1375 }
1376 
1377 void LowerTypeTestsModule::createJumpTable(
1378     Function *F, ArrayRef<GlobalTypeMember *> Functions) {
1379   std::string AsmStr, ConstraintStr;
1380   raw_string_ostream AsmOS(AsmStr), ConstraintOS(ConstraintStr);
1381   SmallVector<Value *, 16> AsmArgs;
1382   AsmArgs.reserve(Functions.size() * 2);
1383 
1384   Triple::ArchType JumpTableArch = selectJumpTableArmEncoding(Functions, Arch);
1385 
1386   for (GlobalTypeMember *GTM : Functions)
1387     createJumpTableEntry(AsmOS, ConstraintOS, JumpTableArch, AsmArgs,
1388                          cast<Function>(GTM->getGlobal()));
1389 
1390   // Align the whole table by entry size.
1391   F->setAlignment(Align(getJumpTableEntrySize()));
1392   // Skip prologue.
1393   // Disabled on win32 due to https://llvm.org/bugs/show_bug.cgi?id=28641#c3.
1394   // Luckily, this function does not get any prologue even without the
1395   // attribute.
1396   if (OS != Triple::Win32)
1397     F->addFnAttr(Attribute::Naked);
1398   if (JumpTableArch == Triple::arm)
1399     F->addFnAttr("target-features", "-thumb-mode");
1400   if (JumpTableArch == Triple::thumb) {
1401     F->addFnAttr("target-features", "+thumb-mode");
1402     // Thumb jump table assembly needs Thumb2. The following attribute is added
1403     // by Clang for -march=armv7.
1404     F->addFnAttr("target-cpu", "cortex-a8");
1405   }
1406   // When -mbranch-protection= is used, the inline asm adds a BTI. Suppress BTI
1407   // for the function to avoid double BTI. This is a no-op without
1408   // -mbranch-protection=.
1409   if (JumpTableArch == Triple::aarch64) {
1410     F->addFnAttr("branch-target-enforcement", "false");
1411     F->addFnAttr("sign-return-address", "none");
1412   }
1413   if (JumpTableArch == Triple::riscv32 || JumpTableArch == Triple::riscv64) {
1414     // Make sure the jump table assembly is not modified by the assembler or
1415     // the linker.
1416     F->addFnAttr("target-features", "-c,-relax");
1417   }
1418   // When -fcf-protection= is used, the inline asm adds an ENDBR. Suppress ENDBR
1419   // for the function to avoid double ENDBR. This is a no-op without
1420   // -fcf-protection=.
1421   if (JumpTableArch == Triple::x86 || JumpTableArch == Triple::x86_64)
1422     F->addFnAttr(Attribute::NoCfCheck);
1423   // Make sure we don't emit .eh_frame for this function.
1424   F->addFnAttr(Attribute::NoUnwind);
1425 
1426   BasicBlock *BB = BasicBlock::Create(M.getContext(), "entry", F);
1427   IRBuilder<> IRB(BB);
1428 
1429   SmallVector<Type *, 16> ArgTypes;
1430   ArgTypes.reserve(AsmArgs.size());
1431   for (const auto &Arg : AsmArgs)
1432     ArgTypes.push_back(Arg->getType());
1433   InlineAsm *JumpTableAsm =
1434       InlineAsm::get(FunctionType::get(IRB.getVoidTy(), ArgTypes, false),
1435                      AsmOS.str(), ConstraintOS.str(),
1436                      /*hasSideEffects=*/true);
1437 
1438   IRB.CreateCall(JumpTableAsm, AsmArgs);
1439   IRB.CreateUnreachable();
1440 }
1441 
1442 /// Given a disjoint set of type identifiers and functions, build a jump table
1443 /// for the functions, build the bit sets and lower the llvm.type.test calls.
1444 void LowerTypeTestsModule::buildBitSetsFromFunctionsNative(
1445     ArrayRef<Metadata *> TypeIds, ArrayRef<GlobalTypeMember *> Functions) {
1446   // Unlike the global bitset builder, the function bitset builder cannot
  // rearrange functions in a particular order and base its calculations on the
1448   // layout of the functions' entry points, as we have no idea how large a
1449   // particular function will end up being (the size could even depend on what
1450   // this pass does!) Instead, we build a jump table, which is a block of code
1451   // consisting of one branch instruction for each of the functions in the bit
1452   // set that branches to the target function, and redirect any taken function
1453   // addresses to the corresponding jump table entry. In the object file's
1454   // symbol table, the symbols for the target functions also refer to the jump
1455   // table entries, so that addresses taken outside the module will pass any
1456   // verification done inside the module.
1457   //
1458   // In more concrete terms, suppose we have three functions f, g, h which are
1459   // of the same type, and a function foo that returns their addresses:
1460   //
1461   // f:
1462   // mov 0, %eax
1463   // ret
1464   //
1465   // g:
1466   // mov 1, %eax
1467   // ret
1468   //
1469   // h:
1470   // mov 2, %eax
1471   // ret
1472   //
1473   // foo:
1474   // mov f, %eax
1475   // mov g, %edx
1476   // mov h, %ecx
1477   // ret
1478   //
  // We output the jump table as a module-level inline asm string. The end
  // result will (conceptually) look like this:
1481   //
1482   // f = .cfi.jumptable
1483   // g = .cfi.jumptable + 4
1484   // h = .cfi.jumptable + 8
1485   // .cfi.jumptable:
1486   // jmp f.cfi  ; 5 bytes
1487   // int3       ; 1 byte
1488   // int3       ; 1 byte
1489   // int3       ; 1 byte
1490   // jmp g.cfi  ; 5 bytes
1491   // int3       ; 1 byte
1492   // int3       ; 1 byte
1493   // int3       ; 1 byte
1494   // jmp h.cfi  ; 5 bytes
1495   // int3       ; 1 byte
1496   // int3       ; 1 byte
1497   // int3       ; 1 byte
1498   //
1499   // f.cfi:
1500   // mov 0, %eax
1501   // ret
1502   //
1503   // g.cfi:
1504   // mov 1, %eax
1505   // ret
1506   //
1507   // h.cfi:
1508   // mov 2, %eax
1509   // ret
1510   //
1511   // foo:
1512   // mov f, %eax
1513   // mov g, %edx
1514   // mov h, %ecx
1515   // ret
1516   //
1517   // Because the addresses of f, g, h are evenly spaced at a power of 2, in the
1518   // normal case the check can be carried out using the same kind of simple
1519   // arithmetic that we normally use for globals.
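  //
  // For example (an illustrative sketch of the arithmetic, not the exact code
  // sequence emitted), with 8-byte jump table entries a membership test of a
  // pointer P against the three-entry table above reduces to checking that
  // (P - .cfi.jumptable) is a multiple of 8 and (P - .cfi.jumptable) / 8 < 3.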
1520 
1521   // FIXME: find a better way to represent the jumptable in the IR.
1522   assert(!Functions.empty());
1523 
1524   // Build a simple layout based on the regular layout of jump tables.
1525   DenseMap<GlobalTypeMember *, uint64_t> GlobalLayout;
1526   unsigned EntrySize = getJumpTableEntrySize();
1527   for (unsigned I = 0; I != Functions.size(); ++I)
1528     GlobalLayout[Functions[I]] = I * EntrySize;
1529 
1530   Function *JumpTableFn =
1531       Function::Create(FunctionType::get(Type::getVoidTy(M.getContext()),
1532                                          /* IsVarArg */ false),
1533                        GlobalValue::PrivateLinkage,
1534                        M.getDataLayout().getProgramAddressSpace(),
1535                        ".cfi.jumptable", &M);
1536   ArrayType *JumpTableType =
1537       ArrayType::get(getJumpTableEntryType(), Functions.size());
  auto *JumpTable =
      ConstantExpr::getPointerCast(JumpTableFn, JumpTableType->getPointerTo(0));
1540 
1541   lowerTypeTestCalls(TypeIds, JumpTable, GlobalLayout);
1542 
1543   {
1544     ScopedSaveAliaseesAndUsed S(M);
1545 
1546     // Build aliases pointing to offsets into the jump table, and replace
1547     // references to the original functions with references to the aliases.
1548     for (unsigned I = 0; I != Functions.size(); ++I) {
1549       Function *F = cast<Function>(Functions[I]->getGlobal());
1550       bool IsJumpTableCanonical = Functions[I]->isJumpTableCanonical();
1551 
1552       Constant *CombinedGlobalElemPtr = ConstantExpr::getBitCast(
1553           ConstantExpr::getInBoundsGetElementPtr(
1554               JumpTableType, JumpTable,
1555               ArrayRef<Constant *>{ConstantInt::get(IntPtrTy, 0),
1556                                    ConstantInt::get(IntPtrTy, I)}),
1557           F->getType());
1558 
1559       const bool IsExported = Functions[I]->isExported();
1560       if (!IsJumpTableCanonical) {
1561         GlobalValue::LinkageTypes LT = IsExported
1562                                            ? GlobalValue::ExternalLinkage
1563                                            : GlobalValue::InternalLinkage;
1564         GlobalAlias *JtAlias = GlobalAlias::create(F->getValueType(), 0, LT,
1565                                                    F->getName() + ".cfi_jt",
1566                                                    CombinedGlobalElemPtr, &M);
1567         if (IsExported)
1568           JtAlias->setVisibility(GlobalValue::HiddenVisibility);
1569         else
1570           appendToUsed(M, {JtAlias});
1571       }
1572 
1573       if (IsExported) {
1574         if (IsJumpTableCanonical)
1575           ExportSummary->cfiFunctionDefs().insert(std::string(F->getName()));
1576         else
1577           ExportSummary->cfiFunctionDecls().insert(std::string(F->getName()));
1578       }
1579 
1580       if (!IsJumpTableCanonical) {
1581         if (F->hasExternalWeakLinkage())
1582           replaceWeakDeclarationWithJumpTablePtr(F, CombinedGlobalElemPtr,
1583                                                  IsJumpTableCanonical);
1584         else
1585           replaceCfiUses(F, CombinedGlobalElemPtr, IsJumpTableCanonical);
1586       } else {
1587         assert(F->getType()->getAddressSpace() == 0);
1588 
1589         GlobalAlias *FAlias =
1590             GlobalAlias::create(F->getValueType(), 0, F->getLinkage(), "",
1591                                 CombinedGlobalElemPtr, &M);
1592         FAlias->setVisibility(F->getVisibility());
1593         FAlias->takeName(F);
1594         if (FAlias->hasName())
1595           F->setName(FAlias->getName() + ".cfi");
1596         replaceCfiUses(F, FAlias, IsJumpTableCanonical);
1597         if (!F->hasLocalLinkage())
1598           F->setVisibility(GlobalVariable::HiddenVisibility);
1599       }
1600     }
1601   }
1602 
1603   createJumpTable(JumpTableFn, Functions);
1604 }
1605 
1606 /// Assign a dummy layout using an incrementing counter, tag each function
1607 /// with its index represented as metadata, and lower each type test to an
/// integer range comparison. When the backend generates the indirect function
/// call table, it assigns these indices to the corresponding table slots.
1610 /// Note: Dynamic linking is not supported, as the WebAssembly ABI has not yet
1611 /// been finalized.
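/// For example (illustrative), the third address-taken function in the set is
/// tagged as:
///   define void @f() !wasm.index !0 { ... }
///   !0 = !{i64 2}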
1612 void LowerTypeTestsModule::buildBitSetsFromFunctionsWASM(
1613     ArrayRef<Metadata *> TypeIds, ArrayRef<GlobalTypeMember *> Functions) {
1614   assert(!Functions.empty());
1615 
  // Build consecutive monotonic integer ranges for each call target set.
1617   DenseMap<GlobalTypeMember *, uint64_t> GlobalLayout;
1618 
1619   for (GlobalTypeMember *GTM : Functions) {
1620     Function *F = cast<Function>(GTM->getGlobal());
1621 
    // Skip functions that are not address taken, to avoid bloating the table.
1623     if (!F->hasAddressTaken())
1624       continue;
1625 
    // Store metadata with the index for each function.
1627     MDNode *MD = MDNode::get(F->getContext(),
1628                              ArrayRef<Metadata *>(ConstantAsMetadata::get(
1629                                  ConstantInt::get(Int64Ty, IndirectIndex))));
1630     F->setMetadata("wasm.index", MD);
1631 
    // Assign the counter value.
1633     GlobalLayout[GTM] = IndirectIndex++;
1634   }
1635 
1636   // The indirect function table index space starts at zero, so pass a NULL
1637   // pointer as the subtracted "jump table" offset.
1638   lowerTypeTestCalls(TypeIds, ConstantPointerNull::get(Int32PtrTy),
1639                      GlobalLayout);
1640 }
1641 
1642 void LowerTypeTestsModule::buildBitSetsFromDisjointSet(
1643     ArrayRef<Metadata *> TypeIds, ArrayRef<GlobalTypeMember *> Globals,
1644     ArrayRef<ICallBranchFunnel *> ICallBranchFunnels) {
1645   DenseMap<Metadata *, uint64_t> TypeIdIndices;
1646   for (unsigned I = 0; I != TypeIds.size(); ++I)
1647     TypeIdIndices[TypeIds[I]] = I;
1648 
1649   // For each type identifier, build a set of indices that refer to members of
1650   // the type identifier.
1651   std::vector<std::set<uint64_t>> TypeMembers(TypeIds.size());
1652   unsigned GlobalIndex = 0;
1653   DenseMap<GlobalTypeMember *, uint64_t> GlobalIndices;
1654   for (GlobalTypeMember *GTM : Globals) {
1655     for (MDNode *Type : GTM->types()) {
1656       // Type = { offset, type identifier }
1657       auto I = TypeIdIndices.find(Type->getOperand(1));
1658       if (I != TypeIdIndices.end())
1659         TypeMembers[I->second].insert(GlobalIndex);
1660     }
1661     GlobalIndices[GTM] = GlobalIndex;
1662     GlobalIndex++;
1663   }
1664 
1665   for (ICallBranchFunnel *JT : ICallBranchFunnels) {
1666     TypeMembers.emplace_back();
1667     std::set<uint64_t> &TMSet = TypeMembers.back();
1668     for (GlobalTypeMember *T : JT->targets())
1669       TMSet.insert(GlobalIndices[T]);
1670   }
1671 
1672   // Order the sets of indices by size. The GlobalLayoutBuilder works best
1673   // when given small index sets first.
1674   llvm::stable_sort(TypeMembers, [](const std::set<uint64_t> &O1,
1675                                     const std::set<uint64_t> &O2) {
1676     return O1.size() < O2.size();
1677   });
1678 
1679   // Create a GlobalLayoutBuilder and provide it with index sets as layout
1680   // fragments. The GlobalLayoutBuilder tries to lay out members of fragments as
1681   // close together as possible.
1682   GlobalLayoutBuilder GLB(Globals.size());
1683   for (auto &&MemSet : TypeMembers)
1684     GLB.addFragment(MemSet);
1685 
1686   // Build a vector of globals with the computed layout.
1687   bool IsGlobalSet =
1688       Globals.empty() || isa<GlobalVariable>(Globals[0]->getGlobal());
1689   std::vector<GlobalTypeMember *> OrderedGTMs(Globals.size());
1690   auto OGTMI = OrderedGTMs.begin();
1691   for (auto &&F : GLB.Fragments) {
1692     for (auto &&Offset : F) {
1693       if (IsGlobalSet != isa<GlobalVariable>(Globals[Offset]->getGlobal()))
1694         report_fatal_error("Type identifier may not contain both global "
1695                            "variables and functions");
1696       *OGTMI++ = Globals[Offset];
1697     }
1698   }
1699 
1700   // Build the bitsets from this disjoint set.
1701   if (IsGlobalSet)
1702     buildBitSetsFromGlobalVariables(TypeIds, OrderedGTMs);
1703   else
1704     buildBitSetsFromFunctions(TypeIds, OrderedGTMs);
1705 }
1706 
1707 /// Lower all type tests in this module.
1708 LowerTypeTestsModule::LowerTypeTestsModule(
1709     Module &M, ModuleSummaryIndex *ExportSummary,
1710     const ModuleSummaryIndex *ImportSummary, bool DropTypeTests)
1711     : M(M), ExportSummary(ExportSummary), ImportSummary(ImportSummary),
1712       DropTypeTests(DropTypeTests || ClDropTypeTests) {
1713   assert(!(ExportSummary && ImportSummary));
1714   Triple TargetTriple(M.getTargetTriple());
1715   Arch = TargetTriple.getArch();
1716   OS = TargetTriple.getOS();
1717   ObjectFormat = TargetTriple.getObjectFormat();
1718 }
1719 
1720 bool LowerTypeTestsModule::runForTesting(Module &M) {
1721   ModuleSummaryIndex Summary(/*HaveGVs=*/false);
1722 
1723   // Handle the command-line summary arguments. This code is for testing
1724   // purposes only, so we handle errors directly.
1725   if (!ClReadSummary.empty()) {
1726     ExitOnError ExitOnErr("-lowertypetests-read-summary: " + ClReadSummary +
1727                           ": ");
1728     auto ReadSummaryFile =
1729         ExitOnErr(errorOrToExpected(MemoryBuffer::getFile(ClReadSummary)));
1730 
1731     yaml::Input In(ReadSummaryFile->getBuffer());
1732     In >> Summary;
1733     ExitOnErr(errorCodeToError(In.error()));
1734   }
1735 
1736   bool Changed =
1737       LowerTypeTestsModule(
1738           M, ClSummaryAction == PassSummaryAction::Export ? &Summary : nullptr,
1739           ClSummaryAction == PassSummaryAction::Import ? &Summary : nullptr,
1740           /*DropTypeTests*/ false)
1741           .lower();
1742 
1743   if (!ClWriteSummary.empty()) {
1744     ExitOnError ExitOnErr("-lowertypetests-write-summary: " + ClWriteSummary +
1745                           ": ");
1746     std::error_code EC;
1747     raw_fd_ostream OS(ClWriteSummary, EC, sys::fs::OF_TextWithCRLF);
1748     ExitOnErr(errorCodeToError(EC));
1749 
1750     yaml::Output Out(OS);
1751     Out << Summary;
1752   }
1753 
1754   return Changed;
1755 }
1756 
static bool isDirectCall(Use &U) {
  if (auto *CI = dyn_cast<CallInst>(U.getUser()))
    return CI->isCallee(&U);
  return false;
}
1766 
1767 void LowerTypeTestsModule::replaceCfiUses(Function *Old, Value *New,
1768                                           bool IsJumpTableCanonical) {
1769   SmallSetVector<Constant *, 4> Constants;
1770   for (Use &U : llvm::make_early_inc_range(Old->uses())) {
1771     // Skip block addresses and no_cfi values, which refer to the function
1772     // body instead of the jump table.
1773     if (isa<BlockAddress, NoCFIValue>(U.getUser()))
1774       continue;
1775 
    // Skip direct calls to externally defined or non-dso_local functions.
1777     if (isDirectCall(U) && (Old->isDSOLocal() || !IsJumpTableCanonical))
1778       continue;
1779 
1780     // Must handle Constants specially, we cannot call replaceUsesOfWith on a
1781     // constant because they are uniqued.
1782     if (auto *C = dyn_cast<Constant>(U.getUser())) {
1783       if (!isa<GlobalValue>(C)) {
1784         // Save unique users to avoid processing operand replacement
1785         // more than once.
1786         Constants.insert(C);
1787         continue;
1788       }
1789     }
1790 
1791     U.set(New);
1792   }
1793 
1794   // Process operand replacement of saved constants.
1795   for (auto *C : Constants)
1796     C->handleOperandChange(Old, New);
1797 }
1798 
1799 void LowerTypeTestsModule::replaceDirectCalls(Value *Old, Value *New) {
1800   Old->replaceUsesWithIf(New, isDirectCall);
1801 }
1802 
1803 static void dropTypeTests(Module &M, Function &TypeTestFunc) {
1804   for (Use &U : llvm::make_early_inc_range(TypeTestFunc.uses())) {
1805     auto *CI = cast<CallInst>(U.getUser());
1806     // Find and erase llvm.assume intrinsics for this llvm.type.test call.
1807     for (Use &CIU : llvm::make_early_inc_range(CI->uses()))
1808       if (auto *Assume = dyn_cast<AssumeInst>(CIU.getUser()))
1809         Assume->eraseFromParent();
1810     // If the assume was merged with another assume, we might have a use on a
1811     // phi (which will feed the assume). Simply replace the use on the phi
1812     // with "true" and leave the merged assume.
1813     if (!CI->use_empty()) {
1814       assert(
1815           all_of(CI->users(), [](User *U) -> bool { return isa<PHINode>(U); }));
1816       CI->replaceAllUsesWith(ConstantInt::getTrue(M.getContext()));
1817     }
1818     CI->eraseFromParent();
1819   }
1820 }
1821 
1822 bool LowerTypeTestsModule::lower() {
1823   Function *TypeTestFunc =
1824       M.getFunction(Intrinsic::getName(Intrinsic::type_test));
1825 
1826   if (DropTypeTests) {
1827     if (TypeTestFunc)
1828       dropTypeTests(M, *TypeTestFunc);
    // Normally we'd have already removed all @llvm.public.type.test calls,
    // except in the case where we were originally performing ThinLTO but
    // decided not to in the backend.
1832     Function *PublicTypeTestFunc =
1833         M.getFunction(Intrinsic::getName(Intrinsic::public_type_test));
1834     if (PublicTypeTestFunc)
1835       dropTypeTests(M, *PublicTypeTestFunc);
1836     if (TypeTestFunc || PublicTypeTestFunc) {
1837       // We have deleted the type intrinsics, so we no longer have enough
1838       // information to reason about the liveness of virtual function pointers
1839       // in GlobalDCE.
1840       for (GlobalVariable &GV : M.globals())
1841         GV.eraseMetadata(LLVMContext::MD_vcall_visibility);
1842       return true;
1843     }
1844     return false;
1845   }
1846 
1847   // If only some of the modules were split, we cannot correctly perform
  // this transformation. We already checked for the presence of type tests
1849   // with partially split modules during the thin link, and would have emitted
1850   // an error if any were found, so here we can simply return.
1851   if ((ExportSummary && ExportSummary->partiallySplitLTOUnits()) ||
1852       (ImportSummary && ImportSummary->partiallySplitLTOUnits()))
1853     return false;
1854 
1855   Function *ICallBranchFunnelFunc =
1856       M.getFunction(Intrinsic::getName(Intrinsic::icall_branch_funnel));
1857   if ((!TypeTestFunc || TypeTestFunc->use_empty()) &&
1858       (!ICallBranchFunnelFunc || ICallBranchFunnelFunc->use_empty()) &&
1859       !ExportSummary && !ImportSummary)
1860     return false;
1861 
1862   if (ImportSummary) {
1863     if (TypeTestFunc)
1864       for (Use &U : llvm::make_early_inc_range(TypeTestFunc->uses()))
1865         importTypeTest(cast<CallInst>(U.getUser()));
1866 
1867     if (ICallBranchFunnelFunc && !ICallBranchFunnelFunc->use_empty())
1868       report_fatal_error(
1869           "unexpected call to llvm.icall.branch.funnel during import phase");
1870 
1871     SmallVector<Function *, 8> Defs;
1872     SmallVector<Function *, 8> Decls;
1873     for (auto &F : M) {
      // CFI functions are either external or promoted. A local function may
1875       // have the same name, but it's not the one we are looking for.
1876       if (F.hasLocalLinkage())
1877         continue;
1878       if (ImportSummary->cfiFunctionDefs().count(std::string(F.getName())))
1879         Defs.push_back(&F);
1880       else if (ImportSummary->cfiFunctionDecls().count(
1881                    std::string(F.getName())))
1882         Decls.push_back(&F);
1883     }
1884 
1885     std::vector<GlobalAlias *> AliasesToErase;
1886     {
1887       ScopedSaveAliaseesAndUsed S(M);
1888       for (auto *F : Defs)
1889         importFunction(F, /*isJumpTableCanonical*/ true, AliasesToErase);
1890       for (auto *F : Decls)
1891         importFunction(F, /*isJumpTableCanonical*/ false, AliasesToErase);
1892     }
1893     for (GlobalAlias *GA : AliasesToErase)
1894       GA->eraseFromParent();
1895 
1896     return true;
1897   }
1898 
1899   // Equivalence class set containing type identifiers and the globals that
1900   // reference them. This is used to partition the set of type identifiers in
1901   // the module into disjoint sets.
1902   using GlobalClassesTy = EquivalenceClasses<
1903       PointerUnion<GlobalTypeMember *, Metadata *, ICallBranchFunnel *>>;
1904   GlobalClassesTy GlobalClasses;
1905 
1906   // Verify the type metadata and build a few data structures to let us
1907   // efficiently enumerate the type identifiers associated with a global:
1908   // a list of GlobalTypeMembers (a GlobalObject stored alongside a vector
1909   // of associated type metadata) and a mapping from type identifiers to their
1910   // list of GlobalTypeMembers and last observed index in the list of globals.
1911   // The indices will be used later to deterministically order the list of type
1912   // identifiers.
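  //
  // For example (illustrative), a global declared as
  //   @g = constant i32 0, !type !0, !type !1
  //   !0 = !{i64 0, !"typeid1"}
  //   !1 = !{i64 0, !"typeid2"}
  // contributes a single GlobalTypeMember that appears in the RefGlobals list
  // of both "typeid1" and "typeid2".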
1913   BumpPtrAllocator Alloc;
1914   struct TIInfo {
1915     unsigned UniqueId;
1916     std::vector<GlobalTypeMember *> RefGlobals;
1917   };
1918   DenseMap<Metadata *, TIInfo> TypeIdInfo;
1919   unsigned CurUniqueId = 0;
1920   SmallVector<MDNode *, 2> Types;
1921 
  // Cross-DSO CFI emits jump table entries for exported functions as well as
  // address-taken functions, in case their addresses are taken in other
  // modules.
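  // (Illustrative: Clang sets this module flag under -fsanitize-cfi-cross-dso.)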
1924   const bool CrossDsoCfi = M.getModuleFlag("Cross-DSO CFI") != nullptr;
1925 
1926   struct ExportedFunctionInfo {
1927     CfiFunctionLinkage Linkage;
1928     MDNode *FuncMD; // {name, linkage, type[, type...]}
1929   };
1930   DenseMap<StringRef, ExportedFunctionInfo> ExportedFunctions;
1931   if (ExportSummary) {
1932     // A set of all functions that are address taken by a live global object.
1933     DenseSet<GlobalValue::GUID> AddressTaken;
1934     for (auto &I : *ExportSummary)
1935       for (auto &GVS : I.second.SummaryList)
1936         if (GVS->isLive())
1937           for (const auto &Ref : GVS->refs())
1938             AddressTaken.insert(Ref.getGUID());
1939 
1940     NamedMDNode *CfiFunctionsMD = M.getNamedMetadata("cfi.functions");
1941     if (CfiFunctionsMD) {
1942       for (auto *FuncMD : CfiFunctionsMD->operands()) {
1943         assert(FuncMD->getNumOperands() >= 2);
1944         StringRef FunctionName =
1945             cast<MDString>(FuncMD->getOperand(0))->getString();
1946         CfiFunctionLinkage Linkage = static_cast<CfiFunctionLinkage>(
1947             cast<ConstantAsMetadata>(FuncMD->getOperand(1))
1948                 ->getValue()
1949                 ->getUniqueInteger()
1950                 .getZExtValue());
1951         const GlobalValue::GUID GUID = GlobalValue::getGUID(
1952                 GlobalValue::dropLLVMManglingEscape(FunctionName));
        // Do not emit jumptable entries for functions that are not live and
        // have no live references (and are not exported with cross-DSO CFI).
1955         if (!ExportSummary->isGUIDLive(GUID))
1956           continue;
1957         if (!AddressTaken.count(GUID)) {
1958           if (!CrossDsoCfi || Linkage != CFL_Definition)
1959             continue;
1960 
1961           bool Exported = false;
1962           if (auto VI = ExportSummary->getValueInfo(GUID))
1963             for (const auto &GVS : VI.getSummaryList())
1964               if (GVS->isLive() && !GlobalValue::isLocalLinkage(GVS->linkage()))
1965                 Exported = true;
1966 
1967           if (!Exported)
1968             continue;
1969         }
1970         auto P = ExportedFunctions.insert({FunctionName, {Linkage, FuncMD}});
1971         if (!P.second && P.first->second.Linkage != CFL_Definition)
1972           P.first->second = {Linkage, FuncMD};
1973       }
1974 
1975       for (const auto &P : ExportedFunctions) {
1976         StringRef FunctionName = P.first;
1977         CfiFunctionLinkage Linkage = P.second.Linkage;
1978         MDNode *FuncMD = P.second.FuncMD;
1979         Function *F = M.getFunction(FunctionName);
1980         if (F && F->hasLocalLinkage()) {
1981           // Locally defined function that happens to have the same name as a
1982           // function defined in a ThinLTO module. Rename it to move it out of
1983           // the way of the external reference that we're about to create.
1984           // Note that setName will find a unique name for the function, so even
1985           // if there is an existing function with the suffix there won't be a
1986           // name collision.
1987           F->setName(F->getName() + ".1");
1988           F = nullptr;
1989         }
1990 
1991         if (!F)
1992           F = Function::Create(
1993               FunctionType::get(Type::getVoidTy(M.getContext()), false),
1994               GlobalVariable::ExternalLinkage,
1995               M.getDataLayout().getProgramAddressSpace(), FunctionName, &M);
1996 
1997         // If the function is available_externally, remove its definition so
1998         // that it is handled the same way as a declaration. Later we will try
1999         // to create an alias using this function's linkage, which will fail if
2000         // the linkage is available_externally. This will also result in us
2001         // following the code path below to replace the type metadata.
2002         if (F->hasAvailableExternallyLinkage()) {
2003           F->setLinkage(GlobalValue::ExternalLinkage);
2004           F->deleteBody();
2005           F->setComdat(nullptr);
2006           F->clearMetadata();
2007         }
2008 
2009         // Update the linkage for extern_weak declarations when a definition
2010         // exists.
2011         if (Linkage == CFL_Definition && F->hasExternalWeakLinkage())
2012           F->setLinkage(GlobalValue::ExternalLinkage);
2013 
2014         // If the function in the full LTO module is a declaration, replace its
2015         // type metadata with the type metadata we found in cfi.functions. That
2016         // metadata is presumed to be more accurate than the metadata attached
2017         // to the declaration.
2018         if (F->isDeclaration()) {
2019           if (Linkage == CFL_WeakDeclaration)
2020             F->setLinkage(GlobalValue::ExternalWeakLinkage);
2021 
2022           F->eraseMetadata(LLVMContext::MD_type);
2023           for (unsigned I = 2; I < FuncMD->getNumOperands(); ++I)
2024             F->addMetadata(LLVMContext::MD_type,
2025                            *cast<MDNode>(FuncMD->getOperand(I).get()));
2026         }
2027       }
2028     }
2029   }
2030 
2031   DenseMap<GlobalObject *, GlobalTypeMember *> GlobalTypeMembers;
2032   for (GlobalObject &GO : M.global_objects()) {
2033     if (isa<GlobalVariable>(GO) && GO.isDeclarationForLinker())
2034       continue;
2035 
2036     Types.clear();
2037     GO.getMetadata(LLVMContext::MD_type, Types);
2038 
2039     bool IsJumpTableCanonical = false;
2040     bool IsExported = false;
2041     if (Function *F = dyn_cast<Function>(&GO)) {
2042       IsJumpTableCanonical = isJumpTableCanonical(F);
2043       if (ExportedFunctions.count(F->getName())) {
2044         IsJumpTableCanonical |=
2045             ExportedFunctions[F->getName()].Linkage == CFL_Definition;
2046         IsExported = true;
2047       // TODO: The logic here checks only that the function is address taken,
2048       // not that the address takers are live. This can be updated to check
2049       // their liveness and emit fewer jumptable entries once monolithic LTO
2050       // builds also emit summaries.
2051       } else if (!F->hasAddressTaken()) {
2052         if (!CrossDsoCfi || !IsJumpTableCanonical || F->hasLocalLinkage())
2053           continue;
2054       }
2055     }
2056 
2057     auto *GTM = GlobalTypeMember::create(Alloc, &GO, IsJumpTableCanonical,
2058                                          IsExported, Types);
2059     GlobalTypeMembers[&GO] = GTM;
2060     for (MDNode *Type : Types) {
2061       verifyTypeMDNode(&GO, Type);
2062       auto &Info = TypeIdInfo[Type->getOperand(1)];
2063       Info.UniqueId = ++CurUniqueId;
2064       Info.RefGlobals.push_back(GTM);
2065     }
2066   }
2067 
2068   auto AddTypeIdUse = [&](Metadata *TypeId) -> TypeIdUserInfo & {
2069     // Add the call site to the list of call sites for this type identifier. We
2070     // also use TypeIdUsers to keep track of whether we have seen this type
2071     // identifier before. If we have, we don't need to re-add the referenced
2072     // globals to the equivalence class.
2073     auto Ins = TypeIdUsers.insert({TypeId, {}});
2074     if (Ins.second) {
2075       // Add the type identifier to the equivalence class.
2076       GlobalClassesTy::iterator GCI = GlobalClasses.insert(TypeId);
2077       GlobalClassesTy::member_iterator CurSet = GlobalClasses.findLeader(GCI);
2078 
2079       // Add the referenced globals to the type identifier's equivalence class.
2080       for (GlobalTypeMember *GTM : TypeIdInfo[TypeId].RefGlobals)
2081         CurSet = GlobalClasses.unionSets(
2082             CurSet, GlobalClasses.findLeader(GlobalClasses.insert(GTM)));
2083     }
2084 
2085     return Ins.first->second;
2086   };
2087 
2088   if (TypeTestFunc) {
2089     for (const Use &U : TypeTestFunc->uses()) {
      auto *CI = cast<CallInst>(U.getUser());
2091       // If this type test is only used by llvm.assume instructions, it
2092       // was used for whole program devirtualization, and is being kept
2093       // for use by other optimization passes. We do not need or want to
2094       // lower it here. We also don't want to rewrite any associated globals
2095       // unnecessarily. These will be removed by a subsequent LTT invocation
2096       // with the DropTypeTests flag set.
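      // An illustrative instance of the pattern being skipped:
      //   %t = call i1 @llvm.type.test(ptr %p, metadata !"_ZTS1A")
      //   call void @llvm.assume(i1 %t)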
2097       bool OnlyAssumeUses = !CI->use_empty();
2098       for (const Use &CIU : CI->uses()) {
2099         if (isa<AssumeInst>(CIU.getUser()))
2100           continue;
2101         OnlyAssumeUses = false;
2102         break;
2103       }
2104       if (OnlyAssumeUses)
2105         continue;
2106 
      auto *TypeIdMDVal = dyn_cast<MetadataAsValue>(CI->getArgOperand(1));
      if (!TypeIdMDVal)
        report_fatal_error("Second argument of llvm.type.test must be metadata");
      auto *TypeId = TypeIdMDVal->getMetadata();
2111       AddTypeIdUse(TypeId).CallSites.push_back(CI);
2112     }
2113   }
2114 
2115   if (ICallBranchFunnelFunc) {
2116     for (const Use &U : ICallBranchFunnelFunc->uses()) {
2117       if (Arch != Triple::x86_64)
2118         report_fatal_error(
2119             "llvm.icall.branch.funnel not supported on this target");
2120 
      auto *CI = cast<CallInst>(U.getUser());
2122 
2123       std::vector<GlobalTypeMember *> Targets;
2124       if (CI->arg_size() % 2 != 1)
2125         report_fatal_error("number of arguments should be odd");
2126 
2127       GlobalClassesTy::member_iterator CurSet;
2128       for (unsigned I = 1; I != CI->arg_size(); I += 2) {
2129         int64_t Offset;
2130         auto *Base = dyn_cast<GlobalObject>(GetPointerBaseWithConstantOffset(
2131             CI->getOperand(I), Offset, M.getDataLayout()));
2132         if (!Base)
2133           report_fatal_error(
2134               "Expected branch funnel operand to be global value");
2135 
2136         GlobalTypeMember *GTM = GlobalTypeMembers[Base];
2137         Targets.push_back(GTM);
2138         GlobalClassesTy::member_iterator NewSet =
2139             GlobalClasses.findLeader(GlobalClasses.insert(GTM));
2140         if (I == 1)
2141           CurSet = NewSet;
2142         else
2143           CurSet = GlobalClasses.unionSets(CurSet, NewSet);
2144       }
2145 
2146       GlobalClasses.unionSets(
2147           CurSet, GlobalClasses.findLeader(
2148                       GlobalClasses.insert(ICallBranchFunnel::create(
2149                           Alloc, CI, Targets, ++CurUniqueId))));
2150     }
2151   }
2152 
2153   if (ExportSummary) {
2154     DenseMap<GlobalValue::GUID, TinyPtrVector<Metadata *>> MetadataByGUID;
2155     for (auto &P : TypeIdInfo) {
2156       if (auto *TypeId = dyn_cast<MDString>(P.first))
2157         MetadataByGUID[GlobalValue::getGUID(TypeId->getString())].push_back(
2158             TypeId);
2159     }
2160 
2161     for (auto &P : *ExportSummary) {
2162       for (auto &S : P.second.SummaryList) {
2163         if (!ExportSummary->isGlobalValueLive(S.get()))
2164           continue;
2165         if (auto *FS = dyn_cast<FunctionSummary>(S->getBaseObject()))
2166           for (GlobalValue::GUID G : FS->type_tests())
2167             for (Metadata *MD : MetadataByGUID[G])
2168               AddTypeIdUse(MD).IsExported = true;
2169       }
2170     }
2171   }
2172 
2173   if (GlobalClasses.empty())
2174     return false;
2175 
2176   // Build a list of disjoint sets ordered by their maximum global index for
2177   // determinism.
2178   std::vector<std::pair<GlobalClassesTy::iterator, unsigned>> Sets;
2179   for (GlobalClassesTy::iterator I = GlobalClasses.begin(),
2180                                  E = GlobalClasses.end();
2181        I != E; ++I) {
2182     if (!I->isLeader())
2183       continue;
2184     ++NumTypeIdDisjointSets;
2185 
2186     unsigned MaxUniqueId = 0;
2187     for (GlobalClassesTy::member_iterator MI = GlobalClasses.member_begin(I);
2188          MI != GlobalClasses.member_end(); ++MI) {
2189       if (auto *MD = MI->dyn_cast<Metadata *>())
2190         MaxUniqueId = std::max(MaxUniqueId, TypeIdInfo[MD].UniqueId);
2191       else if (auto *BF = MI->dyn_cast<ICallBranchFunnel *>())
2192         MaxUniqueId = std::max(MaxUniqueId, BF->UniqueId);
2193     }
2194     Sets.emplace_back(I, MaxUniqueId);
2195   }
2196   llvm::sort(Sets, llvm::less_second());
2197 
2198   // For each disjoint set we found...
2199   for (const auto &S : Sets) {
2200     // Build the list of type identifiers in this disjoint set.
2201     std::vector<Metadata *> TypeIds;
2202     std::vector<GlobalTypeMember *> Globals;
2203     std::vector<ICallBranchFunnel *> ICallBranchFunnels;
2204     for (GlobalClassesTy::member_iterator MI =
2205              GlobalClasses.member_begin(S.first);
2206          MI != GlobalClasses.member_end(); ++MI) {
2207       if (MI->is<Metadata *>())
2208         TypeIds.push_back(MI->get<Metadata *>());
2209       else if (MI->is<GlobalTypeMember *>())
2210         Globals.push_back(MI->get<GlobalTypeMember *>());
2211       else
2212         ICallBranchFunnels.push_back(MI->get<ICallBranchFunnel *>());
2213     }
2214 
2215     // Order type identifiers by unique ID for determinism. This ordering is
2216     // stable as there is a one-to-one mapping between metadata and unique IDs.
2217     llvm::sort(TypeIds, [&](Metadata *M1, Metadata *M2) {
2218       return TypeIdInfo[M1].UniqueId < TypeIdInfo[M2].UniqueId;
2219     });
2220 
2221     // Same for the branch funnels.
2222     llvm::sort(ICallBranchFunnels,
2223                [&](ICallBranchFunnel *F1, ICallBranchFunnel *F2) {
2224                  return F1->UniqueId < F2->UniqueId;
2225                });
2226 
2227     // Build bitsets for this disjoint set.
2228     buildBitSetsFromDisjointSet(TypeIds, Globals, ICallBranchFunnels);
2229   }
2230 
2231   allocateByteArrays();
2232 
2233   // Parse alias data to replace stand-in function declarations for aliases
2234   // with an alias to the intended target.
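  // (Illustrative !aliases operand, encoded as {name, aliasee, visibility,
  //  weak}: !{!"alias_name", !"aliasee_name", i8 0, i8 0}.)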
2235   if (ExportSummary) {
2236     if (NamedMDNode *AliasesMD = M.getNamedMetadata("aliases")) {
2237       for (auto *AliasMD : AliasesMD->operands()) {
2238         assert(AliasMD->getNumOperands() >= 4);
2239         StringRef AliasName =
2240             cast<MDString>(AliasMD->getOperand(0))->getString();
2241         StringRef Aliasee = cast<MDString>(AliasMD->getOperand(1))->getString();
2242 
2243         if (!ExportedFunctions.count(Aliasee) ||
2244             ExportedFunctions[Aliasee].Linkage != CFL_Definition ||
2245             !M.getNamedAlias(Aliasee))
2246           continue;
2247 
2248         GlobalValue::VisibilityTypes Visibility =
2249             static_cast<GlobalValue::VisibilityTypes>(
2250                 cast<ConstantAsMetadata>(AliasMD->getOperand(2))
2251                     ->getValue()
2252                     ->getUniqueInteger()
2253                     .getZExtValue());
2254         bool Weak =
2255             static_cast<bool>(cast<ConstantAsMetadata>(AliasMD->getOperand(3))
2256                                   ->getValue()
2257                                   ->getUniqueInteger()
2258                                   .getZExtValue());
2259 
2260         auto *Alias = GlobalAlias::create("", M.getNamedAlias(Aliasee));
2261         Alias->setVisibility(Visibility);
2262         if (Weak)
2263           Alias->setLinkage(GlobalValue::WeakAnyLinkage);
2264 
2265         if (auto *F = M.getFunction(AliasName)) {
2266           Alias->takeName(F);
2267           F->replaceAllUsesWith(Alias);
2268           F->eraseFromParent();
2269         } else {
2270           Alias->setName(AliasName);
2271         }
2272       }
2273     }
2274   }
2275 
2276   // Emit .symver directives for exported functions, if they exist.
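  // (Illustrative: an operand pair ("foo", "foo@@VERS_1.0") becomes the module
  //  inline asm line ".symver foo, foo@@VERS_1.0".)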
2277   if (ExportSummary) {
2278     if (NamedMDNode *SymversMD = M.getNamedMetadata("symvers")) {
2279       for (auto *Symver : SymversMD->operands()) {
2280         assert(Symver->getNumOperands() >= 2);
2281         StringRef SymbolName =
2282             cast<MDString>(Symver->getOperand(0))->getString();
2283         StringRef Alias = cast<MDString>(Symver->getOperand(1))->getString();
2284 
2285         if (!ExportedFunctions.count(SymbolName))
2286           continue;
2287 
2288         M.appendModuleInlineAsm(
2289             (llvm::Twine(".symver ") + SymbolName + ", " + Alias).str());
2290       }
2291     }
2292   }
2293 
2294   return true;
2295 }
2296 
2297 PreservedAnalyses LowerTypeTestsPass::run(Module &M,
2298                                           ModuleAnalysisManager &AM) {
2299   bool Changed;
2300   if (UseCommandLine)
2301     Changed = LowerTypeTestsModule::runForTesting(M);
2302   else
2303     Changed =
2304         LowerTypeTestsModule(M, ExportSummary, ImportSummary, DropTypeTests)
2305             .lower();
2306   if (!Changed)
2307     return PreservedAnalyses::all();
2308   return PreservedAnalyses::none();
2309 }
2310