1 //===- LowerTypeTests.cpp - type metadata lowering pass -------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This pass lowers type metadata and calls to the llvm.type.test intrinsic.
10 // It also ensures that globals are properly laid out for the
11 // llvm.icall.branch.funnel intrinsic.
12 // See http://llvm.org/docs/TypeMetadata.html for more information.
13 //
14 //===----------------------------------------------------------------------===//
15 
16 #include "llvm/Transforms/IPO/LowerTypeTests.h"
17 #include "llvm/ADT/APInt.h"
18 #include "llvm/ADT/ArrayRef.h"
19 #include "llvm/ADT/DenseMap.h"
20 #include "llvm/ADT/EquivalenceClasses.h"
21 #include "llvm/ADT/PointerUnion.h"
22 #include "llvm/ADT/SetVector.h"
23 #include "llvm/ADT/SmallVector.h"
24 #include "llvm/ADT/Statistic.h"
25 #include "llvm/ADT/StringRef.h"
26 #include "llvm/ADT/TinyPtrVector.h"
27 #include "llvm/ADT/Triple.h"
28 #include "llvm/Analysis/TypeMetadataUtils.h"
29 #include "llvm/Analysis/ValueTracking.h"
30 #include "llvm/IR/Attributes.h"
31 #include "llvm/IR/BasicBlock.h"
32 #include "llvm/IR/Constant.h"
33 #include "llvm/IR/Constants.h"
34 #include "llvm/IR/DataLayout.h"
35 #include "llvm/IR/DerivedTypes.h"
36 #include "llvm/IR/Function.h"
37 #include "llvm/IR/GlobalAlias.h"
38 #include "llvm/IR/GlobalObject.h"
39 #include "llvm/IR/GlobalValue.h"
40 #include "llvm/IR/GlobalVariable.h"
41 #include "llvm/IR/IRBuilder.h"
42 #include "llvm/IR/InlineAsm.h"
43 #include "llvm/IR/Instruction.h"
44 #include "llvm/IR/Instructions.h"
45 #include "llvm/IR/IntrinsicInst.h"
46 #include "llvm/IR/Intrinsics.h"
47 #include "llvm/IR/LLVMContext.h"
48 #include "llvm/IR/Metadata.h"
49 #include "llvm/IR/Module.h"
50 #include "llvm/IR/ModuleSummaryIndex.h"
51 #include "llvm/IR/ModuleSummaryIndexYAML.h"
52 #include "llvm/IR/Operator.h"
53 #include "llvm/IR/PassManager.h"
54 #include "llvm/IR/Type.h"
55 #include "llvm/IR/Use.h"
56 #include "llvm/IR/User.h"
57 #include "llvm/IR/Value.h"
58 #include "llvm/InitializePasses.h"
59 #include "llvm/Pass.h"
60 #include "llvm/Support/Allocator.h"
61 #include "llvm/Support/Casting.h"
62 #include "llvm/Support/CommandLine.h"
63 #include "llvm/Support/Debug.h"
64 #include "llvm/Support/Error.h"
65 #include "llvm/Support/ErrorHandling.h"
66 #include "llvm/Support/FileSystem.h"
67 #include "llvm/Support/MathExtras.h"
68 #include "llvm/Support/MemoryBuffer.h"
69 #include "llvm/Support/TrailingObjects.h"
70 #include "llvm/Support/YAMLTraits.h"
71 #include "llvm/Support/raw_ostream.h"
72 #include "llvm/Transforms/IPO.h"
73 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
74 #include "llvm/Transforms/Utils/ModuleUtils.h"
75 #include <algorithm>
76 #include <cassert>
77 #include <cstdint>
78 #include <memory>
79 #include <set>
80 #include <string>
81 #include <system_error>
82 #include <utility>
83 #include <vector>
84 
85 using namespace llvm;
86 using namespace lowertypetests;
87 
88 #define DEBUG_TYPE "lowertypetests"
89 
90 STATISTIC(ByteArraySizeBits, "Byte array size in bits");
91 STATISTIC(ByteArraySizeBytes, "Byte array size in bytes");
92 STATISTIC(NumByteArraysCreated, "Number of byte arrays created");
93 STATISTIC(NumTypeTestCallsLowered, "Number of type test calls lowered");
94 STATISTIC(NumTypeIdDisjointSets, "Number of disjoint sets of type identifiers");
95 
96 static cl::opt<bool> AvoidReuse(
97     "lowertypetests-avoid-reuse",
98     cl::desc("Try to avoid reuse of byte array addresses using aliases"),
99     cl::Hidden, cl::init(true));
100 
101 static cl::opt<PassSummaryAction> ClSummaryAction(
102     "lowertypetests-summary-action",
103     cl::desc("What to do with the summary when running this pass"),
104     cl::values(clEnumValN(PassSummaryAction::None, "none", "Do nothing"),
105                clEnumValN(PassSummaryAction::Import, "import",
106                           "Import typeid resolutions from summary and globals"),
107                clEnumValN(PassSummaryAction::Export, "export",
108                           "Export typeid resolutions to summary and globals")),
109     cl::Hidden);
110 
111 static cl::opt<std::string> ClReadSummary(
112     "lowertypetests-read-summary",
113     cl::desc("Read summary from given YAML file before running pass"),
114     cl::Hidden);
115 
116 static cl::opt<std::string> ClWriteSummary(
117     "lowertypetests-write-summary",
118     cl::desc("Write summary to given YAML file after running pass"),
119     cl::Hidden);
120 
121 static cl::opt<bool>
122     ClDropTypeTests("lowertypetests-drop-type-tests",
123                     cl::desc("Simply drop type test assume sequences"),
124                     cl::Hidden, cl::init(false));
125 
126 bool BitSetInfo::containsGlobalOffset(uint64_t Offset) const {
127   if (Offset < ByteOffset)
128     return false;
129 
130   if ((Offset - ByteOffset) % (uint64_t(1) << AlignLog2) != 0)
131     return false;
132 
133   uint64_t BitOffset = (Offset - ByteOffset) >> AlignLog2;
134   if (BitOffset >= BitSize)
135     return false;
136 
137   return Bits.count(BitOffset);
138 }
139 
140 void BitSetInfo::print(raw_ostream &OS) const {
141   OS << "offset " << ByteOffset << " size " << BitSize << " align "
142      << (1 << AlignLog2);
143 
144   if (isAllOnes()) {
145     OS << " all-ones\n";
146     return;
147   }
148 
149   OS << " { ";
150   for (uint64_t B : Bits)
151     OS << B << ' ';
152   OS << "}\n";
153 }
154 
155 BitSetInfo BitSetBuilder::build() {
156   if (Min > Max)
157     Min = 0;
158 
159   // Normalize each offset against the minimum observed offset, and compute
160   // the bitwise OR of each of the offsets. The number of trailing zeros
161   // in the mask gives us the log2 of the alignment of all offsets, which
162   // allows us to compress the bitset by only storing one bit per aligned
163   // address.
164   uint64_t Mask = 0;
165   for (uint64_t &Offset : Offsets) {
166     Offset -= Min;
167     Mask |= Offset;
168   }
169 
170   BitSetInfo BSI;
171   BSI.ByteOffset = Min;
172 
173   BSI.AlignLog2 = 0;
174   if (Mask != 0)
175     BSI.AlignLog2 = countTrailingZeros(Mask, ZB_Undefined);
176 
177   // Build the compressed bitset while normalizing the offsets against the
178   // computed alignment.
179   BSI.BitSize = ((Max - Min) >> BSI.AlignLog2) + 1;
180   for (uint64_t Offset : Offsets) {
181     Offset >>= BSI.AlignLog2;
182     BSI.Bits.insert(Offset);
183   }
184 
185   return BSI;
186 }
187 
188 void GlobalLayoutBuilder::addFragment(const std::set<uint64_t> &F) {
189   // Create a new fragment to hold the layout for F.
190   Fragments.emplace_back();
191   std::vector<uint64_t> &Fragment = Fragments.back();
192   uint64_t FragmentIndex = Fragments.size() - 1;
193 
194   for (auto ObjIndex : F) {
195     uint64_t OldFragmentIndex = FragmentMap[ObjIndex];
196     if (OldFragmentIndex == 0) {
197       // We haven't seen this object index before, so just add it to the current
198       // fragment.
199       Fragment.push_back(ObjIndex);
200     } else {
201       // This index belongs to an existing fragment. Copy the elements of the
202       // old fragment into this one and clear the old fragment. We don't update
203       // the fragment map just yet, this ensures that any further references to
204       // indices from the old fragment in this fragment do not insert any more
205       // indices.
206       std::vector<uint64_t> &OldFragment = Fragments[OldFragmentIndex];
207       llvm::append_range(Fragment, OldFragment);
208       OldFragment.clear();
209     }
210   }
211 
212   // Update the fragment map to point our object indices to this fragment.
213   for (uint64_t ObjIndex : Fragment)
214     FragmentMap[ObjIndex] = FragmentIndex;
215 }
216 
217 void ByteArrayBuilder::allocate(const std::set<uint64_t> &Bits,
218                                 uint64_t BitSize, uint64_t &AllocByteOffset,
219                                 uint8_t &AllocMask) {
220   // Find the smallest current allocation.
221   unsigned Bit = 0;
222   for (unsigned I = 1; I != BitsPerByte; ++I)
223     if (BitAllocs[I] < BitAllocs[Bit])
224       Bit = I;
225 
226   AllocByteOffset = BitAllocs[Bit];
227 
228   // Add our size to it.
229   unsigned ReqSize = AllocByteOffset + BitSize;
230   BitAllocs[Bit] = ReqSize;
231   if (Bytes.size() < ReqSize)
232     Bytes.resize(ReqSize);
233 
234   // Set our bits.
235   AllocMask = 1 << Bit;
236   for (uint64_t B : Bits)
237     Bytes[AllocByteOffset + B] |= AllocMask;
238 }
239 
240 bool lowertypetests::isJumpTableCanonical(Function *F) {
241   if (F->isDeclarationForLinker())
242     return false;
243   auto *CI = mdconst::extract_or_null<ConstantInt>(
244       F->getParent()->getModuleFlag("CFI Canonical Jump Tables"));
245   if (!CI || CI->getZExtValue() != 0)
246     return true;
247   return F->hasFnAttribute("cfi-canonical-jump-table");
248 }
249 
250 namespace {
251 
252 struct ByteArrayInfo {
253   std::set<uint64_t> Bits;
254   uint64_t BitSize;
255   GlobalVariable *ByteArray;
256   GlobalVariable *MaskGlobal;
257   uint8_t *MaskPtr = nullptr;
258 };
259 
260 /// A POD-like structure that we use to store a global reference together with
261 /// its metadata types. In this pass we frequently need to query the set of
262 /// metadata types referenced by a global, which at the IR level is an expensive
263 /// operation involving a map lookup; this data structure helps to reduce the
264 /// number of times we need to do this lookup.
265 class GlobalTypeMember final : TrailingObjects<GlobalTypeMember, MDNode *> {
266   friend TrailingObjects;
267 
268   GlobalObject *GO;
269   size_t NTypes;
270 
271   // For functions: true if the jump table is canonical. This essentially means
272   // whether the canonical address (i.e. the symbol table entry) of the function
273   // is provided by the local jump table. This is normally the same as whether
274   // the function is defined locally, but if canonical jump tables are disabled
275   // by the user then the jump table never provides a canonical definition.
276   bool IsJumpTableCanonical;
277 
278   // For functions: true if this function is either defined or used in a thinlto
279   // module and its jumptable entry needs to be exported to thinlto backends.
280   bool IsExported;
281 
282   size_t numTrailingObjects(OverloadToken<MDNode *>) const { return NTypes; }
283 
284 public:
285   static GlobalTypeMember *create(BumpPtrAllocator &Alloc, GlobalObject *GO,
286                                   bool IsJumpTableCanonical, bool IsExported,
287                                   ArrayRef<MDNode *> Types) {
288     auto *GTM = static_cast<GlobalTypeMember *>(Alloc.Allocate(
289         totalSizeToAlloc<MDNode *>(Types.size()), alignof(GlobalTypeMember)));
290     GTM->GO = GO;
291     GTM->NTypes = Types.size();
292     GTM->IsJumpTableCanonical = IsJumpTableCanonical;
293     GTM->IsExported = IsExported;
294     std::uninitialized_copy(Types.begin(), Types.end(),
295                             GTM->getTrailingObjects<MDNode *>());
296     return GTM;
297   }
298 
299   GlobalObject *getGlobal() const {
300     return GO;
301   }
302 
303   bool isJumpTableCanonical() const {
304     return IsJumpTableCanonical;
305   }
306 
307   bool isExported() const {
308     return IsExported;
309   }
310 
311   ArrayRef<MDNode *> types() const {
312     return makeArrayRef(getTrailingObjects<MDNode *>(), NTypes);
313   }
314 };
315 
316 struct ICallBranchFunnel final
317     : TrailingObjects<ICallBranchFunnel, GlobalTypeMember *> {
318   static ICallBranchFunnel *create(BumpPtrAllocator &Alloc, CallInst *CI,
319                                    ArrayRef<GlobalTypeMember *> Targets,
320                                    unsigned UniqueId) {
321     auto *Call = static_cast<ICallBranchFunnel *>(
322         Alloc.Allocate(totalSizeToAlloc<GlobalTypeMember *>(Targets.size()),
323                        alignof(ICallBranchFunnel)));
324     Call->CI = CI;
325     Call->UniqueId = UniqueId;
326     Call->NTargets = Targets.size();
327     std::uninitialized_copy(Targets.begin(), Targets.end(),
328                             Call->getTrailingObjects<GlobalTypeMember *>());
329     return Call;
330   }
331 
332   CallInst *CI;
333   ArrayRef<GlobalTypeMember *> targets() const {
334     return makeArrayRef(getTrailingObjects<GlobalTypeMember *>(), NTargets);
335   }
336 
337   unsigned UniqueId;
338 
339 private:
340   size_t NTargets;
341 };
342 
343 struct ScopedSaveAliaseesAndUsed {
344   Module &M;
345   SmallVector<GlobalValue *, 4> Used, CompilerUsed;
346   std::vector<std::pair<GlobalAlias *, Function *>> FunctionAliases;
347   std::vector<std::pair<GlobalIFunc *, Function *>> ResolverIFuncs;
348 
349   ScopedSaveAliaseesAndUsed(Module &M) : M(M) {
350     // The users of this class want to replace all function references except
351     // for aliases and llvm.used/llvm.compiler.used with references to a jump
352     // table. We avoid replacing aliases in order to avoid introducing a double
353     // indirection (or an alias pointing to a declaration in ThinLTO mode), and
354     // we avoid replacing llvm.used/llvm.compiler.used because these global
355     // variables describe properties of the global, not the jump table (besides,
356     // offseted references to the jump table in llvm.used are invalid).
357     // Unfortunately, LLVM doesn't have a "RAUW except for these (possibly
358     // indirect) users", so what we do is save the list of globals referenced by
359     // llvm.used/llvm.compiler.used and aliases, erase the used lists, let RAUW
360     // replace the aliasees and then set them back to their original values at
361     // the end.
362     if (GlobalVariable *GV = collectUsedGlobalVariables(M, Used, false))
363       GV->eraseFromParent();
364     if (GlobalVariable *GV = collectUsedGlobalVariables(M, CompilerUsed, true))
365       GV->eraseFromParent();
366 
367     for (auto &GA : M.aliases()) {
368       // FIXME: This should look past all aliases not just interposable ones,
369       // see discussion on D65118.
370       if (auto *F = dyn_cast<Function>(GA.getAliasee()->stripPointerCasts()))
371         FunctionAliases.push_back({&GA, F});
372     }
373 
374     for (auto &GI : M.ifuncs())
375       if (auto *F = dyn_cast<Function>(GI.getResolver()->stripPointerCasts()))
376         ResolverIFuncs.push_back({&GI, F});
377   }
378 
379   ~ScopedSaveAliaseesAndUsed() {
380     appendToUsed(M, Used);
381     appendToCompilerUsed(M, CompilerUsed);
382 
383     for (auto P : FunctionAliases)
384       P.first->setAliasee(
385           ConstantExpr::getBitCast(P.second, P.first->getType()));
386 
387     for (auto P : ResolverIFuncs) {
388       // This does not preserve pointer casts that may have been stripped by the
389       // constructor, but the resolver's type is different from that of the
390       // ifunc anyway.
391       P.first->setResolver(P.second);
392     }
393   }
394 };
395 
396 class LowerTypeTestsModule {
397   Module &M;
398 
399   ModuleSummaryIndex *ExportSummary;
400   const ModuleSummaryIndex *ImportSummary;
401   // Set when the client has invoked this to simply drop all type test assume
402   // sequences.
403   bool DropTypeTests;
404 
405   Triple::ArchType Arch;
406   Triple::OSType OS;
407   Triple::ObjectFormatType ObjectFormat;
408 
409   IntegerType *Int1Ty = Type::getInt1Ty(M.getContext());
410   IntegerType *Int8Ty = Type::getInt8Ty(M.getContext());
411   PointerType *Int8PtrTy = Type::getInt8PtrTy(M.getContext());
412   ArrayType *Int8Arr0Ty = ArrayType::get(Type::getInt8Ty(M.getContext()), 0);
413   IntegerType *Int32Ty = Type::getInt32Ty(M.getContext());
414   PointerType *Int32PtrTy = PointerType::getUnqual(Int32Ty);
415   IntegerType *Int64Ty = Type::getInt64Ty(M.getContext());
416   IntegerType *IntPtrTy = M.getDataLayout().getIntPtrType(M.getContext(), 0);
417 
418   // Indirect function call index assignment counter for WebAssembly
419   uint64_t IndirectIndex = 1;
420 
421   // Mapping from type identifiers to the call sites that test them, as well as
422   // whether the type identifier needs to be exported to ThinLTO backends as
423   // part of the regular LTO phase of the ThinLTO pipeline (see exportTypeId).
424   struct TypeIdUserInfo {
425     std::vector<CallInst *> CallSites;
426     bool IsExported = false;
427   };
428   DenseMap<Metadata *, TypeIdUserInfo> TypeIdUsers;
429 
430   /// This structure describes how to lower type tests for a particular type
431   /// identifier. It is either built directly from the global analysis (during
432   /// regular LTO or the regular LTO phase of ThinLTO), or indirectly using type
433   /// identifier summaries and external symbol references (in ThinLTO backends).
434   struct TypeIdLowering {
435     TypeTestResolution::Kind TheKind = TypeTestResolution::Unsat;
436 
437     /// All except Unsat: the start address within the combined global.
438     Constant *OffsetedGlobal;
439 
440     /// ByteArray, Inline, AllOnes: log2 of the required global alignment
441     /// relative to the start address.
442     Constant *AlignLog2;
443 
444     /// ByteArray, Inline, AllOnes: one less than the size of the memory region
445     /// covering members of this type identifier as a multiple of 2^AlignLog2.
446     Constant *SizeM1;
447 
448     /// ByteArray: the byte array to test the address against.
449     Constant *TheByteArray;
450 
451     /// ByteArray: the bit mask to apply to bytes loaded from the byte array.
452     Constant *BitMask;
453 
454     /// Inline: the bit mask to test the address against.
455     Constant *InlineBits;
456   };
457 
458   std::vector<ByteArrayInfo> ByteArrayInfos;
459 
460   Function *WeakInitializerFn = nullptr;
461 
462   bool shouldExportConstantsAsAbsoluteSymbols();
463   uint8_t *exportTypeId(StringRef TypeId, const TypeIdLowering &TIL);
464   TypeIdLowering importTypeId(StringRef TypeId);
465   void importTypeTest(CallInst *CI);
466   void importFunction(Function *F, bool isJumpTableCanonical,
467                       std::vector<GlobalAlias *> &AliasesToErase);
468 
469   BitSetInfo
470   buildBitSet(Metadata *TypeId,
471               const DenseMap<GlobalTypeMember *, uint64_t> &GlobalLayout);
472   ByteArrayInfo *createByteArray(BitSetInfo &BSI);
473   void allocateByteArrays();
474   Value *createBitSetTest(IRBuilder<> &B, const TypeIdLowering &TIL,
475                           Value *BitOffset);
476   void lowerTypeTestCalls(
477       ArrayRef<Metadata *> TypeIds, Constant *CombinedGlobalAddr,
478       const DenseMap<GlobalTypeMember *, uint64_t> &GlobalLayout);
479   Value *lowerTypeTestCall(Metadata *TypeId, CallInst *CI,
480                            const TypeIdLowering &TIL);
481 
482   void buildBitSetsFromGlobalVariables(ArrayRef<Metadata *> TypeIds,
483                                        ArrayRef<GlobalTypeMember *> Globals);
484   unsigned getJumpTableEntrySize();
485   Type *getJumpTableEntryType();
486   void createJumpTableEntry(raw_ostream &AsmOS, raw_ostream &ConstraintOS,
487                             Triple::ArchType JumpTableArch,
488                             SmallVectorImpl<Value *> &AsmArgs, Function *Dest);
489   void verifyTypeMDNode(GlobalObject *GO, MDNode *Type);
490   void buildBitSetsFromFunctions(ArrayRef<Metadata *> TypeIds,
491                                  ArrayRef<GlobalTypeMember *> Functions);
492   void buildBitSetsFromFunctionsNative(ArrayRef<Metadata *> TypeIds,
493                                        ArrayRef<GlobalTypeMember *> Functions);
494   void buildBitSetsFromFunctionsWASM(ArrayRef<Metadata *> TypeIds,
495                                      ArrayRef<GlobalTypeMember *> Functions);
496   void
497   buildBitSetsFromDisjointSet(ArrayRef<Metadata *> TypeIds,
498                               ArrayRef<GlobalTypeMember *> Globals,
499                               ArrayRef<ICallBranchFunnel *> ICallBranchFunnels);
500 
501   void replaceWeakDeclarationWithJumpTablePtr(Function *F, Constant *JT,
502                                               bool IsJumpTableCanonical);
503   void moveInitializerToModuleConstructor(GlobalVariable *GV);
504   void findGlobalVariableUsersOf(Constant *C,
505                                  SmallSetVector<GlobalVariable *, 8> &Out);
506 
507   void createJumpTable(Function *F, ArrayRef<GlobalTypeMember *> Functions);
508 
509   /// replaceCfiUses - Go through the uses list for this definition
510   /// and make each use point to "V" instead of "this" when the use is outside
511   /// the block. 'This's use list is expected to have at least one element.
512   /// Unlike replaceAllUsesWith this function skips blockaddr and direct call
513   /// uses.
514   void replaceCfiUses(Function *Old, Value *New, bool IsJumpTableCanonical);
515 
516   /// replaceDirectCalls - Go through the uses list for this definition and
517   /// replace each use, which is a direct function call.
518   void replaceDirectCalls(Value *Old, Value *New);
519 
520 public:
521   LowerTypeTestsModule(Module &M, ModuleSummaryIndex *ExportSummary,
522                        const ModuleSummaryIndex *ImportSummary,
523                        bool DropTypeTests);
524 
525   bool lower();
526 
527   // Lower the module using the action and summary passed as command line
528   // arguments. For testing purposes only.
529   static bool runForTesting(Module &M);
530 };
531 
532 struct LowerTypeTests : public ModulePass {
533   static char ID;
534 
535   bool UseCommandLine = false;
536 
537   ModuleSummaryIndex *ExportSummary;
538   const ModuleSummaryIndex *ImportSummary;
539   bool DropTypeTests;
540 
541   LowerTypeTests() : ModulePass(ID), UseCommandLine(true) {
542     initializeLowerTypeTestsPass(*PassRegistry::getPassRegistry());
543   }
544 
545   LowerTypeTests(ModuleSummaryIndex *ExportSummary,
546                  const ModuleSummaryIndex *ImportSummary, bool DropTypeTests)
547       : ModulePass(ID), ExportSummary(ExportSummary),
548         ImportSummary(ImportSummary),
549         DropTypeTests(DropTypeTests || ClDropTypeTests) {
550     initializeLowerTypeTestsPass(*PassRegistry::getPassRegistry());
551   }
552 
553   bool runOnModule(Module &M) override {
554     if (UseCommandLine)
555       return LowerTypeTestsModule::runForTesting(M);
556     return LowerTypeTestsModule(M, ExportSummary, ImportSummary, DropTypeTests)
557         .lower();
558   }
559 };
560 
561 } // end anonymous namespace
562 
563 char LowerTypeTests::ID = 0;
564 
565 INITIALIZE_PASS(LowerTypeTests, "lowertypetests", "Lower type metadata", false,
566                 false)
567 
568 ModulePass *
569 llvm::createLowerTypeTestsPass(ModuleSummaryIndex *ExportSummary,
570                                const ModuleSummaryIndex *ImportSummary,
571                                bool DropTypeTests) {
572   return new LowerTypeTests(ExportSummary, ImportSummary, DropTypeTests);
573 }
574 
575 /// Build a bit set for TypeId using the object layouts in
576 /// GlobalLayout.
577 BitSetInfo LowerTypeTestsModule::buildBitSet(
578     Metadata *TypeId,
579     const DenseMap<GlobalTypeMember *, uint64_t> &GlobalLayout) {
580   BitSetBuilder BSB;
581 
582   // Compute the byte offset of each address associated with this type
583   // identifier.
584   for (auto &GlobalAndOffset : GlobalLayout) {
585     for (MDNode *Type : GlobalAndOffset.first->types()) {
586       if (Type->getOperand(1) != TypeId)
587         continue;
588       uint64_t Offset =
589           cast<ConstantInt>(
590               cast<ConstantAsMetadata>(Type->getOperand(0))->getValue())
591               ->getZExtValue();
592       BSB.addOffset(GlobalAndOffset.second + Offset);
593     }
594   }
595 
596   return BSB.build();
597 }
598 
599 /// Build a test that bit BitOffset mod sizeof(Bits)*8 is set in
600 /// Bits. This pattern matches to the bt instruction on x86.
601 static Value *createMaskedBitTest(IRBuilder<> &B, Value *Bits,
602                                   Value *BitOffset) {
603   auto BitsType = cast<IntegerType>(Bits->getType());
604   unsigned BitWidth = BitsType->getBitWidth();
605 
606   BitOffset = B.CreateZExtOrTrunc(BitOffset, BitsType);
607   Value *BitIndex =
608       B.CreateAnd(BitOffset, ConstantInt::get(BitsType, BitWidth - 1));
609   Value *BitMask = B.CreateShl(ConstantInt::get(BitsType, 1), BitIndex);
610   Value *MaskedBits = B.CreateAnd(Bits, BitMask);
611   return B.CreateICmpNE(MaskedBits, ConstantInt::get(BitsType, 0));
612 }
613 
614 ByteArrayInfo *LowerTypeTestsModule::createByteArray(BitSetInfo &BSI) {
615   // Create globals to stand in for byte arrays and masks. These never actually
616   // get initialized, we RAUW and erase them later in allocateByteArrays() once
617   // we know the offset and mask to use.
618   auto ByteArrayGlobal = new GlobalVariable(
619       M, Int8Ty, /*isConstant=*/true, GlobalValue::PrivateLinkage, nullptr);
620   auto MaskGlobal = new GlobalVariable(M, Int8Ty, /*isConstant=*/true,
621                                        GlobalValue::PrivateLinkage, nullptr);
622 
623   ByteArrayInfos.emplace_back();
624   ByteArrayInfo *BAI = &ByteArrayInfos.back();
625 
626   BAI->Bits = BSI.Bits;
627   BAI->BitSize = BSI.BitSize;
628   BAI->ByteArray = ByteArrayGlobal;
629   BAI->MaskGlobal = MaskGlobal;
630   return BAI;
631 }
632 
633 void LowerTypeTestsModule::allocateByteArrays() {
634   llvm::stable_sort(ByteArrayInfos,
635                     [](const ByteArrayInfo &BAI1, const ByteArrayInfo &BAI2) {
636                       return BAI1.BitSize > BAI2.BitSize;
637                     });
638 
639   std::vector<uint64_t> ByteArrayOffsets(ByteArrayInfos.size());
640 
641   ByteArrayBuilder BAB;
642   for (unsigned I = 0; I != ByteArrayInfos.size(); ++I) {
643     ByteArrayInfo *BAI = &ByteArrayInfos[I];
644 
645     uint8_t Mask;
646     BAB.allocate(BAI->Bits, BAI->BitSize, ByteArrayOffsets[I], Mask);
647 
648     BAI->MaskGlobal->replaceAllUsesWith(
649         ConstantExpr::getIntToPtr(ConstantInt::get(Int8Ty, Mask), Int8PtrTy));
650     BAI->MaskGlobal->eraseFromParent();
651     if (BAI->MaskPtr)
652       *BAI->MaskPtr = Mask;
653   }
654 
655   Constant *ByteArrayConst = ConstantDataArray::get(M.getContext(), BAB.Bytes);
656   auto ByteArray =
657       new GlobalVariable(M, ByteArrayConst->getType(), /*isConstant=*/true,
658                          GlobalValue::PrivateLinkage, ByteArrayConst);
659 
660   for (unsigned I = 0; I != ByteArrayInfos.size(); ++I) {
661     ByteArrayInfo *BAI = &ByteArrayInfos[I];
662 
663     Constant *Idxs[] = {ConstantInt::get(IntPtrTy, 0),
664                         ConstantInt::get(IntPtrTy, ByteArrayOffsets[I])};
665     Constant *GEP = ConstantExpr::getInBoundsGetElementPtr(
666         ByteArrayConst->getType(), ByteArray, Idxs);
667 
668     // Create an alias instead of RAUW'ing the gep directly. On x86 this ensures
669     // that the pc-relative displacement is folded into the lea instead of the
670     // test instruction getting another displacement.
671     GlobalAlias *Alias = GlobalAlias::create(
672         Int8Ty, 0, GlobalValue::PrivateLinkage, "bits", GEP, &M);
673     BAI->ByteArray->replaceAllUsesWith(Alias);
674     BAI->ByteArray->eraseFromParent();
675   }
676 
677   ByteArraySizeBits = BAB.BitAllocs[0] + BAB.BitAllocs[1] + BAB.BitAllocs[2] +
678                       BAB.BitAllocs[3] + BAB.BitAllocs[4] + BAB.BitAllocs[5] +
679                       BAB.BitAllocs[6] + BAB.BitAllocs[7];
680   ByteArraySizeBytes = BAB.Bytes.size();
681 }
682 
683 /// Build a test that bit BitOffset is set in the type identifier that was
684 /// lowered to TIL, which must be either an Inline or a ByteArray.
685 Value *LowerTypeTestsModule::createBitSetTest(IRBuilder<> &B,
686                                               const TypeIdLowering &TIL,
687                                               Value *BitOffset) {
688   if (TIL.TheKind == TypeTestResolution::Inline) {
689     // If the bit set is sufficiently small, we can avoid a load by bit testing
690     // a constant.
691     return createMaskedBitTest(B, TIL.InlineBits, BitOffset);
692   } else {
693     Constant *ByteArray = TIL.TheByteArray;
694     if (AvoidReuse && !ImportSummary) {
695       // Each use of the byte array uses a different alias. This makes the
696       // backend less likely to reuse previously computed byte array addresses,
697       // improving the security of the CFI mechanism based on this pass.
698       // This won't work when importing because TheByteArray is external.
699       ByteArray = GlobalAlias::create(Int8Ty, 0, GlobalValue::PrivateLinkage,
700                                       "bits_use", ByteArray, &M);
701     }
702 
703     Value *ByteAddr = B.CreateGEP(Int8Ty, ByteArray, BitOffset);
704     Value *Byte = B.CreateLoad(Int8Ty, ByteAddr);
705 
706     Value *ByteAndMask =
707         B.CreateAnd(Byte, ConstantExpr::getPtrToInt(TIL.BitMask, Int8Ty));
708     return B.CreateICmpNE(ByteAndMask, ConstantInt::get(Int8Ty, 0));
709   }
710 }
711 
712 static bool isKnownTypeIdMember(Metadata *TypeId, const DataLayout &DL,
713                                 Value *V, uint64_t COffset) {
714   if (auto GV = dyn_cast<GlobalObject>(V)) {
715     SmallVector<MDNode *, 2> Types;
716     GV->getMetadata(LLVMContext::MD_type, Types);
717     for (MDNode *Type : Types) {
718       if (Type->getOperand(1) != TypeId)
719         continue;
720       uint64_t Offset =
721           cast<ConstantInt>(
722               cast<ConstantAsMetadata>(Type->getOperand(0))->getValue())
723               ->getZExtValue();
724       if (COffset == Offset)
725         return true;
726     }
727     return false;
728   }
729 
730   if (auto GEP = dyn_cast<GEPOperator>(V)) {
731     APInt APOffset(DL.getPointerSizeInBits(0), 0);
732     bool Result = GEP->accumulateConstantOffset(DL, APOffset);
733     if (!Result)
734       return false;
735     COffset += APOffset.getZExtValue();
736     return isKnownTypeIdMember(TypeId, DL, GEP->getPointerOperand(), COffset);
737   }
738 
739   if (auto Op = dyn_cast<Operator>(V)) {
740     if (Op->getOpcode() == Instruction::BitCast)
741       return isKnownTypeIdMember(TypeId, DL, Op->getOperand(0), COffset);
742 
743     if (Op->getOpcode() == Instruction::Select)
744       return isKnownTypeIdMember(TypeId, DL, Op->getOperand(1), COffset) &&
745              isKnownTypeIdMember(TypeId, DL, Op->getOperand(2), COffset);
746   }
747 
748   return false;
749 }
750 
751 /// Lower a llvm.type.test call to its implementation. Returns the value to
752 /// replace the call with.
753 Value *LowerTypeTestsModule::lowerTypeTestCall(Metadata *TypeId, CallInst *CI,
754                                                const TypeIdLowering &TIL) {
755   // Delay lowering if the resolution is currently unknown.
756   if (TIL.TheKind == TypeTestResolution::Unknown)
757     return nullptr;
758   if (TIL.TheKind == TypeTestResolution::Unsat)
759     return ConstantInt::getFalse(M.getContext());
760 
761   Value *Ptr = CI->getArgOperand(0);
762   const DataLayout &DL = M.getDataLayout();
763   if (isKnownTypeIdMember(TypeId, DL, Ptr, 0))
764     return ConstantInt::getTrue(M.getContext());
765 
766   BasicBlock *InitialBB = CI->getParent();
767 
768   IRBuilder<> B(CI);
769 
770   Value *PtrAsInt = B.CreatePtrToInt(Ptr, IntPtrTy);
771 
772   Constant *OffsetedGlobalAsInt =
773       ConstantExpr::getPtrToInt(TIL.OffsetedGlobal, IntPtrTy);
774   if (TIL.TheKind == TypeTestResolution::Single)
775     return B.CreateICmpEQ(PtrAsInt, OffsetedGlobalAsInt);
776 
777   Value *PtrOffset = B.CreateSub(PtrAsInt, OffsetedGlobalAsInt);
778 
779   // We need to check that the offset both falls within our range and is
780   // suitably aligned. We can check both properties at the same time by
781   // performing a right rotate by log2(alignment) followed by an integer
782   // comparison against the bitset size. The rotate will move the lower
783   // order bits that need to be zero into the higher order bits of the
784   // result, causing the comparison to fail if they are nonzero. The rotate
785   // also conveniently gives us a bit offset to use during the load from
786   // the bitset.
787   Value *OffsetSHR =
788       B.CreateLShr(PtrOffset, ConstantExpr::getZExt(TIL.AlignLog2, IntPtrTy));
789   Value *OffsetSHL = B.CreateShl(
790       PtrOffset, ConstantExpr::getZExt(
791                      ConstantExpr::getSub(
792                          ConstantInt::get(Int8Ty, DL.getPointerSizeInBits(0)),
793                          TIL.AlignLog2),
794                      IntPtrTy));
795   Value *BitOffset = B.CreateOr(OffsetSHR, OffsetSHL);
796 
797   Value *OffsetInRange = B.CreateICmpULE(BitOffset, TIL.SizeM1);
798 
799   // If the bit set is all ones, testing against it is unnecessary.
800   if (TIL.TheKind == TypeTestResolution::AllOnes)
801     return OffsetInRange;
802 
803   // See if the intrinsic is used in the following common pattern:
804   //   br(llvm.type.test(...), thenbb, elsebb)
805   // where nothing happens between the type test and the br.
806   // If so, create slightly simpler IR.
807   if (CI->hasOneUse())
808     if (auto *Br = dyn_cast<BranchInst>(*CI->user_begin()))
809       if (CI->getNextNode() == Br) {
810         BasicBlock *Then = InitialBB->splitBasicBlock(CI->getIterator());
811         BasicBlock *Else = Br->getSuccessor(1);
812         BranchInst *NewBr = BranchInst::Create(Then, Else, OffsetInRange);
813         NewBr->setMetadata(LLVMContext::MD_prof,
814                            Br->getMetadata(LLVMContext::MD_prof));
815         ReplaceInstWithInst(InitialBB->getTerminator(), NewBr);
816 
817         // Update phis in Else resulting from InitialBB being split
818         for (auto &Phi : Else->phis())
819           Phi.addIncoming(Phi.getIncomingValueForBlock(Then), InitialBB);
820 
821         IRBuilder<> ThenB(CI);
822         return createBitSetTest(ThenB, TIL, BitOffset);
823       }
824 
825   IRBuilder<> ThenB(SplitBlockAndInsertIfThen(OffsetInRange, CI, false));
826 
827   // Now that we know that the offset is in range and aligned, load the
828   // appropriate bit from the bitset.
829   Value *Bit = createBitSetTest(ThenB, TIL, BitOffset);
830 
831   // The value we want is 0 if we came directly from the initial block
832   // (having failed the range or alignment checks), or the loaded bit if
833   // we came from the block in which we loaded it.
834   B.SetInsertPoint(CI);
835   PHINode *P = B.CreatePHI(Int1Ty, 2);
836   P->addIncoming(ConstantInt::get(Int1Ty, 0), InitialBB);
837   P->addIncoming(Bit, ThenB.GetInsertBlock());
838   return P;
839 }
840 
841 /// Given a disjoint set of type identifiers and globals, lay out the globals,
842 /// build the bit sets and lower the llvm.type.test calls.
843 void LowerTypeTestsModule::buildBitSetsFromGlobalVariables(
844     ArrayRef<Metadata *> TypeIds, ArrayRef<GlobalTypeMember *> Globals) {
845   // Build a new global with the combined contents of the referenced globals.
846   // This global is a struct whose even-indexed elements contain the original
847   // contents of the referenced globals and whose odd-indexed elements contain
848   // any padding required to align the next element to the next power of 2 plus
849   // any additional padding required to meet its alignment requirements.
850   std::vector<Constant *> GlobalInits;
851   const DataLayout &DL = M.getDataLayout();
852   DenseMap<GlobalTypeMember *, uint64_t> GlobalLayout;
853   Align MaxAlign;
854   uint64_t CurOffset = 0;
855   uint64_t DesiredPadding = 0;
856   for (GlobalTypeMember *G : Globals) {
857     auto *GV = cast<GlobalVariable>(G->getGlobal());
858     Align Alignment =
859         DL.getValueOrABITypeAlignment(GV->getAlign(), GV->getValueType());
860     MaxAlign = std::max(MaxAlign, Alignment);
861     uint64_t GVOffset = alignTo(CurOffset + DesiredPadding, Alignment);
862     GlobalLayout[G] = GVOffset;
863     if (GVOffset != 0) {
864       uint64_t Padding = GVOffset - CurOffset;
865       GlobalInits.push_back(
866           ConstantAggregateZero::get(ArrayType::get(Int8Ty, Padding)));
867     }
868 
869     GlobalInits.push_back(GV->getInitializer());
870     uint64_t InitSize = DL.getTypeAllocSize(GV->getValueType());
871     CurOffset = GVOffset + InitSize;
872 
873     // Compute the amount of padding that we'd like for the next element.
874     DesiredPadding = NextPowerOf2(InitSize - 1) - InitSize;
875 
876     // Experiments of different caps with Chromium on both x64 and ARM64
877     // have shown that the 32-byte cap generates the smallest binary on
878     // both platforms while different caps yield similar performance.
879     // (see https://lists.llvm.org/pipermail/llvm-dev/2018-July/124694.html)
880     if (DesiredPadding > 32)
881       DesiredPadding = alignTo(InitSize, 32) - InitSize;
882   }
883 
884   Constant *NewInit = ConstantStruct::getAnon(M.getContext(), GlobalInits);
885   auto *CombinedGlobal =
886       new GlobalVariable(M, NewInit->getType(), /*isConstant=*/true,
887                          GlobalValue::PrivateLinkage, NewInit);
888   CombinedGlobal->setAlignment(MaxAlign);
889 
890   StructType *NewTy = cast<StructType>(NewInit->getType());
891   lowerTypeTestCalls(TypeIds, CombinedGlobal, GlobalLayout);
892 
893   // Build aliases pointing to offsets into the combined global for each
894   // global from which we built the combined global, and replace references
895   // to the original globals with references to the aliases.
896   for (unsigned I = 0; I != Globals.size(); ++I) {
897     GlobalVariable *GV = cast<GlobalVariable>(Globals[I]->getGlobal());
898 
899     // Multiply by 2 to account for padding elements.
900     Constant *CombinedGlobalIdxs[] = {ConstantInt::get(Int32Ty, 0),
901                                       ConstantInt::get(Int32Ty, I * 2)};
902     Constant *CombinedGlobalElemPtr = ConstantExpr::getGetElementPtr(
903         NewInit->getType(), CombinedGlobal, CombinedGlobalIdxs);
904     assert(GV->getType()->getAddressSpace() == 0);
905     GlobalAlias *GAlias =
906         GlobalAlias::create(NewTy->getElementType(I * 2), 0, GV->getLinkage(),
907                             "", CombinedGlobalElemPtr, &M);
908     GAlias->setVisibility(GV->getVisibility());
909     GAlias->takeName(GV);
910     GV->replaceAllUsesWith(GAlias);
911     GV->eraseFromParent();
912   }
913 }
914 
915 bool LowerTypeTestsModule::shouldExportConstantsAsAbsoluteSymbols() {
916   return (Arch == Triple::x86 || Arch == Triple::x86_64) &&
917          ObjectFormat == Triple::ELF;
918 }
919 
920 /// Export the given type identifier so that ThinLTO backends may import it.
921 /// Type identifiers are exported by adding coarse-grained information about how
922 /// to test the type identifier to the summary, and creating symbols in the
923 /// object file (aliases and absolute symbols) containing fine-grained
924 /// information about the type identifier.
925 ///
926 /// Returns a pointer to the location in which to store the bitmask, if
927 /// applicable.
928 uint8_t *LowerTypeTestsModule::exportTypeId(StringRef TypeId,
929                                             const TypeIdLowering &TIL) {
930   TypeTestResolution &TTRes =
931       ExportSummary->getOrInsertTypeIdSummary(TypeId).TTRes;
932   TTRes.TheKind = TIL.TheKind;
933 
934   auto ExportGlobal = [&](StringRef Name, Constant *C) {
935     GlobalAlias *GA =
936         GlobalAlias::create(Int8Ty, 0, GlobalValue::ExternalLinkage,
937                             "__typeid_" + TypeId + "_" + Name, C, &M);
938     GA->setVisibility(GlobalValue::HiddenVisibility);
939   };
940 
941   auto ExportConstant = [&](StringRef Name, uint64_t &Storage, Constant *C) {
942     if (shouldExportConstantsAsAbsoluteSymbols())
943       ExportGlobal(Name, ConstantExpr::getIntToPtr(C, Int8PtrTy));
944     else
945       Storage = cast<ConstantInt>(C)->getZExtValue();
946   };
947 
948   if (TIL.TheKind != TypeTestResolution::Unsat)
949     ExportGlobal("global_addr", TIL.OffsetedGlobal);
950 
951   if (TIL.TheKind == TypeTestResolution::ByteArray ||
952       TIL.TheKind == TypeTestResolution::Inline ||
953       TIL.TheKind == TypeTestResolution::AllOnes) {
954     ExportConstant("align", TTRes.AlignLog2, TIL.AlignLog2);
955     ExportConstant("size_m1", TTRes.SizeM1, TIL.SizeM1);
956 
957     uint64_t BitSize = cast<ConstantInt>(TIL.SizeM1)->getZExtValue() + 1;
958     if (TIL.TheKind == TypeTestResolution::Inline)
959       TTRes.SizeM1BitWidth = (BitSize <= 32) ? 5 : 6;
960     else
961       TTRes.SizeM1BitWidth = (BitSize <= 128) ? 7 : 32;
962   }
963 
964   if (TIL.TheKind == TypeTestResolution::ByteArray) {
965     ExportGlobal("byte_array", TIL.TheByteArray);
966     if (shouldExportConstantsAsAbsoluteSymbols())
967       ExportGlobal("bit_mask", TIL.BitMask);
968     else
969       return &TTRes.BitMask;
970   }
971 
972   if (TIL.TheKind == TypeTestResolution::Inline)
973     ExportConstant("inline_bits", TTRes.InlineBits, TIL.InlineBits);
974 
975   return nullptr;
976 }
977 
978 LowerTypeTestsModule::TypeIdLowering
979 LowerTypeTestsModule::importTypeId(StringRef TypeId) {
980   const TypeIdSummary *TidSummary = ImportSummary->getTypeIdSummary(TypeId);
981   if (!TidSummary)
982     return {}; // Unsat: no globals match this type id.
983   const TypeTestResolution &TTRes = TidSummary->TTRes;
984 
985   TypeIdLowering TIL;
986   TIL.TheKind = TTRes.TheKind;
987 
988   auto ImportGlobal = [&](StringRef Name) {
989     // Give the global a type of length 0 so that it is not assumed not to alias
990     // with any other global.
991     Constant *C = M.getOrInsertGlobal(("__typeid_" + TypeId + "_" + Name).str(),
992                                       Int8Arr0Ty);
993     if (auto *GV = dyn_cast<GlobalVariable>(C))
994       GV->setVisibility(GlobalValue::HiddenVisibility);
995     C = ConstantExpr::getBitCast(C, Int8PtrTy);
996     return C;
997   };
998 
999   auto ImportConstant = [&](StringRef Name, uint64_t Const, unsigned AbsWidth,
1000                             Type *Ty) {
1001     if (!shouldExportConstantsAsAbsoluteSymbols()) {
1002       Constant *C =
1003           ConstantInt::get(isa<IntegerType>(Ty) ? Ty : Int64Ty, Const);
1004       if (!isa<IntegerType>(Ty))
1005         C = ConstantExpr::getIntToPtr(C, Ty);
1006       return C;
1007     }
1008 
1009     Constant *C = ImportGlobal(Name);
1010     auto *GV = cast<GlobalVariable>(C->stripPointerCasts());
1011     if (isa<IntegerType>(Ty))
1012       C = ConstantExpr::getPtrToInt(C, Ty);
1013     if (GV->getMetadata(LLVMContext::MD_absolute_symbol))
1014       return C;
1015 
1016     auto SetAbsRange = [&](uint64_t Min, uint64_t Max) {
1017       auto *MinC = ConstantAsMetadata::get(ConstantInt::get(IntPtrTy, Min));
1018       auto *MaxC = ConstantAsMetadata::get(ConstantInt::get(IntPtrTy, Max));
1019       GV->setMetadata(LLVMContext::MD_absolute_symbol,
1020                       MDNode::get(M.getContext(), {MinC, MaxC}));
1021     };
1022     if (AbsWidth == IntPtrTy->getBitWidth())
1023       SetAbsRange(~0ull, ~0ull); // Full set.
1024     else
1025       SetAbsRange(0, 1ull << AbsWidth);
1026     return C;
1027   };
1028 
1029   if (TIL.TheKind != TypeTestResolution::Unsat)
1030     TIL.OffsetedGlobal = ImportGlobal("global_addr");
1031 
1032   if (TIL.TheKind == TypeTestResolution::ByteArray ||
1033       TIL.TheKind == TypeTestResolution::Inline ||
1034       TIL.TheKind == TypeTestResolution::AllOnes) {
1035     TIL.AlignLog2 = ImportConstant("align", TTRes.AlignLog2, 8, Int8Ty);
1036     TIL.SizeM1 =
1037         ImportConstant("size_m1", TTRes.SizeM1, TTRes.SizeM1BitWidth, IntPtrTy);
1038   }
1039 
1040   if (TIL.TheKind == TypeTestResolution::ByteArray) {
1041     TIL.TheByteArray = ImportGlobal("byte_array");
1042     TIL.BitMask = ImportConstant("bit_mask", TTRes.BitMask, 8, Int8PtrTy);
1043   }
1044 
1045   if (TIL.TheKind == TypeTestResolution::Inline)
1046     TIL.InlineBits = ImportConstant(
1047         "inline_bits", TTRes.InlineBits, 1 << TTRes.SizeM1BitWidth,
1048         TTRes.SizeM1BitWidth <= 5 ? Int32Ty : Int64Ty);
1049 
1050   return TIL;
1051 }
1052 
1053 void LowerTypeTestsModule::importTypeTest(CallInst *CI) {
1054   auto TypeIdMDVal = dyn_cast<MetadataAsValue>(CI->getArgOperand(1));
1055   if (!TypeIdMDVal)
1056     report_fatal_error("Second argument of llvm.type.test must be metadata");
1057 
1058   auto TypeIdStr = dyn_cast<MDString>(TypeIdMDVal->getMetadata());
1059   // If this is a local unpromoted type, which doesn't have a metadata string,
1060   // treat as Unknown and delay lowering, so that we can still utilize it for
1061   // later optimizations.
1062   if (!TypeIdStr)
1063     return;
1064 
1065   TypeIdLowering TIL = importTypeId(TypeIdStr->getString());
1066   Value *Lowered = lowerTypeTestCall(TypeIdStr, CI, TIL);
1067   if (Lowered) {
1068     CI->replaceAllUsesWith(Lowered);
1069     CI->eraseFromParent();
1070   }
1071 }
1072 
1073 // ThinLTO backend: the function F has a jump table entry; update this module
1074 // accordingly. isJumpTableCanonical describes the type of the jump table entry.
1075 void LowerTypeTestsModule::importFunction(
1076     Function *F, bool isJumpTableCanonical,
1077     std::vector<GlobalAlias *> &AliasesToErase) {
1078   assert(F->getType()->getAddressSpace() == 0);
1079 
1080   GlobalValue::VisibilityTypes Visibility = F->getVisibility();
1081   std::string Name = std::string(F->getName());
1082 
1083   if (F->isDeclarationForLinker() && isJumpTableCanonical) {
1084     // Non-dso_local functions may be overriden at run time,
1085     // don't short curcuit them
1086     if (F->isDSOLocal()) {
1087       Function *RealF = Function::Create(F->getFunctionType(),
1088                                          GlobalValue::ExternalLinkage,
1089                                          F->getAddressSpace(),
1090                                          Name + ".cfi", &M);
1091       RealF->setVisibility(GlobalVariable::HiddenVisibility);
1092       replaceDirectCalls(F, RealF);
1093     }
1094     return;
1095   }
1096 
1097   Function *FDecl;
1098   if (!isJumpTableCanonical) {
1099     // Either a declaration of an external function or a reference to a locally
1100     // defined jump table.
1101     FDecl = Function::Create(F->getFunctionType(), GlobalValue::ExternalLinkage,
1102                              F->getAddressSpace(), Name + ".cfi_jt", &M);
1103     FDecl->setVisibility(GlobalValue::HiddenVisibility);
1104   } else {
1105     F->setName(Name + ".cfi");
1106     F->setLinkage(GlobalValue::ExternalLinkage);
1107     FDecl = Function::Create(F->getFunctionType(), GlobalValue::ExternalLinkage,
1108                              F->getAddressSpace(), Name, &M);
1109     FDecl->setVisibility(Visibility);
1110     Visibility = GlobalValue::HiddenVisibility;
1111 
1112     // Delete aliases pointing to this function, they'll be re-created in the
1113     // merged output. Don't do it yet though because ScopedSaveAliaseesAndUsed
1114     // will want to reset the aliasees first.
1115     for (auto &U : F->uses()) {
1116       if (auto *A = dyn_cast<GlobalAlias>(U.getUser())) {
1117         Function *AliasDecl = Function::Create(
1118             F->getFunctionType(), GlobalValue::ExternalLinkage,
1119             F->getAddressSpace(), "", &M);
1120         AliasDecl->takeName(A);
1121         A->replaceAllUsesWith(AliasDecl);
1122         AliasesToErase.push_back(A);
1123       }
1124     }
1125   }
1126 
1127   if (F->hasExternalWeakLinkage())
1128     replaceWeakDeclarationWithJumpTablePtr(F, FDecl, isJumpTableCanonical);
1129   else
1130     replaceCfiUses(F, FDecl, isJumpTableCanonical);
1131 
1132   // Set visibility late because it's used in replaceCfiUses() to determine
1133   // whether uses need to to be replaced.
1134   F->setVisibility(Visibility);
1135 }
1136 
1137 void LowerTypeTestsModule::lowerTypeTestCalls(
1138     ArrayRef<Metadata *> TypeIds, Constant *CombinedGlobalAddr,
1139     const DenseMap<GlobalTypeMember *, uint64_t> &GlobalLayout) {
1140   CombinedGlobalAddr = ConstantExpr::getBitCast(CombinedGlobalAddr, Int8PtrTy);
1141 
1142   // For each type identifier in this disjoint set...
1143   for (Metadata *TypeId : TypeIds) {
1144     // Build the bitset.
1145     BitSetInfo BSI = buildBitSet(TypeId, GlobalLayout);
1146     LLVM_DEBUG({
1147       if (auto MDS = dyn_cast<MDString>(TypeId))
1148         dbgs() << MDS->getString() << ": ";
1149       else
1150         dbgs() << "<unnamed>: ";
1151       BSI.print(dbgs());
1152     });
1153 
1154     ByteArrayInfo *BAI = nullptr;
1155     TypeIdLowering TIL;
1156     TIL.OffsetedGlobal = ConstantExpr::getGetElementPtr(
1157         Int8Ty, CombinedGlobalAddr, ConstantInt::get(IntPtrTy, BSI.ByteOffset)),
1158     TIL.AlignLog2 = ConstantInt::get(Int8Ty, BSI.AlignLog2);
1159     TIL.SizeM1 = ConstantInt::get(IntPtrTy, BSI.BitSize - 1);
1160     if (BSI.isAllOnes()) {
1161       TIL.TheKind = (BSI.BitSize == 1) ? TypeTestResolution::Single
1162                                        : TypeTestResolution::AllOnes;
1163     } else if (BSI.BitSize <= 64) {
1164       TIL.TheKind = TypeTestResolution::Inline;
1165       uint64_t InlineBits = 0;
1166       for (auto Bit : BSI.Bits)
1167         InlineBits |= uint64_t(1) << Bit;
1168       if (InlineBits == 0)
1169         TIL.TheKind = TypeTestResolution::Unsat;
1170       else
1171         TIL.InlineBits = ConstantInt::get(
1172             (BSI.BitSize <= 32) ? Int32Ty : Int64Ty, InlineBits);
1173     } else {
1174       TIL.TheKind = TypeTestResolution::ByteArray;
1175       ++NumByteArraysCreated;
1176       BAI = createByteArray(BSI);
1177       TIL.TheByteArray = BAI->ByteArray;
1178       TIL.BitMask = BAI->MaskGlobal;
1179     }
1180 
1181     TypeIdUserInfo &TIUI = TypeIdUsers[TypeId];
1182 
1183     if (TIUI.IsExported) {
1184       uint8_t *MaskPtr = exportTypeId(cast<MDString>(TypeId)->getString(), TIL);
1185       if (BAI)
1186         BAI->MaskPtr = MaskPtr;
1187     }
1188 
1189     // Lower each call to llvm.type.test for this type identifier.
1190     for (CallInst *CI : TIUI.CallSites) {
1191       ++NumTypeTestCallsLowered;
1192       Value *Lowered = lowerTypeTestCall(TypeId, CI, TIL);
1193       if (Lowered) {
1194         CI->replaceAllUsesWith(Lowered);
1195         CI->eraseFromParent();
1196       }
1197     }
1198   }
1199 }
1200 
1201 void LowerTypeTestsModule::verifyTypeMDNode(GlobalObject *GO, MDNode *Type) {
1202   if (Type->getNumOperands() != 2)
1203     report_fatal_error("All operands of type metadata must have 2 elements");
1204 
1205   if (GO->isThreadLocal())
1206     report_fatal_error("Bit set element may not be thread-local");
1207   if (isa<GlobalVariable>(GO) && GO->hasSection())
1208     report_fatal_error(
1209         "A member of a type identifier may not have an explicit section");
1210 
1211   // FIXME: We previously checked that global var member of a type identifier
1212   // must be a definition, but the IR linker may leave type metadata on
1213   // declarations. We should restore this check after fixing PR31759.
1214 
1215   auto OffsetConstMD = dyn_cast<ConstantAsMetadata>(Type->getOperand(0));
1216   if (!OffsetConstMD)
1217     report_fatal_error("Type offset must be a constant");
1218   auto OffsetInt = dyn_cast<ConstantInt>(OffsetConstMD->getValue());
1219   if (!OffsetInt)
1220     report_fatal_error("Type offset must be an integer constant");
1221 }
1222 
1223 static const unsigned kX86JumpTableEntrySize = 8;
1224 static const unsigned kARMJumpTableEntrySize = 4;
1225 static const unsigned kARMBTIJumpTableEntrySize = 8;
1226 
1227 unsigned LowerTypeTestsModule::getJumpTableEntrySize() {
1228   switch (Arch) {
1229     case Triple::x86:
1230     case Triple::x86_64:
1231       return kX86JumpTableEntrySize;
1232     case Triple::arm:
1233     case Triple::thumb:
1234       return kARMJumpTableEntrySize;
1235     case Triple::aarch64:
1236       if (const auto *BTE = mdconst::extract_or_null<ConstantInt>(
1237             M.getModuleFlag("branch-target-enforcement")))
1238         if (BTE->getZExtValue())
1239           return kARMBTIJumpTableEntrySize;
1240       return kARMJumpTableEntrySize;
1241     default:
1242       report_fatal_error("Unsupported architecture for jump tables");
1243   }
1244 }
1245 
1246 // Create a jump table entry for the target. This consists of an instruction
1247 // sequence containing a relative branch to Dest. Appends inline asm text,
1248 // constraints and arguments to AsmOS, ConstraintOS and AsmArgs.
1249 void LowerTypeTestsModule::createJumpTableEntry(
1250     raw_ostream &AsmOS, raw_ostream &ConstraintOS,
1251     Triple::ArchType JumpTableArch, SmallVectorImpl<Value *> &AsmArgs,
1252     Function *Dest) {
1253   unsigned ArgIndex = AsmArgs.size();
1254 
1255   if (JumpTableArch == Triple::x86 || JumpTableArch == Triple::x86_64) {
1256     AsmOS << "jmp ${" << ArgIndex << ":c}@plt\n";
1257     AsmOS << "int3\nint3\nint3\n";
1258   } else if (JumpTableArch == Triple::arm) {
1259     AsmOS << "b $" << ArgIndex << "\n";
1260   } else if (JumpTableArch == Triple::aarch64) {
1261     if (const auto *BTE = mdconst::extract_or_null<ConstantInt>(
1262           Dest->getParent()->getModuleFlag("branch-target-enforcement")))
1263       if (BTE->getZExtValue())
1264         AsmOS << "bti c\n";
1265     AsmOS << "b $" << ArgIndex << "\n";
1266   } else if (JumpTableArch == Triple::thumb) {
1267     AsmOS << "b.w $" << ArgIndex << "\n";
1268   } else {
1269     report_fatal_error("Unsupported architecture for jump tables");
1270   }
1271 
1272   ConstraintOS << (ArgIndex > 0 ? ",s" : "s");
1273   AsmArgs.push_back(Dest);
1274 }
1275 
1276 Type *LowerTypeTestsModule::getJumpTableEntryType() {
1277   return ArrayType::get(Int8Ty, getJumpTableEntrySize());
1278 }
1279 
1280 /// Given a disjoint set of type identifiers and functions, build the bit sets
1281 /// and lower the llvm.type.test calls, architecture dependently.
1282 void LowerTypeTestsModule::buildBitSetsFromFunctions(
1283     ArrayRef<Metadata *> TypeIds, ArrayRef<GlobalTypeMember *> Functions) {
1284   if (Arch == Triple::x86 || Arch == Triple::x86_64 || Arch == Triple::arm ||
1285       Arch == Triple::thumb || Arch == Triple::aarch64)
1286     buildBitSetsFromFunctionsNative(TypeIds, Functions);
1287   else if (Arch == Triple::wasm32 || Arch == Triple::wasm64)
1288     buildBitSetsFromFunctionsWASM(TypeIds, Functions);
1289   else
1290     report_fatal_error("Unsupported architecture for jump tables");
1291 }
1292 
1293 void LowerTypeTestsModule::moveInitializerToModuleConstructor(
1294     GlobalVariable *GV) {
1295   if (WeakInitializerFn == nullptr) {
1296     WeakInitializerFn = Function::Create(
1297         FunctionType::get(Type::getVoidTy(M.getContext()),
1298                           /* IsVarArg */ false),
1299         GlobalValue::InternalLinkage,
1300         M.getDataLayout().getProgramAddressSpace(),
1301         "__cfi_global_var_init", &M);
1302     BasicBlock *BB =
1303         BasicBlock::Create(M.getContext(), "entry", WeakInitializerFn);
1304     ReturnInst::Create(M.getContext(), BB);
1305     WeakInitializerFn->setSection(
1306         ObjectFormat == Triple::MachO
1307             ? "__TEXT,__StaticInit,regular,pure_instructions"
1308             : ".text.startup");
1309     // This code is equivalent to relocation application, and should run at the
1310     // earliest possible time (i.e. with the highest priority).
1311     appendToGlobalCtors(M, WeakInitializerFn, /* Priority */ 0);
1312   }
1313 
1314   IRBuilder<> IRB(WeakInitializerFn->getEntryBlock().getTerminator());
1315   GV->setConstant(false);
1316   IRB.CreateAlignedStore(GV->getInitializer(), GV, GV->getAlign());
1317   GV->setInitializer(Constant::getNullValue(GV->getValueType()));
1318 }
1319 
1320 void LowerTypeTestsModule::findGlobalVariableUsersOf(
1321     Constant *C, SmallSetVector<GlobalVariable *, 8> &Out) {
1322   for (auto *U : C->users()){
1323     if (auto *GV = dyn_cast<GlobalVariable>(U))
1324       Out.insert(GV);
1325     else if (auto *C2 = dyn_cast<Constant>(U))
1326       findGlobalVariableUsersOf(C2, Out);
1327   }
1328 }
1329 
1330 // Replace all uses of F with (F ? JT : 0).
1331 void LowerTypeTestsModule::replaceWeakDeclarationWithJumpTablePtr(
1332     Function *F, Constant *JT, bool IsJumpTableCanonical) {
1333   // The target expression can not appear in a constant initializer on most
1334   // (all?) targets. Switch to a runtime initializer.
1335   SmallSetVector<GlobalVariable *, 8> GlobalVarUsers;
1336   findGlobalVariableUsersOf(F, GlobalVarUsers);
1337   for (auto GV : GlobalVarUsers)
1338     moveInitializerToModuleConstructor(GV);
1339 
1340   // Can not RAUW F with an expression that uses F. Replace with a temporary
1341   // placeholder first.
1342   Function *PlaceholderFn =
1343       Function::Create(cast<FunctionType>(F->getValueType()),
1344                        GlobalValue::ExternalWeakLinkage,
1345                        F->getAddressSpace(), "", &M);
1346   replaceCfiUses(F, PlaceholderFn, IsJumpTableCanonical);
1347 
1348   Constant *Target = ConstantExpr::getSelect(
1349       ConstantExpr::getICmp(CmpInst::ICMP_NE, F,
1350                             Constant::getNullValue(F->getType())),
1351       JT, Constant::getNullValue(F->getType()));
1352   PlaceholderFn->replaceAllUsesWith(Target);
1353   PlaceholderFn->eraseFromParent();
1354 }
1355 
1356 static bool isThumbFunction(Function *F, Triple::ArchType ModuleArch) {
1357   Attribute TFAttr = F->getFnAttribute("target-features");
1358   if (TFAttr.isValid()) {
1359     SmallVector<StringRef, 6> Features;
1360     TFAttr.getValueAsString().split(Features, ',');
1361     for (StringRef Feature : Features) {
1362       if (Feature == "-thumb-mode")
1363         return false;
1364       else if (Feature == "+thumb-mode")
1365         return true;
1366     }
1367   }
1368 
1369   return ModuleArch == Triple::thumb;
1370 }
1371 
1372 // Each jump table must be either ARM or Thumb as a whole for the bit-test math
1373 // to work. Pick one that matches the majority of members to minimize interop
1374 // veneers inserted by the linker.
1375 static Triple::ArchType
1376 selectJumpTableArmEncoding(ArrayRef<GlobalTypeMember *> Functions,
1377                            Triple::ArchType ModuleArch) {
1378   if (ModuleArch != Triple::arm && ModuleArch != Triple::thumb)
1379     return ModuleArch;
1380 
1381   unsigned ArmCount = 0, ThumbCount = 0;
1382   for (const auto GTM : Functions) {
1383     if (!GTM->isJumpTableCanonical()) {
1384       // PLT stubs are always ARM.
1385       // FIXME: This is the wrong heuristic for non-canonical jump tables.
1386       ++ArmCount;
1387       continue;
1388     }
1389 
1390     Function *F = cast<Function>(GTM->getGlobal());
1391     ++(isThumbFunction(F, ModuleArch) ? ThumbCount : ArmCount);
1392   }
1393 
1394   return ArmCount > ThumbCount ? Triple::arm : Triple::thumb;
1395 }
1396 
1397 void LowerTypeTestsModule::createJumpTable(
1398     Function *F, ArrayRef<GlobalTypeMember *> Functions) {
1399   std::string AsmStr, ConstraintStr;
1400   raw_string_ostream AsmOS(AsmStr), ConstraintOS(ConstraintStr);
1401   SmallVector<Value *, 16> AsmArgs;
1402   AsmArgs.reserve(Functions.size() * 2);
1403 
1404   Triple::ArchType JumpTableArch = selectJumpTableArmEncoding(Functions, Arch);
1405 
1406   for (unsigned I = 0; I != Functions.size(); ++I)
1407     createJumpTableEntry(AsmOS, ConstraintOS, JumpTableArch, AsmArgs,
1408                          cast<Function>(Functions[I]->getGlobal()));
1409 
1410   // Align the whole table by entry size.
1411   F->setAlignment(Align(getJumpTableEntrySize()));
1412   // Skip prologue.
1413   // Disabled on win32 due to https://llvm.org/bugs/show_bug.cgi?id=28641#c3.
1414   // Luckily, this function does not get any prologue even without the
1415   // attribute.
1416   if (OS != Triple::Win32)
1417     F->addFnAttr(Attribute::Naked);
1418   if (JumpTableArch == Triple::arm)
1419     F->addFnAttr("target-features", "-thumb-mode");
1420   if (JumpTableArch == Triple::thumb) {
1421     F->addFnAttr("target-features", "+thumb-mode");
1422     // Thumb jump table assembly needs Thumb2. The following attribute is added
1423     // by Clang for -march=armv7.
1424     F->addFnAttr("target-cpu", "cortex-a8");
1425   }
1426   if (JumpTableArch == Triple::aarch64) {
1427     F->addFnAttr("branch-target-enforcement", "false");
1428     F->addFnAttr("sign-return-address", "none");
1429   }
1430   // Make sure we don't emit .eh_frame for this function.
1431   F->addFnAttr(Attribute::NoUnwind);
1432 
1433   BasicBlock *BB = BasicBlock::Create(M.getContext(), "entry", F);
1434   IRBuilder<> IRB(BB);
1435 
1436   SmallVector<Type *, 16> ArgTypes;
1437   ArgTypes.reserve(AsmArgs.size());
1438   for (const auto &Arg : AsmArgs)
1439     ArgTypes.push_back(Arg->getType());
1440   InlineAsm *JumpTableAsm =
1441       InlineAsm::get(FunctionType::get(IRB.getVoidTy(), ArgTypes, false),
1442                      AsmOS.str(), ConstraintOS.str(),
1443                      /*hasSideEffects=*/true);
1444 
1445   IRB.CreateCall(JumpTableAsm, AsmArgs);
1446   IRB.CreateUnreachable();
1447 }
1448 
1449 /// Given a disjoint set of type identifiers and functions, build a jump table
1450 /// for the functions, build the bit sets and lower the llvm.type.test calls.
1451 void LowerTypeTestsModule::buildBitSetsFromFunctionsNative(
1452     ArrayRef<Metadata *> TypeIds, ArrayRef<GlobalTypeMember *> Functions) {
1453   // Unlike the global bitset builder, the function bitset builder cannot
1454   // re-arrange functions in a particular order and base its calculations on the
1455   // layout of the functions' entry points, as we have no idea how large a
1456   // particular function will end up being (the size could even depend on what
1457   // this pass does!) Instead, we build a jump table, which is a block of code
1458   // consisting of one branch instruction for each of the functions in the bit
1459   // set that branches to the target function, and redirect any taken function
1460   // addresses to the corresponding jump table entry. In the object file's
1461   // symbol table, the symbols for the target functions also refer to the jump
1462   // table entries, so that addresses taken outside the module will pass any
1463   // verification done inside the module.
1464   //
1465   // In more concrete terms, suppose we have three functions f, g, h which are
1466   // of the same type, and a function foo that returns their addresses:
1467   //
1468   // f:
1469   // mov 0, %eax
1470   // ret
1471   //
1472   // g:
1473   // mov 1, %eax
1474   // ret
1475   //
1476   // h:
1477   // mov 2, %eax
1478   // ret
1479   //
1480   // foo:
1481   // mov f, %eax
1482   // mov g, %edx
1483   // mov h, %ecx
1484   // ret
1485   //
1486   // We output the jump table as module-level inline asm string. The end result
1487   // will (conceptually) look like this:
1488   //
1489   // f = .cfi.jumptable
1490   // g = .cfi.jumptable + 4
1491   // h = .cfi.jumptable + 8
1492   // .cfi.jumptable:
1493   // jmp f.cfi  ; 5 bytes
1494   // int3       ; 1 byte
1495   // int3       ; 1 byte
1496   // int3       ; 1 byte
1497   // jmp g.cfi  ; 5 bytes
1498   // int3       ; 1 byte
1499   // int3       ; 1 byte
1500   // int3       ; 1 byte
1501   // jmp h.cfi  ; 5 bytes
1502   // int3       ; 1 byte
1503   // int3       ; 1 byte
1504   // int3       ; 1 byte
1505   //
1506   // f.cfi:
1507   // mov 0, %eax
1508   // ret
1509   //
1510   // g.cfi:
1511   // mov 1, %eax
1512   // ret
1513   //
1514   // h.cfi:
1515   // mov 2, %eax
1516   // ret
1517   //
1518   // foo:
1519   // mov f, %eax
1520   // mov g, %edx
1521   // mov h, %ecx
1522   // ret
1523   //
1524   // Because the addresses of f, g, h are evenly spaced at a power of 2, in the
1525   // normal case the check can be carried out using the same kind of simple
1526   // arithmetic that we normally use for globals.
1527 
1528   // FIXME: find a better way to represent the jumptable in the IR.
1529   assert(!Functions.empty());
1530 
1531   // Build a simple layout based on the regular layout of jump tables.
1532   DenseMap<GlobalTypeMember *, uint64_t> GlobalLayout;
1533   unsigned EntrySize = getJumpTableEntrySize();
1534   for (unsigned I = 0; I != Functions.size(); ++I)
1535     GlobalLayout[Functions[I]] = I * EntrySize;
1536 
1537   Function *JumpTableFn =
1538       Function::Create(FunctionType::get(Type::getVoidTy(M.getContext()),
1539                                          /* IsVarArg */ false),
1540                        GlobalValue::PrivateLinkage,
1541                        M.getDataLayout().getProgramAddressSpace(),
1542                        ".cfi.jumptable", &M);
1543   ArrayType *JumpTableType =
1544       ArrayType::get(getJumpTableEntryType(), Functions.size());
1545   auto JumpTable =
1546       ConstantExpr::getPointerCast(JumpTableFn, JumpTableType->getPointerTo(0));
1547 
1548   lowerTypeTestCalls(TypeIds, JumpTable, GlobalLayout);
1549 
1550   {
1551     ScopedSaveAliaseesAndUsed S(M);
1552 
1553     // Build aliases pointing to offsets into the jump table, and replace
1554     // references to the original functions with references to the aliases.
1555     for (unsigned I = 0; I != Functions.size(); ++I) {
1556       Function *F = cast<Function>(Functions[I]->getGlobal());
1557       bool IsJumpTableCanonical = Functions[I]->isJumpTableCanonical();
1558 
1559       Constant *CombinedGlobalElemPtr = ConstantExpr::getBitCast(
1560           ConstantExpr::getInBoundsGetElementPtr(
1561               JumpTableType, JumpTable,
1562               ArrayRef<Constant *>{ConstantInt::get(IntPtrTy, 0),
1563                                    ConstantInt::get(IntPtrTy, I)}),
1564           F->getType());
1565 
1566       const bool IsExported = Functions[I]->isExported();
1567       if (!IsJumpTableCanonical) {
1568         GlobalValue::LinkageTypes LT = IsExported
1569                                            ? GlobalValue::ExternalLinkage
1570                                            : GlobalValue::InternalLinkage;
1571         GlobalAlias *JtAlias = GlobalAlias::create(F->getValueType(), 0, LT,
1572                                                    F->getName() + ".cfi_jt",
1573                                                    CombinedGlobalElemPtr, &M);
1574         if (IsExported)
1575           JtAlias->setVisibility(GlobalValue::HiddenVisibility);
1576         else
1577           appendToUsed(M, {JtAlias});
1578       }
1579 
1580       if (IsExported) {
1581         if (IsJumpTableCanonical)
1582           ExportSummary->cfiFunctionDefs().insert(std::string(F->getName()));
1583         else
1584           ExportSummary->cfiFunctionDecls().insert(std::string(F->getName()));
1585       }
1586 
1587       if (!IsJumpTableCanonical) {
1588         if (F->hasExternalWeakLinkage())
1589           replaceWeakDeclarationWithJumpTablePtr(F, CombinedGlobalElemPtr,
1590                                                  IsJumpTableCanonical);
1591         else
1592           replaceCfiUses(F, CombinedGlobalElemPtr, IsJumpTableCanonical);
1593       } else {
1594         assert(F->getType()->getAddressSpace() == 0);
1595 
1596         GlobalAlias *FAlias =
1597             GlobalAlias::create(F->getValueType(), 0, F->getLinkage(), "",
1598                                 CombinedGlobalElemPtr, &M);
1599         FAlias->setVisibility(F->getVisibility());
1600         FAlias->takeName(F);
1601         if (FAlias->hasName())
1602           F->setName(FAlias->getName() + ".cfi");
1603         replaceCfiUses(F, FAlias, IsJumpTableCanonical);
1604         if (!F->hasLocalLinkage())
1605           F->setVisibility(GlobalVariable::HiddenVisibility);
1606       }
1607     }
1608   }
1609 
1610   createJumpTable(JumpTableFn, Functions);
1611 }
1612 
1613 /// Assign a dummy layout using an incrementing counter, tag each function
1614 /// with its index represented as metadata, and lower each type test to an
1615 /// integer range comparison. During generation of the indirect function call
1616 /// table in the backend, it will assign the given indexes.
1617 /// Note: Dynamic linking is not supported, as the WebAssembly ABI has not yet
1618 /// been finalized.
1619 void LowerTypeTestsModule::buildBitSetsFromFunctionsWASM(
1620     ArrayRef<Metadata *> TypeIds, ArrayRef<GlobalTypeMember *> Functions) {
1621   assert(!Functions.empty());
1622 
1623   // Build consecutive monotonic integer ranges for each call target set
1624   DenseMap<GlobalTypeMember *, uint64_t> GlobalLayout;
1625 
1626   for (GlobalTypeMember *GTM : Functions) {
1627     Function *F = cast<Function>(GTM->getGlobal());
1628 
1629     // Skip functions that are not address taken, to avoid bloating the table
1630     if (!F->hasAddressTaken())
1631       continue;
1632 
1633     // Store metadata with the index for each function
1634     MDNode *MD = MDNode::get(F->getContext(),
1635                              ArrayRef<Metadata *>(ConstantAsMetadata::get(
1636                                  ConstantInt::get(Int64Ty, IndirectIndex))));
1637     F->setMetadata("wasm.index", MD);
1638 
1639     // Assign the counter value
1640     GlobalLayout[GTM] = IndirectIndex++;
1641   }
1642 
1643   // The indirect function table index space starts at zero, so pass a NULL
1644   // pointer as the subtracted "jump table" offset.
1645   lowerTypeTestCalls(TypeIds, ConstantPointerNull::get(Int32PtrTy),
1646                      GlobalLayout);
1647 }
1648 
1649 void LowerTypeTestsModule::buildBitSetsFromDisjointSet(
1650     ArrayRef<Metadata *> TypeIds, ArrayRef<GlobalTypeMember *> Globals,
1651     ArrayRef<ICallBranchFunnel *> ICallBranchFunnels) {
1652   DenseMap<Metadata *, uint64_t> TypeIdIndices;
1653   for (unsigned I = 0; I != TypeIds.size(); ++I)
1654     TypeIdIndices[TypeIds[I]] = I;
1655 
1656   // For each type identifier, build a set of indices that refer to members of
1657   // the type identifier.
1658   std::vector<std::set<uint64_t>> TypeMembers(TypeIds.size());
1659   unsigned GlobalIndex = 0;
1660   DenseMap<GlobalTypeMember *, uint64_t> GlobalIndices;
1661   for (GlobalTypeMember *GTM : Globals) {
1662     for (MDNode *Type : GTM->types()) {
1663       // Type = { offset, type identifier }
1664       auto I = TypeIdIndices.find(Type->getOperand(1));
1665       if (I != TypeIdIndices.end())
1666         TypeMembers[I->second].insert(GlobalIndex);
1667     }
1668     GlobalIndices[GTM] = GlobalIndex;
1669     GlobalIndex++;
1670   }
1671 
1672   for (ICallBranchFunnel *JT : ICallBranchFunnels) {
1673     TypeMembers.emplace_back();
1674     std::set<uint64_t> &TMSet = TypeMembers.back();
1675     for (GlobalTypeMember *T : JT->targets())
1676       TMSet.insert(GlobalIndices[T]);
1677   }
1678 
1679   // Order the sets of indices by size. The GlobalLayoutBuilder works best
1680   // when given small index sets first.
1681   llvm::stable_sort(TypeMembers, [](const std::set<uint64_t> &O1,
1682                                     const std::set<uint64_t> &O2) {
1683     return O1.size() < O2.size();
1684   });
1685 
1686   // Create a GlobalLayoutBuilder and provide it with index sets as layout
1687   // fragments. The GlobalLayoutBuilder tries to lay out members of fragments as
1688   // close together as possible.
1689   GlobalLayoutBuilder GLB(Globals.size());
1690   for (auto &&MemSet : TypeMembers)
1691     GLB.addFragment(MemSet);
1692 
1693   // Build a vector of globals with the computed layout.
1694   bool IsGlobalSet =
1695       Globals.empty() || isa<GlobalVariable>(Globals[0]->getGlobal());
1696   std::vector<GlobalTypeMember *> OrderedGTMs(Globals.size());
1697   auto OGTMI = OrderedGTMs.begin();
1698   for (auto &&F : GLB.Fragments) {
1699     for (auto &&Offset : F) {
1700       if (IsGlobalSet != isa<GlobalVariable>(Globals[Offset]->getGlobal()))
1701         report_fatal_error("Type identifier may not contain both global "
1702                            "variables and functions");
1703       *OGTMI++ = Globals[Offset];
1704     }
1705   }
1706 
1707   // Build the bitsets from this disjoint set.
1708   if (IsGlobalSet)
1709     buildBitSetsFromGlobalVariables(TypeIds, OrderedGTMs);
1710   else
1711     buildBitSetsFromFunctions(TypeIds, OrderedGTMs);
1712 }
1713 
1714 /// Lower all type tests in this module.
1715 LowerTypeTestsModule::LowerTypeTestsModule(
1716     Module &M, ModuleSummaryIndex *ExportSummary,
1717     const ModuleSummaryIndex *ImportSummary, bool DropTypeTests)
1718     : M(M), ExportSummary(ExportSummary), ImportSummary(ImportSummary),
1719       DropTypeTests(DropTypeTests || ClDropTypeTests) {
1720   assert(!(ExportSummary && ImportSummary));
1721   Triple TargetTriple(M.getTargetTriple());
1722   Arch = TargetTriple.getArch();
1723   OS = TargetTriple.getOS();
1724   ObjectFormat = TargetTriple.getObjectFormat();
1725 }
1726 
1727 bool LowerTypeTestsModule::runForTesting(Module &M) {
1728   ModuleSummaryIndex Summary(/*HaveGVs=*/false);
1729 
1730   // Handle the command-line summary arguments. This code is for testing
1731   // purposes only, so we handle errors directly.
1732   if (!ClReadSummary.empty()) {
1733     ExitOnError ExitOnErr("-lowertypetests-read-summary: " + ClReadSummary +
1734                           ": ");
1735     auto ReadSummaryFile =
1736         ExitOnErr(errorOrToExpected(MemoryBuffer::getFile(ClReadSummary)));
1737 
1738     yaml::Input In(ReadSummaryFile->getBuffer());
1739     In >> Summary;
1740     ExitOnErr(errorCodeToError(In.error()));
1741   }
1742 
1743   bool Changed =
1744       LowerTypeTestsModule(
1745           M, ClSummaryAction == PassSummaryAction::Export ? &Summary : nullptr,
1746           ClSummaryAction == PassSummaryAction::Import ? &Summary : nullptr,
1747           /*DropTypeTests*/ false)
1748           .lower();
1749 
1750   if (!ClWriteSummary.empty()) {
1751     ExitOnError ExitOnErr("-lowertypetests-write-summary: " + ClWriteSummary +
1752                           ": ");
1753     std::error_code EC;
1754     raw_fd_ostream OS(ClWriteSummary, EC, sys::fs::OF_TextWithCRLF);
1755     ExitOnErr(errorCodeToError(EC));
1756 
1757     yaml::Output Out(OS);
1758     Out << Summary;
1759   }
1760 
1761   return Changed;
1762 }
1763 
1764 static bool isDirectCall(Use& U) {
1765   auto *Usr = dyn_cast<CallInst>(U.getUser());
1766   if (Usr) {
1767     auto *CB = dyn_cast<CallBase>(Usr);
1768     if (CB && CB->isCallee(&U))
1769       return true;
1770   }
1771   return false;
1772 }
1773 
1774 void LowerTypeTestsModule::replaceCfiUses(Function *Old, Value *New,
1775                                           bool IsJumpTableCanonical) {
1776   SmallSetVector<Constant *, 4> Constants;
1777   for (Use &U : llvm::make_early_inc_range(Old->uses())) {
1778     // Skip block addresses and no_cfi values, which refer to the function
1779     // body instead of the jump table.
1780     if (isa<BlockAddress, NoCFIValue>(U.getUser()))
1781       continue;
1782 
1783     // Skip direct calls to externally defined or non-dso_local functions
1784     if (isDirectCall(U) && (Old->isDSOLocal() || !IsJumpTableCanonical))
1785       continue;
1786 
1787     // Must handle Constants specially, we cannot call replaceUsesOfWith on a
1788     // constant because they are uniqued.
1789     if (auto *C = dyn_cast<Constant>(U.getUser())) {
1790       if (!isa<GlobalValue>(C)) {
1791         // Save unique users to avoid processing operand replacement
1792         // more than once.
1793         Constants.insert(C);
1794         continue;
1795       }
1796     }
1797 
1798     U.set(New);
1799   }
1800 
1801   // Process operand replacement of saved constants.
1802   for (auto *C : Constants)
1803     C->handleOperandChange(Old, New);
1804 }
1805 
1806 void LowerTypeTestsModule::replaceDirectCalls(Value *Old, Value *New) {
1807   Old->replaceUsesWithIf(New, isDirectCall);
1808 }
1809 
1810 bool LowerTypeTestsModule::lower() {
1811   Function *TypeTestFunc =
1812       M.getFunction(Intrinsic::getName(Intrinsic::type_test));
1813 
1814   if (DropTypeTests && TypeTestFunc) {
1815     for (Use &U : llvm::make_early_inc_range(TypeTestFunc->uses())) {
1816       auto *CI = cast<CallInst>(U.getUser());
1817       // Find and erase llvm.assume intrinsics for this llvm.type.test call.
1818       for (Use &CIU : llvm::make_early_inc_range(CI->uses()))
1819         if (auto *Assume = dyn_cast<AssumeInst>(CIU.getUser()))
1820           Assume->eraseFromParent();
1821       // If the assume was merged with another assume, we might have a use on a
1822       // phi (which will feed the assume). Simply replace the use on the phi
1823       // with "true" and leave the merged assume.
1824       if (!CI->use_empty()) {
1825         assert(all_of(CI->users(),
1826                       [](User *U) -> bool { return isa<PHINode>(U); }));
1827         CI->replaceAllUsesWith(ConstantInt::getTrue(M.getContext()));
1828       }
1829       CI->eraseFromParent();
1830     }
1831 
1832     // We have deleted the type intrinsics, so we no longer have enough
1833     // information to reason about the liveness of virtual function pointers
1834     // in GlobalDCE.
1835     for (GlobalVariable &GV : M.globals())
1836       GV.eraseMetadata(LLVMContext::MD_vcall_visibility);
1837 
1838     return true;
1839   }
1840 
1841   // If only some of the modules were split, we cannot correctly perform
1842   // this transformation. We already checked for the presense of type tests
1843   // with partially split modules during the thin link, and would have emitted
1844   // an error if any were found, so here we can simply return.
1845   if ((ExportSummary && ExportSummary->partiallySplitLTOUnits()) ||
1846       (ImportSummary && ImportSummary->partiallySplitLTOUnits()))
1847     return false;
1848 
1849   Function *ICallBranchFunnelFunc =
1850       M.getFunction(Intrinsic::getName(Intrinsic::icall_branch_funnel));
1851   if ((!TypeTestFunc || TypeTestFunc->use_empty()) &&
1852       (!ICallBranchFunnelFunc || ICallBranchFunnelFunc->use_empty()) &&
1853       !ExportSummary && !ImportSummary)
1854     return false;
1855 
1856   if (ImportSummary) {
1857     if (TypeTestFunc)
1858       for (Use &U : llvm::make_early_inc_range(TypeTestFunc->uses()))
1859         importTypeTest(cast<CallInst>(U.getUser()));
1860 
1861     if (ICallBranchFunnelFunc && !ICallBranchFunnelFunc->use_empty())
1862       report_fatal_error(
1863           "unexpected call to llvm.icall.branch.funnel during import phase");
1864 
1865     SmallVector<Function *, 8> Defs;
1866     SmallVector<Function *, 8> Decls;
1867     for (auto &F : M) {
1868       // CFI functions are either external, or promoted. A local function may
1869       // have the same name, but it's not the one we are looking for.
1870       if (F.hasLocalLinkage())
1871         continue;
1872       if (ImportSummary->cfiFunctionDefs().count(std::string(F.getName())))
1873         Defs.push_back(&F);
1874       else if (ImportSummary->cfiFunctionDecls().count(
1875                    std::string(F.getName())))
1876         Decls.push_back(&F);
1877     }
1878 
1879     std::vector<GlobalAlias *> AliasesToErase;
1880     {
1881       ScopedSaveAliaseesAndUsed S(M);
1882       for (auto F : Defs)
1883         importFunction(F, /*isJumpTableCanonical*/ true, AliasesToErase);
1884       for (auto F : Decls)
1885         importFunction(F, /*isJumpTableCanonical*/ false, AliasesToErase);
1886     }
1887     for (GlobalAlias *GA : AliasesToErase)
1888       GA->eraseFromParent();
1889 
1890     return true;
1891   }
1892 
1893   // Equivalence class set containing type identifiers and the globals that
1894   // reference them. This is used to partition the set of type identifiers in
1895   // the module into disjoint sets.
1896   using GlobalClassesTy = EquivalenceClasses<
1897       PointerUnion<GlobalTypeMember *, Metadata *, ICallBranchFunnel *>>;
1898   GlobalClassesTy GlobalClasses;
1899 
1900   // Verify the type metadata and build a few data structures to let us
1901   // efficiently enumerate the type identifiers associated with a global:
1902   // a list of GlobalTypeMembers (a GlobalObject stored alongside a vector
1903   // of associated type metadata) and a mapping from type identifiers to their
1904   // list of GlobalTypeMembers and last observed index in the list of globals.
1905   // The indices will be used later to deterministically order the list of type
1906   // identifiers.
1907   BumpPtrAllocator Alloc;
1908   struct TIInfo {
1909     unsigned UniqueId;
1910     std::vector<GlobalTypeMember *> RefGlobals;
1911   };
1912   DenseMap<Metadata *, TIInfo> TypeIdInfo;
1913   unsigned CurUniqueId = 0;
1914   SmallVector<MDNode *, 2> Types;
1915 
1916   // Cross-DSO CFI emits jumptable entries for exported functions as well as
1917   // address taken functions in case they are address taken in other modules.
1918   const bool CrossDsoCfi = M.getModuleFlag("Cross-DSO CFI") != nullptr;
1919 
1920   struct ExportedFunctionInfo {
1921     CfiFunctionLinkage Linkage;
1922     MDNode *FuncMD; // {name, linkage, type[, type...]}
1923   };
1924   DenseMap<StringRef, ExportedFunctionInfo> ExportedFunctions;
1925   if (ExportSummary) {
1926     // A set of all functions that are address taken by a live global object.
1927     DenseSet<GlobalValue::GUID> AddressTaken;
1928     for (auto &I : *ExportSummary)
1929       for (auto &GVS : I.second.SummaryList)
1930         if (GVS->isLive())
1931           for (auto &Ref : GVS->refs())
1932             AddressTaken.insert(Ref.getGUID());
1933 
1934     NamedMDNode *CfiFunctionsMD = M.getNamedMetadata("cfi.functions");
1935     if (CfiFunctionsMD) {
1936       for (auto FuncMD : CfiFunctionsMD->operands()) {
1937         assert(FuncMD->getNumOperands() >= 2);
1938         StringRef FunctionName =
1939             cast<MDString>(FuncMD->getOperand(0))->getString();
1940         CfiFunctionLinkage Linkage = static_cast<CfiFunctionLinkage>(
1941             cast<ConstantAsMetadata>(FuncMD->getOperand(1))
1942                 ->getValue()
1943                 ->getUniqueInteger()
1944                 .getZExtValue());
1945         const GlobalValue::GUID GUID = GlobalValue::getGUID(
1946                 GlobalValue::dropLLVMManglingEscape(FunctionName));
1947         // Do not emit jumptable entries for functions that are not-live and
1948         // have no live references (and are not exported with cross-DSO CFI.)
1949         if (!ExportSummary->isGUIDLive(GUID))
1950           continue;
1951         if (!AddressTaken.count(GUID)) {
1952           if (!CrossDsoCfi || Linkage != CFL_Definition)
1953             continue;
1954 
1955           bool Exported = false;
1956           if (auto VI = ExportSummary->getValueInfo(GUID))
1957             for (auto &GVS : VI.getSummaryList())
1958               if (GVS->isLive() && !GlobalValue::isLocalLinkage(GVS->linkage()))
1959                 Exported = true;
1960 
1961           if (!Exported)
1962             continue;
1963         }
1964         auto P = ExportedFunctions.insert({FunctionName, {Linkage, FuncMD}});
1965         if (!P.second && P.first->second.Linkage != CFL_Definition)
1966           P.first->second = {Linkage, FuncMD};
1967       }
1968 
1969       for (const auto &P : ExportedFunctions) {
1970         StringRef FunctionName = P.first;
1971         CfiFunctionLinkage Linkage = P.second.Linkage;
1972         MDNode *FuncMD = P.second.FuncMD;
1973         Function *F = M.getFunction(FunctionName);
1974         if (F && F->hasLocalLinkage()) {
1975           // Locally defined function that happens to have the same name as a
1976           // function defined in a ThinLTO module. Rename it to move it out of
1977           // the way of the external reference that we're about to create.
1978           // Note that setName will find a unique name for the function, so even
1979           // if there is an existing function with the suffix there won't be a
1980           // name collision.
1981           F->setName(F->getName() + ".1");
1982           F = nullptr;
1983         }
1984 
1985         if (!F)
1986           F = Function::Create(
1987               FunctionType::get(Type::getVoidTy(M.getContext()), false),
1988               GlobalVariable::ExternalLinkage,
1989               M.getDataLayout().getProgramAddressSpace(), FunctionName, &M);
1990 
1991         // If the function is available_externally, remove its definition so
1992         // that it is handled the same way as a declaration. Later we will try
1993         // to create an alias using this function's linkage, which will fail if
1994         // the linkage is available_externally. This will also result in us
1995         // following the code path below to replace the type metadata.
1996         if (F->hasAvailableExternallyLinkage()) {
1997           F->setLinkage(GlobalValue::ExternalLinkage);
1998           F->deleteBody();
1999           F->setComdat(nullptr);
2000           F->clearMetadata();
2001         }
2002 
2003         // Update the linkage for extern_weak declarations when a definition
2004         // exists.
2005         if (Linkage == CFL_Definition && F->hasExternalWeakLinkage())
2006           F->setLinkage(GlobalValue::ExternalLinkage);
2007 
2008         // If the function in the full LTO module is a declaration, replace its
2009         // type metadata with the type metadata we found in cfi.functions. That
2010         // metadata is presumed to be more accurate than the metadata attached
2011         // to the declaration.
2012         if (F->isDeclaration()) {
2013           if (Linkage == CFL_WeakDeclaration)
2014             F->setLinkage(GlobalValue::ExternalWeakLinkage);
2015 
2016           F->eraseMetadata(LLVMContext::MD_type);
2017           for (unsigned I = 2; I < FuncMD->getNumOperands(); ++I)
2018             F->addMetadata(LLVMContext::MD_type,
2019                            *cast<MDNode>(FuncMD->getOperand(I).get()));
2020         }
2021       }
2022     }
2023   }
2024 
2025   DenseMap<GlobalObject *, GlobalTypeMember *> GlobalTypeMembers;
2026   for (GlobalObject &GO : M.global_objects()) {
2027     if (isa<GlobalVariable>(GO) && GO.isDeclarationForLinker())
2028       continue;
2029 
2030     Types.clear();
2031     GO.getMetadata(LLVMContext::MD_type, Types);
2032 
2033     bool IsJumpTableCanonical = false;
2034     bool IsExported = false;
2035     if (Function *F = dyn_cast<Function>(&GO)) {
2036       IsJumpTableCanonical = isJumpTableCanonical(F);
2037       if (ExportedFunctions.count(F->getName())) {
2038         IsJumpTableCanonical |=
2039             ExportedFunctions[F->getName()].Linkage == CFL_Definition;
2040         IsExported = true;
2041       // TODO: The logic here checks only that the function is address taken,
2042       // not that the address takers are live. This can be updated to check
2043       // their liveness and emit fewer jumptable entries once monolithic LTO
2044       // builds also emit summaries.
2045       } else if (!F->hasAddressTaken()) {
2046         if (!CrossDsoCfi || !IsJumpTableCanonical || F->hasLocalLinkage())
2047           continue;
2048       }
2049     }
2050 
2051     auto *GTM = GlobalTypeMember::create(Alloc, &GO, IsJumpTableCanonical,
2052                                          IsExported, Types);
2053     GlobalTypeMembers[&GO] = GTM;
2054     for (MDNode *Type : Types) {
2055       verifyTypeMDNode(&GO, Type);
2056       auto &Info = TypeIdInfo[Type->getOperand(1)];
2057       Info.UniqueId = ++CurUniqueId;
2058       Info.RefGlobals.push_back(GTM);
2059     }
2060   }
2061 
2062   auto AddTypeIdUse = [&](Metadata *TypeId) -> TypeIdUserInfo & {
2063     // Add the call site to the list of call sites for this type identifier. We
2064     // also use TypeIdUsers to keep track of whether we have seen this type
2065     // identifier before. If we have, we don't need to re-add the referenced
2066     // globals to the equivalence class.
2067     auto Ins = TypeIdUsers.insert({TypeId, {}});
2068     if (Ins.second) {
2069       // Add the type identifier to the equivalence class.
2070       GlobalClassesTy::iterator GCI = GlobalClasses.insert(TypeId);
2071       GlobalClassesTy::member_iterator CurSet = GlobalClasses.findLeader(GCI);
2072 
2073       // Add the referenced globals to the type identifier's equivalence class.
2074       for (GlobalTypeMember *GTM : TypeIdInfo[TypeId].RefGlobals)
2075         CurSet = GlobalClasses.unionSets(
2076             CurSet, GlobalClasses.findLeader(GlobalClasses.insert(GTM)));
2077     }
2078 
2079     return Ins.first->second;
2080   };
2081 
2082   if (TypeTestFunc) {
2083     for (const Use &U : TypeTestFunc->uses()) {
2084       auto CI = cast<CallInst>(U.getUser());
2085       // If this type test is only used by llvm.assume instructions, it
2086       // was used for whole program devirtualization, and is being kept
2087       // for use by other optimization passes. We do not need or want to
2088       // lower it here. We also don't want to rewrite any associated globals
2089       // unnecessarily. These will be removed by a subsequent LTT invocation
2090       // with the DropTypeTests flag set.
2091       bool OnlyAssumeUses = !CI->use_empty();
2092       for (const Use &CIU : CI->uses()) {
2093         if (isa<AssumeInst>(CIU.getUser()))
2094           continue;
2095         OnlyAssumeUses = false;
2096         break;
2097       }
2098       if (OnlyAssumeUses)
2099         continue;
2100 
2101       auto TypeIdMDVal = dyn_cast<MetadataAsValue>(CI->getArgOperand(1));
2102       if (!TypeIdMDVal)
2103         report_fatal_error("Second argument of llvm.type.test must be metadata");
2104       auto TypeId = TypeIdMDVal->getMetadata();
2105       AddTypeIdUse(TypeId).CallSites.push_back(CI);
2106     }
2107   }
2108 
2109   if (ICallBranchFunnelFunc) {
2110     for (const Use &U : ICallBranchFunnelFunc->uses()) {
2111       if (Arch != Triple::x86_64)
2112         report_fatal_error(
2113             "llvm.icall.branch.funnel not supported on this target");
2114 
2115       auto CI = cast<CallInst>(U.getUser());
2116 
2117       std::vector<GlobalTypeMember *> Targets;
2118       if (CI->arg_size() % 2 != 1)
2119         report_fatal_error("number of arguments should be odd");
2120 
2121       GlobalClassesTy::member_iterator CurSet;
2122       for (unsigned I = 1; I != CI->arg_size(); I += 2) {
2123         int64_t Offset;
2124         auto *Base = dyn_cast<GlobalObject>(GetPointerBaseWithConstantOffset(
2125             CI->getOperand(I), Offset, M.getDataLayout()));
2126         if (!Base)
2127           report_fatal_error(
2128               "Expected branch funnel operand to be global value");
2129 
2130         GlobalTypeMember *GTM = GlobalTypeMembers[Base];
2131         Targets.push_back(GTM);
2132         GlobalClassesTy::member_iterator NewSet =
2133             GlobalClasses.findLeader(GlobalClasses.insert(GTM));
2134         if (I == 1)
2135           CurSet = NewSet;
2136         else
2137           CurSet = GlobalClasses.unionSets(CurSet, NewSet);
2138       }
2139 
2140       GlobalClasses.unionSets(
2141           CurSet, GlobalClasses.findLeader(
2142                       GlobalClasses.insert(ICallBranchFunnel::create(
2143                           Alloc, CI, Targets, ++CurUniqueId))));
2144     }
2145   }
2146 
2147   if (ExportSummary) {
2148     DenseMap<GlobalValue::GUID, TinyPtrVector<Metadata *>> MetadataByGUID;
2149     for (auto &P : TypeIdInfo) {
2150       if (auto *TypeId = dyn_cast<MDString>(P.first))
2151         MetadataByGUID[GlobalValue::getGUID(TypeId->getString())].push_back(
2152             TypeId);
2153     }
2154 
2155     for (auto &P : *ExportSummary) {
2156       for (auto &S : P.second.SummaryList) {
2157         if (!ExportSummary->isGlobalValueLive(S.get()))
2158           continue;
2159         if (auto *FS = dyn_cast<FunctionSummary>(S->getBaseObject()))
2160           for (GlobalValue::GUID G : FS->type_tests())
2161             for (Metadata *MD : MetadataByGUID[G])
2162               AddTypeIdUse(MD).IsExported = true;
2163       }
2164     }
2165   }
2166 
2167   if (GlobalClasses.empty())
2168     return false;
2169 
2170   // Build a list of disjoint sets ordered by their maximum global index for
2171   // determinism.
2172   std::vector<std::pair<GlobalClassesTy::iterator, unsigned>> Sets;
2173   for (GlobalClassesTy::iterator I = GlobalClasses.begin(),
2174                                  E = GlobalClasses.end();
2175        I != E; ++I) {
2176     if (!I->isLeader())
2177       continue;
2178     ++NumTypeIdDisjointSets;
2179 
2180     unsigned MaxUniqueId = 0;
2181     for (GlobalClassesTy::member_iterator MI = GlobalClasses.member_begin(I);
2182          MI != GlobalClasses.member_end(); ++MI) {
2183       if (auto *MD = MI->dyn_cast<Metadata *>())
2184         MaxUniqueId = std::max(MaxUniqueId, TypeIdInfo[MD].UniqueId);
2185       else if (auto *BF = MI->dyn_cast<ICallBranchFunnel *>())
2186         MaxUniqueId = std::max(MaxUniqueId, BF->UniqueId);
2187     }
2188     Sets.emplace_back(I, MaxUniqueId);
2189   }
2190   llvm::sort(Sets,
2191              [](const std::pair<GlobalClassesTy::iterator, unsigned> &S1,
2192                 const std::pair<GlobalClassesTy::iterator, unsigned> &S2) {
2193                return S1.second < S2.second;
2194              });
2195 
2196   // For each disjoint set we found...
2197   for (const auto &S : Sets) {
2198     // Build the list of type identifiers in this disjoint set.
2199     std::vector<Metadata *> TypeIds;
2200     std::vector<GlobalTypeMember *> Globals;
2201     std::vector<ICallBranchFunnel *> ICallBranchFunnels;
2202     for (GlobalClassesTy::member_iterator MI =
2203              GlobalClasses.member_begin(S.first);
2204          MI != GlobalClasses.member_end(); ++MI) {
2205       if (MI->is<Metadata *>())
2206         TypeIds.push_back(MI->get<Metadata *>());
2207       else if (MI->is<GlobalTypeMember *>())
2208         Globals.push_back(MI->get<GlobalTypeMember *>());
2209       else
2210         ICallBranchFunnels.push_back(MI->get<ICallBranchFunnel *>());
2211     }
2212 
2213     // Order type identifiers by unique ID for determinism. This ordering is
2214     // stable as there is a one-to-one mapping between metadata and unique IDs.
2215     llvm::sort(TypeIds, [&](Metadata *M1, Metadata *M2) {
2216       return TypeIdInfo[M1].UniqueId < TypeIdInfo[M2].UniqueId;
2217     });
2218 
2219     // Same for the branch funnels.
2220     llvm::sort(ICallBranchFunnels,
2221                [&](ICallBranchFunnel *F1, ICallBranchFunnel *F2) {
2222                  return F1->UniqueId < F2->UniqueId;
2223                });
2224 
2225     // Build bitsets for this disjoint set.
2226     buildBitSetsFromDisjointSet(TypeIds, Globals, ICallBranchFunnels);
2227   }
2228 
2229   allocateByteArrays();
2230 
2231   // Parse alias data to replace stand-in function declarations for aliases
2232   // with an alias to the intended target.
2233   if (ExportSummary) {
2234     if (NamedMDNode *AliasesMD = M.getNamedMetadata("aliases")) {
2235       for (auto AliasMD : AliasesMD->operands()) {
2236         assert(AliasMD->getNumOperands() >= 4);
2237         StringRef AliasName =
2238             cast<MDString>(AliasMD->getOperand(0))->getString();
2239         StringRef Aliasee = cast<MDString>(AliasMD->getOperand(1))->getString();
2240 
2241         if (!ExportedFunctions.count(Aliasee) ||
2242             ExportedFunctions[Aliasee].Linkage != CFL_Definition ||
2243             !M.getNamedAlias(Aliasee))
2244           continue;
2245 
2246         GlobalValue::VisibilityTypes Visibility =
2247             static_cast<GlobalValue::VisibilityTypes>(
2248                 cast<ConstantAsMetadata>(AliasMD->getOperand(2))
2249                     ->getValue()
2250                     ->getUniqueInteger()
2251                     .getZExtValue());
2252         bool Weak =
2253             static_cast<bool>(cast<ConstantAsMetadata>(AliasMD->getOperand(3))
2254                                   ->getValue()
2255                                   ->getUniqueInteger()
2256                                   .getZExtValue());
2257 
2258         auto *Alias = GlobalAlias::create("", M.getNamedAlias(Aliasee));
2259         Alias->setVisibility(Visibility);
2260         if (Weak)
2261           Alias->setLinkage(GlobalValue::WeakAnyLinkage);
2262 
2263         if (auto *F = M.getFunction(AliasName)) {
2264           Alias->takeName(F);
2265           F->replaceAllUsesWith(Alias);
2266           F->eraseFromParent();
2267         } else {
2268           Alias->setName(AliasName);
2269         }
2270       }
2271     }
2272   }
2273 
2274   // Emit .symver directives for exported functions, if they exist.
2275   if (ExportSummary) {
2276     if (NamedMDNode *SymversMD = M.getNamedMetadata("symvers")) {
2277       for (auto Symver : SymversMD->operands()) {
2278         assert(Symver->getNumOperands() >= 2);
2279         StringRef SymbolName =
2280             cast<MDString>(Symver->getOperand(0))->getString();
2281         StringRef Alias = cast<MDString>(Symver->getOperand(1))->getString();
2282 
2283         if (!ExportedFunctions.count(SymbolName))
2284           continue;
2285 
2286         M.appendModuleInlineAsm(
2287             (llvm::Twine(".symver ") + SymbolName + ", " + Alias).str());
2288       }
2289     }
2290   }
2291 
2292   return true;
2293 }
2294 
2295 PreservedAnalyses LowerTypeTestsPass::run(Module &M,
2296                                           ModuleAnalysisManager &AM) {
2297   bool Changed;
2298   if (UseCommandLine)
2299     Changed = LowerTypeTestsModule::runForTesting(M);
2300   else
2301     Changed =
2302         LowerTypeTestsModule(M, ExportSummary, ImportSummary, DropTypeTests)
2303             .lower();
2304   if (!Changed)
2305     return PreservedAnalyses::all();
2306   return PreservedAnalyses::none();
2307 }
2308