1 //===- LowerTypeTests.cpp - type metadata lowering pass -------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This pass lowers type metadata and calls to the llvm.type.test intrinsic.
10 // It also ensures that globals are properly laid out for the
11 // llvm.icall.branch.funnel intrinsic.
12 // See http://llvm.org/docs/TypeMetadata.html for more information.
13 //
14 //===----------------------------------------------------------------------===//
15 
16 #include "llvm/Transforms/IPO/LowerTypeTests.h"
17 #include "llvm/ADT/APInt.h"
18 #include "llvm/ADT/ArrayRef.h"
19 #include "llvm/ADT/DenseMap.h"
20 #include "llvm/ADT/EquivalenceClasses.h"
21 #include "llvm/ADT/PointerUnion.h"
22 #include "llvm/ADT/SetVector.h"
23 #include "llvm/ADT/SmallVector.h"
24 #include "llvm/ADT/Statistic.h"
25 #include "llvm/ADT/StringRef.h"
26 #include "llvm/ADT/TinyPtrVector.h"
27 #include "llvm/ADT/Triple.h"
28 #include "llvm/Analysis/TypeMetadataUtils.h"
29 #include "llvm/Analysis/ValueTracking.h"
30 #include "llvm/IR/Attributes.h"
31 #include "llvm/IR/BasicBlock.h"
32 #include "llvm/IR/Constant.h"
33 #include "llvm/IR/Constants.h"
34 #include "llvm/IR/DataLayout.h"
35 #include "llvm/IR/DerivedTypes.h"
36 #include "llvm/IR/Function.h"
37 #include "llvm/IR/GlobalAlias.h"
38 #include "llvm/IR/GlobalObject.h"
39 #include "llvm/IR/GlobalValue.h"
40 #include "llvm/IR/GlobalVariable.h"
41 #include "llvm/IR/IRBuilder.h"
42 #include "llvm/IR/InlineAsm.h"
43 #include "llvm/IR/Instruction.h"
44 #include "llvm/IR/Instructions.h"
45 #include "llvm/IR/IntrinsicInst.h"
46 #include "llvm/IR/Intrinsics.h"
47 #include "llvm/IR/LLVMContext.h"
48 #include "llvm/IR/Metadata.h"
49 #include "llvm/IR/Module.h"
50 #include "llvm/IR/ModuleSummaryIndex.h"
51 #include "llvm/IR/ModuleSummaryIndexYAML.h"
52 #include "llvm/IR/Operator.h"
53 #include "llvm/IR/PassManager.h"
54 #include "llvm/IR/Type.h"
55 #include "llvm/IR/Use.h"
56 #include "llvm/IR/User.h"
57 #include "llvm/IR/Value.h"
58 #include "llvm/InitializePasses.h"
59 #include "llvm/Pass.h"
60 #include "llvm/Support/Allocator.h"
61 #include "llvm/Support/Casting.h"
62 #include "llvm/Support/CommandLine.h"
63 #include "llvm/Support/Debug.h"
64 #include "llvm/Support/Error.h"
65 #include "llvm/Support/ErrorHandling.h"
66 #include "llvm/Support/FileSystem.h"
67 #include "llvm/Support/MathExtras.h"
68 #include "llvm/Support/MemoryBuffer.h"
69 #include "llvm/Support/TrailingObjects.h"
70 #include "llvm/Support/YAMLTraits.h"
71 #include "llvm/Support/raw_ostream.h"
72 #include "llvm/Transforms/IPO.h"
73 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
74 #include "llvm/Transforms/Utils/ModuleUtils.h"
75 #include <algorithm>
76 #include <cassert>
77 #include <cstdint>
78 #include <memory>
79 #include <set>
80 #include <string>
81 #include <system_error>
82 #include <utility>
83 #include <vector>
84 
85 using namespace llvm;
86 using namespace lowertypetests;
87 
88 #define DEBUG_TYPE "lowertypetests"
89 
90 STATISTIC(ByteArraySizeBits, "Byte array size in bits");
91 STATISTIC(ByteArraySizeBytes, "Byte array size in bytes");
92 STATISTIC(NumByteArraysCreated, "Number of byte arrays created");
93 STATISTIC(NumTypeTestCallsLowered, "Number of type test calls lowered");
94 STATISTIC(NumTypeIdDisjointSets, "Number of disjoint sets of type identifiers");
95 
96 static cl::opt<bool> AvoidReuse(
97     "lowertypetests-avoid-reuse",
98     cl::desc("Try to avoid reuse of byte array addresses using aliases"),
99     cl::Hidden, cl::init(true));
100 
101 static cl::opt<PassSummaryAction> ClSummaryAction(
102     "lowertypetests-summary-action",
103     cl::desc("What to do with the summary when running this pass"),
104     cl::values(clEnumValN(PassSummaryAction::None, "none", "Do nothing"),
105                clEnumValN(PassSummaryAction::Import, "import",
106                           "Import typeid resolutions from summary and globals"),
107                clEnumValN(PassSummaryAction::Export, "export",
108                           "Export typeid resolutions to summary and globals")),
109     cl::Hidden);
110 
111 static cl::opt<std::string> ClReadSummary(
112     "lowertypetests-read-summary",
113     cl::desc("Read summary from given YAML file before running pass"),
114     cl::Hidden);
115 
116 static cl::opt<std::string> ClWriteSummary(
117     "lowertypetests-write-summary",
118     cl::desc("Write summary to given YAML file after running pass"),
119     cl::Hidden);
120 
121 static cl::opt<bool>
122     ClDropTypeTests("lowertypetests-drop-type-tests",
123                     cl::desc("Simply drop type test assume sequences"),
124                     cl::Hidden, cl::init(false));
125 
126 bool BitSetInfo::containsGlobalOffset(uint64_t Offset) const {
127   if (Offset < ByteOffset)
128     return false;
129 
130   if ((Offset - ByteOffset) % (uint64_t(1) << AlignLog2) != 0)
131     return false;
132 
133   uint64_t BitOffset = (Offset - ByteOffset) >> AlignLog2;
134   if (BitOffset >= BitSize)
135     return false;
136 
137   return Bits.count(BitOffset);
138 }
139 
140 void BitSetInfo::print(raw_ostream &OS) const {
141   OS << "offset " << ByteOffset << " size " << BitSize << " align "
142      << (1 << AlignLog2);
143 
144   if (isAllOnes()) {
145     OS << " all-ones\n";
146     return;
147   }
148 
149   OS << " { ";
150   for (uint64_t B : Bits)
151     OS << B << ' ';
152   OS << "}\n";
153 }
154 
155 BitSetInfo BitSetBuilder::build() {
156   if (Min > Max)
157     Min = 0;
158 
159   // Normalize each offset against the minimum observed offset, and compute
160   // the bitwise OR of each of the offsets. The number of trailing zeros
161   // in the mask gives us the log2 of the alignment of all offsets, which
162   // allows us to compress the bitset by only storing one bit per aligned
163   // address.
164   uint64_t Mask = 0;
165   for (uint64_t &Offset : Offsets) {
166     Offset -= Min;
167     Mask |= Offset;
168   }
169 
170   BitSetInfo BSI;
171   BSI.ByteOffset = Min;
172 
173   BSI.AlignLog2 = 0;
174   if (Mask != 0)
175     BSI.AlignLog2 = countTrailingZeros(Mask, ZB_Undefined);
176 
177   // Build the compressed bitset while normalizing the offsets against the
178   // computed alignment.
179   BSI.BitSize = ((Max - Min) >> BSI.AlignLog2) + 1;
180   for (uint64_t Offset : Offsets) {
181     Offset >>= BSI.AlignLog2;
182     BSI.Bits.insert(Offset);
183   }
184 
185   return BSI;
186 }
187 
188 void GlobalLayoutBuilder::addFragment(const std::set<uint64_t> &F) {
189   // Create a new fragment to hold the layout for F.
190   Fragments.emplace_back();
191   std::vector<uint64_t> &Fragment = Fragments.back();
192   uint64_t FragmentIndex = Fragments.size() - 1;
193 
194   for (auto ObjIndex : F) {
195     uint64_t OldFragmentIndex = FragmentMap[ObjIndex];
196     if (OldFragmentIndex == 0) {
197       // We haven't seen this object index before, so just add it to the current
198       // fragment.
199       Fragment.push_back(ObjIndex);
200     } else {
201       // This index belongs to an existing fragment. Copy the elements of the
202       // old fragment into this one and clear the old fragment. We don't update
203       // the fragment map just yet, this ensures that any further references to
204       // indices from the old fragment in this fragment do not insert any more
205       // indices.
206       std::vector<uint64_t> &OldFragment = Fragments[OldFragmentIndex];
207       llvm::append_range(Fragment, OldFragment);
208       OldFragment.clear();
209     }
210   }
211 
212   // Update the fragment map to point our object indices to this fragment.
213   for (uint64_t ObjIndex : Fragment)
214     FragmentMap[ObjIndex] = FragmentIndex;
215 }
216 
217 void ByteArrayBuilder::allocate(const std::set<uint64_t> &Bits,
218                                 uint64_t BitSize, uint64_t &AllocByteOffset,
219                                 uint8_t &AllocMask) {
220   // Find the smallest current allocation.
221   unsigned Bit = 0;
222   for (unsigned I = 1; I != BitsPerByte; ++I)
223     if (BitAllocs[I] < BitAllocs[Bit])
224       Bit = I;
225 
226   AllocByteOffset = BitAllocs[Bit];
227 
228   // Add our size to it.
229   unsigned ReqSize = AllocByteOffset + BitSize;
230   BitAllocs[Bit] = ReqSize;
231   if (Bytes.size() < ReqSize)
232     Bytes.resize(ReqSize);
233 
234   // Set our bits.
235   AllocMask = 1 << Bit;
236   for (uint64_t B : Bits)
237     Bytes[AllocByteOffset + B] |= AllocMask;
238 }
239 
240 bool lowertypetests::isJumpTableCanonical(Function *F) {
241   if (F->isDeclarationForLinker())
242     return false;
243   auto *CI = mdconst::extract_or_null<ConstantInt>(
244       F->getParent()->getModuleFlag("CFI Canonical Jump Tables"));
245   if (!CI || CI->getZExtValue() != 0)
246     return true;
247   return F->hasFnAttribute("cfi-canonical-jump-table");
248 }
249 
250 namespace {
251 
252 struct ByteArrayInfo {
253   std::set<uint64_t> Bits;
254   uint64_t BitSize;
255   GlobalVariable *ByteArray;
256   GlobalVariable *MaskGlobal;
257   uint8_t *MaskPtr = nullptr;
258 };
259 
260 /// A POD-like structure that we use to store a global reference together with
261 /// its metadata types. In this pass we frequently need to query the set of
262 /// metadata types referenced by a global, which at the IR level is an expensive
263 /// operation involving a map lookup; this data structure helps to reduce the
264 /// number of times we need to do this lookup.
265 class GlobalTypeMember final : TrailingObjects<GlobalTypeMember, MDNode *> {
266   friend TrailingObjects;
267 
268   GlobalObject *GO;
269   size_t NTypes;
270 
271   // For functions: true if the jump table is canonical. This essentially means
272   // whether the canonical address (i.e. the symbol table entry) of the function
273   // is provided by the local jump table. This is normally the same as whether
274   // the function is defined locally, but if canonical jump tables are disabled
275   // by the user then the jump table never provides a canonical definition.
276   bool IsJumpTableCanonical;
277 
278   // For functions: true if this function is either defined or used in a thinlto
279   // module and its jumptable entry needs to be exported to thinlto backends.
280   bool IsExported;
281 
282   size_t numTrailingObjects(OverloadToken<MDNode *>) const { return NTypes; }
283 
284 public:
285   static GlobalTypeMember *create(BumpPtrAllocator &Alloc, GlobalObject *GO,
286                                   bool IsJumpTableCanonical, bool IsExported,
287                                   ArrayRef<MDNode *> Types) {
288     auto *GTM = static_cast<GlobalTypeMember *>(Alloc.Allocate(
289         totalSizeToAlloc<MDNode *>(Types.size()), alignof(GlobalTypeMember)));
290     GTM->GO = GO;
291     GTM->NTypes = Types.size();
292     GTM->IsJumpTableCanonical = IsJumpTableCanonical;
293     GTM->IsExported = IsExported;
294     std::uninitialized_copy(Types.begin(), Types.end(),
295                             GTM->getTrailingObjects<MDNode *>());
296     return GTM;
297   }
298 
299   GlobalObject *getGlobal() const {
300     return GO;
301   }
302 
303   bool isJumpTableCanonical() const {
304     return IsJumpTableCanonical;
305   }
306 
307   bool isExported() const {
308     return IsExported;
309   }
310 
311   ArrayRef<MDNode *> types() const {
312     return makeArrayRef(getTrailingObjects<MDNode *>(), NTypes);
313   }
314 };
315 
316 struct ICallBranchFunnel final
317     : TrailingObjects<ICallBranchFunnel, GlobalTypeMember *> {
318   static ICallBranchFunnel *create(BumpPtrAllocator &Alloc, CallInst *CI,
319                                    ArrayRef<GlobalTypeMember *> Targets,
320                                    unsigned UniqueId) {
321     auto *Call = static_cast<ICallBranchFunnel *>(
322         Alloc.Allocate(totalSizeToAlloc<GlobalTypeMember *>(Targets.size()),
323                        alignof(ICallBranchFunnel)));
324     Call->CI = CI;
325     Call->UniqueId = UniqueId;
326     Call->NTargets = Targets.size();
327     std::uninitialized_copy(Targets.begin(), Targets.end(),
328                             Call->getTrailingObjects<GlobalTypeMember *>());
329     return Call;
330   }
331 
332   CallInst *CI;
333   ArrayRef<GlobalTypeMember *> targets() const {
334     return makeArrayRef(getTrailingObjects<GlobalTypeMember *>(), NTargets);
335   }
336 
337   unsigned UniqueId;
338 
339 private:
340   size_t NTargets;
341 };
342 
343 struct ScopedSaveAliaseesAndUsed {
344   Module &M;
345   SmallVector<GlobalValue *, 4> Used, CompilerUsed;
346   std::vector<std::pair<GlobalAlias *, Function *>> FunctionAliases;
347   std::vector<std::pair<GlobalIFunc *, Function *>> ResolverIFuncs;
348 
349   ScopedSaveAliaseesAndUsed(Module &M) : M(M) {
350     // The users of this class want to replace all function references except
351     // for aliases and llvm.used/llvm.compiler.used with references to a jump
352     // table. We avoid replacing aliases in order to avoid introducing a double
353     // indirection (or an alias pointing to a declaration in ThinLTO mode), and
354     // we avoid replacing llvm.used/llvm.compiler.used because these global
355     // variables describe properties of the global, not the jump table (besides,
356     // offseted references to the jump table in llvm.used are invalid).
357     // Unfortunately, LLVM doesn't have a "RAUW except for these (possibly
358     // indirect) users", so what we do is save the list of globals referenced by
359     // llvm.used/llvm.compiler.used and aliases, erase the used lists, let RAUW
360     // replace the aliasees and then set them back to their original values at
361     // the end.
362     if (GlobalVariable *GV = collectUsedGlobalVariables(M, Used, false))
363       GV->eraseFromParent();
364     if (GlobalVariable *GV = collectUsedGlobalVariables(M, CompilerUsed, true))
365       GV->eraseFromParent();
366 
367     for (auto &GA : M.aliases()) {
368       // FIXME: This should look past all aliases not just interposable ones,
369       // see discussion on D65118.
370       if (auto *F = dyn_cast<Function>(GA.getAliasee()->stripPointerCasts()))
371         FunctionAliases.push_back({&GA, F});
372     }
373 
374     for (auto &GI : M.ifuncs())
375       if (auto *F = dyn_cast<Function>(GI.getResolver()->stripPointerCasts()))
376         ResolverIFuncs.push_back({&GI, F});
377   }
378 
379   ~ScopedSaveAliaseesAndUsed() {
380     appendToUsed(M, Used);
381     appendToCompilerUsed(M, CompilerUsed);
382 
383     for (auto P : FunctionAliases)
384       P.first->setAliasee(
385           ConstantExpr::getBitCast(P.second, P.first->getType()));
386 
387     for (auto P : ResolverIFuncs) {
388       // This does not preserve pointer casts that may have been stripped by the
389       // constructor, but the resolver's type is different from that of the
390       // ifunc anyway.
391       P.first->setResolver(P.second);
392     }
393   }
394 };
395 
396 class LowerTypeTestsModule {
397   Module &M;
398 
399   ModuleSummaryIndex *ExportSummary;
400   const ModuleSummaryIndex *ImportSummary;
401   // Set when the client has invoked this to simply drop all type test assume
402   // sequences.
403   bool DropTypeTests;
404 
405   Triple::ArchType Arch;
406   Triple::OSType OS;
407   Triple::ObjectFormatType ObjectFormat;
408 
409   IntegerType *Int1Ty = Type::getInt1Ty(M.getContext());
410   IntegerType *Int8Ty = Type::getInt8Ty(M.getContext());
411   PointerType *Int8PtrTy = Type::getInt8PtrTy(M.getContext());
412   ArrayType *Int8Arr0Ty = ArrayType::get(Type::getInt8Ty(M.getContext()), 0);
413   IntegerType *Int32Ty = Type::getInt32Ty(M.getContext());
414   PointerType *Int32PtrTy = PointerType::getUnqual(Int32Ty);
415   IntegerType *Int64Ty = Type::getInt64Ty(M.getContext());
416   IntegerType *IntPtrTy = M.getDataLayout().getIntPtrType(M.getContext(), 0);
417 
418   // Indirect function call index assignment counter for WebAssembly
419   uint64_t IndirectIndex = 1;
420 
421   // Mapping from type identifiers to the call sites that test them, as well as
422   // whether the type identifier needs to be exported to ThinLTO backends as
423   // part of the regular LTO phase of the ThinLTO pipeline (see exportTypeId).
424   struct TypeIdUserInfo {
425     std::vector<CallInst *> CallSites;
426     bool IsExported = false;
427   };
428   DenseMap<Metadata *, TypeIdUserInfo> TypeIdUsers;
429 
430   /// This structure describes how to lower type tests for a particular type
431   /// identifier. It is either built directly from the global analysis (during
432   /// regular LTO or the regular LTO phase of ThinLTO), or indirectly using type
433   /// identifier summaries and external symbol references (in ThinLTO backends).
434   struct TypeIdLowering {
435     TypeTestResolution::Kind TheKind = TypeTestResolution::Unsat;
436 
437     /// All except Unsat: the start address within the combined global.
438     Constant *OffsetedGlobal;
439 
440     /// ByteArray, Inline, AllOnes: log2 of the required global alignment
441     /// relative to the start address.
442     Constant *AlignLog2;
443 
444     /// ByteArray, Inline, AllOnes: one less than the size of the memory region
445     /// covering members of this type identifier as a multiple of 2^AlignLog2.
446     Constant *SizeM1;
447 
448     /// ByteArray: the byte array to test the address against.
449     Constant *TheByteArray;
450 
451     /// ByteArray: the bit mask to apply to bytes loaded from the byte array.
452     Constant *BitMask;
453 
454     /// Inline: the bit mask to test the address against.
455     Constant *InlineBits;
456   };
457 
458   std::vector<ByteArrayInfo> ByteArrayInfos;
459 
460   Function *WeakInitializerFn = nullptr;
461 
462   bool shouldExportConstantsAsAbsoluteSymbols();
463   uint8_t *exportTypeId(StringRef TypeId, const TypeIdLowering &TIL);
464   TypeIdLowering importTypeId(StringRef TypeId);
465   void importTypeTest(CallInst *CI);
466   void importFunction(Function *F, bool isJumpTableCanonical,
467                       std::vector<GlobalAlias *> &AliasesToErase);
468 
469   BitSetInfo
470   buildBitSet(Metadata *TypeId,
471               const DenseMap<GlobalTypeMember *, uint64_t> &GlobalLayout);
472   ByteArrayInfo *createByteArray(BitSetInfo &BSI);
473   void allocateByteArrays();
474   Value *createBitSetTest(IRBuilder<> &B, const TypeIdLowering &TIL,
475                           Value *BitOffset);
476   void lowerTypeTestCalls(
477       ArrayRef<Metadata *> TypeIds, Constant *CombinedGlobalAddr,
478       const DenseMap<GlobalTypeMember *, uint64_t> &GlobalLayout);
479   Value *lowerTypeTestCall(Metadata *TypeId, CallInst *CI,
480                            const TypeIdLowering &TIL);
481 
482   void buildBitSetsFromGlobalVariables(ArrayRef<Metadata *> TypeIds,
483                                        ArrayRef<GlobalTypeMember *> Globals);
484   unsigned getJumpTableEntrySize();
485   Type *getJumpTableEntryType();
486   void createJumpTableEntry(raw_ostream &AsmOS, raw_ostream &ConstraintOS,
487                             Triple::ArchType JumpTableArch,
488                             SmallVectorImpl<Value *> &AsmArgs, Function *Dest);
489   void verifyTypeMDNode(GlobalObject *GO, MDNode *Type);
490   void buildBitSetsFromFunctions(ArrayRef<Metadata *> TypeIds,
491                                  ArrayRef<GlobalTypeMember *> Functions);
492   void buildBitSetsFromFunctionsNative(ArrayRef<Metadata *> TypeIds,
493                                        ArrayRef<GlobalTypeMember *> Functions);
494   void buildBitSetsFromFunctionsWASM(ArrayRef<Metadata *> TypeIds,
495                                      ArrayRef<GlobalTypeMember *> Functions);
496   void
497   buildBitSetsFromDisjointSet(ArrayRef<Metadata *> TypeIds,
498                               ArrayRef<GlobalTypeMember *> Globals,
499                               ArrayRef<ICallBranchFunnel *> ICallBranchFunnels);
500 
501   void replaceWeakDeclarationWithJumpTablePtr(Function *F, Constant *JT,
502                                               bool IsJumpTableCanonical);
503   void moveInitializerToModuleConstructor(GlobalVariable *GV);
504   void findGlobalVariableUsersOf(Constant *C,
505                                  SmallSetVector<GlobalVariable *, 8> &Out);
506 
507   void createJumpTable(Function *F, ArrayRef<GlobalTypeMember *> Functions);
508 
509   /// replaceCfiUses - Go through the uses list for this definition
510   /// and make each use point to "V" instead of "this" when the use is outside
511   /// the block. 'This's use list is expected to have at least one element.
512   /// Unlike replaceAllUsesWith this function skips blockaddr and direct call
513   /// uses.
514   void replaceCfiUses(Function *Old, Value *New, bool IsJumpTableCanonical);
515 
516   /// replaceDirectCalls - Go through the uses list for this definition and
517   /// replace each use, which is a direct function call.
518   void replaceDirectCalls(Value *Old, Value *New);
519 
520 public:
521   LowerTypeTestsModule(Module &M, ModuleSummaryIndex *ExportSummary,
522                        const ModuleSummaryIndex *ImportSummary,
523                        bool DropTypeTests);
524 
525   bool lower();
526 
527   // Lower the module using the action and summary passed as command line
528   // arguments. For testing purposes only.
529   static bool runForTesting(Module &M);
530 };
531 } // end anonymous namespace
532 
533 /// Build a bit set for TypeId using the object layouts in
534 /// GlobalLayout.
535 BitSetInfo LowerTypeTestsModule::buildBitSet(
536     Metadata *TypeId,
537     const DenseMap<GlobalTypeMember *, uint64_t> &GlobalLayout) {
538   BitSetBuilder BSB;
539 
540   // Compute the byte offset of each address associated with this type
541   // identifier.
542   for (auto &GlobalAndOffset : GlobalLayout) {
543     for (MDNode *Type : GlobalAndOffset.first->types()) {
544       if (Type->getOperand(1) != TypeId)
545         continue;
546       uint64_t Offset =
547           cast<ConstantInt>(
548               cast<ConstantAsMetadata>(Type->getOperand(0))->getValue())
549               ->getZExtValue();
550       BSB.addOffset(GlobalAndOffset.second + Offset);
551     }
552   }
553 
554   return BSB.build();
555 }
556 
557 /// Build a test that bit BitOffset mod sizeof(Bits)*8 is set in
558 /// Bits. This pattern matches to the bt instruction on x86.
559 static Value *createMaskedBitTest(IRBuilder<> &B, Value *Bits,
560                                   Value *BitOffset) {
561   auto BitsType = cast<IntegerType>(Bits->getType());
562   unsigned BitWidth = BitsType->getBitWidth();
563 
564   BitOffset = B.CreateZExtOrTrunc(BitOffset, BitsType);
565   Value *BitIndex =
566       B.CreateAnd(BitOffset, ConstantInt::get(BitsType, BitWidth - 1));
567   Value *BitMask = B.CreateShl(ConstantInt::get(BitsType, 1), BitIndex);
568   Value *MaskedBits = B.CreateAnd(Bits, BitMask);
569   return B.CreateICmpNE(MaskedBits, ConstantInt::get(BitsType, 0));
570 }
571 
572 ByteArrayInfo *LowerTypeTestsModule::createByteArray(BitSetInfo &BSI) {
573   // Create globals to stand in for byte arrays and masks. These never actually
574   // get initialized, we RAUW and erase them later in allocateByteArrays() once
575   // we know the offset and mask to use.
576   auto ByteArrayGlobal = new GlobalVariable(
577       M, Int8Ty, /*isConstant=*/true, GlobalValue::PrivateLinkage, nullptr);
578   auto MaskGlobal = new GlobalVariable(M, Int8Ty, /*isConstant=*/true,
579                                        GlobalValue::PrivateLinkage, nullptr);
580 
581   ByteArrayInfos.emplace_back();
582   ByteArrayInfo *BAI = &ByteArrayInfos.back();
583 
584   BAI->Bits = BSI.Bits;
585   BAI->BitSize = BSI.BitSize;
586   BAI->ByteArray = ByteArrayGlobal;
587   BAI->MaskGlobal = MaskGlobal;
588   return BAI;
589 }
590 
591 void LowerTypeTestsModule::allocateByteArrays() {
592   llvm::stable_sort(ByteArrayInfos,
593                     [](const ByteArrayInfo &BAI1, const ByteArrayInfo &BAI2) {
594                       return BAI1.BitSize > BAI2.BitSize;
595                     });
596 
597   std::vector<uint64_t> ByteArrayOffsets(ByteArrayInfos.size());
598 
599   ByteArrayBuilder BAB;
600   for (unsigned I = 0; I != ByteArrayInfos.size(); ++I) {
601     ByteArrayInfo *BAI = &ByteArrayInfos[I];
602 
603     uint8_t Mask;
604     BAB.allocate(BAI->Bits, BAI->BitSize, ByteArrayOffsets[I], Mask);
605 
606     BAI->MaskGlobal->replaceAllUsesWith(
607         ConstantExpr::getIntToPtr(ConstantInt::get(Int8Ty, Mask), Int8PtrTy));
608     BAI->MaskGlobal->eraseFromParent();
609     if (BAI->MaskPtr)
610       *BAI->MaskPtr = Mask;
611   }
612 
613   Constant *ByteArrayConst = ConstantDataArray::get(M.getContext(), BAB.Bytes);
614   auto ByteArray =
615       new GlobalVariable(M, ByteArrayConst->getType(), /*isConstant=*/true,
616                          GlobalValue::PrivateLinkage, ByteArrayConst);
617 
618   for (unsigned I = 0; I != ByteArrayInfos.size(); ++I) {
619     ByteArrayInfo *BAI = &ByteArrayInfos[I];
620 
621     Constant *Idxs[] = {ConstantInt::get(IntPtrTy, 0),
622                         ConstantInt::get(IntPtrTy, ByteArrayOffsets[I])};
623     Constant *GEP = ConstantExpr::getInBoundsGetElementPtr(
624         ByteArrayConst->getType(), ByteArray, Idxs);
625 
626     // Create an alias instead of RAUW'ing the gep directly. On x86 this ensures
627     // that the pc-relative displacement is folded into the lea instead of the
628     // test instruction getting another displacement.
629     GlobalAlias *Alias = GlobalAlias::create(
630         Int8Ty, 0, GlobalValue::PrivateLinkage, "bits", GEP, &M);
631     BAI->ByteArray->replaceAllUsesWith(Alias);
632     BAI->ByteArray->eraseFromParent();
633   }
634 
635   ByteArraySizeBits = BAB.BitAllocs[0] + BAB.BitAllocs[1] + BAB.BitAllocs[2] +
636                       BAB.BitAllocs[3] + BAB.BitAllocs[4] + BAB.BitAllocs[5] +
637                       BAB.BitAllocs[6] + BAB.BitAllocs[7];
638   ByteArraySizeBytes = BAB.Bytes.size();
639 }
640 
641 /// Build a test that bit BitOffset is set in the type identifier that was
642 /// lowered to TIL, which must be either an Inline or a ByteArray.
643 Value *LowerTypeTestsModule::createBitSetTest(IRBuilder<> &B,
644                                               const TypeIdLowering &TIL,
645                                               Value *BitOffset) {
646   if (TIL.TheKind == TypeTestResolution::Inline) {
647     // If the bit set is sufficiently small, we can avoid a load by bit testing
648     // a constant.
649     return createMaskedBitTest(B, TIL.InlineBits, BitOffset);
650   } else {
651     Constant *ByteArray = TIL.TheByteArray;
652     if (AvoidReuse && !ImportSummary) {
653       // Each use of the byte array uses a different alias. This makes the
654       // backend less likely to reuse previously computed byte array addresses,
655       // improving the security of the CFI mechanism based on this pass.
656       // This won't work when importing because TheByteArray is external.
657       ByteArray = GlobalAlias::create(Int8Ty, 0, GlobalValue::PrivateLinkage,
658                                       "bits_use", ByteArray, &M);
659     }
660 
661     Value *ByteAddr = B.CreateGEP(Int8Ty, ByteArray, BitOffset);
662     Value *Byte = B.CreateLoad(Int8Ty, ByteAddr);
663 
664     Value *ByteAndMask =
665         B.CreateAnd(Byte, ConstantExpr::getPtrToInt(TIL.BitMask, Int8Ty));
666     return B.CreateICmpNE(ByteAndMask, ConstantInt::get(Int8Ty, 0));
667   }
668 }
669 
670 static bool isKnownTypeIdMember(Metadata *TypeId, const DataLayout &DL,
671                                 Value *V, uint64_t COffset) {
672   if (auto GV = dyn_cast<GlobalObject>(V)) {
673     SmallVector<MDNode *, 2> Types;
674     GV->getMetadata(LLVMContext::MD_type, Types);
675     for (MDNode *Type : Types) {
676       if (Type->getOperand(1) != TypeId)
677         continue;
678       uint64_t Offset =
679           cast<ConstantInt>(
680               cast<ConstantAsMetadata>(Type->getOperand(0))->getValue())
681               ->getZExtValue();
682       if (COffset == Offset)
683         return true;
684     }
685     return false;
686   }
687 
688   if (auto GEP = dyn_cast<GEPOperator>(V)) {
689     APInt APOffset(DL.getPointerSizeInBits(0), 0);
690     bool Result = GEP->accumulateConstantOffset(DL, APOffset);
691     if (!Result)
692       return false;
693     COffset += APOffset.getZExtValue();
694     return isKnownTypeIdMember(TypeId, DL, GEP->getPointerOperand(), COffset);
695   }
696 
697   if (auto Op = dyn_cast<Operator>(V)) {
698     if (Op->getOpcode() == Instruction::BitCast)
699       return isKnownTypeIdMember(TypeId, DL, Op->getOperand(0), COffset);
700 
701     if (Op->getOpcode() == Instruction::Select)
702       return isKnownTypeIdMember(TypeId, DL, Op->getOperand(1), COffset) &&
703              isKnownTypeIdMember(TypeId, DL, Op->getOperand(2), COffset);
704   }
705 
706   return false;
707 }
708 
709 /// Lower a llvm.type.test call to its implementation. Returns the value to
710 /// replace the call with.
711 Value *LowerTypeTestsModule::lowerTypeTestCall(Metadata *TypeId, CallInst *CI,
712                                                const TypeIdLowering &TIL) {
713   // Delay lowering if the resolution is currently unknown.
714   if (TIL.TheKind == TypeTestResolution::Unknown)
715     return nullptr;
716   if (TIL.TheKind == TypeTestResolution::Unsat)
717     return ConstantInt::getFalse(M.getContext());
718 
719   Value *Ptr = CI->getArgOperand(0);
720   const DataLayout &DL = M.getDataLayout();
721   if (isKnownTypeIdMember(TypeId, DL, Ptr, 0))
722     return ConstantInt::getTrue(M.getContext());
723 
724   BasicBlock *InitialBB = CI->getParent();
725 
726   IRBuilder<> B(CI);
727 
728   Value *PtrAsInt = B.CreatePtrToInt(Ptr, IntPtrTy);
729 
730   Constant *OffsetedGlobalAsInt =
731       ConstantExpr::getPtrToInt(TIL.OffsetedGlobal, IntPtrTy);
732   if (TIL.TheKind == TypeTestResolution::Single)
733     return B.CreateICmpEQ(PtrAsInt, OffsetedGlobalAsInt);
734 
735   Value *PtrOffset = B.CreateSub(PtrAsInt, OffsetedGlobalAsInt);
736 
737   // We need to check that the offset both falls within our range and is
738   // suitably aligned. We can check both properties at the same time by
739   // performing a right rotate by log2(alignment) followed by an integer
740   // comparison against the bitset size. The rotate will move the lower
741   // order bits that need to be zero into the higher order bits of the
742   // result, causing the comparison to fail if they are nonzero. The rotate
743   // also conveniently gives us a bit offset to use during the load from
744   // the bitset.
745   Value *OffsetSHR =
746       B.CreateLShr(PtrOffset, ConstantExpr::getZExt(TIL.AlignLog2, IntPtrTy));
747   Value *OffsetSHL = B.CreateShl(
748       PtrOffset, ConstantExpr::getZExt(
749                      ConstantExpr::getSub(
750                          ConstantInt::get(Int8Ty, DL.getPointerSizeInBits(0)),
751                          TIL.AlignLog2),
752                      IntPtrTy));
753   Value *BitOffset = B.CreateOr(OffsetSHR, OffsetSHL);
754 
755   Value *OffsetInRange = B.CreateICmpULE(BitOffset, TIL.SizeM1);
756 
757   // If the bit set is all ones, testing against it is unnecessary.
758   if (TIL.TheKind == TypeTestResolution::AllOnes)
759     return OffsetInRange;
760 
761   // See if the intrinsic is used in the following common pattern:
762   //   br(llvm.type.test(...), thenbb, elsebb)
763   // where nothing happens between the type test and the br.
764   // If so, create slightly simpler IR.
765   if (CI->hasOneUse())
766     if (auto *Br = dyn_cast<BranchInst>(*CI->user_begin()))
767       if (CI->getNextNode() == Br) {
768         BasicBlock *Then = InitialBB->splitBasicBlock(CI->getIterator());
769         BasicBlock *Else = Br->getSuccessor(1);
770         BranchInst *NewBr = BranchInst::Create(Then, Else, OffsetInRange);
771         NewBr->setMetadata(LLVMContext::MD_prof,
772                            Br->getMetadata(LLVMContext::MD_prof));
773         ReplaceInstWithInst(InitialBB->getTerminator(), NewBr);
774 
775         // Update phis in Else resulting from InitialBB being split
776         for (auto &Phi : Else->phis())
777           Phi.addIncoming(Phi.getIncomingValueForBlock(Then), InitialBB);
778 
779         IRBuilder<> ThenB(CI);
780         return createBitSetTest(ThenB, TIL, BitOffset);
781       }
782 
783   IRBuilder<> ThenB(SplitBlockAndInsertIfThen(OffsetInRange, CI, false));
784 
785   // Now that we know that the offset is in range and aligned, load the
786   // appropriate bit from the bitset.
787   Value *Bit = createBitSetTest(ThenB, TIL, BitOffset);
788 
789   // The value we want is 0 if we came directly from the initial block
790   // (having failed the range or alignment checks), or the loaded bit if
791   // we came from the block in which we loaded it.
792   B.SetInsertPoint(CI);
793   PHINode *P = B.CreatePHI(Int1Ty, 2);
794   P->addIncoming(ConstantInt::get(Int1Ty, 0), InitialBB);
795   P->addIncoming(Bit, ThenB.GetInsertBlock());
796   return P;
797 }
798 
799 /// Given a disjoint set of type identifiers and globals, lay out the globals,
800 /// build the bit sets and lower the llvm.type.test calls.
801 void LowerTypeTestsModule::buildBitSetsFromGlobalVariables(
802     ArrayRef<Metadata *> TypeIds, ArrayRef<GlobalTypeMember *> Globals) {
803   // Build a new global with the combined contents of the referenced globals.
804   // This global is a struct whose even-indexed elements contain the original
805   // contents of the referenced globals and whose odd-indexed elements contain
806   // any padding required to align the next element to the next power of 2 plus
807   // any additional padding required to meet its alignment requirements.
808   std::vector<Constant *> GlobalInits;
809   const DataLayout &DL = M.getDataLayout();
810   DenseMap<GlobalTypeMember *, uint64_t> GlobalLayout;
811   Align MaxAlign;
812   uint64_t CurOffset = 0;
813   uint64_t DesiredPadding = 0;
814   for (GlobalTypeMember *G : Globals) {
815     auto *GV = cast<GlobalVariable>(G->getGlobal());
816     Align Alignment =
817         DL.getValueOrABITypeAlignment(GV->getAlign(), GV->getValueType());
818     MaxAlign = std::max(MaxAlign, Alignment);
819     uint64_t GVOffset = alignTo(CurOffset + DesiredPadding, Alignment);
820     GlobalLayout[G] = GVOffset;
821     if (GVOffset != 0) {
822       uint64_t Padding = GVOffset - CurOffset;
823       GlobalInits.push_back(
824           ConstantAggregateZero::get(ArrayType::get(Int8Ty, Padding)));
825     }
826 
827     GlobalInits.push_back(GV->getInitializer());
828     uint64_t InitSize = DL.getTypeAllocSize(GV->getValueType());
829     CurOffset = GVOffset + InitSize;
830 
831     // Compute the amount of padding that we'd like for the next element.
832     DesiredPadding = NextPowerOf2(InitSize - 1) - InitSize;
833 
834     // Experiments of different caps with Chromium on both x64 and ARM64
835     // have shown that the 32-byte cap generates the smallest binary on
836     // both platforms while different caps yield similar performance.
837     // (see https://lists.llvm.org/pipermail/llvm-dev/2018-July/124694.html)
838     if (DesiredPadding > 32)
839       DesiredPadding = alignTo(InitSize, 32) - InitSize;
840   }
841 
842   Constant *NewInit = ConstantStruct::getAnon(M.getContext(), GlobalInits);
843   auto *CombinedGlobal =
844       new GlobalVariable(M, NewInit->getType(), /*isConstant=*/true,
845                          GlobalValue::PrivateLinkage, NewInit);
846   CombinedGlobal->setAlignment(MaxAlign);
847 
848   StructType *NewTy = cast<StructType>(NewInit->getType());
849   lowerTypeTestCalls(TypeIds, CombinedGlobal, GlobalLayout);
850 
851   // Build aliases pointing to offsets into the combined global for each
852   // global from which we built the combined global, and replace references
853   // to the original globals with references to the aliases.
854   for (unsigned I = 0; I != Globals.size(); ++I) {
855     GlobalVariable *GV = cast<GlobalVariable>(Globals[I]->getGlobal());
856 
857     // Multiply by 2 to account for padding elements.
858     Constant *CombinedGlobalIdxs[] = {ConstantInt::get(Int32Ty, 0),
859                                       ConstantInt::get(Int32Ty, I * 2)};
860     Constant *CombinedGlobalElemPtr = ConstantExpr::getGetElementPtr(
861         NewInit->getType(), CombinedGlobal, CombinedGlobalIdxs);
862     assert(GV->getType()->getAddressSpace() == 0);
863     GlobalAlias *GAlias =
864         GlobalAlias::create(NewTy->getElementType(I * 2), 0, GV->getLinkage(),
865                             "", CombinedGlobalElemPtr, &M);
866     GAlias->setVisibility(GV->getVisibility());
867     GAlias->takeName(GV);
868     GV->replaceAllUsesWith(GAlias);
869     GV->eraseFromParent();
870   }
871 }
872 
873 bool LowerTypeTestsModule::shouldExportConstantsAsAbsoluteSymbols() {
874   return (Arch == Triple::x86 || Arch == Triple::x86_64) &&
875          ObjectFormat == Triple::ELF;
876 }
877 
878 /// Export the given type identifier so that ThinLTO backends may import it.
879 /// Type identifiers are exported by adding coarse-grained information about how
880 /// to test the type identifier to the summary, and creating symbols in the
881 /// object file (aliases and absolute symbols) containing fine-grained
882 /// information about the type identifier.
883 ///
884 /// Returns a pointer to the location in which to store the bitmask, if
885 /// applicable.
886 uint8_t *LowerTypeTestsModule::exportTypeId(StringRef TypeId,
887                                             const TypeIdLowering &TIL) {
888   TypeTestResolution &TTRes =
889       ExportSummary->getOrInsertTypeIdSummary(TypeId).TTRes;
890   TTRes.TheKind = TIL.TheKind;
891 
892   auto ExportGlobal = [&](StringRef Name, Constant *C) {
893     GlobalAlias *GA =
894         GlobalAlias::create(Int8Ty, 0, GlobalValue::ExternalLinkage,
895                             "__typeid_" + TypeId + "_" + Name, C, &M);
896     GA->setVisibility(GlobalValue::HiddenVisibility);
897   };
898 
899   auto ExportConstant = [&](StringRef Name, uint64_t &Storage, Constant *C) {
900     if (shouldExportConstantsAsAbsoluteSymbols())
901       ExportGlobal(Name, ConstantExpr::getIntToPtr(C, Int8PtrTy));
902     else
903       Storage = cast<ConstantInt>(C)->getZExtValue();
904   };
905 
906   if (TIL.TheKind != TypeTestResolution::Unsat)
907     ExportGlobal("global_addr", TIL.OffsetedGlobal);
908 
909   if (TIL.TheKind == TypeTestResolution::ByteArray ||
910       TIL.TheKind == TypeTestResolution::Inline ||
911       TIL.TheKind == TypeTestResolution::AllOnes) {
912     ExportConstant("align", TTRes.AlignLog2, TIL.AlignLog2);
913     ExportConstant("size_m1", TTRes.SizeM1, TIL.SizeM1);
914 
915     uint64_t BitSize = cast<ConstantInt>(TIL.SizeM1)->getZExtValue() + 1;
916     if (TIL.TheKind == TypeTestResolution::Inline)
917       TTRes.SizeM1BitWidth = (BitSize <= 32) ? 5 : 6;
918     else
919       TTRes.SizeM1BitWidth = (BitSize <= 128) ? 7 : 32;
920   }
921 
922   if (TIL.TheKind == TypeTestResolution::ByteArray) {
923     ExportGlobal("byte_array", TIL.TheByteArray);
924     if (shouldExportConstantsAsAbsoluteSymbols())
925       ExportGlobal("bit_mask", TIL.BitMask);
926     else
927       return &TTRes.BitMask;
928   }
929 
930   if (TIL.TheKind == TypeTestResolution::Inline)
931     ExportConstant("inline_bits", TTRes.InlineBits, TIL.InlineBits);
932 
933   return nullptr;
934 }
935 
936 LowerTypeTestsModule::TypeIdLowering
937 LowerTypeTestsModule::importTypeId(StringRef TypeId) {
938   const TypeIdSummary *TidSummary = ImportSummary->getTypeIdSummary(TypeId);
939   if (!TidSummary)
940     return {}; // Unsat: no globals match this type id.
941   const TypeTestResolution &TTRes = TidSummary->TTRes;
942 
943   TypeIdLowering TIL;
944   TIL.TheKind = TTRes.TheKind;
945 
946   auto ImportGlobal = [&](StringRef Name) {
947     // Give the global a type of length 0 so that it is not assumed not to alias
948     // with any other global.
949     Constant *C = M.getOrInsertGlobal(("__typeid_" + TypeId + "_" + Name).str(),
950                                       Int8Arr0Ty);
951     if (auto *GV = dyn_cast<GlobalVariable>(C))
952       GV->setVisibility(GlobalValue::HiddenVisibility);
953     C = ConstantExpr::getBitCast(C, Int8PtrTy);
954     return C;
955   };
956 
957   auto ImportConstant = [&](StringRef Name, uint64_t Const, unsigned AbsWidth,
958                             Type *Ty) {
959     if (!shouldExportConstantsAsAbsoluteSymbols()) {
960       Constant *C =
961           ConstantInt::get(isa<IntegerType>(Ty) ? Ty : Int64Ty, Const);
962       if (!isa<IntegerType>(Ty))
963         C = ConstantExpr::getIntToPtr(C, Ty);
964       return C;
965     }
966 
967     Constant *C = ImportGlobal(Name);
968     auto *GV = cast<GlobalVariable>(C->stripPointerCasts());
969     if (isa<IntegerType>(Ty))
970       C = ConstantExpr::getPtrToInt(C, Ty);
971     if (GV->getMetadata(LLVMContext::MD_absolute_symbol))
972       return C;
973 
974     auto SetAbsRange = [&](uint64_t Min, uint64_t Max) {
975       auto *MinC = ConstantAsMetadata::get(ConstantInt::get(IntPtrTy, Min));
976       auto *MaxC = ConstantAsMetadata::get(ConstantInt::get(IntPtrTy, Max));
977       GV->setMetadata(LLVMContext::MD_absolute_symbol,
978                       MDNode::get(M.getContext(), {MinC, MaxC}));
979     };
980     if (AbsWidth == IntPtrTy->getBitWidth())
981       SetAbsRange(~0ull, ~0ull); // Full set.
982     else
983       SetAbsRange(0, 1ull << AbsWidth);
984     return C;
985   };
986 
987   if (TIL.TheKind != TypeTestResolution::Unsat)
988     TIL.OffsetedGlobal = ImportGlobal("global_addr");
989 
990   if (TIL.TheKind == TypeTestResolution::ByteArray ||
991       TIL.TheKind == TypeTestResolution::Inline ||
992       TIL.TheKind == TypeTestResolution::AllOnes) {
993     TIL.AlignLog2 = ImportConstant("align", TTRes.AlignLog2, 8, Int8Ty);
994     TIL.SizeM1 =
995         ImportConstant("size_m1", TTRes.SizeM1, TTRes.SizeM1BitWidth, IntPtrTy);
996   }
997 
998   if (TIL.TheKind == TypeTestResolution::ByteArray) {
999     TIL.TheByteArray = ImportGlobal("byte_array");
1000     TIL.BitMask = ImportConstant("bit_mask", TTRes.BitMask, 8, Int8PtrTy);
1001   }
1002 
1003   if (TIL.TheKind == TypeTestResolution::Inline)
1004     TIL.InlineBits = ImportConstant(
1005         "inline_bits", TTRes.InlineBits, 1 << TTRes.SizeM1BitWidth,
1006         TTRes.SizeM1BitWidth <= 5 ? Int32Ty : Int64Ty);
1007 
1008   return TIL;
1009 }
1010 
1011 void LowerTypeTestsModule::importTypeTest(CallInst *CI) {
1012   auto TypeIdMDVal = dyn_cast<MetadataAsValue>(CI->getArgOperand(1));
1013   if (!TypeIdMDVal)
1014     report_fatal_error("Second argument of llvm.type.test must be metadata");
1015 
1016   auto TypeIdStr = dyn_cast<MDString>(TypeIdMDVal->getMetadata());
1017   // If this is a local unpromoted type, which doesn't have a metadata string,
1018   // treat as Unknown and delay lowering, so that we can still utilize it for
1019   // later optimizations.
1020   if (!TypeIdStr)
1021     return;
1022 
1023   TypeIdLowering TIL = importTypeId(TypeIdStr->getString());
1024   Value *Lowered = lowerTypeTestCall(TypeIdStr, CI, TIL);
1025   if (Lowered) {
1026     CI->replaceAllUsesWith(Lowered);
1027     CI->eraseFromParent();
1028   }
1029 }
1030 
1031 // ThinLTO backend: the function F has a jump table entry; update this module
1032 // accordingly. isJumpTableCanonical describes the type of the jump table entry.
1033 void LowerTypeTestsModule::importFunction(
1034     Function *F, bool isJumpTableCanonical,
1035     std::vector<GlobalAlias *> &AliasesToErase) {
1036   assert(F->getType()->getAddressSpace() == 0);
1037 
1038   GlobalValue::VisibilityTypes Visibility = F->getVisibility();
1039   std::string Name = std::string(F->getName());
1040 
1041   if (F->isDeclarationForLinker() && isJumpTableCanonical) {
1042     // Non-dso_local functions may be overriden at run time,
1043     // don't short curcuit them
1044     if (F->isDSOLocal()) {
1045       Function *RealF = Function::Create(F->getFunctionType(),
1046                                          GlobalValue::ExternalLinkage,
1047                                          F->getAddressSpace(),
1048                                          Name + ".cfi", &M);
1049       RealF->setVisibility(GlobalVariable::HiddenVisibility);
1050       replaceDirectCalls(F, RealF);
1051     }
1052     return;
1053   }
1054 
1055   Function *FDecl;
1056   if (!isJumpTableCanonical) {
1057     // Either a declaration of an external function or a reference to a locally
1058     // defined jump table.
1059     FDecl = Function::Create(F->getFunctionType(), GlobalValue::ExternalLinkage,
1060                              F->getAddressSpace(), Name + ".cfi_jt", &M);
1061     FDecl->setVisibility(GlobalValue::HiddenVisibility);
1062   } else {
1063     F->setName(Name + ".cfi");
1064     F->setLinkage(GlobalValue::ExternalLinkage);
1065     FDecl = Function::Create(F->getFunctionType(), GlobalValue::ExternalLinkage,
1066                              F->getAddressSpace(), Name, &M);
1067     FDecl->setVisibility(Visibility);
1068     Visibility = GlobalValue::HiddenVisibility;
1069 
1070     // Delete aliases pointing to this function, they'll be re-created in the
1071     // merged output. Don't do it yet though because ScopedSaveAliaseesAndUsed
1072     // will want to reset the aliasees first.
1073     for (auto &U : F->uses()) {
1074       if (auto *A = dyn_cast<GlobalAlias>(U.getUser())) {
1075         Function *AliasDecl = Function::Create(
1076             F->getFunctionType(), GlobalValue::ExternalLinkage,
1077             F->getAddressSpace(), "", &M);
1078         AliasDecl->takeName(A);
1079         A->replaceAllUsesWith(AliasDecl);
1080         AliasesToErase.push_back(A);
1081       }
1082     }
1083   }
1084 
1085   if (F->hasExternalWeakLinkage())
1086     replaceWeakDeclarationWithJumpTablePtr(F, FDecl, isJumpTableCanonical);
1087   else
1088     replaceCfiUses(F, FDecl, isJumpTableCanonical);
1089 
1090   // Set visibility late because it's used in replaceCfiUses() to determine
1091   // whether uses need to to be replaced.
1092   F->setVisibility(Visibility);
1093 }
1094 
1095 void LowerTypeTestsModule::lowerTypeTestCalls(
1096     ArrayRef<Metadata *> TypeIds, Constant *CombinedGlobalAddr,
1097     const DenseMap<GlobalTypeMember *, uint64_t> &GlobalLayout) {
1098   CombinedGlobalAddr = ConstantExpr::getBitCast(CombinedGlobalAddr, Int8PtrTy);
1099 
1100   // For each type identifier in this disjoint set...
1101   for (Metadata *TypeId : TypeIds) {
1102     // Build the bitset.
1103     BitSetInfo BSI = buildBitSet(TypeId, GlobalLayout);
1104     LLVM_DEBUG({
1105       if (auto MDS = dyn_cast<MDString>(TypeId))
1106         dbgs() << MDS->getString() << ": ";
1107       else
1108         dbgs() << "<unnamed>: ";
1109       BSI.print(dbgs());
1110     });
1111 
1112     ByteArrayInfo *BAI = nullptr;
1113     TypeIdLowering TIL;
1114     TIL.OffsetedGlobal = ConstantExpr::getGetElementPtr(
1115         Int8Ty, CombinedGlobalAddr, ConstantInt::get(IntPtrTy, BSI.ByteOffset)),
1116     TIL.AlignLog2 = ConstantInt::get(Int8Ty, BSI.AlignLog2);
1117     TIL.SizeM1 = ConstantInt::get(IntPtrTy, BSI.BitSize - 1);
1118     if (BSI.isAllOnes()) {
1119       TIL.TheKind = (BSI.BitSize == 1) ? TypeTestResolution::Single
1120                                        : TypeTestResolution::AllOnes;
1121     } else if (BSI.BitSize <= 64) {
1122       TIL.TheKind = TypeTestResolution::Inline;
1123       uint64_t InlineBits = 0;
1124       for (auto Bit : BSI.Bits)
1125         InlineBits |= uint64_t(1) << Bit;
1126       if (InlineBits == 0)
1127         TIL.TheKind = TypeTestResolution::Unsat;
1128       else
1129         TIL.InlineBits = ConstantInt::get(
1130             (BSI.BitSize <= 32) ? Int32Ty : Int64Ty, InlineBits);
1131     } else {
1132       TIL.TheKind = TypeTestResolution::ByteArray;
1133       ++NumByteArraysCreated;
1134       BAI = createByteArray(BSI);
1135       TIL.TheByteArray = BAI->ByteArray;
1136       TIL.BitMask = BAI->MaskGlobal;
1137     }
1138 
1139     TypeIdUserInfo &TIUI = TypeIdUsers[TypeId];
1140 
1141     if (TIUI.IsExported) {
1142       uint8_t *MaskPtr = exportTypeId(cast<MDString>(TypeId)->getString(), TIL);
1143       if (BAI)
1144         BAI->MaskPtr = MaskPtr;
1145     }
1146 
1147     // Lower each call to llvm.type.test for this type identifier.
1148     for (CallInst *CI : TIUI.CallSites) {
1149       ++NumTypeTestCallsLowered;
1150       Value *Lowered = lowerTypeTestCall(TypeId, CI, TIL);
1151       if (Lowered) {
1152         CI->replaceAllUsesWith(Lowered);
1153         CI->eraseFromParent();
1154       }
1155     }
1156   }
1157 }
1158 
1159 void LowerTypeTestsModule::verifyTypeMDNode(GlobalObject *GO, MDNode *Type) {
1160   if (Type->getNumOperands() != 2)
1161     report_fatal_error("All operands of type metadata must have 2 elements");
1162 
1163   if (GO->isThreadLocal())
1164     report_fatal_error("Bit set element may not be thread-local");
1165   if (isa<GlobalVariable>(GO) && GO->hasSection())
1166     report_fatal_error(
1167         "A member of a type identifier may not have an explicit section");
1168 
1169   // FIXME: We previously checked that global var member of a type identifier
1170   // must be a definition, but the IR linker may leave type metadata on
1171   // declarations. We should restore this check after fixing PR31759.
1172 
1173   auto OffsetConstMD = dyn_cast<ConstantAsMetadata>(Type->getOperand(0));
1174   if (!OffsetConstMD)
1175     report_fatal_error("Type offset must be a constant");
1176   auto OffsetInt = dyn_cast<ConstantInt>(OffsetConstMD->getValue());
1177   if (!OffsetInt)
1178     report_fatal_error("Type offset must be an integer constant");
1179 }
1180 
1181 static const unsigned kX86JumpTableEntrySize = 8;
1182 static const unsigned kARMJumpTableEntrySize = 4;
1183 static const unsigned kARMBTIJumpTableEntrySize = 8;
1184 static const unsigned kRISCVJumpTableEntrySize = 8;
1185 
1186 unsigned LowerTypeTestsModule::getJumpTableEntrySize() {
1187   switch (Arch) {
1188     case Triple::x86:
1189     case Triple::x86_64:
1190       return kX86JumpTableEntrySize;
1191     case Triple::arm:
1192     case Triple::thumb:
1193       return kARMJumpTableEntrySize;
1194     case Triple::aarch64:
1195       if (const auto *BTE = mdconst::extract_or_null<ConstantInt>(
1196             M.getModuleFlag("branch-target-enforcement")))
1197         if (BTE->getZExtValue())
1198           return kARMBTIJumpTableEntrySize;
1199       return kARMJumpTableEntrySize;
1200     case Triple::riscv32:
1201     case Triple::riscv64:
1202       return kRISCVJumpTableEntrySize;
1203     default:
1204       report_fatal_error("Unsupported architecture for jump tables");
1205   }
1206 }
1207 
1208 // Create a jump table entry for the target. This consists of an instruction
1209 // sequence containing a relative branch to Dest. Appends inline asm text,
1210 // constraints and arguments to AsmOS, ConstraintOS and AsmArgs.
1211 void LowerTypeTestsModule::createJumpTableEntry(
1212     raw_ostream &AsmOS, raw_ostream &ConstraintOS,
1213     Triple::ArchType JumpTableArch, SmallVectorImpl<Value *> &AsmArgs,
1214     Function *Dest) {
1215   unsigned ArgIndex = AsmArgs.size();
1216 
1217   if (JumpTableArch == Triple::x86 || JumpTableArch == Triple::x86_64) {
1218     AsmOS << "jmp ${" << ArgIndex << ":c}@plt\n";
1219     AsmOS << "int3\nint3\nint3\n";
1220   } else if (JumpTableArch == Triple::arm) {
1221     AsmOS << "b $" << ArgIndex << "\n";
1222   } else if (JumpTableArch == Triple::aarch64) {
1223     if (const auto *BTE = mdconst::extract_or_null<ConstantInt>(
1224           Dest->getParent()->getModuleFlag("branch-target-enforcement")))
1225       if (BTE->getZExtValue())
1226         AsmOS << "bti c\n";
1227     AsmOS << "b $" << ArgIndex << "\n";
1228   } else if (JumpTableArch == Triple::thumb) {
1229     AsmOS << "b.w $" << ArgIndex << "\n";
1230   } else if (JumpTableArch == Triple::riscv32 ||
1231              JumpTableArch == Triple::riscv64) {
1232     AsmOS << "tail $" << ArgIndex << "@plt\n";
1233   } else {
1234     report_fatal_error("Unsupported architecture for jump tables");
1235   }
1236 
1237   ConstraintOS << (ArgIndex > 0 ? ",s" : "s");
1238   AsmArgs.push_back(Dest);
1239 }
1240 
1241 Type *LowerTypeTestsModule::getJumpTableEntryType() {
1242   return ArrayType::get(Int8Ty, getJumpTableEntrySize());
1243 }
1244 
1245 /// Given a disjoint set of type identifiers and functions, build the bit sets
1246 /// and lower the llvm.type.test calls, architecture dependently.
1247 void LowerTypeTestsModule::buildBitSetsFromFunctions(
1248     ArrayRef<Metadata *> TypeIds, ArrayRef<GlobalTypeMember *> Functions) {
1249   if (Arch == Triple::x86 || Arch == Triple::x86_64 || Arch == Triple::arm ||
1250       Arch == Triple::thumb || Arch == Triple::aarch64 ||
1251       Arch == Triple::riscv32 || Arch == Triple::riscv64)
1252     buildBitSetsFromFunctionsNative(TypeIds, Functions);
1253   else if (Arch == Triple::wasm32 || Arch == Triple::wasm64)
1254     buildBitSetsFromFunctionsWASM(TypeIds, Functions);
1255   else
1256     report_fatal_error("Unsupported architecture for jump tables");
1257 }
1258 
1259 void LowerTypeTestsModule::moveInitializerToModuleConstructor(
1260     GlobalVariable *GV) {
1261   if (WeakInitializerFn == nullptr) {
1262     WeakInitializerFn = Function::Create(
1263         FunctionType::get(Type::getVoidTy(M.getContext()),
1264                           /* IsVarArg */ false),
1265         GlobalValue::InternalLinkage,
1266         M.getDataLayout().getProgramAddressSpace(),
1267         "__cfi_global_var_init", &M);
1268     BasicBlock *BB =
1269         BasicBlock::Create(M.getContext(), "entry", WeakInitializerFn);
1270     ReturnInst::Create(M.getContext(), BB);
1271     WeakInitializerFn->setSection(
1272         ObjectFormat == Triple::MachO
1273             ? "__TEXT,__StaticInit,regular,pure_instructions"
1274             : ".text.startup");
1275     // This code is equivalent to relocation application, and should run at the
1276     // earliest possible time (i.e. with the highest priority).
1277     appendToGlobalCtors(M, WeakInitializerFn, /* Priority */ 0);
1278   }
1279 
1280   IRBuilder<> IRB(WeakInitializerFn->getEntryBlock().getTerminator());
1281   GV->setConstant(false);
1282   IRB.CreateAlignedStore(GV->getInitializer(), GV, GV->getAlign());
1283   GV->setInitializer(Constant::getNullValue(GV->getValueType()));
1284 }
1285 
1286 void LowerTypeTestsModule::findGlobalVariableUsersOf(
1287     Constant *C, SmallSetVector<GlobalVariable *, 8> &Out) {
1288   for (auto *U : C->users()){
1289     if (auto *GV = dyn_cast<GlobalVariable>(U))
1290       Out.insert(GV);
1291     else if (auto *C2 = dyn_cast<Constant>(U))
1292       findGlobalVariableUsersOf(C2, Out);
1293   }
1294 }
1295 
1296 // Replace all uses of F with (F ? JT : 0).
1297 void LowerTypeTestsModule::replaceWeakDeclarationWithJumpTablePtr(
1298     Function *F, Constant *JT, bool IsJumpTableCanonical) {
1299   // The target expression can not appear in a constant initializer on most
1300   // (all?) targets. Switch to a runtime initializer.
1301   SmallSetVector<GlobalVariable *, 8> GlobalVarUsers;
1302   findGlobalVariableUsersOf(F, GlobalVarUsers);
1303   for (auto GV : GlobalVarUsers)
1304     moveInitializerToModuleConstructor(GV);
1305 
1306   // Can not RAUW F with an expression that uses F. Replace with a temporary
1307   // placeholder first.
1308   Function *PlaceholderFn =
1309       Function::Create(cast<FunctionType>(F->getValueType()),
1310                        GlobalValue::ExternalWeakLinkage,
1311                        F->getAddressSpace(), "", &M);
1312   replaceCfiUses(F, PlaceholderFn, IsJumpTableCanonical);
1313 
1314   Constant *Target = ConstantExpr::getSelect(
1315       ConstantExpr::getICmp(CmpInst::ICMP_NE, F,
1316                             Constant::getNullValue(F->getType())),
1317       JT, Constant::getNullValue(F->getType()));
1318   PlaceholderFn->replaceAllUsesWith(Target);
1319   PlaceholderFn->eraseFromParent();
1320 }
1321 
1322 static bool isThumbFunction(Function *F, Triple::ArchType ModuleArch) {
1323   Attribute TFAttr = F->getFnAttribute("target-features");
1324   if (TFAttr.isValid()) {
1325     SmallVector<StringRef, 6> Features;
1326     TFAttr.getValueAsString().split(Features, ',');
1327     for (StringRef Feature : Features) {
1328       if (Feature == "-thumb-mode")
1329         return false;
1330       else if (Feature == "+thumb-mode")
1331         return true;
1332     }
1333   }
1334 
1335   return ModuleArch == Triple::thumb;
1336 }
1337 
1338 // Each jump table must be either ARM or Thumb as a whole for the bit-test math
1339 // to work. Pick one that matches the majority of members to minimize interop
1340 // veneers inserted by the linker.
1341 static Triple::ArchType
1342 selectJumpTableArmEncoding(ArrayRef<GlobalTypeMember *> Functions,
1343                            Triple::ArchType ModuleArch) {
1344   if (ModuleArch != Triple::arm && ModuleArch != Triple::thumb)
1345     return ModuleArch;
1346 
1347   unsigned ArmCount = 0, ThumbCount = 0;
1348   for (const auto GTM : Functions) {
1349     if (!GTM->isJumpTableCanonical()) {
1350       // PLT stubs are always ARM.
1351       // FIXME: This is the wrong heuristic for non-canonical jump tables.
1352       ++ArmCount;
1353       continue;
1354     }
1355 
1356     Function *F = cast<Function>(GTM->getGlobal());
1357     ++(isThumbFunction(F, ModuleArch) ? ThumbCount : ArmCount);
1358   }
1359 
1360   return ArmCount > ThumbCount ? Triple::arm : Triple::thumb;
1361 }
1362 
1363 void LowerTypeTestsModule::createJumpTable(
1364     Function *F, ArrayRef<GlobalTypeMember *> Functions) {
1365   std::string AsmStr, ConstraintStr;
1366   raw_string_ostream AsmOS(AsmStr), ConstraintOS(ConstraintStr);
1367   SmallVector<Value *, 16> AsmArgs;
1368   AsmArgs.reserve(Functions.size() * 2);
1369 
1370   Triple::ArchType JumpTableArch = selectJumpTableArmEncoding(Functions, Arch);
1371 
1372   for (unsigned I = 0; I != Functions.size(); ++I)
1373     createJumpTableEntry(AsmOS, ConstraintOS, JumpTableArch, AsmArgs,
1374                          cast<Function>(Functions[I]->getGlobal()));
1375 
1376   // Align the whole table by entry size.
1377   F->setAlignment(Align(getJumpTableEntrySize()));
1378   // Skip prologue.
1379   // Disabled on win32 due to https://llvm.org/bugs/show_bug.cgi?id=28641#c3.
1380   // Luckily, this function does not get any prologue even without the
1381   // attribute.
1382   if (OS != Triple::Win32)
1383     F->addFnAttr(Attribute::Naked);
1384   if (JumpTableArch == Triple::arm)
1385     F->addFnAttr("target-features", "-thumb-mode");
1386   if (JumpTableArch == Triple::thumb) {
1387     F->addFnAttr("target-features", "+thumb-mode");
1388     // Thumb jump table assembly needs Thumb2. The following attribute is added
1389     // by Clang for -march=armv7.
1390     F->addFnAttr("target-cpu", "cortex-a8");
1391   }
1392   if (JumpTableArch == Triple::aarch64) {
1393     F->addFnAttr("branch-target-enforcement", "false");
1394     F->addFnAttr("sign-return-address", "none");
1395   }
1396   if (JumpTableArch == Triple::riscv32 || JumpTableArch == Triple::riscv64) {
1397     // Make sure the jump table assembly is not modified by the assembler or
1398     // the linker.
1399     F->addFnAttr("target-features", "-c,-relax");
1400   }
1401   // Make sure we don't emit .eh_frame for this function.
1402   F->addFnAttr(Attribute::NoUnwind);
1403 
1404   BasicBlock *BB = BasicBlock::Create(M.getContext(), "entry", F);
1405   IRBuilder<> IRB(BB);
1406 
1407   SmallVector<Type *, 16> ArgTypes;
1408   ArgTypes.reserve(AsmArgs.size());
1409   for (const auto &Arg : AsmArgs)
1410     ArgTypes.push_back(Arg->getType());
1411   InlineAsm *JumpTableAsm =
1412       InlineAsm::get(FunctionType::get(IRB.getVoidTy(), ArgTypes, false),
1413                      AsmOS.str(), ConstraintOS.str(),
1414                      /*hasSideEffects=*/true);
1415 
1416   IRB.CreateCall(JumpTableAsm, AsmArgs);
1417   IRB.CreateUnreachable();
1418 }
1419 
1420 /// Given a disjoint set of type identifiers and functions, build a jump table
1421 /// for the functions, build the bit sets and lower the llvm.type.test calls.
1422 void LowerTypeTestsModule::buildBitSetsFromFunctionsNative(
1423     ArrayRef<Metadata *> TypeIds, ArrayRef<GlobalTypeMember *> Functions) {
1424   // Unlike the global bitset builder, the function bitset builder cannot
1425   // re-arrange functions in a particular order and base its calculations on the
1426   // layout of the functions' entry points, as we have no idea how large a
1427   // particular function will end up being (the size could even depend on what
1428   // this pass does!) Instead, we build a jump table, which is a block of code
1429   // consisting of one branch instruction for each of the functions in the bit
1430   // set that branches to the target function, and redirect any taken function
1431   // addresses to the corresponding jump table entry. In the object file's
1432   // symbol table, the symbols for the target functions also refer to the jump
1433   // table entries, so that addresses taken outside the module will pass any
1434   // verification done inside the module.
1435   //
1436   // In more concrete terms, suppose we have three functions f, g, h which are
1437   // of the same type, and a function foo that returns their addresses:
1438   //
1439   // f:
1440   // mov 0, %eax
1441   // ret
1442   //
1443   // g:
1444   // mov 1, %eax
1445   // ret
1446   //
1447   // h:
1448   // mov 2, %eax
1449   // ret
1450   //
1451   // foo:
1452   // mov f, %eax
1453   // mov g, %edx
1454   // mov h, %ecx
1455   // ret
1456   //
1457   // We output the jump table as module-level inline asm string. The end result
1458   // will (conceptually) look like this:
1459   //
1460   // f = .cfi.jumptable
1461   // g = .cfi.jumptable + 4
1462   // h = .cfi.jumptable + 8
1463   // .cfi.jumptable:
1464   // jmp f.cfi  ; 5 bytes
1465   // int3       ; 1 byte
1466   // int3       ; 1 byte
1467   // int3       ; 1 byte
1468   // jmp g.cfi  ; 5 bytes
1469   // int3       ; 1 byte
1470   // int3       ; 1 byte
1471   // int3       ; 1 byte
1472   // jmp h.cfi  ; 5 bytes
1473   // int3       ; 1 byte
1474   // int3       ; 1 byte
1475   // int3       ; 1 byte
1476   //
1477   // f.cfi:
1478   // mov 0, %eax
1479   // ret
1480   //
1481   // g.cfi:
1482   // mov 1, %eax
1483   // ret
1484   //
1485   // h.cfi:
1486   // mov 2, %eax
1487   // ret
1488   //
1489   // foo:
1490   // mov f, %eax
1491   // mov g, %edx
1492   // mov h, %ecx
1493   // ret
1494   //
1495   // Because the addresses of f, g, h are evenly spaced at a power of 2, in the
1496   // normal case the check can be carried out using the same kind of simple
1497   // arithmetic that we normally use for globals.
1498 
1499   // FIXME: find a better way to represent the jumptable in the IR.
1500   assert(!Functions.empty());
1501 
1502   // Build a simple layout based on the regular layout of jump tables.
1503   DenseMap<GlobalTypeMember *, uint64_t> GlobalLayout;
1504   unsigned EntrySize = getJumpTableEntrySize();
1505   for (unsigned I = 0; I != Functions.size(); ++I)
1506     GlobalLayout[Functions[I]] = I * EntrySize;
1507 
1508   Function *JumpTableFn =
1509       Function::Create(FunctionType::get(Type::getVoidTy(M.getContext()),
1510                                          /* IsVarArg */ false),
1511                        GlobalValue::PrivateLinkage,
1512                        M.getDataLayout().getProgramAddressSpace(),
1513                        ".cfi.jumptable", &M);
1514   ArrayType *JumpTableType =
1515       ArrayType::get(getJumpTableEntryType(), Functions.size());
1516   auto JumpTable =
1517       ConstantExpr::getPointerCast(JumpTableFn, JumpTableType->getPointerTo(0));
1518 
1519   lowerTypeTestCalls(TypeIds, JumpTable, GlobalLayout);
1520 
1521   {
1522     ScopedSaveAliaseesAndUsed S(M);
1523 
1524     // Build aliases pointing to offsets into the jump table, and replace
1525     // references to the original functions with references to the aliases.
1526     for (unsigned I = 0; I != Functions.size(); ++I) {
1527       Function *F = cast<Function>(Functions[I]->getGlobal());
1528       bool IsJumpTableCanonical = Functions[I]->isJumpTableCanonical();
1529 
1530       Constant *CombinedGlobalElemPtr = ConstantExpr::getBitCast(
1531           ConstantExpr::getInBoundsGetElementPtr(
1532               JumpTableType, JumpTable,
1533               ArrayRef<Constant *>{ConstantInt::get(IntPtrTy, 0),
1534                                    ConstantInt::get(IntPtrTy, I)}),
1535           F->getType());
1536 
1537       const bool IsExported = Functions[I]->isExported();
1538       if (!IsJumpTableCanonical) {
1539         GlobalValue::LinkageTypes LT = IsExported
1540                                            ? GlobalValue::ExternalLinkage
1541                                            : GlobalValue::InternalLinkage;
1542         GlobalAlias *JtAlias = GlobalAlias::create(F->getValueType(), 0, LT,
1543                                                    F->getName() + ".cfi_jt",
1544                                                    CombinedGlobalElemPtr, &M);
1545         if (IsExported)
1546           JtAlias->setVisibility(GlobalValue::HiddenVisibility);
1547         else
1548           appendToUsed(M, {JtAlias});
1549       }
1550 
1551       if (IsExported) {
1552         if (IsJumpTableCanonical)
1553           ExportSummary->cfiFunctionDefs().insert(std::string(F->getName()));
1554         else
1555           ExportSummary->cfiFunctionDecls().insert(std::string(F->getName()));
1556       }
1557 
1558       if (!IsJumpTableCanonical) {
1559         if (F->hasExternalWeakLinkage())
1560           replaceWeakDeclarationWithJumpTablePtr(F, CombinedGlobalElemPtr,
1561                                                  IsJumpTableCanonical);
1562         else
1563           replaceCfiUses(F, CombinedGlobalElemPtr, IsJumpTableCanonical);
1564       } else {
1565         assert(F->getType()->getAddressSpace() == 0);
1566 
1567         GlobalAlias *FAlias =
1568             GlobalAlias::create(F->getValueType(), 0, F->getLinkage(), "",
1569                                 CombinedGlobalElemPtr, &M);
1570         FAlias->setVisibility(F->getVisibility());
1571         FAlias->takeName(F);
1572         if (FAlias->hasName())
1573           F->setName(FAlias->getName() + ".cfi");
1574         replaceCfiUses(F, FAlias, IsJumpTableCanonical);
1575         if (!F->hasLocalLinkage())
1576           F->setVisibility(GlobalVariable::HiddenVisibility);
1577       }
1578     }
1579   }
1580 
1581   createJumpTable(JumpTableFn, Functions);
1582 }
1583 
1584 /// Assign a dummy layout using an incrementing counter, tag each function
1585 /// with its index represented as metadata, and lower each type test to an
1586 /// integer range comparison. During generation of the indirect function call
1587 /// table in the backend, it will assign the given indexes.
1588 /// Note: Dynamic linking is not supported, as the WebAssembly ABI has not yet
1589 /// been finalized.
1590 void LowerTypeTestsModule::buildBitSetsFromFunctionsWASM(
1591     ArrayRef<Metadata *> TypeIds, ArrayRef<GlobalTypeMember *> Functions) {
1592   assert(!Functions.empty());
1593 
1594   // Build consecutive monotonic integer ranges for each call target set
1595   DenseMap<GlobalTypeMember *, uint64_t> GlobalLayout;
1596 
1597   for (GlobalTypeMember *GTM : Functions) {
1598     Function *F = cast<Function>(GTM->getGlobal());
1599 
1600     // Skip functions that are not address taken, to avoid bloating the table
1601     if (!F->hasAddressTaken())
1602       continue;
1603 
1604     // Store metadata with the index for each function
1605     MDNode *MD = MDNode::get(F->getContext(),
1606                              ArrayRef<Metadata *>(ConstantAsMetadata::get(
1607                                  ConstantInt::get(Int64Ty, IndirectIndex))));
1608     F->setMetadata("wasm.index", MD);
1609 
1610     // Assign the counter value
1611     GlobalLayout[GTM] = IndirectIndex++;
1612   }
1613 
1614   // The indirect function table index space starts at zero, so pass a NULL
1615   // pointer as the subtracted "jump table" offset.
1616   lowerTypeTestCalls(TypeIds, ConstantPointerNull::get(Int32PtrTy),
1617                      GlobalLayout);
1618 }
1619 
1620 void LowerTypeTestsModule::buildBitSetsFromDisjointSet(
1621     ArrayRef<Metadata *> TypeIds, ArrayRef<GlobalTypeMember *> Globals,
1622     ArrayRef<ICallBranchFunnel *> ICallBranchFunnels) {
1623   DenseMap<Metadata *, uint64_t> TypeIdIndices;
1624   for (unsigned I = 0; I != TypeIds.size(); ++I)
1625     TypeIdIndices[TypeIds[I]] = I;
1626 
1627   // For each type identifier, build a set of indices that refer to members of
1628   // the type identifier.
1629   std::vector<std::set<uint64_t>> TypeMembers(TypeIds.size());
1630   unsigned GlobalIndex = 0;
1631   DenseMap<GlobalTypeMember *, uint64_t> GlobalIndices;
1632   for (GlobalTypeMember *GTM : Globals) {
1633     for (MDNode *Type : GTM->types()) {
1634       // Type = { offset, type identifier }
1635       auto I = TypeIdIndices.find(Type->getOperand(1));
1636       if (I != TypeIdIndices.end())
1637         TypeMembers[I->second].insert(GlobalIndex);
1638     }
1639     GlobalIndices[GTM] = GlobalIndex;
1640     GlobalIndex++;
1641   }
1642 
1643   for (ICallBranchFunnel *JT : ICallBranchFunnels) {
1644     TypeMembers.emplace_back();
1645     std::set<uint64_t> &TMSet = TypeMembers.back();
1646     for (GlobalTypeMember *T : JT->targets())
1647       TMSet.insert(GlobalIndices[T]);
1648   }
1649 
1650   // Order the sets of indices by size. The GlobalLayoutBuilder works best
1651   // when given small index sets first.
1652   llvm::stable_sort(TypeMembers, [](const std::set<uint64_t> &O1,
1653                                     const std::set<uint64_t> &O2) {
1654     return O1.size() < O2.size();
1655   });
1656 
1657   // Create a GlobalLayoutBuilder and provide it with index sets as layout
1658   // fragments. The GlobalLayoutBuilder tries to lay out members of fragments as
1659   // close together as possible.
1660   GlobalLayoutBuilder GLB(Globals.size());
1661   for (auto &&MemSet : TypeMembers)
1662     GLB.addFragment(MemSet);
1663 
1664   // Build a vector of globals with the computed layout.
1665   bool IsGlobalSet =
1666       Globals.empty() || isa<GlobalVariable>(Globals[0]->getGlobal());
1667   std::vector<GlobalTypeMember *> OrderedGTMs(Globals.size());
1668   auto OGTMI = OrderedGTMs.begin();
1669   for (auto &&F : GLB.Fragments) {
1670     for (auto &&Offset : F) {
1671       if (IsGlobalSet != isa<GlobalVariable>(Globals[Offset]->getGlobal()))
1672         report_fatal_error("Type identifier may not contain both global "
1673                            "variables and functions");
1674       *OGTMI++ = Globals[Offset];
1675     }
1676   }
1677 
1678   // Build the bitsets from this disjoint set.
1679   if (IsGlobalSet)
1680     buildBitSetsFromGlobalVariables(TypeIds, OrderedGTMs);
1681   else
1682     buildBitSetsFromFunctions(TypeIds, OrderedGTMs);
1683 }
1684 
1685 /// Lower all type tests in this module.
1686 LowerTypeTestsModule::LowerTypeTestsModule(
1687     Module &M, ModuleSummaryIndex *ExportSummary,
1688     const ModuleSummaryIndex *ImportSummary, bool DropTypeTests)
1689     : M(M), ExportSummary(ExportSummary), ImportSummary(ImportSummary),
1690       DropTypeTests(DropTypeTests || ClDropTypeTests) {
1691   assert(!(ExportSummary && ImportSummary));
1692   Triple TargetTriple(M.getTargetTriple());
1693   Arch = TargetTriple.getArch();
1694   OS = TargetTriple.getOS();
1695   ObjectFormat = TargetTriple.getObjectFormat();
1696 }
1697 
1698 bool LowerTypeTestsModule::runForTesting(Module &M) {
1699   ModuleSummaryIndex Summary(/*HaveGVs=*/false);
1700 
1701   // Handle the command-line summary arguments. This code is for testing
1702   // purposes only, so we handle errors directly.
1703   if (!ClReadSummary.empty()) {
1704     ExitOnError ExitOnErr("-lowertypetests-read-summary: " + ClReadSummary +
1705                           ": ");
1706     auto ReadSummaryFile =
1707         ExitOnErr(errorOrToExpected(MemoryBuffer::getFile(ClReadSummary)));
1708 
1709     yaml::Input In(ReadSummaryFile->getBuffer());
1710     In >> Summary;
1711     ExitOnErr(errorCodeToError(In.error()));
1712   }
1713 
1714   bool Changed =
1715       LowerTypeTestsModule(
1716           M, ClSummaryAction == PassSummaryAction::Export ? &Summary : nullptr,
1717           ClSummaryAction == PassSummaryAction::Import ? &Summary : nullptr,
1718           /*DropTypeTests*/ false)
1719           .lower();
1720 
1721   if (!ClWriteSummary.empty()) {
1722     ExitOnError ExitOnErr("-lowertypetests-write-summary: " + ClWriteSummary +
1723                           ": ");
1724     std::error_code EC;
1725     raw_fd_ostream OS(ClWriteSummary, EC, sys::fs::OF_TextWithCRLF);
1726     ExitOnErr(errorCodeToError(EC));
1727 
1728     yaml::Output Out(OS);
1729     Out << Summary;
1730   }
1731 
1732   return Changed;
1733 }
1734 
1735 static bool isDirectCall(Use& U) {
1736   auto *Usr = dyn_cast<CallInst>(U.getUser());
1737   if (Usr) {
1738     auto *CB = dyn_cast<CallBase>(Usr);
1739     if (CB && CB->isCallee(&U))
1740       return true;
1741   }
1742   return false;
1743 }
1744 
1745 void LowerTypeTestsModule::replaceCfiUses(Function *Old, Value *New,
1746                                           bool IsJumpTableCanonical) {
1747   SmallSetVector<Constant *, 4> Constants;
1748   for (Use &U : llvm::make_early_inc_range(Old->uses())) {
1749     // Skip block addresses and no_cfi values, which refer to the function
1750     // body instead of the jump table.
1751     if (isa<BlockAddress, NoCFIValue>(U.getUser()))
1752       continue;
1753 
1754     // Skip direct calls to externally defined or non-dso_local functions
1755     if (isDirectCall(U) && (Old->isDSOLocal() || !IsJumpTableCanonical))
1756       continue;
1757 
1758     // Must handle Constants specially, we cannot call replaceUsesOfWith on a
1759     // constant because they are uniqued.
1760     if (auto *C = dyn_cast<Constant>(U.getUser())) {
1761       if (!isa<GlobalValue>(C)) {
1762         // Save unique users to avoid processing operand replacement
1763         // more than once.
1764         Constants.insert(C);
1765         continue;
1766       }
1767     }
1768 
1769     U.set(New);
1770   }
1771 
1772   // Process operand replacement of saved constants.
1773   for (auto *C : Constants)
1774     C->handleOperandChange(Old, New);
1775 }
1776 
1777 void LowerTypeTestsModule::replaceDirectCalls(Value *Old, Value *New) {
1778   Old->replaceUsesWithIf(New, isDirectCall);
1779 }
1780 
1781 static void dropTypeTests(Module &M, Function &TypeTestFunc) {
1782   for (Use &U : llvm::make_early_inc_range(TypeTestFunc.uses())) {
1783     auto *CI = cast<CallInst>(U.getUser());
1784     // Find and erase llvm.assume intrinsics for this llvm.type.test call.
1785     for (Use &CIU : llvm::make_early_inc_range(CI->uses()))
1786       if (auto *Assume = dyn_cast<AssumeInst>(CIU.getUser()))
1787         Assume->eraseFromParent();
1788     // If the assume was merged with another assume, we might have a use on a
1789     // phi (which will feed the assume). Simply replace the use on the phi
1790     // with "true" and leave the merged assume.
1791     if (!CI->use_empty()) {
1792       assert(
1793           all_of(CI->users(), [](User *U) -> bool { return isa<PHINode>(U); }));
1794       CI->replaceAllUsesWith(ConstantInt::getTrue(M.getContext()));
1795     }
1796     CI->eraseFromParent();
1797   }
1798 }
1799 
1800 bool LowerTypeTestsModule::lower() {
1801   Function *TypeTestFunc =
1802       M.getFunction(Intrinsic::getName(Intrinsic::type_test));
1803 
1804   if (DropTypeTests) {
1805     if (TypeTestFunc)
1806       dropTypeTests(M, *TypeTestFunc);
1807     // Normally we'd have already removed all @llvm.public.type.test calls,
1808     // except for in the case where we originally were performing ThinLTO but
1809     // decided not to in the backend.
1810     Function *PublicTypeTestFunc =
1811         M.getFunction(Intrinsic::getName(Intrinsic::public_type_test));
1812     if (PublicTypeTestFunc)
1813       dropTypeTests(M, *PublicTypeTestFunc);
1814     if (TypeTestFunc || PublicTypeTestFunc) {
1815       // We have deleted the type intrinsics, so we no longer have enough
1816       // information to reason about the liveness of virtual function pointers
1817       // in GlobalDCE.
1818       for (GlobalVariable &GV : M.globals())
1819         GV.eraseMetadata(LLVMContext::MD_vcall_visibility);
1820       return true;
1821     }
1822     return false;
1823   }
1824 
1825   // If only some of the modules were split, we cannot correctly perform
1826   // this transformation. We already checked for the presense of type tests
1827   // with partially split modules during the thin link, and would have emitted
1828   // an error if any were found, so here we can simply return.
1829   if ((ExportSummary && ExportSummary->partiallySplitLTOUnits()) ||
1830       (ImportSummary && ImportSummary->partiallySplitLTOUnits()))
1831     return false;
1832 
1833   Function *ICallBranchFunnelFunc =
1834       M.getFunction(Intrinsic::getName(Intrinsic::icall_branch_funnel));
1835   if ((!TypeTestFunc || TypeTestFunc->use_empty()) &&
1836       (!ICallBranchFunnelFunc || ICallBranchFunnelFunc->use_empty()) &&
1837       !ExportSummary && !ImportSummary)
1838     return false;
1839 
1840   if (ImportSummary) {
1841     if (TypeTestFunc)
1842       for (Use &U : llvm::make_early_inc_range(TypeTestFunc->uses()))
1843         importTypeTest(cast<CallInst>(U.getUser()));
1844 
1845     if (ICallBranchFunnelFunc && !ICallBranchFunnelFunc->use_empty())
1846       report_fatal_error(
1847           "unexpected call to llvm.icall.branch.funnel during import phase");
1848 
1849     SmallVector<Function *, 8> Defs;
1850     SmallVector<Function *, 8> Decls;
1851     for (auto &F : M) {
1852       // CFI functions are either external, or promoted. A local function may
1853       // have the same name, but it's not the one we are looking for.
1854       if (F.hasLocalLinkage())
1855         continue;
1856       if (ImportSummary->cfiFunctionDefs().count(std::string(F.getName())))
1857         Defs.push_back(&F);
1858       else if (ImportSummary->cfiFunctionDecls().count(
1859                    std::string(F.getName())))
1860         Decls.push_back(&F);
1861     }
1862 
1863     std::vector<GlobalAlias *> AliasesToErase;
1864     {
1865       ScopedSaveAliaseesAndUsed S(M);
1866       for (auto F : Defs)
1867         importFunction(F, /*isJumpTableCanonical*/ true, AliasesToErase);
1868       for (auto F : Decls)
1869         importFunction(F, /*isJumpTableCanonical*/ false, AliasesToErase);
1870     }
1871     for (GlobalAlias *GA : AliasesToErase)
1872       GA->eraseFromParent();
1873 
1874     return true;
1875   }
1876 
1877   // Equivalence class set containing type identifiers and the globals that
1878   // reference them. This is used to partition the set of type identifiers in
1879   // the module into disjoint sets.
1880   using GlobalClassesTy = EquivalenceClasses<
1881       PointerUnion<GlobalTypeMember *, Metadata *, ICallBranchFunnel *>>;
1882   GlobalClassesTy GlobalClasses;
1883 
1884   // Verify the type metadata and build a few data structures to let us
1885   // efficiently enumerate the type identifiers associated with a global:
1886   // a list of GlobalTypeMembers (a GlobalObject stored alongside a vector
1887   // of associated type metadata) and a mapping from type identifiers to their
1888   // list of GlobalTypeMembers and last observed index in the list of globals.
1889   // The indices will be used later to deterministically order the list of type
1890   // identifiers.
1891   BumpPtrAllocator Alloc;
1892   struct TIInfo {
1893     unsigned UniqueId;
1894     std::vector<GlobalTypeMember *> RefGlobals;
1895   };
1896   DenseMap<Metadata *, TIInfo> TypeIdInfo;
1897   unsigned CurUniqueId = 0;
1898   SmallVector<MDNode *, 2> Types;
1899 
1900   // Cross-DSO CFI emits jumptable entries for exported functions as well as
1901   // address taken functions in case they are address taken in other modules.
1902   const bool CrossDsoCfi = M.getModuleFlag("Cross-DSO CFI") != nullptr;
1903 
1904   struct ExportedFunctionInfo {
1905     CfiFunctionLinkage Linkage;
1906     MDNode *FuncMD; // {name, linkage, type[, type...]}
1907   };
1908   DenseMap<StringRef, ExportedFunctionInfo> ExportedFunctions;
1909   if (ExportSummary) {
1910     // A set of all functions that are address taken by a live global object.
1911     DenseSet<GlobalValue::GUID> AddressTaken;
1912     for (auto &I : *ExportSummary)
1913       for (auto &GVS : I.second.SummaryList)
1914         if (GVS->isLive())
1915           for (auto &Ref : GVS->refs())
1916             AddressTaken.insert(Ref.getGUID());
1917 
1918     NamedMDNode *CfiFunctionsMD = M.getNamedMetadata("cfi.functions");
1919     if (CfiFunctionsMD) {
1920       for (auto FuncMD : CfiFunctionsMD->operands()) {
1921         assert(FuncMD->getNumOperands() >= 2);
1922         StringRef FunctionName =
1923             cast<MDString>(FuncMD->getOperand(0))->getString();
1924         CfiFunctionLinkage Linkage = static_cast<CfiFunctionLinkage>(
1925             cast<ConstantAsMetadata>(FuncMD->getOperand(1))
1926                 ->getValue()
1927                 ->getUniqueInteger()
1928                 .getZExtValue());
1929         const GlobalValue::GUID GUID = GlobalValue::getGUID(
1930                 GlobalValue::dropLLVMManglingEscape(FunctionName));
1931         // Do not emit jumptable entries for functions that are not-live and
1932         // have no live references (and are not exported with cross-DSO CFI.)
1933         if (!ExportSummary->isGUIDLive(GUID))
1934           continue;
1935         if (!AddressTaken.count(GUID)) {
1936           if (!CrossDsoCfi || Linkage != CFL_Definition)
1937             continue;
1938 
1939           bool Exported = false;
1940           if (auto VI = ExportSummary->getValueInfo(GUID))
1941             for (auto &GVS : VI.getSummaryList())
1942               if (GVS->isLive() && !GlobalValue::isLocalLinkage(GVS->linkage()))
1943                 Exported = true;
1944 
1945           if (!Exported)
1946             continue;
1947         }
1948         auto P = ExportedFunctions.insert({FunctionName, {Linkage, FuncMD}});
1949         if (!P.second && P.first->second.Linkage != CFL_Definition)
1950           P.first->second = {Linkage, FuncMD};
1951       }
1952 
1953       for (const auto &P : ExportedFunctions) {
1954         StringRef FunctionName = P.first;
1955         CfiFunctionLinkage Linkage = P.second.Linkage;
1956         MDNode *FuncMD = P.second.FuncMD;
1957         Function *F = M.getFunction(FunctionName);
1958         if (F && F->hasLocalLinkage()) {
1959           // Locally defined function that happens to have the same name as a
1960           // function defined in a ThinLTO module. Rename it to move it out of
1961           // the way of the external reference that we're about to create.
1962           // Note that setName will find a unique name for the function, so even
1963           // if there is an existing function with the suffix there won't be a
1964           // name collision.
1965           F->setName(F->getName() + ".1");
1966           F = nullptr;
1967         }
1968 
1969         if (!F)
1970           F = Function::Create(
1971               FunctionType::get(Type::getVoidTy(M.getContext()), false),
1972               GlobalVariable::ExternalLinkage,
1973               M.getDataLayout().getProgramAddressSpace(), FunctionName, &M);
1974 
1975         // If the function is available_externally, remove its definition so
1976         // that it is handled the same way as a declaration. Later we will try
1977         // to create an alias using this function's linkage, which will fail if
1978         // the linkage is available_externally. This will also result in us
1979         // following the code path below to replace the type metadata.
1980         if (F->hasAvailableExternallyLinkage()) {
1981           F->setLinkage(GlobalValue::ExternalLinkage);
1982           F->deleteBody();
1983           F->setComdat(nullptr);
1984           F->clearMetadata();
1985         }
1986 
1987         // Update the linkage for extern_weak declarations when a definition
1988         // exists.
1989         if (Linkage == CFL_Definition && F->hasExternalWeakLinkage())
1990           F->setLinkage(GlobalValue::ExternalLinkage);
1991 
1992         // If the function in the full LTO module is a declaration, replace its
1993         // type metadata with the type metadata we found in cfi.functions. That
1994         // metadata is presumed to be more accurate than the metadata attached
1995         // to the declaration.
1996         if (F->isDeclaration()) {
1997           if (Linkage == CFL_WeakDeclaration)
1998             F->setLinkage(GlobalValue::ExternalWeakLinkage);
1999 
2000           F->eraseMetadata(LLVMContext::MD_type);
2001           for (unsigned I = 2; I < FuncMD->getNumOperands(); ++I)
2002             F->addMetadata(LLVMContext::MD_type,
2003                            *cast<MDNode>(FuncMD->getOperand(I).get()));
2004         }
2005       }
2006     }
2007   }
2008 
2009   DenseMap<GlobalObject *, GlobalTypeMember *> GlobalTypeMembers;
2010   for (GlobalObject &GO : M.global_objects()) {
2011     if (isa<GlobalVariable>(GO) && GO.isDeclarationForLinker())
2012       continue;
2013 
2014     Types.clear();
2015     GO.getMetadata(LLVMContext::MD_type, Types);
2016 
2017     bool IsJumpTableCanonical = false;
2018     bool IsExported = false;
2019     if (Function *F = dyn_cast<Function>(&GO)) {
2020       IsJumpTableCanonical = isJumpTableCanonical(F);
2021       if (ExportedFunctions.count(F->getName())) {
2022         IsJumpTableCanonical |=
2023             ExportedFunctions[F->getName()].Linkage == CFL_Definition;
2024         IsExported = true;
2025       // TODO: The logic here checks only that the function is address taken,
2026       // not that the address takers are live. This can be updated to check
2027       // their liveness and emit fewer jumptable entries once monolithic LTO
2028       // builds also emit summaries.
2029       } else if (!F->hasAddressTaken()) {
2030         if (!CrossDsoCfi || !IsJumpTableCanonical || F->hasLocalLinkage())
2031           continue;
2032       }
2033     }
2034 
2035     auto *GTM = GlobalTypeMember::create(Alloc, &GO, IsJumpTableCanonical,
2036                                          IsExported, Types);
2037     GlobalTypeMembers[&GO] = GTM;
2038     for (MDNode *Type : Types) {
2039       verifyTypeMDNode(&GO, Type);
2040       auto &Info = TypeIdInfo[Type->getOperand(1)];
2041       Info.UniqueId = ++CurUniqueId;
2042       Info.RefGlobals.push_back(GTM);
2043     }
2044   }
2045 
2046   auto AddTypeIdUse = [&](Metadata *TypeId) -> TypeIdUserInfo & {
2047     // Add the call site to the list of call sites for this type identifier. We
2048     // also use TypeIdUsers to keep track of whether we have seen this type
2049     // identifier before. If we have, we don't need to re-add the referenced
2050     // globals to the equivalence class.
2051     auto Ins = TypeIdUsers.insert({TypeId, {}});
2052     if (Ins.second) {
2053       // Add the type identifier to the equivalence class.
2054       GlobalClassesTy::iterator GCI = GlobalClasses.insert(TypeId);
2055       GlobalClassesTy::member_iterator CurSet = GlobalClasses.findLeader(GCI);
2056 
2057       // Add the referenced globals to the type identifier's equivalence class.
2058       for (GlobalTypeMember *GTM : TypeIdInfo[TypeId].RefGlobals)
2059         CurSet = GlobalClasses.unionSets(
2060             CurSet, GlobalClasses.findLeader(GlobalClasses.insert(GTM)));
2061     }
2062 
2063     return Ins.first->second;
2064   };
2065 
2066   if (TypeTestFunc) {
2067     for (const Use &U : TypeTestFunc->uses()) {
2068       auto CI = cast<CallInst>(U.getUser());
2069       // If this type test is only used by llvm.assume instructions, it
2070       // was used for whole program devirtualization, and is being kept
2071       // for use by other optimization passes. We do not need or want to
2072       // lower it here. We also don't want to rewrite any associated globals
2073       // unnecessarily. These will be removed by a subsequent LTT invocation
2074       // with the DropTypeTests flag set.
2075       bool OnlyAssumeUses = !CI->use_empty();
2076       for (const Use &CIU : CI->uses()) {
2077         if (isa<AssumeInst>(CIU.getUser()))
2078           continue;
2079         OnlyAssumeUses = false;
2080         break;
2081       }
2082       if (OnlyAssumeUses)
2083         continue;
2084 
2085       auto TypeIdMDVal = dyn_cast<MetadataAsValue>(CI->getArgOperand(1));
2086       if (!TypeIdMDVal)
2087         report_fatal_error("Second argument of llvm.type.test must be metadata");
2088       auto TypeId = TypeIdMDVal->getMetadata();
2089       AddTypeIdUse(TypeId).CallSites.push_back(CI);
2090     }
2091   }
2092 
2093   if (ICallBranchFunnelFunc) {
2094     for (const Use &U : ICallBranchFunnelFunc->uses()) {
2095       if (Arch != Triple::x86_64)
2096         report_fatal_error(
2097             "llvm.icall.branch.funnel not supported on this target");
2098 
2099       auto CI = cast<CallInst>(U.getUser());
2100 
2101       std::vector<GlobalTypeMember *> Targets;
2102       if (CI->arg_size() % 2 != 1)
2103         report_fatal_error("number of arguments should be odd");
2104 
2105       GlobalClassesTy::member_iterator CurSet;
2106       for (unsigned I = 1; I != CI->arg_size(); I += 2) {
2107         int64_t Offset;
2108         auto *Base = dyn_cast<GlobalObject>(GetPointerBaseWithConstantOffset(
2109             CI->getOperand(I), Offset, M.getDataLayout()));
2110         if (!Base)
2111           report_fatal_error(
2112               "Expected branch funnel operand to be global value");
2113 
2114         GlobalTypeMember *GTM = GlobalTypeMembers[Base];
2115         Targets.push_back(GTM);
2116         GlobalClassesTy::member_iterator NewSet =
2117             GlobalClasses.findLeader(GlobalClasses.insert(GTM));
2118         if (I == 1)
2119           CurSet = NewSet;
2120         else
2121           CurSet = GlobalClasses.unionSets(CurSet, NewSet);
2122       }
2123 
2124       GlobalClasses.unionSets(
2125           CurSet, GlobalClasses.findLeader(
2126                       GlobalClasses.insert(ICallBranchFunnel::create(
2127                           Alloc, CI, Targets, ++CurUniqueId))));
2128     }
2129   }
2130 
2131   if (ExportSummary) {
2132     DenseMap<GlobalValue::GUID, TinyPtrVector<Metadata *>> MetadataByGUID;
2133     for (auto &P : TypeIdInfo) {
2134       if (auto *TypeId = dyn_cast<MDString>(P.first))
2135         MetadataByGUID[GlobalValue::getGUID(TypeId->getString())].push_back(
2136             TypeId);
2137     }
2138 
2139     for (auto &P : *ExportSummary) {
2140       for (auto &S : P.second.SummaryList) {
2141         if (!ExportSummary->isGlobalValueLive(S.get()))
2142           continue;
2143         if (auto *FS = dyn_cast<FunctionSummary>(S->getBaseObject()))
2144           for (GlobalValue::GUID G : FS->type_tests())
2145             for (Metadata *MD : MetadataByGUID[G])
2146               AddTypeIdUse(MD).IsExported = true;
2147       }
2148     }
2149   }
2150 
2151   if (GlobalClasses.empty())
2152     return false;
2153 
2154   // Build a list of disjoint sets ordered by their maximum global index for
2155   // determinism.
2156   std::vector<std::pair<GlobalClassesTy::iterator, unsigned>> Sets;
2157   for (GlobalClassesTy::iterator I = GlobalClasses.begin(),
2158                                  E = GlobalClasses.end();
2159        I != E; ++I) {
2160     if (!I->isLeader())
2161       continue;
2162     ++NumTypeIdDisjointSets;
2163 
2164     unsigned MaxUniqueId = 0;
2165     for (GlobalClassesTy::member_iterator MI = GlobalClasses.member_begin(I);
2166          MI != GlobalClasses.member_end(); ++MI) {
2167       if (auto *MD = MI->dyn_cast<Metadata *>())
2168         MaxUniqueId = std::max(MaxUniqueId, TypeIdInfo[MD].UniqueId);
2169       else if (auto *BF = MI->dyn_cast<ICallBranchFunnel *>())
2170         MaxUniqueId = std::max(MaxUniqueId, BF->UniqueId);
2171     }
2172     Sets.emplace_back(I, MaxUniqueId);
2173   }
2174   llvm::sort(Sets, llvm::less_second());
2175 
2176   // For each disjoint set we found...
2177   for (const auto &S : Sets) {
2178     // Build the list of type identifiers in this disjoint set.
2179     std::vector<Metadata *> TypeIds;
2180     std::vector<GlobalTypeMember *> Globals;
2181     std::vector<ICallBranchFunnel *> ICallBranchFunnels;
2182     for (GlobalClassesTy::member_iterator MI =
2183              GlobalClasses.member_begin(S.first);
2184          MI != GlobalClasses.member_end(); ++MI) {
2185       if (MI->is<Metadata *>())
2186         TypeIds.push_back(MI->get<Metadata *>());
2187       else if (MI->is<GlobalTypeMember *>())
2188         Globals.push_back(MI->get<GlobalTypeMember *>());
2189       else
2190         ICallBranchFunnels.push_back(MI->get<ICallBranchFunnel *>());
2191     }
2192 
2193     // Order type identifiers by unique ID for determinism. This ordering is
2194     // stable as there is a one-to-one mapping between metadata and unique IDs.
2195     llvm::sort(TypeIds, [&](Metadata *M1, Metadata *M2) {
2196       return TypeIdInfo[M1].UniqueId < TypeIdInfo[M2].UniqueId;
2197     });
2198 
2199     // Same for the branch funnels.
2200     llvm::sort(ICallBranchFunnels,
2201                [&](ICallBranchFunnel *F1, ICallBranchFunnel *F2) {
2202                  return F1->UniqueId < F2->UniqueId;
2203                });
2204 
2205     // Build bitsets for this disjoint set.
2206     buildBitSetsFromDisjointSet(TypeIds, Globals, ICallBranchFunnels);
2207   }
2208 
2209   allocateByteArrays();
2210 
2211   // Parse alias data to replace stand-in function declarations for aliases
2212   // with an alias to the intended target.
2213   if (ExportSummary) {
2214     if (NamedMDNode *AliasesMD = M.getNamedMetadata("aliases")) {
2215       for (auto AliasMD : AliasesMD->operands()) {
2216         assert(AliasMD->getNumOperands() >= 4);
2217         StringRef AliasName =
2218             cast<MDString>(AliasMD->getOperand(0))->getString();
2219         StringRef Aliasee = cast<MDString>(AliasMD->getOperand(1))->getString();
2220 
2221         if (!ExportedFunctions.count(Aliasee) ||
2222             ExportedFunctions[Aliasee].Linkage != CFL_Definition ||
2223             !M.getNamedAlias(Aliasee))
2224           continue;
2225 
2226         GlobalValue::VisibilityTypes Visibility =
2227             static_cast<GlobalValue::VisibilityTypes>(
2228                 cast<ConstantAsMetadata>(AliasMD->getOperand(2))
2229                     ->getValue()
2230                     ->getUniqueInteger()
2231                     .getZExtValue());
2232         bool Weak =
2233             static_cast<bool>(cast<ConstantAsMetadata>(AliasMD->getOperand(3))
2234                                   ->getValue()
2235                                   ->getUniqueInteger()
2236                                   .getZExtValue());
2237 
2238         auto *Alias = GlobalAlias::create("", M.getNamedAlias(Aliasee));
2239         Alias->setVisibility(Visibility);
2240         if (Weak)
2241           Alias->setLinkage(GlobalValue::WeakAnyLinkage);
2242 
2243         if (auto *F = M.getFunction(AliasName)) {
2244           Alias->takeName(F);
2245           F->replaceAllUsesWith(Alias);
2246           F->eraseFromParent();
2247         } else {
2248           Alias->setName(AliasName);
2249         }
2250       }
2251     }
2252   }
2253 
2254   // Emit .symver directives for exported functions, if they exist.
2255   if (ExportSummary) {
2256     if (NamedMDNode *SymversMD = M.getNamedMetadata("symvers")) {
2257       for (auto Symver : SymversMD->operands()) {
2258         assert(Symver->getNumOperands() >= 2);
2259         StringRef SymbolName =
2260             cast<MDString>(Symver->getOperand(0))->getString();
2261         StringRef Alias = cast<MDString>(Symver->getOperand(1))->getString();
2262 
2263         if (!ExportedFunctions.count(SymbolName))
2264           continue;
2265 
2266         M.appendModuleInlineAsm(
2267             (llvm::Twine(".symver ") + SymbolName + ", " + Alias).str());
2268       }
2269     }
2270   }
2271 
2272   return true;
2273 }
2274 
2275 PreservedAnalyses LowerTypeTestsPass::run(Module &M,
2276                                           ModuleAnalysisManager &AM) {
2277   bool Changed;
2278   if (UseCommandLine)
2279     Changed = LowerTypeTestsModule::runForTesting(M);
2280   else
2281     Changed =
2282         LowerTypeTestsModule(M, ExportSummary, ImportSummary, DropTypeTests)
2283             .lower();
2284   if (!Changed)
2285     return PreservedAnalyses::all();
2286   return PreservedAnalyses::none();
2287 }
2288