//===- CloneFunction.cpp - Clone a function into another function ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the CloneFunctionInto interface, which is used as the
// low-level function cloner. This is used by the CloneFunction and function
// inliner to do the dirty work of copying the body of a function around.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/DomTreeUpdater.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ValueMapper.h"
#include <map>
#include <optional>
using namespace llvm;

#define DEBUG_TYPE "clone-function"

/// See comments in Cloning.h.
BasicBlock *llvm::CloneBasicBlock(const BasicBlock *BB, ValueToValueMapTy &VMap,
                                  const Twine &NameSuffix, Function *F,
                                  ClonedCodeInfo *CodeInfo,
                                  DebugInfoFinder *DIFinder) {
  BasicBlock *NewBB = BasicBlock::Create(BB->getContext(), "", F);
  if (BB->hasName())
    NewBB->setName(BB->getName() + NameSuffix);

  bool hasCalls = false, hasDynamicAllocas = false, hasMemProfMetadata = false;
  Module *TheModule = F ? F->getParent() : nullptr;

  // Loop over all instructions, and copy them over.
  for (const Instruction &I : *BB) {
    if (DIFinder && TheModule)
      DIFinder->processInstruction(*TheModule, I);

    Instruction *NewInst = I.clone();
    if (I.hasName())
      NewInst->setName(I.getName() + NameSuffix);
    NewInst->insertInto(NewBB, NewBB->end());
    VMap[&I] = NewInst; // Add instruction map to value.

    if (isa<CallInst>(I) && !I.isDebugOrPseudoInst()) {
      hasCalls = true;
      hasMemProfMetadata |= I.hasMetadata(LLVMContext::MD_memprof);
    }
    if (const AllocaInst *AI = dyn_cast<AllocaInst>(&I)) {
      if (!AI->isStaticAlloca()) {
        hasDynamicAllocas = true;
      }
    }
  }

  if (CodeInfo) {
    CodeInfo->ContainsCalls |= hasCalls;
    CodeInfo->ContainsMemProfMetadata |= hasMemProfMetadata;
    CodeInfo->ContainsDynamicAllocas |= hasDynamicAllocas;
  }
  return NewBB;
}

// Clone OldFunc into NewFunc, transforming the old arguments into references to
// VMap values.
//
void llvm::CloneFunctionInto(Function *NewFunc, const Function *OldFunc,
                             ValueToValueMapTy &VMap,
                             CloneFunctionChangeType Changes,
                             SmallVectorImpl<ReturnInst *> &Returns,
                             const char *NameSuffix, ClonedCodeInfo *CodeInfo,
                             ValueMapTypeRemapper *TypeMapper,
                             ValueMaterializer *Materializer) {
  assert(NameSuffix && "NameSuffix cannot be null!");

#ifndef NDEBUG
  for (const Argument &I : OldFunc->args())
    assert(VMap.count(&I) && "No mapping from source argument specified!");
#endif

  bool ModuleLevelChanges = Changes > CloneFunctionChangeType::LocalChangesOnly;

  // Copy all attributes other than those stored in the AttributeList. We need
  // to remap the parameter indices of the AttributeList.
  AttributeList NewAttrs = NewFunc->getAttributes();
  NewFunc->copyAttributesFrom(OldFunc);
  NewFunc->setAttributes(NewAttrs);

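  // When only local changes were requested, references to module-level values
  // (the personality function and the prefix/prologue data below) are left
  // pointing at the originals rather than being remapped.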
  const RemapFlags FuncGlobalRefFlags =
      ModuleLevelChanges ? RF_None : RF_NoModuleLevelChanges;

  // Fix up the personality function that got copied over.
  if (OldFunc->hasPersonalityFn())
    NewFunc->setPersonalityFn(MapValue(OldFunc->getPersonalityFn(), VMap,
                                       FuncGlobalRefFlags, TypeMapper,
                                       Materializer));

  if (OldFunc->hasPrefixData()) {
    NewFunc->setPrefixData(MapValue(OldFunc->getPrefixData(), VMap,
                                    FuncGlobalRefFlags, TypeMapper,
                                    Materializer));
  }

  if (OldFunc->hasPrologueData()) {
    NewFunc->setPrologueData(MapValue(OldFunc->getPrologueData(), VMap,
                                      FuncGlobalRefFlags, TypeMapper,
                                      Materializer));
  }

  SmallVector<AttributeSet, 4> NewArgAttrs(NewFunc->arg_size());
  AttributeList OldAttrs = OldFunc->getAttributes();

  // Clone any argument attributes that are present in the VMap.
  for (const Argument &OldArg : OldFunc->args()) {
    if (Argument *NewArg = dyn_cast<Argument>(VMap[&OldArg])) {
      NewArgAttrs[NewArg->getArgNo()] =
          OldAttrs.getParamAttrs(OldArg.getArgNo());
    }
  }

  NewFunc->setAttributes(
      AttributeList::get(NewFunc->getContext(), OldAttrs.getFnAttrs(),
                         OldAttrs.getRetAttrs(), NewArgAttrs));

  // Everything else beyond this point deals with function instructions,
  // so if we are dealing with a function declaration, we're done.
  if (OldFunc->isDeclaration())
    return;

  // When we remap instructions within the same module, we want to avoid
  // duplicating inlined DISubprograms, so record all subprograms we find as we
  // duplicate instructions and then freeze them in the MD map. We also record
  // information about dbg.value and dbg.declare to avoid duplicating the
  // types.
  std::optional<DebugInfoFinder> DIFinder;

  // Track the subprogram attachment that needs to be cloned to fine-tune the
  // mapping within the same module.
  DISubprogram *SPClonedWithinModule = nullptr;
  if (Changes < CloneFunctionChangeType::DifferentModule) {
    assert((NewFunc->getParent() == nullptr ||
            NewFunc->getParent() == OldFunc->getParent()) &&
           "Expected NewFunc to have the same parent, or no parent");

    // Need to find subprograms, types, and compile units.
    DIFinder.emplace();

    SPClonedWithinModule = OldFunc->getSubprogram();
    if (SPClonedWithinModule)
      DIFinder->processSubprogram(SPClonedWithinModule);
  } else {
    assert((NewFunc->getParent() == nullptr ||
            NewFunc->getParent() != OldFunc->getParent()) &&
           "Expected NewFunc to have different parents, or no parent");

    if (Changes == CloneFunctionChangeType::DifferentModule) {
      assert(NewFunc->getParent() &&
             "Need parent of new function to maintain debug info invariants");

      // Need to find all the compile units.
      DIFinder.emplace();
    }
  }

  // Loop over all of the basic blocks in the function, cloning them as
  // appropriate; the cloned blocks are appended to NewFunc as they are
  // created.
  for (const BasicBlock &BB : *OldFunc) {

    // Create a new basic block and copy instructions into it!
    BasicBlock *CBB = CloneBasicBlock(&BB, VMap, NameSuffix, NewFunc, CodeInfo,
                                      DIFinder ? &*DIFinder : nullptr);

    // Add basic block mapping.
    VMap[&BB] = CBB;

    // It is only legal to clone a function if a block address within that
    // function is never referenced outside of the function. Given that, we
    // want to map block addresses from the old function to block addresses in
    // the clone. (This is different from the generic ValueMapper
    // implementation, which generates an invalid blockaddress when
    // cloning a function.)
    if (BB.hasAddressTaken()) {
      Constant *OldBBAddr = BlockAddress::get(const_cast<Function *>(OldFunc),
                                              const_cast<BasicBlock *>(&BB));
      VMap[OldBBAddr] = BlockAddress::get(NewFunc, CBB);
    }

    // Note return instructions for the caller.
    if (ReturnInst *RI = dyn_cast<ReturnInst>(CBB->getTerminator()))
      Returns.push_back(RI);
  }

  if (Changes < CloneFunctionChangeType::DifferentModule &&
      DIFinder->subprogram_count() > 0) {
    // Turn on module-level changes, since we need to clone (some of) the
    // debug info metadata.
    //
    // FIXME: Metadata effectively owned by a function should be made
    // local, and only that local metadata should be cloned.
    ModuleLevelChanges = true;

    auto mapToSelfIfNew = [&VMap](MDNode *N) {
      // Avoid clobbering an existing mapping.
      (void)VMap.MD().try_emplace(N, N);
    };

    // Avoid cloning types, compile units, and (other) subprograms.
    SmallPtrSet<const DISubprogram *, 16> MappedToSelfSPs;
    for (DISubprogram *ISP : DIFinder->subprograms()) {
      if (ISP != SPClonedWithinModule) {
        mapToSelfIfNew(ISP);
        MappedToSelfSPs.insert(ISP);
      }
    }

    // If a subprogram isn't going to be cloned, skip its lexical blocks as
    // well.
    for (DIScope *S : DIFinder->scopes()) {
      auto *LScope = dyn_cast<DILocalScope>(S);
      if (LScope && MappedToSelfSPs.count(LScope->getSubprogram()))
        mapToSelfIfNew(S);
    }

    for (DICompileUnit *CU : DIFinder->compile_units())
      mapToSelfIfNew(CU);

    for (DIType *Type : DIFinder->types())
      mapToSelfIfNew(Type);
  } else {
    assert(!SPClonedWithinModule &&
           "Subprogram should be in DIFinder->subprogram_count()...");
  }

  const auto RemapFlag = ModuleLevelChanges ? RF_None : RF_NoModuleLevelChanges;
  // Duplicate the metadata that is attached to the cloned function.
  // Subprograms/CUs/types that were already mapped to themselves won't be
  // duplicated.
  SmallVector<std::pair<unsigned, MDNode *>, 1> MDs;
  OldFunc->getAllMetadata(MDs);
  for (auto MD : MDs) {
    NewFunc->addMetadata(MD.first, *MapMetadata(MD.second, VMap, RemapFlag,
                                                TypeMapper, Materializer));
  }

  // Loop over all of the instructions in the new function, fixing up operand
  // references as we go. This uses VMap to do all the hard work.
  for (Function::iterator
           BB = cast<BasicBlock>(VMap[&OldFunc->front()])->getIterator(),
           BE = NewFunc->end();
       BB != BE; ++BB)
    // Loop over all instructions, fixing each one as we find it...
    for (Instruction &II : *BB)
      RemapInstruction(&II, VMap, RemapFlag, TypeMapper, Materializer);

  // Only update !llvm.dbg.cu for DifferentModule (not CloneModule). In the
  // same module, the compile unit will already be listed (or not). When
  // cloning a module, CloneModule() will handle creating the named metadata.
  if (Changes != CloneFunctionChangeType::DifferentModule)
    return;

  // Update !llvm.dbg.cu with compile units added to the new module if this
  // function is being cloned in isolation.
  //
  // FIXME: This is making global / module-level changes, which doesn't seem
  // like the right encapsulation. Consider dropping the requirement to update
  // !llvm.dbg.cu (either obsoleting the node, or restricting it to
  // non-discardable compile units) instead of discovering compile units by
  // visiting the metadata attached to global values, which would allow this
  // code to be deleted. Alternatively, perhaps give responsibility for this
  // update to CloneFunctionInto's callers.
  auto *NewModule = NewFunc->getParent();
  auto *NMD = NewModule->getOrInsertNamedMetadata("llvm.dbg.cu");
  // Avoid multiple insertions of the same DICompileUnit to NMD.
  SmallPtrSet<const void *, 8> Visited;
  for (auto *Operand : NMD->operands())
    Visited.insert(Operand);
  for (auto *Unit : DIFinder->compile_units()) {
    MDNode *MappedUnit =
        MapMetadata(Unit, VMap, RF_None, TypeMapper, Materializer);
    if (Visited.insert(MappedUnit).second)
      NMD->addOperand(MappedUnit);
  }
}

/// Return a copy of the specified function and add it to that function's
/// module. Also, any references specified in the VMap are changed to refer to
/// their mapped value instead of the original one. If any of the arguments to
/// the function are in the VMap, the arguments are deleted from the resultant
/// function. The VMap is updated to include mappings from all of the
/// instructions and basic blocks in the function from their old to new values.
///
Function *llvm::CloneFunction(Function *F, ValueToValueMapTy &VMap,
                              ClonedCodeInfo *CodeInfo) {
  std::vector<Type *> ArgTypes;

  // The user might be deleting arguments to the function by specifying them in
  // the VMap. If so, we should not add those arguments to the new function's
  // argument type vector.
  for (const Argument &I : F->args())
    if (VMap.count(&I) == 0) // Haven't mapped the argument to anything yet?
      ArgTypes.push_back(I.getType());

  // Create a new function type...
  FunctionType *FTy =
      FunctionType::get(F->getFunctionType()->getReturnType(), ArgTypes,
                        F->getFunctionType()->isVarArg());

  // Create the new function...
  Function *NewF = Function::Create(FTy, F->getLinkage(), F->getAddressSpace(),
                                    F->getName(), F->getParent());

  // Loop over the arguments, copying the names of the mapped arguments over...
  Function::arg_iterator DestI = NewF->arg_begin();
  for (const Argument &I : F->args())
    if (VMap.count(&I) == 0) {     // Is this argument preserved?
      DestI->setName(I.getName()); // Copy the name over...
      VMap[&I] = &*DestI++;        // Add mapping to VMap
    }

  SmallVector<ReturnInst *, 8> Returns; // Ignore returns cloned.
  CloneFunctionInto(NewF, F, VMap, CloneFunctionChangeType::LocalChangesOnly,
                    Returns, "", CodeInfo);

  return NewF;
}

namespace {
/// This is a private class used to implement CloneAndPruneFunctionInto.
struct PruningFunctionCloner {
  Function *NewFunc;
  const Function *OldFunc;
  ValueToValueMapTy &VMap;
  bool ModuleLevelChanges;
  const char *NameSuffix;
  ClonedCodeInfo *CodeInfo;
  bool HostFuncIsStrictFP;

  Instruction *cloneInstruction(BasicBlock::const_iterator II);

public:
  PruningFunctionCloner(Function *newFunc, const Function *oldFunc,
                        ValueToValueMapTy &valueMap, bool moduleLevelChanges,
                        const char *nameSuffix, ClonedCodeInfo *codeInfo)
      : NewFunc(newFunc), OldFunc(oldFunc), VMap(valueMap),
        ModuleLevelChanges(moduleLevelChanges), NameSuffix(nameSuffix),
        CodeInfo(codeInfo) {
    HostFuncIsStrictFP =
        newFunc->getAttributes().hasFnAttr(Attribute::StrictFP);
  }

  /// The specified block is found to be reachable, clone it and
  /// anything that it can reach.
  void CloneBlock(const BasicBlock *BB, BasicBlock::const_iterator StartingInst,
                  std::vector<const BasicBlock *> &ToClone);
};
} // namespace

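// Returns true if the given constrained intrinsic takes a rounding-mode
// metadata operand, as recorded in llvm/IR/ConstrainedOps.def.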
static bool hasRoundingModeOperand(Intrinsic::ID CIID) {
  switch (CIID) {
#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC)                         \
  case Intrinsic::INTRINSIC:                                                   \
    return ROUND_MODE == 1;
#define FUNCTION INSTRUCTION
#include "llvm/IR/ConstrainedOps.def"
  default:
    llvm_unreachable("Unexpected constrained intrinsic id");
  }
}

Instruction *
PruningFunctionCloner::cloneInstruction(BasicBlock::const_iterator II) {
  const Instruction &OldInst = *II;
  Instruction *NewInst = nullptr;
  if (HostFuncIsStrictFP) {
    Intrinsic::ID CIID = getConstrainedIntrinsicID(OldInst);
    if (CIID != Intrinsic::not_intrinsic) {
      // Instead of cloning the instruction, a call to the constrained
      // intrinsic should be created.
      // Assume the first arguments of constrained intrinsics are the same as
      // the operands of the original instruction.

      // Determine overloaded types of the intrinsic.
      SmallVector<Type *, 2> TParams;
      SmallVector<Intrinsic::IITDescriptor, 8> Descriptor;
      getIntrinsicInfoTableEntries(CIID, Descriptor);
      for (unsigned I = 0, E = Descriptor.size(); I != E; ++I) {
        Intrinsic::IITDescriptor Operand = Descriptor[I];
        switch (Operand.Kind) {
        case Intrinsic::IITDescriptor::Argument:
          if (Operand.getArgumentKind() !=
              Intrinsic::IITDescriptor::AK_MatchType) {
            if (I == 0)
              TParams.push_back(OldInst.getType());
            else
              TParams.push_back(OldInst.getOperand(I - 1)->getType());
          }
          break;
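        // A SameVecWidthArgument descriptor is followed by a nested descriptor
        // for its element type; skip that extra entry.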
        case Intrinsic::IITDescriptor::SameVecWidthArgument:
          ++I;
          break;
        default:
          break;
        }
      }

      // Create intrinsic call.
      LLVMContext &Ctx = NewFunc->getContext();
      Function *IFn =
          Intrinsic::getDeclaration(NewFunc->getParent(), CIID, TParams);
      SmallVector<Value *, 4> Args;
      unsigned NumOperands = OldInst.getNumOperands();
      if (isa<CallInst>(OldInst))
        --NumOperands;
      for (unsigned I = 0; I < NumOperands; ++I) {
        Value *Op = OldInst.getOperand(I);
        Args.push_back(Op);
      }
      if (const auto *CmpI = dyn_cast<FCmpInst>(&OldInst)) {
        FCmpInst::Predicate Pred = CmpI->getPredicate();
        StringRef PredName = FCmpInst::getPredicateName(Pred);
        Args.push_back(MetadataAsValue::get(Ctx, MDString::get(Ctx, PredName)));
      }

      // The last arguments of a constrained intrinsic are metadata that
      // represent rounding mode (absent in some intrinsics) and exception
      // behavior. The inlined function uses default settings.
      if (hasRoundingModeOperand(CIID))
        Args.push_back(
            MetadataAsValue::get(Ctx, MDString::get(Ctx, "round.tonearest")));
      Args.push_back(
          MetadataAsValue::get(Ctx, MDString::get(Ctx, "fpexcept.ignore")));

      NewInst = CallInst::Create(IFn, Args, OldInst.getName() + ".strict");
    }
  }
  if (!NewInst)
    NewInst = II->clone();
  return NewInst;
}

/// The specified block is found to be reachable, clone it and
/// anything that it can reach.
void PruningFunctionCloner::CloneBlock(
    const BasicBlock *BB, BasicBlock::const_iterator StartingInst,
    std::vector<const BasicBlock *> &ToClone) {
  WeakTrackingVH &BBEntry = VMap[BB];

  // Have we already cloned this block?
  if (BBEntry)
    return;

  // Nope, clone it now.
  BasicBlock *NewBB;
  BBEntry = NewBB = BasicBlock::Create(BB->getContext());
  if (BB->hasName())
    NewBB->setName(BB->getName() + NameSuffix);

  // It is only legal to clone a function if a block address within that
  // function is never referenced outside of the function. Given that, we
  // want to map block addresses from the old function to block addresses in
  // the clone. (This is different from the generic ValueMapper
  // implementation, which generates an invalid blockaddress when
  // cloning a function.)
  //
  // Note that we don't need to fix the mapping for unreachable blocks;
  // the default mapping there is safe.
  if (BB->hasAddressTaken()) {
    Constant *OldBBAddr = BlockAddress::get(const_cast<Function *>(OldFunc),
                                            const_cast<BasicBlock *>(BB));
    VMap[OldBBAddr] = BlockAddress::get(NewFunc, NewBB);
  }

  bool hasCalls = false, hasDynamicAllocas = false, hasStaticAllocas = false;
  bool hasMemProfMetadata = false;

  // Loop over all instructions, and copy them over, DCE'ing as we go. This
  // loop doesn't include the terminator.
  for (BasicBlock::const_iterator II = StartingInst, IE = --BB->end(); II != IE;
       ++II) {

    Instruction *NewInst = cloneInstruction(II);

    if (HostFuncIsStrictFP) {
      // All function calls in the inlined function must get 'strictfp'
      // attribute to prevent undesirable optimizations.
      if (auto *Call = dyn_cast<CallInst>(NewInst))
        Call->addFnAttr(Attribute::StrictFP);
    }

    // Eagerly remap operands to the newly cloned instruction, except for PHI
    // nodes for which we defer processing until we update the CFG. Also defer
    // debug intrinsic processing because they may contain use-before-defs.
    if (!isa<PHINode>(NewInst) && !isa<DbgVariableIntrinsic>(NewInst)) {
      RemapInstruction(NewInst, VMap,
                       ModuleLevelChanges ? RF_None : RF_NoModuleLevelChanges);

      // If we can simplify this instruction to some other value, simply add
      // a mapping to that value rather than inserting a new instruction into
      // the basic block.
      if (Value *V =
              simplifyInstruction(NewInst, BB->getModule()->getDataLayout())) {
        // On the off-chance that this simplifies to an instruction in the old
        // function, map it back into the new function.
        if (NewFunc != OldFunc)
          if (Value *MappedV = VMap.lookup(V))
            V = MappedV;

        if (!NewInst->mayHaveSideEffects()) {
          VMap[&*II] = V;
          NewInst->deleteValue();
          continue;
        }
      }
    }

    if (II->hasName())
      NewInst->setName(II->getName() + NameSuffix);
    VMap[&*II] = NewInst; // Add instruction map to value.
    NewInst->insertInto(NewBB, NewBB->end());
    if (isa<CallInst>(II) && !II->isDebugOrPseudoInst()) {
      hasCalls = true;
      hasMemProfMetadata |= II->hasMetadata(LLVMContext::MD_memprof);
    }

    if (CodeInfo) {
      CodeInfo->OrigVMap[&*II] = NewInst;
      if (auto *CB = dyn_cast<CallBase>(&*II))
        if (CB->hasOperandBundles())
          CodeInfo->OperandBundleCallSites.push_back(NewInst);
    }

    if (const AllocaInst *AI = dyn_cast<AllocaInst>(II)) {
      if (isa<ConstantInt>(AI->getArraySize()))
        hasStaticAllocas = true;
      else
        hasDynamicAllocas = true;
    }
  }

  // Finally, clone over the terminator.
  const Instruction *OldTI = BB->getTerminator();
  bool TerminatorDone = false;
  if (const BranchInst *BI = dyn_cast<BranchInst>(OldTI)) {
    if (BI->isConditional()) {
      // If the condition was a known constant in the callee...
      ConstantInt *Cond = dyn_cast<ConstantInt>(BI->getCondition());
      // Or is a known constant in the caller...
      if (!Cond) {
        Value *V = VMap.lookup(BI->getCondition());
        Cond = dyn_cast_or_null<ConstantInt>(V);
      }

      // Constant fold to uncond branch!
      if (Cond) {
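        // Successor 0 is the 'true' destination, so a false condition selects
        // successor 1.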
        BasicBlock *Dest = BI->getSuccessor(!Cond->getZExtValue());
        VMap[OldTI] = BranchInst::Create(Dest, NewBB);
        ToClone.push_back(Dest);
        TerminatorDone = true;
      }
    }
  } else if (const SwitchInst *SI = dyn_cast<SwitchInst>(OldTI)) {
    // If switching on a value known constant in the caller.
    ConstantInt *Cond = dyn_cast<ConstantInt>(SI->getCondition());
    if (!Cond) { // Or known constant after constant prop in the callee...
      Value *V = VMap.lookup(SI->getCondition());
      Cond = dyn_cast_or_null<ConstantInt>(V);
    }
    if (Cond) { // Constant fold to uncond branch!
      SwitchInst::ConstCaseHandle Case = *SI->findCaseValue(Cond);
      BasicBlock *Dest = const_cast<BasicBlock *>(Case.getCaseSuccessor());
      VMap[OldTI] = BranchInst::Create(Dest, NewBB);
      ToClone.push_back(Dest);
      TerminatorDone = true;
    }
  }

  if (!TerminatorDone) {
    Instruction *NewInst = OldTI->clone();
    if (OldTI->hasName())
      NewInst->setName(OldTI->getName() + NameSuffix);
    NewInst->insertInto(NewBB, NewBB->end());
    VMap[OldTI] = NewInst; // Add instruction map to value.

    if (CodeInfo) {
      CodeInfo->OrigVMap[OldTI] = NewInst;
      if (auto *CB = dyn_cast<CallBase>(OldTI))
        if (CB->hasOperandBundles())
          CodeInfo->OperandBundleCallSites.push_back(NewInst);
    }

    // Recursively clone any reachable successor blocks.
    append_range(ToClone, successors(BB->getTerminator()));
  }

  if (CodeInfo) {
    CodeInfo->ContainsCalls |= hasCalls;
    CodeInfo->ContainsMemProfMetadata |= hasMemProfMetadata;
    CodeInfo->ContainsDynamicAllocas |= hasDynamicAllocas;
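    // A static alloca outside the entry block does not behave like a static
    // alloca once cloned/inlined, so record it as a dynamic alloca.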
    CodeInfo->ContainsDynamicAllocas |=
        hasStaticAllocas && BB != &BB->getParent()->front();
  }
}

/// This works like CloneAndPruneFunctionInto, except that it does not clone the
/// entire function. Instead it starts at an instruction provided by the caller
/// and copies (and prunes) only the code reachable from that instruction.
void llvm::CloneAndPruneIntoFromInst(Function *NewFunc, const Function *OldFunc,
                                     const Instruction *StartingInst,
                                     ValueToValueMapTy &VMap,
                                     bool ModuleLevelChanges,
                                     SmallVectorImpl<ReturnInst *> &Returns,
                                     const char *NameSuffix,
                                     ClonedCodeInfo *CodeInfo) {
  assert(NameSuffix && "NameSuffix cannot be null!");

  ValueMapTypeRemapper *TypeMapper = nullptr;
  ValueMaterializer *Materializer = nullptr;

#ifndef NDEBUG
  // If the cloning starts at the beginning of the function, verify that
  // the function arguments are mapped.
  if (!StartingInst)
    for (const Argument &II : OldFunc->args())
      assert(VMap.count(&II) && "No mapping from source argument specified!");
#endif

  PruningFunctionCloner PFC(NewFunc, OldFunc, VMap, ModuleLevelChanges,
                            NameSuffix, CodeInfo);
  const BasicBlock *StartingBB;
  if (StartingInst)
    StartingBB = StartingInst->getParent();
  else {
    StartingBB = &OldFunc->getEntryBlock();
    StartingInst = &StartingBB->front();
  }

  // Collect debug intrinsics for remapping later.
  SmallVector<const DbgVariableIntrinsic *, 8> DbgIntrinsics;
  for (const auto &BB : *OldFunc) {
    for (const auto &I : BB) {
      if (const auto *DVI = dyn_cast<DbgVariableIntrinsic>(&I))
        DbgIntrinsics.push_back(DVI);
    }
  }

  // Clone the entry block, and anything recursively reachable from it.
  std::vector<const BasicBlock *> CloneWorklist;
  PFC.CloneBlock(StartingBB, StartingInst->getIterator(), CloneWorklist);
  while (!CloneWorklist.empty()) {
    const BasicBlock *BB = CloneWorklist.back();
    CloneWorklist.pop_back();
    PFC.CloneBlock(BB, BB->begin(), CloneWorklist);
  }

  // Loop over all of the basic blocks in the old function. If the block was
  // reachable, we have cloned it and the old block is now in the value map:
  // insert it into the new function in the right order. If not, ignore it.
  //
  // Defer PHI resolution until rest of function is resolved.
  SmallVector<const PHINode *, 16> PHIToResolve;
  for (const BasicBlock &BI : *OldFunc) {
    Value *V = VMap.lookup(&BI);
    BasicBlock *NewBB = cast_or_null<BasicBlock>(V);
    if (!NewBB)
      continue; // Dead block.

    // Add the new block to the new function.
    NewFunc->insert(NewFunc->end(), NewBB);

    // Handle PHI nodes specially, as we have to remove references to dead
    // blocks.
    for (const PHINode &PN : BI.phis()) {
      // PHI nodes may have been remapped to non-PHI nodes by the caller or
      // during the cloning process.
      if (isa<PHINode>(VMap[&PN]))
        PHIToResolve.push_back(&PN);
      else
        break;
    }

    // Finally, remap the terminator instructions, as those can't be remapped
    // until all BBs are mapped.
    RemapInstruction(NewBB->getTerminator(), VMap,
                     ModuleLevelChanges ? RF_None : RF_NoModuleLevelChanges,
                     TypeMapper, Materializer);
  }

  // Defer PHI resolution until the rest of the function is resolved; PHI
  // resolution requires the CFG to be up-to-date.
  for (unsigned phino = 0, e = PHIToResolve.size(); phino != e;) {
    const PHINode *OPN = PHIToResolve[phino];
    unsigned NumPreds = OPN->getNumIncomingValues();
    const BasicBlock *OldBB = OPN->getParent();
    BasicBlock *NewBB = cast<BasicBlock>(VMap[OldBB]);

    // Map operands for blocks that are live and remove operands for blocks
    // that are dead.
    for (; phino != PHIToResolve.size() &&
           PHIToResolve[phino]->getParent() == OldBB;
         ++phino) {
      OPN = PHIToResolve[phino];
      PHINode *PN = cast<PHINode>(VMap[OPN]);
      for (unsigned pred = 0, e = NumPreds; pred != e; ++pred) {
        Value *V = VMap.lookup(PN->getIncomingBlock(pred));
        if (BasicBlock *MappedBlock = cast_or_null<BasicBlock>(V)) {
          Value *InVal =
              MapValue(PN->getIncomingValue(pred), VMap,
                       ModuleLevelChanges ? RF_None : RF_NoModuleLevelChanges);
          assert(InVal && "Unknown input value?");
          PN->setIncomingValue(pred, InVal);
          PN->setIncomingBlock(pred, MappedBlock);
        } else {
          PN->removeIncomingValue(pred, false);
          --pred; // Revisit the next entry.
          --e;
        }
      }
    }

    // The loop above has removed PHI entries for those blocks that are dead
    // and has updated others. However, if a block is live (i.e. copied over)
    // but its terminator has been changed to not go to this block, then our
    // phi nodes will have invalid entries. Update the PHI nodes in this
    // case.
    PHINode *PN = cast<PHINode>(NewBB->begin());
    NumPreds = pred_size(NewBB);
    if (NumPreds != PN->getNumIncomingValues()) {
      assert(NumPreds < PN->getNumIncomingValues());
      // Count how many times each predecessor comes to this block.
      std::map<BasicBlock *, unsigned> PredCount;
      for (BasicBlock *Pred : predecessors(NewBB))
        --PredCount[Pred];

      // Figure out how many entries to remove from each PHI.
      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
        ++PredCount[PN->getIncomingBlock(i)];

      // At this point, the excess predecessor entries are positive in the
      // map. Loop over all of the PHIs and remove excess predecessor
      // entries.
      BasicBlock::iterator I = NewBB->begin();
      for (; (PN = dyn_cast<PHINode>(I)); ++I) {
        for (const auto &PCI : PredCount) {
          BasicBlock *Pred = PCI.first;
          for (unsigned NumToRemove = PCI.second; NumToRemove; --NumToRemove)
            PN->removeIncomingValue(Pred, false);
        }
      }
    }

    // If the loops above have made these phi nodes have 0 or 1 operand,
    // replace them with poison or the input value. We must do this for
    // correctness, because 0-operand phis are not valid.
    PN = cast<PHINode>(NewBB->begin());
    if (PN->getNumIncomingValues() == 0) {
      BasicBlock::iterator I = NewBB->begin();
      BasicBlock::const_iterator OldI = OldBB->begin();
      while ((PN = dyn_cast<PHINode>(I++))) {
        Value *NV = PoisonValue::get(PN->getType());
        PN->replaceAllUsesWith(NV);
        assert(VMap[&*OldI] == PN && "VMap mismatch");
        VMap[&*OldI] = NV;
        PN->eraseFromParent();
        ++OldI;
      }
    }
  }

  // Make a second pass over the PHINodes now that all of them have been
  // remapped into the new function, simplifying the PHINode and performing any
  // recursive simplifications exposed. This will transparently update the
  // WeakTrackingVH in the VMap. Notably, we rely on that so that if we coalesce
  // two PHINodes, the iteration over the old PHIs remains valid, and the
  // mapping will just map us to the new node (which may not even be a PHI
  // node).
  const DataLayout &DL = NewFunc->getParent()->getDataLayout();
  SmallSetVector<const Value *, 8> Worklist;
  for (unsigned Idx = 0, Size = PHIToResolve.size(); Idx != Size; ++Idx)
    if (isa<PHINode>(VMap[PHIToResolve[Idx]]))
      Worklist.insert(PHIToResolve[Idx]);

  // Note that we must test the size on each iteration; the worklist can grow.
  for (unsigned Idx = 0; Idx != Worklist.size(); ++Idx) {
    const Value *OrigV = Worklist[Idx];
    auto *I = dyn_cast_or_null<Instruction>(VMap.lookup(OrigV));
    if (!I)
      continue;

    // Skip over non-intrinsic callsites; we don't want to remove any nodes
    // from the CGSCC.
    CallBase *CB = dyn_cast<CallBase>(I);
    if (CB && CB->getCalledFunction() &&
        !CB->getCalledFunction()->isIntrinsic())
      continue;

    // See if this instruction simplifies.
    Value *SimpleV = simplifyInstruction(I, DL);
    if (!SimpleV)
      continue;

    // Stash away all the uses of the old instruction so we can check them for
    // recursive simplifications after a RAUW. This is cheaper than checking
    // all uses of the replacement value on the recursive step in most cases.
    for (const User *U : OrigV->users())
      Worklist.insert(cast<Instruction>(U));

    // Replace the instruction with its simplified value.
    I->replaceAllUsesWith(SimpleV);

    // If the original instruction had no side effects, remove it.
    if (isInstructionTriviallyDead(I))
      I->eraseFromParent();
    else
      VMap[OrigV] = I;
  }

  // Remap debug intrinsic operands now that all values have been mapped.
  // Doing this now (late) preserves use-before-defs in debug intrinsics. If
  // we didn't do this, ValueAsMetadata(use-before-def) operands would be
  // replaced by empty metadata. This would signal later cleanup passes to
  // remove the debug intrinsics, potentially causing incorrect locations.
  for (const auto *DVI : DbgIntrinsics) {
    if (DbgVariableIntrinsic *NewDVI =
            cast_or_null<DbgVariableIntrinsic>(VMap.lookup(DVI)))
      RemapInstruction(NewDVI, VMap,
                       ModuleLevelChanges ? RF_None : RF_NoModuleLevelChanges,
                       TypeMapper, Materializer);
  }

  // Simplify conditional branches and switches with a constant operand. We try
  // to prune these out when cloning, but if the simplification required
  // looking through PHI nodes, those are only available after forming the full
  // basic block. That may leave some here, and we still want to prune the dead
  // code as early as possible.
  Function::iterator Begin = cast<BasicBlock>(VMap[StartingBB])->getIterator();
  for (BasicBlock &BB : make_range(Begin, NewFunc->end()))
    ConstantFoldTerminator(&BB);

  // Some blocks may have become unreachable as a result. Find and delete them.
  {
    SmallPtrSet<BasicBlock *, 16> ReachableBlocks;
    SmallVector<BasicBlock *, 16> Worklist;
    Worklist.push_back(&*Begin);
    while (!Worklist.empty()) {
      BasicBlock *BB = Worklist.pop_back_val();
      if (ReachableBlocks.insert(BB).second)
        append_range(Worklist, successors(BB));
    }

    SmallVector<BasicBlock *, 16> UnreachableBlocks;
    for (BasicBlock &BB : make_range(Begin, NewFunc->end()))
      if (!ReachableBlocks.contains(&BB))
        UnreachableBlocks.push_back(&BB);
    DeleteDeadBlocks(UnreachableBlocks);
  }

  // Now that the inlined function body has been fully constructed, go through
  // and zap unconditional fall-through branches. This happens all the time when
  // specializing code: code specialization turns conditional branches into
  // uncond branches, and this code folds them.
  Function::iterator I = Begin;
  while (I != NewFunc->end()) {
    BranchInst *BI = dyn_cast<BranchInst>(I->getTerminator());
    if (!BI || BI->isConditional()) {
      ++I;
      continue;
    }

    BasicBlock *Dest = BI->getSuccessor(0);
    if (!Dest->getSinglePredecessor()) {
      ++I;
      continue;
    }

    // We shouldn't be able to get single-entry PHI nodes here, as instsimplify
    // above should have zapped all of them.
    assert(!isa<PHINode>(Dest->begin()));

    // We know all single-entry PHI nodes in the inlined function have been
    // removed, so we just need to splice the blocks.
    BI->eraseFromParent();

    // Make all PHI nodes that referred to Dest now refer to I as their source.
    Dest->replaceAllUsesWith(&*I);

    // Move all the instructions in the succ to the pred.
    I->splice(I->end(), Dest);

    // Remove the dest block.
    Dest->eraseFromParent();

    // Do not increment I, iteratively merge all things this block branches to.
  }

  // Make a final pass over the basic blocks from the old function to gather
  // any return instructions which survived folding. We have to do this here
  // because we can iteratively remove and merge returns above.
  for (Function::iterator I = cast<BasicBlock>(VMap[StartingBB])->getIterator(),
                          E = NewFunc->end();
       I != E; ++I)
    if (ReturnInst *RI = dyn_cast<ReturnInst>(I->getTerminator()))
      Returns.push_back(RI);
}

/// This works exactly like CloneFunctionInto,
/// except that it does some simple constant prop and DCE on the fly. The
/// effect of this is to copy significantly less code in cases where (for
/// example) a function call with constant arguments is inlined, and those
/// constant arguments cause a significant amount of code in the callee to be
/// dead. Since this doesn't produce an exact copy of the input, it can't be
/// used for things like CloneFunction or CloneModule.
void llvm::CloneAndPruneFunctionInto(
    Function *NewFunc, const Function *OldFunc, ValueToValueMapTy &VMap,
    bool ModuleLevelChanges, SmallVectorImpl<ReturnInst *> &Returns,
    const char *NameSuffix, ClonedCodeInfo *CodeInfo) {
  CloneAndPruneIntoFromInst(NewFunc, OldFunc, &OldFunc->front().front(), VMap,
                            ModuleLevelChanges, Returns, NameSuffix, CodeInfo);
}

/// Remaps instructions in \p Blocks using the mapping in \p VMap.
void llvm::remapInstructionsInBlocks(
    const SmallVectorImpl<BasicBlock *> &Blocks, ValueToValueMapTy &VMap) {
  // Rewrite the code to refer to itself.
  for (auto *BB : Blocks)
    for (auto &Inst : *BB)
      RemapInstruction(&Inst, VMap,
                       RF_NoModuleLevelChanges | RF_IgnoreMissingLocals);
}

/// Clones a loop \p OrigLoop. Returns the loop and the blocks in \p
/// Blocks.
///
/// Updates LoopInfo and DominatorTree assuming the loop is dominated by block
/// \p LoopDomBB. Insert the new blocks before the block specified in \p Before.
Loop *llvm::cloneLoopWithPreheader(BasicBlock *Before, BasicBlock *LoopDomBB,
                                   Loop *OrigLoop, ValueToValueMapTy &VMap,
                                   const Twine &NameSuffix, LoopInfo *LI,
                                   DominatorTree *DT,
                                   SmallVectorImpl<BasicBlock *> &Blocks) {
  Function *F = OrigLoop->getHeader()->getParent();
  Loop *ParentLoop = OrigLoop->getParentLoop();
  DenseMap<Loop *, Loop *> LMap;

  Loop *NewLoop = LI->AllocateLoop();
  LMap[OrigLoop] = NewLoop;
  if (ParentLoop)
    ParentLoop->addChildLoop(NewLoop);
  else
    LI->addTopLevelLoop(NewLoop);

  BasicBlock *OrigPH = OrigLoop->getLoopPreheader();
  assert(OrigPH && "No preheader");
  BasicBlock *NewPH = CloneBasicBlock(OrigPH, VMap, NameSuffix, F);
  // To rename the loop PHIs.
  VMap[OrigPH] = NewPH;
  Blocks.push_back(NewPH);

  // Update LoopInfo.
  if (ParentLoop)
    ParentLoop->addBasicBlockToLoop(NewPH, *LI);

  // Update DominatorTree.
  DT->addNewBlock(NewPH, LoopDomBB);

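  // getLoopsInPreorder visits a parent loop before any of its subloops, so by
  // the time a loop is reached its cloned parent is already in LMap.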
  for (Loop *CurLoop : OrigLoop->getLoopsInPreorder()) {
    Loop *&NewLoop = LMap[CurLoop];
    if (!NewLoop) {
      NewLoop = LI->AllocateLoop();

      // Establish the parent/child relationship.
      Loop *OrigParent = CurLoop->getParentLoop();
      assert(OrigParent && "Could not find the original parent loop");
      Loop *NewParentLoop = LMap[OrigParent];
      assert(NewParentLoop && "Could not find the new parent loop");

      NewParentLoop->addChildLoop(NewLoop);
    }
  }

  for (BasicBlock *BB : OrigLoop->getBlocks()) {
    Loop *CurLoop = LI->getLoopFor(BB);
    Loop *&NewLoop = LMap[CurLoop];
    assert(NewLoop && "Expecting new loop to be allocated");

    BasicBlock *NewBB = CloneBasicBlock(BB, VMap, NameSuffix, F);
    VMap[BB] = NewBB;

    // Update LoopInfo.
    NewLoop->addBasicBlockToLoop(NewBB, *LI);

    // Add DominatorTree node. After seeing all blocks, update to correct
    // IDom.
    DT->addNewBlock(NewBB, NewPH);

    Blocks.push_back(NewBB);
  }

  for (BasicBlock *BB : OrigLoop->getBlocks()) {
    // Update loop headers.
    Loop *CurLoop = LI->getLoopFor(BB);
    if (BB == CurLoop->getHeader())
      LMap[CurLoop]->moveToHeader(cast<BasicBlock>(VMap[BB]));

    // Update DominatorTree.
    BasicBlock *IDomBB = DT->getNode(BB)->getIDom()->getBlock();
    DT->changeImmediateDominator(cast<BasicBlock>(VMap[BB]),
                                 cast<BasicBlock>(VMap[IDomBB]));
  }

  // Move the cloned blocks, which were appended at the end of the function's
  // block list, to their intended position just before Before.
  F->splice(Before->getIterator(), F, NewPH->getIterator());
  F->splice(Before->getIterator(), F, NewLoop->getHeader()->getIterator(),
            F->end());

  return NewLoop;
}

/// Duplicate non-Phi instructions from the beginning of BB up to the StopAt
/// instruction into a split block between BB and its predecessor.
BasicBlock *llvm::DuplicateInstructionsInSplitBetween(
    BasicBlock *BB, BasicBlock *PredBB, Instruction *StopAt,
    ValueToValueMapTy &ValueMapping, DomTreeUpdater &DTU) {

  assert(count(successors(PredBB), BB) == 1 &&
         "There must be a single edge between PredBB and BB!");
  // We are going to have to map operands from the original BB block to the new
  // copy of the block 'NewBB'. If there are PHI nodes in BB, evaluate them to
  // account for entry from PredBB.
  BasicBlock::iterator BI = BB->begin();
  for (; PHINode *PN = dyn_cast<PHINode>(BI); ++BI)
    ValueMapping[PN] = PN->getIncomingValueForBlock(PredBB);

  BasicBlock *NewBB = SplitEdge(PredBB, BB);
  NewBB->setName(PredBB->getName() + ".split");
  Instruction *NewTerm = NewBB->getTerminator();

  // FIXME: SplitEdge does not yet take a DTU, so we include the split edge
  // in the update set here.
  DTU.applyUpdates({{DominatorTree::Delete, PredBB, BB},
                    {DominatorTree::Insert, PredBB, NewBB},
                    {DominatorTree::Insert, NewBB, BB}});

  // Clone the non-phi instructions of BB into NewBB, keeping track of the
  // mapping and using it to remap operands in the cloned instructions.
  // Stop once we see the terminator too. This covers the case where BB's
  // terminator gets replaced and StopAt == BB's terminator.
  for (; StopAt != &*BI && BB->getTerminator() != &*BI; ++BI) {
    Instruction *New = BI->clone();
    New->setName(BI->getName());
    New->insertBefore(NewTerm);
    ValueMapping[&*BI] = New;

    // Remap operands to patch up intra-block references.
    for (unsigned i = 0, e = New->getNumOperands(); i != e; ++i)
      if (Instruction *Inst = dyn_cast<Instruction>(New->getOperand(i))) {
        auto I = ValueMapping.find(Inst);
        if (I != ValueMapping.end())
          New->setOperand(i, I->second);
      }
  }

  return NewBB;
}

void llvm::cloneNoAliasScopes(ArrayRef<MDNode *> NoAliasDeclScopes,
                              DenseMap<MDNode *, MDNode *> &ClonedScopes,
                              StringRef Ext, LLVMContext &Context) {
  MDBuilder MDB(Context);

  for (auto *ScopeList : NoAliasDeclScopes) {
    for (const auto &MDOperand : ScopeList->operands()) {
      if (MDNode *MD = dyn_cast<MDNode>(MDOperand)) {
        AliasScopeNode SNANode(MD);

        std::string Name;
        auto ScopeName = SNANode.getName();
        if (!ScopeName.empty())
          Name = (Twine(ScopeName) + ":" + Ext).str();
        else
          Name = std::string(Ext);

        MDNode *NewScope = MDB.createAnonymousAliasScope(
            const_cast<MDNode *>(SNANode.getDomain()), Name);
        ClonedScopes.insert(std::make_pair(MD, NewScope));
      }
    }
  }
}

void llvm::adaptNoAliasScopes(Instruction *I,
                              const DenseMap<MDNode *, MDNode *> &ClonedScopes,
                              LLVMContext &Context) {
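  // Returns a scope list in which each cloned scope is replaced by its clone,
  // or nullptr if the list does not reference any cloned scope.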
  auto CloneScopeList = [&](const MDNode *ScopeList) -> MDNode * {
    bool NeedsReplacement = false;
    SmallVector<Metadata *, 8> NewScopeList;
    for (const auto &MDOp : ScopeList->operands()) {
      if (MDNode *MD = dyn_cast<MDNode>(MDOp)) {
        if (auto *NewMD = ClonedScopes.lookup(MD)) {
          NewScopeList.push_back(NewMD);
          NeedsReplacement = true;
          continue;
        }
        NewScopeList.push_back(MD);
      }
    }
    if (NeedsReplacement)
      return MDNode::get(Context, NewScopeList);
    return nullptr;
  };

  if (auto *Decl = dyn_cast<NoAliasScopeDeclInst>(I))
    if (auto *NewScopeList = CloneScopeList(Decl->getScopeList()))
      Decl->setScopeList(NewScopeList);

  auto replaceWhenNeeded = [&](unsigned MD_ID) {
    if (const MDNode *CSNoAlias = I->getMetadata(MD_ID))
      if (auto *NewScopeList = CloneScopeList(CSNoAlias))
        I->setMetadata(MD_ID, NewScopeList);
  };
  replaceWhenNeeded(LLVMContext::MD_noalias);
  replaceWhenNeeded(LLVMContext::MD_alias_scope);
}

void llvm::cloneAndAdaptNoAliasScopes(ArrayRef<MDNode *> NoAliasDeclScopes,
                                      ArrayRef<BasicBlock *> NewBlocks,
                                      LLVMContext &Context, StringRef Ext) {
  if (NoAliasDeclScopes.empty())
    return;

  DenseMap<MDNode *, MDNode *> ClonedScopes;
  LLVM_DEBUG(dbgs() << "cloneAndAdaptNoAliasScopes: cloning "
                    << NoAliasDeclScopes.size() << " node(s)\n");

  cloneNoAliasScopes(NoAliasDeclScopes, ClonedScopes, Ext, Context);
  // Identify instructions using metadata that needs adaptation
  for (BasicBlock *NewBlock : NewBlocks)
    for (Instruction &I : *NewBlock)
      adaptNoAliasScopes(&I, ClonedScopes, Context);
}

void llvm::cloneAndAdaptNoAliasScopes(ArrayRef<MDNode *> NoAliasDeclScopes,
                                      Instruction *IStart, Instruction *IEnd,
                                      LLVMContext &Context, StringRef Ext) {
  if (NoAliasDeclScopes.empty())
    return;

  DenseMap<MDNode *, MDNode *> ClonedScopes;
  LLVM_DEBUG(dbgs() << "cloneAndAdaptNoAliasScopes: cloning "
                    << NoAliasDeclScopes.size() << " node(s)\n");

  cloneNoAliasScopes(NoAliasDeclScopes, ClonedScopes, Ext, Context);
  // Identify instructions using metadata that needs adaptation
  assert(IStart->getParent() == IEnd->getParent() && "different basic block ?");
  auto ItStart = IStart->getIterator();
  auto ItEnd = IEnd->getIterator();
  ++ItEnd; // IEnd is included, increment ItEnd to get the end of the range
  for (auto &I : llvm::make_range(ItStart, ItEnd))
    adaptNoAliasScopes(&I, ClonedScopes, Context);
}

void llvm::identifyNoAliasScopesToClone(
    ArrayRef<BasicBlock *> BBs, SmallVectorImpl<MDNode *> &NoAliasDeclScopes) {
  for (BasicBlock *BB : BBs)
    for (Instruction &I : *BB)
      if (auto *Decl = dyn_cast<NoAliasScopeDeclInst>(&I))
        NoAliasDeclScopes.push_back(Decl->getScopeList());
}

void llvm::identifyNoAliasScopesToClone(
    BasicBlock::iterator Start, BasicBlock::iterator End,
    SmallVectorImpl<MDNode *> &NoAliasDeclScopes) {
  for (Instruction &I : make_range(Start, End))
    if (auto *Decl = dyn_cast<NoAliasScopeDeclInst>(&I))
      NoAliasDeclScopes.push_back(Decl->getScopeList());
}