//===- StackProtector.cpp - Stack Protector Insertion ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass inserts stack protectors into functions which need them. A variable
// with a random value in it is stored onto the stack before the local variables
// are allocated. Upon exiting the function, the stored value is checked. If it
// has changed, then there was some sort of violation and the program aborts.
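//
// Functions opt in via the ssp, sspstrong and sspreq function attributes
// (typically set by clang's -fstack-protector, -fstack-protector-strong and
// -fstack-protector-all flags); see requiresStackProtector() below for the
// exact heuristics.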
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/StackProtector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/EHPersonalities.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include <optional>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "stack-protector"

STATISTIC(NumFunProtected, "Number of functions protected");
STATISTIC(NumAddrTaken, "Number of local variables that have their address"
                        " taken.");

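// When enabled (the default), prefer deferring the stack guard check to
// SelectionDAG rather than emitting it in IR; see InsertStackProtectors()
// below for the exact conditions under which that happens.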
static cl::opt<bool> EnableSelectionDAGSP("enable-selectiondag-sp",
                                          cl::init(true), cl::Hidden);
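// When set, do not place a guard check before noreturn calls that may unwind
// (e.g. __cxa_throw); checks are then only emitted before returns.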
static cl::opt<bool> DisableCheckNoReturn("disable-check-noreturn-call",
                                          cl::init(false), cl::Hidden);

/// InsertStackProtectors - Insert code into the prologue and epilogue of the
/// function.
///
///  - The prologue code loads and stores the stack guard onto the stack.
///  - The epilogue checks the value stored in the prologue against the original
///    value. It calls __stack_chk_fail if they differ.
static bool InsertStackProtectors(const TargetMachine *TM, Function *F,
                                  DomTreeUpdater *DTU, bool &HasPrologue,
                                  bool &HasIRCheck);

/// CreateFailBB - Create a basic block to jump to when the stack protector
/// check fails.
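/// The block calls __stack_chk_fail (or, on OpenBSD, __stack_smash_handler
/// with the function's name) and is terminated by unreachable.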
static BasicBlock *CreateFailBB(Function *F, const Triple &Trip);

bool SSPLayoutInfo::shouldEmitSDCheck(const BasicBlock &BB) const {
  return HasPrologue && !HasIRCheck && isa<ReturnInst>(BB.getTerminator());
}

void SSPLayoutInfo::copyToMachineFrameInfo(MachineFrameInfo &MFI) const {
  if (Layout.empty())
    return;

  for (int I = 0, E = MFI.getObjectIndexEnd(); I != E; ++I) {
    if (MFI.isDeadObjectIndex(I))
      continue;

    const AllocaInst *AI = MFI.getObjectAllocation(I);
    if (!AI)
      continue;

    SSPLayoutMap::const_iterator LI = Layout.find(AI);
    if (LI == Layout.end())
      continue;

    MFI.setObjectSSPLayout(I, LI->second);
  }
}

SSPLayoutInfo SSPLayoutAnalysis::run(Function &F,
                                     FunctionAnalysisManager &FAM) {

  SSPLayoutInfo Info;
  Info.RequireStackProtector =
      SSPLayoutAnalysis::requiresStackProtector(&F, &Info.Layout);
  Info.SSPBufferSize = F.getFnAttributeAsParsedInteger(
      "stack-protector-buffer-size", SSPLayoutInfo::DefaultSSPBufferSize);
  return Info;
}

AnalysisKey SSPLayoutAnalysis::Key;

PreservedAnalyses StackProtectorPass::run(Function &F,
                                          FunctionAnalysisManager &FAM) {
  auto &Info = FAM.getResult<SSPLayoutAnalysis>(F);
  auto *DT = FAM.getCachedResult<DominatorTreeAnalysis>(F);
  DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Lazy);

  if (!Info.RequireStackProtector)
    return PreservedAnalyses::all();

  // TODO(etienneb): Functions with funclets are not correctly supported yet.
  // Do nothing if this function uses a funclet-based personality.
  if (F.hasPersonalityFn()) {
    EHPersonality Personality = classifyEHPersonality(F.getPersonalityFn());
    if (isFuncletEHPersonality(Personality))
      return PreservedAnalyses::all();
  }

  ++NumFunProtected;
  bool Changed = InsertStackProtectors(TM, &F, DT ? &DTU : nullptr,
                                       Info.HasPrologue, Info.HasIRCheck);
#ifdef EXPENSIVE_CHECKS
  assert((!DT || DT->verify(DominatorTree::VerificationLevel::Full)) &&
         "Failed to maintain validity of domtree!");
#endif

  if (!Changed)
    return PreservedAnalyses::all();
  PreservedAnalyses PA;
  PA.preserve<SSPLayoutAnalysis>();
  PA.preserve<DominatorTreeAnalysis>();
  return PA;
}

char StackProtector::ID = 0;

StackProtector::StackProtector() : FunctionPass(ID) {
  initializeStackProtectorPass(*PassRegistry::getPassRegistry());
}

INITIALIZE_PASS_BEGIN(StackProtector, DEBUG_TYPE,
                      "Insert stack protectors", false, true)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_END(StackProtector, DEBUG_TYPE,
                    "Insert stack protectors", false, true)

FunctionPass *llvm::createStackProtectorPass() { return new StackProtector(); }

void StackProtector::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<TargetPassConfig>();
  AU.addPreserved<DominatorTreeWrapperPass>();
}

bool StackProtector::runOnFunction(Function &Fn) {
  F = &Fn;
  M = F->getParent();
  if (auto *DTWP = getAnalysisIfAvailable<DominatorTreeWrapperPass>())
    DTU.emplace(DTWP->getDomTree(), DomTreeUpdater::UpdateStrategy::Lazy);
  TM = &getAnalysis<TargetPassConfig>().getTM<TargetMachine>();
  LayoutInfo.HasPrologue = false;
  LayoutInfo.HasIRCheck = false;

  LayoutInfo.SSPBufferSize = Fn.getFnAttributeAsParsedInteger(
      "stack-protector-buffer-size", SSPLayoutInfo::DefaultSSPBufferSize);
  if (!requiresStackProtector(F, &LayoutInfo.Layout))
    return false;

  // TODO(etienneb): Functions with funclets are not correctly supported yet.
  // Do nothing if this function uses a funclet-based personality.
  if (Fn.hasPersonalityFn()) {
    EHPersonality Personality = classifyEHPersonality(Fn.getPersonalityFn());
    if (isFuncletEHPersonality(Personality))
      return false;
  }

  ++NumFunProtected;
  bool Changed =
      InsertStackProtectors(TM, F, DTU ? &*DTU : nullptr,
                            LayoutInfo.HasPrologue, LayoutInfo.HasIRCheck);
#ifdef EXPENSIVE_CHECKS
  assert((!DTU ||
          DTU->getDomTree().verify(DominatorTree::VerificationLevel::Full)) &&
         "Failed to maintain validity of domtree!");
#endif
  DTU.reset();
  return Changed;
}

/// \param [out] IsLarge is set to true if a protectable array is found and
/// it is "large" (>= ssp-buffer-size). In the case of a structure with
/// multiple arrays, this gets set if any of them is large.
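///
/// As an illustrative example: with the default ssp-buffer-size of 8, a
/// [16 x i8] member is reported as a large protectable array, while a
/// [4 x i8] member only qualifies (as a small array) under the strong
/// heuristic.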
static bool ContainsProtectableArray(Type *Ty, Module *M, unsigned SSPBufferSize,
                                     bool &IsLarge, bool Strong,
                                     bool InStruct) {
  if (!Ty)
    return false;
  if (ArrayType *AT = dyn_cast<ArrayType>(Ty)) {
    if (!AT->getElementType()->isIntegerTy(8)) {
      // If we're on a non-Darwin platform or we're inside of a structure, don't
      // add stack protectors unless the array is a character array.
      // However, in strong mode any array, regardless of type and size,
      // triggers a protector.
      if (!Strong && (InStruct || !Triple(M->getTargetTriple()).isOSDarwin()))
        return false;
    }

    // If an array has at least SSPBufferSize bytes of allocated space, then we
    // emit stack protectors.
    if (SSPBufferSize <= M->getDataLayout().getTypeAllocSize(AT)) {
      IsLarge = true;
      return true;
    }

    if (Strong)
      // Require a protector for all arrays in strong mode
      return true;
  }

  const StructType *ST = dyn_cast<StructType>(Ty);
  if (!ST)
    return false;

  bool NeedsProtector = false;
  for (Type *ET : ST->elements())
    if (ContainsProtectableArray(ET, M, SSPBufferSize, IsLarge, Strong, true)) {
      // If the element is a protectable array and is large (>= SSPBufferSize)
      // then we are done.  If the protectable array is not large, then
      // keep looking in case a subsequent element is a large array.
      if (IsLarge)
        return true;
      NeedsProtector = true;
    }

  return NeedsProtector;
}

/// Check whether a stack allocation has its address taken, or is accessed in
/// a way that might go beyond its bounds.
static bool HasAddressTaken(const Instruction *AI, TypeSize AllocSize,
                            Module *M,
                            SmallPtrSet<const PHINode *, 16> &VisitedPHIs) {
  const DataLayout &DL = M->getDataLayout();
  for (const User *U : AI->users()) {
    const auto *I = cast<Instruction>(U);
    // If this instruction accesses memory make sure it doesn't access beyond
    // the bounds of the allocated object.
    std::optional<MemoryLocation> MemLoc = MemoryLocation::getOrNone(I);
    if (MemLoc && MemLoc->Size.hasValue() &&
        !TypeSize::isKnownGE(AllocSize, MemLoc->Size.getValue()))
      return true;
    switch (I->getOpcode()) {
    case Instruction::Store:
      if (AI == cast<StoreInst>(I)->getValueOperand())
        return true;
      break;
    case Instruction::AtomicCmpXchg:
      // cmpxchg conceptually includes both a load and store from the same
      // location. So, like store, the value being stored is what matters.
      if (AI == cast<AtomicCmpXchgInst>(I)->getNewValOperand())
        return true;
      break;
    case Instruction::PtrToInt:
      if (AI == cast<PtrToIntInst>(I)->getOperand(0))
        return true;
      break;
    case Instruction::Call: {
      // Ignore intrinsics that do not become real instructions.
      // TODO: Narrow this to intrinsics that have store-like effects.
      const auto *CI = cast<CallInst>(I);
      if (!CI->isDebugOrPseudoInst() && !CI->isLifetimeStartOrEnd())
        return true;
      break;
    }
    case Instruction::Invoke:
      return true;
    case Instruction::GetElementPtr: {
      // If the GEP offset is out-of-bounds, or is non-constant and so has to be
      // assumed to be potentially out-of-bounds, then any memory access that
      // would use it could also be out-of-bounds meaning stack protection is
      // required.
      const GetElementPtrInst *GEP = cast<GetElementPtrInst>(I);
      unsigned IndexSize = DL.getIndexTypeSizeInBits(I->getType());
      APInt Offset(IndexSize, 0);
      if (!GEP->accumulateConstantOffset(DL, Offset))
        return true;
      TypeSize OffsetSize = TypeSize::getFixed(Offset.getLimitedValue());
      if (!TypeSize::isKnownGT(AllocSize, OffsetSize))
        return true;
      // Adjust AllocSize to be the space remaining after this offset.
      // We can't subtract a fixed size from a scalable one, so in that case
      // assume the scalable value is of minimum size.
      TypeSize NewAllocSize =
          TypeSize::getFixed(AllocSize.getKnownMinValue()) - OffsetSize;
      if (HasAddressTaken(I, NewAllocSize, M, VisitedPHIs))
        return true;
      break;
    }
    case Instruction::BitCast:
    case Instruction::Select:
    case Instruction::AddrSpaceCast:
      if (HasAddressTaken(I, AllocSize, M, VisitedPHIs))
        return true;
      break;
    case Instruction::PHI: {
      // Keep track of what PHI nodes we have already visited to ensure
      // they are only visited once.
      const auto *PN = cast<PHINode>(I);
      if (VisitedPHIs.insert(PN).second)
        if (HasAddressTaken(PN, AllocSize, M, VisitedPHIs))
          return true;
      break;
    }
    case Instruction::Load:
    case Instruction::AtomicRMW:
    case Instruction::Ret:
      // These instructions take an address operand, but have load-like or
      // other innocuous behavior that should not trigger a stack protector.
      // atomicrmw conceptually has both load and store semantics, but the
      // value being stored must be an integer; so if a pointer is being
      // stored, we'll catch it in the PtrToInt case above.
      break;
    default:
      // Conservatively return true for any instruction that takes an address
      // operand, but is not handled above.
      return true;
    }
  }
  return false;
}

/// Search for the first call to the llvm.stackprotector intrinsic and return it
/// if present.
static const CallInst *findStackProtectorIntrinsic(Function &F) {
  for (const BasicBlock &BB : F)
    for (const Instruction &I : BB)
      if (const auto *II = dyn_cast<IntrinsicInst>(&I))
        if (II->getIntrinsicID() == Intrinsic::stackprotector)
          return II;
  return nullptr;
}

/// Check whether or not this function needs a stack protector based
/// upon the stack protector level.
///
/// We use two heuristics: a standard (ssp) and strong (sspstrong).
/// The standard heuristic adds a guard variable to functions that call
/// alloca with either a variable size or a size >= SSPBufferSize, to
/// functions with character buffers larger than SSPBufferSize, and to
/// functions with aggregates containing character buffers larger than
/// SSPBufferSize. The strong heuristic adds a guard variable to functions
/// that call alloca regardless of size, to functions with any buffer
/// regardless of type and size, to functions with aggregates that contain
/// any buffer regardless of type and size, and to functions that contain
/// stack-based variables that have had their address taken.
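///
/// As an illustrative sketch (not an exhaustive list): under the standard
/// heuristic a frame containing
///   %buf = alloca [64 x i8]
/// gets a guard because it holds a large character buffer, while under the
/// strong heuristic even
///   %x = alloca i32
/// gets one if its address escapes (e.g. is passed to a call).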
bool SSPLayoutAnalysis::requiresStackProtector(Function *F,
                                               SSPLayoutMap *Layout) {
  Module *M = F->getParent();
  bool Strong = false;
  bool NeedsProtector = false;

  // The set of PHI nodes visited when determining if a variable's reference has
  // been taken.  This set is maintained to ensure we don't visit the same PHI
  // node multiple times.
  SmallPtrSet<const PHINode *, 16> VisitedPHIs;

  unsigned SSPBufferSize = F->getFnAttributeAsParsedInteger(
      "stack-protector-buffer-size", SSPLayoutInfo::DefaultSSPBufferSize);

  if (F->hasFnAttribute(Attribute::SafeStack))
    return false;

  // We are constructing the OptimizationRemarkEmitter on the fly rather than
  // using the analysis pass to avoid building DominatorTree and LoopInfo which
  // are not available this late in the IR pipeline.
  OptimizationRemarkEmitter ORE(F);

  if (F->hasFnAttribute(Attribute::StackProtectReq)) {
    if (!Layout)
      return true;
    ORE.emit([&]() {
      return OptimizationRemark(DEBUG_TYPE, "StackProtectorRequested", F)
             << "Stack protection applied to function "
             << ore::NV("Function", F)
             << " due to a function attribute or command-line switch";
    });
    NeedsProtector = true;
    Strong = true; // Use the same heuristic as strong to determine SSPLayout
  } else if (F->hasFnAttribute(Attribute::StackProtectStrong))
    Strong = true;
  else if (!F->hasFnAttribute(Attribute::StackProtect))
    return false;

  for (const BasicBlock &BB : *F) {
    for (const Instruction &I : BB) {
      if (const AllocaInst *AI = dyn_cast<AllocaInst>(&I)) {
        if (AI->isArrayAllocation()) {
          auto RemarkBuilder = [&]() {
            return OptimizationRemark(DEBUG_TYPE, "StackProtectorAllocaOrArray",
                                      &I)
                   << "Stack protection applied to function "
                   << ore::NV("Function", F)
                   << " due to a call to alloca or use of a variable length "
                      "array";
          };
          if (const auto *CI = dyn_cast<ConstantInt>(AI->getArraySize())) {
            if (CI->getLimitedValue(SSPBufferSize) >= SSPBufferSize) {
              // A call to alloca with size >= SSPBufferSize requires
              // stack protectors.
              if (!Layout)
                return true;
              Layout->insert(
                  std::make_pair(AI, MachineFrameInfo::SSPLK_LargeArray));
              ORE.emit(RemarkBuilder);
              NeedsProtector = true;
            } else if (Strong) {
              // Require protectors for all alloca calls in strong mode.
              if (!Layout)
                return true;
              Layout->insert(
                  std::make_pair(AI, MachineFrameInfo::SSPLK_SmallArray));
              ORE.emit(RemarkBuilder);
              NeedsProtector = true;
            }
          } else {
            // A call to alloca with a variable size requires protectors.
            if (!Layout)
              return true;
            Layout->insert(
                std::make_pair(AI, MachineFrameInfo::SSPLK_LargeArray));
            ORE.emit(RemarkBuilder);
            NeedsProtector = true;
          }
          continue;
        }

        bool IsLarge = false;
        if (ContainsProtectableArray(AI->getAllocatedType(), M, SSPBufferSize,
                                     IsLarge, Strong, false)) {
          if (!Layout)
            return true;
          Layout->insert(std::make_pair(
              AI, IsLarge ? MachineFrameInfo::SSPLK_LargeArray
                          : MachineFrameInfo::SSPLK_SmallArray));
          ORE.emit([&]() {
            return OptimizationRemark(DEBUG_TYPE, "StackProtectorBuffer", &I)
                   << "Stack protection applied to function "
                   << ore::NV("Function", F)
                   << " due to a stack allocated buffer or struct containing a "
                      "buffer";
          });
          NeedsProtector = true;
          continue;
        }

        if (Strong &&
            HasAddressTaken(
                AI, M->getDataLayout().getTypeAllocSize(AI->getAllocatedType()),
                M, VisitedPHIs)) {
          ++NumAddrTaken;
          if (!Layout)
            return true;
          Layout->insert(std::make_pair(AI, MachineFrameInfo::SSPLK_AddrOf));
          ORE.emit([&]() {
            return OptimizationRemark(DEBUG_TYPE, "StackProtectorAddressTaken",
                                      &I)
                   << "Stack protection applied to function "
                   << ore::NV("Function", F)
                   << " due to the address of a local variable being taken";
          });
          NeedsProtector = true;
        }
        // Clear any PHIs that we visited, to make sure we examine all uses of
        // any subsequent allocas that we look at.
        VisitedPHIs.clear();
      }
    }
  }

  return NeedsProtector;
}

/// Create a load of the stack guard and, if requested, report whether
/// SelectionDAG-based SSP is supported.
static Value *getStackGuard(const TargetLoweringBase *TLI, Module *M,
                            IRBuilder<> &B,
                            bool *SupportsSelectionDAGSP = nullptr) {
  Value *Guard = TLI->getIRStackGuard(B);
  StringRef GuardMode = M->getStackProtectorGuard();
  if ((GuardMode == "tls" || GuardMode.empty()) && Guard)
    return B.CreateLoad(B.getPtrTy(), Guard, true, "StackGuard");

  // Use SelectionDAG SSP handling, since there isn't an IR guard.
  //
  // This is somewhat awkward, since we optionally report here whether a
  // SelectionDAG stack protector should be used. The reason is that the
  // answer is strictly defined as !TLI->getIRStackGuard(B), where
  // getIRStackGuard also mutates the IR. There is no way to obtain this bit
  // without mutating the IR, so it has to be computed at this point.
  //
  // We could have defined a new hook TLI::supportsSelectionDAGSP(), but that
  // would put more of a burden on backends to override, especially when it
  // conveys the same information getIRStackGuard() already gives.
  if (SupportsSelectionDAGSP)
    *SupportsSelectionDAGSP = true;
  TLI->insertSSPDeclarations(*M);
  return B.CreateCall(Intrinsic::getDeclaration(M, Intrinsic::stackguard));
}

/// Insert code into the entry block that stores the stack guard
/// variable onto the stack:
///
///   entry:
///     StackGuardSlot = alloca i8*
///     StackGuard = <stack guard>
///     call void @llvm.stackprotector(StackGuard, StackGuardSlot)
///
/// Returns true if the platform/triple supports the stackprotectorcreate pseudo
/// node.
static bool CreatePrologue(Function *F, Module *M, Instruction *CheckLoc,
                           const TargetLoweringBase *TLI, AllocaInst *&AI) {
  bool SupportsSelectionDAGSP = false;
  IRBuilder<> B(&F->getEntryBlock().front());
  PointerType *PtrTy = PointerType::getUnqual(CheckLoc->getContext());
  AI = B.CreateAlloca(PtrTy, nullptr, "StackGuardSlot");

  Value *GuardSlot = getStackGuard(TLI, M, B, &SupportsSelectionDAGSP);
  B.CreateCall(Intrinsic::getDeclaration(M, Intrinsic::stackprotector),
               {GuardSlot, AI});
  return SupportsSelectionDAGSP;
}

bool InsertStackProtectors(const TargetMachine *TM, Function *F,
                           DomTreeUpdater *DTU, bool &HasPrologue,
                           bool &HasIRCheck) {
  auto *M = F->getParent();
  auto *TLI = TM->getSubtargetImpl(*F)->getTargetLowering();

  // If the target wants to XOR the frame pointer into the guard value, it's
  // impossible to emit the check in IR, so the target *must* support stack
  // protection in SDAG.
  bool SupportsSelectionDAGSP =
      TLI->useStackGuardXorFP() ||
      (EnableSelectionDAGSP && !TM->Options.EnableFastISel);
  AllocaInst *AI = nullptr; // Place on stack that stores the stack guard.
  BasicBlock *FailBB = nullptr;

  for (BasicBlock &BB : llvm::make_early_inc_range(*F)) {
    // This is a stack protector auto-generated check BB; skip it.
    if (&BB == FailBB)
      continue;
    Instruction *CheckLoc = dyn_cast<ReturnInst>(BB.getTerminator());
    if (!CheckLoc && !DisableCheckNoReturn)
      for (auto &Inst : BB)
        if (auto *CB = dyn_cast<CallBase>(&Inst))
          // Do the stack check before noreturn calls that aren't nounwind
          // (e.g. __cxa_throw).
          if (CB->doesNotReturn() && !CB->doesNotThrow()) {
            CheckLoc = CB;
            break;
          }

    if (!CheckLoc)
      continue;

    // Generate prologue instrumentation if not already generated.
    if (!HasPrologue) {
      HasPrologue = true;
      SupportsSelectionDAGSP &= CreatePrologue(F, M, CheckLoc, TLI, AI);
    }

    // SelectionDAG based code generation. Nothing else needs to be done here.
    // The epilogue instrumentation is postponed to SelectionDAG.
    if (SupportsSelectionDAGSP)
      break;

    // Find the stack guard slot if the prologue was not created by this pass
    // itself via a previous call to CreatePrologue().
    if (!AI) {
      const CallInst *SPCall = findStackProtectorIntrinsic(*F);
      assert(SPCall && "Call to llvm.stackprotector is missing");
      AI = cast<AllocaInst>(SPCall->getArgOperand(1));
    }

    // Set HasIRCheck to true, so that SelectionDAG will not generate its own
    // version. SelectionDAG calls 'shouldEmitSDCheck' to check whether
    // instrumentation has already been generated.
    HasIRCheck = true;

    // If we're instrumenting a block with a tail call, the check has to be
    // inserted before the call rather than between it and the return. The
    // verifier guarantees that a tail call is either directly before the
    // return or with a single correct bitcast of the return value in between so
    // we don't need to worry about many situations here.
    Instruction *Prev = CheckLoc->getPrevNonDebugInstruction();
    if (Prev && isa<CallInst>(Prev) && cast<CallInst>(Prev)->isTailCall())
      CheckLoc = Prev;
    else if (Prev) {
      Prev = Prev->getPrevNonDebugInstruction();
      if (Prev && isa<CallInst>(Prev) && cast<CallInst>(Prev)->isTailCall())
        CheckLoc = Prev;
    }

    // Generate epilogue instrumentation. The epilogue instrumentation can be
    // function-based or inlined depending on which mechanism the target is
    // providing.
    if (Function *GuardCheck = TLI->getSSPStackGuardCheck(*M)) {
      // Generate the function-based epilogue instrumentation.
      // The target provides a guard check function, generate a call to it.
      IRBuilder<> B(CheckLoc);
      LoadInst *Guard = B.CreateLoad(B.getPtrTy(), AI, true, "Guard");
      CallInst *Call = B.CreateCall(GuardCheck, {Guard});
      Call->setAttributes(GuardCheck->getAttributes());
      Call->setCallingConv(GuardCheck->getCallingConv());
    } else {
      // Generate the epilogue with inline instrumentation.
      // If we do not support SelectionDAG based calls, generate IR level
      // calls.
      //
      // For each block with a return instruction, convert this:
      //
      //   return:
      //     ...
      //     ret ...
      //
      // into this:
      //
      //   return:
      //     ...
      //     %1 = <stack guard>
      //     %2 = load StackGuardSlot
      //     %3 = icmp ne i1 %1, %2
      //     br i1 %3, label %CallStackCheckFailBlk, label %SP_return
      //
      //   SP_return:
      //     ret ...
      //
      //   CallStackCheckFailBlk:
      //     call void @__stack_chk_fail()
      //     unreachable

      // Create the FailBB. We duplicate the BB every time since the MI tail
      // merge pass will merge together all of the various BB into one including
      // fail BB generated by the stack protector pseudo instruction.
      if (!FailBB)
        FailBB = CreateFailBB(F, TM->getTargetTriple());

      IRBuilder<> B(CheckLoc);
      Value *Guard = getStackGuard(TLI, M, B);
      LoadInst *LI2 = B.CreateLoad(B.getPtrTy(), AI, true);
      auto *Cmp = cast<ICmpInst>(B.CreateICmpNE(Guard, LI2));
      auto SuccessProb =
          BranchProbabilityInfo::getBranchProbStackProtector(true);
      auto FailureProb =
          BranchProbabilityInfo::getBranchProbStackProtector(false);
      MDNode *Weights = MDBuilder(F->getContext())
                            .createBranchWeights(FailureProb.getNumerator(),
                                                 SuccessProb.getNumerator());

      SplitBlockAndInsertIfThen(Cmp, CheckLoc,
                                /*Unreachable=*/false, Weights, DTU,
                                /*LI=*/nullptr, /*ThenBlock=*/FailBB);

      auto *BI = cast<BranchInst>(Cmp->getParent()->getTerminator());
      BasicBlock *NewBB = BI->getSuccessor(1);
      NewBB->setName("SP_return");
      NewBB->moveAfter(&BB);

      Cmp->setPredicate(Cmp->getInversePredicate());
      BI->swapSuccessors();
    }
  }

  // Return whether we added any instrumentation. HasPrologue remains false
  // only if no block required a check (e.g. the function has no return
  // statements).
  return HasPrologue;
}

BasicBlock *CreateFailBB(Function *F, const Triple &Trip) {
  auto *M = F->getParent();
  LLVMContext &Context = F->getContext();
  BasicBlock *FailBB = BasicBlock::Create(Context, "CallStackCheckFailBlk", F);
  IRBuilder<> B(FailBB);
  if (F->getSubprogram())
    B.SetCurrentDebugLocation(
        DILocation::get(Context, 0, 0, F->getSubprogram()));
  FunctionCallee StackChkFail;
  SmallVector<Value *, 1> Args;
  if (Trip.isOSOpenBSD()) {
    StackChkFail = M->getOrInsertFunction("__stack_smash_handler",
                                          Type::getVoidTy(Context),
                                          PointerType::getUnqual(Context));
    Args.push_back(B.CreateGlobalStringPtr(F->getName(), "SSH"));
  } else {
    StackChkFail =
        M->getOrInsertFunction("__stack_chk_fail", Type::getVoidTy(Context));
  }
  cast<Function>(StackChkFail.getCallee())->addFnAttr(Attribute::NoReturn);
  B.CreateCall(StackChkFail, Args);
  B.CreateUnreachable();
  return FailBB;
}