//===------ BPFPreserveStaticOffset.cpp -----------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// TLDR: replaces llvm.preserve.static.offset + GEP + load / store
//           with llvm.bpf.getelementptr.and.load / store
//
// This file implements the BPFPreserveStaticOffsetPass transformation.
// This transformation addresses two BPF verifier-specific issues:
//
// (a) Access to the fields of some structural types is allowed only
//     using load and store instructions with static immediate offsets.
//
//     Examples of such types are `struct __sk_buff` and `struct
//     bpf_sock_ops`.  This is because the offsets of the fields of
//     these structures do not match the real offsets in the running
//     kernel. During BPF program load, LDX and STX instructions
//     referring to the fields of these types are rewritten so that
//     the offsets match the real ones. For this rewrite to happen,
//     the field offsets have to be encoded as immediate operands of
//     the instructions.
//
//     See kernel/bpf/verifier.c:convert_ctx_access function in the
//     Linux kernel source tree for details.
//
// (b) Pointers to context parameters of BPF programs must not be
//     modified before access.
//
//     During BPF program verification the tag PTR_TO_CTX is tracked for
//     register values. If a register with this tag is modified, the
//     BPF program is not allowed to read or write memory using that
//     register. See kernel/bpf/verifier.c:check_mem_access function
//     in the Linux kernel source tree for details.
//
// The following sequence of IR instructions:
//
//   %x = getelementptr %ptr, %constant_offset
//   %y = load %x
//
// is translated into a single machine instruction:
//
//   LDW %ptr, %constant_offset
//
// For cases (a) and (b) to work, the %x-%y sequence above has to be
// preserved by the IR passes.
//
// However, several optimization passes might sink the `load` instruction
// or hoist the `getelementptr` instruction so that the two are no
// longer in sequence. Examples of such passes are:
// SimplifyCFGPass, InstCombinePass, GVNPass.
// After such a modification the verifier would reject the BPF program.
//
// To avoid this issue, patterns like (load/store (getelementptr ...))
// are replaced by calls to BPF-specific intrinsic functions:
// - llvm.bpf.getelementptr.and.load
// - llvm.bpf.getelementptr.and.store
//
// These calls are lowered back to (load/store (getelementptr ...))
// by the BPFCheckAndAdjustIR pass right before the translation from IR
// to machine instructions.
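//
// For example (a sketch: the exact argument encoding is produced by
// fillCommonArgs() below; shown here for a non-atomic, naturally
// aligned i32 load):
//
//   %x = getelementptr inbounds %struct.foo, ptr %p, i32 0, i32 1
//   %y = load i32, ptr %x
//
// becomes
//
//   %y = call i32 (ptr, i1, i8, i8, i8, i1, ...)
//            @llvm.bpf.getelementptr.and.load.i32(
//                ptr %p, i1 false, i8 0, i8 1, i8 2, i1 true,
//                i32 immarg 0, i32 immarg 1)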
//
// The transformation is split into the following steps:
// - When IR is generated from the AST, calls to the intrinsic function
//   llvm.preserve.static.offset are inserted.
// - BPFPreserveStaticOffsetPass is executed as early as possible
//   with AllowPartial set to true; this handles marked GEP chains
//   with constant offsets.
// - BPFPreserveStaticOffsetPass is executed at ScalarOptimizerLateEPCallback
//   with AllowPartial set to false; this handles marked GEP chains
//   with offsets that became constant after loop unrolling, e.g.
//   to handle the following code:
//
// struct context { int x[4]; } __attribute__((preserve_static_offset));
//
//   struct context *ctx = ...;
// #pragma clang loop unroll(full)
//   for (int i = 0; i < 4; ++i)
//     foo(ctx->x[i]);
//
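// After full unrolling the loop index is a constant in every copy of
// the loop body, so the accesses are equivalent to foo(ctx->x[0]) ...
// foo(ctx->x[3]) and each GEP chain folds to a static offset.
//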
// The early BPFPreserveStaticOffsetPass run is necessary to allow
// additional GVN / CSE opportunities after function inlining.
// The relative order of optimizations applied to a function is:
// - early stage (1)
// - ...
// - function inlining (2)
// - ...
// - loop unrolling
// - ...
// - ScalarOptimizerLateEPCallback (3)
//
// When function A is inlined into function B, all optimizations for A
// are already done, while some passes remain for B. If
// BPFPreserveStaticOffsetPass is run at (3) but not at (1),
// the code after (2) would contain a mix of
// (load (gep %p)) and (get.and.load %p) usages:
// - the (load (gep %p)) would come from the calling function;
// - the (get.and.load %p) would come from the callee function;
// thus hindering the CSE / GVN passes run after inlining.

#include "BPF.h"
#include "BPFCORE.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsBPF.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"

#define DEBUG_TYPE "bpf-preserve-static-offset"

using namespace llvm;

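// Positions of the first GEP index among the arguments of the generated
// llvm.bpf.getelementptr.and.{load,store} calls. The store variant
// carries the stored value as argument #0, which shifts the common
// arguments by one (see fillCommonArgs() below).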
static const unsigned GepAndLoadFirstIdxArg = 6;
static const unsigned GepAndStoreFirstIdxArg = 7;

static bool isIntrinsicCall(Value *I, Intrinsic::ID Id) {
  if (auto *Call = dyn_cast<CallInst>(I))
    if (Function *Func = Call->getCalledFunction())
      return Func->getIntrinsicID() == Id;
  return false;
}

static bool isPreserveStaticOffsetCall(Value *I) {
  return isIntrinsicCall(I, Intrinsic::preserve_static_offset);
}

static CallInst *isGEPAndLoad(Value *I) {
  if (isIntrinsicCall(I, Intrinsic::bpf_getelementptr_and_load))
    return cast<CallInst>(I);
  return nullptr;
}

static CallInst *isGEPAndStore(Value *I) {
  if (isIntrinsicCall(I, Intrinsic::bpf_getelementptr_and_store))
    return cast<CallInst>(I);
  return nullptr;
}

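// Merge the debug locations of all instructions in Insns. The result is
// attached to the gep.and.{load,store} call that replaces the chain.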
template <class T = Instruction>
static DILocation *mergeDILocations(SmallVector<T *> &Insns) {
  DILocation *Merged = (*Insns.begin())->getDebugLoc();
  for (T *I : Insns)
    Merged = DILocation::getMergedLocation(Merged, I->getDebugLoc());
  return Merged;
}

static CallInst *makeIntrinsicCall(Module *M,
                                   Intrinsic::BPFIntrinsics Intrinsic,
                                   ArrayRef<Type *> Types,
                                   ArrayRef<Value *> Args) {

  Function *Fn = Intrinsic::getDeclaration(M, Intrinsic, Types);
  return CallInst::Create(Fn, Args);
}

static void setParamElementType(CallInst *Call, unsigned ArgNo, Type *Type) {
  LLVMContext &C = Call->getContext();
  Call->addParamAttr(ArgNo, Attribute::get(C, Attribute::ElementType, Type));
}

static void setParamReadNone(CallInst *Call, unsigned ArgNo) {
  LLVMContext &C = Call->getContext();
  Call->addParamAttr(ArgNo, Attribute::get(C, Attribute::ReadNone));
}

static void setParamReadOnly(CallInst *Call, unsigned ArgNo) {
  LLVMContext &C = Call->getContext();
  Call->addParamAttr(ArgNo, Attribute::get(C, Attribute::ReadOnly));
}

static void setParamWriteOnly(CallInst *Call, unsigned ArgNo) {
  LLVMContext &C = Call->getContext();
  Call->addParamAttr(ArgNo, Attribute::get(C, Attribute::WriteOnly));
}


namespace {
struct GEPChainInfo {
  bool InBounds;
  Type *SourceElementType;
  SmallVector<Value *> Indices;
  SmallVector<GetElementPtrInst *> Members;

  GEPChainInfo() { reset(); }

  void reset() {
    InBounds = true;
    SourceElementType = nullptr;
    Indices.clear();
    Members.clear();
  }
};
} // Anonymous namespace

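// Fill the argument list shared by the gep.and.load / gep.and.store
// intrinsics: (pointer, is-volatile, atomic ordering, sync scope,
// log2(alignment), inbounds flag, GEP indices...).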
template <class T = std::disjunction<LoadInst, StoreInst>>
static void fillCommonArgs(LLVMContext &C, SmallVector<Value *> &Args,
                           GEPChainInfo &GEP, T *Insn) {
  Type *Int8Ty = Type::getInt8Ty(C);
  Type *Int1Ty = Type::getInt1Ty(C);
  // Implementation of Align guarantees that ShiftValue < 64
  unsigned AlignShiftValue = Log2_64(Insn->getAlign().value());
  Args.push_back(GEP.Members[0]->getPointerOperand());
  Args.push_back(ConstantInt::get(Int1Ty, Insn->isVolatile()));
  Args.push_back(ConstantInt::get(Int8Ty, (unsigned)Insn->getOrdering()));
  Args.push_back(ConstantInt::get(Int8Ty, (unsigned)Insn->getSyncScopeID()));
  Args.push_back(ConstantInt::get(Int8Ty, AlignShiftValue));
  Args.push_back(ConstantInt::get(Int1Ty, GEP.InBounds));
  Args.append(GEP.Indices.begin(), GEP.Indices.end());
}

static Instruction *makeGEPAndLoad(Module *M, GEPChainInfo &GEP,
                                   LoadInst *Load) {
  SmallVector<Value *> Args;
  fillCommonArgs(M->getContext(), Args, GEP, Load);
  CallInst *Call = makeIntrinsicCall(M, Intrinsic::bpf_getelementptr_and_load,
                                     {Load->getType()}, Args);
  setParamElementType(Call, 0, GEP.SourceElementType);
  Call->applyMergedLocation(mergeDILocations(GEP.Members), Load->getDebugLoc());
  Call->setName((*GEP.Members.rbegin())->getName());
  if (Load->isUnordered()) {
    Call->setOnlyReadsMemory();
    Call->setOnlyAccessesArgMemory();
    setParamReadOnly(Call, 0);
  }
  for (unsigned I = GepAndLoadFirstIdxArg; I < Args.size(); ++I)
    Call->addParamAttr(I, Attribute::ImmArg);
  Call->setAAMetadata(Load->getAAMetadata());
  return Call;
}

static Instruction *makeGEPAndStore(Module *M, GEPChainInfo &GEP,
                                    StoreInst *Store) {
  SmallVector<Value *> Args;
  Args.push_back(Store->getValueOperand());
  fillCommonArgs(M->getContext(), Args, GEP, Store);
  CallInst *Call =
      makeIntrinsicCall(M, Intrinsic::bpf_getelementptr_and_store,
                        {Store->getValueOperand()->getType()}, Args);
  setParamElementType(Call, 1, GEP.SourceElementType);
  if (Store->getValueOperand()->getType()->isPointerTy())
    setParamReadNone(Call, 0);
  Call->applyMergedLocation(mergeDILocations(GEP.Members),
                            Store->getDebugLoc());
  if (Store->isUnordered()) {
    Call->setOnlyWritesMemory();
    Call->setOnlyAccessesArgMemory();
    setParamWriteOnly(Call, 1);
  }
  for (unsigned I = GepAndStoreFirstIdxArg; I < Args.size(); ++I)
    Call->addParamAttr(I, Attribute::ImmArg);
  Call->setAAMetadata(Store->getAAMetadata());
  return Call;
}

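// Read an immediate operand of Call as unsigned. The operand is
// expected to be a ConstantInt: it is marked ImmArg when the call is
// created.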
static unsigned getOperandAsUnsigned(CallInst *Call, unsigned ArgNo) {
  if (auto *Int = dyn_cast<ConstantInt>(Call->getOperand(ArgNo)))
    return Int->getValue().getZExtValue();
  std::string Report;
  raw_string_ostream ReportS(Report);
  ReportS << "Expecting ConstantInt as argument #" << ArgNo << " of " << *Call
          << "\n";
  report_fatal_error(StringRef(Report));
}

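// Rebuild the GEP folded into a gep.and.{load,store} call. Delta is 0
// for loads and 1 for stores, accounting for the stored value occupying
// argument #0 of the store intrinsic.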
static GetElementPtrInst *reconstructGEP(CallInst *Call, int Delta) {
  SmallVector<Value *> Indices;
  Indices.append(Call->data_operands_begin() + 6 + Delta,
                 Call->data_operands_end());
  Type *GEPPointeeType = Call->getParamElementType(Delta);
  auto *GEP =
      GetElementPtrInst::Create(GEPPointeeType, Call->getOperand(Delta),
                                ArrayRef<Value *>(Indices), Call->getName());
  GEP->setIsInBounds(getOperandAsUnsigned(Call, 5 + Delta));
  return GEP;
}

template <class T = std::disjunction<LoadInst, StoreInst>>
static void reconstructCommon(CallInst *Call, GetElementPtrInst *GEP, T *Insn,
                              int Delta) {
  Insn->setVolatile(getOperandAsUnsigned(Call, 1 + Delta));
  Insn->setOrdering((AtomicOrdering)getOperandAsUnsigned(Call, 2 + Delta));
  Insn->setSyncScopeID(getOperandAsUnsigned(Call, 3 + Delta));
  unsigned AlignShiftValue = getOperandAsUnsigned(Call, 4 + Delta);
  Insn->setAlignment(Align(1ULL << AlignShiftValue));
  GEP->setDebugLoc(Call->getDebugLoc());
  Insn->setDebugLoc(Call->getDebugLoc());
  Insn->setAAMetadata(Call->getAAMetadata());
}

std::pair<GetElementPtrInst *, LoadInst *>
BPFPreserveStaticOffsetPass::reconstructLoad(CallInst *Call) {
  GetElementPtrInst *GEP = reconstructGEP(Call, 0);
  Type *ReturnType = Call->getFunctionType()->getReturnType();
  auto *Load = new LoadInst(ReturnType, GEP, "",
                            /* These would be set in reconstructCommon */
                            false, Align(1));
  reconstructCommon(Call, GEP, Load, 0);
  return std::pair{GEP, Load};
}

std::pair<GetElementPtrInst *, StoreInst *>
BPFPreserveStaticOffsetPass::reconstructStore(CallInst *Call) {
  GetElementPtrInst *GEP = reconstructGEP(Call, 1);
  auto *Store = new StoreInst(Call->getOperand(0), GEP,
                              /* These would be set in reconstructCommon */
                              false, Align(1));
  reconstructCommon(Call, GEP, Store, 1);
  return std::pair{GEP, Store};
}

static bool isZero(Value *V) {
  auto *CI = dyn_cast<ConstantInt>(V);
  return CI && CI->isZero();
}

// Given a chain of GEP instructions, collect the information necessary
// to merge this chain into a single GEP instruction of the form:
//   getelementptr %<type>, ptr %p, i32 0, <field_idx1>, <field_idx2>, ...
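//
// For example (a sketch, assuming field #1 of %struct.foo has type
// %struct.bar):
//
//   %a = getelementptr %struct.foo, ptr %p, i32 0, i32 1
//   %b = getelementptr %struct.bar, ptr %a, i32 0, i32 2
//
// is merged as
//
//   getelementptr %struct.foo, ptr %p, i32 0, i32 1, i32 2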
static bool foldGEPChainAsStructAccess(SmallVector<GetElementPtrInst *> &GEPs,
                                       GEPChainInfo &Info) {
  if (GEPs.empty())
    return false;

  if (!all_of(GEPs, [=](GetElementPtrInst *GEP) {
        return GEP->hasAllConstantIndices();
      }))
    return false;

  GetElementPtrInst *First = GEPs[0];
  Info.InBounds = First->isInBounds();
  Info.SourceElementType = First->getSourceElementType();
  Type *ResultElementType = First->getResultElementType();
  Info.Indices.append(First->idx_begin(), First->idx_end());
  Info.Members.push_back(First);

  for (auto *Iter = GEPs.begin() + 1; Iter != GEPs.end(); ++Iter) {
    GetElementPtrInst *GEP = *Iter;
    if (!isZero(*GEP->idx_begin())) {
      Info.reset();
      return false;
    }
    if (!GEP->getSourceElementType() ||
        GEP->getSourceElementType() != ResultElementType) {
      Info.reset();
      return false;
    }
    Info.InBounds &= GEP->isInBounds();
    Info.Indices.append(GEP->idx_begin() + 1, GEP->idx_end());
    Info.Members.push_back(GEP);
    ResultElementType = GEP->getResultElementType();
  }

  return true;
}

// Given a chain of GEP instructions, collect the information necessary
// to merge this chain into a single GEP instruction of the form:
//   getelementptr i8, ptr %p, i64 %offset
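//
// For example (a sketch; the byte offsets are accumulated with
// accumulateConstantOffset()):
//
//   %a = getelementptr i8, ptr %p, i64 8
//   %b = getelementptr i8, ptr %a, i64 4
//
// is merged as
//
//   getelementptr i8, ptr %p, i64 12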
static bool foldGEPChainAsU8Access(SmallVector<GetElementPtrInst *> &GEPs,
                                   GEPChainInfo &Info) {
  if (GEPs.empty())
    return false;

  GetElementPtrInst *First = GEPs[0];
  const DataLayout &DL = First->getModule()->getDataLayout();
  LLVMContext &C = First->getContext();
  Type *PtrTy = First->getType()->getScalarType();
  APInt Offset(DL.getIndexTypeSizeInBits(PtrTy), 0);
  for (GetElementPtrInst *GEP : GEPs) {
    if (!GEP->accumulateConstantOffset(DL, Offset)) {
      Info.reset();
      return false;
    }
    Info.InBounds &= GEP->isInBounds();
    Info.Members.push_back(GEP);
  }
  Info.SourceElementType = Type::getInt8Ty(C);
  Info.Indices.push_back(ConstantInt::get(C, Offset));

  return true;
}

static void reportNonStaticGEPChain(Instruction *Insn) {
  auto Msg = DiagnosticInfoUnsupported(
      *Insn->getFunction(),
      Twine("Non-constant offset in access to a field of a type marked "
            "with preserve_static_offset might be rejected by BPF verifier")
          .concat(Insn->getDebugLoc()
                      ? ""
                      : " (pass -g option to get exact location)"),
      Insn->getDebugLoc(), DS_Warning);
  Insn->getContext().diagnose(Msg);
}

static bool allZeroIndices(SmallVector<GetElementPtrInst *> &GEPs) {
  return GEPs.empty() || all_of(GEPs, [=](GetElementPtrInst *GEP) {
           return GEP->hasAllZeroIndices();
         });
}

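// Try to replace the load/store modeled by LoadOrStoreTemplate, together
// with the GEP chain collected in GEPs, by a single gep.and.{load,store}
// call inserted before InsnToReplace.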
static bool tryToReplaceWithGEPBuiltin(Instruction *LoadOrStoreTemplate,
                                       SmallVector<GetElementPtrInst *> &GEPs,
                                       Instruction *InsnToReplace) {
  GEPChainInfo GEPChain;
  if (!foldGEPChainAsStructAccess(GEPs, GEPChain) &&
      !foldGEPChainAsU8Access(GEPs, GEPChain)) {
    return false;
  }
  Module *M = InsnToReplace->getModule();
  if (auto *Load = dyn_cast<LoadInst>(LoadOrStoreTemplate)) {
    Instruction *Replacement = makeGEPAndLoad(M, GEPChain, Load);
    Replacement->insertBefore(InsnToReplace);
    InsnToReplace->replaceAllUsesWith(Replacement);
  }
  if (auto *Store = dyn_cast<StoreInst>(LoadOrStoreTemplate)) {
    Instruction *Replacement = makeGEPAndStore(M, GEPChain, Store);
    Replacement->insertBefore(InsnToReplace);
  }
  return true;
}

// Check if U->getPointerOperand() == I
static bool isPointerOperand(Value *I, User *U) {
  if (auto *L = dyn_cast<LoadInst>(U))
    return L->getPointerOperand() == I;
  if (auto *S = dyn_cast<StoreInst>(U))
    return S->getPointerOperand() == I;
  if (auto *GEP = dyn_cast<GetElementPtrInst>(U))
    return GEP->getPointerOperand() == I;
  if (auto *Call = isGEPAndLoad(U))
    return Call->getArgOperand(0) == I;
  if (auto *Call = isGEPAndStore(U))
    return Call->getArgOperand(1) == I;
  return false;
}

static bool isInlineableCall(User *U) {
  if (auto *Call = dyn_cast<CallInst>(U))
    return Call->hasFnAttr(Attribute::InlineHint);
  return false;
}

static void rewriteAccessChain(Instruction *Insn,
                               SmallVector<GetElementPtrInst *> &GEPs,
                               SmallVector<Instruction *> &Visited,
                               bool AllowPartial, bool &StillUsed);

static void rewriteUses(Instruction *Insn,
                        SmallVector<GetElementPtrInst *> &GEPs,
                        SmallVector<Instruction *> &Visited, bool AllowPartial,
                        bool &StillUsed) {
  for (User *U : Insn->users()) {
    auto *UI = dyn_cast<Instruction>(U);
    if (UI && (isPointerOperand(Insn, UI) || isPreserveStaticOffsetCall(UI) ||
               isInlineableCall(UI)))
      rewriteAccessChain(UI, GEPs, Visited, AllowPartial, StillUsed);
    else
      LLVM_DEBUG({
        llvm::dbgs() << "unsupported usage in BPFPreserveStaticOffsetPass:\n";
        llvm::dbgs() << "  Insn: " << *Insn << "\n";
        llvm::dbgs() << "  User: " << *U << "\n";
      });
  }
}

// A DFS traversal of GEP chain trees starting from Root.
//
// Recursion descends through GEP instructions and
// llvm.preserve.static.offset calls. Recursion stops at any other
// instruction. If a load or store instruction is reached, it is replaced
// by a call to the `llvm.bpf.getelementptr.and.load` or
// `llvm.bpf.getelementptr.and.store` intrinsic.
// If `llvm.bpf.getelementptr.and.load/store` is reached, the accumulated
// GEPs are merged into the intrinsic call.
// If nested calls to `llvm.preserve.static.offset` are encountered, these
// calls are marked for deletion.
//
// Parameter descriptions:
// - Insn - current position in the tree
// - GEPs - GEP instructions for the current branch
// - Visited - a list of visited instructions in DFS order,
//   order is important for unused instruction deletion.
// - AllowPartial - when true, GEP chains that can't be folded are
//   not reported, otherwise a diagnostic message is shown for such chains.
// - StillUsed - set to true if one of the GEP chains could not be
//   folded; makes sense when AllowPartial is false, and means that the
//   root preserve.static.offset call is still in use and should remain
//   until the next run of this pass.
static void rewriteAccessChain(Instruction *Insn,
                               SmallVector<GetElementPtrInst *> &GEPs,
                               SmallVector<Instruction *> &Visited,
                               bool AllowPartial, bool &StillUsed) {
  auto MarkAndTraverseUses = [&]() {
    Visited.push_back(Insn);
    rewriteUses(Insn, GEPs, Visited, AllowPartial, StillUsed);
  };
  auto TryToReplace = [&](Instruction *LoadOrStore) {
    // Do nothing for (preserve.static.offset (load/store ..)) or for
    // GEPs with zero indices. Such constructs lead to a zero offset and
    // are simplified by other passes.
    if (allZeroIndices(GEPs))
      return;
    if (tryToReplaceWithGEPBuiltin(LoadOrStore, GEPs, Insn)) {
      Visited.push_back(Insn);
      return;
    }
    if (!AllowPartial)
      reportNonStaticGEPChain(Insn);
    StillUsed = true;
  };
  if (isa<LoadInst>(Insn) || isa<StoreInst>(Insn)) {
    TryToReplace(Insn);
  } else if (isGEPAndLoad(Insn)) {
    auto [GEP, Load] =
        BPFPreserveStaticOffsetPass::reconstructLoad(cast<CallInst>(Insn));
    GEPs.push_back(GEP);
    TryToReplace(Load);
    GEPs.pop_back();
    delete Load;
    delete GEP;
  } else if (isGEPAndStore(Insn)) {
    // This case can't be merged with the one above because
    // `delete Load` / `delete Store` needs a concrete type;
    // the destructor of Instruction is protected.
    auto [GEP, Store] =
        BPFPreserveStaticOffsetPass::reconstructStore(cast<CallInst>(Insn));
    GEPs.push_back(GEP);
    TryToReplace(Store);
    GEPs.pop_back();
    delete Store;
    delete GEP;
  } else if (auto *GEP = dyn_cast<GetElementPtrInst>(Insn)) {
    GEPs.push_back(GEP);
    MarkAndTraverseUses();
    GEPs.pop_back();
  } else if (isPreserveStaticOffsetCall(Insn)) {
    MarkAndTraverseUses();
  } else if (isInlineableCall(Insn)) {
    // Preserve the preserve.static.offset call for parameters of
    // functions that might be inlined. These are removed on a
    // second run of this pass after inlining.
    // Might happen when a pointer to a preserve_static_offset
    // structure is passed as a parameter of a function that is
    // inlined inside a loop that is later unrolled.
    if (AllowPartial)
      StillUsed = true;
  } else {
    SmallString<128> Buf;
    raw_svector_ostream BufStream(Buf);
    BufStream << *Insn;
    report_fatal_error(
        Twine("Unexpected rewriteAccessChain Insn = ").concat(Buf));
  }
}

static void removeMarkerCall(Instruction *Marker) {
  Marker->replaceAllUsesWith(Marker->getOperand(0));
  Marker->eraseFromParent();
}

static bool rewriteAccessChain(Instruction *Marker, bool AllowPartial,
                               SmallPtrSetImpl<Instruction *> &RemovedMarkers) {
  SmallVector<GetElementPtrInst *> GEPs;
  SmallVector<Instruction *> Visited;
  bool StillUsed = false;
  rewriteUses(Marker, GEPs, Visited, AllowPartial, StillUsed);
  // Check if Visited instructions could be removed, iterating in
  // reverse to unblock instructions higher in the chain.
  for (auto V = Visited.rbegin(); V != Visited.rend(); ++V) {
    if (isPreserveStaticOffsetCall(*V)) {
      removeMarkerCall(*V);
      RemovedMarkers.insert(*V);
    } else if ((*V)->use_empty()) {
      (*V)->eraseFromParent();
    }
  }
  return StillUsed;
}

static std::vector<Instruction *>
collectPreserveStaticOffsetCalls(Function &F) {
  std::vector<Instruction *> Calls;
  for (Instruction &Insn : instructions(F))
    if (isPreserveStaticOffsetCall(&Insn))
      Calls.push_back(&Insn);
  return Calls;
}

static bool isPreserveArrayIndex(Value *V) {
  return isIntrinsicCall(V, Intrinsic::preserve_array_access_index);
}

static bool isPreserveStructIndex(Value *V) {
  return isIntrinsicCall(V, Intrinsic::preserve_struct_access_index);
}

static bool isPreserveUnionIndex(Value *V) {
  return isIntrinsicCall(V, Intrinsic::preserve_union_access_index);
}

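// Remove all preserve.{array,struct,union}.access.index calls reachable
// from Marker through pointer operands, delegating the actual removal to
// the BPFCoreSharedInfo::remove*AccessCall helpers.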
static void removePAICalls(Instruction *Marker) {
  auto IsPointerOperand = [](Value *Op, User *U) {
    if (auto *GEP = dyn_cast<GetElementPtrInst>(U))
      return GEP->getPointerOperand() == Op;
    if (isPreserveStaticOffsetCall(U) || isPreserveArrayIndex(U) ||
        isPreserveStructIndex(U) || isPreserveUnionIndex(U))
      return cast<CallInst>(U)->getArgOperand(0) == Op;
    return false;
  };

  SmallVector<Value *, 32> WorkList;
  WorkList.push_back(Marker);
  do {
    Value *V = WorkList.pop_back_val();
    for (User *U : V->users())
      if (IsPointerOperand(V, U))
        WorkList.push_back(U);
    auto *Call = dyn_cast<CallInst>(V);
    if (!Call)
      continue;
    if (isPreserveArrayIndex(V))
      BPFCoreSharedInfo::removeArrayAccessCall(Call);
    else if (isPreserveStructIndex(V))
      BPFCoreSharedInfo::removeStructAccessCall(Call);
    else if (isPreserveUnionIndex(V))
      BPFCoreSharedInfo::removeUnionAccessCall(Call);
  } while (!WorkList.empty());
}

// Look for sequences:
// - llvm.preserve.static.offset -> getelementptr... -> load
// - llvm.preserve.static.offset -> getelementptr... -> store
// And replace those with calls to intrinsics:
// - llvm.bpf.getelementptr.and.load
// - llvm.bpf.getelementptr.and.store
static bool rewriteFunction(Function &F, bool AllowPartial) {
  LLVM_DEBUG(dbgs() << "********** BPFPreserveStaticOffsetPass (AllowPartial="
                    << AllowPartial << ") ************\n");

  auto MarkerCalls = collectPreserveStaticOffsetCalls(F);
  SmallPtrSet<Instruction *, 16> RemovedMarkers;

  LLVM_DEBUG(dbgs() << "There are " << MarkerCalls.size()
                    << " preserve.static.offset calls\n");

  if (MarkerCalls.empty())
    return false;

  for (auto *Call : MarkerCalls)
    removePAICalls(Call);

  for (auto *Call : MarkerCalls) {
    if (RemovedMarkers.contains(Call))
      continue;
    bool StillUsed = rewriteAccessChain(Call, AllowPartial, RemovedMarkers);
    if (!StillUsed || !AllowPartial)
      removeMarkerCall(Call);
  }

  return true;
}

PreservedAnalyses
llvm::BPFPreserveStaticOffsetPass::run(Function &F,
                                       FunctionAnalysisManager &AM) {
  return rewriteFunction(F, AllowPartial) ? PreservedAnalyses::none()
                                          : PreservedAnalyses::all();
}
