1 //===------ BPFAbstractMemberAccess.cpp - Abstracting Member Accesses -----===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
// This pass abstracts struct/union member accesses in order to support
// compile-once run-everywhere (CO-RE). CO-RE intends to compile a program
// that can run on different kernels. In particular, if a bpf program tries
// to access a particular kernel data structure member, the details of the
// intermediate member access will be remembered so the bpf loader can do the
// necessary adjustment right before program loading.
15 //
16 // For example,
17 //
18 // struct s {
19 // int a;
20 // int b;
21 // };
22 // struct t {
23 // struct s c;
24 // int d;
25 // };
26 // struct t e;
27 //
28 // For the member access e.c.b, the compiler will generate code
29 // &e + 4
30 //
31 // The compile-once run-everywhere instead generates the following code
32 // r = 4
33 // &e + r
34 // The "4" in "r = 4" can be changed based on a particular kernel version.
35 // For example, on a particular kernel version, if struct s is changed to
36 //
37 // struct s {
38 // int new_field;
39 // int a;
40 // int b;
41 // }
42 //
43 // By repeating the member access on the host, the bpf loader can
44 // adjust "r = 4" as "r = 8".
45 //
46 // This feature relies on the following three intrinsic calls:
47 // addr = preserve_array_access_index(base, dimension, index)
48 // addr = preserve_union_access_index(base, di_index)
49 // !llvm.preserve.access.index <union_ditype>
50 // addr = preserve_struct_access_index(base, gep_index, di_index)
51 // !llvm.preserve.access.index <struct_ditype>
52 //
// Bitfield member access needs special attention. User cannot take the
// address of a bitfield access. To facilitate kernel verifier
// for easy bitfield code optimization, a new clang intrinsic is introduced:
//   uint32_t __builtin_preserve_field_info(member_access, info_kind)
57 // In IR, a chain with two (or more) intrinsic calls will be generated:
58 // ...
59 // addr = preserve_struct_access_index(base, 1, 1) !struct s
60 // uint32_t result = bpf_preserve_field_info(addr, info_kind)
61 //
// Suppose the info_kind is FIELD_SIGNEDNESS,
// the above two IR intrinsics will be replaced with
// a relocatable insn:
//   signedness = /* signedness of member_access */
// and the signedness can be changed by the bpf loader based on the
// types on the host.
68 //
69 // User can also test whether a field exists or not with
70 // uint32_t result = bpf_preserve_field_info(member_access, FIELD_EXISTENCE)
71 // The field will be always available (result = 1) during initial
72 // compilation, but bpf loader can patch with the correct value
73 // on the target host where the member_access may or may not be available
74 //
75 //===----------------------------------------------------------------------===//
76
77 #include "BPF.h"
78 #include "BPFCORE.h"
79 #include "BPFTargetMachine.h"
80 #include "llvm/IR/DebugInfoMetadata.h"
81 #include "llvm/IR/GlobalVariable.h"
82 #include "llvm/IR/Instruction.h"
83 #include "llvm/IR/Instructions.h"
84 #include "llvm/IR/IntrinsicsBPF.h"
85 #include "llvm/IR/Module.h"
86 #include "llvm/IR/PassManager.h"
87 #include "llvm/IR/Type.h"
88 #include "llvm/IR/User.h"
89 #include "llvm/IR/Value.h"
90 #include "llvm/Pass.h"
91 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
92 #include <stack>
93
94 #define DEBUG_TYPE "bpf-abstract-member-access"
95
96 namespace llvm {
97 constexpr StringRef BPFCoreSharedInfo::AmaAttr;
98 uint32_t BPFCoreSharedInfo::SeqNum;
99
insertPassThrough(Module * M,BasicBlock * BB,Instruction * Input,Instruction * Before)100 Instruction *BPFCoreSharedInfo::insertPassThrough(Module *M, BasicBlock *BB,
101 Instruction *Input,
102 Instruction *Before) {
103 Function *Fn = Intrinsic::getDeclaration(
104 M, Intrinsic::bpf_passthrough, {Input->getType(), Input->getType()});
105 Constant *SeqNumVal = ConstantInt::get(Type::getInt32Ty(BB->getContext()),
106 BPFCoreSharedInfo::SeqNum++);
107
108 auto *NewInst = CallInst::Create(Fn, {SeqNumVal, Input});
109 BB->getInstList().insert(Before->getIterator(), NewInst);
110 return NewInst;
111 }
112 } // namespace llvm
113
114 using namespace llvm;
115
116 namespace {
// Core worker of the pass: recognizes preserve_*_access_index intrinsic
// chains in a function and rewrites them into relocatable form.
class BPFAbstractMemberAccess final {
public:
  BPFAbstractMemberAccess(BPFTargetMachine *TM) : TM(TM) {}

  bool run(Function &F);

  // Information extracted from one preserve_*_access_index or
  // llvm.bpf.preserve.{field,type,enum}.* intrinsic call.
  struct CallInfo {
    uint32_t Kind;          // One of the BPFPreserve*AI enumerators below.
    uint32_t AccessIndex;   // Access index operand, or the requested info kind.
    Align RecordAlignment;  // ABI alignment of the pointed-to record type.
    MDNode *Metadata;       // preserve.access.index metadata (may be null).
    Value *Base;            // Base pointer operand of the intrinsic call.
  };
  typedef std::stack<std::pair<CallInst *, CallInfo>> CallInfoStack;

private:
  // Classification of the intrinsic kinds this pass handles.
  enum : uint32_t {
    BPFPreserveArrayAI = 1,
    BPFPreserveUnionAI = 2,
    BPFPreserveStructAI = 3,
    BPFPreserveFieldInfoAI = 4,
  };

  TargetMachine *TM;
  const DataLayout *DL = nullptr;
  Module *M = nullptr;

  // Globals created for relocation, shared across functions so equal keys
  // reuse one global. NOTE(review): presumably keyed by the access-key
  // string built below — confirm against transformGEPChain.
  static std::map<std::string, GlobalVariable *> GEPGlobals;
  // A map to link preserve_*_access_index intrinsic calls.
  std::map<CallInst *, std::pair<CallInst *, CallInfo>> AIChain;
  // A map to hold all the base preserve_*_access_index intrinsic calls.
  // The base call is not an input of any other preserve_*
  // intrinsics.
  std::map<CallInst *, CallInfo> BaseAICalls;

  bool doTransformation(Function &F);

  // Walk the users of a preserve_* call (looking through bitcasts and
  // all-zero GEPs) to populate AIChain and BaseAICalls.
  void traceAICall(CallInst *Call, CallInfo &ParentInfo);
  void traceBitCast(BitCastInst *BitCast, CallInst *Parent,
                    CallInfo &ParentInfo);
  void traceGEP(GetElementPtrInst *GEP, CallInst *Parent,
                CallInfo &ParentInfo);
  void collectAICallChains(Function &F);

  // Recognize a preserve_* intrinsic call and fill in Cinfo.
  bool IsPreserveDIAccessIndexCall(const CallInst *Call, CallInfo &Cinfo);
  // Check debuginfo-type compatibility between two chained preserve_* calls.
  bool IsValidAIChain(const MDNode *ParentMeta, uint32_t ParentAI,
                      const MDNode *ChildMeta);
  bool removePreserveAccessIndexIntrinsic(Function &F);
  void replaceWithGEP(std::vector<CallInst *> &CallList,
                      uint32_t NumOfZerosIndex, uint32_t DIIndex);
  bool HasPreserveFieldInfoCall(CallInfoStack &CallStack);
  void GetStorageBitRange(DIDerivedType *MemberTy, Align RecordAlignment,
                          uint32_t &StartBitOffset, uint32_t &EndBitOffset);
  uint32_t GetFieldInfo(uint32_t InfoKind, DICompositeType *CTy,
                        uint32_t AccessIndex, uint32_t PatchImm,
                        Align RecordAlignment);

  Value *computeBaseAndAccessKey(CallInst *Call, CallInfo &CInfo,
                                 std::string &AccessKey, MDNode *&BaseMeta);
  MDNode *computeAccessKey(CallInst *Call, CallInfo &CInfo,
                           std::string &AccessKey, bool &IsInt32Ret);
  uint64_t getConstant(const Value *IndexValue);
  bool transformGEPChain(CallInst *Call, CallInfo &CInfo);
};

std::map<std::string, GlobalVariable *> BPFAbstractMemberAccess::GEPGlobals;
183
// Legacy pass-manager wrapper around BPFAbstractMemberAccess.
class BPFAbstractMemberAccessLegacyPass final : public FunctionPass {
  BPFTargetMachine *TM;

  // Delegate the actual work to a freshly constructed worker per function.
  bool runOnFunction(Function &F) override {
    return BPFAbstractMemberAccess(TM).run(F);
  }

public:
  static char ID;

  // Add optional BPFTargetMachine parameter so that BPF backend can add the
  // phase with target machine to find out the endianness. The default
  // constructor (without parameters) is used by the pass manager for managing
  // purposes.
  BPFAbstractMemberAccessLegacyPass(BPFTargetMachine *TM = nullptr)
      : FunctionPass(ID), TM(TM) {}
};
201
202 } // End anonymous namespace
203
char BPFAbstractMemberAccessLegacyPass::ID = 0;
INITIALIZE_PASS(BPFAbstractMemberAccessLegacyPass, DEBUG_TYPE,
                "BPF Abstract Member Access", false, false)

// Factory used by the BPF target to create the legacy pass with a target
// machine (needed by GetFieldInfo to query endianness).
FunctionPass *llvm::createBPFAbstractMemberAccess(BPFTargetMachine *TM) {
  return new BPFAbstractMemberAccessLegacyPass(TM);
}
211
run(Function & F)212 bool BPFAbstractMemberAccess::run(Function &F) {
213 LLVM_DEBUG(dbgs() << "********** Abstract Member Accesses **********\n");
214
215 M = F.getParent();
216 if (!M)
217 return false;
218
219 // Bail out if no debug info.
220 if (M->debug_compile_units().empty())
221 return false;
222
223 DL = &M->getDataLayout();
224 return doTransformation(F);
225 }
226
SkipDIDerivedTag(unsigned Tag,bool skipTypedef)227 static bool SkipDIDerivedTag(unsigned Tag, bool skipTypedef) {
228 if (Tag != dwarf::DW_TAG_typedef && Tag != dwarf::DW_TAG_const_type &&
229 Tag != dwarf::DW_TAG_volatile_type &&
230 Tag != dwarf::DW_TAG_restrict_type &&
231 Tag != dwarf::DW_TAG_member)
232 return false;
233 if (Tag == dwarf::DW_TAG_typedef && !skipTypedef)
234 return false;
235 return true;
236 }
237
stripQualifiers(DIType * Ty,bool skipTypedef=true)238 static DIType * stripQualifiers(DIType *Ty, bool skipTypedef = true) {
239 while (auto *DTy = dyn_cast<DIDerivedType>(Ty)) {
240 if (!SkipDIDerivedTag(DTy->getTag(), skipTypedef))
241 break;
242 Ty = DTy->getBaseType();
243 }
244 return Ty;
245 }
246
/// Const overload of stripQualifiers; always skips typedefs.
static const DIType *stripQualifiers(const DIType *Ty) {
  for (;;) {
    const auto *Derived = dyn_cast<DIDerivedType>(Ty);
    if (!Derived || !SkipDIDerivedTag(Derived->getTag(), true))
      return Ty;
    Ty = Derived->getBaseType();
  }
}
255
calcArraySize(const DICompositeType * CTy,uint32_t StartDim)256 static uint32_t calcArraySize(const DICompositeType *CTy, uint32_t StartDim) {
257 DINodeArray Elements = CTy->getElements();
258 uint32_t DimSize = 1;
259 for (uint32_t I = StartDim; I < Elements.size(); ++I) {
260 if (auto *Element = dyn_cast_or_null<DINode>(Elements[I]))
261 if (Element->getTag() == dwarf::DW_TAG_subrange_type) {
262 const DISubrange *SR = cast<DISubrange>(Element);
263 auto *CI = SR->getCount().dyn_cast<ConstantInt *>();
264 DimSize *= CI->getSExtValue();
265 }
266 }
267
268 return DimSize;
269 }
270
/// Check whether a call is a preserve_*_access_index intrinsic call or not.
/// On a match, fills \p CInfo with the call's kind, access index/info kind,
/// metadata, base pointer, and record alignment. The intrinsics are matched
/// by name prefix on the called global value.
bool BPFAbstractMemberAccess::IsPreserveDIAccessIndexCall(const CallInst *Call,
                                                          CallInfo &CInfo) {
  if (!Call)
    return false;

  // Indirect calls (non-GlobalValue callees) cannot be these intrinsics.
  const auto *GV = dyn_cast<GlobalValue>(Call->getCalledOperand());
  if (!GV)
    return false;
  if (GV->getName().startswith("llvm.preserve.array.access.index")) {
    CInfo.Kind = BPFPreserveArrayAI;
    CInfo.Metadata = Call->getMetadata(LLVMContext::MD_preserve_access_index);
    if (!CInfo.Metadata)
      report_fatal_error("Missing metadata for llvm.preserve.array.access.index intrinsic");
    // Operands: (base, dimension, index) — index is operand 2.
    CInfo.AccessIndex = getConstant(Call->getArgOperand(2));
    CInfo.Base = Call->getArgOperand(0);
    CInfo.RecordAlignment =
        DL->getABITypeAlign(CInfo.Base->getType()->getPointerElementType());
    return true;
  }
  if (GV->getName().startswith("llvm.preserve.union.access.index")) {
    CInfo.Kind = BPFPreserveUnionAI;
    CInfo.Metadata = Call->getMetadata(LLVMContext::MD_preserve_access_index);
    if (!CInfo.Metadata)
      report_fatal_error("Missing metadata for llvm.preserve.union.access.index intrinsic");
    // Operands: (base, di_index) — di_index is operand 1.
    CInfo.AccessIndex = getConstant(Call->getArgOperand(1));
    CInfo.Base = Call->getArgOperand(0);
    CInfo.RecordAlignment =
        DL->getABITypeAlign(CInfo.Base->getType()->getPointerElementType());
    return true;
  }
  if (GV->getName().startswith("llvm.preserve.struct.access.index")) {
    CInfo.Kind = BPFPreserveStructAI;
    CInfo.Metadata = Call->getMetadata(LLVMContext::MD_preserve_access_index);
    if (!CInfo.Metadata)
      report_fatal_error("Missing metadata for llvm.preserve.struct.access.index intrinsic");
    // Operands: (base, gep_index, di_index) — di_index is operand 2.
    CInfo.AccessIndex = getConstant(Call->getArgOperand(2));
    CInfo.Base = Call->getArgOperand(0);
    CInfo.RecordAlignment =
        DL->getABITypeAlign(CInfo.Base->getType()->getPointerElementType());
    return true;
  }
  if (GV->getName().startswith("llvm.bpf.preserve.field.info")) {
    CInfo.Kind = BPFPreserveFieldInfoAI;
    CInfo.Metadata = nullptr;
    // Check validity of info_kind as clang did not check this.
    uint64_t InfoKind = getConstant(Call->getArgOperand(1));
    if (InfoKind >= BPFCoreSharedInfo::MAX_FIELD_RELOC_KIND)
      report_fatal_error("Incorrect info_kind for llvm.bpf.preserve.field.info intrinsic");
    // For field.info the AccessIndex carries the requested info kind.
    CInfo.AccessIndex = InfoKind;
    return true;
  }
  if (GV->getName().startswith("llvm.bpf.preserve.type.info")) {
    CInfo.Kind = BPFPreserveFieldInfoAI;
    CInfo.Metadata = Call->getMetadata(LLVMContext::MD_preserve_access_index);
    if (!CInfo.Metadata)
      report_fatal_error("Missing metadata for llvm.preserve.type.info intrinsic");
    uint64_t Flag = getConstant(Call->getArgOperand(1));
    if (Flag >= BPFCoreSharedInfo::MAX_PRESERVE_TYPE_INFO_FLAG)
      report_fatal_error("Incorrect flag for llvm.bpf.preserve.type.info intrinsic");
    // Map the flag to the corresponding relocation kind.
    if (Flag == BPFCoreSharedInfo::PRESERVE_TYPE_INFO_EXISTENCE)
      CInfo.AccessIndex = BPFCoreSharedInfo::TYPE_EXISTENCE;
    else
      CInfo.AccessIndex = BPFCoreSharedInfo::TYPE_SIZE;
    return true;
  }
  if (GV->getName().startswith("llvm.bpf.preserve.enum.value")) {
    CInfo.Kind = BPFPreserveFieldInfoAI;
    CInfo.Metadata = Call->getMetadata(LLVMContext::MD_preserve_access_index);
    if (!CInfo.Metadata)
      report_fatal_error("Missing metadata for llvm.preserve.enum.value intrinsic");
    uint64_t Flag = getConstant(Call->getArgOperand(2));
    if (Flag >= BPFCoreSharedInfo::MAX_PRESERVE_ENUM_VALUE_FLAG)
      report_fatal_error("Incorrect flag for llvm.bpf.preserve.enum.value intrinsic");
    // Map the flag to the corresponding relocation kind.
    if (Flag == BPFCoreSharedInfo::PRESERVE_ENUM_VALUE_EXISTENCE)
      CInfo.AccessIndex = BPFCoreSharedInfo::ENUM_VALUE_EXISTENCE;
    else
      CInfo.AccessIndex = BPFCoreSharedInfo::ENUM_VALUE;
    return true;
  }

  return false;
}
354
replaceWithGEP(std::vector<CallInst * > & CallList,uint32_t DimensionIndex,uint32_t GEPIndex)355 void BPFAbstractMemberAccess::replaceWithGEP(std::vector<CallInst *> &CallList,
356 uint32_t DimensionIndex,
357 uint32_t GEPIndex) {
358 for (auto Call : CallList) {
359 uint32_t Dimension = 1;
360 if (DimensionIndex > 0)
361 Dimension = getConstant(Call->getArgOperand(DimensionIndex));
362
363 Constant *Zero =
364 ConstantInt::get(Type::getInt32Ty(Call->getParent()->getContext()), 0);
365 SmallVector<Value *, 4> IdxList;
366 for (unsigned I = 0; I < Dimension; ++I)
367 IdxList.push_back(Zero);
368 IdxList.push_back(Call->getArgOperand(GEPIndex));
369
370 auto *GEP = GetElementPtrInst::CreateInBounds(Call->getArgOperand(0),
371 IdxList, "", Call);
372 Call->replaceAllUsesWith(GEP);
373 Call->eraseFromParent();
374 }
375 }
376
// Lower all preserve_*_access_index intrinsics in \p F to ordinary IR
// (GEPs / base pointers), discarding the CO-RE relocation information.
// Returns true if any such intrinsic was found.
bool BPFAbstractMemberAccess::removePreserveAccessIndexIntrinsic(Function &F) {
  std::vector<CallInst *> PreserveArrayIndexCalls;
  std::vector<CallInst *> PreserveUnionIndexCalls;
  std::vector<CallInst *> PreserveStructIndexCalls;
  bool Found = false;

  // Collect all recognized calls, bucketed by kind.
  // NOTE(review): any kind other than array/union (including
  // BPFPreserveFieldInfoAI) falls into the struct bucket here — presumably
  // this path is only taken when field-info intrinsics cannot appear;
  // verify against doTransformation.
  for (auto &BB : F)
    for (auto &I : BB) {
      auto *Call = dyn_cast<CallInst>(&I);
      CallInfo CInfo;
      if (!IsPreserveDIAccessIndexCall(Call, CInfo))
        continue;

      Found = true;
      if (CInfo.Kind == BPFPreserveArrayAI)
        PreserveArrayIndexCalls.push_back(Call);
      else if (CInfo.Kind == BPFPreserveUnionAI)
        PreserveUnionIndexCalls.push_back(Call);
      else
        PreserveStructIndexCalls.push_back(Call);
    }

  // do the following transformation:
  // . addr = preserve_array_access_index(base, dimension, index)
  //   is transformed to
  //     addr = GEP(base, dimenion's zero's, index)
  // . addr = preserve_union_access_index(base, di_index)
  //   is transformed to
  //     addr = base, i.e., all usages of "addr" are replaced by "base".
  // . addr = preserve_struct_access_index(base, gep_index, di_index)
  //   is transformed to
  //     addr = GEP(base, 0, gep_index)
  replaceWithGEP(PreserveArrayIndexCalls, 1, 2);
  replaceWithGEP(PreserveStructIndexCalls, 0, 1);
  for (auto Call : PreserveUnionIndexCalls) {
    Call->replaceAllUsesWith(Call->getArgOperand(0));
    Call->eraseFromParent();
  }

  return Found;
}
418
/// Check whether the access index chain is valid. We check
/// here because there may be type casts between two
/// access indexes. We want to ensure memory access still valid.
bool BPFAbstractMemberAccess::IsValidAIChain(const MDNode *ParentType,
                                             uint32_t ParentAI,
                                             const MDNode *ChildType) {
  if (!ChildType)
    return true; // preserve_field_info, no type comparison needed.

  const DIType *PType = stripQualifiers(cast<DIType>(ParentType));
  const DIType *CType = stripQualifiers(cast<DIType>(ChildType));

  // Child is a derived/pointer type, which is due to type casting.
  // Pointer type cannot be in the middle of chain.
  if (isa<DIDerivedType>(CType))
    return false;

  // Parent is a pointer type: valid only if it points (modulo qualifiers)
  // at exactly the child's type.
  if (const auto *PtrTy = dyn_cast<DIDerivedType>(PType)) {
    if (PtrTy->getTag() != dwarf::DW_TAG_pointer_type)
      return false;
    return stripQualifiers(PtrTy->getBaseType()) == CType;
  }

  // Otherwise, struct/union/array types
  const auto *PTy = dyn_cast<DICompositeType>(PType);
  const auto *CTy = dyn_cast<DICompositeType>(CType);
  assert(PTy && CTy && "ParentType or ChildType is null or not composite");

  uint32_t PTyTag = PTy->getTag();
  assert(PTyTag == dwarf::DW_TAG_array_type ||
         PTyTag == dwarf::DW_TAG_structure_type ||
         PTyTag == dwarf::DW_TAG_union_type);

  uint32_t CTyTag = CTy->getTag();
  assert(CTyTag == dwarf::DW_TAG_array_type ||
         CTyTag == dwarf::DW_TAG_structure_type ||
         CTyTag == dwarf::DW_TAG_union_type);

  // Multi dimensional arrays, base element should be the same
  if (PTyTag == dwarf::DW_TAG_array_type && PTyTag == CTyTag)
    return PTy->getBaseType() == CTy->getBaseType();

  // For an array parent, the child must match the element type; for a
  // struct/union parent, the child must match the member selected by
  // ParentAI.
  DIType *Ty;
  if (PTyTag == dwarf::DW_TAG_array_type)
    Ty = PTy->getBaseType();
  else
    Ty = dyn_cast<DIType>(PTy->getElements()[ParentAI]);

  return dyn_cast<DICompositeType>(stripQualifiers(Ty)) == CTy;
}
470
traceAICall(CallInst * Call,CallInfo & ParentInfo)471 void BPFAbstractMemberAccess::traceAICall(CallInst *Call,
472 CallInfo &ParentInfo) {
473 for (User *U : Call->users()) {
474 Instruction *Inst = dyn_cast<Instruction>(U);
475 if (!Inst)
476 continue;
477
478 if (auto *BI = dyn_cast<BitCastInst>(Inst)) {
479 traceBitCast(BI, Call, ParentInfo);
480 } else if (auto *CI = dyn_cast<CallInst>(Inst)) {
481 CallInfo ChildInfo;
482
483 if (IsPreserveDIAccessIndexCall(CI, ChildInfo) &&
484 IsValidAIChain(ParentInfo.Metadata, ParentInfo.AccessIndex,
485 ChildInfo.Metadata)) {
486 AIChain[CI] = std::make_pair(Call, ParentInfo);
487 traceAICall(CI, ChildInfo);
488 } else {
489 BaseAICalls[Call] = ParentInfo;
490 }
491 } else if (auto *GI = dyn_cast<GetElementPtrInst>(Inst)) {
492 if (GI->hasAllZeroIndices())
493 traceGEP(GI, Call, ParentInfo);
494 else
495 BaseAICalls[Call] = ParentInfo;
496 } else {
497 BaseAICalls[Call] = ParentInfo;
498 }
499 }
500 }
501
traceBitCast(BitCastInst * BitCast,CallInst * Parent,CallInfo & ParentInfo)502 void BPFAbstractMemberAccess::traceBitCast(BitCastInst *BitCast,
503 CallInst *Parent,
504 CallInfo &ParentInfo) {
505 for (User *U : BitCast->users()) {
506 Instruction *Inst = dyn_cast<Instruction>(U);
507 if (!Inst)
508 continue;
509
510 if (auto *BI = dyn_cast<BitCastInst>(Inst)) {
511 traceBitCast(BI, Parent, ParentInfo);
512 } else if (auto *CI = dyn_cast<CallInst>(Inst)) {
513 CallInfo ChildInfo;
514 if (IsPreserveDIAccessIndexCall(CI, ChildInfo) &&
515 IsValidAIChain(ParentInfo.Metadata, ParentInfo.AccessIndex,
516 ChildInfo.Metadata)) {
517 AIChain[CI] = std::make_pair(Parent, ParentInfo);
518 traceAICall(CI, ChildInfo);
519 } else {
520 BaseAICalls[Parent] = ParentInfo;
521 }
522 } else if (auto *GI = dyn_cast<GetElementPtrInst>(Inst)) {
523 if (GI->hasAllZeroIndices())
524 traceGEP(GI, Parent, ParentInfo);
525 else
526 BaseAICalls[Parent] = ParentInfo;
527 } else {
528 BaseAICalls[Parent] = ParentInfo;
529 }
530 }
531 }
532
traceGEP(GetElementPtrInst * GEP,CallInst * Parent,CallInfo & ParentInfo)533 void BPFAbstractMemberAccess::traceGEP(GetElementPtrInst *GEP, CallInst *Parent,
534 CallInfo &ParentInfo) {
535 for (User *U : GEP->users()) {
536 Instruction *Inst = dyn_cast<Instruction>(U);
537 if (!Inst)
538 continue;
539
540 if (auto *BI = dyn_cast<BitCastInst>(Inst)) {
541 traceBitCast(BI, Parent, ParentInfo);
542 } else if (auto *CI = dyn_cast<CallInst>(Inst)) {
543 CallInfo ChildInfo;
544 if (IsPreserveDIAccessIndexCall(CI, ChildInfo) &&
545 IsValidAIChain(ParentInfo.Metadata, ParentInfo.AccessIndex,
546 ChildInfo.Metadata)) {
547 AIChain[CI] = std::make_pair(Parent, ParentInfo);
548 traceAICall(CI, ChildInfo);
549 } else {
550 BaseAICalls[Parent] = ParentInfo;
551 }
552 } else if (auto *GI = dyn_cast<GetElementPtrInst>(Inst)) {
553 if (GI->hasAllZeroIndices())
554 traceGEP(GI, Parent, ParentInfo);
555 else
556 BaseAICalls[Parent] = ParentInfo;
557 } else {
558 BaseAICalls[Parent] = ParentInfo;
559 }
560 }
561 }
562
collectAICallChains(Function & F)563 void BPFAbstractMemberAccess::collectAICallChains(Function &F) {
564 AIChain.clear();
565 BaseAICalls.clear();
566
567 for (auto &BB : F)
568 for (auto &I : BB) {
569 CallInfo CInfo;
570 auto *Call = dyn_cast<CallInst>(&I);
571 if (!IsPreserveDIAccessIndexCall(Call, CInfo) ||
572 AIChain.find(Call) != AIChain.end())
573 continue;
574
575 traceAICall(Call, CInfo);
576 }
577 }
578
getConstant(const Value * IndexValue)579 uint64_t BPFAbstractMemberAccess::getConstant(const Value *IndexValue) {
580 const ConstantInt *CV = dyn_cast<ConstantInt>(IndexValue);
581 assert(CV);
582 return CV->getValue().getZExtValue();
583 }
584
/// Get the start and the end of storage offset for \p MemberTy.
/// The storage unit is one record-alignment's worth of bits; the bitfield
/// member must fit entirely inside a single such unit.
void BPFAbstractMemberAccess::GetStorageBitRange(DIDerivedType *MemberTy,
                                                 Align RecordAlignment,
                                                 uint32_t &StartBitOffset,
                                                 uint32_t &EndBitOffset) {
  uint32_t MemberBitSize = MemberTy->getSizeInBits();
  uint32_t MemberBitOffset = MemberTy->getOffsetInBits();
  uint32_t AlignBits = RecordAlignment.value() * 8;
  // Supported storage units are at most 8 bytes, and the member must not
  // be wider than one unit.
  if (RecordAlignment > 8 || MemberBitSize > AlignBits)
    report_fatal_error("Unsupported field expression for llvm.bpf.preserve.field.info, "
                       "requiring too big alignment");

  // Round the member's bit offset down to the storage-unit boundary.
  StartBitOffset = MemberBitOffset & ~(AlignBits - 1);
  // The member must not straddle two storage units.
  if ((StartBitOffset + AlignBits) < (MemberBitOffset + MemberBitSize))
    report_fatal_error("Unsupported field expression for llvm.bpf.preserve.field.info, "
                       "cross alignment boundary");
  EndBitOffset = StartBitOffset + AlignBits;
}
603
// Compute the relocation value for one step of the access chain:
// given the composite type \p CTy, the member/element \p AccessIndex, the
// offset accumulated so far (\p PatchImm) and the record alignment, return
// the value for the requested \p InfoKind. Unsupported field shapes abort
// via report_fatal_error.
uint32_t BPFAbstractMemberAccess::GetFieldInfo(uint32_t InfoKind,
                                               DICompositeType *CTy,
                                               uint32_t AccessIndex,
                                               uint32_t PatchImm,
                                               Align RecordAlignment) {
  // Existence is always 1 at compile time; the loader patches it later.
  if (InfoKind == BPFCoreSharedInfo::FIELD_EXISTENCE)
    return 1;

  uint32_t Tag = CTy->getTag();
  if (InfoKind == BPFCoreSharedInfo::FIELD_BYTE_OFFSET) {
    if (Tag == dwarf::DW_TAG_array_type) {
      // Advance by AccessIndex elements, each of inner-dimension size.
      auto *EltTy = stripQualifiers(CTy->getBaseType());
      PatchImm += AccessIndex * calcArraySize(CTy, 1) *
                  (EltTy->getSizeInBits() >> 3);
    } else if (Tag == dwarf::DW_TAG_structure_type) {
      auto *MemberTy = cast<DIDerivedType>(CTy->getElements()[AccessIndex]);
      if (!MemberTy->isBitField()) {
        PatchImm += MemberTy->getOffsetInBits() >> 3;
      } else {
        // For bitfields, the offset is that of the enclosing storage unit.
        unsigned SBitOffset, NextSBitOffset;
        GetStorageBitRange(MemberTy, RecordAlignment, SBitOffset,
                           NextSBitOffset);
        PatchImm += SBitOffset >> 3;
      }
    }
    return PatchImm;
  }

  if (InfoKind == BPFCoreSharedInfo::FIELD_BYTE_SIZE) {
    if (Tag == dwarf::DW_TAG_array_type) {
      auto *EltTy = stripQualifiers(CTy->getBaseType());
      return calcArraySize(CTy, 1) * (EltTy->getSizeInBits() >> 3);
    } else {
      auto *MemberTy = cast<DIDerivedType>(CTy->getElements()[AccessIndex]);
      uint32_t SizeInBits = MemberTy->getSizeInBits();
      if (!MemberTy->isBitField())
        return SizeInBits >> 3;

      // Bitfields load the whole storage unit; its size must be a power
      // of two so it maps to a single load width.
      unsigned SBitOffset, NextSBitOffset;
      GetStorageBitRange(MemberTy, RecordAlignment, SBitOffset, NextSBitOffset);
      SizeInBits = NextSBitOffset - SBitOffset;
      if (SizeInBits & (SizeInBits - 1))
        report_fatal_error("Unsupported field expression for llvm.bpf.preserve.field.info");
      return SizeInBits >> 3;
    }
  }

  if (InfoKind == BPFCoreSharedInfo::FIELD_SIGNEDNESS) {
    const DIType *BaseTy;
    if (Tag == dwarf::DW_TAG_array_type) {
      // Signedness only checked when final array elements are accessed.
      if (CTy->getElements().size() != 1)
        report_fatal_error("Invalid array expression for llvm.bpf.preserve.field.info");
      BaseTy = stripQualifiers(CTy->getBaseType());
    } else {
      auto *MemberTy = cast<DIDerivedType>(CTy->getElements()[AccessIndex]);
      BaseTy = stripQualifiers(MemberTy->getBaseType());
    }

    // Only basic types and enum types have signedness.
    const auto *BTy = dyn_cast<DIBasicType>(BaseTy);
    while (!BTy) {
      const auto *CompTy = dyn_cast<DICompositeType>(BaseTy);
      // Report an error if the field expression does not have signedness.
      if (!CompTy || CompTy->getTag() != dwarf::DW_TAG_enumeration_type)
        report_fatal_error("Invalid field expression for llvm.bpf.preserve.field.info");
      // Enums resolve to their underlying basic type.
      BaseTy = stripQualifiers(CompTy->getBaseType());
      BTy = dyn_cast<DIBasicType>(BaseTy);
    }
    uint32_t Encoding = BTy->getEncoding();
    return (Encoding == dwarf::DW_ATE_signed || Encoding == dwarf::DW_ATE_signed_char);
  }

  if (InfoKind == BPFCoreSharedInfo::FIELD_LSHIFT_U64) {
    // The value is loaded into a value with FIELD_BYTE_SIZE size,
    // and then zero or sign extended to U64.
    // FIELD_LSHIFT_U64 and FIELD_RSHIFT_U64 are operations
    // to extract the original value.
    const Triple &Triple = TM->getTargetTriple();
    DIDerivedType *MemberTy = nullptr;
    bool IsBitField = false;
    uint32_t SizeInBits;

    if (Tag == dwarf::DW_TAG_array_type) {
      auto *EltTy = stripQualifiers(CTy->getBaseType());
      SizeInBits = calcArraySize(CTy, 1) * EltTy->getSizeInBits();
    } else {
      MemberTy = cast<DIDerivedType>(CTy->getElements()[AccessIndex]);
      SizeInBits = MemberTy->getSizeInBits();
      IsBitField = MemberTy->isBitField();
    }

    if (!IsBitField) {
      if (SizeInBits > 64)
        report_fatal_error("too big field size for llvm.bpf.preserve.field.info");
      return 64 - SizeInBits;
    }

    unsigned SBitOffset, NextSBitOffset;
    GetStorageBitRange(MemberTy, RecordAlignment, SBitOffset, NextSBitOffset);
    if (NextSBitOffset - SBitOffset > 64)
      report_fatal_error("too big field size for llvm.bpf.preserve.field.info");

    // The left-shift count depends on where the bitfield sits inside the
    // loaded 64-bit value, which differs by endianness.
    unsigned OffsetInBits = MemberTy->getOffsetInBits();
    if (Triple.getArch() == Triple::bpfel)
      return SBitOffset + 64 - OffsetInBits - SizeInBits;
    else
      return OffsetInBits + 64 - NextSBitOffset;
  }

  if (InfoKind == BPFCoreSharedInfo::FIELD_RSHIFT_U64) {
    DIDerivedType *MemberTy = nullptr;
    bool IsBitField = false;
    uint32_t SizeInBits;
    if (Tag == dwarf::DW_TAG_array_type) {
      auto *EltTy = stripQualifiers(CTy->getBaseType());
      SizeInBits = calcArraySize(CTy, 1) * EltTy->getSizeInBits();
    } else {
      MemberTy = cast<DIDerivedType>(CTy->getElements()[AccessIndex]);
      SizeInBits = MemberTy->getSizeInBits();
      IsBitField = MemberTy->isBitField();
    }

    if (!IsBitField) {
      if (SizeInBits > 64)
        report_fatal_error("too big field size for llvm.bpf.preserve.field.info");
      return 64 - SizeInBits;
    }

    unsigned SBitOffset, NextSBitOffset;
    GetStorageBitRange(MemberTy, RecordAlignment, SBitOffset, NextSBitOffset);
    if (NextSBitOffset - SBitOffset > 64)
      report_fatal_error("too big field size for llvm.bpf.preserve.field.info");

    // The right-shift count is endianness-independent: only the field
    // width matters.
    return 64 - SizeInBits;
  }

  llvm_unreachable("Unknown llvm.bpf.preserve.field.info info kind");
}
743
HasPreserveFieldInfoCall(CallInfoStack & CallStack)744 bool BPFAbstractMemberAccess::HasPreserveFieldInfoCall(CallInfoStack &CallStack) {
745 // This is called in error return path, no need to maintain CallStack.
746 while (CallStack.size()) {
747 auto StackElem = CallStack.top();
748 if (StackElem.second.Kind == BPFPreserveFieldInfoAI)
749 return true;
750 CallStack.pop();
751 }
752 return false;
753 }
754
755 /// Compute the base of the whole preserve_* intrinsics chains, i.e., the base
756 /// pointer of the first preserve_*_access_index call, and construct the access
757 /// string, which will be the name of a global variable.
computeBaseAndAccessKey(CallInst * Call,CallInfo & CInfo,std::string & AccessKey,MDNode * & TypeMeta)758 Value *BPFAbstractMemberAccess::computeBaseAndAccessKey(CallInst *Call,
759 CallInfo &CInfo,
760 std::string &AccessKey,
761 MDNode *&TypeMeta) {
762 Value *Base = nullptr;
763 std::string TypeName;
764 CallInfoStack CallStack;
765
766 // Put the access chain into a stack with the top as the head of the chain.
767 while (Call) {
768 CallStack.push(std::make_pair(Call, CInfo));
769 CInfo = AIChain[Call].second;
770 Call = AIChain[Call].first;
771 }
772
773 // The access offset from the base of the head of chain is also
774 // calculated here as all debuginfo types are available.
775
776 // Get type name and calculate the first index.
777 // We only want to get type name from typedef, structure or union.
778 // If user wants a relocation like
779 // int *p; ... __builtin_preserve_access_index(&p[4]) ...
780 // or
781 // int a[10][20]; ... __builtin_preserve_access_index(&a[2][3]) ...
782 // we will skip them.
783 uint32_t FirstIndex = 0;
784 uint32_t PatchImm = 0; // AccessOffset or the requested field info
785 uint32_t InfoKind = BPFCoreSharedInfo::FIELD_BYTE_OFFSET;
786 while (CallStack.size()) {
787 auto StackElem = CallStack.top();
788 Call = StackElem.first;
789 CInfo = StackElem.second;
790
791 if (!Base)
792 Base = CInfo.Base;
793
794 DIType *PossibleTypeDef = stripQualifiers(cast<DIType>(CInfo.Metadata),
795 false);
796 DIType *Ty = stripQualifiers(PossibleTypeDef);
797 if (CInfo.Kind == BPFPreserveUnionAI ||
798 CInfo.Kind == BPFPreserveStructAI) {
799 // struct or union type. If the typedef is in the metadata, always
800 // use the typedef.
801 TypeName = std::string(PossibleTypeDef->getName());
802 TypeMeta = PossibleTypeDef;
803 PatchImm += FirstIndex * (Ty->getSizeInBits() >> 3);
804 break;
805 }
806
807 assert(CInfo.Kind == BPFPreserveArrayAI);
808
809 // Array entries will always be consumed for accumulative initial index.
810 CallStack.pop();
811
812 // BPFPreserveArrayAI
813 uint64_t AccessIndex = CInfo.AccessIndex;
814
815 DIType *BaseTy = nullptr;
816 bool CheckElemType = false;
817 if (const auto *CTy = dyn_cast<DICompositeType>(Ty)) {
818 // array type
819 assert(CTy->getTag() == dwarf::DW_TAG_array_type);
820
821
822 FirstIndex += AccessIndex * calcArraySize(CTy, 1);
823 BaseTy = stripQualifiers(CTy->getBaseType());
824 CheckElemType = CTy->getElements().size() == 1;
825 } else {
826 // pointer type
827 auto *DTy = cast<DIDerivedType>(Ty);
828 assert(DTy->getTag() == dwarf::DW_TAG_pointer_type);
829
830 BaseTy = stripQualifiers(DTy->getBaseType());
831 CTy = dyn_cast<DICompositeType>(BaseTy);
832 if (!CTy) {
833 CheckElemType = true;
834 } else if (CTy->getTag() != dwarf::DW_TAG_array_type) {
835 FirstIndex += AccessIndex;
836 CheckElemType = true;
837 } else {
838 FirstIndex += AccessIndex * calcArraySize(CTy, 0);
839 }
840 }
841
842 if (CheckElemType) {
843 auto *CTy = dyn_cast<DICompositeType>(BaseTy);
844 if (!CTy) {
845 if (HasPreserveFieldInfoCall(CallStack))
846 report_fatal_error("Invalid field access for llvm.preserve.field.info intrinsic");
847 return nullptr;
848 }
849
850 unsigned CTag = CTy->getTag();
851 if (CTag == dwarf::DW_TAG_structure_type || CTag == dwarf::DW_TAG_union_type) {
852 TypeName = std::string(CTy->getName());
853 } else {
854 if (HasPreserveFieldInfoCall(CallStack))
855 report_fatal_error("Invalid field access for llvm.preserve.field.info intrinsic");
856 return nullptr;
857 }
858 TypeMeta = CTy;
859 PatchImm += FirstIndex * (CTy->getSizeInBits() >> 3);
860 break;
861 }
862 }
863 assert(TypeName.size());
864 AccessKey += std::to_string(FirstIndex);
865
866 // Traverse the rest of access chain to complete offset calculation
867 // and access key construction.
868 while (CallStack.size()) {
869 auto StackElem = CallStack.top();
870 CInfo = StackElem.second;
871 CallStack.pop();
872
873 if (CInfo.Kind == BPFPreserveFieldInfoAI) {
874 InfoKind = CInfo.AccessIndex;
875 break;
876 }
877
878 // If the next Call (the top of the stack) is a BPFPreserveFieldInfoAI,
879 // the action will be extracting field info.
880 if (CallStack.size()) {
881 auto StackElem2 = CallStack.top();
882 CallInfo CInfo2 = StackElem2.second;
883 if (CInfo2.Kind == BPFPreserveFieldInfoAI) {
884 InfoKind = CInfo2.AccessIndex;
885 assert(CallStack.size() == 1);
886 }
887 }
888
889 // Access Index
890 uint64_t AccessIndex = CInfo.AccessIndex;
891 AccessKey += ":" + std::to_string(AccessIndex);
892
893 MDNode *MDN = CInfo.Metadata;
894 // At this stage, it cannot be pointer type.
895 auto *CTy = cast<DICompositeType>(stripQualifiers(cast<DIType>(MDN)));
896 PatchImm = GetFieldInfo(InfoKind, CTy, AccessIndex, PatchImm,
897 CInfo.RecordAlignment);
898 }
899
900 // Access key is the
901 // "llvm." + type name + ":" + reloc type + ":" + patched imm + "$" +
902 // access string,
903 // uniquely identifying one relocation.
904 // The prefix "llvm." indicates this is a temporary global, which should
905 // not be emitted to ELF file.
906 AccessKey = "llvm." + TypeName + ":" + std::to_string(InfoKind) + ":" +
907 std::to_string(PatchImm) + "$" + AccessKey;
908
909 return Base;
910 }
911
computeAccessKey(CallInst * Call,CallInfo & CInfo,std::string & AccessKey,bool & IsInt32Ret)912 MDNode *BPFAbstractMemberAccess::computeAccessKey(CallInst *Call,
913 CallInfo &CInfo,
914 std::string &AccessKey,
915 bool &IsInt32Ret) {
916 DIType *Ty = stripQualifiers(cast<DIType>(CInfo.Metadata), false);
917 assert(!Ty->getName().empty());
918
919 int64_t PatchImm;
920 std::string AccessStr("0");
921 if (CInfo.AccessIndex == BPFCoreSharedInfo::TYPE_EXISTENCE) {
922 PatchImm = 1;
923 } else if (CInfo.AccessIndex == BPFCoreSharedInfo::TYPE_SIZE) {
924 // typedef debuginfo type has size 0, get the eventual base type.
925 DIType *BaseTy = stripQualifiers(Ty, true);
926 PatchImm = BaseTy->getSizeInBits() / 8;
927 } else {
928 // ENUM_VALUE_EXISTENCE and ENUM_VALUE
929 IsInt32Ret = false;
930
931 const auto *CE = cast<ConstantExpr>(Call->getArgOperand(1));
932 const GlobalVariable *GV = cast<GlobalVariable>(CE->getOperand(0));
933 assert(GV->hasInitializer());
934 const ConstantDataArray *DA = cast<ConstantDataArray>(GV->getInitializer());
935 assert(DA->isString());
936 StringRef ValueStr = DA->getAsString();
937
938 // ValueStr format: <EnumeratorStr>:<Value>
939 size_t Separator = ValueStr.find_first_of(':');
940 StringRef EnumeratorStr = ValueStr.substr(0, Separator);
941
942 // Find enumerator index in the debuginfo
943 DIType *BaseTy = stripQualifiers(Ty, true);
944 const auto *CTy = cast<DICompositeType>(BaseTy);
945 assert(CTy->getTag() == dwarf::DW_TAG_enumeration_type);
946 int EnumIndex = 0;
947 for (const auto Element : CTy->getElements()) {
948 const auto *Enum = cast<DIEnumerator>(Element);
949 if (Enum->getName() == EnumeratorStr) {
950 AccessStr = std::to_string(EnumIndex);
951 break;
952 }
953 EnumIndex++;
954 }
955
956 if (CInfo.AccessIndex == BPFCoreSharedInfo::ENUM_VALUE) {
957 StringRef EValueStr = ValueStr.substr(Separator + 1);
958 PatchImm = std::stoll(std::string(EValueStr));
959 } else {
960 PatchImm = 1;
961 }
962 }
963
964 AccessKey = "llvm." + Ty->getName().str() + ":" +
965 std::to_string(CInfo.AccessIndex) + std::string(":") +
966 std::to_string(PatchImm) + std::string("$") + AccessStr;
967
968 return Ty;
969 }
970
971 /// Call/Kind is the base preserve_*_access_index() call. Attempts to do
972 /// transformation to a chain of relocable GEPs.
transformGEPChain(CallInst * Call,CallInfo & CInfo)973 bool BPFAbstractMemberAccess::transformGEPChain(CallInst *Call,
974 CallInfo &CInfo) {
975 std::string AccessKey;
976 MDNode *TypeMeta;
977 Value *Base = nullptr;
978 bool IsInt32Ret;
979
980 IsInt32Ret = CInfo.Kind == BPFPreserveFieldInfoAI;
981 if (CInfo.Kind == BPFPreserveFieldInfoAI && CInfo.Metadata) {
982 TypeMeta = computeAccessKey(Call, CInfo, AccessKey, IsInt32Ret);
983 } else {
984 Base = computeBaseAndAccessKey(Call, CInfo, AccessKey, TypeMeta);
985 if (!Base)
986 return false;
987 }
988
989 BasicBlock *BB = Call->getParent();
990 GlobalVariable *GV;
991
992 if (GEPGlobals.find(AccessKey) == GEPGlobals.end()) {
993 IntegerType *VarType;
994 if (IsInt32Ret)
995 VarType = Type::getInt32Ty(BB->getContext()); // 32bit return value
996 else
997 VarType = Type::getInt64Ty(BB->getContext()); // 64bit ptr or enum value
998
999 GV = new GlobalVariable(*M, VarType, false, GlobalVariable::ExternalLinkage,
1000 NULL, AccessKey);
1001 GV->addAttribute(BPFCoreSharedInfo::AmaAttr);
1002 GV->setMetadata(LLVMContext::MD_preserve_access_index, TypeMeta);
1003 GEPGlobals[AccessKey] = GV;
1004 } else {
1005 GV = GEPGlobals[AccessKey];
1006 }
1007
1008 if (CInfo.Kind == BPFPreserveFieldInfoAI) {
1009 // Load the global variable which represents the returned field info.
1010 LoadInst *LDInst;
1011 if (IsInt32Ret)
1012 LDInst = new LoadInst(Type::getInt32Ty(BB->getContext()), GV, "", Call);
1013 else
1014 LDInst = new LoadInst(Type::getInt64Ty(BB->getContext()), GV, "", Call);
1015
1016 Instruction *PassThroughInst =
1017 BPFCoreSharedInfo::insertPassThrough(M, BB, LDInst, Call);
1018 Call->replaceAllUsesWith(PassThroughInst);
1019 Call->eraseFromParent();
1020 return true;
1021 }
1022
1023 // For any original GEP Call and Base %2 like
1024 // %4 = bitcast %struct.net_device** %dev1 to i64*
1025 // it is transformed to:
1026 // %6 = load llvm.sk_buff:0:50$0:0:0:2:0
1027 // %7 = bitcast %struct.sk_buff* %2 to i8*
1028 // %8 = getelementptr i8, i8* %7, %6
1029 // %9 = bitcast i8* %8 to i64*
1030 // using %9 instead of %4
1031 // The original Call inst is removed.
1032
1033 // Load the global variable.
1034 auto *LDInst = new LoadInst(Type::getInt64Ty(BB->getContext()), GV, "", Call);
1035
1036 // Generate a BitCast
1037 auto *BCInst = new BitCastInst(Base, Type::getInt8PtrTy(BB->getContext()));
1038 BB->getInstList().insert(Call->getIterator(), BCInst);
1039
1040 // Generate a GetElementPtr
1041 auto *GEP = GetElementPtrInst::Create(Type::getInt8Ty(BB->getContext()),
1042 BCInst, LDInst);
1043 BB->getInstList().insert(Call->getIterator(), GEP);
1044
1045 // Generate a BitCast
1046 auto *BCInst2 = new BitCastInst(GEP, Call->getType());
1047 BB->getInstList().insert(Call->getIterator(), BCInst2);
1048
1049 // For the following code,
1050 // Block0:
1051 // ...
1052 // if (...) goto Block1 else ...
1053 // Block1:
1054 // %6 = load llvm.sk_buff:0:50$0:0:0:2:0
1055 // %7 = bitcast %struct.sk_buff* %2 to i8*
1056 // %8 = getelementptr i8, i8* %7, %6
1057 // ...
1058 // goto CommonExit
1059 // Block2:
1060 // ...
1061 // if (...) goto Block3 else ...
1062 // Block3:
1063 // %6 = load llvm.bpf_map:0:40$0:0:0:2:0
1064 // %7 = bitcast %struct.sk_buff* %2 to i8*
1065 // %8 = getelementptr i8, i8* %7, %6
1066 // ...
1067 // goto CommonExit
1068 // CommonExit
1069 // SimplifyCFG may generate:
1070 // Block0:
1071 // ...
1072 // if (...) goto Block_Common else ...
1073 // Block2:
1074 // ...
1075 // if (...) goto Block_Common else ...
1076 // Block_Common:
1077 // PHI = [llvm.sk_buff:0:50$0:0:0:2:0, llvm.bpf_map:0:40$0:0:0:2:0]
1078 // %6 = load PHI
1079 // %7 = bitcast %struct.sk_buff* %2 to i8*
1080 // %8 = getelementptr i8, i8* %7, %6
1081 // ...
1082 // goto CommonExit
1083 // For the above code, we cannot perform proper relocation since
1084 // "load PHI" has two possible relocations.
1085 //
1086 // To prevent above tail merging, we use __builtin_bpf_passthrough()
1087 // where one of its parameters is a seq_num. Since two
1088 // __builtin_bpf_passthrough() funcs will always have different seq_num,
1089 // tail merging cannot happen. The __builtin_bpf_passthrough() will be
1090 // removed in the beginning of Target IR passes.
1091 //
1092 // This approach is also used in other places when global var
1093 // representing a relocation is used.
1094 Instruction *PassThroughInst =
1095 BPFCoreSharedInfo::insertPassThrough(M, BB, BCInst2, Call);
1096 Call->replaceAllUsesWith(PassThroughInst);
1097 Call->eraseFromParent();
1098
1099 return true;
1100 }
1101
doTransformation(Function & F)1102 bool BPFAbstractMemberAccess::doTransformation(Function &F) {
1103 bool Transformed = false;
1104
1105 // Collect PreserveDIAccessIndex Intrinsic call chains.
1106 // The call chains will be used to generate the access
1107 // patterns similar to GEP.
1108 collectAICallChains(F);
1109
1110 for (auto &C : BaseAICalls)
1111 Transformed = transformGEPChain(C.first, C.second) || Transformed;
1112
1113 return removePreserveAccessIndexIntrinsic(F) || Transformed;
1114 }
1115
1116 PreservedAnalyses
run(Function & F,FunctionAnalysisManager & AM)1117 BPFAbstractMemberAccessPass::run(Function &F, FunctionAnalysisManager &AM) {
1118 return BPFAbstractMemberAccess(TM).run(F) ? PreservedAnalyses::none()
1119 : PreservedAnalyses::all();
1120 }
1121