1 //===-- BPFISelLowering.cpp - BPF DAG Lowering Implementation  ------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file defines the interfaces that BPF uses to lower LLVM code into a
11 // selection DAG.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "BPFISelLowering.h"
16 #include "BPF.h"
17 #include "BPFSubtarget.h"
18 #include "BPFTargetMachine.h"
19 #include "llvm/CodeGen/CallingConvLower.h"
20 #include "llvm/CodeGen/MachineFrameInfo.h"
21 #include "llvm/CodeGen/MachineFunction.h"
22 #include "llvm/CodeGen/MachineInstrBuilder.h"
23 #include "llvm/CodeGen/MachineRegisterInfo.h"
24 #include "llvm/CodeGen/SelectionDAGISel.h"
25 #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
26 #include "llvm/CodeGen/ValueTypes.h"
27 #include "llvm/IR/DiagnosticInfo.h"
28 #include "llvm/IR/DiagnosticPrinter.h"
29 #include "llvm/Support/Debug.h"
30 #include "llvm/Support/ErrorHandling.h"
31 #include "llvm/Support/raw_ostream.h"
32 using namespace llvm;
33 
34 #define DEBUG_TYPE "bpf-lower"
35 
// Command-line switch: when set, memcpy is not expanded early into generic
// load/store pairs (which later passes could reorder); instead expansion is
// deferred so the emitted load/store pairs stay in order for kernel eBPF JIT
// pattern matchers.
static cl::opt<bool> BPFExpandMemcpyInOrder("bpf-expand-memcpy-in-order",
  cl::Hidden, cl::init(false),
  cl::desc("Expand memcpy into load/store pairs in order"));
39 
fail(const SDLoc & DL,SelectionDAG & DAG,const Twine & Msg)40 static void fail(const SDLoc &DL, SelectionDAG &DAG, const Twine &Msg) {
41   MachineFunction &MF = DAG.getMachineFunction();
42   DAG.getContext()->diagnose(
43       DiagnosticInfoUnsupported(MF.getFunction(), Msg, DL.getDebugLoc()));
44 }
45 
fail(const SDLoc & DL,SelectionDAG & DAG,const char * Msg,SDValue Val)46 static void fail(const SDLoc &DL, SelectionDAG &DAG, const char *Msg,
47                  SDValue Val) {
48   MachineFunction &MF = DAG.getMachineFunction();
49   std::string Str;
50   raw_string_ostream OS(Str);
51   OS << Msg;
52   Val->print(OS);
53   OS.flush();
54   DAG.getContext()->diagnose(
55       DiagnosticInfoUnsupported(MF.getFunction(), Str, DL.getDebugLoc()));
56 }
57 
// Construct the BPF target lowering: registers classes, legalization actions
// for operations BPF lacks, extended-load behavior, and memcpy/memset
// expansion limits for the given subtarget.
BPFTargetLowering::BPFTargetLowering(const TargetMachine &TM,
                                     const BPFSubtarget &STI)
    : TargetLowering(TM) {

  // Set up the register classes.  i64 always lives in the 64-bit GPRs;
  // i32 is only a legal type when the subtarget has 32-bit ALU support.
  addRegisterClass(MVT::i64, &BPF::GPRRegClass);
  if (STI.getHasAlu32())
    addRegisterClass(MVT::i32, &BPF::GPR32RegClass);

  // Compute derived properties from the register classes
  computeRegisterProperties(STI.getRegisterInfo());

  setStackPointerRegisterToSaveRestore(BPF::R11);

  setOperationAction(ISD::BR_CC, MVT::i64, Custom);
  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::BRIND, MVT::Other, Expand);
  setOperationAction(ISD::BRCOND, MVT::Other, Expand);

  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);

  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Custom);
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);

  // Expand operations with no direct BPF equivalent; SELECT_CC is custom
  // lowered (into a branch diamond, see EmitInstrWithCustomInserter).
  for (auto VT : { MVT::i32, MVT::i64 }) {
    if (VT == MVT::i32 && !STI.getHasAlu32())
      continue;

    setOperationAction(ISD::SDIVREM, VT, Expand);
    setOperationAction(ISD::UDIVREM, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);
    setOperationAction(ISD::MULHU, VT, Expand);
    setOperationAction(ISD::MULHS, VT, Expand);
    setOperationAction(ISD::UMUL_LOHI, VT, Expand);
    setOperationAction(ISD::SMUL_LOHI, VT, Expand);
    setOperationAction(ISD::ROTR, VT, Expand);
    setOperationAction(ISD::ROTL, VT, Expand);
    setOperationAction(ISD::SHL_PARTS, VT, Expand);
    setOperationAction(ISD::SRL_PARTS, VT, Expand);
    setOperationAction(ISD::SRA_PARTS, VT, Expand);
    setOperationAction(ISD::CTPOP, VT, Expand);

    setOperationAction(ISD::SETCC, VT, Expand);
    setOperationAction(ISD::SELECT, VT, Expand);
    setOperationAction(ISD::SELECT_CC, VT, Custom);
  }

  // With ALU32, 32-bit byte swaps and compare-and-branch are promoted to the
  // 64-bit forms.
  if (STI.getHasAlu32()) {
    setOperationAction(ISD::BSWAP, MVT::i32, Promote);
    setOperationAction(ISD::BR_CC, MVT::i32, Promote);
  }

  setOperationAction(ISD::CTTZ, MVT::i64, Custom);
  setOperationAction(ISD::CTLZ, MVT::i64, Custom);
  setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Custom);
  setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Custom);

  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Expand);

  // Extended load operations for i1 types must be promoted; BPF has no
  // sign-extending loads, so sext loads of i8/i16/i32 are expanded.
  for (MVT VT : MVT::integer_valuetypes()) {
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);

    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Expand);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i16, Expand);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i32, Expand);
  }

  setBooleanContents(ZeroOrOneBooleanContent);

  // Function alignments (log2)
  setMinFunctionAlignment(3);
  setPrefFunctionAlignment(3);

  if (BPFExpandMemcpyInOrder) {
    // LLVM generic code will try to expand memcpy into load/store pairs at this
    // stage which is before quite a few IR optimization passes, therefore the
    // loads and stores could potentially be moved apart from each other which
    // will cause trouble to memcpy pattern matcher inside kernel eBPF JIT
    // compilers.
    //
    // When -bpf-expand-memcpy-in-order specified, we want to defer the expand
    // of memcpy to later stage in IR optimization pipeline so those load/store
    // pairs won't be touched and could be kept in order. Hence, we set
    // MaxStoresPerMem* to zero to disable the generic getMemcpyLoadsAndStores
    // code path, and ask LLVM to use target expander EmitTargetCodeForMemcpy.
    MaxStoresPerMemset = MaxStoresPerMemsetOptSize = 0;
    MaxStoresPerMemcpy = MaxStoresPerMemcpyOptSize = 0;
    MaxStoresPerMemmove = MaxStoresPerMemmoveOptSize = 0;
  } else {
    // inline memcpy() for kernel to see explicit copy
    unsigned CommonMaxStores =
      STI.getSelectionDAGInfo()->getCommonMaxStoresPerMemFunc();

    MaxStoresPerMemset = MaxStoresPerMemsetOptSize = CommonMaxStores;
    MaxStoresPerMemcpy = MaxStoresPerMemcpyOptSize = CommonMaxStores;
    MaxStoresPerMemmove = MaxStoresPerMemmoveOptSize = CommonMaxStores;
  }

  // CPU/Feature control: cache subtarget feature flags for use in lowering.
  HasAlu32 = STI.getHasAlu32();
  HasJmpExt = STI.getHasJmpExt();
}
168 
isOffsetFoldingLegal(const GlobalAddressSDNode * GA) const169 bool BPFTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
170   return false;
171 }
172 
173 std::pair<unsigned, const TargetRegisterClass *>
getRegForInlineAsmConstraint(const TargetRegisterInfo * TRI,StringRef Constraint,MVT VT) const174 BPFTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
175                                                 StringRef Constraint,
176                                                 MVT VT) const {
177   if (Constraint.size() == 1)
178     // GCC Constraint Letters
179     switch (Constraint[0]) {
180     case 'r': // GENERAL_REGS
181       return std::make_pair(0U, &BPF::GPRRegClass);
182     default:
183       break;
184     }
185 
186   return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
187 }
188 
LowerOperation(SDValue Op,SelectionDAG & DAG) const189 SDValue BPFTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
190   switch (Op.getOpcode()) {
191   case ISD::BR_CC:
192     return LowerBR_CC(Op, DAG);
193   case ISD::GlobalAddress:
194     return LowerGlobalAddress(Op, DAG);
195   case ISD::SELECT_CC:
196     return LowerSELECT_CC(Op, DAG);
197   default:
198     llvm_unreachable("unimplemented operand");
199   }
200 }
201 
202 // Calling Convention Implementation
203 #include "BPFGenCallingConv.inc"
204 
LowerFormalArguments(SDValue Chain,CallingConv::ID CallConv,bool IsVarArg,const SmallVectorImpl<ISD::InputArg> & Ins,const SDLoc & DL,SelectionDAG & DAG,SmallVectorImpl<SDValue> & InVals) const205 SDValue BPFTargetLowering::LowerFormalArguments(
206     SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
207     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
208     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
209   switch (CallConv) {
210   default:
211     report_fatal_error("Unsupported calling convention");
212   case CallingConv::C:
213   case CallingConv::Fast:
214     break;
215   }
216 
217   MachineFunction &MF = DAG.getMachineFunction();
218   MachineRegisterInfo &RegInfo = MF.getRegInfo();
219 
220   // Assign locations to all of the incoming arguments.
221   SmallVector<CCValAssign, 16> ArgLocs;
222   CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
223   CCInfo.AnalyzeFormalArguments(Ins, getHasAlu32() ? CC_BPF32 : CC_BPF64);
224 
225   for (auto &VA : ArgLocs) {
226     if (VA.isRegLoc()) {
227       // Arguments passed in registers
228       EVT RegVT = VA.getLocVT();
229       MVT::SimpleValueType SimpleTy = RegVT.getSimpleVT().SimpleTy;
230       switch (SimpleTy) {
231       default: {
232         errs() << "LowerFormalArguments Unhandled argument type: "
233                << RegVT.getEVTString() << '\n';
234         llvm_unreachable(0);
235       }
236       case MVT::i32:
237       case MVT::i64:
238         unsigned VReg = RegInfo.createVirtualRegister(SimpleTy == MVT::i64 ?
239                                                       &BPF::GPRRegClass :
240                                                       &BPF::GPR32RegClass);
241         RegInfo.addLiveIn(VA.getLocReg(), VReg);
242         SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, VReg, RegVT);
243 
244         // If this is an value that has been promoted to wider types, insert an
245         // assert[sz]ext to capture this, then truncate to the right size.
246         if (VA.getLocInfo() == CCValAssign::SExt)
247           ArgValue = DAG.getNode(ISD::AssertSext, DL, RegVT, ArgValue,
248                                  DAG.getValueType(VA.getValVT()));
249         else if (VA.getLocInfo() == CCValAssign::ZExt)
250           ArgValue = DAG.getNode(ISD::AssertZext, DL, RegVT, ArgValue,
251                                  DAG.getValueType(VA.getValVT()));
252 
253         if (VA.getLocInfo() != CCValAssign::Full)
254           ArgValue = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), ArgValue);
255 
256         InVals.push_back(ArgValue);
257 
258 	break;
259       }
260     } else {
261       fail(DL, DAG, "defined with too many args");
262       InVals.push_back(DAG.getConstant(0, DL, VA.getLocVT()));
263     }
264   }
265 
266   if (IsVarArg || MF.getFunction().hasStructRetAttr()) {
267     fail(DL, DAG, "functions with VarArgs or StructRet are not supported");
268   }
269 
270   return Chain;
271 }
272 
273 const unsigned BPFTargetLowering::MaxArgs = 5;
274 
// Lower an outgoing call.  All arguments must fit in the first MaxArgs
// argument registers; byval arguments, excess arguments, and calls to
// external symbols are diagnosed as unsupported (lowering still proceeds so
// further diagnostics can be collected).
SDValue BPFTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                                     SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  auto &Outs = CLI.Outs;
  auto &OutVals = CLI.OutVals;
  auto &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  bool &IsTailCall = CLI.IsTailCall;
  CallingConv::ID CallConv = CLI.CallConv;
  bool IsVarArg = CLI.IsVarArg;
  MachineFunction &MF = DAG.getMachineFunction();

  // BPF target does not support tail call optimization.
  IsTailCall = false;

  switch (CallConv) {
  default:
    report_fatal_error("Unsupported calling convention");
  case CallingConv::Fast:
  case CallingConv::C:
    break;
  }

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());

  CCInfo.AnalyzeCallOperands(Outs, getHasAlu32() ? CC_BPF32 : CC_BPF64);

  unsigned NumBytes = CCInfo.getNextStackOffset();

  // Diagnose (but do not abort on) calls that need more than MaxArgs
  // argument registers.
  if (Outs.size() > MaxArgs)
    fail(CLI.DL, DAG, "too many args to ", Callee);

  for (auto &Arg : Outs) {
    ISD::ArgFlagsTy Flags = Arg.Flags;
    if (!Flags.isByVal())
      continue;

    fail(CLI.DL, DAG, "pass by value not supported ", Callee);
  }

  auto PtrVT = getPointerTy(MF.getDataLayout());
  Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, CLI.DL);

  SmallVector<std::pair<unsigned, SDValue>, MaxArgs> RegsToPass;

  // Walk arg assignments.  Arguments beyond MaxArgs are dropped here; they
  // were already diagnosed above.
  for (unsigned i = 0,
                e = std::min(static_cast<unsigned>(ArgLocs.size()), MaxArgs);
       i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    SDValue Arg = OutVals[i];

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default:
      llvm_unreachable("Unknown loc info");
    case CCValAssign::Full:
      break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, CLI.DL, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, CLI.DL, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, CLI.DL, VA.getLocVT(), Arg);
      break;
    }

    // Push arguments into RegsToPass vector
    if (VA.isRegLoc())
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
    else
      llvm_unreachable("call arg pass bug");
  }

  SDValue InFlag;

  // Build a sequence of copy-to-reg nodes chained together with token chain and
  // flag operands which copy the outgoing args into registers.  The InFlag in
  // necessary since all emitted instructions must be stuck together.
  for (auto &Reg : RegsToPass) {
    Chain = DAG.getCopyToReg(Chain, CLI.DL, Reg.first, Reg.second, InFlag);
    InFlag = Chain.getValue(1);
  }

  // If the callee is a GlobalAddress node (quite common, every direct call is)
  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
  // Likewise ExternalSymbol -> TargetExternalSymbol.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), CLI.DL, PtrVT,
                                        G->getOffset(), 0);
  } else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee)) {
    Callee = DAG.getTargetExternalSymbol(E->getSymbol(), PtrVT, 0);
    fail(CLI.DL, DAG, Twine("A call to built-in function '"
                            + StringRef(E->getSymbol())
                            + "' is not supported."));
  }

  // Returns a chain & a flag for retval copy to use.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are
  // known live into the call.
  for (auto &Reg : RegsToPass)
    Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));

  if (InFlag.getNode())
    Ops.push_back(InFlag);

  Chain = DAG.getNode(BPFISD::CALL, CLI.DL, NodeTys, Ops);
  InFlag = Chain.getValue(1);

  // Create the CALLSEQ_END node.
  Chain = DAG.getCALLSEQ_END(
      Chain, DAG.getConstant(NumBytes, CLI.DL, PtrVT, true),
      DAG.getConstant(0, CLI.DL, PtrVT, true), InFlag, CLI.DL);
  InFlag = Chain.getValue(1);

  // Handle result values, copying them out of physregs into vregs that we
  // return.
  return LowerCallResult(Chain, InFlag, CallConv, IsVarArg, Ins, CLI.DL, DAG,
                         InVals);
}
405 
// Lower a function return: copy return values into the return registers and
// emit a BPFISD::RET_FLAG node.  Aggregate returns are diagnosed as
// unsupported.
SDValue
BPFTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                               bool IsVarArg,
                               const SmallVectorImpl<ISD::OutputArg> &Outs,
                               const SmallVectorImpl<SDValue> &OutVals,
                               const SDLoc &DL, SelectionDAG &DAG) const {
  unsigned Opc = BPFISD::RET_FLAG;

  // CCValAssign - represent the assignment of the return value to a location
  SmallVector<CCValAssign, 16> RVLocs;
  MachineFunction &MF = DAG.getMachineFunction();

  // CCState - Info about the registers and stack slot.
  CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());

  if (MF.getFunction().getReturnType()->isAggregateType()) {
    fail(DL, DAG, "only integer returns supported");
    return DAG.getNode(Opc, DL, MVT::Other, Chain);
  }

  // Analyze return values.
  CCInfo.AnalyzeReturn(Outs, getHasAlu32() ? RetCC_BPF32 : RetCC_BPF64);

  SDValue Flag;
  SmallVector<SDValue, 4> RetOps(1, Chain);

  // Copy the result values into the output registers.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");

    Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), OutVals[i], Flag);

    // Guarantee that all emitted copies are stuck together,
    // avoiding something bad.
    Flag = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  RetOps[0] = Chain; // Update chain.

  // Add the flag if we have it.
  if (Flag.getNode())
    RetOps.push_back(Flag);

  return DAG.getNode(Opc, DL, MVT::Other, RetOps);
}
453 
// Copy the values returned by a call out of their physical registers.  At
// most one return value is supported; two or more are diagnosed and
// replaced with zeros.
SDValue BPFTargetLowering::LowerCallResult(
    SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {

  MachineFunction &MF = DAG.getMachineFunction();
  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());

  if (Ins.size() >= 2) {
    fail(DL, DAG, "only small returns supported");
    for (unsigned i = 0, e = Ins.size(); i != e; ++i)
      InVals.push_back(DAG.getConstant(0, DL, Ins[i].VT));
    return DAG.getCopyFromReg(Chain, DL, 1, Ins[0].VT, InFlag).getValue(1);
  }

  CCInfo.AnalyzeCallResult(Ins, getHasAlu32() ? RetCC_BPF32 : RetCC_BPF64);

  // Copy all of the result registers out of their specified physreg.
  // CopyFromReg yields (value, chain, glue); result 1 is the new chain and
  // result 2 the glue that serializes consecutive copies.
  for (auto &Val : RVLocs) {
    Chain = DAG.getCopyFromReg(Chain, DL, Val.getLocReg(),
                               Val.getValVT(), InFlag).getValue(1);
    InFlag = Chain.getValue(2);
    InVals.push_back(Chain.getValue(0));
  }

  return Chain;
}
483 
NegateCC(SDValue & LHS,SDValue & RHS,ISD::CondCode & CC)484 static void NegateCC(SDValue &LHS, SDValue &RHS, ISD::CondCode &CC) {
485   switch (CC) {
486   default:
487     break;
488   case ISD::SETULT:
489   case ISD::SETULE:
490   case ISD::SETLT:
491   case ISD::SETLE:
492     CC = ISD::getSetCCSwappedOperands(CC);
493     std::swap(LHS, RHS);
494     break;
495   }
496 }
497 
LowerBR_CC(SDValue Op,SelectionDAG & DAG) const498 SDValue BPFTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
499   SDValue Chain = Op.getOperand(0);
500   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
501   SDValue LHS = Op.getOperand(2);
502   SDValue RHS = Op.getOperand(3);
503   SDValue Dest = Op.getOperand(4);
504   SDLoc DL(Op);
505 
506   if (!getHasJmpExt())
507     NegateCC(LHS, RHS, CC);
508 
509   return DAG.getNode(BPFISD::BR_CC, DL, Op.getValueType(), Chain, LHS, RHS,
510                      DAG.getConstant(CC, DL, MVT::i64), Dest);
511 }
512 
LowerSELECT_CC(SDValue Op,SelectionDAG & DAG) const513 SDValue BPFTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
514   SDValue LHS = Op.getOperand(0);
515   SDValue RHS = Op.getOperand(1);
516   SDValue TrueV = Op.getOperand(2);
517   SDValue FalseV = Op.getOperand(3);
518   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
519   SDLoc DL(Op);
520 
521   if (!getHasJmpExt())
522     NegateCC(LHS, RHS, CC);
523 
524   SDValue TargetCC = DAG.getConstant(CC, DL, LHS.getValueType());
525   SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
526   SDValue Ops[] = {LHS, RHS, TargetCC, TrueV, FalseV};
527 
528   return DAG.getNode(BPFISD::SELECT_CC, DL, VTs, Ops);
529 }
530 
getTargetNodeName(unsigned Opcode) const531 const char *BPFTargetLowering::getTargetNodeName(unsigned Opcode) const {
532   switch ((BPFISD::NodeType)Opcode) {
533   case BPFISD::FIRST_NUMBER:
534     break;
535   case BPFISD::RET_FLAG:
536     return "BPFISD::RET_FLAG";
537   case BPFISD::CALL:
538     return "BPFISD::CALL";
539   case BPFISD::SELECT_CC:
540     return "BPFISD::SELECT_CC";
541   case BPFISD::BR_CC:
542     return "BPFISD::BR_CC";
543   case BPFISD::Wrapper:
544     return "BPFISD::Wrapper";
545   case BPFISD::MEMCPY:
546     return "BPFISD::MEMCPY";
547   }
548   return nullptr;
549 }
550 
LowerGlobalAddress(SDValue Op,SelectionDAG & DAG) const551 SDValue BPFTargetLowering::LowerGlobalAddress(SDValue Op,
552                                               SelectionDAG &DAG) const {
553   auto N = cast<GlobalAddressSDNode>(Op);
554   assert(N->getOffset() == 0 && "Invalid offset for global address");
555 
556   SDLoc DL(Op);
557   const GlobalValue *GV = N->getGlobal();
558   SDValue GA = DAG.getTargetGlobalAddress(GV, DL, MVT::i64);
559 
560   return DAG.getNode(BPFISD::Wrapper, DL, MVT::i64, GA);
561 }
562 
// Promote the 32-bit value in \p Reg to a fresh 64-bit register and return
// the new register.  The promotion is a MOV_32_64 followed by a shift-left
// by 32 and a shift-right by 32 (arithmetic when \p isSigned, logical
// otherwise), i.e. an explicit sign/zero extension sequence.
unsigned
BPFTargetLowering::EmitSubregExt(MachineInstr &MI, MachineBasicBlock *BB,
                                 unsigned Reg, bool isSigned) const {
  const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo();
  const TargetRegisterClass *RC = getRegClassFor(MVT::i64);
  // Arithmetic right shift preserves the sign bit; logical fills with zero.
  int RShiftOp = isSigned ? BPF::SRA_ri : BPF::SRL_ri;
  MachineFunction *F = BB->getParent();
  DebugLoc DL = MI.getDebugLoc();

  MachineRegisterInfo &RegInfo = F->getRegInfo();
  unsigned PromotedReg0 = RegInfo.createVirtualRegister(RC);
  unsigned PromotedReg1 = RegInfo.createVirtualRegister(RC);
  unsigned PromotedReg2 = RegInfo.createVirtualRegister(RC);
  BuildMI(BB, DL, TII.get(BPF::MOV_32_64), PromotedReg0).addReg(Reg);
  BuildMI(BB, DL, TII.get(BPF::SLL_ri), PromotedReg1)
    .addReg(PromotedReg0).addImm(32);
  BuildMI(BB, DL, TII.get(RShiftOp), PromotedReg2)
    .addReg(PromotedReg1).addImm(32);

  return PromotedReg2;
}
584 
585 MachineBasicBlock *
EmitInstrWithCustomInserterMemcpy(MachineInstr & MI,MachineBasicBlock * BB) const586 BPFTargetLowering::EmitInstrWithCustomInserterMemcpy(MachineInstr &MI,
587                                                      MachineBasicBlock *BB)
588                                                      const {
589   MachineFunction *MF = MI.getParent()->getParent();
590   MachineRegisterInfo &MRI = MF->getRegInfo();
591   MachineInstrBuilder MIB(*MF, MI);
592   unsigned ScratchReg;
593 
594   // This function does custom insertion during lowering BPFISD::MEMCPY which
595   // only has two register operands from memcpy semantics, the copy source
596   // address and the copy destination address.
597   //
598   // Because we will expand BPFISD::MEMCPY into load/store pairs, we will need
599   // a third scratch register to serve as the destination register of load and
600   // source register of store.
601   //
602   // The scratch register here is with the Define | Dead | EarlyClobber flags.
603   // The EarlyClobber flag has the semantic property that the operand it is
604   // attached to is clobbered before the rest of the inputs are read. Hence it
605   // must be unique among the operands to the instruction. The Define flag is
606   // needed to coerce the machine verifier that an Undef value isn't a problem
607   // as we anyway is loading memory into it. The Dead flag is needed as the
608   // value in scratch isn't supposed to be used by any other instruction.
609   ScratchReg = MRI.createVirtualRegister(&BPF::GPRRegClass);
610   MIB.addReg(ScratchReg,
611              RegState::Define | RegState::Dead | RegState::EarlyClobber);
612 
613   return BB;
614 }
615 
// Custom insertion point for pseudo instructions: MEMCPY is delegated to
// EmitInstrWithCustomInserterMemcpy; the Select* pseudos are expanded into
// an explicit compare-and-branch diamond with a PHI merging the two values.
MachineBasicBlock *
BPFTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
                                               MachineBasicBlock *BB) const {
  const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo();
  DebugLoc DL = MI.getDebugLoc();
  unsigned Opc = MI.getOpcode();
  // Register-register selects (vs. register-immediate Select_Ri* forms).
  bool isSelectRROp = (Opc == BPF::Select ||
                       Opc == BPF::Select_64_32 ||
                       Opc == BPF::Select_32 ||
                       Opc == BPF::Select_32_64);

  bool isMemcpyOp = Opc == BPF::MEMCPY;

#ifndef NDEBUG
  bool isSelectRIOp = (Opc == BPF::Select_Ri ||
                       Opc == BPF::Select_Ri_64_32 ||
                       Opc == BPF::Select_Ri_32 ||
                       Opc == BPF::Select_Ri_32_64);


  assert((isSelectRROp || isSelectRIOp || isMemcpyOp) &&
         "Unexpected instr type to insert");
#endif

  if (isMemcpyOp)
    return EmitInstrWithCustomInserterMemcpy(MI, BB);

  // Selects whose comparison operands are 32-bit and therefore need to be
  // promoted to 64 bits before the (64-bit only) conditional jump.
  bool is32BitCmp = (Opc == BPF::Select_32 ||
                     Opc == BPF::Select_32_64 ||
                     Opc == BPF::Select_Ri_32 ||
                     Opc == BPF::Select_Ri_32_64);

  // To "insert" a SELECT instruction, we actually have to insert the diamond
  // control-flow pattern.  The incoming instruction knows the destination vreg
  // to set, the condition code register to branch on, the true/false values to
  // select between, and a branch opcode to use.
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction::iterator I = ++BB->getIterator();

  // ThisMBB:
  // ...
  //  TrueVal = ...
  //  jmp_XX r1, r2 goto Copy1MBB
  //  fallthrough --> Copy0MBB
  MachineBasicBlock *ThisMBB = BB;
  MachineFunction *F = BB->getParent();
  MachineBasicBlock *Copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *Copy1MBB = F->CreateMachineBasicBlock(LLVM_BB);

  F->insert(I, Copy0MBB);
  F->insert(I, Copy1MBB);
  // Update machine-CFG edges by transferring all successors of the current
  // block to the new block which will contain the Phi node for the select.
  Copy1MBB->splice(Copy1MBB->begin(), BB,
                   std::next(MachineBasicBlock::iterator(MI)), BB->end());
  Copy1MBB->transferSuccessorsAndUpdatePHIs(BB);
  // Next, add the true and fallthrough blocks as its successors.
  BB->addSuccessor(Copy0MBB);
  BB->addSuccessor(Copy1MBB);

  // Insert Branch if Flag: map the ISD condition code to the matching BPF
  // jump opcode (register-register or register-immediate form).
  int CC = MI.getOperand(3).getImm();
  int NewCC;
  switch (CC) {
  case ISD::SETGT:
    NewCC = isSelectRROp ? BPF::JSGT_rr : BPF::JSGT_ri;
    break;
  case ISD::SETUGT:
    NewCC = isSelectRROp ? BPF::JUGT_rr : BPF::JUGT_ri;
    break;
  case ISD::SETGE:
    NewCC = isSelectRROp ? BPF::JSGE_rr : BPF::JSGE_ri;
    break;
  case ISD::SETUGE:
    NewCC = isSelectRROp ? BPF::JUGE_rr : BPF::JUGE_ri;
    break;
  case ISD::SETEQ:
    NewCC = isSelectRROp ? BPF::JEQ_rr : BPF::JEQ_ri;
    break;
  case ISD::SETNE:
    NewCC = isSelectRROp ? BPF::JNE_rr : BPF::JNE_ri;
    break;
  case ISD::SETLT:
    NewCC = isSelectRROp ? BPF::JSLT_rr : BPF::JSLT_ri;
    break;
  case ISD::SETULT:
    NewCC = isSelectRROp ? BPF::JULT_rr : BPF::JULT_ri;
    break;
  case ISD::SETLE:
    NewCC = isSelectRROp ? BPF::JSLE_rr : BPF::JSLE_ri;
    break;
  case ISD::SETULE:
    NewCC = isSelectRROp ? BPF::JULE_rr : BPF::JULE_ri;
    break;
  default:
    report_fatal_error("unimplemented select CondCode " + Twine(CC));
  }

  unsigned LHS = MI.getOperand(1).getReg();
  bool isSignedCmp = (CC == ISD::SETGT ||
                      CC == ISD::SETGE ||
                      CC == ISD::SETLT ||
                      CC == ISD::SETLE);

  // eBPF at the moment only has 64-bit comparison. Any 32-bit comparison need
  // to be promoted, however if the 32-bit comparison operands are destination
  // registers then they are implicitly zero-extended already, there is no
  // need of explicit zero-extend sequence for them.
  //
  // We simply do extension for all situations in this method, but we will
  // try to remove those unnecessary in BPFMIPeephole pass.
  if (is32BitCmp)
    LHS = EmitSubregExt(MI, BB, LHS, isSignedCmp);

  if (isSelectRROp) {
    unsigned RHS = MI.getOperand(2).getReg();

    if (is32BitCmp)
      RHS = EmitSubregExt(MI, BB, RHS, isSignedCmp);

    BuildMI(BB, DL, TII.get(NewCC)).addReg(LHS).addReg(RHS).addMBB(Copy1MBB);
  } else {
    int64_t imm32 = MI.getOperand(2).getImm();
    // sanity check before we build J*_ri instruction.
    assert (isInt<32>(imm32));
    BuildMI(BB, DL, TII.get(NewCC))
        .addReg(LHS).addImm(imm32).addMBB(Copy1MBB);
  }

  // Copy0MBB:
  //  %FalseValue = ...
  //  # fallthrough to Copy1MBB
  BB = Copy0MBB;

  // Update machine-CFG edges
  BB->addSuccessor(Copy1MBB);

  // Copy1MBB:
  //  %Result = phi [ %FalseValue, Copy0MBB ], [ %TrueValue, ThisMBB ]
  // ...
  BB = Copy1MBB;
  BuildMI(*BB, BB->begin(), DL, TII.get(BPF::PHI), MI.getOperand(0).getReg())
      .addReg(MI.getOperand(5).getReg())
      .addMBB(Copy0MBB)
      .addReg(MI.getOperand(4).getReg())
      .addMBB(ThisMBB);

  MI.eraseFromParent(); // The pseudo instruction is gone now.
  return BB;
}
766 
getSetCCResultType(const DataLayout &,LLVMContext &,EVT VT) const767 EVT BPFTargetLowering::getSetCCResultType(const DataLayout &, LLVMContext &,
768                                           EVT VT) const {
769   return getHasAlu32() ? MVT::i32 : MVT::i64;
770 }
771 
getScalarShiftAmountTy(const DataLayout & DL,EVT VT) const772 MVT BPFTargetLowering::getScalarShiftAmountTy(const DataLayout &DL,
773                                               EVT VT) const {
774   return (getHasAlu32() && VT == MVT::i32) ? MVT::i32 : MVT::i64;
775 }
776