//===-- HexagonISelLowering.cpp - Hexagon DAG Lowering Implementation -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the interfaces that Hexagon uses to lower LLVM code
// into a selection DAG.
//
//===----------------------------------------------------------------------===//

#include "HexagonISelLowering.h"
#include "Hexagon.h"
#include "HexagonMachineFunctionInfo.h"
#include "HexagonRegisterInfo.h"
#include "HexagonSubtarget.h"
#include "HexagonTargetMachine.h"
#include "HexagonTargetObjectFile.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetCallingConv.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsHexagon.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <limits>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "hexagon-lowering"

static cl::opt<bool> EmitJumpTables("hexagon-emit-jump-tables",
  cl::init(true), cl::Hidden,
  cl::desc("Control jump table emission on Hexagon target"));

static cl::opt<bool> EnableHexSDNodeSched("enable-hexagon-sdnode-sched",
  cl::Hidden, cl::ZeroOrMore, cl::init(false),
  cl::desc("Enable Hexagon SDNode scheduling"));

static cl::opt<bool> EnableFastMath("ffast-math",
  cl::Hidden, cl::ZeroOrMore, cl::init(false),
  cl::desc("Enable Fast Math processing"));

static cl::opt<int> MinimumJumpTables("minimum-jump-tables",
  cl::Hidden, cl::ZeroOrMore, cl::init(5),
  cl::desc("Set minimum jump tables"));

static cl::opt<int> MaxStoresPerMemcpyCL("max-store-memcpy",
  cl::Hidden, cl::ZeroOrMore, cl::init(6),
  cl::desc("Max #stores to inline memcpy"));

static cl::opt<int> MaxStoresPerMemcpyOptSizeCL("max-store-memcpy-Os",
  cl::Hidden, cl::ZeroOrMore, cl::init(4),
  cl::desc("Max #stores to inline memcpy"));

static cl::opt<int> MaxStoresPerMemmoveCL("max-store-memmove",
  cl::Hidden, cl::ZeroOrMore, cl::init(6),
  cl::desc("Max #stores to inline memmove"));

static cl::opt<int> MaxStoresPerMemmoveOptSizeCL("max-store-memmove-Os",
  cl::Hidden, cl::ZeroOrMore, cl::init(4),
  cl::desc("Max #stores to inline memmove"));

static cl::opt<int> MaxStoresPerMemsetCL("max-store-memset",
  cl::Hidden, cl::ZeroOrMore, cl::init(8),
  cl::desc("Max #stores to inline memset"));

static cl::opt<int> MaxStoresPerMemsetOptSizeCL("max-store-memset-Os",
  cl::Hidden, cl::ZeroOrMore, cl::init(4),
  cl::desc("Max #stores to inline memset"));

static cl::opt<bool> AlignLoads("hexagon-align-loads",
  cl::Hidden, cl::init(false),
  cl::desc("Rewrite unaligned loads as a pair of aligned loads"));

static cl::opt<bool>
    DisableArgsMinAlignment("hexagon-disable-args-min-alignment", cl::Hidden,
                            cl::init(false),
                            cl::desc("Disable minimum alignment of 1 for "
                                     "arguments passed by value on stack"));

namespace {

class HexagonCCState : public CCState {
  unsigned NumNamedVarArgParams = 0;

public:
  HexagonCCState(CallingConv::ID CC, bool IsVarArg, MachineFunction &MF,
                 SmallVectorImpl<CCValAssign> &locs, LLVMContext &C,
                 unsigned NumNamedArgs)
      : CCState(CC, IsVarArg, MF, locs, C),
        NumNamedVarArgParams(NumNamedArgs) {}
  unsigned getNumNamedVarArgParams() const { return NumNamedVarArgParams; }
};

} // end anonymous namespace


// Implement calling convention for Hexagon.

static bool CC_SkipOdd(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
                       CCValAssign::LocInfo &LocInfo,
                       ISD::ArgFlagsTy &ArgFlags, CCState &State) {
  static const MCPhysReg ArgRegs[] = {
    Hexagon::R0, Hexagon::R1, Hexagon::R2,
    Hexagon::R3, Hexagon::R4, Hexagon::R5
  };
  const unsigned NumArgRegs = array_lengthof(ArgRegs);
  unsigned RegNum = State.getFirstUnallocated(ArgRegs);

  // RegNum is an index into ArgRegs: skip a register if RegNum is odd.
  if (RegNum != NumArgRegs && RegNum % 2 == 1)
    State.AllocateReg(ArgRegs[RegNum]);

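  // For example, if R0..R2 are already allocated, the first unallocated
  // register is R3 (odd), so R3 is claimed here as padding and a subsequent
  // 64-bit argument lands in the even-aligned pair R5:R4.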
  // Always return false here, as this function only makes sure that the first
  // unallocated register has an even register number and does not actually
  // allocate a register for the current argument.
  return false;
}

#include "HexagonGenCallingConv.inc"


SDValue
HexagonTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG)
      const {
  return SDValue();
}

/// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified
/// by "Src" to address "Dst" of size "Size". Alignment information is
/// specified by the specific parameter attribute. The copy will be passed as
/// a byval function parameter. Sometimes what we are copying is the end of a
/// larger object, the part that does not fit in registers.
static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst,
                                         SDValue Chain, ISD::ArgFlagsTy Flags,
                                         SelectionDAG &DAG, const SDLoc &dl) {
  SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), dl, MVT::i32);
  return DAG.getMemcpy(
      Chain, dl, Dst, Src, SizeNode, Flags.getNonZeroByValAlign(),
      /*isVolatile=*/false, /*AlwaysInline=*/false,
      /*isTailCall=*/false, MachinePointerInfo(), MachinePointerInfo());
}

bool
HexagonTargetLowering::CanLowerReturn(
    CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    LLVMContext &Context) const {
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);

  if (MF.getSubtarget<HexagonSubtarget>().useHVXOps())
    return CCInfo.CheckReturn(Outs, RetCC_Hexagon_HVX);
  return CCInfo.CheckReturn(Outs, RetCC_Hexagon);
}

// LowerReturn - Lower ISD::RET. If a struct is larger than 8 bytes and is
// passed by value, the function prototype is modified to return void and
// the value is stored in memory pointed to by a pointer passed by the caller.
SDValue
HexagonTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                                   bool IsVarArg,
                                   const SmallVectorImpl<ISD::OutputArg> &Outs,
                                   const SmallVectorImpl<SDValue> &OutVals,
                                   const SDLoc &dl, SelectionDAG &DAG) const {
  // CCValAssign - represent the assignment of the return value to locations.
  SmallVector<CCValAssign, 16> RVLocs;

  // CCState - Info about the registers and stack slot.
  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  // Analyze return values of ISD::RET
  if (Subtarget.useHVXOps())
    CCInfo.AnalyzeReturn(Outs, RetCC_Hexagon_HVX);
  else
    CCInfo.AnalyzeReturn(Outs, RetCC_Hexagon);

  SDValue Flag;
  SmallVector<SDValue, 4> RetOps(1, Chain);

  // Copy the result values into the output registers.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign &VA = RVLocs[i];

    Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), OutVals[i], Flag);

    // Guarantee that all emitted copies are stuck together with flags.
    Flag = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  RetOps[0] = Chain; // Update chain.

  // Add the flag if we have it.
  if (Flag.getNode())
    RetOps.push_back(Flag);

  return DAG.getNode(HexagonISD::RET_FLAG, dl, MVT::Other, RetOps);
}

bool HexagonTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
  // Only consider emitting a tail call if the source call instruction was
  // itself marked as a tail call.
  return CI->isTailCall();
}

Register HexagonTargetLowering::getRegisterByName(
      const char* RegName, LLT VT, const MachineFunction &) const {
  // Named registers accessible via read_register/write_register. r19 is
  // included because the Linux kernel uses it.
  Register Reg = StringSwitch<Register>(RegName)
                     .Case("r0", Hexagon::R0)
                     .Case("r1", Hexagon::R1)
                     .Case("r2", Hexagon::R2)
                     .Case("r3", Hexagon::R3)
                     .Case("r4", Hexagon::R4)
                     .Case("r5", Hexagon::R5)
                     .Case("r6", Hexagon::R6)
                     .Case("r7", Hexagon::R7)
                     .Case("r8", Hexagon::R8)
                     .Case("r9", Hexagon::R9)
                     .Case("r10", Hexagon::R10)
                     .Case("r11", Hexagon::R11)
                     .Case("r12", Hexagon::R12)
                     .Case("r13", Hexagon::R13)
                     .Case("r14", Hexagon::R14)
                     .Case("r15", Hexagon::R15)
                     .Case("r16", Hexagon::R16)
                     .Case("r17", Hexagon::R17)
                     .Case("r18", Hexagon::R18)
                     .Case("r19", Hexagon::R19)
                     .Case("r20", Hexagon::R20)
                     .Case("r21", Hexagon::R21)
                     .Case("r22", Hexagon::R22)
                     .Case("r23", Hexagon::R23)
                     .Case("r24", Hexagon::R24)
                     .Case("r25", Hexagon::R25)
                     .Case("r26", Hexagon::R26)
                     .Case("r27", Hexagon::R27)
                     .Case("r28", Hexagon::R28)
                     .Case("r29", Hexagon::R29)
                     .Case("r30", Hexagon::R30)
                     .Case("r31", Hexagon::R31)
                     .Case("r1:0", Hexagon::D0)
                     .Case("r3:2", Hexagon::D1)
                     .Case("r5:4", Hexagon::D2)
                     .Case("r7:6", Hexagon::D3)
                     .Case("r9:8", Hexagon::D4)
                     .Case("r11:10", Hexagon::D5)
                     .Case("r13:12", Hexagon::D6)
                     .Case("r15:14", Hexagon::D7)
                     .Case("r17:16", Hexagon::D8)
                     .Case("r19:18", Hexagon::D9)
                     .Case("r21:20", Hexagon::D10)
                     .Case("r23:22", Hexagon::D11)
                     .Case("r25:24", Hexagon::D12)
                     .Case("r27:26", Hexagon::D13)
                     .Case("r29:28", Hexagon::D14)
                     .Case("r31:30", Hexagon::D15)
                     .Case("sp", Hexagon::R29)
                     .Case("fp", Hexagon::R30)
                     .Case("lr", Hexagon::R31)
                     .Case("p0", Hexagon::P0)
                     .Case("p1", Hexagon::P1)
                     .Case("p2", Hexagon::P2)
                     .Case("p3", Hexagon::P3)
                     .Case("sa0", Hexagon::SA0)
                     .Case("lc0", Hexagon::LC0)
                     .Case("sa1", Hexagon::SA1)
                     .Case("lc1", Hexagon::LC1)
                     .Case("m0", Hexagon::M0)
                     .Case("m1", Hexagon::M1)
                     .Case("usr", Hexagon::USR)
                     .Case("ugp", Hexagon::UGP)
                     .Default(Register());
  if (Reg)
    return Reg;

  report_fatal_error("Invalid register name global variable");
}

/// LowerCallResult - Lower the result values of an ISD::CALL into the
/// appropriate copies out of appropriate physical registers. This assumes that
/// Chain/Glue are the input chain/glue to use, and that TheCall is the call
/// being lowered. Returns an SDNode with the same number of values as the
/// ISD::CALL.
SDValue HexagonTargetLowering::LowerCallResult(
    SDValue Chain, SDValue Glue, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
    const SmallVectorImpl<SDValue> &OutVals, SDValue Callee) const {
  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;

  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  if (Subtarget.useHVXOps())
    CCInfo.AnalyzeCallResult(Ins, RetCC_Hexagon_HVX);
  else
    CCInfo.AnalyzeCallResult(Ins, RetCC_Hexagon);

  // Copy all of the result registers out of their specified physreg.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    SDValue RetVal;
    if (RVLocs[i].getValVT() == MVT::i1) {
      // Return values of type MVT::i1 require special handling. The reason
      // is that MVT::i1 is associated with the PredRegs register class, but
      // values of that type are still returned in R0. Generate an explicit
      // copy into a predicate register from R0, and treat the value of the
      // predicate register as the call result.
      auto &MRI = DAG.getMachineFunction().getRegInfo();
      SDValue FR0 = DAG.getCopyFromReg(Chain, dl, RVLocs[i].getLocReg(),
                                       MVT::i32, Glue);
      // FR0 = (Value, Chain, Glue)
      Register PredR = MRI.createVirtualRegister(&Hexagon::PredRegsRegClass);
      SDValue TPR = DAG.getCopyToReg(FR0.getValue(1), dl, PredR,
                                     FR0.getValue(0), FR0.getValue(2));
      // TPR = (Chain, Glue)
      // Don't glue this CopyFromReg, because it copies from a virtual
      // register. If it is glued to the call, InstrEmitter will add it
      // as an implicit def to the call (EmitMachineNode).
      RetVal = DAG.getCopyFromReg(TPR.getValue(0), dl, PredR, MVT::i1);
      Glue = TPR.getValue(1);
      Chain = TPR.getValue(0);
    } else {
      RetVal = DAG.getCopyFromReg(Chain, dl, RVLocs[i].getLocReg(),
                                  RVLocs[i].getValVT(), Glue);
      Glue = RetVal.getValue(2);
      Chain = RetVal.getValue(1);
    }
    InVals.push_back(RetVal.getValue(0));
  }

  return Chain;
}

/// LowerCall - Function arguments are copied from virtual regs to
/// (physical regs)/(stack frame), CALLSEQ_START and CALLSEQ_END are emitted.
SDValue
HexagonTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                                 SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &dl = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  CallingConv::ID CallConv = CLI.CallConv;
  bool IsVarArg = CLI.IsVarArg;
  bool DoesNotReturn = CLI.DoesNotReturn;

  bool IsStructRet = Outs.empty() ? false : Outs[0].Flags.isSRet();
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  auto PtrVT = getPointerTy(MF.getDataLayout());

  unsigned NumParams = CLI.CB ? CLI.CB->getFunctionType()->getNumParams() : 0;
  if (GlobalAddressSDNode *GAN = dyn_cast<GlobalAddressSDNode>(Callee))
    Callee = DAG.getTargetGlobalAddress(GAN->getGlobal(), dl, MVT::i32);

  // Linux ABI treats var-arg calls the same way as regular ones.
  bool TreatAsVarArg = !Subtarget.isEnvironmentMusl() && IsVarArg;

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  HexagonCCState CCInfo(CallConv, TreatAsVarArg, MF, ArgLocs, *DAG.getContext(),
                        NumParams);

  if (Subtarget.useHVXOps())
    CCInfo.AnalyzeCallOperands(Outs, CC_Hexagon_HVX);
  else if (DisableArgsMinAlignment)
    CCInfo.AnalyzeCallOperands(Outs, CC_Hexagon_Legacy);
  else
    CCInfo.AnalyzeCallOperands(Outs, CC_Hexagon);

  if (CLI.IsTailCall) {
    bool StructAttrFlag = MF.getFunction().hasStructRetAttr();
    CLI.IsTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
                         IsVarArg, IsStructRet, StructAttrFlag, Outs,
                         OutVals, Ins, DAG);
    for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
      CCValAssign &VA = ArgLocs[i];
      if (VA.isMemLoc()) {
        CLI.IsTailCall = false;
        break;
      }
    }
    LLVM_DEBUG(dbgs() << (CLI.IsTailCall ? "Eligible for Tail Call\n"
                                         : "Argument must be passed on stack. "
                                           "Not eligible for Tail Call\n"));
  }
  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = CCInfo.getNextStackOffset();
  SmallVector<std::pair<unsigned, SDValue>, 16> RegsToPass;
  SmallVector<SDValue, 8> MemOpChains;

  const HexagonRegisterInfo &HRI = *Subtarget.getRegisterInfo();
  SDValue StackPtr =
      DAG.getCopyFromReg(Chain, dl, HRI.getStackRegister(), PtrVT);

  bool NeedsArgAlign = false;
  Align LargestAlignSeen;
  // Walk the register/memloc assignments, inserting copies/loads.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    SDValue Arg = OutVals[i];
    ISD::ArgFlagsTy Flags = Outs[i].Flags;
    // Record if we need > 8 byte alignment on an argument.
    bool ArgAlign = Subtarget.isHVXVectorType(VA.getValVT());
    NeedsArgAlign |= ArgAlign;

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
      default:
        // Loc info must be one of Full, BCvt, SExt, ZExt, or AExt.
        llvm_unreachable("Unknown loc info!");
      case CCValAssign::Full:
        break;
      case CCValAssign::BCvt:
        Arg = DAG.getBitcast(VA.getLocVT(), Arg);
        break;
      case CCValAssign::SExt:
        Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
        break;
      case CCValAssign::ZExt:
        Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
        break;
      case CCValAssign::AExt:
        Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
        break;
    }

    if (VA.isMemLoc()) {
      unsigned LocMemOffset = VA.getLocMemOffset();
      SDValue MemAddr = DAG.getConstant(LocMemOffset, dl,
                                        StackPtr.getValueType());
      MemAddr = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, MemAddr);
      if (ArgAlign)
        LargestAlignSeen = std::max(
            LargestAlignSeen, Align(VA.getLocVT().getStoreSizeInBits() / 8));
      if (Flags.isByVal()) {
        // The argument is a struct passed by value. According to LLVM, "Arg"
        // is a pointer.
        MemOpChains.push_back(CreateCopyOfByValArgument(Arg, MemAddr, Chain,
                                                        Flags, DAG, dl));
      } else {
        MachinePointerInfo LocPI = MachinePointerInfo::getStack(
            DAG.getMachineFunction(), LocMemOffset);
        SDValue S = DAG.getStore(Chain, dl, Arg, MemAddr, LocPI);
        MemOpChains.push_back(S);
      }
      continue;
    }

    // Arguments that can be passed in a register must be kept in the
    // RegsToPass vector.
    if (VA.isRegLoc())
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
  }

  if (NeedsArgAlign && Subtarget.hasV60Ops()) {
    LLVM_DEBUG(dbgs() << "Function needs byte stack align due to call args\n");
    Align VecAlign(HRI.getSpillAlignment(Hexagon::HvxVRRegClass));
    LargestAlignSeen = std::max(LargestAlignSeen, VecAlign);
    MFI.ensureMaxAlignment(LargestAlignSeen);
  }
  // Transform all store nodes into one single node because all store
  // nodes are independent of each other.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);

  SDValue Glue;
  if (!CLI.IsTailCall) {
    Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
    Glue = Chain.getValue(1);
  }

  // Build a sequence of copy-to-reg nodes chained together with token
  // chain and flag operands which copy the outgoing args into registers.
  // The Glue is necessary since all emitted instructions must be
  // stuck together.
  if (!CLI.IsTailCall) {
    for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
      Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                               RegsToPass[i].second, Glue);
      Glue = Chain.getValue(1);
    }
  } else {
    // For tail calls lower the arguments to the 'real' stack slot.
    //
    // Force all the incoming stack arguments to be loaded from the stack
    // before any new outgoing arguments are stored to the stack, because the
    // outgoing stack slots may alias the incoming argument stack slots, and
    // the alias isn't otherwise explicit. This is slightly more conservative
    // than necessary, because it means that each store effectively depends
    // on every argument instead of just those arguments it would clobber.
    //
    // Do not flag preceding copytoreg stuff together with the following stuff.
    Glue = SDValue();
    for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
      Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                               RegsToPass[i].second, Glue);
      Glue = Chain.getValue(1);
    }
    Glue = SDValue();
  }

  bool LongCalls = MF.getSubtarget<HexagonSubtarget>().useLongCalls();
  unsigned Flags = LongCalls ? HexagonII::HMOTF_ConstExtended : 0;
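  // With long calls, the callee operand is marked constant-extended, which
  // lets the call reach the full 32-bit address range instead of the shorter
  // PC-relative range of a plain call instruction.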

  // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
  // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
  // node so that legalize doesn't hack it.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, PtrVT, 0, Flags);
  } else if (ExternalSymbolSDNode *S =
             dyn_cast<ExternalSymbolSDNode>(Callee)) {
    Callee = DAG.getTargetExternalSymbol(S->getSymbol(), PtrVT, Flags);
  }

  // Returns a chain & a flag for retval copy to use.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are
  // known live into the call.
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));
  }

  const uint32_t *Mask = HRI.getCallPreservedMask(MF, CallConv);
  assert(Mask && "Missing call preserved mask for calling convention");
  Ops.push_back(DAG.getRegisterMask(Mask));

  if (Glue.getNode())
    Ops.push_back(Glue);

  if (CLI.IsTailCall) {
    MFI.setHasTailCall();
    return DAG.getNode(HexagonISD::TC_RETURN, dl, NodeTys, Ops);
  }

  // Set this here because we need to know this for "hasFP" in frame lowering.
  // The target-independent code calls getFrameRegister before setting it, and
  // getFrameRegister uses hasFP to determine whether the function has FP.
  MFI.setHasCalls(true);

  unsigned OpCode = DoesNotReturn ? HexagonISD::CALLnr : HexagonISD::CALL;
  Chain = DAG.getNode(OpCode, dl, NodeTys, Ops);
  Glue = Chain.getValue(1);

  // Create the CALLSEQ_END node.
  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
                             DAG.getIntPtrConstant(0, dl, true), Glue, dl);
  Glue = Chain.getValue(1);

  // Handle result values, copying them out of physregs into vregs that we
  // return.
  return LowerCallResult(Chain, Glue, CallConv, IsVarArg, Ins, dl, DAG,
                         InVals, OutVals, Callee);
}

/// Returns true if this node can be combined with a load/store to form a
/// post-indexed load/store. On success it sets the base pointer, the offset,
/// and the addressing mode by reference.
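/// For example, a 32-bit load whose address is later incremented by
/// (add %base, #8) can become a post-incremented load, "r1 = memw(r0++#8)",
/// as long as the constant is a valid auto-increment immediate for the type.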
bool HexagonTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op,
      SDValue &Base, SDValue &Offset, ISD::MemIndexedMode &AM,
      SelectionDAG &DAG) const {
  LSBaseSDNode *LSN = dyn_cast<LSBaseSDNode>(N);
  if (!LSN)
    return false;
  EVT VT = LSN->getMemoryVT();
  if (!VT.isSimple())
    return false;
  bool IsLegalType = VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32 ||
                     VT == MVT::i64 || VT == MVT::f32 || VT == MVT::f64 ||
                     VT == MVT::v2i16 || VT == MVT::v2i32 || VT == MVT::v4i8 ||
                     VT == MVT::v4i16 || VT == MVT::v8i8 ||
                     Subtarget.isHVXVectorType(VT.getSimpleVT());
  if (!IsLegalType)
    return false;

  if (Op->getOpcode() != ISD::ADD)
    return false;
  Base = Op->getOperand(0);
  Offset = Op->getOperand(1);
  if (!isa<ConstantSDNode>(Offset.getNode()))
    return false;
  AM = ISD::POST_INC;

  int32_t V = cast<ConstantSDNode>(Offset.getNode())->getSExtValue();
  return Subtarget.getInstrInfo()->isValidAutoIncImm(VT, V);
}

SDValue
HexagonTargetLowering::LowerINLINEASM(SDValue Op, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  auto &HMFI = *MF.getInfo<HexagonMachineFunctionInfo>();
  const HexagonRegisterInfo &HRI = *Subtarget.getRegisterInfo();
  unsigned LR = HRI.getRARegister();

  if ((Op.getOpcode() != ISD::INLINEASM &&
       Op.getOpcode() != ISD::INLINEASM_BR) || HMFI.hasClobberLR())
    return Op;

  unsigned NumOps = Op.getNumOperands();
  if (Op.getOperand(NumOps-1).getValueType() == MVT::Glue)
    --NumOps;  // Ignore the flag operand.

  for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) {
    unsigned Flags = cast<ConstantSDNode>(Op.getOperand(i))->getZExtValue();
    unsigned NumVals = InlineAsm::getNumOperandRegisters(Flags);
    ++i;  // Skip the ID value.

    switch (InlineAsm::getKind(Flags)) {
      default:
        llvm_unreachable("Bad flags!");
      case InlineAsm::Kind_RegUse:
      case InlineAsm::Kind_Imm:
      case InlineAsm::Kind_Mem:
        i += NumVals;
        break;
      case InlineAsm::Kind_Clobber:
      case InlineAsm::Kind_RegDef:
      case InlineAsm::Kind_RegDefEarlyClobber: {
        for (; NumVals; --NumVals, ++i) {
          unsigned Reg = cast<RegisterSDNode>(Op.getOperand(i))->getReg();
          if (Reg != LR)
            continue;
          HMFI.setHasClobberLR(true);
          return Op;
        }
        break;
      }
    }
  }

  return Op;
}

// Need to transform ISD::PREFETCH into something that doesn't inherit
// all of the properties of ISD::PREFETCH, specifically SDNPMayLoad and
// SDNPMayStore.
SDValue HexagonTargetLowering::LowerPREFETCH(SDValue Op,
                                             SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  SDValue Addr = Op.getOperand(1);
  // Lower it to DCFETCH($reg, #0). A "pat" will try to merge the offset in,
  // if the "reg" is fed by an "add".
  SDLoc DL(Op);
  SDValue Zero = DAG.getConstant(0, DL, MVT::i32);
  return DAG.getNode(HexagonISD::DCFETCH, DL, MVT::Other, Chain, Addr, Zero);
}

// Custom-handle ISD::READCYCLECOUNTER because the target-independent SDNode
// is marked as having side-effects, while the register read on Hexagon does
// not have any. TableGen refuses to accept the direct pattern from that node
// to the A4_tfrcpp.
SDValue HexagonTargetLowering::LowerREADCYCLECOUNTER(SDValue Op,
                                                     SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  SDLoc dl(Op);
  // READCYCLECOUNTER produces a 64-bit result (the node is registered Custom
  // for MVT::i64 below).
  SDVTList VTs = DAG.getVTList(MVT::i64, MVT::Other);
  return DAG.getNode(HexagonISD::READCYCLE, dl, VTs, Chain);
}

SDValue HexagonTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
                                                   SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
  // Lower the hexagon_prefetch builtin to DCFETCH, as above.
  if (IntNo == Intrinsic::hexagon_prefetch) {
    SDValue Addr = Op.getOperand(2);
    SDLoc DL(Op);
    SDValue Zero = DAG.getConstant(0, DL, MVT::i32);
    return DAG.getNode(HexagonISD::DCFETCH, DL, MVT::Other, Chain, Addr, Zero);
  }
  return SDValue();
}

SDValue
HexagonTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
      SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  SDValue Size = Op.getOperand(1);
  SDValue Align = Op.getOperand(2);
  SDLoc dl(Op);

  ConstantSDNode *AlignConst = dyn_cast<ConstantSDNode>(Align);
  assert(AlignConst && "Non-constant Align in LowerDYNAMIC_STACKALLOC");

  unsigned A = AlignConst->getSExtValue();
  auto &HFI = *Subtarget.getFrameLowering();
  // "Zero" means natural stack alignment.
  if (A == 0)
    A = HFI.getStackAlign().value();

  LLVM_DEBUG({
    dbgs() << __func__ << " Align: " << A << " Size: ";
    Size.getNode()->dump(&DAG);
    dbgs() << "\n";
  });

  SDValue AC = DAG.getConstant(A, dl, MVT::i32);
  SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
  SDValue AA = DAG.getNode(HexagonISD::ALLOCA, dl, VTs, Chain, Size, AC);

  DAG.ReplaceAllUsesOfValueWith(Op, AA);
  return AA;
}

SDValue HexagonTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  // Linux ABI treats var-arg calls the same way as regular ones.
  bool TreatAsVarArg = !Subtarget.isEnvironmentMusl() && IsVarArg;

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  HexagonCCState CCInfo(CallConv, TreatAsVarArg, MF, ArgLocs,
                        *DAG.getContext(),
                        MF.getFunction().getFunctionType()->getNumParams());

  if (Subtarget.useHVXOps())
    CCInfo.AnalyzeFormalArguments(Ins, CC_Hexagon_HVX);
  else if (DisableArgsMinAlignment)
    CCInfo.AnalyzeFormalArguments(Ins, CC_Hexagon_Legacy);
  else
    CCInfo.AnalyzeFormalArguments(Ins, CC_Hexagon);

  // For LLVM, when a struct larger than 8 bytes is returned by value, the
  // first argument is a pointer to the location on the caller's stack where
  // the return value will be stored. For Hexagon, that address is passed only
  // when the struct is larger than 8 bytes; otherwise no address is passed
  // into the callee and the callee returns the result directly through R0/R1.
  auto NextSingleReg = [] (const TargetRegisterClass &RC, unsigned Reg) {
    switch (RC.getID()) {
      case Hexagon::IntRegsRegClassID:
        return Reg - Hexagon::R0 + 1;
      case Hexagon::DoubleRegsRegClassID:
        return (Reg - Hexagon::D0 + 1) * 2;
      case Hexagon::HvxVRRegClassID:
        return Reg - Hexagon::V0 + 1;
      case Hexagon::HvxWRRegClassID:
        return (Reg - Hexagon::W0 + 1) * 2;
    }
    llvm_unreachable("Unexpected register class");
  };
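  // For example, NextSingleReg(DoubleRegs, Hexagon::D1) is 4: the first
  // single register past the pair r3:2 is r4.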

  auto &HFL = const_cast<HexagonFrameLowering&>(*Subtarget.getFrameLowering());
  auto &HMFI = *MF.getInfo<HexagonMachineFunctionInfo>();
  HFL.FirstVarArgSavedReg = 0;
  HMFI.setFirstNamedArgFrameIndex(-int(MFI.getNumFixedObjects()));

  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    ISD::ArgFlagsTy Flags = Ins[i].Flags;
    bool ByVal = Flags.isByVal();

    // Arguments passed in registers:
    // 1. 32- and 64-bit values and HVX vectors are passed directly,
    // 2. Large structs are passed via an address, and the address is
    //    passed in a register.
    if (VA.isRegLoc() && ByVal && Flags.getByValSize() <= 8)
      llvm_unreachable("ByValSize must be bigger than 8 bytes");

    bool InReg = VA.isRegLoc() &&
                 (!ByVal || (ByVal && Flags.getByValSize() > 8));

    if (InReg) {
      MVT RegVT = VA.getLocVT();
      if (VA.getLocInfo() == CCValAssign::BCvt)
        RegVT = VA.getValVT();

      const TargetRegisterClass *RC = getRegClassFor(RegVT);
      Register VReg = MRI.createVirtualRegister(RC);
      SDValue Copy = DAG.getCopyFromReg(Chain, dl, VReg, RegVT);

      // Treat values of type MVT::i1 specially: they are passed in
      // registers of type i32, but they need to remain as values of
      // type i1 for consistency of the argument lowering.
      if (VA.getValVT() == MVT::i1) {
        assert(RegVT.getSizeInBits() <= 32);
        SDValue T = DAG.getNode(ISD::AND, dl, RegVT,
                                Copy, DAG.getConstant(1, dl, RegVT));
        Copy = DAG.getSetCC(dl, MVT::i1, T, DAG.getConstant(0, dl, RegVT),
                            ISD::SETNE);
      } else {
#ifndef NDEBUG
        unsigned RegSize = RegVT.getSizeInBits();
        assert(RegSize == 32 || RegSize == 64 ||
               Subtarget.isHVXVectorType(RegVT));
#endif
      }
      InVals.push_back(Copy);
      MRI.addLiveIn(VA.getLocReg(), VReg);
      HFL.FirstVarArgSavedReg = NextSingleReg(*RC, VA.getLocReg());
    } else {
      assert(VA.isMemLoc() && "Argument should be passed in memory");

      // If it's a byval parameter, then we need to compute the
      // "real" size, not the size of the pointer.
      unsigned ObjSize = Flags.isByVal()
                            ? Flags.getByValSize()
                            : VA.getLocVT().getStoreSizeInBits() / 8;

      // Create the frame index object for this incoming parameter.
      int Offset = HEXAGON_LRFP_SIZE + VA.getLocMemOffset();
      int FI = MFI.CreateFixedObject(ObjSize, Offset, true);
      SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);

      if (Flags.isByVal()) {
        // If it's a pass-by-value aggregate, then do not dereference the stack
        // location. Instead, we should generate a reference to the stack
        // location.
        InVals.push_back(FIN);
      } else {
        SDValue L = DAG.getLoad(VA.getValVT(), dl, Chain, FIN,
                                MachinePointerInfo::getFixedStack(MF, FI, 0));
        InVals.push_back(L);
      }
    }
  }

  if (IsVarArg && Subtarget.isEnvironmentMusl()) {
    for (int i = HFL.FirstVarArgSavedReg; i < 6; i++)
      MRI.addLiveIn(Hexagon::R0+i);
  }

  if (IsVarArg && Subtarget.isEnvironmentMusl()) {
    HMFI.setFirstNamedArgFrameIndex(HMFI.getFirstNamedArgFrameIndex() - 1);
    HMFI.setLastNamedArgFrameIndex(-int(MFI.getNumFixedObjects()));

    // Create Frame index for the start of register saved area.
    int NumVarArgRegs = 6 - HFL.FirstVarArgSavedReg;
    bool RequiresPadding = (NumVarArgRegs & 1);
    int RegSaveAreaSizePlusPadding = RequiresPadding
                                        ? (NumVarArgRegs + 1) * 4
                                        : NumVarArgRegs * 4;
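    // For example, if R0..R2 hold named arguments, FirstVarArgSavedReg is 3
    // and R3..R5 must be saved: NumVarArgRegs is 3 (odd), so 4 bytes of
    // padding are added and the save area occupies 16 bytes.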

    if (RegSaveAreaSizePlusPadding > 0) {
      // The offset to the saved register area should be 8-byte aligned.
      int RegAreaStart = HEXAGON_LRFP_SIZE + CCInfo.getNextStackOffset();
      if (RegAreaStart % 8)
        RegAreaStart = (RegAreaStart + 7) & -8;

      int RegSaveAreaFrameIndex =
          MFI.CreateFixedObject(RegSaveAreaSizePlusPadding, RegAreaStart, true);
      HMFI.setRegSavedAreaStartFrameIndex(RegSaveAreaFrameIndex);

      // This will point to the next argument passed via stack.
      int Offset = RegAreaStart + RegSaveAreaSizePlusPadding;
      int FI = MFI.CreateFixedObject(Hexagon_PointerSize, Offset, true);
      HMFI.setVarArgsFrameIndex(FI);
    } else {
      // This will point to the next argument passed via stack, when
      // there is no saved register area.
      int Offset = HEXAGON_LRFP_SIZE + CCInfo.getNextStackOffset();
      int FI = MFI.CreateFixedObject(Hexagon_PointerSize, Offset, true);
      HMFI.setRegSavedAreaStartFrameIndex(FI);
      HMFI.setVarArgsFrameIndex(FI);
    }
  }


  if (IsVarArg && !Subtarget.isEnvironmentMusl()) {
    // This will point to the next argument passed via stack.
    int Offset = HEXAGON_LRFP_SIZE + CCInfo.getNextStackOffset();
    int FI = MFI.CreateFixedObject(Hexagon_PointerSize, Offset, true);
    HMFI.setVarArgsFrameIndex(FI);
  }

  return Chain;
}

SDValue
HexagonTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
  // VASTART stores the address of the VarArgsFrameIndex slot into the
  // memory location argument.
  MachineFunction &MF = DAG.getMachineFunction();
  HexagonMachineFunctionInfo *QFI = MF.getInfo<HexagonMachineFunctionInfo>();
  SDValue Addr = DAG.getFrameIndex(QFI->getVarArgsFrameIndex(), MVT::i32);
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();

  if (!Subtarget.isEnvironmentMusl()) {
    return DAG.getStore(Op.getOperand(0), SDLoc(Op), Addr, Op.getOperand(1),
                        MachinePointerInfo(SV));
  }
  auto &FuncInfo = *MF.getInfo<HexagonMachineFunctionInfo>();
  auto &HFL = *Subtarget.getFrameLowering();
  SDLoc DL(Op);
  SmallVector<SDValue, 8> MemOps;

  // Get frame index of va_list.
  SDValue FIN = Op.getOperand(1);

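  // Under musl, va_list holds three pointers: the current saved-register-area
  // pointer, the saved-register-area end pointer, and the overflow (stack)
  // area pointer. They are initialized by the three stores below, at offsets
  // 0, 4, and 8 of the va_list object.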
  // If the first vararg register is odd, add 4 bytes to the start of the
  // saved register area to point to the first register location. This is
  // because the saved register area has to be 8-byte aligned. In case of an
  // odd start register, there will be 4 bytes of padding at the beginning of
  // the saved register area. If all registers are used up, the following
  // condition will handle it correctly.
  SDValue SavedRegAreaStartFrameIndex =
      DAG.getFrameIndex(FuncInfo.getRegSavedAreaStartFrameIndex(), MVT::i32);

  auto PtrVT = getPointerTy(DAG.getDataLayout());

  if (HFL.FirstVarArgSavedReg & 1)
    SavedRegAreaStartFrameIndex =
        DAG.getNode(ISD::ADD, DL, PtrVT,
                    DAG.getFrameIndex(FuncInfo.getRegSavedAreaStartFrameIndex(),
                                      MVT::i32),
                    DAG.getIntPtrConstant(4, DL));

  // Store the saved register area start pointer.
  SDValue Store =
      DAG.getStore(Op.getOperand(0), DL,
                   SavedRegAreaStartFrameIndex,
                   FIN, MachinePointerInfo(SV));
  MemOps.push_back(Store);

  // Store saved register area end pointer.
  FIN = DAG.getNode(ISD::ADD, DL, PtrVT,
                    FIN, DAG.getIntPtrConstant(4, DL));
  Store = DAG.getStore(Op.getOperand(0), DL,
                       DAG.getFrameIndex(FuncInfo.getVarArgsFrameIndex(),
                                         PtrVT),
                       FIN, MachinePointerInfo(SV, 4));
  MemOps.push_back(Store);

  // Store overflow area pointer.
  FIN = DAG.getNode(ISD::ADD, DL, PtrVT,
                    FIN, DAG.getIntPtrConstant(4, DL));
  Store = DAG.getStore(Op.getOperand(0), DL,
                       DAG.getFrameIndex(FuncInfo.getVarArgsFrameIndex(),
                                         PtrVT),
                       FIN, MachinePointerInfo(SV, 8));
  MemOps.push_back(Store);

  return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps);
}

SDValue
HexagonTargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG) const {
  // Assert that the linux ABI is enabled for the current compilation.
  assert(Subtarget.isEnvironmentMusl() && "Linux ABI should be enabled");
  SDValue Chain = Op.getOperand(0);
  SDValue DestPtr = Op.getOperand(1);
  SDValue SrcPtr = Op.getOperand(2);
  const Value *DestSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
  const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
  SDLoc DL(Op);
  // Size of the va_list is 12 bytes as it has 3 pointers. Therefore,
  // we need to memcpy 12 bytes from one va_list to the other.
  return DAG.getMemcpy(Chain, DL, DestPtr, SrcPtr,
                       DAG.getIntPtrConstant(12, DL), Align(4),
                       /*isVolatile*/ false, false, false,
                       MachinePointerInfo(DestSV), MachinePointerInfo(SrcSV));
}

SDValue HexagonTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
  const SDLoc &dl(Op);
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
  MVT ResTy = ty(Op);
  MVT OpTy = ty(LHS);

  if (OpTy == MVT::v2i16 || OpTy == MVT::v4i8) {
    MVT ElemTy = OpTy.getVectorElementType();
    assert(ElemTy.isScalarInteger());
    MVT WideTy = MVT::getVectorVT(MVT::getIntegerVT(2*ElemTy.getSizeInBits()),
                                  OpTy.getVectorNumElements());
    return DAG.getSetCC(dl, ResTy,
                        DAG.getSExtOrTrunc(LHS, SDLoc(LHS), WideTy),
                        DAG.getSExtOrTrunc(RHS, SDLoc(RHS), WideTy), CC);
  }

  // Treat all other vector types as legal.
  if (ResTy.isVector())
    return Op;

  // Comparisons of short integers should use sign-extend, not zero-extend,
  // since we can represent small negative values in the compare instructions.
  // The LLVM default is to use zero-extend arbitrarily in these cases.
  auto isSExtFree = [this](SDValue N) {
    switch (N.getOpcode()) {
      case ISD::TRUNCATE: {
        // A sign-extend of a truncate of a sign-extend is free.
        SDValue Op = N.getOperand(0);
        if (Op.getOpcode() != ISD::AssertSext)
          return false;
        EVT OrigTy = cast<VTSDNode>(Op.getOperand(1))->getVT();
        unsigned ThisBW = ty(N).getSizeInBits();
        unsigned OrigBW = OrigTy.getSizeInBits();
        // The type that was sign-extended to get the AssertSext must be
        // narrower than the type of N (so that N still has the same value
        // as the original).
        return ThisBW >= OrigBW;
      }
      case ISD::LOAD:
        // We have sign-extended loads.
        return true;
    }
    return false;
  };
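  // For example, an i16 loaded with the sign-extending memh instruction is
  // already sign-extended in its 32-bit register, so widening the comparison
  // to i32 costs nothing extra.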

  if (OpTy == MVT::i8 || OpTy == MVT::i16) {
    ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS);
    bool IsNegative = C && C->getAPIntValue().isNegative();
    if (IsNegative || isSExtFree(LHS) || isSExtFree(RHS))
      return DAG.getSetCC(dl, ResTy,
                          DAG.getSExtOrTrunc(LHS, SDLoc(LHS), MVT::i32),
                          DAG.getSExtOrTrunc(RHS, SDLoc(RHS), MVT::i32), CC);
  }

  return SDValue();
}

SDValue
HexagonTargetLowering::LowerVSELECT(SDValue Op, SelectionDAG &DAG) const {
  SDValue PredOp = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1), Op2 = Op.getOperand(2);
  MVT OpTy = ty(Op1);
  const SDLoc &dl(Op);

  if (OpTy == MVT::v2i16 || OpTy == MVT::v4i8) {
    MVT ElemTy = OpTy.getVectorElementType();
    assert(ElemTy.isScalarInteger());
    MVT WideTy = MVT::getVectorVT(MVT::getIntegerVT(2*ElemTy.getSizeInBits()),
                                  OpTy.getVectorNumElements());
    // Generate (trunc (select (_, sext, sext))).
    return DAG.getSExtOrTrunc(
              DAG.getSelect(dl, WideTy, PredOp,
                            DAG.getSExtOrTrunc(Op1, dl, WideTy),
                            DAG.getSExtOrTrunc(Op2, dl, WideTy)),
              dl, OpTy);
  }

  return SDValue();
}

SDValue
HexagonTargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const {
  EVT ValTy = Op.getValueType();
  ConstantPoolSDNode *CPN = cast<ConstantPoolSDNode>(Op);
  Constant *CVal = nullptr;
  bool isVTi1Type = false;
  if (auto *CV = dyn_cast<ConstantVector>(CPN->getConstVal())) {
    if (cast<VectorType>(CV->getType())->getElementType()->isIntegerTy(1)) {
      IRBuilder<> IRB(CV->getContext());
      SmallVector<Constant*, 128> NewConst;
      unsigned VecLen = CV->getNumOperands();
      assert(isPowerOf2_32(VecLen) &&
             "conversion only supported for pow2 VectorSize");
      for (unsigned i = 0; i < VecLen; ++i)
        NewConst.push_back(IRB.getInt8(CV->getOperand(i)->isZeroValue()));

      CVal = ConstantVector::get(NewConst);
      isVTi1Type = true;
    }
  }
  Align Alignment = CPN->getAlign();
  bool IsPositionIndependent = isPositionIndependent();
  unsigned char TF = IsPositionIndependent ? HexagonII::MO_PCREL : 0;

  unsigned Offset = 0;
  SDValue T;
  if (CPN->isMachineConstantPoolEntry())
    T = DAG.getTargetConstantPool(CPN->getMachineCPVal(), ValTy, Alignment,
                                  Offset, TF);
  else if (isVTi1Type)
    T = DAG.getTargetConstantPool(CVal, ValTy, Alignment, Offset, TF);
  else
    T = DAG.getTargetConstantPool(CPN->getConstVal(), ValTy, Alignment, Offset,
                                  TF);

  assert(cast<ConstantPoolSDNode>(T)->getTargetFlags() == TF &&
         "Inconsistent target flag encountered");

  if (IsPositionIndependent)
    return DAG.getNode(HexagonISD::AT_PCREL, SDLoc(Op), ValTy, T);
  return DAG.getNode(HexagonISD::CP, SDLoc(Op), ValTy, T);
}

SDValue
HexagonTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  int Idx = cast<JumpTableSDNode>(Op)->getIndex();
  if (isPositionIndependent()) {
    SDValue T = DAG.getTargetJumpTable(Idx, VT, HexagonII::MO_PCREL);
    return DAG.getNode(HexagonISD::AT_PCREL, SDLoc(Op), VT, T);
  }

  SDValue T = DAG.getTargetJumpTable(Idx, VT);
  return DAG.getNode(HexagonISD::JT, SDLoc(Op), VT, T);
}

SDValue
HexagonTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const {
  const HexagonRegisterInfo &HRI = *Subtarget.getRegisterInfo();
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setReturnAddressIsTaken(true);

  if (verifyReturnAddressArgumentIsConstant(Op, DAG))
    return SDValue();

  EVT VT = Op.getValueType();
  SDLoc dl(Op);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  if (Depth) {
    SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
    SDValue Offset = DAG.getConstant(4, dl, MVT::i32);
    return DAG.getLoad(VT, dl, DAG.getEntryNode(),
                       DAG.getNode(ISD::ADD, dl, VT, FrameAddr, Offset),
                       MachinePointerInfo());
  }

  // Return LR, which contains the return address. Mark it an implicit live-in.
  unsigned Reg = MF.addLiveIn(HRI.getRARegister(), getRegClassFor(MVT::i32));
  return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT);
}

SDValue
HexagonTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
  const HexagonRegisterInfo &HRI = *Subtarget.getRegisterInfo();
  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
  MFI.setFrameAddressIsTaken(true);

  EVT VT = Op.getValueType();
  SDLoc dl(Op);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl,
                                         HRI.getFrameRegister(), VT);
  while (Depth--)
    FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
                            MachinePointerInfo());
  return FrameAddr;
}

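// A fence lowers to HexagonISD::BARRIER, which is selected to the
// architectural memory-barrier instruction.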
SDValue
HexagonTargetLowering::LowerATOMIC_FENCE(SDValue Op, SelectionDAG& DAG) const {
  SDLoc dl(Op);
  return DAG.getNode(HexagonISD::BARRIER, dl, MVT::Other, Op.getOperand(0));
}

SDValue
HexagonTargetLowering::LowerGLOBALADDRESS(SDValue Op, SelectionDAG &DAG) const {
  SDLoc dl(Op);
  auto *GAN = cast<GlobalAddressSDNode>(Op);
  auto PtrVT = getPointerTy(DAG.getDataLayout());
  auto *GV = GAN->getGlobal();
  int64_t Offset = GAN->getOffset();

  auto &HLOF = *HTM.getObjFileLowering();
  Reloc::Model RM = HTM.getRelocationModel();

  if (RM == Reloc::Static) {
    SDValue GA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, Offset);
    const GlobalObject *GO = GV->getBaseObject();
    if (GO && Subtarget.useSmallData() && HLOF.isGlobalInSmallSection(GO, HTM))
      return DAG.getNode(HexagonISD::CONST32_GP, dl, PtrVT, GA);
    return DAG.getNode(HexagonISD::CONST32, dl, PtrVT, GA);
  }

  bool UsePCRel = getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV);
  if (UsePCRel) {
    SDValue GA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, Offset,
                                            HexagonII::MO_PCREL);
    return DAG.getNode(HexagonISD::AT_PCREL, dl, PtrVT, GA);
  }

  // Use GOT index.
  SDValue GOT = DAG.getGLOBAL_OFFSET_TABLE(PtrVT);
  SDValue GA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, HexagonII::MO_GOT);
  SDValue Off = DAG.getConstant(Offset, dl, MVT::i32);
  return DAG.getNode(HexagonISD::AT_GOT, dl, PtrVT, GOT, GA, Off);
}

SDValue
HexagonTargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const {
  const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
  SDLoc dl(Op);
  EVT PtrVT = getPointerTy(DAG.getDataLayout());

  Reloc::Model RM = HTM.getRelocationModel();
  if (RM == Reloc::Static) {
    SDValue A = DAG.getTargetBlockAddress(BA, PtrVT);
    return DAG.getNode(HexagonISD::CONST32_GP, dl, PtrVT, A);
  }

  SDValue A = DAG.getTargetBlockAddress(BA, PtrVT, 0, HexagonII::MO_PCREL);
  return DAG.getNode(HexagonISD::AT_PCREL, dl, PtrVT, A);
}

SDValue
HexagonTargetLowering::LowerGLOBAL_OFFSET_TABLE(SDValue Op, SelectionDAG &DAG)
      const {
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  SDValue GOTSym = DAG.getTargetExternalSymbol(HEXAGON_GOT_SYM_NAME, PtrVT,
                                               HexagonII::MO_PCREL);
  return DAG.getNode(HexagonISD::AT_PCREL, SDLoc(Op), PtrVT, GOTSym);
}

SDValue
HexagonTargetLowering::GetDynamicTLSAddr(SelectionDAG &DAG, SDValue Chain,
      GlobalAddressSDNode *GA, SDValue Glue, EVT PtrVT, unsigned ReturnReg,
      unsigned char OperandFlags) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  SDLoc dl(GA);
  SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
                                           GA->getValueType(0),
                                           GA->getOffset(),
                                           OperandFlags);
  // Create the operands for the call. The operands should contain:
  // 1. Chain SDValue
  // 2. Callee, which in this case is the global address value.
  // 3. Registers live into the call. In this case it is R0, as we
  //    have just one argument to be passed.
  // 4. Glue.
  // Note: The order is important.

  const auto &HRI = *Subtarget.getRegisterInfo();
  const uint32_t *Mask = HRI.getCallPreservedMask(MF, CallingConv::C);
  assert(Mask && "Missing call preserved mask for calling convention");
  SDValue Ops[] = { Chain, TGA, DAG.getRegister(Hexagon::R0, PtrVT),
                    DAG.getRegisterMask(Mask), Glue };
  Chain = DAG.getNode(HexagonISD::CALL, dl, NodeTys, Ops);

  // Inform MFI that function has calls.
  MFI.setAdjustsStack(true);

  Glue = Chain.getValue(1);
  return DAG.getCopyFromReg(Chain, dl, ReturnReg, PtrVT, Glue);
}

//
// Lower using the initial-exec model for TLS addresses
//
SDValue
HexagonTargetLowering::LowerToTLSInitialExecModel(GlobalAddressSDNode *GA,
      SelectionDAG &DAG) const {
  SDLoc dl(GA);
  int64_t Offset = GA->getOffset();
  auto PtrVT = getPointerTy(DAG.getDataLayout());

  // Get the thread pointer.
  SDValue TP = DAG.getCopyFromReg(DAG.getEntryNode(), dl, Hexagon::UGP, PtrVT);

  bool IsPositionIndependent = isPositionIndependent();
  unsigned char TF =
      IsPositionIndependent ? HexagonII::MO_IEGOT : HexagonII::MO_IE;

  // First generate the TLS symbol address
  SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl, PtrVT,
                                           Offset, TF);

  SDValue Sym = DAG.getNode(HexagonISD::CONST32, dl, PtrVT, TGA);

  if (IsPositionIndependent) {
    // Generate the GOT pointer in case of position independent code
    SDValue GOT = LowerGLOBAL_OFFSET_TABLE(Sym, DAG);

    // Add the TLS symbol address to the GOT pointer. This gives a
    // GOT-relative relocation for the symbol.
    Sym = DAG.getNode(ISD::ADD, dl, PtrVT, GOT, Sym);
  }

  // Load the offset value for the TLS symbol. This offset is relative to
  // the thread pointer.
  SDValue LoadOffset =
      DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Sym, MachinePointerInfo());

  // The address of the thread-local variable is the sum of the thread
  // pointer and the variable's offset.
  return DAG.getNode(ISD::ADD, dl, PtrVT, TP, LoadOffset);
}

//
// Lower using the local-exec model for TLS addresses
//
SDValue
HexagonTargetLowering::LowerToTLSLocalExecModel(GlobalAddressSDNode *GA,
      SelectionDAG &DAG) const {
  SDLoc dl(GA);
  int64_t Offset = GA->getOffset();
  auto PtrVT = getPointerTy(DAG.getDataLayout());

  // Get the thread pointer.
  SDValue TP = DAG.getCopyFromReg(DAG.getEntryNode(), dl, Hexagon::UGP, PtrVT);
  // Generate the TLS symbol address
  SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl, PtrVT, Offset,
                                           HexagonII::MO_TPREL);
  SDValue Sym = DAG.getNode(HexagonISD::CONST32, dl, PtrVT, TGA);

  // The address of the thread-local variable is the sum of the thread
  // pointer and the variable's offset.
  return DAG.getNode(ISD::ADD, dl, PtrVT, TP, Sym);
}

//
// Lower using the general dynamic model for TLS addresses
//
SDValue
HexagonTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
      SelectionDAG &DAG) const {
  SDLoc dl(GA);
  int64_t Offset = GA->getOffset();
  auto PtrVT = getPointerTy(DAG.getDataLayout());

  // First generate the TLS symbol address
  SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl, PtrVT, Offset,
                                           HexagonII::MO_GDGOT);

  // Then, generate the GOT pointer
  SDValue GOT = LowerGLOBAL_OFFSET_TABLE(TGA, DAG);

  // Add the TLS symbol and the GOT pointer
  SDValue Sym = DAG.getNode(HexagonISD::CONST32, dl, PtrVT, TGA);
  SDValue Chain = DAG.getNode(ISD::ADD, dl, PtrVT, GOT, Sym);

  // Copy over the argument to R0
  SDValue InFlag;
  Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, Hexagon::R0, Chain, InFlag);
  InFlag = Chain.getValue(1);

  unsigned Flags =
      static_cast<const HexagonSubtarget &>(DAG.getSubtarget()).useLongCalls()
          ? HexagonII::MO_GDPLT | HexagonII::HMOTF_ConstExtended
          : HexagonII::MO_GDPLT;

  return GetDynamicTLSAddr(DAG, Chain, GA, InFlag, PtrVT,
                           Hexagon::R0, Flags);
}

//
// Lower TLS addresses.
//
// For now for dynamic models, we only support the general dynamic model.
//
SDValue
HexagonTargetLowering::LowerGlobalTLSAddress(SDValue Op,
      SelectionDAG &DAG) const {
  GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);

  switch (HTM.getTLSModel(GA->getGlobal())) {
    case TLSModel::GeneralDynamic:
    case TLSModel::LocalDynamic:
      return LowerToTLSGeneralDynamicModel(GA, DAG);
    case TLSModel::InitialExec:
      return LowerToTLSInitialExecModel(GA, DAG);
    case TLSModel::LocalExec:
      return LowerToTLSLocalExecModel(GA, DAG);
  }
  llvm_unreachable("Bogus TLS model");
}

//===----------------------------------------------------------------------===//
// TargetLowering Implementation
//===----------------------------------------------------------------------===//

HexagonTargetLowering::HexagonTargetLowering(const TargetMachine &TM,
                                             const HexagonSubtarget &ST)
    : TargetLowering(TM), HTM(static_cast<const HexagonTargetMachine&>(TM)),
      Subtarget(ST) {
  auto &HRI = *Subtarget.getRegisterInfo();

  setPrefLoopAlignment(Align(16));
  setMinFunctionAlignment(Align(4));
  setPrefFunctionAlignment(Align(16));
  setStackPointerRegisterToSaveRestore(HRI.getStackRegister());
  setBooleanContents(TargetLoweringBase::UndefinedBooleanContent);
  setBooleanVectorContents(TargetLoweringBase::UndefinedBooleanContent);

  setMaxAtomicSizeInBitsSupported(64);
  setMinCmpXchgSizeInBits(32);

  if (EnableHexSDNodeSched)
    setSchedulingPreference(Sched::VLIW);
  else
    setSchedulingPreference(Sched::Source);

  // Limits for inline expansion of memcpy/memmove.
  MaxStoresPerMemcpy = MaxStoresPerMemcpyCL;
  MaxStoresPerMemcpyOptSize = MaxStoresPerMemcpyOptSizeCL;
  MaxStoresPerMemmove = MaxStoresPerMemmoveCL;
  MaxStoresPerMemmoveOptSize = MaxStoresPerMemmoveOptSizeCL;
  MaxStoresPerMemset = MaxStoresPerMemsetCL;
  MaxStoresPerMemsetOptSize = MaxStoresPerMemsetOptSizeCL;

  //
  // Set up register classes.
  //

  addRegisterClass(MVT::i1, &Hexagon::PredRegsRegClass);
  addRegisterClass(MVT::v2i1, &Hexagon::PredRegsRegClass);  // bbbbaaaa
  addRegisterClass(MVT::v4i1, &Hexagon::PredRegsRegClass);  // ddccbbaa
  addRegisterClass(MVT::v8i1, &Hexagon::PredRegsRegClass);  // hgfedcba
  addRegisterClass(MVT::i32, &Hexagon::IntRegsRegClass);
  addRegisterClass(MVT::v2i16, &Hexagon::IntRegsRegClass);
  addRegisterClass(MVT::v4i8, &Hexagon::IntRegsRegClass);
  addRegisterClass(MVT::i64, &Hexagon::DoubleRegsRegClass);
  addRegisterClass(MVT::v8i8, &Hexagon::DoubleRegsRegClass);
  addRegisterClass(MVT::v4i16, &Hexagon::DoubleRegsRegClass);
  addRegisterClass(MVT::v2i32, &Hexagon::DoubleRegsRegClass);

  addRegisterClass(MVT::f32, &Hexagon::IntRegsRegClass);
  addRegisterClass(MVT::f64, &Hexagon::DoubleRegsRegClass);

  //
  // Handling of scalar operations.
  //
  // All operations default to "legal", except:
  // - indexed loads and stores (pre-/post-incremented),
  // - ANY_EXTEND_VECTOR_INREG, ATOMIC_CMP_SWAP_WITH_SUCCESS, CONCAT_VECTORS,
  //   ConstantFP, DEBUGTRAP, FCEIL, FCOPYSIGN, FEXP, FEXP2, FFLOOR, FGETSIGN,
  //   FLOG, FLOG2, FLOG10, FMAXNUM, FMINNUM, FNEARBYINT, FRINT, FROUND, TRAP,
  //   FTRUNC, PREFETCH, SIGN_EXTEND_VECTOR_INREG, ZERO_EXTEND_VECTOR_INREG,
  // which default to "expand" for at least one type.

  // Misc operations.
  setOperationAction(ISD::ConstantFP, MVT::f32, Legal);
  setOperationAction(ISD::ConstantFP, MVT::f64, Legal);
  setOperationAction(ISD::TRAP, MVT::Other, Legal);
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
  setOperationAction(ISD::JumpTable, MVT::i32, Custom);
  setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
  setOperationAction(ISD::INLINEASM, MVT::Other, Custom);
  setOperationAction(ISD::INLINEASM_BR, MVT::Other, Custom);
  setOperationAction(ISD::PREFETCH, MVT::Other, Custom);
  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
  setOperationAction(ISD::EH_RETURN, MVT::Other, Custom);
  setOperationAction(ISD::GLOBAL_OFFSET_TABLE, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);

  // Custom legalize GlobalAddress nodes into CONST32.
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i8, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);

  // Hexagon needs to optimize cases with negative constants.
  setOperationAction(ISD::SETCC, MVT::i8, Custom);
  setOperationAction(ISD::SETCC, MVT::i16, Custom);
  setOperationAction(ISD::SETCC, MVT::v4i8, Custom);
  setOperationAction(ISD::SETCC, MVT::v2i16, Custom);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::VAARG, MVT::Other, Expand);
  if (Subtarget.isEnvironmentMusl())
    setOperationAction(ISD::VACOPY, MVT::Other, Custom);
  else
    setOperationAction(ISD::VACOPY, MVT::Other, Expand);

  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);

  if (EmitJumpTables)
    setMinimumJumpTableEntries(MinimumJumpTables);
  else
    setMinimumJumpTableEntries(std::numeric_limits<unsigned>::max());
  setOperationAction(ISD::BR_JT, MVT::Other, Expand);

  for (unsigned LegalIntOp :
       {ISD::ABS, ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX}) {
    setOperationAction(LegalIntOp, MVT::i32, Legal);
    setOperationAction(LegalIntOp, MVT::i64, Legal);
  }

  // Hexagon has A4_addp_c and A4_subp_c that take and generate a carry bit,
  // but they only operate on i64.
  for (MVT VT : MVT::integer_valuetypes()) {
    setOperationAction(ISD::UADDO, VT, Custom);
    setOperationAction(ISD::USUBO, VT, Custom);
    setOperationAction(ISD::SADDO, VT, Expand);
    setOperationAction(ISD::SSUBO, VT, Expand);
    setOperationAction(ISD::ADDCARRY, VT, Expand);
    setOperationAction(ISD::SUBCARRY, VT, Expand);
  }
  setOperationAction(ISD::ADDCARRY, MVT::i64, Custom);
  setOperationAction(ISD::SUBCARRY, MVT::i64, Custom);

  setOperationAction(ISD::CTLZ, MVT::i8, Promote);
  setOperationAction(ISD::CTLZ, MVT::i16, Promote);
  setOperationAction(ISD::CTTZ, MVT::i8, Promote);
  setOperationAction(ISD::CTTZ, MVT::i16, Promote);

  // Popcount can count # of 1s in i64 but returns i32.
  setOperationAction(ISD::CTPOP, MVT::i8, Promote);
  setOperationAction(ISD::CTPOP, MVT::i16, Promote);
  setOperationAction(ISD::CTPOP, MVT::i32, Promote);
  setOperationAction(ISD::CTPOP, MVT::i64, Legal);

  setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);
  setOperationAction(ISD::BITREVERSE, MVT::i64, Legal);
  setOperationAction(ISD::BSWAP, MVT::i32, Legal);
  setOperationAction(ISD::BSWAP, MVT::i64, Legal);

  setOperationAction(ISD::FSHL, MVT::i32, Legal);
  setOperationAction(ISD::FSHL, MVT::i64, Legal);
  setOperationAction(ISD::FSHR, MVT::i32, Legal);
  setOperationAction(ISD::FSHR, MVT::i64, Legal);

  for (unsigned IntExpOp :
       {ISD::SDIV,      ISD::UDIV,      ISD::SREM,      ISD::UREM,
        ISD::SDIVREM,   ISD::UDIVREM,   ISD::ROTL,      ISD::ROTR,
        ISD::SHL_PARTS, ISD::SRA_PARTS, ISD::SRL_PARTS,
        ISD::SMUL_LOHI, ISD::UMUL_LOHI}) {
    for (MVT VT : MVT::integer_valuetypes())
      setOperationAction(IntExpOp, VT, Expand);
  }

  for (unsigned FPExpOp :
       {ISD::FDIV, ISD::FREM, ISD::FSQRT, ISD::FSIN, ISD::FCOS, ISD::FSINCOS,
        ISD::FPOW, ISD::FCOPYSIGN}) {
    for (MVT VT : MVT::fp_valuetypes())
      setOperationAction(FPExpOp, VT, Expand);
  }

  // No extending loads from i32.
  for (MVT VT : MVT::integer_valuetypes()) {
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i32, Expand);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i32, Expand);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::i32, Expand);
  }
  // Turn FP truncstore into trunc + store.
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  // Turn FP extload into load/fpextend.
  for (MVT VT : MVT::fp_valuetypes())
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f32, Expand);

  // Expand BR_CC and SELECT_CC for all integer and fp types.
  for (MVT VT : MVT::integer_valuetypes()) {
    setOperationAction(ISD::BR_CC, VT, Expand);
    setOperationAction(ISD::SELECT_CC, VT, Expand);
  }
  for (MVT VT : MVT::fp_valuetypes()) {
    setOperationAction(ISD::BR_CC, VT, Expand);
    setOperationAction(ISD::SELECT_CC, VT, Expand);
  }
  setOperationAction(ISD::BR_CC, MVT::Other, Expand);

  //
  // Handling of vector operations.
  //

  // Set the action for vector operations to "expand", then override it with
  // either "custom" or "legal" for specific cases.
  static const unsigned VectExpOps[] = {
    // Integer arithmetic:
    ISD::ADD,     ISD::SUB,     ISD::MUL,     ISD::SDIV,      ISD::UDIV,
    ISD::SREM,    ISD::UREM,    ISD::SDIVREM, ISD::UDIVREM,   ISD::SADDO,
    ISD::UADDO,   ISD::SSUBO,   ISD::USUBO,   ISD::SMUL_LOHI, ISD::UMUL_LOHI,
    // Logical/bit:
    ISD::AND,     ISD::OR,      ISD::XOR,     ISD::ROTL,      ISD::ROTR,
    ISD::CTPOP,   ISD::CTLZ,    ISD::CTTZ,
    // Floating point arithmetic/math functions:
    ISD::FADD,    ISD::FSUB,    ISD::FMUL,    ISD::FMA,       ISD::FDIV,
    ISD::FREM,    ISD::FNEG,    ISD::FABS,    ISD::FSQRT,     ISD::FSIN,
    ISD::FCOS,    ISD::FPOW,    ISD::FLOG,    ISD::FLOG2,
    ISD::FLOG10,  ISD::FEXP,    ISD::FEXP2,   ISD::FCEIL,     ISD::FTRUNC,
    ISD::FRINT,   ISD::FNEARBYINT,            ISD::FROUND,    ISD::FFLOOR,
    ISD::FMINNUM, ISD::FMAXNUM, ISD::FSINCOS,
    // Misc:
    ISD::BR_CC,   ISD::SELECT_CC,             ISD::ConstantPool,
    // Vector:
    ISD::BUILD_VECTOR,          ISD::SCALAR_TO_VECTOR,
    ISD::EXTRACT_VECTOR_ELT,    ISD::INSERT_VECTOR_ELT,
    ISD::EXTRACT_SUBVECTOR,     ISD::INSERT_SUBVECTOR,
    ISD::CONCAT_VECTORS,        ISD::VECTOR_SHUFFLE,
    ISD::SPLAT_VECTOR,
  };

  for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
    for (unsigned VectExpOp : VectExpOps)
      setOperationAction(VectExpOp, VT, Expand);

    // Expand all extending loads and truncating stores:
    for (MVT TargetVT : MVT::fixedlen_vector_valuetypes()) {
      if (TargetVT == VT)
        continue;
      setLoadExtAction(ISD::EXTLOAD, TargetVT, VT, Expand);
      setLoadExtAction(ISD::ZEXTLOAD, TargetVT, VT, Expand);
      setLoadExtAction(ISD::SEXTLOAD, TargetVT, VT, Expand);
      setTruncStoreAction(VT, TargetVT, Expand);
    }

    // Normalize all inputs to SELECT to be vectors of i32.
    if (VT.getVectorElementType() != MVT::i32) {
      MVT VT32 = MVT::getVectorVT(MVT::i32, VT.getSizeInBits()/32);
      setOperationAction(ISD::SELECT, VT, Promote);
      AddPromotedToType(ISD::SELECT, VT, VT32);
    }
    setOperationAction(ISD::SRA, VT, Custom);
    setOperationAction(ISD::SHL, VT, Custom);
    setOperationAction(ISD::SRL, VT, Custom);
  }
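
  // An illustration of the SELECT promotion above: v4i16 is 64 bits wide,
  // so a SELECT on v4i16 is promoted to a SELECT on v2i32 (64/32 == 2 lanes
  // of i32), with the operands and result bitcast accordingly.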

  // Extending loads from (native) vectors of i8 into (native) vectors of i16
  // are legal.
  setLoadExtAction(ISD::EXTLOAD, MVT::v2i16, MVT::v2i8, Legal);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i16, MVT::v2i8, Legal);
  setLoadExtAction(ISD::SEXTLOAD, MVT::v2i16, MVT::v2i8, Legal);
  setLoadExtAction(ISD::EXTLOAD, MVT::v4i16, MVT::v4i8, Legal);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i16, MVT::v4i8, Legal);
  setLoadExtAction(ISD::SEXTLOAD, MVT::v4i16, MVT::v4i8, Legal);

  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i32, Legal);

  // Types natively supported:
  for (MVT NativeVT : {MVT::v8i1, MVT::v4i1, MVT::v2i1, MVT::v4i8,
                       MVT::v8i8, MVT::v2i16, MVT::v4i16, MVT::v2i32}) {
    setOperationAction(ISD::BUILD_VECTOR, NativeVT, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, NativeVT, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, NativeVT, Custom);
    setOperationAction(ISD::EXTRACT_SUBVECTOR, NativeVT, Custom);
    setOperationAction(ISD::INSERT_SUBVECTOR, NativeVT, Custom);
    setOperationAction(ISD::CONCAT_VECTORS, NativeVT, Custom);

    setOperationAction(ISD::ADD, NativeVT, Legal);
    setOperationAction(ISD::SUB, NativeVT, Legal);
    setOperationAction(ISD::MUL, NativeVT, Legal);
    setOperationAction(ISD::AND, NativeVT, Legal);
    setOperationAction(ISD::OR, NativeVT, Legal);
    setOperationAction(ISD::XOR, NativeVT, Legal);

    if (NativeVT.getVectorElementType() != MVT::i1)
      setOperationAction(ISD::SPLAT_VECTOR, NativeVT, Legal);
  }

  for (MVT VT : {MVT::v8i8, MVT::v4i16, MVT::v2i32}) {
    setOperationAction(ISD::SMIN, VT, Legal);
    setOperationAction(ISD::SMAX, VT, Legal);
    setOperationAction(ISD::UMIN, VT, Legal);
    setOperationAction(ISD::UMAX, VT, Legal);
  }

  // Custom lower unaligned loads.
  // Also, for both loads and stores, verify the alignment of the address
  // in case it is a compile-time constant. This is a usability feature:
  // it provides a meaningful error message to users.
  for (MVT VT : {MVT::i16, MVT::i32, MVT::v4i8, MVT::i64, MVT::v8i8,
                 MVT::v2i16, MVT::v4i16, MVT::v2i32}) {
    setOperationAction(ISD::LOAD, VT, Custom);
    setOperationAction(ISD::STORE, VT, Custom);
  }

  for (MVT VT : {MVT::v2i16, MVT::v4i8, MVT::v8i8, MVT::v2i32, MVT::v4i16}) {
    setCondCodeAction(ISD::SETNE, VT, Expand);
    setCondCodeAction(ISD::SETLE, VT, Expand);
    setCondCodeAction(ISD::SETGE, VT, Expand);
    setCondCodeAction(ISD::SETLT, VT, Expand);
    setCondCodeAction(ISD::SETULE, VT, Expand);
    setCondCodeAction(ISD::SETUGE, VT, Expand);
    setCondCodeAction(ISD::SETULT, VT, Expand);
  }

  // Custom-lower bitcasts from i8 to v8i1.
  setOperationAction(ISD::BITCAST, MVT::i8, Custom);
  setOperationAction(ISD::SETCC, MVT::v2i16, Custom);
  setOperationAction(ISD::VSELECT, MVT::v4i8, Custom);
  setOperationAction(ISD::VSELECT, MVT::v2i16, Custom);
  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i8, Custom);
  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i16, Custom);
  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i8, Custom);

  // V5+.
  setOperationAction(ISD::FMA, MVT::f64, Expand);
  setOperationAction(ISD::FADD, MVT::f64, Expand);
  setOperationAction(ISD::FSUB, MVT::f64, Expand);
  setOperationAction(ISD::FMUL, MVT::f64, Expand);

  setOperationAction(ISD::FMINNUM, MVT::f32, Legal);
  setOperationAction(ISD::FMAXNUM, MVT::f32, Legal);

  setOperationAction(ISD::FP_TO_UINT, MVT::i1, Promote);
  setOperationAction(ISD::FP_TO_UINT, MVT::i8, Promote);
  setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote);
  setOperationAction(ISD::FP_TO_SINT, MVT::i1, Promote);
  setOperationAction(ISD::FP_TO_SINT, MVT::i8, Promote);
  setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote);
  setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);
  setOperationAction(ISD::UINT_TO_FP, MVT::i8, Promote);
  setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote);
  setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
  setOperationAction(ISD::SINT_TO_FP, MVT::i8, Promote);
  setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);

  // Handling of indexed loads/stores: default is "expand".
  //
  for (MVT VT : {MVT::i8, MVT::i16, MVT::i32, MVT::i64, MVT::f32, MVT::f64,
                 MVT::v2i16, MVT::v2i32, MVT::v4i8, MVT::v4i16, MVT::v8i8}) {
    setIndexedLoadAction(ISD::POST_INC, VT, Legal);
    setIndexedStoreAction(ISD::POST_INC, VT, Legal);
  }
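
  // E.g. a post-incremented i32 load is roughly "r1 = memw(r0++#4)" in
  // Hexagon assembly: the base register is updated as part of the load.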

  // Subtarget-specific operation actions.
  //
  if (Subtarget.hasV60Ops()) {
    setOperationAction(ISD::ROTL, MVT::i32, Legal);
    setOperationAction(ISD::ROTL, MVT::i64, Legal);
    setOperationAction(ISD::ROTR, MVT::i32, Legal);
    setOperationAction(ISD::ROTR, MVT::i64, Legal);
  }
  if (Subtarget.hasV66Ops()) {
    setOperationAction(ISD::FADD, MVT::f64, Legal);
    setOperationAction(ISD::FSUB, MVT::f64, Legal);
  }
  if (Subtarget.hasV67Ops()) {
    setOperationAction(ISD::FMINNUM, MVT::f64, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::f64, Legal);
    setOperationAction(ISD::FMUL, MVT::f64, Legal);
  }

  setTargetDAGCombine(ISD::VSELECT);

  if (Subtarget.useHVXOps())
    initializeHVXLowering();

  computeRegisterProperties(&HRI);

  //
  // Library calls for unsupported operations
  //
  bool FastMath = EnableFastMath;

  setLibcallName(RTLIB::SDIV_I32, "__hexagon_divsi3");
  setLibcallName(RTLIB::SDIV_I64, "__hexagon_divdi3");
  setLibcallName(RTLIB::UDIV_I32, "__hexagon_udivsi3");
  setLibcallName(RTLIB::UDIV_I64, "__hexagon_udivdi3");
  setLibcallName(RTLIB::SREM_I32, "__hexagon_modsi3");
  setLibcallName(RTLIB::SREM_I64, "__hexagon_moddi3");
  setLibcallName(RTLIB::UREM_I32, "__hexagon_umodsi3");
  setLibcallName(RTLIB::UREM_I64, "__hexagon_umoddi3");

  setLibcallName(RTLIB::SINTTOFP_I128_F64, "__hexagon_floattidf");
  setLibcallName(RTLIB::SINTTOFP_I128_F32, "__hexagon_floattisf");
  setLibcallName(RTLIB::FPTOUINT_F32_I128, "__hexagon_fixunssfti");
  setLibcallName(RTLIB::FPTOUINT_F64_I128, "__hexagon_fixunsdfti");
  setLibcallName(RTLIB::FPTOSINT_F32_I128, "__hexagon_fixsfti");
  setLibcallName(RTLIB::FPTOSINT_F64_I128, "__hexagon_fixdfti");

  // This is the only fast library function for sqrtd.
  if (FastMath)
    setLibcallName(RTLIB::SQRT_F64, "__hexagon_fast2_sqrtdf2");

  // Prefix is: nothing for "slow-math",
  //            "fast_" for V5+ fast-math double-precision.
  // (For now, the "fast_" and "fast2_" prefixes are kept separate.)
  if (FastMath) {
    setLibcallName(RTLIB::ADD_F64, "__hexagon_fast_adddf3");
    setLibcallName(RTLIB::SUB_F64, "__hexagon_fast_subdf3");
    setLibcallName(RTLIB::MUL_F64, "__hexagon_fast_muldf3");
    setLibcallName(RTLIB::DIV_F64, "__hexagon_fast_divdf3");
    setLibcallName(RTLIB::DIV_F32, "__hexagon_fast_divsf3");
  } else {
    setLibcallName(RTLIB::ADD_F64, "__hexagon_adddf3");
    setLibcallName(RTLIB::SUB_F64, "__hexagon_subdf3");
    setLibcallName(RTLIB::MUL_F64, "__hexagon_muldf3");
    setLibcallName(RTLIB::DIV_F64, "__hexagon_divdf3");
    setLibcallName(RTLIB::DIV_F32, "__hexagon_divsf3");
  }

  if (FastMath)
    setLibcallName(RTLIB::SQRT_F32, "__hexagon_fast2_sqrtf");
  else
    setLibcallName(RTLIB::SQRT_F32, "__hexagon_sqrtf");

  // These cause problems when the shift amount is non-constant.
  setLibcallName(RTLIB::SHL_I128, nullptr);
  setLibcallName(RTLIB::SRL_I128, nullptr);
  setLibcallName(RTLIB::SRA_I128, nullptr);
}

const char* HexagonTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch ((HexagonISD::NodeType)Opcode) {
  case HexagonISD::ADDC:          return "HexagonISD::ADDC";
  case HexagonISD::SUBC:          return "HexagonISD::SUBC";
  case HexagonISD::ALLOCA:        return "HexagonISD::ALLOCA";
  case HexagonISD::AT_GOT:        return "HexagonISD::AT_GOT";
  case HexagonISD::AT_PCREL:      return "HexagonISD::AT_PCREL";
  case HexagonISD::BARRIER:       return "HexagonISD::BARRIER";
  case HexagonISD::CALL:          return "HexagonISD::CALL";
  case HexagonISD::CALLnr:        return "HexagonISD::CALLnr";
  case HexagonISD::CALLR:         return "HexagonISD::CALLR";
  case HexagonISD::COMBINE:       return "HexagonISD::COMBINE";
  case HexagonISD::CONST32_GP:    return "HexagonISD::CONST32_GP";
  case HexagonISD::CONST32:       return "HexagonISD::CONST32";
  case HexagonISD::CP:            return "HexagonISD::CP";
  case HexagonISD::DCFETCH:       return "HexagonISD::DCFETCH";
  case HexagonISD::EH_RETURN:     return "HexagonISD::EH_RETURN";
  case HexagonISD::TSTBIT:        return "HexagonISD::TSTBIT";
  case HexagonISD::EXTRACTU:      return "HexagonISD::EXTRACTU";
  case HexagonISD::INSERT:        return "HexagonISD::INSERT";
  case HexagonISD::JT:            return "HexagonISD::JT";
  case HexagonISD::RET_FLAG:      return "HexagonISD::RET_FLAG";
  case HexagonISD::TC_RETURN:     return "HexagonISD::TC_RETURN";
  case HexagonISD::VASL:          return "HexagonISD::VASL";
  case HexagonISD::VASR:          return "HexagonISD::VASR";
  case HexagonISD::VLSR:          return "HexagonISD::VLSR";
  case HexagonISD::VEXTRACTW:     return "HexagonISD::VEXTRACTW";
  case HexagonISD::VINSERTW0:     return "HexagonISD::VINSERTW0";
  case HexagonISD::VROR:          return "HexagonISD::VROR";
  case HexagonISD::READCYCLE:     return "HexagonISD::READCYCLE";
  case HexagonISD::PTRUE:         return "HexagonISD::PTRUE";
  case HexagonISD::PFALSE:        return "HexagonISD::PFALSE";
  case HexagonISD::D2P:           return "HexagonISD::D2P";
  case HexagonISD::P2D:           return "HexagonISD::P2D";
  case HexagonISD::V2Q:           return "HexagonISD::V2Q";
  case HexagonISD::Q2V:           return "HexagonISD::Q2V";
  case HexagonISD::QCAT:          return "HexagonISD::QCAT";
  case HexagonISD::QTRUE:         return "HexagonISD::QTRUE";
  case HexagonISD::QFALSE:        return "HexagonISD::QFALSE";
  case HexagonISD::TYPECAST:      return "HexagonISD::TYPECAST";
  case HexagonISD::VALIGN:        return "HexagonISD::VALIGN";
  case HexagonISD::VALIGNADDR:    return "HexagonISD::VALIGNADDR";
  case HexagonISD::VPACKL:        return "HexagonISD::VPACKL";
  case HexagonISD::VUNPACK:       return "HexagonISD::VUNPACK";
  case HexagonISD::VUNPACKU:      return "HexagonISD::VUNPACKU";
  case HexagonISD::ISEL:          return "HexagonISD::ISEL";
  case HexagonISD::OP_END:        break;
  }
  return nullptr;
}

void
HexagonTargetLowering::validateConstPtrAlignment(SDValue Ptr, const SDLoc &dl,
      unsigned NeedAlign) const {
  auto *CA = dyn_cast<ConstantSDNode>(Ptr);
  if (!CA)
    return;
  unsigned Addr = CA->getZExtValue();
  unsigned HaveAlign = Addr != 0 ? 1u << countTrailingZeros(Addr) : NeedAlign;
  if (HaveAlign < NeedAlign) {
    std::string ErrMsg;
    raw_string_ostream O(ErrMsg);
    O << "Misaligned constant address: " << format_hex(Addr, 10)
      << " has alignment " << HaveAlign
      << ", but the memory access requires " << NeedAlign;
    if (DebugLoc DL = dl.getDebugLoc())
      DL.print(O << ", at ");
    report_fatal_error(O.str());
  }
}

// Bit-reverse Load Intrinsic: Check if the instruction is a bit-reverse load
// intrinsic.
static bool isBrevLdIntrinsic(const Value *Inst) {
  unsigned ID = cast<IntrinsicInst>(Inst)->getIntrinsicID();
  return (ID == Intrinsic::hexagon_L2_loadrd_pbr ||
          ID == Intrinsic::hexagon_L2_loadri_pbr ||
          ID == Intrinsic::hexagon_L2_loadrh_pbr ||
          ID == Intrinsic::hexagon_L2_loadruh_pbr ||
          ID == Intrinsic::hexagon_L2_loadrb_pbr ||
          ID == Intrinsic::hexagon_L2_loadrub_pbr);
}

// Bit-reverse Load Intrinsic: Crawl up and figure out the object from the
// previous instruction. So far we only handle bitcast, extractvalue and
// bit-reverse load intrinsic instructions. Should we handle CGEPs?
static Value *getBrevLdObject(Value *V) {
  if (Operator::getOpcode(V) == Instruction::ExtractValue ||
      Operator::getOpcode(V) == Instruction::BitCast)
    V = cast<Operator>(V)->getOperand(0);
  else if (isa<IntrinsicInst>(V) && isBrevLdIntrinsic(V))
    V = cast<Instruction>(V)->getOperand(0);
  return V;
}

// Bit-reverse Load Intrinsic: For a PHI node, return either an incoming edge
// or a back edge. If the back edge comes from the intrinsic itself, the
// incoming edge is returned.
static Value *returnEdge(const PHINode *PN, Value *IntrBaseVal) {
  const BasicBlock *Parent = PN->getParent();
  int Idx = -1;
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i < e; ++i) {
    BasicBlock *Blk = PN->getIncomingBlock(i);
    // Determine if the back edge originates from the intrinsic.
    if (Blk == Parent) {
      Value *BackEdgeVal = PN->getIncomingValue(i);
      Value *BaseVal;
      // Loop until we return the same Value or hit IntrBaseVal.
      do {
        BaseVal = BackEdgeVal;
        BackEdgeVal = getBrevLdObject(BackEdgeVal);
      } while ((BaseVal != BackEdgeVal) && (IntrBaseVal != BackEdgeVal));
      // If getBrevLdObject returns IntrBaseVal, we should return the
      // incoming edge.
      if (IntrBaseVal == BackEdgeVal)
        continue;
      Idx = i;
      break;
    } else // Record the incoming edge.
      Idx = i;
  }
  assert(Idx >= 0 && "Unexpected index to incoming argument in PHI");
  return PN->getIncomingValue(Idx);
}

// Bit-reverse Load Intrinsic: Figure out the underlying object the base
// pointer points to, for the bit-reverse load intrinsic. Attaching this to
// the memoperand might help alias analysis figure out the dependencies.
static Value *getUnderLyingObjectForBrevLdIntr(Value *V) {
  Value *IntrBaseVal = V;
  Value *BaseVal;
  // Loop until getBrevLdObject returns the same Value: at that point we have
  // either figured out the object or hit a PHI.
  do {
    BaseVal = V;
    V = getBrevLdObject(V);
  } while (BaseVal != V);

  // Identify the object from the PHI node.
  if (const PHINode *PN = dyn_cast<PHINode>(V))
    return returnEdge(PN, IntrBaseVal);
  // For non-PHI nodes, the object is the last value returned by
  // getBrevLdObject.
  else
    return V;
}

/// Given an intrinsic, checks if on the target the intrinsic will need to map
/// to a MemIntrinsicNode (touches memory). If this is the case, it returns
/// true and stores the intrinsic information into the IntrinsicInfo that was
/// passed to the function.
bool HexagonTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                               const CallInst &I,
                                               MachineFunction &MF,
                                               unsigned Intrinsic) const {
  switch (Intrinsic) {
  case Intrinsic::hexagon_L2_loadrd_pbr:
  case Intrinsic::hexagon_L2_loadri_pbr:
  case Intrinsic::hexagon_L2_loadrh_pbr:
  case Intrinsic::hexagon_L2_loadruh_pbr:
  case Intrinsic::hexagon_L2_loadrb_pbr:
  case Intrinsic::hexagon_L2_loadrub_pbr: {
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    auto &DL = I.getCalledFunction()->getParent()->getDataLayout();
    auto &Cont = I.getCalledFunction()->getParent()->getContext();
    // The intrinsic function call is of the form { ElTy, i8* }
    // @llvm.hexagon.L2.loadXX.pbr(i8*, i32). The pointer and memory access
    // type should be derived from ElTy.
    Type *ElTy = I.getCalledFunction()->getReturnType()->getStructElementType(0);
    Info.memVT = MVT::getVT(ElTy);
    llvm::Value *BasePtrVal = I.getOperand(0);
    Info.ptrVal = getUnderLyingObjectForBrevLdIntr(BasePtrVal);
    // The offset value comes through the modifier register. For now, assume
    // the offset is 0.
    Info.offset = 0;
    Info.align = DL.getABITypeAlign(Info.memVT.getTypeForEVT(Cont));
    Info.flags = MachineMemOperand::MOLoad;
    return true;
  }
  case Intrinsic::hexagon_V6_vgathermw:
  case Intrinsic::hexagon_V6_vgathermw_128B:
  case Intrinsic::hexagon_V6_vgathermh:
  case Intrinsic::hexagon_V6_vgathermh_128B:
  case Intrinsic::hexagon_V6_vgathermhw:
  case Intrinsic::hexagon_V6_vgathermhw_128B:
  case Intrinsic::hexagon_V6_vgathermwq:
  case Intrinsic::hexagon_V6_vgathermwq_128B:
  case Intrinsic::hexagon_V6_vgathermhq:
  case Intrinsic::hexagon_V6_vgathermhq_128B:
  case Intrinsic::hexagon_V6_vgathermhwq:
  case Intrinsic::hexagon_V6_vgathermhwq_128B: {
    const Module &M = *I.getParent()->getParent()->getParent();
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Type *VecTy = I.getArgOperand(1)->getType();
    Info.memVT = MVT::getVT(VecTy);
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.align =
        MaybeAlign(M.getDataLayout().getTypeAllocSizeInBits(VecTy) / 8);
    Info.flags = MachineMemOperand::MOLoad |
                 MachineMemOperand::MOStore |
                 MachineMemOperand::MOVolatile;
    return true;
  }
  default:
    break;
  }
  return false;
}

bool HexagonTargetLowering::hasBitTest(SDValue X, SDValue Y) const {
  return X.getValueType().isScalarInteger(); // 'tstbit'
}

bool HexagonTargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
  return isTruncateFree(EVT::getEVT(Ty1), EVT::getEVT(Ty2));
}

bool HexagonTargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
  if (!VT1.isSimple() || !VT2.isSimple())
    return false;
  return VT1.getSimpleVT() == MVT::i64 && VT2.getSimpleVT() == MVT::i32;
}

bool HexagonTargetLowering::isFMAFasterThanFMulAndFAdd(
    const MachineFunction &MF, EVT VT) const {
  return isOperationLegalOrCustom(ISD::FMA, VT);
}

// Should we expand the build vector with shuffles?
bool HexagonTargetLowering::shouldExpandBuildVectorWithShuffles(EVT VT,
      unsigned DefinedValues) const {
  return false;
}

bool HexagonTargetLowering::isShuffleMaskLegal(ArrayRef<int> Mask,
                                               EVT VT) const {
  return true;
}

TargetLoweringBase::LegalizeTypeAction
HexagonTargetLowering::getPreferredVectorAction(MVT VT) const {
  unsigned VecLen = VT.getVectorNumElements();
  MVT ElemTy = VT.getVectorElementType();

  if (VecLen == 1 || VT.isScalableVector())
    return TargetLoweringBase::TypeScalarizeVector;

  if (Subtarget.useHVXOps()) {
    unsigned Action = getPreferredHvxVectorAction(VT);
    if (Action != ~0u)
      return static_cast<TargetLoweringBase::LegalizeTypeAction>(Action);
  }

  // Always widen (remaining) vectors of i1.
  if (ElemTy == MVT::i1)
    return TargetLoweringBase::TypeWidenVector;

  return TargetLoweringBase::TypeSplitVector;
}

std::pair<SDValue, int>
HexagonTargetLowering::getBaseAndOffset(SDValue Addr) const {
  if (Addr.getOpcode() == ISD::ADD) {
    SDValue Op1 = Addr.getOperand(1);
    if (auto *CN = dyn_cast<const ConstantSDNode>(Op1.getNode()))
      return { Addr.getOperand(0), CN->getSExtValue() };
  }
  return { Addr, 0 };
}
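
// For example, an address of the form (add %base, Constant:i32<16>) yields
// {%base, 16}; any other address yields {Addr, 0}.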

// Lower a vector shuffle (V1, V2, V3). V1 and V2 are the two vectors
// to select data from, V3 is the permutation.
SDValue
HexagonTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG)
      const {
  const auto *SVN = cast<ShuffleVectorSDNode>(Op);
  ArrayRef<int> AM = SVN->getMask();
  assert(AM.size() <= 8 && "Unexpected shuffle mask");
  unsigned VecLen = AM.size();

  MVT VecTy = ty(Op);
  assert(!Subtarget.isHVXVectorType(VecTy, true) &&
         "HVX shuffles should be legal");
  assert(VecTy.getSizeInBits() <= 64 && "Unexpected vector length");

  SDValue Op0 = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1);
  const SDLoc &dl(Op);

  // If the inputs are not the same as the output, bail. This is not an
  // error situation, but complicates the handling and the default expansion
  // (into BUILD_VECTOR) should be adequate.
  if (ty(Op0) != VecTy || ty(Op1) != VecTy)
    return SDValue();

  // Normalize the mask so that the first non-negative index comes from
  // the first operand.
  SmallVector<int,8> Mask(AM.begin(), AM.end());
  unsigned F = llvm::find_if(AM, [](int M) { return M >= 0; }) - AM.data();
  if (F == AM.size())
    return DAG.getUNDEF(VecTy);
  if (AM[F] >= int(VecLen)) {
    ShuffleVectorSDNode::commuteMask(Mask);
    std::swap(Op0, Op1);
  }

  // Express the shuffle mask in terms of bytes.
  SmallVector<int,8> ByteMask;
  unsigned ElemBytes = VecTy.getVectorElementType().getSizeInBits() / 8;
  for (unsigned i = 0, e = Mask.size(); i != e; ++i) {
    int M = Mask[i];
    if (M < 0) {
      for (unsigned j = 0; j != ElemBytes; ++j)
        ByteMask.push_back(-1);
    } else {
      for (unsigned j = 0; j != ElemBytes; ++j)
        ByteMask.push_back(M*ElemBytes + j);
    }
  }
  assert(ByteMask.size() <= 8);

  // All non-undef (non-negative) indexes are well within [0..127], so they
  // fit in a single byte. Build two 64-bit words:
  // - MaskIdx where each byte is the corresponding index (for non-negative
  //   indexes), and 0xFF for negative indexes, and
  // - MaskUnd that has 0xFF for each negative index.
  uint64_t MaskIdx = 0;
  uint64_t MaskUnd = 0;
  for (unsigned i = 0, e = ByteMask.size(); i != e; ++i) {
    unsigned S = 8*i;
    uint64_t M = ByteMask[i] & 0xFF;
    if (M == 0xFF)
      MaskUnd |= M << S;
    MaskIdx |= M << S;
  }
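
  // A worked example: for a v4i8 shuffle with mask {3,2,1,0}, ByteMask is
  // also {3,2,1,0}, so MaskIdx == 0x00010203 and MaskUnd == 0, which matches
  // the byte-swap pattern recognized below.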

  if (ByteMask.size() == 4) {
    // Identity.
    if (MaskIdx == (0x03020100 | MaskUnd))
      return Op0;
    // Byte swap.
    if (MaskIdx == (0x00010203 | MaskUnd)) {
      SDValue T0 = DAG.getBitcast(MVT::i32, Op0);
      SDValue T1 = DAG.getNode(ISD::BSWAP, dl, MVT::i32, T0);
      return DAG.getBitcast(VecTy, T1);
    }

    // Byte packs.
    SDValue Concat10 = DAG.getNode(HexagonISD::COMBINE, dl,
                                   typeJoin({ty(Op1), ty(Op0)}), {Op1, Op0});
    if (MaskIdx == (0x06040200 | MaskUnd))
      return getInstr(Hexagon::S2_vtrunehb, dl, VecTy, {Concat10}, DAG);
    if (MaskIdx == (0x07050301 | MaskUnd))
      return getInstr(Hexagon::S2_vtrunohb, dl, VecTy, {Concat10}, DAG);

    SDValue Concat01 = DAG.getNode(HexagonISD::COMBINE, dl,
                                   typeJoin({ty(Op0), ty(Op1)}), {Op0, Op1});
    if (MaskIdx == (0x02000604 | MaskUnd))
      return getInstr(Hexagon::S2_vtrunehb, dl, VecTy, {Concat01}, DAG);
    if (MaskIdx == (0x03010705 | MaskUnd))
      return getInstr(Hexagon::S2_vtrunohb, dl, VecTy, {Concat01}, DAG);
  }

  if (ByteMask.size() == 8) {
    // Identity.
    if (MaskIdx == (0x0706050403020100ull | MaskUnd))
      return Op0;
    // Byte swap.
    if (MaskIdx == (0x0001020304050607ull | MaskUnd)) {
      SDValue T0 = DAG.getBitcast(MVT::i64, Op0);
      SDValue T1 = DAG.getNode(ISD::BSWAP, dl, MVT::i64, T0);
      return DAG.getBitcast(VecTy, T1);
    }

    // Halfword picks.
    if (MaskIdx == (0x0d0c050409080100ull | MaskUnd))
      return getInstr(Hexagon::S2_shuffeh, dl, VecTy, {Op1, Op0}, DAG);
    if (MaskIdx == (0x0f0e07060b0a0302ull | MaskUnd))
      return getInstr(Hexagon::S2_shuffoh, dl, VecTy, {Op1, Op0}, DAG);
    if (MaskIdx == (0x0d0c090805040100ull | MaskUnd))
      return getInstr(Hexagon::S2_vtrunewh, dl, VecTy, {Op1, Op0}, DAG);
    if (MaskIdx == (0x0f0e0b0a07060302ull | MaskUnd))
      return getInstr(Hexagon::S2_vtrunowh, dl, VecTy, {Op1, Op0}, DAG);
    if (MaskIdx == (0x0706030205040100ull | MaskUnd)) {
      VectorPair P = opSplit(Op0, dl, DAG);
      return getInstr(Hexagon::S2_packhl, dl, VecTy, {P.second, P.first}, DAG);
    }

    // Byte packs.
    if (MaskIdx == (0x0e060c040a020800ull | MaskUnd))
      return getInstr(Hexagon::S2_shuffeb, dl, VecTy, {Op1, Op0}, DAG);
    if (MaskIdx == (0x0f070d050b030901ull | MaskUnd))
      return getInstr(Hexagon::S2_shuffob, dl, VecTy, {Op1, Op0}, DAG);
  }

  return SDValue();
}

// Create a Hexagon-specific node for shifting a vector by an integer.
SDValue
HexagonTargetLowering::getVectorShiftByInt(SDValue Op, SelectionDAG &DAG)
      const {
  unsigned NewOpc;
  switch (Op.getOpcode()) {
    case ISD::SHL:
      NewOpc = HexagonISD::VASL;
      break;
    case ISD::SRA:
      NewOpc = HexagonISD::VASR;
      break;
    case ISD::SRL:
      NewOpc = HexagonISD::VLSR;
      break;
    default:
      llvm_unreachable("Unexpected shift opcode");
  }

  SDValue Op0 = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1);
  const SDLoc &dl(Op);

  switch (Op1.getOpcode()) {
    case ISD::BUILD_VECTOR:
      if (SDValue S = cast<BuildVectorSDNode>(Op1)->getSplatValue())
        return DAG.getNode(NewOpc, dl, ty(Op), Op0, S);
      break;
    case ISD::SPLAT_VECTOR:
      return DAG.getNode(NewOpc, dl, ty(Op), Op0, Op1.getOperand(0));
  }
  return SDValue();
}
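
// For example, (shl v4i16:$V, (splat_vector 3)) becomes VASL($V, 3): every
// lane of $V is shifted left by the same scalar amount.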

SDValue
HexagonTargetLowering::LowerVECTOR_SHIFT(SDValue Op, SelectionDAG &DAG) const {
  return getVectorShiftByInt(Op, DAG);
}

SDValue
HexagonTargetLowering::LowerROTL(SDValue Op, SelectionDAG &DAG) const {
  if (isa<ConstantSDNode>(Op.getOperand(1).getNode()))
    return Op;
  return SDValue();
}

SDValue
HexagonTargetLowering::LowerBITCAST(SDValue Op, SelectionDAG &DAG) const {
  MVT ResTy = ty(Op);
  SDValue InpV = Op.getOperand(0);
  MVT InpTy = ty(InpV);
  assert(ResTy.getSizeInBits() == InpTy.getSizeInBits());
  const SDLoc &dl(Op);

  // Handle conversion from i8 to v8i1.
  if (InpTy == MVT::i8) {
    if (ResTy == MVT::v8i1) {
      SDValue Sc = DAG.getBitcast(tyScalar(InpTy), InpV);
      SDValue Ext = DAG.getZExtOrTrunc(Sc, dl, MVT::i32);
      return getInstr(Hexagon::C2_tfrrp, dl, ResTy, Ext, DAG);
    }
    return SDValue();
  }

  return Op;
}

bool
HexagonTargetLowering::getBuildVectorConstInts(ArrayRef<SDValue> Values,
      MVT VecTy, SelectionDAG &DAG,
      MutableArrayRef<ConstantInt*> Consts) const {
  MVT ElemTy = VecTy.getVectorElementType();
  unsigned ElemWidth = ElemTy.getSizeInBits();
  IntegerType *IntTy = IntegerType::get(*DAG.getContext(), ElemWidth);
  bool AllConst = true;

  for (unsigned i = 0, e = Values.size(); i != e; ++i) {
    SDValue V = Values[i];
    if (V.isUndef()) {
      Consts[i] = ConstantInt::get(IntTy, 0);
      continue;
    }
    // Make sure to always cast to IntTy.
    if (auto *CN = dyn_cast<ConstantSDNode>(V.getNode())) {
      const ConstantInt *CI = CN->getConstantIntValue();
      Consts[i] = ConstantInt::get(IntTy, CI->getValue().getSExtValue());
    } else if (auto *CN = dyn_cast<ConstantFPSDNode>(V.getNode())) {
      const ConstantFP *CF = CN->getConstantFPValue();
      APInt A = CF->getValueAPF().bitcastToAPInt();
      Consts[i] = ConstantInt::get(IntTy, A.getZExtValue());
    } else {
      AllConst = false;
    }
  }
  return AllConst;
}

SDValue
HexagonTargetLowering::buildVector32(ArrayRef<SDValue> Elem, const SDLoc &dl,
                                     MVT VecTy, SelectionDAG &DAG) const {
  MVT ElemTy = VecTy.getVectorElementType();
  assert(VecTy.getVectorNumElements() == Elem.size());

  SmallVector<ConstantInt*,4> Consts(Elem.size());
  bool AllConst = getBuildVectorConstInts(Elem, VecTy, DAG, Consts);

  unsigned First, Num = Elem.size();
  for (First = 0; First != Num; ++First) {
    if (!isUndef(Elem[First]))
      break;
  }
  if (First == Num)
    return DAG.getUNDEF(VecTy);

  if (AllConst &&
      llvm::all_of(Consts, [](ConstantInt *CI) { return CI->isZero(); }))
    return getZero(dl, VecTy, DAG);

  if (ElemTy == MVT::i16) {
    assert(Elem.size() == 2);
    if (AllConst) {
      uint32_t V = (Consts[0]->getZExtValue() & 0xFFFF) |
                   Consts[1]->getZExtValue() << 16;
      return DAG.getBitcast(MVT::v2i16, DAG.getConstant(V, dl, MVT::i32));
    }
    SDValue N = getInstr(Hexagon::A2_combine_ll, dl, MVT::i32,
                         {Elem[1], Elem[0]}, DAG);
    return DAG.getBitcast(MVT::v2i16, N);
  }
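
  // For example, a constant v2i16 {1, 2} packs into the i32 value 0x00020001,
  // which is then bitcast back to v2i16.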

  if (ElemTy == MVT::i8) {
    // First try generating a constant.
    if (AllConst) {
      int32_t V = (Consts[0]->getZExtValue() & 0xFF) |
                  (Consts[1]->getZExtValue() & 0xFF) << 8 |
                  (Consts[2]->getZExtValue() & 0xFF) << 16 |
                  Consts[3]->getZExtValue() << 24;
      return DAG.getBitcast(MVT::v4i8, DAG.getConstant(V, dl, MVT::i32));
    }

    // Then try splat.
    bool IsSplat = true;
    for (unsigned i = First+1; i != Num; ++i) {
      if (Elem[i] == Elem[First] || isUndef(Elem[i]))
        continue;
      IsSplat = false;
      break;
    }
    if (IsSplat) {
      // Legalize the operand of SPLAT_VECTOR.
      SDValue Ext = DAG.getZExtOrTrunc(Elem[First], dl, MVT::i32);
      return DAG.getNode(ISD::SPLAT_VECTOR, dl, VecTy, Ext);
    }

    // Generate
    //   (zxtb(Elem[0]) | (zxtb(Elem[1]) << 8)) |
    //   (zxtb(Elem[2]) | (zxtb(Elem[3]) << 8)) << 16
    assert(Elem.size() == 4);
    SDValue Vs[4];
    for (unsigned i = 0; i != 4; ++i) {
      Vs[i] = DAG.getZExtOrTrunc(Elem[i], dl, MVT::i32);
      Vs[i] = DAG.getZeroExtendInReg(Vs[i], dl, MVT::i8);
    }
    SDValue S8 = DAG.getConstant(8, dl, MVT::i32);
    SDValue T0 = DAG.getNode(ISD::SHL, dl, MVT::i32, {Vs[1], S8});
    SDValue T1 = DAG.getNode(ISD::SHL, dl, MVT::i32, {Vs[3], S8});
    SDValue B0 = DAG.getNode(ISD::OR, dl, MVT::i32, {Vs[0], T0});
    SDValue B1 = DAG.getNode(ISD::OR, dl, MVT::i32, {Vs[2], T1});

    SDValue R = getInstr(Hexagon::A2_combine_ll, dl, MVT::i32, {B1, B0}, DAG);
    return DAG.getBitcast(MVT::v4i8, R);
  }

#ifndef NDEBUG
  dbgs() << "VecTy: " << EVT(VecTy).getEVTString() << '\n';
#endif
  llvm_unreachable("Unexpected vector element type");
}

SDValue
HexagonTargetLowering::buildVector64(ArrayRef<SDValue> Elem, const SDLoc &dl,
                                     MVT VecTy, SelectionDAG &DAG) const {
  MVT ElemTy = VecTy.getVectorElementType();
  assert(VecTy.getVectorNumElements() == Elem.size());

  SmallVector<ConstantInt*,8> Consts(Elem.size());
  bool AllConst = getBuildVectorConstInts(Elem, VecTy, DAG, Consts);

  unsigned First, Num = Elem.size();
  for (First = 0; First != Num; ++First) {
    if (!isUndef(Elem[First]))
      break;
  }
  if (First == Num)
    return DAG.getUNDEF(VecTy);

  if (AllConst &&
      llvm::all_of(Consts, [](ConstantInt *CI) { return CI->isZero(); }))
    return getZero(dl, VecTy, DAG);

  // First try splat if possible.
  if (ElemTy == MVT::i16) {
    bool IsSplat = true;
    for (unsigned i = First+1; i != Num; ++i) {
      if (Elem[i] == Elem[First] || isUndef(Elem[i]))
        continue;
      IsSplat = false;
      break;
    }
    if (IsSplat) {
      // Legalize the operand of SPLAT_VECTOR.
      SDValue Ext = DAG.getZExtOrTrunc(Elem[First], dl, MVT::i32);
      return DAG.getNode(ISD::SPLAT_VECTOR, dl, VecTy, Ext);
    }
  }

  // Then try constant.
  if (AllConst) {
    uint64_t Val = 0;
    unsigned W = ElemTy.getSizeInBits();
    uint64_t Mask = (ElemTy == MVT::i8)  ? 0xFFull
                  : (ElemTy == MVT::i16) ? 0xFFFFull : 0xFFFFFFFFull;
    for (unsigned i = 0; i != Num; ++i)
      Val = (Val << W) | (Consts[Num-1-i]->getZExtValue() & Mask);
    SDValue V0 = DAG.getConstant(Val, dl, MVT::i64);
    return DAG.getBitcast(VecTy, V0);
  }
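
  // As an illustration: a constant v4i16 {1, 2, 3, 4} is assembled from the
  // last element down, giving the i64 constant 0x0004000300020001.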

  // Build two 32-bit vectors and concatenate.
  MVT HalfTy = MVT::getVectorVT(ElemTy, Num/2);
  SDValue L = (ElemTy == MVT::i32)
                ? Elem[0]
                : buildVector32(Elem.take_front(Num/2), dl, HalfTy, DAG);
  SDValue H = (ElemTy == MVT::i32)
                ? Elem[1]
                : buildVector32(Elem.drop_front(Num/2), dl, HalfTy, DAG);
  return DAG.getNode(HexagonISD::COMBINE, dl, VecTy, {H, L});
}

SDValue
HexagonTargetLowering::extractVector(SDValue VecV, SDValue IdxV,
                                     const SDLoc &dl, MVT ValTy, MVT ResTy,
                                     SelectionDAG &DAG) const {
  MVT VecTy = ty(VecV);
  assert(!ValTy.isVector() ||
         VecTy.getVectorElementType() == ValTy.getVectorElementType());
  unsigned VecWidth = VecTy.getSizeInBits();
  unsigned ValWidth = ValTy.getSizeInBits();
  unsigned ElemWidth = VecTy.getVectorElementType().getSizeInBits();
  assert((VecWidth % ElemWidth) == 0);
  auto *IdxN = dyn_cast<ConstantSDNode>(IdxV);

  // Special case for v{8,4,2}i1 (the only boolean vectors legal in Hexagon
  // without any coprocessors).
  if (ElemWidth == 1) {
    assert(VecWidth == VecTy.getVectorNumElements() && "Sanity failure");
    assert(VecWidth == 8 || VecWidth == 4 || VecWidth == 2);
    // Check if this is an extract of the lowest bit.
    if (IdxN) {
      // Extracting the lowest bit is a no-op, but it changes the type,
      // so it must be kept as an operation to avoid errors related to
      // type mismatches.
      if (IdxN->isNullValue() && ValTy.getSizeInBits() == 1)
        return DAG.getNode(HexagonISD::TYPECAST, dl, MVT::i1, VecV);
    }

    // If the value extracted is a single bit, use tstbit.
    if (ValWidth == 1) {
      SDValue A0 = getInstr(Hexagon::C2_tfrpr, dl, MVT::i32, {VecV}, DAG);
      SDValue M0 = DAG.getConstant(8 / VecWidth, dl, MVT::i32);
      SDValue I0 = DAG.getNode(ISD::MUL, dl, MVT::i32, IdxV, M0);
      return DAG.getNode(HexagonISD::TSTBIT, dl, MVT::i1, A0, I0);
    }

    // Each bool vector (v2i1, v4i1, v8i1) always occupies 8 bits in
    // a predicate register. The elements of the vector are repeated
    // in the register (if necessary) so that the total number is 8.
    // The extracted subvector will need to be expanded in such a way.
    unsigned Scale = VecWidth / ValWidth;

    // Generate (p2d VecV) >> 8*Idx to move the interesting bytes to
    // position 0.
    assert(ty(IdxV) == MVT::i32);
    unsigned VecRep = 8 / VecWidth;
    SDValue S0 = DAG.getNode(ISD::MUL, dl, MVT::i32, IdxV,
                             DAG.getConstant(8*VecRep, dl, MVT::i32));
    SDValue T0 = DAG.getNode(HexagonISD::P2D, dl, MVT::i64, VecV);
    SDValue T1 = DAG.getNode(ISD::SRL, dl, MVT::i64, T0, S0);
    while (Scale > 1) {
      // The longest possible subvector is at most 32 bits, so it is always
      // contained in the low subregister.
      T1 = DAG.getTargetExtractSubreg(Hexagon::isub_lo, dl, MVT::i32, T1);
      T1 = expandPredicate(T1, dl, DAG);
      Scale /= 2;
    }

    return DAG.getNode(HexagonISD::D2P, dl, ResTy, T1);
  }

  assert(VecWidth == 32 || VecWidth == 64);

  // Cast everything to scalar integer types.
  MVT ScalarTy = tyScalar(VecTy);
  VecV = DAG.getBitcast(ScalarTy, VecV);

  SDValue WidthV = DAG.getConstant(ValWidth, dl, MVT::i32);
  SDValue ExtV;

  if (IdxN) {
    unsigned Off = IdxN->getZExtValue() * ElemWidth;
    if (VecWidth == 64 && ValWidth == 32) {
      assert(Off == 0 || Off == 32);
      unsigned SubIdx = Off == 0 ? Hexagon::isub_lo : Hexagon::isub_hi;
      ExtV = DAG.getTargetExtractSubreg(SubIdx, dl, MVT::i32, VecV);
    } else if (Off == 0 && (ValWidth % 8) == 0) {
      ExtV = DAG.getZeroExtendInReg(VecV, dl, tyScalar(ValTy));
    } else {
      SDValue OffV = DAG.getConstant(Off, dl, MVT::i32);
      // The return type of EXTRACTU must be the same as the type of the
      // input vector.
      ExtV = DAG.getNode(HexagonISD::EXTRACTU, dl, ScalarTy,
                         {VecV, WidthV, OffV});
    }
  } else {
    if (ty(IdxV) != MVT::i32)
      IdxV = DAG.getZExtOrTrunc(IdxV, dl, MVT::i32);
    SDValue OffV = DAG.getNode(ISD::MUL, dl, MVT::i32, IdxV,
                               DAG.getConstant(ElemWidth, dl, MVT::i32));
    ExtV = DAG.getNode(HexagonISD::EXTRACTU, dl, ScalarTy,
                       {VecV, WidthV, OffV});
  }

  // Cast ExtV to the requested result type.
  ExtV = DAG.getZExtOrTrunc(ExtV, dl, tyScalar(ResTy));
  ExtV = DAG.getBitcast(ResTy, ExtV);
  return ExtV;
}
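
// For instance, extracting element 1 of a v2i16 held in a 32-bit register
// becomes EXTRACTU(VecV, width 16, offset 16): the offset is the index times
// the element width, as computed above.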

SDValue
HexagonTargetLowering::insertVector(SDValue VecV, SDValue ValV, SDValue IdxV,
                                    const SDLoc &dl, MVT ValTy,
                                    SelectionDAG &DAG) const {
  MVT VecTy = ty(VecV);
  if (VecTy.getVectorElementType() == MVT::i1) {
    MVT ValTy = ty(ValV);
    assert(ValTy.getVectorElementType() == MVT::i1);
    SDValue ValR = DAG.getNode(HexagonISD::P2D, dl, MVT::i64, ValV);
    unsigned VecLen = VecTy.getVectorNumElements();
    unsigned Scale = VecLen / ValTy.getVectorNumElements();
    assert(Scale > 1);

    for (unsigned R = Scale; R > 1; R /= 2) {
      ValR = contractPredicate(ValR, dl, DAG);
      ValR = DAG.getNode(HexagonISD::COMBINE, dl, MVT::i64,
                         DAG.getUNDEF(MVT::i32), ValR);
    }
    // The longest possible subvector is at most 32 bits, so it is always
    // contained in the low subregister.
    ValR = DAG.getTargetExtractSubreg(Hexagon::isub_lo, dl, MVT::i32, ValR);

    unsigned ValBytes = 64 / Scale;
    SDValue Width = DAG.getConstant(ValBytes*8, dl, MVT::i32);
    SDValue Idx = DAG.getNode(ISD::MUL, dl, MVT::i32, IdxV,
                              DAG.getConstant(8, dl, MVT::i32));
    SDValue VecR = DAG.getNode(HexagonISD::P2D, dl, MVT::i64, VecV);
    SDValue Ins = DAG.getNode(HexagonISD::INSERT, dl, MVT::i32,
                              {VecR, ValR, Width, Idx});
    return DAG.getNode(HexagonISD::D2P, dl, VecTy, Ins);
  }

  unsigned VecWidth = VecTy.getSizeInBits();
  unsigned ValWidth = ValTy.getSizeInBits();
  assert(VecWidth == 32 || VecWidth == 64);
  assert((VecWidth % ValWidth) == 0);

  // Cast everything to scalar integer types.
  MVT ScalarTy = MVT::getIntegerVT(VecWidth);
  // The actual type of ValV may be different than ValTy (which is related
  // to the vector type).
  unsigned VW = ty(ValV).getSizeInBits();
  ValV = DAG.getBitcast(MVT::getIntegerVT(VW), ValV);
  VecV = DAG.getBitcast(ScalarTy, VecV);
  if (VW != VecWidth)
    ValV = DAG.getAnyExtOrTrunc(ValV, dl, ScalarTy);

  SDValue WidthV = DAG.getConstant(ValWidth, dl, MVT::i32);
  SDValue InsV;

  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(IdxV)) {
    unsigned W = C->getZExtValue() * ValWidth;
    SDValue OffV = DAG.getConstant(W, dl, MVT::i32);
    InsV = DAG.getNode(HexagonISD::INSERT, dl, ScalarTy,
                       {VecV, ValV, WidthV, OffV});
  } else {
    if (ty(IdxV) != MVT::i32)
      IdxV = DAG.getZExtOrTrunc(IdxV, dl, MVT::i32);
    SDValue OffV = DAG.getNode(ISD::MUL, dl, MVT::i32, IdxV, WidthV);
    InsV = DAG.getNode(HexagonISD::INSERT, dl, ScalarTy,
                       {VecV, ValV, WidthV, OffV});
  }

  return DAG.getNode(ISD::BITCAST, dl, VecTy, InsV);
}
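
// For instance, inserting an i16 into lane 2 of a v4i16 becomes
// INSERT(VecV, ValV, width 16, offset 32): offset = index * value width.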

SDValue
HexagonTargetLowering::expandPredicate(SDValue Vec32, const SDLoc &dl,
                                       SelectionDAG &DAG) const {
  assert(ty(Vec32).getSizeInBits() == 32);
  if (isUndef(Vec32))
    return DAG.getUNDEF(MVT::i64);
  return getInstr(Hexagon::S2_vsxtbh, dl, MVT::i64, {Vec32}, DAG);
}

SDValue
HexagonTargetLowering::contractPredicate(SDValue Vec64, const SDLoc &dl,
                                         SelectionDAG &DAG) const {
  assert(ty(Vec64).getSizeInBits() == 64);
  if (isUndef(Vec64))
    return DAG.getUNDEF(MVT::i32);
  return getInstr(Hexagon::S2_vtrunehb, dl, MVT::i32, {Vec64}, DAG);
}

SDValue
HexagonTargetLowering::getZero(const SDLoc &dl, MVT Ty, SelectionDAG &DAG)
      const {
  if (Ty.isVector()) {
    assert(Ty.isInteger() && "Only integer vectors are supported here");
    unsigned W = Ty.getSizeInBits();
    if (W <= 64)
      return DAG.getBitcast(Ty, DAG.getConstant(0, dl, MVT::getIntegerVT(W)));
    return DAG.getNode(ISD::SPLAT_VECTOR, dl, Ty, getZero(dl, MVT::i32, DAG));
  }

  if (Ty.isInteger())
    return DAG.getConstant(0, dl, Ty);
  if (Ty.isFloatingPoint())
    return DAG.getConstantFP(0.0, dl, Ty);
  llvm_unreachable("Invalid type for zero");
}

SDValue
HexagonTargetLowering::appendUndef(SDValue Val, MVT ResTy, SelectionDAG &DAG)
      const {
  MVT ValTy = ty(Val);
  assert(ValTy.getVectorElementType() == ResTy.getVectorElementType());

  unsigned ValLen = ValTy.getVectorNumElements();
  unsigned ResLen = ResTy.getVectorNumElements();
  if (ValLen == ResLen)
    return Val;

  const SDLoc &dl(Val);
  assert(ValLen < ResLen);
  assert(ResLen % ValLen == 0);

  SmallVector<SDValue, 4> Concats = {Val};
  for (unsigned i = 1, e = ResLen / ValLen; i < e; ++i)
    Concats.push_back(DAG.getUNDEF(ValTy));

  return DAG.getNode(ISD::CONCAT_VECTORS, dl, ResTy, Concats);
}
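
// E.g. appendUndef of a v2i16 to a v8i16 result yields
// concat_vectors(Val, undef:v2i16, undef:v2i16, undef:v2i16).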

SDValue
HexagonTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
  MVT VecTy = ty(Op);
  unsigned BW = VecTy.getSizeInBits();
  const SDLoc &dl(Op);
  SmallVector<SDValue,8> Ops;
  for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i)
    Ops.push_back(Op.getOperand(i));

  if (BW == 32)
    return buildVector32(Ops, dl, VecTy, DAG);
  if (BW == 64)
    return buildVector64(Ops, dl, VecTy, DAG);

  if (VecTy == MVT::v8i1 || VecTy == MVT::v4i1 || VecTy == MVT::v2i1) {
    // Check if this is a special case of all-0 or all-1.
    bool All0 = true, All1 = true;
    for (SDValue P : Ops) {
      auto *CN = dyn_cast<ConstantSDNode>(P.getNode());
      if (CN == nullptr) {
        All0 = All1 = false;
        break;
      }
      uint32_t C = CN->getZExtValue();
      All0 &= (C == 0);
      All1 &= (C == 1);
    }
    if (All0)
      return DAG.getNode(HexagonISD::PFALSE, dl, VecTy);
    if (All1)
      return DAG.getNode(HexagonISD::PTRUE, dl, VecTy);

    // For each i1 element in the resulting predicate register, put 1
    // shifted by the index of the element into a general-purpose register,
    // then OR them together and transfer the result back into a predicate
    // register.
    SDValue Rs[8];
    SDValue Z = getZero(dl, MVT::i32, DAG);
    // Always produce 8 bits, repeat inputs if necessary.
    unsigned Rep = 8 / VecTy.getVectorNumElements();
    for (unsigned i = 0; i != 8; ++i) {
      SDValue S = DAG.getConstant(1ull << i, dl, MVT::i32);
      Rs[i] = DAG.getSelect(dl, MVT::i32, Ops[i/Rep], S, Z);
    }
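
    // E.g. for v4i1 {a,b,c,d}, Rep == 2, so each input drives two adjacent
    // bits and the 8 predicate bits are a,a,b,b,c,c,d,d (LSB first).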
    for (ArrayRef<SDValue> A(Rs); A.size() != 1; A = A.drop_back(A.size()/2)) {
      for (unsigned i = 0, e = A.size()/2; i != e; ++i)
        Rs[i] = DAG.getNode(ISD::OR, dl, MVT::i32, Rs[2*i], Rs[2*i+1]);
    }
    // Move the value directly to a predicate register.
    return getInstr(Hexagon::C2_tfrrp, dl, VecTy, {Rs[0]}, DAG);
  }

  return SDValue();
}
2747
2748 SDValue
LowerCONCAT_VECTORS(SDValue Op,SelectionDAG & DAG) const2749 HexagonTargetLowering::LowerCONCAT_VECTORS(SDValue Op,
2750 SelectionDAG &DAG) const {
2751 MVT VecTy = ty(Op);
2752 const SDLoc &dl(Op);
2753 if (VecTy.getSizeInBits() == 64) {
2754 assert(Op.getNumOperands() == 2);
2755 return DAG.getNode(HexagonISD::COMBINE, dl, VecTy, Op.getOperand(1),
2756 Op.getOperand(0));
2757 }
2758
2759 MVT ElemTy = VecTy.getVectorElementType();
2760 if (ElemTy == MVT::i1) {
2761 assert(VecTy == MVT::v2i1 || VecTy == MVT::v4i1 || VecTy == MVT::v8i1);
2762 MVT OpTy = ty(Op.getOperand(0));
2763 // Scale is how many times the operands need to be contracted to match
2764 // the representation in the target register.
2765 unsigned Scale = VecTy.getVectorNumElements() / OpTy.getVectorNumElements();
2766 assert(Scale == Op.getNumOperands() && Scale > 1);
2767
2768 // First, convert all bool vectors to integers, then generate pairwise
2769 // inserts to form values of doubled length. Up until there are only
2770 // two values left to concatenate, all of these values will fit in a
2771 // 32-bit integer, so keep them as i32 to use 32-bit inserts.
2772 SmallVector<SDValue,4> Words[2];
2773 unsigned IdxW = 0;
2774
2775 for (SDValue P : Op.getNode()->op_values()) {
2776 SDValue W = DAG.getNode(HexagonISD::P2D, dl, MVT::i64, P);
2777 for (unsigned R = Scale; R > 1; R /= 2) {
2778 W = contractPredicate(W, dl, DAG);
2779 W = DAG.getNode(HexagonISD::COMBINE, dl, MVT::i64,
2780 DAG.getUNDEF(MVT::i32), W);
2781 }
2782 W = DAG.getTargetExtractSubreg(Hexagon::isub_lo, dl, MVT::i32, W);
2783 Words[IdxW].push_back(W);
2784 }
2785
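    // Repeatedly insert adjacent words into each other, halving the number
    // of significant words (and Scale) each iteration, until two remain.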
2786 while (Scale > 2) {
2787 SDValue WidthV = DAG.getConstant(64 / Scale, dl, MVT::i32);
2788 Words[IdxW ^ 1].clear();
2789
2790 for (unsigned i = 0, e = Words[IdxW].size(); i != e; i += 2) {
2791 SDValue W0 = Words[IdxW][i], W1 = Words[IdxW][i+1];
2792 // Insert W1 into W0 right next to the significant bits of W0.
2793 SDValue T = DAG.getNode(HexagonISD::INSERT, dl, MVT::i32,
2794 {W0, W1, WidthV, WidthV});
2795 Words[IdxW ^ 1].push_back(T);
2796 }
2797 IdxW ^= 1;
2798 Scale /= 2;
2799 }
2800
2801 // Another sanity check. At this point there should only be two words
2802 // left, and Scale should be 2.
2803 assert(Scale == 2 && Words[IdxW].size() == 2);
2804
2805 SDValue WW = DAG.getNode(HexagonISD::COMBINE, dl, MVT::i64,
2806 Words[IdxW][1], Words[IdxW][0]);
2807 return DAG.getNode(HexagonISD::D2P, dl, VecTy, WW);
2808 }
2809
2810 return SDValue();
2811 }
2812
2813 SDValue
2814 HexagonTargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
2815 SelectionDAG &DAG) const {
2816 SDValue Vec = Op.getOperand(0);
2817 MVT ElemTy = ty(Vec).getVectorElementType();
2818 return extractVector(Vec, Op.getOperand(1), SDLoc(Op), ElemTy, ty(Op), DAG);
2819 }
2820
2821 SDValue
2822 HexagonTargetLowering::LowerEXTRACT_SUBVECTOR(SDValue Op,
2823 SelectionDAG &DAG) const {
2824 return extractVector(Op.getOperand(0), Op.getOperand(1), SDLoc(Op),
2825 ty(Op), ty(Op), DAG);
2826 }
2827
2828 SDValue
2829 HexagonTargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
2830 SelectionDAG &DAG) const {
2831 return insertVector(Op.getOperand(0), Op.getOperand(1), Op.getOperand(2),
2832 SDLoc(Op), ty(Op).getVectorElementType(), DAG);
2833 }
2834
2835 SDValue
2836 HexagonTargetLowering::LowerINSERT_SUBVECTOR(SDValue Op,
2837 SelectionDAG &DAG) const {
2838 SDValue ValV = Op.getOperand(1);
2839 return insertVector(Op.getOperand(0), ValV, Op.getOperand(2),
2840 SDLoc(Op), ty(ValV), DAG);
2841 }
2842
2843 bool
2844 HexagonTargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const {
2845 // Assuming the caller does not have either a signext or zeroext modifier, and
2846 // only one value is accepted, any reasonable truncation is allowed.
2847 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
2848 return false;
2849
2850 // FIXME: in principle up to 64-bit could be made safe, but it would be very
2851 // fragile at the moment: any support for multiple value returns would be
2852 // liable to disallow tail calls involving i64 -> iN truncation in many cases.
2853 return Ty1->getPrimitiveSizeInBits() <= 32;
2854 }
2855
2856 SDValue
2857 HexagonTargetLowering::LowerLoad(SDValue Op, SelectionDAG &DAG) const {
2858 LoadSDNode *LN = cast<LoadSDNode>(Op.getNode());
2859 unsigned ClaimAlign = LN->getAlignment();
2860 validateConstPtrAlignment(LN->getBasePtr(), SDLoc(Op), ClaimAlign);
2861   // Call LowerUnalignedLoad for all loads; it recognizes loads that
2862 // don't need extra aligning.
2863 return LowerUnalignedLoad(Op, DAG);
2864 }
2865
2866 SDValue
2867 HexagonTargetLowering::LowerStore(SDValue Op, SelectionDAG &DAG) const {
2868 StoreSDNode *SN = cast<StoreSDNode>(Op.getNode());
2869 unsigned ClaimAlign = SN->getAlignment();
2870 SDValue Ptr = SN->getBasePtr();
2871 const SDLoc &dl(Op);
2872 validateConstPtrAlignment(Ptr, dl, ClaimAlign);
2873
2874 MVT StoreTy = SN->getMemoryVT().getSimpleVT();
2875 unsigned NeedAlign = Subtarget.getTypeAlignment(StoreTy);
2876 if (ClaimAlign < NeedAlign)
2877 return expandUnalignedStore(SN, DAG);
2878 return Op;
2879 }
2880
2881 SDValue
2882 HexagonTargetLowering::LowerUnalignedLoad(SDValue Op, SelectionDAG &DAG)
2883 const {
2884 LoadSDNode *LN = cast<LoadSDNode>(Op.getNode());
2885 MVT LoadTy = ty(Op);
2886 unsigned NeedAlign = Subtarget.getTypeAlignment(LoadTy);
2887 unsigned HaveAlign = LN->getAlignment();
2888 if (HaveAlign >= NeedAlign)
2889 return Op;
2890
2891 const SDLoc &dl(Op);
2892 const DataLayout &DL = DAG.getDataLayout();
2893 LLVMContext &Ctx = *DAG.getContext();
2894
2895 // If the load aligning is disabled or the load can be broken up into two
2896 // smaller legal loads, do the default (target-independent) expansion.
2897 bool DoDefault = false;
2898 // Handle it in the default way if this is an indexed load.
2899 if (!LN->isUnindexed())
2900 DoDefault = true;
2901
2902 if (!AlignLoads) {
2903 if (allowsMemoryAccessForAlignment(Ctx, DL, LN->getMemoryVT(),
2904 *LN->getMemOperand()))
2905 return Op;
2906 DoDefault = true;
2907 }
2908 if (!DoDefault && (2 * HaveAlign) == NeedAlign) {
2909 // The PartTy is the equivalent of "getLoadableTypeOfSize(HaveAlign)".
2910 MVT PartTy = HaveAlign <= 8 ? MVT::getIntegerVT(8 * HaveAlign)
2911 : MVT::getVectorVT(MVT::i8, HaveAlign);
2912 DoDefault =
2913 allowsMemoryAccessForAlignment(Ctx, DL, PartTy, *LN->getMemOperand());
2914 }
2915 if (DoDefault) {
2916 std::pair<SDValue, SDValue> P = expandUnalignedLoad(LN, DAG);
2917 return DAG.getMergeValues({P.first, P.second}, dl);
2918 }
2919
2920   // The code below generates two loads, both aligned to NeedAlign, and
2921   // spaced NeedAlign bytes apart. For that to cover the
2922 // bits that need to be loaded (and without overlapping), the size of
2923 // the loads should be equal to NeedAlign. This is true for all loadable
2924 // types, but add an assertion in case something changes in the future.
2925 assert(LoadTy.getSizeInBits() == 8*NeedAlign);
2926
2927 unsigned LoadLen = NeedAlign;
2928 SDValue Base = LN->getBasePtr();
2929 SDValue Chain = LN->getChain();
2930 auto BO = getBaseAndOffset(Base);
2931 unsigned BaseOpc = BO.first.getOpcode();
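  // The base address is already in the aligning form (VALIGNADDR) and the
  // offset preserves the alignment, so the load can be left unchanged.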
2932 if (BaseOpc == HexagonISD::VALIGNADDR && BO.second % LoadLen == 0)
2933 return Op;
2934
2935 if (BO.second % LoadLen != 0) {
2936 BO.first = DAG.getNode(ISD::ADD, dl, MVT::i32, BO.first,
2937 DAG.getConstant(BO.second % LoadLen, dl, MVT::i32));
2938 BO.second -= BO.second % LoadLen;
2939 }
2940 SDValue BaseNoOff = (BaseOpc != HexagonISD::VALIGNADDR)
2941 ? DAG.getNode(HexagonISD::VALIGNADDR, dl, MVT::i32, BO.first,
2942 DAG.getConstant(NeedAlign, dl, MVT::i32))
2943 : BO.first;
2944 SDValue Base0 =
2945 DAG.getMemBasePlusOffset(BaseNoOff, TypeSize::Fixed(BO.second), dl);
2946 SDValue Base1 = DAG.getMemBasePlusOffset(
2947 BaseNoOff, TypeSize::Fixed(BO.second + LoadLen), dl);
2948
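  // Use a single memory operand spanning both component loads (2*LoadLen
  // bytes at LoadLen alignment), so they are treated as one wide access.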
2949 MachineMemOperand *WideMMO = nullptr;
2950 if (MachineMemOperand *MMO = LN->getMemOperand()) {
2951 MachineFunction &MF = DAG.getMachineFunction();
2952 WideMMO = MF.getMachineMemOperand(
2953 MMO->getPointerInfo(), MMO->getFlags(), 2 * LoadLen, Align(LoadLen),
2954 MMO->getAAInfo(), MMO->getRanges(), MMO->getSyncScopeID(),
2955 MMO->getOrdering(), MMO->getFailureOrdering());
2956 }
2957
2958 SDValue Load0 = DAG.getLoad(LoadTy, dl, Chain, Base0, WideMMO);
2959 SDValue Load1 = DAG.getLoad(LoadTy, dl, Chain, Base1, WideMMO);
2960
2961 SDValue Aligned = DAG.getNode(HexagonISD::VALIGN, dl, LoadTy,
2962 {Load1, Load0, BaseNoOff.getOperand(0)});
2963 SDValue NewChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
2964 Load0.getValue(1), Load1.getValue(1));
2965 SDValue M = DAG.getMergeValues({Aligned, NewChain}, dl);
2966 return M;
2967 }
2968
2969 SDValue
2970 HexagonTargetLowering::LowerUAddSubO(SDValue Op, SelectionDAG &DAG) const {
2971 SDValue X = Op.getOperand(0), Y = Op.getOperand(1);
2972 auto *CY = dyn_cast<ConstantSDNode>(Y);
2973 if (!CY)
2974 return SDValue();
2975
2976 const SDLoc &dl(Op);
2977 SDVTList VTs = Op.getNode()->getVTList();
2978 assert(VTs.NumVTs == 2);
2979 assert(VTs.VTs[1] == MVT::i1);
2980 unsigned Opc = Op.getOpcode();
2981
2982 if (CY) {
2983 uint32_t VY = CY->getZExtValue();
2984 assert(VY != 0 && "This should have been folded");
2985 // X +/- 1
2986 if (VY != 1)
2987 return SDValue();
2988
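    // Adding 1 overflows exactly when the result wraps to 0, and subtracting
    // 1 underflows exactly when the result wraps to -1, so the overflow bit
    // can be computed by comparing against those constants.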
2989 if (Opc == ISD::UADDO) {
2990 SDValue Op = DAG.getNode(ISD::ADD, dl, VTs.VTs[0], {X, Y});
2991 SDValue Ov = DAG.getSetCC(dl, MVT::i1, Op, getZero(dl, ty(Op), DAG),
2992 ISD::SETEQ);
2993 return DAG.getMergeValues({Op, Ov}, dl);
2994 }
2995 if (Opc == ISD::USUBO) {
2996 SDValue Op = DAG.getNode(ISD::SUB, dl, VTs.VTs[0], {X, Y});
2997 SDValue Ov = DAG.getSetCC(dl, MVT::i1, Op,
2998 DAG.getConstant(-1, dl, ty(Op)), ISD::SETEQ);
2999 return DAG.getMergeValues({Op, Ov}, dl);
3000 }
3001 }
3002
3003 return SDValue();
3004 }
3005
3006 SDValue
3007 HexagonTargetLowering::LowerAddSubCarry(SDValue Op, SelectionDAG &DAG) const {
3008 const SDLoc &dl(Op);
3009 unsigned Opc = Op.getOpcode();
3010 SDValue X = Op.getOperand(0), Y = Op.getOperand(1), C = Op.getOperand(2);
3011
3012 if (Opc == ISD::ADDCARRY)
3013 return DAG.getNode(HexagonISD::ADDC, dl, Op.getNode()->getVTList(),
3014 { X, Y, C });
3015
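  // HexagonISD::SUBC consumes and produces a carry bit, the logical
  // complement of the borrow used by ISD::SUBCARRY, so invert the bit on
  // the way in and on the way out.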
3016 EVT CarryTy = C.getValueType();
3017 SDValue SubC = DAG.getNode(HexagonISD::SUBC, dl, Op.getNode()->getVTList(),
3018 { X, Y, DAG.getLogicalNOT(dl, C, CarryTy) });
3019 SDValue Out[] = { SubC.getValue(0),
3020 DAG.getLogicalNOT(dl, SubC.getValue(1), CarryTy) };
3021 return DAG.getMergeValues(Out, dl);
3022 }
3023
3024 SDValue
3025 HexagonTargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
3026 SDValue Chain = Op.getOperand(0);
3027 SDValue Offset = Op.getOperand(1);
3028 SDValue Handler = Op.getOperand(2);
3029 SDLoc dl(Op);
3030 auto PtrVT = getPointerTy(DAG.getDataLayout());
3031
3032 // Mark function as containing a call to EH_RETURN.
3033 HexagonMachineFunctionInfo *FuncInfo =
3034 DAG.getMachineFunction().getInfo<HexagonMachineFunctionInfo>();
3035 FuncInfo->setHasEHReturn();
3036
3037 unsigned OffsetReg = Hexagon::R28;
3038
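  // Overwrite the saved return address (at FP+4) with the handler address,
  // and pass the stack adjustment to EH_RETURN in R28.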
3039 SDValue StoreAddr =
3040 DAG.getNode(ISD::ADD, dl, PtrVT, DAG.getRegister(Hexagon::R30, PtrVT),
3041 DAG.getIntPtrConstant(4, dl));
3042 Chain = DAG.getStore(Chain, dl, Handler, StoreAddr, MachinePointerInfo());
3043 Chain = DAG.getCopyToReg(Chain, dl, OffsetReg, Offset);
3044
3045   // Not needed: we already use it as an explicit input to EH_RETURN.
3046 // MF.getRegInfo().addLiveOut(OffsetReg);
3047
3048 return DAG.getNode(HexagonISD::EH_RETURN, dl, MVT::Other, Chain);
3049 }
3050
3051 SDValue
3052 HexagonTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
3053 unsigned Opc = Op.getOpcode();
3054
3055 // Handle INLINEASM first.
3056 if (Opc == ISD::INLINEASM || Opc == ISD::INLINEASM_BR)
3057 return LowerINLINEASM(Op, DAG);
3058
3059 if (isHvxOperation(Op.getNode(), DAG)) {
3060 // If HVX lowering returns nothing, try the default lowering.
3061 if (SDValue V = LowerHvxOperation(Op, DAG))
3062 return V;
3063 }
3064
3065 switch (Opc) {
3066 default:
3067 #ifndef NDEBUG
3068 Op.getNode()->dumpr(&DAG);
3069 if (Opc > HexagonISD::OP_BEGIN && Opc < HexagonISD::OP_END)
3070 errs() << "Error: check for a non-legal type in this operation\n";
3071 #endif
3072 llvm_unreachable("Should not custom lower this!");
3073 case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
3074 case ISD::INSERT_SUBVECTOR: return LowerINSERT_SUBVECTOR(Op, DAG);
3075 case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG);
3076 case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op, DAG);
3077 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
3078 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG);
3079 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG);
3080 case ISD::BITCAST: return LowerBITCAST(Op, DAG);
3081 case ISD::LOAD: return LowerLoad(Op, DAG);
3082 case ISD::STORE: return LowerStore(Op, DAG);
3083 case ISD::UADDO:
3084 case ISD::USUBO: return LowerUAddSubO(Op, DAG);
3085 case ISD::ADDCARRY:
3086 case ISD::SUBCARRY: return LowerAddSubCarry(Op, DAG);
3087 case ISD::SRA:
3088 case ISD::SHL:
3089 case ISD::SRL: return LowerVECTOR_SHIFT(Op, DAG);
3090 case ISD::ROTL: return LowerROTL(Op, DAG);
3091 case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
3092 case ISD::JumpTable: return LowerJumpTable(Op, DAG);
3093 case ISD::EH_RETURN: return LowerEH_RETURN(Op, DAG);
3094 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
3095 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
3096 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
3097 case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, DAG);
3098 case ISD::GlobalAddress: return LowerGLOBALADDRESS(Op, DAG);
3099 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
3100 case ISD::GLOBAL_OFFSET_TABLE: return LowerGLOBAL_OFFSET_TABLE(Op, DAG);
3101 case ISD::VACOPY: return LowerVACOPY(Op, DAG);
3102 case ISD::VASTART: return LowerVASTART(Op, DAG);
3103 case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
3104 case ISD::SETCC: return LowerSETCC(Op, DAG);
3105 case ISD::VSELECT: return LowerVSELECT(Op, DAG);
3106 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
3107 case ISD::INTRINSIC_VOID: return LowerINTRINSIC_VOID(Op, DAG);
3108 case ISD::PREFETCH: return LowerPREFETCH(Op, DAG);
3109 case ISD::READCYCLECOUNTER: return LowerREADCYCLECOUNTER(Op, DAG);
3110 break;
3111 }
3112
3113 return SDValue();
3114 }
3115
3116 void
3117 HexagonTargetLowering::LowerOperationWrapper(SDNode *N,
3118 SmallVectorImpl<SDValue> &Results,
3119 SelectionDAG &DAG) const {
3120 if (isHvxOperation(N, DAG)) {
3121 LowerHvxOperationWrapper(N, Results, DAG);
3122 if (!Results.empty())
3123 return;
3124 }
3125
3126 // We are only custom-lowering stores to verify the alignment of the
3127 // address if it is a compile-time constant. Since a store can be modified
3128 // during type-legalization (the value being stored may need legalization),
3129 // return empty Results here to indicate that we don't really make any
3130 // changes in the custom lowering.
3131 if (N->getOpcode() != ISD::STORE)
3132 return TargetLowering::LowerOperationWrapper(N, Results, DAG);
3133 }
3134
3135 void
3136 HexagonTargetLowering::ReplaceNodeResults(SDNode *N,
3137 SmallVectorImpl<SDValue> &Results,
3138 SelectionDAG &DAG) const {
3139 if (isHvxOperation(N, DAG)) {
3140 ReplaceHvxNodeResults(N, Results, DAG);
3141 if (!Results.empty())
3142 return;
3143 }
3144
3145 const SDLoc &dl(N);
3146 switch (N->getOpcode()) {
3147 case ISD::SRL:
3148 case ISD::SRA:
3149 case ISD::SHL:
3150 return;
3151 case ISD::BITCAST:
3152 // Handle a bitcast from v8i1 to i8.
3153 if (N->getValueType(0) == MVT::i8) {
3154 if (N->getOperand(0).getValueType() == MVT::v8i1) {
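        // Copy the v8i1 predicate into a 32-bit register, then truncate
        // the low byte to i8.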
3155 SDValue P = getInstr(Hexagon::C2_tfrpr, dl, MVT::i32,
3156 N->getOperand(0), DAG);
3157 SDValue T = DAG.getAnyExtOrTrunc(P, dl, MVT::i8);
3158 Results.push_back(T);
3159 }
3160 }
3161 break;
3162 }
3163 }
3164
3165 SDValue
3166 HexagonTargetLowering::PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI)
3167 const {
3168 if (isHvxOperation(N, DCI.DAG)) {
3169 if (SDValue V = PerformHvxDAGCombine(N, DCI))
3170 return V;
3171 return SDValue();
3172 }
3173
3174 if (DCI.isBeforeLegalizeOps())
3175 return SDValue();
3176
3177 SDValue Op(N, 0);
3178 const SDLoc &dl(Op);
3179 unsigned Opc = Op.getOpcode();
3180
3181 if (Opc == HexagonISD::P2D) {
3182 SDValue P = Op.getOperand(0);
3183 switch (P.getOpcode()) {
3184 case HexagonISD::PTRUE:
3185 return DCI.DAG.getConstant(-1, dl, ty(Op));
3186 case HexagonISD::PFALSE:
3187 return getZero(dl, ty(Op), DCI.DAG);
3188 default:
3189 break;
3190 }
3191 } else if (Opc == ISD::VSELECT) {
3192 // This is pretty much duplicated in HexagonISelLoweringHVX...
3193 //
3194 // (vselect (xor x, ptrue), v0, v1) -> (vselect x, v1, v0)
3195 SDValue Cond = Op.getOperand(0);
3196 if (Cond->getOpcode() == ISD::XOR) {
3197 SDValue C0 = Cond.getOperand(0), C1 = Cond.getOperand(1);
3198 if (C1->getOpcode() == HexagonISD::PTRUE) {
3199 SDValue VSel = DCI.DAG.getNode(ISD::VSELECT, dl, ty(Op), C0,
3200 Op.getOperand(2), Op.getOperand(1));
3201 return VSel;
3202 }
3203 }
3204 }
3205
3206 return SDValue();
3207 }
3208
3209 /// Returns relocation base for the given PIC jumptable.
3210 SDValue
3211 HexagonTargetLowering::getPICJumpTableRelocBase(SDValue Table,
3212 SelectionDAG &DAG) const {
3213 int Idx = cast<JumpTableSDNode>(Table)->getIndex();
3214 EVT VT = Table.getValueType();
3215 SDValue T = DAG.getTargetJumpTable(Idx, VT, HexagonII::MO_PCREL);
3216 return DAG.getNode(HexagonISD::AT_PCREL, SDLoc(Table), VT, T);
3217 }
3218
3219 //===----------------------------------------------------------------------===//
3220 // Inline Assembly Support
3221 //===----------------------------------------------------------------------===//
3222
3223 TargetLowering::ConstraintType
3224 HexagonTargetLowering::getConstraintType(StringRef Constraint) const {
3225 if (Constraint.size() == 1) {
3226 switch (Constraint[0]) {
3227 case 'q':
3228 case 'v':
3229 if (Subtarget.useHVXOps())
3230 return C_RegisterClass;
3231 break;
3232 case 'a':
3233 return C_RegisterClass;
3234 default:
3235 break;
3236 }
3237 }
3238 return TargetLowering::getConstraintType(Constraint);
3239 }
3240
3241 std::pair<unsigned, const TargetRegisterClass*>
3242 HexagonTargetLowering::getRegForInlineAsmConstraint(
3243 const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {
3244
3245 if (Constraint.size() == 1) {
3246 switch (Constraint[0]) {
3247 case 'r': // R0-R31
3248 switch (VT.SimpleTy) {
3249 default:
3250 return {0u, nullptr};
3251 case MVT::i1:
3252 case MVT::i8:
3253 case MVT::i16:
3254 case MVT::i32:
3255 case MVT::f32:
3256 return {0u, &Hexagon::IntRegsRegClass};
3257 case MVT::i64:
3258 case MVT::f64:
3259 return {0u, &Hexagon::DoubleRegsRegClass};
3260 }
3261 break;
3262 case 'a': // M0-M1
3263 if (VT != MVT::i32)
3264 return {0u, nullptr};
3265 return {0u, &Hexagon::ModRegsRegClass};
3266 case 'q': // q0-q3
3267 switch (VT.getSizeInBits()) {
3268 default:
3269 return {0u, nullptr};
3270 case 64:
3271 case 128:
3272 return {0u, &Hexagon::HvxQRRegClass};
3273 }
3274 break;
3275 case 'v': // V0-V31
3276 switch (VT.getSizeInBits()) {
3277 default:
3278 return {0u, nullptr};
3279 case 512:
3280 return {0u, &Hexagon::HvxVRRegClass};
3281 case 1024:
3282 if (Subtarget.hasV60Ops() && Subtarget.useHVX128BOps())
3283 return {0u, &Hexagon::HvxVRRegClass};
3284 return {0u, &Hexagon::HvxWRRegClass};
3285 case 2048:
3286 return {0u, &Hexagon::HvxWRRegClass};
3287 }
3288 break;
3289 default:
3290 return {0u, nullptr};
3291 }
3292 }
3293
3294 return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
3295 }
3296
3297 /// isFPImmLegal - Returns true if the target can instruction select the
3298 /// specified FP immediate natively. If false, the legalizer will
3299 /// materialize the FP immediate as a load from a constant pool.
3300 bool HexagonTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
3301 bool ForCodeSize) const {
3302 return true;
3303 }
3304
3305 /// isLegalAddressingMode - Return true if the addressing mode represented by
3306 /// AM is legal for this target, for a load/store of the specified type.
3307 bool HexagonTargetLowering::isLegalAddressingMode(const DataLayout &DL,
3308 const AddrMode &AM, Type *Ty,
3309 unsigned AS, Instruction *I) const {
3310 if (Ty->isSized()) {
3311 // When LSR detects uses of the same base address to access different
3312 // types (e.g. unions), it will assume a conservative type for these
3313 // uses:
3314 // LSR Use: Kind=Address of void in addrspace(4294967295), ...
3315 // The type Ty passed here would then be "void". Skip the alignment
3316 // checks, but do not return false right away, since that confuses
3317 // LSR into crashing.
3318 Align A = DL.getABITypeAlign(Ty);
3319 // The base offset must be a multiple of the alignment.
3320 if (!isAligned(A, AM.BaseOffs))
3321 return false;
3322 // The shifted offset must fit in 11 bits.
3323 if (!isInt<11>(AM.BaseOffs >> Log2(A)))
3324 return false;
3325 }
3326
3327 // No global is ever allowed as a base.
3328 if (AM.BaseGV)
3329 return false;
3330
3331 int Scale = AM.Scale;
3332 if (Scale < 0)
3333 Scale = -Scale;
3334 switch (Scale) {
3335 case 0: // No scale reg, "r+i", "r", or just "i".
3336 break;
3337 default: // No scaled addressing mode.
3338 return false;
3339 }
3340 return true;
3341 }
3342
3343 /// Return true if folding a constant offset with the given GlobalAddress is
3344 /// legal. It is frequently not legal in PIC relocation models.
3345 bool HexagonTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA)
3346 const {
3347 return HTM.getRelocationModel() == Reloc::Static;
3348 }
3349
3350 /// isLegalICmpImmediate - Return true if the specified immediate is legal
3351 /// icmp immediate, that is the target has icmp instructions which can compare
3352 /// a register against the immediate without having to materialize the
3353 /// immediate into a register.
3354 bool HexagonTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
3355 return Imm >= -512 && Imm <= 511;
3356 }
3357
3358 /// IsEligibleForTailCallOptimization - Check whether the call is eligible
3359 /// for tail call optimization. Targets which want to do tail call
3360 /// optimization should implement this function.
3361 bool HexagonTargetLowering::IsEligibleForTailCallOptimization(
3362 SDValue Callee,
3363 CallingConv::ID CalleeCC,
3364 bool IsVarArg,
3365 bool IsCalleeStructRet,
3366 bool IsCallerStructRet,
3367 const SmallVectorImpl<ISD::OutputArg> &Outs,
3368 const SmallVectorImpl<SDValue> &OutVals,
3369 const SmallVectorImpl<ISD::InputArg> &Ins,
3370 SelectionDAG& DAG) const {
3371 const Function &CallerF = DAG.getMachineFunction().getFunction();
3372 CallingConv::ID CallerCC = CallerF.getCallingConv();
3373 bool CCMatch = CallerCC == CalleeCC;
3374
3375 // ***************************************************************************
3376 // Look for obvious safe cases to perform tail call optimization that do not
3377 // require ABI changes.
3378 // ***************************************************************************
3379
3380 // If this is a tail call via a function pointer, then don't do it!
3381 if (!isa<GlobalAddressSDNode>(Callee) &&
3382 !isa<ExternalSymbolSDNode>(Callee)) {
3383 return false;
3384 }
3385
3386   // Do not optimize if the calling conventions do not match, unless both
3387   // of them are C or Fast.
3388 if (!CCMatch) {
3389 bool R = (CallerCC == CallingConv::C || CallerCC == CallingConv::Fast);
3390 bool E = (CalleeCC == CallingConv::C || CalleeCC == CallingConv::Fast);
3391 // If R & E, then ok.
3392 if (!R || !E)
3393 return false;
3394 }
3395
3396 // Do not tail call optimize vararg calls.
3397 if (IsVarArg)
3398 return false;
3399
3400 // Also avoid tail call optimization if either caller or callee uses struct
3401 // return semantics.
3402 if (IsCalleeStructRet || IsCallerStructRet)
3403 return false;
3404
3405   // In addition to the cases above, we also disable Tail Call Optimization
3406   // if the calling convention requires at least one outgoing argument to be
3407   // passed on the stack. We cannot check that here because at this point
3408   // that information is not available.
3409 return true;
3410 }
3411
3412 /// Returns the target specific optimal type for load and store operations as
3413 /// a result of memset, memcpy, and memmove lowering.
3414 ///
3415 /// If DstAlign is zero, the destination alignment can satisfy any
3416 /// constraint. Similarly, if SrcAlign is zero, there is no need to check it
3417 /// against the alignment requirement, probably because the
3418 /// source does not need to be loaded. If 'IsMemset' is true, that means it's
3419 /// expanding a memset. If 'ZeroMemset' is true, that means it's a memset of
3420 /// zero. 'MemcpyStrSrc' indicates whether the memcpy source is constant so it
3421 /// does not need to be loaded. It returns EVT::Other if the type should be
3422 /// determined using generic target-independent logic.
3423 EVT HexagonTargetLowering::getOptimalMemOpType(
3424 const MemOp &Op, const AttributeList &FuncAttributes) const {
3425 if (Op.size() >= 8 && Op.isAligned(Align(8)))
3426 return MVT::i64;
3427 if (Op.size() >= 4 && Op.isAligned(Align(4)))
3428 return MVT::i32;
3429 if (Op.size() >= 2 && Op.isAligned(Align(2)))
3430 return MVT::i16;
3431 return MVT::Other;
3432 }
3433
3434 bool HexagonTargetLowering::allowsMemoryAccess(
3435 LLVMContext &Context, const DataLayout &DL, EVT VT, unsigned AddrSpace,
3436 Align Alignment, MachineMemOperand::Flags Flags, bool *Fast) const {
3437 MVT SVT = VT.getSimpleVT();
3438 if (Subtarget.isHVXVectorType(SVT, true))
3439 return allowsHvxMemoryAccess(SVT, Flags, Fast);
3440 return TargetLoweringBase::allowsMemoryAccess(
3441 Context, DL, VT, AddrSpace, Alignment, Flags, Fast);
3442 }
3443
3444 bool HexagonTargetLowering::allowsMisalignedMemoryAccesses(
3445 EVT VT, unsigned AddrSpace, unsigned Alignment,
3446 MachineMemOperand::Flags Flags, bool *Fast) const {
3447 MVT SVT = VT.getSimpleVT();
3448 if (Subtarget.isHVXVectorType(SVT, true))
3449 return allowsHvxMisalignedMemoryAccesses(SVT, Flags, Fast);
3450 if (Fast)
3451 *Fast = false;
3452 return false;
3453 }
3454
3455 std::pair<const TargetRegisterClass*, uint8_t>
3456 HexagonTargetLowering::findRepresentativeClass(const TargetRegisterInfo *TRI,
3457 MVT VT) const {
3458 if (Subtarget.isHVXVectorType(VT, true)) {
3459 unsigned BitWidth = VT.getSizeInBits();
3460 unsigned VecWidth = Subtarget.getVectorLength() * 8;
3461
3462 if (VT.getVectorElementType() == MVT::i1)
3463 return std::make_pair(&Hexagon::HvxQRRegClass, 1);
3464 if (BitWidth == VecWidth)
3465 return std::make_pair(&Hexagon::HvxVRRegClass, 1);
3466 assert(BitWidth == 2 * VecWidth);
3467 return std::make_pair(&Hexagon::HvxWRRegClass, 1);
3468 }
3469
3470 return TargetLowering::findRepresentativeClass(TRI, VT);
3471 }
3472
3473 bool HexagonTargetLowering::shouldReduceLoadWidth(SDNode *Load,
3474 ISD::LoadExtType ExtTy, EVT NewVT) const {
3475 // TODO: This may be worth removing. Check regression tests for diffs.
3476 if (!TargetLoweringBase::shouldReduceLoadWidth(Load, ExtTy, NewVT))
3477 return false;
3478
3479 auto *L = cast<LoadSDNode>(Load);
3480 std::pair<SDValue,int> BO = getBaseAndOffset(L->getBasePtr());
3481 // Small-data object, do not shrink.
3482 if (BO.first.getOpcode() == HexagonISD::CONST32_GP)
3483 return false;
3484 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(BO.first)) {
3485 auto &HTM = static_cast<const HexagonTargetMachine&>(getTargetMachine());
3486 const auto *GO = dyn_cast_or_null<const GlobalObject>(GA->getGlobal());
3487 return !GO || !HTM.getObjFileLowering()->isGlobalInSmallSection(GO, HTM);
3488 }
3489 return true;
3490 }
3491
3492 Value *HexagonTargetLowering::emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
3493 AtomicOrdering Ord) const {
3494 BasicBlock *BB = Builder.GetInsertBlock();
3495 Module *M = BB->getParent()->getParent();
3496 auto PT = cast<PointerType>(Addr->getType());
3497 Type *Ty = PT->getElementType();
3498 unsigned SZ = Ty->getPrimitiveSizeInBits();
3499 assert((SZ == 32 || SZ == 64) && "Only 32/64-bit atomic loads supported");
3500 Intrinsic::ID IntID = (SZ == 32) ? Intrinsic::hexagon_L2_loadw_locked
3501 : Intrinsic::hexagon_L4_loadd_locked;
3502 Function *Fn = Intrinsic::getDeclaration(M, IntID);
3503
3504 PointerType *NewPtrTy
3505 = Builder.getIntNTy(SZ)->getPointerTo(PT->getAddressSpace());
3506 Addr = Builder.CreateBitCast(Addr, NewPtrTy);
3507
3508 Value *Call = Builder.CreateCall(Fn, Addr, "larx");
3509
3510 return Builder.CreateBitCast(Call, Ty);
3511 }
3512
3513 /// Perform a store-conditional operation to Addr. Return the status of the
3514 /// store. This should be 0 if the store succeeded, non-zero otherwise.
3515 Value *HexagonTargetLowering::emitStoreConditional(IRBuilder<> &Builder,
3516 Value *Val, Value *Addr, AtomicOrdering Ord) const {
3517 BasicBlock *BB = Builder.GetInsertBlock();
3518 Module *M = BB->getParent()->getParent();
3519 Type *Ty = Val->getType();
3520 unsigned SZ = Ty->getPrimitiveSizeInBits();
3521
3522 Type *CastTy = Builder.getIntNTy(SZ);
3523 assert((SZ == 32 || SZ == 64) && "Only 32/64-bit atomic stores supported");
3524 Intrinsic::ID IntID = (SZ == 32) ? Intrinsic::hexagon_S2_storew_locked
3525 : Intrinsic::hexagon_S4_stored_locked;
3526 Function *Fn = Intrinsic::getDeclaration(M, IntID);
3527
3528 unsigned AS = Addr->getType()->getPointerAddressSpace();
3529 Addr = Builder.CreateBitCast(Addr, CastTy->getPointerTo(AS));
3530 Val = Builder.CreateBitCast(Val, CastTy);
3531
3532 Value *Call = Builder.CreateCall(Fn, {Addr, Val}, "stcx");
3533 Value *Cmp = Builder.CreateICmpEQ(Call, Builder.getInt32(0), "");
3534 Value *Ext = Builder.CreateZExt(Cmp, Type::getInt32Ty(M->getContext()));
3535 return Ext;
3536 }
3537
3538 TargetLowering::AtomicExpansionKind
3539 HexagonTargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
3540 // Do not expand loads and stores that don't exceed 64 bits.
3541 return LI->getType()->getPrimitiveSizeInBits() > 64
3542 ? AtomicExpansionKind::LLOnly
3543 : AtomicExpansionKind::None;
3544 }
3545
3546 bool HexagonTargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
3547 // Do not expand loads and stores that don't exceed 64 bits.
3548 return SI->getValueOperand()->getType()->getPrimitiveSizeInBits() > 64;
3549 }
3550
3551 TargetLowering::AtomicExpansionKind
3552 HexagonTargetLowering::shouldExpandAtomicCmpXchgInIR(
3553 AtomicCmpXchgInst *AI) const {
3554 return AtomicExpansionKind::LLSC;
3555 }
3556