//===-- AArch64ISelDAGToDAG.cpp - A dag to dag inst selector for AArch64 --===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines an instruction selector for the AArch64 target.
//
//===----------------------------------------------------------------------===//

#include "AArch64TargetMachine.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/IR/Function.h" // To access function attributes.
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "aarch64-isel"

//===--------------------------------------------------------------------===//
/// AArch64DAGToDAGISel - AArch64 specific code to select AArch64 machine
/// instructions for SelectionDAG operations.
///
namespace {

class AArch64DAGToDAGISel : public SelectionDAGISel {
  AArch64TargetMachine &TM;

  /// Subtarget - Keep a pointer to the AArch64Subtarget around so that we can
  /// make the right decision when generating code for different targets.
  const AArch64Subtarget *Subtarget;

  bool ForCodeSize;

public:
  explicit AArch64DAGToDAGISel(AArch64TargetMachine &tm,
                               CodeGenOpt::Level OptLevel)
      : SelectionDAGISel(tm, OptLevel), TM(tm), Subtarget(nullptr),
        ForCodeSize(false) {}

  const char *getPassName() const override {
    return "AArch64 Instruction Selection";
  }

  bool runOnMachineFunction(MachineFunction &MF) override {
    AttributeSet FnAttrs = MF.getFunction()->getAttributes();
    ForCodeSize =
        FnAttrs.hasAttribute(AttributeSet::FunctionIndex,
                             Attribute::OptimizeForSize) ||
        FnAttrs.hasAttribute(AttributeSet::FunctionIndex, Attribute::MinSize);
    Subtarget = &TM.getSubtarget<AArch64Subtarget>();
    return SelectionDAGISel::runOnMachineFunction(MF);
  }

  SDNode *Select(SDNode *Node) override;

  /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
  /// inline asm expressions.
  bool SelectInlineAsmMemoryOperand(const SDValue &Op,
                                    char ConstraintCode,
                                    std::vector<SDValue> &OutOps) override;

  SDNode *SelectMLAV64LaneV128(SDNode *N);
  SDNode *SelectMULLV64LaneV128(unsigned IntNo, SDNode *N);
  bool SelectArithExtendedRegister(SDValue N, SDValue &Reg, SDValue &Shift);
  bool SelectArithImmed(SDValue N, SDValue &Val, SDValue &Shift);
  bool SelectNegArithImmed(SDValue N, SDValue &Val, SDValue &Shift);
  bool SelectArithShiftedRegister(SDValue N, SDValue &Reg, SDValue &Shift) {
    return SelectShiftedRegister(N, false, Reg, Shift);
  }
  bool SelectLogicalShiftedRegister(SDValue N, SDValue &Reg, SDValue &Shift) {
    return SelectShiftedRegister(N, true, Reg, Shift);
  }
  bool SelectAddrModeIndexed8(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed(N, 1, Base, OffImm);
  }
  bool SelectAddrModeIndexed16(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed(N, 2, Base, OffImm);
  }
  bool SelectAddrModeIndexed32(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed(N, 4, Base, OffImm);
  }
  bool SelectAddrModeIndexed64(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed(N, 8, Base, OffImm);
  }
  bool SelectAddrModeIndexed128(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed(N, 16, Base, OffImm);
  }
  bool SelectAddrModeUnscaled8(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeUnscaled(N, 1, Base, OffImm);
  }
  bool SelectAddrModeUnscaled16(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeUnscaled(N, 2, Base, OffImm);
  }
  bool SelectAddrModeUnscaled32(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeUnscaled(N, 4, Base, OffImm);
  }
  bool SelectAddrModeUnscaled64(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeUnscaled(N, 8, Base, OffImm);
  }
  bool SelectAddrModeUnscaled128(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeUnscaled(N, 16, Base, OffImm);
  }

  template<int Width>
  bool SelectAddrModeWRO(SDValue N, SDValue &Base, SDValue &Offset,
                         SDValue &SignExtend, SDValue &DoShift) {
    return SelectAddrModeWRO(N, Width / 8, Base, Offset, SignExtend, DoShift);
  }

  template<int Width>
  bool SelectAddrModeXRO(SDValue N, SDValue &Base, SDValue &Offset,
                         SDValue &SignExtend, SDValue &DoShift) {
    return SelectAddrModeXRO(N, Width / 8, Base, Offset, SignExtend, DoShift);
  }

  /// Form sequences of consecutive 64/128-bit registers for use in NEON
  /// instructions making use of a vector-list (e.g. ldN, tbl). Vecs must have
  /// between 1 and 4 elements. If it contains a single element, that element
  /// is returned unchanged; otherwise a REG_SEQUENCE value is returned.
  SDValue createDTuple(ArrayRef<SDValue> Vecs);
  SDValue createQTuple(ArrayRef<SDValue> Vecs);

  /// Generic helper for the createDTuple/createQTuple
  /// functions. Those should almost always be called instead.
  SDValue createTuple(ArrayRef<SDValue> Vecs, unsigned RegClassIDs[],
                      unsigned SubRegs[]);

  SDNode *SelectTable(SDNode *N, unsigned NumVecs, unsigned Opc, bool isExt);

  SDNode *SelectIndexedLoad(SDNode *N, bool &Done);

  SDNode *SelectLoad(SDNode *N, unsigned NumVecs, unsigned Opc,
                     unsigned SubRegIdx);
  SDNode *SelectPostLoad(SDNode *N, unsigned NumVecs, unsigned Opc,
                         unsigned SubRegIdx);
  SDNode *SelectLoadLane(SDNode *N, unsigned NumVecs, unsigned Opc);
  SDNode *SelectPostLoadLane(SDNode *N, unsigned NumVecs, unsigned Opc);

  SDNode *SelectStore(SDNode *N, unsigned NumVecs, unsigned Opc);
  SDNode *SelectPostStore(SDNode *N, unsigned NumVecs, unsigned Opc);
  SDNode *SelectStoreLane(SDNode *N, unsigned NumVecs, unsigned Opc);
  SDNode *SelectPostStoreLane(SDNode *N, unsigned NumVecs, unsigned Opc);

  SDNode *SelectBitfieldExtractOp(SDNode *N);
  SDNode *SelectBitfieldInsertOp(SDNode *N);

  SDNode *SelectLIBM(SDNode *N);

// Include the pieces autogenerated from the target description.
#include "AArch64GenDAGISel.inc"

private:
  bool SelectShiftedRegister(SDValue N, bool AllowROR, SDValue &Reg,
                             SDValue &Shift);
  bool SelectAddrModeIndexed(SDValue N, unsigned Size, SDValue &Base,
                             SDValue &OffImm);
  bool SelectAddrModeUnscaled(SDValue N, unsigned Size, SDValue &Base,
                              SDValue &OffImm);
  bool SelectAddrModeWRO(SDValue N, unsigned Size, SDValue &Base,
                         SDValue &Offset, SDValue &SignExtend,
                         SDValue &DoShift);
  bool SelectAddrModeXRO(SDValue N, unsigned Size, SDValue &Base,
                         SDValue &Offset, SDValue &SignExtend,
                         SDValue &DoShift);
  bool isWorthFolding(SDValue V) const;
  bool SelectExtendedSHL(SDValue N, unsigned Size, bool WantExtend,
                         SDValue &Offset, SDValue &SignExtend);

  template<unsigned RegWidth>
  bool SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos) {
    return SelectCVTFixedPosOperand(N, FixedPos, RegWidth);
  }

  bool SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos, unsigned Width);
};
} // end anonymous namespace

/// isIntImmediate - This method tests to see if the node is a constant
/// operand. If so, Imm will receive the value.
static bool isIntImmediate(const SDNode *N, uint64_t &Imm) {
  if (const ConstantSDNode *C = dyn_cast<const ConstantSDNode>(N)) {
    Imm = C->getZExtValue();
    return true;
  }
  return false;
}

// isIntImmediate - This method tests to see if the value is a constant
// operand. If so, Imm will receive the value.
static bool isIntImmediate(SDValue N, uint64_t &Imm) {
  return isIntImmediate(N.getNode(), Imm);
}

// isOpcWithIntImmediate - This method tests to see if the node is a specific
// opcode and that it has an immediate integer right operand.
// If so, Imm will receive the value.
static bool isOpcWithIntImmediate(const SDNode *N, unsigned Opc,
                                  uint64_t &Imm) {
  return N->getOpcode() == Opc &&
         isIntImmediate(N->getOperand(1).getNode(), Imm);
}

bool AArch64DAGToDAGISel::SelectInlineAsmMemoryOperand(
    const SDValue &Op, char ConstraintCode, std::vector<SDValue> &OutOps) {
  assert(ConstraintCode == 'm' && "unexpected asm memory constraint");
  // Require the address to be in a register. That is safe for all AArch64
  // variants and it is hard to do anything much smarter without knowing
  // how the operand is used.
  OutOps.push_back(Op);
  return false;
}

/// SelectArithImmed - Select an immediate value that can be represented as
/// a 12-bit value shifted left by either 0 or 12. If so, return true with
/// Val set to the 12-bit value and Shift set to the shifter operand.
bool AArch64DAGToDAGISel::SelectArithImmed(SDValue N, SDValue &Val,
                                           SDValue &Shift) {
  // This function is called from the addsub_shifted_imm ComplexPattern,
  // which lists [imm] as the list of opcodes it's interested in; however,
  // we still need to check whether the operand is actually an immediate
  // here because the ComplexPattern opcode list is only used in
  // root-level opcode matching.
  if (!isa<ConstantSDNode>(N.getNode()))
    return false;

  uint64_t Immed = cast<ConstantSDNode>(N.getNode())->getZExtValue();
  unsigned ShiftAmt;

  if (Immed >> 12 == 0) {
    ShiftAmt = 0;
  } else if ((Immed & 0xfff) == 0 && Immed >> 24 == 0) {
    ShiftAmt = 12;
    Immed = Immed >> 12;
  } else
    return false;

  unsigned ShVal = AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftAmt);
  Val = CurDAG->getTargetConstant(Immed, MVT::i32);
  Shift = CurDAG->getTargetConstant(ShVal, MVT::i32);
  return true;
}
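
// Worked example for the checks above: 0x123 selects as (Val=0x123, LSL #0),
// 0x123000 selects as (Val=0x123, LSL #12), and 0x123456 fails both tests, so
// it is rejected and will be materialized into a register instead.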

/// SelectNegArithImmed - As above, but negates the value before trying to
/// select it.
bool AArch64DAGToDAGISel::SelectNegArithImmed(SDValue N, SDValue &Val,
                                              SDValue &Shift) {
  // This function is called from the addsub_shifted_imm ComplexPattern,
  // which lists [imm] as the list of opcodes it's interested in; however,
  // we still need to check whether the operand is actually an immediate
  // here because the ComplexPattern opcode list is only used in
  // root-level opcode matching.
  if (!isa<ConstantSDNode>(N.getNode()))
    return false;

  // The immediate operand must be a 24-bit zero-extended immediate.
  uint64_t Immed = cast<ConstantSDNode>(N.getNode())->getZExtValue();

  // This negation is almost always valid, but "cmp wN, #0" and "cmn wN, #0"
  // have the opposite effect on the C flag, so this pattern mustn't match under
  // those circumstances.
  if (Immed == 0)
    return false;

  if (N.getValueType() == MVT::i32)
    Immed = ~((uint32_t)Immed) + 1;
  else
    Immed = ~Immed + 1ULL;
  if (Immed & 0xFFFFFFFFFF000000ULL)
    return false;

  Immed &= 0xFFFFFFULL;
  return SelectArithImmed(CurDAG->getConstant(Immed, MVT::i32), Val, Shift);
}
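
// For example, a 32-bit constant of 0xFFFFF000 (i.e. -4096) negates to
// 0x1000, which SelectArithImmed then encodes as (Val=0x1, LSL #12).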

/// getShiftTypeForNode - Translate a shift node to the corresponding
/// ShiftType value.
static AArch64_AM::ShiftExtendType getShiftTypeForNode(SDValue N) {
  switch (N.getOpcode()) {
  default:
    return AArch64_AM::InvalidShiftExtend;
  case ISD::SHL:
    return AArch64_AM::LSL;
  case ISD::SRL:
    return AArch64_AM::LSR;
  case ISD::SRA:
    return AArch64_AM::ASR;
  case ISD::ROTR:
    return AArch64_AM::ROR;
  }
}

/// \brief Determine whether it is worth folding V into an extended register.
bool AArch64DAGToDAGISel::isWorthFolding(SDValue V) const {
  // It hurts if the value is used at least twice, unless we are optimizing
  // for code size.
  if (ForCodeSize || V.hasOneUse())
    return true;
  return false;
}

/// SelectShiftedRegister - Select a "shifted register" operand. If the value
/// is not shifted, set the Shift operand to default of "LSL 0". The logical
/// instructions allow the shifted register to be rotated, but the arithmetic
/// instructions do not. The AllowROR parameter specifies whether ROR is
/// supported.
bool AArch64DAGToDAGISel::SelectShiftedRegister(SDValue N, bool AllowROR,
                                                SDValue &Reg, SDValue &Shift) {
  AArch64_AM::ShiftExtendType ShType = getShiftTypeForNode(N);
  if (ShType == AArch64_AM::InvalidShiftExtend)
    return false;
  if (!AllowROR && ShType == AArch64_AM::ROR)
    return false;

  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    unsigned BitSize = N.getValueType().getSizeInBits();
    unsigned Val = RHS->getZExtValue() & (BitSize - 1);
    unsigned ShVal = AArch64_AM::getShifterImm(ShType, Val);

    Reg = N.getOperand(0);
    Shift = CurDAG->getTargetConstant(ShVal, MVT::i32);
    return isWorthFolding(N);
  }

  return false;
}
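
// For example, given (add w0, (shl w1, 3)) as the root pattern, the shl
// operand matches here with Reg=w1 and Shift=LSL #3, so a single
// "add w0, w0, w1, lsl #3" can be emitted instead of a shift plus an add.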

/// getExtendTypeForNode - Translate an extend node to the corresponding
/// ExtendType value.
static AArch64_AM::ShiftExtendType
getExtendTypeForNode(SDValue N, bool IsLoadStore = false) {
  if (N.getOpcode() == ISD::SIGN_EXTEND ||
      N.getOpcode() == ISD::SIGN_EXTEND_INREG) {
    EVT SrcVT;
    if (N.getOpcode() == ISD::SIGN_EXTEND_INREG)
      SrcVT = cast<VTSDNode>(N.getOperand(1))->getVT();
    else
      SrcVT = N.getOperand(0).getValueType();

    if (!IsLoadStore && SrcVT == MVT::i8)
      return AArch64_AM::SXTB;
    else if (!IsLoadStore && SrcVT == MVT::i16)
      return AArch64_AM::SXTH;
    else if (SrcVT == MVT::i32)
      return AArch64_AM::SXTW;
    assert(SrcVT != MVT::i64 && "extend from 64-bits?");

    return AArch64_AM::InvalidShiftExtend;
  } else if (N.getOpcode() == ISD::ZERO_EXTEND ||
             N.getOpcode() == ISD::ANY_EXTEND) {
    EVT SrcVT = N.getOperand(0).getValueType();
    if (!IsLoadStore && SrcVT == MVT::i8)
      return AArch64_AM::UXTB;
    else if (!IsLoadStore && SrcVT == MVT::i16)
      return AArch64_AM::UXTH;
    else if (SrcVT == MVT::i32)
      return AArch64_AM::UXTW;
    assert(SrcVT != MVT::i64 && "extend from 64-bits?");

    return AArch64_AM::InvalidShiftExtend;
  } else if (N.getOpcode() == ISD::AND) {
    ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
    if (!CSD)
      return AArch64_AM::InvalidShiftExtend;
    uint64_t AndMask = CSD->getZExtValue();

    switch (AndMask) {
    default:
      return AArch64_AM::InvalidShiftExtend;
    case 0xFF:
      return !IsLoadStore ? AArch64_AM::UXTB : AArch64_AM::InvalidShiftExtend;
    case 0xFFFF:
      return !IsLoadStore ? AArch64_AM::UXTH : AArch64_AM::InvalidShiftExtend;
    case 0xFFFFFFFF:
      return AArch64_AM::UXTW;
    }
  }

  return AArch64_AM::InvalidShiftExtend;
}
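
// Mapping examples: (sext_inreg x, i16) yields SXTH, (and x, 0xFF) yields
// UXTB, and a zero-extend from i32 yields UXTW. With IsLoadStore set, only
// the 32-bit forms are accepted, matching the UXTW/SXTW extends available to
// the register-offset load/store addressing modes.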

// Helper for SelectMLAV64LaneV128 - Recognize high lane extracts.
static bool checkHighLaneIndex(SDNode *DL, SDValue &LaneOp, int &LaneIdx) {
  if (DL->getOpcode() != AArch64ISD::DUPLANE16 &&
      DL->getOpcode() != AArch64ISD::DUPLANE32)
    return false;

  SDValue SV = DL->getOperand(0);
  if (SV.getOpcode() != ISD::INSERT_SUBVECTOR)
    return false;

  SDValue EV = SV.getOperand(1);
  if (EV.getOpcode() != ISD::EXTRACT_SUBVECTOR)
    return false;

  ConstantSDNode *DLidx = cast<ConstantSDNode>(DL->getOperand(1).getNode());
  ConstantSDNode *EVidx = cast<ConstantSDNode>(EV.getOperand(1).getNode());
  LaneIdx = DLidx->getSExtValue() + EVidx->getSExtValue();
  LaneOp = EV.getOperand(0);

  return true;
}

// Helper for SelectOpcV64LaneV128 - Recognize operations where one operand is
// a high lane extract.
static bool checkV64LaneV128(SDValue Op0, SDValue Op1, SDValue &StdOp,
                             SDValue &LaneOp, int &LaneIdx) {

  if (!checkHighLaneIndex(Op0.getNode(), LaneOp, LaneIdx)) {
    std::swap(Op0, Op1);
    if (!checkHighLaneIndex(Op0.getNode(), LaneOp, LaneIdx))
      return false;
  }
  StdOp = Op1;
  return true;
}

/// SelectMLAV64LaneV128 - AArch64 supports vector MLAs where one multiplicand
/// is a lane in the upper half of a 128-bit vector. Recognize and select this
/// so that we don't emit unnecessary lane extracts.
SDNode *AArch64DAGToDAGISel::SelectMLAV64LaneV128(SDNode *N) {
  SDValue Op0 = N->getOperand(0);
  SDValue Op1 = N->getOperand(1);
  SDValue MLAOp1;   // Will hold ordinary multiplicand for MLA.
  SDValue MLAOp2;   // Will hold lane-accessed multiplicand for MLA.
  int LaneIdx = -1; // Will hold the lane index.

  if (Op1.getOpcode() != ISD::MUL ||
      !checkV64LaneV128(Op1.getOperand(0), Op1.getOperand(1), MLAOp1, MLAOp2,
                        LaneIdx)) {
    std::swap(Op0, Op1);
    if (Op1.getOpcode() != ISD::MUL ||
        !checkV64LaneV128(Op1.getOperand(0), Op1.getOperand(1), MLAOp1, MLAOp2,
                          LaneIdx))
      return nullptr;
  }

  SDValue LaneIdxVal = CurDAG->getTargetConstant(LaneIdx, MVT::i64);

  SDValue Ops[] = { Op0, MLAOp1, MLAOp2, LaneIdxVal };

  unsigned MLAOpc = ~0U;

  switch (N->getSimpleValueType(0).SimpleTy) {
  default:
    llvm_unreachable("Unrecognized MLA.");
  case MVT::v4i16:
    MLAOpc = AArch64::MLAv4i16_indexed;
    break;
  case MVT::v8i16:
    MLAOpc = AArch64::MLAv8i16_indexed;
    break;
  case MVT::v2i32:
    MLAOpc = AArch64::MLAv2i32_indexed;
    break;
  case MVT::v4i32:
    MLAOpc = AArch64::MLAv4i32_indexed;
    break;
  }

  return CurDAG->getMachineNode(MLAOpc, SDLoc(N), N->getValueType(0), Ops);
}

SDNode *AArch64DAGToDAGISel::SelectMULLV64LaneV128(unsigned IntNo, SDNode *N) {
  SDValue SMULLOp0;
  SDValue SMULLOp1;
  int LaneIdx;

  if (!checkV64LaneV128(N->getOperand(1), N->getOperand(2), SMULLOp0, SMULLOp1,
                        LaneIdx))
    return nullptr;

  SDValue LaneIdxVal = CurDAG->getTargetConstant(LaneIdx, MVT::i64);

  SDValue Ops[] = { SMULLOp0, SMULLOp1, LaneIdxVal };

  unsigned SMULLOpc = ~0U;

  if (IntNo == Intrinsic::aarch64_neon_smull) {
    switch (N->getSimpleValueType(0).SimpleTy) {
    default:
      llvm_unreachable("Unrecognized SMULL.");
    case MVT::v4i32:
      SMULLOpc = AArch64::SMULLv4i16_indexed;
      break;
    case MVT::v2i64:
      SMULLOpc = AArch64::SMULLv2i32_indexed;
      break;
    }
  } else if (IntNo == Intrinsic::aarch64_neon_umull) {
    switch (N->getSimpleValueType(0).SimpleTy) {
    default:
      llvm_unreachable("Unrecognized UMULL.");
    case MVT::v4i32:
      SMULLOpc = AArch64::UMULLv4i16_indexed;
      break;
    case MVT::v2i64:
      SMULLOpc = AArch64::UMULLv2i32_indexed;
      break;
    }
  } else
    llvm_unreachable("Unrecognized intrinsic.");

  return CurDAG->getMachineNode(SMULLOpc, SDLoc(N), N->getValueType(0), Ops);
}

/// Instructions that accept extend modifiers like UXTW expect the register
/// being extended to be a GPR32, but the incoming DAG might be acting on a
/// GPR64 (either via SEXT_INREG or AND). Extract the appropriate low bits if
/// this is the case.
static SDValue narrowIfNeeded(SelectionDAG *CurDAG, SDValue N) {
  if (N.getValueType() == MVT::i32)
    return N;

  SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, MVT::i32);
  MachineSDNode *Node = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                               SDLoc(N), MVT::i32, N, SubReg);
  return SDValue(Node, 0);
}


/// SelectArithExtendedRegister - Select an "extended register" operand. This
/// operand folds in an extend followed by an optional left shift.
bool AArch64DAGToDAGISel::SelectArithExtendedRegister(SDValue N, SDValue &Reg,
                                                      SDValue &Shift) {
  unsigned ShiftVal = 0;
  AArch64_AM::ShiftExtendType Ext;

  if (N.getOpcode() == ISD::SHL) {
    ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
    if (!CSD)
      return false;
    ShiftVal = CSD->getZExtValue();
    if (ShiftVal > 4)
      return false;

    Ext = getExtendTypeForNode(N.getOperand(0));
    if (Ext == AArch64_AM::InvalidShiftExtend)
      return false;

    Reg = N.getOperand(0).getOperand(0);
  } else {
    Ext = getExtendTypeForNode(N);
    if (Ext == AArch64_AM::InvalidShiftExtend)
      return false;

    Reg = N.getOperand(0);
  }

  // AArch64 mandates that the RHS of the operation must use the smallest
  // register class that could contain the size being extended from. Thus,
  // if we're folding a (sext i8), we need the RHS to be a GPR32, even though
  // there might not be an actual 32-bit value in the program. We can
  // (harmlessly) synthesize one by injecting an EXTRACT_SUBREG here.
  assert(Ext != AArch64_AM::UXTX && Ext != AArch64_AM::SXTX);
  Reg = narrowIfNeeded(CurDAG, Reg);
  Shift = CurDAG->getTargetConstant(getArithExtendImm(Ext, ShiftVal), MVT::i32);
  return isWorthFolding(N);
}
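
// For example, (add x0, (shl (and x1, 0xFF), 2)) matches here with Ext=UXTB
// and ShiftVal=2, selecting to "add x0, x0, w1, uxtb #2" instead of separate
// mask, shift and add instructions.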

/// If there's a use of this ADDlow that's not itself a load/store then we'll
/// need to create a real ADD instruction from it anyway and there's no point in
/// folding it into the mem op. Theoretically, it shouldn't matter, but there's
/// a single pseudo-instruction for an ADRP/ADD pair so over-aggressive folding
/// leads to duplicated ADRP instructions.
static bool isWorthFoldingADDlow(SDValue N) {
  for (auto Use : N->uses()) {
    if (Use->getOpcode() != ISD::LOAD && Use->getOpcode() != ISD::STORE &&
        Use->getOpcode() != ISD::ATOMIC_LOAD &&
        Use->getOpcode() != ISD::ATOMIC_STORE)
      return false;

    // ldar and stlr have much more restrictive addressing modes (just a
    // register).
    if (cast<MemSDNode>(Use)->getOrdering() > Monotonic)
      return false;
  }

  return true;
}

/// SelectAddrModeIndexed - Select a "register plus scaled unsigned 12-bit
/// immediate" address. The "Size" argument is the size in bytes of the memory
/// reference, which determines the scale.
bool AArch64DAGToDAGISel::SelectAddrModeIndexed(SDValue N, unsigned Size,
                                                SDValue &Base,
                                                SDValue &OffImm) {
  const TargetLowering *TLI = getTargetLowering();
  if (N.getOpcode() == ISD::FrameIndex) {
    int FI = cast<FrameIndexSDNode>(N)->getIndex();
    Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy());
    OffImm = CurDAG->getTargetConstant(0, MVT::i64);
    return true;
  }

  if (N.getOpcode() == AArch64ISD::ADDlow && isWorthFoldingADDlow(N)) {
    GlobalAddressSDNode *GAN =
        dyn_cast<GlobalAddressSDNode>(N.getOperand(1).getNode());
    Base = N.getOperand(0);
    OffImm = N.getOperand(1);
    if (!GAN)
      return true;

    const GlobalValue *GV = GAN->getGlobal();
    unsigned Alignment = GV->getAlignment();
    const DataLayout *DL = TLI->getDataLayout();
    Type *Ty = GV->getType()->getElementType();
    if (Alignment == 0 && Ty->isSized())
      Alignment = DL->getABITypeAlignment(Ty);

    if (Alignment >= Size)
      return true;
  }

  if (CurDAG->isBaseWithConstantOffset(N)) {
    if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
      int64_t RHSC = (int64_t)RHS->getZExtValue();
      unsigned Scale = Log2_32(Size);
      if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 && RHSC < (0x1000 << Scale)) {
        Base = N.getOperand(0);
        if (Base.getOpcode() == ISD::FrameIndex) {
          int FI = cast<FrameIndexSDNode>(Base)->getIndex();
          Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy());
        }
        OffImm = CurDAG->getTargetConstant(RHSC >> Scale, MVT::i64);
        return true;
      }
    }
  }

  // Before falling back to our general case, check if the unscaled
  // instructions can handle this. If so, that's preferable.
  if (SelectAddrModeUnscaled(N, Size, Base, OffImm))
    return false;

  // Base only. The address will be materialized into a register before
  // the memory is accessed.
  //   add x0, Xbase, #offset
  //   ldr x0, [x0]
  Base = N;
  OffImm = CurDAG->getTargetConstant(0, MVT::i64);
  return true;
}
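
// Example of the scaling above: for an 8-byte access at (add x1, 32), Size=8,
// so RHSC=32 passes the alignment and range checks and OffImm becomes
// 32 >> 3 = 4, matching "ldr x0, [x1, #32]" whose immediate field holds the
// scaled value 4.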

/// SelectAddrModeUnscaled - Select a "register plus unscaled signed 9-bit
/// immediate" address. This should only match when there is an offset that
/// is not valid for a scaled immediate addressing mode. The "Size" argument
/// is the size in bytes of the memory reference, which is needed here to know
/// what is valid for a scaled immediate.
bool AArch64DAGToDAGISel::SelectAddrModeUnscaled(SDValue N, unsigned Size,
                                                 SDValue &Base,
                                                 SDValue &OffImm) {
  if (!CurDAG->isBaseWithConstantOffset(N))
    return false;
  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    int64_t RHSC = RHS->getSExtValue();
    // If the offset is valid as a scaled immediate, don't match here.
    if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 &&
        RHSC < (0x1000 << Log2_32(Size)))
      return false;
    if (RHSC >= -256 && RHSC < 256) {
      Base = N.getOperand(0);
      if (Base.getOpcode() == ISD::FrameIndex) {
        int FI = cast<FrameIndexSDNode>(Base)->getIndex();
        const TargetLowering *TLI = getTargetLowering();
        Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy());
      }
      OffImm = CurDAG->getTargetConstant(RHSC, MVT::i64);
      return true;
    }
  }
  return false;
}
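
// For example, an 8-byte access at offset -8 cannot use the scaled unsigned
// form, but -8 fits in the signed 9-bit range, so this selects the unscaled
// variant: "ldur x0, [x1, #-8]".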

static SDValue Widen(SelectionDAG *CurDAG, SDValue N) {
  SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, MVT::i32);
  SDValue ImpDef = SDValue(
      CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, SDLoc(N), MVT::i64),
      0);
  MachineSDNode *Node = CurDAG->getMachineNode(
      TargetOpcode::INSERT_SUBREG, SDLoc(N), MVT::i64, ImpDef, N, SubReg);
  return SDValue(Node, 0);
}

/// \brief Check if the given SHL node (\p N) can be used to form an
/// extended register for an addressing mode.
bool AArch64DAGToDAGISel::SelectExtendedSHL(SDValue N, unsigned Size,
                                            bool WantExtend, SDValue &Offset,
                                            SDValue &SignExtend) {
  assert(N.getOpcode() == ISD::SHL && "Invalid opcode.");
  ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
  if (!CSD || (CSD->getZExtValue() & 0x7) != CSD->getZExtValue())
    return false;

  if (WantExtend) {
    AArch64_AM::ShiftExtendType Ext =
        getExtendTypeForNode(N.getOperand(0), true);
    if (Ext == AArch64_AM::InvalidShiftExtend)
      return false;

    Offset = narrowIfNeeded(CurDAG, N.getOperand(0).getOperand(0));
    SignExtend = CurDAG->getTargetConstant(Ext == AArch64_AM::SXTW, MVT::i32);
  } else {
    Offset = N.getOperand(0);
    SignExtend = CurDAG->getTargetConstant(0, MVT::i32);
  }

  unsigned LegalShiftVal = Log2_32(Size);
  unsigned ShiftVal = CSD->getZExtValue();

  if (ShiftVal != 0 && ShiftVal != LegalShiftVal)
    return false;

  if (isWorthFolding(N))
    return true;

  return false;
}
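
// For example, for a 4-byte access (Size=4), (shl (zext w1), 2) is accepted:
// the extend becomes UXTW and the shift amount 2 equals Log2_32(4), so the
// operand folds into an address like [x0, w1, uxtw #2].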

bool AArch64DAGToDAGISel::SelectAddrModeWRO(SDValue N, unsigned Size,
                                            SDValue &Base, SDValue &Offset,
                                            SDValue &SignExtend,
                                            SDValue &DoShift) {
  if (N.getOpcode() != ISD::ADD)
    return false;
  SDValue LHS = N.getOperand(0);
  SDValue RHS = N.getOperand(1);

  // We don't want to match immediate adds here, because they are better lowered
  // to the register-immediate addressing modes.
  if (isa<ConstantSDNode>(LHS) || isa<ConstantSDNode>(RHS))
    return false;

  // Check if this particular node is reused in any non-memory related
  // operation. If yes, do not try to fold this node into the address
  // computation, since the computation will be kept.
  const SDNode *Node = N.getNode();
  for (SDNode *UI : Node->uses()) {
    if (!isa<MemSDNode>(*UI))
      return false;
  }

  // Remember if it is worth folding N when it produces an extended register.
  bool IsExtendedRegisterWorthFolding = isWorthFolding(N);

  // Try to match a shifted extend on the RHS.
  if (IsExtendedRegisterWorthFolding && RHS.getOpcode() == ISD::SHL &&
      SelectExtendedSHL(RHS, Size, true, Offset, SignExtend)) {
    Base = LHS;
    DoShift = CurDAG->getTargetConstant(true, MVT::i32);
    return true;
  }

  // Try to match a shifted extend on the LHS.
  if (IsExtendedRegisterWorthFolding && LHS.getOpcode() == ISD::SHL &&
      SelectExtendedSHL(LHS, Size, true, Offset, SignExtend)) {
    Base = RHS;
    DoShift = CurDAG->getTargetConstant(true, MVT::i32);
    return true;
  }

  // There was no shift, whatever else we find.
  DoShift = CurDAG->getTargetConstant(false, MVT::i32);

  AArch64_AM::ShiftExtendType Ext = AArch64_AM::InvalidShiftExtend;
  // Try to match an unshifted extend on the LHS.
  if (IsExtendedRegisterWorthFolding &&
      (Ext = getExtendTypeForNode(LHS, true)) !=
          AArch64_AM::InvalidShiftExtend) {
    Base = RHS;
    Offset = narrowIfNeeded(CurDAG, LHS.getOperand(0));
    SignExtend = CurDAG->getTargetConstant(Ext == AArch64_AM::SXTW, MVT::i32);
    if (isWorthFolding(LHS))
      return true;
  }

  // Try to match an unshifted extend on the RHS.
  if (IsExtendedRegisterWorthFolding &&
      (Ext = getExtendTypeForNode(RHS, true)) !=
          AArch64_AM::InvalidShiftExtend) {
    Base = LHS;
    Offset = narrowIfNeeded(CurDAG, RHS.getOperand(0));
    SignExtend = CurDAG->getTargetConstant(Ext == AArch64_AM::SXTW, MVT::i32);
    if (isWorthFolding(RHS))
      return true;
  }

  return false;
}

// Check if the given immediate is preferred by ADD. If an immediate can be
// encoded in an ADD, or it can be encoded in an "ADD LSL #12" and cannot be
// encoded by one MOVZ, return true.
static bool isPreferredADD(int64_t ImmOff) {
  // Constants in [0x0, 0xfff] can be encoded in ADD.
  if ((ImmOff & 0xfffffffffffff000LL) == 0x0LL)
    return true;
  // Check if it can be encoded in an "ADD LSL #12".
  if ((ImmOff & 0xffffffffff000fffLL) == 0x0LL)
    // As a single MOVZ is faster than an "ADD LSL #12", ignore such constants.
    return (ImmOff & 0xffffffffff00ffffLL) != 0x0LL &&
           (ImmOff & 0xffffffffffff0fffLL) != 0x0LL;
  return false;
}
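
// For example, isPreferredADD(0x123000) is true (encodable as an ADD with
// LSL #12 but not as one MOVZ), whereas isPreferredADD(0x30000) is false
// because "movz x0, #0x3, lsl #16" materializes it in a single instruction.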

bool AArch64DAGToDAGISel::SelectAddrModeXRO(SDValue N, unsigned Size,
                                            SDValue &Base, SDValue &Offset,
                                            SDValue &SignExtend,
                                            SDValue &DoShift) {
  if (N.getOpcode() != ISD::ADD)
    return false;
  SDValue LHS = N.getOperand(0);
  SDValue RHS = N.getOperand(1);

  // Check if this particular node is reused in any non-memory related
  // operation. If yes, do not try to fold this node into the address
  // computation, since the computation will be kept.
  const SDNode *Node = N.getNode();
  for (SDNode *UI : Node->uses()) {
    if (!isa<MemSDNode>(*UI))
      return false;
  }

  // Watch out if RHS is a wide immediate: it cannot be selected into the
  // [BaseReg+Imm] addressing mode, and it may not be encodable in an ADD/SUB
  // either. In that case we would fall back to the [BaseReg + 0] addressing
  // mode and generate instructions like:
  //   MOV  X0, WideImmediate
  //   ADD  X1, BaseReg, X0
  //   LDR  X2, [X1, 0]
  // For such a situation, using the [BaseReg, XReg] addressing mode saves one
  // ADD/SUB:
  //   MOV  X0, WideImmediate
  //   LDR  X2, [BaseReg, X0]
  if (isa<ConstantSDNode>(RHS)) {
    int64_t ImmOff = (int64_t)cast<ConstantSDNode>(RHS)->getZExtValue();
    unsigned Scale = Log2_32(Size);
    // Skip immediates that can be selected in the load/store addressing mode.
    // Also skip immediates that can be encoded by a single ADD (SUB is also
    // checked by using -ImmOff).
    if ((ImmOff % Size == 0 && ImmOff >= 0 && ImmOff < (0x1000 << Scale)) ||
        isPreferredADD(ImmOff) || isPreferredADD(-ImmOff))
      return false;

    SDLoc DL(N.getNode());
    SDValue Ops[] = { RHS };
    SDNode *MOVI =
        CurDAG->getMachineNode(AArch64::MOVi64imm, DL, MVT::i64, Ops);
    SDValue MOVIV = SDValue(MOVI, 0);
    // This ADD of two X registers will be selected into [Reg+Reg] mode.
    N = CurDAG->getNode(ISD::ADD, DL, MVT::i64, LHS, MOVIV);
  }

  // Remember if it is worth folding N when it produces an extended register.
  bool IsExtendedRegisterWorthFolding = isWorthFolding(N);

  // Try to match a shifted extend on the RHS.
  if (IsExtendedRegisterWorthFolding && RHS.getOpcode() == ISD::SHL &&
      SelectExtendedSHL(RHS, Size, false, Offset, SignExtend)) {
    Base = LHS;
    DoShift = CurDAG->getTargetConstant(true, MVT::i32);
    return true;
  }

  // Try to match a shifted extend on the LHS.
  if (IsExtendedRegisterWorthFolding && LHS.getOpcode() == ISD::SHL &&
      SelectExtendedSHL(LHS, Size, false, Offset, SignExtend)) {
    Base = RHS;
    DoShift = CurDAG->getTargetConstant(true, MVT::i32);
    return true;
  }

  // Match any non-shifted, non-extend, non-immediate add expression.
  Base = LHS;
  Offset = RHS;
  SignExtend = CurDAG->getTargetConstant(false, MVT::i32);
  DoShift = CurDAG->getTargetConstant(false, MVT::i32);
  // Reg1 + Reg2 is free: no check needed.
  return true;
}

SDValue AArch64DAGToDAGISel::createDTuple(ArrayRef<SDValue> Regs) {
  static unsigned RegClassIDs[] = {
      AArch64::DDRegClassID, AArch64::DDDRegClassID, AArch64::DDDDRegClassID};
  static unsigned SubRegs[] = { AArch64::dsub0, AArch64::dsub1,
                                AArch64::dsub2, AArch64::dsub3 };

  return createTuple(Regs, RegClassIDs, SubRegs);
}

SDValue AArch64DAGToDAGISel::createQTuple(ArrayRef<SDValue> Regs) {
  static unsigned RegClassIDs[] = {
      AArch64::QQRegClassID, AArch64::QQQRegClassID, AArch64::QQQQRegClassID};
  static unsigned SubRegs[] = { AArch64::qsub0, AArch64::qsub1,
                                AArch64::qsub2, AArch64::qsub3 };

  return createTuple(Regs, RegClassIDs, SubRegs);
}

SDValue AArch64DAGToDAGISel::createTuple(ArrayRef<SDValue> Regs,
                                         unsigned RegClassIDs[],
                                         unsigned SubRegs[]) {
  // There's no special register-class for a vector-list of 1 element: it's just
  // a vector.
  if (Regs.size() == 1)
    return Regs[0];

  assert(Regs.size() >= 2 && Regs.size() <= 4);

  SDLoc DL(Regs[0].getNode());

  SmallVector<SDValue, 4> Ops;

  // First operand of REG_SEQUENCE is the desired RegClass.
  Ops.push_back(
      CurDAG->getTargetConstant(RegClassIDs[Regs.size() - 2], MVT::i32));

  // Then we get pairs of source & subregister-position for the components.
  for (unsigned i = 0; i < Regs.size(); ++i) {
    Ops.push_back(Regs[i]);
    Ops.push_back(CurDAG->getTargetConstant(SubRegs[i], MVT::i32));
  }

  SDNode *N =
      CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, DL, MVT::Untyped, Ops);
  return SDValue(N, 0);
}
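
// For two Q registers, for instance, this builds
//   REG_SEQUENCE QQRegClassID, Vec0, qsub0, Vec1, qsub1
// which constrains the register allocator to place the pair in consecutive
// registers, as the vector-list instructions (e.g. LD2/ST2) require.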

SDNode *AArch64DAGToDAGISel::SelectTable(SDNode *N, unsigned NumVecs,
                                         unsigned Opc, bool isExt) {
  SDLoc dl(N);
  EVT VT = N->getValueType(0);

  unsigned ExtOff = isExt;

  // Form a REG_SEQUENCE to force register allocation.
  unsigned Vec0Off = ExtOff + 1;
  SmallVector<SDValue, 4> Regs(N->op_begin() + Vec0Off,
                               N->op_begin() + Vec0Off + NumVecs);
  SDValue RegSeq = createQTuple(Regs);

  SmallVector<SDValue, 6> Ops;
  if (isExt)
    Ops.push_back(N->getOperand(1));
  Ops.push_back(RegSeq);
  Ops.push_back(N->getOperand(NumVecs + ExtOff + 1));
  return CurDAG->getMachineNode(Opc, dl, VT, Ops);
}

SDNode *AArch64DAGToDAGISel::SelectIndexedLoad(SDNode *N, bool &Done) {
  LoadSDNode *LD = cast<LoadSDNode>(N);
  if (LD->isUnindexed())
    return nullptr;
  EVT VT = LD->getMemoryVT();
  EVT DstVT = N->getValueType(0);
  ISD::MemIndexedMode AM = LD->getAddressingMode();
  bool IsPre = AM == ISD::PRE_INC || AM == ISD::PRE_DEC;

  // We're not doing validity checking here. That was done when checking
  // if we should mark the load as indexed or not. We're just selecting
  // the right instruction.
  unsigned Opcode = 0;

  ISD::LoadExtType ExtType = LD->getExtensionType();
  bool InsertTo64 = false;
  if (VT == MVT::i64)
    Opcode = IsPre ? AArch64::LDRXpre : AArch64::LDRXpost;
  else if (VT == MVT::i32) {
    if (ExtType == ISD::NON_EXTLOAD)
      Opcode = IsPre ? AArch64::LDRWpre : AArch64::LDRWpost;
    else if (ExtType == ISD::SEXTLOAD)
      Opcode = IsPre ? AArch64::LDRSWpre : AArch64::LDRSWpost;
    else {
      Opcode = IsPre ? AArch64::LDRWpre : AArch64::LDRWpost;
      InsertTo64 = true;
      // The result of the load is only i32. It's the subreg_to_reg that makes
      // it into an i64.
      DstVT = MVT::i32;
    }
  } else if (VT == MVT::i16) {
    if (ExtType == ISD::SEXTLOAD) {
      if (DstVT == MVT::i64)
        Opcode = IsPre ? AArch64::LDRSHXpre : AArch64::LDRSHXpost;
      else
        Opcode = IsPre ? AArch64::LDRSHWpre : AArch64::LDRSHWpost;
    } else {
      Opcode = IsPre ? AArch64::LDRHHpre : AArch64::LDRHHpost;
      InsertTo64 = DstVT == MVT::i64;
      // The result of the load is only i32. It's the subreg_to_reg that makes
      // it into an i64.
      DstVT = MVT::i32;
    }
  } else if (VT == MVT::i8) {
    if (ExtType == ISD::SEXTLOAD) {
      if (DstVT == MVT::i64)
        Opcode = IsPre ? AArch64::LDRSBXpre : AArch64::LDRSBXpost;
      else
        Opcode = IsPre ? AArch64::LDRSBWpre : AArch64::LDRSBWpost;
    } else {
      Opcode = IsPre ? AArch64::LDRBBpre : AArch64::LDRBBpost;
      InsertTo64 = DstVT == MVT::i64;
      // The result of the load is only i32. It's the subreg_to_reg that makes
      // it into an i64.
      DstVT = MVT::i32;
    }
  } else if (VT == MVT::f32) {
    Opcode = IsPre ? AArch64::LDRSpre : AArch64::LDRSpost;
  } else if (VT == MVT::f64 || VT.is64BitVector()) {
    Opcode = IsPre ? AArch64::LDRDpre : AArch64::LDRDpost;
  } else if (VT.is128BitVector()) {
    Opcode = IsPre ? AArch64::LDRQpre : AArch64::LDRQpost;
  } else
    return nullptr;
  SDValue Chain = LD->getChain();
  SDValue Base = LD->getBasePtr();
  ConstantSDNode *OffsetOp = cast<ConstantSDNode>(LD->getOffset());
  int OffsetVal = (int)OffsetOp->getZExtValue();
  SDValue Offset = CurDAG->getTargetConstant(OffsetVal, MVT::i64);
  SDValue Ops[] = { Base, Offset, Chain };
  SDNode *Res = CurDAG->getMachineNode(Opcode, SDLoc(N), MVT::i64, DstVT,
                                       MVT::Other, Ops);
  // Either way, we're replacing the node, so tell the caller that.
  Done = true;
  SDValue LoadedVal = SDValue(Res, 1);
  if (InsertTo64) {
    SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, MVT::i32);
    LoadedVal =
        SDValue(CurDAG->getMachineNode(
                    AArch64::SUBREG_TO_REG, SDLoc(N), MVT::i64,
                    CurDAG->getTargetConstant(0, MVT::i64), LoadedVal, SubReg),
                0);
  }

  ReplaceUses(SDValue(N, 0), LoadedVal);
  ReplaceUses(SDValue(N, 1), SDValue(Res, 0));
  ReplaceUses(SDValue(N, 2), SDValue(Res, 2));

  return nullptr;
}

SDNode *AArch64DAGToDAGISel::SelectLoad(SDNode *N, unsigned NumVecs,
                                        unsigned Opc, unsigned SubRegIdx) {
  SDLoc dl(N);
  EVT VT = N->getValueType(0);
  SDValue Chain = N->getOperand(0);

  SmallVector<SDValue, 6> Ops;
  Ops.push_back(N->getOperand(2)); // Mem operand;
  Ops.push_back(Chain);

  std::vector<EVT> ResTys;
  ResTys.push_back(MVT::Untyped);
  ResTys.push_back(MVT::Other);

  SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
  SDValue SuperReg = SDValue(Ld, 0);
  for (unsigned i = 0; i < NumVecs; ++i)
    ReplaceUses(SDValue(N, i),
                CurDAG->getTargetExtractSubreg(SubRegIdx + i, dl, VT,
                                               SuperReg));

  ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 1));
  return nullptr;
}

SDNode *AArch64DAGToDAGISel::SelectPostLoad(SDNode *N, unsigned NumVecs,
                                            unsigned Opc, unsigned SubRegIdx) {
  SDLoc dl(N);
  EVT VT = N->getValueType(0);
  SDValue Chain = N->getOperand(0);

  SmallVector<SDValue, 6> Ops;
  Ops.push_back(N->getOperand(1)); // Mem operand
  Ops.push_back(N->getOperand(2)); // Incremental
  Ops.push_back(Chain);

  std::vector<EVT> ResTys;
  ResTys.push_back(MVT::i64); // Type of the write back register
  ResTys.push_back(MVT::Untyped);
  ResTys.push_back(MVT::Other);

  SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);

  // Update uses of the write back register.
  ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 0));

  // Update uses of the vector list.
  SDValue SuperReg = SDValue(Ld, 1);
  if (NumVecs == 1)
    ReplaceUses(SDValue(N, 0), SuperReg);
  else
    for (unsigned i = 0; i < NumVecs; ++i)
      ReplaceUses(SDValue(N, i),
                  CurDAG->getTargetExtractSubreg(SubRegIdx + i, dl, VT,
                                                 SuperReg));

  // Update the chain.
  ReplaceUses(SDValue(N, NumVecs + 1), SDValue(Ld, 2));
  return nullptr;
}

SDNode *AArch64DAGToDAGISel::SelectStore(SDNode *N, unsigned NumVecs,
                                         unsigned Opc) {
  SDLoc dl(N);
  EVT VT = N->getOperand(2)->getValueType(0);

  // Form a REG_SEQUENCE to force register allocation.
  bool Is128Bit = VT.getSizeInBits() == 128;
  SmallVector<SDValue, 4> Regs(N->op_begin() + 2, N->op_begin() + 2 + NumVecs);
  SDValue RegSeq = Is128Bit ? createQTuple(Regs) : createDTuple(Regs);

  SmallVector<SDValue, 6> Ops;
  Ops.push_back(RegSeq);
  Ops.push_back(N->getOperand(NumVecs + 2));
  Ops.push_back(N->getOperand(0));
  SDNode *St = CurDAG->getMachineNode(Opc, dl, N->getValueType(0), Ops);

  return St;
}

SDNode *AArch64DAGToDAGISel::SelectPostStore(SDNode *N, unsigned NumVecs,
                                             unsigned Opc) {
  SDLoc dl(N);
  EVT VT = N->getOperand(2)->getValueType(0);
  SmallVector<EVT, 2> ResTys;
  ResTys.push_back(MVT::i64);   // Type of the write back register
  ResTys.push_back(MVT::Other); // Type for the Chain

  // Form a REG_SEQUENCE to force register allocation.
  bool Is128Bit = VT.getSizeInBits() == 128;
  SmallVector<SDValue, 4> Regs(N->op_begin() + 1, N->op_begin() + 1 + NumVecs);
  SDValue RegSeq = Is128Bit ? createQTuple(Regs) : createDTuple(Regs);

  SmallVector<SDValue, 6> Ops;
  Ops.push_back(RegSeq);
  Ops.push_back(N->getOperand(NumVecs + 1)); // base register
  Ops.push_back(N->getOperand(NumVecs + 2)); // Incremental
  Ops.push_back(N->getOperand(0));           // Chain
  SDNode *St = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);

  return St;
}

/// WidenVector - Given a value in the V64 register class, produce the
/// equivalent value in the V128 register class.
class WidenVector {
  SelectionDAG &DAG;

public:
  WidenVector(SelectionDAG &DAG) : DAG(DAG) {}

  SDValue operator()(SDValue V64Reg) {
    EVT VT = V64Reg.getValueType();
    unsigned NarrowSize = VT.getVectorNumElements();
    MVT EltTy = VT.getVectorElementType().getSimpleVT();
    MVT WideTy = MVT::getVectorVT(EltTy, 2 * NarrowSize);
    SDLoc DL(V64Reg);

    SDValue Undef =
        SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, WideTy), 0);
    return DAG.getTargetInsertSubreg(AArch64::dsub, DL, WideTy, Undef, V64Reg);
  }
};

/// NarrowVector - Given a value in the V128 register class, produce the
/// equivalent value in the V64 register class.
static SDValue NarrowVector(SDValue V128Reg, SelectionDAG &DAG) {
  EVT VT = V128Reg.getValueType();
  unsigned WideSize = VT.getVectorNumElements();
  MVT EltTy = VT.getVectorElementType().getSimpleVT();
  MVT NarrowTy = MVT::getVectorVT(EltTy, WideSize / 2);

  return DAG.getTargetExtractSubreg(AArch64::dsub, SDLoc(V128Reg), NarrowTy,
                                    V128Reg);
}

SDNode *AArch64DAGToDAGISel::SelectLoadLane(SDNode *N, unsigned NumVecs,
                                            unsigned Opc) {
  SDLoc dl(N);
  EVT VT = N->getValueType(0);
  bool Narrow = VT.getSizeInBits() == 64;

  // Form a REG_SEQUENCE to force register allocation.
  SmallVector<SDValue, 4> Regs(N->op_begin() + 2, N->op_begin() + 2 + NumVecs);

  if (Narrow)
    std::transform(Regs.begin(), Regs.end(), Regs.begin(),
                   WidenVector(*CurDAG));

  SDValue RegSeq = createQTuple(Regs);

  std::vector<EVT> ResTys;
  ResTys.push_back(MVT::Untyped);
  ResTys.push_back(MVT::Other);

  unsigned LaneNo =
      cast<ConstantSDNode>(N->getOperand(NumVecs + 2))->getZExtValue();

  SmallVector<SDValue, 6> Ops;
  Ops.push_back(RegSeq);
  Ops.push_back(CurDAG->getTargetConstant(LaneNo, MVT::i64));
  Ops.push_back(N->getOperand(NumVecs + 3));
  Ops.push_back(N->getOperand(0));
  SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
  SDValue SuperReg = SDValue(Ld, 0);

  EVT WideVT = RegSeq.getOperand(1)->getValueType(0);
  static unsigned QSubs[] = { AArch64::qsub0, AArch64::qsub1, AArch64::qsub2,
                              AArch64::qsub3 };
  for (unsigned i = 0; i < NumVecs; ++i) {
    SDValue NV = CurDAG->getTargetExtractSubreg(QSubs[i], dl, WideVT, SuperReg);
    if (Narrow)
      NV = NarrowVector(NV, *CurDAG);
    ReplaceUses(SDValue(N, i), NV);
  }

  ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 1));

  return Ld;
}

SDNode *AArch64DAGToDAGISel::SelectPostLoadLane(SDNode *N, unsigned NumVecs,
                                                unsigned Opc) {
  SDLoc dl(N);
  EVT VT = N->getValueType(0);
  bool Narrow = VT.getSizeInBits() == 64;

  // Form a REG_SEQUENCE to force register allocation.
  SmallVector<SDValue, 4> Regs(N->op_begin() + 1, N->op_begin() + 1 + NumVecs);

  if (Narrow)
    std::transform(Regs.begin(), Regs.end(), Regs.begin(),
                   WidenVector(*CurDAG));

  SDValue RegSeq = createQTuple(Regs);

  std::vector<EVT> ResTys;
  ResTys.push_back(MVT::i64); // Type of the write back register
  ResTys.push_back(MVT::Untyped);
  ResTys.push_back(MVT::Other);

  unsigned LaneNo =
      cast<ConstantSDNode>(N->getOperand(NumVecs + 1))->getZExtValue();

  SmallVector<SDValue, 6> Ops;
  Ops.push_back(RegSeq);
  Ops.push_back(CurDAG->getTargetConstant(LaneNo, MVT::i64)); // Lane Number
  Ops.push_back(N->getOperand(NumVecs + 2));                  // Base register
  Ops.push_back(N->getOperand(NumVecs + 3));                  // Incremental
  Ops.push_back(N->getOperand(0));
  SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);

  // Update uses of the write back register.
  ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 0));

  // Update uses of the vector list.
  SDValue SuperReg = SDValue(Ld, 1);
  if (NumVecs == 1) {
    ReplaceUses(SDValue(N, 0),
                Narrow ? NarrowVector(SuperReg, *CurDAG) : SuperReg);
  } else {
    EVT WideVT = RegSeq.getOperand(1)->getValueType(0);
    static unsigned QSubs[] = { AArch64::qsub0, AArch64::qsub1, AArch64::qsub2,
                                AArch64::qsub3 };
    for (unsigned i = 0; i < NumVecs; ++i) {
      SDValue NV = CurDAG->getTargetExtractSubreg(QSubs[i], dl, WideVT,
                                                  SuperReg);
      if (Narrow)
        NV = NarrowVector(NV, *CurDAG);
      ReplaceUses(SDValue(N, i), NV);
    }
  }

  // Update the Chain.
  ReplaceUses(SDValue(N, NumVecs + 1), SDValue(Ld, 2));

  return Ld;
}

SDNode *AArch64DAGToDAGISel::SelectStoreLane(SDNode *N, unsigned NumVecs,
                                             unsigned Opc) {
  SDLoc dl(N);
  EVT VT = N->getOperand(2)->getValueType(0);
  bool Narrow = VT.getSizeInBits() == 64;

  // Form a REG_SEQUENCE to force register allocation.
  SmallVector<SDValue, 4> Regs(N->op_begin() + 2, N->op_begin() + 2 + NumVecs);

  if (Narrow)
    std::transform(Regs.begin(), Regs.end(), Regs.begin(),
                   WidenVector(*CurDAG));

  SDValue RegSeq = createQTuple(Regs);

  unsigned LaneNo =
      cast<ConstantSDNode>(N->getOperand(NumVecs + 2))->getZExtValue();

  SmallVector<SDValue, 6> Ops;
  Ops.push_back(RegSeq);
  Ops.push_back(CurDAG->getTargetConstant(LaneNo, MVT::i64));
  Ops.push_back(N->getOperand(NumVecs + 3));
  Ops.push_back(N->getOperand(0));
  SDNode *St = CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops);

  // Transfer memoperands.
  MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
  MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
  cast<MachineSDNode>(St)->setMemRefs(MemOp, MemOp + 1);

  return St;
}

SDNode *AArch64DAGToDAGISel::SelectPostStoreLane(SDNode *N, unsigned NumVecs,
                                                 unsigned Opc) {
  SDLoc dl(N);
  EVT VT = N->getOperand(2)->getValueType(0);
  bool Narrow = VT.getSizeInBits() == 64;

  // Form a REG_SEQUENCE to force register allocation.
  SmallVector<SDValue, 4> Regs(N->op_begin() + 1, N->op_begin() + 1 + NumVecs);

  if (Narrow)
    std::transform(Regs.begin(), Regs.end(), Regs.begin(),
                   WidenVector(*CurDAG));

  SDValue RegSeq = createQTuple(Regs);

  SmallVector<EVT, 2> ResTys;
  ResTys.push_back(MVT::i64); // Type of the write back register
  ResTys.push_back(MVT::Other);

  unsigned LaneNo =
      cast<ConstantSDNode>(N->getOperand(NumVecs + 1))->getZExtValue();

  SmallVector<SDValue, 6> Ops;
  Ops.push_back(RegSeq);
  Ops.push_back(CurDAG->getTargetConstant(LaneNo, MVT::i64));
  Ops.push_back(N->getOperand(NumVecs + 2)); // Base Register
  Ops.push_back(N->getOperand(NumVecs + 3)); // Incremental
  Ops.push_back(N->getOperand(0));
  SDNode *St = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);

  // Transfer memoperands.
  MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
  MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
  cast<MachineSDNode>(St)->setMemRefs(MemOp, MemOp + 1);

  return St;
}
1358
isBitfieldExtractOpFromAnd(SelectionDAG * CurDAG,SDNode * N,unsigned & Opc,SDValue & Opd0,unsigned & LSB,unsigned & MSB,unsigned NumberOfIgnoredLowBits,bool BiggerPattern)1359 static bool isBitfieldExtractOpFromAnd(SelectionDAG *CurDAG, SDNode *N,
1360 unsigned &Opc, SDValue &Opd0,
1361 unsigned &LSB, unsigned &MSB,
1362 unsigned NumberOfIgnoredLowBits,
1363 bool BiggerPattern) {
1364 assert(N->getOpcode() == ISD::AND &&
1365 "N must be a AND operation to call this function");
1366
1367 EVT VT = N->getValueType(0);
1368
1369 // Here we can test the type of VT and return false when the type does not
1370 // match, but since it is done prior to that call in the current context
1371 // we turned that into an assert to avoid redundant code.
1372 assert((VT == MVT::i32 || VT == MVT::i64) &&
1373 "Type checking must have been done before calling this function");
1374
1375 // FIXME: simplify-demanded-bits in DAGCombine will probably have
1376 // changed the AND node to a 32-bit mask operation. We'll have to
1377 // undo that as part of the transform here if we want to catch all
1378 // the opportunities.
1379 // Currently the NumberOfIgnoredLowBits argument helps to recover
1380 // form these situations when matching bigger pattern (bitfield insert).
1381
1382 // For unsigned extracts, check for a shift right and mask
1383 uint64_t And_imm = 0;
1384 if (!isOpcWithIntImmediate(N, ISD::AND, And_imm))
1385 return false;
1386
1387 const SDNode *Op0 = N->getOperand(0).getNode();
1388
1389 // Because of simplify-demanded-bits in DAGCombine, the mask may have been
1390 // simplified. Try to undo that
1391 And_imm |= (1 << NumberOfIgnoredLowBits) - 1;
1392
1393 // The immediate is a mask of the low bits iff imm & (imm+1) == 0
1394 if (And_imm & (And_imm + 1))
1395 return false;
1396
1397 bool ClampMSB = false;
1398 uint64_t Srl_imm = 0;
1399 // Handle the SRL + ANY_EXTEND case.
1400 if (VT == MVT::i64 && Op0->getOpcode() == ISD::ANY_EXTEND &&
1401 isOpcWithIntImmediate(Op0->getOperand(0).getNode(), ISD::SRL, Srl_imm)) {
1402 // Extend the incoming operand of the SRL to 64-bit.
1403 Opd0 = Widen(CurDAG, Op0->getOperand(0).getOperand(0));
1404 // Make sure to clamp the MSB so that we preserve the semantics of the
1405 // original operations.
1406 ClampMSB = true;
1407 } else if (VT == MVT::i32 && Op0->getOpcode() == ISD::TRUNCATE &&
1408 isOpcWithIntImmediate(Op0->getOperand(0).getNode(), ISD::SRL,
1409 Srl_imm)) {
1410 // If the shift result was truncated, we can still combine them.
1411 Opd0 = Op0->getOperand(0).getOperand(0);
1412
1413 // Use the type of SRL node.
1414 VT = Opd0->getValueType(0);
1415 } else if (isOpcWithIntImmediate(Op0, ISD::SRL, Srl_imm)) {
1416 Opd0 = Op0->getOperand(0);
1417 } else if (BiggerPattern) {
1418 // Let's pretend a 0 shift right has been performed.
1419 // The resulting code will be at least as good as the original one
1420 // plus it may expose more opportunities for bitfield insert pattern.
1421 // FIXME: Currently we limit this to the bigger pattern, because
1422 // some optimizations expect AND and not UBFM
1423 Opd0 = N->getOperand(0);
1424 } else
1425 return false;
1426
1427 assert((BiggerPattern || (Srl_imm > 0 && Srl_imm < VT.getSizeInBits())) &&
1428 "bad amount in shift node!");
1429
1430 LSB = Srl_imm;
1431 MSB = Srl_imm + (VT == MVT::i32 ? CountTrailingOnes_32(And_imm)
1432 : CountTrailingOnes_64(And_imm)) -
1433 1;
1434 if (ClampMSB)
1435 // Since we're moving the extend before the right shift operation, we need
1436 // to clamp the MSB to make sure we don't shift in undefined bits instead of
1437 // the zeros which would get shifted in with the original right shift
1438 // operation.
1439 MSB = MSB > 31 ? 31 : MSB;
1440
1441 Opc = VT == MVT::i32 ? AArch64::UBFMWri : AArch64::UBFMXri;
1442 return true;
1443 }
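
// Worked example (illustrative; not from the original source): for the i32
// node (and (srl x, 4), 0xff), And_imm is 0xff and Srl_imm is 4, so LSB = 4
// and MSB = 4 + CountTrailingOnes_32(0xff) - 1 = 11. The node is therefore
// selected as UBFMWri x, 4, 11, i.e. "ubfx wD, wN, #4, #8".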

static bool isSeveralBitsExtractOpFromShr(SDNode *N, unsigned &Opc,
                                          SDValue &Opd0, unsigned &LSB,
                                          unsigned &MSB) {
  // We are looking for the following pattern, which basically extracts several
  // contiguous bits from the source value and places them in the LSBs of the
  // destination value; all other bits of the destination value are set to
  // zero:
  //
  // Value2 = AND Value, MaskImm
  // SRL Value2, ShiftImm
  //
  // where MaskImm >> ShiftImm determines the bit width of the field.
  //
  // This gets selected into a single UBFM:
  //
  // UBFM Value, ShiftImm, BitWide + Srl_imm - 1
  //

  if (N->getOpcode() != ISD::SRL)
    return false;

  uint64_t And_mask = 0;
  if (!isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::AND, And_mask))
    return false;

  Opd0 = N->getOperand(0).getOperand(0);

  uint64_t Srl_imm = 0;
  if (!isIntImmediate(N->getOperand(1), Srl_imm))
    return false;

  // Check whether we really have a several bits extract here.
  unsigned BitWide = 64 - CountLeadingOnes_64(~(And_mask >> Srl_imm));
  if (BitWide && isMask_64(And_mask >> Srl_imm)) {
    if (N->getValueType(0) == MVT::i32)
      Opc = AArch64::UBFMWri;
    else
      Opc = AArch64::UBFMXri;

    LSB = Srl_imm;
    MSB = BitWide + Srl_imm - 1;
    return true;
  }

  return false;
}
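
// Worked example (illustrative; not from the original source): for
// (srl (and x, 0xff0), 4), And_mask >> Srl_imm is 0xff, a contiguous mask of
// width BitWide = 8, so LSB = 4 and MSB = 8 + 4 - 1 = 11, and the whole tree
// becomes a single UBFM x, 4, 11, i.e. "ubfx xD, xN, #4, #8".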

static bool isBitfieldExtractOpFromShr(SDNode *N, unsigned &Opc, SDValue &Opd0,
                                       unsigned &LSB, unsigned &MSB,
                                       bool BiggerPattern) {
  assert((N->getOpcode() == ISD::SRA || N->getOpcode() == ISD::SRL) &&
         "N must be a SHR/SRA operation to call this function");

  EVT VT = N->getValueType(0);

  // Here we could test the type of VT and return false when the type does not
  // match, but since that check is done prior to this call in the current
  // context, we turned it into an assert to avoid redundant code.
  assert((VT == MVT::i32 || VT == MVT::i64) &&
         "Type checking must have been done before calling this function");

  // Check for AND + SRL doing a several bits extract.
  if (isSeveralBitsExtractOpFromShr(N, Opc, Opd0, LSB, MSB))
    return true;

  // We're looking for a shift of a shift.
  uint64_t Shl_imm = 0;
  uint64_t Trunc_bits = 0;
  if (isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::SHL, Shl_imm)) {
    Opd0 = N->getOperand(0).getOperand(0);
  } else if (VT == MVT::i32 && N->getOpcode() == ISD::SRL &&
             N->getOperand(0).getNode()->getOpcode() == ISD::TRUNCATE) {
    // We are looking for a shift of a truncate. A truncate from i64 to i32
    // can be considered as setting the high 32 bits to zero. Our strategy
    // here is to always generate a 64-bit UBFM. This consistency will help
    // the CSE pass later find more redundancy.
    Opd0 = N->getOperand(0).getOperand(0);
    Trunc_bits = Opd0->getValueType(0).getSizeInBits() - VT.getSizeInBits();
    VT = Opd0->getValueType(0);
    assert(VT == MVT::i64 && "the promoted type should be i64");
  } else if (BiggerPattern) {
    // Let's pretend a 0 shift left has been performed.
    // FIXME: Currently we limit this to the bigger pattern case,
    // because some optimizations expect AND and not UBFM.
    Opd0 = N->getOperand(0);
  } else
    return false;

  assert(Shl_imm < VT.getSizeInBits() && "bad amount in shift node!");
  uint64_t Srl_imm = 0;
  if (!isIntImmediate(N->getOperand(1), Srl_imm))
    return false;

  assert(Srl_imm > 0 && Srl_imm < VT.getSizeInBits() &&
         "bad amount in shift node!");
  // Note: The width operand is encoded as width-1.
  unsigned Width = VT.getSizeInBits() - Trunc_bits - Srl_imm - 1;
  int sLSB = Srl_imm - Shl_imm;
  if (sLSB < 0)
    return false;
  LSB = sLSB;
  MSB = LSB + Width;
  // SRA requires a signed extraction.
  if (VT == MVT::i32)
    Opc = N->getOpcode() == ISD::SRA ? AArch64::SBFMWri : AArch64::UBFMWri;
  else
    Opc = N->getOpcode() == ISD::SRA ? AArch64::SBFMXri : AArch64::UBFMXri;
  return true;
}
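
// Worked example (illustrative; not from the original source): for the i32
// node (sra (shl x, 24), 28), Shl_imm = 24 and Srl_imm = 28, so
// Width = 32 - 0 - 28 - 1 = 3, LSB = 28 - 24 = 4 and MSB = 4 + 3 = 7.
// Since the outer shift is SRA, this selects SBFMWri x, 4, 7, i.e.
// "sbfx wD, wN, #4, #4".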

static bool isBitfieldExtractOp(SelectionDAG *CurDAG, SDNode *N, unsigned &Opc,
                                SDValue &Opd0, unsigned &LSB, unsigned &MSB,
                                unsigned NumberOfIgnoredLowBits = 0,
                                bool BiggerPattern = false) {
  if (N->getValueType(0) != MVT::i32 && N->getValueType(0) != MVT::i64)
    return false;

  switch (N->getOpcode()) {
  default:
    if (!N->isMachineOpcode())
      return false;
    break;
  case ISD::AND:
    return isBitfieldExtractOpFromAnd(CurDAG, N, Opc, Opd0, LSB, MSB,
                                      NumberOfIgnoredLowBits, BiggerPattern);
  case ISD::SRL:
  case ISD::SRA:
    return isBitfieldExtractOpFromShr(N, Opc, Opd0, LSB, MSB, BiggerPattern);
  }

  unsigned NOpc = N->getMachineOpcode();
  switch (NOpc) {
  default:
    return false;
  case AArch64::SBFMWri:
  case AArch64::UBFMWri:
  case AArch64::SBFMXri:
  case AArch64::UBFMXri:
    Opc = NOpc;
    Opd0 = N->getOperand(0);
    LSB = cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue();
    MSB = cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue();
    return true;
  }
  // Unreachable
  return false;
}

SDNode *AArch64DAGToDAGISel::SelectBitfieldExtractOp(SDNode *N) {
  unsigned Opc, LSB, MSB;
  SDValue Opd0;
  if (!isBitfieldExtractOp(CurDAG, N, Opc, Opd0, LSB, MSB))
    return nullptr;

  EVT VT = N->getValueType(0);

  // If the bit extract operation is 64-bit but the original type is 32-bit,
  // we need to add one EXTRACT_SUBREG.
  if ((Opc == AArch64::SBFMXri || Opc == AArch64::UBFMXri) && VT == MVT::i32) {
    SDValue Ops64[] = {Opd0, CurDAG->getTargetConstant(LSB, MVT::i64),
                       CurDAG->getTargetConstant(MSB, MVT::i64)};

    SDNode *BFM = CurDAG->getMachineNode(Opc, SDLoc(N), MVT::i64, Ops64);
    SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, MVT::i32);
    MachineSDNode *Node =
        CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG, SDLoc(N),
                               MVT::i32, SDValue(BFM, 0), SubReg);
    return Node;
  }

  SDValue Ops[] = {Opd0, CurDAG->getTargetConstant(LSB, VT),
                   CurDAG->getTargetConstant(MSB, VT)};
  return CurDAG->SelectNodeTo(N, Opc, VT, Ops);
}
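
// Worked example (illustrative; not from the original source): when the i32
// pattern (srl (trunc x:i64), 2) is matched above, the extract is widened to
// a 64-bit UBFMXri (Width = 64 - 32 - 2 - 1 = 29, LSB = 2, MSB = 31), so the
// i32 result is produced as
//   t0 = UBFMXri x, 2, 31
//   t1 = EXTRACT_SUBREG t0, sub_32
// rather than as a 32-bit UBFM on the truncated value.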

/// Does DstMask form a complementary pair with the mask provided by
/// BitsToBeInserted, suitable for use in a BFI instruction? Roughly speaking,
/// this asks whether DstMask zeroes precisely those bits that will be set by
/// the other half.
static bool isBitfieldDstMask(uint64_t DstMask, APInt BitsToBeInserted,
                              unsigned NumberOfIgnoredHighBits, EVT VT) {
  assert((VT == MVT::i32 || VT == MVT::i64) &&
         "i32 or i64 mask type expected!");
  unsigned BitWidth = VT.getSizeInBits() - NumberOfIgnoredHighBits;

  APInt SignificantDstMask = APInt(BitWidth, DstMask);
  APInt SignificantBitsToBeInserted = BitsToBeInserted.zextOrTrunc(BitWidth);

  return (SignificantDstMask & SignificantBitsToBeInserted) == 0 &&
         (SignificantDstMask | SignificantBitsToBeInserted).isAllOnesValue();
}
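
// Worked example (illustrative; not from the original source): for an i32
// insertion of bits [11:4], BitsToBeInserted is 0x00000ff0; a DstMask of
// 0xfffff00f is complementary (disjoint from it, and together they cover all
// 32 bits), so the AND on the insertee can be folded into the BFI.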

// Look for bits that will be useful for later uses.
// A bit is considered useless as soon as it is dropped and never used
// before it has been dropped.
// E.g., looking for the useful bits of x:
// 1. y = x & 0x7
// 2. z = y >> 2
// After #1, the useful bits of x are 0x7; these useful bits live through
// y.
// After #2, the useful bits of x are 0x4.
// However, if x is used by an unpredictable instruction, then all its bits
// are useful.
// E.g.
// 1. y = x & 0x7
// 2. z = y >> 2
// 3. str x, [@x]
static void getUsefulBits(SDValue Op, APInt &UsefulBits, unsigned Depth = 0);

static void getUsefulBitsFromAndWithImmediate(SDValue Op, APInt &UsefulBits,
                                              unsigned Depth) {
  uint64_t Imm =
      cast<const ConstantSDNode>(Op.getOperand(1).getNode())->getZExtValue();
  Imm = AArch64_AM::decodeLogicalImmediate(Imm, UsefulBits.getBitWidth());
  UsefulBits &= APInt(UsefulBits.getBitWidth(), Imm);
  getUsefulBits(Op, UsefulBits, Depth + 1);
}

static void getUsefulBitsFromBitfieldMoveOpd(SDValue Op, APInt &UsefulBits,
                                             uint64_t Imm, uint64_t MSB,
                                             unsigned Depth) {
  // Inherit the bitwidth value.
  APInt OpUsefulBits(UsefulBits);
  OpUsefulBits = 1;

  if (MSB >= Imm) {
    OpUsefulBits = OpUsefulBits.shl(MSB - Imm + 1);
    --OpUsefulBits;
    // The interesting part will be in the lower part of the result.
    getUsefulBits(Op, OpUsefulBits, Depth + 1);
    // The interesting part was starting at Imm in the argument.
    OpUsefulBits = OpUsefulBits.shl(Imm);
  } else {
    OpUsefulBits = OpUsefulBits.shl(MSB + 1);
    --OpUsefulBits;
    // The interesting part will be shifted in the result.
    OpUsefulBits = OpUsefulBits.shl(OpUsefulBits.getBitWidth() - Imm);
    getUsefulBits(Op, OpUsefulBits, Depth + 1);
    // The interesting part was at zero in the argument.
    OpUsefulBits = OpUsefulBits.lshr(OpUsefulBits.getBitWidth() - Imm);
  }

  UsefulBits &= OpUsefulBits;
}
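
// Worked example (illustrative; not from the original source): for a 32-bit
// UBFM with Imm (immr) = 4 and MSB (imms) = 11, i.e. "ubfx #4, #8", the
// MSB >= Imm branch applies: result bits [7:0] come from argument bits
// [11:4], so a useful-bits mask of 0xff on the result is mapped back to
// 0xff0 on the argument.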

static void getUsefulBitsFromUBFM(SDValue Op, APInt &UsefulBits,
                                  unsigned Depth) {
  uint64_t Imm =
      cast<const ConstantSDNode>(Op.getOperand(1).getNode())->getZExtValue();
  uint64_t MSB =
      cast<const ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue();

  getUsefulBitsFromBitfieldMoveOpd(Op, UsefulBits, Imm, MSB, Depth);
}

static void getUsefulBitsFromOrWithShiftedReg(SDValue Op, APInt &UsefulBits,
                                              unsigned Depth) {
  uint64_t ShiftTypeAndValue =
      cast<const ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue();
  APInt Mask(UsefulBits);
  Mask.clearAllBits();
  Mask.flipAllBits();

  if (AArch64_AM::getShiftType(ShiftTypeAndValue) == AArch64_AM::LSL) {
    // Shift Left
    uint64_t ShiftAmt = AArch64_AM::getShiftValue(ShiftTypeAndValue);
    Mask = Mask.shl(ShiftAmt);
    getUsefulBits(Op, Mask, Depth + 1);
    Mask = Mask.lshr(ShiftAmt);
  } else if (AArch64_AM::getShiftType(ShiftTypeAndValue) == AArch64_AM::LSR) {
    // Shift Right
    // We do not handle AArch64_AM::ASR, because the sign will change the
    // number of useful bits.
    uint64_t ShiftAmt = AArch64_AM::getShiftValue(ShiftTypeAndValue);
    Mask = Mask.lshr(ShiftAmt);
    getUsefulBits(Op, Mask, Depth + 1);
    Mask = Mask.shl(ShiftAmt);
  } else
    return;

  UsefulBits &= Mask;
}

static void getUsefulBitsFromBFM(SDValue Op, SDValue Orig, APInt &UsefulBits,
                                 unsigned Depth) {
  uint64_t Imm =
      cast<const ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue();
  uint64_t MSB =
      cast<const ConstantSDNode>(Op.getOperand(3).getNode())->getZExtValue();

  if (Op.getOperand(1) == Orig)
    return getUsefulBitsFromBitfieldMoveOpd(Op, UsefulBits, Imm, MSB, Depth);

  APInt OpUsefulBits(UsefulBits);
  OpUsefulBits = 1;

  if (MSB >= Imm) {
    OpUsefulBits = OpUsefulBits.shl(MSB - Imm + 1);
    --OpUsefulBits;
    UsefulBits &= ~OpUsefulBits;
    getUsefulBits(Op, UsefulBits, Depth + 1);
  } else {
    OpUsefulBits = OpUsefulBits.shl(MSB + 1);
    --OpUsefulBits;
    UsefulBits = ~(OpUsefulBits.shl(OpUsefulBits.getBitWidth() - Imm));
    getUsefulBits(Op, UsefulBits, Depth + 1);
  }
}

static void getUsefulBitsForUse(SDNode *UserNode, APInt &UsefulBits,
                                SDValue Orig, unsigned Depth) {

  // Users of this node should have already been instruction selected.
  // FIXME: Can we turn that into an assert?
  if (!UserNode->isMachineOpcode())
    return;

  switch (UserNode->getMachineOpcode()) {
  default:
    return;
  case AArch64::ANDSWri:
  case AArch64::ANDSXri:
  case AArch64::ANDWri:
  case AArch64::ANDXri:
    // We increment Depth only when we call getUsefulBits.
    return getUsefulBitsFromAndWithImmediate(SDValue(UserNode, 0), UsefulBits,
                                             Depth);
  case AArch64::UBFMWri:
  case AArch64::UBFMXri:
    return getUsefulBitsFromUBFM(SDValue(UserNode, 0), UsefulBits, Depth);

  case AArch64::ORRWrs:
  case AArch64::ORRXrs:
    if (UserNode->getOperand(1) != Orig)
      return;
    return getUsefulBitsFromOrWithShiftedReg(SDValue(UserNode, 0), UsefulBits,
                                             Depth);
  case AArch64::BFMWri:
  case AArch64::BFMXri:
    return getUsefulBitsFromBFM(SDValue(UserNode, 0), Orig, UsefulBits, Depth);
  }
}

static void getUsefulBits(SDValue Op, APInt &UsefulBits, unsigned Depth) {
  if (Depth >= 6)
    return;
  // Initialize UsefulBits.
  if (!Depth) {
    unsigned Bitwidth = Op.getValueType().getScalarType().getSizeInBits();
    // At the beginning, assume every produced bit is useful.
    UsefulBits = APInt(Bitwidth, 0);
    UsefulBits.flipAllBits();
  }
  APInt UsersUsefulBits(UsefulBits.getBitWidth(), 0);

  for (SDNode *Node : Op.getNode()->uses()) {
    // A use cannot produce useful bits.
    APInt UsefulBitsForUse = APInt(UsefulBits);
    getUsefulBitsForUse(Node, UsefulBitsForUse, Op, Depth);
    UsersUsefulBits |= UsefulBitsForUse;
  }
  // UsefulBits contains the produced bits that are meaningful for the
  // current definition, thus a user cannot make a bit meaningful at
  // this point.
  UsefulBits &= UsersUsefulBits;
}

/// Create a machine node performing a notional SHL of Op by ShlAmount. If
/// ShlAmount is negative, do a (logical) right-shift instead. If ShlAmount is
/// 0, return Op unchanged.
static SDValue getLeftShift(SelectionDAG *CurDAG, SDValue Op, int ShlAmount) {
  if (ShlAmount == 0)
    return Op;

  EVT VT = Op.getValueType();
  unsigned BitWidth = VT.getSizeInBits();
  unsigned UBFMOpc = BitWidth == 32 ? AArch64::UBFMWri : AArch64::UBFMXri;

  SDNode *ShiftNode;
  if (ShlAmount > 0) {
    // LSL wD, wN, #Amt == UBFM wD, wN, #32-Amt, #31-Amt
    ShiftNode = CurDAG->getMachineNode(
        UBFMOpc, SDLoc(Op), VT, Op,
        CurDAG->getTargetConstant(BitWidth - ShlAmount, VT),
        CurDAG->getTargetConstant(BitWidth - 1 - ShlAmount, VT));
  } else {
    // LSR wD, wN, #Amt == UBFM wD, wN, #Amt, #32-1
    assert(ShlAmount < 0 && "expected right shift");
    int ShrAmount = -ShlAmount;
    ShiftNode = CurDAG->getMachineNode(
        UBFMOpc, SDLoc(Op), VT, Op, CurDAG->getTargetConstant(ShrAmount, VT),
        CurDAG->getTargetConstant(BitWidth - 1, VT));
  }

  return SDValue(ShiftNode, 0);
}
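
// Worked example (illustrative; not from the original source): on i32,
// getLeftShift(CurDAG, Op, 4) emits UBFMWri Op, #28, #27 ("lsl wD, wN, #4"),
// while getLeftShift(CurDAG, Op, -4) emits UBFMWri Op, #4, #31
// ("lsr wD, wN, #4").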

/// Does this tree qualify as an attempt to move a bitfield into position,
/// essentially "(and (shl VAL, N), Mask)"?
static bool isBitfieldPositioningOp(SelectionDAG *CurDAG, SDValue Op,
                                    SDValue &Src, int &ShiftAmount,
                                    int &MaskWidth) {
  EVT VT = Op.getValueType();
  unsigned BitWidth = VT.getSizeInBits();
  (void)BitWidth;
  assert(BitWidth == 32 || BitWidth == 64);

  APInt KnownZero, KnownOne;
  CurDAG->computeKnownBits(Op, KnownZero, KnownOne);

  // Non-zero in the sense that they're not provably zero, which is the key
  // point if we want to use this value.
  uint64_t NonZeroBits = (~KnownZero).getZExtValue();

  // Discard a constant AND mask if present. It's safe because the node will
  // already have been factored into the computeKnownBits calculation above.
  uint64_t AndImm;
  if (isOpcWithIntImmediate(Op.getNode(), ISD::AND, AndImm)) {
    assert((~APInt(BitWidth, AndImm) & ~KnownZero) == 0);
    Op = Op.getOperand(0);
  }

  uint64_t ShlImm;
  if (!isOpcWithIntImmediate(Op.getNode(), ISD::SHL, ShlImm))
    return false;
  Op = Op.getOperand(0);

  if (!isShiftedMask_64(NonZeroBits))
    return false;

  ShiftAmount = countTrailingZeros(NonZeroBits);
  MaskWidth = CountTrailingOnes_64(NonZeroBits >> ShiftAmount);

  // BFI encompasses sufficiently many nodes that it's worth inserting an
  // extra LSL/LSR if the mask in NonZeroBits doesn't quite match up with the
  // ISD::SHL amount.
  Src = getLeftShift(CurDAG, Op, ShlImm - ShiftAmount);

  return true;
}
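
// Worked example (illustrative; not from the original source, and assuming
// nothing more is known about x): for the i32 tree (and (shl x, 4), 0xff0),
// NonZeroBits is 0xff0, so ShiftAmount = 4 and MaskWidth = 8; the SHL amount
// already matches ShiftAmount, so Src is just x and no extra LSL/LSR is
// emitted.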

// Given an OR operation, check if we have the following pattern:
// ubfm c, b, imm, imm2 (or something that does the same job, see
//                       isBitfieldExtractOp)
// d = e & mask2 ; where mask2 is a binary sequence of 1..10..0 and
//                 countTrailingZeros(mask2) == imm2 - imm + 1
// f = d | c
// If yes, the given reference arguments will be updated so that one can
// replace the OR instruction with:
// f = Opc Opd0, Opd1, LSB, MSB ; where Opc is a BFM, LSB = imm, and MSB = imm2
static bool isBitfieldInsertOpFromOr(SDNode *N, unsigned &Opc, SDValue &Dst,
                                     SDValue &Src, unsigned &ImmR,
                                     unsigned &ImmS, SelectionDAG *CurDAG) {
  assert(N->getOpcode() == ISD::OR && "Expect an OR operation");

  // Set Opc.
  EVT VT = N->getValueType(0);
  if (VT == MVT::i32)
    Opc = AArch64::BFMWri;
  else if (VT == MVT::i64)
    Opc = AArch64::BFMXri;
  else
    return false;

  // Because of simplify-demanded-bits in DAGCombine, involved masks may not
  // have the expected shape. Try to undo that.
  APInt UsefulBits;
  getUsefulBits(SDValue(N, 0), UsefulBits);

  unsigned NumberOfIgnoredLowBits = UsefulBits.countTrailingZeros();
  unsigned NumberOfIgnoredHighBits = UsefulBits.countLeadingZeros();

  // OR is commutative, so check both operand orders (does LLVM provide a
  // way to do that directly, e.g., via the code matcher?).
  SDValue OrOpd1Val = N->getOperand(1);
  SDNode *OrOpd0 = N->getOperand(0).getNode();
  SDNode *OrOpd1 = N->getOperand(1).getNode();
  for (int i = 0; i < 2;
       ++i, std::swap(OrOpd0, OrOpd1), OrOpd1Val = N->getOperand(0)) {
    unsigned BFXOpc;
    int DstLSB, Width;
    if (isBitfieldExtractOp(CurDAG, OrOpd0, BFXOpc, Src, ImmR, ImmS,
                            NumberOfIgnoredLowBits, true)) {
      // Check that the returned opcode is compatible with the pattern,
      // i.e., same type and zero extended (U and not S).
      if ((BFXOpc != AArch64::UBFMXri && VT == MVT::i64) ||
          (BFXOpc != AArch64::UBFMWri && VT == MVT::i32))
        continue;

      // Compute the width of the bitfield insertion.
      DstLSB = 0;
      Width = ImmS - ImmR + 1;
      // FIXME: This constraint is to catch bitfield insertion; we may
      // want to widen the pattern if we want to grab the general bitfield
      // move case.
      if (Width <= 0)
        continue;

      // If the mask on the insertee is correct, we have a BFXIL operation. We
      // can share the ImmR and ImmS values from the already-computed UBFM.
    } else if (isBitfieldPositioningOp(CurDAG, SDValue(OrOpd0, 0), Src,
                                       DstLSB, Width)) {
      ImmR = (VT.getSizeInBits() - DstLSB) % VT.getSizeInBits();
      ImmS = Width - 1;
    } else
      continue;

    // Check the second part of the pattern.
    EVT VT = OrOpd1->getValueType(0);
    assert((VT == MVT::i32 || VT == MVT::i64) && "unexpected OR operand");

    // Compute the known zero bits for the candidate for the first operand.
    // This allows us to catch more general cases than just looking for
    // AND with imm. Indeed, simplify-demanded-bits may have removed
    // the AND instruction because it proved it was useless.
    APInt KnownZero, KnownOne;
    CurDAG->computeKnownBits(OrOpd1Val, KnownZero, KnownOne);

    // Check if there is enough room for the second operand to appear
    // in the first one.
    APInt BitsToBeInserted =
        APInt::getBitsSet(KnownZero.getBitWidth(), DstLSB, DstLSB + Width);

    if ((BitsToBeInserted & ~KnownZero) != 0)
      continue;

    // Set the first operand.
    uint64_t Imm;
    if (isOpcWithIntImmediate(OrOpd1, ISD::AND, Imm) &&
        isBitfieldDstMask(Imm, BitsToBeInserted, NumberOfIgnoredHighBits, VT))
      // In that case, we can eliminate the AND.
      Dst = OrOpd1->getOperand(0);
    else
      // Maybe the AND has been removed by simplify-demanded-bits
      // or is useful because it discards more bits.
      Dst = OrOpd1Val;

    // Both parts match.
    return true;
  }

  return false;
}
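
// Worked example (illustrative; not from the original source): for the i32
// tree (or (and (shl x, 4), 0xff0), (and y, 0xfffff00f)), the first operand
// matches isBitfieldPositioningOp with DstLSB = 4 and Width = 8, giving
// ImmR = (32 - 4) % 32 = 28 and ImmS = 7; the AND mask on y is a valid
// destination mask, so the OR is replaced by BFMWri y, x, 28, 7, i.e.
// "bfi wD, wN, #4, #8".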

SDNode *AArch64DAGToDAGISel::SelectBitfieldInsertOp(SDNode *N) {
  if (N->getOpcode() != ISD::OR)
    return nullptr;

  unsigned Opc;
  unsigned LSB, MSB;
  SDValue Opd0, Opd1;

  if (!isBitfieldInsertOpFromOr(N, Opc, Opd0, Opd1, LSB, MSB, CurDAG))
    return nullptr;

  EVT VT = N->getValueType(0);
  SDValue Ops[] = { Opd0,
                    Opd1,
                    CurDAG->getTargetConstant(LSB, VT),
                    CurDAG->getTargetConstant(MSB, VT) };
  return CurDAG->SelectNodeTo(N, Opc, VT, Ops);
}

SDNode *AArch64DAGToDAGISel::SelectLIBM(SDNode *N) {
  EVT VT = N->getValueType(0);
  unsigned Variant;
  unsigned Opc;
  unsigned FRINTXOpcs[] = { AArch64::FRINTXSr, AArch64::FRINTXDr };

  if (VT == MVT::f32) {
    Variant = 0;
  } else if (VT == MVT::f64) {
    Variant = 1;
  } else
    return nullptr; // Unrecognized argument type. Fall back on default codegen.

  // Pick the FRINTX variant needed to set the flags.
  unsigned FRINTXOpc = FRINTXOpcs[Variant];

  switch (N->getOpcode()) {
  default:
    return nullptr; // Unrecognized libm ISD node. Fall back on default codegen.
  case ISD::FCEIL: {
    unsigned FRINTPOpcs[] = { AArch64::FRINTPSr, AArch64::FRINTPDr };
    Opc = FRINTPOpcs[Variant];
    break;
  }
  case ISD::FFLOOR: {
    unsigned FRINTMOpcs[] = { AArch64::FRINTMSr, AArch64::FRINTMDr };
    Opc = FRINTMOpcs[Variant];
    break;
  }
  case ISD::FTRUNC: {
    unsigned FRINTZOpcs[] = { AArch64::FRINTZSr, AArch64::FRINTZDr };
    Opc = FRINTZOpcs[Variant];
    break;
  }
  case ISD::FROUND: {
    unsigned FRINTAOpcs[] = { AArch64::FRINTASr, AArch64::FRINTADr };
    Opc = FRINTAOpcs[Variant];
    break;
  }
  }

  SDLoc dl(N);
  SDValue In = N->getOperand(0);
  SmallVector<SDValue, 2> Ops;
  Ops.push_back(In);

  if (!TM.Options.UnsafeFPMath) {
    SDNode *FRINTX = CurDAG->getMachineNode(FRINTXOpc, dl, VT, MVT::Glue, In);
    Ops.push_back(SDValue(FRINTX, 1));
  }

  return CurDAG->getMachineNode(Opc, dl, VT, Ops);
}
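
// Worked example (illustrative; not from the original source): an f32
// ISD::FCEIL selects FRINTPSr. When UnsafeFPMath is off, a glued FRINTXSr on
// the same input is emitted first, presumably so that the inexact
// floating-point exception flag is raised just as libm's ceilf would raise
// it; the FRINTX result itself is discarded and only its glue is consumed.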

bool
AArch64DAGToDAGISel::SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos,
                                              unsigned RegWidth) {
  APFloat FVal(0.0);
  if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N))
    FVal = CN->getValueAPF();
  else if (LoadSDNode *LN = dyn_cast<LoadSDNode>(N)) {
    // Some otherwise illegal constants are allowed in this case.
    if (LN->getOperand(1).getOpcode() != AArch64ISD::ADDlow ||
        !isa<ConstantPoolSDNode>(LN->getOperand(1)->getOperand(1)))
      return false;

    ConstantPoolSDNode *CN =
        dyn_cast<ConstantPoolSDNode>(LN->getOperand(1)->getOperand(1));
    FVal = cast<ConstantFP>(CN->getConstVal())->getValueAPF();
  } else
    return false;

  // An FCVT[SU] instruction performs: convertToInt(Val * 2^fbits) where fbits
  // is between 1 and 32 for a destination w-register, or 1 and 64 for an
  // x-register.
  //
  // By this stage, we've detected (fp_to_[su]int (fmul Val, THIS_NODE)) so we
  // want THIS_NODE to be 2^fbits. This is much easier to deal with using
  // integers.
  bool IsExact;

  // fbits is between 1 and 64 in the worst case, which means the fmul
  // could have 2^64 as an actual operand. We need 65 bits of precision.
  APSInt IntVal(65, true);
  FVal.convertToInteger(IntVal, APFloat::rmTowardZero, &IsExact);

  // N.b. isPowerOf2 also checks for > 0.
  if (!IsExact || !IntVal.isPowerOf2()) return false;
  unsigned FBits = IntVal.logBase2();

  // Checks above should have guaranteed that we haven't lost information in
  // finding FBits, but it must still be in range.
  if (FBits == 0 || FBits > RegWidth) return false;

  FixedPos = CurDAG->getTargetConstant(FBits, MVT::i32);
  return true;
}
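
// Worked example (illustrative; not from the original source): for
// (fp_to_sint (fmul x, 16.0)) targeting a w-register, the constant converts
// exactly to the integer 16, a power of two, so FBits = 4 and the pair can
// then be selected as a single "fcvtzs wD, sN, #4".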

SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) {
  // Dump information about the Node being selected.
  DEBUG(errs() << "Selecting: ");
  DEBUG(Node->dump(CurDAG));
  DEBUG(errs() << "\n");

  // If we have a custom node, we already have selected!
  if (Node->isMachineOpcode()) {
    DEBUG(errs() << "== "; Node->dump(CurDAG); errs() << "\n");
    Node->setNodeId(-1);
    return nullptr;
  }

  // A few custom selection cases.
  SDNode *ResNode = nullptr;
  EVT VT = Node->getValueType(0);

  switch (Node->getOpcode()) {
  default:
    break;

  case ISD::ADD:
    if (SDNode *I = SelectMLAV64LaneV128(Node))
      return I;
    break;

  case ISD::LOAD: {
    // Try to select as an indexed load. Fall through to normal processing
    // if we can't.
    bool Done = false;
    SDNode *I = SelectIndexedLoad(Node, Done);
    if (Done)
      return I;
    break;
  }

  case ISD::SRL:
  case ISD::AND:
  case ISD::SRA:
    if (SDNode *I = SelectBitfieldExtractOp(Node))
      return I;
    break;

  case ISD::OR:
    if (SDNode *I = SelectBitfieldInsertOp(Node))
      return I;
    break;

  case ISD::EXTRACT_VECTOR_ELT: {
    // Extracting lane zero is a special case where we can just use a plain
    // EXTRACT_SUBREG instruction, which will become FMOV. This is easier for
    // the rest of the compiler, especially the register allocator and copy
    // propagation, to reason about, so is preferred when it's possible to
    // use it.
    ConstantSDNode *LaneNode = cast<ConstantSDNode>(Node->getOperand(1));
    // Bail and use the default Select() for non-zero lanes.
    if (LaneNode->getZExtValue() != 0)
      break;
    // If the element type is not the same as the result type, likewise
    // bail and use the default Select(), as there's more to do than just
    // a cross-class COPY. This catches extracts of i8 and i16 elements
    // since they will need an explicit zext.
    if (VT != Node->getOperand(0).getValueType().getVectorElementType())
      break;
    unsigned SubReg;
    switch (Node->getOperand(0)
                .getValueType()
                .getVectorElementType()
                .getSizeInBits()) {
    default:
      llvm_unreachable("Unexpected vector element type!");
    case 64:
      SubReg = AArch64::dsub;
      break;
    case 32:
      SubReg = AArch64::ssub;
      break;
    case 16:
      SubReg = AArch64::hsub;
      break;
    case 8:
      llvm_unreachable("unexpected zext-requiring extract element!");
    }
    SDValue Extract = CurDAG->getTargetExtractSubreg(SubReg, SDLoc(Node), VT,
                                                     Node->getOperand(0));
    DEBUG(dbgs() << "ISEL: Custom selection!\n=> ");
    DEBUG(Extract->dumpr(CurDAG));
    DEBUG(dbgs() << "\n");
    return Extract.getNode();
  }
  case ISD::Constant: {
    // Materialize zero constants as copies from WZR/XZR. This allows
    // the coalescer to propagate these into other instructions.
    ConstantSDNode *ConstNode = cast<ConstantSDNode>(Node);
    if (ConstNode->isNullValue()) {
      if (VT == MVT::i32)
        return CurDAG->getCopyFromReg(CurDAG->getEntryNode(), SDLoc(Node),
                                      AArch64::WZR, MVT::i32).getNode();
      else if (VT == MVT::i64)
        return CurDAG->getCopyFromReg(CurDAG->getEntryNode(), SDLoc(Node),
                                      AArch64::XZR, MVT::i64).getNode();
    }
    break;
  }

  case ISD::FrameIndex: {
    // Selects to ADDXri FI, 0 which in turn will become ADDXri SP, imm.
    int FI = cast<FrameIndexSDNode>(Node)->getIndex();
    unsigned Shifter = AArch64_AM::getShifterImm(AArch64_AM::LSL, 0);
    const TargetLowering *TLI = getTargetLowering();
    SDValue TFI = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy());
    SDValue Ops[] = { TFI, CurDAG->getTargetConstant(0, MVT::i32),
                      CurDAG->getTargetConstant(Shifter, MVT::i32) };
    return CurDAG->SelectNodeTo(Node, AArch64::ADDXri, MVT::i64, Ops);
  }
  case ISD::INTRINSIC_W_CHAIN: {
    unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
    switch (IntNo) {
    default:
      break;
    case Intrinsic::aarch64_ldaxp:
    case Intrinsic::aarch64_ldxp: {
      unsigned Op =
          IntNo == Intrinsic::aarch64_ldaxp ? AArch64::LDAXPX : AArch64::LDXPX;
      SDValue MemAddr = Node->getOperand(2);
      SDLoc DL(Node);
      SDValue Chain = Node->getOperand(0);

      SDNode *Ld = CurDAG->getMachineNode(Op, DL, MVT::i64, MVT::i64,
                                          MVT::Other, MemAddr, Chain);

      // Transfer memoperands.
      MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
      MemOp[0] = cast<MemIntrinsicSDNode>(Node)->getMemOperand();
      cast<MachineSDNode>(Ld)->setMemRefs(MemOp, MemOp + 1);
      return Ld;
    }
    case Intrinsic::aarch64_stlxp:
    case Intrinsic::aarch64_stxp: {
      unsigned Op =
          IntNo == Intrinsic::aarch64_stlxp ? AArch64::STLXPX : AArch64::STXPX;
      SDLoc DL(Node);
      SDValue Chain = Node->getOperand(0);
      SDValue ValLo = Node->getOperand(2);
      SDValue ValHi = Node->getOperand(3);
      SDValue MemAddr = Node->getOperand(4);

      // Place arguments in the right order.
      SmallVector<SDValue, 7> Ops;
      Ops.push_back(ValLo);
      Ops.push_back(ValHi);
      Ops.push_back(MemAddr);
      Ops.push_back(Chain);

      SDNode *St = CurDAG->getMachineNode(Op, DL, MVT::i32, MVT::Other, Ops);
      // Transfer memoperands.
      MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
      MemOp[0] = cast<MemIntrinsicSDNode>(Node)->getMemOperand();
      cast<MachineSDNode>(St)->setMemRefs(MemOp, MemOp + 1);

      return St;
    }
    case Intrinsic::aarch64_neon_ld1x2:
      if (VT == MVT::v8i8)
        return SelectLoad(Node, 2, AArch64::LD1Twov8b, AArch64::dsub0);
      else if (VT == MVT::v16i8)
        return SelectLoad(Node, 2, AArch64::LD1Twov16b, AArch64::qsub0);
      else if (VT == MVT::v4i16 || VT == MVT::v4f16)
        return SelectLoad(Node, 2, AArch64::LD1Twov4h, AArch64::dsub0);
      else if (VT == MVT::v8i16 || VT == MVT::v8f16)
        return SelectLoad(Node, 2, AArch64::LD1Twov8h, AArch64::qsub0);
      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
        return SelectLoad(Node, 2, AArch64::LD1Twov2s, AArch64::dsub0);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return SelectLoad(Node, 2, AArch64::LD1Twov4s, AArch64::qsub0);
      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
        return SelectLoad(Node, 2, AArch64::LD1Twov1d, AArch64::dsub0);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return SelectLoad(Node, 2, AArch64::LD1Twov2d, AArch64::qsub0);
      break;
    case Intrinsic::aarch64_neon_ld1x3:
      if (VT == MVT::v8i8)
        return SelectLoad(Node, 3, AArch64::LD1Threev8b, AArch64::dsub0);
      else if (VT == MVT::v16i8)
        return SelectLoad(Node, 3, AArch64::LD1Threev16b, AArch64::qsub0);
      else if (VT == MVT::v4i16 || VT == MVT::v4f16)
        return SelectLoad(Node, 3, AArch64::LD1Threev4h, AArch64::dsub0);
      else if (VT == MVT::v8i16 || VT == MVT::v8f16)
        return SelectLoad(Node, 3, AArch64::LD1Threev8h, AArch64::qsub0);
      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
        return SelectLoad(Node, 3, AArch64::LD1Threev2s, AArch64::dsub0);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return SelectLoad(Node, 3, AArch64::LD1Threev4s, AArch64::qsub0);
      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
        return SelectLoad(Node, 3, AArch64::LD1Threev1d, AArch64::dsub0);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return SelectLoad(Node, 3, AArch64::LD1Threev2d, AArch64::qsub0);
      break;
    case Intrinsic::aarch64_neon_ld1x4:
      if (VT == MVT::v8i8)
        return SelectLoad(Node, 4, AArch64::LD1Fourv8b, AArch64::dsub0);
      else if (VT == MVT::v16i8)
        return SelectLoad(Node, 4, AArch64::LD1Fourv16b, AArch64::qsub0);
      else if (VT == MVT::v4i16 || VT == MVT::v4f16)
        return SelectLoad(Node, 4, AArch64::LD1Fourv4h, AArch64::dsub0);
      else if (VT == MVT::v8i16 || VT == MVT::v8f16)
        return SelectLoad(Node, 4, AArch64::LD1Fourv8h, AArch64::qsub0);
      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
        return SelectLoad(Node, 4, AArch64::LD1Fourv2s, AArch64::dsub0);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return SelectLoad(Node, 4, AArch64::LD1Fourv4s, AArch64::qsub0);
      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
        return SelectLoad(Node, 4, AArch64::LD1Fourv1d, AArch64::dsub0);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return SelectLoad(Node, 4, AArch64::LD1Fourv2d, AArch64::qsub0);
      break;
    case Intrinsic::aarch64_neon_ld2:
      if (VT == MVT::v8i8)
        return SelectLoad(Node, 2, AArch64::LD2Twov8b, AArch64::dsub0);
      else if (VT == MVT::v16i8)
        return SelectLoad(Node, 2, AArch64::LD2Twov16b, AArch64::qsub0);
      else if (VT == MVT::v4i16 || VT == MVT::v4f16)
        return SelectLoad(Node, 2, AArch64::LD2Twov4h, AArch64::dsub0);
      else if (VT == MVT::v8i16 || VT == MVT::v8f16)
        return SelectLoad(Node, 2, AArch64::LD2Twov8h, AArch64::qsub0);
      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
        return SelectLoad(Node, 2, AArch64::LD2Twov2s, AArch64::dsub0);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return SelectLoad(Node, 2, AArch64::LD2Twov4s, AArch64::qsub0);
      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
        return SelectLoad(Node, 2, AArch64::LD1Twov1d, AArch64::dsub0);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return SelectLoad(Node, 2, AArch64::LD2Twov2d, AArch64::qsub0);
      break;
    case Intrinsic::aarch64_neon_ld3:
      if (VT == MVT::v8i8)
        return SelectLoad(Node, 3, AArch64::LD3Threev8b, AArch64::dsub0);
      else if (VT == MVT::v16i8)
        return SelectLoad(Node, 3, AArch64::LD3Threev16b, AArch64::qsub0);
      else if (VT == MVT::v4i16 || VT == MVT::v4f16)
        return SelectLoad(Node, 3, AArch64::LD3Threev4h, AArch64::dsub0);
      else if (VT == MVT::v8i16 || VT == MVT::v8f16)
        return SelectLoad(Node, 3, AArch64::LD3Threev8h, AArch64::qsub0);
      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
        return SelectLoad(Node, 3, AArch64::LD3Threev2s, AArch64::dsub0);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return SelectLoad(Node, 3, AArch64::LD3Threev4s, AArch64::qsub0);
      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
        return SelectLoad(Node, 3, AArch64::LD1Threev1d, AArch64::dsub0);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return SelectLoad(Node, 3, AArch64::LD3Threev2d, AArch64::qsub0);
      break;
    case Intrinsic::aarch64_neon_ld4:
      if (VT == MVT::v8i8)
        return SelectLoad(Node, 4, AArch64::LD4Fourv8b, AArch64::dsub0);
      else if (VT == MVT::v16i8)
        return SelectLoad(Node, 4, AArch64::LD4Fourv16b, AArch64::qsub0);
      else if (VT == MVT::v4i16 || VT == MVT::v4f16)
        return SelectLoad(Node, 4, AArch64::LD4Fourv4h, AArch64::dsub0);
      else if (VT == MVT::v8i16 || VT == MVT::v8f16)
        return SelectLoad(Node, 4, AArch64::LD4Fourv8h, AArch64::qsub0);
      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
        return SelectLoad(Node, 4, AArch64::LD4Fourv2s, AArch64::dsub0);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return SelectLoad(Node, 4, AArch64::LD4Fourv4s, AArch64::qsub0);
      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
        return SelectLoad(Node, 4, AArch64::LD1Fourv1d, AArch64::dsub0);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return SelectLoad(Node, 4, AArch64::LD4Fourv2d, AArch64::qsub0);
      break;
    case Intrinsic::aarch64_neon_ld2r:
      if (VT == MVT::v8i8)
        return SelectLoad(Node, 2, AArch64::LD2Rv8b, AArch64::dsub0);
      else if (VT == MVT::v16i8)
        return SelectLoad(Node, 2, AArch64::LD2Rv16b, AArch64::qsub0);
      else if (VT == MVT::v4i16 || VT == MVT::v4f16)
        return SelectLoad(Node, 2, AArch64::LD2Rv4h, AArch64::dsub0);
      else if (VT == MVT::v8i16 || VT == MVT::v8f16)
        return SelectLoad(Node, 2, AArch64::LD2Rv8h, AArch64::qsub0);
      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
        return SelectLoad(Node, 2, AArch64::LD2Rv2s, AArch64::dsub0);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return SelectLoad(Node, 2, AArch64::LD2Rv4s, AArch64::qsub0);
      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
        return SelectLoad(Node, 2, AArch64::LD2Rv1d, AArch64::dsub0);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return SelectLoad(Node, 2, AArch64::LD2Rv2d, AArch64::qsub0);
      break;
    case Intrinsic::aarch64_neon_ld3r:
      if (VT == MVT::v8i8)
        return SelectLoad(Node, 3, AArch64::LD3Rv8b, AArch64::dsub0);
      else if (VT == MVT::v16i8)
        return SelectLoad(Node, 3, AArch64::LD3Rv16b, AArch64::qsub0);
      else if (VT == MVT::v4i16 || VT == MVT::v4f16)
        return SelectLoad(Node, 3, AArch64::LD3Rv4h, AArch64::dsub0);
      else if (VT == MVT::v8i16 || VT == MVT::v8f16)
        return SelectLoad(Node, 3, AArch64::LD3Rv8h, AArch64::qsub0);
      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
        return SelectLoad(Node, 3, AArch64::LD3Rv2s, AArch64::dsub0);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return SelectLoad(Node, 3, AArch64::LD3Rv4s, AArch64::qsub0);
      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
        return SelectLoad(Node, 3, AArch64::LD3Rv1d, AArch64::dsub0);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return SelectLoad(Node, 3, AArch64::LD3Rv2d, AArch64::qsub0);
      break;
    case Intrinsic::aarch64_neon_ld4r:
      if (VT == MVT::v8i8)
        return SelectLoad(Node, 4, AArch64::LD4Rv8b, AArch64::dsub0);
      else if (VT == MVT::v16i8)
        return SelectLoad(Node, 4, AArch64::LD4Rv16b, AArch64::qsub0);
      else if (VT == MVT::v4i16 || VT == MVT::v4f16)
        return SelectLoad(Node, 4, AArch64::LD4Rv4h, AArch64::dsub0);
      else if (VT == MVT::v8i16 || VT == MVT::v8f16)
        return SelectLoad(Node, 4, AArch64::LD4Rv8h, AArch64::qsub0);
      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
        return SelectLoad(Node, 4, AArch64::LD4Rv2s, AArch64::dsub0);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return SelectLoad(Node, 4, AArch64::LD4Rv4s, AArch64::qsub0);
      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
        return SelectLoad(Node, 4, AArch64::LD4Rv1d, AArch64::dsub0);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return SelectLoad(Node, 4, AArch64::LD4Rv2d, AArch64::qsub0);
      break;
    case Intrinsic::aarch64_neon_ld2lane:
      if (VT == MVT::v16i8 || VT == MVT::v8i8)
        return SelectLoadLane(Node, 2, AArch64::LD2i8);
      else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
               VT == MVT::v8f16)
        return SelectLoadLane(Node, 2, AArch64::LD2i16);
      else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
               VT == MVT::v2f32)
        return SelectLoadLane(Node, 2, AArch64::LD2i32);
      else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
               VT == MVT::v1f64)
        return SelectLoadLane(Node, 2, AArch64::LD2i64);
      break;
    case Intrinsic::aarch64_neon_ld3lane:
      if (VT == MVT::v16i8 || VT == MVT::v8i8)
        return SelectLoadLane(Node, 3, AArch64::LD3i8);
      else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
               VT == MVT::v8f16)
        return SelectLoadLane(Node, 3, AArch64::LD3i16);
      else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
               VT == MVT::v2f32)
        return SelectLoadLane(Node, 3, AArch64::LD3i32);
      else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
               VT == MVT::v1f64)
        return SelectLoadLane(Node, 3, AArch64::LD3i64);
      break;
    case Intrinsic::aarch64_neon_ld4lane:
      if (VT == MVT::v16i8 || VT == MVT::v8i8)
        return SelectLoadLane(Node, 4, AArch64::LD4i8);
      else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
               VT == MVT::v8f16)
        return SelectLoadLane(Node, 4, AArch64::LD4i16);
      else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
               VT == MVT::v2f32)
        return SelectLoadLane(Node, 4, AArch64::LD4i32);
      else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
               VT == MVT::v1f64)
        return SelectLoadLane(Node, 4, AArch64::LD4i64);
      break;
    }
  } break;
  case ISD::INTRINSIC_WO_CHAIN: {
    unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(0))->getZExtValue();
    switch (IntNo) {
    default:
      break;
    case Intrinsic::aarch64_neon_tbl2:
      return SelectTable(Node, 2, VT == MVT::v8i8 ? AArch64::TBLv8i8Two
                                                  : AArch64::TBLv16i8Two,
                         false);
    case Intrinsic::aarch64_neon_tbl3:
      return SelectTable(Node, 3, VT == MVT::v8i8 ? AArch64::TBLv8i8Three
                                                  : AArch64::TBLv16i8Three,
                         false);
    case Intrinsic::aarch64_neon_tbl4:
      return SelectTable(Node, 4, VT == MVT::v8i8 ? AArch64::TBLv8i8Four
                                                  : AArch64::TBLv16i8Four,
                         false);
    case Intrinsic::aarch64_neon_tbx2:
      return SelectTable(Node, 2, VT == MVT::v8i8 ? AArch64::TBXv8i8Two
                                                  : AArch64::TBXv16i8Two,
                         true);
    case Intrinsic::aarch64_neon_tbx3:
      return SelectTable(Node, 3, VT == MVT::v8i8 ? AArch64::TBXv8i8Three
                                                  : AArch64::TBXv16i8Three,
                         true);
    case Intrinsic::aarch64_neon_tbx4:
      return SelectTable(Node, 4, VT == MVT::v8i8 ? AArch64::TBXv8i8Four
                                                  : AArch64::TBXv16i8Four,
                         true);
    case Intrinsic::aarch64_neon_smull:
    case Intrinsic::aarch64_neon_umull:
      if (SDNode *N = SelectMULLV64LaneV128(IntNo, Node))
        return N;
      break;
    }
    break;
  }
  case ISD::INTRINSIC_VOID: {
    unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
    if (Node->getNumOperands() >= 3)
      VT = Node->getOperand(2)->getValueType(0);
    switch (IntNo) {
    default:
      break;
    case Intrinsic::aarch64_neon_st1x2: {
      if (VT == MVT::v8i8)
        return SelectStore(Node, 2, AArch64::ST1Twov8b);
      else if (VT == MVT::v16i8)
        return SelectStore(Node, 2, AArch64::ST1Twov16b);
      else if (VT == MVT::v4i16 || VT == MVT::v4f16)
        return SelectStore(Node, 2, AArch64::ST1Twov4h);
      else if (VT == MVT::v8i16 || VT == MVT::v8f16)
        return SelectStore(Node, 2, AArch64::ST1Twov8h);
      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
        return SelectStore(Node, 2, AArch64::ST1Twov2s);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return SelectStore(Node, 2, AArch64::ST1Twov4s);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return SelectStore(Node, 2, AArch64::ST1Twov2d);
      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
        return SelectStore(Node, 2, AArch64::ST1Twov1d);
      break;
    }
    case Intrinsic::aarch64_neon_st1x3: {
      if (VT == MVT::v8i8)
        return SelectStore(Node, 3, AArch64::ST1Threev8b);
      else if (VT == MVT::v16i8)
        return SelectStore(Node, 3, AArch64::ST1Threev16b);
      else if (VT == MVT::v4i16 || VT == MVT::v4f16)
        return SelectStore(Node, 3, AArch64::ST1Threev4h);
      else if (VT == MVT::v8i16 || VT == MVT::v8f16)
        return SelectStore(Node, 3, AArch64::ST1Threev8h);
      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
        return SelectStore(Node, 3, AArch64::ST1Threev2s);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return SelectStore(Node, 3, AArch64::ST1Threev4s);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return SelectStore(Node, 3, AArch64::ST1Threev2d);
      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
        return SelectStore(Node, 3, AArch64::ST1Threev1d);
      break;
    }
    case Intrinsic::aarch64_neon_st1x4: {
      if (VT == MVT::v8i8)
        return SelectStore(Node, 4, AArch64::ST1Fourv8b);
      else if (VT == MVT::v16i8)
        return SelectStore(Node, 4, AArch64::ST1Fourv16b);
      else if (VT == MVT::v4i16 || VT == MVT::v4f16)
        return SelectStore(Node, 4, AArch64::ST1Fourv4h);
      else if (VT == MVT::v8i16 || VT == MVT::v8f16)
        return SelectStore(Node, 4, AArch64::ST1Fourv8h);
      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
        return SelectStore(Node, 4, AArch64::ST1Fourv2s);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return SelectStore(Node, 4, AArch64::ST1Fourv4s);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return SelectStore(Node, 4, AArch64::ST1Fourv2d);
      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
        return SelectStore(Node, 4, AArch64::ST1Fourv1d);
      break;
    }
    case Intrinsic::aarch64_neon_st2: {
      if (VT == MVT::v8i8)
        return SelectStore(Node, 2, AArch64::ST2Twov8b);
      else if (VT == MVT::v16i8)
        return SelectStore(Node, 2, AArch64::ST2Twov16b);
      else if (VT == MVT::v4i16 || VT == MVT::v4f16)
        return SelectStore(Node, 2, AArch64::ST2Twov4h);
      else if (VT == MVT::v8i16 || VT == MVT::v8f16)
        return SelectStore(Node, 2, AArch64::ST2Twov8h);
      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
        return SelectStore(Node, 2, AArch64::ST2Twov2s);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return SelectStore(Node, 2, AArch64::ST2Twov4s);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return SelectStore(Node, 2, AArch64::ST2Twov2d);
      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
        return SelectStore(Node, 2, AArch64::ST1Twov1d);
      break;
    }
    case Intrinsic::aarch64_neon_st3: {
      if (VT == MVT::v8i8)
        return SelectStore(Node, 3, AArch64::ST3Threev8b);
      else if (VT == MVT::v16i8)
        return SelectStore(Node, 3, AArch64::ST3Threev16b);
      else if (VT == MVT::v4i16 || VT == MVT::v4f16)
        return SelectStore(Node, 3, AArch64::ST3Threev4h);
      else if (VT == MVT::v8i16 || VT == MVT::v8f16)
        return SelectStore(Node, 3, AArch64::ST3Threev8h);
      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
        return SelectStore(Node, 3, AArch64::ST3Threev2s);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return SelectStore(Node, 3, AArch64::ST3Threev4s);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return SelectStore(Node, 3, AArch64::ST3Threev2d);
      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
        return SelectStore(Node, 3, AArch64::ST1Threev1d);
      break;
    }
    case Intrinsic::aarch64_neon_st4: {
      if (VT == MVT::v8i8)
        return SelectStore(Node, 4, AArch64::ST4Fourv8b);
      else if (VT == MVT::v16i8)
        return SelectStore(Node, 4, AArch64::ST4Fourv16b);
      else if (VT == MVT::v4i16 || VT == MVT::v4f16)
        return SelectStore(Node, 4, AArch64::ST4Fourv4h);
      else if (VT == MVT::v8i16 || VT == MVT::v8f16)
        return SelectStore(Node, 4, AArch64::ST4Fourv8h);
      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
        return SelectStore(Node, 4, AArch64::ST4Fourv2s);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return SelectStore(Node, 4, AArch64::ST4Fourv4s);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return SelectStore(Node, 4, AArch64::ST4Fourv2d);
      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
        return SelectStore(Node, 4, AArch64::ST1Fourv1d);
      break;
    }
    case Intrinsic::aarch64_neon_st2lane: {
      if (VT == MVT::v16i8 || VT == MVT::v8i8)
        return SelectStoreLane(Node, 2, AArch64::ST2i8);
      else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
               VT == MVT::v8f16)
        return SelectStoreLane(Node, 2, AArch64::ST2i16);
      else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
               VT == MVT::v2f32)
        return SelectStoreLane(Node, 2, AArch64::ST2i32);
      else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
               VT == MVT::v1f64)
        return SelectStoreLane(Node, 2, AArch64::ST2i64);
      break;
    }
    case Intrinsic::aarch64_neon_st3lane: {
      if (VT == MVT::v16i8 || VT == MVT::v8i8)
        return SelectStoreLane(Node, 3, AArch64::ST3i8);
      else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
               VT == MVT::v8f16)
        return SelectStoreLane(Node, 3, AArch64::ST3i16);
      else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
               VT == MVT::v2f32)
        return SelectStoreLane(Node, 3, AArch64::ST3i32);
      else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
               VT == MVT::v1f64)
        return SelectStoreLane(Node, 3, AArch64::ST3i64);
      break;
    }
    case Intrinsic::aarch64_neon_st4lane: {
      if (VT == MVT::v16i8 || VT == MVT::v8i8)
        return SelectStoreLane(Node, 4, AArch64::ST4i8);
      else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
               VT == MVT::v8f16)
        return SelectStoreLane(Node, 4, AArch64::ST4i16);
      else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
               VT == MVT::v2f32)
        return SelectStoreLane(Node, 4, AArch64::ST4i32);
      else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
               VT == MVT::v1f64)
        return SelectStoreLane(Node, 4, AArch64::ST4i64);
      break;
    }
    }
  }
2672 case AArch64ISD::LD2post: {
2673 if (VT == MVT::v8i8)
2674 return SelectPostLoad(Node, 2, AArch64::LD2Twov8b_POST, AArch64::dsub0);
2675 else if (VT == MVT::v16i8)
2676 return SelectPostLoad(Node, 2, AArch64::LD2Twov16b_POST, AArch64::qsub0);
2677 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2678 return SelectPostLoad(Node, 2, AArch64::LD2Twov4h_POST, AArch64::dsub0);
2679 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2680 return SelectPostLoad(Node, 2, AArch64::LD2Twov8h_POST, AArch64::qsub0);
2681 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2682 return SelectPostLoad(Node, 2, AArch64::LD2Twov2s_POST, AArch64::dsub0);
2683 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2684 return SelectPostLoad(Node, 2, AArch64::LD2Twov4s_POST, AArch64::qsub0);
2685 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2686 return SelectPostLoad(Node, 2, AArch64::LD1Twov1d_POST, AArch64::dsub0);
2687 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2688 return SelectPostLoad(Node, 2, AArch64::LD2Twov2d_POST, AArch64::qsub0);
2689 break;
2690 }
2691 case AArch64ISD::LD3post: {
2692 if (VT == MVT::v8i8)
2693 return SelectPostLoad(Node, 3, AArch64::LD3Threev8b_POST, AArch64::dsub0);
2694 else if (VT == MVT::v16i8)
2695 return SelectPostLoad(Node, 3, AArch64::LD3Threev16b_POST, AArch64::qsub0);
2696 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2697 return SelectPostLoad(Node, 3, AArch64::LD3Threev4h_POST, AArch64::dsub0);
2698 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2699 return SelectPostLoad(Node, 3, AArch64::LD3Threev8h_POST, AArch64::qsub0);
2700 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2701 return SelectPostLoad(Node, 3, AArch64::LD3Threev2s_POST, AArch64::dsub0);
2702 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2703 return SelectPostLoad(Node, 3, AArch64::LD3Threev4s_POST, AArch64::qsub0);
2704 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2705 return SelectPostLoad(Node, 3, AArch64::LD1Threev1d_POST, AArch64::dsub0);
2706 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2707 return SelectPostLoad(Node, 3, AArch64::LD3Threev2d_POST, AArch64::qsub0);
2708 break;
2709 }
2710 case AArch64ISD::LD4post: {
2711 if (VT == MVT::v8i8)
2712 return SelectPostLoad(Node, 4, AArch64::LD4Fourv8b_POST, AArch64::dsub0);
2713 else if (VT == MVT::v16i8)
2714 return SelectPostLoad(Node, 4, AArch64::LD4Fourv16b_POST, AArch64::qsub0);
2715 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2716 return SelectPostLoad(Node, 4, AArch64::LD4Fourv4h_POST, AArch64::dsub0);
2717 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2718 return SelectPostLoad(Node, 4, AArch64::LD4Fourv8h_POST, AArch64::qsub0);
2719 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2720 return SelectPostLoad(Node, 4, AArch64::LD4Fourv2s_POST, AArch64::dsub0);
2721 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2722 return SelectPostLoad(Node, 4, AArch64::LD4Fourv4s_POST, AArch64::qsub0);
2723 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2724 return SelectPostLoad(Node, 4, AArch64::LD1Fourv1d_POST, AArch64::dsub0);
2725 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2726 return SelectPostLoad(Node, 4, AArch64::LD4Fourv2d_POST, AArch64::qsub0);
2727 break;
2728 }
  case AArch64ISD::LD1x2post: {
    if (VT == MVT::v8i8)
      return SelectPostLoad(Node, 2, AArch64::LD1Twov8b_POST, AArch64::dsub0);
    else if (VT == MVT::v16i8)
      return SelectPostLoad(Node, 2, AArch64::LD1Twov16b_POST, AArch64::qsub0);
    else if (VT == MVT::v4i16 || VT == MVT::v4f16)
      return SelectPostLoad(Node, 2, AArch64::LD1Twov4h_POST, AArch64::dsub0);
    else if (VT == MVT::v8i16 || VT == MVT::v8f16)
      return SelectPostLoad(Node, 2, AArch64::LD1Twov8h_POST, AArch64::qsub0);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostLoad(Node, 2, AArch64::LD1Twov2s_POST, AArch64::dsub0);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostLoad(Node, 2, AArch64::LD1Twov4s_POST, AArch64::qsub0);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostLoad(Node, 2, AArch64::LD1Twov1d_POST, AArch64::dsub0);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostLoad(Node, 2, AArch64::LD1Twov2d_POST, AArch64::qsub0);
    break;
  }
  case AArch64ISD::LD1x3post: {
    if (VT == MVT::v8i8)
      return SelectPostLoad(Node, 3, AArch64::LD1Threev8b_POST, AArch64::dsub0);
    else if (VT == MVT::v16i8)
      return SelectPostLoad(Node, 3, AArch64::LD1Threev16b_POST, AArch64::qsub0);
    else if (VT == MVT::v4i16 || VT == MVT::v4f16)
      return SelectPostLoad(Node, 3, AArch64::LD1Threev4h_POST, AArch64::dsub0);
    else if (VT == MVT::v8i16 || VT == MVT::v8f16)
      return SelectPostLoad(Node, 3, AArch64::LD1Threev8h_POST, AArch64::qsub0);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostLoad(Node, 3, AArch64::LD1Threev2s_POST, AArch64::dsub0);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostLoad(Node, 3, AArch64::LD1Threev4s_POST, AArch64::qsub0);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostLoad(Node, 3, AArch64::LD1Threev1d_POST, AArch64::dsub0);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostLoad(Node, 3, AArch64::LD1Threev2d_POST, AArch64::qsub0);
    break;
  }
  case AArch64ISD::LD1x4post: {
    if (VT == MVT::v8i8)
      return SelectPostLoad(Node, 4, AArch64::LD1Fourv8b_POST, AArch64::dsub0);
    else if (VT == MVT::v16i8)
      return SelectPostLoad(Node, 4, AArch64::LD1Fourv16b_POST, AArch64::qsub0);
    else if (VT == MVT::v4i16 || VT == MVT::v4f16)
      return SelectPostLoad(Node, 4, AArch64::LD1Fourv4h_POST, AArch64::dsub0);
    else if (VT == MVT::v8i16 || VT == MVT::v8f16)
      return SelectPostLoad(Node, 4, AArch64::LD1Fourv8h_POST, AArch64::qsub0);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostLoad(Node, 4, AArch64::LD1Fourv2s_POST, AArch64::dsub0);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostLoad(Node, 4, AArch64::LD1Fourv4s_POST, AArch64::qsub0);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostLoad(Node, 4, AArch64::LD1Fourv1d_POST, AArch64::dsub0);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostLoad(Node, 4, AArch64::LD1Fourv2d_POST, AArch64::qsub0);
    break;
  }
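  // The LDnDUPpost nodes select the "load and replicate" (LDnR) family,
  // e.g. ld1r { v0.8b }, [x0], #1: each register receives one element,
  // broadcast to all lanes, and the immediate post-increment equals the
  // element size times the number of registers.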
  case AArch64ISD::LD1DUPpost: {
    if (VT == MVT::v8i8)
      return SelectPostLoad(Node, 1, AArch64::LD1Rv8b_POST, AArch64::dsub0);
    else if (VT == MVT::v16i8)
      return SelectPostLoad(Node, 1, AArch64::LD1Rv16b_POST, AArch64::qsub0);
    else if (VT == MVT::v4i16 || VT == MVT::v4f16)
      return SelectPostLoad(Node, 1, AArch64::LD1Rv4h_POST, AArch64::dsub0);
    else if (VT == MVT::v8i16 || VT == MVT::v8f16)
      return SelectPostLoad(Node, 1, AArch64::LD1Rv8h_POST, AArch64::qsub0);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostLoad(Node, 1, AArch64::LD1Rv2s_POST, AArch64::dsub0);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostLoad(Node, 1, AArch64::LD1Rv4s_POST, AArch64::qsub0);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostLoad(Node, 1, AArch64::LD1Rv1d_POST, AArch64::dsub0);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostLoad(Node, 1, AArch64::LD1Rv2d_POST, AArch64::qsub0);
    break;
  }
  case AArch64ISD::LD2DUPpost: {
    if (VT == MVT::v8i8)
      return SelectPostLoad(Node, 2, AArch64::LD2Rv8b_POST, AArch64::dsub0);
    else if (VT == MVT::v16i8)
      return SelectPostLoad(Node, 2, AArch64::LD2Rv16b_POST, AArch64::qsub0);
    else if (VT == MVT::v4i16 || VT == MVT::v4f16)
      return SelectPostLoad(Node, 2, AArch64::LD2Rv4h_POST, AArch64::dsub0);
    else if (VT == MVT::v8i16 || VT == MVT::v8f16)
      return SelectPostLoad(Node, 2, AArch64::LD2Rv8h_POST, AArch64::qsub0);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostLoad(Node, 2, AArch64::LD2Rv2s_POST, AArch64::dsub0);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostLoad(Node, 2, AArch64::LD2Rv4s_POST, AArch64::qsub0);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostLoad(Node, 2, AArch64::LD2Rv1d_POST, AArch64::dsub0);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostLoad(Node, 2, AArch64::LD2Rv2d_POST, AArch64::qsub0);
    break;
  }
  case AArch64ISD::LD3DUPpost: {
    if (VT == MVT::v8i8)
      return SelectPostLoad(Node, 3, AArch64::LD3Rv8b_POST, AArch64::dsub0);
    else if (VT == MVT::v16i8)
      return SelectPostLoad(Node, 3, AArch64::LD3Rv16b_POST, AArch64::qsub0);
    else if (VT == MVT::v4i16 || VT == MVT::v4f16)
      return SelectPostLoad(Node, 3, AArch64::LD3Rv4h_POST, AArch64::dsub0);
    else if (VT == MVT::v8i16 || VT == MVT::v8f16)
      return SelectPostLoad(Node, 3, AArch64::LD3Rv8h_POST, AArch64::qsub0);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostLoad(Node, 3, AArch64::LD3Rv2s_POST, AArch64::dsub0);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostLoad(Node, 3, AArch64::LD3Rv4s_POST, AArch64::qsub0);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostLoad(Node, 3, AArch64::LD3Rv1d_POST, AArch64::dsub0);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostLoad(Node, 3, AArch64::LD3Rv2d_POST, AArch64::qsub0);
    break;
  }
  case AArch64ISD::LD4DUPpost: {
    if (VT == MVT::v8i8)
      return SelectPostLoad(Node, 4, AArch64::LD4Rv8b_POST, AArch64::dsub0);
    else if (VT == MVT::v16i8)
      return SelectPostLoad(Node, 4, AArch64::LD4Rv16b_POST, AArch64::qsub0);
    else if (VT == MVT::v4i16 || VT == MVT::v4f16)
      return SelectPostLoad(Node, 4, AArch64::LD4Rv4h_POST, AArch64::dsub0);
    else if (VT == MVT::v8i16 || VT == MVT::v8f16)
      return SelectPostLoad(Node, 4, AArch64::LD4Rv8h_POST, AArch64::qsub0);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostLoad(Node, 4, AArch64::LD4Rv2s_POST, AArch64::dsub0);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostLoad(Node, 4, AArch64::LD4Rv4s_POST, AArch64::qsub0);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostLoad(Node, 4, AArch64::LD4Rv1d_POST, AArch64::dsub0);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostLoad(Node, 4, AArch64::LD4Rv2d_POST, AArch64::qsub0);
    break;
  }
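  // Lane post-increment loads insert one element into an existing vector,
  // so each element size needs only a single opcode: the selection helper
  // widens 64-bit operands to a 128-bit vector first, since the lane
  // instructions only address Q registers.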
  case AArch64ISD::LD1LANEpost: {
    if (VT == MVT::v16i8 || VT == MVT::v8i8)
      return SelectPostLoadLane(Node, 1, AArch64::LD1i8_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
             VT == MVT::v8f16)
      return SelectPostLoadLane(Node, 1, AArch64::LD1i16_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
             VT == MVT::v2f32)
      return SelectPostLoadLane(Node, 1, AArch64::LD1i32_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
             VT == MVT::v1f64)
      return SelectPostLoadLane(Node, 1, AArch64::LD1i64_POST);
    break;
  }
  case AArch64ISD::LD2LANEpost: {
    if (VT == MVT::v16i8 || VT == MVT::v8i8)
      return SelectPostLoadLane(Node, 2, AArch64::LD2i8_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
             VT == MVT::v8f16)
      return SelectPostLoadLane(Node, 2, AArch64::LD2i16_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
             VT == MVT::v2f32)
      return SelectPostLoadLane(Node, 2, AArch64::LD2i32_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
             VT == MVT::v1f64)
      return SelectPostLoadLane(Node, 2, AArch64::LD2i64_POST);
    break;
  }
  case AArch64ISD::LD3LANEpost: {
    if (VT == MVT::v16i8 || VT == MVT::v8i8)
      return SelectPostLoadLane(Node, 3, AArch64::LD3i8_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
             VT == MVT::v8f16)
      return SelectPostLoadLane(Node, 3, AArch64::LD3i16_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
             VT == MVT::v2f32)
      return SelectPostLoadLane(Node, 3, AArch64::LD3i32_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
             VT == MVT::v1f64)
      return SelectPostLoadLane(Node, 3, AArch64::LD3i64_POST);
    break;
  }
  case AArch64ISD::LD4LANEpost: {
    if (VT == MVT::v16i8 || VT == MVT::v8i8)
      return SelectPostLoadLane(Node, 4, AArch64::LD4i8_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
             VT == MVT::v8f16)
      return SelectPostLoadLane(Node, 4, AArch64::LD4i16_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
             VT == MVT::v2f32)
      return SelectPostLoadLane(Node, 4, AArch64::LD4i32_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
             VT == MVT::v1f64)
      return SelectPostLoadLane(Node, 4, AArch64::LD4i64_POST);
    break;
  }
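  // The post-increment stores mirror the loads above, but a store produces
  // no vector result, so each case re-reads VT from operand 1 (the first
  // stored vector) instead of using the node's result type. As with the
  // loads, v1i64/v1f64 fall back to the multi-register ST1 forms: st2/st3/
  // st4 have no .1d arrangement, and interleaving is a no-op there anyway.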
  case AArch64ISD::ST2post: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v8i8)
      return SelectPostStore(Node, 2, AArch64::ST2Twov8b_POST);
    else if (VT == MVT::v16i8)
      return SelectPostStore(Node, 2, AArch64::ST2Twov16b_POST);
    else if (VT == MVT::v4i16 || VT == MVT::v4f16)
      return SelectPostStore(Node, 2, AArch64::ST2Twov4h_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v8f16)
      return SelectPostStore(Node, 2, AArch64::ST2Twov8h_POST);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostStore(Node, 2, AArch64::ST2Twov2s_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostStore(Node, 2, AArch64::ST2Twov4s_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostStore(Node, 2, AArch64::ST2Twov2d_POST);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostStore(Node, 2, AArch64::ST1Twov1d_POST);
    break;
  }
  case AArch64ISD::ST3post: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v8i8)
      return SelectPostStore(Node, 3, AArch64::ST3Threev8b_POST);
    else if (VT == MVT::v16i8)
      return SelectPostStore(Node, 3, AArch64::ST3Threev16b_POST);
    else if (VT == MVT::v4i16 || VT == MVT::v4f16)
      return SelectPostStore(Node, 3, AArch64::ST3Threev4h_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v8f16)
      return SelectPostStore(Node, 3, AArch64::ST3Threev8h_POST);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostStore(Node, 3, AArch64::ST3Threev2s_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostStore(Node, 3, AArch64::ST3Threev4s_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostStore(Node, 3, AArch64::ST3Threev2d_POST);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostStore(Node, 3, AArch64::ST1Threev1d_POST);
    break;
  }
  case AArch64ISD::ST4post: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v8i8)
      return SelectPostStore(Node, 4, AArch64::ST4Fourv8b_POST);
    else if (VT == MVT::v16i8)
      return SelectPostStore(Node, 4, AArch64::ST4Fourv16b_POST);
    else if (VT == MVT::v4i16 || VT == MVT::v4f16)
      return SelectPostStore(Node, 4, AArch64::ST4Fourv4h_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v8f16)
      return SelectPostStore(Node, 4, AArch64::ST4Fourv8h_POST);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostStore(Node, 4, AArch64::ST4Fourv2s_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostStore(Node, 4, AArch64::ST4Fourv4s_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostStore(Node, 4, AArch64::ST4Fourv2d_POST);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostStore(Node, 4, AArch64::ST1Fourv1d_POST);
    break;
  }
  case AArch64ISD::ST1x2post: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v8i8)
      return SelectPostStore(Node, 2, AArch64::ST1Twov8b_POST);
    else if (VT == MVT::v16i8)
      return SelectPostStore(Node, 2, AArch64::ST1Twov16b_POST);
    else if (VT == MVT::v4i16 || VT == MVT::v4f16)
      return SelectPostStore(Node, 2, AArch64::ST1Twov4h_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v8f16)
      return SelectPostStore(Node, 2, AArch64::ST1Twov8h_POST);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostStore(Node, 2, AArch64::ST1Twov2s_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostStore(Node, 2, AArch64::ST1Twov4s_POST);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostStore(Node, 2, AArch64::ST1Twov1d_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostStore(Node, 2, AArch64::ST1Twov2d_POST);
    break;
  }
  case AArch64ISD::ST1x3post: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v8i8)
      return SelectPostStore(Node, 3, AArch64::ST1Threev8b_POST);
    else if (VT == MVT::v16i8)
      return SelectPostStore(Node, 3, AArch64::ST1Threev16b_POST);
    else if (VT == MVT::v4i16 || VT == MVT::v4f16)
      return SelectPostStore(Node, 3, AArch64::ST1Threev4h_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v8f16)
      return SelectPostStore(Node, 3, AArch64::ST1Threev8h_POST);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostStore(Node, 3, AArch64::ST1Threev2s_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostStore(Node, 3, AArch64::ST1Threev4s_POST);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostStore(Node, 3, AArch64::ST1Threev1d_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostStore(Node, 3, AArch64::ST1Threev2d_POST);
    break;
  }
  case AArch64ISD::ST1x4post: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v8i8)
      return SelectPostStore(Node, 4, AArch64::ST1Fourv8b_POST);
    else if (VT == MVT::v16i8)
      return SelectPostStore(Node, 4, AArch64::ST1Fourv16b_POST);
    else if (VT == MVT::v4i16 || VT == MVT::v4f16)
      return SelectPostStore(Node, 4, AArch64::ST1Fourv4h_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v8f16)
      return SelectPostStore(Node, 4, AArch64::ST1Fourv8h_POST);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostStore(Node, 4, AArch64::ST1Fourv2s_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostStore(Node, 4, AArch64::ST1Fourv4s_POST);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostStore(Node, 4, AArch64::ST1Fourv1d_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostStore(Node, 4, AArch64::ST1Fourv2d_POST);
    break;
  }
  case AArch64ISD::ST2LANEpost: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v16i8 || VT == MVT::v8i8)
      return SelectPostStoreLane(Node, 2, AArch64::ST2i8_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
             VT == MVT::v8f16)
      return SelectPostStoreLane(Node, 2, AArch64::ST2i16_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
             VT == MVT::v2f32)
      return SelectPostStoreLane(Node, 2, AArch64::ST2i32_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
             VT == MVT::v1f64)
      return SelectPostStoreLane(Node, 2, AArch64::ST2i64_POST);
    break;
  }
  case AArch64ISD::ST3LANEpost: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v16i8 || VT == MVT::v8i8)
      return SelectPostStoreLane(Node, 3, AArch64::ST3i8_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
             VT == MVT::v8f16)
      return SelectPostStoreLane(Node, 3, AArch64::ST3i16_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
             VT == MVT::v2f32)
      return SelectPostStoreLane(Node, 3, AArch64::ST3i32_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
             VT == MVT::v1f64)
      return SelectPostStoreLane(Node, 3, AArch64::ST3i64_POST);
    break;
  }
  case AArch64ISD::ST4LANEpost: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v16i8 || VT == MVT::v8i8)
      return SelectPostStoreLane(Node, 4, AArch64::ST4i8_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
             VT == MVT::v8f16)
      return SelectPostStoreLane(Node, 4, AArch64::ST4i16_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
             VT == MVT::v2f32)
      return SelectPostStoreLane(Node, 4, AArch64::ST4i32_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
             VT == MVT::v1f64)
      return SelectPostStoreLane(Node, 4, AArch64::ST4i64_POST);
    break;
  }

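  // Give the FP rounding nodes a chance to become single FRINT
  // instructions in SelectLIBM before the generated matcher runs. As a
  // sketch of the expected mapping (standard AArch64 semantics):
  //   FCEIL  -> frintp (toward +inf)    FFLOOR -> frintm (toward -inf)
  //   FTRUNC -> frintz (toward zero)    FROUND -> frinta (nearest, ties away)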
  case ISD::FCEIL:
  case ISD::FFLOOR:
  case ISD::FTRUNC:
  case ISD::FROUND:
    if (SDNode *I = SelectLIBM(Node))
      return I;
    break;
  }

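  // Anything not handled explicitly above falls through to SelectCode, the
  // matcher TableGen generates from the target's instruction patterns.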
  // Select the default instruction
  ResNode = SelectCode(Node);

  DEBUG(errs() << "=> ");
  if (ResNode == nullptr || ResNode == Node)
    DEBUG(Node->dump(CurDAG));
  else
    DEBUG(ResNode->dump(CurDAG));
  DEBUG(errs() << "\n");

  return ResNode;
}

/// createAArch64ISelDag - This pass converts a legalized DAG into an
/// AArch64-specific DAG, ready for instruction scheduling.
FunctionPass *llvm::createAArch64ISelDag(AArch64TargetMachine &TM,
                                         CodeGenOpt::Level OptLevel) {
  return new AArch64DAGToDAGISel(TM, OptLevel);
}

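// A minimal sketch of how this factory is typically wired up, assuming the
// usual TargetPassConfig arrangement (see AArch64TargetMachine.cpp for the
// authoritative code):
//
//   bool AArch64PassConfig::addInstSelector() {
//     addPass(createAArch64ISelDag(getAArch64TargetMachine(), getOptLevel()));
//     return false;
//   }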