//===-- SystemZISelLowering.h - SystemZ DAG lowering interface --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that SystemZ uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_SYSTEMZ_SYSTEMZISELLOWERING_H
#define LLVM_LIB_TARGET_SYSTEMZ_SYSTEMZISELLOWERING_H

#include "SystemZ.h"
#include "SystemZInstrInfo.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLowering.h"

namespace llvm {
namespace SystemZISD {
enum NodeType : unsigned {
  FIRST_NUMBER = ISD::BUILTIN_OP_END,

  // Return with a flag operand. Operand 0 is the chain operand.
  RET_FLAG,

  // Calls a function. Operand 0 is the chain operand and operand 1
  // is the target address. The arguments start at operand 2.
  // There is an optional glue operand at the end.
  CALL,
  SIBCALL,

  // TLS calls. Like regular calls, except operand 1 is the TLS symbol.
  // (The call target is implicitly __tls_get_offset.)
  TLS_GDCALL,
  TLS_LDCALL,

  // Wraps a TargetGlobalAddress that should be loaded using PC-relative
  // accesses (LARL). Operand 0 is the address.
  PCREL_WRAPPER,

  // Used in cases where an offset is applied to a TargetGlobalAddress.
  // Operand 0 is the full TargetGlobalAddress and operand 1 is a
  // PCREL_WRAPPER for an anchor point. This is used so that we can
  // cheaply refer to either the full address or the anchor point
  // as a register base.
  PCREL_OFFSET,

  // Integer absolute.
  IABS,

  // Integer comparisons. There are three operands: the two values
  // to compare, and an integer of type SystemZICMP.
  ICMP,

  // Floating-point comparisons. The two operands are the values to compare.
  FCMP,

  // Test under mask. The first operand is ANDed with the second operand
  // and the condition codes are set on the result. The third operand is
  // a boolean that is true if the condition codes need to distinguish
  // between CCMASK_TM_MIXED_MSB_0 and CCMASK_TM_MIXED_MSB_1 (which the
  // register forms do but the memory forms don't).
  TM,

  // Branches if a condition is true. Operand 0 is the chain operand;
  // operand 1 is the 4-bit condition-code mask, with bit N in
  // big-endian order meaning "branch if CC=N"; operand 2 is the
  // target block and operand 3 is the flag operand.
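  // For example (illustrative), a mask of 0xA has bits 0 and 2 set in this
  // big-endian numbering, so the branch is taken when CC is 0 or 2; this is
  // the same CC-mask encoding as the M1 field of the BRC instruction.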
  BR_CCMASK,

  // Selects between operand 0 and operand 1. Operand 2 is the
  // mask of condition-code values for which operand 0 should be
  // chosen over operand 1; it has the same form as BR_CCMASK.
  // Operand 3 is the flag operand.
  SELECT_CCMASK,

  // Evaluates to the gap between the stack pointer and the
  // base of the dynamically-allocatable area.
  ADJDYNALLOC,

  // For allocating stack space when using the stack clash protector.
  // Allocation is performed in blocks, and each block is probed.
  PROBED_ALLOCA,

  // Count number of bits set in operand 0 per byte.
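  // For example (illustrative), an i64 operand of 0x0103 yields 0x0102:
  // the byte 0x01 has one bit set, the byte 0x03 has two, and every other
  // byte of the result is zero.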
  POPCNT,

  // Wrappers around the ISD opcodes of the same name. The output is GR128.
  // Input operands may be GR64 or GR32, depending on the instruction.
  SMUL_LOHI,
  UMUL_LOHI,
  SDIVREM,
  UDIVREM,

  // Add/subtract with overflow/carry. These have the same operands as
  // the corresponding standard operations, except with the carry flag
  // replaced by a condition code value.
  SADDO, SSUBO, UADDO, USUBO, ADDCARRY, SUBCARRY,

  // Set the condition code from a boolean value in operand 0.
  // Operand 1 is a mask of all condition-code values that may result from
  // this operation, and operand 2 is a mask of the condition-code values
  // that may result if the boolean is true.
  // Note that this operation is always optimized away; we will never
  // generate any code for it.
  GET_CCMASK,

  // Use a series of MVCs to copy bytes from one memory location to another.
  // The operands are:
  // - the target address
  // - the source address
  // - the constant length
  //
  // This isn't a memory opcode because we'd need to attach two
  // MachineMemOperands rather than one.
  MVC,

  // Like MVC, but implemented as a loop that handles X*256 bytes
  // followed by straight-line code to handle the rest (if any).
  // The value of X is passed as an additional operand.
  MVC_LOOP,

  // Similar to MVC and MVC_LOOP, but for logic operations (AND, OR, XOR).
  NC,
  NC_LOOP,
  OC,
  OC_LOOP,
  XC,
  XC_LOOP,

  // Use CLC to compare two blocks of memory, with the same comments
  // as for MVC and MVC_LOOP.
  CLC,
  CLC_LOOP,

  // Use an MVST-based sequence to implement stpcpy().
  STPCPY,

  // Use a CLST-based sequence to implement strcmp(). The two input operands
  // are the addresses of the strings to compare.
  STRCMP,

  // Use an SRST-based sequence to search a block of memory. The first
  // operand is the end address, the second is the start, and the third
  // is the character to search for. CC is set to 1 on success and 2
  // on failure.
  SEARCH_STRING,

  // Store the CC value in bits 29 and 28 of an integer.
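  // For example (illustrative), a CC of 3 produces the integer value
  // 0x30000000, so a logical shift right by 28 recovers the raw CC value.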
  IPM,

  // Compiler barrier only; generate a no-op.
  MEMBARRIER,

  // Transaction begin. The first operand is the chain, the second
  // the TDB pointer, and the third the immediate control field.
  // Returns CC value and chain.
  TBEGIN,
  TBEGIN_NOFLOAT,

  // Transaction end. Just the chain operand. Returns CC value and chain.
  TEND,

  // Create a vector constant by filling byte N of the result with bit
  // 15-N of the single operand.
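  // For example (illustrative), an operand of 0x8001 has bits 15 and 0 set,
  // so bytes 0 and 15 of the result are 0xff and all other bytes are zero.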
  BYTE_MASK,

  // Create a vector constant by replicating an element-sized RISBG-style
  // mask. The first operand specifies the starting set bit and the second
  // operand specifies the ending set bit. Both operands count from the MSB
  // of the element.
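  // For example (illustrative), with 8-bit elements a starting bit of 2 and
  // an ending bit of 5 produce the mask 0b00111100 (0x3c), replicated into
  // every byte of the result.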
  ROTATE_MASK,

  // Replicate a GPR scalar value into all elements of a vector.
  REPLICATE,

  // Create a vector from two i64 GPRs.
  JOIN_DWORDS,

  // Replicate one element of a vector into all elements. The first operand
  // is the vector and the second is the index of the element to replicate.
  SPLAT,

  // Interleave elements from the high half of operand 0 and the high half
  // of operand 1.
  MERGE_HIGH,

  // Likewise for the low halves.
  MERGE_LOW,

  // Concatenate the vectors in the first two operands, shift them left
  // by the third operand, and take the first half of the result.
  SHL_DOUBLE,

  // Take one element of the first v2i64 operand and one element of the
  // second v2i64 operand and concatenate them to form a v2i64 result.
  // The third operand is a 4-bit value of the form 0A0B, where A and B
  // are the element selectors for the first and second operands
  // respectively.
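  // For example (illustrative), a third operand of 4 (binary 0100) selects
  // element 1 of the first operand and element 0 of the second; with both
  // inputs the same vector this swaps its two doublewords.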
  PERMUTE_DWORDS,

  // Perform a general vector permute on vector operands 0 and 1.
  // Each byte of operand 2 controls the corresponding byte of the result,
  // in the same way as a byte-level VECTOR_SHUFFLE mask.
  PERMUTE,

  // Pack vector operands 0 and 1 into a single vector with half-sized
  // elements.
  PACK,

  // Likewise, but saturate the result and set CC. PACKS_CC does signed
  // saturation and PACKLS_CC does unsigned saturation.
  PACKS_CC,
  PACKLS_CC,

  // Unpack the first half of vector operand 0 into double-sized elements.
  // UNPACK_HIGH sign-extends and UNPACKL_HIGH zero-extends.
  UNPACK_HIGH,
  UNPACKL_HIGH,

  // Likewise for the second half.
  UNPACK_LOW,
  UNPACKL_LOW,

  // Shift each element of vector operand 0 by the number of bits specified
  // by scalar operand 1.
  VSHL_BY_SCALAR,
  VSRL_BY_SCALAR,
  VSRA_BY_SCALAR,

  // For each element of the output type, sum across all sub-elements of
  // operand 0 belonging to the corresponding element, and add in the
  // rightmost sub-element of the corresponding element of operand 1.
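  // For example (illustrative), with a v4i32 result and v16i8 operands,
  // element N of the result is the sum of bytes 4N..4N+3 of operand 0
  // plus byte 4N+3 (the rightmost sub-element) of operand 1.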
  VSUM,

  // Compare integer vector operands 0 and 1 to produce the usual 0/-1
  // vector result. VICMPE is for equality, VICMPH for "signed greater than"
  // and VICMPHL for "unsigned greater than".
  VICMPE,
  VICMPH,
  VICMPHL,

  // Likewise, but also set the condition codes on the result.
  VICMPES,
  VICMPHS,
  VICMPHLS,

  // Compare floating-point vector operands 0 and 1 to produce the usual 0/-1
  // vector result. VFCMPE is for "ordered and equal", VFCMPH for "ordered and
  // greater than" and VFCMPHE for "ordered and greater than or equal to".
  VFCMPE,
  VFCMPH,
  VFCMPHE,

  // Likewise, but also set the condition codes on the result.
  VFCMPES,
  VFCMPHS,
  VFCMPHES,

  // Test floating-point data class for vectors.
  VFTCI,

  // Extend the even f32 elements of vector operand 0 to produce a vector
  // of f64 elements.
  VEXTEND,

  // Round the f64 elements of vector operand 0 to f32s and store them in the
  // even elements of the result.
  VROUND,

  // AND the two vector operands together and set CC based on the result.
  VTM,

  // String operations that set CC as a side-effect.
  VFAE_CC,
  VFAEZ_CC,
  VFEE_CC,
  VFEEZ_CC,
  VFENE_CC,
  VFENEZ_CC,
  VISTR_CC,
  VSTRC_CC,
  VSTRCZ_CC,
  VSTRS_CC,
  VSTRSZ_CC,

  // Test Data Class.
  //
  // Operand 0: the value to test
  // Operand 1: the bit mask
  TDC,

  // Strict variants of scalar floating-point comparisons.
  // Quiet and signaling versions.
  STRICT_FCMP = ISD::FIRST_TARGET_STRICTFP_OPCODE,
  STRICT_FCMPS,

  // Strict variants of vector floating-point comparisons.
  // Quiet and signaling versions.
  STRICT_VFCMPE,
  STRICT_VFCMPH,
  STRICT_VFCMPHE,
  STRICT_VFCMPES,
  STRICT_VFCMPHS,
  STRICT_VFCMPHES,

  // Strict variants of VEXTEND and VROUND.
  STRICT_VEXTEND,
  STRICT_VROUND,

  // Wrappers around the inner loop of an 8- or 16-bit ATOMIC_SWAP or
  // ATOMIC_LOAD_<op>.
  //
  // Operand 0: the address of the containing 32-bit-aligned field
  // Operand 1: the second operand of <op>, in the high bits of an i32
  //            for everything except ATOMIC_SWAPW
  // Operand 2: how many bits to rotate the i32 left to bring the first
  //            operand into the high bits
  // Operand 3: the negative of operand 2, for rotating the other way
  // Operand 4: the width of the field in bits (8 or 16)
  ATOMIC_SWAPW = ISD::FIRST_TARGET_MEMORY_OPCODE,
  ATOMIC_LOADW_ADD,
  ATOMIC_LOADW_SUB,
  ATOMIC_LOADW_AND,
  ATOMIC_LOADW_OR,
  ATOMIC_LOADW_XOR,
  ATOMIC_LOADW_NAND,
  ATOMIC_LOADW_MIN,
  ATOMIC_LOADW_MAX,
  ATOMIC_LOADW_UMIN,
  ATOMIC_LOADW_UMAX,

  // A wrapper around the inner loop of an ATOMIC_CMP_SWAP.
  //
  // Operand 0: the address of the containing 32-bit-aligned field
  // Operand 1: the compare value, in the low bits of an i32
  // Operand 2: the swap value, in the low bits of an i32
  // Operand 3: how many bits to rotate the i32 left to bring the first
  //            operand into the high bits
  // Operand 4: the negative of operand 3, for rotating the other way
  // Operand 5: the width of the field in bits (8 or 16)
  ATOMIC_CMP_SWAPW,

  // Atomic compare-and-swap returning CC value.
  // Val, CC, OUTCHAIN = ATOMIC_CMP_SWAP(INCHAIN, ptr, cmp, swap)
  ATOMIC_CMP_SWAP,

  // 128-bit atomic load.
  // Val, OUTCHAIN = ATOMIC_LOAD_128(INCHAIN, ptr)
  ATOMIC_LOAD_128,

  // 128-bit atomic store.
  // OUTCHAIN = ATOMIC_STORE_128(INCHAIN, val, ptr)
  ATOMIC_STORE_128,

  // 128-bit atomic compare-and-swap.
  // Val, CC, OUTCHAIN = ATOMIC_CMP_SWAP_128(INCHAIN, ptr, cmp, swap)
  ATOMIC_CMP_SWAP_128,

  // Byte swapping load/store. Same operands as regular load/store.
  LRV, STRV,

  // Element swapping load/store. Same operands as regular load/store.
  VLER, VSTER,

  // Prefetch from the second operand using the 4-bit control code in
  // the first operand. The code is 1 for a load prefetch and 2 for
  // a store prefetch.
  PREFETCH
};

// Return true if OPCODE is some kind of PC-relative address.
inline bool isPCREL(unsigned Opcode) {
  return Opcode == PCREL_WRAPPER || Opcode == PCREL_OFFSET;
}
} // end namespace SystemZISD

namespace SystemZICMP {
// Describes whether an integer comparison needs to be signed or unsigned,
// or whether either type is OK.
enum {
  Any,
  UnsignedOnly,
  SignedOnly
};
} // end namespace SystemZICMP

class SystemZSubtarget;
class SystemZTargetMachine;

class SystemZTargetLowering : public TargetLowering {
public:
  explicit SystemZTargetLowering(const TargetMachine &TM,
                                 const SystemZSubtarget &STI);

  bool useSoftFloat() const override;

  // Override TargetLowering.
  MVT getScalarShiftAmountTy(const DataLayout &, EVT) const override {
    return MVT::i32;
  }
  MVT getVectorIdxTy(const DataLayout &DL) const override {
    // Only the lower 12 bits of an element index are used, so we don't
    // want to clobber the upper 32 bits of a GPR unnecessarily.
    return MVT::i32;
  }
  TargetLoweringBase::LegalizeTypeAction getPreferredVectorAction(MVT VT)
      const override {
    // Widen subvectors to the full width rather than promoting integer
    // elements. This is better because:
    //
    // (a) it means that we can handle the ABI for passing and returning
    //     sub-128 vectors without having to handle them as legal types.
    //
    // (b) we don't have instructions to extend on load and truncate on store,
    //     so promoting the integers is less efficient.
    //
    // (c) there are no multiplication instructions for the widest integer
    //     type (v2i64).
    if (VT.getScalarSizeInBits() % 8 == 0)
      return TypeWidenVector;
    return TargetLoweringBase::getPreferredVectorAction(VT);
  }
  bool isCheapToSpeculateCtlz() const override { return true; }
  EVT getSetCCResultType(const DataLayout &DL, LLVMContext &,
                         EVT) const override;
  bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
                                  EVT VT) const override;
  bool isFPImmLegal(const APFloat &Imm, EVT VT,
                    bool ForCodeSize) const override;
  bool hasInlineStackProbe(MachineFunction &MF) const override;
  bool isLegalICmpImmediate(int64_t Imm) const override;
  bool isLegalAddImmediate(int64_t Imm) const override;
  bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty,
                             unsigned AS,
                             Instruction *I = nullptr) const override;
  bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AS,
                                      unsigned Align,
                                      MachineMemOperand::Flags Flags,
                                      bool *Fast) const override;
  bool isTruncateFree(Type *, Type *) const override;
  bool isTruncateFree(EVT, EVT) const override;

  bool shouldFormOverflowOp(unsigned Opcode, EVT VT,
                            bool MathUsed) const override {
    // Form add and sub with overflow intrinsics regardless of any extra
    // users of the math result.
    return VT == MVT::i32 || VT == MVT::i64;
  }

  const char *getTargetNodeName(unsigned Opcode) const override;
  std::pair<unsigned, const TargetRegisterClass *>
  getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                               StringRef Constraint, MVT VT) const override;
  TargetLowering::ConstraintType
  getConstraintType(StringRef Constraint) const override;
  TargetLowering::ConstraintWeight
  getSingleConstraintMatchWeight(AsmOperandInfo &info,
                                 const char *constraint) const override;
  void LowerAsmOperandForConstraint(SDValue Op,
                                    std::string &Constraint,
                                    std::vector<SDValue> &Ops,
                                    SelectionDAG &DAG) const override;

  unsigned getInlineAsmMemConstraint(StringRef ConstraintCode) const override {
    if (ConstraintCode.size() == 1) {
      switch (ConstraintCode[0]) {
      default:
        break;
      case 'o':
        return InlineAsm::Constraint_o;
      case 'Q':
        return InlineAsm::Constraint_Q;
      case 'R':
        return InlineAsm::Constraint_R;
      case 'S':
        return InlineAsm::Constraint_S;
      case 'T':
        return InlineAsm::Constraint_T;
      }
    }
    return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
  }

  Register getRegisterByName(const char *RegName, LLT VT,
                             const MachineFunction &MF) const override;

  /// If a physical register, this returns the register that receives the
  /// exception address on entry to an EH pad.
  Register
  getExceptionPointerRegister(const Constant *PersonalityFn) const override {
    return SystemZ::R6D;
  }

  /// If a physical register, this returns the register that receives the
  /// exception typeid on entry to a landing pad.
  Register
  getExceptionSelectorRegister(const Constant *PersonalityFn) const override {
    return SystemZ::R7D;
  }

  /// Override to support customized stack guard loading.
  bool useLoadStackGuardNode() const override {
    return true;
  }
  void insertSSPDeclarations(Module &M) const override {
  }

  MachineBasicBlock *
  EmitInstrWithCustomInserter(MachineInstr &MI,
                              MachineBasicBlock *BB) const override;
  SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
  void LowerOperationWrapper(SDNode *N, SmallVectorImpl<SDValue> &Results,
                             SelectionDAG &DAG) const override;
  void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                          SelectionDAG &DAG) const override;
  const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const override;
  bool allowTruncateForTailCall(Type *, Type *) const override;
  bool mayBeEmittedAsTailCall(const CallInst *CI) const override;
  SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv,
                               bool isVarArg,
                               const SmallVectorImpl<ISD::InputArg> &Ins,
                               const SDLoc &DL, SelectionDAG &DAG,
                               SmallVectorImpl<SDValue> &InVals) const override;
  SDValue LowerCall(CallLoweringInfo &CLI,
                    SmallVectorImpl<SDValue> &InVals) const override;

  bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
                      bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      LLVMContext &Context) const override;
  SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
                      SelectionDAG &DAG) const override;
  SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;

  /// Determine which of the bits specified in Mask are known to be either
  /// zero or one and return them in the KnownZero/KnownOne bitsets.
  void computeKnownBitsForTargetNode(const SDValue Op,
                                     KnownBits &Known,
                                     const APInt &DemandedElts,
                                     const SelectionDAG &DAG,
                                     unsigned Depth = 0) const override;

  /// Determine the number of bits in the operation that are sign bits.
  unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
                                           const APInt &DemandedElts,
                                           const SelectionDAG &DAG,
                                           unsigned Depth) const override;

  ISD::NodeType getExtendForAtomicOps() const override {
    return ISD::ANY_EXTEND;
  }

  bool supportSwiftError() const override {
    return true;
  }

  unsigned getStackProbeSize(MachineFunction &MF) const;

private:
  const SystemZSubtarget &Subtarget;

  // Implement LowerOperation for individual opcodes.
  SDValue getVectorCmp(SelectionDAG &DAG, unsigned Opcode,
                       const SDLoc &DL, EVT VT,
                       SDValue CmpOp0, SDValue CmpOp1, SDValue Chain) const;
  SDValue lowerVectorSETCC(SelectionDAG &DAG, const SDLoc &DL,
                           EVT VT, ISD::CondCode CC,
                           SDValue CmpOp0, SDValue CmpOp1,
                           SDValue Chain = SDValue(),
                           bool IsSignaling = false) const;
  SDValue lowerSETCC(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerSTRICT_FSETCC(SDValue Op, SelectionDAG &DAG,
                             bool IsSignaling) const;
  SDValue lowerBR_CC(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerGlobalAddress(GlobalAddressSDNode *Node,
                             SelectionDAG &DAG) const;
  SDValue lowerTLSGetOffset(GlobalAddressSDNode *Node,
                            SelectionDAG &DAG, unsigned Opcode,
                            SDValue GOTOffset) const;
  SDValue lowerThreadPointer(const SDLoc &DL, SelectionDAG &DAG) const;
  SDValue lowerGlobalTLSAddress(GlobalAddressSDNode *Node,
                                SelectionDAG &DAG) const;
  SDValue lowerBlockAddress(BlockAddressSDNode *Node,
                            SelectionDAG &DAG) const;
  SDValue lowerJumpTable(JumpTableSDNode *JT, SelectionDAG &DAG) const;
  SDValue lowerConstantPool(ConstantPoolSDNode *CP, SelectionDAG &DAG) const;
  SDValue lowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVACOPY(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerGET_DYNAMIC_AREA_OFFSET(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerSMUL_LOHI(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerUMUL_LOHI(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerSDIVREM(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerUDIVREM(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerXALUO(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerADDSUBCARRY(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerBITCAST(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerCTPOP(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerATOMIC_LOAD(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerATOMIC_LOAD_OP(SDValue Op, SelectionDAG &DAG,
                              unsigned Opcode) const;
  SDValue lowerATOMIC_LOAD_SUB(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerATOMIC_CMP_SWAP(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerSTACKSAVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerPREFETCH(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerINTRINSIC_W_CHAIN(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
  bool isVectorElementLoad(SDValue Op) const;
  SDValue buildVector(SelectionDAG &DAG, const SDLoc &DL, EVT VT,
                      SmallVectorImpl<SDValue> &Elems) const;
  SDValue lowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerSIGN_EXTEND_VECTOR_INREG(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerZERO_EXTEND_VECTOR_INREG(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerShift(SDValue Op, SelectionDAG &DAG, unsigned ByScalar) const;

  bool canTreatAsByteVector(EVT VT) const;
  SDValue combineExtract(const SDLoc &DL, EVT ElemVT, EVT VecVT, SDValue OrigOp,
                         unsigned Index, DAGCombinerInfo &DCI,
                         bool Force) const;
  SDValue combineTruncateExtract(const SDLoc &DL, EVT TruncVT, SDValue Op,
                                 DAGCombinerInfo &DCI) const;
  SDValue combineZERO_EXTEND(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineSIGN_EXTEND(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineSIGN_EXTEND_INREG(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineMERGE(SDNode *N, DAGCombinerInfo &DCI) const;
  bool canLoadStoreByteSwapped(EVT VT) const;
  SDValue combineLOAD(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineSTORE(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineVECTOR_SHUFFLE(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineEXTRACT_VECTOR_ELT(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineJOIN_DWORDS(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineFP_ROUND(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineFP_EXTEND(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineINT_TO_FP(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineBSWAP(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineBR_CCMASK(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineSELECT_CCMASK(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineGET_CCMASK(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineIntDIVREM(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineINTRINSIC(SDNode *N, DAGCombinerInfo &DCI) const;

  SDValue unwrapAddress(SDValue N) const override;

  // If the last instruction before MBBI in MBB was some form of COMPARE,
  // try to replace it with a COMPARE AND BRANCH just before MBBI.
  // CCMask and Target are the BRC-like operands for the branch.
  // Return true if the change was made.
  bool convertPrevCompareToBranch(MachineBasicBlock *MBB,
                                  MachineBasicBlock::iterator MBBI,
                                  unsigned CCMask,
                                  MachineBasicBlock *Target) const;

  // Implement EmitInstrWithCustomInserter for individual operation types.
  MachineBasicBlock *emitSelect(MachineInstr &MI, MachineBasicBlock *BB) const;
  MachineBasicBlock *emitCondStore(MachineInstr &MI, MachineBasicBlock *BB,
                                   unsigned StoreOpcode, unsigned STOCOpcode,
                                   bool Invert) const;
  MachineBasicBlock *emitPair128(MachineInstr &MI,
                                 MachineBasicBlock *MBB) const;
  MachineBasicBlock *emitExt128(MachineInstr &MI, MachineBasicBlock *MBB,
                                bool ClearEven) const;
  MachineBasicBlock *emitAtomicLoadBinary(MachineInstr &MI,
                                          MachineBasicBlock *BB,
                                          unsigned BinOpcode, unsigned BitSize,
                                          bool Invert = false) const;
  MachineBasicBlock *emitAtomicLoadMinMax(MachineInstr &MI,
                                          MachineBasicBlock *MBB,
                                          unsigned CompareOpcode,
                                          unsigned KeepOldMask,
                                          unsigned BitSize) const;
  MachineBasicBlock *emitAtomicCmpSwapW(MachineInstr &MI,
                                        MachineBasicBlock *BB) const;
  MachineBasicBlock *emitMemMemWrapper(MachineInstr &MI, MachineBasicBlock *BB,
                                       unsigned Opcode) const;
  MachineBasicBlock *emitStringWrapper(MachineInstr &MI, MachineBasicBlock *BB,
                                       unsigned Opcode) const;
  MachineBasicBlock *emitTransactionBegin(MachineInstr &MI,
                                          MachineBasicBlock *MBB,
                                          unsigned Opcode, bool NoFloat) const;
  MachineBasicBlock *emitLoadAndTestCmp0(MachineInstr &MI,
                                         MachineBasicBlock *MBB,
                                         unsigned Opcode) const;
  MachineBasicBlock *emitProbedAlloca(MachineInstr &MI,
                                      MachineBasicBlock *MBB) const;

  MachineMemOperand::Flags
  getTargetMMOFlags(const Instruction &I) const override;
  const TargetRegisterClass *getRepRegClassFor(MVT VT) const override;
};

struct SystemZVectorConstantInfo {
private:
  APInt IntBits;    // The 128 bits as an integer.
  APInt SplatBits;  // Smallest splat value.
  APInt SplatUndef; // Bits corresponding to undef operands of the BVN.
  unsigned SplatBitSize = 0;
  bool isFP128 = false;

public:
  unsigned Opcode = 0;
  SmallVector<unsigned, 2> OpVals;
  MVT VecVT;
  SystemZVectorConstantInfo(APFloat FPImm);
  SystemZVectorConstantInfo(BuildVectorSDNode *BVN);
  bool isVectorConstantLegal(const SystemZSubtarget &Subtarget);
};
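
// Illustrative use (a sketch, not a prescribed contract): after analyzing a
// BUILD_VECTOR node, a caller can check whether the constant is cheap to
// materialize and, if so, emit the opcode that the analysis selected:
//
//   SystemZVectorConstantInfo VCI(BVN);
//   if (VCI.isVectorConstantLegal(Subtarget)) {
//     // VCI.Opcode is a SystemZISD node (e.g. BYTE_MASK or REPLICATE) and
//     // VCI.OpVals holds its immediate operands, built with type VCI.VecVT.
//   }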

} // end namespace llvm

#endif